From d7ce062a4086579fa1991c125d395784c997f5e2 Mon Sep 17 00:00:00 2001
From: Carsten Leonhardt
Date: Tue, 5 Feb 2019 00:10:15 +0100
Subject: Import bacula_9.4.2.orig.tar.gz

[dgit import orig bacula_9.4.2.orig.tar.gz]
---
 .gitignore | 1306 +
 AUTHORS | 133 +
 COPYING | 2 +
 COPYRIGHT | 2 +
 ChangeLog | 7473 ++++
 INSTALL | 77 +
 LICENSE | 786 +
 LICENSE-FAQ | 135 +
 LICENSE-FOSS | 338 +
 Makefile.in | 224 +
 README | 72 +
 README.AIX | 56 +
 ReleaseNotes | 2071 ++
 SUPPORT | 33 +
 VERIFYING | 38 +
 autoconf/Make.common.in | 102 +
 autoconf/acconfig.h | 206 +
 autoconf/aclocal.m4 | 51 +
 autoconf/aclocal.m4.save | 51 +
 autoconf/bacula-macros/compare-version.m4 | 101 +
 autoconf/bacula-macros/db.m4 | 1006 +
 autoconf/bacula-macros/largefiles.m4 | 109 +
 autoconf/bacula-macros/os.m4 | 252 +
 autoconf/confdefs.h | 1 +
 autoconf/config.guess | 1480 +
 autoconf/config.h.in | 1286 +
 autoconf/config.log | 3 +
 autoconf/config.rpath | 571 +
 autoconf/config.sub | 1801 +
 autoconf/configure.in | 3665 ++
 autoconf/gettext-macros/codeset.m4 | 21 +
 autoconf/gettext-macros/gettext.m4 | 549 +
 autoconf/gettext-macros/glibc2.m4 | 30 +
 autoconf/gettext-macros/glibc21.m4 | 30 +
 autoconf/gettext-macros/iconv.m4 | 101 +
 autoconf/gettext-macros/intdiv0.m4 | 70 +
 autoconf/gettext-macros/intmax.m4 | 30 +
 autoconf/gettext-macros/inttypes-pri.m4 | 30 +
 autoconf/gettext-macros/inttypes.m4 | 25 +
 autoconf/gettext-macros/inttypes_h.m4 | 26 +
 autoconf/gettext-macros/isc-posix.m4 | 24 +
 autoconf/gettext-macros/lcmessage.m4 | 30 +
 autoconf/gettext-macros/lib-ld.m4 | 110 +
 autoconf/gettext-macros/lib-link.m4 | 553 +
 autoconf/gettext-macros/lib-prefix.m4 | 153 +
 autoconf/gettext-macros/longdouble.m4 | 28 +
 autoconf/gettext-macros/longlong.m4 | 23 +
 autoconf/gettext-macros/nls.m4 | 51 +
 autoconf/gettext-macros/po.m4 | 429 +
 autoconf/gettext-macros/printf-posix.m4 | 44 +
 autoconf/gettext-macros/progtest.m4 | 92 +
 autoconf/gettext-macros/signed.m4 | 17 +
 autoconf/gettext-macros/size_max.m4 | 59 +
 autoconf/gettext-macros/stdint_h.m4 | 26 +
 autoconf/gettext-macros/uintmax_t.m4 | 30 +
 autoconf/gettext-macros/ulonglong.m4 | 23 +
 autoconf/gettext-macros/wchar_t.m4 | 20 +
 autoconf/gettext-macros/wint_t.m4 | 20 +
 autoconf/gettext-macros/xsize.m4 | 13 +
 autoconf/install-sh | 527 +
 autoconf/install.sh | 235 +
 autoconf/libtool/libtool.m4 | 7997 +++++
 autoconf/libtool/ltdl.m4 | 817 +
 autoconf/libtool/ltoptions.m4 | 384 +
 autoconf/libtool/ltsugar.m4 | 123 +
 autoconf/libtool/ltversion.m4 | 23 +
 autoconf/libtool/lt~obsolete.m4 | 98 +
 autoconf/ltmain.sh | 9661 ++++++
 autoconf/mkinstalldirs | 162 +
 autoconf/python.conf.py | 14 +
 autoconf/randpass | 27 +
 autoconf/randpass.awk | 14 +
 autoconf/randpass.bc | 44 +
 autoconf/who_and_what_of_libtool_patch | 170 +
 configure | 34198 +++++++++++++++++++
 examples/AFS-README | 45 +
 examples/Write-DVD-with-UDF.txt | 75 +
 examples/afs-bacula | 296 +
 .../autochangers/Sun-desktop-20Gb-4mm-autoloader | 43 +
 examples/autochangers/ak-mtx-changer.txt | 218 +
 examples/autochangers/bacula-barcodes | 53 +
 examples/autochangers/chio-bacula | 152 +
 examples/autochangers/chio-changer | 125 +
 examples/autochangers/chio-changer-freebsd | 194 +
 examples/autochangers/chio-changer-openbsd | 134 +
 examples/autochangers/chio-changer.Sony-TSL-SA300C | 167 +
 examples/autochangers/locking-mtx-changer | 178 +
 examples/autochangers/mtx-changer.Adic-Scalar-100 | 1196 +
 examples/autochangers/mtx-changer.Adic-Scalar-24 | 133 +
 examples/autochangers/mtx-changer.Sony-TSL-SA300C | 40 +
 .../autochangers/mtx-changer.StorageTek-HPA4853 | 157 +
examples/autochangers/multiple-drive-changer.txt | 257 + examples/autochangers/solaris-mtx-changer | 174 + examples/backup-acls.txt | 22 + examples/backup-every-other-week.txt | 76 + examples/backup-to-cdwriter.txt | 813 + examples/client-backup | 15 + examples/conf/bacula-defs.m4 | 45 + examples/conf/console.conf | 10 + examples/conf/defaultconfig | 23 + examples/conf/fileset_convert.pl | 34 + examples/conf/kernsconfig | 30 + examples/conf/m4.Makefile | 29 + examples/conf/m4.additions | 43 + examples/conf/m4.bacula-dir.conf | 364 + examples/conf/m4.bacula-fd.conf | 31 + examples/conf/m4.bacula-sd.conf | 152 + examples/conf/m4.email | 28 + examples/conf/many-clients.txt | 106 + examples/database/bacula-sqlite_2_mysqldump.sh | 23 + examples/database/dbcheck.sql | 197 + examples/database/postgresql-dump.txt | 42 + examples/database/postgresql-mysql-dump.txt | 99 + examples/database/sqlite2pgsql | 146 + examples/devices/DDS.conf | 10 + examples/devices/DDS4.conf | 19 + examples/devices/EXB-8200.conf | 13 + examples/devices/EXB-8500.conf | 10 + examples/devices/Exabyte.conf | 15 + examples/devices/FreeBSD.conf | 16 + examples/devices/HP-DLT80.conf | 12 + examples/devices/HP-autoloader.conf | 14 + examples/devices/LTO-2.conf | 20 + examples/devices/LTO-3.conf | 20 + examples/devices/LTO-4.conf | 20 + examples/devices/OnStream.conf | 19 + examples/devices/OpenBSD.conf | 14 + examples/devices/README | 17 + examples/devices/Sony-DDS.conf | 10 + examples/devices/two-drive-autoloader.conf | 48 + examples/local_partitions | 21 + examples/local_partitions.txt | 8 + examples/nagios/check_bacula/Makefile.in | 100 + examples/nagios/check_bacula/README | 15 + examples/nagios/check_bacula/authenticate.c | 178 + examples/nagios/check_bacula/check_bacula.c | 370 + examples/nagios/check_bacula/check_bacula.h | 121 + examples/nagios/check_bacula_pools.sh | 159 + examples/nagios/nagios.txt | 205 + examples/recover.pl | 2886 ++ examples/reports/bacula_mail_summary.sh | 73 + examples/reports/baculareport.pl | 1161 + examples/reports/check_bacula_pools.sh | 178 + examples/reports/checkhost | 53 + examples/reports/is_client_alive | 21 + examples/reports/next_tape.sh | 78 + examples/reports/pool-query.txt | 108 + examples/reports/report.pl | 116 + examples/sample-query.sql | 242 + examples/ssh-tunnel-README.txt | 112 + examples/ssh-tunnel.sh | 41 + examples/upgrade-win32-client.txt | 75 + examples/vm/bacula.data | 2 + examples/vm/blabela.exec | 83 + examples/vm/blabeli.exec | 83 + examples/vm/tape-label-tools.txt | 27 + examples/vm/vmbacula.tgz | Bin 0 -> 122401 bytes examples/vm/vmbacula.txt | 28 + manpages/Makefile.in | 50 + manpages/bacula-dir.8 | 104 + manpages/bacula-fd.8 | 75 + manpages/bacula-sd.8 | 75 + manpages/bacula-tray-monitor.1 | 46 + manpages/bacula.8 | 122 + manpages/bat.1 | 46 + manpages/bconsole.8 | 60 + manpages/bcopy.8 | 65 + manpages/bextract.8 | 68 + manpages/bls.8 | 86 + manpages/bpluginfo.8 | 121 + manpages/bregex.8 | 62 + manpages/bscan.8 | 106 + manpages/bsmtp.1 | 120 + manpages/btape.8 | 142 + manpages/btraceback.8 | 69 + manpages/bwild.8 | 67 + manpages/dbcheck.8 | 204 + platforms/Makefile.in | 109 + platforms/README | 5 + platforms/aix/Makefile.in | 88 + platforms/alpha/Makefile.in | 51 + platforms/alpha/bacula-fd.in | 44 + platforms/bsdi/Makefile.in | 143 + platforms/bsdi/bacula-dir.in | 41 + platforms/bsdi/bacula-fd.in | 42 + platforms/bsdi/bacula-sd.in | 42 + platforms/contrib-rpm/README | 61 + platforms/contrib-rpm/build_rpm.sh | 203 + platforms/contrib-rpm/rpm_wizard.sh | 121 + 
platforms/contrib-rpm/rpmkey.spec | 52 + platforms/darwin/Makefile.in | 49 + platforms/debian/Makefile.in | 91 + platforms/debian/bacula-dir.in | 79 + platforms/debian/bacula-fd.in | 79 + platforms/debian/bacula-sd.in | 79 + platforms/freebsd/Makefile.in | 143 + platforms/freebsd/bacula-dir.in | 42 + platforms/freebsd/bacula-fd.in | 42 + platforms/freebsd/bacula-sd.in | 42 + platforms/freebsd/pthreads-fix.txt | 206 + platforms/freebsd/tapetest.c | 605 + platforms/gentoo/1.36.1-cdrecord-configure.patch | 18 + platforms/gentoo/1.36.2-cdrecord-configure.patch | 19 + platforms/gentoo/Makefile.in | 60 + platforms/gentoo/bacula-dir.in | 31 + platforms/gentoo/bacula-fd.in | 31 + platforms/gentoo/bacula-init.in | 94 + platforms/gentoo/bacula-sd.in | 31 + platforms/gentoo/bacula.ebuild | 428 + platforms/hurd/Makefile.in | 91 + platforms/hurd/bacula-dir.in | 78 + platforms/hurd/bacula-fd.in | 78 + platforms/hurd/bacula-sd.in | 78 + platforms/install-symlinks | 99 + platforms/irix/Makefile.in | 88 + platforms/irix/bacula-dir.in | 42 + platforms/irix/bacula-fd.in | 44 + platforms/irix/bacula-sd.in | 42 + platforms/mandrake/Makefile.in | 88 + platforms/mandrake/bacula-dir.in | 48 + platforms/mandrake/bacula-fd.in | 48 + platforms/mandrake/bacula-sd.in | 48 + platforms/mandrake/bacula.spec.bluca.in | 501 + platforms/openbsd/Makefile.in | 143 + platforms/openbsd/bacula-dir.in | 42 + platforms/openbsd/bacula-fd.in | 42 + platforms/openbsd/bacula-sd.in | 42 + platforms/osx/Makefile.in | 166 + platforms/osx/README | 55 + .../osx/files/installer.pmdoc.in/01destdir.xml | 27 + platforms/osx/files/installer.pmdoc.in/index.xml | 30 + platforms/osx/files/org.bacula.bacula-fd.plist.in | 17 + platforms/osx/files/uninstall.command.in | 29 + platforms/osx/installer-gencontents.py | 56 + platforms/osx/resources/ReadMe.html.in | 62 + platforms/osx/resources/postflight.in | 36 + platforms/osx/resources/preupgrade.in | 7 + platforms/redhat/Makefile.in | 93 + platforms/redhat/bacula-dir.in | 93 + platforms/redhat/bacula-fd.in | 88 + platforms/redhat/bacula-sd.in | 94 + platforms/rpms/redhat/bacula-aligned.spec.in | 142 + platforms/rpms/redhat/bacula-bat.spec.in | 348 + platforms/rpms/redhat/bacula-client-static.spec.in | 286 + platforms/rpms/redhat/bacula-docs.spec.in | 90 + platforms/rpms/redhat/bacula-mtx.spec.in | 126 + platforms/rpms/redhat/bacula.spec.in | 1361 + platforms/rpms/suse/bacula-aligned.spec.in | 142 + platforms/rpms/suse/bacula-bat.spec.in | 351 + platforms/rpms/suse/bacula-docs.spec.in | 94 + platforms/rpms/suse/bacula-mtx.spec.in | 126 + platforms/rpms/suse/bacula.spec.in | 1384 + platforms/slackware/Makefile.in | 69 + platforms/slackware/functions.bacula.in | 191 + platforms/slackware/local-install.sh | 89 + platforms/slackware/rc.bacula-dir.in | 61 + platforms/slackware/rc.bacula-fd.in | 61 + platforms/slackware/rc.bacula-sd.in | 61 + platforms/solaris/Makefile.in | 91 + platforms/solaris/bacula-dir.in | 57 + platforms/solaris/bacula-fd.in | 57 + platforms/solaris/bacula-sd.in | 57 + platforms/solaris/copyright | 18 + platforms/solaris/makepkg.sh | 82 + platforms/solaris/pkginfo | 12 + platforms/solaris/postinstall | 9 + platforms/solaris/postremove | 9 + platforms/solaris/prototype.master | 24 + platforms/suse/Makefile.in | 87 + platforms/suse/bacula-dir-suse-sqlite.patch | 13 + platforms/suse/bacula-dir.in | 65 + platforms/suse/bacula-fd.in | 65 + platforms/suse/bacula-sd.in | 65 + platforms/suse/bacula.in | 216 + platforms/systemd/Makefile.in | 114 + platforms/systemd/bacula-dir.service.in | 34 + 
platforms/systemd/bacula-fd.service.in | 32 + platforms/systemd/bacula-sd.service.in | 32 + platforms/systemd/bacula.conf.in | 3 + platforms/ubuntu/Makefile.in | 91 + platforms/ubuntu/bacula-dir.in | 87 + platforms/ubuntu/bacula-fd.in | 87 + platforms/ubuntu/bacula-sd.in | 87 + platforms/unknown/Makefile.in | 49 + po/ChangeLog | 0 po/LINGUAS | 6 + po/Makefile.in.in | 393 + po/Makevars | 41 + po/POTFILES.in | 334 + po/README | 66 + po/Rules-quot | 47 + po/bacula.pot | 610 + po/boldquot.sed | 10 + po/de.po | 9259 +++++ po/en@boldquot.header | 25 + po/en@quot.header | 22 + po/es.po | 16931 +++++++++ po/es_AR.po | 14464 ++++++++ po/fi.po | 15929 +++++++++ po/fr.po | 13630 ++++++++ po/insert-header.sin | 23 + po/it.po | 15533 +++++++++ po/nl.po | 5618 +++ po/pl.po | 17501 ++++++++++ po/quot.sed | 6 + po/remove-potcdate.sin | 19 + po/sv.po | 609 + po/uk.po | 6436 ++++ release/README | 53 + release/ReleaseProcedure.txt | 75 + release/bgit.py | 117 + release/check_packages | 25 + release/clean | 7 + release/config | 53 + release/git2changelog.pl | 84 + release/makeall | 17 + release/makebacularel | 125 + release/makedocsonly | 59 + release/makedocsrel | 124 + release/makemanualsrel | 64 + release/package-list | 8 + release/pushtags | 32 + release/sign | 8 + release/upload | 38 + scripts/Makefile.in | 120 + scripts/bacula-ctl-dir.in | 262 + scripts/bacula-ctl-fd.in | 259 + scripts/bacula-ctl-sd.in | 260 + scripts/bacula-tray-monitor.desktop.in | 10 + scripts/bacula.in | 71 + scripts/bacula.png | Bin 0 -> 2983 bytes scripts/bacula.vim | 176 + scripts/baculabackupreport.in | 858 + scripts/bat.console_apps.in | 4 + scripts/bat.desktop.consolehelper.in | 11 + scripts/bat.desktop.in | 11 + scripts/bat.desktop.xsu.in | 11 + scripts/bat.pamd | 7 + scripts/bconsole.in | 41 + scripts/breload.in | 63 + scripts/btraceback.dbx | 53 + scripts/btraceback.gdb | 33 + scripts/btraceback.in | 65 + scripts/btraceback.mdb | 28 + scripts/defaultconfig | 26 + scripts/devel_bacula.in | 280 + scripts/disk-changer.in | 397 + scripts/filetype.vim | 7 + scripts/freespace | 34 + scripts/isworm | 84 + scripts/logrotate.in | 18 + scripts/logwatch/Makefile.in | 32 + scripts/logwatch/README | 43 + scripts/logwatch/applybaculadate | 46 + scripts/logwatch/bacula | 65 + scripts/logwatch/logfile.bacula.conf.in | 3 + scripts/logwatch/services.bacula.conf | 6 + scripts/magic.bacula | 11 + scripts/magic.bacula.txt | 17 + scripts/manual_prune.pl | 251 + scripts/mtx-changer.conf | 89 + scripts/mtx-changer.in | 353 + scripts/tapealert | 68 + src/.indent.pro | 27 + src/Makefile.in | 66 + src/baconfig.h | 752 + src/bacula.h | 205 + src/bc_types.h | 252 + src/bsd | 4 + src/c | 18 + src/c.scr | 8 + src/cats/Makefile.in | 323 + src/cats/bdb.h | 319 + src/cats/bdb_mysql.h | 70 + src/cats/bdb_postgresql.h | 64 + src/cats/bdb_sqlite.h | 72 + src/cats/bvfs.c | 1604 + src/cats/bvfs.h | 307 + src/cats/cats.c | 152 + src/cats/cats.h | 629 + src/cats/cats_null.c | 37 + src/cats/create_bacula_database.in | 50 + src/cats/create_mysql_database.in | 20 + src/cats/create_postgresql_database.in | 62 + src/cats/create_sqlite3_database.in | 15 + src/cats/create_test_database | 16 + src/cats/delete_catalog_backup.in | 25 + src/cats/drop_bacula_database.in | 43 + src/cats/drop_bacula_tables.in | 50 + src/cats/drop_mysql_database.in | 20 + src/cats/drop_mysql_tables.in | 49 + src/cats/drop_postgresql_database.in | 18 + src/cats/drop_postgresql_tables.in | 47 + src/cats/drop_sqlite3_database.in | 11 + src/cats/drop_sqlite3_tables.in | 11 + 
src/cats/drop_test_tables | 26 + src/cats/fix_postgresql_tables | 35 + src/cats/grant_bacula_privileges.in | 51 + src/cats/grant_mysql_privileges.in | 41 + src/cats/grant_postgresql_privileges.in | 79 + src/cats/grant_privileges | 10 + src/cats/grant_sqlite3_privileges.in | 10 + src/cats/install-default-backend.in | 44 + src/cats/make_bacula_tables.in | 50 + src/cats/make_catalog_backup.in | 111 + src/cats/make_catalog_backup.pl.in | 195 + src/cats/make_mysql_tables.in | 487 + src/cats/make_postgresql_tables.in | 498 + src/cats/make_sqlite3_tables.in | 484 + src/cats/make_test_tables | 152 + src/cats/mysql.c | 789 + src/cats/mysql.in | 11 + src/cats/postgresql.c | 1184 + src/cats/postgresql.in | 11 + src/cats/protos.h | 299 + src/cats/sql.c | 987 + src/cats/sql_cmds.c | 1053 + src/cats/sql_cmds.h | 92 + src/cats/sql_create.c | 1288 + src/cats/sql_delete.c | 254 + src/cats/sql_find.c | 540 + src/cats/sql_get.c | 1757 + src/cats/sql_list.c | 761 + src/cats/sql_update.c | 529 + src/cats/sqlite.c | 739 + src/cats/sqlite.in | 10 + src/cats/update_bacula_tables.in | 50 + src/cats/update_mysql_tables.in | 258 + src/cats/update_postgresql_tables.in | 175 + src/cats/update_sqlite3_tables.in | 270 + src/ch.h | 48 + src/console/Makefile.in | 141 + src/console/authenticate.c | 188 + src/console/bbconsjson.c | 598 + src/console/bconsole.conf.in | 13 + src/console/conio.c | 1146 + src/console/conio.h | 30 + src/console/console.c | 1770 + src/console/console_conf.c | 323 + src/console/console_conf.h | 99 + src/console/func.h | 103 + src/count-lines | 9 + src/dird/Makefile.in | 162 + src/dird/README-config | 134 + src/dird/admin.c | 125 + src/dird/authenticate.c | 459 + src/dird/autoprune.c | 227 + src/dird/backup.c | 1081 + src/dird/bacula-dir.conf.in | 323 + src/dird/bdirjson.c | 1488 + src/dird/bsr.c | 761 + src/dird/bsr.h | 57 + src/dird/catreq.c | 815 + src/dird/dir_plugins.c | 538 + src/dird/dir_plugins.h | 184 + src/dird/dird.c | 1520 + src/dird/dird.h | 83 + src/dird/dird_conf.c | 2578 ++ src/dird/dird_conf.h | 790 + src/dird/expand.c | 465 + src/dird/fd_cmds.c | 1051 + src/dird/getmsg.c | 414 + src/dird/inc_conf.c | 798 + src/dird/job.c | 1921 ++ src/dird/jobq.c | 906 + src/dird/jobq.h | 69 + src/dird/mac.c | 943 + src/dird/mac_sql.c | 763 + src/dird/mountreq.c | 73 + src/dird/msgchan.c | 622 + src/dird/newvol.c | 171 + src/dird/next_vol.c | 515 + src/dird/protos.h | 361 + src/dird/query.sql | 7 + src/dird/recycle.c | 80 + src/dird/restore.c | 755 + src/dird/run_conf.c | 688 + src/dird/scheduler.c | 455 + src/dird/snapshot.c | 766 + src/dird/ua.h | 145 + src/dird/ua_acl.c | 119 + src/dird/ua_cmds.c | 2678 ++ src/dird/ua_dotcmds.c | 2179 ++ src/dird/ua_input.c | 251 + src/dird/ua_label.c | 1288 + src/dird/ua_output.c | 1193 + src/dird/ua_prune.c | 859 + src/dird/ua_purge.c | 806 + src/dird/ua_query.c | 299 + src/dird/ua_restore.c | 1739 + src/dird/ua_run.c | 2605 ++ src/dird/ua_select.c | 1662 + src/dird/ua_server.c | 231 + src/dird/ua_status.c | 1537 + src/dird/ua_tree.c | 935 + src/dird/ua_update.c | 1073 + src/dird/vbackup.c | 600 + src/dird/verify.c | 887 + src/filed/Makefile.in | 194 + src/filed/accurate.c | 649 + src/filed/authenticate.c | 268 + src/filed/backup.c | 1456 + src/filed/backup.h | 86 + src/filed/bacl.c | 777 + src/filed/bacl.h | 195 + src/filed/bacl_freebsd.c | 533 + src/filed/bacl_freebsd.h | 70 + src/filed/bacl_linux.c | 344 + src/filed/bacl_linux.h | 64 + src/filed/bacl_osx.c | 294 + src/filed/bacl_osx.h | 64 + src/filed/bacl_solaris.c | 324 + src/filed/bacl_solaris.h | 84 + 
src/filed/bacula-fd.conf.in | 48 + src/filed/bfdjson.c | 642 + src/filed/bxattr.c | 954 + src/filed/bxattr.h | 231 + src/filed/bxattr_freebsd.c | 465 + src/filed/bxattr_freebsd.h | 85 + src/filed/bxattr_linux.c | 290 + src/filed/bxattr_linux.h | 68 + src/filed/bxattr_osx.c | 291 + src/filed/bxattr_osx.h | 66 + src/filed/bxattr_solaris.c | 986 + src/filed/bxattr_solaris.h | 162 + src/filed/crypto.c | 321 + src/filed/estimate.c | 117 + src/filed/fd_plugins.c | 2258 ++ src/filed/fd_plugins.h | 401 + src/filed/fd_snapshot.c | 1890 + src/filed/fd_snapshot.h | 67 + src/filed/filed.c | 706 + src/filed/filed.h | 72 + src/filed/filed_conf.c | 595 + src/filed/filed_conf.h | 148 + src/filed/heartbeat.c | 213 + src/filed/hello.c | 369 + src/filed/job.c | 3020 ++ src/filed/protos.h | 86 + src/filed/restore.c | 1810 + src/filed/restore.h | 73 + src/filed/status.c | 519 + src/filed/verify.c | 375 + src/filed/verify_vol.c | 584 + src/filed/win_efs.c | 310 + src/fileopts.h | 66 + src/filetypes.h | 78 + src/findlib/Makefile.in | 131 + src/findlib/attribs.c | 986 + src/findlib/bfile.c | 1176 + src/findlib/bfile.h | 138 + src/findlib/create_file.c | 484 + src/findlib/drivetype.c | 118 + src/findlib/enable_priv.c | 156 + src/findlib/find.c | 491 + src/findlib/find.h | 231 + src/findlib/find_one.c | 857 + src/findlib/fstype.c | 477 + src/findlib/match.c | 404 + src/findlib/mkpath.c | 323 + src/findlib/namedpipe.c | 338 + src/findlib/namedpipe.h | 48 + src/findlib/protos.h | 91 + src/findlib/savecwd.c | 120 + src/findlib/savecwd.h | 42 + src/findlib/win32filter.c | 97 + src/findlib/win32filter.h | 79 + src/host.h.in | 32 + src/jcr.h | 593 + src/lib/Makefile.in | 361 + src/lib/address_conf.c | 695 + src/lib/address_conf.h | 94 + src/lib/alist.c | 473 + src/lib/alist.h | 172 + src/lib/attr.c | 301 + src/lib/attr.h | 53 + src/lib/base64.c | 445 + src/lib/base64.h | 28 + src/lib/berrno.c | 105 + src/lib/berrno.h | 88 + src/lib/bget_msg.c | 168 + src/lib/bget_msg.h | 110 + src/lib/binflate.c | 103 + src/lib/bits.h | 59 + src/lib/bjson.c | 408 + src/lib/bjson.h | 57 + src/lib/bmtio.h | 233 + src/lib/bnet.c | 494 + src/lib/bnet_server.c | 251 + src/lib/bpipe.c | 586 + src/lib/bpipe.h | 32 + src/lib/breg.c | 441 + src/lib/breg.h | 110 + src/lib/bregex.c | 2024 ++ src/lib/bregex.h | 196 + src/lib/bsnprintf.c | 1071 + src/lib/bsock.c | 1001 + src/lib/bsock.h | 219 + src/lib/bsockcore.c | 1340 + src/lib/bsockcore.h | 219 + src/lib/bsys.c | 1468 + src/lib/btime.c | 468 + src/lib/btime.h | 97 + src/lib/btimers.c | 280 + src/lib/btimers.h | 39 + src/lib/bwlimit.c | 133 + src/lib/bwlimit.h | 63 + src/lib/cmd_parser.h | 196 + src/lib/cram-md5.c | 169 + src/lib/crc32.c | 546 + src/lib/crypto.c | 1585 + src/lib/crypto.h | 131 + src/lib/daemon.c | 134 + src/lib/devlock.c | 739 + src/lib/devlock.h | 86 + src/lib/dlist.c | 546 + src/lib/dlist.h | 207 + src/lib/edit.c | 578 + src/lib/flist.c | 191 + src/lib/flist.h | 95 + src/lib/fnmatch.c | 343 + src/lib/fnmatch.h | 55 + src/lib/guid_to_name.c | 197 + src/lib/guid_to_name.h | 36 + src/lib/hmac.c | 117 + src/lib/htable.c | 500 + src/lib/htable.h | 118 + src/lib/ini.c | 897 + src/lib/ini.h | 249 + src/lib/jcr.c | 1265 + src/lib/lex.c | 901 + src/lib/lex.h | 135 + src/lib/lib.h | 70 + src/lib/lockmgr.c | 1663 + src/lib/lockmgr.h | 298 + src/lib/lz4.c | 1480 + src/lib/lz4.h | 475 + src/lib/lz4_encoder.h | 258 + src/lib/md5.c | 373 + src/lib/md5.h | 43 + src/lib/mem_pool.c | 668 + src/lib/mem_pool.h | 116 + src/lib/message.c | 2002 ++ src/lib/message.h | 203 + src/lib/mutex_list.h | 36 + 
src/lib/openssl-compat.h | 43 + src/lib/openssl.c | 341 + src/lib/openssl.h | 44 + src/lib/output.c | 480 + src/lib/output.h | 162 + src/lib/parse_conf.c | 1272 + src/lib/parse_conf.h | 293 + src/lib/plugins.c | 261 + src/lib/plugins.h | 90 + src/lib/priv.c | 129 + src/lib/protos.h | 399 + src/lib/queue.c | 138 + src/lib/queue.h | 37 + src/lib/rblist.c | 488 + src/lib/rblist.h | 156 + src/lib/res.c | 148 + src/lib/runscript.c | 302 + src/lib/runscript.h | 106 + src/lib/rwlock.c | 677 + src/lib/rwlock.h | 73 + src/lib/scan.c | 620 + src/lib/sellist.c | 268 + src/lib/sellist.h | 113 + src/lib/serial.c | 332 + src/lib/serial.h | 169 + src/lib/sha1.c | 517 + src/lib/sha1.h | 107 + src/lib/sha2.c | 950 + src/lib/sha2.h | 118 + src/lib/signal.c | 444 + src/lib/smartall.c | 565 + src/lib/smartall.h | 173 + src/lib/status.h | 265 + src/lib/tcpd.h | 227 + src/lib/tls.c | 775 + src/lib/tls.h | 51 + src/lib/tree.c | 604 + src/lib/tree.h | 149 + src/lib/unittests.c | 127 + src/lib/unittests.h | 62 + src/lib/util.c | 1036 + src/lib/var.c | 2720 ++ src/lib/var.h | 123 + src/lib/waitq.h | 58 + src/lib/watchdog.c | 335 + src/lib/watchdog.h | 48 + src/lib/worker.c | 437 + src/lib/worker.h | 103 + src/lib/workq.c | 522 + src/lib/workq.h | 71 + src/plugins/Makefile | 30 + src/plugins/README | 46 + src/plugins/dir/Makefile.in | 50 + src/plugins/dir/example-plugin-dir.c | 163 + src/plugins/fd/Makefile.in | 77 + src/plugins/fd/bpipe-fd.c | 743 + src/plugins/fd/example-plugin-fd.c | 311 + src/plugins/fd/fd_common.h | 730 + src/plugins/fd/test-deltaseq-fd.c | 507 + src/plugins/fd/test-plugin-fd.c | 710 + src/plugins/sd/Makefile.in | 50 + src/plugins/sd/example-plugin-sd.c | 181 + src/plugins/sd/main.c | 110 + src/qt-console/COMMANDS | 125 + src/qt-console/External-qt-console | 26 + src/qt-console/PAGES | 44 + src/qt-console/PREFS | 9 + src/qt-console/README | 118 + src/qt-console/README.mingw32 | 120 + src/qt-console/RELEASEFEATURES | 73 + src/qt-console/TODO | 313 + src/qt-console/bat.conf.example | 10 + src/qt-console/bat.conf.in | 10 + src/qt-console/bat.h | 64 + src/qt-console/bat.pro.in | 189 + src/qt-console/bat.pro.mingw32.in | 191 + src/qt-console/bat.pro.mingw64 | 191 + src/qt-console/bat.pro.mingw64.in | 191 + src/qt-console/bat_conf.cpp | 330 + src/qt-console/bat_conf.h | 122 + src/qt-console/bcomm/dircomm.cpp | 575 + src/qt-console/bcomm/dircomm.h | 83 + src/qt-console/bcomm/dircomm_auth.cpp | 188 + src/qt-console/build-depkgs-qt-console | 165 + src/qt-console/clients/clients.cpp | 355 + src/qt-console/clients/clients.h | 64 + src/qt-console/clients/clients.ui | 82 + src/qt-console/console/console.cpp | 931 + src/qt-console/console/console.h | 160 + src/qt-console/console/console.ui | 117 + src/qt-console/fileset/fileset.cpp | 284 + src/qt-console/fileset/fileset.h | 61 + src/qt-console/fileset/fileset.ui | 52 + src/qt-console/help/clients.html | 35 + src/qt-console/help/console.html | 36 + src/qt-console/help/filesets.html | 29 + src/qt-console/help/help.cpp | 68 + src/qt-console/help/help.h | 51 + src/qt-console/help/help.ui | 73 + src/qt-console/help/index.html | 53 + src/qt-console/help/joblist.html | 84 + src/qt-console/help/jobplot.html | 28 + src/qt-console/help/jobs.html | 58 + src/qt-console/help/media.html | 41 + src/qt-console/help/restore.html | 139 + src/qt-console/help/storage.html | 33 + src/qt-console/images/0p.png | Bin 0 -> 240 bytes src/qt-console/images/16p.png | Bin 0 -> 252 bytes src/qt-console/images/32p.png | Bin 0 -> 257 bytes src/qt-console/images/48p.png | Bin 0 -> 261 bytes 
src/qt-console/images/64p.png | Bin 0 -> 261 bytes src/qt-console/images/80p.png | Bin 0 -> 262 bytes src/qt-console/images/96p.png | Bin 0 -> 240 bytes src/qt-console/images/A.png | Bin 0 -> 1379 bytes src/qt-console/images/R.png | Bin 0 -> 1094 bytes src/qt-console/images/T.png | Bin 0 -> 672 bytes src/qt-console/images/W.png | Bin 0 -> 1077 bytes src/qt-console/images/ajax-loader-big.gif | Bin 0 -> 3208 bytes src/qt-console/images/applications-graphics.png | Bin 0 -> 1668 bytes src/qt-console/images/applications-graphics.svg | 545 + src/qt-console/images/backup.png | Bin 0 -> 1651 bytes src/qt-console/images/bat.png | Bin 0 -> 96268 bytes src/qt-console/images/bat_icon.icns | Bin 0 -> 125067 bytes src/qt-console/images/bat_icon.png | Bin 0 -> 5054 bytes src/qt-console/images/browse.png | Bin 0 -> 3472 bytes src/qt-console/images/browse.svg | 620 + src/qt-console/images/cartridge-edit.png | Bin 0 -> 788 bytes src/qt-console/images/cartridge-edit.svg | 277 + src/qt-console/images/cartridge.png | Bin 0 -> 698 bytes src/qt-console/images/cartridge.svg | 148 + src/qt-console/images/cartridge1.png | Bin 0 -> 1138 bytes src/qt-console/images/check.png | Bin 0 -> 551 bytes src/qt-console/images/check.svg | 94 + src/qt-console/images/connected.png | Bin 0 -> 547 bytes src/qt-console/images/copy.png | Bin 0 -> 1339 bytes src/qt-console/images/cut.png | Bin 0 -> 1323 bytes src/qt-console/images/disconnected.png | Bin 0 -> 640 bytes src/qt-console/images/edit-cut.png | Bin 0 -> 807 bytes src/qt-console/images/edit-delete.png | Bin 0 -> 821 bytes src/qt-console/images/edit-delete.svg | 882 + src/qt-console/images/edit.png | Bin 0 -> 1092 bytes src/qt-console/images/emblem-system.png | Bin 0 -> 4050 bytes src/qt-console/images/emblem-system.svg | 235 + src/qt-console/images/estimate-job.png | Bin 0 -> 2049 bytes src/qt-console/images/estimate-job.svg | 222 + src/qt-console/images/extern.png | Bin 0 -> 901 bytes src/qt-console/images/f.png | Bin 0 -> 1379 bytes src/qt-console/images/folder.png | Bin 0 -> 551 bytes src/qt-console/images/folder.svg | 422 + src/qt-console/images/folderbothchecked.png | Bin 0 -> 758 bytes src/qt-console/images/folderbothchecked.svg | 438 + src/qt-console/images/folderchecked.png | Bin 0 -> 709 bytes src/qt-console/images/folderchecked.svg | 430 + src/qt-console/images/folderunchecked.png | Bin 0 -> 692 bytes src/qt-console/images/folderunchecked.svg | 430 + src/qt-console/images/go-down.png | Bin 0 -> 683 bytes src/qt-console/images/go-down.svg | 199 + src/qt-console/images/go-jump.png | Bin 0 -> 1491 bytes src/qt-console/images/go-jump.svg | 204 + src/qt-console/images/go-up.png | Bin 0 -> 652 bytes src/qt-console/images/go-up.svg | 195 + src/qt-console/images/graph1.png | Bin 0 -> 1853 bytes src/qt-console/images/graph1.svg | 380 + src/qt-console/images/help-browser.png | Bin 0 -> 862 bytes src/qt-console/images/help-browser.svg | 215 + src/qt-console/images/home.png | Bin 0 -> 1781 bytes src/qt-console/images/inflag0.png | Bin 0 -> 847 bytes src/qt-console/images/inflag1.png | Bin 0 -> 806 bytes src/qt-console/images/inflag2.png | Bin 0 -> 859 bytes src/qt-console/images/intern.png | Bin 0 -> 1079 bytes src/qt-console/images/joblog.png | Bin 0 -> 2369 bytes src/qt-console/images/joblog.svg | 252 + src/qt-console/images/label.png | Bin 0 -> 1291 bytes src/qt-console/images/mail-message-new.png | Bin 0 -> 1744 bytes src/qt-console/images/mail-message-new.svg | 465 + src/qt-console/images/mail-message-pending.png | Bin 0 -> 1772 bytes 
src/qt-console/images/mail-message-pending.svg | 467 + src/qt-console/images/mark.png | Bin 0 -> 452 bytes src/qt-console/images/media-floppy.svg | 340 + src/qt-console/images/network-server.png | Bin 0 -> 486 bytes src/qt-console/images/network-server.svg | 1005 + src/qt-console/images/new.png | Bin 0 -> 852 bytes src/qt-console/images/next.png | Bin 0 -> 1646 bytes src/qt-console/images/open.png | Bin 0 -> 2073 bytes src/qt-console/images/package-x-generic.png | Bin 0 -> 1270 bytes src/qt-console/images/package-x-generic.svg | 483 + src/qt-console/images/page-next.gif | Bin 0 -> 875 bytes src/qt-console/images/page-prev.gif | Bin 0 -> 879 bytes src/qt-console/images/paste.png | Bin 0 -> 1745 bytes src/qt-console/images/prev.png | Bin 0 -> 1605 bytes src/qt-console/images/print.png | Bin 0 -> 1732 bytes src/qt-console/images/purge.png | Bin 0 -> 1308 bytes src/qt-console/images/restore.png | Bin 0 -> 1352 bytes src/qt-console/images/run.png | Bin 0 -> 2296 bytes src/qt-console/images/runit.png | Bin 0 -> 36022 bytes src/qt-console/images/save.png | Bin 0 -> 1187 bytes src/qt-console/images/server.png | Bin 0 -> 2618 bytes src/qt-console/images/status-console.png | Bin 0 -> 1960 bytes src/qt-console/images/status-console.svg | 606 + src/qt-console/images/status.png | Bin 0 -> 952 bytes src/qt-console/images/status.svg | 380 + src/qt-console/images/system-file-manager.png | Bin 0 -> 540 bytes src/qt-console/images/system-file-manager.svg | 318 + src/qt-console/images/unchecked.png | Bin 0 -> 461 bytes src/qt-console/images/unchecked.svg | 97 + src/qt-console/images/undo.png | Bin 0 -> 1768 bytes src/qt-console/images/unmark.png | Bin 0 -> 387 bytes src/qt-console/images/up.png | Bin 0 -> 1579 bytes src/qt-console/images/utilities-terminal.png | Bin 0 -> 731 bytes src/qt-console/images/utilities-terminal.svg | 498 + src/qt-console/images/view-refresh.png | Bin 0 -> 3548 bytes src/qt-console/images/view-refresh.svg | 391 + src/qt-console/images/weather-severe-alert.png | Bin 0 -> 783 bytes src/qt-console/images/weather-severe-alert.svg | 4699 +++ src/qt-console/images/zoom.png | Bin 0 -> 1099 bytes src/qt-console/install_conf_file.in | 16 + src/qt-console/job/job.cpp | 494 + src/qt-console/job/job.h | 60 + src/qt-console/job/job.ui | 776 + src/qt-console/jobgraphs/jobplot.cpp | 571 + src/qt-console/jobgraphs/jobplot.h | 145 + src/qt-console/jobgraphs/jobplotcontrols.ui | 337 + src/qt-console/joblist/joblist.cpp | 728 + src/qt-console/joblist/joblist.h | 101 + src/qt-console/joblist/joblist.ui | 486 + src/qt-console/joblog/joblog.cpp | 157 + src/qt-console/joblog/joblog.h | 51 + src/qt-console/joblog/joblog.ui | 85 + src/qt-console/jobs/jobs.cpp | 264 + src/qt-console/jobs/jobs.h | 66 + src/qt-console/jobs/jobs.ui | 157 + src/qt-console/label/label.cpp | 127 + src/qt-console/label/label.h | 54 + src/qt-console/label/label.ui | 303 + src/qt-console/main.cpp | 269 + src/qt-console/main.qrc | 77 + src/qt-console/main.ui | 581 + src/qt-console/mainwin.cpp | 1027 + src/qt-console/mainwin.h | 171 + src/qt-console/make-win32 | 77 + src/qt-console/mediaedit/mediaedit.cpp | 420 + src/qt-console/mediaedit/mediaedit.h | 68 + src/qt-console/mediaedit/mediaedit.ui | 585 + src/qt-console/mediainfo/mediainfo.cpp | 257 + src/qt-console/mediainfo/mediainfo.h | 54 + src/qt-console/mediainfo/mediainfo.ui | 701 + src/qt-console/medialist/medialist.cpp | 480 + src/qt-console/medialist/medialist.h | 70 + src/qt-console/medialist/medialist.ui | 132 + src/qt-console/medialist/mediaview.cpp | 458 + 
src/qt-console/medialist/mediaview.h | 63 + src/qt-console/medialist/mediaview.ui | 289 + src/qt-console/mount/mount.cpp | 80 + src/qt-console/mount/mount.h | 51 + src/qt-console/mount/mount.ui | 199 + src/qt-console/pages.cpp | 445 + src/qt-console/pages.h | 106 + src/qt-console/prefs.ui | 733 + src/qt-console/qstd.cpp | 104 + src/qt-console/qstd.h | 79 + src/qt-console/qwtconfig.pri | 88 + src/qt-console/relabel/relabel.cpp | 116 + src/qt-console/relabel/relabel.h | 54 + src/qt-console/relabel/relabel.ui | 245 + src/qt-console/restore/brestore.cpp | 714 + src/qt-console/restore/brestore.ui | 666 + src/qt-console/restore/prerestore.cpp | 389 + src/qt-console/restore/prerestore.ui | 414 + src/qt-console/restore/restore.cpp | 498 + src/qt-console/restore/restore.h | 184 + src/qt-console/restore/restore.ui | 400 + src/qt-console/restore/restoretree.cpp | 1765 + src/qt-console/restore/restoretree.h | 115 + src/qt-console/restore/restoretree.ui | 428 + src/qt-console/restore/runrestore.ui | 366 + src/qt-console/run/estimate.cpp | 114 + src/qt-console/run/estimate.ui | 281 + src/qt-console/run/prune.cpp | 134 + src/qt-console/run/prune.ui | 302 + src/qt-console/run/run.cpp | 186 + src/qt-console/run/run.h | 110 + src/qt-console/run/run.ui | 357 + src/qt-console/run/runadmin.ui | 405 + src/qt-console/run/runbackup.ui | 405 + src/qt-console/run/runcmd.cpp | 171 + src/qt-console/run/runcmd.ui | 405 + src/qt-console/run/runcopy.ui | 405 + src/qt-console/run/runmigration.ui | 405 + src/qt-console/run/runrestore.ui | 405 + src/qt-console/select/select.cpp | 118 + src/qt-console/select/select.h | 59 + src/qt-console/select/select.ui | 82 + src/qt-console/select/textinput.cpp | 67 + src/qt-console/select/textinput.h | 47 + src/qt-console/select/textinput.ui | 143 + src/qt-console/status/clientstat.cpp | 280 + src/qt-console/status/clientstat.h | 66 + src/qt-console/status/clientstat.ui | 215 + src/qt-console/status/dirstat.cpp | 391 + src/qt-console/status/dirstat.h | 67 + src/qt-console/status/dirstat.ui | 296 + src/qt-console/status/storstat.cpp | 443 + src/qt-console/status/storstat.h | 75 + src/qt-console/status/storstat.ui | 250 + src/qt-console/storage/content.cpp | 345 + src/qt-console/storage/content.h | 62 + src/qt-console/storage/content.ui | 295 + src/qt-console/storage/storage.cpp | 474 + src/qt-console/storage/storage.h | 74 + src/qt-console/storage/storage.ui | 142 + src/qt-console/testprogs/examp/dock.pro | 15 + src/qt-console/testprogs/examp/dockwidgets.qrc | 8 + src/qt-console/testprogs/examp/main.cpp | 12 + src/qt-console/testprogs/examp/mainwindow.cpp | 302 + src/qt-console/testprogs/examp/mainwindow.h | 76 + src/qt-console/testprogs/putz/main.cpp | 16 + src/qt-console/testprogs/putz/putz.cpp | 11 + src/qt-console/testprogs/putz/putz.h | 23 + src/qt-console/testprogs/putz/putz.pro | 15 + src/qt-console/testprogs/putz/putz.ui | 83 + src/qt-console/tray-monitor/authenticate.cpp | 145 + .../tray-monitor/bacula-tray-monitor.conf.in | 32 + .../tray-monitor/clientselectwizardpage.cpp | 57 + .../tray-monitor/clientselectwizardpage.h | 53 + .../tray-monitor/clientselectwizardpage.ui | 68 + src/qt-console/tray-monitor/common.h | 71 + src/qt-console/tray-monitor/conf.cpp | 412 + src/qt-console/tray-monitor/conf.h | 85 + src/qt-console/tray-monitor/dir-monitor.ui | 176 + src/qt-console/tray-monitor/dirstatus.cpp | 127 + src/qt-console/tray-monitor/dirstatus.h | 42 + src/qt-console/tray-monitor/fd-monitor.ui | 176 + src/qt-console/tray-monitor/fdstatus.cpp | 125 + 
src/qt-console/tray-monitor/fdstatus.h | 42 + .../tray-monitor/fileselectwizardpage.cpp | 302 + src/qt-console/tray-monitor/fileselectwizardpage.h | 112 + .../tray-monitor/fileselectwizardpage.ui | 139 + src/qt-console/tray-monitor/filesmodel.h | 220 + src/qt-console/tray-monitor/install_conf_file.in | 19 + .../tray-monitor/jobselectwizardpage.cpp | 103 + src/qt-console/tray-monitor/jobselectwizardpage.h | 69 + src/qt-console/tray-monitor/jobselectwizardpage.ui | 52 + src/qt-console/tray-monitor/jobsmodel.cpp | 98 + src/qt-console/tray-monitor/jobsmodel.h | 59 + src/qt-console/tray-monitor/main-conf.ui | 350 + src/qt-console/tray-monitor/pluginmodel.h | 28 + src/qt-console/tray-monitor/pluginwizardpage.cpp | 177 + src/qt-console/tray-monitor/pluginwizardpage.h | 60 + src/qt-console/tray-monitor/pluginwizardpage.ui | 35 + src/qt-console/tray-monitor/res-conf.ui | 565 + .../tray-monitor/restoreoptionswizardpage.cpp | 111 + .../tray-monitor/restoreoptionswizardpage.h | 57 + .../tray-monitor/restoreoptionswizardpage.ui | 82 + src/qt-console/tray-monitor/restorewizard.cpp | 56 + src/qt-console/tray-monitor/restorewizard.h | 59 + src/qt-console/tray-monitor/restorewizard.ui | 123 + src/qt-console/tray-monitor/run.ui | 379 + src/qt-console/tray-monitor/runjob.cpp | 509 + src/qt-console/tray-monitor/runjob.h | 119 + src/qt-console/tray-monitor/sd-monitor.ui | 162 + src/qt-console/tray-monitor/sdstatus.cpp | 125 + src/qt-console/tray-monitor/sdstatus.h | 42 + src/qt-console/tray-monitor/status.cpp | 42 + src/qt-console/tray-monitor/status.h | 44 + src/qt-console/tray-monitor/task.cpp | 1619 + src/qt-console/tray-monitor/task.h | 158 + src/qt-console/tray-monitor/tray-monitor.conf.in | 32 + src/qt-console/tray-monitor/tray-monitor.cpp | 320 + src/qt-console/tray-monitor/tray-monitor.h | 31 + src/qt-console/tray-monitor/tray-monitor.pro.in | 58 + .../tray-monitor/tray-monitor.pro.mingw32.in | 67 + .../tray-monitor/tray-monitor.pro.mingw64.in | 71 + src/qt-console/tray-monitor/tray-ui.h | 440 + src/qt-console/tray-monitor/tray_conf.cpp | 393 + src/qt-console/tray-monitor/tray_conf.h | 188 + src/qt-console/tray-monitor/ts/tm_de.ts | 720 + src/qt-console/tray-monitor/ts/tm_fr.ts | 720 + src/qt-console/tray-monitor/ts/tm_ja.ts | 720 + src/qt-console/tray-monitor/win32/qmake.conf | 93 + src/qt-console/tray-monitor/win32/qplatformdefs.h | 161 + src/qt-console/ts/bat_de.ts | 4870 +++ src/qt-console/ts/bat_fr.ts | 5015 +++ src/qt-console/util/comboutil.cpp | 143 + src/qt-console/util/comboutil.h | 55 + src/qt-console/util/fmtwidgetitem.cpp | 551 + src/qt-console/util/fmtwidgetitem.h | 217 + src/qt-console/win32/qmake.conf | 93 + src/qt-console/win32/qplatformdefs.h | 161 + src/stored/Makefile.in | 325 + src/stored/acquire.c | 799 + src/stored/aligned_dev.c | 21 + src/stored/aligned_dev.h | 141 + src/stored/aligned_read.c | 25 + src/stored/aligned_write.c | 26 + src/stored/ansi_label.c | 449 + src/stored/append.c | 391 + src/stored/askdir.c | 985 + src/stored/authenticate.c | 346 + src/stored/autochanger.c | 808 + src/stored/bacula-sd.conf.in | 334 + src/stored/bcopy.c | 348 + src/stored/bextract.c | 670 + src/stored/block.c | 751 + src/stored/block.h | 169 + src/stored/block_util.c | 833 + src/stored/bls.c | 484 + src/stored/bscan.c | 1369 + src/stored/bsdjson.c | 684 + src/stored/bsr.h | 173 + src/stored/btape.c | 3132 ++ src/stored/butil.c | 317 + src/stored/cloud_dev.c | 2301 ++ src/stored/cloud_dev.h | 119 + src/stored/cloud_driver.h | 64 + src/stored/cloud_parts.c | 607 + src/stored/cloud_parts.h | 122 
+ src/stored/cloud_test.c | 256 + src/stored/cloud_transfer_mgr.c | 641 + src/stored/cloud_transfer_mgr.h | 296 + src/stored/dev.c | 1055 + src/stored/dev.h | 836 + src/stored/device.c | 328 + src/stored/dircmd.c | 1986 ++ src/stored/ebcdic.c | 182 + src/stored/fd_cmds.c | 501 + src/stored/fifo_dev.c | 51 + src/stored/fifo_dev.h | 39 + src/stored/file_dev.c | 541 + src/stored/file_dev.h | 37 + src/stored/file_driver.c | 501 + src/stored/file_driver.h | 72 + src/stored/global.c | 26 + src/stored/hello.c | 351 + src/stored/init_dev.c | 474 + src/stored/job.c | 412 + src/stored/label.c | 1397 + src/stored/lock.c | 536 + src/stored/lock.h | 123 + src/stored/match_bsr.c | 840 + src/stored/mount.c | 880 + src/stored/null_dev.c | 33 + src/stored/null_dev.h | 34 + src/stored/os.c | 421 + src/stored/parse_bsr.c | 1071 + src/stored/protos.h | 298 + src/stored/read.c | 288 + src/stored/read_records.c | 518 + src/stored/record.h | 258 + src/stored/record_read.c | 323 + src/stored/record_util.c | 335 + src/stored/record_write.c | 423 + src/stored/reserve.c | 1312 + src/stored/reserve.h | 63 + src/stored/s3_driver.c | 806 + src/stored/s3_driver.h | 67 + src/stored/scan.c | 159 + src/stored/sd_plugins.c | 489 + src/stored/sd_plugins.h | 190 + src/stored/spool.c | 782 + src/stored/status.c | 1159 + src/stored/stored.c | 808 + src/stored/stored.conf.in | 59 + src/stored/stored.h | 103 + src/stored/stored_conf.c | 955 + src/stored/stored_conf.h | 228 + src/stored/tape_alert.c | 230 + src/stored/tape_alert_msgs.h | 166 + src/stored/tape_dev.c | 1121 + src/stored/tape_dev.h | 72 + src/stored/tape_worm.c | 93 + src/stored/vbackup.c | 290 + src/stored/vol_mgr.c | 936 + src/stored/vol_mgr.h | 91 + src/stored/vtape_dev.c | 1014 + src/stored/vtape_dev.h | 126 + src/stored/wait.c | 351 + src/stored/win_file_dev.h | 31 + src/stored/win_tape_dev.h | 48 + src/streams.h | 156 + src/tools/Makefile.in | 212 + src/tools/bbatch.c | 363 + src/tools/bpluginfo.c | 632 + src/tools/bregex.c | 166 + src/tools/bregtest.c | 155 + src/tools/bsmtp.c | 679 + src/tools/bsnapshot.c | 1976 ++ src/tools/bvfs_test.c | 334 + src/tools/bwild.c | 132 + src/tools/cats_test.c | 702 + src/tools/dbcheck.c | 1501 + src/tools/drivetype.c | 122 + src/tools/fstype.c | 172 + src/tools/gigaslam.c | 50 + src/tools/grow.c | 58 + src/tools/testfind.c | 644 + src/tools/testls.c | 278 + src/tools/timelimit.1 | 217 + src/tools/timelimit.c | 543 + src/version.h | 193 + src/win32/External-mingw-w64 | 74 + src/win32/External-mingw32 | 70 + src/win32/External-msvc | 59 + src/win32/Makefile | 168 + src/win32/Makefile.full | 113 + src/win32/Makefile.inc.in | 161 + src/win32/Makefile.rules | 79 + src/win32/README.mingw | 376 + src/win32/README.vc8 | 246 + src/win32/bacula.sln | 403 + src/win32/bacula/bacula.vcproj | 225 + src/win32/build-depkgs-mingw-w64 | 451 + src/win32/build-depkgs-mingw32 | 659 + src/win32/cats/Makefile | 110 + src/win32/cats/bacula_cats.def | 273 + src/win32/cats/bacula_cats/bacula_cats.vcproj | 94 + src/win32/cats/cats_mysql/cats_mysql.vcproj | 428 + .../cats/cats_postgresql/cats_postgresql.vcproj | 432 + src/win32/cats/cats_sqlite3/cats_sqlite3.vcproj | 421 + src/win32/cats/create_mysql_database.cmd | 14 + src/win32/cats/create_postgresql_database.cmd | 23 + src/win32/cats/create_postgresql_database.sql | 2 + src/win32/cats/create_sqlite3_database.cmd | 6 + src/win32/cats/delete_catalog_backup.cmd | 5 + src/win32/cats/drop_mysql_database.cmd | 14 + src/win32/cats/drop_mysql_tables.cmd | 14 + src/win32/cats/drop_mysql_tables.sql | 26 + 
src/win32/cats/drop_postgresql_database.cmd | 14 + src/win32/cats/drop_postgresql_tables.cmd | 14 + src/win32/cats/drop_postgresql_tables.sql | 22 + src/win32/cats/drop_sqlite3_database.cmd | 7 + src/win32/cats/drop_sqlite3_tables.cmd | 8 + src/win32/cats/grant_mysql_privileges.cmd | 14 + src/win32/cats/grant_mysql_privileges.sql | 5 + src/win32/cats/grant_postgresql_privileges.cmd | 15 + src/win32/cats/grant_postgresql_privileges.sql | 37 + src/win32/cats/grant_sqlite3_privileges.cmd | 7 + src/win32/cats/make_def | 72 + src/win32/cats/make_mysql_catalog_backup.cmd | 41 + src/win32/cats/make_mysql_tables.cmd | 14 + src/win32/cats/make_mysql_tables.sql | 357 + src/win32/cats/make_postgresql_catalog_backup.cmd | 41 + src/win32/cats/make_postgresql_tables.cmd | 14 + src/win32/cats/make_postgresql_tables.sql | 380 + src/win32/cats/make_sqlite3_catalog_backup.cmd | 38 + src/win32/cats/make_sqlite3_tables.cmd | 7 + src/win32/cats/make_sqlite3_tables.sql | 384 + src/win32/compat/Makefile | 66 + src/win32/compat/alloca.h | 0 src/win32/compat/arpa/inet.h | 0 src/win32/compat/compat.cpp | 2958 ++ src/win32/compat/compat.h | 469 + src/win32/compat/dirent.h | 0 src/win32/compat/dlfcn.h | 33 + src/win32/compat/getopt.c | 187 + src/win32/compat/getopt.h | 41 + src/win32/compat/grp.h | 0 src/win32/compat/mingwconfig.h | 439 + src/win32/compat/ms_atl.h | 53 + src/win32/compat/mswinver.h | 35 + src/win32/compat/netdb.h | 0 src/win32/compat/netinet/in.h | 0 src/win32/compat/netinet/tcp.h | 0 src/win32/compat/print.cpp | 774 + src/win32/compat/pwd.h | 0 src/win32/compat/stdint.h | 3 + src/win32/compat/strings.h | 0 src/win32/compat/sys/file.h | 2 + src/win32/compat/sys/ioctl.h | 1 + src/win32/compat/sys/mtio.h | 277 + src/win32/compat/sys/socket.h | 0 src/win32/compat/sys/stat.h | 1 + src/win32/compat/sys/time.h | 0 src/win32/compat/sys/wait.h | 0 src/win32/compat/syslog.h | 32 + src/win32/compat/unistd.h | 3 + src/win32/compat/winapi.c | 349 + src/win32/compat/winapi.h | 242 + src/win32/compat/winhdrs.h | 14 + src/win32/compat/winhost.h | 51 + src/win32/compat/winsock.h | 6 + src/win32/console/Makefile | 59 + src/win32/console/console.vcproj | 293 + src/win32/cygwin.NET.bashrc | 63 + src/win32/dird/Makefile | 117 + src/win32/dird/bacula.rc | 1 + src/win32/dird/dird.vcproj | 1118 + src/win32/dird/main.cpp | 36 + src/win32/dird/service.cpp | 36 + src/win32/dird/who.h | 33 + src/win32/filed/Makefile | 123 + src/win32/filed/bacula-fd.manifest | 21 + src/win32/filed/bacula.rc | 1 + src/win32/filed/baculafd.vcproj | 610 + src/win32/filed/main.cpp | 24 + src/win32/filed/plugins/Makefile | 124 + src/win32/filed/plugins/alldrives-fd.c | 437 + src/win32/filed/plugins/alldrives-fd.def | 13 + src/win32/filed/plugins/api.c | 137 + src/win32/filed/plugins/api.h | 302 + src/win32/filed/plugins/bpipe-fd.c | 557 + src/win32/filed/plugins/bpipe-fd.def | 15 + src/win32/filed/plugins/comadmin.h | 878 + src/win32/filed/plugins/dbi_node.c | 283 + src/win32/filed/plugins/exch_api.c | 139 + src/win32/filed/plugins/exch_api.h | 293 + src/win32/filed/plugins/exch_dbi_node.c | 280 + src/win32/filed/plugins/exch_file_node.c | 229 + src/win32/filed/plugins/exch_node.c | 120 + src/win32/filed/plugins/exch_node.h | 177 + src/win32/filed/plugins/exch_root_node.c | 155 + src/win32/filed/plugins/exch_service_node.c | 223 + src/win32/filed/plugins/exch_storage_group_node.c | 471 + src/win32/filed/plugins/exch_store_node.c | 237 + src/win32/filed/plugins/exchange-fd.c | 528 + src/win32/filed/plugins/exchange-fd.def | 15 + 
src/win32/filed/plugins/exchange-fd.h | 157 + src/win32/filed/plugins/file_node.c | 227 + src/win32/filed/plugins/node.c | 118 + src/win32/filed/plugins/node.h | 177 + src/win32/filed/plugins/root_node.c | 153 + src/win32/filed/plugins/service_node.c | 220 + src/win32/filed/plugins/storage_group_node.c | 502 + src/win32/filed/plugins/store_node.c | 240 + src/win32/filed/service.cpp | 25 + src/win32/filed/trayMonitor.cpp | 25 + src/win32/filed/vss.cpp | 589 + src/win32/filed/vss.h | 344 + src/win32/filed/vss_Vista.cpp | 4 + src/win32/filed/vss_W2K3.cpp | 13 + src/win32/filed/vss_XP.cpp | 4 + src/win32/filed/vss_generic.cpp | 847 + src/win32/filed/who.h | 33 + src/win32/full_win32_installer/ConfigPage1.nsh | 294 + src/win32/full_win32_installer/ConfigPage2.nsh | 454 + src/win32/full_win32_installer/DumpLog.nsh | 46 + src/win32/full_win32_installer/InstallType.ini | 56 + src/win32/full_win32_installer/InstallType.nsh | 102 + src/win32/full_win32_installer/Start.bat | 5 + src/win32/full_win32_installer/Stop.bat | 5 + src/win32/full_win32_installer/WriteTemplates.ini | 48 + src/win32/full_win32_installer/bacula-dir.conf.in | 380 + src/win32/full_win32_installer/bacula-fd.conf.in | 44 + src/win32/full_win32_installer/bacula-logo.bmp | Bin 0 -> 26046 bytes src/win32/full_win32_installer/bacula-sd.conf.in | 115 + src/win32/full_win32_installer/bat.conf.in | 10 + src/win32/full_win32_installer/bconsole.conf.in | 10 + src/win32/full_win32_installer/build-installer.cmd | 70 + src/win32/full_win32_installer/bwx-console.conf.in | 10 + src/win32/full_win32_installer/client.conf.in | 11 + src/win32/full_win32_installer/installer.vcproj | 154 + src/win32/full_win32_installer/storage.conf.in | 10 + src/win32/full_win32_installer/winbacula.nsi | 1470 + src/win32/lib/Makefile | 182 + src/win32/lib/bacula32.def | 1063 + src/win32/lib/bacula64.def | 1013 + src/win32/lib/make_def32 | 34 + src/win32/lib/make_def64 | 34 + src/win32/libbac/Makefile | 112 + src/win32/libbac/libbac.vcproj | 1733 + src/win32/libbac/msvc/bacula.def | 510 + src/win32/libwin32/aboutDialog.cpp | 71 + src/win32/libwin32/aboutDialog.h | 39 + src/win32/libwin32/bacula.bmp | Bin 0 -> 3126 bytes src/win32/libwin32/bacula.ico | Bin 0 -> 766 bytes src/win32/libwin32/bacula.rc | 171 + src/win32/libwin32/error.ico | Bin 0 -> 766 bytes src/win32/libwin32/idle.ico | Bin 0 -> 766 bytes src/win32/libwin32/main.cpp | 666 + src/win32/libwin32/protos.h | 50 + src/win32/libwin32/res.h | 41 + src/win32/libwin32/running.ico | Bin 0 -> 766 bytes src/win32/libwin32/saving.ico | Bin 0 -> 766 bytes src/win32/libwin32/service.cpp | 586 + src/win32/libwin32/statusDialog.cpp | 165 + src/win32/libwin32/statusDialog.h | 45 + src/win32/libwin32/trayMonitor.cpp | 263 + src/win32/libwin32/trayMonitor.h | 59 + src/win32/libwin32/warn.ico | Bin 0 -> 766 bytes src/win32/libwin32/win32.h | 31 + src/win32/makeall | 7 + src/win32/patches/binutils_texinfo_version.patch | 31 + src/win32/patches/dvd+rw-tools.patch | 264 + src/win32/patches/mingw-utils.patch | 6670 ++++ src/win32/patches/mt.patch | 1689 + src/win32/patches/mtx-msvc1.patch | 75 + src/win32/patches/mtx-msvc2.patch | 40 + src/win32/patches/mtx.patch | 1576 + src/win32/patches/nsis.patch | 428 + src/win32/patches/openssl-w64.patch | 2663 ++ src/win32/patches/openssl.patch | 309 + src/win32/patches/pcre.patch | 130 + src/win32/patches/postgresql.patch | 123 + src/win32/patches/pthreads-w64.patch | 970 + src/win32/patches/pthreads.patch | 95 + src/win32/patches/qt4-compilation-see.patch | 21 + 
src/win32/patches/qt4-compilation.patch | 45 + src/win32/patches/qt4-intrinsics.patch | 13 + src/win32/patches/qt4-widget-ui.patch | 12 + src/win32/patches/sed.patch | 15 + src/win32/patches/sed_msc.patch | 927 + src/win32/patches/sqlite.patch | 162 + src/win32/patches/sqlite_msc.patch | 768 + src/win32/patches/stab2cv.patch | 11 + src/win32/patches/wx.sed | 9 + src/win32/patches/wx1.patch | 37 + src/win32/patches/wx2.patch | 22 + src/win32/patches/wxWidgets.patch | 67 + src/win32/patches/zlib.patch | 150 + src/win32/pebuilder/Makefile.in | 55 + src/win32/pebuilder/README | 13 + src/win32/pebuilder/bacula/bacula.inf | 37 + src/win32/pebuilder/bacula/bacula_nu2menu.xml | 19 + src/win32/scripts/Makefile | 39 + src/win32/scripts/bsleep.c | 23 + src/win32/scripts/bsleep.vcproj | 199 + src/win32/scripts/disk-changer.cmd | 201 + src/win32/scripts/dvd-handler.cmd | 387 + src/win32/scripts/mtx-changer.cmd | 189 + src/win32/stored/Makefile | 182 + src/win32/stored/bacula.rc | 1 + src/win32/stored/baculasd.vcproj | 396 + src/win32/stored/bcopy/bcopy.vcproj | 230 + src/win32/stored/bextract/bextract.vcproj | 229 + src/win32/stored/bls/bls.vcproj | 229 + src/win32/stored/bscan/bscan.vcproj | 229 + src/win32/stored/btape/btape.vcproj | 228 + src/win32/stored/main.cpp | 24 + src/win32/stored/mtops.cpp | 1175 + src/win32/stored/postest/postest.cpp | 299 + src/win32/stored/postest/postest.vcproj | 213 + src/win32/stored/service.cpp | 25 + src/win32/stored/storelib/storelib.vcproj | 827 + src/win32/stored/trayMonitor.cpp | 25 + src/win32/stored/who.h | 31 + src/win32/stored/win_tape_device.cpp | 1142 + src/win32/tools/Makefile | 107 + src/win32/tools/ScsiDeviceList.cpp | 363 + src/win32/tools/ScsiDeviceList.h | 169 + src/win32/tools/bsmtp/bsmtp.vcproj | 229 + src/win32/tools/dbcheck/dbcheck.vcproj | 293 + src/win32/tools/drivetype/drivetype.vcproj | 228 + src/win32/tools/fstype/fstype.vcproj | 230 + src/win32/tools/scsilist.cpp | 119 + src/win32/tools/scsilist/scsilist.vcproj | 216 + src/win32/tools/testfind/testfind.vcproj | 289 + src/win32/tools/testls/testls.vcproj | 229 + src/win32/win32_installer/ConfigPage1.nsh | 294 + src/win32/win32_installer/ConfigPage2.nsh | 454 + src/win32/win32_installer/DumpLog.nsh | 48 + src/win32/win32_installer/InstallType.ini | 56 + src/win32/win32_installer/InstallType.nsh | 93 + src/win32/win32_installer/Makefile | 178 + src/win32/win32_installer/Readme.txt | 47 + src/win32/win32_installer/Start.bat | 5 + src/win32/win32_installer/Stop.bat | 5 + src/win32/win32_installer/WriteTemplates.ini | 30 + src/win32/win32_installer/bacula-dir.conf.in | 383 + src/win32/win32_installer/bacula-fd.conf.in | 47 + src/win32/win32_installer/bacula-logo.bmp | Bin 0 -> 26046 bytes src/win32/win32_installer/bacula-sd.conf.in | 118 + src/win32/win32_installer/bat.conf.in | 13 + src/win32/win32_installer/bconsole.conf.in | 13 + src/win32/win32_installer/bs-logo.bmp | Bin 0 -> 33654 bytes src/win32/win32_installer/build-installer.cmd | 70 + src/win32/win32_installer/bwx-console.conf.in | 13 + src/win32/win32_installer/client.conf.in | 15 + src/win32/win32_installer/installer.vcproj | 154 + src/win32/win32_installer/storage.conf.in | 14 + src/win32/win32_installer/tray-monitor.conf.in | 30 + src/win32/win32_installer/winbacula.nsi | 1241 + src/win32/win32_installer/x64.nsh | 54 + src/win32/win64_installer/ConfigPage1.nsh | 294 + src/win32/win64_installer/ConfigPage2.nsh | 455 + src/win32/win64_installer/DumpLog.nsh | 48 + src/win32/win64_installer/InstallType.ini | 56 + 
src/win32/win64_installer/InstallType.nsh | 99 + src/win32/win64_installer/Makefile | 207 + src/win32/win64_installer/Readme.txt | 50 + src/win32/win64_installer/Start.bat | 5 + src/win32/win64_installer/Stop.bat | 5 + src/win32/win64_installer/WriteTemplates.ini | 30 + src/win32/win64_installer/bacula-dir.conf.in | 383 + src/win32/win64_installer/bacula-fd.conf.in | 47 + src/win32/win64_installer/bacula-logo.bmp | Bin 0 -> 26046 bytes src/win32/win64_installer/bacula-sd.conf.in | 118 + .../win64_installer/bacula-tray-monitor.conf.in | 34 + src/win32/win64_installer/bat.conf.in | 14 + src/win32/win64_installer/bconsole.conf.in | 14 + src/win32/win64_installer/bs-logo.bmp | Bin 0 -> 33654 bytes src/win32/win64_installer/bwx-console.conf.in | 14 + src/win32/win64_installer/client.conf.in | 15 + src/win32/win64_installer/installer.vcproj | 154 + src/win32/win64_installer/storage.conf.in | 14 + src/win32/win64_installer/tray-monitor.conf.in | 34 + src/win32/win64_installer/winbacula.nsi | 1223 + src/win32/winapi.h | 206 + src/win32/wx-console/Makefile | 87 + src/win32/wx-console/bwx-console.manifest | 21 + src/win32/wx-console/w32api.h | 11 + src/win32/wx-console/wx-console.vcproj | 352 + updatedb/README | 22 + updatedb/update_bacula_tables.in | 50 + updatedb/update_bacula_tables_8_to_9 | 17 + updatedb/update_mysql_tables.in | 190 + updatedb/update_mysql_tables_10_to_11.in | 69 + updatedb/update_mysql_tables_11_to_12.in | 79 + updatedb/update_mysql_tables_4_to_5 | 48 + updatedb/update_mysql_tables_5_to_6 | 97 + updatedb/update_mysql_tables_6_to_7 | 79 + updatedb/update_mysql_tables_7_to_8 | 50 + updatedb/update_mysql_tables_8_to_9 | 99 + updatedb/update_mysql_tables_9_to_10.in | 81 + updatedb/update_postgresql_tables.in | 175 + updatedb/update_postgresql_tables_10_to_11.in | 45 + updatedb/update_postgresql_tables_11_to_12.in | 100 + updatedb/update_postgresql_tables_7_to_8 | 72 + updatedb/update_postgresql_tables_8_to_9 | 133 + updatedb/update_postgresql_tables_9_to_10.in | 88 + updatedb/update_sqlite3_tables.in | 266 + updatedb/update_sqlite3_tables_10_to_11.in | 230 + updatedb/update_sqlite3_tables_11_to_12.in | 78 + updatedb/update_sqlite3_tables_8_to_9 | 338 + updatedb/update_sqlite3_tables_9_to_10.in | 255 + updatedb/update_sqlite_tables_10_to_11.in | 229 + updatedb/update_sqlite_tables_4_to_5 | 145 + updatedb/update_sqlite_tables_5_to_6 | 175 + updatedb/update_sqlite_tables_6_to_7 | 229 + updatedb/update_sqlite_tables_7_to_8 | 149 + updatedb/update_sqlite_tables_8_to_9 | 338 + updatedb/update_sqlite_tables_9_to_10.in | 256 + 1475 files changed, 580993 insertions(+) create mode 100644 .gitignore create mode 100644 AUTHORS create mode 100644 COPYING create mode 100644 COPYRIGHT create mode 100644 ChangeLog create mode 100644 INSTALL create mode 100644 LICENSE create mode 100644 LICENSE-FAQ create mode 100644 LICENSE-FOSS create mode 100755 Makefile.in create mode 100644 README create mode 100644 README.AIX create mode 100644 ReleaseNotes create mode 100644 SUPPORT create mode 100644 VERIFYING create mode 100644 autoconf/Make.common.in create mode 100644 autoconf/acconfig.h create mode 100644 autoconf/aclocal.m4 create mode 100644 autoconf/aclocal.m4.save create mode 100644 autoconf/bacula-macros/compare-version.m4 create mode 100644 autoconf/bacula-macros/db.m4 create mode 100644 autoconf/bacula-macros/largefiles.m4 create mode 100644 autoconf/bacula-macros/os.m4 create mode 100644 autoconf/confdefs.h create mode 100755 autoconf/config.guess create mode 100644 autoconf/config.h.in create 
mode 100644 autoconf/config.log create mode 100755 autoconf/config.rpath create mode 100755 autoconf/config.sub create mode 100644 autoconf/configure.in create mode 100644 autoconf/gettext-macros/codeset.m4 create mode 100644 autoconf/gettext-macros/gettext.m4 create mode 100644 autoconf/gettext-macros/glibc2.m4 create mode 100644 autoconf/gettext-macros/glibc21.m4 create mode 100644 autoconf/gettext-macros/iconv.m4 create mode 100644 autoconf/gettext-macros/intdiv0.m4 create mode 100644 autoconf/gettext-macros/intmax.m4 create mode 100644 autoconf/gettext-macros/inttypes-pri.m4 create mode 100644 autoconf/gettext-macros/inttypes.m4 create mode 100644 autoconf/gettext-macros/inttypes_h.m4 create mode 100644 autoconf/gettext-macros/isc-posix.m4 create mode 100644 autoconf/gettext-macros/lcmessage.m4 create mode 100644 autoconf/gettext-macros/lib-ld.m4 create mode 100644 autoconf/gettext-macros/lib-link.m4 create mode 100644 autoconf/gettext-macros/lib-prefix.m4 create mode 100644 autoconf/gettext-macros/longdouble.m4 create mode 100644 autoconf/gettext-macros/longlong.m4 create mode 100644 autoconf/gettext-macros/nls.m4 create mode 100644 autoconf/gettext-macros/po.m4 create mode 100644 autoconf/gettext-macros/printf-posix.m4 create mode 100644 autoconf/gettext-macros/progtest.m4 create mode 100644 autoconf/gettext-macros/signed.m4 create mode 100644 autoconf/gettext-macros/size_max.m4 create mode 100644 autoconf/gettext-macros/stdint_h.m4 create mode 100644 autoconf/gettext-macros/uintmax_t.m4 create mode 100644 autoconf/gettext-macros/ulonglong.m4 create mode 100644 autoconf/gettext-macros/wchar_t.m4 create mode 100644 autoconf/gettext-macros/wint_t.m4 create mode 100644 autoconf/gettext-macros/xsize.m4 create mode 100755 autoconf/install-sh create mode 100755 autoconf/install.sh create mode 100644 autoconf/libtool/libtool.m4 create mode 100644 autoconf/libtool/ltdl.m4 create mode 100644 autoconf/libtool/ltoptions.m4 create mode 100644 autoconf/libtool/ltsugar.m4 create mode 100644 autoconf/libtool/ltversion.m4 create mode 100644 autoconf/libtool/lt~obsolete.m4 create mode 100644 autoconf/ltmain.sh create mode 100755 autoconf/mkinstalldirs create mode 100644 autoconf/python.conf.py create mode 100755 autoconf/randpass create mode 100644 autoconf/randpass.awk create mode 100644 autoconf/randpass.bc create mode 100644 autoconf/who_and_what_of_libtool_patch create mode 100755 configure create mode 100644 examples/AFS-README create mode 100644 examples/Write-DVD-with-UDF.txt create mode 100755 examples/afs-bacula create mode 100644 examples/autochangers/Sun-desktop-20Gb-4mm-autoloader create mode 100755 examples/autochangers/ak-mtx-changer.txt create mode 100644 examples/autochangers/bacula-barcodes create mode 100755 examples/autochangers/chio-bacula create mode 100755 examples/autochangers/chio-changer create mode 100755 examples/autochangers/chio-changer-freebsd create mode 100755 examples/autochangers/chio-changer-openbsd create mode 100755 examples/autochangers/chio-changer.Sony-TSL-SA300C create mode 100755 examples/autochangers/locking-mtx-changer create mode 100644 examples/autochangers/mtx-changer.Adic-Scalar-100 create mode 100755 examples/autochangers/mtx-changer.Adic-Scalar-24 create mode 100755 examples/autochangers/mtx-changer.Sony-TSL-SA300C create mode 100755 examples/autochangers/mtx-changer.StorageTek-HPA4853 create mode 100644 examples/autochangers/multiple-drive-changer.txt create mode 100755 examples/autochangers/solaris-mtx-changer create mode 100644 
examples/backup-acls.txt create mode 100644 examples/backup-every-other-week.txt create mode 100644 examples/backup-to-cdwriter.txt create mode 100755 examples/client-backup create mode 100644 examples/conf/bacula-defs.m4 create mode 100644 examples/conf/console.conf create mode 100755 examples/conf/defaultconfig create mode 100755 examples/conf/fileset_convert.pl create mode 100755 examples/conf/kernsconfig create mode 100644 examples/conf/m4.Makefile create mode 100644 examples/conf/m4.additions create mode 100644 examples/conf/m4.bacula-dir.conf create mode 100644 examples/conf/m4.bacula-fd.conf create mode 100644 examples/conf/m4.bacula-sd.conf create mode 100644 examples/conf/m4.email create mode 100644 examples/conf/many-clients.txt create mode 100755 examples/database/bacula-sqlite_2_mysqldump.sh create mode 100644 examples/database/dbcheck.sql create mode 100644 examples/database/postgresql-dump.txt create mode 100644 examples/database/postgresql-mysql-dump.txt create mode 100755 examples/database/sqlite2pgsql create mode 100644 examples/devices/DDS.conf create mode 100644 examples/devices/DDS4.conf create mode 100644 examples/devices/EXB-8200.conf create mode 100644 examples/devices/EXB-8500.conf create mode 100644 examples/devices/Exabyte.conf create mode 100644 examples/devices/FreeBSD.conf create mode 100644 examples/devices/HP-DLT80.conf create mode 100644 examples/devices/HP-autoloader.conf create mode 100644 examples/devices/LTO-2.conf create mode 100644 examples/devices/LTO-3.conf create mode 100644 examples/devices/LTO-4.conf create mode 100644 examples/devices/OnStream.conf create mode 100644 examples/devices/OpenBSD.conf create mode 100644 examples/devices/README create mode 100644 examples/devices/Sony-DDS.conf create mode 100644 examples/devices/two-drive-autoloader.conf create mode 100755 examples/local_partitions create mode 100644 examples/local_partitions.txt create mode 100644 examples/nagios/check_bacula/Makefile.in create mode 100644 examples/nagios/check_bacula/README create mode 100644 examples/nagios/check_bacula/authenticate.c create mode 100644 examples/nagios/check_bacula/check_bacula.c create mode 100644 examples/nagios/check_bacula/check_bacula.h create mode 100755 examples/nagios/check_bacula_pools.sh create mode 100644 examples/nagios/nagios.txt create mode 100755 examples/recover.pl create mode 100755 examples/reports/bacula_mail_summary.sh create mode 100755 examples/reports/baculareport.pl create mode 100755 examples/reports/check_bacula_pools.sh create mode 100755 examples/reports/checkhost create mode 100755 examples/reports/is_client_alive create mode 100755 examples/reports/next_tape.sh create mode 100644 examples/reports/pool-query.txt create mode 100755 examples/reports/report.pl create mode 100644 examples/sample-query.sql create mode 100644 examples/ssh-tunnel-README.txt create mode 100755 examples/ssh-tunnel.sh create mode 100644 examples/upgrade-win32-client.txt create mode 100644 examples/vm/bacula.data create mode 100644 examples/vm/blabela.exec create mode 100644 examples/vm/blabeli.exec create mode 100644 examples/vm/tape-label-tools.txt create mode 100644 examples/vm/vmbacula.tgz create mode 100644 examples/vm/vmbacula.txt create mode 100644 manpages/Makefile.in create mode 100644 manpages/bacula-dir.8 create mode 100644 manpages/bacula-fd.8 create mode 100644 manpages/bacula-sd.8 create mode 100644 manpages/bacula-tray-monitor.1 create mode 100644 manpages/bacula.8 create mode 100644 manpages/bat.1 create mode 100644 
manpages/bconsole.8 create mode 100644 manpages/bcopy.8 create mode 100644 manpages/bextract.8 create mode 100644 manpages/bls.8 create mode 100644 manpages/bpluginfo.8 create mode 100644 manpages/bregex.8 create mode 100644 manpages/bscan.8 create mode 100644 manpages/bsmtp.1 create mode 100644 manpages/btape.8 create mode 100644 manpages/btraceback.8 create mode 100644 manpages/bwild.8 create mode 100644 manpages/dbcheck.8 create mode 100644 platforms/Makefile.in create mode 100644 platforms/README create mode 100644 platforms/aix/Makefile.in create mode 100644 platforms/alpha/Makefile.in create mode 100644 platforms/alpha/bacula-fd.in create mode 100644 platforms/bsdi/Makefile.in create mode 100755 platforms/bsdi/bacula-dir.in create mode 100755 platforms/bsdi/bacula-fd.in create mode 100755 platforms/bsdi/bacula-sd.in create mode 100644 platforms/contrib-rpm/README create mode 100755 platforms/contrib-rpm/build_rpm.sh create mode 100755 platforms/contrib-rpm/rpm_wizard.sh create mode 100644 platforms/contrib-rpm/rpmkey.spec create mode 100644 platforms/darwin/Makefile.in create mode 100644 platforms/debian/Makefile.in create mode 100644 platforms/debian/bacula-dir.in create mode 100644 platforms/debian/bacula-fd.in create mode 100644 platforms/debian/bacula-sd.in create mode 100644 platforms/freebsd/Makefile.in create mode 100755 platforms/freebsd/bacula-dir.in create mode 100755 platforms/freebsd/bacula-fd.in create mode 100755 platforms/freebsd/bacula-sd.in create mode 100644 platforms/freebsd/pthreads-fix.txt create mode 100644 platforms/freebsd/tapetest.c create mode 100644 platforms/gentoo/1.36.1-cdrecord-configure.patch create mode 100644 platforms/gentoo/1.36.2-cdrecord-configure.patch create mode 100644 platforms/gentoo/Makefile.in create mode 100755 platforms/gentoo/bacula-dir.in create mode 100755 platforms/gentoo/bacula-fd.in create mode 100755 platforms/gentoo/bacula-init.in create mode 100755 platforms/gentoo/bacula-sd.in create mode 100644 platforms/gentoo/bacula.ebuild create mode 100644 platforms/hurd/Makefile.in create mode 100644 platforms/hurd/bacula-dir.in create mode 100644 platforms/hurd/bacula-fd.in create mode 100644 platforms/hurd/bacula-sd.in create mode 100755 platforms/install-symlinks create mode 100644 platforms/irix/Makefile.in create mode 100755 platforms/irix/bacula-dir.in create mode 100755 platforms/irix/bacula-fd.in create mode 100755 platforms/irix/bacula-sd.in create mode 100644 platforms/mandrake/Makefile.in create mode 100755 platforms/mandrake/bacula-dir.in create mode 100755 platforms/mandrake/bacula-fd.in create mode 100755 platforms/mandrake/bacula-sd.in create mode 100644 platforms/mandrake/bacula.spec.bluca.in create mode 100644 platforms/openbsd/Makefile.in create mode 100755 platforms/openbsd/bacula-dir.in create mode 100755 platforms/openbsd/bacula-fd.in create mode 100755 platforms/openbsd/bacula-sd.in create mode 100644 platforms/osx/Makefile.in create mode 100644 platforms/osx/README create mode 100644 platforms/osx/files/installer.pmdoc.in/01destdir.xml create mode 100644 platforms/osx/files/installer.pmdoc.in/index.xml create mode 100644 platforms/osx/files/org.bacula.bacula-fd.plist.in create mode 100644 platforms/osx/files/uninstall.command.in create mode 100644 platforms/osx/installer-gencontents.py create mode 100644 platforms/osx/resources/ReadMe.html.in create mode 100644 platforms/osx/resources/postflight.in create mode 100644 platforms/osx/resources/preupgrade.in create mode 100644 platforms/redhat/Makefile.in create mode 
100755 platforms/redhat/bacula-dir.in create mode 100755 platforms/redhat/bacula-fd.in create mode 100755 platforms/redhat/bacula-sd.in create mode 100644 platforms/rpms/redhat/bacula-aligned.spec.in create mode 100644 platforms/rpms/redhat/bacula-bat.spec.in create mode 100644 platforms/rpms/redhat/bacula-client-static.spec.in create mode 100644 platforms/rpms/redhat/bacula-docs.spec.in create mode 100644 platforms/rpms/redhat/bacula-mtx.spec.in create mode 100644 platforms/rpms/redhat/bacula.spec.in create mode 100644 platforms/rpms/suse/bacula-aligned.spec.in create mode 100644 platforms/rpms/suse/bacula-bat.spec.in create mode 100644 platforms/rpms/suse/bacula-docs.spec.in create mode 100644 platforms/rpms/suse/bacula-mtx.spec.in create mode 100644 platforms/rpms/suse/bacula.spec.in create mode 100644 platforms/slackware/Makefile.in create mode 100644 platforms/slackware/functions.bacula.in create mode 100755 platforms/slackware/local-install.sh create mode 100644 platforms/slackware/rc.bacula-dir.in create mode 100644 platforms/slackware/rc.bacula-fd.in create mode 100644 platforms/slackware/rc.bacula-sd.in create mode 100644 platforms/solaris/Makefile.in create mode 100755 platforms/solaris/bacula-dir.in create mode 100755 platforms/solaris/bacula-fd.in create mode 100755 platforms/solaris/bacula-sd.in create mode 100644 platforms/solaris/copyright create mode 100644 platforms/solaris/makepkg.sh create mode 100644 platforms/solaris/pkginfo create mode 100644 platforms/solaris/postinstall create mode 100644 platforms/solaris/postremove create mode 100644 platforms/solaris/prototype.master create mode 100644 platforms/suse/Makefile.in create mode 100644 platforms/suse/bacula-dir-suse-sqlite.patch create mode 100755 platforms/suse/bacula-dir.in create mode 100755 platforms/suse/bacula-fd.in create mode 100755 platforms/suse/bacula-sd.in create mode 100644 platforms/suse/bacula.in create mode 100644 platforms/systemd/Makefile.in create mode 100644 platforms/systemd/bacula-dir.service.in create mode 100644 platforms/systemd/bacula-fd.service.in create mode 100644 platforms/systemd/bacula-sd.service.in create mode 100644 platforms/systemd/bacula.conf.in create mode 100644 platforms/ubuntu/Makefile.in create mode 100644 platforms/ubuntu/bacula-dir.in create mode 100644 platforms/ubuntu/bacula-fd.in create mode 100644 platforms/ubuntu/bacula-sd.in create mode 100644 platforms/unknown/Makefile.in create mode 100644 po/ChangeLog create mode 100644 po/LINGUAS create mode 100644 po/Makefile.in.in create mode 100644 po/Makevars create mode 100644 po/POTFILES.in create mode 100644 po/README create mode 100644 po/Rules-quot create mode 100644 po/bacula.pot create mode 100644 po/boldquot.sed create mode 100644 po/de.po create mode 100644 po/en@boldquot.header create mode 100644 po/en@quot.header create mode 100644 po/es.po create mode 100644 po/es_AR.po create mode 100644 po/fi.po create mode 100644 po/fr.po create mode 100644 po/insert-header.sin create mode 100644 po/it.po create mode 100644 po/nl.po create mode 100644 po/pl.po create mode 100644 po/quot.sed create mode 100644 po/remove-potcdate.sin create mode 100644 po/sv.po create mode 100644 po/uk.po create mode 100644 release/README create mode 100644 release/ReleaseProcedure.txt create mode 100755 release/bgit.py create mode 100755 release/check_packages create mode 100755 release/clean create mode 100644 release/config create mode 100755 release/git2changelog.pl create mode 100755 release/makeall create mode 100755 release/makebacularel 
create mode 100755 release/makedocsonly create mode 100755 release/makedocsrel create mode 100755 release/makemanualsrel create mode 100644 release/package-list create mode 100755 release/pushtags create mode 100755 release/sign create mode 100755 release/upload create mode 100755 scripts/Makefile.in create mode 100644 scripts/bacula-ctl-dir.in create mode 100644 scripts/bacula-ctl-fd.in create mode 100644 scripts/bacula-ctl-sd.in create mode 100644 scripts/bacula-tray-monitor.desktop.in create mode 100755 scripts/bacula.in create mode 100644 scripts/bacula.png create mode 100644 scripts/bacula.vim create mode 100755 scripts/baculabackupreport.in create mode 100644 scripts/bat.console_apps.in create mode 100644 scripts/bat.desktop.consolehelper.in create mode 100644 scripts/bat.desktop.in create mode 100644 scripts/bat.desktop.xsu.in create mode 100644 scripts/bat.pamd create mode 100755 scripts/bconsole.in create mode 100644 scripts/breload.in create mode 100644 scripts/btraceback.dbx create mode 100644 scripts/btraceback.gdb create mode 100755 scripts/btraceback.in create mode 100644 scripts/btraceback.mdb create mode 100755 scripts/defaultconfig create mode 100755 scripts/devel_bacula.in create mode 100644 scripts/disk-changer.in create mode 100644 scripts/filetype.vim create mode 100755 scripts/freespace create mode 100755 scripts/isworm create mode 100644 scripts/logrotate.in create mode 100644 scripts/logwatch/Makefile.in create mode 100644 scripts/logwatch/README create mode 100755 scripts/logwatch/applybaculadate create mode 100755 scripts/logwatch/bacula create mode 100644 scripts/logwatch/logfile.bacula.conf.in create mode 100644 scripts/logwatch/services.bacula.conf create mode 100644 scripts/magic.bacula create mode 100644 scripts/magic.bacula.txt create mode 100755 scripts/manual_prune.pl create mode 100644 scripts/mtx-changer.conf create mode 100644 scripts/mtx-changer.in create mode 100755 scripts/tapealert create mode 100644 src/.indent.pro create mode 100644 src/Makefile.in create mode 100644 src/baconfig.h create mode 100644 src/bacula.h create mode 100644 src/bc_types.h create mode 100644 src/bsd create mode 100644 src/c create mode 100644 src/c.scr create mode 100644 src/cats/Makefile.in create mode 100644 src/cats/bdb.h create mode 100644 src/cats/bdb_mysql.h create mode 100644 src/cats/bdb_postgresql.h create mode 100644 src/cats/bdb_sqlite.h create mode 100644 src/cats/bvfs.c create mode 100644 src/cats/bvfs.h create mode 100644 src/cats/cats.c create mode 100644 src/cats/cats.h create mode 100644 src/cats/cats_null.c create mode 100644 src/cats/create_bacula_database.in create mode 100644 src/cats/create_mysql_database.in create mode 100644 src/cats/create_postgresql_database.in create mode 100644 src/cats/create_sqlite3_database.in create mode 100644 src/cats/create_test_database create mode 100755 src/cats/delete_catalog_backup.in create mode 100755 src/cats/drop_bacula_database.in create mode 100755 src/cats/drop_bacula_tables.in create mode 100644 src/cats/drop_mysql_database.in create mode 100644 src/cats/drop_mysql_tables.in create mode 100644 src/cats/drop_postgresql_database.in create mode 100644 src/cats/drop_postgresql_tables.in create mode 100644 src/cats/drop_sqlite3_database.in create mode 100644 src/cats/drop_sqlite3_tables.in create mode 100755 src/cats/drop_test_tables create mode 100755 src/cats/fix_postgresql_tables create mode 100755 src/cats/grant_bacula_privileges.in create mode 100644 src/cats/grant_mysql_privileges.in create mode 100644 
src/cats/grant_postgresql_privileges.in create mode 100644 src/cats/grant_privileges create mode 100644 src/cats/grant_sqlite3_privileges.in create mode 100755 src/cats/install-default-backend.in create mode 100755 src/cats/make_bacula_tables.in create mode 100755 src/cats/make_catalog_backup.in create mode 100644 src/cats/make_catalog_backup.pl.in create mode 100644 src/cats/make_mysql_tables.in create mode 100644 src/cats/make_postgresql_tables.in create mode 100644 src/cats/make_sqlite3_tables.in create mode 100755 src/cats/make_test_tables create mode 100644 src/cats/mysql.c create mode 100644 src/cats/mysql.in create mode 100644 src/cats/postgresql.c create mode 100644 src/cats/postgresql.in create mode 100644 src/cats/protos.h create mode 100644 src/cats/sql.c create mode 100644 src/cats/sql_cmds.c create mode 100644 src/cats/sql_cmds.h create mode 100644 src/cats/sql_create.c create mode 100644 src/cats/sql_delete.c create mode 100644 src/cats/sql_find.c create mode 100644 src/cats/sql_get.c create mode 100644 src/cats/sql_list.c create mode 100644 src/cats/sql_update.c create mode 100644 src/cats/sqlite.c create mode 100644 src/cats/sqlite.in create mode 100755 src/cats/update_bacula_tables.in create mode 100644 src/cats/update_mysql_tables.in create mode 100644 src/cats/update_postgresql_tables.in create mode 100644 src/cats/update_sqlite3_tables.in create mode 100644 src/ch.h create mode 100644 src/console/Makefile.in create mode 100644 src/console/authenticate.c create mode 100644 src/console/bbconsjson.c create mode 100644 src/console/bconsole.conf.in create mode 100755 src/console/conio.c create mode 100644 src/console/conio.h create mode 100644 src/console/console.c create mode 100644 src/console/console_conf.c create mode 100644 src/console/console_conf.h create mode 100755 src/console/func.h create mode 100755 src/count-lines create mode 100644 src/dird/Makefile.in create mode 100644 src/dird/README-config create mode 100644 src/dird/admin.c create mode 100644 src/dird/authenticate.c create mode 100644 src/dird/autoprune.c create mode 100644 src/dird/backup.c create mode 100644 src/dird/bacula-dir.conf.in create mode 100644 src/dird/bdirjson.c create mode 100644 src/dird/bsr.c create mode 100644 src/dird/bsr.h create mode 100644 src/dird/catreq.c create mode 100644 src/dird/dir_plugins.c create mode 100644 src/dird/dir_plugins.h create mode 100644 src/dird/dird.c create mode 100644 src/dird/dird.h create mode 100644 src/dird/dird_conf.c create mode 100644 src/dird/dird_conf.h create mode 100644 src/dird/expand.c create mode 100644 src/dird/fd_cmds.c create mode 100644 src/dird/getmsg.c create mode 100644 src/dird/inc_conf.c create mode 100644 src/dird/job.c create mode 100644 src/dird/jobq.c create mode 100644 src/dird/jobq.h create mode 100644 src/dird/mac.c create mode 100644 src/dird/mac_sql.c create mode 100644 src/dird/mountreq.c create mode 100644 src/dird/msgchan.c create mode 100644 src/dird/newvol.c create mode 100644 src/dird/next_vol.c create mode 100644 src/dird/protos.h create mode 100644 src/dird/query.sql create mode 100644 src/dird/recycle.c create mode 100644 src/dird/restore.c create mode 100644 src/dird/run_conf.c create mode 100644 src/dird/scheduler.c create mode 100644 src/dird/snapshot.c create mode 100644 src/dird/ua.h create mode 100644 src/dird/ua_acl.c create mode 100644 src/dird/ua_cmds.c create mode 100644 src/dird/ua_dotcmds.c create mode 100644 src/dird/ua_input.c create mode 100644 src/dird/ua_label.c create mode 100644 src/dird/ua_output.c 
create mode 100644 src/dird/ua_prune.c create mode 100644 src/dird/ua_purge.c create mode 100644 src/dird/ua_query.c create mode 100644 src/dird/ua_restore.c create mode 100644 src/dird/ua_run.c create mode 100644 src/dird/ua_select.c create mode 100644 src/dird/ua_server.c create mode 100644 src/dird/ua_status.c create mode 100644 src/dird/ua_tree.c create mode 100644 src/dird/ua_update.c create mode 100644 src/dird/vbackup.c create mode 100644 src/dird/verify.c create mode 100644 src/filed/Makefile.in create mode 100644 src/filed/accurate.c create mode 100644 src/filed/authenticate.c create mode 100644 src/filed/backup.c create mode 100644 src/filed/backup.h create mode 100644 src/filed/bacl.c create mode 100644 src/filed/bacl.h create mode 100644 src/filed/bacl_freebsd.c create mode 100644 src/filed/bacl_freebsd.h create mode 100644 src/filed/bacl_linux.c create mode 100644 src/filed/bacl_linux.h create mode 100644 src/filed/bacl_osx.c create mode 100644 src/filed/bacl_osx.h create mode 100644 src/filed/bacl_solaris.c create mode 100644 src/filed/bacl_solaris.h create mode 100644 src/filed/bacula-fd.conf.in create mode 100644 src/filed/bfdjson.c create mode 100644 src/filed/bxattr.c create mode 100644 src/filed/bxattr.h create mode 100644 src/filed/bxattr_freebsd.c create mode 100644 src/filed/bxattr_freebsd.h create mode 100644 src/filed/bxattr_linux.c create mode 100644 src/filed/bxattr_linux.h create mode 100644 src/filed/bxattr_osx.c create mode 100644 src/filed/bxattr_osx.h create mode 100644 src/filed/bxattr_solaris.c create mode 100644 src/filed/bxattr_solaris.h create mode 100644 src/filed/crypto.c create mode 100644 src/filed/estimate.c create mode 100644 src/filed/fd_plugins.c create mode 100644 src/filed/fd_plugins.h create mode 100644 src/filed/fd_snapshot.c create mode 100644 src/filed/fd_snapshot.h create mode 100644 src/filed/filed.c create mode 100644 src/filed/filed.h create mode 100644 src/filed/filed_conf.c create mode 100644 src/filed/filed_conf.h create mode 100644 src/filed/heartbeat.c create mode 100644 src/filed/hello.c create mode 100644 src/filed/job.c create mode 100644 src/filed/protos.h create mode 100644 src/filed/restore.c create mode 100644 src/filed/restore.h create mode 100644 src/filed/status.c create mode 100644 src/filed/verify.c create mode 100644 src/filed/verify_vol.c create mode 100644 src/filed/win_efs.c create mode 100644 src/fileopts.h create mode 100644 src/filetypes.h create mode 100644 src/findlib/Makefile.in create mode 100644 src/findlib/attribs.c create mode 100644 src/findlib/bfile.c create mode 100644 src/findlib/bfile.h create mode 100644 src/findlib/create_file.c create mode 100644 src/findlib/drivetype.c create mode 100644 src/findlib/enable_priv.c create mode 100644 src/findlib/find.c create mode 100644 src/findlib/find.h create mode 100644 src/findlib/find_one.c create mode 100644 src/findlib/fstype.c create mode 100644 src/findlib/match.c create mode 100644 src/findlib/mkpath.c create mode 100644 src/findlib/namedpipe.c create mode 100644 src/findlib/namedpipe.h create mode 100644 src/findlib/protos.h create mode 100644 src/findlib/savecwd.c create mode 100644 src/findlib/savecwd.h create mode 100644 src/findlib/win32filter.c create mode 100644 src/findlib/win32filter.h create mode 100644 src/host.h.in create mode 100644 src/jcr.h create mode 100644 src/lib/Makefile.in create mode 100644 src/lib/address_conf.c create mode 100644 src/lib/address_conf.h create mode 100644 src/lib/alist.c create mode 100644 src/lib/alist.h create 
mode 100644 src/lib/attr.c create mode 100644 src/lib/attr.h create mode 100644 src/lib/base64.c create mode 100644 src/lib/base64.h create mode 100644 src/lib/berrno.c create mode 100644 src/lib/berrno.h create mode 100644 src/lib/bget_msg.c create mode 100644 src/lib/bget_msg.h create mode 100644 src/lib/binflate.c create mode 100644 src/lib/bits.h create mode 100644 src/lib/bjson.c create mode 100644 src/lib/bjson.h create mode 100644 src/lib/bmtio.h create mode 100644 src/lib/bnet.c create mode 100644 src/lib/bnet_server.c create mode 100644 src/lib/bpipe.c create mode 100644 src/lib/bpipe.h create mode 100644 src/lib/breg.c create mode 100644 src/lib/breg.h create mode 100644 src/lib/bregex.c create mode 100644 src/lib/bregex.h create mode 100644 src/lib/bsnprintf.c create mode 100644 src/lib/bsock.c create mode 100644 src/lib/bsock.h create mode 100644 src/lib/bsockcore.c create mode 100644 src/lib/bsockcore.h create mode 100644 src/lib/bsys.c create mode 100644 src/lib/btime.c create mode 100644 src/lib/btime.h create mode 100644 src/lib/btimers.c create mode 100644 src/lib/btimers.h create mode 100644 src/lib/bwlimit.c create mode 100644 src/lib/bwlimit.h create mode 100644 src/lib/cmd_parser.h create mode 100644 src/lib/cram-md5.c create mode 100644 src/lib/crc32.c create mode 100644 src/lib/crypto.c create mode 100644 src/lib/crypto.h create mode 100644 src/lib/daemon.c create mode 100644 src/lib/devlock.c create mode 100644 src/lib/devlock.h create mode 100644 src/lib/dlist.c create mode 100644 src/lib/dlist.h create mode 100644 src/lib/edit.c create mode 100644 src/lib/flist.c create mode 100644 src/lib/flist.h create mode 100644 src/lib/fnmatch.c create mode 100644 src/lib/fnmatch.h create mode 100644 src/lib/guid_to_name.c create mode 100644 src/lib/guid_to_name.h create mode 100644 src/lib/hmac.c create mode 100644 src/lib/htable.c create mode 100644 src/lib/htable.h create mode 100644 src/lib/ini.c create mode 100644 src/lib/ini.h create mode 100644 src/lib/jcr.c create mode 100644 src/lib/lex.c create mode 100644 src/lib/lex.h create mode 100644 src/lib/lib.h create mode 100644 src/lib/lockmgr.c create mode 100644 src/lib/lockmgr.h create mode 100644 src/lib/lz4.c create mode 100644 src/lib/lz4.h create mode 100644 src/lib/lz4_encoder.h create mode 100644 src/lib/md5.c create mode 100644 src/lib/md5.h create mode 100644 src/lib/mem_pool.c create mode 100644 src/lib/mem_pool.h create mode 100644 src/lib/message.c create mode 100644 src/lib/message.h create mode 100644 src/lib/mutex_list.h create mode 100644 src/lib/openssl-compat.h create mode 100644 src/lib/openssl.c create mode 100644 src/lib/openssl.h create mode 100644 src/lib/output.c create mode 100644 src/lib/output.h create mode 100644 src/lib/parse_conf.c create mode 100644 src/lib/parse_conf.h create mode 100644 src/lib/plugins.c create mode 100644 src/lib/plugins.h create mode 100644 src/lib/priv.c create mode 100644 src/lib/protos.h create mode 100644 src/lib/queue.c create mode 100644 src/lib/queue.h create mode 100644 src/lib/rblist.c create mode 100644 src/lib/rblist.h create mode 100644 src/lib/res.c create mode 100644 src/lib/runscript.c create mode 100644 src/lib/runscript.h create mode 100644 src/lib/rwlock.c create mode 100644 src/lib/rwlock.h create mode 100644 src/lib/scan.c create mode 100644 src/lib/sellist.c create mode 100644 src/lib/sellist.h create mode 100644 src/lib/serial.c create mode 100644 src/lib/serial.h create mode 100644 src/lib/sha1.c create mode 100644 src/lib/sha1.h create mode 
100644 src/lib/sha2.c create mode 100644 src/lib/sha2.h create mode 100644 src/lib/signal.c create mode 100644 src/lib/smartall.c create mode 100644 src/lib/smartall.h create mode 100644 src/lib/status.h create mode 100644 src/lib/tcpd.h create mode 100644 src/lib/tls.c create mode 100644 src/lib/tls.h create mode 100644 src/lib/tree.c create mode 100644 src/lib/tree.h create mode 100644 src/lib/unittests.c create mode 100644 src/lib/unittests.h create mode 100644 src/lib/util.c create mode 100644 src/lib/var.c create mode 100644 src/lib/var.h create mode 100644 src/lib/waitq.h create mode 100644 src/lib/watchdog.c create mode 100644 src/lib/watchdog.h create mode 100644 src/lib/worker.c create mode 100644 src/lib/worker.h create mode 100644 src/lib/workq.c create mode 100644 src/lib/workq.h create mode 100644 src/plugins/Makefile create mode 100644 src/plugins/README create mode 100644 src/plugins/dir/Makefile.in create mode 100644 src/plugins/dir/example-plugin-dir.c create mode 100644 src/plugins/fd/Makefile.in create mode 100644 src/plugins/fd/bpipe-fd.c create mode 100644 src/plugins/fd/example-plugin-fd.c create mode 100644 src/plugins/fd/fd_common.h create mode 100644 src/plugins/fd/test-deltaseq-fd.c create mode 100644 src/plugins/fd/test-plugin-fd.c create mode 100644 src/plugins/sd/Makefile.in create mode 100644 src/plugins/sd/example-plugin-sd.c create mode 100644 src/plugins/sd/main.c create mode 100644 src/qt-console/COMMANDS create mode 100644 src/qt-console/External-qt-console create mode 100644 src/qt-console/PAGES create mode 100644 src/qt-console/PREFS create mode 100644 src/qt-console/README create mode 100644 src/qt-console/README.mingw32 create mode 100644 src/qt-console/RELEASEFEATURES create mode 100644 src/qt-console/TODO create mode 100644 src/qt-console/bat.conf.example create mode 100644 src/qt-console/bat.conf.in create mode 100644 src/qt-console/bat.h create mode 100644 src/qt-console/bat.pro.in create mode 100644 src/qt-console/bat.pro.mingw32.in create mode 100644 src/qt-console/bat.pro.mingw64 create mode 100644 src/qt-console/bat.pro.mingw64.in create mode 100644 src/qt-console/bat_conf.cpp create mode 100644 src/qt-console/bat_conf.h create mode 100644 src/qt-console/bcomm/dircomm.cpp create mode 100644 src/qt-console/bcomm/dircomm.h create mode 100644 src/qt-console/bcomm/dircomm_auth.cpp create mode 100755 src/qt-console/build-depkgs-qt-console create mode 100644 src/qt-console/clients/clients.cpp create mode 100644 src/qt-console/clients/clients.h create mode 100644 src/qt-console/clients/clients.ui create mode 100644 src/qt-console/console/console.cpp create mode 100644 src/qt-console/console/console.h create mode 100644 src/qt-console/console/console.ui create mode 100644 src/qt-console/fileset/fileset.cpp create mode 100644 src/qt-console/fileset/fileset.h create mode 100644 src/qt-console/fileset/fileset.ui create mode 100644 src/qt-console/help/clients.html create mode 100644 src/qt-console/help/console.html create mode 100644 src/qt-console/help/filesets.html create mode 100644 src/qt-console/help/help.cpp create mode 100644 src/qt-console/help/help.h create mode 100644 src/qt-console/help/help.ui create mode 100644 src/qt-console/help/index.html create mode 100644 src/qt-console/help/joblist.html create mode 100644 src/qt-console/help/jobplot.html create mode 100644 src/qt-console/help/jobs.html create mode 100644 src/qt-console/help/media.html create mode 100644 src/qt-console/help/restore.html create mode 100644 src/qt-console/help/storage.html 
create mode 100644 src/qt-console/images/0p.png create mode 100644 src/qt-console/images/16p.png create mode 100644 src/qt-console/images/32p.png create mode 100644 src/qt-console/images/48p.png create mode 100644 src/qt-console/images/64p.png create mode 100644 src/qt-console/images/80p.png create mode 100644 src/qt-console/images/96p.png create mode 100644 src/qt-console/images/A.png create mode 100644 src/qt-console/images/R.png create mode 100644 src/qt-console/images/T.png create mode 100644 src/qt-console/images/W.png create mode 100644 src/qt-console/images/ajax-loader-big.gif create mode 100644 src/qt-console/images/applications-graphics.png create mode 100644 src/qt-console/images/applications-graphics.svg create mode 100644 src/qt-console/images/backup.png create mode 100644 src/qt-console/images/bat.png create mode 100644 src/qt-console/images/bat_icon.icns create mode 100644 src/qt-console/images/bat_icon.png create mode 100644 src/qt-console/images/browse.png create mode 100644 src/qt-console/images/browse.svg create mode 100644 src/qt-console/images/cartridge-edit.png create mode 100644 src/qt-console/images/cartridge-edit.svg create mode 100644 src/qt-console/images/cartridge.png create mode 100644 src/qt-console/images/cartridge.svg create mode 100644 src/qt-console/images/cartridge1.png create mode 100644 src/qt-console/images/check.png create mode 100644 src/qt-console/images/check.svg create mode 100644 src/qt-console/images/connected.png create mode 100644 src/qt-console/images/copy.png create mode 100644 src/qt-console/images/cut.png create mode 100644 src/qt-console/images/disconnected.png create mode 100644 src/qt-console/images/edit-cut.png create mode 100644 src/qt-console/images/edit-delete.png create mode 100644 src/qt-console/images/edit-delete.svg create mode 100644 src/qt-console/images/edit.png create mode 100644 src/qt-console/images/emblem-system.png create mode 100644 src/qt-console/images/emblem-system.svg create mode 100644 src/qt-console/images/estimate-job.png create mode 100644 src/qt-console/images/estimate-job.svg create mode 100644 src/qt-console/images/extern.png create mode 100644 src/qt-console/images/f.png create mode 100644 src/qt-console/images/folder.png create mode 100644 src/qt-console/images/folder.svg create mode 100644 src/qt-console/images/folderbothchecked.png create mode 100644 src/qt-console/images/folderbothchecked.svg create mode 100644 src/qt-console/images/folderchecked.png create mode 100644 src/qt-console/images/folderchecked.svg create mode 100644 src/qt-console/images/folderunchecked.png create mode 100644 src/qt-console/images/folderunchecked.svg create mode 100644 src/qt-console/images/go-down.png create mode 100644 src/qt-console/images/go-down.svg create mode 100644 src/qt-console/images/go-jump.png create mode 100644 src/qt-console/images/go-jump.svg create mode 100644 src/qt-console/images/go-up.png create mode 100644 src/qt-console/images/go-up.svg create mode 100644 src/qt-console/images/graph1.png create mode 100644 src/qt-console/images/graph1.svg create mode 100644 src/qt-console/images/help-browser.png create mode 100644 src/qt-console/images/help-browser.svg create mode 100644 src/qt-console/images/home.png create mode 100644 src/qt-console/images/inflag0.png create mode 100644 src/qt-console/images/inflag1.png create mode 100644 src/qt-console/images/inflag2.png create mode 100644 src/qt-console/images/intern.png create mode 100644 src/qt-console/images/joblog.png create mode 100644 
src/qt-console/images/joblog.svg create mode 100644 src/qt-console/images/label.png create mode 100644 src/qt-console/images/mail-message-new.png create mode 100644 src/qt-console/images/mail-message-new.svg create mode 100644 src/qt-console/images/mail-message-pending.png create mode 100644 src/qt-console/images/mail-message-pending.svg create mode 100644 src/qt-console/images/mark.png create mode 100644 src/qt-console/images/media-floppy.svg create mode 100644 src/qt-console/images/network-server.png create mode 100644 src/qt-console/images/network-server.svg create mode 100644 src/qt-console/images/new.png create mode 100644 src/qt-console/images/next.png create mode 100644 src/qt-console/images/open.png create mode 100644 src/qt-console/images/package-x-generic.png create mode 100644 src/qt-console/images/package-x-generic.svg create mode 100644 src/qt-console/images/page-next.gif create mode 100644 src/qt-console/images/page-prev.gif create mode 100644 src/qt-console/images/paste.png create mode 100644 src/qt-console/images/prev.png create mode 100644 src/qt-console/images/print.png create mode 100644 src/qt-console/images/purge.png create mode 100644 src/qt-console/images/restore.png create mode 100644 src/qt-console/images/run.png create mode 100644 src/qt-console/images/runit.png create mode 100644 src/qt-console/images/save.png create mode 100644 src/qt-console/images/server.png create mode 100644 src/qt-console/images/status-console.png create mode 100644 src/qt-console/images/status-console.svg create mode 100644 src/qt-console/images/status.png create mode 100644 src/qt-console/images/status.svg create mode 100644 src/qt-console/images/system-file-manager.png create mode 100644 src/qt-console/images/system-file-manager.svg create mode 100644 src/qt-console/images/unchecked.png create mode 100644 src/qt-console/images/unchecked.svg create mode 100644 src/qt-console/images/undo.png create mode 100644 src/qt-console/images/unmark.png create mode 100644 src/qt-console/images/up.png create mode 100644 src/qt-console/images/utilities-terminal.png create mode 100644 src/qt-console/images/utilities-terminal.svg create mode 100644 src/qt-console/images/view-refresh.png create mode 100644 src/qt-console/images/view-refresh.svg create mode 100644 src/qt-console/images/weather-severe-alert.png create mode 100644 src/qt-console/images/weather-severe-alert.svg create mode 100644 src/qt-console/images/zoom.png create mode 100755 src/qt-console/install_conf_file.in create mode 100644 src/qt-console/job/job.cpp create mode 100644 src/qt-console/job/job.h create mode 100644 src/qt-console/job/job.ui create mode 100644 src/qt-console/jobgraphs/jobplot.cpp create mode 100644 src/qt-console/jobgraphs/jobplot.h create mode 100644 src/qt-console/jobgraphs/jobplotcontrols.ui create mode 100644 src/qt-console/joblist/joblist.cpp create mode 100644 src/qt-console/joblist/joblist.h create mode 100644 src/qt-console/joblist/joblist.ui create mode 100644 src/qt-console/joblog/joblog.cpp create mode 100644 src/qt-console/joblog/joblog.h create mode 100644 src/qt-console/joblog/joblog.ui create mode 100644 src/qt-console/jobs/jobs.cpp create mode 100644 src/qt-console/jobs/jobs.h create mode 100644 src/qt-console/jobs/jobs.ui create mode 100644 src/qt-console/label/label.cpp create mode 100644 src/qt-console/label/label.h create mode 100644 src/qt-console/label/label.ui create mode 100644 src/qt-console/main.cpp create mode 100644 src/qt-console/main.qrc create mode 100644 src/qt-console/main.ui create mode 
100644 src/qt-console/mainwin.cpp create mode 100644 src/qt-console/mainwin.h create mode 100755 src/qt-console/make-win32 create mode 100644 src/qt-console/mediaedit/mediaedit.cpp create mode 100644 src/qt-console/mediaedit/mediaedit.h create mode 100644 src/qt-console/mediaedit/mediaedit.ui create mode 100644 src/qt-console/mediainfo/mediainfo.cpp create mode 100644 src/qt-console/mediainfo/mediainfo.h create mode 100644 src/qt-console/mediainfo/mediainfo.ui create mode 100644 src/qt-console/medialist/medialist.cpp create mode 100644 src/qt-console/medialist/medialist.h create mode 100644 src/qt-console/medialist/medialist.ui create mode 100644 src/qt-console/medialist/mediaview.cpp create mode 100644 src/qt-console/medialist/mediaview.h create mode 100644 src/qt-console/medialist/mediaview.ui create mode 100644 src/qt-console/mount/mount.cpp create mode 100644 src/qt-console/mount/mount.h create mode 100644 src/qt-console/mount/mount.ui create mode 100644 src/qt-console/pages.cpp create mode 100644 src/qt-console/pages.h create mode 100644 src/qt-console/prefs.ui create mode 100644 src/qt-console/qstd.cpp create mode 100644 src/qt-console/qstd.h create mode 100644 src/qt-console/qwtconfig.pri create mode 100644 src/qt-console/relabel/relabel.cpp create mode 100644 src/qt-console/relabel/relabel.h create mode 100644 src/qt-console/relabel/relabel.ui create mode 100644 src/qt-console/restore/brestore.cpp create mode 100644 src/qt-console/restore/brestore.ui create mode 100644 src/qt-console/restore/prerestore.cpp create mode 100644 src/qt-console/restore/prerestore.ui create mode 100644 src/qt-console/restore/restore.cpp create mode 100644 src/qt-console/restore/restore.h create mode 100644 src/qt-console/restore/restore.ui create mode 100644 src/qt-console/restore/restoretree.cpp create mode 100644 src/qt-console/restore/restoretree.h create mode 100644 src/qt-console/restore/restoretree.ui create mode 100644 src/qt-console/restore/runrestore.ui create mode 100644 src/qt-console/run/estimate.cpp create mode 100644 src/qt-console/run/estimate.ui create mode 100644 src/qt-console/run/prune.cpp create mode 100644 src/qt-console/run/prune.ui create mode 100644 src/qt-console/run/run.cpp create mode 100644 src/qt-console/run/run.h create mode 100644 src/qt-console/run/run.ui create mode 100644 src/qt-console/run/runadmin.ui create mode 100644 src/qt-console/run/runbackup.ui create mode 100644 src/qt-console/run/runcmd.cpp create mode 100644 src/qt-console/run/runcmd.ui create mode 100644 src/qt-console/run/runcopy.ui create mode 100644 src/qt-console/run/runmigration.ui create mode 100644 src/qt-console/run/runrestore.ui create mode 100644 src/qt-console/select/select.cpp create mode 100644 src/qt-console/select/select.h create mode 100644 src/qt-console/select/select.ui create mode 100644 src/qt-console/select/textinput.cpp create mode 100644 src/qt-console/select/textinput.h create mode 100644 src/qt-console/select/textinput.ui create mode 100644 src/qt-console/status/clientstat.cpp create mode 100644 src/qt-console/status/clientstat.h create mode 100644 src/qt-console/status/clientstat.ui create mode 100644 src/qt-console/status/dirstat.cpp create mode 100644 src/qt-console/status/dirstat.h create mode 100644 src/qt-console/status/dirstat.ui create mode 100644 src/qt-console/status/storstat.cpp create mode 100644 src/qt-console/status/storstat.h create mode 100644 src/qt-console/status/storstat.ui create mode 100644 src/qt-console/storage/content.cpp create mode 100644 
src/qt-console/storage/content.h create mode 100644 src/qt-console/storage/content.ui create mode 100644 src/qt-console/storage/storage.cpp create mode 100644 src/qt-console/storage/storage.h create mode 100644 src/qt-console/storage/storage.ui create mode 100644 src/qt-console/testprogs/examp/dock.pro create mode 100644 src/qt-console/testprogs/examp/dockwidgets.qrc create mode 100644 src/qt-console/testprogs/examp/main.cpp create mode 100644 src/qt-console/testprogs/examp/mainwindow.cpp create mode 100644 src/qt-console/testprogs/examp/mainwindow.h create mode 100644 src/qt-console/testprogs/putz/main.cpp create mode 100644 src/qt-console/testprogs/putz/putz.cpp create mode 100644 src/qt-console/testprogs/putz/putz.h create mode 100644 src/qt-console/testprogs/putz/putz.pro create mode 100644 src/qt-console/testprogs/putz/putz.ui create mode 100644 src/qt-console/tray-monitor/authenticate.cpp create mode 100644 src/qt-console/tray-monitor/bacula-tray-monitor.conf.in create mode 100644 src/qt-console/tray-monitor/clientselectwizardpage.cpp create mode 100644 src/qt-console/tray-monitor/clientselectwizardpage.h create mode 100644 src/qt-console/tray-monitor/clientselectwizardpage.ui create mode 100644 src/qt-console/tray-monitor/common.h create mode 100644 src/qt-console/tray-monitor/conf.cpp create mode 100644 src/qt-console/tray-monitor/conf.h create mode 100644 src/qt-console/tray-monitor/dir-monitor.ui create mode 100644 src/qt-console/tray-monitor/dirstatus.cpp create mode 100644 src/qt-console/tray-monitor/dirstatus.h create mode 100644 src/qt-console/tray-monitor/fd-monitor.ui create mode 100644 src/qt-console/tray-monitor/fdstatus.cpp create mode 100644 src/qt-console/tray-monitor/fdstatus.h create mode 100644 src/qt-console/tray-monitor/fileselectwizardpage.cpp create mode 100644 src/qt-console/tray-monitor/fileselectwizardpage.h create mode 100644 src/qt-console/tray-monitor/fileselectwizardpage.ui create mode 100644 src/qt-console/tray-monitor/filesmodel.h create mode 100755 src/qt-console/tray-monitor/install_conf_file.in create mode 100644 src/qt-console/tray-monitor/jobselectwizardpage.cpp create mode 100644 src/qt-console/tray-monitor/jobselectwizardpage.h create mode 100644 src/qt-console/tray-monitor/jobselectwizardpage.ui create mode 100644 src/qt-console/tray-monitor/jobsmodel.cpp create mode 100644 src/qt-console/tray-monitor/jobsmodel.h create mode 100644 src/qt-console/tray-monitor/main-conf.ui create mode 100644 src/qt-console/tray-monitor/pluginmodel.h create mode 100644 src/qt-console/tray-monitor/pluginwizardpage.cpp create mode 100644 src/qt-console/tray-monitor/pluginwizardpage.h create mode 100644 src/qt-console/tray-monitor/pluginwizardpage.ui create mode 100644 src/qt-console/tray-monitor/res-conf.ui create mode 100644 src/qt-console/tray-monitor/restoreoptionswizardpage.cpp create mode 100644 src/qt-console/tray-monitor/restoreoptionswizardpage.h create mode 100644 src/qt-console/tray-monitor/restoreoptionswizardpage.ui create mode 100644 src/qt-console/tray-monitor/restorewizard.cpp create mode 100644 src/qt-console/tray-monitor/restorewizard.h create mode 100644 src/qt-console/tray-monitor/restorewizard.ui create mode 100644 src/qt-console/tray-monitor/run.ui create mode 100644 src/qt-console/tray-monitor/runjob.cpp create mode 100644 src/qt-console/tray-monitor/runjob.h create mode 100644 src/qt-console/tray-monitor/sd-monitor.ui create mode 100644 src/qt-console/tray-monitor/sdstatus.cpp create mode 100644 src/qt-console/tray-monitor/sdstatus.h create 
mode 100644 src/qt-console/tray-monitor/status.cpp create mode 100644 src/qt-console/tray-monitor/status.h create mode 100644 src/qt-console/tray-monitor/task.cpp create mode 100644 src/qt-console/tray-monitor/task.h create mode 100644 src/qt-console/tray-monitor/tray-monitor.conf.in create mode 100644 src/qt-console/tray-monitor/tray-monitor.cpp create mode 100644 src/qt-console/tray-monitor/tray-monitor.h create mode 100644 src/qt-console/tray-monitor/tray-monitor.pro.in create mode 100644 src/qt-console/tray-monitor/tray-monitor.pro.mingw32.in create mode 100644 src/qt-console/tray-monitor/tray-monitor.pro.mingw64.in create mode 100644 src/qt-console/tray-monitor/tray-ui.h create mode 100644 src/qt-console/tray-monitor/tray_conf.cpp create mode 100644 src/qt-console/tray-monitor/tray_conf.h create mode 100644 src/qt-console/tray-monitor/ts/tm_de.ts create mode 100644 src/qt-console/tray-monitor/ts/tm_fr.ts create mode 100644 src/qt-console/tray-monitor/ts/tm_ja.ts create mode 100644 src/qt-console/tray-monitor/win32/qmake.conf create mode 100644 src/qt-console/tray-monitor/win32/qplatformdefs.h create mode 100644 src/qt-console/ts/bat_de.ts create mode 100644 src/qt-console/ts/bat_fr.ts create mode 100644 src/qt-console/util/comboutil.cpp create mode 100644 src/qt-console/util/comboutil.h create mode 100644 src/qt-console/util/fmtwidgetitem.cpp create mode 100644 src/qt-console/util/fmtwidgetitem.h create mode 100644 src/qt-console/win32/qmake.conf create mode 100644 src/qt-console/win32/qplatformdefs.h create mode 100644 src/stored/Makefile.in create mode 100644 src/stored/acquire.c create mode 100644 src/stored/aligned_dev.c create mode 100644 src/stored/aligned_dev.h create mode 100644 src/stored/aligned_read.c create mode 100644 src/stored/aligned_write.c create mode 100644 src/stored/ansi_label.c create mode 100644 src/stored/append.c create mode 100644 src/stored/askdir.c create mode 100644 src/stored/authenticate.c create mode 100644 src/stored/autochanger.c create mode 100644 src/stored/bacula-sd.conf.in create mode 100644 src/stored/bcopy.c create mode 100644 src/stored/bextract.c create mode 100644 src/stored/block.c create mode 100644 src/stored/block.h create mode 100644 src/stored/block_util.c create mode 100644 src/stored/bls.c create mode 100644 src/stored/bscan.c create mode 100644 src/stored/bsdjson.c create mode 100644 src/stored/bsr.h create mode 100644 src/stored/btape.c create mode 100644 src/stored/butil.c create mode 100644 src/stored/cloud_dev.c create mode 100644 src/stored/cloud_dev.h create mode 100644 src/stored/cloud_driver.h create mode 100644 src/stored/cloud_parts.c create mode 100644 src/stored/cloud_parts.h create mode 100644 src/stored/cloud_test.c create mode 100644 src/stored/cloud_transfer_mgr.c create mode 100644 src/stored/cloud_transfer_mgr.h create mode 100644 src/stored/dev.c create mode 100644 src/stored/dev.h create mode 100644 src/stored/device.c create mode 100644 src/stored/dircmd.c create mode 100644 src/stored/ebcdic.c create mode 100644 src/stored/fd_cmds.c create mode 100644 src/stored/fifo_dev.c create mode 100644 src/stored/fifo_dev.h create mode 100644 src/stored/file_dev.c create mode 100644 src/stored/file_dev.h create mode 100644 src/stored/file_driver.c create mode 100644 src/stored/file_driver.h create mode 100644 src/stored/global.c create mode 100644 src/stored/hello.c create mode 100644 src/stored/init_dev.c create mode 100644 src/stored/job.c create mode 100644 src/stored/label.c create mode 100644 src/stored/lock.c create 
mode 100644 src/stored/lock.h create mode 100644 src/stored/match_bsr.c create mode 100644 src/stored/mount.c create mode 100644 src/stored/null_dev.c create mode 100644 src/stored/null_dev.h create mode 100644 src/stored/os.c create mode 100644 src/stored/parse_bsr.c create mode 100644 src/stored/protos.h create mode 100644 src/stored/read.c create mode 100644 src/stored/read_records.c create mode 100644 src/stored/record.h create mode 100644 src/stored/record_read.c create mode 100644 src/stored/record_util.c create mode 100644 src/stored/record_write.c create mode 100644 src/stored/reserve.c create mode 100644 src/stored/reserve.h create mode 100644 src/stored/s3_driver.c create mode 100644 src/stored/s3_driver.h create mode 100644 src/stored/scan.c create mode 100644 src/stored/sd_plugins.c create mode 100644 src/stored/sd_plugins.h create mode 100644 src/stored/spool.c create mode 100644 src/stored/status.c create mode 100644 src/stored/stored.c create mode 100644 src/stored/stored.conf.in create mode 100644 src/stored/stored.h create mode 100644 src/stored/stored_conf.c create mode 100644 src/stored/stored_conf.h create mode 100644 src/stored/tape_alert.c create mode 100644 src/stored/tape_alert_msgs.h create mode 100644 src/stored/tape_dev.c create mode 100644 src/stored/tape_dev.h create mode 100644 src/stored/tape_worm.c create mode 100644 src/stored/vbackup.c create mode 100644 src/stored/vol_mgr.c create mode 100644 src/stored/vol_mgr.h create mode 100644 src/stored/vtape_dev.c create mode 100644 src/stored/vtape_dev.h create mode 100644 src/stored/wait.c create mode 100644 src/stored/win_file_dev.h create mode 100644 src/stored/win_tape_dev.h create mode 100644 src/streams.h create mode 100644 src/tools/Makefile.in create mode 100644 src/tools/bbatch.c create mode 100644 src/tools/bpluginfo.c create mode 100644 src/tools/bregex.c create mode 100644 src/tools/bregtest.c create mode 100644 src/tools/bsmtp.c create mode 100644 src/tools/bsnapshot.c create mode 100644 src/tools/bvfs_test.c create mode 100644 src/tools/bwild.c create mode 100644 src/tools/cats_test.c create mode 100644 src/tools/dbcheck.c create mode 100644 src/tools/drivetype.c create mode 100644 src/tools/fstype.c create mode 100644 src/tools/gigaslam.c create mode 100644 src/tools/grow.c create mode 100644 src/tools/testfind.c create mode 100644 src/tools/testls.c create mode 100644 src/tools/timelimit.1 create mode 100644 src/tools/timelimit.c create mode 100644 src/version.h create mode 100644 src/win32/External-mingw-w64 create mode 100644 src/win32/External-mingw32 create mode 100644 src/win32/External-msvc create mode 100644 src/win32/Makefile create mode 100644 src/win32/Makefile.full create mode 100644 src/win32/Makefile.inc.in create mode 100644 src/win32/Makefile.rules create mode 100644 src/win32/README.mingw create mode 100644 src/win32/README.vc8 create mode 100644 src/win32/bacula.sln create mode 100644 src/win32/bacula/bacula.vcproj create mode 100755 src/win32/build-depkgs-mingw-w64 create mode 100755 src/win32/build-depkgs-mingw32 create mode 100644 src/win32/cats/Makefile create mode 100644 src/win32/cats/bacula_cats.def create mode 100644 src/win32/cats/bacula_cats/bacula_cats.vcproj create mode 100644 src/win32/cats/cats_mysql/cats_mysql.vcproj create mode 100644 src/win32/cats/cats_postgresql/cats_postgresql.vcproj create mode 100644 src/win32/cats/cats_sqlite3/cats_sqlite3.vcproj create mode 100644 src/win32/cats/create_mysql_database.cmd create mode 100644 
src/win32/cats/create_postgresql_database.cmd create mode 100644 src/win32/cats/create_postgresql_database.sql create mode 100644 src/win32/cats/create_sqlite3_database.cmd create mode 100644 src/win32/cats/delete_catalog_backup.cmd create mode 100644 src/win32/cats/drop_mysql_database.cmd create mode 100644 src/win32/cats/drop_mysql_tables.cmd create mode 100644 src/win32/cats/drop_mysql_tables.sql create mode 100644 src/win32/cats/drop_postgresql_database.cmd create mode 100644 src/win32/cats/drop_postgresql_tables.cmd create mode 100644 src/win32/cats/drop_postgresql_tables.sql create mode 100644 src/win32/cats/drop_sqlite3_database.cmd create mode 100644 src/win32/cats/drop_sqlite3_tables.cmd create mode 100644 src/win32/cats/grant_mysql_privileges.cmd create mode 100644 src/win32/cats/grant_mysql_privileges.sql create mode 100644 src/win32/cats/grant_postgresql_privileges.cmd create mode 100644 src/win32/cats/grant_postgresql_privileges.sql create mode 100644 src/win32/cats/grant_sqlite3_privileges.cmd create mode 100755 src/win32/cats/make_def create mode 100644 src/win32/cats/make_mysql_catalog_backup.cmd create mode 100644 src/win32/cats/make_mysql_tables.cmd create mode 100644 src/win32/cats/make_mysql_tables.sql create mode 100644 src/win32/cats/make_postgresql_catalog_backup.cmd create mode 100644 src/win32/cats/make_postgresql_tables.cmd create mode 100644 src/win32/cats/make_postgresql_tables.sql create mode 100644 src/win32/cats/make_sqlite3_catalog_backup.cmd create mode 100644 src/win32/cats/make_sqlite3_tables.cmd create mode 100644 src/win32/cats/make_sqlite3_tables.sql create mode 100644 src/win32/compat/Makefile create mode 100644 src/win32/compat/alloca.h create mode 100644 src/win32/compat/arpa/inet.h create mode 100644 src/win32/compat/compat.cpp create mode 100644 src/win32/compat/compat.h create mode 100644 src/win32/compat/dirent.h create mode 100644 src/win32/compat/dlfcn.h create mode 100644 src/win32/compat/getopt.c create mode 100644 src/win32/compat/getopt.h create mode 100644 src/win32/compat/grp.h create mode 100644 src/win32/compat/mingwconfig.h create mode 100644 src/win32/compat/ms_atl.h create mode 100644 src/win32/compat/mswinver.h create mode 100644 src/win32/compat/netdb.h create mode 100644 src/win32/compat/netinet/in.h create mode 100644 src/win32/compat/netinet/tcp.h create mode 100644 src/win32/compat/print.cpp create mode 100644 src/win32/compat/pwd.h create mode 100644 src/win32/compat/stdint.h create mode 100644 src/win32/compat/strings.h create mode 100644 src/win32/compat/sys/file.h create mode 100644 src/win32/compat/sys/ioctl.h create mode 100644 src/win32/compat/sys/mtio.h create mode 100644 src/win32/compat/sys/socket.h create mode 100644 src/win32/compat/sys/stat.h create mode 100644 src/win32/compat/sys/time.h create mode 100644 src/win32/compat/sys/wait.h create mode 100644 src/win32/compat/syslog.h create mode 100644 src/win32/compat/unistd.h create mode 100644 src/win32/compat/winapi.c create mode 100644 src/win32/compat/winapi.h create mode 100644 src/win32/compat/winhdrs.h create mode 100644 src/win32/compat/winhost.h create mode 100644 src/win32/compat/winsock.h create mode 100644 src/win32/console/Makefile create mode 100644 src/win32/console/console.vcproj create mode 100644 src/win32/cygwin.NET.bashrc create mode 100644 src/win32/dird/Makefile create mode 100644 src/win32/dird/bacula.rc create mode 100644 src/win32/dird/dird.vcproj create mode 100644 src/win32/dird/main.cpp create mode 100644 src/win32/dird/service.cpp create 
mode 100644 src/win32/dird/who.h create mode 100644 src/win32/filed/Makefile create mode 100644 src/win32/filed/bacula-fd.manifest create mode 100644 src/win32/filed/bacula.rc create mode 100644 src/win32/filed/baculafd.vcproj create mode 100644 src/win32/filed/main.cpp create mode 100644 src/win32/filed/plugins/Makefile create mode 100644 src/win32/filed/plugins/alldrives-fd.c create mode 100644 src/win32/filed/plugins/alldrives-fd.def create mode 100644 src/win32/filed/plugins/api.c create mode 100644 src/win32/filed/plugins/api.h create mode 100644 src/win32/filed/plugins/bpipe-fd.c create mode 100644 src/win32/filed/plugins/bpipe-fd.def create mode 100644 src/win32/filed/plugins/comadmin.h create mode 100644 src/win32/filed/plugins/dbi_node.c create mode 100644 src/win32/filed/plugins/exch_api.c create mode 100644 src/win32/filed/plugins/exch_api.h create mode 100644 src/win32/filed/plugins/exch_dbi_node.c create mode 100644 src/win32/filed/plugins/exch_file_node.c create mode 100644 src/win32/filed/plugins/exch_node.c create mode 100644 src/win32/filed/plugins/exch_node.h create mode 100644 src/win32/filed/plugins/exch_root_node.c create mode 100644 src/win32/filed/plugins/exch_service_node.c create mode 100644 src/win32/filed/plugins/exch_storage_group_node.c create mode 100644 src/win32/filed/plugins/exch_store_node.c create mode 100644 src/win32/filed/plugins/exchange-fd.c create mode 100644 src/win32/filed/plugins/exchange-fd.def create mode 100644 src/win32/filed/plugins/exchange-fd.h create mode 100644 src/win32/filed/plugins/file_node.c create mode 100644 src/win32/filed/plugins/node.c create mode 100644 src/win32/filed/plugins/node.h create mode 100644 src/win32/filed/plugins/root_node.c create mode 100644 src/win32/filed/plugins/service_node.c create mode 100644 src/win32/filed/plugins/storage_group_node.c create mode 100644 src/win32/filed/plugins/store_node.c create mode 100644 src/win32/filed/service.cpp create mode 100644 src/win32/filed/trayMonitor.cpp create mode 100644 src/win32/filed/vss.cpp create mode 100644 src/win32/filed/vss.h create mode 100644 src/win32/filed/vss_Vista.cpp create mode 100644 src/win32/filed/vss_W2K3.cpp create mode 100644 src/win32/filed/vss_XP.cpp create mode 100644 src/win32/filed/vss_generic.cpp create mode 100644 src/win32/filed/who.h create mode 100644 src/win32/full_win32_installer/ConfigPage1.nsh create mode 100644 src/win32/full_win32_installer/ConfigPage2.nsh create mode 100644 src/win32/full_win32_installer/DumpLog.nsh create mode 100644 src/win32/full_win32_installer/InstallType.ini create mode 100644 src/win32/full_win32_installer/InstallType.nsh create mode 100644 src/win32/full_win32_installer/Start.bat create mode 100644 src/win32/full_win32_installer/Stop.bat create mode 100644 src/win32/full_win32_installer/WriteTemplates.ini create mode 100644 src/win32/full_win32_installer/bacula-dir.conf.in create mode 100644 src/win32/full_win32_installer/bacula-fd.conf.in create mode 100644 src/win32/full_win32_installer/bacula-logo.bmp create mode 100644 src/win32/full_win32_installer/bacula-sd.conf.in create mode 100644 src/win32/full_win32_installer/bat.conf.in create mode 100644 src/win32/full_win32_installer/bconsole.conf.in create mode 100644 src/win32/full_win32_installer/build-installer.cmd create mode 100644 src/win32/full_win32_installer/bwx-console.conf.in create mode 100644 src/win32/full_win32_installer/client.conf.in create mode 100644 src/win32/full_win32_installer/installer.vcproj create mode 100644 
src/win32/full_win32_installer/storage.conf.in create mode 100644 src/win32/full_win32_installer/winbacula.nsi create mode 100644 src/win32/lib/Makefile create mode 100644 src/win32/lib/bacula32.def create mode 100644 src/win32/lib/bacula64.def create mode 100755 src/win32/lib/make_def32 create mode 100755 src/win32/lib/make_def64 create mode 100644 src/win32/libbac/Makefile create mode 100644 src/win32/libbac/libbac.vcproj create mode 100644 src/win32/libbac/msvc/bacula.def create mode 100644 src/win32/libwin32/aboutDialog.cpp create mode 100644 src/win32/libwin32/aboutDialog.h create mode 100644 src/win32/libwin32/bacula.bmp create mode 100644 src/win32/libwin32/bacula.ico create mode 100644 src/win32/libwin32/bacula.rc create mode 100644 src/win32/libwin32/error.ico create mode 100644 src/win32/libwin32/idle.ico create mode 100644 src/win32/libwin32/main.cpp create mode 100644 src/win32/libwin32/protos.h create mode 100644 src/win32/libwin32/res.h create mode 100644 src/win32/libwin32/running.ico create mode 100644 src/win32/libwin32/saving.ico create mode 100644 src/win32/libwin32/service.cpp create mode 100644 src/win32/libwin32/statusDialog.cpp create mode 100644 src/win32/libwin32/statusDialog.h create mode 100644 src/win32/libwin32/trayMonitor.cpp create mode 100644 src/win32/libwin32/trayMonitor.h create mode 100644 src/win32/libwin32/warn.ico create mode 100644 src/win32/libwin32/win32.h create mode 100755 src/win32/makeall create mode 100644 src/win32/patches/binutils_texinfo_version.patch create mode 100644 src/win32/patches/dvd+rw-tools.patch create mode 100644 src/win32/patches/mingw-utils.patch create mode 100644 src/win32/patches/mt.patch create mode 100644 src/win32/patches/mtx-msvc1.patch create mode 100644 src/win32/patches/mtx-msvc2.patch create mode 100644 src/win32/patches/mtx.patch create mode 100644 src/win32/patches/nsis.patch create mode 100644 src/win32/patches/openssl-w64.patch create mode 100644 src/win32/patches/openssl.patch create mode 100644 src/win32/patches/pcre.patch create mode 100644 src/win32/patches/postgresql.patch create mode 100644 src/win32/patches/pthreads-w64.patch create mode 100644 src/win32/patches/pthreads.patch create mode 100644 src/win32/patches/qt4-compilation-see.patch create mode 100644 src/win32/patches/qt4-compilation.patch create mode 100644 src/win32/patches/qt4-intrinsics.patch create mode 100644 src/win32/patches/qt4-widget-ui.patch create mode 100644 src/win32/patches/sed.patch create mode 100644 src/win32/patches/sed_msc.patch create mode 100644 src/win32/patches/sqlite.patch create mode 100644 src/win32/patches/sqlite_msc.patch create mode 100644 src/win32/patches/stab2cv.patch create mode 100644 src/win32/patches/wx.sed create mode 100644 src/win32/patches/wx1.patch create mode 100644 src/win32/patches/wx2.patch create mode 100644 src/win32/patches/wxWidgets.patch create mode 100644 src/win32/patches/zlib.patch create mode 100644 src/win32/pebuilder/Makefile.in create mode 100644 src/win32/pebuilder/README create mode 100644 src/win32/pebuilder/bacula/bacula.inf create mode 100644 src/win32/pebuilder/bacula/bacula_nu2menu.xml create mode 100644 src/win32/scripts/Makefile create mode 100644 src/win32/scripts/bsleep.c create mode 100644 src/win32/scripts/bsleep.vcproj create mode 100644 src/win32/scripts/disk-changer.cmd create mode 100644 src/win32/scripts/dvd-handler.cmd create mode 100644 src/win32/scripts/mtx-changer.cmd create mode 100644 src/win32/stored/Makefile create mode 100644 src/win32/stored/bacula.rc create mode 
100644 src/win32/stored/baculasd.vcproj create mode 100644 src/win32/stored/bcopy/bcopy.vcproj create mode 100644 src/win32/stored/bextract/bextract.vcproj create mode 100644 src/win32/stored/bls/bls.vcproj create mode 100644 src/win32/stored/bscan/bscan.vcproj create mode 100644 src/win32/stored/btape/btape.vcproj create mode 100644 src/win32/stored/main.cpp create mode 100644 src/win32/stored/mtops.cpp create mode 100644 src/win32/stored/postest/postest.cpp create mode 100644 src/win32/stored/postest/postest.vcproj create mode 100644 src/win32/stored/service.cpp create mode 100644 src/win32/stored/storelib/storelib.vcproj create mode 100644 src/win32/stored/trayMonitor.cpp create mode 100644 src/win32/stored/who.h create mode 100644 src/win32/stored/win_tape_device.cpp create mode 100644 src/win32/tools/Makefile create mode 100644 src/win32/tools/ScsiDeviceList.cpp create mode 100644 src/win32/tools/ScsiDeviceList.h create mode 100644 src/win32/tools/bsmtp/bsmtp.vcproj create mode 100644 src/win32/tools/dbcheck/dbcheck.vcproj create mode 100644 src/win32/tools/drivetype/drivetype.vcproj create mode 100644 src/win32/tools/fstype/fstype.vcproj create mode 100644 src/win32/tools/scsilist.cpp create mode 100644 src/win32/tools/scsilist/scsilist.vcproj create mode 100644 src/win32/tools/testfind/testfind.vcproj create mode 100644 src/win32/tools/testls/testls.vcproj create mode 100644 src/win32/win32_installer/ConfigPage1.nsh create mode 100644 src/win32/win32_installer/ConfigPage2.nsh create mode 100644 src/win32/win32_installer/DumpLog.nsh create mode 100644 src/win32/win32_installer/InstallType.ini create mode 100644 src/win32/win32_installer/InstallType.nsh create mode 100644 src/win32/win32_installer/Makefile create mode 100755 src/win32/win32_installer/Readme.txt create mode 100644 src/win32/win32_installer/Start.bat create mode 100644 src/win32/win32_installer/Stop.bat create mode 100644 src/win32/win32_installer/WriteTemplates.ini create mode 100644 src/win32/win32_installer/bacula-dir.conf.in create mode 100644 src/win32/win32_installer/bacula-fd.conf.in create mode 100644 src/win32/win32_installer/bacula-logo.bmp create mode 100644 src/win32/win32_installer/bacula-sd.conf.in create mode 100644 src/win32/win32_installer/bat.conf.in create mode 100644 src/win32/win32_installer/bconsole.conf.in create mode 100644 src/win32/win32_installer/bs-logo.bmp create mode 100644 src/win32/win32_installer/build-installer.cmd create mode 100644 src/win32/win32_installer/bwx-console.conf.in create mode 100644 src/win32/win32_installer/client.conf.in create mode 100644 src/win32/win32_installer/installer.vcproj create mode 100644 src/win32/win32_installer/storage.conf.in create mode 100644 src/win32/win32_installer/tray-monitor.conf.in create mode 100644 src/win32/win32_installer/winbacula.nsi create mode 100644 src/win32/win32_installer/x64.nsh create mode 100644 src/win32/win64_installer/ConfigPage1.nsh create mode 100644 src/win32/win64_installer/ConfigPage2.nsh create mode 100644 src/win32/win64_installer/DumpLog.nsh create mode 100644 src/win32/win64_installer/InstallType.ini create mode 100644 src/win32/win64_installer/InstallType.nsh create mode 100644 src/win32/win64_installer/Makefile create mode 100755 src/win32/win64_installer/Readme.txt create mode 100644 src/win32/win64_installer/Start.bat create mode 100644 src/win32/win64_installer/Stop.bat create mode 100644 src/win32/win64_installer/WriteTemplates.ini create mode 100644 src/win32/win64_installer/bacula-dir.conf.in create mode 100644 
src/win32/win64_installer/bacula-fd.conf.in create mode 100644 src/win32/win64_installer/bacula-logo.bmp create mode 100644 src/win32/win64_installer/bacula-sd.conf.in create mode 100644 src/win32/win64_installer/bacula-tray-monitor.conf.in create mode 100644 src/win32/win64_installer/bat.conf.in create mode 100644 src/win32/win64_installer/bconsole.conf.in create mode 100644 src/win32/win64_installer/bs-logo.bmp create mode 100644 src/win32/win64_installer/bwx-console.conf.in create mode 100644 src/win32/win64_installer/client.conf.in create mode 100644 src/win32/win64_installer/installer.vcproj create mode 100644 src/win32/win64_installer/storage.conf.in create mode 100644 src/win32/win64_installer/tray-monitor.conf.in create mode 100644 src/win32/win64_installer/winbacula.nsi create mode 100644 src/win32/winapi.h create mode 100755 src/win32/wx-console/Makefile create mode 100644 src/win32/wx-console/bwx-console.manifest create mode 100644 src/win32/wx-console/w32api.h create mode 100644 src/win32/wx-console/wx-console.vcproj create mode 100644 updatedb/README create mode 100755 updatedb/update_bacula_tables.in create mode 100755 updatedb/update_bacula_tables_8_to_9 create mode 100644 updatedb/update_mysql_tables.in create mode 100644 updatedb/update_mysql_tables_10_to_11.in create mode 100644 updatedb/update_mysql_tables_11_to_12.in create mode 100755 updatedb/update_mysql_tables_4_to_5 create mode 100755 updatedb/update_mysql_tables_5_to_6 create mode 100755 updatedb/update_mysql_tables_6_to_7 create mode 100755 updatedb/update_mysql_tables_7_to_8 create mode 100755 updatedb/update_mysql_tables_8_to_9 create mode 100644 updatedb/update_mysql_tables_9_to_10.in create mode 100644 updatedb/update_postgresql_tables.in create mode 100644 updatedb/update_postgresql_tables_10_to_11.in create mode 100644 updatedb/update_postgresql_tables_11_to_12.in create mode 100755 updatedb/update_postgresql_tables_7_to_8 create mode 100755 updatedb/update_postgresql_tables_8_to_9 create mode 100644 updatedb/update_postgresql_tables_9_to_10.in create mode 100644 updatedb/update_sqlite3_tables.in create mode 100644 updatedb/update_sqlite3_tables_10_to_11.in create mode 100644 updatedb/update_sqlite3_tables_11_to_12.in create mode 100755 updatedb/update_sqlite3_tables_8_to_9 create mode 100644 updatedb/update_sqlite3_tables_9_to_10.in create mode 100644 updatedb/update_sqlite_tables_10_to_11.in create mode 100755 updatedb/update_sqlite_tables_4_to_5 create mode 100755 updatedb/update_sqlite_tables_5_to_6 create mode 100755 updatedb/update_sqlite_tables_6_to_7 create mode 100755 updatedb/update_sqlite_tables_7_to_8 create mode 100755 updatedb/update_sqlite_tables_8_to_9 create mode 100644 updatedb/update_sqlite_tables_9_to_10.in diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..a74eb12b --- /dev/null +++ b/.gitignore @@ -0,0 +1,1306 @@ +# +# git-ls-files --others --exclude-from=.git/info/exclude +# Lines that start with '#' are comments. 
+# For a project mostly in C, the following would be a good set of +# exclude patterns (uncomment them if you want to use them): +# *.[oa] +# *~ + +# +*.la +*.a +*.o +*.lo +*~ +*.patch +*.bak +1 +2 +autom4te.cache +bacula +btraceback +config.cache +config.log +config.out +config.status +configure.lineno +console.log +console.sum +count-lines +diff +fd +update +gconsole +kernsconfig +kernsconfignodb +kerns-console.conf +kernsdesign +kerns-dird.conf +kernsdisclaimers +kernsdone +kerns-filed.conf +kernsfloppymount +kerns-gnome-console.conf +kerns-gprof-config +kerns-mysql-config +kerns-nopython-conf +kernsnosqlconfig +kernsolarisconfig +kerns-postgresql-config +kernsproductionconfig +kerns-sqlite-config +kerns-stored.conf +kernssunproductionconfig +kernswinconfig +kernswinproductionconfig +Makefile +newdb +newtape +run_clean +run_clean_tape +run_two +run_two_broken +set-gnome1.4 +set-gnome2 +startit +startmysql +startpostgre +stopit +stopmysql +stoppostgre +test.out +test-tape +time.py +time.pyc +txt +*.txt +wxconsole +zapall +zaptapes +kerns-pgsql-config +fake-mtx +filter +label +relabel +sed +weof +kerns-static-config +wconsole +hardlink-test +bat +confdefs.h +rest +test-bacula.sh +runconsole +wx-console +oldtxt +kerns-bs-config +libtool +config.status.lineno + +# autoconf/ +autoconf/autom4te.cache +autoconf/Make.common +autoconf/config.h.in +autoconf/1 +autoconf/4 +autoconf/5 + +# examples/python/ +examples/python/DirStartUp.pyc +examples/python/FDStartUp.pyc +examples/python/SDStartUp.pyc + +# manpages/ +manpages/Makefile + +# platforms/ +platforms/Makefile +platforms/Makefile +platforms/bacula-dir +platforms/bacula-fd +platforms/bacula-sd + +# platforms/bsdi/ +platforms/bsdi/bacula-dir +platforms/bsdi/bacula-fd +platforms/bsdi/bacula-sd +platforms/bsdi/Makefile +platforms/bsdi/1 + +# platforms/debian/ +platforms/debian/bacula-sd +platforms/debian/bacula-fd +platforms/debian/bacula-dir +platforms/debian/Makefile + +# platforms/freebsd/ +platforms/freebsd/Makefile +platforms/freebsd/bacula-dir +platforms/freebsd/bacula-fd +platforms/freebsd/bacula-sd + +# platforms/redhat/ +platforms/redhat/Makefile +platforms/redhat/bacula-dir +platforms/redhat/bacula-fd +platforms/redhat/bacula-sd + +# platforms/rpms +platforms/rpms/redhat/*.spec +platforms/rpms/suse/*.spec + +# platforms/suse/ +platforms/suse/Makefile +platforms/suse/bacula +platforms/suse/bacula-dir +platforms/suse/bacula-fd +platforms/suse/bacula-sd +platforms/suse/bacula.spec + +# platforms/ubuntu/ +platforms/ubuntu/Makefile +platforms/ubuntu/bacula-dir +platforms/ubuntu/bacula-fd +platforms/ubuntu/bacula-sd + +# po/ +po/POTFILES +po/Makefile.in +po/Makefile +po/stamp-po +po/remove-potcdate.sed +po/*.mo +po/*.gmo + +# scripts/ +scripts/bs_alert +scripts/breload +scripts/bsg_persist +scripts/make_catalog_backup.pl +scripts/baculabackupreport +scripts/bacula_config +scripts/wxconsole.console_apps +scripts/wxconsole.desktop.consolehelper +scripts/wxconsole.desktop.xsu +scripts/disk-changer +scripts/bacula-tray-monitor.desktop +scripts/bacula-tray-monior.desktop +scripts/.xvpics +scripts/logrotate +scripts/bacula.desktop +scripts/bacula.desktop.gnome1 +scripts/bacula.desktop.gnome2 +scripts/bconsole +scripts/devel_bacula +scripts/gconsole +scripts/mtx-changer +scripts/dvd-handler +scripts/dvd-simulator +scripts/Makefile +scripts/bacula +scripts/btraceback +scripts/fd +scripts/reset-storageid +scripts/startit +scripts/startmysql +scripts/stopit +scripts/stopmysql +scripts/storage-ctl +scripts/gnome-console.console_apps 
+scripts/bacula.desktop.gnome2.xsu +scripts/bacula.desktop.gnome2.consolehelper +scripts/bacula.desktop.gnome1.xsu +scripts/bacula.desktop.gnome1.consolehelper +scripts/bacula-ctl-dir +scripts/bacula-ctl-fd +scripts/bacula-ctl-sd +scripts/bat.console_apps +scripts/bat.desktop +scripts/bat.desktop.consolehelper +scripts/bat.desktop.xsu +scripts/bgnome-console.console_apps + +# scripts/logwatch/ +scripts/logwatch/logfile.bacula.conf +scripts/logwatch/Makefile +scripts/logwatch/logfile.bacula.conf + +# src/ +src/1 +src/2 +src/3 +src/Makefile +src/config.h +src/testprogs +src/host.h +src/perlgui +src/python +src/pyqt +src/xqt-console +src/TAGS +src/bsd +src/add-bsd + +# src/cats/ +src/cats/1 +src/cats/2 +src/cats/3 +src/cats/install-default-backend +src/cats/make_catalog_backup.pl +src/cats/Makefile +src/cats/make_catalog_backup +src/cats/delete_catalog_backup +src/cats/create_bacula_database +src/cats/create_bdb_database +src/cats/create_mysql_database +src/cats/create_postgresql_database +src/cats/create_sqlite_database +src/cats/create_ingres_database +src/cats/drop_bdb_tables +src/cats/drop_mysql_tables +src/cats/drop_postgresql_tables +src/cats/drop_sqlite_tables +src/cats/drop_ingres_tables +src/cats/grant_bacula_privileges +src/cats/grant_mysql_privileges +src/cats/grant_postgresql_privileges +src/cats/grant_sqlite_privileges +src/cats/grant_ingres_privileges +src/cats/list_files.sql +src/cats/list_jobs.sql +src/cats/make_bdb_tables +src/cats/make_mysql_tables +src/cats/make_postgresql_tables +src/cats/make_sqlite_tables +src/cats/make_ingres_tables +src/cats/mysql +src/cats/sqlite +src/cats/make_bacula_tables +src/cats/drop_bacula_tables +src/cats/update.sql +src/cats/update2_sqlite_tables +src/cats/update_bacula_tables +src/cats/update_mysql_tables +src/cats/update_postgresql_tables +src/cats/update_sqlite_tables +src/cats/update_ingres_tables +src/cats/drop_bacula_database +src/cats/drop_bdb_database +src/cats/drop_mysql_database +src/cats/drop_postgresql_database +src/cats/drop_sqlite_database +src/cats/drop_ingres_database +src/cats/grant_bdb_privileges +src/cats/update_bdb_tables +src/cats/create_sqlite3_database +src/cats/drop_sqlite3_database +src/cats/drop_sqlite3_tables +src/cats/grant_sqlite3_privileges +src/cats/make_sqlite3_tables +src/cats/update_sqlite3_tables +src/cats/libsql.a +src/cats/.libs +src/cats/libbacsql.a + +# src/console/ +src/console/bbconsjson +src/console/console.conf +src/console/test-console.conf +src/console/test-bconsole.conf +src/console/Makefile +src/console/btraceback +src/console/btraceback.gdb +src/console/bconsole +src/console/bconsole.conf +src/console/startit +src/console/stopit +src/console/static-bconsole +src/console/.libs + +# src/dird/ +src/dird/1 +src/dird/2 +src/dird/3 +src/dird/bdirjson +src/dird/Makefile +src/dird/bacula-dir +src/dird/bacula-dir.conf +src/dird/btraceback +src/dird/btraceback.gdb +src/dird/startit +src/dird/stopit +src/dird/dird.conf +src/dird/filelist.exc +src/dird/.libs + +# src/filed/ +src/filed/Makefile +src/filed/bacula-fd +src/filed/bacula-fd.conf +src/filed/bfdjson +src/filed/btraceback +src/filed/btraceback.gdb +src/filed/host.h +src/filed/startit +src/filed/static-bacula-fd +src/filed/stopit +src/filed/filed.conf +src/filed/.libs + +# src/findlib/ +src/findlib/Makefile +src/findlib/1 +src/findlib/2 +src/findlib/libfind.a +src/findlib/.libs +src/findlib/libbacfind.a + +# src/gnome2-console/ +src/gnome2-console/test-gnome-console.conf +src/gnome2-console/Makefile +src/gnome2-console/gnome-console 
+src/gnome2-console/gnome-console.conf +src/gnome2-console/1 +src/gnome2-console/2 +src/gnome2-console/3 +src/gnome2-console/main.c +src/gnome2-console/static-gnome-console +src/gnome2-console/bgnome-console +src/gnome2-console/bgnome-console.conf +src/gnome2-console/support.c.orig +src/gnome2-console/.libs + +# src/lib/ +src/lib/bsnprintf +src/lib/save +src/lib/1 +src/lib/Makefile +src/lib/btraceback +src/lib/btraceback.gdb +src/lib/startit +src/lib/stopit +src/lib/sha1sum +src/lib/md5sum +src/lib/libbac.a +src/lib/.libs +src/lib/libbaccfg.a +src/lib/libbacpy.a + +# src/plugins/dir/ +src/plugins/dir/*.o +src/plugins/dir/*.so +src/plugins/dir/main +src/plugins/dir/Makefile +src/plugins/dir/.libs + +# src/plugins/fd/ +src/plugins/fd/*.o +src/plugins/fd/*.so +src/plugins/fd/main +src/plugins/fd/Makefile +src/plugins/fd/.libs + +# src/plugins/sd/ +src/plugins/sd/.libs/ +src/plugins/sd/*.o +src/plugins/sd/*.so +src/plugins/sd/main +src/plugins/sd/Makefile + +# src/qt-console/ +src/qt-console/ui_*.h +src/qt-console/bat +src/qt-console/about-func +src/qt-console/brestore.ui +src/qt-console/main.qrc +src/qt-console/mult-inheritance +src/qt-console/print-func +src/qt-console/qrc_main.cpp +src/qt-console/moc_*.cpp +src/qt-console/moc64 +src/qt-console/ui64 +src/qt-console/v +src/qt-console/ui +src/qt-console/Makefile +src/qt-console/bat.conf +src/qt-console/bat.conff +src/qt-console/.*.swp +src/qt-console/1 +src/qt-console/diff +src/qt-console/bat.pro +src/qt-console/install_conf_file +src/qt-console/config.out +src/qt-console/.kdbgrc.bat +src/qt-console/depkgs +src/qt-console/qwt +src/qt-console/test-bat.conf +src/qt-console/debug +src/qt-console/release +src/qt-console/Makefile.Debug +src/qt-console/Makefile.Release +src/qt-console/Makefile.mingw64 +src/qt-console/Makefile.mingw64.Debug +src/qt-console/Makefile.mingw64.Release +src/qt-console/object_script.bat.Debug +src/qt-console/object_script.bat.Release +src/qt-console/pkg-conf +src/qt-console/queries.txt +src/qt-console/.libs +src/qt-console/Makefile.mingw32 +src/qt-console/Makefile.mingw32.Debug +src/qt-console/Makefile.mingw32.Release +src/qt-console/bat.pro.mingw32 + +# src/qt-console/clients/ +src/qt-console/clients/.*.swp + +# src/qt-console/console/ +src/qt-console/console/1 + +# src/qt-console/joblist/ +src/qt-console/joblist/.*.swp +src/qt-console/joblist/1 + +# src/qt-console/mediaedit/ +src/qt-console/mediaedit/.*.swp + +# src/qt-console/medialist/ +src/qt-console/medialist/.*.swp + +# src/qt-console/moc/ +src/qt-console/moc/moc_*.cpp + +# src/qt-console/mount/ +src/qt-console/mount/.*.swp + +# src/qt-console/obj/ +src/qt-console/obj +src/qt-console/obj32 +src/qt-console/moc32 +src/qt-console/ui32 + +# src/qt-console/testprogs/examp/ +src/qt-console/testprogs/examp/main +src/qt-console/testprogs/examp/obj +src/qt-console/testprogs/examp/Makefile +src/qt-console/testprogs/examp/moc + +# src/qt-console/testprogs/putz/ +src/qt-console/testprogs/putz/putz +src/qt-console/testprogs/putz/obj +src/qt-console/testprogs/putz/ui +src/qt-console/testprogs/putz/Makefile +src/qt-console/testprogs/putz/moc + +# src/qt-console/tray-monitor +src/qt-console/tray-monitor/.libs/ +src/qt-console/tray-monitor/bacula-tray-monitor +src/qt-console/tray-monitor/qrc_main.cpp +src/qt-console/tray-monitor/Makefile.mingw32 +src/qt-console/tray-monitor/Makefile.mingw32.Debug +src/qt-console/tray-monitor/Makefile.mingw32.Release +src/qt-console/tray-monitor/Makefile.mingw64 +src/qt-console/tray-monitor/Makefile.mingw64.Debug 
+src/qt-console/tray-monitor/Makefile.mingw64.Release +src/qt-console/tray-monitor/debug/ +src/qt-console/tray-monitor/release/ +src/qt-console/tray-monitor/moc/ +src/qt-console/tray-monitor/tray-monitor.conf +src/qt-console/tray-monitor/tray-monitor.pro +src/qt-console/tray-monitor/ui/ +src/qt-console/tray-monitor/ui32/ +src/qt-console/tray-monitor/ui64/ +src/qt-console/tray-monitor/moc32/ +src/qt-console/tray-monitor/moc64/ +src/qt-console/tray-monitor/tray-monitor.pro.mingw32 +src/qt-console/tray-monitor/tray-monitor.pro.mingw64 +src/qt-console/tray-monitor/bacula-tray-monitor.conf +src/qt-console/tray-monitor/install_conf_file +src/qt-console/tray-monitor/object_script.bacula-tray-monitor.Debug +src/qt-console/tray-monitor/object_script.bacula-tray-monitor.Release + + +# src/qt-console/ts/ +src/qt-console/ts/bat_fr.qm +src/qt-console/ts/bat_de.qm + +# src/stored/ +src/stored/1 +src/stored/2 +src/stored/Makefile +src/stored/bacula-sd +src/stored/bacula-sd.conf +src/stored/bextract +src/stored/bls +src/stored/bscan +src/stored/btape +src/stored/bcopy +src/stored/bsdjson +src/stored/btraceback +src/stored/btraceback.gdb +src/stored/file1Job1.bsr +src/stored/file1Job2.bsr +src/stored/job1.bsr +src/stored/job2.bsr +src/stored/restore.bsr +src/stored/startit +src/stored/stopit +src/stored/changer.out +src/stored/mtx-changer +src/stored/stored.conf +src/stored/test.conf +src/stored/dvd-simulator +src/stored/mount-simulator +src/stored/mounted +src/stored/slots +src/stored/unmount-simulator +src/stored/test-dedup +src/stored/.libs +src/stored/obj32 +src/stored/obj64 + +# src/tools/ +src/tools/bsnapshot +src/tools/bpluginfo +src/tools/timelimit +src/tools/ing_test +src/tools/bregex +src/tools/bwild +src/tools/testls +src/tools/1 +src/tools/Makefile +src/tools/dbcheck +src/tools/smtp +src/tools/bsmtp +src/tools/testfind +src/tools/fstype +src/tools/drivetype +src/tools/gigaslam +src/tools/bbatch +src/tools/bregtest +src/tools/grow +src/tools/.libs +src/tools/bvfs_test + +# src/tray-monitor/ +src/tray-monitor/tray-monitor.conf +src/tray-monitor/Makefile +src/tray-monitor/Makefile.bak +src/tray-monitor/bacula-tray-monitor +src/tray-monitor/*~ +src/tray-monitor/.libs + +# src/win32/ +src/win32/Makefile.inc +src/win32/release +src/win32/pthreadGCE.dll +src/win32/install +src/win32/install-debug +src/win32/bacula.ncb +src/win32/bacula.suo +src/win32/1 +src/win32/2 +src/win32/3 +src/win32/mingwm10.dll +src/win32/Makefile.vcpp +src/win32/winbacula.nsi +src/win32/winres.res +src/win32/wx-console.res +src/win32/debug +src/win32/release32 +src/win32/release64 +src/win32/dll + +# src/win32 +src/win32*.user +src/win32Debug +src/win32Release + +# src/win32/cats/ +src/win32/cats/*.o +src/win32/cats/*.d +src/win32/cats/libcats.exp +src/win32/cats/bdb +src/win32/cats/mysql +src/win32/cats/pgsql +src/win32/cats/obj32 +src/win32/cats/obj64 + +# src/win32/cats/bacula_cats/ +src/win32/cats/bacula_cats/bacula_cats.vcproj.THE-NELSONS.robert.user +src/win32/cats/bacula_cats/Debug +src/win32/cats/bacula_cats/Release + +# src/win32/cats/cats_mysql/ +src/win32/cats/cats_mysql/*.user +src/win32/cats/cats_mysql/Debug +src/win32/cats/cats_mysql/Release + +# src/win32/cats/cats_postgresql/ +src/win32/cats/cats_postgresql/*.user +src/win32/cats/cats_postgresql/Debug +src/win32/cats/cats_postgresql/Release + +# src/win32/cats/cats_sqlite3/ +src/win32/cats/cats_sqlite3/*.user +src/win32/cats/cats_sqlite3/Debug +src/win32/cats/cats_sqlite3/Release + +# src/win32/compat/ + +# src/win32/console/ +src/win32/console/*.d 
+src/win32/console/*.user +src/win32/console/Debug +src/win32/console/Release +src/win32/console/obj32 +src/win32/console/obj64 + +# src/win32/dird/ +src/win32/dird/*.d +src/win32/dird/*.res +src/win32/dird/*.user +src/win32/dird/Debug +src/win32/dird/Release +src/win32/dird/obj32 + +# src/win32/filed/ +src/win32/filed/*.d +src/win32/filed/*.res +src/win32/filed/*.user +src/win32/filed/Debug +src/win32/filed/Release +src/win32/filed/obj32 +src/win32/filed/obj64 + +# src/win32/filed/plugins/ +src/win32/filed/plugins/exchange-fd.a +src/win32/filed/plugins/obj32 +src/win32/filed/plugins/obj64 + +# src/win32/lib/ +src/win32/lib/*.d +src/win32/lib/bacula.a +src/win32/lib/obj32 +src/win32/lib/obj64 + +# src/win32/libbac/ +src/win32/libbac/*.user +src/win32/libbac/Debug +src/win32/libbac/Release + +# src/win32/newinstaller/ +src/win32/newinstaller/release + +# src/win32/pebuilder/ +src/win32/pebuilder/Makefile + +# src/win32/scripts/ +src/win32/scripts/*.d +src/win32/scripts/*.user +src/win32/scripts/Debug +src/win32/scripts/Release +src/win32/scripts/obj32 +src/win32/scripts/obj64 + +# src/win32/stored/ +src/win32/stored/*.d +src/win32/stored/*.res +src/win32/stored/obj32 +src/win32/stored/obj64 + +# src/win32/stored/bcopy/ +src/win32/stored/bcopy/*.user +src/win32/stored/bcopy/Debug +src/win32/stored/bcopy/Release + +# src/win32/stored/bextract/ +src/win32/stored/bextract/*.user +src/win32/stored/bextract/Debug +src/win32/stored/bextract/Release + +# src/win32/stored/bls/ +src/win32/stored/bls/*.user +src/win32/stored/bls/Debug +src/win32/stored/bls/Release + +# src/win32/stored/bscan/ +src/win32/stored/bscan/*.user +src/win32/stored/bscan/Debug +src/win32/stored/bscan/Release + +# src/win32/stored/btape/ +src/win32/stored/btape/*.user +src/win32/stored/btape/Debug +src/win32/stored/btape/Release + +# src/win32/stored/postest/ +src/win32/stored/postest/*.user + +# src/win32/stored/storelib/ +src/win32/stored/storelib/*.user +src/win32/stored/storelib/Debug +src/win32/stored/storelib/Release + +# src/win32/tools/ +src/win32/tools/*.d +src/win32/tools/obj32 +src/win32/tools/obj64 + +# src/win32/tools/bsmtp/ +src/win32/tools/bsmtp/*.user +src/win32/tools/bsmtp/Debug +src/win32/tools/bsmtp/Release + +# src/win32/tools/dbcheck/ +src/win32/tools/dbcheck/*.user +src/win32/tools/dbcheck/Debug +src/win32/tools/dbcheck/Release + +# src/win32/tools/drivetype/ +src/win32/tools/drivetype/*.user +src/win32/tools/drivetype/Debug +src/win32/tools/drivetype/Release + +# src/win32/tools/fstype/ +src/win32/tools/fstype/*.user +src/win32/tools/fstype/Debug +src/win32/tools/fstype/Release + +# src/win32/tools/scsilist/ +src/win32/tools/scsilist/*.user +src/win32/tools/scsilist/Debug +src/win32/tools/scsilist/Release + +# src/win32/tools/testfind/ +src/win32/tools/testfind/*.user +src/win32/tools/testfind/Debug +src/win32/tools/testfind/Release + +# src/win32/tools/testls/ +src/win32/tools/testls/*.user +src/win32/tools/testls/Debug +src/win32/tools/testls/Release + +# src/win32/win32_installer/ +src/win32/win32_installer/*.dll +src/win32/win32_installer/*.exe +src/win32/win32_installer/*.dbg +src/win32/win32_installer/release +src/win32/win32_installer/*.user +src/win32/win32_installer/Debug +src/win32/win32_installer/Release +src/win32/win32_installer/release32 + +# src/win32/win64_installer/ +src/win32/win64_installer/release64 + +# src/win32/wx-console/ +src/win32/wx-console/*.idb +src/win32/wx-console/*.d +src/win32/wx-console/*.res +src/win32/wx-console/wx-console.conf +src/win32/wx-console/*.user 
+src/win32/wx-console/Debug +src/win32/wx-console/Release +src/win32/wx-console/obj32 + +# src/wx-console/ +src/wx-console/test-wx-console.conf +src/wx-console/Makefile +src/wx-console/wx-console +src/wx-console/wx-console.exe +src/wx-console/wx-console.conf +src/wx-console/wx-console.exe.stackdump +src/wx-console/wx-console_private.res +src/wx-console/bwx-console +src/wx-console/bwx-console.conf +src/wx-console/.libs + +# updatedb/ +updatedb/update_mysql_tables_1019_to_1020 +updatedb/update_postgresql_tables_1019_to_1020 +updatedb/update_mysql_tables +updatedb/update_postgresql_tables +updatedb/update_sqlite3_tables +updatedb/update_mysql_tables_12_to_14 +updatedb/update_postgresql_tables_12_to_14 +updatedb/update_sqlite3_tables_12_to_14 +updatedb/update_mysql_tables_11_to_12 +updatedb/update_postgresql_tables_11_to_12 +updatedb/update_sqlite3_tables_11_to_12 +updatedb/update_mysql_tables_10_to_11 +updatedb/update_postgresql_tables_10_to_11 +updatedb/update_sqlite3_tables_10_to_11 +updatedb/update_sqlite_tables_10_to_11 +updatedb/update_mysql_tables_9_to_10 +updatedb/update_postgresql_tables_9_to_10 +updatedb/update_sqlite3_tables_9_to_10 +updatedb/update_sqlite_tables_9_to_10 +updatedb/update_mysql_tables_1014_to_1015 +updatedb/update_mysql_tables_12_to_13 +updatedb/update_mysql_tables_13_to_14 +updatedb/update_mysql_tables_14_to_1014 +updatedb/update_mysql_tables_1015_to_1016 +updatedb/update_mysql_tables_1016_to_1017 +updatedb/update_postgresql_tables_1014_to_1015 +updatedb/update_postgresql_tables_12_to_13 +updatedb/update_postgresql_tables_13_to_14 +updatedb/update_postgresql_tables_14_to_1014 +updatedb/update_postgresql_tables_1015_to_1016 +updatedb/update_postgresql_tables_1016_to_1017 +updatedb/update_sqlite3_tables_1015_to_1016 +updatedb/update_mysql_tables_1017_to_1018 +updatedb/update_mysql_tables_1018_to_1019 +updatedb/update_postgresql_tables_1017_to_1018 +updatedb/update_postgresql_tables_1018_to_1019 + + +# /docs/ +/docs/manual-fr-base +/docs/manual-de-base +/docs/diff +/docs/Makefile +/docs/1 +/docs/2 +/docs/3 +/docs/autom4te.cache +/docs/bacula-doc-* +/docs/config.log +/docs/config.out +/docs/config.status +/docs/kernsconfig + +# /docs/autoconf/ +/docs/autoconf/config.log +/docs/autoconf/Make.common +/docs/autoconf/config.h.in + +# /docs/bacula-web/ +/docs/bacula-web/imagename_translations +/docs/bacula-web/bacula-web +/docs/bacula-web/bacula-web.html +/docs/bacula-web/bacula-web.pdf +/docs/bacula-web/bacula.html +/docs/bacula-web/bacula.pdf +/docs/bacula-web/dev-bacula.pdf +/docs/bacula-web/bacula-web +/docs/bacula-web/*.aux +/docs/bacula-web/*.png +/docs/bacula-web/*.idx +/docs/bacula-web/*.eps +/docs/bacula-web/*.jpg +/docs/bacula-web/*.dvi +/docs/bacula-web/*.out +/docs/bacula-web/*.log +/docs/bacula-web/*.toc +/docs/bacula-web/*.lof +/docs/bacula-web/*.ilg +/docs/bacula-web/*.dvi +/docs/bacula-web/*.css +/docs/bacula-web/*.lot +/docs/bacula-web/*.cdx +/docs/bacula-web/*.ddx +/docs/bacula-web/*.fdx +/docs/bacula-web/*.sdx +/docs/bacula-web/*.cnd +/docs/bacula-web/*.dnd +/docs/bacula-web/*.fnd +/docs/bacula-web/*.ind +/docs/bacula-web/*.snd +/docs/bacula-web/WARNINGS +/docs/bacula-web/internals.pl +/docs/bacula-web/index.html +/docs/bacula-web/labels.pl +/docs/bacula-web/1 +/docs/bacula-web/2 +/docs/bacula-web/images.pl +/docs/bacula-web/*.linked.tex +/docs/bacula-web/images.tex +/docs/bacula-web/bacula-webi.tex +/docs/bacula-web/baculai-*.tex +/docs/bacula-web/Makefile +/docs/bacula-web/version.tex + +# /docs/manuals/ +/docs/manuals/bacula.sty 
+/docs/manuals/update_version +/docs/manuals/version.tex + +# /docs/manuals/de/catalog/ +/docs/manuals/de/catalog/Makefile +/docs/manuals/de/catalog/version.tex +/docs/manuals/de/catalog/catalog +/docs/manuals/de/catalog/catalog.dvi +/docs/manuals/de/catalog/catalog.idx +/docs/manuals/de/catalog/catalog.pdf +/docs/manuals/de/catalog/catalog.toc +/docs/manuals/de/catalog/catalogi-console.tex +/docs/manuals/de/catalog/catalogi-dir.tex +/docs/manuals/de/catalog/catalogi-fd.tex +/docs/manuals/de/catalog/catalogi-general.tex +/docs/manuals/de/catalog/catalogi-sd.tex +/docs/manuals/de/catalog/update_version +/docs/manuals/de/catalog/bacula.sty + +# /docs/manuals/de/concepts/ +/docs/manuals/de/concepts/Makefile +/docs/manuals/de/concepts/version.tex +/docs/manuals/de/concepts/concepts +/docs/manuals/de/concepts/concepts.dvi +/docs/manuals/de/concepts/concepts.idx +/docs/manuals/de/concepts/concepts.pdf +/docs/manuals/de/concepts/concepts.toc +/docs/manuals/de/concepts/conceptsi-console.tex +/docs/manuals/de/concepts/conceptsi-dir.tex +/docs/manuals/de/concepts/conceptsi-fd.tex +/docs/manuals/de/concepts/conceptsi-general.tex +/docs/manuals/de/concepts/conceptsi-sd.tex +/docs/manuals/de/concepts/update_version +/docs/manuals/de/concepts/bacula.sty + +# /docs/manuals/de/console/ +/docs/manuals/de/console/Makefile +/docs/manuals/de/console/version.tex +/docs/manuals/de/console/console +/docs/manuals/de/console/console.dvi +/docs/manuals/de/console/console.idx +/docs/manuals/de/console/console.pdf +/docs/manuals/de/console/console.toc +/docs/manuals/de/console/consolei-console.tex +/docs/manuals/de/console/consolei-dir.tex +/docs/manuals/de/console/consolei-fd.tex +/docs/manuals/de/console/consolei-general.tex +/docs/manuals/de/console/consolei-sd.tex +/docs/manuals/de/console/update_version +/docs/manuals/de/console/bacula.sty + +# /docs/manuals/de/developers/ +/docs/manuals/de/developers/Makefile +/docs/manuals/de/developers/version.tex +/docs/manuals/de/developers/developers +/docs/manuals/de/developers/developers.dvi +/docs/manuals/de/developers/developers.idx +/docs/manuals/de/developers/developers.pdf +/docs/manuals/de/developers/developers.toc +/docs/manuals/de/developers/developersi-general.tex +/docs/manuals/de/developers/update_version +/docs/manuals/de/developers/bacula.sty + +# /docs/manuals/de/install/ +/docs/manuals/de/install/Makefile +/docs/manuals/de/install/version.tex +/docs/manuals/de/install/install +/docs/manuals/de/install/install.dvi +/docs/manuals/de/install/install.idx +/docs/manuals/de/install/install.pdf +/docs/manuals/de/install/install.toc +/docs/manuals/de/install/installi-console.tex +/docs/manuals/de/install/installi-dir.tex +/docs/manuals/de/install/installi-fd.tex +/docs/manuals/de/install/installi-general.tex +/docs/manuals/de/install/installi-sd.tex +/docs/manuals/de/install/update_version +/docs/manuals/de/install/bacula.sty + +# /docs/manuals/de/problems/ +/docs/manuals/de/problems/Makefile +/docs/manuals/de/problems/version.tex +/docs/manuals/de/problems/problems +/docs/manuals/de/problems/problems.dvi +/docs/manuals/de/problems/problems.idx +/docs/manuals/de/problems/problems.pdf +/docs/manuals/de/problems/problems.toc +/docs/manuals/de/problems/update_version +/docs/manuals/de/problems/bacula.sty + +# /docs/manuals/de/utility/ +/docs/manuals/de/utility/Makefile +/docs/manuals/de/utility/version.tex +/docs/manuals/de/utility/utility +/docs/manuals/de/utility/update_version +/docs/manuals/de/utility/utility.dvi +/docs/manuals/de/utility/utility.idx 
+/docs/manuals/de/utility/utility.pdf +/docs/manuals/de/utility/utility.toc +/docs/manuals/de/utility/utilityi-console.tex +/docs/manuals/de/utility/utilityi-dir.tex +/docs/manuals/de/utility/utilityi-fd.tex +/docs/manuals/de/utility/utilityi-general.tex +/docs/manuals/de/utility/utilityi-sd.tex +/docs/manuals/de/utility/bacula.sty + +# /docs/manuals/en/catalog/ +/docs/manuals/en/catalog/catalog +/docs/manuals/en/catalog/Makefile +/docs/manuals/en/catalog/bacula.sty +/docs/manuals/en/catalog/catalog.dvi +/docs/manuals/en/catalog/catalog.idx +/docs/manuals/en/catalog/catalog.pdf +/docs/manuals/en/catalog/catalog.toc +/docs/manuals/en/catalog/catalogi-console.tex +/docs/manuals/en/catalog/catalogi-dir.tex +/docs/manuals/en/catalog/catalogi-fd.tex +/docs/manuals/en/catalog/catalogi-general.tex +/docs/manuals/en/catalog/catalogi-sd.tex +/docs/manuals/en/catalog/update_version +/docs/manuals/en/catalog/version.tex + +# /docs/manuals/en/concepts/ +/docs/manuals/en/concepts/Makefile +/docs/manuals/en/concepts/version.tex +/docs/manuals/en/concepts/concepts +/docs/manuals/en/concepts/concepts.dvi +/docs/manuals/en/concepts/concepts.idx +/docs/manuals/en/concepts/concepts.pdf +/docs/manuals/en/concepts/concepts.toc +/docs/manuals/en/concepts/conceptsi-console.tex +/docs/manuals/en/concepts/conceptsi-dir.tex +/docs/manuals/en/concepts/conceptsi-fd.tex +/docs/manuals/en/concepts/conceptsi-general.tex +/docs/manuals/en/concepts/conceptsi-sd.tex +/docs/manuals/en/concepts/bacula.sty +/docs/manuals/en/concepts/update_version + +# /docs/manuals/en/console/ +/docs/manuals/en/console/Makefile +/docs/manuals/en/console/version.tex +/docs/manuals/en/console/console +/docs/manuals/en/console/console.dvi +/docs/manuals/en/console/console.idx +/docs/manuals/en/console/console.pdf +/docs/manuals/en/console/console.toc +/docs/manuals/en/console/consolei-console.tex +/docs/manuals/en/console/consolei-dir.tex +/docs/manuals/en/console/consolei-fd.tex +/docs/manuals/en/console/consolei-general.tex +/docs/manuals/en/console/consolei-sd.tex +/docs/manuals/en/console/bacula.sty +/docs/manuals/en/console/update_version + +# /docs/manuals/en/developers/ +/docs/manuals/en/developers/Makefile +/docs/manuals/en/developers/version.tex +/docs/manuals/en/developers/developers +/docs/manuals/en/developers/developers.dvi +/docs/manuals/en/developers/developers.idx +/docs/manuals/en/developers/developers.pdf +/docs/manuals/en/developers/developers.toc +/docs/manuals/en/developers/developersi-general.tex +/docs/manuals/en/developers/bacula.sty +/docs/manuals/en/developers/update_version + +# /docs/manuals/en/install/ +/docs/manuals/en/install/Makefile +/docs/manuals/en/install/version.tex +/docs/manuals/en/install/install +/docs/manuals/en/install/install.dvi +/docs/manuals/en/install/install.idx +/docs/manuals/en/install/install.pdf +/docs/manuals/en/install/install.toc +/docs/manuals/en/install/installi-console.tex +/docs/manuals/en/install/installi-dir.tex +/docs/manuals/en/install/installi-fd.tex +/docs/manuals/en/install/installi-general.tex +/docs/manuals/en/install/installi-sd.tex +/docs/manuals/en/install/bacula.sty +/docs/manuals/en/install/update_version + +# /docs/manuals/en/problems/ +/docs/manuals/en/problems/Makefile +/docs/manuals/en/problems/version.tex +/docs/manuals/en/problems/problems +/docs/manuals/en/problems/problems.dvi +/docs/manuals/en/problems/problems.idx +/docs/manuals/en/problems/problems.pdf +/docs/manuals/en/problems/problems.toc +/docs/manuals/en/problems/bacula.sty 
+/docs/manuals/en/problems/update_version + +# /docs/manuals/en/utility/ +/docs/manuals/en/utility/Makefile +/docs/manuals/en/utility/version.tex +/docs/manuals/en/utility/utility +/docs/manuals/en/utility/utility.dvi +/docs/manuals/en/utility/utility.idx +/docs/manuals/en/utility/utility.pdf +/docs/manuals/en/utility/utility.toc +/docs/manuals/en/utility/utilityi-console.tex +/docs/manuals/en/utility/utilityi-dir.tex +/docs/manuals/en/utility/utilityi-fd.tex +/docs/manuals/en/utility/utilityi-general.tex +/docs/manuals/en/utility/utilityi-sd.tex +/docs/manuals/en/utility/bacula.sty +/docs/manuals/en/utility/update_version + +# /docs/manuals/fr/catalog/ +/docs/manuals/fr/catalog/Makefile +/docs/manuals/fr/catalog/version.tex +/docs/manuals/fr/catalog/bacula.sty +/docs/manuals/fr/catalog/update_version + +# /docs/manuals/fr/concepts/ +/docs/manuals/fr/concepts/Makefile +/docs/manuals/fr/concepts/version.tex +/docs/manuals/fr/concepts/bacula.sty +/docs/manuals/fr/concepts/update_version + +# /docs/manuals/fr/console/ +/docs/manuals/fr/console/Makefile +/docs/manuals/fr/console/version.tex +/docs/manuals/fr/console/bacula.sty +/docs/manuals/fr/console/update_version + +# /docs/manuals/fr/developers/ +/docs/manuals/fr/developers/Makefile +/docs/manuals/fr/developers/version.tex +/docs/manuals/fr/developers/bacula.sty +/docs/manuals/fr/developers/update_version + +# /docs/manuals/fr/install/ +/docs/manuals/fr/install/Makefile +/docs/manuals/fr/install/version.tex +/docs/manuals/fr/install/bacula.sty +/docs/manuals/fr/install/update_version + +# /docs/manuals/fr/problems/ +/docs/manuals/fr/problems/Makefile +/docs/manuals/fr/problems/version.tex +/docs/manuals/fr/problems/bacula.sty +/docs/manuals/fr/problems/update_version + +# /docs/manuals/fr/utility/ +/docs/manuals/fr/utility/Makefile +/docs/manuals/fr/utility/version.tex +/docs/manuals/fr/utility/bacula.sty +/docs/manuals/fr/utility/update_version + +# /gui/ +/gui/diff +/gui/config.status +/gui/config.out +/gui/config.log +/gui/autom4te.cache +/gui/kernsconfig + +# /gui/autoconf/ +/gui/autoconf/config.log +/gui/autoconf/Make.common +/gui/autoconf/config.h.in + +# /gui/bacula-web/ +/gui/bacula-web/%%*.php + +# /gui/bimagemgr/ +/gui/bimagemgr/Makefile +/gui/bimagemgr/bacula-bimagemgr.spec + +# /regress/ +/regress/testtime.out +/regress/kerns.config +/regress/1 +/regress/config +/regress/config.out +/regress/build +/regress/bin +/regress/test.out +/regress/weird-files +/regress/weird-files2 +/regress/diff +/regress/tmp +/regress/working +/regress/Makefile +/regress/test1.out +/regress/time.out +/regress/DartConfiguration.tcl +/regress/DartTestfile.txt +/regress/Testing +/regress/rtest.out +/regress/testtime.out + +# /regress/scripts/ +/regress/scripts/bacula-dir.conf.vc +/regress/scripts/bacula-sd.conf.vc +/regress/scripts/multi-client-bacula-dir.conf +/regress/scripts/bacula-dir-2d.conf +/regress/scripts/bacula-dir.conf.errors +/regress/scripts/bacula-fd-2d.conf +/regress/scripts/bconsole-2d.conf +/regress/scripts/bacula-sd-2d.conf +/regress/scripts/bacula-dir-migration.conf +/regress/scripts/bacula-sd-migration.conf +/regress/scripts/crypto-bacula-fd.conf +/regress/scripts/new-test-bacula-dir.conf +/regress/scripts/bacula-dir.conf +/regress/scripts/bacula-fd.conf +/regress/scripts/bacula-sd.conf +/regress/scripts/console.conf +/regress/scripts/bconsole.conf +/regress/scripts/test-bacula-dir.conf +/regress/scripts/test-bacula-fd.conf +/regress/scripts/test-bacula-sd.conf +/regress/scripts/test-console.conf 
+/regress/scripts/testa-bacula-dir.conf +/regress/scripts/bacula-dir-tape.conf +/regress/scripts/bacula-sd-tape.conf +/regress/scripts/bacula-sd-2tape.conf +/regress/scripts/cleanup-tape +/regress/scripts/cleanup-2tape +/regress/scripts/prepare-two-tapes +/regress/scripts/cleanup-2drive +/regress/scripts/bacula-sd-2drive.conf +/regress/scripts/bacula-sd-win32-tape.conf +/regress/scripts/bacula-sd-2drive.conf +/regress/scripts/bacula-dir-win32-tape.conf +/regress/scripts/win32-bacula-dir-tape.conf +/regress/scripts/bacula-dir-fifo.conf +/regress/scripts/bacula-dir.conf.testrunscript +/regress/scripts/bacula-sd-2disk-drive.conf +/regress/scripts/bacula-sd-2disk.conf +/regress/scripts/bacula-sd-fifo.conf +/regress/scripts/bacula-dir.conf.maxtime +/regress/scripts/bacula-dir.conf.regexwhere +/regress/scripts/tls-bacula-dir.conf +/regress/scripts/tls-bacula-fd.conf +/regress/scripts/tls-bacula-sd.conf +/regress/scripts/tls-auth-bacula-dir.conf +/regress/scripts/tls-auth-bacula-fd.conf +/regress/scripts/tls-auth-bacula-sd.conf +/regress/scripts/tls-bacula-sd.conf.in +/regress/scripts/testb-bacula-dir.conf +/regress/scripts/ansi-sd-tape.conf +/regress/scripts/win32-bacula-sd-tape.conf +/regress/scripts/win32-bacula-fd.conf +/regress/scripts/bacula-dir.conf.accurate +/regress/scripts/update-ctest +/regress/scripts/bacula-dir-strip.conf +/regress/scripts/bacula-dir-faketape.conf +/regress/scripts/bacula-sd-faketape.conf +/regress/scripts/prepare-fake-autochanger +/regress/scripts/win32-bacula-dir.conf +/regress/scripts/win32-bacula-dir.conf.save +/regress/scripts/win32-bacula-sd.conf.save +/regress/scripts/bacula-dir-2client.conf +/regress/scripts/prepare-big-autochanger +/regress/scripts/bacula-dir-vtape.conf +/regress/scripts/bacula-sd-vtape.conf +/regress/scripts/regress-config +/regress/scripts/bacula-dir-virtual.conf +/regress/scripts/bacula-sd-virtual.conf +/regress/scripts/mtx-changer.conf +/regress/scripts/plugin-test-bacula-dir.conf +/regress/scripts/broken-media-bug-bacula-dir.conf +/regress/scripts/broken-media-bug-2-bacula-dir.conf +/regress/scripts/broken-media-bug-2-bacula-sd.conf +/regress/scripts/bacula-dir.conf.maxruntime + +# /regress/tests/ +/regress/tests/1 +/regress/tests/2 + +platforms/osx/build +platforms/osx/dl +platforms/osx/products +platforms/osx/tools +TAGS diff --git a/AUTHORS b/AUTHORS new file mode 100644 index 00000000..4dc64ead --- /dev/null +++ b/AUTHORS @@ -0,0 +1,133 @@ + +This file contains a list of the people who have contributed code, +programs or documentation to the Bacula project. Thanks to all of +you. Without your participation, Bacula would not be the success it +is today. + +If you have made a contribution and do not find your name on this +list, please send a note to license@bacula.org or to the person from +whom you received the distribution. + +Contributors: + +Adam Thorton +Adrew J. Millar +Adrian Close +Aitor Matilla +Alan Brown +Aleksandar Milivojevic +Alexander Bergolth +Alexandre Baron +Alexandre Simon +Allan Black +Ana Emilia Arruda +Andreas Piesk +Andrew Ford +Arno Lehmann +Attila Fülöp +Ben Walton +Bernd Frick +Bill Moran +Bastian Friedrich +Carlos A. Molina G +Carsten Leonhardt +Carsten Paeth +Chris Lee +Christian Masopust +Christopher Hull +Craig Miskell +Dan Langille +Daniele Eccher +David Boyes +David Duchscher +Davide Franco +D. 
Scott Barninger +Devin Reade +Dirk Bartley +Eamon Brosnan +Edwin Groothuis +Eric Bollengier +Erich Prinz +Felix Schwarz +Francisco Reyes +Frank Kardel +Frank Sweetser +Graham Keeling +Grzegorz Grabowski +Howard Thomson +Jaime Ventura +James Harper +Jan Kesten +jgorig +Jim Raney +Joakim Tjernlund +Joao Henrique Freitas +John Goerzen +John Kodis +John Walker +Jorj Bauer +Jose Herrera +Jose Luis Tallon +Josip Almasi +Jo Simoens +Juan Luis Frances +Juergen Lock +Karl Cunningham +Kern Sibbald +Kjetil Torgrim Homme +Landon Fuller +Laurent Papier +Lukas Nykryn +Lorenz Schori +Luca Berra +Lucas B. Cohen +Lucas Di Pentima +Ludovic Strappazon +Marc Cousin +Marc Schiffbauer +Marcin Haba +Marco van Wieringen +Martin Schmid +Martin Simmons +Meno Abels +Michael Renner +Michael -buk- Scherer +Michael Stapelberg +Michel Meyers +Morgan +Nic Bellamy +Nicolas Boichat +Olivier Lehmann +Peter Buschmann +Peter Eriksson +Peter Much +Philippe Chauvat +Phil Stracchino +Preben Guldberg +Radoslaw Korzeniewski +Riccardo Ghetta +Richard Mortimer +Robert Nelson +Robert Oschwald +Rudolf Cejka +Russel Howe +Scott Bailey +Sebastian Andrzej Siewior +Sergey Svishchev +Simone Caronni +Stefan Reddig +Sven Hartge +Thomas Lohman +Thomas Glatthor +Thorsten Enge +Tim Oberfoell +Tomas Cameron +Tullio Andreatta +Ulrich Leodolter +V. Novy +Vitaliy Kosharskiy +Victor Hugo dos Santos +Wanderlei Huttel +Wolfgang Denk +Yuri Timofeev +Yves Orton diff --git a/COPYING b/COPYING new file mode 100644 index 00000000..062c2fbb --- /dev/null +++ b/COPYING @@ -0,0 +1,2 @@ +Please see the file LICENSE concerning the terms of the license, +copying and such. diff --git a/COPYRIGHT b/COPYRIGHT new file mode 100644 index 00000000..062c2fbb --- /dev/null +++ b/COPYRIGHT @@ -0,0 +1,2 @@ +Please see the file LICENSE concerning the terms of the license, +copying and such. diff --git a/ChangeLog b/ChangeLog new file mode 100644 index 00000000..2042736c --- /dev/null +++ b/ChangeLog @@ -0,0 +1,7473 @@ + Changelog on version 9.4.2 + +Release 9.4.2 + + + +04Feb19 + - Update Windows .def files + - Change create_postgresql_database.in script to be more flexible + - Implement eliminate verify records in dbcheck bug #2434 + - Enhance verify-voltocat-test to detect comparing deleted files + - Fix bug #2452 VerifyToCatalog reports deleted files as being new + - Use correct quoting for a character -- fixes previous patch + +03Feb19 + - Recompile configure.in + - Apply Carsten's multiarch patch fixes bug #2437 + - Apply Carsten's patch for adding CPPFLAGS to tools/gigaslam.c compile + - Allow . to terminate sql queries prompts + +02Feb19 + - baculum: Update Baculum API OpenAPI documentation + +29Jan19 + - Fix rwlock_test unittest bug #2449 Only call thr_setconcurrency if it's + available. Fix order of linking and installation. + - FixFix spelling errors found by lintian by Carston in bug #2436 + - Apply chmods from Leo in bug #2445 + +28Jan19 + - Add license files LICENSE and LICENSE-FOSS to the regression directory + +27Jan19 + - Display daemon pid in .apiV2 status output + - Attempt to ensure that ctest job output gets uploaded + - Apply varargs patch from Martin for bug 2443 + - Apply recv() hide patch from Martin + - Fix lz4.c register compilation from bug #2443 + +25Jan19 + - dbcheck: Improve error message when trying to prune Path records with BVFS is + used. 
+ +24Jan19 + - Update cdash for version 9.4 + +19Jan19 + - Fix bug #2448 bregex and bwild do not accept -l command line option + - Partial update copyright year + - Fix struct transfer_manager to be class transfer_manager + - Print Device xxx requested by DIR disabled only if verbose is enabled in + SD + - Add migrate-job-no-resource-test to all-disk-tests + - Remove unused berrno call + return + - Remove mention of Beta release from ReleaseNotes + +15Jan19 + - Fix #3225 about Migration issue when the Job resource is no longer defined + +12Jan19 + - baculum: Fix restore paths with apostrophe + +31Dec18 + - baculum: Fix data level + - Change endblock edit to unsigned -- suggested by Martin Simmons + +27Dec18 + - Update DEPKGS_VERSION + +22Dec18 + - baculum: Adapt Apache configs to version 2.4 + +Bugs fixed/closed since last release: +2434 2436 2437 2443 2445 2448 2449 2452 3225 + + +====================================================== +Release 9.4.1 +21Dec18 + - Remove register attribute on variables as it is not supported by newer C++ + compilers + +20Dec18 + - Fix regression from 9.2 when backporting Enterprise code + - Add missing default flag so that configure looks for libs3 + +Bugs fixed/closed since last release: + + +========================================================== +16Dec18 Release 9.4.0 + +14Dec18 + - Add copyright and correct name on stop-restart-test + +12Dec18 + - Fix #4449 about an incorrect pool selected with the restart command + +21Nov18 + - Fix #4386 About incorrect permission on directories after a restore with + replace=ifnewer + +20Nov18 + - Fix bug #4379 certain fields of Media record not reset after Truncate command + +17Nov18 + - Revert "Update bdirjson.c" + +16Nov18 + - Improve volume truncation error messages + - Free ids buffer + +14Nov18 + - Update PO files + - Initial version and date update + - Initial cut of ChangeLog and ReleaseNotes + +13Nov18 + - Add use_dcr_only in cloud_dev.c so that manual truncate works + +11Nov18 + - More Enterprise backports + - More Enterprise backports + changes to the backporting + - Minor backport from Enterprise + my own changes + +10Nov18 + - Update bdirjson.c + +09Nov18 + - Add pseudo WORM support for vtape + - worm: Fix multiple display of the WORM Recycle message + +04Nov18 + - Add first cut cloud drivers + - Use bfopen in place of fopen + +31Oct18 + - Fix #3574 Add "clients" option to the "help list" output + - Add makedir() in fd_common.h + +29Oct18 + - Add bfile is_plugin_data() API + - Fix issue between FO_PORTABLE and FO_PORTABLE_DATA + - baculum: Update Japanese translation files + +25Oct18 + - baculum: Fix availability web config wizard when there is problem with access + to api + +24Oct18 + - baculum: Add new size directive control + +22Oct18 + - Fix NOATTR detection + +17Oct18 + - Implement worm cassette support + +14Oct18 + - Make detection of duplicate M_SECURITY messages work + - Remove unused prototype recv(len) + - Add new security monitoring test + +04Oct18 + - Implement new message numbers in stored/block.c + - Fix incorrectly indicating: malformed message + +03Oct18 + - baculum: Fix basic auth user setting in API install wizard + - Fix bugs #2335 and #2349 Volume messages printed many times + - Add new test for bug printing many multiple Max Volume jobs= info + - Add worning message about failure to update volume info + - Improve error messages when JobMedia errors + +27Sep18 + - baculum: Fix undefined index error on web config wizard page + +16Sep18 + - baculum: Fix #2418 creating or updating new 
resource + - baculum: Fix size unit formatters in restore browser reported by Wanderlei + Huttel + +14Sep18 + - Fix complier warning due to unused subroutine variable + +06Sep18 + - Fix bug #2334 seg fault when releasing globals + - Security: sleep(5) on error + aggregating identical messages + +27Aug18 + - Update sellist unittests. + - Update unittests for lockmgr.c and fix memory leak. + - Update unittests fir ConfigFile/ini.c. + - Update 'rm -f' for libtool $(RMF). + - Correct libs/Makefile.in separator. + - Update htable unittests. + - Update sha1 unittests. + +21Aug18 + - Add fnmatch unittests. + - Update unit tests and add regression tests for it. + +20Aug18 + - Fix escaping special characters in bvfs restore for sqlite catalog + +18Aug18 + - baculum: Improve error handling in web part + - baculum: Fix formatted size and time values on the volume details page + - Add new manual test + +17Aug18 + - baculum: Do not store any main oauth2 client nor main http basic user in api + config + +16Aug18 + - Fix tls_bsock_shutdown() compilation when no TLS available. + +15Aug18 + - Fix bsock compilation warning. + +14Aug18 + - Fix bsock compilation problem in *BSD. + +13Aug18 + - Permit negative FileIndex values in the catalog + +12Aug18 + - baculum: Fix logging output if it is not possible to decode to json + - baculum: Fix saving logs when an error occurs + - baculum: API panel and wizard improvements + +11Aug18 + - baculum: Add name field to api client parameters + +05Jun18 + - Fix format string is not a string literal (potentially insecure). + +Bugs fixed/closed since last release: +2334 2335 2418 3574 4379 4386 4449 + +====================== Version 9.2.2 ==================== + +06Nov18 + - Fix bug #2421 by Adam about quoting Windows paths in CreateChildProcess() + +05Nov18 + - Update po files + +29Oct18 + - baculum: Update Japanese translation files + +25Oct18 + - baculum: Fix availability web config wizard when there is problem with access + to api + +24Oct18 + - baculum: Add new size directive control + +04Oct18 + - Implement new message numbers in stored/block.c + - Fix incorrectly indicating: malformed message + +03Oct18 + - baculum: Fix basic auth user setting in API install wizard + - Fix bugs #2335 and #2349 Volume messages printed many times + - Add new test for bug printing many multiple Max Volume jobs= info + +27Sep18 + - baculum: Fix undefined index error on web config wizard page + +16Sep18 + - baculum: Fix #2418 creating or updating new resource + - baculum: Fix size unit formatters in restore browser reported by Wanderlei + Huttel + +14Sep18 + - Fix complier warning due to unused subroutine variable + +06Sep18 + - Fix bug #2334 seg fault when releasing globals + +03Sep18 + - rpm: Fix mysql dependency for bacula-postgresql + +20Aug18 + - Fix escaping special characters in bvfs restore for sqlite catalog + +18Aug18 + - baculum: Improve error handling in web part + - baculum: Fix formatted size and time values on the volume details page + - Add new manual test + +17Aug18 + - baculum: Do not store any main oauth2 client nor main http basic user in api + config + +16Aug18 + - Fix tls_bsock_shutdown() compilation when no TLS available. + +15Aug18 + - Fix bsock compilation warning. + +14Aug18 + - Fix bsock compilation problem in *BSD. 
+ +12Aug18 + - baculum: Fix logging output if it is not possible to decode to json + - baculum: Fix saving logs when an error occurs + - baculum: API panel and wizard improvements + +11Aug18 + - baculum: Add name field to api client parameters + +Bugs fixed/closed since last release: +2334 2335 2418 2421 + + +============================================================ +Version 9.2.1 + +11Aug18 + - Permit catalog to contain negative FileIndexes + - baculum: Fix saving directives in messages resource + +10Aug18 + - Refactoring of BSOCK and introducing BSOCKCORE. + - baculum: Update API documentation + - baculum: Add status endpoint to available scopes endpoints + - Make print_ls_output identify delete files more clearly + - Backport stored/vbackup.c + +09Aug18 + - baculum: Add status director and status storage endpoints + - baculum: Add type and level filters to jobs endpoint + - baculum: Add support for .api 2 command in bconsole module + +08Aug18 + - Implement a keepalive on bpipe sockets fixes bug #2347 + - Backport bpipe enhancements + - Fix bug #2319 wrong port value stored in bsock giving incorrect error messages + +07Aug18 + - baculum: Add to jobs endpoint filtering by client and clientid + - Fix bug #2410 bdirjson output incorrect for day greater than 24 + - Attempt to avoid MySQL complaints about not allowing zero or empty in DATETIME + +06Aug18 + - Add M_SECURITY when connection is bad + fix bug where invalid probes sent to + Dir + - baculum: Fix schedule single day value setting + +05Aug18 + - Fix bug #2286 copied jobs always have level=Incremental + - baculum: Fix add slot parameter to label command + - baculum: Fix restoring backup from deleted clients + - baculum: Fix click action on remove config resource button + - baculum: Fix framework validation for active list type controls + - baculum: Implement ideas from Wanderlei Huttel + +04Aug18 + - Fix bug 2395 problem with man dir + - baculum: Fix saving subresources in config + - Start work on HAVE_CLIENT_ONLY install + - Switch to using /lib/systemd/system to install service files + - Install Bacula systemd files in /etc/systemd/system + - baculum: Update Portuguese translations + +03Aug18 + - baculum: Fix group most recent backups option in restore wizard for mysql + - Fix bug #2404 uninstall systemd service + - Fix warning during compilations of mainwin.cpp + +02Aug18 + - baculum: Implement second part ideas and fixes proposed by Wanderlei Huttel + - Update catalog update scripts in updatedb directory + - Fix bug #2340. 
Display of db_driver + - Add warning messages for bad or old self-signed certificates + +29Jul18 + - baculum: Fix #2403 error while writing diraddress directive in Bacula config + +26Jul18 + - baculum: Implement ideas and fixes proposed by Wanderlei Huttel + +24Jul18 + - baculum: Update Portuguese translations + +23Jul18 + - baculum: Fix pool does not exist error on pool details page + - baculum: Fix create directive base method + +03Jul18 + - rpm: Fix MySQL dependency on bacula-postgresql package + +Bugs fixed/closed since last release: +2410 2389 2286 2319 2340 2347 2357 2403 2404 2405 2395 2392 + +================================================================== + +Version 9.2.0 +20Jul18 + - Separate dequeuing msgs and dequeuing daemon msgs + +17Jul18 + - Replace uint with uint32_t + +15Jul18 + - Reset default status schedule limit to 30 + - Comment out use of uint that breaks Windows build + - Update win32 .def files + - Fix concurrent acquire/release of device + +14Jul18 + - Correct copyright + - Fix compiler warning generated by prior commit 1aad2088d21a3 + - Backport Enterprise src/findlib + +13Jul18 + - Backport Enterprise src/filed + - Backport Enterprise src/lib + +12Jul18 + - baculum: Fix cancel button in web config wizard + - baculum: Web interface password is no longer stored in settings.conf + - Add debug code for bug #2356 + +08Jul18 + - Fix bug #2385 -- compiler bug IMO + +02Jul18 + - fix #3945: Add "ocfs2" to list of filesystems known by "FsType" directive + +01Jul18 + - Backport parts of src/dird to community + - Use bstrcmp in place of strcmp + +30Jun18 + - baculum: Fix path validator for UTF-8 characters + +29Jun18 + - Recompile configure + - Update config.guess and config.sub + +27Jun18 + - Fix #3615 about bconsole Socket errors reported in the bacula log file + +19Jun18 + - Fix permissions of mtx-changer.conf + +16Jun18 + - Use /dev/sg0 rather than /dev/sg1 so vtape devices work + +15Jun18 + - Make out of freespace non-fatal for removable devices -- i.e. behaves like + tape + +10Jun18 + - Pull latest tls*.pem from BEE + +04Jun18 + - Fix #3854 missing tls library initialization in bdirjson, bfdjson, bsdjson + and bbconsjson + +27May18 + - Fix bug #2212 where restore jobid=nn file=xxx restores the files twice + - Apply patch from Wandlei Huttel to add Run Time and suffix to Restored + bytes + - Fix bug #2343 where truncate of explicit Volume name truncates non-purged + volumes + +26May18 + - Fix some file execute permissions. Fixes bug #2389 + +25May18 + - Fix license problems in Bug #2382 + - Apply patch from Leo to fix bug 2192 + +21May18 + - Fix bad placement of jcr->pool reference as pointed out by Martin Simmons + +18May18 + - rpm: Add OpenSuse Leap 42.3 + +17May18 + - rpm: Update bacula.spec for Fedora 27 + +16May18 + - Fix #3824 about incorrect setdebug command description + +15May18 + - baculum: Fix SQL grouping error in restore wizard reported by Rasmus Linden + +02May18 + - Fix Solaris 10 compilation error on BXATTR when no linkat(2) found. 
+ +01May18 + - baculum: Fix catching API exceptions + - baculum: Clean up theme Baculum-v1 + - baculum: Fix initializing new resource page + - baculum: Add button to set job log order + - baculum: Add manual loading CSS files with versioning + - baculum: Move API panel CSS files to separate directory + - baculum: Move Web CSS files to separate directory + - baculum: Fix not showing 'gui on' command in bconsole output + +27Apr18 + - win32: Fix backup issue with path > 250 char + +25Apr18 + - Fix #3672 about bdirjson issue with the Autochanger directive + - baculum: Add capability to set maximum numer of jobs visible in tables + +24Apr18 + - baculum: Loading table data performance improvements + +23Apr18 + - baculum: Fix sending path load request by enter key + - baculum: Add patch to fix gettext class file in framework + - baculum: Add htaccess file to framework directory + - baculum: Update rpm and deb templates with apache and lighttpd config files + +22Apr18 + - baculum: Update example api endpoints + - baculum: Adapt Web and API to new framework version + - baculum: Updated PRADO framework to version 4.0.1 + - Enable build of Windows 64 bit tray monitor + - Fix build of Windows tray-monitor + - Some changes to configure.in + +21Apr18 + - Update some old copyrights + - baculum: Highlight main menu items for subpages + - Update some old copyrights + - baculum: Add prune and purge actions to volume view page + - baculum: Fix compatibility with old api for prune and purge actions + +20Apr18 + - baculum: Update Portuguese translations + - Fix showing PkiCipher and PkiDigest values in bfdjson output + - Fix buffer overrun at BXATTR_Solaris::os_get_xattr_names. + - Bring Branch-9.1 up to date with Branch-9.0 + +19Apr18 + - baculum: API v1 documentation as open API file + - baculum: Update Web requests form for the new API v1 + - baculum: New improved version API v1 + +18Apr18 + - Fix #3745 update the client SQL record after a reload + +17Apr18 + - Fix 'grep -m' when '-m' option is not available. + - Update the build for ACL/XATTR support. + +13Apr18 + - Add some debugging information to bacl_solaris. + - Fix backup ACL/XATTR when fatal error and not only error. + +12Apr18 + - Fix Solaris XATTR support on Solaris 11. + - Fix compile error on !HAVE_EXTENDED_ACL + - Add some debugging messages. + - baculum: Fix link to job history page + - baculum: Fix previous step button in restore wizard + - baculum: Enable debug for first config wizard run + - baculum: Fix directing to wizard if application config doesn't exist + - baculum: Fix opening configuration tabs bug reported by Heitor Faria + +11Apr18 + - baculum: Set curl connection timeout + - baculum: Show error message after connection to api test + - baculum: Update LICENSE file + - baculum: Solve old browser cache problem for javascript after upgrade + - baculum: New redesigned web interface + - baculum: Changes in api for the redesigned web interface + +26Mar18 + - Fix compilation warning on FreeBSD. 
+ +23Mar18 + - Add command to change the pool of a job and the associated volumes + +15Mar18 + - Fix #3593 VirtualFull will select jobs to consolidate using Job name in + addition to Client/FileSet + +12Mar18 + - Do not increment the device num_writers if the call to dir_update_volume_info() + fails + +13Feb18 + - Add prune option to select volumes from a/all pool(s) + +03Feb18 + - baculum: Fix saving boolean values in schedule Run directive + +19Jan18 + - rpm: Add Fedora26-64 platform + +12Jan18 + - Add the RestoreClient directive for Restore job. + +11Jan18 + - Implementation of .ls command for Plugins. + +10Jan18 + - baculum: Add link to go back from job configuration window + +09Dec17 + - Use correct SQL table encoding for Postgresql 10.x + +08Dec17 + - Fix Where/Replace parameter displayed in the Restore job summary + +05Dec17 + - use pthread_kill() instead of pthread_cancel() to terminate SD_msg_chan + +04Dec17 + - Recompile configure.in + - Recompile configure.in + +03Dec17 + - baculum: Add listbox control and use it for base and device directives + +02Dec17 + - baculum: Fix showing verify job fields in job run configuration window + - baculum: Revert back volume pool name in volume list window + - baculum: Fix error message about disabled bconsole + - baculum: API endpoints code refactor + - baculum: Add state, number, boolean and id validators + - baculum: Return bconsole disabled error if bconsole support isn't enabled + - baculum: Remove unused api endpoints + +01Dec17 + - Correction of my_name_is() function using realpath() + - Add a detection of realpath() function to configure. + - Fix tray-monitor compilation + +30Nov17 + - Use breaddir() in the tray monitor + - file_dev.c: replace readdir_r() with new breaddir() + +28Nov17 + - new breaddir() function to replace readdir_r() + core update + - Fix #3098 Add debug tag 'record' for traces about records in the SD + - Fix #1826 Add Job Where and Replace variables to the Restore job summary + - Remove tests about "NULL Volume name. This shouldn't happen!!!" + - baculum: Fix oauth2 client working in the web part + +27Nov17 + - baculum: Fix auth setting radio buttons alignment + - baculum: Enlarge interface height to 100% + - baculum: Add more information to cURL error + - baculum: New reworked restore wizard + - baculum: Wizards view improvements + - baculum: Add restore hardlinks support in api + - baculum: Add strip_prefix, add_prefix, add_suffix and regex_where restore + options to api restore + - baculum: Add new volumes required api endpoint + +22Nov17 + - Port missing RestoreObject Plugin Config code from BEE. + +21Nov17 + - baculum: Stop using hidden fields to store item identifiers + - baculum: Fix redundant loading users portlet + - baculum: Add required config fields asterisk mark + +19Sep17 + - Enhance "status schedule" function to allow multiple job= and client= filters + - Add next_name() function to scan string lists + - Fix #1170. Enhance "status schedule" command. Display ordered output, add + Client and FileSet filters.
+ +12Sep17 + - bvfs: Add clients= option to .bvfs_get_jobids to handle clusters + +11Aug17 + - Add delete client bconsole command + +14Jun17 + - Fix #2910 about a problem in the "status network" command when the client is + not reachable + +15Jun15 + - Fix #1108 Enhance setdebug help command and console completion + +Bugs fixed/closed since last release: +1108 1170 1826 2212 2343 2356 2382 2385 2389 2910 3098 3593 3615 3672 3745 +3824 3854 3945 + +======================================================================== +Release 9.0.8 + +27May18 + - Fix bug #2212 where restore jobid=nn file=xxx restores the files twice + - Pull regression truncate-test from Branch-9.1 + - Apply patch from Wandlei Huttel to add Run Time and suffix to Restored + bytes + - Fix bug #2343 where truncate of explicit Volume name truncates non-purged + volumes + +26May18 + - Fix some file execute permissions. Fixes bug #2389 + +25May18 + - Fix license problems in Bug #2382 + - Apply patch from Leo to fix bug 2192 + +21May18 + - Fix bad placement of jcr->pool reference as pointed out by Martin Simmons + +18May18 + - rpm: Add OpenSuse Leap 42.3 + +17May18 + - rpm: Update bacula.spec for Fedora 27 + +15May18 + - baculum: Fix SQL grouping error in restore wizard reported by Rasmus Linden + +21Apr18 + - Update some old copyrights + +20Apr18 + - baculum: Update Portuguese translations + +19Apr18 + - Remove old Bacula Systems notices + +Bugs fixed/closed since last release: +2212 2320 2349 2354 2379 2382 2383 2330 2054 +2343 2369 2194 2359 2151 2366 2353 2381 2378 + + +Release 9.0.7 + + +18Apr18 + - Remove NSIS debug + +12Apr18 + - baculum: Fix opening configuration tabs bug reported by Heitor Faria + +25Feb18 + - Restore win32 dir from Branch-5.2 and update it + +23Feb18 + - Add Phil Stracchino's fix for Qt5 + +03Feb18 + - baculum: Fix saving boolean values in schedule Run directive + +19Jan18 + - rpm: Add Fedora26-64 platform + +10Jan18 + - baculum: Add link to go back from job configuration window + +09Dec17 + - Use correct SQL table encoding for Postgresql 10.x + +03Dec17 + - baculum: Add listbox control and use it for base and device directives + +02Dec17 + - baculum: Fix showing verify job fields in job run configuration window + - baculum: Revert back volume pool name in volume list window + - baculum: Fix error message about disabled bconsole + - baculum: API endpoints code refactor + - baculum: Add state, number, boolean and id validators + - baculum: Return bconsole disabled error if bconsole support isn't enabled + - baculum: Remove unused api endpoints + +28Nov17 + - baculum: Fix oauth2 client working in the web part + +27Nov17 + - baculum: Fix auth setting radio buttons alignement + - baculum: Enlarge interface height to 100% + - baculum: Add more information to cURL error + - baculum: New reworked restore wizard + - baculum: Wizards view improvements + - baculum: Add restore hardlinks support in api + - baculum: Add strip_prefix, add_prefix, add_suffix and regex_where restore + options to api restore + - baculum: Add new volumes required api endpoint + +22Nov17 + - Port missing RestoreObject Plugin Config code from BEE. 
+ +21Nov17 + - baculum: Stop using hidden fields to store item identifiers + - baculum: Fix redundant loading users portlet + - baculum: Add required config fields asterisk mark + +Bugs fixed/closed since last release: + +========================================================== + +Release 9.0.6 + +19Nov17 + - Update AUTHORS for recent commits + - Remove incorrectly placed openssl-compat.h + - Add openssl-compat.h which went in wrong directory + - baculum: Add removing single resource + - baculum: Add module to check resource dependencies + - baculum: Fix saving names with spaces inside schedule Run directive + - baculum: Fix saving entire config by api request + - Backout vol size tests in previous attempt to fix bug #2349 + - Fix compiler warning in previous patch + - Apply patches from bugs #2325 and #2326 to fix FIFO bugs + - Fix bug #2315 INTEGER misspelled in update_sqlite3_tables.in + - Try to fix bug #2349 multiple recycle messages + +17Nov17 + - Add support for items with comma in ini_store_alist_str() + +15Nov17 + - Fix segfault after the reload of an incorrect configuration + - Add temporary fix to avoid a deadlock after a reload command on an incorrect + configuration + - baculum: Throw 404 error if service not known + +14Nov17 + - Fix race condition between setip and the access to CLIENT::address() + - Fix #3284 about Client address not reloaded properly + - baculum: Use home page url when an error is shown + +12Nov17 + - Fix bug #2346 Dir blocks when max reloads reached + - baculum: Send config to api server as json + - Remove enterprise code that breaks Mac install -- fixes bug #2351 + - Correct FS size calculation for FreeBSD, Solaris, and Windows + +11Nov17 + - baculum: Enable Portuguese language support in makefile + +10Nov17 + - baculum: Fix required directives in schedule resource configuration + - baculum: Fix saving messages resource + - baculum: Improve slow reloading config resource list + +09Nov17 + - crypto: remove most of OpenSSL initcallbacks for 1.1 + - Update ACL/XATTR code and define new ACL/XATTR API for Plugins. + - baculum: Fix numeric password setting bug reported by Heitor Faria + +08Nov17 + - crypto: convert EVP_PKEY access and remaining bits for OpenSSL 1.1 + - crypto: convert EVP_MD_CTX + EVP_CIPHER_CTX to OpenSSL 1.1 + - crypto: Use DEFINE_STACK_OF() + - crypto: Add a tiny OpenSSL compat level + - crypto: remove support for ancient openssl + +06Nov17 + - fix #3269 obey the user choice of "Are you sure you want to delete X JobIds" + +02Nov17 + - Add restore wizard to the tray monitor. + - Preparation fixes: remove some warnings + - Add ASSERTD() to track NULL Volume name error + +24Oct17 + - Add "noautoparent" restore command option to disable the automatic parent + directory selection + +20Oct17 + - Make qt-console compatible with Qt5 (Qt4 still works) + +Bugs fixed/closed since last release: +2315 2325 2346 2349 2351 + +====================================================================== + +Release 9.0.5 + +01Nov17 + - Use if exists on dropping MAC table in postgres. Fixes bug #2314 + - Fix bdirjson display of Minutes.
Fixes bug #2318 + - baculum: Set default language if no language set + - baculum: Fix language setting in api + - baculum: Update generated .mo files for api + - baculum: Add missing texts to translations + - baculum: Fix add to translation static texts on the api default page + - baculum: Fix missing session start + - Make verify job log same as other logs -- fixes bug #2328 + - Take a more conservative approach for setting killable true + - Add extra safety for readdir buffer + +31Oct17 + - Retab systemd/Makefile.in + - Don't require mount for @piddir@ + - Use Debian systemd start/stop scripts supplied by Sven Hartge + +29Oct17 + - Fix bug #2316 add CacheRetention to Pool + - Skip tape ioctls on FreeBSD when using a FIFO fixes bug #2324 + - Fix bug #2338 to not truncate already truncated volumes + - Remove some old C int code and use bool + +28Oct17 + - Remove unused lib/lz4.c.orig file + - Update AUTHORS file + - Mark Volume read-only only if no access rights or read-only partition + - Add -P daemon option to suppress creating PID file + - Fix too big copy to test FD plugin_ctx + +26Oct17 + - Backport Enterprise code + +23Oct17 + - When read-only volume found mark it in catalog -- fixes bug #2337 + - Make out of space on partition fatal + - Fix bug 2323 -- loop exit condition was backward and add error message + - Add missing copy-plugin-confs for regress + - Fix bug reported by jesper@schmitz.computer where bat hangs on FreeBSD + +08Oct17 + - baculum: Fix reading and writing schedule resource + +15Sep17 + - baculum: Fix undefined offset error during saving director config + - baculum: Fix listing days of week in schedule setting + +14Sep17 + - baculum: Fix saving schedule run directive value + +12Sep17 + - rpm: Add missing script baculabackupreport and query.sql for Suse + - rpm: Add missing libbacsd* file and tapealert script to Suse rpm spec file + - rpm: Add missing libs bbconsjson, bdirjson and bsdjson to Suse rpm spec + file + - rpm: Add aligned plugin rpm spec file for Suse + - rpm: Add bacula-tray-monitor.desktop launcher in scripts directory + - rpm: Add Suse Linux ES 12.1 platform + +11Sep17 + - rpm: Add bacula-tray-monitor.desktop file in script dir + +Bugs fixed/closed since last release: +2314 2316 2318 2324 2328 2337 2338 + + +Release 9.0.4 +06Sep17 + - Update po files + +05Sep17 + - Fix SQLite3 upgrade tables script fixes bug #2306 + - baculum: Fix language setting in config file + +03Sep17 + - Upgrade to latest lz4.c to fix bug #2310 bus error on 64 bit Solaris + - Recompile configure.in + - Ensure systemd/bacula.conf is created by configure fixes bug #2307 + - Fix compiler warning noted in bug #2309 + - Fix SQLite3 Version bug #2305 + - Remove unused variable to eliminate compiler warning + - Recompile configure.in + +28Aug17 + - Fix #2925 Do not try to stop non-backup jobs (virtualfull, copy, migration, + restore, etc...)
+ +25Aug17 + - baculum: Fix broken symbolic links for lang files + +17Aug17 + - don't use add_event() when flag "l" is not set + +14Jun17 + - core: bwlimit measure bandwidth + +02Jun17 + - core: bwlimit handle backlog and allow burst + +25Apr17 + - Do not purge running jobs in autoprune + +Bugs fixed/closed since last release: +2305 2306 2307 2309 2310 2925 + + +========================================================== + +Release 9.0.3 + +06Aug17 + - baculum: Fix access denied error on api install wizard page + - baculum: Remove assigning to api host when user is deleted + - baculum: Fix empty admin setting + - baculum: Add ability to assign host to specific user + - baculum: Fix bconsole test connection for new api host that works with new + director + +05Aug17 + - baculum: Fix sqlite db support + +03Aug17 + - Fix bug #2301 Solaris Available space incorrectly reported by turning off the + output for Solaris + - Fix bug #2300 mount/unmount/release of single tape drive does not work + +01Aug17 + - baculum: Fix bconsole connection test in config wizard + - baculum: Fix writing config for schedule and message names with space + +31Jul17 + - bpipe: Fix compiler warning + - baculum: Fix drag & drop file version elements + +30Jul17 + - baculum: Add fileset info endpoint and use it in restore wizard + - baculum: Use client name instead of clientid and start using fileset to + prepare restore tree + - baculum: Remove fileset parameter from run restore + - baculum: Fix lstat regex pattern + - baculum: Get the most recent jobs by client and fileset or by clientid and + filesetid + - Fix: bug #3048: jobs are stuck in endless loop in reserve.c + - Add total time to test.out file + +29Jul17 + - baculum: Add restore job selection in restore job wizard + +26Jul17 + - Enhance verify job report from bug 2249 + +Bugs fixed/closed since last release: +2300 2301 3048 + + +====================================================== +Release 9.0.2 + +23Jul17 + - Use Bacula in place of Libz variables so we can build with/without libz and + lzo + - Apply ideas from bug #2255 prettier status slots output + - Configure and install bacula-tray-monitor.desktop + +22Jul17 + - Fix btape test which counted files incorrectly on EOT + +21Jul17 + - Fix bug #2296 where Bacula would not compile with postgres 8 or older + +20Jul17 + - Fix bug #2294 Bacula does not build with MariaDB 10.2 + +14Jul17 + - baculum: Fix multiple directors support + - baculum: Fix showing errors from the API + +Bugs fixed/closed since last release: +2255 2294 2296 + + +Release 9.0.1 + +10Jul17 + - Remove two incorrect trailing commas in bsock.h + - Fix bug #2293 bad big endian detection in lz4.c + - Add new tray-monitor files that were omitted in the backport from Enterprise + +06Jul17 + - bvfs: Do not insert deleted directories in PathVisibility table + +05Jul17 + - Fix compilation for Debian Stretch with GCC 6.3 + +Bugs fixed/closed since last release: +2293 + +Release 9.0.0 + +02Jul17 + - Skip verify-data-test if not running Linux + - Skip lzo-test if lzo not in Bacula + - Remove double define HAVE_LZO in config.h + +01Jul17 + - Add documentation on baculabackupreport to delete_catalog_backup.in + - Install baculabackupreport and ignore script without .in + - Recompile configure.in + - Add Bill's baculabackupreport script + +30Jun17 + - Update po files + +29Jun17 + - Fix error in FreeBSD during maxtime-test + +27Jun17 + - Fix #2853 About character substitution for "virtual full" job level in + RunAfterJob + +26Jun17 + - Attempt to fix timing problem 
with console-dotcmd-test on FreeBSD + - Ensure we have a DIR connection in dequeue_messages + +25Jun17 + - Add more debug to regress for FreeBSD failures + +23Jun17 + - Fix #2940 Allow specific Director job code in WriteBootstrap directive + +21Jun17 + - Fix pragma pack to allow lz4.c work on Solaris and other machines + +19Jun17 + - baculum: Fix working logout button + - A more correct fix for lz4.c on Solaris 10 + +18Jun17 + - Remove use of #pragma pack in lib/lz4.c for Solaris 10 + - Recompile configure from configure.in + - Detect Solaris 10 + + +17Jun17 + - Fix bug #2289 version 7.9 not compatible with old FDs -- comm compression + +10Jun17 + - Make getmsg.c compatible with old FDs + +09Jun17 + - Use one MAX_BLOCK_SIZE and set to 20M + +08Jun17 + - rpm: Add Fedora 25 build platform + +07Jun17 + - Remove vestiges of crc32_bad -- fixes Solaris build + - Fix #2890 about segfault in .status command on Redhat 5 32bit + - Add missing semi-colon in bsys.c + +06Jun17 + - baculum: Fix incorrect table name error during restore start + +30May17 + - Display the correct address in lockdump for db_lock() + +05Jun17 + - Fix getmsg to handle additional forms of Progress messages + +04Jun17 + - baculum: Fix double assets and runtime symbolic links in baculum-web deb + package + +03Jun17 + - baculum: Fix missing php-xml dependency in deb metafile + - baculum: Improve errors handling in API restore pages + +29May17 + - rpm: Remove libbacsd.la for both Red Hat and Suse + - rpm: Add missing libs bbconsjson, bdirjson and bsdjson + - rpm: Fix libstdc++ version in BAT spec file + +28May17 + - Fix some problems noted by clang + - baculum: Reorganize run job code + - baculum: Reorganize estimate job code + - baculum: Make get method definition not obligatory + - Make file-span-vol-test portable + - Attempt to fix deadlock in FreeBSD maxtime-test + +27May17 + - Do not produce error if MySQL database exists in create_mysql_database + +26May17 + - rpm: Add missing tapealert script + - rpm: Add missing libbacsd + - rpm: Remove dvd-handler script + +24May17 + - Fix bvfs queries + - Use FileId in place of Filename + - Revert "Put FilenameId in .bvfs_lsfiles output" + - Put FilenameId in .bvfs_lsfiles output + - Add more debug in src/cats/bvfs.c + - Fix bvfs_lsdirs and bvfs_lsfiles + - baculum: Add Japanese language support in deb and rpm packages + - Add DirectoryACL directive + - baculum: New Baculum API and Baculum Web + - Add forking info to LICENSE and LICENSE-FAQ + - Minor improvement to error message + - Fix race in steal_device_lock shown in truncate-concurrent-test + - Apply Marcin's fix for 6th week of the month + - Add new truncate test + - Retab Makefile.in in platforms/systemd.in + - Fix compiler warning + - Add FD backwards compatibility + - Fix regression minor scripting problems + - Fix #2807 about an issue with the show command when using incorrectly JobToVerify + directive + - Fix #2806 about the director service started before the database with systemd + - Update Dart control files + - Massive (70,000+ lines) backport of code from Bacula Enterprise 8.8. + See next line ... 
+ - Adapt update_bacula_tables scripts for catalog version 15 + - Allow to use Base directive in a JobDefs + - Add more debug to the bpipe plugin + - Enhance error message when packets are too big + - Add '.storage unique' bconsole command + - Allow to use ".jobs type=!B" to display specific job type + - Add lockdump storage daemon information + + - Fix #2698 Display loaded driver list in status storage output + - Fix autochanger unload message that contains sometime an incorrect volume name + - Fix issue with open_bpipe() function that may flush stdio buffer if the + command is incorrect + - Fix unload tape messages to print correct volume + improve output format + - Fix unload/re-load same volume + - Fix DIR get unexpected "Connection reset by peer" for FD + - Fix #2548 about SQL connection leak with RunScript::Console commands + - Fix #2588 about segfault in bdirjson with JobDefs/Base directive + - Fix #2593 about incomplete jobs incorrectly rescheduled + - Fix #2629 about pool argument not listed in the "help cloud" output + - Fix #2632 about VolType not set correctly for Cloud volumes after a label problem + - Fix #2640 about a reference to the source directory in query.sql file + - Fix bug #2271 where poll interval causes tape mount message to repeat + - Fix segfault in bdirjson with incorrect configuration files + +Bugs fixed/closed since last release: +2271 2548 2563 2567 2588 2593 2602 2624 2625 2627 2629 2632 2638 2640 2646 +2698 2520 2559 2561 2582 2806 2807 2890 2289 2890 2853 2940 + + +============================================================== + +Release Version 7.4.7 + +15Mar17 + - Permit specifying query item number on bconsole query command line + - Fix Solaris 10 problems reported by Phil Stracchino + - Fix EPROTO on OpenBSD + +========================================================================== + +Release Version 7.4.6 + +10Mar17 + - Fix bug #2271 where poll interval causes tape mount message to repeat + - Attempt to fix IPV6 not configured + +09Mar17 + - Possible fix for acl seg fault on OpenBSD where no acl code defined + - Change release digest from SHA1 to SHA256 + +14Feb17 + - Fix getnameinfo() for FreeBSD fixes bug #2083 + +Bugs fixed/closed since last release: +2083 2271 + +========================================================================== + +Release version 7.4.5 + +07Feb17 + - Correct wrong word in message + +28Jan17 + - Remove restriction on using the scratch pool that can cause restore failures + - Remove debug code that breaks btape fill + +25Dec16 + - Initialize freespace_mutex fixes bug 2207 + +11Dec16 + - baculum: Update AUTHORS file + +08Dec16 + - baculum: Enable Japanese language on web interface + - baculum: Implement Japanese language support + +19Nov16 + - XACL - refactoring an ACL and XATTR codes. + - Revert "Warn of Storage Daemon version incompatibility if label fails. Bug + #2193" + +02Oct16 + - Make another attempt to resolve bug #2176 + - Warn of Storage Daemon version incompatibility if label fails. 
Bug #2193 + - Apply patch to list more pool info from bug #2202 + +21Sep16 + - Fix status alignment output reported by Wanderlei Huttel + +========================================================================== + +Release version 7.4.4 + +20Sep16 + - Fix broken backport commit + +12Sep16 + - Fix #2085 About director segfault in cram-md5 function + - Attempt to fix bug #2237 + +11Sep16 + - Recompile configure.in + - Fix systemd installation + - If using readline reset terminal at bconsole exit + +08Sep16 + - Fix compilation without SMARTALLOC + +02Sep16 + - Fix #2060 about SQL false error message with "update volume fromallpools" + command + +29Aug16 + - Fix spurious MD5 update errors when nothing changed should fix bug #2237 and + others + +23Aug16 + - Fix small memory leak with the restart command + +14Aug16 + - baculum: Update language files + +11Aug16 + - Fix #335 Avoid backups going to the scratch pool + +08Aug16 + - systemd: Give 3mins to the bacula-sd service to stop and close the dde + +31Jul16 + - Minor modifications to Ubuntu packaging + +22Jul16 + - Check if the ScratchPool points to the current Pool and print a warning + message in such case + +21Jul16 + - Fix #1968 print the ScratchPool name instead of just 'Scratch' + +20Jul16 + - Display PrefixLinks in "show job" output + - Add explicit LL to big integers to appease older compilers + +18Jul16 + - Enable the plugin directory for the FileDaemon by default + - Allow multiple mailcommand+operatorcommand in Messages. Fixes bug #2222 + +14Jul16 + - Handle NULL pointers in smartdump() and asciidump() + +12Jul16 + - Modify status to include Admin and Restore in Level field -- clearer + +11Jul16 + - Ensure that zero JobMedias are written for labelling + +07Jul16 + - Fix error message about the stream 26 (PLUGIN_NAME) in bextract + +Bugs fixed/closed since last release: +1968 2060 2085 2222 2237 335 + + +==================================================================== + +Release version 7.4.3 + +17Jul16 + - Add shortcut to RunScript console commands. Submitted by Wanderlei Huttel. + Fixes bug #2224 + - Fail when multiple mailcommand and other strings are specified in .conf. Fixes + bug #2222 + - Add support for terabytes in sizes. Submitted by Wanderlei Huttel. Fixes bug + #2223 + - Add error message for truncate command when actiononpurge not set. 
Fixes bug + #2221 +14Jul16 + - Fix optimization error with GCC 6.1 + - Fix compilation warnings with GCC 6.1 +13Jul16 + - Explicitly create MySQL user in grant_mysql_privileges.in + +Bugs fixed/closed since last release: +2221 2222 2223 2224 + +===================================================================== + +Release version 7.4.2 + +06Jul16 + - Fix #1926 about wrong duplicate job detection with Copy/Migration and Backup + jobs + +03Jul16 + - Recompile configure after db.m4 change + - Fix batch insert for MySQL 5.7 + +02Jul16 + - Fix zero level debug output -- now at 100 + +29Jun16 + - Fix #766 about Job logs displayed with unneeded linefeed + +27Jun16 + - Fix #1902 about a segfault with the "cancel inactive" command + +14Jun16 + - Fix bug where MySQL 5.7 is improperly linked on Ubuntu 16.04 + +Bugs fixed/closed since last release: +1902 1926 766 + + +============================================== + +Release version 7.4.1 + +31May16 + - Fix bug #1849 MySQL does not accept 0 for DATETIME default + +12May16 + - Modify the alist object to be reused after a destroy() + +24Apr16 + - baculum: Fix setting invalid timezone value for PHP + +18Apr16 + - Fix compilation for AIX + +07Apr16 + - Fix the restore termination string in the job report to take in account + JobErrors and SDErrors + +12Mar16 + - baculum: Show jobs for client + +04Mar16 + - Fix bconsole "llist job=" output + +01Mar16 + - Fix #146 about update volume command line usage + +29Feb16 + - bat: Fix #1066 about bad update pool command + - Fix #1653 about make_catalog_backup default user name + +28Feb16 + - baculum: Show jobs stored on volume + +15Feb16 + - Fix update Volume=x Slot=nn when Slot > MaxVols + - Set exit code for create_postgresql_database.in + +09Feb16 + - Fix bug #2197 -- build failure with --disable-libtool + - Fix bug #2204 -- superfluous END-OF-DATA in update_mysql_tables.in + +02Feb16 + - Convert a Migration job with errors into a Copy job + +31Jan16 + - Remove exporting add_mtab_item -- fixes bug #2198 + +23Jan16 + - Fix possible problem of show multiple resources + +18Jan16 + - Comment out tools/smtp-orig.c as it is for reference only + +Bugs fixed/closed since last release: +1066 146 1653 1849 2197 2198 2204 + +================= + +Release Version 7.4.0 + +14Jan16 + - Put back missing line in copyright + - Update date + - Implement MaxVirtualFullInterval + - Update AUTHORS + +13Jan16 + - Ensure relabel has latest vol info may fix bug #1412 + - Update AUTHORS + +12Jan16 + - Change license as per agreement with FSFE + - Apply Carsten's patch that fixes bug #2192 builds on kfreebsd + - Update AUTHORS file + - Add some additional tape slot debug code + +11Jan16 + - baculum: Add Wanderlei Huttel to AUTHORS + - baculum: Enable Portuguese language on web interface + - baculum: Implement Portuguese language support + - baculum: Assign Baculum copyright to Kern Sibbald + - Add more debug to recycle-test + - More debug info when aborting + +10Jan16 + - baculum: Fix sorting in restore by group most recent backups + - Change copyright as per agreement with FSFE + - Update po files + - Change copyright as per agreement with FSFE + +09Jan16 + - baculum: Fix restore group most recent backups for MySQL + +06Jan16 + - Fix FD DisableCommands + +05Jan16 + - baculum: Fix to change user password + +04Jan16 + - Add ExpiresIn field in list and llist media output + - Fix #1548 about Solaris SIGBUS with accurate mode backup + +02Jan16 + - Change copyright as per agreement with FSFE + update copyright year + - Finish revert of patch 
+ - Revert commit 1157f172ea1c3b3 + - Update year to 2016 + - Backport some Enterprise code to sql_list.c + +01Jan16 + - Change copyright as per agreement with FSFE + - Add info message of #jobs consolidated in Virtual Full + - baculum: Unify user validation + - baculum: Fix showing bconsole test result in configuration wizard + - baculum: Fix auto-login after finishing wizard + - baculum: Check if auth data exists before log in try + - Add HasBase+Comment to llist Jobs + - Fix a few debug outputs that should be unsigned + +31Dec15 + - baculum: Add php-xml to requirements + - Fix seg fault in btape fixes bug #2180 + - Fix slight error in autoprune -- should fix bug #2151 + - Change copyright as per agreement with FSFE + - baculum: Add first unit tests + +30Dec15 + - Fix #1545 about fix in manual_prune.pl script with large number of volumes + - Fix false status output. Fixes bug #2103 + - Integrate patch into latest version, which fixes bug #1882 + - Fix bug #2090 correct detection of GCC + - baculum: Add support for terminated with warnings job status + - baculum: Hide users input on ESC key press + - baculum: Fix logout action on Apache web server + - baculum: Implement users management from web interface + - baculum: Prevent opening new sessions for each request + +27Dec15 + - Fix CLANG warning messages -- fixes bug #2090 + - Add new chio-changer-freebase from bug #2115 + - Applied modified patch from bug#2117 to fix bpipe end of stream + - Apply patch from bug #2165 to fix the update sqlite3 script + - baculum: Rework access by restricted consoles + +26Dec15 + - Fix update MD5 failure bug reported by Peter Keller + +25Dec15 + - baculum: Add dashboard panel + +23Dec15 + - Patch to add MySQL ssl access + +21Dec15 + - Change copyright as per agreement with FSFE + +20Dec15 + - Manually apply patch in bug #2156 to allow building on KFreeBSD + - Fix bug #2153 with patch submitted by Ana Arruda + - Prevent possible seg fault + - Fix possible seg fault if debug mode set + - baculum: Close console on press ESC key + - Change copyright as per agreement with FSFE + - baculum: Switch to started job status just after job start + +19Dec15 + - baculum: Add possibility to open configuration windows from URL + - Fix restore when storage specified on command line + - baculum: Add jobbytes and media type to volumes list + - Set jcr in heartbeat thread of FD + - Remove not yet implemented from UA Verify Volume Data message + +18Dec15 + - Fix restore of Windows streams to non-Windows machines + - Add new verify data test + - Implement level=Data to the Verify job + - baculum: Fix requirements path + +16Dec15 + - Fix #1524 about bextract trace file location + - Partial modification of copyrights as per agreement with FSFE + - Fix truncate bug free_volume problem + +14Dec15 + - baculum: Fix showing table header + - baculum: Search field works with all table cells + +13Dec15 + - baculum: Do not refresh window lists if toolbar is open or elements are + checked + - baculum: Remember sort order for data grids + - baculum: Add example Nginx config + - baculum: Fix working with php-fpm + - baculum: Fix sorting formatted values in tables + - baculum: Improve size formatter precision + - baculum: Fix jobs count in job list + - baculum: Show jobid in job configuration window + - baculum: Add jobbytes and jobfiles columns in job list + +12Dec15 + - baculum: Get system timezone for PHP if possible + - baculum: Fix restore when a lot of jobids given + - baculum: Remove temporary Bvfs table when restore starts + - 
baculum: Show copy job label in job list + - baculum: Set default job attributes (level, client, fileset, pool, storage, + priority) in Run job panel + - baculum: Get job show result by job name or jobid + +11Dec15 + - baculum: Set half size window as default window size + - baculum: Not remember controls state in jobs window + - baculum: Add session cache + - Fix truncate race bug #1382 + - Fix some low level messages to display correctly + +28Nov15 + - baculum: Remove deprecated message in initial wizard + - baculum: Update SELinux policy module + - baculum: Fix update pool action when no volumes in pool + - baculum: Split configuration windows into two tabs: actions and console + - baculum: Change default elements limit to 500 elements + - baculum: Add drive parameter to bconsole release command execution + - baculum: Switch interface from performance mode to normal mode due to no + cache checking in performance mode + +27Nov15 + - Fix #1470 Fix setdebug command when all components are selected + - baculum: Set default elements limit in window to 1000 elements + +25Nov15 + - baculum: Fix expectation failed error during restore + - baculum: Switch to debug mode only when debug is enabled in config + - baculum: Switch interface to performance mode + - baculum: Add JSMin from framework for complete switch interface to performance + mode + - baculum: Add Ulrich Leodolter to AUTHORS + - baculum: Fix end-of-file warning in bconsole calls. Patch from Ulrich Leodolter + +22Nov15 + - baculum: Fix remove users file during upgrade deb packages + +21Nov15 + - Add new JOB_DBR field + - #ifdef out bpluginfo since it does not compile + +20Nov15 + - Fix #1449 about a FileDaemon segfault with the fstype option + +17Nov15 + - Remove vestiges of rechdr_queue hopefully fixes bug #2180 + - Apply bconsole manpage patch from bug #2182 + - Apply ppc64el configure detection patch from bug #2183 + +12Nov15 + - Fix #1414 When the FD is down, status dir now prints "is waiting for Client + xx-fd" + +11Nov15 + - Ensure that JOB_DBR is properly initialized before a db_list_job_records() + +10Nov15 + - Implement new options in list command + - Add @tall command to log both input/output in a log file + +31Oct15 + - Fix #1360 about bextract -t not documented in the man page + +30Oct15 + - Update spec file for latest OSX versions + - Fix compilation on MacOS + +28Oct15 + - fix syntax error + +27Oct15 + - Improve Jmsg in response(), display SIGNAL number when appropriate + - Avoid segfault in dump_block() when the block_len is invalid + +23Oct15 + - Fix #1368 about xattr error not displayed correctly at restore time + +22Oct15 + - If we swapped a Volume, clear in_use bit on Volume + +19Oct15 + - Fix #1362 about libgcc dependency in sles12 bat package + - Remove installation of manpage for bplugininfo + - Fix bug 2171 cannot build tray-monitor + +18Oct15 + - Enhance bvfs performance .bvfs_update for MySQL + - Do some sanity checks on user inputs + - Enhance bvfs performance .bvfs_update for MySQL + - Enhance bvfs performance .bvfs_update for MySQL + - Recompile configure.in + - Fix bug 2173 QT tray monitor can not be built due to missing files in configure + +16Oct15 + - dedup fix bextract bug + +15Oct15 + - Move plugin_free() in free_jcr() + - Fix bug #2083 -- Fix sockaddr_to_ascii for FreeBSD + +10Oct15 + - Fix fadvise bug found by Robert Heinzmann + +22Sep15 + - Fix compilation without zlib and lzo + - Fix compilation error with new fstype_cmp() function + - Fix compilation problem with AFS + +19Sep15 + - Fix 
compilation on Solaris/FreeBSD + +18Sep15 + - Fix segfault in open_bpipe() when the program is empty + +17Sep15 + - Modify find_next_volume_for_append() to not send the same volume twice + +15Sep15 + - Avoid string displayed in restore menu + - Do not update state file after a bacula-xxx -t + +10Sep15 + - Fix #804 about misleading message with the purge command + - Fix automount feature after a label command + +20Aug15 + - rpms: Add Fedora22 and Systemd support for redhat rpms + +19Aug15 + - Add missing required packages versions + +18Aug15 + - Reinsert tabs in systemd Makefile.in + +17Aug15 + - Remove old DVD.conf in examples + +16Aug15 + - baculum: Fix default values in Makefile + - baculum: Split deb result package into baculum, baculum-lighttpd and baculum-apache2 + packages + - baculum: Provide LICENSE-FOSS file content in Baculum deb packages (copyright + file) + - baculum: Add deb template files and deb configs examples + - baculum: Change Makefile to using also for deb packages template + +11Aug15 + - Use Client Catalog resource in get_catalog_resource() if "client" is specified + in command line + +07Jul15 + - Fix #1131 about Job::Next Pool resource precedence over the Pool::Next pool + directive + +11Feb15 + - Fix #898 truncate volumes larger than 200 bytes + +Bugs fixed/closed since last release: +1131 1360 1362 1368 1382 1412 1414 1449 1470 1524 1545 1548 1882 2083 2090 +2103 2115 2117 2151 2153 2156 2165 2180 2182 2183 2192 804 898 + +========================================================== + +Release Version 7.2.0 + +12Aug15 + - Put back missing close_msg(NULL) to flush daemon messages at job end + - Add LICENSE-FOSS and update LICENSE for baculum + - Backport from Bacula Enterprise + +29Jul15 + - Put back old pruning + - Fix max vol size test accidentally deleted + - Remove gigaslam and grow on uninstall -- from bug report + - Revert to Branch-8.3 fd_snapshot.c + - Pull more recent changes from Branch-8.2 + - Fix bvfs_lsdir pattern parameter setting + - Remove CheckList no longer used + - Revert "Use db_lock()/unlock() around JobMedia creation transaction" + - Fix #1099 about director crash with rescheduled jobs + - Fix #1209 about bat segfault when clicking on Media + - Qmsg(M_FATAL) set jcr->JobStatus to JS_FatalError immediately + - snapshot: Abort the job by default if a snapshot creation fails + - Revert to old SD-FD close session protocol + - Remove drive reservation if no Jobs running + - Remove filename patch + - snapshot: Try to detect LVM when the filesystem is ext3 or XFS + - Fix bad debug message in mac_sql.c + - Fix restore-multi-session test by incrementing found files only on next + file + - Add -T description in man pages + - Correct incorrect Fatal error message text in bsock + - mysql: Add support for multiple instances binary backup in the same fileset + - Fix compilation with new debug hook + - mysql: Avoid warning with abort_on_job plugin option + - Fix compilation after patch "prune volume yes" + - Do not print message about retention when using "prune volume yes" command + - Fix #536 about Copy/Migration/VF that should not use Client "Maximum Concurrent + Jobs" + - Fix potential segfault with unused ConfigFile objects + - Fix #1108 Enhance setdebug help command and console completion + - Add more JCR variables in lockdump procedure + - Fix error in update_postgresql_tables.in caused by bad search and replace + - Fix #1127 about the repositioning enhancement during restore + - Correct try_reposition() return code after a seek() + - Add position
information in the block structure + - Fix a number of acl and xattr bugs + give more understandable variable + names + - Make btraceback.dbx and .gdb use new sql engine name + - Revert most of patch ef57e6c4 and replace with old cats code + - Revert useless parts of patch 08d8e2d29 + - Revert patch d7f71d2c94a and rewrite it using simpler public domain example + - Fix batch mode detection for SQLite3 + - Revert d9aa76fa and simplify + - Revert patch 30388e447fa3 + fix bug #1948 + - Use a more appropriate name for the acl context + - Use class pointer rather than jcr in src/lib/jcr.c + - Revert patch f294b276 + - Change B_DB to BDB to correspond to naming convention + - Add -T option in bacula-sd to use trace file + - Force use of newer TLS protocols + - Avoid problem with db_get_job_record() when SchedTime or RealEndTime is + NULL + - Update our regexec() to support NULL argument + - Add function to copy a file in bsys.c + - Fix bug 2141 fork before TLS initialization + - Update LICENSE-FOSS + - Change license on src/lib/crc32.c as agreed with the author, Joakim Tjernlund + - Update po + - More license updates + - Fix compilation + - Add read_control command between Plugin/FD and Storage Daemon + - Add .bvfs_get_jobs and .bvfs_get_bootstrap functions + - Fix compilation for Solaris9 + - Fix Makefile.in tabs + - Update Windows .def files + - More copyright notices + - Fix Windows plugin licenses + - Change license copyright for updatedb and qt-console/tray-monitor + - Change copyright for logwatch + - Update more copyrights + - Update copyrights in pebuilder + - Update plugin licenses + - Add copyrights + license to platforms + - Update copyrights in po + - More license clarifications + - One more copyright in src/cats + - Update src/cats .in file copyrights + - Compute Job "Compression Ratio" using SDJobBytes instead of JobBytes + - Get correct attributions for bsmtp.c + - Switch from LGPLv3 for scripts to BSD 2-Clause + - Fix segfault on dot commands used in RunScript::Console directive + - Fix patch c0f0e6c01c7 to optimize retries only for autochangers + - Fix #876 about SD reads too far with complex bootstrap + - Correct unmount test in dev.c + - Add debug JobId in next-vol-test script + - Fix patch c59e5da29 to not orphan buffers + - Fix bad implementation of enable/disable job,client,schedules + implement + enable/disable storage devices + - Implement enable/disable schedule and client + - Optimize Volume protocol when Volume not InChanger + - Do not trash existing record during label of new volume + - During accurate restore unstrip as soon as possible + - Better handline of no storage device found + - Fix #1075 The replace=never flag was not properly handled when combined with + database= option in mysql/postgresql plugin + - display timestamp in X_msg() in one single pass to avoid double flush() + - Update copyrights in scripts directory + - Fix bug #1083 RT14512 + - configure.in: new HAVE_FCNTL_LOCK detect fcntl() locking capability + - Fix #1008 about status storage that displays "Writing" and "Reading" information + for the same DCR + - Add new %E job code to use non fatal job errors in scripts + - Revert to old htable, but add 64 bit hash + - Fix possible race condition in smartalloc + - Refactor + optimize fstype.c + revert mntent_cache.c/h + - snap: Fix small initialization problem with LVM backend + - Fix compilation warning in bextract + - lock the pid file using fcntl(F_SETLK) + - bat: Fix segfault in client view when the Uname field is empty + - bat: Fix #1047 about 
segfaults in Client, Media and Pool view + - Revert patch 62ab7eb5 for filed/backup.c + - Revert patch 62ab7eb5 for filed/verify.c + - Refactor mount/unmount to use class calls + - Add return status to DEVICE:close and report error at end of Job + - Fix seg fault + - fix a Dmsg in match_bsr.c:match_volume() + - Fix #861 about bad help command on status schedule + - Add new cats header file + - Refactor DB engine to be class based + - Remove regression cancel_test from do_all + - Fix invalid .mod command in BAT during restore (bugfix #858) + - Use B_ISXDIGIT() in rangescanner + - Handle hex numbers in str_to_uint64() + - Fix prune-migration-test -- wait in wrong place + - fix MA 987 cannot copy/migrate jobs with a Level=VF in the job resource + - Fix basejob error caused by patch on bug #965 + - Allow to list restore jobs in llist jobid= command + - Fix #940 about segfault in bat when doing an "update slots" + - Fix #983 about segfault on win32 filedaemon when using bat to monitor the + status + - Fix #969 about a segfault while doing a cancel of a copy job + - Fill errmsg after an error with FETCH query in db_big_sql_query() + - Fix #965 about an empty error message after a problem when sending accurate + file list + - Fix #972 about segfault in show command used with multiple resources + - Work bsnapshot for SLES12 and fix issue with ZFS + - Fix small memory leak in cancel command with ujobid and job parameters + - Ensure that client resource is not freed during setbandwidth command + - fix errors in the use of a Mmsg() + - Use a specific mutex for auth instead of jcr->mutex + - update po + - Add missing call to free_jcr() in previous patch + - Lock the jcr when using sd_calls_client_bsock variable + - Ensure that only one thread can use the auth code in the Storage + - Fix #951 about SDCallsClient not used during restore jobs + - snapshot: Get the creation date from the zfs list snapshot command + - snapshot: Fix small issue with Name parameter in list snapshot + - Fix bsnapshot to return status=0 on error + - fix a mempool error at SD shutdown + - snapshot: Call support() only if the device is in the fileset + - snapshot: Avoid double / in path and files when volume is / + - Fix segfault with Console runscript introduced by "Stop ua commands if comm + line drops" + - handle ctrl-C and SIGTERM the same way in SD + - Startup scripts return proper exitcode for service restart action + - Implement tables configuration + - Add ReadBytes to FD status output + - Accept 0/1 for @BOOL@ type in ConfigFile module + - Set cmd_plugin only in pluginCreateFile if not SKIP/ERROR/CORE + - Fix #13680 about systemd message "unknown lvalue" + - Stop ua commands if comm line drops + - Fix weird compilation problem on rhel5 + - Display TLS information in status client/storage +25Feb15 + - Fix rpms where unix user were not properly defined + - update extrajs package in debs/rpm package + - Fix segfault with new filesetcmd + - snapshot: Reset JobId in Snapshot table when deleting a job + - snapshot: Add ability to list snapshots from the FD + - snapshot: Add a confirmation message when pruning snapshots + - Add RunScript AfterSnapshot event + - Fix #431 About upon upgrade, RPMs resets group membership + - snapshot: Display bsnapshot error message if possible + - Fix jobmedia-bug3 + - Set error code in return from run regress script + - snapshot: More work on LVM backend and on list/sync commands + - snapshot: Add EnableSnapshot directive in fileset + - snapshot: Add errmsg and status to SNAPSHOT_DBR + - 
snapshot: Send SnapshotRetention parameter to the Client and work on the + prune command + - Add bacula-snapshot.spec + - Add disabled=yes/no in bsnapshot.conf + - Fix #875 about bvfs repeats the same output many times + - Revert "Storing the result in a local variable from sql_num_fields saves us a + lot of callbacks." + - Remove passing args to cats driver class initialization + - Simplify cats by eliminating the B_DB_PRIV class + - Convert more db funcs to class calls + - Add Snapshot Engine to bacula core + - Change more db calls into class calls + - Add files missed in last commit + - Convert db_lock/unlock to be called via class + - Fix small memory leak + - Remove more vestiges of Ingres + - Fix #843 about "show storage" option missing in the help command output + - Use bzip2 for sles dependency + - Avoid warning with uninitialized variables + - update "help status" + - Revert "Small fix to Eric great patch for readline commandcompletion so it + also compiles on non gcc compilers." + - Separate out definitions into new header + - Remove bad restore.h + - Revert "Move restore struct defintions to seperate include file. Small change + to acl.h and xattr.h to use define inline with other header files." + - Revert "Fix MediaView::getSelection" + - Bat: ensure sufficient rows to display drives in storage display + - new MmsgDx() macro that combines Mmsg(errmsg, fmt, ...) and Dmsg in one + - add an ASSERTD() for DEVELOPER + - Fix wrong KiB value + - Revert "Fix bug #1934 Wrong values at media list in BAT" + - Change bplugin_list to b_plugin_list which is more appropriate + - Remove Ingres related unused files + - Simplify rwlock coding + - Make subroutine names clearer + - Back out useless patches + - Put back old code prior to excessive edits + - Remove over-complicated acl/xattr code + - Add license to files without any + - Fix #805 about nextpool command line argument not properly used + - Remove recursion from free_bsr() and free_bsr_item() to handle very large + BSR + - Avoid segfault in connect_to_file_daemon() when jcr->client is NULL + - #776 Volume created in the catalog but not on disk and #464 SD can't read an + existing volume + - Add schedule to show command tab completion + - Make global my_name hold 127 chars + - Mark file volumes that are not accessible in Error in mount_next_vol + - Fix #743 about bat permission conflict on /opt/bacula/etc + - Add copyright to Makefiles + - change in lockmgr.c to avoid the report of a memory leak in testls + - lib: integrate SHA2 into bacula + - Fix #747 about restore problem failing on "Unexpected cryptographic session + data stream" + - Revert previous copyright accidentally changed + - Fix btape fill command by removing some debug code in empty_block() + - Add Accurate Fileset option "M" to compare ctime/mtime with the save_time + like with normal Incremental + - Add index on Job(JobTDate) to mysql catalog + - Fix bad check on bopen_rsrc return status.
bug #2106 + - Do not stop the storage daemon startup if the File device is not yet accessible + - Fix double free in btape + - Fix failed mount request in btape fill test + - Avoid ASSERT() when using btape with vtape driver + - Possible fix for NULL client bug #2105 + - Fix compilation of Nagios check_bacula + - Add test for restict c99 in autoconf + - Allow to use device= option in release/mount/unmount command + - Fix #699 about duplicated job name when starting two jobs at the same time + - Fix #701 about status schedule missing from tab completion and correct job + filter + - remove autoconf/configre + - Fix #346 Add ipv6 support for Solaris + - Fix #692 about compatibility issue with community FD + - Fix new match_bsr patch + - Fix #588 Improve SD bsr read performance + - Fix ownership bug in html-manuals package + - Add EFS in the client status flag list + - Implement Win EFS Support + - Fix QT windows build for 32bit + - Add SLES113 to spec files + - Add @encode and sp_decode functions for plugins + - Fix tls-duplicate-job seg fault + harden pthread_kill() code + - Update plugin version to ensure 8.0 will not load 6.6 plugins + - Add JobBytes and ReadBytes to llist jobid= output + - Rewrite store_drivetype and store_fstype to allow a comma separated list of + items + - Fix #633 about JobDefs MaximumBandwidth Job inheritance + - Fix possible editing truncation due to 32 bit calculations + - Remove non-portable -ne in echo + - update po + - Add Makefile for mssql-fd plugin + - Improve error message of open_bpipe() on win32 + - Add jobid= parameter in .status dir running command + - Add worker states + - Pull latest worker files from development branch + - Add comment about incorrect scripting + - Put Dsm_check() on reasonable debug level + - Remove auto-generated tray-monitor.pro.mingwxx file + - Display message about MaximumBlockSize default value only if a value was + specified + - fix solaris : replace be64toh() by unserial_uint64() + - update SD <-> SD capabilities exchange + - Handle RestoreObjects with Copy/Migration jobs + - Add free list to worker class + - Fix bad caps with SDcallsClient + debug + fix seg fault on connection error + - Implement blowup=nn for FD and hangup+blowup for SD + - Correct bat copyright + - Change sizeof expressions to be more standard + - Remove regress trap that causes sd-sd-test to fail + - Dmsg was not handling tag anymore + - Fix for SD seg fault while swapping volumes + - Make bextract able to handle dedup streams + - Remove unused file + - Make sure mount_next_read_volume() will cancel the current job + - Forbid llist command in runscript + - Fix #295 about query file message + - Add no_mount_request to DCR + - Update Windows .def file + - Add spec file for redhat/suse html manual package + - Fix bug #2091 bad vtape device definitions + - Fix bug #2089 compiler warning + - Make sure level is tag free when printing debug message + - fix tags in Dmsg + - Regenerated configure script + - Remove spaces at the end of lines in Bat file + - Revert bat.pro.in file + - Fix recursive echo bug #2088 + - Add new fifo class flist.h/c + - Allow to create temp DEVICE from DEVRES + - For bat always use g++ + - Make selection by Volume Name or MediaId a bit clearer + - Optimize Dmsg() with tags by keeping current tags into a separate variable + - Make message more understandable + + +Release version 7.0.5 + +28Jul14 + - Fix #547 by adding .schedule command + +27Jul14 + - Update AUTHORS + - Fix bug #2079 with patch from Robert Oschwald + +26Jul14 + - Fix 
orphaned file descriptors during errors + - Yet another client==NULL + - Improve FD and SD cancel + - Jim Raney's TLS patch + - Update AUTHORS + - Fix bug #1679 pool overrides not shown in manual run display + - Attempt to avoid client==NULL + +23Jul14 + - Fix for bug #2082 (hopefully) + +22Jul14 + - Fix seg fault in jobq.c + +14Jul14 + - make stop after first error + +12Jul14 + - Increase status schedule days from 500 to 3000 + - Remove bad cherry-pick + - Fix compiler warning + - Allow options create_postgresql_database from patch in bug #2075 by roos + - Fix bug #2074 crashes when no conf file present + +10Jul14 + - Set pthread id in jcr at beginning so the job can be canceled. + - Fix possible heartbeat interval timing problems + +08Jul14 + - Fix some errors reported by valgrind. May fix the problem with bsmtp command. + - Ensure b_sterror() is using the correct size of input buffer + +07Jul14 + - Fix possible seg fault + +04Jul14 + - Fix segfault when trying to stop the bnet_server thread in terminate_stored() + +03Jul14 + - Fix bad link bug #2076 + +02Jul14 + - Fix compilation of bsock.c when TLS is not available + +27Jun14 + - Correct L suffix to be LL + - Fix bad copy/migrate data header + +26Jun14 + - On termination shutdown thread server + +23Jun14 + - baculum: Updated README file + - baculum: Update English language texts + - baculum: Saving auth file for web server HTTP Basic auth + - baculum: Added directory for web server logs + - baculum: Added example Lighttpd configuration for Baculum and sample web + server auth file + - Expanded auth error message + - baculum: Support for web servers which do not provide direct info about HTTP + Basic auth + +15Jun14 + - Fix limit bandwidth calculation + - Eliminate strcpy() from bsmtp + +12Jun14 + - Fix for configuring sudo option for bconsole access + - Display correct NextPool overrides + use Job NextPool in restore if available + +09Jun14 + - Fix Bacula to work with newer Windows pthreads library + +17May14 + - Fix bug #180 ERR=success in btape when tape error + +Bugs fixed/closed since last release: +1679 180 2074 2075 2076 2079 2082 547 + +==== + +Release version 7.0.4 +02Jun14 + - Better error handling for cancel command + +01Jun14 + - Fix compiler warning + simplify some #ifdefs + +22May14 + - Fix copy/migration to second SD + +19May14 + - Fix calls to sl.set_string() + - Improve sellist code + +===== + +Release version 7.0.3 +12May14 + - Fix error handling in do_alist_prompt + - Tighten error condition handling in sellist + - Add new cancel test + +06May14 + - Update LICENSE and LICENSE-FAQ + +03May14 + - Also update autoconf/aclocal.m4 + - Reschedule on error caused EndTime to be incorrect -- fixes bug #2029 + - Flush console queued job messages -- should fix bug #2054 + - Attempt to fix FreeBSD echo/printf, bug #2048 + - Update to newer libtool + config.guess + - Recompile configure + - Apply fix supplied for acl.c in bug #2050 + +01May14 + - Fix a SD seg fault that occurs with over committed drives + +28Apr14 + - Clear bvfs cache and set debug options available only for admin + - Moved auth params to curl opts + - Filtred single results for restricted consoles + +27Apr14 + - Removed unnecessary debug + - Changed e-mail address in gettext file + - Support for customized and restricted consoles + +15Apr14 + - Misc changes for rpm building (made by Louis) + +13Apr14 + - Updated requirements for Baculum + +12Apr14 + - Apply fix for bug 2049: wrong drive selected + +11Apr14 + - Fix #2047 about bthread_cond_wait_p not declared + 
+09Apr14 + - Fix Bacula bug #2044 -- fix Makefile for bplugininfo linking + - Fix Bacula bug #2046 -- sellist limited to 10000 + - Fix Bacula bug #2045 -- multiply defined daemon_event + - Fix Bacula bug #2020 overflow in btape -- Andreas Koch + +Bugs fixed/closed since last release: +2020 2029 2044 2045 2046 2047 2048 2050 2054 + + +Release version 7.0.2 +02Apr14 + - Remove more vestiges of libbacpy + - Put back @PYTHON@ path in configure + - Fix improper string in parser + - Remove libbacpy from rpm spec files + - Fix linking check_bacula + - Fix new SD login in check_bacula + - Tweek docs build process + +Release version 7.0.1 +31Mar14 + - Remove old plugin-test + - Update po files + - Enable installation of the bpluginfo utility + - More tray-monitor updates + - Add Simone Caronii to AUTHORS + - Align command line switches in manpages. + - Apply upgrade to config.guess + - Remove bgnome-console and bwx-console leftovers. + - Update tray-monitor header also for new bsock calls + - Attempt to fix nagios to use new bsock calls + - Update tray-monitor to new bsock calls + +========== +Release version 7.0.0 + +24Mar14 + - Add Josip Almasi to AUTHORS + - [PATCH] Support for restricted consoles in BAT config + - [PATCH] Fix for free director directive + - [PATCH] Fix auto-select restricted console for director in bconsole + - Realign output display + - Update ua_output.c from Branch-6.7 + - Add some missing Branch-6.7 updates + - Added needed empty directories to Baculum + - Fix for support PostgreSQL, MySQL and SQLite + - Framework adjusting to Baculum database connections + - Framework fix for lower case tables names in MySQL + - Fix for Baculum SQLite support + - Initial commit Baculum + - Add Marcin to AUTHORS file + - Strip trailing blanks + - Update copyright year + - Update LICENSE and header files + - Remove old file + - Add new header in misc files + - Remove tray-monitor bwx-console manual installation + - Remove FD python and examples + - Fixup spec files + - Remove pythonlib from lib + - Update package-list + - Fix SDCallsClient daemon synchronization + - Add debug code + make 127.0.0.1 same as localhost for tls tests + - Fix multiple DIRs in console + - Make failure for bat to connect to DIR non-fatal + - Fix bat style to one that works + - Take disk-changer from Branch-6.7 + - Simplify Version output + - Fix FDVersion for SD Calls Client test + - Update accurate test + - Update differential test + - Add new regress timing scripts + - Improve plugin make clean + - Implement regress FORCE_SDCALLS + - Remove win32 tray-monitor and wx-console directories + - Remove regress-config need only regress-config.in + - Add configure archivedir + - Improve SQL failure reporting + - Major backport from BEE to community + - Add copyright to mtx-changer.in + +Release version 5.2.13 + +19Feb13 + - Fix build/configure problems with bpluginfo.c + - Add missing vol_mgr.h file + - Add timer to run_multiple + +18Feb13 + - Refactor lock_volumes so most lock a vol rather than globally + - Add virtualfull-extreme test + +17Feb13 + - Apply patch for chio-changer-openbsd from bug #1984 -- Implements listall + - Add bat Mac patch from bug #1953 + +16Feb13 + - Fix bug #1812 cannot run Copy/Migrate jobs from bat + - Fix loading of bat translations. Patch from bug #1890 + - Fix text input in bat. 
Fixes bug #1965 + - Remove Device from show command as it is not used or updated + +13Feb13 + - Fix #1982 update enabled keyword in help command + +08Feb13 + - Fix update_postgresql_tables 10 to 11 + +30Jan13 + - Clean after building doc binaries requested by Willem vd Akker for Debian + build + +29Jan13 + - Remove unused method + - Require Qt version 4.8.4 + - Fix bug #1955 that OK to run? does not retry on bad response + - Backport new lock calls + debug for SD + +26Jan13 + - Remove old todo + +09Jan13 + - Fix bug #1975 new label ignored when first is duplicate. + +31Dec12 + - Require Qt 4.8.1 to build bat + +27Dec12 + - Make compile of timelimit a bit more portable + +26Dec12 + - Tweak turn off prune-test + - Pull src/lib changes from master + - Pull SD files from master + +25Dec12 + - Turn off prune-test + - Add logdir and bsrdir to regression config + - Add bsrdir and logdir to regress-config + - Rebuild configure + - Changed default directories from /var/bacula to /opt/bacula + - Fix patches from master + - Make spool size max message more explicit + - Display more info when maximum spool size is reached + +23Dec12 + - Fix TERM defs for Solaris 11 in conio.c + +21Dec12 + - Force DEVELOPER on during regression setup + +14Dec12 + - Allow group to cd to sysconfdir + +09Dec12 + - Make sysconfdir belong to Bacula user/group and exclude others + +07Dec12 + - Fix Virtual Full file close bug -- race condition + - Enhance mount message to include read/append + +06Dec12 + - Improve vol_mgr swap messages + - Add Jmsg7 and Jmsg8 + - Enhance output of Using Device to include for read/write + +04Dec12 + - Add jobid to Pmsg output + +29Nov12 + - Fix bug #1959 input validation on delete of jobs. + +24Nov12 + - Fix bug #1956 Authorization Errors false + +23Nov12 + - Add workaround for #5507 where autoprune and reload are in deadlock + +21Nov12 + - Allow conf file quoted strings to be used in a list + - Make bfgets handle very long lines (see the sketch below) + - Backport from Enterprise + +17Nov12 + - Avoid seg fault by checking for NULL client -- reported by Arno + +10Nov12 + - Final fix of bug #1943 + +08Nov12 + - Fix bug #1948 MailOnSuccess gets executed on error. + +07Nov12 + - Fix missing index on Media table + +06Nov12 + - Fix bug #1943 no message storage on closed database connection. + - Fix bug #1946 problem with MySQL with big Base jobs.
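The 21Nov12 entry above, "Make bfgets handle very long lines", describes the usual unbounded line reader: keep appending fixed-size chunks until a newline or EOF arrives. A minimal sketch of that idea, assuming a generic helper name and std::string instead of Bacula's own buffer types:

    #include <cstdio>
    #include <string>

    /* Append 1 KB chunks until the line is complete, so callers never
     * have to guess a maximum line length. */
    static bool read_long_line(FILE *fp, std::string &line)
    {
       char buf[1024];
       line.clear();
       while (fgets(buf, sizeof(buf), fp)) {
          line.append(buf);
          if (!line.empty() && line[line.size() - 1] == '\n') {
             return true;              /* complete line read */
          }
       }
       return !line.empty();           /* EOF: return any partial tail */
    }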
+ +02Nov12 + - Reduce wait time in regression RunScripts + - Attempt to indicate timed out tests + - Implement a timeout on regression tests of 15 minutes + - Fix #5346 .bvfs_lsfiles and .bvfs_restore to handle deleted files + +01Nov12 + - Fix spooldata, accurate and ingnoreduplicate run argument + +21Oct12 + - Add remote regress test script + +16Oct12 + - Fix messages segfault + +10Oct12 + - Recompile configure.in + - Implement feature request #1939 + +07Oct12 + - fix #1938 about PATH_MAX on hurd + - fix #1938 about MAXPATHLEN on hurd + +05Oct12 + - Fix bug #1937 OpenBSD autochanger example script + +25Sep12 + - Fix #4996 about MaxRunTime canceling the job too early + +19Sep12 + - Fix bug #1934 Wrong values at media list in BAT + +18Sep12 + - Fix race condition in close_msg that causes seg fault + +16Sep12 + - Fix double unlink + - More tests for freeing NULL pointers + +14Sep12 + - Add MaximumConcurrentReadJobs directive to Storage resource + +03Sep12 + - Add ujobid to .bvfs_get_jobids + +28Aug12 + - Do not try to strip RestoreObject during attribute encoding + - Avoid problem when stripping a relative path + +16Jul12 + - Fix #4513 about HIDDEN attribute set to parent directory during restore + +Bugs fixed/closed since last release: +1812 1890 1934 1937 1938 1939 1943 1946 1948 1953 1955 1956 1959 1965 1975 1982 1984 4513 4996 5346 5507 + + +Version 5.2.12 + +13Sep12 + - Fix accurate option in the estimate command + - Fix bug #1932 director crash. + +Version 5.2.11 + +10Sep12 + - Add JobId to SD debug output + - Commit batch session every 800,000 files + - Simplify safer delete code using a single regex + +08Sep12 + - Clean up error message generation during update attributes -- should fix bug + #1823 + - Eliminate heartbeat error messages. Fixes bug #1925 + +07Sep12 + - Set bsrdir default to workingdir + - Recompile configure.in + - Apply patch from bug #1911 to implement --with-logdir + +06Sep12 + - Add chio-changer-openbsd submitted in bug #1903 + - mysql database install/update scripts patch. Fixes bug #1901 + - Delegate uid/gid changing to systemd. Fixes bug #1905 + +05Sep12 + - Fix bug #1906 + - Fix manpage errors, fixes bug #1907 + - Final fix for #1859 missed one query. + +30Aug12 + - bpluginfo - Utility tool display various information + +19Aug12 + - Backport more master code + - Fix Makefile.in so that testfind builds with acl dependency + +18Aug12 + - Make dump_resource respect console ACL's + - Backport more from master + +17Aug12 + - backport code from master + +16Aug12 + - Fix #1923 about MySQL 4 support for BVFS + +05Jul12 + - Fix #1902 about bad status slots output + - Fix Sigma Copy bug #4377 + +03Jul12 + - Fix bsmtp + - Reset bsmtp to only send to IPv4 mailhosts. + +Bugs fixed/closed since last release: +1823 1859 1901 1902 1903 1905 1906 1907 1911 1923 1925 4377 + + +Version 5.2.10 + +27Jun12 + - Add extra test for unsupported Protocol Families. + +26Jun12 + - Fix inet_pton call. + - Real fix of bug #1897 5.2.9 breaks IPv6 connectivity + - Fix bug #1891 wrong daemon name printed with -? option + - Fix bug #1859 INSERT INTO Counters fails + +25Jun12 + - First try at fixing bug #1897 5.2.9 breaks IPv6 connectivity + +23Jun12 + - Convert write_rec_to_block() to a state machine + +18Jun12 + - Fix bat from eating all Dir connections -- fixes bug #1872 + - Back port fixes from BEE to fix Windows bug #1892 + +15Jun12 + - Add block checksum flag to version.h + - Add block checksum debug code + - Fix bug #1893 dbcheck -B gives rwl_writelock error. 
+ +13Jun12 + - Remove restriction of network buffer size being multiple of a tape block + +11Jun12 + - Recompile configure.in + - Add rudimentary support for saving AFS acls. + +04Jun12 + - Send level before the fileset in estimate_cmd() like in backup() + +Bugs fixed/closed since last release: +1859 1872 1891 1892 1893 1897 + + +Version 5.2.9 +11Jun12 + - Remove patch: Apply readline without TERM_LIB dependency patch from bug #1871 + +Version 5.2.8 + +10Jun12 + - Apply readline without TERM_LIB dependency patch from bug #1871 + - Ensure cancel status is set for canceled duplicate jobs -- partial fix for + bug #1851 + - Enhance Win32 error messages -- fixes bug #1879 + +09Jun12 + - Apply patch from bug 1876 to permit dot commands in a runscript + - Remove config variables from examples/devices fixes bug #1884 + - Add more debug code for bug 1885 + - Add more debug to block.c when read rejected + - Fail job that attempts to read from closed device -- reported in bug 1885 but + not fixed + - Detect Python 2.7 fixes bug #1888 + - Recompile configure + - Apply systemd patch from bug #1886 + +08Jun12 + - Rewrite some SD subroutines as class members + +05Jun12 + - New overhaul of xattr code. + +03Jun12 + - Use IPPROTO_TCP for SOL_TCP if SOL_TCP isn't defined. + - Fix compiler warning. + +Bugs fixed/closed since last release: +1851 1871 1879 1884 1886 1888 + + +Version 5.2.7 + +02Jun12 + - Correct Qt version needed for bat + +01Jun12 + - Turn on SD deadlock detection by default + - Fix bug #1880 ltdl.m4 missing for new feature. + - Fix bug #1870 GNU binutils-gold build failure. + - Fix bug #1874 crash with xattr support on a BTRFS filesystem + +31May12 + - Add new empty netinet/tcp.h so Windows compile works + - Eliminate compiler warning in stored/dircmd.c + +30May12 + - Fix TCP Heartbeat code + +28May12 + - Align conio.c struct at 8 bytes to fix Sparc bus error. + +18May12 + - Fix get_basename() -- rewrite + +15May12 + - Apply James' exchange-fd.c regression fix + +05May12 + - Add first Dutch translation + +30Apr12 + - Ensure that StorageId is updated after write + - Fix updating of cached slots + error message numbering + +29Apr12 + - Remove old shared objects before install + +22Apr12 + - Add missing dummy bVarPrefixLinks variable retrieval. + - Rebuild configure. + - Fix mysql config for older versions of mysql+config. + +20Apr12 + - Fix problem in BVFS with concurrent queries + - Add more checks to bvfs + +19Apr12 + - Try to use multi-row insert statements for mysql. + - On some platforms intptr_t is in an other include file. + - Add support for fcntl(fd, F_CLOSEM) to close filedescriptors. + - Use closefrom if available instead of trying to close fds ourself. + - Fix class member shadowing. + - Move storages tls initialization to proper place. + - Fix Copyright on init scripts suse, add specific daemon user & group + - Add support for new POSIX getaddrinfo interface. 
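The 19Apr12 entries above about closefrom() and fcntl(fd, F_CLOSEM) describe the usual portability ladder for closing inherited file descriptors: use a single system call where one exists and fall back to a brute-force loop otherwise. A hedged sketch of that ladder; the HAVE_CLOSEFROM guard and the function name are illustrative, not the actual Bacula code:

    #include <fcntl.h>
    #include <unistd.h>

    /* Close every descriptor >= lowfd using the cheapest available method. */
    static void close_descriptors_from(int lowfd)
    {
    #if defined(HAVE_CLOSEFROM)
       closefrom(lowfd);                  /* Solaris, *BSD, newer glibc */
    #elif defined(F_CLOSEM)
       fcntl(lowfd, F_CLOSEM, 0);         /* AIX, IRIX */
    #else
       long maxfd = sysconf(_SC_OPEN_MAX);
       for (int fd = lowfd; fd < maxfd; fd++) {
          close(fd);                      /* last resort: one by one */
       }
    #endif
    }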
+ - Recompile configure.in + - Apply MSQL detection fixes from bug #1829 + - Recompile configure.in + - Fix bug #1805 cannot specify hostname in ./configure + - Require latest Qtlibs + - Fix restore_job restore keyword + +18Apr12 + - Allow plugin to backup a directory + - Print seconds in schedule time as noted in bug #1854 + - Change dev->open() to return bool + +17Apr12 + - Fix #1857 about restore cd command that blocks on empty directory + +15Apr12 + - Backport new StorageId code + +14Apr12 + - More definitive fix for update slots bug + +13Apr12 + - Fix old update slots bug + +12Apr12 + - Fix #4062 on make_catalog_backup.pl script when user/password are empty + +28Mar12 + - Fix bug #1853: bacula-sd dead but pid file exists. + +23Mar12 + - Fix bug #1841 estimate command level=differential goes wrong in accurate + mode. + +22Mar12 + - Fix bug #1842 Incorrect port number reported + - Fix bug #1848 bsock.c:335 Socket has errors=1 on call to client + +10Mar12 + - Force the re-initialization of BaseJobOpts, AccurateOpts and VerifyOpts + between two Include{} + +08Mar12 + - Add test-deltaseq source + - Rename delta-test plugin to test-deltaseq + +Bugs fixed/closed since last release: +1805 1829 1841 1842 1848 1853 1854 1857 1870 1874 1880 4062 + + +Version 5.2.6 + +17Feb12 + - Fix old exchange-fd plugin Accurate checkFile code. + +16Feb12 + - Insert the slot field as a numeric field. + - Update Change log and Release notes + +15Feb12 + - Fix #1831 by dropping the table before creating it + +14Feb12 + - Make cd accept wildcards + +13Feb12 + - First cut wild card in restore cd command -- works in one component only + +10Feb12 + - Remove bad optimization from Accurate code + +04Feb12 + - Lock read acquire in SD to prevent to read jobs getting the same thing + +03Feb12 + - Complicate the prune-migration regression by adding one more job + - Implement more robust check in other drives for tape slot wanted + - Add more debug to failing test + - Fix lost dcr point -- memory loss in Copy/Migration + possible confusion + - Make copy-job test handle one more job + +02Feb12 + - Ensure that bvfs SQL link is not shared + +31Jan12 + - Fix error printing in acl and xattr code. + +29Jan12 + - Backport better error debug output for sd plugins. + +28Jan12 + - Add wait on bad connection for security + - Make mtx-changer more fault tolerant + - Fix 32/64 bit problems in SD sscanf commands + - Fix 32/64 bit problems in scanning commands in SD + +26Jan12 + - Skip certain filesystem types on some platforms. + - Allow BVFS to browse and restore Base jobs + - Add error message to .bvfs_clear_cache command + +20Jan12 + - Fix plugin bug with multiple simultaneous jobs + +Bugs fixed/closed since last release: +1831 + +Version 5.2.5 + +25Jan12 + - Disable usage of JS_Warning waiting for a more complete implementation + +24Jan12 + - Removed old sd plugins which doesn't work anymore. 
+ +23Jan12 + - Adapt sample-query.sql for Warning job status + +20Jan12 + - Fix location of Qt dlls for Win64 build + +18Jan12 + - Fix MediaView::getSelection + +Bugs fixed/closed since last release: +1824 (1814 1806 1818 non-bugs) + +Version 5.2.4 + +18Jan12 + - Prepare for adding IPv6 to Windows in the future + - Fix bug #1822 Schedule run line parsing fails if value is keyword + - Revert to 5.0 size computation in accurate elements, may fix #1821 + - Mark that a file was found in the accurate list in the ff_pkt with a + variable + +17Jan12 + - Fix Windows build + - Update po files + - Definitive fix for MySQL MaxValue problem + - Fix python module compilation + +15Jan12 + - Initialize delta_seq to -1 because 0 means that we have already a version + - Back out bad MinValue patch + +14Jan12 + - Fix bug #1810 use single quotes instead of double quotes for MinValue and + MaxValue in catalog + +11Jan12 + - Adapt bfileview for bvfs tables + - Set job status to running when restore job really starts + +10Jan12 + - Remove ifdeffing that turned off JS_Warning status -- must have been missed + test code + - Add support for soname setting in libtool. + +09Jan12 + - Make error somewhat more understandable. + - Fix bug #1815 + - Fix systemd pid files. + +08Jan12 + - Fix const char compiler warning + +07Jan12 + - Limit migration/copy jobs to starting 100 at a time + - Avoid conflict with PAGE_SIZE define + +06Jan12 + - Fix warning reported by clang + - Send previous Job name during Incremental/Differential to the FD + - Fix bug #1811 Filed fails to backup more then one xattr. + - system.posix_acl_default is also an acl stored as xattr + - Don't copy xattr and acl streams. + - Fix bug #1807 + - Recompile configure + - Add configure for new redhat spec files + - Backport Enterprise spec files + - Remove unnecessary break and unused variable + - Fix xattr/acl regression script. + +05Jan12 + - Fix bad memset() reported by clang + +04Jan12 + - Move ini.c to libbaccfg from libbac + - Test new plugin function in bpipe and test plugin + - Try to fix openssl problem with signal + - Add BSOCK::set_killable() + - Set directory attributes when using Replace=Never when Bacula creates them + - Use RestoreObject type in Catalog + - Add new features in plugin + - Add helper for ini files + - Implement run accurate=yes/no + - Add .bvfs_clear_cache + - Add Accurate and MaxRunSchedTime in Schedule resource + - fix warning + - Fix xattr/acl regression. + +03Jan12 + - Recompile configure + - Fix bug #1802 configure code to detect LZO is broken + - Fix bug #1806 failure to save second and more xattr. + - Add second xattr to show bug 1806 + +02Jan12 + - Enhance sellist to allow rescanning the list + - Fix lib/sellist.c + +01Jan12 + - Remove double include of sellist.h + - First cut selection list + +31Dec11 + - Fix seg fault in plugin event loop + +18Dec11 + - Implement slow regression for manual testing things like cancel/stop + - Fix slow regress setups + +16Dec11 + - Fix #3210 about slow restore with bvfs + +Bugs fixed/closed since last release: +1802 1806 1807 1810 1811 1815 3210 + + +Version 5.2.3 + +15Dec11 + - Revert patch from Ben to not compress man pages on Solaris fix #1801 + +14Dec11 + - Fix bug #3322 don't poll when operator is labeling tapes + +13Dec11 + - bvfs: Fix mysql REGEXP operator + +12Dec11 + - Fix Mysql 5.5.18 symbol lookup + +08Dec11 + - fix utf8/iso8859 problems + +06Dec11 + - Fix #3419, update stats problem + +05Dec11 + - Fix cats_test to use the new method names. 
+ - Update accurate code to use 1 byte instead of 3 + - Fix segfault in accurate code + +03Dec11 + - Fix compilation warning on FreeBSD + +Bugs fixed/closed since last release: +1801 3322 3419 + +Version 5.2.2 +23Nov11 + - Implement -t option for Bat + - Require correct Qt version to build bat + - Get new lib/plugins.h + - Pull files from Master + - Add systemd files + - Update po files + - Fix script to set mode of cats scripts + - Create systemd dir if not already exists + - Add Systemd configure options + +17Nov11 + - Split messages line by line before sending it to syslog() fix #3325 + +16Nov11 + - Fix bvfs_restore on MySQL + - Fix #3308 where a SMTP problem can block the director + +12Nov11 + - Change locking scheme of the mountpoint cache. + +11Nov11 + - Enhance mountcache with rescan option after interval. + +10Nov11 + - Add %D option to edit_job_code, simplify callbacks on director side + +09Nov11 + - Move Zdeflate and Zinflate to seperate file. + - Put libraries in the correct order for non shared libs. + +07Nov11 + - Remove last traces of @SQL_BINDIR@ and @DB_TYPE@ + - Fix bug #1774 + - Move batch insert detection into db.m4 + +06Nov11 + - Free jcr in btape before other pointers + +04Nov11 + - bat: try to fix offset button problem when clicking too much on previous + - bvfs: Use single transaction for each job during update + +02Nov11 + - Add missing bwild & bregex man8 pages + - Fix compilation issue of wx-console #1778 + +31Oct11 + - bvfs: fix filter for pattern= bvfs parameter + - Improve speed of BVFS with SQLite, Thanks to J.Starek + +30May11 + - bvfs: add clear_cache function + - bvfs: Handle windows drive when building path hierarchy + +Bug fixes +1774 1778 3308 3325 + + +Version 5.2.1 + +30Oct11 + - Don't define HAVE_POSTGRESQL twice + - Don't use the -R (runtime link path) when not using libtool for the linking. + +29Oct11 + - Fix detection of batch insert enabler functions. + +29Oct11 + - Add script to list authors + - Update AUTHORS + - Add new updatedb files + - Make PurgeMigrationJob directive name correspond to doc + - Put Win exchange plugin debug code on level 100 + - Remove old gnome console files + - Update po files + +28Oct11 + - Fix bug #1771 BAT crashes. + +27Oct11 + - Fix regression on Solaris. + +25Oct11 + - Prohibit multiple Jobs from writing state file at same time + - Apply autochanger temp file security fix to examples files + - Update date + +23Oct11 + - Fix to Windows socket error detection -- should fix bug #1770 + +20Oct11 + - Fix regression in keyword of migrate pool patch + +19Oct11 + - Try to eliminate multiple blank lines output in bat + - Remove Version browser from bat (broken) -- use brestore panel instead + +16Oct11 + - Update Specs and ReleaseNotes + - Update po files + - Eliminate complier warning + - Pull files from master + - Fix small things in Windows depkgs build scripts + - Update autoconf conf file for Win32 + - Fix Win32 build after new debug patch in smartall.c + - Ensure log files in first backup are restored + +11Oct11 + - allow to use ./sign alone + - Modify ./release/sign to be able to use an other gpgkey (idea from Bruno) + - Send level command before fileset as sugested by Bastian. Fix #1768 + - Enhance smartalloc abort + +10Oct11 + - First attempt to fix vtape-autoselect-test bug + - Add more info to tape error msgs + +26Sep11 + - Fix lurking bug in match when db_driver is not set (everything but DBI). 
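The 17Nov11 entry above, "Split messages line by line before sending it to syslog() fix #3325", is about multi-line job messages turning into a single mangled syslog record. A generic sketch of the per-line dispatch it describes; the function name and priority handling are assumptions:

    #include <cstring>
    #include <syslog.h>

    /* Emit one syslog record per line of a possibly multi-line message. */
    static void syslog_each_line(int priority, char *msg)
    {
       for (char *line = strtok(msg, "\n"); line != NULL;
            line = strtok(NULL, "\n")) {
          syslog(priority, "%s", line);
       }
    }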
+ +23Sep11 + - Fix bug #1764 plugin_list shadows global variable of mysql 5.5 + +19Sep11 + - Update auth troubleshooting URL to use MANUAL_AUTH_URL macro + - Define MANUAL_AUTH_URL in baconfig.h + +17Sep11 + - Fix #1762 about bat version browser performance problem + +16Sep11 + - On restore we don't have a full FF_PKT so we lstat the file. + - Add XATTR and ACL flags for backup and restore. + - Lower some messages from level M_ERROR to M_WARNING in acl/xatttr. + +07Sep11 + - Fix spooldata and ignoreduplicates run command options to use JCR instead of + the Job resource. + +30Aug11 + - Fix #1761 about create_postgresql_database + +22Aug11 + - Apply htmldir fix provided by Philipp + +20Aug11 + - Fix possible buffer overrun in exchange plugin + +17Aug11 + - Add help for "use" command as suggested by Thomas Mueller + - Fix error message with bad dot commands + +16Aug11 + - Drop unused sqlite_dump function from make_catalog_backup.pl + - Adapt make_catalog_backup.pl for backend + +09Aug11 + - Fix backtrace detection in configure + +06Aug11 + - Make bat run dialog present only allowed levels + +05Aug11 + - Fix bat seg fault in FileSet view + +Bug fixes +1389 1444 1448 1466 1467 1468 1476 1481 1486 1488 1493 1494 1497 1499 1501 +1502 1504 1509 1511 1513 1516 1524 1526 1527 1532 1536 1538 1541 1542 1549 +1551 1553 1554 1558 1559 1560 1564 1567 1568 1569 1571 1574 1577 1581 1582 +1584 1587 1594 1595 1600 1601 1602 1603 1604 1606 1608 1610 1612 1623 1624 +1633 1643 1648 1655 1661 1664 1666 1669 1672 1675 1684 1685 1695 1696 1699 +1700 1703 1735 1741 1749 1751 1761 1762 1764 1768 1770 1771 2710 + +Version 5.2.0rc1 + +08Jul11 + - Fix libtool definition and make the helper script executable. + - Use a helper script to link the correct database backend. + - Sync libtool to 2.4 version. + - Fix #1741 about possible problems with fnmatch + +07Jul11 + - Add missing stream in bscan, fix #1749 + - Change intmax_t to int64_t to fix #1664 + - Fix compilation with gcc 4.6.1 + +06Jul11 +- Add mutex priority check for changer mutex +- Fix deadlock with autochanger +- Fix #1602 about Uninstall /S that should not prompt for user interaction + +05Jul11 +- Change JobFiles display from %f to %F in RunScript because %f was already + affected to Fileset name + +28Jun11 +- Update config.h.in for LZO flags +- Fix brestore compilation from previous patch + +21Jun11 +- Solaris make doesn't like comments in make rules. +- Fix OSX acl regression test. +- Storing the result in a local variable from sql_num_fields saves us a lot of + callbacks. + +15Jun11 +- Add extra check for pInfo size when loading plugin + +14Jun11 +- Fix [bs #2710] about Storage combo that is too small +- Make bad Storage check in is_on_same_storage non-fatal + +04Jun11 +- Alter the manpages install target to handle optional compression +- Add barcodes help to label command +- Remove class member shadowing. +- Eliminate some old bnet code + +03Jun11 +- Fix small memory leak when job get canceled. + +01Jun11 +- Fix small memory leak in dbcheck not calling free_pool_memory for name + variable. +- Remove global variable shadowing by local variable. + +31May11 +- Fix the lockmgr test tool +- Fix dbcheck to use the new runtime checks for mysql and not compile time. +- Restore db_type printing to dbcheck -B + +30May11 +- Fix unitialized DeltaSeq during Verify jobs +- Make delta_seq same size as in other places +- Remove broken code + +18May11 +- Add LZO compression support in bacula-fd. 
+- Add COMPRESS_MANPAGES substituted variable to autoconf + +17May11 +- Allow va_arg in Dmsg/Jmsg director plugin functions + +16May11 +- Add db_strtime_handler to get DATE fields from database +- Drop AFS detection. +- AC_CHECK_DECL doesn't seem to work so lets do it somewhat differently. + +13May11 +- First attempt at fixing bug #1735 where acls on OSX doesn't seem to reach the + backup. + +10May11 +- Drop old unused protypes from before backend refactoring. + +04May11 +- Don't force job duplicate checking on copy and migration jobs. +- Simplify the code path in migration and copy jobs +- Allow duplicate job check override from run cmdline for migrate and copy + jobs. +- Initialize spool_data_set as boolean. +- Add stat packet size to encode/decode_ routines to detect compile differences + +29Apr11 +- Drop unused variable. +- Make the solaris init scripts user/group aware + +30Apr11 +- Correct fix for bat brestore crash +- Kludge fix to bat brestore crash + +29Apr11 +- bat: Call parent constructor in all pages +- Add debug to bat label seg fault bug + +21Apr11 +- Use user arguments when upgrading catalog +- Change all sm_checks into Dsm_check for performance reasons +- Free database results on class destruction (e.g. when reference count == + 0) + +20Apr11 +- Allow custom variables and checks from debian/ubuntu startup scripts + +19Apr11 +- Fix couple of g++ warnings + +15Apr11 +- Ensure the directories for solaris init scripts are created +- Honour DESTDIR in Solaris install-autostart targets +- Collapse Solaris install-autostart targets + +13Apr11 +- Update AC_INIT use to make configure handle docdir properly + +19Apr11 +- Some more use DeltaSeq instead of MarkId + +16Apr11 +- Update Catalog version and use DeltaSeq instead of MarkId +- Fix Sqlite driver seg fault + +13Apr11 +- Fix #1612 about checksum for hardlinks + +02Apr11 +- Some more code sniplets of no need to call thr_setconcurrency anymore. +- For Solaris 9 and higher there is no need to call thr_setconcurrency anymore. +- Execute the Index creation for mysql in bvfs.c +- Use B_ISSPACE in scan.c and reformat comments a bit. 
+ +06Apr11 +- Fix nanosleep for Windows +- Use %s when displaying bandwidth limits in FD status + +03Apr11 +- Fix reference to uninitialized stack variable +- Remove FATAL error for ignored events in old Exchange plugin + +02Apr11 +- Fix uninitialized stack variable in bextract +- Remove temp index on start and term of dbcheck + +28Mar11 +- Fix brestore that should have miscDebug enabled to start a restore job + +10Mar11 +- Fix kb/s to kB/s in FD output +- bacula-web: Sync with Davide repos + +07Mar11 +- Fix for duplicate jobmedia records bug #1666 +- Fix bug #1703 unable to bextract compressed files + +06Mar11 +- Fix some esc_obj handling in DBI and wrong free in postgresql.c + +05Mar11 +- Get full Windows Version display string +- Fix seg fault in PostgreSQL driver code +- Remove _ in restore_job +- Attempt to disactivate old exchange-fd.dll if no plugin= line in FileSet + +03Mar11 +- Add tray monitor to windows installer +- Fix QT tray monitor compilation on windows + +02Mar11 +- Fix tray-monitor qmake project file + +01Mar11 +- Add extra Bacula plugin variables + +26Feb11 +- Fix Windows build entrypoints + +25Feb11 +- Add new maxuseduration-test +- Fix bug #1389 MaxUseDuration uses job start instead of first write time +- Better fix for bug #1603 restart of Virtual Full + +22Feb11 +- Use system malloc in strack_trace() instead of smartalloc + +24Feb11 +- Fix bug #1603 restart of Virtual Full becomes a Full +- Use jcr->is_xxx instead of direct tests +- Fix bug #1608 btape test failure when block size too big +- Fix week of year schduling bug #1699 +- Comment out unused variables in tray-monitor +- Fix uninitialized variable in bat +- Fix lock race conditions in bug #1675 +- Rename incomplete to rerunning for clarity + +23Feb11 +- Fix bug #1700 no timestamp with -dt on Windows +- Use jcr->setJobStatus() in favor of set_jcr_job_status(jcr...) +- replace set_Jobxxx by setJobxxx + +21Feb11 +- Add more debug to restart regress scripts +- Add stack_trace() function to print current thread backtrace +- Add HAVE_BACKTRACE in configure.in + +16Feb11 +- Add %h option in runscript to get client address + +21Feb11 +- Fix seg fault during cancel in SD + +19Feb11 +- Discard old messages in beginning of maxtime-test + +18Feb11 +- Fix #1696 about an error when displaying "Base" level keyword. + +14Feb11 +- Fix #1695 about bacula-sd crash in detach_dcr_from_dev() + +08Feb11 +- Rename ua->err to ua->errmsg + +07Feb11 +- B_DB is a class now not a struct lets fix the include files. +- Check some user inputs in purge and restore commands +- Check if resource name is valid in acl_access_ok() +- Avoid extra strlen() in is_name_valid() +- Add negative numbers to bsscanf +- Apply get_basename to printed filenames to reduce unnecessarily long paths + +06Feb11 +- Add MAKEOPT to conf for regression +- Fix double query in db_list_basefiles() +- Fix chgrp on bacula-x.conf +- Allow using sql_fetch_field() in db_sql_query() callback for SQLite + +05Feb11 +- Use old list_result() in db_list_xxx for good formating + +04Feb11 +- Increase Windows backup/restore privileges possible fix to Virtual Disk + ticket + +05Feb11 +- Use db_escape_string() in all db_xxx functions +- Pull src/lib/Makefile.in from master + +03Feb11 +- Add configure magic for detecting getpagesize +- Implement mntent_cache on top of the new htable code using a small htable big + buffer of 128 Kb to start with. 
+- Added support to htable for giving a hint on the number of pages to allocate + for the buffer used for hash_mallocs so we can have htables without the + previous default of 10 Mb. + +05Feb11 +- Implement chgrp on bacula-x.conf if user configs a Dir/SD/FD group +- Rebuild configure +- Remove old scripts no longer used +- Add patch from bug #1574 for Scientific Linux spec +- Apply patches from bug #1672 to clean up configuration +- Check if volume name is valid in select_media_dbr() +- Small change to exporting functions not found in include files on some + platforms. Wrap it as one extern "C" block just like its done on most platforms + in the proper include files. +- Fixed some compile errors in IRIX and TRU64 xattr code which is kind of hard + to test without a proper platform. Compiled it now using some tricks on an + other platform defining it is the given platform and fix the compile errors + that way using a fake header file. + +04Feb11 +- Improve bat stability by checking if page valid +- Fix segfault with print_memory_pool_stats() +- Drop last SQL_INC leftovers. Due to backend abstraction there is no need to + include backend specific include files anywhere then in the backend driver + which use now per backend specific include directives. SQL_INC is not set by + configure anymore and should not be used anymore. + +03Feb11 +- Remove reference to @SQL_INC@ in tools Makefile.in +- Pull in lib.h from master so new htable code compiles +- Use C99 __VA_ARGS__ instead of GCC extention ##__VA_ARGS__ +- Adapt bvfs for SQLite3 +- Adapt htable code to 64 bit keys +- Rename db_list_ctx.cat() to db_list_ctx.add() +- Fix bvfs for mysql + +01Feb11 +- Add simple way to add string elements to db_list_ctx + +03Feb11 +- Add code to trim heap after big mallocs + +01Feb11 +- Close cursor in big sql query + +30Jan11 +- Fix bad character in cats/sql_cmds.c + +31Jan11 +- Removed old queries from sql_cmds.[ch] which are no longer referenced and + thus polluting the namespace for no obvious reason. +- Fix possible core-dump as seen in regression testing + +28Jan11 +- Ensure that we always close the transaction in db_big_sql_query() +- Implement db_big_sql_query() that uses cursor on PostgreSQL and limit memory + usage in db_list_xxx +- Detect mount/junction points and ignore junctions in Windows + +25Jan11 +- Added set_db_type function to set global debug symbol removed when doing + class based catalog backends. Coding is a bit different as we need get_db_type + which needs a bdb class so we have to set it as part of the loop over all + catalogs. This way we may set the global variable a couple of times but the + code frees the global variable if needed so other then being somewhat overkill + no harm is done. +- Drop removed catalog function prototype. +- Fix logic inversion. +- Add more ingres code +- Adapt bbatch and cats_test tool to new cats + +18Jan11 +- Backport changes for Windows compiling +- Backport one more patch into master from class based catalog backend code. +- Backport of class based catalog backends into Branch-5.1. 
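The 03Feb11 entry above that switches from the GCC ##__VA_ARGS__ extension to C99 __VA_ARGS__ trades a GCC-only comma-swallowing trick for the portable form. A hypothetical pair of debug macros showing the difference; d_msg and the macro names are placeholders, not the real Dmsg plumbing:

    /* GCC extension: '##' removes the trailing comma when no arguments
     * follow the format string. Not portable. */
    #define DMSG_GNU(level, fmt, ...) \
       d_msg(__FILE__, __LINE__, level, fmt, ##__VA_ARGS__)

    /* C99: fold the format string into the variadic list, so there is
     * never a dangling comma to remove. */
    #define DMSG_C99(level, ...) \
       d_msg(__FILE__, __LINE__, level, __VA_ARGS__)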
+ +25Jan11 +- Fix #1684 Use --with-db-password option in grant_bacula_privileges scripts + +23Jan11 +- Free unused pool memory after restore +- Add pool memory debug output + +13Jan11 +- Fix build of Windows bat -- Qt depends on libgcc_s_dw2-1.dll + +23Jan11 +- Change accurate CurFile allocation size + +19Jan11 +- Add more tests on cats_test +- Add cats_test unit tests + +15Jan11 +- Fix #1685 about JobBytes counter when using Accurate/BaseJobs with checksum + +12Jan11 +- Ignore replace=xx for directories. Corrects (not total solution) ticket 2317 + and bug #1444 +- Add restore replace=never test ticket 2317 +- Add replace=xxx on restore command line + +10Jan11 +- Add restore_job= option to restore command + +06Jan11 +- Add windows tray monitor to bat make-win32 script +- Adapt traymonitor qmake project file for win32 +- Add cross-win32 target to qmake template +- Add new QT traymonitor +- Add qt traymonitor files to configure.in +- Add transparent icon with tape + +05Jan11 +- Fix #1661 about verify differences with VERIFY_VOLUME_TO_CATALOG + +02Jan11 +- Remove tray-monitor from Win32 build +- Fix some double Win32 #ifdefs +- Fix missing HAVE_LITTLE_ENDIAN for Win32 build + +30Dec10 +- Add helpers to add drives in VSS snapshot from plugins +- Make new crc32.c work on FreeBSD +- Fixes some problems in update_sqlite3_tables.in + +29Dec10 +- Handle all FD version in check_bacula +- Fix compilation warning in check_bacula + +21Dec10 +- Make new bat progress view compatible with previous FD +- Keep the same keywords as in previous version + +20Dec10 +- Modify Job view to follow backup progress in real-time +- Update ".status client running" output for new Bat screen + +25Dec10 +- Ensure that Job duration is not negative +- Add Slot to Media View table + +20Dec10 +- Fix compiler warning. + +19Dec10 +- Set proper status of autochanger slots with 0 on startup -- no volume rather + than unknown + +14Dec10 +- Add indentation in lsmark command output + +17Dec10 +- Change SELECT count(*) to SELECT 1 .. LIMIT 1 to speed up PostgreSQL and + Innodb in is_volume_purged() + +01Dec10 +- Fix #define when using --disable-smartalloc + +17Dec10 +- Track unbalanced start/end plugin stream + +12Dec10 +- Try to bug where we stop reading before the plugin end is returned +- Fix end plugin stream index + +16Dec10 +- Add missing switch values although the code works this is cleaner. Give + better error when trying to restore a particular type of acl on a filesystem + without support for those types of acls. + +15Dec10 +- Added support for NFSv4 ACLs added to FreeBSD 8.1. Some small updates to some + comments of other ACL implementations. +- Added additional extended attributes used under FreeBSD for the storage of + ACLs. 
+ +14Dec10 +- Allow closing of all tabs + close tab clicked + +13Dec10 +- Add short-incremental regression test + +12Dec10 +- Print plugin end records in bls +- Center bat run window on screen + +03Dec10 +- Fix #1669 making director's address required in traymonitor.conf + +27Nov10 +- Update bat console display only if new text comes in +- xPut debug code on test + fix focus on Volume in label dialog box + +12Nov10 +- Improved error message when drive busy +- Ensure that bat.conf is updated when bconsole.conf is + +09Nov10 +- Make Win32 service name include the word Backup + +25Nov10 +- Add PurgeMigrateJob Job resource option to purge job on successful migration + +24Nov10 +- Move accurate test in BackupCommand for delta and delta-test plugins + +25Nov10 +- Add delta_seq available for plugin restore + +24Nov10 +- Prepare for adding high bits to Stream +- Use new FO_xxx bits in backup/restore and plugins +- Define new stream bits +- Separate definitions of streams, filetypes, and fileoptions to separate + files +- Rename bEventVssSnapshotLetters to bEventPrepareSnapshot +- rename plugin->len to plugin->file_len + +23Nov10 +- Handle all kind of plugins when computing plugin name length +- Allow plugins to add drives to vss snapshot +- Ensure that two plugins starting with the same name won't share events +- Don't define DLL_IMP_EXP twice in fd_common.h + +22Nov10 +- Add execute bit to systemstate test + +21Nov10 +- Update delta plugin to raise error if we get delta back melted +- Insert Delta in bootstrap by JobTDate +- Modify MySQL accurate query with Delta +- Add missing MarkId in mysql accurate query +- Add delta option to db_get_file_list() + +20Nov10 +- Support restore with Delta in Director +- Add delta_seq to restore tree code +- Add db_get_file_list_with_delta() for restore +- Fix delta-test-fd +- Ensure that sparse stream is set when delta enabled +- Make non accurate fatal for delta-test plugin +- Update bfile.offset when with plugin offset +- Update delta plugin +- Add delta-test plugin +- Pass Delta Sequence to save_pkt and restore_pkt +- Clear delta sequence when not using in plugin +- Remove spaces, try to stop FD faster on cancel +- Implement incrementation of delta_seq (I think) +- First cut Delta code in FD + +19Nov10 +- Don't compute accurate list with MD5 if not used +- Use JobTDate instead of StartTime, and sort by JobTDate instead of JobId + +05Nov10 +- Avoid warning for win64 + +19Nov10 +- Fix tray-monitor compilation + +04Nov10 +- Fix check_nagios compilation + +18Nov10 +- Add delta sequence to batch mode, accurate query and file daemon ff_pkt + +17Nov10 +- Start adding DeltaSeq + +15Nov10 +- bvfs: support basejobs and migration for PostgreSQL +- bvfs: support for bweb user acl + +13Nov10 +- Fix win32 build +- More porting from Branch-4.0 + +06Nov10 +- Add check_changes to bacula def + +27Oct10 +- Add -l and -a options to drivetype tool + +19Jul10 +- Add debug info to smartall.c for buffer overrun + +30Aug10 +- Make RestoreObject compatible with older FDs + +15Jul10 +- Make VSS snapshot failures fatal + +12Jul10 +- Add bEventVssBeforeCloseRestore per James + +24Jul10 +- Call plugin bEventEndxxxJob even if Job canceled + +06Nov10 +- Fix Windows build to know about exepath + +07Nov10 +- Allow some plugin variables to be available during loadPlugin() + +06Nov10 +- Add bVarExePath to let plugins know about exepath + +02Nov10 +- Fix compilation warning about void* to int cast + +24Oct10 +- Remove extra EndTime displayed on the first line of the Job status report + +12Nov10 
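The 17Dec10 entry above, changing SELECT count(*) to SELECT 1 .. LIMIT 1 in is_volume_purged(), is the standard existence-test optimization: counting forces PostgreSQL and InnoDB to visit every matching row, while LIMIT 1 lets the engine stop at the first hit. The query text below is only an approximation; the JobMedia table and MediaId column used here are assumptions about the catalog schema:

    /* Before: counts every JobMedia row that still references the volume. */
    const char *count_query =
       "SELECT count(*) FROM JobMedia WHERE MediaId=%s";

    /* After: the engine may return as soon as a single row is found. */
    const char *exists_query =
       "SELECT 1 FROM JobMedia WHERE MediaId=%s LIMIT 1";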
+- Prevent timer recursion in bat + +07Nov10 +- Implement close button in bat tabs + +24Oct10 +- Set daemon name and host name to zero + +05Nov10 +- Permit to get Where/RegexWhere from Plugin and give access to Accurate + data +- Allow Plugin command without argument +- Fix compilation problem on win64 + +03Nov10 +- Stop restore if job is canceled +- Make sure JobErrors is always nonzero on fatal error + +02Nov10 +- Fix bug #1655 Quitting bconsole with ctrl+d doesn't save history + +31Oct10 +- Fix bad handling of c: during restore in bat +- Ignore unknown dot commands in restore tree code + +20Oct10 +- Fix ubuntu/debian startup scripts + +13Oct10 +- Fix bug #1643 about orphan records with delete volume= command + +07Oct10 +- bvfs: fix mysql query, restore seems to be ok +- bvfs: Restore should be ok with MySQL + +06Oct10 +- Use start-stop-daemon handler to run daemons as bacula:bacula + +26Sep10 +- Reverted the change of TRU64 to OSF1 for the OS detection rules. Although + Compaq/HP renamed OSF1 to TRU64 lets leave it for now. +- Fix new IRIX xattr code. Be a bit smarter when freeing data and do it in the + exception handler (e.g. after the goto label) +- Fix some trivial errors and implemented the restore of IRIX xattrs. +- Added first try at supporting IRIX extended attributes. + +25Sep10 +- Added configure logic for detecting the TRU64 extended attributes interface. + Also made the acl and xattr configure logic use the HAVE_XXX_OS_TRUE variable + which is set by BA_CHECK_OPSYS macro so we only check for a certain interface + that we know only exist on a certain platform when we are running on that + platform. This should make the configure output much cleaner. + +24Sep10 +- Added support for TRU64 Extended Attributes interface. + +06Oct10 +- Remove SQL lib dependency from btape + +05Oct10 +- Fix bad copyright in new src/lib/crc32.c + +04Oct10 +- bat: Add pattern filter and make restore to start from brestore +- Adapt bvfs for BaseJobs (path is ok) + +03Oct10 +- bat: more work on brestore panel +- Add .bvfs_restore command +- Add .bvfs_get_path to traduce a pathid into a path + +02Oct10 +- bat: Add new form to run a restore +- bat: Add restore job list in console +- bat: Add restore option box for brestore +- bat: Add runrestore.ui to bat project file +- bat: Use current set of jobids for directories +- bat: fix drag&drop for new brestore screen + +01Oct10 +- bat: Try to implement drag&drop for file selection +- bat: Add checkbox to allow automatic fileset merging +- Add "all" option to .bvfs_get_jobids dot command. +- bat: get a simple working brestore file browser using bvfs API +- bat: Implement a Date and FileType helper in ItemFormatter +- Implement .bvfs_versions command to display all file versions for a client +- Update BVFS file versions browser +- Add .bvfs_get_jobids to return jobids needed by a job to restore + +30Sep10 +- bat: Use BVFS on bRestore view + +05Oct10 +- Fix #1648 about make_catalog_backup.pl with multiple catalog + +01Oct10 +- Rebuild configure +- Fix batch insert detection for MySQL and no .a libraries + +30Sep10 +- Fix pt_out() that can loose a message if the trace file can't be opened. + +23Sep10 +- Add missing new stream definitions. 
+- Added xattr interface found in AIX 6.x and higher which mimics the Linux/OSF + interface but has just a bit different named functions and the error codes + are somewhat different so its not a perfect match to extend the generic + interface (which is named generic now instead of the linux interface as it + used by Linux and OSX). +- Added configure support for the AIX extended attributes interface. +- Added extra check for Solaris xattr support so we don't get false positives + on other os-es (AIX) etc. + +22Sep10 +- First compiling version of AIX 5.3 and later acl code using the new aclx_get + and aclx_put interface. +- AIX also supports setmntent/getmntent so use that instead of much more + complicated other interface which is left for AIX systems that might not + support the setmntent/getmntent interface. + +17Sep10 +- Handle the old stream using the old system call just to be safe for now. + +16Sep10 +- First steps for adding new interface for acls added to AIX 5.3 and later. + Added 2 new streams and basic code to handle these streams. Next step is the + actual implementation. +- Rewrote ACL configure logic to be somewhat better readable in the same style + as the new xattr configure logic. + +22Sep10 +- Add debug to next_vol.c + shorten retention for recycle-test +- Fix seg fault introduced during restore cut and paste + +21Sep10 +- Fix bat browser to ignore copy jobs. Fix bug #1604 +- Create dummy to create test data +- patch 96793e fixes bug #1584 + +20Sep10 +- Fix bconsole bug with new BNET signals +- Add skeleton of new bat run dialogs + +18Sep10 +- Fix getmntinfo handling on FreeBSD as it seems its the same as OSX (no real + surprise). + +10Sep10 +- Add TODO on AIX acl code +- Detect acl_get() on AIX to enable ACL code + +08Sep10 +- Tweack project file to mark Item 7 done + +30Aug10 +- Fix the insert_autokey_record functions to be 64bit compliant. This fixes a + long standing bug in the postgresql code where the id was converted using + atoi which is now changed into a str_to_int64. + +17Aug10 +- When moving zlib into its seperate ZLIBS variable forgot to update the stored + Makefile.in which still uses FDLIBS. But as FDLIBS now only contains libraries + explicitly needed for building the filed this doesn't work. This should fix + that. This is mostly a problem on Solaris like operating system as the linker + doesn't load all dependency libs and gives unresolved symbols. + +20Sep10 +- Fix bug #1623 about indexes that are already created or deleted in postgresql + upgrade process. +- First cut of bat rerun a Job from Jobs Run +- Allow sorting of more bat media columns +- Suppress superfluous warning messages if SD polling + +19Sep10 +- Add comm error output when cram-md5 fails with comm error + +18Sep10 +- Quote device name in SD and implement releasing message +- Massive bat notifier rewrite + fix seg fault + implement text input dialog + +15Sep10 +- Fix postgresql version detection in database creation script + +13Sep10 +- Make Ubuntu autostart scripts LSB compliant + +12Sep10 +- Fix restore browser to permit checking first item and to start in focus +- Fix another bat seg fault +- Fix bat seg fault + +11Sep10 +- Backport 5.0.3 rpm changes. 
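The 30Aug10 entry above on making the insert_autokey_record functions 64-bit compliant (atoi changed into str_to_int64) concerns parsing the id value the database returns as text: atoi() silently truncates once a sequence passes INT_MAX. A self-contained sketch using the standard strtoll() in place of Bacula's str_to_int64 helper; the wrapper name is invented:

    #include <stdint.h>
    #include <stdlib.h>

    /* Parse a textual sequence/autokey value without truncating it to int. */
    static uint64_t parse_autokey(const char *value)
    {
       /* was effectively: return atoi(value);  (wraps above INT_MAX) */
       return (uint64_t)strtoll(value, NULL, 10);
    }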
+ +04Sep10 +- Display job information on jobid in Verify screen +- Allow to verify any job specified in argument + +03Sep10 +- Send back updated jobs status from FD +- Allow plugin to skip restore if problem +- Attempt to eliminate waiting on Storage message during restore +- Add quotes around resource name in status output +- Change FD protocol version to avoid error message when sending RestoreObjects + +22Aug10 +- Allow limit= for all list sub commands + +12Aug10 +- Make third argument of DEVICE::d_ioctl optionnal to emulate varg +- Use SMARTALLOC+memset instead of overload new/delete that doesn't work in + bat + +02Sep10 +- Fix bug #1601 where prune client pool=xx apply pool retention for all volumes. + +31Aug10 +- Add level option to estimate help command +- Fix #1633 Windows service is not removed if still running + +30Aug10 +- Fix #1624 about segfault in dvd driver when calling rewind(NULL) + +29Aug10 +- Rebuild configure +- Add scripts/reload + +12Aug10 +- Fix batch insert detection on SLES 11 +- Add crc32sum in makefile + +11Aug10 +- Add ENDIAN detection in config.h/configure.in +- Use HAVE_LITTLE_ENDIAN in crc32 code + +08Aug10 +- crc32 optimization +- Add crc32 test program + +10Aug10 +- Update spec files +- Modify DEVICE class to use subclass for each driver implementation. +- Add USE_FTP in version.h +- Add new/delete operators with memset(0) on smartalloc + +08Aug10 +- Fix bug #1504 -- Error when creating tables in MySQL 5.5 + +03Aug10 +- Remove patches directory + +02Aug10 +- Fix prune sql handler +- Allow sorting of MeditView table columns + +01Aug10 +- Make Win32 no filesystem change more explicit + +31Jul10 +- Revert to old block.c to fix recycling + +29Jul10 +- Remove some old files + +28Jul10 +- Fix #1606 about OpenSSLv1 detection using non-standard location + +27Jul10 +- Update the license template files + +26Jul10 +- Fix postgresql catalog creation when version is not on the first line + +25Jul10 +- More changes from GPLv2 to AGPLv3 +- Fix some missed copyright changes +- Switch from GPLv2 to AGPLv3 + +21Jul10 +- Implement %f %b to get JobFiles, JobBytes in a RunScript +- Fix bug #1610 handle empty xattr values on Linux and xBSD. 
+- Really free free pool memory before sm_dump +- Garbage collect memory pool at end of job and before sm_dump +- Ignore STREAM_RESTORE_OBJECTS returned by SD to FD + +19Jul10 +- Add xattr seg fault protection suggested by Marco for bug #1610 + +17Jul10 +- Add archlinux to os.m4 + +14Jul10 +- More fixes for 32/64 bit problems in smartall +- Fix 32/64 bit problem in smartalloc dump routine + +11Jul10 +- Fix .dump, .die, .exit when DEVELOPER turned off + +10Jul10 +- Make SD automatically fix the Volume size in the Catalog when out of sync +- Add bigger print buffer for sm_dump +- Add .dump and .exit commands for daemons + +09Jul10 +- Use Pmsg in smartall.c rather than printf for tracing Windows + +07Jul10 +- Up maximum block size to 20M +- Fix for TLS bugs #1568 and #1599 + +02Jul10 +- Improve comm line error handling for TLS, may fix bug #1568 and #1599 + +04Jul10 +- Add new tls-duplicate-job test + +02Jul10 +- Fix SD crash due to mismatched lock/unlock in error condition + +29Jun10 +- add comments +- Fix #1600 about problems with Accurate code on MySQL + +26Jun10 +- Fix bug #1587 if you have clients with different catalogs configured, both + catalogs contains all clients after a reload or restart of the dir +- Fix bug #1577 During migration: ERROR in block.c:950 Failed ASSERT: dev->is_open() + by moving and commenting out the ASSERT() + +24Jun10 +- Fix new killsafe code +- Add USE_LOCKMGR_SAFEKILL in version.h +- Add wrapper for pthread_kill() to check if thread exists before using kill + +23Jun10 +- Skip HB kill in FD if HB thread terminated + +21Jun10 +- update windows defs + +20Jun10 +- Fix crash from unequal volume_lock/unlock calls +- Fix pruning for migration jobs and do some optimization +- Print TestName at beginning of dump + +19Jun10 +- Remove prototypes of removed prune queries. + +18Jun10 +- Adapt new prune code with old db_accurate_get_jobids() + +17Jun10 +- update prune code +- Fix db_get_base_jobid() + +16Jun10 +- Make new prune algo to work with backup + +19Jun10 +- Save any dumps during regression to dumps directory +- Update LICENSE +- Fix crash from rw_lock/unlock miss match + +18Jun10 +- Attempt to fix duplicate job kill seg fault +- Replace pthread_kill by my_thread_send_signal() in jcr.c +- Fix postgresql error in grant script +- fix #1595 about batch mode detection problem for postgresql with non standard + install dir +- Add TestName to gdb traceback +- Fix accurate code + +17Jun10 +- Remove bdb from configure + +16Jun10 +- make new update tables executable +- Improve the "update stats" sql command +- Improve performance for MySQL with update stats command +- Fix Makefile and build problems + +14Jun10 +- Fix #1594 about prune copy jobs +- Fix restore object compression flag -- James + +11Jun10 +- Add new upgrade script from DB vers 12 to 13 +- Add BDB_VERSION for scripts + prepare update 11 to 12 + automate BDB_VERSION + checking + +09Jun10 +- Add first cut Volume names to disk-changer + +07Jun10 +- Apply literal string fix in printf submitted by Luca Berra + +05Jun10 +- Drop 2 unneeded AC_DEFINE statements from configure.in for AFS. + +03Jun10 +- Fix bug #1582 Restore from multiple storage daemons breaks subsequent backups + +01Jun10 +- Fix segfault on "cancel jobid=" command +- Create table with jobids as a temporary table and move the sql definition to + sql_cmds.c. We use a default query for all databases but Ingres. 
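The 24Jun10 entry above, adding a wrapper for pthread_kill() that checks whether the thread still exists, relies on the signal-0 probe: pthread_kill(tid, 0) does only error checking, so an ESRCH result means the target is already gone and no real signal should be sent. The 18Jun10 entry suggests the real wrapper is my_thread_send_signal(); the sketch below uses an illustrative name and return convention instead:

    #include <pthread.h>
    #include <signal.h>

    /* Probe with signal 0 first; only deliver the real signal if the
     * thread is still around. */
    static int safe_pthread_kill(pthread_t tid, int sig)
    {
       int rc = pthread_kill(tid, 0);
       if (rc != 0) {
          return rc;                 /* e.g. ESRCH: thread already exited */
       }
       return pthread_kill(tid, sig);
    }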
+- Update makefiles for new libtool revision +- Simplify libtool revision and use VERSION as default +- Simplify libtool revision + +28May10 +- Convert all Jmsg on the watchdog thread to Qmsg + +30May10 +- Don't set socket buffer size unless explicite set by users. Suggested change + from bug #1493 +- Fix bug #1554 Windows installer doesn't honor /S flag + +29May10 +- Add z (size_t) prefix to bsnprintf + +28May10 +- Fix #1571 and install libraries with 755 perms + +27May10 +- Fix bug #1581 ASSERTs in src/dird/jobq.c can never trigger +- Removed workaround for bug in Ingres with insert into tables with sequences + from the batch insert temporary table. Added comment to the Ingres make table + script to document the bug numbers which should be fixed in the Ingres + version to be able to run batch inserts and currently known patch numbers for + Solaris x86 and Linux x86_64. + +25May10 +- Fix segfault in action on purge test + +24May10 +- First cut Share Point plugin +- Add more debug code for bactrace + +23May10 +- Fix for bug #1569 deadlock/crash in Dir + +20May10 +- Fix problem with BaseJob and Accurate FileSet options on client side +- Do not use mtime with basejobs + +18May10 +- Optimize the selection process for saving either AFS or Native ACLS by + keeping track of the filesystem we are doing a backup of. This way we don't + have to check if a filesystem is still either AFS or not as when we know if + it is all files on the same filesystem are either AFS or not. We also clear + the native save flag when a filesystem says it doesn't support acls so we + don't keep on trying saving acls on filesystem that don't support them. + +16May10 +- Added preliminary AFS acl support. The code may need some testing on a real + AFS enabled server as most of the code was written using information available + on the Internet. The code compiles and links on Linux using OpenAFS 1.4. +- Added fstype rewrite code so we have a uniform way of retrieving the fstype + on Linux and OSF1 without the need to add code for each new filesystem added + to a OS. + +14May10 +- First stab at a simple script to dump the content of the complete Ingres + bacula database for disaster recovery purposes. + +10May10 +- Add .dump command +- update comments + +08May10 +- remove pragma warnings + +06May10 +- Fix problem when sending Cancel event to plugin +- first attempt at making bacula set the backup level in VSS +- Implement bEventPluginCommand event for systemstate plugin +- MySQL compilation fix + +05May10 +- stop Bacula aborting the job when one of the writers reports a problem + +04May10 +- Fix #1567 about display of long volume names truncated during restore + +03May10 +- Fix segfault when loading Plugins + +02May10 +- Added new bacula-libs package. +- Fix RestoreObject for PostgreSQL + +01May10 +- Move some AC_MSG_RESULT macros around so the configure output looks somewhat + cleaner. +- Move libz out of FDLIBS into a seperate variable as we need it for libbac.so + now too as it also has libz compiled functions. This way we keep the FDLIBS + cleaner with only extra libs need for the Filed to compile. +- Fix RestoreObject schema on PostgreSQL +- Fix insertion of RestoreObject +- Change Byline +- Remove RestoreObjects when pruning Jobs +- Fix bug #1538 Start Scripts return incorrect value +- Fix bug #1564 init scripts not LSB compliant +- Rework sql queries for update copies + +30Apr10 +- Don't hardcode temporary table creation as some backend have a somewhat + different SQL syntax. 
+- Added missing column in Ingres database definition for new database object as + reported by Stefan. + +28Apr10 +- Patch from Martin to avoid error when single tape used +- James' patch +- Fix File and Job Retention in show pool command + +27Apr10 +- Add Polish translation. This translation project is sponsored by Inteos Sp. z + o.o. based in Warsaw, PL. +- Change Ingres query filter to set a realy early timestamp instead of an empty + string which doesn't seem to work on all Ingres versions. +- Add new file I missed from James' plugin patch + +26Apr10 +- Probable fix for SD crash bug #1553 +- Remove bad debug line +- Add extra lock debug code but turned off +- Reduce accurate test output when not debug +- Add lmgr_is_locked() function to test if a thread own a mutex + +25Apr10 +- Put attr.c debug on dbglvl + +24Apr10 +- Use lwps cmd in dbx and no threads as it seems to give some strange side + effects e.g. doesn't work and lwps seems to always work. So lwps it is + then. +- Fix #1559 problem when restoring pruned jobs with a regexp +- Do some work on the btraceback on Solaris, we test to see what debugger is + available and we prefer the debuggers in this order: dbx, gdb, mdb. Also + enchanced the dbx bactrace somewhat that it always dumps all available + threads (e.g. we also changed from lwp to threads for dbx). We also print + some variables that the gdb script also dumps on a bactrace. Hopefully we get + some better dumps using this. The mdb is used as a last resort as it should + be always installed on a Solaris box (dbx and gdb may not) but at the moment + I'm still trying to find out how to get some more understandable dumps from + mdb as its rather rudimentary and more a crash debugger then a source code + debugger. +- Fix for bug #1560 bcopy cannot find Volume +- Update autoconf scripts +- Fix Windows build + +23Apr10 +- Update Ingres RestoreObject table +- Add compress/decompress of Object Record data +- Fix #1558 about bscan that doesn't update PurgedFiles on Job records +- Remove pool zap code that breaks 2drive-concurrent-test +- Fix cancel crash bug #1551 + +21Apr10 +- A timestamp can only be assigned to a timestamp in Ingres unless we convert + it inline. But why should we the code to get Timestamps is already there so + change it and we are allright. +- For Ingres always enable batch mode inserts. +- reindent some Ingres DECLARE queries so they are better readable. +- Lets not use the mdb->transaction bool but create a private one named mdb->explicit_commit + as mdb->transaction is used by the bacula core and we need a private setting + for Ingres. +- Lets have a single exit from db_write_batch_file_records where we always drop + the temporary batch table. +- Fix job_metadata name problem +- Allow traceback with gdb on Solaris + +20Apr10 +- Implement IS_NUM for Ingres by using eqsqlda.h IISQ_ values for numeric types + supported by Ingres. +- Correct Pool display in SD status. Fixes bug #1541 +- Add performance notes in make_xxx_tables.in files + +19Apr10 +- Fix Ingres problems with queries with have both an LIMIT and OFFSET clause as + we need to reorder this query for Ingres to be understandable we need some + more regexp magic for now. Lets make things exentsible and allow for multiple + regexps all being fired on a query transforming it into the query to execute. + Dropped the encoding check which is for postgresql but not interesting for + Ingres now we use VARBYTE and not VARCHAR. 
+- Fix cancel crash reported by Stephen Thompson
+- Check if sql backend is thread-safe
+- Fix error message with PostgreSQL on JobHisto sequence
+
+18Apr10
+- Add RestoreObject postgresql privilege
+- Change plugin FileSet function names; add NewOptions and NewIncludes
+- Second cut plugin filesets
+- Add exclude to test program
+- Correct new plugin names
+- First cut more complicated fileset options for plugins
+- Skip FT_RESTORE_FIRST in verify
+- Start adding plugin regex filesets
+
+17Apr10
+- Reorganize regex code
+
+18Apr10
+- Rewind on close to fix #1549
+- Add bVarWorkingDir to filed plugins
+
+17Apr10
+- Update Ingres temporary table definitions with new datatypes and lengths.
+- Added extra check to scream when we encounter a database datatype which we don't support yet. This should never happen, as we should implement new datatypes when we use them in the schema, but some defensive programming doesn't hurt.
+- Some small indent changes.
+- Possible fix for big-files-test
+- Use a dedicated connection when VirtualFull computes the file list.
+- Add back line accidentally deleted -- fixes verify-vol tests
+- Remove inappropriate comments and unused line
+- Fix badly named index
+- First stab at rewriting the Ingres database schema to allow bigger paths and filenames up to the current limit of 32000 bytes. Also reindented the database definitions to the Bacula standards, which makes them somewhat easier to read. Implemented the retrieval of some additional datatypes in the Ingres backend now that we switched some types from VARCHAR to VARBYTE.
+
+16Apr10
+- Remove closelog() in bpipe; fixes bug #1536
+- Convert restore object to use STREAM_RESTORE_OBJECT; cleaner code
+- For now the Ingres type is not TEXT but VARCHAR(256), so let's define the temporary table that way.
+- Separate some basejob queries into sql_cmds.c so the code is cleaner and we can specify the Ingres-specific definition of a temporary table.
+- Remove some strange includes halfway through this file which seem to be code copied from the top of the file.
+- Fix RestoreObject make_sqlite3_table.in syntax
+
+15Apr10
+- DISTINCT ON postgresql queries won't work on Ingres, so try whether the MySQL queries do any better on Ingres.
+- Small changes to the btraceback script so that on Solaris we don't get strange errors, as PNAME is not a single binary name and that doesn't work too well for doing a gcore.
+- Add install and uninstall of btraceback.mdb script
+
+14Apr10
+- Remove bad index tips on mysql creation script
+- Apply James' patch with my restore object changes
+- Add vss_close_backup_session after find_files + add object_name to restore object
+
+13Apr10
+- Ingres doesn't understand a query string with ORDER BY LastWritten IS NULL,LastWritten DESC,MediaId and we must rewrite it to ORDER BY IFNULL(LastWritten, '') DESC,MediaId, which we now do by using an array with the correct order statement per backend in sql_cmds, just as for some of the other database-specific queries (a minimal sketch of such a per-backend table follows below, after the 10Apr10 entries).
+
+12Apr10
+- Add missing break. Pointed out by James
+
+11Apr10
+- Pass restore object to plugin
+- Restore object now sent to FD
+- Drop old bdb catalog scripts
+
+10Apr10
+- Extract restore object and send to FD first cut
+- Add Ingres RestoreObject SQL changes
+- Rename sql_insert_id into sql_insert_autokey_record so things are somewhat cleaner in name and don't seem to imply we are just a mysql_insert_id replacement, which we are not.
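+
+The per-backend clause table described in the 13Apr10 entry could look roughly like the sketch below. This is an illustration only: the enum, the array name, the backend order and the query text are assumptions, not the actual sql_cmds.c definitions.
+
+   #include <stdio.h>
+
+   /* One ORDER BY clause per catalog backend; only Ingres needs the IFNULL form
+    * (illustrative values, not the real Bacula backend list). */
+   enum backend_type { B_MYSQL = 0, B_POSTGRESQL, B_SQLITE3, B_INGRES, B_NUM };
+
+   static const char *media_order_by[B_NUM] = {
+      /* MySQL      */ "ORDER BY LastWritten IS NULL,LastWritten DESC,MediaId",
+      /* PostgreSQL */ "ORDER BY LastWritten IS NULL,LastWritten DESC,MediaId",
+      /* SQLite3    */ "ORDER BY LastWritten IS NULL,LastWritten DESC,MediaId",
+      /* Ingres     */ "ORDER BY IFNULL(LastWritten, '') DESC,MediaId"
+   };
+
+   int main(void)
+   {
+      enum backend_type db = B_INGRES;   /* normally taken from the catalog configuration */
+      char query[512];
+
+      /* The backend type simply selects the right clause when the query is built. */
+      snprintf(query, sizeof(query),
+               "SELECT MediaId,VolumeName FROM Media WHERE VolStatus='Append' %s",
+               media_order_by[db]);
+      printf("%s\n", query);
+      return 0;
+   }
+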
+
+09Apr10
+- Turn off plugin debug
+- Create and put data into RestoreObject table
+- Create db_create_restore_object_record and code to insert it
+- Drop all Ingres tables with one commit.
+- Fix Win32 build
+- Set level for debug code
+
+08Apr10
+- Make build and install of test-plugin for regress automatic
+- Temp remove add exclude
+
+07Apr10
+- First attempt to fix plugin excludes
+- Get binary objects working
+- Make test-plugin regression test work
+- Make test-plugin-test script
+- Create test-plugin-fd
+- Stuff object in extended attributes
+- Add FT_RESTORE and first cut handling it
+
+06Apr10
+- Add more doc to example-plugin
+
+05Apr10
+- Start RESTORE_OBJECT code
+
+08Apr10
+- Fixed postgresql grant script which I seem to have updated while I thought I updated the Ingres one. Also added a missing entry as reported by Martin Simmons.
+- First attempt at using mdb on Solaris instead of dbx, which isn't standard on most current Solaris versions.
+- Fix grant_postgresql_privileges: doesn't grant permissions on jobhisto_jobid_seq
+- Add back something that for whatever reason got lost on last big merge.
+
+07Apr10
+- Fix stupid inverted logic
+- Make the batch insert functions return bool instead of int.
+- Add lock position info to volume manager
+
+06Apr10
+- Permit using the lock manager with file/line from outside
+
+05Apr10
+- Added empty db_check_backend_thread_safe to dbi backend.
+- Change cryptic aop name
+- Make sql_insert_id an atomic function which executes the insert query and, when that succeeds, retrieves the id the database used for storing the inserted record. Some databases want this to be atomic (e.g. without a COMMIT in between). Coding-wise this is also much cleaner.
+- Drop have_insert_id in mdb as it's always true for all backends.
+- Let's call it COMMIT and not END, as that seems to be the counterpart of BEGIN for transactions. Updated Ingres lock queries to only start a transaction, as the current SQL is not valid Ingres SQL.
+
+04Apr10
+- First attempt at making things multi-threaded and making it possible to have multiple connections to the database without getting the dreaded ERR=E_LQ002E The 'execute immediate' query has been issued outside of a DBMS session. This compiles but seems to give some problems, so into the debugger it is.
+- Use -lq.1 -lcompat.1 as shared libraries instead of linking with -lingres, which is a static library. Also need to change the generated code of esqlcc a bit to get things to link with the IIsqlca function.
+- As we have a multithreaded application, let's tell that to esqlcc so it can optimize for that.
+- Fix bscan segfault with new comment field
+
+03Apr10
+- Creating a database as another user and giving grants doesn't seem to work for Ingres. Strange, so for now create everything using the -u${db_user} option. This is something that needs to be investigated, as it should be possible to have a different owner of the database.
+- Create user in the right database for Ingres.
+- Do all Ingres operations as the user invoking the script, just as for all the other databases. The grant script will make sure the db_user has the proper access rights to the database tables and sequences.
+- Fix username argument to sql for Ingres grants.
+- Replace ASSERT in block.c with fail Job
+- Make the Ingres grant script work.
+- Use uint32_t for max_concurrent_jobs
+- Fix grant script to set grants on correct sequence names and not PostgreSQL names.
+- Add new item to projects
+
+02Apr10
+- Fix bug #1542 File Daemon outputs usage message to stdout instead of stderr
+- Apply James' rename events patch
+- Apply James' plugin patch
+- Added limit_filter which rewrites queries that use the LIMIT functions into something Ingres can understand (a sketch of such a rewrite filter follows below, after the 06Mar10 entries). Removed all static functions from the myingres.sh file as they are no longer an exported interface (we can always put that back when we have the need to export more functions). Some other rewrites of the code to make it somewhat cleaner.
+
+01Apr10
+- Add make_def64
+- Fix win32/lib/bacula.defs as pointed out by James
+- Drop Ingres specific versions of queries with #ifdefs
+- fixed syntax LIMIT/NULL behavior/improved tests
+
+31Mar10
+- Simplify db_check_max_connections code and ifdeffing
+- Fix SQL warning message about concurrency pointed out by Graham
+- Fix compiler warning
+- Add jobs running to status of daemons
+
+29Mar10
+- Use file and line for rwlock and lmgr in db_lock()
+- Add File and Line info to rw_lock for lmgr
+
+27Mar10
+- Fix compiler warning.
+- Fix database locking calling db_lock and returning from function without calling db_unlock.
+- Apply James' wide char patch
+- Patch from James
+- Add bVarVssDllHandle so that a plugin can get GetProcAddress
+
+26Mar10
+- Add missing db_unlock to bvfs_update_cache.
+
+22Mar10
+- Fix #1532 about permission on binaries
+- Propose shell replacement to perl code, fix #1516
+- Fix #1526 about verify jobs and runscript
+
+20Mar10
+- Add 5 minute timeout to alert directive -- fixes bug #1536
+
+19Mar10
+- Add bEventInitializeVSS as requested by James
+
+17Mar10
+- Reduce compiler warnings on Windows build
+
+16Mar10
+- Add doc + make some compat subroutines static
+- Fix Win64 build
+- Correct mkdir in installer
+- System State plugin update
+
+14Mar10
+- Allow users to build bat without static QT if desired.
+- Add bat=no make option on Win32
+- Fix for qt mkspecs location on Fedora 12.
+- Second plugin patch
+- System State plugin first cut
+
+12Mar10
+- syntax fix, test improved, empty results fixed
+- Add read_vol_list mutex to lockmgr prio
+- Fix #1527 about deadlock during migration
+- When including term.h we don't need prototypes for tgetent, tgetnum and tgetstr as they are defined in term.h on Solaris.
+
+10Mar10
+- Another fix for OpenSSLv1
+- Add -lrt to Solaris links
+
+09Mar10
+- Fix tls.c for OpenSSLv1
+- Add plugin Exclude interface
+
+08Mar10
+- Add TODO in bvfs
+- Fix #1511 when trying to insert more than 50,000 directories in bvfs
+- Fix plugin load not to stop if one plugin is bad -- pointed out by James
+
+07Mar10
+- Add Base and Copy to ua_dotcmds.c for Levels and Types for bat
+- More devlock work
+- Remove --without-qwt from configure statement.
+- First cut new device lock code
+- Fix bug in bvfs_update function, should work much better now
+- Use a dedicated connection when running bvfs_update command
+- Add first cut at Win32 bpipe
+
+06Mar10
+- Switch from termlib to ncurses
+- Second correct fix to bug #1524 verify fails after adding or removing files
+- Fix bug #1524 verify fails after adding or removing files
+- Fix bug 1523.
+- Apply fix suggested by Andreas in bug #1502 for mediaview column sort problem
+- Let's not generate the Ingres code automatically when the .sc or .sh files change, as it seems these files get outdated on some systems and then this rule kicks in on systems that don't have the esqlcc Ingres compiler installed.
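+
+The 02Apr10 limit_filter entry above describes firing an ordered list of regexps at a query to turn it into something Ingres can execute. The sketch below shows that general idea in a self-contained form; the rewrite rule, the FETCH FIRST replacement text and all names are illustrative assumptions, not the actual myingres/limit_filter code.
+
+   #include <regex.h>
+   #include <stdio.h>
+   #include <string.h>
+
+   /* One rewrite rule: the text matching 'pattern' is replaced by
+    * before + <capture 1> + after.  Only the first match is rewritten. */
+   struct rewrite {
+      const char *pattern;
+      const char *before;
+      const char *after;
+   };
+
+   static const struct rewrite rewrites[] = {
+      /* hypothetical rule: turn "LIMIT n" into "FETCH FIRST n ROWS ONLY" */
+      { "LIMIT +([0-9]+)", "FETCH FIRST ", " ROWS ONLY" }
+   };
+
+   /* Fire every rewrite in order on 'in' and leave the result in 'out'. */
+   static void apply_rewrites(const char *in, char *out, size_t outlen)
+   {
+      char cur[1024], next[1024];
+      regex_t re;
+      regmatch_t m[2];
+      size_t i;
+
+      snprintf(cur, sizeof(cur), "%s", in);
+      for (i = 0; i < sizeof(rewrites) / sizeof(rewrites[0]); i++) {
+         if (regcomp(&re, rewrites[i].pattern, REG_EXTENDED) != 0) {
+            continue;                  /* skip a rule that fails to compile */
+         }
+         if (regexec(&re, cur, 2, m, 0) == 0 && m[1].rm_so != -1) {
+            snprintf(next, sizeof(next), "%.*s%s%.*s%s%s",
+                     (int)m[0].rm_so, cur,                             /* text before the match */
+                     rewrites[i].before,
+                     (int)(m[1].rm_eo - m[1].rm_so), cur + m[1].rm_so, /* capture 1 */
+                     rewrites[i].after,
+                     cur + m[0].rm_eo);                                /* text after the match */
+            snprintf(cur, sizeof(cur), "%s", next);
+         }
+         regfree(&re);
+      }
+      snprintf(out, outlen, "%s", cur);
+   }
+
+   int main(void)
+   {
+      char q[1024];
+      apply_rewrites("SELECT MediaId FROM Media ORDER BY MediaId LIMIT 10", q, sizeof(q));
+      printf("%s\n", q);   /* ... ORDER BY MediaId FETCH FIRST 10 ROWS ONLY */
+      return 0;
+   }
+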
+ +04Mar10 +- Fix CentOS detection in regression get-os script + +06Mar10 +- Cread subroutines to add exclusion for plugins + +05Mar10 +- This adds sql debugging to these classes that Eric wrote. +- This is a 1/2 fix of the issue. It allows for the sorting of the two non + graphic columns, but still does not allow for sorting the graphic columns. + The issue is with last. By setting the sort value, you are setting the value + on the last. To experiment with what I mean, use the set background that I + have commented out in this commit. + +04Mar10 +- Create AddExclude entry point for plugins + +03Mar10 +- Lets generate ingres code by calling esqlcc and not esqlc as we want c++ + code. Replaced some strncpy by bstrncpy calls. Add dependency in Makefile of + generated code on it source file. +- Another file to add James' code to get VssObject +- Add James' code to get VssObject +- Fix OpenSSL 1.x problem in crypto.c on Fedora 12 +- Display AllowCompress warning message only if compression used in FileSet + +01Mar10 +- Added comment on dbi backend needing a recent version of libdbi from CVS as + the version currently released 0.8.3 wont work for compiling the code. +- Add Warning check to SQL +- Fix readline to use TERM_LIB found for conio + +27Feb10 +- Move bacula shared objects in separate bacula-libs package. +- Fix for termcap lib on suse. + +26Feb10 +- Fix for shared objects name convention change. +- Pass jcr to VSS class + minor clean up of VSS code +- update date + +25Feb10 +- Fix compile link flags for gigaslam +- Add missing defines for gccver. + +24Feb10 +- bconsole: make problem in director selection fatal (exit=1) + +23Feb10 +- Undelete bat help files +- Use db_socket parameter in make_catalog_backup_.pl script +- Add skeleton of system state plugin +- Ensure SD asks for help when looping even if poll set. Fixes bug #1513 + +22Feb10 +- Fix compiler warnings in tools directory +- Temp remove savecwd to make Win32 build + +21Feb10 +- Fix three-pool regress bug +- Replace MIN by code as recommended by Marco to avoid compiler warning +- Add Fedora 11 and 12 build tags. +- Make script more portable + +20Feb10 +- Forgot one exit. +- Fixes for client only build. +- Fix seg fault in dup jobs regression test +- Remove duplicate. +- Possible fix for FreeBSD three-pool regress failure +- Remove mysql version variable. +- Enable readline support. +- Added patch from Stefan Reddig -- improved ingres db test +- Added patch from Stefan Reddig -- added some checks, db test prog +- bug report 1505 +- This version fixes an issue where the console window would start out not + docked. It is fixed by initiating the variables in the Pages class with a + constructor. + +19Feb10 +- fix bvfs that displays NULL from time to time +- Add comments about lock priority +- Fix make_catalog_backup.pl fails when catalog db is on other host + +18Feb10 +- Discard prev regress test results +- Make set_jcr_sd_job_status static + +17Feb10 +- Apply MacOSX installer patch from bug #1509 +- Add debug code for FreeBSD regress failures +- Reduce debug output + +16Feb10 +- Regress test Branch-5.0 prior to release +- Add more debug for three-pool regress failure +- Make dup jobs regress test work +- Eliminate error message in regress setup if get not present +- Avoid error message if git not installed + +15Feb10 +- Apply fix to previous fix of Copy problem. 
Fix proposed by reporter of bug #1476
+- Set default Allow Duplicate Jobs = yes
+- Better cancel + fix Allow Dups code
+- Remove all bacula.spec.in
+
+14Feb10
+- Fix duplicate job bug
+- Fix infinite wait on error when restore started
+- Make undocking work
+- Add check for sqlite3_threadsafe() in configure
+- Add function to extract resource from config file
+- First cut cd to dir during save and restore
+- Add debug to testls
+
+13Feb10
+- Updates for 5.0.1.
+- Clean up termcap requirements.
+- Add dependency information.
+- Changes to run bat as a non-root user.
+- Refix version.
+- Fix client only build.
+- Add code to check and prevent setting StorageId to zero
+
+12Feb10
+- Let's call the Ingres version of the tables also version 12, as it already has all the new tables introduced in version 12 on the other backends.
+- First cut at fixing AllowDuplicateJobs bugs
+- Fix bug #1501 -t does not print errors
+- Add more doc in sample-query.sql
+
+11Feb10
+- Apply SQLite3 update fix from bug #1497
+- Apply bashism fix for diskchanger.in script from bug #1499
+- Apply Philipp Storz fix on bconsole history file
+- Apply rpm fix for Sci Linux from bug #1494
+- Update po files
+- Take most recent Ukrainian po from bug #1448
+- Fix pages not in focus
+
+10Feb10
+- Work around SQLite3 bug in bat submitted by Andreas Piesk a.piesk@gmx.net
+- Move shared object numbers into version.h + use -release for libtool rather than -version-info
+- Make mtx-changer errors clearer
+- Check for existence of mtx-changer.conf in mtx-changer script
+
+09Feb10
+- Probable fix for Copy/Migration bug #1476
+- Fix SQL that fails on SQLite3 in bat reported by Andreas Piesk a.piesk@gmx.net
+- Add "bbatch -r" option to test database performance
+
+08Feb10
+- Make bsnprintf test program compile
+- Add deadlock option to .die command
+- Rename directory
+- Reorganize spec files
+- Fix bat bug that consumes connections + add braces on ifs + rename subroutines
+- Fix compiling gigaslam.c with $(CFLAGS) for OpenSuse Build service
+
+01Feb10
+- fixed segfault/reworked query execution
+
+07Feb10
+- Fix Mysql database upgrade.
+- Fix bug #1488 -- create delivery_error() subroutine to avoid recursion and race conditions in messages.c
+- Upgrade cats lib also to 5.1.0
+
+06Feb10
+- Fix missing console page in bat
+- Move msg trace into subroutine
+- Move bat help files into help subdirectory
+- Add bat help files to Win64 installer
+- Win -- add help files to installer + stop any running bacula-fd before install
+- Fix bug #1481 -- bat consumes all console file descriptors
+- Update date on win32 rc file
+
+04Feb10
+- Fix bug #1486 -- bat doesn't show any errors on command-line
+
+03Feb10
+- Correct .my.cnf umask in make_catalog_backup.pl
+
+02Feb10
+- Apply Philipp's fix for dbcheck use by make_catalog_backup.pl
+
+01Feb10
+- Free db_list when not used
+- Fix seg fault in bscan from new comment field
+- Implement new "purge volume action" command to trigger ActionOnPurge.
+- Disable action_on_purge message in Storage
+- Change db_get_media_ids() to use more MEDIA_DBR info in search
+- Remove qmake-qt4 code so that alternate qt4 build works.
Fixes build problems + in bug #1468 +- Make configure.in changes recommended by Markus Elfring +- Patch from checks multple CNs when using TLS + +31Jan10 +- Commit spec+rpm build files from Scott + +30Jan10 +- Fix seg fault in SQlite driver + +29Jan10 +- - fixed the database/table scripts - db_user was missing - updated database + schema - fixed missing statements in sql*.h - since there is a db_type for + Ingres now, there have to be 5, not 4, members in the arrays - worked alot on + myingres.c - queries seems to be working, but the first INSERT fails with a + segfault +- Remove old bdb files + change DQUEUE to use dlist +- Change copyright date +- Document the empty query file + +28Jan10 +- Make versions of shared libs inline with the software version. +- Fix ActionOnPurge with a relabel command +- Remove file_index sequential check -- reported by Graham +- Enhance marking volume Used messages + +27Jan10 +- Disabled ActionOnPurge waiting for a fix +- Check pool memory size for truncate op + +26Jan10 +- Fix #1467 about ActionOnPurge with Devices having space +- Update bacula.spec with tips from Mory Henderson +- Add -D option to bconsole to choose between different directors + +18Jan10 +- Add comment= option to restore and run commands + +26Jan10 +- Fix #1466 about Bogus pruning message + +25Jan10 +- Fix make_catalog_backup.pl warning + +Release version 5.0.3 + +03Aug10 +- Fix Windows build +- Remove patches directory +- Fix compilation problem with zlib + +02Aug10 +- Remove comments field from tls-duplicate-job-test -- add back next database + upgrade + +01Aug10 +- Make Win32 no filesystem change more explicit + +31Jul10 +- Update askdir.c to Branch-5.1 +- Revert block.c to fix recycling +- More backport from Branch-5.1 to Branch-5.0 +- Backport Branch-4.0 release to Branch-5.0 +- Massive backport from Branch-5.1 to Branch-5.0 -- a bit more to do + +29Jul10 +- Remove some old files + +28Jul10 +- Fix #1606 about OpenSSLv1 detection using non-standard location +- Backport some 5.1 changes to 5.0 + +27Jul10 +- Update the license template files + +26Jul10 +- Fix postgresql catalog creation when version is not on the first line + +25Jul10 +- More changes from GPLv2 to AGPLv3 +- Fix some missed copyright changes +- Change license from GPLv2 to AGPLv3 +- Define restore object stream + +21Jul10 +- Fix bug #1610 handle empty xattr values on Linux and xBSD. 
+- Really free free pool memory before sm_dump +- Garbage collect memory pool at end of job and before sm_dump +- Ignore STREAM_RESTORE_OBJECTS returned by SD to FD + +19Jul10 +- Add xattr seg fault protection suggested by Marco for bug #1610 + +17Jul10 +- Add archlinux to os.m4 + +14Jul10 +- More fixes for 32/64 bit problems in smartall +- Fix 32/64 bit problem in smartalloc dump routine + +11Jul10 +- Fix bad copy/paste in commit c88dccb88 prably a seg fault +- Fix .dump, .die, .exit when DEVELOPER turned off + +10Jul10 +- Add .dump and .exit commands for daemons + +10May10 +- Add .dump command + +10Jul10 +- Make SD automatically fix the Volume size in the Catalog when out of sync +- Add bigger print buffer for sm_dump + +09Jul10 +- Use Pmsg in smartall.c rather than printf for tracing Windows + +11Jul10 +- Update to master's bsnprintf.c + +07Jul10 +- Up maximum block size to 20M + +04Jul10 +- Add new tls-duplicate-job test + +07Jul10 +- Fix for TLS bugs #1568 and #1599 + +02Jul10 +- Improve comm line error handling for TLS, may fix bug #1568 and #1599 + +03Jul10 +- Remove Linux dependency (seq) in regress script + +06May10 +- Fix problem when sending Cancel event to plugin + +01Jun10 +- Update makefiles for new libtool revision +- Simplify libtool revision and use VERSION as default +- Simplify libtool revision + +28May10 +- Convert all Jmsg on the watchdog thread to Qmsg + +20May10 +- Do not use mtime with basejobs + +02Jul10 +- Fix SD crash due to mismatched lock/unlock in error condition + +26Jun10 +- Fix bug #1587 if you have clients with different catalogs configured, both + catalogs contains all clients after a reload or restart of the dir +- Fix bug #1577 During migration: ERROR in block.c:950 Failed ASSERT: dev->is_open() + by moving and commenting out the ASSERT() + +23Jun10 +- Skip HB kill in FD if HB thread terminated + +21Jun10 +- update windows defs + +20Jun10 +- Fix crash from unequal volume_lock/unlock calls +- Fix pruning for migration jobs and do some optimization +- Print TestName at beginning of dump + +19Jun10 +- Remove prototypes of removed prune queries. 
+ +18Jun10 +- Adapt new prune code with old db_accurate_get_jobids() + +17Jun10 +- Remove bdb from configure +- update prune code + +16Jun10 +- Make new prune algo to work with backup + +19Jun10 +- Save any dumps during regression to dumps directory +- Update LICENSE +- Fix crash from rw_lock/unlock miss match + +18Jun10 +- Attempt to fix duplicate job kill seg fault +- Replace pthread_kill by my_thread_send_signal() in jcr.c +- fix #1595 about batch mode detection problem for postgresql with non standard + install dir +- Fix accurate code +- Add TestName to gdb traceback + +17Jun10 +- Fix db_get_base_jobid() + +14Jun10 +- Fix #1594 about prune copy jobs + +07Jun10 +- Apply literal string fix in printf submitted by Luca Berra + +03Jun10 +- Fix bug #1582 Restore from multiple storage daemons breaks subsequent backups + +01Jun10 +- Fix segfault on "cancel jobid=" command +- Change libtool version + +30May10 +- Fix bug #1554 Windows installer doesn't honor /S flag + +23Apr10 +- Remove pool zap code that breaks 2drive-concurrent-test + +28May10 +- Fix #1571 and install libraries with 755 perms + +27May10 +- Fix bug #1581 ASSERTs in src/dird/jobq.c can never trigger + +20May10 +- Fix problem with BaseJob and Accurate FileSet options on client side + +24May10 +- Add more debug code for bactrace + +23May10 +- Fix for bug #1569 deadlock/crash in Dir + +03May10 +- Fix segfault when loading Plugins + +04May10 +- Fix #1567 about display of long volume names truncated during restore + +02May10 +- Added new bacula-libs package. + +01May10 +- Fix bug #1538 Start Scripts return incorrect value +- Fix bug #1564 init scripts not LSB compliant + +28Apr10 +- Fix File and Job Retention in show pool command + +Release Version 5.0.2 + +27Apr10 +- Add lmgr_is_locked() function to test if a thread own a mutex +- Probable fix for SD crash bug #1553 + +24Apr10 +- Fix #1559 problem when restoring pruned jobs with a regexp +- Fix for bug #1560 bcopy cannot find Volume + +23Apr10 +- Remove comment field added in last patch +- Fix cancel crash bug #1551 + +21Apr10 +- Check if sql backend is thread-safe + +20Apr10 +- Correct Pool display in SD status. Fixes bug #1541 + +19Apr10 +- Fix cancel crash reported by Stephen Thompson + +18Apr10 +- Rewind on close to fix #1549 + +16Apr10 +- Remove closelog() in bpipe fixes bug #1536 +- Fix #1517 about missing Base level in .level command + +08Apr10 +- fix grant_postgresql_privileges doesn t grant permissions on jobhisto_jobid_seq + +07Apr10 +- Add lock position info to volume manager +- Permits to use lock manager with file/line from outside + +03Apr10 +- Replace ASSERT in block.c with fail Job +- Use uint32_t for max_concurrent_jobs + +29Mar10 +- Use file and line for rwlock and lmgr in db_lock() +- Add File and Line info to rw_lock for lmgr + +27Mar10 +- Fix database locking calling db_lock and returning from function without + calling db_unlock. + +26Mar10 +- Add missing db_unlock to bvfs_update_cache. + +22Mar10 +- Fix #1532 about permission on binaries + +14Mar10 +- Allow users to build bat without static QT if desired. +- Fix for qt mkspecs location on Fedora 12. + +12Mar10 +- Fix #1527 about deadlock during migration + +10Mar10 +- Another fix for OpenSSLv1 +- Add -lrt to Solaris links + +09Mar10 +- Fix tls.c for OpenSSLv1 + +08Mar10 +- Fix #1511 when trying to insert more than 50.000 directories in bvfs +- Fix plugin load not to stop if one plugin bad -- pointed out by James + +07Mar10 +- Remove --without-qwt from configure statement. 
+- Fix bug in bvfs_update function, should work much better now + +06Mar10 +- Second correct fix to bug #1524 verify fails after adding or removing files +- Fix bug #1524 verify fails after adding or removing files +- Fix bug 1523. +- Apply fix suggested by Andreas in bug #1502 for mediaview column sort problem +- Lets not generate the ingres code automatically when the .sc or .sh files + changes as it seems these files get outdated on some systems and then this + rule kicks in on systems that don't have the esqlcc ingres compiler installed. +- Fix CentOS detection in regression get-os script + +05Mar10 +- Create subroutines to add exclusion for plugins +- This adds sql debugging to these classes that Eric wrote. +- This is a 1/2 fix of the issue. It allows for the sorting of the two non + graphic columns, but still does not allow for sorting the graphic columns. + The issue is with last. By setting the sort value, you are setting the value + on the last. To experiment with what I mean, use the set background that I + have commented out in this commit. + +03Mar10 +- Fix OpenSSL 1.x problem in crypto.c on Fedora 12 +- Display AllowCompress warning message only if compression used in FileSet + +01Mar10 +- Added comment on dbi backend needing a recent version of libdbi from CVS as + the version currently released 0.8.3 wont work for compiling the code. +- Add Warning check to SQL +- Fix readline to use TERM_LIB found for conio + +27Feb10 +- Move bacula shared objects in separate bacula-libs package. +- Fix for termcap lib on suse. + +26Feb10 +- Fix for shared objects name convention change. +- Remove mandrive and suse bacula.spec from configure + +25Feb10 +- Fix compile link flags for gigaslam +- Add missing defines for gccver. + +Release Version 4.0.4 + +03Sep10 +- Send back updated jobs status from FD +- Allow plugin to skip restore if problem +- Add comments to win32-systemstate-test +- Attempt to eliminate waiting on Storage message during restore +- Add quotes around resource name in status output +- Change FD protocol version to avoid error message when sending RestoreObjects +- Fix so .messages does not stop restore in bat (I think) + + +Release Version 4.0.3 + +31Aug10 +- Fix bug #1633 Windows service is not removed if still running + +30Aug10 +- Make RestoreObject compatible with older FDs +- Apply #1624 properly +- Use VERSION for libtool library version +- Change branding to Bacula Enterprise +- Fix #1624 about segfault in dvd driver when calling rewind(NULL) +- Add registered trademark + +17Aug10 +- Correct rpm .spec for putting hostname and password in bat.conf + + +Release Version 4.0.2 + +12Aug10 +- Fix batch insert detection on SLES 11 + +10Aug10 +- Update Enterprise specs to work with 4.0 + +08Aug10 +- Fix bug #1504 -- Error when creating tables in MySQL 5.5 + +02Aug10 +- Allow sorting of MeditView table columns + +01Aug10 +- Make Win32 no filesystem change message more explicit + +27Jul10 +- Update the license template files + +28Jul10 +- Fix #1606 about OpenSSLv1 detection using non-standard location + +Bug fixes +1504 1606 + + +Release Version 4.0.1 + +26Jul10 +- Fix pending_buf size calculation + malloc in big chunks +- Optimize mallocs in vssapi_registry.c +- Apply James' restore patch + +25Jul10 +- More changes from GPLv2 to AGPLv3 +- Fix some missed copyright changes +- Bring plugin license up to new version + +24Jul10 +- Apply James' restore patch +- Call plugin bEventEndxxxJob even if Job canceled +- Check some plugin error conditions on restore to prevent seg fault 
+- Switch to AGPLv3 + +22Jul10 +- Clear class buffer in operator new + add safe_free and correct some calls to + use it +- Really free free pool memory before sm_dump +- Garbage collect memory pool at end of job and before sm_dump +- Ignore STREAM_RESTORE_OBJECTS returned by SD to FD +- Implement %f %b to get JobFiles, JobBytes in a RunScript +- Fix some windows plugin crashes + +20Jul10 +- Add exclude code + fix a few orphaned buffers + +19Jul10 +- Fix malloc of wrong size causing buffer overrun on restore with Windows + plugins +- Fix free of unallocated buffer in win plugin code +- Add debug info to smartall.c for buffer overrun +- Add xattr seg fault protection suggested by Marco for bug #1610 + +18Jul10 +- Add archlinux to os.m4 +- Apply James' job_canceled fix + +17Jul10 +- Add some of James' comments +- Merge with previous KES changes +- Apply James' leaks patch +- Make VSS snapshot failures fatal +- Fix Win FD crash at termination +- Orphaned buffer fixes +- Fix 64 bit p_CreateVssExamineWriterMetadata entrypoint +- Apply James' backup orphaned buffer patch +- Modify win32 config to handle James new vss plugin +- Fix min compile problem + package vss-fd.dll instead of systemstate +- First cut merging James + +14Jul10 +- More fixes for 32/64 bit problems in smartall +- Fix 32/64 bit problem in smartalloc dump routine + +13Jul10 +- Make plugins ignore new event item +- Put all Windows plugin code on non-zero debug level +- Add bEventVssBeforeCloseRestore per James + +11Jul10 +- Fix .dump, .die, .exit when DEVELOPER turned off +- First cut systems state plugin cancel + release some of orphaned buffers +- Add bigger print buffer for sm_dump +- Add .dump and .exit commands for daemons +- Use Pmsg in smartall.c rather than printf for tracing Windows + +09Jul10 +- Add .dump command +- Permit using sm_dump in Windows plugins + +08Jul10 +- Add a make for winbmr directory + +Bug fixes +1610 + +Release Version 4.0.0 + +07Jul10 +- Fix for TLS bugs #1568 and #1599 +- Add new tls-duplicate-job test +- Improve comm line error handling for TLS, may fix bug #1568 and #1599 + +06Jul10 +- Make Windows binaries follow similar naming convention to source release +- Correct some of the Win64 compiler warnings +- Fix Win32 systemstate crash (hopefully) + +03Jul10 +- Remove Linux dependency (seq) in regress script + +02Jul10 +- Do not use mtime with basejobs +- Fix SD crash due to mismatched lock/unlock in error condition + +29Jun10 +- Use send_restore_object() when using plugin + +26Jun10 +- Fix bug #1587 if you have clients with different catalogs configured, both + catalogs contains all clients after a reload or restart of the dir +- Fix bug #1577 During migration: ERROR in block.c:950 Failed ASSERT: dev->is_open() + by moving and commenting out the ASSERT() + +Release Version 4.0.0-RC3 + +19Jun10 +- Move some AC_MSG_RESULT macros around so the configure output looks somewhat + cleaner. 
+- Save any dumps during regression to dumps directory +- Fix crash from rw_lock/unlock miss match +- Attempt to fix duplicate job kill seg fault +- Replace pthread_kill by my_thread_send_signal() in jcr.c +- Add systemstate plugin in windows installer + +18Jun10 +- Fix postgresql error in grant script +- fix #1595 about batch mode detection problem for postgresql with non standard + install dir +- Fix accurate code + +17Jun10 +- Fix db_get_base_jobid() + +16Jun10 +- Fix Solaris zlib link bug reported by Robert Garza +- Improve the "update stats" sql command +- Improve performance for MySQL with update stats command + +14Jun10 +- Fix #1594 about prune copy jobs +- Fix restore object compression flag -- James + +11Jun10 +- Add BDB_VERSION for scripts + prepare update 11 to 12 + automate BDB_VERSION + checking + +07Jun10 +- Fix bug #1582 Restore from multiple storage daemons breaks subsequent backups +- Fix #1567 about display of long volume names truncated during restore +- Fix for bug #1569 deadlock/crash in Dir +- Apply literal string fix in printf submitted by Luca Berra + +03Jun10 +- Fix bug #1582 Restore from multiple storage daemons breaks subsequent backups + +01Jun10 +- Fix segfault on "cancel jobid=" command + +30May10 +- Fix bug #1554 Windows installer doesn't honor /S flag + +28May10 +- Fix #1571 and install libraries with 755 perms + +27May10 +- Fix bug #1581 ASSERTs in src/dird/jobq.c can never trigger +- Fix segfault in action on purge test +- Fix problem with BaseJob and Accurate FileSet options on client side + +12May10 +- Adjusted path handling to make 'where' work. 'where' is basically ignored. a + 'regexwhere' that affects plugin paths will probably still break things + +11May10 +- Don't activate plugin on restore if no RestoreObjects have been seen +- Move "Including VSS Writer ..." so it only appears once + +10May10 +- memory leak fixes +- make tocharstring() report line and file correctly for sm_alloc +- use free instead of delete to free memory allocated with wcsdup remove DOS + EOLs +- Free some memory allocated in find_filespec +- plugin: Use smartalloc in find_filespec +- update find_filespec to allocate it's own memory and not retain any pointers + from the caller + +08May10 +- remove GUID from the writer 'directory' name. Rely on the name of the instance_{GUID} + dummy file instead +- add some deletes back in now that find_filespec does the expected +- Clean some stale code and superfluous debugging output +- forgot to mark file as seen when it is seen but not newer +- mark instance_{GUID} files as seen for incremental backups +- plugin: replace new/delete/malloc/free by smartalloc + +07May10 +- Remove bdb from cat makefile + +06May10 +- Fix problem when sending Cancel event to plugin +- Update plugin from James +- Implement bEventPluginCommand event for systemstate plugin +- MySQL compilation fix + +05May10 +- plugin: use smartalloc +- fix compilation for 64bit version + +Release Version 4.0.0-RC1 + +04May10 +- Fix xml_name in plugin + +03May10 +- Fix case order in vss_writer to resolve crash at the end of the writer + backup +- Fix segfault when loading Plugins + +01May10 +- Fix insertion of RestoreObject +- Fix RestoreObject schema on PostgreSQL +- Use lwps cmd in dbx and no threads as it seems to give some strange side + effects e.g. doesn't work and lwps seems to always work. So lwps it is + then. +- Do some work on the btraceback on Solaris, we test to see what debugger is + available and we prefer the debuggers in this order: dbx, gdb, mdb. 
Also + enchanced the dbx bactrace somewhat that it always dumps all available + threads (e.g. we also changed from lwp to threads for dbx). We also print + some variables that the gdb script also dumps on a bactrace. Hopefully we get + some better dumps using this. The mdb is used as a last resort as it should + be always installed on a Solaris box (dbx and gdb may not) but at the moment + I'm still trying to find out how to get some more understandable dumps from + mdb as its rather rudimentary and more a crash debugger then a source code + debugger. +- Remove RestoreObjects when pruning Jobs +- Fix File and Job Retention in show pool command +- Add Polish translation. This translation project is sponsored by Inteos Sp. z + o.o. based in Warsaw, PL. +- Fix bug #1538 Start Scripts return incorrect value +- Fix bug #1564 init scripts not LSB compliant + +28Apr10 +- Patch from Martin to avoid error when single tape used + +27Apr10 +- Add lmgr_is_locked() function to test if a thread own a mutex +- Add new file I missed from James' plugin patch +- Probable fix for SD crash bug #1553 + +26Apr10 +- Fix #1558 about bscan that doesn't update PurgedFiles on Job records +- Fix #1559 problem when restoring pruned jobs with a regexp + +24Apr10 +- Fix for bug #1560 bcopy cannot find Volume + +23Apr10 +- Add compress/decompress of Object Record data +- Remove pool zap code that breaks 2drive-concurrent-test +- Fix cancel crash bug #1551 + +21Apr10 +- Fix job_metadata name problem +- Backport regress from Branch-5.0 and master +- Check if sql backend is thread-safe +- Update DartConfiguration +- Add lock position info to volume manager +- Backport lockmgr.h +- Backport bbatch.c changes +- Add install of mdb script +- add mdb traceback + remove cats/bdb +- Pull master changes + +20Apr10 +- Big backport from master +- Correct Pool display in SD status. Fixes bug #1541 + +05Apr10 +- Fix compiler warning + +03Apr10 +- Replace ASSERT in block.c with fail Job +- Use uint32_t for max_concurrent_jobs + +29Mar10 +- Use file and line for rwlock and lmgr in db_lock() +- Add File and Line info to rw_lock for lmgr + +27Mar10 +- Fix database locking calling db_lock and returning from function without + calling db_unlock. + +26Mar10 +- Add missing db_unlock to bvfs_update_cache. +- Fix array dimensions due to adding Ingres to certain sql arrays. + +22Mar10 +- Update configure +- Fix #1532 about permission on binaries + +14Mar10 +- Allow users to build bat without static QT if desired. +- Fix for qt mkspecs location on Fedora 12. + +12Mar10 +- Run ingres template code through esqlcc. +- another small fix +- syntax fix, test improved, empty results fixed +- Fix #1527 about deadlock during migration + +10Mar10 +- Another fix for OpenSSLv1 +- Add -lrt to Solaris links + +09Mar10 +- Fix tls.c for OpenSSLv1 + +08Mar10 +- Fix #1511 when trying to insert more than 50.000 directories in bvfs +- Fix plugin load not to stop if one plugin bad -- pointed out by James + +07Mar10 +- Remove --without-qwt from configure statement. +- Fix bug in bvfs_update function, should work much better now + +06Mar10 +- Second correct fix to bug #1524 verify fails after adding or removing files +- Fix bug #1524 verify fails after adding or removing files +- Fix bug 1523. 
+- Apply fix suggested by Andreas in bug #1502 for mediaview column sort problem +- Lets not generate the ingres code automatically when the .sc or .sh files + changes as it seems these files get outdated on some systems and then this + rule kicks in on systems that don't have the esqlcc ingres compiler installed. +- Fix CentOS detection in regression get-os script + +05Mar10 +- Create subroutines to add exclusion for plugins +- This adds sql debugging to these classes that Eric wrote. +- This is a 1/2 fix of the issue. It allows for the sorting of the two non + graphic columns, but still does not allow for sorting the graphic columns. + The issue is with last. By setting the sort value, you are setting the value + on the last. To experiment with what I mean, use the set background that I + have commented out in this commit. + +03Mar10 +- Fix configure.in +- Lets generate ingres code by calling esqlcc and not esqlc as we want c++ + code. Replaced some strncpy by bstrncpy calls. Add dependency in Makefile of + generated code on it source file. +- Fix OpenSSL 1.x problem in crypto.c on Fedora 12 +- Display AllowCompress warning message only if compression used in FileSet + +01Mar10 +- Added comment on dbi backend needing a recent version of libdbi from CVS as + the version currently released 0.8.3 wont work for compiling the code. +- Add Warning check to SQL +- Fix readline to use TERM_LIB found for conio + +27Feb10 +- Move bacula shared objects in separate bacula-libs package. +- Fix for termcap lib on suse. + +26Feb10 +- Fix for shared objects name convention change. +- Remove mandrive and suse bacula.spec from configure + +25Feb10 +- Fix compile link flags for gigaslam +- Add missing defines for gccver. + +24Feb10 +- Final po changes for Release-5.0.1 +- Remove qt-console from POFILES + +23Feb10 +- Undelete bat help files +- Fix problem with MySQL with big Base jobs (temporary fix for 5.0.1) +- Fix compiler warnings in tools directory +- Ensure SD asks for help when looping even if poll set. Fixes bug #1513 + +21Feb10 +- Fix three-pool regress bug +- Replace MIN by code as recommended by Marco to avoid compiler warning +- Make script more portable +- Add Fedora 11 and 12 build tags. +- Lets close the Ingres cursor otherwise the code complains the cursor is still + open when we try to reuse it. +- Fix segv when indicator field is null. Also added some extra checks around + free calls so we don't try to free null pointers. +- Changed strcpy to bstrncpy and sprintf to bsnprintf and changed the bsnprintf + format. +- Small indent changes + +20Feb10 +- Sync between master and this branch as some ingres changes did go into master + but not into this branch. +- Remove include file that makes the compile complain and doesn't seem to be + used as removing makes the code compile ok. +- Some small indent changes. +- Reindented myingres.sc to Bacula "style" and regenerated myingres.c +- Merged in rejects from previous patch and regenerated new myingres.c from + myingres.sc using esqlc. Changed sprintf into snprintf. +- Added patch from Stefan Reddig -- fixed date types, errmsg +- Added patch from Stefan Reddig -- improved ingres db test +- Added patch from Stefan Reddig -- added some checks, db test prog +- Forgot one exit. +- Fixes for client only build. +- Fix seg fault in dup jobs regression test +- Possible fix for FreeBSD three-pool regress failure +- Remove duplicate. +- Remove mysql version variable. +- Enable readline support. 
+- bug report 1505
+- This version fixes an issue where the console window would start out not docked. It is fixed by initializing the variables in the Pages class with a constructor.
+
+19Feb10
+- fix bvfs that displays NULL from time to time
+- Fix make_catalog_backup.pl fails when catalog db is on other host
+
+17Feb10
+- Apply MacOSX installer patch from bug #1509
+- Add debug code for FreeBSD regress failures
+- Reduce debug output
+
+16Feb10
+- Regress test Branch-4.0 prior to release
+- Add more debug for three-pool regress failure
+- Backport duplicate-job-test
+- Make dup jobs regress test work
+- Eliminate error message in regress setup if get not present
+- Avoid error message if git not installed
+
+15Feb10
+- Apply fix to previous fix of Copy problem. Fix proposed by reporter of bug #1476
+- Set default Allow Duplicate Jobs = yes
+- Fix Allow Duplicates bug
+
+14Feb10
+- Fix duplicate job bug
+- Update date
+- Fix infinite wait on error when restore started
+- Make undocking work
+- update configure
+- Add check for sqlite3_threadsafe() in configure
+- Add debug to testls
+
+13Feb10
+- Updates for 5.0.1.
+- Clean up termcap requirements.
+- Add dependency information.
+- Changes to run bat as a non-root user.
+- Refix version.
+- Fix client only build.
+- Add code to check and prevent setting StorageId to zero
+
+12Feb10
+- Let's call the Ingres version of the tables also version 12, as it already has all the new tables introduced in version 12 on the other backends.
+- Fix library versions from 5.1.0 to 4.0.1 so things are in line with the numbering scheme
+- First cut at fixing AllowDuplicateJobs bugs
+- Fix bug #1501 -t does not print errors
+- Add more doc in sample-query.sql
+
+12Feb10
+- Let's call the Ingres version of the tables also version 12, as it already has all the new tables introduced in version 12 on the other backends.
+- Fix library versions from 5.1.0 to 5.0.1 so things are in line with the numbering scheme
+- First cut at fixing AllowDuplicateJobs bugs
+- Fix bug #1501 -t does not print errors
+- Add more doc in sample-query.sql
+
+11Feb10
+- Apply SQLite3 update fix from bug #1497
+- Apply bashism fix for diskchanger.in script from bug #1499
+- Apply Philipp Storz fix on bconsole history file
+- Apply rpm fix for Sci Linux from bug #1494
+- Update po files
+- Take most recent Ukrainian po from bug #1448
+- Fix pages not in focus
+
+10Feb10
+- Work around SQLite3 bug in bat submitted by Andreas Piesk a.piesk@gmx.net
+- Move shared object numbers into version.h + use -release for libtool rather than -version-info
+- Make mtx-changer errors clearer
+- Check for existence of mtx-changer.conf in mtx-changer script
+
+09Feb10
+- Probable fix for Copy/Migration bug #1476
+- Fix SQL that fails on SQLite3 in bat reported by Andreas Piesk a.piesk@gmx.net
+- Make master run with DEVELOPER set
+
+08Feb10
+- Make bsnprintf test program compile
+- Rename directory
+- Reorganize spec files
+- Fix bat bug that consumes connections + add braces on ifs + rename subroutines
+- Fix compiling gigaslam.c with $(CFLAGS) for OpenSuse Build service
+- Fix Mysql database upgrade.
+
+07Feb10
+- Fix bug #1488 -- create delivery_error() subroutine to avoid recursion and race conditions in messages.c
+- Upgrade cats library also to 5.0.0
+
+06Feb10
+- Fix missing console page in bat
+- Move msg trace into subroutine
+- Move bat help files into help subdirectory
+- Add bat help files to Win64 installer
+- Win -- add help files to installer + stop any running bacula-fd before install
+- Fix bug #1481 -- bat consumes all console file descriptors
+- Update date on win32 rc file
+
+04Feb10
+- Backport truncate on purge from 5.1.x
+- Fix bug #1486 -- bat doesn't show any errors on command-line
+- Update the bsock error url
+
+03Feb10
+- Correct .my.cnf umask in make_catalog_backup.pl
+
+02Feb10
+- Apply Philipp's fix for dbcheck use by make_catalog_backup.pl
+
+01Feb10
+- Backout patch that should not be in Branch-5.0
+- Backup patch that should not be in Branch-5.0
+- Free db_list when not used
+- Fix seg fault in bscan from new comment field
+- Remove qmake-qt4 code so that alternate qt4 build works. Fixes build problems in bug #1468
+- Patch from checks multiple CNs when using TLS
+
+31Jan10
+- Commit spec+rpm build files from Scott
+
+30Jan10
+- Fix seg fault in SQLite driver
+
+28Jan10
+- Make versions of shared libs in line with the software version.
+- Remove file_index sequential check -- reported by Graham
+
+27Jan10
+- Disabled ActionOnPurge waiting for a fix
+- Check pool memory size for truncate op
+
+26Jan10
+- Fix #1467 about ActionOnPurge with Devices having space
+- Fix #1466 about Bogus pruning message
+
+25Jan10
+- update configure
+- Fix make_catalog_backup.pl warning
+
+
+
+Release Version 5.0.0
+
+21Jan10
+- Remove double secs in pruning output
+
+20Jan10
+- Fix priority mgmt in lock manager
+- Fix FD crash when plugin running and cancel given
+- Stop backup sooner after cancel
+- Make a missing plugin during backup fatal
+- Update sample-query.sql
+- Use make_catalog_backup.pl by default
+
+19Jan10
+- Update makeall and add README to release directory
+- Remove debug code
+- Add more authors
+- Fix possible memory leak
+- Fix bscan bug introduced with serialization change
+- Add a few status client commands
+- Make Ukrainian po compile
+- Update po files
+- More spec improvements from bug #1455
+- Enhance FD status to have level
+
+18Jan10
+- Fix logwatch install in bacula.spec. Patch from bug #1454
+- Remove /etc/logwatch directories from spec file. Patch supplied by bug #1456
+- Don't remove spec files -- fixes bug #1458
+- Remove FI sanity check code that does not apply after a VirtualFull
+- Make Attr/record FI non-fatal if different -- possibly temp + add new session record types
+- Keep serial from overrunning with bad data
+
+17Jan10
+- Add info message on how to stop the file relocation test
+
+15Jan10
+- Add new Path tables to grant and drop scripts
+- Fix #1450 about FirstWritten not updated with bscan
+- Fix error message in grant_postgresql_privileges
+
+12Jan10
+- Fix SQLite table creation script
+
+11Jan10
+- Add Job.Reviewed field
+- Enable priority checks in lockmanager
+- Change DB version from 11 to 12
+- Fix Win64 build
+- Fix Win32 build
+
+10Jan10
+- Fix the subject to something easy to delete
+- Turn off messages test -- it creates tons of emails
+
+09Jan10
+- Optimize strtok usage in ua_cmds
+
+08Jan10
+- Remove old code requiring different storages for migration
+- More checking for OpenSSL in configure
+
+06Jan10
+- Add make_catalog_backup.pl script that uses env variables and a disk file to pass the database password for backup
+- Modify the sql query to get alphabetical order of clients when selecting the most recent backup for a client
+
+05Jan10
+- Remove warning about make_catalog_backup.pl
+
+04Jan10
+- Add Ingres configuration
+- Fix Ingres ID definition that caused seg fault
+- Add commit from Stefan Reddig
+
+03Jan10
+- Simplify initgroups definition for AIX
+- initgroups definition is ok on aix >= 5
+
+31Dec09
+- Ignore scripts/bacula_config
+- Remove QWT from configure -- not used
+
+28Dec09
+- Revert to configure with C due to old prototypes on some systems
+- Make ioctl_req_t test less g++ dependent
+- Make ioctl test more exact
+- Try to force configure compiles to be more exact
+- Rebuild configure to handle Marco's changes
+- Clean old configure
+
+27Dec09
+- Let --enable-libtool and --disable-libtool determine if we build plugins and drop the extra configure option
+
+26Dec09
+- Let's use an xattr-specific constant and not the stdio BUFSIZ, which is kind of platform dependent.
+- Attempt to autoconfigure ioctl_req_t
+- Let's not mix initialized stack variables with uninitialized ones in the same definition.
+- Added some handling of EPERM and EOPNOTSUPP, which seem to be returned when you try to retrieve the system extended attributes as a non-root user.
+- Lets not orphan the buffer returned by extattr_namespace_to_string as it + seems by looking at the code it strdups the string it returns so no need to + bstrdup it ourself. + +25Dec09 +- Show that plugins are enabled or not in configure output. + +24Dec09 +- Plugins are dependable on libtool now so cleaned the Makefiles and the fd + plugins are only build when --enable-plugins is set and libtool is used. +- Clarify catalog grant permissions message + +23Dec09 +- Fix depend for changes to SRC variables. +- Drop the usage of macros within macros as it seems some platforms native + makes don't support that. While converting the Makefiles also made things + somewhat more uniform in naming the lib objects. + +22Dec09 +- Add scripts/bacula_config script that displays configure options and results +- Continue to support Batch Mode with PostgreSQL < 8.2 + +21Dec09 +- M_ABORT Bacula If batch insert is turned on when we try to open a connection + and thread safe is not enabled +- Try to fix problem on FreeBSD with unsupported extended attributes which + gives lots of orphaned buffers as seen in regression testing. + +20Dec09 +- Fix thread safe detection in ./configure + +19Dec09 +- Update bweb deb +- Fix breload make_catalog_backup.pl rights + +17Dec09 +- LSBize Debian init scripts + +15Dec09 +- Fix #undef of lock manager + +13Dec09 +- Remove checkout master +- Add more debug for Solaris regression timeouts +- Make SUN C++ compiler stop complaining about enum definition. +- Use %c and %n in bootstrap in default bacula-dir.conf + +11Dec09 +- remove a ref to sqlite +- Remove ua_prune.c debug code +- Print any traceback or bactrace +- Fix old stuff in Makefile +- Allow deadlock manager on FreeBSD, MacOSX and SunOs +- Add allowcompress-test +- Add test for AllowCompress option + +10Dec09 +- Eliminate compiler warning +- Rework Mac OS 10.6 mtio.h fix +- Attempt to fix mtio.h problem on Mac 10.6 +- Allow mount to wake job waiting for op +- Make wait longer for better recycling +- Fix another volume_lock dlock order problem +- Fix and add ASSERT debug +- Fix compilation problem with lockmgr + +09Dec09 +- Fix misnamed variable +- Reorder lock_volumes and dlock in SD to avoid race conditions +- - five more updates to manpages options I had forgotten in a corner, - a + reformated and edited page for bsmtp - a brand new one for btraceback, based + on the Problem Resolution Guide and discussions (to be continued?) on this + list. +- Fix compilation pb +- Add mutex some priority info for SD +- Add info in dump about lock priority +- Fix CentOS/RHEL detection +- Fix #defines when lock manager turned off +- Add USE_LOCKMGR_PRIORITY switch to enable new priority management +- Add Macro BTHREAD_MUTEX_PRIORITY(p) and lib/mutex_list.h to manage locks +- Make pthread_mutex_init/destroy compatible with bthread_mutex_t +- Fix SD Vol+dev lock race bug + +08Dec09 +- Add init/destroy function for both p/bthread_mutex_t object +- Add new bthread_mutex_t object that check mutex priority order +- Add test for reload command +- More bstrncat doc +- Add bstrncat comments +- Fix bug #1431 about bad reload command. Still an object to free in this + case. 
+- Fix bug in bstrncat +- Temp fix for prune crash + +07Dec09 +- Update win32 testing +- Add new test for win32 +- Add cleandir option +- Add tool to edit configuration file remotely +- Add more sm_check debug +- Add memory corruption checks + +06Dec09 +- Fix CentOS/RedHat test in regress +- Add win32 regress helper +- Attempt to fix SQLite seg fault when listing fields +- Allow Dir to come up before use +- Fix python plugin to use the new get functions instead of the get_ functions. +- Fix seg fault in new AllowCompression code +- Use restore count from write_bsr +- Add some doc of Win32 stuff +- Update prototype.conf to have latest stuff +- Update techdoc + notes +- Add AllowCompression feature + +05Dec09 +- Remove debug +- Fix #1403 about windows directory attributes not well restored +- Change old get_Jobxxx to getJobxxx +- Fix Win32 build after adding Base Job code +- Fix backup-bacula-test +- Add new idea to kernstodo +- Improved database warning message + +04Dec09 +- Init lmgr in btape + +03Dec09 +- Disable SQLite2 code in cats.h +- Remove SQLite from configure +- Remove SQLite2 scripts +- Fix configure.in + +02Dec09 +- Remove JobMedia.Copy references +- bvfs: update bvfs_test +- update +- Remove brestore_ from bvfs table, use Job::HasCache instead of knownjobid + table +- Update po files +- Finally... the 100% of bacula spanish !!! ;-) +- Apply Postgres 8.4 SQL_ASCII fix +- Ignore bweb.conf in gui/bweb +- Apply manpage patches + +01Dec09 +- Fix backup-bacula regression +- Remove support for SQLite 2 +- Fix rpm spec files bug #1428 + #1420 +- Fix include MySQL libs to use shared object. Fixes bug #1427 +- Fix search to include 64 bit libs for --with-dbi option +- Change all log.d to logwatch in RedHat specs + +30Nov09 +- Add config doc in INSTALL + +23Nov09 +- Add another project to the projects file +- don't follow packagemaker permission recommendations +- OSX Installer: update README +- OSX installer: fix for a bug in 10.5 packagemaker, --id argument required +- OSX installer: switch to new pmdoc packagemaker format +- Add another project to the projects file +- Add new project + +22Nov09 +- Add error count to Fatal job errors + +21Nov09 +- Attempt to keep tls_shutdown from hanging + +16Nov09 +- Add completion for unmark + +15Nov09 +- Add Job and File Retention to Pool +- Remove version id +- Change some console.c socket calls into class calls +- Revert "Reduce diff output in weird-files-test" +- Reduce diff output in weird-files-test + +14Nov09 +- Fix bug #1367 by creating an empty query.sql file + +13Nov09 +- update +- Display only backups when running a new job +- Add list_backup, list_restore functions +- update +- Add optional type= argument to .jobs command. You can list only Backup, + Restore,... jobs. + +12Nov09 +- autocomplete restore mode + +11Nov09 +- update +- Fix segfault on basejob code +- Replace POOL_MEM by POOLMEM to avoid segfaults +- Fix base-job-test with Mysql +- Remove extra _ of _dbg_print_db func +- Update BDATE +- fix segfault on redhat4 + +10Nov09 +- Add base-job-test to regress +- update + +09Nov09 +- Optimize SQL to determine file selection with directories. It is enabled for + ascii path. + +07Nov09 +- Drop svn Id + +06Nov09 +- Add Solaris regress comments + +05Nov09 +- More debug when test fails +- Add SITE_NAME and EMAIL to cdash report +- Display a message if catalog max_connections setting is too low + +04Nov09 +- Apply James' conio patches +- Update po +- A small changes on bacula-es translate. Now, we go on ~90% !!! 
+- Fix double send of attributes introduced in 84aabba7cee82f0c1f6dae8882a2ee0bb26306ca +- Check pthread_mutex_lock return code in lockmgr + +03Nov09 +- Remove debug code in concurrent-jobs-test +- Remove debug code in encrypt-bug-test +- Remove annoying debug messages in four-jobs-test + +02Nov09 +- Increase width of ls size. Fixes bug #1409 +- Fix buffer clobber when editing SQL error +- Fix false zombie errors in FD during regression +- Merge branches 'master' and 'kaboom' +- Add -m option to DIR,FD,SD to dump kaboom output +- avoid orphan memory in testls + +01Nov09 +- tab-completion ok +- When using .help item=, we display only usage part +- Add bat to Win32 build and install +- Add more debug for zombie jobs +- Get regress bacula stop messages when debug on +- Fix SD DCR race condition that causes seg faults +- Stop regression tests after 30 minutes +- Commit configure +- Add more debug on failed source-addr regression +- Set use_libtool expicitly to yes so our other checks work for static tools. + +30Oct09 +- Rework bat so that Win32 version works better + +29Oct09 +- Make bat win32 build use /home/kern/bacula +- Make non-win32 build stop on failure +- Make win32/64 build stop on failure +- Remove definition of unused subroutine in ua_dotcmds.c +- Revert James' patch to simplify the Win32/64 build process + +28Oct09 +- disable debug in win32 revision +- update bat on win32 project file + +27Oct09 +- Small fix to Eric great patch for readline commandcompletion so it also + compiles on non gcc compilers. +- Add readline completion for commands and argument + +26Oct09 +- Fix another messages error problem pointed out by Eric + +25Oct09 +- Disable messages-test in automatic +- Show an other deadlock in messages.c +- Change W/G to Wanted/Granted in lock dump +- fix multi-console problem for bat on win32 +- change Jmsg to Qmsg in low level routine +- Set tries to 1 for a non-timeout mount or unmount operation. +- Fix comment +- bat: change info order in Job view +- Use Qmsg instead of Jmsg when fides_mutex is locked in close_msg() +- simplify the link process for win32/win64 + +24Oct09 +- initialize lmgr in bat +- add messages-test to regression suite +- update tools with lmgr +- force lock manger init +- add lmgr to bconsole +- Fix deadlock detection to work in daemon mode +- Implement syslog on Windows +- Make all ABORT and ERROR_EXIT msgs go to syslog +- Fix ref to dequeuing_msgs + +23Oct09 +- Disallow Qmsg when despooling +- Fix Qmsg race condition reported by Eric +- add test for bad messages ressource +- bat: Display a nice graphic on media usage depending on the average size for + this mediatype. + +22Oct09 +- bat: add volume capacity estimation in mediaview +- bat: try to center InChanger bullet +- bat: Use green/red bullet for InChanger in mediview +- bat: Don't set Expire field unless LastWritten is ok + +21Oct09 +- Make new configure per Marco's request +- bat: add new Expired button in MediaView +- fix vtape test +- Redirect errors to /dev/null and only run uname -p on Linux where we know it + works. This is probably the most portable way of doing things. + +20Oct09 +- Add MediaView form that displays the Media list in a QTableWidget. It permits + to search and sort media. 
Rename current "Media" tab to "Pool" as it displays + Pools +- Permit to use MediaId or VolumeName in MediaEdit panel +- add StringList to handle locations, volstatus and mediatypes +- add dot functions to list volstatus, locations and mediatypes +- update bacula32.def +- Add .volstatus command +- Add new MediaView +- permits "update slot(s)" and "label barcodes slot(s)" command + +19Oct09 +- Don't use uname -i but uname -p +- Added support for selecting the correct libdir on Linux +- fix status slots command when SD slots are mixed +- Test only master now + +18Oct09 +- update fr.po +- Added acl and xattr regression tests for FreeBSD +- Use getfattr and setfattr on Linux not attr as that is a XFS tool which works + too but is not native. +- Don't enable SUN compiler by default and make the configdir configurable. +- Add test when ActionOnPurge=None Add test for update volume=xxx actiononpurge=yyy +- fix crash on exchange restore fail +- Update project 5 (ActionOnPurge) with current status +- chmod new regress script +- update regress + +17Oct09 +- Ensure that Ubuntu /var/run is created in init.d scripts suggested by nick + +- Implement Dan's suggestion to avoid IPv6 problems +- Add makeall for release +- On release, make only a simple tag +- Fix Windows build + +16Oct09 +- Don't download and build fakeroot as part of make -C platforms/osx +- More Win installer changes +- Update installer +- Do some additional checks. +- Add Marco's run regression wrapper script +- Possible fix for problem with Volume in another drive +- Make new configure from Marco's configure.in +- Try to unmount by running the unmount command specified in the config on file + and dvd devices. +- Fix switching read device reported by Nicolae Mihalache + +15Oct09 +- Lets free the variable in the right part of the loop. +- Use bfree_and_null +- Change bsnprintf to normal strncpy +- Add comments to fileregexp test +- Add more debug in filregexp test +- Eliminate compiler warning +- Fix bug #1391 Job status improperly set due to subtle variable overload + problem + +14Oct09 +- Add release scripts +- add regress variables to functions.pm + +13Oct09 +- Update date +- Fix stupid error +- Avoid error when git-version doesn't exist +- Convert some old %x into %p for pointers +- Fix #1352 about double free with regexp and big filenames on windows + +11Oct09 +- Small update to cache the current attrspace we are saving and make a private + copy of that string representation. +- Change acl and xattr errors from Qmsg to Jmsg +- Add comment +- Recursively call fix_device_block_write_error on error +- Fix changing read device during VirtualFull in SD +- Eliminate possible false error message +- Fixed ifdefs + +10Oct09 +- Add posix acls stored in xattrs on FreeBSD +- Removed last direct message output by replacing Qmsg by Mmsg + +09Oct09 +- Add comment on OpenBSD not supporting the extattr functions (anymore ?) +- Add new Feature Requests to projects file +- Fix ifdefs +- Change strcmp to bstrcmp +- fix bash/sh problem in disk-changer.in +- Fix some comments +- remove old db_get_int_handler() +- Add support when extattr_get_file is supported but extattr_get_link is + not. +- Regenerated config.h.in + +08Oct09 +- Implement transfer and listall command in mtx-changer and disk-changer + scripts +- Add new autochanger command to get all information about content + +07Oct09 +- As things are no longer as generic rename the generic to the linux interface + (b.t.w. 
OSX also implements this interface) +- Just as with serializing first unserialize all xattr structures in memory and + then walk that list for restoring them. +- delete the alist in the destroy function +- Implement the extattr xBSD interface Moved some code around to be able to + reuse the encoding and decoding for both the extattr and xattr interface Use + an alist for storing the xattr list so we don't have to count the number of + xattr first +- Use extattr interface for all xBSD filesystems. Added OpenBSD also to the + mix. +- As it seems FreeBSD doesn't have the llistxattr or listxattr interface as + Linux, OSX and NetBSD have but has an interface named extattr. This is a + first breakdown to implement that interface. + +05Oct09 +- Make Checksum as default when not using FileSet->Include->Options->BaseJob +- Change debug level +- Fix postgresql MD5 missing field +- Fix unwanted printout in strip test +- Fix concurrent Job recycle bug #1288 +- Fixed inverted logic +- Remove useless ScratchPool refs from Bat media list + +03Oct09 +- Little fixes for the action_on_purge command +- Remove obsolete truncate_on_purge settings +- Implement action_on_purge command +- Implement the ActionOnPurge setting for pools and individual volumes +- Implement truncate on purge setting +- Add a MaxVolBytes test that create over 100 Vols +- Add some error messages in insane buffer size cases +- Fix bug #1382 newly created disk volumes -> file not found warning + +01Oct09 +- Cause the tree widget selected to keep up with the tab widget selected + +30Sep09 +- Send checksum only when the FD will use it +- remove debug +- Allow for external CFLAGS to overwrite the CFLAGS in this script. + +29Sep09 +- Change grep -e to egrep and use -c instead of using wc. +- Implement console 'timeout' feature using bacula socket timers +- revert the update_bacula_table script change with bad indexes +- Add index modification in update_sqlite/sqlite3/postgresql scripts +- Remove the inx4 index for SQLite2/3 (FilenameId, PathId) on File table. + Replace it by an index over (JobId, PathId, FilenameId). + +28Sep09 +- Attempt to fix MySQL SQL error + +27Sep09 +- Make sure we use the right chmod for Solaris +- Fix Win32 build +- Fix compiler warning + +26Sep09 +- Remove gnome-console +- Update man pages +- Implement store_size32 and store_size64 +- Correct regression tests to use Pool 'File' + +25Sep09 +- Up max vol size +- Define File Pool and limit size to 5G +- Fix Alpha ifdefing -- should fix bug #1359 + +22Sep09 +- Remove the file_fp_idx index for Postgresql (FilenameId, PathId) on File + table. It is useless for common usage, and causes performance issues. This + index fragments over the time and the update cost grows very quickly. 
(This + index is not present on Mysql schema) + +21Sep09 +- update speed-test +- update speedtest +- speed-test: small fix + +20Sep09 +- Update to slightly newer config.guess and config.sub +- Fix compile error on Mac +- Generate new configure +- Reorder configure test for cleaner print output + +19Sep09 +- Implement support of keeping readall capabilities after UID/GID switch + +18Sep09 +- Remove redundant includes of pwd.h and grp.h headers +- find_one_file: Remove unreliable and redundant access(2) directory check +- Fix link rules +- Force nightly-disk regress to 3.0.3 + +17Sep09 +- speed-test: small fix +- speed-test: Add basejob argument to test basejob speed +- Add new test +- Fix conf to use make as requested by Martin Simmons a long time ago +- Restore comments +- Fix for sqlite, now it works +- Fix seg fault in ignoredir code +- more mkdir -p changes +- small fix +- Restrict Join using JobTDate to a selection of JobIds +- Fix for sqlite +- Make several dir names unique to avoid test collisions +- Fix and document new queries +- Use new query for Mysql and Sqlite for basejob + accurate +- update sql query for basefile+accurate +- Change regression scripts to use mkdir -p to avoid spurious errors +- Fix some job-start changes +- more job-restart code + +16Sep09 +- Fix sql query for mysql +- Fix sql query +- Fix sqlite3 syntax +- fix sql +- update db_get_file_list() to use StartTime instead of FileIndex + +15Sep09 +- Update restore menu 12 explanation + +12Sep09 +- Try to distinguish CentOS/RHEL + +11Sep09 +- Modify tools to be able to correctly install testls for regress +- Create bigendian() inline to simplify code +- Set Volume Poll Interval to 5 mins by default +- Apply Frank's slash patch + +10Sep09 +- Reduce runscript wait times +- Update Win32 README.mingw +- Add patch for more SD info fix +- Add patch for bug #1371 +- Fix seg fault in SD bug #1371 +- Add append log to tape SD tests +- Add more info when SD connection refused +- Fix possible termination problem + +09Sep09 +- Keep valgrind happy +- Fix Exclude Dir Containing ignored when scanning the top_level dir +- Update the new exclude-dir-test with multiple Include {} section and different + Exclude Dir Contain directive. +- Add patch for #1370 +- Fix #1370 about the implementation of the "Exclude Dir Containing" option on + FD. +- Add new exclude-dir-test +- Add patch for 3.0.2 release +- Fix #1369 about segfault when using ExcludeDirContaining before defining + Options{} block. +- Verify if the Exclude Dir Containing directive does his job +- Add a regress script for Exclude Dir Containing option + +08Sep09 +- Remove bigendian check from autoconfig system +- check endianness at runtime in md5.c +- Fix minor issues in the osx installer package builder. +- Fix -p/-P in bscan.8 man page + +07Sep09 +- fix compilation warning +- Make output of new commands .lsdir/.lsfile more usable. Fix Bvfs with new + db_list object +- Fix header defines +- Remove some #ifdef-ed code and make it just as the acl and xattr code use a + const bool to enable the code. +- Redone comments +- Move restore struct defintions to seperate include file. Small change to + acl.h and xattr.h to use define inline with other header files. +- Move finderinfo restore into seperate function so we can use cleaner coding + in the restore loop. +- Loose #ifdef and use const bool wrapper for some cleaner coding. 
+- Added support for acl and xattr regression test on Solaris +- Apply Marco's git format-patch patches for bugs #1365 and #1366 +- This patch should fix bug #1366 +- This patch should fix bug #1365 + +06Sep09 +- Commit Mac pathlen patch +- Increment minor version to avoid future conflict +- Fix bug #1368. Increase default path/file length to 2048 + +05Sep09 +- Add acl script code for Mac +- Up timeout for rewind and changer on btest script +- Try to fix zombies in next-vol-test + +04Sep09 +- Fix broken editing code +- Use edit routine instead of %f for rates +- Implement BlockChecksum in Device +- btape: Add speed command and test drive speed with Bacula blocks + +03Sep09 +- btape: Add speed command +- btape: Add speed information on qfill and raw command +- Commit patch for: Fix bug #1355 Director crashes with double free in Accurate + SQL query +- Include file,blk in btape fill output +- Create exit_code for fill with multiple tapes +- Fix #1364 and #1363 about compression buffer error. + +02Sep09 +- fix variable name +- Work on certification scripts +- Eliminate DDS-4 in favor of tape + create btape-test-changer test + +01Sep09 +- Many debug code fixes in regression scripts +- Attempt to fix SQLite3 seg fault in list routines +- Merge master with SF +- Update tape tests for hardware certification +- Rename .lsdirs/.lsfiles/.update to .bvfs_xxx to avoid confusion with future + commands +- Update tape tests for hardware certification +- Eliminate xattr.c compiler warning + +31Aug09 +- Prohibit copy and assignment in db_list_ctx +- Use new db_list_ctx class instead of bad POOLMEM* +- Exclude OSX resource forks from saving using the xattr code Exclude OSX acl + data from saving using the xattr code when normal acl mode is also enabled. + Make excluding certain xattr in the generic functions somewhat easier for + certain OS specific attributes. +- Fix Win32/64 build + +30Aug09 +- Remove old subroutine prototype +- prohibe copy and assignment in Bvfs module +- Fix bug #1355 Director crashes with double free in Accurate SQL query + +29Aug09 +- Fix bug #1357 Verify jobs fail when job has zero files +- Rework verify-voltocat-test and add to normal tests + +26Aug09 +- Release orphanned buffers in accurate code +- Fix possible seg fault in db_get_int_handler in accurate code +- Update JobBytes only for Verify jobs when computing checksum +- Add BaseJob option to fileset + +25Aug09 +- Suppress some error messages generated after cancelling a job + +24Aug09 +- Apply and commit Lorenz Schori patch for OSX +- Apply Lorenz' OSX patch +- update basejob code to use checksum during accurate check +- Close bug #1351 SQLite2 to SQLite3 conversion +- Remove unnecessary subroutine import definition + +23Aug09 +- Test of git commit. +- bat: fix variable name +- bat: Simplify the code to make TableWidget in read-only +- Free Volume in several places. Fixes virtual-changer problem and possibly bug + #1346. +- Add SD Volume debug code + +22Aug09 +- Don't print different filesystem. 
Will not descend message if directory + explicitly excluded +- capitalize bytes in message + +21Aug09 +- Prune a few dev tests +- Made shorter developers test +- Rework the bsock.h class to put public structures last +- Update AUTHORS file +- Integrate patch for building dmg on OSX from Lorenz Schori +- Add commas in num files for estimate command + +20Aug09 +- in status slots command, unlock db just after sql query + +19Aug09 +- Add BWEBCONF env variable to specify the bweb.conf path without touching the + Bweb.pm file with lighttpd +- Fix bat crash due to alignment diff in bat and core code +- Fix acl-xattr-test by using attr +- Add BWEBCONF env variable to specify the bweb.conf path without touching the + Bweb.pm file with lighttpd +- Ensure that timestamp put in SQL log + +15Aug09 +- prevent status page from requesting status before it has been brought to the + front +- Move global acquire lock to lock by device +- Restrict acquire to one job at a time +- Fix acquire.c locking +- Try to fix acquire not to block during despooling +- Add fullname parameter to btime in order to debug regress script. +- No need to flush the members of the xattr link cache as they are not dynamically + created. Small oversight from my side, was thinking I had a memory leak. +- fix couple of segfault in acl/xattr code +- Eliminate git pull in config_dart +- Some small fixes to the counting and some small relayout of small code + sniplets. +- Lets first output statistics before freeing them. Saves a rather stupid + segmentation fault. +- Fix merge conflict leftover +- Add all acl and xattr related variables which are either global or already + part of the JCR into a simple structure and reference that from the JCR + structure. + +14Aug09 +- Some small fixes to the counting and some small relayout of small code + sniplets. +- Lets first output statistics before freeing them. Saves a rather stupid + segmentation fault. +- Ignore regress time.out file +- Fix merge conflict leftover +- Add all acl and xattr related variables which are either global or already + part of the JCR into a simple structure and reference that from the JCR + structure. +- fix compilation problem reported by Dan +- Correct spelling of writing -- reported by Dan +- Loose void pointer and use C++ supported declaration elsewhere defined + structure. +- Move global variables into jcr private xattr data structure so we can have + parallel saves of xattr on solaris. + +13Aug09 +- update lock manager to get better traces +- Fix files wiped out by merge +- Missed commiting one file last time +- bat: Add a re-run button on job info page, that allows to run the selected + job with the same properties (level, pool, etc...) +- bat: small fix on hrule +- fix missing copyright + +12Aug09 +- Make new big-virtual-changer test. 
Test concurrency +- fix offset in new lsdirs/lsfiles command +- add comments and list special dirs in .lsdirs +- Add .lsfiles, .lsdirs, .update command to interface user with bvfs object +- More comments +- Update virtual regress comments +- bat: fix sql with volreadtime volwritetime bat: add drive=0 in update slots + command +- bat: Put the content panel as a child of the Storage item bat: use slot + selection in update slots command and in label command + +11Aug09 +- bat: fix volreadtime/volwritetime and messages in stderr about unknown + slots +- Change backup-acl to acl-xattr-test and make it skip if acl/xattr not installed +- Use tmp instead of /tmp + fix strip-test so diff works +- Make regexwhere use tmp rather than /tmp +- Remove non-portable i option on sed + create tmp dir +- connect button callback +- display content on doubleclick only if storage and autochanger +- bat: Make new autochanger content working + +10Aug09 +- bat: display a Warning icon when having Errors>0 and Status=T +- Pull Philipp Storz' bacula.spec changes for OpenSuSE build service +- try to make content table work!! +- Implement MaximumConcurrentJobs for SD devices +- try to insert data on table content +- update content interface + +09Aug09 +- continue storage content panel +- Fix bug #1344 show pool displayed wrong variable for maxvolbytes +- Fix compiler warnings in acl and xattr code +- Fix screw up with setting JobLevel and JobType +- Change version +- Fixed references to xattr_link_cache_entry to use xattr_link_cache_entry_t +- Changes due to code review by Kern added. +- Move enum from protos.h to filed.h and move defines to top of filed.h +- Added some more ENOENT supressing. +- Small indent change +- Small change to comments +- Added comment on entry points +- Added missing ENOENT switch case. +- Fix problem with counting xattr and returning a non-ok on count 0, also added + some extra comment and make sure the xattr_value_list is initialized to NULL + and checked on bail_out. +- Loose default_stream parameter. +- Changed snprintf to bsnprintf +- Use bstrncpy instead of strncpy +- Fixed Jmsg4 to Mmsg4 +- Added support for counting xattr errors and only print a limited set of + errors but count all. The lower level functions now store there error message + in jcr->errmsg so the upper level routines have access to them. +- Added support for counting acl errors and only print a limited set of errors + but count all. The lower level functions now store there error message in + jcr->errmsg so the upper level routines have access to them. +- Small changes to ifdef layout. +- Drop unneeded target for libbacfind.a +- Moved the berrno as a local variable of the function and not a local context + variable. Same as previous change for acl.c +- Make sure it compiles and moved the berrno as a local variable of the function + and not a local context variable. Moved tri-state enum to protos.h +- Moved list of supported xattr to start of per OS implementation like in + acl.c +- Imported patch from older git tree. 
+- bat: init storage content view + +08Aug09 +- Fix cats.h date +- bat: display a Warning symbole when having Errors>0 and Status=T +- Add data end tracking an attribute spooling +- Fix compiler warning in bvfs code +- Use 64 bit ftell and fseek in send_bootstrap_file +- Turn of some debug code +- Ignore src/tools/bvfs_test + +07Aug09 +- bvfs: Add example to list files versions bvfs: Fix directory listing bvfs: + Add limit/offset implementation to save resources on director bvfs: Create + cache tables on the fly when using Bvfs object (for testing) +- Bvfs: Create cache tables when updating the cache if they don't exist + +06Aug09 +- fix makefile for bvfs_test +- add option to truncate cache table during startup for bvfs_test +- add user handler to print directory add -j, -p to bvfs_test tool +- Document FT_DELETED FileIndex=0 special value in database Schema +- Add the rest of files for new Bvfs +- Add a new Bvfs class that implements brestore instant navigation cache inside + Bacula. Works for Mysql, Postgresql and Sqlite3 +- bat: fix compiler warning for unreferenced argument +- fix mysql case problem in bweb/bresto +- Use the sqlite3_changes to implement sql_affected_rows() +- remove QFormLayout, and use QGridLayout instead + +05Aug09 +- in bweb, fix warning +- add a default bconsole command to bweb starthttp script. + +04Aug09 +- remove Page Selector keyword from main bat screen +- fix small pb in bweb starthttp script +- document bweb+lighttpd +- update display_log to display a message if no logs are found +- Apply patch for regress from Frank Sweester +- connect button from mediainfo to real actions +- bat fill mediainfo fields +- bat: Go to the media info panel when double-click on job page or media + list + +03Aug09 +- Add new mediainfo panel to bat +- update windows bat.pro +- small fix for bresto +- update doc +- don't reset bstat, rstat, dstat in stop_bacula +- add test for list basefiles and new list files +- don't reset bstat, rstat, dstat in stop_bacula +- add list basefiles command make list files compatible with basefiles +- fix the basefile catalog update +- work on stats, fix bug with batch connection +- enable FileSet { Options { accurate = 'pm5' } } +- add variables for stats +- display a message with basejobids +- stop copy/migration using basejobs +- replace free&NULL by free_and_null +- fix +- display base jobs during restore +- fix segfault +- check size also replace mtime/ctime by lstat field +- check for purge job +- purge basefiles at the same time than files +- make regression ok +- use batch connection +- fix update_job_end +- update HasBase in catalog +- remove some debug +- seems to work:!!! 
+- make base backup work
+- update
+- init base file
+- compile
+- fix sql for postgresql
+- make sql part
+- take ideas from previous try
+- Remove useless title
+- fix layout, now ok
+- remove list volume menu from joblist
+- make double click work with joblist
+- add refresh and delete event
+- fix script name
+
+02Aug09
+- Remove old sqlite3 build from bacula.spec
+- Move bat from bacula.spec to bacula-bat.spec
+- Remove installing gconsole start script from Makefile.in
+- Split mtx into its own spec file
+- Remove docs from spec
+- Split docs into bacula-docs.spec
+- Move bat and mtx to their own spec files
+- Found a bug: the Joblist page would not come up unless dockPage was called
+  when the page was opened with initial parameters.
+
+01Aug09
+- Remove docs from spec
+- Split docs into bacula-docs.spec
+- Add bacula-docs.spec.in
+- Add new form to see job details
+- Add bacula-docs.spec
+- Remove bacula-docs.spec
+- Split docs into bacula-docs.spec
+- Remove bacula-docs.spec
+- Split docs into bacula-docs.spec
+- missing less than before history | grep svn :D
+
+31Jul09
+- Split docs out of bacula.spec
+- Insert the tabbed widgets as opposed to adding to the end.
+- Don't dock the page for dirstat and restore tree until the page selector tree
+  widget is clicked.
+- I like this sorting method better.
+- Update bacula.spec
+- Apply quick fix from Ulrich about the last feature.
+- Continue work on bacula.spec
+
+30Jul09
+- rename the free_and_null macro
+- fix test name
+- update new features
+- chmod +x regress/tests/multi-storage-test
+- Allow restore from multiple storage
+- Update News
+- update tool
+- Modify enable/disable commands to show only appropriate Jobs.
+ +29Jul09 +- Add ACL check for client in estimate command +- Change time_t by utime_t in accurate function +- Start reworking 3.0.2 bacula.spec file + +28Jul09 +- Display the job_zoom view when backup is finished + +27Jul09 +- Add DESTDIR to Makefile +- Add file to build html doc +- add changelog link +- Add changelog on the main menu +- update doc +- add downloads pages for all lang +- Remove the Patch section and add the new Download area +- Link to new downloads.php +- Add tool to convert nasty sourceforge download page to a simple one +- Update +- Update README for binary install tests + +25Jul09 +- change free_and_null function to a macro +- Add Spanish files translated + +24Jul09 +- Add format to a fprintf +- Attempt to fix SQLite seg fault problem +- Fix Expired option in media list to select Used media +- Set selection items on top. +- this should resolve issues that people are having with this splitter. If you + try to make it too large, it just snaps back. + +23Jul09 +- Fix int/int32_t problem in accurate_add_file +- Permit to filter on the directory name as for file +- fix bad english sentence +- add function to simplify free and null operation on pointers +- Remove Qt 4.4 code so it compiles on 4.3 (setHeaderHidden) + +22Jul09 +- Apply idea of part of Graham's tidy-bsr-source.patch, but modified +- Fix bug #1337 Console tries to build with SSL when libssl-dev not installed +- Modify setJobStatus so cancel has same priority as fatal errors +- Fix Solaris compiler warning in signal.c +- Change selection behavior in the director status screen. +- Do not restore spliiter settings if the settings do not contain settings for + that splitter. +- This is committing most of the patch received from Eric. + +21Jul09 +- Merge branch 'bwebII' +- use $conf, $tmp, $script, $rscript in accurate test +- Fix Spanish files translated + +20Jul09 +- es.po merged with bacula.pot 3.0.2 + +19Jul09 +- This is the change that may be used in the future to resolve the issue with + text input required during a console communication. +- Final changes +- Add es lingua +- Fix Spanish build +- Final changes +- Add first cut bsys spec +- update bweb INSTALL +- update INSTALL + +18Jul09 +- Add info on bweb + lighttpd +- Fix bat command line input bug +- Don't show the connection message after first connection is made + +17Jul09 +- update copyright year +- Optimize a bit the running job view +- Fix bresto file relocation box +- Update copyrights +- Put back old status icon +- Fix sql query for sqlite on suse10 on copy job +- Fix Spanish files translated + +16Jul09 +- Remove print and save menu +- Simplify the main screen, removing the status dir console icon and the "go + back" one +- fix spell +- fix case problem in field name +- update for status slots +- Replace info_msg by send_msg in status_slots for bat +- update ChangeLog for 3.0.2 +- document @help +- Ignore TAGS +- +x the test file +- test with file instead of vtape +- Add test for bug 1288 +- Add slot field in storage operations Update update volume page +- Use BUILD_DIR in default conf +- Fix #1323 about a problem when mounting a requested volume during a restore. +- Use the read as primary color in bweb +- Fix script for vtape tests +- Fix graph_font parameter to work with graph module Avoid warning messages in + apache log with graph module +- Update +- update INSTALL file +- update apache section in INSTALL file and try to choose better color for + graphs. 
+- Fix Spanish files translated + +15Jul09 +- Parameterize depkgs directory +- Add back bootstrap to FD +- Make CONF::init header and .c file agree about types +- update +- update cloud project +- Force the client_encoding to SQL_ASCII when database is already using this + mode. +- Fix #1335 about postgresql error message during copy session +- Add documentation about new restore menu +- remove non-essential directories +- Fix Win64 build +- Add more example SD Device configurations + +14Jul09 +- Apply patch in bug #1315 by McMichaeli that fixes scripts/logwatch +- Add more output when spooling and no space left +- fix next-vol-test +- Fix postgresql driver bug that displayed rows from time to time. +- Small fix on accurate code for new restore menu option +- Change default save dir to sbindir in bacula-dir.conf +- update project files for project 2 +- Should fix #1323 +- Implement project 2, about new restore menu +- Update date +- Remove some whitespace +- Send bootstrap directly from DIR to SD + +13Jul09 +- add -u to diff +- revert changes +- Update new features +- Create build scripts for Win64 somewhat equilavent to the Win32 ones + +10Jul09 +- Print correct JobId in bls, should fix #1331 +- Rebuild configure +- Apply python detect patch from Bastian Friedrich +- Add --with-hostname to ./configure + +09Jul09 +- More changes in es.po (37%) +- add --with-hostname to ./configure +- Changed ACL_OTHER into ACL_OTHER_OBJ as IRIX doesn't seem to have ACL_OTHER. + Fixes bug #1333 + +06Jul09 +- Remove non-portable reference to pthread mutex +- Display command output by default +- Add new Spanish files translated + +05Jul09 +- Turn off bat debug code +- Fix compilation bug +- Small change to allow the compiler to optimize it easier due to the constant. + +04Jul09 +- Add new Spanish files translated + +03Jul09 +- Correct registered trademark notice +- Should fix the first part #1323 about the restore option "List Jobs where a + given File is saved" wich display deleted files + +02Jul09 +- Add estimate accurate doc +- add accurate=yes/no test for estimate +- Update help page with accurate=yes/no +- Add estimate accurate=yes/no ebl Change the code to check jcr->accurate and + not jcr->job->accurate +- Add estimate test +- Make estimate command accurate compatible. Should fix #1318 +- Fix es concepts compile problem +- Update for Spanish +- Another fix for bug #1311 to get the correct last_full_time +- Add new Spanish files translated +- Update +- Add base of Spanish manual +- Add directory to Spanish docs; Spanish (es.po) 25% translated + +01Jul09 +- Fix false zombie detection in next-vol-test +- Add test with accurate mode (it shows the #1323 bug) +- Fix bug #1317 Allow duplicate jobs = no does not work +- Eliminate double job report when do_xxx_init() returns failure +- Fix regress tests to handle new default job name +- fix copy-volume-test +- update Item 2 +- Add debug code to MaxDiffInterval + +29Jun09 +- Change bacula-dir.conf default job name from Client1 to BackupClient1 +- Document #1318 about estimate and accurate mode + +28Jun09 +- Add a column for job type to the jobs table +- Fix missing case for NetBSD xattr restores. + +27Jun09 +- Fix Win32 build -- turn off lockmgr and remove lockmgr defs +- Added symbols required for win32 build + +25Jun09 +- Modify xattr.c and acl.c not to fail the job on errors. This should fix bug + #1305. + +23Jun09 +- Fix 2 rather big bugs in the xattr and acl code and fix a small memory leak + on a particular code path for Linux xattr/acl handling. 
+- update newfeature with project 37 +- Update FileSetId when initializing job. +- fix compilation problem +- add a note about /sys/class/scsi_tape/nst0/default_compression, fix #1314 +- Add '*' when volume is online when displaying volume list in restore. Should + complete project 31. + +21Jun09 +- remove bnet_fsend and use class call +- Re-fix bug #1311 if MaxDiffInterval exceeded ensure job upgraded + +20Jun09 +- Fix bat install for 3.x + +19Jun09 +- Add a boolean to console class to allow for the warning popup, except in + cases where I want to prevent the popup. So far this is only for purging + jobs. +- Correct a misspelling in a relatively trivial spot. +- Add job type and first volume of the job to the 4th pane the version table. + This is so that when looking at what version to restore with copy jobs the + user can make a better decision as to which job to restore from + +18Jun09 +- Back out previous maxdiff patch, which is broken +- Add all Job Types to job_type_to_str() for bat +- Commit maxdiff patch +- Fix bug #1311 if MaxDiffInterval exceeded ensure job upgraded + +17Jun09 +- Fix bug #1305 make errors obtaining acl during backup non-fatal +- Commit btape fill patch +- Fix bug #1309 inappropriate error message during btape fill command +- Fix bug #1307 AllowHigherDuplicates=no prevents automatic job escalation + +16Jun09 +- Update VirtualFull doc + BSys course dates + +14Jun09 +- move 3.0.0 patches to 3.0.x dir +- Add 3.0.x patches dir +- Update with 2009 Vote +- Forgot to add the images for the new button. +- Add a previous page button to the button bar. + +13Jun09 +- Add feature in pages to have a console command that does not set the console + current. Then also use that feature in joblist to purge or delete a job. +- Add a stringlist and a foreach after populating to at least acknowledge to + the user that a new fileset. Bat would not show the fileset until the database + table had the fileset which was not until used. + +12Jun09 +- Remove non-portable code referencing pthread_t fixes bug #1308. +- Remove non-portable code referencing pthread_t +- Create patch that may fix bug #1298 and bug #1304, which causes an SD crash + after canceling a job. + +11Jun09 +- When looking at what got backed up from a windows differential, found a bug + where it would create many "Bases". This fixes it subtly. + +09Jun09 +- Add checkbox widgets to filter out copy and migration jobs. + +08Jun09 +- Attempt to get bat conf file installation to work with DESTDIR +- Commit migrate patch for bug #1303 + +05Jun09 +- Improve error messages when a migration sql query is used and correct the + problem identified in bug #1303 with starting Job names containing spaces. +- Fix #1306 when building static bconsole + +04Jun09 +- Did not intend to leave those debugging lines there. +- fix vtape test to work with the latest trunk +- Reorder project file with poll result + +03Jun09 +- update + +01Jun09 +- Setting actions up like this I think is better. No restore from job or time + if more than one job selected. +- Add columns for first media and volume count. Change some semantics of how + signals execute functions when a selection list is changed. +- Prevent warnings from showing as dialog boxes that can interupt multiple + operations performed in a foreach. + +31May09 +- Sort the lists after population so that drop downs are sorted. + +30May09 +- A fix for mysql database sql syntax. +- Add columns to the tree to show read/write time, scratch pool, recycle + count. 
+- Somehow I was losing the refresh jobs action. This should keep it in. + +29May09 +- untabify +- Add ids on command.tpl to fix #1300 + +28May09 +- Correct incorrect placement of trap for m_firstpopulated. +- There was an issue with empty directories. The director would print node xxx + has no children and bat would interpret that as a directory. This fixes it + but it may not be the best solution. +- update + +27May09 +- Make sure that vtape directory is created during -tape tests ebl change grep + -e to grep -E to make source-addr-test working as expected +- Simplify mysql bresto part. Should be faster, but need tests + +26May09 +- On vacation I am having a little fun !! +- Add documentation for new directives DirSourceAddress and FDSourceAddress +- Apply Steve Polyack patch to add DirSourceAddress and FDSourceAddress directives. + That permits to choose the outgoing interface. +- make fileregexp-test working with git repository + +25May09 +- Apply Marco's patch: mvw Allow acl and xattr to be explicitly enabled and + fail the configure if we are asked to enable acl or xattr support and the OS + doesn't support acls or xattrs. +- Add more Spanish site modifications + +24May09 +- Commit changes for Spanish site + +23May09 +- Create Client record in database at startup -- makes bat work better +- Disable scratchpool-pool-test because it hangs +- Rework printed message when files are purged on restore +- Turn off useless End of file message during restore. +- When doing a tree selection restore, look at the PurgedFiles column in the + first JobId, and if non-zero, the Job was purged, so do not do selection +- Yet another try to get qmake to install bat correctly + +21May09 +- Add Catalog = all to the default Messages resource + +20May09 +- Add Spanish directory +- Fix race in director job start which was allowing the number of concurrent + jobs to exceed the maximum + +19May09 +- Comment out broken fake-autochanger change +- Speed up media list generation when using "compute with directories" option +- fix dates +- update +- Fix #1029 about resolution address order. Thanks to David. +- Add patch for #1029 +- During jcr destruction hold jcr_chaing lock only for minimum time necessary. + This should fix the SD deadlock in bug #1287. +- Simplify messages printed by SD when reserve fails. This should fix bug + #1285 +- Add sleep in vtape changer to simulate real hardware + +17May09 +- speed up mysql to compute restore table ebl speed up mysql when computing + media list ebl fix media list display ebl fix bresto problem with drag&drop + +16May09 +- Fix libxml dependency for rh7 per Pasi Kärkkäinen . +- Create archivedir + +15May09 +- Back out unwanted change. Should fix problem reported by Yuri +- add index on temporary table to speed up mysql part during restore +- Fix display of / on bfileview +- Yet another attempt to get qmake to generate valid Makefiles that installs + the binaries. It seems to require the binary to exist at qmake time +- Apply fix to sql_cmds.c suggested by Ulrich Leodolter which prevents restore by file selection from using Copy jobs. +- fix drag&drop + +14May09 +- Add new nagios_plugin_check_bacula.tgz from Masopust, Christian +- Reduce bconsole help to fit in 80 columns kes Add bconsole @help command kes + Fix Show FileSet command to handle spaces +- Allow specification of base daemon resource name. --with-basename= +- Fix bat to automatically use installed bat.conf +- bat not installed even if configured. 
Fix by working around apparent bug in + qmake + +13May09 +- update +- Turn on lockmanager when using DEVELOPER flag + +10May09 +- note that vtape is not ready for production +- remove unused file +- More scripting stuff + +09May09 +- More scripting parameterization +- More parameterization of scripts +- First cut parameterize better regression tests + +07May09 +- fix mysql problem with the restore query +- rename JobHistory to JobHisto + +06May09 +- update os table +- ebl update german version +- update supported os table +- change the ugly grey background on symbole and images to white +- small fix + +05May09 +- ebl Make working the "compute with directories" option --This line, and those + below, will be ignored-- + +04May09 +- Updates +- add screenshots +- Add screenshot and update french main page +- add screenshots +- update +- Add new images +- Fix ldconfig problem in client only build. +- don't display copies like in bconsole +- Added extra logging to determine the exchange server in use + +03May09 +- Fixes for client build. +- Update main page +- Update +- update +- Only define winapi's for 32 bit build. Already defined for 64 bit apparently... +- Change GetComputerName to GetComputerNameEx so that in a cluster we get the + name of the vserver instead of the local machine. Added prototypes for + GetComputerNameEx which appear to be missing. + +02May09 +- 3.0.1 +- Don't run backup at same time as restore to avoid deadlock in concurrent + test + +01May09 +- update +- Update documentation + +30Apr09 +- Ran into a problem deleting a volume. This fixes it. +- Update News +- Fix purged restore prompting +- Final changes + +29Apr09 +- update +- Add bypool option +- add bypool option +- Fix error message +- Fix bug #1282 Setting job.Priority in python crashes director by checking if + string addr is NULL. Not tested. +- Fix bug #1281 allow all on restore command line to restore pruned JobIds + without prompting. +- Add vectorized bat image + +28Apr09 +- Check for job_canceled() in fd_plugin code +- Update Win32/64 table creation to have new DB version 11 format +- Remove illegal Options in Exclude of default Win32/64 bacula-dir.conf +- This is the fix to http://bugs.bacula.org/view.php?id=1276. The select class + was just not working with the new requirements of setting and clearing the + notify. + +27Apr09 +- Fix bug #1274 where a migration job can be canceled like the original job by + the MaxRunTime directive. +- Added fix for bug #1275 where acl or xattr data is saved for virtual filenames + generated by filed plugins. + +26Apr09 +- Set slot max to 60 +- Permits to eject Used tape +- update ScriptAlias +- Remove Reposition info message +- Fix platform scripts not to clean configured files during 'make clean' use + 'make distclean' to clean everything. + +25Apr09 +- Add build tag for distribution name. + +21Apr09 +- Fix copyright + trademark name +- add doc for new VerId option + +20Apr09 +- Fix suggested in bug 1273 + +16Apr09 +- Add additional mysql connection debug code +- Fix bug #1246 Sometimes access denied with VSS enabled. UCS conversion cache + was not properly flushed at the end of a Job + +15Apr09 +- Update +- add missing BACULA define + +14Apr09 +- update +- Fix bug #1268 Full Max Run Time cancels jobs (when Max Run Time = 0). 
+- Ignore scripts/bacula-dir.conf.maxruntime
+- revert pool changes and correct the problem
+
+13Apr09
+- Add new maxruntime test -- not yet working
+- more fixes about pool=defaults
+- more fixes
+- fix more pool problems
+- fix pool problem
+
+12Apr09
+- specify pool in label process to avoid problem
+- use scripts/bacula-dir.conf instead of bin/bacula-dir.conf
+- Add debug message when checking database encoding
+- Test postgresql encoding at the start of the test
+
+11Apr09
+- Modify insertion of read Volumes in SD to be done before the drive reservation.
+  This ensures that a Volume to be read will not be reserved for writing.
+- Clean old patchnotes
+- Turn developer back on
+
+13Oct09
+Release Version 3.0.3:
+16Jul09
+Release Version 3.0.2:
+29Apr09
+Release Version 3.0.1:
+05Apr09
+Release Version 3.0.0:
+28Dec08
+Release Version 2.4.4
+27Oct08
+Release Version 2.4.3
+26Jul08
+Release Version 2.4.2
+07Jul08
+Release Version 2.4.1
+04Jun08
+Release Version 2.4.0
+25Jan08
+Release Version 2.2.8
diff --git a/INSTALL b/INSTALL
new file mode 100644
index 00000000..2dd8a48e
--- /dev/null
+++ b/INSTALL
@@ -0,0 +1,77 @@
+
+This file is rather out of date, and if you want to avoid a lot
+of pain, you should read the manual, which you can find at www.bacula.org
+
+
+--------------------------------------------------------------------------
+Using GNU autoconf
+--------------------------------------------------------------------------
+ 1. Read the Compiling and Installing section of the HTML manual
+    at http://www.bacula.org
+
+ 2. Run ./configure to generate config.h and the various Makefiles.
+    ./configure --help gives a list of possible options.  Note, in
+    configuring Bacula, you cannot get by with a simple ./configure;
+    it is much more complicated than that (unfortunately).
+
+    You might look at the "defaultconfig" file in the examples directory.
+    It is an example of what is probably reasonable for defaults.
+
+    Some systems require unusual options for compilation or linking that
+    the `configure' script does not know about.  You can give `configure'
+    initial values for variables by setting them in the environment.  Using
+    a Bourne-compatible shell, you can do that on the command line like
+    this:
+         CC=c89 CFLAGS=-O2 LIBS=-lposix ./configure
+    Or on systems that have the `env' program, you can do it like this:
+         env CPPFLAGS=-I/usr/local/include LDFLAGS=-s ./configure
+    Or if you're using a non-Bourne-compatible shell, you can do:
+         sh -c 'CFLAGS="-O2 -g" ./configure'
+
+    A typical Bacula development configuration for Linux is:
+
+    CFLAGS="-g -O2 -Wall" \
+        ./configure \
+        --sbindir=$HOME/bacula/bin \
+        --sysconfdir=$HOME/bacula/bin \
+        --with-pid-dir=$HOME/bacula/bin/working \
+        --with-subsys-dir=$HOME/bacula/bin/working \
+        --enable-smartalloc \
+        --with-mysql \
+        --with-working-dir=$HOME/bacula/bin/working \
+        --with-dump-email=your@address.com \
+        --with-job-email=your@address.com \
+        --with-smtp-host=localhost
+
+
+    See the README for a few additional details and the online manual
+    for all the gory details:  http://www.bacula.org/rel-manual
+
+ 3. Carefully review the output from ./configure. If it is not
+    what you want, re-run the ./configure. Often ./configure "caches"
+    things and thus remembers the last ./configure options. If you
+    want to be sure you are starting fresh after a ./configure,
+    do a:
+
+       make distclean
+
+    before re-running ./configure.
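+    For example, a clean reconfigure could look like this (an illustrative
+    sketch only -- adjust the paths and the configure options, such as
+    --with-mysql, to your own setup):
+
+         make distclean
+         ./configure --sbindir=$HOME/bacula/bin \
+                     --sysconfdir=$HOME/bacula/bin \
+                     --with-working-dir=$HOME/bacula/bin/working \
+                     --with-mysql
+         make
+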
"make distclean" wipes out any + knowledge of the ./configure, so don't do it after you have a + configuration that suits your needs, otherwise the "make" will + not work. + + 4. Set any other main preferences (normally, you don't do this): + Edit "config.h" if you didn't use ./configure options + If you're cross-compiling, edit the following in "config.h" + + 5. Build it (repeat step 2 as desired): + make + + 6. Install it + make install + + 7. Run it + cd $HOME/bacula/bin + ./bacula start + ./console + (enter commands) diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..022dd1ba --- /dev/null +++ b/LICENSE @@ -0,0 +1,786 @@ + + Last revision: 21 May 2017 + +Bacula is licensed under the GNU Affero General Public License, version +3.0 as published by the Free Software Foundation, Inc. ("AGPLv3"). + +Additional Terms on the work licensed herein, pursuant to Section 7 of +Affero General Public License are as follows: + + 1. The name Bacula is a registered trademark of Kern Sibbald, and Kern + Sibbald hereby declines to grant a trademark license to "Bacula" + pursuant to AGPLv3, Section 7(e) without a separate agreement with Kern + Sibbald. + + 2. Pursuant to AGPLv3, Section 7(a), in addition to the warranty and + liability disclaimers already found in AGPLv3, the copyright holders + specifically disclaim any liability for accusations of patent + infringement by any third party. + + 3. Pursuant to AGPLv3, Section 7(b), the portions of the file AUTHORS that + are deemed to be specified reasonable legal notices and/or author + attributions shall be preserved in redistribution of source code and/or + modifications thereof. Furthermore, when the following notice appears in + a source code file, it must be preserved when source code is conveyed + and/or propagated: + + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is conveyed + and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. + +Additional Permissions on the work licensed herein, pursuant to Section 7 of +AGPLv3 are as follows: + +1. As a special exception to the AGPLv3, the copyright holders give + permission to link the code of its release of Bacula with the OpenSSL + project's "OpenSSL" library (or with modified versions of it that use the + same license as the "OpenSSL" library), and distribute the linked + executables. You must follow the requirements of AGPLv3 in all respects + for all of the code used other than "OpenSSL". + +2. As a special exception to the AGPLv3, the copyright holders give + permission to link the code of its release of the Bacula Win32 File daemon + with the Microsoft supplied Volume Shadow Copy (VSS) libraries and + distribute the linked executables. You must follow the requirements of + the AGPLv3 in all respects for all of the code used other than for the + Microsoft VSS code. + +If you want to fork Bacula, please read the file LICENSE-FAQ. 
+ +The copyright for certain source files may include in addition to what is +listed above the following copyright: + + Copyright (C) 2000-2014 Free Software Foundation Europe e.V. + +The copyright on the Baculum code is: + + Copyright (C) 2013-2017 Marcin Haba + +Copyrights of certain "script" files such as headers, shell script, Makefiles, +etc ... were previously never explicitly defined. In almost all cases, +they have been copyrighted with a BSD 2-Clause copyright to make them +easier. However, as is the case of all BSD type copyrights you must keep +the copyright in place and on any binary only released the copyright notice +must also be released with the binaries. An example of such a copyright +is: + +# +# Copyright (C) 2000-2017 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# + +It is equivalent to the full BSD copyright of: + +===== +Copyright (C) 2000-2017 Kern Sibbald +License: BSD 2-Clause; see file LICENSE-FOSS + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +==== + +Note: the exact form of the copyright (dates, name, and text formatting) +might vary, but the intent is the same, namely that the full BSD 2-Clause +copyright applies. The file LICENSE-FOSS has a few more details. + + +###################################################################### +The entire AGPL is below, in the manuals distributed with the Bacula +documentation and can also be found online on the GNU web site at +www.bacula.org. You may also obtain a copy of the AGPL (or LGPL) by writing +to: Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +02110-1301 USA + + + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. 
By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. 
Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. 
You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. 
In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. 
+ + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +<http://www.gnu.org/licenses/>. diff --git a/LICENSE-FAQ b/LICENSE-FAQ new file mode 100644 index 00000000..7911fb46 --- /dev/null +++ b/LICENSE-FAQ @@ -0,0 +1,135 @@ + + Bacula License FAQ + +Note: This file is not the Bacula Community (bacula.org) license, +which you will find in the LICENSE file. This is a FAQ about the +Bacula AGPLv3 and the other third-party licenses used in certain +source code files of the Bacula software.
+ + + Affero General Public License +History: +The original Bacula code was Copyright Kern Sibbald and John Walker. +After November 2004, it became Copyright Kern Sibbald, and finally, +the copyright was transferred to the Free Software Foundation Europe +on 15 November 2006. The license was changed from GPLv2 to AGPLv3 +on 24 July 2010. The copyright Fiduciary License Agreement signed +between Kern Sibbald and the FSFE on 15 November 2006 was terminated as +of 3 March 2015. Code subsequent to 3 March 2015 is +Copyright Kern Sibbald. + +Trademark: +The name Bacula is a registered trademark of Kern Sibbald. + +"Fair use" of the trademark is permitted following standard customs +that any prominent use (e.g. cover of a book) or the first use of +the name Bacula will include a trademark symbol. If you fork the +Bacula project and make any significant changes to the functionality +of Bacula, to avoid confusion between your fork and Bacula, you must +give your fork another name. You are not required to remove all +references to Bacula in the source code or "fair uses" in the +documentation. + +=================================== + +What follows is information from the authors of the code: + +License: +To the best of our knowledge, all code used in Bacula, which is +copyrighted by a third party, has licenses that are compatible +with the OpenSSL license, and so given the exceptions that we have +made to the AGPLv3 (in LICENSE), Bacula can be freely linked and distributed +with the OpenSSL libraries, and in binary form with the Microsoft +VSS libraries. + +Intellectual Property rights: +Recipient understands that although each Contributor to Bacula grants +the licenses to its Contributions set forth herein, no assurances are +provided by any Contributor that the Program does not infringe the +patent or other intellectual property rights of any other entity. +Each Contributor disclaims any liability to Recipient for claims +brought by any other entity based on infringement of intellectual +property rights or otherwise. As a condition to exercising the rights +and licenses granted hereunder, each Recipient hereby assumes sole +responsibility to secure any other intellectual property rights +needed, if any. For example, if a third party patent license is +required to allow Recipient to distribute the Program, it is +Recipient's responsibility to acquire that license before distributing +the Program. + +Copyrights: +Each Contributor to Bacula represents that to its knowledge it has +sufficient copyright rights in its Contribution, if any, to grant +the copyright license set forth in this Agreement. + +Other Licenses used in some source files. Note, this list changes from +time to time, so it is not exhaustive: + +GPLv2 or later license: + src/tools/bsmtp.c + Copyright (C) 1997 Ralf S. Engelschall, All Rights Reserved. + (note, bsmtp.c does not use OpenSSL, nor is it used with the code + of any other part of Bacula) + +3-clause BSD License notice for inclusion with the binary: + src/lib/fnmatch.c + * Copyright (c) 1989, 1993, 1994 + * The Regents of the University of California. All rights reserved. + src/lib/fnmatch.h + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + +Permissive licenses: + Most Makefile.in, *.in, and script files are copyright BSD 2-Clause as noted + in each individual file.
+ + src/lib/crc32.c + Copyright (C) 2010-2015 Joakim Tjernlund + Under BSD 2-Clause + + src/lib/var.c/h + ** OSSP var - Variable Expansion + ** Copyright (c) 2001-2002 Ralf S. Engelschall + ** Copyright (c) 2001-2002 The OSSP Project (http://www.ossp.org/) + ** Copyright (c) 2001-2002 Cable & Wireless Deutschland (http://www.cw.com/de/) + + src/lib/bsnprintf.c + * Copyright Patrick Powell 1995 + + src/bregex.c/h + * Copyright (c) 1991 Tatu Ylonen, Espoo, Finland + + src/lib/sha1.c/h + Copyright (C) The Internet Society (2001). All Rights Reserved. + + src/win32/compat/getopt.c + "... licensed under IBM copyrights to use the IBM-provided source code + in any way he or she deems fit ..." + + src/win32/compat/sys/mtio.h (LGPL) + Copyright (C) 1996, 1997 Free Software Foundation, Inc. + + +Bacula can be enabled with data encryption and/or communications +encryption. If this is the case, you will be including OpenSSL code that +contains cryptographic software written by Eric Young +(eay@cryptsoft.com) and also software written by Tim Hudson +(tjh@cryptsoft.com). + +There are parts of Bacula that are licensed under the BSD 2-Clause so +that those files may be used in proprietary code to interface with +Bacula. + +Finally there are parts of Bacula that are in the public domain. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER +OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/LICENSE-FOSS b/LICENSE-FOSS new file mode 100644 index 00000000..6ed18585 --- /dev/null +++ b/LICENSE-FOSS @@ -0,0 +1,338 @@ + + FOSS Licenses used in Bacula + Update 21 May 2017 + +Note: This file corresponds to the Bacula Community (bacula.org) license. + +Trademark: +The name Bacula is a registered trademark of Kern Sibbald. + +"Fair use" of the trademark is permitted following standard customs +that any prominent use (e.g. cover of a book) or the first use of +the name Bacula will include a trademark symbol. If you fork the +Bacula project and make any significant changes to the functionality +of Bacula, to avoid confusion between your fork and Bacula, you must +give your fork another name. You are not required to remove all +references to Bacula in the source code or "fair uses" in the +documentation. + +Forking Bacula: +Bacula is Free Software/Open Source Software and as such you are +permitted to fork it and/or use parts of it. However, if you fork +Bacula or you use more than one or two lines of code from it, you +must respect the copyright which requires you to maintain the headers +containing the Copyright information intact on any files which you +use. If you take more than a couple of lines of code, you are required +to add the Bacula Copyright to your code (see the file: LICENSE or any +header of a file for what is necessary). 
+ +If you have previously forked Bacula, please be aware that the license +on the current code has most likely changed since your fork, and you +must respect the new license. + +If you are thinking that this requirement to maintain the license intact is +unusual or not open source, I would like to point out that most open source +licenses require this. One simple example is the BSD 2-clause license, +which is widely used, and which Bacula uses for most of the script files. + +=================================== + +What follows is information from the authors of the code: + +License: +To the best of our knowledge, all code used in Bacula, which is +copyrighted by a third party, has licenses that are compatible. + +Intellectual Property rights: +Recipient understands that although each Contributor to Bacula grants +the licenses to its Contributions set forth herein, no assurances are +provided by any Contributor that the Program does not infringe the +patent or other intellectual property rights of any other entity. +Each Contributor disclaims any liability to Recipient for claims +brought by any other entity based on infringement of intellectual +property rights or otherwise. As a condition to exercising the rights +and licenses granted hereunder, each Recipient hereby assumes sole +responsibility to secure any other intellectual property rights +needed, if any. For example, if a third party patent license is +required to allow Recipient to distribute the Program, it is +Recipient's responsibility to acquire that license before distributing +the Program. + +Copyrights: +Each Contributor to Bacula represents that to its knowledge it has +sufficient copyright rights in its Contribution, if any, to grant +the copyright license set forth in the LICENSE file. + +Notices: +The Bacula community version uses a certain number of files that have +FOSS (Free or Open Software) licenses. Many of these files come from and are +identical to the Bacula community code. Most 3rd party FOSS licenses require +no notification so they are not included here. + +However, the Bacula Community binary releases consist mostly of +binary files, with few source files (mostly scripts), and some of the FOSS licenses +such as the BSD (Berkeley Software Distribution) license require publication of +the copyright notices if the code is released in binary format. For example, +certain copyright notifications are simplified, such as: + +# Copyright (C) 2000-2017 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS + +This license corresponds to the following: + +===== +Copyright (C) 2000-2017 Kern Sibbald +License: BSD 2-Clause; see file LICENSE-FOSS + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +===== + +The release of binaries mentioned above is covered in point 2 (above) of the +license. This file contains a non-exhaustive list of such licenses. + +Database scripts (src/cats): +# Copyright (C) 2000-2017 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS + +Translations (po): +# Copyright (C) 2000-2017 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS + +# Copyright (C) 2010 Inteos Sp. z o.o. +# Copyright (C) 2010-2017 Kern Sibbald +# License: BSD 2-Clause + +# Copyright (C) 2005-2006 Free Software Foundation Europe e.V. +# License: BSD 2-Clause + +Manpages: +This man page document is released under the BSD 2-Clause license. + +Update database (updatedb): +# Copyright (C) 2000-2017 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS + +Scripts (scripts): +# Copyright (C) 2000-2017 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS + +Platforms (platforms): +# Copyright (C) 2000-2017 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS + +Libraries (src/lib): +fnmatch.c/h +/* + * Copyright (c) 1989, 1993, 1994 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Guido van Rossum. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +sha2.c/h +/* + * FIPS 180-2 SHA-224/256/384/512 implementation + * Last update: 02/02/2007 + * Issue date: 04/30/2005 + * + * Copyright (C) 2005, 2007 Olivier Gay + * All rights reserved.
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +bmtio.h +/*- + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)mtio.h 8.1 (Berkeley) 6/2/93 + * $FreeBSD: stable/7/sys/sys/mtio.h 139825 2005-01-07 02:29:27Z imp $ + */ + +bregex.c/h + * + * Author: Tatu Ylonen + * + * Copyright (c) 1991 Tatu Ylonen, Espoo, Finland + * + * Permission to use, copy, modify, distribute, and sell this software + * and its documentation for any purpose is hereby granted without + * fee, provided that the above copyright notice appear in all copies. 
+ * This software is provided "as is" without express or implied + * warranty. + +var.c/h +** OSSP var - Variable Expansion +** Copyright (c) 2001-2002 Ralf S. Engelschall +** Copyright (c) 2001-2002 The OSSP Project (http://www.ossp.org/) +** Copyright (c) 2001-2002 Cable & Wireless Deutschland (http://www.cw.com/de/) +** +** This file is part of OSSP var, a variable expansion +** library which can be found at http://www.ossp.org/pkg/lib/var/. +** +** Permission to use, copy, modify, and distribute this software for +** any purpose with or without fee is hereby granted, provided that +** the above copyright notice and this permission notice appear in all +** copies. +** + +lz4.c lz4_encoder.h lz4.h +====== +/* + LZ4 - Fast LZ compression algorithm + Copyright (C) 2011-2013, Yann Collet. + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html + - LZ4 source repository : https://github.com/lz4/lz4 +*/ +=== + +crc32.c +===== +/* + Bacula® - The Network Backup Solution + + Copyright (c) 2010-2015, Joakim Tjernlund + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ +===== diff --git a/Makefile.in b/Makefile.in new file mode 100755 index 00000000..af957e9d --- /dev/null +++ b/Makefile.in @@ -0,0 +1,224 @@ +# +# Master Makefile +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +@MCOMMON@ + +working_dir=@working_dir@ +dir_group=@dir_group@ +dir_user=@dir_user@ + +srcdir = @srcdir@ +VPATH = @srcdir@ +.PATH: @srcdir@ +topdir = @BUILD_DIR@ +thisdir = @BUILD_DIR@ + + +first_rule: all +dummy: + +# --client-only directories +fd_subdirs = src scripts src/lib src/findlib src/filed \ + @READLINE_SRC@ @BAT_DIR@ src/console @FD_PLUGIN_DIR@ + +# Non-client-only directories +subdirs = src/cats @DIRD_DIR@ @STORED_DIR@ src/tools + +all_subdirs = ${fd_subdirs} ${@ALL_DIRS@} manpages + +DIST = INSTALL README.configure configure Makefile Makefile.in ChangeLog + +DIST_CFG = autoconf/aclocal.m4 autoconf/configure.in \ + autoconf/config.h.in autoconf/acconfig.h autoconf/Make.common.in \ + autoconf/install-sh autoconf/mkinstalldirs + +doc_files = VERIFYING ChangeLog README ReleaseNotes LICENSE \ + LICENSE-FAQ LICENSE-FOSS INSTALL + +MKDIR = $(srcdir)/autoconf/mkinstalldirs + LIBTOOL_DEPS = @LIBTOOL_DEPS@ + +#------------------------------------------------------------------------- + +all: Makefile + @for I in ${all_subdirs}; \ + do (cd $$I; echo "==>Entering directory `pwd`"; \ + $(MAKE) DESTDIR=$(DESTDIR) $@ || (echo ""; echo ""; echo " ====== Error in `pwd` ======"; \ + echo ""; echo ""; false ) || false) || exit 1; \ + done + +depend: + @for I in ${all_subdirs}; \ + do (cd $$I; echo "==>Entering directory `pwd`"; $(MAKE) DESTDIR=$(DESTDIR) $@ || exit 1); done + +bacula-fd: Makefile + @for I in ${fd_subdirs}; \ + do (cd $$I; echo "==>Entering directory `pwd`"; \ + $(MAKE) DESTDIR=$(DESTDIR) all || \ + (echo ""; echo ""; echo " ====== Error in `pwd` ======"; \ + echo ""; echo ""; false ) || false) || exit 1; \ + done + +#------------------------------------------------------------------------- + +autoconf/aclocal.m4: autoconf/configure.in autoconf/bacula-macros/* autoconf/gettext-macros/* autoconf/libtool/* + +# Note, the following is needed in the above if ever any new macro is added.
+# However, at the current time, the -I libtool causes the autoconf/aclocal.m4 +# get messed up, so this is commented out +# cd autoconf && aclocal -I bacula-macros -I gettext-macros -I libtool + +configure: autoconf/configure.in autoconf/aclocal.m4 autoconf/acconfig.h autoconf/config.h.in + cd $(srcdir); + ${RMF} config.cache config.log config.out config.status src/config.h + ${RMF} -r autoconf/autom4te.cache autom4te.cache + autoconf --prepend-include=$(srcdir)/autoconf \ + autoconf/configure.in > configure + chmod 755 configure + ${RMF} -r autoconf/autom4te.cache autom4te.cache + +config.status: + if test -x config.status; then config.status --recheck; \ + else $(SHELL) configure; fi + +autoconf/config.h.in: autoconf/configure.in autoconf/acconfig.h + cd $(srcdir); + ${RMF} config.cache config.log config.out config.status src/config.h + autoheader --prepend-include=$(srcdir)/autoconf \ + autoconf/configure.in > autoconf/config.h.in + chmod 644 autoconf/config.h.in + +libtool: Makefile $(LIBTOOL_DEPS) + $(SHELL) ./config.status --recheck + +installdirs: + $(MKDIR) $(DESTDIR)$(sbindir) + $(MKDIR) $(DESTDIR)$(sysconfdir) + chmod 770 $(DESTDIR)$(sysconfdir) + -if test "x$(dir_user)" != "x" ; then \ + chown $(dir_user) $(DESTDIR)$(sysconfdir); \ + fi + -if test "x$(dir_group)" != "x" ; then \ + chgrp $(dir_group) $(DESTDIR)$(sysconfdir); \ + fi + $(MKDIR) $(DESTDIR)$(scriptdir) + $(MKDIR) $(DESTDIR)$(docdir) + $(MKDIR) $(DESTDIR)$(archivedir) + $(MKDIR) $(DESTDIR)$(bsrdir) + $(MKDIR) $(DESTDIR)${logdir} + -if test ! -d $(DESTDIR)$(working_dir) ; then \ + $(MKDIR) $(DESTDIR)$(working_dir); \ + chmod 770 $(DESTDIR)$(working_dir); \ + fi + -if test "x$(dir_user)" != "x" ; then \ + chown $(dir_user) $(DESTDIR)$(working_dir); \ + chown $(dir_user) $(DESTDIR)$(logdir); \ + fi + -if test "x$(dir_group)" != "x" ; then \ + chgrp $(dir_group) $(DESTDIR)$(working_dir); \ + chgrp $(dir_group) $(DESTDIR)$(logdir); \ + fi + +install: installdirs + @for I in $(doc_files) ; do $(INSTALL_DATA) $$I $(DESTDIR)${docdir}; done + @for I in $(all_subdirs); do (cd $$I && $(MAKE) DESTDIR=$(DESTDIR) $@ || exit 1); done + +uninstall: + @for I in $(all_subdirs); do (cd $$I && $(MAKE) DESTDIR=$(DESTDIR) $@ || exit 1); done + +install-autostart: + (cd platforms && $(MAKE) DESTDIR=$(DESTDIR) $@ || exit 1) + +install-autostart-dir: + (cd platforms && $(MAKE) DESTDIR=$(DESTDIR) $@ || exit 1) + +install-autostart-fd: + (cd platforms && $(MAKE) DESTDIR=$(DESTDIR) $@ || exit 1) + +install-autostart-sd: + (cd platforms && $(MAKE) DESTDIR=$(DESTDIR) $@ || exit 1) + +uninstall-autostart: + (cd platforms && $(MAKE) DESTDIR=$(DESTDIR) $@ || exit 1) + +uninstall-autostart-dir: + (cd platforms && $(MAKE) DESTDIR=$(DESTDIR) $@ || exit 1) + +uninstall-autostart-fd: + (cd platforms && $(MAKE) DESTDIR=$(DESTDIR) $@ || exit 1) + +uninstall-autostart-sd: + (cd platforms && $(MAKE) DESTDIR=$(DESTDIR) $@ || exit 1) + +Makefile: Makefile.in + cd $(topdir) \ + && CONFIG_FILES=$(thisdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status + +Makefiles: + $(SHELL) config.status + (cd scripts; \ + chmod 755 bacula sbtraceback mtx-changer bconsole devel_bacula) + (cd src/cats; \ + chmod 755 create_bacula_database update_bacula_tables \ + make_bacula_tables grant_bacula_privileges drop_bacula_tables \ + drop_bacula_database make_catalog_backup delete_catalog_backup) + @for I in @DB_BACKENDS@ ; do \ + (cd src/cats; \ + chmod 755 create_$${I}_database update_$${I}_tables \ + make_$${I}_tables grant_$${I}_privileges drop_$${I}_tables \ + drop_$${I}_database); 
\ + done + (cd src/qt-console; \ + chmod 755 install_conf_file build-depkgs-qt-console) + +clean: + @for I in ${all_subdirs} ; \ + do (cd $$I; echo "==>Entering directory `pwd`"; ${MAKE} $@ || exit 1); done + @(cd platforms; echo "==>Entering directory `pwd`"; ${MAKE} $@ || exit 1) + @$(RMF) *~ 1 2 3 core core.* config.guess console.log console.sum + @$(RMF) examples/1 examples/2 examples/devices/1 examples/devices/2 + @$(RMF) -r autom4te.cache + @find . -name ".#*" -exec $(RMF) {} \; + + +# clean for distribution +distclean: + @for I in $(all_subdirs); do (cd $$I && $(MAKE) $@ || exit 1); done + @for I in $(all_subdirs); do (cd $$I && $(RMF) startit stopit btraceback); done + @(cd $(srcdir) && $(RMF) *~ config.cache config.h config.log config.status config.out) + @(cd $(srcdir) && $(RMF) Makefile autoconf/Make.common) + @(cd platforms; echo "==>Entering directory `pwd`"; ${MAKE} $@ || exit 1) + @$(RMF) bacula fd Makefile startmysql stopmysql startit stopit btraceback + @$(RMF) bconsole gconsole + @$(RMF) *~ 1 2 3 core core.* config.guess console.log console.sum + @$(RMF) working/* kerns-* + @$(RMF) -rf txt diff src/python src/testprogs + @$(RMF) libtool + +devclean: + @for I in $(all_subdirs); do (cd $$I && $(MAKE) $@ || exit 1); done + @for I in $(all_subdirs); do (cd $$I && $(RMF) startit stopit btraceback); done + @(cd $(srcdir) && $(RMF) *~ config.cache config.h config.log config.status config.out) + @(cd $(srcdir) && $(RMF) Makefile autoconf/Make.common) + @(cd platforms; echo "==>Entering directory `pwd`"; ${MAKE} $@ || exit 1) + @$(RMF) bacula fd Makefile startmysql stopmysql startit stopit btraceback + @$(RMF) bconsole gconsole + @$(RMF) *~ 1 2 3 core core.* config.guess console.log console.sum + @$(RMF) working/* + +distdirs: + mkdir ../$(VERNAME); + mkdir ../$(VERNAME)/autoconf; + @for I in $(all_subdirs); do (cd $$I && $(MAKE) DESTDIR=$(DESTDIR) $@ || (echo "Failed to make distclean in $$I"; exit 0) ); done + +distcopy: + $(CP) -p $(DIST) ../$(VERNAME); + $(CP) -p $(DIST_CFG) ../$(VERNAME)/autoconf; + @for I in $(all_subdirs); do (cd $$I && $(MAKE) DESTDIR=$(DESTDIR) $@ || exit 1); done + +distrib: configure autoconf/config.h.in distdirs distcopy + +# ------------------------------------------------------------------------ diff --git a/README b/README new file mode 100644 index 00000000..65b441d7 --- /dev/null +++ b/README @@ -0,0 +1,72 @@ + +Your best bet to get Bacula compiled and running is +to read the online manual at: + + http://www.bacula.org + +You will save yourself a lot of pain if you read +the manual -- it is online at the above site. +Barring reading the manual, you might try the +following: + +To Configure it: + + CFLAGS="-g -O2" \ + ./configure \ + --sbindir=$HOME/bacula/bin \ + --sysconfdir=$HOME/bacula/bin \ + --with-pid-dir=$HOME/bacula/bin/working \ + --with-subsys-dir=$HOME/bacula/bin/working \ + --enable-smartalloc \ + --with-mysql \ + --with-working-dir=$HOME/bacula/bin/working \ + --with-dump-email=your@address.com \ + --with-job-email=your@address.com \ + --with-smtp-host=localhost + + +Build Bacula: + + make + + If you are doing a port, there should be no errors. The most + likely source of errors will probably come in the src/stored + directory in time.c or dev.c. There may also be problems in + lib/signal.c as I currently pull in all Linux signals, some of + which may not be available on your system. 
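+
+For reference, the configure example above uses MySQL as the catalog
+database. Below is a minimal sketch of the same kind of invocation using
+the PostgreSQL backend instead (the paths are illustrative and the option
+list is trimmed; run ./configure --help for the full list of options
+supported by your source tree):
+
+   CFLAGS="-g -O2" \
+     ./configure \
+       --sbindir=$HOME/bacula/bin \
+       --sysconfdir=$HOME/bacula/bin \
+       --with-postgresql \
+       --with-working-dir=$HOME/bacula/bin/working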
+ +To create the database: + + src/cats/make_bacula_tables + +To install: + + make install + +To start it: + + cd $HOME/bacula/bin + ./bacula start + +To start it (as a developer). This is not appropriate if you +are installing Bacula. + + ./bacula start + ./console + +To stop it: + + ./bacula stop + +Well, it is all just a bit more complicated than that, +but you should have the idea. + +Be sure you read the chapter about testing your tape drive! +Don't waste a lot of time setting up and running Bacula, if +your drive is not supported. + +FreeBSD users, pay special attention to the Device configuration +parameters needed to deal with FreeBSD tape drivers. + +License: +Please see the file LICENSE for the terms of the license. diff --git a/README.AIX b/README.AIX new file mode 100644 index 00000000..bcb767a7 --- /dev/null +++ b/README.AIX @@ -0,0 +1,56 @@ + +Here are a few tips from James MacLean on +making Bacula work on an AIX system -- 15 Mar 2003: + +Hi Folks, + +I expect that an AIX with all the latest patches and a proper gcc will +compile. It is a boring story as Kern knows :), but here are the +highlights todate. + +I have had success with : + +/usr/local/bin/gcc -v +Reading specs from /usr/local/lib/gcc-lib/powerpc-ibm-aix4.1.5.0/egcs-2.91.60/specs +gcc version egcs-2.91.60 19981201 (egcs-1.1.1 release) + +Which was installed from www-frec.bull.com. But with that release, it +collides with sys/types.h from AIX and I had to redefine on my own in +src/bacula.h : + +typedef int crid_t; +typedef int __daddr_t; +typedef unsigned int class_id_t; + +With that I have had all parts of bacula running and did backups and a +restore fine. + +So I then went to IBM's own (current) gcc : + +/opt/freeware/GNUPro/bin/gcc -v +Reading specs from /opt/freeware/GNUPro/lib/gcc-lib/powerpc-ibm-aix4.3.3.0/2.9-aix51-020209/specs +gcc version 2.9-aix51-020209 + +from http://www-1.ibm.com/servers/aix/products/aixos/linux/download.html + +And found that it was not creating libbac.a correctly. Since then I've +been trying to compile my own gcc only to find out last night after I +broke down and looked at the GCC docs that there was a known bug in the +AIX "as" compiler, for which there was a patch, and I have since applied. + +So now I am trying again to compile my own gcc, so that I can then try to +compile Bacula and be comfortable that all is well with the latest Bacula +and GCC. + +We are at AIX oslevel 4.3.3.0, so even though we brought it right up to +date 150 days ago, there is already 250Megs of patches for it :(. So there +may be more to this story :). + +Bottom line is, yes it does work :), but because of our intstallation +here, your setup might need to be tweaked to compile it... Then again it +may not :). + +How's that for a confusing Saturday story ;). + +take care, +JES diff --git a/ReleaseNotes b/ReleaseNotes new file mode 100644 index 00000000..aa88b1bf --- /dev/null +++ b/ReleaseNotes @@ -0,0 +1,2071 @@ + + Release Notes for Bacula 9.4.2 + +Release 9.4.2 + +This is a bug fix release for version 9.4.1. It includes a number of bug +fixes and patches. Thanks to the community for your participation. +9 bug reports were closed. This version should fix virtually all +the problems found on FreeBSD. 
+ +If you are trying to build the S3 drivers, please remember to use the +community supplied (from Bacula Enterprise) version of libs3.so found at: + +https://www.bacula.org/downloads/libs3-20181010.tar.gz + +04Feb19 + - Update Windows .def files + - Change create_postgresql_database.in script to be more flexible + - Implement eliminate verify records in dbcheck bug #2434 + - Enhance verify-voltocat-test to detect comparing deleted files + - Fix bug #2452 VerifyToCatalog reports deleted files as being new + - Use correct quoting for a character -- fixes previous patch + - Recompile configure.in + - Apply Carsten's multiarch patch fixes bug #2437 + - Apply Carsten's patch for adding CPPFLAGS to tools/gigaslam.c compile + - Allow . to terminate sql queries prompts + - baculum: Update Baculum API OpenAPI documentation + - Fix rwlock_test unittest bug #2449 Only call thr_setconcurrency if it's + available. Fix order of linking and installation. + - FixFix spelling errors found by lintian by Carston in bug #2436 + - Apply chmods from Leo in bug #2445 + - Add license files LICENSE and LICENSE-FOSS to the regression directory + - Display daemon pid in .apiV2 status output + - Attempt to ensure that ctest job output gets uploaded + - Apply varargs patch from Martin for bug 2443 + - Apply recv() hide patch from Martin + - Fix lz4.c register compilation from bug #2443 + - dbcheck: Improve error message when trying to prune Path records with BVFS is + used. + - Update cdash for version 9.4 + - Fix bug #2448 bregex and bwild do not accept -l command line option + - Partial update copyright year + - Fix struct transfer_manager to be class transfer_manager + - Print Device xxx requested by DIR disabled only if verbose is enabled in + SD + - Add migrate-job-no-resource-test to all-disk-tests + - Remove unused berrno call + return + - Remove mention of Beta release from ReleaseNotes + - Fix #3225 about Migration issue when the Job resource is no longer defined + - baculum: Fix restore paths with apostrophe + - baculum: Fix data level + - Change endblock edit to unsigned -- suggested by Martin Simmons + - Update DEPKGS_VERSION + - baculum: Adapt Apache configs to version 2.4 + +Bugs fixed/closed since last release: +2434 2436 2437 2443 2445 2448 2449 2452 3225 + +==================================================================== +Release 9.4.1 + +This is a minor bug fix release for 9.4.0. It should fix a few of +the warning messages, but not all, on FreeBSD and Solaris. More importantly +The ./configure process now properly detects that libs3 is installed +on your system. If you do not want to use the Amazon S3 driver, this +update is not required. + +In addition to this release, I have posted the current source code with +patches for libs3 to bacula.org. This package is needed if you wish to +build the S3 driver. You may download it from the following location: + +https://www.bacula.org/downloads/libs3-20181010.tar.gz + +21Dec18 + - Remove register attribute on variables as it is not supported by newer C++ + compilers + - Fix regression from 9.2 when backporting Enterprise code in bsock code + - Add missing default flag so that configure looks for libs3 + + +===================================================================== +Release 9.4.0 + +This is a major release comprised of more than +13,000 lines of differences since version 9.2.2. It has updates to Baculum +and small number of bug fixes and back ports from Bacula Systems Enterprise +since version 9.2.2, but primarily it has two new features ... 
+
+The main new feature is the addition of support for using Amazon S3 (and other
+*identical* S3 providers), and WORM tape cassettes. Note: Azure, Oracle S3,
+and Google S3 are not compatible with Amazon S3.
+
+16Dec18
+ - Add copyright and correct name on stop-restart-test
+ - Fix #4449 about an incorrect pool selected with the restart command
+ - Fix #4386 About incorrect permission on directories after a restore with
+   replace=ifnewer
+ - Fix bug #4379 certain fields of Media record not reset after Truncate command
+ - Revert "Update bdirjson.c"
+ - Improve volume truncation error messages
+ - Free ids buffer
+ - Update PO files
+ - Initial version and date update
+ - Initial cut of ChangeLog and ReleaseNotes
+ - Add use_dcr_only in cloud_dev.c so that manual truncate works
+ - More Enterprise backports
+ - More Enterprise backports + changes to the backporting
+ - Minor backport from Enterprise + my own changes
+ - Update bdirjson.c
+ - Add pseudo WORM support for vtape
+ - worm: Fix multiple display of the WORM Recycle message
+ - Add first cut cloud drivers
+ - Use bfopen in place of fopen
+ - Fix #3574 Add "clients" option to the "help list" output
+ - Add makedir() in fd_common.h
+ - Add bfile is_plugin_data() API
+ - Fix issue between FO_PORTABLE and FO_PORTABLE_DATA
+   to api
+ - Fix NOATTR detection
+ - Implement worm cassette support
+ - Make detection of duplicate M_SECURITY messages work
+ - Remove unused prototype recv(len)
+ - Add new security monitoring test
+ - Implement new message numbers in stored/block.c
+ - Fix incorrectly indicating: malformed message
+ - Fix bugs #2335 and #2349 Volume messages printed many times
+ - Add new test for bug printing many multiple Max Volume jobs= info
+ - Add warning message about failure to update volume info
+ - Improve error messages when JobMedia errors
+ - Fix compiler warning due to unused subroutine variable
+ - Fix bug #2334 seg fault when releasing globals
+ - Security: sleep(5) on error + aggregating identical messages
+ - Update sellist unittests.
+ - Update unittests for lockmgr.c and fix memory leak.
+ - Update unittests for ConfigFile/ini.c.
+ - Update 'rm -f' for libtool $(RMF).
+ - Correct libs/Makefile.in separator.
+ - Update htable unittests.
+ - Update sha1 unittests.
+ - Add fnmatch unittests.
+ - Update unit tests and add regression tests for it.
+ - Fix escaping special characters in bvfs restore for sqlite catalog
+ - Add new manual test
+ - baculum: Do not store any main oauth2 client nor main http basic user in api
+   config
+ - Fix tls_bsock_shutdown() compilation when no TLS available.
+ - Fix bsock compilation warning.
+ - Fix bsock compilation problem in *BSD.
+ - Permit negative FileIndex values in the catalog
+ - Fix format string is not a string literal (potentially insecure).
+
+ - baculum: Update Japanese translation files
+ - baculum: Fix availability web config wizard when there is problem with access
+ - baculum: Add new size directive control
+ - baculum: Fix basic auth user setting in API install wizard
+ - baculum: Fix undefined index error on web config wizard page
+ - baculum: Fix #2418 creating or updating new resource
+ - baculum: Fix size unit formatters in restore browser reported by Wanderlei
+   Huttel
+ - baculum: Fix logging output if it is not possible to decode to json
+ - baculum: Improve error handling in web part
+ - baculum: Fix formatted size and time values on the volume details page
+ - baculum: Fix saving logs when an error occurs
+ - baculum: API panel and wizard improvements
+ - baculum: Add name field to api client parameters
+
+Bugs fixed/closed since last release:
+2334 2335 2418 3574 4379 4386 4449
+
+====================== Release 9.2.2 ======================
+Release 9.2.2
+
+This is a minor bug fix release (6,143 lines of diff). The main fixes in
+this version are: eliminate most messages that are repeatedly printed,
+eliminate malformed message output, and fix an error when compiling
+without TLS, ...
+
+Note: if you are running MySQL and have not recently executed
+src/cats/update_bacula_tables, please do so. It will not change your
+database version but it will fix some potential MySQL problems (for more
+details see the release notes for version 9.2.1).
+
+06Nov18
+ - Fix bug #2421 by Adam about quoting Windows paths in CreateChildProcess()
+ - Update po files
+ - Implement new message numbers in stored/block.c
+ - Fix incorrectly indicating: malformed message
+ - Fix bugs #2335 and #2349 Volume messages printed many times
+ - Add new test for bug printing many multiple Max Volume jobs= info
+ - Fix compiler warning due to unused subroutine variable
+ - Fix bug #2334 seg fault when releasing globals
+ - Fix escaping special characters in bvfs restore for sqlite catalog
+ - Fix tls_bsock_shutdown() compilation when no TLS available.
+ - Fix bsock compilation warning.
+ - Fix bsock compilation problem in *BSD.
+ - Add new manual test
+
+ - rpm: Fix mysql dependency for bacula-postgresql
+
+ - baculum: Fix basic auth user setting in API install wizard
+ - baculum: Improve error handling in web part
+ - baculum: Fix formatted size and time values on the volume details page
+ - baculum: Fix undefined index error on web config wizard page
+ - baculum: Fix #2418 creating or updating new resource
+ - baculum: Fix size unit formatters in restore browser reported by Wanderlei
+   Huttel
+ - baculum: Do not store any main oauth2 client nor main http basic user in api
+   config
+ - baculum: Update Japanese translation files
+ - baculum: Fix availability web config wizard when there is problem with access
+   to api
+ - baculum: Add new size directive control
+ - baculum: Fix logging output if it is not possible to decode to json
+ - baculum: Fix saving logs when an error occurs
+ - baculum: API panel and wizard improvements
+ - baculum: Add name field to api client parameters
+
+Bugs fixed/closed since last release:
+2334 2335 2418 2421
+
+
+=======================================================================
+Release 9.2.1
+
+This is a bug fix release. It also contains some refactoring. That said,
+there are 10,909 lines of diff between release 9.2.0 and this release.
+One major improvement is that this release should eliminate the persistent
+problem we have seen with MySQL unhappy with zero DATETIME fields.
If you +have problems with that, please simply execute the script update_bacula_tables +found in the /src/cats library. It will modify the table default +values for DATETIME fields to be friendly to the whims of MySQL and MariaDB. + +12Aug18 + - baculum: Fix saving directives in messages resource + - Refactoring of BSOCK and introducing BSOCKCORE. + - baculum: Update API documentation + - baculum: Add status endpoint to available scopes endpoints + - Make print_ls_output identify delete files more clearly + - Backport stored/vbackup.c + - baculum: Add status director and status storage endpoints + - baculum: Add type and level filters to jobs endpoint + - baculum: Add support for .api 2 command in bconsole module + - Implement a keepalive on bpipe sockets fixes bug #2347 + - Backport bpipe enhancements + - Permit catalog to contain negative FileIndexes + - Fix bug #2319 wrong port value stored in bsock giving incorrect error messages + - baculum: Add to jobs endpoint filtering by client and clientid + - Fix bug #2410 bdirjson output incorrect for day greater than 24 + - Attempt to avoid MySQL complaints about not allowing zero or empty in DATETIME + - Add M_SECURITY when connection is bad + fix bug where invalid probes sent to + Dir + - baculum: Fix schedule single day value setting + - Fix bug #2286 copied jobs always have level=Incremental + - baculum: Fix add slot parameter to label command + - baculum: Fix restoring backup from deleted clients + - baculum: Fix click action on remove config resource button + - baculum: Fix framework validation for active list type controls + - baculum: Implement ideas from Wanderlei Huttel + - Fix bug 2395 problem with man dir + - baculum: Fix saving subresources in config + - Start work on HAVE_CLIENT_ONLY install + - Switch to using /lib/systemd/system to install service files + - Install Bacula systemd files in /etc/systemd/system + - baculum: Update Portuguese translations + - baculum: Fix group most recent backups option in restore wizard for mysql + - Fix bug #2404 uninstall systemd service + - Fix warning during compilations of mainwin.cpp + - baculum: Implement second part ideas and fixes proposed by Wanderlei Huttel + - Update catalog update scripts in updatedb directory + - Fix bug #2340. Display of db_driver + - Add warning messages for bad or old self-signed certificates + - baculum: Fix #2403 error while writing diraddress directive in Bacula config + - baculum: Implement ideas and fixes proposed by Wanderlei Huttel + - baculum: Update Portuguese translations + - baculum: Fix pool does not exist error on pool details page + - baculum: Fix create directive base method + - rpm: Fix MySQL dependency on bacula-postgresql package + +Bugs fixed/closed since last release: +2410 2389 2286 2319 2340 2347 2357 2403 2404 2405 2395 2392 + +===================================================================== + +Release 9.2.0 + +This is one of the biggest Bacula release ever made. It has +almost 540,000 lines of diff output between Release 9.0.8 and +this release. + +This is a major new release with a new version number. It has been +very thoroughly tested, but as always, please backup any previous +version and test this version prior to putting it into production. + +For the most part the changes were contributed to the Bacula +project by Bacula Systems SA and myself, but there were a number +of other contributors that I thank. + +Database Update +--------------- +There are no changes required to the catalog database. 
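+
+(Note: although 9.2.0 itself requires no catalog change, the 9.2.1 and 9.2.2
+notes above ask MySQL users to run update_bacula_tables. A minimal sketch of
+doing that by hand, assuming a configured source tree where the script has
+been generated in src/cats; credentials and exact paths depend on your setup:
+
+   # dump the catalog first, e.g. with your usual catalog backup job
+   # or the installed make_catalog_backup script, then:
+   cd src/cats
+   ./update_bacula_tables
+)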
+ +Compatibility: +-------------- +As always, both the Community Director and Storage daemon(s) must be upgraded +at the same time. Any File daemon running on the same machine as a Director +or Storage daemon must be of the same version. + +Older File Daemons should be compatible with the 9.2.0 Director and Storage +daemons. There should be no need to upgrade older File Daemons. + +20Jul18 + - Separate dequeuing msgs and dequeuing daemon msgs + - Replace uint with uint32_t + - Reset default status schedule limit to 30 + - Comment out use of uint that breaks Windows build + - Update win32 .def files + - Fix concurrent acquire/release of device + - Correct copyright + - Fix compiler warning generated by prior commit 1aad2088d21a3 + - Backport Enterprise src/findlib + - Backport Enterprise src/filed + - Backport Enterprise src/lib + - Add debug code for bug #2356 + - Fix bug #2385 -- compiler bug IMO + - fix #3945: Add "ocfs2" to list of filesystems known by "FsType" directive + - Backport parts of src/dird to community + - Use bstrcmp in place of strcmp + - Recompile configure + - Update config.guess and config.sub + - Fix #3615 about bconsole Socket errors reported in the bacula log file + - Fix permissions of mtx-changer.conf + - Use /dev/sg0 rather than /dev/sg1 so vtape devices work + - Make out of freespace non-fatal for removable devices -- i.e. behaves like + tape + - Pull latest tls*.pem from BEE + - Fix #3854 missing tls library initialization in bdirjson, bfdjson, bsdjson + and bbconsjson + - Fix bug #2212 where restore jobid=nn file=xxx restores the files twice + - Apply patch from Wandlei Huttel to add Run Time and suffix to Restored + bytes + - Fix bug #2343 where truncate of explicit Volume name truncates non-purged + volumes + - Fix some file execute permissions. Fixes bug #2389 + - Fix license problems in Bug #2382 + - Apply patch from Leo to fix bug 2192 + - Fix bad placement of jcr->pool reference as pointed out by Martin Simmons + - rpm: Add OpenSuse Leap 42.3 + - rpm: Update bacula.spec for Fedora 27 + - Fix #3824 about incorrect setdebug command description + - Fix Solaris 10 compilation error on BXATTR when no linkat(2) found. + - win32: Fix backup issue with path > 250 char + - Fix #3672 about bdirjson issue with the Autochanger directive + - Enable build of Windows 64 bit tray monitor + - Fix build of Windows tray-monitor + - Some changes to configure.in + - Update some old copyrights + - Update some old copyrights + - Fix showing PkiCipher and PkiDigest values in bfdjson output + - Fix buffer overrun at BXATTR_Solaris::os_get_xattr_names. + - Bring Branch-9.1 up to date with Branch-9.0 + - Fix #3745 update the client SQL record after a reload + - Fix 'grep -m' when '-m' option is not available. + - Update the build for ACL/XATTR support. + - Add some debugging information to bacl_solaris. + - Fix backup ACL/XATTR when fatal error and not only error. + - Fix Solaris XATTR support on Solaris 11. + - Fix compile error on !HAVE_EXTENDED_ACL + - Add some debugging messages. + - Fix compilation warning on FreeBSD. + - Add command to change the pool of a job and the associated volumes + - Fix #3593 VirtualFull will select jobs to consolidate using Job name in + addition to Client/FileSet + - Do not increment the device num_writers if the call to dir_update_volume_info() + fails + - Add prune option to select volumes from a/all pool(s) + - rpm: Add Fedora26-64 platform + - Add the RestoreClient directive for Restore job. + - Implementaion of .ls command for Plugins. 
+ - Use correct SQL table encoding for Postgresql 10.x + - Fix Where/Replace parameter displayed in the Restore job summary + - use pthread_kill() instead of pthread_cancel() to terminate SD_msg_chan + - Recompile configure.in + - Recompile configure.in + - Correction of my_name_is() function using realpath() + - Add a detection of realpath() function to configure. + - Fix tray-monitor compilation + - Use breaddir() in the tray monitor + - file_dev.c: replace readdir_r() wit new breaddir() + - new breaddir() function to replace readdir_r() + core update + - Fix #3098 Add debug tag 'record' for traces about records in the SD + - Fix #1826 Add Job Where and Replace variables to the Restore job summary + - Remove tests about "NULL Volume name. This shouldn't happen!!!* + options to api restore + - Port missing RestoreObject Plugin Config code from BEE. + - Enhance "status schedule" function to allow multiple job= and client= filters + - Add next_name() function to scan string lists + - Fix #1170. Enhance "status schedule" command. Display ordered output, add + Client and FileSet filters. + - bvfs: Add clients= option to .bvfs_get_jobids to handle clusters + - Add delete client bconsole command + - Fix #2910 about a problem in the "status network" command when the client is + not reachable + - Fix #1108 Enhance setdebug help command and console completion + - baculum: Fix SQL grouping error in restore wizard reported by Rasmus Linden + - baculum: Fix cancel button in web config wizard + - baculum: Web interface password is no longer stored in settings.conf + - baculum: Fix path validator for UTF-8 characters + - baculum: Add capability to set maximum numer of jobs visible in tables + - baculum: Add prune and purge actions to volume view page + - baculum: Fix compatibility with old api for prune and purge actions + - baculum: Update Portuguese translations + - baculum: Fix catching API exceptions + - baculum: Clean up theme Baculum-v1 + - baculum: Fix initializing new resource page + - baculum: Add button to set job log order + - baculum: Add manual loading CSS files with versioning + - baculum: Move API panel CSS files to separate directory + - baculum: Move Web CSS files to separate directory + - baculum: Fix not showing 'gui on' command in bconsole output + - baculum: Loading table data performance improvements + - baculum: Fix sending path load request by enter key + - baculum: Add patch to fix gettext class file in framework + - baculum: Add htaccess file to framework directory + - baculum: Update rpm and deb templates with apache and lighttpd config files + - baculum: Update example api endpoints + - baculum: Adapt Web and API to new framework version + - baculum: Updated PRADO framework to version 4.0.1 + - baculum: Highlight main menu items for subpages + - baculum: API v1 documentation as open API file + - baculum: Update Web requests form for the new API v1 + - baculum: New improved version API v1 + - baculum: Fix link to job history page + - baculum: Fix previous step button in restore wizard + - baculum: Enable debug for first config wizard run + - baculum: Fix directing to wizard if application config doesn't exist + - baculum: Fix opening configuration tabs bug reported by Heitor Faria + - baculum: Set curl connection timeout + - baculum: Show error message after connection to api test + - baculum: Update LICENSE file + - baculum: Solve old browser cache problem for javascript after upgrade + - baculum: New redesigned web interface + - baculum: Changes in api for the redesigned 
web interface + - baculum: Fix saving boolean values in schedule Run directive + - baculum: Add link to go back from job configuration window + - baculum: Add new volumes required api endpoint + - baculum: Add listbox control and use it for base and device directives + - baculum: Fix showing verify job fields in job run configuration window + - baculum: Revert back volume pool name in volume list window + - baculum: Fix error message about disabled bconsole + - baculum: API endpoints code refactor + - baculum: Add state, number, boolean and id validators + - baculum: Return bconsole disabled error if bconsole support isn't enabled + - baculum: Remove unused api endpoints + - baculum: Fix oauth2 client working in the web part + - baculum: Fix auth setting radio buttons alignement + - baculum: Enlarge interface height to 100% + - baculum: Add more information to cURL error + - baculum: Stop using hidden fields to store item identifiers + - baculum: Fix redundant loading users portlet + - baculum: Add required config fields asterisk mark + - baculum: New reworked restore wizard + - baculum: Wizards view improvements + - baculum: Add restore hardlinks support in api + - baculum: Add strip_prefix, add_prefix, add_suffix and regex_where restore + - baculum: Fix link to job history page + - baculum: Fix previous step button in restore wizard + - baculum: Enable debug for first config wizard run + - baculum: Fix directing to wizard if application config doesn't exist + - baculum: Fix opening configuration tabs bug reported by Heitor Faria + - baculum: Set curl connection timeout + - baculum: Show error message after connection to api test + - baculum: Update LICENSE file + - baculum: Solve old browser cache problem for javascript after upgrade + - baculum: New redesigned web interface + - baculum: Changes in api for the redesigned web interface + +Bugs fixed/closed since last release: +1108 1170 1826 2212 2343 2356 2382 2385 2389 2910 3098 3593 3615 3672 3745 +3824 3854 3945 + + +======================================================================= + +Release 9.0.8 + +This is a minor release that fixes a couple of bugs and corrects +some copyrights that were not totally correct. + +28May18 + - Fix bug #2212 where restore jobid=nn file=xxx restores the files twice + - Pull regression truncate-test from Branch-9.1 + - Apply patch from Wandlei Huttel to add Run Time and suffix to Restored + bytes + - Fix bug #2343 where truncate of explicit Volume name truncates non-purged + volumes + - Fix some file execute permissions. Fixes bug #2389 + - Fix license problems in Bug #2382 + - Apply patch from Leo to fix bug 2192 + - Fix bad placement of jcr->pool reference as pointed out by Martin Simmons + - rpm: Add OpenSuse Leap 42.3 + - rpm: Update bacula.spec for Fedora 27 + - baculum: Fix SQL grouping error in restore wizard reported by Rasmus Linden + - Update some old copyrights + - baculum: Update Portuguese translations + - Remove old Bacula Systems notices + +Bugs fixed/closed since last release: +2212 2320 2349 2354 2379 2382 2383 2330 2054 +2343 2369 2194 2359 2151 2366 2353 2381 2378 + + +======================================================= + +Release 9.0.7 + +This is a significant release because it now has the Windows code +reintegrated and updated to work with this version. Other than +Baculum updates and the new Windows update, there is no significant +change to the other code. 
+
+If you wish to use the Windows 9.0.7 File daemon binaries with
+your existing 9.0.x Bacula Director and Storage daemon, it should
+work fine, but this has not been tested.
+
+The 64 bit version of the Windows binaries has been installed and
+very quickly tested; as a consequence, please test it carefully before
+putting it into production. There seem to be some minor installation errors
+that are probably related to .conf files. Also, the Windows binaries do
+not yet contain the tray-monitor or the old Exchange plugin. Both currently
+fail to build.
+
+18Apr18
+ - Remove NSIS debug
+ - baculum: Fix opening configuration tabs bug reported by Heitor Faria
+ - Restore win32 dir from Branch-5.2 and update it
+ - Add Phil Stracchino's fix for Qt5
+ - baculum: Fix saving boolean values in schedule Run directive
+ - rpm: Add Fedora26-64 platform
+ - baculum: Add link to go back from job configuration window
+ - Use correct SQL table encoding for Postgresql 10.x
+ - baculum: Add listbox control and use it for base and device directives
+ - baculum: Fix showing verify job fields in job run configuration window
+ - baculum: Revert back volume pool name in volume list window
+ - baculum: Fix error message about disabled bconsole
+ - baculum: API endpoints code refactor
+ - baculum: Add state, number, boolean and id validators
+ - baculum: Return bconsole disabled error if bconsole support isn't enabled
+ - baculum: Remove unused api endpoints
+ - baculum: Fix oauth2 client working in the web part
+ - baculum: Fix auth setting radio buttons alignment
+ - baculum: Enlarge interface height to 100%
+ - baculum: Add more information to cURL error
+ - baculum: New reworked restore wizard
+ - baculum: Wizards view improvements
+ - baculum: Add restore hardlinks support in api
+ - baculum: Add strip_prefix, add_prefix, add_suffix and regex_where restore
+   options to api restore
+ - Port missing RestoreObject Plugin Config code from BEE.
+ - baculum: Stop using hidden fields to store item identifiers
+ - baculum: Fix redundant loading users portlet
+ - baculum: Add required config fields asterisk mark
+
+Bugs fixed/closed since last release:
+None
+
+
+==============================================================
+
+Release 9.0.6
+
+This is a bug fix and enhancement release. The two major enhancements are
+support for Qt5 in bat and the tray monitor, and support for OpenSSL-1.1.
+There were also a number of nice bug fixes. Thanks to the users who
+supplied patches for the enhancements and bug fixes. They are much
+appreciated.
+ + +19Nov17 + - Update AUTHORS for recent commits + - Remove incorrecly placed openssl-compat.h + - Add openssl-compat.h which went in wrong directory + - baculum: Add removing single resource + - baculum: Add module to check resource dependencies + - baculum: Fix saving names with spaces inside schedule Run directive + - baculum: Fix saving entire config by api request + - Backout vol size tests in previous attempt to fix bug #2349 + - Fix compiler warning in previous patch + - Apply patches from bugs #2325 and #2326 to fix FIFO bugs + - Fix bug #2315 INTEGER misspelled in update_sqlite3_tables.in + - Try to fix bug #2349 multiple recycle messages + - Add support for items with comma in ini_store_alist_str() + - Fix segfault after the reload of an incorrect configuration + - Add temporary fix to avoid a deadlock after a reload command on an incorrect + configuration + - baculum: Throw 404 error if service not known + - Fix race condition between setip and the access to CLIENT::address() + - Fix #3284 about Client address not reloaded properly + - baculum: Use home page url when an error is shown + - Fix bug #2346 Dir blocks when max reloads reached + - baculum: Send config to api server as json + - Remove enterprise code that breaks Mac install -- fixes bug #2351 + - Correct FS size calculation for FreeBSD, Solaris, and Windows + - baculum: Enable Portuguese language support in makefile + - baculum: Fix required directives in schedule resource configuration + - baculum: Fix saving messages resource + - baculum: Improve slow reloading config resource list + - crypto: remove most of OpenSSL initcallbacks for 1.1 + - Update ACL/XATTR code and define new ACL/XATTR API for Plugins. + - baculum: Fix numeric password setting bug reported by Heitor Faria + - crypto: convert EVP_PKEY access and remainings bits for OpenSSL 1.1 + - crypto: convert EVP_MD_CTX + EVP_CIPHER_CTX to OpenSSL 1.1 + - crypto: Use DEFINE_STACK_OF() + - crypto: Add a tiny OpenSSL compat level + - crypto: remove support for ancient openssl + - fix #3269 obey the user choice of "Are you sure you want to delete X JobIds + - Add restore wizard to the tray monitor. + - Preparation fixes: remove some warning + - Add ASSERTD() to track NULL Volume name error + - Add "noautoparent" restore command option to disable the automatic parent + directory selection + - Make qt-console compatible to Qt5 (Qt4 still work) + +Bugs fixed/closed since last release: +2315 2325 2346 2349 2351 + + +====================================================================== + +Release 9.0.5 + +This is an important bug fix release. In particular it fixes the cases +where Bacula would print a very large number of error messages. Additional +backported code from Bacula Enterprise is included as well as updates to +the rpm scripts. A number of minor Baculum issues have also been +corrected. + +01Nov17 + - Use if exists on dropping MAC table in postgres. Fixes bug #2314 + - Fix bdirjson display of Minutes. 
Fixes bug #2318 + - baculum: Set default language if no language set + - baculum: Fix language setting in api + - baculum: Update generated .mo files for api + - baculum: Add missing texts to translations + - baculum: Fix add to translation static texts on the api default page + - baculum: Fix missing session start + - Make verify job log same as other logs -- fixes bug #2328 + - Take a more conservative approach for setting killable true + - Add extra safety for readdir buffer + +31Oct17 + - Retab systemd/Makefile.in + - Don't require mount for @piddir@ + - Use Debian systemd start/stop scripts supplied by Sven Hartge + +29Oct17 + - Fix bug #2316 add CacheRetention to Pool + - Skip tape ioctls on FreeBSD when using a FIFO fixes bug #2324 + - Fix bug #2338 to not truncate already truncated volumes + - Remove some old C int code and use bool + +28Oct17 + - Remove unused lib/lz4.c.orig file + - Update AUTHORS file + - Mark Volume read-only only if no access rights or read-only partition + - Add -P daemon option to supress creating PID file + - Fix too big copy to test FD plugin_ctx + +26Oct17 + - Backport Enterprise code + +23Oct17 + - When read-only volume found mark it in catalog -- fixes bug #2337 + - Make out of space on partition fatal + - Fix bug 2323 -- loop exit condition was backward and add error message + - Add missing copy-plugin-confs for regress + - Fix bug reported by jesper@schmitz.computer where bat hangs on FreeBSD + +08Oct17 + - baculum: Fix reading and writing schedule resource + +15Sep17 + - baculum: Fix undefined offset error during saving director config + - baculum: Fix listing days of week in schedule setting + +14Sep17 + - baculum: Fix saving schedule run directive value + +12Sep17 + - rpm: Add missing script baculabackupreport and query.sql for Suse + - rpm: Add missing libbacsd* file and tapealert script to Suse rpm spec file + - rpm: Add missing libs bbconsjson, bdirjson and bsdjson to Suse rpm spec + file + - rpm: Add aligned plugin rpm spec file for Suse + - rpm: Add bacula-tray-monitor.desktop launcher in scripts directory + - rpm: Add Suse Linux ES 12.1 platform + +11Sep17 + - rpm: Add bacula-tray-monitor.desktop file in script dir + +Bugs fixed/closed since last release: +2314 2316 2318 2324 2328 2337 2338 + +================================================================= +Release 9.0.4 +This is a minor bug fix release. The main fix in this release +is to allow SQLite3 to work. + +Please note: SQLite3 has been depreciated for a long time. If the +community will step forward (as it did in this case) and prepare +the appropriate make_sqlite3_tables and update_sqlite3_tables files, +we can continue to leave the SQLite3 code in Bacula. However, we +strongly urge users to update to MySQL, MariaDB, and PostgreSQL, +which are our supported SQL databases. + +06Sep17 + - Update po files + - Fix SQLite3 upgrade tables script fixes bug #2306 + - baculum: Fix language setting in config file + - Upgrade to latest lz4.c to fix bug #2310 bus error on 64 bit Solaris + - Recompile configure.in + - Ensure systemd/bacula.conf is created by configure fixed bug #2307 + - Fix compiler warning noted in bug #2309 + - Fix SQLite3 Version bug #2305 + - Remove unused variable to elimiate compiler warning + - Recompile configure.in + - Fix #2925 Do not try to stop non backup jobs (virtualfull, copy, migration, + restore, etc...) 
+
+ - baculum: Fix broken symbolic links for lang files
+ - don't use add_event() when flag "l" is not set
+ - core: bwlimit measure bandwidth
+ - core: bwlimit handle backlog and allow burst
+ - Do not purge running jobs in autoprune
+
+Bugs fixed/closed since last release:
+2305 2306 2307 2309 2310 2925
+
+==================================================================
+Release 9.0.3
+
+This is a minor bug fix release.
+
+08Aug17
+ - baculum: Fix access denied error on api install wizard page
+ - baculum: Remove assigning to api host when user is deleted
+ - baculum: Fix empty admin setting
+ - baculum: Add ability to assign host to specific user
+ - baculum: Fix bconsole test connection for new api host that works with new
+   director
+ - baculum: Fix sqlite db support
+ - Fix bug #2301 Solaris Available space incorrectly reported by turning off the
+   output for Solaris
+ - Fix bug #2300 mount/unmount/release of single tape drive does not work
+ - baculum: Fix bconsole connection test in config wizard
+ - baculum: Fix writing config for schedule and message names with space
+ - bpipe: Fix compiler warning
+ - baculum: Fix drag & drop file version elements
+ - baculum: Add fileset info endpoint and use it in restore wizard
+ - baculum: Use client name instead of clientid and start using fileset to
+   prepare restore tree
+ - baculum: Remove fileset parameter from run restore
+ - baculum: Fix lstat regex pattern
+ - baculum: Get the most recent jobs by client and fileset or by clientid and
+   filesetid
+ - Fix: bug #3048: jobs are stuck in endless loop in reserve.c
+ - Add total time to test.out file
+ - baculum: Add restore job selection in restore job wizard
+ - Enhance verify job report from bug 2249
+
+Bugs fixed/closed since last release:
+2300 2301 3048
+
+
+===============================================================
+
+This is a minor bug fix release, but a few of the bugs are important.
+The main items fixed are:
+
+- Postgresql should now work with Postgresql versions prior to 9.0
+  Note: the ssl connection feature added in 9.0 is not available on
+  postgresql servers older than 9.0 (it needs the new connection API).
+- The issues with MariaDB (reconnect variable) are now fixed
+- The problem of the btape "test" command finding a wrong number
+  of files in the append test was a bug. It is now fixed. It is
+  unlikely that it affected anything but btape.
+- The bacula-tray-monitor.desktop script is released in the scripts
+  directory.
+- We recommend that you build with libz and lzo library support (the
+  developer packages must be installed when building, and the shared
+  object libraries must be installed at run time). However we have
+  modified the code so that Bacula *should* build and run with either
+  or both libz or lzo absent.
+
+23Jul17
+ - Use Bacula in place of Libz variables so we can build with/without
+   libz and lzo
+ - Apply ideas from bug #2255 prettier status slots output
+ - Configure and install bacula-tray-monitor.desktop
+ - Fix btape test which counted files incorrectly on EOT
+ - Fix bug #2296 where Bacula would not compile with postgres 8 or older
+ - Fix bug #2294 Bacula does not build with MariaDB 10.2
+ - baculum: Fix multiple directors support
+ - baculum: Fix showing errors from the API
+
+Bugs fixed/closed since last release:
+2255 2294 2296
+
+
+==================================================================
+
+Release 9.0.1 12Jul17:
+
+This is a minor bug fix release mainly intended to include the new
+tray-monitor files that were omitted.
The tray-monitor now builds +and runs at least on Ubuntu Linux. + +12Jul17 + - Remove two incorrect trailing commas in bsock.h + - Fix bug #2293 bad big endian detection in lz4.c + - Add new tray-monitor files that were omitted in the backport from Enterprise + - bvfs: Do not insert deleted directories in PathVisibility table + - Fix compilation for Debian Stretch with GCC 6.3 + +Bugs fixed/closed since last release: +2293 + +======== + +This is either the biggest Bacula release ever made or one of the +biggest ones. Even without the new Aligned Volumes source code, which +is substantial, there are over 400,000 lines of diff output between +Release 7.4.7 and the release of 9.0.0 + +This is a major new release with a new version number. It has been +very thoroughly tested, but as always, please backup any previous +version and test this version prior to putting it into production. + +For the most part the changes were contributed to the Bacula +project by Bacula Systems SA and myself, but there were a number +of other contributors that I thank. + +Database Update +--------------- +This version of Bacula requires a database update. So either you or the +installation process must apply the update_bacula_tables script. As a +precaution, please do a database dump or run your nightly database backup +prior to running the update script. + +Compatibility: +-------------- +As always, both the Community Director and Storage daemon(s) must be upgraded +at the same time. Any File daemon running on the same machine as a Director +or Storage daemon must be of the same version. + +Older File Daemons should be compatible with the 9.0.0 Director and Storage +daemons. There should be no need to upgrade older File Daemons. + +Packagers: +--------- +There are a good number of new binaries (e.g. bbconsjson, bdirjson, +bfdjson, and bsdjson) to install; a new tapealert script file that should +be installed; and some new shared objects (e.g. libbacsd). The +dvd-handler script has been removed. Note also to run the +update_bacula_tables script after having dumped the catalog to bring any +existing catalog up to the new version needed for Bacula 9.0.0. + +New Features: +------------- +Please see the New Features chapter of the manual for documentation on +the new features. The new features are currently only in the New Features +chapter and have not yet been integrated into the main chapters of the +manual. + +New Features (summary): +----------------------- + + - Major rewrite of the Storage daemon to: put all drivers in class + structures, provide better separation of core/driver code, add new + drivers (aligned volumes, cloud), simplifies core code, allows loadable + device drivers much like plugins but which are better integrated into + the SD. + - There are a number of new Bacula Systems whitepapers available on + www.bacula.org, and a few more will be coming in the next few months. + - New unique message id will be added to every message (designed but + not yet implemented). + +Core Features: + - Implement a drive usage counter to do round robin drive assignment + - Enhance functionality of TapeAlert + - Implement a "Perpetual Virtual Full" feature that creates a Virtual Full backup + that is updated every day + - Increase Director's default "Maximum Concurrent Jobs" setting from 1 to 20 + - Add "PluginDirectory" by default in bacula-sd.conf and bacula-fd.conf + - Add support for terabytes in sizes. Submitted by Wanderlei Huttel. 
+ - Restore mtime & atime for symlinks + - New "status network" command to test the connection and the bandwidth + between a Client and a Storage Daemon + - New Tape Alert tracking + - Loadable SD device drivers + - PostgeSQL SSL connections permitted + - JobStatistics improved + - DB update required + - Autochanger improvements to group Devices + - Improved .estimate command + - Comm line compression + - Separate bxxjson programs for Console, Dir, FD, SD to output .conf contents + in Json for easier reading with programs + - Read Only storage devices + + +Bconsole Features: + - Add "ExpiresIn" field in list and llist media output + - Add command to change the priority of a running job (update jobid=xxx priority=yyy) + - Add level= and jobtype= parameters to the "list jobs" command + - Add option to bconsole to list and select a specific Console + - Add shortcut to RunScript console commands. Submitted by Wanderlei Huttel. + - Display "IgnoreFileSetChanges" in show fileset command (#2107) + - Display PrefixLinks in "show job" output + - Display permission bits in .bvfs_decode + - Display the Comment field in "llist job" command + - Add "ActionOnPurge" field to "llist pool" command. Fix #2487 + - Add "long" keyword to list command, ie "list long job". This is + essentially an alias fo the "llist" command. + - Modify the "setbandwidth" limit parameter to accept speed input. ex: limit=10kb/s + - Modify the "setbandwidth" limit parameter so that the default + is no longer kb/s but b/s. + - Do not show disabled resources in selection list + - Fix bconsole readline and "dumb" terminal handling of CTRL-C + - Add the priority field to the .api 2 job listing output + - Improved restricted consoles when accessing catalog. + +Misc Features: + - New Tray Monitor program + - Client Initiated Backups + - Many performance enhancements + - Bandwidth limitation timing improved + - Global resource variables are not lost during a reload command + - Add -w option to btape to specify a working directory + - Enhance bls -D/-F help message + - The "list" command now filters the results using the current Console ACLs + - The WhereACL is now verified after the restore menu + + +02Jul17 + - Skip verify-data-test if not running Linux + - Skip lzo-test if lzo not in Bacula + - Remove double define HAVE_LZO in config.h + - Add documentation on baculabackupreport to delete_catalog_backup.in + - Install baculabackupreport and ignore script without .in + - Recompile configure.in + - Add Bill's baculabackupreport script + - Update po files + - Fix error in FreeBSD during maxtime-test + - Fix #2853 About character substitution for "virtual full" job level in + RunAfterJob + - Attempt to fix timing problem with console-dotcmd-test on FreeBSD + - Ensure we have a DIR connection in dequeue_messages + - Add more debug to regress for FreeBSD failures + - Fix #2940 Allow specific Director job code in WriteBootstrap directive + - Fix pragma pack to allow lz4.c work on Solaris and other machines + - baculum: Fix working logout button + - A more correct fix for lz4.c on Solaris 10 + - Remove use of #pragma pack in lib/lz4.c for Solaris 10 + - Recompile configure from configure.in + - Detect Solaris 10 + - Fix bug #2289 version 7.9 not compatible with old FDs -- comm compression + - Make getmsg.c compatible with old FDs + - Use one MAX_BLOCK_SIZE and set to 20M + - rpm: Add Fedora 25 build platform + - Remove vestiges of crc32_bad -- fixes Solaris build + - Fix #2890 about segfault in .status command on Redhat 5 32bit + - Add 
missing semi-colon in bsys.c + - baculum: Fix incorrect table name error during restore start + - Display the correct address in lockdump for db_lock() + - Fix getmsg to handle additional forms of Progress messages + - baculum: Fix double assets and runtime symbolic links in baculum-web deb + package + - baculum: Fix missing php-xml dependency in deb metafile + - baculum: Improve errors handling in API restore pages + - rpm: Remove libbacsd.la for both Red Hat and Suse + - rpm: Add missing libs bbconsjson, bdirjson and bsdjson + - rpm: Fix libstdc++ version in BAT spec file + - Fix some problems noted by clang + - baculum: Reorganize run job code + - baculum: Reorganize estimate job code + - baculum: Make get method definition not obligatory + - Make file-span-vol-test portable + - Attempt to fix deadlock in FreeBSD maxtime-test + - Do not produce error if MySQL database exists in create_mysql_database + - rpm: Add missing tapealert script + - rpm: Add missing libbacsd + - rpm: Remove dvd-handler script + - Fix bvfs queries + - Use FileId in place of Filename + - Revert "Put FilenameId in .bvfs_lsfiles output" + - Put FilenameId in .bvfs_lsfiles output + - Add more debug in src/cats/bvfs.c + - Fix bvfs_lsdirs and bvfs_lsfiles + - baculum: Add Japanese language support in deb and rpm packages + - Add DirectoryACL directive + - baculum: New Baculum API and Baculum Web + - Add forking info to LICENSE and LICENSE-FAQ + - Minor improvement to error message + - Fix race in steal_device_lock shown in truncate-concurrent-test + - Apply Marcin's fix for 6th week of the month + - Add new truncate test + - Retab Makefile.in in platforms/systemd.in + - Fix compiler warning + - Add FD backwards compatibility + - Fix regression minor scripting problems + - Fix #2807 about an issue with the show command when using incorrectly JobToVerify + directive + - Fix #2806 about the director service started before the database with systemd + - Update Dart control files + - Massive (70,000+ lines) backport of code from Bacula Enterprise 8.8. + See next line ... 
+ - Adapt update_bacula_tables scripts for catalog version 15 + - Allow to use Base directive in a JobDefs + - Add more debug to the bpipe plugin + - Enhance error message when packets are too big + - Add '.storage unique' bconsole command + - Allow to use ".jobs type=!B" to display specific job type + - Add lockdump storage daemon information + - Fix #2698 Display loaded driver list in status storage output + - Fix autochanger unload message that contains sometime an incorrect volume name + - Fix issue with open_bpipe() function that may flush stdio buffer if the + command is incorrect + - Fix unload tape messages to print correct volume + improve output format + - Fix unload/re-load same volume + - Fix DIR get unexpected "Connection reset by peer" for FD + - Fix #2548 about SQL connection leak with RunScript::Console commands + - Fix #2588 about segfault in bdirjson with JobDefs/Base directive + - Fix #2593 about incomplete jobs incorrectly rescheduled + - Fix #2629 about pool argument not listed in the "help cloud" output + - Fix #2632 about VolType not set correctly for Cloud volumes after a label problem + - Fix #2640 about a reference to the source directory in query.sql file + - Fix bug #2271 where poll interval causes tape mount message to repeat + - Fix segfault in bdirjson with incorrect configuration files + +Bugs fixed/closed since last release: +2271 2548 2563 2567 2588 2593 2602 2624 2625 2627 2629 2632 2638 2640 2646 +2698 2520 2559 2561 2582 2806 2807 2890 2289 2890 2853 2940 + +====================================================================== +======================================================================= + +Release Version 7.4.7 + +This is a minor bug fix release, which hopefully corrects a seg fault +on OpenBSD due to the new ACL/XATTR code, and it also fixes most build +problems on Solaris 10 as well as EPROTO on OpenBSD. + +There is one minor new feature that allows you to specify the query +item number on the bconsole query command line. + +15Mar17 + - Permit specifying query item number on bconsole query command line + - Fix Solaris 10 problems reported by Phil Stracchino + - Fix EPROTO on OpenBSD + +===================================================== + +Release Version 7.4.6 + +This is a bug fix release, which hopefully corrects a seg fault on OpenBSD +due to the new ACL/XATTR code, and it also fixes the large number of tape +mount messages that are repeated at 5 minute intervals due to a bug in the +poll code. Various small fixes for FreeBSD. + +Please note, the signature hash files (.sig) for the source code was +previously SHA1. For this and future releases we have changed it to be +SHA256. + +10Mar17 + - Fix bug #2271 where poll interval causes tape mount message to repeat + - Attempt to fix IPV6 not configured + - Possible fix for acl seg fault on OpenBSD where no acl code defined + - Change release digest from SHA1 to SHA256 + - Fix getnameinfo() for FreeBSD fixes bug #2083 + +Bugs fixed/closed since last release: +2083 2271 + +===================================================== + +Release version 7.4.5 + +This is a minor bug fix plus a significant total rewrite of the +ACL and XATTR code by Radoslaw Korzeniewski. 
+ +07Feb17 + - Correct wrong word in message + - Remove restriction on using the scratch pool that can + cause restore failures + - Remove debug code that breaks btape fill + - Initialize freespace_mutex fixes bug 2207 + - baculum: Update AUTHORS file + - baculum: Enable Japanese language on web interface + - baculum: Implement Japanese language support + - XACL - refactoring an ACL and XATTR codes. + - Revert "Warn of Storage Daemon version incompatibility if + label fails. Bug #2193" + - Make another attempt to resolve bug #2176 + - Warn of Storage Daemon version incompatibility if label fails. Bug #2193 + - Apply patch to list more pool info from bug #2202 + - Fix status alignment output reported by Wanderlei Huttel + + + +Release version 7.4.4 + +This is a bug fix release. + +20Sep16 + - Fix #2085 About director segfault in cram-md5 function + - Attempt to fix bug #2237 + - Recompile configure.in + - Fix systemd installation + - If using readline reset terminal at bconsole exit + - Fix compilation without SMARTALLOC + - Fix #2060 about SQL false error message with "update volume fromallpools" + command + - Fix spurious MD5 update errors when nothing changed should fix bug #2237 and + others + - Fix small memory leak with the restart command + - baculum: Update language files + - Fix #335 Avoid backups going to the scratch pool + - systemd: Give 3mins to the bacula-sd service to stop and close the dde + - Minor modifications to Ubuntu packaging + - Check if the ScratchPool points to the current Pool and print a warning + message in such case + - Fix #1968 print the ScratchPool name instead of just 'Scratch' + - Display PrefixLinks in "show job" output + - Add explicit LL to big integers to appease older compilers + - Enable the plugin directory for the FileDaemon by default + - Allow multiple mailcommand+operatorcommand in Messages. Fixes bug #2222 + - Handle NULL pointers in smartdump() and asciidump() + - Modify status to include Admin and Restore in Level field -- clearer + - Ensure that zero JobMedias are written for labelling + - Fix error message about the stream 26 (PLUGIN_NAME) in bextract + +Bugs fixed/closed since last release: +1968 2060 2085 2222 2237 335 + + + +Release version 7.4.3 + +This is a bug fix release. Most importantly, it fixes the new +GCC 6.0 aggressive compiler behavior that elides (deletes) code +written by the Bacula developers. There is no benefit to the +new GCC agressive optimization and it breaks a lot of programs +including Bacula. This problem showed up on ArchLinux and Fedora 24. + +17Jul16 + - Add LICENSE and LICENSE-FOSS files to the documentation + - Add shortcut to RunScript console commands. Submitted by Wanderlei Huttel. + Fixes bug #2224 + - Fail when multiple mailcommand and other strings are specified in .conf. Fixes + bug #2222 + - Add support for terabytes in sizes. Submitted by Wanderlei Huttel. Fixes bug + #2223 + - Add error message for truncate command when actiononpurge not set. Fixes bug + #2221 + - Fix optimization error with GCC 6.1 + - Fix compilation warnings with GCC 6.1 + - Explicitly create MySQL user in grant_mysql_privileges.in + +Bugs fixed/closed since last release: +2221 2222 2223 2224 + +New feature: + - There are two new Director directives that simplify doing + console commands rather than using RunScripts. 
They are + ConsoleRunBeforeJob = "console-command" + ConsoleRunAfterJob = "console-command" + + +=========================================================== + +Release version 7.4.2 + +This is an important bug fix release to version 7.4.1 mainly +fixes detection of MySQL 5.7 (as found in Ubuntu 16.04). Certain bug +fixes contributed by Bacula Systems. + +06Jul16 + - Fix #1926 about wrong duplicate job detection with Copy/Migration and + Backup jobs + - Recompile configure after db.m4 change + - Fix batch insert for MySQL 5.7 + - Fix zero level debug output -- now at 100 + - Fix #766 about Job logs displayed with unneeded linefeed + - Fix #1902 about a segfault with the "cancel inactive" command + - Fix bug where MySQL 5.7 is improperly linked on Ubuntu 16.04 + +Bugs fixed/closed since last release: +1902 1926 766 + + +================================================= + +Release version 7.4.1 + +This is a minor bug fix release to version 7.4.0. Most of the +fixes have been kindly contributed by Bacula Systems SA. + +31May16 + - Fix bug #1849 MySQL does not accept 0 for DATETIME default + - Modify the alist object to be reused after a destroy() + - baculum: Fix setting invalid timezone value for PHP + - Fix compilation for AIX + - Fix the restore termination string in the job report to take in account + JobErrors and SDErrors + - baculum: Show jobs for client + - Fix bconsole "llist job=" output + - Fix #146 about update volume command line usage + - bat: Fix #1066 about bad update pool command + - Fix #1653 about make_catalog_backup default user name + - baculum: Show jobs stored on volume + - Fix update Volume=x Slot=nn when Slot > MaxVols + - Set exit code for create_postgresql_database.in + - Fix bug #2197 -- build failure with --disable-libtool + - Fix bug #2204 -- superfluous END-OF-DATA in update_mysql_tables.in + - Convert a Migration job with errors into a Copy job + - Remove exporting add_mtab_item -- fixes bug #2198 + - Fix possible problem of show multiple resources + - Comment out tools/smtp-orig.c as it is for reference only + +Bugs fixed/closed since last release: +1066 146 1653 1849 2197 2198 2204 + +======================= +Release version 7.4.0 + +For the most part the changes were contributed to the Bacula +project by Bacula Systems SA. + +This is a new release with a new version number. It has been +very thoroughly tested, but as always, the new features may not +always work as expected. + +The Catalog database format has not changed since version the +prior release (7.2.0). + +Compatibility: +-------------- +As always, both the Community Director and Storage daemon(s) must be upgraded +at the same time. Any File daemon running on the same machine as a Director +or Storage daemon must be of the same version. + +Older File Daemons should be compatible with the 7.4.0 Director and Storage +daemons. There should be no need to upgrade older File Daemons. + +New features and changes: +Please see the New Features chapter of the manual for documentation on +the new features. The new features are currently only in the New Features +chapter and have not yet been integrated into the main chapters of the +manual. + +New Features and changes summary: +- Support for KFREEBSD OS +- Improved support for Clang +- Configure SSL connection to MySQL +- New chio-changer-freebase in examples/autochangers + New directives in bacula-dir.conf in Catalog{} resource + for the MySQL backend (not currently implemented for + Postgresql or SQLite). 
+ dbsslkey + dbsslcert + dbsslca + dbsslcapath + dbsslcipher +- examples/autochangers/rc-chio-changer removed +- examples/devices/DVD.conf removed +- updated copyrights +- Add "Expires in" to list and llist volumes +- Implement a more efficient Volume selection algorithm between DIR and SD +- Implement new list/llist command keywords: + order=asc|ascending + order=desc|descending + limit=nn + jobstatus= + Client= + JobErrors +- Implement new bconsole @tall command that outputs input and + output to console and terminal. Note, this also outputs + bconsole input commands. +- Implement MaxVirtualFullInterval +- Implement VirtualFullPool override +- Pool overrides work better +- Automatic selection of catalog from client where possible. +- Implement VerifyData level type for Verify jobs. + + +More detailed changes: + +14Jan16 + - Implement MaxVirtualFullInterval + - Update AUTHORS + - Ensure relabel has latest vol info may fix bug #1412 + - Change license as per agreement with FSFE + - Apply Carsten's patch that fixes bug #2192 builds on kfreebsd + - baculum: Enable Portuguese language on web interface + - baculum: Implement Portuguese language support + - baculum: Assign Baculum copyright to Kern Sibbald + - baculum: Fix sorting in restore by group most recent backups + - baculum: Fix restore group most recent backups for MySQL + - Fix FD DisableCommands + - baculum: Fix to change user password + - Add ExpiresIn field in list and llist media output + - Fix #1548 about Solaris SIGBUS with accurate mode backup + - Backport more Enterprise code to sql_list.c + - Add info message of #jobs consolidated in Virtual Full + - baculum: Unify user validation + - Add HasBase+Comment to llist Jobs + - Fix seg fault in btape fixes bug #2180 + - Fix slight error in autoprune -- should fix bug #2151 + - baculum: Add first unit tests + - Fix #1545 about fix in manual_prune.pl script with large number of volumes + - Fix false status output. 
Fixes bug #2103 + - Integrate patch into latest version, which fixes bug #1882 + - Fix bug #2090 correct detection of GCC + - Fix CLANG warning messages -- fixes bug #2090 + - Add new chio-changer-freebase from bug #2115 + - Applied modified patch from bug#2117 to fix bpipe end of stream + - Apply patch from bug #2165 to fix the update sqlite3 script + - Fix update MD5 failure bug reported by Peter Keller + - baculum: Add dashboard panel + - Patch to add MySQL ssl access + - Manually apply patch in bug #2156 to allow building on KFreeBSD + - Fix bug #2153 with patch submitted by Ana Arruda + - baculum: Switch to started job status just after job start + - baculum: Add possibility to open configuration windows from URL + - Fix restore when storage specified on command line + - Fix restore of Windows streams to non-Windows machines + - Implement level=Data to the Verify job + - Fix #1524 about bextract trace file location + - Fix truncate bug free_volume problem + - baculum: Remember sort order for data grids + - baculum: Improve size formatter precision + - baculum: Fix jobs count in job list + - baculum: Add jobbytes and jobfiles columns in job list + - baculum: Get system timezone for PHP if possible + - baculum: Fix restore when a lot of jobids given + - baculum: Set default job attributes (level, client, fileset, pool, storage, + priority) in Run job panel + - Fix truncate race bug #1382 + - baculum: Fix update pool action when no volumes in pool + - baculum: Split configuration windows into two tabs: actions and console + - baculum: Change default elements limit to 500 elements + - baculum: Add drive parameter to bconsole release command execution + - Fix #1470 Fix setdebug command when all components are selected + - baculum: Fix expectation failed error during restore + - Add new JOB_DBR field + - #ifdef out bpluginfo since it does not compile + - Fix #1449 about a FileDaemon segfault with the fstype option + - Remove vestiges of rechdr_queue hopefully fixes bug #2180 + - Apply bconsole manpage patch from bug #2182 + - Apply ppc64el configure detection patch from bug #2183 + - Fix #1414 When the FD is down, status dir now prints "is waiting for Client + xx-fd" + - Implement new options in list command + - Add @tall command to log both input/output in a log file + - Fix #1360 about bextract -t not documented in the man page + - Update spec file for latest OSX versions + - Fix compilation on MacOS + - Improve Jmsg in response(), display SIGNAL number when appropriate + - Avoid segfault in dump_block() when the block_len is invalid + - Fix #1368 about xattr error not displayed correctly at restore time + - Fix bug 2173 QT tray monitor can not be built due to missing files in configure + - Move plugin_free() in free_jcr() + - Fix bug #2083 -- Fix sockaddr_to_ascii for FreeBSD + - Fix fadvise bug found by Robert Heinzmann + - Fix compilation without zlib and lzo + - Fix compilation error with new fstype_cmp() function + - Fix compilation problem with AFS + - Fix compilation on Solaris/FreeBSD + - Fix segfault in open_bpipe() when the program is empty + - Modify find_next_volume_for_append() to not send the same volume twice + - Avoid string displayed in restore menu + - Do not update state file after a bacula-xxx -t + - Fix #804 about misleading message with the purge command + - Fix automount feature after a label command + - Reinsert tabs in systemd Makefile.in + - baculum: Provide LICENSE-FOSS file content in Baculum deb packages (copyright + file) + - Use Client Catalog resource in 
get_catalog_resource() if "client" is specified
+   in command line
+ - Fix #1131 about Job::Next Pool resource precedence over the Pool::Next pool
+   directive
+ - Fix #898 truncate volumes larger than 200 bytes
+
+Bugs fixed/closed since last release:
+1131 1360 1362 1368 1382 1412 1414 1449 1470 1524 1545 1548 1882 2083 2090
+2103 2115 2117 2151 2153 2156 2165 2180 2182 2183 2192 804 898
+
+
+
+================================================================
+Release version 7.2.0
+
+Bacula code: Total files = 733 Total lines = 303,426
+The diff between Bacula 7.0.6 and Bacula 7.2.0 is 254,442 lines,
+which represents a very large change, for the most part
+contributed to the Bacula project by Bacula Systems SA.
+
+This is a major new release with many new features and a
+number of changes. Please take care to test this code carefully
+before putting it into production. Although the new features
+have been tested, they have not run in a production environment.
+
+============== !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ===================
+New Catalog format in version 7.2.0 and greater
+-----------------------------------------------
+This release of Bacula uses a new catalog format. We provide a script
+(update_bacula_tables in bacula/src/cats and in bacula/updatedb) that
+will update from Bacula 3.x, 5.2, or 7.0 to version 7.2.0 format.
+The database upgrade is fast and simple. As always, we strongly
+recommend that you make a dump of your database prior to doing the
+upgrade.
+
+NOTE: The upgrade will work only for PostgreSQL and MySQL. Upgrading is
+not (yet) supported for SQLite3.
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+
+For packagers, if you change options, naming, and the way
+we link our shared object files, as at least one of you does,
+you are creating a situation where the user may not be able
+to run multiple versions of Bacula on the same machine, which
+is often very useful, and in addition, you create a configuration
+that the project cannot properly support.
+
+Please note that the documentation has significantly changed.
+You will need additional packages to build it such as inkscape.
+Please see the README and README.pct files in the docs directory.
+The packages come with pre-built English pdf and html files,
+which are located in the docs/docs/manuals/en/pdf-and-html directory.
+
+Packagers: please note that the Bacula LICENSE has changed; it is still
+AGPLv3 and still open source. A new requirement has been added which
+requires other projects using the source to keep the accreditations.
+
+Packagers: please note that the docs license has changed. It is now
+licensed: Creative Commons Attribution-ShareAlike 4.0 International.
+This is a common open source license.
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+Compatibility:
+--------------
+As always, both the Community Director and Storage daemon(s) must be upgraded
+at the same time. Any File daemon running on the same machine as a Director
+or Storage daemon must be of the same version.
+
+Older File Daemons should be compatible with the 7.2.0 Director and Storage
+daemons. There should be no need to upgrade older File Daemons. However,
+this has not been fully tested yet. Since we expect some problems, please
+test before putting it into production.
+
+New Features:
+Please see the New Features chapter of the manual for documentation on
+the new features. The new features are currently only in the New Features
+chapter and have not yet been integrated into the main chapters of the
+manual.
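+
+As a rough sketch of the catalog upgrade described above (assuming a
+PostgreSQL catalog named "bacula" and a configured source tree; script
+names and locations may differ for packaged installations):
+
+   # dump the catalog first, as recommended above
+   pg_dump bacula > bacula-catalog-before-7.2.0.sql
+   # then run the update script shipped in bacula/src/cats (or bacula/updatedb)
+   cd bacula/src/cats
+   ./update_bacula_tables
+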
Also, since there were so many new features, it is possible that +a few that previously existed in version 7.0.x are documented a second +time in the 7.2.0 new features section. + +More detailed changes: + +12Aug15 + - Put back missing close_msg(NULL) to flush daemon messages at job end + - Add LICENSE-FOSS and update LICENSE for baculum + - Backport from Bacula Enterprise +29Jul15 + - Fix max vol size test accidently deleted + - Remove gigaslam and grow on uninstall -- from bug report + - Revert to Branch-8.3 fd_snapshot.c + - Pull more recent changes from Branch-8.2 + - Fix bvfs_lsdir pattern parameter setting + - Remove CheckList nolonger used + - Revert "Use db_lock()/unlock() around JobMedia creation transaction" + - Fix #1099 about director crash with rescheduled jobs + - Fix #1209 about bat segfault when clicking on Media + - Qmsg(M_FATAL) set jcr->JobStatus to JS_FatalError immediately + - snapshot: Abort the job by default if a snapshot creation fails + - Revert to old SD-FD close session protocol + - Remove drive reservation if no Jobs running + - Remove filename patch + - snapshot: Try to detect LVM when the filesystem is ext3 or XFS + - Fix bad debug message in mac_sql.c + - Fix restore-multi-session test by incrementing found files only on next + file + - Add -T description in man pages + - Correct incorrect Fatal error message text in bsock + - mysql: Add support for multiple instances binary backup in the same fileset + - Fix compilation with new debug hook + - mysql: Avoid warning with abort_on_job plugin option + - Fix compilation after patch "prune volume yes" + - Do not print message about retention when using "prune volume yes" command + - Fix #536 about Copy/Migration/VF that should not use Client "Maximum Concurrent + Jobs" + - Fix potential segfault with unused ConfigFile objects + - Fix #1108 Enhance setdebug help command and console completion + - Add more JCR variables in lockdump procedure + - Fix error in update_postgresql_tables.in caused by bad search and replace + - Fix #1127 about the repositioning enhancement during restore + - Correct try_reposition() return code after a seek() + - Add position information in the block structure + - Fix a number of acl and xattr bugs + give more understandable variable + names + - Make btraceback.dbx and .gdb use new sql engine name + - Revert most of patch ef57e6c4 and replace with old cats code + - Revert useless parts of patch 08d8e2d29 + - Revert patch d7f71d2c94a and rewrite it using simpler public domain example + - Fix batch mode detection for SQLite3 + - Revert d9aa76fa and simplify + - Revert patch 30388e447fa3 + fix bug #1948 + - Use a more appropriate name for the acl context + - Use class pointer rather than jcr in src/lib/jcr.c + - Revert patch f294b276 + - Change B_DB to BDB to correspond to naming convention + - Add -T option in bacula-sd to use trace file + - Force use of newer TLS protocols + - Avoid problem with db_get_job_record() when SchedTime or RealEndTime is + NULL + - Update our regexec() to support NULL argument + - Add function to copy a file in bsys.c + - Fix bug 2141 fork before TLS initialization + - Update LICENSE-FOSS + - Change license on src/lib/crc32.c as agreed with the author, Joakim Tjernlund + - Update po + - More license updates + - Fix compilation + - Add read_control command between Plugin/FD and Storage Daemon + - Add .bvfs_get_jobs and .bvfs_get_bootstrap functions + - Fix compilation for Solaris9 + - Fix Makefile.in tabs + - Update Windows .def files + - More copyright notices + 
- Fix Windows plugin licenses + - Change license copyright for updatedb and qt-console/tray-monitor + - Change copyright for logwatch + - Update more copyrights + - Update copyrights in pebuilder + - Update plugin licenses + - Add copyrights + license to platforms + - Update copyrights in po + - More license clarifications + - One more copyright in src/cats + - Update src/cats .in file copyrights + - Compute Job "Compression Ratio" using SDJobBytes instead of JobBytes + - Get correct attributions for bsmtp.c + - Switch from LGPLv3 for scripts to BSD 2-Clause + - Fix segfault on dot commands used in RunScript::Console directive + - Fix patch c0f0e6c01c7 to optimize retries only for autochangers + - Fix #876 about SD reads too far with complex bootstrap + - Correct unmount test in dev.c + - Add debug JobId in next-vol-test script + - Fix patch c59e5da29 to not orphan buffers + - Fix bad implementation of enable/disable job,client,schedules + implement + enable/disable storage devices + - Implement enable/disable schedule and client + - Optimize Volume protocol when Volume not InChanger + - Do not trash existing record during label of new volume + - During accurate restore unstrip as soon as possible + - Better handline of no storage device found + - Fix #1075 The replace=never flag was not properly handled when combined with + database= option in mysql/postgresql plugin + - display timestamp in X_msg() in one single pass to avoid double flush() + - Update copyrights in scripts directory + - Fix bug #1083 RT14512 + - configure.in: new HAVE_FCNTL_LOCK detect fcntl() locking capability + - Fix #1008 about status storage that displays "Writing" and "Reading" information + for the same DCR + - Add new %E job code to use non fatal job errors in scripts + - Revert to old htable, but add 64 bit hash + - Fix possible race condition in smartalloc + - Refactor + optimize fstype.c + revert mntent_cache.c/h + - snap: Fix small initialization problem with LVM backend + - Fix compilation warning in bextract + - lock the pid file using fcntl(F_SETLK) + - bat: Fix segfault in client view when the Uname field is empty + - bat: Fix #1047 about segfaults in Client, Media and Pool view + - Revert patch 62ab7eb5 for filed/backup.c + - Revert patch 62ab7eb5 for filed/verify.c + - Refactor mount/unmount to use class calls + - Add return status to DEVICE:close and report error at end of Job + - Fix seg fault + - fix a Dmsg in match_bsr.c:match_volume() + - Fix #861 about bad help command on status schedule + - Add new cats header file + - Refactor DB engine to be class based + - Remove regression cancel_test from do_all + - Fix invalid .mod command in BAT during restore (bugfix #858) + - Use B_ISXDIGIT() in rangescanner + - Handle hex numbers in str_to_uint64() + - Fix prune-migration-test -- wait in wrong place + - fix MA 987 cannot copy/migrate jobs with a Level=VF in the job resource + - Fix basejob error caused by patch on bug #965 + - Allow to list restore jobs in llist jobid= command + - Fix #940 about segfault in bat when doing an "update slots" + - Fix #983 about segfault on win32 filedaemon when using bat to monitor the + status + - Fix #969 about a segfault while doing a cancel of a copy job + - Fill errmsg after an error with FETCH query in db_big_sql_query() + - Fix #965 about an empty error message after a problem when sending accurate + file list + - Fix #972 about segfault in show command used with multiple resources + - Work bsnapshot for SLES12 and fix issue with ZFS + - Fix small memory leak in 
cancel command with ujobid and job parameters + - Ensure that client resource is not freed during setbandwidth command + - fix errors in the use of a Mmsg() + - Use a specific mutex for auth instead of jcr->mutex + - update po + - Add missing call to free_jcr() in previous patch + - Lock the jcr when using sd_calls_client_bsock variable + - Ensure that only one thread can use the auth code in the Storage + - Fix #951 about SDCallsClient not used during restore jobs + - snapshot: Get the creation date from the zfs list snapshot command + - snapshot: Fix small issue with Name parameter in list snapshot + - Fix bsnapshot to return status=0 on error + - fix a mempool error at SD shutdown + - snapshot: Call support() only if the device is in the fileset + - snapshot: Avoid double / in path and files when volume is / + - Fix segfault with Console runscript introduced by "Stop ua commands if comm + line drops" + - handle ctrl-C and SIGTERM the same way in SD + - Startup scripts return proper exitcode for service restart action + - Implement tables configuration + - Add ReadBytes to FD status output + - Accept 0/1 for @BOOL@ type in ConfigFile module + - Set cmd_plugin only in pluginCreateFile if not SKIP/ERROR/CORE + - Fix #13680 about systemd message "unknown lvalue" + - Stop ua commands if comm line drops + - Fix weird compilation problem on rhel5 + - Display TLS information in status client/storage + - Fix rpms where unix user were not properly defined + - update extrajs package in debs/rpm package + - Fix segfault with new filesetcmd + - snapshot: Reset JobId in Snapshot table when deleting a job + - snapshot: Add ability to list snapshots from the FD + - snapshot: Add a confirmation message when pruning snapshots + - Add RunScript AfterSnapshot event + - Fix #431 About upon upgrade, RPMs resets group membership + - snapshot: Display bsnapshot error message if possible + - Fix jobmedia-bug3 + - Set error code in return from run regress script + - snapshot: More work on LVM backend and on list/sync commands + - snapshot: Add EnableSnapshot directive in fileset + - snapshot: Add errmsg and status to SNAPSHOT_DBR + - snapshot: Send SnapshotRetention parameter to the Client and work on the + prune command + - Add bacula-snapshot.spec + - Add disabled=yes/no in bsnapshot.conf + - Fix #875 about bvfs repeats the same output many times + - Revert "Storing the result in a local variable from sql_num_fields saves us a + lot of callbacks." + - Remove passing args to cats driver class initialization + - Simplify cats by eliminating the B_DB_PRIV class + - Convert more db funcs to class calls + - Add Snapshot Engine to bacula core + - Change more db calls into class calls + - Add files missed in last commit + - Convert db_lock/unlock to be called via class + - Fix small memory leak + - Remove more vestages of Ingres + - Fix #843 about "show storage" option missing in the help command output + - Use bzip2 for sles dependency + - Avoid warning with uninitialized variables + - update "help status" + - Revert "Small fix to Eric great patch for readline commandcompletion so it + also compiles on non gcc compilers." + - Separate out definitions into new header + - Remove bad restore.h + - Revert "Move restore struct defintions to seperate include file. Small change + to acl.h and xattr.h to use define inline with other header files." + - Revert "Fix MediaView::getSelection" + - Bat: ensure sufficient rows to display drives in storage display + - new MmsgDx() macro that combine Mmsg(errmsg, fmt, ...) 
and Dmsg in once + - add a ASEERTD() for DEVELOPPER + - Fix wrong KiB value + - Revert "Fix bug #1934 Wrong values at media list in BAT" + - Change bplugin_list to b_plugin_list which is more appropriate + - Remove Ingres related unused files + - Simplify rwlock coding + - Make subroutine names clearer + - Back out useless patches + - Put back old code prior to excessive edits + - Remove over complicated acl/xattr code + - Add license to files without any + - Fix #805 about nextpool command line argument not properly used + - Remove recursion from free_bsr() and free_bsr_item() to handle very large + BSR + - Avoid segfault in connect_to_file_daemon() when jcr->client is NULL + - #776 Volume created in the catalog but not on disk and #464 SD can't read an + existing volume + - Add schedule to show command tab completion + - Make global my_name hold 127 chars + - Mark file volumes that are not accessible in Error in mount_next_vol + - Fix #743 about bat permission conflict on /opt/bacula/etc + - Add copyright to Makefiles + - change in lockmgr.c to avoid the report of a memory leak in testls + - lib: integrate SHA2 into bacula + - Fix #747 about restore problem failing on "Unexpected cryptographic session + data stream + - Revert previous copyright accidentally changed + - Fix btape fill command by removing some debug code in empty_block() + - Add Accurate Fileset option "M" to compare ctime/mtime with the save_time + like with normal Incremental + - Add index on Job(JobTDate) to mysql catalog + - Fix bad check on bopen_rsrc return status. bug #2106 + - Do not stop the storage daemon startup if the File device is not yet accessible + - Fix double free in btape + - Fix failed mount request in btape fill test + - Avoid ASSERT() when using btape with vtape driver + - Possible fix for NULL client bug #2105 + - Fix compilation of Nagios check_bacula + - Add test for restict c99 in autoconf + - Allow to use device= option in release/mount/unmount command + - Fix #699 about duplicated job name when starting two jobs at the same time + - Fix #701 about status schedule missing from tab completion and correct job + filter + - remove autoconf/configre + - Fix #346 Add ipv6 support for Solaris + - Fix #692 about compatibility issue with community FD + - Fix new match_bsr patch + - Fix #588 Improve SD bsr read performance + - Fix ownership bug in html-manuals package + - Add EFS in the client status flag list + - Implement Win EFS Support + - Fix QT windows build for 32bit + - Add SLES113 to spec files + - Add @encode and sp_decode functions for plugins + - Fix tls-duplicate-job seg fault + harden pthread_kill() code + - Update plugin version to ensure 8.0 will not load 6.6 plugins + - Add JobBytes and ReadBytes to llist jobid= output + - Rewrite store_drivetype and store_fstype to allow a comma separated list of + items + - Fix #633 about JobDefs MaximumBandwidth Job inheritance + - Fix possible editing truncation due to 32 bit calculations + - Remove non-portable -ne in echo + - update po + - Add Makefile for mssql-fd plugin + - Improve error message of open_bpipe() on win32 + - Add jobid= parameter in .status dir running command + - Add worker states + - Pull latest worker files from development branch + - Add comment about incorrect scripting + - Put Dsm_check() on reasonable debug level + - Remove auto-generated tray-monitor.pro.mingwxx file + - Display message about MaximumBlockSize default value only if a value was + specified + - fix solaris : replace be64toh() by unserial_uint64() + - update SD 
<-> SD capabilities exchange + - Handle RestoreObjects with Copy/Migration jobs + - Add free list to worker class + - Fix bad caps with SDcallsClient + debug + fix seg fault on connection error + - Implement blowup=nn for FD and hangup+blowup for SD + - Correct bat copyright + - Change sizeof expressions to be more standard + - Remove regress trap that causes sd-sd-test to fail + - Dmsg was not handling tag anymore + - Fix for SD seg fault while swapping volumes + - Make bextract able to handle dedup streams + - Remove unused file + - Make sure mount_next_read_volume() will cancel the current job + - Forbid llist command in runscript + - Fix #295 about query file message + - Add no_mount_request to DCR + - Update Windows .def file + - Add spec file for redhat/suse html manual package + - Fix bug #2091 bad vtape device definitions + - Fix bug #2089 compiler warning + - Make sure level is tag free when printing debug message + - fix tags in Dmsg + - Regenerated configure script + - Remove spaces at the end of lines in Bat file + - Revert bat.pro.in file + - Fix recursive echo bug #2088 + - Add new fifo class flist.h/c + - Allow to create temp DEVICE from DEVRES + - For bat always use g++ + - Make selection by Volume Name or MediaId a bit clearer + - Optimize Dmsg() with tags by keeping current tags into a separate variable + - Make message more understandable + + +========================================================================= + +Bugs fixed in this version: +1099 1209 536 1108 1127 876 1075 1083 1008 1047 861 858 965 940 +983 969 965 972 951 13680 431 875 843 1934 805 776 743 + + + + + + +================= Old 7.0.x Release ==================================== + +Release version 7.0.5 +This is an important bug fix release to version 7.0.4. Since it fixes several +major problems. We recommend that everyone upgrade to this version. + +28Jul14 + - Fix #547 by adding .schedule command + - Update AUTHORS + - Fix bug #2079 with patch from Robert Oschwald + - Fix orphaned file descriptors during errors + - Yet another client==NULL + - Improve FD and SD cancel + - Jim Raney's TLS patch + - Fix bug #1679 pool overrides not shown in manual run display + - Attempt to avoid client==NULL + - Fix for bug #2082 (hopefully) + - Fix seg fault in jobq.c + - make stop after first error + - Increase status schedule days from 500 to 3000 + - Remove bad cherry-pick + - Fix compiler warning + - Allow options create_postgresql_database from patch in bug #2075 by roos + - Fix bug #2074 crashes when no conf file present + - Set pthread id in jcr at beginning so the job can be canceled. + - Fix possible heartbeat interval timing problems + - Fix some errors reported by valgrind. May fix the problem with bsmtp command. 
+ - Ensure b_sterror() is using the correct size of input buffer + - Fix possible seg fault + - Fix segfault when trying to stop the bnet_server thread in terminate_stored() + - Fix bad link bug #2076 + - Fix compilation of bsock.c when TLS is not available + - Correct L suffix to be LL + - Fix bad copy/migrate data header + - On termination shutdown thread server + - baculum: Updated README file + - baculum: Update English language texts + - baculum: Saving auth file for web server HTTP Basic auth + - baculum: Added directory for web server logs + - baculum: Added example Lighttpd configuration for Baculum and sample web + server auth file + - Expanded auth error message + - baculum: Support for web servers which do not provide direct info about HTTP + Basic auth + - Fix limit bandwidth calculation + - Eliminate strcpy() from bsmtp + - Fix for configuring sudo option for bconsole access + - Display correct NextPool overrides + use Job NextPool in restore if available + - Fix Bacula to work with newer Windows pthreads library + - Fix bug #180 ERR=success in btape when tape error + +Bugs fixed/closed since last release: +1679 180 2074 2075 2076 2079 2082 547 + + +Release version 7.0.4 +This is a bug fix release to version 7.0.3. We recommend that +everyone upgrade to this version. + +The main fixes are to make copy/migration to a second SD work, and +to cleanup some of the inconsistencies in the cancel command which +could confuse the user. + +02Jun14 + - Better error handling for cancel command + - Fix compiler warning + simplify some #ifdefs + - Fix copy/migration to second SD + - Fix calls to sl.set_string() + - Improve sellist code + +============================================================= +Release version 7.0.3 +This is a bug fix release to version 7.0.2. We recommend that +everyone using version 7.0.2 upgrade to this version. + +12May14 + - Fix error handling in do_alist_prompt + - Tighten error condition handling in sellist + - Add new cancel test + - Update LICENSE and LICENSE-FAQ + - Also update autoconf/aclocal.m4 + - Reschedule on error caused EndTime to be incorrect -- fixes bug #2029 + - Flush console queued job messages -- should fix bug #2054 + - Attempt to fix FreeBSD echo/printf, bug #2048 + - Update to newer libtool + config.guess + - Recompile configure + - Apply fix supplied for acl.c in bug #2050 + - Fix a SD seg fault that occurs with over committed drives + - Clear bvfs cache and set debug options available only for admin + - Moved auth params to curl opts + - Filtred single results for restricted consoles + - Removed unnecessary debug + - Changed e-mail address in gettext file + - Support for customized and restricted consoles + - Misc changes for rpm building (made by Louis) + - Updated requirements for Baculum + - Apply fix for bug 2049: wrong drive selected + - Fix #2047 about bthread_cond_wait_p not declared + - Fix Bacula bug #2044 -- fix Makefile for bplugininfo linking + - Fix Bacula bug #2046 -- sellist limited to 10000 + - Fix Bacula bug #2045 -- multiply defined daemon_event + - Fix Bacula bug #2020 overflow in btape -- Andreas Koch + +Bugs fixed/closed since last release: +2020 2029 2044 2045 2046 2047 2048 2050 2054 + + +=================================================================== +Release version 7.0.2 +This is a minor update since version 7.0.1 that is mostly cleanup. 
+However, there is one annoying bug concerning shell expansion of
+config directory names that is fixed, and there is at least one
+syntax error in building the full docs that appears on some systems
+that is also fixed.
+
+02Apr14
+ - Remove more vestiges of libbacpy
+ - Put back @PYTHON@ path in configure
+ - Fix improper string in parser
+ - Remove libbacpy from rpm spec files
+ - Fix linking check_bacula
+ - Fix new SD login in check_bacula
+ - Tweak docs build process
+
+
+Release version 7.0.1
+This is a minor update since version 7.0.0 that is mostly cleanup.
+
+31Mar14
+ - Remove old plugin-test
+ - Update po files
+ - Enable installation of the bpluginfo utility
+ - More tray-monitor updates
+ - Add Simone Caronii to AUTHORS
+ - Align command line switches in manpages.
+ - Apply upgrade to config.guess
+ - Remove bgnome-console and bwx-console leftovers.
+ - Update tray-monitor header also for new bsock calls
+ - Attempt to fix nagios to use new bsock calls
+ - Update tray-monitor to new bsock calls
+
+========================================
+Release 7.0.0
+
+ Bacula code: Total files = 713 Total lines = 305,722
+ The diff between Bacula 5.2.13 and Bacula 7.0.0 is 622,577 lines,
+ which represents a very large change.
+
+This is a major new release with many new features and a
+number of changes. Please take care to test this code carefully
+before putting it into production. Although the new features
+have been tested, they have not run in a production environment.
+
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+For packagers, if you change options, naming, and the way
+we link our shared object files, as at least one of you does,
+you are creating a situation where the user may not be able
+to run multiple versions of Bacula on the same machine, which
+is often very useful, and in addition, you create a configuration
+that the project cannot properly support.
+
+Please note that the documentation has significantly changed.
+You will need additional packages to build it such as inkscape.
+Please see the README and README.pct files in the docs directory.
+The packages come with pre-built English pdf and html files,
+which are located in the docs/docs/manuals/en/pdf-and-html directory.
+
+Packagers: please note that the Bacula LICENSE has changed; it is still
+AGPLv3 and still open source. A new requirement has been added which
+requires other projects using the source to keep the accreditations.
+
+Packagers: please note that the docs license has changed. It is now
+licensed: Creative Commons Attribution-ShareAlike 4.0 International.
+This is a common open source license.
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+Compatibility:
+--------------
+As always, both the Director and Storage daemon(s) must be upgraded at
+the same time. Any File daemon running on the same machine as a Director
+or Storage daemon must be of the same version.
+
+Older File Daemons should be compatible with the 7.0.0 Director and Storage
+daemons. There should be no need to upgrade older File Daemons.
+
+The following are new directives, commands and features:
+- New Baculum web GUI interface. See the gui/baculum directory.
+- Directive fdstorageaddress in Client +- Directive SD Calls Client in Client +- Directive Maximum Bandwidth per Job in Client +- Directive FD Storage Address in Storage +- Directive Maximum Spawned Jobs in Job +- setbandwidth command in bconsole +- Progress meter with FD in status dir +- LastDay of month in schedule +- sixth 6th week in month in schedule +- Improvements in bconsole SQL calls +- Allow list and ranges in cancel as well as the keyword: all +- truncate command in bconsole +- prune expired volumes? +- New hardlink performance enhancements +- restart command +- restore optimizespeed=yes|no for hardlinks default yes +- PkiCipher and PkiDigest in FD Client item + Cipher aes128, aes192, aes256, blowfish + Digest md5, sha1, sha256 +- Maximum Bandwidth Per Job in FD Client resource +- Maximum Bandwidth Per Job in FD Director Resource +- .bvfs_decode_lstat +- DisableCommand in FD Director resource +- DisableCommand in FD Client resource +- status scheduled bconsole command with the following options: + days=nn (0-500 default 10); limit=nn (0-2000 default 100) + time=YYYY-MM-DD HH:MM:SS + schedule=xxx job=xxx +- NextPool in Run override +- Directive NextPool in Job + +Please see the New Features chapter of the manual for more +details. + +The following features or directives have been removed: +- Win32 +- tray-monitor +- wx_console +- Removed cats DBI driver +- Python + +Detailed changes: +================= +24Mar14 + - Add Josip Almasi to AUTHORS + - [PATCH] Support for restricted consoles in BAT config + - [PATCH] Fix for free director directive + - [PATCH] Fix auto-select restricted console for director in bconsole + - Realign output display + - Update ua_output.c from Branch-6.7 + - Add some missing Branch-6.7 updates + - Added needed empty directories to Baculum + - Fix for support PostgreSQL, MySQL and SQLite + - Framework adjusting to Baculum database connections + - Framework fix for lower case tables names in MySQL + - Fix for Baculum SQLite support + - Initial commit Baculum + - Add Marcin to AUTHORS file + - Strip trailing blanks + - Update copyright year + - Update LICENSE and header files + - Remove old file + - Add new header in misc files + - Remove tray-monitor bwx-console manual installation + - Remove FD python and examples + - Fixup spec files + - Remove pythonlib from lib + - Update package-list + - Fix SDCallsClient daemon synchronization + - Add debug code + make 127.0.0.1 same as localhost for tls tests + - Fix multiple DIRs in console + - Make failure for bat to connect to DIR non-fatal + - Fix bat style to one that works + - Take disk-changer from Branch-6.7 + - Simplify Version output + - Fix FDVersion for SD Calls Client test + - Update accurate test + - Update differential test + - Add new regress timing scripts + - Improve plugin make clean + - Implement regress FORCE_SDCALLS + - Remove win32 tray-monitor and wx-console directories + - Remove regress-config need only regress-config.in + - Add configure archivedir + - Improve SQL failure reporting + - First cut backport BEE to community + - Add copyright to mtx-changer.in diff --git a/SUPPORT b/SUPPORT new file mode 100644 index 00000000..3fcf8ece --- /dev/null +++ b/SUPPORT @@ -0,0 +1,33 @@ + + Bacula Support request form. Please fill it out and email it to + bacula-users@lists.sourceforge.net + +Bacula version: + +OS type and version: + +How was Bacula built/installed? Source, rpms, ... be specific. 
+
+Concise description of the problem:
+
+
+Bacula output showing the problem:
+
+
+Steps to reproduce the problem:
+
+
+
+Other information (optional):
+A copy of your config.out file (if built from source)
+
+Tape drive/autochanger (for tape problems):
+
+Have you run the btape "test" command?
+
+Database problems: what database are you using?
+What is the database version?
+
+If the output involves configuration files, consider attaching
+bacula-dir.conf, bacula-fd.conf, and bacula-sd.conf as necessary and
+appropriate.
diff --git a/VERIFYING b/VERIFYING
new file mode 100644
index 00000000..2c7babd9
--- /dev/null
+++ b/VERIFYING
@@ -0,0 +1,38 @@
+
+All Bacula packages released on Source Forge after 8 June 2003
+will be signed with the Bacula Distribution Verification Key. By
+obtaining a copy of the Bacula Distribution Verification Public
+key from either the home site (www.bacula.org) or from the Source
+Forge project page (www.sourceforge.net/projects/bacula), you
+can verify that the code you have is complete, unaltered, and
+packaged by myself (Kern Sibbald) or D. Scott Barninger.
+
+Putting the Bacula Key in your Keyring:
+
+Once you download the Bacula public key, you must insert it in
+your keyring. The procedure will differ depending on whether you
+are using PGP or GPG. For GPG, assuming you have put the key
+in bacula.key, the procedure is:
+
+  gpg --import bacula.key
+
+Verifying an RPM:
+
+The procedure for verification differs slightly if you are using
+rpms or tar.gz files. For rpms, the signature becomes part of the
+rpm package, and once the Bacula public key is in your keyring
+you check the package with:
+
+  rpm --checksig bacula-1.xx.rpm
+
+Verifying tar files:
+
+Tar files are distributed as they always have been in the past,
+unchanged. However, for each xxxx.tar.gz file that is released there
+will be a second file released with the same name but with .sig
+appended, for example xxxx.tar.gz.sig.
To verify the Bacula distribution, +you download both the files and put them in the same directory, then +for GPG, you use the following command to do the verification: + + gpg --verify xxxx.tar.gz.sig + diff --git a/autoconf/Make.common.in b/autoconf/Make.common.in new file mode 100644 index 00000000..2e965597 --- /dev/null +++ b/autoconf/Make.common.in @@ -0,0 +1,102 @@ +# +# Copyright (C) 2000-2018 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# This file is pulled in by all the Unix Bacula Makefiles +# so it has all the "common" definitions +# + +DATE="@DATE@" +LSMDATE=@LSMDATE@ +VERSION=@VERSION@ +VERNAME=bacula-$(VERSION)# +MAINT=Kern Sibbald# +MAINTEMAIL=# +WEBMAINT=# +WEBMAINTEMAIL=# +WEBPAGE=# +FTPSITENAME=# +FTPSITEDIR=# +#------------------------------------------------------------------------- + +SHELL = /bin/sh + +# Installation target directories & other installation stuff +prefix = @prefix@ +exec_prefix = @exec_prefix@ +binprefix = +manprefix = +datarootdir = @datarootdir@ +docdir = @docdir@ +sbindir = @sbindir@ +libdir = @libdir@ +includedir = @includedir@ +sysconfdir = @sysconfdir@ +plugindir = @plugindir@ +scriptdir = @scriptdir@ +logdir = @logdir@ +archivedir = @archivedir@ +mandir = @mandir@ +manext = 8 + +NO_ECHO = @ + +# Tools & program stuff +CC = @CC@ +CPP = @CPP@ +CXX = @CXX@ +MV = @MV@ +RM = @REMOVE@ +RMF = $(RM) -f +CP = @CP@ +SED = @SED@ +AWK = @AWK@ +ECHO = /bin/echo +CMP = @CMP@ +TBL = @TBL@ +AR = @AR@ +GMAKE = @GMAKE@ +RANLIB = @RANLIB@ +MKDIR = @BUILD_DIR@/autoconf/mkinstalldirs +INSTALL = @INSTALL@ +# add the -s to the following in PRODUCTION mode +INSTALL_PROGRAM = $(INSTALL) -m @SBINPERM@ +INSTALL_PROGRAM_ALL = $(INSTALL) -m 755 +INSTALL_LIB = $(INSTALL) -m 755 +INSTALL_DATA = $(INSTALL) -m 644 +INSTALL_SCRIPT = $(INSTALL) -m @SBINPERM@ +INSTALL_CONFIG = $(INSTALL) -m 660 + +# +# Libtool specific settings +# +DEFAULT_OBJECT_TYPE = @DEFAULT_OBJECT_TYPE@ +DEFAULT_ARCHIVE_TYPE = @DEFAULT_ARCHIVE_TYPE@ +DEFAULT_SHARED_OBJECT_TYPE = @DEFAULT_SHARED_OBJECT_TYPE@ +LIBTOOL = @BUILD_DIR@/libtool +LIBTOOL_COMPILE = @LIBTOOL@ --silent --tag=CXX --mode=compile +LIBTOOL_LINK = @LIBTOOL@ --silent --tag=CXX --mode=link +LIBTOOL_INSTALL = @LIBTOOL@ --silent --tag=CXX --mode=install +LIBTOOL_INSTALL_FINISH = @LIBTOOL@ --silent --tag=CXX --finish --mode=install +LIBTOOL_UNINSTALL = @LIBTOOL@ --silent --tag=CXX --mode=uninstall +LIBTOOL_CLEAN = @LIBTOOL@ --silent --tag=CXX --mode=clean + +# Flags & libs +CFLAGS = @CFLAGS@ @OPENSSL_INC@ + +CPPFLAGS = @CPPFLAGS@ @OPENSSL_INC@ +LDFLAGS = @LDFLAGS@ +TTOOL_LDFLAGS = @TTOOL_LDFLAGS@ +LIBS = @LIBS@ +WRAPLIBS = @WRAPLIBS@ +DINCLUDE = @DINCLUDE@ +DLIB = @DLIB@ +OPENSSL_LIBS = @OPENSSL_LIBS@ +DLLIBS = @LIBADD_DLOPEN@ + +# Windows (cygwin) flags +WCFLAGS = @WCFLAGS@ +WLDFLAGS = @WLDFLAGS@ + +# End of common section of the Makefile +#------------------------------------------------------------------------- diff --git a/autoconf/acconfig.h b/autoconf/acconfig.h new file mode 100644 index 00000000..310c77f7 --- /dev/null +++ b/autoconf/acconfig.h @@ -0,0 +1,206 @@ +/* ------------------------------------------------------------------------- */ +/* -- CONFIGURE SPECIFIED FEATURES -- */ +/* ------------------------------------------------------------------------- */ + +/* + * Copyright (C) 2000-2015 Kern Sibbald + * License: BSD 2-Clause; see file LICENSE-FOSS + */ + +/* Define if you want to use MySQL as Catalog database */ +#undef USE_MYSQL_DB + +/* Define if you want SmartAlloc debug code enabled */ +#undef SMARTALLOC 
+ +/* Define to `int' if doesn't define. */ +#undef daddr_t + +/* Define to `int' if doesn't define. */ +#undef major_t + +/* Define to `int' if doesn't define. */ +#undef minor_t + +/* Define to `int' if doesn't define. */ +#undef ssize_t + +/* Define if you want to use PostgreSQL */ +#undef HAVE_POSTGRESQL + +/* Define if you want to use MySQL */ +#undef HAVE_MYSQL + +/* Define if you want to use embedded MySQL */ +#undef HAVE_EMBEDDED_MYSQL + +/* Define if you want to use SQLite3 */ +#undef HAVE_SQLITE3 + +/* ------------------------------------------------------------------------- */ +/* -- CONFIGURE DETECTED FEATURES -- */ +/* ------------------------------------------------------------------------- */ +@TOP@ + +/* Define if you need function prototypes */ +#undef PROTOTYPES + +/* Define if you have XPointer typedef */ +#undef HAVE_XPOINTER + +/* Define if you have _GNU_SOURCE getpt() */ +#undef HAVE_GETPT + +/* Define if you have GCC */ +#undef HAVE_GCC + +/* Define If you want find -nouser and -nogroup to make tables of + used UIDs and GIDs at startup instead of using getpwuid or + getgrgid when needed. Speeds up -nouser and -nogroup unless you + are running NIS or Hesiod, which make password and group calls + very expensive. */ +#undef CACHE_IDS + +/* Define to use SVR4 statvfs to get filesystem type. */ +#undef FSTYPE_STATVFS + +/* Define to use SVR3.2 statfs to get filesystem type. */ +#undef FSTYPE_USG_STATFS + +/* Define to use AIX3 statfs to get filesystem type. */ +#undef FSTYPE_AIX_STATFS + +/* Define to use 4.3BSD getmntent to get filesystem type. */ +#undef FSTYPE_MNTENT + +/* Define to use 4.4BSD and OSF1 statfs to get filesystem type. */ +#undef FSTYPE_STATFS + +/* Define to use Ultrix getmnt to get filesystem type. */ +#undef FSTYPE_GETMNT + +/* Define to `unsigned long' if doesn't define. */ +#undef dev_t + +/* Define to `unsigned long' if doesn't define. */ +#undef ino_t + +/* Define to 1 if utime.h exists and declares struct utimbuf. 
*/ +#undef HAVE_UTIME_H + +/* Data types */ +#undef HAVE_U_INT +#undef HAVE_INTXX_T +#undef HAVE_U_INTXX_T +#undef HAVE_UINTXX_T +#undef HAVE_INT64_T +#undef HAVE_U_INT64_T +#undef HAVE_INTMAX_T +#undef HAVE_U_INTMAX_T + +/* Define if you want TCP Wrappers support */ +#undef HAVE_LIBWRAP + +/* Define if you have sys/bitypes.h */ +#undef HAVE_SYS_BITYPES_H + +/* Directory for PID files */ +#undef _PATH_BACULA_PIDDIR + +/* LOCALEDIR */ +#undef LOCALEDIR + +/* Define if you have zlib */ +#undef HAVE_LIBZ + +/* Define if you have lzo lib */ +#undef HAVE_LZO + +/* Define if you have libacl */ +#undef HAVE_ACL + +/* General libs */ +#undef LIBS + +/* File daemon specif libraries */ +#undef FDLIBS + +/* Path to Sendmail program */ +#undef SENDMAIL_PATH + +/* What kind of signals we have */ +#undef HAVE_POSIX_SIGNALS +#undef HAVE_BSD_SIGNALS +#undef HAVE_USG_SIGHOLD + +/* Operating systems */ +/* OSes */ +#undef HAVE_LINUX_OS +#undef HAVE_FREEBSD_OS +#undef HAVE_KFREEBSD_OS +#undef HAVE_NETBSD_OS +#undef HAVE_OPENBSD_OS +#undef HAVE_BSDI_OS +#undef HAVE_HPUX_OS +#undef HAVE_SUN_OS +#undef HAVE_IRIX_OS +#undef HAVE_AIX_OS +#undef HAVE_SGI_OS +#undef HAVE_CYGWIN +#undef HAVE_OSF1_OS +#undef HAVE_DARWIN_OS +#undef HAVE_HURD_OS + +/* Set to correct scanf value for long long int */ +#undef lld +#undef llu + +#undef HAVE_READLINE + +#undef HAVE_GMP + +#undef HAVE_CWEB + +#undef HAVE_FCHDIR + +#undef HAVE_GETOPT_LONG + +#undef HAVE_LIBSM + +/* Check for thread safe routines */ +#undef HAVE_LOCALTIME_R +#undef HAVE_READDIR_R +#undef HAVE_STRERROR_R +#undef HAVE_GETHOSTBYNAME_R + +#undef HAVE_STRTOLL +#undef HAVE_INET_PTON + +#undef HAVE_SOCKLEN_T + +#undef HAVE_OLD_SOCKOPT +#undef USE_THR_SETCONCURRENCY + +/* Defined if Gtk+-2.4 or greater is present */ +#undef HAVE_GTK_2_4 + +/* Needed on HP-UX/g++ systems to support long long ints (int64) */ +#undef _INCLUDE_LONGLONG + +/* Define to system config directory */ +#undef SYSCONFDIR + +/* Define if OPENSSL is available */ +#undef HAVE_OPENSSL + +/* Define if comm encryption should be enabled */ +#undef HAVE_TLS + +/* Define if data encryption should be enabled */ +#undef HAVE_CRYPTO + +/* Define the LOCALEDIR if a translation */ +#undef LOCALEDIR + +/* Define if language support is enabled */ +#undef ENABLE_NLS diff --git a/autoconf/aclocal.m4 b/autoconf/aclocal.m4 new file mode 100644 index 00000000..de6977da --- /dev/null +++ b/autoconf/aclocal.m4 @@ -0,0 +1,51 @@ +# generated automatically by aclocal 1.9.6 -*- Autoconf -*- + +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, +# 2005 Free Software Foundation, Inc. +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. 
+ +m4_include([libtool/libtool.m4]) +m4_include([libtool/ltoptions.m4]) +m4_include([libtool/ltsugar.m4]) +m4_include([libtool/ltversion.m4]) +m4_include([libtool/lt~obsolete.m4]) +m4_include([libtool/ltdl.m4]) +m4_include([gettext-macros/codeset.m4]) +m4_include([gettext-macros/gettext.m4]) +m4_include([gettext-macros/glibc2.m4]) +m4_include([gettext-macros/glibc21.m4]) +m4_include([gettext-macros/iconv.m4]) +m4_include([gettext-macros/intdiv0.m4]) +m4_include([gettext-macros/intmax.m4]) +m4_include([gettext-macros/inttypes-pri.m4]) +m4_include([gettext-macros/inttypes.m4]) +m4_include([gettext-macros/inttypes_h.m4]) +m4_include([gettext-macros/isc-posix.m4]) +m4_include([gettext-macros/lcmessage.m4]) +m4_include([gettext-macros/lib-ld.m4]) +m4_include([gettext-macros/lib-link.m4]) +m4_include([gettext-macros/lib-prefix.m4]) +m4_include([gettext-macros/longdouble.m4]) +m4_include([gettext-macros/longlong.m4]) +m4_include([gettext-macros/nls.m4]) +m4_include([gettext-macros/po.m4]) +m4_include([gettext-macros/printf-posix.m4]) +m4_include([gettext-macros/progtest.m4]) +m4_include([gettext-macros/signed.m4]) +m4_include([gettext-macros/size_max.m4]) +m4_include([gettext-macros/stdint_h.m4]) +m4_include([gettext-macros/uintmax_t.m4]) +m4_include([gettext-macros/ulonglong.m4]) +m4_include([gettext-macros/wchar_t.m4]) +m4_include([gettext-macros/wint_t.m4]) +m4_include([gettext-macros/xsize.m4]) +m4_include([bacula-macros/db.m4]) +m4_include([bacula-macros/largefiles.m4]) +m4_include([bacula-macros/os.m4]) diff --git a/autoconf/aclocal.m4.save b/autoconf/aclocal.m4.save new file mode 100644 index 00000000..de6977da --- /dev/null +++ b/autoconf/aclocal.m4.save @@ -0,0 +1,51 @@ +# generated automatically by aclocal 1.9.6 -*- Autoconf -*- + +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, +# 2005 Free Software Foundation, Inc. +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. 
+ +m4_include([libtool/libtool.m4]) +m4_include([libtool/ltoptions.m4]) +m4_include([libtool/ltsugar.m4]) +m4_include([libtool/ltversion.m4]) +m4_include([libtool/lt~obsolete.m4]) +m4_include([libtool/ltdl.m4]) +m4_include([gettext-macros/codeset.m4]) +m4_include([gettext-macros/gettext.m4]) +m4_include([gettext-macros/glibc2.m4]) +m4_include([gettext-macros/glibc21.m4]) +m4_include([gettext-macros/iconv.m4]) +m4_include([gettext-macros/intdiv0.m4]) +m4_include([gettext-macros/intmax.m4]) +m4_include([gettext-macros/inttypes-pri.m4]) +m4_include([gettext-macros/inttypes.m4]) +m4_include([gettext-macros/inttypes_h.m4]) +m4_include([gettext-macros/isc-posix.m4]) +m4_include([gettext-macros/lcmessage.m4]) +m4_include([gettext-macros/lib-ld.m4]) +m4_include([gettext-macros/lib-link.m4]) +m4_include([gettext-macros/lib-prefix.m4]) +m4_include([gettext-macros/longdouble.m4]) +m4_include([gettext-macros/longlong.m4]) +m4_include([gettext-macros/nls.m4]) +m4_include([gettext-macros/po.m4]) +m4_include([gettext-macros/printf-posix.m4]) +m4_include([gettext-macros/progtest.m4]) +m4_include([gettext-macros/signed.m4]) +m4_include([gettext-macros/size_max.m4]) +m4_include([gettext-macros/stdint_h.m4]) +m4_include([gettext-macros/uintmax_t.m4]) +m4_include([gettext-macros/ulonglong.m4]) +m4_include([gettext-macros/wchar_t.m4]) +m4_include([gettext-macros/wint_t.m4]) +m4_include([gettext-macros/xsize.m4]) +m4_include([bacula-macros/db.m4]) +m4_include([bacula-macros/largefiles.m4]) +m4_include([bacula-macros/os.m4]) diff --git a/autoconf/bacula-macros/compare-version.m4 b/autoconf/bacula-macros/compare-version.m4 new file mode 100644 index 00000000..6236998e --- /dev/null +++ b/autoconf/bacula-macros/compare-version.m4 @@ -0,0 +1,101 @@ +dnl # Copyright © 2008 Tim Toolan +dnl # +dnl # Copying and distribution of this file, with or without modification, +dnl # are permitted in any medium without royalty provided the copyright notice +dnl # and this notice are preserved. + +dnl ######################################################################### +AC_DEFUN([AX_COMPARE_VERSION], [ + AC_PROG_AWK + + # Used to indicate true or false condition + ax_compare_version=false + + # Convert the two version strings to be compared into a format that + # allows a simple string comparison. The end result is that a version + # string of the form 1.12.5-r617 will be converted to the form + # 0001001200050617. In other words, each number is zero padded to four + # digits, and non digits are removed. + AS_VAR_PUSHDEF([A],[ax_compare_version_A]) + A=`echo "$1" | sed -e 's/\([[0-9]]*\)/Z\1Z/g' \ + -e 's/Z\([[0-9]]\)Z/Z0\1Z/g' \ + -e 's/Z\([[0-9]][[0-9]]\)Z/Z0\1Z/g' \ + -e 's/Z\([[0-9]][[0-9]][[0-9]]\)Z/Z0\1Z/g' \ + -e 's/[[^0-9]]//g'` + + AS_VAR_PUSHDEF([B],[ax_compare_version_B]) + B=`echo "$3" | sed -e 's/\([[0-9]]*\)/Z\1Z/g' \ + -e 's/Z\([[0-9]]\)Z/Z0\1Z/g' \ + -e 's/Z\([[0-9]][[0-9]]\)Z/Z0\1Z/g' \ + -e 's/Z\([[0-9]][[0-9]][[0-9]]\)Z/Z0\1Z/g' \ + -e 's/[[^0-9]]//g'` + + dnl # In the case of le, ge, lt, and gt, the strings are sorted as necessary + dnl # then the first line is used to determine if the condition is true. + dnl # The sed right after the echo is to remove any indented white space. 
+ m4_case(m4_tolower($2), + [lt],[ + ax_compare_version=`echo "x$A +x$B" | sed 's/^ *//' | sort -r | sed "s/x${A}/false/;s/x${B}/true/;1q"` + ], + [gt],[ + ax_compare_version=`echo "x$A +x$B" | sed 's/^ *//' | sort | sed "s/x${A}/false/;s/x${B}/true/;1q"` + ], + [le],[ + ax_compare_version=`echo "x$A +x$B" | sed 's/^ *//' | sort | sed "s/x${A}/true/;s/x${B}/false/;1q"` + ], + [ge],[ + ax_compare_version=`echo "x$A +x$B" | sed 's/^ *//' | sort -r | sed "s/x${A}/true/;s/x${B}/false/;1q"` + ],[ + dnl Split the operator from the subversion count if present. + m4_bmatch(m4_substr($2,2), + [0],[ + # A count of zero means use the length of the shorter version. + # Determine the number of characters in A and B. + ax_compare_version_len_A=`echo "$A" | $AWK '{print(length)}'` + ax_compare_version_len_B=`echo "$B" | $AWK '{print(length)}'` + + # Set A to no more than B's length and B to no more than A's length. + A=`echo "$A" | sed "s/\(.\{$ax_compare_version_len_B\}\).*/\1/"` + B=`echo "$B" | sed "s/\(.\{$ax_compare_version_len_A\}\).*/\1/"` + ], + [[0-9]+],[ + # A count greater than zero means use only that many subversions + A=`echo "$A" | sed "s/\(\([[0-9]]\{4\}\)\{m4_substr($2,2)\}\).*/\1/"` + B=`echo "$B" | sed "s/\(\([[0-9]]\{4\}\)\{m4_substr($2,2)\}\).*/\1/"` + ], + [.+],[ + AC_WARNING( + [illegal OP numeric parameter: $2]) + ],[]) + + # Pad zeros at end of numbers to make same length. + ax_compare_version_tmp_A="$A`echo $B | sed 's/./0/g'`" + B="$B`echo $A | sed 's/./0/g'`" + A="$ax_compare_version_tmp_A" + + # Check for equality or inequality as necessary. + m4_case(m4_tolower(m4_substr($2,0,2)), + [eq],[ + test "x$A" = "x$B" && ax_compare_version=true + ], + [ne],[ + test "x$A" != "x$B" && ax_compare_version=true + ],[ + AC_WARNING([illegal OP parameter: $2]) + ]) + ]) + + AS_VAR_POPDEF([A])dnl + AS_VAR_POPDEF([B])dnl + + dnl # Execute ACTION-IF-TRUE / ACTION-IF-FALSE. + if test "$ax_compare_version" = "true" ; then + m4_ifvaln([$4],[$4],[:])dnl + m4_ifvaln([$5],[else $5])dnl + fi +]) dnl AX_COMPARE_VERSION + diff --git a/autoconf/bacula-macros/db.m4 b/autoconf/bacula-macros/db.m4 new file mode 100644 index 00000000..988c737a --- /dev/null +++ b/autoconf/bacula-macros/db.m4 @@ -0,0 +1,1006 @@ +# +# Copyright (C) 2000-2016 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +AC_DEFUN([BA_CHECK_DBI_DB], +[ +AC_MSG_CHECKING(for DBI support) +AC_ARG_WITH(dbi, +AC_HELP_STRING([--with-dbi@<:@=DIR@:>@], [Include DBI support. 
DIR is the DBD base install directory, default is to search through a number of common places for the DBI files.]), +[ + if test "$withval" != "no"; then + if test "$withval" = "yes"; then + if test -f /usr/local/include/dbi/dbi.h; then + DBI_INCDIR=/usr/local/dbi/include + if test -d /usr/local/lib64; then + DBI_LIBDIR=/usr/local/lib64 + else + DBI_LIBDIR=/usr/local/lib + fi + DBI_BINDIR=/usr/local/bin + elif test -f /usr/include/dbi/dbi.h; then + DBI_INCDIR=/usr/include + if test -d /usr/lib64; then + DBI_LIBDIR=/usr/lib64 + else + DBI_LIBDIR=/usr/lib + fi + DBI_BINDIR=/usr/bin + elif test -f $prefix/include/dbi/dbi.h; then + DBI_INCDIR=$prefix/include + if test -d $prefix/lib64; then + DBI_LIBDIR=$prefix/lib64 + else + DBI_LIBDIR=$prefix/lib + fi + DBI_BINDIR=$prefix/bin + else + AC_MSG_RESULT(no) + AC_MSG_ERROR(Unable to find dbi.h in standard locations) + fi + if test -d /usr/local/lib/dbd; then + DRIVERDIR=/usr/local/lib/dbd + if test -d /usr/local/lib64/dbd; then + DRIVERDIR=/usr/local/lib64/dbd + else + DRIVERDIR=/usr/local/lib/dbd + fi + elif test -d /usr/lib/dbd; then + DRIVERDIR=/usr/lib/dbd + if test -d /usr/lib64/dbd; then + DRIVERDIR=/usr/lib64/dbd + else + DRIVERDIR=/usr/lib/dbd + fi + elif test -d $prefix/lib/dbd; then + if test -d $prefix/lib64/dbd; then + DRIVERDIR=$prefix/lib64/dbd + else + DRIVERDIR=$prefix/lib/dbd + fi + elif test -d /usr/local/lib64/dbd; then + DRIVERDIR=/usr/local/lib64/dbd + elif test -d /usr/lib64/dbd; then + DRIVERDIR=/usr/lib64/dbd + elif test -d $prefix/lib64/dbd; then + DRIVERDIR=$prefix/lib64/dbd + else + AC_MSG_RESULT(no) + AC_MSG_ERROR(Unable to find DBD drivers in standard locations) + fi + else + if test -f $withval/dbi.h; then + DBI_INCDIR=$withval + DBI_LIBDIR=$withval + DBI_BINDIR=$withval + elif test -f $withval/include/dbi/dbi.h; then + DBI_INCDIR=$withval/include + if test -d $withval/lib64; then + DBI_LIBDIR=$withval/lib64 + else + DBI_LIBDIR=$withval/lib + fi + DBI_BINDIR=$withval/bin + else + AC_MSG_RESULT(no) + AC_MSG_ERROR(Invalid DBI directory $withval - unable to find dbi.h under $withval) + fi + if test -d $withval/dbd; then + DRIVERDIR=$withval/dbd + elif test -d $withval/lib/; then + if test -d $withval/lib64/dbd; then + DRIVERDIR=$withval/lib64/dbd + else + DRIVERDIR=$withval/lib/dbd + fi + elif test -d $withval/lib64/dbd; then + DRIVERDIR=$withval/lib64/dbd + else + AC_MSG_RESULT(no) + AC_MSG_ERROR(Invalid DBD driver directory $withval - unable to find DBD drivers under $withval) + fi + fi + DBI_INCLUDE=-I$DBI_INCDIR + if test x$use_libtool != xno; then + DBI_LIBS="-R $DBI_LIBDIR -L$DBI_LIBDIR -ldbi" + else + DBI_LIBS="-L$DBI_LIBDIR -ldbi" + fi + DBI_LIB=$DBI_LIBDIR/libdbi.a + DBI_DBD_DRIVERDIR="-D DBI_DRIVER_DIR=\\\"$DRIVERDIR\\\"" + DB_LIBS="${DB_LIBS} ${DBI_LIBS}" + + AC_DEFINE(HAVE_DBI, 1, [Set if you have the DBI driver]) + AC_MSG_RESULT(yes) + + if test -z "${db_backends}"; then + db_backends="DBI" + else + db_backends="${db_backends} DBI" + fi + if test -z "${DB_BACKENDS}" ; then + DB_BACKENDS="dbi" + else + DB_BACKENDS="${DB_BACKENDS} dbi" + fi + uncomment_dbi=" " + + dnl ------------------------------------------- + dnl Push the DB_PROG onto the stack of supported database backends for DBI + dnl ------------------------------------------- + DB_BACKENDS="${DB_BACKENDS} ${DB_PROG}" + + dnl ------------------------------------------- + dnl Check if dbi supports batch mode + dnl ------------------------------------------- + if test "x$support_batch_insert" = "xyes"; then + if test $DB_PROG = postgresql; then + 
AC_CHECK_LIB(pq, PQisthreadsafe, AC_DEFINE(HAVE_PQISTHREADSAFE)) + AC_CHECK_LIB(pq, PQputCopyData, AC_DEFINE(HAVE_PQ_COPY)) + test "x$ac_cv_lib_pq_PQputCopyData" = "xyes" + pkg=$? + if test $pkg = 0; then + AC_DEFINE(HAVE_DBI_BATCH_FILE_INSERT, 1, [Set if DBI DB batch insert code enabled]) + fi + fi + fi + else + AC_MSG_RESULT(no) + fi +],[ + AC_MSG_RESULT(no) +]) + +AC_SUBST(DBI_LIBS) +AC_SUBST(DBI_INCLUDE) +AC_SUBST(DBI_BINDIR) +AC_SUBST(DBI_DBD_DRIVERDIR) + +]) + +AC_DEFUN([BA_CHECK_DBI_DRIVER], +[ +db_prog=no +AC_MSG_CHECKING(for DBI drivers support) +AC_ARG_WITH(dbi-driver, +AC_HELP_STRING([--with-dbi-driver@<:@=DRIVER@:>@], [Suport for DBI driver. DRIVER is the one DBI driver like Mysql, Postgresql, others. Default is to not configure any driver.]), +[ + if test "$withval" != "no"; then + case $withval in + "mysql") + db_prog="mysql" + if test -f /usr/local/mysql/bin/mysql; then + MYSQL_BINDIR=/usr/local/mysql/bin + if test -f /usr/local/mysql/lib64/mysql/libmysqlclient_r.a \ + -o -f /usr/local/mysql/lib64/mysql/libmysqlclient_r.so; then + MYSQL_LIBDIR=/usr/local/mysql/lib64/mysql + else + MYSQL_LIBDIR=/usr/local/mysql/lib/mysql + fi + elif test -f /usr/bin/mysql; then + MYSQL_BINDIR=/usr/bin + if test -f /usr/lib64/mysql/libmysqlclient_r.a \ + -o -f /usr/lib64/mysql/libmysqlclient_r.so; then + MYSQL_LIBDIR=/usr/lib64/mysql + elif test -f /usr/lib/mysql/libmysqlclient_r.a \ + -o -f /usr/lib/mysql/libmysqlclient_r.so; then + MYSQL_LIBDIR=/usr/lib/mysql + else + MYSQL_LIBDIR=/usr/lib + fi + elif test -f /usr/local/bin/mysql; then + MYSQL_BINDIR=/usr/local/bin + if test -f /usr/local/lib64/mysql/libmysqlclient_r.a \ + -o -f /usr/local/lib64/mysql/libmysqlclient_r.so; then + MYSQL_LIBDIR=/usr/local/lib64/mysql + elif test -f /usr/local/lib/mysql/libmysqlclient_r.a \ + -o -f /usr/local/lib/mysql/libmysqlclient_r.so; then + MYSQL_LIBDIR=/usr/local/lib/mysql + else + MYSQL_LIBDIR=/usr/local/lib + fi + elif test -f $withval/bin/mysql; then + MYSQL_BINDIR=$withval/bin + if test -f $withval/lib64/mysql/libmysqlclient_r.a \ + -o -f $withval/lib64/mysql/libmysqlclient_r.so; then + MYSQL_LIBDIR=$withval/lib64/mysql + elif test -f $withval/lib64/libmysqlclient_r.a \ + -o -f $withval/lib64/libmysqlclient_r.so; then + MYSQL_LIBDIR=$withval/lib64 + elif test -f $withval/lib/libmysqlclient_r.a \ + -o -f $withval/lib/libmysqlclient_r.so; then + MYSQL_LIBDIR=$withval/lib/ + else + MYSQL_LIBDIR=$withval/lib/mysql + fi + elif test -f $prefix/lib/mysql55/bin/mysql; then + MYSQL_BINDIR=$prefix/lib/mysql55/bin + if test -f $prefix/lib/mysql55/mysql/libmysqlclient_r.a \ + -o -f $prefix/lib/mysql55/mysql/libmysqlclient_r.so; then + MYSQL_LIBDIR=$prefix/lib/mysql55/mysql + fi + elif test -f $prefix/lib/mysql51/bin/mysql; then + MYSQL_BINDIR=$prefix/lib/mysql51/bin + if test -f $prefix/lib/mysql51/mysql/libmysqlclient_r.a \ + -o -f $prefix/lib/mysql51/mysql/libmysqlclient_r.so; then + MYSQL_LIBDIR=$prefix/lib/mysql51/mysql + fi + else + AC_MSG_RESULT(no) + AC_MSG_ERROR(Unable to find mysql in standard locations) + fi + if test -f $SQL_LIBDIR/libmysqlclient_r.so; then + DB_PROG_LIB=$SQL_LIBDIR/libmysqlclient_r.so + else + DB_PROG_LIB=$SQL_LIBDIR/libmysqlclient_r.a + fi + ;; + "postgresql") + db_prog="postgresql" + PG_CONFIG=`which pg_config 2>/dev/null` + if test -n "$PG_CONFIG"; then + POSTGRESQL_BINDIR=`"$PG_CONFIG" --bindir` + POSTGRESQL_LIBDIR=`"$PG_CONFIG" --libdir` + elif test -f /usr/local/bin/psql; then + POSTGRESQL_BINDIR=/usr/local/bin + if test -d /usr/local/lib64; then + 
POSTGRESQL_LIBDIR=/usr/local/lib64 + else + POSTGRESQL_LIBDIR=/usr/local/lib + fi + elif test -f /usr/bin/psql; then + POSTGRESQL_BINDIR=/usr/local/bin + if test -d /usr/lib64/postgresql; then + POSTGRESQL_LIBDIR=/usr/lib64/postgresql + elif test -d /usr/lib/postgresql; then + POSTGRESQL_LIBDIR=/usr/lib/postgresql + elif test -d /usr/lib64; then + POSTGRESQL_LIBDIR=/usr/lib64 + else + POSTGRESQL_LIBDIR=/usr/lib + fi + elif test -f $withval/bin/psql; then + POSTGRESQL_BINDIR=$withval/bin + if test -d $withval/lib64; then + POSTGRESQL_LIBDIR=$withval/lib64 + else + POSTGRESQL_LIBDIR=$withval/lib + fi + else + AC_MSG_RESULT(no) + AC_MSG_ERROR(Unable to find psql in standard locations) + fi + if test -f $POSTGRESQL_LIBDIR/libpq.so; then + DB_PROG_LIB=$POSTGRESQL_LIBDIR/libpq.so + else + DB_PROG_LIB=$POSTGRESQL_LIBDIR/libpq.a + fi + ;; + "sqlite3") + db_prog="sqlite3" + if test -f /usr/local/bin/sqlite3; then + SQLITE_BINDIR=/usr/local/bin + if test -d /usr/local/lib64; then + SQLITE_LIBDIR=/usr/local/lib64 + else + SQLITE_LIBDIR=/usr/local/lib + fi + elif test -f /usr/bin/sqlite3; then + SQLITE_BINDIR=/usr/bin + if test -d /usr/lib64; then + SQLITE_LIBDIR=/usr/lib64 + else + SQLITE_LIBDIR=/usr/lib + fi + elif test -f $withval/bin/sqlite3; then + SQLITE_BINDIR=$withval/bin + if test -d $withval/lib64; then + SQLITE_LIBDIR=$withval/lib64 + else + SQLITE_LIBDIR=$withval/lib + fi + else + AC_MSG_RESULT(no) + AC_MSG_ERROR(Unable to find sqlite in standard locations) + fi + if test -f $SQLITE_LIBDIR/libsqlite3.so; then + DB_PROG_LIB=$SQLITE_LIBDIR/libsqlite3.so + else + DB_PROG_LIB=$SQLITE_LIBDIR/libsqlite3.a + fi + ;; + *) + AC_MSG_RESULT(no) + AC_MSG_ERROR(Unable to set DBI driver. $withval is not supported) + ;; + esac + + AC_MSG_RESULT(yes) + DB_PROG=$db_prog + else + AC_MSG_RESULT(no) + fi +],[ + AC_MSG_RESULT(no) +]) + +AC_SUBST(MYSQL_BINDIR) +AC_SUBST(POSTGRESQL_BINDIR) +AC_SUBST(SQLITE_BINDIR) +AC_SUBST(DB_PROG) +AC_SUBST(DB_PROG_LIB) + +]) + + +AC_DEFUN([BA_CHECK_MYSQL_DB], +[ +AC_MSG_CHECKING(for MySQL support) +AC_ARG_WITH(mysql, +AC_HELP_STRING([--with-mysql@<:@=DIR@:>@], [Include MySQL support. DIR is the MySQL base install directory, default is to search through a number of common places for the MySQL files.]), +[ + HAVE_LIBSR="no" + if test "$withval" != "no"; then + if test "$withval" = "yes"; then + MYSQL_CONFIG=`which mysql_config 2>/dev/null` + if test "x${MYSQL_CONFIG}" != x; then + MYSQL_BINDIR="${MYSQL_CONFIG%/*}" + ${MYSQL_CONFIG} --libs_r >/dev/null 2>&1 + if test $? = 0; then + MYSQL_LIBDIR=`${MYSQL_CONFIG} --libs_r` + MYSQL_INCDIR=`${MYSQL_CONFIG} --include` + HAVE_LIBSR="yes" + else + ${MYSQL_CONFIG} --variable=pkglibdir > /dev/null 2>&1 + if test $? 
= 0 ; then + MYSQL_LIBDIR=`${MYSQL_CONFIG} --variable=pkglibdir` + MYSQL_INCDIR=`${MYSQL_CONFIG} --variable=pkgincludedir` + fi + fi + fi + # if something wrong fall back to old method + if test "x${MYSQL_LIBDIR}" = x -o "x${MYSQL_INCDIR}" = x ; then + if test -f /usr/local/mysql/include/mysql/mysql.h; then + MYSQL_INCDIR=/usr/local/mysql/include/mysql + if test -f /usr/local/mysql/lib64/mysql/libmysqlclient_r.a \ + -o -f /usr/local/mysql/lib64/mysql/libmysqlclient_r.so; then + MYSQL_LIBDIR=/usr/local/mysql/lib64/mysql + else + MYSQL_LIBDIR=/usr/local/mysql/lib/mysql + fi + MYSQL_BINDIR=/usr/local/mysql/bin + elif test -f /usr/include/mysql/mysql.h; then + MYSQL_INCDIR=/usr/include/mysql + if test -f /usr/lib64/mysql/libmysqlclient_r.a \ + -o -f /usr/lib64/mysql/libmysqlclient_r.so; then + MYSQL_LIBDIR=/usr/lib64/mysql + elif test -f /usr/lib64/libmysqlclient_r.a \ + -o -f /usr/lib64/libmysqlclient_r.so; then + MYSQL_LIBDIR=/usr/lib64 + elif test -f /usr/lib/x86_64-linux-gnu/libmysqlclient_r.a \ + -o -f /usr/lib/x86_64-linux-gnu/libmysqlclient_r.so; then + MYSQL_LIBDIR=/usr/lib/x86_64-linux-gnu + elif test -f /usr/lib/mysql/libmysqlclient_r.a \ + -o -f /usr/lib/mysql/libmysqlclient_r.so; then + MYSQL_LIBDIR=/usr/lib/mysql + else + MYSQL_LIBDIR=/usr/lib + fi + MYSQL_BINDIR=/usr/bin + elif test -f /usr/include/mysql.h; then + MYSQL_INCDIR=/usr/include + if test -f /usr/lib64/libmysqlclient_r.a \ + -o -f /usr/lib64/libmysqlclient_r.so; then + MYSQL_LIBDIR=/usr/lib64 + else + MYSQL_LIBDIR=/usr/lib + fi + MYSQL_BINDIR=/usr/bin + elif test -f /usr/local/include/mysql/mysql.h; then + MYSQL_INCDIR=/usr/local/include/mysql + if test -f /usr/local/lib64/mysql/libmysqlclient_r.a \ + -o -f /usr/local/lib64/mysql/libmysqlclient_r.so; then + MYSQL_LIBDIR=/usr/local/lib64/mysql + else + MYSQL_LIBDIR=/usr/local/lib/mysql + fi + MYSQL_BINDIR=/usr/local/bin + elif test -f /usr/local/include/mysql.h; then + MYSQL_INCDIR=/usr/local/include + if test -f /usr/local/lib64/libmysqlclient_r.a \ + -o -f /usr/local/lib64/libmysqlclient_r.so; then + MYSQL_LIBDIR=/usr/local/lib64 + else + MYSQL_LIBDIR=/usr/local/lib + fi + MYSQL_BINDIR=/usr/local/bin + else + AC_MSG_RESULT(no) + AC_MSG_ERROR(Unable to find mysql.h in standard locations) + fi + fi + else + if test -f $withval/include/mysql/mysql.h; then + MYSQL_INCDIR=$withval/include/mysql + if test -f $withval/lib64/mysql/libmysqlclient_r.a \ + -o -f $withval/lib64/mysql/libmysqlclient_r.so; then + MYSQL_LIBDIR=$withval/lib64/mysql + elif test -f $withval/lib64/libmysqlclient_r.a \ + -o -f $withval/lib64/libmysqlclient_r.so; then + MYSQL_LIBDIR=$withval/lib64 + elif test -f $withval/lib/libmysqlclient_r.a \ + -o -f $withval/lib/libmysqlclient_r.so; then + MYSQL_LIBDIR=$withval/lib + else + MYSQL_LIBDIR=$withval/lib/mysql + fi + MYSQL_BINDIR=$withval/bin + elif test -f $withval/include/mysql.h; then + MYSQL_INCDIR=$withval/include + if test -f $withval/lib64/libmysqlclient_r.a \ + -o -f $withval/lib64/libmysqlclient_r.so; then + MYSQL_LIBDIR=$withval/lib64 + else + MYSQL_LIBDIR=$withval/lib + fi + MYSQL_BINDIR=$withval/bin + elif test -f $withval/mysql.h; then + dnl MacPorts on OSX has a special MySQL Layout. 
See #2079 + MYSQL_INCDIR=$withval + AC_MSG_NOTICE(Got with-mysql variable $MYSQL_INCDIR checking MySQL version) + case $MYSQL_INCDIR in + *mysql55*) + AC_MSG_NOTICE(Assuming MacPorts MySQL 5.5 variant installed) + dnl with-mysql given contains mysql55 - assuming OSX MacPorts MySQL55 variant + if test -f $prefix/lib/mysql55/mysql/libmysqlclient_r.a \ + -o -f $prefix/lib/mysql55/mysql/libmysqlclient_r.so; then + AC_MSG_NOTICE(Found MySQL 5.5 library in $prefix/lib/mysql55/mysql) + MYSQL_LIBDIR=$prefix/lib/mysql55/mysql + fi + MYSQL_BINDIR=$prefix/lib/mysql55/bin + ;; + *mysql51*) + AC_MSG_NOTICE(Assuming MacPorts MySQL 5.1 variant installed) + dnl with-mysql contains mysql51 - assuming OSX MacPorts MySQL51 variant + if test -f $prefix/lib/mysql51/mysql/libmysqlclient_r.a \ + -o -f $prefix/lib/mysql51/mysql/libmysqlclient_r.so; then + AC_MSG_NOTICE(Found MySQL 5.1 library in $prefix/lib/mysql55/mysql) + MYSQL_LIBDIR=$prefix/lib/mysql51/mysql + fi + MYSQL_BINDIR=$prefix/lib/mysql51/bin + ;; + esac + if test -z "${MYSQL_LIBDIR}" ; then + AC_MSG_RESULT(no) + AC_MSG_ERROR(MySQL $withval - unable to find MySQL libraries) + fi + else + AC_MSG_RESULT(no) + AC_MSG_ERROR(Invalid MySQL directory $withval - unable to find mysql.h under $withval) + fi + fi + if test "x${MYSQL_LIBDIR}" != x; then + MYSQL_INCLUDE=-I$MYSQL_INCDIR + if test "x$HAVE_LIBSR" = "xyes"; then + DB_LIBS="${DB_LIBS} ${MYSQL_LIBS}" + MYSQL_LIBS="$MYSQL_LIBDIR" + MYSQL_INCLUDE="$MYSQL_INCDIR" + AC_DEFINE(HAVE_MYSQL_THREAD_SAFE, 1, [Set if have mysql_thread_safe]) + if test -z "${batch_insert_db_backends}"; then + batch_insert_db_backends="MySQL" + else + batch_insert_db_backends="${batch_insert_db_backends} MySQL" + fi + elif test -f $MYSQL_LIBDIR/libmysqlclient_r.a \ + -o -f $MYSQL_LIBDIR/libmysqlclient_r.so; then + if test x$use_libtool != xno; then + MYSQL_LIBS="-R $MYSQL_LIBDIR -L$MYSQL_LIBDIR -lmysqlclient_r -lz" + else + MYSQL_LIBS="-L$MYSQL_LIBDIR -lmysqlclient_r -lz" + fi + DB_LIBS="${DB_LIBS} ${MYSQL_LIBS}" + fi + if test "x${MYSQL_LIBS}" = x; then + MYSQL_LIBS=$MYSQL_LIBDIR/libmysqlclient_r.a + fi + + AC_DEFINE(HAVE_MYSQL, 1, [Set if you have an MySQL Database]) + AC_MSG_RESULT(yes) + + if test -z "${db_backends}" ; then + db_backends="MySQL" + else + db_backends="${db_backends} MySQL" + fi + if test -z "${DB_BACKENDS}" ; then + DB_BACKENDS="mysql" + else + DB_BACKENDS="${DB_BACKENDS} mysql" + fi + + dnl ------------------------------------------- + dnl Check if mysql supports batch mode + dnl ------------------------------------------- + if test "x$HAVE_LIBSR" = "xno"; then + if test "x$support_batch_insert" = "xyes"; then + dnl For mysql checking + saved_LDFLAGS="${LDFLAGS}" + LDFLAGS="${saved_LDFLAGS} -L$MYSQL_LIBDIR" + saved_LIBS="${LIBS}" + LIBS="${saved_LIBS} -lz" + + AC_CHECK_LIB(mysqlclient_r, mysql_thread_safe, AC_DEFINE(HAVE_MYSQL_THREAD_SAFE, 1, [Set if have mysql_thread_safe])) + if test "x$ac_cv_lib_mysqlclient_r_mysql_thread_safe" = "xyes"; then + if test -z "${batch_insert_db_backends}"; then + batch_insert_db_backends="MySQL" + else + batch_insert_db_backends="${batch_insert_db_backends} MySQL" + fi + fi + + dnl Revert after mysql checks + LDFLAGS="${saved_LDFLAGS}" + LIBS="${saved_LIBS}" + fi + fi + else + AC_MSG_RESULT(no) + fi + fi +],[ + AC_MSG_RESULT(no) +]) + +AC_MSG_CHECKING(for MySQL embedded support) +AC_ARG_WITH(embedded-mysql, +AC_HELP_STRING([--with-embedded-mysql@<:@=DIR@:>@], [Include MySQL support. 
DIR is the MySQL base install directory, default is to search through a number of common places for the MySQL files.]), +[ + if test "$withval" != "no"; then + if test "$withval" = "yes"; then + if test -f /usr/local/mysql/include/mysql/mysql.h; then + MYSQL_INCDIR=/usr/local/mysql/include/mysql + if test -d /usr/local/mysql/lib64/mysql; then + MYSQL_LIBDIR=/usr/local/mysql/lib64/mysql + else + MYSQL_LIBDIR=/usr/local/mysql/lib/mysql + fi + MYSQL_BINDIR=/usr/local/mysql/bin + elif test -f /usr/include/mysql/mysql.h; then + MYSQL_INCDIR=/usr/include/mysql + if test -d /usr/lib64/mysql; then + MYSQL_LIBDIR=/usr/lib64/mysql + else + MYSQL_LIBDIR=/usr/lib/mysql + fi + MYSQL_BINDIR=/usr/bin + elif test -f /usr/include/mysql.h; then + MYSQL_INCDIR=/usr/include + if test -d /usr/lib64; then + MYSQL_LIBDIR=/usr/lib64 + else + MYSQL_LIBDIR=/usr/lib + fi + MYSQL_BINDIR=/usr/bin + elif test -f /usr/local/include/mysql/mysql.h; then + MYSQL_INCDIR=/usr/local/include/mysql + if test -d /usr/local/lib64/mysql; then + MYSQL_LIBDIR=/usr/local/lib64/mysql + else + MYSQL_LIBDIR=/usr/local/lib/mysql + fi + MYSQL_BINDIR=/usr/local/bin + elif test -f /usr/local/include/mysql.h; then + MYSQL_INCDIR=/usr/local/include + if test -d /usr/local/lib64; then + MYSQL_LIBDIR=/usr/local/lib64 + else + MYSQL_LIBDIR=/usr/local/lib + fi + MYSQL_BINDIR=/usr/local/bin + else + AC_MSG_RESULT(no) + AC_MSG_ERROR(Unable to find mysql.h in standard locations) + fi + else + if test -f $withval/include/mysql/mysql.h; then + MYSQL_INCDIR=$withval/include/mysql + if test -d $withval/lib64/mysql; then + MYSQL_LIBDIR=$withval/lib64/mysql + else + MYSQL_LIBDIR=$withval/lib/mysql + fi + MYSQL_BINDIR=$withval/bin + elif test -f $withval/include/mysql.h; then + MYSQL_INCDIR=$withval/include + if test -d $withval/lib64; then + MYSQL_LIBDIR=$withval/lib64 + else + MYSQL_LIBDIR=$withval/lib + fi + MYSQL_BINDIR=$withval/bin + else + AC_MSG_RESULT(no) + AC_MSG_ERROR(Invalid MySQL directory $withval - unable to find mysql.h under $withval) + fi + fi + MYSQL_INCLUDE=-I$MYSQL_INCDIR + if test x$use_libtool != xno; then + MYSQL_LIBS="-R $MYSQL_LIBDIR -L$MYSQL_LIBDIR -lmysqld -lz -lm -lcrypt" + else + MYSQL_LIBS="-L$MYSQL_LIBDIR -lmysqld -lz -lm -lcrypt" + fi + MYSQL_LIB=$MYSQL_LIBDIR/libmysqld.a + DB_LIBS="${DB_LIBS} ${MYSQL_LIBS}" + + AC_DEFINE(HAVE_MYSQL, 1, [Set if you have an MySQL Database]) + AC_DEFINE(HAVE_EMBEDDED_MYSQL, 1, [Set if you have an Embedded MySQL Database]) + AC_MSG_RESULT(yes) + + if test -z "${db_backends}"; then + db_backends="MySQL" + else + db_backends="${db_backends} MySQL" + fi + if test -z "${DB_BACKENDS}"; then + DB_BACKENDS="mysql" + else + DB_BACKENDS="${DB_BACKENDS} mysql" + fi + + dnl ------------------------------------------- + dnl Check if mysql supports batch mode + dnl ------------------------------------------- + if test "x$support_batch_insert" = "xyes"; then + dnl For mysql checking + saved_LDFLAGS="${LDFLAGS}" + LDFLAGS="${saved_LDFLAGS} -L$MYSQL_LIBDIR" + saved_LIBS="${LIBS}" + LIBS="${saved_LIBS} -lz -lm -lcrypt" + + AC_CHECK_LIB(mysqlclient_r, mysql_thread_safe, AC_DEFINE(HAVE_MYSQL_THREAD_SAFE, 1, [Set if have mysql_thread_safe])) + if test "x$ac_cv_lib_mysqlclient_r_mysql_thread_safe" = "xyes"; then + if test -z "${batch_insert_db_backends}"; then + batch_insert_db_backends="MySQL" + else + batch_insert_db_backends="${batch_insert_db_backends} MySQL" + fi + fi + + dnl Revert after mysql checks + LDFLAGS="${saved_LDFLAGS}" + LIBS="${saved_LIBS}" + fi + else + AC_MSG_RESULT(no) + fi +],[ + 
AC_MSG_RESULT(no) +]) + +AC_SUBST(MYSQL_LIBS) +AC_SUBST(MYSQL_INCLUDE) +AC_SUBST(MYSQL_BINDIR) + +]) + +AC_DEFUN([BA_CHECK_INGRES_DB], +[ +AC_MSG_CHECKING(for Ingres support) +AC_ARG_WITH(ingres, +AC_HELP_STRING([--with-ingres@<:@=DIR@:>@], [Include Ingres support. DIR is the Ingres base install directory, default is to search through a number of common places for the Ingres files.]), +[ + if test "$withval" != "no"; then + if test "$withval" = "yes"; then + if test -f ${II_SYSTEM}/files/eqdefc.h; then + INGRES_INCDIR=${II_SYSTEM}/files + INGRES_LIBDIR=${II_SYSTEM}/lib + INGRES_BINDIR=${II_SYSTEM}/bin + elif test -f ${II_SYSTEM}/ingres/files/eqdefc.h; then + INGRES_INCDIR=${II_SYSTEM}/ingres/files + INGRES_LIBDIR=${II_SYSTEM}/ingres/lib + INGRES_BINDIR=${II_SYSTEM}/ingres/bin + else + AC_MSG_RESULT(no) + AC_MSG_ERROR(Unable to find eqdefc.h in standard locations) + fi + else + if test -f $withval/files/eqdefc.h; then + INGRES_INCDIR=$withval/files + INGRES_LIBDIR=$withval/lib + INGRES_BINDIR=$withval/bin + else + AC_MSG_RESULT(no) + AC_MSG_ERROR(Invalid Ingres directory $withval - unable to find Ingres headers under $withval) + fi + fi + INGRES_INCLUDE=-I$INGRES_INCDIR + if test x$use_libtool != xno; then + INGRES_LIBS="-R $INGRES_LIBDIR -L$INGRES_LIBDIR -lq.1 -lcompat.1 -lframe.1" + else + INGRES_LIBS="-L$INGRES_LIBDIR -lq.1 -lcompat.1 -lframe.1" + fi + DB_LIBS="${DB_LIBS} ${INGRES_LIBS}" + AC_DEFINE(HAVE_INGRES, 1, [Set if have Ingres Database]) + AC_MSG_RESULT(yes) + + if test -z "${db_backends}"; then + db_backends="Ingres" + else + db_backends="${db_backends} Ingres" + fi + if test -z "${DB_BACKENDS}"; then + DB_BACKENDS="ingres" + else + DB_BACKENDS="${DB_BACKENDS} ingres" + fi + else + AC_MSG_RESULT(no) + fi +],[ + AC_MSG_RESULT(no) +]) + +AC_SUBST(INGRES_LIBS) +AC_SUBST(INGRES_INCLUDE) +AC_SUBST(INGRES_BINDIR) +]) + +AC_DEFUN([BA_CHECK_SQLITE3_DB], +[ +AC_MSG_CHECKING(for SQLite3 support) +AC_ARG_WITH(sqlite3, +AC_HELP_STRING([--with-sqlite3@<:@=DIR@:>@], [Include SQLite3 support. 
DIR is the SQLite3 base install directory, default is to search through a number of common places for the SQLite3 files.]), +[ + if test "$withval" != "no"; then + if test "$withval" = "yes"; then + if test -f /usr/local/include/sqlite3.h; then + SQLITE_INCDIR=/usr/local/include + if test -d /usr/local/lib64; then + SQLITE_LIBDIR=/usr/local/lib64 + else + SQLITE_LIBDIR=/usr/local/lib + fi + SQLITE_BINDIR=/usr/local/bin + elif test -f /usr/include/sqlite3.h; then + SQLITE_INCDIR=/usr/include + if test -n $multiarch -a -d /usr/lib/$multiarch; then + SQLITE_LIBDIR=/usr/lib/$multiarch + elif test -d /usr/lib64; then + SQLITE_LIBDIR=/usr/lib64 + else + SQLITE_LIBDIR=/usr/lib + fi + SQLITE_BINDIR=/usr/bin + elif test -f $prefix/include/sqlite3.h; then + SQLITE_INCDIR=$prefix/include + if test -d $prefix/lib64; then + SQLITE_LIBDIR=$prefix/lib64 + else + SQLITE_LIBDIR=$prefix/lib + fi + SQLITE_BINDIR=$prefix/bin + else + AC_MSG_RESULT(no) + AC_MSG_ERROR(Unable to find sqlite3.h in standard locations) + fi + else + if test -f $withval/sqlite3.h; then + SQLITE_INCDIR=$withval + SQLITE_LIBDIR=$withval + SQLITE_BINDIR=$withval + elif test -f $withval/include/sqlite3.h; then + SQLITE_INCDIR=$withval/include + if test -d $withval/lib64; then + SQLITE_LIBDIR=$withval/lib64 + else + SQLITE_LIBDIR=$withval/lib + fi + SQLITE_BINDIR=$withval/bin + else + AC_MSG_RESULT(no) + AC_MSG_ERROR(Invalid SQLite3 directory $withval - unable to find sqlite3.h under $withval) + fi + fi + SQLITE_INCLUDE=-I$SQLITE_INCDIR + if test x$use_libtool != xno; then + SQLITE_LIBS="-R $SQLITE_LIBDIR -L$SQLITE_LIBDIR -lsqlite3" + else + SQLITE_LIBS="-L$SQLITE_LIBDIR -lsqlite3" + fi + SQLITE_LIB=$SQLITE_LIBDIR/libsqlite3.a + DB_LIBS="${DB_LIBS} ${SQLITE_LIBS}" + + AC_DEFINE(HAVE_SQLITE3, 1, [Set if you have an SQLite3 Database]) + AC_MSG_RESULT(yes) + + if test -z "${db_backends}"; then + db_backends="SQLite3" + else + db_backends="${db_backends} SQLite3" + fi + if test -z "${DB_BACKENDS}"; then + DB_BACKENDS="sqlite3" + else + DB_BACKENDS="${DB_BACKENDS} sqlite3" + fi + + dnl ------------------------------------------- + dnl Check if sqlite supports batch mode + dnl ------------------------------------------- + if test "x$support_batch_insert" = "xyes"; then + dnl For sqlite checking + saved_LDFLAGS="${LDFLAGS}" + LDFLAGS="${saved_LDFLAGS} -lpthread -L$SQLITE_LIBDIR" + + AC_CHECK_LIB(sqlite3, sqlite3_threadsafe, AC_DEFINE(HAVE_SQLITE3_THREADSAFE, 1, [Set if have sqlite3_threadsafe])) + if test "x$ac_cv_lib_sqlite3_sqlite3_threadsafe" = "xyes"; then + if test -z "${batch_insert_db_backends}"; then + batch_insert_db_backends="SQLite3" + else + batch_insert_db_backends="${batch_insert_db_backends} SQLite3" + fi + fi + + dnl Revert after sqlite checks + LDFLAGS="${saved_LDFLAGS}" + fi + else + AC_MSG_RESULT(no) + fi +],[ + AC_MSG_RESULT(no) +]) +AC_SUBST(SQLITE_LIBS) +AC_SUBST(SQLITE_INCLUDE) +AC_SUBST(SQLITE_BINDIR) + +]) + + +AC_DEFUN([BA_CHECK_POSTGRESQL_DB], +[ +AC_MSG_CHECKING(for PostgreSQL support) +AC_ARG_WITH(postgresql, +AC_HELP_STRING([--with-postgresql@<:@=DIR@:>@], [Include PostgreSQL support. 
DIR is the PostgreSQL base install directory, @<:@default=/usr/local/pgsql@:>@]), +[ + if test "$withval" != "no"; then + if test "$withval" = "yes"; then + PG_CONFIG=`which pg_config 2>/dev/null` + if test -n "$PG_CONFIG"; then + POSTGRESQL_INCDIR=`"$PG_CONFIG" --includedir` + POSTGRESQL_LIBDIR=`"$PG_CONFIG" --libdir` + POSTGRESQL_BINDIR=`"$PG_CONFIG" --bindir` + elif test -f /usr/local/include/libpq-fe.h; then + POSTGRESQL_INCDIR=/usr/local/include + if test -d /usr/local/lib64; then + POSTGRESQL_LIBDIR=/usr/local/lib64 + else + POSTGRESQL_LIBDIR=/usr/local/lib + fi + POSTGRESQL_BINDIR=/usr/local/bin + elif test -f /usr/include/libpq-fe.h; then + POSTGRESQL_INCDIR=/usr/include + if test -d /usr/lib64; then + POSTGRESQL_LIBDIR=/usr/lib64 + else + POSTGRESQL_LIBDIR=/usr/lib + fi + POSTGRESQL_BINDIR=/usr/bin + elif test -f /usr/include/pgsql/libpq-fe.h; then + POSTGRESQL_INCDIR=/usr/include/pgsql + if test -d /usr/lib64/pgsql; then + POSTGRESQL_LIBDIR=/usr/lib64/pgsql + else + POSTGRESQL_LIBDIR=/usr/lib/pgsql + fi + POSTGRESQL_BINDIR=/usr/bin + elif test -f /usr/include/postgresql/libpq-fe.h; then + POSTGRESQL_INCDIR=/usr/include/postgresql + if test -d /usr/lib64/postgresql; then + POSTGRESQL_LIBDIR=/usr/lib64/postgresql + else + POSTGRESQL_LIBDIR=/usr/lib/postgresql + fi + POSTGRESQL_BINDIR=/usr/bin + else + AC_MSG_RESULT(no) + AC_MSG_ERROR(Unable to find libpq-fe.h in standard locations) + fi + elif test -f $withval/include/libpq-fe.h; then + POSTGRESQL_INCDIR=$withval/include + POSTGRESQL_LIBDIR=$withval/lib + POSTGRESQL_BINDIR=$withval/bin + elif test -f $withval/include/postgresql/libpq-fe.h; then + POSTGRESQL_INCDIR=$withval/include/postgresql + if test -d $withval/lib64; then + POSTGRESQL_LIBDIR=$withval/lib64 + else + POSTGRESQL_LIBDIR=$withval/lib + fi + POSTGRESQL_BINDIR=$withval/bin + else + AC_MSG_RESULT(no) + AC_MSG_ERROR(Invalid PostgreSQL directory $withval - unable to find libpq-fe.h under $withval) + fi + AC_DEFINE(HAVE_POSTGRESQL, 1, [Set if you have an PostgreSQL Database]) + AC_MSG_RESULT(yes) + + POSTGRESQL_INCLUDE=-I$POSTGRESQL_INCDIR + if test x$use_libtool != xno; then + POSTGRESQL_LIBS="-R $POSTGRESQL_LIBDIR -L$POSTGRESQL_LIBDIR -lpq" + else + POSTGRESQL_LIBS="-L$POSTGRESQL_LIBDIR -lpq" + fi + AC_CHECK_FUNC(crypt, , AC_CHECK_LIB(crypt, crypt, [POSTGRESQL_LIBS="$POSTGRESQL_LIBS -lcrypt"])) + POSTGRESQL_LIB=$POSTGRESQL_LIBDIR/libpq.a + DB_LIBS="${DB_LIBS} ${POSTGRESQL_LIBS}" + + if test -z "${db_backends}"; then + db_backends="PostgreSQL" + else + db_backends="${db_backends} PostgreSQL" + fi + if test -z "${DB_BACKENDS}"; then + DB_BACKENDS="postgresql" + else + DB_BACKENDS="${DB_BACKENDS} postgresql" + fi + + dnl ------------------------------------------- + dnl Check if postgresql supports batch mode + dnl ------------------------------------------- + if test "x$support_batch_insert" = "xyes"; then + dnl For postgresql checking + saved_LDFLAGS="${LDFLAGS}" + LDFLAGS="${saved_LDFLAGS} -L$POSTGRESQL_LIBDIR" + saved_LIBS="${LIBS}" + if test "x$ac_cv_lib_crypt_crypt" = "xyes" ; then + LIBS="${saved_LIBS} -lcrypt" + fi + + AC_CHECK_LIB(pq, PQisthreadsafe, AC_DEFINE(HAVE_PQISTHREADSAFE, 1, [Set if have PQisthreadsafe])) + AC_CHECK_LIB(pq, PQputCopyData, AC_DEFINE(HAVE_PQ_COPY, 1, [Set if have PQputCopyData])) + if test "x$ac_cv_lib_pq_PQputCopyData" = "xyes"; then + if test $support_batch_insert = yes ; then + AC_DEFINE(HAVE_POSTGRESQL_BATCH_FILE_INSERT, 1, [Set if PostgreSQL DB batch insert code enabled]) + if test -z "${batch_insert_db_backends}"; then + 
batch_insert_db_backends="PostgreSQL" + else + batch_insert_db_backends="${batch_insert_db_backends} PostgreSQL" + fi + fi + fi + + if test x$ac_cv_lib_pq_PQisthreadsafe != xyes -a x$support_batch_insert = xyes + then + echo "WARNING: Your PostgreSQL client library is too old to detect " + echo "if it was compiled with --enable-thread-safety, consider to " + echo "upgrade it in order to avoid problems with Batch insert mode" + fi + + dnl Revert after postgresql checks + LDFLAGS="${saved_LDFLAGS}" + LIBS="${saved_LIBS}" + fi + else + AC_MSG_RESULT(no) + fi +],[ + AC_MSG_RESULT(no) +]) +AC_SUBST(POSTGRESQL_LIBS) +AC_SUBST(POSTGRESQL_INCLUDE) +AC_SUBST(POSTGRESQL_BINDIR) + +]) + +AC_DEFUN([AM_CONDITIONAL], +[AC_SUBST($1_TRUE) +AC_SUBST($1_FALSE) +if $2; then + $1_TRUE= + $1_FALSE='#' +else + $1_TRUE='#' + $1_FALSE= +fi]) diff --git a/autoconf/bacula-macros/largefiles.m4 b/autoconf/bacula-macros/largefiles.m4 new file mode 100644 index 00000000..e33d0056 --- /dev/null +++ b/autoconf/bacula-macros/largefiles.m4 @@ -0,0 +1,109 @@ +dnl +dnl ========= Large File Support ============== +dnl By default, many hosts won't let programs access large files; +dnl one must use special compiler options to get large-file access to work. +dnl For more details about this brain damage please see: +dnl http://www.sas.com/standards/large.file/x_open.20Mar96.html + +dnl Written by Paul Eggert . +dnl +dnl Modified by Kern Sibbald to turn on the large file +dnl flags on all machines. Otherwise functions such as +dnl fseek are not large file capable. +dnl + +dnl Internal subroutine of AC_SYS_LARGEFILE. +dnl AC_SYS_LARGEFILE_FLAGS(FLAGSNAME) +AC_DEFUN([AC_SYS_LARGEFILE_FLAGS], + [AC_CACHE_CHECK([for $1 value to request large file support], + ac_cv_sys_largefile_$1, + [ac_cv_sys_largefile_$1=`($GETCONF LFS_$1) 2>/dev/null` || { + ac_cv_sys_largefile_$1=no + ifelse($1, CFLAGS, + [case "$host_os" in + # IRIX 6.2 and later require cc -n32. +changequote(, )dnl + irix6.[2-9]* | irix6.1[0-9]* | irix[7-9].* | irix[1-9][0-9]*) +changequote([, ])dnl + if test "$GCC" != yes; then + ac_cv_sys_largefile_CFLAGS=-n32 + fi + ac_save_CC="$CC" + CC="$CC $ac_cv_sys_largefile_CFLAGS" + AC_TRY_LINK(, , , ac_cv_sys_largefile_CFLAGS=no) + CC="$ac_save_CC" + esac]) + }])]) + +dnl Internal subroutine of AC_SYS_LARGEFILE. +dnl AC_SYS_LARGEFILE_SPACE_APPEND(VAR, VAL) +AC_DEFUN([AC_SYS_LARGEFILE_SPACE_APPEND], + [case $2 in + no) ;; + ?*) + case "[$]$1" in + '') $1=$2 ;; + *) $1=[$]$1' '$2 ;; + esac ;; + esac]) + +dnl Internal subroutine of AC_SYS_LARGEFILE. 
+dnl AC_SYS_LARGEFILE_MACRO_VALUE(C-MACRO, CACHE-VAR, COMMENT, CODE-TO-SET-DEFAULT) +AC_DEFUN([AC_SYS_LARGEFILE_MACRO_VALUE], + [AC_CACHE_CHECK([for $1], $2, + [$2=no +changequote(, )dnl + $4 + for ac_flag in $ac_cv_sys_largefile_CFLAGS no; do + case "$ac_flag" in + -D$1) + $2=1 ;; + -D$1=*) + $2=`expr " $ac_flag" : '[^=]*=\(.*\)'` ;; + esac + done +changequote([, ])dnl + ]) + if test "[$]$2" != no; then + AC_DEFINE_UNQUOTED([$1], [$]$2, [$3]) + fi]) + +AC_DEFUN([AC_BAC_LARGEFILE], + [AC_REQUIRE([AC_CANONICAL_HOST]) + AC_ARG_ENABLE(largefile, + [ --disable-largefile omit support for large files]) + if test "$enable_largefile" != no; then + AC_CHECK_TOOL(GETCONF, getconf) + AC_SYS_LARGEFILE_FLAGS(CFLAGS) + AC_SYS_LARGEFILE_FLAGS(LDFLAGS) + AC_SYS_LARGEFILE_FLAGS(LIBS) + + for ac_flag in $ac_cv_sys_largefile_CFLAGS no; do + case "$ac_flag" in + no) ;; + -D_FILE_OFFSET_BITS=*) ;; + -D_LARGEFILE_SOURCE | -D_LARGEFILE_SOURCE=*) ;; + -D_LARGE_FILES | -D_LARGE_FILES=*) ;; + -D?* | -I?*) + AC_SYS_LARGEFILE_SPACE_APPEND(CPPFLAGS, "$ac_flag") ;; + *) + AC_SYS_LARGEFILE_SPACE_APPEND(CFLAGS, "$ac_flag") ;; + esac + done + AC_SYS_LARGEFILE_SPACE_APPEND(LDFLAGS, "$ac_cv_sys_largefile_LDFLAGS") + AC_SYS_LARGEFILE_SPACE_APPEND(LIBS, "$ac_cv_sys_largefile_LIBS") + AC_SYS_LARGEFILE_MACRO_VALUE(_FILE_OFFSET_BITS, + ac_cv_sys_file_offset_bits, + [Number of bits in a file offset, on hosts where this is settable.], + [ac_cv_sys_file_offset_bits=64]) + AC_SYS_LARGEFILE_MACRO_VALUE(_LARGEFILE_SOURCE, + ac_cv_sys_largefile_source, + [Define to make fseeko etc. visible, on some hosts.], + [ac_cv_sys_largefile_source=1]) + AC_SYS_LARGEFILE_MACRO_VALUE(_LARGE_FILES, + ac_cv_sys_large_files, + [Define for large files, on AIX-style hosts.], + [ac_cv_sys_large_files=1]) + fi + ]) +dnl ========================================================== diff --git a/autoconf/bacula-macros/os.m4 b/autoconf/bacula-macros/os.m4 new file mode 100644 index 00000000..b82deed5 --- /dev/null +++ b/autoconf/bacula-macros/os.m4 @@ -0,0 +1,252 @@ +dnl Check type of signal routines (posix, 4.2bsd, 4.1bsd or v7) + +dln Author: Nicolas Boichat +dnl License: BSD 2-Clause; see file LICENSE-FOSS + +AC_DEFUN([SIGNAL_CHECK], + [AC_REQUIRE([AC_TYPE_SIGNAL]) + AC_MSG_CHECKING(for type of signal functions) + AC_CACHE_VAL(bash_cv_signal_vintage, + [ + AC_TRY_LINK([#include ],[ + sigset_t ss; + struct sigaction sa; + sigemptyset(&ss); sigsuspend(&ss); + sigaction(SIGINT, &sa, (struct sigaction *) 0); + sigprocmask(SIG_BLOCK, &ss, (sigset_t *) 0); + ], bash_cv_signal_vintage="posix", + [ + AC_TRY_LINK([#include ], [ + int mask = sigmask(SIGINT); + sigsetmask(mask); sigblock(mask); sigpause(mask); + ], bash_cv_signal_vintage="4.2bsd", + [ + AC_TRY_LINK([ + #include + RETSIGTYPE foo() { }], [ + int mask = sigmask(SIGINT); + sigset(SIGINT, foo); sigrelse(SIGINT); + sighold(SIGINT); sigpause(SIGINT); + ], bash_cv_signal_vintage="svr3", bash_cv_signal_vintage="v7" + )] + )] + ) + ]) + AC_MSG_RESULT($bash_cv_signal_vintage) + if test "$bash_cv_signal_vintage" = "posix"; then + AC_DEFINE(HAVE_POSIX_SIGNALS) + elif test "$bash_cv_signal_vintage" = "4.2bsd"; then + AC_DEFINE(HAVE_BSD_SIGNALS) + elif test "$bash_cv_signal_vintage" = "svr3"; then + AC_DEFINE(HAVE_USG_SIGHOLD) + fi +]) + +AC_DEFUN([BA_CONDITIONAL], +[AC_SUBST($1_TRUE) +AC_SUBST($1_FALSE) +if $2; then + $1_TRUE= + $1_FALSE='#' +else + $1_TRUE='#' + $1_FALSE= +fi]) + + +AC_DEFUN([BA_CHECK_OPSYS], +[ +AC_CYGWIN +if test $HAVE_UNAME=yes -a x`uname -s` = xSunOS +then + BA_CONDITIONAL(HAVE_SUN_OS, 
$TRUEPRG) + AC_DEFINE(HAVE_SUN_OS) +else + BA_CONDITIONAL(HAVE_SUN_OS, $FALSEPRG) +fi + +if test $HAVE_UNAME=yes -a x`uname -s` = xGNU +then + BA_CONDITIONAL(HAVE_HURD_OS, $TRUEPRG) + AC_DEFINE(HAVE_HURD_OS) +else + BA_CONDITIONAL(HAVE_HURD_OS, $FALSEPRG) +fi + +if test $HAVE_UNAME=yes -a x`uname -s` = xOSF1 +then + BA_CONDITIONAL(HAVE_OSF1_OS, $TRUEPRG) + AC_DEFINE(HAVE_OSF1_OS) +else + BA_CONDITIONAL(HAVE_OSF1_OS, $FALSEPRG) +fi + +if test $HAVE_UNAME=yes -a x`uname -s` = xAIX +then + BA_CONDITIONAL(HAVE_AIX_OS, $TRUEPRG) + AC_DEFINE(HAVE_AIX_OS) +else + BA_CONDITIONAL(HAVE_AIX_OS, $FALSEPRG) +fi + +if test $HAVE_UNAME=yes -a x`uname -s` = xHP-UX +then + BA_CONDITIONAL(HAVE_HPUX_OS, $TRUEPRG) + AC_DEFINE(HAVE_HPUX_OS) +else + BA_CONDITIONAL(HAVE_HPUX_OS, $FALSEPRG) +fi + +if test $HAVE_UNAME=yes -a x`uname -s` = xLinux +then + BA_CONDITIONAL(HAVE_LINUX_OS, $TRUEPRG) + AC_DEFINE(HAVE_LINUX_OS) +else + BA_CONDITIONAL(HAVE_LINUX_OS, $FALSEPRG) +fi + +if test $HAVE_UNAME=yes -a x`uname -s` = xFreeBSD +then + BA_CONDITIONAL(HAVE_FREEBSD_OS, $TRUEPRG) + AC_DEFINE(HAVE_FREEBSD_OS) +else + BA_CONDITIONAL(HAVE_FREEBSD_OS, $FALSEPRG) +fi + +if test $HAVE_UNAME=yes -a x`uname -s` = xNetBSD +then + BA_CONDITIONAL(HAVE_NETBSD_OS, $TRUEPRG) + AC_DEFINE(HAVE_NETBSD_OS) +else + BA_CONDITIONAL(HAVE_NETBSD_OS, $FALSEPRG) +fi + +if test $HAVE_UNAME=yes -a x`uname -s` = xOpenBSD +then + BA_CONDITIONAL(HAVE_OPENBSD_OS, $TRUEPRG) + AC_DEFINE(HAVE_OPENBSD_OS) +else + BA_CONDITIONAL(HAVE_OPENBSD_OS, $FALSEPRG) +fi + +if test $HAVE_UNAME=yes -a x`uname -s` = xBSD/OS +then + BA_CONDITIONAL(HAVE_BSDI_OS, $TRUEPRG) + AC_DEFINE(HAVE_BSDI_OS) +else + BA_CONDITIONAL(HAVE_BSDI_OS, $FALSEPRG) +fi + +if test $HAVE_UNAME=yes -a x`uname -s` = xSGI +then + BA_CONDITIONAL(HAVE_SGI_OS, $TRUEPRG) + AC_DEFINE(HAVE_SGI_OS) +else + BA_CONDITIONAL(HAVE_SGI_OS, $FALSEPRG) +fi + +if test $HAVE_UNAME=yes -a x`uname -s` = xIRIX -o x`uname -s` = xIRIX64 +then + BA_CONDITIONAL(HAVE_IRIX_OS, $TRUEPRG) + AC_DEFINE(HAVE_IRIX_OS) +else + BA_CONDITIONAL(HAVE_IRIX_OS, $FALSEPRG) +fi + +if test $HAVE_UNAME=yes -a x`uname -s` = xDarwin +then + AM_CONDITIONAL(HAVE_DARWIN_OS, $TRUEPRG) + AC_DEFINE(HAVE_DARWIN_OS) +else + AM_CONDITIONAL(HAVE_DARWIN_OS, $FALSEPRG) +fi + +if test $HAVE_UNAME=yes -a x`uname -s` = xGNU/kFreeBSD +then + AM_CONDITIONAL(HAVE_KFREEBSD_OS, $TRUEPRG) + AC_DEFINE(HAVE_KFREEBSD_OS) +else + AM_CONDITIONAL(HAVE_KFREEBSD_OS, $FALSEPRG) +fi +]) + +AC_DEFUN([BA_CHECK_OPSYS_DISTNAME], +[AC_MSG_CHECKING(for Operating System Distribution) +if test "x$DISTNAME" != "x" +then + echo "distname set to $DISTNAME" +elif test $HAVE_UNAME=yes -a x`uname -s` = xOSF1 +then + DISTNAME=alpha +elif test $HAVE_UNAME=yes -a x`uname -s` = xAIX +then + DISTNAME=aix +elif test $HAVE_UNAME=yes -a x`uname -s` = xHP-UX +then + DISTNAME=hpux +elif test $HAVE_UNAME=yes -a x`uname -s` = xSunOS +then + DISTNAME=solaris +elif test $HAVE_UNAME=yes -a x`uname -s` = xGNU +then + DISTNAME=hurd +elif test $HAVE_UNAME=yes -a x`uname -s` = xFreeBSD +then + DISTNAME=freebsd +elif test $HAVE_UNAME=yes -a x`uname -s` = xNetBSD +then + DISTNAME=netbsd +elif test $HAVE_UNAME=yes -a x`uname -s` = xOpenBSD +then + DISTNAME=openbsd +elif test $HAVE_UNAME=yes -a x`uname -s` = xIRIX +then + DISTNAME=irix +elif test $HAVE_UNAME=yes -a x`uname -s` = xBSD/OS +then + DISTNAME=bsdi +elif test -f /etc/SuSE-release +then + DISTNAME=suse +elif test -d /etc/SuSEconfig +then + DISTNAME=suse5 +elif test -f /etc/mandrake-release +then + DISTNAME=mandrake +elif test -f 
/etc/whitebox-release +then + DISTNAME=redhat +elif test -f /etc/redhat-release +then + DISTNAME=redhat +elif test -f /etc/gentoo-release +then + DISTNAME=gentoo +elif test -f /etc/debian_version +then + DISTNAME=debian +elif test -f /etc/slackware-version +then + DISTNAME=slackware +elif test x$host_vendor = xapple +then + DISTNAME=osx +elif test $HAVE_UNAME=yes -a x`uname -s` = xDarwin +then + DISTNAME=darwin +elif test -f /etc/engarde-version +then + DISTNAME=engarde +elif test -f /etc/arch-release +then + DISTNAME=archlinux +elif test "$CYGWIN" = yes +then + DISTNAME=cygwin + AC_DEFINE(HAVE_CYGWIN) +else + DISTNAME=unknown +fi +AC_MSG_RESULT(done) +]) diff --git a/autoconf/confdefs.h b/autoconf/confdefs.h new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/autoconf/confdefs.h @@ -0,0 +1 @@ + diff --git a/autoconf/config.guess b/autoconf/config.guess new file mode 100755 index 00000000..f50dcdb6 --- /dev/null +++ b/autoconf/config.guess @@ -0,0 +1,1480 @@ +#! /bin/sh +# Attempt to guess a canonical system name. +# Copyright 1992-2018 Free Software Foundation, Inc. + +timestamp='2018-02-24' + +# This file is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see . +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that +# program. This Exception is an additional permission under section 7 +# of the GNU General Public License, version 3 ("GPLv3"). +# +# Originally written by Per Bothner; maintained since 2000 by Ben Elliston. +# +# You can get the latest version of this script from: +# https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess +# +# Please send patches to . + + +me=`echo "$0" | sed -e 's,.*/,,'` + +usage="\ +Usage: $0 [OPTION] + +Output the configuration name of the system \`$me' is run on. + +Options: + -h, --help print this help, then exit + -t, --time-stamp print date of last modification, then exit + -v, --version print version number, then exit + +Report bugs and patches to ." + +version="\ +GNU config.guess ($timestamp) + +Originally written by Per Bothner. +Copyright 1992-2018 Free Software Foundation, Inc. + +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + +help=" +Try \`$me --help' for more information." + +# Parse command line +while test $# -gt 0 ; do + case $1 in + --time-stamp | --time* | -t ) + echo "$timestamp" ; exit ;; + --version | -v ) + echo "$version" ; exit ;; + --help | --h* | -h ) + echo "$usage"; exit ;; + -- ) # Stop option processing + shift; break ;; + - ) # Use stdin as input. 
+ break ;; + -* ) + echo "$me: invalid option $1$help" >&2 + exit 1 ;; + * ) + break ;; + esac +done + +if test $# != 0; then + echo "$me: too many arguments$help" >&2 + exit 1 +fi + +trap 'exit 1' 1 2 15 + +# CC_FOR_BUILD -- compiler used by this script. Note that the use of a +# compiler to aid in system detection is discouraged as it requires +# temporary files to be created and, as you can see below, it is a +# headache to deal with in a portable fashion. + +# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still +# use `HOST_CC' if defined, but it is deprecated. + +# Portable tmp directory creation inspired by the Autoconf team. + +set_cc_for_build=' +trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ; +trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ; +: ${TMPDIR=/tmp} ; + { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } || + { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } || + { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } || + { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ; +dummy=$tmp/dummy ; +tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ; +case $CC_FOR_BUILD,$HOST_CC,$CC in + ,,) echo "int x;" > "$dummy.c" ; + for c in cc gcc c89 c99 ; do + if ($c -c -o "$dummy.o" "$dummy.c") >/dev/null 2>&1 ; then + CC_FOR_BUILD="$c"; break ; + fi ; + done ; + if test x"$CC_FOR_BUILD" = x ; then + CC_FOR_BUILD=no_compiler_found ; + fi + ;; + ,,*) CC_FOR_BUILD=$CC ;; + ,*,*) CC_FOR_BUILD=$HOST_CC ;; +esac ; set_cc_for_build= ;' + +# This is needed to find uname on a Pyramid OSx when run in the BSD universe. +# (ghazi@noc.rutgers.edu 1994-08-24) +if (test -f /.attbin/uname) >/dev/null 2>&1 ; then + PATH=$PATH:/.attbin ; export PATH +fi + +UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown +UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown +UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown +UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown + +case "$UNAME_SYSTEM" in +Linux|GNU|GNU/*) + # If the system lacks a compiler, then just pick glibc. + # We could probably try harder. + LIBC=gnu + + eval "$set_cc_for_build" + cat <<-EOF > "$dummy.c" + #include + #if defined(__UCLIBC__) + LIBC=uclibc + #elif defined(__dietlibc__) + LIBC=dietlibc + #else + LIBC=gnu + #endif + EOF + eval "`$CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^LIBC' | sed 's, ,,g'`" + + # If ldd exists, use it to detect musl libc. + if command -v ldd >/dev/null && \ + ldd --version 2>&1 | grep -q ^musl + then + LIBC=musl + fi + ;; +esac + +# Note: order is significant - the case branches are not exclusive. + +case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in + *:NetBSD:*:*) + # NetBSD (nbsd) targets should (where applicable) match one or + # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*, + # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently + # switched to ELF, *-*-netbsd* would select the old + # object file format. This provides both forward + # compatibility and a consistent mechanism for selecting the + # object file format. + # + # Note: NetBSD doesn't particularly care about the vendor + # portion of the name. We always set it to "unknown". 
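+	# Illustrative example (assumed, not exhaustive): an amd64 NetBSD 9.2
+	# host would typically come out of the logic below as
+	# x86_64-unknown-netbsd9.2.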
+ sysctl="sysctl -n hw.machine_arch" + UNAME_MACHINE_ARCH=`(uname -p 2>/dev/null || \ + "/sbin/$sysctl" 2>/dev/null || \ + "/usr/sbin/$sysctl" 2>/dev/null || \ + echo unknown)` + case "$UNAME_MACHINE_ARCH" in + armeb) machine=armeb-unknown ;; + arm*) machine=arm-unknown ;; + sh3el) machine=shl-unknown ;; + sh3eb) machine=sh-unknown ;; + sh5el) machine=sh5le-unknown ;; + earmv*) + arch=`echo "$UNAME_MACHINE_ARCH" | sed -e 's,^e\(armv[0-9]\).*$,\1,'` + endian=`echo "$UNAME_MACHINE_ARCH" | sed -ne 's,^.*\(eb\)$,\1,p'` + machine="${arch}${endian}"-unknown + ;; + *) machine="$UNAME_MACHINE_ARCH"-unknown ;; + esac + # The Operating System including object format, if it has switched + # to ELF recently (or will in the future) and ABI. + case "$UNAME_MACHINE_ARCH" in + earm*) + os=netbsdelf + ;; + arm*|i386|m68k|ns32k|sh3*|sparc|vax) + eval "$set_cc_for_build" + if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ELF__ + then + # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout). + # Return netbsd for either. FIX? + os=netbsd + else + os=netbsdelf + fi + ;; + *) + os=netbsd + ;; + esac + # Determine ABI tags. + case "$UNAME_MACHINE_ARCH" in + earm*) + expr='s/^earmv[0-9]/-eabi/;s/eb$//' + abi=`echo "$UNAME_MACHINE_ARCH" | sed -e "$expr"` + ;; + esac + # The OS release + # Debian GNU/NetBSD machines have a different userland, and + # thus, need a distinct triplet. However, they do not need + # kernel version information, so it can be replaced with a + # suitable tag, in the style of linux-gnu. + case "$UNAME_VERSION" in + Debian*) + release='-gnu' + ;; + *) + release=`echo "$UNAME_RELEASE" | sed -e 's/[-_].*//' | cut -d. -f1,2` + ;; + esac + # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: + # contains redundant information, the shorter form: + # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. + echo "$machine-${os}${release}${abi}" + exit ;; + *:Bitrig:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'` + echo "$UNAME_MACHINE_ARCH"-unknown-bitrig"$UNAME_RELEASE" + exit ;; + *:OpenBSD:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'` + echo "$UNAME_MACHINE_ARCH"-unknown-openbsd"$UNAME_RELEASE" + exit ;; + *:LibertyBSD:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/^.*BSD\.//'` + echo "$UNAME_MACHINE_ARCH"-unknown-libertybsd"$UNAME_RELEASE" + exit ;; + *:MidnightBSD:*:*) + echo "$UNAME_MACHINE"-unknown-midnightbsd"$UNAME_RELEASE" + exit ;; + *:ekkoBSD:*:*) + echo "$UNAME_MACHINE"-unknown-ekkobsd"$UNAME_RELEASE" + exit ;; + *:SolidBSD:*:*) + echo "$UNAME_MACHINE"-unknown-solidbsd"$UNAME_RELEASE" + exit ;; + macppc:MirBSD:*:*) + echo powerpc-unknown-mirbsd"$UNAME_RELEASE" + exit ;; + *:MirBSD:*:*) + echo "$UNAME_MACHINE"-unknown-mirbsd"$UNAME_RELEASE" + exit ;; + *:Sortix:*:*) + echo "$UNAME_MACHINE"-unknown-sortix + exit ;; + *:Redox:*:*) + echo "$UNAME_MACHINE"-unknown-redox + exit ;; + mips:OSF1:*.*) + echo mips-dec-osf1 + exit ;; + alpha:OSF1:*:*) + case $UNAME_RELEASE in + *4.0) + UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'` + ;; + *5.*) + UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'` + ;; + esac + # According to Compaq, /usr/sbin/psrinfo has been available on + # OSF/1 and Tru64 systems produced since 1995. I hope that + # covers most systems running today. This code pipes the CPU + # types through head -n 1, so we only detect the type of CPU 0. 
+ ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1` + case "$ALPHA_CPU_TYPE" in + "EV4 (21064)") + UNAME_MACHINE=alpha ;; + "EV4.5 (21064)") + UNAME_MACHINE=alpha ;; + "LCA4 (21066/21068)") + UNAME_MACHINE=alpha ;; + "EV5 (21164)") + UNAME_MACHINE=alphaev5 ;; + "EV5.6 (21164A)") + UNAME_MACHINE=alphaev56 ;; + "EV5.6 (21164PC)") + UNAME_MACHINE=alphapca56 ;; + "EV5.7 (21164PC)") + UNAME_MACHINE=alphapca57 ;; + "EV6 (21264)") + UNAME_MACHINE=alphaev6 ;; + "EV6.7 (21264A)") + UNAME_MACHINE=alphaev67 ;; + "EV6.8CB (21264C)") + UNAME_MACHINE=alphaev68 ;; + "EV6.8AL (21264B)") + UNAME_MACHINE=alphaev68 ;; + "EV6.8CX (21264D)") + UNAME_MACHINE=alphaev68 ;; + "EV6.9A (21264/EV69A)") + UNAME_MACHINE=alphaev69 ;; + "EV7 (21364)") + UNAME_MACHINE=alphaev7 ;; + "EV7.9 (21364A)") + UNAME_MACHINE=alphaev79 ;; + esac + # A Pn.n version is a patched version. + # A Vn.n version is a released version. + # A Tn.n version is a released field test version. + # A Xn.n version is an unreleased experimental baselevel. + # 1.2 uses "1.2" for uname -r. + echo "$UNAME_MACHINE"-dec-osf"`echo "$UNAME_RELEASE" | sed -e 's/^[PVTX]//' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz`" + # Reset EXIT trap before exiting to avoid spurious non-zero exit code. + exitcode=$? + trap '' 0 + exit $exitcode ;; + Amiga*:UNIX_System_V:4.0:*) + echo m68k-unknown-sysv4 + exit ;; + *:[Aa]miga[Oo][Ss]:*:*) + echo "$UNAME_MACHINE"-unknown-amigaos + exit ;; + *:[Mm]orph[Oo][Ss]:*:*) + echo "$UNAME_MACHINE"-unknown-morphos + exit ;; + *:OS/390:*:*) + echo i370-ibm-openedition + exit ;; + *:z/VM:*:*) + echo s390-ibm-zvmoe + exit ;; + *:OS400:*:*) + echo powerpc-ibm-os400 + exit ;; + arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) + echo arm-acorn-riscix"$UNAME_RELEASE" + exit ;; + arm*:riscos:*:*|arm*:RISCOS:*:*) + echo arm-unknown-riscos + exit ;; + SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) + echo hppa1.1-hitachi-hiuxmpp + exit ;; + Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*) + # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE. + if test "`(/bin/universe) 2>/dev/null`" = att ; then + echo pyramid-pyramid-sysv3 + else + echo pyramid-pyramid-bsd + fi + exit ;; + NILE*:*:*:dcosx) + echo pyramid-pyramid-svr4 + exit ;; + DRS?6000:unix:4.0:6*) + echo sparc-icl-nx6 + exit ;; + DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*) + case `/usr/bin/uname -p` in + sparc) echo sparc-icl-nx7; exit ;; + esac ;; + s390x:SunOS:*:*) + echo "$UNAME_MACHINE"-ibm-solaris2"`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'`" + exit ;; + sun4H:SunOS:5.*:*) + echo sparc-hal-solaris2"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`" + exit ;; + sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) + echo sparc-sun-solaris2"`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'`" + exit ;; + i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*) + echo i386-pc-auroraux"$UNAME_RELEASE" + exit ;; + i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) + eval "$set_cc_for_build" + SUN_ARCH=i386 + # If there is a compiler, see if it is configured for 64-bit objects. + # Note that the Sun cc does not turn __LP64__ into 1 like gcc does. + # This test works for both compilers. 
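+	# For example (illustrative, assuming a SunOS 5.11 host): when the
+	# compiler emits 64-bit objects by default, IS_64BIT_ARCH survives the
+	# preprocessing probe below and the reported triplet becomes
+	# x86_64-pc-solaris2.11 rather than i386-pc-solaris2.11.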
+ if [ "$CC_FOR_BUILD" != no_compiler_found ]; then + if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \ + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_64BIT_ARCH >/dev/null + then + SUN_ARCH=x86_64 + fi + fi + echo "$SUN_ARCH"-pc-solaris2"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`" + exit ;; + sun4*:SunOS:6*:*) + # According to config.sub, this is the proper way to canonicalize + # SunOS6. Hard to guess exactly what SunOS6 will be like, but + # it's likely to be more like Solaris than SunOS4. + echo sparc-sun-solaris3"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`" + exit ;; + sun4*:SunOS:*:*) + case "`/usr/bin/arch -k`" in + Series*|S4*) + UNAME_RELEASE=`uname -v` + ;; + esac + # Japanese Language versions have a version number like `4.1.3-JL'. + echo sparc-sun-sunos"`echo "$UNAME_RELEASE"|sed -e 's/-/_/'`" + exit ;; + sun3*:SunOS:*:*) + echo m68k-sun-sunos"$UNAME_RELEASE" + exit ;; + sun*:*:4.2BSD:*) + UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` + test "x$UNAME_RELEASE" = x && UNAME_RELEASE=3 + case "`/bin/arch`" in + sun3) + echo m68k-sun-sunos"$UNAME_RELEASE" + ;; + sun4) + echo sparc-sun-sunos"$UNAME_RELEASE" + ;; + esac + exit ;; + aushp:SunOS:*:*) + echo sparc-auspex-sunos"$UNAME_RELEASE" + exit ;; + # The situation for MiNT is a little confusing. The machine name + # can be virtually everything (everything which is not + # "atarist" or "atariste" at least should have a processor + # > m68000). The system name ranges from "MiNT" over "FreeMiNT" + # to the lowercase version "mint" (or "freemint"). Finally + # the system name "TOS" denotes a system which is actually not + # MiNT. But MiNT is downward compatible to TOS, so this should + # be no problem. + atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) + echo m68k-atari-mint"$UNAME_RELEASE" + exit ;; + atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) + echo m68k-atari-mint"$UNAME_RELEASE" + exit ;; + *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) + echo m68k-atari-mint"$UNAME_RELEASE" + exit ;; + milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) + echo m68k-milan-mint"$UNAME_RELEASE" + exit ;; + hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) + echo m68k-hades-mint"$UNAME_RELEASE" + exit ;; + *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) + echo m68k-unknown-mint"$UNAME_RELEASE" + exit ;; + m68k:machten:*:*) + echo m68k-apple-machten"$UNAME_RELEASE" + exit ;; + powerpc:machten:*:*) + echo powerpc-apple-machten"$UNAME_RELEASE" + exit ;; + RISC*:Mach:*:*) + echo mips-dec-mach_bsd4.3 + exit ;; + RISC*:ULTRIX:*:*) + echo mips-dec-ultrix"$UNAME_RELEASE" + exit ;; + VAX*:ULTRIX*:*:*) + echo vax-dec-ultrix"$UNAME_RELEASE" + exit ;; + 2020:CLIX:*:* | 2430:CLIX:*:*) + echo clipper-intergraph-clix"$UNAME_RELEASE" + exit ;; + mips:*:*:UMIPS | mips:*:*:RISCos) + eval "$set_cc_for_build" + sed 's/^ //' << EOF > "$dummy.c" +#ifdef __cplusplus +#include /* for printf() prototype */ + int main (int argc, char *argv[]) { +#else + int main (argc, argv) int argc; char *argv[]; { +#endif + #if defined (host_mips) && defined (MIPSEB) + #if defined (SYSTYPE_SYSV) + printf ("mips-mips-riscos%ssysv\\n", argv[1]); exit (0); + #endif + #if defined (SYSTYPE_SVR4) + printf ("mips-mips-riscos%ssvr4\\n", argv[1]); exit (0); + #endif + #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) + printf ("mips-mips-riscos%sbsd\\n", argv[1]); exit (0); + #endif + #endif + exit (-1); + } +EOF + $CC_FOR_BUILD -o "$dummy" "$dummy.c" && + dummyarg=`echo "$UNAME_RELEASE" | 
sed -n 's/\([0-9]*\).*/\1/p'` && + SYSTEM_NAME=`"$dummy" "$dummyarg"` && + { echo "$SYSTEM_NAME"; exit; } + echo mips-mips-riscos"$UNAME_RELEASE" + exit ;; + Motorola:PowerMAX_OS:*:*) + echo powerpc-motorola-powermax + exit ;; + Motorola:*:4.3:PL8-*) + echo powerpc-harris-powermax + exit ;; + Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*) + echo powerpc-harris-powermax + exit ;; + Night_Hawk:Power_UNIX:*:*) + echo powerpc-harris-powerunix + exit ;; + m88k:CX/UX:7*:*) + echo m88k-harris-cxux7 + exit ;; + m88k:*:4*:R4*) + echo m88k-motorola-sysv4 + exit ;; + m88k:*:3*:R3*) + echo m88k-motorola-sysv3 + exit ;; + AViiON:dgux:*:*) + # DG/UX returns AViiON for all architectures + UNAME_PROCESSOR=`/usr/bin/uname -p` + if [ "$UNAME_PROCESSOR" = mc88100 ] || [ "$UNAME_PROCESSOR" = mc88110 ] + then + if [ "$TARGET_BINARY_INTERFACE"x = m88kdguxelfx ] || \ + [ "$TARGET_BINARY_INTERFACE"x = x ] + then + echo m88k-dg-dgux"$UNAME_RELEASE" + else + echo m88k-dg-dguxbcs"$UNAME_RELEASE" + fi + else + echo i586-dg-dgux"$UNAME_RELEASE" + fi + exit ;; + M88*:DolphinOS:*:*) # DolphinOS (SVR3) + echo m88k-dolphin-sysv3 + exit ;; + M88*:*:R3*:*) + # Delta 88k system running SVR3 + echo m88k-motorola-sysv3 + exit ;; + XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3) + echo m88k-tektronix-sysv3 + exit ;; + Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD) + echo m68k-tektronix-bsd + exit ;; + *:IRIX*:*:*) + echo mips-sgi-irix"`echo "$UNAME_RELEASE"|sed -e 's/-/_/g'`" + exit ;; + ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX. + echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id + exit ;; # Note that: echo "'`uname -s`'" gives 'AIX ' + i*86:AIX:*:*) + echo i386-ibm-aix + exit ;; + ia64:AIX:*:*) + if [ -x /usr/bin/oslevel ] ; then + IBM_REV=`/usr/bin/oslevel` + else + IBM_REV="$UNAME_VERSION.$UNAME_RELEASE" + fi + echo "$UNAME_MACHINE"-ibm-aix"$IBM_REV" + exit ;; + *:AIX:2:3) + if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then + eval "$set_cc_for_build" + sed 's/^ //' << EOF > "$dummy.c" + #include + + main() + { + if (!__power_pc()) + exit(1); + puts("powerpc-ibm-aix3.2.5"); + exit(0); + } +EOF + if $CC_FOR_BUILD -o "$dummy" "$dummy.c" && SYSTEM_NAME=`"$dummy"` + then + echo "$SYSTEM_NAME" + else + echo rs6000-ibm-aix3.2.5 + fi + elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then + echo rs6000-ibm-aix3.2.4 + else + echo rs6000-ibm-aix3.2 + fi + exit ;; + *:AIX:*:[4567]) + IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'` + if /usr/sbin/lsattr -El "$IBM_CPU_ID" | grep ' POWER' >/dev/null 2>&1; then + IBM_ARCH=rs6000 + else + IBM_ARCH=powerpc + fi + if [ -x /usr/bin/lslpp ] ; then + IBM_REV=`/usr/bin/lslpp -Lqc bos.rte.libc | + awk -F: '{ print $3 }' | sed s/[0-9]*$/0/` + else + IBM_REV="$UNAME_VERSION.$UNAME_RELEASE" + fi + echo "$IBM_ARCH"-ibm-aix"$IBM_REV" + exit ;; + *:AIX:*:*) + echo rs6000-ibm-aix + exit ;; + ibmrt:4.4BSD:*|romp-ibm:4.4BSD:*) + echo romp-ibm-bsd4.4 + exit ;; + ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and + echo romp-ibm-bsd"$UNAME_RELEASE" # 4.3 with uname added to + exit ;; # report: romp-ibm BSD 4.3 + *:BOSX:*:*) + echo rs6000-bull-bosx + exit ;; + DPX/2?00:B.O.S.:*:*) + echo m68k-bull-sysv3 + exit ;; + 9000/[34]??:4.3bsd:1.*:*) + echo m68k-hp-bsd + exit ;; + hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*) + echo m68k-hp-bsd4.4 + exit ;; + 9000/[34678]??:HP-UX:*:*) + HPUX_REV=`echo "$UNAME_RELEASE"|sed -e 's/[^.]*.[0B]*//'` + case "$UNAME_MACHINE" in + 9000/31?) 
HP_ARCH=m68000 ;; + 9000/[34]??) HP_ARCH=m68k ;; + 9000/[678][0-9][0-9]) + if [ -x /usr/bin/getconf ]; then + sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null` + sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null` + case "$sc_cpu_version" in + 523) HP_ARCH=hppa1.0 ;; # CPU_PA_RISC1_0 + 528) HP_ARCH=hppa1.1 ;; # CPU_PA_RISC1_1 + 532) # CPU_PA_RISC2_0 + case "$sc_kernel_bits" in + 32) HP_ARCH=hppa2.0n ;; + 64) HP_ARCH=hppa2.0w ;; + '') HP_ARCH=hppa2.0 ;; # HP-UX 10.20 + esac ;; + esac + fi + if [ "$HP_ARCH" = "" ]; then + eval "$set_cc_for_build" + sed 's/^ //' << EOF > "$dummy.c" + + #define _HPUX_SOURCE + #include + #include + + int main () + { + #if defined(_SC_KERNEL_BITS) + long bits = sysconf(_SC_KERNEL_BITS); + #endif + long cpu = sysconf (_SC_CPU_VERSION); + + switch (cpu) + { + case CPU_PA_RISC1_0: puts ("hppa1.0"); break; + case CPU_PA_RISC1_1: puts ("hppa1.1"); break; + case CPU_PA_RISC2_0: + #if defined(_SC_KERNEL_BITS) + switch (bits) + { + case 64: puts ("hppa2.0w"); break; + case 32: puts ("hppa2.0n"); break; + default: puts ("hppa2.0"); break; + } break; + #else /* !defined(_SC_KERNEL_BITS) */ + puts ("hppa2.0"); break; + #endif + default: puts ("hppa1.0"); break; + } + exit (0); + } +EOF + (CCOPTS="" $CC_FOR_BUILD -o "$dummy" "$dummy.c" 2>/dev/null) && HP_ARCH=`"$dummy"` + test -z "$HP_ARCH" && HP_ARCH=hppa + fi ;; + esac + if [ "$HP_ARCH" = hppa2.0w ] + then + eval "$set_cc_for_build" + + # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating + # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler + # generating 64-bit code. GNU and HP use different nomenclature: + # + # $ CC_FOR_BUILD=cc ./config.guess + # => hppa2.0w-hp-hpux11.23 + # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess + # => hppa64-hp-hpux11.23 + + if echo __LP64__ | (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | + grep -q __LP64__ + then + HP_ARCH=hppa2.0w + else + HP_ARCH=hppa64 + fi + fi + echo "$HP_ARCH"-hp-hpux"$HPUX_REV" + exit ;; + ia64:HP-UX:*:*) + HPUX_REV=`echo "$UNAME_RELEASE"|sed -e 's/[^.]*.[0B]*//'` + echo ia64-hp-hpux"$HPUX_REV" + exit ;; + 3050*:HI-UX:*:*) + eval "$set_cc_for_build" + sed 's/^ //' << EOF > "$dummy.c" + #include + int + main () + { + long cpu = sysconf (_SC_CPU_VERSION); + /* The order matters, because CPU_IS_HP_MC68K erroneously returns + true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct + results, however. 
*/ + if (CPU_IS_PA_RISC (cpu)) + { + switch (cpu) + { + case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break; + case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break; + case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break; + default: puts ("hppa-hitachi-hiuxwe2"); break; + } + } + else if (CPU_IS_HP_MC68K (cpu)) + puts ("m68k-hitachi-hiuxwe2"); + else puts ("unknown-hitachi-hiuxwe2"); + exit (0); + } +EOF + $CC_FOR_BUILD -o "$dummy" "$dummy.c" && SYSTEM_NAME=`"$dummy"` && + { echo "$SYSTEM_NAME"; exit; } + echo unknown-hitachi-hiuxwe2 + exit ;; + 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:*) + echo hppa1.1-hp-bsd + exit ;; + 9000/8??:4.3bsd:*:*) + echo hppa1.0-hp-bsd + exit ;; + *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) + echo hppa1.0-hp-mpeix + exit ;; + hp7??:OSF1:*:* | hp8?[79]:OSF1:*:*) + echo hppa1.1-hp-osf + exit ;; + hp8??:OSF1:*:*) + echo hppa1.0-hp-osf + exit ;; + i*86:OSF1:*:*) + if [ -x /usr/sbin/sysversion ] ; then + echo "$UNAME_MACHINE"-unknown-osf1mk + else + echo "$UNAME_MACHINE"-unknown-osf1 + fi + exit ;; + parisc*:Lites*:*:*) + echo hppa1.1-hp-lites + exit ;; + C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*) + echo c1-convex-bsd + exit ;; + C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*) + if getsysinfo -f scalar_acc + then echo c32-convex-bsd + else echo c2-convex-bsd + fi + exit ;; + C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*) + echo c34-convex-bsd + exit ;; + C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*) + echo c38-convex-bsd + exit ;; + C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*) + echo c4-convex-bsd + exit ;; + CRAY*Y-MP:*:*:*) + echo ymp-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*[A-Z]90:*:*:*) + echo "$UNAME_MACHINE"-cray-unicos"$UNAME_RELEASE" \ + | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ + -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ + -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*TS:*:*:*) + echo t90-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*T3E:*:*:*) + echo alphaev5-cray-unicosmk"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*SV1:*:*:*) + echo sv1-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' + exit ;; + *:UNICOS/mp:*:*) + echo craynv-cray-unicosmp"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' + exit ;; + F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) + FUJITSU_PROC=`uname -m | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz` + FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'` + FUJITSU_REL=`echo "$UNAME_RELEASE" | sed -e 's/ /_/'` + echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" + exit ;; + 5000:UNIX_System_V:4.*:*) + FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'` + FUJITSU_REL=`echo "$UNAME_RELEASE" | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/ /_/'` + echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" + exit ;; + i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) + echo "$UNAME_MACHINE"-pc-bsdi"$UNAME_RELEASE" + exit ;; + sparc*:BSD/OS:*:*) + echo sparc-unknown-bsdi"$UNAME_RELEASE" + exit ;; + *:BSD/OS:*:*) + echo "$UNAME_MACHINE"-unknown-bsdi"$UNAME_RELEASE" + exit ;; + *:FreeBSD:*:*) + UNAME_PROCESSOR=`/usr/bin/uname -p` + case "$UNAME_PROCESSOR" in + amd64) + UNAME_PROCESSOR=x86_64 ;; + i386) + UNAME_PROCESSOR=i586 ;; + esac + echo "$UNAME_PROCESSOR"-unknown-freebsd"`echo "$UNAME_RELEASE"|sed -e 's/[-(].*//'`" + exit ;; + i*:CYGWIN*:*) + echo "$UNAME_MACHINE"-pc-cygwin + exit ;; + *:MINGW64*:*) + echo 
"$UNAME_MACHINE"-pc-mingw64 + exit ;; + *:MINGW*:*) + echo "$UNAME_MACHINE"-pc-mingw32 + exit ;; + *:MSYS*:*) + echo "$UNAME_MACHINE"-pc-msys + exit ;; + i*:PW*:*) + echo "$UNAME_MACHINE"-pc-pw32 + exit ;; + *:Interix*:*) + case "$UNAME_MACHINE" in + x86) + echo i586-pc-interix"$UNAME_RELEASE" + exit ;; + authenticamd | genuineintel | EM64T) + echo x86_64-unknown-interix"$UNAME_RELEASE" + exit ;; + IA64) + echo ia64-unknown-interix"$UNAME_RELEASE" + exit ;; + esac ;; + i*:UWIN*:*) + echo "$UNAME_MACHINE"-pc-uwin + exit ;; + amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*) + echo x86_64-unknown-cygwin + exit ;; + prep*:SunOS:5.*:*) + echo powerpcle-unknown-solaris2"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`" + exit ;; + *:GNU:*:*) + # the GNU system + echo "`echo "$UNAME_MACHINE"|sed -e 's,[-/].*$,,'`-unknown-$LIBC`echo "$UNAME_RELEASE"|sed -e 's,/.*$,,'`" + exit ;; + *:GNU/*:*:*) + # other systems with GNU libc and userland + echo "$UNAME_MACHINE-unknown-`echo "$UNAME_SYSTEM" | sed 's,^[^/]*/,,' | tr "[:upper:]" "[:lower:]"``echo "$UNAME_RELEASE"|sed -e 's/[-(].*//'`-$LIBC" + exit ;; + i*86:Minix:*:*) + echo "$UNAME_MACHINE"-pc-minix + exit ;; + aarch64:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + aarch64_be:Linux:*:*) + UNAME_MACHINE=aarch64_be + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + alpha:Linux:*:*) + case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in + EV5) UNAME_MACHINE=alphaev5 ;; + EV56) UNAME_MACHINE=alphaev56 ;; + PCA56) UNAME_MACHINE=alphapca56 ;; + PCA57) UNAME_MACHINE=alphapca56 ;; + EV6) UNAME_MACHINE=alphaev6 ;; + EV67) UNAME_MACHINE=alphaev67 ;; + EV68*) UNAME_MACHINE=alphaev68 ;; + esac + objdump --private-headers /bin/sh | grep -q ld.so.1 + if test "$?" = 0 ; then LIBC=gnulibc1 ; fi + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + arc:Linux:*:* | arceb:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + arm*:Linux:*:*) + eval "$set_cc_for_build" + if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ARM_EABI__ + then + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + else + if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ARM_PCS_VFP + then + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"eabi + else + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"eabihf + fi + fi + exit ;; + avr32*:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + cris:Linux:*:*) + echo "$UNAME_MACHINE"-axis-linux-"$LIBC" + exit ;; + crisv32:Linux:*:*) + echo "$UNAME_MACHINE"-axis-linux-"$LIBC" + exit ;; + e2k:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + frv:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + hexagon:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + i*86:Linux:*:*) + echo "$UNAME_MACHINE"-pc-linux-"$LIBC" + exit ;; + ia64:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + k1om:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + m32r*:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + m68*:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + mips:Linux:*:* | mips64:Linux:*:*) + eval "$set_cc_for_build" + sed 's/^ //' << EOF > "$dummy.c" + #undef CPU + #undef ${UNAME_MACHINE} + #undef ${UNAME_MACHINE}el + #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) + CPU=${UNAME_MACHINE}el + #else + #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) + CPU=${UNAME_MACHINE} + #else + CPU= + 
#endif + #endif +EOF + eval "`$CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^CPU'`" + test "x$CPU" != x && { echo "$CPU-unknown-linux-$LIBC"; exit; } + ;; + mips64el:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + openrisc*:Linux:*:*) + echo or1k-unknown-linux-"$LIBC" + exit ;; + or32:Linux:*:* | or1k*:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + padre:Linux:*:*) + echo sparc-unknown-linux-"$LIBC" + exit ;; + parisc64:Linux:*:* | hppa64:Linux:*:*) + echo hppa64-unknown-linux-"$LIBC" + exit ;; + parisc:Linux:*:* | hppa:Linux:*:*) + # Look for CPU level + case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in + PA7*) echo hppa1.1-unknown-linux-"$LIBC" ;; + PA8*) echo hppa2.0-unknown-linux-"$LIBC" ;; + *) echo hppa-unknown-linux-"$LIBC" ;; + esac + exit ;; + ppc64:Linux:*:*) + echo powerpc64-unknown-linux-"$LIBC" + exit ;; + ppc:Linux:*:*) + echo powerpc-unknown-linux-"$LIBC" + exit ;; + ppc64le:Linux:*:*) + echo powerpc64le-unknown-linux-"$LIBC" + exit ;; + ppcle:Linux:*:*) + echo powerpcle-unknown-linux-"$LIBC" + exit ;; + riscv32:Linux:*:* | riscv64:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + s390:Linux:*:* | s390x:Linux:*:*) + echo "$UNAME_MACHINE"-ibm-linux-"$LIBC" + exit ;; + sh64*:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + sh*:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + sparc:Linux:*:* | sparc64:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + tile*:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + vax:Linux:*:*) + echo "$UNAME_MACHINE"-dec-linux-"$LIBC" + exit ;; + x86_64:Linux:*:*) + if objdump -f /bin/sh | grep -q elf32-x86-64; then + echo "$UNAME_MACHINE"-pc-linux-"$LIBC"x32 + else + echo "$UNAME_MACHINE"-pc-linux-"$LIBC" + fi + exit ;; + xtensa*:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + i*86:DYNIX/ptx:4*:*) + # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. + # earlier versions are messed up and put the nodename in both + # sysname and nodename. + echo i386-sequent-sysv4 + exit ;; + i*86:UNIX_SV:4.2MP:2.*) + # Unixware is an offshoot of SVR4, but it has its own version + # number series starting with 2... + # I am not positive that other SVR4 systems won't match this, + # I just have to hope. -- rms. + # Use sysv4.2uw... so that sysv4* matches it. + echo "$UNAME_MACHINE"-pc-sysv4.2uw"$UNAME_VERSION" + exit ;; + i*86:OS/2:*:*) + # If we were able to find `uname', then EMX Unix compatibility + # is probably installed. + echo "$UNAME_MACHINE"-pc-os2-emx + exit ;; + i*86:XTS-300:*:STOP) + echo "$UNAME_MACHINE"-unknown-stop + exit ;; + i*86:atheos:*:*) + echo "$UNAME_MACHINE"-unknown-atheos + exit ;; + i*86:syllable:*:*) + echo "$UNAME_MACHINE"-pc-syllable + exit ;; + i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*) + echo i386-unknown-lynxos"$UNAME_RELEASE" + exit ;; + i*86:*DOS:*:*) + echo "$UNAME_MACHINE"-pc-msdosdjgpp + exit ;; + i*86:*:4.*:*) + UNAME_REL=`echo "$UNAME_RELEASE" | sed 's/\/MP$//'` + if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then + echo "$UNAME_MACHINE"-univel-sysv"$UNAME_REL" + else + echo "$UNAME_MACHINE"-pc-sysv"$UNAME_REL" + fi + exit ;; + i*86:*:5:[678]*) + # UnixWare 7.x, OpenUNIX and OpenServer 6. 
+ case `/bin/uname -X | grep "^Machine"` in + *486*) UNAME_MACHINE=i486 ;; + *Pentium) UNAME_MACHINE=i586 ;; + *Pent*|*Celeron) UNAME_MACHINE=i686 ;; + esac + echo "$UNAME_MACHINE-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}{$UNAME_VERSION}" + exit ;; + i*86:*:3.2:*) + if test -f /usr/options/cb.name; then + UNAME_REL=`sed -n 's/.*Version //p' /dev/null >/dev/null ; then + UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')` + (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486 + (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \ + && UNAME_MACHINE=i586 + (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \ + && UNAME_MACHINE=i686 + (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \ + && UNAME_MACHINE=i686 + echo "$UNAME_MACHINE"-pc-sco"$UNAME_REL" + else + echo "$UNAME_MACHINE"-pc-sysv32 + fi + exit ;; + pc:*:*:*) + # Left here for compatibility: + # uname -m prints for DJGPP always 'pc', but it prints nothing about + # the processor, so we play safe by assuming i586. + # Note: whatever this is, it MUST be the same as what config.sub + # prints for the "djgpp" host, or else GDB configure will decide that + # this is a cross-build. + echo i586-pc-msdosdjgpp + exit ;; + Intel:Mach:3*:*) + echo i386-pc-mach3 + exit ;; + paragon:*:*:*) + echo i860-intel-osf1 + exit ;; + i860:*:4.*:*) # i860-SVR4 + if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then + echo i860-stardent-sysv"$UNAME_RELEASE" # Stardent Vistra i860-SVR4 + else # Add other i860-SVR4 vendors below as they are discovered. + echo i860-unknown-sysv"$UNAME_RELEASE" # Unknown i860-SVR4 + fi + exit ;; + mini*:CTIX:SYS*5:*) + # "miniframe" + echo m68010-convergent-sysv + exit ;; + mc68k:UNIX:SYSTEM5:3.51m) + echo m68k-convergent-sysv + exit ;; + M680?0:D-NIX:5.3:*) + echo m68k-diab-dnix + exit ;; + M68*:*:R3V[5678]*:*) + test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;; + 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0) + OS_REL='' + test -r /etc/.relid \ + && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4.3"$OS_REL"; exit; } + /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ + && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } ;; + 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4; exit; } ;; + NCR*:*:4.2:* | MPRAS*:*:4.2:*) + OS_REL='.3' + test -r /etc/.relid \ + && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4.3"$OS_REL"; exit; } + /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ + && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } + /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \ + && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } ;; + m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) + echo m68k-unknown-lynxos"$UNAME_RELEASE" + exit ;; + mc68030:UNIX_System_V:4.*:*) + echo m68k-atari-sysv4 + exit ;; + TSUNAMI:LynxOS:2.*:*) + echo sparc-unknown-lynxos"$UNAME_RELEASE" + exit ;; + rs6000:LynxOS:2.*:*) + echo rs6000-unknown-lynxos"$UNAME_RELEASE" + exit ;; + PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*) + echo powerpc-unknown-lynxos"$UNAME_RELEASE" + exit ;; + SM[BE]S:UNIX_SV:*:*) + echo mips-dde-sysv"$UNAME_RELEASE" + exit ;; + RM*:ReliantUNIX-*:*:*) + echo 
mips-sni-sysv4 + exit ;; + RM*:SINIX-*:*:*) + echo mips-sni-sysv4 + exit ;; + *:SINIX-*:*:*) + if uname -p 2>/dev/null >/dev/null ; then + UNAME_MACHINE=`(uname -p) 2>/dev/null` + echo "$UNAME_MACHINE"-sni-sysv4 + else + echo ns32k-sni-sysv + fi + exit ;; + PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort + # says + echo i586-unisys-sysv4 + exit ;; + *:UNIX_System_V:4*:FTX*) + # From Gerald Hewes . + # How about differentiating between stratus architectures? -djm + echo hppa1.1-stratus-sysv4 + exit ;; + *:*:*:FTX*) + # From seanf@swdc.stratus.com. + echo i860-stratus-sysv4 + exit ;; + i*86:VOS:*:*) + # From Paul.Green@stratus.com. + echo "$UNAME_MACHINE"-stratus-vos + exit ;; + *:VOS:*:*) + # From Paul.Green@stratus.com. + echo hppa1.1-stratus-vos + exit ;; + mc68*:A/UX:*:*) + echo m68k-apple-aux"$UNAME_RELEASE" + exit ;; + news*:NEWS-OS:6*:*) + echo mips-sony-newsos6 + exit ;; + R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) + if [ -d /usr/nec ]; then + echo mips-nec-sysv"$UNAME_RELEASE" + else + echo mips-unknown-sysv"$UNAME_RELEASE" + fi + exit ;; + BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. + echo powerpc-be-beos + exit ;; + BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only. + echo powerpc-apple-beos + exit ;; + BePC:BeOS:*:*) # BeOS running on Intel PC compatible. + echo i586-pc-beos + exit ;; + BePC:Haiku:*:*) # Haiku running on Intel PC compatible. + echo i586-pc-haiku + exit ;; + x86_64:Haiku:*:*) + echo x86_64-unknown-haiku + exit ;; + SX-4:SUPER-UX:*:*) + echo sx4-nec-superux"$UNAME_RELEASE" + exit ;; + SX-5:SUPER-UX:*:*) + echo sx5-nec-superux"$UNAME_RELEASE" + exit ;; + SX-6:SUPER-UX:*:*) + echo sx6-nec-superux"$UNAME_RELEASE" + exit ;; + SX-7:SUPER-UX:*:*) + echo sx7-nec-superux"$UNAME_RELEASE" + exit ;; + SX-8:SUPER-UX:*:*) + echo sx8-nec-superux"$UNAME_RELEASE" + exit ;; + SX-8R:SUPER-UX:*:*) + echo sx8r-nec-superux"$UNAME_RELEASE" + exit ;; + SX-ACE:SUPER-UX:*:*) + echo sxace-nec-superux"$UNAME_RELEASE" + exit ;; + Power*:Rhapsody:*:*) + echo powerpc-apple-rhapsody"$UNAME_RELEASE" + exit ;; + *:Rhapsody:*:*) + echo "$UNAME_MACHINE"-apple-rhapsody"$UNAME_RELEASE" + exit ;; + *:Darwin:*:*) + UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown + eval "$set_cc_for_build" + if test "$UNAME_PROCESSOR" = unknown ; then + UNAME_PROCESSOR=powerpc + fi + if test "`echo "$UNAME_RELEASE" | sed -e 's/\..*//'`" -le 10 ; then + if [ "$CC_FOR_BUILD" != no_compiler_found ]; then + if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_64BIT_ARCH >/dev/null + then + case $UNAME_PROCESSOR in + i386) UNAME_PROCESSOR=x86_64 ;; + powerpc) UNAME_PROCESSOR=powerpc64 ;; + esac + fi + # On 10.4-10.6 one might compile for PowerPC via gcc -arch ppc + if (echo '#ifdef __POWERPC__'; echo IS_PPC; echo '#endif') | \ + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_PPC >/dev/null + then + UNAME_PROCESSOR=powerpc + fi + fi + elif test "$UNAME_PROCESSOR" = i386 ; then + # Avoid executing cc on OS X 10.9, as it ships with a stub + # that puts up a graphical alert prompting to install + # developer tools. Any system running Mac OS X 10.7 or + # later (Darwin 11 and later) is required to have a 64-bit + # processor. This is not true of the ARM version of Darwin + # that Apple uses in portable devices. 
+ UNAME_PROCESSOR=x86_64 + fi + echo "$UNAME_PROCESSOR"-apple-darwin"$UNAME_RELEASE" + exit ;; + *:procnto*:*:* | *:QNX:[0123456789]*:*) + UNAME_PROCESSOR=`uname -p` + if test "$UNAME_PROCESSOR" = x86; then + UNAME_PROCESSOR=i386 + UNAME_MACHINE=pc + fi + echo "$UNAME_PROCESSOR"-"$UNAME_MACHINE"-nto-qnx"$UNAME_RELEASE" + exit ;; + *:QNX:*:4*) + echo i386-pc-qnx + exit ;; + NEO-*:NONSTOP_KERNEL:*:*) + echo neo-tandem-nsk"$UNAME_RELEASE" + exit ;; + NSE-*:NONSTOP_KERNEL:*:*) + echo nse-tandem-nsk"$UNAME_RELEASE" + exit ;; + NSR-*:NONSTOP_KERNEL:*:*) + echo nsr-tandem-nsk"$UNAME_RELEASE" + exit ;; + NSV-*:NONSTOP_KERNEL:*:*) + echo nsv-tandem-nsk"$UNAME_RELEASE" + exit ;; + NSX-*:NONSTOP_KERNEL:*:*) + echo nsx-tandem-nsk"$UNAME_RELEASE" + exit ;; + *:NonStop-UX:*:*) + echo mips-compaq-nonstopux + exit ;; + BS2000:POSIX*:*:*) + echo bs2000-siemens-sysv + exit ;; + DS/*:UNIX_System_V:*:*) + echo "$UNAME_MACHINE"-"$UNAME_SYSTEM"-"$UNAME_RELEASE" + exit ;; + *:Plan9:*:*) + # "uname -m" is not consistent, so use $cputype instead. 386 + # is converted to i386 for consistency with other x86 + # operating systems. + if test "$cputype" = 386; then + UNAME_MACHINE=i386 + else + UNAME_MACHINE="$cputype" + fi + echo "$UNAME_MACHINE"-unknown-plan9 + exit ;; + *:TOPS-10:*:*) + echo pdp10-unknown-tops10 + exit ;; + *:TENEX:*:*) + echo pdp10-unknown-tenex + exit ;; + KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*) + echo pdp10-dec-tops20 + exit ;; + XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*) + echo pdp10-xkl-tops20 + exit ;; + *:TOPS-20:*:*) + echo pdp10-unknown-tops20 + exit ;; + *:ITS:*:*) + echo pdp10-unknown-its + exit ;; + SEI:*:*:SEIUX) + echo mips-sei-seiux"$UNAME_RELEASE" + exit ;; + *:DragonFly:*:*) + echo "$UNAME_MACHINE"-unknown-dragonfly"`echo "$UNAME_RELEASE"|sed -e 's/[-(].*//'`" + exit ;; + *:*VMS:*:*) + UNAME_MACHINE=`(uname -p) 2>/dev/null` + case "$UNAME_MACHINE" in + A*) echo alpha-dec-vms ; exit ;; + I*) echo ia64-dec-vms ; exit ;; + V*) echo vax-dec-vms ; exit ;; + esac ;; + *:XENIX:*:SysV) + echo i386-pc-xenix + exit ;; + i*86:skyos:*:*) + echo "$UNAME_MACHINE"-pc-skyos"`echo "$UNAME_RELEASE" | sed -e 's/ .*$//'`" + exit ;; + i*86:rdos:*:*) + echo "$UNAME_MACHINE"-pc-rdos + exit ;; + i*86:AROS:*:*) + echo "$UNAME_MACHINE"-pc-aros + exit ;; + x86_64:VMkernel:*:*) + echo "$UNAME_MACHINE"-unknown-esx + exit ;; + amd64:Isilon\ OneFS:*:*) + echo x86_64-unknown-onefs + exit ;; +esac + +echo "$0: unable to guess system type" >&2 + +case "$UNAME_MACHINE:$UNAME_SYSTEM" in + mips:Linux | mips64:Linux) + # If we got here on MIPS GNU/Linux, output extra information. 
+ cat >&2 <&2 </dev/null || echo unknown` +uname -r = `(uname -r) 2>/dev/null || echo unknown` +uname -s = `(uname -s) 2>/dev/null || echo unknown` +uname -v = `(uname -v) 2>/dev/null || echo unknown` + +/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null` +/bin/uname -X = `(/bin/uname -X) 2>/dev/null` + +hostinfo = `(hostinfo) 2>/dev/null` +/bin/universe = `(/bin/universe) 2>/dev/null` +/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null` +/bin/arch = `(/bin/arch) 2>/dev/null` +/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null` +/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null` + +UNAME_MACHINE = "$UNAME_MACHINE" +UNAME_RELEASE = "$UNAME_RELEASE" +UNAME_SYSTEM = "$UNAME_SYSTEM" +UNAME_VERSION = "$UNAME_VERSION" +EOF + +exit 1 + +# Local variables: +# eval: (add-hook 'write-file-functions 'time-stamp) +# time-stamp-start: "timestamp='" +# time-stamp-format: "%:y-%02m-%02d" +# time-stamp-end: "'" +# End: diff --git a/autoconf/config.h.in b/autoconf/config.h.in new file mode 100644 index 00000000..dacb235f --- /dev/null +++ b/autoconf/config.h.in @@ -0,0 +1,1286 @@ +/* autoconf/config.h.in. Generated from autoconf/configure.in by autoheader. */ +/* ------------------------------------------------------------------------- */ +/* -- CONFIGURE SPECIFIED FEATURES -- */ +/* ------------------------------------------------------------------------- */ + +/* + * Copyright (C) 2000-2015 Kern Sibbald + * License: BSD 2-Clause; see file LICENSE-FOSS + */ + +/* Define if you want to use MySQL as Catalog database */ +#undef USE_MYSQL_DB + +/* Define if you want SmartAlloc debug code enabled */ +#undef SMARTALLOC + +/* Define to `int' if doesn't define. */ +#undef daddr_t + +/* Define to `int' if doesn't define. */ +#undef major_t + +/* Define to `int' if doesn't define. */ +#undef minor_t + +/* Define to `int' if doesn't define. */ +#undef ssize_t + +/* Define if you want to use PostgreSQL */ +#undef HAVE_POSTGRESQL + +/* Define if you want to use MySQL */ +#undef HAVE_MYSQL + +/* Define if you want to use embedded MySQL */ +#undef HAVE_EMBEDDED_MYSQL + +/* Define if you want to use SQLite3 */ +#undef HAVE_SQLITE3 + +/* ------------------------------------------------------------------------- */ +/* -- CONFIGURE DETECTED FEATURES -- */ +/* ------------------------------------------------------------------------- */ + +/* Define if you need function prototypes */ +#undef PROTOTYPES + +/* Define if you have XPointer typedef */ +#undef HAVE_XPOINTER + +/* Define if you have _GNU_SOURCE getpt() */ +#undef HAVE_GETPT + +/* Define if you have GCC */ +#undef HAVE_GCC + +/* Define If you want find -nouser and -nogroup to make tables of + used UIDs and GIDs at startup instead of using getpwuid or + getgrgid when needed. Speeds up -nouser and -nogroup unless you + are running NIS or Hesiod, which make password and group calls + very expensive. */ +#undef CACHE_IDS + +/* Define to use SVR4 statvfs to get filesystem type. */ +#undef FSTYPE_STATVFS + +/* Define to use SVR3.2 statfs to get filesystem type. */ +#undef FSTYPE_USG_STATFS + +/* Define to use AIX3 statfs to get filesystem type. */ +#undef FSTYPE_AIX_STATFS + +/* Define to use 4.3BSD getmntent to get filesystem type. */ +#undef FSTYPE_MNTENT + +/* Define to use 4.4BSD and OSF1 statfs to get filesystem type. */ +#undef FSTYPE_STATFS + +/* Define to use Ultrix getmnt to get filesystem type. */ +#undef FSTYPE_GETMNT + +/* Define to `unsigned long' if doesn't define. */ +#undef dev_t + +/* Define to `unsigned long' if doesn't define. 
*/ +#undef ino_t + +/* Define to 1 if utime.h exists and declares struct utimbuf. */ +#undef HAVE_UTIME_H + +/* Data types */ +#undef HAVE_U_INT +#undef HAVE_INTXX_T +#undef HAVE_U_INTXX_T +#undef HAVE_UINTXX_T +#undef HAVE_INT64_T +#undef HAVE_U_INT64_T +#undef HAVE_INTMAX_T +#undef HAVE_U_INTMAX_T + +/* Define if you want TCP Wrappers support */ +#undef HAVE_LIBWRAP + +/* Define if you have sys/bitypes.h */ +#undef HAVE_SYS_BITYPES_H + +/* Directory for PID files */ +#undef _PATH_BACULA_PIDDIR + +/* LOCALEDIR */ +#undef LOCALEDIR + +/* Define if you have zlib */ +#undef HAVE_LIBZ + +/* Define if you have lzo lib */ +#undef HAVE_LZO + +/* Define if you have libacl */ +#undef HAVE_ACL + +/* General libs */ +#undef LIBS + +/* File daemon specif libraries */ +#undef FDLIBS + +/* Path to Sendmail program */ +#undef SENDMAIL_PATH + +/* What kind of signals we have */ +#undef HAVE_POSIX_SIGNALS +#undef HAVE_BSD_SIGNALS +#undef HAVE_USG_SIGHOLD + +/* Operating systems */ +/* OSes */ +#undef HAVE_LINUX_OS +#undef HAVE_FREEBSD_OS +#undef HAVE_KFREEBSD_OS +#undef HAVE_NETBSD_OS +#undef HAVE_OPENBSD_OS +#undef HAVE_BSDI_OS +#undef HAVE_HPUX_OS +#undef HAVE_SUN_OS +#undef HAVE_IRIX_OS +#undef HAVE_AIX_OS +#undef HAVE_SGI_OS +#undef HAVE_CYGWIN +#undef HAVE_OSF1_OS +#undef HAVE_DARWIN_OS +#undef HAVE_HURD_OS + +/* Set to correct scanf value for long long int */ +#undef lld +#undef llu + +#undef HAVE_READLINE + +#undef HAVE_GMP + +#undef HAVE_CWEB + +#undef HAVE_FCHDIR + +#undef HAVE_GETOPT_LONG + +#undef HAVE_LIBSM + +/* Check for thread safe routines */ +#undef HAVE_LOCALTIME_R +#undef HAVE_READDIR_R +#undef HAVE_STRERROR_R +#undef HAVE_GETHOSTBYNAME_R + +#undef HAVE_STRTOLL +#undef HAVE_INET_PTON + +#undef HAVE_SOCKLEN_T + +#undef HAVE_OLD_SOCKOPT +#undef USE_THR_SETCONCURRENCY + +/* Defined if Gtk+-2.4 or greater is present */ +#undef HAVE_GTK_2_4 + +/* Needed on HP-UX/g++ systems to support long long ints (int64) */ +#undef _INCLUDE_LONGLONG + +/* Define to system config directory */ +#undef SYSCONFDIR + +/* Define if OPENSSL is available */ +#undef HAVE_OPENSSL + +/* Define if comm encryption should be enabled */ +#undef HAVE_TLS + +/* Define if data encryption should be enabled */ +#undef HAVE_CRYPTO + +/* Define the LOCALEDIR if a translation */ +#undef LOCALEDIR + +/* Define if language support is enabled */ +#undef ENABLE_NLS + +/* Define if building universal (internal helper macro) */ +#undef AC_APPLE_UNIVERSAL_BUILD + +/* Define to 1 if the `closedir' function returns void instead of `int'. */ +#undef CLOSEDIR_VOID + +/* Define to one of `_getb67', `GETB67', `getb67' for Cray-2 and Cray-YMP + systems. This function is required for `alloca.c' support on those systems. + */ +#undef CRAY_STACKSEG_END + +/* Define to 1 if using `alloca.c'. */ +#undef C_ALLOCA + +/* Define to 1 if translation of program messages to the user's native + language is requested. */ +#undef ENABLE_NLS + +/* Defined to 0 if not provided */ +#undef FD_CLOEXEC + +/* Define to 1 if you have the 'accept4' function. */ +#undef HAVE_ACCEPT4 + +/* Normal acl support */ +#undef HAVE_ACL + +/* Defines if your system have the ACL_TYPE_DEFAULT_DIR acl type */ +#undef HAVE_ACL_TYPE_DEFAULT_DIR + +/* Defines if your system have the ACL_TYPE_EXTENDED acl type */ +#undef HAVE_ACL_TYPE_EXTENDED + +/* Defines if your system have the ACL_TYPE_NFS4 acl type */ +#undef HAVE_ACL_TYPE_NFS4 + +/* Define to 1 if you have the 'add_proplist_entry' function. 
*/ +#undef HAVE_ADD_PROPLIST_ENTRY + +/* Define to 1 if your system has AFS support */ +#undef HAVE_AFS + +/* Andrew FileSystem ACL support */ +#undef HAVE_AFS_ACL + +/* Define to 1 if you have the header file. */ +#undef HAVE_AFS_AFSINT_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_AFS_VENUS_H + +/* Define to 1 if you have `alloca', as a function or macro. */ +#undef HAVE_ALLOCA + +/* Define to 1 if you have and it should be used (not on Ultrix). + */ +#undef HAVE_ALLOCA_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_ARGZ_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_ARPA_NAMESER_H + +/* Define to 1 if you have the `asprintf' function. */ +#undef HAVE_ASPRINTF + +/* Define to 1 if you have the header file. */ +#undef HAVE_ASSERT_H + +/* Define to 1 if you have the 'attropen' function. */ +#undef HAVE_ATTROPEN + +/* Define to 1 if you have the header file. */ +#undef HAVE_ATTR_ATTRIBUTES_H + +/* Defines if your system have the attr.h header file */ +#undef HAVE_ATTR_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_ATTR_XATTR_H + +/* Define to 1 if you have the `backtrace' function. */ +#undef HAVE_BACKTRACE + +/* Set if Bacula bat Qt4/5 GUI support enabled */ +#undef HAVE_BAT + +/* Define to 1 if you have the `be64toh' function. */ +#undef HAVE_BE64TOH + +/* Big Endian */ +#undef HAVE_BIG_ENDIAN + +/* Define to 1 if you have the '__builtin_bswap32' function */ +#undef HAVE_BSWAP32 + +/* Define to 1 if you have the MacOS X function CFLocaleCopyCurrent in the + CoreFoundation framework. */ +#undef HAVE_CFLOCALECOPYCURRENT + +/* Define to 1 if you have the MacOS X function CFPreferencesCopyAppValue in + the CoreFoundation framework. */ +#undef HAVE_CFPREFERENCESCOPYAPPVALUE + +/* Define to 1 if you have the `chflags' function. */ +#undef HAVE_CHFLAGS + +/* Set if we build only client */ +#undef HAVE_CLIENT_ONLY + +/* Define to 1 if you have the 'closefrom' function. */ +#undef HAVE_CLOSEFROM + +/* Set if Bacula conio support enabled */ +#undef HAVE_CONIO + +/* Define if encryption support should be enabled */ +#undef HAVE_CRYPTO + +/* Define to 1 if you have the header file. */ +#undef HAVE_CURSES_H + +/* Define if the GNU dcgettext() function is already present or preinstalled. + */ +#undef HAVE_DCGETTEXT + +/* Define to 1 if you have the declaration of `cygwin_conv_path', and to 0 if + you don't. */ +#undef HAVE_DECL_CYGWIN_CONV_PATH + +/* Define to 1 if you have the declaration of `FD_CLOEXEC', and to 0 if you + don't. */ +#undef HAVE_DECL_FD_CLOEXEC + +/* Define to 1 if you have the declaration of `feof_unlocked', and to 0 if you + don't. */ +#undef HAVE_DECL_FEOF_UNLOCKED + +/* Define to 1 if you have the declaration of `fgets_unlocked', and to 0 if + you don't. */ +#undef HAVE_DECL_FGETS_UNLOCKED + +/* Define to 1 if you have the declaration of `getc_unlocked', and to 0 if you + don't. */ +#undef HAVE_DECL_GETC_UNLOCKED + +/* Define to 1 if you have the declaration of `O_CLOEXEC', and to 0 if you + don't. */ +#undef HAVE_DECL_O_CLOEXEC + +/* Define to 1 if you have the declaration of `SOCK_CLOEXEC', and to 0 if you + don't. */ +#undef HAVE_DECL_SOCK_CLOEXEC + +/* Define to 1 if you have the declaration of `tzname', and to 0 if you don't. + */ +#undef HAVE_DECL_TZNAME + +/* Define to 1 if you have the declaration of `_snprintf', and to 0 if you + don't. */ +#undef HAVE_DECL__SNPRINTF + +/* Define to 1 if you have the declaration of `_snwprintf', and to 0 if you + don't. 
*/ +#undef HAVE_DECL__SNWPRINTF + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +#undef HAVE_DIRENT_H + +/* Define if you have the GNU dld library. */ +#undef HAVE_DLD + +/* Define to 1 if you have the `dlerror' function. */ +#undef HAVE_DLERROR + +/* Define to 1 if you have the header file. */ +#undef HAVE_DLFCN_H + +/* Define to 1 if you don't have `vprintf' but do have `_doprnt.' */ +#undef HAVE_DOPRNT + +/* Define if you have the _dyld_func_lookup function. */ +#undef HAVE_DYLD + +/* Set if you have an Embedded MySQL Database */ +#undef HAVE_EMBEDDED_MYSQL + +/* Define to 1 if you have the 'extattr_get_file' function. */ +#undef HAVE_EXTATTR_GET_FILE + +/* Define to 1 if you have the 'extattr_get_link' function. */ +#undef HAVE_EXTATTR_GET_LINK + +/* Define to 1 if you have the 'extattr_list_file' function. */ +#undef HAVE_EXTATTR_LIST_FILE + +/* Define to 1 if you have the 'extattr_list_link' function. */ +#undef HAVE_EXTATTR_LIST_LINK + +/* Define to 1 if you have the 'extattr_namespace_to_string' function. */ +#undef HAVE_EXTATTR_NAMESPACE_TO_STRING + +/* Define to 1 if you have the 'extattr_set_file' function. */ +#undef HAVE_EXTATTR_SET_FILE + +/* Define to 1 if you have the 'extattr_set_link' function. */ +#undef HAVE_EXTATTR_SET_LINK + +/* Define to 1 if you have the 'extattr_string_to_namespace' function. */ +#undef HAVE_EXTATTR_STRING_TO_NAMESPACE + +/* Extended acl support */ +#undef HAVE_EXTENDED_ACL + +/* Define to 1 if you have the `fchdir' function. */ +#undef HAVE_FCHDIR + +/* Define to 1 if you have the `fchmod' function. */ +#undef HAVE_FCHMOD + +/* Define to 1 if you have the `fchown' function. */ +#undef HAVE_FCHOWN + +/* Define to 1 if you have the 'fchownat' function. */ +#undef HAVE_FCHOWNAT + +/* Set if you have 'F_CLOSEM' fcntl flag */ +#undef HAVE_FCNTL_F_CLOSEM + +/* Define to 1 if you have the header file. */ +#undef HAVE_FCNTL_H + +/* Set if fcntl supports file locking */ +#undef HAVE_FCNTL_LOCK + +/* Define to 1 if you have the `fdatasync' function. */ +#undef HAVE_FDATASYNC + +/* Define to 1 if you have the `fork' function. */ +#undef HAVE_FORK + +/* Define to 1 if you have the `fseeko' function. */ +#undef HAVE_FSEEKO + +/* Define to 1 if you have the `futimes' function. */ +#undef HAVE_FUTIMES + +/* Define to 1 if you have the 'futimesat' function. */ +#undef HAVE_FUTIMESAT + +/* Define to 1 if you have the `fwprintf' function. */ +#undef HAVE_FWPRINTF + +/* Define to 1 if you have the 'gai_strerror' function. */ +#undef HAVE_GAI_STRERROR + +/* Define to 1 if getaddrinfo exists and works */ +#undef HAVE_GETADDRINFO + +/* Define to 1 if you have the `getcwd' function. */ +#undef HAVE_GETCWD + +/* Define to 1 if you have the 'getea' function. */ +#undef HAVE_GETEA + +/* Define to 1 if you have the `getegid' function. */ +#undef HAVE_GETEGID + +/* Define to 1 if you have the `geteuid' function. */ +#undef HAVE_GETEUID + +/* Define to 1 if you have the `getgid' function. */ +#undef HAVE_GETGID + +/* Define to 1 if you have the `gethostbyname2' function. */ +#undef HAVE_GETHOSTBYNAME2 + +/* Define to 1 if you have the `gethostbyname_r' function. */ +#undef HAVE_GETHOSTBYNAME_R + +/* Define to 1 if you have the `gethostid' function. */ +#undef HAVE_GETHOSTID + +/* Define to 1 if you have the `gethostname' function. */ +#undef HAVE_GETHOSTNAME + +/* Define to 1 if you have the `getmntent' function. */ +#undef HAVE_GETMNTENT + +/* Define to 1 if you have the `getmntinfo' function. 
*/ +#undef HAVE_GETMNTINFO + +/* Define to 1 if you have the `getnameinfo' function. */ +#undef HAVE_GETNAMEINFO + +/* Set if have getpagesize */ +#undef HAVE_GETPAGESIZE + +/* Define to 1 if you have the `getpid' function. */ +#undef HAVE_GETPID + +/* Define to 1 if you have the 'getproplist' function. */ +#undef HAVE_GETPROPLIST + +/* Define to 1 if you have the `getrlimit' function. */ +#undef HAVE_GETRLIMIT + +/* Define if the GNU gettext() function is already present or preinstalled. */ +#undef HAVE_GETTEXT + +/* Define to 1 if you have the `gettimeofday' function. */ +#undef HAVE_GETTIMEOFDAY + +/* Define to 1 if you have the `getuid' function. */ +#undef HAVE_GETUID + +/* Define to 1 if you have the 'getxattr' function. */ +#undef HAVE_GETXATTR + +/* Define to 1 if you have the 'get_proplist_entry' function. */ +#undef HAVE_GET_PROPLIST_ENTRY + +/* Define to 1 if you have the header file. */ +#undef HAVE_GRP_H + +/* Define to 1 if you have the `htobe64' function. */ +#undef HAVE_HTOBE64 + +/* Define to 1 if you have the `iconv()' function. */ +#undef HAVE_ICONV + +/* Define to 1 if you have the `inet_ntop' function. */ +#undef HAVE_INET_NTOP + +/* Define to 1 if you have the `inet_pton' function. */ +#undef HAVE_INET_PTON + +/* Define if you have the 'intmax_t' type in or . */ +#undef HAVE_INTMAX_T + +/* Define to 1 if the system has the type `intptr_t'. */ +#undef HAVE_INTPTR_T + +/* Define to 1 if you have the header file. */ +#undef HAVE_INTTYPES_H + +/* Define if exists, doesn't clash with , and + declares uintmax_t. */ +#undef HAVE_INTTYPES_H_WITH_UINTMAX + +/* Set if ioctl request is unsigned long int */ +#undef HAVE_IOCTL_ULINT_REQUEST + +/* Whether to enable IPv6 support */ +#undef HAVE_IPV6 + +/* Define if you have and nl_langinfo(CODESET). */ +#undef HAVE_LANGINFO_CODESET + +/* Define to 1 if you have the `lchmod' function. */ +#undef HAVE_LCHMOD + +/* Define to 1 if you have the `lchown' function. */ +#undef HAVE_LCHOWN + +/* Define if your file defines LC_MESSAGES. */ +#undef HAVE_LC_MESSAGES + +/* Define to 1 if you have the 'lgetea' function. */ +#undef HAVE_LGETEA + +/* Define to 1 if you have the 'lgetxattr' function. */ +#undef HAVE_LGETXATTR + +/* Define if you have libcap */ +#undef HAVE_LIBCAP + +/* Define to 1 if you have the header file. */ +#undef HAVE_LIBC_H + +/* Define if you have the libdl library or equivalent. */ +#undef HAVE_LIBDL + +/* Define if libdlloader will be built on this platform */ +#undef HAVE_LIBDLLOADER + +/* Define to 1 if you have the `inet' library (-linet). */ +#undef HAVE_LIBINET + +/* Define to 1 if you have the `nsl' library (-lnsl). */ +#undef HAVE_LIBNSL + +/* Define to 1 if you have the `resolv' library (-lresolv). */ +#undef HAVE_LIBRESOLV + +/* Define to 1 if you have libs3 */ +#undef HAVE_LIBS3 + +/* Define to 1 if you have the `socket' library (-lsocket). */ +#undef HAVE_LIBSOCKET + +/* Define to 1 if you have the `sun' library (-lsun). */ +#undef HAVE_LIBSUN + +/* Defines if your system have the libutil.h header file */ +#undef HAVE_LIBUTIL_H + +/* Set to enable libwraper support */ +#undef HAVE_LIBWRAP + +/* Define to 1 if you have the `xnet' library (-lxnet). */ +#undef HAVE_LIBXNET + +/* Define to 1 if you have the header file. */ +#undef HAVE_LIMITS_H + +/* Define to 1 if you have the 'linkat' function. */ +#undef HAVE_LINKAT + +/* Define to 1 if you have the 'listea' function. */ +#undef HAVE_LISTEA + +/* Define to 1 if you have the 'listxattr' function. 
*/ +#undef HAVE_LISTXATTR + +/* Little Endian */ +#undef HAVE_LITTLE_ENDIAN + +/* Define to 1 if you have the 'llistea' function. */ +#undef HAVE_LLISTEA + +/* Define to 1 if you have the 'llistxattr' function. */ +#undef HAVE_LLISTXATTR + +/* Define to 1 if you have the header file. */ +#undef HAVE_LOCALE_H + +/* Define to 1 if you have the `localtime_r' function. */ +#undef HAVE_LOCALTIME_R + +/* Define if you have the 'long double' type. */ +#undef HAVE_LONG_DOUBLE + +/* Define if you have the 'long long' type. */ +#undef HAVE_LONG_LONG + +/* Define to 1 if you have the 'lsetea' function. */ +#undef HAVE_LSETEA + +/* Define to 1 if you have the 'lsetxattr' function. */ +#undef HAVE_LSETXATTR + +/* Define to 1 if you have the `lstat' function. */ +#undef HAVE_LSTAT + +/* Define to 1 if you have the `lutimes' function. */ +#undef HAVE_LUTIMES + +/* Define to 1 if you have LZO compression */ +#undef HAVE_LZO + +/* Define to 1 if you have the header file. */ +#undef HAVE_MALLOC_H + +/* Set if have malloc_trim */ +#undef HAVE_MALLOC_TRIM + +/* Define to 1 if you have the header file. */ +#undef HAVE_MEMORY_H + +/* Define to 1 if you have the `mempcpy' function. */ +#undef HAVE_MEMPCPY + +/* Define to 1 if you have a working `mmap' system call. */ +#undef HAVE_MMAP + +/* Define to 1 if you have the header file. */ +#undef HAVE_MTIO_H + +/* Define to 1 if you have the `munmap' function. */ +#undef HAVE_MUNMAP + +/* Set if you have an MySQL Database */ +#undef HAVE_MYSQL + +/* Set if have mysql_thread_safe */ +#undef HAVE_MYSQL_THREAD_SAFE + +/* Define to 1 if you have the `nanosleep' function. */ +#undef HAVE_NANOSLEEP + +/* Define to 1 if you have the header file, and it defines `DIR'. */ +#undef HAVE_NDIR_H + +/* Define to 1 if you have the `nl_langinfo' function. */ +#undef HAVE_NL_LANGINFO + +/* Define to 1 if you have the header file. */ +#undef HAVE_NL_TYPES_H + +/* Define to 1 if you have the 'nvlist_next_nvpair' function. */ +#undef HAVE_NVLIST_NEXT_NVPAIR + +/* Define to 1 if you have the 'openat' function. */ +#undef HAVE_OPENAT + +/* Define if OpenSSL library is available */ +#undef HAVE_OPENSSL + +/* Define if the OpenSSL library is export-contrained to 128bit ciphers */ +#undef HAVE_OPENSSL_EXPORT_LIBRARY + +/* Set if have OpenSSL version 1.x */ +#undef HAVE_OPENSSLv1 + +/* Define to 1 if you have the `posix_fadvise' function. */ +#undef HAVE_POSIX_FADVISE + +/* Define to 1 if you have the `posix_fallocate' function. */ +#undef HAVE_POSIX_FALLOCATE + +/* Define if your printf() function supports format strings with positions. */ +#undef HAVE_POSIX_PRINTF + +/* Set if you have an PostgreSQL Database */ +#undef HAVE_POSTGRESQL + +/* Set if PostgreSQL DB batch insert code enabled */ +#undef HAVE_POSTGRESQL_BATCH_FILE_INSERT + +/* Set if have PQisthreadsafe */ +#undef HAVE_PQISTHREADSAFE + +/* Set if have PQputCopyData */ +#undef HAVE_PQ_COPY + +/* Define to 1 if you have the `prctl' function. */ +#undef HAVE_PRCTL + +/* Define to 1 if you have the `putenv' function. */ +#undef HAVE_PUTENV + +/* Define to 1 if you have the header file. */ +#undef HAVE_PWD_H + +/* Define to 1 if you have the `readdir_r' function. */ +#undef HAVE_READDIR_R + +/* Set to enable readline support */ +#undef HAVE_READLINE + +/* Define to 1 if you have the `realpath' function. */ +#undef HAVE_REALPATH + +/* Define to 1 if you have the header file. */ +#undef HAVE_REGEX_H + +/* Define if sa_len field exists in struct sockaddr */ +#undef HAVE_SA_LEN + +/* Define to 1 if you have the `select' function. 
*/ +#undef HAVE_SELECT + +/* Define to 1 if you have the 'setea' function. */ +#undef HAVE_SETEA + +/* Define to 1 if you have the `setenv' function. */ +#undef HAVE_SETENV + +/* Define to 1 if you have the `setlocale' function. */ +#undef HAVE_SETLOCALE + +/* Define to 1 if you have the `setpgid' function. */ +#undef HAVE_SETPGID + +/* Define to 1 if you have the `setpgrp' function. */ +#undef HAVE_SETPGRP + +/* Define to 1 if you have the 'setproplist' function. */ +#undef HAVE_SETPROPLIST + +/* Define to 1 if you have the `setreuid' function. */ +#undef HAVE_SETREUID + +/* Define to 1 if you have the `setsid' function. */ +#undef HAVE_SETSID + +/* Define to 1 if you have the 'setxattr' function. */ +#undef HAVE_SETXATTR + +/* Define if the SHA-2 family of digest algorithms is available */ +#undef HAVE_SHA2 + +/* Define if you have the shl_load function. */ +#undef HAVE_SHL_LOAD + +/* Define to 1 if you have the `signal' function. */ +#undef HAVE_SIGNAL + +/* Define to 1 if you have the 'sizeof_proplist_entry' function. */ +#undef HAVE_SIZEOF_PROPLIST_ENTRY + +/* Define to 1 if you have the `snprintf' function. */ +#undef HAVE_SNPRINTF + +/* Set if socklen_t exists */ +#undef HAVE_SOCKLEN_T + +/* Set if building on Solaris 10 */ +#undef HAVE_SOLARIS10 + +/* Set if you have an SQLite3 Database */ +#undef HAVE_SQLITE3 + +/* Set if have sqlite3_threadsafe */ +#undef HAVE_SQLITE3_THREADSAFE + +/* Define to 1 if you have the header file. */ +#undef HAVE_STDARG_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_STDDEF_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_STDINT_H + +/* Define if exists, doesn't clash with , and declares + uintmax_t. */ +#undef HAVE_STDINT_H_WITH_UINTMAX + +/* Define to 1 if you have the header file. */ +#undef HAVE_STDLIB_H + +/* Define to 1 if you have the `stpcpy' function. */ +#undef HAVE_STPCPY + +/* Define to 1 if you have the `strcasecmp' function. */ +#undef HAVE_STRCASECMP + +/* Define to 1 if you have the `strdup' function. */ +#undef HAVE_STRDUP + +/* Defined to 0 if not provided */ +#undef HAVE_STREAM_CLOEXEC + +/* Define to 1 if you have the `strerror' function. */ +#undef HAVE_STRERROR + +/* Define to 1 if you have the `strerror_r' function. */ +#undef HAVE_STRERROR_R + +/* Define to 1 if you have the `strftime' function. */ +#undef HAVE_STRFTIME + +/* Define to 1 if you have the header file. */ +#undef HAVE_STRINGS_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_STRING_H + +/* Define to 1 if you have the `strncmp' function. */ +#undef HAVE_STRNCMP + +/* Define to 1 if you have the `strncpy' function. */ +#undef HAVE_STRNCPY + +/* Define to 1 if you have the `strtoll' function. */ +#undef HAVE_STRTOLL + +/* Define to 1 if you have the `strtoul' function. */ +#undef HAVE_STRTOUL + +/* Define to 1 if `st_blksize' is a member of `struct stat'. */ +#undef HAVE_STRUCT_STAT_ST_BLKSIZE + +/* Define to 1 if `st_blocks' is a member of `struct stat'. */ +#undef HAVE_STRUCT_STAT_ST_BLOCKS + +/* Define to 1 if `st_rdev' is a member of `struct stat'. */ +#undef HAVE_STRUCT_STAT_ST_RDEV + +/* Define to 1 if `tm_zone' is a member of `struct tm'. */ +#undef HAVE_STRUCT_TM_TM_ZONE + +/* Define to 1 if your `struct stat' has `st_blksize'. Deprecated, use + `HAVE_STRUCT_STAT_ST_BLKSIZE' instead. */ +#undef HAVE_ST_BLKSIZE + +/* Define to 1 if your `struct stat' has `st_blocks'. Deprecated, use + `HAVE_STRUCT_STAT_ST_BLOCKS' instead. */ +#undef HAVE_ST_BLOCKS + +/* Define to 1 if your `struct stat' has `st_rdev'. 
Deprecated, use + `HAVE_STRUCT_STAT_ST_RDEV' instead. */ +#undef HAVE_ST_RDEV + +/* Define to 1 if systemd support should be enabled */ +#undef HAVE_SYSTEMD + +/* Defines if your system have the sys/acl.h header file */ +#undef HAVE_SYS_ACL_H + +/* Defines if your system have the sys/attr.h header file */ +#undef HAVE_SYS_ATTR_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_BITYPES_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_BYTEORDER_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_CAPABILITY_H + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +#undef HAVE_SYS_DIR_H + +/* Defines if your system have the sys/ea.h header file */ +#undef HAVE_SYS_EA_H + +/* Defines if your system have the sys/extattr.h header file */ +#undef HAVE_SYS_EXTATTR_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_IOCTL_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_MTIO_H + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +#undef HAVE_SYS_NDIR_H + +/* Defines if your system have the sys/nvpair.h header file */ +#undef HAVE_SYS_NVPAIR_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_PARAM_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_PRCTL_H + +/* Defines if your system have the sys/proplist.h header file */ +#undef HAVE_SYS_PROPLIST_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_SELECT_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_SOCKET_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_SOCKIO_H + +/* Defines if your system have the sys/statvfs.h header file */ +#undef HAVE_SYS_STATVFS_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_STAT_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_TAPE_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_TIME_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_TYPES_H + +/* Define to 1 if you have that is POSIX.1 compatible. */ +#undef HAVE_SYS_WAIT_H + +/* Defines if your system have the sys/xattr.h header file */ +#undef HAVE_SYS_XATTR_H + +/* Define to 1 if you have the `tcgetattr' function. */ +#undef HAVE_TCGETATTR + +/* Define to 1 if you have the header file. */ +#undef HAVE_TERMCAP_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_TERMIOS_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_TERM_H + +/* Define if TLS support should be enabled */ +#undef HAVE_TLS + +/* Define to 1 if your `struct tm' has `tm_zone'. Deprecated, use + `HAVE_STRUCT_TM_TM_ZONE' instead. */ +#undef HAVE_TM_ZONE + +/* Define to 1 if you have the `tsearch' function. */ +#undef HAVE_TSEARCH + +/* Defind to 1 if compiler has typeof */ +#undef HAVE_TYPEOF + +/* Define to 1 if you don't have `tm_zone' but do have the external array + `tzname'. */ +#undef HAVE_TZNAME + +/* Define if you have the 'uintmax_t' type in or . */ +#undef HAVE_UINTMAX_T + +/* Define to 1 if the system has the type `uintptr_t'. */ +#undef HAVE_UINTPTR_T + +/* Define to 1 if you have the header file. */ +#undef HAVE_UNISTD_H + +/* Define to 1 if you have the 'unlinkat' function. */ +#undef HAVE_UNLINKAT + +/* Define if you have the 'unsigned long long' type. */ +#undef HAVE_UNSIGNED_LONG_LONG + +/* Set if utime.h exists */ +#undef HAVE_UTIME_H + +/* Define to 1 if you have the header file. 
*/ +#undef HAVE_VARARGS_H + +/* Set if va_copy exists */ +#undef HAVE_VA_COPY + +/* Define to 1 if you have the `vfprintf' function. */ +#undef HAVE_VFPRINTF + +/* Define to 1 if you have the `vprintf' function. */ +#undef HAVE_VPRINTF + +/* Define to 1 if you have the `vsnprintf' function. */ +#undef HAVE_VSNPRINTF + +/* Define if you have the 'wchar_t' type. */ +#undef HAVE_WCHAR_T + +/* Define to 1 if you have the `wcslen' function. */ +#undef HAVE_WCSLEN + +/* Define if you have the 'wint_t' type. */ +#undef HAVE_WINT_T + +/* Extended Attributes support */ +#undef HAVE_XATTR + +/* Define to 1 if you have the header file. */ +#undef HAVE_ZLIB_H + +/* Define to 1 if you have the `__argz_count' function. */ +#undef HAVE___ARGZ_COUNT + +/* Define to 1 if you have the `__argz_next' function. */ +#undef HAVE___ARGZ_NEXT + +/* Define to 1 if you have the `__argz_stringify' function. */ +#undef HAVE___ARGZ_STRINGIFY + +/* Define to 1 if you have the `__fsetlocking' function. */ +#undef HAVE___FSETLOCKING + +/* Define as const if the declaration of iconv() needs const. */ +#undef ICONV_CONST + +/* Define if integer division by zero raises signal SIGFPE. */ +#undef INTDIV0_RAISES_SIGFPE + +/* Define to the sub-directory in which libtool stores uninstalled libraries. + */ +#undef LT_OBJDIR + +/* Define to 1 if `major', `minor', and `makedev' are declared in . + */ +#undef MAJOR_IN_MKDEV + +/* Define to 1 if `major', `minor', and `makedev' are declared in + . */ +#undef MAJOR_IN_SYSMACROS + +/* Whether to have in6addr_any support */ +#undef NEED_IN6ADDR_ANY + +/* Define to 1 if your C compiler doesn't accept -c and -o together. */ +#undef NO_MINUS_C_MINUS_O + +/* Defined to 0 if not provided */ +#undef O_CLOEXEC + +/* Define to the address where bug reports for this package should be sent. */ +#undef PACKAGE_BUGREPORT + +/* Define to the full name of this package. */ +#undef PACKAGE_NAME + +/* Define to the full name and version of this package. */ +#undef PACKAGE_STRING + +/* Define to the one symbol short name of this package. */ +#undef PACKAGE_TARNAME + +/* Define to the home page for this package. */ +#undef PACKAGE_URL + +/* Define to the version of this package. */ +#undef PACKAGE_VERSION + +/* Define if exists and defines unusable PRI* macros. */ +#undef PRI_MACROS_BROKEN + +/* Define as the return type of signal handlers (`int' or `void'). */ +#undef RETSIGTYPE + +/* Define to 1 if the `setpgrp' function takes no argument. */ +#undef SETPGRP_VOID + +/* The size of `char', as computed by sizeof. */ +#undef SIZEOF_CHAR + +/* The size of `int', as computed by sizeof. */ +#undef SIZEOF_INT + +/* The size of `int *', as computed by sizeof. */ +#undef SIZEOF_INT_P + +/* The size of `long int', as computed by sizeof. */ +#undef SIZEOF_LONG_INT + +/* The size of `long long int', as computed by sizeof. */ +#undef SIZEOF_LONG_LONG_INT + +/* The size of `short int', as computed by sizeof. */ +#undef SIZEOF_SHORT_INT + +/* Define as the maximum value of type 'size_t', if the system doesn't define + it. */ +#undef SIZE_MAX + +/* Set if you want Smartalloc enabled */ +#undef SMARTALLOC + +/* Defined to 0 if not provided */ +#undef SOCK_CLOEXEC + +/* If using the C implementation of alloca, define if you know the + direction of stack growth for your system; otherwise it will be + automatically deduced at runtime. 
+ STACK_DIRECTION > 0 => grows toward higher addresses + STACK_DIRECTION < 0 => grows toward lower addresses + STACK_DIRECTION = 0 => direction of growth unknown */ +#undef STACK_DIRECTION + +/* Define to 1 if the `S_IS*' macros in do not work properly. */ +#undef STAT_MACROS_BROKEN + +/* Define to 1 if you have the ANSI C header files. */ +#undef STDC_HEADERS + +/* fopen() modifier for setting close on exec flag */ +#undef STREAM_CLOEXEC + +/* Define to 1 if you can safely include both and . */ +#undef TIME_WITH_SYS_TIME + +/* Define to 1 if your declares `struct tm'. */ +#undef TM_IN_SYS_TIME + +/* Set if DB batch insert code enabled */ +#undef USE_BATCH_FILE_INSERT + +/* Set if you want Lock Manager enabled */ +#undef USE_LOCKMGR + +/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most + significant byte first (like Motorola and SPARC, unlike Intel). */ +#if defined AC_APPLE_UNIVERSAL_BUILD +# if defined __BIG_ENDIAN__ +# define WORDS_BIGENDIAN 1 +# endif +#else +# ifndef WORDS_BIGENDIAN +# undef WORDS_BIGENDIAN +# endif +#endif + +/* Define to 1 if the X Window System is missing or not being used. */ +#undef X_DISPLAY_MISSING + +/* Number of bits in a file offset, on hosts where this is settable. */ +#undef _FILE_OFFSET_BITS + +/* Define to make fseeko etc. visible, on some hosts. */ +#undef _LARGEFILE_SOURCE + +/* Define for large files, on AIX-style hosts. */ +#undef _LARGE_FILES + +/* Define to empty if `const' does not conform to ANSI C. */ +#undef const + +/* Define to `long' if does not define. */ +#undef daddr_t + +/* Define to `unsigned long' if does not define. */ +#undef dev_t + +/* Define to `int' if doesn't define. */ +#undef gid_t + +/* Define to `__inline__' or `__inline' if that's what the C compiler + calls it, or to nothing if 'inline' is not supported under any name. */ +#ifndef __cplusplus +#undef inline +#endif + +/* Define to `unsigned long' if does not define. */ +#undef ino_t + +/* Define to the type of a signed integer type wide enough to hold a pointer, + if such a type exists, and if the system does not define it. */ +#undef intptr_t + +/* Define to `int' if does not define. */ +#undef major_t + +/* Define to `int' if does not define. */ +#undef minor_t + +/* Define to `int' if does not define. */ +#undef mode_t + +/* Define to `long int' if does not define. */ +#undef off_t + +/* Define to `int' if does not define. */ +#undef pid_t + +/* Define as the type of the result of subtracting two pointers, if the system + doesn't define it. */ +#undef ptrdiff_t + +/* Define to the equivalent of the C99 'restrict' keyword, or to + nothing if this is not supported. Do not define if restrict is + supported directly. */ +#undef restrict +/* Work around a bug in Sun C++: it does not support _Restrict or + __restrict__, even though the corresponding Sun C compiler ends up with + "#define restrict _Restrict" or "#define restrict __restrict__" in the + previous line. Perhaps some future version of Sun C++ will work with + restrict; if so, hopefully it defines __RESTRICT like Sun C does. */ +#if defined __SUNPRO_CC && !defined __RESTRICT +# define _Restrict +# define __restrict__ +#endif + +/* Define to empty if the C compiler doesn't support this keyword. */ +#undef signed + +/* Define to `unsigned int' if does not define. */ +#undef size_t + +/* Define to `int' if does not define. */ +#undef ssize_t + +/* Define to `int' if doesn't define. */ +#undef uid_t + +/* Define to unsigned long or unsigned long long if and + don't define. 
*/ +#undef uintmax_t + +/* Define to the type of an unsigned integer type wide enough to hold a + pointer, if such a type exists, and if the system does not define it. */ +#undef uintptr_t diff --git a/autoconf/config.log b/autoconf/config.log new file mode 100644 index 00000000..fb9038e9 --- /dev/null +++ b/autoconf/config.log @@ -0,0 +1,3 @@ +This file contains any messages produced by compilers while +running configure, to aid debugging if configure makes a mistake. + diff --git a/autoconf/config.rpath b/autoconf/config.rpath new file mode 100755 index 00000000..3f1bef34 --- /dev/null +++ b/autoconf/config.rpath @@ -0,0 +1,571 @@ +#! /bin/sh +# Output a system dependent set of variables, describing how to set the +# run time search path of shared libraries in an executable. +# +# Copyright 1996-2005 Free Software Foundation, Inc. +# Taken from GNU libtool, 2001 +# Originally by Gordon Matzigkeit , 1996 +# +# This file is free software; the Free Software Foundation gives +# unlimited permission to copy and/or distribute it, with or without +# modifications, as long as this notice is preserved. +# +# The first argument passed to this file is the canonical host specification, +# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM +# or +# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM +# The environment variables CC, GCC, LDFLAGS, LD, with_gnu_ld +# should be set by the caller. +# +# The set of defined variables is at the end of this script. + +# Known limitations: +# - On IRIX 6.5 with CC="cc", the run time search patch must not be longer +# than 256 bytes, otherwise the compiler driver will dump core. The only +# known workaround is to choose shorter directory names for the build +# directory and/or the installation directory. + +# All known linkers require a `.a' archive for static linking (except M$VC, +# which needs '.lib'). +libext=a +shrext=.so + +host="$1" +host_cpu=`echo "$host" | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\1/'` +host_vendor=`echo "$host" | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\2/'` +host_os=`echo "$host" | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\3/'` + +cc_basename=`echo "$CC" | sed -e 's%^.*/%%'` + +# Code taken from libtool.m4's AC_LIBTOOL_PROG_COMPILER_PIC. + +wl= +if test "$GCC" = yes; then + wl='-Wl,' +else + case "$host_os" in + aix*) + wl='-Wl,' + ;; + darwin*) + case "$cc_basename" in + xlc*) + wl='-Wl,' + ;; + esac + ;; + mingw* | pw32* | os2*) + ;; + hpux9* | hpux10* | hpux11*) + wl='-Wl,' + ;; + irix5* | irix6* | nonstopux*) + wl='-Wl,' + ;; + newsos6) + ;; + linux*) + case $cc_basename in + icc* | ecc*) + wl='-Wl,' + ;; + pgcc | pgf77 | pgf90) + wl='-Wl,' + ;; + ccc*) + wl='-Wl,' + ;; + como) + wl='-lopt=' + ;; + esac + ;; + osf3* | osf4* | osf5*) + wl='-Wl,' + ;; + sco3.2v5*) + ;; + solaris*) + wl='-Wl,' + ;; + sunos4*) + wl='-Qoption ld ' + ;; + sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) + wl='-Wl,' + ;; + sysv4*MP*) + ;; + unicos*) + wl='-Wl,' + ;; + uts4*) + ;; + esac +fi + +# Code taken from libtool.m4's AC_LIBTOOL_PROG_LD_SHLIBS. + +hardcode_libdir_flag_spec= +hardcode_libdir_separator= +hardcode_direct=no +hardcode_minus_L=no + +case "$host_os" in + cygwin* | mingw* | pw32*) + # FIXME: the MSVC++ port hasn't been tested in a loooong time + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. 
+ if test "$GCC" != yes; then + with_gnu_ld=no + fi + ;; + openbsd*) + with_gnu_ld=no + ;; +esac + +ld_shlibs=yes +if test "$with_gnu_ld" = yes; then + case "$host_os" in + aix3* | aix4* | aix5*) + # On AIX/PPC, the GNU linker is very broken + if test "$host_cpu" != ia64; then + ld_shlibs=no + fi + ;; + amigaos*) + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + # Samuel A. Falvo II reports + # that the semantics of dynamic libraries on AmigaOS, at least up + # to version 4, is to share data among multiple programs linked + # with the same dynamic library. Since this doesn't match the + # behavior of shared libraries on other platforms, we cannot use + # them. + ld_shlibs=no + ;; + beos*) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + : + else + ld_shlibs=no + fi + ;; + cygwin* | mingw* | pw32*) + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. + hardcode_libdir_flag_spec='-L$libdir' + if $LD --help 2>&1 | grep 'auto-import' > /dev/null; then + : + else + ld_shlibs=no + fi + ;; + netbsd*) + ;; + solaris* | sysv5*) + if $LD -v 2>&1 | grep 'BFD 2\.8' > /dev/null; then + ld_shlibs=no + elif $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + : + else + ld_shlibs=no + fi + ;; + sunos4*) + hardcode_direct=yes + ;; + linux*) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + : + else + ld_shlibs=no + fi + ;; + *) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + : + else + ld_shlibs=no + fi + ;; + esac + if test "$ld_shlibs" = yes; then + # Unlike libtool, we use -rpath here, not --rpath, since the documented + # option of GNU ld is called -rpath, not --rpath. + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + fi +else + case "$host_os" in + aix3*) + # Note: this linker hardcodes the directories in LIBPATH if there + # are no directories specified by -L. + hardcode_minus_L=yes + if test "$GCC" = yes; then + # Neither direct hardcoding nor static linking is supported with a + # broken collect2. + hardcode_direct=unsupported + fi + ;; + aix4* | aix5*) + if test "$host_cpu" = ia64; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + else + aix_use_runtimelinking=no + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # need to do runtime linking. + case $host_os in aix4.[23]|aix4.[23].*|aix5*) + for ld_flag in $LDFLAGS; do + if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then + aix_use_runtimelinking=yes + break + fi + done + esac + fi + hardcode_direct=yes + hardcode_libdir_separator=':' + if test "$GCC" = yes; then + case $host_os in aix4.[012]|aix4.[012].*) + collect2name=`${CC} -print-prog-name=collect2` + if test -f "$collect2name" && \ + strings "$collect2name" | grep resolve_lib_name >/dev/null + then + # We have reworked collect2 + hardcode_direct=yes + else + # We have old collect2 + hardcode_direct=unsupported + hardcode_minus_L=yes + hardcode_libdir_flag_spec='-L$libdir' + hardcode_libdir_separator= + fi + esac + fi + # Begin _LT_AC_SYS_LIBPATH_AIX. 
+ echo 'int main () { return 0; }' > conftest.c + ${CC} ${LDFLAGS} conftest.c -o conftest + aix_libpath=`dump -H conftest 2>/dev/null | sed -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'` + if test -z "$aix_libpath"; then + aix_libpath=`dump -HX64 conftest 2>/dev/null | sed -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'` + fi + if test -z "$aix_libpath"; then + aix_libpath="/usr/lib:/lib" + fi + rm -f conftest.c conftest + # End _LT_AC_SYS_LIBPATH_AIX. + if test "$aix_use_runtimelinking" = yes; then + hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" + else + if test "$host_cpu" = ia64; then + hardcode_libdir_flag_spec='${wl}-R $libdir:/usr/lib:/lib' + else + hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" + fi + fi + ;; + amigaos*) + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + # see comment about different semantics on the GNU ld section + ld_shlibs=no + ;; + bsdi[45]*) + ;; + cygwin* | mingw* | pw32*) + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. + hardcode_libdir_flag_spec=' ' + libext=lib + ;; + darwin* | rhapsody*) + hardcode_direct=no + if test "$GCC" = yes ; then + : + else + case "$cc_basename" in + xlc*) + ;; + *) + ld_shlibs=no + ;; + esac + fi + ;; + dgux*) + hardcode_libdir_flag_spec='-L$libdir' + ;; + freebsd1*) + ld_shlibs=no + ;; + freebsd2.2*) + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + ;; + freebsd2*) + hardcode_direct=yes + hardcode_minus_L=yes + ;; + freebsd* | kfreebsd*-gnu | dragonfly*) + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + ;; + hpux9*) + hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' + hardcode_libdir_separator=: + hardcode_direct=yes + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + hardcode_minus_L=yes + ;; + hpux10* | hpux11*) + if test "$with_gnu_ld" = no; then + case "$host_cpu" in + hppa*64*) + hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' + hardcode_libdir_separator=: + hardcode_direct=no + ;; + ia64*) + hardcode_libdir_flag_spec='-L$libdir' + hardcode_direct=no + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + hardcode_minus_L=yes + ;; + *) + hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' + hardcode_libdir_separator=: + hardcode_direct=yes + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. 
+ hardcode_minus_L=yes + ;; + esac + fi + ;; + irix5* | irix6* | nonstopux*) + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator=: + ;; + netbsd*) + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + ;; + newsos6) + hardcode_direct=yes + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator=: + ;; + openbsd*) + hardcode_direct=yes + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + hardcode_libdir_flag_spec='${wl}-rpath,$libdir' + else + case "$host_os" in + openbsd[01].* | openbsd2.[0-7] | openbsd2.[0-7].*) + hardcode_libdir_flag_spec='-R$libdir' + ;; + *) + hardcode_libdir_flag_spec='${wl}-rpath,$libdir' + ;; + esac + fi + ;; + os2*) + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + ;; + osf3*) + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator=: + ;; + osf4* | osf5*) + if test "$GCC" = yes; then + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + else + # Both cc and cxx compiler support -rpath directly + hardcode_libdir_flag_spec='-rpath $libdir' + fi + hardcode_libdir_separator=: + ;; + sco3.2v5*) + ;; + solaris*) + hardcode_libdir_flag_spec='-R$libdir' + ;; + sunos4*) + hardcode_libdir_flag_spec='-L$libdir' + hardcode_direct=yes + hardcode_minus_L=yes + ;; + sysv4) + case $host_vendor in + sni) + hardcode_direct=yes # is this really true??? + ;; + siemens) + hardcode_direct=no + ;; + motorola) + hardcode_direct=no #Motorola manual says yes, but my tests say they lie + ;; + esac + ;; + sysv4.3*) + ;; + sysv4*MP*) + if test -d /usr/nec; then + ld_shlibs=yes + fi + ;; + sysv4.2uw2*) + hardcode_direct=yes + hardcode_minus_L=no + ;; + sysv5OpenUNIX8* | sysv5UnixWare7* | sysv5uw[78]* | unixware7*) + ;; + sysv5*) + hardcode_libdir_flag_spec= + ;; + uts4*) + hardcode_libdir_flag_spec='-L$libdir' + ;; + *) + ld_shlibs=no + ;; + esac +fi + +# Check dynamic linker characteristics +# Code taken from libtool.m4's AC_LIBTOOL_SYS_DYNAMIC_LINKER. 
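To make the flag specs assembled above concrete: on a GNU/Linux host with GCC and GNU ld this script ends up with wl='-Wl,' and hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir', and the caller expands the spec against its own $libdir. A hand-written sketch of that expansion (the library directory, library name and object file are made-up illustration values, not taken from this import):

  # illustrative expansion of the spec; not part of config.rpath itself
  wl='-Wl,'
  libdir=/usr/lib/bacula
  hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
  eval "rpath_flags=\"$hardcode_libdir_flag_spec\""
  echo "$rpath_flags"      # prints: -Wl,-rpath -Wl,/usr/lib/bacula
  gcc -shared -o libexample.so example.o $rpath_flags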
+libname_spec='lib$name' +case "$host_os" in + aix3*) + ;; + aix4* | aix5*) + ;; + amigaos*) + ;; + beos*) + ;; + bsdi[45]*) + ;; + cygwin* | mingw* | pw32*) + shrext=.dll + ;; + darwin* | rhapsody*) + shrext=.dylib + ;; + dgux*) + ;; + freebsd1*) + ;; + kfreebsd*-gnu) + ;; + freebsd*) + ;; + gnu*) + ;; + hpux9* | hpux10* | hpux11*) + case "$host_cpu" in + ia64*) + shrext=.so + ;; + hppa*64*) + shrext=.sl + ;; + *) + shrext=.sl + ;; + esac + ;; + irix5* | irix6* | nonstopux*) + case "$host_os" in + irix5* | nonstopux*) + libsuff= shlibsuff= + ;; + *) + case $LD in + *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") libsuff= shlibsuff= ;; + *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") libsuff=32 shlibsuff=N32 ;; + *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") libsuff=64 shlibsuff=64 ;; + *) libsuff= shlibsuff= ;; + esac + ;; + esac + ;; + linux*oldld* | linux*aout* | linux*coff*) + ;; + linux*) + ;; + knetbsd*-gnu) + ;; + netbsd*) + ;; + newsos6) + ;; + nto-qnx*) + ;; + openbsd*) + ;; + os2*) + libname_spec='$name' + shrext=.dll + ;; + osf3* | osf4* | osf5*) + ;; + sco3.2v5*) + ;; + solaris*) + ;; + sunos4*) + ;; + sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) + ;; + sysv4*MP*) + ;; + uts4*) + ;; +esac + +sed_quote_subst='s/\(["`$\\]\)/\\\1/g' +escaped_wl=`echo "X$wl" | sed -e 's/^X//' -e "$sed_quote_subst"` +shlibext=`echo "$shrext" | sed -e 's,^\.,,'` +escaped_hardcode_libdir_flag_spec=`echo "X$hardcode_libdir_flag_spec" | sed -e 's/^X//' -e "$sed_quote_subst"` + +LC_ALL=C sed -e 's/^\([a-zA-Z0-9_]*\)=/acl_cv_\1=/' <. +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that +# program. This Exception is an additional permission under section 7 +# of the GNU General Public License, version 3 ("GPLv3"). + + +# Please send patches to . +# +# Configuration subroutine to validate and canonicalize a configuration type. +# Supply the specified configuration type as an argument. +# If it is invalid, we print an error message on stderr and exit with code 1. +# Otherwise, we print the canonical config type on stdout and succeed. + +# You can get the latest version of this script from: +# https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub + +# This file is supposed to be the same for all GNU packages +# and recognize all the CPU types, system types and aliases +# that are meaningful with *any* GNU software. +# Each package is responsible for reporting which valid configurations +# it does not support. The user should be able to distinguish +# a failure to support a valid configuration from a meaningless +# configuration. + +# The goal of this file is to map all the various variations of a given +# machine specification into a single specification in the form: +# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM +# or in some cases, the newer four-part form: +# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM +# It is wrong to echo any other type of specification. + +me=`echo "$0" | sed -e 's,.*/,,'` + +usage="\ +Usage: $0 [OPTION] CPU-MFR-OPSYS or ALIAS + +Canonicalize a configuration name. + +Options: + -h, --help print this help, then exit + -t, --time-stamp print date of last modification, then exit + -v, --version print version number, then exit + +Report bugs and patches to ." + +version="\ +GNU config.sub ($timestamp) + +Copyright 1992-2018 Free Software Foundation, Inc. 
+ +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + +help=" +Try \`$me --help' for more information." + +# Parse command line +while test $# -gt 0 ; do + case $1 in + --time-stamp | --time* | -t ) + echo "$timestamp" ; exit ;; + --version | -v ) + echo "$version" ; exit ;; + --help | --h* | -h ) + echo "$usage"; exit ;; + -- ) # Stop option processing + shift; break ;; + - ) # Use stdin as input. + break ;; + -* ) + echo "$me: invalid option $1$help" + exit 1 ;; + + *local*) + # First pass through any local machine types. + echo "$1" + exit ;; + + * ) + break ;; + esac +done + +case $# in + 0) echo "$me: missing argument$help" >&2 + exit 1;; + 1) ;; + *) echo "$me: too many arguments$help" >&2 + exit 1;; +esac + +# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any). +# Here we must recognize all the valid KERNEL-OS combinations. +maybe_os=`echo "$1" | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'` +case $maybe_os in + nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \ + linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \ + knetbsd*-gnu* | netbsd*-gnu* | netbsd*-eabi* | \ + kopensolaris*-gnu* | cloudabi*-eabi* | \ + storm-chaos* | os2-emx* | rtmk-nova*) + os=-$maybe_os + basic_machine=`echo "$1" | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'` + ;; + android-linux) + os=-linux-android + basic_machine=`echo "$1" | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown + ;; + *) + basic_machine=`echo "$1" | sed 's/-[^-]*$//'` + if [ "$basic_machine" != "$1" ] + then os=`echo "$1" | sed 's/.*-/-/'` + else os=; fi + ;; +esac + +### Let's recognize common machines as not being operating systems so +### that things like config.sub decstation-3100 work. We also +### recognize some manufacturers as not being operating systems, so we +### can provide default operating systems below. +case $os in + -sun*os*) + # Prevent following clause from handling this invalid input. + ;; + -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \ + -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \ + -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \ + -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\ + -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \ + -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \ + -apple | -axis | -knuth | -cray | -microblaze*) + os= + basic_machine=$1 + ;; + -bluegene*) + os=-cnk + ;; + -sim | -cisco | -oki | -wec | -winbond) + os= + basic_machine=$1 + ;; + -scout) + ;; + -wrs) + os=-vxworks + basic_machine=$1 + ;; + -chorusos*) + os=-chorusos + basic_machine=$1 + ;; + -chorusrdb) + os=-chorusrdb + basic_machine=$1 + ;; + -hiux*) + os=-hiuxwe2 + ;; + -sco6) + os=-sco5v6 + basic_machine=`echo "$1" | sed -e 's/86-.*/86-pc/'` + ;; + -sco5) + os=-sco3.2v5 + basic_machine=`echo "$1" | sed -e 's/86-.*/86-pc/'` + ;; + -sco4) + os=-sco3.2v4 + basic_machine=`echo "$1" | sed -e 's/86-.*/86-pc/'` + ;; + -sco3.2.[4-9]*) + os=`echo $os | sed -e 's/sco3.2./sco3.2v/'` + basic_machine=`echo "$1" | sed -e 's/86-.*/86-pc/'` + ;; + -sco3.2v[4-9]*) + # Don't forget version if it is 3.2v4 or newer. + basic_machine=`echo "$1" | sed -e 's/86-.*/86-pc/'` + ;; + -sco5v6*) + # Don't forget version if it is 3.2v4 or newer. 
+ basic_machine=`echo "$1" | sed -e 's/86-.*/86-pc/'` + ;; + -sco*) + os=-sco3.2v2 + basic_machine=`echo "$1" | sed -e 's/86-.*/86-pc/'` + ;; + -udk*) + basic_machine=`echo "$1" | sed -e 's/86-.*/86-pc/'` + ;; + -isc) + os=-isc2.2 + basic_machine=`echo "$1" | sed -e 's/86-.*/86-pc/'` + ;; + -clix*) + basic_machine=clipper-intergraph + ;; + -isc*) + basic_machine=`echo "$1" | sed -e 's/86-.*/86-pc/'` + ;; + -lynx*178) + os=-lynxos178 + ;; + -lynx*5) + os=-lynxos5 + ;; + -lynx*) + os=-lynxos + ;; + -ptx*) + basic_machine=`echo "$1" | sed -e 's/86-.*/86-sequent/'` + ;; + -psos*) + os=-psos + ;; + -mint | -mint[0-9]*) + basic_machine=m68k-atari + os=-mint + ;; +esac + +# Decode aliases for certain CPU-COMPANY combinations. +case $basic_machine in + # Recognize the basic CPU types without company name. + # Some are omitted here because they have special meanings below. + 1750a | 580 \ + | a29k \ + | aarch64 | aarch64_be \ + | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \ + | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \ + | am33_2.0 \ + | arc | arceb \ + | arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv7[arm] \ + | avr | avr32 \ + | ba \ + | be32 | be64 \ + | bfin \ + | c4x | c8051 | clipper \ + | d10v | d30v | dlx | dsp16xx \ + | e2k | epiphany \ + | fido | fr30 | frv | ft32 \ + | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ + | hexagon \ + | i370 | i860 | i960 | ia16 | ia64 \ + | ip2k | iq2000 \ + | k1om \ + | le32 | le64 \ + | lm32 \ + | m32c | m32r | m32rle | m68000 | m68k | m88k \ + | maxq | mb | microblaze | microblazeel | mcore | mep | metag \ + | mips | mipsbe | mipseb | mipsel | mipsle \ + | mips16 \ + | mips64 | mips64el \ + | mips64octeon | mips64octeonel \ + | mips64orion | mips64orionel \ + | mips64r5900 | mips64r5900el \ + | mips64vr | mips64vrel \ + | mips64vr4100 | mips64vr4100el \ + | mips64vr4300 | mips64vr4300el \ + | mips64vr5000 | mips64vr5000el \ + | mips64vr5900 | mips64vr5900el \ + | mipsisa32 | mipsisa32el \ + | mipsisa32r2 | mipsisa32r2el \ + | mipsisa32r6 | mipsisa32r6el \ + | mipsisa64 | mipsisa64el \ + | mipsisa64r2 | mipsisa64r2el \ + | mipsisa64r6 | mipsisa64r6el \ + | mipsisa64sb1 | mipsisa64sb1el \ + | mipsisa64sr71k | mipsisa64sr71kel \ + | mipsr5900 | mipsr5900el \ + | mipstx39 | mipstx39el \ + | mn10200 | mn10300 \ + | moxie \ + | mt \ + | msp430 \ + | nds32 | nds32le | nds32be \ + | nios | nios2 | nios2eb | nios2el \ + | ns16k | ns32k \ + | open8 | or1k | or1knd | or32 \ + | pdp10 | pj | pjl \ + | powerpc | powerpc64 | powerpc64le | powerpcle \ + | pru \ + | pyramid \ + | riscv32 | riscv64 \ + | rl78 | rx \ + | score \ + | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[234]eb | sheb | shbe | shle | sh[1234]le | sh3ele \ + | sh64 | sh64le \ + | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \ + | sparcv8 | sparcv9 | sparcv9b | sparcv9v \ + | spu \ + | tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \ + | ubicom32 \ + | v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \ + | visium \ + | wasm32 \ + | x86 | xc16x | xstormy16 | xtensa \ + | z8k | z80) + basic_machine=$basic_machine-unknown + ;; + c54x) + basic_machine=tic54x-unknown + ;; + c55x) + basic_machine=tic55x-unknown + ;; + c6x) + basic_machine=tic6x-unknown + ;; + leon|leon[3-9]) + basic_machine=sparc-$basic_machine + ;; + m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | nvptx | picochip) + basic_machine=$basic_machine-unknown + os=-none + ;; + m88110 | m680[12346]0 | m683?2 | 
m68360 | m5200 | v70 | w65) + ;; + ms1) + basic_machine=mt-unknown + ;; + + strongarm | thumb | xscale) + basic_machine=arm-unknown + ;; + xgate) + basic_machine=$basic_machine-unknown + os=-none + ;; + xscaleeb) + basic_machine=armeb-unknown + ;; + + xscaleel) + basic_machine=armel-unknown + ;; + + # We use `pc' rather than `unknown' + # because (1) that's what they normally are, and + # (2) the word "unknown" tends to confuse beginning users. + i*86 | x86_64) + basic_machine=$basic_machine-pc + ;; + # Object if more than one company name word. + *-*-*) + echo Invalid configuration \`"$1"\': machine \`"$basic_machine"\' not recognized 1>&2 + exit 1 + ;; + # Recognize the basic CPU types with company name. + 580-* \ + | a29k-* \ + | aarch64-* | aarch64_be-* \ + | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \ + | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \ + | alphapca5[67]-* | alpha64pca5[67]-* | arc-* | arceb-* \ + | arm-* | armbe-* | armle-* | armeb-* | armv*-* \ + | avr-* | avr32-* \ + | ba-* \ + | be32-* | be64-* \ + | bfin-* | bs2000-* \ + | c[123]* | c30-* | [cjt]90-* | c4x-* \ + | c8051-* | clipper-* | craynv-* | cydra-* \ + | d10v-* | d30v-* | dlx-* \ + | e2k-* | elxsi-* \ + | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \ + | h8300-* | h8500-* \ + | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \ + | hexagon-* \ + | i*86-* | i860-* | i960-* | ia16-* | ia64-* \ + | ip2k-* | iq2000-* \ + | k1om-* \ + | le32-* | le64-* \ + | lm32-* \ + | m32c-* | m32r-* | m32rle-* \ + | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \ + | m88110-* | m88k-* | maxq-* | mcore-* | metag-* \ + | microblaze-* | microblazeel-* \ + | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \ + | mips16-* \ + | mips64-* | mips64el-* \ + | mips64octeon-* | mips64octeonel-* \ + | mips64orion-* | mips64orionel-* \ + | mips64r5900-* | mips64r5900el-* \ + | mips64vr-* | mips64vrel-* \ + | mips64vr4100-* | mips64vr4100el-* \ + | mips64vr4300-* | mips64vr4300el-* \ + | mips64vr5000-* | mips64vr5000el-* \ + | mips64vr5900-* | mips64vr5900el-* \ + | mipsisa32-* | mipsisa32el-* \ + | mipsisa32r2-* | mipsisa32r2el-* \ + | mipsisa32r6-* | mipsisa32r6el-* \ + | mipsisa64-* | mipsisa64el-* \ + | mipsisa64r2-* | mipsisa64r2el-* \ + | mipsisa64r6-* | mipsisa64r6el-* \ + | mipsisa64sb1-* | mipsisa64sb1el-* \ + | mipsisa64sr71k-* | mipsisa64sr71kel-* \ + | mipsr5900-* | mipsr5900el-* \ + | mipstx39-* | mipstx39el-* \ + | mmix-* \ + | mt-* \ + | msp430-* \ + | nds32-* | nds32le-* | nds32be-* \ + | nios-* | nios2-* | nios2eb-* | nios2el-* \ + | none-* | np1-* | ns16k-* | ns32k-* \ + | open8-* \ + | or1k*-* \ + | orion-* \ + | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \ + | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \ + | pru-* \ + | pyramid-* \ + | riscv32-* | riscv64-* \ + | rl78-* | romp-* | rs6000-* | rx-* \ + | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \ + | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \ + | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \ + | sparclite-* \ + | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx*-* \ + | tahoe-* \ + | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \ + | tile*-* \ + | tron-* \ + | ubicom32-* \ + | v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \ + | vax-* \ + | visium-* \ + | wasm32-* \ + | we32k-* \ + | x86-* | x86_64-* | xc16x-* | xps100-* \ + | xstormy16-* | xtensa*-* \ + | 
ymp-* \ + | z8k-* | z80-*) + ;; + # Recognize the basic CPU types without company name, with glob match. + xtensa*) + basic_machine=$basic_machine-unknown + ;; + # Recognize the various machine names and aliases which stand + # for a CPU type and a company and sometimes even an OS. + 386bsd) + basic_machine=i386-pc + os=-bsd + ;; + 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) + basic_machine=m68000-att + ;; + 3b*) + basic_machine=we32k-att + ;; + a29khif) + basic_machine=a29k-amd + os=-udi + ;; + abacus) + basic_machine=abacus-unknown + ;; + adobe68k) + basic_machine=m68010-adobe + os=-scout + ;; + alliant | fx80) + basic_machine=fx80-alliant + ;; + altos | altos3068) + basic_machine=m68k-altos + ;; + am29k) + basic_machine=a29k-none + os=-bsd + ;; + amd64) + basic_machine=x86_64-pc + ;; + amd64-*) + basic_machine=x86_64-`echo "$basic_machine" | sed 's/^[^-]*-//'` + ;; + amdahl) + basic_machine=580-amdahl + os=-sysv + ;; + amiga | amiga-*) + basic_machine=m68k-unknown + ;; + amigaos | amigados) + basic_machine=m68k-unknown + os=-amigaos + ;; + amigaunix | amix) + basic_machine=m68k-unknown + os=-sysv4 + ;; + apollo68) + basic_machine=m68k-apollo + os=-sysv + ;; + apollo68bsd) + basic_machine=m68k-apollo + os=-bsd + ;; + aros) + basic_machine=i386-pc + os=-aros + ;; + asmjs) + basic_machine=asmjs-unknown + ;; + aux) + basic_machine=m68k-apple + os=-aux + ;; + balance) + basic_machine=ns32k-sequent + os=-dynix + ;; + blackfin) + basic_machine=bfin-unknown + os=-linux + ;; + blackfin-*) + basic_machine=bfin-`echo "$basic_machine" | sed 's/^[^-]*-//'` + os=-linux + ;; + bluegene*) + basic_machine=powerpc-ibm + os=-cnk + ;; + c54x-*) + basic_machine=tic54x-`echo "$basic_machine" | sed 's/^[^-]*-//'` + ;; + c55x-*) + basic_machine=tic55x-`echo "$basic_machine" | sed 's/^[^-]*-//'` + ;; + c6x-*) + basic_machine=tic6x-`echo "$basic_machine" | sed 's/^[^-]*-//'` + ;; + c90) + basic_machine=c90-cray + os=-unicos + ;; + cegcc) + basic_machine=arm-unknown + os=-cegcc + ;; + convex-c1) + basic_machine=c1-convex + os=-bsd + ;; + convex-c2) + basic_machine=c2-convex + os=-bsd + ;; + convex-c32) + basic_machine=c32-convex + os=-bsd + ;; + convex-c34) + basic_machine=c34-convex + os=-bsd + ;; + convex-c38) + basic_machine=c38-convex + os=-bsd + ;; + cray | j90) + basic_machine=j90-cray + os=-unicos + ;; + craynv) + basic_machine=craynv-cray + os=-unicosmp + ;; + cr16 | cr16-*) + basic_machine=cr16-unknown + os=-elf + ;; + crds | unos) + basic_machine=m68k-crds + ;; + crisv32 | crisv32-* | etraxfs*) + basic_machine=crisv32-axis + ;; + cris | cris-* | etrax*) + basic_machine=cris-axis + ;; + crx) + basic_machine=crx-unknown + os=-elf + ;; + da30 | da30-*) + basic_machine=m68k-da30 + ;; + decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn) + basic_machine=mips-dec + ;; + decsystem10* | dec10*) + basic_machine=pdp10-dec + os=-tops10 + ;; + decsystem20* | dec20*) + basic_machine=pdp10-dec + os=-tops20 + ;; + delta | 3300 | motorola-3300 | motorola-delta \ + | 3300-motorola | delta-motorola) + basic_machine=m68k-motorola + ;; + delta88) + basic_machine=m88k-motorola + os=-sysv3 + ;; + dicos) + basic_machine=i686-pc + os=-dicos + ;; + djgpp) + basic_machine=i586-pc + os=-msdosdjgpp + ;; + dpx20 | dpx20-*) + basic_machine=rs6000-bull + os=-bosx + ;; + dpx2*) + basic_machine=m68k-bull + os=-sysv3 + ;; + e500v[12]) + basic_machine=powerpc-unknown + os=$os"spe" + ;; + e500v[12]-*) + basic_machine=powerpc-`echo "$basic_machine" | sed 's/^[^-]*-//'` + os=$os"spe" + ;; + ebmon29k) + 
basic_machine=a29k-amd + os=-ebmon + ;; + elxsi) + basic_machine=elxsi-elxsi + os=-bsd + ;; + encore | umax | mmax) + basic_machine=ns32k-encore + ;; + es1800 | OSE68k | ose68k | ose | OSE) + basic_machine=m68k-ericsson + os=-ose + ;; + fx2800) + basic_machine=i860-alliant + ;; + genix) + basic_machine=ns32k-ns + ;; + gmicro) + basic_machine=tron-gmicro + os=-sysv + ;; + go32) + basic_machine=i386-pc + os=-go32 + ;; + h3050r* | hiux*) + basic_machine=hppa1.1-hitachi + os=-hiuxwe2 + ;; + h8300hms) + basic_machine=h8300-hitachi + os=-hms + ;; + h8300xray) + basic_machine=h8300-hitachi + os=-xray + ;; + h8500hms) + basic_machine=h8500-hitachi + os=-hms + ;; + harris) + basic_machine=m88k-harris + os=-sysv3 + ;; + hp300-*) + basic_machine=m68k-hp + ;; + hp300bsd) + basic_machine=m68k-hp + os=-bsd + ;; + hp300hpux) + basic_machine=m68k-hp + os=-hpux + ;; + hp3k9[0-9][0-9] | hp9[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hp9k2[0-9][0-9] | hp9k31[0-9]) + basic_machine=m68000-hp + ;; + hp9k3[2-9][0-9]) + basic_machine=m68k-hp + ;; + hp9k6[0-9][0-9] | hp6[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hp9k7[0-79][0-9] | hp7[0-79][0-9]) + basic_machine=hppa1.1-hp + ;; + hp9k78[0-9] | hp78[0-9]) + # FIXME: really hppa2.0-hp + basic_machine=hppa1.1-hp + ;; + hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893) + # FIXME: really hppa2.0-hp + basic_machine=hppa1.1-hp + ;; + hp9k8[0-9][13679] | hp8[0-9][13679]) + basic_machine=hppa1.1-hp + ;; + hp9k8[0-9][0-9] | hp8[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hppaosf) + basic_machine=hppa1.1-hp + os=-osf + ;; + hppro) + basic_machine=hppa1.1-hp + os=-proelf + ;; + i370-ibm* | ibm*) + basic_machine=i370-ibm + ;; + i*86v32) + basic_machine=`echo "$1" | sed -e 's/86.*/86-pc/'` + os=-sysv32 + ;; + i*86v4*) + basic_machine=`echo "$1" | sed -e 's/86.*/86-pc/'` + os=-sysv4 + ;; + i*86v) + basic_machine=`echo "$1" | sed -e 's/86.*/86-pc/'` + os=-sysv + ;; + i*86sol2) + basic_machine=`echo "$1" | sed -e 's/86.*/86-pc/'` + os=-solaris2 + ;; + i386mach) + basic_machine=i386-mach + os=-mach + ;; + vsta) + basic_machine=i386-unknown + os=-vsta + ;; + iris | iris4d) + basic_machine=mips-sgi + case $os in + -irix*) + ;; + *) + os=-irix4 + ;; + esac + ;; + isi68 | isi) + basic_machine=m68k-isi + os=-sysv + ;; + leon-*|leon[3-9]-*) + basic_machine=sparc-`echo "$basic_machine" | sed 's/-.*//'` + ;; + m68knommu) + basic_machine=m68k-unknown + os=-linux + ;; + m68knommu-*) + basic_machine=m68k-`echo "$basic_machine" | sed 's/^[^-]*-//'` + os=-linux + ;; + magnum | m3230) + basic_machine=mips-mips + os=-sysv + ;; + merlin) + basic_machine=ns32k-utek + os=-sysv + ;; + microblaze*) + basic_machine=microblaze-xilinx + ;; + mingw64) + basic_machine=x86_64-pc + os=-mingw64 + ;; + mingw32) + basic_machine=i686-pc + os=-mingw32 + ;; + mingw32ce) + basic_machine=arm-unknown + os=-mingw32ce + ;; + miniframe) + basic_machine=m68000-convergent + ;; + *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*) + basic_machine=m68k-atari + os=-mint + ;; + mips3*-*) + basic_machine=`echo "$basic_machine" | sed -e 's/mips3/mips64/'` + ;; + mips3*) + basic_machine=`echo "$basic_machine" | sed -e 's/mips3/mips64/'`-unknown + ;; + monitor) + basic_machine=m68k-rom68k + os=-coff + ;; + morphos) + basic_machine=powerpc-unknown + os=-morphos + ;; + moxiebox) + basic_machine=moxie-unknown + os=-moxiebox + ;; + msdos) + basic_machine=i386-pc + os=-msdos + ;; + ms1-*) + basic_machine=`echo "$basic_machine" | sed -e 's/ms1-/mt-/'` + ;; + msys) + basic_machine=i686-pc + os=-msys + 
;; + mvs) + basic_machine=i370-ibm + os=-mvs + ;; + nacl) + basic_machine=le32-unknown + os=-nacl + ;; + ncr3000) + basic_machine=i486-ncr + os=-sysv4 + ;; + netbsd386) + basic_machine=i386-unknown + os=-netbsd + ;; + netwinder) + basic_machine=armv4l-rebel + os=-linux + ;; + news | news700 | news800 | news900) + basic_machine=m68k-sony + os=-newsos + ;; + news1000) + basic_machine=m68030-sony + os=-newsos + ;; + news-3600 | risc-news) + basic_machine=mips-sony + os=-newsos + ;; + necv70) + basic_machine=v70-nec + os=-sysv + ;; + next | m*-next) + basic_machine=m68k-next + case $os in + -nextstep* ) + ;; + -ns2*) + os=-nextstep2 + ;; + *) + os=-nextstep3 + ;; + esac + ;; + nh3000) + basic_machine=m68k-harris + os=-cxux + ;; + nh[45]000) + basic_machine=m88k-harris + os=-cxux + ;; + nindy960) + basic_machine=i960-intel + os=-nindy + ;; + mon960) + basic_machine=i960-intel + os=-mon960 + ;; + nonstopux) + basic_machine=mips-compaq + os=-nonstopux + ;; + np1) + basic_machine=np1-gould + ;; + neo-tandem) + basic_machine=neo-tandem + ;; + nse-tandem) + basic_machine=nse-tandem + ;; + nsr-tandem) + basic_machine=nsr-tandem + ;; + nsv-tandem) + basic_machine=nsv-tandem + ;; + nsx-tandem) + basic_machine=nsx-tandem + ;; + op50n-* | op60c-*) + basic_machine=hppa1.1-oki + os=-proelf + ;; + openrisc | openrisc-*) + basic_machine=or32-unknown + ;; + os400) + basic_machine=powerpc-ibm + os=-os400 + ;; + OSE68000 | ose68000) + basic_machine=m68000-ericsson + os=-ose + ;; + os68k) + basic_machine=m68k-none + os=-os68k + ;; + pa-hitachi) + basic_machine=hppa1.1-hitachi + os=-hiuxwe2 + ;; + paragon) + basic_machine=i860-intel + os=-osf + ;; + parisc) + basic_machine=hppa-unknown + os=-linux + ;; + parisc-*) + basic_machine=hppa-`echo "$basic_machine" | sed 's/^[^-]*-//'` + os=-linux + ;; + pbd) + basic_machine=sparc-tti + ;; + pbb) + basic_machine=m68k-tti + ;; + pc532 | pc532-*) + basic_machine=ns32k-pc532 + ;; + pc98) + basic_machine=i386-pc + ;; + pc98-*) + basic_machine=i386-`echo "$basic_machine" | sed 's/^[^-]*-//'` + ;; + pentium | p5 | k5 | k6 | nexgen | viac3) + basic_machine=i586-pc + ;; + pentiumpro | p6 | 6x86 | athlon | athlon_*) + basic_machine=i686-pc + ;; + pentiumii | pentium2 | pentiumiii | pentium3) + basic_machine=i686-pc + ;; + pentium4) + basic_machine=i786-pc + ;; + pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*) + basic_machine=i586-`echo "$basic_machine" | sed 's/^[^-]*-//'` + ;; + pentiumpro-* | p6-* | 6x86-* | athlon-*) + basic_machine=i686-`echo "$basic_machine" | sed 's/^[^-]*-//'` + ;; + pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*) + basic_machine=i686-`echo "$basic_machine" | sed 's/^[^-]*-//'` + ;; + pentium4-*) + basic_machine=i786-`echo "$basic_machine" | sed 's/^[^-]*-//'` + ;; + pn) + basic_machine=pn-gould + ;; + power) basic_machine=power-ibm + ;; + ppc | ppcbe) basic_machine=powerpc-unknown + ;; + ppc-* | ppcbe-*) + basic_machine=powerpc-`echo "$basic_machine" | sed 's/^[^-]*-//'` + ;; + ppcle | powerpclittle) + basic_machine=powerpcle-unknown + ;; + ppcle-* | powerpclittle-*) + basic_machine=powerpcle-`echo "$basic_machine" | sed 's/^[^-]*-//'` + ;; + ppc64) basic_machine=powerpc64-unknown + ;; + ppc64-*) basic_machine=powerpc64-`echo "$basic_machine" | sed 's/^[^-]*-//'` + ;; + ppc64le | powerpc64little) + basic_machine=powerpc64le-unknown + ;; + ppc64le-* | powerpc64little-*) + basic_machine=powerpc64le-`echo "$basic_machine" | sed 's/^[^-]*-//'` + ;; + ps2) + basic_machine=i386-ibm + ;; + pw32) + basic_machine=i586-unknown + os=-pw32 + ;; + rdos | 
rdos64) + basic_machine=x86_64-pc + os=-rdos + ;; + rdos32) + basic_machine=i386-pc + os=-rdos + ;; + rom68k) + basic_machine=m68k-rom68k + os=-coff + ;; + rm[46]00) + basic_machine=mips-siemens + ;; + rtpc | rtpc-*) + basic_machine=romp-ibm + ;; + s390 | s390-*) + basic_machine=s390-ibm + ;; + s390x | s390x-*) + basic_machine=s390x-ibm + ;; + sa29200) + basic_machine=a29k-amd + os=-udi + ;; + sb1) + basic_machine=mipsisa64sb1-unknown + ;; + sb1el) + basic_machine=mipsisa64sb1el-unknown + ;; + sde) + basic_machine=mipsisa32-sde + os=-elf + ;; + sei) + basic_machine=mips-sei + os=-seiux + ;; + sequent) + basic_machine=i386-sequent + ;; + sh5el) + basic_machine=sh5le-unknown + ;; + simso-wrs) + basic_machine=sparclite-wrs + os=-vxworks + ;; + sps7) + basic_machine=m68k-bull + os=-sysv2 + ;; + spur) + basic_machine=spur-unknown + ;; + st2000) + basic_machine=m68k-tandem + ;; + stratus) + basic_machine=i860-stratus + os=-sysv4 + ;; + strongarm-* | thumb-*) + basic_machine=arm-`echo "$basic_machine" | sed 's/^[^-]*-//'` + ;; + sun2) + basic_machine=m68000-sun + ;; + sun2os3) + basic_machine=m68000-sun + os=-sunos3 + ;; + sun2os4) + basic_machine=m68000-sun + os=-sunos4 + ;; + sun3os3) + basic_machine=m68k-sun + os=-sunos3 + ;; + sun3os4) + basic_machine=m68k-sun + os=-sunos4 + ;; + sun4os3) + basic_machine=sparc-sun + os=-sunos3 + ;; + sun4os4) + basic_machine=sparc-sun + os=-sunos4 + ;; + sun4sol2) + basic_machine=sparc-sun + os=-solaris2 + ;; + sun3 | sun3-*) + basic_machine=m68k-sun + ;; + sun4) + basic_machine=sparc-sun + ;; + sun386 | sun386i | roadrunner) + basic_machine=i386-sun + ;; + sv1) + basic_machine=sv1-cray + os=-unicos + ;; + symmetry) + basic_machine=i386-sequent + os=-dynix + ;; + t3e) + basic_machine=alphaev5-cray + os=-unicos + ;; + t90) + basic_machine=t90-cray + os=-unicos + ;; + tile*) + basic_machine=$basic_machine-unknown + os=-linux-gnu + ;; + tx39) + basic_machine=mipstx39-unknown + ;; + tx39el) + basic_machine=mipstx39el-unknown + ;; + toad1) + basic_machine=pdp10-xkl + os=-tops20 + ;; + tower | tower-32) + basic_machine=m68k-ncr + ;; + tpf) + basic_machine=s390x-ibm + os=-tpf + ;; + udi29k) + basic_machine=a29k-amd + os=-udi + ;; + ultra3) + basic_machine=a29k-nyu + os=-sym1 + ;; + v810 | necv810) + basic_machine=v810-nec + os=-none + ;; + vaxv) + basic_machine=vax-dec + os=-sysv + ;; + vms) + basic_machine=vax-dec + os=-vms + ;; + vpp*|vx|vx-*) + basic_machine=f301-fujitsu + ;; + vxworks960) + basic_machine=i960-wrs + os=-vxworks + ;; + vxworks68) + basic_machine=m68k-wrs + os=-vxworks + ;; + vxworks29k) + basic_machine=a29k-wrs + os=-vxworks + ;; + w65*) + basic_machine=w65-wdc + os=-none + ;; + w89k-*) + basic_machine=hppa1.1-winbond + os=-proelf + ;; + x64) + basic_machine=x86_64-pc + ;; + xbox) + basic_machine=i686-pc + os=-mingw32 + ;; + xps | xps100) + basic_machine=xps100-honeywell + ;; + xscale-* | xscalee[bl]-*) + basic_machine=`echo "$basic_machine" | sed 's/^xscale/arm/'` + ;; + ymp) + basic_machine=ymp-cray + os=-unicos + ;; + none) + basic_machine=none-none + os=-none + ;; + +# Here we handle the default manufacturer of certain CPU types. It is in +# some cases the only manufacturer, in others, it is the most popular. 
+ w89k) + basic_machine=hppa1.1-winbond + ;; + op50n) + basic_machine=hppa1.1-oki + ;; + op60c) + basic_machine=hppa1.1-oki + ;; + romp) + basic_machine=romp-ibm + ;; + mmix) + basic_machine=mmix-knuth + ;; + rs6000) + basic_machine=rs6000-ibm + ;; + vax) + basic_machine=vax-dec + ;; + pdp11) + basic_machine=pdp11-dec + ;; + we32k) + basic_machine=we32k-att + ;; + sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele) + basic_machine=sh-unknown + ;; + cydra) + basic_machine=cydra-cydrome + ;; + orion) + basic_machine=orion-highlevel + ;; + orion105) + basic_machine=clipper-highlevel + ;; + mac | mpw | mac-mpw) + basic_machine=m68k-apple + ;; + pmac | pmac-mpw) + basic_machine=powerpc-apple + ;; + *-unknown) + # Make sure to match an already-canonicalized machine name. + ;; + *) + echo Invalid configuration \`"$1"\': machine \`"$basic_machine"\' not recognized 1>&2 + exit 1 + ;; +esac + +# Here we canonicalize certain aliases for manufacturers. +case $basic_machine in + *-digital*) + basic_machine=`echo "$basic_machine" | sed 's/digital.*/dec/'` + ;; + *-commodore*) + basic_machine=`echo "$basic_machine" | sed 's/commodore.*/cbm/'` + ;; + *) + ;; +esac + +# Decode manufacturer-specific aliases for certain operating systems. + +if [ x"$os" != x"" ] +then +case $os in + # First match some system type aliases that might get confused + # with valid system types. + # -solaris* is a basic system type, with this one exception. + -auroraux) + os=-auroraux + ;; + -solaris1 | -solaris1.*) + os=`echo $os | sed -e 's|solaris1|sunos4|'` + ;; + -solaris) + os=-solaris2 + ;; + -unixware*) + os=-sysv4.2uw + ;; + -gnu/linux*) + os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'` + ;; + # es1800 is here to avoid being matched by es* (a different OS) + -es1800*) + os=-ose + ;; + # Now accept the basic system types. + # The portable systems comes first. + # Each alternative MUST end in a * to match a version number. + # -sysv* is not here because it comes later, after sysvr4. 
+ -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \ + | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\ + | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \ + | -sym* | -kopensolaris* | -plan9* \ + | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \ + | -aos* | -aros* | -cloudabi* | -sortix* \ + | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \ + | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \ + | -hiux* | -knetbsd* | -mirbsd* | -netbsd* \ + | -bitrig* | -openbsd* | -solidbsd* | -libertybsd* \ + | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \ + | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \ + | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \ + | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \ + | -chorusos* | -chorusrdb* | -cegcc* | -glidix* \ + | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \ + | -midipix* | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \ + | -linux-newlib* | -linux-musl* | -linux-uclibc* \ + | -uxpv* | -beos* | -mpeix* | -udk* | -moxiebox* \ + | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* \ + | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \ + | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \ + | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \ + | -morphos* | -superux* | -rtmk* | -windiss* \ + | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \ + | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es* \ + | -onefs* | -tirtos* | -phoenix* | -fuchsia* | -redox* | -bme* \ + | -midnightbsd*) + # Remember, each alternative MUST END IN *, to match a version number. + ;; + -qnx*) + case $basic_machine in + x86-* | i*86-*) + ;; + *) + os=-nto$os + ;; + esac + ;; + -nto-qnx*) + ;; + -nto*) + os=`echo $os | sed -e 's|nto|nto-qnx|'` + ;; + -sim | -xray | -os68k* | -v88r* \ + | -windows* | -osx | -abug | -netware* | -os9* \ + | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*) + ;; + -mac*) + os=`echo "$os" | sed -e 's|mac|macos|'` + ;; + -linux-dietlibc) + os=-linux-dietlibc + ;; + -linux*) + os=`echo $os | sed -e 's|linux|linux-gnu|'` + ;; + -sunos5*) + os=`echo "$os" | sed -e 's|sunos5|solaris2|'` + ;; + -sunos6*) + os=`echo "$os" | sed -e 's|sunos6|solaris3|'` + ;; + -opened*) + os=-openedition + ;; + -os400*) + os=-os400 + ;; + -wince*) + os=-wince + ;; + -utek*) + os=-bsd + ;; + -dynix*) + os=-bsd + ;; + -acis*) + os=-aos + ;; + -atheos*) + os=-atheos + ;; + -syllable*) + os=-syllable + ;; + -386bsd) + os=-bsd + ;; + -ctix* | -uts*) + os=-sysv + ;; + -nova*) + os=-rtmk-nova + ;; + -ns2) + os=-nextstep2 + ;; + -nsk*) + os=-nsk + ;; + # Preserve the version number of sinix5. + -sinix5.*) + os=`echo $os | sed -e 's|sinix|sysv|'` + ;; + -sinix*) + os=-sysv4 + ;; + -tpf*) + os=-tpf + ;; + -triton*) + os=-sysv3 + ;; + -oss*) + os=-sysv3 + ;; + -svr4*) + os=-sysv4 + ;; + -svr3) + os=-sysv3 + ;; + -sysvr4) + os=-sysv4 + ;; + # This must come after -sysvr4. + -sysv*) + ;; + -ose*) + os=-ose + ;; + -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) + os=-mint + ;; + -zvmoe) + os=-zvmoe + ;; + -dicos*) + os=-dicos + ;; + -pikeos*) + # Until real need of OS specific support for + # particular features comes up, bare metal + # configurations are quite functional. + case $basic_machine in + arm*) + os=-eabi + ;; + *) + os=-elf + ;; + esac + ;; + -nacl*) + ;; + -ios) + ;; + -none) + ;; + *) + # Get rid of the `-' at the beginning of $os. 
+ os=`echo $os | sed 's/[^-]*-//'` + echo Invalid configuration \`"$1"\': system \`"$os"\' not recognized 1>&2 + exit 1 + ;; +esac +else + +# Here we handle the default operating systems that come with various machines. +# The value should be what the vendor currently ships out the door with their +# machine or put another way, the most popular os provided with the machine. + +# Note that if you're going to try to match "-MANUFACTURER" here (say, +# "-sun"), then you have to tell the case statement up towards the top +# that MANUFACTURER isn't an operating system. Otherwise, code above +# will signal an error saying that MANUFACTURER isn't an operating +# system, and we'll never get to this point. + +case $basic_machine in + score-*) + os=-elf + ;; + spu-*) + os=-elf + ;; + *-acorn) + os=-riscix1.2 + ;; + arm*-rebel) + os=-linux + ;; + arm*-semi) + os=-aout + ;; + c4x-* | tic4x-*) + os=-coff + ;; + c8051-*) + os=-elf + ;; + hexagon-*) + os=-elf + ;; + tic54x-*) + os=-coff + ;; + tic55x-*) + os=-coff + ;; + tic6x-*) + os=-coff + ;; + # This must come before the *-dec entry. + pdp10-*) + os=-tops20 + ;; + pdp11-*) + os=-none + ;; + *-dec | vax-*) + os=-ultrix4.2 + ;; + m68*-apollo) + os=-domain + ;; + i386-sun) + os=-sunos4.0.2 + ;; + m68000-sun) + os=-sunos3 + ;; + m68*-cisco) + os=-aout + ;; + mep-*) + os=-elf + ;; + mips*-cisco) + os=-elf + ;; + mips*-*) + os=-elf + ;; + or32-*) + os=-coff + ;; + *-tti) # must be before sparc entry or we get the wrong os. + os=-sysv3 + ;; + sparc-* | *-sun) + os=-sunos4.1.1 + ;; + pru-*) + os=-elf + ;; + *-be) + os=-beos + ;; + *-ibm) + os=-aix + ;; + *-knuth) + os=-mmixware + ;; + *-wec) + os=-proelf + ;; + *-winbond) + os=-proelf + ;; + *-oki) + os=-proelf + ;; + *-hp) + os=-hpux + ;; + *-hitachi) + os=-hiux + ;; + i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent) + os=-sysv + ;; + *-cbm) + os=-amigaos + ;; + *-dg) + os=-dgux + ;; + *-dolphin) + os=-sysv3 + ;; + m68k-ccur) + os=-rtu + ;; + m88k-omron*) + os=-luna + ;; + *-next) + os=-nextstep + ;; + *-sequent) + os=-ptx + ;; + *-crds) + os=-unos + ;; + *-ns) + os=-genix + ;; + i370-*) + os=-mvs + ;; + *-gould) + os=-sysv + ;; + *-highlevel) + os=-bsd + ;; + *-encore) + os=-bsd + ;; + *-sgi) + os=-irix + ;; + *-siemens) + os=-sysv4 + ;; + *-masscomp) + os=-rtu + ;; + f30[01]-fujitsu | f700-fujitsu) + os=-uxpv + ;; + *-rom68k) + os=-coff + ;; + *-*bug) + os=-coff + ;; + *-apple) + os=-macos + ;; + *-atari*) + os=-mint + ;; + *) + os=-none + ;; +esac +fi + +# Here we handle the case where we know the os, and the CPU type, but not the +# manufacturer. We pick the logical manufacturer. 
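As a quick illustration of the decoding above, together with the vendor defaulting that follows, here is how a few loose aliases canonicalize when this vintage of the script is run from the unpacked source tree (expected outputs traced by hand from the case statements, not captured from a real run):

  sh autoconf/config.sub sun4               # -> sparc-sun-sunos4.1.1
  sh autoconf/config.sub sun4sol2           # -> sparc-sun-solaris2
  sh autoconf/config.sub amd64-linux        # -> x86_64-pc-linux-gnu
  sh autoconf/config.sub i686-redhat-linux  # -> i686-redhat-linux-gnu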
+vendor=unknown +case $basic_machine in + *-unknown) + case $os in + -riscix*) + vendor=acorn + ;; + -sunos*) + vendor=sun + ;; + -cnk*|-aix*) + vendor=ibm + ;; + -beos*) + vendor=be + ;; + -hpux*) + vendor=hp + ;; + -mpeix*) + vendor=hp + ;; + -hiux*) + vendor=hitachi + ;; + -unos*) + vendor=crds + ;; + -dgux*) + vendor=dg + ;; + -luna*) + vendor=omron + ;; + -genix*) + vendor=ns + ;; + -mvs* | -opened*) + vendor=ibm + ;; + -os400*) + vendor=ibm + ;; + -ptx*) + vendor=sequent + ;; + -tpf*) + vendor=ibm + ;; + -vxsim* | -vxworks* | -windiss*) + vendor=wrs + ;; + -aux*) + vendor=apple + ;; + -hms*) + vendor=hitachi + ;; + -mpw* | -macos*) + vendor=apple + ;; + -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) + vendor=atari + ;; + -vos*) + vendor=stratus + ;; + esac + basic_machine=`echo "$basic_machine" | sed "s/unknown/$vendor/"` + ;; +esac + +echo "$basic_machine$os" +exit + +# Local variables: +# eval: (add-hook 'write-file-functions 'time-stamp) +# time-stamp-start: "timestamp='" +# time-stamp-format: "%:y-%02m-%02d" +# time-stamp-end: "'" +# End: diff --git a/autoconf/configure.in b/autoconf/configure.in new file mode 100644 index 00000000..81d4efa8 --- /dev/null +++ b/autoconf/configure.in @@ -0,0 +1,3665 @@ +dnl +dnl +dnl Process this file with autoconf to produce a configure script. +dnl +dnl Copyright (C) 2000-2017 Kern Sibbald +dnl License: BSD 2-Clause; see file LICENSE-FOSS +dnl +dnl require a recent autoconf +AC_PREREQ(2.61) +AC_INIT([bacula], m4_esyscmd([sed -n -e 's/^#define VERSION.*"\(.*\)"$/\1/p' ../src/version.h src/version.h 2> /dev/null | tr -d '\n'])) +AC_CONFIG_SRCDIR(src/version.h) + +BUILD_DIR=`pwd` +cd .. +TOP_DIR=`pwd` +cd ${BUILD_DIR} +AC_SUBST(BUILD_DIR) +AC_SUBST(TOP_DIR) +AC_CONFIG_AUX_DIR(${BUILD_DIR}/autoconf) +AC_CONFIG_HEADERS(src/config.h:autoconf/config.h.in) + +dnl minimal Win32 stuff for "make clean" +WIN32BUILDDIR=${BUILD_DIR}/src/win32 +WIN32MAINDIR=${BUILD_DIR} +WIN32TOPDIR=${TOP_DIR} +AC_SUBST(WIN32BUILDDIR) +AC_SUBST(WIN32MAINDIR) +AC_SUBST(WIN32TOPDIR) + +dnl search for true and false programs. 
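Both the AC_INIT line above and the VERSION/RELEASE/DATE assignments just below rely on the same sed idiom: print the quoted value of the matching #define from src/version.h. A minimal illustrative run, assuming the version header carries the 9.4.2 release string:

  printf '#define VERSION "9.4.2"\n' | sed -n -e 's/^#define VERSION.*"\(.*\)"$/\1/p'
  # prints: 9.4.2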
+AC_PATH_PROGS(TRUEPRG, true, :) +AC_PATH_PROGS(FALSEPRG, false, :) + + +dnl bacula version +post_host= +if test "x$BACULA" != x; then + post_host=`echo -${BACULA} | tr 'A-Z ' 'a-z-'` +fi +BACULA=${BACULA:-Bacula} +VERSION=`sed -n -e 's/^#define VERSION.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` +RELEASE=`sed -n -e 's/^#define RELEASE.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` +DATE=`sed -n -e 's/^#define BDATE.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` +LSMDATE=`sed -n -e 's/^#define LSMDATE.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` +BDB_VERSION=`sed -n -e 's/^#define BDB_VERSION \(.*\)$/\1/p' ${srcdir}/src/cats/cats.h` +DEPKGS_VERSION=`sed -n -e 's/^#define DEPKGS_VERSION \(.*\)$/\1/p' ${srcdir}/src/cats/cats.h` +DEPKGS_QT_VERSION=`sed -n -e 's/^#define DEPKGS_QT_VERSION.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` +BQT_VERSION=`sed -n -e 's/^#define BQT_VERSION.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` +VIX_VERSION=`sed -n -e 's/^#define VIX_VERSION.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` +JAVA_VERSION=`sed -n -e 's/^#define JAVA_VERSION.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` +NDMP_VERSION=`sed -n -e 's/^#define NDMP_VERSION.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` +LIBRSYNC_VERSION=`sed -n -e 's/^#define LIBRSYNC_VERSION.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` +AC_SUBST(VERSION)dnl +AC_SUBST(DATE)dnl +AC_SUBST(LSMDATE)dnl +AC_SUBST(BACULA)dnl +AC_SUBST(post_host)dnl +AC_SUBST(BDB_VERSION)dnl +AC_SUBST(DEPKGS_QT_VERSION)dnl +AC_SUBST(DEPKGS_VERSION)dnl +AC_SUBST(VIX_VERSION)dnl +AC_SUBST(JAVA_VERSION)dnl +AC_SUBST(NDMP_VERSION)dnl +AC_SUBST(LIBRSYNC_VERSION)dnl + + +dnl src/lib +dnl can be overwritten by specific values from version.h +LIBBAC_LT_RELEASE=`sed -n -e 's/^#.*LIBBAC_LT_RELEASE.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` +LIBBACCFG_LT_RELEASE=`sed -n -e 's/^#.*LIBBACCFG_LT_RELEASE.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` +LIBBACPY_LT_RELEASE=`sed -n -e 's/^#.*LIBBACPY_LT_RELEASE.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` + +LIBBAC_LT_RELEASE=${LIBBAC_LT_RELEASE:-$VERSION} +LIBBACCFG_LT_RELEASE=${LIBBACCFG_LT_RELEASE:-$VERSION} + +AC_SUBST(LIBBAC_LT_RELEASE)dnl +AC_SUBST(LIBBACCFG_LT_RELEASE)dnl + +dnl src/cats +dnl can be overwritten by specific values from version.h +LIBBACSQL_LT_RELEASE=`sed -n -e 's/^#.*LIBBACSQL_LT_RELEASE.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` +LIBBACCATS_LT_RELEASE=`sed -n -e 's/^#.*LIBBACCATS_LT_RELEASE.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` + +LIBBACSQL_LT_RELEASE=${LIBBACSQL_LT_RELEASE:-$VERSION} +LIBBACCATS_LT_RELEASE=${LIBBACCATS_LT_RELEASE:-$VERSION} + +AC_SUBST(LIBBACSQL_LT_RELEASE)dnl +AC_SUBST(LIBBACCATS_LT_RELEASE)dnl + +dnl src/findlib +dnl can be overwritten by specific values from version.h +LIBBACFIND_LT_RELEASE=`sed -n -e 's/^#.*LIBBACFIND_LT_RELEASE.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` + +LIBBACFIND_LT_RELEASE=${LIBBACFIND_LT_RELEASE:-$VERSION} + +AC_SUBST(LIBBACFIND_LT_RELEASE)dnl + +dnl PFILES are platform or plugin specific files +PFILES="platforms/Makefile" + +echo "configuring for ${BACULA} $VERSION ($DATE)" + + +dnl ------------------------------------------------------- +dnl Check for compiler. +dnl ------------------------------------------------------ + +AC_PROG_CC dnl this sets $GCC if using GNU C compiler +AC_PROG_CXX +AC_PROG_CC_C_O dnl Determine if C compiler support -c -o. +AC_PROG_GCC_TRADITIONAL dnl Determine if ioctl() need -traditional. + +BASECC=`basename $CC` +have_gcc=no +if test x"$GCC" = "xyes"; then + AC_DEFINE(HAVE_GCC) + have_gcc=yes +fi +AC_PATH_PROG(CXX, $CXX, $CXX) +if test ! 
-e $CXX; then + AC_MSG_ERROR(Unable to find C++ compiler) +fi + +dnl ------------------------------------------------------- +dnl Check for multiarch. +dnl ------------------------------------------------------ +AC_MSG_CHECKING(for multiarch system) +multiarch=`$CC $CFLAGS -print-multiarch 2>/dev/null` +AC_MSG_RESULT($multiarch) + +dnl ------------------------------------------------------- +dnl Check for programs. +dnl ------------------------------------------------------ +AC_PROG_INSTALL +AC_PATH_PROG(MV, mv, mv) +dnl Alert !!! +dnl If we name the variable RM it will shadow the RM variable in the configure script and we overwrite the +dnl value with the name of the rm command and not rm -f which is its normal content. This gives all kind +dnl of strange output of the configure script (like things don't exist etc.). +dnl So we name it REMOVE (more software has run into this problem) +AC_PATH_PROG(REMOVE, rm, rm) +AC_PATH_PROG(CP, cp, cp) +AC_PATH_PROG(SED, sed, sed) +AC_PATH_PROG(ECHO, echo, echo) +AC_PATH_PROG(CMP, cmp, cmp) +AC_PATH_PROG(TBL, tbl, tbl) +AC_PATH_PROG(AR, ar, ar) +AC_PATH_PROG(OPENSSL, openssl, none) +AC_PATH_PROG(MTX, mtx, mtx) +AC_PATH_PROG(DD, dd, dd) +AC_PATH_PROG(MKISOFS, mkisofs, mkisofs) +AC_PATH_PROG(PYTHON, python, python) +AC_PATH_PROG(GROWISOFS, growisofs, growisofs) +AC_PATH_PROG(DVDRWMEDIAINFO, dvd+rw-mediainfo, dvd+rw-mediainfo) +AC_PATH_PROG(DVDRWFORMAT, dvd+rw-format, dvd+rw-format) +AC_PATH_PROG(PKGCONFIG, pkg-config, pkg-config) +AC_PATH_PROG(QMAKE, qmake, none) +AC_PATH_PROG(GMAKE, gmake, none) +AC_PATH_PROG(PIDOF, pidof, pidof) +AC_PROG_AWK +# Some AWK programs fail, so test it and warn the user +if echo xfoo | $AWK 'BEGIN { prog=ARGV[1]; ARGC=1 } + { if ((prog == $2) || (("(" prog ")") == $2) || + (("[" prog "]") == $2) || + ((prog ":") == $2)) { print $1 ; exit 0 } }' xfoo>/dev/null; then :; +else + AC_MSG_ERROR([!!!!!!!!! WARNING !!!!!!!!!!!!!! + The regex engine of $AWK is too broken to be used you + might want to install GNU AWK. 
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!]) +fi +THE_AWK=$AWK +AC_PATH_PROG(AWK, $THE_AWK, $THE_AWK) + + +test -n "$ARFLAG" || ARFLAGS="cr" +AC_SUBST(ARFLAGS) + +MAKE_SHELL=/bin/sh +AC_SUBST(MAKE_SHELL) + +AC_SUBST(LOCAL_LIBS) +AC_SUBST(LOCAL_CFLAGS) +AC_SUBST(LOCAL_LDFLAGS) +AC_SUBST(LOCAL_DEFS) + +dnl -------------------------------------------------- +dnl Libtool config +dnl -------------------------------------------------- +use_libtool=yes +AC_ARG_ENABLE(libtool, + AC_HELP_STRING([--enable-libtool], [enable building using GNU libtool @<:@default=yes@:>@]), + [ + if test x$enableval = xno; then + use_libtool=no + fi + ] +) +LT_INIT([shared disable-static]) +LT_LIB_DLLOAD +LT_LANG([C++]) + +if test x$use_libtool != xno; then + DEFAULT_OBJECT_TYPE=".lo" + DEFAULT_ARCHIVE_TYPE=".la" + DEFAULT_SHARED_OBJECT_TYPE=".la" + LIBTOOL="\$(LIBTOOL)" + LIBTOOL_INSTALL_TARGET="libtool-install" + LIBTOOL_UNINSTALL_TARGET="libtool-uninstall" + LIBTOOL_CLEAN_TARGET="libtool-clean" + QMAKE_LIBTOOL="${BUILD_DIR}/libtool" + FD_PLUGIN_DIR="src/plugins/fd" + have_plugins=yes +else + DEFAULT_OBJECT_TYPE=".o" + DEFAULT_ARCHIVE_TYPE=".a" + DEFAULT_SHARED_OBJECT_TYPE=".so" + LIBTOOL="# \$(LIBTOOL)" + LIBTOOL_INSTALL_TARGET="" + LIBTOOL_UNINSTALL_TARGET="" + LIBTOOL_CLEAN_TARGET="" + QMAKE_LIBTOOL="# ${BUILD_DIR}/libtool" + FD_PLUGIN_DIR="" + have_plugins=no +fi + +AC_SUBST(DEFAULT_OBJECT_TYPE) +AC_SUBST(DEFAULT_ARCHIVE_TYPE) +AC_SUBST(DEFAULT_SHARED_OBJECT_TYPE) +AC_SUBST(LIBTOOL) +AC_SUBST(LIBTOOL_INSTALL_TARGET) +AC_SUBST(LIBTOOL_UNINSTALL_TARGET) +AC_SUBST(LIBTOOL_CLEAN_TARGET) +AC_SUBST(QMAKE_LIBTOOL) +AC_SUBST(FD_PLUGIN_DIR) + +dnl -------------------------------------------------- +dnl Include file handling +dnl -------------------------------------------------- +AC_ARG_ENABLE(includes, + AC_HELP_STRING([--enable-includes], [enable installing of include files @<:@default=no@:>@]), + [ + if test x$enableval = xyes; then + install_includes=yes + fi + ] +) + +dnl It only makes sense to install include files when you install libraries which only happens when +dnl libtool is enabled + +if test x$use_libtool != xno -a x$install_includes = xyes; then + INCLUDE_INSTALL_TARGET="install-includes" + INCLUDE_UNINSTALL_TARGET="uninstall-includes" +else + INCLUDE_INSTALL_TARGET="" + INCLUDE_UNINSTALL_TARGET="" +fi +AC_SUBST(INCLUDE_INSTALL_TARGET) +AC_SUBST(INCLUDE_UNINSTALL_TARGET) + +dnl -------------------------------------------------- +dnl Bacula OP Sys determination (see aclocal.m4) +dnl -------------------------------------------------- +BA_CHECK_OPSYS + +dnl ----------------------------------------------------------- +dnl Bacula OPSys Distribution determination (see aclocal.m4) +dnl ---------------------------------------------------------- +BA_CHECK_OPSYS_DISTNAME + +dnl -------------------------------------------------- +dnl Suppport for gettext (translations) +dnl By default, $datarootdir is ${prefix}/share +dnl -------------------------------------------------- +AM_GNU_GETTEXT([external]) + +dnl ------------------------------------------------------------------ +dnl If the user has not set --prefix, we set our default to nothing. +dnl In this case, if the user has not set --sysconfdir, we set it +dnl to the package default of /etc/bacula. If either --prefix or +dnl --sysconfdir is set, we leave sysconfdir alone except to eval it. +dnl If the user has not set --libdir, we set it to the package +dnl default of /usr/lib. If either --prefix or --libdir is set, +dnl we leave libdir alone except to eval it. 
If the user has not set +dnl --includedir, we set it to the package default of /usr/include. +dnl If either --prefix or --includedir is set, we leave includedir +dnl alone except to eval it +dnl ------------------------------------------------------------------ +os_name=`uname -s 2>/dev/null` +if test x${prefix} = xNONE ; then + if test `eval echo ${sysconfdir}` = NONE/etc ; then + sysconfdir=/etc/bacula + fi + + if test `eval echo ${libdir}` = NONE/lib ; then + case ${os_name} in + Linux) + os_processor=`uname -p 2>/dev/null` + case ${os_processor} in + x86_64) + libdir=/usr/lib64 + ;; + *) + libdir=/usr/lib + ;; + esac + ;; + *) + libdir=/usr/lib + ;; + esac + fi + + if test `eval echo ${includedir}` = NONE/include ; then + includedir=/usr/include + fi + + if test `eval echo ${datarootdir}` = NONE/share ; then + datarootdir=/usr/share + fi + prefix= +fi + +dnl ------------------------------------------------------------------------- +dnl If the user has not set --exec-prefix, we default to ${prefix} +dnl ------------------------------------------------------------------------- +if test x${exec_prefix} = xNONE ; then + exec_prefix=${prefix} +fi + +sysconfdir=`eval echo ${sysconfdir}` +datarootdir=`eval echo ${datarootdir}` +docdir=`eval echo ${docdir}` +htmldir=`eval echo ${htmldir}` +libdir=`eval echo ${libdir}` +includedir=`eval echo ${includedir}` +localedir=`eval echo ${datarootdir}/locale` +AC_DEFINE_UNQUOTED(SYSCONFDIR, "$sysconfdir") +AC_DEFINE_UNQUOTED(LOCALEDIR, "$localedir") + +dnl ------------------------------------------------------------------ +dnl If the user has not set --sbindir, we set our default as /sbin +dnl ------------------------------------------------------------------ +if test x$sbindir = x'${exec_prefix}/sbin' ; then + sbindir=${exec_prefix}/sbin +fi +sbindir=`eval echo ${sbindir}` + +dnl ------------------------------------------------------------------------- +dnl If the user has not set --mandir, we default to /usr/share/man +dnl ------------------------------------------------------------------------- +if test x$mandir = x'${datarootdir}/man' ; then + mandir=/usr/share/man +fi + +dnl ------------------------------------------------------------------------- +dnl If the user has not set --htmldir, we default to /usr/share/doc/bacula/html +dnl ------------------------------------------------------------------------- +if test x$htmldir = x'/usr/share/doc/bacula/' ; then + htmldir=`eval echo ${docdir}html` +fi + +dnl ------------------------------------------------------------------------- +dnl If the user has not set --docdir, we default to /usr/share/doc/bacula +dnl ------------------------------------------------------------------------- +if test x$docdir = x'/usr/share/doc/' ; then + docdir=`eval echo ${docdir}bacula` +fi + + +AC_PATH_PROGS(MSGFMT, msgfmt, no) +if test "$MSGFMT" = "no" +then + echo 'msgfmt program not found, disabling NLS !' 
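# --- Editor's sketch (not part of the imported configure.in) -----------------
# The directory defaulting above relies on autoconf leaving sysconfdir, libdir,
# etc. unexpanded, so a missing --prefix surfaces as the literal string NONE
# once the value is eval'ed.  Minimal stand-alone illustration:
prefix=NONE                     # autoconf's marker for "no --prefix given"
sysconfdir='${prefix}/etc'      # stored unexpanded, exactly as autoconf leaves it
eval echo ${sysconfdir}         # prints "NONE/etc" -> the NONE/etc test fires
prefix=/usr/local
eval echo ${sysconfdir}         # prints "/usr/local/etc" once prefix is known
# --- end sketch ---------------------------------------------------------------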
+ USE_NLS=no + USE_INCLUDED_LIBINTL=no +#else + AM_GNU_GETTEXT +fi + +support_smartalloc=yes +support_readline=yes +support_lzo=yes +support_s3=yes +support_conio=yes +support_bat=no +support_tls=no +support_crypto=no +support_static_tools=no +support_static_fd=no +support_static_sd=no +support_static_dir=no +support_static_cons=no +build_client_only=no +build_dird=yes +build_stored=yes +db_backends="" +batch_insert_db_backends="" +support_lockmgr=no + +dnl -------------------------------------------------------------------------- +dnl CHECKING COMMAND LINE OPTIONS +dnl -------------------------------------------------------------------------- + +dnl ------------------------------------------- +dnl bat (default off) +dnl ------------------------------------------- +AC_ARG_ENABLE(bat, + AC_HELP_STRING([--enable-bat], [enable build of bat Qt4/5 GUI @<:@default=no@:>@]), + [ + if test x$enableval = xyes; then + AC_DEFINE(HAVE_BAT, 1, [Set if Bacula bat Qt4/5 GUI support enabled]) + support_bat=yes + fi + ] +) + +BAT_DIR= +if test x$support_bat = xyes; then +dnl look for Qt4 + abc=`$PKGCONFIG QtGui` + pkg=$? + if test $pkg = 0; then + BAT_DIR=src/qt-console + else +dnl look for Qt5 + abc=`$PKGCONFIG Qt5Gui` + pkg=$? + if test $pkg = 0; then + BAT_DIR="src/qt-console src/qt-console/tray-monitor" + else + AC_MSG_ERROR(Unable to find either Qt4 or Qt5 installation needed by bat) + fi + fi +fi + +AC_SUBST(BAT_DIR) +AC_SUBST(QWT_INC) +AC_SUBST(QWT_LDFLAGS) +AC_SUBST(QWT_LIB) +AC_SUBST(QWT) + +dnl ------------------------------------------- +dnl smartalloc (default off) +dnl ------------------------------------------- +AC_ARG_ENABLE(smartalloc, + AC_HELP_STRING([--enable-smartalloc], [enable smartalloc debugging support @<:@default=no@:>@]), + [ + if test x$enableval = xno; then + support_smartalloc=no + fi + ] +) + +if test x$support_smartalloc = xyes; then + AC_DEFINE(SMARTALLOC, 1, [Set if you want Smartalloc enabled]) +fi + +dnl ------------------------------------------- +dnl Lock Manager (default off) +dnl ------------------------------------------- +AC_ARG_ENABLE(lockmgr, + AC_HELP_STRING([--enable-lockmgr], [enable lock manager support @<:@default=no@:>@]), + [ + if test x$enableval = xyes; then + support_lockmgr=yes + fi + ] +) + +if test x$support_lockmgr = xyes; then + AC_DEFINE(USE_LOCKMGR, 1, [Set if you want Lock Manager enabled]) +fi + + +dnl ------------------------------------------- +dnl static-tools (default off) +dnl ------------------------------------------- +AC_ARG_ENABLE(static-tools, + AC_HELP_STRING([--enable-static-tools], [enable static tape tools @<:@default=no@:>@]), + [ + if test x$enableval = xyes; then + if test x$use_libtool = xyes; then + AC_MSG_ERROR([Libtool is enabled, not compatible with static tools, + please rerun configure with --disable-libtool]) + fi + support_static_tools=yes + fi + ] +) + +TTOOL_LDFLAGS= +if test x$support_static_tools = xyes; then + TTOOL_LDFLAGS="-static" +fi +AC_SUBST(TTOOL_LDFLAGS) + +dnl ------------------------------------------- +dnl static-fd (default off) +dnl ------------------------------------------- +AC_ARG_ENABLE(static-fd, + AC_HELP_STRING([--enable-static-fd], [enable static File daemon @<:@default=no@:>@]), + [ + if test x$enableval = xyes; then + if test x$use_libtool = xyes; then + AC_MSG_ERROR([Libtool is enabled, not compatible with static tools, + please rerun configure with --disable-libtool]) + fi + support_static_fd=yes + fi + ] +) + +STATIC_FD= +if test x$support_static_fd = xyes; then + 
STATIC_FD="static-bacula-fd" +fi +AC_SUBST(STATIC_FD) + +dnl ------------------------------------------- +dnl static-sd (default off) +dnl ------------------------------------------- +AC_ARG_ENABLE(static-sd, + AC_HELP_STRING([--enable-static-sd], [enable static Storage daemon @<:@default=no@:>@]), + [ + if test x$enableval = xyes; then + if test x$use_libtool = xyes; then + AC_MSG_ERROR([Libtool is enabled, not compatible with static tools, + please rerun configure with --disable-libtool]) + fi + support_static_sd=yes + fi + ] +) + +STATIC_SD= +if test x$support_static_sd = xyes; then + STATIC_SD="static-bacula-sd" +fi +AC_SUBST(STATIC_SD) + +dnl ------------------------------------------- +dnl static-dir (default off) +dnl ------------------------------------------- +AC_ARG_ENABLE(static-dir, + AC_HELP_STRING([--enable-static-dir], [enable static Director @<:@default=no@:>@]), + [ + if test x$enableval = xyes; then + if test x$use_libtool = xyes; then + AC_MSG_ERROR([Libtool is enabled, not compatible with static tools, + please rerun configure with --disable-libtool]) + fi + support_static_dir=yes + fi + ] +) + +STATIC_DIR= +if test x$support_static_dir = xyes; then + STATIC_DIR="static-bacula-dir" +fi +AC_SUBST(STATIC_DIR) + +dnl ------------------------------------------- +dnl static-cons (default off) +dnl ------------------------------------------- +AC_ARG_ENABLE(static-cons, + AC_HELP_STRING([--enable-static-cons], [enable static Console @<:@default=no@:>@]), + [ + if test x$enableval = xyes; then + if test x$use_libtool = xyes; then + AC_MSG_ERROR([Libtool is enabled, not compatible with static tools, + please rerun configure with --disable-libtool]) + fi + support_static_cons=yes + fi + ] +) + +STATIC_CONS= +if test x$support_static_cons = xyes; then + STATIC_CONS="static-bconsole" +fi +AC_SUBST(STATIC_CONS) + +dnl ------------------------------------------- +dnl client_only (default off) +dnl ------------------------------------------- +AC_ARG_ENABLE(client-only, + AC_HELP_STRING([--enable-client-only], [build client (File daemon) only @<:@default=no@:>@]), + [ + if test x$enableval = xyes; then + build_client_only=yes + db_backends="None" + DB_BACKENDS="none" + fi + ] +) +if test x$build_client_only = xno; then + ALL_DIRS="subdirs" + AC_DEFINE(HAVE_CLIENT_ONLY, 1, [Set if we build only client]) +else + ALL_DIRS="" +fi +AC_SUBST(ALL_DIRS) + +dnl ------------------------------------------- +dnl director (default on) +dnl ------------------------------------------- +AC_ARG_ENABLE(build-dird, + AC_HELP_STRING([--enable-build-dird], [enable building of dird (Director) @<:@default=yes@:>@]), + [ + if test x$enableval = xno; then + build_dird=no + fi + ] +) + +DIRD_DIR="src/dird" +DIR_TOOLS="DIRTOOLS" + +AC_SUBST(DIRD_DIR) +AC_SUBST(DIR_TOOLS) + +dnl ------------------------------------------- +dnl stored (default on) +dnl ------------------------------------------- +AC_ARG_ENABLE(build-stored, + AC_HELP_STRING([--enable-build-stored], [enable building of stored (Storage daemon) @<:@default=yes@:>@]), + [ + if test x$enableval = xno; then + build_stored=no + fi + ] +) +if test x$build_stored = xyes; then + STORED_DIR="src/stored" +else + STORED_DIR="" +fi +AC_SUBST(STORED_DIR) + +dnl --------------------------------------------------- +dnl Check for conio (Bacula readline substitute)( +dnl --------------------------------------------------- +dnl this allows you to turn it completely off +AC_ARG_ENABLE(conio, + AC_HELP_STRING([--disable-conio], [disable conio support 
@<:@default=no@:>@]), + [ + if test x$enableval = xno; then + support_conio=no + fi + ] +) + + +dnl --------------------------------------------------- +dnl Check for IPv6 support +dnl --------------------------------------------------- +dnl this allows you to turn it completely off +support_ipv6=yes +AC_ARG_ENABLE(ipv6, + AC_HELP_STRING([--enable-ipv6], [enable ipv6 support @<:@default=yes@:>@]), + [ + if test x$enableval = xno; then + support_ipv6=no + fi + ] +) + +if test x$support_ipv6 = xyes; then + AC_TRY_LINK([ #include +#include +#include ], [struct in6_addr t=in6addr_any; t.s6_addr[0] = 0;], + [support_in6addr_any=yes], [support_in6addr_any=no]) + + if test x$support_in6addr_any = xno ; then + in6addr_any="const struct in6_addr in6addr_any" + else + in6addr_any="1" + fi + + AC_TRY_LINK([ #include +#include +#include ], [$in6addr_any; struct sockaddr_in6 s; struct in6_addr t=in6addr_any; int i=AF_INET6; s; t.s6_addr[0] = 0;], + [support_ipv6=yes], [support_ipv6=no]) +fi + +if test x$support_ipv6 = xyes; then + AC_DEFINE(HAVE_IPV6,1,[Whether to enable IPv6 support]) + + if test x$support_in6addr_any = xno ; then + AC_DEFINE(NEED_IN6ADDR_ANY,1,[Whether to have in6addr_any support]) + fi +fi + +TERM_LIB="" +AC_CHECK_HEADER(curses.h, [ + AC_CHECK_LIB(tinfo, tgetent, [ TERM_LIB="-ltinfo" ], [ + AC_CHECK_LIB(ncurses, tgetent, [ TERM_LIB="-lncurses" ], [ + AC_CHECK_LIB(termcap, tgetent, [ TERM_LIB="-ltermcap" ]) + ]) + ]) + ], + [ AC_CHECK_HEADERS(curses.h) + AC_CHECK_HEADER(term.h, + [ AC_CHECK_LIB(curses, tgetent, + [ TERM_LIB="-lcurses" ] ) + ]) + ]) + + +got_conio="no" +if test x$support_conio = xyes; then + if test x$TERM_LIB != x; then + CONS_LIBS=$TERM_LIB + CONS_OBJ="conio.o" + CONS_SRC="conio.c" + got_conio="yes" + support_readline=no + AC_DEFINE(HAVE_CONIO, 1, [Set if Bacula conio support enabled]) + else + echo " "; echo "Required libraries not found. CONIO turned off ..."; echo " " + fi +fi + + +dnl --------------------------------------------------- +dnl Check for readline support/directory (default off) +dnl --------------------------------------------------- +dnl this allows you to turn it completely off +AC_ARG_ENABLE(readline, + AC_HELP_STRING([--disable-readline], [disable readline support @<:@default=yes@:>@]), + [ + if test x$enableval = xno; then + support_readline=no + fi + ] +) + +got_readline="no" +READLINE_SRC= +if test x$support_readline = xyes; then + AC_ARG_WITH(readline, + AC_HELP_STRING([--with-readline@<:@=DIR@:>@], [specify readline library directory]), + [ + case "$with_readline" in + no) + : + ;; + yes|*) + if test -f ${with_readline}/readline.h; then + CONS_INC="-I${with_readline}" + CONS_LDFLAGS="-L$with_readline" + elif test -f ${with_readline}/include/readline/readline.h; then + CONS_INC="-I${with_readline}/include/readline" + CONS_LDFLAGS="-L${with_readline}/lib" + with_readline="${with_readline}/include/readline" + else + with_readline="/usr/include/readline" + fi + + AC_CHECK_HEADER(${with_readline}/readline.h, + [ + AC_DEFINE(HAVE_READLINE, 1, [Set to enable readline support]) + CONS_LIBS="-lreadline -lhistory ${TERM_LIB}" + got_readline="yes" + ], [ + echo " " + echo "readline.h not found. readline turned off ..." 
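# --- Editor's sketch (not part of the imported configure.in) -----------------
# The AC_CHECK_LIB cascade above picks TERM_LIB by link-testing tgetent against
# tinfo, ncurses, termcap and curses in turn.  Roughly equivalent shell, using
# autoconf's trick of declaring the symbol with a dummy prototype:
cat > conftest.c <<'EOF'
char tgetent();
int main() { return tgetent(); }
EOF
TERM_LIB=
for lib in tinfo ncurses termcap curses; do
    if cc conftest.c -l$lib -o conftest >/dev/null 2>&1; then
        TERM_LIB="-l$lib"; break
    fi
done
rm -f conftest conftest.c
echo "TERM_LIB=$TERM_LIB"
# --- end sketch ---------------------------------------------------------------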
+ echo " " + ] + ) + ;; + esac + ],[ + dnl check for standard readline library + AC_CHECK_HEADER(/usr/include/readline/readline.h, + [ + AC_DEFINE(HAVE_READLINE, 1, [Set to enable readline support]) + got_readline="yes" + CONS_INC="-I/usr/include/readline" + CONS_LIBS="-lreadline ${TERM_LIB}" + ], [ + dnl Did not find standard library, so try Bacula's default + AC_CHECK_HEADER(${TOP_DIR}/depkgs/readline/readline.h, + [ + AC_DEFINE(HAVE_READLINE, 1, [Set to enable readline support]) + got_readline="yes" + CONS_INC="-I${TOP_DIR}/depkgs/readline" + CONS_LIBS="-lreadline -lhistory ${TERM_LIB}" + CONS_LDFLAGS="-L${TOP_DIR}/depkgs/readline" + PRTREADLINE_SRC="${TOP_DIR}/depkgs/readline" + ], [ + echo " " + echo "readline.h not found. readline turned off ..." + echo " " + ] + ) + ] + ) + ] + ) +fi + +AC_SUBST(CONS_INC) +AC_SUBST(CONS_OBJ) +AC_SUBST(CONS_SRC) +AC_SUBST(CONS_LIBS) +AC_SUBST(CONS_LDFLAGS) +AC_SUBST(READLINE_SRC) + +dnl Minimal stuff for readline Makefile configuration +MAKE_SHELL=/bin/sh +AC_SUBST(MAKE_SHELL) +AC_HEADER_STAT +AC_HEADER_DIRENT +AC_CHECK_FUNCS(strcasecmp select setenv putenv tcgetattr) +AC_CHECK_FUNCS(lstat lchown lchmod futimes fchmod fchown lutimes) +AC_CHECK_FUNCS(nanosleep nl_langinfo) +AC_CHECK_FUNCS(be64toh htobe64) +AC_CHECK_HEADERS(varargs.h) + +dnl End of readline/conio stuff +dnl ----------------------------------------------------------------------- + +dnl +dnl Find where sockets are (especially for Solaris) +dnl Do this before the TCP Wrappers test since tcp wrappers +dnl uses the socket library and some linkers are stupid. +dnl +AC_CHECK_FUNC(socket, + AC_MSG_RESULT(using libc's socket), + AC_CHECK_LIB(xnet,socket) + AC_CHECK_LIB(socket,socket) + AC_CHECK_LIB(inet,socket)) + +dnl ----------------------------------------------------------- +dnl Check whether user wants TCP wrappers support (default off) +dnl ----------------------------------------------------------- +TCPW_MSG="no" +WRAPLIBS="" +AC_ARG_WITH(tcp-wrappers, + AC_HELP_STRING([--with-tcp-wrappers@<:@=DIR@:>@], [enable tcpwrappers support]), + [ + if test "x$withval" != "xno" ; then + saved_LIBS="$LIBS" + LIBS="$saved_LIBS -lwrap" + AC_SEARCH_LIBS(nanosleep, [rt]) + AC_MSG_CHECKING(for libwrap) + AC_TRY_LINK( + [ + #include + #include + int deny_severity = 0; + int allow_severity = 0; + struct request_info *req; + ], [ + hosts_access(req); + ], [ + AC_MSG_RESULT(yes) + AC_DEFINE(HAVE_LIBWRAP, 1, [Set to enable libwraper support]) + TCPW_MSG="yes" + LIBS="$saved_LIBS" + WRAPLIBS="-lwrap" + ], [ + LIBS="$saved_LIBS -lwrap -lnsl" + WRAPLIBS="$saved_LIBS -lwrap -lnsl" + AC_TRY_LINK( + [ + #include + #include + int deny_severity = 0; + int allow_severity = 0; + struct request_info *req; + ], [ + hosts_access(req); + ], [ + AC_MSG_RESULT(yes) + AC_DEFINE(HAVE_LIBWRAP, 1, [Set to enable libwraper support]) + TCPW_MSG="yes" + LIBS="$saved_LIBS" + WRAPLIBS="-lwrap" + ], [ + AC_MSG_ERROR([*** libwrap missing]) + ] + ) + ] + ) + fi + ] +) + +dnl ----------------------------------------------------------- +dnl Check whether OpenSSL is available +dnl ----------------------------------------------------------- +AC_MSG_CHECKING([for OpenSSL]) +dnl The following uses quadrigraphs: +dnl '@<:@' = '[' +dnl '@:>@' = ']' +AC_ARG_WITH(openssl, + AC_HELP_STRING([--with-openssl@<:@=DIR@:>@], [Include OpenSSL support. 
DIR is the OpenSSL base]), + [ + with_openssl_directory=${withval} + ] +) + +if test "x$with_openssl_directory" != "xno"; then + OPENSSL_LIBS="-lssl -lcrypto" + OPENSSL_INC="" + + if test "x$with_openssl_directory" != "xyes" && test x"${with_openssl_directory}" != "x"; then + # + # Make sure the $with_openssl_directory also makes sense + # + if test -d "$with_openssl_directory/lib" -a -d "$with_openssl_directory/include"; then + OPENSSL_LIBS="-L$with_openssl_directory/lib $OPENSSL_LIBS" + OPENSSL_INC="-I$with_openssl_directory/include $OPENSSL_INC" + fi + fi + + saved_LIBS="${LIBS}" + saved_CFLAGS="${CFLAGS}" + LIBS="${saved_LIBS} ${OPENSSL_LIBS}" + CFLAGS="${saved_CFLAGS} ${OPENSSL_INC}" + + AC_TRY_LINK( + [ + #include + ], [ + CRYPTO_set_id_callback(NULL); + ], [ + support_tls="yes" + support_crypto="yes" + ], [ + support_tls="no" + support_crypto="no" + ] + ) + + AC_TRY_LINK( + [ + #include + ], [ + EVP_sha512(); + ], [ + ac_cv_openssl_sha2="yes" + ], [ + ac_cv_openssl_sha2="no" + ] + ) + + dnl Solaris disables greater than 128+ bit encryption in their OpenSSL + dnl implementation, presumably for export reasons. If 192bit AES + dnl is available, we assume that we're running with a 'non-export' + dnl openssl library. + AC_TRY_LINK( + [ + #include + ], [ + EVP_aes_192_cbc(); + ], [ + ac_cv_openssl_export="no" + ], [ + ac_cv_openssl_export="yes" + ] + ) + + AC_MSG_RESULT([$support_tls]) + if test "$support_tls" = "yes"; then + AC_DEFINE(HAVE_OPENSSL, 1, [Define if OpenSSL library is available]) + AC_DEFINE(HAVE_TLS, 1, [Define if TLS support should be enabled]) + AC_DEFINE(HAVE_CRYPTO, 1, [Define if encryption support should be enabled]) + fi + + if test "$ac_cv_openssl_sha2" = "yes"; then + AC_DEFINE(HAVE_SHA2, 1, [Define if the SHA-2 family of digest algorithms is available]) + fi + + if test "$ac_cv_openssl_export" = "yes"; then + AC_DEFINE(HAVE_OPENSSL_EXPORT_LIBRARY, 1, [Define if the OpenSSL library is export-contrained to 128bit ciphers]) + fi + + if test "$support_crypto" = "yes"; then + AC_CHECK_LIB(crypto, EVP_PKEY_encrypt_old, AC_DEFINE(HAVE_OPENSSLv1, 1, [Set if have OpenSSL version 1.x])) + fi + + LIBS="${saved_LIBS}" + CFLAGS="${saved_CFLAGS}" +else + support_tls="no" + support_crypto="no" + AC_MSG_RESULT([$support_tls]) +fi + +if test "$support_tls" = "no" -o "$support_crypto" = "no"; then + OPENSSL_LIBS="" + OPENSSL_INC="" +fi + +AC_SUBST(OPENSSL_LIBS) +AC_SUBST(OPENSSL_INC) + +dnl ----------------------------------------------------------- +dnl dlopen is needed for plugins +dnl ----------------------------------------------------------- +AC_SEARCH_LIBS(dlopen, [dl]) + +dnl ------------------------------------------ +dnl Where to place working dir +dnl ------------------------------------------ +working_dir=`eval echo /opt/bacula/working` +AC_ARG_WITH(working-dir, + AC_HELP_STRING([--with-working-dir=PATH], [specify path of Bacula working directory]), + [ + if test "x$withval" != "xno" ; then + working_dir=$withval + fi + ] +) + +AC_SUBST(working_dir) + +dnl ------------------------------------------------------------------ +dnl If the user has not set archivedir, we set our default as /tmp +dnl ------------------------------------------------------------------ +archivedir=/tmp +AC_ARG_WITH(archivedir, + AC_HELP_STRING([--with-archivedir=PATH], [specify path of SD archive directory]), + [ + if test "x$withval" != "xno" ; then + archivedir=$withval + fi + ] +) + +AC_SUBST(archivedir) + +dnl ------------------------------------------------------------------ +dnl Allow the 
user to specify the daemon resource name default hostname +dnl ------------------------------------------------------------------ +basename=`hostname` +AC_ARG_WITH(basename, + AC_HELP_STRING([--with-basename=RESNAME], [specify base resource name for daemons]), + [ + if test "x$withval" != "xno" ; then + basename=$withval + fi + ] +) + +AC_SUBST(basename) + +dnl ------------------------------------------------------------------ +dnl Allow the user to override the hostname (default = machine hostname) +dnl ------------------------------------------------------------------ +hostname=`uname -n | cut -d '.' -f 1` +if test x${hostname} = x ; then + hostname="localhost" +fi +dnl Make sure hostname is resolved +ping -c 1 $hostname 2>/dev/null 1>/dev/null +if test ! $? = 0; then + hostname="localhost" +fi +AC_ARG_WITH(hostname, + AC_HELP_STRING([--with-hostname=RESNAME], [specify host name for daemons]), + [ + if test "x$withval" != "xno" ; then + hostname=$withval + fi + ] +) + +AC_SUBST(hostname) + + +dnl ------------------------------------------ +dnl Where to place scriptdir (script files) +dnl ------------------------------------------ +scriptdir=`eval echo ${sysconfdir}` +AC_ARG_WITH(scriptdir, + AC_HELP_STRING([--with-scriptdir=PATH], [specify path of Bacula scripts directory]), + [ + if test "x$withval" != "xno" ; then + scriptdir=$withval + fi + ] +) + +AC_SUBST(scriptdir) + + +dnl ------------------------------------------ +dnl Where to place bsrdir (bsr files) +dnl ------------------------------------------ +bsrdir=`eval echo /opt/bacula/bsr` +AC_ARG_WITH(bsrdir, + AC_HELP_STRING([--with-bsrdir=PATH], [specify path of Bacula bsrs directory]), + [ + if test "x$withval" != "xno" ; then + bsrdir=$withval + fi + ] +) + +AC_SUBST(bsrdir) + +dnl ------------------------------------------ +dnl Where to place logdir +dnl ------------------------------------------ +logdir=`eval echo /opt/bacula/log` +AC_ARG_WITH(logdir, + AC_HELP_STRING([--with-logdir=PATH], [specify path of Bacula logs directory]), + [ + if test "x$withval" != "xno" ; then + logdir=$withval + fi + ] +) + +AC_SUBST(logdir) + + +# ------------------------------------------ +# Where to place plugindir (plugin files) +# ------------------------------------------ +plugindir=`eval echo ${libdir}` +AC_ARG_WITH(plugindir, + AC_HELP_STRING([--with-plugindir=PATH], [specify path of Bacula plugins directory]), + [ + if test "x$withval" != "xno" ; then + plugindir=$withval + fi + ] +) + +AC_SUBST(plugindir) + +dnl ------------------------------------------ +dnl Where to send dump email +dnl ------------------------------------------ + +dnl ------------------------------------------ +dnl Where to send dump email +dnl ------------------------------------------ +dump_email=root@localhost +AC_ARG_WITH(dump-email, + AC_HELP_STRING([--with-dump-email=EMAIL], [dump email address]), + [ + if test "x$withval" != "xno" ; then + dump_email=$withval + fi + ] +) + +AC_SUBST(dump_email) + +dnl ------------------------------------------ +dnl Where to send job email +dnl ------------------------------------------ +job_email=root@localhost +AC_ARG_WITH(job-email, + AC_HELP_STRING([--with-job-email=EMAIL], [job output email address]), + [ + if test "x$withval" != "xno" ; then + job_email=$withval + fi + ] +) + +AC_SUBST(job_email) + +dnl ------------------------------------------ +dnl Where to find smtp host +dnl ------------------------------------------ +smtp_host=localhost +AC_ARG_WITH(smtp_host, + AC_HELP_STRING([--with-smtp-host=HOST], [SMTP mail host 
address]), + [ + if test "x$withval" != "xno" ; then + smtp_host=$withval + fi + ] +) + +AC_SUBST(smtp_host) + +dnl ------------------------------------ +dnl Where to place pid files +dnl ------------------------------------ +piddir=/var/run +AC_ARG_WITH(pid-dir, + AC_HELP_STRING([--with-pid-dir=PATH], [specify location of Bacula pid files]), + [ + if test "x$withval" != "xno" ; then + piddir=$withval + fi + ] +) + +AC_DEFINE_UNQUOTED(_PATH_BACULA_PIDDIR, "$piddir") +AC_SUBST(piddir) + +dnl ------------------------------------ +dnl Where to place subsys "lock file" +dnl ------------------------------------ +subsysdir=/var/run/subsys +if test -d /var/run/subsys; then + subsysdir=/var/run/subsys +elif test -d /var/lock/subsys; then + subsysdir=/var/lock/subsys +else + subsysdir=/var/run/subsys +fi +AC_ARG_WITH(subsys-dir, + AC_HELP_STRING([--with-subsys-dir=PATH], [specify location of Bacula subsys file]), + [ + if test "x$withval" != "xno" ; then + subsysdir=$withval + fi + ] +) + +AC_SUBST(subsysdir) + +dnl ------------------------------------ +dnl Where to start assigning ports +dnl ------------------------------------ +baseport=9101 +AC_ARG_WITH(baseport, + AC_HELP_STRING([--with-baseport=PORT], [specify base port address for daemons]), + [ + if test "x$withval" != "xno" ; then + baseport=$withval + fi + ] +) + +AC_SUBST(baseport) +dir_port=`expr $baseport` +fd_port=`expr $baseport + 1` +sd_port=`expr $fd_port + 1` + +AC_SUBST(dir_port) +AC_SUBST(fd_port) +AC_SUBST(sd_port) + +dnl ------------------------------------------ +dnl Generate passwords +dnl ------------------------------------------ +dir_password= +AC_ARG_WITH(dir-password, + AC_HELP_STRING([--with-dir-password=PASSWORD], [specify Director's password]), + [ + if test "x$withval" != "xno" ; then + dir_password=$withval + fi + ] +) + +if test "x$dir_password" = "x" ; then + if test "x$OPENSSL" = "xnone" ; then + key=`autoconf/randpass 33` + else + key=`openssl rand -base64 33` + fi + dir_password=$key +fi + +fd_password= +AC_ARG_WITH(fd-password, + AC_HELP_STRING([--with-fd-password=PASSWORD], [specify Client's password]), + [ + if test "x$withval" != "xno" ; then + fd_password=$withval + fi + ] +) + +if test "x$fd_password" = "x" ; then + if test "x$OPENSSL" = "xnone" ; then + key=`autoconf/randpass 37` + else + key=`openssl rand -base64 33` + fi + fd_password=$key +fi + +sd_password= +AC_ARG_WITH(sd-password, + AC_HELP_STRING([--with-sd-password=PASSWORD], [specify Storage daemon's password]), + [ + if test "x$withval" != "xno" ; then + sd_password=$withval + fi + ] +) + +if test "x$sd_password" = "x" ; then + if test "x$OPENSSL" = "xnone" ; then + key=`autoconf/randpass 41` + else + key=`openssl rand -base64 33` + fi + sd_password=$key +fi + +mon_dir_password= +AC_ARG_WITH(mon-dir-password, + AC_HELP_STRING([--with-mon-dir-password=PASSWORD], [specify Director's password used by the monitor]), + [ + if test "x$withval" != "xno" ; then + mon_dir_password=$withval + fi + ] +) + +if test "x$mon_dir_password" = "x" ; then + if test "x$OPENSSL" = "xnone" ; then + key=`autoconf/randpass 33` + else + key=`openssl rand -base64 33` + fi + mon_dir_password=$key +fi + +mon_fd_password= +AC_ARG_WITH(mon-fd-password, + AC_HELP_STRING([--with-mon-fd-password=PASSWORD], [specify Client's password used by the monitor]), + [ + if test "x$withval" != "xno" ; then + mon_fd_password=$withval + fi + ] +) + +if test "x$mon_fd_password" = "x" ; then + if test "x$OPENSSL" = "xnone" ; then + key=`autoconf/randpass 37` + else + key=`openssl rand 
-base64 33` + fi + mon_fd_password=$key +fi + +mon_sd_password= +AC_ARG_WITH(mon-sd-password, + AC_HELP_STRING([--with-mon-sd-password=PASSWORD], [specify Storage daemon's password used by the monitor]), + [ + if test "x$withval" != "xno" ; then + mon_sd_password=$withval + fi + ] +) + +if test "x$mon_sd_password" = "x" ; then + if test "x$OPENSSL" = "xnone" ; then + key=`autoconf/randpass 41` + else + key=`openssl rand -base64 33` + fi + mon_sd_password=$key +fi + +AC_SUBST(dir_password) +AC_SUBST(fd_password) +AC_SUBST(sd_password) +AC_SUBST(mon_dir_password) +AC_SUBST(mon_fd_password) +AC_SUBST(mon_sd_password) + +dnl +dnl Pickup any database name +dnl +db_name=bacula +AC_ARG_WITH(db_name, + AC_HELP_STRING([--with-db-name=DBNAME], [specify database name @<:@default=bacula@:>@]), + [ + if test "x$withval" != "x" ; then + db_name=$withval + fi + ] +) +AC_SUBST(db_name) + +db_user=bacula +AC_ARG_WITH(db_user, + AC_HELP_STRING([--with-db-user=UNAME], [specify database user @<:@default=bacula@:>@]), + [ + if test "x$withval" != "x" ; then + db_user=$withval + fi + ] +) +AC_SUBST(db_user) + +db_password= +AC_ARG_WITH(db_password, + AC_HELP_STRING([--with-db-password=PWD], [specify database password @<:@default=*none*@:>@]), + [ + if test "x$withval" != "x" ; then + db_password=$withval + fi + ] +) +AC_SUBST(db_password) + +dnl +dnl Pickup a database port +dnl +db_port=" " +AC_ARG_WITH(db_port, + AC_HELP_STRING([--with-db-port=DBPORT], [specify a database port @<:@default=null@:>@]), + [ + if test "x$withval" != "x" ; then + db_port=$withval + fi + ] +) +AC_SUBST(db_port) + +dnl +dnl Pickup MySQL SSL options for database user connection +dnl +db_ssl_options= +AC_ARG_WITH(db_ssl_options, + AC_HELP_STRING([--with-db-ssl-options=DBSSLOPTIONS], [specify SSL options for database user connection @<:@default=null@:>@]), + [ + if test "x$withval" != "x" ; then + db_ssl_options=$withval + fi + ] +) +AC_SUBST(db_ssl_options) + +# +# Handle users and groups for each daemon +# +dir_user= +AC_ARG_WITH(dir_user, + AC_HELP_STRING([--with-dir-user=USER], [specify user for Director daemon]), + [ + if test "x$withval" != "x" ; then + dir_user=$withval + fi + ] +) + +dir_group= +AC_ARG_WITH(dir_group, + AC_HELP_STRING([--with-dir-group=GROUP], [specify group for Director daemon]), + [ + if test "x$withval" != "x" ; then + dir_group=$withval + fi + ] +) + +sd_user= +AC_ARG_WITH(sd_user, + AC_HELP_STRING([--with-sd-user=USER], [specify user for Storage daemon]), + [ + if test "x$withval" != "x" ; then + sd_user=$withval + fi + ] +) + +sd_group= +AC_ARG_WITH(sd_group, + AC_HELP_STRING([--with-sd-group=GROUP], [specify group for Storage daemon]), + [ + if test "x$withval" != "x" ; then + sd_group=$withval + fi + ] +) + +fd_user= +AC_ARG_WITH(fd_user, + AC_HELP_STRING([--with-fd-user=USER], [specify user for File daemon]), + [ + if test "x$withval" != "x" ; then + fd_user=$withval + fi + ] +) + +fd_group= +AC_ARG_WITH(fd_group, + AC_HELP_STRING([--with-fd-group=GROUP], [specify group for File daemon]), + [ + if test "x$withval" != "x" ; then + fd_group=$withval + fi + ] +) + +AC_SUBST(dir_user) +AC_SUBST(dir_group) +AC_SUBST(sd_user) +AC_SUBST(sd_group) +AC_SUBST(fd_user) +AC_SUBST(fd_group) + +dnl +dnl allow setting default executable permissions +dnl +SBINPERM=0750 +AC_ARG_WITH(sbin-perm, + AC_HELP_STRING([--with-sbin-perm=MODE], [specify permissions for sbin binaries @<:@default=0750@:>@]), + [ + if test "x$withval" != "x" ; then + SBINPERM=$withval + fi + ] +) + +AC_SUBST(SBINPERM) + +dnl 
------------------------------------------- +dnl enable batch attribute DB insert (default on) +dnl ------------------------------------------- +support_batch_insert=yes +AC_ARG_ENABLE(batch-insert, + AC_HELP_STRING([--enable-batch-insert], [enable the DB batch insert code @<:@default=yes@:>@]), + [ + if test x$enableval = xno; then + support_batch_insert=no + fi + ] +) + +if test x$support_batch_insert = xyes; then + AC_DEFINE(USE_BATCH_FILE_INSERT, 1, [Set if DB batch insert code enabled]) +fi + +dnl ------------------------------------------------ +dnl Bacula check for various SQL database engines +dnl ------------------------------------------------ + +dnl +dnl Set uncomment_dbi by default to '#' if DBI is enabled this will get reset +dnl +uncomment_dbi="#" + +BA_CHECK_POSTGRESQL_DB + +BA_CHECK_MYSQL_DB + +BA_CHECK_SQLITE3_DB + +dnl ------------------------------------------- +dnl If no batch insert backend are enable set +dnl variable to None +dnl ------------------------------------------- +if test -z "${batch_insert_db_backends}"; then + batch_insert_db_backends="None" +fi + +dnl ------------------------------------------- +dnl Make sure at least one database backend is found +dnl ------------------------------------------- +if test "x${db_backends}" = "x" ; then + echo " " + echo " " + echo "You have not specified either --enable-client-only or one of the" + echo "supported databases: MySQL, PostgreSQL, or SQLite3." + echo "This is not permitted. Please reconfigure." + echo " " + echo "Aborting the configuration ..." + echo " " + echo " " + exit 1 +fi + +dnl ------------------------------------------- +dnl See how many catalog backends are configured. +dnl ------------------------------------------- +case `echo $DB_BACKENDS | wc -w | sed -e 's/^ *//'` in + 1) + DEFAULT_DB_TYPE="${DB_BACKENDS}" + if test x$use_libtool = xno; then + SHARED_CATALOG_TARGETS="" + else + SHARED_CATALOG_TARGETS="libbaccats-${DEFAULT_DB_TYPE}.la" + fi + ;; + *) + dnl ------------------------------------------------ + dnl Set the default backend to the first backend found + dnl ------------------------------------------------ + DEFAULT_DB_TYPE=`echo ${DB_BACKENDS} | cut -d' ' -f1` + + dnl ------------------------------------------------ + dnl For multiple backend we need libtool support. + dnl ------------------------------------------------ + if test x$use_libtool = xno; then + echo " " + echo " " + echo "You have specified two or more of the" + echo "supported databases: MySQL, PostgreSQL, or SQLite3." + echo "This is not permitted when not using libtool Please reconfigure." + echo " " + echo "Aborting the configuration ..." + echo " " + echo " " + exit 1 + fi + + SHARED_CATALOG_TARGETS="" + for db_type in ${DB_BACKENDS} + do + if test -z "${SHARED_CATALOG_TARGETS}"; then + SHARED_CATALOG_TARGETS="libbaccats-${db_type}.la" + else + SHARED_CATALOG_TARGETS="${SHARED_CATALOG_TARGETS} libbaccats-${db_type}.la" + fi + done + ;; +esac + +dnl ------------------------------------------- +dnl Unset DB_LIBS when using libtool as we link the +dnl shared library using the right database lib no need to +dnl set DB_LIBS which is only used for non shared versions +dnl of the backends. 
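# --- Editor's sketch (not part of the imported configure.in) -----------------
# How the backend counting and default selection above behave for a
# hypothetical two-backend build:
DB_BACKENDS="postgresql mysql"
count=`echo $DB_BACKENDS | wc -w | sed -e 's/^ *//'`
DEFAULT_DB_TYPE=`echo ${DB_BACKENDS} | cut -d' ' -f1`
echo "$count backend(s); default catalog backend: $DEFAULT_DB_TYPE"
# With more than one backend the script insists on libtool so that a shared
# libbaccats-<type>.la can be built per backend.
# --- end sketch ---------------------------------------------------------------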
+dnl ------------------------------------------- +if test x$use_libtool = xyes; then + DB_LIBS="" +fi + +AC_SUBST(uncomment_dbi) +AC_SUBST(DB_BACKENDS) +AC_SUBST(DB_LIBS) +AC_SUBST(DEFAULT_DB_TYPE) +AC_SUBST(SHARED_CATALOG_TARGETS) + +AC_DEFINE(PROTOTYPES) + +dnl -------------------------------------------------------------------------- +dnl Supply default CFLAGS, if not specified by `CFLAGS=flags ./configure' +dnl +if test -z "$CFLAGS" -o "$CFLAGS" = "-g -O2"; then + if test -z "$CCOPTS"; then + CCOPTS='-g -O2 -Wall' + fi + CFLAGS="$CCOPTS" +fi + +dnl A few others +AC_EXEEXT + +dnl See if we can use 64 bit file addresses +largefile_support="no" +AC_BAC_LARGEFILE + +AC_PATH_XTRA + +dnl -------------------------------------------------------------------------- +dnl CHECKING FOR HEADER FILES +dnl -------------------------------------------------------------------------- +AC_CHECK_HEADERS( \ + assert.h \ + fcntl.h \ + grp.h \ + pwd.h \ + libc.h \ + limits.h \ + stdarg.h \ + stdlib.h \ + stdint.h \ + inttypes.h \ + string.h \ + strings.h \ + termios.h \ + termcap.h \ + term.h \ + unistd.h \ + sys/bitypes.h \ + sys/byteorder.h \ + sys/ioctl.h \ + sys/select.h \ + sys/socket.h \ + sys/sockio.h \ + sys/stat.h \ + sys/time.h \ + sys/types.h \ + arpa/nameser.h \ + mtio.h \ + sys/mtio.h \ + sys/tape.h \ + regex.h \ + attr/attributes.h \ + attr/xattr.h \ +) +AC_HEADER_STDC +AC_HEADER_MAJOR +AC_HEADER_DIRENT +AC_HEADER_STAT +AC_HEADER_SYS_WAIT +AC_HEADER_TIME +AC_STRUCT_ST_BLKSIZE +AC_STRUCT_ST_BLOCKS +AC_STRUCT_TIMEZONE + +dnl -------------------------------------------------------------------------- +dnl Check for utime.h structure +dnl -------------------------------------------------------------------------- +AC_CACHE_CHECK(for utime.h, ba_cv_header_utime_h, + [ + AC_TRY_COMPILE( + [ + #include + #include + ], [ + struct utimbuf foo + ], [ + ba_cv_header_utime_h=yes + ], [ + ba_cv_header_utime_h=no + ] + ) + ] +) +test $ba_cv_header_utime_h = yes && AC_DEFINE(HAVE_UTIME_H, 1, [Set if utime.h exists]) + +dnl -------------------------------------------------------------------------- +dnl Check for socklen_t +dnl -------------------------------------------------------------------------- +AC_CACHE_CHECK(for socklen_t, ba_cv_header_socklen_t, + [ + AC_TRY_COMPILE( + [ + #include + #include + ], [ + socklen_t x + ], [ + ba_cv_header_socklen_t=yes + ], [ + ba_cv_header_socklen_t=no + ] + ) + ] +) +test $ba_cv_header_socklen_t = yes && AC_DEFINE(HAVE_SOCKLEN_T, 1, [Set if socklen_t exists]) + +dnl -------------------------------------------------------------------------- +dnl Check for ioctl request type +dnl -------------------------------------------------------------------------- +AC_LANG(C++) +AC_CACHE_CHECK(for ioctl_req_t, ba_cv_header_ioctl_req_t, + [ + AC_TRY_COMPILE( + [ + #include + #include + #include + ], [ + int (*d_ioctl)(int fd, unsigned long int request, ...); + d_ioctl = ::ioctl; + ], [ + ba_cv_header_ioctl_req_t=yes + ], [ + ba_cv_header_ioctl_req_t=no + ] + ) + ] +) +test $ba_cv_header_ioctl_req_t = yes && AC_DEFINE(HAVE_IOCTL_ULINT_REQUEST, 1, [Set if ioctl request is unsigned long int]) + +dnl Note: it is more correct to use AC_LANG(C++) but some of the older +dnl *BSD systems still use old style C prototypes, which are wrong with +dnl compiled with a C++ compiler. 
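# --- Editor's sketch (not part of the imported configure.in) -----------------
# The ioctl_req_t probe above asks the C++ compiler whether ::ioctl declares
# its request argument as 'unsigned long int'.  The probe's header names were
# lost in this copy of the diff; <unistd.h> and <sys/ioctl.h> are assumed here.
# Stand-alone equivalent:
cat > conftest.cpp <<'EOF'
#include <unistd.h>
#include <sys/ioctl.h>
int main() {
    int (*d_ioctl)(int fd, unsigned long int request, ...);
    d_ioctl = ::ioctl;   /* compiles only if the signatures match (glibc's does) */
    return 0;
}
EOF
c++ -c conftest.cpp -o conftest.o 2>/dev/null && echo "HAVE_IOCTL_ULINT_REQUEST"
rm -f conftest.cpp conftest.o
# --- end sketch ---------------------------------------------------------------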
+AC_LANG(C) + +dnl -------------------------------------------------------------------------- +dnl Check for typeof() +dnl -------------------------------------------------------------------------- +AC_LANG_PUSH(C++) +AC_CACHE_CHECK(for typeof, ba_cv_have_typeof, + [ + AC_TRY_RUN( + [ + main(){char *a = 0; a = (typeof a)a;} + ], [ + ba_cv_have_typeof=yes + ], [ + ba_cv_have_typeof=no + ], [ + ba_cv_have_typeof=no + ] + ) + ] +) +test $ba_cv_have_typeof = yes && AC_DEFINE([HAVE_TYPEOF], 1, [Defind to 1 if compiler has typeof]) +AC_LANG_POP(C++) + +AC_C_CONST + +AC_C_BIGENDIAN([AC_DEFINE([HAVE_BIG_ENDIAN], [1], [Big Endian])], [AC_DEFINE([HAVE_LITTLE_ENDIAN], [1], [Little Endian])]) + +dnl -------------------------------------------------------------------------- +dnl CHECKING FOR FILESYSTEM TYPE +dnl -------------------------------------------------------------------------- +AC_MSG_CHECKING(how to get filesystem type) +fstype=no +# The order of these tests is important. +AC_TRY_CPP( + [ + #include + #include + ], + AC_DEFINE(FSTYPE_STATVFS) fstype=SVR4 +) +if test $fstype = no; then + AC_TRY_CPP( + [ + #include + #include + ], + AC_DEFINE(FSTYPE_USG_STATFS) fstype=SVR3 + ) +fi +if test $fstype = no; then + AC_TRY_CPP( + [ + #include + #include + ], + AC_DEFINE(FSTYPE_AIX_STATFS) fstype=AIX + ) +fi +if test $fstype = no; then + AC_TRY_CPP( + [ + #include + ], + AC_DEFINE(FSTYPE_MNTENT) fstype=4.3BSD + ) +fi +if test $fstype = no; then + AC_EGREP_HEADER(f_type;, sys/mount.h, AC_DEFINE(FSTYPE_STATFS) fstype=4.4BSD/OSF1) +fi +if test $fstype = no; then + AC_TRY_CPP( + [ + #include + #include + ], + AC_DEFINE(FSTYPE_GETMNT) fstype=Ultrix + ) +fi +AC_MSG_RESULT($fstype) + +AC_CHECK_HEADER(sys/statvfs.h, [ AC_DEFINE(HAVE_SYS_STATVFS_H,1,[Defines if your system have the sys/statvfs.h header file])] , ) + +AC_CHECK_DECLS([O_CLOEXEC],,[AC_DEFINE([O_CLOEXEC],[0], [Defined to 0 if not provided])], +[[ +#ifdef HAVE_FCNTL_H +# include +#endif +]]) + +AC_CHECK_DECLS([FD_CLOEXEC],,[AC_DEFINE([FD_CLOEXEC],[0], [Defined to 0 if not provided])], +[[ +#ifdef HAVE_FCNTL_H +# include +#endif +]]) + +AC_CHECK_DECLS([SOCK_CLOEXEC],,[AC_DEFINE([SOCK_CLOEXEC],[0],[Defined to 0 if not provided])], +[[ +#ifdef HAVE_SYS_SOCKET_H +# include +#endif +]]) + +AC_CACHE_CHECK(for close on exec modifier for fopen(), ac_cv_feature_stream_cloexec_flag, + [if test $ac_cv_have_decl_O_CLOEXEC = yes ; then + if test $ac_cv_have_decl_SOCK_CLOEXEC = yes ; then + ac_cv_feature_stream_cloexec_flag="e" + fi + fi]) + +if test "x$ac_cv_feature_stream_cloexec_flag" = "xe" ; then + AC_DEFINE(HAVE_STREAM_CLOEXEC,[0],[Defined to 0 if not provided]) +fi + +AC_DEFINE_UNQUOTED([STREAM_CLOEXEC], "$ac_cv_feature_stream_cloexec_flag", [fopen() modifier for setting close on exec flag]) + +AC_CHECK_FUNC(accept4, [AC_DEFINE(HAVE_ACCEPT4, 1, [Define to 1 if you have the 'accept4' function.])]) + +S3_INC= +S3_LIBS= +S3_LDFLAGS= +have_libs3=no + +if test x$support_s3 = xyes; then + AC_ARG_WITH(s3, + AC_HELP_STRING([--with-s3@<:@=DIR@:>@], [specify s3 library directory]), + [ + case "$with_s3" in + no) + : + ;; + yes|*) + if test -f ${with_s3}/include/libs3.h; then + S3_INC="-I${with_s3}/include" + S3_LDFLAGS="-L${with_s3}/lib" + with_s3="${with_s3}/include" + else + with_s3="/usr/include" + fi + + AC_CHECK_HEADER(${with_s3}/libs3.h, + [ + AC_DEFINE(HAVE_LIBS3, 1, [Define to 1 if you have libs3]) + S3_LIBS="${S3_LDFLAGS} -ls3" + have_libs3="yes" + ], [ + echo " " + echo "libs3.h not found. s3 turned off ..." 
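# --- Editor's sketch (not part of the imported configure.in) -----------------
# What --with-s3=DIR does above, shown for a hypothetical install prefix:
with_s3=/opt/libs3                      # hypothetical --with-s3 argument
if test -f ${with_s3}/include/libs3.h; then
    S3_INC="-I${with_s3}/include"
    S3_LDFLAGS="-L${with_s3}/lib"
    S3_LIBS="${S3_LDFLAGS} -ls3"        # set once libs3.h is confirmed
fi
echo "S3_INC=$S3_INC S3_LIBS=$S3_LIBS"
# Without --with-s3 the script falls back to the default include path and an
# AC_CHECK_LIB(s3, S3_initialize) link test.
# --- end sketch ---------------------------------------------------------------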
+ echo " " + ] + ) + ;; + esac + ],[ + AC_CHECK_HEADER(libs3.h, + [ + AC_CHECK_LIB(s3, S3_initialize, + [ + S3_LIBS="-ls3" + AC_DEFINE(HAVE_LIBS3,1,[Define to 1 if you have libs3]) + have_libs3=yes + ]) + ]) + ]) +fi + +AC_SUBST(S3_INC) +AC_SUBST(S3_LIBS) + +AC_LANG_PUSH(C++) +AC_CHECK_FUNCS(backtrace) +AC_LANG_POP(C++) + +dnl -------------------------------------------------------------------------- +dnl CHECKING FOR TYPEDEFS, STRUCTURES, AND COMPILER CHARACTERISTICS. +dnl -------------------------------------------------------------------------- +AC_TYPE_SIGNAL +SIGNAL_CHECK +AC_TYPE_MODE_T +AC_TYPE_UID_T +AC_TYPE_SIZE_T +AC_TYPE_PID_T +AC_TYPE_OFF_T +AC_TYPE_INTPTR_T +AC_TYPE_UINTPTR_T +AC_CHECK_TYPE(ino_t, unsigned long) +AC_CHECK_TYPE(dev_t, unsigned long) +AC_CHECK_TYPE(daddr_t, long) +AC_CHECK_TYPE(major_t, int) +AC_CHECK_TYPE(minor_t, int) +AC_CHECK_TYPE(ssize_t, int) +AC_STRUCT_ST_BLOCKS +AC_STRUCT_ST_RDEV +AC_STRUCT_TM +AC_C_CONST +AC_C_RESTRICT + +AC_CHECK_SIZEOF(char, 1) +AC_CHECK_SIZEOF(short int, 2) +AC_CHECK_SIZEOF(int, 4) +AC_CHECK_SIZEOF(long int, 4) +AC_CHECK_SIZEOF(long long int, 8) +AC_CHECK_SIZEOF(int *, 4) + +dnl Check for sys/types.h types +AC_CACHE_CHECK([for u_int type], ac_cv_have_u_int, + [ + AC_TRY_COMPILE( + [ + #include + ], [ + u_int a; a = 1; + ], [ + ac_cv_have_u_int="yes" + ], [ + ac_cv_have_u_int="no" + ] + ) + ] +) +if test "x$ac_cv_have_u_int" = "xyes" ; then + AC_DEFINE(HAVE_U_INT) + have_u_int=1 +fi + +AC_CACHE_CHECK([for intmax_t type], ac_cv_have_intmax_t, + [ + AC_TRY_COMPILE( + [ + #include + ], [ + intmax_t a; a = 1; + ], [ + ac_cv_have_intmax_t="yes" + ], [ + AC_TRY_COMPILE( + [ + #include + ], [ + intmax_t a; a = 1; + ], [ + ac_cv_have_intmax_t="yes" + ], [ + ac_cv_have_intmax_t="no" + ] + ) + ] + ) + ] +) +if test "x$ac_cv_have_intmax_t" = "xyes" ; then + AC_DEFINE(HAVE_INTMAX_T) + have_intmax_t=1 +fi + +AC_CACHE_CHECK([for u_intmax_t type], ac_cv_have_u_intmax_t, + [ + AC_TRY_COMPILE( + [ + #include + ], [ + u_intmax_t a; a = 1; + ], [ + ac_cv_have_u_intmax_t="yes" + ], [ + AC_TRY_COMPILE( + [ + #include + ], [ + u_intmax_t a; a = 1; + ], [ + ac_cv_have_u_intmax_t="yes" + ], [ + ac_cv_have_u_intmax_t="no" + ] + ) + ] + ) + ] +) +if test "x$ac_cv_have_u_intmax_t" = "xyes" ; then + AC_DEFINE(HAVE_U_INTMAX_T) + have_u_intmax_t=1 +fi + +AC_CACHE_CHECK([for intXX_t types], ac_cv_have_intxx_t, + [ + AC_TRY_COMPILE( + [ + #include + ], [ + int8_t a; int16_t b; int32_t c; a = b = c = 1; + ], [ + ac_cv_have_intxx_t="yes" + ], [ + ac_cv_have_intxx_t="no" + ] + ) + ] +) +if test "x$ac_cv_have_intxx_t" = "xyes" ; then + AC_DEFINE(HAVE_INTXX_T) + have_intxx_t=1 +fi + +AC_CACHE_CHECK([for int64_t type], ac_cv_have_int64_t, + [ + AC_TRY_COMPILE( + [ + #include + ], [ + int64_t a; a = 1; + ], [ + ac_cv_have_int64_t="yes" + ], [ + ac_cv_have_int64_t="no" + ] + ) + ] +) +if test "x$ac_cv_have_int64_t" = "xyes" ; then + AC_DEFINE(HAVE_INT64_T) + have_int64_t=1 +fi + +AC_CACHE_CHECK([for u_intXX_t types], ac_cv_have_u_intxx_t, + [ + AC_TRY_COMPILE( + [ + #include + ], [ + u_int8_t a; u_int16_t b; u_int32_t c; a = b = c = 1; + ], [ + ac_cv_have_u_intxx_t="yes" + ], [ + ac_cv_have_u_intxx_t="no" + ] + ) + ] +) +if test "x$ac_cv_have_u_intxx_t" = "xyes" ; then + AC_DEFINE(HAVE_U_INTXX_T) + have_u_intxx_t=1 +fi + +AC_CACHE_CHECK([for u_int64_t types], ac_cv_have_u_int64_t, + [ + AC_TRY_COMPILE( + [ + #include + ], [ + u_int64_t a; a = 1; + ], [ + ac_cv_have_u_int64_t="yes" + ], [ + ac_cv_have_u_int64_t="no" + ] + ) + ] +) +if test "x$ac_cv_have_u_int64_t" = "xyes" ; 
then + AC_DEFINE(HAVE_U_INT64_T) + have_u_int64_t=1 +fi + +if (test -z "$have_u_intxx_t" || test -z "$have_intxx_t" && \ + test "x$ac_cv_header_sys_bitypes_h" = "xyes") +then + AC_MSG_CHECKING([for intXX_t and u_intXX_t types in sys/bitypes.h]) + AC_TRY_COMPILE( + [ + #include + ], [ + int8_t a; int16_t b; int32_t c; + u_int8_t e; u_int16_t f; u_int32_t g; + a = b = c = e = f = g = 1; + ], [ + AC_DEFINE(HAVE_U_INTXX_T) + AC_DEFINE(HAVE_INTXX_T) + AC_DEFINE(HAVE_SYS_BITYPES_H) + AC_MSG_RESULT(yes) + ], [ + AC_MSG_RESULT(no) + ] + ) +fi + +if test -z "$have_u_intxx_t" ; then + AC_CACHE_CHECK([for uintXX_t types], ac_cv_have_uintxx_t, + [ + AC_TRY_COMPILE( + [ + #include + ], [ + uint8_t a; uint16_t b; + uint32_t c; a = b = c = 1; + ], [ + ac_cv_have_uintxx_t="yes" + ], [ + ac_cv_have_uintxx_t="no" + ] + ) + ] + ) + if test "x$ac_cv_have_uintxx_t" = "xyes" ; then + AC_DEFINE(HAVE_UINTXX_T) + fi +fi + +if (test -z "$have_u_int64_t" || test -z "$have_int64_t" && \ + test "x$ac_cv_header_sys_bitypes_h" = "xyes") +then + AC_MSG_CHECKING([for int64_t and u_int64_t types in sys/bitypes.h]) + AC_TRY_COMPILE( + [ + #include + ], [ + int64_t a; u_int64_t b; + a = b = 1; + ], [ + AC_DEFINE(HAVE_U_INT64_T) + AC_DEFINE(HAVE_INT64_T) + AC_MSG_RESULT(yes) + ], [ + AC_MSG_RESULT(no) + ] + ) +fi + +if (test -z "$have_uintxx_t" && \ + test "x$ac_cv_header_sys_bitypes_h" = "xyes") +then + AC_MSG_CHECKING([for uintXX_t types in sys/bitypes.h]) + AC_TRY_COMPILE( + [ + #include + ], [ + uint8_t a; uint16_t b; + uint32_t c; a = b = c = 1; + ], [ + AC_DEFINE(HAVE_UINTXX_T) + AC_MSG_RESULT(yes) + ], [ + AC_MSG_RESULT(no) + ] + ) +fi + +dnl -------------------------------------------------------------------------- +dnl CHECKING FOR REQUIRED LIBRARY FUNCTIONS +dnl -------------------------------------------------------------------------- +AC_CHECK_FUNCS( \ + fork \ + getcwd \ + gethostname \ + getpid \ + gettimeofday \ + setpgid \ + setpgrp \ + setsid \ + signal \ + strerror \ + strncmp \ + strncpy \ + vfprintf \ + ,, + [echo 'configure: cannot find needed function.'; exit 1] +) + +AC_CHECK_DECL( + F_CLOSEM, + AC_DEFINE(HAVE_FCNTL_F_CLOSEM, 1, [Set if you have 'F_CLOSEM' fcntl flag]), + , + [#include ] +) + +AC_CHECK_DECL( + F_SETLK, + AC_DEFINE(HAVE_FCNTL_LOCK, 1, [Set if fcntl supports file locking]), + , + [#include ] +) + +AC_CHECK_FUNC(closefrom, [AC_DEFINE(HAVE_CLOSEFROM, 1, [Define to 1 if you have the 'closefrom' function.])]) +AC_CHECK_FUNCS(getpagesize, [AC_DEFINE(HAVE_GETPAGESIZE, 1, [Set if have getpagesize])]) +AC_CHECK_FUNCS(malloc_trim, [AC_DEFINE(HAVE_MALLOC_TRIM, 1, [Set if have malloc_trim])]) + +AC_CHECK_FUNCS(fchdir, [AC_DEFINE(HAVE_FCHDIR)]) +AC_CHECK_FUNCS(strtoll, [AC_DEFINE(HAVE_STRTOLL)]) +AC_CHECK_FUNCS(posix_fadvise) +AC_CHECK_FUNCS(posix_fallocate) +AC_CHECK_FUNCS(fdatasync) +AC_CHECK_FUNCS(realpath) +AC_CHECK_FUNCS(getrlimit) + +AC_CHECK_FUNCS(chflags) + +AC_TRY_LINK([ +int f(int b) { + return __builtin_bswap32(b); +} +],[int a = f(10);], [AC_DEFINE([HAVE_BSWAP32], 1, [Define to 1 if you have the '__builtin_bswap32' function ])]) + +AC_CHECK_FUNCS(snprintf vsnprintf gethostid fseeko) + +AC_CACHE_CHECK(for va_copy, ba_cv_va_copy, + [ + AC_TRY_LINK( + [ + #include + void use_va_copy(va_list args){va_list args2; va_copy(args2,args); va_end(args2);} + void call_use_va_copy(int junk,...){va_list args; va_start(args,junk); use_va_copy(args); va_end(args);} + ], [ + call_use_va_copy(1,2,3) + ], [ + ba_cv_va_copy=yes + ], [ + ba_cv_va_copy=no + ] + ) + ] +) +test $ba_cv_va_copy = yes && 
AC_DEFINE(HAVE_VA_COPY, 1, [Set if va_copy exists]) + +dnl -------------------------------------------------------------------------- +dnl CHECKING FOR THREAD SAFE FUNCTIONS +dnl -------------------------------------------------------------------------- +AC_CHECK_FUNCS(localtime_r readdir_r strerror_r gethostbyname_r) + +# If resolver functions are not in libc check for -lnsl or -lresolv. +AC_CHECK_FUNC(gethostbyname_r, + AC_MSG_RESULT(using libc's resolver), + AC_CHECK_LIB(nsl,gethostbyname_r) + AC_CHECK_LIB(resolv,gethostbyname_r)) + +AC_CHECK_FUNCS(inet_pton, [AC_DEFINE(HAVE_INET_PTON)]) +AC_CHECK_FUNCS(inet_ntop, [AC_DEFINE(HAVE_INET_NTOP)]) +AC_CHECK_FUNCS(gethostbyname2, [AC_DEFINE(HAVE_GETHOSTBYNAME2)]) +AC_CHECK_FUNCS(getnameinfo, [AC_DEFINE(HAVE_GETNAMEINFO)]) + + +dnl ---------------------------- +dnl check sa_len of sockaddr +dnl ---------------------------- +AC_CACHE_CHECK(for struct sockaddr has a sa_len field, ac_cv_struct_sockaddr_sa_len, + [ + AC_TRY_COMPILE( + [ + #include + ], [ + struct sockaddr s; s.sa_len; + ], [ + ac_cv_struct_sockaddr_sa_len=yes + ], [ac_cv_struct_sockaddr_sa_len=no + ] + ) + ] +) + +if test $ac_cv_struct_sockaddr_sa_len = yes; then + AC_DEFINE(HAVE_SA_LEN, 1, [Define if sa_len field exists in struct sockaddr]) +fi + +dnl +dnl check for working getaddrinfo() +dnl +dnl Note that if the system doesn't have gai_strerror(), we +dnl can't use getaddrinfo() because we can't get strings +dnl describing the error codes. +dnl +AC_CACHE_CHECK(for working getaddrinfo, ac_cv_working_getaddrinfo, + [ + AC_TRY_RUN( + [ + #include + #include + #include + #include + + void main(void) { + struct addrinfo hints, *ai; + int error; + + memset(&hints, 0, sizeof(hints)); + hints.ai_family = AF_UNSPEC; + hints.ai_socktype = SOCK_STREAM; + error = getaddrinfo("127.0.0.1", NULL, &hints, &ai); + if (error) { + exit(1); + } + if (ai->ai_addr->sa_family != AF_INET) { + exit(1); + } + exit(0); + } + ],[ + ac_cv_working_getaddrinfo="yes" + ],[ + ac_cv_working_getaddrinfo="no" + ],[ + ac_cv_working_getaddrinfo="yes" + ] + ) + ] +) +AC_CHECK_FUNC(gai_strerror, [AC_DEFINE(HAVE_GAI_STRERROR, 1, [Define to 1 if you have the 'gai_strerror' function.])]) + +if test "$ac_cv_working_getaddrinfo" = "yes"; then + if test "$ac_cv_func_gai_strerror" != "yes"; then + ac_cv_working_getaddrinfo="no" + else + AC_DEFINE(HAVE_GETADDRINFO, 1, [Define to 1 if getaddrinfo exists and works]) + fi +fi + +AC_FUNC_STRFTIME +AC_FUNC_VPRINTF +AC_FUNC_ALLOCA +AC_FUNC_GETMNTENT +AC_CHECK_FUNCS(getmntinfo, [AC_DEFINE(HAVE_GETMNTINFO)]) +AC_FUNC_CLOSEDIR_VOID +AC_FUNC_SETPGRP dnl check for BSD setpgrp. 
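# --- Editor's sketch (not part of the imported configure.in) -----------------
# The "working getaddrinfo" run test a few lines up resolves 127.0.0.1 and is
# only trusted if gai_strerror() also exists, since error codes could not be
# reported otherwise.  The probe's header names were lost in this copy of the
# diff; <sys/socket.h>, <netdb.h> and <string.h> are assumed here:
cat > conftest.c <<'EOF'
#include <sys/socket.h>
#include <netdb.h>
#include <string.h>
int main(void) {
    struct addrinfo hints, *ai;
    memset(&hints, 0, sizeof(hints));
    hints.ai_family = AF_UNSPEC;
    hints.ai_socktype = SOCK_STREAM;
    if (getaddrinfo("127.0.0.1", 0, &hints, &ai) != 0) return 1;
    return ai->ai_addr->sa_family != AF_INET;
}
EOF
cc conftest.c -o conftest 2>/dev/null && ./conftest && echo "HAVE_GETADDRINFO"
rm -f conftest conftest.c
# --- end sketch ---------------------------------------------------------------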
+# AC_FUNC_FNMATCH dnl use local version + +AC_CHECK_LIB(intl, gettext, [LIBS="$LIBS -lintl"]) + +AC_CHECK_LIB(sun, getpwnam) + +AC_CHECK_HEADERS(zlib.h) +AC_CHECK_LIB(z, deflate, [ZLIBS="-lz"]) +have_zlib=no +if test x$ZLIBS = x-lz; then + AC_DEFINE(HAVE_LIBZ) + have_zlib=yes +fi +AC_SUBST(ZLIBS) + +dnl +dnl Check if we have AFS on this system +dnl +AFS_CFLAGS="" +AFS_LIBS="" +support_afs=auto +AC_ARG_ENABLE(afs, + AC_HELP_STRING([--disable-afs], [disable afs support @<:@default=auto@:>@]), + [ + if test x$enableval = xyes; then + support_afs=yes + elif test x$enableval = xno; then + support_afs=no + fi + ] +) + +have_afs=no +if test x$support_afs = xyes -o x$support_afs = xauto; then + AC_ARG_WITH(afsdir, + AC_HELP_STRING([--with-afsdir@<:@=DIR@:>@], [Directory holding AFS includes/libs]), + with_afsdir=$withval + ) + + dnl + dnl Search in standard places, or --with-afsdir not specified + dnl + if test x$with_afsdir = x; then + for root in /usr /usr/local; do + if test -d ${root}/include/afs/ ; then + with_afsdir=${root} + break + fi + if test -d ${root}/include/openafs/afs/ ; then + with_afsdir=${root} + break + fi + done + fi + + if test -d ${with_afsdir}/include/afs/ ; then + AFS_CFLAGS="-I${with_afsdir}/include" + else + if test -d ${with_afsdir}/include/openafs/afs/ ; then + AFS_CFLAGS="-I${with_afsdir}/include/openafs" + fi + fi + + saved_CFLAGS="${CFLAGS}" + saved_CPPFLAGS="${CPPFLAGS}" + CFLAGS="${AFS_CFLAGS} ${saved_CFLAGS}" + CPPFLAGS="${AFS_CFLAGS} ${saved_CPPFLAGS}" + + AC_CHECK_HEADERS(afs/afsint.h) + AC_TRY_CPP( + [ + #include + #include + ], + AC_DEFINE(HAVE_AFS_VENUS_H,1,[Define to 1 if you have the header file.]) + ) + + CFLAGS="${saved_CFLAGS}" + CPPFLAGS="${saved_CPPFLAGS}" + + dnl + dnl See if we can find a libsys with the pioctl symbol in there + dnl + AC_MSG_CHECKING(for pioctl in AFS libsys) + for dir in ${with_afsdir}/lib \ + ${with_afsdir}/lib/afs \ + ${with_afsdir}/lib/openafs \ + ${with_afsdir}/lib64 \ + ${with_afsdir}/lib64/afs \ + ${with_afsdir}/lib64/openafs + do + for arch_type in .a .so + do + A=`test -f ${dir}/libsys${arch_type} && nm ${dir}/libsys${arch_type} 2>/dev/null | grep pioctl` + pkg=$? 
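      # Editor's note (illustrative, not part of the imported configure.in):
      # the probe above simply greps the candidate library's symbol table, e.g.
      #     nm /usr/lib/openafs/libsys.a 2>/dev/null | grep pioctl
      # (path hypothetical); any match means the OpenAFS syscall stubs are
      # present and AFS_LIBS is pointed at that directory.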
+ if test $pkg = 0; then + have_afs=yes + AFS_LIBS="-L${dir} -lsys -lrx -llwp ${dir}/util${arch_type}" + break + fi + done + done + + if test $have_afs = yes; then + AC_MSG_RESULT(yes) + else + AC_MSG_RESULT(no) + fi + + if test x$support_afs = xyes -a $have_afs != yes; then + AC_MSG_ERROR([afs support explicitly enabled but no supported afs implementation found, + please either load the afs libraries or rerun configure without --enable-afs]) + else + if test $have_afs = yes; then + AC_DEFINE(HAVE_AFS,1,[Define to 1 if your system has AFS support]) + AC_DEFINE(HAVE_AFS_ACL,1,[Andrew FileSystem ACL support]) + fi + fi +fi +AC_SUBST(AFS_CFLAGS) +AC_SUBST(AFS_LIBS) + +dnl --------------------------------------------------- +dnl Check for lzo support/directory (default on) +dnl --------------------------------------------------- +dnl this allows you to turn it completely off + +AC_ARG_ENABLE(lzo, + AC_HELP_STRING([--disable-lzo], [disable lzo support @<:@default=yes@:>@]), + [ + if test x$enableval = xno; then + support_lzo=no + fi + ] +) + +LZO_INC= +LZO_LIBS= +LZO_LDFLAGS= + +have_lzo="no" +if test x$support_lzo = xyes; then + AC_ARG_WITH(lzo, + AC_HELP_STRING([--with-lzo@<:@=DIR@:>@], [specify lzo library directory]), + [ + case "$with_lzo" in + no) + : + ;; + yes|*) + if test -f ${with_lzo}/include/lzo/lzoconf.h; then + LZO_INC="-I${with_lzo}/include" + LZO_LDFLAGS="-L${with_lzo}/lib" + with_lzo="${with_lzo}/include" + else + with_lzo="/usr/include" + fi + + AC_CHECK_HEADER(${with_lzo}/lzo/lzoconf.h, + [ + AC_DEFINE(HAVE_LZO, 1, [Define to 1 if you have LZO compression]) + LZO_LIBS="${LZO_LDFLAGS} -llzo2" + have_lzo="yes" + ], [ + echo " " + echo "lzoconf.h not found. lzo turned off ..." + echo " " + ] + ) + ;; + esac + ],[ + AC_CHECK_HEADER(lzo/lzoconf.h, + [ + AC_CHECK_HEADER(lzo/lzo1x.h, + [ + AC_CHECK_LIB(lzo2, lzo1x_1_compress, + [ + LZO_LIBS="-llzo2" + AC_DEFINE(HAVE_LZO,1,[Define to 1 if you have LZO compression]) + have_lzo=yes + ]) + ]) + ]) + ]) +fi + +AC_SUBST(LZO_INC) +AC_SUBST(LZO_LIBS) + + +dnl +dnl Check for ACL support and libraries +dnl +support_acl=auto +AC_ARG_ENABLE(acl, + AC_HELP_STRING([--disable-acl], [disable acl support @<:@default=auto@:>@]), + [ + if test x$enableval = xyes; then + support_acl=yes + elif test x$enableval = xno; then + support_acl=no + fi + ] +) + +have_acl=no +have_extended_acl=no +if test x$support_acl = xyes -o x$support_acl = xauto; then + AC_CHECK_HEADER(sys/acl.h, [ AC_DEFINE(HAVE_SYS_ACL_H,1,[Defines if your system have the sys/acl.h header file])] , ) + + dnl + dnl First check for acl_get_file in libc + dnl + AC_CHECK_FUNC(acl_get_file, + [ + have_acl=yes + ]) + + dnl + dnl Check for acl_get_file in libacl (Linux) + dnl + if test $have_acl = no; then + AC_CHECK_LIB(acl, acl_get_file, + [ + have_acl=yes + if test $have_afs = yes; then + dnl + dnl Because of possible naming conflict with AFS libacl make sure we use the one in /usr/lib64 or /usr/lib !!! + dnl + if test -d /usr/lib64/; then + FDLIBS="-L/usr/lib64 -lacl $FDLIBS" + else + FDLIBS="-L/usr/lib -lacl $FDLIBS" + fi + else + FDLIBS="-lacl $FDLIBS" + fi + ]) + fi + + dnl + dnl Check for acl_get_file in libpacl (OSF1) + dnl and if ACL_TYPE_DEFAULT_DIR is defined. + dnl + if test $have_acl = no -a x${HAVE_OSF1_OS_TRUE} = x; then + AC_CHECK_LIB(pacl, acl_get_file, + [ + have_acl=yes + FDLIBS="-lpacl $FDLIBS" + ]) + + AC_MSG_CHECKING(for ACL_TYPE_DEFAULT_DIR in acl.h include file) + grep ACL_TYPE_DEFAULT_DIR /usr/include/sys/acl.h > /dev/null 2>&1 + if test $? 
= 0; then + AC_DEFINE(HAVE_ACL_TYPE_DEFAULT_DIR,1,[Defines if your system have the ACL_TYPE_DEFAULT_DIR acl type]) + AC_MSG_RESULT(yes) + else + AC_MSG_RESULT(no) + fi + fi + + dnl + dnl On OSX check for availability of ACL_TYPE_EXTENDED + dnl + if test $have_acl = yes -a x${HAVE_DARWIN_OS_TRUE} = x; then + AC_MSG_CHECKING(for ACL_TYPE_EXTENDED in acl.h include file) + grep ACL_TYPE_EXTENDED /usr/include/sys/acl.h > /dev/null 2>&1 + if test $? = 0; then + AC_DEFINE(HAVE_ACL_TYPE_EXTENDED,1,[Defines if your system have the ACL_TYPE_EXTENDED acl type]) + AC_MSG_RESULT(yes) + else + AC_MSG_RESULT(no) + fi + fi + + dnl + dnl On FreeBSD check for availability of ACL_TYPE_NFS4 + dnl + if test $have_acl = yes -a \ + x${HAVE_FREEBSD_OS_TRUE} = x; then + AC_MSG_CHECKING(for ACL_TYPE_NFS4 in acl.h include file) + grep ACL_TYPE_NFS4 /usr/include/sys/acl.h > /dev/null 2>&1 + if test $? = 0; then + AC_DEFINE(HAVE_ACL_TYPE_NFS4,1,[Defines if your system have the ACL_TYPE_NFS4 acl type]) + AC_MSG_RESULT(yes) + else + AC_MSG_RESULT(no) + fi + fi + + dnl + dnl Check for acltotext and acl_totext (Solaris) + dnl + if test $have_acl = no -a \ + x${HAVE_SUN_OS_TRUE} = x; then + AC_CHECK_LIB(sec, acltotext, + [ + have_acl=yes + FDLIBS="-lsec $FDLIBS" + + AC_CHECK_LIB(sec, acl_totext, + [ + have_extended_acl=yes + ] + ) + ] + ) + fi + + dnl + dnl Check for acl_get and aclx_get (AIX) + dnl + if test $have_acl = no -a \ + x${HAVE_AIX_OS_TRUE} = x; then + AC_CHECK_FUNC(acl_get, + [ + have_acl=yes + + AC_CHECK_FUNC(aclx_get, + [ + have_extended_acl=yes + ] + ) + ] + ) + fi + +ACLOBJS= + if test x$support_acl = xyes -a $have_acl != yes; then + AC_MSG_ERROR([acl support explicitly enabled but no supported acl implementation found, + please either load the acl libraries or rerun configure without --enable-acl]) + else + if test $have_acl = yes; then + AC_DEFINE([HAVE_ACL],1,[Normal acl support]) + dnl + dnl Prepare obj building + dnl + if test x${HAVE_LINUX_OS_TRUE} = x; then + ACLOBJS="bacl.c bacl_linux.c" + fi + if test x${HAVE_SUN_OS_TRUE} = x; then + ACLOBJS="bacl.c bacl_solaris.c" + fi + if test x${HAVE_FREEBSD_OS_TRUE} = x; then + ACLOBJS="bacl.c bacl_freebsd.c" + fi + if test x${HAVE_DARWIN_OS_TRUE} = x; then + ACLOBJS="bacl.c bacl_osx.c" + fi + fi + if test $have_extended_acl = yes; then + AC_DEFINE([HAVE_EXTENDED_ACL],1,[Extended acl support]) + fi + fi +fi +AC_SUBST(ACLOBJS) + +dnl +dnl Check for XATTR support +dnl +support_xattr=auto +AC_ARG_ENABLE(xattr, + AC_HELP_STRING([--disable-xattr], [disable xattr support @<:@default=auto@:>@]), + [ + if test x$enableval = xyes; then + support_xattr=yes + elif test x$enableval = xno; then + support_xattr=no + fi + ] +) + +have_xattr=no +if test x$support_xattr = xyes -o x$support_xattr = xauto; then + dnl + dnl First check for *BSD support + dnl When running on a BSD variant + dnl + if test x${HAVE_FREEBSD_OS_TRUE} = x -o \ + x${HAVE_NETBSD_OS_TRUE} = x -o \ + x${HAVE_OPENBSD_OS_TRUE} = x; then + AC_CHECK_HEADER(sys/extattr.h, [ AC_DEFINE(HAVE_SYS_EXTATTR_H,1,[Defines if your system have the sys/extattr.h header file])] , ) + AC_CHECK_HEADER(libutil.h, [ AC_DEFINE(HAVE_LIBUTIL_H,1,[Defines if your system have the libutil.h header file])] , ) + AC_CHECK_FUNCS(extattr_get_link extattr_set_link extattr_list_link, + [ + have_xattr=yes + AC_DEFINE([HAVE_EXTATTR_GET_LINK],1,[Define to 1 if you have the 'extattr_get_link' function.]) + AC_DEFINE([HAVE_EXTATTR_SET_LINK],1,[Define to 1 if you have the 'extattr_set_link' function.]) + 
AC_DEFINE([HAVE_EXTATTR_LIST_LINK],1,[Define to 1 if you have the 'extattr_list_link' function.]) + ] + ) + + if test $have_xattr = no; then + AC_CHECK_FUNCS(extattr_get_file extattr_set_file extattr_list_file, + [ + have_xattr=yes + AC_DEFINE([HAVE_EXTATTR_GET_FILE],1,[Define to 1 if you have the 'extattr_get_file' function.]) + AC_DEFINE([HAVE_EXTATTR_SET_FILE],1,[Define to 1 if you have the 'extattr_set_file' function.]) + AC_DEFINE([HAVE_EXTATTR_LIST_FILE],1,[Define to 1 if you have the 'extattr_list_file' function.]) + ] + ) + fi + + if test $have_xattr = yes; then + have_extattr_string_in_libc=no + AC_CHECK_FUNCS(extattr_namespace_to_string extattr_string_to_namespace, + [ + have_extattr_string_in_libc=yes + AC_DEFINE([HAVE_EXTATTR_NAMESPACE_TO_STRING],1,[Define to 1 if you have the 'extattr_namespace_to_string' function.]) + AC_DEFINE([HAVE_EXTATTR_STRING_TO_NAMESPACE],1,[Define to 1 if you have the 'extattr_string_to_namespace' function.]) + ] + ) + + dnl + dnl If extattr_namespace_to_string and extattr_string_to_namespace are not in libc see if they are in libutil + dnl + if test $have_extattr_string_in_libc = no; then + AC_CHECK_LIB(util, extattr_namespace_to_string extattr_string_to_namespace, + [ + AC_DEFINE([HAVE_EXTATTR_NAMESPACE_TO_STRING],1,[Define to 1 if you have the 'extattr_namespace_to_string' function.]) + AC_DEFINE([HAVE_EXTATTR_STRING_TO_NAMESPACE],1,[Define to 1 if you have the 'extattr_string_to_namespace' function.]) + FDLIBS="-lutil $FDLIBS" + ] + ) + fi + fi + fi + + dnl + dnl If we failed to find *BSD support try the AIX implementation of extented attributes (EA) + dnl When running on AIX + dnl + if test $have_xattr = no -a \ + x${HAVE_AIX_OS_TRUE} = x; then + AC_CHECK_HEADER(sys/ea.h, [ AC_DEFINE(HAVE_SYS_EA_H,1,[Defines if your system have the sys/ea.h header file])] , ) + AC_CHECK_FUNCS(llistea lgetea lsetea, + [ + have_xattr=yes + AC_DEFINE([HAVE_LLISTEA],1,[Define to 1 if you have the 'llistea' function.]) + AC_DEFINE([HAVE_LGETEA],1,[Define to 1 if you have the 'lgetea' function.]) + AC_DEFINE([HAVE_LSETEA],1,[Define to 1 if you have the 'lsetea' function.]) + ] + ) + + if test $have_xattr = no; then + AC_CHECK_FUNCS(listea getea setea, + [ + have_xattr=yes + AC_DEFINE([HAVE_LISTEA],1,[Define to 1 if you have the 'listea' function.]) + AC_DEFINE([HAVE_GETEA],1,[Define to 1 if you have the 'getea' function.]) + AC_DEFINE([HAVE_SETEA],1,[Define to 1 if you have the 'setea' function.]) + ] + ) + fi + fi + + dnl + dnl If we failed to find AIX support try the TRU64 implementation of extented attributes + dnl when running on a TRU64 OS. 
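For orientation, the switches handled in this block are what a builder passes on the configure command line. A minimal sketch of such an invocation (the /opt/lzo path is illustrative and not taken from this source):

    ./configure --with-lzo=/opt/lzo --enable-acl --disable-xattr

When --enable-acl or --enable-xattr is given explicitly, a failed detection aborts configure through the AC_MSG_ERROR branches shown here; with the default "auto" setting the feature is simply left disabled and reported in the summary at the end of the run.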
+ dnl + if test $have_xattr = no -a \ + x${HAVE_OSF1_OS_TRUE} = x; then + AC_CHECK_HEADER(sys/proplist.h, [ AC_DEFINE(HAVE_SYS_PROPLIST_H,1,[Defines if your system have the sys/proplist.h header file])] , ) + AC_CHECK_FUNCS(getproplist get_proplist_entry sizeof_proplist_entry add_proplist_entry setproplist, + [ + have_xattr=yes + AC_DEFINE([HAVE_GETPROPLIST],1,[Define to 1 if you have the 'getproplist' function.]) + AC_DEFINE([HAVE_GET_PROPLIST_ENTRY],1,[Define to 1 if you have the 'get_proplist_entry' function.]) + AC_DEFINE([HAVE_SIZEOF_PROPLIST_ENTRY],1,[Define to 1 if you have the 'sizeof_proplist_entry' function.]) + AC_DEFINE([HAVE_ADD_PROPLIST_ENTRY],1,[Define to 1 if you have the 'add_proplist_entry' function.]) + AC_DEFINE([HAVE_SETPROPLIST],1,[Define to 1 if you have the 'setproplist' function.]) + ] + ) + fi + + dnl + dnl If we failed to find TRU64 support try the SOLARIS implementation of extented and extensible attributes + dnl when running on a Solaris. + dnl + if test $have_xattr = no -a \ + x${HAVE_SUN_OS_TRUE} = x; then + AC_CHECK_HEADER(sys/attr.h, [ AC_DEFINE(HAVE_SYS_ATTR_H,1,[Defines if your system have the sys/attr.h header file])] , ) + AC_CHECK_HEADER(sys/nvpair.h, [ AC_DEFINE(HAVE_SYS_NVPAIR_H,1,[Defines if your system have the sys/nvpair.h header file])] , ) + AC_CHECK_HEADER(attr.h, [ AC_DEFINE(HAVE_ATTR_H,1,[Defines if your system have the attr.h header file])] , ) + + AC_CHECK_FUNCS(openat attropen unlinkat fchownat futimesat linkat, + [ + have_xattr=yes + AC_DEFINE([HAVE_OPENAT],1,[Define to 1 if you have the 'openat' function.]) + AC_DEFINE([HAVE_ATTROPEN],1,[Define to 1 if you have the 'attropen' function.]) + AC_DEFINE([HAVE_UNLINKAT],1,[Define to 1 if you have the 'unlinkat' function.]) + AC_DEFINE([HAVE_FCHOWNAT],1,[Define to 1 if you have the 'fchownat' function.]) + AC_DEFINE([HAVE_FUTIMESAT],1,[Define to 1 if you have the 'futimesat' function.]) + AC_DEFINE([HAVE_LINKAT],1,[Define to 1 if you have the 'linkat' function.]) + ] + ) + + if test $have_xattr = yes; then + AC_CHECK_LIB(nvpair, nvlist_next_nvpair, + [ + AC_DEFINE([HAVE_NVLIST_NEXT_NVPAIR],1,[Define to 1 if you have the 'nvlist_next_nvpair' function.]) + FDLIBS="-lnvpair $FDLIBS" + ] + ) + fi + fi + + dnl + dnl If we failed to find Solaris support try the generic xattr support code + dnl + if test $have_xattr = no; then + AC_CHECK_HEADER(sys/xattr.h, [ AC_DEFINE(HAVE_SYS_XATTR_H,1,[Defines if your system have the sys/xattr.h header file])] , ) + AC_CHECK_FUNCS(llistxattr lgetxattr lsetxattr, + [ + have_xattr=yes + AC_DEFINE([HAVE_LLISTXATTR],1,[Define to 1 if you have the 'llistxattr' function.]) + AC_DEFINE([HAVE_LGETXATTR],1,[Define to 1 if you have the 'lgetxattr' function.]) + AC_DEFINE([HAVE_LSETXATTR],1,[Define to 1 if you have the 'lsetxattr' function.]) + ] + ) + + if test $have_xattr = no; then + AC_CHECK_FUNCS(listxattr getxattr setxattr, + [ + have_xattr=yes + AC_DEFINE([HAVE_LISTXATTR],1,[Define to 1 if you have the 'listxattr' function.]) + AC_DEFINE([HAVE_GETXATTR],1,[Define to 1 if you have the 'getxattr' function.]) + AC_DEFINE([HAVE_SETXATTR],1,[Define to 1 if you have the 'setxattr' function.]) + ] + ) + fi + fi + +XATTROBJS= + if test x$support_xattr = xyes -a $have_xattr != yes; then + AC_MSG_ERROR([xattr support explicitly enabled but no supported xattr implementation found, + please either load the xattr libraries or rerun configure without --enable-xattr]) + else + if test $have_xattr = yes; then + AC_DEFINE([HAVE_XATTR],1,[Extended Attributes support]) + dnl + dnl 
Prepare obj building + dnl + if test x${HAVE_LINUX_OS_TRUE} = x; then + XATTROBJS="bxattr.c bxattr_linux.c" + fi + if test x${HAVE_SUN_OS_TRUE} = x; then + XATTROBJS="bxattr.c bxattr_solaris.c" + fi + if test x${HAVE_FREEBSD_OS_TRUE} = x; then + XATTROBJS="bxattr.c bxattr_freebsd.c" + fi + if test x${HAVE_DARWIN_OS_TRUE} = x; then + XATTROBJS="bxattr.c bxattr_osx.c" + fi + fi + fi +fi +AC_SUBST(XATTROBJS) + +dnl +dnl Check for pthread libraries +dnl +PTHREAD_LIB="" +AC_CHECK_LIB(pthread, pthread_create, PTHREAD_LIB="-lpthread", + [ + AC_CHECK_LIB(pthreads, pthread_create, PTHREAD_LIB="-lpthreads", + [ + AC_CHECK_LIB(c_r, pthread_create, PTHREAD_LIB="-lc_r", + [ + AC_CHECK_FUNC(pthread_create) + ] + ) + ] + ) + ] +) + +dnl +dnl Check for headers, functions and libraries required to support +dnl keeping readall capabilities +dnl +AC_CHECK_HEADERS(sys/prctl.h sys/capability.h) +AC_CHECK_FUNCS(prctl setreuid) +AC_CHECK_LIB([cap], [cap_set_proc], [CAP_LIBS="-lcap"], [CAP_LIBS=]) +if test x$CAP_LIBS = x-lcap; then + AC_DEFINE(HAVE_LIBCAP, 1, [Define if you have libcap]) +fi +AC_SUBST(CAP_LIBS) + +AC_SUBST(FDLIBS) +AC_DEFINE(FDLIBS) + +CFLAGS=${CFLAGS--O} + +if test x$have_gcc = xyes ; then + CPPFLAGS="$CPPFLAGS -x c++ -fno-strict-aliasing -fno-exceptions -fno-rtti" + CFLAGS="$CFLAGS -x c++ -fno-strict-aliasing -fno-exceptions -fno-rtti" +fi +LDFLAGS=${LDFLAGS--O} +CPPFLAGS="$CPPFLAGS" +CFLAGS="$CFLAGS" +AC_SUBST(DEBUG) +AC_SUBST(DINCLUDE) +AC_SUBST(CFLAGS) +AC_SUBST(CPPFLAGS) +AC_SUBST(LDFLAGS) +AC_SUBST(X_CFLAGS) +AC_SUBST(DEFS) +AC_SUBST(LIBS) +AC_SUBST(DLIB) +AC_SUBST(X_LIBS) +AC_SUBST(X_EXTRA_LIBS) +AC_SUBST(WCFLAGS) +AC_SUBST(WLDFLAGS) +AC_SUBST(WRAPLIBS) + +dnl extra configurable objects +OBJLIST= +AC_SUBST(OBJLIST) + +lld="lld" +llu="llu" + +WCFLAGS= +WLDFLAGS= + +dnl +dnl Finally we set appropriate distribution specific +dnl variables and defaults +dnl +PSCMD="ps -e" +WIN32= +MACOSX= +COMPRESS_MANPAGES=yes + +case "$DISTNAME" in +aix) + DISTVER=`uname -r` + PSCMD="ps -e -o pid,comm" + PFILES="${PFILES} platforms/aix/Makefile" + TAPEDRIVE="/dev/rmt0.1" + ;; +alpha) + DISTVER=`uname -r` + PTHREAD_LIB="-lpthread -lexc" + if test "${CC}" = "gcc" ; then + lld="lld" + llu="llu" + else + lld="ld" + llu="lu" + fi + TAPEDRIVE="/dev/nrmt0" + ;; +bsdi) + DISTVER=`uname -a |awk '{print $3}'` + TAPEDRIVE="/dev/nrmt0" + PTHREAD_LIB="-pthread" + CFLAGS="${CFLAGS} -pthread" + PSCMD="ps -ax -o pid,command" + lld="qd" + llu="qu" + PFILES="${PFILES} \ + platforms/bsdi/Makefile \ + platforms/bsdi/bacula-fd \ + platforms/bsdi/bacula-sd \ + platforms/bsdi/bacula-dir" + largefile_support="yes" + ;; +cygwin) + DISTVER=`uname -a |awk '{print $3}'` + TAPEDRIVE="/dev/nrst0" + WIN32=win32 + WCFLAGS="-mwindows" + WLDFLAGS="-mwindows" + ;; +darwin) + DISTVER=`uname -r` + TAPEDRIVE="/dev/nst0" + PSCMD="ps -e -o pid,command" + MACOSX=macosx + PFILES="${PFILES} \ + platforms/darwin/Makefile" + ;; +osx) + DISTVER=`uname -r` + TAPEDRIVE="/dev/nst0" + PSCMD="ps -e -o pid,command" + MACOSX=macosx + PFILES="${PFILES} \ + platforms/osx/Makefile" + ;; +debian) + if `test -f /etc/apt/sources.list && grep -q ubuntu /etc/apt/sources.list`; then + DISTNAME="ubuntu" + fi + DISTVER=`cat /etc/debian_version` + if test -f /etc/lsb-release ; then + . 
/etc/lsb-release + if test "x$DISTRIB_ID" != "x" ; then + DISTNAME=$DISTRIB_ID + fi + if test "x$DISTRIB_RELEASE" != "x" ; then + DISTVER=$DISTRIB_RELEASE + fi + fi + if test "$DISTNAME" = "Ubuntu" ; then + DISTNAME="ubuntu" + fi + TAPEDRIVE="/dev/nst0" + PSCMD="ps -e -o pid,command" + if test "$DISTNAME" = "ubuntu" ; then + PFILES="${PFILES} \ + platforms/ubuntu/Makefile \ + platforms/ubuntu/bacula-fd \ + platforms/ubuntu/bacula-sd \ + platforms/ubuntu/bacula-dir" + else + PFILES="${PFILES} \ + platforms/debian/Makefile \ + platforms/debian/bacula-fd \ + platforms/debian/bacula-sd \ + platforms/debian/bacula-dir" + fi + ;; +freebsd) + DISTVER=`uname -a |awk '{print $3}'` + VER=`echo $DISTVER | cut -c 1` + if test x$VER = x4 ; then + PTHREAD_LIB="${PTHREAD_LIBS:--pthread}" + CFLAGS="${CFLAGS} ${PTHREAD_CFLAGS:--pthread}" + fi + lld="qd" + llu="qu" + TAPEDRIVE="/dev/nrsa0" + PSCMD="ps -ax -o pid,command" + PFILES="${PFILES} \ + platforms/freebsd/Makefile \ + platforms/freebsd/bacula-fd \ + platforms/freebsd/bacula-sd \ + platforms/freebsd/bacula-dir" + largefile_support="yes" + ;; +hurd) + DISTVER=`uname -r` + TAPEDRIVE="/dev/nst0" + PSCMD="ps -e -o pid,command" + PFILES="${PFILES} \ + platforms/hurd/Makefile \ + platforms/hurd/bacula-fd \ + platforms/hurd/bacula-sd \ + platforms/hurd/bacula-dir" + ;; +hpux) + PSCMD="UNIX95=1; ps -e -o pid,comm" + CFLAGS="${CFLAGS} -D_XOPEN_SOURCE_EXTENDED=1" + DISTVER=`uname -r` + TAPEDRIVE="/dev/rmt/0hnb" + PTHREAD_LIB="-lpthread" + AC_DEFINE([_INCLUDE_LONGLONG]) + ;; +irix) + DISTVER=`uname -r` + TAPEDRIVE="/dev/rmt/0cbn" + PSCMD="ps -e -o pid,comm" + PFILES="${PFILES} \ + platforms/irix/Makefile \ + platforms/irix/bacula-fd \ + platforms/irix/bacula-sd \ + platforms/irix/bacula-dir" + ;; +netbsd) + DISTVER=`uname -a |awk '{print $3}'` + lld="qd" + llu="qu" + TAPEDRIVE="/dev/nrst0" + PSCMD="ps -ax -o pid,command" + PTHREAD_LIB="-pthread" + CFLAGS="${CFLAGS} -pthread" + ;; +openbsd) + DISTVER=`uname -a |awk '{print $3}'` + lld="qd" + llu="qu" + TAPEDRIVE="/dev/nrst0" + PSCMD="ps -ax -o pid,command" + PTHREAD_LIB="-pthread" + CFLAGS="${CFLAGS} -pthread" + PFILES="${PFILES} \ + platforms/openbsd/Makefile \ + platforms/openbsd/bacula-fd \ + platforms/openbsd/bacula-sd \ + platforms/openbsd/bacula-dir" + ;; +redhat) + if test -f /etc/whitebox-release ; then + f=/etc/whitebox-release + else + f=/etc/redhat-release + fi + if test `cat $f | grep release |\ + cut -f 3 -d ' '`x = "Enterprise"x ; then + DISTVER="Enterprise "`cat $f | grep release |\ + cut -f 6 -d ' '` + else + DISTVER=`cat /etc/redhat-release | grep release |\ + cut -f 5 -d ' '` + fi + TAPEDRIVE="/dev/nst0" + PSCMD="ps -e -o pid,command" + PFILES="${PFILES} \ + platforms/redhat/Makefile \ + platforms/redhat/bacula-fd \ + platforms/redhat/bacula-sd \ + platforms/redhat/bacula-dir + " + ;; +mandrake) + DISTVER=`cat /etc/mandrake-release | grep release |\ + cut -f 5 -d ' '` + TAPEDRIVE="/dev/nst0" + PSCMD="ps -e -o pid,command" + PFILES="${PFILES} \ + platforms/mandrake/Makefile \ + platforms/mandrake/bacula-fd \ + platforms/mandrake/bacula-sd \ + platforms/mandrake/bacula-dir \ + " + ;; +gentoo) + DISTVER=`awk '/version / {print $5}' < /etc/gentoo-release` + TAPEDRIVE="/dev/nst0" + PSCMD="ps -e -o pid,command" + PFILES="${PFILES} \ + platforms/gentoo/Makefile \ + platforms/gentoo/bacula-init \ + platforms/gentoo/bacula-fd \ + platforms/gentoo/bacula-sd \ + platforms/gentoo/bacula-dir" + ;; +slackware) + DISTVER=`cat /etc/slackware-version` + TAPEDRIVE="/dev/nst0" + PSCMD="ps -e -o pid,command" + 
PFILES="${PFILES} \ + platforms/slackware/Makefile \ + platforms/slackware/rc.bacula-fd \ + platforms/slackware/rc.bacula-sd \ + platforms/slackware/rc.bacula-dir\ + platforms/slackware/functions.bacula" + ;; +solaris) + DISTVER=`uname -r` + TAPEDRIVE="/dev/rmt/0cbn" + PSCMD="ps -e -o pid,comm" + PFILES="${PFILES} \ + platforms/solaris/Makefile \ + platforms/solaris/bacula-fd \ + platforms/solaris/bacula-sd \ + platforms/solaris/bacula-dir" + COMPRESS_MANPAGES= + case ${DISTVER} in + 5.5|5.6) + AC_DEFINE(HAVE_OLD_SOCKOPT) + AC_DEFINE(USE_THR_SETCONCURRENCY) + ;; + 5.7|5.8) + AC_DEFINE(USE_THR_SETCONCURRENCY) + ;; + 5.10) + AC_DEFINE(HAVE_SOLARIS10, 1, [Set if building on Solaris 10]) + ;; + *) + ;; + esac + LIBS="$LIBS -lresolv -lrt" + ;; +suse) + DISTVER=`cat /etc/SuSE-release |grep VERSION|\ + cut -f 3 -d ' '` + TAPEDRIVE="/dev/nst0" + PSCMD="ps -e -o pid,command" + PFILES="${PFILES} \ + platforms/suse/Makefile \ + platforms/suse/bacula-fd \ + platforms/suse/bacula-sd \ + platforms/suse/bacula-dir \ + platforms/suse/bacula" + ;; +suse5) + DISTNAME=suse + DISTVER=5.x + TAPEDRIVE="/dev/nst0" + PSCMD="ps -e -o pid,command" + PFILES="${PFILES} \ + platforms/suse/Makefile \ + platforms/suse/bacula-fd \ + platforms/suse/bacula-sd \ + platforms/suse/bacula-dir" + ;; +unknown) + DISTVER=unknown + TAPEDRIVE="/dev/nst0" + ;; +*) + echo " === Something went wrong. Unknown DISTNAME $DISTNAME ===" + ;; +esac + +dnl ------------------------------------------- +dnl systemd (default off) +dnl ------------------------------------------- +AC_MSG_CHECKING(for systemd support) +AC_ARG_WITH(systemd, + AC_HELP_STRING([--with-systemd@<:@=UNITDIR@:>@], [Include systemd support. UNITDIR is where systemd system .service files are located, default is to ask systemctl.]), + [ + if test "$withval" != "no"; then + if test "$withval" = "yes"; then + SYSTEMD_UNITDIR="/lib/systemd/system" + else + dnl Use user supplied path + SYSTEMD_UNITDIR="${withval}" + fi + + PFILES="${PFILES} \ + platforms/systemd/Makefile \ + platforms/systemd/bacula.conf \ + platforms/systemd/bacula-dir.service \ + platforms/systemd/bacula-fd.service \ + platforms/systemd/bacula-sd.service" + AC_DEFINE(HAVE_SYSTEMD, 1, [Define to 1 if systemd support should be enabled]) + AC_MSG_RESULT(yes) + support_systemd="yes" + else + AC_MSG_RESULT(no) + support_systemd="no" + fi + ],[ + support_systemd="no" + AC_MSG_RESULT(no) + ] +) +AC_SUBST(SYSTEMD_UNITDIR) + +AC_SUBST(hostname) + +LIBS="$PTHREAD_LIB $LIBS" + +AC_DEFINE_UNQUOTED(lld, "$lld") +AC_DEFINE_UNQUOTED(llu, "$llu") +AC_SUBST(TAPEDRIVE) +AC_SUBST(PSCMD) +AC_SUBST(WIN32) +AC_SUBST(MACOSX) +AC_SUBST(DISTNAME) +AC_SUBST(DISTVER) +AC_SUBST(COMPRESS_MANPAGES) + +dnl common parts of the Makefile +MCOMMON=./autoconf/Make.common +AC_SUBST_FILE(MCOMMON) + +dnl Insanity check +if test "x${subsysdir}" = "x${sbindir}" ; then + echo " " + echo " " + echo "You have set both --sbindir and --with-subsys-dir" + echo " equal to: ${subsysdir} " + echo "This is not permitted. Please reconfigure." + echo " " + echo "Aborting configuration ..." 
+ echo " " + echo " " + exit 1 +fi + + +AC_OUTPUT([autoconf/Make.common \ + Makefile \ + manpages/Makefile \ + scripts/btraceback \ + scripts/bconsole \ + scripts/baculabackupreport \ + scripts/bacula \ + scripts/bacula-ctl-dir \ + scripts/bacula-ctl-fd \ + scripts/bacula-ctl-sd \ + scripts/devel_bacula \ + scripts/Makefile \ + scripts/logrotate \ + scripts/mtx-changer \ + scripts/disk-changer \ + scripts/logwatch/Makefile \ + scripts/logwatch/logfile.bacula.conf \ + scripts/bat.desktop \ + scripts/bat.desktop.xsu \ + scripts/bat.desktop.consolehelper \ + scripts/bat.console_apps \ + scripts/bacula-tray-monitor.desktop \ + src/Makefile \ + src/host.h \ + src/console/Makefile \ + src/console/bconsole.conf \ + src/qt-console/bat.conf \ + src/qt-console/bat.pro \ + src/qt-console/bat.pro.mingw32 \ + src/qt-console/bat.pro.mingw64 \ + src/qt-console/install_conf_file \ + src/qt-console/tray-monitor/tray-monitor.conf \ + src/qt-console/tray-monitor/bacula-tray-monitor.conf \ + src/qt-console/tray-monitor/tray-monitor.pro \ + src/qt-console/tray-monitor/tray-monitor.pro.mingw32 \ + src/qt-console/tray-monitor/tray-monitor.pro.mingw64 \ + src/dird/Makefile \ + src/dird/bacula-dir.conf \ + src/lib/Makefile \ + src/stored/Makefile \ + src/stored/bacula-sd.conf \ + src/filed/Makefile \ + src/filed/bacula-fd.conf \ + src/cats/Makefile \ + src/cats/make_catalog_backup.pl \ + src/cats/make_catalog_backup \ + src/cats/delete_catalog_backup \ + src/cats/create_postgresql_database \ + src/cats/update_postgresql_tables \ + src/cats/make_postgresql_tables \ + src/cats/grant_postgresql_privileges \ + src/cats/drop_postgresql_tables \ + src/cats/drop_postgresql_database \ + src/cats/create_mysql_database \ + src/cats/update_mysql_tables \ + src/cats/make_mysql_tables \ + src/cats/grant_mysql_privileges \ + src/cats/drop_mysql_tables \ + src/cats/drop_mysql_database \ + src/cats/create_sqlite3_database \ + src/cats/update_sqlite3_tables \ + src/cats/make_sqlite3_tables \ + src/cats/grant_sqlite3_privileges \ + src/cats/drop_sqlite3_tables \ + src/cats/drop_sqlite3_database \ + src/cats/sqlite \ + src/cats/mysql \ + src/cats/create_bacula_database \ + src/cats/update_bacula_tables \ + src/cats/grant_bacula_privileges \ + src/cats/make_bacula_tables \ + src/cats/drop_bacula_tables \ + src/cats/drop_bacula_database \ + src/cats/install-default-backend \ + src/findlib/Makefile \ + src/tools/Makefile \ + src/plugins/fd/Makefile \ + src/plugins/sd/Makefile \ + src/plugins/dir/Makefile \ + po/Makefile.in \ + updatedb/update_mysql_tables \ + updatedb/update_sqlite3_tables \ + updatedb/update_postgresql_tables \ + updatedb/update_mysql_tables_9_to_10 \ + updatedb/update_sqlite3_tables_9_to_10 \ + updatedb/update_postgresql_tables_9_to_10 \ + updatedb/update_mysql_tables_10_to_11 \ + updatedb/update_sqlite3_tables_10_to_11 \ + updatedb/update_postgresql_tables_10_to_11 \ + updatedb/update_mysql_tables_11_to_12 \ + updatedb/update_sqlite3_tables_11_to_12 \ + updatedb/update_postgresql_tables_11_to_12 \ + examples/nagios/check_bacula/Makefile \ + platforms/rpms/redhat/bacula.spec \ + platforms/rpms/redhat/bacula-bat.spec \ + platforms/rpms/redhat/bacula-docs.spec \ + platforms/rpms/redhat/bacula-mtx.spec \ + platforms/rpms/suse/bacula.spec \ + platforms/rpms/suse/bacula-bat.spec \ + platforms/rpms/suse/bacula-docs.spec \ + platforms/rpms/suse/bacula-mtx.spec \ + $PFILES ], + [ ] +) + +if test "${support_bat}" = "yes" ; then + if test "x$QMAKE" = "xnone"; then + AC_MSG_ERROR([Could not find qmake $PATH. 
Check your Qt installation]) + fi + + cd src/qt-console + echo "Creating bat Makefile" + touch bat + chmod 755 bat + rm -f Makefile + rm -rf moc32 obj32 moc64 obj64 ui32 ui64 + $QMAKE + ${MAKE:-make} clean + + cd tray-monitor + echo "Creating tray-monitor Makefile" + rm -f Makefile + rm -rf moc32 obj32 moc64 obj64 ui32 ui64 + $QMAKE + ${MAKE:-make} clean + $QMAKE + ${MAKE:-make} clean + cd ${BUILD_DIR} +fi + +dnl +dnl if CC is gcc, we can rebuild the dependencies (since the depend rule +dnl requires gcc). If it's not, don't rebuild dependencies +dnl +if test X"$GCC" = "Xyes" ; then + echo "Doing make of dependencies" + ${MAKE:-make} depend +fi + +cd src/qt-console +chmod 755 install_conf_file build-depkgs-qt-console +cd ${BUILD_DIR} + +cd scripts +chmod 755 bacula btraceback mtx-changer +chmod 755 bconsole disk-changer devel_bacula logrotate +cd .. + +c=updatedb +chmod 755 $c/update_mysql_tables_10_to_11 $c/update_sqlite3_tables_10_to_11 +chmod 755 $c/update_postgresql_tables_10_to_11 +chmod 755 $c/update_mysql_tables_11_to_12 $c/update_sqlite3_tables_11_to_12 +chmod 755 $c/update_postgresql_tables_11_to_12 + + +c=src/cats + +chmod 755 $c/create_bacula_database $c/update_bacula_tables $c/make_bacula_tables +chmod 755 $c/grant_bacula_privileges $c/drop_bacula_tables $c/drop_bacula_database + +chmod 755 $c/create_mysql_database $c/update_mysql_tables $c/make_mysql_tables +chmod 755 $c/grant_mysql_privileges $c/drop_mysql_tables $c/drop_mysql_database + +chmod 755 $c/create_sqlite3_database $c/update_sqlite3_tables $c/make_sqlite3_tables +chmod 755 $c/grant_sqlite3_privileges $c/drop_sqlite3_tables $c/drop_sqlite3_database + +chmod 755 $c/create_postgresql_database $c/update_postgresql_tables $c/make_postgresql_tables +chmod 755 $c/grant_postgresql_privileges $c/drop_postgresql_tables $c/drop_postgresql_database + +chmod 755 $c/make_catalog_backup $c/delete_catalog_backup $c/make_catalog_backup.pl +chmod 755 $c/sqlite +chmod 755 $c/mysql + +chmod 755 $c/install-default-backend + +chmod 755 src/win32/build-depkgs-mingw32 src/win32/build-depkgs-mingw-w64 + +if test "x$ac_cv_sys_largefile_CFLAGS" != "xno" ; then + largefile_support="yes" +fi + +dnl Only try to find out the version number of the compiler when we know its some kind of GCC compiler +if test X"$GCC" = "Xyes" ; then + dnl + dnl A whole lot of hand springs to get the compiler version. 
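The post-configure steps above call ${MAKE:-make}, so the make program can be overridden from the environment, and they require a working qmake when bat support is enabled. A sketch for a system whose GNU make is installed as gmake (the --enable-bat option name is an assumption inferred from the support_bat variable; it is defined elsewhere in configure.in):

    MAKE=gmake ./configure --enable-bat
    gmake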
+ dnl This is because gcc changed the output in version 3.0 + dnl + CCVERSION=`${CC} --version | tr '\n' ' ' | cut -f 3 -d ' '` + if test "x${CCVERSION}" = "x" ; then + CCVERSION=`${CC} --version | tr '\n' ' ' | cut -f 1 -d ' '` + fi + CXXVERSION=`${CXX} --version | tr '\n' ' ' | cut -f 3 -d ' '` + if test x"${CXXVERSION}" = x ; then + CXXVERSION=`${CXX} --version | tr '\n' ' ' | cut -f 1 -d ' '` + fi +fi + +# clean up any old junk +echo " " +echo "Cleaning up" +echo " " +${MAKE:-make} clean + +echo " +Configuration on `date`: + + Host: ${host}${post_host} -- ${DISTNAME} ${DISTVER} + Bacula version: ${BACULA} ${VERSION} (${DATE}) + Source code location: ${srcdir} + Install binaries: ${sbindir} + Install libraries: ${libdir} + Install config files: ${sysconfdir} + Scripts directory: ${scriptdir} + Archive directory: ${archivedir} + Working directory: ${working_dir} + PID directory: ${piddir} + Subsys directory: ${subsysdir} + Man directory: ${mandir} + Data directory: ${datarootdir} + Plugin directory: ${plugindir} + C Compiler: ${CC} ${CCVERSION} + C++ Compiler: ${CXX} ${CXXVERSION} + Compiler flags: ${WCFLAGS} ${CFLAGS} + Linker flags: ${WLDFLAGS} ${LDFLAGS} + Libraries: ${LIBS} + Statically Linked Tools: ${support_static_tools} + Statically Linked FD: ${support_static_fd} + Statically Linked SD: ${support_static_sd} + Statically Linked DIR: ${support_static_dir} + Statically Linked CONS: ${support_static_cons} + Database backends: ${db_backends} + Database port: ${db_port} + Database name: ${db_name} + Database user: ${db_user} + Database SSL options: ${db_ssl_options} + + Job Output Email: ${job_email} + Traceback Email: ${dump_email} + SMTP Host Address: ${smtp_host} + + Director Port: ${dir_port} + File daemon Port: ${fd_port} + Storage daemon Port: ${sd_port} + + Director User: ${dir_user} + Director Group: ${dir_group} + Storage Daemon User: ${sd_user} + Storage DaemonGroup: ${sd_group} + File Daemon User: ${fd_user} + File Daemon Group: ${fd_group} + + Large file support: $largefile_support + Bacula conio support: ${got_conio} ${CONS_LIBS} + readline support: ${got_readline} ${PRTREADLINE_SRC} + TCP Wrappers support: ${TCPW_MSG} ${WRAPLIBS} + TLS support: ${support_tls} + Encryption support: ${support_crypto} + ZLIB support: ${have_zlib} + LZO support: ${have_lzo} + enable-smartalloc: ${support_smartalloc} + enable-lockmgr: ${support_lockmgr} + bat support: ${support_bat} + client-only: ${build_client_only} + build-dird: ${build_dird} + build-stored: ${build_stored} + Plugin support: ${have_plugins} + AFS support: ${have_afs} + ACL support: ${have_acl} + XATTR support: ${have_xattr} + systemd support: ${support_systemd} ${SYSTEMD_UNITDIR} + Batch insert enabled: ${batch_insert_db_backends} + + " > config.out + +# create a small shell script useful for support with +# configure options and config.out info +cat > scripts/bacula_config << EOF +#!/bin/sh +cat << __EOC__ +$ $0 $ac_configure_args +EOF +cat config.out >> scripts/bacula_config +echo __EOC__ >> scripts/bacula_config +chmod 755 scripts/bacula_config + +cat config.out diff --git a/autoconf/gettext-macros/codeset.m4 b/autoconf/gettext-macros/codeset.m4 new file mode 100644 index 00000000..a6e67ec4 --- /dev/null +++ b/autoconf/gettext-macros/codeset.m4 @@ -0,0 +1,21 @@ +# codeset.m4 serial AM1 (gettext-0.10.40) +dnl Copyright (C) 2000-2002 Free Software Foundation, Inc. 
+dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +dnl From Bruno Haible. + +AC_DEFUN([AM_LANGINFO_CODESET], +[ + AC_CACHE_CHECK([for nl_langinfo and CODESET], am_cv_langinfo_codeset, + [AC_TRY_LINK([#include ], + [char* cs = nl_langinfo(CODESET);], + am_cv_langinfo_codeset=yes, + am_cv_langinfo_codeset=no) + ]) + if test $am_cv_langinfo_codeset = yes; then + AC_DEFINE(HAVE_LANGINFO_CODESET, 1, + [Define if you have and nl_langinfo(CODESET).]) + fi +]) diff --git a/autoconf/gettext-macros/gettext.m4 b/autoconf/gettext-macros/gettext.m4 new file mode 100644 index 00000000..08f057ce --- /dev/null +++ b/autoconf/gettext-macros/gettext.m4 @@ -0,0 +1,549 @@ +# gettext.m4 serial 37 (gettext-0.14.4) +dnl Copyright (C) 1995-2005 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. +dnl +dnl This file can can be used in projects which are not available under +dnl the GNU General Public License or the GNU Library General Public +dnl License but which still want to provide support for the GNU gettext +dnl functionality. +dnl Please note that the actual code of the GNU gettext library is covered +dnl by the GNU Library General Public License, and the rest of the GNU +dnl gettext package package is covered by the GNU General Public License. +dnl They are *not* in the public domain. + +dnl Authors: +dnl Ulrich Drepper , 1995-2000. +dnl Bruno Haible , 2000-2003. + +dnl Macro to add for using GNU gettext. + +dnl Usage: AM_GNU_GETTEXT([INTLSYMBOL], [NEEDSYMBOL], [INTLDIR]). +dnl INTLSYMBOL can be one of 'external', 'no-libtool', 'use-libtool'. The +dnl default (if it is not specified or empty) is 'no-libtool'. +dnl INTLSYMBOL should be 'external' for packages with no intl directory, +dnl and 'no-libtool' or 'use-libtool' for packages with an intl directory. +dnl If INTLSYMBOL is 'use-libtool', then a libtool library +dnl $(top_builddir)/intl/libintl.la will be created (shared and/or static, +dnl depending on --{enable,disable}-{shared,static} and on the presence of +dnl AM-DISABLE-SHARED). If INTLSYMBOL is 'no-libtool', a static library +dnl $(top_builddir)/intl/libintl.a will be created. +dnl If NEEDSYMBOL is specified and is 'need-ngettext', then GNU gettext +dnl implementations (in libc or libintl) without the ngettext() function +dnl will be ignored. If NEEDSYMBOL is specified and is +dnl 'need-formatstring-macros', then GNU gettext implementations that don't +dnl support the ISO C 99 formatstring macros will be ignored. +dnl INTLDIR is used to find the intl libraries. If empty, +dnl the value `$(top_builddir)/intl/' is used. +dnl +dnl The result of the configuration is one of three cases: +dnl 1) GNU gettext, as included in the intl subdirectory, will be compiled +dnl and used. +dnl Catalog format: GNU --> install in $(datadir) +dnl Catalog extension: .mo after installation, .gmo in source tree +dnl 2) GNU gettext has been found in the system's C library. +dnl Catalog format: GNU --> install in $(datadir) +dnl Catalog extension: .mo after installation, .gmo in source tree +dnl 3) No internationalization, always use English msgid. +dnl Catalog format: none +dnl Catalog extension: none +dnl If INTLSYMBOL is 'external', only cases 2 and 3 can occur. 
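As a usage sketch for the macro documented above (the version passed to AM_GNU_GETTEXT_VERSION is illustrative; the serial line suggests these files came from gettext 0.14.4):

    AM_GNU_GETTEXT([external], [need-ngettext])
    AM_GNU_GETTEXT_VERSION([0.14.4])

With INTLSYMBOL set to 'external' no bundled intl/ directory is built, which is why only cases 2 and 3 above can occur.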
+dnl The use of .gmo is historical (it was needed to avoid overwriting the +dnl GNU format catalogs when building on a platform with an X/Open gettext), +dnl but we keep it in order not to force irrelevant filename changes on the +dnl maintainers. +dnl +AC_DEFUN([AM_GNU_GETTEXT], +[ + dnl Argument checking. + ifelse([$1], [], , [ifelse([$1], [external], , [ifelse([$1], [no-libtool], , [ifelse([$1], [use-libtool], , + [errprint([ERROR: invalid first argument to AM_GNU_GETTEXT +])])])])]) + ifelse([$2], [], , [ifelse([$2], [need-ngettext], , [ifelse([$2], [need-formatstring-macros], , + [errprint([ERROR: invalid second argument to AM_GNU_GETTEXT +])])])]) + define([gt_included_intl], ifelse([$1], [external], [no], [yes])) + define([gt_libtool_suffix_prefix], ifelse([$1], [use-libtool], [l], [])) + + AC_REQUIRE([AM_PO_SUBDIRS])dnl + ifelse(gt_included_intl, yes, [ + AC_REQUIRE([AM_INTL_SUBDIR])dnl + ]) + + dnl Prerequisites of AC_LIB_LINKFLAGS_BODY. + AC_REQUIRE([AC_LIB_PREPARE_PREFIX]) + AC_REQUIRE([AC_LIB_RPATH]) + + dnl Sometimes libintl requires libiconv, so first search for libiconv. + dnl Ideally we would do this search only after the + dnl if test "$USE_NLS" = "yes"; then + dnl if test "$gt_cv_func_gnugettext_libc" != "yes"; then + dnl tests. But if configure.in invokes AM_ICONV after AM_GNU_GETTEXT + dnl the configure script would need to contain the same shell code + dnl again, outside any 'if'. There are two solutions: + dnl - Invoke AM_ICONV_LINKFLAGS_BODY here, outside any 'if'. + dnl - Control the expansions in more detail using AC_PROVIDE_IFELSE. + dnl Since AC_PROVIDE_IFELSE is only in autoconf >= 2.52 and not + dnl documented, we avoid it. + ifelse(gt_included_intl, yes, , [ + AC_REQUIRE([AM_ICONV_LINKFLAGS_BODY]) + ]) + + dnl Sometimes, on MacOS X, libintl requires linking with CoreFoundation. + gt_INTL_MACOSX + + dnl Set USE_NLS. + AM_NLS + + ifelse(gt_included_intl, yes, [ + BUILD_INCLUDED_LIBINTL=no + USE_INCLUDED_LIBINTL=no + ]) + LIBINTL= + LTLIBINTL= + POSUB= + + dnl If we use NLS figure out what method + if test "$USE_NLS" = "yes"; then + gt_use_preinstalled_gnugettext=no + ifelse(gt_included_intl, yes, [ + AC_MSG_CHECKING([whether included gettext is requested]) + AC_ARG_WITH(included-gettext, + AC_HELP_STRING([--with-included-gettext], [use the GNU gettext library included here]), + nls_cv_force_use_gnu_gettext=$withval, + nls_cv_force_use_gnu_gettext=no) + AC_MSG_RESULT($nls_cv_force_use_gnu_gettext) + + nls_cv_use_gnu_gettext="$nls_cv_force_use_gnu_gettext" + if test "$nls_cv_force_use_gnu_gettext" != "yes"; then + ]) + dnl User does not insist on using GNU NLS library. Figure out what + dnl to use. If GNU gettext is available we use this. Else we have + dnl to fall back to GNU NLS library. + + dnl Add a version number to the cache macros. + define([gt_api_version], ifelse([$2], [need-formatstring-macros], 3, ifelse([$2], [need-ngettext], 2, 1))) + define([gt_cv_func_gnugettext_libc], [gt_cv_func_gnugettext]gt_api_version[_libc]) + define([gt_cv_func_gnugettext_libintl], [gt_cv_func_gnugettext]gt_api_version[_libintl]) + + AC_CACHE_CHECK([for GNU gettext in libc], gt_cv_func_gnugettext_libc, + [AC_TRY_LINK([#include +]ifelse([$2], [need-formatstring-macros], +[#ifndef __GNU_GETTEXT_SUPPORTED_REVISION +#define __GNU_GETTEXT_SUPPORTED_REVISION(major) ((major) == 0 ? 
0 : -1) +#endif +changequote(,)dnl +typedef int array [2 * (__GNU_GETTEXT_SUPPORTED_REVISION(0) >= 1) - 1]; +changequote([,])dnl +], [])[extern int _nl_msg_cat_cntr; +extern int *_nl_domain_bindings;], + [bindtextdomain ("", ""); +return * gettext ("")]ifelse([$2], [need-ngettext], [ + * ngettext ("", "", 0)], [])[ + _nl_msg_cat_cntr + *_nl_domain_bindings], + gt_cv_func_gnugettext_libc=yes, + gt_cv_func_gnugettext_libc=no)]) + + if test "$gt_cv_func_gnugettext_libc" != "yes"; then + dnl Sometimes libintl requires libiconv, so first search for libiconv. + ifelse(gt_included_intl, yes, , [ + AM_ICONV_LINK + ]) + dnl Search for libintl and define LIBINTL, LTLIBINTL and INCINTL + dnl accordingly. Don't use AC_LIB_LINKFLAGS_BODY([intl],[iconv]) + dnl because that would add "-liconv" to LIBINTL and LTLIBINTL + dnl even if libiconv doesn't exist. + AC_LIB_LINKFLAGS_BODY([intl]) + AC_CACHE_CHECK([for GNU gettext in libintl], + gt_cv_func_gnugettext_libintl, + [gt_save_CPPFLAGS="$CPPFLAGS" + CPPFLAGS="$CPPFLAGS $INCINTL" + gt_save_LIBS="$LIBS" + LIBS="$LIBS $LIBINTL" + dnl Now see whether libintl exists and does not depend on libiconv. + AC_TRY_LINK([#include +]ifelse([$2], [need-formatstring-macros], +[#ifndef __GNU_GETTEXT_SUPPORTED_REVISION +#define __GNU_GETTEXT_SUPPORTED_REVISION(major) ((major) == 0 ? 0 : -1) +#endif +changequote(,)dnl +typedef int array [2 * (__GNU_GETTEXT_SUPPORTED_REVISION(0) >= 1) - 1]; +changequote([,])dnl +], [])[extern int _nl_msg_cat_cntr; +extern +#ifdef __cplusplus +"C" +#endif +const char *_nl_expand_alias (const char *);], + [bindtextdomain ("", ""); +return * gettext ("")]ifelse([$2], [need-ngettext], [ + * ngettext ("", "", 0)], [])[ + _nl_msg_cat_cntr + *_nl_expand_alias ("")], + gt_cv_func_gnugettext_libintl=yes, + gt_cv_func_gnugettext_libintl=no) + dnl Now see whether libintl exists and depends on libiconv. + if test "$gt_cv_func_gnugettext_libintl" != yes && test -n "$LIBICONV"; then + LIBS="$LIBS $LIBICONV" + AC_TRY_LINK([#include +]ifelse([$2], [need-formatstring-macros], +[#ifndef __GNU_GETTEXT_SUPPORTED_REVISION +#define __GNU_GETTEXT_SUPPORTED_REVISION(major) ((major) == 0 ? 0 : -1) +#endif +changequote(,)dnl +typedef int array [2 * (__GNU_GETTEXT_SUPPORTED_REVISION(0) >= 1) - 1]; +changequote([,])dnl +], [])[extern int _nl_msg_cat_cntr; +extern +#ifdef __cplusplus +"C" +#endif +const char *_nl_expand_alias (const char *);], + [bindtextdomain ("", ""); +return * gettext ("")]ifelse([$2], [need-ngettext], [ + * ngettext ("", "", 0)], [])[ + _nl_msg_cat_cntr + *_nl_expand_alias ("")], + [LIBINTL="$LIBINTL $LIBICONV" + LTLIBINTL="$LTLIBINTL $LTLIBICONV" + gt_cv_func_gnugettext_libintl=yes + ]) + fi + CPPFLAGS="$gt_save_CPPFLAGS" + LIBS="$gt_save_LIBS"]) + fi + + dnl If an already present or preinstalled GNU gettext() is found, + dnl use it. But if this macro is used in GNU gettext, and GNU + dnl gettext is already preinstalled in libintl, we update this + dnl libintl. (Cf. the install rule in intl/Makefile.in.) + if test "$gt_cv_func_gnugettext_libc" = "yes" \ + || { test "$gt_cv_func_gnugettext_libintl" = "yes" \ + && test "$PACKAGE" != gettext-runtime \ + && test "$PACKAGE" != gettext-tools; }; then + gt_use_preinstalled_gnugettext=yes + else + dnl Reset the values set by searching for libintl. + LIBINTL= + LTLIBINTL= + INCINTL= + fi + + ifelse(gt_included_intl, yes, [ + if test "$gt_use_preinstalled_gnugettext" != "yes"; then + dnl GNU gettext is not found in the C library. + dnl Fall back on included GNU gettext library. 
+ nls_cv_use_gnu_gettext=yes + fi + fi + + if test "$nls_cv_use_gnu_gettext" = "yes"; then + dnl Mark actions used to generate GNU NLS library. + BUILD_INCLUDED_LIBINTL=yes + USE_INCLUDED_LIBINTL=yes + LIBINTL="ifelse([$3],[],\${top_builddir}/intl,[$3])/libintl.[]gt_libtool_suffix_prefix[]a $LIBICONV" + LTLIBINTL="ifelse([$3],[],\${top_builddir}/intl,[$3])/libintl.[]gt_libtool_suffix_prefix[]a $LTLIBICONV" + LIBS=`echo " $LIBS " | sed -e 's/ -lintl / /' -e 's/^ //' -e 's/ $//'` + fi + + CATOBJEXT= + if test "$gt_use_preinstalled_gnugettext" = "yes" \ + || test "$nls_cv_use_gnu_gettext" = "yes"; then + dnl Mark actions to use GNU gettext tools. + CATOBJEXT=.gmo + fi + ]) + + if test -n "$INTL_MACOSX_LIBS"; then + if test "$gt_use_preinstalled_gnugettext" = "yes" \ + || test "$nls_cv_use_gnu_gettext" = "yes"; then + dnl Some extra flags are needed during linking. + LIBINTL="$LIBINTL $INTL_MACOSX_LIBS" + LTLIBINTL="$LTLIBINTL $INTL_MACOSX_LIBS" + fi + fi + + if test "$gt_use_preinstalled_gnugettext" = "yes" \ + || test "$nls_cv_use_gnu_gettext" = "yes"; then + AC_DEFINE(ENABLE_NLS, 1, + [Define to 1 if translation of program messages to the user's native language + is requested.]) + else + USE_NLS=no + fi + fi + + AC_MSG_CHECKING([whether to use NLS]) + AC_MSG_RESULT([$USE_NLS]) + if test "$USE_NLS" = "yes"; then + AC_MSG_CHECKING([where the gettext function comes from]) + if test "$gt_use_preinstalled_gnugettext" = "yes"; then + if test "$gt_cv_func_gnugettext_libintl" = "yes"; then + gt_source="external libintl" + else + gt_source="libc" + fi + else + gt_source="included intl directory" + fi + AC_MSG_RESULT([$gt_source]) + fi + + if test "$USE_NLS" = "yes"; then + + if test "$gt_use_preinstalled_gnugettext" = "yes"; then + if test "$gt_cv_func_gnugettext_libintl" = "yes"; then + AC_MSG_CHECKING([how to link with libintl]) + AC_MSG_RESULT([$LIBINTL]) + AC_LIB_APPENDTOVAR([CPPFLAGS], [$INCINTL]) + fi + + dnl For backward compatibility. Some packages may be using this. + AC_DEFINE(HAVE_GETTEXT, 1, + [Define if the GNU gettext() function is already present or preinstalled.]) + AC_DEFINE(HAVE_DCGETTEXT, 1, + [Define if the GNU dcgettext() function is already present or preinstalled.]) + fi + + dnl We need to process the po/ directory. + POSUB=po + fi + + ifelse(gt_included_intl, yes, [ + dnl If this is used in GNU gettext we have to set BUILD_INCLUDED_LIBINTL + dnl to 'yes' because some of the testsuite requires it. + if test "$PACKAGE" = gettext-runtime || test "$PACKAGE" = gettext-tools; then + BUILD_INCLUDED_LIBINTL=yes + fi + + dnl Make all variables we use known to autoconf. + AC_SUBST(BUILD_INCLUDED_LIBINTL) + AC_SUBST(USE_INCLUDED_LIBINTL) + AC_SUBST(CATOBJEXT) + + dnl For backward compatibility. Some configure.ins may be using this. + nls_cv_header_intl= + nls_cv_header_libgt= + + dnl For backward compatibility. Some Makefiles may be using this. + DATADIRNAME=share + AC_SUBST(DATADIRNAME) + + dnl For backward compatibility. Some Makefiles may be using this. + INSTOBJEXT=.mo + AC_SUBST(INSTOBJEXT) + + dnl For backward compatibility. Some Makefiles may be using this. + GENCAT=gencat + AC_SUBST(GENCAT) + + dnl For backward compatibility. Some Makefiles may be using this. + INTLOBJS= + if test "$USE_INCLUDED_LIBINTL" = yes; then + INTLOBJS="\$(GETTOBJS)" + fi + AC_SUBST(INTLOBJS) + + dnl Enable libtool support if the surrounding package wishes it. + INTL_LIBTOOL_SUFFIX_PREFIX=gt_libtool_suffix_prefix + AC_SUBST(INTL_LIBTOOL_SUFFIX_PREFIX) + ]) + + dnl For backward compatibility. 
Some Makefiles may be using this. + INTLLIBS="$LIBINTL" + AC_SUBST(INTLLIBS) + + dnl Make all documented variables known to autoconf. + AC_SUBST(LIBINTL) + AC_SUBST(LTLIBINTL) + AC_SUBST(POSUB) +]) + + +dnl Checks for all prerequisites of the intl subdirectory, +dnl except for INTL_LIBTOOL_SUFFIX_PREFIX (and possibly LIBTOOL), INTLOBJS, +dnl USE_INCLUDED_LIBINTL, BUILD_INCLUDED_LIBINTL. +AC_DEFUN([AM_INTL_SUBDIR], +[ + AC_REQUIRE([AC_PROG_INSTALL])dnl + AC_REQUIRE([AM_MKINSTALLDIRS])dnl + AC_REQUIRE([AC_PROG_CC])dnl + AC_REQUIRE([AC_CANONICAL_HOST])dnl + AC_REQUIRE([gt_GLIBC2])dnl + AC_REQUIRE([AC_PROG_RANLIB])dnl + AC_REQUIRE([AC_ISC_POSIX])dnl + AC_REQUIRE([AC_HEADER_STDC])dnl + AC_REQUIRE([AC_C_CONST])dnl + AC_REQUIRE([bh_C_SIGNED])dnl + AC_REQUIRE([AC_C_INLINE])dnl + AC_REQUIRE([AC_TYPE_OFF_T])dnl + AC_REQUIRE([AC_TYPE_SIZE_T])dnl + AC_REQUIRE([gl_AC_TYPE_LONG_LONG])dnl + AC_REQUIRE([gt_TYPE_LONGDOUBLE])dnl + AC_REQUIRE([gt_TYPE_WCHAR_T])dnl + AC_REQUIRE([gt_TYPE_WINT_T])dnl + AC_REQUIRE([gl_AC_HEADER_INTTYPES_H]) + AC_REQUIRE([gl_AC_HEADER_STDINT_H]) + AC_REQUIRE([gt_TYPE_INTMAX_T]) + AC_REQUIRE([gt_PRINTF_POSIX]) + AC_REQUIRE([AC_FUNC_ALLOCA])dnl + AC_REQUIRE([AC_FUNC_MMAP])dnl + AC_REQUIRE([gl_GLIBC21])dnl + AC_REQUIRE([gt_INTDIV0])dnl + AC_REQUIRE([gl_AC_TYPE_UINTMAX_T])dnl + AC_REQUIRE([gt_HEADER_INTTYPES_H])dnl + AC_REQUIRE([gt_INTTYPES_PRI])dnl + AC_REQUIRE([gl_XSIZE])dnl + AC_REQUIRE([gt_INTL_MACOSX])dnl + + AC_CHECK_TYPE([ptrdiff_t], , + [AC_DEFINE([ptrdiff_t], [long], + [Define as the type of the result of subtracting two pointers, if the system doesn't define it.]) + ]) + AC_CHECK_HEADERS([argz.h limits.h locale.h nl_types.h malloc.h stddef.h \ +stdlib.h string.h unistd.h sys/param.h]) + AC_CHECK_FUNCS([asprintf fwprintf getcwd getegid geteuid getgid getuid \ +mempcpy munmap putenv setenv setlocale snprintf stpcpy strcasecmp strdup \ +strtoul tsearch wcslen __argz_count __argz_stringify __argz_next \ +__fsetlocking]) + + dnl Use the _snprintf function only if it is declared (because on NetBSD it + dnl is defined as a weak alias of snprintf; we prefer to use the latter). + gt_CHECK_DECL(_snprintf, [#include ]) + gt_CHECK_DECL(_snwprintf, [#include ]) + + dnl Use the *_unlocked functions only if they are declared. + dnl (because some of them were defined without being declared in Solaris + dnl 2.5.1 but were removed in Solaris 2.6, whereas we want binaries built + dnl on Solaris 2.5.1 to run on Solaris 2.6). + dnl Don't use AC_CHECK_DECLS because it isn't supported in autoconf-2.13. + gt_CHECK_DECL(feof_unlocked, [#include ]) + gt_CHECK_DECL(fgets_unlocked, [#include ]) + gt_CHECK_DECL(getc_unlocked, [#include ]) + + case $gt_cv_func_printf_posix in + *yes) HAVE_POSIX_PRINTF=1 ;; + *) HAVE_POSIX_PRINTF=0 ;; + esac + AC_SUBST([HAVE_POSIX_PRINTF]) + if test "$ac_cv_func_asprintf" = yes; then + HAVE_ASPRINTF=1 + else + HAVE_ASPRINTF=0 + fi + AC_SUBST([HAVE_ASPRINTF]) + if test "$ac_cv_func_snprintf" = yes; then + HAVE_SNPRINTF=1 + else + HAVE_SNPRINTF=0 + fi + AC_SUBST([HAVE_SNPRINTF]) + if test "$ac_cv_func_wprintf" = yes; then + HAVE_WPRINTF=1 + else + HAVE_WPRINTF=0 + fi + AC_SUBST([HAVE_WPRINTF]) + + AM_ICONV + AM_LANGINFO_CODESET + if test $ac_cv_header_locale_h = yes; then + gt_LC_MESSAGES + fi + + if test -n "$INTL_MACOSX_LIBS"; then + CPPFLAGS="$CPPFLAGS -I/System/Library/Frameworks/CoreFoundation.framework/Headers" + fi + + dnl intl/plural.c is generated from intl/plural.y. It requires bison, + dnl because plural.y uses bison specific features. 
It requires at least + dnl bison-1.26 because earlier versions generate a plural.c that doesn't + dnl compile. + dnl bison is only needed for the maintainer (who touches plural.y). But in + dnl order to avoid separate Makefiles or --enable-maintainer-mode, we put + dnl the rule in general Makefile. Now, some people carelessly touch the + dnl files or have a broken "make" program, hence the plural.c rule will + dnl sometimes fire. To avoid an error, defines BISON to ":" if it is not + dnl present or too old. + AC_CHECK_PROGS([INTLBISON], [bison]) + if test -z "$INTLBISON"; then + ac_verc_fail=yes + else + dnl Found it, now check the version. + AC_MSG_CHECKING([version of bison]) +changequote(<<,>>)dnl + ac_prog_version=`$INTLBISON --version 2>&1 | sed -n 's/^.*GNU Bison.* \([0-9]*\.[0-9.]*\).*$/\1/p'` + case $ac_prog_version in + '') ac_prog_version="v. ?.??, bad"; ac_verc_fail=yes;; + 1.2[6-9]* | 1.[3-9][0-9]* | [2-9].*) +changequote([,])dnl + ac_prog_version="$ac_prog_version, ok"; ac_verc_fail=no;; + *) ac_prog_version="$ac_prog_version, bad"; ac_verc_fail=yes;; + esac + AC_MSG_RESULT([$ac_prog_version]) + fi + if test $ac_verc_fail = yes; then + INTLBISON=: + fi +]) + + +dnl Checks for special options needed on MacOS X. +dnl Defines INTL_MACOSX_LIBS. +AC_DEFUN([gt_INTL_MACOSX], +[ + dnl Check for API introduced in MacOS X 10.2. + AC_CACHE_CHECK([for CFPreferencesCopyAppValue], + gt_cv_func_CFPreferencesCopyAppValue, + [gt_save_CPPFLAGS="$CPPFLAGS" + CPPFLAGS="$CPPFLAGS -I/System/Library/Frameworks/CoreFoundation.framework/Headers" + gt_save_LIBS="$LIBS" + LIBS="$LIBS -framework CoreFoundation" + AC_TRY_LINK([#include ], + [CFPreferencesCopyAppValue(NULL, NULL)], + [gt_cv_func_CFPreferencesCopyAppValue=yes], + [gt_cv_func_CFPreferencesCopyAppValue=no]) + CPPFLAGS="$gt_save_CPPFLAGS" + LIBS="$gt_save_LIBS"]) + if test $gt_cv_func_CFPreferencesCopyAppValue = yes; then + AC_DEFINE([HAVE_CFPREFERENCESCOPYAPPVALUE], 1, + [Define to 1 if you have the MacOS X function CFPreferencesCopyAppValue in the CoreFoundation framework.]) + fi + dnl Check for API introduced in MacOS X 10.3. + AC_CACHE_CHECK([for CFLocaleCopyCurrent], gt_cv_func_CFLocaleCopyCurrent, + [gt_save_CPPFLAGS="$CPPFLAGS" + CPPFLAGS="$CPPFLAGS -I/System/Library/Frameworks/CoreFoundation.framework/Headers" + gt_save_LIBS="$LIBS" + LIBS="$LIBS -framework CoreFoundation" + AC_TRY_LINK([#include ], [CFLocaleCopyCurrent();], + [gt_cv_func_CFLocaleCopyCurrent=yes], + [gt_cv_func_CFLocaleCopyCurrent=no]) + CPPFLAGS="$gt_save_CPPFLAGS" + LIBS="$gt_save_LIBS"]) + if test $gt_cv_func_CFLocaleCopyCurrent = yes; then + AC_DEFINE([HAVE_CFLOCALECOPYCURRENT], 1, + [Define to 1 if you have the MacOS X function CFLocaleCopyCurrent in the CoreFoundation framework.]) + fi + INTL_MACOSX_LIBS= + if test $gt_cv_func_CFPreferencesCopyAppValue = yes || test $gt_cv_func_CFLocaleCopyCurrent = yes; then + INTL_MACOSX_LIBS="-Wl,-framework -Wl,CoreFoundation" + fi + AC_SUBST([INTL_MACOSX_LIBS]) +]) + + +dnl gt_CHECK_DECL(FUNC, INCLUDES) +dnl Check whether a function is declared. 
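A typical call, sketched for illustration (stdio.h is the header one would normally supply for the stdio *_unlocked functions probed earlier):

    gt_CHECK_DECL(getc_unlocked, [#include <stdio.h>])

This ends up defining HAVE_DECL_GETC_UNLOCKED to 1 or 0, depending on whether the declaration is visible.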
+AC_DEFUN([gt_CHECK_DECL], +[ + AC_CACHE_CHECK([whether $1 is declared], ac_cv_have_decl_$1, + [AC_TRY_COMPILE([$2], [ +#ifndef $1 + char *p = (char *) $1; +#endif +], ac_cv_have_decl_$1=yes, ac_cv_have_decl_$1=no)]) + if test $ac_cv_have_decl_$1 = yes; then + gt_value=1 + else + gt_value=0 + fi + AC_DEFINE_UNQUOTED([HAVE_DECL_]translit($1, [a-z], [A-Z]), [$gt_value], + [Define to 1 if you have the declaration of `$1', and to 0 if you don't.]) +]) + + +dnl Usage: AM_GNU_GETTEXT_VERSION([gettext-version]) +AC_DEFUN([AM_GNU_GETTEXT_VERSION], []) diff --git a/autoconf/gettext-macros/glibc2.m4 b/autoconf/gettext-macros/glibc2.m4 new file mode 100644 index 00000000..e8f5bfe6 --- /dev/null +++ b/autoconf/gettext-macros/glibc2.m4 @@ -0,0 +1,30 @@ +# glibc2.m4 serial 1 +dnl Copyright (C) 2000-2002, 2004 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +# Test for the GNU C Library, version 2.0 or newer. +# From Bruno Haible. + +AC_DEFUN([gt_GLIBC2], + [ + AC_CACHE_CHECK(whether we are using the GNU C Library 2 or newer, + ac_cv_gnu_library_2, + [AC_EGREP_CPP([Lucky GNU user], + [ +#include +#ifdef __GNU_LIBRARY__ + #if (__GLIBC__ >= 2) + Lucky GNU user + #endif +#endif + ], + ac_cv_gnu_library_2=yes, + ac_cv_gnu_library_2=no) + ] + ) + AC_SUBST(GLIBC2) + GLIBC2="$ac_cv_gnu_library_2" + ] +) diff --git a/autoconf/gettext-macros/glibc21.m4 b/autoconf/gettext-macros/glibc21.m4 new file mode 100644 index 00000000..d95fd986 --- /dev/null +++ b/autoconf/gettext-macros/glibc21.m4 @@ -0,0 +1,30 @@ +# glibc21.m4 serial 3 +dnl Copyright (C) 2000-2002, 2004 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +# Test for the GNU C Library, version 2.1 or newer. +# From Bruno Haible. + +AC_DEFUN([gl_GLIBC21], + [ + AC_CACHE_CHECK(whether we are using the GNU C Library 2.1 or newer, + ac_cv_gnu_library_2_1, + [AC_EGREP_CPP([Lucky GNU user], + [ +#include +#ifdef __GNU_LIBRARY__ + #if (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 1) || (__GLIBC__ > 2) + Lucky GNU user + #endif +#endif + ], + ac_cv_gnu_library_2_1=yes, + ac_cv_gnu_library_2_1=no) + ] + ) + AC_SUBST(GLIBC21) + GLIBC21="$ac_cv_gnu_library_2_1" + ] +) diff --git a/autoconf/gettext-macros/iconv.m4 b/autoconf/gettext-macros/iconv.m4 new file mode 100644 index 00000000..bb2f5131 --- /dev/null +++ b/autoconf/gettext-macros/iconv.m4 @@ -0,0 +1,101 @@ +# iconv.m4 serial AM4 (gettext-0.11.3) +dnl Copyright (C) 2000-2002 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +dnl From Bruno Haible. + +AC_DEFUN([AM_ICONV_LINKFLAGS_BODY], +[ + dnl Prerequisites of AC_LIB_LINKFLAGS_BODY. + AC_REQUIRE([AC_LIB_PREPARE_PREFIX]) + AC_REQUIRE([AC_LIB_RPATH]) + + dnl Search for libiconv and define LIBICONV, LTLIBICONV and INCICONV + dnl accordingly. + AC_LIB_LINKFLAGS_BODY([iconv]) +]) + +AC_DEFUN([AM_ICONV_LINK], +[ + dnl Some systems have iconv in libc, some have it in libiconv (OSF/1 and + dnl those with the standalone portable GNU libiconv installed). + + dnl Search for libiconv and define LIBICONV, LTLIBICONV and INCICONV + dnl accordingly. 
+ AC_REQUIRE([AM_ICONV_LINKFLAGS_BODY]) + + dnl Add $INCICONV to CPPFLAGS before performing the following checks, + dnl because if the user has installed libiconv and not disabled its use + dnl via --without-libiconv-prefix, he wants to use it. The first + dnl AC_TRY_LINK will then fail, the second AC_TRY_LINK will succeed. + am_save_CPPFLAGS="$CPPFLAGS" + AC_LIB_APPENDTOVAR([CPPFLAGS], [$INCICONV]) + + AC_CACHE_CHECK(for iconv, am_cv_func_iconv, [ + am_cv_func_iconv="no, consider installing GNU libiconv" + am_cv_lib_iconv=no + AC_TRY_LINK([#include +#include ], + [iconv_t cd = iconv_open("",""); + iconv(cd,NULL,NULL,NULL,NULL); + iconv_close(cd);], + am_cv_func_iconv=yes) + if test "$am_cv_func_iconv" != yes; then + am_save_LIBS="$LIBS" + LIBS="$LIBS $LIBICONV" + AC_TRY_LINK([#include +#include ], + [iconv_t cd = iconv_open("",""); + iconv(cd,NULL,NULL,NULL,NULL); + iconv_close(cd);], + am_cv_lib_iconv=yes + am_cv_func_iconv=yes) + LIBS="$am_save_LIBS" + fi + ]) + if test "$am_cv_func_iconv" = yes; then + AC_DEFINE(HAVE_ICONV, 1, [Define to 1 if you have the `iconv()' function.]) + fi + if test "$am_cv_lib_iconv" = yes; then + AC_MSG_CHECKING([how to link with libiconv]) + AC_MSG_RESULT([$LIBICONV]) + else + dnl If $LIBICONV didn't lead to a usable library, we don't need $INCICONV + dnl either. + CPPFLAGS="$am_save_CPPFLAGS" + LIBICONV= + LTLIBICONV= + fi + AC_SUBST(LIBICONV) + AC_SUBST(LTLIBICONV) +]) + +AC_DEFUN([AM_ICONV], +[ + AM_ICONV_LINK + if test "$am_cv_func_iconv" = yes; then + AC_MSG_CHECKING([for iconv declaration]) + AC_CACHE_VAL(am_cv_proto_iconv, [ + AC_TRY_COMPILE([ +#include +#include +extern +#ifdef __cplusplus +"C" +#endif +#if defined(__STDC__) || defined(__cplusplus) +size_t iconv (iconv_t cd, char * *inbuf, size_t *inbytesleft, char * *outbuf, size_t *outbytesleft); +#else +size_t iconv(); +#endif +], [], am_cv_proto_iconv_arg1="", am_cv_proto_iconv_arg1="const") + am_cv_proto_iconv="extern size_t iconv (iconv_t cd, $am_cv_proto_iconv_arg1 char * *inbuf, size_t *inbytesleft, char * *outbuf, size_t *outbytesleft);"]) + am_cv_proto_iconv=`echo "[$]am_cv_proto_iconv" | tr -s ' ' | sed -e 's/( /(/'` + AC_MSG_RESULT([$]{ac_t:- + }[$]am_cv_proto_iconv) + AC_DEFINE_UNQUOTED(ICONV_CONST, $am_cv_proto_iconv_arg1, + [Define as const if the declaration of iconv() needs const.]) + fi +]) diff --git a/autoconf/gettext-macros/intdiv0.m4 b/autoconf/gettext-macros/intdiv0.m4 new file mode 100644 index 00000000..b8d78176 --- /dev/null +++ b/autoconf/gettext-macros/intdiv0.m4 @@ -0,0 +1,70 @@ +# intdiv0.m4 serial 1 (gettext-0.11.3) +dnl Copyright (C) 2002 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +dnl From Bruno Haible. + +AC_DEFUN([gt_INTDIV0], +[ + AC_REQUIRE([AC_PROG_CC])dnl + AC_REQUIRE([AC_CANONICAL_HOST])dnl + + AC_CACHE_CHECK([whether integer division by zero raises SIGFPE], + gt_cv_int_divbyzero_sigfpe, + [ + AC_TRY_RUN([ +#include +#include + +static void +#ifdef __cplusplus +sigfpe_handler (int sig) +#else +sigfpe_handler (sig) int sig; +#endif +{ + /* Exit with code 0 if SIGFPE, with code 1 if any other signal. */ + exit (sig != SIGFPE); +} + +int x = 1; +int y = 0; +int z; +int nan; + +int main () +{ + signal (SIGFPE, sigfpe_handler); +/* IRIX and AIX (when "xlc -qcheck" is used) yield signal SIGTRAP. 
*/ +#if (defined (__sgi) || defined (_AIX)) && defined (SIGTRAP) + signal (SIGTRAP, sigfpe_handler); +#endif +/* Linux/SPARC yields signal SIGILL. */ +#if defined (__sparc__) && defined (__linux__) + signal (SIGILL, sigfpe_handler); +#endif + + z = x / y; + nan = y / y; + exit (1); +} +], gt_cv_int_divbyzero_sigfpe=yes, gt_cv_int_divbyzero_sigfpe=no, + [ + # Guess based on the CPU. + case "$host_cpu" in + alpha* | i[34567]86 | m68k | s390*) + gt_cv_int_divbyzero_sigfpe="guessing yes";; + *) + gt_cv_int_divbyzero_sigfpe="guessing no";; + esac + ]) + ]) + case "$gt_cv_int_divbyzero_sigfpe" in + *yes) value=1;; + *) value=0;; + esac + AC_DEFINE_UNQUOTED(INTDIV0_RAISES_SIGFPE, $value, + [Define if integer division by zero raises signal SIGFPE.]) +]) diff --git a/autoconf/gettext-macros/intmax.m4 b/autoconf/gettext-macros/intmax.m4 new file mode 100644 index 00000000..d99c999f --- /dev/null +++ b/autoconf/gettext-macros/intmax.m4 @@ -0,0 +1,30 @@ +# intmax.m4 serial 2 (gettext-0.14.2) +dnl Copyright (C) 2002-2005 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +dnl From Bruno Haible. +dnl Test whether the system has the 'intmax_t' type, but don't attempt to +dnl find a replacement if it is lacking. + +AC_DEFUN([gt_TYPE_INTMAX_T], +[ + AC_REQUIRE([gl_AC_HEADER_INTTYPES_H]) + AC_REQUIRE([gl_AC_HEADER_STDINT_H]) + AC_CACHE_CHECK(for intmax_t, gt_cv_c_intmax_t, + [AC_TRY_COMPILE([ +#include +#include +#if HAVE_STDINT_H_WITH_UINTMAX +#include +#endif +#if HAVE_INTTYPES_H_WITH_UINTMAX +#include +#endif +], [intmax_t x = -1;], gt_cv_c_intmax_t=yes, gt_cv_c_intmax_t=no)]) + if test $gt_cv_c_intmax_t = yes; then + AC_DEFINE(HAVE_INTMAX_T, 1, + [Define if you have the 'intmax_t' type in or .]) + fi +]) diff --git a/autoconf/gettext-macros/inttypes-pri.m4 b/autoconf/gettext-macros/inttypes-pri.m4 new file mode 100644 index 00000000..4d56a9ad --- /dev/null +++ b/autoconf/gettext-macros/inttypes-pri.m4 @@ -0,0 +1,30 @@ +# inttypes-pri.m4 serial 1 (gettext-0.11.4) +dnl Copyright (C) 1997-2002 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +dnl From Bruno Haible. + +# Define PRI_MACROS_BROKEN if exists and defines the PRI* +# macros to non-string values. This is the case on AIX 4.3.3. + +AC_DEFUN([gt_INTTYPES_PRI], +[ + AC_REQUIRE([gt_HEADER_INTTYPES_H]) + if test $gt_cv_header_inttypes_h = yes; then + AC_CACHE_CHECK([whether the inttypes.h PRIxNN macros are broken], + gt_cv_inttypes_pri_broken, + [ + AC_TRY_COMPILE([#include +#ifdef PRId32 +char *p = PRId32; +#endif +], [], gt_cv_inttypes_pri_broken=no, gt_cv_inttypes_pri_broken=yes) + ]) + fi + if test "$gt_cv_inttypes_pri_broken" = yes; then + AC_DEFINE_UNQUOTED(PRI_MACROS_BROKEN, 1, + [Define if exists and defines unusable PRI* macros.]) + fi +]) diff --git a/autoconf/gettext-macros/inttypes.m4 b/autoconf/gettext-macros/inttypes.m4 new file mode 100644 index 00000000..779bcea0 --- /dev/null +++ b/autoconf/gettext-macros/inttypes.m4 @@ -0,0 +1,25 @@ +# inttypes.m4 serial 1 (gettext-0.11.4) +dnl Copyright (C) 1997-2002 Free Software Foundation, Inc. 
+dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +dnl From Paul Eggert. + +# Define HAVE_INTTYPES_H if exists and doesn't clash with +# . + +AC_DEFUN([gt_HEADER_INTTYPES_H], +[ + AC_CACHE_CHECK([for inttypes.h], gt_cv_header_inttypes_h, + [ + AC_TRY_COMPILE( + [#include +#include ], + [], gt_cv_header_inttypes_h=yes, gt_cv_header_inttypes_h=no) + ]) + if test $gt_cv_header_inttypes_h = yes; then + AC_DEFINE_UNQUOTED(HAVE_INTTYPES_H, 1, + [Define if exists and doesn't clash with .]) + fi +]) diff --git a/autoconf/gettext-macros/inttypes_h.m4 b/autoconf/gettext-macros/inttypes_h.m4 new file mode 100644 index 00000000..a5d075d9 --- /dev/null +++ b/autoconf/gettext-macros/inttypes_h.m4 @@ -0,0 +1,26 @@ +# inttypes_h.m4 serial 6 +dnl Copyright (C) 1997-2004 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +dnl From Paul Eggert. + +# Define HAVE_INTTYPES_H_WITH_UINTMAX if exists, +# doesn't clash with , and declares uintmax_t. + +AC_DEFUN([gl_AC_HEADER_INTTYPES_H], +[ + AC_CACHE_CHECK([for inttypes.h], gl_cv_header_inttypes_h, + [AC_TRY_COMPILE( + [#include +#include ], + [uintmax_t i = (uintmax_t) -1;], + gl_cv_header_inttypes_h=yes, + gl_cv_header_inttypes_h=no)]) + if test $gl_cv_header_inttypes_h = yes; then + AC_DEFINE_UNQUOTED(HAVE_INTTYPES_H_WITH_UINTMAX, 1, + [Define if exists, doesn't clash with , + and declares uintmax_t. ]) + fi +]) diff --git a/autoconf/gettext-macros/isc-posix.m4 b/autoconf/gettext-macros/isc-posix.m4 new file mode 100644 index 00000000..74dc8f26 --- /dev/null +++ b/autoconf/gettext-macros/isc-posix.m4 @@ -0,0 +1,24 @@ +# isc-posix.m4 serial 2 (gettext-0.11.2) +dnl Copyright (C) 1995-2002 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +# This file is not needed with autoconf-2.53 and newer. Remove it in 2005. + +# This test replaces the one in autoconf. +# Currently this macro should have the same name as the autoconf macro +# because gettext's gettext.m4 (distributed in the automake package) +# still uses it. Otherwise, the use in gettext.m4 makes autoheader +# give these diagnostics: +# configure.in:556: AC_TRY_COMPILE was called before AC_ISC_POSIX +# configure.in:556: AC_TRY_RUN was called before AC_ISC_POSIX + +undefine([AC_ISC_POSIX]) + +AC_DEFUN([AC_ISC_POSIX], + [ + dnl This test replaces the obsolescent AC_ISC_POSIX kludge. + AC_CHECK_LIB(cposix, strerror, [LIBS="$LIBS -lcposix"]) + ] +) diff --git a/autoconf/gettext-macros/lcmessage.m4 b/autoconf/gettext-macros/lcmessage.m4 new file mode 100644 index 00000000..19aa77e4 --- /dev/null +++ b/autoconf/gettext-macros/lcmessage.m4 @@ -0,0 +1,30 @@ +# lcmessage.m4 serial 4 (gettext-0.14.2) +dnl Copyright (C) 1995-2002, 2004-2005 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. 
+dnl +dnl This file can can be used in projects which are not available under +dnl the GNU General Public License or the GNU Library General Public +dnl License but which still want to provide support for the GNU gettext +dnl functionality. +dnl Please note that the actual code of the GNU gettext library is covered +dnl by the GNU Library General Public License, and the rest of the GNU +dnl gettext package package is covered by the GNU General Public License. +dnl They are *not* in the public domain. + +dnl Authors: +dnl Ulrich Drepper , 1995. + +# Check whether LC_MESSAGES is available in . + +AC_DEFUN([gt_LC_MESSAGES], +[ + AC_CACHE_CHECK([for LC_MESSAGES], gt_cv_val_LC_MESSAGES, + [AC_TRY_LINK([#include ], [return LC_MESSAGES], + gt_cv_val_LC_MESSAGES=yes, gt_cv_val_LC_MESSAGES=no)]) + if test $gt_cv_val_LC_MESSAGES = yes; then + AC_DEFINE(HAVE_LC_MESSAGES, 1, + [Define if your file defines LC_MESSAGES.]) + fi +]) diff --git a/autoconf/gettext-macros/lib-ld.m4 b/autoconf/gettext-macros/lib-ld.m4 new file mode 100644 index 00000000..96c4e2c3 --- /dev/null +++ b/autoconf/gettext-macros/lib-ld.m4 @@ -0,0 +1,110 @@ +# lib-ld.m4 serial 3 (gettext-0.13) +dnl Copyright (C) 1996-2003 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +dnl Subroutines of libtool.m4, +dnl with replacements s/AC_/AC_LIB/ and s/lt_cv/acl_cv/ to avoid collision +dnl with libtool.m4. + +dnl From libtool-1.4. Sets the variable with_gnu_ld to yes or no. +AC_DEFUN([AC_LIB_PROG_LD_GNU], +[AC_CACHE_CHECK([if the linker ($LD) is GNU ld], acl_cv_prog_gnu_ld, +[# I'd rather use --version here, but apparently some GNU ld's only accept -v. +case `$LD -v 2>&1 conf$$.sh + echo "exit 0" >>conf$$.sh + chmod +x conf$$.sh + if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then + PATH_SEPARATOR=';' + else + PATH_SEPARATOR=: + fi + rm -f conf$$.sh +fi +ac_prog=ld +if test "$GCC" = yes; then + # Check if gcc -print-prog-name=ld gives a path. + AC_MSG_CHECKING([for ld used by GCC]) + case $host in + *-*-mingw*) + # gcc leaves a trailing carriage return which upsets mingw + ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; + *) + ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; + esac + case $ac_prog in + # Accept absolute paths. + [[\\/]* | [A-Za-z]:[\\/]*)] + [re_direlt='/[^/][^/]*/\.\./'] + # Canonicalize the path of ld + ac_prog=`echo $ac_prog| sed 's%\\\\%/%g'` + while echo $ac_prog | grep "$re_direlt" > /dev/null 2>&1; do + ac_prog=`echo $ac_prog| sed "s%$re_direlt%/%"` + done + test -z "$LD" && LD="$ac_prog" + ;; + "") + # If it fails, then pretend we aren't using GCC. + ac_prog=ld + ;; + *) + # If it is relative, then search for the first ld in PATH. + with_gnu_ld=unknown + ;; + esac +elif test "$with_gnu_ld" = yes; then + AC_MSG_CHECKING([for GNU ld]) +else + AC_MSG_CHECKING([for non-GNU ld]) +fi +AC_CACHE_VAL(acl_cv_path_LD, +[if test -z "$LD"; then + IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS="${IFS}${PATH_SEPARATOR-:}" + for ac_dir in $PATH; do + test -z "$ac_dir" && ac_dir=. + if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then + acl_cv_path_LD="$ac_dir/$ac_prog" + # Check to see if the program is GNU ld. I'd rather use --version, + # but apparently some GNU ld's only accept -v. + # Break only if it was the GNU/non-GNU ld that we prefer. 
+ case `"$acl_cv_path_LD" -v 2>&1 < /dev/null` in + *GNU* | *'with BFD'*) + test "$with_gnu_ld" != no && break ;; + *) + test "$with_gnu_ld" != yes && break ;; + esac + fi + done + IFS="$ac_save_ifs" +else + acl_cv_path_LD="$LD" # Let the user override the test with a path. +fi]) +LD="$acl_cv_path_LD" +if test -n "$LD"; then + AC_MSG_RESULT($LD) +else + AC_MSG_RESULT(no) +fi +test -z "$LD" && AC_MSG_ERROR([no acceptable ld found in \$PATH]) +AC_LIB_PROG_LD_GNU +]) diff --git a/autoconf/gettext-macros/lib-link.m4 b/autoconf/gettext-macros/lib-link.m4 new file mode 100644 index 00000000..ea0b0c4e --- /dev/null +++ b/autoconf/gettext-macros/lib-link.m4 @@ -0,0 +1,553 @@ +# lib-link.m4 serial 6 (gettext-0.14.3) +dnl Copyright (C) 2001-2005 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +dnl From Bruno Haible. + +AC_PREREQ(2.50) + +dnl AC_LIB_LINKFLAGS(name [, dependencies]) searches for libname and +dnl the libraries corresponding to explicit and implicit dependencies. +dnl Sets and AC_SUBSTs the LIB${NAME} and LTLIB${NAME} variables and +dnl augments the CPPFLAGS variable. +AC_DEFUN([AC_LIB_LINKFLAGS], +[ + AC_REQUIRE([AC_LIB_PREPARE_PREFIX]) + AC_REQUIRE([AC_LIB_RPATH]) + define([Name],[translit([$1],[./-], [___])]) + define([NAME],[translit([$1],[abcdefghijklmnopqrstuvwxyz./-], + [ABCDEFGHIJKLMNOPQRSTUVWXYZ___])]) + AC_CACHE_CHECK([how to link with lib[]$1], [ac_cv_lib[]Name[]_libs], [ + AC_LIB_LINKFLAGS_BODY([$1], [$2]) + ac_cv_lib[]Name[]_libs="$LIB[]NAME" + ac_cv_lib[]Name[]_ltlibs="$LTLIB[]NAME" + ac_cv_lib[]Name[]_cppflags="$INC[]NAME" + ]) + LIB[]NAME="$ac_cv_lib[]Name[]_libs" + LTLIB[]NAME="$ac_cv_lib[]Name[]_ltlibs" + INC[]NAME="$ac_cv_lib[]Name[]_cppflags" + AC_LIB_APPENDTOVAR([CPPFLAGS], [$INC]NAME) + AC_SUBST([LIB]NAME) + AC_SUBST([LTLIB]NAME) + dnl Also set HAVE_LIB[]NAME so that AC_LIB_HAVE_LINKFLAGS can reuse the + dnl results of this search when this library appears as a dependency. + HAVE_LIB[]NAME=yes + undefine([Name]) + undefine([NAME]) +]) + +dnl AC_LIB_HAVE_LINKFLAGS(name, dependencies, includes, testcode) +dnl searches for libname and the libraries corresponding to explicit and +dnl implicit dependencies, together with the specified include files and +dnl the ability to compile and link the specified testcode. If found, it +dnl sets and AC_SUBSTs HAVE_LIB${NAME}=yes and the LIB${NAME} and +dnl LTLIB${NAME} variables and augments the CPPFLAGS variable, and +dnl #defines HAVE_LIB${NAME} to 1. Otherwise, it sets and AC_SUBSTs +dnl HAVE_LIB${NAME}=no and LIB${NAME} and LTLIB${NAME} to empty. +AC_DEFUN([AC_LIB_HAVE_LINKFLAGS], +[ + AC_REQUIRE([AC_LIB_PREPARE_PREFIX]) + AC_REQUIRE([AC_LIB_RPATH]) + define([Name],[translit([$1],[./-], [___])]) + define([NAME],[translit([$1],[abcdefghijklmnopqrstuvwxyz./-], + [ABCDEFGHIJKLMNOPQRSTUVWXYZ___])]) + + dnl Search for lib[]Name and define LIB[]NAME, LTLIB[]NAME and INC[]NAME + dnl accordingly. + AC_LIB_LINKFLAGS_BODY([$1], [$2]) + + dnl Add $INC[]NAME to CPPFLAGS before performing the following checks, + dnl because if the user has installed lib[]Name and not disabled its use + dnl via --without-lib[]Name-prefix, he wants to use it. 
+ ac_save_CPPFLAGS="$CPPFLAGS" + AC_LIB_APPENDTOVAR([CPPFLAGS], [$INC]NAME) + + AC_CACHE_CHECK([for lib[]$1], [ac_cv_lib[]Name], [ + ac_save_LIBS="$LIBS" + LIBS="$LIBS $LIB[]NAME" + AC_TRY_LINK([$3], [$4], [ac_cv_lib[]Name=yes], [ac_cv_lib[]Name=no]) + LIBS="$ac_save_LIBS" + ]) + if test "$ac_cv_lib[]Name" = yes; then + HAVE_LIB[]NAME=yes + AC_DEFINE([HAVE_LIB]NAME, 1, [Define if you have the $1 library.]) + AC_MSG_CHECKING([how to link with lib[]$1]) + AC_MSG_RESULT([$LIB[]NAME]) + else + HAVE_LIB[]NAME=no + dnl If $LIB[]NAME didn't lead to a usable library, we don't need + dnl $INC[]NAME either. + CPPFLAGS="$ac_save_CPPFLAGS" + LIB[]NAME= + LTLIB[]NAME= + fi + AC_SUBST([HAVE_LIB]NAME) + AC_SUBST([LIB]NAME) + AC_SUBST([LTLIB]NAME) + undefine([Name]) + undefine([NAME]) +]) + +dnl Determine the platform dependent parameters needed to use rpath: +dnl libext, shlibext, hardcode_libdir_flag_spec, hardcode_libdir_separator, +dnl hardcode_direct, hardcode_minus_L. +AC_DEFUN([AC_LIB_RPATH], +[ + dnl Tell automake >= 1.10 to complain if config.rpath is missing. + m4_ifdef([AC_REQUIRE_AUX_FILE], [AC_REQUIRE_AUX_FILE([config.rpath])]) + AC_REQUIRE([AC_PROG_CC]) dnl we use $CC, $GCC, $LDFLAGS + AC_REQUIRE([AC_LIB_PROG_LD]) dnl we use $LD, $with_gnu_ld + AC_REQUIRE([AC_CANONICAL_HOST]) dnl we use $host + AC_REQUIRE([AC_CONFIG_AUX_DIR_DEFAULT]) dnl we use $ac_aux_dir + AC_CACHE_CHECK([for shared library run path origin], acl_cv_rpath, [ + CC="$CC" GCC="$GCC" LDFLAGS="$LDFLAGS" LD="$LD" with_gnu_ld="$with_gnu_ld" \ + ${CONFIG_SHELL-/bin/sh} "$ac_aux_dir/config.rpath" "$host" > conftest.sh + . ./conftest.sh + rm -f ./conftest.sh + acl_cv_rpath=done + ]) + wl="$acl_cv_wl" + libext="$acl_cv_libext" + shlibext="$acl_cv_shlibext" + hardcode_libdir_flag_spec="$acl_cv_hardcode_libdir_flag_spec" + hardcode_libdir_separator="$acl_cv_hardcode_libdir_separator" + hardcode_direct="$acl_cv_hardcode_direct" + hardcode_minus_L="$acl_cv_hardcode_minus_L" + dnl Determine whether the user wants rpath handling at all. + AC_ARG_ENABLE(rpath, + [ --disable-rpath do not hardcode runtime library paths], + :, enable_rpath=yes) +]) + +dnl AC_LIB_LINKFLAGS_BODY(name [, dependencies]) searches for libname and +dnl the libraries corresponding to explicit and implicit dependencies. +dnl Sets the LIB${NAME}, LTLIB${NAME} and INC${NAME} variables. +AC_DEFUN([AC_LIB_LINKFLAGS_BODY], +[ + define([NAME],[translit([$1],[abcdefghijklmnopqrstuvwxyz./-], + [ABCDEFGHIJKLMNOPQRSTUVWXYZ___])]) + dnl By default, look in $includedir and $libdir. + use_additional=yes + AC_LIB_WITH_FINAL_PREFIX([ + eval additional_includedir=\"$includedir\" + eval additional_libdir=\"$libdir\" + ]) + AC_LIB_ARG_WITH([lib$1-prefix], +[ --with-lib$1-prefix[=DIR] search for lib$1 in DIR/include and DIR/lib + --without-lib$1-prefix don't search for lib$1 in includedir and libdir], +[ + if test "X$withval" = "Xno"; then + use_additional=no + else + if test "X$withval" = "X"; then + AC_LIB_WITH_FINAL_PREFIX([ + eval additional_includedir=\"$includedir\" + eval additional_libdir=\"$libdir\" + ]) + else + additional_includedir="$withval/include" + additional_libdir="$withval/lib" + fi + fi +]) + dnl Search the library and its dependencies in $additional_libdir and + dnl $LDFLAGS. Using breadth-first-seach. 
+ LIB[]NAME= + LTLIB[]NAME= + INC[]NAME= + rpathdirs= + ltrpathdirs= + names_already_handled= + names_next_round='$1 $2' + while test -n "$names_next_round"; do + names_this_round="$names_next_round" + names_next_round= + for name in $names_this_round; do + already_handled= + for n in $names_already_handled; do + if test "$n" = "$name"; then + already_handled=yes + break + fi + done + if test -z "$already_handled"; then + names_already_handled="$names_already_handled $name" + dnl See if it was already located by an earlier AC_LIB_LINKFLAGS + dnl or AC_LIB_HAVE_LINKFLAGS call. + uppername=`echo "$name" | sed -e 'y|abcdefghijklmnopqrstuvwxyz./-|ABCDEFGHIJKLMNOPQRSTUVWXYZ___|'` + eval value=\"\$HAVE_LIB$uppername\" + if test -n "$value"; then + if test "$value" = yes; then + eval value=\"\$LIB$uppername\" + test -z "$value" || LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$value" + eval value=\"\$LTLIB$uppername\" + test -z "$value" || LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }$value" + else + dnl An earlier call to AC_LIB_HAVE_LINKFLAGS has determined + dnl that this library doesn't exist. So just drop it. + : + fi + else + dnl Search the library lib$name in $additional_libdir and $LDFLAGS + dnl and the already constructed $LIBNAME/$LTLIBNAME. + found_dir= + found_la= + found_so= + found_a= + if test $use_additional = yes; then + if test -n "$shlibext" && test -f "$additional_libdir/lib$name.$shlibext"; then + found_dir="$additional_libdir" + found_so="$additional_libdir/lib$name.$shlibext" + if test -f "$additional_libdir/lib$name.la"; then + found_la="$additional_libdir/lib$name.la" + fi + else + if test -f "$additional_libdir/lib$name.$libext"; then + found_dir="$additional_libdir" + found_a="$additional_libdir/lib$name.$libext" + if test -f "$additional_libdir/lib$name.la"; then + found_la="$additional_libdir/lib$name.la" + fi + fi + fi + fi + if test "X$found_dir" = "X"; then + for x in $LDFLAGS $LTLIB[]NAME; do + AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) + case "$x" in + -L*) + dir=`echo "X$x" | sed -e 's/^X-L//'` + if test -n "$shlibext" && test -f "$dir/lib$name.$shlibext"; then + found_dir="$dir" + found_so="$dir/lib$name.$shlibext" + if test -f "$dir/lib$name.la"; then + found_la="$dir/lib$name.la" + fi + else + if test -f "$dir/lib$name.$libext"; then + found_dir="$dir" + found_a="$dir/lib$name.$libext" + if test -f "$dir/lib$name.la"; then + found_la="$dir/lib$name.la" + fi + fi + fi + ;; + esac + if test "X$found_dir" != "X"; then + break + fi + done + fi + if test "X$found_dir" != "X"; then + dnl Found the library. + LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }-L$found_dir -l$name" + if test "X$found_so" != "X"; then + dnl Linking with a shared library. We attempt to hardcode its + dnl directory into the executable's runpath, unless it's the + dnl standard /usr/lib. + if test "$enable_rpath" = no || test "X$found_dir" = "X/usr/lib"; then + dnl No hardcoding is needed. + LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_so" + else + dnl Use an explicit option to hardcode DIR into the resulting + dnl binary. + dnl Potentially add DIR to ltrpathdirs. + dnl The ltrpathdirs will be appended to $LTLIBNAME at the end. + haveit= + for x in $ltrpathdirs; do + if test "X$x" = "X$found_dir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + ltrpathdirs="$ltrpathdirs $found_dir" + fi + dnl The hardcoding into $LIBNAME is system dependent. + if test "$hardcode_direct" = yes; then + dnl Using DIR/libNAME.so during linking hardcodes DIR into the + dnl resulting binary. 
+ LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_so" + else + if test -n "$hardcode_libdir_flag_spec" && test "$hardcode_minus_L" = no; then + dnl Use an explicit option to hardcode DIR into the resulting + dnl binary. + LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_so" + dnl Potentially add DIR to rpathdirs. + dnl The rpathdirs will be appended to $LIBNAME at the end. + haveit= + for x in $rpathdirs; do + if test "X$x" = "X$found_dir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + rpathdirs="$rpathdirs $found_dir" + fi + else + dnl Rely on "-L$found_dir". + dnl But don't add it if it's already contained in the LDFLAGS + dnl or the already constructed $LIBNAME + haveit= + for x in $LDFLAGS $LIB[]NAME; do + AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) + if test "X$x" = "X-L$found_dir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-L$found_dir" + fi + if test "$hardcode_minus_L" != no; then + dnl FIXME: Not sure whether we should use + dnl "-L$found_dir -l$name" or "-L$found_dir $found_so" + dnl here. + LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_so" + else + dnl We cannot use $hardcode_runpath_var and LD_RUN_PATH + dnl here, because this doesn't fit in flags passed to the + dnl compiler. So give up. No hardcoding. This affects only + dnl very old systems. + dnl FIXME: Not sure whether we should use + dnl "-L$found_dir -l$name" or "-L$found_dir $found_so" + dnl here. + LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-l$name" + fi + fi + fi + fi + else + if test "X$found_a" != "X"; then + dnl Linking with a static library. + LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_a" + else + dnl We shouldn't come here, but anyway it's good to have a + dnl fallback. + LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-L$found_dir -l$name" + fi + fi + dnl Assume the include files are nearby. + additional_includedir= + case "$found_dir" in + */lib | */lib/) + basedir=`echo "X$found_dir" | sed -e 's,^X,,' -e 's,/lib/*$,,'` + additional_includedir="$basedir/include" + ;; + esac + if test "X$additional_includedir" != "X"; then + dnl Potentially add $additional_includedir to $INCNAME. + dnl But don't add it + dnl 1. if it's the standard /usr/include, + dnl 2. if it's /usr/local/include and we are using GCC on Linux, + dnl 3. if it's already present in $CPPFLAGS or the already + dnl constructed $INCNAME, + dnl 4. if it doesn't exist as a directory. + if test "X$additional_includedir" != "X/usr/include"; then + haveit= + if test "X$additional_includedir" = "X/usr/local/include"; then + if test -n "$GCC"; then + case $host_os in + linux* | gnu* | k*bsd*-gnu) haveit=yes;; + esac + fi + fi + if test -z "$haveit"; then + for x in $CPPFLAGS $INC[]NAME; do + AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) + if test "X$x" = "X-I$additional_includedir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + if test -d "$additional_includedir"; then + dnl Really add $additional_includedir to $INCNAME. + INC[]NAME="${INC[]NAME}${INC[]NAME:+ }-I$additional_includedir" + fi + fi + fi + fi + fi + dnl Look for dependencies. + if test -n "$found_la"; then + dnl Read the .la file. It defines the variables + dnl dlname, library_names, old_library, dependency_libs, current, + dnl age, revision, installed, dlopen, dlpreopen, libdir. + save_libdir="$libdir" + case "$found_la" in + */* | *\\*) . "$found_la" ;; + *) . "./$found_la" ;; + esac + libdir="$save_libdir" + dnl We use only dependency_libs. 
+ for dep in $dependency_libs; do + case "$dep" in + -L*) + additional_libdir=`echo "X$dep" | sed -e 's/^X-L//'` + dnl Potentially add $additional_libdir to $LIBNAME and $LTLIBNAME. + dnl But don't add it + dnl 1. if it's the standard /usr/lib, + dnl 2. if it's /usr/local/lib and we are using GCC on Linux, + dnl 3. if it's already present in $LDFLAGS or the already + dnl constructed $LIBNAME, + dnl 4. if it doesn't exist as a directory. + if test "X$additional_libdir" != "X/usr/lib"; then + haveit= + if test "X$additional_libdir" = "X/usr/local/lib"; then + if test -n "$GCC"; then + case $host_os in + linux* | gnu* | k*bsd*-gnu) haveit=yes;; + esac + fi + fi + if test -z "$haveit"; then + haveit= + for x in $LDFLAGS $LIB[]NAME; do + AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) + if test "X$x" = "X-L$additional_libdir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + if test -d "$additional_libdir"; then + dnl Really add $additional_libdir to $LIBNAME. + LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-L$additional_libdir" + fi + fi + haveit= + for x in $LDFLAGS $LTLIB[]NAME; do + AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) + if test "X$x" = "X-L$additional_libdir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + if test -d "$additional_libdir"; then + dnl Really add $additional_libdir to $LTLIBNAME. + LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }-L$additional_libdir" + fi + fi + fi + fi + ;; + -R*) + dir=`echo "X$dep" | sed -e 's/^X-R//'` + if test "$enable_rpath" != no; then + dnl Potentially add DIR to rpathdirs. + dnl The rpathdirs will be appended to $LIBNAME at the end. + haveit= + for x in $rpathdirs; do + if test "X$x" = "X$dir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + rpathdirs="$rpathdirs $dir" + fi + dnl Potentially add DIR to ltrpathdirs. + dnl The ltrpathdirs will be appended to $LTLIBNAME at the end. + haveit= + for x in $ltrpathdirs; do + if test "X$x" = "X$dir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + ltrpathdirs="$ltrpathdirs $dir" + fi + fi + ;; + -l*) + dnl Handle this in the next round. + names_next_round="$names_next_round "`echo "X$dep" | sed -e 's/^X-l//'` + ;; + *.la) + dnl Handle this in the next round. Throw away the .la's + dnl directory; it is already contained in a preceding -L + dnl option. + names_next_round="$names_next_round "`echo "X$dep" | sed -e 's,^X.*/,,' -e 's,^lib,,' -e 's,\.la$,,'` + ;; + *) + dnl Most likely an immediate library name. + LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$dep" + LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }$dep" + ;; + esac + done + fi + else + dnl Didn't find the library; assume it is in the system directories + dnl known to the linker and runtime loader. (All the system + dnl directories known to the linker should also be known to the + dnl runtime loader, otherwise the system is severely misconfigured.) + LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-l$name" + LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }-l$name" + fi + fi + fi + done + done + if test "X$rpathdirs" != "X"; then + if test -n "$hardcode_libdir_separator"; then + dnl Weird platform: only the last -rpath option counts, the user must + dnl pass all path elements in one option. We can arrange that for a + dnl single library, but not when more than one $LIBNAMEs are used. + alldirs= + for found_dir in $rpathdirs; do + alldirs="${alldirs}${alldirs:+$hardcode_libdir_separator}$found_dir" + done + dnl Note: hardcode_libdir_flag_spec uses $libdir and $wl. 
+ acl_save_libdir="$libdir" + libdir="$alldirs" + eval flag=\"$hardcode_libdir_flag_spec\" + libdir="$acl_save_libdir" + LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$flag" + else + dnl The -rpath options are cumulative. + for found_dir in $rpathdirs; do + acl_save_libdir="$libdir" + libdir="$found_dir" + eval flag=\"$hardcode_libdir_flag_spec\" + libdir="$acl_save_libdir" + LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$flag" + done + fi + fi + if test "X$ltrpathdirs" != "X"; then + dnl When using libtool, the option that works for both libraries and + dnl executables is -R. The -R options are cumulative. + for found_dir in $ltrpathdirs; do + LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }-R$found_dir" + done + fi +]) + +dnl AC_LIB_APPENDTOVAR(VAR, CONTENTS) appends the elements of CONTENTS to VAR, +dnl unless already present in VAR. +dnl Works only for CPPFLAGS, not for LIB* variables because that sometimes +dnl contains two or three consecutive elements that belong together. +AC_DEFUN([AC_LIB_APPENDTOVAR], +[ + for element in [$2]; do + haveit= + for x in $[$1]; do + AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) + if test "X$x" = "X$element"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + [$1]="${[$1]}${[$1]:+ }$element" + fi + done +]) diff --git a/autoconf/gettext-macros/lib-prefix.m4 b/autoconf/gettext-macros/lib-prefix.m4 new file mode 100644 index 00000000..0d895ca6 --- /dev/null +++ b/autoconf/gettext-macros/lib-prefix.m4 @@ -0,0 +1,153 @@ +# lib-prefix.m4 serial 4 (gettext-0.14.2) +dnl Copyright (C) 2001-2005 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +dnl From Bruno Haible. + +dnl AC_LIB_ARG_WITH is synonymous to AC_ARG_WITH in autoconf-2.13, and +dnl similar to AC_ARG_WITH in autoconf 2.52...2.57 except that is doesn't +dnl require excessive bracketing. +ifdef([AC_HELP_STRING], +[AC_DEFUN([AC_LIB_ARG_WITH], [AC_ARG_WITH([$1],[[$2]],[$3],[$4])])], +[AC_DEFUN([AC_][LIB_ARG_WITH], [AC_ARG_WITH([$1],[$2],[$3],[$4])])]) + +dnl AC_LIB_PREFIX adds to the CPPFLAGS and LDFLAGS the flags that are needed +dnl to access previously installed libraries. The basic assumption is that +dnl a user will want packages to use other packages he previously installed +dnl with the same --prefix option. +dnl This macro is not needed if only AC_LIB_LINKFLAGS is used to locate +dnl libraries, but is otherwise very convenient. +AC_DEFUN([AC_LIB_PREFIX], +[ + AC_BEFORE([$0], [AC_LIB_LINKFLAGS]) + AC_REQUIRE([AC_PROG_CC]) + AC_REQUIRE([AC_CANONICAL_HOST]) + AC_REQUIRE([AC_LIB_PREPARE_PREFIX]) + dnl By default, look in $includedir and $libdir. + use_additional=yes + AC_LIB_WITH_FINAL_PREFIX([ + eval additional_includedir=\"$includedir\" + eval additional_libdir=\"$libdir\" + ]) + AC_LIB_ARG_WITH([lib-prefix], +[ --with-lib-prefix[=DIR] search for libraries in DIR/include and DIR/lib + --without-lib-prefix don't search for libraries in includedir and libdir], +[ + if test "X$withval" = "Xno"; then + use_additional=no + else + if test "X$withval" = "X"; then + AC_LIB_WITH_FINAL_PREFIX([ + eval additional_includedir=\"$includedir\" + eval additional_libdir=\"$libdir\" + ]) + else + additional_includedir="$withval/include" + additional_libdir="$withval/lib" + fi + fi +]) + if test $use_additional = yes; then + dnl Potentially add $additional_includedir to $CPPFLAGS. + dnl But don't add it + dnl 1. 
if it's the standard /usr/include, + dnl 2. if it's already present in $CPPFLAGS, + dnl 3. if it's /usr/local/include and we are using GCC on Linux, + dnl 4. if it doesn't exist as a directory. + if test "X$additional_includedir" != "X/usr/include"; then + haveit= + for x in $CPPFLAGS; do + AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) + if test "X$x" = "X-I$additional_includedir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + if test "X$additional_includedir" = "X/usr/local/include"; then + if test -n "$GCC"; then + case $host_os in + linux* | gnu* | k*bsd*-gnu) haveit=yes;; + esac + fi + fi + if test -z "$haveit"; then + if test -d "$additional_includedir"; then + dnl Really add $additional_includedir to $CPPFLAGS. + CPPFLAGS="${CPPFLAGS}${CPPFLAGS:+ }-I$additional_includedir" + fi + fi + fi + fi + dnl Potentially add $additional_libdir to $LDFLAGS. + dnl But don't add it + dnl 1. if it's the standard /usr/lib, + dnl 2. if it's already present in $LDFLAGS, + dnl 3. if it's /usr/local/lib and we are using GCC on Linux, + dnl 4. if it doesn't exist as a directory. + if test "X$additional_libdir" != "X/usr/lib"; then + haveit= + for x in $LDFLAGS; do + AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) + if test "X$x" = "X-L$additional_libdir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + if test "X$additional_libdir" = "X/usr/local/lib"; then + if test -n "$GCC"; then + case $host_os in + linux*) haveit=yes;; + esac + fi + fi + if test -z "$haveit"; then + if test -d "$additional_libdir"; then + dnl Really add $additional_libdir to $LDFLAGS. + LDFLAGS="${LDFLAGS}${LDFLAGS:+ }-L$additional_libdir" + fi + fi + fi + fi + fi +]) + +dnl AC_LIB_PREPARE_PREFIX creates variables acl_final_prefix, +dnl acl_final_exec_prefix, containing the values to which $prefix and +dnl $exec_prefix will expand at the end of the configure script. +AC_DEFUN([AC_LIB_PREPARE_PREFIX], +[ + dnl Unfortunately, prefix and exec_prefix get only finally determined + dnl at the end of configure. + if test "X$prefix" = "XNONE"; then + acl_final_prefix="$ac_default_prefix" + else + acl_final_prefix="$prefix" + fi + if test "X$exec_prefix" = "XNONE"; then + acl_final_exec_prefix='${prefix}' + else + acl_final_exec_prefix="$exec_prefix" + fi + acl_save_prefix="$prefix" + prefix="$acl_final_prefix" + eval acl_final_exec_prefix=\"$acl_final_exec_prefix\" + prefix="$acl_save_prefix" +]) + +dnl AC_LIB_WITH_FINAL_PREFIX([statement]) evaluates statement, with the +dnl variables prefix and exec_prefix bound to the values they will have +dnl at the end of the configure script. +AC_DEFUN([AC_LIB_WITH_FINAL_PREFIX], +[ + acl_save_prefix="$prefix" + prefix="$acl_final_prefix" + acl_save_exec_prefix="$exec_prefix" + exec_prefix="$acl_final_exec_prefix" + $1 + exec_prefix="$acl_save_exec_prefix" + prefix="$acl_save_prefix" +]) diff --git a/autoconf/gettext-macros/longdouble.m4 b/autoconf/gettext-macros/longdouble.m4 new file mode 100644 index 00000000..40cd7ce0 --- /dev/null +++ b/autoconf/gettext-macros/longdouble.m4 @@ -0,0 +1,28 @@ +# longdouble.m4 serial 1 (gettext-0.12) +dnl Copyright (C) 2002-2003 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +dnl From Bruno Haible. +dnl Test whether the compiler supports the 'long double' type. 
+dnl Prerequisite: AC_PROG_CC + +AC_DEFUN([gt_TYPE_LONGDOUBLE], +[ + AC_CACHE_CHECK([for long double], gt_cv_c_long_double, + [if test "$GCC" = yes; then + gt_cv_c_long_double=yes + else + AC_TRY_COMPILE([ + /* The Stardent Vistra knows sizeof(long double), but does not support it. */ + long double foo = 0.0; + /* On Ultrix 4.3 cc, long double is 4 and double is 8. */ + int array [2*(sizeof(long double) >= sizeof(double)) - 1]; + ], , + gt_cv_c_long_double=yes, gt_cv_c_long_double=no) + fi]) + if test $gt_cv_c_long_double = yes; then + AC_DEFINE(HAVE_LONG_DOUBLE, 1, [Define if you have the 'long double' type.]) + fi +]) diff --git a/autoconf/gettext-macros/longlong.m4 b/autoconf/gettext-macros/longlong.m4 new file mode 100644 index 00000000..7b399e01 --- /dev/null +++ b/autoconf/gettext-macros/longlong.m4 @@ -0,0 +1,23 @@ +# longlong.m4 serial 5 +dnl Copyright (C) 1999-2004 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +dnl From Paul Eggert. + +# Define HAVE_LONG_LONG if 'long long' works. + +AC_DEFUN([gl_AC_TYPE_LONG_LONG], +[ + AC_CACHE_CHECK([for long long], ac_cv_type_long_long, + [AC_TRY_LINK([long long ll = 1LL; int i = 63;], + [long long llmax = (long long) -1; + return ll << i | ll >> i | llmax / ll | llmax % ll;], + ac_cv_type_long_long=yes, + ac_cv_type_long_long=no)]) + if test $ac_cv_type_long_long = yes; then + AC_DEFINE(HAVE_LONG_LONG, 1, + [Define if you have the 'long long' type.]) + fi +]) diff --git a/autoconf/gettext-macros/nls.m4 b/autoconf/gettext-macros/nls.m4 new file mode 100644 index 00000000..2082c3b2 --- /dev/null +++ b/autoconf/gettext-macros/nls.m4 @@ -0,0 +1,51 @@ +# nls.m4 serial 2 (gettext-0.14.3) +dnl Copyright (C) 1995-2003, 2005 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. +dnl +dnl This file can can be used in projects which are not available under +dnl the GNU General Public License or the GNU Library General Public +dnl License but which still want to provide support for the GNU gettext +dnl functionality. +dnl Please note that the actual code of the GNU gettext library is covered +dnl by the GNU Library General Public License, and the rest of the GNU +dnl gettext package package is covered by the GNU General Public License. +dnl They are *not* in the public domain. + +dnl Authors: +dnl Ulrich Drepper , 1995-2000. +dnl Bruno Haible , 2000-2003. + +AC_PREREQ(2.50) + +AC_DEFUN([AM_NLS], +[ + AC_MSG_CHECKING([whether NLS is requested]) + dnl Default is enabled NLS + AC_ARG_ENABLE(nls, + [ --disable-nls do not use Native Language Support], + USE_NLS=$enableval, USE_NLS=yes) + AC_MSG_RESULT($USE_NLS) + AC_SUBST(USE_NLS) +]) + +AC_DEFUN([AM_MKINSTALLDIRS], +[ + dnl Tell automake >= 1.10 to complain if mkinstalldirs is missing. + m4_ifdef([AC_REQUIRE_AUX_FILE], [AC_REQUIRE_AUX_FILE([mkinstalldirs])]) + dnl If the AC_CONFIG_AUX_DIR macro for autoconf is used we possibly + dnl find the mkinstalldirs script in another subdir but $(top_srcdir). + dnl Try to locate it. 
+ MKINSTALLDIRS= + if test -n "$ac_aux_dir"; then + case "$ac_aux_dir" in + /*) MKINSTALLDIRS="$ac_aux_dir/mkinstalldirs" ;; + *) MKINSTALLDIRS="\$(top_builddir)/$ac_aux_dir/mkinstalldirs" ;; + esac + fi + if test -z "$MKINSTALLDIRS"; then + MKINSTALLDIRS="\$(top_srcdir)/mkinstalldirs" + fi + AC_SUBST(MKINSTALLDIRS) +]) diff --git a/autoconf/gettext-macros/po.m4 b/autoconf/gettext-macros/po.m4 new file mode 100644 index 00000000..f2795eea --- /dev/null +++ b/autoconf/gettext-macros/po.m4 @@ -0,0 +1,429 @@ +# po.m4 serial 7 (gettext-0.14.3) +dnl Copyright (C) 1995-2005 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. +dnl +dnl This file can can be used in projects which are not available under +dnl the GNU General Public License or the GNU Library General Public +dnl License but which still want to provide support for the GNU gettext +dnl functionality. +dnl Please note that the actual code of the GNU gettext library is covered +dnl by the GNU Library General Public License, and the rest of the GNU +dnl gettext package package is covered by the GNU General Public License. +dnl They are *not* in the public domain. + +dnl Authors: +dnl Ulrich Drepper , 1995-2000. +dnl Bruno Haible , 2000-2003. + +AC_PREREQ(2.50) + +dnl Checks for all prerequisites of the po subdirectory. +AC_DEFUN([AM_PO_SUBDIRS], +[ + AC_REQUIRE([AC_PROG_MAKE_SET])dnl + AC_REQUIRE([AC_PROG_INSTALL])dnl + AC_REQUIRE([AM_MKINSTALLDIRS])dnl + AC_REQUIRE([AM_NLS])dnl + + dnl Perform the following tests also if --disable-nls has been given, + dnl because they are needed for "make dist" to work. + + dnl Search for GNU msgfmt in the PATH. + dnl The first test excludes Solaris msgfmt and early GNU msgfmt versions. + dnl The second test excludes FreeBSD msgfmt. + AM_PATH_PROG_WITH_TEST(MSGFMT, msgfmt, + [$ac_dir/$ac_word --statistics /dev/null >&]AS_MESSAGE_LOG_FD[ 2>&1 && + (if $ac_dir/$ac_word --statistics /dev/null 2>&1 >/dev/null | grep usage >/dev/null; then exit 1; else exit 0; fi)], + :) + AC_PATH_PROG(GMSGFMT, gmsgfmt, $MSGFMT) + + dnl Search for GNU xgettext 0.12 or newer in the PATH. + dnl The first test excludes Solaris xgettext and early GNU xgettext versions. + dnl The second test excludes FreeBSD xgettext. + AM_PATH_PROG_WITH_TEST(XGETTEXT, xgettext, + [$ac_dir/$ac_word --omit-header --copyright-holder= --msgid-bugs-address= /dev/null >&]AS_MESSAGE_LOG_FD[ 2>&1 && + (if $ac_dir/$ac_word --omit-header --copyright-holder= --msgid-bugs-address= /dev/null 2>&1 >/dev/null | grep usage >/dev/null; then exit 1; else exit 0; fi)], + :) + dnl Remove leftover from FreeBSD xgettext call. + rm -f messages.po + + dnl Search for GNU msgmerge 0.11 or newer in the PATH. + AM_PATH_PROG_WITH_TEST(MSGMERGE, msgmerge, + [$ac_dir/$ac_word --update -q /dev/null /dev/null >&]AS_MESSAGE_LOG_FD[ 2>&1], :) + + dnl This could go away some day; the PATH_PROG_WITH_TEST already does it. + dnl Test whether we really found GNU msgfmt. + if test "$GMSGFMT" != ":"; then + dnl If it is no GNU msgfmt we define it as : so that the + dnl Makefiles still can work. 
+ if $GMSGFMT --statistics /dev/null >/dev/null 2>&1 && + (if $GMSGFMT --statistics /dev/null 2>&1 >/dev/null | grep usage >/dev/null; then exit 1; else exit 0; fi); then + : ; + else + GMSGFMT=`echo "$GMSGFMT" | sed -e 's,^.*/,,'` + AC_MSG_RESULT( + [found $GMSGFMT program is not GNU msgfmt; ignore it]) + GMSGFMT=":" + fi + fi + + dnl This could go away some day; the PATH_PROG_WITH_TEST already does it. + dnl Test whether we really found GNU xgettext. + if test "$XGETTEXT" != ":"; then + dnl If it is no GNU xgettext we define it as : so that the + dnl Makefiles still can work. + if $XGETTEXT --omit-header --copyright-holder= --msgid-bugs-address= /dev/null >/dev/null 2>&1 && + (if $XGETTEXT --omit-header --copyright-holder= --msgid-bugs-address= /dev/null 2>&1 >/dev/null | grep usage >/dev/null; then exit 1; else exit 0; fi); then + : ; + else + AC_MSG_RESULT( + [found xgettext program is not GNU xgettext; ignore it]) + XGETTEXT=":" + fi + dnl Remove leftover from FreeBSD xgettext call. + rm -f messages.po + fi + + AC_OUTPUT_COMMANDS([ + for ac_file in $CONFIG_FILES; do + # Support "outfile[:infile[:infile...]]" + case "$ac_file" in + *:*) ac_file=`echo "$ac_file"|sed 's%:.*%%'` ;; + esac + # PO directories have a Makefile.in generated from Makefile.in.in. + case "$ac_file" in */Makefile.in) + # Adjust a relative srcdir. + ac_dir=`echo "$ac_file"|sed 's%/[^/][^/]*$%%'` + ac_dir_suffix="/`echo "$ac_dir"|sed 's%^\./%%'`" + ac_dots=`echo "$ac_dir_suffix"|sed 's%/[^/]*%../%g'` + # In autoconf-2.13 it is called $ac_given_srcdir. + # In autoconf-2.50 it is called $srcdir. + test -n "$ac_given_srcdir" || ac_given_srcdir="$srcdir" + case "$ac_given_srcdir" in + .) top_srcdir=`echo $ac_dots|sed 's%/$%%'` ;; + /*) top_srcdir="$ac_given_srcdir" ;; + *) top_srcdir="$ac_dots$ac_given_srcdir" ;; + esac + # Treat a directory as a PO directory if and only if it has a + # POTFILES.in file. This allows packages to have multiple PO + # directories under different names or in different locations. + if test -f "$ac_given_srcdir/$ac_dir/POTFILES.in"; then + rm -f "$ac_dir/POTFILES" + test -n "$as_me" && echo "$as_me: creating $ac_dir/POTFILES" || echo "creating $ac_dir/POTFILES" + cat "$ac_given_srcdir/$ac_dir/POTFILES.in" | sed -e "/^#/d" -e "/^[ ]*\$/d" -e "s,.*, $top_srcdir/& \\\\," | sed -e "\$s/\(.*\) \\\\/\1/" > "$ac_dir/POTFILES" + POMAKEFILEDEPS="POTFILES.in" + # ALL_LINGUAS, POFILES, UPDATEPOFILES, DUMMYPOFILES, GMOFILES depend + # on $ac_dir but don't depend on user-specified configuration + # parameters. + if test -f "$ac_given_srcdir/$ac_dir/LINGUAS"; then + # The LINGUAS file contains the set of available languages. + if test -n "$OBSOLETE_ALL_LINGUAS"; then + test -n "$as_me" && echo "$as_me: setting ALL_LINGUAS in configure.in is obsolete" || echo "setting ALL_LINGUAS in configure.in is obsolete" + fi + ALL_LINGUAS_=`sed -e "/^#/d" -e "s/#.*//" "$ac_given_srcdir/$ac_dir/LINGUAS"` + # Hide the ALL_LINGUAS assigment from automake. + eval 'ALL_LINGUAS''=$ALL_LINGUAS_' + POMAKEFILEDEPS="$POMAKEFILEDEPS LINGUAS" + else + # The set of available languages was given in configure.in. + eval 'ALL_LINGUAS''=$OBSOLETE_ALL_LINGUAS' + fi + # Compute POFILES + # as $(foreach lang, $(ALL_LINGUAS), $(srcdir)/$(lang).po) + # Compute UPDATEPOFILES + # as $(foreach lang, $(ALL_LINGUAS), $(lang).po-update) + # Compute DUMMYPOFILES + # as $(foreach lang, $(ALL_LINGUAS), $(lang).nop) + # Compute GMOFILES + # as $(foreach lang, $(ALL_LINGUAS), $(srcdir)/$(lang).gmo) + case "$ac_given_srcdir" in + .) 
srcdirpre= ;; + *) srcdirpre='$(srcdir)/' ;; + esac + POFILES= + UPDATEPOFILES= + DUMMYPOFILES= + GMOFILES= + for lang in $ALL_LINGUAS; do + POFILES="$POFILES $srcdirpre$lang.po" + UPDATEPOFILES="$UPDATEPOFILES $lang.po-update" + DUMMYPOFILES="$DUMMYPOFILES $lang.nop" + GMOFILES="$GMOFILES $srcdirpre$lang.gmo" + done + # CATALOGS depends on both $ac_dir and the user's LINGUAS + # environment variable. + INST_LINGUAS= + if test -n "$ALL_LINGUAS"; then + for presentlang in $ALL_LINGUAS; do + useit=no + if test "%UNSET%" != "$LINGUAS"; then + desiredlanguages="$LINGUAS" + else + desiredlanguages="$ALL_LINGUAS" + fi + for desiredlang in $desiredlanguages; do + # Use the presentlang catalog if desiredlang is + # a. equal to presentlang, or + # b. a variant of presentlang (because in this case, + # presentlang can be used as a fallback for messages + # which are not translated in the desiredlang catalog). + case "$desiredlang" in + "$presentlang"*) useit=yes;; + esac + done + if test $useit = yes; then + INST_LINGUAS="$INST_LINGUAS $presentlang" + fi + done + fi + CATALOGS= + if test -n "$INST_LINGUAS"; then + for lang in $INST_LINGUAS; do + CATALOGS="$CATALOGS $lang.gmo" + done + fi + test -n "$as_me" && echo "$as_me: creating $ac_dir/Makefile" || echo "creating $ac_dir/Makefile" + sed -e "/^POTFILES =/r $ac_dir/POTFILES" -e "/^# Makevars/r $ac_given_srcdir/$ac_dir/Makevars" -e "s|@POFILES@|$POFILES|g" -e "s|@UPDATEPOFILES@|$UPDATEPOFILES|g" -e "s|@DUMMYPOFILES@|$DUMMYPOFILES|g" -e "s|@GMOFILES@|$GMOFILES|g" -e "s|@CATALOGS@|$CATALOGS|g" -e "s|@POMAKEFILEDEPS@|$POMAKEFILEDEPS|g" "$ac_dir/Makefile.in" > "$ac_dir/Makefile" + for f in "$ac_given_srcdir/$ac_dir"/Rules-*; do + if test -f "$f"; then + case "$f" in + *.orig | *.bak | *~) ;; + *) cat "$f" >> "$ac_dir/Makefile" ;; + esac + fi + done + fi + ;; + esac + done], + [# Capture the value of obsolete ALL_LINGUAS because we need it to compute + # POFILES, UPDATEPOFILES, DUMMYPOFILES, GMOFILES, CATALOGS. But hide it + # from automake. + eval 'OBSOLETE_ALL_LINGUAS''="$ALL_LINGUAS"' + # Capture the value of LINGUAS because we need it to compute CATALOGS. + LINGUAS="${LINGUAS-%UNSET%}" + ]) +]) + +dnl Postprocesses a Makefile in a directory containing PO files. +AC_DEFUN([AM_POSTPROCESS_PO_MAKEFILE], +[ + # When this code is run, in config.status, two variables have already been + # set: + # - OBSOLETE_ALL_LINGUAS is the value of LINGUAS set in configure.in, + # - LINGUAS is the value of the environment variable LINGUAS at configure + # time. + +changequote(,)dnl + # Adjust a relative srcdir. + ac_dir=`echo "$ac_file"|sed 's%/[^/][^/]*$%%'` + ac_dir_suffix="/`echo "$ac_dir"|sed 's%^\./%%'`" + ac_dots=`echo "$ac_dir_suffix"|sed 's%/[^/]*%../%g'` + # In autoconf-2.13 it is called $ac_given_srcdir. + # In autoconf-2.50 it is called $srcdir. + test -n "$ac_given_srcdir" || ac_given_srcdir="$srcdir" + case "$ac_given_srcdir" in + .) top_srcdir=`echo $ac_dots|sed 's%/$%%'` ;; + /*) top_srcdir="$ac_given_srcdir" ;; + *) top_srcdir="$ac_dots$ac_given_srcdir" ;; + esac + + # Find a way to echo strings without interpreting backslash. + if test "X`(echo '\t') 2>/dev/null`" = 'X\t'; then + gt_echo='echo' + else + if test "X`(printf '%s\n' '\t') 2>/dev/null`" = 'X\t'; then + gt_echo='printf %s\n' + else + echo_func () { + cat < "$ac_file.tmp" + if grep -l '@TCLCATALOGS@' "$ac_file" > /dev/null; then + # Add dependencies that cannot be formulated as a simple suffix rule. 
+ for lang in $ALL_LINGUAS; do + frobbedlang=`echo $lang | sed -e 's/\..*$//' -e 'y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/'` + cat >> "$ac_file.tmp" < /dev/null; then + # Add dependencies that cannot be formulated as a simple suffix rule. + for lang in $ALL_LINGUAS; do + frobbedlang=`echo $lang | sed -e 's/_/-/g' -e 's/^sr-CS/sr-SP/' -e 's/@latin$/-Latn/' -e 's/@cyrillic$/-Cyrl/' -e 's/^sr-SP$/sr-SP-Latn/' -e 's/^uz-UZ$/uz-UZ-Latn/'` + cat >> "$ac_file.tmp" <> "$ac_file.tmp" < +#include +/* The string "%2$d %1$d", with dollar characters protected from the shell's + dollar expansion (possibly an autoconf bug). */ +static char format[] = { '%', '2', '$', 'd', ' ', '%', '1', '$', 'd', '\0' }; +static char buf[100]; +int main () +{ + sprintf (buf, format, 33, 55); + return (strcmp (buf, "55 33") != 0); +}], gt_cv_func_printf_posix=yes, gt_cv_func_printf_posix=no, + [ + AC_EGREP_CPP(notposix, [ +#if defined __NetBSD__ || defined _MSC_VER || defined __MINGW32__ || defined __CYGWIN__ + notposix +#endif + ], gt_cv_func_printf_posix="guessing no", + gt_cv_func_printf_posix="guessing yes") + ]) + ]) + case $gt_cv_func_printf_posix in + *yes) + AC_DEFINE(HAVE_POSIX_PRINTF, 1, + [Define if your printf() function supports format strings with positions.]) + ;; + esac +]) diff --git a/autoconf/gettext-macros/progtest.m4 b/autoconf/gettext-macros/progtest.m4 new file mode 100644 index 00000000..a56365cd --- /dev/null +++ b/autoconf/gettext-macros/progtest.m4 @@ -0,0 +1,92 @@ +# progtest.m4 serial 4 (gettext-0.14.2) +dnl Copyright (C) 1996-2003, 2005 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. +dnl +dnl This file can can be used in projects which are not available under +dnl the GNU General Public License or the GNU Library General Public +dnl License but which still want to provide support for the GNU gettext +dnl functionality. +dnl Please note that the actual code of the GNU gettext library is covered +dnl by the GNU Library General Public License, and the rest of the GNU +dnl gettext package package is covered by the GNU General Public License. +dnl They are *not* in the public domain. + +dnl Authors: +dnl Ulrich Drepper , 1996. + +AC_PREREQ(2.50) + +# Search path for a program which passes the given test. + +dnl AM_PATH_PROG_WITH_TEST(VARIABLE, PROG-TO-CHECK-FOR, +dnl TEST-PERFORMED-ON-FOUND_PROGRAM [, VALUE-IF-NOT-FOUND [, PATH]]) +AC_DEFUN([AM_PATH_PROG_WITH_TEST], +[ +# Prepare PATH_SEPARATOR. +# The user is always right. +if test "${PATH_SEPARATOR+set}" != set; then + echo "#! /bin/sh" >conf$$.sh + echo "exit 0" >>conf$$.sh + chmod +x conf$$.sh + if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then + PATH_SEPARATOR=';' + else + PATH_SEPARATOR=: + fi + rm -f conf$$.sh +fi + +# Find out how to test for executable files. Don't use a zero-byte file, +# as systems may use methods other than mode bits to determine executability. +cat >conf$$.file <<_ASEOF +#! /bin/sh +exit 0 +_ASEOF +chmod +x conf$$.file +if test -x conf$$.file >/dev/null 2>&1; then + ac_executable_p="test -x" +else + ac_executable_p="test -f" +fi +rm -f conf$$.file + +# Extract the first word of "$2", so it can be a program name with args. 
+set dummy $2; ac_word=[$]2 +AC_MSG_CHECKING([for $ac_word]) +AC_CACHE_VAL(ac_cv_path_$1, +[case "[$]$1" in + [[\\/]]* | ?:[[\\/]]*) + ac_cv_path_$1="[$]$1" # Let the user override the test with a path. + ;; + *) + ac_save_IFS="$IFS"; IFS=$PATH_SEPARATOR + for ac_dir in ifelse([$5], , $PATH, [$5]); do + IFS="$ac_save_IFS" + test -z "$ac_dir" && ac_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if $ac_executable_p "$ac_dir/$ac_word$ac_exec_ext"; then + echo "$as_me: trying $ac_dir/$ac_word..." >&AS_MESSAGE_LOG_FD + if [$3]; then + ac_cv_path_$1="$ac_dir/$ac_word$ac_exec_ext" + break 2 + fi + fi + done + done + IFS="$ac_save_IFS" +dnl If no 4th arg is given, leave the cache variable unset, +dnl so AC_PATH_PROGS will keep looking. +ifelse([$4], , , [ test -z "[$]ac_cv_path_$1" && ac_cv_path_$1="$4" +])dnl + ;; +esac])dnl +$1="$ac_cv_path_$1" +if test ifelse([$4], , [-n "[$]$1"], ["[$]$1" != "$4"]); then + AC_MSG_RESULT([$]$1) +else + AC_MSG_RESULT(no) +fi +AC_SUBST($1)dnl +]) diff --git a/autoconf/gettext-macros/signed.m4 b/autoconf/gettext-macros/signed.m4 new file mode 100644 index 00000000..048f5936 --- /dev/null +++ b/autoconf/gettext-macros/signed.m4 @@ -0,0 +1,17 @@ +# signed.m4 serial 1 (gettext-0.10.40) +dnl Copyright (C) 2001-2002 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +dnl From Bruno Haible. + +AC_DEFUN([bh_C_SIGNED], +[ + AC_CACHE_CHECK([for signed], bh_cv_c_signed, + [AC_TRY_COMPILE(, [signed char x;], bh_cv_c_signed=yes, bh_cv_c_signed=no)]) + if test $bh_cv_c_signed = no; then + AC_DEFINE(signed, , + [Define to empty if the C compiler doesn't support this keyword.]) + fi +]) diff --git a/autoconf/gettext-macros/size_max.m4 b/autoconf/gettext-macros/size_max.m4 new file mode 100644 index 00000000..4fe81c7b --- /dev/null +++ b/autoconf/gettext-macros/size_max.m4 @@ -0,0 +1,59 @@ +# size_max.m4 serial 2 +dnl Copyright (C) 2003 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +dnl From Bruno Haible. + +AC_DEFUN([gl_SIZE_MAX], +[ + AC_CHECK_HEADERS(stdint.h) + dnl First test whether the system already has SIZE_MAX. + AC_MSG_CHECKING([for SIZE_MAX]) + result= + AC_EGREP_CPP([Found it], [ +#include +#if HAVE_STDINT_H +#include +#endif +#ifdef SIZE_MAX +Found it +#endif +], result=yes) + if test -z "$result"; then + dnl Define it ourselves. Here we assume that the type 'size_t' is not wider + dnl than the type 'unsigned long'. + dnl The _AC_COMPUTE_INT macro works up to LONG_MAX, since it uses 'expr', + dnl which is guaranteed to work from LONG_MIN to LONG_MAX. + _AC_COMPUTE_INT([~(size_t)0 / 10], res_hi, + [#include ], result=?) + _AC_COMPUTE_INT([~(size_t)0 % 10], res_lo, + [#include ], result=?) + _AC_COMPUTE_INT([sizeof (size_t) <= sizeof (unsigned int)], fits_in_uint, + [#include ], result=?) + if test "$fits_in_uint" = 1; then + dnl Even though SIZE_MAX fits in an unsigned int, it must be of type + dnl 'unsigned long' if the type 'size_t' is the same as 'unsigned long'. 
+ AC_TRY_COMPILE([#include + extern size_t foo; + extern unsigned long foo; + ], [], fits_in_uint=0) + fi + if test -z "$result"; then + if test "$fits_in_uint" = 1; then + result="$res_hi$res_lo"U + else + result="$res_hi$res_lo"UL + fi + else + dnl Shouldn't happen, but who knows... + result='~(size_t)0' + fi + fi + AC_MSG_RESULT([$result]) + if test "$result" != yes; then + AC_DEFINE_UNQUOTED([SIZE_MAX], [$result], + [Define as the maximum value of type 'size_t', if the system doesn't define it.]) + fi +]) diff --git a/autoconf/gettext-macros/stdint_h.m4 b/autoconf/gettext-macros/stdint_h.m4 new file mode 100644 index 00000000..3355f35a --- /dev/null +++ b/autoconf/gettext-macros/stdint_h.m4 @@ -0,0 +1,26 @@ +# stdint_h.m4 serial 5 +dnl Copyright (C) 1997-2004 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +dnl From Paul Eggert. + +# Define HAVE_STDINT_H_WITH_UINTMAX if exists, +# doesn't clash with , and declares uintmax_t. + +AC_DEFUN([gl_AC_HEADER_STDINT_H], +[ + AC_CACHE_CHECK([for stdint.h], gl_cv_header_stdint_h, + [AC_TRY_COMPILE( + [#include +#include ], + [uintmax_t i = (uintmax_t) -1;], + gl_cv_header_stdint_h=yes, + gl_cv_header_stdint_h=no)]) + if test $gl_cv_header_stdint_h = yes; then + AC_DEFINE_UNQUOTED(HAVE_STDINT_H_WITH_UINTMAX, 1, + [Define if exists, doesn't clash with , + and declares uintmax_t. ]) + fi +]) diff --git a/autoconf/gettext-macros/uintmax_t.m4 b/autoconf/gettext-macros/uintmax_t.m4 new file mode 100644 index 00000000..bf83ed74 --- /dev/null +++ b/autoconf/gettext-macros/uintmax_t.m4 @@ -0,0 +1,30 @@ +# uintmax_t.m4 serial 9 +dnl Copyright (C) 1997-2004 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +dnl From Paul Eggert. + +AC_PREREQ(2.13) + +# Define uintmax_t to 'unsigned long' or 'unsigned long long' +# if it is not already defined in or . + +AC_DEFUN([gl_AC_TYPE_UINTMAX_T], +[ + AC_REQUIRE([gl_AC_HEADER_INTTYPES_H]) + AC_REQUIRE([gl_AC_HEADER_STDINT_H]) + if test $gl_cv_header_inttypes_h = no && test $gl_cv_header_stdint_h = no; then + AC_REQUIRE([gl_AC_TYPE_UNSIGNED_LONG_LONG]) + test $ac_cv_type_unsigned_long_long = yes \ + && ac_type='unsigned long long' \ + || ac_type='unsigned long' + AC_DEFINE_UNQUOTED(uintmax_t, $ac_type, + [Define to unsigned long or unsigned long long + if and don't define.]) + else + AC_DEFINE(HAVE_UINTMAX_T, 1, + [Define if you have the 'uintmax_t' type in or .]) + fi +]) diff --git a/autoconf/gettext-macros/ulonglong.m4 b/autoconf/gettext-macros/ulonglong.m4 new file mode 100644 index 00000000..dee10ccc --- /dev/null +++ b/autoconf/gettext-macros/ulonglong.m4 @@ -0,0 +1,23 @@ +# ulonglong.m4 serial 4 +dnl Copyright (C) 1999-2004 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +dnl From Paul Eggert. + +# Define HAVE_UNSIGNED_LONG_LONG if 'unsigned long long' works. 
+ +AC_DEFUN([gl_AC_TYPE_UNSIGNED_LONG_LONG], +[ + AC_CACHE_CHECK([for unsigned long long], ac_cv_type_unsigned_long_long, + [AC_TRY_LINK([unsigned long long ull = 1ULL; int i = 63;], + [unsigned long long ullmax = (unsigned long long) -1; + return ull << i | ull >> i | ullmax / ull | ullmax % ull;], + ac_cv_type_unsigned_long_long=yes, + ac_cv_type_unsigned_long_long=no)]) + if test $ac_cv_type_unsigned_long_long = yes; then + AC_DEFINE(HAVE_UNSIGNED_LONG_LONG, 1, + [Define if you have the 'unsigned long long' type.]) + fi +]) diff --git a/autoconf/gettext-macros/wchar_t.m4 b/autoconf/gettext-macros/wchar_t.m4 new file mode 100644 index 00000000..cde2129a --- /dev/null +++ b/autoconf/gettext-macros/wchar_t.m4 @@ -0,0 +1,20 @@ +# wchar_t.m4 serial 1 (gettext-0.12) +dnl Copyright (C) 2002-2003 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +dnl From Bruno Haible. +dnl Test whether <stddef.h> has the 'wchar_t' type. +dnl Prerequisite: AC_PROG_CC + +AC_DEFUN([gt_TYPE_WCHAR_T], +[ + AC_CACHE_CHECK([for wchar_t], gt_cv_c_wchar_t, + [AC_TRY_COMPILE([#include <stddef.h> + wchar_t foo = (wchar_t)'\0';], , + gt_cv_c_wchar_t=yes, gt_cv_c_wchar_t=no)]) + if test $gt_cv_c_wchar_t = yes; then + AC_DEFINE(HAVE_WCHAR_T, 1, [Define if you have the 'wchar_t' type.]) + fi +]) diff --git a/autoconf/gettext-macros/wint_t.m4 b/autoconf/gettext-macros/wint_t.m4 new file mode 100644 index 00000000..b8fff9c8 --- /dev/null +++ b/autoconf/gettext-macros/wint_t.m4 @@ -0,0 +1,20 @@ +# wint_t.m4 serial 1 (gettext-0.12) +dnl Copyright (C) 2003 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +dnl From Bruno Haible. +dnl Test whether <wchar.h> has the 'wint_t' type. +dnl Prerequisite: AC_PROG_CC + +AC_DEFUN([gt_TYPE_WINT_T], +[ + AC_CACHE_CHECK([for wint_t], gt_cv_c_wint_t, + [AC_TRY_COMPILE([#include <wchar.h> + wint_t foo = (wchar_t)'\0';], , + gt_cv_c_wint_t=yes, gt_cv_c_wint_t=no)]) + if test $gt_cv_c_wint_t = yes; then + AC_DEFINE(HAVE_WINT_T, 1, [Define if you have the 'wint_t' type.]) + fi +]) diff --git a/autoconf/gettext-macros/xsize.m4 b/autoconf/gettext-macros/xsize.m4 new file mode 100644 index 00000000..85bb721e --- /dev/null +++ b/autoconf/gettext-macros/xsize.m4 @@ -0,0 +1,13 @@ +# xsize.m4 serial 3 +dnl Copyright (C) 2003-2004 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +AC_DEFUN([gl_XSIZE], +[ + dnl Prerequisites of lib/xsize.h. + AC_REQUIRE([gl_SIZE_MAX]) + AC_REQUIRE([AC_C_INLINE]) + AC_CHECK_HEADERS(stdint.h) +]) diff --git a/autoconf/install-sh b/autoconf/install-sh new file mode 100755 index 00000000..377bb868 --- /dev/null +++ b/autoconf/install-sh @@ -0,0 +1,527 @@ +#!/bin/sh +# install - install a program, script, or datafile + +scriptversion=2011-11-20.07; # UTC + +# This originates from X11R5 (mit/util/scripts/install.sh), which was +# later released in X11R6 (xc/config/util/install.sh) with the +# following copyright and license.
+# +# Copyright (C) 1994 X Consortium +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN +# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC- +# TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# +# Except as contained in this notice, the name of the X Consortium shall not +# be used in advertising or otherwise to promote the sale, use or other deal- +# ings in this Software without prior written authorization from the X Consor- +# tium. +# +# +# FSF changes to this file are in the public domain. +# +# Calling this script install-sh is preferred over install.sh, to prevent +# 'make' implicit rules from creating a file called install from it +# when there is no Makefile. +# +# This script is compatible with the BSD install script, but was written +# from scratch. + +nl=' +' +IFS=" "" $nl" + +# set DOITPROG to echo to test this script + +# Don't use :- since 4.3BSD and earlier shells don't like it. +doit=${DOITPROG-} +if test -z "$doit"; then + doit_exec=exec +else + doit_exec=$doit +fi + +# Put in absolute file names if you don't have them in your path; +# or use environment vars. + +chgrpprog=${CHGRPPROG-chgrp} +chmodprog=${CHMODPROG-chmod} +chownprog=${CHOWNPROG-chown} +cmpprog=${CMPPROG-cmp} +cpprog=${CPPROG-cp} +mkdirprog=${MKDIRPROG-mkdir} +mvprog=${MVPROG-mv} +rmprog=${RMPROG-rm} +stripprog=${STRIPPROG-strip} + +posix_glob='?' +initialize_posix_glob=' + test "$posix_glob" != "?" || { + if (set -f) 2>/dev/null; then + posix_glob= + else + posix_glob=: + fi + } +' + +posix_mkdir= + +# Desired mode of installed file. +mode=0755 + +chgrpcmd= +chmodcmd=$chmodprog +chowncmd= +mvcmd=$mvprog +rmcmd="$rmprog -f" +stripcmd= + +src= +dst= +dir_arg= +dst_arg= + +copy_on_change=false +no_target_directory= + +usage="\ +Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE + or: $0 [OPTION]... SRCFILES... DIRECTORY + or: $0 [OPTION]... -t DIRECTORY SRCFILES... + or: $0 [OPTION]... -d DIRECTORIES... + +In the 1st form, copy SRCFILE to DSTFILE. +In the 2nd and 3rd, copy all SRCFILES to DIRECTORY. +In the 4th, create DIRECTORIES. + +Options: + --help display this help and exit. + --version display version info and exit. + + -c (ignored) + -C install only if different (preserve the last data modification time) + -d create directories instead of installing files. + -g GROUP $chgrpprog installed files to GROUP. + -m MODE $chmodprog installed files to MODE. + -o USER $chownprog installed files to USER. + -s $stripprog installed files. + -t DIRECTORY install into DIRECTORY. + -T report an error if DSTFILE is a directory. 
+ +Environment variables override the default commands: + CHGRPPROG CHMODPROG CHOWNPROG CMPPROG CPPROG MKDIRPROG MVPROG + RMPROG STRIPPROG +" + +while test $# -ne 0; do + case $1 in + -c) ;; + + -C) copy_on_change=true;; + + -d) dir_arg=true;; + + -g) chgrpcmd="$chgrpprog $2" + shift;; + + --help) echo "$usage"; exit $?;; + + -m) mode=$2 + case $mode in + *' '* | *' '* | *' +'* | *'*'* | *'?'* | *'['*) + echo "$0: invalid mode: $mode" >&2 + exit 1;; + esac + shift;; + + -o) chowncmd="$chownprog $2" + shift;; + + -s) stripcmd=$stripprog;; + + -t) dst_arg=$2 + # Protect names problematic for 'test' and other utilities. + case $dst_arg in + -* | [=\(\)!]) dst_arg=./$dst_arg;; + esac + shift;; + + -T) no_target_directory=true;; + + --version) echo "$0 $scriptversion"; exit $?;; + + --) shift + break;; + + -*) echo "$0: invalid option: $1" >&2 + exit 1;; + + *) break;; + esac + shift +done + +if test $# -ne 0 && test -z "$dir_arg$dst_arg"; then + # When -d is used, all remaining arguments are directories to create. + # When -t is used, the destination is already specified. + # Otherwise, the last argument is the destination. Remove it from $@. + for arg + do + if test -n "$dst_arg"; then + # $@ is not empty: it contains at least $arg. + set fnord "$@" "$dst_arg" + shift # fnord + fi + shift # arg + dst_arg=$arg + # Protect names problematic for 'test' and other utilities. + case $dst_arg in + -* | [=\(\)!]) dst_arg=./$dst_arg;; + esac + done +fi + +if test $# -eq 0; then + if test -z "$dir_arg"; then + echo "$0: no input file specified." >&2 + exit 1 + fi + # It's OK to call 'install-sh -d' without argument. + # This can happen when creating conditional directories. + exit 0 +fi + +if test -z "$dir_arg"; then + do_exit='(exit $ret); exit $ret' + trap "ret=129; $do_exit" 1 + trap "ret=130; $do_exit" 2 + trap "ret=141; $do_exit" 13 + trap "ret=143; $do_exit" 15 + + # Set umask so as not to create temps with too-generous modes. + # However, 'strip' requires both read and write access to temps. + case $mode in + # Optimize common cases. + *644) cp_umask=133;; + *755) cp_umask=22;; + + *[0-7]) + if test -z "$stripcmd"; then + u_plus_rw= + else + u_plus_rw='% 200' + fi + cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;; + *) + if test -z "$stripcmd"; then + u_plus_rw= + else + u_plus_rw=,u+rw + fi + cp_umask=$mode$u_plus_rw;; + esac +fi + +for src +do + # Protect names problematic for 'test' and other utilities. + case $src in + -* | [=\(\)!]) src=./$src;; + esac + + if test -n "$dir_arg"; then + dst=$src + dstdir=$dst + test -d "$dstdir" + dstdir_status=$? + else + + # Waiting for this to be detected by the "$cpprog $src $dsttmp" command + # might cause directories to be created, which would be especially bad + # if $src (and thus $dsttmp) contains '*'. + if test ! -f "$src" && test ! -d "$src"; then + echo "$0: $src does not exist." >&2 + exit 1 + fi + + if test -z "$dst_arg"; then + echo "$0: no destination specified." >&2 + exit 1 + fi + dst=$dst_arg + + # If destination is a directory, append the input filename; won't work + # if double slashes aren't ignored. + if test -d "$dst"; then + if test -n "$no_target_directory"; then + echo "$0: $dst_arg: Is a directory" >&2 + exit 1 + fi + dstdir=$dst + dst=$dstdir/`basename "$src"` + dstdir_status=0 + else + # Prefer dirname, but fall back on a substitute if dirname fails. 
+ dstdir=` + (dirname "$dst") 2>/dev/null || + expr X"$dst" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$dst" : 'X\(//\)[^/]' \| \ + X"$dst" : 'X\(//\)$' \| \ + X"$dst" : 'X\(/\)' \| . 2>/dev/null || + echo X"$dst" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q' + ` + + test -d "$dstdir" + dstdir_status=$? + fi + fi + + obsolete_mkdir_used=false + + if test $dstdir_status != 0; then + case $posix_mkdir in + '') + # Create intermediate dirs using mode 755 as modified by the umask. + # This is like FreeBSD 'install' as of 1997-10-28. + umask=`umask` + case $stripcmd.$umask in + # Optimize common cases. + *[2367][2367]) mkdir_umask=$umask;; + .*0[02][02] | .[02][02] | .[02]) mkdir_umask=22;; + + *[0-7]) + mkdir_umask=`expr $umask + 22 \ + - $umask % 100 % 40 + $umask % 20 \ + - $umask % 10 % 4 + $umask % 2 + `;; + *) mkdir_umask=$umask,go-w;; + esac + + # With -d, create the new directory with the user-specified mode. + # Otherwise, rely on $mkdir_umask. + if test -n "$dir_arg"; then + mkdir_mode=-m$mode + else + mkdir_mode= + fi + + posix_mkdir=false + case $umask in + *[123567][0-7][0-7]) + # POSIX mkdir -p sets u+wx bits regardless of umask, which + # is incompatible with FreeBSD 'install' when (umask & 300) != 0. + ;; + *) + tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$ + trap 'ret=$?; rmdir "$tmpdir/d" "$tmpdir" 2>/dev/null; exit $ret' 0 + + if (umask $mkdir_umask && + exec $mkdirprog $mkdir_mode -p -- "$tmpdir/d") >/dev/null 2>&1 + then + if test -z "$dir_arg" || { + # Check for POSIX incompatibilities with -m. + # HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or + # other-writable bit of parent directory when it shouldn't. + # FreeBSD 6.1 mkdir -m -p sets mode of existing directory. + ls_ld_tmpdir=`ls -ld "$tmpdir"` + case $ls_ld_tmpdir in + d????-?r-*) different_mode=700;; + d????-?--*) different_mode=755;; + *) false;; + esac && + $mkdirprog -m$different_mode -p -- "$tmpdir" && { + ls_ld_tmpdir_1=`ls -ld "$tmpdir"` + test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1" + } + } + then posix_mkdir=: + fi + rmdir "$tmpdir/d" "$tmpdir" + else + # Remove any dirs left behind by ancient mkdir implementations. + rmdir ./$mkdir_mode ./-p ./-- 2>/dev/null + fi + trap '' 0;; + esac;; + esac + + if + $posix_mkdir && ( + umask $mkdir_umask && + $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir" + ) + then : + else + + # The umask is ridiculous, or mkdir does not conform to POSIX, + # or it failed possibly due to a race condition. Create the + # directory the slow way, step by step, checking for races as we go. + + case $dstdir in + /*) prefix='/';; + [-=\(\)!]*) prefix='./';; + *) prefix='';; + esac + + eval "$initialize_posix_glob" + + oIFS=$IFS + IFS=/ + $posix_glob set -f + set fnord $dstdir + shift + $posix_glob set +f + IFS=$oIFS + + prefixes= + + for d + do + test X"$d" = X && continue + + prefix=$prefix$d + if test -d "$prefix"; then + prefixes= + else + if $posix_mkdir; then + (umask=$mkdir_umask && + $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break + # Don't fail if two instances are running concurrently. + test -d "$prefix" || exit 1 + else + case $prefix in + *\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;; + *) qprefix=$prefix;; + esac + prefixes="$prefixes '$qprefix'" + fi + fi + prefix=$prefix/ + done + + if test -n "$prefixes"; then + # Don't fail if two instances are running concurrently. 
+ (umask $mkdir_umask && + eval "\$doit_exec \$mkdirprog $prefixes") || + test -d "$dstdir" || exit 1 + obsolete_mkdir_used=true + fi + fi + fi + + if test -n "$dir_arg"; then + { test -z "$chowncmd" || $doit $chowncmd "$dst"; } && + { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } && + { test "$obsolete_mkdir_used$chowncmd$chgrpcmd" = false || + test -z "$chmodcmd" || $doit $chmodcmd $mode "$dst"; } || exit 1 + else + + # Make a couple of temp file names in the proper directory. + dsttmp=$dstdir/_inst.$$_ + rmtmp=$dstdir/_rm.$$_ + + # Trap to clean up those temp files at exit. + trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0 + + # Copy the file name to the temp name. + (umask $cp_umask && $doit_exec $cpprog "$src" "$dsttmp") && + + # and set any options; do chmod last to preserve setuid bits. + # + # If any of these fail, we abort the whole thing. If we want to + # ignore errors from any of these, just make sure not to ignore + # errors from the above "$doit $cpprog $src $dsttmp" command. + # + { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } && + { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } && + { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } && + { test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } && + + # If -C, don't bother to copy if it wouldn't change the file. + if $copy_on_change && + old=`LC_ALL=C ls -dlL "$dst" 2>/dev/null` && + new=`LC_ALL=C ls -dlL "$dsttmp" 2>/dev/null` && + + eval "$initialize_posix_glob" && + $posix_glob set -f && + set X $old && old=:$2:$4:$5:$6 && + set X $new && new=:$2:$4:$5:$6 && + $posix_glob set +f && + + test "$old" = "$new" && + $cmpprog "$dst" "$dsttmp" >/dev/null 2>&1 + then + rm -f "$dsttmp" + else + # Rename the file to the real destination. + $doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null || + + # The rename failed, perhaps because mv can't rename something else + # to itself, or perhaps because mv is so ancient that it does not + # support -f. + { + # Now remove or move aside any old file at destination location. + # We try this two ways since rm can't unlink itself on some + # systems and the destination file might be busy for other + # reasons. In this case, the final cleanup might fail but the new + # file should still install successfully. + { + test ! -f "$dst" || + $doit $rmcmd -f "$dst" 2>/dev/null || + { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null && + { $doit $rmcmd -f "$rmtmp" 2>/dev/null; :; } + } || + { echo "$0: cannot unlink or rename $dst" >&2 + (exit 1); exit 1 + } + } && + + # Now rename the file to the real destination. + $doit $mvcmd "$dsttmp" "$dst" + } + fi || exit 1 + + trap '' 0 + fi +done + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "scriptversion=" +# time-stamp-format: "%:y-%02m-%02d.%02H" +# time-stamp-time-zone: "UTC" +# time-stamp-end: "; # UTC" +# End: diff --git a/autoconf/install.sh b/autoconf/install.sh new file mode 100755 index 00000000..ea88212b --- /dev/null +++ b/autoconf/install.sh @@ -0,0 +1,235 @@ +#!/bin/sh +# +# install - install a program, script, or datafile +# This comes from X11R5. +# +# $XConsortium: install.sh,v 1.2 89/12/18 14:47:22 jim Exp $ +# +# This script is compatible with the BSD install script, but was written +# from scratch. +# + +# set DOITPROG to echo to test this script + +# Don't use :- since 4.3BSD and earlier shells don't like it. +doit="${DOITPROG-}" + + +# put in absolute paths if you don't have them in your path; or use env. vars. 
+ +mvprog="${MVPROG-mv}" +cpprog="${CPPROG-cp}" +chmodprog="${CHMODPROG-chmod}" +chownprog="${CHOWNPROG-chown}" +chgrpprog="${CHGRPPROG-chgrp}" +stripprog="${STRIPPROG-strip}" +rmprog="${RMPROG-rm}" +mkdirprog="${MKDIRPROG-mkdir}" + +tranformbasename="" +transform_arg="" +instcmd="$mvprog" +chmodcmd="$chmodprog 0755" +chowncmd="" +chgrpcmd="" +stripcmd="" +rmcmd="$rmprog -f" +mvcmd="$mvprog" +src="" +dst="" +dir_arg="" + +while [ x"$1" != x ]; do + case $1 in + -c) instcmd="$cpprog" + shift + continue;; + + -d) dir_arg=true + shift + continue;; + + -m) chmodcmd="$chmodprog $2" + shift + shift + continue;; + + -o) chowncmd="$chownprog $2" + shift + shift + continue;; + + -g) chgrpcmd="$chgrpprog $2" + shift + shift + continue;; + + -s) stripcmd="$stripprog" + shift + continue;; + + -t=*) transformarg=`echo $1 | sed 's/-t=//'` + shift + continue;; + + -b=*) transformbasename=`echo $1 | sed 's/-b=//'` + shift + continue;; + + *) if [ x"$src" = x ] + then + src=$1 + else + # this colon is to work around a 386BSD /bin/sh bug + : + dst=$1 + fi + shift + continue;; + esac +done + +if [ x"$src" = x ] +then + echo "install: no input file specified" + exit 1 +else + true +fi + +if [ x"$dir_arg" != x ]; then + dst=$src + src="" + + if [ -d $dst ]; then + instcmd=: + else + instcmd=mkdir + fi +else + +# Waiting for this to be detected by the "$instcmd $src $dsttmp" command +# might cause directories to be created, which would be especially bad +# if $src (and thus $dsttmp) contains '*'. + + if [ -f $src -o -d $src ] + then + true + else + echo "install: $src does not exist" + exit 1 + fi + + if [ x"$dst" = x ] + then + echo "install: no destination specified" + exit 1 + else + true + fi + +# If destination is a directory, append the input filename; if your system +# does not like double slashes in filenames, you may need to add some logic + + if [ -d $dst ] + then + dst="$dst"/`basename $src` + else + true + fi +fi + +## this sed command emulates the dirname command +dstdir=`echo $dst | sed -e 's,[^/]*$,,;s,/$,,;s,^$,.,'` + +# Make sure that the destination directory exists. +# this part is taken from Noah Friedman's mkinstalldirs script + +# Skip lots of stat calls in the usual case. +if [ ! -d "$dstdir" ]; then +defaultIFS=' +' +IFS="${IFS-${defaultIFS}}" + +oIFS="${IFS}" +# Some sh's can't handle IFS=/ for some reason. +IFS='%' +set - `echo ${dstdir} | sed -e 's@/@%@g' -e 's@^%@/@'` +IFS="${oIFS}" + +pathcomp='' + +while [ $# -ne 0 ] ; do + pathcomp="${pathcomp}${1}" + shift + + if [ ! -d "${pathcomp}" ] ; + then + $mkdirprog "${pathcomp}" + else + true + fi + + pathcomp="${pathcomp}/" +done +fi + +if [ x"$dir_arg" != x ] +then + $doit $instcmd $dst && + + if [ x"$chowncmd" != x ]; then $doit $chowncmd $dst; else true ; fi && + if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dst; else true ; fi && + if [ x"$stripcmd" != x ]; then $doit $stripcmd $dst; else true ; fi && + if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dst; else true ; fi +else + +# If we're going to rename the final executable, determine the name now. + + if [ x"$transformarg" = x ] + then + dstfile=`basename $dst` + else + dstfile=`basename $dst $transformbasename | + sed $transformarg`$transformbasename + fi + +# don't allow the sed command to completely eliminate the filename + + if [ x"$dstfile" = x ] + then + dstfile=`basename $dst` + else + true + fi + +# Make a temp file name in the proper directory. 
+ + dsttmp=$dstdir/#inst.$$# + +# Move or copy the file name to the temp name + + $doit $instcmd $src $dsttmp && + + trap "rm -f ${dsttmp}" 0 && + +# and set any options; do chmod last to preserve setuid bits + +# If any of these fail, we abort the whole thing. If we want to +# ignore errors from any of these, just make sure not to ignore +# errors from the above "$doit $instcmd $src $dsttmp" command. + + if [ x"$chowncmd" != x ]; then $doit $chowncmd $dsttmp; else true;fi && + if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dsttmp; else true;fi && + if [ x"$stripcmd" != x ]; then $doit $stripcmd $dsttmp; else true;fi && + if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dsttmp; else true;fi && + +# Now rename the file to the real destination. + + $doit $rmcmd -f $dstdir/$dstfile && + $doit $mvcmd $dsttmp $dstdir/$dstfile + +fi && + + +exit 0 diff --git a/autoconf/libtool/libtool.m4 b/autoconf/libtool/libtool.m4 new file mode 100644 index 00000000..d7c043f4 --- /dev/null +++ b/autoconf/libtool/libtool.m4 @@ -0,0 +1,7997 @@ +# libtool.m4 - Configure libtool for the host system. -*-Autoconf-*- +# +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, +# 2006, 2007, 2008, 2009, 2010, 2011 Free Software +# Foundation, Inc. +# Written by Gordon Matzigkeit, 1996 +# +# This file is free software; the Free Software Foundation gives +# unlimited permission to copy and/or distribute it, with or without +# modifications, as long as this notice is preserved. + +m4_define([_LT_COPYING], [dnl +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, +# 2006, 2007, 2008, 2009, 2010, 2011 Free Software +# Foundation, Inc. +# Written by Gordon Matzigkeit, 1996 +# +# This file is part of GNU Libtool. +# +# GNU Libtool is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License as +# published by the Free Software Foundation; either version 2 of +# the License, or (at your option) any later version. +# +# As a special exception to the GNU General Public License, +# if you distribute this file as part of a program or library that +# is built using GNU Libtool, you may include this file under the +# same distribution terms that you use for the rest of that program. +# +# GNU Libtool is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GNU Libtool; see the file COPYING. If not, a copy +# can be downloaded from http://www.gnu.org/licenses/gpl.html, or +# obtained by writing to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +]) + +# serial 57 LT_INIT + + +# LT_PREREQ(VERSION) +# ------------------ +# Complain and exit if this libtool version is less that VERSION. 
+m4_defun([LT_PREREQ], +[m4_if(m4_version_compare(m4_defn([LT_PACKAGE_VERSION]), [$1]), -1, + [m4_default([$3], + [m4_fatal([Libtool version $1 or higher is required], + 63)])], + [$2])]) + + +# _LT_CHECK_BUILDDIR +# ------------------ +# Complain if the absolute build directory name contains unusual characters +m4_defun([_LT_CHECK_BUILDDIR], +[case `pwd` in + *\ * | *\ *) + AC_MSG_WARN([Libtool does not cope well with whitespace in `pwd`]) ;; +esac +]) + + +# LT_INIT([OPTIONS]) +# ------------------ +AC_DEFUN([LT_INIT], +[AC_PREREQ([2.58])dnl We use AC_INCLUDES_DEFAULT +AC_REQUIRE([AC_CONFIG_AUX_DIR_DEFAULT])dnl +AC_BEFORE([$0], [LT_LANG])dnl +AC_BEFORE([$0], [LT_OUTPUT])dnl +AC_BEFORE([$0], [LTDL_INIT])dnl +m4_require([_LT_CHECK_BUILDDIR])dnl + +dnl Autoconf doesn't catch unexpanded LT_ macros by default: +m4_pattern_forbid([^_?LT_[A-Z_]+$])dnl +m4_pattern_allow([^(_LT_EOF|LT_DLGLOBAL|LT_DLLAZY_OR_NOW|LT_MULTI_MODULE)$])dnl +dnl aclocal doesn't pull ltoptions.m4, ltsugar.m4, or ltversion.m4 +dnl unless we require an AC_DEFUNed macro: +AC_REQUIRE([LTOPTIONS_VERSION])dnl +AC_REQUIRE([LTSUGAR_VERSION])dnl +AC_REQUIRE([LTVERSION_VERSION])dnl +AC_REQUIRE([LTOBSOLETE_VERSION])dnl +m4_require([_LT_PROG_LTMAIN])dnl + +_LT_SHELL_INIT([SHELL=${CONFIG_SHELL-/bin/sh}]) + +dnl Parse OPTIONS +_LT_SET_OPTIONS([$0], [$1]) + +# This can be used to rebuild libtool when needed +LIBTOOL_DEPS="$ltmain" + +# Always use our own libtool. +LIBTOOL='$(SHELL) $(top_builddir)/libtool' +AC_SUBST(LIBTOOL)dnl + +_LT_SETUP + +# Only expand once: +m4_define([LT_INIT]) +])# LT_INIT + +# Old names: +AU_ALIAS([AC_PROG_LIBTOOL], [LT_INIT]) +AU_ALIAS([AM_PROG_LIBTOOL], [LT_INIT]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_PROG_LIBTOOL], []) +dnl AC_DEFUN([AM_PROG_LIBTOOL], []) + + +# _LT_CC_BASENAME(CC) +# ------------------- +# Calculate cc_basename. Skip known compiler wrappers and cross-prefix. +m4_defun([_LT_CC_BASENAME], +[for cc_temp in $1""; do + case $cc_temp in + compile | *[[\\/]]compile | ccache | *[[\\/]]ccache ) ;; + distcc | *[[\\/]]distcc | purify | *[[\\/]]purify ) ;; + \-*) ;; + *) break;; + esac +done +cc_basename=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` +]) + + +# _LT_FILEUTILS_DEFAULTS +# ---------------------- +# It is okay to use these file commands and assume they have been set +# sensibly after `m4_require([_LT_FILEUTILS_DEFAULTS])'. 
+m4_defun([_LT_FILEUTILS_DEFAULTS], +[: ${CP="cp -f"} +: ${MV="mv -f"} +: ${RM="rm -f"} +])# _LT_FILEUTILS_DEFAULTS + + +# _LT_SETUP +# --------- +m4_defun([_LT_SETUP], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +AC_REQUIRE([AC_CANONICAL_BUILD])dnl +AC_REQUIRE([_LT_PREPARE_SED_QUOTE_VARS])dnl +AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH])dnl + +_LT_DECL([], [PATH_SEPARATOR], [1], [The PATH separator for the build system])dnl +dnl +_LT_DECL([], [host_alias], [0], [The host system])dnl +_LT_DECL([], [host], [0])dnl +_LT_DECL([], [host_os], [0])dnl +dnl +_LT_DECL([], [build_alias], [0], [The build system])dnl +_LT_DECL([], [build], [0])dnl +_LT_DECL([], [build_os], [0])dnl +dnl +AC_REQUIRE([AC_PROG_CC])dnl +AC_REQUIRE([LT_PATH_LD])dnl +AC_REQUIRE([LT_PATH_NM])dnl +dnl +AC_REQUIRE([AC_PROG_LN_S])dnl +test -z "$LN_S" && LN_S="ln -s" +_LT_DECL([], [LN_S], [1], [Whether we need soft or hard links])dnl +dnl +AC_REQUIRE([LT_CMD_MAX_LEN])dnl +_LT_DECL([objext], [ac_objext], [0], [Object file suffix (normally "o")])dnl +_LT_DECL([], [exeext], [0], [Executable file suffix (normally "")])dnl +dnl +m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_CHECK_SHELL_FEATURES])dnl +m4_require([_LT_PATH_CONVERSION_FUNCTIONS])dnl +m4_require([_LT_CMD_RELOAD])dnl +m4_require([_LT_CHECK_MAGIC_METHOD])dnl +m4_require([_LT_CHECK_SHAREDLIB_FROM_LINKLIB])dnl +m4_require([_LT_CMD_OLD_ARCHIVE])dnl +m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl +m4_require([_LT_WITH_SYSROOT])dnl + +_LT_CONFIG_LIBTOOL_INIT([ +# See if we are running on zsh, and set the options which allow our +# commands through without removal of \ escapes INIT. +if test -n "\${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST +fi +]) +if test -n "${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST +fi + +_LT_CHECK_OBJDIR + +m4_require([_LT_TAG_COMPILER])dnl + +case $host_os in +aix3*) + # AIX sometimes has problems with the GCC collect2 program. For some + # reason, if we set the COLLECT_NAMES environment variable, the problems + # vanish in a puff of smoke. + if test "X${COLLECT_NAMES+set}" != Xset; then + COLLECT_NAMES= + export COLLECT_NAMES + fi + ;; +esac + +# Global variables: +ofile=libtool +can_build_shared=yes + +# All known linkers require a `.a' archive for static linking (except MSVC, +# which needs '.lib'). +libext=a + +with_gnu_ld="$lt_cv_prog_gnu_ld" + +old_CC="$CC" +old_CFLAGS="$CFLAGS" + +# Set sane defaults for various variables +test -z "$CC" && CC=cc +test -z "$LTCC" && LTCC=$CC +test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS +test -z "$LD" && LD=ld +test -z "$ac_objext" && ac_objext=o + +_LT_CC_BASENAME([$compiler]) + +# Only perform the check for file, if the check method requires it +test -z "$MAGIC_CMD" && MAGIC_CMD=file +case $deplibs_check_method in +file_magic*) + if test "$file_magic_cmd" = '$MAGIC_CMD'; then + _LT_PATH_MAGIC + fi + ;; +esac + +# Use C for the default configuration in the libtool script +LT_SUPPORTED_TAG([CC]) +_LT_LANG_C_CONFIG +_LT_LANG_DEFAULT_CONFIG +_LT_CONFIG_COMMANDS +])# _LT_SETUP + + +# _LT_PREPARE_SED_QUOTE_VARS +# -------------------------- +# Define a few sed substitution that help us do robust quoting. +m4_defun([_LT_PREPARE_SED_QUOTE_VARS], +[# Backslashify metacharacters that are still active within +# double-quoted strings. +sed_quote_subst='s/\([["`$\\]]\)/\\\1/g' + +# Same as above, but do not quote variable references. +double_quote_subst='s/\([["`\\]]\)/\\\1/g' + +# Sed substitution to delay expansion of an escaped shell variable in a +# double_quote_subst'ed string. 
+delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' + +# Sed substitution to delay expansion of an escaped single quote. +delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g' + +# Sed substitution to avoid accidental globbing in evaled expressions +no_glob_subst='s/\*/\\\*/g' +]) + +# _LT_PROG_LTMAIN +# --------------- +# Note that this code is called both from `configure', and `config.status' +# now that we use AC_CONFIG_COMMANDS to generate libtool. Notably, +# `config.status' has no value for ac_aux_dir unless we are using Automake, +# so we pass a copy along to make sure it has a sensible value anyway. +m4_defun([_LT_PROG_LTMAIN], +[m4_ifdef([AC_REQUIRE_AUX_FILE], [AC_REQUIRE_AUX_FILE([ltmain.sh])])dnl +_LT_CONFIG_LIBTOOL_INIT([ac_aux_dir='$ac_aux_dir']) +ltmain="$ac_aux_dir/ltmain.sh" +])# _LT_PROG_LTMAIN + + +## ------------------------------------- ## +## Accumulate code for creating libtool. ## +## ------------------------------------- ## + +# So that we can recreate a full libtool script including additional +# tags, we accumulate the chunks of code to send to AC_CONFIG_COMMANDS +# in macros and then make a single call at the end using the `libtool' +# label. + + +# _LT_CONFIG_LIBTOOL_INIT([INIT-COMMANDS]) +# ---------------------------------------- +# Register INIT-COMMANDS to be passed to AC_CONFIG_COMMANDS later. +m4_define([_LT_CONFIG_LIBTOOL_INIT], +[m4_ifval([$1], + [m4_append([_LT_OUTPUT_LIBTOOL_INIT], + [$1 +])])]) + +# Initialize. +m4_define([_LT_OUTPUT_LIBTOOL_INIT]) + + +# _LT_CONFIG_LIBTOOL([COMMANDS]) +# ------------------------------ +# Register COMMANDS to be passed to AC_CONFIG_COMMANDS later. +m4_define([_LT_CONFIG_LIBTOOL], +[m4_ifval([$1], + [m4_append([_LT_OUTPUT_LIBTOOL_COMMANDS], + [$1 +])])]) + +# Initialize. +m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS]) + + +# _LT_CONFIG_SAVE_COMMANDS([COMMANDS], [INIT_COMMANDS]) +# ----------------------------------------------------- +m4_defun([_LT_CONFIG_SAVE_COMMANDS], +[_LT_CONFIG_LIBTOOL([$1]) +_LT_CONFIG_LIBTOOL_INIT([$2]) +]) + + +# _LT_FORMAT_COMMENT([COMMENT]) +# ----------------------------- +# Add leading comment marks to the start of each line, and a trailing +# full-stop to the whole comment if one is not present already. +m4_define([_LT_FORMAT_COMMENT], +[m4_ifval([$1], [ +m4_bpatsubst([m4_bpatsubst([$1], [^ *], [# ])], + [['`$\]], [\\\&])]m4_bmatch([$1], [[!?.]$], [], [.]) +)]) + + + +## ------------------------ ## +## FIXME: Eliminate VARNAME ## +## ------------------------ ## + + +# _LT_DECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION], [IS-TAGGED?]) +# ------------------------------------------------------------------- +# CONFIGNAME is the name given to the value in the libtool script. +# VARNAME is the (base) name used in the configure script. +# VALUE may be 0, 1 or 2 for a computed quote escaped value based on +# VARNAME. Any other value will be used directly. 
+m4_define([_LT_DECL], +[lt_if_append_uniq([lt_decl_varnames], [$2], [, ], + [lt_dict_add_subkey([lt_decl_dict], [$2], [libtool_name], + [m4_ifval([$1], [$1], [$2])]) + lt_dict_add_subkey([lt_decl_dict], [$2], [value], [$3]) + m4_ifval([$4], + [lt_dict_add_subkey([lt_decl_dict], [$2], [description], [$4])]) + lt_dict_add_subkey([lt_decl_dict], [$2], + [tagged?], [m4_ifval([$5], [yes], [no])])]) +]) + + +# _LT_TAGDECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION]) +# -------------------------------------------------------- +m4_define([_LT_TAGDECL], [_LT_DECL([$1], [$2], [$3], [$4], [yes])]) + + +# lt_decl_tag_varnames([SEPARATOR], [VARNAME1...]) +# ------------------------------------------------ +m4_define([lt_decl_tag_varnames], +[_lt_decl_filter([tagged?], [yes], $@)]) + + +# _lt_decl_filter(SUBKEY, VALUE, [SEPARATOR], [VARNAME1..]) +# --------------------------------------------------------- +m4_define([_lt_decl_filter], +[m4_case([$#], + [0], [m4_fatal([$0: too few arguments: $#])], + [1], [m4_fatal([$0: too few arguments: $#: $1])], + [2], [lt_dict_filter([lt_decl_dict], [$1], [$2], [], lt_decl_varnames)], + [3], [lt_dict_filter([lt_decl_dict], [$1], [$2], [$3], lt_decl_varnames)], + [lt_dict_filter([lt_decl_dict], $@)])[]dnl +]) + + +# lt_decl_quote_varnames([SEPARATOR], [VARNAME1...]) +# -------------------------------------------------- +m4_define([lt_decl_quote_varnames], +[_lt_decl_filter([value], [1], $@)]) + + +# lt_decl_dquote_varnames([SEPARATOR], [VARNAME1...]) +# --------------------------------------------------- +m4_define([lt_decl_dquote_varnames], +[_lt_decl_filter([value], [2], $@)]) + + +# lt_decl_varnames_tagged([SEPARATOR], [VARNAME1...]) +# --------------------------------------------------- +m4_define([lt_decl_varnames_tagged], +[m4_assert([$# <= 2])dnl +_$0(m4_quote(m4_default([$1], [[, ]])), + m4_ifval([$2], [[$2]], [m4_dquote(lt_decl_tag_varnames)]), + m4_split(m4_normalize(m4_quote(_LT_TAGS)), [ ]))]) +m4_define([_lt_decl_varnames_tagged], +[m4_ifval([$3], [lt_combine([$1], [$2], [_], $3)])]) + + +# lt_decl_all_varnames([SEPARATOR], [VARNAME1...]) +# ------------------------------------------------ +m4_define([lt_decl_all_varnames], +[_$0(m4_quote(m4_default([$1], [[, ]])), + m4_if([$2], [], + m4_quote(lt_decl_varnames), + m4_quote(m4_shift($@))))[]dnl +]) +m4_define([_lt_decl_all_varnames], +[lt_join($@, lt_decl_varnames_tagged([$1], + lt_decl_tag_varnames([[, ]], m4_shift($@))))dnl +]) + + +# _LT_CONFIG_STATUS_DECLARE([VARNAME]) +# ------------------------------------ +# Quote a variable value, and forward it to `config.status' so that its +# declaration there will have the same value as in `configure'. VARNAME +# must have a single quote delimited value for this to work. +m4_define([_LT_CONFIG_STATUS_DECLARE], +[$1='`$ECHO "$][$1" | $SED "$delay_single_quote_subst"`']) + + +# _LT_CONFIG_STATUS_DECLARATIONS +# ------------------------------ +# We delimit libtool config variables with single quotes, so when +# we write them to config.status, we have to be sure to quote all +# embedded single quotes properly. 
In configure, this macro expands
+# each variable declared with _LT_DECL (and _LT_TAGDECL) into:
+#
+#    <var>='`$ECHO "$<var>" | $SED "$delay_single_quote_subst"`'
+m4_defun([_LT_CONFIG_STATUS_DECLARATIONS],
+[m4_foreach([_lt_var], m4_quote(lt_decl_all_varnames),
+    [m4_n([_LT_CONFIG_STATUS_DECLARE(_lt_var)])])])
+
+
+# _LT_LIBTOOL_TAGS
+# ----------------
+# Output comment and list of tags supported by the script
+m4_defun([_LT_LIBTOOL_TAGS],
+[_LT_FORMAT_COMMENT([The names of the tagged configurations supported by this script])dnl
+available_tags="_LT_TAGS"dnl
+])
+
+
+# _LT_LIBTOOL_DECLARE(VARNAME, [TAG])
+# -----------------------------------
+# Extract the dictionary values for VARNAME (optionally with TAG) and
+# expand to a commented shell variable setting:
+#
+#    # Some comment about what VAR is for.
+#    visible_name=$lt_internal_name
+m4_define([_LT_LIBTOOL_DECLARE],
+[_LT_FORMAT_COMMENT(m4_quote(lt_dict_fetch([lt_decl_dict], [$1],
+    [description])))[]dnl
+m4_pushdef([_libtool_name],
+    m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [libtool_name])))[]dnl
+m4_case(m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [value])),
+    [0], [_libtool_name=[$]$1],
+    [1], [_libtool_name=$lt_[]$1],
+    [2], [_libtool_name=$lt_[]$1],
+    [_libtool_name=lt_dict_fetch([lt_decl_dict], [$1], [value])])[]dnl
+m4_ifval([$2], [_$2])[]m4_popdef([_libtool_name])[]dnl
+])
+
+
+# _LT_LIBTOOL_CONFIG_VARS
+# -----------------------
+# Produce commented declarations of non-tagged libtool config variables
+# suitable for insertion in the LIBTOOL CONFIG section of the `libtool'
+# script.  Tagged libtool config variables (even for the LIBTOOL CONFIG
+# section) are produced by _LT_LIBTOOL_TAG_VARS.
+m4_defun([_LT_LIBTOOL_CONFIG_VARS],
+[m4_foreach([_lt_var],
+    m4_quote(_lt_decl_filter([tagged?], [no], [], lt_decl_varnames)),
+    [m4_n([_LT_LIBTOOL_DECLARE(_lt_var)])])])
+
+
+# _LT_LIBTOOL_TAG_VARS(TAG)
+# -------------------------
+m4_define([_LT_LIBTOOL_TAG_VARS],
+[m4_foreach([_lt_var], m4_quote(lt_decl_tag_varnames),
+    [m4_n([_LT_LIBTOOL_DECLARE(_lt_var, [$1])])])])
+
+
+# _LT_TAGVAR(VARNAME, [TAGNAME])
+# ------------------------------
+m4_define([_LT_TAGVAR], [m4_ifval([$2], [$1_$2], [$1])])
+
+
+# _LT_CONFIG_COMMANDS
+# -------------------
+# Send accumulated output to $CONFIG_STATUS.  Thanks to the lists of
+# variables for single and double quote escaping we saved from calls
+# to _LT_DECL, we can put quote escaped variables declarations
+# into `config.status', and then the shell code to quote escape them in
+# for loops in `config.status'.  Finally, any additional code accumulated
+# from calls to _LT_CONFIG_LIBTOOL_INIT is expanded.
+m4_defun([_LT_CONFIG_COMMANDS],
+[AC_PROVIDE_IFELSE([LT_OUTPUT],
+    dnl If the libtool generation code has been placed in $CONFIG_LT,
+    dnl instead of duplicating it all over again into config.status,
+    dnl then we will have config.status run $CONFIG_LT later, so it
+    dnl needs to know what name is stored there:
+        [AC_CONFIG_COMMANDS([libtool],
+            [$SHELL $CONFIG_LT || AS_EXIT(1)], [CONFIG_LT='$CONFIG_LT'])],
+    dnl If the libtool generation code is destined for config.status,
+    dnl expand the accumulated commands and init code now:
+        [AC_CONFIG_COMMANDS([libtool],
+            [_LT_OUTPUT_LIBTOOL_COMMANDS], [_LT_OUTPUT_LIBTOOL_COMMANDS_INIT])])
+])#_LT_CONFIG_COMMANDS
+
+
+# Initialize.
+m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS_INIT],
+[
+
+# The HP-UX ksh and POSIX shell print the target directory to stdout
+# if CDPATH is set.
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+
+sed_quote_subst='$sed_quote_subst'
+double_quote_subst='$double_quote_subst'
+delay_variable_subst='$delay_variable_subst'
+_LT_CONFIG_STATUS_DECLARATIONS
+LTCC='$LTCC'
+LTCFLAGS='$LTCFLAGS'
+compiler='$compiler_DEFAULT'
+
+# A function that is used when there is no print builtin or printf.
+func_fallback_echo ()
+{
+  eval 'cat <<_LTECHO_EOF
+\$[]1
+_LTECHO_EOF'
+}
+
+# Quote evaled strings.
+for var in lt_decl_all_varnames([[ \
+]], lt_decl_quote_varnames); do
+    case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in
+    *[[\\\\\\\`\\"\\\$]]*)
+      eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED \\"\\\$sed_quote_subst\\"\\\`\\\\\\""
+      ;;
+    *)
+      eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\""
+      ;;
+    esac
+done
+
+# Double-quote double-evaled strings.
+for var in lt_decl_all_varnames([[ \
+]], lt_decl_dquote_varnames); do
+    case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in
+    *[[\\\\\\\`\\"\\\$]]*)
+      eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\""
+      ;;
+    *)
+      eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\""
+      ;;
+    esac
+done
+
+_LT_OUTPUT_LIBTOOL_INIT
+])
+
+# _LT_GENERATED_FILE_INIT(FILE, [COMMENT])
+# ------------------------------------
+# Generate a child script FILE with all initialization necessary to
+# reuse the environment learned by the parent script, and make the
+# file executable.  If COMMENT is supplied, it is inserted after the
+# `#!' sequence but before initialization text begins.  After this
+# macro, additional text can be appended to FILE to form the body of
+# the child script.  The macro ends with non-zero status if the
+# file could not be fully written (such as if the disk is full).
+m4_ifdef([AS_INIT_GENERATED],
+[m4_defun([_LT_GENERATED_FILE_INIT],[AS_INIT_GENERATED($@)])],
+[m4_defun([_LT_GENERATED_FILE_INIT],
+[m4_require([AS_PREPARE])]dnl
+[m4_pushdef([AS_MESSAGE_LOG_FD])]dnl
+[lt_write_fail=0
+cat >$1 <<_ASEOF || lt_write_fail=1
+#! $SHELL
+# Generated by $as_me.
+$2
+SHELL=\${CONFIG_SHELL-$SHELL}
+export SHELL
+_ASEOF
+cat >>$1 <<\_ASEOF || lt_write_fail=1
+AS_SHELL_SANITIZE
+_AS_PREPARE
+exec AS_MESSAGE_FD>&1
+_ASEOF
+test $lt_write_fail = 0 && chmod +x $1[]dnl
+m4_popdef([AS_MESSAGE_LOG_FD])])])# _LT_GENERATED_FILE_INIT

+# LT_OUTPUT
+# ---------
+# This macro allows early generation of the libtool script (before
+# AC_OUTPUT is called), incase it is used in configure for compilation
+# tests.
+AC_DEFUN([LT_OUTPUT],
+[: ${CONFIG_LT=./config.lt}
+AC_MSG_NOTICE([creating $CONFIG_LT])
+_LT_GENERATED_FILE_INIT(["$CONFIG_LT"],
+[# Run this file to recreate a libtool stub with the current configuration.])
+
+cat >>"$CONFIG_LT" <<\_LTEOF
+lt_cl_silent=false
+exec AS_MESSAGE_LOG_FD>>config.log
+{
+  echo
+  AS_BOX([Running $as_me.])
+} >&AS_MESSAGE_LOG_FD
+
+lt_cl_help="\
+\`$as_me' creates a local libtool stub from the current configuration,
+for use in further configure time tests before the real libtool is
+generated.
+
+Usage: $[0] [[OPTIONS]]
+
+  -h, --help      print this help, then exit
+  -V, --version   print version number, then exit
+  -q, --quiet     do not print progress messages
+  -d, --debug     don't remove temporary files
+
+Report bugs to <bug-libtool@gnu.org>."
+
+lt_cl_version="\
+m4_ifset([AC_PACKAGE_NAME], [AC_PACKAGE_NAME ])config.lt[]dnl
+m4_ifset([AC_PACKAGE_VERSION], [ AC_PACKAGE_VERSION])
+configured by $[0], generated by m4_PACKAGE_STRING.
+
+Copyright (C) 2011 Free Software Foundation, Inc.
+This config.lt script is free software; the Free Software Foundation +gives unlimited permision to copy, distribute and modify it." + +while test $[#] != 0 +do + case $[1] in + --version | --v* | -V ) + echo "$lt_cl_version"; exit 0 ;; + --help | --h* | -h ) + echo "$lt_cl_help"; exit 0 ;; + --debug | --d* | -d ) + debug=: ;; + --quiet | --q* | --silent | --s* | -q ) + lt_cl_silent=: ;; + + -*) AC_MSG_ERROR([unrecognized option: $[1] +Try \`$[0] --help' for more information.]) ;; + + *) AC_MSG_ERROR([unrecognized argument: $[1] +Try \`$[0] --help' for more information.]) ;; + esac + shift +done + +if $lt_cl_silent; then + exec AS_MESSAGE_FD>/dev/null +fi +_LTEOF + +cat >>"$CONFIG_LT" <<_LTEOF +_LT_OUTPUT_LIBTOOL_COMMANDS_INIT +_LTEOF + +cat >>"$CONFIG_LT" <<\_LTEOF +AC_MSG_NOTICE([creating $ofile]) +_LT_OUTPUT_LIBTOOL_COMMANDS +AS_EXIT(0) +_LTEOF +chmod +x "$CONFIG_LT" + +# configure is writing to config.log, but config.lt does its own redirection, +# appending to config.log, which fails on DOS, as config.log is still kept +# open by configure. Here we exec the FD to /dev/null, effectively closing +# config.log, so it can be properly (re)opened and appended to by config.lt. +lt_cl_success=: +test "$silent" = yes && + lt_config_lt_args="$lt_config_lt_args --quiet" +exec AS_MESSAGE_LOG_FD>/dev/null +$SHELL "$CONFIG_LT" $lt_config_lt_args || lt_cl_success=false +exec AS_MESSAGE_LOG_FD>>config.log +$lt_cl_success || AS_EXIT(1) +])# LT_OUTPUT + + +# _LT_CONFIG(TAG) +# --------------- +# If TAG is the built-in tag, create an initial libtool script with a +# default configuration from the untagged config vars. Otherwise add code +# to config.status for appending the configuration named by TAG from the +# matching tagged config vars. +m4_defun([_LT_CONFIG], +[m4_require([_LT_FILEUTILS_DEFAULTS])dnl +_LT_CONFIG_SAVE_COMMANDS([ + m4_define([_LT_TAG], m4_if([$1], [], [C], [$1]))dnl + m4_if(_LT_TAG, [C], [ + # See if we are running on zsh, and set the options which allow our + # commands through without removal of \ escapes. + if test -n "${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST + fi + + cfgfile="${ofile}T" + trap "$RM \"$cfgfile\"; exit 1" 1 2 15 + $RM "$cfgfile" + + cat <<_LT_EOF >> "$cfgfile" +#! $SHELL + +# `$ECHO "$ofile" | sed 's%^.*/%%'` - Provide generalized library-building support services. +# Generated automatically by $as_me ($PACKAGE$TIMESTAMP) $VERSION +# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: +# NOTE: Changes made to this file will be lost: look at ltmain.sh. +# +_LT_COPYING +_LT_LIBTOOL_TAGS + +# ### BEGIN LIBTOOL CONFIG +_LT_LIBTOOL_CONFIG_VARS +_LT_LIBTOOL_TAG_VARS +# ### END LIBTOOL CONFIG + +_LT_EOF + + case $host_os in + aix3*) + cat <<\_LT_EOF >> "$cfgfile" +# AIX sometimes has problems with the GCC collect2 program. For some +# reason, if we set the COLLECT_NAMES environment variable, the problems +# vanish in a puff of smoke. +if test "X${COLLECT_NAMES+set}" != Xset; then + COLLECT_NAMES= + export COLLECT_NAMES +fi +_LT_EOF + ;; + esac + + _LT_PROG_LTMAIN + + # We use sed instead of cat because bash on DJGPP gets confused if + # if finds mixed CR/LF and LF-only lines. Since sed operates in + # text mode, it properly converts lines to CR/LF. This bash problem + # is reportedly fixed, but why not run on old versions too? 
+ sed '$q' "$ltmain" >> "$cfgfile" \ + || (rm -f "$cfgfile"; exit 1) + + _LT_PROG_REPLACE_SHELLFNS + + mv -f "$cfgfile" "$ofile" || + (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") + chmod +x "$ofile" +], +[cat <<_LT_EOF >> "$ofile" + +dnl Unfortunately we have to use $1 here, since _LT_TAG is not expanded +dnl in a comment (ie after a #). +# ### BEGIN LIBTOOL TAG CONFIG: $1 +_LT_LIBTOOL_TAG_VARS(_LT_TAG) +# ### END LIBTOOL TAG CONFIG: $1 +_LT_EOF +])dnl /m4_if +], +[m4_if([$1], [], [ + PACKAGE='$PACKAGE' + VERSION='$VERSION' + TIMESTAMP='$TIMESTAMP' + RM='$RM' + ofile='$ofile'], []) +])dnl /_LT_CONFIG_SAVE_COMMANDS +])# _LT_CONFIG + + +# LT_SUPPORTED_TAG(TAG) +# --------------------- +# Trace this macro to discover what tags are supported by the libtool +# --tag option, using: +# autoconf --trace 'LT_SUPPORTED_TAG:$1' +AC_DEFUN([LT_SUPPORTED_TAG], []) + + +# C support is built-in for now +m4_define([_LT_LANG_C_enabled], []) +m4_define([_LT_TAGS], []) + + +# LT_LANG(LANG) +# ------------- +# Enable libtool support for the given language if not already enabled. +AC_DEFUN([LT_LANG], +[AC_BEFORE([$0], [LT_OUTPUT])dnl +m4_case([$1], + [C], [_LT_LANG(C)], + [C++], [_LT_LANG(CXX)], + [Go], [_LT_LANG(GO)], + [Java], [_LT_LANG(GCJ)], + [Fortran 77], [_LT_LANG(F77)], + [Fortran], [_LT_LANG(FC)], + [Windows Resource], [_LT_LANG(RC)], + [m4_ifdef([_LT_LANG_]$1[_CONFIG], + [_LT_LANG($1)], + [m4_fatal([$0: unsupported language: "$1"])])])dnl +])# LT_LANG + + +# _LT_LANG(LANGNAME) +# ------------------ +m4_defun([_LT_LANG], +[m4_ifdef([_LT_LANG_]$1[_enabled], [], + [LT_SUPPORTED_TAG([$1])dnl + m4_append([_LT_TAGS], [$1 ])dnl + m4_define([_LT_LANG_]$1[_enabled], [])dnl + _LT_LANG_$1_CONFIG($1)])dnl +])# _LT_LANG + + +m4_ifndef([AC_PROG_GO], [ +############################################################ +# NOTE: This macro has been submitted for inclusion into # +# GNU Autoconf as AC_PROG_GO. When it is available in # +# a released version of Autoconf we should remove this # +# macro and use it instead. # +############################################################ +m4_defun([AC_PROG_GO], +[AC_LANG_PUSH(Go)dnl +AC_ARG_VAR([GOC], [Go compiler command])dnl +AC_ARG_VAR([GOFLAGS], [Go compiler flags])dnl +_AC_ARG_VAR_LDFLAGS()dnl +AC_CHECK_TOOL(GOC, gccgo) +if test -z "$GOC"; then + if test -n "$ac_tool_prefix"; then + AC_CHECK_PROG(GOC, [${ac_tool_prefix}gccgo], [${ac_tool_prefix}gccgo]) + fi +fi +if test -z "$GOC"; then + AC_CHECK_PROG(GOC, gccgo, gccgo, false) +fi +])#m4_defun +])#m4_ifndef + + +# _LT_LANG_DEFAULT_CONFIG +# ----------------------- +m4_defun([_LT_LANG_DEFAULT_CONFIG], +[AC_PROVIDE_IFELSE([AC_PROG_CXX], + [LT_LANG(CXX)], + [m4_define([AC_PROG_CXX], defn([AC_PROG_CXX])[LT_LANG(CXX)])]) + +AC_PROVIDE_IFELSE([AC_PROG_F77], + [LT_LANG(F77)], + [m4_define([AC_PROG_F77], defn([AC_PROG_F77])[LT_LANG(F77)])]) + +AC_PROVIDE_IFELSE([AC_PROG_FC], + [LT_LANG(FC)], + [m4_define([AC_PROG_FC], defn([AC_PROG_FC])[LT_LANG(FC)])]) + +dnl The call to [A][M_PROG_GCJ] is quoted like that to stop aclocal +dnl pulling things in needlessly. 
+AC_PROVIDE_IFELSE([AC_PROG_GCJ], + [LT_LANG(GCJ)], + [AC_PROVIDE_IFELSE([A][M_PROG_GCJ], + [LT_LANG(GCJ)], + [AC_PROVIDE_IFELSE([LT_PROG_GCJ], + [LT_LANG(GCJ)], + [m4_ifdef([AC_PROG_GCJ], + [m4_define([AC_PROG_GCJ], defn([AC_PROG_GCJ])[LT_LANG(GCJ)])]) + m4_ifdef([A][M_PROG_GCJ], + [m4_define([A][M_PROG_GCJ], defn([A][M_PROG_GCJ])[LT_LANG(GCJ)])]) + m4_ifdef([LT_PROG_GCJ], + [m4_define([LT_PROG_GCJ], defn([LT_PROG_GCJ])[LT_LANG(GCJ)])])])])]) + +AC_PROVIDE_IFELSE([AC_PROG_GO], + [LT_LANG(GO)], + [m4_define([AC_PROG_GO], defn([AC_PROG_GO])[LT_LANG(GO)])]) + +AC_PROVIDE_IFELSE([LT_PROG_RC], + [LT_LANG(RC)], + [m4_define([LT_PROG_RC], defn([LT_PROG_RC])[LT_LANG(RC)])]) +])# _LT_LANG_DEFAULT_CONFIG + +# Obsolete macros: +AU_DEFUN([AC_LIBTOOL_CXX], [LT_LANG(C++)]) +AU_DEFUN([AC_LIBTOOL_F77], [LT_LANG(Fortran 77)]) +AU_DEFUN([AC_LIBTOOL_FC], [LT_LANG(Fortran)]) +AU_DEFUN([AC_LIBTOOL_GCJ], [LT_LANG(Java)]) +AU_DEFUN([AC_LIBTOOL_RC], [LT_LANG(Windows Resource)]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_CXX], []) +dnl AC_DEFUN([AC_LIBTOOL_F77], []) +dnl AC_DEFUN([AC_LIBTOOL_FC], []) +dnl AC_DEFUN([AC_LIBTOOL_GCJ], []) +dnl AC_DEFUN([AC_LIBTOOL_RC], []) + + +# _LT_TAG_COMPILER +# ---------------- +m4_defun([_LT_TAG_COMPILER], +[AC_REQUIRE([AC_PROG_CC])dnl + +_LT_DECL([LTCC], [CC], [1], [A C compiler])dnl +_LT_DECL([LTCFLAGS], [CFLAGS], [1], [LTCC compiler flags])dnl +_LT_TAGDECL([CC], [compiler], [1], [A language specific compiler])dnl +_LT_TAGDECL([with_gcc], [GCC], [0], [Is the compiler the GNU compiler?])dnl + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# If no C compiler flags were specified, use CFLAGS. +LTCFLAGS=${LTCFLAGS-"$CFLAGS"} + +# Allow CC to be a program name with arguments. +compiler=$CC +])# _LT_TAG_COMPILER + + +# _LT_COMPILER_BOILERPLATE +# ------------------------ +# Check for compiler boilerplate output or warnings with +# the simple compiler test code. +m4_defun([_LT_COMPILER_BOILERPLATE], +[m4_require([_LT_DECL_SED])dnl +ac_outfile=conftest.$ac_objext +echo "$lt_simple_compile_test_code" >conftest.$ac_ext +eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_compiler_boilerplate=`cat conftest.err` +$RM conftest* +])# _LT_COMPILER_BOILERPLATE + + +# _LT_LINKER_BOILERPLATE +# ---------------------- +# Check for linker boilerplate output or warnings with +# the simple link test code. 
+m4_defun([_LT_LINKER_BOILERPLATE], +[m4_require([_LT_DECL_SED])dnl +ac_outfile=conftest.$ac_objext +echo "$lt_simple_link_test_code" >conftest.$ac_ext +eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_linker_boilerplate=`cat conftest.err` +$RM -r conftest* +])# _LT_LINKER_BOILERPLATE + +# _LT_REQUIRED_DARWIN_CHECKS +# ------------------------- +m4_defun_once([_LT_REQUIRED_DARWIN_CHECKS],[ + case $host_os in + rhapsody* | darwin*) + AC_CHECK_TOOL([DSYMUTIL], [dsymutil], [:]) + AC_CHECK_TOOL([NMEDIT], [nmedit], [:]) + AC_CHECK_TOOL([LIPO], [lipo], [:]) + AC_CHECK_TOOL([OTOOL], [otool], [:]) + AC_CHECK_TOOL([OTOOL64], [otool64], [:]) + _LT_DECL([], [DSYMUTIL], [1], + [Tool to manipulate archived DWARF debug symbol files on Mac OS X]) + _LT_DECL([], [NMEDIT], [1], + [Tool to change global to local symbols on Mac OS X]) + _LT_DECL([], [LIPO], [1], + [Tool to manipulate fat objects and archives on Mac OS X]) + _LT_DECL([], [OTOOL], [1], + [ldd/readelf like tool for Mach-O binaries on Mac OS X]) + _LT_DECL([], [OTOOL64], [1], + [ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4]) + + AC_CACHE_CHECK([for -single_module linker flag],[lt_cv_apple_cc_single_mod], + [lt_cv_apple_cc_single_mod=no + if test -z "${LT_MULTI_MODULE}"; then + # By default we will add the -single_module flag. You can override + # by either setting the environment variable LT_MULTI_MODULE + # non-empty at configure time, or by adding -multi_module to the + # link flags. + rm -rf libconftest.dylib* + echo "int foo(void){return 1;}" > conftest.c + echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ +-dynamiclib -Wl,-single_module conftest.c" >&AS_MESSAGE_LOG_FD + $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ + -dynamiclib -Wl,-single_module conftest.c 2>conftest.err + _lt_result=$? + # If there is a non-empty error log, and "single_module" + # appears in it, assume the flag caused a linker warning + if test -s conftest.err && $GREP single_module conftest.err; then + cat conftest.err >&AS_MESSAGE_LOG_FD + # Otherwise, if the output was created with a 0 exit code from + # the compiler, it worked. + elif test -f libconftest.dylib && test $_lt_result -eq 0; then + lt_cv_apple_cc_single_mod=yes + else + cat conftest.err >&AS_MESSAGE_LOG_FD + fi + rm -rf libconftest.dylib* + rm -f conftest.* + fi]) + + AC_CACHE_CHECK([for -exported_symbols_list linker flag], + [lt_cv_ld_exported_symbols_list], + [lt_cv_ld_exported_symbols_list=no + save_LDFLAGS=$LDFLAGS + echo "_main" > conftest.sym + LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym" + AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])], + [lt_cv_ld_exported_symbols_list=yes], + [lt_cv_ld_exported_symbols_list=no]) + LDFLAGS="$save_LDFLAGS" + ]) + + AC_CACHE_CHECK([for -force_load linker flag],[lt_cv_ld_force_load], + [lt_cv_ld_force_load=no + cat > conftest.c << _LT_EOF +int forced_loaded() { return 2;} +_LT_EOF + echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&AS_MESSAGE_LOG_FD + $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&AS_MESSAGE_LOG_FD + echo "$AR cru libconftest.a conftest.o" >&AS_MESSAGE_LOG_FD + $AR cru libconftest.a conftest.o 2>&AS_MESSAGE_LOG_FD + echo "$RANLIB libconftest.a" >&AS_MESSAGE_LOG_FD + $RANLIB libconftest.a 2>&AS_MESSAGE_LOG_FD + cat > conftest.c << _LT_EOF +int main() { return 0;} +_LT_EOF + echo "$LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a" >&AS_MESSAGE_LOG_FD + $LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a 2>conftest.err + _lt_result=$? 
+ if test -s conftest.err && $GREP force_load conftest.err; then + cat conftest.err >&AS_MESSAGE_LOG_FD + elif test -f conftest && test $_lt_result -eq 0 && $GREP forced_load conftest >/dev/null 2>&1 ; then + lt_cv_ld_force_load=yes + else + cat conftest.err >&AS_MESSAGE_LOG_FD + fi + rm -f conftest.err libconftest.a conftest conftest.c + rm -rf conftest.dSYM + ]) + case $host_os in + rhapsody* | darwin1.[[012]]) + _lt_dar_allow_undefined='${wl}-undefined ${wl}suppress' ;; + darwin1.*) + _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; + darwin*) # darwin 5.x on + # if running on 10.5 or later, the deployment target defaults + # to the OS version, if on x86, and 10.4, the deployment + # target defaults to 10.4. Don't you love it? + case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in + 10.0,*86*-darwin8*|10.0,*-darwin[[91]]*) + _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; + 10.[[012]]*) + _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; + 10.*) + _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; + esac + ;; + esac + if test "$lt_cv_apple_cc_single_mod" = "yes"; then + _lt_dar_single_mod='$single_module' + fi + if test "$lt_cv_ld_exported_symbols_list" = "yes"; then + _lt_dar_export_syms=' ${wl}-exported_symbols_list,$output_objdir/${libname}-symbols.expsym' + else + _lt_dar_export_syms='~$NMEDIT -s $output_objdir/${libname}-symbols.expsym ${lib}' + fi + if test "$DSYMUTIL" != ":" && test "$lt_cv_ld_force_load" = "no"; then + _lt_dsymutil='~$DSYMUTIL $lib || :' + else + _lt_dsymutil= + fi + ;; + esac +]) + + +# _LT_DARWIN_LINKER_FEATURES([TAG]) +# --------------------------------- +# Checks for linker and compiler features on darwin +m4_defun([_LT_DARWIN_LINKER_FEATURES], +[ + m4_require([_LT_REQUIRED_DARWIN_CHECKS]) + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_automatic, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported + if test "$lt_cv_ld_force_load" = "yes"; then + _LT_TAGVAR(whole_archive_flag_spec, $1)='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience ${wl}-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' + m4_case([$1], [F77], [_LT_TAGVAR(compiler_needs_object, $1)=yes], + [FC], [_LT_TAGVAR(compiler_needs_object, $1)=yes]) + else + _LT_TAGVAR(whole_archive_flag_spec, $1)='' + fi + _LT_TAGVAR(link_all_deplibs, $1)=yes + _LT_TAGVAR(allow_undefined_flag, $1)="$_lt_dar_allow_undefined" + case $cc_basename in + ifort*) _lt_dar_can_shared=yes ;; + *) _lt_dar_can_shared=$GCC ;; + esac + if test "$_lt_dar_can_shared" = "yes"; then + output_verbose_link_cmd=func_echo_all + _LT_TAGVAR(archive_cmds, $1)="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}" + _LT_TAGVAR(module_cmds, $1)="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}" + _LT_TAGVAR(archive_expsym_cmds, $1)="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}" + _LT_TAGVAR(module_expsym_cmds, $1)="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs 
\$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}" + m4_if([$1], [CXX], +[ if test "$lt_cv_apple_cc_single_mod" != "yes"; then + _LT_TAGVAR(archive_cmds, $1)="\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dsymutil}" + _LT_TAGVAR(archive_expsym_cmds, $1)="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dar_export_syms}${_lt_dsymutil}" + fi +],[]) + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi +]) + +# _LT_SYS_MODULE_PATH_AIX([TAGNAME]) +# ---------------------------------- +# Links a minimal program and checks the executable +# for the system default hardcoded library path. In most cases, +# this is /usr/lib:/lib, but when the MPI compilers are used +# the location of the communication and MPI libs are included too. +# If we don't find anything, use the default library path according +# to the aix ld manual. +# Store the results from the different compilers for each TAGNAME. +# Allow to override them for all tags through lt_cv_aix_libpath. +m4_defun([_LT_SYS_MODULE_PATH_AIX], +[m4_require([_LT_DECL_SED])dnl +if test "${lt_cv_aix_libpath+set}" = set; then + aix_libpath=$lt_cv_aix_libpath +else + AC_CACHE_VAL([_LT_TAGVAR([lt_cv_aix_libpath_], [$1])], + [AC_LINK_IFELSE([AC_LANG_PROGRAM],[ + lt_aix_libpath_sed='[ + /Import File Strings/,/^$/ { + /^0/ { + s/^0 *\([^ ]*\) *$/\1/ + p + } + }]' + _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + # Check for a 64-bit object if we didn't find anything. + if test -z "$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])"; then + _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + fi],[]) + if test -z "$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])"; then + _LT_TAGVAR([lt_cv_aix_libpath_], [$1])="/usr/lib:/lib" + fi + ]) + aix_libpath=$_LT_TAGVAR([lt_cv_aix_libpath_], [$1]) +fi +])# _LT_SYS_MODULE_PATH_AIX + + +# _LT_SHELL_INIT(ARG) +# ------------------- +m4_define([_LT_SHELL_INIT], +[m4_divert_text([M4SH-INIT], [$1 +])])# _LT_SHELL_INIT + + + +# _LT_PROG_ECHO_BACKSLASH +# ----------------------- +# Find how we can fake an echo command that does not interpret backslash. +# In particular, with Autoconf 2.60 or later we add some code to the start +# of the generated configure script which will find a shell with a builtin +# printf (which we can use as an echo command). +m4_defun([_LT_PROG_ECHO_BACKSLASH], +[ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO +ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO + +AC_MSG_CHECKING([how to print strings]) +# Test print first, because it will be a builtin if present. +if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ + test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then + ECHO='print -r --' +elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then + ECHO='printf %s\n' +else + # Use this function as a fallback that always works. + func_fallback_echo () + { + eval 'cat <<_LTECHO_EOF +$[]1 +_LTECHO_EOF' + } + ECHO='func_fallback_echo' +fi + +# func_echo_all arg... +# Invoke $ECHO with all args, space-separated. 
+func_echo_all () +{ + $ECHO "$*" +} + +case "$ECHO" in + printf*) AC_MSG_RESULT([printf]) ;; + print*) AC_MSG_RESULT([print -r]) ;; + *) AC_MSG_RESULT([cat]) ;; +esac + +m4_ifdef([_AS_DETECT_SUGGESTED], +[_AS_DETECT_SUGGESTED([ + test -n "${ZSH_VERSION+set}${BASH_VERSION+set}" || ( + ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' + ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO + ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO + PATH=/empty FPATH=/empty; export PATH FPATH + test "X`printf %s $ECHO`" = "X$ECHO" \ + || test "X`print -r -- $ECHO`" = "X$ECHO" )])]) + +_LT_DECL([], [SHELL], [1], [Shell to use when invoking shell scripts]) +_LT_DECL([], [ECHO], [1], [An echo program that protects backslashes]) +])# _LT_PROG_ECHO_BACKSLASH + + +# _LT_WITH_SYSROOT +# ---------------- +AC_DEFUN([_LT_WITH_SYSROOT], +[AC_MSG_CHECKING([for sysroot]) +AC_ARG_WITH([sysroot], +[ --with-sysroot[=DIR] Search for dependent libraries within DIR + (or the compiler's sysroot if not specified).], +[], [with_sysroot=no]) + +dnl lt_sysroot will always be passed unquoted. We quote it here +dnl in case the user passed a directory name. +lt_sysroot= +case ${with_sysroot} in #( + yes) + if test "$GCC" = yes; then + lt_sysroot=`$CC --print-sysroot 2>/dev/null` + fi + ;; #( + /*) + lt_sysroot=`echo "$with_sysroot" | sed -e "$sed_quote_subst"` + ;; #( + no|'') + ;; #( + *) + AC_MSG_RESULT([${with_sysroot}]) + AC_MSG_ERROR([The sysroot must be an absolute path.]) + ;; +esac + + AC_MSG_RESULT([${lt_sysroot:-no}]) +_LT_DECL([], [lt_sysroot], [0], [The root where to search for ]dnl +[dependent libraries, and in which our libraries should be installed.])]) + +# _LT_ENABLE_LOCK +# --------------- +m4_defun([_LT_ENABLE_LOCK], +[AC_ARG_ENABLE([libtool-lock], + [AS_HELP_STRING([--disable-libtool-lock], + [avoid locking (might break parallel builds)])]) +test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes + +# Some flags need to be propagated to the compiler or linker for good +# libtool support. +case $host in +ia64-*-hpux*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if AC_TRY_EVAL(ac_compile); then + case `/usr/bin/file conftest.$ac_objext` in + *ELF-32*) + HPUX_IA64_MODE="32" + ;; + *ELF-64*) + HPUX_IA64_MODE="64" + ;; + esac + fi + rm -rf conftest* + ;; +*-*-irix6*) + # Find out which ABI we are using. + echo '[#]line '$LINENO' "configure"' > conftest.$ac_ext + if AC_TRY_EVAL(ac_compile); then + if test "$lt_cv_prog_gnu_ld" = yes; then + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -melf32bsmip" + ;; + *N32*) + LD="${LD-ld} -melf32bmipn32" + ;; + *64-bit*) + LD="${LD-ld} -melf64bmip" + ;; + esac + else + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -32" + ;; + *N32*) + LD="${LD-ld} -n32" + ;; + *64-bit*) + LD="${LD-ld} -64" + ;; + esac + fi + fi + rm -rf conftest* + ;; + +x86_64-*kfreebsd*-gnu|x86_64-*linux*|powerpc*-*linux*| \ +s390*-*linux*|s390*-*tpf*|sparc*-*linux*) + # Find out which ABI we are using. 
+ echo 'int i;' > conftest.$ac_ext + if AC_TRY_EVAL(ac_compile); then + case `/usr/bin/file conftest.o` in + *32-bit*) + case $host in + x86_64-*kfreebsd*-gnu) + LD="${LD-ld} -m elf_i386_fbsd" + ;; + x86_64-*linux*) + case `/usr/bin/file conftest.o` in + *x86-64*) + LD="${LD-ld} -m elf32_x86_64" + ;; + *) + LD="${LD-ld} -m elf_i386" + ;; + esac + ;; + powerpc64le-*) + LD="${LD-ld} -m elf32lppclinux" + ;; + powerpc64-*) + LD="${LD-ld} -m elf32ppclinux" + ;; + s390x-*linux*) + LD="${LD-ld} -m elf_s390" + ;; + sparc64-*linux*) + LD="${LD-ld} -m elf32_sparc" + ;; + esac + ;; + *64-bit*) + case $host in + x86_64-*kfreebsd*-gnu) + LD="${LD-ld} -m elf_x86_64_fbsd" + ;; + x86_64-*linux*) + LD="${LD-ld} -m elf_x86_64" + ;; + powerpcle-*) + LD="${LD-ld} -m elf64lppc" + ;; + powerpc-*) + LD="${LD-ld} -m elf64ppc" + ;; + s390*-*linux*|s390*-*tpf*) + LD="${LD-ld} -m elf64_s390" + ;; + sparc*-*linux*) + LD="${LD-ld} -m elf64_sparc" + ;; + esac + ;; + esac + fi + rm -rf conftest* + ;; + +*-*-sco3.2v5*) + # On SCO OpenServer 5, we need -belf to get full-featured binaries. + SAVE_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS -belf" + AC_CACHE_CHECK([whether the C compiler needs -belf], lt_cv_cc_needs_belf, + [AC_LANG_PUSH(C) + AC_LINK_IFELSE([AC_LANG_PROGRAM([[]],[[]])],[lt_cv_cc_needs_belf=yes],[lt_cv_cc_needs_belf=no]) + AC_LANG_POP]) + if test x"$lt_cv_cc_needs_belf" != x"yes"; then + # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf + CFLAGS="$SAVE_CFLAGS" + fi + ;; +*-*solaris*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if AC_TRY_EVAL(ac_compile); then + case `/usr/bin/file conftest.o` in + *64-bit*) + case $lt_cv_prog_gnu_ld in + yes*) + case $host in + i?86-*-solaris*) + LD="${LD-ld} -m elf_x86_64" + ;; + sparc*-*-solaris*) + LD="${LD-ld} -m elf64_sparc" + ;; + esac + # GNU ld 2.21 introduced _sol2 emulations. Use them if available. + if ${LD-ld} -V | grep _sol2 >/dev/null 2>&1; then + LD="${LD-ld}_sol2" + fi + ;; + *) + if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then + LD="${LD-ld} -64" + fi + ;; + esac + ;; + esac + fi + rm -rf conftest* + ;; +esac + +need_locks="$enable_libtool_lock" +])# _LT_ENABLE_LOCK + + +# _LT_PROG_AR +# ----------- +m4_defun([_LT_PROG_AR], +[AC_CHECK_TOOLS(AR, [ar], false) +: ${AR=ar} +: ${AR_FLAGS=cru} +_LT_DECL([], [AR], [1], [The archiver]) +_LT_DECL([], [AR_FLAGS], [1], [Flags to create an archive]) + +AC_CACHE_CHECK([for archiver @FILE support], [lt_cv_ar_at_file], + [lt_cv_ar_at_file=no + AC_COMPILE_IFELSE([AC_LANG_PROGRAM], + [echo conftest.$ac_objext > conftest.lst + lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&AS_MESSAGE_LOG_FD' + AC_TRY_EVAL([lt_ar_try]) + if test "$ac_status" -eq 0; then + # Ensure the archiver fails upon bogus file names. 
+ rm -f conftest.$ac_objext libconftest.a + AC_TRY_EVAL([lt_ar_try]) + if test "$ac_status" -ne 0; then + lt_cv_ar_at_file=@ + fi + fi + rm -f conftest.* libconftest.a + ]) + ]) + +if test "x$lt_cv_ar_at_file" = xno; then + archiver_list_spec= +else + archiver_list_spec=$lt_cv_ar_at_file +fi +_LT_DECL([], [archiver_list_spec], [1], + [How to feed a file listing to the archiver]) +])# _LT_PROG_AR + + +# _LT_CMD_OLD_ARCHIVE +# ------------------- +m4_defun([_LT_CMD_OLD_ARCHIVE], +[_LT_PROG_AR + +AC_CHECK_TOOL(STRIP, strip, :) +test -z "$STRIP" && STRIP=: +_LT_DECL([], [STRIP], [1], [A symbol stripping program]) + +AC_CHECK_TOOL(RANLIB, ranlib, :) +test -z "$RANLIB" && RANLIB=: +_LT_DECL([], [RANLIB], [1], + [Commands used to install an old-style archive]) + +# Determine commands to create old-style static archives. +old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs' +old_postinstall_cmds='chmod 644 $oldlib' +old_postuninstall_cmds= + +if test -n "$RANLIB"; then + case $host_os in + openbsd*) + old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$tool_oldlib" + ;; + *) + old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$tool_oldlib" + ;; + esac + old_archive_cmds="$old_archive_cmds~\$RANLIB \$tool_oldlib" +fi + +case $host_os in + darwin*) + lock_old_archive_extraction=yes ;; + *) + lock_old_archive_extraction=no ;; +esac +_LT_DECL([], [old_postinstall_cmds], [2]) +_LT_DECL([], [old_postuninstall_cmds], [2]) +_LT_TAGDECL([], [old_archive_cmds], [2], + [Commands used to build an old-style archive]) +_LT_DECL([], [lock_old_archive_extraction], [0], + [Whether to use a lock for old archive extraction]) +])# _LT_CMD_OLD_ARCHIVE + + +# _LT_COMPILER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS, +# [OUTPUT-FILE], [ACTION-SUCCESS], [ACTION-FAILURE]) +# ---------------------------------------------------------------- +# Check whether the given compiler option works +AC_DEFUN([_LT_COMPILER_OPTION], +[m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_DECL_SED])dnl +AC_CACHE_CHECK([$1], [$2], + [$2=no + m4_if([$4], , [ac_outfile=conftest.$ac_objext], [ac_outfile=$4]) + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="$3" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&AS_MESSAGE_LOG_FD) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&AS_MESSAGE_LOG_FD + echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! 
-s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + $2=yes + fi + fi + $RM conftest* +]) + +if test x"[$]$2" = xyes; then + m4_if([$5], , :, [$5]) +else + m4_if([$6], , :, [$6]) +fi +])# _LT_COMPILER_OPTION + +# Old name: +AU_ALIAS([AC_LIBTOOL_COMPILER_OPTION], [_LT_COMPILER_OPTION]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_COMPILER_OPTION], []) + + +# _LT_LINKER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS, +# [ACTION-SUCCESS], [ACTION-FAILURE]) +# ---------------------------------------------------- +# Check whether the given linker option works +AC_DEFUN([_LT_LINKER_OPTION], +[m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_DECL_SED])dnl +AC_CACHE_CHECK([$1], [$2], + [$2=no + save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS $3" + echo "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then + # The linker can only warn and ignore the option if not recognized + # So say no if there are warnings + if test -s conftest.err; then + # Append any errors to the config.log. + cat conftest.err 1>&AS_MESSAGE_LOG_FD + $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if diff conftest.exp conftest.er2 >/dev/null; then + $2=yes + fi + else + $2=yes + fi + fi + $RM -r conftest* + LDFLAGS="$save_LDFLAGS" +]) + +if test x"[$]$2" = xyes; then + m4_if([$4], , :, [$4]) +else + m4_if([$5], , :, [$5]) +fi +])# _LT_LINKER_OPTION + +# Old name: +AU_ALIAS([AC_LIBTOOL_LINKER_OPTION], [_LT_LINKER_OPTION]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_LINKER_OPTION], []) + + +# LT_CMD_MAX_LEN +#--------------- +AC_DEFUN([LT_CMD_MAX_LEN], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +# find the maximum length of command line arguments +AC_MSG_CHECKING([the maximum length of command line arguments]) +AC_CACHE_VAL([lt_cv_sys_max_cmd_len], [dnl + i=0 + teststring="ABCD" + + case $build_os in + msdosdjgpp*) + # On DJGPP, this test can blow up pretty badly due to problems in libc + # (any single argument exceeding 2000 bytes causes a buffer overrun + # during glob expansion). Even if it were fixed, the result of this + # check would be larger than it should be. + lt_cv_sys_max_cmd_len=12288; # 12K is about right + ;; + + gnu*) + # Under GNU Hurd, this test is not required because there is + # no limit to the length of command line arguments. + # Libtool will interpret -1 as no limit whatsoever + lt_cv_sys_max_cmd_len=-1; + ;; + + cygwin* | mingw* | cegcc*) + # On Win9x/ME, this test blows up -- it succeeds, but takes + # about 5 minutes as the teststring grows exponentially. + # Worse, since 9x/ME are not pre-emptively multitasking, + # you end up with a "frozen" computer, even though with patience + # the test eventually succeeds (with a max line length of 256k). + # Instead, let's just punt: use the minimum linelength reported by + # all of the supported platforms: 8192 (on NT/2K/XP). + lt_cv_sys_max_cmd_len=8192; + ;; + + mint*) + # On MiNT this can take a long time and run out of memory. + lt_cv_sys_max_cmd_len=8192; + ;; + + amigaos*) + # On AmigaOS with pdksh, this test takes hours, literally. + # So we just punt and use a minimum line length of 8192. + lt_cv_sys_max_cmd_len=8192; + ;; + + netbsd* | freebsd* | openbsd* | darwin* | dragonfly*) + # This has been around since 386BSD, at least. Likely further. 
+ if test -x /sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` + elif test -x /usr/sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` + else + lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs + fi + # And add a safety zone + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` + ;; + + interix*) + # We know the value 262144 and hardcode it with a safety zone (like BSD) + lt_cv_sys_max_cmd_len=196608 + ;; + + os2*) + # The test takes a long time on OS/2. + lt_cv_sys_max_cmd_len=8192 + ;; + + osf*) + # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure + # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not + # nice to cause kernel panics so lets avoid the loop below. + # First set a reasonable default. + lt_cv_sys_max_cmd_len=16384 + # + if test -x /sbin/sysconfig; then + case `/sbin/sysconfig -q proc exec_disable_arg_limit` in + *1*) lt_cv_sys_max_cmd_len=-1 ;; + esac + fi + ;; + sco3.2v5*) + lt_cv_sys_max_cmd_len=102400 + ;; + sysv5* | sco5v6* | sysv4.2uw2*) + kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null` + if test -n "$kargmax"; then + lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[[ ]]//'` + else + lt_cv_sys_max_cmd_len=32768 + fi + ;; + *) + lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null` + if test -n "$lt_cv_sys_max_cmd_len" && \ + test undefined != "$lt_cv_sys_max_cmd_len"; then + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` + else + # Make teststring a little bigger before we do anything with it. + # a 1K string should be a reasonable start. + for i in 1 2 3 4 5 6 7 8 ; do + teststring=$teststring$teststring + done + SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} + # If test is not a shell built-in, we'll probably end up computing a + # maximum length that is only half of the actual maximum length, but + # we can't tell. + while { test "X"`env echo "$teststring$teststring" 2>/dev/null` \ + = "X$teststring$teststring"; } >/dev/null 2>&1 && + test $i != 17 # 1/2 MB should be enough + do + i=`expr $i + 1` + teststring=$teststring$teststring + done + # Only check the string length outside the loop. + lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1` + teststring= + # Add a significant safety factor because C++ compilers can tack on + # massive amounts of additional arguments before passing them to the + # linker. It appears as though 1/2 is a usable value. 
+ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2` + fi + ;; + esac +]) +if test -n $lt_cv_sys_max_cmd_len ; then + AC_MSG_RESULT($lt_cv_sys_max_cmd_len) +else + AC_MSG_RESULT(none) +fi +max_cmd_len=$lt_cv_sys_max_cmd_len +_LT_DECL([], [max_cmd_len], [0], + [What is the maximum length of a command?]) +])# LT_CMD_MAX_LEN + +# Old name: +AU_ALIAS([AC_LIBTOOL_SYS_MAX_CMD_LEN], [LT_CMD_MAX_LEN]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_SYS_MAX_CMD_LEN], []) + + +# _LT_HEADER_DLFCN +# ---------------- +m4_defun([_LT_HEADER_DLFCN], +[AC_CHECK_HEADERS([dlfcn.h], [], [], [AC_INCLUDES_DEFAULT])dnl +])# _LT_HEADER_DLFCN + + +# _LT_TRY_DLOPEN_SELF (ACTION-IF-TRUE, ACTION-IF-TRUE-W-USCORE, +# ACTION-IF-FALSE, ACTION-IF-CROSS-COMPILING) +# ---------------------------------------------------------------- +m4_defun([_LT_TRY_DLOPEN_SELF], +[m4_require([_LT_HEADER_DLFCN])dnl +if test "$cross_compiling" = yes; then : + [$4] +else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +[#line $LINENO "configure" +#include "confdefs.h" + +#if HAVE_DLFCN_H +#include <dlfcn.h> +#endif + +#include <stdio.h> + +#ifdef RTLD_GLOBAL +# define LT_DLGLOBAL RTLD_GLOBAL +#else +# ifdef DL_GLOBAL +# define LT_DLGLOBAL DL_GLOBAL +# else +# define LT_DLGLOBAL 0 +# endif +#endif + +/* We may have to define LT_DLLAZY_OR_NOW in the command line if we + find out it does not work in some platform. */ +#ifndef LT_DLLAZY_OR_NOW +# ifdef RTLD_LAZY +# define LT_DLLAZY_OR_NOW RTLD_LAZY +# else +# ifdef DL_LAZY +# define LT_DLLAZY_OR_NOW DL_LAZY +# else +# ifdef RTLD_NOW +# define LT_DLLAZY_OR_NOW RTLD_NOW +# else +# ifdef DL_NOW +# define LT_DLLAZY_OR_NOW DL_NOW +# else +# define LT_DLLAZY_OR_NOW 0 +# endif +# endif +# endif +# endif +#endif + +/* When -fvisibility=hidden is used, assume the code has been annotated + correspondingly for the symbols needed. */ +#if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) +int fnord () __attribute__((visibility("default"))); +#endif + +int fnord () { return 42; } +int main () +{ + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); + int status = $lt_dlunknown; + + if (self) + { + if (dlsym (self,"fnord")) status = $lt_dlno_uscore; + else + { + if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; + else puts (dlerror ()); + } + /* dlclose (self); */ + } + else + puts (dlerror ()); + + return status; +}] +_LT_EOF + if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext} 2>/dev/null; then + (./conftest; exit; ) >&AS_MESSAGE_LOG_FD 2>/dev/null + lt_status=$?
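+    # At this point $lt_status holds the exit code of the test program above:
+    # $lt_dlunknown (0) if the program could not dlopen itself or locate the
+    # symbol either way, $lt_dlno_uscore (1) if "fnord" was found as written,
+    # and $lt_dlneed_uscore (2) if it was only found as "_fnord".  The case
+    # statement below maps each outcome onto the corresponding ACTION argument.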
+ case x$lt_status in + x$lt_dlno_uscore) $1 ;; + x$lt_dlneed_uscore) $2 ;; + x$lt_dlunknown|x*) $3 ;; + esac + else : + # compilation failed + $3 + fi +fi +rm -fr conftest* +])# _LT_TRY_DLOPEN_SELF + + +# LT_SYS_DLOPEN_SELF +# ------------------ +AC_DEFUN([LT_SYS_DLOPEN_SELF], +[m4_require([_LT_HEADER_DLFCN])dnl +if test "x$enable_dlopen" != xyes; then + enable_dlopen=unknown + enable_dlopen_self=unknown + enable_dlopen_self_static=unknown +else + lt_cv_dlopen=no + lt_cv_dlopen_libs= + + case $host_os in + beos*) + lt_cv_dlopen="load_add_on" + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + ;; + + mingw* | pw32* | cegcc*) + lt_cv_dlopen="LoadLibrary" + lt_cv_dlopen_libs= + ;; + + cygwin*) + lt_cv_dlopen="dlopen" + lt_cv_dlopen_libs= + ;; + + darwin*) + # if libdl is installed we need to link against it + AC_CHECK_LIB([dl], [dlopen], + [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"],[ + lt_cv_dlopen="dyld" + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + ]) + ;; + + *) + AC_CHECK_FUNC([shl_load], + [lt_cv_dlopen="shl_load"], + [AC_CHECK_LIB([dld], [shl_load], + [lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-ldld"], + [AC_CHECK_FUNC([dlopen], + [lt_cv_dlopen="dlopen"], + [AC_CHECK_LIB([dl], [dlopen], + [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"], + [AC_CHECK_LIB([svld], [dlopen], + [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld"], + [AC_CHECK_LIB([dld], [dld_link], + [lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-ldld"]) + ]) + ]) + ]) + ]) + ]) + ;; + esac + + if test "x$lt_cv_dlopen" != xno; then + enable_dlopen=yes + else + enable_dlopen=no + fi + + case $lt_cv_dlopen in + dlopen) + save_CPPFLAGS="$CPPFLAGS" + test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" + + save_LDFLAGS="$LDFLAGS" + wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" + + save_LIBS="$LIBS" + LIBS="$lt_cv_dlopen_libs $LIBS" + + AC_CACHE_CHECK([whether a program can dlopen itself], + lt_cv_dlopen_self, [dnl + _LT_TRY_DLOPEN_SELF( + lt_cv_dlopen_self=yes, lt_cv_dlopen_self=yes, + lt_cv_dlopen_self=no, lt_cv_dlopen_self=cross) + ]) + + if test "x$lt_cv_dlopen_self" = xyes; then + wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\" + AC_CACHE_CHECK([whether a statically linked program can dlopen itself], + lt_cv_dlopen_self_static, [dnl + _LT_TRY_DLOPEN_SELF( + lt_cv_dlopen_self_static=yes, lt_cv_dlopen_self_static=yes, + lt_cv_dlopen_self_static=no, lt_cv_dlopen_self_static=cross) + ]) + fi + + CPPFLAGS="$save_CPPFLAGS" + LDFLAGS="$save_LDFLAGS" + LIBS="$save_LIBS" + ;; + esac + + case $lt_cv_dlopen_self in + yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; + *) enable_dlopen_self=unknown ;; + esac + + case $lt_cv_dlopen_self_static in + yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; + *) enable_dlopen_self_static=unknown ;; + esac +fi +_LT_DECL([dlopen_support], [enable_dlopen], [0], + [Whether dlopen is supported]) +_LT_DECL([dlopen_self], [enable_dlopen_self], [0], + [Whether dlopen of programs is supported]) +_LT_DECL([dlopen_self_static], [enable_dlopen_self_static], [0], + [Whether dlopen of statically linked programs is supported]) +])# LT_SYS_DLOPEN_SELF + +# Old name: +AU_ALIAS([AC_LIBTOOL_DLOPEN_SELF], [LT_SYS_DLOPEN_SELF]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_DLOPEN_SELF], []) + + +# _LT_COMPILER_C_O([TAGNAME]) +# --------------------------- +# Check to see if options -c and -o are simultaneously supported by compiler. +# This macro does not hard code the compiler like AC_PROG_CC_C_O. 
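+# The check boils down to compiling a trivial source file with an explicit
+# object path and verifying that the object really appears (the macro below
+# additionally filters out harmless warnings).  A rough standalone sketch of
+# the idea, with hypothetical file names and a plain `cc':
+#
+#   mkdir -p /tmp/c_o/out && echo 'int i;' > /tmp/c_o/t.c
+#   if cc -c -o /tmp/c_o/out/t.o /tmp/c_o/t.c 2>/dev/null &&
+#      test -s /tmp/c_o/out/t.o; then
+#     echo 'compiler accepts -c together with -o'
+#   fi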
+m4_defun([_LT_COMPILER_C_O], +[m4_require([_LT_DECL_SED])dnl +m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_TAG_COMPILER])dnl +AC_CACHE_CHECK([if $compiler supports -c -o file.$ac_objext], + [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)], + [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=no + $RM -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&AS_MESSAGE_LOG_FD) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&AS_MESSAGE_LOG_FD + echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + _LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes + fi + fi + chmod u+w . 2>&AS_MESSAGE_LOG_FD + $RM conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files + $RM out/* && rmdir out + cd .. + $RM -r conftest + $RM conftest* +]) +_LT_TAGDECL([compiler_c_o], [lt_cv_prog_compiler_c_o], [1], + [Does compiler simultaneously support -c and -o options?]) +])# _LT_COMPILER_C_O + + +# _LT_COMPILER_FILE_LOCKS([TAGNAME]) +# ---------------------------------- +# Check to see if we can do hard links to lock some files if needed +m4_defun([_LT_COMPILER_FILE_LOCKS], +[m4_require([_LT_ENABLE_LOCK])dnl +m4_require([_LT_FILEUTILS_DEFAULTS])dnl +_LT_COMPILER_C_O([$1]) + +hard_links="nottested" +if test "$_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)" = no && test "$need_locks" != no; then + # do not overwrite the value of need_locks provided by the user + AC_MSG_CHECKING([if we can lock with hard links]) + hard_links=yes + $RM conftest* + ln conftest.a conftest.b 2>/dev/null && hard_links=no + touch conftest.a + ln conftest.a conftest.b 2>&5 || hard_links=no + ln conftest.a conftest.b 2>/dev/null && hard_links=no + AC_MSG_RESULT([$hard_links]) + if test "$hard_links" = no; then + AC_MSG_WARN([`$CC' does not support `-c -o', so `make -j' may be unsafe]) + need_locks=warn + fi +else + need_locks=no +fi +_LT_DECL([], [need_locks], [1], [Must we lock files when doing compilation?]) +])# _LT_COMPILER_FILE_LOCKS + + +# _LT_CHECK_OBJDIR +# ---------------- +m4_defun([_LT_CHECK_OBJDIR], +[AC_CACHE_CHECK([for objdir], [lt_cv_objdir], +[rm -f .libs 2>/dev/null +mkdir .libs 2>/dev/null +if test -d .libs; then + lt_cv_objdir=.libs +else + # MS-DOS does not allow filenames that begin with a dot. 
+ lt_cv_objdir=_libs +fi +rmdir .libs 2>/dev/null]) +objdir=$lt_cv_objdir +_LT_DECL([], [objdir], [0], + [The name of the directory that contains temporary libtool files])dnl +m4_pattern_allow([LT_OBJDIR])dnl +AC_DEFINE_UNQUOTED(LT_OBJDIR, "$lt_cv_objdir/", + [Define to the sub-directory in which libtool stores uninstalled libraries.]) +])# _LT_CHECK_OBJDIR + + +# _LT_LINKER_HARDCODE_LIBPATH([TAGNAME]) +# -------------------------------------- +# Check hardcoding attributes. +m4_defun([_LT_LINKER_HARDCODE_LIBPATH], +[AC_MSG_CHECKING([how to hardcode library paths into programs]) +_LT_TAGVAR(hardcode_action, $1)= +if test -n "$_LT_TAGVAR(hardcode_libdir_flag_spec, $1)" || + test -n "$_LT_TAGVAR(runpath_var, $1)" || + test "X$_LT_TAGVAR(hardcode_automatic, $1)" = "Xyes" ; then + + # We can hardcode non-existent directories. + if test "$_LT_TAGVAR(hardcode_direct, $1)" != no && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one + ## test "$_LT_TAGVAR(hardcode_shlibpath_var, $1)" != no && + test "$_LT_TAGVAR(hardcode_minus_L, $1)" != no; then + # Linking always hardcodes the temporary library directory. + _LT_TAGVAR(hardcode_action, $1)=relink + else + # We can link without hardcoding, and we can hardcode nonexisting dirs. + _LT_TAGVAR(hardcode_action, $1)=immediate + fi +else + # We cannot hardcode anything, or else we can only hardcode existing + # directories. + _LT_TAGVAR(hardcode_action, $1)=unsupported +fi +AC_MSG_RESULT([$_LT_TAGVAR(hardcode_action, $1)]) + +if test "$_LT_TAGVAR(hardcode_action, $1)" = relink || + test "$_LT_TAGVAR(inherit_rpath, $1)" = yes; then + # Fast installation is not supported + enable_fast_install=no +elif test "$shlibpath_overrides_runpath" = yes || + test "$enable_shared" = no; then + # Fast installation is not necessary + enable_fast_install=needless +fi +_LT_TAGDECL([], [hardcode_action], [0], + [How to hardcode a shared library path into an executable]) +])# _LT_LINKER_HARDCODE_LIBPATH + + +# _LT_CMD_STRIPLIB +# ---------------- +m4_defun([_LT_CMD_STRIPLIB], +[m4_require([_LT_DECL_EGREP]) +striplib= +old_striplib= +AC_MSG_CHECKING([whether stripping libraries is possible]) +if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then + test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" + test -z "$striplib" && striplib="$STRIP --strip-unneeded" + AC_MSG_RESULT([yes]) +else +# FIXME - insert some real tests, host_os isn't really good enough + case $host_os in + darwin*) + if test -n "$STRIP" ; then + striplib="$STRIP -x" + old_striplib="$STRIP -S" + AC_MSG_RESULT([yes]) + else + AC_MSG_RESULT([no]) + fi + ;; + *) + AC_MSG_RESULT([no]) + ;; + esac +fi +_LT_DECL([], [old_striplib], [1], [Commands to strip libraries]) +_LT_DECL([], [striplib], [1]) +])# _LT_CMD_STRIPLIB + + +# _LT_SYS_DYNAMIC_LINKER([TAG]) +# ----------------------------- +# PORTME Fill in your ld.so characteristics +m4_defun([_LT_SYS_DYNAMIC_LINKER], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +m4_require([_LT_DECL_EGREP])dnl +m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_DECL_OBJDUMP])dnl +m4_require([_LT_DECL_SED])dnl +m4_require([_LT_CHECK_SHELL_FEATURES])dnl +AC_MSG_CHECKING([dynamic linker characteristics]) +m4_if([$1], + [], [ +if test "$GCC" = yes; then + case $host_os in + darwin*) lt_awk_arg="/^libraries:/,/LR/" ;; + *) lt_awk_arg="/^libraries:/" ;; + esac + case $host_os in + mingw* | cegcc*) 
lt_sed_strip_eq="s,=\([[A-Za-z]]:\),\1,g" ;; + *) lt_sed_strip_eq="s,=/,/,g" ;; + esac + lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e $lt_sed_strip_eq` + case $lt_search_path_spec in + *\;*) + # if the path contains ";" then we assume it to be the separator + # otherwise default to the standard path separator (i.e. ":") - it is + # assumed that no part of a normal pathname contains ";" but that should + # okay in the real world where ";" in dirpaths is itself problematic. + lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED 's/;/ /g'` + ;; + *) + lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED "s/$PATH_SEPARATOR/ /g"` + ;; + esac + # Ok, now we have the path, separated by spaces, we can step through it + # and add multilib dir if necessary. + lt_tmp_lt_search_path_spec= + lt_multi_os_dir=`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null` + for lt_sys_path in $lt_search_path_spec; do + if test -d "$lt_sys_path/$lt_multi_os_dir"; then + lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path/$lt_multi_os_dir" + else + test -d "$lt_sys_path" && \ + lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path" + fi + done + lt_search_path_spec=`$ECHO "$lt_tmp_lt_search_path_spec" | awk ' +BEGIN {RS=" "; FS="/|\n";} { + lt_foo=""; + lt_count=0; + for (lt_i = NF; lt_i > 0; lt_i--) { + if ($lt_i != "" && $lt_i != ".") { + if ($lt_i == "..") { + lt_count++; + } else { + if (lt_count == 0) { + lt_foo="/" $lt_i lt_foo; + } else { + lt_count--; + } + } + } + } + if (lt_foo != "") { lt_freq[[lt_foo]]++; } + if (lt_freq[[lt_foo]] == 1) { print lt_foo; } +}'` + # AWK program above erroneously prepends '/' to C:/dos/paths + # for these hosts. + case $host_os in + mingw* | cegcc*) lt_search_path_spec=`$ECHO "$lt_search_path_spec" |\ + $SED 's,/\([[A-Za-z]]:\),\1,g'` ;; + esac + sys_lib_search_path_spec=`$ECHO "$lt_search_path_spec" | $lt_NL2SP` +else + sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" +fi]) +library_names_spec= +libname_spec='lib$name' +soname_spec= +shrext_cmds=".so" +postinstall_cmds= +postuninstall_cmds= +finish_cmds= +finish_eval= +shlibpath_var= +shlibpath_overrides_runpath=unknown +version_type=none +dynamic_linker="$host_os ld.so" +sys_lib_dlsearch_path_spec="/lib /usr/lib" +need_lib_prefix=unknown +hardcode_into_libs=no + +# when you set need_version to no, make sure it does not cause -set_version +# flags to be left without arguments +need_version=unknown + +case $host_os in +aix3*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' + shlibpath_var=LIBPATH + + # AIX 3 has no versioning support, so we append a major version to the name. + soname_spec='${libname}${release}${shared_ext}$major' + ;; + +aix[[4-9]]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + hardcode_into_libs=yes + if test "$host_cpu" = ia64; then + # AIX 5 supports IA64 + library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + else + # With GCC up to 2.95.x, collect2 would create an import file + # for dependence libraries. The import file would start with + # the line `#! .'. This would cause the generated library to + # depend on `.', always an invalid library. This was fixed in + # development snapshots of GCC prior to 3.0. 
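+  # The version test just below asks the preprocessor itself whether this is
+  # a new enough GCC, which also works when cross compiling.  Reduced to a
+  # rough standalone form (any GCC-compatible compiler in $CC):
+  #
+  #   { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)'
+  #     echo yes
+  #     echo '#endif'; } | $CC -E - | grep yes >/dev/null &&
+  #   echo 'GCC is new enough to build shared libraries here'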
+ case $host_os in + aix4 | aix4.[[01]] | aix4.[[01]].*) + if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' + echo ' yes ' + echo '#endif'; } | ${CC} -E - | $GREP yes > /dev/null; then + : + else + can_build_shared=no + fi + ;; + esac + # AIX (on Power*) has no versioning support, so currently we can not hardcode correct + # soname into executable. Probably we can add versioning support to + # collect2, so additional links can be useful in future. + if test "$aix_use_runtimelinking" = yes; then + # If using run time linking (on AIX 4.2 or later) use lib.so + # instead of lib.a to let people know that these are not + # typical AIX shared libraries. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + else + # We preserve .a as extension for shared libraries through AIX4.2 + # and later when we are not doing run time linking. + library_names_spec='${libname}${release}.a $libname.a' + soname_spec='${libname}${release}${shared_ext}$major' + fi + shlibpath_var=LIBPATH + fi + ;; + +amigaos*) + case $host_cpu in + powerpc) + # Since July 2007 AmigaOS4 officially supports .so libraries. + # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + ;; + m68k) + library_names_spec='$libname.ixlibrary $libname.a' + # Create ${libname}_ixlibrary.a entries in /sys/libs. + finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([[^/]]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' + ;; + esac + ;; + +beos*) + library_names_spec='${libname}${shared_ext}' + dynamic_linker="$host_os ld.so" + shlibpath_var=LIBRARY_PATH + ;; + +bsdi[[45]]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" + sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" + # the default ld.so.conf also contains /usr/contrib/lib and + # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow + # libtool to hard-code these into programs + ;; + +cygwin* | mingw* | pw32* | cegcc*) + version_type=windows + shrext_cmds=".dll" + need_version=no + need_lib_prefix=no + + case $GCC,$cc_basename in + yes,*) + # gcc + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname~ + chmod a+x \$dldir/$dlname~ + if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then + eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; + fi' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + shlibpath_overrides_runpath=yes + + case $host_os in + cygwin*) + # Cygwin DLLs use 'cyg' prefix rather than 'lib' + soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' +m4_if([$1], [],[ + sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/lib/w32api"]) + ;; + mingw* | cegcc*) + # MinGW DLLs use traditional 'lib' prefix + soname_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' + ;; + pw32*) + # pw32 DLLs use 'pw' prefix rather than 'lib' + library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' + ;; + esac + dynamic_linker='Win32 ld.exe' + ;; + + *,cl*) + # Native MSVC + libname_spec='$name' + soname_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' + library_names_spec='${libname}.dll.lib' + + case $build_os in + mingw*) + sys_lib_search_path_spec= + lt_save_ifs=$IFS + IFS=';' + for lt_path in $LIB + do + IFS=$lt_save_ifs + # Let DOS variable expansion print the short 8.3 style file name. + lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` + sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" + done + IFS=$lt_save_ifs + # Convert to MSYS style. + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([[a-zA-Z]]\\):| /\\1|g' -e 's|^ ||'` + ;; + cygwin*) + # Convert to unix form, then to dos form, then back to unix form + # but this time dos style (no spaces!) so that the unix form looks + # like /cygdrive/c/PROGRA~1:/cygdr... + sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` + sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` + sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + ;; + *) + sys_lib_search_path_spec="$LIB" + if $ECHO "$sys_lib_search_path_spec" | [$GREP ';[c-zC-Z]:/' >/dev/null]; then + # It is most probably a Windows format PATH. + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi + # FIXME: find the short name or the path components, as spaces are + # common. (e.g. "Program Files" -> "PROGRA~1") + ;; + esac + + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + shlibpath_overrides_runpath=yes + dynamic_linker='Win32 link.exe' + ;; + + *) + # Assume MSVC wrapper + library_names_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext} $libname.lib' + dynamic_linker='Win32 ld.exe' + ;; + esac + # FIXME: first we should search . 
and the directory the executable is in + shlibpath_var=PATH + ;; + +darwin* | rhapsody*) + dynamic_linker="$host_os dyld" + version_type=darwin + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${major}$shared_ext ${libname}$shared_ext' + soname_spec='${libname}${release}${major}$shared_ext' + shlibpath_overrides_runpath=yes + shlibpath_var=DYLD_LIBRARY_PATH + shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' +m4_if([$1], [],[ + sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib"]) + sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' + ;; + +dgux*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +freebsd* | dragonfly*) + # DragonFly does not have aout. When/if they implement a new + # versioning mechanism, adjust this. + if test -x /usr/bin/objformat; then + objformat=`/usr/bin/objformat` + else + case $host_os in + freebsd[[23]].*) objformat=aout ;; + *) objformat=elf ;; + esac + fi + version_type=freebsd-$objformat + case $version_type in + freebsd-elf*) + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + need_version=no + need_lib_prefix=no + ;; + freebsd-*) + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' + need_version=yes + ;; + esac + shlibpath_var=LD_LIBRARY_PATH + case $host_os in + freebsd2.*) + shlibpath_overrides_runpath=yes + ;; + freebsd3.[[01]]* | freebsdelf3.[[01]]*) + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + freebsd3.[[2-9]]* | freebsdelf3.[[2-9]]* | \ + freebsd4.[[0-5]] | freebsdelf4.[[0-5]] | freebsd4.1.1 | freebsdelf4.1.1) + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + *) # from 4.6 on, and DragonFly + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + esac + ;; + +haiku*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + dynamic_linker="$host_os runtime_loader" + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LIBRARY_PATH + shlibpath_overrides_runpath=yes + sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' + hardcode_into_libs=yes + ;; + +hpux9* | hpux10* | hpux11*) + # Give a soname corresponding to the major version so that dld.sl refuses to + # link against other versions. + version_type=sunos + need_lib_prefix=no + need_version=no + case $host_cpu in + ia64*) + shrext_cmds='.so' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.so" + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. 
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + if test "X$HPUX_IA64_MODE" = X32; then + sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" + else + sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" + fi + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + hppa*64*) + shrext_cmds='.sl' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.sl" + shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + *) + shrext_cmds='.sl' + dynamic_linker="$host_os dld.sl" + shlibpath_var=SHLIB_PATH + shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + ;; + esac + # HP-UX runs *really* slowly unless shared libraries are mode 555, ... + postinstall_cmds='chmod 555 $lib' + # or fails outright, so override atomically: + install_override_mode=555 + ;; + +interix[[3-9]]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +irix5* | irix6* | nonstopux*) + case $host_os in + nonstopux*) version_type=nonstopux ;; + *) + if test "$lt_cv_prog_gnu_ld" = yes; then + version_type=linux # correct to gnu/linux during the next big refactor + else + version_type=irix + fi ;; + esac + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' + case $host_os in + irix5* | nonstopux*) + libsuff= shlibsuff= + ;; + *) + case $LD in # libtool.m4 will add one of these switches to LD + *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") + libsuff= shlibsuff= libmagic=32-bit;; + *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") + libsuff=32 shlibsuff=N32 libmagic=N32;; + *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") + libsuff=64 shlibsuff=64 libmagic=64-bit;; + *) libsuff= shlibsuff= libmagic=never-match;; + esac + ;; + esac + shlibpath_var=LD_LIBRARY${shlibsuff}_PATH + shlibpath_overrides_runpath=no + sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" + sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" + hardcode_into_libs=yes + ;; + +# No shared lib support for Linux oldld, aout, or coff. +linux*oldld* | linux*aout* | linux*coff*) + dynamic_linker=no + ;; + +# This must be glibc/ELF. 
+linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + + # Some binutils ld are patched to set DT_RUNPATH + AC_CACHE_VAL([lt_cv_shlibpath_overrides_runpath], + [lt_cv_shlibpath_overrides_runpath=no + save_LDFLAGS=$LDFLAGS + save_libdir=$libdir + eval "libdir=/foo; wl=\"$_LT_TAGVAR(lt_prog_compiler_wl, $1)\"; \ + LDFLAGS=\"\$LDFLAGS $_LT_TAGVAR(hardcode_libdir_flag_spec, $1)\"" + AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])], + [AS_IF([ ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null], + [lt_cv_shlibpath_overrides_runpath=yes])]) + LDFLAGS=$save_LDFLAGS + libdir=$save_libdir + ]) + shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath + + # This implies no fast_install, which is unacceptable. + # Some rework will be needed to allow for fast_install + # before this can be enabled. + hardcode_into_libs=yes + + # Append ld.so.conf contents to the search path + if test -f /etc/ld.so.conf; then + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \[$]2)); skip = 1; } { if (!skip) print \[$]0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '` + sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" + fi + + # We used to test for /lib/ld.so.1 and disable shared libraries on + # powerpc, because MkLinux only supported shared libraries with the + # GNU dynamic linker. Since this was broken with cross compilers, + # most powerpc-linux boxes support dynamic linking these days and + # people can always --disable-shared, the test was removed, and we + # assume the GNU/Linux dynamic linker is in use. 
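+  # In practice the specs above yield the usual glibc/ELF naming triple for a
+  # shared library: the real file (e.g. libfoo.so.1.2.3), the soname link the
+  # runtime loader resolves (libfoo.so.1) and the unversioned link used at
+  # link time (libfoo.so).  "libfoo" is only a placeholder; the soname embedded
+  # in any real library can be inspected with, for instance:
+  #
+  #   objdump -p /usr/lib/libexample.so.1 2>/dev/null | grep SONAME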
+ dynamic_linker='GNU/Linux ld.so' + ;; + +netbsdelf*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='NetBSD ld.elf_so' + ;; + +netbsd*) + version_type=sunos + need_lib_prefix=no + need_version=no + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + dynamic_linker='NetBSD (a.out) ld.so' + else + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='NetBSD ld.elf_so' + fi + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + +newsos6) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +*nto* | *qnx*) + version_type=qnx + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='ldqnx.so' + ;; + +openbsd*) + version_type=sunos + sys_lib_dlsearch_path_spec="/usr/lib" + need_lib_prefix=no + # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs. 
+ case $host_os in + openbsd3.3 | openbsd3.3.*) need_version=yes ;; + *) need_version=no ;; + esac + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + shlibpath_var=LD_LIBRARY_PATH + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + case $host_os in + openbsd2.[[89]] | openbsd2.[[89]].*) + shlibpath_overrides_runpath=no + ;; + *) + shlibpath_overrides_runpath=yes + ;; + esac + else + shlibpath_overrides_runpath=yes + fi + ;; + +os2*) + libname_spec='$name' + shrext_cmds=".dll" + need_lib_prefix=no + library_names_spec='$libname${shared_ext} $libname.a' + dynamic_linker='OS/2 ld.exe' + shlibpath_var=LIBPATH + ;; + +osf3* | osf4* | osf5*) + version_type=osf + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" + sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" + ;; + +rdos*) + dynamic_linker=no + ;; + +solaris*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + # ldd complains unless libraries are executable + postinstall_cmds='chmod +x $lib' + ;; + +sunos4*) + version_type=sunos + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + if test "$with_gnu_ld" = yes; then + need_lib_prefix=no + fi + need_version=yes + ;; + +sysv4 | sysv4.3*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + case $host_vendor in + sni) + shlibpath_overrides_runpath=no + need_lib_prefix=no + runpath_var=LD_RUN_PATH + ;; + siemens) + need_lib_prefix=no + ;; + motorola) + need_lib_prefix=no + need_version=no + shlibpath_overrides_runpath=no + sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' + ;; + esac + ;; + +sysv4*MP*) + if test -d /usr/nec ;then + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' + soname_spec='$libname${shared_ext}.$major' + shlibpath_var=LD_LIBRARY_PATH + fi + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + version_type=freebsd-elf + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + if test "$with_gnu_ld" = yes; then + sys_lib_search_path_spec='/usr/local/lib 
/usr/gnu/lib /usr/ccs/lib /usr/lib /lib' + else + sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' + case $host_os in + sco3.2v5*) + sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" + ;; + esac + fi + sys_lib_dlsearch_path_spec='/usr/lib' + ;; + +tpf*) + # TPF is a cross-target only. Preferred cross-host = GNU/Linux. + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +uts4*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +*) + dynamic_linker=no + ;; +esac +AC_MSG_RESULT([$dynamic_linker]) +test "$dynamic_linker" = no && can_build_shared=no + +variables_saved_for_relink="PATH $shlibpath_var $runpath_var" +if test "$GCC" = yes; then + variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" +fi + +if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then + sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec" +fi +if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then + sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec" +fi + +_LT_DECL([], [variables_saved_for_relink], [1], + [Variables whose values should be saved in libtool wrapper scripts and + restored at link time]) +_LT_DECL([], [need_lib_prefix], [0], + [Do we need the "lib" prefix for modules?]) +_LT_DECL([], [need_version], [0], [Do we need a version for libraries?]) +_LT_DECL([], [version_type], [0], [Library versioning type]) +_LT_DECL([], [runpath_var], [0], [Shared library runtime path variable]) +_LT_DECL([], [shlibpath_var], [0],[Shared library path variable]) +_LT_DECL([], [shlibpath_overrides_runpath], [0], + [Is shlibpath searched before the hard-coded library search path?]) +_LT_DECL([], [libname_spec], [1], [Format of library name prefix]) +_LT_DECL([], [library_names_spec], [1], + [[List of archive names. First name is the real one, the rest are links. 
+ The last name is the one that the linker finds with -lNAME]]) +_LT_DECL([], [soname_spec], [1], + [[The coded name of the library, if different from the real name]]) +_LT_DECL([], [install_override_mode], [1], + [Permission mode override for installation of shared libraries]) +_LT_DECL([], [postinstall_cmds], [2], + [Command to use after installation of a shared archive]) +_LT_DECL([], [postuninstall_cmds], [2], + [Command to use after uninstallation of a shared archive]) +_LT_DECL([], [finish_cmds], [2], + [Commands used to finish a libtool library installation in a directory]) +_LT_DECL([], [finish_eval], [1], + [[As "finish_cmds", except a single script fragment to be evaled but + not shown]]) +_LT_DECL([], [hardcode_into_libs], [0], + [Whether we should hardcode library paths into libraries]) +_LT_DECL([], [sys_lib_search_path_spec], [2], + [Compile-time system search path for libraries]) +_LT_DECL([], [sys_lib_dlsearch_path_spec], [2], + [Run-time system search path for libraries]) +])# _LT_SYS_DYNAMIC_LINKER + + +# _LT_PATH_TOOL_PREFIX(TOOL) +# -------------------------- +# find a file program which can recognize shared library +AC_DEFUN([_LT_PATH_TOOL_PREFIX], +[m4_require([_LT_DECL_EGREP])dnl +AC_MSG_CHECKING([for $1]) +AC_CACHE_VAL(lt_cv_path_MAGIC_CMD, +[case $MAGIC_CMD in +[[\\/*] | ?:[\\/]*]) + lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. + ;; +*) + lt_save_MAGIC_CMD="$MAGIC_CMD" + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR +dnl $ac_dummy forces splitting on constant user-supplied paths. +dnl POSIX.2 word splitting is done only on the output of word expansions, +dnl not every word. This closes a longstanding sh security hole. + ac_dummy="m4_if([$2], , $PATH, [$2])" + for ac_dir in $ac_dummy; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + if test -f $ac_dir/$1; then + lt_cv_path_MAGIC_CMD="$ac_dir/$1" + if test -n "$file_magic_test_file"; then + case $deplibs_check_method in + "file_magic "*) + file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` + MAGIC_CMD="$lt_cv_path_MAGIC_CMD" + if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | + $EGREP "$file_magic_regex" > /dev/null; then + : + else + cat <<_LT_EOF 1>&2 + +*** Warning: the command libtool uses to detect shared libraries, +*** $file_magic_cmd, produces output that libtool cannot recognize. +*** The result is that libtool may fail to recognize shared libraries +*** as such. This will affect the creation of libtool libraries that +*** depend on shared libraries, but programs linked with such libtool +*** libraries will work regardless of this problem. 
Nevertheless, you +*** may want to report the problem to your system manager and/or to +*** bug-libtool@gnu.org + +_LT_EOF + fi ;; + esac + fi + break + fi + done + IFS="$lt_save_ifs" + MAGIC_CMD="$lt_save_MAGIC_CMD" + ;; +esac]) +MAGIC_CMD="$lt_cv_path_MAGIC_CMD" +if test -n "$MAGIC_CMD"; then + AC_MSG_RESULT($MAGIC_CMD) +else + AC_MSG_RESULT(no) +fi +_LT_DECL([], [MAGIC_CMD], [0], + [Used to examine libraries when file_magic_cmd begins with "file"])dnl +])# _LT_PATH_TOOL_PREFIX + +# Old name: +AU_ALIAS([AC_PATH_TOOL_PREFIX], [_LT_PATH_TOOL_PREFIX]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_PATH_TOOL_PREFIX], []) + + +# _LT_PATH_MAGIC +# -------------- +# find a file program which can recognize a shared library +m4_defun([_LT_PATH_MAGIC], +[_LT_PATH_TOOL_PREFIX(${ac_tool_prefix}file, /usr/bin$PATH_SEPARATOR$PATH) +if test -z "$lt_cv_path_MAGIC_CMD"; then + if test -n "$ac_tool_prefix"; then + _LT_PATH_TOOL_PREFIX(file, /usr/bin$PATH_SEPARATOR$PATH) + else + MAGIC_CMD=: + fi +fi +])# _LT_PATH_MAGIC + + +# LT_PATH_LD +# ---------- +# find the pathname to the GNU or non-GNU linker +AC_DEFUN([LT_PATH_LD], +[AC_REQUIRE([AC_PROG_CC])dnl +AC_REQUIRE([AC_CANONICAL_HOST])dnl +AC_REQUIRE([AC_CANONICAL_BUILD])dnl +m4_require([_LT_DECL_SED])dnl +m4_require([_LT_DECL_EGREP])dnl +m4_require([_LT_PROG_ECHO_BACKSLASH])dnl + +AC_ARG_WITH([gnu-ld], + [AS_HELP_STRING([--with-gnu-ld], + [assume the C compiler uses GNU ld @<:@default=no@:>@])], + [test "$withval" = no || with_gnu_ld=yes], + [with_gnu_ld=no])dnl + +ac_prog=ld +if test "$GCC" = yes; then + # Check if gcc -print-prog-name=ld gives a path. + AC_MSG_CHECKING([for ld used by $CC]) + case $host in + *-*-mingw*) + # gcc leaves a trailing carriage return which upsets mingw + ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; + *) + ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; + esac + case $ac_prog in + # Accept absolute paths. + [[\\/]]* | ?:[[\\/]]*) + re_direlt='/[[^/]][[^/]]*/\.\./' + # Canonicalize the pathname of ld + ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` + while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do + ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` + done + test -z "$LD" && LD="$ac_prog" + ;; + "") + # If it fails, then pretend we aren't using GCC. + ac_prog=ld + ;; + *) + # If it is relative, then search for the first ld in PATH. + with_gnu_ld=unknown + ;; + esac +elif test "$with_gnu_ld" = yes; then + AC_MSG_CHECKING([for GNU ld]) +else + AC_MSG_CHECKING([for non-GNU ld]) +fi +AC_CACHE_VAL(lt_cv_path_LD, +[if test -z "$LD"; then + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + for ac_dir in $PATH; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then + lt_cv_path_LD="$ac_dir/$ac_prog" + # Check to see if the program is GNU ld. I'd rather use --version, + # but apparently some variants of GNU ld only accept -v. + # Break only if it was the GNU/non-GNU ld that we prefer. + case `"$lt_cv_path_LD" -v 2>&1 &1 /dev/null 2>&1; then + lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' + lt_cv_file_magic_cmd='func_win32_libid' + else + # Keep this pattern in sync with the one in func_win32_libid. + lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' + lt_cv_file_magic_cmd='$OBJDUMP -f' + fi + ;; + +cegcc*) + # use the weaker test based on 'objdump'. See mingw*. 
+ lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?' + lt_cv_file_magic_cmd='$OBJDUMP -f' + ;; + +darwin* | rhapsody*) + lt_cv_deplibs_check_method=pass_all + ;; + +freebsd* | dragonfly*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then + case $host_cpu in + i*86 ) + # Not sure whether the presence of OpenBSD here was a mistake. + # Let's accept both of them until this is cleared up. + lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[[3-9]]86 (compact )?demand paged shared library' + lt_cv_file_magic_cmd=/usr/bin/file + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` + ;; + esac + else + lt_cv_deplibs_check_method=pass_all + fi + ;; + +haiku*) + lt_cv_deplibs_check_method=pass_all + ;; + +hpux10.20* | hpux11*) + lt_cv_file_magic_cmd=/usr/bin/file + case $host_cpu in + ia64*) + lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|ELF-[[0-9]][[0-9]]) shared object file - IA64' + lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so + ;; + hppa*64*) + [lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF[ -][0-9][0-9])(-bit)?( [LM]SB)? shared object( file)?[, -]* PA-RISC [0-9]\.[0-9]'] + lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl + ;; + *) + lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|PA-RISC[[0-9]]\.[[0-9]]) shared library' + lt_cv_file_magic_test_file=/usr/lib/libc.sl + ;; + esac + ;; + +interix[[3-9]]*) + # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|\.a)$' + ;; + +irix5* | irix6* | nonstopux*) + case $LD in + *-32|*"-32 ") libmagic=32-bit;; + *-n32|*"-n32 ") libmagic=N32;; + *-64|*"-64 ") libmagic=64-bit;; + *) libmagic=never-match;; + esac + lt_cv_deplibs_check_method=pass_all + ;; + +# This must be glibc/ELF. 
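+# ('pass_all' means no file-magic check is run at all: every dependency is
+#  handed straight to the linker, which is safe on these ELF systems.)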
+linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) + lt_cv_deplibs_check_method=pass_all + ;; + +netbsd* | netbsdelf*-gnu) + if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|_pic\.a)$' + fi + ;; + +newos6*) + lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (executable|dynamic lib)' + lt_cv_file_magic_cmd=/usr/bin/file + lt_cv_file_magic_test_file=/usr/lib/libnls.so + ;; + +*nto* | *qnx*) + lt_cv_deplibs_check_method=pass_all + ;; + +openbsd*) + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|\.so|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' + fi + ;; + +osf3* | osf4* | osf5*) + lt_cv_deplibs_check_method=pass_all + ;; + +rdos*) + lt_cv_deplibs_check_method=pass_all + ;; + +solaris*) + lt_cv_deplibs_check_method=pass_all + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + lt_cv_deplibs_check_method=pass_all + ;; + +sysv4 | sysv4.3*) + case $host_vendor in + motorola) + lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib) M[[0-9]][[0-9]]* Version [[0-9]]' + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` + ;; + ncr) + lt_cv_deplibs_check_method=pass_all + ;; + sequent) + lt_cv_file_magic_cmd='/bin/file' + lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB (shared object|dynamic lib )' + ;; + sni) + lt_cv_file_magic_cmd='/bin/file' + lt_cv_deplibs_check_method="file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB dynamic lib" + lt_cv_file_magic_test_file=/lib/libc.so + ;; + siemens) + lt_cv_deplibs_check_method=pass_all + ;; + pc) + lt_cv_deplibs_check_method=pass_all + ;; + esac + ;; + +tpf*) + lt_cv_deplibs_check_method=pass_all + ;; +esac +]) + +file_magic_glob= +want_nocaseglob=no +if test "$build" = "$host"; then + case $host_os in + mingw* | pw32*) + if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then + want_nocaseglob=yes + else + file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[[\1]]\/[[\1]]\/g;/g"` + fi + ;; + esac +fi + +file_magic_cmd=$lt_cv_file_magic_cmd +deplibs_check_method=$lt_cv_deplibs_check_method +test -z "$deplibs_check_method" && deplibs_check_method=unknown + +_LT_DECL([], [deplibs_check_method], [1], + [Method to check whether dependent libraries are shared objects]) +_LT_DECL([], [file_magic_cmd], [1], + [Command to use when deplibs_check_method = "file_magic"]) +_LT_DECL([], [file_magic_glob], [1], + [How to find potential files when deplibs_check_method = "file_magic"]) +_LT_DECL([], [want_nocaseglob], [1], + [Find potential files using nocaseglob when deplibs_check_method = "file_magic"]) +])# _LT_CHECK_MAGIC_METHOD + + +# LT_PATH_NM +# ---------- +# find the pathname to a BSD- or MS-compatible name lister +AC_DEFUN([LT_PATH_NM], +[AC_REQUIRE([AC_PROG_CC])dnl +AC_CACHE_CHECK([for BSD- or MS-compatible name lister (nm)], lt_cv_path_NM, +[if test -n "$NM"; then + # Let the user override the test. 
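+  # (For example, running  NM="nm -B" ./configure  makes configure take that
+  #  command as-is and skip the search over $PATH and the vendor tool
+  #  directories below.)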
+ lt_cv_path_NM="$NM" +else + lt_nm_to_check="${ac_tool_prefix}nm" + if test -n "$ac_tool_prefix" && test "$build" = "$host"; then + lt_nm_to_check="$lt_nm_to_check nm" + fi + for lt_tmp_nm in $lt_nm_to_check; do + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + tmp_nm="$ac_dir/$lt_tmp_nm" + if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext" ; then + # Check to see if the nm accepts a BSD-compat flag. + # Adding the `sed 1q' prevents false positives on HP-UX, which says: + # nm: unknown option "B" ignored + # Tru64's nm complains that /dev/null is an invalid object file + case `"$tmp_nm" -B /dev/null 2>&1 | sed '1q'` in + */dev/null* | *'Invalid file or object type'*) + lt_cv_path_NM="$tmp_nm -B" + break + ;; + *) + case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in + */dev/null*) + lt_cv_path_NM="$tmp_nm -p" + break + ;; + *) + lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but + continue # so that we can try to find one that supports BSD flags + ;; + esac + ;; + esac + fi + done + IFS="$lt_save_ifs" + done + : ${lt_cv_path_NM=no} +fi]) +if test "$lt_cv_path_NM" != "no"; then + NM="$lt_cv_path_NM" +else + # Didn't find any BSD compatible name lister, look for dumpbin. + if test -n "$DUMPBIN"; then : + # Let the user override the test. + else + AC_CHECK_TOOLS(DUMPBIN, [dumpbin "link -dump"], :) + case `$DUMPBIN -symbols /dev/null 2>&1 | sed '1q'` in + *COFF*) + DUMPBIN="$DUMPBIN -symbols" + ;; + *) + DUMPBIN=: + ;; + esac + fi + AC_SUBST([DUMPBIN]) + if test "$DUMPBIN" != ":"; then + NM="$DUMPBIN" + fi +fi +test -z "$NM" && NM=nm +AC_SUBST([NM]) +_LT_DECL([], [NM], [1], [A BSD- or MS-compatible name lister])dnl + +AC_CACHE_CHECK([the name lister ($NM) interface], [lt_cv_nm_interface], + [lt_cv_nm_interface="BSD nm" + echo "int some_variable = 0;" > conftest.$ac_ext + (eval echo "\"\$as_me:$LINENO: $ac_compile\"" >&AS_MESSAGE_LOG_FD) + (eval "$ac_compile" 2>conftest.err) + cat conftest.err >&AS_MESSAGE_LOG_FD + (eval echo "\"\$as_me:$LINENO: $NM \\\"conftest.$ac_objext\\\"\"" >&AS_MESSAGE_LOG_FD) + (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out) + cat conftest.err >&AS_MESSAGE_LOG_FD + (eval echo "\"\$as_me:$LINENO: output\"" >&AS_MESSAGE_LOG_FD) + cat conftest.out >&AS_MESSAGE_LOG_FD + if $GREP 'External.*some_variable' conftest.out > /dev/null; then + lt_cv_nm_interface="MS dumpbin" + fi + rm -f conftest*]) +])# LT_PATH_NM + +# Old names: +AU_ALIAS([AM_PROG_NM], [LT_PATH_NM]) +AU_ALIAS([AC_PROG_NM], [LT_PATH_NM]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AM_PROG_NM], []) +dnl AC_DEFUN([AC_PROG_NM], []) + +# _LT_CHECK_SHAREDLIB_FROM_LINKLIB +# -------------------------------- +# how to determine the name of the shared library +# associated with a specific link library. 
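+# (On Windows-style hosts a program links against an import library such as
+# libfoo.dll.a while the code lives in a separate DLL; the command selected
+# below maps the import library back to that DLL, on Cygwin/MinGW via the
+# helper functions in ltmain.sh that query $DLLTOOL.)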
+# -- PORTME fill in with the dynamic library characteristics +m4_defun([_LT_CHECK_SHAREDLIB_FROM_LINKLIB], +[m4_require([_LT_DECL_EGREP]) +m4_require([_LT_DECL_OBJDUMP]) +m4_require([_LT_DECL_DLLTOOL]) +AC_CACHE_CHECK([how to associate runtime and link libraries], +lt_cv_sharedlib_from_linklib_cmd, +[lt_cv_sharedlib_from_linklib_cmd='unknown' + +case $host_os in +cygwin* | mingw* | pw32* | cegcc*) + # two different shell functions defined in ltmain.sh + # decide which to use based on capabilities of $DLLTOOL + case `$DLLTOOL --help 2>&1` in + *--identify-strict*) + lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib + ;; + *) + lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback + ;; + esac + ;; +*) + # fallback: assume linklib IS sharedlib + lt_cv_sharedlib_from_linklib_cmd="$ECHO" + ;; +esac +]) +sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd +test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO + +_LT_DECL([], [sharedlib_from_linklib_cmd], [1], + [Command to associate shared and link libraries]) +])# _LT_CHECK_SHAREDLIB_FROM_LINKLIB + + +# _LT_PATH_MANIFEST_TOOL +# ---------------------- +# locate the manifest tool +m4_defun([_LT_PATH_MANIFEST_TOOL], +[AC_CHECK_TOOL(MANIFEST_TOOL, mt, :) +test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt +AC_CACHE_CHECK([if $MANIFEST_TOOL is a manifest tool], [lt_cv_path_mainfest_tool], + [lt_cv_path_mainfest_tool=no + echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&AS_MESSAGE_LOG_FD + $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out + cat conftest.err >&AS_MESSAGE_LOG_FD + if $GREP 'Manifest Tool' conftest.out > /dev/null; then + lt_cv_path_mainfest_tool=yes + fi + rm -f conftest*]) +if test "x$lt_cv_path_mainfest_tool" != xyes; then + MANIFEST_TOOL=: +fi +_LT_DECL([], [MANIFEST_TOOL], [1], [Manifest tool])dnl +])# _LT_PATH_MANIFEST_TOOL + + +# LT_LIB_M +# -------- +# check for math library +AC_DEFUN([LT_LIB_M], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +LIBM= +case $host in +*-*-beos* | *-*-cegcc* | *-*-cygwin* | *-*-haiku* | *-*-pw32* | *-*-darwin*) + # These system don't have libm, or don't need it + ;; +*-ncr-sysv4.3*) + AC_CHECK_LIB(mw, _mwvalidcheckl, LIBM="-lmw") + AC_CHECK_LIB(m, cos, LIBM="$LIBM -lm") + ;; +*) + AC_CHECK_LIB(m, cos, LIBM="-lm") + ;; +esac +AC_SUBST([LIBM]) +])# LT_LIB_M + +# Old name: +AU_ALIAS([AC_CHECK_LIBM], [LT_LIB_M]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_CHECK_LIBM], []) + + +# _LT_COMPILER_NO_RTTI([TAGNAME]) +# ------------------------------- +m4_defun([_LT_COMPILER_NO_RTTI], +[m4_require([_LT_TAG_COMPILER])dnl + +_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= + +if test "$GCC" = yes; then + case $cc_basename in + nvcc*) + _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -Xcompiler -fno-builtin' ;; + *) + _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' ;; + esac + + _LT_COMPILER_OPTION([if $compiler supports -fno-rtti -fno-exceptions], + lt_cv_prog_compiler_rtti_exceptions, + [-fno-rtti -fno-exceptions], [], + [_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)="$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1) -fno-rtti -fno-exceptions"]) +fi +_LT_TAGDECL([no_builtin_flag], [lt_prog_compiler_no_builtin_flag], [1], + [Compiler flag to turn off builtin functions]) +])# _LT_COMPILER_NO_RTTI + + +# _LT_CMD_GLOBAL_SYMBOLS +# ---------------------- +m4_defun([_LT_CMD_GLOBAL_SYMBOLS], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +AC_REQUIRE([AC_PROG_CC])dnl +AC_REQUIRE([AC_PROG_AWK])dnl +AC_REQUIRE([LT_PATH_NM])dnl 
+AC_REQUIRE([LT_PATH_LD])dnl +m4_require([_LT_DECL_SED])dnl +m4_require([_LT_DECL_EGREP])dnl +m4_require([_LT_TAG_COMPILER])dnl + +# Check for command to grab the raw symbol name followed by C symbol from nm. +AC_MSG_CHECKING([command to parse $NM output from $compiler object]) +AC_CACHE_VAL([lt_cv_sys_global_symbol_pipe], +[ +# These are sane defaults that work on at least a few old systems. +# [They come from Ultrix. What could be older than Ultrix?!! ;)] + +# Character class describing NM global symbol codes. +symcode='[[BCDEGRST]]' + +# Regexp to match symbols that can be accessed directly from C. +sympat='\([[_A-Za-z]][[_A-Za-z0-9]]*\)' + +# Define system-specific variables. +case $host_os in +aix*) + symcode='[[BCDT]]' + ;; +cygwin* | mingw* | pw32* | cegcc*) + symcode='[[ABCDGISTW]]' + ;; +hpux*) + if test "$host_cpu" = ia64; then + symcode='[[ABCDEGRST]]' + fi + ;; +irix* | nonstopux*) + symcode='[[BCDEGRST]]' + ;; +osf*) + symcode='[[BCDEGQRST]]' + ;; +solaris*) + symcode='[[BDRT]]' + ;; +sco3.2v5*) + symcode='[[DT]]' + ;; +sysv4.2uw2*) + symcode='[[DT]]' + ;; +sysv5* | sco5v6* | unixware* | OpenUNIX*) + symcode='[[ABDT]]' + ;; +sysv4) + symcode='[[DFNSTU]]' + ;; +esac + +# If we're using GNU nm, then use its standard symbol codes. +case `$NM -V 2>&1` in +*GNU* | *'with BFD'*) + symcode='[[ABCDGIRSTW]]' ;; +esac + +# Transform an extracted symbol line into a proper C declaration. +# Some systems (esp. on ia64) link data and code symbols differently, +# so use this general approach. +lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" + +# Transform an extracted symbol line into symbol name and symbol address +lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\)[[ ]]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"\2\", (void *) \&\2},/p'" +lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([[^ ]]*\)[[ ]]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \(lib[[^ ]]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"lib\2\", (void *) \&\2},/p'" + +# Handle CRLF in mingw tool chain +opt_cr= +case $build_os in +mingw*) + opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp + ;; +esac + +# Try without a prefix underscore, then with it. +for ac_symprfx in "" "_"; do + + # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. + symxfrm="\\1 $ac_symprfx\\2 \\2" + + # Write the raw and C identifiers. + if test "$lt_cv_nm_interface" = "MS dumpbin"; then + # Fake it for dumpbin and say T for any non-static function + # and D for any global variable. + # Also find C++ and __fastcall symbols from MSVC++, + # which start with @ or ?. + lt_cv_sys_global_symbol_pipe="$AWK ['"\ +" {last_section=section; section=\$ 3};"\ +" /^COFF SYMBOL TABLE/{for(i in hide) delete hide[i]};"\ +" /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\ +" \$ 0!~/External *\|/{next};"\ +" / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\ +" {if(hide[section]) next};"\ +" {f=0}; \$ 0~/\(\).*\|/{f=1}; {printf f ? 
\"T \" : \"D \"};"\ +" {split(\$ 0, a, /\||\r/); split(a[2], s)};"\ +" s[1]~/^[@?]/{print s[1], s[1]; next};"\ +" s[1]~prfx {split(s[1],t,\"@\"); print t[1], substr(t[1],length(prfx))}"\ +" ' prfx=^$ac_symprfx]" + else + lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[[ ]]\($symcode$symcode*\)[[ ]][[ ]]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" + fi + lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'" + + # Check to see that the pipe works correctly. + pipe_works=no + + rm -f conftest* + cat > conftest.$ac_ext <<_LT_EOF +#ifdef __cplusplus +extern "C" { +#endif +char nm_test_var; +void nm_test_func(void); +void nm_test_func(void){} +#ifdef __cplusplus +} +#endif +int main(){nm_test_var='a';nm_test_func();return(0);} +_LT_EOF + + if AC_TRY_EVAL(ac_compile); then + # Now try to grab the symbols. + nlist=conftest.nm + if AC_TRY_EVAL(NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist) && test -s "$nlist"; then + # Try sorting and uniquifying the output. + if sort "$nlist" | uniq > "$nlist"T; then + mv -f "$nlist"T "$nlist" + else + rm -f "$nlist"T + fi + + # Make sure that we snagged all the symbols we need. + if $GREP ' nm_test_var$' "$nlist" >/dev/null; then + if $GREP ' nm_test_func$' "$nlist" >/dev/null; then + cat <<_LT_EOF > conftest.$ac_ext +/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ +#if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE) +/* DATA imports from DLLs on WIN32 con't be const, because runtime + relocations are performed -- see ld's documentation on pseudo-relocs. */ +# define LT@&t@_DLSYM_CONST +#elif defined(__osf__) +/* This system does not cope well with relocations in const data. */ +# define LT@&t@_DLSYM_CONST +#else +# define LT@&t@_DLSYM_CONST const +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +_LT_EOF + # Now generate the symbol file. + eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext' + + cat <<_LT_EOF >> conftest.$ac_ext + +/* The mapping between symbol names and symbols. */ +LT@&t@_DLSYM_CONST struct { + const char *name; + void *address; +} +lt__PROGRAM__LTX_preloaded_symbols[[]] = +{ + { "@PROGRAM@", (void *) 0 }, +_LT_EOF + $SED "s/^$symcode$symcode* \(.*\) \(.*\)$/ {\"\2\", (void *) \&\2},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext + cat <<\_LT_EOF >> conftest.$ac_ext + {0, (void *) 0} +}; + +/* This works around a problem in FreeBSD linker */ +#ifdef FREEBSD_WORKAROUND +static const void *lt_preloaded_setup() { + return lt__PROGRAM__LTX_preloaded_symbols; +} +#endif + +#ifdef __cplusplus +} +#endif +_LT_EOF + # Now try linking the two files. + mv conftest.$ac_objext conftstm.$ac_objext + lt_globsym_save_LIBS=$LIBS + lt_globsym_save_CFLAGS=$CFLAGS + LIBS="conftstm.$ac_objext" + CFLAGS="$CFLAGS$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)" + if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext}; then + pipe_works=yes + fi + LIBS=$lt_globsym_save_LIBS + CFLAGS=$lt_globsym_save_CFLAGS + else + echo "cannot find nm_test_func in $nlist" >&AS_MESSAGE_LOG_FD + fi + else + echo "cannot find nm_test_var in $nlist" >&AS_MESSAGE_LOG_FD + fi + else + echo "cannot run $lt_cv_sys_global_symbol_pipe" >&AS_MESSAGE_LOG_FD + fi + else + echo "$progname: failed program was:" >&AS_MESSAGE_LOG_FD + cat conftest.$ac_ext >&5 + fi + rm -rf conftest* conftst* + + # Do not use the global_symbol_pipe unless it works. 
+ if test "$pipe_works" = yes; then + break + else + lt_cv_sys_global_symbol_pipe= + fi +done +]) +if test -z "$lt_cv_sys_global_symbol_pipe"; then + lt_cv_sys_global_symbol_to_cdecl= +fi +if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then + AC_MSG_RESULT(failed) +else + AC_MSG_RESULT(ok) +fi + +# Response file support. +if test "$lt_cv_nm_interface" = "MS dumpbin"; then + nm_file_list_spec='@' +elif $NM --help 2>/dev/null | grep '[[@]]FILE' >/dev/null; then + nm_file_list_spec='@' +fi + +_LT_DECL([global_symbol_pipe], [lt_cv_sys_global_symbol_pipe], [1], + [Take the output of nm and produce a listing of raw symbols and C names]) +_LT_DECL([global_symbol_to_cdecl], [lt_cv_sys_global_symbol_to_cdecl], [1], + [Transform the output of nm in a proper C declaration]) +_LT_DECL([global_symbol_to_c_name_address], + [lt_cv_sys_global_symbol_to_c_name_address], [1], + [Transform the output of nm in a C name address pair]) +_LT_DECL([global_symbol_to_c_name_address_lib_prefix], + [lt_cv_sys_global_symbol_to_c_name_address_lib_prefix], [1], + [Transform the output of nm in a C name address pair when lib prefix is needed]) +_LT_DECL([], [nm_file_list_spec], [1], + [Specify filename containing input files for $NM]) +]) # _LT_CMD_GLOBAL_SYMBOLS + + +# _LT_COMPILER_PIC([TAGNAME]) +# --------------------------- +m4_defun([_LT_COMPILER_PIC], +[m4_require([_LT_TAG_COMPILER])dnl +_LT_TAGVAR(lt_prog_compiler_wl, $1)= +_LT_TAGVAR(lt_prog_compiler_pic, $1)= +_LT_TAGVAR(lt_prog_compiler_static, $1)= + +m4_if([$1], [CXX], [ + # C++ specific cases for pic, static, wl, etc. + if test "$GXX" = yes; then + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + + case $host_os in + aix*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + m68k) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the `-m68020' flag to GCC prevents building anything better, + # like `-m68040'. + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' + ;; + esac + ;; + + beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. + ;; + mingw* | cygwin* | os2* | pw32* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + # Although the cygwin gcc ignores -fPIC, still need this for old-style + # (--disable-auto-import) libraries + m4_if([$1], [GCJ], [], + [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) + ;; + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' + ;; + *djgpp*) + # DJGPP does not support shared libraries at all + _LT_TAGVAR(lt_prog_compiler_pic, $1)= + ;; + haiku*) + # PIC is the default for Haiku. + # The "-static" flag exists, but is broken. + _LT_TAGVAR(lt_prog_compiler_static, $1)= + ;; + interix[[3-9]]*) + # Interix 3.x gcc -fpic/-fPIC options generate broken code. + # Instead, we relocate shared libraries at runtime. 
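+      # (See the interix branch of _LT_LINKER_SHLIBS below, which picks a
+      #  random 256 KiB-aligned image base at link time for this purpose.)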
+ ;; + sysv4*MP*) + if test -d /usr/nec; then + _LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic + fi + ;; + hpux*) + # PIC is the default for 64-bit PA HP-UX, but not for 32-bit + # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag + # sets the default TLS model and affects inlining. + case $host_cpu in + hppa*64*) + ;; + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + esac + ;; + *qnx* | *nto*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' + ;; + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + esac + else + case $host_os in + aix[[4-9]]*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + else + _LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' + fi + ;; + chorus*) + case $cc_basename in + cxch68*) + # Green Hills C++ Compiler + # _LT_TAGVAR(lt_prog_compiler_static, $1)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a" + ;; + esac + ;; + mingw* | cygwin* | os2* | pw32* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + m4_if([$1], [GCJ], [], + [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) + ;; + dgux*) + case $cc_basename in + ec++*) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + ;; + ghcx*) + # Green Hills C++ Compiler + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + ;; + *) + ;; + esac + ;; + freebsd* | dragonfly*) + # FreeBSD uses GNU C++ + ;; + hpux9* | hpux10* | hpux11*) + case $cc_basename in + CC*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' + if test "$host_cpu" != ia64; then + _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' + fi + ;; + aCC*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' + ;; + esac + ;; + *) + ;; + esac + ;; + interix*) + # This is c89, which is MS Visual C++ (no shared libs) + # Anyone wants to do a port? + ;; + irix5* | irix6* | nonstopux*) + case $cc_basename in + CC*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + # CC pic flag -KPIC is the default. + ;; + *) + ;; + esac + ;; + linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) + case $cc_basename in + KCC*) + # KAI C++ Compiler + _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + ecpc* ) + # old Intel C++ for x86_64 which still supported -KPIC. + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; + icpc* ) + # Intel C++, used to be incompatible with GCC. + # ICC 10 doesn't accept -KPIC any more. + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; + pgCC* | pgcpp*) + # Portland Group C++ compiler + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + cxx*) + # Compaq C++ + # Make sure the PIC flag is empty. 
It appears that all Alpha + # Linux and Compaq Tru64 Unix objects are PIC. + _LT_TAGVAR(lt_prog_compiler_pic, $1)= + _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + xlc* | xlC* | bgxl[[cC]]* | mpixl[[cC]]*) + # IBM XL 8.0, 9.0 on PPC and BlueGene + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink' + ;; + *) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) + # Sun C++ 5.9 + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' + ;; + esac + ;; + esac + ;; + lynxos*) + ;; + m88k*) + ;; + mvs*) + case $cc_basename in + cxx*) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-W c,exportall' + ;; + *) + ;; + esac + ;; + netbsd* | netbsdelf*-gnu) + ;; + *qnx* | *nto*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' + ;; + osf3* | osf4* | osf5*) + case $cc_basename in + KCC*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' + ;; + RCC*) + # Rational C++ 2.4.1 + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + ;; + cxx*) + # Digital/Compaq C++ + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # Make sure the PIC flag is empty. It appears that all Alpha + # Linux and Compaq Tru64 Unix objects are PIC. + _LT_TAGVAR(lt_prog_compiler_pic, $1)= + _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + *) + ;; + esac + ;; + psos*) + ;; + solaris*) + case $cc_basename in + CC* | sunCC*) + # Sun C++ 4.2, 5.x and Centerline C++ + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' + ;; + gcx*) + # Green Hills C++ Compiler + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' + ;; + *) + ;; + esac + ;; + sunos4*) + case $cc_basename in + CC*) + # Sun C++ 4.x + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + lcc*) + # Lucid + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + ;; + *) + ;; + esac + ;; + sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) + case $cc_basename in + CC*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + esac + ;; + tandem*) + case $cc_basename in + NCC*) + # NonStop-UX NCC 3.20 + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + ;; + *) + ;; + esac + ;; + vxworks*) + ;; + *) + _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no + ;; + esac + fi +], +[ + if test "$GCC" = yes; then + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + + case $host_os in + aix*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + m68k) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the `-m68020' flag to GCC prevents building anything better, + # like `-m68040'. + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' + ;; + esac + ;; + + beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. 
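+	# (so lt_prog_compiler_pic is deliberately left empty here.)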
+ ;; + + mingw* | cygwin* | pw32* | os2* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + # Although the cygwin gcc ignores -fPIC, still need this for old-style + # (--disable-auto-import) libraries + m4_if([$1], [GCJ], [], + [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) + ;; + + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' + ;; + + haiku*) + # PIC is the default for Haiku. + # The "-static" flag exists, but is broken. + _LT_TAGVAR(lt_prog_compiler_static, $1)= + ;; + + hpux*) + # PIC is the default for 64-bit PA HP-UX, but not for 32-bit + # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag + # sets the default TLS model and affects inlining. + case $host_cpu in + hppa*64*) + # +Z the default + ;; + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + esac + ;; + + interix[[3-9]]*) + # Interix 3.x gcc -fpic/-fPIC options generate broken code. + # Instead, we relocate shared libraries at runtime. + ;; + + msdosdjgpp*) + # Just because we use GCC doesn't mean we suddenly get shared libraries + # on systems that don't support them. + _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no + enable_shared=no + ;; + + *nto* | *qnx*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + _LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic + fi + ;; + + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + esac + + case $cc_basename in + nvcc*) # Cuda Compiler Driver 2.2 + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Xlinker ' + if test -n "$_LT_TAGVAR(lt_prog_compiler_pic, $1)"; then + _LT_TAGVAR(lt_prog_compiler_pic, $1)="-Xcompiler $_LT_TAGVAR(lt_prog_compiler_pic, $1)" + fi + ;; + esac + else + # PORTME Check for flag to pass linker flags through the system compiler. + case $host_os in + aix*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + else + _LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' + fi + ;; + + mingw* | cygwin* | pw32* | os2* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + m4_if([$1], [GCJ], [], + [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) + ;; + + hpux9* | hpux10* | hpux11*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' + ;; + esac + # Is there a better lt_prog_compiler_static that works with the bundled CC? + _LT_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' + ;; + + irix5* | irix6* | nonstopux*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # PIC (with -KPIC) is the default. + _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + + linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) + case $cc_basename in + # old Intel for x86_64 which still supported -KPIC. 
+ ecc*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; + # icc used to be incompatible with GCC. + # ICC 10 doesn't accept -KPIC any more. + icc* | ifort*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; + # Lahey Fortran 8.1. + lf95*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='--shared' + _LT_TAGVAR(lt_prog_compiler_static, $1)='--static' + ;; + nagfor*) + # NAG Fortran compiler + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,-Wl,,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) + # Portland Group compilers (*not* the Pentium gcc compiler, + # which looks to be a dead project) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + ccc*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # All Alpha code is PIC. + _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + xl* | bgxl* | bgf* | mpixl*) + # IBM XL C 8.0/Fortran 10.1, 11.1 on PPC and BlueGene + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink' + ;; + *) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ Ceres\ Fortran* | *Sun*Fortran*\ [[1-7]].* | *Sun*Fortran*\ 8.[[0-3]]*) + # Sun Fortran 8.3 passes all unrecognized flags to the linker + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + _LT_TAGVAR(lt_prog_compiler_wl, $1)='' + ;; + *Sun\ F* | *Sun*Fortran*) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' + ;; + *Sun\ C*) + # Sun C 5.9 + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + ;; + *Intel*\ [[CF]]*Compiler*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; + *Portland\ Group*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + esac + ;; + esac + ;; + + newsos6) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + *nto* | *qnx*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' + ;; + + osf3* | osf4* | osf5*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # All OSF/1 code is PIC. 
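+      # (so no PIC flag is needed; only static linking needs the vendor
+      #  compiler's -non_shared switch, set just below.)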
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + + rdos*) + _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + + solaris*) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + case $cc_basename in + f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ';; + *) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,';; + esac + ;; + + sunos4*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + sysv4 | sysv4.2uw2* | sysv4.3*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + sysv4*MP*) + if test -d /usr/nec ;then + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-Kconform_pic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + fi + ;; + + sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + unicos*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no + ;; + + uts4*) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + *) + _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no + ;; + esac + fi +]) +case $host_os in + # For platforms which do not support PIC, -DPIC is meaningless: + *djgpp*) + _LT_TAGVAR(lt_prog_compiler_pic, $1)= + ;; + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)="$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])" + ;; +esac + +AC_CACHE_CHECK([for $compiler option to produce PIC], + [_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)], + [_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)=$_LT_TAGVAR(lt_prog_compiler_pic, $1)]) +_LT_TAGVAR(lt_prog_compiler_pic, $1)=$_LT_TAGVAR(lt_cv_prog_compiler_pic, $1) + +# +# Check to make sure the PIC flag actually works. +# +if test -n "$_LT_TAGVAR(lt_prog_compiler_pic, $1)"; then + _LT_COMPILER_OPTION([if $compiler PIC flag $_LT_TAGVAR(lt_prog_compiler_pic, $1) works], + [_LT_TAGVAR(lt_cv_prog_compiler_pic_works, $1)], + [$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])], [], + [case $_LT_TAGVAR(lt_prog_compiler_pic, $1) in + "" | " "*) ;; + *) _LT_TAGVAR(lt_prog_compiler_pic, $1)=" $_LT_TAGVAR(lt_prog_compiler_pic, $1)" ;; + esac], + [_LT_TAGVAR(lt_prog_compiler_pic, $1)= + _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no]) +fi +_LT_TAGDECL([pic_flag], [lt_prog_compiler_pic], [1], + [Additional compiler flags for building library objects]) + +_LT_TAGDECL([wl], [lt_prog_compiler_wl], [1], + [How to pass a linker flag through the compiler]) +# +# Check to make sure the static flag actually works. +# +wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1) eval lt_tmp_static_flag=\"$_LT_TAGVAR(lt_prog_compiler_static, $1)\" +_LT_LINKER_OPTION([if $compiler static flag $lt_tmp_static_flag works], + _LT_TAGVAR(lt_cv_prog_compiler_static_works, $1), + $lt_tmp_static_flag, + [], + [_LT_TAGVAR(lt_prog_compiler_static, $1)=]) +_LT_TAGDECL([link_static_flag], [lt_prog_compiler_static], [1], + [Compiler flag to prevent dynamic linking]) +])# _LT_COMPILER_PIC + + +# _LT_LINKER_SHLIBS([TAGNAME]) +# ---------------------------- +# See if the linker supports building shared libraries. 
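+# The results are recorded in per-tag variables such as archive_cmds,
+# ld_shlibs, hardcode_libdir_flag_spec and export_symbols_cmds, which end up
+# in the generated libtool script and drive how shared libraries are linked.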
+m4_defun([_LT_LINKER_SHLIBS], +[AC_REQUIRE([LT_PATH_LD])dnl +AC_REQUIRE([LT_PATH_NM])dnl +m4_require([_LT_PATH_MANIFEST_TOOL])dnl +m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_DECL_EGREP])dnl +m4_require([_LT_DECL_SED])dnl +m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl +m4_require([_LT_TAG_COMPILER])dnl +AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) +m4_if([$1], [CXX], [ + _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'] + case $host_os in + aix[[4-9]]*) + # If we're using GNU nm, then we don't want the "-C" option. + # -C means demangle to AIX nm, but means don't demangle with GNU nm + # Also, AIX nm treats weak defined symbols like other global defined + # symbols, whereas GNU nm marks them as "W". + if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then + _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + else + _LT_TAGVAR(export_symbols_cmds, $1)='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + fi + ;; + pw32*) + _LT_TAGVAR(export_symbols_cmds, $1)="$ltdll_cmds" + ;; + cygwin* | mingw* | cegcc*) + case $cc_basename in + cl*) + _LT_TAGVAR(exclude_expsyms, $1)='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' + ;; + *) + _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols' + _LT_TAGVAR(exclude_expsyms, $1)=['[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname'] + ;; + esac + ;; + linux* | k*bsd*-gnu | gnu*) + _LT_TAGVAR(link_all_deplibs, $1)=no + ;; + *) + _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + ;; + esac +], [ + runpath_var= + _LT_TAGVAR(allow_undefined_flag, $1)= + _LT_TAGVAR(always_export_symbols, $1)=no + _LT_TAGVAR(archive_cmds, $1)= + _LT_TAGVAR(archive_expsym_cmds, $1)= + _LT_TAGVAR(compiler_needs_object, $1)=no + _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no + _LT_TAGVAR(export_dynamic_flag_spec, $1)= + _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + _LT_TAGVAR(hardcode_automatic, $1)=no + _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_direct_absolute, $1)=no + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= + _LT_TAGVAR(hardcode_libdir_separator, $1)= + _LT_TAGVAR(hardcode_minus_L, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported + _LT_TAGVAR(inherit_rpath, $1)=no + _LT_TAGVAR(link_all_deplibs, $1)=unknown + _LT_TAGVAR(module_cmds, $1)= + _LT_TAGVAR(module_expsym_cmds, $1)= + _LT_TAGVAR(old_archive_from_new_cmds, $1)= + _LT_TAGVAR(old_archive_from_expsyms_cmds, $1)= + _LT_TAGVAR(thread_safe_flag_spec, $1)= + _LT_TAGVAR(whole_archive_flag_spec, $1)= + # include_expsyms should be a list of space-separated symbols to be *always* + # included in the symbol list + _LT_TAGVAR(include_expsyms, $1)= + # 
exclude_expsyms can be an extended regexp of symbols to exclude + # it will be wrapped by ` (' and `)$', so one must not match beginning or + # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', + # as well as any symbol that contains `d'. + _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'] + # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out + # platforms (ab)use it in PIC code, but their linkers get confused if + # the symbol is explicitly referenced. Since portable code cannot + # rely on this symbol name, it's probably fine to never include it in + # preloaded symbol tables. + # Exclude shared library initialization/finalization symbols. +dnl Note also adjust exclude_expsyms for C++ above. + extract_expsyms_cmds= + + case $host_os in + cygwin* | mingw* | pw32* | cegcc*) + # FIXME: the MSVC++ port hasn't been tested in a loooong time + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + if test "$GCC" != yes; then + with_gnu_ld=no + fi + ;; + interix*) + # we just hope/assume this is gcc and not c89 (= MSVC++) + with_gnu_ld=yes + ;; + openbsd*) + with_gnu_ld=no + ;; + linux* | k*bsd*-gnu | gnu*) + _LT_TAGVAR(link_all_deplibs, $1)=no + ;; + esac + + _LT_TAGVAR(ld_shlibs, $1)=yes + + # On some targets, GNU ld is compatible enough with the native linker + # that we're better off using the native interface for both. + lt_use_gnu_ld_interface=no + if test "$with_gnu_ld" = yes; then + case $host_os in + aix*) + # The AIX port of GNU ld has always aspired to compatibility + # with the native linker. However, as the warning in the GNU ld + # block says, versions before 2.19.5* couldn't really create working + # shared libraries, regardless of the interface used. + case `$LD -v 2>&1` in + *\ \(GNU\ Binutils\)\ 2.19.5*) ;; + *\ \(GNU\ Binutils\)\ 2.[[2-9]]*) ;; + *\ \(GNU\ Binutils\)\ [[3-9]]*) ;; + *) + lt_use_gnu_ld_interface=yes + ;; + esac + ;; + *) + lt_use_gnu_ld_interface=yes + ;; + esac + fi + + if test "$lt_use_gnu_ld_interface" = yes; then + # If archive_cmds runs LD, not CC, wlarc should be empty + wlarc='${wl}' + + # Set some defaults for GNU ld with shared library support. These + # are reset later if shared libraries are not supported. Putting them + # here allows them to be overridden if necessary. + runpath_var=LD_RUN_PATH + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + # ancient GNU ld didn't support --whole-archive et. al. + if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then + _LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + else + _LT_TAGVAR(whole_archive_flag_spec, $1)= + fi + supports_anon_versioning=no + case `$LD -v 2>&1` in + *GNU\ gold*) supports_anon_versioning=yes ;; + *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.10.*) ;; # catch versions < 2.11 + *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... + *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... + *\ 2.11.*) ;; # other 2.11 versions + *) supports_anon_versioning=yes ;; + esac + + # See if GNU ld supports shared libraries. + case $host_os in + aix[[3-9]]*) + # On AIX/PPC, the GNU linker is very broken + if test "$host_cpu" != ia64; then + _LT_TAGVAR(ld_shlibs, $1)=no + cat <<_LT_EOF 1>&2 + +*** Warning: the GNU linker, at least up to release 2.19, is reported +*** to be unable to reliably create shared libraries on AIX. 
+*** Therefore, libtool is disabling shared libraries support. If you +*** really care for shared libraries, you may want to install binutils +*** 2.20 or above, or modify your PATH so that a non-GNU linker is found. +*** You will then need to restart the configuration process. + +_LT_EOF + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='' + ;; + m68k) + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_minus_L, $1)=yes + ;; + esac + ;; + + beos*) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. FIXME + _LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + cygwin* | mingw* | pw32* | cegcc*) + # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, + # as there is no search path for DLLs. + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-all-symbols' + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_TAGVAR(always_export_symbols, $1)=no + _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols' + _LT_TAGVAR(exclude_expsyms, $1)=['[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname'] + + if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + # If the export-symbols file already is a .def file (1st line + # is EXPORTS), use it as is; otherwise, prepend... 
+ _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + haiku*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(link_all_deplibs, $1)=yes + ;; + + interix[[3-9]]*) + _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. + # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. Moving up from 0x10000000 also allows more sbrk(2) space. + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; + + gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu) + tmp_diet=no + if test "$host_os" = linux-dietlibc; then + case $cc_basename in + diet\ *) tmp_diet=yes;; # linux-dietlibc with static linking (!diet-dyn) + esac + fi + if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \ + && test "$tmp_diet" = no + then + tmp_addflag=' $pic_flag' + tmp_sharedflag='-shared' + case $cc_basename,$host_cpu in + pgcc*) # Portland Group C compiler + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + tmp_addflag=' $pic_flag' + ;; + pgf77* | pgf90* | pgf95* | pgfortran*) + # Portland Group f77 and f90 compilers + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + tmp_addflag=' $pic_flag -Mnomain' ;; + ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 + tmp_addflag=' -i_dynamic' ;; + efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 + tmp_addflag=' -i_dynamic -nofor_main' ;; + ifc* | ifort*) # Intel Fortran compiler + tmp_addflag=' -nofor_main' ;; + lf95*) # Lahey Fortran 8.1 + _LT_TAGVAR(whole_archive_flag_spec, $1)= + tmp_sharedflag='--shared' ;; + xl[[cC]]* | bgxl[[cC]]* | mpixl[[cC]]*) # IBM XL C 8.0 on PPC (deal with xlf below) + tmp_sharedflag='-qmkshrobj' + tmp_addflag= ;; + nvcc*) # Cuda Compiler Driver 2.2 + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && 
new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + _LT_TAGVAR(compiler_needs_object, $1)=yes + ;; + esac + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) # Sun C 5.9 + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + _LT_TAGVAR(compiler_needs_object, $1)=yes + tmp_sharedflag='-G' ;; + *Sun\ F*) # Sun Fortran 8.3 + tmp_sharedflag='-G' ;; + esac + _LT_TAGVAR(archive_cmds, $1)='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + + if test "x$supports_anon_versioning" = xyes; then + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' + fi + + case $cc_basename in + xlf* | bgf* | bgxlf* | mpixlf*) + # IBM XL Fortran 10.1 on PPC cannot create shared libs itself + _LT_TAGVAR(whole_archive_flag_spec, $1)='--whole-archive$convenience --no-whole-archive' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(archive_cmds, $1)='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' + if test "x$supports_anon_versioning" = xyes; then + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' + fi + ;; + esac + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + netbsd* | netbsdelf*-gnu) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' + wlarc= + else + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + fi + ;; + + solaris*) + if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then + _LT_TAGVAR(ld_shlibs, $1)=no + cat <<_LT_EOF 1>&2 + +*** Warning: The releases 2.8.* of the GNU linker cannot reliably +*** create shared libraries on Solaris systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.9.1 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. 
+ +_LT_EOF + elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) + case `$LD -v 2>&1` in + *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.1[[0-5]].*) + _LT_TAGVAR(ld_shlibs, $1)=no + cat <<_LT_EOF 1>&2 + +*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 can not +*** reliably create shared libraries on SCO systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.16.91.0.3 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. + +_LT_EOF + ;; + *) + # For security reasons, it is highly recommended that you always + # use absolute paths for naming shared libraries, and exclude the + # DT_RUNPATH tag from executables and libraries. But doing so + # requires that you compile everything twice, which is a pain. + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + + sunos4*) + _LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' + wlarc= + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + *) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + + if test "$_LT_TAGVAR(ld_shlibs, $1)" = no; then + runpath_var= + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= + _LT_TAGVAR(export_dynamic_flag_spec, $1)= + _LT_TAGVAR(whole_archive_flag_spec, $1)= + fi + else + # PORTME fill in a description of your system's linker (not GNU ld) + case $host_os in + aix3*) + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_TAGVAR(always_export_symbols, $1)=yes + _LT_TAGVAR(archive_expsym_cmds, $1)='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' + # Note: this linker hardcodes the directories in LIBPATH if there + # are no directories specified by -L. + _LT_TAGVAR(hardcode_minus_L, $1)=yes + if test "$GCC" = yes && test -z "$lt_prog_compiler_static"; then + # Neither direct hardcoding nor static linking is supported with a + # broken collect2. + _LT_TAGVAR(hardcode_direct, $1)=unsupported + fi + ;; + + aix[[4-9]]*) + if test "$host_cpu" = ia64; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. 
+ aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag="" + else + # If we're using GNU nm, then we don't want the "-C" option. + # -C means demangle to AIX nm, but means don't demangle with GNU nm + # Also, AIX nm treats weak defined symbols like other global + # defined symbols, whereas GNU nm marks them as "W". + if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then + _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + else + _LT_TAGVAR(export_symbols_cmds, $1)='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + fi + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # need to do runtime linking. + case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*) + for ld_flag in $LDFLAGS; do + if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then + aix_use_runtimelinking=yes + break + fi + done + ;; + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. + + _LT_TAGVAR(archive_cmds, $1)='' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + _LT_TAGVAR(hardcode_libdir_separator, $1)=':' + _LT_TAGVAR(link_all_deplibs, $1)=yes + _LT_TAGVAR(file_list_spec, $1)='${wl}-f,' + + if test "$GCC" = yes; then + case $host_os in aix4.[[012]]|aix4.[[012]].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`${CC} -print-prog-name=collect2` + if test -f "$collect2name" && + strings "$collect2name" | $GREP resolve_lib_name >/dev/null + then + # We have reworked collect2 + : + else + # We have old collect2 + _LT_TAGVAR(hardcode_direct, $1)=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)= + fi + ;; + esac + shared_flag='-shared' + if test "$aix_use_runtimelinking" = yes; then + shared_flag="$shared_flag "'${wl}-G' + fi + _LT_TAGVAR(link_all_deplibs, $1)=no + else + # not using gcc + if test "$host_cpu" = ia64; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. The following line is correct: + shared_flag='-G' + else + if test "$aix_use_runtimelinking" = yes; then + shared_flag='${wl}-G' + else + shared_flag='${wl}-bM:SRE' + fi + fi + fi + + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-bexpall' + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to export. 
+ _LT_TAGVAR(always_export_symbols, $1)=yes + if test "$aix_use_runtimelinking" = yes; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + _LT_TAGVAR(allow_undefined_flag, $1)='-berok' + # Determine the default libpath from the value encoded in an + # empty executable. + _LT_SYS_MODULE_PATH_AIX([$1]) + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" + else + if test "$host_cpu" = ia64; then + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $libdir:/usr/lib:/lib' + _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs" + _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an + # empty executable. + _LT_SYS_MODULE_PATH_AIX([$1]) + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-bernotok' + _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-berok' + if test "$with_gnu_ld" = yes; then + # We only use this code for GNU lds that support --whole-archive. + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive$convenience ${wl}--no-whole-archive' + else + # Exported symbols can be pulled into shared objects from archives + _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience' + fi + _LT_TAGVAR(archive_cmds_need_lc, $1)=yes + # This is similar to how AIX traditionally builds its shared libraries. + _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' + fi + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='' + ;; + m68k) + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_minus_L, $1)=yes + ;; + esac + ;; + + bsdi[[45]]*) + _LT_TAGVAR(export_dynamic_flag_spec, $1)=-rdynamic + ;; + + cygwin* | mingw* | pw32* | cegcc*) + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. 
+ case $cc_basename in + cl*) + # Native MSVC + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_TAGVAR(always_export_symbols, $1)=yes + _LT_TAGVAR(file_list_spec, $1)='@' + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=".dll" + # FIXME: Setting linknames here is a bad hack. + _LT_TAGVAR(archive_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' + _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + sed -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; + else + sed -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; + fi~ + $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ + linknames=' + # The linker will not automatically build a static lib if we build a DLL. + # _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' + _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + _LT_TAGVAR(exclude_expsyms, $1)='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' + _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1,DATA/'\'' | $SED -e '\''/^[[AITW]][[ ]]/s/.*[[ ]]//'\'' | sort | uniq > $export_symbols' + # Don't use ranlib + _LT_TAGVAR(old_postinstall_cmds, $1)='chmod 644 $oldlib' + _LT_TAGVAR(postlink_cmds, $1)='lt_outputfile="@OUTPUT@"~ + lt_tool_outputfile="@TOOL_OUTPUT@"~ + case $lt_outputfile in + *.exe|*.EXE) ;; + *) + lt_outputfile="$lt_outputfile.exe" + lt_tool_outputfile="$lt_tool_outputfile.exe" + ;; + esac~ + if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then + $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; + $RM "$lt_outputfile.manifest"; + fi' + ;; + *) + # Assume MSVC wrapper + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=".dll" + # FIXME: Setting linknames here is a bad hack. + _LT_TAGVAR(archive_cmds, $1)='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' + # The linker will automatically build a .lib file if we build a DLL. + _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' + # FIXME: Should let the user specify the lib program. + _LT_TAGVAR(old_archive_cmds, $1)='lib -OUT:$oldlib$oldobjs$old_deplibs' + _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + ;; + esac + ;; + + darwin* | rhapsody*) + _LT_DARWIN_LINKER_FEATURES($1) + ;; + + dgux*) + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor + # support. Future versions do this automatically, but an explicit c++rt0.o + # does not break anything, and helps significantly (at the cost of a little + # extra space). 
+ freebsd2.2*) + _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + # Unfortunately, older versions of FreeBSD 2 do not have this feature. + freebsd2.*) + _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + # FreeBSD 3 and greater uses gcc -shared to do shared libraries. + freebsd* | dragonfly*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + hpux9*) + if test "$GCC" = yes; then + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + fi + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_TAGVAR(hardcode_direct, $1)=yes + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + ;; + + hpux10*) + if test "$GCC" = yes && test "$with_gnu_ld" = no; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + else + _LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' + fi + if test "$with_gnu_ld" = no; then + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. 
+ _LT_TAGVAR(hardcode_minus_L, $1)=yes + fi + ;; + + hpux11*) + if test "$GCC" = yes && test "$with_gnu_ld" = no; then + case $host_cpu in + hppa*64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + else + case $host_cpu in + hppa*64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + m4_if($1, [], [ + # Older versions of the 11.00 compiler do not understand -b yet + # (HP92453-01 A.11.01.20 doesn't, HP92453-01 B.11.X.35175-35176.GP does) + _LT_LINKER_OPTION([if $CC understands -b], + _LT_TAGVAR(lt_cv_prog_compiler__b, $1), [-b], + [_LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'], + [_LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags'])], + [_LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags']) + ;; + esac + fi + if test "$with_gnu_ld" = no; then + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + + case $host_cpu in + hppa*64*|ia64*) + _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + *) + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + _LT_TAGVAR(hardcode_minus_L, $1)=yes + ;; + esac + fi + ;; + + irix5* | irix6* | nonstopux*) + if test "$GCC" = yes; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + # Try to use the -exported_symbol ld option, if it does not + # work, assume that -exports_file does not work either and + # implicitly export all symbols. + # This should be the same for all languages, so no per-tag cache variable. 
+ AC_CACHE_CHECK([whether the $host_os linker accepts -exported_symbol], + [lt_cv_irix_exported_symbol], + [save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" + AC_LINK_IFELSE( + [AC_LANG_SOURCE( + [AC_LANG_CASE([C], [[int foo (void) { return 0; }]], + [C++], [[int foo (void) { return 0; }]], + [Fortran 77], [[ + subroutine foo + end]], + [Fortran], [[ + subroutine foo + end]])])], + [lt_cv_irix_exported_symbol=yes], + [lt_cv_irix_exported_symbol=no]) + LDFLAGS="$save_LDFLAGS"]) + if test "$lt_cv_irix_exported_symbol" = yes; then + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' + fi + else + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib' + fi + _LT_TAGVAR(archive_cmds_need_lc, $1)='no' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_TAGVAR(inherit_rpath, $1)=yes + _LT_TAGVAR(link_all_deplibs, $1)=yes + ;; + + netbsd* | netbsdelf*-gnu) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out + else + _LT_TAGVAR(archive_cmds, $1)='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF + fi + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + newsos6) + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + *nto* | *qnx*) + ;; + + openbsd*) + if test -f /usr/libexec/ld.so; then + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + else + case $host_os in + openbsd[[01]].* | openbsd2.[[0-7]] | openbsd2.[[0-7]].*) + _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + ;; + esac + fi + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + os2*) + 
_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~echo DATA >> $output_objdir/$libname.def~echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def' + _LT_TAGVAR(old_archive_from_new_cmds, $1)='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def' + ;; + + osf3*) + if test "$GCC" = yes; then + _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + fi + _LT_TAGVAR(archive_cmds_need_lc, $1)='no' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + ;; + + osf4* | osf5*) # as osf3* with the addition of -msym flag + if test "$GCC" = yes; then + _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $pic_flag $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + else + _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~ + $CC -shared${allow_undefined_flag} ${wl}-input ${wl}$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib~$RM $lib.exp' + + # Both c and cxx compiler support -rpath directly + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' + fi + _LT_TAGVAR(archive_cmds_need_lc, $1)='no' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + ;; + + solaris*) + _LT_TAGVAR(no_undefined_flag, $1)=' -z defs' + if test "$GCC" = yes; then + wlarc='${wl}' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs 
$compiler_flags~$RM $lib.exp' + else + case `$CC -V 2>&1` in + *"Compilers 5.0"*) + wlarc='' + _LT_TAGVAR(archive_cmds, $1)='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp' + ;; + *) + wlarc='${wl}' + _LT_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' + ;; + esac + fi + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + case $host_os in + solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; + *) + # The compiler driver will combine and reorder linker options, + # but understands `-z linker_flag'. GCC discards it without `$wl', + # but is careful enough not to reorder. + # Supported since Solaris 2.6 (maybe 2.5.1?) + if test "$GCC" = yes; then + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' + else + _LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract' + fi + ;; + esac + _LT_TAGVAR(link_all_deplibs, $1)=yes + ;; + + sunos4*) + if test "x$host_vendor" = xsequent; then + # Use $CC to link under sequent, because it throws in some extra .o + # files that make .init and .fini sections work. + _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags' + else + _LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' + fi + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + sysv4) + case $host_vendor in + sni) + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_direct, $1)=yes # is this really true??? + ;; + siemens) + ## LD is ld it makes a PLAMLIB + ## CC just makes a GrossModule. 
+ _LT_TAGVAR(archive_cmds, $1)='$LD -G -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(reload_cmds, $1)='$CC -r -o $output$reload_objs' + _LT_TAGVAR(hardcode_direct, $1)=no + ;; + motorola) + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_direct, $1)=no #Motorola manual says yes, but my tests say they lie + ;; + esac + runpath_var='LD_RUN_PATH' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + sysv4.3*) + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(export_dynamic_flag_spec, $1)='-Bexport' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + runpath_var=LD_RUN_PATH + hardcode_runpath_var=yes + _LT_TAGVAR(ld_shlibs, $1)=yes + fi + ;; + + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*) + _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + runpath_var='LD_RUN_PATH' + + if test "$GCC" = yes; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + else + _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + sysv5* | sco3.2v5* | sco5v6*) + # Note: We can NOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, which means just about no library would + # ever link correctly. If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. 
+ _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' + _LT_TAGVAR(allow_undefined_flag, $1)='${wl}-z,nodefs' + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R,$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=':' + _LT_TAGVAR(link_all_deplibs, $1)=yes + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Bexport' + runpath_var='LD_RUN_PATH' + + if test "$GCC" = yes; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + else + _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + uts4*) + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + *) + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + + if test x$host_vendor = xsni; then + case $host in + sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Blargedynsym' + ;; + esac + fi + fi +]) +AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)]) +test "$_LT_TAGVAR(ld_shlibs, $1)" = no && can_build_shared=no + +_LT_TAGVAR(with_gnu_ld, $1)=$with_gnu_ld + +_LT_DECL([], [libext], [0], [Old archive suffix (normally "a")])dnl +_LT_DECL([], [shrext_cmds], [1], [Shared library suffix (normally ".so")])dnl +_LT_DECL([], [extract_expsyms_cmds], [2], + [The commands to extract the exported symbol list from a shared archive]) + +# +# Do we need to explicitly link libc? +# +case "x$_LT_TAGVAR(archive_cmds_need_lc, $1)" in +x|xyes) + # Assume -lc should be added + _LT_TAGVAR(archive_cmds_need_lc, $1)=yes + + if test "$enable_shared" = yes && test "$GCC" = yes; then + case $_LT_TAGVAR(archive_cmds, $1) in + *'~'*) + # FIXME: we may have to deal with multi-command sequences. + ;; + '$CC '*) + # Test whether the compiler implicitly links with -lc since on some + # systems, -lgcc has to come before -lc. If gcc already passes -lc + # to ld, don't add -lc before -lgcc. + AC_CACHE_CHECK([whether -lc should be explicitly linked in], + [lt_cv_]_LT_TAGVAR(archive_cmds_need_lc, $1), + [$RM conftest* + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + if AC_TRY_EVAL(ac_compile) 2>conftest.err; then + soname=conftest + lib=conftest + libobjs=conftest.$ac_objext + deplibs= + wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1) + pic_flag=$_LT_TAGVAR(lt_prog_compiler_pic, $1) + compiler_flags=-v + linker_flags=-v + verstring= + output_objdir=. 
+ libname=conftest + lt_save_allow_undefined_flag=$_LT_TAGVAR(allow_undefined_flag, $1) + _LT_TAGVAR(allow_undefined_flag, $1)= + if AC_TRY_EVAL(_LT_TAGVAR(archive_cmds, $1) 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) + then + lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1)=no + else + lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1)=yes + fi + _LT_TAGVAR(allow_undefined_flag, $1)=$lt_save_allow_undefined_flag + else + cat conftest.err 1>&5 + fi + $RM conftest* + ]) + _LT_TAGVAR(archive_cmds_need_lc, $1)=$lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1) + ;; + esac + fi + ;; +esac + +_LT_TAGDECL([build_libtool_need_lc], [archive_cmds_need_lc], [0], + [Whether or not to add -lc for building shared libraries]) +_LT_TAGDECL([allow_libtool_libs_with_static_runtimes], + [enable_shared_with_static_runtimes], [0], + [Whether or not to disallow shared libs when runtime libs are static]) +_LT_TAGDECL([], [export_dynamic_flag_spec], [1], + [Compiler flag to allow reflexive dlopens]) +_LT_TAGDECL([], [whole_archive_flag_spec], [1], + [Compiler flag to generate shared objects directly from archives]) +_LT_TAGDECL([], [compiler_needs_object], [1], + [Whether the compiler copes with passing no objects directly]) +_LT_TAGDECL([], [old_archive_from_new_cmds], [2], + [Create an old-style archive from a shared archive]) +_LT_TAGDECL([], [old_archive_from_expsyms_cmds], [2], + [Create a temporary old-style archive to link instead of a shared archive]) +_LT_TAGDECL([], [archive_cmds], [2], [Commands used to build a shared archive]) +_LT_TAGDECL([], [archive_expsym_cmds], [2]) +_LT_TAGDECL([], [module_cmds], [2], + [Commands used to build a loadable module if different from building + a shared archive.]) +_LT_TAGDECL([], [module_expsym_cmds], [2]) +_LT_TAGDECL([], [with_gnu_ld], [1], + [Whether we are building with GNU ld or not]) +_LT_TAGDECL([], [allow_undefined_flag], [1], + [Flag that allows shared libraries with undefined symbols to be built]) +_LT_TAGDECL([], [no_undefined_flag], [1], + [Flag that enforces no undefined symbols]) +_LT_TAGDECL([], [hardcode_libdir_flag_spec], [1], + [Flag to hardcode $libdir into a binary during linking. 
+ This must work even if $libdir does not exist]) +_LT_TAGDECL([], [hardcode_libdir_separator], [1], + [Whether we need a single "-rpath" flag with a separated argument]) +_LT_TAGDECL([], [hardcode_direct], [0], + [Set to "yes" if using DIR/libNAME${shared_ext} during linking hardcodes + DIR into the resulting binary]) +_LT_TAGDECL([], [hardcode_direct_absolute], [0], + [Set to "yes" if using DIR/libNAME${shared_ext} during linking hardcodes + DIR into the resulting binary and the resulting library dependency is + "absolute", i.e impossible to change by setting ${shlibpath_var} if the + library is relocated]) +_LT_TAGDECL([], [hardcode_minus_L], [0], + [Set to "yes" if using the -LDIR flag during linking hardcodes DIR + into the resulting binary]) +_LT_TAGDECL([], [hardcode_shlibpath_var], [0], + [Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR + into the resulting binary]) +_LT_TAGDECL([], [hardcode_automatic], [0], + [Set to "yes" if building a shared library automatically hardcodes DIR + into the library and all subsequent libraries and executables linked + against it]) +_LT_TAGDECL([], [inherit_rpath], [0], + [Set to yes if linker adds runtime paths of dependent libraries + to runtime path list]) +_LT_TAGDECL([], [link_all_deplibs], [0], + [Whether libtool must link a program against all its dependency libraries]) +_LT_TAGDECL([], [always_export_symbols], [0], + [Set to "yes" if exported symbols are required]) +_LT_TAGDECL([], [export_symbols_cmds], [2], + [The commands to list exported symbols]) +_LT_TAGDECL([], [exclude_expsyms], [1], + [Symbols that should not be listed in the preloaded symbols]) +_LT_TAGDECL([], [include_expsyms], [1], + [Symbols that must always be exported]) +_LT_TAGDECL([], [prelink_cmds], [2], + [Commands necessary for linking programs (against libraries) with templates]) +_LT_TAGDECL([], [postlink_cmds], [2], + [Commands necessary for finishing linking programs]) +_LT_TAGDECL([], [file_list_spec], [1], + [Specify filename containing input files]) +dnl FIXME: Not yet implemented +dnl _LT_TAGDECL([], [thread_safe_flag_spec], [1], +dnl [Compiler flag to generate thread safe objects]) +])# _LT_LINKER_SHLIBS + + +# _LT_LANG_C_CONFIG([TAG]) +# ------------------------ +# Ensure that the configuration variables for a C compiler are suitably +# defined. These variables are subsequently used by _LT_CONFIG to write +# the compiler configuration to `libtool'. +m4_defun([_LT_LANG_C_CONFIG], +[m4_require([_LT_DECL_EGREP])dnl +lt_save_CC="$CC" +AC_LANG_PUSH(C) + +# Source file extension for C test sources. +ac_ext=c + +# Object file extension for compiled C test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code="int some_variable = 0;" + +# Code to be used in simple link tests +lt_simple_link_test_code='int main(){return(0);}' + +_LT_TAG_COMPILER +# Save the default compiler, since it gets overwritten when the other +# tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP. +compiler_DEFAULT=$CC + +# save warnings/boilerplate of simple test code +_LT_COMPILER_BOILERPLATE +_LT_LINKER_BOILERPLATE + +## CAVEAT EMPTOR: +## There is no encapsulation within the following macros, do not change +## the running order or otherwise move them around unless you know exactly +## what you are doing... 
+if test -n "$compiler"; then + _LT_COMPILER_NO_RTTI($1) + _LT_COMPILER_PIC($1) + _LT_COMPILER_C_O($1) + _LT_COMPILER_FILE_LOCKS($1) + _LT_LINKER_SHLIBS($1) + _LT_SYS_DYNAMIC_LINKER($1) + _LT_LINKER_HARDCODE_LIBPATH($1) + LT_SYS_DLOPEN_SELF + _LT_CMD_STRIPLIB + + # Report which library types will actually be built + AC_MSG_CHECKING([if libtool supports shared libraries]) + AC_MSG_RESULT([$can_build_shared]) + + AC_MSG_CHECKING([whether to build shared libraries]) + test "$can_build_shared" = "no" && enable_shared=no + + # On AIX, shared libraries and static libraries use the same namespace, and + # are all built from PIC. + case $host_os in + aix3*) + test "$enable_shared" = yes && enable_static=no + if test -n "$RANLIB"; then + archive_cmds="$archive_cmds~\$RANLIB \$lib" + postinstall_cmds='$RANLIB $lib' + fi + ;; + + aix[[4-9]]*) + if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then + test "$enable_shared" = yes && enable_static=no + fi + ;; + esac + AC_MSG_RESULT([$enable_shared]) + + AC_MSG_CHECKING([whether to build static libraries]) + # Make sure either enable_shared or enable_static is yes. + test "$enable_shared" = yes || enable_static=yes + AC_MSG_RESULT([$enable_static]) + + _LT_CONFIG($1) +fi +AC_LANG_POP +CC="$lt_save_CC" +])# _LT_LANG_C_CONFIG + + +# _LT_LANG_CXX_CONFIG([TAG]) +# -------------------------- +# Ensure that the configuration variables for a C++ compiler are suitably +# defined. These variables are subsequently used by _LT_CONFIG to write +# the compiler configuration to `libtool'. +m4_defun([_LT_LANG_CXX_CONFIG], +[m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_DECL_EGREP])dnl +m4_require([_LT_PATH_MANIFEST_TOOL])dnl +if test -n "$CXX" && ( test "X$CXX" != "Xno" && + ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) || + (test "X$CXX" != "Xg++"))) ; then + AC_PROG_CXXCPP +else + _lt_caught_CXX_error=yes +fi + +AC_LANG_PUSH(C++) +_LT_TAGVAR(archive_cmds_need_lc, $1)=no +_LT_TAGVAR(allow_undefined_flag, $1)= +_LT_TAGVAR(always_export_symbols, $1)=no +_LT_TAGVAR(archive_expsym_cmds, $1)= +_LT_TAGVAR(compiler_needs_object, $1)=no +_LT_TAGVAR(export_dynamic_flag_spec, $1)= +_LT_TAGVAR(hardcode_direct, $1)=no +_LT_TAGVAR(hardcode_direct_absolute, $1)=no +_LT_TAGVAR(hardcode_libdir_flag_spec, $1)= +_LT_TAGVAR(hardcode_libdir_separator, $1)= +_LT_TAGVAR(hardcode_minus_L, $1)=no +_LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported +_LT_TAGVAR(hardcode_automatic, $1)=no +_LT_TAGVAR(inherit_rpath, $1)=no +_LT_TAGVAR(module_cmds, $1)= +_LT_TAGVAR(module_expsym_cmds, $1)= +_LT_TAGVAR(link_all_deplibs, $1)=unknown +_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds +_LT_TAGVAR(reload_flag, $1)=$reload_flag +_LT_TAGVAR(reload_cmds, $1)=$reload_cmds +_LT_TAGVAR(no_undefined_flag, $1)= +_LT_TAGVAR(whole_archive_flag_spec, $1)= +_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no + +# Source file extension for C++ test sources. +ac_ext=cpp + +# Object file extension for compiled C++ test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext + +# No sense in running all these tests if we already determined that +# the CXX compiler isn't working. Some variables (like enable_shared) +# are currently assumed to apply to all compilers on this platform, +# and will be corrupted by setting them based on a non-working compiler. 
+if test "$_lt_caught_CXX_error" != yes; then + # Code to be used in simple compile tests + lt_simple_compile_test_code="int some_variable = 0;" + + # Code to be used in simple link tests + lt_simple_link_test_code='int main(int, char *[[]]) { return(0); }' + + # ltmain only uses $CC for tagged configurations so make sure $CC is set. + _LT_TAG_COMPILER + + # save warnings/boilerplate of simple test code + _LT_COMPILER_BOILERPLATE + _LT_LINKER_BOILERPLATE + + # Allow CC to be a program name with arguments. + lt_save_CC=$CC + lt_save_CFLAGS=$CFLAGS + lt_save_LD=$LD + lt_save_GCC=$GCC + GCC=$GXX + lt_save_with_gnu_ld=$with_gnu_ld + lt_save_path_LD=$lt_cv_path_LD + if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then + lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx + else + $as_unset lt_cv_prog_gnu_ld + fi + if test -n "${lt_cv_path_LDCXX+set}"; then + lt_cv_path_LD=$lt_cv_path_LDCXX + else + $as_unset lt_cv_path_LD + fi + test -z "${LDCXX+set}" || LD=$LDCXX + CC=${CXX-"c++"} + CFLAGS=$CXXFLAGS + compiler=$CC + _LT_TAGVAR(compiler, $1)=$CC + _LT_CC_BASENAME([$compiler]) + + if test -n "$compiler"; then + # We don't want -fno-exception when compiling C++ code, so set the + # no_builtin_flag separately + if test "$GXX" = yes; then + _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' + else + _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= + fi + + if test "$GXX" = yes; then + # Set up default GNU C++ configuration + + LT_PATH_LD + + # Check if GNU C++ uses GNU ld as the underlying linker, since the + # archiving commands below assume that GNU ld is being used. + if test "$with_gnu_ld" = yes; then + _LT_TAGVAR(archive_cmds, $1)='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + + # If archive_cmds runs LD, not CC, wlarc should be empty + # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to + # investigate it a little bit more. (MM) + wlarc='${wl}' + + # ancient GNU ld didn't support --whole-archive et. al. + if eval "`$CC -print-prog-name=ld` --help 2>&1" | + $GREP 'no-whole-archive' > /dev/null; then + _LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + else + _LT_TAGVAR(whole_archive_flag_spec, $1)= + fi + else + with_gnu_ld=no + wlarc= + + # A generic and very simple default shared library creation + # command for GNU C++ for the case where it uses the native + # linker, instead of GNU ld. If possible, this setting should + # overridden to take advantage of the native linker features on + # the platform it is being used on. + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' + fi + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. 
+ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' + + else + GXX=no + with_gnu_ld=no + wlarc= + fi + + # PORTME: fill in a description of your system's C++ link characteristics + AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) + _LT_TAGVAR(ld_shlibs, $1)=yes + case $host_os in + aix3*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + aix[[4-9]]*) + if test "$host_cpu" = ia64; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag="" + else + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # need to do runtime linking. + case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*) + for ld_flag in $LDFLAGS; do + case $ld_flag in + *-brtl*) + aix_use_runtimelinking=yes + break + ;; + esac + done + ;; + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. + + _LT_TAGVAR(archive_cmds, $1)='' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + _LT_TAGVAR(hardcode_libdir_separator, $1)=':' + _LT_TAGVAR(link_all_deplibs, $1)=yes + _LT_TAGVAR(file_list_spec, $1)='${wl}-f,' + + if test "$GXX" = yes; then + case $host_os in aix4.[[012]]|aix4.[[012]].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`${CC} -print-prog-name=collect2` + if test -f "$collect2name" && + strings "$collect2name" | $GREP resolve_lib_name >/dev/null + then + # We have reworked collect2 + : + else + # We have old collect2 + _LT_TAGVAR(hardcode_direct, $1)=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)= + fi + esac + shared_flag='-shared' + if test "$aix_use_runtimelinking" = yes; then + shared_flag="$shared_flag "'${wl}-G' + fi + else + # not using gcc + if test "$host_cpu" = ia64; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. The following line is correct: + shared_flag='-G' + else + if test "$aix_use_runtimelinking" = yes; then + shared_flag='${wl}-G' + else + shared_flag='${wl}-bM:SRE' + fi + fi + fi + + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-bexpall' + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to + # export. + _LT_TAGVAR(always_export_symbols, $1)=yes + if test "$aix_use_runtimelinking" = yes; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + _LT_TAGVAR(allow_undefined_flag, $1)='-berok' + # Determine the default libpath from the value encoded in an empty + # executable. 
+ _LT_SYS_MODULE_PATH_AIX([$1]) + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" + + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" + else + if test "$host_cpu" = ia64; then + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $libdir:/usr/lib:/lib' + _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs" + _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an + # empty executable. + _LT_SYS_MODULE_PATH_AIX([$1]) + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-bernotok' + _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-berok' + if test "$with_gnu_ld" = yes; then + # We only use this code for GNU lds that support --whole-archive. + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive$convenience ${wl}--no-whole-archive' + else + # Exported symbols can be pulled into shared objects from archives + _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience' + fi + _LT_TAGVAR(archive_cmds_need_lc, $1)=yes + # This is similar to how AIX traditionally builds its shared + # libraries. + _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' + fi + fi + ;; + + beos*) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. FIXME + _LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + chorus*) + case $cc_basename in + *) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + + cygwin* | mingw* | pw32* | cegcc*) + case $GXX,$cc_basename in + ,cl* | no,cl*) + # Native MSVC + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_TAGVAR(always_export_symbols, $1)=yes + _LT_TAGVAR(file_list_spec, $1)='@' + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=".dll" + # FIXME: Setting linknames here is a bad hack. 
+ _LT_TAGVAR(archive_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' + _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + $SED -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; + else + $SED -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; + fi~ + $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ + linknames=' + # The linker will not automatically build a static lib if we build a DLL. + # _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' + _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + # Don't use ranlib + _LT_TAGVAR(old_postinstall_cmds, $1)='chmod 644 $oldlib' + _LT_TAGVAR(postlink_cmds, $1)='lt_outputfile="@OUTPUT@"~ + lt_tool_outputfile="@TOOL_OUTPUT@"~ + case $lt_outputfile in + *.exe|*.EXE) ;; + *) + lt_outputfile="$lt_outputfile.exe" + lt_tool_outputfile="$lt_tool_outputfile.exe" + ;; + esac~ + func_to_tool_file "$lt_outputfile"~ + if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then + $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; + $RM "$lt_outputfile.manifest"; + fi' + ;; + *) + # g++ + # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, + # as there is no search path for DLLs. + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-all-symbols' + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_TAGVAR(always_export_symbols, $1)=no + _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + + if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + # If the export-symbols file already is a .def file (1st line + # is EXPORTS), use it as is; otherwise, prepend... 
+ _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + darwin* | rhapsody*) + _LT_DARWIN_LINKER_FEATURES($1) + ;; + + dgux*) + case $cc_basename in + ec++*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + ghcx*) + # Green Hills C++ Compiler + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + *) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + + freebsd2.*) + # C++ shared libraries reported to be fairly broken before + # switch to ELF + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + + freebsd-elf*) + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + ;; + + freebsd* | dragonfly*) + # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF + # conventions + _LT_TAGVAR(ld_shlibs, $1)=yes + ;; + + haiku*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(link_all_deplibs, $1)=yes + ;; + + hpux9*) + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, + # but as the default + # location of the library. + + case $cc_basename in + CC*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + aCC*) + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -b ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
+ output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + ;; + *) + if test "$GXX" = yes; then + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared -nostdlib $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + + hpux10*|hpux11*) + if test $with_gnu_ld = no; then + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + + case $host_cpu in + hppa*64*|ia64*) + ;; + *) + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + ;; + esac + fi + case $host_cpu in + hppa*64*|ia64*) + _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + *) + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, + # but as the default + # location of the library. + ;; + esac + + case $cc_basename in + CC*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + aCC*) + case $host_cpu in + hppa*64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + ia64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + esac + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
+ output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + ;; + *) + if test "$GXX" = yes; then + if test $with_gnu_ld = no; then + case $host_cpu in + hppa*64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + ia64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + esac + fi + else + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + + interix[[3-9]]*) + _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. + # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. Moving up from 0x10000000 also allows more sbrk(2) space. + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; + irix5* | irix6*) + case $cc_basename in + CC*) + # SGI C++ + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + + # Archives containing C++ object files must be created using + # "CC -ar", where "CC" is the IRIX C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. 
+ _LT_TAGVAR(old_archive_cmds, $1)='$CC -ar -WR,-u -o $oldlib $oldobjs' + ;; + *) + if test "$GXX" = yes; then + if test "$with_gnu_ld" = no; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` -o $lib' + fi + fi + _LT_TAGVAR(link_all_deplibs, $1)=yes + ;; + esac + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_TAGVAR(inherit_rpath, $1)=yes + ;; + + linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. (KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. + _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib ${wl}-retain-symbols-file,$export_symbols; mv \$templib $lib' + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + + # Archives containing C++ object files must be created using + # "CC -Bstatic", where "CC" is the KAI C++ compiler. + _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' + ;; + icpc* | ecpc* ) + # Intel C++ + with_gnu_ld=yes + # version 8.0 and above of icpc choke on multiply defined symbols + # if we add $predep_objects and $postdep_objects, however 7.1 and + # earlier do not add the objects themselves. 
+ case `$CC -V 2>&1` in + *"Version 7."*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + ;; + *) # Version 8.0 or newer + tmp_idyn= + case $host_cpu in + ia64*) tmp_idyn=' -i_dynamic';; + esac + _LT_TAGVAR(archive_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + ;; + esac + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive$convenience ${wl}--no-whole-archive' + ;; + pgCC* | pgcpp*) + # Portland Group C++ compiler + case `$CC -V` in + *pgCC\ [[1-5]].* | *pgcpp\ [[1-5]].*) + _LT_TAGVAR(prelink_cmds, $1)='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~ + compile_command="$compile_command `find $tpldir -name \*.o | sort | $NL2SP`"' + _LT_TAGVAR(old_archive_cmds, $1)='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~ + $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | sort | $NL2SP`~ + $RANLIB $oldlib' + _LT_TAGVAR(archive_cmds, $1)='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ + $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ + $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' + ;; + *) # Version 6 and above use weak symbols + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' + ;; + esac + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}--rpath ${wl}$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + ;; + cxx*) + # Compaq C++ + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname 
$wl$soname -o $lib ${wl}-retain-symbols-file $wl$export_symbols' + + runpath_var=LD_RUN_PATH + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "X$list" | $Xsed' + ;; + xl* | mpixl* | bgxl*) + # IBM XL 8.0 on PPC, with GNU ld + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + _LT_TAGVAR(archive_cmds, $1)='$CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + if test "x$supports_anon_versioning" = xyes; then + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' + fi + ;; + *) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) + # Sun C++ 5.9 + _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs' + _LT_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file ${wl}$export_symbols' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + _LT_TAGVAR(compiler_needs_object, $1)=yes + + # Not sure whether something based on + # $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 + # would be better. + output_verbose_link_cmd='func_echo_all' + + # Archives containing C++ object files must be created using + # "CC -xar", where "CC" is the Sun C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. 
+ _LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs' + ;; + esac + ;; + esac + ;; + + lynxos*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + + m88k*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + + mvs*) + case $cc_basename in + cxx*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + *) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags' + wlarc= + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + fi + # Workaround some broken pre-1.5 toolchains + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"' + ;; + + *nto* | *qnx*) + _LT_TAGVAR(ld_shlibs, $1)=yes + ;; + + openbsd2*) + # C++ shared libraries are fairly broken + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + + openbsd*) + if test -f /usr/libexec/ld.so; then + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file,$export_symbols -o $lib' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + _LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + fi + output_verbose_link_cmd=func_echo_all + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + osf3* | osf4* | osf5*) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. (KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. + _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + + # Archives containing C++ object files must be created using + # the KAI C++ compiler. 
+ case $host in + osf3*) _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' ;; + *) _LT_TAGVAR(old_archive_cmds, $1)='$CC -o $oldlib $oldobjs' ;; + esac + ;; + RCC*) + # Rational C++ 2.4.1 + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + cxx*) + case $host in + osf3*) + _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $soname `test -n "$verstring" && func_echo_all "${wl}-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + ;; + *) + _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~ + echo "-hidden">> $lib.exp~ + $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname ${wl}-input ${wl}$lib.exp `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib~ + $RM $lib.exp' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' + ;; + esac + + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + ;; + *) + if test "$GXX" = yes && test "$with_gnu_ld" = no; then + _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' + case $host in + osf3*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + ;; + esac + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. 
+ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' + + else + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + + psos*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + + sunos4*) + case $cc_basename in + CC*) + # Sun C++ 4.x + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + lcc*) + # Lucid + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + *) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + + solaris*) + case $cc_basename in + CC* | sunCC*) + # Sun C++ 4.2, 5.x and Centerline C++ + _LT_TAGVAR(archive_cmds_need_lc,$1)=yes + _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs' + _LT_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -G${allow_undefined_flag} ${wl}-M ${wl}$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + case $host_os in + solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; + *) + # The compiler driver will combine and reorder linker options, + # but understands `-z linker_flag'. + # Supported since Solaris 2.6 (maybe 2.5.1?) + _LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract' + ;; + esac + _LT_TAGVAR(link_all_deplibs, $1)=yes + + output_verbose_link_cmd='func_echo_all' + + # Archives containing C++ object files must be created using + # "CC -xar", where "CC" is the Sun C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. + _LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs' + ;; + gcx*) + # Green Hills C++ Compiler + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + + # The C++ compiler must be used to create the archive. + _LT_TAGVAR(old_archive_cmds, $1)='$CC $LDFLAGS -archive -o $oldlib $oldobjs' + ;; + *) + # GNU C++ compiler with Solaris linker + if test "$GXX" = yes && test "$with_gnu_ld" = no; then + _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-z ${wl}defs' + if $CC --version | $GREP -v '^2\.7' > /dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -shared $pic_flag -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' + else + # g++ 2.7 appears to require `-G' NOT `-shared' on this + # platform. 
+ _LT_TAGVAR(archive_cmds, $1)='$CC -G -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -G -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' + fi + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $wl$libdir' + case $host_os in + solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; + *) + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' + ;; + esac + fi + ;; + esac + ;; + + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*) + _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + runpath_var='LD_RUN_PATH' + + case $cc_basename in + CC*) + _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + ;; + + sysv5* | sco3.2v5* | sco5v6*) + # Note: We can NOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, which means just about no library would + # ever link correctly. If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. 
+ _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' + _LT_TAGVAR(allow_undefined_flag, $1)='${wl}-z,nodefs' + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R,$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=':' + _LT_TAGVAR(link_all_deplibs, $1)=yes + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Bexport' + runpath_var='LD_RUN_PATH' + + case $cc_basename in + CC*) + _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(old_archive_cmds, $1)='$CC -Tprelink_objects $oldobjs~ + '"$_LT_TAGVAR(old_archive_cmds, $1)" + _LT_TAGVAR(reload_cmds, $1)='$CC -Tprelink_objects $reload_objs~ + '"$_LT_TAGVAR(reload_cmds, $1)" + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + ;; + + tandem*) + case $cc_basename in + NCC*) + # NonStop-UX NCC 3.20 + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + *) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + + vxworks*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + + *) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + + AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)]) + test "$_LT_TAGVAR(ld_shlibs, $1)" = no && can_build_shared=no + + _LT_TAGVAR(GCC, $1)="$GXX" + _LT_TAGVAR(LD, $1)="$LD" + + ## CAVEAT EMPTOR: + ## There is no encapsulation within the following macros, do not change + ## the running order or otherwise move them around unless you know exactly + ## what you are doing... + _LT_SYS_HIDDEN_LIBDEPS($1) + _LT_COMPILER_PIC($1) + _LT_COMPILER_C_O($1) + _LT_COMPILER_FILE_LOCKS($1) + _LT_LINKER_SHLIBS($1) + _LT_SYS_DYNAMIC_LINKER($1) + _LT_LINKER_HARDCODE_LIBPATH($1) + + _LT_CONFIG($1) + fi # test -n "$compiler" + + CC=$lt_save_CC + CFLAGS=$lt_save_CFLAGS + LDCXX=$LD + LD=$lt_save_LD + GCC=$lt_save_GCC + with_gnu_ld=$lt_save_with_gnu_ld + lt_cv_path_LDCXX=$lt_cv_path_LD + lt_cv_path_LD=$lt_save_path_LD + lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld + lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld +fi # test "$_lt_caught_CXX_error" != yes + +AC_LANG_POP +])# _LT_LANG_CXX_CONFIG + + +# _LT_FUNC_STRIPNAME_CNF +# ---------------------- +# func_stripname_cnf prefix suffix name +# strip PREFIX and SUFFIX off of NAME. +# PREFIX and SUFFIX must not contain globbing or regex special +# characters, hashes, percent signs, but SUFFIX may contain a leading +# dot (in which case that matches only a dot). +# +# This function is identical to the (non-XSI) version of func_stripname, +# except this one can be used by m4 code that may be executed by configure, +# rather than the libtool script. 
+m4_defun([_LT_FUNC_STRIPNAME_CNF],[dnl +AC_REQUIRE([_LT_DECL_SED]) +AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH]) +func_stripname_cnf () +{ + case ${2} in + .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;; + *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;; + esac +} # func_stripname_cnf +])# _LT_FUNC_STRIPNAME_CNF + +# _LT_SYS_HIDDEN_LIBDEPS([TAGNAME]) +# --------------------------------- +# Figure out "hidden" library dependencies from verbose +# compiler output when linking a shared library. +# Parse the compiler output and extract the necessary +# objects, libraries and library flags. +m4_defun([_LT_SYS_HIDDEN_LIBDEPS], +[m4_require([_LT_FILEUTILS_DEFAULTS])dnl +AC_REQUIRE([_LT_FUNC_STRIPNAME_CNF])dnl +# Dependencies to place before and after the object being linked: +_LT_TAGVAR(predep_objects, $1)= +_LT_TAGVAR(postdep_objects, $1)= +_LT_TAGVAR(predeps, $1)= +_LT_TAGVAR(postdeps, $1)= +_LT_TAGVAR(compiler_lib_search_path, $1)= + +dnl we can't use the lt_simple_compile_test_code here, +dnl because it contains code intended for an executable, +dnl not a library. It's possible we should let each +dnl tag define a new lt_????_link_test_code variable, +dnl but it's only used here... +m4_if([$1], [], [cat > conftest.$ac_ext <<_LT_EOF +int a; +void foo (void) { a = 0; } +_LT_EOF +], [$1], [CXX], [cat > conftest.$ac_ext <<_LT_EOF +class Foo +{ +public: + Foo (void) { a = 0; } +private: + int a; +}; +_LT_EOF +], [$1], [F77], [cat > conftest.$ac_ext <<_LT_EOF + subroutine foo + implicit none + integer*4 a + a=0 + return + end +_LT_EOF +], [$1], [FC], [cat > conftest.$ac_ext <<_LT_EOF + subroutine foo + implicit none + integer a + a=0 + return + end +_LT_EOF +], [$1], [GCJ], [cat > conftest.$ac_ext <<_LT_EOF +public class foo { + private int a; + public void bar (void) { + a = 0; + } +}; +_LT_EOF +], [$1], [GO], [cat > conftest.$ac_ext <<_LT_EOF +package foo +func foo() { +} +_LT_EOF +]) + +_lt_libdeps_save_CFLAGS=$CFLAGS +case "$CC $CFLAGS " in #( +*\ -flto*\ *) CFLAGS="$CFLAGS -fno-lto" ;; +*\ -fwhopr*\ *) CFLAGS="$CFLAGS -fno-whopr" ;; +*\ -fuse-linker-plugin*\ *) CFLAGS="$CFLAGS -fno-use-linker-plugin" ;; +esac + +dnl Parse the compiler output and extract the necessary +dnl objects, libraries and library flags. +if AC_TRY_EVAL(ac_compile); then + # Parse the compiler output and extract the necessary + # objects, libraries and library flags. + + # Sentinel used to keep track of whether or not we are before + # the conftest object file. + pre_test_object_deps_done=no + + for p in `eval "$output_verbose_link_cmd"`; do + case ${prev}${p} in + + -L* | -R* | -l*) + # Some compilers place space between "-{L,R}" and the path. + # Remove the space. + if test $p = "-L" || + test $p = "-R"; then + prev=$p + continue + fi + + # Expand the sysroot to ease extracting the directories later. + if test -z "$prev"; then + case $p in + -L*) func_stripname_cnf '-L' '' "$p"; prev=-L; p=$func_stripname_result ;; + -R*) func_stripname_cnf '-R' '' "$p"; prev=-R; p=$func_stripname_result ;; + -l*) func_stripname_cnf '-l' '' "$p"; prev=-l; p=$func_stripname_result ;; + esac + fi + case $p in + =*) func_stripname_cnf '=' '' "$p"; p=$lt_sysroot$func_stripname_result ;; + esac + if test "$pre_test_object_deps_done" = no; then + case ${prev} in + -L | -R) + # Internal compiler library paths should come after those + # provided the user. The postdeps already come after the + # user supplied libs so there is no need to process them. 
+ if test -z "$_LT_TAGVAR(compiler_lib_search_path, $1)"; then + _LT_TAGVAR(compiler_lib_search_path, $1)="${prev}${p}" + else + _LT_TAGVAR(compiler_lib_search_path, $1)="${_LT_TAGVAR(compiler_lib_search_path, $1)} ${prev}${p}" + fi + ;; + # The "-l" case would never come before the object being + # linked, so don't bother handling this case. + esac + else + if test -z "$_LT_TAGVAR(postdeps, $1)"; then + _LT_TAGVAR(postdeps, $1)="${prev}${p}" + else + _LT_TAGVAR(postdeps, $1)="${_LT_TAGVAR(postdeps, $1)} ${prev}${p}" + fi + fi + prev= + ;; + + *.lto.$objext) ;; # Ignore GCC LTO objects + *.$objext) + # This assumes that the test object file only shows up + # once in the compiler output. + if test "$p" = "conftest.$objext"; then + pre_test_object_deps_done=yes + continue + fi + + if test "$pre_test_object_deps_done" = no; then + if test -z "$_LT_TAGVAR(predep_objects, $1)"; then + _LT_TAGVAR(predep_objects, $1)="$p" + else + _LT_TAGVAR(predep_objects, $1)="$_LT_TAGVAR(predep_objects, $1) $p" + fi + else + if test -z "$_LT_TAGVAR(postdep_objects, $1)"; then + _LT_TAGVAR(postdep_objects, $1)="$p" + else + _LT_TAGVAR(postdep_objects, $1)="$_LT_TAGVAR(postdep_objects, $1) $p" + fi + fi + ;; + + *) ;; # Ignore the rest. + + esac + done + + # Clean up. + rm -f a.out a.exe +else + echo "libtool.m4: error: problem compiling $1 test program" +fi + +$RM -f confest.$objext +CFLAGS=$_lt_libdeps_save_CFLAGS + +# PORTME: override above test on systems where it is broken +m4_if([$1], [CXX], +[case $host_os in +interix[[3-9]]*) + # Interix 3.5 installs completely hosed .la files for C++, so rather than + # hack all around it, let's just trust "g++" to DTRT. + _LT_TAGVAR(predep_objects,$1)= + _LT_TAGVAR(postdep_objects,$1)= + _LT_TAGVAR(postdeps,$1)= + ;; + +linux*) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) + # Sun C++ 5.9 + + # The more standards-conforming stlport4 library is + # incompatible with the Cstd library. Avoid specifying + # it if it's in CXXFLAGS. Ignore libCrun as + # -library=stlport4 depends on it. + case " $CXX $CXXFLAGS " in + *" -library=stlport4 "*) + solaris_use_stlport4=yes + ;; + esac + + if test "$solaris_use_stlport4" != yes; then + _LT_TAGVAR(postdeps,$1)='-library=Cstd -library=Crun' + fi + ;; + esac + ;; + +solaris*) + case $cc_basename in + CC* | sunCC*) + # The more standards-conforming stlport4 library is + # incompatible with the Cstd library. Avoid specifying + # it if it's in CXXFLAGS. Ignore libCrun as + # -library=stlport4 depends on it. + case " $CXX $CXXFLAGS " in + *" -library=stlport4 "*) + solaris_use_stlport4=yes + ;; + esac + + # Adding this requires a known-good setup of shared libraries for + # Sun compiler versions before 5.6, else PIC objects from an old + # archive will be linked into the output, leading to subtle bugs. + if test "$solaris_use_stlport4" != yes; then + _LT_TAGVAR(postdeps,$1)='-library=Cstd -library=Crun' + fi + ;; + esac + ;; +esac +]) + +case " $_LT_TAGVAR(postdeps, $1) " in +*" -lc "*) _LT_TAGVAR(archive_cmds_need_lc, $1)=no ;; +esac + _LT_TAGVAR(compiler_lib_search_dirs, $1)= +if test -n "${_LT_TAGVAR(compiler_lib_search_path, $1)}"; then + _LT_TAGVAR(compiler_lib_search_dirs, $1)=`echo " ${_LT_TAGVAR(compiler_lib_search_path, $1)}" | ${SED} -e 's! -L! 
!g' -e 's!^ !!'` +fi +_LT_TAGDECL([], [compiler_lib_search_dirs], [1], + [The directories searched by this compiler when creating a shared library]) +_LT_TAGDECL([], [predep_objects], [1], + [Dependencies to place before and after the objects being linked to + create a shared library]) +_LT_TAGDECL([], [postdep_objects], [1]) +_LT_TAGDECL([], [predeps], [1]) +_LT_TAGDECL([], [postdeps], [1]) +_LT_TAGDECL([], [compiler_lib_search_path], [1], + [The library search path used internally by the compiler when linking + a shared library]) +])# _LT_SYS_HIDDEN_LIBDEPS + + +# _LT_LANG_F77_CONFIG([TAG]) +# -------------------------- +# Ensure that the configuration variables for a Fortran 77 compiler are +# suitably defined. These variables are subsequently used by _LT_CONFIG +# to write the compiler configuration to `libtool'. +m4_defun([_LT_LANG_F77_CONFIG], +[AC_LANG_PUSH(Fortran 77) +if test -z "$F77" || test "X$F77" = "Xno"; then + _lt_disable_F77=yes +fi + +_LT_TAGVAR(archive_cmds_need_lc, $1)=no +_LT_TAGVAR(allow_undefined_flag, $1)= +_LT_TAGVAR(always_export_symbols, $1)=no +_LT_TAGVAR(archive_expsym_cmds, $1)= +_LT_TAGVAR(export_dynamic_flag_spec, $1)= +_LT_TAGVAR(hardcode_direct, $1)=no +_LT_TAGVAR(hardcode_direct_absolute, $1)=no +_LT_TAGVAR(hardcode_libdir_flag_spec, $1)= +_LT_TAGVAR(hardcode_libdir_separator, $1)= +_LT_TAGVAR(hardcode_minus_L, $1)=no +_LT_TAGVAR(hardcode_automatic, $1)=no +_LT_TAGVAR(inherit_rpath, $1)=no +_LT_TAGVAR(module_cmds, $1)= +_LT_TAGVAR(module_expsym_cmds, $1)= +_LT_TAGVAR(link_all_deplibs, $1)=unknown +_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds +_LT_TAGVAR(reload_flag, $1)=$reload_flag +_LT_TAGVAR(reload_cmds, $1)=$reload_cmds +_LT_TAGVAR(no_undefined_flag, $1)= +_LT_TAGVAR(whole_archive_flag_spec, $1)= +_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no + +# Source file extension for f77 test sources. +ac_ext=f + +# Object file extension for compiled f77 test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext + +# No sense in running all these tests if we already determined that +# the F77 compiler isn't working. Some variables (like enable_shared) +# are currently assumed to apply to all compilers on this platform, +# and will be corrupted by setting them based on a non-working compiler. +if test "$_lt_disable_F77" != yes; then + # Code to be used in simple compile tests + lt_simple_compile_test_code="\ + subroutine t + return + end +" + + # Code to be used in simple link tests + lt_simple_link_test_code="\ + program t + end +" + + # ltmain only uses $CC for tagged configurations so make sure $CC is set. + _LT_TAG_COMPILER + + # save warnings/boilerplate of simple test code + _LT_COMPILER_BOILERPLATE + _LT_LINKER_BOILERPLATE + + # Allow CC to be a program name with arguments. + lt_save_CC="$CC" + lt_save_GCC=$GCC + lt_save_CFLAGS=$CFLAGS + CC=${F77-"f77"} + CFLAGS=$FFLAGS + compiler=$CC + _LT_TAGVAR(compiler, $1)=$CC + _LT_CC_BASENAME([$compiler]) + GCC=$G77 + if test -n "$compiler"; then + AC_MSG_CHECKING([if libtool supports shared libraries]) + AC_MSG_RESULT([$can_build_shared]) + + AC_MSG_CHECKING([whether to build shared libraries]) + test "$can_build_shared" = "no" && enable_shared=no + + # On AIX, shared libraries and static libraries use the same namespace, and + # are all built from PIC. 
+ case $host_os in + aix3*) + test "$enable_shared" = yes && enable_static=no + if test -n "$RANLIB"; then + archive_cmds="$archive_cmds~\$RANLIB \$lib" + postinstall_cmds='$RANLIB $lib' + fi + ;; + aix[[4-9]]*) + if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then + test "$enable_shared" = yes && enable_static=no + fi + ;; + esac + AC_MSG_RESULT([$enable_shared]) + + AC_MSG_CHECKING([whether to build static libraries]) + # Make sure either enable_shared or enable_static is yes. + test "$enable_shared" = yes || enable_static=yes + AC_MSG_RESULT([$enable_static]) + + _LT_TAGVAR(GCC, $1)="$G77" + _LT_TAGVAR(LD, $1)="$LD" + + ## CAVEAT EMPTOR: + ## There is no encapsulation within the following macros, do not change + ## the running order or otherwise move them around unless you know exactly + ## what you are doing... + _LT_COMPILER_PIC($1) + _LT_COMPILER_C_O($1) + _LT_COMPILER_FILE_LOCKS($1) + _LT_LINKER_SHLIBS($1) + _LT_SYS_DYNAMIC_LINKER($1) + _LT_LINKER_HARDCODE_LIBPATH($1) + + _LT_CONFIG($1) + fi # test -n "$compiler" + + GCC=$lt_save_GCC + CC="$lt_save_CC" + CFLAGS="$lt_save_CFLAGS" +fi # test "$_lt_disable_F77" != yes + +AC_LANG_POP +])# _LT_LANG_F77_CONFIG + + +# _LT_LANG_FC_CONFIG([TAG]) +# ------------------------- +# Ensure that the configuration variables for a Fortran compiler are +# suitably defined. These variables are subsequently used by _LT_CONFIG +# to write the compiler configuration to `libtool'. +m4_defun([_LT_LANG_FC_CONFIG], +[AC_LANG_PUSH(Fortran) + +if test -z "$FC" || test "X$FC" = "Xno"; then + _lt_disable_FC=yes +fi + +_LT_TAGVAR(archive_cmds_need_lc, $1)=no +_LT_TAGVAR(allow_undefined_flag, $1)= +_LT_TAGVAR(always_export_symbols, $1)=no +_LT_TAGVAR(archive_expsym_cmds, $1)= +_LT_TAGVAR(export_dynamic_flag_spec, $1)= +_LT_TAGVAR(hardcode_direct, $1)=no +_LT_TAGVAR(hardcode_direct_absolute, $1)=no +_LT_TAGVAR(hardcode_libdir_flag_spec, $1)= +_LT_TAGVAR(hardcode_libdir_separator, $1)= +_LT_TAGVAR(hardcode_minus_L, $1)=no +_LT_TAGVAR(hardcode_automatic, $1)=no +_LT_TAGVAR(inherit_rpath, $1)=no +_LT_TAGVAR(module_cmds, $1)= +_LT_TAGVAR(module_expsym_cmds, $1)= +_LT_TAGVAR(link_all_deplibs, $1)=unknown +_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds +_LT_TAGVAR(reload_flag, $1)=$reload_flag +_LT_TAGVAR(reload_cmds, $1)=$reload_cmds +_LT_TAGVAR(no_undefined_flag, $1)= +_LT_TAGVAR(whole_archive_flag_spec, $1)= +_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no + +# Source file extension for fc test sources. +ac_ext=${ac_fc_srcext-f} + +# Object file extension for compiled fc test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext + +# No sense in running all these tests if we already determined that +# the FC compiler isn't working. Some variables (like enable_shared) +# are currently assumed to apply to all compilers on this platform, +# and will be corrupted by setting them based on a non-working compiler. +if test "$_lt_disable_FC" != yes; then + # Code to be used in simple compile tests + lt_simple_compile_test_code="\ + subroutine t + return + end +" + + # Code to be used in simple link tests + lt_simple_link_test_code="\ + program t + end +" + + # ltmain only uses $CC for tagged configurations so make sure $CC is set. + _LT_TAG_COMPILER + + # save warnings/boilerplate of simple test code + _LT_COMPILER_BOILERPLATE + _LT_LINKER_BOILERPLATE + + # Allow CC to be a program name with arguments. 
+ lt_save_CC="$CC" + lt_save_GCC=$GCC + lt_save_CFLAGS=$CFLAGS + CC=${FC-"f95"} + CFLAGS=$FCFLAGS + compiler=$CC + GCC=$ac_cv_fc_compiler_gnu + + _LT_TAGVAR(compiler, $1)=$CC + _LT_CC_BASENAME([$compiler]) + + if test -n "$compiler"; then + AC_MSG_CHECKING([if libtool supports shared libraries]) + AC_MSG_RESULT([$can_build_shared]) + + AC_MSG_CHECKING([whether to build shared libraries]) + test "$can_build_shared" = "no" && enable_shared=no + + # On AIX, shared libraries and static libraries use the same namespace, and + # are all built from PIC. + case $host_os in + aix3*) + test "$enable_shared" = yes && enable_static=no + if test -n "$RANLIB"; then + archive_cmds="$archive_cmds~\$RANLIB \$lib" + postinstall_cmds='$RANLIB $lib' + fi + ;; + aix[[4-9]]*) + if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then + test "$enable_shared" = yes && enable_static=no + fi + ;; + esac + AC_MSG_RESULT([$enable_shared]) + + AC_MSG_CHECKING([whether to build static libraries]) + # Make sure either enable_shared or enable_static is yes. + test "$enable_shared" = yes || enable_static=yes + AC_MSG_RESULT([$enable_static]) + + _LT_TAGVAR(GCC, $1)="$ac_cv_fc_compiler_gnu" + _LT_TAGVAR(LD, $1)="$LD" + + ## CAVEAT EMPTOR: + ## There is no encapsulation within the following macros, do not change + ## the running order or otherwise move them around unless you know exactly + ## what you are doing... + _LT_SYS_HIDDEN_LIBDEPS($1) + _LT_COMPILER_PIC($1) + _LT_COMPILER_C_O($1) + _LT_COMPILER_FILE_LOCKS($1) + _LT_LINKER_SHLIBS($1) + _LT_SYS_DYNAMIC_LINKER($1) + _LT_LINKER_HARDCODE_LIBPATH($1) + + _LT_CONFIG($1) + fi # test -n "$compiler" + + GCC=$lt_save_GCC + CC=$lt_save_CC + CFLAGS=$lt_save_CFLAGS +fi # test "$_lt_disable_FC" != yes + +AC_LANG_POP +])# _LT_LANG_FC_CONFIG + + +# _LT_LANG_GCJ_CONFIG([TAG]) +# -------------------------- +# Ensure that the configuration variables for the GNU Java Compiler compiler +# are suitably defined. These variables are subsequently used by _LT_CONFIG +# to write the compiler configuration to `libtool'. +m4_defun([_LT_LANG_GCJ_CONFIG], +[AC_REQUIRE([LT_PROG_GCJ])dnl +AC_LANG_SAVE + +# Source file extension for Java test sources. +ac_ext=java + +# Object file extension for compiled Java test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code="class foo {}" + +# Code to be used in simple link tests +lt_simple_link_test_code='public class conftest { public static void main(String[[]] argv) {}; }' + +# ltmain only uses $CC for tagged configurations so make sure $CC is set. +_LT_TAG_COMPILER + +# save warnings/boilerplate of simple test code +_LT_COMPILER_BOILERPLATE +_LT_LINKER_BOILERPLATE + +# Allow CC to be a program name with arguments. +lt_save_CC=$CC +lt_save_CFLAGS=$CFLAGS +lt_save_GCC=$GCC +GCC=yes +CC=${GCJ-"gcj"} +CFLAGS=$GCJFLAGS +compiler=$CC +_LT_TAGVAR(compiler, $1)=$CC +_LT_TAGVAR(LD, $1)="$LD" +_LT_CC_BASENAME([$compiler]) + +# GCJ did not exist at the time GCC didn't implicitly link libc in. +_LT_TAGVAR(archive_cmds_need_lc, $1)=no + +_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds +_LT_TAGVAR(reload_flag, $1)=$reload_flag +_LT_TAGVAR(reload_cmds, $1)=$reload_cmds + +## CAVEAT EMPTOR: +## There is no encapsulation within the following macros, do not change +## the running order or otherwise move them around unless you know exactly +## what you are doing... 
+if test -n "$compiler"; then + _LT_COMPILER_NO_RTTI($1) + _LT_COMPILER_PIC($1) + _LT_COMPILER_C_O($1) + _LT_COMPILER_FILE_LOCKS($1) + _LT_LINKER_SHLIBS($1) + _LT_LINKER_HARDCODE_LIBPATH($1) + + _LT_CONFIG($1) +fi + +AC_LANG_RESTORE + +GCC=$lt_save_GCC +CC=$lt_save_CC +CFLAGS=$lt_save_CFLAGS +])# _LT_LANG_GCJ_CONFIG + + +# _LT_LANG_GO_CONFIG([TAG]) +# -------------------------- +# Ensure that the configuration variables for the GNU Go compiler +# are suitably defined. These variables are subsequently used by _LT_CONFIG +# to write the compiler configuration to `libtool'. +m4_defun([_LT_LANG_GO_CONFIG], +[AC_REQUIRE([LT_PROG_GO])dnl +AC_LANG_SAVE + +# Source file extension for Go test sources. +ac_ext=go + +# Object file extension for compiled Go test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code="package main; func main() { }" + +# Code to be used in simple link tests +lt_simple_link_test_code='package main; func main() { }' + +# ltmain only uses $CC for tagged configurations so make sure $CC is set. +_LT_TAG_COMPILER + +# save warnings/boilerplate of simple test code +_LT_COMPILER_BOILERPLATE +_LT_LINKER_BOILERPLATE + +# Allow CC to be a program name with arguments. +lt_save_CC=$CC +lt_save_CFLAGS=$CFLAGS +lt_save_GCC=$GCC +GCC=yes +CC=${GOC-"gccgo"} +CFLAGS=$GOFLAGS +compiler=$CC +_LT_TAGVAR(compiler, $1)=$CC +_LT_TAGVAR(LD, $1)="$LD" +_LT_CC_BASENAME([$compiler]) + +# Go did not exist at the time GCC didn't implicitly link libc in. +_LT_TAGVAR(archive_cmds_need_lc, $1)=no + +_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds +_LT_TAGVAR(reload_flag, $1)=$reload_flag +_LT_TAGVAR(reload_cmds, $1)=$reload_cmds + +## CAVEAT EMPTOR: +## There is no encapsulation within the following macros, do not change +## the running order or otherwise move them around unless you know exactly +## what you are doing... +if test -n "$compiler"; then + _LT_COMPILER_NO_RTTI($1) + _LT_COMPILER_PIC($1) + _LT_COMPILER_C_O($1) + _LT_COMPILER_FILE_LOCKS($1) + _LT_LINKER_SHLIBS($1) + _LT_LINKER_HARDCODE_LIBPATH($1) + + _LT_CONFIG($1) +fi + +AC_LANG_RESTORE + +GCC=$lt_save_GCC +CC=$lt_save_CC +CFLAGS=$lt_save_CFLAGS +])# _LT_LANG_GO_CONFIG + + +# _LT_LANG_RC_CONFIG([TAG]) +# ------------------------- +# Ensure that the configuration variables for the Windows resource compiler +# are suitably defined. These variables are subsequently used by _LT_CONFIG +# to write the compiler configuration to `libtool'. +m4_defun([_LT_LANG_RC_CONFIG], +[AC_REQUIRE([LT_PROG_RC])dnl +AC_LANG_SAVE + +# Source file extension for RC test sources. +ac_ext=rc + +# Object file extension for compiled RC test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code='sample MENU { MENUITEM "&Soup", 100, CHECKED }' + +# Code to be used in simple link tests +lt_simple_link_test_code="$lt_simple_compile_test_code" + +# ltmain only uses $CC for tagged configurations so make sure $CC is set. +_LT_TAG_COMPILER + +# save warnings/boilerplate of simple test code +_LT_COMPILER_BOILERPLATE +_LT_LINKER_BOILERPLATE + +# Allow CC to be a program name with arguments. 
+lt_save_CC="$CC" +lt_save_CFLAGS=$CFLAGS +lt_save_GCC=$GCC +GCC= +CC=${RC-"windres"} +CFLAGS= +compiler=$CC +_LT_TAGVAR(compiler, $1)=$CC +_LT_CC_BASENAME([$compiler]) +_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes + +if test -n "$compiler"; then + : + _LT_CONFIG($1) +fi + +GCC=$lt_save_GCC +AC_LANG_RESTORE +CC=$lt_save_CC +CFLAGS=$lt_save_CFLAGS +])# _LT_LANG_RC_CONFIG + + +# LT_PROG_GCJ +# ----------- +AC_DEFUN([LT_PROG_GCJ], +[m4_ifdef([AC_PROG_GCJ], [AC_PROG_GCJ], + [m4_ifdef([A][M_PROG_GCJ], [A][M_PROG_GCJ], + [AC_CHECK_TOOL(GCJ, gcj,) + test "x${GCJFLAGS+set}" = xset || GCJFLAGS="-g -O2" + AC_SUBST(GCJFLAGS)])])[]dnl +]) + +# Old name: +AU_ALIAS([LT_AC_PROG_GCJ], [LT_PROG_GCJ]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([LT_AC_PROG_GCJ], []) + + +# LT_PROG_GO +# ---------- +AC_DEFUN([LT_PROG_GO], +[AC_CHECK_TOOL(GOC, gccgo,) +]) + + +# LT_PROG_RC +# ---------- +AC_DEFUN([LT_PROG_RC], +[AC_CHECK_TOOL(RC, windres,) +]) + +# Old name: +AU_ALIAS([LT_AC_PROG_RC], [LT_PROG_RC]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([LT_AC_PROG_RC], []) + + +# _LT_DECL_EGREP +# -------------- +# If we don't have a new enough Autoconf to choose the best grep +# available, choose the one first in the user's PATH. +m4_defun([_LT_DECL_EGREP], +[AC_REQUIRE([AC_PROG_EGREP])dnl +AC_REQUIRE([AC_PROG_FGREP])dnl +test -z "$GREP" && GREP=grep +_LT_DECL([], [GREP], [1], [A grep program that handles long lines]) +_LT_DECL([], [EGREP], [1], [An ERE matcher]) +_LT_DECL([], [FGREP], [1], [A literal string matcher]) +dnl Non-bleeding-edge autoconf doesn't subst GREP, so do it here too +AC_SUBST([GREP]) +]) + + +# _LT_DECL_OBJDUMP +# -------------- +# If we don't have a new enough Autoconf to choose the best objdump +# available, choose the one first in the user's PATH. +m4_defun([_LT_DECL_OBJDUMP], +[AC_CHECK_TOOL(OBJDUMP, objdump, false) +test -z "$OBJDUMP" && OBJDUMP=objdump +_LT_DECL([], [OBJDUMP], [1], [An object symbol dumper]) +AC_SUBST([OBJDUMP]) +]) + +# _LT_DECL_DLLTOOL +# ---------------- +# Ensure DLLTOOL variable is set. +m4_defun([_LT_DECL_DLLTOOL], +[AC_CHECK_TOOL(DLLTOOL, dlltool, false) +test -z "$DLLTOOL" && DLLTOOL=dlltool +_LT_DECL([], [DLLTOOL], [1], [DLL creation program]) +AC_SUBST([DLLTOOL]) +]) + +# _LT_DECL_SED +# ------------ +# Check for a fully-functional sed program, that truncates +# as few characters as possible. Prefer GNU sed if found. +m4_defun([_LT_DECL_SED], +[AC_PROG_SED +test -z "$SED" && SED=sed +Xsed="$SED -e 1s/^X//" +_LT_DECL([], [SED], [1], [A sed program that does not truncate output]) +_LT_DECL([], [Xsed], ["\$SED -e 1s/^X//"], + [Sed that helps us avoid accidentally triggering echo(1) options like -n]) +])# _LT_DECL_SED + +m4_ifndef([AC_PROG_SED], [ +############################################################ +# NOTE: This macro has been submitted for inclusion into # +# GNU Autoconf as AC_PROG_SED. When it is available in # +# a released version of Autoconf we should remove this # +# macro and use it instead. # +############################################################ + +m4_defun([AC_PROG_SED], +[AC_MSG_CHECKING([for a sed that does not truncate output]) +AC_CACHE_VAL(lt_cv_path_SED, +[# Loop through the user's path and test for sed and gsed. +# Then use that list of sed's as ones to test for truncation. +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for lt_ac_prog in sed gsed; do + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$lt_ac_prog$ac_exec_ext"; then + lt_ac_sed_list="$lt_ac_sed_list $as_dir/$lt_ac_prog$ac_exec_ext" + fi + done + done +done +IFS=$as_save_IFS +lt_ac_max=0 +lt_ac_count=0 +# Add /usr/xpg4/bin/sed as it is typically found on Solaris +# along with /bin/sed that truncates output. +for lt_ac_sed in $lt_ac_sed_list /usr/xpg4/bin/sed; do + test ! -f $lt_ac_sed && continue + cat /dev/null > conftest.in + lt_ac_count=0 + echo $ECHO_N "0123456789$ECHO_C" >conftest.in + # Check for GNU sed and select it if it is found. + if "$lt_ac_sed" --version 2>&1 < /dev/null | grep 'GNU' > /dev/null; then + lt_cv_path_SED=$lt_ac_sed + break + fi + while true; do + cat conftest.in conftest.in >conftest.tmp + mv conftest.tmp conftest.in + cp conftest.in conftest.nl + echo >>conftest.nl + $lt_ac_sed -e 's/a$//' < conftest.nl >conftest.out || break + cmp -s conftest.out conftest.nl || break + # 10000 chars as input seems more than enough + test $lt_ac_count -gt 10 && break + lt_ac_count=`expr $lt_ac_count + 1` + if test $lt_ac_count -gt $lt_ac_max; then + lt_ac_max=$lt_ac_count + lt_cv_path_SED=$lt_ac_sed + fi + done +done +]) +SED=$lt_cv_path_SED +AC_SUBST([SED]) +AC_MSG_RESULT([$SED]) +])#AC_PROG_SED +])#m4_ifndef + +# Old name: +AU_ALIAS([LT_AC_PROG_SED], [AC_PROG_SED]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([LT_AC_PROG_SED], []) + + +# _LT_CHECK_SHELL_FEATURES +# ------------------------ +# Find out whether the shell is Bourne or XSI compatible, +# or has some other useful features. +m4_defun([_LT_CHECK_SHELL_FEATURES], +[AC_MSG_CHECKING([whether the shell understands some XSI constructs]) +# Try some XSI features +xsi_shell=no +( _lt_dummy="a/b/c" + test "${_lt_dummy##*/},${_lt_dummy%/*},${_lt_dummy#??}"${_lt_dummy%"$_lt_dummy"}, \ + = c,a/b,b/c, \ + && eval 'test $(( 1 + 1 )) -eq 2 \ + && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \ + && xsi_shell=yes +AC_MSG_RESULT([$xsi_shell]) +_LT_CONFIG_LIBTOOL_INIT([xsi_shell='$xsi_shell']) + +AC_MSG_CHECKING([whether the shell understands "+="]) +lt_shell_append=no +( foo=bar; set foo baz; eval "$[1]+=\$[2]" && test "$foo" = barbaz ) \ + >/dev/null 2>&1 \ + && lt_shell_append=yes +AC_MSG_RESULT([$lt_shell_append]) +_LT_CONFIG_LIBTOOL_INIT([lt_shell_append='$lt_shell_append']) + +if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then + lt_unset=unset +else + lt_unset=false +fi +_LT_DECL([], [lt_unset], [0], [whether the shell understands "unset"])dnl + +# test EBCDIC or ASCII +case `echo X|tr X '\101'` in + A) # ASCII based system + # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr + lt_SP2NL='tr \040 \012' + lt_NL2SP='tr \015\012 \040\040' + ;; + *) # EBCDIC based system + lt_SP2NL='tr \100 \n' + lt_NL2SP='tr \r\n \100\100' + ;; +esac +_LT_DECL([SP2NL], [lt_SP2NL], [1], [turn spaces into newlines])dnl +_LT_DECL([NL2SP], [lt_NL2SP], [1], [turn newlines into spaces])dnl +])# _LT_CHECK_SHELL_FEATURES + + +# _LT_PROG_FUNCTION_REPLACE (FUNCNAME, REPLACEMENT-BODY) +# ------------------------------------------------------ +# In `$cfgfile', look for function FUNCNAME delimited by `^FUNCNAME ()$' and +# '^} FUNCNAME ', and replace its body with REPLACEMENT-BODY. 
+m4_defun([_LT_PROG_FUNCTION_REPLACE], +[dnl { +sed -e '/^$1 ()$/,/^} # $1 /c\ +$1 ()\ +{\ +m4_bpatsubsts([$2], [$], [\\], [^\([ ]\)], [\\\1]) +} # Extended-shell $1 implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: +]) + + +# _LT_PROG_REPLACE_SHELLFNS +# ------------------------- +# Replace existing portable implementations of several shell functions with +# equivalent extended shell implementations where those features are available.. +m4_defun([_LT_PROG_REPLACE_SHELLFNS], +[if test x"$xsi_shell" = xyes; then + _LT_PROG_FUNCTION_REPLACE([func_dirname], [dnl + case ${1} in + */*) func_dirname_result="${1%/*}${2}" ;; + * ) func_dirname_result="${3}" ;; + esac]) + + _LT_PROG_FUNCTION_REPLACE([func_basename], [dnl + func_basename_result="${1##*/}"]) + + _LT_PROG_FUNCTION_REPLACE([func_dirname_and_basename], [dnl + case ${1} in + */*) func_dirname_result="${1%/*}${2}" ;; + * ) func_dirname_result="${3}" ;; + esac + func_basename_result="${1##*/}"]) + + _LT_PROG_FUNCTION_REPLACE([func_stripname], [dnl + # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are + # positional parameters, so assign one to ordinary parameter first. + func_stripname_result=${3} + func_stripname_result=${func_stripname_result#"${1}"} + func_stripname_result=${func_stripname_result%"${2}"}]) + + _LT_PROG_FUNCTION_REPLACE([func_split_long_opt], [dnl + func_split_long_opt_name=${1%%=*} + func_split_long_opt_arg=${1#*=}]) + + _LT_PROG_FUNCTION_REPLACE([func_split_short_opt], [dnl + func_split_short_opt_arg=${1#??} + func_split_short_opt_name=${1%"$func_split_short_opt_arg"}]) + + _LT_PROG_FUNCTION_REPLACE([func_lo2o], [dnl + case ${1} in + *.lo) func_lo2o_result=${1%.lo}.${objext} ;; + *) func_lo2o_result=${1} ;; + esac]) + + _LT_PROG_FUNCTION_REPLACE([func_xform], [ func_xform_result=${1%.*}.lo]) + + _LT_PROG_FUNCTION_REPLACE([func_arith], [ func_arith_result=$(( $[*] ))]) + + _LT_PROG_FUNCTION_REPLACE([func_len], [ func_len_result=${#1}]) +fi + +if test x"$lt_shell_append" = xyes; then + _LT_PROG_FUNCTION_REPLACE([func_append], [ eval "${1}+=\\${2}"]) + + _LT_PROG_FUNCTION_REPLACE([func_append_quoted], [dnl + func_quote_for_eval "${2}" +dnl m4 expansion turns \\\\ into \\, and then the shell eval turns that into \ + eval "${1}+=\\\\ \\$func_quote_for_eval_result"]) + + # Save a `func_append' function call where possible by direct use of '+=' + sed -e 's%func_append \([[a-zA-Z_]]\{1,\}\) "%\1+="%g' $cfgfile > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") + test 0 -eq $? || _lt_function_replace_fail=: +else + # Save a `func_append' function call even when '+=' is not available + sed -e 's%func_append \([[a-zA-Z_]]\{1,\}\) "%\1="$\1%g' $cfgfile > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") + test 0 -eq $? || _lt_function_replace_fail=: +fi + +if test x"$_lt_function_replace_fail" = x":"; then + AC_MSG_WARN([Unable to substitute extended shell functions in $ofile]) +fi +]) + +# _LT_PATH_CONVERSION_FUNCTIONS +# ----------------------------- +# Determine which file name conversion functions should be used by +# func_to_host_file (and, implicitly, by func_to_host_path). These are needed +# for certain cross-compile configurations and native mingw. 
+m4_defun([_LT_PATH_CONVERSION_FUNCTIONS], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +AC_REQUIRE([AC_CANONICAL_BUILD])dnl +AC_MSG_CHECKING([how to convert $build file names to $host format]) +AC_CACHE_VAL(lt_cv_to_host_file_cmd, +[case $host in + *-*-mingw* ) + case $build in + *-*-mingw* ) # actually msys + lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 + ;; + *-*-cygwin* ) + lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 + ;; + * ) # otherwise, assume *nix + lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 + ;; + esac + ;; + *-*-cygwin* ) + case $build in + *-*-mingw* ) # actually msys + lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin + ;; + *-*-cygwin* ) + lt_cv_to_host_file_cmd=func_convert_file_noop + ;; + * ) # otherwise, assume *nix + lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin + ;; + esac + ;; + * ) # unhandled hosts (and "normal" native builds) + lt_cv_to_host_file_cmd=func_convert_file_noop + ;; +esac +]) +to_host_file_cmd=$lt_cv_to_host_file_cmd +AC_MSG_RESULT([$lt_cv_to_host_file_cmd]) +_LT_DECL([to_host_file_cmd], [lt_cv_to_host_file_cmd], + [0], [convert $build file names to $host format])dnl + +AC_MSG_CHECKING([how to convert $build file names to toolchain format]) +AC_CACHE_VAL(lt_cv_to_tool_file_cmd, +[#assume ordinary cross tools, or native build. +lt_cv_to_tool_file_cmd=func_convert_file_noop +case $host in + *-*-mingw* ) + case $build in + *-*-mingw* ) # actually msys + lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 + ;; + esac + ;; +esac +]) +to_tool_file_cmd=$lt_cv_to_tool_file_cmd +AC_MSG_RESULT([$lt_cv_to_tool_file_cmd]) +_LT_DECL([to_tool_file_cmd], [lt_cv_to_tool_file_cmd], + [0], [convert $build files to toolchain format])dnl +])# _LT_PATH_CONVERSION_FUNCTIONS diff --git a/autoconf/libtool/ltdl.m4 b/autoconf/libtool/ltdl.m4 new file mode 100644 index 00000000..1ef84a50 --- /dev/null +++ b/autoconf/libtool/ltdl.m4 @@ -0,0 +1,817 @@ +# ltdl.m4 - Configure ltdl for the target system. -*-Autoconf-*- +# +# Copyright (C) 1999-2006, 2007, 2008, 2011 Free Software Foundation, Inc. +# Written by Thomas Tanner, 1999 +# +# This file is free software; the Free Software Foundation gives +# unlimited permission to copy and/or distribute it, with or without +# modifications, as long as this notice is preserved. + +# serial 18 LTDL_INIT + +# LT_CONFIG_LTDL_DIR(DIRECTORY, [LTDL-MODE]) +# ------------------------------------------ +# DIRECTORY contains the libltdl sources. It is okay to call this +# function multiple times, as long as the same DIRECTORY is always given. +AC_DEFUN([LT_CONFIG_LTDL_DIR], +[AC_BEFORE([$0], [LTDL_INIT]) +_$0($*) +])# LT_CONFIG_LTDL_DIR + +# We break this out into a separate macro, so that we can call it safely +# internally without being caught accidentally by the sed scan in libtoolize. +m4_defun([_LT_CONFIG_LTDL_DIR], +[dnl remove trailing slashes +m4_pushdef([_ARG_DIR], m4_bpatsubst([$1], [/*$])) +m4_case(_LTDL_DIR, + [], [dnl only set lt_ltdl_dir if _ARG_DIR is not simply `.' + m4_if(_ARG_DIR, [.], + [], + [m4_define([_LTDL_DIR], _ARG_DIR) + _LT_SHELL_INIT([lt_ltdl_dir=']_ARG_DIR['])])], + [m4_if(_ARG_DIR, _LTDL_DIR, + [], + [m4_fatal([multiple libltdl directories: `]_LTDL_DIR[', `]_ARG_DIR['])])]) +m4_popdef([_ARG_DIR]) +])# _LT_CONFIG_LTDL_DIR + +# Initialise: +m4_define([_LTDL_DIR], []) + + +# _LT_BUILD_PREFIX +# ---------------- +# If Autoconf is new enough, expand to `${top_build_prefix}', otherwise +# to `${top_builddir}/'. 
+m4_define([_LT_BUILD_PREFIX], +[m4_ifdef([AC_AUTOCONF_VERSION], + [m4_if(m4_version_compare(m4_defn([AC_AUTOCONF_VERSION]), [2.62]), + [-1], [m4_ifdef([_AC_HAVE_TOP_BUILD_PREFIX], + [${top_build_prefix}], + [${top_builddir}/])], + [${top_build_prefix}])], + [${top_builddir}/])[]dnl +]) + + +# LTDL_CONVENIENCE +# ---------------- +# sets LIBLTDL to the link flags for the libltdl convenience library and +# LTDLINCL to the include flags for the libltdl header and adds +# --enable-ltdl-convenience to the configure arguments. Note that +# AC_CONFIG_SUBDIRS is not called here. LIBLTDL will be prefixed with +# '${top_build_prefix}' if available, otherwise with '${top_builddir}/', +# and LTDLINCL will be prefixed with '${top_srcdir}/' (note the single +# quotes!). If your package is not flat and you're not using automake, +# define top_build_prefix, top_builddir, and top_srcdir appropriately +# in your Makefiles. +AC_DEFUN([LTDL_CONVENIENCE], +[AC_BEFORE([$0], [LTDL_INIT])dnl +dnl Although the argument is deprecated and no longer documented, +dnl LTDL_CONVENIENCE used to take a DIRECTORY orgument, if we have one +dnl here make sure it is the same as any other declaration of libltdl's +dnl location! This also ensures lt_ltdl_dir is set when configure.ac is +dnl not yet using an explicit LT_CONFIG_LTDL_DIR. +m4_ifval([$1], [_LT_CONFIG_LTDL_DIR([$1])])dnl +_$0() +])# LTDL_CONVENIENCE + +# AC_LIBLTDL_CONVENIENCE accepted a directory argument in older libtools, +# now we have LT_CONFIG_LTDL_DIR: +AU_DEFUN([AC_LIBLTDL_CONVENIENCE], +[_LT_CONFIG_LTDL_DIR([m4_default([$1], [libltdl])]) +_LTDL_CONVENIENCE]) + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBLTDL_CONVENIENCE], []) + + +# _LTDL_CONVENIENCE +# ----------------- +# Code shared by LTDL_CONVENIENCE and LTDL_INIT([convenience]). +m4_defun([_LTDL_CONVENIENCE], +[case $enable_ltdl_convenience in + no) AC_MSG_ERROR([this package needs a convenience libltdl]) ;; + "") enable_ltdl_convenience=yes + ac_configure_args="$ac_configure_args --enable-ltdl-convenience" ;; +esac +LIBLTDL='_LT_BUILD_PREFIX'"${lt_ltdl_dir+$lt_ltdl_dir/}libltdlc.la" +LTDLDEPS=$LIBLTDL +LTDLINCL='-I${top_srcdir}'"${lt_ltdl_dir+/$lt_ltdl_dir}" + +AC_SUBST([LIBLTDL]) +AC_SUBST([LTDLDEPS]) +AC_SUBST([LTDLINCL]) + +# For backwards non-gettext consistent compatibility... +INCLTDL="$LTDLINCL" +AC_SUBST([INCLTDL]) +])# _LTDL_CONVENIENCE + + +# LTDL_INSTALLABLE +# ---------------- +# sets LIBLTDL to the link flags for the libltdl installable library +# and LTDLINCL to the include flags for the libltdl header and adds +# --enable-ltdl-install to the configure arguments. Note that +# AC_CONFIG_SUBDIRS is not called from here. If an installed libltdl +# is not found, LIBLTDL will be prefixed with '${top_build_prefix}' if +# available, otherwise with '${top_builddir}/', and LTDLINCL will be +# prefixed with '${top_srcdir}/' (note the single quotes!). If your +# package is not flat and you're not using automake, define top_build_prefix, +# top_builddir, and top_srcdir appropriately in your Makefiles. +# In the future, this macro may have to be called after LT_INIT. +AC_DEFUN([LTDL_INSTALLABLE], +[AC_BEFORE([$0], [LTDL_INIT])dnl +dnl Although the argument is deprecated and no longer documented, +dnl LTDL_INSTALLABLE used to take a DIRECTORY orgument, if we have one +dnl here make sure it is the same as any other declaration of libltdl's +dnl location! This also ensures lt_ltdl_dir is set when configure.ac is +dnl not yet using an explicit LT_CONFIG_LTDL_DIR. 
+m4_ifval([$1], [_LT_CONFIG_LTDL_DIR([$1])])dnl +_$0() +])# LTDL_INSTALLABLE + +# AC_LIBLTDL_INSTALLABLE accepted a directory argument in older libtools, +# now we have LT_CONFIG_LTDL_DIR: +AU_DEFUN([AC_LIBLTDL_INSTALLABLE], +[_LT_CONFIG_LTDL_DIR([m4_default([$1], [libltdl])]) +_LTDL_INSTALLABLE]) + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBLTDL_INSTALLABLE], []) + + +# _LTDL_INSTALLABLE +# ----------------- +# Code shared by LTDL_INSTALLABLE and LTDL_INIT([installable]). +m4_defun([_LTDL_INSTALLABLE], +[if test -f $prefix/lib/libltdl.la; then + lt_save_LDFLAGS="$LDFLAGS" + LDFLAGS="-L$prefix/lib $LDFLAGS" + AC_CHECK_LIB([ltdl], [lt_dlinit], [lt_lib_ltdl=yes]) + LDFLAGS="$lt_save_LDFLAGS" + if test x"${lt_lib_ltdl-no}" = xyes; then + if test x"$enable_ltdl_install" != xyes; then + # Don't overwrite $prefix/lib/libltdl.la without --enable-ltdl-install + AC_MSG_WARN([not overwriting libltdl at $prefix, force with `--enable-ltdl-install']) + enable_ltdl_install=no + fi + elif test x"$enable_ltdl_install" = xno; then + AC_MSG_WARN([libltdl not installed, but installation disabled]) + fi +fi + +# If configure.ac declared an installable ltdl, and the user didn't override +# with --disable-ltdl-install, we will install the shipped libltdl. +case $enable_ltdl_install in + no) ac_configure_args="$ac_configure_args --enable-ltdl-install=no" + LIBLTDL="-lltdl" + LTDLDEPS= + LTDLINCL= + ;; + *) enable_ltdl_install=yes + ac_configure_args="$ac_configure_args --enable-ltdl-install" + LIBLTDL='_LT_BUILD_PREFIX'"${lt_ltdl_dir+$lt_ltdl_dir/}libltdl.la" + LTDLDEPS=$LIBLTDL + LTDLINCL='-I${top_srcdir}'"${lt_ltdl_dir+/$lt_ltdl_dir}" + ;; +esac + +AC_SUBST([LIBLTDL]) +AC_SUBST([LTDLDEPS]) +AC_SUBST([LTDLINCL]) + +# For backwards non-gettext consistent compatibility... +INCLTDL="$LTDLINCL" +AC_SUBST([INCLTDL]) +])# LTDL_INSTALLABLE + + +# _LTDL_MODE_DISPATCH +# ------------------- +m4_define([_LTDL_MODE_DISPATCH], +[dnl If _LTDL_DIR is `.', then we are configuring libltdl itself: +m4_if(_LTDL_DIR, [], + [], + dnl if _LTDL_MODE was not set already, the default value is `subproject': + [m4_case(m4_default(_LTDL_MODE, [subproject]), + [subproject], [AC_CONFIG_SUBDIRS(_LTDL_DIR) + _LT_SHELL_INIT([lt_dlopen_dir="$lt_ltdl_dir"])], + [nonrecursive], [_LT_SHELL_INIT([lt_dlopen_dir="$lt_ltdl_dir"; lt_libobj_prefix="$lt_ltdl_dir/"])], + [recursive], [], + [m4_fatal([unknown libltdl mode: ]_LTDL_MODE)])])dnl +dnl Be careful not to expand twice: +m4_define([$0], []) +])# _LTDL_MODE_DISPATCH + + +# _LT_LIBOBJ(MODULE_NAME) +# ----------------------- +# Like AC_LIBOBJ, except that MODULE_NAME goes into _LT_LIBOBJS instead +# of into LIBOBJS. +AC_DEFUN([_LT_LIBOBJ], [ + m4_pattern_allow([^_LT_LIBOBJS$]) + _LT_LIBOBJS="$_LT_LIBOBJS $1.$ac_objext" +])# _LT_LIBOBJS + + +# LTDL_INIT([OPTIONS]) +# -------------------- +# Clients of libltdl can use this macro to allow the installer to +# choose between a shipped copy of the ltdl sources or a preinstalled +# version of the library. If the shipped ltdl sources are not in a +# subdirectory named libltdl, the directory name must be given by +# LT_CONFIG_LTDL_DIR. +AC_DEFUN([LTDL_INIT], +[dnl Parse OPTIONS +_LT_SET_OPTIONS([$0], [$1]) + +dnl We need to keep our own list of libobjs separate from our parent project, +dnl and the easiest way to do that is redefine the AC_LIBOBJs macro while +dnl we look for our own LIBOBJs. 
+m4_pushdef([AC_LIBOBJ], m4_defn([_LT_LIBOBJ])) +m4_pushdef([AC_LIBSOURCES]) + +dnl If not otherwise defined, default to the 1.5.x compatible subproject mode: +m4_if(_LTDL_MODE, [], + [m4_define([_LTDL_MODE], m4_default([$2], [subproject])) + m4_if([-1], [m4_bregexp(_LTDL_MODE, [\(subproject\|\(non\)?recursive\)])], + [m4_fatal([unknown libltdl mode: ]_LTDL_MODE)])]) + +AC_ARG_WITH([included_ltdl], + [AS_HELP_STRING([--with-included-ltdl], + [use the GNU ltdl sources included here])]) + +if test "x$with_included_ltdl" != xyes; then + # We are not being forced to use the included libltdl sources, so + # decide whether there is a useful installed version we can use. + AC_CHECK_HEADER([ltdl.h], + [AC_CHECK_DECL([lt_dlinterface_register], + [AC_CHECK_LIB([ltdl], [lt_dladvise_preload], + [with_included_ltdl=no], + [with_included_ltdl=yes])], + [with_included_ltdl=yes], + [AC_INCLUDES_DEFAULT + #include ])], + [with_included_ltdl=yes], + [AC_INCLUDES_DEFAULT] + ) +fi + +dnl If neither LT_CONFIG_LTDL_DIR, LTDL_CONVENIENCE nor LTDL_INSTALLABLE +dnl was called yet, then for old times' sake, we assume libltdl is in an +dnl eponymous directory: +AC_PROVIDE_IFELSE([LT_CONFIG_LTDL_DIR], [], [_LT_CONFIG_LTDL_DIR([libltdl])]) + +AC_ARG_WITH([ltdl_include], + [AS_HELP_STRING([--with-ltdl-include=DIR], + [use the ltdl headers installed in DIR])]) + +if test -n "$with_ltdl_include"; then + if test -f "$with_ltdl_include/ltdl.h"; then : + else + AC_MSG_ERROR([invalid ltdl include directory: `$with_ltdl_include']) + fi +else + with_ltdl_include=no +fi + +AC_ARG_WITH([ltdl_lib], + [AS_HELP_STRING([--with-ltdl-lib=DIR], + [use the libltdl.la installed in DIR])]) + +if test -n "$with_ltdl_lib"; then + if test -f "$with_ltdl_lib/libltdl.la"; then : + else + AC_MSG_ERROR([invalid ltdl library directory: `$with_ltdl_lib']) + fi +else + with_ltdl_lib=no +fi + +case ,$with_included_ltdl,$with_ltdl_include,$with_ltdl_lib, in + ,yes,no,no,) + m4_case(m4_default(_LTDL_TYPE, [convenience]), + [convenience], [_LTDL_CONVENIENCE], + [installable], [_LTDL_INSTALLABLE], + [m4_fatal([unknown libltdl build type: ]_LTDL_TYPE)]) + ;; + ,no,no,no,) + # If the included ltdl is not to be used, then use the + # preinstalled libltdl we found. + AC_DEFINE([HAVE_LTDL], [1], + [Define this if a modern libltdl is already installed]) + LIBLTDL=-lltdl + LTDLDEPS= + LTDLINCL= + ;; + ,no*,no,*) + AC_MSG_ERROR([`--with-ltdl-include' and `--with-ltdl-lib' options must be used together]) + ;; + *) with_included_ltdl=no + LIBLTDL="-L$with_ltdl_lib -lltdl" + LTDLDEPS= + LTDLINCL="-I$with_ltdl_include" + ;; +esac +INCLTDL="$LTDLINCL" + +# Report our decision... +AC_MSG_CHECKING([where to find libltdl headers]) +AC_MSG_RESULT([$LTDLINCL]) +AC_MSG_CHECKING([where to find libltdl library]) +AC_MSG_RESULT([$LIBLTDL]) + +_LTDL_SETUP + +dnl restore autoconf definition. +m4_popdef([AC_LIBOBJ]) +m4_popdef([AC_LIBSOURCES]) + +AC_CONFIG_COMMANDS_PRE([ + _ltdl_libobjs= + _ltdl_ltlibobjs= + if test -n "$_LT_LIBOBJS"; then + # Remove the extension. 
+ _lt_sed_drop_objext='s/\.o$//;s/\.obj$//' + for i in `for i in $_LT_LIBOBJS; do echo "$i"; done | sed "$_lt_sed_drop_objext" | sort -u`; do + _ltdl_libobjs="$_ltdl_libobjs $lt_libobj_prefix$i.$ac_objext" + _ltdl_ltlibobjs="$_ltdl_ltlibobjs $lt_libobj_prefix$i.lo" + done + fi + AC_SUBST([ltdl_LIBOBJS], [$_ltdl_libobjs]) + AC_SUBST([ltdl_LTLIBOBJS], [$_ltdl_ltlibobjs]) +]) + +# Only expand once: +m4_define([LTDL_INIT]) +])# LTDL_INIT + +# Old names: +AU_DEFUN([AC_LIB_LTDL], [LTDL_INIT($@)]) +AU_DEFUN([AC_WITH_LTDL], [LTDL_INIT($@)]) +AU_DEFUN([LT_WITH_LTDL], [LTDL_INIT($@)]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIB_LTDL], []) +dnl AC_DEFUN([AC_WITH_LTDL], []) +dnl AC_DEFUN([LT_WITH_LTDL], []) + + +# _LTDL_SETUP +# ----------- +# Perform all the checks necessary for compilation of the ltdl objects +# -- including compiler checks and header checks. This is a public +# interface mainly for the benefit of libltdl's own configure.ac, most +# other users should call LTDL_INIT instead. +AC_DEFUN([_LTDL_SETUP], +[AC_REQUIRE([AC_PROG_CC])dnl +AC_REQUIRE([LT_SYS_MODULE_EXT])dnl +AC_REQUIRE([LT_SYS_MODULE_PATH])dnl +AC_REQUIRE([LT_SYS_DLSEARCH_PATH])dnl +AC_REQUIRE([LT_LIB_DLLOAD])dnl +AC_REQUIRE([LT_SYS_SYMBOL_USCORE])dnl +AC_REQUIRE([LT_FUNC_DLSYM_USCORE])dnl +AC_REQUIRE([LT_SYS_DLOPEN_DEPLIBS])dnl +AC_REQUIRE([gl_FUNC_ARGZ])dnl + +m4_require([_LT_CHECK_OBJDIR])dnl +m4_require([_LT_HEADER_DLFCN])dnl +m4_require([_LT_CHECK_DLPREOPEN])dnl +m4_require([_LT_DECL_SED])dnl + +dnl Don't require this, or it will be expanded earlier than the code +dnl that sets the variables it relies on: +_LT_ENABLE_INSTALL + +dnl _LTDL_MODE specific code must be called at least once: +_LTDL_MODE_DISPATCH + +# In order that ltdl.c can compile, find out the first AC_CONFIG_HEADERS +# the user used. This is so that ltdl.h can pick up the parent projects +# config.h file, The first file in AC_CONFIG_HEADERS must contain the +# definitions required by ltdl.c. +# FIXME: Remove use of undocumented AC_LIST_HEADERS (2.59 compatibility). 
+AC_CONFIG_COMMANDS_PRE([dnl +m4_pattern_allow([^LT_CONFIG_H$])dnl +m4_ifset([AH_HEADER], + [LT_CONFIG_H=AH_HEADER], + [m4_ifset([AC_LIST_HEADERS], + [LT_CONFIG_H=`echo "AC_LIST_HEADERS" | $SED 's,^[[ ]]*,,;s,[[ :]].*$,,'`], + [])])]) +AC_SUBST([LT_CONFIG_H]) + +AC_CHECK_HEADERS([unistd.h dl.h sys/dl.h dld.h mach-o/dyld.h dirent.h], + [], [], [AC_INCLUDES_DEFAULT]) + +AC_CHECK_FUNCS([closedir opendir readdir], [], [AC_LIBOBJ([lt__dirent])]) +AC_CHECK_FUNCS([strlcat strlcpy], [], [AC_LIBOBJ([lt__strl])]) + +m4_pattern_allow([LT_LIBEXT])dnl +AC_DEFINE_UNQUOTED([LT_LIBEXT],["$libext"],[The archive extension]) + +name= +eval "lt_libprefix=\"$libname_spec\"" +m4_pattern_allow([LT_LIBPREFIX])dnl +AC_DEFINE_UNQUOTED([LT_LIBPREFIX],["$lt_libprefix"],[The archive prefix]) + +name=ltdl +eval "LTDLOPEN=\"$libname_spec\"" +AC_SUBST([LTDLOPEN]) +])# _LTDL_SETUP + + +# _LT_ENABLE_INSTALL +# ------------------ +m4_define([_LT_ENABLE_INSTALL], +[AC_ARG_ENABLE([ltdl-install], + [AS_HELP_STRING([--enable-ltdl-install], [install libltdl])]) + +case ,${enable_ltdl_install},${enable_ltdl_convenience} in + *yes*) ;; + *) enable_ltdl_convenience=yes ;; +esac + +m4_ifdef([AM_CONDITIONAL], +[AM_CONDITIONAL(INSTALL_LTDL, test x"${enable_ltdl_install-no}" != xno) + AM_CONDITIONAL(CONVENIENCE_LTDL, test x"${enable_ltdl_convenience-no}" != xno)]) +])# _LT_ENABLE_INSTALL + + +# LT_SYS_DLOPEN_DEPLIBS +# --------------------- +AC_DEFUN([LT_SYS_DLOPEN_DEPLIBS], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +AC_CACHE_CHECK([whether deplibs are loaded by dlopen], + [lt_cv_sys_dlopen_deplibs], + [# PORTME does your system automatically load deplibs for dlopen? + # or its logical equivalent (e.g. shl_load for HP-UX < 11) + # For now, we just catch OSes we know something about -- in the + # future, we'll try test this programmatically. + lt_cv_sys_dlopen_deplibs=unknown + case $host_os in + aix3*|aix4.1.*|aix4.2.*) + # Unknown whether this is true for these versions of AIX, but + # we want this `case' here to explicitly catch those versions. + lt_cv_sys_dlopen_deplibs=unknown + ;; + aix[[4-9]]*) + lt_cv_sys_dlopen_deplibs=yes + ;; + amigaos*) + case $host_cpu in + powerpc) + lt_cv_sys_dlopen_deplibs=no + ;; + esac + ;; + darwin*) + # Assuming the user has installed a libdl from somewhere, this is true + # If you are looking for one http://www.opendarwin.org/projects/dlcompat + lt_cv_sys_dlopen_deplibs=yes + ;; + freebsd* | dragonfly*) + lt_cv_sys_dlopen_deplibs=yes + ;; + gnu* | linux* | k*bsd*-gnu | kopensolaris*-gnu) + # GNU and its variants, using gnu ld.so (Glibc) + lt_cv_sys_dlopen_deplibs=yes + ;; + hpux10*|hpux11*) + lt_cv_sys_dlopen_deplibs=yes + ;; + interix*) + lt_cv_sys_dlopen_deplibs=yes + ;; + irix[[12345]]*|irix6.[[01]]*) + # Catch all versions of IRIX before 6.2, and indicate that we don't + # know how it worked for any of those versions. + lt_cv_sys_dlopen_deplibs=unknown + ;; + irix*) + # The case above catches anything before 6.2, and it's known that + # at 6.2 and later dlopen does load deplibs. + lt_cv_sys_dlopen_deplibs=yes + ;; + netbsd* | netbsdelf*-gnu) + lt_cv_sys_dlopen_deplibs=yes + ;; + openbsd*) + lt_cv_sys_dlopen_deplibs=yes + ;; + osf[[1234]]*) + # dlopen did load deplibs (at least at 4.x), but until the 5.x series, + # it did *not* use an RPATH in a shared library to find objects the + # library depends on, so we explicitly say `no'. 
+ lt_cv_sys_dlopen_deplibs=no + ;; + osf5.0|osf5.0a|osf5.1) + # dlopen *does* load deplibs and with the right loader patch applied + # it even uses RPATH in a shared library to search for shared objects + # that the library depends on, but there's no easy way to know if that + # patch is installed. Since this is the case, all we can really + # say is unknown -- it depends on the patch being installed. If + # it is, this changes to `yes'. Without it, it would be `no'. + lt_cv_sys_dlopen_deplibs=unknown + ;; + osf*) + # the two cases above should catch all versions of osf <= 5.1. Read + # the comments above for what we know about them. + # At > 5.1, deplibs are loaded *and* any RPATH in a shared library + # is used to find them so we can finally say `yes'. + lt_cv_sys_dlopen_deplibs=yes + ;; + qnx*) + lt_cv_sys_dlopen_deplibs=yes + ;; + solaris*) + lt_cv_sys_dlopen_deplibs=yes + ;; + sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + libltdl_cv_sys_dlopen_deplibs=yes + ;; + esac + ]) +if test "$lt_cv_sys_dlopen_deplibs" != yes; then + AC_DEFINE([LTDL_DLOPEN_DEPLIBS], [1], + [Define if the OS needs help to load dependent libraries for dlopen().]) +fi +])# LT_SYS_DLOPEN_DEPLIBS + +# Old name: +AU_ALIAS([AC_LTDL_SYS_DLOPEN_DEPLIBS], [LT_SYS_DLOPEN_DEPLIBS]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LTDL_SYS_DLOPEN_DEPLIBS], []) + + +# LT_SYS_MODULE_EXT +# ----------------- +AC_DEFUN([LT_SYS_MODULE_EXT], +[m4_require([_LT_SYS_DYNAMIC_LINKER])dnl +AC_CACHE_CHECK([which extension is used for runtime loadable modules], + [libltdl_cv_shlibext], +[ +module=yes +eval libltdl_cv_shlibext=$shrext_cmds +module=no +eval libltdl_cv_shrext=$shrext_cmds + ]) +if test -n "$libltdl_cv_shlibext"; then + m4_pattern_allow([LT_MODULE_EXT])dnl + AC_DEFINE_UNQUOTED([LT_MODULE_EXT], ["$libltdl_cv_shlibext"], + [Define to the extension used for runtime loadable modules, say, ".so".]) +fi +if test "$libltdl_cv_shrext" != "$libltdl_cv_shlibext"; then + m4_pattern_allow([LT_SHARED_EXT])dnl + AC_DEFINE_UNQUOTED([LT_SHARED_EXT], ["$libltdl_cv_shrext"], + [Define to the shared library suffix, say, ".dylib".]) +fi +])# LT_SYS_MODULE_EXT + +# Old name: +AU_ALIAS([AC_LTDL_SHLIBEXT], [LT_SYS_MODULE_EXT]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LTDL_SHLIBEXT], []) + + +# LT_SYS_MODULE_PATH +# ------------------ +AC_DEFUN([LT_SYS_MODULE_PATH], +[m4_require([_LT_SYS_DYNAMIC_LINKER])dnl +AC_CACHE_CHECK([which variable specifies run-time module search path], + [lt_cv_module_path_var], [lt_cv_module_path_var="$shlibpath_var"]) +if test -n "$lt_cv_module_path_var"; then + m4_pattern_allow([LT_MODULE_PATH_VAR])dnl + AC_DEFINE_UNQUOTED([LT_MODULE_PATH_VAR], ["$lt_cv_module_path_var"], + [Define to the name of the environment variable that determines the run-time module search path.]) +fi +])# LT_SYS_MODULE_PATH + +# Old name: +AU_ALIAS([AC_LTDL_SHLIBPATH], [LT_SYS_MODULE_PATH]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LTDL_SHLIBPATH], []) + + +# LT_SYS_DLSEARCH_PATH +# -------------------- +AC_DEFUN([LT_SYS_DLSEARCH_PATH], +[m4_require([_LT_SYS_DYNAMIC_LINKER])dnl +AC_CACHE_CHECK([for the default library search path], + [lt_cv_sys_dlsearch_path], + [lt_cv_sys_dlsearch_path="$sys_lib_dlsearch_path_spec"]) +if test -n "$lt_cv_sys_dlsearch_path"; then + sys_dlsearch_path= + for dir in $lt_cv_sys_dlsearch_path; do + if test -z "$sys_dlsearch_path"; then + sys_dlsearch_path="$dir" + else + sys_dlsearch_path="$sys_dlsearch_path$PATH_SEPARATOR$dir" + fi + done + 
m4_pattern_allow([LT_DLSEARCH_PATH])dnl + AC_DEFINE_UNQUOTED([LT_DLSEARCH_PATH], ["$sys_dlsearch_path"], + [Define to the system default library search path.]) +fi +])# LT_SYS_DLSEARCH_PATH + +# Old name: +AU_ALIAS([AC_LTDL_SYSSEARCHPATH], [LT_SYS_DLSEARCH_PATH]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LTDL_SYSSEARCHPATH], []) + + +# _LT_CHECK_DLPREOPEN +# ------------------- +m4_defun([_LT_CHECK_DLPREOPEN], +[m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl +AC_CACHE_CHECK([whether libtool supports -dlopen/-dlpreopen], + [libltdl_cv_preloaded_symbols], + [if test -n "$lt_cv_sys_global_symbol_pipe"; then + libltdl_cv_preloaded_symbols=yes + else + libltdl_cv_preloaded_symbols=no + fi + ]) +if test x"$libltdl_cv_preloaded_symbols" = xyes; then + AC_DEFINE([HAVE_PRELOADED_SYMBOLS], [1], + [Define if libtool can extract symbol lists from object files.]) +fi +])# _LT_CHECK_DLPREOPEN + + +# LT_LIB_DLLOAD +# ------------- +AC_DEFUN([LT_LIB_DLLOAD], +[m4_pattern_allow([^LT_DLLOADERS$]) +LT_DLLOADERS= +AC_SUBST([LT_DLLOADERS]) + +AC_LANG_PUSH([C]) + +LIBADD_DLOPEN= +AC_SEARCH_LIBS([dlopen], [dl], + [AC_DEFINE([HAVE_LIBDL], [1], + [Define if you have the libdl library or equivalent.]) + if test "$ac_cv_search_dlopen" != "none required" ; then + LIBADD_DLOPEN="-ldl" + fi + libltdl_cv_lib_dl_dlopen="yes" + LT_DLLOADERS="$LT_DLLOADERS ${lt_dlopen_dir+$lt_dlopen_dir/}dlopen.la"], + [AC_LINK_IFELSE([AC_LANG_PROGRAM([[#if HAVE_DLFCN_H +# include +#endif + ]], [[dlopen(0, 0);]])], + [AC_DEFINE([HAVE_LIBDL], [1], + [Define if you have the libdl library or equivalent.]) + libltdl_cv_func_dlopen="yes" + LT_DLLOADERS="$LT_DLLOADERS ${lt_dlopen_dir+$lt_dlopen_dir/}dlopen.la"], + [AC_CHECK_LIB([svld], [dlopen], + [AC_DEFINE([HAVE_LIBDL], [1], + [Define if you have the libdl library or equivalent.]) + LIBADD_DLOPEN="-lsvld" libltdl_cv_func_dlopen="yes" + LT_DLLOADERS="$LT_DLLOADERS ${lt_dlopen_dir+$lt_dlopen_dir/}dlopen.la"])])]) +if test x"$libltdl_cv_func_dlopen" = xyes || test x"$libltdl_cv_lib_dl_dlopen" = xyes +then + lt_save_LIBS="$LIBS" + LIBS="$LIBS $LIBADD_DLOPEN" + AC_CHECK_FUNCS([dlerror]) + LIBS="$lt_save_LIBS" +fi +AC_SUBST([LIBADD_DLOPEN]) + +LIBADD_SHL_LOAD= +AC_CHECK_FUNC([shl_load], + [AC_DEFINE([HAVE_SHL_LOAD], [1], + [Define if you have the shl_load function.]) + LT_DLLOADERS="$LT_DLLOADERS ${lt_dlopen_dir+$lt_dlopen_dir/}shl_load.la"], + [AC_CHECK_LIB([dld], [shl_load], + [AC_DEFINE([HAVE_SHL_LOAD], [1], + [Define if you have the shl_load function.]) + LT_DLLOADERS="$LT_DLLOADERS ${lt_dlopen_dir+$lt_dlopen_dir/}shl_load.la" + LIBADD_SHL_LOAD="-ldld"])]) +AC_SUBST([LIBADD_SHL_LOAD]) + +case $host_os in +darwin[[1567]].*) +# We only want this for pre-Mac OS X 10.4. 
+ AC_CHECK_FUNC([_dyld_func_lookup], + [AC_DEFINE([HAVE_DYLD], [1], + [Define if you have the _dyld_func_lookup function.]) + LT_DLLOADERS="$LT_DLLOADERS ${lt_dlopen_dir+$lt_dlopen_dir/}dyld.la"]) + ;; +beos*) + LT_DLLOADERS="$LT_DLLOADERS ${lt_dlopen_dir+$lt_dlopen_dir/}load_add_on.la" + ;; +cygwin* | mingw* | os2* | pw32*) + AC_CHECK_DECLS([cygwin_conv_path], [], [], [[#include ]]) + LT_DLLOADERS="$LT_DLLOADERS ${lt_dlopen_dir+$lt_dlopen_dir/}loadlibrary.la" + ;; +esac + +AC_CHECK_LIB([dld], [dld_link], + [AC_DEFINE([HAVE_DLD], [1], + [Define if you have the GNU dld library.]) + LT_DLLOADERS="$LT_DLLOADERS ${lt_dlopen_dir+$lt_dlopen_dir/}dld_link.la"]) +AC_SUBST([LIBADD_DLD_LINK]) + +m4_pattern_allow([^LT_DLPREOPEN$]) +LT_DLPREOPEN= +if test -n "$LT_DLLOADERS" +then + for lt_loader in $LT_DLLOADERS; do + LT_DLPREOPEN="$LT_DLPREOPEN-dlpreopen $lt_loader " + done + AC_DEFINE([HAVE_LIBDLLOADER], [1], + [Define if libdlloader will be built on this platform]) +fi +AC_SUBST([LT_DLPREOPEN]) + +dnl This isn't used anymore, but set it for backwards compatibility +LIBADD_DL="$LIBADD_DLOPEN $LIBADD_SHL_LOAD" +AC_SUBST([LIBADD_DL]) + +AC_LANG_POP +])# LT_LIB_DLLOAD + +# Old name: +AU_ALIAS([AC_LTDL_DLLIB], [LT_LIB_DLLOAD]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LTDL_DLLIB], []) + + +# LT_SYS_SYMBOL_USCORE +# -------------------- +# does the compiler prefix global symbols with an underscore? +AC_DEFUN([LT_SYS_SYMBOL_USCORE], +[m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl +AC_CACHE_CHECK([for _ prefix in compiled symbols], + [lt_cv_sys_symbol_underscore], + [lt_cv_sys_symbol_underscore=no + cat > conftest.$ac_ext <<_LT_EOF +void nm_test_func(){} +int main(){nm_test_func;return 0;} +_LT_EOF + if AC_TRY_EVAL(ac_compile); then + # Now try to grab the symbols. + ac_nlist=conftest.nm + if AC_TRY_EVAL(NM conftest.$ac_objext \| $lt_cv_sys_global_symbol_pipe \> $ac_nlist) && test -s "$ac_nlist"; then + # See whether the symbols have a leading underscore. + if grep '^. _nm_test_func' "$ac_nlist" >/dev/null; then + lt_cv_sys_symbol_underscore=yes + else + if grep '^. 
nm_test_func ' "$ac_nlist" >/dev/null; then + : + else + echo "configure: cannot find nm_test_func in $ac_nlist" >&AS_MESSAGE_LOG_FD + fi + fi + else + echo "configure: cannot run $lt_cv_sys_global_symbol_pipe" >&AS_MESSAGE_LOG_FD + fi + else + echo "configure: failed program was:" >&AS_MESSAGE_LOG_FD + cat conftest.c >&AS_MESSAGE_LOG_FD + fi + rm -rf conftest* + ]) + sys_symbol_underscore=$lt_cv_sys_symbol_underscore + AC_SUBST([sys_symbol_underscore]) +])# LT_SYS_SYMBOL_USCORE + +# Old name: +AU_ALIAS([AC_LTDL_SYMBOL_USCORE], [LT_SYS_SYMBOL_USCORE]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LTDL_SYMBOL_USCORE], []) + + +# LT_FUNC_DLSYM_USCORE +# -------------------- +AC_DEFUN([LT_FUNC_DLSYM_USCORE], +[AC_REQUIRE([LT_SYS_SYMBOL_USCORE])dnl +if test x"$lt_cv_sys_symbol_underscore" = xyes; then + if test x"$libltdl_cv_func_dlopen" = xyes || + test x"$libltdl_cv_lib_dl_dlopen" = xyes ; then + AC_CACHE_CHECK([whether we have to add an underscore for dlsym], + [libltdl_cv_need_uscore], + [libltdl_cv_need_uscore=unknown + save_LIBS="$LIBS" + LIBS="$LIBS $LIBADD_DLOPEN" + _LT_TRY_DLOPEN_SELF( + [libltdl_cv_need_uscore=no], [libltdl_cv_need_uscore=yes], + [], [libltdl_cv_need_uscore=cross]) + LIBS="$save_LIBS" + ]) + fi +fi + +if test x"$libltdl_cv_need_uscore" = xyes; then + AC_DEFINE([NEED_USCORE], [1], + [Define if dlsym() requires a leading underscore in symbol names.]) +fi +])# LT_FUNC_DLSYM_USCORE + +# Old name: +AU_ALIAS([AC_LTDL_DLSYM_USCORE], [LT_FUNC_DLSYM_USCORE]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LTDL_DLSYM_USCORE], []) diff --git a/autoconf/libtool/ltoptions.m4 b/autoconf/libtool/ltoptions.m4 new file mode 100644 index 00000000..5d9acd8e --- /dev/null +++ b/autoconf/libtool/ltoptions.m4 @@ -0,0 +1,384 @@ +# Helper functions for option handling. -*- Autoconf -*- +# +# Copyright (C) 2004, 2005, 2007, 2008, 2009 Free Software Foundation, +# Inc. +# Written by Gary V. Vaughan, 2004 +# +# This file is free software; the Free Software Foundation gives +# unlimited permission to copy and/or distribute it, with or without +# modifications, as long as this notice is preserved. + +# serial 7 ltoptions.m4 + +# This is to help aclocal find these macros, as it can't see m4_define. +AC_DEFUN([LTOPTIONS_VERSION], [m4_if([1])]) + + +# _LT_MANGLE_OPTION(MACRO-NAME, OPTION-NAME) +# ------------------------------------------ +m4_define([_LT_MANGLE_OPTION], +[[_LT_OPTION_]m4_bpatsubst($1__$2, [[^a-zA-Z0-9_]], [_])]) + + +# _LT_SET_OPTION(MACRO-NAME, OPTION-NAME) +# --------------------------------------- +# Set option OPTION-NAME for macro MACRO-NAME, and if there is a +# matching handler defined, dispatch to it. Other OPTION-NAMEs are +# saved as a flag. +m4_define([_LT_SET_OPTION], +[m4_define(_LT_MANGLE_OPTION([$1], [$2]))dnl +m4_ifdef(_LT_MANGLE_DEFUN([$1], [$2]), + _LT_MANGLE_DEFUN([$1], [$2]), + [m4_warning([Unknown $1 option `$2'])])[]dnl +]) + + +# _LT_IF_OPTION(MACRO-NAME, OPTION-NAME, IF-SET, [IF-NOT-SET]) +# ------------------------------------------------------------ +# Execute IF-SET if OPTION is set, IF-NOT-SET otherwise. +m4_define([_LT_IF_OPTION], +[m4_ifdef(_LT_MANGLE_OPTION([$1], [$2]), [$3], [$4])]) + + +# _LT_UNLESS_OPTIONS(MACRO-NAME, OPTION-LIST, IF-NOT-SET) +# ------------------------------------------------------- +# Execute IF-NOT-SET unless all options in OPTION-LIST for MACRO-NAME +# are set. 
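To illustrate the option machinery above, a few expansions of the mangling helper written out as comments; they follow directly from the m4_bpatsubst pattern and can be checked by hand.

  dnl Illustrative expansions, not part of the import:
  dnl   _LT_MANGLE_OPTION([LT_INIT], [dlopen])    =>  _LT_OPTION_LT_INIT__dlopen
  dnl   _LT_MANGLE_OPTION([LT_INIT], [win32-dll]) =>  _LT_OPTION_LT_INIT__win32_dll
  dnl So `_LT_SET_OPTION([LT_INIT], [dlopen])' defines the first name as a flag,
  dnl which _LT_IF_OPTION and _LT_UNLESS_OPTIONS later test with m4_ifdef, and
  dnl dispatches to any handler registered for that option with LT_OPTION_DEFINE
  dnl further below.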
+m4_define([_LT_UNLESS_OPTIONS], +[m4_foreach([_LT_Option], m4_split(m4_normalize([$2])), + [m4_ifdef(_LT_MANGLE_OPTION([$1], _LT_Option), + [m4_define([$0_found])])])[]dnl +m4_ifdef([$0_found], [m4_undefine([$0_found])], [$3 +])[]dnl +]) + + +# _LT_SET_OPTIONS(MACRO-NAME, OPTION-LIST) +# ---------------------------------------- +# OPTION-LIST is a space-separated list of Libtool options associated +# with MACRO-NAME. If any OPTION has a matching handler declared with +# LT_OPTION_DEFINE, dispatch to that macro; otherwise complain about +# the unknown option and exit. +m4_defun([_LT_SET_OPTIONS], +[# Set options +m4_foreach([_LT_Option], m4_split(m4_normalize([$2])), + [_LT_SET_OPTION([$1], _LT_Option)]) + +m4_if([$1],[LT_INIT],[ + dnl + dnl Simply set some default values (i.e off) if boolean options were not + dnl specified: + _LT_UNLESS_OPTIONS([LT_INIT], [dlopen], [enable_dlopen=no + ]) + _LT_UNLESS_OPTIONS([LT_INIT], [win32-dll], [enable_win32_dll=no + ]) + dnl + dnl If no reference was made to various pairs of opposing options, then + dnl we run the default mode handler for the pair. For example, if neither + dnl `shared' nor `disable-shared' was passed, we enable building of shared + dnl archives by default: + _LT_UNLESS_OPTIONS([LT_INIT], [shared disable-shared], [_LT_ENABLE_SHARED]) + _LT_UNLESS_OPTIONS([LT_INIT], [static disable-static], [_LT_ENABLE_STATIC]) + _LT_UNLESS_OPTIONS([LT_INIT], [pic-only no-pic], [_LT_WITH_PIC]) + _LT_UNLESS_OPTIONS([LT_INIT], [fast-install disable-fast-install], + [_LT_ENABLE_FAST_INSTALL]) + ]) +])# _LT_SET_OPTIONS + + +## --------------------------------- ## +## Macros to handle LT_INIT options. ## +## --------------------------------- ## + +# _LT_MANGLE_DEFUN(MACRO-NAME, OPTION-NAME) +# ----------------------------------------- +m4_define([_LT_MANGLE_DEFUN], +[[_LT_OPTION_DEFUN_]m4_bpatsubst(m4_toupper([$1__$2]), [[^A-Z0-9_]], [_])]) + + +# LT_OPTION_DEFINE(MACRO-NAME, OPTION-NAME, CODE) +# ----------------------------------------------- +m4_define([LT_OPTION_DEFINE], +[m4_define(_LT_MANGLE_DEFUN([$1], [$2]), [$3])[]dnl +])# LT_OPTION_DEFINE + + +# dlopen +# ------ +LT_OPTION_DEFINE([LT_INIT], [dlopen], [enable_dlopen=yes +]) + +AU_DEFUN([AC_LIBTOOL_DLOPEN], +[_LT_SET_OPTION([LT_INIT], [dlopen]) +AC_DIAGNOSE([obsolete], +[$0: Remove this warning and the call to _LT_SET_OPTION when you +put the `dlopen' option into LT_INIT's first parameter.]) +]) + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_DLOPEN], []) + + +# win32-dll +# --------- +# Declare package support for building win32 dll's. 
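As a sketch of the defaulting logic in _LT_SET_OPTIONS above, two hypothetical LT_INIT calls and the settings they leave behind; the option lists are examples only.

  dnl Illustrative behaviour, not part of the import:
  dnl   LT_INIT
  dnl     => enable_dlopen=no, enable_win32_dll=no, shared, static and
  dnl        fast-install default to yes, pic_mode defaults to `default'.
  dnl   LT_INIT([dlopen disable-static])
  dnl     => enable_dlopen=yes via the dlopen handler above; static archives
  dnl        default to no but remain overridable with --enable-static.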
+LT_OPTION_DEFINE([LT_INIT], [win32-dll], +[enable_win32_dll=yes + +case $host in +*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-cegcc*) + AC_CHECK_TOOL(AS, as, false) + AC_CHECK_TOOL(DLLTOOL, dlltool, false) + AC_CHECK_TOOL(OBJDUMP, objdump, false) + ;; +esac + +test -z "$AS" && AS=as +_LT_DECL([], [AS], [1], [Assembler program])dnl + +test -z "$DLLTOOL" && DLLTOOL=dlltool +_LT_DECL([], [DLLTOOL], [1], [DLL creation program])dnl + +test -z "$OBJDUMP" && OBJDUMP=objdump +_LT_DECL([], [OBJDUMP], [1], [Object dumper program])dnl +])# win32-dll + +AU_DEFUN([AC_LIBTOOL_WIN32_DLL], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +_LT_SET_OPTION([LT_INIT], [win32-dll]) +AC_DIAGNOSE([obsolete], +[$0: Remove this warning and the call to _LT_SET_OPTION when you +put the `win32-dll' option into LT_INIT's first parameter.]) +]) + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_WIN32_DLL], []) + + +# _LT_ENABLE_SHARED([DEFAULT]) +# ---------------------------- +# implement the --enable-shared flag, and supports the `shared' and +# `disable-shared' LT_INIT options. +# DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. +m4_define([_LT_ENABLE_SHARED], +[m4_define([_LT_ENABLE_SHARED_DEFAULT], [m4_if($1, no, no, yes)])dnl +AC_ARG_ENABLE([shared], + [AS_HELP_STRING([--enable-shared@<:@=PKGS@:>@], + [build shared libraries @<:@default=]_LT_ENABLE_SHARED_DEFAULT[@:>@])], + [p=${PACKAGE-default} + case $enableval in + yes) enable_shared=yes ;; + no) enable_shared=no ;; + *) + enable_shared=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_shared=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac], + [enable_shared=]_LT_ENABLE_SHARED_DEFAULT) + + _LT_DECL([build_libtool_libs], [enable_shared], [0], + [Whether or not to build shared libraries]) +])# _LT_ENABLE_SHARED + +LT_OPTION_DEFINE([LT_INIT], [shared], [_LT_ENABLE_SHARED([yes])]) +LT_OPTION_DEFINE([LT_INIT], [disable-shared], [_LT_ENABLE_SHARED([no])]) + +# Old names: +AC_DEFUN([AC_ENABLE_SHARED], +[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[shared]) +]) + +AC_DEFUN([AC_DISABLE_SHARED], +[_LT_SET_OPTION([LT_INIT], [disable-shared]) +]) + +AU_DEFUN([AM_ENABLE_SHARED], [AC_ENABLE_SHARED($@)]) +AU_DEFUN([AM_DISABLE_SHARED], [AC_DISABLE_SHARED($@)]) + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AM_ENABLE_SHARED], []) +dnl AC_DEFUN([AM_DISABLE_SHARED], []) + + + +# _LT_ENABLE_STATIC([DEFAULT]) +# ---------------------------- +# implement the --enable-static flag, and support the `static' and +# `disable-static' LT_INIT options. +# DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. +m4_define([_LT_ENABLE_STATIC], +[m4_define([_LT_ENABLE_STATIC_DEFAULT], [m4_if($1, no, no, yes)])dnl +AC_ARG_ENABLE([static], + [AS_HELP_STRING([--enable-static@<:@=PKGS@:>@], + [build static libraries @<:@default=]_LT_ENABLE_STATIC_DEFAULT[@:>@])], + [p=${PACKAGE-default} + case $enableval in + yes) enable_static=yes ;; + no) enable_static=no ;; + *) + enable_static=no + # Look at the argument we got. We use all the common list separators. 
+ lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_static=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac], + [enable_static=]_LT_ENABLE_STATIC_DEFAULT) + + _LT_DECL([build_old_libs], [enable_static], [0], + [Whether or not to build static libraries]) +])# _LT_ENABLE_STATIC + +LT_OPTION_DEFINE([LT_INIT], [static], [_LT_ENABLE_STATIC([yes])]) +LT_OPTION_DEFINE([LT_INIT], [disable-static], [_LT_ENABLE_STATIC([no])]) + +# Old names: +AC_DEFUN([AC_ENABLE_STATIC], +[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[static]) +]) + +AC_DEFUN([AC_DISABLE_STATIC], +[_LT_SET_OPTION([LT_INIT], [disable-static]) +]) + +AU_DEFUN([AM_ENABLE_STATIC], [AC_ENABLE_STATIC($@)]) +AU_DEFUN([AM_DISABLE_STATIC], [AC_DISABLE_STATIC($@)]) + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AM_ENABLE_STATIC], []) +dnl AC_DEFUN([AM_DISABLE_STATIC], []) + + + +# _LT_ENABLE_FAST_INSTALL([DEFAULT]) +# ---------------------------------- +# implement the --enable-fast-install flag, and support the `fast-install' +# and `disable-fast-install' LT_INIT options. +# DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. +m4_define([_LT_ENABLE_FAST_INSTALL], +[m4_define([_LT_ENABLE_FAST_INSTALL_DEFAULT], [m4_if($1, no, no, yes)])dnl +AC_ARG_ENABLE([fast-install], + [AS_HELP_STRING([--enable-fast-install@<:@=PKGS@:>@], + [optimize for fast installation @<:@default=]_LT_ENABLE_FAST_INSTALL_DEFAULT[@:>@])], + [p=${PACKAGE-default} + case $enableval in + yes) enable_fast_install=yes ;; + no) enable_fast_install=no ;; + *) + enable_fast_install=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_fast_install=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac], + [enable_fast_install=]_LT_ENABLE_FAST_INSTALL_DEFAULT) + +_LT_DECL([fast_install], [enable_fast_install], [0], + [Whether or not to optimize for fast installation])dnl +])# _LT_ENABLE_FAST_INSTALL + +LT_OPTION_DEFINE([LT_INIT], [fast-install], [_LT_ENABLE_FAST_INSTALL([yes])]) +LT_OPTION_DEFINE([LT_INIT], [disable-fast-install], [_LT_ENABLE_FAST_INSTALL([no])]) + +# Old names: +AU_DEFUN([AC_ENABLE_FAST_INSTALL], +[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[fast-install]) +AC_DIAGNOSE([obsolete], +[$0: Remove this warning and the call to _LT_SET_OPTION when you put +the `fast-install' option into LT_INIT's first parameter.]) +]) + +AU_DEFUN([AC_DISABLE_FAST_INSTALL], +[_LT_SET_OPTION([LT_INIT], [disable-fast-install]) +AC_DIAGNOSE([obsolete], +[$0: Remove this warning and the call to _LT_SET_OPTION when you put +the `disable-fast-install' option into LT_INIT's first parameter.]) +]) + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_ENABLE_FAST_INSTALL], []) +dnl AC_DEFUN([AM_DISABLE_FAST_INSTALL], []) + + +# _LT_WITH_PIC([MODE]) +# -------------------- +# implement the --with-pic flag, and support the `pic-only' and `no-pic' +# LT_INIT options. +# MODE is either `yes' or `no'. If omitted, it defaults to `both'. +m4_define([_LT_WITH_PIC], +[AC_ARG_WITH([pic], + [AS_HELP_STRING([--with-pic@<:@=PKGS@:>@], + [try to use only PIC/non-PIC objects @<:@default=use both@:>@])], + [lt_p=${PACKAGE-default} + case $withval in + yes|no) pic_mode=$withval ;; + *) + pic_mode=default + # Look at the argument we got. We use all the common list separators. 
+ lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for lt_pkg in $withval; do + IFS="$lt_save_ifs" + if test "X$lt_pkg" = "X$lt_p"; then + pic_mode=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac], + [pic_mode=default]) + +test -z "$pic_mode" && pic_mode=m4_default([$1], [default]) + +_LT_DECL([], [pic_mode], [0], [What type of objects to build])dnl +])# _LT_WITH_PIC + +LT_OPTION_DEFINE([LT_INIT], [pic-only], [_LT_WITH_PIC([yes])]) +LT_OPTION_DEFINE([LT_INIT], [no-pic], [_LT_WITH_PIC([no])]) + +# Old name: +AU_DEFUN([AC_LIBTOOL_PICMODE], +[_LT_SET_OPTION([LT_INIT], [pic-only]) +AC_DIAGNOSE([obsolete], +[$0: Remove this warning and the call to _LT_SET_OPTION when you +put the `pic-only' option into LT_INIT's first parameter.]) +]) + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_PICMODE], []) + +## ----------------- ## +## LTDL_INIT Options ## +## ----------------- ## + +m4_define([_LTDL_MODE], []) +LT_OPTION_DEFINE([LTDL_INIT], [nonrecursive], + [m4_define([_LTDL_MODE], [nonrecursive])]) +LT_OPTION_DEFINE([LTDL_INIT], [recursive], + [m4_define([_LTDL_MODE], [recursive])]) +LT_OPTION_DEFINE([LTDL_INIT], [subproject], + [m4_define([_LTDL_MODE], [subproject])]) + +m4_define([_LTDL_TYPE], []) +LT_OPTION_DEFINE([LTDL_INIT], [installable], + [m4_define([_LTDL_TYPE], [installable])]) +LT_OPTION_DEFINE([LTDL_INIT], [convenience], + [m4_define([_LTDL_TYPE], [convenience])]) diff --git a/autoconf/libtool/ltsugar.m4 b/autoconf/libtool/ltsugar.m4 new file mode 100644 index 00000000..9000a057 --- /dev/null +++ b/autoconf/libtool/ltsugar.m4 @@ -0,0 +1,123 @@ +# ltsugar.m4 -- libtool m4 base layer. -*-Autoconf-*- +# +# Copyright (C) 2004, 2005, 2007, 2008 Free Software Foundation, Inc. +# Written by Gary V. Vaughan, 2004 +# +# This file is free software; the Free Software Foundation gives +# unlimited permission to copy and/or distribute it, with or without +# modifications, as long as this notice is preserved. + +# serial 6 ltsugar.m4 + +# This is to help aclocal find these macros, as it can't see m4_define. +AC_DEFUN([LTSUGAR_VERSION], [m4_if([0.1])]) + + +# lt_join(SEP, ARG1, [ARG2...]) +# ----------------------------- +# Produce ARG1SEPARG2...SEPARGn, omitting [] arguments and their +# associated separator. +# Needed until we can rely on m4_join from Autoconf 2.62, since all earlier +# versions in m4sugar had bugs. +m4_define([lt_join], +[m4_if([$#], [1], [], + [$#], [2], [[$2]], + [m4_if([$2], [], [], [[$2]_])$0([$1], m4_shift(m4_shift($@)))])]) +m4_define([_lt_join], +[m4_if([$#$2], [2], [], + [m4_if([$2], [], [], [[$1$2]])$0([$1], m4_shift(m4_shift($@)))])]) + + +# lt_car(LIST) +# lt_cdr(LIST) +# ------------ +# Manipulate m4 lists. +# These macros are necessary as long as will still need to support +# Autoconf-2.59 which quotes differently. +m4_define([lt_car], [[$1]]) +m4_define([lt_cdr], +[m4_if([$#], 0, [m4_fatal([$0: cannot be called without arguments])], + [$#], 1, [], + [m4_dquote(m4_shift($@))])]) +m4_define([lt_unquote], $1) + + +# lt_append(MACRO-NAME, STRING, [SEPARATOR]) +# ------------------------------------------ +# Redefine MACRO-NAME to hold its former content plus `SEPARATOR'`STRING'. +# Note that neither SEPARATOR nor STRING are expanded; they are appended +# to MACRO-NAME as is (leaving the expansion for when MACRO-NAME is invoked). +# No SEPARATOR is output if MACRO-NAME was previously undefined (different +# than defined and empty). 
+# +# This macro is needed until we can rely on Autoconf 2.62, since earlier +# versions of m4sugar mistakenly expanded SEPARATOR but not STRING. +m4_define([lt_append], +[m4_define([$1], + m4_ifdef([$1], [m4_defn([$1])[$3]])[$2])]) + + + +# lt_combine(SEP, PREFIX-LIST, INFIX, SUFFIX1, [SUFFIX2...]) +# ---------------------------------------------------------- +# Produce a SEP delimited list of all paired combinations of elements of +# PREFIX-LIST with SUFFIX1 through SUFFIXn. Each element of the list +# has the form PREFIXmINFIXSUFFIXn. +# Needed until we can rely on m4_combine added in Autoconf 2.62. +m4_define([lt_combine], +[m4_if(m4_eval([$# > 3]), [1], + [m4_pushdef([_Lt_sep], [m4_define([_Lt_sep], m4_defn([lt_car]))])]]dnl +[[m4_foreach([_Lt_prefix], [$2], + [m4_foreach([_Lt_suffix], + ]m4_dquote(m4_dquote(m4_shift(m4_shift(m4_shift($@)))))[, + [_Lt_sep([$1])[]m4_defn([_Lt_prefix])[$3]m4_defn([_Lt_suffix])])])])]) + + +# lt_if_append_uniq(MACRO-NAME, VARNAME, [SEPARATOR], [UNIQ], [NOT-UNIQ]) +# ----------------------------------------------------------------------- +# Iff MACRO-NAME does not yet contain VARNAME, then append it (delimited +# by SEPARATOR if supplied) and expand UNIQ, else NOT-UNIQ. +m4_define([lt_if_append_uniq], +[m4_ifdef([$1], + [m4_if(m4_index([$3]m4_defn([$1])[$3], [$3$2$3]), [-1], + [lt_append([$1], [$2], [$3])$4], + [$5])], + [lt_append([$1], [$2], [$3])$4])]) + + +# lt_dict_add(DICT, KEY, VALUE) +# ----------------------------- +m4_define([lt_dict_add], +[m4_define([$1($2)], [$3])]) + + +# lt_dict_add_subkey(DICT, KEY, SUBKEY, VALUE) +# -------------------------------------------- +m4_define([lt_dict_add_subkey], +[m4_define([$1($2:$3)], [$4])]) + + +# lt_dict_fetch(DICT, KEY, [SUBKEY]) +# ---------------------------------- +m4_define([lt_dict_fetch], +[m4_ifval([$3], + m4_ifdef([$1($2:$3)], [m4_defn([$1($2:$3)])]), + m4_ifdef([$1($2)], [m4_defn([$1($2)])]))]) + + +# lt_if_dict_fetch(DICT, KEY, [SUBKEY], VALUE, IF-TRUE, [IF-FALSE]) +# ----------------------------------------------------------------- +m4_define([lt_if_dict_fetch], +[m4_if(lt_dict_fetch([$1], [$2], [$3]), [$4], + [$5], + [$6])]) + + +# lt_dict_filter(DICT, [SUBKEY], VALUE, [SEPARATOR], KEY, [...]) +# -------------------------------------------------------------- +m4_define([lt_dict_filter], +[m4_if([$5], [], [], + [lt_join(m4_quote(m4_default([$4], [[, ]])), + lt_unquote(m4_split(m4_normalize(m4_foreach(_Lt_key, lt_car([m4_shiftn(4, $@)]), + [lt_if_dict_fetch([$1], _Lt_key, [$2], [$3], [_Lt_key ])])))))])[]dnl +]) diff --git a/autoconf/libtool/ltversion.m4 b/autoconf/libtool/ltversion.m4 new file mode 100644 index 00000000..07a8602d --- /dev/null +++ b/autoconf/libtool/ltversion.m4 @@ -0,0 +1,23 @@ +# ltversion.m4 -- version numbers -*- Autoconf -*- +# +# Copyright (C) 2004 Free Software Foundation, Inc. +# Written by Scott James Remnant, 2004 +# +# This file is free software; the Free Software Foundation gives +# unlimited permission to copy and/or distribute it, with or without +# modifications, as long as this notice is preserved. 
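Returning to the ltsugar list and dictionary helpers defined above, a few worked expansions; the macro name `flags', the dictionary name `cc' and its keys are made up for illustration.

  dnl Worked examples, verifiable against the definitions above:
  dnl   m4_define([flags], [-Wall])
  dnl   lt_append([flags], [-g], [ ])       =>  `flags' now expands to `-Wall -g'
  dnl   lt_dict_add([cc], [vendor], [gnu])  =>  defines the macro `cc(vendor)'
  dnl   lt_dict_fetch([cc], [vendor])       =>  expands to `gnu'
  dnl   lt_if_dict_fetch([cc], [vendor], [], [gnu], [is-gcc], [not-gcc])
  dnl                                       =>  expands to `is-gcc'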
+ +# @configure_input@ + +# serial 3337 ltversion.m4 +# This file is part of GNU Libtool + +m4_define([LT_PACKAGE_VERSION], [2.4.2]) +m4_define([LT_PACKAGE_REVISION], [1.3337]) + +AC_DEFUN([LTVERSION_VERSION], +[macro_version='2.4.2' +macro_revision='1.3337' +_LT_DECL(, macro_version, 0, [Which release of libtool.m4 was used?]) +_LT_DECL(, macro_revision, 0) +]) diff --git a/autoconf/libtool/lt~obsolete.m4 b/autoconf/libtool/lt~obsolete.m4 new file mode 100644 index 00000000..c573da90 --- /dev/null +++ b/autoconf/libtool/lt~obsolete.m4 @@ -0,0 +1,98 @@ +# lt~obsolete.m4 -- aclocal satisfying obsolete definitions. -*-Autoconf-*- +# +# Copyright (C) 2004, 2005, 2007, 2009 Free Software Foundation, Inc. +# Written by Scott James Remnant, 2004. +# +# This file is free software; the Free Software Foundation gives +# unlimited permission to copy and/or distribute it, with or without +# modifications, as long as this notice is preserved. + +# serial 5 lt~obsolete.m4 + +# These exist entirely to fool aclocal when bootstrapping libtool. +# +# In the past libtool.m4 has provided macros via AC_DEFUN (or AU_DEFUN) +# which have later been changed to m4_define as they aren't part of the +# exported API, or moved to Autoconf or Automake where they belong. +# +# The trouble is, aclocal is a bit thick. It'll see the old AC_DEFUN +# in /usr/share/aclocal/libtool.m4 and remember it, then when it sees us +# using a macro with the same name in our local m4/libtool.m4 it'll +# pull the old libtool.m4 in (it doesn't see our shiny new m4_define +# and doesn't know about Autoconf macros at all.) +# +# So we provide this file, which has a silly filename so it's always +# included after everything else. This provides aclocal with the +# AC_DEFUNs it wants, but when m4 processes it, it doesn't do anything +# because those macros already exist, or will be overwritten later. +# We use AC_DEFUN over AU_DEFUN for compatibility with aclocal-1.6. +# +# Anytime we withdraw an AC_DEFUN or AU_DEFUN, remember to add it here. +# Yes, that means every name once taken will need to remain here until +# we give up compatibility with versions before 1.7, at which point +# we need to keep only those names which we still refer to. + +# This is to help aclocal find these macros, as it can't see m4_define. 
+AC_DEFUN([LTOBSOLETE_VERSION], [m4_if([1])]) + +m4_ifndef([AC_LIBTOOL_LINKER_OPTION], [AC_DEFUN([AC_LIBTOOL_LINKER_OPTION])]) +m4_ifndef([AC_PROG_EGREP], [AC_DEFUN([AC_PROG_EGREP])]) +m4_ifndef([_LT_AC_PROG_ECHO_BACKSLASH], [AC_DEFUN([_LT_AC_PROG_ECHO_BACKSLASH])]) +m4_ifndef([_LT_AC_SHELL_INIT], [AC_DEFUN([_LT_AC_SHELL_INIT])]) +m4_ifndef([_LT_AC_SYS_LIBPATH_AIX], [AC_DEFUN([_LT_AC_SYS_LIBPATH_AIX])]) +m4_ifndef([_LT_PROG_LTMAIN], [AC_DEFUN([_LT_PROG_LTMAIN])]) +m4_ifndef([_LT_AC_TAGVAR], [AC_DEFUN([_LT_AC_TAGVAR])]) +m4_ifndef([AC_LTDL_ENABLE_INSTALL], [AC_DEFUN([AC_LTDL_ENABLE_INSTALL])]) +m4_ifndef([AC_LTDL_PREOPEN], [AC_DEFUN([AC_LTDL_PREOPEN])]) +m4_ifndef([_LT_AC_SYS_COMPILER], [AC_DEFUN([_LT_AC_SYS_COMPILER])]) +m4_ifndef([_LT_AC_LOCK], [AC_DEFUN([_LT_AC_LOCK])]) +m4_ifndef([AC_LIBTOOL_SYS_OLD_ARCHIVE], [AC_DEFUN([AC_LIBTOOL_SYS_OLD_ARCHIVE])]) +m4_ifndef([_LT_AC_TRY_DLOPEN_SELF], [AC_DEFUN([_LT_AC_TRY_DLOPEN_SELF])]) +m4_ifndef([AC_LIBTOOL_PROG_CC_C_O], [AC_DEFUN([AC_LIBTOOL_PROG_CC_C_O])]) +m4_ifndef([AC_LIBTOOL_SYS_HARD_LINK_LOCKS], [AC_DEFUN([AC_LIBTOOL_SYS_HARD_LINK_LOCKS])]) +m4_ifndef([AC_LIBTOOL_OBJDIR], [AC_DEFUN([AC_LIBTOOL_OBJDIR])]) +m4_ifndef([AC_LTDL_OBJDIR], [AC_DEFUN([AC_LTDL_OBJDIR])]) +m4_ifndef([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH], [AC_DEFUN([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH])]) +m4_ifndef([AC_LIBTOOL_SYS_LIB_STRIP], [AC_DEFUN([AC_LIBTOOL_SYS_LIB_STRIP])]) +m4_ifndef([AC_PATH_MAGIC], [AC_DEFUN([AC_PATH_MAGIC])]) +m4_ifndef([AC_PROG_LD_GNU], [AC_DEFUN([AC_PROG_LD_GNU])]) +m4_ifndef([AC_PROG_LD_RELOAD_FLAG], [AC_DEFUN([AC_PROG_LD_RELOAD_FLAG])]) +m4_ifndef([AC_DEPLIBS_CHECK_METHOD], [AC_DEFUN([AC_DEPLIBS_CHECK_METHOD])]) +m4_ifndef([AC_LIBTOOL_PROG_COMPILER_NO_RTTI], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_NO_RTTI])]) +m4_ifndef([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE], [AC_DEFUN([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE])]) +m4_ifndef([AC_LIBTOOL_PROG_COMPILER_PIC], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_PIC])]) +m4_ifndef([AC_LIBTOOL_PROG_LD_SHLIBS], [AC_DEFUN([AC_LIBTOOL_PROG_LD_SHLIBS])]) +m4_ifndef([AC_LIBTOOL_POSTDEP_PREDEP], [AC_DEFUN([AC_LIBTOOL_POSTDEP_PREDEP])]) +m4_ifndef([LT_AC_PROG_EGREP], [AC_DEFUN([LT_AC_PROG_EGREP])]) +m4_ifndef([LT_AC_PROG_SED], [AC_DEFUN([LT_AC_PROG_SED])]) +m4_ifndef([_LT_CC_BASENAME], [AC_DEFUN([_LT_CC_BASENAME])]) +m4_ifndef([_LT_COMPILER_BOILERPLATE], [AC_DEFUN([_LT_COMPILER_BOILERPLATE])]) +m4_ifndef([_LT_LINKER_BOILERPLATE], [AC_DEFUN([_LT_LINKER_BOILERPLATE])]) +m4_ifndef([_AC_PROG_LIBTOOL], [AC_DEFUN([_AC_PROG_LIBTOOL])]) +m4_ifndef([AC_LIBTOOL_SETUP], [AC_DEFUN([AC_LIBTOOL_SETUP])]) +m4_ifndef([_LT_AC_CHECK_DLFCN], [AC_DEFUN([_LT_AC_CHECK_DLFCN])]) +m4_ifndef([AC_LIBTOOL_SYS_DYNAMIC_LINKER], [AC_DEFUN([AC_LIBTOOL_SYS_DYNAMIC_LINKER])]) +m4_ifndef([_LT_AC_TAGCONFIG], [AC_DEFUN([_LT_AC_TAGCONFIG])]) +m4_ifndef([AC_DISABLE_FAST_INSTALL], [AC_DEFUN([AC_DISABLE_FAST_INSTALL])]) +m4_ifndef([_LT_AC_LANG_CXX], [AC_DEFUN([_LT_AC_LANG_CXX])]) +m4_ifndef([_LT_AC_LANG_F77], [AC_DEFUN([_LT_AC_LANG_F77])]) +m4_ifndef([_LT_AC_LANG_GCJ], [AC_DEFUN([_LT_AC_LANG_GCJ])]) +m4_ifndef([AC_LIBTOOL_LANG_C_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_C_CONFIG])]) +m4_ifndef([_LT_AC_LANG_C_CONFIG], [AC_DEFUN([_LT_AC_LANG_C_CONFIG])]) +m4_ifndef([AC_LIBTOOL_LANG_CXX_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_CXX_CONFIG])]) +m4_ifndef([_LT_AC_LANG_CXX_CONFIG], [AC_DEFUN([_LT_AC_LANG_CXX_CONFIG])]) +m4_ifndef([AC_LIBTOOL_LANG_F77_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_F77_CONFIG])]) +m4_ifndef([_LT_AC_LANG_F77_CONFIG], [AC_DEFUN([_LT_AC_LANG_F77_CONFIG])]) 
+m4_ifndef([AC_LIBTOOL_LANG_GCJ_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_GCJ_CONFIG])]) +m4_ifndef([_LT_AC_LANG_GCJ_CONFIG], [AC_DEFUN([_LT_AC_LANG_GCJ_CONFIG])]) +m4_ifndef([AC_LIBTOOL_LANG_RC_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_RC_CONFIG])]) +m4_ifndef([_LT_AC_LANG_RC_CONFIG], [AC_DEFUN([_LT_AC_LANG_RC_CONFIG])]) +m4_ifndef([AC_LIBTOOL_CONFIG], [AC_DEFUN([AC_LIBTOOL_CONFIG])]) +m4_ifndef([_LT_AC_FILE_LTDLL_C], [AC_DEFUN([_LT_AC_FILE_LTDLL_C])]) +m4_ifndef([_LT_REQUIRED_DARWIN_CHECKS], [AC_DEFUN([_LT_REQUIRED_DARWIN_CHECKS])]) +m4_ifndef([_LT_AC_PROG_CXXCPP], [AC_DEFUN([_LT_AC_PROG_CXXCPP])]) +m4_ifndef([_LT_PREPARE_SED_QUOTE_VARS], [AC_DEFUN([_LT_PREPARE_SED_QUOTE_VARS])]) +m4_ifndef([_LT_PROG_ECHO_BACKSLASH], [AC_DEFUN([_LT_PROG_ECHO_BACKSLASH])]) +m4_ifndef([_LT_PROG_F77], [AC_DEFUN([_LT_PROG_F77])]) +m4_ifndef([_LT_PROG_FC], [AC_DEFUN([_LT_PROG_FC])]) +m4_ifndef([_LT_PROG_CXX], [AC_DEFUN([_LT_PROG_CXX])]) diff --git a/autoconf/ltmain.sh b/autoconf/ltmain.sh new file mode 100644 index 00000000..a356acaf --- /dev/null +++ b/autoconf/ltmain.sh @@ -0,0 +1,9661 @@ + +# libtool (GNU libtool) 2.4.2 +# Written by Gordon Matzigkeit , 1996 + +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, 2006, +# 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc. +# This is free software; see the source for copying conditions. There is NO +# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + +# GNU Libtool is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# As a special exception to the GNU General Public License, +# if you distribute this file as part of a program or library that +# is built using GNU Libtool, you may include this file under the +# same distribution terms that you use for the rest of that program. +# +# GNU Libtool is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GNU Libtool; see the file COPYING. If not, a copy +# can be downloaded from http://www.gnu.org/licenses/gpl.html, +# or obtained by writing to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + +# Usage: $progname [OPTION]... [MODE-ARG]... +# +# Provide generalized library-building support services. 
+# +# --config show all configuration variables +# --debug enable verbose shell tracing +# -n, --dry-run display commands without modifying any files +# --features display basic configuration information and exit +# --mode=MODE use operation mode MODE +# --preserve-dup-deps don't remove duplicate dependency libraries +# --quiet, --silent don't print informational messages +# --no-quiet, --no-silent +# print informational messages (default) +# --no-warn don't display warning messages +# --tag=TAG use configuration variables from tag TAG +# -v, --verbose print more informational messages than default +# --no-verbose don't print the extra informational messages +# --version print version information +# -h, --help, --help-all print short, long, or detailed help message +# +# MODE must be one of the following: +# +# clean remove files from the build directory +# compile compile a source file into a libtool object +# execute automatically set library path, then run a program +# finish complete the installation of libtool libraries +# install install libraries or executables +# link create a library or an executable +# uninstall remove libraries from an installed directory +# +# MODE-ARGS vary depending on the MODE. When passed as first option, +# `--mode=MODE' may be abbreviated as `MODE' or a unique abbreviation of that. +# Try `$progname --help --mode=MODE' for a more detailed description of MODE. +# +# When reporting a bug, please describe a test case to reproduce it and +# include the following information: +# +# host-triplet: $host +# shell: $SHELL +# compiler: $LTCC +# compiler flags: $LTCFLAGS +# linker: $LD (gnu? $with_gnu_ld) +# $progname: (GNU libtool) 2.4.2 Debian-2.4.2-1.7ubuntu1 +# automake: $automake_version +# autoconf: $autoconf_version +# +# Report bugs to . +# GNU libtool home page: . +# General help using GNU software: . + +PROGRAM=libtool +PACKAGE=libtool +VERSION="2.4.2 Debian-2.4.2-1.7ubuntu1" +TIMESTAMP="" +package_revision=1.3337 + +# Be Bourne compatible +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then + emulate sh + NULLCMD=: + # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in *posix*) set -o posix;; esac +fi +BIN_SH=xpg4; export BIN_SH # for Tru64 +DUALCASE=1; export DUALCASE # for MKS sh + +# A function that is used when there is no print builtin or printf. +func_fallback_echo () +{ + eval 'cat <<_LTECHO_EOF +$1 +_LTECHO_EOF' +} + +# NLS nuisances: We save the old values to restore during execute mode. +lt_user_locale= +lt_safe_locale= +for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES +do + eval "if test \"\${$lt_var+set}\" = set; then + save_$lt_var=\$$lt_var + $lt_var=C + export $lt_var + lt_user_locale=\"$lt_var=\\\$save_\$lt_var; \$lt_user_locale\" + lt_safe_locale=\"$lt_var=C; \$lt_safe_locale\" + fi" +done +LC_ALL=C +LANGUAGE=C +export LANGUAGE LC_ALL + +$lt_unset CDPATH + + +# Work around backward compatibility issue on IRIX 6.5. On IRIX 6.4+, sh +# is ksh but when the shell is invoked as "sh" and the current value of +# the _XPG environment variable is not equal to 1 (one), the special +# positional parameter $0, within a function call, is the name of the +# function. 
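As a usage illustration of the modes listed above, typical invocations of the generated script look like the following; the compiler, file and directory names are examples only.

  # Illustrative invocations, not part of the import:
  libtool --mode=compile gcc -g -O -c foo.c                      # creates foo.lo
  libtool --mode=link gcc -g -O -o libfoo.la foo.lo -rpath /usr/local/lib
  libtool --mode=install install -c libfoo.la /usr/local/lib/libfoo.la
  libtool --mode=execute gdb ./myprog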
+progpath="$0" + + + +: ${CP="cp -f"} +test "${ECHO+set}" = set || ECHO=${as_echo-'printf %s\n'} +: ${MAKE="make"} +: ${MKDIR="mkdir"} +: ${MV="mv -f"} +: ${RM="rm -f"} +: ${SHELL="${CONFIG_SHELL-/bin/sh}"} +: ${Xsed="$SED -e 1s/^X//"} + +# Global variables: +EXIT_SUCCESS=0 +EXIT_FAILURE=1 +EXIT_MISMATCH=63 # $? = 63 is used to indicate version mismatch to missing. +EXIT_SKIP=77 # $? = 77 is used to indicate a skipped test to automake. + +exit_status=$EXIT_SUCCESS + +# Make sure IFS has a sensible default +lt_nl=' +' +IFS=" $lt_nl" + +dirname="s,/[^/]*$,," +basename="s,^.*/,," + +# func_dirname file append nondir_replacement +# Compute the dirname of FILE. If nonempty, add APPEND to the result, +# otherwise set result to NONDIR_REPLACEMENT. +func_dirname () +{ + func_dirname_result=`$ECHO "${1}" | $SED "$dirname"` + if test "X$func_dirname_result" = "X${1}"; then + func_dirname_result="${3}" + else + func_dirname_result="$func_dirname_result${2}" + fi +} # func_dirname may be replaced by extended shell implementation + + +# func_basename file +func_basename () +{ + func_basename_result=`$ECHO "${1}" | $SED "$basename"` +} # func_basename may be replaced by extended shell implementation + + +# func_dirname_and_basename file append nondir_replacement +# perform func_basename and func_dirname in a single function +# call: +# dirname: Compute the dirname of FILE. If nonempty, +# add APPEND to the result, otherwise set result +# to NONDIR_REPLACEMENT. +# value returned in "$func_dirname_result" +# basename: Compute filename of FILE. +# value retuned in "$func_basename_result" +# Implementation must be kept synchronized with func_dirname +# and func_basename. For efficiency, we do not delegate to +# those functions but instead duplicate the functionality here. +func_dirname_and_basename () +{ + # Extract subdirectory from the argument. + func_dirname_result=`$ECHO "${1}" | $SED -e "$dirname"` + if test "X$func_dirname_result" = "X${1}"; then + func_dirname_result="${3}" + else + func_dirname_result="$func_dirname_result${2}" + fi + func_basename_result=`$ECHO "${1}" | $SED -e "$basename"` +} # func_dirname_and_basename may be replaced by extended shell implementation + + +# func_stripname prefix suffix name +# strip PREFIX and SUFFIX off of NAME. +# PREFIX and SUFFIX must not contain globbing or regex special +# characters, hashes, percent signs, but SUFFIX may contain a leading +# dot (in which case that matches only a dot). +# func_strip_suffix prefix name +func_stripname () +{ + case ${2} in + .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;; + *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;; + esac +} # func_stripname may be replaced by extended shell implementation + + +# These SED scripts presuppose an absolute path with a trailing slash. +pathcar='s,^/\([^/]*\).*$,\1,' +pathcdr='s,^/[^/]*,,' +removedotparts=':dotsl + s@/\./@/@g + t dotsl + s,/\.$,/,' +collapseslashes='s@/\{1,\}@/@g' +finalslash='s,/*$,/,' + +# func_normal_abspath PATH +# Remove doubled-up and trailing slashes, "." path components, +# and cancel out any ".." path components in PATH after making +# it an absolute path. +# value returned in "$func_normal_abspath_result" +func_normal_abspath () +{ + # Start from root dir and reassemble the path. + func_normal_abspath_result= + func_normal_abspath_tpath=$1 + func_normal_abspath_altnamespace= + case $func_normal_abspath_tpath in + "") + # Empty path, that just means $cwd. 
+ func_stripname '' '/' "`pwd`" + func_normal_abspath_result=$func_stripname_result + return + ;; + # The next three entries are used to spot a run of precisely + # two leading slashes without using negated character classes; + # we take advantage of case's first-match behaviour. + ///*) + # Unusual form of absolute path, do nothing. + ;; + //*) + # Not necessarily an ordinary path; POSIX reserves leading '//' + # and for example Cygwin uses it to access remote file shares + # over CIFS/SMB, so we conserve a leading double slash if found. + func_normal_abspath_altnamespace=/ + ;; + /*) + # Absolute path, do nothing. + ;; + *) + # Relative path, prepend $cwd. + func_normal_abspath_tpath=`pwd`/$func_normal_abspath_tpath + ;; + esac + # Cancel out all the simple stuff to save iterations. We also want + # the path to end with a slash for ease of parsing, so make sure + # there is one (and only one) here. + func_normal_abspath_tpath=`$ECHO "$func_normal_abspath_tpath" | $SED \ + -e "$removedotparts" -e "$collapseslashes" -e "$finalslash"` + while :; do + # Processed it all yet? + if test "$func_normal_abspath_tpath" = / ; then + # If we ascended to the root using ".." the result may be empty now. + if test -z "$func_normal_abspath_result" ; then + func_normal_abspath_result=/ + fi + break + fi + func_normal_abspath_tcomponent=`$ECHO "$func_normal_abspath_tpath" | $SED \ + -e "$pathcar"` + func_normal_abspath_tpath=`$ECHO "$func_normal_abspath_tpath" | $SED \ + -e "$pathcdr"` + # Figure out what to do with it + case $func_normal_abspath_tcomponent in + "") + # Trailing empty path component, ignore it. + ;; + ..) + # Parent dir; strip last assembled component from result. + func_dirname "$func_normal_abspath_result" + func_normal_abspath_result=$func_dirname_result + ;; + *) + # Actual path component, append it. + func_normal_abspath_result=$func_normal_abspath_result/$func_normal_abspath_tcomponent + ;; + esac + done + # Restore leading double-slash if one was found on entry. + func_normal_abspath_result=$func_normal_abspath_altnamespace$func_normal_abspath_result +} + +# func_relative_path SRCDIR DSTDIR +# generates a relative path from SRCDIR to DSTDIR, with a trailing +# slash if non-empty, suitable for immediately appending a filename +# without needing to append a separator. +# value returned in "$func_relative_path_result" +func_relative_path () +{ + func_relative_path_result= + func_normal_abspath "$1" + func_relative_path_tlibdir=$func_normal_abspath_result + func_normal_abspath "$2" + func_relative_path_tbindir=$func_normal_abspath_result + + # Ascend the tree starting from libdir + while :; do + # check if we have found a prefix of bindir + case $func_relative_path_tbindir in + $func_relative_path_tlibdir) + # found an exact match + func_relative_path_tcancelled= + break + ;; + $func_relative_path_tlibdir*) + # found a matching prefix + func_stripname "$func_relative_path_tlibdir" '' "$func_relative_path_tbindir" + func_relative_path_tcancelled=$func_stripname_result + if test -z "$func_relative_path_result"; then + func_relative_path_result=. + fi + break + ;; + *) + func_dirname $func_relative_path_tlibdir + func_relative_path_tlibdir=${func_dirname_result} + if test "x$func_relative_path_tlibdir" = x ; then + # Have to descend all the way to the root! 
+ func_relative_path_result=../$func_relative_path_result + func_relative_path_tcancelled=$func_relative_path_tbindir + break + fi + func_relative_path_result=../$func_relative_path_result + ;; + esac + done + + # Now calculate path; take care to avoid doubling-up slashes. + func_stripname '' '/' "$func_relative_path_result" + func_relative_path_result=$func_stripname_result + func_stripname '/' '/' "$func_relative_path_tcancelled" + if test "x$func_stripname_result" != x ; then + func_relative_path_result=${func_relative_path_result}/${func_stripname_result} + fi + + # Normalisation. If bindir is libdir, return empty string, + # else relative path ending with a slash; either way, target + # file name can be directly appended. + if test ! -z "$func_relative_path_result"; then + func_stripname './' '' "$func_relative_path_result/" + func_relative_path_result=$func_stripname_result + fi +} + +# The name of this program: +func_dirname_and_basename "$progpath" +progname=$func_basename_result + +# Make sure we have an absolute path for reexecution: +case $progpath in + [\\/]*|[A-Za-z]:\\*) ;; + *[\\/]*) + progdir=$func_dirname_result + progdir=`cd "$progdir" && pwd` + progpath="$progdir/$progname" + ;; + *) + save_IFS="$IFS" + IFS=${PATH_SEPARATOR-:} + for progdir in $PATH; do + IFS="$save_IFS" + test -x "$progdir/$progname" && break + done + IFS="$save_IFS" + test -n "$progdir" || progdir=`pwd` + progpath="$progdir/$progname" + ;; +esac + +# Sed substitution that helps us do robust quoting. It backslashifies +# metacharacters that are still active within double-quoted strings. +Xsed="${SED}"' -e 1s/^X//' +sed_quote_subst='s/\([`"$\\]\)/\\\1/g' + +# Same as above, but do not quote variable references. +double_quote_subst='s/\(["`\\]\)/\\\1/g' + +# Sed substitution that turns a string into a regex matching for the +# string literally. +sed_make_literal_regex='s,[].[^$\\*\/],\\&,g' + +# Sed substitution that converts a w32 file name or path +# which contains forward slashes, into one that contains +# (escaped) backslashes. A very naive implementation. +lt_sed_naive_backslashify='s|\\\\*|\\|g;s|/|\\|g;s|\\|\\\\|g' + +# Re-`\' parameter expansions in output of double_quote_subst that were +# `\'-ed in input to the same. If an odd number of `\' preceded a '$' +# in input to double_quote_subst, that '$' was protected from expansion. +# Since each input `\' is now two `\'s, look for any number of runs of +# four `\'s followed by two `\'s and then a '$'. `\' that '$'. +bs='\\' +bs2='\\\\' +bs4='\\\\\\\\' +dollar='\$' +sed_double_backslash="\ + s/$bs4/&\\ +/g + s/^$bs2$dollar/$bs&/ + s/\\([^$bs]\\)$bs2$dollar/\\1$bs2$bs$dollar/g + s/\n//g" + +# Standard options: +opt_dry_run=false +opt_help=false +opt_quiet=false +opt_verbose=false +opt_warning=: + +# func_echo arg... +# Echo program name prefixed message, along with the current mode +# name if it has been set yet. +func_echo () +{ + $ECHO "$progname: ${opt_mode+$opt_mode: }$*" +} + +# func_verbose arg... +# Echo program name prefixed message in verbose mode only. +func_verbose () +{ + $opt_verbose && func_echo ${1+"$@"} + + # A bug in bash halts the script if the last line of a function + # fails when set -e is in force, so we need another command to + # work around that: + : +} + +# func_echo_all arg... +# Invoke $ECHO with all args, space-separated. +func_echo_all () +{ + $ECHO "$*" +} + +# func_error arg... +# Echo program name prefixed message to standard error. 
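+# Illustrative example (hypothetical message; assumes $progname is "libtool"
+# and link mode is active): a call such as
+#   func_error "cannot find \`libfoo.la'"
+# would print "libtool: link: cannot find \`libfoo.la'" on standard error;
+# the mode prefix is only included once $opt_mode has been set.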
+func_error () +{ + $ECHO "$progname: ${opt_mode+$opt_mode: }"${1+"$@"} 1>&2 +} + +# func_warning arg... +# Echo program name prefixed warning message to standard error. +func_warning () +{ + $opt_warning && $ECHO "$progname: ${opt_mode+$opt_mode: }warning: "${1+"$@"} 1>&2 + + # bash bug again: + : +} + +# func_fatal_error arg... +# Echo program name prefixed message to standard error, and exit. +func_fatal_error () +{ + func_error ${1+"$@"} + exit $EXIT_FAILURE +} + +# func_fatal_help arg... +# Echo program name prefixed message to standard error, followed by +# a help hint, and exit. +func_fatal_help () +{ + func_error ${1+"$@"} + func_fatal_error "$help" +} +help="Try \`$progname --help' for more information." ## default + + +# func_grep expression filename +# Check whether EXPRESSION matches any line of FILENAME, without output. +func_grep () +{ + $GREP "$1" "$2" >/dev/null 2>&1 +} + + +# func_mkdir_p directory-path +# Make sure the entire path to DIRECTORY-PATH is available. +func_mkdir_p () +{ + my_directory_path="$1" + my_dir_list= + + if test -n "$my_directory_path" && test "$opt_dry_run" != ":"; then + + # Protect directory names starting with `-' + case $my_directory_path in + -*) my_directory_path="./$my_directory_path" ;; + esac + + # While some portion of DIR does not yet exist... + while test ! -d "$my_directory_path"; do + # ...make a list in topmost first order. Use a colon delimited + # list incase some portion of path contains whitespace. + my_dir_list="$my_directory_path:$my_dir_list" + + # If the last portion added has no slash in it, the list is done + case $my_directory_path in */*) ;; *) break ;; esac + + # ...otherwise throw away the child directory and loop + my_directory_path=`$ECHO "$my_directory_path" | $SED -e "$dirname"` + done + my_dir_list=`$ECHO "$my_dir_list" | $SED 's,:*$,,'` + + save_mkdir_p_IFS="$IFS"; IFS=':' + for my_dir in $my_dir_list; do + IFS="$save_mkdir_p_IFS" + # mkdir can fail with a `File exist' error if two processes + # try to create one of the directories concurrently. Don't + # stop in that case! + $MKDIR "$my_dir" 2>/dev/null || : + done + IFS="$save_mkdir_p_IFS" + + # Bail out if we (or some other process) failed to create a directory. + test -d "$my_directory_path" || \ + func_fatal_error "Failed to create \`$1'" + fi +} + + +# func_mktempdir [string] +# Make a temporary directory that won't clash with other running +# libtool processes, and avoids race conditions if possible. If +# given, STRING is the basename for that directory. +func_mktempdir () +{ + my_template="${TMPDIR-/tmp}/${1-$progname}" + + if test "$opt_dry_run" = ":"; then + # Return a directory name, but don't create it in dry-run mode + my_tmpdir="${my_template}-$$" + else + + # If mktemp works, use that first and foremost + my_tmpdir=`mktemp -d "${my_template}-XXXXXXXX" 2>/dev/null` + + if test ! -d "$my_tmpdir"; then + # Failing that, at least try and use $RANDOM to avoid a race + my_tmpdir="${my_template}-${RANDOM-0}$$" + + save_mktempdir_umask=`umask` + umask 0077 + $MKDIR "$my_tmpdir" + umask $save_mktempdir_umask + fi + + # If we're not in dry-run mode, bomb out on failure + test -d "$my_tmpdir" || \ + func_fatal_error "cannot create temporary directory \`$my_tmpdir'" + fi + + $ECHO "$my_tmpdir" +} + + +# func_quote_for_eval arg +# Aesthetically quote ARG to be evaled later. 
+# This function returns two values: FUNC_QUOTE_FOR_EVAL_RESULT +# is double-quoted, suitable for a subsequent eval, whereas +# FUNC_QUOTE_FOR_EVAL_UNQUOTED_RESULT has merely all characters +# which are still active within double quotes backslashified. +func_quote_for_eval () +{ + case $1 in + *[\\\`\"\$]*) + func_quote_for_eval_unquoted_result=`$ECHO "$1" | $SED "$sed_quote_subst"` ;; + *) + func_quote_for_eval_unquoted_result="$1" ;; + esac + + case $func_quote_for_eval_unquoted_result in + # Double-quote args containing shell metacharacters to delay + # word splitting, command substitution and and variable + # expansion for a subsequent eval. + # Many Bourne shells cannot handle close brackets correctly + # in scan sets, so we specify it separately. + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + func_quote_for_eval_result="\"$func_quote_for_eval_unquoted_result\"" + ;; + *) + func_quote_for_eval_result="$func_quote_for_eval_unquoted_result" + esac +} + + +# func_quote_for_expand arg +# Aesthetically quote ARG to be evaled later; same as above, +# but do not quote variable references. +func_quote_for_expand () +{ + case $1 in + *[\\\`\"]*) + my_arg=`$ECHO "$1" | $SED \ + -e "$double_quote_subst" -e "$sed_double_backslash"` ;; + *) + my_arg="$1" ;; + esac + + case $my_arg in + # Double-quote args containing shell metacharacters to delay + # word splitting and command substitution for a subsequent eval. + # Many Bourne shells cannot handle close brackets correctly + # in scan sets, so we specify it separately. + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + my_arg="\"$my_arg\"" + ;; + esac + + func_quote_for_expand_result="$my_arg" +} + + +# func_show_eval cmd [fail_exp] +# Unless opt_silent is true, then output CMD. Then, if opt_dryrun is +# not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP +# is given, then evaluate it. +func_show_eval () +{ + my_cmd="$1" + my_fail_exp="${2-:}" + + ${opt_silent-false} || { + func_quote_for_expand "$my_cmd" + eval "func_echo $func_quote_for_expand_result" + } + + if ${opt_dry_run-false}; then :; else + eval "$my_cmd" + my_status=$? + if test "$my_status" -eq 0; then :; else + eval "(exit $my_status); $my_fail_exp" + fi + fi +} + + +# func_show_eval_locale cmd [fail_exp] +# Unless opt_silent is true, then output CMD. Then, if opt_dryrun is +# not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP +# is given, then evaluate it. Use the saved locale for evaluation. +func_show_eval_locale () +{ + my_cmd="$1" + my_fail_exp="${2-:}" + + ${opt_silent-false} || { + func_quote_for_expand "$my_cmd" + eval "func_echo $func_quote_for_expand_result" + } + + if ${opt_dry_run-false}; then :; else + eval "$lt_user_locale + $my_cmd" + my_status=$? + eval "$lt_safe_locale" + if test "$my_status" -eq 0; then :; else + eval "(exit $my_status); $my_fail_exp" + fi + fi +} + +# func_tr_sh +# Turn $1 into a string suitable for a shell variable name. +# Result is stored in $func_tr_sh_result. All characters +# not in the set a-zA-Z0-9_ are replaced with '_'. Further, +# if $1 begins with a digit, a '_' is prepended as well. +func_tr_sh () +{ + case $1 in + [0-9]* | *[!a-zA-Z0-9_]*) + func_tr_sh_result=`$ECHO "$1" | $SED 's/^\([0-9]\)/_\1/; s/[^a-zA-Z0-9_]/_/g'` + ;; + * ) + func_tr_sh_result=$1 + ;; + esac +} + + +# func_version +# Echo version message to standard output and exit. 
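+# Illustrative note (inferred from the sed script below): the version text is
+# not hard-coded here; it is extracted from the comment header at the top of
+# this file, so the output is the "libtool (GNU libtool) ..." banner followed
+# by the copyright and warranty lines.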
+func_version () +{ + $opt_debug + + $SED -n '/(C)/!b go + :more + /\./!{ + N + s/\n# / / + b more + } + :go + /^# '$PROGRAM' (GNU /,/# warranty; / { + s/^# // + s/^# *$// + s/\((C)\)[ 0-9,-]*\( [1-9][0-9]*\)/\1\2/ + p + }' < "$progpath" + exit $? +} + +# func_usage +# Echo short help message to standard output and exit. +func_usage () +{ + $opt_debug + + $SED -n '/^# Usage:/,/^# *.*--help/ { + s/^# // + s/^# *$// + s/\$progname/'$progname'/ + p + }' < "$progpath" + echo + $ECHO "run \`$progname --help | more' for full usage" + exit $? +} + +# func_help [NOEXIT] +# Echo long help message to standard output and exit, +# unless 'noexit' is passed as argument. +func_help () +{ + $opt_debug + + $SED -n '/^# Usage:/,/# Report bugs to/ { + :print + s/^# // + s/^# *$// + s*\$progname*'$progname'* + s*\$host*'"$host"'* + s*\$SHELL*'"$SHELL"'* + s*\$LTCC*'"$LTCC"'* + s*\$LTCFLAGS*'"$LTCFLAGS"'* + s*\$LD*'"$LD"'* + s/\$with_gnu_ld/'"$with_gnu_ld"'/ + s/\$automake_version/'"`(${AUTOMAKE-automake} --version) 2>/dev/null |$SED 1q`"'/ + s/\$autoconf_version/'"`(${AUTOCONF-autoconf} --version) 2>/dev/null |$SED 1q`"'/ + p + d + } + /^# .* home page:/b print + /^# General help using/b print + ' < "$progpath" + ret=$? + if test -z "$1"; then + exit $ret + fi +} + +# func_missing_arg argname +# Echo program name prefixed message to standard error and set global +# exit_cmd. +func_missing_arg () +{ + $opt_debug + + func_error "missing argument for $1." + exit_cmd=exit +} + + +# func_split_short_opt shortopt +# Set func_split_short_opt_name and func_split_short_opt_arg shell +# variables after splitting SHORTOPT after the 2nd character. +func_split_short_opt () +{ + my_sed_short_opt='1s/^\(..\).*$/\1/;q' + my_sed_short_rest='1s/^..\(.*\)$/\1/;q' + + func_split_short_opt_name=`$ECHO "$1" | $SED "$my_sed_short_opt"` + func_split_short_opt_arg=`$ECHO "$1" | $SED "$my_sed_short_rest"` +} # func_split_short_opt may be replaced by extended shell implementation + + +# func_split_long_opt longopt +# Set func_split_long_opt_name and func_split_long_opt_arg shell +# variables after splitting LONGOPT at the `=' sign. +func_split_long_opt () +{ + my_sed_long_opt='1s/^\(--[^=]*\)=.*/\1/;q' + my_sed_long_arg='1s/^--[^=]*=//' + + func_split_long_opt_name=`$ECHO "$1" | $SED "$my_sed_long_opt"` + func_split_long_opt_arg=`$ECHO "$1" | $SED "$my_sed_long_arg"` +} # func_split_long_opt may be replaced by extended shell implementation + +exit_cmd=: + + + + + +magic="%%%MAGIC variable%%%" +magic_exe="%%%MAGIC EXE variable%%%" + +# Global variables. +nonopt= +preserve_args= +lo2o="s/\\.lo\$/.${objext}/" +o2lo="s/\\.${objext}\$/.lo/" +extracted_archives= +extracted_serial=0 + +# If this variable is set in any of the actions, the command in it +# will be execed at the end. This prevents here-documents from being +# left over by shells. +exec_cmd= + +# func_append var value +# Append VALUE to the end of shell variable VAR. +func_append () +{ + eval "${1}=\$${1}\${2}" +} # func_append may be replaced by extended shell implementation + +# func_append_quoted var value +# Quote VALUE and append to the end of shell variable VAR, separated +# by a space. +func_append_quoted () +{ + func_quote_for_eval "${2}" + eval "${1}=\$${1}\\ \$func_quote_for_eval_result" +} # func_append_quoted may be replaced by extended shell implementation + + +# func_arith arithmetic-term... 
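+# Illustrative example (hypothetical values): `func_arith 10 / 2' leaves "5"
+# in $func_arith_result. The terms are passed straight to expr(1), so each
+# operand and operator must be a separate word.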
+func_arith () +{ + func_arith_result=`expr "${@}"` +} # func_arith may be replaced by extended shell implementation + + +# func_len string +# STRING may not start with a hyphen. +func_len () +{ + func_len_result=`expr "${1}" : ".*" 2>/dev/null || echo $max_cmd_len` +} # func_len may be replaced by extended shell implementation + + +# func_lo2o object +func_lo2o () +{ + func_lo2o_result=`$ECHO "${1}" | $SED "$lo2o"` +} # func_lo2o may be replaced by extended shell implementation + + +# func_xform libobj-or-source +func_xform () +{ + func_xform_result=`$ECHO "${1}" | $SED 's/\.[^.]*$/.lo/'` +} # func_xform may be replaced by extended shell implementation + + +# func_fatal_configuration arg... +# Echo program name prefixed message to standard error, followed by +# a configuration failure hint, and exit. +func_fatal_configuration () +{ + func_error ${1+"$@"} + func_error "See the $PACKAGE documentation for more information." + func_fatal_error "Fatal configuration error." +} + + +# func_config +# Display the configuration for all the tags in this script. +func_config () +{ + re_begincf='^# ### BEGIN LIBTOOL' + re_endcf='^# ### END LIBTOOL' + + # Default configuration. + $SED "1,/$re_begincf CONFIG/d;/$re_endcf CONFIG/,\$d" < "$progpath" + + # Now print the configurations for the tags. + for tagname in $taglist; do + $SED -n "/$re_begincf TAG CONFIG: $tagname\$/,/$re_endcf TAG CONFIG: $tagname\$/p" < "$progpath" + done + + exit $? +} + +# func_features +# Display the features supported by this script. +func_features () +{ + echo "host: $host" + if test "$build_libtool_libs" = yes; then + echo "enable shared libraries" + else + echo "disable shared libraries" + fi + if test "$build_old_libs" = yes; then + echo "enable static libraries" + else + echo "disable static libraries" + fi + + exit $? +} + +# func_enable_tag tagname +# Verify that TAGNAME is valid, and either flag an error and exit, or +# enable the TAGNAME tag. We also add TAGNAME to the global $taglist +# variable here. +func_enable_tag () +{ + # Global variable: + tagname="$1" + + re_begincf="^# ### BEGIN LIBTOOL TAG CONFIG: $tagname\$" + re_endcf="^# ### END LIBTOOL TAG CONFIG: $tagname\$" + sed_extractcf="/$re_begincf/,/$re_endcf/p" + + # Validate tagname. + case $tagname in + *[!-_A-Za-z0-9,/]*) + func_fatal_error "invalid tag name: $tagname" + ;; + esac + + # Don't test for the "default" C tag, as we know it's + # there but not specially marked. + case $tagname in + CC) ;; + *) + if $GREP "$re_begincf" "$progpath" >/dev/null 2>&1; then + taglist="$taglist $tagname" + + # Evaluate the configuration. Be careful to quote the path + # and the sed script, to avoid splitting on whitespace, but + # also don't use non-portable quotes within backquotes within + # quotes we have to do it in 2 steps: + extractedcf=`$SED -n -e "$sed_extractcf" < "$progpath"` + eval "$extractedcf" + else + func_error "ignoring unknown tag $tagname" + fi + ;; + esac +} + +# func_check_version_match +# Ensure that we are using m4 macros, and libtool script from the same +# release of libtool. +func_check_version_match () +{ + if test "$package_revision" != "$macro_revision"; then + if test "$VERSION" != "$macro_version"; then + if test -z "$macro_version"; then + cat >&2 <<_LT_EOF +$progname: Version mismatch error. This is $PACKAGE $VERSION, but the +$progname: definition of this LT_INIT comes from an older release. +$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION +$progname: and run autoconf again. 
+_LT_EOF + else + cat >&2 <<_LT_EOF +$progname: Version mismatch error. This is $PACKAGE $VERSION, but the +$progname: definition of this LT_INIT comes from $PACKAGE $macro_version. +$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION +$progname: and run autoconf again. +_LT_EOF + fi + else + cat >&2 <<_LT_EOF +$progname: Version mismatch error. This is $PACKAGE $VERSION, revision $package_revision, +$progname: but the definition of this LT_INIT comes from revision $macro_revision. +$progname: You should recreate aclocal.m4 with macros from revision $package_revision +$progname: of $PACKAGE $VERSION and run autoconf again. +_LT_EOF + fi + + exit $EXIT_MISMATCH + fi +} + + +# Shorthand for --mode=foo, only valid as the first argument +case $1 in +clean|clea|cle|cl) + shift; set dummy --mode clean ${1+"$@"}; shift + ;; +compile|compil|compi|comp|com|co|c) + shift; set dummy --mode compile ${1+"$@"}; shift + ;; +execute|execut|execu|exec|exe|ex|e) + shift; set dummy --mode execute ${1+"$@"}; shift + ;; +finish|finis|fini|fin|fi|f) + shift; set dummy --mode finish ${1+"$@"}; shift + ;; +install|instal|insta|inst|ins|in|i) + shift; set dummy --mode install ${1+"$@"}; shift + ;; +link|lin|li|l) + shift; set dummy --mode link ${1+"$@"}; shift + ;; +uninstall|uninstal|uninsta|uninst|unins|unin|uni|un|u) + shift; set dummy --mode uninstall ${1+"$@"}; shift + ;; +esac + + + +# Option defaults: +opt_debug=: +opt_dry_run=false +opt_config=false +opt_preserve_dup_deps=false +opt_features=false +opt_finish=false +opt_help=false +opt_help_all=false +opt_silent=: +opt_warning=: +opt_verbose=: +opt_silent=false +opt_verbose=false + + +# Parse options once, thoroughly. This comes as soon as possible in the +# script to make things like `--version' happen as quickly as we can. 
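+# Illustrative walk-through (hypothetical command line): an invocation such as
+#   libtool compile gcc -c foo.c
+# is first rewritten by the shorthand case above into
+#   libtool --mode compile gcc -c foo.c
+# after which the loop below sets opt_mode=compile, stores "gcc" in $nonopt,
+# and leaves "-c foo.c" as the remaining arguments for compile mode.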
+{ + # this just eases exit handling + while test $# -gt 0; do + opt="$1" + shift + case $opt in + --debug|-x) opt_debug='set -x' + func_echo "enabling shell trace mode" + $opt_debug + ;; + --dry-run|--dryrun|-n) + opt_dry_run=: + ;; + --config) + opt_config=: +func_config + ;; + --dlopen|-dlopen) + optarg="$1" + opt_dlopen="${opt_dlopen+$opt_dlopen +}$optarg" + shift + ;; + --preserve-dup-deps) + opt_preserve_dup_deps=: + ;; + --features) + opt_features=: +func_features + ;; + --finish) + opt_finish=: +set dummy --mode finish ${1+"$@"}; shift + ;; + --help) + opt_help=: + ;; + --help-all) + opt_help_all=: +opt_help=': help-all' + ;; + --mode) + test $# = 0 && func_missing_arg $opt && break + optarg="$1" + opt_mode="$optarg" +case $optarg in + # Valid mode arguments: + clean|compile|execute|finish|install|link|relink|uninstall) ;; + + # Catch anything else as an error + *) func_error "invalid argument for $opt" + exit_cmd=exit + break + ;; +esac + shift + ;; + --no-silent|--no-quiet) + opt_silent=false +func_append preserve_args " $opt" + ;; + --no-warning|--no-warn) + opt_warning=false +func_append preserve_args " $opt" + ;; + --no-verbose) + opt_verbose=false +func_append preserve_args " $opt" + ;; + --silent|--quiet) + opt_silent=: +func_append preserve_args " $opt" + opt_verbose=false + ;; + --verbose|-v) + opt_verbose=: +func_append preserve_args " $opt" +opt_silent=false + ;; + --tag) + test $# = 0 && func_missing_arg $opt && break + optarg="$1" + opt_tag="$optarg" +func_append preserve_args " $opt $optarg" +func_enable_tag "$optarg" + shift + ;; + + -\?|-h) func_usage ;; + --help) func_help ;; + --version) func_version ;; + + # Separate optargs to long options: + --*=*) + func_split_long_opt "$opt" + set dummy "$func_split_long_opt_name" "$func_split_long_opt_arg" ${1+"$@"} + shift + ;; + + # Separate non-argument short options: + -\?*|-h*|-n*|-v*) + func_split_short_opt "$opt" + set dummy "$func_split_short_opt_name" "-$func_split_short_opt_arg" ${1+"$@"} + shift + ;; + + --) break ;; + -*) func_fatal_help "unrecognized option \`$opt'" ;; + *) set dummy "$opt" ${1+"$@"}; shift; break ;; + esac + done + + # Validate options: + + # save first non-option argument + if test "$#" -gt 0; then + nonopt="$opt" + shift + fi + + # preserve --debug + test "$opt_debug" = : || func_append preserve_args " --debug" + + case $host in + *cygwin* | *mingw* | *pw32* | *cegcc*) + # don't eliminate duplications in $postdeps and $predeps + opt_duplicate_compiler_generated_deps=: + ;; + *) + opt_duplicate_compiler_generated_deps=$opt_preserve_dup_deps + ;; + esac + + $opt_help || { + # Sanity checks first: + func_check_version_match + + if test "$build_libtool_libs" != yes && test "$build_old_libs" != yes; then + func_fatal_configuration "not configured to build any kind of library" + fi + + # Darwin sucks + eval std_shrext=\"$shrext_cmds\" + + # Only execute mode is allowed to have -dlopen flags. + if test -n "$opt_dlopen" && test "$opt_mode" != execute; then + func_error "unrecognized option \`-dlopen'" + $ECHO "$help" 1>&2 + exit $EXIT_FAILURE + fi + + # Change the help message to a mode-specific one. + generic_help="$help" + help="Try \`$progname --help --mode=$opt_mode' for more information." + } + + + # Bail if the options were screwed + $exit_cmd $EXIT_FAILURE +} + + + + +## ----------- ## +## Main. ## +## ----------- ## + +# func_lalib_p file +# True iff FILE is a libtool `.la' library or `.lo' object file. 
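+# (Illustrative note: the test below merely looks, within the first four lines
+# of FILE, for the "# Generated by ... libtool" comment that libtool writes at
+# the top of every `.la' and `.lo' file it creates.)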
+# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_lalib_p () +{ + test -f "$1" && + $SED -e 4q "$1" 2>/dev/null \ + | $GREP "^# Generated by .*$PACKAGE" > /dev/null 2>&1 +} + +# func_lalib_unsafe_p file +# True iff FILE is a libtool `.la' library or `.lo' object file. +# This function implements the same check as func_lalib_p without +# resorting to external programs. To this end, it redirects stdin and +# closes it afterwards, without saving the original file descriptor. +# As a safety measure, use it only where a negative result would be +# fatal anyway. Works if `file' does not exist. +func_lalib_unsafe_p () +{ + lalib_p=no + if test -f "$1" && test -r "$1" && exec 5<&0 <"$1"; then + for lalib_p_l in 1 2 3 4 + do + read lalib_p_line + case "$lalib_p_line" in + \#\ Generated\ by\ *$PACKAGE* ) lalib_p=yes; break;; + esac + done + exec 0<&5 5<&- + fi + test "$lalib_p" = yes +} + +# func_ltwrapper_script_p file +# True iff FILE is a libtool wrapper script +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_ltwrapper_script_p () +{ + func_lalib_p "$1" +} + +# func_ltwrapper_executable_p file +# True iff FILE is a libtool wrapper executable +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_ltwrapper_executable_p () +{ + func_ltwrapper_exec_suffix= + case $1 in + *.exe) ;; + *) func_ltwrapper_exec_suffix=.exe ;; + esac + $GREP "$magic_exe" "$1$func_ltwrapper_exec_suffix" >/dev/null 2>&1 +} + +# func_ltwrapper_scriptname file +# Assumes file is an ltwrapper_executable +# uses $file to determine the appropriate filename for a +# temporary ltwrapper_script. +func_ltwrapper_scriptname () +{ + func_dirname_and_basename "$1" "" "." + func_stripname '' '.exe' "$func_basename_result" + func_ltwrapper_scriptname_result="$func_dirname_result/$objdir/${func_stripname_result}_ltshwrapper" +} + +# func_ltwrapper_p file +# True iff FILE is a libtool wrapper script or wrapper executable +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_ltwrapper_p () +{ + func_ltwrapper_script_p "$1" || func_ltwrapper_executable_p "$1" +} + + +# func_execute_cmds commands fail_cmd +# Execute tilde-delimited COMMANDS. +# If FAIL_CMD is given, eval that upon failure. +# FAIL_CMD may read-access the current command in variable CMD! +func_execute_cmds () +{ + $opt_debug + save_ifs=$IFS; IFS='~' + for cmd in $1; do + IFS=$save_ifs + eval cmd=\"$cmd\" + func_show_eval "$cmd" "${2-:}" + done + IFS=$save_ifs +} + + +# func_source file +# Source FILE, adding directory component if necessary. +# Note that it is not necessary on cygwin/mingw to append a dot to +# FILE even if both FILE and FILE.exe exist: automatic-append-.exe +# behavior happens only for exec(3), not for open(2)! Also, sourcing +# `FILE.' does not work on cygwin managed mounts. +func_source () +{ + $opt_debug + case $1 in + */* | *\\*) . "$1" ;; + *) . "./$1" ;; + esac +} + + +# func_resolve_sysroot PATH +# Replace a leading = in PATH with a sysroot. 
Store the result into +# func_resolve_sysroot_result +func_resolve_sysroot () +{ + func_resolve_sysroot_result=$1 + case $func_resolve_sysroot_result in + =*) + func_stripname '=' '' "$func_resolve_sysroot_result" + func_resolve_sysroot_result=$lt_sysroot$func_stripname_result + ;; + esac +} + +# func_replace_sysroot PATH +# If PATH begins with the sysroot, replace it with = and +# store the result into func_replace_sysroot_result. +func_replace_sysroot () +{ + case "$lt_sysroot:$1" in + ?*:"$lt_sysroot"*) + func_stripname "$lt_sysroot" '' "$1" + func_replace_sysroot_result="=$func_stripname_result" + ;; + *) + # Including no sysroot. + func_replace_sysroot_result=$1 + ;; + esac +} + +# func_infer_tag arg +# Infer tagged configuration to use if any are available and +# if one wasn't chosen via the "--tag" command line option. +# Only attempt this if the compiler in the base compile +# command doesn't match the default compiler. +# arg is usually of the form 'gcc ...' +func_infer_tag () +{ + $opt_debug + if test -n "$available_tags" && test -z "$tagname"; then + CC_quoted= + for arg in $CC; do + func_append_quoted CC_quoted "$arg" + done + CC_expanded=`func_echo_all $CC` + CC_quoted_expanded=`func_echo_all $CC_quoted` + case $@ in + # Blanks in the command may have been stripped by the calling shell, + # but not from the CC environment variable when configure was run. + " $CC "* | "$CC "* | " $CC_expanded "* | "$CC_expanded "* | \ + " $CC_quoted"* | "$CC_quoted "* | " $CC_quoted_expanded "* | "$CC_quoted_expanded "*) ;; + # Blanks at the start of $base_compile will cause this to fail + # if we don't check for them as well. + *) + for z in $available_tags; do + if $GREP "^# ### BEGIN LIBTOOL TAG CONFIG: $z$" < "$progpath" > /dev/null; then + # Evaluate the configuration. + eval "`${SED} -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$z'$/,/^# ### END LIBTOOL TAG CONFIG: '$z'$/p' < $progpath`" + CC_quoted= + for arg in $CC; do + # Double-quote args containing other shell metacharacters. + func_append_quoted CC_quoted "$arg" + done + CC_expanded=`func_echo_all $CC` + CC_quoted_expanded=`func_echo_all $CC_quoted` + case "$@ " in + " $CC "* | "$CC "* | " $CC_expanded "* | "$CC_expanded "* | \ + " $CC_quoted"* | "$CC_quoted "* | " $CC_quoted_expanded "* | "$CC_quoted_expanded "*) + # The compiler in the base compile command matches + # the one in the tagged configuration. + # Assume this is the tagged configuration we want. + tagname=$z + break + ;; + esac + fi + done + # If $tagname still isn't set, then no tagged configuration + # was found and let the user know that the "--tag" command + # line option must be used. + if test -z "$tagname"; then + func_echo "unable to infer tagged configuration" + func_fatal_error "specify a tag with \`--tag'" +# else +# func_verbose "using $tagname tagged configuration" + fi + ;; + esac + fi +} + + + +# func_write_libtool_object output_name pic_name nonpic_name +# Create a libtool object file (analogous to a ".la" file), +# but don't create it if we're doing a dry run. +func_write_libtool_object () +{ + write_libobj=${1} + if test "$build_libtool_libs" = yes; then + write_lobj=\'${2}\' + else + write_lobj=none + fi + + if test "$build_old_libs" = yes; then + write_oldobj=\'${3}\' + else + write_oldobj=none + fi + + $opt_dry_run || { + cat >${write_libobj}T </dev/null` + if test "$?" 
-eq 0 && test -n "${func_convert_core_file_wine_to_w32_tmp}"; then + func_convert_core_file_wine_to_w32_result=`$ECHO "$func_convert_core_file_wine_to_w32_tmp" | + $SED -e "$lt_sed_naive_backslashify"` + else + func_convert_core_file_wine_to_w32_result= + fi + fi +} +# end: func_convert_core_file_wine_to_w32 + + +# func_convert_core_path_wine_to_w32 ARG +# Helper function used by path conversion functions when $build is *nix, and +# $host is mingw, cygwin, or some other w32 environment. Relies on a correctly +# configured wine environment available, with the winepath program in $build's +# $PATH. Assumes ARG has no leading or trailing path separator characters. +# +# ARG is path to be converted from $build format to win32. +# Result is available in $func_convert_core_path_wine_to_w32_result. +# Unconvertible file (directory) names in ARG are skipped; if no directory names +# are convertible, then the result may be empty. +func_convert_core_path_wine_to_w32 () +{ + $opt_debug + # unfortunately, winepath doesn't convert paths, only file names + func_convert_core_path_wine_to_w32_result="" + if test -n "$1"; then + oldIFS=$IFS + IFS=: + for func_convert_core_path_wine_to_w32_f in $1; do + IFS=$oldIFS + func_convert_core_file_wine_to_w32 "$func_convert_core_path_wine_to_w32_f" + if test -n "$func_convert_core_file_wine_to_w32_result" ; then + if test -z "$func_convert_core_path_wine_to_w32_result"; then + func_convert_core_path_wine_to_w32_result="$func_convert_core_file_wine_to_w32_result" + else + func_append func_convert_core_path_wine_to_w32_result ";$func_convert_core_file_wine_to_w32_result" + fi + fi + done + IFS=$oldIFS + fi +} +# end: func_convert_core_path_wine_to_w32 + + +# func_cygpath ARGS... +# Wrapper around calling the cygpath program via LT_CYGPATH. This is used when +# when (1) $build is *nix and Cygwin is hosted via a wine environment; or (2) +# $build is MSYS and $host is Cygwin, or (3) $build is Cygwin. In case (1) or +# (2), returns the Cygwin file name or path in func_cygpath_result (input +# file name or path is assumed to be in w32 format, as previously converted +# from $build's *nix or MSYS format). In case (3), returns the w32 file name +# or path in func_cygpath_result (input file name or path is assumed to be in +# Cygwin format). Returns an empty string on error. +# +# ARGS are passed to cygpath, with the last one being the file name or path to +# be converted. +# +# Specify the absolute *nix (or w32) name to cygpath in the LT_CYGPATH +# environment variable; do not put it in $PATH. +func_cygpath () +{ + $opt_debug + if test -n "$LT_CYGPATH" && test -f "$LT_CYGPATH"; then + func_cygpath_result=`$LT_CYGPATH "$@" 2>/dev/null` + if test "$?" -ne 0; then + # on failure, ensure result is empty + func_cygpath_result= + fi + else + func_cygpath_result= + func_error "LT_CYGPATH is empty or specifies non-existent file: \`$LT_CYGPATH'" + fi +} +#end: func_cygpath + + +# func_convert_core_msys_to_w32 ARG +# Convert file name or path ARG from MSYS format to w32 format. Return +# result in func_convert_core_msys_to_w32_result. +func_convert_core_msys_to_w32 () +{ + $opt_debug + # awkward: cmd appends spaces to result + func_convert_core_msys_to_w32_result=`( cmd //c echo "$1" ) 2>/dev/null | + $SED -e 's/[ ]*$//' -e "$lt_sed_naive_backslashify"` +} +#end: func_convert_core_msys_to_w32 + + +# func_convert_file_check ARG1 ARG2 +# Verify that ARG1 (a file name in $build format) was converted to $host +# format in ARG2. 
Otherwise, emit an error message, but continue (resetting +# func_to_host_file_result to ARG1). +func_convert_file_check () +{ + $opt_debug + if test -z "$2" && test -n "$1" ; then + func_error "Could not determine host file name corresponding to" + func_error " \`$1'" + func_error "Continuing, but uninstalled executables may not work." + # Fallback: + func_to_host_file_result="$1" + fi +} +# end func_convert_file_check + + +# func_convert_path_check FROM_PATHSEP TO_PATHSEP FROM_PATH TO_PATH +# Verify that FROM_PATH (a path in $build format) was converted to $host +# format in TO_PATH. Otherwise, emit an error message, but continue, resetting +# func_to_host_file_result to a simplistic fallback value (see below). +func_convert_path_check () +{ + $opt_debug + if test -z "$4" && test -n "$3"; then + func_error "Could not determine the host path corresponding to" + func_error " \`$3'" + func_error "Continuing, but uninstalled executables may not work." + # Fallback. This is a deliberately simplistic "conversion" and + # should not be "improved". See libtool.info. + if test "x$1" != "x$2"; then + lt_replace_pathsep_chars="s|$1|$2|g" + func_to_host_path_result=`echo "$3" | + $SED -e "$lt_replace_pathsep_chars"` + else + func_to_host_path_result="$3" + fi + fi +} +# end func_convert_path_check + + +# func_convert_path_front_back_pathsep FRONTPAT BACKPAT REPL ORIG +# Modifies func_to_host_path_result by prepending REPL if ORIG matches FRONTPAT +# and appending REPL if ORIG matches BACKPAT. +func_convert_path_front_back_pathsep () +{ + $opt_debug + case $4 in + $1 ) func_to_host_path_result="$3$func_to_host_path_result" + ;; + esac + case $4 in + $2 ) func_append func_to_host_path_result "$3" + ;; + esac +} +# end func_convert_path_front_back_pathsep + + +################################################## +# $build to $host FILE NAME CONVERSION FUNCTIONS # +################################################## +# invoked via `$to_host_file_cmd ARG' +# +# In each case, ARG is the path to be converted from $build to $host format. +# Result will be available in $func_to_host_file_result. + + +# func_to_host_file ARG +# Converts the file name ARG from $build format to $host format. Return result +# in func_to_host_file_result. +func_to_host_file () +{ + $opt_debug + $to_host_file_cmd "$1" +} +# end func_to_host_file + + +# func_to_tool_file ARG LAZY +# converts the file name ARG from $build format to toolchain format. Return +# result in func_to_tool_file_result. If the conversion in use is listed +# in (the comma separated) LAZY, no conversion takes place. +func_to_tool_file () +{ + $opt_debug + case ,$2, in + *,"$to_tool_file_cmd",*) + func_to_tool_file_result=$1 + ;; + *) + $to_tool_file_cmd "$1" + func_to_tool_file_result=$func_to_host_file_result + ;; + esac +} +# end func_to_tool_file + + +# func_convert_file_noop ARG +# Copy ARG to func_to_host_file_result. +func_convert_file_noop () +{ + func_to_host_file_result="$1" +} +# end func_convert_file_noop + + +# func_convert_file_msys_to_w32 ARG +# Convert file name ARG from (mingw) MSYS to (mingw) w32 format; automatic +# conversion to w32 is not available inside the cwrapper. Returns result in +# func_to_host_file_result. 
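+# Illustrative example (hypothetical path; the real mapping depends on the
+# MSYS mount table): on an MSYS $build,
+#   func_convert_file_msys_to_w32 /c/build/libfoo.la
+# would typically leave "c:\build\libfoo.la" in $func_to_host_file_result.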
+func_convert_file_msys_to_w32 () +{ + $opt_debug + func_to_host_file_result="$1" + if test -n "$1"; then + func_convert_core_msys_to_w32 "$1" + func_to_host_file_result="$func_convert_core_msys_to_w32_result" + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_msys_to_w32 + + +# func_convert_file_cygwin_to_w32 ARG +# Convert file name ARG from Cygwin to w32 format. Returns result in +# func_to_host_file_result. +func_convert_file_cygwin_to_w32 () +{ + $opt_debug + func_to_host_file_result="$1" + if test -n "$1"; then + # because $build is cygwin, we call "the" cygpath in $PATH; no need to use + # LT_CYGPATH in this case. + func_to_host_file_result=`cygpath -m "$1"` + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_cygwin_to_w32 + + +# func_convert_file_nix_to_w32 ARG +# Convert file name ARG from *nix to w32 format. Requires a wine environment +# and a working winepath. Returns result in func_to_host_file_result. +func_convert_file_nix_to_w32 () +{ + $opt_debug + func_to_host_file_result="$1" + if test -n "$1"; then + func_convert_core_file_wine_to_w32 "$1" + func_to_host_file_result="$func_convert_core_file_wine_to_w32_result" + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_nix_to_w32 + + +# func_convert_file_msys_to_cygwin ARG +# Convert file name ARG from MSYS to Cygwin format. Requires LT_CYGPATH set. +# Returns result in func_to_host_file_result. +func_convert_file_msys_to_cygwin () +{ + $opt_debug + func_to_host_file_result="$1" + if test -n "$1"; then + func_convert_core_msys_to_w32 "$1" + func_cygpath -u "$func_convert_core_msys_to_w32_result" + func_to_host_file_result="$func_cygpath_result" + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_msys_to_cygwin + + +# func_convert_file_nix_to_cygwin ARG +# Convert file name ARG from *nix to Cygwin format. Requires Cygwin installed +# in a wine environment, working winepath, and LT_CYGPATH set. Returns result +# in func_to_host_file_result. +func_convert_file_nix_to_cygwin () +{ + $opt_debug + func_to_host_file_result="$1" + if test -n "$1"; then + # convert from *nix to w32, then use cygpath to convert from w32 to cygwin. + func_convert_core_file_wine_to_w32 "$1" + func_cygpath -u "$func_convert_core_file_wine_to_w32_result" + func_to_host_file_result="$func_cygpath_result" + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_nix_to_cygwin + + +############################################# +# $build to $host PATH CONVERSION FUNCTIONS # +############################################# +# invoked via `$to_host_path_cmd ARG' +# +# In each case, ARG is the path to be converted from $build to $host format. +# The result will be available in $func_to_host_path_result. +# +# Path separators are also converted from $build format to $host format. If +# ARG begins or ends with a path separator character, it is preserved (but +# converted to $host format) on output. +# +# All path conversion functions are named using the following convention: +# file name conversion function : func_convert_file_X_to_Y () +# path conversion function : func_convert_path_X_to_Y () +# where, for any given $build/$host combination the 'X_to_Y' value is the +# same. If conversion functions are added for new $build/$host combinations, +# the two new functions must follow this pattern, or func_init_to_host_path_cmd +# will break. 
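+# Illustrative example (names drawn from the convention above): if configure
+# selected to_host_file_cmd=func_convert_file_cygwin_to_w32, then
+# func_init_to_host_path_cmd below derives
+# to_host_path_cmd=func_convert_path_cygwin_to_w32 by replacing the
+# "func_convert_file_" prefix with "func_convert_path_", which is why the two
+# functions must always exist as a matching pair.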
+ + +# func_init_to_host_path_cmd +# Ensures that function "pointer" variable $to_host_path_cmd is set to the +# appropriate value, based on the value of $to_host_file_cmd. +to_host_path_cmd= +func_init_to_host_path_cmd () +{ + $opt_debug + if test -z "$to_host_path_cmd"; then + func_stripname 'func_convert_file_' '' "$to_host_file_cmd" + to_host_path_cmd="func_convert_path_${func_stripname_result}" + fi +} + + +# func_to_host_path ARG +# Converts the path ARG from $build format to $host format. Return result +# in func_to_host_path_result. +func_to_host_path () +{ + $opt_debug + func_init_to_host_path_cmd + $to_host_path_cmd "$1" +} +# end func_to_host_path + + +# func_convert_path_noop ARG +# Copy ARG to func_to_host_path_result. +func_convert_path_noop () +{ + func_to_host_path_result="$1" +} +# end func_convert_path_noop + + +# func_convert_path_msys_to_w32 ARG +# Convert path ARG from (mingw) MSYS to (mingw) w32 format; automatic +# conversion to w32 is not available inside the cwrapper. Returns result in +# func_to_host_path_result. +func_convert_path_msys_to_w32 () +{ + $opt_debug + func_to_host_path_result="$1" + if test -n "$1"; then + # Remove leading and trailing path separator characters from ARG. MSYS + # behavior is inconsistent here; cygpath turns them into '.;' and ';.'; + # and winepath ignores them completely. + func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_convert_core_msys_to_w32 "$func_to_host_path_tmp1" + func_to_host_path_result="$func_convert_core_msys_to_w32_result" + func_convert_path_check : ";" \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" ";" "$1" + fi +} +# end func_convert_path_msys_to_w32 + + +# func_convert_path_cygwin_to_w32 ARG +# Convert path ARG from Cygwin to w32 format. Returns result in +# func_to_host_file_result. +func_convert_path_cygwin_to_w32 () +{ + $opt_debug + func_to_host_path_result="$1" + if test -n "$1"; then + # See func_convert_path_msys_to_w32: + func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_to_host_path_result=`cygpath -m -p "$func_to_host_path_tmp1"` + func_convert_path_check : ";" \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" ";" "$1" + fi +} +# end func_convert_path_cygwin_to_w32 + + +# func_convert_path_nix_to_w32 ARG +# Convert path ARG from *nix to w32 format. Requires a wine environment and +# a working winepath. Returns result in func_to_host_file_result. +func_convert_path_nix_to_w32 () +{ + $opt_debug + func_to_host_path_result="$1" + if test -n "$1"; then + # See func_convert_path_msys_to_w32: + func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_convert_core_path_wine_to_w32 "$func_to_host_path_tmp1" + func_to_host_path_result="$func_convert_core_path_wine_to_w32_result" + func_convert_path_check : ";" \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" ";" "$1" + fi +} +# end func_convert_path_nix_to_w32 + + +# func_convert_path_msys_to_cygwin ARG +# Convert path ARG from MSYS to Cygwin format. Requires LT_CYGPATH set. +# Returns result in func_to_host_file_result. 
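+# Illustrative example (hypothetical path; actual output depends on the local
+# mount tables): an MSYS search path such as "/c/foo:/c/bar" is converted
+# component-wise to "c:\foo;c:\bar" and then mapped back with `cygpath -u -p',
+# typically yielding "/cygdrive/c/foo:/cygdrive/c/bar" in
+# $func_to_host_path_result.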
+func_convert_path_msys_to_cygwin () +{ + $opt_debug + func_to_host_path_result="$1" + if test -n "$1"; then + # See func_convert_path_msys_to_w32: + func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_convert_core_msys_to_w32 "$func_to_host_path_tmp1" + func_cygpath -u -p "$func_convert_core_msys_to_w32_result" + func_to_host_path_result="$func_cygpath_result" + func_convert_path_check : : \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" : "$1" + fi +} +# end func_convert_path_msys_to_cygwin + + +# func_convert_path_nix_to_cygwin ARG +# Convert path ARG from *nix to Cygwin format. Requires Cygwin installed in a +# a wine environment, working winepath, and LT_CYGPATH set. Returns result in +# func_to_host_file_result. +func_convert_path_nix_to_cygwin () +{ + $opt_debug + func_to_host_path_result="$1" + if test -n "$1"; then + # Remove leading and trailing path separator characters from + # ARG. msys behavior is inconsistent here, cygpath turns them + # into '.;' and ';.', and winepath ignores them completely. + func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_convert_core_path_wine_to_w32 "$func_to_host_path_tmp1" + func_cygpath -u -p "$func_convert_core_path_wine_to_w32_result" + func_to_host_path_result="$func_cygpath_result" + func_convert_path_check : : \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" : "$1" + fi +} +# end func_convert_path_nix_to_cygwin + + +# func_mode_compile arg... +func_mode_compile () +{ + $opt_debug + # Get the compilation command and the source file. + base_compile= + srcfile="$nonopt" # always keep a non-empty value in "srcfile" + suppress_opt=yes + suppress_output= + arg_mode=normal + libobj= + later= + pie_flag= + + for arg + do + case $arg_mode in + arg ) + # do not "continue". Instead, add this to base_compile + lastarg="$arg" + arg_mode=normal + ;; + + target ) + libobj="$arg" + arg_mode=normal + continue + ;; + + normal ) + # Accept any command-line options. + case $arg in + -o) + test -n "$libobj" && \ + func_fatal_error "you cannot specify \`-o' more than once" + arg_mode=target + continue + ;; + + -pie | -fpie | -fPIE) + func_append pie_flag " $arg" + continue + ;; + + -shared | -static | -prefer-pic | -prefer-non-pic) + func_append later " $arg" + continue + ;; + + -no-suppress) + suppress_opt=no + continue + ;; + + -Xcompiler) + arg_mode=arg # the next one goes into the "base_compile" arg list + continue # The current "srcfile" will either be retained or + ;; # replaced later. I would guess that would be a bug. + + -Wc,*) + func_stripname '-Wc,' '' "$arg" + args=$func_stripname_result + lastarg= + save_ifs="$IFS"; IFS=',' + for arg in $args; do + IFS="$save_ifs" + func_append_quoted lastarg "$arg" + done + IFS="$save_ifs" + func_stripname ' ' '' "$lastarg" + lastarg=$func_stripname_result + + # Add the arguments to base_compile. + func_append base_compile " $lastarg" + continue + ;; + + *) + # Accept the current argument as the source file. + # The previous "srcfile" becomes the current argument. + # + lastarg="$srcfile" + srcfile="$arg" + ;; + esac # case $arg + ;; + esac # case $arg_mode + + # Aesthetically quote the previous argument. 
+ func_append_quoted base_compile "$lastarg" + done # for arg + + case $arg_mode in + arg) + func_fatal_error "you must specify an argument for -Xcompile" + ;; + target) + func_fatal_error "you must specify a target with \`-o'" + ;; + *) + # Get the name of the library object. + test -z "$libobj" && { + func_basename "$srcfile" + libobj="$func_basename_result" + } + ;; + esac + + # Recognize several different file suffixes. + # If the user specifies -o file.o, it is replaced with file.lo + case $libobj in + *.[cCFSifmso] | \ + *.ada | *.adb | *.ads | *.asm | \ + *.c++ | *.cc | *.ii | *.class | *.cpp | *.cxx | \ + *.[fF][09]? | *.for | *.java | *.go | *.obj | *.sx | *.cu | *.cup) + func_xform "$libobj" + libobj=$func_xform_result + ;; + esac + + case $libobj in + *.lo) func_lo2o "$libobj"; obj=$func_lo2o_result ;; + *) + func_fatal_error "cannot determine name of library object from \`$libobj'" + ;; + esac + + func_infer_tag $base_compile + + for arg in $later; do + case $arg in + -shared) + test "$build_libtool_libs" != yes && \ + func_fatal_configuration "can not build a shared library" + build_old_libs=no + continue + ;; + + -static) + build_libtool_libs=no + build_old_libs=yes + continue + ;; + + -prefer-pic) + pic_mode=yes + continue + ;; + + -prefer-non-pic) + pic_mode=no + continue + ;; + esac + done + + func_quote_for_eval "$libobj" + test "X$libobj" != "X$func_quote_for_eval_result" \ + && $ECHO "X$libobj" | $GREP '[]~#^*{};<>?"'"'"' &()|`$[]' \ + && func_warning "libobj name \`$libobj' may not contain shell special characters." + func_dirname_and_basename "$obj" "/" "" + objname="$func_basename_result" + xdir="$func_dirname_result" + lobj=${xdir}$objdir/$objname + + test -z "$base_compile" && \ + func_fatal_help "you must specify a compilation command" + + # Delete any leftover library objects. + if test "$build_old_libs" = yes; then + removelist="$obj $lobj $libobj ${libobj}T" + else + removelist="$lobj $libobj ${libobj}T" + fi + + # On Cygwin there's no "real" PIC flag so we must build both object types + case $host_os in + cygwin* | mingw* | pw32* | os2* | cegcc*) + pic_mode=default + ;; + esac + if test "$pic_mode" = no && test "$deplibs_check_method" != pass_all; then + # non-PIC code in shared libraries is not supported + pic_mode=default + fi + + # Calculate the filename of the output object if compiler does + # not support -o with -c + if test "$compiler_c_o" = no; then + output_obj=`$ECHO "$srcfile" | $SED 's%^.*/%%; s%\.[^.]*$%%'`.${objext} + lockfile="$output_obj.lock" + else + output_obj= + need_locks=no + lockfile= + fi + + # Lock this critical section if it is needed + # We use this script file to make the link, it avoids creating a new file + if test "$need_locks" = yes; then + until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do + func_echo "Waiting for $lockfile to be removed" + sleep 2 + done + elif test "$need_locks" = warn; then + if test -f "$lockfile"; then + $ECHO "\ +*** ERROR, $lockfile exists and contains: +`cat $lockfile 2>/dev/null` + +This indicates that another process is trying to use the same +temporary object file, and libtool could not work around it because +your compiler does not support \`-c' and \`-o' together. If you +repeat this compilation, it may succeed, by chance, but you had better +avoid parallel builds (make -j) in this platform, or get a better +compiler." 
+ + $opt_dry_run || $RM $removelist + exit $EXIT_FAILURE + fi + func_append removelist " $output_obj" + $ECHO "$srcfile" > "$lockfile" + fi + + $opt_dry_run || $RM $removelist + func_append removelist " $lockfile" + trap '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' 1 2 15 + + func_to_tool_file "$srcfile" func_convert_file_msys_to_w32 + srcfile=$func_to_tool_file_result + func_quote_for_eval "$srcfile" + qsrcfile=$func_quote_for_eval_result + + # Only build a PIC object if we are building libtool libraries. + if test "$build_libtool_libs" = yes; then + # Without this assignment, base_compile gets emptied. + fbsd_hideous_sh_bug=$base_compile + + if test "$pic_mode" != no; then + command="$base_compile $qsrcfile $pic_flag" + else + # Don't build PIC code + command="$base_compile $qsrcfile" + fi + + func_mkdir_p "$xdir$objdir" + + if test -z "$output_obj"; then + # Place PIC objects in $objdir + func_append command " -o $lobj" + fi + + func_show_eval_locale "$command" \ + 'test -n "$output_obj" && $RM $removelist; exit $EXIT_FAILURE' + + if test "$need_locks" = warn && + test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then + $ECHO "\ +*** ERROR, $lockfile contains: +`cat $lockfile 2>/dev/null` + +but it should contain: +$srcfile + +This indicates that another process is trying to use the same +temporary object file, and libtool could not work around it because +your compiler does not support \`-c' and \`-o' together. If you +repeat this compilation, it may succeed, by chance, but you had better +avoid parallel builds (make -j) in this platform, or get a better +compiler." + + $opt_dry_run || $RM $removelist + exit $EXIT_FAILURE + fi + + # Just move the object if needed, then go on to compile the next one + if test -n "$output_obj" && test "X$output_obj" != "X$lobj"; then + func_show_eval '$MV "$output_obj" "$lobj"' \ + 'error=$?; $opt_dry_run || $RM $removelist; exit $error' + fi + + # Allow error messages only from the first compilation. + if test "$suppress_opt" = yes; then + suppress_output=' >/dev/null 2>&1' + fi + fi + + # Only build a position-dependent object if we build old libraries. + if test "$build_old_libs" = yes; then + if test "$pic_mode" != yes; then + # Don't build PIC code + command="$base_compile $qsrcfile$pie_flag" + else + command="$base_compile $qsrcfile $pic_flag" + fi + if test "$compiler_c_o" = yes; then + func_append command " -o $obj" + fi + + # Suppress compiler output if we already did a PIC compilation. + func_append command "$suppress_output" + func_show_eval_locale "$command" \ + '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' + + if test "$need_locks" = warn && + test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then + $ECHO "\ +*** ERROR, $lockfile contains: +`cat $lockfile 2>/dev/null` + +but it should contain: +$srcfile + +This indicates that another process is trying to use the same +temporary object file, and libtool could not work around it because +your compiler does not support \`-c' and \`-o' together. If you +repeat this compilation, it may succeed, by chance, but you had better +avoid parallel builds (make -j) in this platform, or get a better +compiler." 
+ + $opt_dry_run || $RM $removelist + exit $EXIT_FAILURE + fi + + # Just move the object if needed + if test -n "$output_obj" && test "X$output_obj" != "X$obj"; then + func_show_eval '$MV "$output_obj" "$obj"' \ + 'error=$?; $opt_dry_run || $RM $removelist; exit $error' + fi + fi + + $opt_dry_run || { + func_write_libtool_object "$libobj" "$objdir/$objname" "$objname" + + # Unlock the critical section if it was locked + if test "$need_locks" != no; then + removelist=$lockfile + $RM "$lockfile" + fi + } + + exit $EXIT_SUCCESS +} + +$opt_help || { + test "$opt_mode" = compile && func_mode_compile ${1+"$@"} +} + +func_mode_help () +{ + # We need to display help for each of the modes. + case $opt_mode in + "") + # Generic help is extracted from the usage comments + # at the start of this file. + func_help + ;; + + clean) + $ECHO \ +"Usage: $progname [OPTION]... --mode=clean RM [RM-OPTION]... FILE... + +Remove files from the build directory. + +RM is the name of the program to use to delete files associated with each FILE +(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed +to RM. + +If FILE is a libtool library, object or program, all the files associated +with it are deleted. Otherwise, only FILE itself is deleted using RM." + ;; + + compile) + $ECHO \ +"Usage: $progname [OPTION]... --mode=compile COMPILE-COMMAND... SOURCEFILE + +Compile a source file into a libtool library object. + +This mode accepts the following additional options: + + -o OUTPUT-FILE set the output file name to OUTPUT-FILE + -no-suppress do not suppress compiler output for multiple passes + -prefer-pic try to build PIC objects only + -prefer-non-pic try to build non-PIC objects only + -shared do not build a \`.o' file suitable for static linking + -static only build a \`.o' file suitable for static linking + -Wc,FLAG pass FLAG directly to the compiler + +COMPILE-COMMAND is a command to be used in creating a \`standard' object file +from the given SOURCEFILE. + +The output file name is determined by removing the directory component from +SOURCEFILE, then substituting the C source code suffix \`.c' with the +library object suffix, \`.lo'." + ;; + + execute) + $ECHO \ +"Usage: $progname [OPTION]... --mode=execute COMMAND [ARGS]... + +Automatically set library path, then run a program. + +This mode accepts the following additional options: + + -dlopen FILE add the directory containing FILE to the library path + +This mode sets the library path environment variable according to \`-dlopen' +flags. + +If any of the ARGS are libtool executable wrappers, then they are translated +into their corresponding uninstalled binary, and any of their required library +directories are added to the library path. + +Then, COMMAND is executed, with ARGS as arguments." + ;; + + finish) + $ECHO \ +"Usage: $progname [OPTION]... --mode=finish [LIBDIR]... + +Complete the installation of libtool libraries. + +Each LIBDIR is a directory that contains libtool libraries. + +The commands that this mode executes may require superuser privileges. Use +the \`--dry-run' option if you just want to see what would be executed." + ;; + + install) + $ECHO \ +"Usage: $progname [OPTION]... --mode=install INSTALL-COMMAND... + +Install executables or libraries. + +INSTALL-COMMAND is the installation command. The first component should be +either the \`install' or \`cp' program. 
+ +The following components of INSTALL-COMMAND are treated specially: + + -inst-prefix-dir PREFIX-DIR Use PREFIX-DIR as a staging area for installation + +The rest of the components are interpreted as arguments to that command (only +BSD-compatible install options are recognized)." + ;; + + link) + $ECHO \ +"Usage: $progname [OPTION]... --mode=link LINK-COMMAND... + +Link object files or libraries together to form another library, or to +create an executable program. + +LINK-COMMAND is a command using the C compiler that you would use to create +a program from several object files. + +The following components of LINK-COMMAND are treated specially: + + -all-static do not do any dynamic linking at all + -avoid-version do not add a version suffix if possible + -bindir BINDIR specify path to binaries directory (for systems where + libraries must be found in the PATH setting at runtime) + -dlopen FILE \`-dlpreopen' FILE if it cannot be dlopened at runtime + -dlpreopen FILE link in FILE and add its symbols to lt_preloaded_symbols + -export-dynamic allow symbols from OUTPUT-FILE to be resolved with dlsym(3) + -export-symbols SYMFILE + try to export only the symbols listed in SYMFILE + -export-symbols-regex REGEX + try to export only the symbols matching REGEX + -LLIBDIR search LIBDIR for required installed libraries + -lNAME OUTPUT-FILE requires the installed library libNAME + -module build a library that can dlopened + -no-fast-install disable the fast-install mode + -no-install link a not-installable executable + -no-undefined declare that a library does not refer to external symbols + -o OUTPUT-FILE create OUTPUT-FILE from the specified objects + -objectlist FILE Use a list of object files found in FILE to specify objects + -precious-files-regex REGEX + don't remove output files matching REGEX + -release RELEASE specify package release information + -rpath LIBDIR the created library will eventually be installed in LIBDIR + -R[ ]LIBDIR add LIBDIR to the runtime path of programs and libraries + -shared only do dynamic linking of libtool libraries + -shrext SUFFIX override the standard shared library file extension + -static do not do any dynamic linking of uninstalled libtool libraries + -static-libtool-libs + do not do any dynamic linking of libtool libraries + -version-info CURRENT[:REVISION[:AGE]] + specify library version info [each variable defaults to 0] + -weak LIBNAME declare that the target provides the LIBNAME interface + -Wc,FLAG + -Xcompiler FLAG pass linker-specific FLAG directly to the compiler + -Wl,FLAG + -Xlinker FLAG pass linker-specific FLAG directly to the linker + -XCClinker FLAG pass link-specific FLAG to the compiler driver (CC) + +All other options (arguments beginning with \`-') are ignored. + +Every other argument is treated as a filename. Files ending in \`.la' are +treated as uninstalled libtool libraries, other files are standard or library +object files. + +If the OUTPUT-FILE ends in \`.la', then a libtool library is created, +only library objects (\`.lo' files) may be specified, and \`-rpath' is +required, except when creating a convenience library. + +If OUTPUT-FILE ends in \`.a' or \`.lib', then a standard library is created +using \`ar' and \`ranlib', or on Windows using \`lib'. + +If OUTPUT-FILE ends in \`.lo' or \`.${objext}', then a reloadable object file +is created, otherwise an executable program is created." + ;; + + uninstall) + $ECHO \ +"Usage: $progname [OPTION]... --mode=uninstall RM [RM-OPTION]... FILE... 
+ +Remove libraries from an installation directory. + +RM is the name of the program to use to delete files associated with each FILE +(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed +to RM. + +If FILE is a libtool library, all the files associated with it are deleted. +Otherwise, only FILE itself is deleted using RM." + ;; + + *) + func_fatal_help "invalid operation mode \`$opt_mode'" + ;; + esac + + echo + $ECHO "Try \`$progname --help' for more information about other modes." +} + +# Now that we've collected a possible --mode arg, show help if necessary +if $opt_help; then + if test "$opt_help" = :; then + func_mode_help + else + { + func_help noexit + for opt_mode in compile link execute install finish uninstall clean; do + func_mode_help + done + } | sed -n '1p; 2,$s/^Usage:/ or: /p' + { + func_help noexit + for opt_mode in compile link execute install finish uninstall clean; do + echo + func_mode_help + done + } | + sed '1d + /^When reporting/,/^Report/{ + H + d + } + $x + /information about other modes/d + /more detailed .*MODE/d + s/^Usage:.*--mode=\([^ ]*\) .*/Description of \1 mode:/' + fi + exit $? +fi + + +# func_mode_execute arg... +func_mode_execute () +{ + $opt_debug + # The first argument is the command name. + cmd="$nonopt" + test -z "$cmd" && \ + func_fatal_help "you must specify a COMMAND" + + # Handle -dlopen flags immediately. + for file in $opt_dlopen; do + test -f "$file" \ + || func_fatal_help "\`$file' is not a file" + + dir= + case $file in + *.la) + func_resolve_sysroot "$file" + file=$func_resolve_sysroot_result + + # Check to see that this really is a libtool archive. + func_lalib_unsafe_p "$file" \ + || func_fatal_help "\`$lib' is not a valid libtool archive" + + # Read the libtool library. + dlname= + library_names= + func_source "$file" + + # Skip this library if it cannot be dlopened. + if test -z "$dlname"; then + # Warn if it was a shared library. + test -n "$library_names" && \ + func_warning "\`$file' was not linked with \`-export-dynamic'" + continue + fi + + func_dirname "$file" "" "." + dir="$func_dirname_result" + + if test -f "$dir/$objdir/$dlname"; then + func_append dir "/$objdir" + else + if test ! -f "$dir/$dlname"; then + func_fatal_error "cannot find \`$dlname' in \`$dir' or \`$dir/$objdir'" + fi + fi + ;; + + *.lo) + # Just add the directory containing the .lo file. + func_dirname "$file" "" "." + dir="$func_dirname_result" + ;; + + *) + func_warning "\`-dlopen' is ignored for non-libtool libraries and objects" + continue + ;; + esac + + # Get the absolute pathname. + absdir=`cd "$dir" && pwd` + test -n "$absdir" && dir="$absdir" + + # Now add the directory to shlibpath_var. + if eval "test -z \"\$$shlibpath_var\""; then + eval "$shlibpath_var=\"\$dir\"" + else + eval "$shlibpath_var=\"\$dir:\$$shlibpath_var\"" + fi + done + + # This variable tells wrapper scripts just to set shlibpath_var + # rather than running their programs. + libtool_execute_magic="$magic" + + # Check if any of the arguments is a wrapper script. + args= + for file + do + case $file in + -* | *.la | *.lo ) ;; + *) + # Do a test to see if this is really a libtool program. + if func_ltwrapper_script_p "$file"; then + func_source "$file" + # Transform arg to wrapped name. + file="$progdir/$program" + elif func_ltwrapper_executable_p "$file"; then + func_ltwrapper_scriptname "$file" + func_source "$func_ltwrapper_scriptname_result" + # Transform arg to wrapped name. 
+ file="$progdir/$program" + fi + ;; + esac + # Quote arguments (to preserve shell metacharacters). + func_append_quoted args "$file" + done + + if test "X$opt_dry_run" = Xfalse; then + if test -n "$shlibpath_var"; then + # Export the shlibpath_var. + eval "export $shlibpath_var" + fi + + # Restore saved environment variables + for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES + do + eval "if test \"\${save_$lt_var+set}\" = set; then + $lt_var=\$save_$lt_var; export $lt_var + else + $lt_unset $lt_var + fi" + done + + # Now prepare to actually exec the command. + exec_cmd="\$cmd$args" + else + # Display what would be done. + if test -n "$shlibpath_var"; then + eval "\$ECHO \"\$shlibpath_var=\$$shlibpath_var\"" + echo "export $shlibpath_var" + fi + $ECHO "$cmd$args" + exit $EXIT_SUCCESS + fi +} + +test "$opt_mode" = execute && func_mode_execute ${1+"$@"} + + +# func_mode_finish arg... +func_mode_finish () +{ + $opt_debug + libs= + libdirs= + admincmds= + + for opt in "$nonopt" ${1+"$@"} + do + if test -d "$opt"; then + func_append libdirs " $opt" + + elif test -f "$opt"; then + if func_lalib_unsafe_p "$opt"; then + func_append libs " $opt" + else + func_warning "\`$opt' is not a valid libtool archive" + fi + + else + func_fatal_error "invalid argument \`$opt'" + fi + done + + if test -n "$libs"; then + if test -n "$lt_sysroot"; then + sysroot_regex=`$ECHO "$lt_sysroot" | $SED "$sed_make_literal_regex"` + sysroot_cmd="s/\([ ']\)$sysroot_regex/\1/g;" + else + sysroot_cmd= + fi + + # Remove sysroot references + if $opt_dry_run; then + for lib in $libs; do + echo "removing references to $lt_sysroot and \`=' prefixes from $lib" + done + else + tmpdir=`func_mktempdir` + for lib in $libs; do + sed -e "${sysroot_cmd} s/\([ ']-[LR]\)=/\1/g; s/\([ ']\)=/\1/g" $lib \ + > $tmpdir/tmp-la + mv -f $tmpdir/tmp-la $lib + done + ${RM}r "$tmpdir" + fi + fi + + if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then + for libdir in $libdirs; do + if test -n "$finish_cmds"; then + # Do each command in the finish commands. + func_execute_cmds "$finish_cmds" 'admincmds="$admincmds +'"$cmd"'"' + fi + if test -n "$finish_eval"; then + # Do the single finish_eval. + eval cmds=\"$finish_eval\" + $opt_dry_run || eval "$cmds" || func_append admincmds " + $cmds" + fi + done + fi + + # Exit here if they wanted silent mode. 
+ $opt_silent && exit $EXIT_SUCCESS + + if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then + echo "----------------------------------------------------------------------" + echo "Libraries have been installed in:" + for libdir in $libdirs; do + $ECHO " $libdir" + done + echo + echo "If you ever happen to want to link against installed libraries" + echo "in a given directory, LIBDIR, you must either use libtool, and" + echo "specify the full pathname of the library, or use the \`-LLIBDIR'" + echo "flag during linking and do at least one of the following:" + if test -n "$shlibpath_var"; then + echo " - add LIBDIR to the \`$shlibpath_var' environment variable" + echo " during execution" + fi + if test -n "$runpath_var"; then + echo " - add LIBDIR to the \`$runpath_var' environment variable" + echo " during linking" + fi + if test -n "$hardcode_libdir_flag_spec"; then + libdir=LIBDIR + eval flag=\"$hardcode_libdir_flag_spec\" + + $ECHO " - use the \`$flag' linker flag" + fi + if test -n "$admincmds"; then + $ECHO " - have your system administrator run these commands:$admincmds" + fi + if test -f /etc/ld.so.conf; then + echo " - have your system administrator add LIBDIR to \`/etc/ld.so.conf'" + fi + echo + + echo "See any operating system documentation about shared libraries for" + case $host in + solaris2.[6789]|solaris2.1[0-9]) + echo "more information, such as the ld(1), crle(1) and ld.so(8) manual" + echo "pages." + ;; + *) + echo "more information, such as the ld(1) and ld.so(8) manual pages." + ;; + esac + echo "----------------------------------------------------------------------" + fi + exit $EXIT_SUCCESS +} + +test "$opt_mode" = finish && func_mode_finish ${1+"$@"} + + +# func_mode_install arg... +func_mode_install () +{ + $opt_debug + # There may be an optional sh(1) argument at the beginning of + # install_prog (especially on Windows NT). + if test "$nonopt" = "$SHELL" || test "$nonopt" = /bin/sh || + # Allow the use of GNU shtool's install command. + case $nonopt in *shtool*) :;; *) false;; esac; then + # Aesthetically quote it. + func_quote_for_eval "$nonopt" + install_prog="$func_quote_for_eval_result " + arg=$1 + shift + else + install_prog= + arg=$nonopt + fi + + # The real first argument should be the name of the installation program. + # Aesthetically quote it. + func_quote_for_eval "$arg" + func_append install_prog "$func_quote_for_eval_result" + install_shared_prog=$install_prog + case " $install_prog " in + *[\\\ /]cp\ *) install_cp=: ;; + *) install_cp=false ;; + esac + + # We need to accept at least all the BSD install flags. + dest= + files= + opts= + prev= + install_type= + isdir=no + stripme= + no_mode=: + for arg + do + arg2= + if test -n "$dest"; then + func_append files " $dest" + dest=$arg + continue + fi + + case $arg in + -d) isdir=yes ;; + -f) + if $install_cp; then :; else + prev=$arg + fi + ;; + -g | -m | -o) + prev=$arg + ;; + -s) + stripme=" -s" + continue + ;; + -*) + ;; + *) + # If the previous option needed an argument, then skip it. + if test -n "$prev"; then + if test "x$prev" = x-m && test -n "$install_override_mode"; then + arg2=$install_override_mode + no_mode=false + fi + prev= + else + dest=$arg + continue + fi + ;; + esac + + # Aesthetically quote the argument. 
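+ # Illustrative example of how the loop above splits a command line
+ # (libfoo.la and the paths are hypothetical):
+ #
+ #   libtool --mode=install /usr/bin/install -c libfoo.la /usr/local/lib/libfoo.la
+ #
+ # "/usr/bin/install -c" becomes $install_prog, libfoo.la ends up in
+ # $files, and /usr/local/lib/libfoo.la becomes $dest.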
+ func_quote_for_eval "$arg" + func_append install_prog " $func_quote_for_eval_result" + if test -n "$arg2"; then + func_quote_for_eval "$arg2" + fi + func_append install_shared_prog " $func_quote_for_eval_result" + done + + test -z "$install_prog" && \ + func_fatal_help "you must specify an install program" + + test -n "$prev" && \ + func_fatal_help "the \`$prev' option requires an argument" + + if test -n "$install_override_mode" && $no_mode; then + if $install_cp; then :; else + func_quote_for_eval "$install_override_mode" + func_append install_shared_prog " -m $func_quote_for_eval_result" + fi + fi + + if test -z "$files"; then + if test -z "$dest"; then + func_fatal_help "no file or destination specified" + else + func_fatal_help "you must specify a destination" + fi + fi + + # Strip any trailing slash from the destination. + func_stripname '' '/' "$dest" + dest=$func_stripname_result + + # Check to see that the destination is a directory. + test -d "$dest" && isdir=yes + if test "$isdir" = yes; then + destdir="$dest" + destname= + else + func_dirname_and_basename "$dest" "" "." + destdir="$func_dirname_result" + destname="$func_basename_result" + + # Not a directory, so check to see that there is only one file specified. + set dummy $files; shift + test "$#" -gt 1 && \ + func_fatal_help "\`$dest' is not a directory" + fi + case $destdir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + for file in $files; do + case $file in + *.lo) ;; + *) + func_fatal_help "\`$destdir' must be an absolute directory name" + ;; + esac + done + ;; + esac + + # This variable tells wrapper scripts just to set variables rather + # than running their programs. + libtool_install_magic="$magic" + + staticlibs= + future_libdirs= + current_libdirs= + for file in $files; do + + # Do each installation. + case $file in + *.$libext) + # Do the static libraries later. + func_append staticlibs " $file" + ;; + + *.la) + func_resolve_sysroot "$file" + file=$func_resolve_sysroot_result + + # Check to see that this really is a libtool archive. + func_lalib_unsafe_p "$file" \ + || func_fatal_help "\`$file' is not a valid libtool archive" + + library_names= + old_library= + relink_command= + func_source "$file" + + # Add the libdir to current_libdirs if it is the destination. + if test "X$destdir" = "X$libdir"; then + case "$current_libdirs " in + *" $libdir "*) ;; + *) func_append current_libdirs " $libdir" ;; + esac + else + # Note the libdir as a future libdir. + case "$future_libdirs " in + *" $libdir "*) ;; + *) func_append future_libdirs " $libdir" ;; + esac + fi + + func_dirname "$file" "/" "" + dir="$func_dirname_result" + func_append dir "$objdir" + + if test -n "$relink_command"; then + # Determine the prefix the user has applied to our future dir. + inst_prefix_dir=`$ECHO "$destdir" | $SED -e "s%$libdir\$%%"` + + # Don't allow the user to place us outside of our expected + # location b/c this prevents finding dependent libraries that + # are installed to the same prefix. + # At present, this check doesn't affect windows .dll's that + # are installed into $libdir/../bin (currently, that works fine) + # but it's something to keep an eye on. + test "$inst_prefix_dir" = "$destdir" && \ + func_fatal_error "error: cannot install \`$file' to a directory not ending in $libdir" + + if test -n "$inst_prefix_dir"; then + # Stick the inst_prefix_dir data into the link command. 
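+ # Illustrative commentary: when the package is installed into a staging
+ # tree (for example "make install DESTDIR=/tmp/stage", a hypothetical
+ # path), $destdir is /tmp/stage$libdir, so the computation above yields
+ # inst_prefix_dir=/tmp/stage.  Passing it via -inst-prefix-dir lets the
+ # relink step find dependent libraries that were already staged there.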
+ relink_command=`$ECHO "$relink_command" | $SED "s%@inst_prefix_dir@%-inst-prefix-dir $inst_prefix_dir%"` + else + relink_command=`$ECHO "$relink_command" | $SED "s%@inst_prefix_dir@%%"` + fi + + func_warning "relinking \`$file'" + func_show_eval "$relink_command" \ + 'func_fatal_error "error: relink \`$file'\'' with the above command before installing it"' + fi + + # See the names of the shared library. + set dummy $library_names; shift + if test -n "$1"; then + realname="$1" + shift + + srcname="$realname" + test -n "$relink_command" && srcname="$realname"T + + # Install the shared library and build the symlinks. + func_show_eval "$install_shared_prog $dir/$srcname $destdir/$realname" \ + 'exit $?' + tstripme="$stripme" + case $host_os in + cygwin* | mingw* | pw32* | cegcc*) + case $realname in + *.dll.a) + tstripme="" + ;; + esac + ;; + esac + if test -n "$tstripme" && test -n "$striplib"; then + func_show_eval "$striplib $destdir/$realname" 'exit $?' + fi + + if test "$#" -gt 0; then + # Delete the old symlinks, and create new ones. + # Try `ln -sf' first, because the `ln' binary might depend on + # the symlink we replace! Solaris /bin/ln does not understand -f, + # so we also need to try rm && ln -s. + for linkname + do + test "$linkname" != "$realname" \ + && func_show_eval "(cd $destdir && { $LN_S -f $realname $linkname || { $RM $linkname && $LN_S $realname $linkname; }; })" + done + fi + + # Do each command in the postinstall commands. + lib="$destdir/$realname" + func_execute_cmds "$postinstall_cmds" 'exit $?' + fi + + # Install the pseudo-library for information purposes. + func_basename "$file" + name="$func_basename_result" + instname="$dir/$name"i + func_show_eval "$install_prog $instname $destdir/$name" 'exit $?' + + # Maybe install the static library, too. + test -n "$old_library" && func_append staticlibs " $dir/$old_library" + ;; + + *.lo) + # Install (i.e. copy) a libtool object. + + # Figure out destination file name, if it wasn't already specified. + if test -n "$destname"; then + destfile="$destdir/$destname" + else + func_basename "$file" + destfile="$func_basename_result" + destfile="$destdir/$destfile" + fi + + # Deduce the name of the destination old-style object file. + case $destfile in + *.lo) + func_lo2o "$destfile" + staticdest=$func_lo2o_result + ;; + *.$objext) + staticdest="$destfile" + destfile= + ;; + *) + func_fatal_help "cannot copy a libtool object to \`$destfile'" + ;; + esac + + # Install the libtool object if requested. + test -n "$destfile" && \ + func_show_eval "$install_prog $file $destfile" 'exit $?' + + # Install the old object if enabled. + if test "$build_old_libs" = yes; then + # Deduce the name of the old-style object file. + func_lo2o "$file" + staticobj=$func_lo2o_result + func_show_eval "$install_prog \$staticobj \$staticdest" 'exit $?' + fi + exit $EXIT_SUCCESS + ;; + + *) + # Figure out destination file name, if it wasn't already specified. + if test -n "$destname"; then + destfile="$destdir/$destname" + else + func_basename "$file" + destfile="$func_basename_result" + destfile="$destdir/$destfile" + fi + + # If the file is missing, and there is a .exe on the end, strip it + # because it is most likely a libtool script we actually want to + # install + stripped_ext="" + case $file in + *.exe) + if test ! -f "$file"; then + func_stripname '' '.exe' "$file" + file=$func_stripname_result + stripped_ext=".exe" + fi + ;; + esac + + # Do a test to see if this is really a libtool program. 
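+ # Illustrative commentary: an uninstalled libtool program is normally a
+ # wrapper script, with the real binary kept under $objdir.  For a
+ # hypothetical program "foo", ./foo is the wrapper and .libs/foo (or
+ # .libs/lt-foo when fast-install is enabled) is the executable that is
+ # relinked if necessary and then copied by the code below.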
+ case $host in + *cygwin* | *mingw*) + if func_ltwrapper_executable_p "$file"; then + func_ltwrapper_scriptname "$file" + wrapper=$func_ltwrapper_scriptname_result + else + func_stripname '' '.exe' "$file" + wrapper=$func_stripname_result + fi + ;; + *) + wrapper=$file + ;; + esac + if func_ltwrapper_script_p "$wrapper"; then + notinst_deplibs= + relink_command= + + func_source "$wrapper" + + # Check the variables that should have been set. + test -z "$generated_by_libtool_version" && \ + func_fatal_error "invalid libtool wrapper script \`$wrapper'" + + finalize=yes + for lib in $notinst_deplibs; do + # Check to see that each library is installed. + libdir= + if test -f "$lib"; then + func_source "$lib" + fi + libfile="$libdir/"`$ECHO "$lib" | $SED 's%^.*/%%g'` ### testsuite: skip nested quoting test + if test -n "$libdir" && test ! -f "$libfile"; then + func_warning "\`$lib' has not been installed in \`$libdir'" + finalize=no + fi + done + + relink_command= + func_source "$wrapper" + + outputname= + if test "$fast_install" = no && test -n "$relink_command"; then + $opt_dry_run || { + if test "$finalize" = yes; then + tmpdir=`func_mktempdir` + func_basename "$file$stripped_ext" + file="$func_basename_result" + outputname="$tmpdir/$file" + # Replace the output file specification. + relink_command=`$ECHO "$relink_command" | $SED 's%@OUTPUT@%'"$outputname"'%g'` + + $opt_silent || { + func_quote_for_expand "$relink_command" + eval "func_echo $func_quote_for_expand_result" + } + if eval "$relink_command"; then : + else + func_error "error: relink \`$file' with the above command before installing it" + $opt_dry_run || ${RM}r "$tmpdir" + continue + fi + file="$outputname" + else + func_warning "cannot relink \`$file'" + fi + } + else + # Install the binary that we compiled earlier. + file=`$ECHO "$file$stripped_ext" | $SED "s%\([^/]*\)$%$objdir/\1%"` + fi + fi + + # remove .exe since cygwin /usr/bin/install will append another + # one anyway + case $install_prog,$host in + */usr/bin/install*,*cygwin*) + case $file:$destfile in + *.exe:*.exe) + # this is ok + ;; + *.exe:*) + destfile=$destfile.exe + ;; + *:*.exe) + func_stripname '' '.exe' "$destfile" + destfile=$func_stripname_result + ;; + esac + ;; + esac + func_show_eval "$install_prog\$stripme \$file \$destfile" 'exit $?' + $opt_dry_run || if test -n "$outputname"; then + ${RM}r "$tmpdir" + fi + ;; + esac + done + + for file in $staticlibs; do + func_basename "$file" + name="$func_basename_result" + + # Set up the ranlib parameters. + oldlib="$destdir/$name" + func_to_tool_file "$oldlib" func_convert_file_msys_to_w32 + tool_oldlib=$func_to_tool_file_result + + func_show_eval "$install_prog \$file \$oldlib" 'exit $?' + + if test -n "$stripme" && test -n "$old_striplib"; then + func_show_eval "$old_striplib $tool_oldlib" 'exit $?' + fi + + # Do each command in the postinstall commands. + func_execute_cmds "$old_postinstall_cmds" 'exit $?' + done + + test -n "$future_libdirs" && \ + func_warning "remember to run \`$progname --finish$future_libdirs'" + + if test -n "$current_libdirs"; then + # Maybe just do a dry run. + $opt_dry_run && current_libdirs=" -n$current_libdirs" + exec_cmd='$SHELL $progpath $preserve_args --finish$current_libdirs' + else + exit $EXIT_SUCCESS + fi +} + +test "$opt_mode" = install && func_mode_install ${1+"$@"} + + +# func_generate_dlsyms outputname originator pic_p +# Extract symbols from dlprefiles and create ${outputname}S.o with +# a dlpreopen symbol table. 
+func_generate_dlsyms ()
+{
+ $opt_debug
+ my_outputname="$1"
+ my_originator="$2"
+ my_pic_p="${3-no}"
+ my_prefix=`$ECHO "$my_originator" | sed 's%[^a-zA-Z0-9]%_%g'`
+ my_dlsyms=
+
+ if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then
+ if test -n "$NM" && test -n "$global_symbol_pipe"; then
+ my_dlsyms="${my_outputname}S.c"
+ else
+ func_error "not configured to extract global symbols from dlpreopened files"
+ fi
+ fi
+
+ if test -n "$my_dlsyms"; then
+ case $my_dlsyms in
+ "") ;;
+ *.c)
+ # Discover the nlist of each of the dlfiles.
+ nlist="$output_objdir/${my_outputname}.nm"
+
+ func_show_eval "$RM $nlist ${nlist}S ${nlist}T"
+
+ # Parse the name list into a source file.
+ func_verbose "creating $output_objdir/$my_dlsyms"
+
+ $opt_dry_run || $ECHO > "$output_objdir/$my_dlsyms" "\
+/* $my_dlsyms - symbol resolution table for \`$my_outputname' dlsym emulation. */
+/* Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION */
+
+#ifdef __cplusplus
+extern \"C\" {
+#endif
+
+#if defined(__GNUC__) && (((__GNUC__ == 4) && (__GNUC_MINOR__ >= 4)) || (__GNUC__ > 4))
+#pragma GCC diagnostic ignored \"-Wstrict-prototypes\"
+#endif
+
+/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */
+#if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE)
+/* DATA imports from DLLs on WIN32 can't be const, because runtime
+ relocations are performed -- see ld's documentation on pseudo-relocs. */
+# define LT_DLSYM_CONST
+#elif defined(__osf__)
+/* This system does not cope well with relocations in const data. */
+# define LT_DLSYM_CONST
+#else
+# define LT_DLSYM_CONST const
+#endif
+
+/* External symbol declarations for the compiler. */\
+"
+
+ if test "$dlself" = yes; then
+ func_verbose "generating symbol list for \`$output'"
+
+ $opt_dry_run || echo ': @PROGRAM@ ' > "$nlist"
+
+ # Add our own program objects to the symbol list.
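+ # Illustrative commentary: each object is run through $NM and
+ # $global_symbol_pipe, which typically reduces the output to lines of
+ # the form "T foo_init foo_init" (type, C name, real name; foo_init is
+ # a hypothetical symbol).  Later steps turn those lines into the C
+ # declarations and lt_dlsymlist entries written to $my_dlsyms.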
+ progfiles=`$ECHO "$objs$old_deplibs" | $SP2NL | $SED "$lo2o" | $NL2SP` + for progfile in $progfiles; do + func_to_tool_file "$progfile" func_convert_file_msys_to_w32 + func_verbose "extracting global C symbols from \`$func_to_tool_file_result'" + $opt_dry_run || eval "$NM $func_to_tool_file_result | $global_symbol_pipe >> '$nlist'" + done + + if test -n "$exclude_expsyms"; then + $opt_dry_run || { + eval '$EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T' + eval '$MV "$nlist"T "$nlist"' + } + fi + + if test -n "$export_symbols_regex"; then + $opt_dry_run || { + eval '$EGREP -e "$export_symbols_regex" "$nlist" > "$nlist"T' + eval '$MV "$nlist"T "$nlist"' + } + fi + + # Prepare the list of exported symbols + if test -z "$export_symbols"; then + export_symbols="$output_objdir/$outputname.exp" + $opt_dry_run || { + $RM $export_symbols + eval "${SED} -n -e '/^: @PROGRAM@ $/d' -e 's/^.* \(.*\)$/\1/p' "'< "$nlist" > "$export_symbols"' + case $host in + *cygwin* | *mingw* | *cegcc* ) + eval "echo EXPORTS "'> "$output_objdir/$outputname.def"' + eval 'cat "$export_symbols" >> "$output_objdir/$outputname.def"' + ;; + esac + } + else + $opt_dry_run || { + eval "${SED} -e 's/\([].[*^$]\)/\\\\\1/g' -e 's/^/ /' -e 's/$/$/'"' < "$export_symbols" > "$output_objdir/$outputname.exp"' + eval '$GREP -f "$output_objdir/$outputname.exp" < "$nlist" > "$nlist"T' + eval '$MV "$nlist"T "$nlist"' + case $host in + *cygwin* | *mingw* | *cegcc* ) + eval "echo EXPORTS "'> "$output_objdir/$outputname.def"' + eval 'cat "$nlist" >> "$output_objdir/$outputname.def"' + ;; + esac + } + fi + fi + + for dlprefile in $dlprefiles; do + func_verbose "extracting global C symbols from \`$dlprefile'" + func_basename "$dlprefile" + name="$func_basename_result" + case $host in + *cygwin* | *mingw* | *cegcc* ) + # if an import library, we need to obtain dlname + if func_win32_import_lib_p "$dlprefile"; then + func_tr_sh "$dlprefile" + eval "curr_lafile=\$libfile_$func_tr_sh_result" + dlprefile_dlbasename="" + if test -n "$curr_lafile" && func_lalib_p "$curr_lafile"; then + # Use subshell, to avoid clobbering current variable values + dlprefile_dlname=`source "$curr_lafile" && echo "$dlname"` + if test -n "$dlprefile_dlname" ; then + func_basename "$dlprefile_dlname" + dlprefile_dlbasename="$func_basename_result" + else + # no lafile. user explicitly requested -dlpreopen . + $sharedlib_from_linklib_cmd "$dlprefile" + dlprefile_dlbasename=$sharedlib_from_linklib_result + fi + fi + $opt_dry_run || { + if test -n "$dlprefile_dlbasename" ; then + eval '$ECHO ": $dlprefile_dlbasename" >> "$nlist"' + else + func_warning "Could not compute DLL name from $name" + eval '$ECHO ": $name " >> "$nlist"' + fi + func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 + eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe | + $SED -e '/I __imp/d' -e 's/I __nm_/D /;s/_nm__//' >> '$nlist'" + } + else # not an import lib + $opt_dry_run || { + eval '$ECHO ": $name " >> "$nlist"' + func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 + eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'" + } + fi + ;; + *) + $opt_dry_run || { + eval '$ECHO ": $name " >> "$nlist"' + func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 + eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'" + } + ;; + esac + done + + $opt_dry_run || { + # Make sure we have at least an empty file. 
+ test -f "$nlist" || : > "$nlist" + + if test -n "$exclude_expsyms"; then + $EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T + $MV "$nlist"T "$nlist" + fi + + # Try sorting and uniquifying the output. + if $GREP -v "^: " < "$nlist" | + if sort -k 3 /dev/null 2>&1; then + sort -k 3 + else + sort +2 + fi | + uniq > "$nlist"S; then + : + else + $GREP -v "^: " < "$nlist" > "$nlist"S + fi + + if test -f "$nlist"S; then + eval "$global_symbol_to_cdecl"' < "$nlist"S >> "$output_objdir/$my_dlsyms"' + else + echo '/* NONE */' >> "$output_objdir/$my_dlsyms" + fi + + echo >> "$output_objdir/$my_dlsyms" "\ + +/* The mapping between symbol names and symbols. */ +typedef struct { + const char *name; + void *address; +} lt_dlsymlist; +extern LT_DLSYM_CONST lt_dlsymlist +lt_${my_prefix}_LTX_preloaded_symbols[]; +LT_DLSYM_CONST lt_dlsymlist +lt_${my_prefix}_LTX_preloaded_symbols[] = +{\ + { \"$my_originator\", (void *) 0 }," + + case $need_lib_prefix in + no) + eval "$global_symbol_to_c_name_address" < "$nlist" >> "$output_objdir/$my_dlsyms" + ;; + *) + eval "$global_symbol_to_c_name_address_lib_prefix" < "$nlist" >> "$output_objdir/$my_dlsyms" + ;; + esac + echo >> "$output_objdir/$my_dlsyms" "\ + {0, (void *) 0} +}; + +/* This works around a problem in FreeBSD linker */ +#ifdef FREEBSD_WORKAROUND +static const void *lt_preloaded_setup() { + return lt_${my_prefix}_LTX_preloaded_symbols; +} +#endif + +#ifdef __cplusplus +} +#endif\ +" + } # !$opt_dry_run + + pic_flag_for_symtable= + case "$compile_command " in + *" -static "*) ;; + *) + case $host in + # compiling the symbol table file with pic_flag works around + # a FreeBSD bug that causes programs to crash when -lm is + # linked before any other PIC object. But we must not use + # pic_flag when linking with -static. The problem exists in + # FreeBSD 2.2.6 and is fixed in FreeBSD 3.1. + *-*-freebsd2.*|*-*-freebsd3.0*|*-*-freebsdelf3.0*) + pic_flag_for_symtable=" $pic_flag -DFREEBSD_WORKAROUND" ;; + *-*-hpux*) + pic_flag_for_symtable=" $pic_flag" ;; + *) + if test "X$my_pic_p" != Xno; then + pic_flag_for_symtable=" $pic_flag" + fi + ;; + esac + ;; + esac + symtab_cflags= + for arg in $LTCFLAGS; do + case $arg in + -pie | -fpie | -fPIE) ;; + *) func_append symtab_cflags " $arg" ;; + esac + done + + # Now compile the dynamic symbol file. + func_show_eval '(cd $output_objdir && $LTCC$symtab_cflags -c$no_builtin_flag$pic_flag_for_symtable "$my_dlsyms")' 'exit $?' + + # Clean up the generated files. + func_show_eval '$RM "$output_objdir/$my_dlsyms" "$nlist" "${nlist}S" "${nlist}T"' + + # Transform the symbol file into the correct name. + symfileobj="$output_objdir/${my_outputname}S.$objext" + case $host in + *cygwin* | *mingw* | *cegcc* ) + if test -f "$output_objdir/$my_outputname.def"; then + compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"` + finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"` + else + compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$symfileobj%"` + finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$symfileobj%"` + fi + ;; + *) + compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$symfileobj%"` + finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$symfileobj%"` + ;; + esac + ;; + *) + func_fatal_error "unknown suffix for \`$my_dlsyms'" + ;; + esac + else + # We keep going just in case the user didn't refer to + # lt_preloaded_symbols. 
The linker will fail if global_symbol_pipe + # really was required. + + # Nullify the symbol file. + compile_command=`$ECHO "$compile_command" | $SED "s% @SYMFILE@%%"` + finalize_command=`$ECHO "$finalize_command" | $SED "s% @SYMFILE@%%"` + fi +} + +# func_win32_libid arg +# return the library type of file 'arg' +# +# Need a lot of goo to handle *both* DLLs and import libs +# Has to be a shell function in order to 'eat' the argument +# that is supplied when $file_magic_command is called. +# Despite the name, also deal with 64 bit binaries. +func_win32_libid () +{ + $opt_debug + win32_libid_type="unknown" + win32_fileres=`file -L $1 2>/dev/null` + case $win32_fileres in + *ar\ archive\ import\ library*) # definitely import + win32_libid_type="x86 archive import" + ;; + *ar\ archive*) # could be an import, or static + # Keep the egrep pattern in sync with the one in _LT_CHECK_MAGIC_METHOD. + if eval $OBJDUMP -f $1 | $SED -e '10q' 2>/dev/null | + $EGREP 'file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' >/dev/null; then + func_to_tool_file "$1" func_convert_file_msys_to_w32 + win32_nmres=`eval $NM -f posix -A \"$func_to_tool_file_result\" | + $SED -n -e ' + 1,100{ + / I /{ + s,.*,import, + p + q + } + }'` + case $win32_nmres in + import*) win32_libid_type="x86 archive import";; + *) win32_libid_type="x86 archive static";; + esac + fi + ;; + *DLL*) + win32_libid_type="x86 DLL" + ;; + *executable*) # but shell scripts are "executable" too... + case $win32_fileres in + *MS\ Windows\ PE\ Intel*) + win32_libid_type="x86 DLL" + ;; + esac + ;; + esac + $ECHO "$win32_libid_type" +} + +# func_cygming_dll_for_implib ARG +# +# Platform-specific function to extract the +# name of the DLL associated with the specified +# import library ARG. +# Invoked by eval'ing the libtool variable +# $sharedlib_from_linklib_cmd +# Result is available in the variable +# $sharedlib_from_linklib_result +func_cygming_dll_for_implib () +{ + $opt_debug + sharedlib_from_linklib_result=`$DLLTOOL --identify-strict --identify "$1"` +} + +# func_cygming_dll_for_implib_fallback_core SECTION_NAME LIBNAMEs +# +# The is the core of a fallback implementation of a +# platform-specific function to extract the name of the +# DLL associated with the specified import library LIBNAME. +# +# SECTION_NAME is either .idata$6 or .idata$7, depending +# on the platform and compiler that created the implib. +# +# Echos the name of the DLL associated with the +# specified import library. +func_cygming_dll_for_implib_fallback_core () +{ + $opt_debug + match_literal=`$ECHO "$1" | $SED "$sed_make_literal_regex"` + $OBJDUMP -s --section "$1" "$2" 2>/dev/null | + $SED '/^Contents of section '"$match_literal"':/{ + # Place marker at beginning of archive member dllname section + s/.*/====MARK====/ + p + d + } + # These lines can sometimes be longer than 43 characters, but + # are always uninteresting + /:[ ]*file format pe[i]\{,1\}-/d + /^In archive [^:]*:/d + # Ensure marker is printed + /^====MARK====/p + # Remove all lines with less than 43 characters + /^.\{43\}/!d + # From remaining lines, remove first 43 characters + s/^.\{43\}//' | + $SED -n ' + # Join marker and all lines until next marker into a single line + /^====MARK====/ b para + H + $ b para + b + :para + x + s/\n//g + # Remove the marker + s/^====MARK====// + # Remove trailing dots and whitespace + s/[\. 
\t]*$// + # Print + /./p' | + # we now have a list, one entry per line, of the stringified + # contents of the appropriate section of all members of the + # archive which possess that section. Heuristic: eliminate + # all those which have a first or second character that is + # a '.' (that is, objdump's representation of an unprintable + # character.) This should work for all archives with less than + # 0x302f exports -- but will fail for DLLs whose name actually + # begins with a literal '.' or a single character followed by + # a '.'. + # + # Of those that remain, print the first one. + $SED -e '/^\./d;/^.\./d;q' +} + +# func_cygming_gnu_implib_p ARG +# This predicate returns with zero status (TRUE) if +# ARG is a GNU/binutils-style import library. Returns +# with nonzero status (FALSE) otherwise. +func_cygming_gnu_implib_p () +{ + $opt_debug + func_to_tool_file "$1" func_convert_file_msys_to_w32 + func_cygming_gnu_implib_tmp=`$NM "$func_to_tool_file_result" | eval "$global_symbol_pipe" | $EGREP ' (_head_[A-Za-z0-9_]+_[ad]l*|[A-Za-z0-9_]+_[ad]l*_iname)$'` + test -n "$func_cygming_gnu_implib_tmp" +} + +# func_cygming_ms_implib_p ARG +# This predicate returns with zero status (TRUE) if +# ARG is an MS-style import library. Returns +# with nonzero status (FALSE) otherwise. +func_cygming_ms_implib_p () +{ + $opt_debug + func_to_tool_file "$1" func_convert_file_msys_to_w32 + func_cygming_ms_implib_tmp=`$NM "$func_to_tool_file_result" | eval "$global_symbol_pipe" | $GREP '_NULL_IMPORT_DESCRIPTOR'` + test -n "$func_cygming_ms_implib_tmp" +} + +# func_cygming_dll_for_implib_fallback ARG +# Platform-specific function to extract the +# name of the DLL associated with the specified +# import library ARG. +# +# This fallback implementation is for use when $DLLTOOL +# does not support the --identify-strict option. +# Invoked by eval'ing the libtool variable +# $sharedlib_from_linklib_cmd +# Result is available in the variable +# $sharedlib_from_linklib_result +func_cygming_dll_for_implib_fallback () +{ + $opt_debug + if func_cygming_gnu_implib_p "$1" ; then + # binutils import library + sharedlib_from_linklib_result=`func_cygming_dll_for_implib_fallback_core '.idata$7' "$1"` + elif func_cygming_ms_implib_p "$1" ; then + # ms-generated import library + sharedlib_from_linklib_result=`func_cygming_dll_for_implib_fallback_core '.idata$6' "$1"` + else + # unknown + sharedlib_from_linklib_result="" + fi +} + + +# func_extract_an_archive dir oldlib +func_extract_an_archive () +{ + $opt_debug + f_ex_an_ar_dir="$1"; shift + f_ex_an_ar_oldlib="$1" + if test "$lock_old_archive_extraction" = yes; then + lockfile=$f_ex_an_ar_oldlib.lock + until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do + func_echo "Waiting for $lockfile to be removed" + sleep 2 + done + fi + func_show_eval "(cd \$f_ex_an_ar_dir && $AR x \"\$f_ex_an_ar_oldlib\")" \ + 'stat=$?; rm -f "$lockfile"; exit $stat' + if test "$lock_old_archive_extraction" = yes; then + $opt_dry_run || rm -f "$lockfile" + fi + if ($AR t "$f_ex_an_ar_oldlib" | sort | sort -uc >/dev/null 2>&1); then + : + else + func_fatal_error "object name conflicts in archive: $f_ex_an_ar_dir/$f_ex_an_ar_oldlib" + fi +} + + +# func_extract_archives gentop oldlib ... +func_extract_archives () +{ + $opt_debug + my_gentop="$1"; shift + my_oldlibs=${1+"$@"} + my_oldobjs="" + my_xlib="" + my_xabs="" + my_xdir="" + + for my_xlib in $my_oldlibs; do + # Extract the objects. 
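+ # Illustrative commentary: each archive is exploded into its own
+ # directory under $my_gentop so its members can be linked into the new
+ # output.  If two archives share a basename (say, two convenience
+ # libraries both called libsub.a, a hypothetical name), the loop below
+ # gives the second one a serial prefix such as lt1-libsub.a so the
+ # extraction directories stay distinct.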
+ case $my_xlib in + [\\/]* | [A-Za-z]:[\\/]*) my_xabs="$my_xlib" ;; + *) my_xabs=`pwd`"/$my_xlib" ;; + esac + func_basename "$my_xlib" + my_xlib="$func_basename_result" + my_xlib_u=$my_xlib + while :; do + case " $extracted_archives " in + *" $my_xlib_u "*) + func_arith $extracted_serial + 1 + extracted_serial=$func_arith_result + my_xlib_u=lt$extracted_serial-$my_xlib ;; + *) break ;; + esac + done + extracted_archives="$extracted_archives $my_xlib_u" + my_xdir="$my_gentop/$my_xlib_u" + + func_mkdir_p "$my_xdir" + + case $host in + *-darwin*) + func_verbose "Extracting $my_xabs" + # Do not bother doing anything if just a dry run + $opt_dry_run || { + darwin_orig_dir=`pwd` + cd $my_xdir || exit $? + darwin_archive=$my_xabs + darwin_curdir=`pwd` + darwin_base_archive=`basename "$darwin_archive"` + darwin_arches=`$LIPO -info "$darwin_archive" 2>/dev/null | $GREP Architectures 2>/dev/null || true` + if test -n "$darwin_arches"; then + darwin_arches=`$ECHO "$darwin_arches" | $SED -e 's/.*are://'` + darwin_arch= + func_verbose "$darwin_base_archive has multiple architectures $darwin_arches" + for darwin_arch in $darwin_arches ; do + func_mkdir_p "unfat-$$/${darwin_base_archive}-${darwin_arch}" + $LIPO -thin $darwin_arch -output "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" "${darwin_archive}" + cd "unfat-$$/${darwin_base_archive}-${darwin_arch}" + func_extract_an_archive "`pwd`" "${darwin_base_archive}" + cd "$darwin_curdir" + $RM "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" + done # $darwin_arches + ## Okay now we've a bunch of thin objects, gotta fatten them up :) + darwin_filelist=`find unfat-$$ -type f -name \*.o -print -o -name \*.lo -print | $SED -e "$basename" | sort -u` + darwin_file= + darwin_files= + for darwin_file in $darwin_filelist; do + darwin_files=`find unfat-$$ -name $darwin_file -print | sort | $NL2SP` + $LIPO -create -output "$darwin_file" $darwin_files + done # $darwin_filelist + $RM -rf unfat-$$ + cd "$darwin_orig_dir" + else + cd $darwin_orig_dir + func_extract_an_archive "$my_xdir" "$my_xabs" + fi # $darwin_arches + } # !$opt_dry_run + ;; + *) + func_extract_an_archive "$my_xdir" "$my_xabs" + ;; + esac + my_oldobjs="$my_oldobjs "`find $my_xdir -name \*.$objext -print -o -name \*.lo -print | sort | $NL2SP` + done + + func_extract_archives_result="$my_oldobjs" +} + + +# func_emit_wrapper [arg=no] +# +# Emit a libtool wrapper script on stdout. +# Don't directly open a file because we may want to +# incorporate the script contents within a cygwin/mingw +# wrapper executable. Must ONLY be called from within +# func_mode_link because it depends on a number of variables +# set therein. +# +# ARG is the value that the WRAPPER_SCRIPT_BELONGS_IN_OBJDIR +# variable will take. If 'yes', then the emitted script +# will assume that the directory in which it is stored is +# the $objdir directory. This is a cygwin/mingw-specific +# behavior. +func_emit_wrapper () +{ + func_emit_wrapper_arg1=${1-no} + + $ECHO "\ +#! $SHELL + +# $output - temporary wrapper script for $objdir/$outputname +# Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION +# +# The $output program cannot be directly executed until all the libtool +# libraries that it depends on are installed. +# +# This wrapper script should never be moved out of the build directory. +# If it is, it will not operate correctly. + +# Sed substitution that helps us do robust quoting. It backslashifies +# metacharacters that are still active within double-quoted strings. 
+sed_quote_subst='$sed_quote_subst' + +# Be Bourne compatible +if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then + emulate sh + NULLCMD=: + # Zsh 3.x and 4.x performs word splitting on \${1+\"\$@\"}, which + # is contrary to our usage. Disable this feature. + alias -g '\${1+\"\$@\"}'='\"\$@\"' + setopt NO_GLOB_SUBST +else + case \`(set -o) 2>/dev/null\` in *posix*) set -o posix;; esac +fi +BIN_SH=xpg4; export BIN_SH # for Tru64 +DUALCASE=1; export DUALCASE # for MKS sh + +# The HP-UX ksh and POSIX shell print the target directory to stdout +# if CDPATH is set. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +relink_command=\"$relink_command\" + +# This environment variable determines our operation mode. +if test \"\$libtool_install_magic\" = \"$magic\"; then + # install mode needs the following variables: + generated_by_libtool_version='$macro_version' + notinst_deplibs='$notinst_deplibs' +else + # When we are sourced in execute mode, \$file and \$ECHO are already set. + if test \"\$libtool_execute_magic\" != \"$magic\"; then + file=\"\$0\"" + + qECHO=`$ECHO "$ECHO" | $SED "$sed_quote_subst"` + $ECHO "\ + +# A function that is used when there is no print builtin or printf. +func_fallback_echo () +{ + eval 'cat <<_LTECHO_EOF +\$1 +_LTECHO_EOF' +} + ECHO=\"$qECHO\" + fi + +# Very basic option parsing. These options are (a) specific to +# the libtool wrapper, (b) are identical between the wrapper +# /script/ and the wrapper /executable/ which is used only on +# windows platforms, and (c) all begin with the string "--lt-" +# (application programs are unlikely to have options which match +# this pattern). +# +# There are only two supported options: --lt-debug and +# --lt-dump-script. There is, deliberately, no --lt-help. +# +# The first argument to this parsing function should be the +# script's $0 value, followed by "$@". +lt_option_debug= +func_parse_lt_options () +{ + lt_script_arg0=\$0 + shift + for lt_opt + do + case \"\$lt_opt\" in + --lt-debug) lt_option_debug=1 ;; + --lt-dump-script) + lt_dump_D=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%/[^/]*$%%'\` + test \"X\$lt_dump_D\" = \"X\$lt_script_arg0\" && lt_dump_D=. + lt_dump_F=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%^.*/%%'\` + cat \"\$lt_dump_D/\$lt_dump_F\" + exit 0 + ;; + --lt-*) + \$ECHO \"Unrecognized --lt- option: '\$lt_opt'\" 1>&2 + exit 1 + ;; + esac + done + + # Print the debug banner immediately: + if test -n \"\$lt_option_debug\"; then + echo \"${outputname}:${output}:\${LINENO}: libtool wrapper (GNU $PACKAGE$TIMESTAMP) $VERSION\" 1>&2 + fi +} + +# Used when --lt-debug. 
Prints its arguments to stdout +# (redirection is the responsibility of the caller) +func_lt_dump_args () +{ + lt_dump_args_N=1; + for lt_arg + do + \$ECHO \"${outputname}:${output}:\${LINENO}: newargv[\$lt_dump_args_N]: \$lt_arg\" + lt_dump_args_N=\`expr \$lt_dump_args_N + 1\` + done +} + +# Core function for launching the target application +func_exec_program_core () +{ +" + case $host in + # Backslashes separate directories on plain windows + *-*-mingw | *-*-os2* | *-cegcc*) + $ECHO "\ + if test -n \"\$lt_option_debug\"; then + \$ECHO \"${outputname}:${output}:\${LINENO}: newargv[0]: \$progdir\\\\\$program\" 1>&2 + func_lt_dump_args \${1+\"\$@\"} 1>&2 + fi + exec \"\$progdir\\\\\$program\" \${1+\"\$@\"} +" + ;; + + *) + $ECHO "\ + if test -n \"\$lt_option_debug\"; then + \$ECHO \"${outputname}:${output}:\${LINENO}: newargv[0]: \$progdir/\$program\" 1>&2 + func_lt_dump_args \${1+\"\$@\"} 1>&2 + fi + exec \"\$progdir/\$program\" \${1+\"\$@\"} +" + ;; + esac + $ECHO "\ + \$ECHO \"\$0: cannot exec \$program \$*\" 1>&2 + exit 1 +} + +# A function to encapsulate launching the target application +# Strips options in the --lt-* namespace from \$@ and +# launches target application with the remaining arguments. +func_exec_program () +{ + case \" \$* \" in + *\\ --lt-*) + for lt_wr_arg + do + case \$lt_wr_arg in + --lt-*) ;; + *) set x \"\$@\" \"\$lt_wr_arg\"; shift;; + esac + shift + done ;; + esac + func_exec_program_core \${1+\"\$@\"} +} + + # Parse options + func_parse_lt_options \"\$0\" \${1+\"\$@\"} + + # Find the directory that this script lives in. + thisdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*$%%'\` + test \"x\$thisdir\" = \"x\$file\" && thisdir=. + + # Follow symbolic links until we get to the real thisdir. + file=\`ls -ld \"\$file\" | $SED -n 's/.*-> //p'\` + while test -n \"\$file\"; do + destdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*\$%%'\` + + # If there was a directory component, then change thisdir. + if test \"x\$destdir\" != \"x\$file\"; then + case \"\$destdir\" in + [\\\\/]* | [A-Za-z]:[\\\\/]*) thisdir=\"\$destdir\" ;; + *) thisdir=\"\$thisdir/\$destdir\" ;; + esac + fi + + file=\`\$ECHO \"\$file\" | $SED 's%^.*/%%'\` + file=\`ls -ld \"\$thisdir/\$file\" | $SED -n 's/.*-> //p'\` + done + + # Usually 'no', except on cygwin/mingw when embedded into + # the cwrapper. + WRAPPER_SCRIPT_BELONGS_IN_OBJDIR=$func_emit_wrapper_arg1 + if test \"\$WRAPPER_SCRIPT_BELONGS_IN_OBJDIR\" = \"yes\"; then + # special case for '.' + if test \"\$thisdir\" = \".\"; then + thisdir=\`pwd\` + fi + # remove .libs from thisdir + case \"\$thisdir\" in + *[\\\\/]$objdir ) thisdir=\`\$ECHO \"\$thisdir\" | $SED 's%[\\\\/][^\\\\/]*$%%'\` ;; + $objdir ) thisdir=. ;; + esac + fi + + # Try to get the absolute directory name. + absdir=\`cd \"\$thisdir\" && pwd\` + test -n \"\$absdir\" && thisdir=\"\$absdir\" +" + + if test "$fast_install" = yes; then + $ECHO "\ + program=lt-'$outputname'$exeext + progdir=\"\$thisdir/$objdir\" + + if test ! -f \"\$progdir/\$program\" || + { file=\`ls -1dt \"\$progdir/\$program\" \"\$progdir/../\$program\" 2>/dev/null | ${SED} 1q\`; \\ + test \"X\$file\" != \"X\$progdir/\$program\"; }; then + + file=\"\$\$-\$program\" + + if test ! 
-d \"\$progdir\"; then + $MKDIR \"\$progdir\" + else + $RM \"\$progdir/\$file\" + fi" + + $ECHO "\ + + # relink executable if necessary + if test -n \"\$relink_command\"; then + if relink_command_output=\`eval \$relink_command 2>&1\`; then : + else + $ECHO \"\$relink_command_output\" >&2 + $RM \"\$progdir/\$file\" + exit 1 + fi + fi + + $MV \"\$progdir/\$file\" \"\$progdir/\$program\" 2>/dev/null || + { $RM \"\$progdir/\$program\"; + $MV \"\$progdir/\$file\" \"\$progdir/\$program\"; } + $RM \"\$progdir/\$file\" + fi" + else + $ECHO "\ + program='$outputname' + progdir=\"\$thisdir/$objdir\" +" + fi + + $ECHO "\ + + if test -f \"\$progdir/\$program\"; then" + + # fixup the dll searchpath if we need to. + # + # Fix the DLL searchpath if we need to. Do this before prepending + # to shlibpath, because on Windows, both are PATH and uninstalled + # libraries must come first. + if test -n "$dllsearchpath"; then + $ECHO "\ + # Add the dll search path components to the executable PATH + PATH=$dllsearchpath:\$PATH +" + fi + + # Export our shlibpath_var if we have one. + if test "$shlibpath_overrides_runpath" = yes && test -n "$shlibpath_var" && test -n "$temp_rpath"; then + $ECHO "\ + # Add our own library path to $shlibpath_var + $shlibpath_var=\"$temp_rpath\$$shlibpath_var\" + + # Some systems cannot cope with colon-terminated $shlibpath_var + # The second colon is a workaround for a bug in BeOS R4 sed + $shlibpath_var=\`\$ECHO \"\$$shlibpath_var\" | $SED 's/::*\$//'\` + + export $shlibpath_var +" + fi + + $ECHO "\ + if test \"\$libtool_execute_magic\" != \"$magic\"; then + # Run the actual program with our arguments. + func_exec_program \${1+\"\$@\"} + fi + else + # The program doesn't exist. + \$ECHO \"\$0: error: \\\`\$progdir/\$program' does not exist\" 1>&2 + \$ECHO \"This script is just a wrapper for \$program.\" 1>&2 + \$ECHO \"See the $PACKAGE documentation for more information.\" 1>&2 + exit 1 + fi +fi\ +" +} + + +# func_emit_cwrapperexe_src +# emit the source code for a wrapper executable on stdout +# Must ONLY be called from within func_mode_link because +# it depends on a number of variable set therein. +func_emit_cwrapperexe_src () +{ + cat < +#include +#ifdef _MSC_VER +# include +# include +# include +#else +# include +# include +# ifdef __CYGWIN__ +# include +# endif +#endif +#include +#include +#include +#include +#include +#include +#include +#include + +/* declarations of non-ANSI functions */ +#if defined(__MINGW32__) +# ifdef __STRICT_ANSI__ +int _putenv (const char *); +# endif +#elif defined(__CYGWIN__) +# ifdef __STRICT_ANSI__ +char *realpath (const char *, char *); +int putenv (char *); +int setenv (const char *, const char *, int); +# endif +/* #elif defined (other platforms) ... */ +#endif + +/* portability defines, excluding path handling macros */ +#if defined(_MSC_VER) +# define setmode _setmode +# define stat _stat +# define chmod _chmod +# define getcwd _getcwd +# define putenv _putenv +# define S_IXUSR _S_IEXEC +# ifndef _INTPTR_T_DEFINED +# define _INTPTR_T_DEFINED +# define intptr_t int +# endif +#elif defined(__MINGW32__) +# define setmode _setmode +# define stat _stat +# define chmod _chmod +# define getcwd _getcwd +# define putenv _putenv +#elif defined(__CYGWIN__) +# define HAVE_SETENV +# define FOPEN_WB "wb" +/* #elif defined (other platforms) ... 
*/ +#endif + +#if defined(PATH_MAX) +# define LT_PATHMAX PATH_MAX +#elif defined(MAXPATHLEN) +# define LT_PATHMAX MAXPATHLEN +#else +# define LT_PATHMAX 1024 +#endif + +#ifndef S_IXOTH +# define S_IXOTH 0 +#endif +#ifndef S_IXGRP +# define S_IXGRP 0 +#endif + +/* path handling portability macros */ +#ifndef DIR_SEPARATOR +# define DIR_SEPARATOR '/' +# define PATH_SEPARATOR ':' +#endif + +#if defined (_WIN32) || defined (__MSDOS__) || defined (__DJGPP__) || \ + defined (__OS2__) +# define HAVE_DOS_BASED_FILE_SYSTEM +# define FOPEN_WB "wb" +# ifndef DIR_SEPARATOR_2 +# define DIR_SEPARATOR_2 '\\' +# endif +# ifndef PATH_SEPARATOR_2 +# define PATH_SEPARATOR_2 ';' +# endif +#endif + +#ifndef DIR_SEPARATOR_2 +# define IS_DIR_SEPARATOR(ch) ((ch) == DIR_SEPARATOR) +#else /* DIR_SEPARATOR_2 */ +# define IS_DIR_SEPARATOR(ch) \ + (((ch) == DIR_SEPARATOR) || ((ch) == DIR_SEPARATOR_2)) +#endif /* DIR_SEPARATOR_2 */ + +#ifndef PATH_SEPARATOR_2 +# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR) +#else /* PATH_SEPARATOR_2 */ +# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR_2) +#endif /* PATH_SEPARATOR_2 */ + +#ifndef FOPEN_WB +# define FOPEN_WB "w" +#endif +#ifndef _O_BINARY +# define _O_BINARY 0 +#endif + +#define XMALLOC(type, num) ((type *) xmalloc ((num) * sizeof(type))) +#define XFREE(stale) do { \ + if (stale) { free ((void *) stale); stale = 0; } \ +} while (0) + +#if defined(LT_DEBUGWRAPPER) +static int lt_debug = 1; +#else +static int lt_debug = 0; +#endif + +const char *program_name = "libtool-wrapper"; /* in case xstrdup fails */ + +void *xmalloc (size_t num); +char *xstrdup (const char *string); +const char *base_name (const char *name); +char *find_executable (const char *wrapper); +char *chase_symlinks (const char *pathspec); +int make_executable (const char *path); +int check_executable (const char *path); +char *strendzap (char *str, const char *pat); +void lt_debugprintf (const char *file, int line, const char *fmt, ...); +void lt_fatal (const char *file, int line, const char *message, ...); +static const char *nonnull (const char *s); +static const char *nonempty (const char *s); +void lt_setenv (const char *name, const char *value); +char *lt_extend_str (const char *orig_value, const char *add, int to_end); +void lt_update_exe_path (const char *name, const char *value); +void lt_update_lib_path (const char *name, const char *value); +char **prepare_spawn (char **argv); +void lt_dump_script (FILE *f); +EOF + + cat <= 0) + && (st.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) + return 1; + else + return 0; +} + +int +make_executable (const char *path) +{ + int rval = 0; + struct stat st; + + lt_debugprintf (__FILE__, __LINE__, "(make_executable): %s\n", + nonempty (path)); + if ((!path) || (!*path)) + return 0; + + if (stat (path, &st) >= 0) + { + rval = chmod (path, st.st_mode | S_IXOTH | S_IXGRP | S_IXUSR); + } + return rval; +} + +/* Searches for the full path of the wrapper. Returns + newly allocated full path name if found, NULL otherwise + Does not chase symlinks, even on platforms that support them. +*/ +char * +find_executable (const char *wrapper) +{ + int has_slash = 0; + const char *p; + const char *p_next; + /* static buffer for getcwd */ + char tmp[LT_PATHMAX + 1]; + int tmp_len; + char *concat_name; + + lt_debugprintf (__FILE__, __LINE__, "(find_executable): %s\n", + nonempty (wrapper)); + + if ((wrapper == NULL) || (*wrapper == '\0')) + return NULL; + + /* Absolute path? 
*/ +#if defined (HAVE_DOS_BASED_FILE_SYSTEM) + if (isalpha ((unsigned char) wrapper[0]) && wrapper[1] == ':') + { + concat_name = xstrdup (wrapper); + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + } + else + { +#endif + if (IS_DIR_SEPARATOR (wrapper[0])) + { + concat_name = xstrdup (wrapper); + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + } +#if defined (HAVE_DOS_BASED_FILE_SYSTEM) + } +#endif + + for (p = wrapper; *p; p++) + if (*p == '/') + { + has_slash = 1; + break; + } + if (!has_slash) + { + /* no slashes; search PATH */ + const char *path = getenv ("PATH"); + if (path != NULL) + { + for (p = path; *p; p = p_next) + { + const char *q; + size_t p_len; + for (q = p; *q; q++) + if (IS_PATH_SEPARATOR (*q)) + break; + p_len = q - p; + p_next = (*q == '\0' ? q : q + 1); + if (p_len == 0) + { + /* empty path: current directory */ + if (getcwd (tmp, LT_PATHMAX) == NULL) + lt_fatal (__FILE__, __LINE__, "getcwd failed: %s", + nonnull (strerror (errno))); + tmp_len = strlen (tmp); + concat_name = + XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1); + memcpy (concat_name, tmp, tmp_len); + concat_name[tmp_len] = '/'; + strcpy (concat_name + tmp_len + 1, wrapper); + } + else + { + concat_name = + XMALLOC (char, p_len + 1 + strlen (wrapper) + 1); + memcpy (concat_name, p, p_len); + concat_name[p_len] = '/'; + strcpy (concat_name + p_len + 1, wrapper); + } + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + } + } + /* not found in PATH; assume curdir */ + } + /* Relative path | not found in path: prepend cwd */ + if (getcwd (tmp, LT_PATHMAX) == NULL) + lt_fatal (__FILE__, __LINE__, "getcwd failed: %s", + nonnull (strerror (errno))); + tmp_len = strlen (tmp); + concat_name = XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1); + memcpy (concat_name, tmp, tmp_len); + concat_name[tmp_len] = '/'; + strcpy (concat_name + tmp_len + 1, wrapper); + + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + return NULL; +} + +char * +chase_symlinks (const char *pathspec) +{ +#ifndef S_ISLNK + return xstrdup (pathspec); +#else + char buf[LT_PATHMAX]; + struct stat s; + char *tmp_pathspec = xstrdup (pathspec); + char *p; + int has_symlinks = 0; + while (strlen (tmp_pathspec) && !has_symlinks) + { + lt_debugprintf (__FILE__, __LINE__, + "checking path component for symlinks: %s\n", + tmp_pathspec); + if (lstat (tmp_pathspec, &s) == 0) + { + if (S_ISLNK (s.st_mode) != 0) + { + has_symlinks = 1; + break; + } + + /* search backwards for last DIR_SEPARATOR */ + p = tmp_pathspec + strlen (tmp_pathspec) - 1; + while ((p > tmp_pathspec) && (!IS_DIR_SEPARATOR (*p))) + p--; + if ((p == tmp_pathspec) && (!IS_DIR_SEPARATOR (*p))) + { + /* no more DIR_SEPARATORS left */ + break; + } + *p = '\0'; + } + else + { + lt_fatal (__FILE__, __LINE__, + "error accessing file \"%s\": %s", + tmp_pathspec, nonnull (strerror (errno))); + } + } + XFREE (tmp_pathspec); + + if (!has_symlinks) + { + return xstrdup (pathspec); + } + + tmp_pathspec = realpath (pathspec, buf); + if (tmp_pathspec == 0) + { + lt_fatal (__FILE__, __LINE__, + "could not follow symlinks for %s", pathspec); + } + return xstrdup (tmp_pathspec); +#endif +} + +char * +strendzap (char *str, const char *pat) +{ + size_t len, patlen; + + assert (str != NULL); + assert (pat != NULL); + + len = strlen (str); + patlen = strlen (pat); + + if (patlen <= len) + { + str += len - patlen; + if (strcmp (str, pat) == 0) + *str = '\0'; + } + return 
str; +} + +void +lt_debugprintf (const char *file, int line, const char *fmt, ...) +{ + va_list args; + if (lt_debug) + { + (void) fprintf (stderr, "%s:%s:%d: ", program_name, file, line); + va_start (args, fmt); + (void) vfprintf (stderr, fmt, args); + va_end (args); + } +} + +static void +lt_error_core (int exit_status, const char *file, + int line, const char *mode, + const char *message, va_list ap) +{ + fprintf (stderr, "%s:%s:%d: %s: ", program_name, file, line, mode); + vfprintf (stderr, message, ap); + fprintf (stderr, ".\n"); + + if (exit_status >= 0) + exit (exit_status); +} + +void +lt_fatal (const char *file, int line, const char *message, ...) +{ + va_list ap; + va_start (ap, message); + lt_error_core (EXIT_FAILURE, file, line, "FATAL", message, ap); + va_end (ap); +} + +static const char * +nonnull (const char *s) +{ + return s ? s : "(null)"; +} + +static const char * +nonempty (const char *s) +{ + return (s && !*s) ? "(empty)" : nonnull (s); +} + +void +lt_setenv (const char *name, const char *value) +{ + lt_debugprintf (__FILE__, __LINE__, + "(lt_setenv) setting '%s' to '%s'\n", + nonnull (name), nonnull (value)); + { +#ifdef HAVE_SETENV + /* always make a copy, for consistency with !HAVE_SETENV */ + char *str = xstrdup (value); + setenv (name, str, 1); +#else + int len = strlen (name) + 1 + strlen (value) + 1; + char *str = XMALLOC (char, len); + sprintf (str, "%s=%s", name, value); + if (putenv (str) != EXIT_SUCCESS) + { + XFREE (str); + } +#endif + } +} + +char * +lt_extend_str (const char *orig_value, const char *add, int to_end) +{ + char *new_value; + if (orig_value && *orig_value) + { + int orig_value_len = strlen (orig_value); + int add_len = strlen (add); + new_value = XMALLOC (char, add_len + orig_value_len + 1); + if (to_end) + { + strcpy (new_value, orig_value); + strcpy (new_value + orig_value_len, add); + } + else + { + strcpy (new_value, add); + strcpy (new_value + add_len, orig_value); + } + } + else + { + new_value = xstrdup (add); + } + return new_value; +} + +void +lt_update_exe_path (const char *name, const char *value) +{ + lt_debugprintf (__FILE__, __LINE__, + "(lt_update_exe_path) modifying '%s' by prepending '%s'\n", + nonnull (name), nonnull (value)); + + if (name && *name && value && *value) + { + char *new_value = lt_extend_str (getenv (name), value, 0); + /* some systems can't cope with a ':'-terminated path #' */ + int len = strlen (new_value); + while (((len = strlen (new_value)) > 0) && IS_PATH_SEPARATOR (new_value[len-1])) + { + new_value[len-1] = '\0'; + } + lt_setenv (name, new_value); + XFREE (new_value); + } +} + +void +lt_update_lib_path (const char *name, const char *value) +{ + lt_debugprintf (__FILE__, __LINE__, + "(lt_update_lib_path) modifying '%s' by prepending '%s'\n", + nonnull (name), nonnull (value)); + + if (name && *name && value && *value) + { + char *new_value = lt_extend_str (getenv (name), value, 0); + lt_setenv (name, new_value); + XFREE (new_value); + } +} + +EOF + case $host_os in + mingw*) + cat <<"EOF" + +/* Prepares an argument vector before calling spawn(). + Note that spawn() does not by itself call the command interpreter + (getenv ("COMSPEC") != NULL ? getenv ("COMSPEC") : + ({ OSVERSIONINFO v; v.dwOSVersionInfoSize = sizeof(OSVERSIONINFO); + GetVersionEx(&v); + v.dwPlatformId == VER_PLATFORM_WIN32_NT; + }) ? "cmd.exe" : "command.com"). + Instead it simply concatenates the arguments, separated by ' ', and calls + CreateProcess(). 
We must quote the arguments since Win32 CreateProcess() + interprets characters like ' ', '\t', '\\', '"' (but not '<' and '>') in a + special way: + - Space and tab are interpreted as delimiters. They are not treated as + delimiters if they are surrounded by double quotes: "...". + - Unescaped double quotes are removed from the input. Their only effect is + that within double quotes, space and tab are treated like normal + characters. + - Backslashes not followed by double quotes are not special. + - But 2*n+1 backslashes followed by a double quote become + n backslashes followed by a double quote (n >= 0): + \" -> " + \\\" -> \" + \\\\\" -> \\" + */ +#define SHELL_SPECIAL_CHARS "\"\\ \001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037" +#define SHELL_SPACE_CHARS " \001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037" +char ** +prepare_spawn (char **argv) +{ + size_t argc; + char **new_argv; + size_t i; + + /* Count number of arguments. */ + for (argc = 0; argv[argc] != NULL; argc++) + ; + + /* Allocate new argument vector. */ + new_argv = XMALLOC (char *, argc + 1); + + /* Put quoted arguments into the new argument vector. */ + for (i = 0; i < argc; i++) + { + const char *string = argv[i]; + + if (string[0] == '\0') + new_argv[i] = xstrdup ("\"\""); + else if (strpbrk (string, SHELL_SPECIAL_CHARS) != NULL) + { + int quote_around = (strpbrk (string, SHELL_SPACE_CHARS) != NULL); + size_t length; + unsigned int backslashes; + const char *s; + char *quoted_string; + char *p; + + length = 0; + backslashes = 0; + if (quote_around) + length++; + for (s = string; *s != '\0'; s++) + { + char c = *s; + if (c == '"') + length += backslashes + 1; + length++; + if (c == '\\') + backslashes++; + else + backslashes = 0; + } + if (quote_around) + length += backslashes + 1; + + quoted_string = XMALLOC (char, length + 1); + + p = quoted_string; + backslashes = 0; + if (quote_around) + *p++ = '"'; + for (s = string; *s != '\0'; s++) + { + char c = *s; + if (c == '"') + { + unsigned int j; + for (j = backslashes + 1; j > 0; j--) + *p++ = '\\'; + } + *p++ = c; + if (c == '\\') + backslashes++; + else + backslashes = 0; + } + if (quote_around) + { + unsigned int j; + for (j = backslashes; j > 0; j--) + *p++ = '\\'; + *p++ = '"'; + } + *p = '\0'; + + new_argv[i] = quoted_string; + } + else + new_argv[i] = (char *) string; + } + new_argv[argc] = NULL; + + return new_argv; +} +EOF + ;; + esac + + cat <<"EOF" +void lt_dump_script (FILE* f) +{ +EOF + func_emit_wrapper yes | + $SED -n -e ' +s/^\(.\{79\}\)\(..*\)/\1\ +\2/ +h +s/\([\\"]\)/\\\1/g +s/$/\\n/ +s/\([^\n]*\).*/ fputs ("\1", f);/p +g +D' + cat <<"EOF" +} +EOF +} +# end: func_emit_cwrapperexe_src + +# func_win32_import_lib_p ARG +# True if ARG is an import lib, as indicated by $file_magic_cmd +func_win32_import_lib_p () +{ + $opt_debug + case `eval $file_magic_cmd \"\$1\" 2>/dev/null | $SED -e 10q` in + *import*) : ;; + *) false ;; + esac +} + +# func_mode_link arg... +func_mode_link () +{ + $opt_debug + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) + # It is impossible to link a dll without this setting, and + # we shouldn't force the makefile maintainer to figure out + # which system we are compiling for in order to pass an extra + # flag for every libtool invocation. 
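
For orientation, a typical link invocation that reaches this code path looks like the sketch below (file and directory names are hypothetical, not taken from the patch); on the PE hosts matched in the case above, passing -no-undefined asserts that every symbol is satisfied, which is what lets an actual DLL be built rather than a static-only fallback:

    libtool --mode=link gcc -o libexample.la example.lo \
        -rpath /usr/local/lib -no-undefined -version-info 2:1:0
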
+ # allow_undefined=no + + # FIXME: Unfortunately, there are problems with the above when trying + # to make a dll which has undefined symbols, in which case not + # even a static library is built. For now, we need to specify + # -no-undefined on the libtool link line when we can be certain + # that all symbols are satisfied, otherwise we get a static library. + allow_undefined=yes + ;; + *) + allow_undefined=yes + ;; + esac + libtool_args=$nonopt + base_compile="$nonopt $@" + compile_command=$nonopt + finalize_command=$nonopt + + compile_rpath= + finalize_rpath= + compile_shlibpath= + finalize_shlibpath= + convenience= + old_convenience= + deplibs= + old_deplibs= + compiler_flags= + linker_flags= + dllsearchpath= + lib_search_path=`pwd` + inst_prefix_dir= + new_inherited_linker_flags= + + avoid_version=no + bindir= + dlfiles= + dlprefiles= + dlself=no + export_dynamic=no + export_symbols= + export_symbols_regex= + generated= + libobjs= + ltlibs= + module=no + no_install=no + objs= + non_pic_objects= + precious_files_regex= + prefer_static_libs=no + preload=no + prev= + prevarg= + release= + rpath= + xrpath= + perm_rpath= + temp_rpath= + thread_safe=no + vinfo= + vinfo_number=no + weak_libs= + single_module="${wl}-single_module" + func_infer_tag $base_compile + + # We need to know -static, to get the right output filenames. + for arg + do + case $arg in + -shared) + test "$build_libtool_libs" != yes && \ + func_fatal_configuration "can not build a shared library" + build_old_libs=no + break + ;; + -all-static | -static | -static-libtool-libs) + case $arg in + -all-static) + if test "$build_libtool_libs" = yes && test -z "$link_static_flag"; then + func_warning "complete static linking is impossible in this configuration" + fi + if test -n "$link_static_flag"; then + dlopen_self=$dlopen_self_static + fi + prefer_static_libs=yes + ;; + -static) + if test -z "$pic_flag" && test -n "$link_static_flag"; then + dlopen_self=$dlopen_self_static + fi + prefer_static_libs=built + ;; + -static-libtool-libs) + if test -z "$pic_flag" && test -n "$link_static_flag"; then + dlopen_self=$dlopen_self_static + fi + prefer_static_libs=yes + ;; + esac + build_libtool_libs=no + build_old_libs=yes + break + ;; + esac + done + + # See if our shared archives depend on static archives. + test -n "$old_archive_from_new_cmds" && build_old_libs=yes + + # Go through the arguments, transforming them on the way. + while test "$#" -gt 0; do + arg="$1" + shift + func_quote_for_eval "$arg" + qarg=$func_quote_for_eval_unquoted_result + func_append libtool_args " $func_quote_for_eval_result" + + # If the previous option needs an argument, assign it. + if test -n "$prev"; then + case $prev in + output) + func_append compile_command " @OUTPUT@" + func_append finalize_command " @OUTPUT@" + ;; + esac + + case $prev in + bindir) + bindir="$arg" + prev= + continue + ;; + dlfiles|dlprefiles) + if test "$preload" = no; then + # Add the symbol object into the linking commands. + func_append compile_command " @SYMFILE@" + func_append finalize_command " @SYMFILE@" + preload=yes + fi + case $arg in + *.la | *.lo) ;; # We handle these cases below. 
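
The dlfiles/dlprefiles branches above are driven by -dlopen and -dlpreopen on the link line. A sketch of the two usual forms, with hypothetical program and module names:

    # queue a module for runtime dlopen(); if dlopen support is missing,
    # the code above silently switches it over to preloading
    libtool --mode=link gcc -o app app.o -dlopen libplugin.la

    # force preloading: the @SYMFILE@ symbol-table object is added to the link
    libtool --mode=link gcc -o app app.o -dlopen self -dlpreopen libplugin.la
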
+ force) + if test "$dlself" = no; then + dlself=needless + export_dynamic=yes + fi + prev= + continue + ;; + self) + if test "$prev" = dlprefiles; then + dlself=yes + elif test "$prev" = dlfiles && test "$dlopen_self" != yes; then + dlself=yes + else + dlself=needless + export_dynamic=yes + fi + prev= + continue + ;; + *) + if test "$prev" = dlfiles; then + func_append dlfiles " $arg" + else + func_append dlprefiles " $arg" + fi + prev= + continue + ;; + esac + ;; + expsyms) + export_symbols="$arg" + test -f "$arg" \ + || func_fatal_error "symbol file \`$arg' does not exist" + prev= + continue + ;; + expsyms_regex) + export_symbols_regex="$arg" + prev= + continue + ;; + framework) + case $host in + *-*-darwin*) + case "$deplibs " in + *" $qarg.ltframework "*) ;; + *) func_append deplibs " $qarg.ltframework" # this is fixed later + ;; + esac + ;; + esac + prev= + continue + ;; + inst_prefix) + inst_prefix_dir="$arg" + prev= + continue + ;; + objectlist) + if test -f "$arg"; then + save_arg=$arg + moreargs= + for fil in `cat "$save_arg"` + do +# func_append moreargs " $fil" + arg=$fil + # A libtool-controlled object. + + # Check to see that this really is a libtool object. + if func_lalib_unsafe_p "$arg"; then + pic_object= + non_pic_object= + + # Read the .lo file + func_source "$arg" + + if test -z "$pic_object" || + test -z "$non_pic_object" || + test "$pic_object" = none && + test "$non_pic_object" = none; then + func_fatal_error "cannot find name of object for \`$arg'" + fi + + # Extract subdirectory from the argument. + func_dirname "$arg" "/" "" + xdir="$func_dirname_result" + + if test "$pic_object" != none; then + # Prepend the subdirectory the object is found in. + pic_object="$xdir$pic_object" + + if test "$prev" = dlfiles; then + if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then + func_append dlfiles " $pic_object" + prev= + continue + else + # If libtool objects are unsupported, then we need to preload. + prev=dlprefiles + fi + fi + + # CHECK ME: I think I busted this. -Ossama + if test "$prev" = dlprefiles; then + # Preload the old-style object. + func_append dlprefiles " $pic_object" + prev= + fi + + # A PIC object. + func_append libobjs " $pic_object" + arg="$pic_object" + fi + + # Non-PIC object. + if test "$non_pic_object" != none; then + # Prepend the subdirectory the object is found in. + non_pic_object="$xdir$non_pic_object" + + # A standard non-PIC object + func_append non_pic_objects " $non_pic_object" + if test -z "$pic_object" || test "$pic_object" = none ; then + arg="$non_pic_object" + fi + else + # If the PIC object exists, use it instead. + # $xdir was prepended to $pic_object above. + non_pic_object="$pic_object" + func_append non_pic_objects " $non_pic_object" + fi + else + # Only an error if not doing a dry-run. + if $opt_dry_run; then + # Extract subdirectory from the argument. + func_dirname "$arg" "/" "" + xdir="$func_dirname_result" + + func_lo2o "$arg" + pic_object=$xdir$objdir/$func_lo2o_result + non_pic_object=$xdir$func_lo2o_result + func_append libobjs " $pic_object" + func_append non_pic_objects " $non_pic_object" + else + func_fatal_error "\`$arg' is not a valid libtool object" + fi + fi + done + else + func_fatal_error "link input file \`$arg' does not exist" + fi + arg=$save_arg + prev= + continue + ;; + precious_regex) + precious_files_regex="$arg" + prev= + continue + ;; + release) + release="-$arg" + prev= + continue + ;; + rpath | xrpath) + # We need an absolute path. 
+ case $arg in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + func_fatal_error "only absolute run-paths are allowed" + ;; + esac + if test "$prev" = rpath; then + case "$rpath " in + *" $arg "*) ;; + *) func_append rpath " $arg" ;; + esac + else + case "$xrpath " in + *" $arg "*) ;; + *) func_append xrpath " $arg" ;; + esac + fi + prev= + continue + ;; + shrext) + shrext_cmds="$arg" + prev= + continue + ;; + weak) + func_append weak_libs " $arg" + prev= + continue + ;; + xcclinker) + func_append linker_flags " $qarg" + func_append compiler_flags " $qarg" + prev= + func_append compile_command " $qarg" + func_append finalize_command " $qarg" + continue + ;; + xcompiler) + func_append compiler_flags " $qarg" + prev= + func_append compile_command " $qarg" + func_append finalize_command " $qarg" + continue + ;; + xlinker) + func_append linker_flags " $qarg" + func_append compiler_flags " $wl$qarg" + prev= + func_append compile_command " $wl$qarg" + func_append finalize_command " $wl$qarg" + continue + ;; + *) + eval "$prev=\"\$arg\"" + prev= + continue + ;; + esac + fi # test -n "$prev" + + prevarg="$arg" + + case $arg in + -all-static) + if test -n "$link_static_flag"; then + # See comment for -static flag below, for more details. + func_append compile_command " $link_static_flag" + func_append finalize_command " $link_static_flag" + fi + continue + ;; + + -allow-undefined) + # FIXME: remove this flag sometime in the future. + func_fatal_error "\`-allow-undefined' must not be used because it is the default" + ;; + + -avoid-version) + avoid_version=yes + continue + ;; + + -bindir) + prev=bindir + continue + ;; + + -dlopen) + prev=dlfiles + continue + ;; + + -dlpreopen) + prev=dlprefiles + continue + ;; + + -export-dynamic) + export_dynamic=yes + continue + ;; + + -export-symbols | -export-symbols-regex) + if test -n "$export_symbols" || test -n "$export_symbols_regex"; then + func_fatal_error "more than one -exported-symbols argument is not allowed" + fi + if test "X$arg" = "X-export-symbols"; then + prev=expsyms + else + prev=expsyms_regex + fi + continue + ;; + + -framework) + prev=framework + continue + ;; + + -inst-prefix-dir) + prev=inst_prefix + continue + ;; + + # The native IRIX linker understands -LANG:*, -LIST:* and -LNO:* + # so, if we see these flags be careful not to treat them like -L + -L[A-Z][A-Z]*:*) + case $with_gcc/$host in + no/*-*-irix* | /*-*-irix*) + func_append compile_command " $arg" + func_append finalize_command " $arg" + ;; + esac + continue + ;; + + -L*) + func_stripname "-L" '' "$arg" + if test -z "$func_stripname_result"; then + if test "$#" -gt 0; then + func_fatal_error "require no space between \`-L' and \`$1'" + else + func_fatal_error "need path for \`-L' option" + fi + fi + func_resolve_sysroot "$func_stripname_result" + dir=$func_resolve_sysroot_result + # We need an absolute path. 
+ case $dir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + absdir=`cd "$dir" && pwd` + test -z "$absdir" && \ + func_fatal_error "cannot determine absolute directory name of \`$dir'" + dir="$absdir" + ;; + esac + case "$deplibs " in + *" -L$dir "* | *" $arg "*) + # Will only happen for absolute or sysroot arguments + ;; + *) + # Preserve sysroot, but never include relative directories + case $dir in + [\\/]* | [A-Za-z]:[\\/]* | =*) func_append deplibs " $arg" ;; + *) func_append deplibs " -L$dir" ;; + esac + func_append lib_search_path " $dir" + ;; + esac + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) + testbindir=`$ECHO "$dir" | $SED 's*/lib$*/bin*'` + case :$dllsearchpath: in + *":$dir:"*) ;; + ::) dllsearchpath=$dir;; + *) func_append dllsearchpath ":$dir";; + esac + case :$dllsearchpath: in + *":$testbindir:"*) ;; + ::) dllsearchpath=$testbindir;; + *) func_append dllsearchpath ":$testbindir";; + esac + ;; + esac + continue + ;; + + -l*) + if test "X$arg" = "X-lc" || test "X$arg" = "X-lm"; then + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-beos* | *-cegcc* | *-*-haiku*) + # These systems don't actually have a C or math library (as such) + continue + ;; + *-*-os2*) + # These systems don't actually have a C library (as such) + test "X$arg" = "X-lc" && continue + ;; + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) + # Do not include libc due to us having libc/libc_r. + test "X$arg" = "X-lc" && continue + ;; + *-*-rhapsody* | *-*-darwin1.[012]) + # Rhapsody C and math libraries are in the System framework + func_append deplibs " System.ltframework" + continue + ;; + *-*-sco3.2v5* | *-*-sco5v6*) + # Causes problems with __ctype + test "X$arg" = "X-lc" && continue + ;; + *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) + # Compiler inserts libc in the correct place for threads to work + test "X$arg" = "X-lc" && continue + ;; + esac + elif test "X$arg" = "X-lc_r"; then + case $host in + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) + # Do not include libc_r directly, use -pthread flag. + continue + ;; + esac + fi + func_append deplibs " $arg" + continue + ;; + + -module) + module=yes + continue + ;; + + # Tru64 UNIX uses -model [arg] to determine the layout of C++ + # classes, name mangling, and exception handling. + # Darwin uses the -arch flag to determine output architecture. + -model|-arch|-isysroot|--sysroot) + func_append compiler_flags " $arg" + func_append compile_command " $arg" + func_append finalize_command " $arg" + prev=xcompiler + continue + ;; + + -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe \ + |-threads|-fopenmp|-openmp|-mp|-xopenmp|-omp|-qsmp=*) + func_append compiler_flags " $arg" + func_append compile_command " $arg" + func_append finalize_command " $arg" + case "$new_inherited_linker_flags " in + *" $arg "*) ;; + * ) func_append new_inherited_linker_flags " $arg" ;; + esac + continue + ;; + + -multi_module) + single_module="${wl}-multi_module" + continue + ;; + + -no-fast-install) + fast_install=no + continue + ;; + + -no-install) + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-darwin* | *-cegcc*) + # The PATH hackery in wrapper scripts is required on Windows + # and Darwin in order for the loader to find any dlls it needs. 
+ func_warning "\`-no-install' is ignored for $host" + func_warning "assuming \`-no-fast-install' instead" + fast_install=no + ;; + *) no_install=yes ;; + esac + continue + ;; + + -no-undefined) + allow_undefined=no + continue + ;; + + -objectlist) + prev=objectlist + continue + ;; + + -o) prev=output ;; + + -precious-files-regex) + prev=precious_regex + continue + ;; + + -release) + prev=release + continue + ;; + + -rpath) + prev=rpath + continue + ;; + + -R) + prev=xrpath + continue + ;; + + -R*) + func_stripname '-R' '' "$arg" + dir=$func_stripname_result + # We need an absolute path. + case $dir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + =*) + func_stripname '=' '' "$dir" + dir=$lt_sysroot$func_stripname_result + ;; + *) + func_fatal_error "only absolute run-paths are allowed" + ;; + esac + case "$xrpath " in + *" $dir "*) ;; + *) func_append xrpath " $dir" ;; + esac + continue + ;; + + -shared) + # The effects of -shared are defined in a previous loop. + continue + ;; + + -shrext) + prev=shrext + continue + ;; + + -static | -static-libtool-libs) + # The effects of -static are defined in a previous loop. + # We used to do the same as -all-static on platforms that + # didn't have a PIC flag, but the assumption that the effects + # would be equivalent was wrong. It would break on at least + # Digital Unix and AIX. + continue + ;; + + -thread-safe) + thread_safe=yes + continue + ;; + + -version-info) + prev=vinfo + continue + ;; + + -version-number) + prev=vinfo + vinfo_number=yes + continue + ;; + + -weak) + prev=weak + continue + ;; + + -Wc,*) + func_stripname '-Wc,' '' "$arg" + args=$func_stripname_result + arg= + save_ifs="$IFS"; IFS=',' + for flag in $args; do + IFS="$save_ifs" + func_quote_for_eval "$flag" + func_append arg " $func_quote_for_eval_result" + func_append compiler_flags " $func_quote_for_eval_result" + done + IFS="$save_ifs" + func_stripname ' ' '' "$arg" + arg=$func_stripname_result + ;; + + -Wl,*) + func_stripname '-Wl,' '' "$arg" + args=$func_stripname_result + arg= + save_ifs="$IFS"; IFS=',' + for flag in $args; do + IFS="$save_ifs" + func_quote_for_eval "$flag" + func_append arg " $wl$func_quote_for_eval_result" + func_append compiler_flags " $wl$func_quote_for_eval_result" + func_append linker_flags " $func_quote_for_eval_result" + done + IFS="$save_ifs" + func_stripname ' ' '' "$arg" + arg=$func_stripname_result + ;; + + -Xcompiler) + prev=xcompiler + continue + ;; + + -Xlinker) + prev=xlinker + continue + ;; + + -XCClinker) + prev=xcclinker + continue + ;; + + # -msg_* for osf cc + -msg_*) + func_quote_for_eval "$arg" + arg="$func_quote_for_eval_result" + ;; + + # Flags to be passed through unchanged, with rationale: + # -64, -mips[0-9] enable 64-bit mode for the SGI compiler + # -r[0-9][0-9]* specify processor for the SGI compiler + # -xarch=*, -xtarget=* enable 64-bit mode for the Sun compiler + # +DA*, +DD* enable 64-bit mode for the HP compiler + # -q* compiler args for the IBM compiler + # -m*, -t[45]*, -txscale* architecture-specific flags for GCC + # -F/path path to uninstalled frameworks, gcc on darwin + # -p, -pg, --coverage, -fprofile-* profiling flags for GCC + # @file GCC response files + # -tp=* Portland pgcc target processor selection + # --sysroot=* for sysroot support + # -O*, -flto*, -fwhopr*, -fuse-linker-plugin GCC link-time optimization + -64|-mips[0-9]|-r[0-9][0-9]*|-xarch=*|-xtarget=*|+DA*|+DD*|-q*|-m*| \ + -t[45]*|-txscale*|-p|-pg|--coverage|-fprofile-*|-F*|@*|-tp=*|--sysroot=*| \ + -O*|-flto*|-fwhopr*|-fuse-linker-plugin) + func_quote_for_eval 
"$arg" + arg="$func_quote_for_eval_result" + func_append compile_command " $arg" + func_append finalize_command " $arg" + func_append compiler_flags " $arg" + continue + ;; + + # Some other compiler flag. + -* | +*) + func_quote_for_eval "$arg" + arg="$func_quote_for_eval_result" + ;; + + *.$objext) + # A standard object. + func_append objs " $arg" + ;; + + *.lo) + # A libtool-controlled object. + + # Check to see that this really is a libtool object. + if func_lalib_unsafe_p "$arg"; then + pic_object= + non_pic_object= + + # Read the .lo file + func_source "$arg" + + if test -z "$pic_object" || + test -z "$non_pic_object" || + test "$pic_object" = none && + test "$non_pic_object" = none; then + func_fatal_error "cannot find name of object for \`$arg'" + fi + + # Extract subdirectory from the argument. + func_dirname "$arg" "/" "" + xdir="$func_dirname_result" + + if test "$pic_object" != none; then + # Prepend the subdirectory the object is found in. + pic_object="$xdir$pic_object" + + if test "$prev" = dlfiles; then + if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then + func_append dlfiles " $pic_object" + prev= + continue + else + # If libtool objects are unsupported, then we need to preload. + prev=dlprefiles + fi + fi + + # CHECK ME: I think I busted this. -Ossama + if test "$prev" = dlprefiles; then + # Preload the old-style object. + func_append dlprefiles " $pic_object" + prev= + fi + + # A PIC object. + func_append libobjs " $pic_object" + arg="$pic_object" + fi + + # Non-PIC object. + if test "$non_pic_object" != none; then + # Prepend the subdirectory the object is found in. + non_pic_object="$xdir$non_pic_object" + + # A standard non-PIC object + func_append non_pic_objects " $non_pic_object" + if test -z "$pic_object" || test "$pic_object" = none ; then + arg="$non_pic_object" + fi + else + # If the PIC object exists, use it instead. + # $xdir was prepended to $pic_object above. + non_pic_object="$pic_object" + func_append non_pic_objects " $non_pic_object" + fi + else + # Only an error if not doing a dry-run. + if $opt_dry_run; then + # Extract subdirectory from the argument. + func_dirname "$arg" "/" "" + xdir="$func_dirname_result" + + func_lo2o "$arg" + pic_object=$xdir$objdir/$func_lo2o_result + non_pic_object=$xdir$func_lo2o_result + func_append libobjs " $pic_object" + func_append non_pic_objects " $non_pic_object" + else + func_fatal_error "\`$arg' is not a valid libtool object" + fi + fi + ;; + + *.$libext) + # An archive. + func_append deplibs " $arg" + func_append old_deplibs " $arg" + continue + ;; + + *.la) + # A libtool-controlled library. + + func_resolve_sysroot "$arg" + if test "$prev" = dlfiles; then + # This library was specified with -dlopen. + func_append dlfiles " $func_resolve_sysroot_result" + prev= + elif test "$prev" = dlprefiles; then + # The library was specified with -dlpreopen. + func_append dlprefiles " $func_resolve_sysroot_result" + prev= + else + func_append deplibs " $func_resolve_sysroot_result" + fi + continue + ;; + + # Some other compiler argument. + *) + # Unknown arguments in both finalize_command and compile_command need + # to be aesthetically quoted because they are evaled later. + func_quote_for_eval "$arg" + arg="$func_quote_for_eval_result" + ;; + esac # arg + + # Now actually substitute the argument into the commands. 
+ if test -n "$arg"; then + func_append compile_command " $arg" + func_append finalize_command " $arg" + fi + done # argument parsing loop + + test -n "$prev" && \ + func_fatal_help "the \`$prevarg' option requires an argument" + + if test "$export_dynamic" = yes && test -n "$export_dynamic_flag_spec"; then + eval arg=\"$export_dynamic_flag_spec\" + func_append compile_command " $arg" + func_append finalize_command " $arg" + fi + + oldlibs= + # calculate the name of the file, without its directory + func_basename "$output" + outputname="$func_basename_result" + libobjs_save="$libobjs" + + if test -n "$shlibpath_var"; then + # get the directories listed in $shlibpath_var + eval shlib_search_path=\`\$ECHO \"\${$shlibpath_var}\" \| \$SED \'s/:/ /g\'\` + else + shlib_search_path= + fi + eval sys_lib_search_path=\"$sys_lib_search_path_spec\" + eval sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\" + + func_dirname "$output" "/" "" + output_objdir="$func_dirname_result$objdir" + func_to_tool_file "$output_objdir/" + tool_output_objdir=$func_to_tool_file_result + # Create the object directory. + func_mkdir_p "$output_objdir" + + # Determine the type of output + case $output in + "") + func_fatal_help "you must specify an output file" + ;; + *.$libext) linkmode=oldlib ;; + *.lo | *.$objext) linkmode=obj ;; + *.la) linkmode=lib ;; + *) linkmode=prog ;; # Anything else should be a program. + esac + + specialdeplibs= + + libs= + # Find all interdependent deplibs by searching for libraries + # that are linked more than once (e.g. -la -lb -la) + for deplib in $deplibs; do + if $opt_preserve_dup_deps ; then + case "$libs " in + *" $deplib "*) func_append specialdeplibs " $deplib" ;; + esac + fi + func_append libs " $deplib" + done + + if test "$linkmode" = lib; then + libs="$predeps $libs $compiler_lib_search_path $postdeps" + + # Compute libraries that are listed more than once in $predeps + # $postdeps and mark them as special (i.e., whose duplicates are + # not to be eliminated). + pre_post_deps= + if $opt_duplicate_compiler_generated_deps; then + for pre_post_dep in $predeps $postdeps; do + case "$pre_post_deps " in + *" $pre_post_dep "*) func_append specialdeplibs " $pre_post_deps" ;; + esac + func_append pre_post_deps " $pre_post_dep" + done + fi + pre_post_deps= + fi + + deplibs= + newdependency_libs= + newlib_search_path= + need_relink=no # whether we're linking any uninstalled libtool libraries + notinst_deplibs= # not-installed libtool libraries + notinst_path= # paths that contain not-installed libtool libraries + + case $linkmode in + lib) + passes="conv dlpreopen link" + for file in $dlfiles $dlprefiles; do + case $file in + *.la) ;; + *) + func_fatal_help "libraries can \`-dlopen' only libtool libraries: $file" + ;; + esac + done + ;; + prog) + compile_deplibs= + finalize_deplibs= + alldeplibs=no + newdlfiles= + newdlprefiles= + passes="conv scan dlopen dlpreopen link" + ;; + *) passes="conv" + ;; + esac + + for pass in $passes; do + # The preopen pass in lib mode reverses $deplibs; put it back here + # so that -L comes before libs that need it for instance... 
+ if test "$linkmode,$pass" = "lib,link"; then + ## FIXME: Find the place where the list is rebuilt in the wrong + ## order, and fix it there properly + tmp_deplibs= + for deplib in $deplibs; do + tmp_deplibs="$deplib $tmp_deplibs" + done + deplibs="$tmp_deplibs" + fi + + if test "$linkmode,$pass" = "lib,link" || + test "$linkmode,$pass" = "prog,scan"; then + libs="$deplibs" + deplibs= + fi + if test "$linkmode" = prog; then + case $pass in + dlopen) libs="$dlfiles" ;; + dlpreopen) libs="$dlprefiles" ;; + link) + libs="$deplibs %DEPLIBS%" + test "X$link_all_deplibs" != Xno && libs="$libs $dependency_libs" + ;; + esac + fi + if test "$linkmode,$pass" = "lib,dlpreopen"; then + # Collect and forward deplibs of preopened libtool libs + for lib in $dlprefiles; do + # Ignore non-libtool-libs + dependency_libs= + func_resolve_sysroot "$lib" + case $lib in + *.la) func_source "$func_resolve_sysroot_result" ;; + esac + + # Collect preopened libtool deplibs, except any this library + # has declared as weak libs + for deplib in $dependency_libs; do + func_basename "$deplib" + deplib_base=$func_basename_result + case " $weak_libs " in + *" $deplib_base "*) ;; + *) func_append deplibs " $deplib" ;; + esac + done + done + libs="$dlprefiles" + fi + if test "$pass" = dlopen; then + # Collect dlpreopened libraries + save_deplibs="$deplibs" + deplibs= + fi + + for deplib in $libs; do + lib= + found=no + case $deplib in + -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe \ + |-threads|-fopenmp|-openmp|-mp|-xopenmp|-omp|-qsmp=*) + if test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + func_append compiler_flags " $deplib" + if test "$linkmode" = lib ; then + case "$new_inherited_linker_flags " in + *" $deplib "*) ;; + * ) func_append new_inherited_linker_flags " $deplib" ;; + esac + fi + fi + continue + ;; + -l*) + if test "$linkmode" != lib && test "$linkmode" != prog; then + func_warning "\`-l' is ignored for archives/objects" + continue + fi + func_stripname '-l' '' "$deplib" + name=$func_stripname_result + if test "$linkmode" = lib; then + searchdirs="$newlib_search_path $lib_search_path $compiler_lib_search_dirs $sys_lib_search_path $shlib_search_path" + else + searchdirs="$newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path" + fi + for searchdir in $searchdirs; do + for search_ext in .la $std_shrext .so .a; do + # Search the libtool library + lib="$searchdir/lib${name}${search_ext}" + if test -f "$lib"; then + if test "$search_ext" = ".la"; then + found=yes + else + found=no + fi + break 2 + fi + done + done + if test "$found" != yes; then + # deplib doesn't seem to be a libtool library + if test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs" + fi + continue + else # deplib is a libtool library + # If $allow_libtool_libs_with_static_runtimes && $deplib is a stdlib, + # We need to do some special things here, and not later. 
+ if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then + case " $predeps $postdeps " in + *" $deplib "*) + if func_lalib_p "$lib"; then + library_names= + old_library= + func_source "$lib" + for l in $old_library $library_names; do + ll="$l" + done + if test "X$ll" = "X$old_library" ; then # only static version available + found=no + func_dirname "$lib" "" "." + ladir="$func_dirname_result" + lib=$ladir/$old_library + if test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs" + fi + continue + fi + fi + ;; + *) ;; + esac + fi + fi + ;; # -l + *.ltframework) + if test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + if test "$linkmode" = lib ; then + case "$new_inherited_linker_flags " in + *" $deplib "*) ;; + * ) func_append new_inherited_linker_flags " $deplib" ;; + esac + fi + fi + continue + ;; + -L*) + case $linkmode in + lib) + deplibs="$deplib $deplibs" + test "$pass" = conv && continue + newdependency_libs="$deplib $newdependency_libs" + func_stripname '-L' '' "$deplib" + func_resolve_sysroot "$func_stripname_result" + func_append newlib_search_path " $func_resolve_sysroot_result" + ;; + prog) + if test "$pass" = conv; then + deplibs="$deplib $deplibs" + continue + fi + if test "$pass" = scan; then + deplibs="$deplib $deplibs" + else + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + fi + func_stripname '-L' '' "$deplib" + func_resolve_sysroot "$func_stripname_result" + func_append newlib_search_path " $func_resolve_sysroot_result" + ;; + *) + func_warning "\`-L' is ignored for archives/objects" + ;; + esac # linkmode + continue + ;; # -L + -R*) + if test "$pass" = link; then + func_stripname '-R' '' "$deplib" + func_resolve_sysroot "$func_stripname_result" + dir=$func_resolve_sysroot_result + # Make sure the xrpath contains only unique directories. + case "$xrpath " in + *" $dir "*) ;; + *) func_append xrpath " $dir" ;; + esac + fi + deplibs="$deplib $deplibs" + continue + ;; + *.la) + func_resolve_sysroot "$deplib" + lib=$func_resolve_sysroot_result + ;; + *.$libext) + if test "$pass" = conv; then + deplibs="$deplib $deplibs" + continue + fi + case $linkmode in + lib) + # Linking convenience modules into shared libraries is allowed, + # but linking other static libraries is non-portable. + case " $dlpreconveniencelibs " in + *" $deplib "*) ;; + *) + valid_a_lib=no + case $deplibs_check_method in + match_pattern*) + set dummy $deplibs_check_method; shift + match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"` + if eval "\$ECHO \"$deplib\"" 2>/dev/null | $SED 10q \ + | $EGREP "$match_pattern_regex" > /dev/null; then + valid_a_lib=yes + fi + ;; + pass_all) + valid_a_lib=yes + ;; + esac + if test "$valid_a_lib" != yes; then + echo + $ECHO "*** Warning: Trying to link with static lib archive $deplib." + echo "*** I have the capability to make that library automatically link in when" + echo "*** you link to this library. But I can only do this if you have a" + echo "*** shared version of the library, which you do not appear to have" + echo "*** because the file extensions .$libext of this argument makes me believe" + echo "*** that it is just a static archive that I should not use here." 
+ else + echo + $ECHO "*** Warning: Linking the shared library $output against the" + $ECHO "*** static library $deplib is not portable!" + deplibs="$deplib $deplibs" + fi + ;; + esac + continue + ;; + prog) + if test "$pass" != link; then + deplibs="$deplib $deplibs" + else + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + fi + continue + ;; + esac # linkmode + ;; # *.$libext + *.lo | *.$objext) + if test "$pass" = conv; then + deplibs="$deplib $deplibs" + elif test "$linkmode" = prog; then + if test "$pass" = dlpreopen || test "$dlopen_support" != yes || test "$build_libtool_libs" = no; then + # If there is no dlopen support or we're linking statically, + # we need to preload. + func_append newdlprefiles " $deplib" + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + func_append newdlfiles " $deplib" + fi + fi + continue + ;; + %DEPLIBS%) + alldeplibs=yes + continue + ;; + esac # case $deplib + + if test "$found" = yes || test -f "$lib"; then : + else + func_fatal_error "cannot find the library \`$lib' or unhandled argument \`$deplib'" + fi + + # Check to see that this really is a libtool archive. + func_lalib_unsafe_p "$lib" \ + || func_fatal_error "\`$lib' is not a valid libtool archive" + + func_dirname "$lib" "" "." + ladir="$func_dirname_result" + + dlname= + dlopen= + dlpreopen= + libdir= + library_names= + old_library= + inherited_linker_flags= + # If the library was installed with an old release of libtool, + # it will not redefine variables installed, or shouldnotlink + installed=yes + shouldnotlink=no + avoidtemprpath= + + + # Read the .la file + func_source "$lib" + + # Convert "-framework foo" to "foo.ltframework" + if test -n "$inherited_linker_flags"; then + tmp_inherited_linker_flags=`$ECHO "$inherited_linker_flags" | $SED 's/-framework \([^ $]*\)/\1.ltframework/g'` + for tmp_inherited_linker_flag in $tmp_inherited_linker_flags; do + case " $new_inherited_linker_flags " in + *" $tmp_inherited_linker_flag "*) ;; + *) func_append new_inherited_linker_flags " $tmp_inherited_linker_flag";; + esac + done + fi + dependency_libs=`$ECHO " $dependency_libs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + if test "$linkmode,$pass" = "lib,link" || + test "$linkmode,$pass" = "prog,scan" || + { test "$linkmode" != prog && test "$linkmode" != lib; }; then + test -n "$dlopen" && func_append dlfiles " $dlopen" + test -n "$dlpreopen" && func_append dlprefiles " $dlpreopen" + fi + + if test "$pass" = conv; then + # Only check for convenience libraries + deplibs="$lib $deplibs" + if test -z "$libdir"; then + if test -z "$old_library"; then + func_fatal_error "cannot find name of link library for \`$lib'" + fi + # It is a libtool convenience library, so add in its objects. + func_append convenience " $ladir/$objdir/$old_library" + func_append old_convenience " $ladir/$objdir/$old_library" + tmp_libs= + for deplib in $dependency_libs; do + deplibs="$deplib $deplibs" + if $opt_preserve_dup_deps ; then + case "$tmp_libs " in + *" $deplib "*) func_append specialdeplibs " $deplib" ;; + esac + fi + func_append tmp_libs " $deplib" + done + elif test "$linkmode" != prog && test "$linkmode" != lib; then + func_fatal_error "\`$lib' is not a convenience library" + fi + continue + fi # $pass = conv + + + # Get the name of the library we link against. 
+ linklib= + if test -n "$old_library" && + { test "$prefer_static_libs" = yes || + test "$prefer_static_libs,$installed" = "built,no"; }; then + linklib=$old_library + else + for l in $old_library $library_names; do + linklib="$l" + done + fi + if test -z "$linklib"; then + func_fatal_error "cannot find name of link library for \`$lib'" + fi + + # This library was specified with -dlopen. + if test "$pass" = dlopen; then + if test -z "$libdir"; then + func_fatal_error "cannot -dlopen a convenience library: \`$lib'" + fi + if test -z "$dlname" || + test "$dlopen_support" != yes || + test "$build_libtool_libs" = no; then + # If there is no dlname, no dlopen support or we're linking + # statically, we need to preload. We also need to preload any + # dependent libraries so libltdl's deplib preloader doesn't + # bomb out in the load deplibs phase. + func_append dlprefiles " $lib $dependency_libs" + else + func_append newdlfiles " $lib" + fi + continue + fi # $pass = dlopen + + # We need an absolute path. + case $ladir in + [\\/]* | [A-Za-z]:[\\/]*) abs_ladir="$ladir" ;; + *) + abs_ladir=`cd "$ladir" && pwd` + if test -z "$abs_ladir"; then + func_warning "cannot determine absolute directory name of \`$ladir'" + func_warning "passing it literally to the linker, although it might fail" + abs_ladir="$ladir" + fi + ;; + esac + func_basename "$lib" + laname="$func_basename_result" + + # Find the relevant object directory and library name. + if test "X$installed" = Xyes; then + if test ! -f "$lt_sysroot$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then + func_warning "library \`$lib' was moved." + dir="$ladir" + absdir="$abs_ladir" + libdir="$abs_ladir" + else + dir="$lt_sysroot$libdir" + absdir="$lt_sysroot$libdir" + fi + test "X$hardcode_automatic" = Xyes && avoidtemprpath=yes + else + if test ! -f "$ladir/$objdir/$linklib" && test -f "$abs_ladir/$linklib"; then + dir="$ladir" + absdir="$abs_ladir" + # Remove this search path later + func_append notinst_path " $abs_ladir" + else + dir="$ladir/$objdir" + absdir="$abs_ladir/$objdir" + # Remove this search path later + func_append notinst_path " $abs_ladir" + fi + fi # $installed = yes + func_stripname 'lib' '.la' "$laname" + name=$func_stripname_result + + # This library was specified with -dlpreopen. + if test "$pass" = dlpreopen; then + if test -z "$libdir" && test "$linkmode" = prog; then + func_fatal_error "only libraries may -dlpreopen a convenience library: \`$lib'" + fi + case "$host" in + # special handling for platforms with PE-DLLs. + *cygwin* | *mingw* | *cegcc* ) + # Linker will automatically link against shared library if both + # static and shared are present. Therefore, ensure we extract + # symbols from the import library if a shared library is present + # (otherwise, the dlopen module name will be incorrect). We do + # this by putting the import library name into $newdlprefiles. + # We recover the dlopen module name by 'saving' the la file + # name in a special purpose variable, and (later) extracting the + # dlname from the la file. + if test -n "$dlname"; then + func_tr_sh "$dir/$linklib" + eval "libfile_$func_tr_sh_result=\$abs_ladir/\$laname" + func_append newdlprefiles " $dir/$linklib" + else + func_append newdlprefiles " $dir/$old_library" + # Keep a list of preopened convenience libraries to check + # that they are being used correctly in the link pass. 
+ test -z "$libdir" && \ + func_append dlpreconveniencelibs " $dir/$old_library" + fi + ;; + * ) + # Prefer using a static library (so that no silly _DYNAMIC symbols + # are required to link). + if test -n "$old_library"; then + func_append newdlprefiles " $dir/$old_library" + # Keep a list of preopened convenience libraries to check + # that they are being used correctly in the link pass. + test -z "$libdir" && \ + func_append dlpreconveniencelibs " $dir/$old_library" + # Otherwise, use the dlname, so that lt_dlopen finds it. + elif test -n "$dlname"; then + func_append newdlprefiles " $dir/$dlname" + else + func_append newdlprefiles " $dir/$linklib" + fi + ;; + esac + fi # $pass = dlpreopen + + if test -z "$libdir"; then + # Link the convenience library + if test "$linkmode" = lib; then + deplibs="$dir/$old_library $deplibs" + elif test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$dir/$old_library $compile_deplibs" + finalize_deplibs="$dir/$old_library $finalize_deplibs" + else + deplibs="$lib $deplibs" # used for prog,scan pass + fi + continue + fi + + + if test "$linkmode" = prog && test "$pass" != link; then + func_append newlib_search_path " $ladir" + deplibs="$lib $deplibs" + + linkalldeplibs=no + if test "$link_all_deplibs" != no || test -z "$library_names" || + test "$build_libtool_libs" = no; then + linkalldeplibs=yes + fi + + tmp_libs= + for deplib in $dependency_libs; do + case $deplib in + -L*) func_stripname '-L' '' "$deplib" + func_resolve_sysroot "$func_stripname_result" + func_append newlib_search_path " $func_resolve_sysroot_result" + ;; + esac + # Need to link against all dependency_libs? + if test "$linkalldeplibs" = yes; then + deplibs="$deplib $deplibs" + else + # Need to hardcode shared library paths + # or/and link against static libraries + newdependency_libs="$deplib $newdependency_libs" + fi + if $opt_preserve_dup_deps ; then + case "$tmp_libs " in + *" $deplib "*) func_append specialdeplibs " $deplib" ;; + esac + fi + func_append tmp_libs " $deplib" + done # for deplib + continue + fi # $linkmode = prog... + + if test "$linkmode,$pass" = "prog,link"; then + if test -n "$library_names" && + { { test "$prefer_static_libs" = no || + test "$prefer_static_libs,$installed" = "built,yes"; } || + test -z "$old_library"; }; then + # We need to hardcode the library path + if test -n "$shlibpath_var" && test -z "$avoidtemprpath" ; then + # Make sure the rpath contains only unique directories. + case "$temp_rpath:" in + *"$absdir:"*) ;; + *) func_append temp_rpath "$absdir:" ;; + esac + fi + + # Hardcode the library path. + # Skip directories that are in the system default run-time + # search path. + case " $sys_lib_dlsearch_path " in + *" $absdir "*) ;; + *) + case "$compile_rpath " in + *" $absdir "*) ;; + *) func_append compile_rpath " $absdir" ;; + esac + ;; + esac + case " $sys_lib_dlsearch_path " in + *" $libdir "*) ;; + *) + case "$finalize_rpath " in + *" $libdir "*) ;; + *) func_append finalize_rpath " $libdir" ;; + esac + ;; + esac + fi # $linkmode,$pass = prog,link... 
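
The rpath bookkeeping above leans on one small idiom used throughout this function: a whitespace-delimited list that is only extended when the entry is not already present. A minimal standalone rendering of that idiom (the function name is hypothetical):

    append_unique ()
    {
      # $1 = current space-separated list, $2 = entry to add
      case " $1 " in
        *" $2 "*) printf '%s\n' "$1" ;;      # already listed: unchanged
        *)        printf '%s\n' "$1 $2" ;;   # otherwise append
      esac
    }
    # append_unique "/usr/lib /opt/lib" "/usr/lib"  -> /usr/lib /opt/lib
    # append_unique "/usr/lib /opt/lib" "/srv/lib"  -> /usr/lib /opt/lib /srv/lib

This is the same pattern applied to $compile_rpath, $finalize_rpath, $xrpath and the various deplib lists.
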
+ + if test "$alldeplibs" = yes && + { test "$deplibs_check_method" = pass_all || + { test "$build_libtool_libs" = yes && + test -n "$library_names"; }; }; then + # We only need to search for static libraries + continue + fi + fi + + link_static=no # Whether the deplib will be linked statically + use_static_libs=$prefer_static_libs + if test "$use_static_libs" = built && test "$installed" = yes; then + use_static_libs=no + fi + if test -n "$library_names" && + { test "$use_static_libs" = no || test -z "$old_library"; }; then + case $host in + *cygwin* | *mingw* | *cegcc*) + # No point in relinking DLLs because paths are not encoded + func_append notinst_deplibs " $lib" + need_relink=no + ;; + *) + if test "$installed" = no; then + func_append notinst_deplibs " $lib" + need_relink=yes + fi + ;; + esac + # This is a shared library + + # Warn about portability, can't link against -module's on some + # systems (darwin). Don't bleat about dlopened modules though! + dlopenmodule="" + for dlpremoduletest in $dlprefiles; do + if test "X$dlpremoduletest" = "X$lib"; then + dlopenmodule="$dlpremoduletest" + break + fi + done + if test -z "$dlopenmodule" && test "$shouldnotlink" = yes && test "$pass" = link; then + echo + if test "$linkmode" = prog; then + $ECHO "*** Warning: Linking the executable $output against the loadable module" + else + $ECHO "*** Warning: Linking the shared library $output against the loadable module" + fi + $ECHO "*** $linklib is not portable!" + fi + if test "$linkmode" = lib && + test "$hardcode_into_libs" = yes; then + # Hardcode the library path. + # Skip directories that are in the system default run-time + # search path. + case " $sys_lib_dlsearch_path " in + *" $absdir "*) ;; + *) + case "$compile_rpath " in + *" $absdir "*) ;; + *) func_append compile_rpath " $absdir" ;; + esac + ;; + esac + case " $sys_lib_dlsearch_path " in + *" $libdir "*) ;; + *) + case "$finalize_rpath " in + *" $libdir "*) ;; + *) func_append finalize_rpath " $libdir" ;; + esac + ;; + esac + fi + + if test -n "$old_archive_from_expsyms_cmds"; then + # figure out the soname + set dummy $library_names + shift + realname="$1" + shift + libname=`eval "\\$ECHO \"$libname_spec\""` + # use dlname if we got it. it's perfectly good, no? + if test -n "$dlname"; then + soname="$dlname" + elif test -n "$soname_spec"; then + # bleh windows + case $host in + *cygwin* | mingw* | *cegcc*) + func_arith $current - $age + major=$func_arith_result + versuffix="-$major" + ;; + esac + eval soname=\"$soname_spec\" + else + soname="$realname" + fi + + # Make a new name for the extract_expsyms_cmds to use + soroot="$soname" + func_basename "$soroot" + soname="$func_basename_result" + func_stripname 'lib' '.dll' "$soname" + newlib=libimp-$func_stripname_result.a + + # If the library has no export list, then create one now + if test -f "$output_objdir/$soname-def"; then : + else + func_verbose "extracting exported symbol list from \`$soname'" + func_execute_cmds "$extract_expsyms_cmds" 'exit $?' + fi + + # Create $newlib + if test -f "$output_objdir/$newlib"; then :; else + func_verbose "generating import library for \`$soname'" + func_execute_cmds "$old_archive_from_expsyms_cmds" 'exit $?' 
+ fi + # make sure the library variables are pointing to the new library + dir=$output_objdir + linklib=$newlib + fi # test -n "$old_archive_from_expsyms_cmds" + + if test "$linkmode" = prog || test "$opt_mode" != relink; then + add_shlibpath= + add_dir= + add= + lib_linked=yes + case $hardcode_action in + immediate | unsupported) + if test "$hardcode_direct" = no; then + add="$dir/$linklib" + case $host in + *-*-sco3.2v5.0.[024]*) add_dir="-L$dir" ;; + *-*-sysv4*uw2*) add_dir="-L$dir" ;; + *-*-sysv5OpenUNIX* | *-*-sysv5UnixWare7.[01].[10]* | \ + *-*-unixware7*) add_dir="-L$dir" ;; + *-*-darwin* ) + # if the lib is a (non-dlopened) module then we can not + # link against it, someone is ignoring the earlier warnings + if /usr/bin/file -L $add 2> /dev/null | + $GREP ": [^:]* bundle" >/dev/null ; then + if test "X$dlopenmodule" != "X$lib"; then + $ECHO "*** Warning: lib $linklib is a module, not a shared library" + if test -z "$old_library" ; then + echo + echo "*** And there doesn't seem to be a static archive available" + echo "*** The link will probably fail, sorry" + else + add="$dir/$old_library" + fi + elif test -n "$old_library"; then + add="$dir/$old_library" + fi + fi + esac + elif test "$hardcode_minus_L" = no; then + case $host in + *-*-sunos*) add_shlibpath="$dir" ;; + esac + add_dir="-L$dir" + add="-l$name" + elif test "$hardcode_shlibpath_var" = no; then + add_shlibpath="$dir" + add="-l$name" + else + lib_linked=no + fi + ;; + relink) + if test "$hardcode_direct" = yes && + test "$hardcode_direct_absolute" = no; then + add="$dir/$linklib" + elif test "$hardcode_minus_L" = yes; then + add_dir="-L$absdir" + # Try looking first in the location we're being installed to. + if test -n "$inst_prefix_dir"; then + case $libdir in + [\\/]*) + func_append add_dir " -L$inst_prefix_dir$libdir" + ;; + esac + fi + add="-l$name" + elif test "$hardcode_shlibpath_var" = yes; then + add_shlibpath="$dir" + add="-l$name" + else + lib_linked=no + fi + ;; + *) lib_linked=no ;; + esac + + if test "$lib_linked" != yes; then + func_fatal_configuration "unsupported hardcode properties" + fi + + if test -n "$add_shlibpath"; then + case :$compile_shlibpath: in + *":$add_shlibpath:"*) ;; + *) func_append compile_shlibpath "$add_shlibpath:" ;; + esac + fi + if test "$linkmode" = prog; then + test -n "$add_dir" && compile_deplibs="$add_dir $compile_deplibs" + test -n "$add" && compile_deplibs="$add $compile_deplibs" + else + test -n "$add_dir" && deplibs="$add_dir $deplibs" + test -n "$add" && deplibs="$add $deplibs" + if test "$hardcode_direct" != yes && + test "$hardcode_minus_L" != yes && + test "$hardcode_shlibpath_var" = yes; then + case :$finalize_shlibpath: in + *":$libdir:"*) ;; + *) func_append finalize_shlibpath "$libdir:" ;; + esac + fi + fi + fi + + if test "$linkmode" = prog || test "$opt_mode" = relink; then + add_shlibpath= + add_dir= + add= + # Finalize command for both is simple: just hardcode it. 
+ if test "$hardcode_direct" = yes && + test "$hardcode_direct_absolute" = no; then + add="$libdir/$linklib" + elif test "$hardcode_minus_L" = yes; then + add_dir="-L$libdir" + add="-l$name" + elif test "$hardcode_shlibpath_var" = yes; then + case :$finalize_shlibpath: in + *":$libdir:"*) ;; + *) func_append finalize_shlibpath "$libdir:" ;; + esac + add="-l$name" + elif test "$hardcode_automatic" = yes; then + if test -n "$inst_prefix_dir" && + test -f "$inst_prefix_dir$libdir/$linklib" ; then + add="$inst_prefix_dir$libdir/$linklib" + else + add="$libdir/$linklib" + fi + else + # We cannot seem to hardcode it, guess we'll fake it. + add_dir="-L$libdir" + # Try looking first in the location we're being installed to. + if test -n "$inst_prefix_dir"; then + case $libdir in + [\\/]*) + func_append add_dir " -L$inst_prefix_dir$libdir" + ;; + esac + fi + add="-l$name" + fi + + if test "$linkmode" = prog; then + test -n "$add_dir" && finalize_deplibs="$add_dir $finalize_deplibs" + test -n "$add" && finalize_deplibs="$add $finalize_deplibs" + else + test -n "$add_dir" && deplibs="$add_dir $deplibs" + test -n "$add" && deplibs="$add $deplibs" + fi + fi + elif test "$linkmode" = prog; then + # Here we assume that one of hardcode_direct or hardcode_minus_L + # is not unsupported. This is valid on all known static and + # shared platforms. + if test "$hardcode_direct" != unsupported; then + test -n "$old_library" && linklib="$old_library" + compile_deplibs="$dir/$linklib $compile_deplibs" + finalize_deplibs="$dir/$linklib $finalize_deplibs" + else + compile_deplibs="-l$name -L$dir $compile_deplibs" + finalize_deplibs="-l$name -L$dir $finalize_deplibs" + fi + elif test "$build_libtool_libs" = yes; then + # Not a shared library + if test "$deplibs_check_method" != pass_all; then + # We're trying link a shared library against a static one + # but the system doesn't support it. + + # Just print a warning and add the library to dependency_libs so + # that the program can be linked against the static library. + echo + $ECHO "*** Warning: This system can not link to static lib archive $lib." + echo "*** I have the capability to make that library automatically link in when" + echo "*** you link to this library. But I can only do this if you have a" + echo "*** shared version of the library, which you do not appear to have." + if test "$module" = yes; then + echo "*** But as you try to build a module library, libtool will still create " + echo "*** a static module, that should work as long as the dlopening application" + echo "*** is linked with the -dlopen flag to resolve symbols at runtime." + if test -z "$global_symbol_pipe"; then + echo + echo "*** However, this would only work if libtool was able to extract symbol" + echo "*** lists from a program, using \`nm' or equivalent, but libtool could" + echo "*** not find such a program. So, this module is probably useless." + echo "*** \`nm' from GNU binutils and a full rebuild may help." + fi + if test "$build_old_libs" = no; then + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + fi + else + deplibs="$dir/$old_library $deplibs" + link_static=yes + fi + fi # link shared/static library? 
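
The finalize branch above builds the flags used when the output is relinked for its installed location; the extra -L$inst_prefix_dir$libdir entry exists so that a staged (DESTDIR-style) copy of $libdir is also searched. A sketch of the staged install that typically populates $inst_prefix_dir (paths hypothetical; the relink itself is driven by libtool's install mode, outside this function):

    # stage the library under /tmp/stage instead of installing directly;
    # when a relink is needed, install mode re-runs the recorded link
    # command with "-inst-prefix-dir /tmp/stage"
    libtool --mode=install /usr/bin/install -c libexample.la \
        /tmp/stage/usr/local/lib/libexample.la
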
+ + if test "$linkmode" = lib; then + if test -n "$dependency_libs" && + { test "$hardcode_into_libs" != yes || + test "$build_old_libs" = yes || + test "$link_static" = yes; }; then + # Extract -R from dependency_libs + temp_deplibs= + for libdir in $dependency_libs; do + case $libdir in + -R*) func_stripname '-R' '' "$libdir" + temp_xrpath=$func_stripname_result + case " $xrpath " in + *" $temp_xrpath "*) ;; + *) func_append xrpath " $temp_xrpath";; + esac;; + *) func_append temp_deplibs " $libdir";; + esac + done + dependency_libs="$temp_deplibs" + fi + + func_append newlib_search_path " $absdir" + # Link against this library + test "$link_static" = no && newdependency_libs="$abs_ladir/$laname $newdependency_libs" + # ... and its dependency_libs + tmp_libs= + for deplib in $dependency_libs; do + newdependency_libs="$deplib $newdependency_libs" + case $deplib in + -L*) func_stripname '-L' '' "$deplib" + func_resolve_sysroot "$func_stripname_result";; + *) func_resolve_sysroot "$deplib" ;; + esac + if $opt_preserve_dup_deps ; then + case "$tmp_libs " in + *" $func_resolve_sysroot_result "*) + func_append specialdeplibs " $func_resolve_sysroot_result" ;; + esac + fi + func_append tmp_libs " $func_resolve_sysroot_result" + done + + if test "$link_all_deplibs" != no; then + # Add the search paths of all dependency libraries + for deplib in $dependency_libs; do + path= + case $deplib in + -L*) path="$deplib" ;; + *.la) + func_resolve_sysroot "$deplib" + deplib=$func_resolve_sysroot_result + func_dirname "$deplib" "" "." + dir=$func_dirname_result + # We need an absolute path. + case $dir in + [\\/]* | [A-Za-z]:[\\/]*) absdir="$dir" ;; + *) + absdir=`cd "$dir" && pwd` + if test -z "$absdir"; then + func_warning "cannot determine absolute directory name of \`$dir'" + absdir="$dir" + fi + ;; + esac + if $GREP "^installed=no" $deplib > /dev/null; then + case $host in + *-*-darwin*) + depdepl= + eval deplibrary_names=`${SED} -n -e 's/^library_names=\(.*\)$/\1/p' $deplib` + if test -n "$deplibrary_names" ; then + for tmp in $deplibrary_names ; do + depdepl=$tmp + done + if test -f "$absdir/$objdir/$depdepl" ; then + depdepl="$absdir/$objdir/$depdepl" + darwin_install_name=`${OTOOL} -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'` + if test -z "$darwin_install_name"; then + darwin_install_name=`${OTOOL64} -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'` + fi + func_append compiler_flags " ${wl}-dylib_file ${wl}${darwin_install_name}:${depdepl}" + func_append linker_flags " -dylib_file ${darwin_install_name}:${depdepl}" + path= + fi + fi + ;; + *) + path="-L$absdir/$objdir" + ;; + esac + else + eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` + test -z "$libdir" && \ + func_fatal_error "\`$deplib' is not a valid libtool archive" + test "$absdir" != "$libdir" && \ + func_warning "\`$deplib' seems to be moved" + + path="-L$absdir" + fi + ;; + esac + case " $deplibs " in + *" $path "*) ;; + *) deplibs="$path $deplibs" ;; + esac + done + fi # link_all_deplibs != no + fi # linkmode = lib + done # for deplib in $libs + if test "$pass" = link; then + if test "$linkmode" = "prog"; then + compile_deplibs="$new_inherited_linker_flags $compile_deplibs" + finalize_deplibs="$new_inherited_linker_flags $finalize_deplibs" + else + compiler_flags="$compiler_flags "`$ECHO " $new_inherited_linker_flags" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + fi + fi + dependency_libs="$newdependency_libs" + if test "$pass" = dlpreopen; then + # Link the dlpreopened libraries before other libraries + 
for deplib in $save_deplibs; do + deplibs="$deplib $deplibs" + done + fi + if test "$pass" != dlopen; then + if test "$pass" != conv; then + # Make sure lib_search_path contains only unique directories. + lib_search_path= + for dir in $newlib_search_path; do + case "$lib_search_path " in + *" $dir "*) ;; + *) func_append lib_search_path " $dir" ;; + esac + done + newlib_search_path= + fi + + if test "$linkmode,$pass" != "prog,link"; then + vars="deplibs" + else + vars="compile_deplibs finalize_deplibs" + fi + for var in $vars dependency_libs; do + # Add libraries to $var in reverse order + eval tmp_libs=\"\$$var\" + new_libs= + for deplib in $tmp_libs; do + # FIXME: Pedantically, this is the right thing to do, so + # that some nasty dependency loop isn't accidentally + # broken: + #new_libs="$deplib $new_libs" + # Pragmatically, this seems to cause very few problems in + # practice: + case $deplib in + -L*) new_libs="$deplib $new_libs" ;; + -R*) ;; + *) + # And here is the reason: when a library appears more + # than once as an explicit dependence of a library, or + # is implicitly linked in more than once by the + # compiler, it is considered special, and multiple + # occurrences thereof are not removed. Compare this + # with having the same library being listed as a + # dependency of multiple other libraries: in this case, + # we know (pedantically, we assume) the library does not + # need to be listed more than once, so we keep only the + # last copy. This is not always right, but it is rare + # enough that we require users that really mean to play + # such unportable linking tricks to link the library + # using -Wl,-lname, so that libtool does not consider it + # for duplicate removal. + case " $specialdeplibs " in + *" $deplib "*) new_libs="$deplib $new_libs" ;; + *) + case " $new_libs " in + *" $deplib "*) ;; + *) new_libs="$deplib $new_libs" ;; + esac + ;; + esac + ;; + esac + done + tmp_libs= + for deplib in $new_libs; do + case $deplib in + -L*) + case " $tmp_libs " in + *" $deplib "*) ;; + *) func_append tmp_libs " $deplib" ;; + esac + ;; + *) func_append tmp_libs " $deplib" ;; + esac + done + eval $var=\"$tmp_libs\" + done # for var + fi + # Last step: remove runtime libs from dependency_libs + # (they stay in deplibs) + tmp_libs= + for i in $dependency_libs ; do + case " $predeps $postdeps $compiler_lib_search_path " in + *" $i "*) + i="" + ;; + esac + if test -n "$i" ; then + func_append tmp_libs " $i" + fi + done + dependency_libs=$tmp_libs + done # for pass + if test "$linkmode" = prog; then + dlfiles="$newdlfiles" + fi + if test "$linkmode" = prog || test "$linkmode" = lib; then + dlprefiles="$newdlprefiles" + fi + + case $linkmode in + oldlib) + if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then + func_warning "\`-dlopen' is ignored for archives" + fi + + case " $deplibs" in + *\ -l* | *\ -L*) + func_warning "\`-l' and \`-L' are ignored for archives" ;; + esac + + test -n "$rpath" && \ + func_warning "\`-rpath' is ignored for archives" + + test -n "$xrpath" && \ + func_warning "\`-R' is ignored for archives" + + test -n "$vinfo" && \ + func_warning "\`-version-info/-version-number' is ignored for archives" + + test -n "$release" && \ + func_warning "\`-release' is ignored for archives" + + test -n "$export_symbols$export_symbols_regex" && \ + func_warning "\`-export-symbols' is ignored for archives" + + # Now set the variables for building old libraries. 
+ build_libtool_libs=no + oldlibs="$output" + func_append objs "$old_deplibs" + ;; + + lib) + # Make sure we only generate libraries of the form `libNAME.la'. + case $outputname in + lib*) + func_stripname 'lib' '.la' "$outputname" + name=$func_stripname_result + eval shared_ext=\"$shrext_cmds\" + eval libname=\"$libname_spec\" + ;; + *) + test "$module" = no && \ + func_fatal_help "libtool library \`$output' must begin with \`lib'" + + if test "$need_lib_prefix" != no; then + # Add the "lib" prefix for modules if required + func_stripname '' '.la' "$outputname" + name=$func_stripname_result + eval shared_ext=\"$shrext_cmds\" + eval libname=\"$libname_spec\" + else + func_stripname '' '.la' "$outputname" + libname=$func_stripname_result + fi + ;; + esac + + if test -n "$objs"; then + if test "$deplibs_check_method" != pass_all; then + func_fatal_error "cannot build libtool library \`$output' from non-libtool objects on this host:$objs" + else + echo + $ECHO "*** Warning: Linking the shared library $output against the non-libtool" + $ECHO "*** objects $objs is not portable!" + func_append libobjs " $objs" + fi + fi + + test "$dlself" != no && \ + func_warning "\`-dlopen self' is ignored for libtool libraries" + + set dummy $rpath + shift + test "$#" -gt 1 && \ + func_warning "ignoring multiple \`-rpath's for a libtool library" + + install_libdir="$1" + + oldlibs= + if test -z "$rpath"; then + if test "$build_libtool_libs" = yes; then + # Building a libtool convenience library. + # Some compilers have problems with a `.al' extension so + # convenience libraries should have the same extension an + # archive normally would. + oldlibs="$output_objdir/$libname.$libext $oldlibs" + build_libtool_libs=convenience + build_old_libs=yes + fi + + test -n "$vinfo" && \ + func_warning "\`-version-info/-version-number' is ignored for convenience libraries" + + test -n "$release" && \ + func_warning "\`-release' is ignored for convenience libraries" + else + + # Parse the version information argument. + save_ifs="$IFS"; IFS=':' + set dummy $vinfo 0 0 0 + shift + IFS="$save_ifs" + + test -n "$7" && \ + func_fatal_help "too many parameters to \`-version-info'" + + # convert absolute version numbers to libtool ages + # this retains compatibility with .la files and attempts + # to make the code below a bit more comprehensible + + case $vinfo_number in + yes) + number_major="$1" + number_minor="$2" + number_revision="$3" + # + # There are really only two kinds -- those that + # use the current revision as the major version + # and those that subtract age and use age as + # a minor version. But, then there is irix + # which has an extra 1 added just for fun + # + case $version_type in + # correct linux to gnu/linux during the next big refactor + darwin|linux|osf|windows|none) + func_arith $number_major + $number_minor + current=$func_arith_result + age="$number_minor" + revision="$number_revision" + ;; + freebsd-aout|freebsd-elf|qnx|sunos) + current="$number_major" + revision="$number_minor" + age="0" + ;; + irix|nonstopux) + func_arith $number_major + $number_minor + current=$func_arith_result + age="$number_minor" + revision="$number_minor" + lt_irix_increment=no + ;; + *) + func_fatal_configuration "$modename: unknown library version type \`$version_type'" + ;; + esac + ;; + no) + current="$1" + revision="$2" + age="$3" + ;; + esac + + # Check that each of the things are valid numbers. 
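+ # Worked example (hypothetical values): \`-version-info 3:2:1' parses to
+ # current=3, revision=2, age=1.  After the validity checks below, the
+ # version_type=linux branch computes major=.2 (current - age) and
+ # versuffix=.2.1.2, so a hypothetical libfoo.la ends up as libfoo.so.2.1.2
+ # with soname libfoo.so.2.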
+ case $current in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + func_error "CURRENT \`$current' must be a nonnegative integer" + func_fatal_error "\`$vinfo' is not valid version information" + ;; + esac + + case $revision in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + func_error "REVISION \`$revision' must be a nonnegative integer" + func_fatal_error "\`$vinfo' is not valid version information" + ;; + esac + + case $age in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + func_error "AGE \`$age' must be a nonnegative integer" + func_fatal_error "\`$vinfo' is not valid version information" + ;; + esac + + if test "$age" -gt "$current"; then + func_error "AGE \`$age' is greater than the current interface number \`$current'" + func_fatal_error "\`$vinfo' is not valid version information" + fi + + # Calculate the version variables. + major= + versuffix= + verstring= + case $version_type in + none) ;; + + darwin) + # Like Linux, but with the current version available in + # verstring for coding it into the library header + func_arith $current - $age + major=.$func_arith_result + versuffix="$major.$age.$revision" + # Darwin ld doesn't like 0 for these options... + func_arith $current + 1 + minor_current=$func_arith_result + xlcverstring="${wl}-compatibility_version ${wl}$minor_current ${wl}-current_version ${wl}$minor_current.$revision" + verstring="-compatibility_version $minor_current -current_version $minor_current.$revision" + ;; + + freebsd-aout) + major=".$current" + versuffix=".$current.$revision"; + ;; + + freebsd-elf) + major=".$current" + versuffix=".$current" + ;; + + irix | nonstopux) + if test "X$lt_irix_increment" = "Xno"; then + func_arith $current - $age + else + func_arith $current - $age + 1 + fi + major=$func_arith_result + + case $version_type in + nonstopux) verstring_prefix=nonstopux ;; + *) verstring_prefix=sgi ;; + esac + verstring="$verstring_prefix$major.$revision" + + # Add in all the interfaces that we are compatible with. + loop=$revision + while test "$loop" -ne 0; do + func_arith $revision - $loop + iface=$func_arith_result + func_arith $loop - 1 + loop=$func_arith_result + verstring="$verstring_prefix$major.$iface:$verstring" + done + + # Before this point, $major must not contain `.'. + major=.$major + versuffix="$major.$revision" + ;; + + linux) # correct to gnu/linux during the next big refactor + func_arith $current - $age + major=.$func_arith_result + versuffix="$major.$age.$revision" + ;; + + osf) + func_arith $current - $age + major=.$func_arith_result + versuffix=".$current.$age.$revision" + verstring="$current.$age.$revision" + + # Add in all the interfaces that we are compatible with. + loop=$age + while test "$loop" -ne 0; do + func_arith $current - $loop + iface=$func_arith_result + func_arith $loop - 1 + loop=$func_arith_result + verstring="$verstring:${iface}.0" + done + + # Make executables depend on our current version. + func_append verstring ":${current}.0" + ;; + + qnx) + major=".$current" + versuffix=".$current" + ;; + + sunos) + major=".$current" + versuffix=".$current.$revision" + ;; + + windows) + # Use '-' rather than '.', since we only want one + # extension on DOS 8.3 filesystems. 
+ func_arith $current - $age + major=$func_arith_result + versuffix="-$major" + ;; + + *) + func_fatal_configuration "unknown library version type \`$version_type'" + ;; + esac + + # Clear the version info if we defaulted, and they specified a release. + if test -z "$vinfo" && test -n "$release"; then + major= + case $version_type in + darwin) + # we can't check for "0.0" in archive_cmds due to quoting + # problems, so we reset it completely + verstring= + ;; + *) + verstring="0.0" + ;; + esac + if test "$need_version" = no; then + versuffix= + else + versuffix=".0.0" + fi + fi + + # Remove version info from name if versioning should be avoided + if test "$avoid_version" = yes && test "$need_version" = no; then + major= + versuffix= + verstring="" + fi + + # Check to see if the archive will have undefined symbols. + if test "$allow_undefined" = yes; then + if test "$allow_undefined_flag" = unsupported; then + func_warning "undefined symbols not allowed in $host shared libraries" + build_libtool_libs=no + build_old_libs=yes + fi + else + # Don't allow undefined symbols. + allow_undefined_flag="$no_undefined_flag" + fi + + fi + + func_generate_dlsyms "$libname" "$libname" "yes" + func_append libobjs " $symfileobj" + test "X$libobjs" = "X " && libobjs= + + if test "$opt_mode" != relink; then + # Remove our outputs, but don't remove object files since they + # may have been created when compiling PIC objects. + removelist= + tempremovelist=`$ECHO "$output_objdir/*"` + for p in $tempremovelist; do + case $p in + *.$objext | *.gcno) + ;; + $output_objdir/$outputname | $output_objdir/$libname.* | $output_objdir/${libname}${release}.*) + if test "X$precious_files_regex" != "X"; then + if $ECHO "$p" | $EGREP -e "$precious_files_regex" >/dev/null 2>&1 + then + continue + fi + fi + func_append removelist " $p" + ;; + *) ;; + esac + done + test -n "$removelist" && \ + func_show_eval "${RM}r \$removelist" + fi + + # Now set the variables for building old libraries. + if test "$build_old_libs" = yes && test "$build_libtool_libs" != convenience ; then + func_append oldlibs " $output_objdir/$libname.$libext" + + # Transform .lo files to .o files. + oldobjs="$objs "`$ECHO "$libobjs" | $SP2NL | $SED "/\.${libext}$/d; $lo2o" | $NL2SP` + fi + + # Eliminate all temporary directories. + #for path in $notinst_path; do + # lib_search_path=`$ECHO "$lib_search_path " | $SED "s% $path % %g"` + # deplibs=`$ECHO "$deplibs " | $SED "s% -L$path % %g"` + # dependency_libs=`$ECHO "$dependency_libs " | $SED "s% -L$path % %g"` + #done + + if test -n "$xrpath"; then + # If the user specified any rpath flags, then add them. 
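+ # For example, a hypothetical link command such as
+ #   libtool --mode=link $CC -o libfoo.la foo.lo -rpath /usr/local/lib -R /opt/foo/lib
+ # leaves /opt/foo/lib in $xrpath; the loop below turns it into a -R entry
+ # (kept in dependency_libs unless it is hardcoded into the library) and adds
+ # it to finalize_rpath so it can be hardcoded or exported via $runpath_var.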
+ temp_xrpath= + for libdir in $xrpath; do + func_replace_sysroot "$libdir" + func_append temp_xrpath " -R$func_replace_sysroot_result" + case "$finalize_rpath " in + *" $libdir "*) ;; + *) func_append finalize_rpath " $libdir" ;; + esac + done + if test "$hardcode_into_libs" != yes || test "$build_old_libs" = yes; then + dependency_libs="$temp_xrpath $dependency_libs" + fi + fi + + # Make sure dlfiles contains only unique files that won't be dlpreopened + old_dlfiles="$dlfiles" + dlfiles= + for lib in $old_dlfiles; do + case " $dlprefiles $dlfiles " in + *" $lib "*) ;; + *) func_append dlfiles " $lib" ;; + esac + done + + # Make sure dlprefiles contains only unique files + old_dlprefiles="$dlprefiles" + dlprefiles= + for lib in $old_dlprefiles; do + case "$dlprefiles " in + *" $lib "*) ;; + *) func_append dlprefiles " $lib" ;; + esac + done + + if test "$build_libtool_libs" = yes; then + if test -n "$rpath"; then + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-beos* | *-cegcc* | *-*-haiku*) + # these systems don't actually have a c library (as such)! + ;; + *-*-rhapsody* | *-*-darwin1.[012]) + # Rhapsody C library is in the System framework + func_append deplibs " System.ltframework" + ;; + *-*-netbsd*) + # Don't link with libc until the a.out ld.so is fixed. + ;; + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) + # Do not include libc due to us having libc/libc_r. + ;; + *-*-sco3.2v5* | *-*-sco5v6*) + # Causes problems with __ctype + ;; + *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) + # Compiler inserts libc in the correct place for threads to work + ;; + *) + # Add libc to deplibs on all other systems if necessary. + if test "$build_libtool_need_lc" = "yes"; then + func_append deplibs " -lc" + fi + ;; + esac + fi + + # Transform deplibs into only deplibs that can be linked in shared. + name_save=$name + libname_save=$libname + release_save=$release + versuffix_save=$versuffix + major_save=$major + # I'm not sure if I'm treating the release correctly. I think + # release should show up in the -l (ie -lgmp5) so we don't want to + # add it in twice. Is that correct? + release="" + versuffix="" + major="" + newdeplibs= + droppeddeps=no + case $deplibs_check_method in + pass_all) + # Don't check for shared/static. Everything works. + # This might be a little naive. We might want to check + # whether the library exists or not. But this is on + # osf3 & osf4 and I'm not really sure... Just + # implementing what was already the behavior. + newdeplibs=$deplibs + ;; + test_compile) + # This code stresses the "libraries are programs" paradigm to its + # limits. Maybe even breaks it. We compile a program, linking it + # against the deplibs as a proxy for the library. Then we can check + # whether they linked in statically or dynamically with ldd. + $opt_dry_run || $RM conftest.c + cat > conftest.c </dev/null` + $nocaseglob + else + potential_libs=`ls $i/$libnameglob[.-]* 2>/dev/null` + fi + for potent_lib in $potential_libs; do + # Follow soft links. + if ls -lLd "$potent_lib" 2>/dev/null | + $GREP " -> " >/dev/null; then + continue + fi + # The statement above tries to avoid entering an + # endless loop below, in case of cyclic links. + # We might still enter an endless loop, since a link + # loop can be closed while we follow links, + # but so what? 
+ potlib="$potent_lib" + while test -h "$potlib" 2>/dev/null; do + potliblink=`ls -ld $potlib | ${SED} 's/.* -> //'` + case $potliblink in + [\\/]* | [A-Za-z]:[\\/]*) potlib="$potliblink";; + *) potlib=`$ECHO "$potlib" | $SED 's,[^/]*$,,'`"$potliblink";; + esac + done + if eval $file_magic_cmd \"\$potlib\" 2>/dev/null | + $SED -e 10q | + $EGREP "$file_magic_regex" > /dev/null; then + func_append newdeplibs " $a_deplib" + a_deplib="" + break 2 + fi + done + done + fi + if test -n "$a_deplib" ; then + droppeddeps=yes + echo + $ECHO "*** Warning: linker path does not have real file for library $a_deplib." + echo "*** I have the capability to make that library automatically link in when" + echo "*** you link to this library. But I can only do this if you have a" + echo "*** shared version of the library, which you do not appear to have" + echo "*** because I did check the linker path looking for a file starting" + if test -z "$potlib" ; then + $ECHO "*** with $libname but no candidates were found. (...for file magic test)" + else + $ECHO "*** with $libname and none of the candidates passed a file format test" + $ECHO "*** using a file magic. Last file checked: $potlib" + fi + fi + ;; + *) + # Add a -L argument. + func_append newdeplibs " $a_deplib" + ;; + esac + done # Gone through all deplibs. + ;; + match_pattern*) + set dummy $deplibs_check_method; shift + match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"` + for a_deplib in $deplibs; do + case $a_deplib in + -l*) + func_stripname -l '' "$a_deplib" + name=$func_stripname_result + if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then + case " $predeps $postdeps " in + *" $a_deplib "*) + func_append newdeplibs " $a_deplib" + a_deplib="" + ;; + esac + fi + if test -n "$a_deplib" ; then + libname=`eval "\\$ECHO \"$libname_spec\""` + for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do + potential_libs=`ls $i/$libname[.-]* 2>/dev/null` + for potent_lib in $potential_libs; do + potlib="$potent_lib" # see symlink-check above in file_magic test + if eval "\$ECHO \"$potent_lib\"" 2>/dev/null | $SED 10q | \ + $EGREP "$match_pattern_regex" > /dev/null; then + func_append newdeplibs " $a_deplib" + a_deplib="" + break 2 + fi + done + done + fi + if test -n "$a_deplib" ; then + droppeddeps=yes + echo + $ECHO "*** Warning: linker path does not have real file for library $a_deplib." + echo "*** I have the capability to make that library automatically link in when" + echo "*** you link to this library. But I can only do this if you have a" + echo "*** shared version of the library, which you do not appear to have" + echo "*** because I did check the linker path looking for a file starting" + if test -z "$potlib" ; then + $ECHO "*** with $libname but no candidates were found. (...for regex pattern test)" + else + $ECHO "*** with $libname and none of the candidates passed a file format test" + $ECHO "*** using a regex pattern. Last file checked: $potlib" + fi + fi + ;; + *) + # Add a -L argument. + func_append newdeplibs " $a_deplib" + ;; + esac + done # Gone through all deplibs. 
+ ;; + none | unknown | *) + newdeplibs="" + tmp_deplibs=`$ECHO " $deplibs" | $SED 's/ -lc$//; s/ -[LR][^ ]*//g'` + if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then + for i in $predeps $postdeps ; do + # can't use Xsed below, because $i might contain '/' + tmp_deplibs=`$ECHO " $tmp_deplibs" | $SED "s,$i,,"` + done + fi + case $tmp_deplibs in + *[!\ \ ]*) + echo + if test "X$deplibs_check_method" = "Xnone"; then + echo "*** Warning: inter-library dependencies are not supported in this platform." + else + echo "*** Warning: inter-library dependencies are not known to be supported." + fi + echo "*** All declared inter-library dependencies are being dropped." + droppeddeps=yes + ;; + esac + ;; + esac + versuffix=$versuffix_save + major=$major_save + release=$release_save + libname=$libname_save + name=$name_save + + case $host in + *-*-rhapsody* | *-*-darwin1.[012]) + # On Rhapsody replace the C library with the System framework + newdeplibs=`$ECHO " $newdeplibs" | $SED 's/ -lc / System.ltframework /'` + ;; + esac + + if test "$droppeddeps" = yes; then + if test "$module" = yes; then + echo + echo "*** Warning: libtool could not satisfy all declared inter-library" + $ECHO "*** dependencies of module $libname. Therefore, libtool will create" + echo "*** a static module, that should work as long as the dlopening" + echo "*** application is linked with the -dlopen flag." + if test -z "$global_symbol_pipe"; then + echo + echo "*** However, this would only work if libtool was able to extract symbol" + echo "*** lists from a program, using \`nm' or equivalent, but libtool could" + echo "*** not find such a program. So, this module is probably useless." + echo "*** \`nm' from GNU binutils and a full rebuild may help." + fi + if test "$build_old_libs" = no; then + oldlibs="$output_objdir/$libname.$libext" + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + else + echo "*** The inter-library dependencies that have been dropped here will be" + echo "*** automatically added whenever a program is linked with this library" + echo "*** or is declared to -dlopen it." + + if test "$allow_undefined" = no; then + echo + echo "*** Since this library must not contain undefined symbols," + echo "*** because either the platform does not support them or" + echo "*** it was explicitly requested with -no-undefined," + echo "*** libtool will only create a static version of it." + if test "$build_old_libs" = no; then + oldlibs="$output_objdir/$libname.$libext" + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + fi + fi + fi + # Done checking deplibs! 
+ deplibs=$newdeplibs + fi + # Time to change all our "foo.ltframework" stuff back to "-framework foo" + case $host in + *-*-darwin*) + newdeplibs=`$ECHO " $newdeplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + new_inherited_linker_flags=`$ECHO " $new_inherited_linker_flags" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + deplibs=`$ECHO " $deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + ;; + esac + + # move library search paths that coincide with paths to not yet + # installed libraries to the beginning of the library search list + new_libs= + for path in $notinst_path; do + case " $new_libs " in + *" -L$path/$objdir "*) ;; + *) + case " $deplibs " in + *" -L$path/$objdir "*) + func_append new_libs " -L$path/$objdir" ;; + esac + ;; + esac + done + for deplib in $deplibs; do + case $deplib in + -L*) + case " $new_libs " in + *" $deplib "*) ;; + *) func_append new_libs " $deplib" ;; + esac + ;; + *) func_append new_libs " $deplib" ;; + esac + done + deplibs="$new_libs" + + # All the library-specific variables (install_libdir is set above). + library_names= + old_library= + dlname= + + # Test again, we may have decided not to build it any more + if test "$build_libtool_libs" = yes; then + # Remove ${wl} instances when linking with ld. + # FIXME: should test the right _cmds variable. + case $archive_cmds in + *\$LD\ *) wl= ;; + esac + if test "$hardcode_into_libs" = yes; then + # Hardcode the library paths + hardcode_libdirs= + dep_rpath= + rpath="$finalize_rpath" + test "$opt_mode" != relink && rpath="$compile_rpath$rpath" + for libdir in $rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + func_replace_sysroot "$libdir" + libdir=$func_replace_sysroot_result + if test -z "$hardcode_libdirs"; then + hardcode_libdirs="$libdir" + else + # Just accumulate the unique libdirs. + case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + func_append dep_rpath " $flag" + fi + elif test -n "$runpath_var"; then + case "$perm_rpath " in + *" $libdir "*) ;; + *) func_append perm_rpath " $libdir" ;; + esac + fi + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir="$hardcode_libdirs" + eval "dep_rpath=\"$hardcode_libdir_flag_spec\"" + fi + if test -n "$runpath_var" && test -n "$perm_rpath"; then + # We should set the runpath_var. + rpath= + for dir in $perm_rpath; do + func_append rpath "$dir:" + done + eval "$runpath_var='$rpath\$$runpath_var'; export $runpath_var" + fi + test -n "$dep_rpath" && deplibs="$dep_rpath $deplibs" + fi + + shlibpath="$finalize_shlibpath" + test "$opt_mode" != relink && shlibpath="$compile_shlibpath$shlibpath" + if test -n "$shlibpath"; then + eval "$shlibpath_var='$shlibpath\$$shlibpath_var'; export $shlibpath_var" + fi + + # Get the real and link names of the library. 
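+ # On a typical GNU/Linux configuration, for a hypothetical libfoo.la with
+ # major=.2 and versuffix=.2.1.2, library_names_spec expands below to roughly
+ #   libfoo.so.2.1.2 libfoo.so.2 libfoo.so
+ # so realname becomes libfoo.so.2.1.2, the soname is libfoo.so.2, and the
+ # remaining names are collected in $linknames and created as symlinks later.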
+ eval shared_ext=\"$shrext_cmds\" + eval library_names=\"$library_names_spec\" + set dummy $library_names + shift + realname="$1" + shift + + if test -n "$soname_spec"; then + eval soname=\"$soname_spec\" + else + soname="$realname" + fi + if test -z "$dlname"; then + dlname=$soname + fi + + lib="$output_objdir/$realname" + linknames= + for link + do + func_append linknames " $link" + done + + # Use standard objects if they are pic + test -z "$pic_flag" && libobjs=`$ECHO "$libobjs" | $SP2NL | $SED "$lo2o" | $NL2SP` + test "X$libobjs" = "X " && libobjs= + + delfiles= + if test -n "$export_symbols" && test -n "$include_expsyms"; then + $opt_dry_run || cp "$export_symbols" "$output_objdir/$libname.uexp" + export_symbols="$output_objdir/$libname.uexp" + func_append delfiles " $export_symbols" + fi + + orig_export_symbols= + case $host_os in + cygwin* | mingw* | cegcc*) + if test -n "$export_symbols" && test -z "$export_symbols_regex"; then + # exporting using user supplied symfile + if test "x`$SED 1q $export_symbols`" != xEXPORTS; then + # and it's NOT already a .def file. Must figure out + # which of the given symbols are data symbols and tag + # them as such. So, trigger use of export_symbols_cmds. + # export_symbols gets reassigned inside the "prepare + # the list of exported symbols" if statement, so the + # include_expsyms logic still works. + orig_export_symbols="$export_symbols" + export_symbols= + always_export_symbols=yes + fi + fi + ;; + esac + + # Prepare the list of exported symbols + if test -z "$export_symbols"; then + if test "$always_export_symbols" = yes || test -n "$export_symbols_regex"; then + func_verbose "generating symbol list for \`$libname.la'" + export_symbols="$output_objdir/$libname.exp" + $opt_dry_run || $RM $export_symbols + cmds=$export_symbols_cmds + save_ifs="$IFS"; IFS='~' + for cmd1 in $cmds; do + IFS="$save_ifs" + # Take the normal branch if the nm_file_list_spec branch + # doesn't work or if tool conversion is not needed. + case $nm_file_list_spec~$to_tool_file_cmd in + *~func_convert_file_noop | *~func_convert_file_msys_to_w32 | ~*) + try_normal_branch=yes + eval cmd=\"$cmd1\" + func_len " $cmd" + len=$func_len_result + ;; + *) + try_normal_branch=no + ;; + esac + if test "$try_normal_branch" = yes \ + && { test "$len" -lt "$max_cmd_len" \ + || test "$max_cmd_len" -le -1; } + then + func_show_eval "$cmd" 'exit $?' + skipped_export=false + elif test -n "$nm_file_list_spec"; then + func_basename "$output" + output_la=$func_basename_result + save_libobjs=$libobjs + save_output=$output + output=${output_objdir}/${output_la}.nm + func_to_tool_file "$output" + libobjs=$nm_file_list_spec$func_to_tool_file_result + func_append delfiles " $output" + func_verbose "creating $NM input file list: $output" + for obj in $save_libobjs; do + func_to_tool_file "$obj" + $ECHO "$func_to_tool_file_result" + done > "$output" + eval cmd=\"$cmd1\" + func_show_eval "$cmd" 'exit $?' + output=$save_output + libobjs=$save_libobjs + skipped_export=false + else + # The command line is too long to execute in one step. + func_verbose "using reloadable object file for export list..." + skipped_export=: + # Break out early, otherwise skipped_export may be + # set to false by a later but shorter cmd. 
+ break + fi + done + IFS="$save_ifs" + if test -n "$export_symbols_regex" && test "X$skipped_export" != "X:"; then + func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' + func_show_eval '$MV "${export_symbols}T" "$export_symbols"' + fi + fi + fi + + if test -n "$export_symbols" && test -n "$include_expsyms"; then + tmp_export_symbols="$export_symbols" + test -n "$orig_export_symbols" && tmp_export_symbols="$orig_export_symbols" + $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"' + fi + + if test "X$skipped_export" != "X:" && test -n "$orig_export_symbols"; then + # The given exports_symbols file has to be filtered, so filter it. + func_verbose "filter symbol list for \`$libname.la' to tag DATA exports" + # FIXME: $output_objdir/$libname.filter potentially contains lots of + # 's' commands which not all seds can handle. GNU sed should be fine + # though. Also, the filter scales superlinearly with the number of + # global variables. join(1) would be nice here, but unfortunately + # isn't a blessed tool. + $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter + func_append delfiles " $export_symbols $output_objdir/$libname.filter" + export_symbols=$output_objdir/$libname.def + $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols + fi + + tmp_deplibs= + for test_deplib in $deplibs; do + case " $convenience " in + *" $test_deplib "*) ;; + *) + func_append tmp_deplibs " $test_deplib" + ;; + esac + done + deplibs="$tmp_deplibs" + + if test -n "$convenience"; then + if test -n "$whole_archive_flag_spec" && + test "$compiler_needs_object" = yes && + test -z "$libobjs"; then + # extract the archives, so we have objects to list. + # TODO: could optimize this to just extract one archive. + whole_archive_flag_spec= + fi + if test -n "$whole_archive_flag_spec"; then + save_libobjs=$libobjs + eval libobjs=\"\$libobjs $whole_archive_flag_spec\" + test "X$libobjs" = "X " && libobjs= + else + gentop="$output_objdir/${outputname}x" + func_append generated " $gentop" + + func_extract_archives $gentop $convenience + func_append libobjs " $func_extract_archives_result" + test "X$libobjs" = "X " && libobjs= + fi + fi + + if test "$thread_safe" = yes && test -n "$thread_safe_flag_spec"; then + eval flag=\"$thread_safe_flag_spec\" + func_append linker_flags " $flag" + fi + + # Make a backup of the uninstalled library when relinking + if test "$opt_mode" = relink; then + $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}U && $MV $realname ${realname}U)' || exit $? + fi + + # Do each of the archive commands. + if test "$module" = yes && test -n "$module_cmds" ; then + if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then + eval test_cmds=\"$module_expsym_cmds\" + cmds=$module_expsym_cmds + else + eval test_cmds=\"$module_cmds\" + cmds=$module_cmds + fi + else + if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then + eval test_cmds=\"$archive_expsym_cmds\" + cmds=$archive_expsym_cmds + else + eval test_cmds=\"$archive_cmds\" + cmds=$archive_cmds + fi + fi + + if test "X$skipped_export" != "X:" && + func_len " $test_cmds" && + len=$func_len_result && + test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then + : + else + # The command line is too long to link in one step, link piecewise + # or, if using GNU ld and skipped_export is not :, use a linker + # script. 
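+ # With GNU ld the branch below avoids the length limit by writing a small
+ # linker script, ${output_la}.lnkscript, that merely lists the objects, e.g.
+ # (illustrative paths):
+ #   INPUT (
+ #   .libs/foo.o
+ #   .libs/bar.o
+ #   )
+ # and handing that single file to the linker in place of the long object list.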
+ + # Save the value of $output and $libobjs because we want to + # use them later. If we have whole_archive_flag_spec, we + # want to use save_libobjs as it was before + # whole_archive_flag_spec was expanded, because we can't + # assume the linker understands whole_archive_flag_spec. + # This may have to be revisited, in case too many + # convenience libraries get linked in and end up exceeding + # the spec. + if test -z "$convenience" || test -z "$whole_archive_flag_spec"; then + save_libobjs=$libobjs + fi + save_output=$output + func_basename "$output" + output_la=$func_basename_result + + # Clear the reloadable object creation command queue and + # initialize k to one. + test_cmds= + concat_cmds= + objlist= + last_robj= + k=1 + + if test -n "$save_libobjs" && test "X$skipped_export" != "X:" && test "$with_gnu_ld" = yes; then + output=${output_objdir}/${output_la}.lnkscript + func_verbose "creating GNU ld script: $output" + echo 'INPUT (' > $output + for obj in $save_libobjs + do + func_to_tool_file "$obj" + $ECHO "$func_to_tool_file_result" >> $output + done + echo ')' >> $output + func_append delfiles " $output" + func_to_tool_file "$output" + output=$func_to_tool_file_result + elif test -n "$save_libobjs" && test "X$skipped_export" != "X:" && test "X$file_list_spec" != X; then + output=${output_objdir}/${output_la}.lnk + func_verbose "creating linker input file list: $output" + : > $output + set x $save_libobjs + shift + firstobj= + if test "$compiler_needs_object" = yes; then + firstobj="$1 " + shift + fi + for obj + do + func_to_tool_file "$obj" + $ECHO "$func_to_tool_file_result" >> $output + done + func_append delfiles " $output" + func_to_tool_file "$output" + output=$firstobj\"$file_list_spec$func_to_tool_file_result\" + else + if test -n "$save_libobjs"; then + func_verbose "creating reloadable object files..." + output=$output_objdir/$output_la-${k}.$objext + eval test_cmds=\"$reload_cmds\" + func_len " $test_cmds" + len0=$func_len_result + len=$len0 + + # Loop over the list of objects to be linked. + for obj in $save_libobjs + do + func_len " $obj" + func_arith $len + $func_len_result + len=$func_arith_result + if test "X$objlist" = X || + test "$len" -lt "$max_cmd_len"; then + func_append objlist " $obj" + else + # The command $test_cmds is almost too long, add a + # command to the queue. + if test "$k" -eq 1 ; then + # The first file doesn't have a previous command to add. + reload_objs=$objlist + eval concat_cmds=\"$reload_cmds\" + else + # All subsequent reloadable object files will link in + # the last one created. + reload_objs="$objlist $last_robj" + eval concat_cmds=\"\$concat_cmds~$reload_cmds~\$RM $last_robj\" + fi + last_robj=$output_objdir/$output_la-${k}.$objext + func_arith $k + 1 + k=$func_arith_result + output=$output_objdir/$output_la-${k}.$objext + objlist=" $obj" + func_len " $last_robj" + func_arith $len0 + $func_len_result + len=$func_arith_result + fi + done + # Handle the remaining objects by creating one last + # reloadable object file. All subsequent reloadable object + # files will link in the last one created. 
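+ # Concretely (illustrative, hypothetical libfoo.la): the loop above queues
+ # commands that create .libs/libfoo.la-1.o, then .libs/libfoo.la-2.o (which
+ # links in libfoo.la-1.o), and so on; the leftover objects go into one final
+ # reloadable object below, and that last object later stands in for the full
+ # $libobjs list when the real archive command is run.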
+ test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + reload_objs="$objlist $last_robj" + eval concat_cmds=\"\${concat_cmds}$reload_cmds\" + if test -n "$last_robj"; then + eval concat_cmds=\"\${concat_cmds}~\$RM $last_robj\" + fi + func_append delfiles " $output" + + else + output= + fi + + if ${skipped_export-false}; then + func_verbose "generating symbol list for \`$libname.la'" + export_symbols="$output_objdir/$libname.exp" + $opt_dry_run || $RM $export_symbols + libobjs=$output + # Append the command to create the export file. + test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + eval concat_cmds=\"\$concat_cmds$export_symbols_cmds\" + if test -n "$last_robj"; then + eval concat_cmds=\"\$concat_cmds~\$RM $last_robj\" + fi + fi + + test -n "$save_libobjs" && + func_verbose "creating a temporary reloadable object file: $output" + + # Loop through the commands generated above and execute them. + save_ifs="$IFS"; IFS='~' + for cmd in $concat_cmds; do + IFS="$save_ifs" + $opt_silent || { + func_quote_for_expand "$cmd" + eval "func_echo $func_quote_for_expand_result" + } + $opt_dry_run || eval "$cmd" || { + lt_exit=$? + + # Restore the uninstalled library and exit + if test "$opt_mode" = relink; then + ( cd "$output_objdir" && \ + $RM "${realname}T" && \ + $MV "${realname}U" "$realname" ) + fi + + exit $lt_exit + } + done + IFS="$save_ifs" + + if test -n "$export_symbols_regex" && ${skipped_export-false}; then + func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' + func_show_eval '$MV "${export_symbols}T" "$export_symbols"' + fi + fi + + if ${skipped_export-false}; then + if test -n "$export_symbols" && test -n "$include_expsyms"; then + tmp_export_symbols="$export_symbols" + test -n "$orig_export_symbols" && tmp_export_symbols="$orig_export_symbols" + $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"' + fi + + if test -n "$orig_export_symbols"; then + # The given exports_symbols file has to be filtered, so filter it. + func_verbose "filter symbol list for \`$libname.la' to tag DATA exports" + # FIXME: $output_objdir/$libname.filter potentially contains lots of + # 's' commands which not all seds can handle. GNU sed should be fine + # though. Also, the filter scales superlinearly with the number of + # global variables. join(1) would be nice here, but unfortunately + # isn't a blessed tool. + $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter + func_append delfiles " $export_symbols $output_objdir/$libname.filter" + export_symbols=$output_objdir/$libname.def + $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols + fi + fi + + libobjs=$output + # Restore the value of output. + output=$save_output + + if test -n "$convenience" && test -n "$whole_archive_flag_spec"; then + eval libobjs=\"\$libobjs $whole_archive_flag_spec\" + test "X$libobjs" = "X " && libobjs= + fi + # Expand the library linking commands again to reset the + # value of $libobjs for piecewise linking. + + # Do each of the archive commands. 
+ if test "$module" = yes && test -n "$module_cmds" ; then + if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then + cmds=$module_expsym_cmds + else + cmds=$module_cmds + fi + else + if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then + cmds=$archive_expsym_cmds + else + cmds=$archive_cmds + fi + fi + fi + + if test -n "$delfiles"; then + # Append the command to remove temporary files to $cmds. + eval cmds=\"\$cmds~\$RM $delfiles\" + fi + + # Add any objects from preloaded convenience libraries + if test -n "$dlprefiles"; then + gentop="$output_objdir/${outputname}x" + func_append generated " $gentop" + + func_extract_archives $gentop $dlprefiles + func_append libobjs " $func_extract_archives_result" + test "X$libobjs" = "X " && libobjs= + fi + + save_ifs="$IFS"; IFS='~' + for cmd in $cmds; do + IFS="$save_ifs" + eval cmd=\"$cmd\" + $opt_silent || { + func_quote_for_expand "$cmd" + eval "func_echo $func_quote_for_expand_result" + } + $opt_dry_run || eval "$cmd" || { + lt_exit=$? + + # Restore the uninstalled library and exit + if test "$opt_mode" = relink; then + ( cd "$output_objdir" && \ + $RM "${realname}T" && \ + $MV "${realname}U" "$realname" ) + fi + + exit $lt_exit + } + done + IFS="$save_ifs" + + # Restore the uninstalled library and exit + if test "$opt_mode" = relink; then + $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}T && $MV $realname ${realname}T && $MV ${realname}U $realname)' || exit $? + + if test -n "$convenience"; then + if test -z "$whole_archive_flag_spec"; then + func_show_eval '${RM}r "$gentop"' + fi + fi + + exit $EXIT_SUCCESS + fi + + # Create links to the real library. + for linkname in $linknames; do + if test "$realname" != "$linkname"; then + func_show_eval '(cd "$output_objdir" && $RM "$linkname" && $LN_S "$realname" "$linkname")' 'exit $?' + fi + done + + # If -module or -export-dynamic was specified, set the dlname. + if test "$module" = yes || test "$export_dynamic" = yes; then + # On all known operating systems, these are identical. + dlname="$soname" + fi + fi + ;; + + obj) + if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then + func_warning "\`-dlopen' is ignored for objects" + fi + + case " $deplibs" in + *\ -l* | *\ -L*) + func_warning "\`-l' and \`-L' are ignored for objects" ;; + esac + + test -n "$rpath" && \ + func_warning "\`-rpath' is ignored for objects" + + test -n "$xrpath" && \ + func_warning "\`-R' is ignored for objects" + + test -n "$vinfo" && \ + func_warning "\`-version-info' is ignored for objects" + + test -n "$release" && \ + func_warning "\`-release' is ignored for objects" + + case $output in + *.lo) + test -n "$objs$old_deplibs" && \ + func_fatal_error "cannot build library object \`$output' from non-libtool objects" + + libobj=$output + func_lo2o "$libobj" + obj=$func_lo2o_result + ;; + *) + libobj= + obj="$output" + ;; + esac + + # Delete the old objects. + $opt_dry_run || $RM $obj $libobj + + # Objects from convenience libraries. This assumes + # single-version convenience libraries. Whenever we create + # different ones for PIC/non-PIC, this we'll have to duplicate + # the extraction. + reload_conv_objs= + gentop= + # reload_cmds runs $LD directly, so let us get rid of + # -Wl from whole_archive_flag_spec and hope we can get by with + # turning comma into space.. 
+ wl= + + if test -n "$convenience"; then + if test -n "$whole_archive_flag_spec"; then + eval tmp_whole_archive_flags=\"$whole_archive_flag_spec\" + reload_conv_objs=$reload_objs\ `$ECHO "$tmp_whole_archive_flags" | $SED 's|,| |g'` + else + gentop="$output_objdir/${obj}x" + func_append generated " $gentop" + + func_extract_archives $gentop $convenience + reload_conv_objs="$reload_objs $func_extract_archives_result" + fi + fi + + # If we're not building shared, we need to use non_pic_objs + test "$build_libtool_libs" != yes && libobjs="$non_pic_objects" + + # Create the old-style object. + reload_objs="$objs$old_deplibs "`$ECHO "$libobjs" | $SP2NL | $SED "/\.${libext}$/d; /\.lib$/d; $lo2o" | $NL2SP`" $reload_conv_objs" ### testsuite: skip nested quoting test + + output="$obj" + func_execute_cmds "$reload_cmds" 'exit $?' + + # Exit if we aren't doing a library object file. + if test -z "$libobj"; then + if test -n "$gentop"; then + func_show_eval '${RM}r "$gentop"' + fi + + exit $EXIT_SUCCESS + fi + + if test "$build_libtool_libs" != yes; then + if test -n "$gentop"; then + func_show_eval '${RM}r "$gentop"' + fi + + # Create an invalid libtool object if no PIC, so that we don't + # accidentally link it into a program. + # $show "echo timestamp > $libobj" + # $opt_dry_run || eval "echo timestamp > $libobj" || exit $? + exit $EXIT_SUCCESS + fi + + if test -n "$pic_flag" || test "$pic_mode" != default; then + # Only do commands if we really have different PIC objects. + reload_objs="$libobjs $reload_conv_objs" + output="$libobj" + func_execute_cmds "$reload_cmds" 'exit $?' + fi + + if test -n "$gentop"; then + func_show_eval '${RM}r "$gentop"' + fi + + exit $EXIT_SUCCESS + ;; + + prog) + case $host in + *cygwin*) func_stripname '' '.exe' "$output" + output=$func_stripname_result.exe;; + esac + test -n "$vinfo" && \ + func_warning "\`-version-info' is ignored for programs" + + test -n "$release" && \ + func_warning "\`-release' is ignored for programs" + + test "$preload" = yes \ + && test "$dlopen_support" = unknown \ + && test "$dlopen_self" = unknown \ + && test "$dlopen_self_static" = unknown && \ + func_warning "\`LT_INIT([dlopen])' not used. Assuming no dlopen support." + + case $host in + *-*-rhapsody* | *-*-darwin1.[012]) + # On Rhapsody replace the C library is the System framework + compile_deplibs=`$ECHO " $compile_deplibs" | $SED 's/ -lc / System.ltframework /'` + finalize_deplibs=`$ECHO " $finalize_deplibs" | $SED 's/ -lc / System.ltframework /'` + ;; + esac + + case $host in + *-*-darwin*) + # Don't allow lazy linking, it breaks C++ global constructors + # But is supposedly fixed on 10.4 or later (yay!). 
+ if test "$tagname" = CXX ; then + case ${MACOSX_DEPLOYMENT_TARGET-10.0} in + 10.[0123]) + func_append compile_command " ${wl}-bind_at_load" + func_append finalize_command " ${wl}-bind_at_load" + ;; + esac + fi + # Time to change all our "foo.ltframework" stuff back to "-framework foo" + compile_deplibs=`$ECHO " $compile_deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + finalize_deplibs=`$ECHO " $finalize_deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + ;; + esac + + + # move library search paths that coincide with paths to not yet + # installed libraries to the beginning of the library search list + new_libs= + for path in $notinst_path; do + case " $new_libs " in + *" -L$path/$objdir "*) ;; + *) + case " $compile_deplibs " in + *" -L$path/$objdir "*) + func_append new_libs " -L$path/$objdir" ;; + esac + ;; + esac + done + for deplib in $compile_deplibs; do + case $deplib in + -L*) + case " $new_libs " in + *" $deplib "*) ;; + *) func_append new_libs " $deplib" ;; + esac + ;; + *) func_append new_libs " $deplib" ;; + esac + done + compile_deplibs="$new_libs" + + + func_append compile_command " $compile_deplibs" + func_append finalize_command " $finalize_deplibs" + + if test -n "$rpath$xrpath"; then + # If the user specified any rpath flags, then add them. + for libdir in $rpath $xrpath; do + # This is the magic to use -rpath. + case "$finalize_rpath " in + *" $libdir "*) ;; + *) func_append finalize_rpath " $libdir" ;; + esac + done + fi + + # Now hardcode the library paths + rpath= + hardcode_libdirs= + for libdir in $compile_rpath $finalize_rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + if test -z "$hardcode_libdirs"; then + hardcode_libdirs="$libdir" + else + # Just accumulate the unique libdirs. + case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + func_append rpath " $flag" + fi + elif test -n "$runpath_var"; then + case "$perm_rpath " in + *" $libdir "*) ;; + *) func_append perm_rpath " $libdir" ;; + esac + fi + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) + testbindir=`${ECHO} "$libdir" | ${SED} -e 's*/lib$*/bin*'` + case :$dllsearchpath: in + *":$libdir:"*) ;; + ::) dllsearchpath=$libdir;; + *) func_append dllsearchpath ":$libdir";; + esac + case :$dllsearchpath: in + *":$testbindir:"*) ;; + ::) dllsearchpath=$testbindir;; + *) func_append dllsearchpath ":$testbindir";; + esac + ;; + esac + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir="$hardcode_libdirs" + eval rpath=\" $hardcode_libdir_flag_spec\" + fi + compile_rpath="$rpath" + + rpath= + hardcode_libdirs= + for libdir in $finalize_rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + if test -z "$hardcode_libdirs"; then + hardcode_libdirs="$libdir" + else + # Just accumulate the unique libdirs. 
+ case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + func_append rpath " $flag" + fi + elif test -n "$runpath_var"; then + case "$finalize_perm_rpath " in + *" $libdir "*) ;; + *) func_append finalize_perm_rpath " $libdir" ;; + esac + fi + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir="$hardcode_libdirs" + eval rpath=\" $hardcode_libdir_flag_spec\" + fi + finalize_rpath="$rpath" + + if test -n "$libobjs" && test "$build_old_libs" = yes; then + # Transform all the library objects into standard objects. + compile_command=`$ECHO "$compile_command" | $SP2NL | $SED "$lo2o" | $NL2SP` + finalize_command=`$ECHO "$finalize_command" | $SP2NL | $SED "$lo2o" | $NL2SP` + fi + + func_generate_dlsyms "$outputname" "@PROGRAM@" "no" + + # template prelinking step + if test -n "$prelink_cmds"; then + func_execute_cmds "$prelink_cmds" 'exit $?' + fi + + wrappers_required=yes + case $host in + *cegcc* | *mingw32ce*) + # Disable wrappers for cegcc and mingw32ce hosts, we are cross compiling anyway. + wrappers_required=no + ;; + *cygwin* | *mingw* ) + if test "$build_libtool_libs" != yes; then + wrappers_required=no + fi + ;; + *) + if test "$need_relink" = no || test "$build_libtool_libs" != yes; then + wrappers_required=no + fi + ;; + esac + if test "$wrappers_required" = no; then + # Replace the output file specification. + compile_command=`$ECHO "$compile_command" | $SED 's%@OUTPUT@%'"$output"'%g'` + link_command="$compile_command$compile_rpath" + + # We have no uninstalled library dependencies, so finalize right now. + exit_status=0 + func_show_eval "$link_command" 'exit_status=$?' + + if test -n "$postlink_cmds"; then + func_to_tool_file "$output" + postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` + func_execute_cmds "$postlink_cmds" 'exit $?' + fi + + # Delete the generated files. + if test -f "$output_objdir/${outputname}S.${objext}"; then + func_show_eval '$RM "$output_objdir/${outputname}S.${objext}"' + fi + + exit $exit_status + fi + + if test -n "$compile_shlibpath$finalize_shlibpath"; then + compile_command="$shlibpath_var=\"$compile_shlibpath$finalize_shlibpath\$$shlibpath_var\" $compile_command" + fi + if test -n "$finalize_shlibpath"; then + finalize_command="$shlibpath_var=\"$finalize_shlibpath\$$shlibpath_var\" $finalize_command" + fi + + compile_var= + finalize_var= + if test -n "$runpath_var"; then + if test -n "$perm_rpath"; then + # We should set the runpath_var. + rpath= + for dir in $perm_rpath; do + func_append rpath "$dir:" + done + compile_var="$runpath_var=\"$rpath\$$runpath_var\" " + fi + if test -n "$finalize_perm_rpath"; then + # We should set the runpath_var. + rpath= + for dir in $finalize_perm_rpath; do + func_append rpath "$dir:" + done + finalize_var="$runpath_var=\"$rpath\$$runpath_var\" " + fi + fi + + if test "$no_install" = yes; then + # We don't need to create a wrapper script. + link_command="$compile_var$compile_command$compile_rpath" + # Replace the output file specification. + link_command=`$ECHO "$link_command" | $SED 's%@OUTPUT@%'"$output"'%g'` + # Delete the old output file. 
+ $opt_dry_run || $RM $output + # Link the executable and exit + func_show_eval "$link_command" 'exit $?' + + if test -n "$postlink_cmds"; then + func_to_tool_file "$output" + postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` + func_execute_cmds "$postlink_cmds" 'exit $?' + fi + + exit $EXIT_SUCCESS + fi + + if test "$hardcode_action" = relink; then + # Fast installation is not supported + link_command="$compile_var$compile_command$compile_rpath" + relink_command="$finalize_var$finalize_command$finalize_rpath" + + func_warning "this platform does not like uninstalled shared libraries" + func_warning "\`$output' will be relinked during installation" + else + if test "$fast_install" != no; then + link_command="$finalize_var$compile_command$finalize_rpath" + if test "$fast_install" = yes; then + relink_command=`$ECHO "$compile_var$compile_command$compile_rpath" | $SED 's%@OUTPUT@%\$progdir/\$file%g'` + else + # fast_install is set to needless + relink_command= + fi + else + link_command="$compile_var$compile_command$compile_rpath" + relink_command="$finalize_var$finalize_command$finalize_rpath" + fi + fi + + # Replace the output file specification. + link_command=`$ECHO "$link_command" | $SED 's%@OUTPUT@%'"$output_objdir/$outputname"'%g'` + + # Delete the old output files. + $opt_dry_run || $RM $output $output_objdir/$outputname $output_objdir/lt-$outputname + + func_show_eval "$link_command" 'exit $?' + + if test -n "$postlink_cmds"; then + func_to_tool_file "$output_objdir/$outputname" + postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` + func_execute_cmds "$postlink_cmds" 'exit $?' + fi + + # Now create the wrapper script. + func_verbose "creating $output" + + # Quote the relink command for shipping. + if test -n "$relink_command"; then + # Preserve any variables that may affect compiler behavior + for var in $variables_saved_for_relink; do + if eval test -z \"\${$var+set}\"; then + relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command" + elif eval var_value=\$$var; test -z "$var_value"; then + relink_command="$var=; export $var; $relink_command" + else + func_quote_for_eval "$var_value" + relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command" + fi + done + relink_command="(cd `pwd`; $relink_command)" + relink_command=`$ECHO "$relink_command" | $SED "$sed_quote_subst"` + fi + + # Only actually do things if not in dry run mode. + $opt_dry_run || { + # win32 will think the script is a binary if it has + # a .exe suffix, so we strip it off here. + case $output in + *.exe) func_stripname '' '.exe' "$output" + output=$func_stripname_result ;; + esac + # test for cygwin because mv fails w/o .exe extensions + case $host in + *cygwin*) + exeext=.exe + func_stripname '' '.exe' "$outputname" + outputname=$func_stripname_result ;; + *) exeext= ;; + esac + case $host in + *cygwin* | *mingw* ) + func_dirname_and_basename "$output" "" "." 
+ output_name=$func_basename_result + output_path=$func_dirname_result + cwrappersource="$output_path/$objdir/lt-$output_name.c" + cwrapper="$output_path/$output_name.exe" + $RM $cwrappersource $cwrapper + trap "$RM $cwrappersource $cwrapper; exit $EXIT_FAILURE" 1 2 15 + + func_emit_cwrapperexe_src > $cwrappersource + + # The wrapper executable is built using the $host compiler, + # because it contains $host paths and files. If cross- + # compiling, it, like the target executable, must be + # executed on the $host or under an emulation environment. + $opt_dry_run || { + $LTCC $LTCFLAGS -o $cwrapper $cwrappersource + $STRIP $cwrapper + } + + # Now, create the wrapper script for func_source use: + func_ltwrapper_scriptname $cwrapper + $RM $func_ltwrapper_scriptname_result + trap "$RM $func_ltwrapper_scriptname_result; exit $EXIT_FAILURE" 1 2 15 + $opt_dry_run || { + # note: this script will not be executed, so do not chmod. + if test "x$build" = "x$host" ; then + $cwrapper --lt-dump-script > $func_ltwrapper_scriptname_result + else + func_emit_wrapper no > $func_ltwrapper_scriptname_result + fi + } + ;; + * ) + $RM $output + trap "$RM $output; exit $EXIT_FAILURE" 1 2 15 + + func_emit_wrapper no > $output + chmod +x $output + ;; + esac + } + exit $EXIT_SUCCESS + ;; + esac + + # See if we need to build an old-fashioned archive. + for oldlib in $oldlibs; do + + if test "$build_libtool_libs" = convenience; then + oldobjs="$libobjs_save $symfileobj" + addlibs="$convenience" + build_libtool_libs=no + else + if test "$build_libtool_libs" = module; then + oldobjs="$libobjs_save" + build_libtool_libs=no + else + oldobjs="$old_deplibs $non_pic_objects" + if test "$preload" = yes && test -f "$symfileobj"; then + func_append oldobjs " $symfileobj" + fi + fi + addlibs="$old_convenience" + fi + + if test -n "$addlibs"; then + gentop="$output_objdir/${outputname}x" + func_append generated " $gentop" + + func_extract_archives $gentop $addlibs + func_append oldobjs " $func_extract_archives_result" + fi + + # Do each command in the archive commands. + if test -n "$old_archive_from_new_cmds" && test "$build_libtool_libs" = yes; then + cmds=$old_archive_from_new_cmds + else + + # Add any objects from preloaded convenience libraries + if test -n "$dlprefiles"; then + gentop="$output_objdir/${outputname}x" + func_append generated " $gentop" + + func_extract_archives $gentop $dlprefiles + func_append oldobjs " $func_extract_archives_result" + fi + + # POSIX demands no paths to be encoded in archives. We have + # to avoid creating archives with duplicate basenames if we + # might have to extract them afterwards, e.g., when creating a + # static archive out of a convenience library, or when linking + # the entirety of a libtool archive into another (currently + # not supported by libtool). + if (for obj in $oldobjs + do + func_basename "$obj" + $ECHO "$func_basename_result" + done | sort | sort -uc >/dev/null 2>&1); then + : + else + echo "copying selected object files to avoid basename conflicts..." + gentop="$output_objdir/${outputname}x" + func_append generated " $gentop" + func_mkdir_p "$gentop" + save_oldobjs=$oldobjs + oldobjs= + counter=1 + for obj in $save_oldobjs + do + func_basename "$obj" + objbase="$func_basename_result" + case " $oldobjs " in + " ") oldobjs=$obj ;; + *[\ /]"$objbase "*) + while :; do + # Make sure we don't pick an alternate name that also + # overlaps. 
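+ # Illustrative: if both a/util.o and b/util.o are destined for the archive,
+ # the duplicate is copied into $gentop as lt1-util.o (lt2-util.o if that name
+ # is taken as well), so the resulting static archive never carries two
+ # members with the same basename.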
+ newobj=lt$counter-$objbase + func_arith $counter + 1 + counter=$func_arith_result + case " $oldobjs " in + *[\ /]"$newobj "*) ;; + *) if test ! -f "$gentop/$newobj"; then break; fi ;; + esac + done + func_show_eval "ln $obj $gentop/$newobj || cp $obj $gentop/$newobj" + func_append oldobjs " $gentop/$newobj" + ;; + *) func_append oldobjs " $obj" ;; + esac + done + fi + func_to_tool_file "$oldlib" func_convert_file_msys_to_w32 + tool_oldlib=$func_to_tool_file_result + eval cmds=\"$old_archive_cmds\" + + func_len " $cmds" + len=$func_len_result + if test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then + cmds=$old_archive_cmds + elif test -n "$archiver_list_spec"; then + func_verbose "using command file archive linking..." + for obj in $oldobjs + do + func_to_tool_file "$obj" + $ECHO "$func_to_tool_file_result" + done > $output_objdir/$libname.libcmd + func_to_tool_file "$output_objdir/$libname.libcmd" + oldobjs=" $archiver_list_spec$func_to_tool_file_result" + cmds=$old_archive_cmds + else + # the command line is too long to link in one step, link in parts + func_verbose "using piecewise archive linking..." + save_RANLIB=$RANLIB + RANLIB=: + objlist= + concat_cmds= + save_oldobjs=$oldobjs + oldobjs= + # Is there a better way of finding the last object in the list? + for obj in $save_oldobjs + do + last_oldobj=$obj + done + eval test_cmds=\"$old_archive_cmds\" + func_len " $test_cmds" + len0=$func_len_result + len=$len0 + for obj in $save_oldobjs + do + func_len " $obj" + func_arith $len + $func_len_result + len=$func_arith_result + func_append objlist " $obj" + if test "$len" -lt "$max_cmd_len"; then + : + else + # the above command should be used before it gets too long + oldobjs=$objlist + if test "$obj" = "$last_oldobj" ; then + RANLIB=$save_RANLIB + fi + test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + eval concat_cmds=\"\${concat_cmds}$old_archive_cmds\" + objlist= + len=$len0 + fi + done + RANLIB=$save_RANLIB + oldobjs=$objlist + if test "X$oldobjs" = "X" ; then + eval cmds=\"\$concat_cmds\" + else + eval cmds=\"\$concat_cmds~\$old_archive_cmds\" + fi + fi + fi + func_execute_cmds "$cmds" 'exit $?' + done + + test -n "$generated" && \ + func_show_eval "${RM}r$generated" + + # Now create the libtool archive. + case $output in + *.la) + old_library= + test "$build_old_libs" = yes && old_library="$libname.$libext" + func_verbose "creating $output" + + # Preserve any variables that may affect compiler behavior + for var in $variables_saved_for_relink; do + if eval test -z \"\${$var+set}\"; then + relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command" + elif eval var_value=\$$var; test -z "$var_value"; then + relink_command="$var=; export $var; $relink_command" + else + func_quote_for_eval "$var_value" + relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command" + fi + done + # Quote the link command for shipping. + relink_command="(cd `pwd`; $SHELL $progpath $preserve_args --mode=relink $libtool_args @inst_prefix_dir@)" + relink_command=`$ECHO "$relink_command" | $SED "$sed_quote_subst"` + if test "$hardcode_automatic" = yes ; then + relink_command= + fi + + # Only create the output if not a dry run. 
+ $opt_dry_run || { + for installed in no yes; do + if test "$installed" = yes; then + if test -z "$install_libdir"; then + break + fi + output="$output_objdir/$outputname"i + # Replace all uninstalled libtool libraries with the installed ones + newdependency_libs= + for deplib in $dependency_libs; do + case $deplib in + *.la) + func_basename "$deplib" + name="$func_basename_result" + func_resolve_sysroot "$deplib" + eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $func_resolve_sysroot_result` + test -z "$libdir" && \ + func_fatal_error "\`$deplib' is not a valid libtool archive" + func_append newdependency_libs " ${lt_sysroot:+=}$libdir/$name" + ;; + -L*) + func_stripname -L '' "$deplib" + func_replace_sysroot "$func_stripname_result" + func_append newdependency_libs " -L$func_replace_sysroot_result" + ;; + -R*) + func_stripname -R '' "$deplib" + func_replace_sysroot "$func_stripname_result" + func_append newdependency_libs " -R$func_replace_sysroot_result" + ;; + *) func_append newdependency_libs " $deplib" ;; + esac + done + dependency_libs="$newdependency_libs" + newdlfiles= + + for lib in $dlfiles; do + case $lib in + *.la) + func_basename "$lib" + name="$func_basename_result" + eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib` + test -z "$libdir" && \ + func_fatal_error "\`$lib' is not a valid libtool archive" + func_append newdlfiles " ${lt_sysroot:+=}$libdir/$name" + ;; + *) func_append newdlfiles " $lib" ;; + esac + done + dlfiles="$newdlfiles" + newdlprefiles= + for lib in $dlprefiles; do + case $lib in + *.la) + # Only pass preopened files to the pseudo-archive (for + # eventual linking with the app. that links it) if we + # didn't already link the preopened objects directly into + # the library: + func_basename "$lib" + name="$func_basename_result" + eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib` + test -z "$libdir" && \ + func_fatal_error "\`$lib' is not a valid libtool archive" + func_append newdlprefiles " ${lt_sysroot:+=}$libdir/$name" + ;; + esac + done + dlprefiles="$newdlprefiles" + else + newdlfiles= + for lib in $dlfiles; do + case $lib in + [\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;; + *) abs=`pwd`"/$lib" ;; + esac + func_append newdlfiles " $abs" + done + dlfiles="$newdlfiles" + newdlprefiles= + for lib in $dlprefiles; do + case $lib in + [\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;; + *) abs=`pwd`"/$lib" ;; + esac + func_append newdlprefiles " $abs" + done + dlprefiles="$newdlprefiles" + fi + $RM $output + # place dlname in correct position for cygwin + # In fact, it would be nice if we could use this code for all target + # systems that can't hard-code library paths into their executables + # and that have no shared library path variable independent of PATH, + # but it turns out we can't easily determine that from inspecting + # libtool variables, so we have to hard-code the OSs to which it + # applies here; at the moment, that means platforms that use the PE + # object format with DLL files. See the long comment at the top of + # tests/bindir.at for full details. + tdlname=$dlname + case $host,$output,$installed,$module,$dlname in + *cygwin*,*lai,yes,no,*.dll | *mingw*,*lai,yes,no,*.dll | *cegcc*,*lai,yes,no,*.dll) + # If a -bindir argument was supplied, place the dll there. + if test "x$bindir" != x ; + then + func_relative_path "$install_libdir" "$bindir" + tdlname=$func_relative_path_result$dlname + else + # Otherwise fall back on heuristic. 
+ tdlname=../bin/$dlname + fi + ;; + esac + $ECHO > $output "\ +# $outputname - a libtool library file +# Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# The name that we can dlopen(3). +dlname='$tdlname' + +# Names of this library. +library_names='$library_names' + +# The name of the static archive. +old_library='$old_library' + +# Linker flags that can not go in dependency_libs. +inherited_linker_flags='$new_inherited_linker_flags' + +# Libraries that this one depends upon. +dependency_libs='$dependency_libs' + +# Names of additional weak libraries provided by this library +weak_library_names='$weak_libs' + +# Version information for $libname. +current=$current +age=$age +revision=$revision + +# Is this an already installed library? +installed=$installed + +# Should we warn about portability when linking against -modules? +shouldnotlink=$module + +# Files to dlopen/dlpreopen +dlopen='$dlfiles' +dlpreopen='$dlprefiles' + +# Directory that this library needs to be installed in: +libdir='$install_libdir'" + if test "$installed" = no && test "$need_relink" = yes; then + $ECHO >> $output "\ +relink_command=\"$relink_command\"" + fi + done + } + + # Do a symbolic link so that the libtool archive can be found in + # LD_LIBRARY_PATH before the program is installed. + func_show_eval '( cd "$output_objdir" && $RM "$outputname" && $LN_S "../$outputname" "$outputname" )' 'exit $?' + ;; + esac + exit $EXIT_SUCCESS +} + +{ test "$opt_mode" = link || test "$opt_mode" = relink; } && + func_mode_link ${1+"$@"} + + +# func_mode_uninstall arg... +func_mode_uninstall () +{ + $opt_debug + RM="$nonopt" + files= + rmforce= + exit_status=0 + + # This variable tells wrapper scripts just to set variables rather + # than running their programs. + libtool_install_magic="$magic" + + for arg + do + case $arg in + -f) func_append RM " $arg"; rmforce=yes ;; + -*) func_append RM " $arg" ;; + *) func_append files " $arg" ;; + esac + done + + test -z "$RM" && \ + func_fatal_help "you must specify an RM program" + + rmdirs= + + for file in $files; do + func_dirname "$file" "" "." + dir="$func_dirname_result" + if test "X$dir" = X.; then + odir="$objdir" + else + odir="$dir/$objdir" + fi + func_basename "$file" + name="$func_basename_result" + test "$opt_mode" = uninstall && odir="$dir" + + # Remember odir for removal later, being careful to avoid duplicates + if test "$opt_mode" = clean; then + case " $rmdirs " in + *" $odir "*) ;; + *) func_append rmdirs " $odir" ;; + esac + fi + + # Don't error if the file doesn't exist and rm -f was used. + if { test -L "$file"; } >/dev/null 2>&1 || + { test -h "$file"; } >/dev/null 2>&1 || + test -f "$file"; then + : + elif test -d "$file"; then + exit_status=1 + continue + elif test "$rmforce" = yes; then + continue + fi + + rmfiles="$file" + + case $name in + *.la) + # Possibly a libtool archive, so verify it. + if func_lalib_p "$file"; then + func_source $dir/$name + + # Delete the libtool libraries and symlinks. + for n in $library_names; do + func_append rmfiles " $odir/$n" + done + test -n "$old_library" && func_append rmfiles " $odir/$old_library" + + case "$opt_mode" in + clean) + case " $library_names " in + *" $dlname "*) ;; + *) test -n "$dlname" && func_append rmfiles " $odir/$dlname" ;; + esac + test -n "$libdir" && func_append rmfiles " $odir/$name $odir/${name}i" + ;; + uninstall) + if test -n "$library_names"; then + # Do each command in the postuninstall commands. 
+ func_execute_cmds "$postuninstall_cmds" 'test "$rmforce" = yes || exit_status=1' + fi + + if test -n "$old_library"; then + # Do each command in the old_postuninstall commands. + func_execute_cmds "$old_postuninstall_cmds" 'test "$rmforce" = yes || exit_status=1' + fi + # FIXME: should reinstall the best remaining shared library. + ;; + esac + fi + ;; + + *.lo) + # Possibly a libtool object, so verify it. + if func_lalib_p "$file"; then + + # Read the .lo file + func_source $dir/$name + + # Add PIC object to the list of files to remove. + if test -n "$pic_object" && + test "$pic_object" != none; then + func_append rmfiles " $dir/$pic_object" + fi + + # Add non-PIC object to the list of files to remove. + if test -n "$non_pic_object" && + test "$non_pic_object" != none; then + func_append rmfiles " $dir/$non_pic_object" + fi + fi + ;; + + *) + if test "$opt_mode" = clean ; then + noexename=$name + case $file in + *.exe) + func_stripname '' '.exe' "$file" + file=$func_stripname_result + func_stripname '' '.exe' "$name" + noexename=$func_stripname_result + # $file with .exe has already been added to rmfiles, + # add $file without .exe + func_append rmfiles " $file" + ;; + esac + # Do a test to see if this is a libtool program. + if func_ltwrapper_p "$file"; then + if func_ltwrapper_executable_p "$file"; then + func_ltwrapper_scriptname "$file" + relink_command= + func_source $func_ltwrapper_scriptname_result + func_append rmfiles " $func_ltwrapper_scriptname_result" + else + relink_command= + func_source $dir/$noexename + fi + + # note $name still contains .exe if it was in $file originally + # as does the version of $file that was added into $rmfiles + func_append rmfiles " $odir/$name $odir/${name}S.${objext}" + if test "$fast_install" = yes && test -n "$relink_command"; then + func_append rmfiles " $odir/lt-$name" + fi + if test "X$noexename" != "X$name" ; then + func_append rmfiles " $odir/lt-${noexename}.c" + fi + fi + fi + ;; + esac + func_show_eval "$RM $rmfiles" 'exit_status=1' + done + + # Try to remove the ${objdir}s in the directories where we deleted files + for dir in $rmdirs; do + if test -d "$dir"; then + func_show_eval "rmdir $dir >/dev/null 2>&1" + fi + done + + exit $exit_status +} + +{ test "$opt_mode" = uninstall || test "$opt_mode" = clean; } && + func_mode_uninstall ${1+"$@"} + +test -z "$opt_mode" && { + help="$generic_help" + func_fatal_help "you must specify a MODE" +} + +test -z "$exec_cmd" && \ + func_fatal_help "invalid operation mode \`$opt_mode'" + +if test -n "$exec_cmd"; then + eval exec "$exec_cmd" + exit $EXIT_FAILURE +fi + +exit $exit_status + + +# The TAGs below are defined such that we never get into a situation +# in which we disable both kinds of libraries. Given conflicting +# choices, we go for a static library, that is the most portable, +# since we can't tell whether shared libraries were disabled because +# the user asked for that or because the platform doesn't support +# them. This is particularly important on AIX, because we don't +# support having both static and shared libraries enabled at the same +# time on that platform, so we default to a shared-only configuration. +# If a disable-shared tag is given, we'll fallback to a static-only +# configuration. But we'll never go from static-only to shared-only. 
+ +# ### BEGIN LIBTOOL TAG CONFIG: disable-shared +build_libtool_libs=no +build_old_libs=yes +# ### END LIBTOOL TAG CONFIG: disable-shared + +# ### BEGIN LIBTOOL TAG CONFIG: disable-static +build_old_libs=`case $build_libtool_libs in yes) echo no;; *) echo yes;; esac` +# ### END LIBTOOL TAG CONFIG: disable-static + +# Local Variables: +# mode:shell-script +# sh-indentation:2 +# End: +# vi:sw=2 + diff --git a/autoconf/mkinstalldirs b/autoconf/mkinstalldirs new file mode 100755 index 00000000..4191a45d --- /dev/null +++ b/autoconf/mkinstalldirs @@ -0,0 +1,162 @@ +#! /bin/sh +# mkinstalldirs --- make directory hierarchy + +scriptversion=2009-04-28.21; # UTC + +# Original author: Noah Friedman +# Created: 1993-05-16 +# Public domain. +# +# This file is maintained in Automake, please report +# bugs to or send patches to +# . + +nl=' +' +IFS=" "" $nl" +errstatus=0 +dirmode= + +usage="\ +Usage: mkinstalldirs [-h] [--help] [--version] [-m MODE] DIR ... + +Create each directory DIR (with mode MODE, if specified), including all +leading file name components. + +Report bugs to ." + +# process command line arguments +while test $# -gt 0 ; do + case $1 in + -h | --help | --h*) # -h for help + echo "$usage" + exit $? + ;; + -m) # -m PERM arg + shift + test $# -eq 0 && { echo "$usage" 1>&2; exit 1; } + dirmode=$1 + shift + ;; + --version) + echo "$0 $scriptversion" + exit $? + ;; + --) # stop option processing + shift + break + ;; + -*) # unknown option + echo "$usage" 1>&2 + exit 1 + ;; + *) # first non-opt arg + break + ;; + esac +done + +for file +do + if test -d "$file"; then + shift + else + break + fi +done + +case $# in + 0) exit 0 ;; +esac + +# Solaris 8's mkdir -p isn't thread-safe. If you mkdir -p a/b and +# mkdir -p a/c at the same time, both will detect that a is missing, +# one will create a, then the other will try to create a and die with +# a "File exists" error. This is a problem when calling mkinstalldirs +# from a parallel make. We use --version in the probe to restrict +# ourselves to GNU mkdir, which is thread-safe. +case $dirmode in + '') + if mkdir -p --version . >/dev/null 2>&1 && test ! -d ./--version; then + echo "mkdir -p -- $*" + exec mkdir -p -- "$@" + else + # On NextStep and OpenStep, the `mkdir' command does not + # recognize any option. It will interpret all options as + # directories to create, and then abort because `.' already + # exists. + test -d ./-p && rmdir ./-p + test -d ./--version && rmdir ./--version + fi + ;; + *) + if mkdir -m "$dirmode" -p --version . >/dev/null 2>&1 && + test ! -d ./--version; then + echo "mkdir -m $dirmode -p -- $*" + exec mkdir -m "$dirmode" -p -- "$@" + else + # Clean up after NextStep and OpenStep mkdir. + for d in ./-m ./-p ./--version "./$dirmode"; + do + test -d $d && rmdir $d + done + fi + ;; +esac + +for file +do + case $file in + /*) pathcomp=/ ;; + *) pathcomp= ;; + esac + oIFS=$IFS + IFS=/ + set fnord $file + shift + IFS=$oIFS + + for d + do + test "x$d" = x && continue + + pathcomp=$pathcomp$d + case $pathcomp in + -*) pathcomp=./$pathcomp ;; + esac + + if test ! -d "$pathcomp"; then + echo "mkdir $pathcomp" + + mkdir "$pathcomp" || lasterr=$? + + if test ! -d "$pathcomp"; then + errstatus=$lasterr + else + if test ! -z "$dirmode"; then + echo "chmod $dirmode $pathcomp" + lasterr= + chmod "$dirmode" "$pathcomp" || lasterr=$? + + if test ! 
-z "$lasterr"; then + errstatus=$lasterr + fi + fi + fi + fi + + pathcomp=$pathcomp/ + done +done + +exit $errstatus + +# Local Variables: +# mode: shell-script +# sh-indentation: 2 +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "scriptversion=" +# time-stamp-format: "%:y-%02m-%02d.%02H" +# time-stamp-time-zone: "UTC" +# time-stamp-end: "; # UTC" +# End: diff --git a/autoconf/python.conf.py b/autoconf/python.conf.py new file mode 100644 index 00000000..9f679f2b --- /dev/null +++ b/autoconf/python.conf.py @@ -0,0 +1,14 @@ +import sys +import os +from distutils import sysconfig + +print 'PYTHON_INCDIR="-I%s"' % \ + ( sysconfig.get_config_var('INCLUDEPY'), ) + +libdir=sysconfig.get_config_var('LIBPL') +filename = sysconfig.get_config_var('LDLIBRARY'); +lib = os.path.splitext(filename)[0]; +if lib[0:3] == 'lib': + lib = lib[3:] + +print 'PYTHON_LIBS="-L%s -l%s"' % ( libdir, lib ) diff --git a/autoconf/randpass b/autoconf/randpass new file mode 100755 index 00000000..2294a233 --- /dev/null +++ b/autoconf/randpass @@ -0,0 +1,27 @@ +#! /bin/sh +# +# Generate a random password, written to standard output +# By John Walker +# +LANG=C +if test "x$1" = "x" ; then + PWL=48 # Password length in characters +else + PWL=$1 +fi +tmp=`mktemp randpass.XXXXXXXXXX` +if test x$tmp = x; then + tmp=/tmp/p.tmp.$$ + if test -f $tmp; then + echo "Temp file security problem on: $tmp" + exit 1 + fi +fi +cp autoconf/randpass.bc $tmp +ps | sum | tr -d ':[:alpha:] ' | sed 's/^/k=/' >>$tmp +date | tr -d ':[:alpha:] ' | sed 's/^/k=k*/' >>$tmp +ls -l /tmp | sum | tr -d ':[:alpha:] ' | sed 's/^/k=k*/' >>$tmp +echo "j=s(k); for (i = 0; i < $PWL; i++) r()" >>$tmp +echo "quit" >>$tmp +bc $tmp | awk -f autoconf/randpass.awk +rm $tmp diff --git a/autoconf/randpass.awk b/autoconf/randpass.awk new file mode 100644 index 00000000..04cfd13b --- /dev/null +++ b/autoconf/randpass.awk @@ -0,0 +1,14 @@ + +# Dumb little AWK program to convert random decimal +# values generated by rand.bc into passwords from the +# character set defined below as "charset". + +BEGIN { + charset = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + for (i = 0; i < length(charset); i++) { + set[i] = substr(charset, i, 1) + } +} + { printf "%s", set[$1 % length(charset)] } + +END { printf "\n" } diff --git a/autoconf/randpass.bc b/autoconf/randpass.bc new file mode 100644 index 00000000..8a35df67 --- /dev/null +++ b/autoconf/randpass.bc @@ -0,0 +1,44 @@ + /* + "bc" implementation of the "Minimal Standard" + multiplicative congruential generator of Park and Miller. + [Park, S.K. and K.W. Miller, Communications of the ACM 31, + 1192-1201 (1988).] + + The generation algorithm is: + + I[j+1] = (I[j] 16807) & 0x7FFFFFFF + + Note that the intermediate value of the multiplication by 16807 + (7^5) exceeds that representable in 32 bits; this has + deterred use of this generator in most portable languages. + Fortunately, bc computes with arbitrary precision so this + poses no problem. + + Designed and implemented in September 2002 by John Walker, + http://www.fourmilab.ch/. + */ + + /* Initialise state to default value of 1. */ + + t = 1 + + /* Generate and return the next random byte, updating + the state t. */ + + define r() { + t = (t * 16807) % (2^31) + return ((t / 2048) % (2^8)) + } + + /* Set the seed to the x argument. The state t is + set from the seed, after an initial shuffle. If + you don't want 0 printed when setting the seed, + assign s(x) to a junk variable. 
*/ + + define s(x) { + auto i, j + if (x == 0) { "Seed must be nonzero"; return } + t = x % (2^32) + /* Perform initial shuffle of state. */ + for (i = 0; i < 11; i++) j = r() + } diff --git a/autoconf/who_and_what_of_libtool_patch b/autoconf/who_and_what_of_libtool_patch new file mode 100644 index 00000000..70eff1ea --- /dev/null +++ b/autoconf/who_and_what_of_libtool_patch @@ -0,0 +1,170 @@ +Small walk-through of the libtool patch (what's in there ...) + +- Toplevel Makefile.in + - Added depend definition for LIBTOOL + - Rewrote some of the autoconf targets for the new name and always remove the cache autoconf makes + - Updated some install targets to use the RMF macro (minor cleanup) + - Remove libtool (which is autogenerated by configure) on make distclean + +- autoconf/Make.common.in + - Added libdir and includedir + - Changed RM and RMF macros (see the configure.in warning for why it's named @REMOVE@ now) + - Small update in INSTALL_ macros to use the INSTALL_PROGRAM macro better and not have configure + replace all the instances. + - Added the LIBTOOL macros which are used when building with libtool (the DEFAULT_* macros + are a workaround for easily disabling libtool and building the old-fashioned way while still using + the same Makefile.in everywhere). + +- autoconf/aclocal.m4 + - Added libtool m4 files to be included + +- autoconf/bacula-macros/db.m4 + - Updated all help strings to use the Autoconf AC_HELP_STRING function, which formats help strings + better so we never have to re-layout them in the future, even when the content changes + +- autoconf/config.h.in + - updated by a configure run with the libtool updates + +- autoconf/config.on.save + - removed redundant file (because everything is in SVN, I don't see why you would want this) + +- autoconf/configure.in + - This is a complete overhaul of the original file. I reindented most of it + (killed all tabs ...); I think it is more readable now, and while changing it I + also fixed some small bugs. Also updated all comments to use dnl consistently instead of + sometimes # and sometimes dnl. As we will probably never read the resulting + configure script generated by autoconf anyway, I think using this style in configure.in is + more consistent. + - also added all the libtool-specific code, which gives us the --disable-libtool option + and makes sure we can still build everything using the old tools. The LT_INIT is not in + the if statement because it breaks the generated configure. So we always test for libtool, + but you can override its usage with the --disable-libtool option.
+ +- autoconf/gettext-macros/gettext.m4 + - updated to use the AC_HELP_STRING function + +- autoconf/libtool/libtool.m4 +- autoconf/libtool/ltoptions.m4 +- autoconf/libtool/ltsugar.m4 +- autoconf/libtool/ltversion.m4 +- autoconf/libtool/lt~obsolete.m4 +- autoconf/libtool/ltmain.sh + - Files from the libtool 2.2.6 installation which are needed to build libtool support into the package + +- configure + - Regenerated file from an autoconf 2.63 run on the new configure.in + +- src/Makefile.in + - removed stray empty line + +- src/cats/Makefile.in + - added support for libtool + - use dynamic generation of names of object files + - add version for shared lib + - new inference rule for .c.lo + - changed lib name from libsql to libbacsql (to make it clearly linked to the Bacula project) + - extra target to create libtool .la (libtool archive) + - extra libtool-clean target only called when libtool is used + - libtool-install and libtool-uninstall target only called when libtool is used + - install include files when using libtool + +- src/console/Makefile.in + - added support for libtool + - updated to use libtool when defined and to depend on the correct archive type + - extra libtool-clean target only called when libtool is used + +- src/dird/Makefile.in + - use dynamic generation of names of object files + - added support for libtool + - updated to use libtool when defined and to depend on the correct archive type + - extra libtool-clean target only called when libtool is used + +- src/dird/dird.c + - update for python interface change (because of full prototypes we wrap things in #ifdef HAVE_PYTHON) + +- src/dird/pythondir.c + - update for python interface change (some prototypes are moved to pythonlib.h) + +- src/dird/ua_cmds.c + - update for python interface change (because of full prototypes we wrap things in #ifdef HAVE_PYTHON) + +- src/filed/Makefile.in + - use dynamic generation of names of object files + - added support for libtool + - updated to use libtool when defined and to depend on the correct archive type + - extra libtool-clean target only called when libtool is used + +- src/filed/filed.c + - update for python interface change (because of full prototypes we wrap things in #ifdef HAVE_PYTHON) + +- src/filed/pythonfd.c + - update for python interface change (some prototypes are moved to pythonlib.h) + +- src/findlib/Makefile.in + - added support for libtool + - use dynamic generation of names of object files + - add version for shared lib + - new inference rule for .c.lo + - changed lib name from libfind to libbacfind (to make it clearly linked to the Bacula project) + - extra target to create libtool .la (libtool archive) + - extra libtool-clean target only called when libtool is used + - libtool-install and libtool-uninstall target only called when libtool is used + - install include files when using libtool + +- src/lib/Makefile.in + - added support for libtool + - use dynamic generation of names of object files + - split objects into 3 libs (because of symbol reference problems otherwise) + - kept all in the same dir so as not to interfere with the win32 build etc. (first moved them to configlib and pythonlib + but that would break non-libtool/unix builds).
+ - add version for shared libs + - new inference rule for .c.lo and .cc.lo + - extra target to create libtool .la (libtool archive) + - extra libtool-clean target only called when libtool is used + - libtool-install and libtool-uninstall target only called when libtool is used + - install include files when using libtool + - need to see if we need all those include files (already removed tcpd.h) + +- src/lib/protos.h + - update for python interface change (some prototypes are moved to pythonlib.h) + +- src/lib/pythonlib.c + - update for python interface change (some prototypes are moved to pythonlib.h) + - extern references to global vars/functions not needed anymore + +- src/lib/pythonlib.h + - new file for python interface change (some prototypes are moved here) + +- src/plugins/fd/Makefile.in + - added support for libtool + - new inference rule for .c.lo and .cc.lo + - kept old rule, but it doesn't get called for libtool builds + - install rule extended to remove the .la file which gets installed by libtool --mode=install (Apache does the same for DSOs) + +- src/qt-console/bat.pro.in + - added support for libtool + +- src/stored/Makefile.in + - removed pythond.o from BEXTOBJS (unneeded) + - added support for libtool + - updated to use libtool when defined and to depend on the correct archive type + - extra libtool-clean target only called when libtool is used + +- src/stored/pythonsd.c + - update for python interface change (some prototypes are moved to pythonlib.h) + +- src/stored/stored.c + - update for python interface change (because of full prototypes we wrap things in #ifdef HAVE_PYTHON) + +- src/tools/Makefile.in + - added support for libtool + - updated to use libtool when defined and to depend on the correct archive type + - extra libtool-clean target only called when libtool is used + +Todo: + +- check if the install of libs and includes also goes nicely when you don't do things like --prefix=/opt/ELMbacula + e.g. where does the stuff then get installed. +- do a full regression test on the new stuff built with libtool +- see how it works on a real install (currently running 72 hours with shared libs on Solaris 10 64 bits) + diff --git a/configure b/configure new file mode 100755 index 00000000..e0db7dd6 --- /dev/null +++ b/configure @@ -0,0 +1,34198 @@ +#! /bin/sh +# Guess values for system-dependent variables and create Makefiles. +# Generated by GNU Autoconf 2.69 for bacula 9.4.2. +# +# +# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc. +# +# +# This configure script is free software; the Free Software Foundation +# gives unlimited permission to copy, distribute and modify it. +## -------------------- ## +## M4sh Initialization. ## +## -------------------- ## + +# Be more Bourne compatible +DUALCASE=1; export DUALCASE # for MKS sh +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi + + +as_nl=' +' +export as_nl +# Printing a long string crashes Solaris 7 /usr/bin/printf.
+as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo +# Prefer a ksh shell builtin over an external printf program on Solaris, +# but without wasting forks for bash or zsh. +if test -z "$BASH_VERSION$ZSH_VERSION" \ + && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='print -r --' + as_echo_n='print -rn --' +elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='printf %s\n' + as_echo_n='printf %s' +else + if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then + as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' + as_echo_n='/usr/ucb/echo -n' + else + as_echo_body='eval expr "X$1" : "X\\(.*\\)"' + as_echo_n_body='eval + arg=$1; + case $arg in #( + *"$as_nl"*) + expr "X$arg" : "X\\(.*\\)$as_nl"; + arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; + esac; + expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" + ' + export as_echo_n_body + as_echo_n='sh -c $as_echo_n_body as_echo' + fi + export as_echo_body + as_echo='sh -c $as_echo_body as_echo' +fi + +# The user is always right. +if test "${PATH_SEPARATOR+set}" != set; then + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || + PATH_SEPARATOR=';' + } +fi + + +# IFS +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent editors from complaining about space-tab. +# (If _AS_PATH_WALK were called with IFS unset, it would disable word +# splitting by setting IFS to empty value.) +IFS=" "" $as_nl" + +# Find who we are. Look in the path if we contain no directory separator. +as_myself= +case $0 in #(( + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break + done +IFS=$as_save_IFS + + ;; +esac +# We did not find ourselves, most probably we were run as `sh COMMAND' +# in which case we are not to be found in the path. +if test "x$as_myself" = x; then + as_myself=$0 +fi +if test ! -f "$as_myself"; then + $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + exit 1 +fi + +# Unset variables that we do not need and which cause bugs (e.g. in +# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" +# suppresses any "Segmentation fault" message there. '((' could +# trigger a bug in pdksh 5.2.14. +for as_var in BASH_ENV ENV MAIL MAILPATH +do eval test x\${$as_var+set} = xset \ + && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : +done +PS1='$ ' +PS2='> ' +PS4='+ ' + +# NLS nuisances. +LC_ALL=C +export LC_ALL +LANGUAGE=C +export LANGUAGE + +# CDPATH. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +# Use a proper internal environment variable to ensure we don't fall + # into an infinite loop, continuously re-executing ourselves. + if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then + _as_can_reexec=no; export _as_can_reexec; + # We cannot yet assume a decent shell, so we have to provide a +# neutralization value for shells without unset; and this also +# works around shells that cannot unset nonexistent variables. +# Preserve -v and -x to the replacement shell. 
+BASH_ENV=/dev/null +ENV=/dev/null +(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV +case $- in # (((( + *v*x* | *x*v* ) as_opts=-vx ;; + *v* ) as_opts=-v ;; + *x* ) as_opts=-x ;; + * ) as_opts= ;; +esac +exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} +# Admittedly, this is quite paranoid, since all the known shells bail +# out after a failed `exec'. +$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 +as_fn_exit 255 + fi + # We don't want this to propagate to other subprocesses. + { _as_can_reexec=; unset _as_can_reexec;} +if test "x$CONFIG_SHELL" = x; then + as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which + # is contrary to our usage. Disable this feature. + alias -g '\${1+\"\$@\"}'='\"\$@\"' + setopt NO_GLOB_SUBST +else + case \`(set -o) 2>/dev/null\` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi +" + as_required="as_fn_return () { (exit \$1); } +as_fn_success () { as_fn_return 0; } +as_fn_failure () { as_fn_return 1; } +as_fn_ret_success () { return 0; } +as_fn_ret_failure () { return 1; } + +exitcode=0 +as_fn_success || { exitcode=1; echo as_fn_success failed.; } +as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } +as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } +as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } +if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then : + +else + exitcode=1; echo positional parameters were not saved. +fi +test x\$exitcode = x0 || exit 1 +test -x / || exit 1" + as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO + as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO + eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && + test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1 +test \$(( 1 + 1 )) = 2 || exit 1 + + test -n \"\${ZSH_VERSION+set}\${BASH_VERSION+set}\" || ( + ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' + ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO + ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO + PATH=/empty FPATH=/empty; export PATH FPATH + test \"X\`printf %s \$ECHO\`\" = \"X\$ECHO\" \\ + || test \"X\`print -r -- \$ECHO\`\" = \"X\$ECHO\" ) || exit 1" + if (eval "$as_required") 2>/dev/null; then : + as_have_required=yes +else + as_have_required=no +fi + if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then : + +else + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +as_found=false +for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + as_found=: + case $as_dir in #( + /*) + for as_base in sh bash ksh sh5; do + # Try only shells that exist, to save several forks. 
+ as_shell=$as_dir/$as_base + if { test -f "$as_shell" || test -f "$as_shell.exe"; } && + { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then : + CONFIG_SHELL=$as_shell as_have_required=yes + if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then : + break 2 +fi +fi + done;; + esac + as_found=false +done +$as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } && + { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then : + CONFIG_SHELL=$SHELL as_have_required=yes +fi; } +IFS=$as_save_IFS + + + if test "x$CONFIG_SHELL" != x; then : + export CONFIG_SHELL + # We cannot yet assume a decent shell, so we have to provide a +# neutralization value for shells without unset; and this also +# works around shells that cannot unset nonexistent variables. +# Preserve -v and -x to the replacement shell. +BASH_ENV=/dev/null +ENV=/dev/null +(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV +case $- in # (((( + *v*x* | *x*v* ) as_opts=-vx ;; + *v* ) as_opts=-v ;; + *x* ) as_opts=-x ;; + * ) as_opts= ;; +esac +exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} +# Admittedly, this is quite paranoid, since all the known shells bail +# out after a failed `exec'. +$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 +exit 255 +fi + + if test x$as_have_required = xno; then : + $as_echo "$0: This script requires a shell more modern than all" + $as_echo "$0: the shells that I found on your system." + if test x${ZSH_VERSION+set} = xset ; then + $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should" + $as_echo "$0: be upgraded to zsh 4.3.4 or later." + else + $as_echo "$0: Please tell bug-autoconf@gnu.org about your system, +$0: including any error possibly output before this +$0: message. Then install a modern shell, or manually run +$0: the script under such a shell if you do have one." + fi + exit 1 +fi +fi +fi +SHELL=${CONFIG_SHELL-/bin/sh} +export SHELL +# Unset more variables known to interfere with behavior of common tools. +CLICOLOR_FORCE= GREP_OPTIONS= +unset CLICOLOR_FORCE GREP_OPTIONS + +## --------------------- ## +## M4sh Shell Functions. ## +## --------------------- ## +# as_fn_unset VAR +# --------------- +# Portably unset VAR. +as_fn_unset () +{ + { eval $1=; unset $1;} +} +as_unset=as_fn_unset + +# as_fn_set_status STATUS +# ----------------------- +# Set $? to STATUS, without forking. +as_fn_set_status () +{ + return $1 +} # as_fn_set_status + +# as_fn_exit STATUS +# ----------------- +# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. +as_fn_exit () +{ + set +e + as_fn_set_status $1 + exit $1 +} # as_fn_exit + +# as_fn_mkdir_p +# ------------- +# Create "$as_dir" as a directory, including parents if necessary. +as_fn_mkdir_p () +{ + + case $as_dir in #( + -*) as_dir=./$as_dir;; + esac + test -d "$as_dir" || eval $as_mkdir_p || { + as_dirs= + while :; do + case $as_dir in #( + *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( + *) as_qdir=$as_dir;; + esac + as_dirs="'$as_qdir' $as_dirs" + as_dir=`$as_dirname -- "$as_dir" || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| . 
2>/dev/null || +$as_echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + test -d "$as_dir" && break + done + test -z "$as_dirs" || eval "mkdir $as_dirs" + } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" + + +} # as_fn_mkdir_p + +# as_fn_executable_p FILE +# ----------------------- +# Test if FILE is an executable regular file. +as_fn_executable_p () +{ + test -f "$1" && test -x "$1" +} # as_fn_executable_p +# as_fn_append VAR VALUE +# ---------------------- +# Append the text in VALUE to the end of the definition contained in VAR. Take +# advantage of any shell optimizations that allow amortized linear growth over +# repeated appends, instead of the typical quadratic growth present in naive +# implementations. +if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : + eval 'as_fn_append () + { + eval $1+=\$2 + }' +else + as_fn_append () + { + eval $1=\$$1\$2 + } +fi # as_fn_append + +# as_fn_arith ARG... +# ------------------ +# Perform arithmetic evaluation on the ARGs, and store the result in the +# global $as_val. Take advantage of shells that can avoid forks. The arguments +# must be portable across $(()) and expr. +if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : + eval 'as_fn_arith () + { + as_val=$(( $* )) + }' +else + as_fn_arith () + { + as_val=`expr "$@" || test $? -eq 1` + } +fi # as_fn_arith + + +# as_fn_error STATUS ERROR [LINENO LOG_FD] +# ---------------------------------------- +# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are +# provided, also output the error to LOG_FD, referencing LINENO. Then exit the +# script with STATUS, using 1 if that was 0. +as_fn_error () +{ + as_status=$1; test $as_status -eq 0 && as_status=1 + if test "$4"; then + as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 + fi + $as_echo "$as_me: error: $2" >&2 + as_fn_exit $as_status +} # as_fn_error + +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + +if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then + as_dirname=dirname +else + as_dirname=false +fi + +as_me=`$as_basename -- "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q + } + /^X\/\(\/\/\)$/{ + s//\1/ + q + } + /^X\/\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + +# Avoid depending upon Character Ranges. +as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + + + as_lineno_1=$LINENO as_lineno_1a=$LINENO + as_lineno_2=$LINENO as_lineno_2a=$LINENO + eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" && + test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || { + # Blame Lee E. McMahon (1931-1989) for sed's syntax. 
:-) + sed -n ' + p + /[$]LINENO/= + ' <$as_myself | + sed ' + s/[$]LINENO.*/&-/ + t lineno + b + :lineno + N + :loop + s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ + t loop + s/-\n.*// + ' >$as_me.lineno && + chmod +x "$as_me.lineno" || + { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } + + # If we had to re-execute with $CONFIG_SHELL, we're ensured to have + # already done that, so ensure we don't try to do so again and fall + # in an infinite loop. This has already happened in practice. + _as_can_reexec=no; export _as_can_reexec + # Don't try to exec as it changes $[0], causing all sort of problems + # (the dirname of $[0] is not the place where we might find the + # original and so on. Autoconf is especially sensitive to this). + . "./$as_me.lineno" + # Exit status is that of the last command. + exit +} + +ECHO_C= ECHO_N= ECHO_T= +case `echo -n x` in #((((( +-n*) + case `echo 'xy\c'` in + *c*) ECHO_T=' ';; # ECHO_T is single tab character. + xy) ECHO_C='\c';; + *) echo `echo ksh88 bug on AIX 6.1` > /dev/null + ECHO_T=' ';; + esac;; +*) + ECHO_N='-n';; +esac + +rm -f conf$$ conf$$.exe conf$$.file +if test -d conf$$.dir; then + rm -f conf$$.dir/conf$$.file +else + rm -f conf$$.dir + mkdir conf$$.dir 2>/dev/null +fi +if (echo >conf$$.file) 2>/dev/null; then + if ln -s conf$$.file conf$$ 2>/dev/null; then + as_ln_s='ln -s' + # ... but there are two gotchas: + # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. + # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. + # In both cases, we have to default to `cp -pR'. + ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || + as_ln_s='cp -pR' + elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln + else + as_ln_s='cp -pR' + fi +else + as_ln_s='cp -pR' +fi +rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file +rmdir conf$$.dir 2>/dev/null + +if mkdir -p . 2>/dev/null; then + as_mkdir_p='mkdir -p "$as_dir"' +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + +as_test_x='test -x' +as_executable_p=as_fn_executable_p + +# Sed expression to map a string onto a valid CPP name. +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + +SHELL=${CONFIG_SHELL-/bin/sh} + + +test -n "$DJDIR" || exec 7<&0 &1 + +# Name of the host. +# hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status, +# so uname gets run too. +ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` + +# +# Initializations. +# +ac_default_prefix=/usr/local +ac_clean_files= +ac_config_libobj_dir=. +LIBOBJS= +cross_compiling=no +subdirs= +MFLAGS= +MAKEFLAGS= + +# Identity of this package. +PACKAGE_NAME='bacula' +PACKAGE_TARNAME='bacula' +PACKAGE_VERSION='9.4.2' +PACKAGE_STRING='bacula 9.4.2' +PACKAGE_BUGREPORT='' +PACKAGE_URL='' + +ac_unique_file="src/version.h" +# Factoring default headers for most tests. 
+ac_includes_default="\ +#include +#ifdef HAVE_SYS_TYPES_H +# include +#endif +#ifdef HAVE_SYS_STAT_H +# include +#endif +#ifdef STDC_HEADERS +# include +# include +#else +# ifdef HAVE_STDLIB_H +# include +# endif +#endif +#ifdef HAVE_STRING_H +# if !defined STDC_HEADERS && defined HAVE_MEMORY_H +# include +# endif +# include +#endif +#ifdef HAVE_STRINGS_H +# include +#endif +#ifdef HAVE_INTTYPES_H +# include +#endif +#ifdef HAVE_STDINT_H +# include +#endif +#ifdef HAVE_UNISTD_H +# include +#endif" + +ac_header_list= +ac_subst_vars='LTLIBOBJS +COMPRESS_MANPAGES +DISTVER +DISTNAME +MACOSX +WIN32 +PSCMD +TAPEDRIVE +SYSTEMD_UNITDIR +OBJLIST +WRAPLIBS +WLDFLAGS +WCFLAGS +DLIB +DINCLUDE +DEBUG +FDLIBS +CAP_LIBS +XATTROBJS +ACLOBJS +LZO_LIBS +LZO_INC +AFS_LIBS +AFS_CFLAGS +ZLIBS +S3_LIBS +S3_INC +LIBOBJS +X_EXTRA_LIBS +X_LIBS +X_PRE_LIBS +X_CFLAGS +XMKMF +GETCONF +SHARED_CATALOG_TARGETS +DEFAULT_DB_TYPE +DB_LIBS +DB_BACKENDS +uncomment_dbi +SQLITE_BINDIR +SQLITE_INCLUDE +SQLITE_LIBS +MYSQL_BINDIR +MYSQL_INCLUDE +MYSQL_LIBS +POSTGRESQL_BINDIR +POSTGRESQL_INCLUDE +POSTGRESQL_LIBS +SBINPERM +fd_group +fd_user +sd_group +sd_user +dir_group +dir_user +db_ssl_options +db_port +db_password +db_user +db_name +mon_sd_password +mon_fd_password +mon_dir_password +sd_password +fd_password +dir_password +sd_port +fd_port +dir_port +baseport +subsysdir +piddir +smtp_host +job_email +dump_email +plugindir +logdir +bsrdir +scriptdir +hostname +basename +archivedir +working_dir +OPENSSL_INC +OPENSSL_LIBS +READLINE_SRC +CONS_LDFLAGS +CONS_LIBS +CONS_SRC +CONS_OBJ +CONS_INC +STORED_DIR +DIR_TOOLS +DIRD_DIR +ALL_DIRS +STATIC_CONS +STATIC_DIR +STATIC_SD +STATIC_FD +TTOOL_LDFLAGS +QWT +QWT_LIB +QWT_LDFLAGS +QWT_INC +BAT_DIR +INTL_LIBTOOL_SUFFIX_PREFIX +INTLOBJS +GENCAT +INSTOBJEXT +DATADIRNAME +CATOBJEXT +USE_INCLUDED_LIBINTL +BUILD_INCLUDED_LIBINTL +INTLBISON +HAVE_WPRINTF +HAVE_SNPRINTF +HAVE_ASPRINTF +HAVE_POSIX_PRINTF +GLIBC21 +ALLOCA +GLIBC2 +POSUB +LTLIBINTL +LIBINTL +INTLLIBS +LTLIBICONV +LIBICONV +INTL_MACOSX_LIBS +MSGMERGE +XGETTEXT +GMSGFMT +MSGFMT +USE_NLS +MKINSTALLDIRS +SET_MAKE +HAVE_KFREEBSD_OS_FALSE +HAVE_KFREEBSD_OS_TRUE +HAVE_DARWIN_OS_FALSE +HAVE_DARWIN_OS_TRUE +HAVE_IRIX_OS_FALSE +HAVE_IRIX_OS_TRUE +HAVE_SGI_OS_FALSE +HAVE_SGI_OS_TRUE +HAVE_BSDI_OS_FALSE +HAVE_BSDI_OS_TRUE +HAVE_OPENBSD_OS_FALSE +HAVE_OPENBSD_OS_TRUE +HAVE_NETBSD_OS_FALSE +HAVE_NETBSD_OS_TRUE +HAVE_FREEBSD_OS_FALSE +HAVE_FREEBSD_OS_TRUE +HAVE_LINUX_OS_FALSE +HAVE_LINUX_OS_TRUE +HAVE_HPUX_OS_FALSE +HAVE_HPUX_OS_TRUE +HAVE_AIX_OS_FALSE +HAVE_AIX_OS_TRUE +HAVE_OSF1_OS_FALSE +HAVE_OSF1_OS_TRUE +HAVE_HURD_OS_FALSE +HAVE_HURD_OS_TRUE +HAVE_SUN_OS_FALSE +HAVE_SUN_OS_TRUE +INCLUDE_UNINSTALL_TARGET +INCLUDE_INSTALL_TARGET +FD_PLUGIN_DIR +QMAKE_LIBTOOL +LIBTOOL_CLEAN_TARGET +LIBTOOL_UNINSTALL_TARGET +LIBTOOL_INSTALL_TARGET +DEFAULT_SHARED_OBJECT_TYPE +DEFAULT_ARCHIVE_TYPE +DEFAULT_OBJECT_TYPE +LIBADD_DL +LT_DLPREOPEN +LIBADD_DLD_LINK +LIBADD_SHL_LOAD +LIBADD_DLOPEN +LT_DLLOADERS +CXXCPP +OTOOL64 +OTOOL +LIPO +NMEDIT +DSYMUTIL +MANIFEST_TOOL +RANLIB +STRIP +ac_ct_AR +DLLTOOL +OBJDUMP +LN_S +NM +ac_ct_DUMPBIN +DUMPBIN +LD +FGREP +host_os +host_vendor +host_cpu +host +build_os +build_vendor +build_cpu +build +LIBTOOL +LOCAL_DEFS +LOCAL_LDFLAGS +LOCAL_CFLAGS +LOCAL_LIBS +MAKE_SHELL +ARFLAGS +AWK +PIDOF +GMAKE +QMAKE +PKGCONFIG +DVDRWFORMAT +DVDRWMEDIAINFO +GROWISOFS +PYTHON +MKISOFS +DD +MTX +OPENSSL +AR +TBL +CMP +ECHO +SED +CP +REMOVE +MV +INSTALL_DATA +INSTALL_SCRIPT +INSTALL_PROGRAM +EGREP +GREP +CPP +ac_ct_CXX +CXXFLAGS +CXX +OBJEXT 
+EXEEXT +ac_ct_CC +CPPFLAGS +LDFLAGS +CFLAGS +CC +LIBBACFIND_LT_RELEASE +LIBBACCATS_LT_RELEASE +LIBBACSQL_LT_RELEASE +LIBBACCFG_LT_RELEASE +LIBBAC_LT_RELEASE +LIBRSYNC_VERSION +NDMP_VERSION +JAVA_VERSION +VIX_VERSION +DEPKGS_VERSION +DEPKGS_QT_VERSION +BDB_VERSION +post_host +BACULA +LSMDATE +DATE +VERSION +FALSEPRG +TRUEPRG +WIN32TOPDIR +WIN32MAINDIR +WIN32BUILDDIR +TOP_DIR +BUILD_DIR +target_alias +host_alias +build_alias +LIBS +ECHO_T +ECHO_N +ECHO_C +DEFS +mandir +localedir +libdir +psdir +pdfdir +dvidir +htmldir +infodir +docdir +oldincludedir +includedir +runstatedir +localstatedir +sharedstatedir +sysconfdir +datadir +datarootdir +libexecdir +sbindir +bindir +program_transform_name +prefix +exec_prefix +PACKAGE_URL +PACKAGE_BUGREPORT +PACKAGE_STRING +PACKAGE_VERSION +PACKAGE_TARNAME +PACKAGE_NAME +PATH_SEPARATOR +SHELL' +ac_subst_files='MCOMMON' +ac_user_opts=' +enable_option_checking +enable_libtool +enable_shared +enable_static +with_pic +enable_fast_install +with_gnu_ld +with_sysroot +enable_libtool_lock +enable_includes +enable_nls +enable_rpath +with_libiconv_prefix +with_libintl_prefix +with_included_gettext +enable_bat +enable_smartalloc +enable_lockmgr +enable_static_tools +enable_static_fd +enable_static_sd +enable_static_dir +enable_static_cons +enable_client_only +enable_build_dird +enable_build_stored +enable_conio +enable_ipv6 +enable_readline +with_readline +with_tcp_wrappers +with_openssl +with_working_dir +with_archivedir +with_basename +with_hostname +with_scriptdir +with_bsrdir +with_logdir +with_plugindir +with_dump_email +with_job_email +with_smtp_host +with_pid_dir +with_subsys_dir +with_baseport +with_dir_password +with_fd_password +with_sd_password +with_mon_dir_password +with_mon_fd_password +with_mon_sd_password +with_db_name +with_db_user +with_db_password +with_db_port +with_db_ssl_options +with_dir_user +with_dir_group +with_sd_user +with_sd_group +with_fd_user +with_fd_group +with_sbin_perm +enable_batch_insert +with_postgresql +with_mysql +with_embedded_mysql +with_sqlite3 +enable_largefile +with_x +with_s3 +enable_afs +with_afsdir +enable_lzo +with_lzo +enable_acl +enable_xattr +with_systemd +' + ac_precious_vars='build_alias +host_alias +target_alias +CC +CFLAGS +LDFLAGS +LIBS +CPPFLAGS +CXX +CXXFLAGS +CCC +CPP +CXXCPP +XMKMF' + + +# Initialize some variables set by options. +ac_init_help= +ac_init_version=false +ac_unrecognized_opts= +ac_unrecognized_sep= +# The variables have the same names as the options, with +# dashes changed to underlines. +cache_file=/dev/null +exec_prefix=NONE +no_create= +no_recursion= +prefix=NONE +program_prefix=NONE +program_suffix=NONE +program_transform_name=s,x,x, +silent= +site= +srcdir= +verbose= +x_includes=NONE +x_libraries=NONE + +# Installation directory options. +# These are left unexpanded so users can "make install exec_prefix=/foo" +# and all the variables that are supposed to be based on exec_prefix +# by default will actually change. +# Use braces instead of parens because sh, perl, etc. also accept them. +# (The list follows the same order as the GNU Coding Standards.) 
+bindir='${exec_prefix}/bin' +sbindir='${exec_prefix}/sbin' +libexecdir='${exec_prefix}/libexec' +datarootdir='${prefix}/share' +datadir='${datarootdir}' +sysconfdir='${prefix}/etc' +sharedstatedir='${prefix}/com' +localstatedir='${prefix}/var' +runstatedir='${localstatedir}/run' +includedir='${prefix}/include' +oldincludedir='/usr/include' +docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' +infodir='${datarootdir}/info' +htmldir='${docdir}' +dvidir='${docdir}' +pdfdir='${docdir}' +psdir='${docdir}' +libdir='${exec_prefix}/lib' +localedir='${datarootdir}/locale' +mandir='${datarootdir}/man' + +ac_prev= +ac_dashdash= +for ac_option +do + # If the previous option needs an argument, assign it. + if test -n "$ac_prev"; then + eval $ac_prev=\$ac_option + ac_prev= + continue + fi + + case $ac_option in + *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; + *=) ac_optarg= ;; + *) ac_optarg=yes ;; + esac + + # Accept the important Cygnus configure options, so we can diagnose typos. + + case $ac_dashdash$ac_option in + --) + ac_dashdash=yes ;; + + -bindir | --bindir | --bindi | --bind | --bin | --bi) + ac_prev=bindir ;; + -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) + bindir=$ac_optarg ;; + + -build | --build | --buil | --bui | --bu) + ac_prev=build_alias ;; + -build=* | --build=* | --buil=* | --bui=* | --bu=*) + build_alias=$ac_optarg ;; + + -cache-file | --cache-file | --cache-fil | --cache-fi \ + | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) + ac_prev=cache_file ;; + -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ + | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) + cache_file=$ac_optarg ;; + + --config-cache | -C) + cache_file=config.cache ;; + + -datadir | --datadir | --datadi | --datad) + ac_prev=datadir ;; + -datadir=* | --datadir=* | --datadi=* | --datad=*) + datadir=$ac_optarg ;; + + -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ + | --dataroo | --dataro | --datar) + ac_prev=datarootdir ;; + -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ + | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) + datarootdir=$ac_optarg ;; + + -disable-* | --disable-*) + ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? "invalid feature name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"enable_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval enable_$ac_useropt=no ;; + + -docdir | --docdir | --docdi | --doc | --do) + ac_prev=docdir ;; + -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) + docdir=$ac_optarg ;; + + -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) + ac_prev=dvidir ;; + -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) + dvidir=$ac_optarg ;; + + -enable-* | --enable-*) + ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? 
"invalid feature name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"enable_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval enable_$ac_useropt=\$ac_optarg ;; + + -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ + | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ + | --exec | --exe | --ex) + ac_prev=exec_prefix ;; + -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ + | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ + | --exec=* | --exe=* | --ex=*) + exec_prefix=$ac_optarg ;; + + -gas | --gas | --ga | --g) + # Obsolete; use --with-gas. + with_gas=yes ;; + + -help | --help | --hel | --he | -h) + ac_init_help=long ;; + -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) + ac_init_help=recursive ;; + -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) + ac_init_help=short ;; + + -host | --host | --hos | --ho) + ac_prev=host_alias ;; + -host=* | --host=* | --hos=* | --ho=*) + host_alias=$ac_optarg ;; + + -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) + ac_prev=htmldir ;; + -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ + | --ht=*) + htmldir=$ac_optarg ;; + + -includedir | --includedir | --includedi | --included | --include \ + | --includ | --inclu | --incl | --inc) + ac_prev=includedir ;; + -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ + | --includ=* | --inclu=* | --incl=* | --inc=*) + includedir=$ac_optarg ;; + + -infodir | --infodir | --infodi | --infod | --info | --inf) + ac_prev=infodir ;; + -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) + infodir=$ac_optarg ;; + + -libdir | --libdir | --libdi | --libd) + ac_prev=libdir ;; + -libdir=* | --libdir=* | --libdi=* | --libd=*) + libdir=$ac_optarg ;; + + -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ + | --libexe | --libex | --libe) + ac_prev=libexecdir ;; + -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ + | --libexe=* | --libex=* | --libe=*) + libexecdir=$ac_optarg ;; + + -localedir | --localedir | --localedi | --localed | --locale) + ac_prev=localedir ;; + -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) + localedir=$ac_optarg ;; + + -localstatedir | --localstatedir | --localstatedi | --localstated \ + | --localstate | --localstat | --localsta | --localst | --locals) + ac_prev=localstatedir ;; + -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ + | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) + localstatedir=$ac_optarg ;; + + -mandir | --mandir | --mandi | --mand | --man | --ma | --m) + ac_prev=mandir ;; + -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) + mandir=$ac_optarg ;; + + -nfp | --nfp | --nf) + # Obsolete; use --without-fp. 
+ with_fp=no ;; + + -no-create | --no-create | --no-creat | --no-crea | --no-cre \ + | --no-cr | --no-c | -n) + no_create=yes ;; + + -no-recursion | --no-recursion | --no-recursio | --no-recursi \ + | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) + no_recursion=yes ;; + + -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ + | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ + | --oldin | --oldi | --old | --ol | --o) + ac_prev=oldincludedir ;; + -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ + | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ + | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) + oldincludedir=$ac_optarg ;; + + -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) + ac_prev=prefix ;; + -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) + prefix=$ac_optarg ;; + + -program-prefix | --program-prefix | --program-prefi | --program-pref \ + | --program-pre | --program-pr | --program-p) + ac_prev=program_prefix ;; + -program-prefix=* | --program-prefix=* | --program-prefi=* \ + | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) + program_prefix=$ac_optarg ;; + + -program-suffix | --program-suffix | --program-suffi | --program-suff \ + | --program-suf | --program-su | --program-s) + ac_prev=program_suffix ;; + -program-suffix=* | --program-suffix=* | --program-suffi=* \ + | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) + program_suffix=$ac_optarg ;; + + -program-transform-name | --program-transform-name \ + | --program-transform-nam | --program-transform-na \ + | --program-transform-n | --program-transform- \ + | --program-transform | --program-transfor \ + | --program-transfo | --program-transf \ + | --program-trans | --program-tran \ + | --progr-tra | --program-tr | --program-t) + ac_prev=program_transform_name ;; + -program-transform-name=* | --program-transform-name=* \ + | --program-transform-nam=* | --program-transform-na=* \ + | --program-transform-n=* | --program-transform-=* \ + | --program-transform=* | --program-transfor=* \ + | --program-transfo=* | --program-transf=* \ + | --program-trans=* | --program-tran=* \ + | --progr-tra=* | --program-tr=* | --program-t=*) + program_transform_name=$ac_optarg ;; + + -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) + ac_prev=pdfdir ;; + -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) + pdfdir=$ac_optarg ;; + + -psdir | --psdir | --psdi | --psd | --ps) + ac_prev=psdir ;; + -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) + psdir=$ac_optarg ;; + + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil) + silent=yes ;; + + -runstatedir | --runstatedir | --runstatedi | --runstated \ + | --runstate | --runstat | --runsta | --runst | --runs \ + | --run | --ru | --r) + ac_prev=runstatedir ;; + -runstatedir=* | --runstatedir=* | --runstatedi=* | --runstated=* \ + | --runstate=* | --runstat=* | --runsta=* | --runst=* | --runs=* \ + | --run=* | --ru=* | --r=*) + runstatedir=$ac_optarg ;; + + -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) + ac_prev=sbindir ;; + -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ + | --sbi=* | --sb=*) + sbindir=$ac_optarg ;; + + -sharedstatedir | --sharedstatedir | --sharedstatedi \ + | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ + | --sharedst | --shareds | --shared | --share | --shar \ + | --sha | --sh) + 
ac_prev=sharedstatedir ;; + -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ + | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ + | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ + | --sha=* | --sh=*) + sharedstatedir=$ac_optarg ;; + + -site | --site | --sit) + ac_prev=site ;; + -site=* | --site=* | --sit=*) + site=$ac_optarg ;; + + -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) + ac_prev=srcdir ;; + -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) + srcdir=$ac_optarg ;; + + -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ + | --syscon | --sysco | --sysc | --sys | --sy) + ac_prev=sysconfdir ;; + -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ + | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) + sysconfdir=$ac_optarg ;; + + -target | --target | --targe | --targ | --tar | --ta | --t) + ac_prev=target_alias ;; + -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) + target_alias=$ac_optarg ;; + + -v | -verbose | --verbose | --verbos | --verbo | --verb) + verbose=yes ;; + + -version | --version | --versio | --versi | --vers | -V) + ac_init_version=: ;; + + -with-* | --with-*) + ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? "invalid package name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"with_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval with_$ac_useropt=\$ac_optarg ;; + + -without-* | --without-*) + ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? "invalid package name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"with_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval with_$ac_useropt=no ;; + + --x) + # Obsolete; use --with-x. + with_x=yes ;; + + -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ + | --x-incl | --x-inc | --x-in | --x-i) + ac_prev=x_includes ;; + -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ + | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) + x_includes=$ac_optarg ;; + + -x-libraries | --x-libraries | --x-librarie | --x-librari \ + | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) + ac_prev=x_libraries ;; + -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ + | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) + x_libraries=$ac_optarg ;; + + -*) as_fn_error $? "unrecognized option: \`$ac_option' +Try \`$0 --help' for more information" + ;; + + *=*) + ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` + # Reject names that are not valid shell variable names. + case $ac_envvar in #( + '' | [0-9]* | *[!_$as_cr_alnum]* ) + as_fn_error $? "invalid variable name: \`$ac_envvar'" ;; + esac + eval $ac_envvar=\$ac_optarg + export $ac_envvar ;; + + *) + # FIXME: should be removed in autoconf 3.0. 
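+    # A bare, non-option argument that reaches this branch is still accepted
+    # as a host type for backward compatibility; for example, a hypothetical
+    # `./configure x86_64-pc-linux-gnu' would set the build, host and target
+    # aliases to that triplet while emitting the warning below.  Prefer the
+    # explicit `--build=' and `--host=' options.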
+ $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 + expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && + $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 + : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}" + ;; + + esac +done + +if test -n "$ac_prev"; then + ac_option=--`echo $ac_prev | sed 's/_/-/g'` + as_fn_error $? "missing argument to $ac_option" +fi + +if test -n "$ac_unrecognized_opts"; then + case $enable_option_checking in + no) ;; + fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;; + *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; + esac +fi + +# Check all directory arguments for consistency. +for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ + datadir sysconfdir sharedstatedir localstatedir includedir \ + oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ + libdir localedir mandir runstatedir +do + eval ac_val=\$$ac_var + # Remove trailing slashes. + case $ac_val in + */ ) + ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` + eval $ac_var=\$ac_val;; + esac + # Be sure to have absolute directory names. + case $ac_val in + [\\/$]* | ?:[\\/]* ) continue;; + NONE | '' ) case $ac_var in *prefix ) continue;; esac;; + esac + as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val" +done + +# There might be people who depend on the old broken behavior: `$host' +# used to hold the argument of --host etc. +# FIXME: To remove some day. +build=$build_alias +host=$host_alias +target=$target_alias + +# FIXME: To remove some day. +if test "x$host_alias" != x; then + if test "x$build_alias" = x; then + cross_compiling=maybe + elif test "x$build_alias" != "x$host_alias"; then + cross_compiling=yes + fi +fi + +ac_tool_prefix= +test -n "$host_alias" && ac_tool_prefix=$host_alias- + +test "$silent" = yes && exec 6>/dev/null + + +ac_pwd=`pwd` && test -n "$ac_pwd" && +ac_ls_di=`ls -di .` && +ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || + as_fn_error $? "working directory cannot be determined" +test "X$ac_ls_di" = "X$ac_pwd_ls_di" || + as_fn_error $? "pwd does not report name of working directory" + + +# Find the source files, if location was not specified. +if test -z "$srcdir"; then + ac_srcdir_defaulted=yes + # Try the directory containing this script, then the parent directory. + ac_confdir=`$as_dirname -- "$as_myself" || +$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_myself" : 'X\(//\)[^/]' \| \ + X"$as_myself" : 'X\(//\)$' \| \ + X"$as_myself" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$as_myself" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + srcdir=$ac_confdir + if test ! -r "$srcdir/$ac_unique_file"; then + srcdir=.. + fi +else + ac_srcdir_defaulted=no +fi +if test ! -r "$srcdir/$ac_unique_file"; then + test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." + as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir" +fi +ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" +ac_abs_confdir=`( + cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg" + pwd)` +# When building in place, set srcdir=. +if test "$ac_abs_confdir" = "$ac_pwd"; then + srcdir=. +fi +# Remove unnecessary trailing slashes from srcdir. +# Double slashes in file names in object file debugging info +# mess up M-x gdb in Emacs. 
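+# As an illustration of the defaulting above: driving the script from a
+# separate build directory with `mkdir build && cd build && ../configure'
+# leaves srcdir=.., whereas an in-place `./configure' has just reset
+# srcdir=. ; any trailing slash is stripped next.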
+case $srcdir in +*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; +esac +for ac_var in $ac_precious_vars; do + eval ac_env_${ac_var}_set=\${${ac_var}+set} + eval ac_env_${ac_var}_value=\$${ac_var} + eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} + eval ac_cv_env_${ac_var}_value=\$${ac_var} +done + +# +# Report the --help message. +# +if test "$ac_init_help" = "long"; then + # Omit some internal or obsolete options to make the list less imposing. + # This message is too long to be a string in the A/UX 3.1 sh. + cat <<_ACEOF +\`configure' configures bacula 9.4.2 to adapt to many kinds of systems. + +Usage: $0 [OPTION]... [VAR=VALUE]... + +To assign environment variables (e.g., CC, CFLAGS...), specify them as +VAR=VALUE. See below for descriptions of some of the useful variables. + +Defaults for the options are specified in brackets. + +Configuration: + -h, --help display this help and exit + --help=short display options specific to this package + --help=recursive display the short help of all the included packages + -V, --version display version information and exit + -q, --quiet, --silent do not print \`checking ...' messages + --cache-file=FILE cache test results in FILE [disabled] + -C, --config-cache alias for \`--cache-file=config.cache' + -n, --no-create do not create output files + --srcdir=DIR find the sources in DIR [configure dir or \`..'] + +Installation directories: + --prefix=PREFIX install architecture-independent files in PREFIX + [$ac_default_prefix] + --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX + [PREFIX] + +By default, \`make install' will install all the files in +\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify +an installation prefix other than \`$ac_default_prefix' using \`--prefix', +for instance \`--prefix=\$HOME'. + +For better control, use the options below. 
+ +Fine tuning of the installation directories: + --bindir=DIR user executables [EPREFIX/bin] + --sbindir=DIR system admin executables [EPREFIX/sbin] + --libexecdir=DIR program executables [EPREFIX/libexec] + --sysconfdir=DIR read-only single-machine data [PREFIX/etc] + --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] + --localstatedir=DIR modifiable single-machine data [PREFIX/var] + --runstatedir=DIR modifiable per-process data [LOCALSTATEDIR/run] + --libdir=DIR object code libraries [EPREFIX/lib] + --includedir=DIR C header files [PREFIX/include] + --oldincludedir=DIR C header files for non-gcc [/usr/include] + --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] + --datadir=DIR read-only architecture-independent data [DATAROOTDIR] + --infodir=DIR info documentation [DATAROOTDIR/info] + --localedir=DIR locale-dependent data [DATAROOTDIR/locale] + --mandir=DIR man documentation [DATAROOTDIR/man] + --docdir=DIR documentation root [DATAROOTDIR/doc/bacula] + --htmldir=DIR html documentation [DOCDIR] + --dvidir=DIR dvi documentation [DOCDIR] + --pdfdir=DIR pdf documentation [DOCDIR] + --psdir=DIR ps documentation [DOCDIR] +_ACEOF + + cat <<\_ACEOF + +X features: + --x-includes=DIR X include files are in DIR + --x-libraries=DIR X library files are in DIR + +System types: + --build=BUILD configure for building on BUILD [guessed] + --host=HOST cross-compile to build programs to run on HOST [BUILD] +_ACEOF +fi + +if test -n "$ac_init_help"; then + case $ac_init_help in + short | recursive ) echo "Configuration of bacula 9.4.2:";; + esac + cat <<\_ACEOF + +Optional Features: + --disable-option-checking ignore unrecognized --enable/--with options + --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no) + --enable-FEATURE[=ARG] include FEATURE [ARG=yes] + --enable-libtool enable building using GNU libtool [default=yes] + --enable-shared[=PKGS] build shared libraries [default=yes] + --enable-static[=PKGS] build static libraries [default=no] + --enable-fast-install[=PKGS] + optimize for fast installation [default=yes] + --disable-libtool-lock avoid locking (might break parallel builds) + --enable-includes enable installing of include files [default=no] + --disable-nls do not use Native Language Support + --disable-rpath do not hardcode runtime library paths + --enable-bat enable build of bat Qt4/5 GUI [default=no] + --enable-smartalloc enable smartalloc debugging support [default=no] + --enable-lockmgr enable lock manager support [default=no] + --enable-static-tools enable static tape tools [default=no] + --enable-static-fd enable static File daemon [default=no] + --enable-static-sd enable static Storage daemon [default=no] + --enable-static-dir enable static Director [default=no] + --enable-static-cons enable static Console [default=no] + --enable-client-only build client (File daemon) only [default=no] + --enable-build-dird enable building of dird (Director) [default=yes] + --enable-build-stored enable building of stored (Storage daemon) + [default=yes] + --disable-conio disable conio support [default=no] + --enable-ipv6 enable ipv6 support [default=yes] + --disable-readline disable readline support [default=yes] + --enable-batch-insert enable the DB batch insert code [default=yes] + --disable-largefile omit support for large files + --disable-afs disable afs support [default=auto] + --disable-lzo disable lzo support [default=yes] + --disable-acl disable acl support [default=auto] + --disable-xattr disable xattr support [default=auto] + 
+Optional Packages: + --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] + --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) + --with-pic[=PKGS] try to use only PIC/non-PIC objects [default=use + both] + --with-gnu-ld assume the C compiler uses GNU ld [default=no] + --with-sysroot=DIR Search for dependent libraries within DIR + (or the compiler's sysroot if not specified). + --with-gnu-ld assume the C compiler uses GNU ld default=no + --with-libiconv-prefix[=DIR] search for libiconv in DIR/include and DIR/lib + --without-libiconv-prefix don't search for libiconv in includedir and libdir + --with-libintl-prefix[=DIR] search for libintl in DIR/include and DIR/lib + --without-libintl-prefix don't search for libintl in includedir and libdir + --with-included-gettext use the GNU gettext library included here + --with-readline[=DIR] specify readline library directory + --with-tcp-wrappers[=DIR] + enable tcpwrappers support + --with-openssl[=DIR] Include OpenSSL support. DIR is the OpenSSL base + --with-working-dir=PATH specify path of Bacula working directory + --with-archivedir=PATH specify path of SD archive directory + --with-basename=RESNAME specify base resource name for daemons + --with-hostname=RESNAME specify host name for daemons + --with-scriptdir=PATH specify path of Bacula scripts directory + --with-bsrdir=PATH specify path of Bacula bsrs directory + --with-logdir=PATH specify path of Bacula logs directory + --with-plugindir=PATH specify path of Bacula plugins directory + --with-dump-email=EMAIL dump email address + --with-job-email=EMAIL job output email address + --with-smtp-host=HOST SMTP mail host address + --with-pid-dir=PATH specify location of Bacula pid files + --with-subsys-dir=PATH specify location of Bacula subsys file + --with-baseport=PORT specify base port address for daemons + --with-dir-password=PASSWORD + specify Director's password + --with-fd-password=PASSWORD + specify Client's password + --with-sd-password=PASSWORD + specify Storage daemon's password + --with-mon-dir-password=PASSWORD + specify Director's password used by the monitor + --with-mon-fd-password=PASSWORD + specify Client's password used by the monitor + --with-mon-sd-password=PASSWORD + specify Storage daemon's password used by the + monitor + --with-db-name=DBNAME specify database name [default=bacula] + --with-db-user=UNAME specify database user [default=bacula] + --with-db-password=PWD specify database password [default=*none*] + --with-db-port=DBPORT specify a database port [default=null] + --with-db-ssl-options=DBSSLOPTIONS + specify SSL options for database user connection + [default=null] + --with-dir-user=USER specify user for Director daemon + --with-dir-group=GROUP specify group for Director daemon + --with-sd-user=USER specify user for Storage daemon + --with-sd-group=GROUP specify group for Storage daemon + --with-fd-user=USER specify user for File daemon + --with-fd-group=GROUP specify group for File daemon + --with-sbin-perm=MODE specify permissions for sbin binaries [default=0750] + --with-postgresql[=DIR] Include PostgreSQL support. DIR is the PostgreSQL + base install directory, [default=/usr/local/pgsql] + --with-mysql[=DIR] Include MySQL support. DIR is the MySQL base install + directory, default is to search through a number of + common places for the MySQL files. + --with-embedded-mysql[=DIR] + Include MySQL support. DIR is the MySQL base install + directory, default is to search through a number of + common places for the MySQL files. 
+  --with-sqlite3[=DIR]    Include SQLite3 support. DIR is the SQLite3 base
+                          install directory, default is to search through a
+                          number of common places for the SQLite3 files.
+  --with-x                use the X Window System
+  --with-s3[=DIR]         specify s3 library directory
+  --with-afsdir[=DIR]     Directory holding AFS includes/libs
+  --with-lzo[=DIR]        specify lzo library directory
+  --with-systemd[=UNITDIR]
+                          Include systemd support. UNITDIR is where systemd
+                          system .service files are located, default is to ask
+                          systemctl.
+
+Some influential environment variables:
+  CC          C compiler command
+  CFLAGS      C compiler flags
+  LDFLAGS     linker flags, e.g. -L<lib dir> if you have libraries in a
+              nonstandard directory <lib dir>
+  LIBS        libraries to pass to the linker, e.g. -l<library>
+  CPPFLAGS    (Objective) C/C++ preprocessor flags, e.g. -I<include dir> if
+              you have headers in a nonstandard directory <include dir>
+  CXX         C++ compiler command
+  CXXFLAGS    C++ compiler flags
+  CPP         C preprocessor
+  CXXCPP      C++ preprocessor
+  XMKMF       Path to xmkmf, Makefile generator for X Window System
+
+Use these variables to override the choices made by `configure' or to help
+it to find libraries and programs with nonstandard names/locations.
+
+Report bugs to the package provider.
+_ACEOF
+ac_status=$?
+fi
+
+if test "$ac_init_help" = "recursive"; then
+  # If there are subdirs, report their specific --help.
+  for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue
+    test -d "$ac_dir" ||
+      { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } ||
+      continue
+    ac_builddir=.
+
+case "$ac_dir" in
+.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
+*)
+  ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'`
+  # A ".." for each directory in $ac_dir_suffix.
+  ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
+  case $ac_top_builddir_sub in
+  "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
+  *)  ac_top_build_prefix=$ac_top_builddir_sub/ ;;
+  esac ;;
+esac
+ac_abs_top_builddir=$ac_pwd
+ac_abs_builddir=$ac_pwd$ac_dir_suffix
+# for backward compatibility:
+ac_top_builddir=$ac_top_build_prefix
+
+case $srcdir in
+  .)  # We are building in place.
+    ac_srcdir=.
+    ac_top_srcdir=$ac_top_builddir_sub
+    ac_abs_top_srcdir=$ac_pwd ;;
+  [\\/]* | ?:[\\/]* )  # Absolute name.
+    ac_srcdir=$srcdir$ac_dir_suffix;
+    ac_top_srcdir=$srcdir
+    ac_abs_top_srcdir=$srcdir ;;
+  *) # Relative name.
+    ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
+    ac_top_srcdir=$ac_top_build_prefix$srcdir
+    ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
+esac
+ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
+
+    cd "$ac_dir" || { ac_status=$?; continue; }
+    # Check for guested configure.
+    if test -f "$ac_srcdir/configure.gnu"; then
+      echo &&
+      $SHELL "$ac_srcdir/configure.gnu" --help=recursive
+    elif test -f "$ac_srcdir/configure"; then
+      echo &&
+      $SHELL "$ac_srcdir/configure" --help=recursive
+    else
+      $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2
+    fi || ac_status=$?
+    cd "$ac_pwd" || { ac_status=$?; break; }
+  done
+fi
+
+test -n "$ac_init_help" && exit $ac_status
+if $ac_init_version; then
+  cat <<\_ACEOF
+bacula configure 9.4.2
+generated by GNU Autoconf 2.69
+
+Copyright (C) 2012 Free Software Foundation, Inc.
+This configure script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it.
+_ACEOF
+  exit
+fi
+
+## ------------------------ ##
+## Autoconf initialization.
## +## ------------------------ ## + +# ac_fn_c_try_compile LINENO +# -------------------------- +# Try to compile conftest.$ac_ext, and return whether this succeeded. +ac_fn_c_try_compile () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + rm -f conftest.$ac_objext + if { { ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compile") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_c_try_compile + +# ac_fn_cxx_try_compile LINENO +# ---------------------------- +# Try to compile conftest.$ac_ext, and return whether this succeeded. +ac_fn_cxx_try_compile () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + rm -f conftest.$ac_objext + if { { ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compile") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_cxx_try_compile + +# ac_fn_c_try_cpp LINENO +# ---------------------- +# Try to preprocess conftest.$ac_ext, and return whether this succeeded. +ac_fn_c_try_cpp () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + if { { ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } > conftest.i && { + test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || + test ! -s conftest.err + }; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_c_try_cpp + +# ac_fn_c_try_link LINENO +# ----------------------- +# Try to link conftest.$ac_ext, and return whether this succeeded. 
+ac_fn_c_try_link () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + rm -f conftest.$ac_objext conftest$ac_exeext + if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + test -x conftest$ac_exeext + }; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information + # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would + # interfere with the next link command; also delete a directory that is + # left behind by Apple's compiler. We do this before executing the actions. + rm -rf conftest.dSYM conftest_ipa8_conftest.oo + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_c_try_link + +# ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES +# ------------------------------------------------------- +# Tests whether HEADER exists and can be compiled using the include files in +# INCLUDES, setting the cache variable VAR accordingly. +ac_fn_c_check_header_compile () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +$as_echo_n "checking for $2... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +#include <$2> +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + eval "$3=yes" +else + eval "$3=no" +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_c_check_header_compile + +# ac_fn_c_try_run LINENO +# ---------------------- +# Try to link conftest.$ac_ext, and return whether this succeeded. Assumes +# that executables *can* be run. +ac_fn_c_try_run () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { ac_try='./conftest$ac_exeext' + { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; }; }; then : + ac_retval=0 +else + $as_echo "$as_me: program exited with status $ac_status" >&5 + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=$ac_status +fi + rm -rf conftest.dSYM conftest_ipa8_conftest.oo + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_c_try_run + +# ac_fn_c_check_func LINENO FUNC VAR +# ---------------------------------- +# Tests whether FUNC exists, setting the cache variable VAR accordingly +ac_fn_c_check_func () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +$as_echo_n "checking for $2... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +/* Define $2 to an innocuous variant, in case declares $2. + For example, HP-UX 11i declares gettimeofday. */ +#define $2 innocuous_$2 + +/* System header to define __stub macros and hopefully few prototypes, + which can conflict with char $2 (); below. + Prefer to if __STDC__ is defined, since + exists even on freestanding compilers. */ + +#ifdef __STDC__ +# include +#else +# include +#endif + +#undef $2 + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char $2 (); +/* The GNU C library defines this for functions which it implements + to always fail with ENOSYS. Some functions are actually named + something starting with __ and the normal name is an alias. */ +#if defined __stub_$2 || defined __stub___$2 +choke me +#endif + +int +main () +{ +return $2 (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + eval "$3=yes" +else + eval "$3=no" +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_c_check_func + +# ac_fn_cxx_try_cpp LINENO +# ------------------------ +# Try to preprocess conftest.$ac_ext, and return whether this succeeded. +ac_fn_cxx_try_cpp () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + if { { ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } > conftest.i && { + test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" || + test ! -s conftest.err + }; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_cxx_try_cpp + +# ac_fn_cxx_try_link LINENO +# ------------------------- +# Try to link conftest.$ac_ext, and return whether this succeeded. 
+ac_fn_cxx_try_link () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + rm -f conftest.$ac_objext conftest$ac_exeext + if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + test -x conftest$ac_exeext + }; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information + # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would + # interfere with the next link command; also delete a directory that is + # left behind by Apple's compiler. We do this before executing the actions. + rm -rf conftest.dSYM conftest_ipa8_conftest.oo + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_cxx_try_link + +# ac_fn_c_check_decl LINENO SYMBOL VAR INCLUDES +# --------------------------------------------- +# Tests whether SYMBOL is declared in INCLUDES, setting cache variable VAR +# accordingly. +ac_fn_c_check_decl () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + as_decl_name=`echo $2|sed 's/ *(.*//'` + as_decl_use=`echo $2|sed -e 's/(/((/' -e 's/)/) 0&/' -e 's/,/) 0& (/g'` + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $as_decl_name is declared" >&5 +$as_echo_n "checking whether $as_decl_name is declared... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +int +main () +{ +#ifndef $as_decl_name +#ifdef __cplusplus + (void) $as_decl_use; +#else + (void) $as_decl_name; +#endif +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + eval "$3=yes" +else + eval "$3=no" +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_c_check_decl + +# ac_fn_c_check_type LINENO TYPE VAR INCLUDES +# ------------------------------------------- +# Tests whether TYPE exists after having included INCLUDES, setting cache +# variable VAR accordingly. +ac_fn_c_check_type () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +$as_echo_n "checking for $2... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +else + eval "$3=no" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +int +main () +{ +if (sizeof ($2)) + return 0; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +$4 +int +main () +{ +if (sizeof (($2))) + return 0; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + +else + eval "$3=yes" +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_c_check_type + +# ac_fn_c_check_header_mongrel LINENO HEADER VAR INCLUDES +# ------------------------------------------------------- +# Tests whether HEADER exists, giving a warning if it cannot be compiled using +# the include files in INCLUDES and setting the cache variable VAR +# accordingly. +ac_fn_c_check_header_mongrel () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + if eval \${$3+:} false; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +$as_echo_n "checking for $2... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +else + # Is the header compilable? +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5 +$as_echo_n "checking $2 usability... " >&6; } +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +#include <$2> +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_header_compiler=yes +else + ac_header_compiler=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5 +$as_echo "$ac_header_compiler" >&6; } + +# Is the header present? +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5 +$as_echo_n "checking $2 presence... " >&6; } +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include <$2> +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + ac_header_preproc=yes +else + ac_header_preproc=no +fi +rm -f conftest.err conftest.i conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5 +$as_echo "$ac_header_preproc" >&6; } + +# So? What about this header? +case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in #(( + yes:no: ) + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5 +$as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 +$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} + ;; + no:yes:* ) + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5 +$as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: check for missing prerequisite headers?" >&5 +$as_echo "$as_me: WARNING: $2: check for missing prerequisite headers?" 
>&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5 +$as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&5 +$as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 +$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} + ;; +esac + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +$as_echo_n "checking for $2... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +else + eval "$3=\$ac_header_compiler" +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_c_check_header_mongrel + +# ac_fn_c_compute_int LINENO EXPR VAR INCLUDES +# -------------------------------------------- +# Tries to find the compile-time value of EXPR in a program that includes +# INCLUDES, setting VAR accordingly. Returns whether the value could be +# computed +ac_fn_c_compute_int () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + if test "$cross_compiling" = yes; then + # Depending upon the size, compute the lo and hi bounds. +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +int +main () +{ +static int test_array [1 - 2 * !(($2) >= 0)]; +test_array [0] = 0; +return test_array [0]; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_lo=0 ac_mid=0 + while :; do + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +int +main () +{ +static int test_array [1 - 2 * !(($2) <= $ac_mid)]; +test_array [0] = 0; +return test_array [0]; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_hi=$ac_mid; break +else + as_fn_arith $ac_mid + 1 && ac_lo=$as_val + if test $ac_lo -le $ac_mid; then + ac_lo= ac_hi= + break + fi + as_fn_arith 2 '*' $ac_mid + 1 && ac_mid=$as_val +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + done +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +int +main () +{ +static int test_array [1 - 2 * !(($2) < 0)]; +test_array [0] = 0; +return test_array [0]; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_hi=-1 ac_mid=-1 + while :; do + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +int +main () +{ +static int test_array [1 - 2 * !(($2) >= $ac_mid)]; +test_array [0] = 0; +return test_array [0]; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_lo=$ac_mid; break +else + as_fn_arith '(' $ac_mid ')' - 1 && ac_hi=$as_val + if test $ac_mid -le $ac_hi; then + ac_lo= ac_hi= + break + fi + as_fn_arith 2 '*' $ac_mid && ac_mid=$as_val +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + done +else + ac_lo= ac_hi= +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +# Binary search between lo and hi bounds. +while test "x$ac_lo" != "x$ac_hi"; do + as_fn_arith '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo && ac_mid=$as_val + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/
+$4
+int
+main ()
+{
+static int test_array [1 - 2 * !(($2) <= $ac_mid)];
+test_array [0] = 0;
+return test_array [0];
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  ac_hi=$ac_mid
+else
+  as_fn_arith '(' $ac_mid ')' + 1 && ac_lo=$as_val
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+done
+case $ac_lo in #((
+?*) eval "$3=\$ac_lo"; ac_retval=0 ;;
+'') ac_retval=1 ;;
+esac
+  else
+    cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+$4
+static long int longval () { return $2; }
+static unsigned long int ulongval () { return $2; }
+#include <stdio.h>
+#include <stdlib.h>
+int
+main ()
+{
+
+  FILE *f = fopen ("conftest.val", "w");
+  if (! f)
+    return 1;
+  if (($2) < 0)
+    {
+      long int i = longval ();
+      if (i != ($2))
+	return 1;
+      fprintf (f, "%ld", i);
+    }
+  else
+    {
+      unsigned long int i = ulongval ();
+      if (i != ($2))
+	return 1;
+      fprintf (f, "%lu", i);
+    }
+  /* Do not output a trailing newline, as this causes \r\n confusion
+     on some platforms.  */
+  return ferror (f) || fclose (f) != 0;
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_run "$LINENO"; then :
+  echo >>conftest.val; read $3 <conftest.val; ac_retval=0
+else
+  ac_retval=1
+fi
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
+    conftest.$ac_objext conftest.beam conftest.$ac_ext conftest.val
+rm -f conftest.val
+
+  fi
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+  as_fn_set_status $ac_retval
+
+} # ac_fn_c_compute_int
+
+# ac_fn_c_check_member LINENO AGGR MEMBER VAR INCLUDES
+# ----------------------------------------------------
+# Tries to find if the field MEMBER exists in type AGGR, after including
+# INCLUDES, setting cache variable VAR accordingly.
+ac_fn_c_check_member ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2.$3" >&5
+$as_echo_n "checking for $2.$3... " >&6; }
+if eval \${$4+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+$5
+int
+main ()
+{
+static $2 ac_aggr;
+if (ac_aggr.$3)
+return 0;
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  eval "$4=yes"
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+$5
+int
+main ()
+{
+static $2 ac_aggr;
+if (sizeof ac_aggr.$3)
+return 0;
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  eval "$4=yes"
+else
+  eval "$4=no"
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+eval ac_res=\$$4
+       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+
+} # ac_fn_c_check_member
+
+# ac_fn_cxx_try_run LINENO
+# ------------------------
+# Try to link conftest.$ac_ext, and return whether this succeeded. Assumes
+# that executables *can* be run.
+ac_fn_cxx_try_run ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  if { { ac_try="$ac_link"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_link") 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } && { ac_try='./conftest$ac_exeext'
+  { { case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_try") 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; }; then :
+  ac_retval=0
+else
+  $as_echo "$as_me: program exited with status $ac_status" >&5
+       $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+       ac_retval=$ac_status
+fi
+  rm -rf conftest.dSYM conftest_ipa8_conftest.oo
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+  as_fn_set_status $ac_retval
+
+} # ac_fn_cxx_try_run
+
+# ac_fn_cxx_check_func LINENO FUNC VAR
+# ------------------------------------
+# Tests whether FUNC exists, setting the cache variable VAR accordingly
+ac_fn_cxx_check_func ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if eval \${$3+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+/* Define $2 to an innocuous variant, in case <limits.h> declares $2.
+   For example, HP-UX 11i <limits.h> declares gettimeofday.  */
+#define $2 innocuous_$2
+
+/* System header to define __stub macros and hopefully few prototypes,
+    which can conflict with char $2 (); below.
+    Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+    <limits.h> exists even on freestanding compilers.  */
+
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+
+#undef $2
+
+/* Override any GCC internal prototype to avoid an error.
+   Use char because int might match the return type of a GCC
+   builtin and then its argument prototype would still apply.  */
+#ifdef __cplusplus
+extern "C"
+#endif
+char $2 ();
+/* The GNU C library defines this for functions which it implements
+    to always fail with ENOSYS.  Some functions are actually named
+    something starting with __ and the normal name is an alias.  */
+#if defined __stub_$2 || defined __stub___$2
+choke me
+#endif
+
+int
+main ()
+{
+return $2 ();
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_link "$LINENO"; then :
+  eval "$3=yes"
+else
+  eval "$3=no"
+fi
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext conftest.$ac_ext
+fi
+eval ac_res=\$$3
+       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+
+} # ac_fn_cxx_check_func
+cat >config.log <<_ACEOF
+This file contains any messages produced by compilers while
+running configure, to aid debugging if configure makes a mistake.
+
+It was created by bacula $as_me 9.4.2, which was
+generated by GNU Autoconf 2.69.  Invocation command line was
+
+  $ $0 $@
+
+_ACEOF
+exec 5>>config.log
+{
+cat <<_ASUNAME
+## --------- ##
+## Platform.
## +## --------- ## + +hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` +uname -m = `(uname -m) 2>/dev/null || echo unknown` +uname -r = `(uname -r) 2>/dev/null || echo unknown` +uname -s = `(uname -s) 2>/dev/null || echo unknown` +uname -v = `(uname -v) 2>/dev/null || echo unknown` + +/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` +/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` + +/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` +/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` +/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` +/usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` +/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` +/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` +/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` + +_ASUNAME + +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + $as_echo "PATH: $as_dir" + done +IFS=$as_save_IFS + +} >&5 + +cat >&5 <<_ACEOF + + +## ----------- ## +## Core tests. ## +## ----------- ## + +_ACEOF + + +# Keep a trace of the command line. +# Strip out --no-create and --no-recursion so they do not pile up. +# Strip out --silent because we don't want to record it for future runs. +# Also quote any args containing shell meta-characters. +# Make two passes to allow for proper duplicate-argument suppression. +ac_configure_args= +ac_configure_args0= +ac_configure_args1= +ac_must_keep_next=false +for ac_pass in 1 2 +do + for ac_arg + do + case $ac_arg in + -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil) + continue ;; + *\'*) + ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; + esac + case $ac_pass in + 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; + 2) + as_fn_append ac_configure_args1 " '$ac_arg'" + if test $ac_must_keep_next = true; then + ac_must_keep_next=false # Got value, back to normal. + else + case $ac_arg in + *=* | --config-cache | -C | -disable-* | --disable-* \ + | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ + | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ + | -with-* | --with-* | -without-* | --without-* | --x) + case "$ac_configure_args0 " in + "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; + esac + ;; + -* ) ac_must_keep_next=true ;; + esac + fi + as_fn_append ac_configure_args " '$ac_arg'" + ;; + esac + done +done +{ ac_configure_args0=; unset ac_configure_args0;} +{ ac_configure_args1=; unset ac_configure_args1;} + +# When interrupted or exit'd, cleanup temporary files, and complete +# config.log. We remove comments because anyway the quotes in there +# would cause problems or look ugly. +# WARNING: Use '\'' to represent an apostrophe within the trap. +# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. +trap 'exit_status=$? + # Save into config.log some information that might help in debugging. + { + echo + + $as_echo "## ---------------- ## +## Cache variables. 
## +## ---------------- ##" + echo + # The following way of writing the cache mishandles newlines in values, +( + for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do + eval ac_val=\$$ac_var + case $ac_val in #( + *${as_nl}*) + case $ac_var in #( + *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 +$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; + esac + case $ac_var in #( + _ | IFS | as_nl) ;; #( + BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( + *) { eval $ac_var=; unset $ac_var;} ;; + esac ;; + esac + done + (set) 2>&1 | + case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( + *${as_nl}ac_space=\ *) + sed -n \ + "s/'\''/'\''\\\\'\'''\''/g; + s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" + ;; #( + *) + sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" + ;; + esac | + sort +) + echo + + $as_echo "## ----------------- ## +## Output variables. ## +## ----------------- ##" + echo + for ac_var in $ac_subst_vars + do + eval ac_val=\$$ac_var + case $ac_val in + *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; + esac + $as_echo "$ac_var='\''$ac_val'\''" + done | sort + echo + + if test -n "$ac_subst_files"; then + $as_echo "## ------------------- ## +## File substitutions. ## +## ------------------- ##" + echo + for ac_var in $ac_subst_files + do + eval ac_val=\$$ac_var + case $ac_val in + *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; + esac + $as_echo "$ac_var='\''$ac_val'\''" + done | sort + echo + fi + + if test -s confdefs.h; then + $as_echo "## ----------- ## +## confdefs.h. ## +## ----------- ##" + echo + cat confdefs.h + echo + fi + test "$ac_signal" != 0 && + $as_echo "$as_me: caught signal $ac_signal" + $as_echo "$as_me: exit $exit_status" + } >&5 + rm -f core *.core core.conftest.* && + rm -f -r conftest* confdefs* conf$$* $ac_clean_files && + exit $exit_status +' 0 +for ac_signal in 1 2 13 15; do + trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal +done +ac_signal=0 + +# confdefs.h avoids OS command line length limits that DEFS can exceed. +rm -f -r conftest* confdefs.h + +$as_echo "/* confdefs.h */" > confdefs.h + +# Predefined preprocessor variables. + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_NAME "$PACKAGE_NAME" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_TARNAME "$PACKAGE_TARNAME" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_VERSION "$PACKAGE_VERSION" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_STRING "$PACKAGE_STRING" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_URL "$PACKAGE_URL" +_ACEOF + + +# Let the site file select an alternate cache file if it wants to. +# Prefer an explicitly selected file to automatically selected ones. +ac_site_file1=NONE +ac_site_file2=NONE +if test -n "$CONFIG_SITE"; then + # We do not want a PATH search for config.site. 
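+  # For example, invoking `CONFIG_SITE=my-config.site ./configure' reads that
+  # file relative to the current directory (the ./ prefix is added below);
+  # such a site file can pre-seed cache values, e.g. a hypothetical
+  # `ac_cv_path_TRUEPRG=/bin/true', before the checks further down run.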
+ case $CONFIG_SITE in #(( + -*) ac_site_file1=./$CONFIG_SITE;; + */*) ac_site_file1=$CONFIG_SITE;; + *) ac_site_file1=./$CONFIG_SITE;; + esac +elif test "x$prefix" != xNONE; then + ac_site_file1=$prefix/share/config.site + ac_site_file2=$prefix/etc/config.site +else + ac_site_file1=$ac_default_prefix/share/config.site + ac_site_file2=$ac_default_prefix/etc/config.site +fi +for ac_site_file in "$ac_site_file1" "$ac_site_file2" +do + test "x$ac_site_file" = xNONE && continue + if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 +$as_echo "$as_me: loading site script $ac_site_file" >&6;} + sed 's/^/| /' "$ac_site_file" >&5 + . "$ac_site_file" \ + || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "failed to load site script $ac_site_file +See \`config.log' for more details" "$LINENO" 5; } + fi +done + +if test -r "$cache_file"; then + # Some versions of bash will fail to source /dev/null (special files + # actually), so we avoid doing that. DJGPP emulates it as a regular file. + if test /dev/null != "$cache_file" && test -f "$cache_file"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 +$as_echo "$as_me: loading cache $cache_file" >&6;} + case $cache_file in + [\\/]* | ?:[\\/]* ) . "$cache_file";; + *) . "./$cache_file";; + esac + fi +else + { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 +$as_echo "$as_me: creating cache $cache_file" >&6;} + >$cache_file +fi + +as_fn_append ac_header_list " stdlib.h" +as_fn_append ac_header_list " unistd.h" +as_fn_append ac_header_list " sys/param.h" +# Check that the precious variables saved in the cache have kept the same +# value. +ac_cache_corrupted=false +for ac_var in $ac_precious_vars; do + eval ac_old_set=\$ac_cv_env_${ac_var}_set + eval ac_new_set=\$ac_env_${ac_var}_set + eval ac_old_val=\$ac_cv_env_${ac_var}_value + eval ac_new_val=\$ac_env_${ac_var}_value + case $ac_old_set,$ac_new_set in + set,) + { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 +$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} + ac_cache_corrupted=: ;; + ,set) + { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 +$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} + ac_cache_corrupted=: ;; + ,);; + *) + if test "x$ac_old_val" != "x$ac_new_val"; then + # differences in whitespace do not lead to failure. 
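+      # A typical way to land here is a tree first configured with
+      # `./configure CC=gcc' and later re-run as `./configure CC=clang';
+      # the mismatch is reported below and the suggested recovery is
+      # `make distclean' and/or removing $cache_file before configuring again.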
+ ac_old_val_w=`echo x $ac_old_val` + ac_new_val_w=`echo x $ac_new_val` + if test "$ac_old_val_w" != "$ac_new_val_w"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 +$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} + ac_cache_corrupted=: + else + { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 +$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} + eval $ac_var=\$ac_old_val + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 +$as_echo "$as_me: former value: \`$ac_old_val'" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 +$as_echo "$as_me: current value: \`$ac_new_val'" >&2;} + fi;; + esac + # Pass precious variables to config.status. + if test "$ac_new_set" = set; then + case $ac_new_val in + *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; + *) ac_arg=$ac_var=$ac_new_val ;; + esac + case " $ac_configure_args " in + *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. + *) as_fn_append ac_configure_args " '$ac_arg'" ;; + esac + fi +done +if $ac_cache_corrupted; then + { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 +$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} + as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 +fi +## -------------------- ## +## Main body of script. ## +## -------------------- ## + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + + + +BUILD_DIR=`pwd` +cd .. +TOP_DIR=`pwd` +cd ${BUILD_DIR} + + +ac_aux_dir= +for ac_dir in ${BUILD_DIR}/autoconf "$srcdir"/${BUILD_DIR}/autoconf; do + if test -f "$ac_dir/install-sh"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/install-sh -c" + break + elif test -f "$ac_dir/install.sh"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/install.sh -c" + break + elif test -f "$ac_dir/shtool"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/shtool install -c" + break + fi +done +if test -z "$ac_aux_dir"; then + as_fn_error $? "cannot find install-sh, install.sh, or shtool in ${BUILD_DIR}/autoconf \"$srcdir\"/${BUILD_DIR}/autoconf" "$LINENO" 5 +fi + +# These three variables are undocumented and unsupported, +# and are intended to be withdrawn in a future Autoconf release. +# They can cause serious problems if a builder's source tree is in a directory +# whose full name contains unusual characters. +ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var. +ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var. +ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var. + + +ac_config_headers="$ac_config_headers src/config.h:autoconf/config.h.in" + + +WIN32BUILDDIR=${BUILD_DIR}/src/win32 +WIN32MAINDIR=${BUILD_DIR} +WIN32TOPDIR=${TOP_DIR} + + + + +for ac_prog in true +do + # Extract the first word of "$ac_prog", so it can be a program name with args. 
+set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_TRUEPRG+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $TRUEPRG in + [\\/]* | ?:[\\/]*) + ac_cv_path_TRUEPRG="$TRUEPRG" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_TRUEPRG="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + ;; +esac +fi +TRUEPRG=$ac_cv_path_TRUEPRG +if test -n "$TRUEPRG"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $TRUEPRG" >&5 +$as_echo "$TRUEPRG" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$TRUEPRG" && break +done +test -n "$TRUEPRG" || TRUEPRG=":" + +for ac_prog in false +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_FALSEPRG+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $FALSEPRG in + [\\/]* | ?:[\\/]*) + ac_cv_path_FALSEPRG="$FALSEPRG" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_FALSEPRG="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + ;; +esac +fi +FALSEPRG=$ac_cv_path_FALSEPRG +if test -n "$FALSEPRG"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $FALSEPRG" >&5 +$as_echo "$FALSEPRG" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$FALSEPRG" && break +done +test -n "$FALSEPRG" || FALSEPRG=":" + + + +post_host= +if test "x$BACULA" != x; then + post_host=`echo -${BACULA} | tr 'A-Z ' 'a-z-'` +fi +BACULA=${BACULA:-Bacula} +VERSION=`sed -n -e 's/^#define VERSION.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` +RELEASE=`sed -n -e 's/^#define RELEASE.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` +DATE=`sed -n -e 's/^#define BDATE.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` +LSMDATE=`sed -n -e 's/^#define LSMDATE.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` +BDB_VERSION=`sed -n -e 's/^#define BDB_VERSION \(.*\)$/\1/p' ${srcdir}/src/cats/cats.h` +DEPKGS_VERSION=`sed -n -e 's/^#define DEPKGS_VERSION \(.*\)$/\1/p' ${srcdir}/src/cats/cats.h` +DEPKGS_QT_VERSION=`sed -n -e 's/^#define DEPKGS_QT_VERSION.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` +BQT_VERSION=`sed -n -e 's/^#define BQT_VERSION.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` +VIX_VERSION=`sed -n -e 's/^#define VIX_VERSION.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` +JAVA_VERSION=`sed -n -e 's/^#define JAVA_VERSION.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` +NDMP_VERSION=`sed -n -e 's/^#define NDMP_VERSION.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` +LIBRSYNC_VERSION=`sed -n -e 's/^#define LIBRSYNC_VERSION.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` + + +LIBBAC_LT_RELEASE=`sed -n -e 
's/^#.*LIBBAC_LT_RELEASE.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` +LIBBACCFG_LT_RELEASE=`sed -n -e 's/^#.*LIBBACCFG_LT_RELEASE.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` +LIBBACPY_LT_RELEASE=`sed -n -e 's/^#.*LIBBACPY_LT_RELEASE.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` + +LIBBAC_LT_RELEASE=${LIBBAC_LT_RELEASE:-$VERSION} +LIBBACCFG_LT_RELEASE=${LIBBACCFG_LT_RELEASE:-$VERSION} + + +LIBBACSQL_LT_RELEASE=`sed -n -e 's/^#.*LIBBACSQL_LT_RELEASE.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` +LIBBACCATS_LT_RELEASE=`sed -n -e 's/^#.*LIBBACCATS_LT_RELEASE.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` + +LIBBACSQL_LT_RELEASE=${LIBBACSQL_LT_RELEASE:-$VERSION} +LIBBACCATS_LT_RELEASE=${LIBBACCATS_LT_RELEASE:-$VERSION} + + +LIBBACFIND_LT_RELEASE=`sed -n -e 's/^#.*LIBBACFIND_LT_RELEASE.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` + +LIBBACFIND_LT_RELEASE=${LIBBACFIND_LT_RELEASE:-$VERSION} + + +PFILES="platforms/Makefile" + +echo "configuring for ${BACULA} $VERSION ($DATE)" + + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. +set dummy ${ac_tool_prefix}gcc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="${ac_tool_prefix}gcc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_CC"; then + ac_ct_CC=$CC + # Extract the first word of "gcc", so it can be a program name with args. +set dummy gcc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CC="gcc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_CC=$ac_cv_prog_ac_ct_CC +if test -n "$ac_ct_CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 +$as_echo "$ac_ct_CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_CC" = x; then + CC="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + CC=$ac_ct_CC + fi +else + CC="$ac_cv_prog_CC" +fi + +if test -z "$CC"; then + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. +set dummy ${ac_tool_prefix}cc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="${ac_tool_prefix}cc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + fi +fi +if test -z "$CC"; then + # Extract the first word of "cc", so it can be a program name with args. +set dummy cc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else + ac_prog_rejected=no +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then + ac_prog_rejected=yes + continue + fi + ac_cv_prog_CC="cc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +if test $ac_prog_rejected = yes; then + # We found a bogon in the path, so make sure we never use it. + set dummy $ac_cv_prog_CC + shift + if test $# != 0; then + # We chose a different compiler from the bogus one. + # However, it has the same basename, so the bogon will be chosen + # first if we set CC to just the basename; use the full file name. 
+ shift + ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" + fi +fi +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$CC"; then + if test -n "$ac_tool_prefix"; then + for ac_prog in cl.exe + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$CC" && break + done +fi +if test -z "$CC"; then + ac_ct_CC=$CC + for ac_prog in cl.exe +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CC="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_CC=$ac_cv_prog_ac_ct_CC +if test -n "$ac_ct_CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 +$as_echo "$ac_ct_CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$ac_ct_CC" && break +done + + if test "x$ac_ct_CC" = x; then + CC="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + CC=$ac_ct_CC + fi +fi + +fi + + +test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "no acceptable C compiler found in \$PATH +See \`config.log' for more details" "$LINENO" 5; } + +# Provide some information about the compiler. 
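# [Editorial sketch, not part of the upstream configure script] The search
# that finishes above tries ${ac_tool_prefix}gcc, gcc, ${ac_tool_prefix}cc,
# cc (rejecting /usr/ucb/cc) and finally cl.exe, caching the winner in
# ac_cv_prog_CC.  Exporting CC/CXX before running configure short-circuits
# the probe entirely, e.g.:
#
#   CC=clang CXX=clang++ ./configure    # hypothetical invocation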
+$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 +set X $ac_compile +ac_compiler=$2 +for ac_option in --version -v -V -qversion; do + { { ac_try="$ac_compiler $ac_option >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compiler $ac_option >&5") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + sed '10a\ +... rest of stderr output deleted ... + 10q' conftest.err >conftest.er1 + cat conftest.er1 >&5 + fi + rm -f conftest.er1 conftest.err + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } +done + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +ac_clean_files_save=$ac_clean_files +ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" +# Try to create an executable without -o first, disregard a.out. +# It will help us diagnose broken compilers, and finding out an intuition +# of exeext. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 +$as_echo_n "checking whether the C compiler works... " >&6; } +ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` + +# The possible output files: +ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" + +ac_rmfiles= +for ac_file in $ac_files +do + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; + * ) ac_rmfiles="$ac_rmfiles $ac_file";; + esac +done +rm -f $ac_rmfiles + +if { { ac_try="$ac_link_default" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link_default") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then : + # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. +# So ignore a value of `no', otherwise this would lead to `EXEEXT = no' +# in a Makefile. We should not override ac_cv_exeext if it was cached, +# so that the user can short-circuit this test for compilers unknown to +# Autoconf. +for ac_file in $ac_files '' +do + test -f "$ac_file" || continue + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) + ;; + [ab].out ) + # We found the default executable, but exeext='' is most + # certainly right. + break;; + *.* ) + if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; + then :; else + ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` + fi + # We set ac_cv_exeext here because the later test for it is not + # safe: cross compilers may not add the suffix if given an `-o' + # argument, so we may need to know it at that point already. + # Even if this section looks crufty: it has the advantage of + # actually working. 
+ break;; + * ) + break;; + esac +done +test "$ac_cv_exeext" = no && ac_cv_exeext= + +else + ac_file='' +fi +if test -z "$ac_file"; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +$as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error 77 "C compiler cannot create executables +See \`config.log' for more details" "$LINENO" 5; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5 +$as_echo_n "checking for C compiler default output file name... " >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 +$as_echo "$ac_file" >&6; } +ac_exeext=$ac_cv_exeext + +rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out +ac_clean_files=$ac_clean_files_save +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 +$as_echo_n "checking for suffix of executables... " >&6; } +if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then : + # If both `conftest.exe' and `conftest' are `present' (well, observable) +# catch `conftest.exe'. For instance with Cygwin, `ls conftest' will +# work properly (i.e., refer to `conftest.exe'), while it won't with +# `rm'. +for ac_file in conftest.exe conftest conftest.*; do + test -f "$ac_file" || continue + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; + *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` + break;; + * ) break;; + esac +done +else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "cannot compute suffix of executables: cannot compile and link +See \`config.log' for more details" "$LINENO" 5; } +fi +rm -f conftest conftest$ac_cv_exeext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 +$as_echo "$ac_cv_exeext" >&6; } + +rm -f conftest.$ac_ext +EXEEXT=$ac_cv_exeext +ac_exeext=$EXEEXT +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +int +main () +{ +FILE *f = fopen ("conftest.out", "w"); + return ferror (f) || fclose (f) != 0; + + ; + return 0; +} +_ACEOF +ac_clean_files="$ac_clean_files conftest.out" +# Check that the compiler produces executables we can run. If not, either +# the compiler is broken, or we cross compile. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 +$as_echo_n "checking whether we are cross compiling... " >&6; } +if test "$cross_compiling" != yes; then + { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; } + if { ac_try='./conftest$ac_cv_exeext' + { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; }; then + cross_compiling=no + else + if test "$cross_compiling" = maybe; then + cross_compiling=yes + else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "cannot run C compiled programs. +If you meant to cross compile, use \`--host'. +See \`config.log' for more details" "$LINENO" 5; } + fi + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 +$as_echo "$cross_compiling" >&6; } + +rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out +ac_clean_files=$ac_clean_files_save +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 +$as_echo_n "checking for suffix of object files... " >&6; } +if ${ac_cv_objext+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.o conftest.obj +if { { ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compile") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then : + for ac_file in conftest.o conftest.obj conftest.*; do + test -f "$ac_file" || continue; + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; + *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` + break;; + esac +done +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "cannot compute suffix of object files: cannot compile +See \`config.log' for more details" "$LINENO" 5; } +fi +rm -f conftest.$ac_cv_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 +$as_echo "$ac_cv_objext" >&6; } +OBJEXT=$ac_cv_objext +ac_objext=$OBJEXT +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 +$as_echo_n "checking whether we are using the GNU C compiler... " >&6; } +if ${ac_cv_c_compiler_gnu+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ +#ifndef __GNUC__ + choke me +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_compiler_gnu=yes +else + ac_compiler_gnu=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_cv_c_compiler_gnu=$ac_compiler_gnu + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 +$as_echo "$ac_cv_c_compiler_gnu" >&6; } +if test $ac_compiler_gnu = yes; then + GCC=yes +else + GCC= +fi +ac_test_CFLAGS=${CFLAGS+set} +ac_save_CFLAGS=$CFLAGS +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 +$as_echo_n "checking whether $CC accepts -g... 
" >&6; } +if ${ac_cv_prog_cc_g+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_save_c_werror_flag=$ac_c_werror_flag + ac_c_werror_flag=yes + ac_cv_prog_cc_g=no + CFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_g=yes +else + CFLAGS="" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + +else + ac_c_werror_flag=$ac_save_c_werror_flag + CFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_g=yes +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_c_werror_flag=$ac_save_c_werror_flag +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 +$as_echo "$ac_cv_prog_cc_g" >&6; } +if test "$ac_test_CFLAGS" = set; then + CFLAGS=$ac_save_CFLAGS +elif test $ac_cv_prog_cc_g = yes; then + if test "$GCC" = yes; then + CFLAGS="-g -O2" + else + CFLAGS="-g" + fi +else + if test "$GCC" = yes; then + CFLAGS="-O2" + else + CFLAGS= + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 +$as_echo_n "checking for $CC option to accept ISO C89... " >&6; } +if ${ac_cv_prog_cc_c89+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_cv_prog_cc_c89=no +ac_save_CC=$CC +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include +struct stat; +/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ +struct buf { int x; }; +FILE * (*rcsopen) (struct buf *, struct stat *, int); +static char *e (p, i) + char **p; + int i; +{ + return p[i]; +} +static char *f (char * (*g) (char **, int), char **p, ...) +{ + char *s; + va_list v; + va_start (v,p); + s = g (p, va_arg (v,int)); + va_end (v); + return s; +} + +/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has + function prototypes and stuff, but not '\xHH' hex character constants. + These don't provoke an error unfortunately, instead are silently treated + as 'x'. The following induces an error, until -std is added to get + proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an + array size at least. It's necessary to write '\x00'==0 to get something + that's true only with -std. */ +int osf4_cc_array ['\x00' == 0 ? 1 : -1]; + +/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters + inside strings and character constants. */ +#define FOO(x) 'x' +int xlc6_cc_array[FOO(a) == 'x' ? 
1 : -1]; + +int test (int i, double x); +struct s1 {int (*f) (int a);}; +struct s2 {int (*f) (double a);}; +int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); +int argc; +char **argv; +int +main () +{ +return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; + ; + return 0; +} +_ACEOF +for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ + -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" +do + CC="$ac_save_CC $ac_arg" + if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_c89=$ac_arg +fi +rm -f core conftest.err conftest.$ac_objext + test "x$ac_cv_prog_cc_c89" != "xno" && break +done +rm -f conftest.$ac_ext +CC=$ac_save_CC + +fi +# AC_CACHE_VAL +case "x$ac_cv_prog_cc_c89" in + x) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 +$as_echo "none needed" >&6; } ;; + xno) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 +$as_echo "unsupported" >&6; } ;; + *) + CC="$CC $ac_cv_prog_cc_c89" + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 +$as_echo "$ac_cv_prog_cc_c89" >&6; } ;; +esac +if test "x$ac_cv_prog_cc_c89" != xno; then : + +fi + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu +if test -z "$CXX"; then + if test -n "$CCC"; then + CXX=$CCC + else + if test -n "$ac_tool_prefix"; then + for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CXX"; then + ac_cv_prog_CXX="$CXX" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CXX="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CXX=$ac_cv_prog_CXX +if test -n "$CXX"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXX" >&5 +$as_echo "$CXX" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$CXX" && break + done +fi +if test -z "$CXX"; then + ac_ct_CXX=$CXX + for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_CXX"; then + ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CXX="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_CXX=$ac_cv_prog_ac_ct_CXX +if test -n "$ac_ct_CXX"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CXX" >&5 +$as_echo "$ac_ct_CXX" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$ac_ct_CXX" && break +done + + if test "x$ac_ct_CXX" = x; then + CXX="g++" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + CXX=$ac_ct_CXX + fi +fi + + fi +fi +# Provide some information about the compiler. +$as_echo "$as_me:${as_lineno-$LINENO}: checking for C++ compiler version" >&5 +set X $ac_compile +ac_compiler=$2 +for ac_option in --version -v -V -qversion; do + { { ac_try="$ac_compiler $ac_option >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compiler $ac_option >&5") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + sed '10a\ +... rest of stderr output deleted ... + 10q' conftest.err >conftest.er1 + cat conftest.er1 >&5 + fi + rm -f conftest.er1 conftest.err + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } +done + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C++ compiler" >&5 +$as_echo_n "checking whether we are using the GNU C++ compiler... " >&6; } +if ${ac_cv_cxx_compiler_gnu+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ +#ifndef __GNUC__ + choke me +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ac_compiler_gnu=yes +else + ac_compiler_gnu=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_cv_cxx_compiler_gnu=$ac_compiler_gnu + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_cxx_compiler_gnu" >&5 +$as_echo "$ac_cv_cxx_compiler_gnu" >&6; } +if test $ac_compiler_gnu = yes; then + GXX=yes +else + GXX= +fi +ac_test_CXXFLAGS=${CXXFLAGS+set} +ac_save_CXXFLAGS=$CXXFLAGS +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX accepts -g" >&5 +$as_echo_n "checking whether $CXX accepts -g... " >&6; } +if ${ac_cv_prog_cxx_g+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_save_cxx_werror_flag=$ac_cxx_werror_flag + ac_cxx_werror_flag=yes + ac_cv_prog_cxx_g=no + CXXFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ac_cv_prog_cxx_g=yes +else + CXXFLAGS="" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + +else + ac_cxx_werror_flag=$ac_save_cxx_werror_flag + CXXFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ac_cv_prog_cxx_g=yes +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_cxx_werror_flag=$ac_save_cxx_werror_flag +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_g" >&5 +$as_echo "$ac_cv_prog_cxx_g" >&6; } +if test "$ac_test_CXXFLAGS" = set; then + CXXFLAGS=$ac_save_CXXFLAGS +elif test $ac_cv_prog_cxx_g = yes; then + if test "$GXX" = yes; then + CXXFLAGS="-g -O2" + else + CXXFLAGS="-g" + fi +else + if test "$GXX" = yes; then + CXXFLAGS="-O2" + else + CXXFLAGS= + fi +fi +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +if test "x$CC" != xcc; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC and cc understand -c and -o together" >&5 +$as_echo_n "checking whether $CC and cc understand -c and -o together... " >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether cc understands -c and -o together" >&5 +$as_echo_n "checking whether cc understands -c and -o together... " >&6; } +fi +set dummy $CC; ac_cc=`$as_echo "$2" | + sed 's/[^a-zA-Z0-9_]/_/g;s/^[0-9]/_/'` +if eval \${ac_cv_prog_cc_${ac_cc}_c_o+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +# Make sure it works both with $CC and with simple cc. +# We do the test twice because some compilers refuse to overwrite an +# existing .o file with -o, though they will create one. +ac_try='$CC -c conftest.$ac_ext -o conftest2.$ac_objext >&5' +rm -f conftest2.* +if { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && + test -f conftest2.$ac_objext && { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; +then + eval ac_cv_prog_cc_${ac_cc}_c_o=yes + if test "x$CC" != xcc; then + # Test first that cc exists at all. + if { ac_try='cc -c conftest.$ac_ext >&5' + { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; }; then + ac_try='cc -c conftest.$ac_ext -o conftest2.$ac_objext >&5' + rm -f conftest2.* + if { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; } && + test -f conftest2.$ac_objext && { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; + then + # cc works too. + : + else + # cc exists but doesn't like -o. + eval ac_cv_prog_cc_${ac_cc}_c_o=no + fi + fi + fi +else + eval ac_cv_prog_cc_${ac_cc}_c_o=no +fi +rm -f core conftest* + +fi +if eval test \$ac_cv_prog_cc_${ac_cc}_c_o = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + +$as_echo "#define NO_MINUS_C_MINUS_O 1" >>confdefs.h + +fi + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5 +$as_echo_n "checking how to run the C preprocessor... " >&6; } +# On Suns, sometimes $CPP names a directory. +if test -n "$CPP" && test -d "$CPP"; then + CPP= +fi +if test -z "$CPP"; then + if ${ac_cv_prog_CPP+:} false; then : + $as_echo_n "(cached) " >&6 +else + # Double quotes because CPP needs to be expanded + for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" + do + ac_preproc_ok=false +for ac_c_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + +else + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.i conftest.$ac_ext + + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + # Broken: success on invalid input. +continue +else + # Passes both tests. +ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.i conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.i conftest.err conftest.$ac_ext +if $ac_preproc_ok; then : + break +fi + + done + ac_cv_prog_CPP=$CPP + +fi + CPP=$ac_cv_prog_CPP +else + ac_cv_prog_CPP=$CPP +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5 +$as_echo "$CPP" >&6; } +ac_preproc_ok=false +for ac_c_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + +else + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.i conftest.$ac_ext + + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + # Broken: success on invalid input. +continue +else + # Passes both tests. +ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.i conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.i conftest.err conftest.$ac_ext +if $ac_preproc_ok; then : + +else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "C preprocessor \"$CPP\" fails sanity check +See \`config.log' for more details" "$LINENO" 5; } +fi + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5 +$as_echo_n "checking for grep that handles long lines and -e... " >&6; } +if ${ac_cv_path_GREP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -z "$GREP"; then + ac_path_GREP_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in grep ggrep; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_GREP" || continue +# Check for GNU ac_path_GREP and select it if it is found. + # Check for GNU $ac_path_GREP +case `"$ac_path_GREP" --version 2>&1` in +*GNU*) + ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; +*) + ac_count=0 + $as_echo_n 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + $as_echo 'GREP' >> "conftest.nl" + "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_GREP_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_GREP="$ac_path_GREP" + ac_path_GREP_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_GREP_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_GREP"; then + as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 + fi +else + ac_cv_path_GREP=$GREP +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 +$as_echo "$ac_cv_path_GREP" >&6; } + GREP="$ac_cv_path_GREP" + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 +$as_echo_n "checking for egrep... 
" >&6; } +if ${ac_cv_path_EGREP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 + then ac_cv_path_EGREP="$GREP -E" + else + if test -z "$EGREP"; then + ac_path_EGREP_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in egrep; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_EGREP" || continue +# Check for GNU ac_path_EGREP and select it if it is found. + # Check for GNU $ac_path_EGREP +case `"$ac_path_EGREP" --version 2>&1` in +*GNU*) + ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; +*) + ac_count=0 + $as_echo_n 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + $as_echo 'EGREP' >> "conftest.nl" + "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_EGREP_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_EGREP="$ac_path_EGREP" + ac_path_EGREP_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_EGREP_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_EGREP"; then + as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 + fi +else + ac_cv_path_EGREP=$EGREP +fi + + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 +$as_echo "$ac_cv_path_EGREP" >&6; } + EGREP="$ac_cv_path_EGREP" + + +if test $ac_cv_c_compiler_gnu = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC needs -traditional" >&5 +$as_echo_n "checking whether $CC needs -traditional... " >&6; } +if ${ac_cv_prog_gcc_traditional+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_pattern="Autoconf.*'x'" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +Autoconf TIOCGETP +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "$ac_pattern" >/dev/null 2>&1; then : + ac_cv_prog_gcc_traditional=yes +else + ac_cv_prog_gcc_traditional=no +fi +rm -f conftest* + + + if test $ac_cv_prog_gcc_traditional = no; then + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +Autoconf TCGETA +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "$ac_pattern" >/dev/null 2>&1; then : + ac_cv_prog_gcc_traditional=yes +fi +rm -f conftest* + + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_gcc_traditional" >&5 +$as_echo "$ac_cv_prog_gcc_traditional" >&6; } + if test $ac_cv_prog_gcc_traditional = yes; then + CC="$CC -traditional" + fi +fi + +BASECC=`basename $CC` +have_gcc=no +if test x"$GCC" = "xyes"; then + $as_echo "#define HAVE_GCC 1" >>confdefs.h + + have_gcc=yes +fi +# Extract the first word of "$CXX", so it can be a program name with args. +set dummy $CXX; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... 
" >&6; } +if ${ac_cv_path_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $CXX in + [\\/]* | ?:[\\/]*) + ac_cv_path_CXX="$CXX" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_CXX="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + test -z "$ac_cv_path_CXX" && ac_cv_path_CXX="$CXX" + ;; +esac +fi +CXX=$ac_cv_path_CXX +if test -n "$CXX"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXX" >&5 +$as_echo "$CXX" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +if test ! -e $CXX; then + as_fn_error $? "Unable to find C++ compiler" "$LINENO" 5 +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for multiarch system" >&5 +$as_echo_n "checking for multiarch system... " >&6; } +multiarch=`$CC $CFLAGS -print-multiarch 2>/dev/null` +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $multiarch" >&5 +$as_echo "$multiarch" >&6; } + +# Find a good install program. We prefer a C program (faster), +# so one script is as good as another. But avoid the broken or +# incompatible versions: +# SysV /etc/install, /usr/sbin/install +# SunOS /usr/etc/install +# IRIX /sbin/install +# AIX /bin/install +# AmigaOS /C/install, which installs bootblocks on floppy discs +# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag +# AFS /usr/afsws/bin/install, which mishandles nonexistent args +# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" +# OS/2's system install, which has a completely different semantic +# ./install, which can be erroneously created by make from ./install.sh. +# Reject install programs that cannot install multiple files. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5 +$as_echo_n "checking for a BSD-compatible install... " >&6; } +if test -z "$INSTALL"; then +if ${ac_cv_path_install+:} false; then : + $as_echo_n "(cached) " >&6 +else + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + # Account for people who put trailing slashes in PATH elements. +case $as_dir/ in #(( + ./ | .// | /[cC]/* | \ + /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ + ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \ + /usr/ucb/* ) ;; + *) + # OSF1 and SCO ODT 3.0 have their own names for install. + # Don't use installbsd from OSF since it installs stuff as root + # by default. + for ac_prog in ginstall scoinst install; do + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext"; then + if test $ac_prog = install && + grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then + # AIX install. It has an incompatible calling convention. + : + elif test $ac_prog = install && + grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then + # program-specific install script used by HP pwplus--don't use. 
+ : + else + rm -rf conftest.one conftest.two conftest.dir + echo one > conftest.one + echo two > conftest.two + mkdir conftest.dir + if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" && + test -s conftest.one && test -s conftest.two && + test -s conftest.dir/conftest.one && + test -s conftest.dir/conftest.two + then + ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c" + break 3 + fi + fi + fi + done + done + ;; +esac + + done +IFS=$as_save_IFS + +rm -rf conftest.one conftest.two conftest.dir + +fi + if test "${ac_cv_path_install+set}" = set; then + INSTALL=$ac_cv_path_install + else + # As a last resort, use the slow shell script. Don't cache a + # value for INSTALL within a source directory, because that will + # break other packages using the cache if that directory is + # removed, or if the value is a relative name. + INSTALL=$ac_install_sh + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5 +$as_echo "$INSTALL" >&6; } + +# Use test -z because SunOS4 sh mishandles braces in ${var-val}. +# It thinks the first close brace ends the variable substitution. +test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}' + +test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' + +test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' + +# Extract the first word of "mv", so it can be a program name with args. +set dummy mv; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_MV+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $MV in + [\\/]* | ?:[\\/]*) + ac_cv_path_MV="$MV" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_MV="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + test -z "$ac_cv_path_MV" && ac_cv_path_MV="mv" + ;; +esac +fi +MV=$ac_cv_path_MV +if test -n "$MV"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MV" >&5 +$as_echo "$MV" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +# Extract the first word of "rm", so it can be a program name with args. +set dummy rm; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_REMOVE+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $REMOVE in + [\\/]* | ?:[\\/]*) + ac_cv_path_REMOVE="$REMOVE" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_REMOVE="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + test -z "$ac_cv_path_REMOVE" && ac_cv_path_REMOVE="rm" + ;; +esac +fi +REMOVE=$ac_cv_path_REMOVE +if test -n "$REMOVE"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $REMOVE" >&5 +$as_echo "$REMOVE" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +# Extract the first word of "cp", so it can be a program name with args. +set dummy cp; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_CP+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $CP in + [\\/]* | ?:[\\/]*) + ac_cv_path_CP="$CP" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_CP="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + test -z "$ac_cv_path_CP" && ac_cv_path_CP="cp" + ;; +esac +fi +CP=$ac_cv_path_CP +if test -n "$CP"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CP" >&5 +$as_echo "$CP" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +# Extract the first word of "sed", so it can be a program name with args. +set dummy sed; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_SED+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $SED in + [\\/]* | ?:[\\/]*) + ac_cv_path_SED="$SED" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_SED="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + test -z "$ac_cv_path_SED" && ac_cv_path_SED="sed" + ;; +esac +fi +SED=$ac_cv_path_SED +if test -n "$SED"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $SED" >&5 +$as_echo "$SED" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +# Extract the first word of "echo", so it can be a program name with args. +set dummy echo; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_ECHO+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $ECHO in + [\\/]* | ?:[\\/]*) + ac_cv_path_ECHO="$ECHO" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_ECHO="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + test -z "$ac_cv_path_ECHO" && ac_cv_path_ECHO="echo" + ;; +esac +fi +ECHO=$ac_cv_path_ECHO +if test -n "$ECHO"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ECHO" >&5 +$as_echo "$ECHO" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +# Extract the first word of "cmp", so it can be a program name with args. +set dummy cmp; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_CMP+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $CMP in + [\\/]* | ?:[\\/]*) + ac_cv_path_CMP="$CMP" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_CMP="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + test -z "$ac_cv_path_CMP" && ac_cv_path_CMP="cmp" + ;; +esac +fi +CMP=$ac_cv_path_CMP +if test -n "$CMP"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CMP" >&5 +$as_echo "$CMP" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +# Extract the first word of "tbl", so it can be a program name with args. +set dummy tbl; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_TBL+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $TBL in + [\\/]* | ?:[\\/]*) + ac_cv_path_TBL="$TBL" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_TBL="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + test -z "$ac_cv_path_TBL" && ac_cv_path_TBL="tbl" + ;; +esac +fi +TBL=$ac_cv_path_TBL +if test -n "$TBL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $TBL" >&5 +$as_echo "$TBL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +# Extract the first word of "ar", so it can be a program name with args. +set dummy ar; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_AR+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $AR in + [\\/]* | ?:[\\/]*) + ac_cv_path_AR="$AR" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_AR="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + test -z "$ac_cv_path_AR" && ac_cv_path_AR="ar" + ;; +esac +fi +AR=$ac_cv_path_AR +if test -n "$AR"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AR" >&5 +$as_echo "$AR" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +# Extract the first word of "openssl", so it can be a program name with args. +set dummy openssl; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_OPENSSL+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $OPENSSL in + [\\/]* | ?:[\\/]*) + ac_cv_path_OPENSSL="$OPENSSL" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_OPENSSL="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + test -z "$ac_cv_path_OPENSSL" && ac_cv_path_OPENSSL="none" + ;; +esac +fi +OPENSSL=$ac_cv_path_OPENSSL +if test -n "$OPENSSL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OPENSSL" >&5 +$as_echo "$OPENSSL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +# Extract the first word of "mtx", so it can be a program name with args. +set dummy mtx; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_MTX+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $MTX in + [\\/]* | ?:[\\/]*) + ac_cv_path_MTX="$MTX" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_MTX="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + test -z "$ac_cv_path_MTX" && ac_cv_path_MTX="mtx" + ;; +esac +fi +MTX=$ac_cv_path_MTX +if test -n "$MTX"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MTX" >&5 +$as_echo "$MTX" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +# Extract the first word of "dd", so it can be a program name with args. +set dummy dd; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_DD+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $DD in + [\\/]* | ?:[\\/]*) + ac_cv_path_DD="$DD" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_DD="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + test -z "$ac_cv_path_DD" && ac_cv_path_DD="dd" + ;; +esac +fi +DD=$ac_cv_path_DD +if test -n "$DD"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DD" >&5 +$as_echo "$DD" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +# Extract the first word of "mkisofs", so it can be a program name with args. +set dummy mkisofs; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_MKISOFS+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $MKISOFS in + [\\/]* | ?:[\\/]*) + ac_cv_path_MKISOFS="$MKISOFS" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_MKISOFS="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + test -z "$ac_cv_path_MKISOFS" && ac_cv_path_MKISOFS="mkisofs" + ;; +esac +fi +MKISOFS=$ac_cv_path_MKISOFS +if test -n "$MKISOFS"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MKISOFS" >&5 +$as_echo "$MKISOFS" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +# Extract the first word of "python", so it can be a program name with args. +set dummy python; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_PYTHON+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $PYTHON in + [\\/]* | ?:[\\/]*) + ac_cv_path_PYTHON="$PYTHON" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_PYTHON="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + test -z "$ac_cv_path_PYTHON" && ac_cv_path_PYTHON="python" + ;; +esac +fi +PYTHON=$ac_cv_path_PYTHON +if test -n "$PYTHON"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PYTHON" >&5 +$as_echo "$PYTHON" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +# Extract the first word of "growisofs", so it can be a program name with args. +set dummy growisofs; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_GROWISOFS+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $GROWISOFS in + [\\/]* | ?:[\\/]*) + ac_cv_path_GROWISOFS="$GROWISOFS" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_GROWISOFS="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + test -z "$ac_cv_path_GROWISOFS" && ac_cv_path_GROWISOFS="growisofs" + ;; +esac +fi +GROWISOFS=$ac_cv_path_GROWISOFS +if test -n "$GROWISOFS"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $GROWISOFS" >&5 +$as_echo "$GROWISOFS" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +# Extract the first word of "dvd+rw-mediainfo", so it can be a program name with args. +set dummy dvd+rw-mediainfo; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_DVDRWMEDIAINFO+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $DVDRWMEDIAINFO in + [\\/]* | ?:[\\/]*) + ac_cv_path_DVDRWMEDIAINFO="$DVDRWMEDIAINFO" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_DVDRWMEDIAINFO="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + test -z "$ac_cv_path_DVDRWMEDIAINFO" && ac_cv_path_DVDRWMEDIAINFO="dvd+rw-mediainfo" + ;; +esac +fi +DVDRWMEDIAINFO=$ac_cv_path_DVDRWMEDIAINFO +if test -n "$DVDRWMEDIAINFO"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DVDRWMEDIAINFO" >&5 +$as_echo "$DVDRWMEDIAINFO" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +# Extract the first word of "dvd+rw-format", so it can be a program name with args. +set dummy dvd+rw-format; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_DVDRWFORMAT+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $DVDRWFORMAT in + [\\/]* | ?:[\\/]*) + ac_cv_path_DVDRWFORMAT="$DVDRWFORMAT" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_DVDRWFORMAT="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + test -z "$ac_cv_path_DVDRWFORMAT" && ac_cv_path_DVDRWFORMAT="dvd+rw-format" + ;; +esac +fi +DVDRWFORMAT=$ac_cv_path_DVDRWFORMAT +if test -n "$DVDRWFORMAT"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DVDRWFORMAT" >&5 +$as_echo "$DVDRWFORMAT" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +# Extract the first word of "pkg-config", so it can be a program name with args. +set dummy pkg-config; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... 
" >&6; } +if ${ac_cv_path_PKGCONFIG+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $PKGCONFIG in + [\\/]* | ?:[\\/]*) + ac_cv_path_PKGCONFIG="$PKGCONFIG" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_PKGCONFIG="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + test -z "$ac_cv_path_PKGCONFIG" && ac_cv_path_PKGCONFIG="pkg-config" + ;; +esac +fi +PKGCONFIG=$ac_cv_path_PKGCONFIG +if test -n "$PKGCONFIG"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PKGCONFIG" >&5 +$as_echo "$PKGCONFIG" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +# Extract the first word of "qmake", so it can be a program name with args. +set dummy qmake; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_QMAKE+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $QMAKE in + [\\/]* | ?:[\\/]*) + ac_cv_path_QMAKE="$QMAKE" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_QMAKE="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + test -z "$ac_cv_path_QMAKE" && ac_cv_path_QMAKE="none" + ;; +esac +fi +QMAKE=$ac_cv_path_QMAKE +if test -n "$QMAKE"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $QMAKE" >&5 +$as_echo "$QMAKE" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +# Extract the first word of "gmake", so it can be a program name with args. +set dummy gmake; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_GMAKE+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $GMAKE in + [\\/]* | ?:[\\/]*) + ac_cv_path_GMAKE="$GMAKE" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_GMAKE="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + test -z "$ac_cv_path_GMAKE" && ac_cv_path_GMAKE="none" + ;; +esac +fi +GMAKE=$ac_cv_path_GMAKE +if test -n "$GMAKE"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $GMAKE" >&5 +$as_echo "$GMAKE" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +# Extract the first word of "pidof", so it can be a program name with args. +set dummy pidof; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... 
" >&6; } +if ${ac_cv_path_PIDOF+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $PIDOF in + [\\/]* | ?:[\\/]*) + ac_cv_path_PIDOF="$PIDOF" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_PIDOF="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + test -z "$ac_cv_path_PIDOF" && ac_cv_path_PIDOF="pidof" + ;; +esac +fi +PIDOF=$ac_cv_path_PIDOF +if test -n "$PIDOF"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PIDOF" >&5 +$as_echo "$PIDOF" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +for ac_prog in gawk mawk nawk awk +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_AWK+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$AWK"; then + ac_cv_prog_AWK="$AWK" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_AWK="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +AWK=$ac_cv_prog_AWK +if test -n "$AWK"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5 +$as_echo "$AWK" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$AWK" && break +done + +# Some AWK programs fail, so test it and warn the user +if echo xfoo | $AWK 'BEGIN { prog=ARGV1; ARGC=1 } + { if ((prog == $2) || (("(" prog ")") == $2) || + (("" prog "") == $2) || + ((prog ":") == $2)) { print $1 ; exit 0 } }' xfoo>/dev/null; then :; +else + as_fn_error $? "!!!!!!!!! WARNING !!!!!!!!!!!!!! + The regex engine of $AWK is too broken to be used you + might want to install GNU AWK. + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" "$LINENO" 5 +fi +THE_AWK=$AWK +# Extract the first word of "$THE_AWK", so it can be a program name with args. +set dummy $THE_AWK; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_AWK+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $AWK in + [\\/]* | ?:[\\/]*) + ac_cv_path_AWK="$AWK" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_AWK="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + test -z "$ac_cv_path_AWK" && ac_cv_path_AWK="$THE_AWK" + ;; +esac +fi +AWK=$ac_cv_path_AWK +if test -n "$AWK"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5 +$as_echo "$AWK" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + + +test -n "$ARFLAG" || ARFLAGS="cr" + + +MAKE_SHELL=/bin/sh + + + + + + + +use_libtool=yes +# Check whether --enable-libtool was given. +if test "${enable_libtool+set}" = set; then : + enableval=$enable_libtool; + if test x$enableval = xno; then + use_libtool=no + fi + + +fi + +case `pwd` in + *\ * | *\ *) + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&5 +$as_echo "$as_me: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&2;} ;; +esac + + + +macro_version='2.4.2' +macro_revision='1.3337' + + + + + + + + + + + + + +ltmain="$ac_aux_dir/ltmain.sh" + +# Make sure we can run config.sub. +$SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 || + as_fn_error $? "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5 + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking build system type" >&5 +$as_echo_n "checking build system type... " >&6; } +if ${ac_cv_build+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_build_alias=$build_alias +test "x$ac_build_alias" = x && + ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"` +test "x$ac_build_alias" = x && + as_fn_error $? "cannot guess build type; you must specify one" "$LINENO" 5 +ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` || + as_fn_error $? "$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5 + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5 +$as_echo "$ac_cv_build" >&6; } +case $ac_cv_build in +*-*-*) ;; +*) as_fn_error $? "invalid value of canonical build" "$LINENO" 5;; +esac +build=$ac_cv_build +ac_save_IFS=$IFS; IFS='-' +set x $ac_cv_build +shift +build_cpu=$1 +build_vendor=$2 +shift; shift +# Remember, the first character of IFS is used to create $*, +# except with old shells: +build_os=$* +IFS=$ac_save_IFS +case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking host system type" >&5 +$as_echo_n "checking host system type... " >&6; } +if ${ac_cv_host+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test "x$host_alias" = x; then + ac_cv_host=$ac_cv_build +else + ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` || + as_fn_error $? "$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5 +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5 +$as_echo "$ac_cv_host" >&6; } +case $ac_cv_host in +*-*-*) ;; +*) as_fn_error $? "invalid value of canonical host" "$LINENO" 5;; +esac +host=$ac_cv_host +ac_save_IFS=$IFS; IFS='-' +set x $ac_cv_host +shift +host_cpu=$1 +host_vendor=$2 +shift; shift +# Remember, the first character of IFS is used to create $*, +# except with old shells: +host_os=$* +IFS=$ac_save_IFS +case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac + + +# Backslashify metacharacters that are still active within +# double-quoted strings. 
+sed_quote_subst='s/\(["`$\\]\)/\\\1/g' + +# Same as above, but do not quote variable references. +double_quote_subst='s/\(["`\\]\)/\\\1/g' + +# Sed substitution to delay expansion of an escaped shell variable in a +# double_quote_subst'ed string. +delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' + +# Sed substitution to delay expansion of an escaped single quote. +delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g' + +# Sed substitution to avoid accidental globbing in evaled expressions +no_glob_subst='s/\*/\\\*/g' + +ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO +ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to print strings" >&5 +$as_echo_n "checking how to print strings... " >&6; } +# Test print first, because it will be a builtin if present. +if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ + test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then + ECHO='print -r --' +elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then + ECHO='printf %s\n' +else + # Use this function as a fallback that always works. + func_fallback_echo () + { + eval 'cat <<_LTECHO_EOF +$1 +_LTECHO_EOF' + } + ECHO='func_fallback_echo' +fi + +# func_echo_all arg... +# Invoke $ECHO with all args, space-separated. +func_echo_all () +{ + $ECHO "" +} + +case "$ECHO" in + printf*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: printf" >&5 +$as_echo "printf" >&6; } ;; + print*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: print -r" >&5 +$as_echo "print -r" >&6; } ;; + *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: cat" >&5 +$as_echo "cat" >&6; } ;; +esac + + + + + + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a sed that does not truncate output" >&5 +$as_echo_n "checking for a sed that does not truncate output... " >&6; } +if ${ac_cv_path_SED+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/ + for ac_i in 1 2 3 4 5 6 7; do + ac_script="$ac_script$as_nl$ac_script" + done + echo "$ac_script" 2>/dev/null | sed 99q >conftest.sed + { ac_script=; unset ac_script;} + if test -z "$SED"; then + ac_path_SED_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in sed gsed; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_SED="$as_dir/$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_SED" || continue +# Check for GNU ac_path_SED and select it if it is found. 
+ # Check for GNU $ac_path_SED +case `"$ac_path_SED" --version 2>&1` in +*GNU*) + ac_cv_path_SED="$ac_path_SED" ac_path_SED_found=:;; +*) + ac_count=0 + $as_echo_n 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + $as_echo '' >> "conftest.nl" + "$ac_path_SED" -f conftest.sed < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_SED_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_SED="$ac_path_SED" + ac_path_SED_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_SED_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_SED"; then + as_fn_error $? "no acceptable sed could be found in \$PATH" "$LINENO" 5 + fi +else + ac_cv_path_SED=$SED +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_SED" >&5 +$as_echo "$ac_cv_path_SED" >&6; } + SED="$ac_cv_path_SED" + rm -f conftest.sed + +test -z "$SED" && SED=sed +Xsed="$SED -e 1s/^X//" + + + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for fgrep" >&5 +$as_echo_n "checking for fgrep... " >&6; } +if ${ac_cv_path_FGREP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if echo 'ab*c' | $GREP -F 'ab*c' >/dev/null 2>&1 + then ac_cv_path_FGREP="$GREP -F" + else + if test -z "$FGREP"; then + ac_path_FGREP_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in fgrep; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_FGREP="$as_dir/$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_FGREP" || continue +# Check for GNU ac_path_FGREP and select it if it is found. + # Check for GNU $ac_path_FGREP +case `"$ac_path_FGREP" --version 2>&1` in +*GNU*) + ac_cv_path_FGREP="$ac_path_FGREP" ac_path_FGREP_found=:;; +*) + ac_count=0 + $as_echo_n 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + $as_echo 'FGREP' >> "conftest.nl" + "$ac_path_FGREP" FGREP < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_FGREP_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_FGREP="$ac_path_FGREP" + ac_path_FGREP_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_FGREP_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_FGREP"; then + as_fn_error $? "no acceptable fgrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 + fi +else + ac_cv_path_FGREP=$FGREP +fi + + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_FGREP" >&5 +$as_echo "$ac_cv_path_FGREP" >&6; } + FGREP="$ac_cv_path_FGREP" + + +test -z "$GREP" && GREP=grep + + + + + + + + + + + + + + + + + + + +# Check whether --with-gnu-ld was given. 
+if test "${with_gnu_ld+set}" = set; then :
+ withval=$with_gnu_ld; test "$withval" = no || with_gnu_ld=yes
+else
+ with_gnu_ld=no
+fi
+
+ac_prog=ld
+if test "$GCC" = yes; then
+ # Check if gcc -print-prog-name=ld gives a path.
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5
+$as_echo_n "checking for ld used by $CC... " >&6; }
+ case $host in
+ *-*-mingw*)
+ # gcc leaves a trailing carriage return which upsets mingw
+ ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;;
+ *)
+ ac_prog=`($CC -print-prog-name=ld) 2>&5` ;;
+ esac
+ case $ac_prog in
+ # Accept absolute paths.
+ [\\/]* | ?:[\\/]*)
+ re_direlt='/[^/][^/]*/\.\./'
+ # Canonicalize the pathname of ld
+ ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'`
+ while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do
+ ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"`
+ done
+ test -z "$LD" && LD="$ac_prog"
+ ;;
+ "")
+ # If it fails, then pretend we aren't using GCC.
+ ac_prog=ld
+ ;;
+ *)
+ # If it is relative, then search for the first ld in PATH.
+ with_gnu_ld=unknown
+ ;;
+ esac
+elif test "$with_gnu_ld" = yes; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5
+$as_echo_n "checking for GNU ld... " >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5
+$as_echo_n "checking for non-GNU ld... " >&6; }
+fi
+if ${lt_cv_path_LD+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -z "$LD"; then
+ lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+ for ac_dir in $PATH; do
+ IFS="$lt_save_ifs"
+ test -z "$ac_dir" && ac_dir=.
+ if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then
+ lt_cv_path_LD="$ac_dir/$ac_prog"
+ # Check to see if the program is GNU ld. I'd rather use --version,
+ # but apparently some variants of GNU ld only accept -v.
+ # Break only if it was the GNU/non-GNU ld that we prefer.
+ case `"$lt_cv_path_LD" -v 2>&1 </dev/null` in
+ *GNU* | *'with BFD'*)
+ test "$with_gnu_ld" != no && break
+ ;;
+ *)
+ test "$with_gnu_ld" != yes && break
+ ;;
+ esac
+ fi
+ done
+ IFS="$lt_save_ifs"
+else
+ lt_cv_path_LD="$LD" # Let the user override the test with a path.
+fi
+fi
+
+LD="$lt_cv_path_LD"
+if test -n "$LD"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LD" >&5
+$as_echo "$LD" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5
+$as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; }
+if ${lt_cv_prog_gnu_ld+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ # I'd rather use --version here, but apparently some GNU lds only accept -v.
+case `$LD -v 2>&1 </dev/null` in
+*GNU* | *'with BFD'*)
+ lt_cv_prog_gnu_ld=yes
+ ;;
+*)
+ lt_cv_prog_gnu_ld=no
+ ;;
+esac
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_gnu_ld" >&5
+$as_echo "$lt_cv_prog_gnu_ld" >&6; }
+with_gnu_ld=$lt_cv_prog_gnu_ld
+
+
+
+
+
+
+
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for BSD- or MS-compatible name lister (nm)" >&5
+$as_echo_n "checking for BSD- or MS-compatible name lister (nm)... " >&6; }
+if ${lt_cv_path_NM+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$NM"; then
+ # Let the user override the test.
+ lt_cv_path_NM="$NM"
+else
+ lt_nm_to_check="${ac_tool_prefix}nm"
+ if test -n "$ac_tool_prefix" && test "$build" = "$host"; then
+ lt_nm_to_check="$lt_nm_to_check nm"
+ fi
+ for lt_tmp_nm in $lt_nm_to_check; do
+ lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+ for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do
+ IFS="$lt_save_ifs"
+ test -z "$ac_dir" && ac_dir=.
+ tmp_nm="$ac_dir/$lt_tmp_nm"
+ if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext" ; then
+ # Check to see if the nm accepts a BSD-compat flag.
+ # Adding the `sed 1q' prevents false positives on HP-UX, which says: + # nm: unknown option "B" ignored + # Tru64's nm complains that /dev/null is an invalid object file + case `"$tmp_nm" -B /dev/null 2>&1 | sed '1q'` in + */dev/null* | *'Invalid file or object type'*) + lt_cv_path_NM="$tmp_nm -B" + break + ;; + *) + case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in + */dev/null*) + lt_cv_path_NM="$tmp_nm -p" + break + ;; + *) + lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but + continue # so that we can try to find one that supports BSD flags + ;; + esac + ;; + esac + fi + done + IFS="$lt_save_ifs" + done + : ${lt_cv_path_NM=no} +fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_NM" >&5 +$as_echo "$lt_cv_path_NM" >&6; } +if test "$lt_cv_path_NM" != "no"; then + NM="$lt_cv_path_NM" +else + # Didn't find any BSD compatible name lister, look for dumpbin. + if test -n "$DUMPBIN"; then : + # Let the user override the test. + else + if test -n "$ac_tool_prefix"; then + for ac_prog in dumpbin "link -dump" + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_DUMPBIN+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$DUMPBIN"; then + ac_cv_prog_DUMPBIN="$DUMPBIN" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_DUMPBIN="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +DUMPBIN=$ac_cv_prog_DUMPBIN +if test -n "$DUMPBIN"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DUMPBIN" >&5 +$as_echo "$DUMPBIN" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$DUMPBIN" && break + done +fi +if test -z "$DUMPBIN"; then + ac_ct_DUMPBIN=$DUMPBIN + for ac_prog in dumpbin "link -dump" +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_DUMPBIN+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_DUMPBIN"; then + ac_cv_prog_ac_ct_DUMPBIN="$ac_ct_DUMPBIN" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_DUMPBIN="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_DUMPBIN=$ac_cv_prog_ac_ct_DUMPBIN +if test -n "$ac_ct_DUMPBIN"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DUMPBIN" >&5 +$as_echo "$ac_ct_DUMPBIN" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$ac_ct_DUMPBIN" && break +done + + if test "x$ac_ct_DUMPBIN" = x; then + DUMPBIN=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + DUMPBIN=$ac_ct_DUMPBIN + fi +fi + + case `$DUMPBIN -symbols /dev/null 2>&1 | sed '1q'` in + *COFF*) + DUMPBIN="$DUMPBIN -symbols" + ;; + *) + DUMPBIN=: + ;; + esac + fi + + if test "$DUMPBIN" != ":"; then + NM="$DUMPBIN" + fi +fi +test -z "$NM" && NM=nm + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking the name lister ($NM) interface" >&5 +$as_echo_n "checking the name lister ($NM) interface... " >&6; } +if ${lt_cv_nm_interface+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_nm_interface="BSD nm" + echo "int some_variable = 0;" > conftest.$ac_ext + (eval echo "\"\$as_me:$LINENO: $ac_compile\"" >&5) + (eval "$ac_compile" 2>conftest.err) + cat conftest.err >&5 + (eval echo "\"\$as_me:$LINENO: $NM \\\"conftest.$ac_objext\\\"\"" >&5) + (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out) + cat conftest.err >&5 + (eval echo "\"\$as_me:$LINENO: output\"" >&5) + cat conftest.out >&5 + if $GREP 'External.*some_variable' conftest.out > /dev/null; then + lt_cv_nm_interface="MS dumpbin" + fi + rm -f conftest* +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_nm_interface" >&5 +$as_echo "$lt_cv_nm_interface" >&6; } + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ln -s works" >&5 +$as_echo_n "checking whether ln -s works... " >&6; } +LN_S=$as_ln_s +if test "$LN_S" = "ln -s"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no, using $LN_S" >&5 +$as_echo "no, using $LN_S" >&6; } +fi + +# find the maximum length of command line arguments +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking the maximum length of command line arguments" >&5 +$as_echo_n "checking the maximum length of command line arguments... " >&6; } +if ${lt_cv_sys_max_cmd_len+:} false; then : + $as_echo_n "(cached) " >&6 +else + i=0 + teststring="ABCD" + + case $build_os in + msdosdjgpp*) + # On DJGPP, this test can blow up pretty badly due to problems in libc + # (any single argument exceeding 2000 bytes causes a buffer overrun + # during glob expansion). Even if it were fixed, the result of this + # check would be larger than it should be. + lt_cv_sys_max_cmd_len=12288; # 12K is about right + ;; + + gnu*) + # Under GNU Hurd, this test is not required because there is + # no limit to the length of command line arguments. + # Libtool will interpret -1 as no limit whatsoever + lt_cv_sys_max_cmd_len=-1; + ;; + + cygwin* | mingw* | cegcc*) + # On Win9x/ME, this test blows up -- it succeeds, but takes + # about 5 minutes as the teststring grows exponentially. 
+ # Worse, since 9x/ME are not pre-emptively multitasking, + # you end up with a "frozen" computer, even though with patience + # the test eventually succeeds (with a max line length of 256k). + # Instead, let's just punt: use the minimum linelength reported by + # all of the supported platforms: 8192 (on NT/2K/XP). + lt_cv_sys_max_cmd_len=8192; + ;; + + mint*) + # On MiNT this can take a long time and run out of memory. + lt_cv_sys_max_cmd_len=8192; + ;; + + amigaos*) + # On AmigaOS with pdksh, this test takes hours, literally. + # So we just punt and use a minimum line length of 8192. + lt_cv_sys_max_cmd_len=8192; + ;; + + netbsd* | freebsd* | openbsd* | darwin* | dragonfly*) + # This has been around since 386BSD, at least. Likely further. + if test -x /sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` + elif test -x /usr/sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` + else + lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs + fi + # And add a safety zone + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` + ;; + + interix*) + # We know the value 262144 and hardcode it with a safety zone (like BSD) + lt_cv_sys_max_cmd_len=196608 + ;; + + os2*) + # The test takes a long time on OS/2. + lt_cv_sys_max_cmd_len=8192 + ;; + + osf*) + # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure + # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not + # nice to cause kernel panics so lets avoid the loop below. + # First set a reasonable default. + lt_cv_sys_max_cmd_len=16384 + # + if test -x /sbin/sysconfig; then + case `/sbin/sysconfig -q proc exec_disable_arg_limit` in + *1*) lt_cv_sys_max_cmd_len=-1 ;; + esac + fi + ;; + sco3.2v5*) + lt_cv_sys_max_cmd_len=102400 + ;; + sysv5* | sco5v6* | sysv4.2uw2*) + kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null` + if test -n "$kargmax"; then + lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[ ]//'` + else + lt_cv_sys_max_cmd_len=32768 + fi + ;; + *) + lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null` + if test -n "$lt_cv_sys_max_cmd_len" && \ + test undefined != "$lt_cv_sys_max_cmd_len"; then + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` + else + # Make teststring a little bigger before we do anything with it. + # a 1K string should be a reasonable start. + for i in 1 2 3 4 5 6 7 8 ; do + teststring=$teststring$teststring + done + SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} + # If test is not a shell built-in, we'll probably end up computing a + # maximum length that is only half of the actual maximum length, but + # we can't tell. + while { test "X"`env echo "$teststring$teststring" 2>/dev/null` \ + = "X$teststring$teststring"; } >/dev/null 2>&1 && + test $i != 17 # 1/2 MB should be enough + do + i=`expr $i + 1` + teststring=$teststring$teststring + done + # Only check the string length outside the loop. + lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1` + teststring= + # Add a significant safety factor because C++ compilers can tack on + # massive amounts of additional arguments before passing them to the + # linker. It appears as though 1/2 is a usable value. 
+ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2` + fi + ;; + esac + +fi + +if test -n $lt_cv_sys_max_cmd_len ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sys_max_cmd_len" >&5 +$as_echo "$lt_cv_sys_max_cmd_len" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: none" >&5 +$as_echo "none" >&6; } +fi +max_cmd_len=$lt_cv_sys_max_cmd_len + + + + + + +: ${CP="cp -f"} +: ${MV="mv -f"} +: ${RM="rm -f"} + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the shell understands some XSI constructs" >&5 +$as_echo_n "checking whether the shell understands some XSI constructs... " >&6; } +# Try some XSI features +xsi_shell=no +( _lt_dummy="a/b/c" + test "${_lt_dummy##*/},${_lt_dummy%/*},${_lt_dummy#??}"${_lt_dummy%"$_lt_dummy"}, \ + = c,a/b,b/c, \ + && eval 'test $(( 1 + 1 )) -eq 2 \ + && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \ + && xsi_shell=yes +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $xsi_shell" >&5 +$as_echo "$xsi_shell" >&6; } + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the shell understands \"+=\"" >&5 +$as_echo_n "checking whether the shell understands \"+=\"... " >&6; } +lt_shell_append=no +( foo=bar; set foo baz; eval "$1+=\$2" && test "$foo" = barbaz ) \ + >/dev/null 2>&1 \ + && lt_shell_append=yes +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_shell_append" >&5 +$as_echo "$lt_shell_append" >&6; } + + +if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then + lt_unset=unset +else + lt_unset=false +fi + + + + + +# test EBCDIC or ASCII +case `echo X|tr X '\101'` in + A) # ASCII based system + # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr + lt_SP2NL='tr \040 \012' + lt_NL2SP='tr \015\012 \040\040' + ;; + *) # EBCDIC based system + lt_SP2NL='tr \100 \n' + lt_NL2SP='tr \r\n \100\100' + ;; +esac + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to $host format" >&5 +$as_echo_n "checking how to convert $build file names to $host format... " >&6; } +if ${lt_cv_to_host_file_cmd+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $host in + *-*-mingw* ) + case $build in + *-*-mingw* ) # actually msys + lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 + ;; + *-*-cygwin* ) + lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 + ;; + * ) # otherwise, assume *nix + lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 + ;; + esac + ;; + *-*-cygwin* ) + case $build in + *-*-mingw* ) # actually msys + lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin + ;; + *-*-cygwin* ) + lt_cv_to_host_file_cmd=func_convert_file_noop + ;; + * ) # otherwise, assume *nix + lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin + ;; + esac + ;; + * ) # unhandled hosts (and "normal" native builds) + lt_cv_to_host_file_cmd=func_convert_file_noop + ;; +esac + +fi + +to_host_file_cmd=$lt_cv_to_host_file_cmd +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_host_file_cmd" >&5 +$as_echo "$lt_cv_to_host_file_cmd" >&6; } + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to toolchain format" >&5 +$as_echo_n "checking how to convert $build file names to toolchain format... " >&6; } +if ${lt_cv_to_tool_file_cmd+:} false; then : + $as_echo_n "(cached) " >&6 +else + #assume ordinary cross tools, or native build. 
+lt_cv_to_tool_file_cmd=func_convert_file_noop +case $host in + *-*-mingw* ) + case $build in + *-*-mingw* ) # actually msys + lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 + ;; + esac + ;; +esac + +fi + +to_tool_file_cmd=$lt_cv_to_tool_file_cmd +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_tool_file_cmd" >&5 +$as_echo "$lt_cv_to_tool_file_cmd" >&6; } + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $LD option to reload object files" >&5 +$as_echo_n "checking for $LD option to reload object files... " >&6; } +if ${lt_cv_ld_reload_flag+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_ld_reload_flag='-r' +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_reload_flag" >&5 +$as_echo "$lt_cv_ld_reload_flag" >&6; } +reload_flag=$lt_cv_ld_reload_flag +case $reload_flag in +"" | " "*) ;; +*) reload_flag=" $reload_flag" ;; +esac +reload_cmds='$LD$reload_flag -o $output$reload_objs' +case $host_os in + cygwin* | mingw* | pw32* | cegcc*) + if test "$GCC" != yes; then + reload_cmds=false + fi + ;; + darwin*) + if test "$GCC" = yes; then + reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs' + else + reload_cmds='$LD$reload_flag -o $output$reload_objs' + fi + ;; +esac + + + + + + + + + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}objdump", so it can be a program name with args. +set dummy ${ac_tool_prefix}objdump; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_OBJDUMP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$OBJDUMP"; then + ac_cv_prog_OBJDUMP="$OBJDUMP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_OBJDUMP="${ac_tool_prefix}objdump" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +OBJDUMP=$ac_cv_prog_OBJDUMP +if test -n "$OBJDUMP"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OBJDUMP" >&5 +$as_echo "$OBJDUMP" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_OBJDUMP"; then + ac_ct_OBJDUMP=$OBJDUMP + # Extract the first word of "objdump", so it can be a program name with args. +set dummy objdump; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_OBJDUMP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_OBJDUMP"; then + ac_cv_prog_ac_ct_OBJDUMP="$ac_ct_OBJDUMP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_OBJDUMP="objdump" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_OBJDUMP=$ac_cv_prog_ac_ct_OBJDUMP +if test -n "$ac_ct_OBJDUMP"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OBJDUMP" >&5 +$as_echo "$ac_ct_OBJDUMP" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_OBJDUMP" = x; then + OBJDUMP="false" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + OBJDUMP=$ac_ct_OBJDUMP + fi +else + OBJDUMP="$ac_cv_prog_OBJDUMP" +fi + +test -z "$OBJDUMP" && OBJDUMP=objdump + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to recognize dependent libraries" >&5 +$as_echo_n "checking how to recognize dependent libraries... " >&6; } +if ${lt_cv_deplibs_check_method+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_file_magic_cmd='$MAGIC_CMD' +lt_cv_file_magic_test_file= +lt_cv_deplibs_check_method='unknown' +# Need to set the preceding variable on all platforms that support +# interlibrary dependencies. +# 'none' -- dependencies not supported. +# `unknown' -- same as none, but documents that we really don't know. +# 'pass_all' -- all dependencies passed with no checks. +# 'test_compile' -- check by making test program. +# 'file_magic [[regex]]' -- check by looking for files in library path +# which responds to the $file_magic_cmd with a given extended regex. +# If you have `file' or equivalent on your system and you're not sure +# whether `pass_all' will *always* work, you probably want this one. + +case $host_os in +aix[4-9]*) + lt_cv_deplibs_check_method=pass_all + ;; + +beos*) + lt_cv_deplibs_check_method=pass_all + ;; + +bsdi[45]*) + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib)' + lt_cv_file_magic_cmd='/usr/bin/file -L' + lt_cv_file_magic_test_file=/shlib/libc.so + ;; + +cygwin*) + # func_win32_libid is a shell function defined in ltmain.sh + lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' + lt_cv_file_magic_cmd='func_win32_libid' + ;; + +mingw* | pw32*) + # Base MSYS/MinGW do not provide the 'file' command needed by + # func_win32_libid shell function, so use a weaker test based on 'objdump', + # unless we find 'file', for example because we are cross-compiling. + # func_win32_libid assumes BSD nm, so disallow it if using MS dumpbin. + if ( test "$lt_cv_nm_interface" = "BSD nm" && file / ) >/dev/null 2>&1; then + lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' + lt_cv_file_magic_cmd='func_win32_libid' + else + # Keep this pattern in sync with the one in func_win32_libid. + lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' + lt_cv_file_magic_cmd='$OBJDUMP -f' + fi + ;; + +cegcc*) + # use the weaker test based on 'objdump'. See mingw*. + lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?' 
+ lt_cv_file_magic_cmd='$OBJDUMP -f' + ;; + +darwin* | rhapsody*) + lt_cv_deplibs_check_method=pass_all + ;; + +freebsd* | dragonfly*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then + case $host_cpu in + i*86 ) + # Not sure whether the presence of OpenBSD here was a mistake. + # Let's accept both of them until this is cleared up. + lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[3-9]86 (compact )?demand paged shared library' + lt_cv_file_magic_cmd=/usr/bin/file + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` + ;; + esac + else + lt_cv_deplibs_check_method=pass_all + fi + ;; + +haiku*) + lt_cv_deplibs_check_method=pass_all + ;; + +hpux10.20* | hpux11*) + lt_cv_file_magic_cmd=/usr/bin/file + case $host_cpu in + ia64*) + lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - IA64' + lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so + ;; + hppa*64*) + lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF[ -][0-9][0-9])(-bit)?( [LM]SB)? shared object( file)?[, -]* PA-RISC [0-9]\.[0-9]' + lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl + ;; + *) + lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|PA-RISC[0-9]\.[0-9]) shared library' + lt_cv_file_magic_test_file=/usr/lib/libc.sl + ;; + esac + ;; + +interix[3-9]*) + # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|\.a)$' + ;; + +irix5* | irix6* | nonstopux*) + case $LD in + *-32|*"-32 ") libmagic=32-bit;; + *-n32|*"-n32 ") libmagic=N32;; + *-64|*"-64 ") libmagic=64-bit;; + *) libmagic=never-match;; + esac + lt_cv_deplibs_check_method=pass_all + ;; + +# This must be glibc/ELF. +linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) + lt_cv_deplibs_check_method=pass_all + ;; + +netbsd* | netbsdelf*-gnu) + if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|_pic\.a)$' + fi + ;; + +newos6*) + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (executable|dynamic lib)' + lt_cv_file_magic_cmd=/usr/bin/file + lt_cv_file_magic_test_file=/usr/lib/libnls.so + ;; + +*nto* | *qnx*) + lt_cv_deplibs_check_method=pass_all + ;; + +openbsd*) + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|\.so|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' + fi + ;; + +osf3* | osf4* | osf5*) + lt_cv_deplibs_check_method=pass_all + ;; + +rdos*) + lt_cv_deplibs_check_method=pass_all + ;; + +solaris*) + lt_cv_deplibs_check_method=pass_all + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + lt_cv_deplibs_check_method=pass_all + ;; + +sysv4 | sysv4.3*) + case $host_vendor in + motorola) + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib) M[0-9][0-9]* Version [0-9]' + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` + ;; + ncr) + lt_cv_deplibs_check_method=pass_all + ;; + sequent) + lt_cv_file_magic_cmd='/bin/file' + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [LM]SB (shared object|dynamic lib )' + ;; + sni) + lt_cv_file_magic_cmd='/bin/file' + lt_cv_deplibs_check_method="file_magic ELF [0-9][0-9]*-bit [LM]SB dynamic lib" + 
lt_cv_file_magic_test_file=/lib/libc.so + ;; + siemens) + lt_cv_deplibs_check_method=pass_all + ;; + pc) + lt_cv_deplibs_check_method=pass_all + ;; + esac + ;; + +tpf*) + lt_cv_deplibs_check_method=pass_all + ;; +esac + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_deplibs_check_method" >&5 +$as_echo "$lt_cv_deplibs_check_method" >&6; } + +file_magic_glob= +want_nocaseglob=no +if test "$build" = "$host"; then + case $host_os in + mingw* | pw32*) + if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then + want_nocaseglob=yes + else + file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[\1]\/[\1]\/g;/g"` + fi + ;; + esac +fi + +file_magic_cmd=$lt_cv_file_magic_cmd +deplibs_check_method=$lt_cv_deplibs_check_method +test -z "$deplibs_check_method" && deplibs_check_method=unknown + + + + + + + + + + + + + + + + + + + + + + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args. +set dummy ${ac_tool_prefix}dlltool; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_DLLTOOL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$DLLTOOL"; then + ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +DLLTOOL=$ac_cv_prog_DLLTOOL +if test -n "$DLLTOOL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DLLTOOL" >&5 +$as_echo "$DLLTOOL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_DLLTOOL"; then + ac_ct_DLLTOOL=$DLLTOOL + # Extract the first word of "dlltool", so it can be a program name with args. +set dummy dlltool; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_DLLTOOL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_DLLTOOL"; then + ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_DLLTOOL="dlltool" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL +if test -n "$ac_ct_DLLTOOL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DLLTOOL" >&5 +$as_echo "$ac_ct_DLLTOOL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_DLLTOOL" = x; then + DLLTOOL="false" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + DLLTOOL=$ac_ct_DLLTOOL + fi +else + DLLTOOL="$ac_cv_prog_DLLTOOL" +fi + +test -z "$DLLTOOL" && DLLTOOL=dlltool + + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to associate runtime and link libraries" >&5 +$as_echo_n "checking how to associate runtime and link libraries... " >&6; } +if ${lt_cv_sharedlib_from_linklib_cmd+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_sharedlib_from_linklib_cmd='unknown' + +case $host_os in +cygwin* | mingw* | pw32* | cegcc*) + # two different shell functions defined in ltmain.sh + # decide which to use based on capabilities of $DLLTOOL + case `$DLLTOOL --help 2>&1` in + *--identify-strict*) + lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib + ;; + *) + lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback + ;; + esac + ;; +*) + # fallback: assume linklib IS sharedlib + lt_cv_sharedlib_from_linklib_cmd="$ECHO" + ;; +esac + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sharedlib_from_linklib_cmd" >&5 +$as_echo "$lt_cv_sharedlib_from_linklib_cmd" >&6; } +sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd +test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO + + + + + + + +if test -n "$ac_tool_prefix"; then + for ac_prog in ar + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_AR+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$AR"; then + ac_cv_prog_AR="$AR" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_AR="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +AR=$ac_cv_prog_AR +if test -n "$AR"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AR" >&5 +$as_echo "$AR" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$AR" && break + done +fi +if test -z "$AR"; then + ac_ct_AR=$AR + for ac_prog in ar +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... 
" >&6; } +if ${ac_cv_prog_ac_ct_AR+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_AR"; then + ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_AR="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_AR=$ac_cv_prog_ac_ct_AR +if test -n "$ac_ct_AR"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_AR" >&5 +$as_echo "$ac_ct_AR" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$ac_ct_AR" && break +done + + if test "x$ac_ct_AR" = x; then + AR="false" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + AR=$ac_ct_AR + fi +fi + +: ${AR=ar} +: ${AR_FLAGS=cru} + + + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for archiver @FILE support" >&5 +$as_echo_n "checking for archiver @FILE support... " >&6; } +if ${lt_cv_ar_at_file+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_ar_at_file=no + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + echo conftest.$ac_objext > conftest.lst + lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&5' + { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 + (eval $lt_ar_try) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } + if test "$ac_status" -eq 0; then + # Ensure the archiver fails upon bogus file names. + rm -f conftest.$ac_objext libconftest.a + { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 + (eval $lt_ar_try) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } + if test "$ac_status" -ne 0; then + lt_cv_ar_at_file=@ + fi + fi + rm -f conftest.* libconftest.a + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ar_at_file" >&5 +$as_echo "$lt_cv_ar_at_file" >&6; } + +if test "x$lt_cv_ar_at_file" = xno; then + archiver_list_spec= +else + archiver_list_spec=$lt_cv_ar_at_file +fi + + + + + + + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. +set dummy ${ac_tool_prefix}strip; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_STRIP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$STRIP"; then + ac_cv_prog_STRIP="$STRIP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_STRIP="${ac_tool_prefix}strip" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +STRIP=$ac_cv_prog_STRIP +if test -n "$STRIP"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5 +$as_echo "$STRIP" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_STRIP"; then + ac_ct_STRIP=$STRIP + # Extract the first word of "strip", so it can be a program name with args. +set dummy strip; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_STRIP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_STRIP"; then + ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_STRIP="strip" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP +if test -n "$ac_ct_STRIP"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5 +$as_echo "$ac_ct_STRIP" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_STRIP" = x; then + STRIP=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + STRIP=$ac_ct_STRIP + fi +else + STRIP="$ac_cv_prog_STRIP" +fi + +test -z "$STRIP" && STRIP=: + + + + + + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args. +set dummy ${ac_tool_prefix}ranlib; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_RANLIB+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$RANLIB"; then + ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +RANLIB=$ac_cv_prog_RANLIB +if test -n "$RANLIB"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5 +$as_echo "$RANLIB" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_RANLIB"; then + ac_ct_RANLIB=$RANLIB + # Extract the first word of "ranlib", so it can be a program name with args. +set dummy ranlib; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... 
" >&6; } +if ${ac_cv_prog_ac_ct_RANLIB+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_RANLIB"; then + ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_RANLIB="ranlib" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB +if test -n "$ac_ct_RANLIB"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5 +$as_echo "$ac_ct_RANLIB" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_RANLIB" = x; then + RANLIB=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + RANLIB=$ac_ct_RANLIB + fi +else + RANLIB="$ac_cv_prog_RANLIB" +fi + +test -z "$RANLIB" && RANLIB=: + + + + + + +# Determine commands to create old-style static archives. +old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs' +old_postinstall_cmds='chmod 644 $oldlib' +old_postuninstall_cmds= + +if test -n "$RANLIB"; then + case $host_os in + openbsd*) + old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$tool_oldlib" + ;; + *) + old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$tool_oldlib" + ;; + esac + old_archive_cmds="$old_archive_cmds~\$RANLIB \$tool_oldlib" +fi + +case $host_os in + darwin*) + lock_old_archive_extraction=yes ;; + *) + lock_old_archive_extraction=no ;; +esac + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# If no C compiler flags were specified, use CFLAGS. +LTCFLAGS=${LTCFLAGS-"$CFLAGS"} + +# Allow CC to be a program name with arguments. +compiler=$CC + + +# Check for command to grab the raw symbol name followed by C symbol from nm. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking command to parse $NM output from $compiler object" >&5 +$as_echo_n "checking command to parse $NM output from $compiler object... " >&6; } +if ${lt_cv_sys_global_symbol_pipe+:} false; then : + $as_echo_n "(cached) " >&6 +else + +# These are sane defaults that work on at least a few old systems. +# [They come from Ultrix. What could be older than Ultrix?!! ;)] + +# Character class describing NM global symbol codes. +symcode='[BCDEGRST]' + +# Regexp to match symbols that can be accessed directly from C. +sympat='\([_A-Za-z][_A-Za-z0-9]*\)' + +# Define system-specific variables. +case $host_os in +aix*) + symcode='[BCDT]' + ;; +cygwin* | mingw* | pw32* | cegcc*) + symcode='[ABCDGISTW]' + ;; +hpux*) + if test "$host_cpu" = ia64; then + symcode='[ABCDEGRST]' + fi + ;; +irix* | nonstopux*) + symcode='[BCDEGRST]' + ;; +osf*) + symcode='[BCDEGQRST]' + ;; +solaris*) + symcode='[BDRT]' + ;; +sco3.2v5*) + symcode='[DT]' + ;; +sysv4.2uw2*) + symcode='[DT]' + ;; +sysv5* | sco5v6* | unixware* | OpenUNIX*) + symcode='[ABDT]' + ;; +sysv4) + symcode='[DFNSTU]' + ;; +esac + +# If we're using GNU nm, then use its standard symbol codes. 
+case `$NM -V 2>&1` in +*GNU* | *'with BFD'*) + symcode='[ABCDGIRSTW]' ;; +esac + +# Transform an extracted symbol line into a proper C declaration. +# Some systems (esp. on ia64) link data and code symbols differently, +# so use this general approach. +lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" + +# Transform an extracted symbol line into symbol name and symbol address +lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'" +lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'" + +# Handle CRLF in mingw tool chain +opt_cr= +case $build_os in +mingw*) + opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp + ;; +esac + +# Try without a prefix underscore, then with it. +for ac_symprfx in "" "_"; do + + # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. + symxfrm="\\1 $ac_symprfx\\2 \\2" + + # Write the raw and C identifiers. + if test "$lt_cv_nm_interface" = "MS dumpbin"; then + # Fake it for dumpbin and say T for any non-static function + # and D for any global variable. + # Also find C++ and __fastcall symbols from MSVC++, + # which start with @ or ?. + lt_cv_sys_global_symbol_pipe="$AWK '"\ +" {last_section=section; section=\$ 3};"\ +" /^COFF SYMBOL TABLE/{for(i in hide) delete hide[i]};"\ +" /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\ +" \$ 0!~/External *\|/{next};"\ +" / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\ +" {if(hide[section]) next};"\ +" {f=0}; \$ 0~/\(\).*\|/{f=1}; {printf f ? \"T \" : \"D \"};"\ +" {split(\$ 0, a, /\||\r/); split(a[2], s)};"\ +" s[1]~/^[@?]/{print s[1], s[1]; next};"\ +" s[1]~prfx {split(s[1],t,\"@\"); print t[1], substr(t[1],length(prfx))}"\ +" ' prfx=^$ac_symprfx" + else + lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" + fi + lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'" + + # Check to see that the pipe works correctly. + pipe_works=no + + rm -f conftest* + cat > conftest.$ac_ext <<_LT_EOF +#ifdef __cplusplus +extern "C" { +#endif +char nm_test_var; +void nm_test_func(void); +void nm_test_func(void){} +#ifdef __cplusplus +} +#endif +int main(){nm_test_var='a';nm_test_func();return(0);} +_LT_EOF + + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + # Now try to grab the symbols. + nlist=conftest.nm + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist\""; } >&5 + (eval $NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && test -s "$nlist"; then + # Try sorting and uniquifying the output. + if sort "$nlist" | uniq > "$nlist"T; then + mv -f "$nlist"T "$nlist" + else + rm -f "$nlist"T + fi + + # Make sure that we snagged all the symbols we need. 
+ if $GREP ' nm_test_var$' "$nlist" >/dev/null; then + if $GREP ' nm_test_func$' "$nlist" >/dev/null; then + cat <<_LT_EOF > conftest.$ac_ext +/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ +#if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE) +/* DATA imports from DLLs on WIN32 con't be const, because runtime + relocations are performed -- see ld's documentation on pseudo-relocs. */ +# define LT_DLSYM_CONST +#elif defined(__osf__) +/* This system does not cope well with relocations in const data. */ +# define LT_DLSYM_CONST +#else +# define LT_DLSYM_CONST const +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +_LT_EOF + # Now generate the symbol file. + eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext' + + cat <<_LT_EOF >> conftest.$ac_ext + +/* The mapping between symbol names and symbols. */ +LT_DLSYM_CONST struct { + const char *name; + void *address; +} +lt__PROGRAM__LTX_preloaded_symbols[] = +{ + { "@PROGRAM@", (void *) 0 }, +_LT_EOF + $SED "s/^$symcode$symcode* \(.*\) \(.*\)$/ {\"\2\", (void *) \&\2},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext + cat <<\_LT_EOF >> conftest.$ac_ext + {0, (void *) 0} +}; + +/* This works around a problem in FreeBSD linker */ +#ifdef FREEBSD_WORKAROUND +static const void *lt_preloaded_setup() { + return lt__PROGRAM__LTX_preloaded_symbols; +} +#endif + +#ifdef __cplusplus +} +#endif +_LT_EOF + # Now try linking the two files. + mv conftest.$ac_objext conftstm.$ac_objext + lt_globsym_save_LIBS=$LIBS + lt_globsym_save_CFLAGS=$CFLAGS + LIBS="conftstm.$ac_objext" + CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag" + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 + (eval $ac_link) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && test -s conftest${ac_exeext}; then + pipe_works=yes + fi + LIBS=$lt_globsym_save_LIBS + CFLAGS=$lt_globsym_save_CFLAGS + else + echo "cannot find nm_test_func in $nlist" >&5 + fi + else + echo "cannot find nm_test_var in $nlist" >&5 + fi + else + echo "cannot run $lt_cv_sys_global_symbol_pipe" >&5 + fi + else + echo "$progname: failed program was:" >&5 + cat conftest.$ac_ext >&5 + fi + rm -rf conftest* conftst* + + # Do not use the global_symbol_pipe unless it works. + if test "$pipe_works" = yes; then + break + else + lt_cv_sys_global_symbol_pipe= + fi +done + +fi + +if test -z "$lt_cv_sys_global_symbol_pipe"; then + lt_cv_sys_global_symbol_to_cdecl= +fi +if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: failed" >&5 +$as_echo "failed" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: ok" >&5 +$as_echo "ok" >&6; } +fi + +# Response file support. +if test "$lt_cv_nm_interface" = "MS dumpbin"; then + nm_file_list_spec='@' +elif $NM --help 2>/dev/null | grep '[@]FILE' >/dev/null; then + nm_file_list_spec='@' +fi + + + + + + + + + + + + + + + + + + + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for sysroot" >&5 +$as_echo_n "checking for sysroot... " >&6; } + +# Check whether --with-sysroot was given. 
+if test "${with_sysroot+set}" = set; then : + withval=$with_sysroot; +else + with_sysroot=no +fi + + +lt_sysroot= +case ${with_sysroot} in #( + yes) + if test "$GCC" = yes; then + lt_sysroot=`$CC --print-sysroot 2>/dev/null` + fi + ;; #( + /*) + lt_sysroot=`echo "$with_sysroot" | sed -e "$sed_quote_subst"` + ;; #( + no|'') + ;; #( + *) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${with_sysroot}" >&5 +$as_echo "${with_sysroot}" >&6; } + as_fn_error $? "The sysroot must be an absolute path." "$LINENO" 5 + ;; +esac + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${lt_sysroot:-no}" >&5 +$as_echo "${lt_sysroot:-no}" >&6; } + + + + + +# Check whether --enable-libtool-lock was given. +if test "${enable_libtool_lock+set}" = set; then : + enableval=$enable_libtool_lock; +fi + +test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes + +# Some flags need to be propagated to the compiler or linker for good +# libtool support. +case $host in +ia64-*-hpux*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + case `/usr/bin/file conftest.$ac_objext` in + *ELF-32*) + HPUX_IA64_MODE="32" + ;; + *ELF-64*) + HPUX_IA64_MODE="64" + ;; + esac + fi + rm -rf conftest* + ;; +*-*-irix6*) + # Find out which ABI we are using. + echo '#line '$LINENO' "configure"' > conftest.$ac_ext + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + if test "$lt_cv_prog_gnu_ld" = yes; then + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -melf32bsmip" + ;; + *N32*) + LD="${LD-ld} -melf32bmipn32" + ;; + *64-bit*) + LD="${LD-ld} -melf64bmip" + ;; + esac + else + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -32" + ;; + *N32*) + LD="${LD-ld} -n32" + ;; + *64-bit*) + LD="${LD-ld} -64" + ;; + esac + fi + fi + rm -rf conftest* + ;; + +x86_64-*kfreebsd*-gnu|x86_64-*linux*|powerpc*-*linux*| \ +s390*-*linux*|s390*-*tpf*|sparc*-*linux*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; }; then + case `/usr/bin/file conftest.o` in + *32-bit*) + case $host in + x86_64-*kfreebsd*-gnu) + LD="${LD-ld} -m elf_i386_fbsd" + ;; + x86_64-*linux*) + case `/usr/bin/file conftest.o` in + *x86-64*) + LD="${LD-ld} -m elf32_x86_64" + ;; + *) + LD="${LD-ld} -m elf_i386" + ;; + esac + ;; + powerpc64le-*) + LD="${LD-ld} -m elf32lppclinux" + ;; + powerpc64-*) + LD="${LD-ld} -m elf32ppclinux" + ;; + s390x-*linux*) + LD="${LD-ld} -m elf_s390" + ;; + sparc64-*linux*) + LD="${LD-ld} -m elf32_sparc" + ;; + esac + ;; + *64-bit*) + case $host in + x86_64-*kfreebsd*-gnu) + LD="${LD-ld} -m elf_x86_64_fbsd" + ;; + x86_64-*linux*) + LD="${LD-ld} -m elf_x86_64" + ;; + powerpcle-*) + LD="${LD-ld} -m elf64lppc" + ;; + powerpc-*) + LD="${LD-ld} -m elf64ppc" + ;; + s390*-*linux*|s390*-*tpf*) + LD="${LD-ld} -m elf64_s390" + ;; + sparc*-*linux*) + LD="${LD-ld} -m elf64_sparc" + ;; + esac + ;; + esac + fi + rm -rf conftest* + ;; + +*-*-sco3.2v5*) + # On SCO OpenServer 5, we need -belf to get full-featured binaries. + SAVE_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS -belf" + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler needs -belf" >&5 +$as_echo_n "checking whether the C compiler needs -belf... " >&6; } +if ${lt_cv_cc_needs_belf+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + lt_cv_cc_needs_belf=yes +else + lt_cv_cc_needs_belf=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_cc_needs_belf" >&5 +$as_echo "$lt_cv_cc_needs_belf" >&6; } + if test x"$lt_cv_cc_needs_belf" != x"yes"; then + # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf + CFLAGS="$SAVE_CFLAGS" + fi + ;; +*-*solaris*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + case `/usr/bin/file conftest.o` in + *64-bit*) + case $lt_cv_prog_gnu_ld in + yes*) + case $host in + i?86-*-solaris*) + LD="${LD-ld} -m elf_x86_64" + ;; + sparc*-*-solaris*) + LD="${LD-ld} -m elf64_sparc" + ;; + esac + # GNU ld 2.21 introduced _sol2 emulations. Use them if available. + if ${LD-ld} -V | grep _sol2 >/dev/null 2>&1; then + LD="${LD-ld}_sol2" + fi + ;; + *) + if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then + LD="${LD-ld} -64" + fi + ;; + esac + ;; + esac + fi + rm -rf conftest* + ;; +esac + +need_locks="$enable_libtool_lock" + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}mt", so it can be a program name with args. +set dummy ${ac_tool_prefix}mt; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... 
" >&6; } +if ${ac_cv_prog_MANIFEST_TOOL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$MANIFEST_TOOL"; then + ac_cv_prog_MANIFEST_TOOL="$MANIFEST_TOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_MANIFEST_TOOL="${ac_tool_prefix}mt" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +MANIFEST_TOOL=$ac_cv_prog_MANIFEST_TOOL +if test -n "$MANIFEST_TOOL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MANIFEST_TOOL" >&5 +$as_echo "$MANIFEST_TOOL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_MANIFEST_TOOL"; then + ac_ct_MANIFEST_TOOL=$MANIFEST_TOOL + # Extract the first word of "mt", so it can be a program name with args. +set dummy mt; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_MANIFEST_TOOL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_MANIFEST_TOOL"; then + ac_cv_prog_ac_ct_MANIFEST_TOOL="$ac_ct_MANIFEST_TOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_MANIFEST_TOOL="mt" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_MANIFEST_TOOL=$ac_cv_prog_ac_ct_MANIFEST_TOOL +if test -n "$ac_ct_MANIFEST_TOOL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_MANIFEST_TOOL" >&5 +$as_echo "$ac_ct_MANIFEST_TOOL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_MANIFEST_TOOL" = x; then + MANIFEST_TOOL=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + MANIFEST_TOOL=$ac_ct_MANIFEST_TOOL + fi +else + MANIFEST_TOOL="$ac_cv_prog_MANIFEST_TOOL" +fi + +test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $MANIFEST_TOOL is a manifest tool" >&5 +$as_echo_n "checking if $MANIFEST_TOOL is a manifest tool... " >&6; } +if ${lt_cv_path_mainfest_tool+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_path_mainfest_tool=no + echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&5 + $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out + cat conftest.err >&5 + if $GREP 'Manifest Tool' conftest.out > /dev/null; then + lt_cv_path_mainfest_tool=yes + fi + rm -f conftest* +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_mainfest_tool" >&5 +$as_echo "$lt_cv_path_mainfest_tool" >&6; } +if test "x$lt_cv_path_mainfest_tool" != xyes; then + MANIFEST_TOOL=: +fi + + + + + + + case $host_os in + rhapsody* | darwin*) + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}dsymutil", so it can be a program name with args. 
+set dummy ${ac_tool_prefix}dsymutil; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_DSYMUTIL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$DSYMUTIL"; then + ac_cv_prog_DSYMUTIL="$DSYMUTIL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_DSYMUTIL="${ac_tool_prefix}dsymutil" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +DSYMUTIL=$ac_cv_prog_DSYMUTIL +if test -n "$DSYMUTIL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DSYMUTIL" >&5 +$as_echo "$DSYMUTIL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_DSYMUTIL"; then + ac_ct_DSYMUTIL=$DSYMUTIL + # Extract the first word of "dsymutil", so it can be a program name with args. +set dummy dsymutil; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_DSYMUTIL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_DSYMUTIL"; then + ac_cv_prog_ac_ct_DSYMUTIL="$ac_ct_DSYMUTIL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_DSYMUTIL="dsymutil" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_DSYMUTIL=$ac_cv_prog_ac_ct_DSYMUTIL +if test -n "$ac_ct_DSYMUTIL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DSYMUTIL" >&5 +$as_echo "$ac_ct_DSYMUTIL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_DSYMUTIL" = x; then + DSYMUTIL=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + DSYMUTIL=$ac_ct_DSYMUTIL + fi +else + DSYMUTIL="$ac_cv_prog_DSYMUTIL" +fi + + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}nmedit", so it can be a program name with args. +set dummy ${ac_tool_prefix}nmedit; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_NMEDIT+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$NMEDIT"; then + ac_cv_prog_NMEDIT="$NMEDIT" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_NMEDIT="${ac_tool_prefix}nmedit" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +NMEDIT=$ac_cv_prog_NMEDIT +if test -n "$NMEDIT"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $NMEDIT" >&5 +$as_echo "$NMEDIT" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_NMEDIT"; then + ac_ct_NMEDIT=$NMEDIT + # Extract the first word of "nmedit", so it can be a program name with args. +set dummy nmedit; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_NMEDIT+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_NMEDIT"; then + ac_cv_prog_ac_ct_NMEDIT="$ac_ct_NMEDIT" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_NMEDIT="nmedit" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_NMEDIT=$ac_cv_prog_ac_ct_NMEDIT +if test -n "$ac_ct_NMEDIT"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_NMEDIT" >&5 +$as_echo "$ac_ct_NMEDIT" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_NMEDIT" = x; then + NMEDIT=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + NMEDIT=$ac_ct_NMEDIT + fi +else + NMEDIT="$ac_cv_prog_NMEDIT" +fi + + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}lipo", so it can be a program name with args. +set dummy ${ac_tool_prefix}lipo; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_LIPO+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$LIPO"; then + ac_cv_prog_LIPO="$LIPO" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_LIPO="${ac_tool_prefix}lipo" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +LIPO=$ac_cv_prog_LIPO +if test -n "$LIPO"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LIPO" >&5 +$as_echo "$LIPO" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_LIPO"; then + ac_ct_LIPO=$LIPO + # Extract the first word of "lipo", so it can be a program name with args. +set dummy lipo; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... 
" >&6; } +if ${ac_cv_prog_ac_ct_LIPO+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_LIPO"; then + ac_cv_prog_ac_ct_LIPO="$ac_ct_LIPO" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_LIPO="lipo" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_LIPO=$ac_cv_prog_ac_ct_LIPO +if test -n "$ac_ct_LIPO"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_LIPO" >&5 +$as_echo "$ac_ct_LIPO" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_LIPO" = x; then + LIPO=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + LIPO=$ac_ct_LIPO + fi +else + LIPO="$ac_cv_prog_LIPO" +fi + + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}otool", so it can be a program name with args. +set dummy ${ac_tool_prefix}otool; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_OTOOL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$OTOOL"; then + ac_cv_prog_OTOOL="$OTOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_OTOOL="${ac_tool_prefix}otool" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +OTOOL=$ac_cv_prog_OTOOL +if test -n "$OTOOL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OTOOL" >&5 +$as_echo "$OTOOL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_OTOOL"; then + ac_ct_OTOOL=$OTOOL + # Extract the first word of "otool", so it can be a program name with args. +set dummy otool; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_OTOOL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_OTOOL"; then + ac_cv_prog_ac_ct_OTOOL="$ac_ct_OTOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_OTOOL="otool" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_OTOOL=$ac_cv_prog_ac_ct_OTOOL +if test -n "$ac_ct_OTOOL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL" >&5 +$as_echo "$ac_ct_OTOOL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_OTOOL" = x; then + OTOOL=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + OTOOL=$ac_ct_OTOOL + fi +else + OTOOL="$ac_cv_prog_OTOOL" +fi + + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}otool64", so it can be a program name with args. +set dummy ${ac_tool_prefix}otool64; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_OTOOL64+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$OTOOL64"; then + ac_cv_prog_OTOOL64="$OTOOL64" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_OTOOL64="${ac_tool_prefix}otool64" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +OTOOL64=$ac_cv_prog_OTOOL64 +if test -n "$OTOOL64"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OTOOL64" >&5 +$as_echo "$OTOOL64" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_OTOOL64"; then + ac_ct_OTOOL64=$OTOOL64 + # Extract the first word of "otool64", so it can be a program name with args. +set dummy otool64; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_OTOOL64+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_OTOOL64"; then + ac_cv_prog_ac_ct_OTOOL64="$ac_ct_OTOOL64" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_OTOOL64="otool64" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_OTOOL64=$ac_cv_prog_ac_ct_OTOOL64 +if test -n "$ac_ct_OTOOL64"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL64" >&5 +$as_echo "$ac_ct_OTOOL64" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_OTOOL64" = x; then + OTOOL64=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + OTOOL64=$ac_ct_OTOOL64 + fi +else + OTOOL64="$ac_cv_prog_OTOOL64" +fi + + + + + + + + + + + + + + + + + + + + + + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -single_module linker flag" >&5 +$as_echo_n "checking for -single_module linker flag... " >&6; } +if ${lt_cv_apple_cc_single_mod+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_apple_cc_single_mod=no + if test -z "${LT_MULTI_MODULE}"; then + # By default we will add the -single_module flag. You can override + # by either setting the environment variable LT_MULTI_MODULE + # non-empty at configure time, or by adding -multi_module to the + # link flags. + rm -rf libconftest.dylib* + echo "int foo(void){return 1;}" > conftest.c + echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ +-dynamiclib -Wl,-single_module conftest.c" >&5 + $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ + -dynamiclib -Wl,-single_module conftest.c 2>conftest.err + _lt_result=$? + # If there is a non-empty error log, and "single_module" + # appears in it, assume the flag caused a linker warning + if test -s conftest.err && $GREP single_module conftest.err; then + cat conftest.err >&5 + # Otherwise, if the output was created with a 0 exit code from + # the compiler, it worked. + elif test -f libconftest.dylib && test $_lt_result -eq 0; then + lt_cv_apple_cc_single_mod=yes + else + cat conftest.err >&5 + fi + rm -rf libconftest.dylib* + rm -f conftest.* + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_apple_cc_single_mod" >&5 +$as_echo "$lt_cv_apple_cc_single_mod" >&6; } + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -exported_symbols_list linker flag" >&5 +$as_echo_n "checking for -exported_symbols_list linker flag... " >&6; } +if ${lt_cv_ld_exported_symbols_list+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_ld_exported_symbols_list=no + save_LDFLAGS=$LDFLAGS + echo "_main" > conftest.sym + LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + lt_cv_ld_exported_symbols_list=yes +else + lt_cv_ld_exported_symbols_list=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + LDFLAGS="$save_LDFLAGS" + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_exported_symbols_list" >&5 +$as_echo "$lt_cv_ld_exported_symbols_list" >&6; } + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -force_load linker flag" >&5 +$as_echo_n "checking for -force_load linker flag... 
" >&6; } +if ${lt_cv_ld_force_load+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_ld_force_load=no + cat > conftest.c << _LT_EOF +int forced_loaded() { return 2;} +_LT_EOF + echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&5 + $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&5 + echo "$AR cru libconftest.a conftest.o" >&5 + $AR cru libconftest.a conftest.o 2>&5 + echo "$RANLIB libconftest.a" >&5 + $RANLIB libconftest.a 2>&5 + cat > conftest.c << _LT_EOF +int main() { return 0;} +_LT_EOF + echo "$LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a" >&5 + $LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a 2>conftest.err + _lt_result=$? + if test -s conftest.err && $GREP force_load conftest.err; then + cat conftest.err >&5 + elif test -f conftest && test $_lt_result -eq 0 && $GREP forced_load conftest >/dev/null 2>&1 ; then + lt_cv_ld_force_load=yes + else + cat conftest.err >&5 + fi + rm -f conftest.err libconftest.a conftest conftest.c + rm -rf conftest.dSYM + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_force_load" >&5 +$as_echo "$lt_cv_ld_force_load" >&6; } + case $host_os in + rhapsody* | darwin1.[012]) + _lt_dar_allow_undefined='${wl}-undefined ${wl}suppress' ;; + darwin1.*) + _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; + darwin*) # darwin 5.x on + # if running on 10.5 or later, the deployment target defaults + # to the OS version, if on x86, and 10.4, the deployment + # target defaults to 10.4. Don't you love it? + case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in + 10.0,*86*-darwin8*|10.0,*-darwin[91]*) + _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; + 10.[012]*) + _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; + 10.*) + _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; + esac + ;; + esac + if test "$lt_cv_apple_cc_single_mod" = "yes"; then + _lt_dar_single_mod='$single_module' + fi + if test "$lt_cv_ld_exported_symbols_list" = "yes"; then + _lt_dar_export_syms=' ${wl}-exported_symbols_list,$output_objdir/${libname}-symbols.expsym' + else + _lt_dar_export_syms='~$NMEDIT -s $output_objdir/${libname}-symbols.expsym ${lib}' + fi + if test "$DSYMUTIL" != ":" && test "$lt_cv_ld_force_load" = "no"; then + _lt_dsymutil='~$DSYMUTIL $lib || :' + else + _lt_dsymutil= + fi + ;; + esac + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 +$as_echo_n "checking for ANSI C header files... " >&6; } +if ${ac_cv_header_stdc+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include +#include +#include + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_header_stdc=yes +else + ac_cv_header_stdc=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +if test $ac_cv_header_stdc = yes; then + # SunOS 4.x string.h does not declare mem*, contrary to ANSI. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "memchr" >/dev/null 2>&1; then : + +else + ac_cv_header_stdc=no +fi +rm -f conftest* + +fi + +if test $ac_cv_header_stdc = yes; then + # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#include <stdlib.h> + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "free" >/dev/null 2>&1; then : + +else + ac_cv_header_stdc=no +fi +rm -f conftest* + +fi + +if test $ac_cv_header_stdc = yes; then + # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. + if test "$cross_compiling" = yes; then : + : +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include <ctype.h> +#include <stdlib.h> +#if ((' ' & 0x0FF) == 0x020) +# define ISLOWER(c) ('a' <= (c) && (c) <= 'z') +# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) +#else +# define ISLOWER(c) \ + (('a' <= (c) && (c) <= 'i') \ + || ('j' <= (c) && (c) <= 'r') \ + || ('s' <= (c) && (c) <= 'z')) +# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) +#endif + +#define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) +int +main () +{ + int i; + for (i = 0; i < 256; i++) + if (XOR (islower (i), ISLOWER (i)) + || toupper (i) != TOUPPER (i)) + return 2; + return 0; +} +_ACEOF +if ac_fn_c_try_run "$LINENO"; then : + +else + ac_cv_header_stdc=no +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext +fi + +fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 +$as_echo "$ac_cv_header_stdc" >&6; } +if test $ac_cv_header_stdc = yes; then + +$as_echo "#define STDC_HEADERS 1" >>confdefs.h + +fi + +# On IRIX 5.3, sys/types and inttypes.h are conflicting. +for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ + inttypes.h stdint.h unistd.h +do : + as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` +ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default +" +if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +fi + +done + + +for ac_header in dlfcn.h +do : + ac_fn_c_check_header_compile "$LINENO" "dlfcn.h" "ac_cv_header_dlfcn_h" "$ac_includes_default +" +if test "x$ac_cv_header_dlfcn_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_DLFCN_H 1 +_ACEOF + +fi + +done + + + + +func_stripname_cnf () +{ + case ${2} in + .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;; + *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;; + esac +} # func_stripname_cnf + + + + + +# Set options +# Check whether --enable-shared was given. +if test "${enable_shared+set}" = set; then : + enableval=$enable_shared; p=${PACKAGE-default} + case $enableval in + yes) enable_shared=yes ;; + no) enable_shared=no ;; + *) + enable_shared=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_shared=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac +else + enable_shared=yes +fi + + + + + + + +# Check whether --enable-static was given. +if test "${enable_static+set}" = set; then : + enableval=$enable_static; p=${PACKAGE-default} + case $enableval in + yes) enable_static=yes ;; + no) enable_static=no ;; + *) + enable_static=no + # Look at the argument we got. We use all the common list separators.
+ lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_static=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac +else + enable_static=no +fi + + + + + + + + + + + enable_dlopen=no + + + enable_win32_dll=no + + + + + +# Check whether --with-pic was given. +if test "${with_pic+set}" = set; then : + withval=$with_pic; lt_p=${PACKAGE-default} + case $withval in + yes|no) pic_mode=$withval ;; + *) + pic_mode=default + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for lt_pkg in $withval; do + IFS="$lt_save_ifs" + if test "X$lt_pkg" = "X$lt_p"; then + pic_mode=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac +else + pic_mode=default +fi + + +test -z "$pic_mode" && pic_mode=default + + + + + + + + # Check whether --enable-fast-install was given. +if test "${enable_fast_install+set}" = set; then : + enableval=$enable_fast_install; p=${PACKAGE-default} + case $enableval in + yes) enable_fast_install=yes ;; + no) enable_fast_install=no ;; + *) + enable_fast_install=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_fast_install=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac +else + enable_fast_install=yes +fi + + + + + + + + + + + +# This can be used to rebuild libtool when needed +LIBTOOL_DEPS="$ltmain" + +# Always use our own libtool. +LIBTOOL='$(SHELL) $(top_builddir)/libtool' + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +test -z "$LN_S" && LN_S="ln -s" + + + + + + + + + + + + + + +if test -n "${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for objdir" >&5 +$as_echo_n "checking for objdir... " >&6; } +if ${lt_cv_objdir+:} false; then : + $as_echo_n "(cached) " >&6 +else + rm -f .libs 2>/dev/null +mkdir .libs 2>/dev/null +if test -d .libs; then + lt_cv_objdir=.libs +else + # MS-DOS does not allow filenames that begin with a dot. + lt_cv_objdir=_libs +fi +rmdir .libs 2>/dev/null +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_objdir" >&5 +$as_echo "$lt_cv_objdir" >&6; } +objdir=$lt_cv_objdir + + + + + +cat >>confdefs.h <<_ACEOF +#define LT_OBJDIR "$lt_cv_objdir/" +_ACEOF + + + + +case $host_os in +aix3*) + # AIX sometimes has problems with the GCC collect2 program. For some + # reason, if we set the COLLECT_NAMES environment variable, the problems + # vanish in a puff of smoke. + if test "X${COLLECT_NAMES+set}" != Xset; then + COLLECT_NAMES= + export COLLECT_NAMES + fi + ;; +esac + +# Global variables: +ofile=libtool +can_build_shared=yes + +# All known linkers require a `.a' archive for static linking (except MSVC, +# which needs '.lib'). 
+libext=a + +with_gnu_ld="$lt_cv_prog_gnu_ld" + +old_CC="$CC" +old_CFLAGS="$CFLAGS" + +# Set sane defaults for various variables +test -z "$CC" && CC=cc +test -z "$LTCC" && LTCC=$CC +test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS +test -z "$LD" && LD=ld +test -z "$ac_objext" && ac_objext=o + +for cc_temp in $compiler""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac +done +cc_basename=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` + + +# Only perform the check for file, if the check method requires it +test -z "$MAGIC_CMD" && MAGIC_CMD=file +case $deplibs_check_method in +file_magic*) + if test "$file_magic_cmd" = '$MAGIC_CMD'; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ${ac_tool_prefix}file" >&5 +$as_echo_n "checking for ${ac_tool_prefix}file... " >&6; } +if ${lt_cv_path_MAGIC_CMD+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $MAGIC_CMD in +[\\/*] | ?:[\\/]*) + lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. + ;; +*) + lt_save_MAGIC_CMD="$MAGIC_CMD" + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" + for ac_dir in $ac_dummy; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + if test -f $ac_dir/${ac_tool_prefix}file; then + lt_cv_path_MAGIC_CMD="$ac_dir/${ac_tool_prefix}file" + if test -n "$file_magic_test_file"; then + case $deplibs_check_method in + "file_magic "*) + file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` + MAGIC_CMD="$lt_cv_path_MAGIC_CMD" + if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | + $EGREP "$file_magic_regex" > /dev/null; then + : + else + cat <<_LT_EOF 1>&2 + +*** Warning: the command libtool uses to detect shared libraries, +*** $file_magic_cmd, produces output that libtool cannot recognize. +*** The result is that libtool may fail to recognize shared libraries +*** as such. This will affect the creation of libtool libraries that +*** depend on shared libraries, but programs linked with such libtool +*** libraries will work regardless of this problem. Nevertheless, you +*** may want to report the problem to your system manager and/or to +*** bug-libtool@gnu.org + +_LT_EOF + fi ;; + esac + fi + break + fi + done + IFS="$lt_save_ifs" + MAGIC_CMD="$lt_save_MAGIC_CMD" + ;; +esac +fi + +MAGIC_CMD="$lt_cv_path_MAGIC_CMD" +if test -n "$MAGIC_CMD"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5 +$as_echo "$MAGIC_CMD" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + + + +if test -z "$lt_cv_path_MAGIC_CMD"; then + if test -n "$ac_tool_prefix"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for file" >&5 +$as_echo_n "checking for file... " >&6; } +if ${lt_cv_path_MAGIC_CMD+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $MAGIC_CMD in +[\\/*] | ?:[\\/]*) + lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. + ;; +*) + lt_save_MAGIC_CMD="$MAGIC_CMD" + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" + for ac_dir in $ac_dummy; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. 
+ if test -f $ac_dir/file; then + lt_cv_path_MAGIC_CMD="$ac_dir/file" + if test -n "$file_magic_test_file"; then + case $deplibs_check_method in + "file_magic "*) + file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` + MAGIC_CMD="$lt_cv_path_MAGIC_CMD" + if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | + $EGREP "$file_magic_regex" > /dev/null; then + : + else + cat <<_LT_EOF 1>&2 + +*** Warning: the command libtool uses to detect shared libraries, +*** $file_magic_cmd, produces output that libtool cannot recognize. +*** The result is that libtool may fail to recognize shared libraries +*** as such. This will affect the creation of libtool libraries that +*** depend on shared libraries, but programs linked with such libtool +*** libraries will work regardless of this problem. Nevertheless, you +*** may want to report the problem to your system manager and/or to +*** bug-libtool@gnu.org + +_LT_EOF + fi ;; + esac + fi + break + fi + done + IFS="$lt_save_ifs" + MAGIC_CMD="$lt_save_MAGIC_CMD" + ;; +esac +fi + +MAGIC_CMD="$lt_cv_path_MAGIC_CMD" +if test -n "$MAGIC_CMD"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5 +$as_echo "$MAGIC_CMD" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + else + MAGIC_CMD=: + fi +fi + + fi + ;; +esac + +# Use C for the default configuration in the libtool script + +lt_save_CC="$CC" +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + +# Source file extension for C test sources. +ac_ext=c + +# Object file extension for compiled C test sources. +objext=o +objext=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code="int some_variable = 0;" + +# Code to be used in simple link tests +lt_simple_link_test_code='int main(){return(0);}' + + + + + + + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# If no C compiler flags were specified, use CFLAGS. +LTCFLAGS=${LTCFLAGS-"$CFLAGS"} + +# Allow CC to be a program name with arguments. +compiler=$CC + +# Save the default compiler, since it gets overwritten when the other +# tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP. +compiler_DEFAULT=$CC + +# save warnings/boilerplate of simple test code +ac_outfile=conftest.$ac_objext +echo "$lt_simple_compile_test_code" >conftest.$ac_ext +eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_compiler_boilerplate=`cat conftest.err` +$RM conftest* + +ac_outfile=conftest.$ac_objext +echo "$lt_simple_link_test_code" >conftest.$ac_ext +eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_linker_boilerplate=`cat conftest.err` +$RM -r conftest* + + +## CAVEAT EMPTOR: +## There is no encapsulation within the following macros, do not change +## the running order or otherwise move them around unless you know exactly +## what you are doing... +if test -n "$compiler"; then + +lt_prog_compiler_no_builtin_flag= + +if test "$GCC" = yes; then + case $cc_basename in + nvcc*) + lt_prog_compiler_no_builtin_flag=' -Xcompiler -fno-builtin' ;; + *) + lt_prog_compiler_no_builtin_flag=' -fno-builtin' ;; + esac + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -fno-rtti -fno-exceptions" >&5 +$as_echo_n "checking if $compiler supports -fno-rtti -fno-exceptions... 
" >&6; } +if ${lt_cv_prog_compiler_rtti_exceptions+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_rtti_exceptions=no + ac_outfile=conftest.$ac_objext + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="-fno-rtti -fno-exceptions" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_rtti_exceptions=yes + fi + fi + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_rtti_exceptions" >&5 +$as_echo "$lt_cv_prog_compiler_rtti_exceptions" >&6; } + +if test x"$lt_cv_prog_compiler_rtti_exceptions" = xyes; then + lt_prog_compiler_no_builtin_flag="$lt_prog_compiler_no_builtin_flag -fno-rtti -fno-exceptions" +else + : +fi + +fi + + + + + + + lt_prog_compiler_wl= +lt_prog_compiler_pic= +lt_prog_compiler_static= + + + if test "$GCC" = yes; then + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_static='-static' + + case $host_os in + aix*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static='-Bstatic' + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + lt_prog_compiler_pic='-fPIC' + ;; + m68k) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the `-m68020' flag to GCC prevents building anything better, + # like `-m68040'. + lt_prog_compiler_pic='-m68020 -resident32 -malways-restore-a4' + ;; + esac + ;; + + beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. + ;; + + mingw* | cygwin* | pw32* | os2* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + # Although the cygwin gcc ignores -fPIC, still need this for old-style + # (--disable-auto-import) libraries + lt_prog_compiler_pic='-DDLL_EXPORT' + ;; + + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + lt_prog_compiler_pic='-fno-common' + ;; + + haiku*) + # PIC is the default for Haiku. + # The "-static" flag exists, but is broken. + lt_prog_compiler_static= + ;; + + hpux*) + # PIC is the default for 64-bit PA HP-UX, but not for 32-bit + # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag + # sets the default TLS model and affects inlining. 
+ case $host_cpu in + hppa*64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic='-fPIC' + ;; + esac + ;; + + interix[3-9]*) + # Interix 3.x gcc -fpic/-fPIC options generate broken code. + # Instead, we relocate shared libraries at runtime. + ;; + + msdosdjgpp*) + # Just because we use GCC doesn't mean we suddenly get shared libraries + # on systems that don't support them. + lt_prog_compiler_can_build_shared=no + enable_shared=no + ;; + + *nto* | *qnx*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + lt_prog_compiler_pic='-fPIC -shared' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + lt_prog_compiler_pic=-Kconform_pic + fi + ;; + + *) + lt_prog_compiler_pic='-fPIC' + ;; + esac + + case $cc_basename in + nvcc*) # Cuda Compiler Driver 2.2 + lt_prog_compiler_wl='-Xlinker ' + if test -n "$lt_prog_compiler_pic"; then + lt_prog_compiler_pic="-Xcompiler $lt_prog_compiler_pic" + fi + ;; + esac + else + # PORTME Check for flag to pass linker flags through the system compiler. + case $host_os in + aix*) + lt_prog_compiler_wl='-Wl,' + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static='-Bstatic' + else + lt_prog_compiler_static='-bnso -bI:/lib/syscalls.exp' + fi + ;; + + mingw* | cygwin* | pw32* | os2* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + lt_prog_compiler_pic='-DDLL_EXPORT' + ;; + + hpux9* | hpux10* | hpux11*) + lt_prog_compiler_wl='-Wl,' + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic='+Z' + ;; + esac + # Is there a better lt_prog_compiler_static that works with the bundled CC? + lt_prog_compiler_static='${wl}-a ${wl}archive' + ;; + + irix5* | irix6* | nonstopux*) + lt_prog_compiler_wl='-Wl,' + # PIC (with -KPIC) is the default. + lt_prog_compiler_static='-non_shared' + ;; + + linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) + case $cc_basename in + # old Intel for x86_64 which still supported -KPIC. + ecc*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-static' + ;; + # icc used to be incompatible with GCC. + # ICC 10 doesn't accept -KPIC any more. + icc* | ifort*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-fPIC' + lt_prog_compiler_static='-static' + ;; + # Lahey Fortran 8.1. + lf95*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='--shared' + lt_prog_compiler_static='--static' + ;; + nagfor*) + # NAG Fortran compiler + lt_prog_compiler_wl='-Wl,-Wl,,' + lt_prog_compiler_pic='-PIC' + lt_prog_compiler_static='-Bstatic' + ;; + pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) + # Portland Group compilers (*not* the Pentium gcc compiler, + # which looks to be a dead project) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-fpic' + lt_prog_compiler_static='-Bstatic' + ;; + ccc*) + lt_prog_compiler_wl='-Wl,' + # All Alpha code is PIC. 
+ lt_prog_compiler_static='-non_shared' + ;; + xl* | bgxl* | bgf* | mpixl*) + # IBM XL C 8.0/Fortran 10.1, 11.1 on PPC and BlueGene + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-qpic' + lt_prog_compiler_static='-qstaticlink' + ;; + *) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ Ceres\ Fortran* | *Sun*Fortran*\ [1-7].* | *Sun*Fortran*\ 8.[0-3]*) + # Sun Fortran 8.3 passes all unrecognized flags to the linker + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + lt_prog_compiler_wl='' + ;; + *Sun\ F* | *Sun*Fortran*) + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + lt_prog_compiler_wl='-Qoption ld ' + ;; + *Sun\ C*) + # Sun C 5.9 + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + lt_prog_compiler_wl='-Wl,' + ;; + *Intel*\ [CF]*Compiler*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-fPIC' + lt_prog_compiler_static='-static' + ;; + *Portland\ Group*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-fpic' + lt_prog_compiler_static='-Bstatic' + ;; + esac + ;; + esac + ;; + + newsos6) + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + ;; + + *nto* | *qnx*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + lt_prog_compiler_pic='-fPIC -shared' + ;; + + osf3* | osf4* | osf5*) + lt_prog_compiler_wl='-Wl,' + # All OSF/1 code is PIC. + lt_prog_compiler_static='-non_shared' + ;; + + rdos*) + lt_prog_compiler_static='-non_shared' + ;; + + solaris*) + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + case $cc_basename in + f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) + lt_prog_compiler_wl='-Qoption ld ';; + *) + lt_prog_compiler_wl='-Wl,';; + esac + ;; + + sunos4*) + lt_prog_compiler_wl='-Qoption ld ' + lt_prog_compiler_pic='-PIC' + lt_prog_compiler_static='-Bstatic' + ;; + + sysv4 | sysv4.2uw2* | sysv4.3*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + ;; + + sysv4*MP*) + if test -d /usr/nec ;then + lt_prog_compiler_pic='-Kconform_pic' + lt_prog_compiler_static='-Bstatic' + fi + ;; + + sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + ;; + + unicos*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_can_build_shared=no + ;; + + uts4*) + lt_prog_compiler_pic='-pic' + lt_prog_compiler_static='-Bstatic' + ;; + + *) + lt_prog_compiler_can_build_shared=no + ;; + esac + fi + +case $host_os in + # For platforms which do not support PIC, -DPIC is meaningless: + *djgpp*) + lt_prog_compiler_pic= + ;; + *) + lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC" + ;; +esac + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 +$as_echo_n "checking for $compiler option to produce PIC... " >&6; } +if ${lt_cv_prog_compiler_pic+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_pic=$lt_prog_compiler_pic +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic" >&5 +$as_echo "$lt_cv_prog_compiler_pic" >&6; } +lt_prog_compiler_pic=$lt_cv_prog_compiler_pic + +# +# Check to make sure the PIC flag actually works. +# +if test -n "$lt_prog_compiler_pic"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic works" >&5 +$as_echo_n "checking if $compiler PIC flag $lt_prog_compiler_pic works... 
" >&6; } +if ${lt_cv_prog_compiler_pic_works+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_pic_works=no + ac_outfile=conftest.$ac_objext + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="$lt_prog_compiler_pic -DPIC" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_pic_works=yes + fi + fi + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works" >&5 +$as_echo "$lt_cv_prog_compiler_pic_works" >&6; } + +if test x"$lt_cv_prog_compiler_pic_works" = xyes; then + case $lt_prog_compiler_pic in + "" | " "*) ;; + *) lt_prog_compiler_pic=" $lt_prog_compiler_pic" ;; + esac +else + lt_prog_compiler_pic= + lt_prog_compiler_can_build_shared=no +fi + +fi + + + + + + + + + + + +# +# Check to make sure the static flag actually works. +# +wl=$lt_prog_compiler_wl eval lt_tmp_static_flag=\"$lt_prog_compiler_static\" +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5 +$as_echo_n "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; } +if ${lt_cv_prog_compiler_static_works+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_static_works=no + save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS $lt_tmp_static_flag" + echo "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then + # The linker can only warn and ignore the option if not recognized + # So say no if there are warnings + if test -s conftest.err; then + # Append any errors to the config.log. + cat conftest.err 1>&5 + $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_static_works=yes + fi + else + lt_cv_prog_compiler_static_works=yes + fi + fi + $RM -r conftest* + LDFLAGS="$save_LDFLAGS" + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works" >&5 +$as_echo "$lt_cv_prog_compiler_static_works" >&6; } + +if test x"$lt_cv_prog_compiler_static_works" = xyes; then + : +else + lt_prog_compiler_static= +fi + + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 +$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... 
" >&6; } +if ${lt_cv_prog_compiler_c_o+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_c_o=no + $RM -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + lt_cv_prog_compiler_c_o=yes + fi + fi + chmod u+w . 2>&5 + $RM conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files + $RM out/* && rmdir out + cd .. + $RM -r conftest + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5 +$as_echo "$lt_cv_prog_compiler_c_o" >&6; } + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 +$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } +if ${lt_cv_prog_compiler_c_o+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_c_o=no + $RM -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + lt_cv_prog_compiler_c_o=yes + fi + fi + chmod u+w . 2>&5 + $RM conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files + $RM out/* && rmdir out + cd .. 
+ $RM -r conftest + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5 +$as_echo "$lt_cv_prog_compiler_c_o" >&6; } + + + + +hard_links="nottested" +if test "$lt_cv_prog_compiler_c_o" = no && test "$need_locks" != no; then + # do not overwrite the value of need_locks provided by the user + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5 +$as_echo_n "checking if we can lock with hard links... " >&6; } + hard_links=yes + $RM conftest* + ln conftest.a conftest.b 2>/dev/null && hard_links=no + touch conftest.a + ln conftest.a conftest.b 2>&5 || hard_links=no + ln conftest.a conftest.b 2>/dev/null && hard_links=no + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5 +$as_echo "$hard_links" >&6; } + if test "$hard_links" = no; then + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5 +$as_echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;} + need_locks=warn + fi +else + need_locks=no +fi + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 +$as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; } + + runpath_var= + allow_undefined_flag= + always_export_symbols=no + archive_cmds= + archive_expsym_cmds= + compiler_needs_object=no + enable_shared_with_static_runtimes=no + export_dynamic_flag_spec= + export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + hardcode_automatic=no + hardcode_direct=no + hardcode_direct_absolute=no + hardcode_libdir_flag_spec= + hardcode_libdir_separator= + hardcode_minus_L=no + hardcode_shlibpath_var=unsupported + inherit_rpath=no + link_all_deplibs=unknown + module_cmds= + module_expsym_cmds= + old_archive_from_new_cmds= + old_archive_from_expsyms_cmds= + thread_safe_flag_spec= + whole_archive_flag_spec= + # include_expsyms should be a list of space-separated symbols to be *always* + # included in the symbol list + include_expsyms= + # exclude_expsyms can be an extended regexp of symbols to exclude + # it will be wrapped by ` (' and `)$', so one must not match beginning or + # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', + # as well as any symbol that contains `d'. + exclude_expsyms='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' + # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out + # platforms (ab)use it in PIC code, but their linkers get confused if + # the symbol is explicitly referenced. Since portable code cannot + # rely on this symbol name, it's probably fine to never include it in + # preloaded symbol tables. + # Exclude shared library initialization/finalization symbols. + extract_expsyms_cmds= + + case $host_os in + cygwin* | mingw* | pw32* | cegcc*) + # FIXME: the MSVC++ port hasn't been tested in a loooong time + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + if test "$GCC" != yes; then + with_gnu_ld=no + fi + ;; + interix*) + # we just hope/assume this is gcc and not c89 (= MSVC++) + with_gnu_ld=yes + ;; + openbsd*) + with_gnu_ld=no + ;; + linux* | k*bsd*-gnu | gnu*) + link_all_deplibs=no + ;; + esac + + ld_shlibs=yes + + # On some targets, GNU ld is compatible enough with the native linker + # that we're better off using the native interface for both. 
+ lt_use_gnu_ld_interface=no + if test "$with_gnu_ld" = yes; then + case $host_os in + aix*) + # The AIX port of GNU ld has always aspired to compatibility + # with the native linker. However, as the warning in the GNU ld + # block says, versions before 2.19.5* couldn't really create working + # shared libraries, regardless of the interface used. + case `$LD -v 2>&1` in + *\ \(GNU\ Binutils\)\ 2.19.5*) ;; + *\ \(GNU\ Binutils\)\ 2.[2-9]*) ;; + *\ \(GNU\ Binutils\)\ [3-9]*) ;; + *) + lt_use_gnu_ld_interface=yes + ;; + esac + ;; + *) + lt_use_gnu_ld_interface=yes + ;; + esac + fi + + if test "$lt_use_gnu_ld_interface" = yes; then + # If archive_cmds runs LD, not CC, wlarc should be empty + wlarc='${wl}' + + # Set some defaults for GNU ld with shared library support. These + # are reset later if shared libraries are not supported. Putting them + # here allows them to be overridden if necessary. + runpath_var=LD_RUN_PATH + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + export_dynamic_flag_spec='${wl}--export-dynamic' + # ancient GNU ld didn't support --whole-archive et. al. + if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then + whole_archive_flag_spec="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + else + whole_archive_flag_spec= + fi + supports_anon_versioning=no + case `$LD -v 2>&1` in + *GNU\ gold*) supports_anon_versioning=yes ;; + *\ [01].* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11 + *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... + *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... + *\ 2.11.*) ;; # other 2.11 versions + *) supports_anon_versioning=yes ;; + esac + + # See if GNU ld supports shared libraries. + case $host_os in + aix[3-9]*) + # On AIX/PPC, the GNU linker is very broken + if test "$host_cpu" != ia64; then + ld_shlibs=no + cat <<_LT_EOF 1>&2 + +*** Warning: the GNU linker, at least up to release 2.19, is reported +*** to be unable to reliably create shared libraries on AIX. +*** Therefore, libtool is disabling shared libraries support. If you +*** really care for shared libraries, you may want to install binutils +*** 2.20 or above, or modify your PATH so that a non-GNU linker is found. +*** You will then need to restart the configuration process. + +_LT_EOF + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds='' + ;; + m68k) + archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + ;; + esac + ;; + + beos*) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + allow_undefined_flag=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. FIXME + archive_cmds='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + else + ld_shlibs=no + fi + ;; + + cygwin* | mingw* | pw32* | cegcc*) + # _LT_TAGVAR(hardcode_libdir_flag_spec, ) is actually meaningless, + # as there is no search path for DLLs. 
+ hardcode_libdir_flag_spec='-L$libdir' + export_dynamic_flag_spec='${wl}--export-all-symbols' + allow_undefined_flag=unsupported + always_export_symbols=no + enable_shared_with_static_runtimes=yes + export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' + exclude_expsyms='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' + + if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + # If the export-symbols file already is a .def file (1st line + # is EXPORTS), use it as is; otherwise, prepend... + archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + else + ld_shlibs=no + fi + ;; + + haiku*) + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + link_all_deplibs=yes + ;; + + interix[3-9]*) + hardcode_direct=no + hardcode_shlibpath_var=no + hardcode_libdir_flag_spec='${wl}-rpath,$libdir' + export_dynamic_flag_spec='${wl}-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. + # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. Moving up from 0x10000000 also allows more sbrk(2) space. 
+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + archive_expsym_cmds='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; + + gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu) + tmp_diet=no + if test "$host_os" = linux-dietlibc; then + case $cc_basename in + diet\ *) tmp_diet=yes;; # linux-dietlibc with static linking (!diet-dyn) + esac + fi + if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \ + && test "$tmp_diet" = no + then + tmp_addflag=' $pic_flag' + tmp_sharedflag='-shared' + case $cc_basename,$host_cpu in + pgcc*) # Portland Group C compiler + whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + tmp_addflag=' $pic_flag' + ;; + pgf77* | pgf90* | pgf95* | pgfortran*) + # Portland Group f77 and f90 compilers + whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + tmp_addflag=' $pic_flag -Mnomain' ;; + ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 + tmp_addflag=' -i_dynamic' ;; + efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 + tmp_addflag=' -i_dynamic -nofor_main' ;; + ifc* | ifort*) # Intel Fortran compiler + tmp_addflag=' -nofor_main' ;; + lf95*) # Lahey Fortran 8.1 + whole_archive_flag_spec= + tmp_sharedflag='--shared' ;; + xl[cC]* | bgxl[cC]* | mpixl[cC]*) # IBM XL C 8.0 on PPC (deal with xlf below) + tmp_sharedflag='-qmkshrobj' + tmp_addflag= ;; + nvcc*) # Cuda Compiler Driver 2.2 + whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + compiler_needs_object=yes + ;; + esac + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) # Sun C 5.9 + whole_archive_flag_spec='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + compiler_needs_object=yes + tmp_sharedflag='-G' ;; + *Sun\ F*) # Sun Fortran 8.3 + tmp_sharedflag='-G' ;; + esac + archive_cmds='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + + if test "x$supports_anon_versioning" = xyes; then + archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' + fi + + case $cc_basename in + xlf* | bgf* | bgxlf* | mpixlf*) + # IBM XL Fortran 10.1 on PPC cannot create shared libs itself + whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive' + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o 
$lib' + if test "x$supports_anon_versioning" = xyes; then + archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' + fi + ;; + esac + else + ld_shlibs=no + fi + ;; + + netbsd* | netbsdelf*-gnu) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' + wlarc= + else + archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + fi + ;; + + solaris*) + if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then + ld_shlibs=no + cat <<_LT_EOF 1>&2 + +*** Warning: The releases 2.8.* of the GNU linker cannot reliably +*** create shared libraries on Solaris systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.9.1 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. + +_LT_EOF + elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi + ;; + + sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) + case `$LD -v 2>&1` in + *\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*) + ld_shlibs=no + cat <<_LT_EOF 1>&2 + +*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 can not +*** reliably create shared libraries on SCO systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.16.91.0.3 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. + +_LT_EOF + ;; + *) + # For security reasons, it is highly recommended that you always + # use absolute paths for naming shared libraries, and exclude the + # DT_RUNPATH tag from executables and libraries. But doing so + # requires that you compile everything twice, which is a pain. 
+ if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi + ;; + esac + ;; + + sunos4*) + archive_cmds='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' + wlarc= + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + *) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi + ;; + esac + + if test "$ld_shlibs" = no; then + runpath_var= + hardcode_libdir_flag_spec= + export_dynamic_flag_spec= + whole_archive_flag_spec= + fi + else + # PORTME fill in a description of your system's linker (not GNU ld) + case $host_os in + aix3*) + allow_undefined_flag=unsupported + always_export_symbols=yes + archive_expsym_cmds='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' + # Note: this linker hardcodes the directories in LIBPATH if there + # are no directories specified by -L. + hardcode_minus_L=yes + if test "$GCC" = yes && test -z "$lt_prog_compiler_static"; then + # Neither direct hardcoding nor static linking is supported with a + # broken collect2. + hardcode_direct=unsupported + fi + ;; + + aix[4-9]*) + if test "$host_cpu" = ia64; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag="" + else + # If we're using GNU nm, then we don't want the "-C" option. + # -C means demangle to AIX nm, but means don't demangle with GNU nm + # Also, AIX nm treats weak defined symbols like other global + # defined symbols, whereas GNU nm marks them as "W". + if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then + export_symbols_cmds='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + else + export_symbols_cmds='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + fi + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # need to do runtime linking. + case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*) + for ld_flag in $LDFLAGS; do + if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then + aix_use_runtimelinking=yes + break + fi + done + ;; + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. 
+ + archive_cmds='' + hardcode_direct=yes + hardcode_direct_absolute=yes + hardcode_libdir_separator=':' + link_all_deplibs=yes + file_list_spec='${wl}-f,' + + if test "$GCC" = yes; then + case $host_os in aix4.[012]|aix4.[012].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`${CC} -print-prog-name=collect2` + if test -f "$collect2name" && + strings "$collect2name" | $GREP resolve_lib_name >/dev/null + then + # We have reworked collect2 + : + else + # We have old collect2 + hardcode_direct=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + hardcode_minus_L=yes + hardcode_libdir_flag_spec='-L$libdir' + hardcode_libdir_separator= + fi + ;; + esac + shared_flag='-shared' + if test "$aix_use_runtimelinking" = yes; then + shared_flag="$shared_flag "'${wl}-G' + fi + link_all_deplibs=no + else + # not using gcc + if test "$host_cpu" = ia64; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. The following line is correct: + shared_flag='-G' + else + if test "$aix_use_runtimelinking" = yes; then + shared_flag='${wl}-G' + else + shared_flag='${wl}-bM:SRE' + fi + fi + fi + + export_dynamic_flag_spec='${wl}-bexpall' + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to export. + always_export_symbols=yes + if test "$aix_use_runtimelinking" = yes; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + allow_undefined_flag='-berok' + # Determine the default libpath from the value encoded in an + # empty executable. + if test "${lt_cv_aix_libpath+set}" = set; then + aix_libpath=$lt_cv_aix_libpath +else + if ${lt_cv_aix_libpath_+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + + lt_aix_libpath_sed=' + /Import File Strings/,/^$/ { + /^0/ { + s/^0 *\([^ ]*\) *$/\1/ + p + } + }' + lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + # Check for a 64-bit object if we didn't find anything. 
+ if test -z "$lt_cv_aix_libpath_"; then + lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + fi +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + if test -z "$lt_cv_aix_libpath_"; then + lt_cv_aix_libpath_="/usr/lib:/lib" + fi + +fi + + aix_libpath=$lt_cv_aix_libpath_ +fi + + hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" + archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" + else + if test "$host_cpu" = ia64; then + hardcode_libdir_flag_spec='${wl}-R $libdir:/usr/lib:/lib' + allow_undefined_flag="-z nodefs" + archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an + # empty executable. + if test "${lt_cv_aix_libpath+set}" = set; then + aix_libpath=$lt_cv_aix_libpath +else + if ${lt_cv_aix_libpath_+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + + lt_aix_libpath_sed=' + /Import File Strings/,/^$/ { + /^0/ { + s/^0 *\([^ ]*\) *$/\1/ + p + } + }' + lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + # Check for a 64-bit object if we didn't find anything. + if test -z "$lt_cv_aix_libpath_"; then + lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + fi +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + if test -z "$lt_cv_aix_libpath_"; then + lt_cv_aix_libpath_="/usr/lib:/lib" + fi + +fi + + aix_libpath=$lt_cv_aix_libpath_ +fi + + hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + no_undefined_flag=' ${wl}-bernotok' + allow_undefined_flag=' ${wl}-berok' + if test "$with_gnu_ld" = yes; then + # We only use this code for GNU lds that support --whole-archive. + whole_archive_flag_spec='${wl}--whole-archive$convenience ${wl}--no-whole-archive' + else + # Exported symbols can be pulled into shared objects from archives + whole_archive_flag_spec='$convenience' + fi + archive_cmds_need_lc=yes + # This is similar to how AIX traditionally builds its shared libraries. 
+ archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' + fi + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds='' + ;; + m68k) + archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + ;; + esac + ;; + + bsdi[45]*) + export_dynamic_flag_spec=-rdynamic + ;; + + cygwin* | mingw* | pw32* | cegcc*) + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. + case $cc_basename in + cl*) + # Native MSVC + hardcode_libdir_flag_spec=' ' + allow_undefined_flag=unsupported + always_export_symbols=yes + file_list_spec='@' + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=".dll" + # FIXME: Setting linknames here is a bad hack. + archive_cmds='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' + archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + sed -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; + else + sed -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; + fi~ + $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ + linknames=' + # The linker will not automatically build a static lib if we build a DLL. + # _LT_TAGVAR(old_archive_from_new_cmds, )='true' + enable_shared_with_static_runtimes=yes + exclude_expsyms='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' + export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1,DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' + # Don't use ranlib + old_postinstall_cmds='chmod 644 $oldlib' + postlink_cmds='lt_outputfile="@OUTPUT@"~ + lt_tool_outputfile="@TOOL_OUTPUT@"~ + case $lt_outputfile in + *.exe|*.EXE) ;; + *) + lt_outputfile="$lt_outputfile.exe" + lt_tool_outputfile="$lt_tool_outputfile.exe" + ;; + esac~ + if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then + $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; + $RM "$lt_outputfile.manifest"; + fi' + ;; + *) + # Assume MSVC wrapper + hardcode_libdir_flag_spec=' ' + allow_undefined_flag=unsupported + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=".dll" + # FIXME: Setting linknames here is a bad hack. 
+ archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' + # The linker will automatically build a .lib file if we build a DLL. + old_archive_from_new_cmds='true' + # FIXME: Should let the user specify the lib program. + old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' + enable_shared_with_static_runtimes=yes + ;; + esac + ;; + + darwin* | rhapsody*) + + + archive_cmds_need_lc=no + hardcode_direct=no + hardcode_automatic=yes + hardcode_shlibpath_var=unsupported + if test "$lt_cv_ld_force_load" = "yes"; then + whole_archive_flag_spec='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience ${wl}-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' + + else + whole_archive_flag_spec='' + fi + link_all_deplibs=yes + allow_undefined_flag="$_lt_dar_allow_undefined" + case $cc_basename in + ifort*) _lt_dar_can_shared=yes ;; + *) _lt_dar_can_shared=$GCC ;; + esac + if test "$_lt_dar_can_shared" = "yes"; then + output_verbose_link_cmd=func_echo_all + archive_cmds="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}" + module_cmds="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}" + archive_expsym_cmds="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}" + module_expsym_cmds="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}" + + else + ld_shlibs=no + fi + + ;; + + dgux*) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_shlibpath_var=no + ;; + + # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor + # support. Future versions do this automatically, but an explicit c++rt0.o + # does not break anything, and helps significantly (at the cost of a little + # extra space). + freebsd2.2*) + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + # Unfortunately, older versions of FreeBSD 2 do not have this feature. + freebsd2.*) + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=yes + hardcode_minus_L=yes + hardcode_shlibpath_var=no + ;; + + # FreeBSD 3 and greater uses gcc -shared to do shared libraries. 
+ freebsd* | dragonfly*) + archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + hpux9*) + if test "$GCC" = yes; then + archive_cmds='$RM $output_objdir/$soname~$CC -shared $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + fi + hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' + hardcode_libdir_separator=: + hardcode_direct=yes + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + hardcode_minus_L=yes + export_dynamic_flag_spec='${wl}-E' + ;; + + hpux10*) + if test "$GCC" = yes && test "$with_gnu_ld" = no; then + archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' + fi + if test "$with_gnu_ld" = no; then + hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' + hardcode_libdir_separator=: + hardcode_direct=yes + hardcode_direct_absolute=yes + export_dynamic_flag_spec='${wl}-E' + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + hardcode_minus_L=yes + fi + ;; + + hpux11*) + if test "$GCC" = yes && test "$with_gnu_ld" = no; then + case $host_cpu in + hppa*64*) + archive_cmds='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + else + case $host_cpu in + hppa*64*) + archive_cmds='$CC -b ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + archive_cmds='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + + # Older versions of the 11.00 compiler do not understand -b yet + # (HP92453-01 A.11.01.20 doesn't, HP92453-01 B.11.X.35175-35176.GP does) + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CC understands -b" >&5 +$as_echo_n "checking if $CC understands -b... " >&6; } +if ${lt_cv_prog_compiler__b+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler__b=no + save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS -b" + echo "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then + # The linker can only warn and ignore the option if not recognized + # So say no if there are warnings + if test -s conftest.err; then + # Append any errors to the config.log. 
+ cat conftest.err 1>&5 + $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler__b=yes + fi + else + lt_cv_prog_compiler__b=yes + fi + fi + $RM -r conftest* + LDFLAGS="$save_LDFLAGS" + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler__b" >&5 +$as_echo "$lt_cv_prog_compiler__b" >&6; } + +if test x"$lt_cv_prog_compiler__b" = xyes; then + archive_cmds='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' +else + archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' +fi + + ;; + esac + fi + if test "$with_gnu_ld" = no; then + hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' + hardcode_libdir_separator=: + + case $host_cpu in + hppa*64*|ia64*) + hardcode_direct=no + hardcode_shlibpath_var=no + ;; + *) + hardcode_direct=yes + hardcode_direct_absolute=yes + export_dynamic_flag_spec='${wl}-E' + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + hardcode_minus_L=yes + ;; + esac + fi + ;; + + irix5* | irix6* | nonstopux*) + if test "$GCC" = yes; then + archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + # Try to use the -exported_symbol ld option, if it does not + # work, assume that -exports_file does not work either and + # implicitly export all symbols. + # This should be the same for all languages, so no per-tag cache variable. + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $host_os linker accepts -exported_symbol" >&5 +$as_echo_n "checking whether the $host_os linker accepts -exported_symbol... " >&6; } +if ${lt_cv_irix_exported_symbol+:} false; then : + $as_echo_n "(cached) " >&6 +else + save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +int foo (void) { return 0; } +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + lt_cv_irix_exported_symbol=yes +else + lt_cv_irix_exported_symbol=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + LDFLAGS="$save_LDFLAGS" +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_irix_exported_symbol" >&5 +$as_echo "$lt_cv_irix_exported_symbol" >&6; } + if test "$lt_cv_irix_exported_symbol" = yes; then + archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' + fi + else + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib' + fi + archive_cmds_need_lc='no' + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator=: + inherit_rpath=yes + link_all_deplibs=yes + ;; + + netbsd* | netbsdelf*-gnu) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out + else + archive_cmds='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF + fi + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + newsos6) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=yes + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator=: + hardcode_shlibpath_var=no + ;; + + *nto* | *qnx*) + ;; + + openbsd*) + if test -f /usr/libexec/ld.so; then + hardcode_direct=yes + hardcode_shlibpath_var=no + hardcode_direct_absolute=yes + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols' + hardcode_libdir_flag_spec='${wl}-rpath,$libdir' + export_dynamic_flag_spec='${wl}-E' + else + case $host_os in + openbsd[01].* | openbsd2.[0-7] | openbsd2.[0-7].*) + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec='-R$libdir' + ;; + *) + archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec='${wl}-rpath,$libdir' + ;; + esac + fi + else + ld_shlibs=no + fi + ;; + + os2*) + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + allow_undefined_flag=unsupported + archive_cmds='$ECHO "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~echo DATA >> $output_objdir/$libname.def~echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def' + old_archive_from_new_cmds='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def' + ;; + + osf3*) + if test "$GCC" = 
yes; then + allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' + archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + allow_undefined_flag=' -expect_unresolved \*' + archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + fi + archive_cmds_need_lc='no' + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator=: + ;; + + osf4* | osf5*) # as osf3* with the addition of -msym flag + if test "$GCC" = yes; then + allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' + archive_cmds='$CC -shared${allow_undefined_flag} $pic_flag $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + else + allow_undefined_flag=' -expect_unresolved \*' + archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + archive_expsym_cmds='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~ + $CC -shared${allow_undefined_flag} ${wl}-input ${wl}$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib~$RM $lib.exp' + + # Both c and cxx compiler support -rpath directly + hardcode_libdir_flag_spec='-rpath $libdir' + fi + archive_cmds_need_lc='no' + hardcode_libdir_separator=: + ;; + + solaris*) + no_undefined_flag=' -z defs' + if test "$GCC" = yes; then + wlarc='${wl}' + archive_cmds='$CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' + else + case `$CC -V 2>&1` in + *"Compilers 5.0"*) + wlarc='' + archive_cmds='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' + archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp' + ;; + *) + wlarc='${wl}' + archive_cmds='$CC -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' + ;; + esac + fi + hardcode_libdir_flag_spec='-R$libdir' + hardcode_shlibpath_var=no + case $host_os in + solaris2.[0-5] | solaris2.[0-5].*) ;; + *) + # The compiler driver will combine and reorder linker options, + # but 
understands `-z linker_flag'. GCC discards it without `$wl', + # but is careful enough not to reorder. + # Supported since Solaris 2.6 (maybe 2.5.1?) + if test "$GCC" = yes; then + whole_archive_flag_spec='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' + else + whole_archive_flag_spec='-z allextract$convenience -z defaultextract' + fi + ;; + esac + link_all_deplibs=yes + ;; + + sunos4*) + if test "x$host_vendor" = xsequent; then + # Use $CC to link under sequent, because it throws in some extra .o + # files that make .init and .fini sections work. + archive_cmds='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' + fi + hardcode_libdir_flag_spec='-L$libdir' + hardcode_direct=yes + hardcode_minus_L=yes + hardcode_shlibpath_var=no + ;; + + sysv4) + case $host_vendor in + sni) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=yes # is this really true??? + ;; + siemens) + ## LD is ld it makes a PLAMLIB + ## CC just makes a GrossModule. + archive_cmds='$LD -G -o $lib $libobjs $deplibs $linker_flags' + reload_cmds='$CC -r -o $output$reload_objs' + hardcode_direct=no + ;; + motorola) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=no #Motorola manual says yes, but my tests say they lie + ;; + esac + runpath_var='LD_RUN_PATH' + hardcode_shlibpath_var=no + ;; + + sysv4.3*) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_shlibpath_var=no + export_dynamic_flag_spec='-Bexport' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_shlibpath_var=no + runpath_var=LD_RUN_PATH + hardcode_runpath_var=yes + ld_shlibs=yes + fi + ;; + + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) + no_undefined_flag='${wl}-z,text' + archive_cmds_need_lc=no + hardcode_shlibpath_var=no + runpath_var='LD_RUN_PATH' + + if test "$GCC" = yes; then + archive_cmds='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + sysv5* | sco3.2v5* | sco5v6*) + # Note: We can NOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, which means just about no library would + # ever link correctly. If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. 
+ no_undefined_flag='${wl}-z,text' + allow_undefined_flag='${wl}-z,nodefs' + archive_cmds_need_lc=no + hardcode_shlibpath_var=no + hardcode_libdir_flag_spec='${wl}-R,$libdir' + hardcode_libdir_separator=':' + link_all_deplibs=yes + export_dynamic_flag_spec='${wl}-Bexport' + runpath_var='LD_RUN_PATH' + + if test "$GCC" = yes; then + archive_cmds='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + uts4*) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_shlibpath_var=no + ;; + + *) + ld_shlibs=no + ;; + esac + + if test x$host_vendor = xsni; then + case $host in + sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) + export_dynamic_flag_spec='${wl}-Blargedynsym' + ;; + esac + fi + fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs" >&5 +$as_echo "$ld_shlibs" >&6; } +test "$ld_shlibs" = no && can_build_shared=no + +with_gnu_ld=$with_gnu_ld + + + + + + + + + + + + + + + +# +# Do we need to explicitly link libc? +# +case "x$archive_cmds_need_lc" in +x|xyes) + # Assume -lc should be added + archive_cmds_need_lc=yes + + if test "$enable_shared" = yes && test "$GCC" = yes; then + case $archive_cmds in + *'~'*) + # FIXME: we may have to deal with multi-command sequences. + ;; + '$CC '*) + # Test whether the compiler implicitly links with -lc since on some + # systems, -lgcc has to come before -lc. If gcc already passes -lc + # to ld, don't add -lc before -lgcc. + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5 +$as_echo_n "checking whether -lc should be explicitly linked in... " >&6; } +if ${lt_cv_archive_cmds_need_lc+:} false; then : + $as_echo_n "(cached) " >&6 +else + $RM conftest* + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } 2>conftest.err; then + soname=conftest + lib=conftest + libobjs=conftest.$ac_objext + deplibs= + wl=$lt_prog_compiler_wl + pic_flag=$lt_prog_compiler_pic + compiler_flags=-v + linker_flags=-v + verstring= + output_objdir=. + libname=conftest + lt_save_allow_undefined_flag=$allow_undefined_flag + allow_undefined_flag= + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5 + (eval $archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; } + then + lt_cv_archive_cmds_need_lc=no + else + lt_cv_archive_cmds_need_lc=yes + fi + allow_undefined_flag=$lt_save_allow_undefined_flag + else + cat conftest.err 1>&5 + fi + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc" >&5 +$as_echo "$lt_cv_archive_cmds_need_lc" >&6; } + archive_cmds_need_lc=$lt_cv_archive_cmds_need_lc + ;; + esac + fi + ;; +esac + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5 +$as_echo_n "checking dynamic linker characteristics... " >&6; } + +if test "$GCC" = yes; then + case $host_os in + darwin*) lt_awk_arg="/^libraries:/,/LR/" ;; + *) lt_awk_arg="/^libraries:/" ;; + esac + case $host_os in + mingw* | cegcc*) lt_sed_strip_eq="s,=\([A-Za-z]:\),\1,g" ;; + *) lt_sed_strip_eq="s,=/,/,g" ;; + esac + lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e $lt_sed_strip_eq` + case $lt_search_path_spec in + *\;*) + # if the path contains ";" then we assume it to be the separator + # otherwise default to the standard path separator (i.e. ":") - it is + # assumed that no part of a normal pathname contains ";" but that should + # okay in the real world where ";" in dirpaths is itself problematic. + lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED 's/;/ /g'` + ;; + *) + lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED "s/$PATH_SEPARATOR/ /g"` + ;; + esac + # Ok, now we have the path, separated by spaces, we can step through it + # and add multilib dir if necessary. + lt_tmp_lt_search_path_spec= + lt_multi_os_dir=`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null` + for lt_sys_path in $lt_search_path_spec; do + if test -d "$lt_sys_path/$lt_multi_os_dir"; then + lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path/$lt_multi_os_dir" + else + test -d "$lt_sys_path" && \ + lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path" + fi + done + lt_search_path_spec=`$ECHO "$lt_tmp_lt_search_path_spec" | awk ' +BEGIN {RS=" "; FS="/|\n";} { + lt_foo=""; + lt_count=0; + for (lt_i = NF; lt_i > 0; lt_i--) { + if ($lt_i != "" && $lt_i != ".") { + if ($lt_i == "..") { + lt_count++; + } else { + if (lt_count == 0) { + lt_foo="/" $lt_i lt_foo; + } else { + lt_count--; + } + } + } + } + if (lt_foo != "") { lt_freq[lt_foo]++; } + if (lt_freq[lt_foo] == 1) { print lt_foo; } +}'` + # AWK program above erroneously prepends '/' to C:/dos/paths + # for these hosts. 
+ case $host_os in + mingw* | cegcc*) lt_search_path_spec=`$ECHO "$lt_search_path_spec" |\ + $SED 's,/\([A-Za-z]:\),\1,g'` ;; + esac + sys_lib_search_path_spec=`$ECHO "$lt_search_path_spec" | $lt_NL2SP` +else + sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" +fi +library_names_spec= +libname_spec='lib$name' +soname_spec= +shrext_cmds=".so" +postinstall_cmds= +postuninstall_cmds= +finish_cmds= +finish_eval= +shlibpath_var= +shlibpath_overrides_runpath=unknown +version_type=none +dynamic_linker="$host_os ld.so" +sys_lib_dlsearch_path_spec="/lib /usr/lib" +need_lib_prefix=unknown +hardcode_into_libs=no + +# when you set need_version to no, make sure it does not cause -set_version +# flags to be left without arguments +need_version=unknown + +case $host_os in +aix3*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' + shlibpath_var=LIBPATH + + # AIX 3 has no versioning support, so we append a major version to the name. + soname_spec='${libname}${release}${shared_ext}$major' + ;; + +aix[4-9]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + hardcode_into_libs=yes + if test "$host_cpu" = ia64; then + # AIX 5 supports IA64 + library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + else + # With GCC up to 2.95.x, collect2 would create an import file + # for dependence libraries. The import file would start with + # the line `#! .'. This would cause the generated library to + # depend on `.', always an invalid library. This was fixed in + # development snapshots of GCC prior to 3.0. + case $host_os in + aix4 | aix4.[01] | aix4.[01].*) + if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' + echo ' yes ' + echo '#endif'; } | ${CC} -E - | $GREP yes > /dev/null; then + : + else + can_build_shared=no + fi + ;; + esac + # AIX (on Power*) has no versioning support, so currently we can not hardcode correct + # soname into executable. Probably we can add versioning support to + # collect2, so additional links can be useful in future. + if test "$aix_use_runtimelinking" = yes; then + # If using run time linking (on AIX 4.2 or later) use lib.so + # instead of lib.a to let people know that these are not + # typical AIX shared libraries. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + else + # We preserve .a as extension for shared libraries through AIX4.2 + # and later when we are not doing run time linking. + library_names_spec='${libname}${release}.a $libname.a' + soname_spec='${libname}${release}${shared_ext}$major' + fi + shlibpath_var=LIBPATH + fi + ;; + +amigaos*) + case $host_cpu in + powerpc) + # Since July 2007 AmigaOS4 officially supports .so libraries. + # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + ;; + m68k) + library_names_spec='$libname.ixlibrary $libname.a' + # Create ${libname}_ixlibrary.a entries in /sys/libs. 
+ finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' + ;; + esac + ;; + +beos*) + library_names_spec='${libname}${shared_ext}' + dynamic_linker="$host_os ld.so" + shlibpath_var=LIBRARY_PATH + ;; + +bsdi[45]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" + sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" + # the default ld.so.conf also contains /usr/contrib/lib and + # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow + # libtool to hard-code these into programs + ;; + +cygwin* | mingw* | pw32* | cegcc*) + version_type=windows + shrext_cmds=".dll" + need_version=no + need_lib_prefix=no + + case $GCC,$cc_basename in + yes,*) + # gcc + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname~ + chmod a+x \$dldir/$dlname~ + if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then + eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; + fi' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + shlibpath_overrides_runpath=yes + + case $host_os in + cygwin*) + # Cygwin DLLs use 'cyg' prefix rather than 'lib' + soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + + sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/lib/w32api" + ;; + mingw* | cegcc*) + # MinGW DLLs use traditional 'lib' prefix + soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + ;; + pw32*) + # pw32 DLLs use 'pw' prefix rather than 'lib' + library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + ;; + esac + dynamic_linker='Win32 ld.exe' + ;; + + *,cl*) + # Native MSVC + libname_spec='$name' + soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + library_names_spec='${libname}.dll.lib' + + case $build_os in + mingw*) + sys_lib_search_path_spec= + lt_save_ifs=$IFS + IFS=';' + for lt_path in $LIB + do + IFS=$lt_save_ifs + # Let DOS variable expansion print the short 8.3 style file name. + lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` + sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" + done + IFS=$lt_save_ifs + # Convert to MSYS style. + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` + ;; + cygwin*) + # Convert to unix form, then to dos form, then back to unix form + # but this time dos style (no spaces!) 
so that the unix form looks + # like /cygdrive/c/PROGRA~1:/cygdr... + sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` + sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` + sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + ;; + *) + sys_lib_search_path_spec="$LIB" + if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then + # It is most probably a Windows format PATH. + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi + # FIXME: find the short name or the path components, as spaces are + # common. (e.g. "Program Files" -> "PROGRA~1") + ;; + esac + + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + shlibpath_overrides_runpath=yes + dynamic_linker='Win32 link.exe' + ;; + + *) + # Assume MSVC wrapper + library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' + dynamic_linker='Win32 ld.exe' + ;; + esac + # FIXME: first we should search . and the directory the executable is in + shlibpath_var=PATH + ;; + +darwin* | rhapsody*) + dynamic_linker="$host_os dyld" + version_type=darwin + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${major}$shared_ext ${libname}$shared_ext' + soname_spec='${libname}${release}${major}$shared_ext' + shlibpath_overrides_runpath=yes + shlibpath_var=DYLD_LIBRARY_PATH + shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' + + sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib" + sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' + ;; + +dgux*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +freebsd* | dragonfly*) + # DragonFly does not have aout. When/if they implement a new + # versioning mechanism, adjust this. 
+ if test -x /usr/bin/objformat; then + objformat=`/usr/bin/objformat` + else + case $host_os in + freebsd[23].*) objformat=aout ;; + *) objformat=elf ;; + esac + fi + version_type=freebsd-$objformat + case $version_type in + freebsd-elf*) + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + need_version=no + need_lib_prefix=no + ;; + freebsd-*) + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' + need_version=yes + ;; + esac + shlibpath_var=LD_LIBRARY_PATH + case $host_os in + freebsd2.*) + shlibpath_overrides_runpath=yes + ;; + freebsd3.[01]* | freebsdelf3.[01]*) + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ + freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + *) # from 4.6 on, and DragonFly + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + esac + ;; + +haiku*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + dynamic_linker="$host_os runtime_loader" + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LIBRARY_PATH + shlibpath_overrides_runpath=yes + sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' + hardcode_into_libs=yes + ;; + +hpux9* | hpux10* | hpux11*) + # Give a soname corresponding to the major version so that dld.sl refuses to + # link against other versions. + version_type=sunos + need_lib_prefix=no + need_version=no + case $host_cpu in + ia64*) + shrext_cmds='.so' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.so" + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + if test "X$HPUX_IA64_MODE" = X32; then + sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" + else + sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" + fi + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + hppa*64*) + shrext_cmds='.sl' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.sl" + shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + *) + shrext_cmds='.sl' + dynamic_linker="$host_os dld.sl" + shlibpath_var=SHLIB_PATH + shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + ;; + esac + # HP-UX runs *really* slowly unless shared libraries are mode 555, ... 
+ postinstall_cmds='chmod 555 $lib' + # or fails outright, so override atomically: + install_override_mode=555 + ;; + +interix[3-9]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +irix5* | irix6* | nonstopux*) + case $host_os in + nonstopux*) version_type=nonstopux ;; + *) + if test "$lt_cv_prog_gnu_ld" = yes; then + version_type=linux # correct to gnu/linux during the next big refactor + else + version_type=irix + fi ;; + esac + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' + case $host_os in + irix5* | nonstopux*) + libsuff= shlibsuff= + ;; + *) + case $LD in # libtool.m4 will add one of these switches to LD + *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") + libsuff= shlibsuff= libmagic=32-bit;; + *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") + libsuff=32 shlibsuff=N32 libmagic=N32;; + *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") + libsuff=64 shlibsuff=64 libmagic=64-bit;; + *) libsuff= shlibsuff= libmagic=never-match;; + esac + ;; + esac + shlibpath_var=LD_LIBRARY${shlibsuff}_PATH + shlibpath_overrides_runpath=no + sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" + sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" + hardcode_into_libs=yes + ;; + +# No shared lib support for Linux oldld, aout, or coff. +linux*oldld* | linux*aout* | linux*coff*) + dynamic_linker=no + ;; + +# This must be glibc/ELF. +linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + + # Some binutils ld are patched to set DT_RUNPATH + if ${lt_cv_shlibpath_overrides_runpath+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_shlibpath_overrides_runpath=no + save_LDFLAGS=$LDFLAGS + save_libdir=$libdir + eval "libdir=/foo; wl=\"$lt_prog_compiler_wl\"; \ + LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec\"" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + if ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null; then : + lt_cv_shlibpath_overrides_runpath=yes +fi +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + LDFLAGS=$save_LDFLAGS + libdir=$save_libdir + +fi + + shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath + + # This implies no fast_install, which is unacceptable. + # Some rework will be needed to allow for fast_install + # before this can be enabled. 
+ hardcode_into_libs=yes + + # Append ld.so.conf contents to the search path + if test -f /etc/ld.so.conf; then + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '` + sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" + fi + + # We used to test for /lib/ld.so.1 and disable shared libraries on + # powerpc, because MkLinux only supported shared libraries with the + # GNU dynamic linker. Since this was broken with cross compilers, + # most powerpc-linux boxes support dynamic linking these days and + # people can always --disable-shared, the test was removed, and we + # assume the GNU/Linux dynamic linker is in use. + dynamic_linker='GNU/Linux ld.so' + ;; + +netbsdelf*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='NetBSD ld.elf_so' + ;; + +netbsd*) + version_type=sunos + need_lib_prefix=no + need_version=no + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + dynamic_linker='NetBSD (a.out) ld.so' + else + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='NetBSD ld.elf_so' + fi + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + +newsos6) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +*nto* | *qnx*) + version_type=qnx + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='ldqnx.so' + ;; + +openbsd*) + version_type=sunos + sys_lib_dlsearch_path_spec="/usr/lib" + need_lib_prefix=no + # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs. 
+ case $host_os in + openbsd3.3 | openbsd3.3.*) need_version=yes ;; + *) need_version=no ;; + esac + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + shlibpath_var=LD_LIBRARY_PATH + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + case $host_os in + openbsd2.[89] | openbsd2.[89].*) + shlibpath_overrides_runpath=no + ;; + *) + shlibpath_overrides_runpath=yes + ;; + esac + else + shlibpath_overrides_runpath=yes + fi + ;; + +os2*) + libname_spec='$name' + shrext_cmds=".dll" + need_lib_prefix=no + library_names_spec='$libname${shared_ext} $libname.a' + dynamic_linker='OS/2 ld.exe' + shlibpath_var=LIBPATH + ;; + +osf3* | osf4* | osf5*) + version_type=osf + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" + sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" + ;; + +rdos*) + dynamic_linker=no + ;; + +solaris*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + # ldd complains unless libraries are executable + postinstall_cmds='chmod +x $lib' + ;; + +sunos4*) + version_type=sunos + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + if test "$with_gnu_ld" = yes; then + need_lib_prefix=no + fi + need_version=yes + ;; + +sysv4 | sysv4.3*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + case $host_vendor in + sni) + shlibpath_overrides_runpath=no + need_lib_prefix=no + runpath_var=LD_RUN_PATH + ;; + siemens) + need_lib_prefix=no + ;; + motorola) + need_lib_prefix=no + need_version=no + shlibpath_overrides_runpath=no + sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' + ;; + esac + ;; + +sysv4*MP*) + if test -d /usr/nec ;then + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' + soname_spec='$libname${shared_ext}.$major' + shlibpath_var=LD_LIBRARY_PATH + fi + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + version_type=freebsd-elf + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + if test "$with_gnu_ld" = yes; then + sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib 
/usr/ccs/lib /usr/lib /lib' + else + sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' + case $host_os in + sco3.2v5*) + sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" + ;; + esac + fi + sys_lib_dlsearch_path_spec='/usr/lib' + ;; + +tpf*) + # TPF is a cross-target only. Preferred cross-host = GNU/Linux. + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +uts4*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +*) + dynamic_linker=no + ;; +esac +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5 +$as_echo "$dynamic_linker" >&6; } +test "$dynamic_linker" = no && can_build_shared=no + +variables_saved_for_relink="PATH $shlibpath_var $runpath_var" +if test "$GCC" = yes; then + variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" +fi + +if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then + sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec" +fi +if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then + sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec" +fi + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5 +$as_echo_n "checking how to hardcode library paths into programs... " >&6; } +hardcode_action= +if test -n "$hardcode_libdir_flag_spec" || + test -n "$runpath_var" || + test "X$hardcode_automatic" = "Xyes" ; then + + # We can hardcode non-existent directories. + if test "$hardcode_direct" != no && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one + ## test "$_LT_TAGVAR(hardcode_shlibpath_var, )" != no && + test "$hardcode_minus_L" != no; then + # Linking always hardcodes the temporary library directory. + hardcode_action=relink + else + # We can link without hardcoding, and we can hardcode nonexisting dirs. + hardcode_action=immediate + fi +else + # We cannot hardcode anything, or else we can only hardcode existing + # directories. 
+ hardcode_action=unsupported +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $hardcode_action" >&5 +$as_echo "$hardcode_action" >&6; } + +if test "$hardcode_action" = relink || + test "$inherit_rpath" = yes; then + # Fast installation is not supported + enable_fast_install=no +elif test "$shlibpath_overrides_runpath" = yes || + test "$enable_shared" = no; then + # Fast installation is not necessary + enable_fast_install=needless +fi + + + + + + + if test "x$enable_dlopen" != xyes; then + enable_dlopen=unknown + enable_dlopen_self=unknown + enable_dlopen_self_static=unknown +else + lt_cv_dlopen=no + lt_cv_dlopen_libs= + + case $host_os in + beos*) + lt_cv_dlopen="load_add_on" + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + ;; + + mingw* | pw32* | cegcc*) + lt_cv_dlopen="LoadLibrary" + lt_cv_dlopen_libs= + ;; + + cygwin*) + lt_cv_dlopen="dlopen" + lt_cv_dlopen_libs= + ;; + + darwin*) + # if libdl is installed we need to link against it + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 +$as_echo_n "checking for dlopen in -ldl... " >&6; } +if ${ac_cv_lib_dl_dlopen+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldl $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char dlopen (); +int +main () +{ +return dlopen (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_dl_dlopen=yes +else + ac_cv_lib_dl_dlopen=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 +$as_echo "$ac_cv_lib_dl_dlopen" >&6; } +if test "x$ac_cv_lib_dl_dlopen" = xyes; then : + lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl" +else + + lt_cv_dlopen="dyld" + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + +fi + + ;; + + *) + ac_fn_c_check_func "$LINENO" "shl_load" "ac_cv_func_shl_load" +if test "x$ac_cv_func_shl_load" = xyes; then : + lt_cv_dlopen="shl_load" +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for shl_load in -ldld" >&5 +$as_echo_n "checking for shl_load in -ldld... " >&6; } +if ${ac_cv_lib_dld_shl_load+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldld $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char shl_load (); +int +main () +{ +return shl_load (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_dld_shl_load=yes +else + ac_cv_lib_dld_shl_load=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_shl_load" >&5 +$as_echo "$ac_cv_lib_dld_shl_load" >&6; } +if test "x$ac_cv_lib_dld_shl_load" = xyes; then : + lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-ldld" +else + ac_fn_c_check_func "$LINENO" "dlopen" "ac_cv_func_dlopen" +if test "x$ac_cv_func_dlopen" = xyes; then : + lt_cv_dlopen="dlopen" +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 +$as_echo_n "checking for dlopen in -ldl... " >&6; } +if ${ac_cv_lib_dl_dlopen+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldl $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char dlopen (); +int +main () +{ +return dlopen (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_dl_dlopen=yes +else + ac_cv_lib_dl_dlopen=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 +$as_echo "$ac_cv_lib_dl_dlopen" >&6; } +if test "x$ac_cv_lib_dl_dlopen" = xyes; then : + lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl" +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -lsvld" >&5 +$as_echo_n "checking for dlopen in -lsvld... " >&6; } +if ${ac_cv_lib_svld_dlopen+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lsvld $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char dlopen (); +int +main () +{ +return dlopen (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_svld_dlopen=yes +else + ac_cv_lib_svld_dlopen=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_svld_dlopen" >&5 +$as_echo "$ac_cv_lib_svld_dlopen" >&6; } +if test "x$ac_cv_lib_svld_dlopen" = xyes; then : + lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld" +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dld_link in -ldld" >&5 +$as_echo_n "checking for dld_link in -ldld... " >&6; } +if ${ac_cv_lib_dld_dld_link+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldld $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char dld_link (); +int +main () +{ +return dld_link (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_dld_dld_link=yes +else + ac_cv_lib_dld_dld_link=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_dld_link" >&5 +$as_echo "$ac_cv_lib_dld_dld_link" >&6; } +if test "x$ac_cv_lib_dld_dld_link" = xyes; then : + lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-ldld" +fi + + +fi + + +fi + + +fi + + +fi + + +fi + + ;; + esac + + if test "x$lt_cv_dlopen" != xno; then + enable_dlopen=yes + else + enable_dlopen=no + fi + + case $lt_cv_dlopen in + dlopen) + save_CPPFLAGS="$CPPFLAGS" + test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" + + save_LDFLAGS="$LDFLAGS" + wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" + + save_LIBS="$LIBS" + LIBS="$lt_cv_dlopen_libs $LIBS" + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program can dlopen itself" >&5 +$as_echo_n "checking whether a program can dlopen itself... " >&6; } +if ${lt_cv_dlopen_self+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test "$cross_compiling" = yes; then : + lt_cv_dlopen_self=cross +else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +#line $LINENO "configure" +#include "confdefs.h" + +#if HAVE_DLFCN_H +#include +#endif + +#include + +#ifdef RTLD_GLOBAL +# define LT_DLGLOBAL RTLD_GLOBAL +#else +# ifdef DL_GLOBAL +# define LT_DLGLOBAL DL_GLOBAL +# else +# define LT_DLGLOBAL 0 +# endif +#endif + +/* We may have to define LT_DLLAZY_OR_NOW in the command line if we + find out it does not work in some platform. */ +#ifndef LT_DLLAZY_OR_NOW +# ifdef RTLD_LAZY +# define LT_DLLAZY_OR_NOW RTLD_LAZY +# else +# ifdef DL_LAZY +# define LT_DLLAZY_OR_NOW DL_LAZY +# else +# ifdef RTLD_NOW +# define LT_DLLAZY_OR_NOW RTLD_NOW +# else +# ifdef DL_NOW +# define LT_DLLAZY_OR_NOW DL_NOW +# else +# define LT_DLLAZY_OR_NOW 0 +# endif +# endif +# endif +# endif +#endif + +/* When -fvisbility=hidden is used, assume the code has been annotated + correspondingly for the symbols needed. */ +#if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) +int fnord () __attribute__((visibility("default"))); +#endif + +int fnord () { return 42; } +int main () +{ + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); + int status = $lt_dlunknown; + + if (self) + { + if (dlsym (self,"fnord")) status = $lt_dlno_uscore; + else + { + if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; + else puts (dlerror ()); + } + /* dlclose (self); */ + } + else + puts (dlerror ()); + + return status; +} +_LT_EOF + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 + (eval $ac_link) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && test -s conftest${ac_exeext} 2>/dev/null; then + (./conftest; exit; ) >&5 2>/dev/null + lt_status=$? 
+ case x$lt_status in + x$lt_dlno_uscore) lt_cv_dlopen_self=yes ;; + x$lt_dlneed_uscore) lt_cv_dlopen_self=yes ;; + x$lt_dlunknown|x*) lt_cv_dlopen_self=no ;; + esac + else : + # compilation failed + lt_cv_dlopen_self=no + fi +fi +rm -fr conftest* + + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self" >&5 +$as_echo "$lt_cv_dlopen_self" >&6; } + + if test "x$lt_cv_dlopen_self" = xyes; then + wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\" + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a statically linked program can dlopen itself" >&5 +$as_echo_n "checking whether a statically linked program can dlopen itself... " >&6; } +if ${lt_cv_dlopen_self_static+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test "$cross_compiling" = yes; then : + lt_cv_dlopen_self_static=cross +else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +#line $LINENO "configure" +#include "confdefs.h" + +#if HAVE_DLFCN_H +#include +#endif + +#include + +#ifdef RTLD_GLOBAL +# define LT_DLGLOBAL RTLD_GLOBAL +#else +# ifdef DL_GLOBAL +# define LT_DLGLOBAL DL_GLOBAL +# else +# define LT_DLGLOBAL 0 +# endif +#endif + +/* We may have to define LT_DLLAZY_OR_NOW in the command line if we + find out it does not work in some platform. */ +#ifndef LT_DLLAZY_OR_NOW +# ifdef RTLD_LAZY +# define LT_DLLAZY_OR_NOW RTLD_LAZY +# else +# ifdef DL_LAZY +# define LT_DLLAZY_OR_NOW DL_LAZY +# else +# ifdef RTLD_NOW +# define LT_DLLAZY_OR_NOW RTLD_NOW +# else +# ifdef DL_NOW +# define LT_DLLAZY_OR_NOW DL_NOW +# else +# define LT_DLLAZY_OR_NOW 0 +# endif +# endif +# endif +# endif +#endif + +/* When -fvisbility=hidden is used, assume the code has been annotated + correspondingly for the symbols needed. */ +#if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) +int fnord () __attribute__((visibility("default"))); +#endif + +int fnord () { return 42; } +int main () +{ + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); + int status = $lt_dlunknown; + + if (self) + { + if (dlsym (self,"fnord")) status = $lt_dlno_uscore; + else + { + if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; + else puts (dlerror ()); + } + /* dlclose (self); */ + } + else + puts (dlerror ()); + + return status; +} +_LT_EOF + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 + (eval $ac_link) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && test -s conftest${ac_exeext} 2>/dev/null; then + (./conftest; exit; ) >&5 2>/dev/null + lt_status=$? 
+ case x$lt_status in + x$lt_dlno_uscore) lt_cv_dlopen_self_static=yes ;; + x$lt_dlneed_uscore) lt_cv_dlopen_self_static=yes ;; + x$lt_dlunknown|x*) lt_cv_dlopen_self_static=no ;; + esac + else : + # compilation failed + lt_cv_dlopen_self_static=no + fi +fi +rm -fr conftest* + + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self_static" >&5 +$as_echo "$lt_cv_dlopen_self_static" >&6; } + fi + + CPPFLAGS="$save_CPPFLAGS" + LDFLAGS="$save_LDFLAGS" + LIBS="$save_LIBS" + ;; + esac + + case $lt_cv_dlopen_self in + yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; + *) enable_dlopen_self=unknown ;; + esac + + case $lt_cv_dlopen_self_static in + yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; + *) enable_dlopen_self_static=unknown ;; + esac +fi + + + + + + + + + + + + + + + + + +striplib= +old_striplib= +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether stripping libraries is possible" >&5 +$as_echo_n "checking whether stripping libraries is possible... " >&6; } +if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then + test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" + test -z "$striplib" && striplib="$STRIP --strip-unneeded" + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else +# FIXME - insert some real tests, host_os isn't really good enough + case $host_os in + darwin*) + if test -n "$STRIP" ; then + striplib="$STRIP -x" + old_striplib="$STRIP -S" + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + fi + ;; + *) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + ;; + esac +fi + + + + + + + + + + + + + # Report which library types will actually be built + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if libtool supports shared libraries" >&5 +$as_echo_n "checking if libtool supports shared libraries... " >&6; } + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $can_build_shared" >&5 +$as_echo "$can_build_shared" >&6; } + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build shared libraries" >&5 +$as_echo_n "checking whether to build shared libraries... " >&6; } + test "$can_build_shared" = "no" && enable_shared=no + + # On AIX, shared libraries and static libraries use the same namespace, and + # are all built from PIC. + case $host_os in + aix3*) + test "$enable_shared" = yes && enable_static=no + if test -n "$RANLIB"; then + archive_cmds="$archive_cmds~\$RANLIB \$lib" + postinstall_cmds='$RANLIB $lib' + fi + ;; + + aix[4-9]*) + if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then + test "$enable_shared" = yes && enable_static=no + fi + ;; + esac + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_shared" >&5 +$as_echo "$enable_shared" >&6; } + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build static libraries" >&5 +$as_echo_n "checking whether to build static libraries... " >&6; } + # Make sure either enable_shared or enable_static is yes. 
+ test "$enable_shared" = yes || enable_static=yes + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_static" >&5 +$as_echo "$enable_static" >&6; } + + + + +fi +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +CC="$lt_save_CC" + + if test -n "$CXX" && ( test "X$CXX" != "Xno" && + ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) || + (test "X$CXX" != "Xg++"))) ; then + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C++ preprocessor" >&5 +$as_echo_n "checking how to run the C++ preprocessor... " >&6; } +if test -z "$CXXCPP"; then + if ${ac_cv_prog_CXXCPP+:} false; then : + $as_echo_n "(cached) " >&6 +else + # Double quotes because CXXCPP needs to be expanded + for CXXCPP in "$CXX -E" "/lib/cpp" + do + ac_preproc_ok=false +for ac_cxx_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if ac_fn_cxx_try_cpp "$LINENO"; then : + +else + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.i conftest.$ac_ext + + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +_ACEOF +if ac_fn_cxx_try_cpp "$LINENO"; then : + # Broken: success on invalid input. +continue +else + # Passes both tests. +ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.i conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.i conftest.err conftest.$ac_ext +if $ac_preproc_ok; then : + break +fi + + done + ac_cv_prog_CXXCPP=$CXXCPP + +fi + CXXCPP=$ac_cv_prog_CXXCPP +else + ac_cv_prog_CXXCPP=$CXXCPP +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXXCPP" >&5 +$as_echo "$CXXCPP" >&6; } +ac_preproc_ok=false +for ac_cxx_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if ac_fn_cxx_try_cpp "$LINENO"; then : + +else + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.i conftest.$ac_ext + + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +_ACEOF +if ac_fn_cxx_try_cpp "$LINENO"; then : + # Broken: success on invalid input. 
+continue +else + # Passes both tests. +ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.i conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.i conftest.err conftest.$ac_ext +if $ac_preproc_ok; then : + +else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "C++ preprocessor \"$CXXCPP\" fails sanity check +See \`config.log' for more details" "$LINENO" 5; } +fi + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +else + _lt_caught_CXX_error=yes +fi + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +archive_cmds_need_lc_CXX=no +allow_undefined_flag_CXX= +always_export_symbols_CXX=no +archive_expsym_cmds_CXX= +compiler_needs_object_CXX=no +export_dynamic_flag_spec_CXX= +hardcode_direct_CXX=no +hardcode_direct_absolute_CXX=no +hardcode_libdir_flag_spec_CXX= +hardcode_libdir_separator_CXX= +hardcode_minus_L_CXX=no +hardcode_shlibpath_var_CXX=unsupported +hardcode_automatic_CXX=no +inherit_rpath_CXX=no +module_cmds_CXX= +module_expsym_cmds_CXX= +link_all_deplibs_CXX=unknown +old_archive_cmds_CXX=$old_archive_cmds +reload_flag_CXX=$reload_flag +reload_cmds_CXX=$reload_cmds +no_undefined_flag_CXX= +whole_archive_flag_spec_CXX= +enable_shared_with_static_runtimes_CXX=no + +# Source file extension for C++ test sources. +ac_ext=cpp + +# Object file extension for compiled C++ test sources. +objext=o +objext_CXX=$objext + +# No sense in running all these tests if we already determined that +# the CXX compiler isn't working. Some variables (like enable_shared) +# are currently assumed to apply to all compilers on this platform, +# and will be corrupted by setting them based on a non-working compiler. +if test "$_lt_caught_CXX_error" != yes; then + # Code to be used in simple compile tests + lt_simple_compile_test_code="int some_variable = 0;" + + # Code to be used in simple link tests + lt_simple_link_test_code='int main(int, char *[]) { return(0); }' + + # ltmain only uses $CC for tagged configurations so make sure $CC is set. + + + + + + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# If no C compiler flags were specified, use CFLAGS. +LTCFLAGS=${LTCFLAGS-"$CFLAGS"} + +# Allow CC to be a program name with arguments. +compiler=$CC + + + # save warnings/boilerplate of simple test code + ac_outfile=conftest.$ac_objext +echo "$lt_simple_compile_test_code" >conftest.$ac_ext +eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_compiler_boilerplate=`cat conftest.err` +$RM conftest* + + ac_outfile=conftest.$ac_objext +echo "$lt_simple_link_test_code" >conftest.$ac_ext +eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_linker_boilerplate=`cat conftest.err` +$RM -r conftest* + + + # Allow CC to be a program name with arguments. 
+ lt_save_CC=$CC
+ lt_save_CFLAGS=$CFLAGS
+ lt_save_LD=$LD
+ lt_save_GCC=$GCC
+ GCC=$GXX
+ lt_save_with_gnu_ld=$with_gnu_ld
+ lt_save_path_LD=$lt_cv_path_LD
+ if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then
+ lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx
+ else
+ $as_unset lt_cv_prog_gnu_ld
+ fi
+ if test -n "${lt_cv_path_LDCXX+set}"; then
+ lt_cv_path_LD=$lt_cv_path_LDCXX
+ else
+ $as_unset lt_cv_path_LD
+ fi
+ test -z "${LDCXX+set}" || LD=$LDCXX
+ CC=${CXX-"c++"}
+ CFLAGS=$CXXFLAGS
+ compiler=$CC
+ compiler_CXX=$CC
+ for cc_temp in $compiler""; do
+ case $cc_temp in
+ compile | *[\\/]compile | ccache | *[\\/]ccache ) ;;
+ distcc | *[\\/]distcc | purify | *[\\/]purify ) ;;
+ \-*) ;;
+ *) break;;
+ esac
+done
+cc_basename=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"`
+
+
+ if test -n "$compiler"; then
+ # We don't want -fno-exception when compiling C++ code, so set the
+ # no_builtin_flag separately
+ if test "$GXX" = yes; then
+ lt_prog_compiler_no_builtin_flag_CXX=' -fno-builtin'
+ else
+ lt_prog_compiler_no_builtin_flag_CXX=
+ fi
+
+ if test "$GXX" = yes; then
+ # Set up default GNU C++ configuration
+
+
+
+# Check whether --with-gnu-ld was given.
+if test "${with_gnu_ld+set}" = set; then :
+ withval=$with_gnu_ld; test "$withval" = no || with_gnu_ld=yes
+else
+ with_gnu_ld=no
+fi
+
+ac_prog=ld
+if test "$GCC" = yes; then
+ # Check if gcc -print-prog-name=ld gives a path.
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5
+$as_echo_n "checking for ld used by $CC... " >&6; }
+ case $host in
+ *-*-mingw*)
+ # gcc leaves a trailing carriage return which upsets mingw
+ ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;;
+ *)
+ ac_prog=`($CC -print-prog-name=ld) 2>&5` ;;
+ esac
+ case $ac_prog in
+ # Accept absolute paths.
+ [\\/]* | ?:[\\/]*)
+ re_direlt='/[^/][^/]*/\.\./'
+ # Canonicalize the pathname of ld
+ ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'`
+ while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do
+ ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"`
+ done
+ test -z "$LD" && LD="$ac_prog"
+ ;;
+ "")
+ # If it fails, then pretend we aren't using GCC.
+ ac_prog=ld
+ ;;
+ *)
+ # If it is relative, then search for the first ld in PATH.
+ with_gnu_ld=unknown
+ ;;
+ esac
+elif test "$with_gnu_ld" = yes; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5
+$as_echo_n "checking for GNU ld... " >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5
+$as_echo_n "checking for non-GNU ld... " >&6; }
+fi
+if ${lt_cv_path_LD+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -z "$LD"; then
+ lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+ for ac_dir in $PATH; do
+ IFS="$lt_save_ifs"
+ test -z "$ac_dir" && ac_dir=.
+ if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then
+ lt_cv_path_LD="$ac_dir/$ac_prog"
+ # Check to see if the program is GNU ld. I'd rather use --version,
+ # but apparently some variants of GNU ld only accept -v.
+ # Break only if it was the GNU/non-GNU ld that we prefer.
+ case `"$lt_cv_path_LD" -v 2>&1 </dev/null` in
+ *GNU* | *'with BFD'*)
+ test "$with_gnu_ld" != no && break
+ ;;
+ *)
+ test "$with_gnu_ld" != yes && break
+ ;;
+ esac
+ fi
+ done
+ IFS="$lt_save_ifs"
+else
+ lt_cv_path_LD="$LD" # Let the user override the test with a path.
+fi
+fi
+
+LD="$lt_cv_path_LD"
+if test -n "$LD"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LD" >&5
+$as_echo "$LD" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5
+$as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; }
" >&6; } +if ${lt_cv_prog_gnu_ld+:} false; then : + $as_echo_n "(cached) " >&6 +else + # I'd rather use --version here, but apparently some GNU lds only accept -v. +case `$LD -v 2>&1 &5 +$as_echo "$lt_cv_prog_gnu_ld" >&6; } +with_gnu_ld=$lt_cv_prog_gnu_ld + + + + + + + + # Check if GNU C++ uses GNU ld as the underlying linker, since the + # archiving commands below assume that GNU ld is being used. + if test "$with_gnu_ld" = yes; then + archive_cmds_CXX='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_CXX='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + + hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' + export_dynamic_flag_spec_CXX='${wl}--export-dynamic' + + # If archive_cmds runs LD, not CC, wlarc should be empty + # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to + # investigate it a little bit more. (MM) + wlarc='${wl}' + + # ancient GNU ld didn't support --whole-archive et. al. + if eval "`$CC -print-prog-name=ld` --help 2>&1" | + $GREP 'no-whole-archive' > /dev/null; then + whole_archive_flag_spec_CXX="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + else + whole_archive_flag_spec_CXX= + fi + else + with_gnu_ld=no + wlarc= + + # A generic and very simple default shared library creation + # command for GNU C++ for the case where it uses the native + # linker, instead of GNU ld. If possible, this setting should + # overridden to take advantage of the native linker features on + # the platform it is being used on. + archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' + fi + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' + + else + GXX=no + with_gnu_ld=no + wlarc= + fi + + # PORTME: fill in a description of your system's C++ link characteristics + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 +$as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; } + ld_shlibs_CXX=yes + case $host_os in + aix3*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + aix[4-9]*) + if test "$host_cpu" = ia64; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag="" + else + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # need to do runtime linking. + case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*) + for ld_flag in $LDFLAGS; do + case $ld_flag in + *-brtl*) + aix_use_runtimelinking=yes + break + ;; + esac + done + ;; + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. 
In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. + + archive_cmds_CXX='' + hardcode_direct_CXX=yes + hardcode_direct_absolute_CXX=yes + hardcode_libdir_separator_CXX=':' + link_all_deplibs_CXX=yes + file_list_spec_CXX='${wl}-f,' + + if test "$GXX" = yes; then + case $host_os in aix4.[012]|aix4.[012].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`${CC} -print-prog-name=collect2` + if test -f "$collect2name" && + strings "$collect2name" | $GREP resolve_lib_name >/dev/null + then + # We have reworked collect2 + : + else + # We have old collect2 + hardcode_direct_CXX=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + hardcode_minus_L_CXX=yes + hardcode_libdir_flag_spec_CXX='-L$libdir' + hardcode_libdir_separator_CXX= + fi + esac + shared_flag='-shared' + if test "$aix_use_runtimelinking" = yes; then + shared_flag="$shared_flag "'${wl}-G' + fi + else + # not using gcc + if test "$host_cpu" = ia64; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. The following line is correct: + shared_flag='-G' + else + if test "$aix_use_runtimelinking" = yes; then + shared_flag='${wl}-G' + else + shared_flag='${wl}-bM:SRE' + fi + fi + fi + + export_dynamic_flag_spec_CXX='${wl}-bexpall' + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to + # export. + always_export_symbols_CXX=yes + if test "$aix_use_runtimelinking" = yes; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + allow_undefined_flag_CXX='-berok' + # Determine the default libpath from the value encoded in an empty + # executable. + if test "${lt_cv_aix_libpath+set}" = set; then + aix_libpath=$lt_cv_aix_libpath +else + if ${lt_cv_aix_libpath__CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + + lt_aix_libpath_sed=' + /Import File Strings/,/^$/ { + /^0/ { + s/^0 *\([^ ]*\) *$/\1/ + p + } + }' + lt_cv_aix_libpath__CXX=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + # Check for a 64-bit object if we didn't find anything. 
+ if test -z "$lt_cv_aix_libpath__CXX"; then + lt_cv_aix_libpath__CXX=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + fi +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + if test -z "$lt_cv_aix_libpath__CXX"; then + lt_cv_aix_libpath__CXX="/usr/lib:/lib" + fi + +fi + + aix_libpath=$lt_cv_aix_libpath__CXX +fi + + hardcode_libdir_flag_spec_CXX='${wl}-blibpath:$libdir:'"$aix_libpath" + + archive_expsym_cmds_CXX='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" + else + if test "$host_cpu" = ia64; then + hardcode_libdir_flag_spec_CXX='${wl}-R $libdir:/usr/lib:/lib' + allow_undefined_flag_CXX="-z nodefs" + archive_expsym_cmds_CXX="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an + # empty executable. + if test "${lt_cv_aix_libpath+set}" = set; then + aix_libpath=$lt_cv_aix_libpath +else + if ${lt_cv_aix_libpath__CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + + lt_aix_libpath_sed=' + /Import File Strings/,/^$/ { + /^0/ { + s/^0 *\([^ ]*\) *$/\1/ + p + } + }' + lt_cv_aix_libpath__CXX=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + # Check for a 64-bit object if we didn't find anything. + if test -z "$lt_cv_aix_libpath__CXX"; then + lt_cv_aix_libpath__CXX=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + fi +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + if test -z "$lt_cv_aix_libpath__CXX"; then + lt_cv_aix_libpath__CXX="/usr/lib:/lib" + fi + +fi + + aix_libpath=$lt_cv_aix_libpath__CXX +fi + + hardcode_libdir_flag_spec_CXX='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + no_undefined_flag_CXX=' ${wl}-bernotok' + allow_undefined_flag_CXX=' ${wl}-berok' + if test "$with_gnu_ld" = yes; then + # We only use this code for GNU lds that support --whole-archive. + whole_archive_flag_spec_CXX='${wl}--whole-archive$convenience ${wl}--no-whole-archive' + else + # Exported symbols can be pulled into shared objects from archives + whole_archive_flag_spec_CXX='$convenience' + fi + archive_cmds_need_lc_CXX=yes + # This is similar to how AIX traditionally builds its shared + # libraries. + archive_expsym_cmds_CXX="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' + fi + fi + ;; + + beos*) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + allow_undefined_flag_CXX=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. 
FIXME + archive_cmds_CXX='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + else + ld_shlibs_CXX=no + fi + ;; + + chorus*) + case $cc_basename in + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + + cygwin* | mingw* | pw32* | cegcc*) + case $GXX,$cc_basename in + ,cl* | no,cl*) + # Native MSVC + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. + hardcode_libdir_flag_spec_CXX=' ' + allow_undefined_flag_CXX=unsupported + always_export_symbols_CXX=yes + file_list_spec_CXX='@' + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=".dll" + # FIXME: Setting linknames here is a bad hack. + archive_cmds_CXX='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' + archive_expsym_cmds_CXX='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + $SED -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; + else + $SED -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; + fi~ + $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ + linknames=' + # The linker will not automatically build a static lib if we build a DLL. + # _LT_TAGVAR(old_archive_from_new_cmds, CXX)='true' + enable_shared_with_static_runtimes_CXX=yes + # Don't use ranlib + old_postinstall_cmds_CXX='chmod 644 $oldlib' + postlink_cmds_CXX='lt_outputfile="@OUTPUT@"~ + lt_tool_outputfile="@TOOL_OUTPUT@"~ + case $lt_outputfile in + *.exe|*.EXE) ;; + *) + lt_outputfile="$lt_outputfile.exe" + lt_tool_outputfile="$lt_tool_outputfile.exe" + ;; + esac~ + func_to_tool_file "$lt_outputfile"~ + if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then + $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; + $RM "$lt_outputfile.manifest"; + fi' + ;; + *) + # g++ + # _LT_TAGVAR(hardcode_libdir_flag_spec, CXX) is actually meaningless, + # as there is no search path for DLLs. + hardcode_libdir_flag_spec_CXX='-L$libdir' + export_dynamic_flag_spec_CXX='${wl}--export-all-symbols' + allow_undefined_flag_CXX=unsupported + always_export_symbols_CXX=no + enable_shared_with_static_runtimes_CXX=yes + + if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then + archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + # If the export-symbols file already is a .def file (1st line + # is EXPORTS), use it as is; otherwise, prepend... 
+ archive_expsym_cmds_CXX='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + else + ld_shlibs_CXX=no + fi + ;; + esac + ;; + darwin* | rhapsody*) + + + archive_cmds_need_lc_CXX=no + hardcode_direct_CXX=no + hardcode_automatic_CXX=yes + hardcode_shlibpath_var_CXX=unsupported + if test "$lt_cv_ld_force_load" = "yes"; then + whole_archive_flag_spec_CXX='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience ${wl}-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' + + else + whole_archive_flag_spec_CXX='' + fi + link_all_deplibs_CXX=yes + allow_undefined_flag_CXX="$_lt_dar_allow_undefined" + case $cc_basename in + ifort*) _lt_dar_can_shared=yes ;; + *) _lt_dar_can_shared=$GCC ;; + esac + if test "$_lt_dar_can_shared" = "yes"; then + output_verbose_link_cmd=func_echo_all + archive_cmds_CXX="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}" + module_cmds_CXX="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}" + archive_expsym_cmds_CXX="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}" + module_expsym_cmds_CXX="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}" + if test "$lt_cv_apple_cc_single_mod" != "yes"; then + archive_cmds_CXX="\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dsymutil}" + archive_expsym_cmds_CXX="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dar_export_syms}${_lt_dsymutil}" + fi + + else + ld_shlibs_CXX=no + fi + + ;; + + dgux*) + case $cc_basename in + ec++*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + ghcx*) + # Green Hills C++ Compiler + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + + freebsd2.*) + # C++ shared libraries reported to be fairly broken before + # switch to ELF + ld_shlibs_CXX=no + ;; + + freebsd-elf*) + archive_cmds_need_lc_CXX=no + ;; + + freebsd* | dragonfly*) + # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF + # conventions + ld_shlibs_CXX=yes + ;; + + haiku*) + archive_cmds_CXX='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + link_all_deplibs_CXX=yes + ;; + + hpux9*) + hardcode_libdir_flag_spec_CXX='${wl}+b ${wl}$libdir' + 
hardcode_libdir_separator_CXX=: + export_dynamic_flag_spec_CXX='${wl}-E' + hardcode_direct_CXX=yes + hardcode_minus_L_CXX=yes # Not in the search PATH, + # but as the default + # location of the library. + + case $cc_basename in + CC*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + aCC*) + archive_cmds_CXX='$RM $output_objdir/$soname~$CC -b ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + ;; + *) + if test "$GXX" = yes; then + archive_cmds_CXX='$RM $output_objdir/$soname~$CC -shared -nostdlib $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + fi + ;; + esac + ;; + + hpux10*|hpux11*) + if test $with_gnu_ld = no; then + hardcode_libdir_flag_spec_CXX='${wl}+b ${wl}$libdir' + hardcode_libdir_separator_CXX=: + + case $host_cpu in + hppa*64*|ia64*) + ;; + *) + export_dynamic_flag_spec_CXX='${wl}-E' + ;; + esac + fi + case $host_cpu in + hppa*64*|ia64*) + hardcode_direct_CXX=no + hardcode_shlibpath_var_CXX=no + ;; + *) + hardcode_direct_CXX=yes + hardcode_direct_absolute_CXX=yes + hardcode_minus_L_CXX=yes # Not in the search PATH, + # but as the default + # location of the library. + ;; + esac + + case $cc_basename in + CC*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + aCC*) + case $host_cpu in + hppa*64*) + archive_cmds_CXX='$CC -b ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + ia64*) + archive_cmds_CXX='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + *) + archive_cmds_CXX='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + esac + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
+ output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + ;; + *) + if test "$GXX" = yes; then + if test $with_gnu_ld = no; then + case $host_cpu in + hppa*64*) + archive_cmds_CXX='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + ia64*) + archive_cmds_CXX='$CC -shared -nostdlib $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + *) + archive_cmds_CXX='$CC -shared -nostdlib $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + esac + fi + else + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + fi + ;; + esac + ;; + + interix[3-9]*) + hardcode_direct_CXX=no + hardcode_shlibpath_var_CXX=no + hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' + export_dynamic_flag_spec_CXX='${wl}-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. + # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. Moving up from 0x10000000 also allows more sbrk(2) space. + archive_cmds_CXX='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + archive_expsym_cmds_CXX='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; + irix5* | irix6*) + case $cc_basename in + CC*) + # SGI C++ + archive_cmds_CXX='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + + # Archives containing C++ object files must be created using + # "CC -ar", where "CC" is the IRIX C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. + old_archive_cmds_CXX='$CC -ar -WR,-u -o $oldlib $oldobjs' + ;; + *) + if test "$GXX" = yes; then + if test "$with_gnu_ld" = no; then + archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` -o $lib' + fi + fi + link_all_deplibs_CXX=yes + ;; + esac + hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator_CXX=: + inherit_rpath_CXX=yes + ;; + + linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. 
(KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. + archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + archive_expsym_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib ${wl}-retain-symbols-file,$export_symbols; mv \$templib $lib' + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + + hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' + export_dynamic_flag_spec_CXX='${wl}--export-dynamic' + + # Archives containing C++ object files must be created using + # "CC -Bstatic", where "CC" is the KAI C++ compiler. + old_archive_cmds_CXX='$CC -Bstatic -o $oldlib $oldobjs' + ;; + icpc* | ecpc* ) + # Intel C++ + with_gnu_ld=yes + # version 8.0 and above of icpc choke on multiply defined symbols + # if we add $predep_objects and $postdep_objects, however 7.1 and + # earlier do not add the objects themselves. 
+ case `$CC -V 2>&1` in + *"Version 7."*) + archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + ;; + *) # Version 8.0 or newer + tmp_idyn= + case $host_cpu in + ia64*) tmp_idyn=' -i_dynamic';; + esac + archive_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + ;; + esac + archive_cmds_need_lc_CXX=no + hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' + export_dynamic_flag_spec_CXX='${wl}--export-dynamic' + whole_archive_flag_spec_CXX='${wl}--whole-archive$convenience ${wl}--no-whole-archive' + ;; + pgCC* | pgcpp*) + # Portland Group C++ compiler + case `$CC -V` in + *pgCC\ [1-5].* | *pgcpp\ [1-5].*) + prelink_cmds_CXX='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~ + compile_command="$compile_command `find $tpldir -name \*.o | sort | $NL2SP`"' + old_archive_cmds_CXX='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~ + $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | sort | $NL2SP`~ + $RANLIB $oldlib' + archive_cmds_CXX='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ + $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' + archive_expsym_cmds_CXX='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ + $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' + ;; + *) # Version 6 and above use weak symbols + archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' + archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' + ;; + esac + + hardcode_libdir_flag_spec_CXX='${wl}--rpath ${wl}$libdir' + export_dynamic_flag_spec_CXX='${wl}--export-dynamic' + whole_archive_flag_spec_CXX='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + ;; + cxx*) + # Compaq C++ + archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib ${wl}-retain-symbols-file $wl$export_symbols' + + runpath_var=LD_RUN_PATH + hardcode_libdir_flag_spec_CXX='-rpath $libdir' + hardcode_libdir_separator_CXX=: + + # Commands to make compiler produce verbose output that 
lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "X$list" | $Xsed' + ;; + xl* | mpixl* | bgxl*) + # IBM XL 8.0 on PPC, with GNU ld + hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' + export_dynamic_flag_spec_CXX='${wl}--export-dynamic' + archive_cmds_CXX='$CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + if test "x$supports_anon_versioning" = xyes; then + archive_expsym_cmds_CXX='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' + fi + ;; + *) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) + # Sun C++ 5.9 + no_undefined_flag_CXX=' -zdefs' + archive_cmds_CXX='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + archive_expsym_cmds_CXX='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file ${wl}$export_symbols' + hardcode_libdir_flag_spec_CXX='-R$libdir' + whole_archive_flag_spec_CXX='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + compiler_needs_object_CXX=yes + + # Not sure whether something based on + # $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 + # would be better. + output_verbose_link_cmd='func_echo_all' + + # Archives containing C++ object files must be created using + # "CC -xar", where "CC" is the Sun C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. 
+ old_archive_cmds_CXX='$CC -xar -o $oldlib $oldobjs' + ;; + esac + ;; + esac + ;; + + lynxos*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + + m88k*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + + mvs*) + case $cc_basename in + cxx*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + archive_cmds_CXX='$LD -Bshareable -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags' + wlarc= + hardcode_libdir_flag_spec_CXX='-R$libdir' + hardcode_direct_CXX=yes + hardcode_shlibpath_var_CXX=no + fi + # Workaround some broken pre-1.5 toolchains + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"' + ;; + + *nto* | *qnx*) + ld_shlibs_CXX=yes + ;; + + openbsd2*) + # C++ shared libraries are fairly broken + ld_shlibs_CXX=no + ;; + + openbsd*) + if test -f /usr/libexec/ld.so; then + hardcode_direct_CXX=yes + hardcode_shlibpath_var_CXX=no + hardcode_direct_absolute_CXX=yes + archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' + hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file,$export_symbols -o $lib' + export_dynamic_flag_spec_CXX='${wl}-E' + whole_archive_flag_spec_CXX="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + fi + output_verbose_link_cmd=func_echo_all + else + ld_shlibs_CXX=no + fi + ;; + + osf3* | osf4* | osf5*) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. (KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. + archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + + hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' + hardcode_libdir_separator_CXX=: + + # Archives containing C++ object files must be created using + # the KAI C++ compiler. 
+ case $host in + osf3*) old_archive_cmds_CXX='$CC -Bstatic -o $oldlib $oldobjs' ;; + *) old_archive_cmds_CXX='$CC -o $oldlib $oldobjs' ;; + esac + ;; + RCC*) + # Rational C++ 2.4.1 + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + cxx*) + case $host in + osf3*) + allow_undefined_flag_CXX=' ${wl}-expect_unresolved ${wl}\*' + archive_cmds_CXX='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $soname `test -n "$verstring" && func_echo_all "${wl}-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' + ;; + *) + allow_undefined_flag_CXX=' -expect_unresolved \*' + archive_cmds_CXX='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + archive_expsym_cmds_CXX='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~ + echo "-hidden">> $lib.exp~ + $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname ${wl}-input ${wl}$lib.exp `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib~ + $RM $lib.exp' + hardcode_libdir_flag_spec_CXX='-rpath $libdir' + ;; + esac + + hardcode_libdir_separator_CXX=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + ;; + *) + if test "$GXX" = yes && test "$with_gnu_ld" = no; then + allow_undefined_flag_CXX=' ${wl}-expect_unresolved ${wl}\*' + case $host in + osf3*) + archive_cmds_CXX='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + ;; + *) + archive_cmds_CXX='$CC -shared $pic_flag -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + ;; + esac + + hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator_CXX=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. 
+ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' + + else + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + fi + ;; + esac + ;; + + psos*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + + sunos4*) + case $cc_basename in + CC*) + # Sun C++ 4.x + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + lcc*) + # Lucid + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + + solaris*) + case $cc_basename in + CC* | sunCC*) + # Sun C++ 4.2, 5.x and Centerline C++ + archive_cmds_need_lc_CXX=yes + no_undefined_flag_CXX=' -zdefs' + archive_cmds_CXX='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -G${allow_undefined_flag} ${wl}-M ${wl}$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + hardcode_libdir_flag_spec_CXX='-R$libdir' + hardcode_shlibpath_var_CXX=no + case $host_os in + solaris2.[0-5] | solaris2.[0-5].*) ;; + *) + # The compiler driver will combine and reorder linker options, + # but understands `-z linker_flag'. + # Supported since Solaris 2.6 (maybe 2.5.1?) + whole_archive_flag_spec_CXX='-z allextract$convenience -z defaultextract' + ;; + esac + link_all_deplibs_CXX=yes + + output_verbose_link_cmd='func_echo_all' + + # Archives containing C++ object files must be created using + # "CC -xar", where "CC" is the Sun C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. + old_archive_cmds_CXX='$CC -xar -o $oldlib $oldobjs' + ;; + gcx*) + # Green Hills C++ Compiler + archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + + # The C++ compiler must be used to create the archive. + old_archive_cmds_CXX='$CC $LDFLAGS -archive -o $oldlib $oldobjs' + ;; + *) + # GNU C++ compiler with Solaris linker + if test "$GXX" = yes && test "$with_gnu_ld" = no; then + no_undefined_flag_CXX=' ${wl}-z ${wl}defs' + if $CC --version | $GREP -v '^2\.7' > /dev/null; then + archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -shared $pic_flag -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' + else + # g++ 2.7 appears to require `-G' NOT `-shared' on this + # platform. 
+ archive_cmds_CXX='$CC -G -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -G -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' + fi + + hardcode_libdir_flag_spec_CXX='${wl}-R $wl$libdir' + case $host_os in + solaris2.[0-5] | solaris2.[0-5].*) ;; + *) + whole_archive_flag_spec_CXX='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' + ;; + esac + fi + ;; + esac + ;; + + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) + no_undefined_flag_CXX='${wl}-z,text' + archive_cmds_need_lc_CXX=no + hardcode_shlibpath_var_CXX=no + runpath_var='LD_RUN_PATH' + + case $cc_basename in + CC*) + archive_cmds_CXX='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_CXX='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + archive_cmds_CXX='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_CXX='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + ;; + + sysv5* | sco3.2v5* | sco5v6*) + # Note: We can NOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, which means just about no library would + # ever link correctly. If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. 
+ no_undefined_flag_CXX='${wl}-z,text' + allow_undefined_flag_CXX='${wl}-z,nodefs' + archive_cmds_need_lc_CXX=no + hardcode_shlibpath_var_CXX=no + hardcode_libdir_flag_spec_CXX='${wl}-R,$libdir' + hardcode_libdir_separator_CXX=':' + link_all_deplibs_CXX=yes + export_dynamic_flag_spec_CXX='${wl}-Bexport' + runpath_var='LD_RUN_PATH' + + case $cc_basename in + CC*) + archive_cmds_CXX='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_CXX='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + old_archive_cmds_CXX='$CC -Tprelink_objects $oldobjs~ + '"$old_archive_cmds_CXX" + reload_cmds_CXX='$CC -Tprelink_objects $reload_objs~ + '"$reload_cmds_CXX" + ;; + *) + archive_cmds_CXX='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_CXX='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + ;; + + tandem*) + case $cc_basename in + NCC*) + # NonStop-UX NCC 3.20 + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + + vxworks*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_CXX" >&5 +$as_echo "$ld_shlibs_CXX" >&6; } + test "$ld_shlibs_CXX" = no && can_build_shared=no + + GCC_CXX="$GXX" + LD_CXX="$LD" + + ## CAVEAT EMPTOR: + ## There is no encapsulation within the following macros, do not change + ## the running order or otherwise move them around unless you know exactly + ## what you are doing... + # Dependencies to place before and after the object being linked: +predep_objects_CXX= +postdep_objects_CXX= +predeps_CXX= +postdeps_CXX= +compiler_lib_search_path_CXX= + +cat > conftest.$ac_ext <<_LT_EOF +class Foo +{ +public: + Foo (void) { a = 0; } +private: + int a; +}; +_LT_EOF + + +_lt_libdeps_save_CFLAGS=$CFLAGS +case "$CC $CFLAGS " in #( +*\ -flto*\ *) CFLAGS="$CFLAGS -fno-lto" ;; +*\ -fwhopr*\ *) CFLAGS="$CFLAGS -fno-whopr" ;; +*\ -fuse-linker-plugin*\ *) CFLAGS="$CFLAGS -fno-use-linker-plugin" ;; +esac + +if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + # Parse the compiler output and extract the necessary + # objects, libraries and library flags. + + # Sentinel used to keep track of whether or not we are before + # the conftest object file. + pre_test_object_deps_done=no + + for p in `eval "$output_verbose_link_cmd"`; do + case ${prev}${p} in + + -L* | -R* | -l*) + # Some compilers place space between "-{L,R}" and the path. + # Remove the space. + if test $p = "-L" || + test $p = "-R"; then + prev=$p + continue + fi + + # Expand the sysroot to ease extracting the directories later. 
+ if test -z "$prev"; then + case $p in + -L*) func_stripname_cnf '-L' '' "$p"; prev=-L; p=$func_stripname_result ;; + -R*) func_stripname_cnf '-R' '' "$p"; prev=-R; p=$func_stripname_result ;; + -l*) func_stripname_cnf '-l' '' "$p"; prev=-l; p=$func_stripname_result ;; + esac + fi + case $p in + =*) func_stripname_cnf '=' '' "$p"; p=$lt_sysroot$func_stripname_result ;; + esac + if test "$pre_test_object_deps_done" = no; then + case ${prev} in + -L | -R) + # Internal compiler library paths should come after those + # provided the user. The postdeps already come after the + # user supplied libs so there is no need to process them. + if test -z "$compiler_lib_search_path_CXX"; then + compiler_lib_search_path_CXX="${prev}${p}" + else + compiler_lib_search_path_CXX="${compiler_lib_search_path_CXX} ${prev}${p}" + fi + ;; + # The "-l" case would never come before the object being + # linked, so don't bother handling this case. + esac + else + if test -z "$postdeps_CXX"; then + postdeps_CXX="${prev}${p}" + else + postdeps_CXX="${postdeps_CXX} ${prev}${p}" + fi + fi + prev= + ;; + + *.lto.$objext) ;; # Ignore GCC LTO objects + *.$objext) + # This assumes that the test object file only shows up + # once in the compiler output. + if test "$p" = "conftest.$objext"; then + pre_test_object_deps_done=yes + continue + fi + + if test "$pre_test_object_deps_done" = no; then + if test -z "$predep_objects_CXX"; then + predep_objects_CXX="$p" + else + predep_objects_CXX="$predep_objects_CXX $p" + fi + else + if test -z "$postdep_objects_CXX"; then + postdep_objects_CXX="$p" + else + postdep_objects_CXX="$postdep_objects_CXX $p" + fi + fi + ;; + + *) ;; # Ignore the rest. + + esac + done + + # Clean up. + rm -f a.out a.exe +else + echo "libtool.m4: error: problem compiling CXX test program" +fi + +$RM -f confest.$objext +CFLAGS=$_lt_libdeps_save_CFLAGS + +# PORTME: override above test on systems where it is broken +case $host_os in +interix[3-9]*) + # Interix 3.5 installs completely hosed .la files for C++, so rather than + # hack all around it, let's just trust "g++" to DTRT. + predep_objects_CXX= + postdep_objects_CXX= + postdeps_CXX= + ;; + +linux*) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) + # Sun C++ 5.9 + + # The more standards-conforming stlport4 library is + # incompatible with the Cstd library. Avoid specifying + # it if it's in CXXFLAGS. Ignore libCrun as + # -library=stlport4 depends on it. + case " $CXX $CXXFLAGS " in + *" -library=stlport4 "*) + solaris_use_stlport4=yes + ;; + esac + + if test "$solaris_use_stlport4" != yes; then + postdeps_CXX='-library=Cstd -library=Crun' + fi + ;; + esac + ;; + +solaris*) + case $cc_basename in + CC* | sunCC*) + # The more standards-conforming stlport4 library is + # incompatible with the Cstd library. Avoid specifying + # it if it's in CXXFLAGS. Ignore libCrun as + # -library=stlport4 depends on it. + case " $CXX $CXXFLAGS " in + *" -library=stlport4 "*) + solaris_use_stlport4=yes + ;; + esac + + # Adding this requires a known-good setup of shared libraries for + # Sun compiler versions before 5.6, else PIC objects from an old + # archive will be linked into the output, leading to subtle bugs. 
+ if test "$solaris_use_stlport4" != yes; then + postdeps_CXX='-library=Cstd -library=Crun' + fi + ;; + esac + ;; +esac + + +case " $postdeps_CXX " in +*" -lc "*) archive_cmds_need_lc_CXX=no ;; +esac + compiler_lib_search_dirs_CXX= +if test -n "${compiler_lib_search_path_CXX}"; then + compiler_lib_search_dirs_CXX=`echo " ${compiler_lib_search_path_CXX}" | ${SED} -e 's! -L! !g' -e 's!^ !!'` +fi + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + lt_prog_compiler_wl_CXX= +lt_prog_compiler_pic_CXX= +lt_prog_compiler_static_CXX= + + + # C++ specific cases for pic, static, wl, etc. + if test "$GXX" = yes; then + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_static_CXX='-static' + + case $host_os in + aix*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static_CXX='-Bstatic' + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + lt_prog_compiler_pic_CXX='-fPIC' + ;; + m68k) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the `-m68020' flag to GCC prevents building anything better, + # like `-m68040'. + lt_prog_compiler_pic_CXX='-m68020 -resident32 -malways-restore-a4' + ;; + esac + ;; + + beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. + ;; + mingw* | cygwin* | os2* | pw32* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + # Although the cygwin gcc ignores -fPIC, still need this for old-style + # (--disable-auto-import) libraries + lt_prog_compiler_pic_CXX='-DDLL_EXPORT' + ;; + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + lt_prog_compiler_pic_CXX='-fno-common' + ;; + *djgpp*) + # DJGPP does not support shared libraries at all + lt_prog_compiler_pic_CXX= + ;; + haiku*) + # PIC is the default for Haiku. + # The "-static" flag exists, but is broken. + lt_prog_compiler_static_CXX= + ;; + interix[3-9]*) + # Interix 3.x gcc -fpic/-fPIC options generate broken code. + # Instead, we relocate shared libraries at runtime. + ;; + sysv4*MP*) + if test -d /usr/nec; then + lt_prog_compiler_pic_CXX=-Kconform_pic + fi + ;; + hpux*) + # PIC is the default for 64-bit PA HP-UX, but not for 32-bit + # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag + # sets the default TLS model and affects inlining. + case $host_cpu in + hppa*64*) + ;; + *) + lt_prog_compiler_pic_CXX='-fPIC' + ;; + esac + ;; + *qnx* | *nto*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + lt_prog_compiler_pic_CXX='-fPIC -shared' + ;; + *) + lt_prog_compiler_pic_CXX='-fPIC' + ;; + esac + else + case $host_os in + aix[4-9]*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static_CXX='-Bstatic' + else + lt_prog_compiler_static_CXX='-bnso -bI:/lib/syscalls.exp' + fi + ;; + chorus*) + case $cc_basename in + cxch68*) + # Green Hills C++ Compiler + # _LT_TAGVAR(lt_prog_compiler_static, CXX)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a" + ;; + esac + ;; + mingw* | cygwin* | os2* | pw32* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). 
+ lt_prog_compiler_pic_CXX='-DDLL_EXPORT' + ;; + dgux*) + case $cc_basename in + ec++*) + lt_prog_compiler_pic_CXX='-KPIC' + ;; + ghcx*) + # Green Hills C++ Compiler + lt_prog_compiler_pic_CXX='-pic' + ;; + *) + ;; + esac + ;; + freebsd* | dragonfly*) + # FreeBSD uses GNU C++ + ;; + hpux9* | hpux10* | hpux11*) + case $cc_basename in + CC*) + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_static_CXX='${wl}-a ${wl}archive' + if test "$host_cpu" != ia64; then + lt_prog_compiler_pic_CXX='+Z' + fi + ;; + aCC*) + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_static_CXX='${wl}-a ${wl}archive' + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic_CXX='+Z' + ;; + esac + ;; + *) + ;; + esac + ;; + interix*) + # This is c89, which is MS Visual C++ (no shared libs) + # Anyone wants to do a port? + ;; + irix5* | irix6* | nonstopux*) + case $cc_basename in + CC*) + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_static_CXX='-non_shared' + # CC pic flag -KPIC is the default. + ;; + *) + ;; + esac + ;; + linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) + case $cc_basename in + KCC*) + # KAI C++ Compiler + lt_prog_compiler_wl_CXX='--backend -Wl,' + lt_prog_compiler_pic_CXX='-fPIC' + ;; + ecpc* ) + # old Intel C++ for x86_64 which still supported -KPIC. + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_pic_CXX='-KPIC' + lt_prog_compiler_static_CXX='-static' + ;; + icpc* ) + # Intel C++, used to be incompatible with GCC. + # ICC 10 doesn't accept -KPIC any more. + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_pic_CXX='-fPIC' + lt_prog_compiler_static_CXX='-static' + ;; + pgCC* | pgcpp*) + # Portland Group C++ compiler + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_pic_CXX='-fpic' + lt_prog_compiler_static_CXX='-Bstatic' + ;; + cxx*) + # Compaq C++ + # Make sure the PIC flag is empty. It appears that all Alpha + # Linux and Compaq Tru64 Unix objects are PIC. + lt_prog_compiler_pic_CXX= + lt_prog_compiler_static_CXX='-non_shared' + ;; + xlc* | xlC* | bgxl[cC]* | mpixl[cC]*) + # IBM XL 8.0, 9.0 on PPC and BlueGene + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_pic_CXX='-qpic' + lt_prog_compiler_static_CXX='-qstaticlink' + ;; + *) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) + # Sun C++ 5.9 + lt_prog_compiler_pic_CXX='-KPIC' + lt_prog_compiler_static_CXX='-Bstatic' + lt_prog_compiler_wl_CXX='-Qoption ld ' + ;; + esac + ;; + esac + ;; + lynxos*) + ;; + m88k*) + ;; + mvs*) + case $cc_basename in + cxx*) + lt_prog_compiler_pic_CXX='-W c,exportall' + ;; + *) + ;; + esac + ;; + netbsd* | netbsdelf*-gnu) + ;; + *qnx* | *nto*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + lt_prog_compiler_pic_CXX='-fPIC -shared' + ;; + osf3* | osf4* | osf5*) + case $cc_basename in + KCC*) + lt_prog_compiler_wl_CXX='--backend -Wl,' + ;; + RCC*) + # Rational C++ 2.4.1 + lt_prog_compiler_pic_CXX='-pic' + ;; + cxx*) + # Digital/Compaq C++ + lt_prog_compiler_wl_CXX='-Wl,' + # Make sure the PIC flag is empty. It appears that all Alpha + # Linux and Compaq Tru64 Unix objects are PIC. 
+ lt_prog_compiler_pic_CXX= + lt_prog_compiler_static_CXX='-non_shared' + ;; + *) + ;; + esac + ;; + psos*) + ;; + solaris*) + case $cc_basename in + CC* | sunCC*) + # Sun C++ 4.2, 5.x and Centerline C++ + lt_prog_compiler_pic_CXX='-KPIC' + lt_prog_compiler_static_CXX='-Bstatic' + lt_prog_compiler_wl_CXX='-Qoption ld ' + ;; + gcx*) + # Green Hills C++ Compiler + lt_prog_compiler_pic_CXX='-PIC' + ;; + *) + ;; + esac + ;; + sunos4*) + case $cc_basename in + CC*) + # Sun C++ 4.x + lt_prog_compiler_pic_CXX='-pic' + lt_prog_compiler_static_CXX='-Bstatic' + ;; + lcc*) + # Lucid + lt_prog_compiler_pic_CXX='-pic' + ;; + *) + ;; + esac + ;; + sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) + case $cc_basename in + CC*) + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_pic_CXX='-KPIC' + lt_prog_compiler_static_CXX='-Bstatic' + ;; + esac + ;; + tandem*) + case $cc_basename in + NCC*) + # NonStop-UX NCC 3.20 + lt_prog_compiler_pic_CXX='-KPIC' + ;; + *) + ;; + esac + ;; + vxworks*) + ;; + *) + lt_prog_compiler_can_build_shared_CXX=no + ;; + esac + fi + +case $host_os in + # For platforms which do not support PIC, -DPIC is meaningless: + *djgpp*) + lt_prog_compiler_pic_CXX= + ;; + *) + lt_prog_compiler_pic_CXX="$lt_prog_compiler_pic_CXX -DPIC" + ;; +esac + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 +$as_echo_n "checking for $compiler option to produce PIC... " >&6; } +if ${lt_cv_prog_compiler_pic_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_pic_CXX=$lt_prog_compiler_pic_CXX +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_CXX" >&5 +$as_echo "$lt_cv_prog_compiler_pic_CXX" >&6; } +lt_prog_compiler_pic_CXX=$lt_cv_prog_compiler_pic_CXX + +# +# Check to make sure the PIC flag actually works. +# +if test -n "$lt_prog_compiler_pic_CXX"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works" >&5 +$as_echo_n "checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works... " >&6; } +if ${lt_cv_prog_compiler_pic_works_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_pic_works_CXX=no + ac_outfile=conftest.$ac_objext + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="$lt_prog_compiler_pic_CXX -DPIC" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! 
-s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_pic_works_CXX=yes + fi + fi + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works_CXX" >&5 +$as_echo "$lt_cv_prog_compiler_pic_works_CXX" >&6; } + +if test x"$lt_cv_prog_compiler_pic_works_CXX" = xyes; then + case $lt_prog_compiler_pic_CXX in + "" | " "*) ;; + *) lt_prog_compiler_pic_CXX=" $lt_prog_compiler_pic_CXX" ;; + esac +else + lt_prog_compiler_pic_CXX= + lt_prog_compiler_can_build_shared_CXX=no +fi + +fi + + + + + +# +# Check to make sure the static flag actually works. +# +wl=$lt_prog_compiler_wl_CXX eval lt_tmp_static_flag=\"$lt_prog_compiler_static_CXX\" +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5 +$as_echo_n "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; } +if ${lt_cv_prog_compiler_static_works_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_static_works_CXX=no + save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS $lt_tmp_static_flag" + echo "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then + # The linker can only warn and ignore the option if not recognized + # So say no if there are warnings + if test -s conftest.err; then + # Append any errors to the config.log. + cat conftest.err 1>&5 + $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_static_works_CXX=yes + fi + else + lt_cv_prog_compiler_static_works_CXX=yes + fi + fi + $RM -r conftest* + LDFLAGS="$save_LDFLAGS" + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works_CXX" >&5 +$as_echo "$lt_cv_prog_compiler_static_works_CXX" >&6; } + +if test x"$lt_cv_prog_compiler_static_works_CXX" = xyes; then + : +else + lt_prog_compiler_static_CXX= +fi + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 +$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } +if ${lt_cv_prog_compiler_c_o_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_c_o_CXX=no + $RM -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! 
-s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + lt_cv_prog_compiler_c_o_CXX=yes + fi + fi + chmod u+w . 2>&5 + $RM conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files + $RM out/* && rmdir out + cd .. + $RM -r conftest + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o_CXX" >&5 +$as_echo "$lt_cv_prog_compiler_c_o_CXX" >&6; } + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 +$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } +if ${lt_cv_prog_compiler_c_o_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_c_o_CXX=no + $RM -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + lt_cv_prog_compiler_c_o_CXX=yes + fi + fi + chmod u+w . 2>&5 + $RM conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files + $RM out/* && rmdir out + cd .. + $RM -r conftest + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o_CXX" >&5 +$as_echo "$lt_cv_prog_compiler_c_o_CXX" >&6; } + + + + +hard_links="nottested" +if test "$lt_cv_prog_compiler_c_o_CXX" = no && test "$need_locks" != no; then + # do not overwrite the value of need_locks provided by the user + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5 +$as_echo_n "checking if we can lock with hard links... 
" >&6; } + hard_links=yes + $RM conftest* + ln conftest.a conftest.b 2>/dev/null && hard_links=no + touch conftest.a + ln conftest.a conftest.b 2>&5 || hard_links=no + ln conftest.a conftest.b 2>/dev/null && hard_links=no + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5 +$as_echo "$hard_links" >&6; } + if test "$hard_links" = no; then + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5 +$as_echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;} + need_locks=warn + fi +else + need_locks=no +fi + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 +$as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; } + + export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + exclude_expsyms_CXX='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' + case $host_os in + aix[4-9]*) + # If we're using GNU nm, then we don't want the "-C" option. + # -C means demangle to AIX nm, but means don't demangle with GNU nm + # Also, AIX nm treats weak defined symbols like other global defined + # symbols, whereas GNU nm marks them as "W". + if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then + export_symbols_cmds_CXX='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + else + export_symbols_cmds_CXX='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + fi + ;; + pw32*) + export_symbols_cmds_CXX="$ltdll_cmds" + ;; + cygwin* | mingw* | cegcc*) + case $cc_basename in + cl*) + exclude_expsyms_CXX='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' + ;; + *) + export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' + exclude_expsyms_CXX='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' + ;; + esac + ;; + linux* | k*bsd*-gnu | gnu*) + link_all_deplibs_CXX=no + ;; + *) + export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + ;; + esac + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_CXX" >&5 +$as_echo "$ld_shlibs_CXX" >&6; } +test "$ld_shlibs_CXX" = no && can_build_shared=no + +with_gnu_ld_CXX=$with_gnu_ld + + + + + + +# +# Do we need to explicitly link libc? +# +case "x$archive_cmds_need_lc_CXX" in +x|xyes) + # Assume -lc should be added + archive_cmds_need_lc_CXX=yes + + if test "$enable_shared" = yes && test "$GCC" = yes; then + case $archive_cmds_CXX in + *'~'*) + # FIXME: we may have to deal with multi-command sequences. + ;; + '$CC '*) + # Test whether the compiler implicitly links with -lc since on some + # systems, -lgcc has to come before -lc. If gcc already passes -lc + # to ld, don't add -lc before -lgcc. + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5 +$as_echo_n "checking whether -lc should be explicitly linked in... 
" >&6; } +if ${lt_cv_archive_cmds_need_lc_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + $RM conftest* + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } 2>conftest.err; then + soname=conftest + lib=conftest + libobjs=conftest.$ac_objext + deplibs= + wl=$lt_prog_compiler_wl_CXX + pic_flag=$lt_prog_compiler_pic_CXX + compiler_flags=-v + linker_flags=-v + verstring= + output_objdir=. + libname=conftest + lt_save_allow_undefined_flag=$allow_undefined_flag_CXX + allow_undefined_flag_CXX= + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds_CXX 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5 + (eval $archive_cmds_CXX 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } + then + lt_cv_archive_cmds_need_lc_CXX=no + else + lt_cv_archive_cmds_need_lc_CXX=yes + fi + allow_undefined_flag_CXX=$lt_save_allow_undefined_flag + else + cat conftest.err 1>&5 + fi + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc_CXX" >&5 +$as_echo "$lt_cv_archive_cmds_need_lc_CXX" >&6; } + archive_cmds_need_lc_CXX=$lt_cv_archive_cmds_need_lc_CXX + ;; + esac + fi + ;; +esac + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5 +$as_echo_n "checking dynamic linker characteristics... " >&6; } + +library_names_spec= +libname_spec='lib$name' +soname_spec= +shrext_cmds=".so" +postinstall_cmds= +postuninstall_cmds= +finish_cmds= +finish_eval= +shlibpath_var= +shlibpath_overrides_runpath=unknown +version_type=none +dynamic_linker="$host_os ld.so" +sys_lib_dlsearch_path_spec="/lib /usr/lib" +need_lib_prefix=unknown +hardcode_into_libs=no + +# when you set need_version to no, make sure it does not cause -set_version +# flags to be left without arguments +need_version=unknown + +case $host_os in +aix3*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' + shlibpath_var=LIBPATH + + # AIX 3 has no versioning support, so we append a major version to the name. + soname_spec='${libname}${release}${shared_ext}$major' + ;; + +aix[4-9]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + hardcode_into_libs=yes + if test "$host_cpu" = ia64; then + # AIX 5 supports IA64 + library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + else + # With GCC up to 2.95.x, collect2 would create an import file + # for dependence libraries. The import file would start with + # the line `#! .'. This would cause the generated library to + # depend on `.', always an invalid library. This was fixed in + # development snapshots of GCC prior to 3.0. 
+ case $host_os in + aix4 | aix4.[01] | aix4.[01].*) + if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' + echo ' yes ' + echo '#endif'; } | ${CC} -E - | $GREP yes > /dev/null; then + : + else + can_build_shared=no + fi + ;; + esac + # AIX (on Power*) has no versioning support, so currently we can not hardcode correct + # soname into executable. Probably we can add versioning support to + # collect2, so additional links can be useful in future. + if test "$aix_use_runtimelinking" = yes; then + # If using run time linking (on AIX 4.2 or later) use lib.so + # instead of lib.a to let people know that these are not + # typical AIX shared libraries. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + else + # We preserve .a as extension for shared libraries through AIX4.2 + # and later when we are not doing run time linking. + library_names_spec='${libname}${release}.a $libname.a' + soname_spec='${libname}${release}${shared_ext}$major' + fi + shlibpath_var=LIBPATH + fi + ;; + +amigaos*) + case $host_cpu in + powerpc) + # Since July 2007 AmigaOS4 officially supports .so libraries. + # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + ;; + m68k) + library_names_spec='$libname.ixlibrary $libname.a' + # Create ${libname}_ixlibrary.a entries in /sys/libs. + finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' + ;; + esac + ;; + +beos*) + library_names_spec='${libname}${shared_ext}' + dynamic_linker="$host_os ld.so" + shlibpath_var=LIBRARY_PATH + ;; + +bsdi[45]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" + sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" + # the default ld.so.conf also contains /usr/contrib/lib and + # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow + # libtool to hard-code these into programs + ;; + +cygwin* | mingw* | pw32* | cegcc*) + version_type=windows + shrext_cmds=".dll" + need_version=no + need_lib_prefix=no + + case $GCC,$cc_basename in + yes,*) + # gcc + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname~ + chmod a+x \$dldir/$dlname~ + if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then + eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; + fi' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + shlibpath_overrides_runpath=yes + + case $host_os in + cygwin*) + # Cygwin DLLs use 'cyg' prefix rather than 'lib' + soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + + ;; + mingw* | cegcc*) + # MinGW DLLs use traditional 'lib' prefix + soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + ;; + pw32*) + # pw32 DLLs use 'pw' prefix rather than 'lib' + library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + ;; + esac + dynamic_linker='Win32 ld.exe' + ;; + + *,cl*) + # Native MSVC + libname_spec='$name' + soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + library_names_spec='${libname}.dll.lib' + + case $build_os in + mingw*) + sys_lib_search_path_spec= + lt_save_ifs=$IFS + IFS=';' + for lt_path in $LIB + do + IFS=$lt_save_ifs + # Let DOS variable expansion print the short 8.3 style file name. + lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` + sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" + done + IFS=$lt_save_ifs + # Convert to MSYS style. + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` + ;; + cygwin*) + # Convert to unix form, then to dos form, then back to unix form + # but this time dos style (no spaces!) so that the unix form looks + # like /cygdrive/c/PROGRA~1:/cygdr... + sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` + sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` + sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + ;; + *) + sys_lib_search_path_spec="$LIB" + if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then + # It is most probably a Windows format PATH. + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi + # FIXME: find the short name or the path components, as spaces are + # common. (e.g. "Program Files" -> "PROGRA~1") + ;; + esac + + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + shlibpath_overrides_runpath=yes + dynamic_linker='Win32 link.exe' + ;; + + *) + # Assume MSVC wrapper + library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' + dynamic_linker='Win32 ld.exe' + ;; + esac + # FIXME: first we should search . 
and the directory the executable is in + shlibpath_var=PATH + ;; + +darwin* | rhapsody*) + dynamic_linker="$host_os dyld" + version_type=darwin + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${major}$shared_ext ${libname}$shared_ext' + soname_spec='${libname}${release}${major}$shared_ext' + shlibpath_overrides_runpath=yes + shlibpath_var=DYLD_LIBRARY_PATH + shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' + + sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' + ;; + +dgux*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +freebsd* | dragonfly*) + # DragonFly does not have aout. When/if they implement a new + # versioning mechanism, adjust this. + if test -x /usr/bin/objformat; then + objformat=`/usr/bin/objformat` + else + case $host_os in + freebsd[23].*) objformat=aout ;; + *) objformat=elf ;; + esac + fi + version_type=freebsd-$objformat + case $version_type in + freebsd-elf*) + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + need_version=no + need_lib_prefix=no + ;; + freebsd-*) + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' + need_version=yes + ;; + esac + shlibpath_var=LD_LIBRARY_PATH + case $host_os in + freebsd2.*) + shlibpath_overrides_runpath=yes + ;; + freebsd3.[01]* | freebsdelf3.[01]*) + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ + freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + *) # from 4.6 on, and DragonFly + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + esac + ;; + +haiku*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + dynamic_linker="$host_os runtime_loader" + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LIBRARY_PATH + shlibpath_overrides_runpath=yes + sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' + hardcode_into_libs=yes + ;; + +hpux9* | hpux10* | hpux11*) + # Give a soname corresponding to the major version so that dld.sl refuses to + # link against other versions. + version_type=sunos + need_lib_prefix=no + need_version=no + case $host_cpu in + ia64*) + shrext_cmds='.so' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.so" + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. 
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + if test "X$HPUX_IA64_MODE" = X32; then + sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" + else + sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" + fi + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + hppa*64*) + shrext_cmds='.sl' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.sl" + shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + *) + shrext_cmds='.sl' + dynamic_linker="$host_os dld.sl" + shlibpath_var=SHLIB_PATH + shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + ;; + esac + # HP-UX runs *really* slowly unless shared libraries are mode 555, ... + postinstall_cmds='chmod 555 $lib' + # or fails outright, so override atomically: + install_override_mode=555 + ;; + +interix[3-9]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +irix5* | irix6* | nonstopux*) + case $host_os in + nonstopux*) version_type=nonstopux ;; + *) + if test "$lt_cv_prog_gnu_ld" = yes; then + version_type=linux # correct to gnu/linux during the next big refactor + else + version_type=irix + fi ;; + esac + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' + case $host_os in + irix5* | nonstopux*) + libsuff= shlibsuff= + ;; + *) + case $LD in # libtool.m4 will add one of these switches to LD + *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") + libsuff= shlibsuff= libmagic=32-bit;; + *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") + libsuff=32 shlibsuff=N32 libmagic=N32;; + *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") + libsuff=64 shlibsuff=64 libmagic=64-bit;; + *) libsuff= shlibsuff= libmagic=never-match;; + esac + ;; + esac + shlibpath_var=LD_LIBRARY${shlibsuff}_PATH + shlibpath_overrides_runpath=no + sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" + sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" + hardcode_into_libs=yes + ;; + +# No shared lib support for Linux oldld, aout, or coff. +linux*oldld* | linux*aout* | linux*coff*) + dynamic_linker=no + ;; + +# This must be glibc/ELF. 
+linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + + # Some binutils ld are patched to set DT_RUNPATH + if ${lt_cv_shlibpath_overrides_runpath+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_shlibpath_overrides_runpath=no + save_LDFLAGS=$LDFLAGS + save_libdir=$libdir + eval "libdir=/foo; wl=\"$lt_prog_compiler_wl_CXX\"; \ + LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec_CXX\"" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + if ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null; then : + lt_cv_shlibpath_overrides_runpath=yes +fi +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + LDFLAGS=$save_LDFLAGS + libdir=$save_libdir + +fi + + shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath + + # This implies no fast_install, which is unacceptable. + # Some rework will be needed to allow for fast_install + # before this can be enabled. + hardcode_into_libs=yes + + # Append ld.so.conf contents to the search path + if test -f /etc/ld.so.conf; then + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '` + sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" + fi + + # We used to test for /lib/ld.so.1 and disable shared libraries on + # powerpc, because MkLinux only supported shared libraries with the + # GNU dynamic linker. Since this was broken with cross compilers, + # most powerpc-linux boxes support dynamic linking these days and + # people can always --disable-shared, the test was removed, and we + # assume the GNU/Linux dynamic linker is in use. 
+ dynamic_linker='GNU/Linux ld.so' + ;; + +netbsdelf*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='NetBSD ld.elf_so' + ;; + +netbsd*) + version_type=sunos + need_lib_prefix=no + need_version=no + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + dynamic_linker='NetBSD (a.out) ld.so' + else + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='NetBSD ld.elf_so' + fi + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + +newsos6) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +*nto* | *qnx*) + version_type=qnx + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='ldqnx.so' + ;; + +openbsd*) + version_type=sunos + sys_lib_dlsearch_path_spec="/usr/lib" + need_lib_prefix=no + # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs. 
+ case $host_os in + openbsd3.3 | openbsd3.3.*) need_version=yes ;; + *) need_version=no ;; + esac + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + shlibpath_var=LD_LIBRARY_PATH + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + case $host_os in + openbsd2.[89] | openbsd2.[89].*) + shlibpath_overrides_runpath=no + ;; + *) + shlibpath_overrides_runpath=yes + ;; + esac + else + shlibpath_overrides_runpath=yes + fi + ;; + +os2*) + libname_spec='$name' + shrext_cmds=".dll" + need_lib_prefix=no + library_names_spec='$libname${shared_ext} $libname.a' + dynamic_linker='OS/2 ld.exe' + shlibpath_var=LIBPATH + ;; + +osf3* | osf4* | osf5*) + version_type=osf + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" + sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" + ;; + +rdos*) + dynamic_linker=no + ;; + +solaris*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + # ldd complains unless libraries are executable + postinstall_cmds='chmod +x $lib' + ;; + +sunos4*) + version_type=sunos + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + if test "$with_gnu_ld" = yes; then + need_lib_prefix=no + fi + need_version=yes + ;; + +sysv4 | sysv4.3*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + case $host_vendor in + sni) + shlibpath_overrides_runpath=no + need_lib_prefix=no + runpath_var=LD_RUN_PATH + ;; + siemens) + need_lib_prefix=no + ;; + motorola) + need_lib_prefix=no + need_version=no + shlibpath_overrides_runpath=no + sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' + ;; + esac + ;; + +sysv4*MP*) + if test -d /usr/nec ;then + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' + soname_spec='$libname${shared_ext}.$major' + shlibpath_var=LD_LIBRARY_PATH + fi + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + version_type=freebsd-elf + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + if test "$with_gnu_ld" = yes; then + sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib 
/usr/ccs/lib /usr/lib /lib' + else + sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' + case $host_os in + sco3.2v5*) + sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" + ;; + esac + fi + sys_lib_dlsearch_path_spec='/usr/lib' + ;; + +tpf*) + # TPF is a cross-target only. Preferred cross-host = GNU/Linux. + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +uts4*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +*) + dynamic_linker=no + ;; +esac +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5 +$as_echo "$dynamic_linker" >&6; } +test "$dynamic_linker" = no && can_build_shared=no + +variables_saved_for_relink="PATH $shlibpath_var $runpath_var" +if test "$GCC" = yes; then + variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" +fi + +if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then + sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec" +fi +if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then + sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec" +fi + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5 +$as_echo_n "checking how to hardcode library paths into programs... " >&6; } +hardcode_action_CXX= +if test -n "$hardcode_libdir_flag_spec_CXX" || + test -n "$runpath_var_CXX" || + test "X$hardcode_automatic_CXX" = "Xyes" ; then + + # We can hardcode non-existent directories. + if test "$hardcode_direct_CXX" != no && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one + ## test "$_LT_TAGVAR(hardcode_shlibpath_var, CXX)" != no && + test "$hardcode_minus_L_CXX" != no; then + # Linking always hardcodes the temporary library directory. + hardcode_action_CXX=relink + else + # We can link without hardcoding, and we can hardcode nonexisting dirs. + hardcode_action_CXX=immediate + fi +else + # We cannot hardcode anything, or else we can only hardcode existing + # directories. 
+ hardcode_action_CXX=unsupported +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $hardcode_action_CXX" >&5 +$as_echo "$hardcode_action_CXX" >&6; } + +if test "$hardcode_action_CXX" = relink || + test "$inherit_rpath_CXX" = yes; then + # Fast installation is not supported + enable_fast_install=no +elif test "$shlibpath_overrides_runpath" = yes || + test "$enable_shared" = no; then + # Fast installation is not necessary + enable_fast_install=needless +fi + + + + + + + + fi # test -n "$compiler" + + CC=$lt_save_CC + CFLAGS=$lt_save_CFLAGS + LDCXX=$LD + LD=$lt_save_LD + GCC=$lt_save_GCC + with_gnu_ld=$lt_save_with_gnu_ld + lt_cv_path_LDCXX=$lt_cv_path_LD + lt_cv_path_LD=$lt_save_path_LD + lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld + lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld +fi # test "$_lt_caught_CXX_error" != yes + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + + + + + + + + + + + + + + + ac_config_commands="$ac_config_commands libtool" + + + + +# Only expand once: + + + +LT_DLLOADERS= + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + +LIBADD_DLOPEN= +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing dlopen" >&5 +$as_echo_n "checking for library containing dlopen... " >&6; } +if ${ac_cv_search_dlopen+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char dlopen (); +int +main () +{ +return dlopen (); + ; + return 0; +} +_ACEOF +for ac_lib in '' dl; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_dlopen=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_dlopen+:} false; then : + break +fi +done +if ${ac_cv_search_dlopen+:} false; then : + +else + ac_cv_search_dlopen=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_dlopen" >&5 +$as_echo "$ac_cv_search_dlopen" >&6; } +ac_res=$ac_cv_search_dlopen +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + +$as_echo "#define HAVE_LIBDL 1" >>confdefs.h + + if test "$ac_cv_search_dlopen" != "none required" ; then + LIBADD_DLOPEN="-ldl" + fi + libltdl_cv_lib_dl_dlopen="yes" + LT_DLLOADERS="$LT_DLLOADERS ${lt_dlopen_dir+$lt_dlopen_dir/}dlopen.la" +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#if HAVE_DLFCN_H +# include +#endif + +int +main () +{ +dlopen(0, 0); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + +$as_echo "#define HAVE_LIBDL 1" >>confdefs.h + + libltdl_cv_func_dlopen="yes" + LT_DLLOADERS="$LT_DLLOADERS ${lt_dlopen_dir+$lt_dlopen_dir/}dlopen.la" +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -lsvld" >&5 +$as_echo_n "checking for dlopen in -lsvld... 
" >&6; } +if ${ac_cv_lib_svld_dlopen+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lsvld $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char dlopen (); +int +main () +{ +return dlopen (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_svld_dlopen=yes +else + ac_cv_lib_svld_dlopen=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_svld_dlopen" >&5 +$as_echo "$ac_cv_lib_svld_dlopen" >&6; } +if test "x$ac_cv_lib_svld_dlopen" = xyes; then : + +$as_echo "#define HAVE_LIBDL 1" >>confdefs.h + + LIBADD_DLOPEN="-lsvld" libltdl_cv_func_dlopen="yes" + LT_DLLOADERS="$LT_DLLOADERS ${lt_dlopen_dir+$lt_dlopen_dir/}dlopen.la" +fi + +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi + +if test x"$libltdl_cv_func_dlopen" = xyes || test x"$libltdl_cv_lib_dl_dlopen" = xyes +then + lt_save_LIBS="$LIBS" + LIBS="$LIBS $LIBADD_DLOPEN" + for ac_func in dlerror +do : + ac_fn_c_check_func "$LINENO" "dlerror" "ac_cv_func_dlerror" +if test "x$ac_cv_func_dlerror" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_DLERROR 1 +_ACEOF + +fi +done + + LIBS="$lt_save_LIBS" +fi + + +LIBADD_SHL_LOAD= +ac_fn_c_check_func "$LINENO" "shl_load" "ac_cv_func_shl_load" +if test "x$ac_cv_func_shl_load" = xyes; then : + +$as_echo "#define HAVE_SHL_LOAD 1" >>confdefs.h + + LT_DLLOADERS="$LT_DLLOADERS ${lt_dlopen_dir+$lt_dlopen_dir/}shl_load.la" +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for shl_load in -ldld" >&5 +$as_echo_n "checking for shl_load in -ldld... " >&6; } +if ${ac_cv_lib_dld_shl_load+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldld $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char shl_load (); +int +main () +{ +return shl_load (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_dld_shl_load=yes +else + ac_cv_lib_dld_shl_load=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_shl_load" >&5 +$as_echo "$ac_cv_lib_dld_shl_load" >&6; } +if test "x$ac_cv_lib_dld_shl_load" = xyes; then : + +$as_echo "#define HAVE_SHL_LOAD 1" >>confdefs.h + + LT_DLLOADERS="$LT_DLLOADERS ${lt_dlopen_dir+$lt_dlopen_dir/}shl_load.la" + LIBADD_SHL_LOAD="-ldld" +fi + +fi + + + +case $host_os in +darwin[1567].*) +# We only want this for pre-Mac OS X 10.4. 
+ ac_fn_c_check_func "$LINENO" "_dyld_func_lookup" "ac_cv_func__dyld_func_lookup" +if test "x$ac_cv_func__dyld_func_lookup" = xyes; then : + +$as_echo "#define HAVE_DYLD 1" >>confdefs.h + + LT_DLLOADERS="$LT_DLLOADERS ${lt_dlopen_dir+$lt_dlopen_dir/}dyld.la" +fi + + ;; +beos*) + LT_DLLOADERS="$LT_DLLOADERS ${lt_dlopen_dir+$lt_dlopen_dir/}load_add_on.la" + ;; +cygwin* | mingw* | os2* | pw32*) + ac_fn_c_check_decl "$LINENO" "cygwin_conv_path" "ac_cv_have_decl_cygwin_conv_path" "#include +" +if test "x$ac_cv_have_decl_cygwin_conv_path" = xyes; then : + ac_have_decl=1 +else + ac_have_decl=0 +fi + +cat >>confdefs.h <<_ACEOF +#define HAVE_DECL_CYGWIN_CONV_PATH $ac_have_decl +_ACEOF + + LT_DLLOADERS="$LT_DLLOADERS ${lt_dlopen_dir+$lt_dlopen_dir/}loadlibrary.la" + ;; +esac + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for dld_link in -ldld" >&5 +$as_echo_n "checking for dld_link in -ldld... " >&6; } +if ${ac_cv_lib_dld_dld_link+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldld $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char dld_link (); +int +main () +{ +return dld_link (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_dld_dld_link=yes +else + ac_cv_lib_dld_dld_link=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_dld_link" >&5 +$as_echo "$ac_cv_lib_dld_dld_link" >&6; } +if test "x$ac_cv_lib_dld_dld_link" = xyes; then : + +$as_echo "#define HAVE_DLD 1" >>confdefs.h + + LT_DLLOADERS="$LT_DLLOADERS ${lt_dlopen_dir+$lt_dlopen_dir/}dld_link.la" +fi + + + + +LT_DLPREOPEN= +if test -n "$LT_DLLOADERS" +then + for lt_loader in $LT_DLLOADERS; do + LT_DLPREOPEN="$LT_DLPREOPEN-dlpreopen $lt_loader " + done + +$as_echo "#define HAVE_LIBDLLOADER 1" >>confdefs.h + +fi + + +LIBADD_DL="$LIBADD_DLOPEN $LIBADD_SHL_LOAD" + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + + + +if test x$use_libtool != xno; then + DEFAULT_OBJECT_TYPE=".lo" + DEFAULT_ARCHIVE_TYPE=".la" + DEFAULT_SHARED_OBJECT_TYPE=".la" + LIBTOOL="\$(LIBTOOL)" + LIBTOOL_INSTALL_TARGET="libtool-install" + LIBTOOL_UNINSTALL_TARGET="libtool-uninstall" + LIBTOOL_CLEAN_TARGET="libtool-clean" + QMAKE_LIBTOOL="${BUILD_DIR}/libtool" + FD_PLUGIN_DIR="src/plugins/fd" + have_plugins=yes +else + DEFAULT_OBJECT_TYPE=".o" + DEFAULT_ARCHIVE_TYPE=".a" + DEFAULT_SHARED_OBJECT_TYPE=".so" + LIBTOOL="# \$(LIBTOOL)" + LIBTOOL_INSTALL_TARGET="" + LIBTOOL_UNINSTALL_TARGET="" + LIBTOOL_CLEAN_TARGET="" + QMAKE_LIBTOOL="# ${BUILD_DIR}/libtool" + FD_PLUGIN_DIR="" + have_plugins=no +fi + + + + + + + + + + + +# Check whether --enable-includes was given. 
+if test "${enable_includes+set}" = set; then : + enableval=$enable_includes; + if test x$enableval = xyes; then + install_includes=yes + fi + + +fi + + + +if test x$use_libtool != xno -a x$install_includes = xyes; then + INCLUDE_INSTALL_TARGET="install-includes" + INCLUDE_UNINSTALL_TARGET="uninstall-includes" +else + INCLUDE_INSTALL_TARGET="" + INCLUDE_UNINSTALL_TARGET="" +fi + + + + + +case $host_os in + *cygwin* ) CYGWIN=yes;; + * ) CYGWIN=no;; +esac + +if test $HAVE_UNAME=yes -a x`uname -s` = xSunOS +then + + +if $TRUEPRG; then + HAVE_SUN_OS_TRUE= + HAVE_SUN_OS_FALSE='#' +else + HAVE_SUN_OS_TRUE='#' + HAVE_SUN_OS_FALSE= +fi + $as_echo "#define HAVE_SUN_OS 1" >>confdefs.h + +else + + +if $FALSEPRG; then + HAVE_SUN_OS_TRUE= + HAVE_SUN_OS_FALSE='#' +else + HAVE_SUN_OS_TRUE='#' + HAVE_SUN_OS_FALSE= +fi +fi + +if test $HAVE_UNAME=yes -a x`uname -s` = xGNU +then + + +if $TRUEPRG; then + HAVE_HURD_OS_TRUE= + HAVE_HURD_OS_FALSE='#' +else + HAVE_HURD_OS_TRUE='#' + HAVE_HURD_OS_FALSE= +fi + $as_echo "#define HAVE_HURD_OS 1" >>confdefs.h + +else + + +if $FALSEPRG; then + HAVE_HURD_OS_TRUE= + HAVE_HURD_OS_FALSE='#' +else + HAVE_HURD_OS_TRUE='#' + HAVE_HURD_OS_FALSE= +fi +fi + +if test $HAVE_UNAME=yes -a x`uname -s` = xOSF1 +then + + +if $TRUEPRG; then + HAVE_OSF1_OS_TRUE= + HAVE_OSF1_OS_FALSE='#' +else + HAVE_OSF1_OS_TRUE='#' + HAVE_OSF1_OS_FALSE= +fi + $as_echo "#define HAVE_OSF1_OS 1" >>confdefs.h + +else + + +if $FALSEPRG; then + HAVE_OSF1_OS_TRUE= + HAVE_OSF1_OS_FALSE='#' +else + HAVE_OSF1_OS_TRUE='#' + HAVE_OSF1_OS_FALSE= +fi +fi + +if test $HAVE_UNAME=yes -a x`uname -s` = xAIX +then + + +if $TRUEPRG; then + HAVE_AIX_OS_TRUE= + HAVE_AIX_OS_FALSE='#' +else + HAVE_AIX_OS_TRUE='#' + HAVE_AIX_OS_FALSE= +fi + $as_echo "#define HAVE_AIX_OS 1" >>confdefs.h + +else + + +if $FALSEPRG; then + HAVE_AIX_OS_TRUE= + HAVE_AIX_OS_FALSE='#' +else + HAVE_AIX_OS_TRUE='#' + HAVE_AIX_OS_FALSE= +fi +fi + +if test $HAVE_UNAME=yes -a x`uname -s` = xHP-UX +then + + +if $TRUEPRG; then + HAVE_HPUX_OS_TRUE= + HAVE_HPUX_OS_FALSE='#' +else + HAVE_HPUX_OS_TRUE='#' + HAVE_HPUX_OS_FALSE= +fi + $as_echo "#define HAVE_HPUX_OS 1" >>confdefs.h + +else + + +if $FALSEPRG; then + HAVE_HPUX_OS_TRUE= + HAVE_HPUX_OS_FALSE='#' +else + HAVE_HPUX_OS_TRUE='#' + HAVE_HPUX_OS_FALSE= +fi +fi + +if test $HAVE_UNAME=yes -a x`uname -s` = xLinux +then + + +if $TRUEPRG; then + HAVE_LINUX_OS_TRUE= + HAVE_LINUX_OS_FALSE='#' +else + HAVE_LINUX_OS_TRUE='#' + HAVE_LINUX_OS_FALSE= +fi + $as_echo "#define HAVE_LINUX_OS 1" >>confdefs.h + +else + + +if $FALSEPRG; then + HAVE_LINUX_OS_TRUE= + HAVE_LINUX_OS_FALSE='#' +else + HAVE_LINUX_OS_TRUE='#' + HAVE_LINUX_OS_FALSE= +fi +fi + +if test $HAVE_UNAME=yes -a x`uname -s` = xFreeBSD +then + + +if $TRUEPRG; then + HAVE_FREEBSD_OS_TRUE= + HAVE_FREEBSD_OS_FALSE='#' +else + HAVE_FREEBSD_OS_TRUE='#' + HAVE_FREEBSD_OS_FALSE= +fi + $as_echo "#define HAVE_FREEBSD_OS 1" >>confdefs.h + +else + + +if $FALSEPRG; then + HAVE_FREEBSD_OS_TRUE= + HAVE_FREEBSD_OS_FALSE='#' +else + HAVE_FREEBSD_OS_TRUE='#' + HAVE_FREEBSD_OS_FALSE= +fi +fi + +if test $HAVE_UNAME=yes -a x`uname -s` = xNetBSD +then + + +if $TRUEPRG; then + HAVE_NETBSD_OS_TRUE= + HAVE_NETBSD_OS_FALSE='#' +else + HAVE_NETBSD_OS_TRUE='#' + HAVE_NETBSD_OS_FALSE= +fi + $as_echo "#define HAVE_NETBSD_OS 1" >>confdefs.h + +else + + +if $FALSEPRG; then + HAVE_NETBSD_OS_TRUE= + HAVE_NETBSD_OS_FALSE='#' +else + HAVE_NETBSD_OS_TRUE='#' + HAVE_NETBSD_OS_FALSE= +fi +fi + +if test $HAVE_UNAME=yes -a x`uname -s` = xOpenBSD +then + + +if $TRUEPRG; then + HAVE_OPENBSD_OS_TRUE= + 
HAVE_OPENBSD_OS_FALSE='#' +else + HAVE_OPENBSD_OS_TRUE='#' + HAVE_OPENBSD_OS_FALSE= +fi + $as_echo "#define HAVE_OPENBSD_OS 1" >>confdefs.h + +else + + +if $FALSEPRG; then + HAVE_OPENBSD_OS_TRUE= + HAVE_OPENBSD_OS_FALSE='#' +else + HAVE_OPENBSD_OS_TRUE='#' + HAVE_OPENBSD_OS_FALSE= +fi +fi + +if test $HAVE_UNAME=yes -a x`uname -s` = xBSD/OS +then + + +if $TRUEPRG; then + HAVE_BSDI_OS_TRUE= + HAVE_BSDI_OS_FALSE='#' +else + HAVE_BSDI_OS_TRUE='#' + HAVE_BSDI_OS_FALSE= +fi + $as_echo "#define HAVE_BSDI_OS 1" >>confdefs.h + +else + + +if $FALSEPRG; then + HAVE_BSDI_OS_TRUE= + HAVE_BSDI_OS_FALSE='#' +else + HAVE_BSDI_OS_TRUE='#' + HAVE_BSDI_OS_FALSE= +fi +fi + +if test $HAVE_UNAME=yes -a x`uname -s` = xSGI +then + + +if $TRUEPRG; then + HAVE_SGI_OS_TRUE= + HAVE_SGI_OS_FALSE='#' +else + HAVE_SGI_OS_TRUE='#' + HAVE_SGI_OS_FALSE= +fi + $as_echo "#define HAVE_SGI_OS 1" >>confdefs.h + +else + + +if $FALSEPRG; then + HAVE_SGI_OS_TRUE= + HAVE_SGI_OS_FALSE='#' +else + HAVE_SGI_OS_TRUE='#' + HAVE_SGI_OS_FALSE= +fi +fi + +if test $HAVE_UNAME=yes -a x`uname -s` = xIRIX -o x`uname -s` = xIRIX64 +then + + +if $TRUEPRG; then + HAVE_IRIX_OS_TRUE= + HAVE_IRIX_OS_FALSE='#' +else + HAVE_IRIX_OS_TRUE='#' + HAVE_IRIX_OS_FALSE= +fi + $as_echo "#define HAVE_IRIX_OS 1" >>confdefs.h + +else + + +if $FALSEPRG; then + HAVE_IRIX_OS_TRUE= + HAVE_IRIX_OS_FALSE='#' +else + HAVE_IRIX_OS_TRUE='#' + HAVE_IRIX_OS_FALSE= +fi +fi + +if test $HAVE_UNAME=yes -a x`uname -s` = xDarwin +then + + +if $TRUEPRG; then + HAVE_DARWIN_OS_TRUE= + HAVE_DARWIN_OS_FALSE='#' +else + HAVE_DARWIN_OS_TRUE='#' + HAVE_DARWIN_OS_FALSE= +fi + $as_echo "#define HAVE_DARWIN_OS 1" >>confdefs.h + +else + + +if $FALSEPRG; then + HAVE_DARWIN_OS_TRUE= + HAVE_DARWIN_OS_FALSE='#' +else + HAVE_DARWIN_OS_TRUE='#' + HAVE_DARWIN_OS_FALSE= +fi +fi + +if test $HAVE_UNAME=yes -a x`uname -s` = xGNU/kFreeBSD +then + + +if $TRUEPRG; then + HAVE_KFREEBSD_OS_TRUE= + HAVE_KFREEBSD_OS_FALSE='#' +else + HAVE_KFREEBSD_OS_TRUE='#' + HAVE_KFREEBSD_OS_FALSE= +fi + $as_echo "#define HAVE_KFREEBSD_OS 1" >>confdefs.h + +else + + +if $FALSEPRG; then + HAVE_KFREEBSD_OS_TRUE= + HAVE_KFREEBSD_OS_FALSE='#' +else + HAVE_KFREEBSD_OS_TRUE='#' + HAVE_KFREEBSD_OS_FALSE= +fi +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for Operating System Distribution" >&5 +$as_echo_n "checking for Operating System Distribution... 
" >&6; } +if test "x$DISTNAME" != "x" +then + echo "distname set to $DISTNAME" +elif test $HAVE_UNAME=yes -a x`uname -s` = xOSF1 +then + DISTNAME=alpha +elif test $HAVE_UNAME=yes -a x`uname -s` = xAIX +then + DISTNAME=aix +elif test $HAVE_UNAME=yes -a x`uname -s` = xHP-UX +then + DISTNAME=hpux +elif test $HAVE_UNAME=yes -a x`uname -s` = xSunOS +then + DISTNAME=solaris +elif test $HAVE_UNAME=yes -a x`uname -s` = xGNU +then + DISTNAME=hurd +elif test $HAVE_UNAME=yes -a x`uname -s` = xFreeBSD +then + DISTNAME=freebsd +elif test $HAVE_UNAME=yes -a x`uname -s` = xNetBSD +then + DISTNAME=netbsd +elif test $HAVE_UNAME=yes -a x`uname -s` = xOpenBSD +then + DISTNAME=openbsd +elif test $HAVE_UNAME=yes -a x`uname -s` = xIRIX +then + DISTNAME=irix +elif test $HAVE_UNAME=yes -a x`uname -s` = xBSD/OS +then + DISTNAME=bsdi +elif test -f /etc/SuSE-release +then + DISTNAME=suse +elif test -d /etc/SuSEconfig +then + DISTNAME=suse5 +elif test -f /etc/mandrake-release +then + DISTNAME=mandrake +elif test -f /etc/whitebox-release +then + DISTNAME=redhat +elif test -f /etc/redhat-release +then + DISTNAME=redhat +elif test -f /etc/gentoo-release +then + DISTNAME=gentoo +elif test -f /etc/debian_version +then + DISTNAME=debian +elif test -f /etc/slackware-version +then + DISTNAME=slackware +elif test x$host_vendor = xapple +then + DISTNAME=osx +elif test $HAVE_UNAME=yes -a x`uname -s` = xDarwin +then + DISTNAME=darwin +elif test -f /etc/engarde-version +then + DISTNAME=engarde +elif test -f /etc/arch-release +then + DISTNAME=archlinux +elif test "$CYGWIN" = yes +then + DISTNAME=cygwin + $as_echo "#define HAVE_CYGWIN 1" >>confdefs.h + +else + DISTNAME=unknown +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: done" >&5 +$as_echo "done" >&6; } + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5 +$as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; } +set x ${MAKE-make} +ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'` +if eval \${ac_cv_prog_make_${ac_make}_set+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat >conftest.make <<\_ACEOF +SHELL = /bin/sh +all: + @echo '@@@%%%=$(MAKE)=@@@%%%' +_ACEOF +# GNU make sometimes prints "make[1]: Entering ...", which would confuse us. +case `${MAKE-make} -f conftest.make 2>/dev/null` in + *@@@%%%=?*=@@@%%%*) + eval ac_cv_prog_make_${ac_make}_set=yes;; + *) + eval ac_cv_prog_make_${ac_make}_set=no;; +esac +rm -f conftest.make +fi +if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + SET_MAKE= +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + SET_MAKE="MAKE=${MAKE-make}" +fi + + + + MKINSTALLDIRS= + if test -n "$ac_aux_dir"; then + case "$ac_aux_dir" in + /*) MKINSTALLDIRS="$ac_aux_dir/mkinstalldirs" ;; + *) MKINSTALLDIRS="\$(top_builddir)/$ac_aux_dir/mkinstalldirs" ;; + esac + fi + if test -z "$MKINSTALLDIRS"; then + MKINSTALLDIRS="\$(top_srcdir)/mkinstalldirs" + fi + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether NLS is requested" >&5 +$as_echo_n "checking whether NLS is requested... " >&6; } + # Check whether --enable-nls was given. +if test "${enable_nls+set}" = set; then : + enableval=$enable_nls; USE_NLS=$enableval +else + USE_NLS=yes +fi + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $USE_NLS" >&5 +$as_echo "$USE_NLS" >&6; } + + + + + + +# Prepare PATH_SEPARATOR. +# The user is always right. 
+if test "${PATH_SEPARATOR+set}" != set; then + echo "#! /bin/sh" >conf$$.sh + echo "exit 0" >>conf$$.sh + chmod +x conf$$.sh + if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then + PATH_SEPARATOR=';' + else + PATH_SEPARATOR=: + fi + rm -f conf$$.sh +fi + +# Find out how to test for executable files. Don't use a zero-byte file, +# as systems may use methods other than mode bits to determine executability. +cat >conf$$.file <<_ASEOF +#! /bin/sh +exit 0 +_ASEOF +chmod +x conf$$.file +if test -x conf$$.file >/dev/null 2>&1; then + ac_executable_p="test -x" +else + ac_executable_p="test -f" +fi +rm -f conf$$.file + +# Extract the first word of "msgfmt", so it can be a program name with args. +set dummy msgfmt; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_MSGFMT+:} false; then : + $as_echo_n "(cached) " >&6 +else + case "$MSGFMT" in + [\\/]* | ?:[\\/]*) + ac_cv_path_MSGFMT="$MSGFMT" # Let the user override the test with a path. + ;; + *) + ac_save_IFS="$IFS"; IFS=$PATH_SEPARATOR + for ac_dir in $PATH; do + IFS="$ac_save_IFS" + test -z "$ac_dir" && ac_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if $ac_executable_p "$ac_dir/$ac_word$ac_exec_ext"; then + echo "$as_me: trying $ac_dir/$ac_word..." >&5 + if $ac_dir/$ac_word --statistics /dev/null >&5 2>&1 && + (if $ac_dir/$ac_word --statistics /dev/null 2>&1 >/dev/null | grep usage >/dev/null; then exit 1; else exit 0; fi); then + ac_cv_path_MSGFMT="$ac_dir/$ac_word$ac_exec_ext" + break 2 + fi + fi + done + done + IFS="$ac_save_IFS" + test -z "$ac_cv_path_MSGFMT" && ac_cv_path_MSGFMT=":" + ;; +esac +fi +MSGFMT="$ac_cv_path_MSGFMT" +if test "$MSGFMT" != ":"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MSGFMT" >&5 +$as_echo "$MSGFMT" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + # Extract the first word of "gmsgfmt", so it can be a program name with args. +set dummy gmsgfmt; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_GMSGFMT+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $GMSGFMT in + [\\/]* | ?:[\\/]*) + ac_cv_path_GMSGFMT="$GMSGFMT" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_GMSGFMT="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + test -z "$ac_cv_path_GMSGFMT" && ac_cv_path_GMSGFMT="$MSGFMT" + ;; +esac +fi +GMSGFMT=$ac_cv_path_GMSGFMT +if test -n "$GMSGFMT"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $GMSGFMT" >&5 +$as_echo "$GMSGFMT" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + + +# Prepare PATH_SEPARATOR. +# The user is always right. +if test "${PATH_SEPARATOR+set}" != set; then + echo "#! /bin/sh" >conf$$.sh + echo "exit 0" >>conf$$.sh + chmod +x conf$$.sh + if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then + PATH_SEPARATOR=';' + else + PATH_SEPARATOR=: + fi + rm -f conf$$.sh +fi + +# Find out how to test for executable files. 
Don't use a zero-byte file, +# as systems may use methods other than mode bits to determine executability. +cat >conf$$.file <<_ASEOF +#! /bin/sh +exit 0 +_ASEOF +chmod +x conf$$.file +if test -x conf$$.file >/dev/null 2>&1; then + ac_executable_p="test -x" +else + ac_executable_p="test -f" +fi +rm -f conf$$.file + +# Extract the first word of "xgettext", so it can be a program name with args. +set dummy xgettext; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_XGETTEXT+:} false; then : + $as_echo_n "(cached) " >&6 +else + case "$XGETTEXT" in + [\\/]* | ?:[\\/]*) + ac_cv_path_XGETTEXT="$XGETTEXT" # Let the user override the test with a path. + ;; + *) + ac_save_IFS="$IFS"; IFS=$PATH_SEPARATOR + for ac_dir in $PATH; do + IFS="$ac_save_IFS" + test -z "$ac_dir" && ac_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if $ac_executable_p "$ac_dir/$ac_word$ac_exec_ext"; then + echo "$as_me: trying $ac_dir/$ac_word..." >&5 + if $ac_dir/$ac_word --omit-header --copyright-holder= --msgid-bugs-address= /dev/null >&5 2>&1 && + (if $ac_dir/$ac_word --omit-header --copyright-holder= --msgid-bugs-address= /dev/null 2>&1 >/dev/null | grep usage >/dev/null; then exit 1; else exit 0; fi); then + ac_cv_path_XGETTEXT="$ac_dir/$ac_word$ac_exec_ext" + break 2 + fi + fi + done + done + IFS="$ac_save_IFS" + test -z "$ac_cv_path_XGETTEXT" && ac_cv_path_XGETTEXT=":" + ;; +esac +fi +XGETTEXT="$ac_cv_path_XGETTEXT" +if test "$XGETTEXT" != ":"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $XGETTEXT" >&5 +$as_echo "$XGETTEXT" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + rm -f messages.po + + +# Prepare PATH_SEPARATOR. +# The user is always right. +if test "${PATH_SEPARATOR+set}" != set; then + echo "#! /bin/sh" >conf$$.sh + echo "exit 0" >>conf$$.sh + chmod +x conf$$.sh + if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then + PATH_SEPARATOR=';' + else + PATH_SEPARATOR=: + fi + rm -f conf$$.sh +fi + +# Find out how to test for executable files. Don't use a zero-byte file, +# as systems may use methods other than mode bits to determine executability. +cat >conf$$.file <<_ASEOF +#! /bin/sh +exit 0 +_ASEOF +chmod +x conf$$.file +if test -x conf$$.file >/dev/null 2>&1; then + ac_executable_p="test -x" +else + ac_executable_p="test -f" +fi +rm -f conf$$.file + +# Extract the first word of "msgmerge", so it can be a program name with args. +set dummy msgmerge; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_MSGMERGE+:} false; then : + $as_echo_n "(cached) " >&6 +else + case "$MSGMERGE" in + [\\/]* | ?:[\\/]*) + ac_cv_path_MSGMERGE="$MSGMERGE" # Let the user override the test with a path. + ;; + *) + ac_save_IFS="$IFS"; IFS=$PATH_SEPARATOR + for ac_dir in $PATH; do + IFS="$ac_save_IFS" + test -z "$ac_dir" && ac_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if $ac_executable_p "$ac_dir/$ac_word$ac_exec_ext"; then + echo "$as_me: trying $ac_dir/$ac_word..." 
>&5 + if $ac_dir/$ac_word --update -q /dev/null /dev/null >&5 2>&1; then + ac_cv_path_MSGMERGE="$ac_dir/$ac_word$ac_exec_ext" + break 2 + fi + fi + done + done + IFS="$ac_save_IFS" + test -z "$ac_cv_path_MSGMERGE" && ac_cv_path_MSGMERGE=":" + ;; +esac +fi +MSGMERGE="$ac_cv_path_MSGMERGE" +if test "$MSGMERGE" != ":"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MSGMERGE" >&5 +$as_echo "$MSGMERGE" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + if test "$GMSGFMT" != ":"; then + if $GMSGFMT --statistics /dev/null >/dev/null 2>&1 && + (if $GMSGFMT --statistics /dev/null 2>&1 >/dev/null | grep usage >/dev/null; then exit 1; else exit 0; fi); then + : ; + else + GMSGFMT=`echo "$GMSGFMT" | sed -e 's,^.*/,,'` + { $as_echo "$as_me:${as_lineno-$LINENO}: result: found $GMSGFMT program is not GNU msgfmt; ignore it" >&5 +$as_echo "found $GMSGFMT program is not GNU msgfmt; ignore it" >&6; } + GMSGFMT=":" + fi + fi + + if test "$XGETTEXT" != ":"; then + if $XGETTEXT --omit-header --copyright-holder= --msgid-bugs-address= /dev/null >/dev/null 2>&1 && + (if $XGETTEXT --omit-header --copyright-holder= --msgid-bugs-address= /dev/null 2>&1 >/dev/null | grep usage >/dev/null; then exit 1; else exit 0; fi); then + : ; + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: found xgettext program is not GNU xgettext; ignore it" >&5 +$as_echo "found xgettext program is not GNU xgettext; ignore it" >&6; } + XGETTEXT=":" + fi + rm -f messages.po + fi + + ac_config_commands="$ac_config_commands default-1" + + + + if test "X$prefix" = "XNONE"; then + acl_final_prefix="$ac_default_prefix" + else + acl_final_prefix="$prefix" + fi + if test "X$exec_prefix" = "XNONE"; then + acl_final_exec_prefix='${prefix}' + else + acl_final_exec_prefix="$exec_prefix" + fi + acl_save_prefix="$prefix" + prefix="$acl_final_prefix" + eval acl_final_exec_prefix=\"$acl_final_exec_prefix\" + prefix="$acl_save_prefix" + + +# Check whether --with-gnu-ld was given. +if test "${with_gnu_ld+set}" = set; then : + withval=$with_gnu_ld; test "$withval" = no || with_gnu_ld=yes +else + with_gnu_ld=no +fi + +# Prepare PATH_SEPARATOR. +# The user is always right. +if test "${PATH_SEPARATOR+set}" != set; then + echo "#! /bin/sh" >conf$$.sh + echo "exit 0" >>conf$$.sh + chmod +x conf$$.sh + if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then + PATH_SEPARATOR=';' + else + PATH_SEPARATOR=: + fi + rm -f conf$$.sh +fi +ac_prog=ld +if test "$GCC" = yes; then + # Check if gcc -print-prog-name=ld gives a path. + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by GCC" >&5 +$as_echo_n "checking for ld used by GCC... " >&6; } + case $host in + *-*-mingw*) + # gcc leaves a trailing carriage return which upsets mingw + ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; + *) + ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; + esac + case $ac_prog in + # Accept absolute paths. + [\\/]* | [A-Za-z]:[\\/]*) + re_direlt='/[^/][^/]*/\.\./' + # Canonicalize the path of ld + ac_prog=`echo $ac_prog| sed 's%\\\\%/%g'` + while echo $ac_prog | grep "$re_direlt" > /dev/null 2>&1; do + ac_prog=`echo $ac_prog| sed "s%$re_direlt%/%"` + done + test -z "$LD" && LD="$ac_prog" + ;; + "") + # If it fails, then pretend we aren't using GCC. + ac_prog=ld + ;; + *) + # If it is relative, then search for the first ld in PATH. + with_gnu_ld=unknown + ;; + esac +elif test "$with_gnu_ld" = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5 +$as_echo_n "checking for GNU ld... 
" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5 +$as_echo_n "checking for non-GNU ld... " >&6; } +fi +if ${acl_cv_path_LD+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -z "$LD"; then + IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS="${IFS}${PATH_SEPARATOR-:}" + for ac_dir in $PATH; do + test -z "$ac_dir" && ac_dir=. + if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then + acl_cv_path_LD="$ac_dir/$ac_prog" + # Check to see if the program is GNU ld. I'd rather use --version, + # but apparently some GNU ld's only accept -v. + # Break only if it was the GNU/non-GNU ld that we prefer. + case `"$acl_cv_path_LD" -v 2>&1 < /dev/null` in + *GNU* | *'with BFD'*) + test "$with_gnu_ld" != no && break ;; + *) + test "$with_gnu_ld" != yes && break ;; + esac + fi + done + IFS="$ac_save_ifs" +else + acl_cv_path_LD="$LD" # Let the user override the test with a path. +fi +fi + +LD="$acl_cv_path_LD" +if test -n "$LD"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LD" >&5 +$as_echo "$LD" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi +test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5 +$as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; } +if ${acl_cv_prog_gnu_ld+:} false; then : + $as_echo_n "(cached) " >&6 +else + # I'd rather use --version here, but apparently some GNU ld's only accept -v. +case `$LD -v 2>&1 &5 +$as_echo "$acl_cv_prog_gnu_ld" >&6; } +with_gnu_ld=$acl_cv_prog_gnu_ld + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for shared library run path origin" >&5 +$as_echo_n "checking for shared library run path origin... " >&6; } +if ${acl_cv_rpath+:} false; then : + $as_echo_n "(cached) " >&6 +else + + CC="$CC" GCC="$GCC" LDFLAGS="$LDFLAGS" LD="$LD" with_gnu_ld="$with_gnu_ld" \ + ${CONFIG_SHELL-/bin/sh} "$ac_aux_dir/config.rpath" "$host" > conftest.sh + . ./conftest.sh + rm -f ./conftest.sh + acl_cv_rpath=done + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $acl_cv_rpath" >&5 +$as_echo "$acl_cv_rpath" >&6; } + wl="$acl_cv_wl" + libext="$acl_cv_libext" + shlibext="$acl_cv_shlibext" + hardcode_libdir_flag_spec="$acl_cv_hardcode_libdir_flag_spec" + hardcode_libdir_separator="$acl_cv_hardcode_libdir_separator" + hardcode_direct="$acl_cv_hardcode_direct" + hardcode_minus_L="$acl_cv_hardcode_minus_L" + # Check whether --enable-rpath was given. +if test "${enable_rpath+set}" = set; then : + enableval=$enable_rpath; : +else + enable_rpath=yes +fi + + + + + + + + + use_additional=yes + + acl_save_prefix="$prefix" + prefix="$acl_final_prefix" + acl_save_exec_prefix="$exec_prefix" + exec_prefix="$acl_final_exec_prefix" + + eval additional_includedir=\"$includedir\" + eval additional_libdir=\"$libdir\" + + exec_prefix="$acl_save_exec_prefix" + prefix="$acl_save_prefix" + + +# Check whether --with-libiconv-prefix was given. 
+if test "${with_libiconv_prefix+set}" = set; then : + withval=$with_libiconv_prefix; + if test "X$withval" = "Xno"; then + use_additional=no + else + if test "X$withval" = "X"; then + + acl_save_prefix="$prefix" + prefix="$acl_final_prefix" + acl_save_exec_prefix="$exec_prefix" + exec_prefix="$acl_final_exec_prefix" + + eval additional_includedir=\"$includedir\" + eval additional_libdir=\"$libdir\" + + exec_prefix="$acl_save_exec_prefix" + prefix="$acl_save_prefix" + + else + additional_includedir="$withval/include" + additional_libdir="$withval/lib" + fi + fi + +fi + + LIBICONV= + LTLIBICONV= + INCICONV= + rpathdirs= + ltrpathdirs= + names_already_handled= + names_next_round='iconv ' + while test -n "$names_next_round"; do + names_this_round="$names_next_round" + names_next_round= + for name in $names_this_round; do + already_handled= + for n in $names_already_handled; do + if test "$n" = "$name"; then + already_handled=yes + break + fi + done + if test -z "$already_handled"; then + names_already_handled="$names_already_handled $name" + uppername=`echo "$name" | sed -e 'y|abcdefghijklmnopqrstuvwxyz./-|ABCDEFGHIJKLMNOPQRSTUVWXYZ___|'` + eval value=\"\$HAVE_LIB$uppername\" + if test -n "$value"; then + if test "$value" = yes; then + eval value=\"\$LIB$uppername\" + test -z "$value" || LIBICONV="${LIBICONV}${LIBICONV:+ }$value" + eval value=\"\$LTLIB$uppername\" + test -z "$value" || LTLIBICONV="${LTLIBICONV}${LTLIBICONV:+ }$value" + else + : + fi + else + found_dir= + found_la= + found_so= + found_a= + if test $use_additional = yes; then + if test -n "$shlibext" && test -f "$additional_libdir/lib$name.$shlibext"; then + found_dir="$additional_libdir" + found_so="$additional_libdir/lib$name.$shlibext" + if test -f "$additional_libdir/lib$name.la"; then + found_la="$additional_libdir/lib$name.la" + fi + else + if test -f "$additional_libdir/lib$name.$libext"; then + found_dir="$additional_libdir" + found_a="$additional_libdir/lib$name.$libext" + if test -f "$additional_libdir/lib$name.la"; then + found_la="$additional_libdir/lib$name.la" + fi + fi + fi + fi + if test "X$found_dir" = "X"; then + for x in $LDFLAGS $LTLIBICONV; do + + acl_save_prefix="$prefix" + prefix="$acl_final_prefix" + acl_save_exec_prefix="$exec_prefix" + exec_prefix="$acl_final_exec_prefix" + eval x=\"$x\" + exec_prefix="$acl_save_exec_prefix" + prefix="$acl_save_prefix" + + case "$x" in + -L*) + dir=`echo "X$x" | sed -e 's/^X-L//'` + if test -n "$shlibext" && test -f "$dir/lib$name.$shlibext"; then + found_dir="$dir" + found_so="$dir/lib$name.$shlibext" + if test -f "$dir/lib$name.la"; then + found_la="$dir/lib$name.la" + fi + else + if test -f "$dir/lib$name.$libext"; then + found_dir="$dir" + found_a="$dir/lib$name.$libext" + if test -f "$dir/lib$name.la"; then + found_la="$dir/lib$name.la" + fi + fi + fi + ;; + esac + if test "X$found_dir" != "X"; then + break + fi + done + fi + if test "X$found_dir" != "X"; then + LTLIBICONV="${LTLIBICONV}${LTLIBICONV:+ }-L$found_dir -l$name" + if test "X$found_so" != "X"; then + if test "$enable_rpath" = no || test "X$found_dir" = "X/usr/lib"; then + LIBICONV="${LIBICONV}${LIBICONV:+ }$found_so" + else + haveit= + for x in $ltrpathdirs; do + if test "X$x" = "X$found_dir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + ltrpathdirs="$ltrpathdirs $found_dir" + fi + if test "$hardcode_direct" = yes; then + LIBICONV="${LIBICONV}${LIBICONV:+ }$found_so" + else + if test -n "$hardcode_libdir_flag_spec" && test "$hardcode_minus_L" = no; then + 
LIBICONV="${LIBICONV}${LIBICONV:+ }$found_so" + haveit= + for x in $rpathdirs; do + if test "X$x" = "X$found_dir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + rpathdirs="$rpathdirs $found_dir" + fi + else + haveit= + for x in $LDFLAGS $LIBICONV; do + + acl_save_prefix="$prefix" + prefix="$acl_final_prefix" + acl_save_exec_prefix="$exec_prefix" + exec_prefix="$acl_final_exec_prefix" + eval x=\"$x\" + exec_prefix="$acl_save_exec_prefix" + prefix="$acl_save_prefix" + + if test "X$x" = "X-L$found_dir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + LIBICONV="${LIBICONV}${LIBICONV:+ }-L$found_dir" + fi + if test "$hardcode_minus_L" != no; then + LIBICONV="${LIBICONV}${LIBICONV:+ }$found_so" + else + LIBICONV="${LIBICONV}${LIBICONV:+ }-l$name" + fi + fi + fi + fi + else + if test "X$found_a" != "X"; then + LIBICONV="${LIBICONV}${LIBICONV:+ }$found_a" + else + LIBICONV="${LIBICONV}${LIBICONV:+ }-L$found_dir -l$name" + fi + fi + additional_includedir= + case "$found_dir" in + */lib | */lib/) + basedir=`echo "X$found_dir" | sed -e 's,^X,,' -e 's,/lib/*$,,'` + additional_includedir="$basedir/include" + ;; + esac + if test "X$additional_includedir" != "X"; then + if test "X$additional_includedir" != "X/usr/include"; then + haveit= + if test "X$additional_includedir" = "X/usr/local/include"; then + if test -n "$GCC"; then + case $host_os in + linux* | gnu* | k*bsd*-gnu) haveit=yes;; + esac + fi + fi + if test -z "$haveit"; then + for x in $CPPFLAGS $INCICONV; do + + acl_save_prefix="$prefix" + prefix="$acl_final_prefix" + acl_save_exec_prefix="$exec_prefix" + exec_prefix="$acl_final_exec_prefix" + eval x=\"$x\" + exec_prefix="$acl_save_exec_prefix" + prefix="$acl_save_prefix" + + if test "X$x" = "X-I$additional_includedir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + if test -d "$additional_includedir"; then + INCICONV="${INCICONV}${INCICONV:+ }-I$additional_includedir" + fi + fi + fi + fi + fi + if test -n "$found_la"; then + save_libdir="$libdir" + case "$found_la" in + */* | *\\*) . "$found_la" ;; + *) . 
"./$found_la" ;; + esac + libdir="$save_libdir" + for dep in $dependency_libs; do + case "$dep" in + -L*) + additional_libdir=`echo "X$dep" | sed -e 's/^X-L//'` + if test "X$additional_libdir" != "X/usr/lib"; then + haveit= + if test "X$additional_libdir" = "X/usr/local/lib"; then + if test -n "$GCC"; then + case $host_os in + linux* | gnu* | k*bsd*-gnu) haveit=yes;; + esac + fi + fi + if test -z "$haveit"; then + haveit= + for x in $LDFLAGS $LIBICONV; do + + acl_save_prefix="$prefix" + prefix="$acl_final_prefix" + acl_save_exec_prefix="$exec_prefix" + exec_prefix="$acl_final_exec_prefix" + eval x=\"$x\" + exec_prefix="$acl_save_exec_prefix" + prefix="$acl_save_prefix" + + if test "X$x" = "X-L$additional_libdir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + if test -d "$additional_libdir"; then + LIBICONV="${LIBICONV}${LIBICONV:+ }-L$additional_libdir" + fi + fi + haveit= + for x in $LDFLAGS $LTLIBICONV; do + + acl_save_prefix="$prefix" + prefix="$acl_final_prefix" + acl_save_exec_prefix="$exec_prefix" + exec_prefix="$acl_final_exec_prefix" + eval x=\"$x\" + exec_prefix="$acl_save_exec_prefix" + prefix="$acl_save_prefix" + + if test "X$x" = "X-L$additional_libdir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + if test -d "$additional_libdir"; then + LTLIBICONV="${LTLIBICONV}${LTLIBICONV:+ }-L$additional_libdir" + fi + fi + fi + fi + ;; + -R*) + dir=`echo "X$dep" | sed -e 's/^X-R//'` + if test "$enable_rpath" != no; then + haveit= + for x in $rpathdirs; do + if test "X$x" = "X$dir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + rpathdirs="$rpathdirs $dir" + fi + haveit= + for x in $ltrpathdirs; do + if test "X$x" = "X$dir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + ltrpathdirs="$ltrpathdirs $dir" + fi + fi + ;; + -l*) + names_next_round="$names_next_round "`echo "X$dep" | sed -e 's/^X-l//'` + ;; + *.la) + names_next_round="$names_next_round "`echo "X$dep" | sed -e 's,^X.*/,,' -e 's,^lib,,' -e 's,\.la$,,'` + ;; + *) + LIBICONV="${LIBICONV}${LIBICONV:+ }$dep" + LTLIBICONV="${LTLIBICONV}${LTLIBICONV:+ }$dep" + ;; + esac + done + fi + else + LIBICONV="${LIBICONV}${LIBICONV:+ }-l$name" + LTLIBICONV="${LTLIBICONV}${LTLIBICONV:+ }-l$name" + fi + fi + fi + done + done + if test "X$rpathdirs" != "X"; then + if test -n "$hardcode_libdir_separator"; then + alldirs= + for found_dir in $rpathdirs; do + alldirs="${alldirs}${alldirs:+$hardcode_libdir_separator}$found_dir" + done + acl_save_libdir="$libdir" + libdir="$alldirs" + eval flag=\"$hardcode_libdir_flag_spec\" + libdir="$acl_save_libdir" + LIBICONV="${LIBICONV}${LIBICONV:+ }$flag" + else + for found_dir in $rpathdirs; do + acl_save_libdir="$libdir" + libdir="$found_dir" + eval flag=\"$hardcode_libdir_flag_spec\" + libdir="$acl_save_libdir" + LIBICONV="${LIBICONV}${LIBICONV:+ }$flag" + done + fi + fi + if test "X$ltrpathdirs" != "X"; then + for found_dir in $ltrpathdirs; do + LTLIBICONV="${LTLIBICONV}${LTLIBICONV:+ }-R$found_dir" + done + fi + + + + + + + + + + + + + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for CFPreferencesCopyAppValue" >&5 +$as_echo_n "checking for CFPreferencesCopyAppValue... 
" >&6; } +if ${gt_cv_func_CFPreferencesCopyAppValue+:} false; then : + $as_echo_n "(cached) " >&6 +else + gt_save_CPPFLAGS="$CPPFLAGS" + CPPFLAGS="$CPPFLAGS -I/System/Library/Frameworks/CoreFoundation.framework/Headers" + gt_save_LIBS="$LIBS" + LIBS="$LIBS -framework CoreFoundation" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +int +main () +{ +CFPreferencesCopyAppValue(NULL, NULL) + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + gt_cv_func_CFPreferencesCopyAppValue=yes +else + gt_cv_func_CFPreferencesCopyAppValue=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + CPPFLAGS="$gt_save_CPPFLAGS" + LIBS="$gt_save_LIBS" +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_cv_func_CFPreferencesCopyAppValue" >&5 +$as_echo "$gt_cv_func_CFPreferencesCopyAppValue" >&6; } + if test $gt_cv_func_CFPreferencesCopyAppValue = yes; then + +$as_echo "#define HAVE_CFPREFERENCESCOPYAPPVALUE 1" >>confdefs.h + + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for CFLocaleCopyCurrent" >&5 +$as_echo_n "checking for CFLocaleCopyCurrent... " >&6; } +if ${gt_cv_func_CFLocaleCopyCurrent+:} false; then : + $as_echo_n "(cached) " >&6 +else + gt_save_CPPFLAGS="$CPPFLAGS" + CPPFLAGS="$CPPFLAGS -I/System/Library/Frameworks/CoreFoundation.framework/Headers" + gt_save_LIBS="$LIBS" + LIBS="$LIBS -framework CoreFoundation" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +int +main () +{ +CFLocaleCopyCurrent(); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + gt_cv_func_CFLocaleCopyCurrent=yes +else + gt_cv_func_CFLocaleCopyCurrent=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + CPPFLAGS="$gt_save_CPPFLAGS" + LIBS="$gt_save_LIBS" +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_cv_func_CFLocaleCopyCurrent" >&5 +$as_echo "$gt_cv_func_CFLocaleCopyCurrent" >&6; } + if test $gt_cv_func_CFLocaleCopyCurrent = yes; then + +$as_echo "#define HAVE_CFLOCALECOPYCURRENT 1" >>confdefs.h + + fi + INTL_MACOSX_LIBS= + if test $gt_cv_func_CFPreferencesCopyAppValue = yes || test $gt_cv_func_CFLocaleCopyCurrent = yes; then + INTL_MACOSX_LIBS="-Wl,-framework -Wl,CoreFoundation" + fi + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether NLS is requested" >&5 +$as_echo_n "checking whether NLS is requested... " >&6; } + # Check whether --enable-nls was given. +if test "${enable_nls+set}" = set; then : + enableval=$enable_nls; USE_NLS=$enableval +else + USE_NLS=yes +fi + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $USE_NLS" >&5 +$as_echo "$USE_NLS" >&6; } + + + + + LIBINTL= + LTLIBINTL= + POSUB= + + if test "$USE_NLS" = "yes"; then + gt_use_preinstalled_gnugettext=no + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU gettext in libc" >&5 +$as_echo_n "checking for GNU gettext in libc... " >&6; } +if ${gt_cv_func_gnugettext1_libc+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#include +extern int _nl_msg_cat_cntr; +extern int *_nl_domain_bindings; +int +main () +{ +bindtextdomain ("", ""); +return * gettext ("") + _nl_msg_cat_cntr + *_nl_domain_bindings + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + gt_cv_func_gnugettext1_libc=yes +else + gt_cv_func_gnugettext1_libc=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_cv_func_gnugettext1_libc" >&5 +$as_echo "$gt_cv_func_gnugettext1_libc" >&6; } + + if test "$gt_cv_func_gnugettext1_libc" != "yes"; then + + + + + + am_save_CPPFLAGS="$CPPFLAGS" + + for element in $INCICONV; do + haveit= + for x in $CPPFLAGS; do + + acl_save_prefix="$prefix" + prefix="$acl_final_prefix" + acl_save_exec_prefix="$exec_prefix" + exec_prefix="$acl_final_exec_prefix" + eval x=\"$x\" + exec_prefix="$acl_save_exec_prefix" + prefix="$acl_save_prefix" + + if test "X$x" = "X$element"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + CPPFLAGS="${CPPFLAGS}${CPPFLAGS:+ }$element" + fi + done + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for iconv" >&5 +$as_echo_n "checking for iconv... " >&6; } +if ${am_cv_func_iconv+:} false; then : + $as_echo_n "(cached) " >&6 +else + + am_cv_func_iconv="no, consider installing GNU libiconv" + am_cv_lib_iconv=no + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include +int +main () +{ +iconv_t cd = iconv_open("",""); + iconv(cd,NULL,NULL,NULL,NULL); + iconv_close(cd); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + am_cv_func_iconv=yes +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + if test "$am_cv_func_iconv" != yes; then + am_save_LIBS="$LIBS" + LIBS="$LIBS $LIBICONV" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include +int +main () +{ +iconv_t cd = iconv_open("",""); + iconv(cd,NULL,NULL,NULL,NULL); + iconv_close(cd); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + am_cv_lib_iconv=yes + am_cv_func_iconv=yes +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + LIBS="$am_save_LIBS" + fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_func_iconv" >&5 +$as_echo "$am_cv_func_iconv" >&6; } + if test "$am_cv_func_iconv" = yes; then + +$as_echo "#define HAVE_ICONV 1" >>confdefs.h + + fi + if test "$am_cv_lib_iconv" = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to link with libiconv" >&5 +$as_echo_n "checking how to link with libiconv... " >&6; } + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LIBICONV" >&5 +$as_echo "$LIBICONV" >&6; } + else + CPPFLAGS="$am_save_CPPFLAGS" + LIBICONV= + LTLIBICONV= + fi + + + + + + + use_additional=yes + + acl_save_prefix="$prefix" + prefix="$acl_final_prefix" + acl_save_exec_prefix="$exec_prefix" + exec_prefix="$acl_final_exec_prefix" + + eval additional_includedir=\"$includedir\" + eval additional_libdir=\"$libdir\" + + exec_prefix="$acl_save_exec_prefix" + prefix="$acl_save_prefix" + + +# Check whether --with-libintl-prefix was given. 
+if test "${with_libintl_prefix+set}" = set; then : + withval=$with_libintl_prefix; + if test "X$withval" = "Xno"; then + use_additional=no + else + if test "X$withval" = "X"; then + + acl_save_prefix="$prefix" + prefix="$acl_final_prefix" + acl_save_exec_prefix="$exec_prefix" + exec_prefix="$acl_final_exec_prefix" + + eval additional_includedir=\"$includedir\" + eval additional_libdir=\"$libdir\" + + exec_prefix="$acl_save_exec_prefix" + prefix="$acl_save_prefix" + + else + additional_includedir="$withval/include" + additional_libdir="$withval/lib" + fi + fi + +fi + + LIBINTL= + LTLIBINTL= + INCINTL= + rpathdirs= + ltrpathdirs= + names_already_handled= + names_next_round='intl ' + while test -n "$names_next_round"; do + names_this_round="$names_next_round" + names_next_round= + for name in $names_this_round; do + already_handled= + for n in $names_already_handled; do + if test "$n" = "$name"; then + already_handled=yes + break + fi + done + if test -z "$already_handled"; then + names_already_handled="$names_already_handled $name" + uppername=`echo "$name" | sed -e 'y|abcdefghijklmnopqrstuvwxyz./-|ABCDEFGHIJKLMNOPQRSTUVWXYZ___|'` + eval value=\"\$HAVE_LIB$uppername\" + if test -n "$value"; then + if test "$value" = yes; then + eval value=\"\$LIB$uppername\" + test -z "$value" || LIBINTL="${LIBINTL}${LIBINTL:+ }$value" + eval value=\"\$LTLIB$uppername\" + test -z "$value" || LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }$value" + else + : + fi + else + found_dir= + found_la= + found_so= + found_a= + if test $use_additional = yes; then + if test -n "$shlibext" && test -f "$additional_libdir/lib$name.$shlibext"; then + found_dir="$additional_libdir" + found_so="$additional_libdir/lib$name.$shlibext" + if test -f "$additional_libdir/lib$name.la"; then + found_la="$additional_libdir/lib$name.la" + fi + else + if test -f "$additional_libdir/lib$name.$libext"; then + found_dir="$additional_libdir" + found_a="$additional_libdir/lib$name.$libext" + if test -f "$additional_libdir/lib$name.la"; then + found_la="$additional_libdir/lib$name.la" + fi + fi + fi + fi + if test "X$found_dir" = "X"; then + for x in $LDFLAGS $LTLIBINTL; do + + acl_save_prefix="$prefix" + prefix="$acl_final_prefix" + acl_save_exec_prefix="$exec_prefix" + exec_prefix="$acl_final_exec_prefix" + eval x=\"$x\" + exec_prefix="$acl_save_exec_prefix" + prefix="$acl_save_prefix" + + case "$x" in + -L*) + dir=`echo "X$x" | sed -e 's/^X-L//'` + if test -n "$shlibext" && test -f "$dir/lib$name.$shlibext"; then + found_dir="$dir" + found_so="$dir/lib$name.$shlibext" + if test -f "$dir/lib$name.la"; then + found_la="$dir/lib$name.la" + fi + else + if test -f "$dir/lib$name.$libext"; then + found_dir="$dir" + found_a="$dir/lib$name.$libext" + if test -f "$dir/lib$name.la"; then + found_la="$dir/lib$name.la" + fi + fi + fi + ;; + esac + if test "X$found_dir" != "X"; then + break + fi + done + fi + if test "X$found_dir" != "X"; then + LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }-L$found_dir -l$name" + if test "X$found_so" != "X"; then + if test "$enable_rpath" = no || test "X$found_dir" = "X/usr/lib"; then + LIBINTL="${LIBINTL}${LIBINTL:+ }$found_so" + else + haveit= + for x in $ltrpathdirs; do + if test "X$x" = "X$found_dir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + ltrpathdirs="$ltrpathdirs $found_dir" + fi + if test "$hardcode_direct" = yes; then + LIBINTL="${LIBINTL}${LIBINTL:+ }$found_so" + else + if test -n "$hardcode_libdir_flag_spec" && test "$hardcode_minus_L" = no; then + LIBINTL="${LIBINTL}${LIBINTL:+ }$found_so" + 
haveit= + for x in $rpathdirs; do + if test "X$x" = "X$found_dir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + rpathdirs="$rpathdirs $found_dir" + fi + else + haveit= + for x in $LDFLAGS $LIBINTL; do + + acl_save_prefix="$prefix" + prefix="$acl_final_prefix" + acl_save_exec_prefix="$exec_prefix" + exec_prefix="$acl_final_exec_prefix" + eval x=\"$x\" + exec_prefix="$acl_save_exec_prefix" + prefix="$acl_save_prefix" + + if test "X$x" = "X-L$found_dir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + LIBINTL="${LIBINTL}${LIBINTL:+ }-L$found_dir" + fi + if test "$hardcode_minus_L" != no; then + LIBINTL="${LIBINTL}${LIBINTL:+ }$found_so" + else + LIBINTL="${LIBINTL}${LIBINTL:+ }-l$name" + fi + fi + fi + fi + else + if test "X$found_a" != "X"; then + LIBINTL="${LIBINTL}${LIBINTL:+ }$found_a" + else + LIBINTL="${LIBINTL}${LIBINTL:+ }-L$found_dir -l$name" + fi + fi + additional_includedir= + case "$found_dir" in + */lib | */lib/) + basedir=`echo "X$found_dir" | sed -e 's,^X,,' -e 's,/lib/*$,,'` + additional_includedir="$basedir/include" + ;; + esac + if test "X$additional_includedir" != "X"; then + if test "X$additional_includedir" != "X/usr/include"; then + haveit= + if test "X$additional_includedir" = "X/usr/local/include"; then + if test -n "$GCC"; then + case $host_os in + linux* | gnu* | k*bsd*-gnu) haveit=yes;; + esac + fi + fi + if test -z "$haveit"; then + for x in $CPPFLAGS $INCINTL; do + + acl_save_prefix="$prefix" + prefix="$acl_final_prefix" + acl_save_exec_prefix="$exec_prefix" + exec_prefix="$acl_final_exec_prefix" + eval x=\"$x\" + exec_prefix="$acl_save_exec_prefix" + prefix="$acl_save_prefix" + + if test "X$x" = "X-I$additional_includedir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + if test -d "$additional_includedir"; then + INCINTL="${INCINTL}${INCINTL:+ }-I$additional_includedir" + fi + fi + fi + fi + fi + if test -n "$found_la"; then + save_libdir="$libdir" + case "$found_la" in + */* | *\\*) . "$found_la" ;; + *) . 
"./$found_la" ;; + esac + libdir="$save_libdir" + for dep in $dependency_libs; do + case "$dep" in + -L*) + additional_libdir=`echo "X$dep" | sed -e 's/^X-L//'` + if test "X$additional_libdir" != "X/usr/lib"; then + haveit= + if test "X$additional_libdir" = "X/usr/local/lib"; then + if test -n "$GCC"; then + case $host_os in + linux* | gnu* | k*bsd*-gnu) haveit=yes;; + esac + fi + fi + if test -z "$haveit"; then + haveit= + for x in $LDFLAGS $LIBINTL; do + + acl_save_prefix="$prefix" + prefix="$acl_final_prefix" + acl_save_exec_prefix="$exec_prefix" + exec_prefix="$acl_final_exec_prefix" + eval x=\"$x\" + exec_prefix="$acl_save_exec_prefix" + prefix="$acl_save_prefix" + + if test "X$x" = "X-L$additional_libdir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + if test -d "$additional_libdir"; then + LIBINTL="${LIBINTL}${LIBINTL:+ }-L$additional_libdir" + fi + fi + haveit= + for x in $LDFLAGS $LTLIBINTL; do + + acl_save_prefix="$prefix" + prefix="$acl_final_prefix" + acl_save_exec_prefix="$exec_prefix" + exec_prefix="$acl_final_exec_prefix" + eval x=\"$x\" + exec_prefix="$acl_save_exec_prefix" + prefix="$acl_save_prefix" + + if test "X$x" = "X-L$additional_libdir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + if test -d "$additional_libdir"; then + LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }-L$additional_libdir" + fi + fi + fi + fi + ;; + -R*) + dir=`echo "X$dep" | sed -e 's/^X-R//'` + if test "$enable_rpath" != no; then + haveit= + for x in $rpathdirs; do + if test "X$x" = "X$dir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + rpathdirs="$rpathdirs $dir" + fi + haveit= + for x in $ltrpathdirs; do + if test "X$x" = "X$dir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + ltrpathdirs="$ltrpathdirs $dir" + fi + fi + ;; + -l*) + names_next_round="$names_next_round "`echo "X$dep" | sed -e 's/^X-l//'` + ;; + *.la) + names_next_round="$names_next_round "`echo "X$dep" | sed -e 's,^X.*/,,' -e 's,^lib,,' -e 's,\.la$,,'` + ;; + *) + LIBINTL="${LIBINTL}${LIBINTL:+ }$dep" + LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }$dep" + ;; + esac + done + fi + else + LIBINTL="${LIBINTL}${LIBINTL:+ }-l$name" + LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }-l$name" + fi + fi + fi + done + done + if test "X$rpathdirs" != "X"; then + if test -n "$hardcode_libdir_separator"; then + alldirs= + for found_dir in $rpathdirs; do + alldirs="${alldirs}${alldirs:+$hardcode_libdir_separator}$found_dir" + done + acl_save_libdir="$libdir" + libdir="$alldirs" + eval flag=\"$hardcode_libdir_flag_spec\" + libdir="$acl_save_libdir" + LIBINTL="${LIBINTL}${LIBINTL:+ }$flag" + else + for found_dir in $rpathdirs; do + acl_save_libdir="$libdir" + libdir="$found_dir" + eval flag=\"$hardcode_libdir_flag_spec\" + libdir="$acl_save_libdir" + LIBINTL="${LIBINTL}${LIBINTL:+ }$flag" + done + fi + fi + if test "X$ltrpathdirs" != "X"; then + for found_dir in $ltrpathdirs; do + LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }-R$found_dir" + done + fi + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU gettext in libintl" >&5 +$as_echo_n "checking for GNU gettext in libintl... " >&6; } +if ${gt_cv_func_gnugettext1_libintl+:} false; then : + $as_echo_n "(cached) " >&6 +else + gt_save_CPPFLAGS="$CPPFLAGS" + CPPFLAGS="$CPPFLAGS $INCINTL" + gt_save_LIBS="$LIBS" + LIBS="$LIBS $LIBINTL" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#include +extern int _nl_msg_cat_cntr; +extern +#ifdef __cplusplus +"C" +#endif +const char *_nl_expand_alias (const char *); +int +main () +{ +bindtextdomain ("", ""); +return * gettext ("") + _nl_msg_cat_cntr + *_nl_expand_alias ("") + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + gt_cv_func_gnugettext1_libintl=yes +else + gt_cv_func_gnugettext1_libintl=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + if test "$gt_cv_func_gnugettext1_libintl" != yes && test -n "$LIBICONV"; then + LIBS="$LIBS $LIBICONV" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +extern int _nl_msg_cat_cntr; +extern +#ifdef __cplusplus +"C" +#endif +const char *_nl_expand_alias (const char *); +int +main () +{ +bindtextdomain ("", ""); +return * gettext ("") + _nl_msg_cat_cntr + *_nl_expand_alias ("") + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + LIBINTL="$LIBINTL $LIBICONV" + LTLIBINTL="$LTLIBINTL $LTLIBICONV" + gt_cv_func_gnugettext1_libintl=yes + +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + fi + CPPFLAGS="$gt_save_CPPFLAGS" + LIBS="$gt_save_LIBS" +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_cv_func_gnugettext1_libintl" >&5 +$as_echo "$gt_cv_func_gnugettext1_libintl" >&6; } + fi + + if test "$gt_cv_func_gnugettext1_libc" = "yes" \ + || { test "$gt_cv_func_gnugettext1_libintl" = "yes" \ + && test "$PACKAGE" != gettext-runtime \ + && test "$PACKAGE" != gettext-tools; }; then + gt_use_preinstalled_gnugettext=yes + else + LIBINTL= + LTLIBINTL= + INCINTL= + fi + + + + if test -n "$INTL_MACOSX_LIBS"; then + if test "$gt_use_preinstalled_gnugettext" = "yes" \ + || test "$nls_cv_use_gnu_gettext" = "yes"; then + LIBINTL="$LIBINTL $INTL_MACOSX_LIBS" + LTLIBINTL="$LTLIBINTL $INTL_MACOSX_LIBS" + fi + fi + + if test "$gt_use_preinstalled_gnugettext" = "yes" \ + || test "$nls_cv_use_gnu_gettext" = "yes"; then + +$as_echo "#define ENABLE_NLS 1" >>confdefs.h + + else + USE_NLS=no + fi + fi + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to use NLS" >&5 +$as_echo_n "checking whether to use NLS... " >&6; } + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $USE_NLS" >&5 +$as_echo "$USE_NLS" >&6; } + if test "$USE_NLS" = "yes"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking where the gettext function comes from" >&5 +$as_echo_n "checking where the gettext function comes from... " >&6; } + if test "$gt_use_preinstalled_gnugettext" = "yes"; then + if test "$gt_cv_func_gnugettext1_libintl" = "yes"; then + gt_source="external libintl" + else + gt_source="libc" + fi + else + gt_source="included intl directory" + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_source" >&5 +$as_echo "$gt_source" >&6; } + fi + + if test "$USE_NLS" = "yes"; then + + if test "$gt_use_preinstalled_gnugettext" = "yes"; then + if test "$gt_cv_func_gnugettext1_libintl" = "yes"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to link with libintl" >&5 +$as_echo_n "checking how to link with libintl... 
" >&6; } + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LIBINTL" >&5 +$as_echo "$LIBINTL" >&6; } + + for element in $INCINTL; do + haveit= + for x in $CPPFLAGS; do + + acl_save_prefix="$prefix" + prefix="$acl_final_prefix" + acl_save_exec_prefix="$exec_prefix" + exec_prefix="$acl_final_exec_prefix" + eval x=\"$x\" + exec_prefix="$acl_save_exec_prefix" + prefix="$acl_save_prefix" + + if test "X$x" = "X$element"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + CPPFLAGS="${CPPFLAGS}${CPPFLAGS:+ }$element" + fi + done + + fi + + +$as_echo "#define HAVE_GETTEXT 1" >>confdefs.h + + +$as_echo "#define HAVE_DCGETTEXT 1" >>confdefs.h + + fi + + POSUB=po + fi + + + + INTLLIBS="$LIBINTL" + + + + + + + +os_name=`uname -s 2>/dev/null` +if test x${prefix} = xNONE ; then + if test `eval echo ${sysconfdir}` = NONE/etc ; then + sysconfdir=/etc/bacula + fi + + if test `eval echo ${libdir}` = NONE/lib ; then + case ${os_name} in + Linux) + os_processor=`uname -p 2>/dev/null` + case ${os_processor} in + x86_64) + libdir=/usr/lib64 + ;; + *) + libdir=/usr/lib + ;; + esac + ;; + *) + libdir=/usr/lib + ;; + esac + fi + + if test `eval echo ${includedir}` = NONE/include ; then + includedir=/usr/include + fi + + if test `eval echo ${datarootdir}` = NONE/share ; then + datarootdir=/usr/share + fi + prefix= +fi + +if test x${exec_prefix} = xNONE ; then + exec_prefix=${prefix} +fi + +sysconfdir=`eval echo ${sysconfdir}` +datarootdir=`eval echo ${datarootdir}` +docdir=`eval echo ${docdir}` +htmldir=`eval echo ${htmldir}` +libdir=`eval echo ${libdir}` +includedir=`eval echo ${includedir}` +localedir=`eval echo ${datarootdir}/locale` +cat >>confdefs.h <<_ACEOF +#define SYSCONFDIR "$sysconfdir" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define LOCALEDIR "$localedir" +_ACEOF + + +if test x$sbindir = x'${exec_prefix}/sbin' ; then + sbindir=${exec_prefix}/sbin +fi +sbindir=`eval echo ${sbindir}` + +if test x$mandir = x'${datarootdir}/man' ; then + mandir=/usr/share/man +fi + +if test x$htmldir = x'/usr/share/doc/bacula/' ; then + htmldir=`eval echo ${docdir}html` +fi + +if test x$docdir = x'/usr/share/doc/' ; then + docdir=`eval echo ${docdir}bacula` +fi + + +for ac_prog in msgfmt +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_MSGFMT+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $MSGFMT in + [\\/]* | ?:[\\/]*) + ac_cv_path_MSGFMT="$MSGFMT" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_MSGFMT="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + ;; +esac +fi +MSGFMT=$ac_cv_path_MSGFMT +if test -n "$MSGFMT"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MSGFMT" >&5 +$as_echo "$MSGFMT" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$MSGFMT" && break +done +test -n "$MSGFMT" || MSGFMT="no" + +if test "$MSGFMT" = "no" +then + echo 'msgfmt program not found, disabling NLS !' 
+ USE_NLS=no + USE_INCLUDED_LIBINTL=no +#else + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C Library 2 or newer" >&5 +$as_echo_n "checking whether we are using the GNU C Library 2 or newer... " >&6; } +if ${ac_cv_gnu_library_2+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +#include +#ifdef __GNU_LIBRARY__ + #if (__GLIBC__ >= 2) + Lucky GNU user + #endif +#endif + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "Lucky GNU user" >/dev/null 2>&1; then : + ac_cv_gnu_library_2=yes +else + ac_cv_gnu_library_2=no +fi +rm -f conftest* + + + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_gnu_library_2" >&5 +$as_echo "$ac_cv_gnu_library_2" >&6; } + + GLIBC2="$ac_cv_gnu_library_2" + + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args. +set dummy ${ac_tool_prefix}ranlib; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_RANLIB+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$RANLIB"; then + ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +RANLIB=$ac_cv_prog_RANLIB +if test -n "$RANLIB"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5 +$as_echo "$RANLIB" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_RANLIB"; then + ac_ct_RANLIB=$RANLIB + # Extract the first word of "ranlib", so it can be a program name with args. +set dummy ranlib; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_RANLIB+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_RANLIB"; then + ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_RANLIB="ranlib" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB +if test -n "$ac_ct_RANLIB"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5 +$as_echo "$ac_ct_RANLIB" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_RANLIB" = x; then + RANLIB=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + RANLIB=$ac_ct_RANLIB + fi +else + RANLIB="$ac_cv_prog_RANLIB" +fi + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for strerror in -lcposix" >&5 +$as_echo_n "checking for strerror in -lcposix... " >&6; } +if ${ac_cv_lib_cposix_strerror+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lcposix $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char strerror (); +int +main () +{ +return strerror (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_cposix_strerror=yes +else + ac_cv_lib_cposix_strerror=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_cposix_strerror" >&5 +$as_echo "$ac_cv_lib_cposix_strerror" >&6; } +if test "x$ac_cv_lib_cposix_strerror" = xyes; then : + LIBS="$LIBS -lcposix" +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for an ANSI C-conforming const" >&5 +$as_echo_n "checking for an ANSI C-conforming const... " >&6; } +if ${ac_cv_c_const+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + +#ifndef __cplusplus + /* Ultrix mips cc rejects this sort of thing. */ + typedef int charset[2]; + const charset cs = { 0, 0 }; + /* SunOS 4.1.1 cc rejects this. */ + char const *const *pcpcc; + char **ppc; + /* NEC SVR4.0.2 mips cc rejects this. */ + struct point {int x, y;}; + static struct point const zero = {0,0}; + /* AIX XL C 1.02.0.0 rejects this. + It does not let you subtract one const X* pointer from another in + an arm of an if-expression whose if-part is not a constant + expression */ + const char *g = "string"; + pcpcc = &g + (g ? g-g : 0); + /* HPUX 7.0 cc rejects these. */ + ++pcpcc; + ppc = (char**) pcpcc; + pcpcc = (char const *const *) ppc; + { /* SCO 3.2v4 cc rejects this sort of thing. */ + char tx; + char *t = &tx; + char const *s = 0 ? (char *) 0 : (char const *) 0; + + *t++ = 0; + if (s) return 0; + } + { /* Someone thinks the Sun supposedly-ANSI compiler will reject this. */ + int x[] = {25, 17}; + const int *foo = &x[0]; + ++foo; + } + { /* Sun SC1.0 ANSI compiler rejects this -- but not the above. 
*/ + typedef const int *iptr; + iptr p = 0; + ++p; + } + { /* AIX XL C 1.02.0.0 rejects this sort of thing, saying + "k.c", line 2.27: 1506-025 (S) Operand must be a modifiable lvalue. */ + struct s { int j; const int *ap[3]; } bx; + struct s *b = &bx; b->j = 5; + } + { /* ULTRIX-32 V3.1 (Rev 9) vcc rejects this */ + const int foo = 10; + if (!foo) return 0; + } + return !cs[0] && !zero.x; +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_c_const=yes +else + ac_cv_c_const=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_const" >&5 +$as_echo "$ac_cv_c_const" >&6; } +if test $ac_cv_c_const = no; then + +$as_echo "#define const /**/" >>confdefs.h + +fi + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for signed" >&5 +$as_echo_n "checking for signed... " >&6; } +if ${bh_cv_c_signed+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ +signed char x; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + bh_cv_c_signed=yes +else + bh_cv_c_signed=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $bh_cv_c_signed" >&5 +$as_echo "$bh_cv_c_signed" >&6; } + if test $bh_cv_c_signed = no; then + +$as_echo "#define signed /**/" >>confdefs.h + + fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for inline" >&5 +$as_echo_n "checking for inline... " >&6; } +if ${ac_cv_c_inline+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_cv_c_inline=no +for ac_kw in inline __inline__ __inline; do + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#ifndef __cplusplus +typedef int foo_t; +static $ac_kw foo_t static_foo () {return 0; } +$ac_kw foo_t foo () {return 0; } +#endif + +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_c_inline=$ac_kw +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + test "$ac_cv_c_inline" != no && break +done + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_inline" >&5 +$as_echo "$ac_cv_c_inline" >&6; } + +case $ac_cv_c_inline in + inline | yes) ;; + *) + case $ac_cv_c_inline in + no) ac_val=;; + *) ac_val=$ac_cv_c_inline;; + esac + cat >>confdefs.h <<_ACEOF +#ifndef __cplusplus +#define inline $ac_val +#endif +_ACEOF + ;; +esac + +ac_fn_c_check_type "$LINENO" "off_t" "ac_cv_type_off_t" "$ac_includes_default" +if test "x$ac_cv_type_off_t" = xyes; then : + +else + +cat >>confdefs.h <<_ACEOF +#define off_t long int +_ACEOF + +fi + +ac_fn_c_check_type "$LINENO" "size_t" "ac_cv_type_size_t" "$ac_includes_default" +if test "x$ac_cv_type_size_t" = xyes; then : + +else + +cat >>confdefs.h <<_ACEOF +#define size_t unsigned int +_ACEOF + +fi + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for long long" >&5 +$as_echo_n "checking for long long... " >&6; } +if ${ac_cv_type_long_long+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +long long ll = 1LL; int i = 63; +int +main () +{ +long long llmax = (long long) -1; + return ll << i | ll >> i | llmax / ll | llmax % ll; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_type_long_long=yes +else + ac_cv_type_long_long=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_type_long_long" >&5 +$as_echo "$ac_cv_type_long_long" >&6; } + if test $ac_cv_type_long_long = yes; then + +$as_echo "#define HAVE_LONG_LONG 1" >>confdefs.h + + fi + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for long double" >&5 +$as_echo_n "checking for long double... " >&6; } +if ${gt_cv_c_long_double+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test "$GCC" = yes; then + gt_cv_c_long_double=yes + else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + /* The Stardent Vistra knows sizeof(long double), but does not support it. */ + long double foo = 0.0; + /* On Ultrix 4.3 cc, long double is 4 and double is 8. */ + int array [2*(sizeof(long double) >= sizeof(double)) - 1]; + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + gt_cv_c_long_double=yes +else + gt_cv_c_long_double=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_cv_c_long_double" >&5 +$as_echo "$gt_cv_c_long_double" >&6; } + if test $gt_cv_c_long_double = yes; then + +$as_echo "#define HAVE_LONG_DOUBLE 1" >>confdefs.h + + fi + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for wchar_t" >&5 +$as_echo_n "checking for wchar_t... " >&6; } +if ${gt_cv_c_wchar_t+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include + wchar_t foo = (wchar_t)'\0'; +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + gt_cv_c_wchar_t=yes +else + gt_cv_c_wchar_t=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_cv_c_wchar_t" >&5 +$as_echo "$gt_cv_c_wchar_t" >&6; } + if test $gt_cv_c_wchar_t = yes; then + +$as_echo "#define HAVE_WCHAR_T 1" >>confdefs.h + + fi + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for wint_t" >&5 +$as_echo_n "checking for wint_t... " >&6; } +if ${gt_cv_c_wint_t+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include + wint_t foo = (wchar_t)'\0'; +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + gt_cv_c_wint_t=yes +else + gt_cv_c_wint_t=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_cv_c_wint_t" >&5 +$as_echo "$gt_cv_c_wint_t" >&6; } + if test $gt_cv_c_wint_t = yes; then + +$as_echo "#define HAVE_WINT_T 1" >>confdefs.h + + fi + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for inttypes.h" >&5 +$as_echo_n "checking for inttypes.h... " >&6; } +if ${gl_cv_header_inttypes_h+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#include +#include +int +main () +{ +uintmax_t i = (uintmax_t) -1; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + gl_cv_header_inttypes_h=yes +else + gl_cv_header_inttypes_h=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gl_cv_header_inttypes_h" >&5 +$as_echo "$gl_cv_header_inttypes_h" >&6; } + if test $gl_cv_header_inttypes_h = yes; then + +cat >>confdefs.h <<_ACEOF +#define HAVE_INTTYPES_H_WITH_UINTMAX 1 +_ACEOF + + fi + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for stdint.h" >&5 +$as_echo_n "checking for stdint.h... " >&6; } +if ${gl_cv_header_stdint_h+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include +int +main () +{ +uintmax_t i = (uintmax_t) -1; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + gl_cv_header_stdint_h=yes +else + gl_cv_header_stdint_h=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gl_cv_header_stdint_h" >&5 +$as_echo "$gl_cv_header_stdint_h" >&6; } + if test $gl_cv_header_stdint_h = yes; then + +cat >>confdefs.h <<_ACEOF +#define HAVE_STDINT_H_WITH_UINTMAX 1 +_ACEOF + + fi + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for intmax_t" >&5 +$as_echo_n "checking for intmax_t... " >&6; } +if ${gt_cv_c_intmax_t+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +#include +#include +#if HAVE_STDINT_H_WITH_UINTMAX +#include +#endif +#if HAVE_INTTYPES_H_WITH_UINTMAX +#include +#endif + +int +main () +{ +intmax_t x = -1; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + gt_cv_c_intmax_t=yes +else + gt_cv_c_intmax_t=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_cv_c_intmax_t" >&5 +$as_echo "$gt_cv_c_intmax_t" >&6; } + if test $gt_cv_c_intmax_t = yes; then + +$as_echo "#define HAVE_INTMAX_T 1" >>confdefs.h + + fi + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether printf() supports POSIX/XSI format strings" >&5 +$as_echo_n "checking whether printf() supports POSIX/XSI format strings... " >&6; } +if ${gt_cv_func_printf_posix+:} false; then : + $as_echo_n "(cached) " >&6 +else + + if test "$cross_compiling" = yes; then : + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +#if defined __NetBSD__ || defined _MSC_VER || defined __MINGW32__ || defined __CYGWIN__ + notposix +#endif + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "notposix" >/dev/null 2>&1; then : + gt_cv_func_printf_posix="guessing no" +else + gt_cv_func_printf_posix="guessing yes" +fi +rm -f conftest* + + +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +#include +#include +/* The string "%2$d %1$d", with dollar characters protected from the shell's + dollar expansion (possibly an autoconf bug). 
*/ +static char format[] = { '%', '2', '$', 'd', ' ', '%', '1', '$', 'd', '\0' }; +static char buf[100]; +int main () +{ + sprintf (buf, format, 33, 55); + return (strcmp (buf, "55 33") != 0); +} +_ACEOF +if ac_fn_c_try_run "$LINENO"; then : + gt_cv_func_printf_posix=yes +else + gt_cv_func_printf_posix=no +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext +fi + + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_cv_func_printf_posix" >&5 +$as_echo "$gt_cv_func_printf_posix" >&6; } + case $gt_cv_func_printf_posix in + *yes) + +$as_echo "#define HAVE_POSIX_PRINTF 1" >>confdefs.h + + ;; + esac + +# The Ultrix 4.2 mips builtin alloca declared by alloca.h only works +# for constant arguments. Useless! +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for working alloca.h" >&5 +$as_echo_n "checking for working alloca.h... " >&6; } +if ${ac_cv_working_alloca_h+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +int +main () +{ +char *p = (char *) alloca (2 * sizeof (int)); + if (p) return 0; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_working_alloca_h=yes +else + ac_cv_working_alloca_h=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_working_alloca_h" >&5 +$as_echo "$ac_cv_working_alloca_h" >&6; } +if test $ac_cv_working_alloca_h = yes; then + +$as_echo "#define HAVE_ALLOCA_H 1" >>confdefs.h + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for alloca" >&5 +$as_echo_n "checking for alloca... " >&6; } +if ${ac_cv_func_alloca_works+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#ifdef __GNUC__ +# define alloca __builtin_alloca +#else +# ifdef _MSC_VER +# include +# define alloca _alloca +# else +# ifdef HAVE_ALLOCA_H +# include +# else +# ifdef _AIX + #pragma alloca +# else +# ifndef alloca /* predefined by HP cc +Olibcalls */ +void *alloca (size_t); +# endif +# endif +# endif +# endif +#endif + +int +main () +{ +char *p = (char *) alloca (1); + if (p) return 0; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_func_alloca_works=yes +else + ac_cv_func_alloca_works=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_alloca_works" >&5 +$as_echo "$ac_cv_func_alloca_works" >&6; } + +if test $ac_cv_func_alloca_works = yes; then + +$as_echo "#define HAVE_ALLOCA 1" >>confdefs.h + +else + # The SVR3 libPW and SVR4 libucb both contain incompatible functions +# that cause trouble. Some versions do not even contain alloca or +# contain a buggy version. If you still want to use their alloca, +# use ar to extract alloca.o from them instead of compiling alloca.c. + +ALLOCA=\${LIBOBJDIR}alloca.$ac_objext + +$as_echo "#define C_ALLOCA 1" >>confdefs.h + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether \`alloca.c' needs Cray hooks" >&5 +$as_echo_n "checking whether \`alloca.c' needs Cray hooks... " >&6; } +if ${ac_cv_os_cray+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#if defined CRAY && ! 
defined CRAY2 +webecray +#else +wenotbecray +#endif + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "webecray" >/dev/null 2>&1; then : + ac_cv_os_cray=yes +else + ac_cv_os_cray=no +fi +rm -f conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_os_cray" >&5 +$as_echo "$ac_cv_os_cray" >&6; } +if test $ac_cv_os_cray = yes; then + for ac_func in _getb67 GETB67 getb67; do + as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" +if eval test \"x\$"$as_ac_var"\" = x"yes"; then : + +cat >>confdefs.h <<_ACEOF +#define CRAY_STACKSEG_END $ac_func +_ACEOF + + break +fi + + done +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking stack direction for C alloca" >&5 +$as_echo_n "checking stack direction for C alloca... " >&6; } +if ${ac_cv_c_stack_direction+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test "$cross_compiling" = yes; then : + ac_cv_c_stack_direction=0 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$ac_includes_default +int +find_stack_direction (int *addr, int depth) +{ + int dir, dummy = 0; + if (! addr) + addr = &dummy; + *addr = addr < &dummy ? 1 : addr == &dummy ? 0 : -1; + dir = depth ? find_stack_direction (addr, depth - 1) : 0; + return dir + dummy; +} + +int +main (int argc, char **argv) +{ + return find_stack_direction (0, argc + !argv + 20) < 0; +} +_ACEOF +if ac_fn_c_try_run "$LINENO"; then : + ac_cv_c_stack_direction=1 +else + ac_cv_c_stack_direction=-1 +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_stack_direction" >&5 +$as_echo "$ac_cv_c_stack_direction" >&6; } +cat >>confdefs.h <<_ACEOF +#define STACK_DIRECTION $ac_cv_c_stack_direction +_ACEOF + + +fi + + + + + for ac_header in $ac_header_list +do : + as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` +ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default +" +if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +fi + +done + + + + + + + + +for ac_func in getpagesize +do : + ac_fn_c_check_func "$LINENO" "getpagesize" "ac_cv_func_getpagesize" +if test "x$ac_cv_func_getpagesize" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_GETPAGESIZE 1 +_ACEOF + +fi +done + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for working mmap" >&5 +$as_echo_n "checking for working mmap... " >&6; } +if ${ac_cv_func_mmap_fixed_mapped+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test "$cross_compiling" = yes; then : + ac_cv_func_mmap_fixed_mapped=no +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$ac_includes_default +/* malloc might have been renamed as rpl_malloc. */ +#undef malloc + +/* Thanks to Mike Haertel and Jim Avera for this test. + Here is a matrix of mmap possibilities: + mmap private not fixed + mmap private fixed at somewhere currently unmapped + mmap private fixed at somewhere already mapped + mmap shared not fixed + mmap shared fixed at somewhere currently unmapped + mmap shared fixed at somewhere already mapped + For private mappings, we should verify that changes cannot be read() + back from the file, nor mmap's back from the file at a different + address. 
(There have been systems where private was not correctly + implemented like the infamous i386 svr4.0, and systems where the + VM page cache was not coherent with the file system buffer cache + like early versions of FreeBSD and possibly contemporary NetBSD.) + For shared mappings, we should conversely verify that changes get + propagated back to all the places they're supposed to be. + + Grep wants private fixed already mapped. + The main things grep needs to know about mmap are: + * does it exist and is it safe to write into the mmap'd area + * how to use it (BSD variants) */ + +#include +#include + +#if !defined STDC_HEADERS && !defined HAVE_STDLIB_H +char *malloc (); +#endif + +/* This mess was copied from the GNU getpagesize.h. */ +#ifndef HAVE_GETPAGESIZE +# ifdef _SC_PAGESIZE +# define getpagesize() sysconf(_SC_PAGESIZE) +# else /* no _SC_PAGESIZE */ +# ifdef HAVE_SYS_PARAM_H +# include +# ifdef EXEC_PAGESIZE +# define getpagesize() EXEC_PAGESIZE +# else /* no EXEC_PAGESIZE */ +# ifdef NBPG +# define getpagesize() NBPG * CLSIZE +# ifndef CLSIZE +# define CLSIZE 1 +# endif /* no CLSIZE */ +# else /* no NBPG */ +# ifdef NBPC +# define getpagesize() NBPC +# else /* no NBPC */ +# ifdef PAGESIZE +# define getpagesize() PAGESIZE +# endif /* PAGESIZE */ +# endif /* no NBPC */ +# endif /* no NBPG */ +# endif /* no EXEC_PAGESIZE */ +# else /* no HAVE_SYS_PARAM_H */ +# define getpagesize() 8192 /* punt totally */ +# endif /* no HAVE_SYS_PARAM_H */ +# endif /* no _SC_PAGESIZE */ + +#endif /* no HAVE_GETPAGESIZE */ + +int +main () +{ + char *data, *data2, *data3; + const char *cdata2; + int i, pagesize; + int fd, fd2; + + pagesize = getpagesize (); + + /* First, make a file with some known garbage in it. */ + data = (char *) malloc (pagesize); + if (!data) + return 1; + for (i = 0; i < pagesize; ++i) + *(data + i) = rand (); + umask (0); + fd = creat ("conftest.mmap", 0600); + if (fd < 0) + return 2; + if (write (fd, data, pagesize) != pagesize) + return 3; + close (fd); + + /* Next, check that the tail of a page is zero-filled. File must have + non-zero length, otherwise we risk SIGBUS for entire page. */ + fd2 = open ("conftest.txt", O_RDWR | O_CREAT | O_TRUNC, 0600); + if (fd2 < 0) + return 4; + cdata2 = ""; + if (write (fd2, cdata2, 1) != 1) + return 5; + data2 = (char *) mmap (0, pagesize, PROT_READ | PROT_WRITE, MAP_SHARED, fd2, 0L); + if (data2 == MAP_FAILED) + return 6; + for (i = 0; i < pagesize; ++i) + if (*(data2 + i)) + return 7; + close (fd2); + if (munmap (data2, pagesize)) + return 8; + + /* Next, try to mmap the file at a fixed address which already has + something else allocated at it. If we can, also make sure that + we see the same garbage. */ + fd = open ("conftest.mmap", O_RDWR); + if (fd < 0) + return 9; + if (data2 != mmap (data2, pagesize, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_FIXED, fd, 0L)) + return 10; + for (i = 0; i < pagesize; ++i) + if (*(data + i) != *(data2 + i)) + return 11; + + /* Finally, make sure that changes to the mapped area do not + percolate back to the file as seen by read(). (This is a bug on + some variants of i386 svr4.0.) 
*/ + for (i = 0; i < pagesize; ++i) + *(data2 + i) = *(data2 + i) + 1; + data3 = (char *) malloc (pagesize); + if (!data3) + return 12; + if (read (fd, data3, pagesize) != pagesize) + return 13; + for (i = 0; i < pagesize; ++i) + if (*(data + i) != *(data3 + i)) + return 14; + close (fd); + free (data); + free (data3); + return 0; +} +_ACEOF +if ac_fn_c_try_run "$LINENO"; then : + ac_cv_func_mmap_fixed_mapped=yes +else + ac_cv_func_mmap_fixed_mapped=no +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_mmap_fixed_mapped" >&5 +$as_echo "$ac_cv_func_mmap_fixed_mapped" >&6; } +if test $ac_cv_func_mmap_fixed_mapped = yes; then + +$as_echo "#define HAVE_MMAP 1" >>confdefs.h + +fi +rm -f conftest.mmap conftest.txt + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C Library 2.1 or newer" >&5 +$as_echo_n "checking whether we are using the GNU C Library 2.1 or newer... " >&6; } +if ${ac_cv_gnu_library_2_1+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +#include +#ifdef __GNU_LIBRARY__ + #if (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 1) || (__GLIBC__ > 2) + Lucky GNU user + #endif +#endif + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "Lucky GNU user" >/dev/null 2>&1; then : + ac_cv_gnu_library_2_1=yes +else + ac_cv_gnu_library_2_1=no +fi +rm -f conftest* + + + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_gnu_library_2_1" >&5 +$as_echo "$ac_cv_gnu_library_2_1" >&6; } + + GLIBC21="$ac_cv_gnu_library_2_1" + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether integer division by zero raises SIGFPE" >&5 +$as_echo_n "checking whether integer division by zero raises SIGFPE... " >&6; } +if ${gt_cv_int_divbyzero_sigfpe+:} false; then : + $as_echo_n "(cached) " >&6 +else + + if test "$cross_compiling" = yes; then : + + # Guess based on the CPU. + case "$host_cpu" in + alpha* | i3456786 | m68k | s390*) + gt_cv_int_divbyzero_sigfpe="guessing yes";; + *) + gt_cv_int_divbyzero_sigfpe="guessing no";; + esac + +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +#include +#include + +static void +#ifdef __cplusplus +sigfpe_handler (int sig) +#else +sigfpe_handler (sig) int sig; +#endif +{ + /* Exit with code 0 if SIGFPE, with code 1 if any other signal. */ + exit (sig != SIGFPE); +} + +int x = 1; +int y = 0; +int z; +int nan; + +int main () +{ + signal (SIGFPE, sigfpe_handler); +/* IRIX and AIX (when "xlc -qcheck" is used) yield signal SIGTRAP. */ +#if (defined (__sgi) || defined (_AIX)) && defined (SIGTRAP) + signal (SIGTRAP, sigfpe_handler); +#endif +/* Linux/SPARC yields signal SIGILL. 
*/ +#if defined (__sparc__) && defined (__linux__) + signal (SIGILL, sigfpe_handler); +#endif + + z = x / y; + nan = y / y; + exit (1); +} + +_ACEOF +if ac_fn_c_try_run "$LINENO"; then : + gt_cv_int_divbyzero_sigfpe=yes +else + gt_cv_int_divbyzero_sigfpe=no +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext +fi + + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_cv_int_divbyzero_sigfpe" >&5 +$as_echo "$gt_cv_int_divbyzero_sigfpe" >&6; } + case "$gt_cv_int_divbyzero_sigfpe" in + *yes) value=1;; + *) value=0;; + esac + +cat >>confdefs.h <<_ACEOF +#define INTDIV0_RAISES_SIGFPE $value +_ACEOF + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for unsigned long long" >&5 +$as_echo_n "checking for unsigned long long... " >&6; } +if ${ac_cv_type_unsigned_long_long+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +unsigned long long ull = 1ULL; int i = 63; +int +main () +{ +unsigned long long ullmax = (unsigned long long) -1; + return ull << i | ull >> i | ullmax / ull | ullmax % ull; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_type_unsigned_long_long=yes +else + ac_cv_type_unsigned_long_long=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_type_unsigned_long_long" >&5 +$as_echo "$ac_cv_type_unsigned_long_long" >&6; } + if test $ac_cv_type_unsigned_long_long = yes; then + +$as_echo "#define HAVE_UNSIGNED_LONG_LONG 1" >>confdefs.h + + fi + + + + + if test $gl_cv_header_inttypes_h = no && test $gl_cv_header_stdint_h = no; then + + test $ac_cv_type_unsigned_long_long = yes \ + && ac_type='unsigned long long' \ + || ac_type='unsigned long' + +cat >>confdefs.h <<_ACEOF +#define uintmax_t $ac_type +_ACEOF + + else + +$as_echo "#define HAVE_UINTMAX_T 1" >>confdefs.h + + fi + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for inttypes.h" >&5 +$as_echo_n "checking for inttypes.h... " >&6; } +if ${gt_cv_header_inttypes_h+:} false; then : + $as_echo_n "(cached) " >&6 +else + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + gt_cv_header_inttypes_h=yes +else + gt_cv_header_inttypes_h=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_cv_header_inttypes_h" >&5 +$as_echo "$gt_cv_header_inttypes_h" >&6; } + if test $gt_cv_header_inttypes_h = yes; then + +cat >>confdefs.h <<_ACEOF +#define HAVE_INTTYPES_H 1 +_ACEOF + + fi + + + + if test $gt_cv_header_inttypes_h = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the inttypes.h PRIxNN macros are broken" >&5 +$as_echo_n "checking whether the inttypes.h PRIxNN macros are broken... " >&6; } +if ${gt_cv_inttypes_pri_broken+:} false; then : + $as_echo_n "(cached) " >&6 +else + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#include +#ifdef PRId32 +char *p = PRId32; +#endif + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + gt_cv_inttypes_pri_broken=no +else + gt_cv_inttypes_pri_broken=yes +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_cv_inttypes_pri_broken" >&5 +$as_echo "$gt_cv_inttypes_pri_broken" >&6; } + fi + if test "$gt_cv_inttypes_pri_broken" = yes; then + +cat >>confdefs.h <<_ACEOF +#define PRI_MACROS_BROKEN 1 +_ACEOF + + fi + + + for ac_header in stdint.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "stdint.h" "ac_cv_header_stdint_h" "$ac_includes_default" +if test "x$ac_cv_header_stdint_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_STDINT_H 1 +_ACEOF + +fi + +done + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for SIZE_MAX" >&5 +$as_echo_n "checking for SIZE_MAX... " >&6; } + result= + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +#include +#if HAVE_STDINT_H +#include +#endif +#ifdef SIZE_MAX +Found it +#endif + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "Found it" >/dev/null 2>&1; then : + result=yes +fi +rm -f conftest* + + if test -z "$result"; then + if ac_fn_c_compute_int "$LINENO" "~(size_t)0 / 10" "res_hi" "#include "; then : + +else + result=? +fi + + + if ac_fn_c_compute_int "$LINENO" "~(size_t)0 % 10" "res_lo" "#include "; then : + +else + result=? +fi + + + if ac_fn_c_compute_int "$LINENO" "sizeof (size_t) <= sizeof (unsigned int)" "fits_in_uint" "#include "; then : + +else + result=? +fi + + + if test "$fits_in_uint" = 1; then + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include + extern size_t foo; + extern unsigned long foo; + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + fits_in_uint=0 +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + fi + if test -z "$result"; then + if test "$fits_in_uint" = 1; then + result="$res_hi$res_lo"U + else + result="$res_hi$res_lo"UL + fi + else + result='~(size_t)0' + fi + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $result" >&5 +$as_echo "$result" >&6; } + if test "$result" != yes; then + +cat >>confdefs.h <<_ACEOF +#define SIZE_MAX $result +_ACEOF + + fi + + + + + for ac_header in stdint.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "stdint.h" "ac_cv_header_stdint_h" "$ac_includes_default" +if test "x$ac_cv_header_stdint_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_STDINT_H 1 +_ACEOF + +fi + +done + + + + + + + + + ac_fn_c_check_type "$LINENO" "ptrdiff_t" "ac_cv_type_ptrdiff_t" "$ac_includes_default" +if test "x$ac_cv_type_ptrdiff_t" = xyes; then : + +else + +$as_echo "#define ptrdiff_t long" >>confdefs.h + + +fi + + for ac_header in argz.h limits.h locale.h nl_types.h malloc.h stddef.h \ +stdlib.h string.h unistd.h sys/param.h +do : + as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` +ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" +if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +fi + +done + + for ac_func in asprintf fwprintf getcwd getegid geteuid getgid getuid \ +mempcpy munmap putenv setenv setlocale snprintf stpcpy strcasecmp strdup \ +strtoul tsearch wcslen __argz_count __argz_stringify __argz_next \ +__fsetlocking +do : + as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` 
+ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" +if eval test \"x\$"$as_ac_var"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF + +fi +done + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether _snprintf is declared" >&5 +$as_echo_n "checking whether _snprintf is declared... " >&6; } +if ${ac_cv_have_decl__snprintf+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +int +main () +{ + +#ifndef _snprintf + char *p = (char *) _snprintf; +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_have_decl__snprintf=yes +else + ac_cv_have_decl__snprintf=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_have_decl__snprintf" >&5 +$as_echo "$ac_cv_have_decl__snprintf" >&6; } + if test $ac_cv_have_decl__snprintf = yes; then + gt_value=1 + else + gt_value=0 + fi + +cat >>confdefs.h <<_ACEOF +#define HAVE_DECL__SNPRINTF $gt_value +_ACEOF + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether _snwprintf is declared" >&5 +$as_echo_n "checking whether _snwprintf is declared... " >&6; } +if ${ac_cv_have_decl__snwprintf+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +int +main () +{ + +#ifndef _snwprintf + char *p = (char *) _snwprintf; +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_have_decl__snwprintf=yes +else + ac_cv_have_decl__snwprintf=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_have_decl__snwprintf" >&5 +$as_echo "$ac_cv_have_decl__snwprintf" >&6; } + if test $ac_cv_have_decl__snwprintf = yes; then + gt_value=1 + else + gt_value=0 + fi + +cat >>confdefs.h <<_ACEOF +#define HAVE_DECL__SNWPRINTF $gt_value +_ACEOF + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether feof_unlocked is declared" >&5 +$as_echo_n "checking whether feof_unlocked is declared... " >&6; } +if ${ac_cv_have_decl_feof_unlocked+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +int +main () +{ + +#ifndef feof_unlocked + char *p = (char *) feof_unlocked; +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_have_decl_feof_unlocked=yes +else + ac_cv_have_decl_feof_unlocked=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_have_decl_feof_unlocked" >&5 +$as_echo "$ac_cv_have_decl_feof_unlocked" >&6; } + if test $ac_cv_have_decl_feof_unlocked = yes; then + gt_value=1 + else + gt_value=0 + fi + +cat >>confdefs.h <<_ACEOF +#define HAVE_DECL_FEOF_UNLOCKED $gt_value +_ACEOF + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether fgets_unlocked is declared" >&5 +$as_echo_n "checking whether fgets_unlocked is declared... " >&6; } +if ${ac_cv_have_decl_fgets_unlocked+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#include +int +main () +{ + +#ifndef fgets_unlocked + char *p = (char *) fgets_unlocked; +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_have_decl_fgets_unlocked=yes +else + ac_cv_have_decl_fgets_unlocked=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_have_decl_fgets_unlocked" >&5 +$as_echo "$ac_cv_have_decl_fgets_unlocked" >&6; } + if test $ac_cv_have_decl_fgets_unlocked = yes; then + gt_value=1 + else + gt_value=0 + fi + +cat >>confdefs.h <<_ACEOF +#define HAVE_DECL_FGETS_UNLOCKED $gt_value +_ACEOF + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether getc_unlocked is declared" >&5 +$as_echo_n "checking whether getc_unlocked is declared... " >&6; } +if ${ac_cv_have_decl_getc_unlocked+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +int +main () +{ + +#ifndef getc_unlocked + char *p = (char *) getc_unlocked; +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_have_decl_getc_unlocked=yes +else + ac_cv_have_decl_getc_unlocked=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_have_decl_getc_unlocked" >&5 +$as_echo "$ac_cv_have_decl_getc_unlocked" >&6; } + if test $ac_cv_have_decl_getc_unlocked = yes; then + gt_value=1 + else + gt_value=0 + fi + +cat >>confdefs.h <<_ACEOF +#define HAVE_DECL_GETC_UNLOCKED $gt_value +_ACEOF + + + + case $gt_cv_func_printf_posix in + *yes) HAVE_POSIX_PRINTF=1 ;; + *) HAVE_POSIX_PRINTF=0 ;; + esac + + if test "$ac_cv_func_asprintf" = yes; then + HAVE_ASPRINTF=1 + else + HAVE_ASPRINTF=0 + fi + + if test "$ac_cv_func_snprintf" = yes; then + HAVE_SNPRINTF=1 + else + HAVE_SNPRINTF=0 + fi + + if test "$ac_cv_func_wprintf" = yes; then + HAVE_WPRINTF=1 + else + HAVE_WPRINTF=0 + fi + + + + + + + + am_save_CPPFLAGS="$CPPFLAGS" + + for element in $INCICONV; do + haveit= + for x in $CPPFLAGS; do + + acl_save_prefix="$prefix" + prefix="$acl_final_prefix" + acl_save_exec_prefix="$exec_prefix" + exec_prefix="$acl_final_exec_prefix" + eval x=\"$x\" + exec_prefix="$acl_save_exec_prefix" + prefix="$acl_save_prefix" + + if test "X$x" = "X$element"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + CPPFLAGS="${CPPFLAGS}${CPPFLAGS:+ }$element" + fi + done + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for iconv" >&5 +$as_echo_n "checking for iconv... " >&6; } +if ${am_cv_func_iconv+:} false; then : + $as_echo_n "(cached) " >&6 +else + + am_cv_func_iconv="no, consider installing GNU libiconv" + am_cv_lib_iconv=no + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include +int +main () +{ +iconv_t cd = iconv_open("",""); + iconv(cd,NULL,NULL,NULL,NULL); + iconv_close(cd); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + am_cv_func_iconv=yes +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + if test "$am_cv_func_iconv" != yes; then + am_save_LIBS="$LIBS" + LIBS="$LIBS $LIBICONV" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#include +#include +int +main () +{ +iconv_t cd = iconv_open("",""); + iconv(cd,NULL,NULL,NULL,NULL); + iconv_close(cd); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + am_cv_lib_iconv=yes + am_cv_func_iconv=yes +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + LIBS="$am_save_LIBS" + fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_func_iconv" >&5 +$as_echo "$am_cv_func_iconv" >&6; } + if test "$am_cv_func_iconv" = yes; then + +$as_echo "#define HAVE_ICONV 1" >>confdefs.h + + fi + if test "$am_cv_lib_iconv" = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to link with libiconv" >&5 +$as_echo_n "checking how to link with libiconv... " >&6; } + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LIBICONV" >&5 +$as_echo "$LIBICONV" >&6; } + else + CPPFLAGS="$am_save_CPPFLAGS" + LIBICONV= + LTLIBICONV= + fi + + + + if test "$am_cv_func_iconv" = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for iconv declaration" >&5 +$as_echo_n "checking for iconv declaration... " >&6; } + if ${am_cv_proto_iconv+:} false; then : + $as_echo_n "(cached) " >&6 +else + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +#include +#include +extern +#ifdef __cplusplus +"C" +#endif +#if defined(__STDC__) || defined(__cplusplus) +size_t iconv (iconv_t cd, char * *inbuf, size_t *inbytesleft, char * *outbuf, size_t *outbytesleft); +#else +size_t iconv(); +#endif + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + am_cv_proto_iconv_arg1="" +else + am_cv_proto_iconv_arg1="const" +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + am_cv_proto_iconv="extern size_t iconv (iconv_t cd, $am_cv_proto_iconv_arg1 char * *inbuf, size_t *inbytesleft, char * *outbuf, size_t *outbytesleft);" +fi + + am_cv_proto_iconv=`echo "$am_cv_proto_iconv" | tr -s ' ' | sed -e 's/( /(/'` + { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${ac_t:- + }$am_cv_proto_iconv" >&5 +$as_echo "${ac_t:- + }$am_cv_proto_iconv" >&6; } + +cat >>confdefs.h <<_ACEOF +#define ICONV_CONST $am_cv_proto_iconv_arg1 +_ACEOF + + fi + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for nl_langinfo and CODESET" >&5 +$as_echo_n "checking for nl_langinfo and CODESET... " >&6; } +if ${am_cv_langinfo_codeset+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +int +main () +{ +char* cs = nl_langinfo(CODESET); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + am_cv_langinfo_codeset=yes +else + am_cv_langinfo_codeset=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_langinfo_codeset" >&5 +$as_echo "$am_cv_langinfo_codeset" >&6; } + if test $am_cv_langinfo_codeset = yes; then + +$as_echo "#define HAVE_LANGINFO_CODESET 1" >>confdefs.h + + fi + + if test $ac_cv_header_locale_h = yes; then + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for LC_MESSAGES" >&5 +$as_echo_n "checking for LC_MESSAGES... " >&6; } +if ${gt_cv_val_LC_MESSAGES+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#include +int +main () +{ +return LC_MESSAGES + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + gt_cv_val_LC_MESSAGES=yes +else + gt_cv_val_LC_MESSAGES=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_cv_val_LC_MESSAGES" >&5 +$as_echo "$gt_cv_val_LC_MESSAGES" >&6; } + if test $gt_cv_val_LC_MESSAGES = yes; then + +$as_echo "#define HAVE_LC_MESSAGES 1" >>confdefs.h + + fi + + fi + + if test -n "$INTL_MACOSX_LIBS"; then + CPPFLAGS="$CPPFLAGS -I/System/Library/Frameworks/CoreFoundation.framework/Headers" + fi + + for ac_prog in bison +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_INTLBISON+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$INTLBISON"; then + ac_cv_prog_INTLBISON="$INTLBISON" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_INTLBISON="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +INTLBISON=$ac_cv_prog_INTLBISON +if test -n "$INTLBISON"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $INTLBISON" >&5 +$as_echo "$INTLBISON" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$INTLBISON" && break +done + + if test -z "$INTLBISON"; then + ac_verc_fail=yes + else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking version of bison" >&5 +$as_echo_n "checking version of bison... " >&6; } + ac_prog_version=`$INTLBISON --version 2>&1 | sed -n 's/^.*GNU Bison.* \([0-9]*\.[0-9.]*\).*$/\1/p'` + case $ac_prog_version in + '') ac_prog_version="v. ?.??, bad"; ac_verc_fail=yes;; + 1.2[6-9]* | 1.[3-9][0-9]* | [2-9].*) + ac_prog_version="$ac_prog_version, ok"; ac_verc_fail=no;; + *) ac_prog_version="$ac_prog_version, bad"; ac_verc_fail=yes;; + esac + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_prog_version" >&5 +$as_echo "$ac_prog_version" >&6; } + fi + if test $ac_verc_fail = yes; then + INTLBISON=: + fi + + + + + + + + + + + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for CFPreferencesCopyAppValue" >&5 +$as_echo_n "checking for CFPreferencesCopyAppValue... " >&6; } +if ${gt_cv_func_CFPreferencesCopyAppValue+:} false; then : + $as_echo_n "(cached) " >&6 +else + gt_save_CPPFLAGS="$CPPFLAGS" + CPPFLAGS="$CPPFLAGS -I/System/Library/Frameworks/CoreFoundation.framework/Headers" + gt_save_LIBS="$LIBS" + LIBS="$LIBS -framework CoreFoundation" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#include +int +main () +{ +CFPreferencesCopyAppValue(NULL, NULL) + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + gt_cv_func_CFPreferencesCopyAppValue=yes +else + gt_cv_func_CFPreferencesCopyAppValue=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + CPPFLAGS="$gt_save_CPPFLAGS" + LIBS="$gt_save_LIBS" +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_cv_func_CFPreferencesCopyAppValue" >&5 +$as_echo "$gt_cv_func_CFPreferencesCopyAppValue" >&6; } + if test $gt_cv_func_CFPreferencesCopyAppValue = yes; then + +$as_echo "#define HAVE_CFPREFERENCESCOPYAPPVALUE 1" >>confdefs.h + + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for CFLocaleCopyCurrent" >&5 +$as_echo_n "checking for CFLocaleCopyCurrent... " >&6; } +if ${gt_cv_func_CFLocaleCopyCurrent+:} false; then : + $as_echo_n "(cached) " >&6 +else + gt_save_CPPFLAGS="$CPPFLAGS" + CPPFLAGS="$CPPFLAGS -I/System/Library/Frameworks/CoreFoundation.framework/Headers" + gt_save_LIBS="$LIBS" + LIBS="$LIBS -framework CoreFoundation" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +int +main () +{ +CFLocaleCopyCurrent(); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + gt_cv_func_CFLocaleCopyCurrent=yes +else + gt_cv_func_CFLocaleCopyCurrent=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + CPPFLAGS="$gt_save_CPPFLAGS" + LIBS="$gt_save_LIBS" +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_cv_func_CFLocaleCopyCurrent" >&5 +$as_echo "$gt_cv_func_CFLocaleCopyCurrent" >&6; } + if test $gt_cv_func_CFLocaleCopyCurrent = yes; then + +$as_echo "#define HAVE_CFLOCALECOPYCURRENT 1" >>confdefs.h + + fi + INTL_MACOSX_LIBS= + if test $gt_cv_func_CFPreferencesCopyAppValue = yes || test $gt_cv_func_CFLocaleCopyCurrent = yes; then + INTL_MACOSX_LIBS="-Wl,-framework -Wl,CoreFoundation" + fi + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether NLS is requested" >&5 +$as_echo_n "checking whether NLS is requested... " >&6; } + # Check whether --enable-nls was given. +if test "${enable_nls+set}" = set; then : + enableval=$enable_nls; USE_NLS=$enableval +else + USE_NLS=yes +fi + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $USE_NLS" >&5 +$as_echo "$USE_NLS" >&6; } + + + + + BUILD_INCLUDED_LIBINTL=no + USE_INCLUDED_LIBINTL=no + + LIBINTL= + LTLIBINTL= + POSUB= + + if test "$USE_NLS" = "yes"; then + gt_use_preinstalled_gnugettext=no + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether included gettext is requested" >&5 +$as_echo_n "checking whether included gettext is requested... " >&6; } + +# Check whether --with-included-gettext was given. +if test "${with_included_gettext+set}" = set; then : + withval=$with_included_gettext; nls_cv_force_use_gnu_gettext=$withval +else + nls_cv_force_use_gnu_gettext=no +fi + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $nls_cv_force_use_gnu_gettext" >&5 +$as_echo "$nls_cv_force_use_gnu_gettext" >&6; } + + nls_cv_use_gnu_gettext="$nls_cv_force_use_gnu_gettext" + if test "$nls_cv_force_use_gnu_gettext" != "yes"; then + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU gettext in libc" >&5 +$as_echo_n "checking for GNU gettext in libc... " >&6; } +if ${gt_cv_func_gnugettext1_libc+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#include +extern int _nl_msg_cat_cntr; +extern int *_nl_domain_bindings; +int +main () +{ +bindtextdomain ("", ""); +return * gettext ("") + _nl_msg_cat_cntr + *_nl_domain_bindings + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + gt_cv_func_gnugettext1_libc=yes +else + gt_cv_func_gnugettext1_libc=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_cv_func_gnugettext1_libc" >&5 +$as_echo "$gt_cv_func_gnugettext1_libc" >&6; } + + if test "$gt_cv_func_gnugettext1_libc" != "yes"; then + + + + use_additional=yes + + acl_save_prefix="$prefix" + prefix="$acl_final_prefix" + acl_save_exec_prefix="$exec_prefix" + exec_prefix="$acl_final_exec_prefix" + + eval additional_includedir=\"$includedir\" + eval additional_libdir=\"$libdir\" + + exec_prefix="$acl_save_exec_prefix" + prefix="$acl_save_prefix" + + +# Check whether --with-libintl-prefix was given. +if test "${with_libintl_prefix+set}" = set; then : + withval=$with_libintl_prefix; + if test "X$withval" = "Xno"; then + use_additional=no + else + if test "X$withval" = "X"; then + + acl_save_prefix="$prefix" + prefix="$acl_final_prefix" + acl_save_exec_prefix="$exec_prefix" + exec_prefix="$acl_final_exec_prefix" + + eval additional_includedir=\"$includedir\" + eval additional_libdir=\"$libdir\" + + exec_prefix="$acl_save_exec_prefix" + prefix="$acl_save_prefix" + + else + additional_includedir="$withval/include" + additional_libdir="$withval/lib" + fi + fi + +fi + + LIBINTL= + LTLIBINTL= + INCINTL= + rpathdirs= + ltrpathdirs= + names_already_handled= + names_next_round='intl ' + while test -n "$names_next_round"; do + names_this_round="$names_next_round" + names_next_round= + for name in $names_this_round; do + already_handled= + for n in $names_already_handled; do + if test "$n" = "$name"; then + already_handled=yes + break + fi + done + if test -z "$already_handled"; then + names_already_handled="$names_already_handled $name" + uppername=`echo "$name" | sed -e 'y|abcdefghijklmnopqrstuvwxyz./-|ABCDEFGHIJKLMNOPQRSTUVWXYZ___|'` + eval value=\"\$HAVE_LIB$uppername\" + if test -n "$value"; then + if test "$value" = yes; then + eval value=\"\$LIB$uppername\" + test -z "$value" || LIBINTL="${LIBINTL}${LIBINTL:+ }$value" + eval value=\"\$LTLIB$uppername\" + test -z "$value" || LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }$value" + else + : + fi + else + found_dir= + found_la= + found_so= + found_a= + if test $use_additional = yes; then + if test -n "$shlibext" && test -f "$additional_libdir/lib$name.$shlibext"; then + found_dir="$additional_libdir" + found_so="$additional_libdir/lib$name.$shlibext" + if test -f "$additional_libdir/lib$name.la"; then + found_la="$additional_libdir/lib$name.la" + fi + else + if test -f "$additional_libdir/lib$name.$libext"; then + found_dir="$additional_libdir" + found_a="$additional_libdir/lib$name.$libext" + if test -f "$additional_libdir/lib$name.la"; then + found_la="$additional_libdir/lib$name.la" + fi + fi + fi + fi + if test "X$found_dir" = "X"; then + for x in $LDFLAGS $LTLIBINTL; do + + acl_save_prefix="$prefix" + prefix="$acl_final_prefix" + acl_save_exec_prefix="$exec_prefix" + exec_prefix="$acl_final_exec_prefix" + eval x=\"$x\" + exec_prefix="$acl_save_exec_prefix" + prefix="$acl_save_prefix" + + case "$x" in + -L*) + dir=`echo "X$x" | sed -e 's/^X-L//'` + if test -n "$shlibext" && test -f "$dir/lib$name.$shlibext"; then + found_dir="$dir" + found_so="$dir/lib$name.$shlibext" + if test -f 
"$dir/lib$name.la"; then + found_la="$dir/lib$name.la" + fi + else + if test -f "$dir/lib$name.$libext"; then + found_dir="$dir" + found_a="$dir/lib$name.$libext" + if test -f "$dir/lib$name.la"; then + found_la="$dir/lib$name.la" + fi + fi + fi + ;; + esac + if test "X$found_dir" != "X"; then + break + fi + done + fi + if test "X$found_dir" != "X"; then + LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }-L$found_dir -l$name" + if test "X$found_so" != "X"; then + if test "$enable_rpath" = no || test "X$found_dir" = "X/usr/lib"; then + LIBINTL="${LIBINTL}${LIBINTL:+ }$found_so" + else + haveit= + for x in $ltrpathdirs; do + if test "X$x" = "X$found_dir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + ltrpathdirs="$ltrpathdirs $found_dir" + fi + if test "$hardcode_direct" = yes; then + LIBINTL="${LIBINTL}${LIBINTL:+ }$found_so" + else + if test -n "$hardcode_libdir_flag_spec" && test "$hardcode_minus_L" = no; then + LIBINTL="${LIBINTL}${LIBINTL:+ }$found_so" + haveit= + for x in $rpathdirs; do + if test "X$x" = "X$found_dir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + rpathdirs="$rpathdirs $found_dir" + fi + else + haveit= + for x in $LDFLAGS $LIBINTL; do + + acl_save_prefix="$prefix" + prefix="$acl_final_prefix" + acl_save_exec_prefix="$exec_prefix" + exec_prefix="$acl_final_exec_prefix" + eval x=\"$x\" + exec_prefix="$acl_save_exec_prefix" + prefix="$acl_save_prefix" + + if test "X$x" = "X-L$found_dir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + LIBINTL="${LIBINTL}${LIBINTL:+ }-L$found_dir" + fi + if test "$hardcode_minus_L" != no; then + LIBINTL="${LIBINTL}${LIBINTL:+ }$found_so" + else + LIBINTL="${LIBINTL}${LIBINTL:+ }-l$name" + fi + fi + fi + fi + else + if test "X$found_a" != "X"; then + LIBINTL="${LIBINTL}${LIBINTL:+ }$found_a" + else + LIBINTL="${LIBINTL}${LIBINTL:+ }-L$found_dir -l$name" + fi + fi + additional_includedir= + case "$found_dir" in + */lib | */lib/) + basedir=`echo "X$found_dir" | sed -e 's,^X,,' -e 's,/lib/*$,,'` + additional_includedir="$basedir/include" + ;; + esac + if test "X$additional_includedir" != "X"; then + if test "X$additional_includedir" != "X/usr/include"; then + haveit= + if test "X$additional_includedir" = "X/usr/local/include"; then + if test -n "$GCC"; then + case $host_os in + linux* | gnu* | k*bsd*-gnu) haveit=yes;; + esac + fi + fi + if test -z "$haveit"; then + for x in $CPPFLAGS $INCINTL; do + + acl_save_prefix="$prefix" + prefix="$acl_final_prefix" + acl_save_exec_prefix="$exec_prefix" + exec_prefix="$acl_final_exec_prefix" + eval x=\"$x\" + exec_prefix="$acl_save_exec_prefix" + prefix="$acl_save_prefix" + + if test "X$x" = "X-I$additional_includedir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + if test -d "$additional_includedir"; then + INCINTL="${INCINTL}${INCINTL:+ }-I$additional_includedir" + fi + fi + fi + fi + fi + if test -n "$found_la"; then + save_libdir="$libdir" + case "$found_la" in + */* | *\\*) . "$found_la" ;; + *) . 
"./$found_la" ;; + esac + libdir="$save_libdir" + for dep in $dependency_libs; do + case "$dep" in + -L*) + additional_libdir=`echo "X$dep" | sed -e 's/^X-L//'` + if test "X$additional_libdir" != "X/usr/lib"; then + haveit= + if test "X$additional_libdir" = "X/usr/local/lib"; then + if test -n "$GCC"; then + case $host_os in + linux* | gnu* | k*bsd*-gnu) haveit=yes;; + esac + fi + fi + if test -z "$haveit"; then + haveit= + for x in $LDFLAGS $LIBINTL; do + + acl_save_prefix="$prefix" + prefix="$acl_final_prefix" + acl_save_exec_prefix="$exec_prefix" + exec_prefix="$acl_final_exec_prefix" + eval x=\"$x\" + exec_prefix="$acl_save_exec_prefix" + prefix="$acl_save_prefix" + + if test "X$x" = "X-L$additional_libdir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + if test -d "$additional_libdir"; then + LIBINTL="${LIBINTL}${LIBINTL:+ }-L$additional_libdir" + fi + fi + haveit= + for x in $LDFLAGS $LTLIBINTL; do + + acl_save_prefix="$prefix" + prefix="$acl_final_prefix" + acl_save_exec_prefix="$exec_prefix" + exec_prefix="$acl_final_exec_prefix" + eval x=\"$x\" + exec_prefix="$acl_save_exec_prefix" + prefix="$acl_save_prefix" + + if test "X$x" = "X-L$additional_libdir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + if test -d "$additional_libdir"; then + LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }-L$additional_libdir" + fi + fi + fi + fi + ;; + -R*) + dir=`echo "X$dep" | sed -e 's/^X-R//'` + if test "$enable_rpath" != no; then + haveit= + for x in $rpathdirs; do + if test "X$x" = "X$dir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + rpathdirs="$rpathdirs $dir" + fi + haveit= + for x in $ltrpathdirs; do + if test "X$x" = "X$dir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + ltrpathdirs="$ltrpathdirs $dir" + fi + fi + ;; + -l*) + names_next_round="$names_next_round "`echo "X$dep" | sed -e 's/^X-l//'` + ;; + *.la) + names_next_round="$names_next_round "`echo "X$dep" | sed -e 's,^X.*/,,' -e 's,^lib,,' -e 's,\.la$,,'` + ;; + *) + LIBINTL="${LIBINTL}${LIBINTL:+ }$dep" + LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }$dep" + ;; + esac + done + fi + else + LIBINTL="${LIBINTL}${LIBINTL:+ }-l$name" + LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }-l$name" + fi + fi + fi + done + done + if test "X$rpathdirs" != "X"; then + if test -n "$hardcode_libdir_separator"; then + alldirs= + for found_dir in $rpathdirs; do + alldirs="${alldirs}${alldirs:+$hardcode_libdir_separator}$found_dir" + done + acl_save_libdir="$libdir" + libdir="$alldirs" + eval flag=\"$hardcode_libdir_flag_spec\" + libdir="$acl_save_libdir" + LIBINTL="${LIBINTL}${LIBINTL:+ }$flag" + else + for found_dir in $rpathdirs; do + acl_save_libdir="$libdir" + libdir="$found_dir" + eval flag=\"$hardcode_libdir_flag_spec\" + libdir="$acl_save_libdir" + LIBINTL="${LIBINTL}${LIBINTL:+ }$flag" + done + fi + fi + if test "X$ltrpathdirs" != "X"; then + for found_dir in $ltrpathdirs; do + LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }-R$found_dir" + done + fi + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU gettext in libintl" >&5 +$as_echo_n "checking for GNU gettext in libintl... " >&6; } +if ${gt_cv_func_gnugettext1_libintl+:} false; then : + $as_echo_n "(cached) " >&6 +else + gt_save_CPPFLAGS="$CPPFLAGS" + CPPFLAGS="$CPPFLAGS $INCINTL" + gt_save_LIBS="$LIBS" + LIBS="$LIBS $LIBINTL" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#include +extern int _nl_msg_cat_cntr; +extern +#ifdef __cplusplus +"C" +#endif +const char *_nl_expand_alias (const char *); +int +main () +{ +bindtextdomain ("", ""); +return * gettext ("") + _nl_msg_cat_cntr + *_nl_expand_alias ("") + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + gt_cv_func_gnugettext1_libintl=yes +else + gt_cv_func_gnugettext1_libintl=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + if test "$gt_cv_func_gnugettext1_libintl" != yes && test -n "$LIBICONV"; then + LIBS="$LIBS $LIBICONV" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +extern int _nl_msg_cat_cntr; +extern +#ifdef __cplusplus +"C" +#endif +const char *_nl_expand_alias (const char *); +int +main () +{ +bindtextdomain ("", ""); +return * gettext ("") + _nl_msg_cat_cntr + *_nl_expand_alias ("") + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + LIBINTL="$LIBINTL $LIBICONV" + LTLIBINTL="$LTLIBINTL $LTLIBICONV" + gt_cv_func_gnugettext1_libintl=yes + +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + fi + CPPFLAGS="$gt_save_CPPFLAGS" + LIBS="$gt_save_LIBS" +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_cv_func_gnugettext1_libintl" >&5 +$as_echo "$gt_cv_func_gnugettext1_libintl" >&6; } + fi + + if test "$gt_cv_func_gnugettext1_libc" = "yes" \ + || { test "$gt_cv_func_gnugettext1_libintl" = "yes" \ + && test "$PACKAGE" != gettext-runtime \ + && test "$PACKAGE" != gettext-tools; }; then + gt_use_preinstalled_gnugettext=yes + else + LIBINTL= + LTLIBINTL= + INCINTL= + fi + + + if test "$gt_use_preinstalled_gnugettext" != "yes"; then + nls_cv_use_gnu_gettext=yes + fi + fi + + if test "$nls_cv_use_gnu_gettext" = "yes"; then + BUILD_INCLUDED_LIBINTL=yes + USE_INCLUDED_LIBINTL=yes + LIBINTL="\${top_builddir}/intl/libintl.a $LIBICONV" + LTLIBINTL="\${top_builddir}/intl/libintl.a $LTLIBICONV" + LIBS=`echo " $LIBS " | sed -e 's/ -lintl / /' -e 's/^ //' -e 's/ $//'` + fi + + CATOBJEXT= + if test "$gt_use_preinstalled_gnugettext" = "yes" \ + || test "$nls_cv_use_gnu_gettext" = "yes"; then + CATOBJEXT=.gmo + fi + + + if test -n "$INTL_MACOSX_LIBS"; then + if test "$gt_use_preinstalled_gnugettext" = "yes" \ + || test "$nls_cv_use_gnu_gettext" = "yes"; then + LIBINTL="$LIBINTL $INTL_MACOSX_LIBS" + LTLIBINTL="$LTLIBINTL $INTL_MACOSX_LIBS" + fi + fi + + if test "$gt_use_preinstalled_gnugettext" = "yes" \ + || test "$nls_cv_use_gnu_gettext" = "yes"; then + +$as_echo "#define ENABLE_NLS 1" >>confdefs.h + + else + USE_NLS=no + fi + fi + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to use NLS" >&5 +$as_echo_n "checking whether to use NLS... " >&6; } + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $USE_NLS" >&5 +$as_echo "$USE_NLS" >&6; } + if test "$USE_NLS" = "yes"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking where the gettext function comes from" >&5 +$as_echo_n "checking where the gettext function comes from... 
" >&6; } + if test "$gt_use_preinstalled_gnugettext" = "yes"; then + if test "$gt_cv_func_gnugettext1_libintl" = "yes"; then + gt_source="external libintl" + else + gt_source="libc" + fi + else + gt_source="included intl directory" + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_source" >&5 +$as_echo "$gt_source" >&6; } + fi + + if test "$USE_NLS" = "yes"; then + + if test "$gt_use_preinstalled_gnugettext" = "yes"; then + if test "$gt_cv_func_gnugettext1_libintl" = "yes"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to link with libintl" >&5 +$as_echo_n "checking how to link with libintl... " >&6; } + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LIBINTL" >&5 +$as_echo "$LIBINTL" >&6; } + + for element in $INCINTL; do + haveit= + for x in $CPPFLAGS; do + + acl_save_prefix="$prefix" + prefix="$acl_final_prefix" + acl_save_exec_prefix="$exec_prefix" + exec_prefix="$acl_final_exec_prefix" + eval x=\"$x\" + exec_prefix="$acl_save_exec_prefix" + prefix="$acl_save_prefix" + + if test "X$x" = "X$element"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + CPPFLAGS="${CPPFLAGS}${CPPFLAGS:+ }$element" + fi + done + + fi + + +$as_echo "#define HAVE_GETTEXT 1" >>confdefs.h + + +$as_echo "#define HAVE_DCGETTEXT 1" >>confdefs.h + + fi + + POSUB=po + fi + + + if test "$PACKAGE" = gettext-runtime || test "$PACKAGE" = gettext-tools; then + BUILD_INCLUDED_LIBINTL=yes + fi + + + + + + nls_cv_header_intl= + nls_cv_header_libgt= + + DATADIRNAME=share + + + INSTOBJEXT=.mo + + + GENCAT=gencat + + + INTLOBJS= + if test "$USE_INCLUDED_LIBINTL" = yes; then + INTLOBJS="\$(GETTOBJS)" + fi + + + INTL_LIBTOOL_SUFFIX_PREFIX= + + + + INTLLIBS="$LIBINTL" + + + + + + +fi + +support_smartalloc=yes +support_readline=yes +support_lzo=yes +support_s3=yes +support_conio=yes +support_bat=no +support_tls=no +support_crypto=no +support_static_tools=no +support_static_fd=no +support_static_sd=no +support_static_dir=no +support_static_cons=no +build_client_only=no +build_dird=yes +build_stored=yes +db_backends="" +batch_insert_db_backends="" +support_lockmgr=no + + +# Check whether --enable-bat was given. +if test "${enable_bat+set}" = set; then : + enableval=$enable_bat; + if test x$enableval = xyes; then + +$as_echo "#define HAVE_BAT 1" >>confdefs.h + + support_bat=yes + fi + + +fi + + +BAT_DIR= +if test x$support_bat = xyes; then + abc=`$PKGCONFIG QtGui` + pkg=$? + if test $pkg = 0; then + BAT_DIR=src/qt-console + else + abc=`$PKGCONFIG Qt5Gui` + pkg=$? + if test $pkg = 0; then + BAT_DIR="src/qt-console src/qt-console/tray-monitor" + else + as_fn_error $? "Unable to find either Qt4 or Qt5 installation needed by bat" "$LINENO" 5 + fi + fi +fi + + + + + + + +# Check whether --enable-smartalloc was given. +if test "${enable_smartalloc+set}" = set; then : + enableval=$enable_smartalloc; + if test x$enableval = xno; then + support_smartalloc=no + fi + + +fi + + +if test x$support_smartalloc = xyes; then + +$as_echo "#define SMARTALLOC 1" >>confdefs.h + +fi + +# Check whether --enable-lockmgr was given. +if test "${enable_lockmgr+set}" = set; then : + enableval=$enable_lockmgr; + if test x$enableval = xyes; then + support_lockmgr=yes + fi + + +fi + + +if test x$support_lockmgr = xyes; then + +$as_echo "#define USE_LOCKMGR 1" >>confdefs.h + +fi + + +# Check whether --enable-static-tools was given. +if test "${enable_static_tools+set}" = set; then : + enableval=$enable_static_tools; + if test x$enableval = xyes; then + if test x$use_libtool = xyes; then + as_fn_error $? 
"Libtool is enabled, not compatible with static tools, + please rerun configure with --disable-libtool" "$LINENO" 5 + fi + support_static_tools=yes + fi + + +fi + + +TTOOL_LDFLAGS= +if test x$support_static_tools = xyes; then + TTOOL_LDFLAGS="-static" +fi + + +# Check whether --enable-static-fd was given. +if test "${enable_static_fd+set}" = set; then : + enableval=$enable_static_fd; + if test x$enableval = xyes; then + if test x$use_libtool = xyes; then + as_fn_error $? "Libtool is enabled, not compatible with static tools, + please rerun configure with --disable-libtool" "$LINENO" 5 + fi + support_static_fd=yes + fi + + +fi + + +STATIC_FD= +if test x$support_static_fd = xyes; then + STATIC_FD="static-bacula-fd" +fi + + +# Check whether --enable-static-sd was given. +if test "${enable_static_sd+set}" = set; then : + enableval=$enable_static_sd; + if test x$enableval = xyes; then + if test x$use_libtool = xyes; then + as_fn_error $? "Libtool is enabled, not compatible with static tools, + please rerun configure with --disable-libtool" "$LINENO" 5 + fi + support_static_sd=yes + fi + + +fi + + +STATIC_SD= +if test x$support_static_sd = xyes; then + STATIC_SD="static-bacula-sd" +fi + + +# Check whether --enable-static-dir was given. +if test "${enable_static_dir+set}" = set; then : + enableval=$enable_static_dir; + if test x$enableval = xyes; then + if test x$use_libtool = xyes; then + as_fn_error $? "Libtool is enabled, not compatible with static tools, + please rerun configure with --disable-libtool" "$LINENO" 5 + fi + support_static_dir=yes + fi + + +fi + + +STATIC_DIR= +if test x$support_static_dir = xyes; then + STATIC_DIR="static-bacula-dir" +fi + + +# Check whether --enable-static-cons was given. +if test "${enable_static_cons+set}" = set; then : + enableval=$enable_static_cons; + if test x$enableval = xyes; then + if test x$use_libtool = xyes; then + as_fn_error $? "Libtool is enabled, not compatible with static tools, + please rerun configure with --disable-libtool" "$LINENO" 5 + fi + support_static_cons=yes + fi + + +fi + + +STATIC_CONS= +if test x$support_static_cons = xyes; then + STATIC_CONS="static-bconsole" +fi + + +# Check whether --enable-client-only was given. +if test "${enable_client_only+set}" = set; then : + enableval=$enable_client_only; + if test x$enableval = xyes; then + build_client_only=yes + db_backends="None" + DB_BACKENDS="none" + fi + + +fi + +if test x$build_client_only = xno; then + ALL_DIRS="subdirs" + +$as_echo "#define HAVE_CLIENT_ONLY 1" >>confdefs.h + +else + ALL_DIRS="" +fi + + +# Check whether --enable-build-dird was given. +if test "${enable_build_dird+set}" = set; then : + enableval=$enable_build_dird; + if test x$enableval = xno; then + build_dird=no + fi + + +fi + + +DIRD_DIR="src/dird" +DIR_TOOLS="DIRTOOLS" + + + + +# Check whether --enable-build-stored was given. +if test "${enable_build_stored+set}" = set; then : + enableval=$enable_build_stored; + if test x$enableval = xno; then + build_stored=no + fi + + +fi + +if test x$build_stored = xyes; then + STORED_DIR="src/stored" +else + STORED_DIR="" +fi + + +# Check whether --enable-conio was given. +if test "${enable_conio+set}" = set; then : + enableval=$enable_conio; + if test x$enableval = xno; then + support_conio=no + fi + + +fi + + + +support_ipv6=yes +# Check whether --enable-ipv6 was given. 
+if test "${enable_ipv6+set}" = set; then : + enableval=$enable_ipv6; + if test x$enableval = xno; then + support_ipv6=no + fi + + +fi + + +if test x$support_ipv6 = xyes; then + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + #include +#include +#include +int +main () +{ +struct in6_addr t=in6addr_any; t.s6_addr[0] = 0; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + support_in6addr_any=yes +else + support_in6addr_any=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + + if test x$support_in6addr_any = xno ; then + in6addr_any="const struct in6_addr in6addr_any" + else + in6addr_any="1" + fi + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + #include +#include +#include +int +main () +{ +$in6addr_any; struct sockaddr_in6 s; struct in6_addr t=in6addr_any; int i=AF_INET6; s; t.s6_addr[0] = 0; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + support_ipv6=yes +else + support_ipv6=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi + +if test x$support_ipv6 = xyes; then + +$as_echo "#define HAVE_IPV6 1" >>confdefs.h + + + if test x$support_in6addr_any = xno ; then + +$as_echo "#define NEED_IN6ADDR_ANY 1" >>confdefs.h + + fi +fi + +TERM_LIB="" +ac_fn_c_check_header_mongrel "$LINENO" "curses.h" "ac_cv_header_curses_h" "$ac_includes_default" +if test "x$ac_cv_header_curses_h" = xyes; then : + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for tgetent in -ltinfo" >&5 +$as_echo_n "checking for tgetent in -ltinfo... " >&6; } +if ${ac_cv_lib_tinfo_tgetent+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ltinfo $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char tgetent (); +int +main () +{ +return tgetent (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_tinfo_tgetent=yes +else + ac_cv_lib_tinfo_tgetent=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_tinfo_tgetent" >&5 +$as_echo "$ac_cv_lib_tinfo_tgetent" >&6; } +if test "x$ac_cv_lib_tinfo_tgetent" = xyes; then : + TERM_LIB="-ltinfo" +else + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for tgetent in -lncurses" >&5 +$as_echo_n "checking for tgetent in -lncurses... " >&6; } +if ${ac_cv_lib_ncurses_tgetent+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lncurses $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char tgetent (); +int +main () +{ +return tgetent (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_ncurses_tgetent=yes +else + ac_cv_lib_ncurses_tgetent=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_ncurses_tgetent" >&5 +$as_echo "$ac_cv_lib_ncurses_tgetent" >&6; } +if test "x$ac_cv_lib_ncurses_tgetent" = xyes; then : + TERM_LIB="-lncurses" +else + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for tgetent in -ltermcap" >&5 +$as_echo_n "checking for tgetent in -ltermcap... " >&6; } +if ${ac_cv_lib_termcap_tgetent+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ltermcap $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char tgetent (); +int +main () +{ +return tgetent (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_termcap_tgetent=yes +else + ac_cv_lib_termcap_tgetent=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_termcap_tgetent" >&5 +$as_echo "$ac_cv_lib_termcap_tgetent" >&6; } +if test "x$ac_cv_lib_termcap_tgetent" = xyes; then : + TERM_LIB="-ltermcap" +fi + + +fi + + +fi + + +else + for ac_header in curses.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "curses.h" "ac_cv_header_curses_h" "$ac_includes_default" +if test "x$ac_cv_header_curses_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_CURSES_H 1 +_ACEOF + +fi + +done + + ac_fn_c_check_header_mongrel "$LINENO" "term.h" "ac_cv_header_term_h" "$ac_includes_default" +if test "x$ac_cv_header_term_h" = xyes; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for tgetent in -lcurses" >&5 +$as_echo_n "checking for tgetent in -lcurses... " >&6; } +if ${ac_cv_lib_curses_tgetent+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lcurses $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char tgetent (); +int +main () +{ +return tgetent (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_curses_tgetent=yes +else + ac_cv_lib_curses_tgetent=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_curses_tgetent" >&5 +$as_echo "$ac_cv_lib_curses_tgetent" >&6; } +if test "x$ac_cv_lib_curses_tgetent" = xyes; then : + TERM_LIB="-lcurses" +fi + + +fi + + + +fi + + + + +got_conio="no" +if test x$support_conio = xyes; then + if test x$TERM_LIB != x; then + CONS_LIBS=$TERM_LIB + CONS_OBJ="conio.o" + CONS_SRC="conio.c" + got_conio="yes" + support_readline=no + +$as_echo "#define HAVE_CONIO 1" >>confdefs.h + + else + echo " "; echo "Required libraries not found. 
CONIO turned off ..."; echo " " + fi +fi + + +# Check whether --enable-readline was given. +if test "${enable_readline+set}" = set; then : + enableval=$enable_readline; + if test x$enableval = xno; then + support_readline=no + fi + + +fi + + +got_readline="no" +READLINE_SRC= +if test x$support_readline = xyes; then + +# Check whether --with-readline was given. +if test "${with_readline+set}" = set; then : + withval=$with_readline; + case "$with_readline" in + no) + : + ;; + yes|*) + if test -f ${with_readline}/readline.h; then + CONS_INC="-I${with_readline}" + CONS_LDFLAGS="-L$with_readline" + elif test -f ${with_readline}/include/readline/readline.h; then + CONS_INC="-I${with_readline}/include/readline" + CONS_LDFLAGS="-L${with_readline}/lib" + with_readline="${with_readline}/include/readline" + else + with_readline="/usr/include/readline" + fi + + as_ac_Header=`$as_echo "ac_cv_header_${with_readline}/readline.h" | $as_tr_sh` +ac_fn_c_check_header_mongrel "$LINENO" "${with_readline}/readline.h" "$as_ac_Header" "$ac_includes_default" +if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : + + +$as_echo "#define HAVE_READLINE 1" >>confdefs.h + + CONS_LIBS="-lreadline -lhistory ${TERM_LIB}" + got_readline="yes" + +else + + echo " " + echo "readline.h not found. readline turned off ..." + echo " " + + +fi + + + ;; + esac + +else + + ac_fn_c_check_header_mongrel "$LINENO" "/usr/include/readline/readline.h" "ac_cv_header__usr_include_readline_readline_h" "$ac_includes_default" +if test "x$ac_cv_header__usr_include_readline_readline_h" = xyes; then : + + +$as_echo "#define HAVE_READLINE 1" >>confdefs.h + + got_readline="yes" + CONS_INC="-I/usr/include/readline" + CONS_LIBS="-lreadline ${TERM_LIB}" + +else + + as_ac_Header=`$as_echo "ac_cv_header_${TOP_DIR}/depkgs/readline/readline.h" | $as_tr_sh` +ac_fn_c_check_header_mongrel "$LINENO" "${TOP_DIR}/depkgs/readline/readline.h" "$as_ac_Header" "$ac_includes_default" +if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : + + +$as_echo "#define HAVE_READLINE 1" >>confdefs.h + + got_readline="yes" + CONS_INC="-I${TOP_DIR}/depkgs/readline" + CONS_LIBS="-lreadline -lhistory ${TERM_LIB}" + CONS_LDFLAGS="-L${TOP_DIR}/depkgs/readline" + PRTREADLINE_SRC="${TOP_DIR}/depkgs/readline" + +else + + echo " " + echo "readline.h not found. readline turned off ..." + echo " " + + +fi + + + + +fi + + + + +fi + +fi + + + + + + + + +MAKE_SHELL=/bin/sh + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether stat file-mode macros are broken" >&5 +$as_echo_n "checking whether stat file-mode macros are broken... " >&6; } +if ${ac_cv_header_stat_broken+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include + +#if defined S_ISBLK && defined S_IFDIR +extern char c1[S_ISBLK (S_IFDIR) ? -1 : 1]; +#endif + +#if defined S_ISBLK && defined S_IFCHR +extern char c2[S_ISBLK (S_IFCHR) ? -1 : 1]; +#endif + +#if defined S_ISLNK && defined S_IFREG +extern char c3[S_ISLNK (S_IFREG) ? -1 : 1]; +#endif + +#if defined S_ISSOCK && defined S_IFREG +extern char c4[S_ISSOCK (S_IFREG) ? 
-1 : 1]; +#endif + +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_header_stat_broken=no +else + ac_cv_header_stat_broken=yes +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stat_broken" >&5 +$as_echo "$ac_cv_header_stat_broken" >&6; } +if test $ac_cv_header_stat_broken = yes; then + +$as_echo "#define STAT_MACROS_BROKEN 1" >>confdefs.h + +fi + +ac_header_dirent=no +for ac_hdr in dirent.h sys/ndir.h sys/dir.h ndir.h; do + as_ac_Header=`$as_echo "ac_cv_header_dirent_$ac_hdr" | $as_tr_sh` +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_hdr that defines DIR" >&5 +$as_echo_n "checking for $ac_hdr that defines DIR... " >&6; } +if eval \${$as_ac_Header+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include <$ac_hdr> + +int +main () +{ +if ((DIR *) 0) +return 0; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + eval "$as_ac_Header=yes" +else + eval "$as_ac_Header=no" +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +eval ac_res=\$$as_ac_Header + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_hdr" | $as_tr_cpp` 1 +_ACEOF + +ac_header_dirent=$ac_hdr; break +fi + +done +# Two versions of opendir et al. are in -ldir and -lx on SCO Xenix. +if test $ac_header_dirent = dirent.h; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing opendir" >&5 +$as_echo_n "checking for library containing opendir... " >&6; } +if ${ac_cv_search_opendir+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char opendir (); +int +main () +{ +return opendir (); + ; + return 0; +} +_ACEOF +for ac_lib in '' dir; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_opendir=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_opendir+:} false; then : + break +fi +done +if ${ac_cv_search_opendir+:} false; then : + +else + ac_cv_search_opendir=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_opendir" >&5 +$as_echo "$ac_cv_search_opendir" >&6; } +ac_res=$ac_cv_search_opendir +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + +fi + +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing opendir" >&5 +$as_echo_n "checking for library containing opendir... " >&6; } +if ${ac_cv_search_opendir+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char opendir (); +int +main () +{ +return opendir (); + ; + return 0; +} +_ACEOF +for ac_lib in '' x; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_opendir=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_opendir+:} false; then : + break +fi +done +if ${ac_cv_search_opendir+:} false; then : + +else + ac_cv_search_opendir=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_opendir" >&5 +$as_echo "$ac_cv_search_opendir" >&6; } +ac_res=$ac_cv_search_opendir +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + +fi + +fi + +for ac_func in strcasecmp select setenv putenv tcgetattr +do : + as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" +if eval test \"x\$"$as_ac_var"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF + +fi +done + +for ac_func in lstat lchown lchmod futimes fchmod fchown lutimes +do : + as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" +if eval test \"x\$"$as_ac_var"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF + +fi +done + +for ac_func in nanosleep nl_langinfo +do : + as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" +if eval test \"x\$"$as_ac_var"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF + +fi +done + +for ac_func in be64toh htobe64 +do : + as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" +if eval test \"x\$"$as_ac_var"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF + +fi +done + +for ac_header in varargs.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "varargs.h" "ac_cv_header_varargs_h" "$ac_includes_default" +if test "x$ac_cv_header_varargs_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_VARARGS_H 1 +_ACEOF + +fi + +done + + + +ac_fn_c_check_func "$LINENO" "socket" "ac_cv_func_socket" +if test "x$ac_cv_func_socket" = xyes; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: result: using libc's socket" >&5 +$as_echo "using libc's socket" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for socket in -lxnet" >&5 +$as_echo_n "checking for socket in -lxnet... " >&6; } +if ${ac_cv_lib_xnet_socket+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lxnet $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char socket (); +int +main () +{ +return socket (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_xnet_socket=yes +else + ac_cv_lib_xnet_socket=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_xnet_socket" >&5 +$as_echo "$ac_cv_lib_xnet_socket" >&6; } +if test "x$ac_cv_lib_xnet_socket" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBXNET 1 +_ACEOF + + LIBS="-lxnet $LIBS" + +fi + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for socket in -lsocket" >&5 +$as_echo_n "checking for socket in -lsocket... " >&6; } +if ${ac_cv_lib_socket_socket+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lsocket $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char socket (); +int +main () +{ +return socket (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_socket_socket=yes +else + ac_cv_lib_socket_socket=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_socket_socket" >&5 +$as_echo "$ac_cv_lib_socket_socket" >&6; } +if test "x$ac_cv_lib_socket_socket" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBSOCKET 1 +_ACEOF + + LIBS="-lsocket $LIBS" + +fi + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for socket in -linet" >&5 +$as_echo_n "checking for socket in -linet... " >&6; } +if ${ac_cv_lib_inet_socket+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-linet $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char socket (); +int +main () +{ +return socket (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_inet_socket=yes +else + ac_cv_lib_inet_socket=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_inet_socket" >&5 +$as_echo "$ac_cv_lib_inet_socket" >&6; } +if test "x$ac_cv_lib_inet_socket" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBINET 1 +_ACEOF + + LIBS="-linet $LIBS" + +fi + +fi + + +TCPW_MSG="no" +WRAPLIBS="" + +# Check whether --with-tcp-wrappers was given. +if test "${with_tcp_wrappers+set}" = set; then : + withval=$with_tcp_wrappers; + if test "x$withval" != "xno" ; then + saved_LIBS="$LIBS" + LIBS="$saved_LIBS -lwrap" + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing nanosleep" >&5 +$as_echo_n "checking for library containing nanosleep... " >&6; } +if ${ac_cv_search_nanosleep+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. 
+ Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char nanosleep (); +int +main () +{ +return nanosleep (); + ; + return 0; +} +_ACEOF +for ac_lib in '' rt; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_nanosleep=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_nanosleep+:} false; then : + break +fi +done +if ${ac_cv_search_nanosleep+:} false; then : + +else + ac_cv_search_nanosleep=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_nanosleep" >&5 +$as_echo "$ac_cv_search_nanosleep" >&6; } +ac_res=$ac_cv_search_nanosleep +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + +fi + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for libwrap" >&5 +$as_echo_n "checking for libwrap... " >&6; } + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + #include + #include + int deny_severity = 0; + int allow_severity = 0; + struct request_info *req; + +int +main () +{ + + hosts_access(req); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + +$as_echo "#define HAVE_LIBWRAP 1" >>confdefs.h + + TCPW_MSG="yes" + LIBS="$saved_LIBS" + WRAPLIBS="-lwrap" + +else + + LIBS="$saved_LIBS -lwrap -lnsl" + WRAPLIBS="$saved_LIBS -lwrap -lnsl" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + #include + #include + int deny_severity = 0; + int allow_severity = 0; + struct request_info *req; + +int +main () +{ + + hosts_access(req); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + +$as_echo "#define HAVE_LIBWRAP 1" >>confdefs.h + + TCPW_MSG="yes" + LIBS="$saved_LIBS" + WRAPLIBS="-lwrap" + +else + + as_fn_error $? "*** libwrap missing" "$LINENO" 5 + + +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + + +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + fi + + +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for OpenSSL" >&5 +$as_echo_n "checking for OpenSSL... " >&6; } + +# Check whether --with-openssl was given. +if test "${with_openssl+set}" = set; then : + withval=$with_openssl; + with_openssl_directory=${withval} + + +fi + + +if test "x$with_openssl_directory" != "xno"; then + OPENSSL_LIBS="-lssl -lcrypto" + OPENSSL_INC="" + + if test "x$with_openssl_directory" != "xyes" && test x"${with_openssl_directory}" != "x"; then + # + # Make sure the $with_openssl_directory also makes sense + # + if test -d "$with_openssl_directory/lib" -a -d "$with_openssl_directory/include"; then + OPENSSL_LIBS="-L$with_openssl_directory/lib $OPENSSL_LIBS" + OPENSSL_INC="-I$with_openssl_directory/include $OPENSSL_INC" + fi + fi + + saved_LIBS="${LIBS}" + saved_CFLAGS="${CFLAGS}" + LIBS="${saved_LIBS} ${OPENSSL_LIBS}" + CFLAGS="${saved_CFLAGS} ${OPENSSL_INC}" + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + + #include + +int +main () +{ + + CRYPTO_set_id_callback(NULL); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + + support_tls="yes" + support_crypto="yes" + +else + + support_tls="no" + support_crypto="no" + + +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + #include + +int +main () +{ + + EVP_sha512(); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + + ac_cv_openssl_sha2="yes" + +else + + ac_cv_openssl_sha2="no" + + +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + #include + +int +main () +{ + + EVP_aes_192_cbc(); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + + ac_cv_openssl_export="no" + +else + + ac_cv_openssl_export="yes" + + +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $support_tls" >&5 +$as_echo "$support_tls" >&6; } + if test "$support_tls" = "yes"; then + +$as_echo "#define HAVE_OPENSSL 1" >>confdefs.h + + +$as_echo "#define HAVE_TLS 1" >>confdefs.h + + +$as_echo "#define HAVE_CRYPTO 1" >>confdefs.h + + fi + + if test "$ac_cv_openssl_sha2" = "yes"; then + +$as_echo "#define HAVE_SHA2 1" >>confdefs.h + + fi + + if test "$ac_cv_openssl_export" = "yes"; then + +$as_echo "#define HAVE_OPENSSL_EXPORT_LIBRARY 1" >>confdefs.h + + fi + + if test "$support_crypto" = "yes"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for EVP_PKEY_encrypt_old in -lcrypto" >&5 +$as_echo_n "checking for EVP_PKEY_encrypt_old in -lcrypto... " >&6; } +if ${ac_cv_lib_crypto_EVP_PKEY_encrypt_old+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lcrypto $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char EVP_PKEY_encrypt_old (); +int +main () +{ +return EVP_PKEY_encrypt_old (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_crypto_EVP_PKEY_encrypt_old=yes +else + ac_cv_lib_crypto_EVP_PKEY_encrypt_old=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_crypto_EVP_PKEY_encrypt_old" >&5 +$as_echo "$ac_cv_lib_crypto_EVP_PKEY_encrypt_old" >&6; } +if test "x$ac_cv_lib_crypto_EVP_PKEY_encrypt_old" = xyes; then : + +$as_echo "#define HAVE_OPENSSLv1 1" >>confdefs.h + +fi + + fi + + LIBS="${saved_LIBS}" + CFLAGS="${saved_CFLAGS}" +else + support_tls="no" + support_crypto="no" + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $support_tls" >&5 +$as_echo "$support_tls" >&6; } +fi + +if test "$support_tls" = "no" -o "$support_crypto" = "no"; then + OPENSSL_LIBS="" + OPENSSL_INC="" +fi + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing dlopen" >&5 +$as_echo_n "checking for library containing dlopen... " >&6; } +if ${ac_cv_search_dlopen+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char dlopen (); +int +main () +{ +return dlopen (); + ; + return 0; +} +_ACEOF +for ac_lib in '' dl; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_dlopen=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_dlopen+:} false; then : + break +fi +done +if ${ac_cv_search_dlopen+:} false; then : + +else + ac_cv_search_dlopen=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_dlopen" >&5 +$as_echo "$ac_cv_search_dlopen" >&6; } +ac_res=$ac_cv_search_dlopen +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + +fi + + +working_dir=`eval echo /opt/bacula/working` + +# Check whether --with-working-dir was given. +if test "${with_working_dir+set}" = set; then : + withval=$with_working_dir; + if test "x$withval" != "xno" ; then + working_dir=$withval + fi + + +fi + + + + +archivedir=/tmp + +# Check whether --with-archivedir was given. +if test "${with_archivedir+set}" = set; then : + withval=$with_archivedir; + if test "x$withval" != "xno" ; then + archivedir=$withval + fi + + +fi + + + + +basename=`hostname` + +# Check whether --with-basename was given. +if test "${with_basename+set}" = set; then : + withval=$with_basename; + if test "x$withval" != "xno" ; then + basename=$withval + fi + + +fi + + + + +hostname=`uname -n | cut -d '.' -f 1` +if test x${hostname} = x ; then + hostname="localhost" +fi +ping -c 1 $hostname 2>/dev/null 1>/dev/null +if test ! $? = 0; then + hostname="localhost" +fi + +# Check whether --with-hostname was given. +if test "${with_hostname+set}" = set; then : + withval=$with_hostname; + if test "x$withval" != "xno" ; then + hostname=$withval + fi + + +fi + + + + + +scriptdir=`eval echo ${sysconfdir}` + +# Check whether --with-scriptdir was given. +if test "${with_scriptdir+set}" = set; then : + withval=$with_scriptdir; + if test "x$withval" != "xno" ; then + scriptdir=$withval + fi + + +fi + + + + + +bsrdir=`eval echo /opt/bacula/bsr` + +# Check whether --with-bsrdir was given. +if test "${with_bsrdir+set}" = set; then : + withval=$with_bsrdir; + if test "x$withval" != "xno" ; then + bsrdir=$withval + fi + + +fi + + + + +logdir=`eval echo /opt/bacula/log` + +# Check whether --with-logdir was given. +if test "${with_logdir+set}" = set; then : + withval=$with_logdir; + if test "x$withval" != "xno" ; then + logdir=$withval + fi + + +fi + + + + + +# ------------------------------------------ +# Where to place plugindir (plugin files) +# ------------------------------------------ +plugindir=`eval echo ${libdir}` + +# Check whether --with-plugindir was given. +if test "${with_plugindir+set}" = set; then : + withval=$with_plugindir; + if test "x$withval" != "xno" ; then + plugindir=$withval + fi + + +fi + + + + + +dump_email=root@localhost + +# Check whether --with-dump-email was given. +if test "${with_dump_email+set}" = set; then : + withval=$with_dump_email; + if test "x$withval" != "xno" ; then + dump_email=$withval + fi + + +fi + + + + +job_email=root@localhost + +# Check whether --with-job-email was given. 
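Every --with-* block above follows the same shape: a hard-coded default (/opt/bacula/working for the working directory, /tmp for the archive device, ${sysconfdir} for scripts, ${libdir} for plugins, /opt/bacula/bsr and /opt/bacula/log for bootstrap and log files, root@localhost for mail) that is replaced only when the option is given a value other than "no". A hypothetical override set, with placeholder paths and addresses, ahead of the matching --with-job-email handling below:

    # Illustrative only: relocate the runtime directories and mail targets
    # whose defaults were just established above.
    ./configure \
        --with-working-dir=/var/lib/bacula \
        --with-bsrdir=/var/lib/bacula/bsr \
        --with-logdir=/var/log/bacula \
        --with-scriptdir=/etc/bacula/scripts \
        --with-plugindir=/usr/lib/bacula/plugins \
        --with-dump-email=backup-admin@example.com \
        --with-job-email=backup-admin@example.com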
+if test "${with_job_email+set}" = set; then : + withval=$with_job_email; + if test "x$withval" != "xno" ; then + job_email=$withval + fi + + +fi + + + + +smtp_host=localhost + +# Check whether --with-smtp_host was given. +if test "${with_smtp_host+set}" = set; then : + withval=$with_smtp_host; + if test "x$withval" != "xno" ; then + smtp_host=$withval + fi + + +fi + + + + +piddir=/var/run + +# Check whether --with-pid-dir was given. +if test "${with_pid_dir+set}" = set; then : + withval=$with_pid_dir; + if test "x$withval" != "xno" ; then + piddir=$withval + fi + + +fi + + +cat >>confdefs.h <<_ACEOF +#define _PATH_BACULA_PIDDIR "$piddir" +_ACEOF + + + +subsysdir=/var/run/subsys +if test -d /var/run/subsys; then + subsysdir=/var/run/subsys +elif test -d /var/lock/subsys; then + subsysdir=/var/lock/subsys +else + subsysdir=/var/run/subsys +fi + +# Check whether --with-subsys-dir was given. +if test "${with_subsys_dir+set}" = set; then : + withval=$with_subsys_dir; + if test "x$withval" != "xno" ; then + subsysdir=$withval + fi + + +fi + + + + +baseport=9101 + +# Check whether --with-baseport was given. +if test "${with_baseport+set}" = set; then : + withval=$with_baseport; + if test "x$withval" != "xno" ; then + baseport=$withval + fi + + +fi + + + +dir_port=`expr $baseport` +fd_port=`expr $baseport + 1` +sd_port=`expr $fd_port + 1` + + + + + +dir_password= + +# Check whether --with-dir-password was given. +if test "${with_dir_password+set}" = set; then : + withval=$with_dir_password; + if test "x$withval" != "xno" ; then + dir_password=$withval + fi + + +fi + + +if test "x$dir_password" = "x" ; then + if test "x$OPENSSL" = "xnone" ; then + key=`autoconf/randpass 33` + else + key=`openssl rand -base64 33` + fi + dir_password=$key +fi + +fd_password= + +# Check whether --with-fd-password was given. +if test "${with_fd_password+set}" = set; then : + withval=$with_fd_password; + if test "x$withval" != "xno" ; then + fd_password=$withval + fi + + +fi + + +if test "x$fd_password" = "x" ; then + if test "x$OPENSSL" = "xnone" ; then + key=`autoconf/randpass 37` + else + key=`openssl rand -base64 33` + fi + fd_password=$key +fi + +sd_password= + +# Check whether --with-sd-password was given. +if test "${with_sd_password+set}" = set; then : + withval=$with_sd_password; + if test "x$withval" != "xno" ; then + sd_password=$withval + fi + + +fi + + +if test "x$sd_password" = "x" ; then + if test "x$OPENSSL" = "xnone" ; then + key=`autoconf/randpass 41` + else + key=`openssl rand -base64 33` + fi + sd_password=$key +fi + +mon_dir_password= + +# Check whether --with-mon-dir-password was given. +if test "${with_mon_dir_password+set}" = set; then : + withval=$with_mon_dir_password; + if test "x$withval" != "xno" ; then + mon_dir_password=$withval + fi + + +fi + + +if test "x$mon_dir_password" = "x" ; then + if test "x$OPENSSL" = "xnone" ; then + key=`autoconf/randpass 33` + else + key=`openssl rand -base64 33` + fi + mon_dir_password=$key +fi + +mon_fd_password= + +# Check whether --with-mon-fd-password was given. +if test "${with_mon_fd_password+set}" = set; then : + withval=$with_mon_fd_password; + if test "x$withval" != "xno" ; then + mon_fd_password=$withval + fi + + +fi + + +if test "x$mon_fd_password" = "x" ; then + if test "x$OPENSSL" = "xnone" ; then + key=`autoconf/randpass 37` + else + key=`openssl rand -base64 33` + fi + mon_fd_password=$key +fi + +mon_sd_password= + +# Check whether --with-mon-sd-password was given. 
+if test "${with_mon_sd_password+set}" = set; then : + withval=$with_mon_sd_password; + if test "x$withval" != "xno" ; then + mon_sd_password=$withval + fi + + +fi + + +if test "x$mon_sd_password" = "x" ; then + if test "x$OPENSSL" = "xnone" ; then + key=`autoconf/randpass 41` + else + key=`openssl rand -base64 33` + fi + mon_sd_password=$key +fi + + + + + + + + +db_name=bacula + +# Check whether --with-db_name was given. +if test "${with_db_name+set}" = set; then : + withval=$with_db_name; + if test "x$withval" != "x" ; then + db_name=$withval + fi + + +fi + + + +db_user=bacula + +# Check whether --with-db_user was given. +if test "${with_db_user+set}" = set; then : + withval=$with_db_user; + if test "x$withval" != "x" ; then + db_user=$withval + fi + + +fi + + + +db_password= + +# Check whether --with-db_password was given. +if test "${with_db_password+set}" = set; then : + withval=$with_db_password; + if test "x$withval" != "x" ; then + db_password=$withval + fi + + +fi + + + +db_port=" " + +# Check whether --with-db_port was given. +if test "${with_db_port+set}" = set; then : + withval=$with_db_port; + if test "x$withval" != "x" ; then + db_port=$withval + fi + + +fi + + + +db_ssl_options= + +# Check whether --with-db_ssl_options was given. +if test "${with_db_ssl_options+set}" = set; then : + withval=$with_db_ssl_options; + if test "x$withval" != "x" ; then + db_ssl_options=$withval + fi + + +fi + + + +# +# Handle users and groups for each daemon +# +dir_user= + +# Check whether --with-dir_user was given. +if test "${with_dir_user+set}" = set; then : + withval=$with_dir_user; + if test "x$withval" != "x" ; then + dir_user=$withval + fi + + +fi + + +dir_group= + +# Check whether --with-dir_group was given. +if test "${with_dir_group+set}" = set; then : + withval=$with_dir_group; + if test "x$withval" != "x" ; then + dir_group=$withval + fi + + +fi + + +sd_user= + +# Check whether --with-sd_user was given. +if test "${with_sd_user+set}" = set; then : + withval=$with_sd_user; + if test "x$withval" != "x" ; then + sd_user=$withval + fi + + +fi + + +sd_group= + +# Check whether --with-sd_group was given. +if test "${with_sd_group+set}" = set; then : + withval=$with_sd_group; + if test "x$withval" != "x" ; then + sd_group=$withval + fi + + +fi + + +fd_user= + +# Check whether --with-fd_user was given. +if test "${with_fd_user+set}" = set; then : + withval=$with_fd_user; + if test "x$withval" != "x" ; then + fd_user=$withval + fi + + +fi + + +fd_group= + +# Check whether --with-fd_group was given. +if test "${with_fd_group+set}" = set; then : + withval=$with_fd_group; + if test "x$withval" != "x" ; then + fd_group=$withval + fi + + +fi + + + + + + + + + +SBINPERM=0750 + +# Check whether --with-sbin-perm was given. +if test "${with_sbin_perm+set}" = set; then : + withval=$with_sbin_perm; + if test "x$withval" != "x" ; then + SBINPERM=$withval + fi + + +fi + + + + +support_batch_insert=yes +# Check whether --enable-batch-insert was given. +if test "${enable_batch_insert+set}" = set; then : + enableval=$enable_batch_insert; + if test x$enableval = xno; then + support_batch_insert=no + fi + + +fi + + +if test x$support_batch_insert = xyes; then + +$as_echo "#define USE_BATCH_FILE_INSERT 1" >>confdefs.h + +fi + + +uncomment_dbi="#" + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for PostgreSQL support" >&5 +$as_echo_n "checking for PostgreSQL support... " >&6; } + +# Check whether --with-postgresql was given. 
+if test "${with_postgresql+set}" = set; then : + withval=$with_postgresql; + if test "$withval" != "no"; then + if test "$withval" = "yes"; then + PG_CONFIG=`which pg_config 2>/dev/null` + if test -n "$PG_CONFIG"; then + POSTGRESQL_INCDIR=`"$PG_CONFIG" --includedir` + POSTGRESQL_LIBDIR=`"$PG_CONFIG" --libdir` + POSTGRESQL_BINDIR=`"$PG_CONFIG" --bindir` + elif test -f /usr/local/include/libpq-fe.h; then + POSTGRESQL_INCDIR=/usr/local/include + if test -d /usr/local/lib64; then + POSTGRESQL_LIBDIR=/usr/local/lib64 + else + POSTGRESQL_LIBDIR=/usr/local/lib + fi + POSTGRESQL_BINDIR=/usr/local/bin + elif test -f /usr/include/libpq-fe.h; then + POSTGRESQL_INCDIR=/usr/include + if test -d /usr/lib64; then + POSTGRESQL_LIBDIR=/usr/lib64 + else + POSTGRESQL_LIBDIR=/usr/lib + fi + POSTGRESQL_BINDIR=/usr/bin + elif test -f /usr/include/pgsql/libpq-fe.h; then + POSTGRESQL_INCDIR=/usr/include/pgsql + if test -d /usr/lib64/pgsql; then + POSTGRESQL_LIBDIR=/usr/lib64/pgsql + else + POSTGRESQL_LIBDIR=/usr/lib/pgsql + fi + POSTGRESQL_BINDIR=/usr/bin + elif test -f /usr/include/postgresql/libpq-fe.h; then + POSTGRESQL_INCDIR=/usr/include/postgresql + if test -d /usr/lib64/postgresql; then + POSTGRESQL_LIBDIR=/usr/lib64/postgresql + else + POSTGRESQL_LIBDIR=/usr/lib/postgresql + fi + POSTGRESQL_BINDIR=/usr/bin + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + as_fn_error $? "Unable to find libpq-fe.h in standard locations" "$LINENO" 5 + fi + elif test -f $withval/include/libpq-fe.h; then + POSTGRESQL_INCDIR=$withval/include + POSTGRESQL_LIBDIR=$withval/lib + POSTGRESQL_BINDIR=$withval/bin + elif test -f $withval/include/postgresql/libpq-fe.h; then + POSTGRESQL_INCDIR=$withval/include/postgresql + if test -d $withval/lib64; then + POSTGRESQL_LIBDIR=$withval/lib64 + else + POSTGRESQL_LIBDIR=$withval/lib + fi + POSTGRESQL_BINDIR=$withval/bin + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + as_fn_error $? "Invalid PostgreSQL directory $withval - unable to find libpq-fe.h under $withval" "$LINENO" 5 + fi + +$as_echo "#define HAVE_POSTGRESQL 1" >>confdefs.h + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + + POSTGRESQL_INCLUDE=-I$POSTGRESQL_INCDIR + if test x$use_libtool != xno; then + POSTGRESQL_LIBS="-R $POSTGRESQL_LIBDIR -L$POSTGRESQL_LIBDIR -lpq" + else + POSTGRESQL_LIBS="-L$POSTGRESQL_LIBDIR -lpq" + fi + ac_fn_c_check_func "$LINENO" "crypt" "ac_cv_func_crypt" +if test "x$ac_cv_func_crypt" = xyes; then : + +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for crypt in -lcrypt" >&5 +$as_echo_n "checking for crypt in -lcrypt... " >&6; } +if ${ac_cv_lib_crypt_crypt+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lcrypt $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char crypt (); +int +main () +{ +return crypt (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_crypt_crypt=yes +else + ac_cv_lib_crypt_crypt=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_crypt_crypt" >&5 +$as_echo "$ac_cv_lib_crypt_crypt" >&6; } +if test "x$ac_cv_lib_crypt_crypt" = xyes; then : + POSTGRESQL_LIBS="$POSTGRESQL_LIBS -lcrypt" +fi + +fi + + POSTGRESQL_LIB=$POSTGRESQL_LIBDIR/libpq.a + DB_LIBS="${DB_LIBS} ${POSTGRESQL_LIBS}" + + if test -z "${db_backends}"; then + db_backends="PostgreSQL" + else + db_backends="${db_backends} PostgreSQL" + fi + if test -z "${DB_BACKENDS}"; then + DB_BACKENDS="postgresql" + else + DB_BACKENDS="${DB_BACKENDS} postgresql" + fi + + if test "x$support_batch_insert" = "xyes"; then + saved_LDFLAGS="${LDFLAGS}" + LDFLAGS="${saved_LDFLAGS} -L$POSTGRESQL_LIBDIR" + saved_LIBS="${LIBS}" + if test "x$ac_cv_lib_crypt_crypt" = "xyes" ; then + LIBS="${saved_LIBS} -lcrypt" + fi + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for PQisthreadsafe in -lpq" >&5 +$as_echo_n "checking for PQisthreadsafe in -lpq... " >&6; } +if ${ac_cv_lib_pq_PQisthreadsafe+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lpq $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char PQisthreadsafe (); +int +main () +{ +return PQisthreadsafe (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_pq_PQisthreadsafe=yes +else + ac_cv_lib_pq_PQisthreadsafe=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_pq_PQisthreadsafe" >&5 +$as_echo "$ac_cv_lib_pq_PQisthreadsafe" >&6; } +if test "x$ac_cv_lib_pq_PQisthreadsafe" = xyes; then : + +$as_echo "#define HAVE_PQISTHREADSAFE 1" >>confdefs.h + +fi + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for PQputCopyData in -lpq" >&5 +$as_echo_n "checking for PQputCopyData in -lpq... " >&6; } +if ${ac_cv_lib_pq_PQputCopyData+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lpq $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char PQputCopyData (); +int +main () +{ +return PQputCopyData (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_pq_PQputCopyData=yes +else + ac_cv_lib_pq_PQputCopyData=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_pq_PQputCopyData" >&5 +$as_echo "$ac_cv_lib_pq_PQputCopyData" >&6; } +if test "x$ac_cv_lib_pq_PQputCopyData" = xyes; then : + +$as_echo "#define HAVE_PQ_COPY 1" >>confdefs.h + +fi + + if test "x$ac_cv_lib_pq_PQputCopyData" = "xyes"; then + if test $support_batch_insert = yes ; then + +$as_echo "#define HAVE_POSTGRESQL_BATCH_FILE_INSERT 1" >>confdefs.h + + if test -z "${batch_insert_db_backends}"; then + batch_insert_db_backends="PostgreSQL" + else + batch_insert_db_backends="${batch_insert_db_backends} PostgreSQL" + fi + fi + fi + + if test x$ac_cv_lib_pq_PQisthreadsafe != xyes -a x$support_batch_insert = xyes + then + echo "WARNING: Your PostgreSQL client library is too old to detect " + echo "if it was compiled with --enable-thread-safety, consider to " + echo "upgrade it in order to avoid problems with Batch insert mode" + fi + + LDFLAGS="${saved_LDFLAGS}" + LIBS="${saved_LIBS}" + fi + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + fi + +else + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + +fi + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for MySQL support" >&5 +$as_echo_n "checking for MySQL support... " >&6; } + +# Check whether --with-mysql was given. +if test "${with_mysql+set}" = set; then : + withval=$with_mysql; + HAVE_LIBSR="no" + if test "$withval" != "no"; then + if test "$withval" = "yes"; then + MYSQL_CONFIG=`which mysql_config 2>/dev/null` + if test "x${MYSQL_CONFIG}" != x; then + MYSQL_BINDIR="${MYSQL_CONFIG%/*}" + ${MYSQL_CONFIG} --libs_r >/dev/null 2>&1 + if test $? = 0; then + MYSQL_LIBDIR=`${MYSQL_CONFIG} --libs_r` + MYSQL_INCDIR=`${MYSQL_CONFIG} --include` + HAVE_LIBSR="yes" + else + ${MYSQL_CONFIG} --variable=pkglibdir > /dev/null 2>&1 + if test $? 
= 0 ; then + MYSQL_LIBDIR=`${MYSQL_CONFIG} --variable=pkglibdir` + MYSQL_INCDIR=`${MYSQL_CONFIG} --variable=pkgincludedir` + fi + fi + fi + # if something wrong fall back to old method + if test "x${MYSQL_LIBDIR}" = x -o "x${MYSQL_INCDIR}" = x ; then + if test -f /usr/local/mysql/include/mysql/mysql.h; then + MYSQL_INCDIR=/usr/local/mysql/include/mysql + if test -f /usr/local/mysql/lib64/mysql/libmysqlclient_r.a \ + -o -f /usr/local/mysql/lib64/mysql/libmysqlclient_r.so; then + MYSQL_LIBDIR=/usr/local/mysql/lib64/mysql + else + MYSQL_LIBDIR=/usr/local/mysql/lib/mysql + fi + MYSQL_BINDIR=/usr/local/mysql/bin + elif test -f /usr/include/mysql/mysql.h; then + MYSQL_INCDIR=/usr/include/mysql + if test -f /usr/lib64/mysql/libmysqlclient_r.a \ + -o -f /usr/lib64/mysql/libmysqlclient_r.so; then + MYSQL_LIBDIR=/usr/lib64/mysql + elif test -f /usr/lib64/libmysqlclient_r.a \ + -o -f /usr/lib64/libmysqlclient_r.so; then + MYSQL_LIBDIR=/usr/lib64 + elif test -f /usr/lib/x86_64-linux-gnu/libmysqlclient_r.a \ + -o -f /usr/lib/x86_64-linux-gnu/libmysqlclient_r.so; then + MYSQL_LIBDIR=/usr/lib/x86_64-linux-gnu + elif test -f /usr/lib/mysql/libmysqlclient_r.a \ + -o -f /usr/lib/mysql/libmysqlclient_r.so; then + MYSQL_LIBDIR=/usr/lib/mysql + else + MYSQL_LIBDIR=/usr/lib + fi + MYSQL_BINDIR=/usr/bin + elif test -f /usr/include/mysql.h; then + MYSQL_INCDIR=/usr/include + if test -f /usr/lib64/libmysqlclient_r.a \ + -o -f /usr/lib64/libmysqlclient_r.so; then + MYSQL_LIBDIR=/usr/lib64 + else + MYSQL_LIBDIR=/usr/lib + fi + MYSQL_BINDIR=/usr/bin + elif test -f /usr/local/include/mysql/mysql.h; then + MYSQL_INCDIR=/usr/local/include/mysql + if test -f /usr/local/lib64/mysql/libmysqlclient_r.a \ + -o -f /usr/local/lib64/mysql/libmysqlclient_r.so; then + MYSQL_LIBDIR=/usr/local/lib64/mysql + else + MYSQL_LIBDIR=/usr/local/lib/mysql + fi + MYSQL_BINDIR=/usr/local/bin + elif test -f /usr/local/include/mysql.h; then + MYSQL_INCDIR=/usr/local/include + if test -f /usr/local/lib64/libmysqlclient_r.a \ + -o -f /usr/local/lib64/libmysqlclient_r.so; then + MYSQL_LIBDIR=/usr/local/lib64 + else + MYSQL_LIBDIR=/usr/local/lib + fi + MYSQL_BINDIR=/usr/local/bin + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + as_fn_error $? 
"Unable to find mysql.h in standard locations" "$LINENO" 5 + fi + fi + else + if test -f $withval/include/mysql/mysql.h; then + MYSQL_INCDIR=$withval/include/mysql + if test -f $withval/lib64/mysql/libmysqlclient_r.a \ + -o -f $withval/lib64/mysql/libmysqlclient_r.so; then + MYSQL_LIBDIR=$withval/lib64/mysql + elif test -f $withval/lib64/libmysqlclient_r.a \ + -o -f $withval/lib64/libmysqlclient_r.so; then + MYSQL_LIBDIR=$withval/lib64 + elif test -f $withval/lib/libmysqlclient_r.a \ + -o -f $withval/lib/libmysqlclient_r.so; then + MYSQL_LIBDIR=$withval/lib + else + MYSQL_LIBDIR=$withval/lib/mysql + fi + MYSQL_BINDIR=$withval/bin + elif test -f $withval/include/mysql.h; then + MYSQL_INCDIR=$withval/include + if test -f $withval/lib64/libmysqlclient_r.a \ + -o -f $withval/lib64/libmysqlclient_r.so; then + MYSQL_LIBDIR=$withval/lib64 + else + MYSQL_LIBDIR=$withval/lib + fi + MYSQL_BINDIR=$withval/bin + elif test -f $withval/mysql.h; then + MYSQL_INCDIR=$withval + { $as_echo "$as_me:${as_lineno-$LINENO}: Got with-mysql variable $MYSQL_INCDIR checking MySQL version" >&5 +$as_echo "$as_me: Got with-mysql variable $MYSQL_INCDIR checking MySQL version" >&6;} + case $MYSQL_INCDIR in + *mysql55*) + { $as_echo "$as_me:${as_lineno-$LINENO}: Assuming MacPorts MySQL 5.5 variant installed" >&5 +$as_echo "$as_me: Assuming MacPorts MySQL 5.5 variant installed" >&6;} + if test -f $prefix/lib/mysql55/mysql/libmysqlclient_r.a \ + -o -f $prefix/lib/mysql55/mysql/libmysqlclient_r.so; then + { $as_echo "$as_me:${as_lineno-$LINENO}: Found MySQL 5.5 library in $prefix/lib/mysql55/mysql" >&5 +$as_echo "$as_me: Found MySQL 5.5 library in $prefix/lib/mysql55/mysql" >&6;} + MYSQL_LIBDIR=$prefix/lib/mysql55/mysql + fi + MYSQL_BINDIR=$prefix/lib/mysql55/bin + ;; + *mysql51*) + { $as_echo "$as_me:${as_lineno-$LINENO}: Assuming MacPorts MySQL 5.1 variant installed" >&5 +$as_echo "$as_me: Assuming MacPorts MySQL 5.1 variant installed" >&6;} + if test -f $prefix/lib/mysql51/mysql/libmysqlclient_r.a \ + -o -f $prefix/lib/mysql51/mysql/libmysqlclient_r.so; then + { $as_echo "$as_me:${as_lineno-$LINENO}: Found MySQL 5.1 library in $prefix/lib/mysql55/mysql" >&5 +$as_echo "$as_me: Found MySQL 5.1 library in $prefix/lib/mysql55/mysql" >&6;} + MYSQL_LIBDIR=$prefix/lib/mysql51/mysql + fi + MYSQL_BINDIR=$prefix/lib/mysql51/bin + ;; + esac + if test -z "${MYSQL_LIBDIR}" ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + as_fn_error $? "MySQL $withval - unable to find MySQL libraries" "$LINENO" 5 + fi + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + as_fn_error $? 
"Invalid MySQL directory $withval - unable to find mysql.h under $withval" "$LINENO" 5 + fi + fi + if test "x${MYSQL_LIBDIR}" != x; then + MYSQL_INCLUDE=-I$MYSQL_INCDIR + if test "x$HAVE_LIBSR" = "xyes"; then + DB_LIBS="${DB_LIBS} ${MYSQL_LIBS}" + MYSQL_LIBS="$MYSQL_LIBDIR" + MYSQL_INCLUDE="$MYSQL_INCDIR" + +$as_echo "#define HAVE_MYSQL_THREAD_SAFE 1" >>confdefs.h + + if test -z "${batch_insert_db_backends}"; then + batch_insert_db_backends="MySQL" + else + batch_insert_db_backends="${batch_insert_db_backends} MySQL" + fi + elif test -f $MYSQL_LIBDIR/libmysqlclient_r.a \ + -o -f $MYSQL_LIBDIR/libmysqlclient_r.so; then + if test x$use_libtool != xno; then + MYSQL_LIBS="-R $MYSQL_LIBDIR -L$MYSQL_LIBDIR -lmysqlclient_r -lz" + else + MYSQL_LIBS="-L$MYSQL_LIBDIR -lmysqlclient_r -lz" + fi + DB_LIBS="${DB_LIBS} ${MYSQL_LIBS}" + fi + if test "x${MYSQL_LIBS}" = x; then + MYSQL_LIBS=$MYSQL_LIBDIR/libmysqlclient_r.a + fi + + +$as_echo "#define HAVE_MYSQL 1" >>confdefs.h + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + + if test -z "${db_backends}" ; then + db_backends="MySQL" + else + db_backends="${db_backends} MySQL" + fi + if test -z "${DB_BACKENDS}" ; then + DB_BACKENDS="mysql" + else + DB_BACKENDS="${DB_BACKENDS} mysql" + fi + + if test "x$HAVE_LIBSR" = "xno"; then + if test "x$support_batch_insert" = "xyes"; then + saved_LDFLAGS="${LDFLAGS}" + LDFLAGS="${saved_LDFLAGS} -L$MYSQL_LIBDIR" + saved_LIBS="${LIBS}" + LIBS="${saved_LIBS} -lz" + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for mysql_thread_safe in -lmysqlclient_r" >&5 +$as_echo_n "checking for mysql_thread_safe in -lmysqlclient_r... " >&6; } +if ${ac_cv_lib_mysqlclient_r_mysql_thread_safe+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lmysqlclient_r $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char mysql_thread_safe (); +int +main () +{ +return mysql_thread_safe (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_mysqlclient_r_mysql_thread_safe=yes +else + ac_cv_lib_mysqlclient_r_mysql_thread_safe=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_mysqlclient_r_mysql_thread_safe" >&5 +$as_echo "$ac_cv_lib_mysqlclient_r_mysql_thread_safe" >&6; } +if test "x$ac_cv_lib_mysqlclient_r_mysql_thread_safe" = xyes; then : + +$as_echo "#define HAVE_MYSQL_THREAD_SAFE 1" >>confdefs.h + +fi + + if test "x$ac_cv_lib_mysqlclient_r_mysql_thread_safe" = "xyes"; then + if test -z "${batch_insert_db_backends}"; then + batch_insert_db_backends="MySQL" + else + batch_insert_db_backends="${batch_insert_db_backends} MySQL" + fi + fi + + LDFLAGS="${saved_LDFLAGS}" + LIBS="${saved_LIBS}" + fi + fi + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + fi + fi + +else + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for MySQL embedded support" >&5 +$as_echo_n "checking for MySQL embedded support... " >&6; } + +# Check whether --with-embedded-mysql was given. 
+if test "${with_embedded_mysql+set}" = set; then : + withval=$with_embedded_mysql; + if test "$withval" != "no"; then + if test "$withval" = "yes"; then + if test -f /usr/local/mysql/include/mysql/mysql.h; then + MYSQL_INCDIR=/usr/local/mysql/include/mysql + if test -d /usr/local/mysql/lib64/mysql; then + MYSQL_LIBDIR=/usr/local/mysql/lib64/mysql + else + MYSQL_LIBDIR=/usr/local/mysql/lib/mysql + fi + MYSQL_BINDIR=/usr/local/mysql/bin + elif test -f /usr/include/mysql/mysql.h; then + MYSQL_INCDIR=/usr/include/mysql + if test -d /usr/lib64/mysql; then + MYSQL_LIBDIR=/usr/lib64/mysql + else + MYSQL_LIBDIR=/usr/lib/mysql + fi + MYSQL_BINDIR=/usr/bin + elif test -f /usr/include/mysql.h; then + MYSQL_INCDIR=/usr/include + if test -d /usr/lib64; then + MYSQL_LIBDIR=/usr/lib64 + else + MYSQL_LIBDIR=/usr/lib + fi + MYSQL_BINDIR=/usr/bin + elif test -f /usr/local/include/mysql/mysql.h; then + MYSQL_INCDIR=/usr/local/include/mysql + if test -d /usr/local/lib64/mysql; then + MYSQL_LIBDIR=/usr/local/lib64/mysql + else + MYSQL_LIBDIR=/usr/local/lib/mysql + fi + MYSQL_BINDIR=/usr/local/bin + elif test -f /usr/local/include/mysql.h; then + MYSQL_INCDIR=/usr/local/include + if test -d /usr/local/lib64; then + MYSQL_LIBDIR=/usr/local/lib64 + else + MYSQL_LIBDIR=/usr/local/lib + fi + MYSQL_BINDIR=/usr/local/bin + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + as_fn_error $? "Unable to find mysql.h in standard locations" "$LINENO" 5 + fi + else + if test -f $withval/include/mysql/mysql.h; then + MYSQL_INCDIR=$withval/include/mysql + if test -d $withval/lib64/mysql; then + MYSQL_LIBDIR=$withval/lib64/mysql + else + MYSQL_LIBDIR=$withval/lib/mysql + fi + MYSQL_BINDIR=$withval/bin + elif test -f $withval/include/mysql.h; then + MYSQL_INCDIR=$withval/include + if test -d $withval/lib64; then + MYSQL_LIBDIR=$withval/lib64 + else + MYSQL_LIBDIR=$withval/lib + fi + MYSQL_BINDIR=$withval/bin + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + as_fn_error $? "Invalid MySQL directory $withval - unable to find mysql.h under $withval" "$LINENO" 5 + fi + fi + MYSQL_INCLUDE=-I$MYSQL_INCDIR + if test x$use_libtool != xno; then + MYSQL_LIBS="-R $MYSQL_LIBDIR -L$MYSQL_LIBDIR -lmysqld -lz -lm -lcrypt" + else + MYSQL_LIBS="-L$MYSQL_LIBDIR -lmysqld -lz -lm -lcrypt" + fi + MYSQL_LIB=$MYSQL_LIBDIR/libmysqld.a + DB_LIBS="${DB_LIBS} ${MYSQL_LIBS}" + + +$as_echo "#define HAVE_MYSQL 1" >>confdefs.h + + +$as_echo "#define HAVE_EMBEDDED_MYSQL 1" >>confdefs.h + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + + if test -z "${db_backends}"; then + db_backends="MySQL" + else + db_backends="${db_backends} MySQL" + fi + if test -z "${DB_BACKENDS}"; then + DB_BACKENDS="mysql" + else + DB_BACKENDS="${DB_BACKENDS} mysql" + fi + + if test "x$support_batch_insert" = "xyes"; then + saved_LDFLAGS="${LDFLAGS}" + LDFLAGS="${saved_LDFLAGS} -L$MYSQL_LIBDIR" + saved_LIBS="${LIBS}" + LIBS="${saved_LIBS} -lz -lm -lcrypt" + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for mysql_thread_safe in -lmysqlclient_r" >&5 +$as_echo_n "checking for mysql_thread_safe in -lmysqlclient_r... " >&6; } +if ${ac_cv_lib_mysqlclient_r_mysql_thread_safe+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lmysqlclient_r $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. 
+ Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char mysql_thread_safe (); +int +main () +{ +return mysql_thread_safe (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_mysqlclient_r_mysql_thread_safe=yes +else + ac_cv_lib_mysqlclient_r_mysql_thread_safe=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_mysqlclient_r_mysql_thread_safe" >&5 +$as_echo "$ac_cv_lib_mysqlclient_r_mysql_thread_safe" >&6; } +if test "x$ac_cv_lib_mysqlclient_r_mysql_thread_safe" = xyes; then : + +$as_echo "#define HAVE_MYSQL_THREAD_SAFE 1" >>confdefs.h + +fi + + if test "x$ac_cv_lib_mysqlclient_r_mysql_thread_safe" = "xyes"; then + if test -z "${batch_insert_db_backends}"; then + batch_insert_db_backends="MySQL" + else + batch_insert_db_backends="${batch_insert_db_backends} MySQL" + fi + fi + + LDFLAGS="${saved_LDFLAGS}" + LIBS="${saved_LIBS}" + fi + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + fi + +else + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + +fi + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for SQLite3 support" >&5 +$as_echo_n "checking for SQLite3 support... " >&6; } + +# Check whether --with-sqlite3 was given. +if test "${with_sqlite3+set}" = set; then : + withval=$with_sqlite3; + if test "$withval" != "no"; then + if test "$withval" = "yes"; then + if test -f /usr/local/include/sqlite3.h; then + SQLITE_INCDIR=/usr/local/include + if test -d /usr/local/lib64; then + SQLITE_LIBDIR=/usr/local/lib64 + else + SQLITE_LIBDIR=/usr/local/lib + fi + SQLITE_BINDIR=/usr/local/bin + elif test -f /usr/include/sqlite3.h; then + SQLITE_INCDIR=/usr/include + if test -n $multiarch -a -d /usr/lib/$multiarch; then + SQLITE_LIBDIR=/usr/lib/$multiarch + elif test -d /usr/lib64; then + SQLITE_LIBDIR=/usr/lib64 + else + SQLITE_LIBDIR=/usr/lib + fi + SQLITE_BINDIR=/usr/bin + elif test -f $prefix/include/sqlite3.h; then + SQLITE_INCDIR=$prefix/include + if test -d $prefix/lib64; then + SQLITE_LIBDIR=$prefix/lib64 + else + SQLITE_LIBDIR=$prefix/lib + fi + SQLITE_BINDIR=$prefix/bin + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + as_fn_error $? "Unable to find sqlite3.h in standard locations" "$LINENO" 5 + fi + else + if test -f $withval/sqlite3.h; then + SQLITE_INCDIR=$withval + SQLITE_LIBDIR=$withval + SQLITE_BINDIR=$withval + elif test -f $withval/include/sqlite3.h; then + SQLITE_INCDIR=$withval/include + if test -d $withval/lib64; then + SQLITE_LIBDIR=$withval/lib64 + else + SQLITE_LIBDIR=$withval/lib + fi + SQLITE_BINDIR=$withval/bin + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + as_fn_error $? 
"Invalid SQLite3 directory $withval - unable to find sqlite3.h under $withval" "$LINENO" 5 + fi + fi + SQLITE_INCLUDE=-I$SQLITE_INCDIR + if test x$use_libtool != xno; then + SQLITE_LIBS="-R $SQLITE_LIBDIR -L$SQLITE_LIBDIR -lsqlite3" + else + SQLITE_LIBS="-L$SQLITE_LIBDIR -lsqlite3" + fi + SQLITE_LIB=$SQLITE_LIBDIR/libsqlite3.a + DB_LIBS="${DB_LIBS} ${SQLITE_LIBS}" + + +$as_echo "#define HAVE_SQLITE3 1" >>confdefs.h + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + + if test -z "${db_backends}"; then + db_backends="SQLite3" + else + db_backends="${db_backends} SQLite3" + fi + if test -z "${DB_BACKENDS}"; then + DB_BACKENDS="sqlite3" + else + DB_BACKENDS="${DB_BACKENDS} sqlite3" + fi + + if test "x$support_batch_insert" = "xyes"; then + saved_LDFLAGS="${LDFLAGS}" + LDFLAGS="${saved_LDFLAGS} -lpthread -L$SQLITE_LIBDIR" + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for sqlite3_threadsafe in -lsqlite3" >&5 +$as_echo_n "checking for sqlite3_threadsafe in -lsqlite3... " >&6; } +if ${ac_cv_lib_sqlite3_sqlite3_threadsafe+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lsqlite3 $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char sqlite3_threadsafe (); +int +main () +{ +return sqlite3_threadsafe (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_sqlite3_sqlite3_threadsafe=yes +else + ac_cv_lib_sqlite3_sqlite3_threadsafe=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_sqlite3_sqlite3_threadsafe" >&5 +$as_echo "$ac_cv_lib_sqlite3_sqlite3_threadsafe" >&6; } +if test "x$ac_cv_lib_sqlite3_sqlite3_threadsafe" = xyes; then : + +$as_echo "#define HAVE_SQLITE3_THREADSAFE 1" >>confdefs.h + +fi + + if test "x$ac_cv_lib_sqlite3_sqlite3_threadsafe" = "xyes"; then + if test -z "${batch_insert_db_backends}"; then + batch_insert_db_backends="SQLite3" + else + batch_insert_db_backends="${batch_insert_db_backends} SQLite3" + fi + fi + + LDFLAGS="${saved_LDFLAGS}" + fi + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + fi + +else + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + +fi + + + + + + + +if test -z "${batch_insert_db_backends}"; then + batch_insert_db_backends="None" +fi + +if test "x${db_backends}" = "x" ; then + echo " " + echo " " + echo "You have not specified either --enable-client-only or one of the" + echo "supported databases: MySQL, PostgreSQL, or SQLite3." + echo "This is not permitted. Please reconfigure." + echo " " + echo "Aborting the configuration ..." + echo " " + echo " " + exit 1 +fi + +case `echo $DB_BACKENDS | wc -w | sed -e 's/^ *//'` in + 1) + DEFAULT_DB_TYPE="${DB_BACKENDS}" + if test x$use_libtool = xno; then + SHARED_CATALOG_TARGETS="" + else + SHARED_CATALOG_TARGETS="libbaccats-${DEFAULT_DB_TYPE}.la" + fi + ;; + *) + DEFAULT_DB_TYPE=`echo ${DB_BACKENDS} | cut -d' ' -f1` + + if test x$use_libtool = xno; then + echo " " + echo " " + echo "You have specified two or more of the" + echo "supported databases: MySQL, PostgreSQL, or SQLite3." + echo "This is not permitted when not using libtool Please reconfigure." 
+ echo " " + echo "Aborting the configuration ..." + echo " " + echo " " + exit 1 + fi + + SHARED_CATALOG_TARGETS="" + for db_type in ${DB_BACKENDS} + do + if test -z "${SHARED_CATALOG_TARGETS}"; then + SHARED_CATALOG_TARGETS="libbaccats-${db_type}.la" + else + SHARED_CATALOG_TARGETS="${SHARED_CATALOG_TARGETS} libbaccats-${db_type}.la" + fi + done + ;; +esac + +if test x$use_libtool = xyes; then + DB_LIBS="" +fi + + + + + + + +$as_echo "#define PROTOTYPES 1" >>confdefs.h + + +if test -z "$CFLAGS" -o "$CFLAGS" = "-g -O2"; then + if test -z "$CCOPTS"; then + CCOPTS='-g -O2 -Wall' + fi + CFLAGS="$CCOPTS" +fi + + + +largefile_support="no" + + # Check whether --enable-largefile was given. +if test "${enable_largefile+set}" = set; then : + enableval=$enable_largefile; +fi + + if test "$enable_largefile" != no; then + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}getconf", so it can be a program name with args. +set dummy ${ac_tool_prefix}getconf; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_GETCONF+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$GETCONF"; then + ac_cv_prog_GETCONF="$GETCONF" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_GETCONF="${ac_tool_prefix}getconf" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +GETCONF=$ac_cv_prog_GETCONF +if test -n "$GETCONF"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $GETCONF" >&5 +$as_echo "$GETCONF" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_GETCONF"; then + ac_ct_GETCONF=$GETCONF + # Extract the first word of "getconf", so it can be a program name with args. +set dummy getconf; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_GETCONF+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_GETCONF"; then + ac_cv_prog_ac_ct_GETCONF="$ac_ct_GETCONF" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_GETCONF="getconf" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_GETCONF=$ac_cv_prog_ac_ct_GETCONF +if test -n "$ac_ct_GETCONF"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_GETCONF" >&5 +$as_echo "$ac_ct_GETCONF" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_GETCONF" = x; then + GETCONF="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + GETCONF=$ac_ct_GETCONF + fi +else + GETCONF="$ac_cv_prog_GETCONF" +fi + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for CFLAGS value to request large file support" >&5 +$as_echo_n "checking for CFLAGS value to request large file support... " >&6; } +if ${ac_cv_sys_largefile_CFLAGS+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_cv_sys_largefile_CFLAGS=`($GETCONF LFS_CFLAGS) 2>/dev/null` || { + ac_cv_sys_largefile_CFLAGS=no + case "$host_os" in + # IRIX 6.2 and later require cc -n32. + irix6.[2-9]* | irix6.1[0-9]* | irix[7-9].* | irix[1-9][0-9]*) + if test "$GCC" != yes; then + ac_cv_sys_largefile_CFLAGS=-n32 + fi + ac_save_CC="$CC" + CC="$CC $ac_cv_sys_largefile_CFLAGS" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + +else + ac_cv_sys_largefile_CFLAGS=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + CC="$ac_save_CC" + esac + } +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_largefile_CFLAGS" >&5 +$as_echo "$ac_cv_sys_largefile_CFLAGS" >&6; } + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for LDFLAGS value to request large file support" >&5 +$as_echo_n "checking for LDFLAGS value to request large file support... " >&6; } +if ${ac_cv_sys_largefile_LDFLAGS+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_cv_sys_largefile_LDFLAGS=`($GETCONF LFS_LDFLAGS) 2>/dev/null` || { + ac_cv_sys_largefile_LDFLAGS=no + + } +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_largefile_LDFLAGS" >&5 +$as_echo "$ac_cv_sys_largefile_LDFLAGS" >&6; } + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for LIBS value to request large file support" >&5 +$as_echo_n "checking for LIBS value to request large file support... 
" >&6; } +if ${ac_cv_sys_largefile_LIBS+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_cv_sys_largefile_LIBS=`($GETCONF LFS_LIBS) 2>/dev/null` || { + ac_cv_sys_largefile_LIBS=no + + } +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_largefile_LIBS" >&5 +$as_echo "$ac_cv_sys_largefile_LIBS" >&6; } + + for ac_flag in $ac_cv_sys_largefile_CFLAGS no; do + case "$ac_flag" in + no) ;; + -D_FILE_OFFSET_BITS=*) ;; + -D_LARGEFILE_SOURCE | -D_LARGEFILE_SOURCE=*) ;; + -D_LARGE_FILES | -D_LARGE_FILES=*) ;; + -D?* | -I?*) + case "$ac_flag" in + no) ;; + ?*) + case "$CPPFLAGS" in + '') CPPFLAGS="$ac_flag" ;; + *) CPPFLAGS=$CPPFLAGS' '"$ac_flag" ;; + esac ;; + esac ;; + *) + case "$ac_flag" in + no) ;; + ?*) + case "$CFLAGS" in + '') CFLAGS="$ac_flag" ;; + *) CFLAGS=$CFLAGS' '"$ac_flag" ;; + esac ;; + esac ;; + esac + done + case "$ac_cv_sys_largefile_LDFLAGS" in + no) ;; + ?*) + case "$LDFLAGS" in + '') LDFLAGS="$ac_cv_sys_largefile_LDFLAGS" ;; + *) LDFLAGS=$LDFLAGS' '"$ac_cv_sys_largefile_LDFLAGS" ;; + esac ;; + esac + case "$ac_cv_sys_largefile_LIBS" in + no) ;; + ?*) + case "$LIBS" in + '') LIBS="$ac_cv_sys_largefile_LIBS" ;; + *) LIBS=$LIBS' '"$ac_cv_sys_largefile_LIBS" ;; + esac ;; + esac + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _FILE_OFFSET_BITS" >&5 +$as_echo_n "checking for _FILE_OFFSET_BITS... " >&6; } +if ${ac_cv_sys_file_offset_bits+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_cv_sys_file_offset_bits=no + ac_cv_sys_file_offset_bits=64 + for ac_flag in $ac_cv_sys_largefile_CFLAGS no; do + case "$ac_flag" in + -D_FILE_OFFSET_BITS) + ac_cv_sys_file_offset_bits=1 ;; + -D_FILE_OFFSET_BITS=*) + ac_cv_sys_file_offset_bits=`expr " $ac_flag" : '[^=]*=\(.*\)'` ;; + esac + done + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_file_offset_bits" >&5 +$as_echo "$ac_cv_sys_file_offset_bits" >&6; } + if test "$ac_cv_sys_file_offset_bits" != no; then + +cat >>confdefs.h <<_ACEOF +#define _FILE_OFFSET_BITS $ac_cv_sys_file_offset_bits +_ACEOF + + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _LARGEFILE_SOURCE" >&5 +$as_echo_n "checking for _LARGEFILE_SOURCE... " >&6; } +if ${ac_cv_sys_largefile_source+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_cv_sys_largefile_source=no + ac_cv_sys_largefile_source=1 + for ac_flag in $ac_cv_sys_largefile_CFLAGS no; do + case "$ac_flag" in + -D_LARGEFILE_SOURCE) + ac_cv_sys_largefile_source=1 ;; + -D_LARGEFILE_SOURCE=*) + ac_cv_sys_largefile_source=`expr " $ac_flag" : '[^=]*=\(.*\)'` ;; + esac + done + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_largefile_source" >&5 +$as_echo "$ac_cv_sys_largefile_source" >&6; } + if test "$ac_cv_sys_largefile_source" != no; then + +cat >>confdefs.h <<_ACEOF +#define _LARGEFILE_SOURCE $ac_cv_sys_largefile_source +_ACEOF + + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _LARGE_FILES" >&5 +$as_echo_n "checking for _LARGE_FILES... 
" >&6; } +if ${ac_cv_sys_large_files+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_cv_sys_large_files=no + ac_cv_sys_large_files=1 + for ac_flag in $ac_cv_sys_largefile_CFLAGS no; do + case "$ac_flag" in + -D_LARGE_FILES) + ac_cv_sys_large_files=1 ;; + -D_LARGE_FILES=*) + ac_cv_sys_large_files=`expr " $ac_flag" : '[^=]*=\(.*\)'` ;; + esac + done + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_large_files" >&5 +$as_echo "$ac_cv_sys_large_files" >&6; } + if test "$ac_cv_sys_large_files" != no; then + +cat >>confdefs.h <<_ACEOF +#define _LARGE_FILES $ac_cv_sys_large_files +_ACEOF + + fi + fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for X" >&5 +$as_echo_n "checking for X... " >&6; } + + +# Check whether --with-x was given. +if test "${with_x+set}" = set; then : + withval=$with_x; +fi + +# $have_x is `yes', `no', `disabled', or empty when we do not yet know. +if test "x$with_x" = xno; then + # The user explicitly disabled X. + have_x=disabled +else + case $x_includes,$x_libraries in #( + *\'*) as_fn_error $? "cannot use X directory names containing '" "$LINENO" 5;; #( + *,NONE | NONE,*) if ${ac_cv_have_x+:} false; then : + $as_echo_n "(cached) " >&6 +else + # One or both of the vars are not set, and there is no cached value. +ac_x_includes=no ac_x_libraries=no +rm -f -r conftest.dir +if mkdir conftest.dir; then + cd conftest.dir + cat >Imakefile <<'_ACEOF' +incroot: + @echo incroot='${INCROOT}' +usrlibdir: + @echo usrlibdir='${USRLIBDIR}' +libdir: + @echo libdir='${LIBDIR}' +_ACEOF + if (export CC; ${XMKMF-xmkmf}) >/dev/null 2>/dev/null && test -f Makefile; then + # GNU make sometimes prints "make[1]: Entering ...", which would confuse us. + for ac_var in incroot usrlibdir libdir; do + eval "ac_im_$ac_var=\`\${MAKE-make} $ac_var 2>/dev/null | sed -n 's/^$ac_var=//p'\`" + done + # Open Windows xmkmf reportedly sets LIBDIR instead of USRLIBDIR. + for ac_extension in a so sl dylib la dll; do + if test ! -f "$ac_im_usrlibdir/libX11.$ac_extension" && + test -f "$ac_im_libdir/libX11.$ac_extension"; then + ac_im_usrlibdir=$ac_im_libdir; break + fi + done + # Screen out bogus values from the imake configuration. They are + # bogus both because they are the default anyway, and because + # using them would break gcc on systems where it needs fixed includes. + case $ac_im_incroot in + /usr/include) ac_x_includes= ;; + *) test -f "$ac_im_incroot/X11/Xos.h" && ac_x_includes=$ac_im_incroot;; + esac + case $ac_im_usrlibdir in + /usr/lib | /usr/lib64 | /lib | /lib64) ;; + *) test -d "$ac_im_usrlibdir" && ac_x_libraries=$ac_im_usrlibdir ;; + esac + fi + cd .. + rm -f -r conftest.dir +fi + +# Standard set of common directories for X headers. +# Check X11 before X11Rn because it is often a symlink to the current release. 
+ac_x_header_dirs=' +/usr/X11/include +/usr/X11R7/include +/usr/X11R6/include +/usr/X11R5/include +/usr/X11R4/include + +/usr/include/X11 +/usr/include/X11R7 +/usr/include/X11R6 +/usr/include/X11R5 +/usr/include/X11R4 + +/usr/local/X11/include +/usr/local/X11R7/include +/usr/local/X11R6/include +/usr/local/X11R5/include +/usr/local/X11R4/include + +/usr/local/include/X11 +/usr/local/include/X11R7 +/usr/local/include/X11R6 +/usr/local/include/X11R5 +/usr/local/include/X11R4 + +/usr/X386/include +/usr/x386/include +/usr/XFree86/include/X11 + +/usr/include +/usr/local/include +/usr/unsupported/include +/usr/athena/include +/usr/local/x11r5/include +/usr/lpp/Xamples/include + +/usr/openwin/include +/usr/openwin/share/include' + +if test "$ac_x_includes" = no; then + # Guess where to find include files, by looking for Xlib.h. + # First, try using that file with no special directory specified. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + # We can compile using X headers with no special include directory. +ac_x_includes= +else + for ac_dir in $ac_x_header_dirs; do + if test -r "$ac_dir/X11/Xlib.h"; then + ac_x_includes=$ac_dir + break + fi +done +fi +rm -f conftest.err conftest.i conftest.$ac_ext +fi # $ac_x_includes = no + +if test "$ac_x_libraries" = no; then + # Check for the libraries. + # See if we find them without any special options. + # Don't add to $LIBS permanently. + ac_save_LIBS=$LIBS + LIBS="-lX11 $LIBS" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +int +main () +{ +XrmInitialize () + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + LIBS=$ac_save_LIBS +# We can link X programs with no special library path. +ac_x_libraries= +else + LIBS=$ac_save_LIBS +for ac_dir in `$as_echo "$ac_x_includes $ac_x_header_dirs" | sed s/include/lib/g` +do + # Don't even attempt the hair of trying to link an X program! + for ac_extension in a so sl dylib la dll; do + if test -r "$ac_dir/libX11.$ac_extension"; then + ac_x_libraries=$ac_dir + break 2 + fi + done +done +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi # $ac_x_libraries = no + +case $ac_x_includes,$ac_x_libraries in #( + no,* | *,no | *\'*) + # Didn't find X, or a directory has "'" in its name. + ac_cv_have_x="have_x=no";; #( + *) + # Record where we found X for the cache. + ac_cv_have_x="have_x=yes\ + ac_x_includes='$ac_x_includes'\ + ac_x_libraries='$ac_x_libraries'" +esac +fi +;; #( + *) have_x=yes;; + esac + eval "$ac_cv_have_x" +fi # $with_x != no + +if test "$have_x" != yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $have_x" >&5 +$as_echo "$have_x" >&6; } + no_x=yes +else + # If each of the values was on the command line, it overrides each guess. + test "x$x_includes" = xNONE && x_includes=$ac_x_includes + test "x$x_libraries" = xNONE && x_libraries=$ac_x_libraries + # Update the cache value to reflect the command line values. + ac_cv_have_x="have_x=yes\ + ac_x_includes='$x_includes'\ + ac_x_libraries='$x_libraries'" + { $as_echo "$as_me:${as_lineno-$LINENO}: result: libraries $x_libraries, headers $x_includes" >&5 +$as_echo "libraries $x_libraries, headers $x_includes" >&6; } +fi + +if test "$no_x" = yes; then + # Not all programs may use this symbol, but it does not hurt to define it. 
+ +$as_echo "#define X_DISPLAY_MISSING 1" >>confdefs.h + + X_CFLAGS= X_PRE_LIBS= X_LIBS= X_EXTRA_LIBS= +else + if test -n "$x_includes"; then + X_CFLAGS="$X_CFLAGS -I$x_includes" + fi + + # It would also be nice to do this for all -L options, not just this one. + if test -n "$x_libraries"; then + X_LIBS="$X_LIBS -L$x_libraries" + # For Solaris; some versions of Sun CC require a space after -R and + # others require no space. Words are not sufficient . . . . + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether -R must be followed by a space" >&5 +$as_echo_n "checking whether -R must be followed by a space... " >&6; } + ac_xsave_LIBS=$LIBS; LIBS="$LIBS -R$x_libraries" + ac_xsave_c_werror_flag=$ac_c_werror_flag + ac_c_werror_flag=yes + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + X_LIBS="$X_LIBS -R$x_libraries" +else + LIBS="$ac_xsave_LIBS -R $x_libraries" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + X_LIBS="$X_LIBS -R $x_libraries" +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: neither works" >&5 +$as_echo "neither works" >&6; } +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + ac_c_werror_flag=$ac_xsave_c_werror_flag + LIBS=$ac_xsave_LIBS + fi + + # Check for system-dependent libraries X programs must link with. + # Do this before checking for the system-independent R6 libraries + # (-lICE), since we may need -lsocket or whatever for X linking. + + if test "$ISC" = yes; then + X_EXTRA_LIBS="$X_EXTRA_LIBS -lnsl_s -linet" + else + # Martyn Johnson says this is needed for Ultrix, if the X + # libraries were built with DECnet support. And Karl Berry says + # the Alpha needs dnet_stub (dnet does not exist). + ac_xsave_LIBS="$LIBS"; LIBS="$LIBS $X_LIBS -lX11" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char XOpenDisplay (); +int +main () +{ +return XOpenDisplay (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dnet_ntoa in -ldnet" >&5 +$as_echo_n "checking for dnet_ntoa in -ldnet... " >&6; } +if ${ac_cv_lib_dnet_dnet_ntoa+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldnet $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char dnet_ntoa (); +int +main () +{ +return dnet_ntoa (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_dnet_dnet_ntoa=yes +else + ac_cv_lib_dnet_dnet_ntoa=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dnet_dnet_ntoa" >&5 +$as_echo "$ac_cv_lib_dnet_dnet_ntoa" >&6; } +if test "x$ac_cv_lib_dnet_dnet_ntoa" = xyes; then : + X_EXTRA_LIBS="$X_EXTRA_LIBS -ldnet" +fi + + if test $ac_cv_lib_dnet_dnet_ntoa = no; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dnet_ntoa in -ldnet_stub" >&5 +$as_echo_n "checking for dnet_ntoa in -ldnet_stub... " >&6; } +if ${ac_cv_lib_dnet_stub_dnet_ntoa+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldnet_stub $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char dnet_ntoa (); +int +main () +{ +return dnet_ntoa (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_dnet_stub_dnet_ntoa=yes +else + ac_cv_lib_dnet_stub_dnet_ntoa=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dnet_stub_dnet_ntoa" >&5 +$as_echo "$ac_cv_lib_dnet_stub_dnet_ntoa" >&6; } +if test "x$ac_cv_lib_dnet_stub_dnet_ntoa" = xyes; then : + X_EXTRA_LIBS="$X_EXTRA_LIBS -ldnet_stub" +fi + + fi +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + LIBS="$ac_xsave_LIBS" + + # msh@cis.ufl.edu says -lnsl (and -lsocket) are needed for his 386/AT, + # to get the SysV transport functions. + # Chad R. Larson says the Pyramis MIS-ES running DC/OSx (SVR4) + # needs -lnsl. + # The nsl library prevents programs from opening the X display + # on Irix 5.2, according to T.E. Dickey. + # The functions gethostbyname, getservbyname, and inet_addr are + # in -lbsd on LynxOS 3.0.1/i386, according to Lars Hecking. + ac_fn_c_check_func "$LINENO" "gethostbyname" "ac_cv_func_gethostbyname" +if test "x$ac_cv_func_gethostbyname" = xyes; then : + +fi + + if test $ac_cv_func_gethostbyname = no; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for gethostbyname in -lnsl" >&5 +$as_echo_n "checking for gethostbyname in -lnsl... " >&6; } +if ${ac_cv_lib_nsl_gethostbyname+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lnsl $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char gethostbyname (); +int +main () +{ +return gethostbyname (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_nsl_gethostbyname=yes +else + ac_cv_lib_nsl_gethostbyname=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_nsl_gethostbyname" >&5 +$as_echo "$ac_cv_lib_nsl_gethostbyname" >&6; } +if test "x$ac_cv_lib_nsl_gethostbyname" = xyes; then : + X_EXTRA_LIBS="$X_EXTRA_LIBS -lnsl" +fi + + if test $ac_cv_lib_nsl_gethostbyname = no; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for gethostbyname in -lbsd" >&5 +$as_echo_n "checking for gethostbyname in -lbsd... " >&6; } +if ${ac_cv_lib_bsd_gethostbyname+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lbsd $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char gethostbyname (); +int +main () +{ +return gethostbyname (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_bsd_gethostbyname=yes +else + ac_cv_lib_bsd_gethostbyname=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_bsd_gethostbyname" >&5 +$as_echo "$ac_cv_lib_bsd_gethostbyname" >&6; } +if test "x$ac_cv_lib_bsd_gethostbyname" = xyes; then : + X_EXTRA_LIBS="$X_EXTRA_LIBS -lbsd" +fi + + fi + fi + + # lieder@skyler.mavd.honeywell.com says without -lsocket, + # socket/setsockopt and other routines are undefined under SCO ODT + # 2.0. But -lsocket is broken on IRIX 5.2 (and is not necessary + # on later versions), says Simon Leinen: it contains gethostby* + # variants that don't use the name server (or something). -lsocket + # must be given before -lnsl if both are needed. We assume that + # if connect needs -lnsl, so does gethostbyname. + ac_fn_c_check_func "$LINENO" "connect" "ac_cv_func_connect" +if test "x$ac_cv_func_connect" = xyes; then : + +fi + + if test $ac_cv_func_connect = no; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for connect in -lsocket" >&5 +$as_echo_n "checking for connect in -lsocket... " >&6; } +if ${ac_cv_lib_socket_connect+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lsocket $X_EXTRA_LIBS $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char connect (); +int +main () +{ +return connect (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_socket_connect=yes +else + ac_cv_lib_socket_connect=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_socket_connect" >&5 +$as_echo "$ac_cv_lib_socket_connect" >&6; } +if test "x$ac_cv_lib_socket_connect" = xyes; then : + X_EXTRA_LIBS="-lsocket $X_EXTRA_LIBS" +fi + + fi + + # Guillermo Gomez says -lposix is necessary on A/UX. + ac_fn_c_check_func "$LINENO" "remove" "ac_cv_func_remove" +if test "x$ac_cv_func_remove" = xyes; then : + +fi + + if test $ac_cv_func_remove = no; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for remove in -lposix" >&5 +$as_echo_n "checking for remove in -lposix... " >&6; } +if ${ac_cv_lib_posix_remove+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lposix $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char remove (); +int +main () +{ +return remove (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_posix_remove=yes +else + ac_cv_lib_posix_remove=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_posix_remove" >&5 +$as_echo "$ac_cv_lib_posix_remove" >&6; } +if test "x$ac_cv_lib_posix_remove" = xyes; then : + X_EXTRA_LIBS="$X_EXTRA_LIBS -lposix" +fi + + fi + + # BSDI BSD/OS 2.1 needs -lipc for XOpenDisplay. + ac_fn_c_check_func "$LINENO" "shmat" "ac_cv_func_shmat" +if test "x$ac_cv_func_shmat" = xyes; then : + +fi + + if test $ac_cv_func_shmat = no; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for shmat in -lipc" >&5 +$as_echo_n "checking for shmat in -lipc... " >&6; } +if ${ac_cv_lib_ipc_shmat+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lipc $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char shmat (); +int +main () +{ +return shmat (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_ipc_shmat=yes +else + ac_cv_lib_ipc_shmat=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_ipc_shmat" >&5 +$as_echo "$ac_cv_lib_ipc_shmat" >&6; } +if test "x$ac_cv_lib_ipc_shmat" = xyes; then : + X_EXTRA_LIBS="$X_EXTRA_LIBS -lipc" +fi + + fi + fi + + # Check for libraries that X11R6 Xt/Xaw programs need. + ac_save_LDFLAGS=$LDFLAGS + test -n "$x_libraries" && LDFLAGS="$LDFLAGS -L$x_libraries" + # SM needs ICE to (dynamically) link under SunOS 4.x (so we have to + # check for ICE first), but we must link in the order -lSM -lICE or + # we get undefined symbols. So assume we have SM if we have ICE. 
+ # These have to be linked with before -lX11, unlike the other + # libraries we check for below, so use a different variable. + # John Interrante, Karl Berry + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for IceConnectionNumber in -lICE" >&5 +$as_echo_n "checking for IceConnectionNumber in -lICE... " >&6; } +if ${ac_cv_lib_ICE_IceConnectionNumber+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lICE $X_EXTRA_LIBS $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char IceConnectionNumber (); +int +main () +{ +return IceConnectionNumber (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_ICE_IceConnectionNumber=yes +else + ac_cv_lib_ICE_IceConnectionNumber=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_ICE_IceConnectionNumber" >&5 +$as_echo "$ac_cv_lib_ICE_IceConnectionNumber" >&6; } +if test "x$ac_cv_lib_ICE_IceConnectionNumber" = xyes; then : + X_PRE_LIBS="$X_PRE_LIBS -lSM -lICE" +fi + + LDFLAGS=$ac_save_LDFLAGS + +fi + + +for ac_header in \ + assert.h \ + fcntl.h \ + grp.h \ + pwd.h \ + libc.h \ + limits.h \ + stdarg.h \ + stdlib.h \ + stdint.h \ + inttypes.h \ + string.h \ + strings.h \ + termios.h \ + termcap.h \ + term.h \ + unistd.h \ + sys/bitypes.h \ + sys/byteorder.h \ + sys/ioctl.h \ + sys/select.h \ + sys/socket.h \ + sys/sockio.h \ + sys/stat.h \ + sys/time.h \ + sys/types.h \ + arpa/nameser.h \ + mtio.h \ + sys/mtio.h \ + sys/tape.h \ + regex.h \ + attr/attributes.h \ + attr/xattr.h \ + +do : + as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` +ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" +if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +fi + +done + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 +$as_echo_n "checking for ANSI C header files... " >&6; } +if ${ac_cv_header_stdc+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include +#include +#include + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_header_stdc=yes +else + ac_cv_header_stdc=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +if test $ac_cv_header_stdc = yes; then + # SunOS 4.x string.h does not declare mem*, contrary to ANSI. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "memchr" >/dev/null 2>&1; then : + +else + ac_cv_header_stdc=no +fi +rm -f conftest* + +fi + +if test $ac_cv_header_stdc = yes; then + # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "free" >/dev/null 2>&1; then : + +else + ac_cv_header_stdc=no +fi +rm -f conftest* + +fi + +if test $ac_cv_header_stdc = yes; then + # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. 
+ if test "$cross_compiling" = yes; then : + : +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include +#if ((' ' & 0x0FF) == 0x020) +# define ISLOWER(c) ('a' <= (c) && (c) <= 'z') +# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) +#else +# define ISLOWER(c) \ + (('a' <= (c) && (c) <= 'i') \ + || ('j' <= (c) && (c) <= 'r') \ + || ('s' <= (c) && (c) <= 'z')) +# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) +#endif + +#define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) +int +main () +{ + int i; + for (i = 0; i < 256; i++) + if (XOR (islower (i), ISLOWER (i)) + || toupper (i) != TOUPPER (i)) + return 2; + return 0; +} +_ACEOF +if ac_fn_c_try_run "$LINENO"; then : + +else + ac_cv_header_stdc=no +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext +fi + +fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 +$as_echo "$ac_cv_header_stdc" >&6; } +if test $ac_cv_header_stdc = yes; then + +$as_echo "#define STDC_HEADERS 1" >>confdefs.h + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether sys/types.h defines makedev" >&5 +$as_echo_n "checking whether sys/types.h defines makedev... " >&6; } +if ${ac_cv_header_sys_types_h_makedev+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +int +main () +{ +return makedev(0, 0); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_header_sys_types_h_makedev=yes +else + ac_cv_header_sys_types_h_makedev=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_sys_types_h_makedev" >&5 +$as_echo "$ac_cv_header_sys_types_h_makedev" >&6; } + +if test $ac_cv_header_sys_types_h_makedev = no; then +ac_fn_c_check_header_mongrel "$LINENO" "sys/mkdev.h" "ac_cv_header_sys_mkdev_h" "$ac_includes_default" +if test "x$ac_cv_header_sys_mkdev_h" = xyes; then : + +$as_echo "#define MAJOR_IN_MKDEV 1" >>confdefs.h + +fi + + + + if test $ac_cv_header_sys_mkdev_h = no; then + ac_fn_c_check_header_mongrel "$LINENO" "sys/sysmacros.h" "ac_cv_header_sys_sysmacros_h" "$ac_includes_default" +if test "x$ac_cv_header_sys_sysmacros_h" = xyes; then : + +$as_echo "#define MAJOR_IN_SYSMACROS 1" >>confdefs.h + +fi + + + fi +fi + +ac_header_dirent=no +for ac_hdr in dirent.h sys/ndir.h sys/dir.h ndir.h; do + as_ac_Header=`$as_echo "ac_cv_header_dirent_$ac_hdr" | $as_tr_sh` +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_hdr that defines DIR" >&5 +$as_echo_n "checking for $ac_hdr that defines DIR... " >&6; } +if eval \${$as_ac_Header+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include <$ac_hdr> + +int +main () +{ +if ((DIR *) 0) +return 0; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + eval "$as_ac_Header=yes" +else + eval "$as_ac_Header=no" +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +eval ac_res=\$$as_ac_Header + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_hdr" | $as_tr_cpp` 1 +_ACEOF + +ac_header_dirent=$ac_hdr; break +fi + +done +# Two versions of opendir et al. are in -ldir and -lx on SCO Xenix. 
+if test $ac_header_dirent = dirent.h; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing opendir" >&5 +$as_echo_n "checking for library containing opendir... " >&6; } +if ${ac_cv_search_opendir+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char opendir (); +int +main () +{ +return opendir (); + ; + return 0; +} +_ACEOF +for ac_lib in '' dir; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_opendir=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_opendir+:} false; then : + break +fi +done +if ${ac_cv_search_opendir+:} false; then : + +else + ac_cv_search_opendir=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_opendir" >&5 +$as_echo "$ac_cv_search_opendir" >&6; } +ac_res=$ac_cv_search_opendir +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + +fi + +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing opendir" >&5 +$as_echo_n "checking for library containing opendir... " >&6; } +if ${ac_cv_search_opendir+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char opendir (); +int +main () +{ +return opendir (); + ; + return 0; +} +_ACEOF +for ac_lib in '' x; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_opendir=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_opendir+:} false; then : + break +fi +done +if ${ac_cv_search_opendir+:} false; then : + +else + ac_cv_search_opendir=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_opendir" >&5 +$as_echo "$ac_cv_search_opendir" >&6; } +ac_res=$ac_cv_search_opendir +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + +fi + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether stat file-mode macros are broken" >&5 +$as_echo_n "checking whether stat file-mode macros are broken... " >&6; } +if ${ac_cv_header_stat_broken+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include + +#if defined S_ISBLK && defined S_IFDIR +extern char c1[S_ISBLK (S_IFDIR) ? -1 : 1]; +#endif + +#if defined S_ISBLK && defined S_IFCHR +extern char c2[S_ISBLK (S_IFCHR) ? -1 : 1]; +#endif + +#if defined S_ISLNK && defined S_IFREG +extern char c3[S_ISLNK (S_IFREG) ? -1 : 1]; +#endif + +#if defined S_ISSOCK && defined S_IFREG +extern char c4[S_ISSOCK (S_IFREG) ? 
-1 : 1];
+#endif
+
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_cv_header_stat_broken=no
+else
+ ac_cv_header_stat_broken=yes
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stat_broken" >&5
+$as_echo "$ac_cv_header_stat_broken" >&6; }
+if test $ac_cv_header_stat_broken = yes; then
+
+$as_echo "#define STAT_MACROS_BROKEN 1" >>confdefs.h
+
+fi
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for sys/wait.h that is POSIX.1 compatible" >&5
+$as_echo_n "checking for sys/wait.h that is POSIX.1 compatible... " >&6; }
+if ${ac_cv_header_sys_wait_h+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <sys/types.h>
+#include <sys/wait.h>
+#ifndef WEXITSTATUS
+# define WEXITSTATUS(stat_val) ((unsigned int) (stat_val) >> 8)
+#endif
+#ifndef WIFEXITED
+# define WIFEXITED(stat_val) (((stat_val) & 255) == 0)
+#endif
+
+int
+main ()
+{
+ int s;
+ wait (&s);
+ s = WIFEXITED (s) ? WEXITSTATUS (s) : 1;
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_cv_header_sys_wait_h=yes
+else
+ ac_cv_header_sys_wait_h=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_sys_wait_h" >&5
+$as_echo "$ac_cv_header_sys_wait_h" >&6; }
+if test $ac_cv_header_sys_wait_h = yes; then
+
+$as_echo "#define HAVE_SYS_WAIT_H 1" >>confdefs.h
+
+fi
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether time.h and sys/time.h may both be included" >&5
+$as_echo_n "checking whether time.h and sys/time.h may both be included... " >&6; }
+if ${ac_cv_header_time+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <sys/types.h>
+#include <sys/time.h>
+#include <time.h>
+
+int
+main ()
+{
+if ((struct tm *) 0)
+return 0;
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_cv_header_time=yes
+else
+ ac_cv_header_time=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_time" >&5
+$as_echo "$ac_cv_header_time" >&6; }
+if test $ac_cv_header_time = yes; then
+
+$as_echo "#define TIME_WITH_SYS_TIME 1" >>confdefs.h
+
+fi
+
+ac_fn_c_check_member "$LINENO" "struct stat" "st_blksize" "ac_cv_member_struct_stat_st_blksize" "$ac_includes_default"
+if test "x$ac_cv_member_struct_stat_st_blksize" = xyes; then :
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE_STRUCT_STAT_ST_BLKSIZE 1
+_ACEOF
+
+
+$as_echo "#define HAVE_ST_BLKSIZE 1" >>confdefs.h
+
+fi
+
+
+ac_fn_c_check_member "$LINENO" "struct stat" "st_blocks" "ac_cv_member_struct_stat_st_blocks" "$ac_includes_default"
+if test "x$ac_cv_member_struct_stat_st_blocks" = xyes; then :
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE_STRUCT_STAT_ST_BLOCKS 1
+_ACEOF
+
+
+$as_echo "#define HAVE_ST_BLOCKS 1" >>confdefs.h
+
+else
+ case " $LIBOBJS " in
+ *" fileblocks.$ac_objext "* ) ;;
+ *) LIBOBJS="$LIBOBJS fileblocks.$ac_objext"
+ ;;
+esac
+
+fi
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether struct tm is in sys/time.h or time.h" >&5
+$as_echo_n "checking whether struct tm is in sys/time.h or time.h... " >&6; }
+if ${ac_cv_struct_tm+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <sys/types.h>
+#include <time.h>
+
+int
+main ()
+{
+struct tm tm;
+ int *p = &tm.tm_sec;
+ return !p;
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_cv_struct_tm=time.h
+else
+ ac_cv_struct_tm=sys/time.h
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_struct_tm" >&5
+$as_echo "$ac_cv_struct_tm" >&6; }
+if test $ac_cv_struct_tm = sys/time.h; then
+
+$as_echo "#define TM_IN_SYS_TIME 1" >>confdefs.h
+
+fi
+
+ac_fn_c_check_member "$LINENO" "struct tm" "tm_zone" "ac_cv_member_struct_tm_tm_zone" "#include <sys/types.h>
+#include <$ac_cv_struct_tm>
+
+"
+if test "x$ac_cv_member_struct_tm_tm_zone" = xyes; then :
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE_STRUCT_TM_TM_ZONE 1
+_ACEOF
+
+
+fi
+
+if test "$ac_cv_member_struct_tm_tm_zone" = yes; then
+
+$as_echo "#define HAVE_TM_ZONE 1" >>confdefs.h
+
+else
+ ac_fn_c_check_decl "$LINENO" "tzname" "ac_cv_have_decl_tzname" "#include <time.h>
+"
+if test "x$ac_cv_have_decl_tzname" = xyes; then :
+ ac_have_decl=1
+else
+ ac_have_decl=0
+fi
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE_DECL_TZNAME $ac_have_decl
+_ACEOF
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for tzname" >&5
+$as_echo_n "checking for tzname... " >&6; }
+if ${ac_cv_var_tzname+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <time.h>
+#if !HAVE_DECL_TZNAME
+extern char *tzname[];
+#endif
+
+int
+main ()
+{
+return tzname[0][0];
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_var_tzname=yes
+else
+ ac_cv_var_tzname=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_var_tzname" >&5
+$as_echo "$ac_cv_var_tzname" >&6; }
+ if test $ac_cv_var_tzname = yes; then
+
+$as_echo "#define HAVE_TZNAME 1" >>confdefs.h
+
+ fi
+fi
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for utime.h" >&5
+$as_echo_n "checking for utime.h... " >&6; }
+if ${ba_cv_header_utime_h+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+ #include <sys/types.h>
+ #include <utime.h>
+
+int
+main ()
+{
+
+ struct utimbuf foo
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+
+ ba_cv_header_utime_h=yes
+
+else
+
+ ba_cv_header_utime_h=no
+
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ba_cv_header_utime_h" >&5
+$as_echo "$ba_cv_header_utime_h" >&6; }
+test $ba_cv_header_utime_h = yes &&
+$as_echo "#define HAVE_UTIME_H 1" >>confdefs.h
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for socklen_t" >&5
+$as_echo_n "checking for socklen_t... " >&6; }
+if ${ba_cv_header_socklen_t+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.
*/ + + #include + #include + +int +main () +{ + + socklen_t x + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + + ba_cv_header_socklen_t=yes + +else + + ba_cv_header_socklen_t=no + + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ba_cv_header_socklen_t" >&5 +$as_echo "$ba_cv_header_socklen_t" >&6; } +test $ba_cv_header_socklen_t = yes && +$as_echo "#define HAVE_SOCKLEN_T 1" >>confdefs.h + + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ioctl_req_t" >&5 +$as_echo_n "checking for ioctl_req_t... " >&6; } +if ${ba_cv_header_ioctl_req_t+:} false; then : + $as_echo_n "(cached) " >&6 +else + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + #include + #include + #include + +int +main () +{ + + int (*d_ioctl)(int fd, unsigned long int request, ...); + d_ioctl = ::ioctl; + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + + ba_cv_header_ioctl_req_t=yes + +else + + ba_cv_header_ioctl_req_t=no + + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ba_cv_header_ioctl_req_t" >&5 +$as_echo "$ba_cv_header_ioctl_req_t" >&6; } +test $ba_cv_header_ioctl_req_t = yes && +$as_echo "#define HAVE_IOCTL_ULINT_REQUEST 1" >>confdefs.h + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for typeof" >&5 +$as_echo_n "checking for typeof... " >&6; } +if ${ba_cv_have_typeof+:} false; then : + $as_echo_n "(cached) " >&6 +else + + if test "$cross_compiling" = yes; then : + + ba_cv_have_typeof=no + + +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + main(){char *a = 0; a = (typeof a)a;} + +_ACEOF +if ac_fn_cxx_try_run "$LINENO"; then : + + ba_cv_have_typeof=yes + +else + + ba_cv_have_typeof=no + +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext +fi + + + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ba_cv_have_typeof" >&5 +$as_echo "$ba_cv_have_typeof" >&6; } +test $ba_cv_have_typeof = yes && +$as_echo "#define HAVE_TYPEOF 1" >>confdefs.h + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for an ANSI C-conforming const" >&5 +$as_echo_n "checking for an ANSI C-conforming const... " >&6; } +if ${ac_cv_c_const+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + +#ifndef __cplusplus + /* Ultrix mips cc rejects this sort of thing. 
*/ + typedef int charset[2]; + const charset cs = { 0, 0 }; + /* SunOS 4.1.1 cc rejects this. */ + char const *const *pcpcc; + char **ppc; + /* NEC SVR4.0.2 mips cc rejects this. */ + struct point {int x, y;}; + static struct point const zero = {0,0}; + /* AIX XL C 1.02.0.0 rejects this. + It does not let you subtract one const X* pointer from another in + an arm of an if-expression whose if-part is not a constant + expression */ + const char *g = "string"; + pcpcc = &g + (g ? g-g : 0); + /* HPUX 7.0 cc rejects these. */ + ++pcpcc; + ppc = (char**) pcpcc; + pcpcc = (char const *const *) ppc; + { /* SCO 3.2v4 cc rejects this sort of thing. */ + char tx; + char *t = &tx; + char const *s = 0 ? (char *) 0 : (char const *) 0; + + *t++ = 0; + if (s) return 0; + } + { /* Someone thinks the Sun supposedly-ANSI compiler will reject this. */ + int x[] = {25, 17}; + const int *foo = &x[0]; + ++foo; + } + { /* Sun SC1.0 ANSI compiler rejects this -- but not the above. */ + typedef const int *iptr; + iptr p = 0; + ++p; + } + { /* AIX XL C 1.02.0.0 rejects this sort of thing, saying + "k.c", line 2.27: 1506-025 (S) Operand must be a modifiable lvalue. */ + struct s { int j; const int *ap[3]; } bx; + struct s *b = &bx; b->j = 5; + } + { /* ULTRIX-32 V3.1 (Rev 9) vcc rejects this */ + const int foo = 10; + if (!foo) return 0; + } + return !cs[0] && !zero.x; +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_c_const=yes +else + ac_cv_c_const=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_const" >&5 +$as_echo "$ac_cv_c_const" >&6; } +if test $ac_cv_c_const = no; then + +$as_echo "#define const /**/" >>confdefs.h + +fi + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether byte ordering is bigendian" >&5 +$as_echo_n "checking whether byte ordering is bigendian... " >&6; } +if ${ac_cv_c_bigendian+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_cv_c_bigendian=unknown + # See if we're dealing with a universal compiler. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#ifndef __APPLE_CC__ + not a universal capable compiler + #endif + typedef int dummy; + +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + + # Check for potential -arch flags. It is not universal unless + # there are at least two -arch flags with different values. + ac_arch= + ac_prev= + for ac_word in $CC $CFLAGS $CPPFLAGS $LDFLAGS; do + if test -n "$ac_prev"; then + case $ac_word in + i?86 | x86_64 | ppc | ppc64) + if test -z "$ac_arch" || test "$ac_arch" = "$ac_word"; then + ac_arch=$ac_word + else + ac_cv_c_bigendian=universal + break + fi + ;; + esac + ac_prev= + elif test "x$ac_word" = "x-arch"; then + ac_prev=arch + fi + done +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + if test $ac_cv_c_bigendian = unknown; then + # See if sys/param.h defines the BYTE_ORDER macro. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include + #include + +int +main () +{ +#if ! (defined BYTE_ORDER && defined BIG_ENDIAN \ + && defined LITTLE_ENDIAN && BYTE_ORDER && BIG_ENDIAN \ + && LITTLE_ENDIAN) + bogus endian macros + #endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + # It does; now see whether it defined to BIG_ENDIAN or not. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#include + #include + +int +main () +{ +#if BYTE_ORDER != BIG_ENDIAN + not big endian + #endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_c_bigendian=yes +else + ac_cv_c_bigendian=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + fi + if test $ac_cv_c_bigendian = unknown; then + # See if defines _LITTLE_ENDIAN or _BIG_ENDIAN (e.g., Solaris). + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include + +int +main () +{ +#if ! (defined _LITTLE_ENDIAN || defined _BIG_ENDIAN) + bogus endian macros + #endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + # It does; now see whether it defined to _BIG_ENDIAN or not. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include + +int +main () +{ +#ifndef _BIG_ENDIAN + not big endian + #endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_c_bigendian=yes +else + ac_cv_c_bigendian=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + fi + if test $ac_cv_c_bigendian = unknown; then + # Compile a test program. + if test "$cross_compiling" = yes; then : + # Try to guess by grepping values from an object file. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +short int ascii_mm[] = + { 0x4249, 0x4765, 0x6E44, 0x6961, 0x6E53, 0x7953, 0 }; + short int ascii_ii[] = + { 0x694C, 0x5454, 0x656C, 0x6E45, 0x6944, 0x6E61, 0 }; + int use_ascii (int i) { + return ascii_mm[i] + ascii_ii[i]; + } + short int ebcdic_ii[] = + { 0x89D3, 0xE3E3, 0x8593, 0x95C5, 0x89C4, 0x9581, 0 }; + short int ebcdic_mm[] = + { 0xC2C9, 0xC785, 0x95C4, 0x8981, 0x95E2, 0xA8E2, 0 }; + int use_ebcdic (int i) { + return ebcdic_mm[i] + ebcdic_ii[i]; + } + extern int foo; + +int +main () +{ +return use_ascii (foo) == use_ebcdic (foo); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + if grep BIGenDianSyS conftest.$ac_objext >/dev/null; then + ac_cv_c_bigendian=yes + fi + if grep LiTTleEnDian conftest.$ac_objext >/dev/null ; then + if test "$ac_cv_c_bigendian" = unknown; then + ac_cv_c_bigendian=no + else + # finding both strings is unlikely to happen, but who knows? + ac_cv_c_bigendian=unknown + fi + fi +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$ac_includes_default +int +main () +{ + + /* Are we little or big endian? From Harbison&Steele. */ + union + { + long int l; + char c[sizeof (long int)]; + } u; + u.l = 1; + return u.c[sizeof (long int) - 1] == 1; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_run "$LINENO"; then : + ac_cv_c_bigendian=no +else + ac_cv_c_bigendian=yes +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext +fi + + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_bigendian" >&5 +$as_echo "$ac_cv_c_bigendian" >&6; } + case $ac_cv_c_bigendian in #( + yes) + +$as_echo "#define HAVE_BIG_ENDIAN 1" >>confdefs.h +;; #( + no) + +$as_echo "#define HAVE_LITTLE_ENDIAN 1" >>confdefs.h + ;; #( + universal) + +$as_echo "#define AC_APPLE_UNIVERSAL_BUILD 1" >>confdefs.h + + ;; #( + *) + as_fn_error $? 
"unknown endianness + presetting ac_cv_c_bigendian=no (or yes) will help" "$LINENO" 5 ;; + esac + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to get filesystem type" >&5 +$as_echo_n "checking how to get filesystem type... " >&6; } +fstype=no +# The order of these tests is important. +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + #include + #include + +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + $as_echo "#define FSTYPE_STATVFS 1" >>confdefs.h + fstype=SVR4 + +fi +rm -f conftest.err conftest.i conftest.$ac_ext +if test $fstype = no; then + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + #include + #include + +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + $as_echo "#define FSTYPE_USG_STATFS 1" >>confdefs.h + fstype=SVR3 + +fi +rm -f conftest.err conftest.i conftest.$ac_ext +fi +if test $fstype = no; then + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + #include + #include + +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + $as_echo "#define FSTYPE_AIX_STATFS 1" >>confdefs.h + fstype=AIX + +fi +rm -f conftest.err conftest.i conftest.$ac_ext +fi +if test $fstype = no; then + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + #include + +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + $as_echo "#define FSTYPE_MNTENT 1" >>confdefs.h + fstype=4.3BSD + +fi +rm -f conftest.err conftest.i conftest.$ac_ext +fi +if test $fstype = no; then + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "f_type;" >/dev/null 2>&1; then : + $as_echo "#define FSTYPE_STATFS 1" >>confdefs.h + fstype=4.4BSD/OSF1 +fi +rm -f conftest* + +fi +if test $fstype = no; then + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/
+
+ #include
+ #include
+
+_ACEOF
+if ac_fn_c_try_cpp "$LINENO"; then :
+ $as_echo "#define FSTYPE_GETMNT 1" >>confdefs.h
+ fstype=Ultrix
+
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $fstype" >&5
+$as_echo "$fstype" >&6; }
+
+ac_fn_c_check_header_mongrel "$LINENO" "sys/statvfs.h" "ac_cv_header_sys_statvfs_h" "$ac_includes_default"
+if test "x$ac_cv_header_sys_statvfs_h" = xyes; then :
+
+$as_echo "#define HAVE_SYS_STATVFS_H 1" >>confdefs.h
+
+fi
+
+
+
+ac_fn_c_check_decl "$LINENO" "O_CLOEXEC" "ac_cv_have_decl_O_CLOEXEC" "
+#ifdef HAVE_FCNTL_H
+# include <fcntl.h>
+#endif
+
+"
+if test "x$ac_cv_have_decl_O_CLOEXEC" = xyes; then :
+ ac_have_decl=1
+else
+ ac_have_decl=0
+fi
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE_DECL_O_CLOEXEC $ac_have_decl
+_ACEOF
+if test $ac_have_decl = 1; then :
+
+else
+
+$as_echo "#define O_CLOEXEC 0" >>confdefs.h
+
+fi
+
+
+ac_fn_c_check_decl "$LINENO" "FD_CLOEXEC" "ac_cv_have_decl_FD_CLOEXEC" "
+#ifdef HAVE_FCNTL_H
+# include <fcntl.h>
+#endif
+
+"
+if test "x$ac_cv_have_decl_FD_CLOEXEC" = xyes; then :
+ ac_have_decl=1
+else
+ ac_have_decl=0
+fi
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE_DECL_FD_CLOEXEC $ac_have_decl
+_ACEOF
+if test $ac_have_decl = 1; then :
+
+else
+
+$as_echo "#define FD_CLOEXEC 0" >>confdefs.h
+
+fi
+
+
+ac_fn_c_check_decl "$LINENO" "SOCK_CLOEXEC" "ac_cv_have_decl_SOCK_CLOEXEC" "
+#ifdef HAVE_SYS_SOCKET_H
+# include <sys/socket.h>
+#endif
+
+"
+if test "x$ac_cv_have_decl_SOCK_CLOEXEC" = xyes; then :
+ ac_have_decl=1
+else
+ ac_have_decl=0
+fi
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE_DECL_SOCK_CLOEXEC $ac_have_decl
+_ACEOF
+if test $ac_have_decl = 1; then :
+
+else
+
+$as_echo "#define SOCK_CLOEXEC 0" >>confdefs.h
+
+fi
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for close on exec modifier for fopen()" >&5
+$as_echo_n "checking for close on exec modifier for fopen()... " >&6; }
+if ${ac_cv_feature_stream_cloexec_flag+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test $ac_cv_have_decl_O_CLOEXEC = yes ; then
+ if test $ac_cv_have_decl_SOCK_CLOEXEC = yes ; then
+ ac_cv_feature_stream_cloexec_flag="e"
+ fi
+ fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_feature_stream_cloexec_flag" >&5
+$as_echo "$ac_cv_feature_stream_cloexec_flag" >&6; }
+
+if test "x$ac_cv_feature_stream_cloexec_flag" = "xe" ; then
+
+$as_echo "#define HAVE_STREAM_CLOEXEC 0" >>confdefs.h
+
+fi
+
+
+cat >>confdefs.h <<_ACEOF
+#define STREAM_CLOEXEC "$ac_cv_feature_stream_cloexec_flag"
+_ACEOF
+
+
+ac_fn_c_check_func "$LINENO" "accept4" "ac_cv_func_accept4"
+if test "x$ac_cv_func_accept4" = xyes; then :
+
+$as_echo "#define HAVE_ACCEPT4 1" >>confdefs.h
+
+fi
+
+
+S3_INC=
+S3_LIBS=
+S3_LDFLAGS=
+have_libs3=no
+
+if test x$support_s3 = xyes; then
+
+# Check whether --with-s3 was given.
+if test "${with_s3+set}" = set; then :
+ withval=$with_s3;
+ case "$with_s3" in
+ no)
+ :
+ ;;
+ yes|*)
+ if test -f ${with_s3}/include/libs3.h; then
+ S3_INC="-I${with_s3}/include"
+ S3_LDFLAGS="-L${with_s3}/lib"
+ with_s3="${with_s3}/include"
+ else
+ with_s3="/usr/include"
+ fi
+
+ as_ac_Header=`$as_echo "ac_cv_header_${with_s3}/libs3.h" | $as_tr_sh`
+ac_fn_c_check_header_mongrel "$LINENO" "${with_s3}/libs3.h" "$as_ac_Header" "$ac_includes_default"
+if eval test \"x\$"$as_ac_Header"\" = x"yes"; then :
+
+
+$as_echo "#define HAVE_LIBS3 1" >>confdefs.h
+
+ S3_LIBS="${S3_LDFLAGS} -ls3"
+ have_libs3="yes"
+
+else
+
+ echo " "
+ echo "libs3.h not found. s3 turned off ..."
+ echo " " + + +fi + + + ;; + esac + +else + + ac_fn_c_check_header_mongrel "$LINENO" "libs3.h" "ac_cv_header_libs3_h" "$ac_includes_default" +if test "x$ac_cv_header_libs3_h" = xyes; then : + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for S3_initialize in -ls3" >&5 +$as_echo_n "checking for S3_initialize in -ls3... " >&6; } +if ${ac_cv_lib_s3_S3_initialize+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ls3 $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char S3_initialize (); +int +main () +{ +return S3_initialize (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_s3_S3_initialize=yes +else + ac_cv_lib_s3_S3_initialize=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_s3_S3_initialize" >&5 +$as_echo "$ac_cv_lib_s3_S3_initialize" >&6; } +if test "x$ac_cv_lib_s3_S3_initialize" = xyes; then : + + S3_LIBS="-ls3" + +$as_echo "#define HAVE_LIBS3 1" >>confdefs.h + + have_libs3=yes + +fi + + +fi + + + +fi + +fi + + + + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +for ac_func in backtrace +do : + ac_fn_cxx_check_func "$LINENO" "backtrace" "ac_cv_func_backtrace" +if test "x$ac_cv_func_backtrace" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_BACKTRACE 1 +_ACEOF + +fi +done + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking return type of signal handlers" >&5 +$as_echo_n "checking return type of signal handlers... " >&6; } +if ${ac_cv_type_signal+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include + +int +main () +{ +return *(signal (0, 0)) (0) == 1; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_type_signal=int +else + ac_cv_type_signal=void +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_type_signal" >&5 +$as_echo "$ac_cv_type_signal" >&6; } + +cat >>confdefs.h <<_ACEOF +#define RETSIGTYPE $ac_cv_type_signal +_ACEOF + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for type of signal functions" >&5 +$as_echo_n "checking for type of signal functions... " >&6; } + if ${bash_cv_signal_vintage+:} false; then : + $as_echo_n "(cached) " >&6 +else + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +int +main () +{ + + sigset_t ss; + struct sigaction sa; + sigemptyset(&ss); sigsuspend(&ss); + sigaction(SIGINT, &sa, (struct sigaction *) 0); + sigprocmask(SIG_BLOCK, &ss, (sigset_t *) 0); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + bash_cv_signal_vintage="posix" +else + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/
+#include <signal.h>
+int
+main ()
+{
+
+ int mask = sigmask(SIGINT);
+ sigsetmask(mask); sigblock(mask); sigpause(mask);
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ bash_cv_signal_vintage="4.2bsd"
+else
+
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+ #include <signal.h>
+ RETSIGTYPE foo() { }
+int
+main ()
+{
+
+ int mask = sigmask(SIGINT);
+ sigset(SIGINT, foo); sigrelse(SIGINT);
+ sighold(SIGINT); sigpause(SIGINT);
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ bash_cv_signal_vintage="svr3"
+else
+ bash_cv_signal_vintage="v7"
+
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+
+fi
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $bash_cv_signal_vintage" >&5
+$as_echo "$bash_cv_signal_vintage" >&6; }
+ if test "$bash_cv_signal_vintage" = "posix"; then
+ $as_echo "#define HAVE_POSIX_SIGNALS 1" >>confdefs.h
+
+ elif test "$bash_cv_signal_vintage" = "4.2bsd"; then
+ $as_echo "#define HAVE_BSD_SIGNALS 1" >>confdefs.h
+
+ elif test "$bash_cv_signal_vintage" = "svr3"; then
+ $as_echo "#define HAVE_USG_SIGHOLD 1" >>confdefs.h
+
+ fi
+
+ac_fn_c_check_type "$LINENO" "mode_t" "ac_cv_type_mode_t" "$ac_includes_default"
+if test "x$ac_cv_type_mode_t" = xyes; then :
+
+else
+
+cat >>confdefs.h <<_ACEOF
+#define mode_t int
+_ACEOF
+
+fi
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for uid_t in sys/types.h" >&5
+$as_echo_n "checking for uid_t in sys/types.h... " >&6; }
+if ${ac_cv_type_uid_t+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <sys/types.h>
+
+_ACEOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+ $EGREP "uid_t" >/dev/null 2>&1; then :
+ ac_cv_type_uid_t=yes
+else
+ ac_cv_type_uid_t=no
+fi
+rm -f conftest*
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_type_uid_t" >&5
+$as_echo "$ac_cv_type_uid_t" >&6; }
+if test $ac_cv_type_uid_t = no; then
+
+$as_echo "#define uid_t int" >>confdefs.h
+
+
+$as_echo "#define gid_t int" >>confdefs.h
+
+fi
+
+ac_fn_c_check_type "$LINENO" "size_t" "ac_cv_type_size_t" "$ac_includes_default"
+if test "x$ac_cv_type_size_t" = xyes; then :
+
+else
+
+cat >>confdefs.h <<_ACEOF
+#define size_t unsigned int
+_ACEOF
+
+fi
+
+ac_fn_c_check_type "$LINENO" "pid_t" "ac_cv_type_pid_t" "$ac_includes_default"
+if test "x$ac_cv_type_pid_t" = xyes; then :
+
+else
+
+cat >>confdefs.h <<_ACEOF
+#define pid_t int
+_ACEOF
+
+fi
+
+ac_fn_c_check_type "$LINENO" "off_t" "ac_cv_type_off_t" "$ac_includes_default"
+if test "x$ac_cv_type_off_t" = xyes; then :
+
+else
+
+cat >>confdefs.h <<_ACEOF
+#define off_t long int
+_ACEOF
+
+fi
+
+
+ ac_fn_c_check_type "$LINENO" "intptr_t" "ac_cv_type_intptr_t" "$ac_includes_default"
+if test "x$ac_cv_type_intptr_t" = xyes; then :
+
+$as_echo "#define HAVE_INTPTR_T 1" >>confdefs.h
+
+else
+ for ac_type in 'int' 'long int' 'long long int'; do
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.
*/ +$ac_includes_default +int +main () +{ +static int test_array [1 - 2 * !(sizeof (void *) <= sizeof ($ac_type))]; +test_array [0] = 0; +return test_array [0]; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + +cat >>confdefs.h <<_ACEOF +#define intptr_t $ac_type +_ACEOF + + ac_type= +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + test -z "$ac_type" && break + done +fi + + + + ac_fn_c_check_type "$LINENO" "uintptr_t" "ac_cv_type_uintptr_t" "$ac_includes_default" +if test "x$ac_cv_type_uintptr_t" = xyes; then : + +$as_echo "#define HAVE_UINTPTR_T 1" >>confdefs.h + +else + for ac_type in 'unsigned int' 'unsigned long int' \ + 'unsigned long long int'; do + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$ac_includes_default +int +main () +{ +static int test_array [1 - 2 * !(sizeof (void *) <= sizeof ($ac_type))]; +test_array [0] = 0; +return test_array [0]; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + +cat >>confdefs.h <<_ACEOF +#define uintptr_t $ac_type +_ACEOF + + ac_type= +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + test -z "$ac_type" && break + done +fi + + +ac_fn_c_check_type "$LINENO" "ino_t" "ac_cv_type_ino_t" "$ac_includes_default" +if test "x$ac_cv_type_ino_t" = xyes; then : + +else + +cat >>confdefs.h <<_ACEOF +#define ino_t unsigned long +_ACEOF + +fi + +ac_fn_c_check_type "$LINENO" "dev_t" "ac_cv_type_dev_t" "$ac_includes_default" +if test "x$ac_cv_type_dev_t" = xyes; then : + +else + +cat >>confdefs.h <<_ACEOF +#define dev_t unsigned long +_ACEOF + +fi + +ac_fn_c_check_type "$LINENO" "daddr_t" "ac_cv_type_daddr_t" "$ac_includes_default" +if test "x$ac_cv_type_daddr_t" = xyes; then : + +else + +cat >>confdefs.h <<_ACEOF +#define daddr_t long +_ACEOF + +fi + +ac_fn_c_check_type "$LINENO" "major_t" "ac_cv_type_major_t" "$ac_includes_default" +if test "x$ac_cv_type_major_t" = xyes; then : + +else + +cat >>confdefs.h <<_ACEOF +#define major_t int +_ACEOF + +fi + +ac_fn_c_check_type "$LINENO" "minor_t" "ac_cv_type_minor_t" "$ac_includes_default" +if test "x$ac_cv_type_minor_t" = xyes; then : + +else + +cat >>confdefs.h <<_ACEOF +#define minor_t int +_ACEOF + +fi + +ac_fn_c_check_type "$LINENO" "ssize_t" "ac_cv_type_ssize_t" "$ac_includes_default" +if test "x$ac_cv_type_ssize_t" = xyes; then : + +else + +cat >>confdefs.h <<_ACEOF +#define ssize_t int +_ACEOF + +fi + +ac_fn_c_check_member "$LINENO" "struct stat" "st_blocks" "ac_cv_member_struct_stat_st_blocks" "$ac_includes_default" +if test "x$ac_cv_member_struct_stat_st_blocks" = xyes; then : + +cat >>confdefs.h <<_ACEOF +#define HAVE_STRUCT_STAT_ST_BLOCKS 1 +_ACEOF + + +$as_echo "#define HAVE_ST_BLOCKS 1" >>confdefs.h + +else + case " $LIBOBJS " in + *" fileblocks.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS fileblocks.$ac_objext" + ;; +esac + +fi + + +ac_fn_c_check_member "$LINENO" "struct stat" "st_rdev" "ac_cv_member_struct_stat_st_rdev" "$ac_includes_default" +if test "x$ac_cv_member_struct_stat_st_rdev" = xyes; then : + +cat >>confdefs.h <<_ACEOF +#define HAVE_STRUCT_STAT_ST_RDEV 1 +_ACEOF + + +$as_echo "#define HAVE_ST_RDEV 1" >>confdefs.h + +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether struct tm is in sys/time.h or time.h" >&5 +$as_echo_n "checking whether struct tm is in sys/time.h or time.h... " >&6; } +if ${ac_cv_struct_tm+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#include +#include + +int +main () +{ +struct tm tm; + int *p = &tm.tm_sec; + return !p; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_struct_tm=time.h +else + ac_cv_struct_tm=sys/time.h +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_struct_tm" >&5 +$as_echo "$ac_cv_struct_tm" >&6; } +if test $ac_cv_struct_tm = sys/time.h; then + +$as_echo "#define TM_IN_SYS_TIME 1" >>confdefs.h + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for an ANSI C-conforming const" >&5 +$as_echo_n "checking for an ANSI C-conforming const... " >&6; } +if ${ac_cv_c_const+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + +#ifndef __cplusplus + /* Ultrix mips cc rejects this sort of thing. */ + typedef int charset[2]; + const charset cs = { 0, 0 }; + /* SunOS 4.1.1 cc rejects this. */ + char const *const *pcpcc; + char **ppc; + /* NEC SVR4.0.2 mips cc rejects this. */ + struct point {int x, y;}; + static struct point const zero = {0,0}; + /* AIX XL C 1.02.0.0 rejects this. + It does not let you subtract one const X* pointer from another in + an arm of an if-expression whose if-part is not a constant + expression */ + const char *g = "string"; + pcpcc = &g + (g ? g-g : 0); + /* HPUX 7.0 cc rejects these. */ + ++pcpcc; + ppc = (char**) pcpcc; + pcpcc = (char const *const *) ppc; + { /* SCO 3.2v4 cc rejects this sort of thing. */ + char tx; + char *t = &tx; + char const *s = 0 ? (char *) 0 : (char const *) 0; + + *t++ = 0; + if (s) return 0; + } + { /* Someone thinks the Sun supposedly-ANSI compiler will reject this. */ + int x[] = {25, 17}; + const int *foo = &x[0]; + ++foo; + } + { /* Sun SC1.0 ANSI compiler rejects this -- but not the above. */ + typedef const int *iptr; + iptr p = 0; + ++p; + } + { /* AIX XL C 1.02.0.0 rejects this sort of thing, saying + "k.c", line 2.27: 1506-025 (S) Operand must be a modifiable lvalue. */ + struct s { int j; const int *ap[3]; } bx; + struct s *b = &bx; b->j = 5; + } + { /* ULTRIX-32 V3.1 (Rev 9) vcc rejects this */ + const int foo = 10; + if (!foo) return 0; + } + return !cs[0] && !zero.x; +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_c_const=yes +else + ac_cv_c_const=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_const" >&5 +$as_echo "$ac_cv_c_const" >&6; } +if test $ac_cv_c_const = no; then + +$as_echo "#define const /**/" >>confdefs.h + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C/C++ restrict keyword" >&5 +$as_echo_n "checking for C/C++ restrict keyword... " >&6; } +if ${ac_cv_c_restrict+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_cv_c_restrict=no + # The order here caters to the fact that C++ does not require restrict. + for ac_kw in __restrict __restrict__ _Restrict restrict; do + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +typedef int * int_ptr; + int foo (int_ptr $ac_kw ip) { + return ip[0]; + } +int +main () +{ +int s[1]; + int * $ac_kw t = s; + t[0] = 0; + return foo(t) + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_c_restrict=$ac_kw +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + test "$ac_cv_c_restrict" != no && break + done + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_restrict" >&5 +$as_echo "$ac_cv_c_restrict" >&6; } + + case $ac_cv_c_restrict in + restrict) ;; + no) $as_echo "#define restrict /**/" >>confdefs.h + ;; + *) cat >>confdefs.h <<_ACEOF +#define restrict $ac_cv_c_restrict +_ACEOF + ;; + esac + + +# The cast to long int works around a bug in the HP C Compiler +# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects +# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. +# This bug is HP SR number 8606223364. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of char" >&5 +$as_echo_n "checking size of char... " >&6; } +if ${ac_cv_sizeof_char+:} false; then : + $as_echo_n "(cached) " >&6 +else + if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (char))" "ac_cv_sizeof_char" "$ac_includes_default"; then : + +else + if test "$ac_cv_type_char" = yes; then + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error 77 "cannot compute sizeof (char) +See \`config.log' for more details" "$LINENO" 5; } + else + ac_cv_sizeof_char=0 + fi +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_char" >&5 +$as_echo "$ac_cv_sizeof_char" >&6; } + + + +cat >>confdefs.h <<_ACEOF +#define SIZEOF_CHAR $ac_cv_sizeof_char +_ACEOF + + +# The cast to long int works around a bug in the HP C Compiler +# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects +# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. +# This bug is HP SR number 8606223364. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of short int" >&5 +$as_echo_n "checking size of short int... " >&6; } +if ${ac_cv_sizeof_short_int+:} false; then : + $as_echo_n "(cached) " >&6 +else + if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (short int))" "ac_cv_sizeof_short_int" "$ac_includes_default"; then : + +else + if test "$ac_cv_type_short_int" = yes; then + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error 77 "cannot compute sizeof (short int) +See \`config.log' for more details" "$LINENO" 5; } + else + ac_cv_sizeof_short_int=0 + fi +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_short_int" >&5 +$as_echo "$ac_cv_sizeof_short_int" >&6; } + + + +cat >>confdefs.h <<_ACEOF +#define SIZEOF_SHORT_INT $ac_cv_sizeof_short_int +_ACEOF + + +# The cast to long int works around a bug in the HP C Compiler +# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects +# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. +# This bug is HP SR number 8606223364. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of int" >&5 +$as_echo_n "checking size of int... 
" >&6; } +if ${ac_cv_sizeof_int+:} false; then : + $as_echo_n "(cached) " >&6 +else + if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (int))" "ac_cv_sizeof_int" "$ac_includes_default"; then : + +else + if test "$ac_cv_type_int" = yes; then + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error 77 "cannot compute sizeof (int) +See \`config.log' for more details" "$LINENO" 5; } + else + ac_cv_sizeof_int=0 + fi +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_int" >&5 +$as_echo "$ac_cv_sizeof_int" >&6; } + + + +cat >>confdefs.h <<_ACEOF +#define SIZEOF_INT $ac_cv_sizeof_int +_ACEOF + + +# The cast to long int works around a bug in the HP C Compiler +# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects +# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. +# This bug is HP SR number 8606223364. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of long int" >&5 +$as_echo_n "checking size of long int... " >&6; } +if ${ac_cv_sizeof_long_int+:} false; then : + $as_echo_n "(cached) " >&6 +else + if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (long int))" "ac_cv_sizeof_long_int" "$ac_includes_default"; then : + +else + if test "$ac_cv_type_long_int" = yes; then + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error 77 "cannot compute sizeof (long int) +See \`config.log' for more details" "$LINENO" 5; } + else + ac_cv_sizeof_long_int=0 + fi +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_long_int" >&5 +$as_echo "$ac_cv_sizeof_long_int" >&6; } + + + +cat >>confdefs.h <<_ACEOF +#define SIZEOF_LONG_INT $ac_cv_sizeof_long_int +_ACEOF + + +# The cast to long int works around a bug in the HP C Compiler +# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects +# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. +# This bug is HP SR number 8606223364. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of long long int" >&5 +$as_echo_n "checking size of long long int... " >&6; } +if ${ac_cv_sizeof_long_long_int+:} false; then : + $as_echo_n "(cached) " >&6 +else + if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (long long int))" "ac_cv_sizeof_long_long_int" "$ac_includes_default"; then : + +else + if test "$ac_cv_type_long_long_int" = yes; then + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error 77 "cannot compute sizeof (long long int) +See \`config.log' for more details" "$LINENO" 5; } + else + ac_cv_sizeof_long_long_int=0 + fi +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_long_long_int" >&5 +$as_echo "$ac_cv_sizeof_long_long_int" >&6; } + + + +cat >>confdefs.h <<_ACEOF +#define SIZEOF_LONG_LONG_INT $ac_cv_sizeof_long_long_int +_ACEOF + + +# The cast to long int works around a bug in the HP C Compiler +# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects +# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. +# This bug is HP SR number 8606223364. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of int *" >&5 +$as_echo_n "checking size of int *... 
" >&6; } +if ${ac_cv_sizeof_int_p+:} false; then : + $as_echo_n "(cached) " >&6 +else + if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (int *))" "ac_cv_sizeof_int_p" "$ac_includes_default"; then : + +else + if test "$ac_cv_type_int_p" = yes; then + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error 77 "cannot compute sizeof (int *) +See \`config.log' for more details" "$LINENO" 5; } + else + ac_cv_sizeof_int_p=0 + fi +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_int_p" >&5 +$as_echo "$ac_cv_sizeof_int_p" >&6; } + + + +cat >>confdefs.h <<_ACEOF +#define SIZEOF_INT_P $ac_cv_sizeof_int_p +_ACEOF + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for u_int type" >&5 +$as_echo_n "checking for u_int type... " >&6; } +if ${ac_cv_have_u_int+:} false; then : + $as_echo_n "(cached) " >&6 +else + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + #include + +int +main () +{ + + u_int a; a = 1; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + + ac_cv_have_u_int="yes" + +else + + ac_cv_have_u_int="no" + + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_have_u_int" >&5 +$as_echo "$ac_cv_have_u_int" >&6; } +if test "x$ac_cv_have_u_int" = "xyes" ; then + $as_echo "#define HAVE_U_INT 1" >>confdefs.h + + have_u_int=1 +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for intmax_t type" >&5 +$as_echo_n "checking for intmax_t type... " >&6; } +if ${ac_cv_have_intmax_t+:} false; then : + $as_echo_n "(cached) " >&6 +else + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + #include + +int +main () +{ + + intmax_t a; a = 1; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + + ac_cv_have_intmax_t="yes" + +else + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + #include + +int +main () +{ + + intmax_t a; a = 1; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + + ac_cv_have_intmax_t="yes" + +else + + ac_cv_have_intmax_t="no" + + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_have_intmax_t" >&5 +$as_echo "$ac_cv_have_intmax_t" >&6; } +if test "x$ac_cv_have_intmax_t" = "xyes" ; then + $as_echo "#define HAVE_INTMAX_T 1" >>confdefs.h + + have_intmax_t=1 +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for u_intmax_t type" >&5 +$as_echo_n "checking for u_intmax_t type... " >&6; } +if ${ac_cv_have_u_intmax_t+:} false; then : + $as_echo_n "(cached) " >&6 +else + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + #include + +int +main () +{ + + u_intmax_t a; a = 1; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + + ac_cv_have_u_intmax_t="yes" + +else + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + + #include + +int +main () +{ + + u_intmax_t a; a = 1; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + + ac_cv_have_u_intmax_t="yes" + +else + + ac_cv_have_u_intmax_t="no" + + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_have_u_intmax_t" >&5 +$as_echo "$ac_cv_have_u_intmax_t" >&6; } +if test "x$ac_cv_have_u_intmax_t" = "xyes" ; then + $as_echo "#define HAVE_U_INTMAX_T 1" >>confdefs.h + + have_u_intmax_t=1 +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for intXX_t types" >&5 +$as_echo_n "checking for intXX_t types... " >&6; } +if ${ac_cv_have_intxx_t+:} false; then : + $as_echo_n "(cached) " >&6 +else + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + #include + +int +main () +{ + + int8_t a; int16_t b; int32_t c; a = b = c = 1; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + + ac_cv_have_intxx_t="yes" + +else + + ac_cv_have_intxx_t="no" + + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_have_intxx_t" >&5 +$as_echo "$ac_cv_have_intxx_t" >&6; } +if test "x$ac_cv_have_intxx_t" = "xyes" ; then + $as_echo "#define HAVE_INTXX_T 1" >>confdefs.h + + have_intxx_t=1 +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for int64_t type" >&5 +$as_echo_n "checking for int64_t type... " >&6; } +if ${ac_cv_have_int64_t+:} false; then : + $as_echo_n "(cached) " >&6 +else + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + #include + +int +main () +{ + + int64_t a; a = 1; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + + ac_cv_have_int64_t="yes" + +else + + ac_cv_have_int64_t="no" + + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_have_int64_t" >&5 +$as_echo "$ac_cv_have_int64_t" >&6; } +if test "x$ac_cv_have_int64_t" = "xyes" ; then + $as_echo "#define HAVE_INT64_T 1" >>confdefs.h + + have_int64_t=1 +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for u_intXX_t types" >&5 +$as_echo_n "checking for u_intXX_t types... " >&6; } +if ${ac_cv_have_u_intxx_t+:} false; then : + $as_echo_n "(cached) " >&6 +else + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + #include + +int +main () +{ + + u_int8_t a; u_int16_t b; u_int32_t c; a = b = c = 1; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + + ac_cv_have_u_intxx_t="yes" + +else + + ac_cv_have_u_intxx_t="no" + + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_have_u_intxx_t" >&5 +$as_echo "$ac_cv_have_u_intxx_t" >&6; } +if test "x$ac_cv_have_u_intxx_t" = "xyes" ; then + $as_echo "#define HAVE_U_INTXX_T 1" >>confdefs.h + + have_u_intxx_t=1 +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for u_int64_t types" >&5 +$as_echo_n "checking for u_int64_t types... " >&6; } +if ${ac_cv_have_u_int64_t+:} false; then : + $as_echo_n "(cached) " >&6 +else + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + + #include + +int +main () +{ + + u_int64_t a; a = 1; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + + ac_cv_have_u_int64_t="yes" + +else + + ac_cv_have_u_int64_t="no" + + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_have_u_int64_t" >&5 +$as_echo "$ac_cv_have_u_int64_t" >&6; } +if test "x$ac_cv_have_u_int64_t" = "xyes" ; then + $as_echo "#define HAVE_U_INT64_T 1" >>confdefs.h + + have_u_int64_t=1 +fi + +if (test -z "$have_u_intxx_t" || test -z "$have_intxx_t" && \ + test "x$ac_cv_header_sys_bitypes_h" = "xyes") +then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for intXX_t and u_intXX_t types in sys/bitypes.h" >&5 +$as_echo_n "checking for intXX_t and u_intXX_t types in sys/bitypes.h... " >&6; } + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + #include + +int +main () +{ + + int8_t a; int16_t b; int32_t c; + u_int8_t e; u_int16_t f; u_int32_t g; + a = b = c = e = f = g = 1; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + + $as_echo "#define HAVE_U_INTXX_T 1" >>confdefs.h + + $as_echo "#define HAVE_INTXX_T 1" >>confdefs.h + + $as_echo "#define HAVE_SYS_BITYPES_H 1" >>confdefs.h + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + +else + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi + +if test -z "$have_u_intxx_t" ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for uintXX_t types" >&5 +$as_echo_n "checking for uintXX_t types... " >&6; } +if ${ac_cv_have_uintxx_t+:} false; then : + $as_echo_n "(cached) " >&6 +else + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + #include + +int +main () +{ + + uint8_t a; uint16_t b; + uint32_t c; a = b = c = 1; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + + ac_cv_have_uintxx_t="yes" + +else + + ac_cv_have_uintxx_t="no" + + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_have_uintxx_t" >&5 +$as_echo "$ac_cv_have_uintxx_t" >&6; } + if test "x$ac_cv_have_uintxx_t" = "xyes" ; then + $as_echo "#define HAVE_UINTXX_T 1" >>confdefs.h + + fi +fi + +if (test -z "$have_u_int64_t" || test -z "$have_int64_t" && \ + test "x$ac_cv_header_sys_bitypes_h" = "xyes") +then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for int64_t and u_int64_t types in sys/bitypes.h" >&5 +$as_echo_n "checking for int64_t and u_int64_t types in sys/bitypes.h... " >&6; } + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + #include + +int +main () +{ + + int64_t a; u_int64_t b; + a = b = 1; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + + $as_echo "#define HAVE_U_INT64_T 1" >>confdefs.h + + $as_echo "#define HAVE_INT64_T 1" >>confdefs.h + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + +else + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi + +if (test -z "$have_uintxx_t" && \ + test "x$ac_cv_header_sys_bitypes_h" = "xyes") +then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for uintXX_t types in sys/bitypes.h" >&5 +$as_echo_n "checking for uintXX_t types in sys/bitypes.h... 
" >&6; } + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + #include + +int +main () +{ + + uint8_t a; uint16_t b; + uint32_t c; a = b = c = 1; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + + $as_echo "#define HAVE_UINTXX_T 1" >>confdefs.h + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + +else + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi + +for ac_func in \ + fork \ + getcwd \ + gethostname \ + getpid \ + gettimeofday \ + setpgid \ + setpgrp \ + setsid \ + signal \ + strerror \ + strncmp \ + strncpy \ + vfprintf \ + +do : + as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" +if eval test \"x\$"$as_ac_var"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF + +else + echo 'configure: cannot find needed function.'; exit 1 + +fi +done + + +ac_fn_c_check_decl "$LINENO" "F_CLOSEM" "ac_cv_have_decl_F_CLOSEM" "#include + +" +if test "x$ac_cv_have_decl_F_CLOSEM" = xyes; then : + +$as_echo "#define HAVE_FCNTL_F_CLOSEM 1" >>confdefs.h + +fi + + +ac_fn_c_check_decl "$LINENO" "F_SETLK" "ac_cv_have_decl_F_SETLK" "#include + +" +if test "x$ac_cv_have_decl_F_SETLK" = xyes; then : + +$as_echo "#define HAVE_FCNTL_LOCK 1" >>confdefs.h + +fi + + +ac_fn_c_check_func "$LINENO" "closefrom" "ac_cv_func_closefrom" +if test "x$ac_cv_func_closefrom" = xyes; then : + +$as_echo "#define HAVE_CLOSEFROM 1" >>confdefs.h + +fi + +for ac_func in getpagesize +do : + ac_fn_c_check_func "$LINENO" "getpagesize" "ac_cv_func_getpagesize" +if test "x$ac_cv_func_getpagesize" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_GETPAGESIZE 1 +_ACEOF + +$as_echo "#define HAVE_GETPAGESIZE 1" >>confdefs.h + +fi +done + +for ac_func in malloc_trim +do : + ac_fn_c_check_func "$LINENO" "malloc_trim" "ac_cv_func_malloc_trim" +if test "x$ac_cv_func_malloc_trim" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_MALLOC_TRIM 1 +_ACEOF + +$as_echo "#define HAVE_MALLOC_TRIM 1" >>confdefs.h + +fi +done + + +for ac_func in fchdir +do : + ac_fn_c_check_func "$LINENO" "fchdir" "ac_cv_func_fchdir" +if test "x$ac_cv_func_fchdir" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_FCHDIR 1 +_ACEOF + $as_echo "#define HAVE_FCHDIR 1" >>confdefs.h + +fi +done + +for ac_func in strtoll +do : + ac_fn_c_check_func "$LINENO" "strtoll" "ac_cv_func_strtoll" +if test "x$ac_cv_func_strtoll" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_STRTOLL 1 +_ACEOF + $as_echo "#define HAVE_STRTOLL 1" >>confdefs.h + +fi +done + +for ac_func in posix_fadvise +do : + ac_fn_c_check_func "$LINENO" "posix_fadvise" "ac_cv_func_posix_fadvise" +if test "x$ac_cv_func_posix_fadvise" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_POSIX_FADVISE 1 +_ACEOF + +fi +done + +for ac_func in posix_fallocate +do : + ac_fn_c_check_func "$LINENO" "posix_fallocate" "ac_cv_func_posix_fallocate" +if test "x$ac_cv_func_posix_fallocate" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_POSIX_FALLOCATE 1 +_ACEOF + +fi +done + +for ac_func in fdatasync +do : + ac_fn_c_check_func "$LINENO" "fdatasync" "ac_cv_func_fdatasync" +if test "x$ac_cv_func_fdatasync" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_FDATASYNC 1 +_ACEOF + +fi +done + +for ac_func in realpath +do : + ac_fn_c_check_func "$LINENO" "realpath" "ac_cv_func_realpath" +if test 
"x$ac_cv_func_realpath" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_REALPATH 1 +_ACEOF + +fi +done + +for ac_func in getrlimit +do : + ac_fn_c_check_func "$LINENO" "getrlimit" "ac_cv_func_getrlimit" +if test "x$ac_cv_func_getrlimit" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_GETRLIMIT 1 +_ACEOF + +fi +done + + +for ac_func in chflags +do : + ac_fn_c_check_func "$LINENO" "chflags" "ac_cv_func_chflags" +if test "x$ac_cv_func_chflags" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_CHFLAGS 1 +_ACEOF + +fi +done + + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int f(int b) { + return __builtin_bswap32(b); +} + +int +main () +{ +int a = f(10); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + +$as_echo "#define HAVE_BSWAP32 1" >>confdefs.h + +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + +for ac_func in snprintf vsnprintf gethostid fseeko +do : + as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" +if eval test \"x\$"$as_ac_var"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF + +fi +done + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for va_copy" >&5 +$as_echo_n "checking for va_copy... " >&6; } +if ${ba_cv_va_copy+:} false; then : + $as_echo_n "(cached) " >&6 +else + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + #include + void use_va_copy(va_list args){va_list args2; va_copy(args2,args); va_end(args2);} + void call_use_va_copy(int junk,...){va_list args; va_start(args,junk); use_va_copy(args); va_end(args);} + +int +main () +{ + + call_use_va_copy(1,2,3) + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + + ba_cv_va_copy=yes + +else + + ba_cv_va_copy=no + + +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ba_cv_va_copy" >&5 +$as_echo "$ba_cv_va_copy" >&6; } +test $ba_cv_va_copy = yes && +$as_echo "#define HAVE_VA_COPY 1" >>confdefs.h + + +for ac_func in localtime_r readdir_r strerror_r gethostbyname_r +do : + as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" +if eval test \"x\$"$as_ac_var"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF + +fi +done + + +# If resolver functions are not in libc check for -lnsl or -lresolv. +ac_fn_c_check_func "$LINENO" "gethostbyname_r" "ac_cv_func_gethostbyname_r" +if test "x$ac_cv_func_gethostbyname_r" = xyes; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: result: using libc's resolver" >&5 +$as_echo "using libc's resolver" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for gethostbyname_r in -lnsl" >&5 +$as_echo_n "checking for gethostbyname_r in -lnsl... " >&6; } +if ${ac_cv_lib_nsl_gethostbyname_r+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lnsl $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char gethostbyname_r (); +int +main () +{ +return gethostbyname_r (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_nsl_gethostbyname_r=yes +else + ac_cv_lib_nsl_gethostbyname_r=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_nsl_gethostbyname_r" >&5 +$as_echo "$ac_cv_lib_nsl_gethostbyname_r" >&6; } +if test "x$ac_cv_lib_nsl_gethostbyname_r" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBNSL 1 +_ACEOF + + LIBS="-lnsl $LIBS" + +fi + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for gethostbyname_r in -lresolv" >&5 +$as_echo_n "checking for gethostbyname_r in -lresolv... " >&6; } +if ${ac_cv_lib_resolv_gethostbyname_r+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lresolv $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char gethostbyname_r (); +int +main () +{ +return gethostbyname_r (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_resolv_gethostbyname_r=yes +else + ac_cv_lib_resolv_gethostbyname_r=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_resolv_gethostbyname_r" >&5 +$as_echo "$ac_cv_lib_resolv_gethostbyname_r" >&6; } +if test "x$ac_cv_lib_resolv_gethostbyname_r" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBRESOLV 1 +_ACEOF + + LIBS="-lresolv $LIBS" + +fi + +fi + + +for ac_func in inet_pton +do : + ac_fn_c_check_func "$LINENO" "inet_pton" "ac_cv_func_inet_pton" +if test "x$ac_cv_func_inet_pton" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_INET_PTON 1 +_ACEOF + $as_echo "#define HAVE_INET_PTON 1" >>confdefs.h + +fi +done + +for ac_func in inet_ntop +do : + ac_fn_c_check_func "$LINENO" "inet_ntop" "ac_cv_func_inet_ntop" +if test "x$ac_cv_func_inet_ntop" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_INET_NTOP 1 +_ACEOF + $as_echo "#define HAVE_INET_NTOP 1" >>confdefs.h + +fi +done + +for ac_func in gethostbyname2 +do : + ac_fn_c_check_func "$LINENO" "gethostbyname2" "ac_cv_func_gethostbyname2" +if test "x$ac_cv_func_gethostbyname2" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_GETHOSTBYNAME2 1 +_ACEOF + $as_echo "#define HAVE_GETHOSTBYNAME2 1" >>confdefs.h + +fi +done + +for ac_func in getnameinfo +do : + ac_fn_c_check_func "$LINENO" "getnameinfo" "ac_cv_func_getnameinfo" +if test "x$ac_cv_func_getnameinfo" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_GETNAMEINFO 1 +_ACEOF + $as_echo "#define HAVE_GETNAMEINFO 1" >>confdefs.h + +fi +done + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for struct sockaddr has a sa_len field" >&5 +$as_echo_n "checking for struct sockaddr has a sa_len field... " >&6; } +if ${ac_cv_struct_sockaddr_sa_len+:} false; then : + $as_echo_n "(cached) " >&6 +else + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + + #include + +int +main () +{ + + struct sockaddr s; s.sa_len; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + + ac_cv_struct_sockaddr_sa_len=yes + +else + ac_cv_struct_sockaddr_sa_len=no + + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_struct_sockaddr_sa_len" >&5 +$as_echo "$ac_cv_struct_sockaddr_sa_len" >&6; } + +if test $ac_cv_struct_sockaddr_sa_len = yes; then + +$as_echo "#define HAVE_SA_LEN 1" >>confdefs.h + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for working getaddrinfo" >&5 +$as_echo_n "checking for working getaddrinfo... " >&6; } +if ${ac_cv_working_getaddrinfo+:} false; then : + $as_echo_n "(cached) " >&6 +else + + if test "$cross_compiling" = yes; then : + + ac_cv_working_getaddrinfo="yes" + + +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + #include + #include + #include + #include + + void main(void) { + struct addrinfo hints, *ai; + int error; + + memset(&hints, 0, sizeof(hints)); + hints.ai_family = AF_UNSPEC; + hints.ai_socktype = SOCK_STREAM; + error = getaddrinfo("127.0.0.1", NULL, &hints, &ai); + if (error) { + exit(1); + } + if (ai->ai_addr->sa_family != AF_INET) { + exit(1); + } + exit(0); + } + +_ACEOF +if ac_fn_c_try_run "$LINENO"; then : + + ac_cv_working_getaddrinfo="yes" + +else + + ac_cv_working_getaddrinfo="no" + +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext +fi + + + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_working_getaddrinfo" >&5 +$as_echo "$ac_cv_working_getaddrinfo" >&6; } +ac_fn_c_check_func "$LINENO" "gai_strerror" "ac_cv_func_gai_strerror" +if test "x$ac_cv_func_gai_strerror" = xyes; then : + +$as_echo "#define HAVE_GAI_STRERROR 1" >>confdefs.h + +fi + + +if test "$ac_cv_working_getaddrinfo" = "yes"; then + if test "$ac_cv_func_gai_strerror" != "yes"; then + ac_cv_working_getaddrinfo="no" + else + +$as_echo "#define HAVE_GETADDRINFO 1" >>confdefs.h + + fi +fi + +for ac_func in strftime +do : + ac_fn_c_check_func "$LINENO" "strftime" "ac_cv_func_strftime" +if test "x$ac_cv_func_strftime" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_STRFTIME 1 +_ACEOF + +else + # strftime is in -lintl on SCO UNIX. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for strftime in -lintl" >&5 +$as_echo_n "checking for strftime in -lintl... " >&6; } +if ${ac_cv_lib_intl_strftime+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lintl $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char strftime (); +int +main () +{ +return strftime (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_intl_strftime=yes +else + ac_cv_lib_intl_strftime=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_intl_strftime" >&5 +$as_echo "$ac_cv_lib_intl_strftime" >&6; } +if test "x$ac_cv_lib_intl_strftime" = xyes; then : + $as_echo "#define HAVE_STRFTIME 1" >>confdefs.h + +LIBS="-lintl $LIBS" +fi + +fi +done + +for ac_func in vprintf +do : + ac_fn_c_check_func "$LINENO" "vprintf" "ac_cv_func_vprintf" +if test "x$ac_cv_func_vprintf" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_VPRINTF 1 +_ACEOF + +ac_fn_c_check_func "$LINENO" "_doprnt" "ac_cv_func__doprnt" +if test "x$ac_cv_func__doprnt" = xyes; then : + +$as_echo "#define HAVE_DOPRNT 1" >>confdefs.h + +fi + +fi +done + + +# The Ultrix 4.2 mips builtin alloca declared by alloca.h only works +# for constant arguments. Useless! +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for working alloca.h" >&5 +$as_echo_n "checking for working alloca.h... " >&6; } +if ${ac_cv_working_alloca_h+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +int +main () +{ +char *p = (char *) alloca (2 * sizeof (int)); + if (p) return 0; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_working_alloca_h=yes +else + ac_cv_working_alloca_h=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_working_alloca_h" >&5 +$as_echo "$ac_cv_working_alloca_h" >&6; } +if test $ac_cv_working_alloca_h = yes; then + +$as_echo "#define HAVE_ALLOCA_H 1" >>confdefs.h + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for alloca" >&5 +$as_echo_n "checking for alloca... " >&6; } +if ${ac_cv_func_alloca_works+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#ifdef __GNUC__ +# define alloca __builtin_alloca +#else +# ifdef _MSC_VER +# include +# define alloca _alloca +# else +# ifdef HAVE_ALLOCA_H +# include +# else +# ifdef _AIX + #pragma alloca +# else +# ifndef alloca /* predefined by HP cc +Olibcalls */ +void *alloca (size_t); +# endif +# endif +# endif +# endif +#endif + +int +main () +{ +char *p = (char *) alloca (1); + if (p) return 0; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_func_alloca_works=yes +else + ac_cv_func_alloca_works=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_alloca_works" >&5 +$as_echo "$ac_cv_func_alloca_works" >&6; } + +if test $ac_cv_func_alloca_works = yes; then + +$as_echo "#define HAVE_ALLOCA 1" >>confdefs.h + +else + # The SVR3 libPW and SVR4 libucb both contain incompatible functions +# that cause trouble. Some versions do not even contain alloca or +# contain a buggy version. If you still want to use their alloca, +# use ar to extract alloca.o from them instead of compiling alloca.c. 
+ +ALLOCA=\${LIBOBJDIR}alloca.$ac_objext + +$as_echo "#define C_ALLOCA 1" >>confdefs.h + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether \`alloca.c' needs Cray hooks" >&5 +$as_echo_n "checking whether \`alloca.c' needs Cray hooks... " >&6; } +if ${ac_cv_os_cray+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#if defined CRAY && ! defined CRAY2 +webecray +#else +wenotbecray +#endif + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "webecray" >/dev/null 2>&1; then : + ac_cv_os_cray=yes +else + ac_cv_os_cray=no +fi +rm -f conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_os_cray" >&5 +$as_echo "$ac_cv_os_cray" >&6; } +if test $ac_cv_os_cray = yes; then + for ac_func in _getb67 GETB67 getb67; do + as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" +if eval test \"x\$"$as_ac_var"\" = x"yes"; then : + +cat >>confdefs.h <<_ACEOF +#define CRAY_STACKSEG_END $ac_func +_ACEOF + + break +fi + + done +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking stack direction for C alloca" >&5 +$as_echo_n "checking stack direction for C alloca... " >&6; } +if ${ac_cv_c_stack_direction+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test "$cross_compiling" = yes; then : + ac_cv_c_stack_direction=0 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$ac_includes_default +int +find_stack_direction (int *addr, int depth) +{ + int dir, dummy = 0; + if (! addr) + addr = &dummy; + *addr = addr < &dummy ? 1 : addr == &dummy ? 0 : -1; + dir = depth ? find_stack_direction (addr, depth - 1) : 0; + return dir + dummy; +} + +int +main (int argc, char **argv) +{ + return find_stack_direction (0, argc + !argv + 20) < 0; +} +_ACEOF +if ac_fn_c_try_run "$LINENO"; then : + ac_cv_c_stack_direction=1 +else + ac_cv_c_stack_direction=-1 +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_stack_direction" >&5 +$as_echo "$ac_cv_c_stack_direction" >&6; } +cat >>confdefs.h <<_ACEOF +#define STACK_DIRECTION $ac_cv_c_stack_direction +_ACEOF + + +fi + +# getmntent is in the standard C library on UNICOS, in -lsun on Irix 4, +# -lseq on Dynix/PTX, -lgen on Unixware. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing getmntent" >&5 +$as_echo_n "checking for library containing getmntent... " >&6; } +if ${ac_cv_search_getmntent+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char getmntent (); +int +main () +{ +return getmntent (); + ; + return 0; +} +_ACEOF +for ac_lib in '' sun seq gen; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_getmntent=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_getmntent+:} false; then : + break +fi +done +if ${ac_cv_search_getmntent+:} false; then : + +else + ac_cv_search_getmntent=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_getmntent" >&5 +$as_echo "$ac_cv_search_getmntent" >&6; } +ac_res=$ac_cv_search_getmntent +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + ac_cv_func_getmntent=yes + +$as_echo "#define HAVE_GETMNTENT 1" >>confdefs.h + +else + ac_cv_func_getmntent=no +fi + + +for ac_func in getmntinfo +do : + ac_fn_c_check_func "$LINENO" "getmntinfo" "ac_cv_func_getmntinfo" +if test "x$ac_cv_func_getmntinfo" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_GETMNTINFO 1 +_ACEOF + $as_echo "#define HAVE_GETMNTINFO 1" >>confdefs.h + +fi +done + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether closedir returns void" >&5 +$as_echo_n "checking whether closedir returns void... " >&6; } +if ${ac_cv_func_closedir_void+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test "$cross_compiling" = yes; then : + ac_cv_func_closedir_void=yes +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$ac_includes_default +#include <$ac_header_dirent> +#ifndef __cplusplus +int closedir (); +#endif + +int +main () +{ +return closedir (opendir (".")) != 0; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_run "$LINENO"; then : + ac_cv_func_closedir_void=no +else + ac_cv_func_closedir_void=yes +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_closedir_void" >&5 +$as_echo "$ac_cv_func_closedir_void" >&6; } +if test $ac_cv_func_closedir_void = yes; then + +$as_echo "#define CLOSEDIR_VOID 1" >>confdefs.h + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether setpgrp takes no argument" >&5 +$as_echo_n "checking whether setpgrp takes no argument... " >&6; } +if ${ac_cv_func_setpgrp_void+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test "$cross_compiling" = yes; then : + as_fn_error $? "cannot check setpgrp when cross compiling" "$LINENO" 5 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$ac_includes_default +int +main () +{ +/* If this system has a BSD-style setpgrp which takes arguments, + setpgrp(1, 1) will fail with ESRCH and return -1, in that case + exit successfully. 
*/ + return setpgrp (1,1) != -1; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_run "$LINENO"; then : + ac_cv_func_setpgrp_void=no +else + ac_cv_func_setpgrp_void=yes +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_setpgrp_void" >&5 +$as_echo "$ac_cv_func_setpgrp_void" >&6; } +if test $ac_cv_func_setpgrp_void = yes; then + +$as_echo "#define SETPGRP_VOID 1" >>confdefs.h + +fi + # AC_FUNC_FNMATCH dnl use local version + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for gettext in -lintl" >&5 +$as_echo_n "checking for gettext in -lintl... " >&6; } +if ${ac_cv_lib_intl_gettext+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lintl $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char gettext (); +int +main () +{ +return gettext (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_intl_gettext=yes +else + ac_cv_lib_intl_gettext=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_intl_gettext" >&5 +$as_echo "$ac_cv_lib_intl_gettext" >&6; } +if test "x$ac_cv_lib_intl_gettext" = xyes; then : + LIBS="$LIBS -lintl" +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for getpwnam in -lsun" >&5 +$as_echo_n "checking for getpwnam in -lsun... " >&6; } +if ${ac_cv_lib_sun_getpwnam+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lsun $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char getpwnam (); +int +main () +{ +return getpwnam (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_sun_getpwnam=yes +else + ac_cv_lib_sun_getpwnam=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_sun_getpwnam" >&5 +$as_echo "$ac_cv_lib_sun_getpwnam" >&6; } +if test "x$ac_cv_lib_sun_getpwnam" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBSUN 1 +_ACEOF + + LIBS="-lsun $LIBS" + +fi + + +for ac_header in zlib.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "zlib.h" "ac_cv_header_zlib_h" "$ac_includes_default" +if test "x$ac_cv_header_zlib_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_ZLIB_H 1 +_ACEOF + +fi + +done + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for deflate in -lz" >&5 +$as_echo_n "checking for deflate in -lz... " >&6; } +if ${ac_cv_lib_z_deflate+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lz $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char deflate (); +int +main () +{ +return deflate (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_z_deflate=yes +else + ac_cv_lib_z_deflate=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_z_deflate" >&5 +$as_echo "$ac_cv_lib_z_deflate" >&6; } +if test "x$ac_cv_lib_z_deflate" = xyes; then : + ZLIBS="-lz" +fi + +have_zlib=no +if test x$ZLIBS = x-lz; then + $as_echo "#define HAVE_LIBZ 1" >>confdefs.h + + have_zlib=yes +fi + + +AFS_CFLAGS="" +AFS_LIBS="" +support_afs=auto +# Check whether --enable-afs was given. +if test "${enable_afs+set}" = set; then : + enableval=$enable_afs; + if test x$enableval = xyes; then + support_afs=yes + elif test x$enableval = xno; then + support_afs=no + fi + + +fi + + +have_afs=no +if test x$support_afs = xyes -o x$support_afs = xauto; then + +# Check whether --with-afsdir was given. +if test "${with_afsdir+set}" = set; then : + withval=$with_afsdir; with_afsdir=$withval + +fi + + + if test x$with_afsdir = x; then + for root in /usr /usr/local; do + if test -d ${root}/include/afs/ ; then + with_afsdir=${root} + break + fi + if test -d ${root}/include/openafs/afs/ ; then + with_afsdir=${root} + break + fi + done + fi + + if test -d ${with_afsdir}/include/afs/ ; then + AFS_CFLAGS="-I${with_afsdir}/include" + else + if test -d ${with_afsdir}/include/openafs/afs/ ; then + AFS_CFLAGS="-I${with_afsdir}/include/openafs" + fi + fi + + saved_CFLAGS="${CFLAGS}" + saved_CPPFLAGS="${CPPFLAGS}" + CFLAGS="${AFS_CFLAGS} ${saved_CFLAGS}" + CPPFLAGS="${AFS_CFLAGS} ${saved_CPPFLAGS}" + + for ac_header in afs/afsint.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "afs/afsint.h" "ac_cv_header_afs_afsint_h" "$ac_includes_default" +if test "x$ac_cv_header_afs_afsint_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_AFS_AFSINT_H 1 +_ACEOF + +fi + +done + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + #include <afs/afsint.h> + #include <afs/venus.h> + +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + +$as_echo "#define HAVE_AFS_VENUS_H 1" >>confdefs.h + + +fi +rm -f conftest.err conftest.i conftest.$ac_ext + + CFLAGS="${saved_CFLAGS}" + CPPFLAGS="${saved_CPPFLAGS}" + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for pioctl in AFS libsys" >&5 +$as_echo_n "checking for pioctl in AFS libsys... " >&6; } + for dir in ${with_afsdir}/lib \ + ${with_afsdir}/lib/afs \ + ${with_afsdir}/lib/openafs \ + ${with_afsdir}/lib64 \ + ${with_afsdir}/lib64/afs \ + ${with_afsdir}/lib64/openafs + do + for arch_type in .a .so + do + A=`test -f ${dir}/libsys${arch_type} && nm ${dir}/libsys${arch_type} 2>/dev/null | grep pioctl` + pkg=$? + if test $pkg = 0; then + have_afs=yes + AFS_LIBS="-L${dir} -lsys -lrx -llwp ${dir}/util${arch_type}" + break + fi + done + done + + if test $have_afs = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + fi + + if test x$support_afs = xyes -a $have_afs != yes; then + as_fn_error $? 
"afs support explicitly enabled but no supported afs implementation found, + please either load the afs libraries or rerun configure without --enable-afs" "$LINENO" 5 + else + if test $have_afs = yes; then + +$as_echo "#define HAVE_AFS 1" >>confdefs.h + + +$as_echo "#define HAVE_AFS_ACL 1" >>confdefs.h + + fi + fi +fi + + + + +# Check whether --enable-lzo was given. +if test "${enable_lzo+set}" = set; then : + enableval=$enable_lzo; + if test x$enableval = xno; then + support_lzo=no + fi + + +fi + + +LZO_INC= +LZO_LIBS= +LZO_LDFLAGS= + +have_lzo="no" +if test x$support_lzo = xyes; then + +# Check whether --with-lzo was given. +if test "${with_lzo+set}" = set; then : + withval=$with_lzo; + case "$with_lzo" in + no) + : + ;; + yes|*) + if test -f ${with_lzo}/include/lzo/lzoconf.h; then + LZO_INC="-I${with_lzo}/include" + LZO_LDFLAGS="-L${with_lzo}/lib" + with_lzo="${with_lzo}/include" + else + with_lzo="/usr/include" + fi + + as_ac_Header=`$as_echo "ac_cv_header_${with_lzo}/lzo/lzoconf.h" | $as_tr_sh` +ac_fn_c_check_header_mongrel "$LINENO" "${with_lzo}/lzo/lzoconf.h" "$as_ac_Header" "$ac_includes_default" +if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : + + +$as_echo "#define HAVE_LZO 1" >>confdefs.h + + LZO_LIBS="${LZO_LDFLAGS} -llzo2" + have_lzo="yes" + +else + + echo " " + echo "lzoconf.h not found. lzo turned off ..." + echo " " + + +fi + + + ;; + esac + +else + + ac_fn_c_check_header_mongrel "$LINENO" "lzo/lzoconf.h" "ac_cv_header_lzo_lzoconf_h" "$ac_includes_default" +if test "x$ac_cv_header_lzo_lzoconf_h" = xyes; then : + + ac_fn_c_check_header_mongrel "$LINENO" "lzo/lzo1x.h" "ac_cv_header_lzo_lzo1x_h" "$ac_includes_default" +if test "x$ac_cv_header_lzo_lzo1x_h" = xyes; then : + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for lzo1x_1_compress in -llzo2" >&5 +$as_echo_n "checking for lzo1x_1_compress in -llzo2... " >&6; } +if ${ac_cv_lib_lzo2_lzo1x_1_compress+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-llzo2 $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char lzo1x_1_compress (); +int +main () +{ +return lzo1x_1_compress (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_lzo2_lzo1x_1_compress=yes +else + ac_cv_lib_lzo2_lzo1x_1_compress=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_lzo2_lzo1x_1_compress" >&5 +$as_echo "$ac_cv_lib_lzo2_lzo1x_1_compress" >&6; } +if test "x$ac_cv_lib_lzo2_lzo1x_1_compress" = xyes; then : + + LZO_LIBS="-llzo2" + +$as_echo "#define HAVE_LZO 1" >>confdefs.h + + have_lzo=yes + +fi + + +fi + + + +fi + + + +fi + +fi + + + + + +support_acl=auto +# Check whether --enable-acl was given. 
+if test "${enable_acl+set}" = set; then : + enableval=$enable_acl; + if test x$enableval = xyes; then + support_acl=yes + elif test x$enableval = xno; then + support_acl=no + fi + + +fi + + +have_acl=no +have_extended_acl=no +if test x$support_acl = xyes -o x$support_acl = xauto; then + ac_fn_c_check_header_mongrel "$LINENO" "sys/acl.h" "ac_cv_header_sys_acl_h" "$ac_includes_default" +if test "x$ac_cv_header_sys_acl_h" = xyes; then : + +$as_echo "#define HAVE_SYS_ACL_H 1" >>confdefs.h + +fi + + + + ac_fn_c_check_func "$LINENO" "acl_get_file" "ac_cv_func_acl_get_file" +if test "x$ac_cv_func_acl_get_file" = xyes; then : + + have_acl=yes + +fi + + + if test $have_acl = no; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for acl_get_file in -lacl" >&5 +$as_echo_n "checking for acl_get_file in -lacl... " >&6; } +if ${ac_cv_lib_acl_acl_get_file+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lacl $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char acl_get_file (); +int +main () +{ +return acl_get_file (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_acl_acl_get_file=yes +else + ac_cv_lib_acl_acl_get_file=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_acl_acl_get_file" >&5 +$as_echo "$ac_cv_lib_acl_acl_get_file" >&6; } +if test "x$ac_cv_lib_acl_acl_get_file" = xyes; then : + + have_acl=yes + if test $have_afs = yes; then + if test -d /usr/lib64/; then + FDLIBS="-L/usr/lib64 -lacl $FDLIBS" + else + FDLIBS="-L/usr/lib -lacl $FDLIBS" + fi + else + FDLIBS="-lacl $FDLIBS" + fi + +fi + + fi + + if test $have_acl = no -a x${HAVE_OSF1_OS_TRUE} = x; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for acl_get_file in -lpacl" >&5 +$as_echo_n "checking for acl_get_file in -lpacl... " >&6; } +if ${ac_cv_lib_pacl_acl_get_file+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lpacl $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char acl_get_file (); +int +main () +{ +return acl_get_file (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_pacl_acl_get_file=yes +else + ac_cv_lib_pacl_acl_get_file=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_pacl_acl_get_file" >&5 +$as_echo "$ac_cv_lib_pacl_acl_get_file" >&6; } +if test "x$ac_cv_lib_pacl_acl_get_file" = xyes; then : + + have_acl=yes + FDLIBS="-lpacl $FDLIBS" + +fi + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ACL_TYPE_DEFAULT_DIR in acl.h include file" >&5 +$as_echo_n "checking for ACL_TYPE_DEFAULT_DIR in acl.h include file... " >&6; } + grep ACL_TYPE_DEFAULT_DIR /usr/include/sys/acl.h > /dev/null 2>&1 + if test $? 
= 0; then + +$as_echo "#define HAVE_ACL_TYPE_DEFAULT_DIR 1" >>confdefs.h + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + fi + fi + + if test $have_acl = yes -a x${HAVE_DARWIN_OS_TRUE} = x; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ACL_TYPE_EXTENDED in acl.h include file" >&5 +$as_echo_n "checking for ACL_TYPE_EXTENDED in acl.h include file... " >&6; } + grep ACL_TYPE_EXTENDED /usr/include/sys/acl.h > /dev/null 2>&1 + if test $? = 0; then + +$as_echo "#define HAVE_ACL_TYPE_EXTENDED 1" >>confdefs.h + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + fi + fi + + if test $have_acl = yes -a \ + x${HAVE_FREEBSD_OS_TRUE} = x; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ACL_TYPE_NFS4 in acl.h include file" >&5 +$as_echo_n "checking for ACL_TYPE_NFS4 in acl.h include file... " >&6; } + grep ACL_TYPE_NFS4 /usr/include/sys/acl.h > /dev/null 2>&1 + if test $? = 0; then + +$as_echo "#define HAVE_ACL_TYPE_NFS4 1" >>confdefs.h + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + fi + fi + + if test $have_acl = no -a \ + x${HAVE_SUN_OS_TRUE} = x; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for acltotext in -lsec" >&5 +$as_echo_n "checking for acltotext in -lsec... " >&6; } +if ${ac_cv_lib_sec_acltotext+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lsec $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char acltotext (); +int +main () +{ +return acltotext (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_sec_acltotext=yes +else + ac_cv_lib_sec_acltotext=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_sec_acltotext" >&5 +$as_echo "$ac_cv_lib_sec_acltotext" >&6; } +if test "x$ac_cv_lib_sec_acltotext" = xyes; then : + + have_acl=yes + FDLIBS="-lsec $FDLIBS" + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for acl_totext in -lsec" >&5 +$as_echo_n "checking for acl_totext in -lsec... " >&6; } +if ${ac_cv_lib_sec_acl_totext+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lsec $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char acl_totext (); +int +main () +{ +return acl_totext (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_sec_acl_totext=yes +else + ac_cv_lib_sec_acl_totext=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_sec_acl_totext" >&5 +$as_echo "$ac_cv_lib_sec_acl_totext" >&6; } +if test "x$ac_cv_lib_sec_acl_totext" = xyes; then : + + have_extended_acl=yes + + +fi + + + +fi + + fi + + if test $have_acl = no -a \ + x${HAVE_AIX_OS_TRUE} = x; then + ac_fn_c_check_func "$LINENO" "acl_get" "ac_cv_func_acl_get" +if test "x$ac_cv_func_acl_get" = xyes; then : + + have_acl=yes + + ac_fn_c_check_func "$LINENO" "aclx_get" "ac_cv_func_aclx_get" +if test "x$ac_cv_func_aclx_get" = xyes; then : + + have_extended_acl=yes + + +fi + + + +fi + + fi + +ACLOBJS= + if test x$support_acl = xyes -a $have_acl != yes; then + as_fn_error $? "acl support explicitly enabled but no supported acl implementation found, + please either load the acl libraries or rerun configure without --enable-acl" "$LINENO" 5 + else + if test $have_acl = yes; then + +$as_echo "#define HAVE_ACL 1" >>confdefs.h + + if test x${HAVE_LINUX_OS_TRUE} = x; then + ACLOBJS="bacl.c bacl_linux.c" + fi + if test x${HAVE_SUN_OS_TRUE} = x; then + ACLOBJS="bacl.c bacl_solaris.c" + fi + if test x${HAVE_FREEBSD_OS_TRUE} = x; then + ACLOBJS="bacl.c bacl_freebsd.c" + fi + if test x${HAVE_DARWIN_OS_TRUE} = x; then + ACLOBJS="bacl.c bacl_osx.c" + fi + fi + if test $have_extended_acl = yes; then + +$as_echo "#define HAVE_EXTENDED_ACL 1" >>confdefs.h + + fi + fi +fi + + +support_xattr=auto +# Check whether --enable-xattr was given. 
+if test "${enable_xattr+set}" = set; then : + enableval=$enable_xattr; + if test x$enableval = xyes; then + support_xattr=yes + elif test x$enableval = xno; then + support_xattr=no + fi + + +fi + + +have_xattr=no +if test x$support_xattr = xyes -o x$support_xattr = xauto; then + if test x${HAVE_FREEBSD_OS_TRUE} = x -o \ + x${HAVE_NETBSD_OS_TRUE} = x -o \ + x${HAVE_OPENBSD_OS_TRUE} = x; then + ac_fn_c_check_header_mongrel "$LINENO" "sys/extattr.h" "ac_cv_header_sys_extattr_h" "$ac_includes_default" +if test "x$ac_cv_header_sys_extattr_h" = xyes; then : + +$as_echo "#define HAVE_SYS_EXTATTR_H 1" >>confdefs.h + +fi + + + ac_fn_c_check_header_mongrel "$LINENO" "libutil.h" "ac_cv_header_libutil_h" "$ac_includes_default" +if test "x$ac_cv_header_libutil_h" = xyes; then : + +$as_echo "#define HAVE_LIBUTIL_H 1" >>confdefs.h + +fi + + + for ac_func in extattr_get_link extattr_set_link extattr_list_link +do : + as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" +if eval test \"x\$"$as_ac_var"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF + + have_xattr=yes + +$as_echo "#define HAVE_EXTATTR_GET_LINK 1" >>confdefs.h + + +$as_echo "#define HAVE_EXTATTR_SET_LINK 1" >>confdefs.h + + +$as_echo "#define HAVE_EXTATTR_LIST_LINK 1" >>confdefs.h + + + +fi +done + + + if test $have_xattr = no; then + for ac_func in extattr_get_file extattr_set_file extattr_list_file +do : + as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" +if eval test \"x\$"$as_ac_var"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF + + have_xattr=yes + +$as_echo "#define HAVE_EXTATTR_GET_FILE 1" >>confdefs.h + + +$as_echo "#define HAVE_EXTATTR_SET_FILE 1" >>confdefs.h + + +$as_echo "#define HAVE_EXTATTR_LIST_FILE 1" >>confdefs.h + + + +fi +done + + fi + + if test $have_xattr = yes; then + have_extattr_string_in_libc=no + for ac_func in extattr_namespace_to_string extattr_string_to_namespace +do : + as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" +if eval test \"x\$"$as_ac_var"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF + + have_extattr_string_in_libc=yes + +$as_echo "#define HAVE_EXTATTR_NAMESPACE_TO_STRING 1" >>confdefs.h + + +$as_echo "#define HAVE_EXTATTR_STRING_TO_NAMESPACE 1" >>confdefs.h + + + +fi +done + + + if test $have_extattr_string_in_libc = no; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for extattr_namespace_to_string extattr_string_to_namespace in -lutil" >&5 +$as_echo_n "checking for extattr_namespace_to_string extattr_string_to_namespace in -lutil... " >&6; } +if ${ac_cv_lib_util_extattr_namespace_to_string_extattr_string_to_namespace+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lutil $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char extattr_namespace_to_string extattr_string_to_namespace (); +int +main () +{ +return extattr_namespace_to_string extattr_string_to_namespace (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_util_extattr_namespace_to_string_extattr_string_to_namespace=yes +else + ac_cv_lib_util_extattr_namespace_to_string_extattr_string_to_namespace=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_util_extattr_namespace_to_string_extattr_string_to_namespace" >&5 +$as_echo "$ac_cv_lib_util_extattr_namespace_to_string_extattr_string_to_namespace" >&6; } +if test "x$ac_cv_lib_util_extattr_namespace_to_string_extattr_string_to_namespace" = xyes; then : + + +$as_echo "#define HAVE_EXTATTR_NAMESPACE_TO_STRING 1" >>confdefs.h + + +$as_echo "#define HAVE_EXTATTR_STRING_TO_NAMESPACE 1" >>confdefs.h + + FDLIBS="-lutil $FDLIBS" + + +fi + + fi + fi + fi + + if test $have_xattr = no -a \ + x${HAVE_AIX_OS_TRUE} = x; then + ac_fn_c_check_header_mongrel "$LINENO" "sys/ea.h" "ac_cv_header_sys_ea_h" "$ac_includes_default" +if test "x$ac_cv_header_sys_ea_h" = xyes; then : + +$as_echo "#define HAVE_SYS_EA_H 1" >>confdefs.h + +fi + + + for ac_func in llistea lgetea lsetea +do : + as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" +if eval test \"x\$"$as_ac_var"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF + + have_xattr=yes + +$as_echo "#define HAVE_LLISTEA 1" >>confdefs.h + + +$as_echo "#define HAVE_LGETEA 1" >>confdefs.h + + +$as_echo "#define HAVE_LSETEA 1" >>confdefs.h + + + +fi +done + + + if test $have_xattr = no; then + for ac_func in listea getea setea +do : + as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" +if eval test \"x\$"$as_ac_var"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF + + have_xattr=yes + +$as_echo "#define HAVE_LISTEA 1" >>confdefs.h + + +$as_echo "#define HAVE_GETEA 1" >>confdefs.h + + +$as_echo "#define HAVE_SETEA 1" >>confdefs.h + + + +fi +done + + fi + fi + + if test $have_xattr = no -a \ + x${HAVE_OSF1_OS_TRUE} = x; then + ac_fn_c_check_header_mongrel "$LINENO" "sys/proplist.h" "ac_cv_header_sys_proplist_h" "$ac_includes_default" +if test "x$ac_cv_header_sys_proplist_h" = xyes; then : + +$as_echo "#define HAVE_SYS_PROPLIST_H 1" >>confdefs.h + +fi + + + for ac_func in getproplist get_proplist_entry sizeof_proplist_entry add_proplist_entry setproplist +do : + as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" +if eval test \"x\$"$as_ac_var"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF + + have_xattr=yes + +$as_echo "#define HAVE_GETPROPLIST 1" >>confdefs.h + + +$as_echo "#define HAVE_GET_PROPLIST_ENTRY 1" >>confdefs.h + + +$as_echo "#define HAVE_SIZEOF_PROPLIST_ENTRY 1" >>confdefs.h + + +$as_echo "#define HAVE_ADD_PROPLIST_ENTRY 1" >>confdefs.h + + +$as_echo "#define HAVE_SETPROPLIST 1" >>confdefs.h + + + +fi +done + + fi + + if test $have_xattr = no -a \ + x${HAVE_SUN_OS_TRUE} = x; then + ac_fn_c_check_header_mongrel "$LINENO" "sys/attr.h" "ac_cv_header_sys_attr_h" "$ac_includes_default" +if test "x$ac_cv_header_sys_attr_h" = xyes; 
then : + +$as_echo "#define HAVE_SYS_ATTR_H 1" >>confdefs.h + +fi + + + ac_fn_c_check_header_mongrel "$LINENO" "sys/nvpair.h" "ac_cv_header_sys_nvpair_h" "$ac_includes_default" +if test "x$ac_cv_header_sys_nvpair_h" = xyes; then : + +$as_echo "#define HAVE_SYS_NVPAIR_H 1" >>confdefs.h + +fi + + + ac_fn_c_check_header_mongrel "$LINENO" "attr.h" "ac_cv_header_attr_h" "$ac_includes_default" +if test "x$ac_cv_header_attr_h" = xyes; then : + +$as_echo "#define HAVE_ATTR_H 1" >>confdefs.h + +fi + + + + for ac_func in openat attropen unlinkat fchownat futimesat linkat +do : + as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" +if eval test \"x\$"$as_ac_var"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF + + have_xattr=yes + +$as_echo "#define HAVE_OPENAT 1" >>confdefs.h + + +$as_echo "#define HAVE_ATTROPEN 1" >>confdefs.h + + +$as_echo "#define HAVE_UNLINKAT 1" >>confdefs.h + + +$as_echo "#define HAVE_FCHOWNAT 1" >>confdefs.h + + +$as_echo "#define HAVE_FUTIMESAT 1" >>confdefs.h + + +$as_echo "#define HAVE_LINKAT 1" >>confdefs.h + + + +fi +done + + + if test $have_xattr = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for nvlist_next_nvpair in -lnvpair" >&5 +$as_echo_n "checking for nvlist_next_nvpair in -lnvpair... " >&6; } +if ${ac_cv_lib_nvpair_nvlist_next_nvpair+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lnvpair $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char nvlist_next_nvpair (); +int +main () +{ +return nvlist_next_nvpair (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_nvpair_nvlist_next_nvpair=yes +else + ac_cv_lib_nvpair_nvlist_next_nvpair=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_nvpair_nvlist_next_nvpair" >&5 +$as_echo "$ac_cv_lib_nvpair_nvlist_next_nvpair" >&6; } +if test "x$ac_cv_lib_nvpair_nvlist_next_nvpair" = xyes; then : + + +$as_echo "#define HAVE_NVLIST_NEXT_NVPAIR 1" >>confdefs.h + + FDLIBS="-lnvpair $FDLIBS" + + +fi + + fi + fi + + if test $have_xattr = no; then + ac_fn_c_check_header_mongrel "$LINENO" "sys/xattr.h" "ac_cv_header_sys_xattr_h" "$ac_includes_default" +if test "x$ac_cv_header_sys_xattr_h" = xyes; then : + +$as_echo "#define HAVE_SYS_XATTR_H 1" >>confdefs.h + +fi + + + for ac_func in llistxattr lgetxattr lsetxattr +do : + as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" +if eval test \"x\$"$as_ac_var"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF + + have_xattr=yes + +$as_echo "#define HAVE_LLISTXATTR 1" >>confdefs.h + + +$as_echo "#define HAVE_LGETXATTR 1" >>confdefs.h + + +$as_echo "#define HAVE_LSETXATTR 1" >>confdefs.h + + + +fi +done + + + if test $have_xattr = no; then + for ac_func in listxattr getxattr setxattr +do : + as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" +if eval test \"x\$"$as_ac_var"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo 
"HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF + + have_xattr=yes + +$as_echo "#define HAVE_LISTXATTR 1" >>confdefs.h + + +$as_echo "#define HAVE_GETXATTR 1" >>confdefs.h + + +$as_echo "#define HAVE_SETXATTR 1" >>confdefs.h + + + +fi +done + + fi + fi + +XATTROBJS= + if test x$support_xattr = xyes -a $have_xattr != yes; then + as_fn_error $? "xattr support explicitly enabled but no supported xattr implementation found, + please either load the xattr libraries or rerun configure without --enable-xattr" "$LINENO" 5 + else + if test $have_xattr = yes; then + +$as_echo "#define HAVE_XATTR 1" >>confdefs.h + + if test x${HAVE_LINUX_OS_TRUE} = x; then + XATTROBJS="bxattr.c bxattr_linux.c" + fi + if test x${HAVE_SUN_OS_TRUE} = x; then + XATTROBJS="bxattr.c bxattr_solaris.c" + fi + if test x${HAVE_FREEBSD_OS_TRUE} = x; then + XATTROBJS="bxattr.c bxattr_freebsd.c" + fi + if test x${HAVE_DARWIN_OS_TRUE} = x; then + XATTROBJS="bxattr.c bxattr_osx.c" + fi + fi + fi +fi + + +PTHREAD_LIB="" +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for pthread_create in -lpthread" >&5 +$as_echo_n "checking for pthread_create in -lpthread... " >&6; } +if ${ac_cv_lib_pthread_pthread_create+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lpthread $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char pthread_create (); +int +main () +{ +return pthread_create (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_pthread_pthread_create=yes +else + ac_cv_lib_pthread_pthread_create=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_pthread_pthread_create" >&5 +$as_echo "$ac_cv_lib_pthread_pthread_create" >&6; } +if test "x$ac_cv_lib_pthread_pthread_create" = xyes; then : + PTHREAD_LIB="-lpthread" +else + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for pthread_create in -lpthreads" >&5 +$as_echo_n "checking for pthread_create in -lpthreads... " >&6; } +if ${ac_cv_lib_pthreads_pthread_create+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lpthreads $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char pthread_create (); +int +main () +{ +return pthread_create (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_pthreads_pthread_create=yes +else + ac_cv_lib_pthreads_pthread_create=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_pthreads_pthread_create" >&5 +$as_echo "$ac_cv_lib_pthreads_pthread_create" >&6; } +if test "x$ac_cv_lib_pthreads_pthread_create" = xyes; then : + PTHREAD_LIB="-lpthreads" +else + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for pthread_create in -lc_r" >&5 +$as_echo_n "checking for pthread_create in -lc_r... 
" >&6; } +if ${ac_cv_lib_c_r_pthread_create+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lc_r $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char pthread_create (); +int +main () +{ +return pthread_create (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_c_r_pthread_create=yes +else + ac_cv_lib_c_r_pthread_create=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_c_r_pthread_create" >&5 +$as_echo "$ac_cv_lib_c_r_pthread_create" >&6; } +if test "x$ac_cv_lib_c_r_pthread_create" = xyes; then : + PTHREAD_LIB="-lc_r" +else + + ac_fn_c_check_func "$LINENO" "pthread_create" "ac_cv_func_pthread_create" +if test "x$ac_cv_func_pthread_create" = xyes; then : + +fi + + + +fi + + + +fi + + + +fi + + +for ac_header in sys/prctl.h sys/capability.h +do : + as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` +ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" +if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +fi + +done + +for ac_func in prctl setreuid +do : + as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" +if eval test \"x\$"$as_ac_var"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF + +fi +done + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for cap_set_proc in -lcap" >&5 +$as_echo_n "checking for cap_set_proc in -lcap... " >&6; } +if ${ac_cv_lib_cap_cap_set_proc+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lcap $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char cap_set_proc (); +int +main () +{ +return cap_set_proc (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_cap_cap_set_proc=yes +else + ac_cv_lib_cap_cap_set_proc=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_cap_cap_set_proc" >&5 +$as_echo "$ac_cv_lib_cap_cap_set_proc" >&6; } +if test "x$ac_cv_lib_cap_cap_set_proc" = xyes; then : + CAP_LIBS="-lcap" +else + CAP_LIBS= +fi + +if test x$CAP_LIBS = x-lcap; then + +$as_echo "#define HAVE_LIBCAP 1" >>confdefs.h + +fi + + + +$as_echo "#define FDLIBS 1" >>confdefs.h + + +CFLAGS=${CFLAGS--O} + +if test x$have_gcc = xyes ; then + CPPFLAGS="$CPPFLAGS -x c++ -fno-strict-aliasing -fno-exceptions -fno-rtti" + CFLAGS="$CFLAGS -x c++ -fno-strict-aliasing -fno-exceptions -fno-rtti" +fi +LDFLAGS=${LDFLAGS--O} +CPPFLAGS="$CPPFLAGS" +CFLAGS="$CFLAGS" + + + + + + + + + + + + + + + +OBJLIST= + + +lld="lld" +llu="llu" + +WCFLAGS= +WLDFLAGS= + +PSCMD="ps -e" +WIN32= +MACOSX= +COMPRESS_MANPAGES=yes + +case "$DISTNAME" in +aix) + DISTVER=`uname -r` + PSCMD="ps -e -o pid,comm" + PFILES="${PFILES} platforms/aix/Makefile" + TAPEDRIVE="/dev/rmt0.1" + ;; +alpha) + DISTVER=`uname -r` + PTHREAD_LIB="-lpthread -lexc" + if test "${CC}" = "gcc" ; then + lld="lld" + llu="llu" + else + lld="ld" + llu="lu" + fi + TAPEDRIVE="/dev/nrmt0" + ;; +bsdi) + DISTVER=`uname -a |awk '{print $3}'` + TAPEDRIVE="/dev/nrmt0" + PTHREAD_LIB="-pthread" + CFLAGS="${CFLAGS} -pthread" + PSCMD="ps -ax -o pid,command" + lld="qd" + llu="qu" + PFILES="${PFILES} \ + platforms/bsdi/Makefile \ + platforms/bsdi/bacula-fd \ + platforms/bsdi/bacula-sd \ + platforms/bsdi/bacula-dir" + largefile_support="yes" + ;; +cygwin) + DISTVER=`uname -a |awk '{print $3}'` + TAPEDRIVE="/dev/nrst0" + WIN32=win32 + WCFLAGS="-mwindows" + WLDFLAGS="-mwindows" + ;; +darwin) + DISTVER=`uname -r` + TAPEDRIVE="/dev/nst0" + PSCMD="ps -e -o pid,command" + MACOSX=macosx + PFILES="${PFILES} \ + platforms/darwin/Makefile" + ;; +osx) + DISTVER=`uname -r` + TAPEDRIVE="/dev/nst0" + PSCMD="ps -e -o pid,command" + MACOSX=macosx + PFILES="${PFILES} \ + platforms/osx/Makefile" + ;; +debian) + if `test -f /etc/apt/sources.list && grep -q ubuntu /etc/apt/sources.list`; then + DISTNAME="ubuntu" + fi + DISTVER=`cat /etc/debian_version` + if test -f /etc/lsb-release ; then + . 
/etc/lsb-release + if test "x$DISTRIB_ID" != "x" ; then + DISTNAME=$DISTRIB_ID + fi + if test "x$DISTRIB_RELEASE" != "x" ; then + DISTVER=$DISTRIB_RELEASE + fi + fi + if test "$DISTNAME" = "Ubuntu" ; then + DISTNAME="ubuntu" + fi + TAPEDRIVE="/dev/nst0" + PSCMD="ps -e -o pid,command" + if test "$DISTNAME" = "ubuntu" ; then + PFILES="${PFILES} \ + platforms/ubuntu/Makefile \ + platforms/ubuntu/bacula-fd \ + platforms/ubuntu/bacula-sd \ + platforms/ubuntu/bacula-dir" + else + PFILES="${PFILES} \ + platforms/debian/Makefile \ + platforms/debian/bacula-fd \ + platforms/debian/bacula-sd \ + platforms/debian/bacula-dir" + fi + ;; +freebsd) + DISTVER=`uname -a |awk '{print $3}'` + VER=`echo $DISTVER | cut -c 1` + if test x$VER = x4 ; then + PTHREAD_LIB="${PTHREAD_LIBS:--pthread}" + CFLAGS="${CFLAGS} ${PTHREAD_CFLAGS:--pthread}" + fi + lld="qd" + llu="qu" + TAPEDRIVE="/dev/nrsa0" + PSCMD="ps -ax -o pid,command" + PFILES="${PFILES} \ + platforms/freebsd/Makefile \ + platforms/freebsd/bacula-fd \ + platforms/freebsd/bacula-sd \ + platforms/freebsd/bacula-dir" + largefile_support="yes" + ;; +hurd) + DISTVER=`uname -r` + TAPEDRIVE="/dev/nst0" + PSCMD="ps -e -o pid,command" + PFILES="${PFILES} \ + platforms/hurd/Makefile \ + platforms/hurd/bacula-fd \ + platforms/hurd/bacula-sd \ + platforms/hurd/bacula-dir" + ;; +hpux) + PSCMD="UNIX95=1; ps -e -o pid,comm" + CFLAGS="${CFLAGS} -D_XOPEN_SOURCE_EXTENDED=1" + DISTVER=`uname -r` + TAPEDRIVE="/dev/rmt/0hnb" + PTHREAD_LIB="-lpthread" + $as_echo "#define _INCLUDE_LONGLONG 1" >>confdefs.h + + ;; +irix) + DISTVER=`uname -r` + TAPEDRIVE="/dev/rmt/0cbn" + PSCMD="ps -e -o pid,comm" + PFILES="${PFILES} \ + platforms/irix/Makefile \ + platforms/irix/bacula-fd \ + platforms/irix/bacula-sd \ + platforms/irix/bacula-dir" + ;; +netbsd) + DISTVER=`uname -a |awk '{print $3}'` + lld="qd" + llu="qu" + TAPEDRIVE="/dev/nrst0" + PSCMD="ps -ax -o pid,command" + PTHREAD_LIB="-pthread" + CFLAGS="${CFLAGS} -pthread" + ;; +openbsd) + DISTVER=`uname -a |awk '{print $3}'` + lld="qd" + llu="qu" + TAPEDRIVE="/dev/nrst0" + PSCMD="ps -ax -o pid,command" + PTHREAD_LIB="-pthread" + CFLAGS="${CFLAGS} -pthread" + PFILES="${PFILES} \ + platforms/openbsd/Makefile \ + platforms/openbsd/bacula-fd \ + platforms/openbsd/bacula-sd \ + platforms/openbsd/bacula-dir" + ;; +redhat) + if test -f /etc/whitebox-release ; then + f=/etc/whitebox-release + else + f=/etc/redhat-release + fi + if test `cat $f | grep release |\ + cut -f 3 -d ' '`x = "Enterprise"x ; then + DISTVER="Enterprise "`cat $f | grep release |\ + cut -f 6 -d ' '` + else + DISTVER=`cat /etc/redhat-release | grep release |\ + cut -f 5 -d ' '` + fi + TAPEDRIVE="/dev/nst0" + PSCMD="ps -e -o pid,command" + PFILES="${PFILES} \ + platforms/redhat/Makefile \ + platforms/redhat/bacula-fd \ + platforms/redhat/bacula-sd \ + platforms/redhat/bacula-dir + " + ;; +mandrake) + DISTVER=`cat /etc/mandrake-release | grep release |\ + cut -f 5 -d ' '` + TAPEDRIVE="/dev/nst0" + PSCMD="ps -e -o pid,command" + PFILES="${PFILES} \ + platforms/mandrake/Makefile \ + platforms/mandrake/bacula-fd \ + platforms/mandrake/bacula-sd \ + platforms/mandrake/bacula-dir \ + " + ;; +gentoo) + DISTVER=`awk '/version / {print $5}' < /etc/gentoo-release` + TAPEDRIVE="/dev/nst0" + PSCMD="ps -e -o pid,command" + PFILES="${PFILES} \ + platforms/gentoo/Makefile \ + platforms/gentoo/bacula-init \ + platforms/gentoo/bacula-fd \ + platforms/gentoo/bacula-sd \ + platforms/gentoo/bacula-dir" + ;; +slackware) + DISTVER=`cat /etc/slackware-version` + TAPEDRIVE="/dev/nst0" + PSCMD="ps -e 
-o pid,command" + PFILES="${PFILES} \ + platforms/slackware/Makefile \ + platforms/slackware/rc.bacula-fd \ + platforms/slackware/rc.bacula-sd \ + platforms/slackware/rc.bacula-dir\ + platforms/slackware/functions.bacula" + ;; +solaris) + DISTVER=`uname -r` + TAPEDRIVE="/dev/rmt/0cbn" + PSCMD="ps -e -o pid,comm" + PFILES="${PFILES} \ + platforms/solaris/Makefile \ + platforms/solaris/bacula-fd \ + platforms/solaris/bacula-sd \ + platforms/solaris/bacula-dir" + COMPRESS_MANPAGES= + case ${DISTVER} in + 5.5|5.6) + $as_echo "#define HAVE_OLD_SOCKOPT 1" >>confdefs.h + + $as_echo "#define USE_THR_SETCONCURRENCY 1" >>confdefs.h + + ;; + 5.7|5.8) + $as_echo "#define USE_THR_SETCONCURRENCY 1" >>confdefs.h + + ;; + 5.10) + +$as_echo "#define HAVE_SOLARIS10 1" >>confdefs.h + + ;; + *) + ;; + esac + LIBS="$LIBS -lresolv -lrt" + ;; +suse) + DISTVER=`cat /etc/SuSE-release |grep VERSION|\ + cut -f 3 -d ' '` + TAPEDRIVE="/dev/nst0" + PSCMD="ps -e -o pid,command" + PFILES="${PFILES} \ + platforms/suse/Makefile \ + platforms/suse/bacula-fd \ + platforms/suse/bacula-sd \ + platforms/suse/bacula-dir \ + platforms/suse/bacula" + ;; +suse5) + DISTNAME=suse + DISTVER=5.x + TAPEDRIVE="/dev/nst0" + PSCMD="ps -e -o pid,command" + PFILES="${PFILES} \ + platforms/suse/Makefile \ + platforms/suse/bacula-fd \ + platforms/suse/bacula-sd \ + platforms/suse/bacula-dir" + ;; +unknown) + DISTVER=unknown + TAPEDRIVE="/dev/nst0" + ;; +*) + echo " === Something went wrong. Unknown DISTNAME $DISTNAME ===" + ;; +esac + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for systemd support" >&5 +$as_echo_n "checking for systemd support... " >&6; } + +# Check whether --with-systemd was given. +if test "${with_systemd+set}" = set; then : + withval=$with_systemd; + if test "$withval" != "no"; then + if test "$withval" = "yes"; then + SYSTEMD_UNITDIR="/lib/systemd/system" + else + SYSTEMD_UNITDIR="${withval}" + fi + + PFILES="${PFILES} \ + platforms/systemd/Makefile \ + platforms/systemd/bacula.conf \ + platforms/systemd/bacula-dir.service \ + platforms/systemd/bacula-fd.service \ + platforms/systemd/bacula-sd.service" + +$as_echo "#define HAVE_SYSTEMD 1" >>confdefs.h + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + support_systemd="yes" + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + support_systemd="no" + fi + +else + + support_systemd="no" + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + + +fi + + + + + +LIBS="$PTHREAD_LIB $LIBS" + +cat >>confdefs.h <<_ACEOF +#define lld "$lld" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define llu "$llu" +_ACEOF + + + + + + + + + +MCOMMON=./autoconf/Make.common + + +if test "x${subsysdir}" = "x${sbindir}" ; then + echo " " + echo " " + echo "You have set both --sbindir and --with-subsys-dir" + echo " equal to: ${subsysdir} " + echo "This is not permitted. Please reconfigure." + echo " " + echo "Aborting configuration ..." 
+ echo " " + echo " " + exit 1 +fi + + +ac_config_files="$ac_config_files autoconf/Make.common Makefile manpages/Makefile scripts/btraceback scripts/bconsole scripts/baculabackupreport scripts/bacula scripts/bacula-ctl-dir scripts/bacula-ctl-fd scripts/bacula-ctl-sd scripts/devel_bacula scripts/Makefile scripts/logrotate scripts/mtx-changer scripts/disk-changer scripts/logwatch/Makefile scripts/logwatch/logfile.bacula.conf scripts/bat.desktop scripts/bat.desktop.xsu scripts/bat.desktop.consolehelper scripts/bat.console_apps scripts/bacula-tray-monitor.desktop src/Makefile src/host.h src/console/Makefile src/console/bconsole.conf src/qt-console/bat.conf src/qt-console/bat.pro src/qt-console/bat.pro.mingw32 src/qt-console/bat.pro.mingw64 src/qt-console/install_conf_file src/qt-console/tray-monitor/tray-monitor.conf src/qt-console/tray-monitor/bacula-tray-monitor.conf src/qt-console/tray-monitor/tray-monitor.pro src/qt-console/tray-monitor/tray-monitor.pro.mingw32 src/qt-console/tray-monitor/tray-monitor.pro.mingw64 src/dird/Makefile src/dird/bacula-dir.conf src/lib/Makefile src/stored/Makefile src/stored/bacula-sd.conf src/filed/Makefile src/filed/bacula-fd.conf src/cats/Makefile src/cats/make_catalog_backup.pl src/cats/make_catalog_backup src/cats/delete_catalog_backup src/cats/create_postgresql_database src/cats/update_postgresql_tables src/cats/make_postgresql_tables src/cats/grant_postgresql_privileges src/cats/drop_postgresql_tables src/cats/drop_postgresql_database src/cats/create_mysql_database src/cats/update_mysql_tables src/cats/make_mysql_tables src/cats/grant_mysql_privileges src/cats/drop_mysql_tables src/cats/drop_mysql_database src/cats/create_sqlite3_database src/cats/update_sqlite3_tables src/cats/make_sqlite3_tables src/cats/grant_sqlite3_privileges src/cats/drop_sqlite3_tables src/cats/drop_sqlite3_database src/cats/sqlite src/cats/mysql src/cats/create_bacula_database src/cats/update_bacula_tables src/cats/grant_bacula_privileges src/cats/make_bacula_tables src/cats/drop_bacula_tables src/cats/drop_bacula_database src/cats/install-default-backend src/findlib/Makefile src/tools/Makefile src/plugins/fd/Makefile src/plugins/sd/Makefile src/plugins/dir/Makefile po/Makefile.in updatedb/update_mysql_tables updatedb/update_sqlite3_tables updatedb/update_postgresql_tables updatedb/update_mysql_tables_9_to_10 updatedb/update_sqlite3_tables_9_to_10 updatedb/update_postgresql_tables_9_to_10 updatedb/update_mysql_tables_10_to_11 updatedb/update_sqlite3_tables_10_to_11 updatedb/update_postgresql_tables_10_to_11 updatedb/update_mysql_tables_11_to_12 updatedb/update_sqlite3_tables_11_to_12 updatedb/update_postgresql_tables_11_to_12 examples/nagios/check_bacula/Makefile platforms/rpms/redhat/bacula.spec platforms/rpms/redhat/bacula-bat.spec platforms/rpms/redhat/bacula-docs.spec platforms/rpms/redhat/bacula-mtx.spec platforms/rpms/suse/bacula.spec platforms/rpms/suse/bacula-bat.spec platforms/rpms/suse/bacula-docs.spec platforms/rpms/suse/bacula-mtx.spec $PFILES" + +ac_config_commands="$ac_config_commands default" + +cat >confcache <<\_ACEOF +# This file is a shell script that caches the results of configure +# tests run on this system so they can be shared between configure +# scripts and configure runs, see configure's option --config-cache. +# It is not useful on other systems. If it contains results you don't +# want to keep, you may remove or edit it. +# +# config.status only pays attention to the cache file if you give it +# the --recheck option to rerun configure. 
+# +# `ac_cv_env_foo' variables (set or unset) will be overridden when +# loading this file, other *unset* `ac_cv_foo' will be assigned the +# following values. + +_ACEOF + +# The following way of writing the cache mishandles newlines in values, +# but we know of no workaround that is simple, portable, and efficient. +# So, we kill variables containing newlines. +# Ultrix sh set writes to stderr and can't be redirected directly, +# and sets the high bit in the cache file unless we assign to the vars. +( + for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do + eval ac_val=\$$ac_var + case $ac_val in #( + *${as_nl}*) + case $ac_var in #( + *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 +$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; + esac + case $ac_var in #( + _ | IFS | as_nl) ;; #( + BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( + *) { eval $ac_var=; unset $ac_var;} ;; + esac ;; + esac + done + + (set) 2>&1 | + case $as_nl`(ac_space=' '; set) 2>&1` in #( + *${as_nl}ac_space=\ *) + # `set' does not quote correctly, so add quotes: double-quote + # substitution turns \\\\ into \\, and sed turns \\ into \. + sed -n \ + "s/'/'\\\\''/g; + s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" + ;; #( + *) + # `set' quotes correctly as required by POSIX, so do not add quotes. + sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" + ;; + esac | + sort +) | + sed ' + /^ac_cv_env_/b end + t clear + :clear + s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ + t end + s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ + :end' >>confcache +if diff "$cache_file" confcache >/dev/null 2>&1; then :; else + if test -w "$cache_file"; then + if test "x$cache_file" != "x/dev/null"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 +$as_echo "$as_me: updating cache $cache_file" >&6;} + if test ! -f "$cache_file" || test -h "$cache_file"; then + cat confcache >"$cache_file" + else + case $cache_file in #( + */* | ?:*) + mv -f confcache "$cache_file"$$ && + mv -f "$cache_file"$$ "$cache_file" ;; #( + *) + mv -f confcache "$cache_file" ;; + esac + fi + fi + else + { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 +$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} + fi +fi +rm -f confcache + +test "x$prefix" = xNONE && prefix=$ac_default_prefix +# Let make expand exec_prefix. +test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' + +DEFS=-DHAVE_CONFIG_H + +ac_libobjs= +ac_ltlibobjs= +U= +for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue + # 1. Remove the extension, and $U if already installed. + ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' + ac_i=`$as_echo "$ac_i" | sed "$ac_script"` + # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR + # will be set to the directory where LIBOBJS objects are built. + as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" + as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' +done +LIBOBJS=$ac_libobjs + +LTLIBOBJS=$ac_ltlibobjs + + + + +: "${CONFIG_STATUS=./config.status}" +ac_write_fail=0 +ac_clean_files_save=$ac_clean_files +ac_clean_files="$ac_clean_files $CONFIG_STATUS" +{ $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 +$as_echo "$as_me: creating $CONFIG_STATUS" >&6;} +as_write_fail=0 +cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 +#! $SHELL +# Generated by $as_me. +# Run this file to recreate the current configuration. 
+# Compiler output produced by configure, useful for debugging +# configure, is in config.log if it exists. + +debug=false +ac_cs_recheck=false +ac_cs_silent=false + +SHELL=\${CONFIG_SHELL-$SHELL} +export SHELL +_ASEOF +cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 +## -------------------- ## +## M4sh Initialization. ## +## -------------------- ## + +# Be more Bourne compatible +DUALCASE=1; export DUALCASE # for MKS sh +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi + + +as_nl=' +' +export as_nl +# Printing a long string crashes Solaris 7 /usr/bin/printf. +as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo +# Prefer a ksh shell builtin over an external printf program on Solaris, +# but without wasting forks for bash or zsh. +if test -z "$BASH_VERSION$ZSH_VERSION" \ + && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='print -r --' + as_echo_n='print -rn --' +elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='printf %s\n' + as_echo_n='printf %s' +else + if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then + as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' + as_echo_n='/usr/ucb/echo -n' + else + as_echo_body='eval expr "X$1" : "X\\(.*\\)"' + as_echo_n_body='eval + arg=$1; + case $arg in #( + *"$as_nl"*) + expr "X$arg" : "X\\(.*\\)$as_nl"; + arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; + esac; + expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" + ' + export as_echo_n_body + as_echo_n='sh -c $as_echo_n_body as_echo' + fi + export as_echo_body + as_echo='sh -c $as_echo_body as_echo' +fi + +# The user is always right. +if test "${PATH_SEPARATOR+set}" != set; then + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || + PATH_SEPARATOR=';' + } +fi + + +# IFS +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent editors from complaining about space-tab. +# (If _AS_PATH_WALK were called with IFS unset, it would disable word +# splitting by setting IFS to empty value.) +IFS=" "" $as_nl" + +# Find who we are. Look in the path if we contain no directory separator. +as_myself= +case $0 in #(( + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break + done +IFS=$as_save_IFS + + ;; +esac +# We did not find ourselves, most probably we were run as `sh COMMAND' +# in which case we are not to be found in the path. +if test "x$as_myself" = x; then + as_myself=$0 +fi +if test ! -f "$as_myself"; then + $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + exit 1 +fi + +# Unset variables that we do not need and which cause bugs (e.g. in +# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" +# suppresses any "Segmentation fault" message there. '((' could +# trigger a bug in pdksh 5.2.14. 
+for as_var in BASH_ENV ENV MAIL MAILPATH +do eval test x\${$as_var+set} = xset \ + && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : +done +PS1='$ ' +PS2='> ' +PS4='+ ' + +# NLS nuisances. +LC_ALL=C +export LC_ALL +LANGUAGE=C +export LANGUAGE + +# CDPATH. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + + +# as_fn_error STATUS ERROR [LINENO LOG_FD] +# ---------------------------------------- +# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are +# provided, also output the error to LOG_FD, referencing LINENO. Then exit the +# script with STATUS, using 1 if that was 0. +as_fn_error () +{ + as_status=$1; test $as_status -eq 0 && as_status=1 + if test "$4"; then + as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 + fi + $as_echo "$as_me: error: $2" >&2 + as_fn_exit $as_status +} # as_fn_error + + +# as_fn_set_status STATUS +# ----------------------- +# Set $? to STATUS, without forking. +as_fn_set_status () +{ + return $1 +} # as_fn_set_status + +# as_fn_exit STATUS +# ----------------- +# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. +as_fn_exit () +{ + set +e + as_fn_set_status $1 + exit $1 +} # as_fn_exit + +# as_fn_unset VAR +# --------------- +# Portably unset VAR. +as_fn_unset () +{ + { eval $1=; unset $1;} +} +as_unset=as_fn_unset +# as_fn_append VAR VALUE +# ---------------------- +# Append the text in VALUE to the end of the definition contained in VAR. Take +# advantage of any shell optimizations that allow amortized linear growth over +# repeated appends, instead of the typical quadratic growth present in naive +# implementations. +if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : + eval 'as_fn_append () + { + eval $1+=\$2 + }' +else + as_fn_append () + { + eval $1=\$$1\$2 + } +fi # as_fn_append + +# as_fn_arith ARG... +# ------------------ +# Perform arithmetic evaluation on the ARGs, and store the result in the +# global $as_val. Take advantage of shells that can avoid forks. The arguments +# must be portable across $(()) and expr. +if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : + eval 'as_fn_arith () + { + as_val=$(( $* )) + }' +else + as_fn_arith () + { + as_val=`expr "$@" || test $? -eq 1` + } +fi # as_fn_arith + + +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + +if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then + as_dirname=dirname +else + as_dirname=false +fi + +as_me=`$as_basename -- "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q + } + /^X\/\(\/\/\)$/{ + s//\1/ + q + } + /^X\/\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + +# Avoid depending upon Character Ranges. +as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + +ECHO_C= ECHO_N= ECHO_T= +case `echo -n x` in #((((( +-n*) + case `echo 'xy\c'` in + *c*) ECHO_T=' ';; # ECHO_T is single tab character. 
+ xy) ECHO_C='\c';; + *) echo `echo ksh88 bug on AIX 6.1` > /dev/null + ECHO_T=' ';; + esac;; +*) + ECHO_N='-n';; +esac + +rm -f conf$$ conf$$.exe conf$$.file +if test -d conf$$.dir; then + rm -f conf$$.dir/conf$$.file +else + rm -f conf$$.dir + mkdir conf$$.dir 2>/dev/null +fi +if (echo >conf$$.file) 2>/dev/null; then + if ln -s conf$$.file conf$$ 2>/dev/null; then + as_ln_s='ln -s' + # ... but there are two gotchas: + # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. + # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. + # In both cases, we have to default to `cp -pR'. + ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || + as_ln_s='cp -pR' + elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln + else + as_ln_s='cp -pR' + fi +else + as_ln_s='cp -pR' +fi +rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file +rmdir conf$$.dir 2>/dev/null + + +# as_fn_mkdir_p +# ------------- +# Create "$as_dir" as a directory, including parents if necessary. +as_fn_mkdir_p () +{ + + case $as_dir in #( + -*) as_dir=./$as_dir;; + esac + test -d "$as_dir" || eval $as_mkdir_p || { + as_dirs= + while :; do + case $as_dir in #( + *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( + *) as_qdir=$as_dir;; + esac + as_dirs="'$as_qdir' $as_dirs" + as_dir=`$as_dirname -- "$as_dir" || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + test -d "$as_dir" && break + done + test -z "$as_dirs" || eval "mkdir $as_dirs" + } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" + + +} # as_fn_mkdir_p +if mkdir -p . 2>/dev/null; then + as_mkdir_p='mkdir -p "$as_dir"' +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + + +# as_fn_executable_p FILE +# ----------------------- +# Test if FILE is an executable regular file. +as_fn_executable_p () +{ + test -f "$1" && test -x "$1" +} # as_fn_executable_p +as_test_x='test -x' +as_executable_p=as_fn_executable_p + +# Sed expression to map a string onto a valid CPP name. +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + + +exec 6>&1 +## ----------------------------------- ## +## Main body of $CONFIG_STATUS script. ## +## ----------------------------------- ## +_ASEOF +test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# Save the log message, to keep $0 and so on meaningful, and to +# report actual input values of CONFIG_FILES etc. instead of their +# values after options handling. +ac_log=" +This file was extended by bacula $as_me 9.4.2, which was +generated by GNU Autoconf 2.69. 
Invocation command line was + + CONFIG_FILES = $CONFIG_FILES + CONFIG_HEADERS = $CONFIG_HEADERS + CONFIG_LINKS = $CONFIG_LINKS + CONFIG_COMMANDS = $CONFIG_COMMANDS + $ $0 $@ + +on `(hostname || uname -n) 2>/dev/null | sed 1q` +" + +_ACEOF + +case $ac_config_files in *" +"*) set x $ac_config_files; shift; ac_config_files=$*;; +esac + +case $ac_config_headers in *" +"*) set x $ac_config_headers; shift; ac_config_headers=$*;; +esac + + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +# Files that config.status was made for. +config_files="$ac_config_files" +config_headers="$ac_config_headers" +config_commands="$ac_config_commands" + +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +ac_cs_usage="\ +\`$as_me' instantiates files and other configuration actions +from templates according to the current configuration. Unless the files +and actions are specified as TAGs, all are instantiated by default. + +Usage: $0 [OPTION]... [TAG]... + + -h, --help print this help, then exit + -V, --version print version number and configuration settings, then exit + --config print configuration, then exit + -q, --quiet, --silent + do not print progress messages + -d, --debug don't remove temporary files + --recheck update $as_me by reconfiguring in the same conditions + --file=FILE[:TEMPLATE] + instantiate the configuration file FILE + --header=FILE[:TEMPLATE] + instantiate the configuration header FILE + +Configuration files: +$config_files + +Configuration headers: +$config_headers + +Configuration commands: +$config_commands + +Report bugs to the package provider." + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" +ac_cs_version="\\ +bacula config.status 9.4.2 +configured by $0, generated by GNU Autoconf 2.69, + with options \\"\$ac_cs_config\\" + +Copyright (C) 2012 Free Software Foundation, Inc. +This config.status script is free software; the Free Software Foundation +gives unlimited permission to copy, distribute and modify it." + +ac_pwd='$ac_pwd' +srcdir='$srcdir' +INSTALL='$INSTALL' +AWK='$AWK' +test -n "\$AWK" || AWK=awk +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# The default lists apply if the user does not specify any file. +ac_need_defaults=: +while test $# != 0 +do + case $1 in + --*=?*) + ac_option=`expr "X$1" : 'X\([^=]*\)='` + ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` + ac_shift=: + ;; + --*=) + ac_option=`expr "X$1" : 'X\([^=]*\)='` + ac_optarg= + ac_shift=: + ;; + *) + ac_option=$1 + ac_optarg=$2 + ac_shift=shift + ;; + esac + + case $ac_option in + # Handling of the options. + -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) + ac_cs_recheck=: ;; + --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) + $as_echo "$ac_cs_version"; exit ;; + --config | --confi | --conf | --con | --co | --c ) + $as_echo "$ac_cs_config"; exit ;; + --debug | --debu | --deb | --de | --d | -d ) + debug=: ;; + --file | --fil | --fi | --f ) + $ac_shift + case $ac_optarg in + *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; + '') as_fn_error $? 
"missing file argument" ;; + esac + as_fn_append CONFIG_FILES " '$ac_optarg'" + ac_need_defaults=false;; + --header | --heade | --head | --hea ) + $ac_shift + case $ac_optarg in + *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; + esac + as_fn_append CONFIG_HEADERS " '$ac_optarg'" + ac_need_defaults=false;; + --he | --h) + # Conflict between --help and --header + as_fn_error $? "ambiguous option: \`$1' +Try \`$0 --help' for more information.";; + --help | --hel | -h ) + $as_echo "$ac_cs_usage"; exit ;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil | --si | --s) + ac_cs_silent=: ;; + + # This is an error. + -*) as_fn_error $? "unrecognized option: \`$1' +Try \`$0 --help' for more information." ;; + + *) as_fn_append ac_config_targets " $1" + ac_need_defaults=false ;; + + esac + shift +done + +ac_configure_extra_args= + +if $ac_cs_silent; then + exec 6>/dev/null + ac_configure_extra_args="$ac_configure_extra_args --silent" +fi + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +if \$ac_cs_recheck; then + set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion + shift + \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 + CONFIG_SHELL='$SHELL' + export CONFIG_SHELL + exec "\$@" +fi + +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +exec 5>>config.log +{ + echo + sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX +## Running $as_me. ## +_ASBOX + $as_echo "$ac_log" +} >&5 + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +# +# INIT-COMMANDS +# + + +# The HP-UX ksh and POSIX shell print the target directory to stdout +# if CDPATH is set. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +sed_quote_subst='$sed_quote_subst' +double_quote_subst='$double_quote_subst' +delay_variable_subst='$delay_variable_subst' +macro_version='`$ECHO "$macro_version" | $SED "$delay_single_quote_subst"`' +macro_revision='`$ECHO "$macro_revision" | $SED "$delay_single_quote_subst"`' +enable_shared='`$ECHO "$enable_shared" | $SED "$delay_single_quote_subst"`' +enable_static='`$ECHO "$enable_static" | $SED "$delay_single_quote_subst"`' +pic_mode='`$ECHO "$pic_mode" | $SED "$delay_single_quote_subst"`' +enable_fast_install='`$ECHO "$enable_fast_install" | $SED "$delay_single_quote_subst"`' +SHELL='`$ECHO "$SHELL" | $SED "$delay_single_quote_subst"`' +ECHO='`$ECHO "$ECHO" | $SED "$delay_single_quote_subst"`' +PATH_SEPARATOR='`$ECHO "$PATH_SEPARATOR" | $SED "$delay_single_quote_subst"`' +host_alias='`$ECHO "$host_alias" | $SED "$delay_single_quote_subst"`' +host='`$ECHO "$host" | $SED "$delay_single_quote_subst"`' +host_os='`$ECHO "$host_os" | $SED "$delay_single_quote_subst"`' +build_alias='`$ECHO "$build_alias" | $SED "$delay_single_quote_subst"`' +build='`$ECHO "$build" | $SED "$delay_single_quote_subst"`' +build_os='`$ECHO "$build_os" | $SED "$delay_single_quote_subst"`' +SED='`$ECHO "$SED" | $SED "$delay_single_quote_subst"`' +Xsed='`$ECHO "$Xsed" | $SED "$delay_single_quote_subst"`' +GREP='`$ECHO "$GREP" | $SED "$delay_single_quote_subst"`' +EGREP='`$ECHO "$EGREP" | $SED "$delay_single_quote_subst"`' +FGREP='`$ECHO "$FGREP" | $SED "$delay_single_quote_subst"`' +LD='`$ECHO "$LD" | $SED "$delay_single_quote_subst"`' +NM='`$ECHO "$NM" | $SED "$delay_single_quote_subst"`' +LN_S='`$ECHO "$LN_S" | $SED "$delay_single_quote_subst"`' +max_cmd_len='`$ECHO "$max_cmd_len" | $SED "$delay_single_quote_subst"`' +ac_objext='`$ECHO "$ac_objext" | $SED "$delay_single_quote_subst"`' 
+exeext='`$ECHO "$exeext" | $SED "$delay_single_quote_subst"`' +lt_unset='`$ECHO "$lt_unset" | $SED "$delay_single_quote_subst"`' +lt_SP2NL='`$ECHO "$lt_SP2NL" | $SED "$delay_single_quote_subst"`' +lt_NL2SP='`$ECHO "$lt_NL2SP" | $SED "$delay_single_quote_subst"`' +lt_cv_to_host_file_cmd='`$ECHO "$lt_cv_to_host_file_cmd" | $SED "$delay_single_quote_subst"`' +lt_cv_to_tool_file_cmd='`$ECHO "$lt_cv_to_tool_file_cmd" | $SED "$delay_single_quote_subst"`' +reload_flag='`$ECHO "$reload_flag" | $SED "$delay_single_quote_subst"`' +reload_cmds='`$ECHO "$reload_cmds" | $SED "$delay_single_quote_subst"`' +OBJDUMP='`$ECHO "$OBJDUMP" | $SED "$delay_single_quote_subst"`' +deplibs_check_method='`$ECHO "$deplibs_check_method" | $SED "$delay_single_quote_subst"`' +file_magic_cmd='`$ECHO "$file_magic_cmd" | $SED "$delay_single_quote_subst"`' +file_magic_glob='`$ECHO "$file_magic_glob" | $SED "$delay_single_quote_subst"`' +want_nocaseglob='`$ECHO "$want_nocaseglob" | $SED "$delay_single_quote_subst"`' +DLLTOOL='`$ECHO "$DLLTOOL" | $SED "$delay_single_quote_subst"`' +sharedlib_from_linklib_cmd='`$ECHO "$sharedlib_from_linklib_cmd" | $SED "$delay_single_quote_subst"`' +AR='`$ECHO "$AR" | $SED "$delay_single_quote_subst"`' +AR_FLAGS='`$ECHO "$AR_FLAGS" | $SED "$delay_single_quote_subst"`' +archiver_list_spec='`$ECHO "$archiver_list_spec" | $SED "$delay_single_quote_subst"`' +STRIP='`$ECHO "$STRIP" | $SED "$delay_single_quote_subst"`' +RANLIB='`$ECHO "$RANLIB" | $SED "$delay_single_quote_subst"`' +old_postinstall_cmds='`$ECHO "$old_postinstall_cmds" | $SED "$delay_single_quote_subst"`' +old_postuninstall_cmds='`$ECHO "$old_postuninstall_cmds" | $SED "$delay_single_quote_subst"`' +old_archive_cmds='`$ECHO "$old_archive_cmds" | $SED "$delay_single_quote_subst"`' +lock_old_archive_extraction='`$ECHO "$lock_old_archive_extraction" | $SED "$delay_single_quote_subst"`' +CC='`$ECHO "$CC" | $SED "$delay_single_quote_subst"`' +CFLAGS='`$ECHO "$CFLAGS" | $SED "$delay_single_quote_subst"`' +compiler='`$ECHO "$compiler" | $SED "$delay_single_quote_subst"`' +GCC='`$ECHO "$GCC" | $SED "$delay_single_quote_subst"`' +lt_cv_sys_global_symbol_pipe='`$ECHO "$lt_cv_sys_global_symbol_pipe" | $SED "$delay_single_quote_subst"`' +lt_cv_sys_global_symbol_to_cdecl='`$ECHO "$lt_cv_sys_global_symbol_to_cdecl" | $SED "$delay_single_quote_subst"`' +lt_cv_sys_global_symbol_to_c_name_address='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address" | $SED "$delay_single_quote_subst"`' +lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address_lib_prefix" | $SED "$delay_single_quote_subst"`' +nm_file_list_spec='`$ECHO "$nm_file_list_spec" | $SED "$delay_single_quote_subst"`' +lt_sysroot='`$ECHO "$lt_sysroot" | $SED "$delay_single_quote_subst"`' +objdir='`$ECHO "$objdir" | $SED "$delay_single_quote_subst"`' +MAGIC_CMD='`$ECHO "$MAGIC_CMD" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_no_builtin_flag='`$ECHO "$lt_prog_compiler_no_builtin_flag" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_pic='`$ECHO "$lt_prog_compiler_pic" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_static='`$ECHO "$lt_prog_compiler_static" | $SED "$delay_single_quote_subst"`' +lt_cv_prog_compiler_c_o='`$ECHO "$lt_cv_prog_compiler_c_o" | $SED "$delay_single_quote_subst"`' +need_locks='`$ECHO "$need_locks" | $SED "$delay_single_quote_subst"`' +MANIFEST_TOOL='`$ECHO "$MANIFEST_TOOL" | $SED "$delay_single_quote_subst"`' 
+DSYMUTIL='`$ECHO "$DSYMUTIL" | $SED "$delay_single_quote_subst"`' +NMEDIT='`$ECHO "$NMEDIT" | $SED "$delay_single_quote_subst"`' +LIPO='`$ECHO "$LIPO" | $SED "$delay_single_quote_subst"`' +OTOOL='`$ECHO "$OTOOL" | $SED "$delay_single_quote_subst"`' +OTOOL64='`$ECHO "$OTOOL64" | $SED "$delay_single_quote_subst"`' +libext='`$ECHO "$libext" | $SED "$delay_single_quote_subst"`' +shrext_cmds='`$ECHO "$shrext_cmds" | $SED "$delay_single_quote_subst"`' +extract_expsyms_cmds='`$ECHO "$extract_expsyms_cmds" | $SED "$delay_single_quote_subst"`' +archive_cmds_need_lc='`$ECHO "$archive_cmds_need_lc" | $SED "$delay_single_quote_subst"`' +enable_shared_with_static_runtimes='`$ECHO "$enable_shared_with_static_runtimes" | $SED "$delay_single_quote_subst"`' +export_dynamic_flag_spec='`$ECHO "$export_dynamic_flag_spec" | $SED "$delay_single_quote_subst"`' +whole_archive_flag_spec='`$ECHO "$whole_archive_flag_spec" | $SED "$delay_single_quote_subst"`' +compiler_needs_object='`$ECHO "$compiler_needs_object" | $SED "$delay_single_quote_subst"`' +old_archive_from_new_cmds='`$ECHO "$old_archive_from_new_cmds" | $SED "$delay_single_quote_subst"`' +old_archive_from_expsyms_cmds='`$ECHO "$old_archive_from_expsyms_cmds" | $SED "$delay_single_quote_subst"`' +archive_cmds='`$ECHO "$archive_cmds" | $SED "$delay_single_quote_subst"`' +archive_expsym_cmds='`$ECHO "$archive_expsym_cmds" | $SED "$delay_single_quote_subst"`' +module_cmds='`$ECHO "$module_cmds" | $SED "$delay_single_quote_subst"`' +module_expsym_cmds='`$ECHO "$module_expsym_cmds" | $SED "$delay_single_quote_subst"`' +with_gnu_ld='`$ECHO "$with_gnu_ld" | $SED "$delay_single_quote_subst"`' +allow_undefined_flag='`$ECHO "$allow_undefined_flag" | $SED "$delay_single_quote_subst"`' +no_undefined_flag='`$ECHO "$no_undefined_flag" | $SED "$delay_single_quote_subst"`' +hardcode_libdir_flag_spec='`$ECHO "$hardcode_libdir_flag_spec" | $SED "$delay_single_quote_subst"`' +hardcode_libdir_separator='`$ECHO "$hardcode_libdir_separator" | $SED "$delay_single_quote_subst"`' +hardcode_direct='`$ECHO "$hardcode_direct" | $SED "$delay_single_quote_subst"`' +hardcode_direct_absolute='`$ECHO "$hardcode_direct_absolute" | $SED "$delay_single_quote_subst"`' +hardcode_minus_L='`$ECHO "$hardcode_minus_L" | $SED "$delay_single_quote_subst"`' +hardcode_shlibpath_var='`$ECHO "$hardcode_shlibpath_var" | $SED "$delay_single_quote_subst"`' +hardcode_automatic='`$ECHO "$hardcode_automatic" | $SED "$delay_single_quote_subst"`' +inherit_rpath='`$ECHO "$inherit_rpath" | $SED "$delay_single_quote_subst"`' +link_all_deplibs='`$ECHO "$link_all_deplibs" | $SED "$delay_single_quote_subst"`' +always_export_symbols='`$ECHO "$always_export_symbols" | $SED "$delay_single_quote_subst"`' +export_symbols_cmds='`$ECHO "$export_symbols_cmds" | $SED "$delay_single_quote_subst"`' +exclude_expsyms='`$ECHO "$exclude_expsyms" | $SED "$delay_single_quote_subst"`' +include_expsyms='`$ECHO "$include_expsyms" | $SED "$delay_single_quote_subst"`' +prelink_cmds='`$ECHO "$prelink_cmds" | $SED "$delay_single_quote_subst"`' +postlink_cmds='`$ECHO "$postlink_cmds" | $SED "$delay_single_quote_subst"`' +file_list_spec='`$ECHO "$file_list_spec" | $SED "$delay_single_quote_subst"`' +variables_saved_for_relink='`$ECHO "$variables_saved_for_relink" | $SED "$delay_single_quote_subst"`' +need_lib_prefix='`$ECHO "$need_lib_prefix" | $SED "$delay_single_quote_subst"`' +need_version='`$ECHO "$need_version" | $SED "$delay_single_quote_subst"`' +version_type='`$ECHO "$version_type" | $SED "$delay_single_quote_subst"`' 
+runpath_var='`$ECHO "$runpath_var" | $SED "$delay_single_quote_subst"`' +shlibpath_var='`$ECHO "$shlibpath_var" | $SED "$delay_single_quote_subst"`' +shlibpath_overrides_runpath='`$ECHO "$shlibpath_overrides_runpath" | $SED "$delay_single_quote_subst"`' +libname_spec='`$ECHO "$libname_spec" | $SED "$delay_single_quote_subst"`' +library_names_spec='`$ECHO "$library_names_spec" | $SED "$delay_single_quote_subst"`' +soname_spec='`$ECHO "$soname_spec" | $SED "$delay_single_quote_subst"`' +install_override_mode='`$ECHO "$install_override_mode" | $SED "$delay_single_quote_subst"`' +postinstall_cmds='`$ECHO "$postinstall_cmds" | $SED "$delay_single_quote_subst"`' +postuninstall_cmds='`$ECHO "$postuninstall_cmds" | $SED "$delay_single_quote_subst"`' +finish_cmds='`$ECHO "$finish_cmds" | $SED "$delay_single_quote_subst"`' +finish_eval='`$ECHO "$finish_eval" | $SED "$delay_single_quote_subst"`' +hardcode_into_libs='`$ECHO "$hardcode_into_libs" | $SED "$delay_single_quote_subst"`' +sys_lib_search_path_spec='`$ECHO "$sys_lib_search_path_spec" | $SED "$delay_single_quote_subst"`' +sys_lib_dlsearch_path_spec='`$ECHO "$sys_lib_dlsearch_path_spec" | $SED "$delay_single_quote_subst"`' +hardcode_action='`$ECHO "$hardcode_action" | $SED "$delay_single_quote_subst"`' +enable_dlopen='`$ECHO "$enable_dlopen" | $SED "$delay_single_quote_subst"`' +enable_dlopen_self='`$ECHO "$enable_dlopen_self" | $SED "$delay_single_quote_subst"`' +enable_dlopen_self_static='`$ECHO "$enable_dlopen_self_static" | $SED "$delay_single_quote_subst"`' +old_striplib='`$ECHO "$old_striplib" | $SED "$delay_single_quote_subst"`' +striplib='`$ECHO "$striplib" | $SED "$delay_single_quote_subst"`' +compiler_lib_search_dirs='`$ECHO "$compiler_lib_search_dirs" | $SED "$delay_single_quote_subst"`' +predep_objects='`$ECHO "$predep_objects" | $SED "$delay_single_quote_subst"`' +postdep_objects='`$ECHO "$postdep_objects" | $SED "$delay_single_quote_subst"`' +predeps='`$ECHO "$predeps" | $SED "$delay_single_quote_subst"`' +postdeps='`$ECHO "$postdeps" | $SED "$delay_single_quote_subst"`' +compiler_lib_search_path='`$ECHO "$compiler_lib_search_path" | $SED "$delay_single_quote_subst"`' +LD_CXX='`$ECHO "$LD_CXX" | $SED "$delay_single_quote_subst"`' +reload_flag_CXX='`$ECHO "$reload_flag_CXX" | $SED "$delay_single_quote_subst"`' +reload_cmds_CXX='`$ECHO "$reload_cmds_CXX" | $SED "$delay_single_quote_subst"`' +old_archive_cmds_CXX='`$ECHO "$old_archive_cmds_CXX" | $SED "$delay_single_quote_subst"`' +compiler_CXX='`$ECHO "$compiler_CXX" | $SED "$delay_single_quote_subst"`' +GCC_CXX='`$ECHO "$GCC_CXX" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_no_builtin_flag_CXX='`$ECHO "$lt_prog_compiler_no_builtin_flag_CXX" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_pic_CXX='`$ECHO "$lt_prog_compiler_pic_CXX" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_wl_CXX='`$ECHO "$lt_prog_compiler_wl_CXX" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_static_CXX='`$ECHO "$lt_prog_compiler_static_CXX" | $SED "$delay_single_quote_subst"`' +lt_cv_prog_compiler_c_o_CXX='`$ECHO "$lt_cv_prog_compiler_c_o_CXX" | $SED "$delay_single_quote_subst"`' +archive_cmds_need_lc_CXX='`$ECHO "$archive_cmds_need_lc_CXX" | $SED "$delay_single_quote_subst"`' +enable_shared_with_static_runtimes_CXX='`$ECHO "$enable_shared_with_static_runtimes_CXX" | $SED "$delay_single_quote_subst"`' +export_dynamic_flag_spec_CXX='`$ECHO "$export_dynamic_flag_spec_CXX" | $SED "$delay_single_quote_subst"`' +whole_archive_flag_spec_CXX='`$ECHO "$whole_archive_flag_spec_CXX" | 
$SED "$delay_single_quote_subst"`' +compiler_needs_object_CXX='`$ECHO "$compiler_needs_object_CXX" | $SED "$delay_single_quote_subst"`' +old_archive_from_new_cmds_CXX='`$ECHO "$old_archive_from_new_cmds_CXX" | $SED "$delay_single_quote_subst"`' +old_archive_from_expsyms_cmds_CXX='`$ECHO "$old_archive_from_expsyms_cmds_CXX" | $SED "$delay_single_quote_subst"`' +archive_cmds_CXX='`$ECHO "$archive_cmds_CXX" | $SED "$delay_single_quote_subst"`' +archive_expsym_cmds_CXX='`$ECHO "$archive_expsym_cmds_CXX" | $SED "$delay_single_quote_subst"`' +module_cmds_CXX='`$ECHO "$module_cmds_CXX" | $SED "$delay_single_quote_subst"`' +module_expsym_cmds_CXX='`$ECHO "$module_expsym_cmds_CXX" | $SED "$delay_single_quote_subst"`' +with_gnu_ld_CXX='`$ECHO "$with_gnu_ld_CXX" | $SED "$delay_single_quote_subst"`' +allow_undefined_flag_CXX='`$ECHO "$allow_undefined_flag_CXX" | $SED "$delay_single_quote_subst"`' +no_undefined_flag_CXX='`$ECHO "$no_undefined_flag_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_libdir_flag_spec_CXX='`$ECHO "$hardcode_libdir_flag_spec_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_libdir_separator_CXX='`$ECHO "$hardcode_libdir_separator_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_direct_CXX='`$ECHO "$hardcode_direct_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_direct_absolute_CXX='`$ECHO "$hardcode_direct_absolute_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_minus_L_CXX='`$ECHO "$hardcode_minus_L_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_shlibpath_var_CXX='`$ECHO "$hardcode_shlibpath_var_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_automatic_CXX='`$ECHO "$hardcode_automatic_CXX" | $SED "$delay_single_quote_subst"`' +inherit_rpath_CXX='`$ECHO "$inherit_rpath_CXX" | $SED "$delay_single_quote_subst"`' +link_all_deplibs_CXX='`$ECHO "$link_all_deplibs_CXX" | $SED "$delay_single_quote_subst"`' +always_export_symbols_CXX='`$ECHO "$always_export_symbols_CXX" | $SED "$delay_single_quote_subst"`' +export_symbols_cmds_CXX='`$ECHO "$export_symbols_cmds_CXX" | $SED "$delay_single_quote_subst"`' +exclude_expsyms_CXX='`$ECHO "$exclude_expsyms_CXX" | $SED "$delay_single_quote_subst"`' +include_expsyms_CXX='`$ECHO "$include_expsyms_CXX" | $SED "$delay_single_quote_subst"`' +prelink_cmds_CXX='`$ECHO "$prelink_cmds_CXX" | $SED "$delay_single_quote_subst"`' +postlink_cmds_CXX='`$ECHO "$postlink_cmds_CXX" | $SED "$delay_single_quote_subst"`' +file_list_spec_CXX='`$ECHO "$file_list_spec_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_action_CXX='`$ECHO "$hardcode_action_CXX" | $SED "$delay_single_quote_subst"`' +compiler_lib_search_dirs_CXX='`$ECHO "$compiler_lib_search_dirs_CXX" | $SED "$delay_single_quote_subst"`' +predep_objects_CXX='`$ECHO "$predep_objects_CXX" | $SED "$delay_single_quote_subst"`' +postdep_objects_CXX='`$ECHO "$postdep_objects_CXX" | $SED "$delay_single_quote_subst"`' +predeps_CXX='`$ECHO "$predeps_CXX" | $SED "$delay_single_quote_subst"`' +postdeps_CXX='`$ECHO "$postdeps_CXX" | $SED "$delay_single_quote_subst"`' +compiler_lib_search_path_CXX='`$ECHO "$compiler_lib_search_path_CXX" | $SED "$delay_single_quote_subst"`' + +LTCC='$LTCC' +LTCFLAGS='$LTCFLAGS' +compiler='$compiler_DEFAULT' + +# A function that is used when there is no print builtin or printf. +func_fallback_echo () +{ + eval 'cat <<_LTECHO_EOF +\$1 +_LTECHO_EOF' +} + +# Quote evaled strings. 
+for var in SHELL \ +ECHO \ +PATH_SEPARATOR \ +SED \ +GREP \ +EGREP \ +FGREP \ +LD \ +NM \ +LN_S \ +lt_SP2NL \ +lt_NL2SP \ +reload_flag \ +OBJDUMP \ +deplibs_check_method \ +file_magic_cmd \ +file_magic_glob \ +want_nocaseglob \ +DLLTOOL \ +sharedlib_from_linklib_cmd \ +AR \ +AR_FLAGS \ +archiver_list_spec \ +STRIP \ +RANLIB \ +CC \ +CFLAGS \ +compiler \ +lt_cv_sys_global_symbol_pipe \ +lt_cv_sys_global_symbol_to_cdecl \ +lt_cv_sys_global_symbol_to_c_name_address \ +lt_cv_sys_global_symbol_to_c_name_address_lib_prefix \ +nm_file_list_spec \ +lt_prog_compiler_no_builtin_flag \ +lt_prog_compiler_pic \ +lt_prog_compiler_wl \ +lt_prog_compiler_static \ +lt_cv_prog_compiler_c_o \ +need_locks \ +MANIFEST_TOOL \ +DSYMUTIL \ +NMEDIT \ +LIPO \ +OTOOL \ +OTOOL64 \ +shrext_cmds \ +export_dynamic_flag_spec \ +whole_archive_flag_spec \ +compiler_needs_object \ +with_gnu_ld \ +allow_undefined_flag \ +no_undefined_flag \ +hardcode_libdir_flag_spec \ +hardcode_libdir_separator \ +exclude_expsyms \ +include_expsyms \ +file_list_spec \ +variables_saved_for_relink \ +libname_spec \ +library_names_spec \ +soname_spec \ +install_override_mode \ +finish_eval \ +old_striplib \ +striplib \ +compiler_lib_search_dirs \ +predep_objects \ +postdep_objects \ +predeps \ +postdeps \ +compiler_lib_search_path \ +LD_CXX \ +reload_flag_CXX \ +compiler_CXX \ +lt_prog_compiler_no_builtin_flag_CXX \ +lt_prog_compiler_pic_CXX \ +lt_prog_compiler_wl_CXX \ +lt_prog_compiler_static_CXX \ +lt_cv_prog_compiler_c_o_CXX \ +export_dynamic_flag_spec_CXX \ +whole_archive_flag_spec_CXX \ +compiler_needs_object_CXX \ +with_gnu_ld_CXX \ +allow_undefined_flag_CXX \ +no_undefined_flag_CXX \ +hardcode_libdir_flag_spec_CXX \ +hardcode_libdir_separator_CXX \ +exclude_expsyms_CXX \ +include_expsyms_CXX \ +file_list_spec_CXX \ +compiler_lib_search_dirs_CXX \ +predep_objects_CXX \ +postdep_objects_CXX \ +predeps_CXX \ +postdeps_CXX \ +compiler_lib_search_path_CXX; do + case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in + *[\\\\\\\`\\"\\\$]*) + eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED \\"\\\$sed_quote_subst\\"\\\`\\\\\\"" + ;; + *) + eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" + ;; + esac +done + +# Double-quote double-evaled strings. +for var in reload_cmds \ +old_postinstall_cmds \ +old_postuninstall_cmds \ +old_archive_cmds \ +extract_expsyms_cmds \ +old_archive_from_new_cmds \ +old_archive_from_expsyms_cmds \ +archive_cmds \ +archive_expsym_cmds \ +module_cmds \ +module_expsym_cmds \ +export_symbols_cmds \ +prelink_cmds \ +postlink_cmds \ +postinstall_cmds \ +postuninstall_cmds \ +finish_cmds \ +sys_lib_search_path_spec \ +sys_lib_dlsearch_path_spec \ +reload_cmds_CXX \ +old_archive_cmds_CXX \ +old_archive_from_new_cmds_CXX \ +old_archive_from_expsyms_cmds_CXX \ +archive_cmds_CXX \ +archive_expsym_cmds_CXX \ +module_cmds_CXX \ +module_expsym_cmds_CXX \ +export_symbols_cmds_CXX \ +prelink_cmds_CXX \ +postlink_cmds_CXX; do + case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in + *[\\\\\\\`\\"\\\$]*) + eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" + ;; + *) + eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" + ;; + esac +done + +ac_aux_dir='$ac_aux_dir' +xsi_shell='$xsi_shell' +lt_shell_append='$lt_shell_append' + +# See if we are running on zsh, and set the options which allow our +# commands through without removal of \ escapes INIT. 
+if test -n "\${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST +fi + + + PACKAGE='$PACKAGE' + VERSION='$VERSION' + TIMESTAMP='$TIMESTAMP' + RM='$RM' + ofile='$ofile' + + + + + +# Capture the value of obsolete ALL_LINGUAS because we need it to compute + # POFILES, UPDATEPOFILES, DUMMYPOFILES, GMOFILES, CATALOGS. But hide it + # from automake. + eval 'OBSOLETE_ALL_LINGUAS''="$ALL_LINGUAS"' + # Capture the value of LINGUAS because we need it to compute CATALOGS. + LINGUAS="${LINGUAS-%UNSET%}" + + +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 + +# Handling of arguments. +for ac_config_target in $ac_config_targets +do + case $ac_config_target in + "src/config.h") CONFIG_HEADERS="$CONFIG_HEADERS src/config.h:autoconf/config.h.in" ;; + "libtool") CONFIG_COMMANDS="$CONFIG_COMMANDS libtool" ;; + "default-1") CONFIG_COMMANDS="$CONFIG_COMMANDS default-1" ;; + "autoconf/Make.common") CONFIG_FILES="$CONFIG_FILES autoconf/Make.common" ;; + "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;; + "manpages/Makefile") CONFIG_FILES="$CONFIG_FILES manpages/Makefile" ;; + "scripts/btraceback") CONFIG_FILES="$CONFIG_FILES scripts/btraceback" ;; + "scripts/bconsole") CONFIG_FILES="$CONFIG_FILES scripts/bconsole" ;; + "scripts/baculabackupreport") CONFIG_FILES="$CONFIG_FILES scripts/baculabackupreport" ;; + "scripts/bacula") CONFIG_FILES="$CONFIG_FILES scripts/bacula" ;; + "scripts/bacula-ctl-dir") CONFIG_FILES="$CONFIG_FILES scripts/bacula-ctl-dir" ;; + "scripts/bacula-ctl-fd") CONFIG_FILES="$CONFIG_FILES scripts/bacula-ctl-fd" ;; + "scripts/bacula-ctl-sd") CONFIG_FILES="$CONFIG_FILES scripts/bacula-ctl-sd" ;; + "scripts/devel_bacula") CONFIG_FILES="$CONFIG_FILES scripts/devel_bacula" ;; + "scripts/Makefile") CONFIG_FILES="$CONFIG_FILES scripts/Makefile" ;; + "scripts/logrotate") CONFIG_FILES="$CONFIG_FILES scripts/logrotate" ;; + "scripts/mtx-changer") CONFIG_FILES="$CONFIG_FILES scripts/mtx-changer" ;; + "scripts/disk-changer") CONFIG_FILES="$CONFIG_FILES scripts/disk-changer" ;; + "scripts/logwatch/Makefile") CONFIG_FILES="$CONFIG_FILES scripts/logwatch/Makefile" ;; + "scripts/logwatch/logfile.bacula.conf") CONFIG_FILES="$CONFIG_FILES scripts/logwatch/logfile.bacula.conf" ;; + "scripts/bat.desktop") CONFIG_FILES="$CONFIG_FILES scripts/bat.desktop" ;; + "scripts/bat.desktop.xsu") CONFIG_FILES="$CONFIG_FILES scripts/bat.desktop.xsu" ;; + "scripts/bat.desktop.consolehelper") CONFIG_FILES="$CONFIG_FILES scripts/bat.desktop.consolehelper" ;; + "scripts/bat.console_apps") CONFIG_FILES="$CONFIG_FILES scripts/bat.console_apps" ;; + "scripts/bacula-tray-monitor.desktop") CONFIG_FILES="$CONFIG_FILES scripts/bacula-tray-monitor.desktop" ;; + "src/Makefile") CONFIG_FILES="$CONFIG_FILES src/Makefile" ;; + "src/host.h") CONFIG_FILES="$CONFIG_FILES src/host.h" ;; + "src/console/Makefile") CONFIG_FILES="$CONFIG_FILES src/console/Makefile" ;; + "src/console/bconsole.conf") CONFIG_FILES="$CONFIG_FILES src/console/bconsole.conf" ;; + "src/qt-console/bat.conf") CONFIG_FILES="$CONFIG_FILES src/qt-console/bat.conf" ;; + "src/qt-console/bat.pro") CONFIG_FILES="$CONFIG_FILES src/qt-console/bat.pro" ;; + "src/qt-console/bat.pro.mingw32") CONFIG_FILES="$CONFIG_FILES src/qt-console/bat.pro.mingw32" ;; + "src/qt-console/bat.pro.mingw64") CONFIG_FILES="$CONFIG_FILES src/qt-console/bat.pro.mingw64" ;; + "src/qt-console/install_conf_file") CONFIG_FILES="$CONFIG_FILES src/qt-console/install_conf_file" ;; + "src/qt-console/tray-monitor/tray-monitor.conf") CONFIG_FILES="$CONFIG_FILES 
src/qt-console/tray-monitor/tray-monitor.conf" ;; + "src/qt-console/tray-monitor/bacula-tray-monitor.conf") CONFIG_FILES="$CONFIG_FILES src/qt-console/tray-monitor/bacula-tray-monitor.conf" ;; + "src/qt-console/tray-monitor/tray-monitor.pro") CONFIG_FILES="$CONFIG_FILES src/qt-console/tray-monitor/tray-monitor.pro" ;; + "src/qt-console/tray-monitor/tray-monitor.pro.mingw32") CONFIG_FILES="$CONFIG_FILES src/qt-console/tray-monitor/tray-monitor.pro.mingw32" ;; + "src/qt-console/tray-monitor/tray-monitor.pro.mingw64") CONFIG_FILES="$CONFIG_FILES src/qt-console/tray-monitor/tray-monitor.pro.mingw64" ;; + "src/dird/Makefile") CONFIG_FILES="$CONFIG_FILES src/dird/Makefile" ;; + "src/dird/bacula-dir.conf") CONFIG_FILES="$CONFIG_FILES src/dird/bacula-dir.conf" ;; + "src/lib/Makefile") CONFIG_FILES="$CONFIG_FILES src/lib/Makefile" ;; + "src/stored/Makefile") CONFIG_FILES="$CONFIG_FILES src/stored/Makefile" ;; + "src/stored/bacula-sd.conf") CONFIG_FILES="$CONFIG_FILES src/stored/bacula-sd.conf" ;; + "src/filed/Makefile") CONFIG_FILES="$CONFIG_FILES src/filed/Makefile" ;; + "src/filed/bacula-fd.conf") CONFIG_FILES="$CONFIG_FILES src/filed/bacula-fd.conf" ;; + "src/cats/Makefile") CONFIG_FILES="$CONFIG_FILES src/cats/Makefile" ;; + "src/cats/make_catalog_backup.pl") CONFIG_FILES="$CONFIG_FILES src/cats/make_catalog_backup.pl" ;; + "src/cats/make_catalog_backup") CONFIG_FILES="$CONFIG_FILES src/cats/make_catalog_backup" ;; + "src/cats/delete_catalog_backup") CONFIG_FILES="$CONFIG_FILES src/cats/delete_catalog_backup" ;; + "src/cats/create_postgresql_database") CONFIG_FILES="$CONFIG_FILES src/cats/create_postgresql_database" ;; + "src/cats/update_postgresql_tables") CONFIG_FILES="$CONFIG_FILES src/cats/update_postgresql_tables" ;; + "src/cats/make_postgresql_tables") CONFIG_FILES="$CONFIG_FILES src/cats/make_postgresql_tables" ;; + "src/cats/grant_postgresql_privileges") CONFIG_FILES="$CONFIG_FILES src/cats/grant_postgresql_privileges" ;; + "src/cats/drop_postgresql_tables") CONFIG_FILES="$CONFIG_FILES src/cats/drop_postgresql_tables" ;; + "src/cats/drop_postgresql_database") CONFIG_FILES="$CONFIG_FILES src/cats/drop_postgresql_database" ;; + "src/cats/create_mysql_database") CONFIG_FILES="$CONFIG_FILES src/cats/create_mysql_database" ;; + "src/cats/update_mysql_tables") CONFIG_FILES="$CONFIG_FILES src/cats/update_mysql_tables" ;; + "src/cats/make_mysql_tables") CONFIG_FILES="$CONFIG_FILES src/cats/make_mysql_tables" ;; + "src/cats/grant_mysql_privileges") CONFIG_FILES="$CONFIG_FILES src/cats/grant_mysql_privileges" ;; + "src/cats/drop_mysql_tables") CONFIG_FILES="$CONFIG_FILES src/cats/drop_mysql_tables" ;; + "src/cats/drop_mysql_database") CONFIG_FILES="$CONFIG_FILES src/cats/drop_mysql_database" ;; + "src/cats/create_sqlite3_database") CONFIG_FILES="$CONFIG_FILES src/cats/create_sqlite3_database" ;; + "src/cats/update_sqlite3_tables") CONFIG_FILES="$CONFIG_FILES src/cats/update_sqlite3_tables" ;; + "src/cats/make_sqlite3_tables") CONFIG_FILES="$CONFIG_FILES src/cats/make_sqlite3_tables" ;; + "src/cats/grant_sqlite3_privileges") CONFIG_FILES="$CONFIG_FILES src/cats/grant_sqlite3_privileges" ;; + "src/cats/drop_sqlite3_tables") CONFIG_FILES="$CONFIG_FILES src/cats/drop_sqlite3_tables" ;; + "src/cats/drop_sqlite3_database") CONFIG_FILES="$CONFIG_FILES src/cats/drop_sqlite3_database" ;; + "src/cats/sqlite") CONFIG_FILES="$CONFIG_FILES src/cats/sqlite" ;; + "src/cats/mysql") CONFIG_FILES="$CONFIG_FILES src/cats/mysql" ;; + "src/cats/create_bacula_database") CONFIG_FILES="$CONFIG_FILES 
src/cats/create_bacula_database" ;; + "src/cats/update_bacula_tables") CONFIG_FILES="$CONFIG_FILES src/cats/update_bacula_tables" ;; + "src/cats/grant_bacula_privileges") CONFIG_FILES="$CONFIG_FILES src/cats/grant_bacula_privileges" ;; + "src/cats/make_bacula_tables") CONFIG_FILES="$CONFIG_FILES src/cats/make_bacula_tables" ;; + "src/cats/drop_bacula_tables") CONFIG_FILES="$CONFIG_FILES src/cats/drop_bacula_tables" ;; + "src/cats/drop_bacula_database") CONFIG_FILES="$CONFIG_FILES src/cats/drop_bacula_database" ;; + "src/cats/install-default-backend") CONFIG_FILES="$CONFIG_FILES src/cats/install-default-backend" ;; + "src/findlib/Makefile") CONFIG_FILES="$CONFIG_FILES src/findlib/Makefile" ;; + "src/tools/Makefile") CONFIG_FILES="$CONFIG_FILES src/tools/Makefile" ;; + "src/plugins/fd/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/fd/Makefile" ;; + "src/plugins/sd/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/sd/Makefile" ;; + "src/plugins/dir/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/dir/Makefile" ;; + "po/Makefile.in") CONFIG_FILES="$CONFIG_FILES po/Makefile.in" ;; + "updatedb/update_mysql_tables") CONFIG_FILES="$CONFIG_FILES updatedb/update_mysql_tables" ;; + "updatedb/update_sqlite3_tables") CONFIG_FILES="$CONFIG_FILES updatedb/update_sqlite3_tables" ;; + "updatedb/update_postgresql_tables") CONFIG_FILES="$CONFIG_FILES updatedb/update_postgresql_tables" ;; + "updatedb/update_mysql_tables_9_to_10") CONFIG_FILES="$CONFIG_FILES updatedb/update_mysql_tables_9_to_10" ;; + "updatedb/update_sqlite3_tables_9_to_10") CONFIG_FILES="$CONFIG_FILES updatedb/update_sqlite3_tables_9_to_10" ;; + "updatedb/update_postgresql_tables_9_to_10") CONFIG_FILES="$CONFIG_FILES updatedb/update_postgresql_tables_9_to_10" ;; + "updatedb/update_mysql_tables_10_to_11") CONFIG_FILES="$CONFIG_FILES updatedb/update_mysql_tables_10_to_11" ;; + "updatedb/update_sqlite3_tables_10_to_11") CONFIG_FILES="$CONFIG_FILES updatedb/update_sqlite3_tables_10_to_11" ;; + "updatedb/update_postgresql_tables_10_to_11") CONFIG_FILES="$CONFIG_FILES updatedb/update_postgresql_tables_10_to_11" ;; + "updatedb/update_mysql_tables_11_to_12") CONFIG_FILES="$CONFIG_FILES updatedb/update_mysql_tables_11_to_12" ;; + "updatedb/update_sqlite3_tables_11_to_12") CONFIG_FILES="$CONFIG_FILES updatedb/update_sqlite3_tables_11_to_12" ;; + "updatedb/update_postgresql_tables_11_to_12") CONFIG_FILES="$CONFIG_FILES updatedb/update_postgresql_tables_11_to_12" ;; + "examples/nagios/check_bacula/Makefile") CONFIG_FILES="$CONFIG_FILES examples/nagios/check_bacula/Makefile" ;; + "platforms/rpms/redhat/bacula.spec") CONFIG_FILES="$CONFIG_FILES platforms/rpms/redhat/bacula.spec" ;; + "platforms/rpms/redhat/bacula-bat.spec") CONFIG_FILES="$CONFIG_FILES platforms/rpms/redhat/bacula-bat.spec" ;; + "platforms/rpms/redhat/bacula-docs.spec") CONFIG_FILES="$CONFIG_FILES platforms/rpms/redhat/bacula-docs.spec" ;; + "platforms/rpms/redhat/bacula-mtx.spec") CONFIG_FILES="$CONFIG_FILES platforms/rpms/redhat/bacula-mtx.spec" ;; + "platforms/rpms/suse/bacula.spec") CONFIG_FILES="$CONFIG_FILES platforms/rpms/suse/bacula.spec" ;; + "platforms/rpms/suse/bacula-bat.spec") CONFIG_FILES="$CONFIG_FILES platforms/rpms/suse/bacula-bat.spec" ;; + "platforms/rpms/suse/bacula-docs.spec") CONFIG_FILES="$CONFIG_FILES platforms/rpms/suse/bacula-docs.spec" ;; + "platforms/rpms/suse/bacula-mtx.spec") CONFIG_FILES="$CONFIG_FILES platforms/rpms/suse/bacula-mtx.spec" ;; + "$PFILES") CONFIG_FILES="$CONFIG_FILES $PFILES" ;; + "default") CONFIG_COMMANDS="$CONFIG_COMMANDS default" ;; + 
+ *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; + esac +done + + +# If the user did not use the arguments to specify the items to instantiate, +# then the envvar interface is used. Set only those that are not. +# We use the long form for the default assignment because of an extremely +# bizarre bug on SunOS 4.1.3. +if $ac_need_defaults; then + test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files + test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers + test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands +fi + +# Have a temporary directory for convenience. Make it in the build tree +# simply because there is no reason against having it here, and in addition, +# creating and moving files from /tmp can sometimes cause problems. +# Hook for its removal unless debugging. +# Note that there is a small window in which the directory will not be cleaned: +# after its creation but before its name has been assigned to `$tmp'. +$debug || +{ + tmp= ac_tmp= + trap 'exit_status=$? + : "${ac_tmp:=$tmp}" + { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status +' 0 + trap 'as_fn_exit 1' 1 2 13 15 +} +# Create a (secure) tmp directory for tmp files. + +{ + tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && + test -d "$tmp" +} || +{ + tmp=./conf$$-$RANDOM + (umask 077 && mkdir "$tmp") +} || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5 +ac_tmp=$tmp + +# Set up the scripts for CONFIG_FILES section. +# No need to generate them if there are no CONFIG_FILES. +# This happens for instance with `./config.status config.h'. +if test -n "$CONFIG_FILES"; then + +if $AWK 'BEGIN { getline <"/dev/null" }' /dev/null; then + ac_cs_awk_getline=: + ac_cs_awk_pipe_init= + ac_cs_awk_read_file=' + while ((getline aline < (F[key])) > 0) + print(aline) + close(F[key])' + ac_cs_awk_pipe_fini= +else + ac_cs_awk_getline=false + ac_cs_awk_pipe_init="print \"cat <<'|#_!!_#|' &&\"" + ac_cs_awk_read_file=' + print "|#_!!_#|" + print "cat " F[key] " &&" + '$ac_cs_awk_pipe_init + # The final `:' finishes the AND list. + ac_cs_awk_pipe_fini='END { print "|#_!!_#|"; print ":" }' +fi +ac_cr=`echo X | tr X '\015'` +# On cygwin, bash can eat \r inside `` if the user requested igncr. +# But we know of no other shell where ac_cr would be empty at this +# point, so we can use a bashism as a fallback. +if test "x$ac_cr" = x; then + eval ac_cr=\$\'\\r\' +fi +ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` +if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then + ac_cs_awk_cr='\\r' +else + ac_cs_awk_cr=$ac_cr +fi + +echo 'BEGIN {' >"$ac_tmp/subs1.awk" && +_ACEOF + +# Create commands to substitute file output variables. +{ + echo "cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1" && + echo 'cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK &&' && + echo "$ac_subst_files" | sed 's/.*/F["&"]="$&"/' && + echo "_ACAWK" && + echo "_ACEOF" +} >conf$$files.sh && +. ./conf$$files.sh || + as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 +rm -f conf$$files.sh + +{ + echo "cat >conf$$subs.awk <<_ACEOF" && + echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && + echo "_ACEOF" +} >conf$$subs.sh || + as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 +ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'` +ac_delim='%!_!# ' +for ac_last_try in false false false false false :; do + . ./conf$$subs.sh || + as_fn_error $? 
"could not make $CONFIG_STATUS" "$LINENO" 5 + + ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` + if test $ac_delim_n = $ac_delim_num; then + break + elif $ac_last_try; then + as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 + else + ac_delim="$ac_delim!$ac_delim _$ac_delim!! " + fi +done +rm -f conf$$subs.sh + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK && +_ACEOF +sed -n ' +h +s/^/S["/; s/!.*/"]=/ +p +g +s/^[^!]*!// +:repl +t repl +s/'"$ac_delim"'$// +t delim +:nl +h +s/\(.\{148\}\)..*/\1/ +t more1 +s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ +p +n +b repl +:more1 +s/["\\]/\\&/g; s/^/"/; s/$/"\\/ +p +g +s/.\{148\}// +t nl +:delim +h +s/\(.\{148\}\)..*/\1/ +t more2 +s/["\\]/\\&/g; s/^/"/; s/$/"/ +p +b +:more2 +s/["\\]/\\&/g; s/^/"/; s/$/"\\/ +p +g +s/.\{148\}// +t delim +' >$CONFIG_STATUS || ac_write_fail=1 +rm -f conf$$subs.awk +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +_ACAWK +cat >>"\$ac_tmp/subs1.awk" <<_ACAWK && + for (key in S) S_is_set[key] = 1 + FS = "" + \$ac_cs_awk_pipe_init +} +{ + line = $ 0 + nfields = split(line, field, "@") + substed = 0 + len = length(field[1]) + for (i = 2; i < nfields; i++) { + key = field[i] + keylen = length(key) + if (S_is_set[key]) { + value = S[key] + line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) + len += length(value) + length(field[++i]) + substed = 1 + } else + len += 1 + keylen + } + if (nfields == 3 && !substed) { + key = field[2] + if (F[key] != "" && line ~ /^[ ]*@.*@[ ]*$/) { + \$ac_cs_awk_read_file + next + } + } + print line +} +\$ac_cs_awk_pipe_fini +_ACAWK +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then + sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" +else + cat +fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \ + || as_fn_error $? "could not setup config files machinery" "$LINENO" 5 +_ACEOF + +# VPATH may cause trouble with some makes, so we remove sole $(srcdir), +# ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and +# trailing colons and then remove the whole line if VPATH becomes empty +# (actually we leave an empty line to preserve line numbers). +if test "x$srcdir" = x.; then + ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{ +h +s/// +s/^/:/ +s/[ ]*$/:/ +s/:\$(srcdir):/:/g +s/:\${srcdir}:/:/g +s/:@srcdir@:/:/g +s/^:*// +s/:*$// +x +s/\(=[ ]*\).*/\1/ +G +s/\n// +s/^[^=]*=[ ]*$// +}' +fi + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +fi # test -n "$CONFIG_FILES" + +# Set up the scripts for CONFIG_HEADERS section. +# No need to generate them if there are no CONFIG_HEADERS. +# This happens for instance with `./config.status Makefile'. +if test -n "$CONFIG_HEADERS"; then +cat >"$ac_tmp/defines.awk" <<\_ACAWK || +BEGIN { +_ACEOF + +# Transform confdefs.h into an awk script `defines.awk', embedded as +# here-document in config.status, that substitutes the proper values into +# config.h.in to produce config.h. + +# Create a delimiter string that does not exist in confdefs.h, to ease +# handling of long lines. +ac_delim='%!_!# ' +for ac_last_try in false false :; do + ac_tt=`sed -n "/$ac_delim/p" confdefs.h` + if test -z "$ac_tt"; then + break + elif $ac_last_try; then + as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5 + else + ac_delim="$ac_delim!$ac_delim _$ac_delim!! " + fi +done + +# For the awk script, D is an array of macro values keyed by name, +# likewise P contains macro parameters if any. Preserve backslash +# newline sequences. 
+ +ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]* +sed -n ' +s/.\{148\}/&'"$ac_delim"'/g +t rset +:rset +s/^[ ]*#[ ]*define[ ][ ]*/ / +t def +d +:def +s/\\$// +t bsnl +s/["\\]/\\&/g +s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ +D["\1"]=" \3"/p +s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p +d +:bsnl +s/["\\]/\\&/g +s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ +D["\1"]=" \3\\\\\\n"\\/p +t cont +s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p +t cont +d +:cont +n +s/.\{148\}/&'"$ac_delim"'/g +t clear +:clear +s/\\$// +t bsnlc +s/["\\]/\\&/g; s/^/"/; s/$/"/p +d +:bsnlc +s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p +b cont +' >$CONFIG_STATUS || ac_write_fail=1 + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 + for (key in D) D_is_set[key] = 1 + FS = "" +} +/^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ { + line = \$ 0 + split(line, arg, " ") + if (arg[1] == "#") { + defundef = arg[2] + mac1 = arg[3] + } else { + defundef = substr(arg[1], 2) + mac1 = arg[2] + } + split(mac1, mac2, "(") #) + macro = mac2[1] + prefix = substr(line, 1, index(line, defundef) - 1) + if (D_is_set[macro]) { + # Preserve the white space surrounding the "#". + print prefix "define", macro P[macro] D[macro] + next + } else { + # Replace #undef with comments. This is necessary, for example, + # in the case of _POSIX_SOURCE, which is predefined and required + # on some systems where configure will not decide to define it. + if (defundef == "undef") { + print "/*", prefix defundef, macro, "*/" + next + } + } +} +{ print } +_ACAWK +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 + as_fn_error $? "could not setup config headers machinery" "$LINENO" 5 +fi # test -n "$CONFIG_HEADERS" + + +eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS :C $CONFIG_COMMANDS" +shift +for ac_tag +do + case $ac_tag in + :[FHLC]) ac_mode=$ac_tag; continue;; + esac + case $ac_mode$ac_tag in + :[FHL]*:*);; + :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;; + :[FH]-) ac_tag=-:-;; + :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; + esac + ac_save_IFS=$IFS + IFS=: + set x $ac_tag + IFS=$ac_save_IFS + shift + ac_file=$1 + shift + + case $ac_mode in + :L) ac_source=$1;; + :[FH]) + ac_file_inputs= + for ac_f + do + case $ac_f in + -) ac_f="$ac_tmp/stdin";; + *) # Look for the file first in the build tree, then in the source tree + # (if the path is not absolute). The absolute path cannot be DOS-style, + # because $ac_f cannot contain `:'. + test -f "$ac_f" || + case $ac_f in + [\\/$]*) false;; + *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; + esac || + as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; + esac + case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac + as_fn_append ac_file_inputs " '$ac_f'" + done + + # Let's still pretend it is `configure' which instantiates (i.e., don't + # use $as_me), people would be surprised to read: + # /* config.h. Generated by config.status. */ + configure_input='Generated from '` + $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' + `' by configure.' + if test x"$ac_file" != x-; then + configure_input="$ac_file. $configure_input" + { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 +$as_echo "$as_me: creating $ac_file" >&6;} + fi + # Neutralize special characters interpreted by sed in replacement strings. 
+ case $configure_input in #( + *\&* | *\|* | *\\* ) + ac_sed_conf_input=`$as_echo "$configure_input" | + sed 's/[\\\\&|]/\\\\&/g'`;; #( + *) ac_sed_conf_input=$configure_input;; + esac + + case $ac_tag in + *:-:* | *:-) cat >"$ac_tmp/stdin" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; + esac + ;; + esac + + ac_dir=`$as_dirname -- "$ac_file" || +$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$ac_file" : 'X\(//\)[^/]' \| \ + X"$ac_file" : 'X\(//\)$' \| \ + X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$ac_file" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + as_dir="$ac_dir"; as_fn_mkdir_p + ac_builddir=. + +case "$ac_dir" in +.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; +*) + ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` + # A ".." for each directory in $ac_dir_suffix. + ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` + case $ac_top_builddir_sub in + "") ac_top_builddir_sub=. ac_top_build_prefix= ;; + *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; + esac ;; +esac +ac_abs_top_builddir=$ac_pwd +ac_abs_builddir=$ac_pwd$ac_dir_suffix +# for backward compatibility: +ac_top_builddir=$ac_top_build_prefix + +case $srcdir in + .) # We are building in place. + ac_srcdir=. + ac_top_srcdir=$ac_top_builddir_sub + ac_abs_top_srcdir=$ac_pwd ;; + [\\/]* | ?:[\\/]* ) # Absolute name. + ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir + ac_abs_top_srcdir=$srcdir ;; + *) # Relative name. + ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_build_prefix$srcdir + ac_abs_top_srcdir=$ac_pwd/$srcdir ;; +esac +ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix + + + case $ac_mode in + :F) + # + # CONFIG_FILE + # + + case $INSTALL in + [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; + *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;; + esac +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# If the template does not know about datarootdir, expand it. +# FIXME: This hack should be removed a few years after 2.60. +ac_datarootdir_hack=; ac_datarootdir_seen= +ac_sed_dataroot=' +/datarootdir/ { + p + q +} +/@datadir@/p +/@docdir@/p +/@infodir@/p +/@localedir@/p +/@mandir@/p' +case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in +*datarootdir*) ac_datarootdir_seen=yes;; +*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 +$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 + ac_datarootdir_hack=' + s&@datadir@&$datadir&g + s&@docdir@&$docdir&g + s&@infodir@&$infodir&g + s&@localedir@&$localedir&g + s&@mandir@&$mandir&g + s&\\\${datarootdir}&$datarootdir&g' ;; +esac +_ACEOF + +# Neutralize VPATH when `$srcdir' = `.'. +# Shell code in configure.ac might set extrasub. +# FIXME: do we really want to maintain this feature? 
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +ac_sed_extra="$ac_vpsub +$extrasub +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +:t +/@[a-zA-Z_][a-zA-Z_0-9]*@/!b +s|@configure_input@|$ac_sed_conf_input|;t t +s&@top_builddir@&$ac_top_builddir_sub&;t t +s&@top_build_prefix@&$ac_top_build_prefix&;t t +s&@srcdir@&$ac_srcdir&;t t +s&@abs_srcdir@&$ac_abs_srcdir&;t t +s&@top_srcdir@&$ac_top_srcdir&;t t +s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t +s&@builddir@&$ac_builddir&;t t +s&@abs_builddir@&$ac_abs_builddir&;t t +s&@abs_top_builddir@&$ac_abs_top_builddir&;t t +s&@INSTALL@&$ac_INSTALL&;t t +$ac_datarootdir_hack +" +eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | +if $ac_cs_awk_getline; then + $AWK -f "$ac_tmp/subs.awk" +else + $AWK -f "$ac_tmp/subs.awk" | $SHELL +fi \ + >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + +test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && + { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && + { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ + "$ac_tmp/out"`; test -z "$ac_out"; } && + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' +which seems to be undefined. Please make sure it is defined" >&5 +$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' +which seems to be undefined. Please make sure it is defined" >&2;} + + rm -f "$ac_tmp/stdin" + case $ac_file in + -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";; + *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";; + esac \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + ;; + :H) + # + # CONFIG_HEADER + # + if test x"$ac_file" != x-; then + { + $as_echo "/* $configure_input */" \ + && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" + } >"$ac_tmp/config.h" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then + { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 +$as_echo "$as_me: $ac_file is unchanged" >&6;} + else + rm -f "$ac_file" + mv "$ac_tmp/config.h" "$ac_file" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + fi + else + $as_echo "/* $configure_input */" \ + && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \ + || as_fn_error $? "could not create -" "$LINENO" 5 + fi + ;; + + :C) { $as_echo "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5 +$as_echo "$as_me: executing $ac_file commands" >&6;} + ;; + esac + + + case $ac_file$ac_mode in + "libtool":C) + + # See if we are running on zsh, and set the options which allow our + # commands through without removal of \ escapes. + if test -n "${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST + fi + + cfgfile="${ofile}T" + trap "$RM \"$cfgfile\"; exit 1" 1 2 15 + $RM "$cfgfile" + + cat <<_LT_EOF >> "$cfgfile" +#! $SHELL + +# `$ECHO "$ofile" | sed 's%^.*/%%'` - Provide generalized library-building support services. +# Generated automatically by $as_me ($PACKAGE$TIMESTAMP) $VERSION +# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: +# NOTE: Changes made to this file will be lost: look at ltmain.sh. +# +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, +# 2006, 2007, 2008, 2009, 2010, 2011 Free Software +# Foundation, Inc. +# Written by Gordon Matzigkeit, 1996 +# +# This file is part of GNU Libtool. 
+# +# GNU Libtool is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License as +# published by the Free Software Foundation; either version 2 of +# the License, or (at your option) any later version. +# +# As a special exception to the GNU General Public License, +# if you distribute this file as part of a program or library that +# is built using GNU Libtool, you may include this file under the +# same distribution terms that you use for the rest of that program. +# +# GNU Libtool is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GNU Libtool; see the file COPYING. If not, a copy +# can be downloaded from http://www.gnu.org/licenses/gpl.html, or +# obtained by writing to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + + +# The names of the tagged configurations supported by this script. +available_tags="CXX " + +# ### BEGIN LIBTOOL CONFIG + +# Which release of libtool.m4 was used? +macro_version=$macro_version +macro_revision=$macro_revision + +# Whether or not to build shared libraries. +build_libtool_libs=$enable_shared + +# Whether or not to build static libraries. +build_old_libs=$enable_static + +# What type of objects to build. +pic_mode=$pic_mode + +# Whether or not to optimize for fast installation. +fast_install=$enable_fast_install + +# Shell to use when invoking shell scripts. +SHELL=$lt_SHELL + +# An echo program that protects backslashes. +ECHO=$lt_ECHO + +# The PATH separator for the build system. +PATH_SEPARATOR=$lt_PATH_SEPARATOR + +# The host system. +host_alias=$host_alias +host=$host +host_os=$host_os + +# The build system. +build_alias=$build_alias +build=$build +build_os=$build_os + +# A sed program that does not truncate output. +SED=$lt_SED + +# Sed that helps us avoid accidentally triggering echo(1) options like -n. +Xsed="\$SED -e 1s/^X//" + +# A grep program that handles long lines. +GREP=$lt_GREP + +# An ERE matcher. +EGREP=$lt_EGREP + +# A literal string matcher. +FGREP=$lt_FGREP + +# A BSD- or MS-compatible name lister. +NM=$lt_NM + +# Whether we need soft or hard links. +LN_S=$lt_LN_S + +# What is the maximum length of a command? +max_cmd_len=$max_cmd_len + +# Object file suffix (normally "o"). +objext=$ac_objext + +# Executable file suffix (normally ""). +exeext=$exeext + +# whether the shell understands "unset". +lt_unset=$lt_unset + +# turn spaces into newlines. +SP2NL=$lt_lt_SP2NL + +# turn newlines into spaces. +NL2SP=$lt_lt_NL2SP + +# convert \$build file names to \$host format. +to_host_file_cmd=$lt_cv_to_host_file_cmd + +# convert \$build files to toolchain format. +to_tool_file_cmd=$lt_cv_to_tool_file_cmd + +# An object symbol dumper. +OBJDUMP=$lt_OBJDUMP + +# Method to check whether dependent libraries are shared objects. +deplibs_check_method=$lt_deplibs_check_method + +# Command to use when deplibs_check_method = "file_magic". +file_magic_cmd=$lt_file_magic_cmd + +# How to find potential files when deplibs_check_method = "file_magic". +file_magic_glob=$lt_file_magic_glob + +# Find potential files using nocaseglob when deplibs_check_method = "file_magic". +want_nocaseglob=$lt_want_nocaseglob + +# DLL creation program. 
+DLLTOOL=$lt_DLLTOOL + +# Command to associate shared and link libraries. +sharedlib_from_linklib_cmd=$lt_sharedlib_from_linklib_cmd + +# The archiver. +AR=$lt_AR + +# Flags to create an archive. +AR_FLAGS=$lt_AR_FLAGS + +# How to feed a file listing to the archiver. +archiver_list_spec=$lt_archiver_list_spec + +# A symbol stripping program. +STRIP=$lt_STRIP + +# Commands used to install an old-style archive. +RANLIB=$lt_RANLIB +old_postinstall_cmds=$lt_old_postinstall_cmds +old_postuninstall_cmds=$lt_old_postuninstall_cmds + +# Whether to use a lock for old archive extraction. +lock_old_archive_extraction=$lock_old_archive_extraction + +# A C compiler. +LTCC=$lt_CC + +# LTCC compiler flags. +LTCFLAGS=$lt_CFLAGS + +# Take the output of nm and produce a listing of raw symbols and C names. +global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe + +# Transform the output of nm in a proper C declaration. +global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl + +# Transform the output of nm in a C name address pair. +global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address + +# Transform the output of nm in a C name address pair when lib prefix is needed. +global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix + +# Specify filename containing input files for \$NM. +nm_file_list_spec=$lt_nm_file_list_spec + +# The root where to search for dependent libraries,and in which our libraries should be installed. +lt_sysroot=$lt_sysroot + +# The name of the directory that contains temporary libtool files. +objdir=$objdir + +# Used to examine libraries when file_magic_cmd begins with "file". +MAGIC_CMD=$MAGIC_CMD + +# Must we lock files when doing compilation? +need_locks=$lt_need_locks + +# Manifest tool. +MANIFEST_TOOL=$lt_MANIFEST_TOOL + +# Tool to manipulate archived DWARF debug symbol files on Mac OS X. +DSYMUTIL=$lt_DSYMUTIL + +# Tool to change global to local symbols on Mac OS X. +NMEDIT=$lt_NMEDIT + +# Tool to manipulate fat objects and archives on Mac OS X. +LIPO=$lt_LIPO + +# ldd/readelf like tool for Mach-O binaries on Mac OS X. +OTOOL=$lt_OTOOL + +# ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4. +OTOOL64=$lt_OTOOL64 + +# Old archive suffix (normally "a"). +libext=$libext + +# Shared library suffix (normally ".so"). +shrext_cmds=$lt_shrext_cmds + +# The commands to extract the exported symbol list from a shared archive. +extract_expsyms_cmds=$lt_extract_expsyms_cmds + +# Variables whose values should be saved in libtool wrapper scripts and +# restored at link time. +variables_saved_for_relink=$lt_variables_saved_for_relink + +# Do we need the "lib" prefix for modules? +need_lib_prefix=$need_lib_prefix + +# Do we need a version for libraries? +need_version=$need_version + +# Library versioning type. +version_type=$version_type + +# Shared library runtime path variable. +runpath_var=$runpath_var + +# Shared library path variable. +shlibpath_var=$shlibpath_var + +# Is shlibpath searched before the hard-coded library search path? +shlibpath_overrides_runpath=$shlibpath_overrides_runpath + +# Format of library name prefix. +libname_spec=$lt_libname_spec + +# List of archive names. First name is the real one, the rest are links. +# The last name is the one that the linker finds with -lNAME +library_names_spec=$lt_library_names_spec + +# The coded name of the library, if different from the real name. +soname_spec=$lt_soname_spec + +# Permission mode override for installation of shared libraries. 
+install_override_mode=$lt_install_override_mode + +# Command to use after installation of a shared archive. +postinstall_cmds=$lt_postinstall_cmds + +# Command to use after uninstallation of a shared archive. +postuninstall_cmds=$lt_postuninstall_cmds + +# Commands used to finish a libtool library installation in a directory. +finish_cmds=$lt_finish_cmds + +# As "finish_cmds", except a single script fragment to be evaled but +# not shown. +finish_eval=$lt_finish_eval + +# Whether we should hardcode library paths into libraries. +hardcode_into_libs=$hardcode_into_libs + +# Compile-time system search path for libraries. +sys_lib_search_path_spec=$lt_sys_lib_search_path_spec + +# Run-time system search path for libraries. +sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec + +# Whether dlopen is supported. +dlopen_support=$enable_dlopen + +# Whether dlopen of programs is supported. +dlopen_self=$enable_dlopen_self + +# Whether dlopen of statically linked programs is supported. +dlopen_self_static=$enable_dlopen_self_static + +# Commands to strip libraries. +old_striplib=$lt_old_striplib +striplib=$lt_striplib + + +# The linker used to build libraries. +LD=$lt_LD + +# How to create reloadable object files. +reload_flag=$lt_reload_flag +reload_cmds=$lt_reload_cmds + +# Commands used to build an old-style archive. +old_archive_cmds=$lt_old_archive_cmds + +# A language specific compiler. +CC=$lt_compiler + +# Is the compiler the GNU compiler? +with_gcc=$GCC + +# Compiler flag to turn off builtin functions. +no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag + +# Additional compiler flags for building library objects. +pic_flag=$lt_lt_prog_compiler_pic + +# How to pass a linker flag through the compiler. +wl=$lt_lt_prog_compiler_wl + +# Compiler flag to prevent dynamic linking. +link_static_flag=$lt_lt_prog_compiler_static + +# Does compiler simultaneously support -c and -o options? +compiler_c_o=$lt_lt_cv_prog_compiler_c_o + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=$archive_cmds_need_lc + +# Whether or not to disallow shared libs when runtime libs are static. +allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec=$lt_export_dynamic_flag_spec + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec=$lt_whole_archive_flag_spec + +# Whether the compiler copes with passing no objects directly. +compiler_needs_object=$lt_compiler_needs_object + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds=$lt_old_archive_from_new_cmds + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds + +# Commands used to build a shared archive. +archive_cmds=$lt_archive_cmds +archive_expsym_cmds=$lt_archive_expsym_cmds + +# Commands used to build a loadable module if different from building +# a shared archive. +module_cmds=$lt_module_cmds +module_expsym_cmds=$lt_module_expsym_cmds + +# Whether we are building with GNU ld or not. +with_gnu_ld=$lt_with_gnu_ld + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag=$lt_allow_undefined_flag + +# Flag that enforces no undefined symbols. +no_undefined_flag=$lt_no_undefined_flag + +# Flag to hardcode \$libdir into a binary during linking. 
+# This must work even if \$libdir does not exist +hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec + +# Whether we need a single "-rpath" flag with a separated argument. +hardcode_libdir_separator=$lt_hardcode_libdir_separator + +# Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes +# DIR into the resulting binary. +hardcode_direct=$hardcode_direct + +# Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes +# DIR into the resulting binary and the resulting library dependency is +# "absolute",i.e impossible to change by setting \${shlibpath_var} if the +# library is relocated. +hardcode_direct_absolute=$hardcode_direct_absolute + +# Set to "yes" if using the -LDIR flag during linking hardcodes DIR +# into the resulting binary. +hardcode_minus_L=$hardcode_minus_L + +# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR +# into the resulting binary. +hardcode_shlibpath_var=$hardcode_shlibpath_var + +# Set to "yes" if building a shared library automatically hardcodes DIR +# into the library and all subsequent libraries and executables linked +# against it. +hardcode_automatic=$hardcode_automatic + +# Set to yes if linker adds runtime paths of dependent libraries +# to runtime path list. +inherit_rpath=$inherit_rpath + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=$link_all_deplibs + +# Set to "yes" if exported symbols are required. +always_export_symbols=$always_export_symbols + +# The commands to list exported symbols. +export_symbols_cmds=$lt_export_symbols_cmds + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms=$lt_exclude_expsyms + +# Symbols that must always be exported. +include_expsyms=$lt_include_expsyms + +# Commands necessary for linking programs (against libraries) with templates. +prelink_cmds=$lt_prelink_cmds + +# Commands necessary for finishing linking programs. +postlink_cmds=$lt_postlink_cmds + +# Specify filename containing input files. +file_list_spec=$lt_file_list_spec + +# How to hardcode a shared library path into an executable. +hardcode_action=$hardcode_action + +# The directories searched by this compiler when creating a shared library. +compiler_lib_search_dirs=$lt_compiler_lib_search_dirs + +# Dependencies to place before and after the objects being linked to +# create a shared library. +predep_objects=$lt_predep_objects +postdep_objects=$lt_postdep_objects +predeps=$lt_predeps +postdeps=$lt_postdeps + +# The library search path used internally by the compiler when linking +# a shared library. +compiler_lib_search_path=$lt_compiler_lib_search_path + +# ### END LIBTOOL CONFIG + +_LT_EOF + + case $host_os in + aix3*) + cat <<\_LT_EOF >> "$cfgfile" +# AIX sometimes has problems with the GCC collect2 program. For some +# reason, if we set the COLLECT_NAMES environment variable, the problems +# vanish in a puff of smoke. +if test "X${COLLECT_NAMES+set}" != Xset; then + COLLECT_NAMES= + export COLLECT_NAMES +fi +_LT_EOF + ;; + esac + + +ltmain="$ac_aux_dir/ltmain.sh" + + + # We use sed instead of cat because bash on DJGPP gets confused if + # if finds mixed CR/LF and LF-only lines. Since sed operates in + # text mode, it properly converts lines to CR/LF. This bash problem + # is reportedly fixed, but why not run on old versions too? 
+ sed '$q' "$ltmain" >> "$cfgfile" \ + || (rm -f "$cfgfile"; exit 1) + + if test x"$xsi_shell" = xyes; then + sed -e '/^func_dirname ()$/,/^} # func_dirname /c\ +func_dirname ()\ +{\ +\ case ${1} in\ +\ */*) func_dirname_result="${1%/*}${2}" ;;\ +\ * ) func_dirname_result="${3}" ;;\ +\ esac\ +} # Extended-shell func_dirname implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: + + + sed -e '/^func_basename ()$/,/^} # func_basename /c\ +func_basename ()\ +{\ +\ func_basename_result="${1##*/}"\ +} # Extended-shell func_basename implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: + + + sed -e '/^func_dirname_and_basename ()$/,/^} # func_dirname_and_basename /c\ +func_dirname_and_basename ()\ +{\ +\ case ${1} in\ +\ */*) func_dirname_result="${1%/*}${2}" ;;\ +\ * ) func_dirname_result="${3}" ;;\ +\ esac\ +\ func_basename_result="${1##*/}"\ +} # Extended-shell func_dirname_and_basename implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: + + + sed -e '/^func_stripname ()$/,/^} # func_stripname /c\ +func_stripname ()\ +{\ +\ # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are\ +\ # positional parameters, so assign one to ordinary parameter first.\ +\ func_stripname_result=${3}\ +\ func_stripname_result=${func_stripname_result#"${1}"}\ +\ func_stripname_result=${func_stripname_result%"${2}"}\ +} # Extended-shell func_stripname implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: + + + sed -e '/^func_split_long_opt ()$/,/^} # func_split_long_opt /c\ +func_split_long_opt ()\ +{\ +\ func_split_long_opt_name=${1%%=*}\ +\ func_split_long_opt_arg=${1#*=}\ +} # Extended-shell func_split_long_opt implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: + + + sed -e '/^func_split_short_opt ()$/,/^} # func_split_short_opt /c\ +func_split_short_opt ()\ +{\ +\ func_split_short_opt_arg=${1#??}\ +\ func_split_short_opt_name=${1%"$func_split_short_opt_arg"}\ +} # Extended-shell func_split_short_opt implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: + + + sed -e '/^func_lo2o ()$/,/^} # func_lo2o /c\ +func_lo2o ()\ +{\ +\ case ${1} in\ +\ *.lo) func_lo2o_result=${1%.lo}.${objext} ;;\ +\ *) func_lo2o_result=${1} ;;\ +\ esac\ +} # Extended-shell func_lo2o implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? 
|| _lt_function_replace_fail=: + + + sed -e '/^func_xform ()$/,/^} # func_xform /c\ +func_xform ()\ +{\ + func_xform_result=${1%.*}.lo\ +} # Extended-shell func_xform implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: + + + sed -e '/^func_arith ()$/,/^} # func_arith /c\ +func_arith ()\ +{\ + func_arith_result=$(( $* ))\ +} # Extended-shell func_arith implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: + + + sed -e '/^func_len ()$/,/^} # func_len /c\ +func_len ()\ +{\ + func_len_result=${#1}\ +} # Extended-shell func_len implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: + +fi + +if test x"$lt_shell_append" = xyes; then + sed -e '/^func_append ()$/,/^} # func_append /c\ +func_append ()\ +{\ + eval "${1}+=\\${2}"\ +} # Extended-shell func_append implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: + + + sed -e '/^func_append_quoted ()$/,/^} # func_append_quoted /c\ +func_append_quoted ()\ +{\ +\ func_quote_for_eval "${2}"\ +\ eval "${1}+=\\\\ \\$func_quote_for_eval_result"\ +} # Extended-shell func_append_quoted implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: + + + # Save a `func_append' function call where possible by direct use of '+=' + sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1+="%g' $cfgfile > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") + test 0 -eq $? || _lt_function_replace_fail=: +else + # Save a `func_append' function call even when '+=' is not available + sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1="$\1%g' $cfgfile > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") + test 0 -eq $? || _lt_function_replace_fail=: +fi + +if test x"$_lt_function_replace_fail" = x":"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Unable to substitute extended shell functions in $ofile" >&5 +$as_echo "$as_me: WARNING: Unable to substitute extended shell functions in $ofile" >&2;} +fi + + + mv -f "$cfgfile" "$ofile" || + (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") + chmod +x "$ofile" + + + cat <<_LT_EOF >> "$ofile" + +# ### BEGIN LIBTOOL TAG CONFIG: CXX + +# The linker used to build libraries. +LD=$lt_LD_CXX + +# How to create reloadable object files. +reload_flag=$lt_reload_flag_CXX +reload_cmds=$lt_reload_cmds_CXX + +# Commands used to build an old-style archive. +old_archive_cmds=$lt_old_archive_cmds_CXX + +# A language specific compiler. +CC=$lt_compiler_CXX + +# Is the compiler the GNU compiler? +with_gcc=$GCC_CXX + +# Compiler flag to turn off builtin functions. +no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag_CXX + +# Additional compiler flags for building library objects. 
+pic_flag=$lt_lt_prog_compiler_pic_CXX + +# How to pass a linker flag through the compiler. +wl=$lt_lt_prog_compiler_wl_CXX + +# Compiler flag to prevent dynamic linking. +link_static_flag=$lt_lt_prog_compiler_static_CXX + +# Does compiler simultaneously support -c and -o options? +compiler_c_o=$lt_lt_cv_prog_compiler_c_o_CXX + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=$archive_cmds_need_lc_CXX + +# Whether or not to disallow shared libs when runtime libs are static. +allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes_CXX + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec=$lt_export_dynamic_flag_spec_CXX + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec=$lt_whole_archive_flag_spec_CXX + +# Whether the compiler copes with passing no objects directly. +compiler_needs_object=$lt_compiler_needs_object_CXX + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds=$lt_old_archive_from_new_cmds_CXX + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds_CXX + +# Commands used to build a shared archive. +archive_cmds=$lt_archive_cmds_CXX +archive_expsym_cmds=$lt_archive_expsym_cmds_CXX + +# Commands used to build a loadable module if different from building +# a shared archive. +module_cmds=$lt_module_cmds_CXX +module_expsym_cmds=$lt_module_expsym_cmds_CXX + +# Whether we are building with GNU ld or not. +with_gnu_ld=$lt_with_gnu_ld_CXX + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag=$lt_allow_undefined_flag_CXX + +# Flag that enforces no undefined symbols. +no_undefined_flag=$lt_no_undefined_flag_CXX + +# Flag to hardcode \$libdir into a binary during linking. +# This must work even if \$libdir does not exist +hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec_CXX + +# Whether we need a single "-rpath" flag with a separated argument. +hardcode_libdir_separator=$lt_hardcode_libdir_separator_CXX + +# Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes +# DIR into the resulting binary. +hardcode_direct=$hardcode_direct_CXX + +# Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes +# DIR into the resulting binary and the resulting library dependency is +# "absolute",i.e impossible to change by setting \${shlibpath_var} if the +# library is relocated. +hardcode_direct_absolute=$hardcode_direct_absolute_CXX + +# Set to "yes" if using the -LDIR flag during linking hardcodes DIR +# into the resulting binary. +hardcode_minus_L=$hardcode_minus_L_CXX + +# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR +# into the resulting binary. +hardcode_shlibpath_var=$hardcode_shlibpath_var_CXX + +# Set to "yes" if building a shared library automatically hardcodes DIR +# into the library and all subsequent libraries and executables linked +# against it. +hardcode_automatic=$hardcode_automatic_CXX + +# Set to yes if linker adds runtime paths of dependent libraries +# to runtime path list. +inherit_rpath=$inherit_rpath_CXX + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=$link_all_deplibs_CXX + +# Set to "yes" if exported symbols are required. +always_export_symbols=$always_export_symbols_CXX + +# The commands to list exported symbols. 
+export_symbols_cmds=$lt_export_symbols_cmds_CXX + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms=$lt_exclude_expsyms_CXX + +# Symbols that must always be exported. +include_expsyms=$lt_include_expsyms_CXX + +# Commands necessary for linking programs (against libraries) with templates. +prelink_cmds=$lt_prelink_cmds_CXX + +# Commands necessary for finishing linking programs. +postlink_cmds=$lt_postlink_cmds_CXX + +# Specify filename containing input files. +file_list_spec=$lt_file_list_spec_CXX + +# How to hardcode a shared library path into an executable. +hardcode_action=$hardcode_action_CXX + +# The directories searched by this compiler when creating a shared library. +compiler_lib_search_dirs=$lt_compiler_lib_search_dirs_CXX + +# Dependencies to place before and after the objects being linked to +# create a shared library. +predep_objects=$lt_predep_objects_CXX +postdep_objects=$lt_postdep_objects_CXX +predeps=$lt_predeps_CXX +postdeps=$lt_postdeps_CXX + +# The library search path used internally by the compiler when linking +# a shared library. +compiler_lib_search_path=$lt_compiler_lib_search_path_CXX + +# ### END LIBTOOL TAG CONFIG: CXX +_LT_EOF + + ;; + "default-1":C) + for ac_file in $CONFIG_FILES; do + # Support "outfile[:infile[:infile...]]" + case "$ac_file" in + *:*) ac_file=`echo "$ac_file"|sed 's%:.*%%'` ;; + esac + # PO directories have a Makefile.in generated from Makefile.in.in. + case "$ac_file" in */Makefile.in) + # Adjust a relative srcdir. + ac_dir=`echo "$ac_file"|sed 's%/[^/][^/]*$%%'` + ac_dir_suffix="/`echo "$ac_dir"|sed 's%^\./%%'`" + ac_dots=`echo "$ac_dir_suffix"|sed 's%/[^/]*%../%g'` + # In autoconf-2.13 it is called $ac_given_srcdir. + # In autoconf-2.50 it is called $srcdir. + test -n "$ac_given_srcdir" || ac_given_srcdir="$srcdir" + case "$ac_given_srcdir" in + .) top_srcdir=`echo $ac_dots|sed 's%/$%%'` ;; + /*) top_srcdir="$ac_given_srcdir" ;; + *) top_srcdir="$ac_dots$ac_given_srcdir" ;; + esac + # Treat a directory as a PO directory if and only if it has a + # POTFILES.in file. This allows packages to have multiple PO + # directories under different names or in different locations. + if test -f "$ac_given_srcdir/$ac_dir/POTFILES.in"; then + rm -f "$ac_dir/POTFILES" + test -n "$as_me" && echo "$as_me: creating $ac_dir/POTFILES" || echo "creating $ac_dir/POTFILES" + cat "$ac_given_srcdir/$ac_dir/POTFILES.in" | sed -e "/^#/d" -e "/^[ ]*\$/d" -e "s,.*, $top_srcdir/& \\\\," | sed -e "\$s/\(.*\) \\\\/\1/" > "$ac_dir/POTFILES" + POMAKEFILEDEPS="POTFILES.in" + # ALL_LINGUAS, POFILES, UPDATEPOFILES, DUMMYPOFILES, GMOFILES depend + # on $ac_dir but don't depend on user-specified configuration + # parameters. + if test -f "$ac_given_srcdir/$ac_dir/LINGUAS"; then + # The LINGUAS file contains the set of available languages. + if test -n "$OBSOLETE_ALL_LINGUAS"; then + test -n "$as_me" && echo "$as_me: setting ALL_LINGUAS in configure.in is obsolete" || echo "setting ALL_LINGUAS in configure.in is obsolete" + fi + ALL_LINGUAS_=`sed -e "/^#/d" -e "s/#.*//" "$ac_given_srcdir/$ac_dir/LINGUAS"` + # Hide the ALL_LINGUAS assigment from automake. + eval 'ALL_LINGUAS''=$ALL_LINGUAS_' + POMAKEFILEDEPS="$POMAKEFILEDEPS LINGUAS" + else + # The set of available languages was given in configure.in. 
+ eval 'ALL_LINGUAS''=$OBSOLETE_ALL_LINGUAS' + fi + # Compute POFILES + # as $(foreach lang, $(ALL_LINGUAS), $(srcdir)/$(lang).po) + # Compute UPDATEPOFILES + # as $(foreach lang, $(ALL_LINGUAS), $(lang).po-update) + # Compute DUMMYPOFILES + # as $(foreach lang, $(ALL_LINGUAS), $(lang).nop) + # Compute GMOFILES + # as $(foreach lang, $(ALL_LINGUAS), $(srcdir)/$(lang).gmo) + case "$ac_given_srcdir" in + .) srcdirpre= ;; + *) srcdirpre='$(srcdir)/' ;; + esac + POFILES= + UPDATEPOFILES= + DUMMYPOFILES= + GMOFILES= + for lang in $ALL_LINGUAS; do + POFILES="$POFILES $srcdirpre$lang.po" + UPDATEPOFILES="$UPDATEPOFILES $lang.po-update" + DUMMYPOFILES="$DUMMYPOFILES $lang.nop" + GMOFILES="$GMOFILES $srcdirpre$lang.gmo" + done + # CATALOGS depends on both $ac_dir and the user's LINGUAS + # environment variable. + INST_LINGUAS= + if test -n "$ALL_LINGUAS"; then + for presentlang in $ALL_LINGUAS; do + useit=no + if test "%UNSET%" != "$LINGUAS"; then + desiredlanguages="$LINGUAS" + else + desiredlanguages="$ALL_LINGUAS" + fi + for desiredlang in $desiredlanguages; do + # Use the presentlang catalog if desiredlang is + # a. equal to presentlang, or + # b. a variant of presentlang (because in this case, + # presentlang can be used as a fallback for messages + # which are not translated in the desiredlang catalog). + case "$desiredlang" in + "$presentlang"*) useit=yes;; + esac + done + if test $useit = yes; then + INST_LINGUAS="$INST_LINGUAS $presentlang" + fi + done + fi + CATALOGS= + if test -n "$INST_LINGUAS"; then + for lang in $INST_LINGUAS; do + CATALOGS="$CATALOGS $lang.gmo" + done + fi + test -n "$as_me" && echo "$as_me: creating $ac_dir/Makefile" || echo "creating $ac_dir/Makefile" + sed -e "/^POTFILES =/r $ac_dir/POTFILES" -e "/^# Makevars/r $ac_given_srcdir/$ac_dir/Makevars" -e "s|@POFILES@|$POFILES|g" -e "s|@UPDATEPOFILES@|$UPDATEPOFILES|g" -e "s|@DUMMYPOFILES@|$DUMMYPOFILES|g" -e "s|@GMOFILES@|$GMOFILES|g" -e "s|@CATALOGS@|$CATALOGS|g" -e "s|@POMAKEFILEDEPS@|$POMAKEFILEDEPS|g" "$ac_dir/Makefile.in" > "$ac_dir/Makefile" + for f in "$ac_given_srcdir/$ac_dir"/Rules-*; do + if test -f "$f"; then + case "$f" in + *.orig | *.bak | *~) ;; + *) cat "$f" >> "$ac_dir/Makefile" ;; + esac + fi + done + fi + ;; + esac + done ;; + "default":C) + ;; + + esac +done # for ac_tag + + +as_fn_exit 0 +_ACEOF +ac_clean_files=$ac_clean_files_save + +test $ac_write_fail = 0 || + as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5 + + +# configure is writing to config.log, and then calls config.status. +# config.status does its own redirection, appending to config.log. +# Unfortunately, on DOS this fails, as config.log is still kept open +# by configure, so config.status won't be able to write to it; its +# output is simply discarded. So we exec the FD to /dev/null, +# effectively closing config.log, so it can be properly (re)opened and +# appended to by config.status. When coming back to configure, we +# need to make the FD available again. +if test "$no_create" != yes; then + ac_cs_success=: + ac_config_status_args= + test "$silent" = yes && + ac_config_status_args="$ac_config_status_args --quiet" + exec 5>/dev/null + $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false + exec 5>>config.log + # Use ||, not &&, to avoid exiting from the if with $? = 1, which + # would make configure fail if this is the last instruction. 
+ $ac_cs_success || as_fn_exit 1 +fi +if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 +$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} +fi + + +if test "${support_bat}" = "yes" ; then + if test "x$QMAKE" = "xnone"; then + as_fn_error $? "Could not find qmake $PATH. Check your Qt installation" "$LINENO" 5 + fi + + cd src/qt-console + echo "Creating bat Makefile" + touch bat + chmod 755 bat + rm -f Makefile + rm -rf moc32 obj32 moc64 obj64 ui32 ui64 + $QMAKE + ${MAKE:-make} clean + + cd tray-monitor + echo "Creating tray-monitor Makefile" + rm -f Makefile + rm -rf moc32 obj32 moc64 obj64 ui32 ui64 + $QMAKE + ${MAKE:-make} clean + $QMAKE + ${MAKE:-make} clean + cd ${BUILD_DIR} +fi + +if test X"$GCC" = "Xyes" ; then + echo "Doing make of dependencies" + ${MAKE:-make} depend +fi + +cd src/qt-console +chmod 755 install_conf_file build-depkgs-qt-console +cd ${BUILD_DIR} + +cd scripts +chmod 755 bacula btraceback mtx-changer +chmod 755 bconsole disk-changer devel_bacula logrotate +cd .. + +c=updatedb +chmod 755 $c/update_mysql_tables_10_to_11 $c/update_sqlite3_tables_10_to_11 +chmod 755 $c/update_postgresql_tables_10_to_11 +chmod 755 $c/update_mysql_tables_11_to_12 $c/update_sqlite3_tables_11_to_12 +chmod 755 $c/update_postgresql_tables_11_to_12 + + +c=src/cats + +chmod 755 $c/create_bacula_database $c/update_bacula_tables $c/make_bacula_tables +chmod 755 $c/grant_bacula_privileges $c/drop_bacula_tables $c/drop_bacula_database + +chmod 755 $c/create_mysql_database $c/update_mysql_tables $c/make_mysql_tables +chmod 755 $c/grant_mysql_privileges $c/drop_mysql_tables $c/drop_mysql_database + +chmod 755 $c/create_sqlite3_database $c/update_sqlite3_tables $c/make_sqlite3_tables +chmod 755 $c/grant_sqlite3_privileges $c/drop_sqlite3_tables $c/drop_sqlite3_database + +chmod 755 $c/create_postgresql_database $c/update_postgresql_tables $c/make_postgresql_tables +chmod 755 $c/grant_postgresql_privileges $c/drop_postgresql_tables $c/drop_postgresql_database + +chmod 755 $c/make_catalog_backup $c/delete_catalog_backup $c/make_catalog_backup.pl +chmod 755 $c/sqlite +chmod 755 $c/mysql + +chmod 755 $c/install-default-backend + +chmod 755 src/win32/build-depkgs-mingw32 src/win32/build-depkgs-mingw-w64 + +if test "x$ac_cv_sys_largefile_CFLAGS" != "xno" ; then + largefile_support="yes" +fi + +if test X"$GCC" = "Xyes" ; then + CCVERSION=`${CC} --version | tr '\n' ' ' | cut -f 3 -d ' '` + if test "x${CCVERSION}" = "x" ; then + CCVERSION=`${CC} --version | tr '\n' ' ' | cut -f 1 -d ' '` + fi + CXXVERSION=`${CXX} --version | tr '\n' ' ' | cut -f 3 -d ' '` + if test x"${CXXVERSION}" = x ; then + CXXVERSION=`${CXX} --version | tr '\n' ' ' | cut -f 1 -d ' '` + fi +fi + +# clean up any old junk +echo " " +echo "Cleaning up" +echo " " +${MAKE:-make} clean + +echo " +Configuration on `date`: + + Host: ${host}${post_host} -- ${DISTNAME} ${DISTVER} + Bacula version: ${BACULA} ${VERSION} (${DATE}) + Source code location: ${srcdir} + Install binaries: ${sbindir} + Install libraries: ${libdir} + Install config files: ${sysconfdir} + Scripts directory: ${scriptdir} + Archive directory: ${archivedir} + Working directory: ${working_dir} + PID directory: ${piddir} + Subsys directory: ${subsysdir} + Man directory: ${mandir} + Data directory: ${datarootdir} + Plugin directory: ${plugindir} + C Compiler: ${CC} ${CCVERSION} + C++ Compiler: ${CXX} ${CXXVERSION} + Compiler 
flags: ${WCFLAGS} ${CFLAGS}
+ Linker flags: ${WLDFLAGS} ${LDFLAGS}
+ Libraries: ${LIBS}
+ Statically Linked Tools: ${support_static_tools}
+ Statically Linked FD: ${support_static_fd}
+ Statically Linked SD: ${support_static_sd}
+ Statically Linked DIR: ${support_static_dir}
+ Statically Linked CONS: ${support_static_cons}
+ Database backends: ${db_backends}
+ Database port: ${db_port}
+ Database name: ${db_name}
+ Database user: ${db_user}
+ Database SSL options: ${db_ssl_options}
+
+ Job Output Email: ${job_email}
+ Traceback Email: ${dump_email}
+ SMTP Host Address: ${smtp_host}
+
+ Director Port: ${dir_port}
+ File daemon Port: ${fd_port}
+ Storage daemon Port: ${sd_port}
+
+ Director User: ${dir_user}
+ Director Group: ${dir_group}
+ Storage Daemon User: ${sd_user}
+ Storage Daemon Group: ${sd_group}
+ File Daemon User: ${fd_user}
+ File Daemon Group: ${fd_group}
+
+ Large file support: $largefile_support
+ Bacula conio support: ${got_conio} ${CONS_LIBS}
+ readline support: ${got_readline} ${PRTREADLINE_SRC}
+ TCP Wrappers support: ${TCPW_MSG} ${WRAPLIBS}
+ TLS support: ${support_tls}
+ Encryption support: ${support_crypto}
+ ZLIB support: ${have_zlib}
+ LZO support: ${have_lzo}
+ enable-smartalloc: ${support_smartalloc}
+ enable-lockmgr: ${support_lockmgr}
+ bat support: ${support_bat}
+ client-only: ${build_client_only}
+ build-dird: ${build_dird}
+ build-stored: ${build_stored}
+ Plugin support: ${have_plugins}
+ AFS support: ${have_afs}
+ ACL support: ${have_acl}
+ XATTR support: ${have_xattr}
+ systemd support: ${support_systemd} ${SYSTEMD_UNITDIR}
+ Batch insert enabled: ${batch_insert_db_backends}
+
+ " > config.out
+
+# create a small shell script useful for support with
+# configure options and config.out info
+cat > scripts/bacula_config << EOF
+#!/bin/sh
+cat << __EOC__
+$ $0 $ac_configure_args
+EOF
+cat config.out >> scripts/bacula_config
+echo __EOC__ >> scripts/bacula_config
+chmod 755 scripts/bacula_config
+
+cat config.out
diff --git a/examples/AFS-README b/examples/AFS-README
new file mode 100644
index 00000000..233ede48
--- /dev/null
+++ b/examples/AFS-README
@@ -0,0 +1,45 @@
+From: Lucas Mingarro
+To: bacula-users@lists.sourceforge.net
+Subject: [Bacula-users] OpenAFS with bacula
+
+Hi,
+ I'm using Bacula for backing up an AFS file set. I don't know if
+anyone else is doing it, but here is my explanation of how Bacula
+works with it.
+
+I'm using Bacula 1.26a on RedHat Linux 7.1 on the Bacula Director
+machine and RedHat Linux 7.1 and OpenAFS 1.26 on the Bacula client machine.
+
+First make a user bacula in your kas server and give it rl permission on
+all volumes that you want to back up with Bacula.
+
+In order for bacula-fd to read the files on your
+AFS server you have to give the client a Kerberos ticket with
+the right privileges to read the volumes. Here is my script to
+obtain the ticket.
+
+I made a script that obtains the ticket and then runs bacula-fd.
+
+I put this script in /sbin/afs-bacula with permissions 700.
+(See the current directory for a copy.)
+
+Then you have to change the bacula-fd start/stop script.
+Replace the line
+
+daemon /usr/local/sbin/bacula-fd $2 -c /usr/local/etc/bacula-fd.conf
+
+with
+
+/sbin/afs_bacula daemon /usr/local/sbin/bacula-fd $2 -c \
+ /usr/local/etc/bacula-fd.conf
+
+
+Ok, that's all. You've got a bacula-fd validated with the Kerberos server.
+
+Lucas Mingarro
+lucas@easytech.com.ar
+
+Note: Don't forget that Kerberos tickets have a limited lifetime :)
+
+See http://www.angelfire.com/hi/plutonic/afs-faq.html for a FAQ on AFS.
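+
+For quick reference, here is a minimal sketch of such a wrapper. It only
+condenses what the full examples/afs-bacula script does; the principal
+name and the password file path below are the ones that script uses,
+while the plain exec "$@" at the end is a simplification you would adapt
+to however you start bacula-fd on your site:
+
+#!/usr/afsws/bin/pagsh
+# Minimal sketch (assumptions noted): running under pagsh gives the
+# process its own PAG; klog reads the bacula principal's password from a
+# root-only file (assumed mode 600) and obtains the AFS token; then the
+# wrapped command -- normally the bacula-fd start line -- is run with
+# that token in place.
+PRINCIPAL='bacula'
+passwordfile='/etc/security/afs_bacula.pw'
+klog $PRINCIPAL -pipe < ${passwordfile}
+exec "$@"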
+
diff --git a/examples/Write-DVD-with-UDF.txt b/examples/Write-DVD-with-UDF.txt
new file mode 100644
index 00000000..17aaf63a
--- /dev/null
+++ b/examples/Write-DVD-with-UDF.txt
@@ -0,0 +1,75 @@
+From stephan.ebelt@net-linx.com Fri Apr 7 21:01:24 2006
+From: Stephan Ebelt
+Subject: [Bacula-devel] DVD + packet writing + UDF
+
+Hello,
+
+is anyone using this approach in production?
+
+I do, for about a week now. And it appears to be easier to set up and is,
+in fact, quite media independent. (I actually gave up on trying the
+growisofs/dvd-handler method - bacula constantly rejected most of my
+media...)
+
+Here is how it works for me (all done with bacula 1.38.5 on FC4 with the
+udftools package installed):
+
+1. load udf file system support (modprobe udf)
+
+2. create a packet writing device on top of the dvd drive (pktsetup
+   dvd-writer /dev/hdc)
+
+3. format a blank media with UDF file system (mkudffs --media-type=dvd
+   /dev/pktcdvd/dvd-writer), I tried with DVD-RAM, DVD+RW and CD-RW
+   media. All appear to work fine - at different speeds of course.
+
+4. mount the freshly formatted media (mount -t udf
+   /dev/pktcdvd/dvd-writer /mnt/dvd-writer -o noatime). Note that
+   'noatime' makes the media live longer.
+
+5. configure the SD as it would write to hard disk:
+
+   Device {
+     Name = DVD-Writer
+     Media Type = DVD
+
+     ArchiveDevice = /mnt/dvd-writer
+
+     LabelMedia = no # I want only one Volume per DVD
+     AutomaticMount = yes
+     RemovableMedia = yes
+     AlwaysOpen = no
+   }
+
+Now the whole thing can be automated further: steps 1, 2 and 5 are
+one-time system setup.
+
+Step 3 would require some logic to determine whether a media really is
+blank. I guess something like that is already in dvd-handler?
+
+Step 4 should be automatic with 1.39.5+ (with the RequiresMount,
+MountPoint, MountCommand... etc... options being more general). I haven't
+tried this yet.
+
+I do not know how stable this is yet. I am a bit concerned because I
+read in an older mail (from Nicolas Boichat, end of 2004) that he
+considered packet-writing/UDF as not reliable enough at that time.
+
+However, the few restores I tried were all successful. Also I can read
+the DVDs on at least one other computer. Maybe the UDF and pktcdvd code
+matured in the meantime? I'll leave it running here and keep on testing...
+
+best regards,
+Stephan
+
+
+
+-------------------------------------------------------
+This SF.Net email is sponsored by xPML, a groundbreaking scripting language
+that extends applications into web and mobile media. Attend the live webcast
+and join the prime developer group breaking into this new coding territory!
+http://sel.as-us.falkag.net/sel?cmd=lnk&kid=110944&bid=241720&dat=121642
+_______________________________________________
+Bacula-devel mailing list
+Bacula-devel@lists.sourceforge.net
+https://lists.sourceforge.net/lists/listinfo/bacula-devel
diff --git a/examples/afs-bacula b/examples/afs-bacula
new file mode 100755
index 00000000..ebbb2c1b
--- /dev/null
+++ b/examples/afs-bacula
@@ -0,0 +1,296 @@
+#!/usr/afsws/bin/pagsh
+#
+# Get a Kerberos authentication ticket for AFS for Bacula, then run
+# the Bacula client. See AFS-README for documentation.
+#
+# NAME afs_bacula
+# AUTHOR Lucas Mingarro
+# PURPOSE Run an AFS authenticated program.
+# Get a PAG, get the user's token,
+# then exec user's command
+#
+TEXTDOMAIN=initscripts
+TEXTDOMAINDIR=/etc/locale
+
+# Make sure umask is sane
+umask 022
+
+# First set up a default search path.
+export PATH="/sbin:/usr/sbin:/bin:/usr/bin:/usr/X11R6/bin" + +# Get a sane screen width +[ -z "$COLUMNS" ] && COLUMNS=80 + +if [ -f /etc/sysconfig/i18n -a -z "$NOLOCALE" ] ; then + . /etc/sysconfig/i18n + if [ "$LANG" = "ja_JP.eucJP" -a "`/sbin/consoletype`" != "pty" ]; then + unset LANG + else + export LANG + fi +fi + +# Read in our configuration +if [ -z "$BOOTUP" ]; then + if [ -f /etc/sysconfig/init ]; then + . /etc/sysconfig/init + else + # This all seem confusing? Look in /etc/sysconfig/init, + # or in /usr/doc/initscripts-*/sysconfig.txt + BOOTUP=color + RES_COL=60 + MOVE_TO_COL="echo -en \\033[${RES_COL}G" + SETCOLOR_SUCCESS="echo -en \\033[1;32m" + SETCOLOR_FAILURE="echo -en \\033[1;31m" + SETCOLOR_WARNING="echo -en \\033[1;33m" + SETCOLOR_NORMAL="echo -en \\033[0;39m" + LOGLEVEL=1 + fi + if [ -x /sbin/consoletype ]; then + if [ "`consoletype`" = "serial" ]; then + BOOTUP=serial + MOVE_TO_COL= + SETCOLOR_SUCCESS= + SETCOLOR_FAILURE= + SETCOLOR_WARNING= + SETCOLOR_NORMAL= + fi + fi +fi + +if [ "$BOOTUP" != "verbose" ]; then + INITLOG_ARGS="-q" +else + INITLOG_ARGS= +fi + +# Check if $pid (could be plural) are running +checkpid() { + while [ "$1" ]; do + [ -d /proc/$1 ] && return 0 + shift + done + return 1 +} + + +# A function to start a program. +daemon() { + # Test syntax. + local gotbase= + local base= user= nice= bg= pid + nicelevel=0 + while [ "$1" != "${1##[-+]}" ]; do + case $1 in + '') echo $"$0: Usage: daemon [+/-nicelevel] {program}" + return 1;; + --check) + base=$2 + gotbase="yes" + shift 2 + ;; + --check=?*) + base=${1#--user=} + shift + ;; + --user) + user=$2 + shift 2 + ;; + --user=?*) + user=${1#--user=} + shift + ;; + [-+][0-9]*) + nice="nice -n $1" + shift + ;; + *) echo $"$0: Usage: daemon [+/-nicelevel] {program}" + return 1;; + esac + done + + # Save basename. + [ -z $gotbase ] && base=${1##*/} + + # See if it's already running. Look *only* at the pid file. + pidlist=`pidfileofproc $base` + + [ -n "$pid" ] && return + + # make sure it doesn't core dump anywhere; while this could mask + # problems with the daemon, it also closes some security problems + ulimit -S -c 0 >/dev/null 2>&1 + + # Echo daemon + [ "$BOOTUP" = "verbose" ] && echo -n " $base" + + # And start it up. + if [ -z "$user" ]; then + $nice initlog $INITLOG_ARGS -c "$*" + else + $nice initlog $INITLOG_ARGS -c "su - $user -c \"$*\"" && +success $"$base startup" || failure $"$base startup" + fi + [ $? = 0 ] && success $"$base startup" || failure $"$base startup" +} + + +# A function to find the pid of a program. Looks *only* at the pidfile +pidfileofproc() { + local base=${1##*/} + local pid + + # Test syntax. 
+ if [ $# = 0 ] ; then + echo $"Usage: pidfileofproc {program}" + return 1 + fi + + # First try "/var/run/*.pid" files + if [ -f /var/run/${base}.pid ] ; then + read pid < /var/run/${base}.pid + for p in $line ; do + [ -z "${p//[0-9]/}" -a -d /proc/$p ] && +pid="$pid $p" + done + if [ -n "$pid" ] ; then + echo $pid + return 0 + fi + fi +} + +echo_success() { + [ "$BOOTUP" = "color" ] && $MOVE_TO_COL + echo -n "[ " + [ "$BOOTUP" = "color" ] && $SETCOLOR_SUCCESS + echo -n $"OK" + [ "$BOOTUP" = "color" ] && $SETCOLOR_NORMAL + echo -n " ]" + echo -ne "\r" + return 0 +} + +echo_failure() { + [ "$BOOTUP" = "color" ] && $MOVE_TO_COL + echo -n "[" + [ "$BOOTUP" = "color" ] && $SETCOLOR_FAILURE + echo -n $"FAILED" + [ "$BOOTUP" = "color" ] && $SETCOLOR_NORMAL + echo -n "]" + echo -ne "\r" + return 1 +} + +echo_passed() { + [ "$BOOTUP" = "color" ] && $MOVE_TO_COL + echo -n "[" + [ "$BOOTUP" = "color" ] && $SETCOLOR_WARNING + echo -n $"PASSED" + [ "$BOOTUP" = "color" ] && $SETCOLOR_NORMAL + echo -n "]" + echo -ne "\r" + return 1 +} + +# Log that something succeeded +success() { + if [ -z "$IN_INITLOG" ]; then + initlog $INITLOG_ARGS -n $0 -s "$1" -e 1 + else + # silly hack to avoid EPIPE killing rc.sysinit + trap "" SIGPIPE + echo "$INITLOG_ARGS -n $0 -s \"$1\" -e 1" >&21 + trap - SIGPIPE + fi + [ "$BOOTUP" != "verbose" ] && echo_success + return 0 +} + +# Log that something failed +failure() { + rc=$? + if [ -z "$IN_INITLOG" ]; then + initlog $INITLOG_ARGS -n $0 -s "$1" -e 2 + else + trap "" SIGPIPE + echo "$INITLOG_ARGS -n $0 -s \"$1\" -e 2" >&21 + trap - SIGPIPE + fi + [ "$BOOTUP" != "verbose" ] && echo_failure + return $rc +} + +# Log that something passed, but may have had errors. Useful for fsck +passed() { + rc=$? + if [ -z "$IN_INITLOG" ]; then + initlog $INITLOG_ARGS -n $0 -s "$1" -e 1 + else + trap "" SIGPIPE + echo "$INITLOG_ARGS -n $0 -s \"$1\" -e 1" >&21 + trap - SIGPIPE + fi + [ "$BOOTUP" != "verbose" ] && echo_passed + return $rc +} + +# Run some action. Log its output. +action() { + STRING=$1 + echo -n "$STRING " + shift + initlog $INITLOG_ARGS -c "$*" && success $"$STRING" || failure $"$STRING" + rc=$? + echo + return $rc +} + +# returns OK if $1 contains $2 +strstr() { + [ "$1" = "$2" ] && return 0 + slice=${1#*$2*} + [ "$slice" = "$1" ] && return 1 + return 0 +} + +# Confirm whether we really want to run this service +confirm() { + local YES=$"yY" + local NO=$"nN" + local CONT=$"cC" + + while : ; do + echo -n $"Start service $1 (Y)es/(N)o/(C)ontinue? [Y] " + read answer + if strstr "$YES" "$answer" || [ "$answer" = "" ] ; then + return 0 + elif strstr "$CONT" "$answer" ; then + return 2 + elif strstr "$NO" "$answer" ; then + return 1 + fi + done +} + +# Here is the authentication with the kas server + +CMD=`basename ${0}` +PRINCIPAL='bacula' +passwordfile='/etc/security/afs_bacula.pw' +klog $PRINCIPAL -pipe < ${passwordfile} +command_line="$*" +command=`echo ${command_line} | awk '{print $1}'` +# Check if we can run the command. +# If we got this far, it is likely that the command name is correct +# but there may be a problem in accessing the command file. +# If there is an error, log it via syslog (logger) rather than ">&2" + + #if [ ! 
-x "${command}" ]; then + #M="error: unable to execute command ${command}" + #logger -i -t "${CMD}" "${M}" + #exit 1 + #fi +#fi +$command_line diff --git a/examples/autochangers/Sun-desktop-20Gb-4mm-autoloader b/examples/autochangers/Sun-desktop-20Gb-4mm-autoloader new file mode 100644 index 00000000..4ffbda58 --- /dev/null +++ b/examples/autochangers/Sun-desktop-20Gb-4mm-autoloader @@ -0,0 +1,43 @@ +From: Lucas Mingarro +To: +Subject: [Bacula-users] Sun Desktop 20Gb 4mm autoloader +Date: Mon, 2 Dec 2002 15:42:43 -0300 (ART) + +Hi, + If you have an old Sun Desktop 20Gb 4mm autoloader (Archive Python +29279) and you want to make it work on Linux with Bacula here are my conf +files. + +I'm using Redhat Linux 7.1 with Bacula 1.26a. + +First add these lines to your stini.def + +manufacturer=ARCHIVE modepl = "Python 29279" { +scsi2logical=0 can-bsr can-partitions auto-lock +mode1 blocksize=0 compression=0 density=0x13 # /dev/nst0 DDS (61000 bpi) +mode2 blocksize=1024 compression=0 density=0x13 # /dev/nst0l DDS (61000 bpi) +mode3 blocksize=0 compression=1 density=0x24 # /dev/nst0m DDS-2 +mode4 blocksize=1024 compression=1 density=0x24 # /dev/nst0a DDS-2 +} + +and Python29279-autoloader.conf + +# +# This is the definition Lucas uses for a +# Sun Desktop 20Gb 4mm autoloader (Archive Python 29279) +# +Device { + Name = "Python 29279" + Media Type = "4mm" + Archive Device = /dev/nst0a + Auto Changer = Yes + Changer Device = /dev/sg0 + Changer Command = "/usr/local/etc/mtx-changer %c %o %S %a" + AutomaticMount = yes; # when device opened, read it + AlwaysOpen = no; +} + +Thats all +Good Luck + +Lucas Mingarro diff --git a/examples/autochangers/ak-mtx-changer.txt b/examples/autochangers/ak-mtx-changer.txt new file mode 100755 index 00000000..671d2335 --- /dev/null +++ b/examples/autochangers/ak-mtx-changer.txt @@ -0,0 +1,218 @@ +#!/bin/sh +# +# The use of this script for Totally Automating Tape usage is +# described in the Tips chapter of the manual. +# +# Bacula interface to mtx autoloader +# +# Created OCT/31/03 by Alexander Kuehn, derived from Ludwig Jaffe's script +# +# Works with the HP C1537A L708 DDS3 +# +#set -x +# these are the labels of the tapes in each virtual slot, not the slots! +labels="PSE-0001 PSE-0002 PSE-0003 PSE-0004 PSE-0005 PSE-0006 PSE-0007 PSE-0008 PSE-0009 PSE-0010 PSE-0011 PSE-0012" + +# who to send a mail to? +recipient=root@localhost +logfile=/var/log/mtx.log + +# Delay in seconds how often to check whether a new tape has been inserted +TAPEDELAY=10 # the default is every 10 seconds +echo `date` ":" $@ >>$logfile + +# change this if mt is not in the path (use different quotes!) +mt=`which mt` +grep=`which grep` +# +# how to run the console application? +console="/usr/local/sbin/console -c /usr/local/etc/console.conf" + +command="$1" + +#TAPEDRIVE0 holds the device/name of your 1st and only drive (Bacula supports only 1 drive currently) +#Read TAPEDRIVE from command line parameters +if [ -z "$2" ] ; then + TAPEDRIVE0=/dev/nsa0 +else + TAPEDRIVE0=$2 +fi + +#Read slot from command line parameters +if [ -z "$3" ] ; then + slot=`expr 1` +else + slot=`expr $3` +fi + +if [ -z "$command" ] ; then + echo "" + echo "The mtx-changer script for Bacula" + echo "---------------------------------" + echo "" + echo "usage: mtx-changer [slot]" + echo " mtx-changer" + echo "" + echo "Valid commands:" + echo "" + echo "unload Unloads a tape into the slot" + echo " from where it was loaded." 
+ echo "load Loads a tape from the slot " + echo "list Lists full storage slots" + echo "loaded Gives slot from where the tape was loaded." + echo " 0 means the tape drive is empty." + echo "slots Gives Number of avialable slots." + echo "volumes List avialable slots and the label of the." + echo " tape in it (slot:volume)" + echo "Example:" + echo " mtx-changer load /dev/nst0 1 loads a tape from slot1" + echo " mtx-changer %a %o %S " + echo "" + exit 0 +fi + + +case "$command" in + unload) + # At first do mt -f /dev/st0 offline to unload the tape + # + # Check if you want to fool me + echo "unmount"|$console >/dev/null 2>/dev/null + echo "mtx-changer: Checking if drive is loaded before we unload. Request unload" >>$logfile + if $mt -f $TAPEDRIVE0 status >/dev/null 2>/dev/null ; then # mt says status ok + echo "mtx-changer: Doing mt -f $TAPEDRIVE0 rewoffl to rewind and unload the tape!" >>$logfile + $mt -f $TAPEDRIVE0 rewoffl + else + echo "mtx-changer: *** Don't fool me! *** The Drive $TAPEDRIVE0 is empty." >>$logfile + fi + exit 0 + ;; + + load) + #Let's check if drive is loaded before we load it + echo "mtx-changer: Checking if drive is loaded before we load. I Request loaded" >>$logfile + LOADEDVOL=`echo "status Storage"|$console|$grep $TAPEDRIVE0|grep ^Device|grep -v "not open."|grep -v "ERR="|grep -v "no Bacula volume is mounted"|sed -e s/^.*Volume\ //|cut -d\" -f2` +# if [ -z "$LOADEDVOL" ] ; then # this is wrong, becaus Bacula would try to use the tape if we mount it! +# LOADEDVOL=`echo "mount"|$console|$grep $TAPEDRIVE0|grep Device|grep -v "not open."|grep -v "ERR="|sed -e s/^.*Volume\ //|cut -d\" -f2` +# if [ -z "$LOADEDVOL" ] ; then +# echo "mtx-changer: The Drive $TAPEDRIVE0 is empty." >>$logfile +# else # restore state? +# if [ $LOADEDVOL = $3 ] ; then # requested Volume mounted -> exit +# echo "mtx-changer: *** Don't fool me! *** Tape $LOADEDVOL is already in drive $TAPEDRIVE0!" >>$logfile +# exit +# else # oops, wrong volume +# echo "unmount"|$console >/dev/null 2>/dev/null +# fi +# fi +# fi + if [ -z "$LOADEDVOL" ] ; then + echo "unmount"|$console >/dev/null 2>/dev/null + LOADEDVOL=0 + else + #Check if you want to fool me + if [ $LOADEDVOL = $3 ] ; then + echo "mtx-changer: *** Don't fool me! *** Tape $LOADEDVOL is already in drive $TAPEDRIVE0!" >>$logfile + exit + fi + echo "mtx-changer: The Drive $TAPEDRIVE0 is loaded with the tape $LOADEDVOL" >>$logfile + echo "mtx-changer: Unmounting..." >>$logfile + echo "unmount"|$console >/dev/null 2>/dev/null + fi + echo "mtx-changer: Unloading..." >>$logfile + echo "mtx-changer: Doing mt -f $TAPEDRIVE0 rewoffl to rewind and unload the tape!" >>$logfile + mt -f $TAPEDRIVE0 rewoffl 2>/dev/null + #Now we can load the drive as desired + echo "mtx-changer: Doing mtx -f $1 $2 $3" >>$logfile + # extract label for the mail + count=`expr 1` + for label in $labels ; do + if [ $slot -eq $count ] ; then volume=$label ; fi + count=`expr $count + 1` + done + + mail -s "Bacula needs volume $volume." $recipient </dev/null 2>/dev/null + while [ $? -ne 0 ] ; do + sleep $TAPEDELAY + $mt status >/dev/null 2>/dev/null + done + mail -s "Bacula says thank you." $recipient <>$logfile + echo "Loading finished." 
; >>$logfile + echo "$slot" + exit 0 + ;; + + list) + echo "mtx-changer: Requested list" >>$logfile + LOADEDVOL=`echo "status Storage"|$console|$grep $TAPEDRIVE0|grep ^Device|grep -v "not open."|grep -v "ERR="|grep -v "no Bacula volume is mounted"|sed -e s/^.*Volume\ //|cut -d\" -f2` + if [ -z $LOADEDVOL ] ; then # try mounting + LOADEDVOL=`echo "mount"|$console|$grep $TAPEDRIVE0|grep Device|grep -v "not open."|grep -v "ERR="|sed -e s/^.*Volume\ //|cut -d\" -f2` + if [ -z $LOADEDVOL ] ; then # no luck + LOADEDVOL="_no_tape" + else # restore state + echo "unmount"|$console >/dev/null 2>/dev/null + fi + fi + count=`expr 1` + for label in $labels ; do + if [ "$label" != "$LOADEDVOL" ] ; then + printf "$count " + fi + count=`expr $count + 1` + done + printf "\n" + ;; + + loaded) + echo "mtx-changer: Request loaded, dev $TAPEDRIVE0" >>$logfile + LOADEDVOL=`echo "status Storage"|$console|$grep $TAPEDRIVE0|grep ^Device|grep -v "not open."|grep -v "ERR="|grep -v "no Bacula volume is mounted"|sed -e s/^.*Volume\ //|cut -d\" -f2` + if [ -z $LOADEDVOL ] ; then + LOADEDVOL=`echo "mount"|$console|$grep $TAPEDRIVE0|grep Device|grep -v "not open."|grep -v "ERR="|grep -v "no Bacula volume is mounted"|sed -e s/^.*Volume\ //|cut -d\" -f2` + if [ -z "$LOADEDVOL" ] ; then # no luck + echo "$TAPEDRIVE0 not mounted!" >>$logfile + else # restore state + echo "unmount"|$console >/dev/null 2>/dev/null + fi + fi + if [ -z "$LOADEDVOL" ] ; then + LOADEDVOL="_no_tape" >>$logfile + echo "0" + else + count=`expr 1` + for label in $labels ; do + if [ $LOADEDVOL = $label ] ; then echo $count ; fi + count=`expr $count + 1` + done + fi + exit 0 + ;; + + slots) + echo "mtx-changer: Request slots" >>$logfile + count=`expr 0` + for label in $labels ; do + count=`expr $count + 1` + done + echo $count + ;; + + volumes) + echo "mtx-changer: Request volumes" >>$logfile + count=`expr 1` + for label in $labels ; do + printf "$count:$label " + count=`expr $count + 1` + done + printf "\n" + ;; +esac diff --git a/examples/autochangers/bacula-barcodes b/examples/autochangers/bacula-barcodes new file mode 100644 index 00000000..53410101 --- /dev/null +++ b/examples/autochangers/bacula-barcodes @@ -0,0 +1,53 @@ +# +# Bacula barcode simulation file +# used by ${PREFIX}/sbin/chio-bacula (FreeBSD) +# +# Contributed by Lars Koeller +# +# The volumenames are returned by the "changer list" command +# labeling in the console is done by "label barcodes" +# (then all volumes belog to the default pool). +# All Lines with an "#" at the bedinning are ignored +# +# !!!! If you export an tape and reinsert another one, +# !!!! don't forget to change the volume name in this file! 
+# +1:Volume1-100 +2:Volume1-101 +3:Volume1-102 +4:Volume1-103 +5:Volume1-104 +6:Volume1-105 +7:Volume1-106 +8:Volume1-107 +9:Volume1-108 +10:Volume1-109 +11:Volume1-110 +12:Volume1-111 +# +# Further volumes exported from the changer +# +# 36GB AIT2 tapes +#Volume1-100 +#Volume1-101 +#Volume1-102 +#Volume1-103 +#Volume1-104 +#Volume1-105 +#Volume1-106 +#Volume1-107 +#Volume1-108 +#Volume1-109 +#Volume1-110 +#Volume1-111 +#Volume1-112 +#Volume1-113 +#Volume1-114 +#Volume1-115 +# +# 50GB AIT2 tapes +#Volume2-200 +#Volume2-201 +#Volume2-202 +#Volume2-203 +#Volume2-204 diff --git a/examples/autochangers/chio-bacula b/examples/autochangers/chio-bacula new file mode 100755 index 00000000..e12dc204 --- /dev/null +++ b/examples/autochangers/chio-bacula @@ -0,0 +1,152 @@ +#!/bin/sh +# +# Bacula interface to mtx autoloader +# (By Lars Koeller, lars+bacula@koellers.net) +# +# If you set in your Device resource +# +# Changer Command = "path-to-this-script/chio-bacula %c %o %S %a" +# you will have the following input to this script: +# +# chio-bacula "changer-device" "command" "slot" "archive-device" +# +# for example: +# +# chio-bacula /dev/sg0 load 1 /dev/nst0 (on a FreeBSD system) +# +# If you need to to an offline, refer to the drive as $4 +# e.g. mt -f $f offline +# +# Many changers need an offline after the unload. Also many +# changers need a sleep 60 after the mtx load. +# +# N.B. If you change the script, take care to return either +# the mtx exit code or a 0. If the script exits with a non-zero +# exit code, Bacula will assume the request failed. +# + +# This simulates a barcode reader in the changer. +# The labels of the virtual barcode reader are located in the BARCODE_FILE +SIMULATE_BARCODE=true +BARCODE_FILE=/usr/local/etc/bacula-barcodes +TMPDIR=/tmp + +make_temp_file() +{ + TMPFILE=`mktemp ${TMPDIR}/mtx$1.XXXXXXXXXX 2> /dev/null` + if test $? -ne 0 || test x${TMPFILE} = x; then + TMPFILE="${TMPDIR}/mtx$1.$$" + if test -f ${TMPFILE}; then + echo "ERROR: Temp file security problem on: ${TMPFILE}" + exit 1 + fi + fi +} + +me=$(basename $0) + +# Debug +echo "$me $@" > /dev/console + +if [ -z "$1" ] ; then + usage; +fi + +if [ -z "$2" ] ; then + usage; +fi + +MTX=/bin/chio +CHANGER=$1 +COMMAND=$2 +if [ ! -z "$3" ]; then + SLOT=$3 +fi +if [ ! -z "$4" ]; then + TAPE=$4 +else + TAPE=/dev/nrsa2 +fi + +# Time to wait for loading +SLEEP=20 +# What drive of the autochanger should be used primary +# At the moment bacula (1.31a) could not deal with more drives +DRIVE=1 + +usage() +{ + echo "" + echo "The $me script for bacula" + echo "--------------------------------------" + echo "" + echo "usage: $me [slot] [devicename of tapedrive]" + echo "" + echo "Valid commands:" + echo "" + echo "unload Unloads a tape into the slot" + echo " from where it was loaded." + echo "load Loads a tape from the slot " + echo " (slot-base is calculated to 1 as first slot)" + echo "list Lists full storage slots" + echo "loaded Gives slot from where the tape was loaded." + echo " 0 means the tape drive is empty." + echo "slots Gives Number of available slots." + echo "" + echo "Example:" + echo " mtx-changer /dev/changer load 1 loads a tape from slot 1" + echo "" + exit 2 +} + + +case ${COMMAND} in + unload) + # enable the following line if you need to eject the cartridge + #mt -f ${TAPE} off + #sleep 2 + ${MTX} -f ${CHANGER} return drive ${DRIVE} + ;; + + load) + ${MTX} -f ${CHANGER} move slot $((${SLOT}-1)) drive ${DRIVE} + rtn=$? 
+ # Increase the sleep time if you have a slow device + sleep $SLEEP + exit $rtn + ;; + + list) + if [ "${SIMULATE_BARCODE}" = "true" ]; then + if [ -f "$BARCODE_FILE" ]; then + cat $BARCODE_FILE | grep -v "^#" + exit 0 + else + echo "Barcode file $BARCODE_FILE missing ... exiting!" + exit 1 + fi + else + ${MTX} -f ${CHANGER} status | grep "^slot .*: .*FULL>" | awk '{print $2}' | awk -F: '{print $1+1" "}' | tr -d "[\r\n]" + fi + ;; + + loaded) + # echo "Request loaded" + make_temp_file + ${MTX} -f ${CHANGER} status -S > ${TMPFILE} + rtn=$? + cat ${TMPFILE} | grep "^drive ${DRIVE}: " | awk '{print $6+1}' | tr -d ">" + cat ${TMPFILE} | grep "^drive ${DRIVE}: source: <>" | awk "{print 0}" + rm -f ${TMPFILE} + exit $rtn + ;; + + slots) + # echo "Request slots" + ${MTX} -f ${CHANGER} status | grep "^slot " | tail -1 | awk '{print $2+1}' | tr -d ":" + ;; + + *) + usage + ;; +esac diff --git a/examples/autochangers/chio-changer b/examples/autochangers/chio-changer new file mode 100755 index 00000000..60d1ed09 --- /dev/null +++ b/examples/autochangers/chio-changer @@ -0,0 +1,125 @@ +#!/bin/sh +# +# Bacula interface to autoloader +# +# By Pascal Pederiva +# +# Known to work on FreeBSD 5.2 with a TZ875 changer. +# +# This script mimics mtx-changer with the following differences +# - it automatically stows the cartridge to the slot it came from when +# unloading. +# - a load will automatically unload the drive if there is a +# different cartridge loaded. +# - it uses chio instead of mtx (which is +# available as a package) +# +# If you set in your Device resource +# +# Changer Command = "path-to-this-script/chio-changer %c %o %S %a" +# you will have the following input to this script: +# +# chio-changer "changer-device" "command" "slot" "archive-device" +# +# for example: +# +# chio-changer /dev/sg0 load 1 /dev/nst0 (on a Linux system) +# +# If you need to to an offline, refer to the drive as $4 +# e.g. mt -f $4 offline +# +# Many changers need an offline after the unload. Also many +# changers need a sleep 60 after the chio load. +# +# N.B. If you change the script, take care to return either +# the chio exit code or a 0. If the script exits with a non-zero +# exit code, Bacula will assume the request failed. +# +# Examples: +# chio-changer load 1 ; load slot 1 into drive 0 (and unload old cartridge if necessary) +# chio-changer unload N ; unload drive into source slot (slot number is ignored) +# chio-changer loaded N ; Return loaded slot # +# chio-changer /dev/ch0 loaded N /dev/nsa0 ; has the same effect + + +#echo `date` "chio: $*" >>/tmp/changer + +# If the first parameter is not a device, assume it is a command +# and you want to use the default changer + +if [ -c $1 ]; then + CHANGER=$1 + shift +else + CHANGER=/dev/ch0 +fi + +COMMAND=$1 +ELEMENT=$2 +DRIVE=$3 + +MTX=chio + +############################################################################## + +case "$COMMAND" in + unload) + +# enable the following line if you need to eject the cartridge +# mt -f $DRIVE offline + SOURCE=`${MTX} status -S | grep drive | grep FULL | cut -d: -f 3 | tr -d '<>a-z '` + if [ ! -z "$SOURCE" ]; then + echo -n "Unloading Data Transfer Element into Storage Element $ELEMENT..." + ${MTX} -f $CHANGER move drive 0 slot $SOURCE + rtn=$? 
+ echo "done" + else + echo "Storage Element $ELEMENT is Already Full" + fi + exit $rtn + ;; + + load) + if [ -z "$ELEMENT" ]; then + echo "ERROR: load reguired" + return 1 + fi + TARGET=$ELEMENT + if [ $TARGET -le 0 ]; then + TARGET=1 + fi + TARGET=`expr $TARGET - 1` + + SOURCE=`${MTX} status -S | grep drive | grep FULL | cut -d: -f 3 | tr -d '<>a-z '` + if [ ! -z "$SOURCE" ]; then + if [ "$SOURCE" != "$TARGET" ]; then + # Only unload if there is something different in the drive + ${MTX} -f $CHANGER move drive 0 slot $SOURCE + fi + fi + + if [ "$SOURCE" != "$TARGET" ]; then + ${MTX} -f $CHANGER move slot $TARGET drive 0 + rtn=$? + fi + exit $rtn + ;; + + list) + ${MTX} -f $CHANGER status slot | grep "FULL" | awk '{print $2+1":"}' + ;; + + loaded) + SOURCE=`${MTX} status -S | grep drive | grep FULL | cut -d: -f 3 | tr -d '<>a-z '` + rtn=$? + if [ -z "$SOURCE" ]; then + SOURCE=-1 + fi + echo `expr 1 + ${SOURCE}` + exit $rtn + ;; + + slots) + ${MTX} -f $CHANGER status slot | wc -l + ;; +esac diff --git a/examples/autochangers/chio-changer-freebsd b/examples/autochangers/chio-changer-freebsd new file mode 100755 index 00000000..b9cec93c --- /dev/null +++ b/examples/autochangers/chio-changer-freebsd @@ -0,0 +1,194 @@ +#!/bin/sh +# +# Copyright (C) 2015 Rudolf Cejka +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# Bacula interface to tape libraries and autoloaders for FreeBSD +# (by Rudolf Cejka , v1.2, 2012/11/14) +# +# +# If you set in your Device resource +# Changer Command = "path-to-this-script/chio-changer %c %o %S %a %d" +# you will have the following input to this script: +# chio-changer "changer-device" "command" "slot" "tape-device" "drive-index" +# $1 $2 $3 $4 $5 +# for example (on a FreeBSD system): +# chio-changer /dev/ch0 load 1 /dev/nsa0 0 +# +# If you change the script, take care to return either the chio exit +# code or a 0. If the script exits with a non-zero exit code, Bacula +# will assume the request failed. +# + +PROGNAME=`basename $0` + +# Uncomment the following line, if you want to log debug output. +#DEBUG=/var/run/bacula/${PROGNAME}.log + +# Uncomment the following line, if you need to eject a tape before moving +# it from the drive. +#OFFLINE=yes + +# Uncomment one or more of the following lines, if you need to wait for +# some time (in seconds) after unloading, loading or transferring a tape. +#OFFLINE_SLEEP=10 +#LOAD_SLEEP=10 +#MOVE_SLEEP=10 + +# Uncomment the following line, if you do not have a changer with volume +# reader. 
+#FAKE_BARCODES=/usr/local/etc/bacula-barcodes + +usage() +{ + cat < [slot] [tape-device] [drive-index] + +Commands (): + unload Unload a tape into the slot from where it was loaded + load Load a tape from the slot (1-based) + transfer Transfer a tape from the slot to + the slot (1-based) + list List full storage slots + listall List all storage slots and drives with source information + loaded Give slot from where the tape was loaded (0 = empty drive) + slots Give number of available slots + +Example: + ${PROGNAME} /dev/ch0 load 1 Load a tape from the slot 1 + +EOF + exit 1 +} + +# Default settings +CHANGER=/dev/ch0 +TAPE=/dev/nsa0 +DRIVE=0 + +CHIO=/bin/chio +MT=/usr/bin/mt + +if [ -n "${DEBUG}" ]; then + MSG=$0 + for PAR; do MSG="${MSG} \"${PAR}\""; done + echo `date +"%Y/%m/%d %H:%M:%S"` ${MSG} >> ${DEBUG} +fi + +if [ -n "$1" ]; then + CHANGER=$1; +fi +COMMAND=$2 +SLOT=$3 +SLOTDST=$4 +if [ -n "$4" ]; then + TAPE=$4 +fi +if [ -n "$5" ]; then + DRIVE=$5 +fi + +case ${COMMAND} in +unload) + if [ "${OFFLINE}" = yes ]; then + ${MT} -f ${TAPE} offline + if [ $? = 0 -a -n "${OFFLINE_SLEEP}" ]; then + sleep ${OFFLINE_SLEEP} + fi + fi + if [ -z "${SLOT}" ]; then + ${CHIO} -f ${CHANGER} return drive ${DRIVE} + else + ${CHIO} -f ${CHANGER} move drive ${DRIVE} slot $((${SLOT} - 1)) + fi + if [ $? -ne 0 ]; then + # In case of an error, try to unload the cartridge to the first free slot + FREE=`${CHIO} -f ${CHANGER} status slot | \ + sed -ne '/FULL/d;s/^slot *\([0-9]*\):.*/\1/p' | \ + awk 'BEGIN { n = 0 } { n = $1 + 1; exit } END { print n }'` + if [ ${FREE} -gt 0 ]; then + ${CHIO} -f ${CHANGER} move drive ${DRIVE} slot $((${FREE} - 1)) + else + exit 1 + fi + fi + ;; +load) + if [ -z "${SLOT}" ]; then + usage + fi + ${CHIO} -f ${CHANGER} move slot $((${SLOT} - 1)) drive ${DRIVE} + if [ $? -ne 0 ]; then + exit 1 + fi + if [ -n "${LOAD_SLEEP}" ]; then + sleep ${LOAD_SLEEP} + fi + ;; +transfer) + if [ -z "${SLOT}" -o -z "${SLOTDST}" ]; then + usage + fi + ${CHIO} -f ${CHANGER} move slot $((${SLOT} - 1)) slot $((${SLOTDST} - 1)) + if [ $? -ne 0 ]; then + exit 1 + fi + if [ -n "${MOVE_SLEEP}" ]; then + sleep ${MOVE_SLEEP} + fi + ;; +list) + if [ -z "${FAKE_BARCODES}" ]; then + ${CHIO} -f ${CHANGER} status -v slot | \ + sed -ne 's/^slot *\([0-9]*\):.*FULL.*voltag.*<\([^:]*\):.*/\1:\2/p' | \ + awk -F: '{ print $1 + 1 ":" $2 }' + else + if [ -f "${FAKE_BARCODES}" ]; then + grep -v -e "^#" -e "^$" < ${FAKE_BARCODES} + else + echo "${PROGNAME}: Barcode file ${FAKE_BARCODES} is missing" + exit 1 + fi + fi + ;; +listall) + if [ -z "${FAKE_BARCODES}" ]; then + ${CHIO} -f ${CHANGER} status -vS | \ + sed -ne ' + s/^slot *\([0-9]*\):.*ENAB.*FULL.*voltag.*<\([^:]*\):.*/I:\1:F:\2/p;t + s/^slot *\([0-9]*\):.*FULL.*voltag.*<\([^:]*\):.*/S:\1:F:\2/p;t + s/^drive *\([0-9]*\):.*FULL.*voltag.*<\([^:]*\):.*source.*<[^0-9]*\([0-9]*\)>.*/D:\1:F:\3:\2/p;t + s/^slot *\([0-9]*\):.*ENAB.*voltag.*<\([^:]*\):.*/I:\1:E/p;t + s/^slot *\([0-9]*\):.*voltag.*<\([^:]*\):.*/S:\1:E/p;t + s/^drive *\([0-9]*\):.*voltag.*<\([^:]*\):.*/D:\1:E/p' | \ + awk -F: '{ for (n = 1; n <= NF; n++) printf "%s%s", + (n == ($1 == "D" ? 4 : 2)) ? ($n == "" ? 0 : $n + 1) : $n, + (n == NF) ? "\n" : ":" }' + else + if [ -f "${FAKE_BARCODES}" ]; then + grep -v -e "^#" -e "^$" < ${FAKE_BARCODES} | \ + awk -F: '{ print "S:" $1 (match($2, "^ *$") ? 
":E" : ":F:" $2) }' + else + echo "${PROGNAME}: Barcode file ${FAKE_BARCODES} is missing" + exit 1 + fi + fi + ;; +loaded) + # If a tape is loaded, but the source slot is unknown (for example, + # after library reboot), try to report the first free slot + FREE=`${CHIO} -f ${CHANGER} status slot | \ + sed -ne '/FULL/d;s/^slot *\([0-9]*\):.*/\1/p' | \ + awk 'BEGIN { n = 0 } { n = $1 + 1; exit } END { print n }'` + ${CHIO} -f ${CHANGER} status -S drive | \ + sed -ne 's/^drive *'${DRIVE}':.*FULL.*source.*<[^0-9]*\([0-9]*\)>.*/\1/p' \ + | awk 'BEGIN { n = 0 } { n = ($1 == "") ? '${FREE}' : $1 + 1 } \ + END { print n }' + ;; +slots) + ${CHIO} -f ${CHANGER} status | grep -c "^slot " + ;; +*) + usage + ;; +esac diff --git a/examples/autochangers/chio-changer-openbsd b/examples/autochangers/chio-changer-openbsd new file mode 100755 index 00000000..050095a6 --- /dev/null +++ b/examples/autochangers/chio-changer-openbsd @@ -0,0 +1,134 @@ +#!/bin/sh +# +# Bacula interface to chio(1) autoloader for OpenBSD +# +# Adapted from NetBSD pkgsrc and examples/autochangers in bacula source) +# by Antoine Jacoutot for OpenBSD. +# Tested on an LTO-4 device with 1 drive and 8 slots. +# The user Bacula is running as needs rw access to the ch(4) and st(4) +# devices. +# +# If you set in your Device resource: +# Changer Command = "/path/to/chio-changer-openbsd %c %o %S %a %d" +# you will have the following input to this script: +# chio-changer-openbsd "changer-device" "command" "slot" "archive-device" "drive-index" +# $1 $2 $3 $4 $5 +# +# So Bacula will always call with all the following arguments, even though +# in come cases, not all are used. +# +# N.B. If you change the script, take care to return either +# the chio exit code or a 0. If the script exits with a non-zero +# exit code, Bacula will assume the request failed. + +# time (in seconds) for the unit to settle after (un)loading a tape +SLEEP=1 + +CHIO=/bin/chio + +usage() { + echo "usage: ${0##*/} ctl-device command [slot archive-device drive-index]" +} + +# check parameters count +check_parm_count() { + pCount=$1 + pCountNeed=$2 + if test ${pCount} -lt ${pCountNeed}; then + usage + echo "!!! insufficient number of arguments given" + exit 1 + if test ${pCount} -lt 2; then + usage + echo "!!! mimimum usage is the first two arguments" + exit 1 + else + usage + echo "!!! command expected ${pCountNeed} arguments" + exit 1 + fi + usage + exit 1 + fi +} + +# check arguments count for specific actions +case $2 in + list|listall) + check_parm_count $# 2 + ;; + slots) + check_parm_count $# 2 + ;; + transfer) + check_parm_count $# 4 + ;; + *) + check_parm_count $# 5 + ;; +esac + + +# get arguments +ctl=$1 +cmd="$2" +slot=$3 +device=$4 +drive=$5 + +case ${cmd} in + unload) + ${CHIO} -f ${ctl} move drive ${drive} slot $((${slot} - 1)) + rtn=$? + [ ${rtn} -eq 0 ] && sleep ${SLEEP} + exit ${rtn} + ;; + load) + ${CHIO} -f ${ctl} move slot $((${slot} - 1)) drive ${drive} + rtn=$? + [ ${rtn} -eq 0 ] && sleep ${SLEEP} + exit ${rtn} + ;; + list) + ${CHIO} -f ${ctl} status -v slot | \ + sed -ne 's/^slot *\([0-9]*:\).*FULL.*voltag.*<\(.*\):.*/\1\2/p' | \ + awk -F: '{ print $1 + 1 ":" $2 }' + exit $? 
+ ;; + listall) + # XXX only one drive is queried + _list=$(${0} ${1} list) + _loaded_s=$(${0} ${1} loaded ${slot} ${device} ${drive}) + _loaded_t=$(${CHIO} -f ${ctl} status -v | grep "drive ${drive}" | awk '{ print $NF }' | sed -e 's,<,,' -e 's,:.*,,') + + [ -n "${_list}" -a -n "${_loaded_s}" -a -n "${_loaded_t}" ] || exit 1 + + (for i in ${_list}; do + echo "S:${i}" | sed 's/\(.*\):/\1:F:/' + done + echo S:${_loaded_s}:E + if [ "${_loaded_s}" -ne 0 ]; then + echo D:${drive}:F:${_loaded_s}:${_loaded_t} + else + echo D:${drive}:E + fi) | sort + ;; + loaded) + # XXX output the first empty slot if the drive is loaded + _slot=$(${CHIO} -f ${ctl} status -v | egrep '^slot.* voltag: <:[0-9]>$' | awk '{ print $2 }' | awk -F: '{ print $1 + 1 }') + rtn=$? + _loaded=$(${CHIO} -f ${ctl} status -v | egrep "^drive ${drive}: voltag: <.*:[0-9]>") + [ -z "${_slot}" -o -z "${_loaded}" ] && _slot=0 + echo ${_slot} | awk '{ print $1 }' + exit ${rtn} + ;; + slots) + ${CHIO} -f ${ctl} params | awk "/slots/{print \$2}" + exit $? + ;; + transfer) + slotdest=${device} + ${CHIO} -f ${ctl} move slot $((${slot} - 1)) slot ${slotdest} + exit $? + ;; +esac diff --git a/examples/autochangers/chio-changer.Sony-TSL-SA300C b/examples/autochangers/chio-changer.Sony-TSL-SA300C new file mode 100755 index 00000000..6e3505ab --- /dev/null +++ b/examples/autochangers/chio-changer.Sony-TSL-SA300C @@ -0,0 +1,167 @@ +#!/bin/sh +# +# Bacula interface to mtx autoloader +# (By Lars Kller, lars+bacula@koellers.net) +# +# Modified by Jesse D. Guardiani (jesse@wingnet.net) in Feb 2004 +# to be more error resistant and compatible with my 4 tape SONY +# AIT-1 TSL-SA300C autoloader. +# +# If you set in your Device resource +# +# Changer Command = "path-to-this-script/chio-bacula %c %o %S %a" +# you will have the following input to this script: +# +# chio-bacula "changer-device" "command" "slot" "archive-device" +# +# for example: +# +# chio-bacula /dev/ch0 load 1 /dev/nst0 (on a FreeBSD system) +# +# If you need to to an offline, refer to the drive as $4 +# e.g. mt -f $f offline +# +# Many changers need an offline after the unload. Also many +# changers need a sleep 60 after the mtx load. +# +# N.B. If you change the script, take care to return either +# the mtx exit code or a 0. If the script exits with a non-zero +# exit code, Bacula will assume the request failed. +# + +# This simulates a barcode reader in the changer. +# The labels of the virtual barcode reader are located in the BARCODE_FILE +SIMULATE_BARCODE=true +BARCODE_FILE=/usr/local/etc/bacula-barcodes + +TMPDIR=/tmp + +make_temp_file() +{ + TMPFILE=`mktemp ${TMPDIR}/mtx$1.XXXXXXXXXX 2> /dev/null` + if test $? -ne 0 || test x${TMPFILE} = x; then + TMPFILE="${TMPDIR}/mtx$1.$$" + if test -f ${TMPFILE}; then + echo "ERROR: Temp file security problem on: ${TMPFILE}" + exit 1 + fi + fi +} + + +me=$(basename $0) +fullpath_me=$0 + +# Debug +logger -p user.err "$fullpath_me $@" + +if [ -z "$1" ] ; then + usage; +fi + +if [ -z "$2" ] ; then + usage; +fi + +MTX=/bin/chio +CHANGER=$1 +COMMAND=$2 +if [ ! -z "$3" ]; then + SLOT=$3 +fi +if [ ! 
-z "$4" ]; then + TAPE=$4 +else + TAPE=/dev/nrsa1 +fi + +# Time to wait for loading +SLEEP=20 +# What drive of the autochanger should be used primary +# At the moment bacula (1.31a) could not deal with more than one drive +DRIVE=0 + +usage() +{ + echo "" + echo "The $me script for bacula" + echo "--------------------------------------" + echo "" + echo "usage: $me [slot] [devicename of tapedrive]" + echo "" + echo "Valid commands:" + echo "" + echo "unload Unloads a tape into the slot" + echo " from where it was loaded." + echo "load Loads a tape from the slot " + echo " (slot-base is calculated to 1 as first slot)" + echo "list Lists full storage slots" + echo "loaded Gives slot from where the tape was loaded." + echo " 0 means the tape drive is empty." + echo "slots Gives Number of aviable slots." + echo "" + echo "Example:" + echo " mtx-changer /dev/changer load 1 loads a tape from slot 1" + echo "" + exit 2 +} + + +case ${COMMAND} in + unload) + # enable the following line if you need to eject the cartridge + #mt -f ${TAPE} off + #sleep 2 + # If the changer is power cycled with a tape loaded in a drive + if [ `${fullpath_me} ${CHANGER} loaded` -gt 0 ]; then + free_slot=`${fullpath_me} ${CHANGER} loaded` + free_slot=`expr $free_slot - 1` + ${MTX} -f ${CHANGER} move drive ${DRIVE} slot $free_slot + fi + ;; + + load) + ${MTX} -f ${CHANGER} move slot $((${SLOT}-1)) drive ${DRIVE} + rtn=$? + # Increase the sleep time if you have a slow device + sleep $SLEEP + exit $rtn + ;; + + list) + if [ "${SIMULATE_BARCODE}" = "true" ]; then + if [ -f "$BARCODE_FILE" ]; then + cat $BARCODE_FILE | grep -v "^#" + exit 0 + else + echo "Barcode file $BARCODE_FILE missing ... exiting!" + exit 1 + fi + else + ${MTX} -f ${CHANGER} status | grep "^slot .*: .*FULL>" | awk '{print $2}' | awk -F: '{print $1+1" "}' | tr -d "[\r\n]" + fi + ;; + + loaded) + # echo "Request loaded" + make_temp_file + ${MTX} -f ${CHANGER} status -S > ${TMPFILE} + rtn=$? + cat ${TMPFILE} | grep "^slot .: " | awk '{print $2+1}' | tr -d ":" + drive=`cat ${TMPFILE}| grep "^drive .: "` + if [ -n "$drive" ]; then + echo 0 + fi + rm -f ${TMPFILE} + exit $rtn + ;; + + slots) + # echo "Request slots" + ${MTX} -f ${CHANGER} status | grep "^slot " | tail -1 | awk '{print $2+1}' | tr -d ":" + ;; + + *) + usage + ;; +esac diff --git a/examples/autochangers/locking-mtx-changer b/examples/autochangers/locking-mtx-changer new file mode 100755 index 00000000..f3717d26 --- /dev/null +++ b/examples/autochangers/locking-mtx-changer @@ -0,0 +1,178 @@ +#!/bin/ksh +# +# Bacula interface to mtx autoloader +# +# This script is not needed with Bacula version 1.38 or later +# since the Storage daemon automatically ensures that only one +# thread accesses the script at a time. +# +# +# $Id$ +# +# If you set in your Device resource +# +# Changer Command = "path-to-this-script/mtx-changer %c %o %S %a %d +# you will have the following input to this script: +# +# mtx-changer "changer-device" "command" "slot" "archive-device" "drive-index" +# $1 $2 $3 $4 $5 +# +# for example: +# +# mtx-changer /dev/sg0 load 1 /dev/nst0 0 (on a Linux system) +# +# If you need to an offline, refer to the drive as $4 +# e.g. mt -f $4 offline +# +# Many changers need an offline after the unload. Also many +# changers need a sleep 60 after the mtx load. +# +# N.B. If you change the script, take care to return either +# the mtx exit code or a 0. If the script exits with a non-zero +# exit code, Bacula will assume the request failed. 
+# + +MTX=/lysator/bin/mtx +LOCKDIR=/tmp + +TMPDIR=/tmp + +make_temp_file() +{ + TMPFILE=`mktemp ${TMPDIR}/mtx$1.XXXXXXXXXX 2> /dev/null` + if test $? -ne 0 || test x${TMPFILE} = x; then + TMPFILE="${TMPDIR}/mtx$1.$$" + if test -f ${TMPFILE}; then + echo "ERROR: Temp file security problem on: ${TMPFILE}" + exit 1 + fi + fi +} + + +if test $# -lt 2 ; then + echo "usage: mtx-changer ctl-device command slot archive-device drive" + echo " Insufficient number of arguments arguments given." + echo " Mimimum usage is first two arguments ..." + exit 1 +fi + +# Setup arguments +ctl=$1 +cmd="$2" +slot=$3 +device=$4 +# If drive not given, default to 0 +if test $# = 5 ; then + drive=$5 +else + drive=0 +fi + +wait_for_drive() { + while ! mt -f $1 status >/dev/null 2>/dev/null; do +# echo "Device $1 - not ready, retrying..." + sleep 5 + done +} + +LOCKFILE="${LOCKDIR}/mtx-changer:`echo $ctl | tr / _`" + +changer_lock() { + make_temp_file lock + echo "$$" >${TMPFILE} + + while ! ln -n ${TMPFILE} $LOCKFILE 2>/dev/null; do + echo "$0: changer lock busy, retrying in 30 seconds..." + sleep 30 + done + + rm ${TMPFILE} +} + +changer_unlock() { + LOCKPID="`cat $LOCKFILE 2>/dev/null`" + if [ "$LOCKPID" != $$ ]; then + echo "$0: Invalid lock file (${LOCKFILE}) - not owned by us!" + exit 1 + fi + rm -f $LOCKFILE +} + + + +# +# Check for special cases where only 2 arguments are needed, +# all others are a minimum of 3 +case $cmd in + loaded) + ;; + unload) + ;; + list) + ;; + slots) + ;; + *) + if test $# -lt 3; then + echo "usage: mtx-changer ctl-device command slot archive-device drive" + echo " Insufficient number of arguments arguments given." + echo " Mimimum usage is first three arguments ..." + exit 1 + fi + ;; +esac + +changer_lock $ctl + +case $cmd in + unload) +# echo "Doing mtx -f $ctl unload $slot $drive" +# +# enable the following line if you need to eject the cartridge + mt -f $device offline + if test x$slot = x; then + ${MTX} -f $ctl unload + rtn=$? + else + ${MTX} -f $ctl unload $slot $drive + rtn=$? + fi + ;; + + load) +# echo "Doing mtx -f $ctl load $slot $drive" + ${MTX} -f $ctl load $slot $drive + rtn=$? + + wait_for_drive $device + changer_unlock $ctl + exit $rtn + ;; + + list) +# echo "Requested list" + ${MTX} -f $ctl status | tr ':=' ' ' | nawk '($1 == "Storage" && $2 == "Element" && $4 == "Full") { printf "%s:%s\n", $3, $6 }' + rtn=$? + ;; + + loaded) + make_temp_file + ${MTX} -f $ctl status >${TMPFILE} + rtn=$? + cat ${TMPFILE} | grep "^Data Transfer Element $drive:Full" | awk "{print \$7}" + cat ${TMPFILE} | grep "^Data Transfer Element $drive:Empty" | awk "{print 0}" + rm -f ${TMPFILE} + changer_unlock $ctl + exit $rtn + ;; + + slots) +# echo "Request slots" + ${MTX} -f $ctl status | grep " *Storage Changer" | awk "{print \$5}" + rtn=$? 
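+    # like unload and list, fall through to the common
+    # changer_unlock/exit at the bottom of the script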
+ ;; +esac + +changer_unlock $ctl +exit $rtn diff --git a/examples/autochangers/mtx-changer.Adic-Scalar-100 b/examples/autochangers/mtx-changer.Adic-Scalar-100 new file mode 100644 index 00000000..3795410f --- /dev/null +++ b/examples/autochangers/mtx-changer.Adic-Scalar-100 @@ -0,0 +1,1196 @@ +From bacula-users-admin@lists.sourceforge.net Wed Dec 10 15:04:47 2003 +Return-Path: +Received: from sc8-sf-mx1.sourceforge.net (lists.sourceforge.net + [66.35.250.206]) by matou.sibbald.com (8.11.6/8.11.6) with ESMTP id + hBAE4lY32735 for ; Wed, 10 Dec 2003 15:04:47 +0100 +Received: from sc8-sf-list2-b.sourceforge.net ([10.3.1.8] + helo=sc8-sf-list2.sourceforge.net) by sc8-sf-mx1.sourceforge.net with esmtp + (TLSv1:AES256-SHA:256) (Exim 4.24) id 1AU4wc-0007gs-Ls; Wed, 10 Dec 2003 + 06:04:02 -0800 +Received: from localhost.localdomain ([127.0.0.1] + helo=projects.sourceforge.net) by sc8-sf-list2.sourceforge.net with esmtp + (Exim 4.24) id 1AU4wc-0006AC-FQ; Wed, 10 Dec 2003 06:04:02 -0800 +Received: from sc8-sf-mx2-b.sourceforge.net ([10.3.1.12] + helo=sc8-sf-mx2.sourceforge.net) by sc8-sf-list2.sourceforge.net with esmtp + (Exim 4.24) id 1AU4vz-00068m-GL for bacula-users@lists.sourceforge.net; + Wed, 10 Dec 2003 06:03:23 -0800 +Received: from mta2.navair.navy.mil ([192.58.199.164]) by + sc8-sf-mx2.sourceforge.net with esmtp (Exim 4.24) id 1AU4vy-0006U5-2Q for + bacula-users@lists.sourceforge.net; Wed, 10 Dec 2003 06:03:22 -0800 +Received: by mta2.navair.navy.mil (Postfix, from userid 0) id EEB0B16C1FD; + Wed, 10 Dec 2003 09:03:10 -0500 (EST) +Received: from neim02.nawcad.navy.mil (neim02.nawcad.navy.mil + [140.229.37.205]) by mta2.navair.navy.mil (Postfix) with ESMTP id + 64D5116C2A9; Wed, 10 Dec 2003 09:03:10 -0500 (EST) +Received: by neim02.nawcad.navy.mil with Internet Mail Service + (5.5.2657.72) id ; Wed, 10 Dec 2003 09:02:18 -0500 +Message-ID: +From: "Kirkpatrick, Drew (ARINC)" +To: "'terry@mrtux.co.uk'" , bacula-users@lists.sourceforge.net +Subject: RE: [Bacula-users] Re: Autochanger setup +MIME-Version: 1.0 +X-Mailer: Internet Mail Service (5.5.2657.72) +Content-Type: text/plain; charset="ISO-8859-1" +X-Spam-Score: 0.0 (/) +X-Spam-Report: Spam Filtering performed by sourceforge.net. See + http://spamassassin.org/tag/ for more details. Report problems to + https://sf.net/tracker/?func=add&group_id=1&atid=200001 0.0 CLICK_BELOW + Asks you to click below +Sender: bacula-users-admin@lists.sourceforge.net +Errors-To: bacula-users-admin@lists.sourceforge.net +X-BeenThere: bacula-users@lists.sourceforge.net +X-Mailman-Version: 2.0.9-sf.net +Precedence: bulk +List-Unsubscribe: + , + +List-Id: Bacula user's email list for support and discussions + +List-Post: +List-Help: +List-Subscribe: + , + +List-Archive: + +Date: Wed, 10 Dec 2003 09:02:14 -0500 +X-Spam-Score: 0.0 (/) +X-Spam-Report: Spam Filtering performed by sourceforge.net. See + http://spamassassin.org/tag/ for more details. 
Report problems to + https://sf.net/tracker/?func=add&group_id=1&atid=200001 0.0 CLICK_BELOW + Asks you to click below +Status: RO +X-Status: F +X-Keywords: +X-UID: 76 +Content-Transfer-Encoding: 8bit + +So far this seems to be working for me, but it's not all quite finished :) + +Device { + Name = "firstTapeDrive" + Media Type = AIT3 + Archive Device = /dev/nst0 + Changer Device = /dev/sg3 + Changer Command = "/etc/bacula/mtx-changer %c %o %S %a" + AutoChanger = yes + Backward Space Record = no + RemovableMedia = yes + AlwaysOpen = yes + AutomaticMount = yes + LabelMedia = no +# HardwareEndOfMedium = no +} + + + +And here's the current version of my mtx-changer script (which changes +frequently as I figure out more of this stuff) + +**************************************************************************** +********************** + +#!/bin/bash + +# mtx-changer script modded to work with a Adic Scalar 100 better +# Drew Kirkpatrick, kirkpatricda@navair.navy.mil + + +echo "mtx-change called with: "$@ + + +AUTOLOADERDEV=$1 +DRIVE=0 +DRIVE_0_SEARCH="Data Transfer Element 0" +DRIVE_1_SEARCH="Data Transfer Element 1" +MTX=/usr/local/sbin/mtx + + + + +######################### Functions + +TMPDIR=/tmp + +make_temp_file() +{ + TMPFILE=`mktemp ${TMPDIR}/mtx$1.XXXXXXXXXX 2> /dev/null` + if test $? -ne 0 || test x${TMPFILE} = x; then + TMPFILE="${TMPDIR}/mtx$1.$$" + if test -f ${TMPFILE}; then + echo "ERROR: Temp file security problem on: ${TMPFILE}" + exit 1 + fi + fi +} + + + + + +# Take a single argument, a barcode number +# and returns the slot in which said tape +# should reside according to the +# tapeSlotMap file. That file is +# created using the setTapeSlotMap +# script +getSlotNum() +{ + if [ -z "$1" ] + then + echo getSlotNum"() called" \ + "without a parameter, error!!!" + exit 1 + fi + +# makes sure there is an entry in .tapeSlotMap file +# for that barcode number + found=`grep "$1" /etc/bacula/.tapeSlotMap | wc -l | awk '{print $1}'` + + if [ "$found" -ne 1 ] + then + echo "FATAL ERROR, in getSlotNum($1), error looking up" + echo "that barcode number. Found ($found) instances in the" + echo "tapeSlotMap file. There should be 1 instance. Do you" + echo "need to update this file with setTapeSlotMap script???" + exit 1 + fi + + slotNum=`grep "$1" /etc/bacula/.tapeSlotMap | awk '{print $1}'` + return $slotNum +} + + + + + + +# Take 1 arg, a 0 or 1 (or, how about the DRIVE var: ) +# for the drive it is interested in +# returns 1 if that drive has a tape loaded, and 0 if not +doesDriveGottaTape() +{ + if [ -z "$1" ] + then + echo "doesDriveGottaTape() called" \ + "without a parameter, error fool!!!" + exit 1 + else + case "$1" in + 0 ) + ANSWER=`${MTX} -f $AUTOLOADERDEV status | grep +"$DRIVE_0_SEARCH" \ + | awk '{sub("0:",""); print $4}'` + ;; + 1 ) + ANSWER=`${MTX} -f $AUTOLOADERDEV status | grep +"$DRIVE_1_SEARCH" \ + | awk '{sub("1:",""); print $4}'` + ;; + * ) + echo "Invalid drive num passed to +doesDriveGottaTape($1)" + exit 1 + ;; + esac + fi + + case "$ANSWER" in + Full ) + return 1 + ;; + Empty ) + return 0 + ;; + * ) + echo "Fatal error, invalid answer in doesDriveGottaTape(), +$ANSWER" + exit 1 + ;; + esac +} + + + + + + +# This handles 'correctly' unloading a drive to make sure it ends up +# in the slot it originaly came from. If you're moving/adding tapes +# please check the setTapeSlotMap script, and use accordingly +# to update the barcode->slot mappings. 
+# This function is needed because the Adic Scalar 100 +# doesn't seem to like returning a tape to it's original slot +# everytime. It seems to pick the first available, and I can't +# seem to figure out how to make it stop doing this. +# oh, and it takes 1 arg, the drive to unload +unloadDrive() +{ + if [ -z "$1" ] + then + echo "unloadDrive() called " \ + "without a parameter, error!!!" + exit 1 + fi + + +# double check to see if that drive does actually have a tape in it + doesDriveGottaTape "$1" + rtn=$? + if [ "$rtn" == 0 ] + then + echo "errrm, in unloadDrive($1), that drive " \ + "doesn't have a tape in it!!!" + return 0 + fi + + +# Get the barcode of the tape in the drive + case "$1" in + 0 ) + barcode=`${MTX} -f $AUTOLOADERDEV status | grep +"$DRIVE_0_SEARCH" | awk '{print $10}'` + ;; + 1 ) + barcode=`${MTX} -f $AUTOLOADERDEV status | grep +"$DRIVE_1_SEARCH" | awk '{print $10}'` + ;; + * ) + echo "ERROR, invalid drive num in unloadDrive($1)" + exit 1 + ;; + esac + +# return tape with barcode to which slot??? + getSlotNum "$barcode" + returnTo=$? + + +# unloading the tape now + ${MTX} -f $AUTOLOADERDEV unload $returnTo $1 + rtn=$? + if [ "$rtn" -ne 0 ] + then + echo "MTX came back with code ($rtn)" + echo "ERROR, in unloadDrive($1), mtx unload failed" + exit 1 + fi + return $rtn +} + + + + +# This handles loading tapes into drives +# It checks if there is currently a tape +# in the target drive, and if there is +# it unloads it prior to loading the new tape +# just in case bacula doesn't explicitly +# request this. +# This function take two args, the drive +# to load, and the tape slot to load from +loadDrive() +{ + if [ -z "$1" ] || [ -z "$2" ] + then + echo "loadDrive() not called " \ + "with the correct params, error!!!" + exit 1 + fi + +# Check to see if it already has a tape in it... + doesDriveGottaTape "$1" + rtn=$? + if [ "$rtn" == 1 ] + then + echo "Oops, in loadDrive() that drive" + echo "already has a tape. Unloading it now" + unloadDrive "$1" + fi + + +# You're now ready to load the drive + echo "Now loading tape in slot $2 into drive $1" + + ${MTX} -f $AUTOLOADERDEV load $2 $1 + rtn=$? + if [ "$rtn" -ne 0 ] + then + echo "MTX came back with code ($rtn)" + echo "ERROR, in loadDrive($1, $2), mtx load failed" + exit 1 + fi + return $rtn +} + + + + + +####################### End Functions + + + + +####################### Start Script + +# Changes the nst* device bacula passes to a number that mtx likes +if [ "$2" == load ] || [ "$2" == unload ] +then + echo "In mtx-changer drive selection code...." + case "$4" in + /dev/nst0 ) + DRIVE="0" + ;; + + /dev/nst1 ) + DRIVE="1" + ;; + + * ) + echo "Error, invalid drive for autoloader." + echo "Maybe you mispelled the drive name, or tried to" + echo "use a drive that doesn't exist" + echo "Only two drives handled in /etc/bacula/mtx-changer script" + exit 1 + ;; + esac +fi + + + + +case "$2" in + unload) + unloadDrive "$DRIVE" + rtn=$? + + sleep 35 + exit $rtn + ;; + + load) + loadDrive "$DRIVE" "$3" + rtn=$? + sleep 55 + exit $rtn + ;; + + list) + echo "Requested list, DREW YOU HAVEN'T IMPLEMENTED YET!!!!" + ${MTX} -f $1 status | grep " *Storage Element [0-9]*:.*Full" | \ + awk "{print \$3 \$4}" | sed "s/Full *\(:VolumeTag=\)*//" + + rtn=$? + echo "mtx-changer returning value: $rtn" + ;; + + loaded) + echo "Request loaded, DREW YOU HAVEN'T IMPLEMENTED YET!!!!" + make_temp_file + ${MTX} -f $1 status >${TMPFILE} + rtn=$? 
+ cat ${TMPFILE} | grep "^Data Transfer Element 0:Full" | awk "{print +\$7}" + cat ${TMPFILE} | grep "^Data Transfer Element 0:Empty" | awk "{print +0}" +# cat ${TMPFILE} | grep "^Data Transfer Element 1:Full" | awk "{print +\$7}" +# cat ${TMPFILE} | grep "^Data Transfer Element 1:Empty" | awk +"{print 0}" + rm -f ${TMPFILE} + + echo "mtx-changer returning value: $rtn" + exit $rtn + ;; + + slots) + echo "Request slots, DREW YOU HAVEN'T IMPLEMENTED YET!!!!" + ${MTX} -f $1 status | grep " *Storage Changer" | awk "{print \$5}" + rtn=$? + echo "mtx-changer returning value: $rtn" + ;; +esac + + +**************************************************************************** +** + + + + + +Hope that helps. Sorry, it's kinda setup for our Adic, and for two drives. + + + +-Drew + + +-----Original Message----- +From: Terry [mailto:terry@mrtux.co.uk] +Sent: Wednesday, December 10, 2003 8:23 AM +To: bacula-users@lists.sourceforge.net +Subject: [Bacula-users] Re: Autochanger setup + + +bacula-users-request@lists.sourceforge.net wrote: + +>Send Bacula-users mailing list submissions to +> bacula-users@lists.sourceforge.net +> +>To subscribe or unsubscribe via the World Wide Web, visit +> https://lists.sourceforge.net/lists/listinfo/bacula-users +>or, via email, send a message with subject or body 'help' to +> bacula-users-request@lists.sourceforge.net +> +>You can reach the person managing the list at +> bacula-users-admin@lists.sourceforge.net +> +>When replying, please edit your Subject line so it is more specific +>than "Re: Contents of Bacula-users digest..." +> +> +>Today's Topics: +> +> 1. Two beta releases (Kern Sibbald) +> 2. Re: Current Volume "Cinta-0006" not acceptable +> because (Kern Sibbald) +> 3. Re: Jobs awaiting execution? (Kern Sibbald) +> 4. Re: Mount need for every job... me too (Gregory Brauer) +> 5. Importtant: BIG Problems with schedule / bacula doesn't do +Differnetial +> backups (Volker Sauer) +> 6. Re: Mount need for every job... me too (fixed!) (Gregory Brauer) +> +>--__--__-- +> +>Message: 1 +>From: Kern Sibbald +>To: bacula-announce +>Cc: bacula-users +>Organization: +>Date: 09 Dec 2003 22:11:47 +0100 +>Subject: [Bacula-users] Two beta releases +> +>Hello, +> +>I have just made two Beta releases. Unfortunately, I mistakenly put the +>1.32e-08Dec03 Windows binaries in main win-binaries section rather than +>win-binaries-beta. At this point, I won't change it because it would +>require a new upload, ... +> +>The 1.33-08Dec03 release is definitely a Beta release in the sense that +>version 1.33 is still under very active development. The main features +>of this release are: +>- all features in 1.32e-08Dec03 +>- update slots (marks any other Volumes in the same pool with the same +> Slot number as "not InChanger"). This permits retaining the Slot on +> Volumes not currently in the magazine. +>- update slots=,,, ... +> where slot1, slot2 are slot numbers you want updated from barcode +> information and slot-range is a slot-number1-slot-number2 (e.g. 3-5). +>- update slots[=...] scan +> causes the Storage daemon to read the volume label for each slot +> listed, or in the absence of a slot list, all slots occupied. +>- Automatic synchronization of clocks between DIR and FD so that there +> is no missed files due to timezone differences or shewed clocks. +>- A database upgrade is mandatory. +>- None of the daemons are compatible with version 1.32x, which means +> you must upgrade all or nothing. +>- NOTE! 
FreeBSD users must add: +> Two EOF = yes +> to each of their tape Device resources in the SD. I forgot to document +> this elsewhere!!!!! +> +>Release 1.32e-08Dec03 was meant to be an official 1.32e release with all +>the current patches and a few strategic updates from 1.33. In the end, I +>threw in *all* the updates from 1.33 except those that require a +>database upgrade or force a daemon incompatibility, or other operating +>incompatibility. +> +>I haven't made 1.32-08Dec03 an official release for three reasons: +>- I cannot test it in production because my production system is on +> release 1.33. +>- There is still a bit more documentation to do. +>- I added more of the 1.33 code than I originally planned, but some of +> it such as the new 10 job history for each daemon is so neat that I +> wanted to put it in your hands (one of the features not yet +> documented). +> +>That said, I think this is a very good release. I have HEAVILY tested it +>with my regression testing, and 95% of the code has been running in my +>production 1.33 system since 24 November. I encourage all of you to +>test it. Note, all of the daemons should be totally compatible with any +>other 1.32x release -- please send feedback. +> +>Once I finish the documentation and get some feedback on how it runs, +>I'll "officially" release it. +> +>As most of you know, from tomorrow morning, I will be out for one week +>and then a bit slow responding for a week or two. Thanks for all the +>well wishes I have received. +> +>Regards, Kern +> +> +> +> +> +> +>--__--__-- +> +>Message: 2 +>Subject: Re: [Bacula-users] Current Volume "Cinta-0006" not acceptable +> because +>From: Kern Sibbald +>To: Carlos Molina Molina +>Cc: Phil Stracchino , +> bacula-users +>Organization: +>Date: 09 Dec 2003 22:30:07 +0100 +> +> +>--=-HANNTFvbGW9Q5KQ00dee +>Content-Type: text/plain; charset=ISO-8859-1 +>Content-Transfer-Encoding: quoted-printable +> +>On Tue, 2003-12-09 at 19:03, Carlos Molina Molina wrote: +> +> +>>no, I haven't the volume.... but the only thing that I need from bacula i= +>> +>> +>t's=20 +> +> +>>that write in any volume +>>other thing it's that when finish it send this part of messages. I put "u= +>> +>> +>se=20 +> +> +>>any volume" +>>=20 +>>AZOG: Naboo-Backup.2003-12-09_14.12.07 Warning: Unexpected Client Job=20 +>>message: 2801 End Backup Job TermCode=3D84 JobFiles=3D56064 ReadBytes=3D1= +>> +>> +>501707569=20 +> +> +>>JobBytes=3D621946654 +>>=20 +>>AZOG: Naboo-Backup.2003-12-09_14.12.07 Fatal error: No Job status returne= +>> +>> +>d=20 +> +> +>>from FD. +>>=20 +>>What means??? +>> +>> +> +>I cannot comment on the tape Volume error messages, but the message +>listed above "Unexpected Client Job message ..." seems to me to come +>from the fact that you have upgraded that client (File daemon) to a more +>recent version of Bacula than you are running on your Director. Please +>check your version numbers. If they are both the same number they should +>work together. If not, you need to get them both on the same version. +> +>Be sure to read the ReleaseNotes on any upgrade to ensure you do any +>required database upgrading. 
+> +>Regards, Kern +> +> +> +> +>>Thank +>>=20 +>> +>> +>>>From: Phil Stracchino +>>>To: bacula-users@lists.sourceforge.net +>>>Subject: Re: [Bacula-users] Current Volume "Cinta-0006" not acceptable=20 +>>>because +>>>Date: Tue, 9 Dec 2003 12:56:43 -0500 +>>> +>>>On Tue, Dec 09, 2003 at 03:47:29PM +0000, Carlos Molina Molina wrote: +>>> +>>> +>>>>How I avoid this message???, The only solution it's that I erase the=20 +>>>> +>>>> +>>>tape +>>> +>>> +>>>>manually... +>>>> +>>>> +>>>>SDT-10000: Naboo-Backup.2003-12-09_12.38.04 Warning: mount.c:232=20 +>>>> +>>>> +>>>Director +>>> +>>> +>>>>wanted Volume "Cinta-0001". +>>>> Current Volume "Cinta-0006" not acceptable because: +>>>> 1997 Volume "Cinta-0006" not in catalog. +>>>>SDT-10000: Please mount Volume "Cinta-0001" on Storage Device=20 +>>>> +>>>> +>>>"SDT-10000" +>>> +>>> +>>>>for Job Naboo-Backup.2003-12-09_12.38.04 +>>>>Use "mount" command to release Job. +>>>> +>>>> +>>>Don't you have the volume it's asking for? +>>> +>>> +>>>-- +>>> .********* Fight Back! It may not be just YOUR life at risk. =20 +>>>*********. +>>> : phil stracchino : unix ronin : renaissance man : mystic zen biker ge= +>>> +>>> +>ek=20 +> +> +>>>: +>>> : alaric@caerllewys.net : alaric-ruthven@earthlink.net : phil@latt.ne= +>>> +>>> +>t =20 +> +> +>>>: +>>> : 2000 CBR929RR, 1991 VFR750F3 (foully murdered), 1986 VF500F (sold)= +>>> +>>> +> =20 +> +> +>>>: +>>> : Linux Now! ...Because friends don't let friends use Microsoft. = +>>> +>>> +> =20 +> +> +>>>: +>>> +>>> +>>>------------------------------------------------------- +>>>This SF.net email is sponsored by: SF.net Giveback Program. +>>>Does SourceForge.net help you be more productive? Does it +>>>help you create better code? SHARE THE LOVE, and help us help +>>>YOU! Click Here: http://sourceforge.net/donate/ +>>>_______________________________________________ +>>>Bacula-users mailing list +>>>Bacula-users@lists.sourceforge.net +>>>https://lists.sourceforge.net/lists/listinfo/bacula-users +>>> +>>> +>>=20 +>>_________________________________________________________________ +>>MSN Amor: busca tu =BD naranja http://latam.msn.com/amor/ +>>=20 +>>=20 +>>=20 +>>------------------------------------------------------- +>>This SF.net email is sponsored by: SF.net Giveback Program. +>>Does SourceForge.net help you be more productive? Does it +>>help you create better code? SHARE THE LOVE, and help us help +>>YOU! Click Here: http://sourceforge.net/donate/ +>>_______________________________________________ +>>Bacula-users mailing list +>>Bacula-users@lists.sourceforge.net +>>https://lists.sourceforge.net/lists/listinfo/bacula-users +>> +>> +> +>--=-HANNTFvbGW9Q5KQ00dee +>Content-Type: application/pgp-signature; name=signature.asc +>Content-Description: This is a digitally signed message part +> +>-----BEGIN PGP SIGNATURE----- +>Version: GnuPG v1.2.1 (GNU/Linux) +> +>iD8DBQA/1j7fNgfoSvWqwEgRAsEDAJ0aF6qvQVHiH0X4DCTwXQh3wux1gACfQLxl +>wKgzEjDQMg3v1vh4yS6kUys= +>=6xcc +>-----END PGP SIGNATURE----- +> +>--=-HANNTFvbGW9Q5KQ00dee-- +> +> +> +>--__--__-- +> +>Message: 3 +>Subject: Re: [Bacula-users] Jobs awaiting execution? +>From: Kern Sibbald +>To: Danie Theron +>Cc: bacula-users +>Organization: +>Date: 09 Dec 2003 22:39:39 +0100 +> +>Hello, +> +>Move up to version 1.32d or 1.32e. The job scheduler was totally +>rewritten between 1.31 and 1.32 and a number of bizarre little problems +>that some people (not me) were having totally went away. I suspect that +>they were pthreads related on non-Linux systems, but who knows ... 
+> +>Regards, Kern +> +>On Tue, 2003-12-09 at 12:09, Danie Theron wrote: +> +> +>>Hi +>> +>>I have recently split my backups into more manageable sets , and now it +>>seems they are not running automatically. Here is an output when I do a +>>"status" command : +>> +>>Last Job BackupCatalog.2003-12-09_01.10.00 finished at 09-Dec-2003 01:10 +>> Files=1 Bytes=43,308,017 Termination Status=OK +>>Console connected at 09-Dec-2003 19:06 +>>JobId 132 Job prometheus-rapstech.2003-12-09_19.00.03 is waiting +>>execution. +>>JobId 131 Job prometheus-consult-gis.2003-12-09_19.00.02 is waiting +>>execution. +>>JobId 130 Job prometheus-consult-documents.2003-12-09_19.00.01 is +>>waiting on max +>> Client jobs. +>>JobId 129 Job prometheus-system.2003-12-09_19.00.00 is running. +>>Level Type Scheduled Name +>>================================================================= +>>Incremental Backup 10-Dec-2003 19:00 prometheus-system +>>Incremental Backup 10-Dec-2003 19:00 prometheus-consult-documents +>>Incremental Backup 10-Dec-2003 19:00 prometheus-consult-gis +>>Incremental Backup 10-Dec-2003 19:00 prometheus-rapstech +>>Full Backup 10-Dec-2003 01:10 BackupCatalog +>>==== +>>Connecting to Storage daemon File at localhost:9103 +>> +>>prometheus-sd Version: 1.31a (02 Aug 2003) +>>Daemon started 08-Dec-2003 19:30, 3 Jobs run. +>>Last Job BackupCatalog.2003-12-09_01.10.00 finished at 09-Dec-2003 01:10 +>> Files=1 Bytes=43,308,119 Termination Status=OK +>>Device /data/server is mounted with Volume "backup" +>> Total Bytes=791,751,921 Blocks=12,274 Bytes/block=64,506 +>> Positioned at File=0 Block=9,351 +>>Device /data/server is not open. +>>Device /data/server is not open. +>>Device /data/server is not open. +>>Device /data/server is not open. +>>Full Backup job backup using Volume +>>"prometheus-system.2003-12-09_19.00.00" on d +>>evice /data/server +>> Files=15,341 Bytes=602,255,638 Bytes/sec=1,517,016 +>> FDReadSeqNo=134,036 in_msg=96746 out_msg=5 fd=6 +>> +>> +>>When I manually run a job it backups fine. +>> +>>Ta in advance +>>Daniel +>> +>> +>> +>>------------------------------------------------------- +>>This SF.net email is sponsored by: SF.net Giveback Program. +>>Does SourceForge.net help you be more productive? Does it +>>help you create better code? SHARE THE LOVE, and help us help +>>YOU! Click Here: http://sourceforge.net/donate/ +>>_______________________________________________ +>>Bacula-users mailing list +>>Bacula-users@lists.sourceforge.net +>>https://lists.sourceforge.net/lists/listinfo/bacula-users +>> +>> +> +> +> +>--__--__-- +> +>Message: 4 +>Date: Tue, 09 Dec 2003 14:57:32 -0800 +>From: Gregory Brauer +>To: bacula-users +>Subject: Re: [Bacula-users] Mount need for every job... me too +> +> +>I have looked at all of the comments in this thread, but I +>am also having this problem and cannot seem to fix it. +>I am using an autochanger that blocks waiting for me to type +>the "mount" command every time it autoloads a tape. It +>worked fine for me with 1.31, but has been doing this +>since I upgraded to 1.32d. 
Here are some excerpts from my +>configuration: +> +> +> From bacula-dir.conf: +> +>Storage { +> Autochanger = yes +> Name = "kiev AIT2 Autoloader" +> Address = kiev +> SDPort = 9103 +> Password = "yV3C5gZXhF/RrYadlwKWnwAbTV1oUCXVqUIrAaUjMov+" +> Device = "AIT2 Autoloader" +> MediaType = "AIT2" +>} +> +> +> From bacula-sd.conf: +> +>Device { +> Autochanger = yes +> Changer Device = /dev/sg0 +> Changer Command = "/etc/bacula/mtx-changer %c %o %S %a" +> Name = "AIT2 Autoloader" +> Media Type = "AIT2" +> Archive Device = /dev/nst0 +> Hardware end of medium = No; +> Automatic Mount = yes; +> Always Open = yes; +> Removable Media = yes; +>} +> +>I had tried this with both Always Open = yes, and no... it worked fine +>with "no" under 1.31. +> +> +>I have also noticed that every time bacula issues a "load slot" command, +>I get an error message saying mtx-changer exited 1. +> +> +>09-Dec-2003 14:42 kiev-sd: 3303 Issuing autochanger "load slot 15" command. +>09-Dec-2003 14:44 kiev-sd: 3992 Bad autochanger "load slot" status=1. +> +> +>But if I run mtx-changer manually from the command line, it always +>seems to exit 0, so I'm not sure where bacula is seeing this error. +> +> +>basic [root@kiev bacula]$ ./mtx-changer /dev/sg0 load 15 /dev/nst0 +>basic [root@kiev bacula]$ echo $status +>0 +> +> +>I have tried increasing my sleep time on load to arbitrarily high +>values, but that hasn't helped. +> +> +> load) +># echo "Doing mtx -f $1 load $3" +> ${MTX} -f $1 load $3 +> rtn=$? +># +># Increase the sleep time if you have a slow device +> #sleep 15 +> sleep 120 +> exit $rtn +> ;; +> +> +>What else can I check? +> +>Greg +> +> +> +>--__--__-- +> +>Message: 5 +>Date: Wed, 10 Dec 2003 00:48:19 +0100 +>From: Volker Sauer +>To: Kern Sibbald , +> bacula-users +>Subject: [Bacula-users] Importtant: BIG Problems with schedule / bacula +doesn't do Differnetial +> backups +> +>This is an OpenPGP/MIME signed message (RFC 2440 and 3156) +>--------------enig1AEBD91EBCB59C2EB43FEF98 +>Content-Type: text/plain; charset=us-ascii; format=flowed +>Content-Transfer-Encoding: 7bit +> +>Hallo Kern, +> +>as I already worried about in 2 other mails is now proven: +> +>Bacula doesn't do a diffential backup! 
+> +>My schedule is this: +> +>Schedule { +> Name = "WeeklyCycle" +> Run = Level=Full Pool=Full 1st tue at 2:00pm +> Run = Level=Differential Pool=Full 2nd-5th tue at 7:00pm +> Run = Level=Incremental Pool=Inc wed-mon at 7:00pm +>} +> +> +>and this is what list jobs says (I hope you can read that): +> +> +>| 55 | Zaphod | 2003-12-02 14:00:04 | B | F | +>504,634 | 20,669,282,940 | T | +>| 56 | Arthur | 2003-12-02 16:31:30 | B | F | +>380,719 | 15,498,385,751 | T | +>| 57 | Alexis | 2003-12-02 18:23:45 | B | F | +>167,710 | 5,157,651,432 | T | +>| 58 | BackupCatalog | 2003-12-02 19:23:13 | B | F | +> 1 | 404,663,640 | T | +>| 59 | ReminderIncCartridge | 2003-12-03 08:00:05 | D | F | +> 0 | 0 | T | +>| 60 | Zaphod | 2003-12-03 19:00:04 | B | I | +> 693 | 262,166,208 | T | +>| 61 | Arthur | 2003-12-03 19:12:57 | B | I | +> 314 | 889,290,521 | T | +>| 62 | Alexis | 2003-12-03 19:24:46 | B | I | +> 748 | 56,013,471 | T | +>| 63 | BackupCatalog | 2003-12-03 22:00:04 | B | F | +> 1 | 404,846,132 | T | +>| 64 | Zaphod | 2003-12-04 19:00:05 | B | I | +> 740 | 385,140,941 | T | +>| 65 | Arthur | 2003-12-04 19:16:03 | B | I | +> 441 | 904,698,439 | T | +>| 66 | Alexis | 2003-12-04 19:28:09 | B | I | +> 669 | 68,876,161 | T | +>| 67 | BackupCatalog | 2003-12-04 22:00:04 | B | F | +> 1 | 253,231,104 | E | +>| 68 | BackupCatalog | 0000-00-00 00:00:00 | B | I | +> 0 | 0 | C | +>| 69 | BackupCatalog | 0000-00-00 00:00:00 | B | I | +> 0 | 0 | C | +>| 70 | BackupCatalog | 2003-12-05 01:25:19 | B | I | +> 1 | 405,045,253 | T | +>| 71 | Zaphod | 2003-12-05 19:00:04 | B | I | +> 8,389 | 373,643,708 | T | +>| 72 | Arthur | 2003-12-05 19:13:59 | B | I | +> 6,904 | 1,007,785,097 | T | +>| 73 | Alexis | 2003-12-05 19:28:23 | B | I | +> 4,085 | 114,727,583 | T | +>| 74 | BackupCatalog | 2003-12-05 22:00:04 | B | F | +> 1 | 407,222,289 | T | +>| 75 | Zaphod | 2003-12-06 19:00:05 | B | I | +> 923 | 175,466,045 | T | +>| 76 | Arthur | 2003-12-06 19:14:01 | B | I | +> 1,872 | 516,372,637 | T | +>| 77 | Alexis | 2003-12-06 19:25:01 | B | I | +> 5,422 | 190,847,977 | T | +>| 78 | BackupCatalog | 2003-12-06 22:00:04 | B | F | +> 1 | 408,121,544 | T | +>| 79 | Zaphod | 2003-12-07 19:00:04 | B | I | +> 1,347 | 93,787,356 | T | +>| 80 | Arthur | 2003-12-07 19:12:06 | B | I | +> 705 | 496,647,729 | T | +>| 81 | Alexis | 2003-12-07 19:25:08 | B | I | +> 1,414 | 102,756,100 | T | +>| 82 | BackupCatalog | 2003-12-07 22:00:04 | B | F | +> 1 | 408,519,580 | T | +>| 83 | Zaphod | 2003-12-08 19:00:05 | B | I | +> 4,428 | 461,132,237 | T | +>| 84 | Arthur | 2003-12-08 19:15:59 | B | I | +> 307 | 384,713,540 | T | +>| 85 | Alexis | 2003-12-08 19:25:56 | B | I | +> 708 | 78,420,311 | T | +>| 86 | BackupCatalog | 2003-12-08 22:00:04 | B | F | +> 1 | 409,139,688 | T | +>| 87 | ReminderFullCartridge | 2003-12-09 06:00:05 | D | F | +> 0 | 0 | T | +>| 88 | Zaphod | 2003-12-09 19:00:05 | B | F | +>507,058 | 21,050,750,780 | T | +>| 89 | Arthur | 2003-12-09 21:27:07 | B | F | +>382,397 | 15,246,847,324 | T | +>| 90 | Alexis | 2003-12-09 23:18:30 | B | F | +>167,868 | 4,595,334,742 | T | +>| 91 | BackupCatalog | 2003-12-10 00:10:05 | B | F | +> 1 | 528,055,685 | T | +>+-------+-----------------------+---------------------+------+-------+----- +-----+----------------+-----------+ +>* +> +>Bacula did a FullBackup on December 2nd, which was the first Tuesday in +>this month. Correct. Then it did Incremenatial backups from 3rd until +>8th. Also correct. 
+>Yesterday, on December 9th - according to my schedule there should have +>been a differential backup!! But as you can see above, bacula did a Full +>backup - without reason, I think!! +>(This brings real big problems to my storage managment, because I don't +>have enough space for 4 Full-Backups a month. I urgently need diffential +>backups in the conecept) +>Could the cause be, that there's a pool change! Should the diffential +>backup better go to the inc-pool?? +> +>Please help!! +> +>Regards +>Volker +> +>P.S.: this is the job output of the jobs from Tue 9th, which should have +>been differential: +> +> +>z217-dir: Start Backup JobId 88, Job=Zaphod.2003-12-09_19.00.00 +>z217-sd: 3301 Issuing autochanger "loaded" command. +>z217-sd: Volume "F-6" previously written, moving to end of data. +>z217-sd: Ready to append to end of Volume at file=6. +>z217-sd: block.c:480 End of medium on device /dev/tape. Write of 64512 +>bytes got 32768. +>z217-sd: End of medium on Volume "F-6" Bytes=23,935,131,136 +>Blocks=371,020 at 09-Dec-2003 21:06. +>z217-sd: 3301 Issuing autochanger "loaded" command. +>z217-sd: 3302 Issuing autochanger "unload" command. +>z217-sd: 3303 Issuing autochanger "load slot 7" command. +>z217-sd: 3304 Autochanger "load slot 7" status is OK. +>z217-sd: Wrote label to prelabeled Volume "F-7" on device /dev/tape +>z217-sd: New volume "F-7" mounted on device /dev/tape at 09-Dec-2003 21:08. +>z217-dir: Bacula 1.32d (02Nov03): 09-Dec-2003 21:27 +>JobId: 88 +>Job: Zaphod.2003-12-09_19.00.00 +>Backup Level: Full +>Client: zaphod-fd +>FileSet: "Set Zaphod" 2003-11-21 16:32:26 +>Start time: 09-Dec-2003 19:00 +>End time: 09-Dec-2003 21:27 +>FD Files Written: 507,058 +>SD Files Written: 507,058 +>FD Bytes Written: 21,050,750,780 +>SD Bytes Written: 21,121,287,812 +>Rate: 2387.2 KB/s +>Software Compression: None +>Volume name(s): F-6|F-7 +>Volume Session Id: 14 +>Volume Session Time: 1070650983 +>Last Volume Bytes: 2,327,668,804 +>Non-fatal FD errors: 0 +>SD Errors: 0 +>FD termination status: OK +>SD termination status: OK +>Termination: Backup OK +> +>z217-dir: Begin pruning Jobs. +>z217-dir: No Jobs found to prune. +>z217-dir: Begin pruning Files. +>z217-dir: No Files found to prune. +>z217-dir: End auto prune. +> +> +> +>z217-dir: Start Backup JobId 89, Job=Arthur.2003-12-09_19.00.01 +>z217-dir: Bacula 1.32d (02Nov03): 09-Dec-2003 23:18 +>JobId: 89 +>Job: Arthur.2003-12-09_19.00.01 +>Backup Level: Full +>Client: z217-fd +>FileSet: "Set Arthur" 2003-11-21 18:47:14 +>Start time: 09-Dec-2003 21:27 +>End time: 09-Dec-2003 23:18 +>FD Files Written: 382,397 +>SD Files Written: 382,397 +>FD Bytes Written: 15,246,847,324 +>SD Bytes Written: 15,300,937,041 +>Rate: 2283.1 KB/s +>Software Compression: None +>Volume name(s): F-7 +>Volume Session Id: 15 +>Volume Session Time: 1070650983 +>Last Volume Bytes: 17,654,556,715 +>Non-fatal FD errors: 0 +>SD Errors: 0 +>FD termination status: OK +>SD termination status: OK +>Termination: Backup OK +> +>z217-dir: Begin pruning Jobs. +>z217-dir: No Jobs found to prune. +>z217-dir: Begin pruning Files. +>z217-dir: No Files found to prune. +>z217-dir: End auto prune. 
+> +> +> +>z217-dir: Start Backup JobId 90, Job=Alexis.2003-12-09_19.00.02 +>z217-dir: Bacula 1.32d (02Nov03): 10-Dec-2003 00:10 +>JobId: 90 +>Job: Alexis.2003-12-09_19.00.02 +>Backup Level: Full +>Client: alexis-fd +>FileSet: "Set Alexis" 2003-11-21 20:58:58 +>Start time: 09-Dec-2003 23:18 +>End time: 10-Dec-2003 00:10 +>FD Files Written: 167,868 +>SD Files Written: 167,868 +>FD Bytes Written: 4,595,334,742 +>SD Bytes Written: 4,616,435,343 +>Rate: 1487.2 KB/s +>Software Compression: None +>Volume name(s): F-7 +>Volume Session Id: 16 +>Volume Session Time: 1070650983 +>Last Volume Bytes: 22,280,313,500 +>Non-fatal FD errors: 0 +>SD Errors: 0 +>FD termination status: OK +>SD termination status: OK +>Termination: Backup OK +> +>z217-dir: Begin pruning Jobs. +>z217-dir: No Jobs found to prune. +>z217-dir: Begin pruning Files. +>z217-dir: No Files found to prune. +>z217-dir: End auto prune. +> +> +I'm trying to setup a surestore 12000e autochanger so far in my +bacula-sd.conf i have added this +Device { + Name = autochanger # + Media Type = DDS-2 + Archive Device = /dev/sa1 + AutomaticMount = yes; # when device opened, read it + AlwaysOpen = yes; + LabelMedia = yes; + RemovableMedia = yes; + Autochanger = yes; + Changer Device = /dev/pass0 #changer device +} +But i need to make an entry for the Changer Command = .I have +installed mtx but there are no sample files to be found or any +bacula samples for autochangers +Cheers Terry + + + + +------------------------------------------------------- +This SF.net email is sponsored by: SF.net Giveback Program. +Does SourceForge.net help you be more productive? Does it +help you create better code? SHARE THE LOVE, and help us help +YOU! Click Here: http://sourceforge.net/donate/ +_______________________________________________ +Bacula-users mailing list +Bacula-users@lists.sourceforge.net +https://lists.sourceforge.net/lists/listinfo/bacula-users + + +------------------------------------------------------- +This SF.net email is sponsored by: SF.net Giveback Program. +Does SourceForge.net help you be more productive? Does it +help you create better code? SHARE THE LOVE, and help us help +YOU! Click Here: http://sourceforge.net/donate/ +_______________________________________________ +Bacula-users mailing list +Bacula-users@lists.sourceforge.net +https://lists.sourceforge.net/lists/listinfo/bacula-users diff --git a/examples/autochangers/mtx-changer.Adic-Scalar-24 b/examples/autochangers/mtx-changer.Adic-Scalar-24 new file mode 100755 index 00000000..eec9e569 --- /dev/null +++ b/examples/autochangers/mtx-changer.Adic-Scalar-24 @@ -0,0 +1,133 @@ +#!/bin/sh +# +# Bacula interface to mtx autoloader +# +# $Id$ +# +# If you set in your Device resource +# +# Changer Command = "path-to-this-script/mtx-changer %c %o %S %a %d" +# you will have the following input to this script: +# +# mtx-changer "changer-device" "command" "slot" "archive-device" "drive-index" +# $1 $2 $3 $4 $5 +# +# for example: +# +# mtx-changer /dev/sg0 load 1 /dev/nst0 0 (on a Linux system) +# +# If you need to an offline, refer to the drive as $4 +# e.g. mt -f $4 offline +# +# Many changers need an offline after the unload. Also many +# changers need a sleep 60 after the mtx load. +# +# N.B. If you change the script, take care to return either +# the mtx exit code or a 0. If the script exits with a non-zero +# exit code, Bacula will assume the request failed. +# + +MTX=/usr/sbin/mtx + +TMPDIR=/tmp + +make_temp_file() +{ + TMPFILE=`mktemp ${TMPDIR}/mtx$1.XXXXXXXXXX 2> /dev/null` + if test $? 
-ne 0 || test x${TMPFILE} = x; then + TMPFILE="${TMPDIR}/mtx$1.$$" + if test -f ${TMPFILE}; then + echo "ERROR: Temp file security problem on: ${TMPFILE}" + exit 1 + fi + fi +} + + +if test $# -lt 2 ; then + echo "usage: mtx-changer ctl-device command slot archive-device drive" + echo " Insufficient number of arguments arguments given." + echo " Mimimum usage is first two arguments ..." + exit 1 +fi + +# Setup arguments +ctl=$1 +cmd="$2" +slot=$3 +device=$4 +# If drive not given, default to 0 +if test $# = 5 ; then + drive=$5 +else + drive=0 +fi + +${MTX} -f $1 inventory + +# +# Check for special cases where only 2 arguments are needed, +# all others are a minimum of 3 +case $cmd in + loaded) + ;; + unload) + ;; + list) + ;; + slots) + ;; + *) + if test $# -lt 3; then + echo "usage: mtx-changer ctl-device command slot archive-device drive" + echo " Insufficient number of arguments arguments given." + echo " Mimimum usage is first three arguments ..." + exit 1 + fi + ;; +esac + + +case $cmd in + unload) +# echo "Doing mtx -f $ctl unload $slot $drive" +# +# enable the following line if you need to eject the cartridge +# mt -f $device offline + if test x$slot = x; then + ${MTX} -f $ctl unload + else + ${MTX} -f $ctl unload $slot $drive + fi + ;; + + load) +# echo "Doing mtx -f $ctl load $slot $drive" + ${MTX} -f $ctl load $slot $drive + rtn=$? +# +# Increase the sleep time if you have a slow device + sleep 15 + exit $rtn + ;; + + list) +# echo "Requested list" + ${MTX} -f $ctl status | grep " *Storage Element [0-9]*:.*Full" | awk '{print $3 $4}' | sed "s/Full *\(:VolumeTag=\)*//" + ;; + + loaded) + make_temp_file + ${MTX} -f $ctl status > ${TMPFILE} + rtn=$? + cat ${TMPFILE} | grep "^Data Transfer Element $drive:Full" | awk '{print $7}' + cat ${TMPFILE} | grep "^Data Transfer Element $drive:Empty" | awk '{print 0}' + rm -f ${TMPFILE} + exit $rtn + ;; + + slots) +# echo "Request slots" + ${MTX} -f $ctl status | grep " *Storage Changer" | awk '{print $5}' + ;; +esac diff --git a/examples/autochangers/mtx-changer.Sony-TSL-SA300C b/examples/autochangers/mtx-changer.Sony-TSL-SA300C new file mode 100755 index 00000000..ef4783d7 --- /dev/null +++ b/examples/autochangers/mtx-changer.Sony-TSL-SA300C @@ -0,0 +1,40 @@ +#!/bin/sh +# +# Bacula interface to mtx autoloader +# +# This mtx-changer script was contributed by Fryderyk Wlostowski. 
+# It works with a Sony TLS-11000 changer, which needs +# the slot number to do an unload +# +# mtx-changer "changer-device" "command" "slot" +# +# +MTX=/opt/mtx1.2.17/sbin/mtx +case "$2" in + unload) +# echo "Doing mtx -f $1 $2" + NR_KASETY=`$MTX -f $1 status | grep "Empty" | tr " " "~" | tr ":" "~" | cut -d "~" -f 9` + $MTX -f $1 $2 $NR_KASETY + ;; + + load) +# echo "Doing mtx -f $1 $2 $3" + $MTX -f $1 $2 $3 + ;; + + list) +# echo "Requested list" + $MTX -f $1 status | grep "^[ ]*Storage Element [0-9]*:.*Full" | awk "{print \$3}" | sed "s/:.*$/ /g" | tr -d "[\r\n]" + ;; + + loaded) +# echo "Request loaded" + $MTX -f $1 status | grep "Empty" | tr " " "~" | tr ":" "~" | cut -d "~" -f 9 + echo 0 + ;; + + slots) +# echo "Request slots" + $MTX -f $1 status | grep "[ ]Storage Changer" | awk "{print \$5}" + ;; +esac diff --git a/examples/autochangers/mtx-changer.StorageTek-HPA4853 b/examples/autochangers/mtx-changer.StorageTek-HPA4853 new file mode 100755 index 00000000..25428db9 --- /dev/null +++ b/examples/autochangers/mtx-changer.StorageTek-HPA4853 @@ -0,0 +1,157 @@ +#!/bin/sh +# +# Bacula interface to mtx autoloader +# +# Created JAN/23/02 by Ludwig Jaffe +# +# Works with the HP A4853 DLT Library +# and the Storagetek Timberwolf 9730 DLT Library +# +#TAPEDRIVE0 holds the device/name of your 1st and only DLT drive (Bacula supports only 1 drive currently) +# +#Read TAPEDRIVE from command line parameters + +TMPDIR=/tmp + +make_temp_file() +{ + TMPFILE=`mktemp ${TMPDIR}/mtx$1.XXXXXXXXXX 2> /dev/null` + if test $? -ne 0 || test x${TMPFILE} = x; then + TMPFILE="${TMPDIR}/mtx$1.$$" + if test -f ${TMPFILE}; then + echo "ERROR: Temp file security problem on: ${TMPFILE}" + exit 1 + fi + fi +} + + + +if [ -z "$4" ] ; then + TAPEDRIVE0=/dev/st0 +else + TAPEDRIVE0=$4 +fi + +#Delay in seconds the tape needs to load the tape. Needed to stop bacula from using the tape too early. +TAPEDELAY=65 #The StorageTek Timberwolf 9730 with DLT7000 needs approx. 50 seconds to load. 65 sec gives safety +MTXCHVERBOSE=1 +if [ -z "$1" ] ; then + echo "" + echo "The mtx-changer script for bacula" + echo "---------------------------------" + echo "" + echo "usage: mtx-changer [slot] [devicename of tapedrive]" + echo " mtx-changer" + echo "" + echo "Valid commands:" + echo "" + echo "unload Unloads a tape into the slot" + echo " from where it was loaded." + echo "load Loads a tape from the slot " + echo "list Lists full storage slots" + echo "loaded Gives slot from where the tape was loaded." + echo " 0 means the tape drive is empty." + echo "slots Gives Number of aviable slots." + echo "" + echo "Example:" + echo " mtx-changer /dev/changer load 1 loads a tape from slot1" + echo "" + exit +fi + + +case "$2" in + unload) +# At first do mt -f /dev/st0 offline to unload the tape because HP A4853 aka Timberwolf9730 +# refuses to unload the tape from the drive if the DLT streamer did not unloaded it!!! +# + #Check if you want to fool me + if [ $MTXCHVERBOSE -eq 1 ] ; then echo "mtx-changer: Checking if drive is loaded before we unload. 
I Request loaded" ; fi + make_temp_file + mtx -f $1 status >${TMPFILE} + rm -f /tmp/mtxloaded + cat ${TMPFILE} | grep "^Data Transfer Element 0:Full" | awk "{print \$7}" > /tmp/mtxloaded + rm -f ${TMPFILE} + read LOADEDVOL ${TMPFILE} + rm -f /tmp/mtxloaded + cat ${TMPFILE}| grep "^Data Transfer Element 0:Full" | awk "{print \$7}" > /tmp/mtxloaded + rm -f ${TMPFILE} + read LOADEDVOL ${TMPFILE} + cat ${TMPFILE} | grep "^Data Transfer Element 0:Full" | awk "{print \$7}" + cat ${TMPFILE} | grep "^Data Transfer Element 0:Empty" | awk "{print 0}" + rm -f ${TMPFILE} + ;; + + slots) + if [ $MTXCHVERBOSE -eq 1 ] ; then echo "mtx-changer: Request slots" ; fi + mtx -f $1 status | grep "[ ]Storage Changer" | awk "{print \$5}" + ;; +esac diff --git a/examples/autochangers/multiple-drive-changer.txt b/examples/autochangers/multiple-drive-changer.txt new file mode 100644 index 00000000..e9fe18d4 --- /dev/null +++ b/examples/autochangers/multiple-drive-changer.txt @@ -0,0 +1,257 @@ +Date: Tue, 15 Feb 2005 17:36:06 +0100 +From: Mario Wolff +To: kern@sibbald.com +CC: bacula-devel +Subject: Re: Using multiple tapedrives as autochanger! + +Hi Kern! + +Kern Sibbald schrieb: + > Hello, + > + > There is a similar script named ak-mtx-changer.txt in the + > /examples/autochangers directory of the source tree. If you feel + > that your script brings something different, then I'll be happy to + > include it in the same directory. If that is the case, please put a + > small note in an email message to me with the script and I'll include it + > (I know you've already sent the script, but it saves me the time of + > searching for it, and I'm sure I've got the right one). + > + > Best regards, Kern + +As far as i understood the ak-mtx-changer.txt it's simply a +human-resource-changer!?! +In the past every single server had it's own DDS-3 Tape, since we switch + to a centralized networkbackup all these drives are unused. +Now i took 6 of these drives and placed it in large case on one +scsi-controller. Running bacula with multitape-changer these six drives +are used as one drive with a 6-Slot-Changer! + +Regards, +Mario + +PS: I've posted a deploy-script (deploy windows-fd from linux, automated + your hints) a few days ago over gmane to the devellist. This message +was never posted and i got no error message. Maybe problem with the size +(Windows-FD was attached!) ??? + +[pasted the hints once again for Kern!] + +Example: +bacula-dir.conf: + +# Definition of DDS tape storage device +Storage { + Name = Multitape + #Do not use "localhost" here + Address = mystorage # N.B. Use a fully qualified name here + SDPort = 9103 + Password = "strongsecret" # password for Storage daemon + Device = Multitape + Autochanger = yes + Media Type = DDS-3 +} + +bacula-sd.conf: +Device { + Name = Multitape # + Media Type = DDS-3 + Archive Device = /dev/tape + AutomaticMount = yes; # when device opened, read it + AlwaysOpen = yes; + RemovableMedia = yes; + RandomAccess = no; + # That's the magic!!! + Changer Command = "/etc/bacula/scripts/multitape-changer %c %o %S %a %d" + Changer Device = /dev/null + AutoChanger = yes +} + +Hints: +- Important to use a virtual name! multidrive-changer will create links! + (Archive Device = /dev/tape) +- It's a bash script not a sh script! +- SD must run as root or the multitape-changer must be called with sudo +to have write permission to the /dev dir! +- the default-config does an umount on tape-change! Don't switch this! +SD has to release the device-file! +- don't use your tapedrives directly anymore! 
SD does not know that +/dev/tape is /dev/nst0! +- don't remove the sleep 1 after getslot...! Without the sleep you will +get a "Operation not permitted" error! +- only tested with Debian/GNU-Linux-SID + +[end of paste] + + +--------------050209030507060501040304 +Content-Type: text/plain; + name="multitape-changer" +Content-Transfer-Encoding: 7bit +Content-Disposition: inline; + filename="multitape-changer" + +#!/bin/bash +# +# Bacula interface to use multiple drives as one tape-changer +# Arguments are copied from the mtx-script! Simply exchange +# the scriptname from demo-config +# +# If you set in your Device resource +# +# Changer Command = "path-to-this-script/multitape-changer %c %o %S %a %d" +# you will have the following input to this script: +# +# multitape-changer "changer-device" "command" "slot" "archive-device" "drive-index" +# $1 $2 $3 $4 $5 +# +# for example: +# +# multitape-changer /dev/sg0 load 1 /dev/nst0 0 (on a Linux system) +# +# Setup arguments +cmd="$2" +slot=$3 +DEVICE=${4:-/dev/tape} + +LABELDIR=/etc/bacula/tapelabel # where to find labelfiles +NULLDEVICE=/dev/null # if unmount link to this +SLOT01=/dev/nst0 # first slot +SLOT02=/dev/nst1 # second slot + # simply append more + +#get device for a given slotnumber +getdevice(){ + myslot=${1:-1} + if [ ${#myslot} -gt 2 ];then exit 1;fi + if [ ${#myslot} -eq 2 ];then + eval echo \$SLOT$myslot + return + else + eval echo \$SLOT0$myslot + return + fi +} + +#get name of labelfile for a given slot +getname(){ + myslot=${1:-1} + if [ ${#myslot} -gt 2 ];then exit 2;fi + if [ ${#myslot} -eq 2 ];then + echo SLOT$myslot + return + else + echo SLOT0$myslot + return + fi +} + +#how many tapes/slots to manage +getslots(){ + count=1 + while [ "$( getdevice $count )" ];do + count=$[ $count + 1 ] + done + echo $[ $count - 1 ] + return +} + +#get slot for a given device-file +getslot(){ + device=${1:-$NULLDEVICE} + if [ "$device" = "$NULLDEVICE" ];then + echo -en "0\n" + return + else + count=1 + slotdev=$( getdevice $count ) + while [ "$slotdev" ]; do + if [ "$slotdev" = "$device" ];then + echo -en "$count\n" + return + fi + count=$[ $count + 1 ] + slotdev=$( getdevice $count ) + done + exit 3 + fi +} + +if test $# -lt 2 ; then + echo "usage: multitape-changer ctl-device command slot archive-device drive" + echo " Insufficient number of arguments arguments given." + echo " Mimimum usage is first two arguments ..." + exit 4 +fi + +# +# Check for special cases where only 2 arguments are needed, +# all others are a minimum of 3 +case $cmd in + loaded) + ;; + unload) + ;; + list) + ;; + slots) + ;; + *) + if test $# -lt 3; then + echo "usage: multitape-changer ctl-device command slot archive-device drive" + echo " Insufficient number of arguments arguments given." + echo " Mimimum usage is first three arguments ..." 
+ exit 5 + fi + ;; +esac + + +case $cmd in + unload) + if [ -e $DEVICE ];then + rm $DEVICE && ln -s $NULLDEVICE $DEVICE || exit 6 + else + ln -s $NULLDEVICE $DEVICE || exit 7 + fi + ;; + + load) + CURDEV=$( getdevice $slot ) + if [ -e $DEVICE ];then + rm $DEVICE && ln -s $CURDEV $DEVICE || exit 8 + else + ln -s $CURDEV $DEVICE || exit 9 + fi + ;; + + list) + slots=$( getslots ) + for slot in $( seq 1 $slots );do + labelfile=$LABELDIR/$( getname $slot ) + if [ -f "$labelfile" ];then + echo "$slot:$( head -n 1 $labelfile)" + else + echo "$slot:" + fi + done + exit 0 + ;; + + loaded) + if [ -e $DEVICE ];then + line=$( ls -la $DEVICE ) + fi + line=${line:-$NULLDEVICE} + getslot ${line##* } + sleep 1 + exit 0 + ;; + + slots) + getslots + exit 0 + ;; +esac + +--------------050209030507060501040304-- diff --git a/examples/autochangers/solaris-mtx-changer b/examples/autochangers/solaris-mtx-changer new file mode 100755 index 00000000..30bce538 --- /dev/null +++ b/examples/autochangers/solaris-mtx-changer @@ -0,0 +1,174 @@ +#!/bin/bash +# +# /bin/sh isn't always compatible so use /bin/bash +# +# Bacula interface to mtx autoloader +# +# $Id$ +# +# If you set in your Device resource +# +# Changer Command = "path-to-this-script/mtx-changer %c %o %S %a %d" +# you will have the following input to this script: +# +# mtx-changer "changer-device" "command" "slot" "archive-device" "drive-index" +# $1 $2 $3 $4 $5 +# +# for example: +# +# mtx-changer /dev/sg0 load 1 /dev/nst0 0 (on a Linux system) +# +# If you need to an offline, refer to the drive as $4 +# e.g. mt -f $4 offline +# +# Many changers need an offline after the unload. Also many +# changers need a sleep 60 after the mtx load. +# +# N.B. If you change the script, take care to return either +# the mtx exit code or a 0. If the script exits with a non-zero +# exit code, Bacula will assume the request failed. +# + +# Sun sed/awk etc are not sufficient, working versions are in /usr/xpg4/bin +export PATH="/usr/local/bin:/usr/sfw/bin:/usr/xpg4/bin:/usr/bin" + +MTX=mtx + +TMPDIR=/tmp + +make_temp_file() +{ + TMPFILE=`mktemp ${TMPDIR}/mtx$1.XXXXXXXXXX 2> /dev/null` + if test $? -ne 0 || test x${TMPFILE} = x; then + TMPFILE="${TMPDIR}/mtx$1.$$" + if test -f ${TMPFILE}; then + echo "ERROR: Temp file security problem on: ${TMPFILE}" + exit 1 + fi + fi +} + + + + +# +# The purpose of this function to wait a maximum +# time for the drive. It will +# return as soon as the drive is ready, or after +# waiting a maximum of 180 seconds. +# Note, this is very system dependent, so if you are +# not running on Linux, you will probably need to +# re-write it. +# +# If you have a FreeBSD system, you might want to change +# the $(seq 180) to $(jot 180) -- tip from Brian McDonald +# +wait_for_drive() { + for i in $(seq 180); do # Wait max 180 seconds + if ( mt -f $1 status | grep 0x0 ) >/dev/null 2>&1; then + #echo "Device $1 READY" + break + fi + #echo "Device $1 - not ready, retry ${i}..." + sleep 1 + done +} + + +if test $# -lt 2 ; then + echo "usage: mtx-changer ctl-device command slot archive-device drive" + echo " Insufficient number of arguments arguments given." + echo " Mimimum usage is first two arguments ..." 
+ exit 1 +fi + +# Setup arguments +ctl=$1 +cmd="$2" +slot=$3 +device=$4 +# If drive not given, default to 0 +if test $# = 5 ; then + drive=$5 +else + drive=0 +fi + +# +# Check for special cases where only 2 arguments are needed, +# all others are a minimum of 3 +case $cmd in + loaded) + ;; + unload) + ;; + list) + ;; + slots) + ;; + *) + if test $# -lt 3; then + echo "usage: mtx-changer ctl-device command slot archive-device drive" + echo " Insufficient number of arguments arguments given." + echo " Mimimum usage is first three arguments ..." + exit 1 + fi + ;; +esac + + +case $cmd in + unload) +# echo "Doing mtx -f $ctl unload $slot $drive" +# +# enable the following line if you need to eject the cartridge + #mt -f $device offline + mt -f $device rewoffl + if test x$slot = x; then + ${MTX} -f $ctl unload + else + ${MTX} -f $ctl unload $slot $drive + fi + ;; + + load) +# echo "Doing mtx -f $ctl load $slot $drive" + ${MTX} -f $ctl load $slot $drive + rtn=$? +# +# Increase the sleep time if you have a slow device +# or remove the sleep and add the following: + #sleep 15 + wait_for_drive $device + exit $rtn + ;; + + list) +# echo "Requested list" + # Some tape changers lose track of their inventory (well, mine does) so + # do one before trying to get a status out of it. + ${MTX} -f $ctl inventory + ${MTX} -f $ctl status | grep " *Storage Element [0-9]*:.*Full" | awk "{print \$3 \$4}" | sed "s/Full *\(:VolumeTag=\)*//" +# Comment out the previous line and add a line here +# to print "fake" barcodes. +# +# If you have a VXA PacketLoader and the above does not work, try +# turning it off and enabling the following line. +# ${MTX} -f $ctl status | grep " *Storage Element [0-9]*:.*Full" | sed "s/*Storage Element //" | sed "s/Full :VolumeTag=//" + ;; + + loaded) + make_temp_file + ${MTX} -f $ctl status >${TMPFILE} + rtn=$? + cat ${TMPFILE} | grep "^Data Transfer Element $drive:Full" | awk "{print \$7}" + cat ${TMPFILE} | grep "^Data Transfer Element $drive:Empty" | awk "{print 0}" + rm -f ${TMPFILE} + exit $rtn + ;; + + slots) +# echo "Request slots" + ${MTX} -f $ctl status | grep " *Storage Changer" | awk "{print \$5}" + ;; +esac diff --git a/examples/backup-acls.txt b/examples/backup-acls.txt new file mode 100644 index 00000000..8aa1f4ff --- /dev/null +++ b/examples/backup-acls.txt @@ -0,0 +1,22 @@ +From: Volker Sauer +To: =?ISO-8859-1?Q?Daniel_Ch=E9nard?= +CC: Bacula users +Subject: Re: [Bacula-users] Bacula and the ACL +Date: Tue, 20 Jan 2004 23:32:52 +0100 + +You could use a script like that to dump the acls to a file which you +could restore with "setfacl --restore=" in case of a restore. + + +#!/bin/bash + +BACKUP_DIRS="/foo /bar" + +STORE_ACL=/root/acl-backup +umask 077 + +for i in $BACKUP_DIRS; do + cd $i + /usr/bin/getfacl -R --skip-base . >$STORE_ACL/${i//\//_} +done + diff --git a/examples/backup-every-other-week.txt b/examples/backup-every-other-week.txt new file mode 100644 index 00000000..b26a456a --- /dev/null +++ b/examples/backup-every-other-week.txt @@ -0,0 +1,76 @@ + +From: Robert L Mathews +To: +Subject: [Bacula-users] Making Backups Run Every Other Week +Date: Wed, 13 Aug 2003 10:04:23 -0700 + +In case anyone is interested, here's a tip I came up with. + +My backup policy is such that I need backups to run every other week. I +have two separate "offsite" tape pools, and a backup is made to each of +them on alternating weeks. 
+ +Bacula's scheduler currently doesn't handle "every two weeks", and using +something like "the first and third weeks for backup A, and the second +and fourth weeks for backup B" means there will be no backup done on the +fifth week if the month contains one. Scheduling a backup for the fifth +week doesn't help; it means that the same backup would sometimes run +twice in a row, which ruins the alternating week scheme. + +I first thought of poking around the code to make the scheduler support +"every two weeks", and I someday may still do so. However, I found an +easier way to do this is in the meantime: with a RunBeforeJob line. + +What I do is schedule both jobs to run every single week. Then the job +that runs my "Offsite Backup A" has this line: + + RunBeforeJob = "/etc/bacula/two_week_script 'July 6 2003'" + +And the job with my "Offsite Backup B" has this one: + + RunBeforeJob = "/etc/bacula/two_week_script 'July 13 2003'" + +And two_week_script is the following Perl script: + +---------------- + +#!/usr/bin/perl -w + +use strict; +use constant SECONDS_IN_WEEK => 86400 * 7; +use constant SECONDS_IN_TWO_WEEKS => SECONDS_IN_WEEK * 2; + +# Calculate the elapsed seconds since the starting date, +# which must be in a format that /bin/date can understand +# Note that this relies on the GNU "%s" date extension +my $start_date = shift; +$start_date = `/bin/date -d '$start_date' +'%s'`; +chomp $start_date; +my $time_since_start_date = time - $start_date; + +# Now take those seconds modulo the number of seconds in +# two weeks. If we're in the second half of the two week +# period, exit with an error to stop the Bacula job. If +# we're in the first half, the script will terminate normally +# and the Bacula job will run. +if ($time_since_start_date % SECONDS_IN_TWO_WEEKS + >= SECONDS_IN_WEEK) +{ + exit 1; +} + +---------------- + +The result is that the script cancels the job that should not be run that +week, while allowing the other job to continue. + +This idea could be trivially changed to support running every three +weeks, every two months, every prime number of days, etc. + +Anyway, just a tip in case anyone else needs to schedule things in a way +that the scheduler doesn't currently support. It's pretty obvious that +this is the right way to do it now, but I puzzled over it for a little +while before coming up with this. + +-- +Robert L Mathews, Tiger Technologies diff --git a/examples/backup-to-cdwriter.txt b/examples/backup-to-cdwriter.txt new file mode 100644 index 00000000..c0f85112 --- /dev/null +++ b/examples/backup-to-cdwriter.txt @@ -0,0 +1,813 @@ +From: "Johan Decock" +To: bacula-users@lists.sourceforge.net +Subject: Re: [Bacula-users] cdwriter +Date: Fri, 30 Jan 2004 17:11:34 +0100 + +I thought it would be easy to find them in the archives. I'll post them +again. I changed some things in them anyway in the mean time. I also +include the Director config. Then you see how they are to be used in +Runbefore- and RunafterJobs + +I'm interested in any remarks or improvements you might have. (I'm +still getting started in shell scripting) + + +# +# Default Bacula Director Configuration file +# +# The only thing that MUST be changed is to add one or more +# file or directory names in the Include directive of the +# FileSet resource. +# +# For Bacula release 1.32b (14 Oct 2003) -- mandrake (FiveStar) +# +# You might also want to change the default email address +# from root to your address. See the "mail" and "operator" +# directives in the Messages resource. 
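+#
+# (Editorial note, illustration only -- not part of the original posting:)
+# Several Jobs below delegate work to helper scripts via RunBeforeJob and
+# RunAfterJob. A RunBeforeJob that exits non-zero cancels the job, so a
+# guard script can be as small as, for instance:
+#
+#   #!/bin/sh
+#   # hypothetical check: refuse to run if the spool area is missing
+#   test -d /mnt/backupdaily || exit 1
+#   exit 0
+#
+# The %v handed to the RunAfterJob scripts is the volume name; the
+# write_to_DVD and write_to_USB scripts shown further down read it as $1.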
+# + +Director { # define myself + Name = tosfeb33-dir + DIRport = 9101 # where we listen for UA connections + QueryFile = "/etc/bacula/query.sql" + WorkingDirectory = "/var/bacula/working" + PidDirectory = "/var/run" + Maximum Concurrent Jobs = 1 + Password = "abis" # Console password + Messages = Standard +} + +Job { + Name = "Tosfeb31AllHDD" + Type = Backup + Level = Full + Client = tosfeb31-fd + FileSet = "Tosfeb31All" + Schedule = "Weekdays" + Messages = Standard + Pool = HDD + Storage = HDdrive + RunBeforeJob = "/etc/bacula/prepare_DVDspool /mnt/backupdaily 1700000" + Write Bootstrap = "/var/bacula/working/Tosfeb31All.bsr" + Priority = 10 +} + +Job { + Name = "Tosfeb31AllUSB" + Type = Backup + Level = Full + Client = tosfeb31-fd + FileSet = "Tosfeb31All" + Schedule = "WeeklyUSB" + Messages = Standard + Pool = USB + Storage = USBdrive + Write Bootstrap = "/var/bacula/working/Tosfeb31All.bsr" + Priority = 10 +} + +Job { + Name = "Tosfeb31EssentialDVD" + Type = Backup + Level = Full + Client = tosfeb31-fd + FileSet = "Tosfeb31Essential" + Schedule = "MonthlyDVD" + Messages = Standard + Pool = DVD + Storage = DVDdrive + RunBeforeJob = "/etc/bacula/prepare_DVDspool /mnt/backupdaily 4700000" + Write Bootstrap = "/var/bacula/working/Tosfeb31Essential.bsr" + RunAfterJob = "/etc/bacula/inventorize_volumes %v" + Priority = 10 +} + +Job { + Name = "Tosfeb31AllCompressedDVD" + Type = Backup + Level = Full + Client = tosfeb31-fd + FileSet = "Tosfeb31AllCompressed" + Messages = Standard + Pool = DVD + Storage = DVDdrive + RunBeforeJob = "/etc/bacula/prepare_DVDspool /mnt/backupdaily 4700000" + Write Bootstrap = "/var/bacula/working/Tosfeb31Essential.bsr" + RunAfterJob = "/etc/bacula/inventorize_volumes %v" + Priority = 10 +} + +Job { + Name = "Tosfeb32AllHDD" + Type = Backup + Level = Full + Client = tosfeb32-fd + FileSet = "Tosfeb32All" + Schedule = "Weekdays" + Messages = Standard + Pool = HDD + Storage = HDdrive + Write Bootstrap = "/var/bacula/working/Tosfeb32All.bsr" + Priority = 20 +} + +Job { + Name = "Tosfeb32AllUSB" + Type = Backup + Level = Full + Client = tosfeb32-fd + FileSet = "Tosfeb32All" + Schedule = "WeeklyUSB" + Messages = Standard + Pool = USB + Storage = USBdrive + Write Bootstrap = "/var/bacula/working/Tosfeb32All.bsr" + Priority = 10 +} + +Job { + Name = "Tosfeb32EssentialDVD" + Type = Backup + Level = Full + Client = tosfeb32-fd + FileSet = "Tosfeb32Essential" + Schedule = "MonthlyDVD" + Messages = Standard + Pool = DVD + Storage = DVDdrive + Write Bootstrap = "/var/bacula/working/Tosfeb32System.bsr" + RunAfterJob = "/etc/bacula/inventorize_volumes %v" + Priority = 20 +} + +#Job { +# Name = "Tosfeb30System" +# Type = Backup +# Level = Full +# Client = tosfeb30-fd +# FileSet = "Tosfeb30System" +# Schedule = "WeeklyUSB" +# Messages = Standard +# Pool = USB +# Storage = USBdrive +# Write Bootstrap = "/etc/bacula/working/Tosfeb32System.bsr" +# Priority = 20 +#} + +Job { + Name = "Tosfeb33SystemHDD" + Type = Backup + Level = Full + Client = tosfeb33-fd + FileSet = "Tosfeb33System" + Schedule = "Weekdays" + Messages = Standard + Pool = HDD + Storage = HDdrive + Write Bootstrap = "/var/bacula/working/Tosfeb33System.bsr" + Priority = 30 +} + +Job { + Name = "Tosfeb33SystemUSB" + Type = Backup + Level = Full + Client = tosfeb33-fd + FileSet = "Tosfeb33System" + Schedule = "WeeklyUSB" + Messages = Standard + Pool = USB + Storage = USBdrive + Write Bootstrap = "/var/bacula/working/Tosfeb33All.bsr" + Priority = 30 +} + +Job { + Name = "Tosfeb33EssentialDVD" + Type = Backup + Level = 
Full + Client = tosfeb33-fd + FileSet = "Tosfeb33Essential" + Schedule = "MonthlyDVD" + Messages = Standard + Pool = DVD + Storage = DVDdrive + Write Bootstrap = "/var/bacula/working/Tosfeb33Essential.bsr" + RunAfterJob = "/etc/bacula/inventorize_volumes %v" + Priority = 30 +} + +Job { + Name = "BackupCatalogHDD" + Type = Backup + Level = Full + Client = tosfeb33-fd + FileSet="Catalog" + Schedule = "Weekdays" + Storage = HDdrive + Messages = Standard + Pool = HDD + # This creates an ASCII copy of the catalog + RunBeforeJob = "/etc/bacula/make_catalog_backup -u bacula" + # This deletes the copy of the catalog + RunAfterJob = "/etc/bacula/delete_catalog_backup" + Write Bootstrap = "/var/bacula/working/BackupCatalog.bsr" + Priority = 90 # run after main backup +} + +Job { + Name = "BackupCatalogUSB" + Type = Backup + Level = Full + Client = tosfeb33-fd + FileSet="Catalog" + Schedule = "WeeklyUSB" + Storage = USBdrive + Messages = Standard + Pool = USB + # This creates an ASCII copy of the catalog + RunBeforeJob = "/etc/bacula/make_catalog_backup -u bacula" + # This writes the volume file to the proper USB device + RunAfterJob = "/etc/bacula/write_to_USB %v" +# RunAfterJob = "/etc/bacula/delete_catalog_backup" + Write Bootstrap = "/var/bacula/working/BackupCatalog.bsr" + Priority = 90 # run after main backup +} + +Job { + Name = "BackupCatalogDVD" + Type = Backup + Level = Full + Client = tosfeb33-fd + FileSet="Catalog" + Schedule = "MonthlyDVD" + Storage = DVDdrive + Messages = Standard + Pool = DVD + # This creates an ASCII copy of the catalog + RunBeforeJob = "/etc/bacula/make_catalog_backup -u bacula" + # This writes the volume file to a DVD + RunAfterJob = "/etc/bacula/write_to_DVD %v" + Write Bootstrap = "/var/bacula/working/BackupCatalog.bsr" + Priority = 90 # run after main backup +} + +# Standard Restore template, to be changed by Console program +Job { + Name = "RestoreFiles" + Type = Restore + Client = tosfeb33-fd + FileSet="Full Set" + Storage = HDdrive + Messages = Standard + Pool = HDD + Where = /tmp/bacula-restores +} + +# List of files to be backed up +FileSet { + Name = "Full Set" + Include = signature=MD5 portable=yes { +# +# Put your list of files here, one per line or include an +# external list with: +# +# > /var/bacula/volumes +done + +exit 0 + + + +write_to_DVD: + +#!/bin/bash +# +# shell script to copy DVD volume files from /mnt/backupdaily to DVD+/-R(W) + +# The last volume was not added to the inventory list yet, so we do that first +/etc/bacula/inventorize_volumes $1 + +supermount -m=/mnt/cdrom disable + +umount /mnt/cdrom + +# another shell script run as a RunAfterJob has been putting the volume names into a text file +# now we retrieve those names and write them to DVD + +# this awk command produces a space delimited list of the lines of the text file and assigns it to $lines + +lines=`awk '{print $1}' /var/bacula/volumes` + +# now we can iterate over those volume names + +let "count=1" +for i in $lines ; do + if [ $count -le 1 ] ; then + # When creating a DVD with growisofs one needs to use -Z the first time + echo "Writing first volume file" $count + growisofs -Z /dev/scd0 -R -J /mnt/backupdaily/$i + else + # and -M the for the next volume files + echo "Writing another volume file" $count + growisofs -M /dev/scd0 -R -J /mnt/backupdaily/$i + fi + let count="$count+1" +# sleep 5 + ls -al /mnt/cdrom +# sleep 5 +done + +ls -al /mnt/backupdaily +mount /mnt/cdrom +ls -al /mnt/cdrom +umount /mnt/cdrom + +#supermount -m=/mnt/cdrom enable + +# This is run as a RunAfterJob 
of the catalog backup. It already had a RunAfterJob, so we execute that here +/etc/bacula/delete_catalog_backup + +exit 0 + + + + +This last one is to write to removable USB-storage. + +write_to_USB + +#!/bin/bash +# +# shell script to finalize USB backup + +volumename=$1 +echo " Creating mount points, if they don't exist" +if ! [ -d /mnt/usbdrv ] ; then + mkdir /mnt/usbdrv +fi +if ! [ -d /mnt/usbdrv/bacula ] ; then + mkdir /mnt/usbdrv/bacula +fi +if ! [ -d /mnt/usbdrv/misc ] ; then + mkdir /mnt/usbdrv/misc +fi + #unmount all mount points related to USB + +echo " Making sure no USB devices are mounted" +umount /mnt/usbdrv +umount /mnt/usbdrv/bacula +umount /mnt/usbdrv/misc +umount /mnt/removable +umount /mnt/removable2 +umount /mnt/removable3 + + #determine which /dev/sd? contains the right USBVolume for this backup + +echo " Determining whether the right USB device containing our USBVolume is present physically" + +found=nothing +for i in sda sdb sdc; do + sd=$i"1" + echo " Trying with $sd" + mount /dev/$sd /mnt/usbdrv/bacula -t ext2 + ls -al /mnt/usbdrv/bacula + + if [ -f /mnt/usbdrv/bacula/$volumename ] ; then + found=/dev/$i + fi + + umount /mnt/usbdrv/bacula +done + +echo +echo " Found: $found containing $volumename " +echo +if [ $found != nothing ] ; then + # mount /mnt/usbdrv/bacula and /mnt/usbdrv/misc on /dev/sd?1 and /dev/sd?5 respectively + mount $found"1" /mnt/usbdrv/bacula -t ext2 + mount $found"5" /mnt/usbdrv/misc -t vfat + + # copy Ghost images of Windows system drives, the contents of /etc/bacula/* + # and a text file with restore instructions to /mnt/usbdrv/misc + + echo " Copying Ghost image of TOSFEB31 to /mnt/usbdrv/misc" + cp -v /mnt/Ghost/PROD/TOSFEB31/SYSTEMAG.GHO /mnt/usbdrv/misc + echo " Copying Ghost image of TOSFEB32 to /mnt/usbdrv/misc" + cp -v /mnt/Ghost/PROD/TOSFEB32/COFTF32.GHO /mnt/usbdrv/misc + echo " Copying /etc/bacula to /mnt/usbdrv/misc" + cp -Rfv /etc/bacula/ /mnt/usbdrv/misc + echo " Copying Restore instructions to /mnt/usbdrv/misc" + cp -v /etc/bacula/How_To_Restore.txt /mnt/usbdrv/misc + + # copy the current USBVolume to /mnt/usbdrv/bacula overwriting the previous volume file + echo + echo " Copying Bacula USB-volume to /mnt/usbdrv/bacula" + +# cp -fv /mnt/spool/$volumename /mnt/usbdrv/bacula + + # give some feedback about what was done + + echo + echo " Directory contents of /mnt/usbdrv/bacula" + ls -al /mnt/usbdrv/bacula + echo + echo " Directory contents of /mnt/usbdrv/misc" + ls -al /mnt/usbdrv/misc + + # unmount /mnt/usbdrv/bacula and /mnt/usbdrv/misc + echo + echo " Unmounting /mnt/usbdrv/... mount points" + umount /mnt/usbdrv/bacula + umount /mnt/usbdrv/misc + +else + # send emails if correct medium is not present +# /usr/sbin/smtp + echo "hello" +fi + +# This is run as a RunAfterJob of the catalog backup. It already had a +# RunAfterJob, so we execute that here + +/etc/bacula/delete_catalog_backup + diff --git a/examples/client-backup b/examples/client-backup new file mode 100755 index 00000000..e488b6f7 --- /dev/null +++ b/examples/client-backup @@ -0,0 +1,15 @@ +#!/bin/sh +# +# A small script to start a Backup job from any Client machine +# +# You may need to add a full path and possibly the console.conf location +# +# Assume we are in the binary directory +# +JOB="NightlySave" +# +./console -c console.conf <output-file +# +use warnings; +use strict; + +my $in; +$in .= $_ while (<>); + +sub options { + return "Options { ".join('; ',split(/\s+/, shift)) . 
" } "; +} + +sub file_lines { + return join($/, map {" File = $_"} split(/\n\s*/, shift)); +} + +$in =~ s/Include\s*=\s*((?:\w+=\w+\s+)*){\s*((?:.*?\n)+?)\s*}/ + "Include { " . + ( $1 ? options($1) : '' ) . "\n" . + file_lines($2) . + "\n }" /eg; + +$in =~ s/Exclude\s*=\s*{\s*}/ + "Exclude { }"/eg; + +$in =~ s/Exclude\s*=\s*{\s*((?:.*?\n)+?)\s*}/ + "Exclude {\n" . file_lines($1) . "\n }"/eg; + +print $in; diff --git a/examples/conf/kernsconfig b/examples/conf/kernsconfig new file mode 100755 index 00000000..4aff6af6 --- /dev/null +++ b/examples/conf/kernsconfig @@ -0,0 +1,30 @@ +#!/bin/sh +# This is Kern's configure script for Bacula +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# + +PREFIX=/opt/bacula +CFLAGS="-g -O2 -Wall" \ + ./configure \ + --sbindir=${PREFIX}/bin \ + --sysconfdir=${PREFIX}/etc \ + --docdir=${PREFIX}/html \ + --htmldir=${PREFIX}/html \ + --with-working-dir=${PREFIX}/working \ + --with-pid-dir=${PREFIX}/working \ + --with-scriptdir=${PREFIX}/scripts \ + --with-plugindir=${PREFIX}/plugins \ + --libdir=${PREFIX}/lib \ + --enable-smartalloc \ + --disable-conio \ + --enable-readline \ + --enable-bat \ + --with-mysql \ + --with-dump-email=localhost \ + --with-job-email=localhost \ + --with-smtp-host=localhost \ + --with-baseport=9101 + +exit 0 diff --git a/examples/conf/m4.Makefile b/examples/conf/m4.Makefile new file mode 100644 index 00000000..38b3f472 --- /dev/null +++ b/examples/conf/m4.Makefile @@ -0,0 +1,29 @@ +ETCDIR=/opt/bacula/etc +M4=/usr/ccs/bin/m4 +DIR=/opt/bacula/sbin/bacula-dir +FD=/opt/bacula/sbin/bacula-fd +SD=/opt/bacula/sbin/bacula-sd +BCON=/opt/bacula/sbin/bconsole + +all: $(ETCDIR)/bacula-dir.conf $(ETCDIR)/bacula-sd.conf \ + $(ETCDIR)/bacula-fd.conf $(ETCDIR)/bconsole.conf + +$(ETCDIR)/bacula-dir.conf: bacula-dir.conf bacula-defs.m4 + $(M4) bacula-dir.conf >$(ETCDIR)/bacula-dir.tmp && \ + $(DIR) -t -c $(ETCDIR)/bacula-dir.tmp && \ + mv $(ETCDIR)/bacula-dir.tmp $(ETCDIR)/bacula-dir.conf + +$(ETCDIR)/bacula-sd.conf: bacula-sd.conf bacula-defs.m4 + $(M4) bacula-sd.conf >$(ETCDIR)/bacula-sd.tmp && \ + $(SD) -t -c $(ETCDIR)/bacula-sd.tmp && \ + mv $(ETCDIR)/bacula-sd.tmp $(ETCDIR)/bacula-sd.conf + +$(ETCDIR)/bacula-fd.conf: bacula-fd.conf bacula-defs.m4 + $(M4) bacula-fd.conf >$(ETCDIR)/bacula-fd.tmp && \ + $(FD) -t -c $(ETCDIR)/bacula-fd.tmp && \ + mv $(ETCDIR)/bacula-fd.tmp $(ETCDIR)/bacula-fd.conf + +$(ETCDIR)/bconsole.conf: bconsole.conf bacula-defs.m4 + $(M4) bconsole.conf >$(ETCDIR)/bconsole.tmp && \ + $(BCON) -t -c $(ETCDIR)/bconsole.tmp && \ + mv $(ETCDIR)/bconsole.tmp $(ETCDIR)/bconsole.conf diff --git a/examples/conf/m4.additions b/examples/conf/m4.additions new file mode 100644 index 00000000..e3acdb4e --- /dev/null +++ b/examples/conf/m4.additions @@ -0,0 +1,43 @@ +From: Marc Schoechlin +To: Peter Eriksson +Cc: bacula-users@lists.sourceforge.net +Subject: Re: [Bacula-users] RE: Feature Request : includes for config-files + +Hi ! + +On Fri, May 21, 2004 at 11:24:13AM +0200, Peter Eriksson wrote: + +> > I think that is the 99%-solution for this problem - +> > but I think many users would be happy with a 90%-solution, which +> > allows to store configuration-data in distributed files. +> +> Or you could do as I just did - generate the configuration +> files using a Makefile and the m4 macro processor... That way you +> don't have to reinvent the wheel again inside Bacula but can delegate +> the tasks to external programs. +> +> [See the attached files for details. 
They can be expanded +> a lot though, it's just a beginning] + +Many thanks for the files! + +I adopted this way now - and it works with good results :-) + +The different client-definitions can now be placed on distributed +locations. + +Look at the make-target below : +-- +$(ETCDIR)/bacula-dir.conf: bacula-dir.conf bacula-defs.m4 + cat bacula-dir.conf > $(ETCDIR)/bacula-dir.conf.tmp && \ + $(FIND) $(CUSTOMERS) -name "*.cfg" -exec cat {} >> $(ETCDIR)/bacula-dir.conf.tmp \; && \ + $(M4) $(ETCDIR)/bacula-dir.conf.tmp >$(ETCDIR)/bacula-dir.tmp && \ + $(DIR) -t -c $(ETCDIR)/bacula-dir.tmp && \ + mv $(ETCDIR)/bacula-dir.tmp $(ETCDIR)/bacula-dir.conf +-- + + +Best regards + +Marc Schoechlin + diff --git a/examples/conf/m4.bacula-dir.conf b/examples/conf/m4.bacula-dir.conf new file mode 100644 index 00000000..14699c83 --- /dev/null +++ b/examples/conf/m4.bacula-dir.conf @@ -0,0 +1,364 @@ +# bacula-dir.conf +# +# Default Bacula Director Configuration file +# +# WARNING: +# This file is generated from /opt/lysator/etc/bacula/bacula-dir.conf +# Edit the source file and then run 'make'. +# + +include(bacula-defs.m4) + +Director { # define myself + Name = Baccus + DIRport = 9101 # where we listen for UA connections + QueryFile = "/opt/bacula/etc/query.sql" + WorkingDirectory = "/var/bacula/working" + PidDirectory = "/var/run" + Maximum Concurrent Jobs = 10 + Password = "djUGGqG0ckdbbTp0J0cAnK6FqZC5YX5i6" # Console password + Messages = Standard +} + + +# Generic catalog service +Catalog { + Name = MyCatalog + dbname = bacula; user = bacula; password = "" +} + + + +JobDefs { + Name = "DefaultJob" + Type = Backup + Level = Incremental + Schedule = "WeeklyCycle" + Storage = "DLT-0" + Messages = Standard + Spool Data = yes + Pool = Default + Max Start Delay = 20h + Priority = 10 +} + + + +JobDefs { + Name = "InservitusJob" + Type = Backup + Level = Incremental + Schedule = "WeeklyCycle" + Storage = "DLT-1" + Messages = Standard + Spool Data = yes + Pool = Inservitus + Max Start Delay = 20h + Priority = 10 +} + +JobDefs { + Name = "LysdiskJob" + Type = Backup + Level = Incremental + Schedule = "WeeklyCycle" + Storage = "DLT-2" + Messages = Standard + Spool Data = yes + Pool = Lysdisk + Max Start Delay = 20h + Priority = 10 +} + +JobDefs { + Name = "ShermanJob" + Type = Backup + Level = Incremental + Schedule = "WeeklyCycle" + Storage = "DLT-3" + Messages = Standard + Spool Data = yes + Pool = Sherman + Max Start Delay = 20h + Priority = 10 +} + +# Backup the catalog database (after the nightly save) +Job { + Name = "BackupCatalog" + Client = Baccus + JobDefs = "DefaultJob" + Level = Full + FileSet="Catalog" + Schedule = "WeeklyCycleAfterBackup" + # This creates an ASCII copy of the catalog + RunBeforeJob = "/opt/bacula/etc/make_catalog_backup -u bacula" + # This deletes the copy of the catalog + RunAfterJob = "/opt/bacula/etc/delete_catalog_backup" + Write Bootstrap = "/var/bacula/working/BackupCatalog.bsr" + Priority = 11 # run after main backup +} + +# Standard Restore template, to be changed by Console program +Job { + Name = "Restore" + Type = Restore + Client = Baccus + FileSet="Baccus" + Storage = "DLT-0" + Pool = Default + Messages = Standard + Where = /tmp/bacula-restores +} + + +# Clients to backup -------------------------------------------------- + +#stalingrad +#hanna +#venom +#klorin +#britney +#sherman +#inservitus +#tokaimura +#u137 + +#elfwood +#hal +#sten +#sirius (skippa? Networkerservern...) 
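+
+# (Editorial note, illustration only:) the CLIENT, STORAGE and POOL macros
+# used below are not defined here; they come from bacula-defs.m4, pulled in
+# by the include() at the top of this file. After editing the template, the
+# expanded configuration can be checked the same way the accompanying
+# m4.Makefile does, roughly:
+#
+#   /usr/ccs/bin/m4 bacula-dir.conf > /opt/bacula/etc/bacula-dir.tmp
+#   /opt/bacula/sbin/bacula-dir -t -c /opt/bacula/etc/bacula-dir.tmp
+#
+# A zero exit status from "bacula-dir -t" means the generated file parsed
+# cleanly; file names and paths follow m4.Makefile and may differ locally.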
+ + +CLIENT(Baccus, baccus.ifm.liu.se, DefaultJob) +FileSet { + Name = "Baccus" + Include = signature=MD5 { + / + /usr + /var + /opt + } + + Exclude = { + /proc /tmp /var/tmp /devices /etc/mnttab /dev/fd /var/run + /export + } +} + + + +CLIENT(Stalingrad, stalingrad.lysator.liu.se, DefaultJob) +FileSet { + Name = "Stalingrad" + Include = signature=MD5 { + / + /cvsroot + } + Exclude = { + /proc /tmp /var/tmp /etc/mnttab /dev/fd /var/run /dev/shm + } +} + + + +CLIENT(Hanna, hanna.lysator.liu.se, DefaultJob) +FileSet { + Name = "Hanna" + Include = signature=MD5 { + / + /var + /local + /export/hanna + } + Exclude = { + /proc /tmp /var/tmp /devices /etc/mnttab /dev/fd /var/run + /export/hanna/mirror + /export/hanna/ftp/mirror + } +} + + +CLIENT(Venom, venom.lysator.liu.se, DefaultJob) +FileSet { + Name = "Venom" + Include = signature=MD5 { + / + /clone/dsk1 + /clone/dsk2 + /export/dsk1 + /export/dsk2 + } + Exclude = { + /proc /tmp /var/tmp /devices /etc/mnttab /dev/fd /var/run + } +} + + +CLIENT(Klorin, klorin.lysator.liu.se, DefaultJob) +FileSet { + Name = "Klorin" + Include = signature=MD5 { + / + /export/mdsk1 + } + Exclude = { + /proc /tmp /var/tmp /devices /etc/mnttab /dev/fd /var/run + } +} + + +CLIENT(Britney, britney.lysator.liu.se, DefaultJob) +FileSet { + Name = "Britney" + Include = signature=MD5 { + / + /export/dsk1 + /export/oldroot + /export/lysdisk1 + /export/lysdisk3 + /export/lysdisk4 + /export/lysdisk6 + /export/lysdisk7 + /export/lysdisk8 + /export/lysdisk9 + /export/lysdisk11 + } + Exclude = { + /proc /tmp /var/tmp /devices /etc/mnttab /dev/fd /var/run + } +} + + + +CLIENT(Sherman, sherman.lysator.liu.se, DefaultJob) +FileSet { + Name = "Sherman" + Include = signature=MD5 { + / + /web + /boot + /var/opt/mysql + } + Exclude = { + /proc /tmp /var/tmp /etc/mnttab /dev/fd /var/run + } +} + + + +CLIENT(U137, u137.lysator.liu.se, DefaultJob) +FileSet { + Name = "U137" + Include = signature=MD5 { + / + /export/dsk1 + /export/dsk2 + } + Exclude = { + /proc /tmp /var/tmp /etc/mnttab /dev/fd /var/run + } +} + + +CLIENT(Tokaimura, tokaimura.lysator.liu.se, DefaultJob) +FileSet { + Name = "Tokaimura" + Include = signature=MD5 { + / + /usr + /var + /opt + /export/mdsk + } + Exclude = { + /proc /tmp /var/tmp /devices /etc/mnttab /dev/fd /var/run + } +} + + +CLIENT(Inservitus, inservitus.lysator.liu.se, InservitusJob) +FileSet { + Name = "Inservitus" + Include = signature=MD5 { + / + /var + /export + /export/d1 + /export/d2 + /export/d3 + /export/home + } + Exclude = { + /proc /tmp /var/tmp /devices /etc/mnttab /dev/fd /var/run + /export/snapshot + /snapshot + } +} + + + +# +# When to do the backups, full backup on first sunday of the month, +# differential (i.e. incremental since full) every other sunday, +# and incremental backups other days +Schedule { + Name = "WeeklyCycle" + Run = Full 1st sun at 1:05 + Run = Differential 2nd-5th sun at 1:05 + Run = Incremental mon-sat at 1:05 +} + +# This schedule does the catalog. 
It starts after the WeeklyCycle +Schedule { + Name = "WeeklyCycleAfterBackup" + Run = Full sun-sat at 1:10 +} + +# This is the backup of the catalog +FileSet { + Name = "Catalog" + Include = signature=MD5 { + /var/bacula/working/bacula.sql + } +} + + +STORAGE(File-0, File, baccus.ifm.liu.se) +STORAGE(DLT-0, DLT7000, baccus.ifm.liu.se) +STORAGE(DLT-1, DLT7000, baccus.ifm.liu.se) +STORAGE(DLT-2, DLT7000, baccus.ifm.liu.se) +STORAGE(DLT-3, DLT7000, baccus.ifm.liu.se) +STORAGE(DLT-4, DLT7000, baccus.ifm.liu.se) +STORAGE(DLT-5, DLT7000, baccus.ifm.liu.se) + + + +# Reasonable message delivery -- send most everything to email address +# and to the console +Messages { + Name = Standard +# +# NOTE! If you send to two email or more email addresses, you will need +# to replace the %r in the from field (-f part) with a single valid +# email address in both the mailcommand and the operatorcommand. +# + mailcommand = "/opt/bacula/sbin/bsmtp -h ifm.liu.se -f \"\(Bacula\) bacula@ifm.liu.se\" -s \"Bacula: %t %e of %c %l\" %r" + operatorcommand = "/opt/bacula/sbin/bsmtp -h ifm.liu.se -f \"\(Bacula\) bacula@ifm.liu.se\" -s \"Bacula: Intervention needed for %j\" %r" + mail = peter@ifm.liu.se,backup-admin@lysator.liu.se = all, !skipped + operator = peter@ifm.liu.se,backup-admin@lysator.liu.se = mount + console = all, !skipped, !saved +# +# WARNING! the following will create a file that you must cycle from +# time to time as it will grow indefinitely. However, it will +# also keep all your messages if they scroll off the console. +# + append = "/var/bacula/working/log" = all, !skipped +} + + +# Define Pools -------------------------------------- +POOL(Default) +POOL(Inservitus) +POOL(Sherman) +POOL(Lysdisk) diff --git a/examples/conf/m4.bacula-fd.conf b/examples/conf/m4.bacula-fd.conf new file mode 100644 index 00000000..070bce5a --- /dev/null +++ b/examples/conf/m4.bacula-fd.conf @@ -0,0 +1,31 @@ +# bacula-fd.conf +# +# Default Bacula File Daemon Configuration file +# +# WARNING: +# This file is generated from /opt/lysator/etc/bacula/bacula-dir.conf +# Edit the source file and then run 'make'. + +# +# List Directors who are permitted to contact this File daemon +# +Director { + Name = Baccus + Password = "ilF0PZoICjQ60R3E3dks08Rq36KK8cDGJUAaW" +} + +# +# "Global" File daemon configuration specifications +# +FileDaemon { # this is me + Name = Baccus + FDport = 9102 # where we listen for the director + WorkingDirectory = /var/bacula/working + Pid Directory = /var/run +} + +# Send all messages except skipped files back to Director +Messages { + Name = Standard + director = Baccus = all, !skipped +} diff --git a/examples/conf/m4.bacula-sd.conf b/examples/conf/m4.bacula-sd.conf new file mode 100644 index 00000000..57e65ec4 --- /dev/null +++ b/examples/conf/m4.bacula-sd.conf @@ -0,0 +1,152 @@ +# bacula-sd.conf +# +# Default Bacula Storage Daemon Configuration file +# +# WARNING: +# This file is generated from /opt/lysator/etc/bacula/bacula-dir.conf +# Edit the source file and then run 'make'. +# + +Storage { # definition of myself + Name = Baccus + SDPort = 9103 # Director's port + WorkingDirectory = "/var/bacula/working" + Pid Directory = "/var/run" + Maximum Concurrent Jobs = 20 +} + +# +# List Directors who are permitted to contact Storage daemon +# +Director { + Name = Baccus + Password = "KLUwcp1ZTeIc0x265UPrpWW28t7d7cRXmhOqyHxRr" +} + +# +# Devices supported by this Storage daemon +# To connect, the Director's bacula-dir.conf must have the +# same Name and MediaType. 
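+#
+# (Editorial note, illustration only:) the six DLT devices below share a
+# single changer, /dev/scsi/changer/c1t0d0, and are told apart by their
+# Drive Index. The changer connection can be checked from the shell before
+# starting the storage daemon, for example:
+#
+#   mtx -f /dev/scsi/changer/c1t0d0 status
+#   /opt/bacula/etc/mtx-changer /dev/scsi/changer/c1t0d0 slots
+#
+# (the device path is the one used in the definitions below; adjust it, and
+# the mtx-changer location, to match your own hardware and install)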
+# + +Device { + Name = File-0 + Media Type = File + Archive Device = /var/bacula/storage/file-0 + LabelMedia = yes; # lets Bacula label unlabeled media + Random Access = Yes; + AutomaticMount = yes; # when device opened, read it + RemovableMedia = no; + AlwaysOpen = no; +} + +Device { + Name = DLT-0 + Media Type = DLT7000 + Archive Device = /dev/rmt/0cbn + AutomaticMount = yes; # when device opened, read it + AlwaysOpen = yes; + RemovableMedia = yes; + RandomAccess = no; + Autochanger = yes; + Changer Device = /dev/scsi/changer/c1t0d0 + Changer Command = "/opt/bacula/etc/mtx-changer %c %o %S %a %d" + Drive Index = 0 + Maximum Spool Size = 4gb + Maximum Job Spool Size = 1gb + Spool Directory = /var/bacula/spool/dlt-0 +} + +Device { + Name = DLT-1 + Media Type = DLT7000 + Archive Device = /dev/rmt/1cbn + AutomaticMount = yes; # when device opened, read it + AlwaysOpen = yes; + RemovableMedia = yes; + RandomAccess = no; + Autochanger = yes; + Changer Device = /dev/scsi/changer/c1t0d0 + Changer Command = "/opt/bacula/etc/mtx-changer %c %o %S %a %d" + Drive Index = 1 + Maximum Spool Size = 2gb + Maximum Job Spool Size = 1gb + Spool Directory = /var/bacula/spool/dlt-1 +} + +Device { + Name = DLT-2 + Media Type = DLT7000 + Archive Device = /dev/rmt/2cbn + AutomaticMount = yes; # when device opened, read it + AlwaysOpen = yes; + RemovableMedia = yes; + RandomAccess = no; + Autochanger = yes; + Changer Device = /dev/scsi/changer/c1t0d0 + Changer Command = "/opt/bacula/etc/mtx-changer %c %o %S %a %d" + Drive Index = 2 + Maximum Spool Size = 2gb + Maximum Job Spool Size = 1gb + Spool Directory = /var/bacula/spool/dlt-2 +} + +Device { + Name = DLT-3 + Media Type = DLT7000 + Archive Device = /dev/rmt/3cbn + AutomaticMount = yes; # when device opened, read it + AlwaysOpen = yes; + RemovableMedia = yes; + RandomAccess = no; + Autochanger = yes; + Changer Device = /dev/scsi/changer/c1t0d0 + Changer Command = "/opt/bacula/etc/mtx-changer %c %o %S %a %d" + Drive Index = 3 + Maximum Spool Size = 2gb + Maximum Job Spool Size = 1gb + Spool Directory = /var/bacula/spool/dlt-3 +} + +Device { + Name = DLT-4 + Media Type = DLT7000 + Archive Device = /dev/rmt/4cbn + AutomaticMount = yes; # when device opened, read it + AlwaysOpen = yes; + RemovableMedia = yes; + RandomAccess = no; + Autochanger = yes; + Changer Device = /dev/scsi/changer/c1t0d0 + Changer Command = "/opt/bacula/etc/mtx-changer %c %o %S %a %d" + Drive Index = 4 + Maximum Spool Size = 2gb + Maximum Job Spool Size = 1gb + Spool Directory = /var/bacula/spool/dlt-4 +} + +Device { + Name = DLT-5 + Media Type = DLT7000 + Archive Device = /dev/rmt/5cbn + AutomaticMount = yes; # when device opened, read it + AlwaysOpen = yes; + RemovableMedia = yes; + RandomAccess = no; + Autochanger = yes; + Changer Device = /dev/scsi/changer/c1t0d0 + Changer Command = "/opt/bacula/etc/mtx-changer %c %o %S %a %d" + Drive Index = 5 + Maximum Spool Size = 2gb + Maximum Job Spool Size = 1gb + Spool Directory = /var/bacula/spool/dlt-5 +} + +# +# Send all messages to the Director, +# mount messages also are sent to the email address +# +Messages { + Name = Standard + director = Baccus = all +} diff --git a/examples/conf/m4.email b/examples/conf/m4.email new file mode 100644 index 00000000..7e8d5804 --- /dev/null +++ b/examples/conf/m4.email @@ -0,0 +1,28 @@ +From: Peter Eriksson +Reply-To: Peter Eriksson +Subject: Re: [Bacula-users] RE: Feature Request : includes for config-files +To: bacula-users@lists.sourceforge.net + +Marc Schoechlin writes: + +> I think that is the 
99%-solution for this problem - +> but I think many users would be happy with a 90%-solution, which +> allows to store configuration-data in distributed files. + +Or you could do as I just did - generate the configuration +files using a Makefile and the m4 macro processor... That way you +don't have to reinvent the wheel again inside Bacula but can delegate +the tasks to external programs. + +[See the attached files for details. They can be expanded +a lot though, it's just a beginning] + +-- +Peter Eriksson Phone: +46 13 28 2786 +Computer Systems Manager/BOFH Cell/GSM: +46 705 18 2786 +Physics Department, Linkping University Room: Building F, F203 +SE-581 83 Linkping, Sweden http://www.ifm.liu.se/~peter + +See the files bacula-defs.m4 m4.bacula-dir.conf m4.bacula-fd.conf and +m4.bacula-sd.conf in this directory for the attachments to this +email. diff --git a/examples/conf/many-clients.txt b/examples/conf/many-clients.txt new file mode 100644 index 00000000..d8dba663 --- /dev/null +++ b/examples/conf/many-clients.txt @@ -0,0 +1,106 @@ +From: Dan Langille +To: bacula-devel@lists.sourceforge.net +Subject: [Bacula-devel] script for creating many client FDs +Date Tue, 2 Dec 2003 19:41:28 -0500 (EST) + +I created this script which may be useful to others. If you have to +create a bacula-fd.conf file for more than a few clients, it can become +time consuming, not to mention error prone. + +Here is the usage: + +$ sh client-build.sh +client-build.sh : usage client-build.sh DIRNAME DIRPASSWD CLIENTNAME +[CLIENTNAME...] + +Where DIRNAME is the director name which can contact this client + PASSWD is the passwd to be supplied by the director + CLIENTNAME is the name of the client file daemon + +The script creates ./tmp and places all output there. + +bacula-fd.conf.in is the template it uses for creating the client scripts +and it must reside in the same directory as the script. + +Enjoy +-- +Dan Langille - http://www.langille.org/ + +==== bacula-fd.conf.in template ====== +# +# Default Bacula File Daemon Configuration file +# +# For Bacula release 1.32b (14 Oct 2003) -- freebsd 4.8-STABLE +# +# There is not much to change here except perhaps the +# File daemon Name to +# + +# +# List Directors who are permitted to contact this File daemon +# +Director { + Name = @dir-name@ + Password = "@dir-password@" +} + +# +# "Global" File daemon configuration specifications +# +FileDaemon { + Name = @client-name@ + FDport = 9102 # where we listen for the director + WorkingDirectory = /var/db/bacula + Pid Directory = /var/run +} + +# Send all messages except skipped files back to Director +Messages { + Name = Standard + director = undef-dir = all, !skipped +} +=== end bacula-fd.conf.in template ======== + + +==== client-build.sh =========== +#!/bin/sh +# +# Copyright 2003 Dan Langille +# +# Use as you wish, but keep retain this copyright notice. +# Please forward improvements. +# +# usage client-build.sh DIRNAME DIRPASSWD CLIENTNAME [CLIENTNAME...] +# +# Where DIRNAME is the director name which can contact this client +# PASSWD is the passwd to be supplied by the director +# CLIENTNAME is the name of the client file daemon +# +# This script creates ./tmp and places all output there. +# +# bacula-fd.conf.in is the template it uses for creating the client scripts. +# + + +if [ $# -lt 3 ] +then + echo $0 : usage $0 DIRNAME DIRPASSWD CLIENTNAME [CLIENTNAME...] 
+ exit 1 +fi + +mkdir -p ./tmp + +DIRNAME=$1 +DIRPASSWD=$2 + +shift 2 + +echo creating stuff for $DIRNAME with password $DIRPASSWD + +while [ $# -gt 0 ] ; do + CLIENT=$1 + shift + echo "creating config for " $CLIENT + sed "s/@dir-name@/undef-dir/;s/@dir-password@/password/;s/@client-name@/$CLIENT/" bacula-fd.conf.in > ./tmp/$CLIENT-bacula-fd.conf +done +=== end client-build.sh =============== diff --git a/examples/database/bacula-sqlite_2_mysqldump.sh b/examples/database/bacula-sqlite_2_mysqldump.sh new file mode 100755 index 00000000..58f4dbf0 --- /dev/null +++ b/examples/database/bacula-sqlite_2_mysqldump.sh @@ -0,0 +1,23 @@ +#! /bin/sh +# +# bacula-sqlite_2_mysqldump.sh +# +# Convert a Bacula 1.36.2 Sqlite database to MySQL +# Originally Written by Nic Bellamy , Sept/Oct 2003. +# Modified by Silas Bennett , April 2006 for use with Bacula 1.36.2 +# + +if [ $1 = '-h' ] || [ $1 = '--help' ] ; then + echo `basename "$0"`" Usage:" + echo " "`basename $0`" takes a ASCII bacula sqlite database dump as an argument," + echo " and writes an SQL dump suitable for use with MySQL to STDOUT." + echo + echo " Example Use: "`basename $0`" bacula.sqlite.sql > bacula.mysql.sql" + echo " Example Use: "cat bacula.sqlite.sql | `basename $0`" - | mysql -p -u baculadb" + exit +fi + +# If $1 is '-' then cat will read /dev/stdin +cat $1 | +awk '/^INSERT INTO / && $3 != "NextId" && $3 != "Version" { print $0 }' | +sed '/^INSERT INTO [a-zA-Z]* VALUES(/s/(NULL)/(0)/g ; /^INSERT INTO [a-zA-Z]* VALUES(/s/(NULL,/(0,/g ; /^INSERT INTO [a-zA-Z]* VALUES(/s/,NULL,/,0,/g ; /^INSERT INTO [a-zA-Z]* VALUES(/s/,NULL,/,0,/g ; /^INSERT INTO [a-zA-Z]* VALUES(/s/,NULL)/,0)/g' diff --git a/examples/database/dbcheck.sql b/examples/database/dbcheck.sql new file mode 100644 index 00000000..454b8d68 --- /dev/null +++ b/examples/database/dbcheck.sql @@ -0,0 +1,197 @@ + +-- This script does the same as dbcheck, but in full SQL in order to be faster +-- To run it, exec it like this : psql -U bacula bacula (YOUR username and database) +-- then \i dbckeck.sql +-- It will tell you what it does. At the end you'll have to commit yourself. +-- Check the numbers of altered records before ... +-- +-- Notes from Marc Cousin, the author of this script: 01Sep08 +-- The script version won't work better with mysql without indexes. + +-- The reason is that the script works with global queries instead of many small +-- queries like dbcheck. So PostgreSQL can optimise the query by building hash +-- joins or merge joins. + +-- Mysql can't do that (last time I checked, at least ...), and will do nested +-- loops between job and file for instance. And without the missing indexes, +-- mysql will still be as slow as with dbcheck, as you'll more or less have +----thousands of full scans on the job table (where postgresql will do only a few +-- to build its hash). + +-- So for dbcheck with mysql, there is no other solution than adding the missing +-- indexes (but adding and dropping them just for the dbcheck is a good option). + +--repair_bad_paths(): +-- - SELECT PathId,Path from Path " +-- "WHERE Path NOT LIKE '%/' +-- - ask for confirmation +-- - add a slash, doing one update for each record to be updated ... +-- +--Proposal : +-- UPDATE Path Set Path=Path || '/' WHERE Path NOT LIKE '%/'; # Should work everywhere +-- +--repair_bad_filenames(): +-- - SELECT FileNameId,Name from FileName +-- WHERE Name LIKE '%/' +-- - ask for confirmation +-- - remove the slash, one update per row ... 
+-- +--Proposal : +-- UPDATE FileName Set Name=substr(Name,1,char_length(Name)-1) WHERE Name LIKE '%/'; # Works at least with Pg and Mysql +-- +--eliminate_duplicate_filenames(): +-- - Lists all min filenameids from filename where there are duplicates on name +-- - Updates filetable to this entry instead of one of its duplicates +-- - Deletes filenameids from filename which are duplicate and not the min filenameids +-- +--Proposal : +-- CREATE TEMP TABLE t1 AS SELECT Name,min(FileNameId) AS minfilenameid FROM FileName GROUP BY Name HAVING count(*) > 1; +-- CREATE TEMP TABLE t2 AS SELECT FileName.Name, FileName.FileNameId, t1.minfilenameid from FileName join t1 ON (FileName.Name=t1.Name) WHERE FileNameId <> minfilenameid; +-- UPDATE File SET FileNameId=(SELECT t2.minfilenameid FROM t2 WHERE t2.FileNameId=File.FileNameId) WHERE FileNameId IN (SELECT FileNameId FROM t2); +-- DELETE FROM FileName WHERE FileNameId IN (SELECT FileNameId FROM t2); +-- DROP TABLE t1; +-- DROP TABLE t2; +-- +--eliminate_duplicate_paths(): +-- Does exactly the same as above ... +-- +--Proposal : +-- CREATE TEMP TABLE t1 AS SELECT Path,min(PathId) AS minpathid FROM Path GROUP BY Path HAVING count(*) > 1; +-- CREATE TEMP TABLE t2 AS SELECT Path.Path, Path.PathId, t1.minpathid from Path join t1 ON (Path.Path=t1.Path) WHERE PathId <> minpathid; +-- UPDATE Path SET PathId=(SELECT t2.minpathid FROM t2 WHERE t2.PathId=Path.PathId) WHERE PathId IN (SELECT PathId FROM t2); +-- DELETE FROM Path WHERE PathId IN (SELECT PathId FROM t2); +-- DROP TABLE t1; +-- DROP TABLE t2; +-- +-- +--All the orphaned below delete records from a table when they are not referenced anymore in the others... +-- +--eliminate_orphaned_jobmedia_records(): +--Proposal : +-- DELETE FROM JobMedia WHERE JobId NOT IN (SELECT JobId FROM Job) OR MediaID NOT IN (SELECT MediaID FROM Media); +-- +--eliminate_orphaned_file_records(): +--Proposal : +-- DELETE FROM File WHERE JobId NOT IN (SELECT JobId FROM JOB); +-- +--eliminate_orphaned_path_records(): +--Here, the problem is that File is a big table ... we'd better avoid NOT IN on it ... +--Proposal : +-- CREATE TEMP TABLE t1 AS +-- SELECT Path.PathId +-- FROM Path LEFT OUTER JOIN File ON (Path.PathId=File.PathId) +-- WHERE File.PathId IS NULL; +-- DELETE FROM Path WHERE PathId IN (SELECT PathID FROM t1); +-- DROP TABLE t1; +-- +--eliminate_orphaned_filename_records(): +--Here, again, the problem is that File is a big table ... we'd better avoid NOT IN on it ... 
+--Proposal : +-- CREATE TEMP TABLE t1 AS +-- SELECT FileName.FileNameId +-- FROM FileName LEFT OUTER JOIN File ON (FileName.FileNameId=File.FileNameId) +-- WHERE File.FileNameId IS NULL; +-- DELETE FROM FileName WHERE FileNameId IN (SELECT FileNameId FROM t1); +-- DROP TABLE t1; +-- +--eliminate_orphaned_fileset_records(): +-- +--Proposal : +-- DELETE FROM FileSet WHERE FileSetId NOT IN (SELECT DISTINCT FileSetId FROM Job); +-- +--eliminate_orphaned_client_records(): +--Proposal : +-- DELETE FROM Client WHERE ClientId NOT IN (SELECT DISTINCT ClientId FROM Job); +-- +--eliminate_orphaned_job_records(): +--Proposal : +-- DELETE FROM Job WHERE ClientId NOT IN (SELECT ClientId FROM Client); +-- +--eliminate_admin_records(): +--Proposal : +-- DELETE FROM Job WHERE Job.Type='D'; +-- +--eliminate_restore_records(): +--Proposal : +-- DELETE FROM Job WHERE Job.Type='R'; +-- +-- +-- +--One script that does it all : +-- +\t +\a +BEGIN; +-- Uncomment to raise to '1GB' or more to get better results +-- SET work_mem TO '1GB'; + +SELECT('eliminate_admin_records()'); +DELETE FROM Job WHERE Job.Type='D'; + +SELECT('eliminate_restore_records()'); +DELETE FROM Job WHERE Job.Type='R'; + +SELECT('repair_bad_paths()'); +UPDATE Path Set Path=Path||'/' WHERE Path NOT LIKE '%/' AND Path <> ''; + +SELECT('repair_bad_filenames()'); +UPDATE FileName Set Name=substr(Name,1,char_length(Name)-1) WHERE Name LIKE '%/'; + +SELECT('eliminate_duplicate_filenames()'); +CREATE TEMP TABLE t1 AS SELECT Name,min(FileNameId) AS minfilenameid FROM FileName GROUP BY Name HAVING count(*) > 1; +ANALYSE t1; +CREATE TEMP TABLE t2 AS SELECT FileName.Name, FileName.FileNameId, t1.minfilenameid from FileName join t1 ON (FileName.Name=t1.Name) WHERE FileNameId <> minfilenameid; +ANALYSE t2; +UPDATE File SET FileNameId=(SELECT t2.minfilenameid FROM t2 WHERE t2.FileNameId=File.FileNameId) WHERE FileNameId IN (SELECT FileNameId FROM t2); +DELETE FROM FileName WHERE FileNameId IN (SELECT FileNameId FROM t2); +DROP TABLE t1; +DROP TABLE t2; + +SELECT('eliminate_duplicate_paths()'); +CREATE TEMP TABLE t1 AS SELECT Path,min(PathId) AS minpathid FROM Path GROUP BY Path HAVING count(*) > 1; +ANALYSE t1; +CREATE TEMP TABLE t2 AS SELECT Path.Path, Path.PathId, t1.minpathid from Path join t1 ON (Path.Path=t1.Path) WHERE PathId <> minpathid; +ANALYSE t2; +UPDATE Path SET PathId=(SELECT t2.minpathid FROM t2 WHERE t2.PathId=Path.PathId) WHERE PathId IN (SELECT PathId FROM t2); +DELETE FROM Path WHERE PathId IN (SELECT PathId FROM t2); +DROP TABLE t1; +DROP TABLE t2; + +SELECT('eliminate_orphaned_job_records()'); +DELETE FROM Job WHERE ClientId NOT IN (SELECT ClientId FROM Client); + +SELECT('eliminate_orphaned_jobmedia_records()'); +DELETE FROM JobMedia WHERE JobId NOT IN (SELECT JobId FROM Job) OR MediaID NOT IN (SELECT MediaID FROM Media); + +SELECT('eliminate_orphaned_file_records()'); +DELETE FROM File WHERE JobId NOT IN (SELECT JobId FROM JOB); + +SELECT('eliminate_orphaned_path_records()'); +CREATE TEMP TABLE t1 AS + SELECT Path.PathId + FROM Path LEFT OUTER JOIN File ON (Path.PathId=File.PathId) + WHERE File.PathId IS NULL; +ANALYSE t1; +DELETE FROM Path WHERE PathId IN (SELECT PathID FROM t1); +DROP TABLE t1; + +SELECT('eliminate_orphaned_filename_records()'); +CREATE TEMP TABLE t1 AS + SELECT FileName.FileNameId + FROM FileName LEFT OUTER JOIN File ON (FileName.FileNameId=File.FileNameId) + WHERE File.FileNameId IS NULL; +ANALYSE t1; +DELETE FROM FileName WHERE FileNameId IN (SELECT FileNameId FROM t1); +DROP TABLE t1; + 
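+-- (Editorial illustration, not part of the original script:) before typing
+-- the COMMIT that the end of this script asks for, it can help to eyeball
+-- how much is left in the cleaned tables, e.g.:
+--   SELECT count(*) FROM File;
+--   SELECT count(*) FROM Path;
+--   SELECT count(*) FROM FileName;
+-- and type ROLLBACK; instead of COMMIT; if the numbers look wrong.
+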
+SELECT('eliminate_orphaned_fileset_records()'); +DELETE FROM FileSet WHERE FileSetId NOT IN (SELECT DISTINCT FileSetId FROM Job); + +SELECT('eliminate_orphaned_client_records()'); +DELETE FROM Client WHERE ClientId NOT IN (SELECT DISTINCT ClientId FROM Job); + + +SELECT('Now you should commit,'); +SELECT('but check that the amount of deleted or updated data is sane...'); +SELECT('If you''re sure, type ''COMMIT;'''); +SELECT('THIS SCRIPT IS STILL BETA !'); diff --git a/examples/database/postgresql-dump.txt b/examples/database/postgresql-dump.txt new file mode 100644 index 00000000..7c82c3f1 --- /dev/null +++ b/examples/database/postgresql-dump.txt @@ -0,0 +1,42 @@ +To: bacula-users@lists.sourceforge.net +Subject: Re: [Bacula-users] backup postgresql databases +From: Valtteri Vuorikoski +Date: 11 Mar 2004 14:56:13 +0000 + +Mathieu Arnold writes: + +> I was wondering if someone already had some script, or ways of doings scripts +> to backup (and maybe restore) pgsql databases. I'm balancing between taking a +> snapshot of the database directory and backuping that, dumping the datas into +> .sql.gz files, into .tgz files, or into a pipe letting bacula deal with the +> compression. + +Here's a quick shell script hack to dump all databases into separate tars +with pg_dump: + +do_pgsql() { + mkdir $dump_pg || exit 3 + psql -Atc 'select datname from pg_database where datistemplate=false' template1 postgres > $dump_pg/databases || exit 4 + + touch $dump_pg/dump.log + for d in `cat $dump_pg/databases` ; do + pg_dump -U postgres -Ft "$d" > $dump_pg/"$d.tar" >> $dump_pg/dump.log 2>&1 + [ "$retval" -eq 0 ] && retval=$? + done +} + +Set the variable dump_pg to point to the directly where you want the dump. Then +back it up and delete when you're done. + +You could probably use fs snapshots if you LOCK EXCLUSIVE all tables and CHECKPOINT +the transaction log, but as postgresql's relationship between files and tables is +not very transparent (such as with mysql), I think particularly partial restores +would end up being rather problematic. + +Backup/restore capability does not really appear to be postgresql's +forte, unfortunately. + +-- + Valtteri Vuorikoski + MagentaSites Oy + diff --git a/examples/database/postgresql-mysql-dump.txt b/examples/database/postgresql-mysql-dump.txt new file mode 100644 index 00000000..68963f7a --- /dev/null +++ b/examples/database/postgresql-mysql-dump.txt @@ -0,0 +1,99 @@ +From: Mathieu Arnold +To: bacula-users@lists.sourceforge.net +Subject: Re: [Bacula-users] backup postgresql databases +Date: Fri, 12 Mar 2004 22:31:58 +0100 + ++-Le 11/03/2004 15:20 +0100, Mathieu Arnold a dit : +| Hi, +| +| I was wondering if someone already had some script, or ways of doings +| scripts to backup (and maybe restore) pgsql databases. I'm balancing +| between taking a snapshot of the database directory and backuping that, +| dumping the datas into .sql.gz files, into .tgz files, or into a pipe +| letting bacula deal with the compression. +| +| Any ideas ? 
:) + +Thanks to all ppl I got answers from (many used awfully hard way to get +databases), I cooked up my scripts (I needed mysql too), and here they are : + +-------------------------------------------- +#!/bin/sh + +export TMPDIR="/usr/tmp/" +export TEMP="/usr/tmp/" +export SAVE="/usr/tmp/dumps/" +export LANG="C" + +pg_user=pgsql +pg_dbuser=pgsql +pg_template=template1 +exclude=template +host=plouf + +sed=/usr/bin/sed +pg_dump=/usr/local/bin/pg_dump +pg_dumpall=/usr/local/bin/pg_dumpall +psql=/usr/local/bin/psql + +gzip="| /usr/bin/gzip -nc9" +gzext=".gz" + +if [ ! -d $SAVE ] +then + mkdir $SAVE +else + rm -f $SAVE/$host-pgsql* +fi + +su - $pg_user -c "$pg_dumpall -g $gzip" > $SAVE/$host-pgsql$gzext + +for i in $($psql -l $pg_template $pg_dbuser|sed -e '1,4d' -e +'/rows)$/,/\eof/d' -e '/template/d' -e 's/ \([^ ]*\).*$/\1/') +do + su - $pg_user -c "$pg_dump -c -F p $i $gzip" > $SAVE/$host-pgsql-$i$gzext +done +-------------------------------------------- + +For those using complicate selects to get databases list, I advise psql -l +:) + +and for mysql : + +-------------------------------------------- +#!/bin/sh + +export TMPDIR="/usr/tmp/" +export TEMP="/usr/tmp/" +export SAVE="/usr/tmp/dumps/" +export LANG="C" + +my_user=root +my_passwd=password +host=plouf + +sed=/usr/bin/sed +mysql=/usr/local/bin/mysql +mysqldump=/usr/local/bin/mysqldump + +gzip="/usr/bin/gzip -nc9" +gzext=".gz" + +if [ ! -d $SAVE ] +then + mkdir $SAVE +else + rm -f $SAVE/$host-mysql* +fi + +for i in $($mysql -u $my_user -p$my_passwd -e 'show databases'|$sed '1d') +do + $mysqldump -u $my_user -p$my_passwd $i | $gzip > $SAVE/$host-mysql-$i$gzext +done +-------------------------------------------- + +maybe those scripts will save some ppl some time :) + +-- +Mathieu Arnold + diff --git a/examples/database/sqlite2pgsql b/examples/database/sqlite2pgsql new file mode 100755 index 00000000..7423f3d5 --- /dev/null +++ b/examples/database/sqlite2pgsql @@ -0,0 +1,146 @@ +#!/bin/bash + +# Import an SQLite dump of a Bacula catalog into Postgres +# Designed for v1.63.3 (as found on Debian sarge) +# +# v0.5 +# +# Copyright (c) 2006 Russell Howe + +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
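+# Usage sketch (assumptions: the dump really is named bacula.sql as set
+# in FILE below, the target PostgreSQL database "bacula" from DBNAME
+# does not exist yet, and psql can connect as the current user):
+#
+#   echo .dump | sqlite bacula.db > bacula.sql    # 'sqlite3' for SQLite 3 catalogs
+#   createdb bacula
+#   ./sqlite2pgsql
+#
+# The data-load phase appends its output to LOGFILE (defined below).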
+ +FILE=bacula.sql +# Tables, in order of size +TABLES=(File Filename Path Job Media Pool CDImages Counters Version Client FileSet JobMedia NextId UnsavedFiles BaseFiles) +# Tables, in insert order +TABLESINSERT=(Pool CDImages Client Counters FileSet Filename Job Media JobMedia NextId Path File UnsavedFiles Version BaseFiles) +DBNAME=bacula +LOGFILE="/var/tmp/sqlite2pgsql.$$.log" + +importdata() { + if [ "x" == "x$1" ]; then + echo "Error: importdata() called without an argument. Aborting." + exit 1 + fi + + SQLFILE="$1" + + if [ ! -r "$SQLFILE" ]; then + echo "Error: Cannot read from $SQLFILE. Aborting." + exit 1 + fi + + echo -n "Loading $SQLFILE into database $DBNAME..." + psql -d "$DBNAME" -f "$SQLFILE" || ( + echo "Failed to load $SQLFILE into database $DBNAME. psql exited with return code $?. Aborting." + exit 1 + ) +} + + +# Go through each of the table names, splitting the INSERT statements off +# into seperate files +for table in ${TABLES[@]}; do + SRC="$FILE.other" + if [ ! -f "$FILE.other" ]; then + SRC="$FILE" + fi + PATTERN="^INSERT INTO $table " + if [ ! -f "$FILE.data.$table" ]; then + echo -n "Separating $table table from database dump..." + + echo "BEGIN;" > "$FILE.data.$table.tmp" + grep "$PATTERN" "$SRC" >> "$FILE.data.$table.tmp" + echo "COMMIT;" >> "$FILE.data.$table.tmp" + + mv "$FILE.data.$table.tmp" "$FILE.data.$table" + echo "done. ($FILE.data.$table)" + echo -n "Stripping matched lines from the source file to speed up the next round..." + grep -v "$PATTERN" "$SRC" > "$FILE.other.tmp" + mv "$FILE.other.tmp" "$FILE.other" + echo "done." + else + echo "$FILE.data.$table already exists. Assuming this table has already been split" + echo "off from the main dump. Not regenerating." + fi +done + +echo "Seperating DDL statements from INSERT statements" + +grep -v "^INSERT" "$FILE.other" > "$FILE.ddl" +echo "DDL statements are now in $FILE.ddl" + +grep "^INSERT" "$FILE.other" > "$FILE.data.other" +echo "Any remaining INSERT statements are now in $FILE.data.other" + +echo "Fixing up datatypes used in the DDL..." + +sed -e 's/TINYINT/SMALLINT/g' \ + -e 's/DATETIME/TIMESTAMP/g' \ + -e 's/INTEGER UNSIGNED/INTEGER/g' \ + -e 's/BIGINT UNSIGNED/BIGINT/g' \ + -e 's/INTEGER AUTOINCREMENT/SERIAL/g' \ + -e s/\ DEFAULT\ \"\"/\ DEFAULT\ \'\'/g \ + -e s#\ TIMESTAMP\ DEFAULT\ 0#\ TIMESTAMP\ DEFAULT\ \'1/1/1970\'#g "$FILE.ddl" > "$FILE.ddl.postgres" + +echo "Fixing Pool table..." + +sed -e 's/,0,0);$/,NULL,NULL);/' "$FILE.data.Pool" > "$FILE.data.Pool.fixed" + +echo "Fixing removing entries from Job table which no longer have a Pool to link to" + +# Remove jobs which refer to nonexistent pools, and fix up invalid start and end times to be 1/1/1970 +grep -vE '([2589]|1[0-5]),[0-9]+,[0-9]+,[0-9]+\);' "$FILE.data.Job" \ + |sed -e s@^\\\(INSERT\ INTO\ Job\ VALUES\(\\\(\[^,\]\\\+,\\\)\\\{8\\\}\\\)0,@\\1NULL,@ \ + -e s@^\\\(INSERT\ INTO\ Job\ VALUES\(\\\(\[^,\]\\\+,\\\)\\\{9\\\}\\\)0,@\\1\NULL,@ \ + -e s@^\\\(INSERT\ INTO\ Job\ VALUES\(\\\(\[^,\]\\\+,\\\)\\\{17\\\}\\\)0,@\\1\NULL,@ \ + -e s@^\\\(INSERT\ INTO\ Job\ VALUES\(\\\(\[^,\]\\\+,\\\)\\\{18\\\}\\\)0,@\\1\NULL,@ \ + -e s@^\\\(INSERT\ INTO\ Job\ VALUES\(\\\(\[^,\]\\\+,\\\)\\\{5\\\}\\\)0,@\\1NULL,@ > "$FILE.data.Job.fixed" + +# Remove JobMedia entries which refer to nonexistent Jobs + +echo "Cleaning up the dump of the JobMedia table..." 
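+# NOTE: the numeric JobId/PoolId patterns in the greps above and below
+# (e.g. '([2589]|1[0-5])' and '([12589]|1[0-4])') are specific to the
+# catalog this script was originally written against; they list the IDs
+# that no longer existed in that particular dump.  Adjust them to match
+# your own catalog before relying on this cleanup.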
+ +grep -vE 'INSERT INTO JobMedia VALUES\([0-9]+,([12589]|1[0-4]),' "$FILE.data.JobMedia" > "$FILE.data.JobMedia.fixed" + +# Remove File entries which refer to nonexistent Jobs + +echo "Cleaning up the dump of the File table..." + +grep -vE 'INSERT INTO File VALUES\([0-9]+,[0-9]+,([12589]|1[0-4]),' "$FILE.data.File" > "$FILE.data.File.fixed" + +echo "OK, we should be ready to import data into PostgreSQL now. DDL first..." +echo "This will probably fail the first time. You will have to edit $FILE.other" +echo "and rearrange the CREATE TABLE statements so that the tables are created" +echo "in the correct order." +echo "After editing $FILE.other, simply rerun this script and it will carry on" +echo "where it left off." + +importdata "$FILE.ddl.postgres" + +for table in ${TABLESINSERT[@]} other; do + IMPORTFILE="$FILE.data.$table" + if [ -f "$FILE.data.$table.fixed" ]; then + IMPORTFILE="$FILE.data.$table.fixed" + fi + importdata "$IMPORTFILE" 2>&1 |tee -a "$LOGFILE" +done + +echo "All done! Check $LOGFILE for errors." + diff --git a/examples/devices/DDS.conf b/examples/devices/DDS.conf new file mode 100644 index 00000000..e0bcd002 --- /dev/null +++ b/examples/devices/DDS.conf @@ -0,0 +1,10 @@ +# +# Device definition virtually any DDS drive +# +Device { + Name = "DDS-Drive" + Media Type = "4mm" + Archive Device = /dev/nst0 + AutomaticMount = yes; # when device opened, read it + AlwaysOpen = Yes; +} diff --git a/examples/devices/DDS4.conf b/examples/devices/DDS4.conf new file mode 100644 index 00000000..b2d6577c --- /dev/null +++ b/examples/devices/DDS4.conf @@ -0,0 +1,19 @@ +# +# A Linux or Solaris tape drive +# +Device { + Name = DDS-4 # + Media Type = DDS-4 + Archive Device = /dev/nst0 + AutomaticMount = yes; # when device opened, read it + AlwaysOpen = yes; + RemovableMedia = yes; + RandomAccess = no; +# Changer Command = "/opt/bacula/scripts/mtx-changer %c %o %S %a %d" +# Changer Device = /dev/sg0 +# AutoChanger = yes + # Enable the Alert command only if you have the mtx package loaded +# Alert Command = "sh -c 'tapeinfo -f %c |grep TapeAlert|cat'" +# If you have smartctl, enable this, it has more info than tapeinfo +# Alert Command = "sh -c 'smartctl -H -l error %c'" +} diff --git a/examples/devices/EXB-8200.conf b/examples/devices/EXB-8200.conf new file mode 100644 index 00000000..08ed95b3 --- /dev/null +++ b/examples/devices/EXB-8200.conf @@ -0,0 +1,13 @@ +# +# Device definition for a VERY OLD Exabyte +# 8mm drive. It does not have a Hardware +# end of medium function. +# +Device { + Name = "Exabyte 8mm" + Media Type = "8mm" + Archive Device = /dev/nst1 + Hardware end of medium = No; + AutomaticMount = yes; # when device opened, read it + AlwaysOpen = Yes; +} diff --git a/examples/devices/EXB-8500.conf b/examples/devices/EXB-8500.conf new file mode 100644 index 00000000..e557dda3 --- /dev/null +++ b/examples/devices/EXB-8500.conf @@ -0,0 +1,10 @@ +# +# Device definition for an 8mm drive. 
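+# (Unlike the EXB-8200 example above, this drive is assumed to report
+# end-of-medium itself, so no "Hardware end of medium = No" override
+# is included here.)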
+#
+Device {
+ Name = "Exabyte 8mm"
+ Media Type = "8mm"
+ Archive Device = /dev/nst1
+ AutomaticMount = yes; # when device opened, read it
+ AlwaysOpen = Yes;
+}
diff --git a/examples/devices/Exabyte.conf b/examples/devices/Exabyte.conf
new file mode 100644
index 00000000..4a53ca11
--- /dev/null
+++ b/examples/devices/Exabyte.conf
@@ -0,0 +1,15 @@
+#
+# A very old Exabyte with no end of media detection
+#
+Device {
+ Name = "Exabyte 8mm"
+ Media Type = "8mm"
+ Archive Device = /dev/nst0
+ Hardware end of medium = No;
+ AutomaticMount = yes; # when device opened, read it
+ AlwaysOpen = Yes;
+ RemovableMedia = yes;
+ RandomAccess = no;
+# If you have smartctl, enable this, it has more info than tapeinfo
+# Alert Command = "sh -c 'smartctl -H -l error %c'"
+}
diff --git a/examples/devices/FreeBSD.conf b/examples/devices/FreeBSD.conf
new file mode 100644
index 00000000..b545aabc
--- /dev/null
+++ b/examples/devices/FreeBSD.conf
@@ -0,0 +1,16 @@
+Device {
+ Name = DDS-4
+ Description = "DDS-4 for FreeBSD"
+ Media Type = DDS-4
+ Archive Device = /dev/nsa1
+ AutomaticMount = yes; # when device opened, read it
+ AlwaysOpen = yes
+ Offline On Unmount = no
+ Hardware End of Medium = no
+ BSF at EOM = yes
+ Backward Space Record = no
+ Fast Forward Space File = no
+ TWO EOF = yes
+# If you have smartctl, enable this, it has more info than tapeinfo
+# Alert Command = "sh -c 'smartctl -H -l error %c'"
+}
diff --git a/examples/devices/HP-DLT80.conf b/examples/devices/HP-DLT80.conf
new file mode 100644
index 00000000..a194ea65
--- /dev/null
+++ b/examples/devices/HP-DLT80.conf
@@ -0,0 +1,12 @@
+#
+# This works for the HP DLT8000 drive (it is
+# actually manufactured by Quantum).
+# Nothing special
+#
+Device {
+ Name = "HP DLT 80"
+ Media Type = DLT8000
+ Archive Device = /dev/nst0
+ AutomaticMount = yes; # when device opened, read it
+ AlwaysOpen = yes;
+}
diff --git a/examples/devices/HP-autoloader.conf b/examples/devices/HP-autoloader.conf
new file mode 100644
index 00000000..367e843b
--- /dev/null
+++ b/examples/devices/HP-autoloader.conf
@@ -0,0 +1,14 @@
+#
+# This is the definition Kern uses for a
+# HP Surestore DAT autoloader (dat 40x6e)
+#
+Device {
+ Name = "DAT-40x6e"
+ Media Type = DDS-4
+ Archive Device = /dev/nst0
+ Changer Device = /dev/sg0
+ Changer Command = "/opt/bacula/scripts/mtx-changer %c %o %S %a"
+ AutoChanger = yes
+ AutomaticMount = yes; # when device opened, read it
+ AlwaysOpen = yes;
+}
diff --git a/examples/devices/LTO-2.conf b/examples/devices/LTO-2.conf
new file mode 100644
index 00000000..d28e66cf
--- /dev/null
+++ b/examples/devices/LTO-2.conf
@@ -0,0 +1,20 @@
+#
+# A Linux or Solaris LTO-2 tape drive
+#
+Device {
+ Name = LTO-2
+ Media Type = LTO-2
+ Archive Device = /dev/nst0
+ AutomaticMount = yes; # when device opened, read it
+ AlwaysOpen = yes;
+ RemovableMedia = yes;
+ RandomAccess = no;
+ Maximum File Size = 3GB
+# Changer Command = "/opt/bacula/scripts/mtx-changer %c %o %S %a %d"
+# Changer Device = /dev/sg0
+# AutoChanger = yes
+ # Enable the Alert command only if you have the mtx package loaded
+# Alert Command = "sh -c 'tapeinfo -f %c |grep TapeAlert|cat'"
+# If you have smartctl, enable this, it has more info than tapeinfo
+# Alert Command = "sh -c 'smartctl -H -l error %c'"
+}
diff --git a/examples/devices/LTO-3.conf b/examples/devices/LTO-3.conf
new file mode 100644
index 00000000..6c68b21c
--- /dev/null
+++ b/examples/devices/LTO-3.conf
@@ -0,0 +1,20 @@
+#
+# A Linux or Solaris LTO-3 tape drive
+#
+Device {
+ Name = LTO-3
+ Media Type = LTO-3
+ Archive Device = /dev/nst0
+ AutomaticMount = yes; # when device opened, read it
+ AlwaysOpen = yes;
+ RemovableMedia = yes;
+ RandomAccess = no;
+ Maximum File Size = 4GB
+# Changer Command = "/opt/bacula/scripts/mtx-changer %c %o %S %a %d"
+# Changer Device = /dev/sg0
+# AutoChanger = yes
+ # Enable the Alert command only if you have the mtx package loaded
+# Alert Command = "sh -c 'tapeinfo -f %c |grep TapeAlert|cat'"
+# If you have smartctl, enable this, it has more info than tapeinfo
+# Alert Command = "sh -c 'smartctl -H -l error %c'"
+}
diff --git a/examples/devices/LTO-4.conf b/examples/devices/LTO-4.conf
new file mode 100644
index 00000000..28260ebb
--- /dev/null
+++ b/examples/devices/LTO-4.conf
@@ -0,0 +1,20 @@
+#
+# A Linux or Solaris LTO-4 tape drive
+#
+Device {
+ Name = LTO-4
+ Media Type = LTO-4
+ Archive Device = /dev/nst0
+ AutomaticMount = yes; # when device opened, read it
+ AlwaysOpen = yes;
+ RemovableMedia = yes;
+ RandomAccess = no;
+ Maximum File Size = 5GB
+# Changer Command = "/opt/bacula/scripts/mtx-changer %c %o %S %a %d"
+# Changer Device = /dev/sg0
+# AutoChanger = yes
+ # Enable the Alert command only if you have the mtx package loaded
+# Alert Command = "sh -c 'tapeinfo -f %c |grep TapeAlert|cat'"
+# If you have smartctl, enable this, it has more info than tapeinfo
+# Alert Command = "sh -c 'smartctl -H -l error %c'"
+}
diff --git a/examples/devices/OnStream.conf b/examples/devices/OnStream.conf
new file mode 100644
index 00000000..be225233
--- /dev/null
+++ b/examples/devices/OnStream.conf
@@ -0,0 +1,19 @@
+#
+# An OnStream tape drive.
+# You need the kernel osst driver 0.9.14 or later, and
+# do "mt -f /dev/nosst0 defblksize 32768" once as root.
+#
+Device {
+ Name = OnStream
+ Description = "OnStream drive on Linux"
+ Media Type = OnStream
+ Archive Device = /dev/nst0
+ AutomaticMount = yes; # when device opened, read it
+ AlwaysOpen = yes
+ Offline On Unmount = no
+# The min/max blocksizes of 32768 are *required*
+ Minimum Block Size = 32768
+ Maximum Block Size = 32768
+# If you have smartctl, enable this, it has more info than tapeinfo
+# Alert Command = "sh -c 'smartctl -H -l error %c'"
+}
diff --git a/examples/devices/OpenBSD.conf b/examples/devices/OpenBSD.conf
new file mode 100644
index 00000000..88b71c6d
--- /dev/null
+++ b/examples/devices/OpenBSD.conf
@@ -0,0 +1,14 @@
+Device {
+ Name = DDS-3
+ Media Type = DDS-3
+ Archive Device = /dev/nrst0
+ Use MTIOCGET= no
+ BSF at EOM = yes
+ TWO EOF = no
+ AutomaticMount = yes;
+ AlwaysOpen = yes;
+ RemovableMedia = yes;
+ RandomAccess = no;
+# If you have smartctl, enable this, it has more info than tapeinfo
+# Alert Command = "sh -c 'smartctl -H -l error %c'"
+}
diff --git a/examples/devices/README b/examples/devices/README
new file mode 100644
index 00000000..a28860d1
--- /dev/null
+++ b/examples/devices/README
@@ -0,0 +1,17 @@
+
+This directory contains example Device resource
+definitions that are known to work for various
+tape drives.
+
+In general, you can set the Name record
+and the Media Type to anything you want, but
+they must correspond with what you have in your
+Director's config file.
+
+The Archive Device must be changed to agree
+with your physical device depending on the system
+and the drive.
+
+There are many other records that you might want
+to add as well.
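+
+For illustration, the matching Storage resource on the Director side
+might look like the sketch below (the address and password are
+placeholders); its Device and Media Type must agree with the Storage
+daemon's Device resource:
+
+  Storage {
+    Name = Tape-1
+    Address = sd.example.com       # Storage daemon address
+    SDPort = 9103
+    Password = "storage-password"
+    Device = "DDS-Drive"           # matches the SD Device (or Autochanger) Name
+    Media Type = "4mm"             # matches the SD Device Media Type
+  }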
+ diff --git a/examples/devices/Sony-DDS.conf b/examples/devices/Sony-DDS.conf new file mode 100644 index 00000000..e0bcd002 --- /dev/null +++ b/examples/devices/Sony-DDS.conf @@ -0,0 +1,10 @@ +# +# Device definition virtually any DDS drive +# +Device { + Name = "DDS-Drive" + Media Type = "4mm" + Archive Device = /dev/nst0 + AutomaticMount = yes; # when device opened, read it + AlwaysOpen = Yes; +} diff --git a/examples/devices/two-drive-autoloader.conf b/examples/devices/two-drive-autoloader.conf new file mode 100644 index 00000000..2358b7f7 --- /dev/null +++ b/examples/devices/two-drive-autoloader.conf @@ -0,0 +1,48 @@ +# +# An autochanger device with two drives +# +Autochanger { + Name = Autochanger + Device = Drive-1 + Device = Drive-2 + Changer Command = "/opt/bacula/scripts/mtx-changer %c %o %S %a %d" + Changer Device = /dev/sg0 +} + +Device { + Name = Drive-1 # + Drive Index = 0 + Media Type = DLT-8000 + Archive Device = /dev/nst0 + AutomaticMount = yes; # when device opened, read it + AlwaysOpen = yes; + RemovableMedia = yes; + RandomAccess = no; + AutoChanger = yes + # + # Enable the Alert command only if you have the mtx package loaded + # Note, apparently on some systems, tapeinfo resets the SCSI controller + # thus if you turn this on, make sure it does not reset your SCSI + # controller. I have never had any problems, and smartctl does + # not seem to cause such problems. + # + Alert Command = "sh -c 'tapeinfo -f %c |grep TapeAlert|cat'" + If you have smartctl, enable this, it has more info than tapeinfo + Alert Command = "sh -c 'smartctl -H -l error %c'" +} + +Device { + Name = Drive-2 # + Drive Index = 1 + Media Type = DLT-8000 + Archive Device = /dev/nst1 + AutomaticMount = yes; # when device opened, read it + AlwaysOpen = yes; + RemovableMedia = yes; + RandomAccess = no; + AutoChanger = yes + # Enable the Alert command only if you have the mtx package loaded + Alert Command = "sh -c 'tapeinfo -f %c |grep TapeAlert|cat'" + If you have smartctl, enable this, it has more info than tapeinfo + Alert Command = "sh -c 'smartctl -H -l error %c'" +} diff --git a/examples/local_partitions b/examples/local_partitions new file mode 100755 index 00000000..39461462 --- /dev/null +++ b/examples/local_partitions @@ -0,0 +1,21 @@ +#!/bin/sh +# +# A script to extract the local, permanently mounted, real, filesystems. +# Tested on Solaris, Linux, IRIX +# +# Written by: Peter Eriksson 2004-12-13 +# + +PATH=/bin:/usr/bin:/sbin:/usr/sbin +export PATH + +if [ -f /etc/fstab ]; then + awk '($1 ~ /^\/dev/ && $2 ~ /^\// && $4 !~ /noauto/) {print $2}' &2 + exit 1 +fi + +exit 0 diff --git a/examples/local_partitions.txt b/examples/local_partitions.txt new file mode 100644 index 00000000..cbcbfced --- /dev/null +++ b/examples/local_partitions.txt @@ -0,0 +1,8 @@ +Date: Mon, 13 Dec 2004 11:52:51 +0100 (MET) +From: Peter Eriksson +To: kern@sibbald.com +Subject: A script to extract local partitions to backup +Please find enclosed a small script that perhaps could be included +in Bacula as an example for how to use the \| feature in FileSets. + +See: local_partitions in this directory for the script. diff --git a/examples/nagios/check_bacula/Makefile.in b/examples/nagios/check_bacula/Makefile.in new file mode 100644 index 00000000..7cdc7a37 --- /dev/null +++ b/examples/nagios/check_bacula/Makefile.in @@ -0,0 +1,100 @@ +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# +@MCOMMON@ + +srcdir = . +VPATH = . +.PATH: . 
+ +# one up +basedir = ../../../src +# top dir +topdir = ../../.. +# this dir relative to top dir +thisdir = ../examples/nagios/check_bacula + +DEBUG=@DEBUG@ + +first_rule: all +dummy: + +# +CHECKSRCS = check_bacula.c authenticate.c +CHECKOBJS = check_bacula.o authenticate.o + +# these are the objects that are changed by the .configure process +EXTRAOBJS = @OBJLIST@ + +CHECK_CPPFLAGS= +CHECK_LDFLAGS= + +.SUFFIXES: .c .o +.PHONY: +.DONTCARE: + +# inference rules +.c.o: + @echo "Compiling $<" + $(NO_ECHO) $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) $(CHECK_CPPFLAGS) \ + -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) $< +#------------------------------------------------------------------------- +all: Makefile check_bacula + @echo "==== Make of check_bacula is good ====" + @echo " " + +check_bacula: Makefile $(CHECKOBJS) $(basedir)/lib/libbac$(DEFAULT_ARCHIVE_TYPE) + $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) $(CHECK_LDFLAGS) -L$(basedir)/lib -o $@ \ + $(CHECKOBJS) $(DLIB) -lbac -lm $(LIBS) $(OPENSSL_LIBS) + + +Makefile: $(srcdir)/Makefile.in $(topdir)/config.status + cd $(topdir) \ + && CONFIG_FILES=$(thisdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status + +libtool-clean: + $(RMF) -r .libs _libs + +clean: + @$(RMF) check_bacula core core.* a.out *.o *.bak *~ *.intpro *.extpro 1 2 3 + +realclean: clean + @$(RMF) tags + +distclean: realclean + if test $(srcdir) = .; then $(MAKE) realclean; fi + (cd $(srcdir); $(RMF) Makefile) + +devclean: realclean + if test $(srcdir) = .; then $(MAKE) realclean; fi + (cd $(srcdir); $(RMF) Makefile) + +install: all + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) check_bacula $(DESTDIR)$(sbindir)/check_bacula + +uninstall: + (cd $(DESTDIR)$(sbindir); $(RMF) check_bacula) + + + +# Semi-automatic generation of dependencies: +# Use gcc -MM because X11 `makedepend' doesn't work on all systems +# and it also includes system headers. +# `semi'-automatic since dependencies are generated at distribution time. + +depend: + @$(MV) Makefile Makefile.bak + @$(SED) "/^# DO NOT DELETE:/,$$ d" Makefile.bak > Makefile + @$(ECHO) "# DO NOT DELETE: nice dependency list follows" >> Makefile + @$(CXX) -S -M $(CPPFLAGS) $(CHECK_CPPFLAGS) -I$(srcdir) -I$(basedir) *.c >> Makefile + @if test -f Makefile ; then \ + $(RMF) Makefile.bak; \ + else \ + $(MV) Makefile.bak Makefile; \ + echo -e "Something went wrong\n\a"; \ + fi + +# ----------------------------------------------------------------------- +# DO NOT DELETE: nice dependency list follows diff --git a/examples/nagios/check_bacula/README b/examples/nagios/check_bacula/README new file mode 100644 index 00000000..2914b0b4 --- /dev/null +++ b/examples/nagios/check_bacula/README @@ -0,0 +1,15 @@ +# It's more or less untested, though. +# Submitted by Arno Lehmann +# + +run ./configure with your usual options at the toplevel. Afterwards, use +'make' to create your binaries. + +If you encounter problems, you'll have to setup the Bacula source manually. + +Note that check_bacula does not support TLS and is an unsupported add-on to +Bacula. Even if it's not part of the core Bacula programs, questions can be +asked at the bacula-users mailing list. + +Bacula is a Trademark of Kern Sibbald. Bacula and the accompanying programs +are open source. See the LICENSE file for more information. diff --git a/examples/nagios/check_bacula/authenticate.c b/examples/nagios/check_bacula/authenticate.c new file mode 100644 index 00000000..691b5cf1 --- /dev/null +++ b/examples/nagios/check_bacula/authenticate.c @@ -0,0 +1,178 @@ +/* + * + * Bacula authentication. 
Provides authentication with + * File and Storage daemons. + * + * Nicolas Boichat, August MMIV + * + * This routine runs as a thread and must be thread reentrant. + * + * Basic tasks done here: + * + */ +/* + Bacula® - The Network Backup Solution + + Copyright (C) 2004-2014 Free Software Foundation Europe e.V. + + The main author of Bacula is Kern Sibbald, with contributions from + many others, a complete list can be found in the file AUTHORS. + This program is Free Software; you can redistribute it and/or + modify it under the terms of version three of the GNU Affero General Public + License as published by the Free Software Foundation plus additions + that are listed in the file LICENSE. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + + Bacula® is a registered trademark of John Walker. + The licensor of Bacula is the Free Software Foundation Europe + (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich, + Switzerland, email:ftf@fsfeurope.org. +*/ + +#include "bacula.h" +#include "check_bacula.h" + +void senditf(const char *fmt, ...); +void sendit(const char *buf); + +/* Commands sent to Director */ +static char DIRhello[] = "Hello %s calling\n"; + +/* Response from Director */ +static char DIROKhello[] = "1000 OK:"; + +/* Commands sent to Storage daemon */ +static char SDhello[] = "Hello SD: Bacula Director %s calling\n"; + +/* Commands sent to File daemon */ +static char FDhello[] = "Hello Director %s calling\n"; + +/* Response from SD */ +static char SDOKhello[] = "3000 OK Hello"; +/* Response from FD */ +static char FDOKhello[] = "2000 OK Hello"; + +/* Forward referenced functions */ + +/* + * Authenticate Director + */ +int authenticate_director(BSOCK *dir, char *dirname, char *password) +{ + int tls_local_need = BNET_TLS_NONE; + int tls_remote_need = BNET_TLS_NONE; + int compatible = true; + char bashed_name[MAX_NAME_LENGTH]; + + bstrncpy(bashed_name, dirname, sizeof(bashed_name)); + bash_spaces(bashed_name); + + /* Timeout Hello after 5 mins */ + btimer_t *tid = start_bsock_timer(dir, 60 * 5); + dir->fsend(DIRhello, bashed_name); + + if (!cram_md5_respond(dir, password, &tls_remote_need, &compatible) || + !cram_md5_challenge(dir, password, tls_local_need, compatible)) { + stop_bsock_timer(tid); + return 0; + } + + Dmsg1(6, ">dird: %s", dir->msg); + if (dir->recv() <= 0) { + stop_bsock_timer(tid); + return 0; + } + Dmsg1(10, "msg); + stop_bsock_timer(tid); + if (strncmp(dir->msg, DIROKhello, sizeof(DIROKhello)-1) != 0) { + return 0; + } + return 1; +} + +/* + * Authenticate Storage daemon connection + */ +int authenticate_storage_daemon(BSOCK *sd, char *sdname, char* password) +{ + char dirname[MAX_NAME_LENGTH]; + int tls_local_need = BNET_TLS_NONE; + int tls_remote_need = BNET_TLS_NONE; + int compatible = true; + + /* + * Send my name to the Storage daemon then do authentication + */ + bstrncpy(dirname, sdname, sizeof(dirname)); + bash_spaces(dirname); + /* Timeout Hello after 5 mins */ + btimer_t *tid = start_bsock_timer(sd, 60 * 5); + if (!sd->fsend(SDhello, dirname)) { + stop_bsock_timer(tid); + return 0; + } + if (!cram_md5_respond(sd, password, 
&tls_remote_need, &compatible) || + !cram_md5_challenge(sd, password, tls_local_need, compatible)) { + stop_bsock_timer(tid); + return 0; + } + Dmsg1(116, ">stored: %s", sd->msg); + if (sd->recv() <= 0) { + stop_bsock_timer(tid); + return 0; + } + Dmsg1(110, "msg); + stop_bsock_timer(tid); + if (strncmp(sd->msg, SDOKhello, strlen(SDOKhello)) != 0) { + return 0; + } + return 1; +} + +/* + * Authenticate File daemon connection + */ +int authenticate_file_daemon(BSOCK *fd, char *fdname, char *password) +{ + char dirname[MAX_NAME_LENGTH]; + int tls_local_need = BNET_TLS_NONE; + int tls_remote_need = BNET_TLS_NONE; + int compatible = true; + + /* + * Send my name to the File daemon then do authentication + */ + bstrncpy(dirname, fdname, sizeof(dirname)); + bash_spaces(dirname); + /* Timeout Hello after 5 mins */ + btimer_t *tid = start_bsock_timer(fd, 60 * 5); + if (!fd->fsend(FDhello, dirname)) { + stop_bsock_timer(tid); + return 0; + } + if (!cram_md5_respond(fd, password, &tls_remote_need, &compatible) || + !cram_md5_challenge(fd, password, tls_local_need, compatible)) { + stop_bsock_timer(tid); + return 0; + } + Dmsg1(116, ">filed: %s", fd->msg); + if (fd->recv() <= 0) { + stop_bsock_timer(tid); + return 0; + } + Dmsg1(110, "msg); + stop_bsock_timer(tid); + if ((strncmp(fd->msg, FDOKhello, strlen(FDOKhello)) != 0)) { + return 0; + } + return 1; +} diff --git a/examples/nagios/check_bacula/check_bacula.c b/examples/nagios/check_bacula/check_bacula.c new file mode 100644 index 00000000..db0e22d2 --- /dev/null +++ b/examples/nagios/check_bacula/check_bacula.c @@ -0,0 +1,370 @@ +/* + * + * Nagios Plugin check_bacula + * + * Christian Masopust, (c)2005-2012 + */ + +/* + Copyright (C) 2005-2012 Christian Masopust + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free + Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, + MA 02111-1307, USA. 
+ + */ + +#include "bacula.h" +#include "check_bacula.h" + +#define STATE_OK 0 +#define STATE_WARNING 1 +#define STATE_CRITICAL 2 +#define STATE_UNKNOWN 3 + + +/* Imported functions */ +int authenticate_director(BSOCK *s, char *dirname, char *password); +int authenticate_file_daemon(BSOCK *s, char *fdname, char *password); +int authenticate_storage_daemon(BSOCK *s, char* sdname, char *password); + +/* Forward referenced functions */ +void writecmd(monitoritem* item, const char* command); +int docmd(monitoritem* item, const char* command, char *answer); + +/* Static variables */ +static monitoritem mitem; + +/* Data received from DIR/FD/SD */ +static char OKqstatus[] = "%c000 OK .status\n"; + + + +static void usage() +{ + fprintf(stderr, _( +"Copyright (C) 2005 Christian Masopust\n" +"Written by Christian Masopust (2005)\n" +"\nVersion: " VERSION " (" BDATE ") %s %s %s\n\n" +"Usage: check_bacula [-d debug_level] -H host -D daemon -M name -P port\n" +" -H hostname where daemon runs\n" +" -D which daemon to check: dir|sd|fd\n" +" -M name of monitor (as in bacula-*.conf)\n" +" -K password for access to daemon\n" +" -P port where daemon listens\n" +" -dnn set debug level to nn\n" +" -? print this message.\n" +"\n"), HOST_OS, DISTNAME, DISTVER); +} + + +/********************************************************************* + * + * Main Bacula Tray Monitor -- User Interface Program + * + */ +int main(int argc, char *argv[]) +{ + int ch; + DIRRES s_dird; + CLIENT s_filed; + STORE s_stored; + + char host[250]; + char daemon[20]; + char monitorname[100]; + char pw[200]; + int port = 0; + + char answer[1024]; + int retcode = STATE_UNKNOWN; + + unsigned int i, j; + struct MD5Context md5c; + unsigned char signature[16]; + + + struct sigaction sigignore; + sigignore.sa_flags = 0; + sigignore.sa_handler = SIG_IGN; + sigfillset(&sigignore.sa_mask); + sigaction(SIGPIPE, &sigignore, NULL); + + strcpy (pw, ""); + + init_stack_dump(); + my_name_is(argc, argv, "check_bacula"); + textdomain("bacula"); + init_msg(NULL, NULL); + + while ((ch = getopt(argc, argv, "H:D:M:P:K:d:h?")) != -1) { + + switch (ch) { + + case 'H': + strcpy (host, optarg); + break; + + case 'D': + strcpy (daemon, optarg); + break; + + case 'M': + strcpy (monitorname, optarg); + break; + + case 'P': + port = atoi(optarg); + break; + + case 'K': + strcpy (pw, optarg); + break; + + case 'd': + debug_level = atoi(optarg); + if (debug_level <= 0) { + debug_level = 1; + } + break; + + case 'h': + case '?': + default: + usage(); + exit(1); + } + } + argc -= optind; + //argv += optind; + + if (argc) { + usage(); + exit(STATE_UNKNOWN); + } + + lmgr_init_thread(); + + char sig[100]; + MD5Init(&md5c); + MD5Update(&md5c, (unsigned char *) pw, strlen(pw)); + MD5Final(signature, &md5c); + for (i = j = 0; i < sizeof(signature); i++) { + sprintf(&sig[j], "%02x", signature[i]); + j += 2; + } + + + /* director ? 
*/ + if (strcmp (daemon, "dir") == 0) { + + if (port != 0) + s_dird.DIRport = port; + else + s_dird.DIRport = 9101; + + s_dird.address = host; + s_dird.password = sig; + s_dird.hdr.name = monitorname; + + mitem.type = R_DIRECTOR; + mitem.resource = &s_dird; + mitem.D_sock = NULL; + + } else if (strcmp (daemon, "sd") == 0) { + + if (port != 0) + s_stored.SDport = port; + else + s_stored.SDport = 9103; + + s_stored.address = host; + s_stored.password = sig; + s_stored.hdr.name = monitorname; + + mitem.type = R_STORAGE; + mitem.resource = &s_stored; + mitem.D_sock = NULL; + + } else if (strcmp (daemon, "fd") == 0) { + + if (port != 0) + s_filed.FDport = port; + else + s_filed.FDport = 9102; + + s_filed.address = host; + s_filed.password = sig; + s_filed.hdr.name = monitorname; + + mitem.type = R_CLIENT; + mitem.resource = &s_filed; + mitem.D_sock = NULL; + + } else { + + usage(); + exit(1); + } + + + if (mitem.type == R_DIRECTOR) + retcode = docmd(&mitem, ".status dir current\n", answer); + else + retcode = docmd(&mitem, ".status current\n", answer); + + + if (mitem.D_sock) { + mitem.D_sock->signal(BNET_TERMINATE); /* send EOF */ + mitem.D_sock->close(); + } + + printf ("%s\n", answer); + return retcode; +} + + +static int authenticate_daemon(monitoritem* item) { + + DIRRES *d; + CLIENT *f; + STORE *s; + + switch (item->type) { + case R_DIRECTOR: + d = (DIRRES *)item->resource; + return authenticate_director(item->D_sock, d->hdr.name, d->password); + break; + case R_CLIENT: + f = (CLIENT *)item->resource; + return authenticate_file_daemon(item->D_sock, f->hdr.name, f->password); + break; + case R_STORAGE: + s = (STORE *)item->resource; + return authenticate_storage_daemon(item->D_sock, s->hdr.name, s->password); + break; + default: + printf("Error, currentitem is not a Client or a Storage..\n"); + return FALSE; + } +} + + + +int docmd(monitoritem* item, const char* command, char *answer) { + + int stat; + char num; + const char *dname; + + dname = ""; + + if (!item->D_sock) { + + DIRRES* dird; + CLIENT* filed; + STORE* stored; + + switch (item->type) { + case R_DIRECTOR: + dird = (DIRRES*)item->resource; + item->D_sock = new_bsock(); + item->D_sock->connect(NULL, 0, 0, 0, "Director daemon", dird->address, NULL, dird->DIRport, 0); + dname = "Director"; + break; + case R_CLIENT: + filed = (CLIENT*)item->resource; + item->D_sock = new_bsock(); + item->D_sock->connect(NULL, 0, 0, 0, "File daemon", filed->address, NULL, filed->FDport, 0); + dname = "FileDaemon"; + break; + case R_STORAGE: + stored = (STORE*)item->resource; + item->D_sock = new_bsock(); + item->D_sock->connect(NULL, 0, 0, 0, "Storage daemon", stored->address, NULL, stored->SDport, 0); + dname = "StorageDaemon"; + break; + default: + printf("Error, currentitem is not a Client, a Storage or a Director..\n"); + return STATE_UNKNOWN; + } + + if (item->D_sock == NULL) { + sprintf (answer, "BACULA CRITICAL - Cannot connect to %s!", dname); + return STATE_CRITICAL; + } + + if (!authenticate_daemon(item)) { + sprintf (answer, "BACULA CRITICAL - Cannot authenticate to %s: %s", dname, item->D_sock->msg); + item->D_sock = NULL; + return STATE_CRITICAL; + } + + } + + if (command[0] != 0) + writecmd(item, command); + + while(1) { + if ((stat = item->D_sock->recv()) >= 0) { + + /* welcome message of director */ + if ((item->type == R_DIRECTOR) && (strncmp(item->D_sock->msg, "Using ", 6) == 0)) + continue; + + if (sscanf(item->D_sock->msg, OKqstatus, &num) != 1) { + /* Error, couldn't find OK */ + sprintf (answer, "BACULA CRITICAL - %s Status: 
%s", dname, item->D_sock->msg); + return STATE_CRITICAL; + } else { + sprintf (answer, "BACULA OK - %s Status OK", dname); + return STATE_OK; + } + } + else if (stat == BNET_SIGNAL) { + if (item->D_sock->msglen == BNET_EOD) { + strcpy(answer, "BACULA WARNING - << EOD >>"); + return STATE_WARNING; + } + else if (item->D_sock->msglen == BNET_SUB_PROMPT) { + strcpy(answer, "BACULA WARNING - BNET_SUB_PROMPT signal received."); + return STATE_WARNING; + } + else if (item->D_sock->msglen == BNET_HEARTBEAT) { + item->D_sock->signal(BNET_HB_RESPONSE); + } + else { + sprintf(answer, "BACULA WARNING - Unexpected signal received : %s ", bnet_sig_to_ascii(item->D_sock->msglen)); + } + } + else { /* BNET_HARDEOF || BNET_ERROR */ + strcpy(answer, "BACULA CRITICAL - ERROR: BNET_HARDEOF or BNET_ERROR"); + item->D_sock = NULL; + return STATE_CRITICAL; + } + + if (item->D_sock->is_stop()) { + item->D_sock = NULL; + return STATE_WARNING; + } + } +} + +void writecmd(monitoritem* item, const char* command) { + if (item->D_sock) { + item->D_sock->msglen = strlen(command); + pm_strcpy(&item->D_sock->msg, command); + item->D_sock->send(); + } +} diff --git a/examples/nagios/check_bacula/check_bacula.h b/examples/nagios/check_bacula/check_bacula.h new file mode 100644 index 00000000..03811af5 --- /dev/null +++ b/examples/nagios/check_bacula/check_bacula.h @@ -0,0 +1,121 @@ +/* + * Includes specific to the tray monitor + * + * Nicolas Boichat, August MMIV + * + * Version $Id: tray-monitor.h,v 1.6 2004/08/25 12:20:01 nboichat Exp $ + */ +/* + Copyright (C) 2004 Kern Sibbald and John Walker + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of + the License, or (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public + License along with this program; if not, write to the Free + Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, + MA 02111-1307, USA. 
+ + */ + +/* + * Resource codes -- they must be sequential for indexing + */ +enum rescode { + R_MONITOR = 1001, + R_DIRECTOR, + R_CLIENT, + R_STORAGE, + R_FIRST = R_MONITOR, + R_LAST = R_STORAGE /* keep this updated */ +}; + + +/* + * Some resource attributes + */ +enum { + R_NAME = 1020, + R_ADDRESS, + R_PASSWORD, + R_TYPE, + R_BACKUP +}; + +/* Director */ +struct DIRRES { + RES hdr; + int DIRport; /* UA server port */ + char *address; /* UA server address */ + char *password; /* UA server password */ + int enable_ssl; /* Use SSL */ +}; + +/* + * Tray Monitor Resource + * + */ +struct MONITOR { + RES hdr; + int require_ssl; /* Require SSL for all connections */ + MSGS *messages; /* Daemon message handler */ + char *password; /* UA server password */ + utime_t RefreshInterval; /* Status refresh interval */ + utime_t FDConnectTimeout; /* timeout for connect in seconds */ + utime_t SDConnectTimeout; /* timeout in seconds */ +}; + + +/* + * Client Resource + * + */ +struct CLIENT { + RES hdr; + + int FDport; /* Where File daemon listens */ + char *address; + char *password; + int enable_ssl; /* Use SSL */ +}; + +/* + * Store Resource + * + */ +struct STORE { + RES hdr; + + int SDport; /* port where Directors connect */ + char *address; + char *password; + int enable_ssl; /* Use SSL */ +}; + + + +/* Define the Union of all the above + * resource structure definitions. + */ +union URES { + MONITOR res_monitor; + DIRRES res_dir; + CLIENT res_client; + STORE res_store; + RES hdr; +}; + + + +struct monitoritem { + rescode type; /* R_DIRECTOR, R_CLIENT or R_STORAGE */ + void* resource; /* DIRRES*, CLIENT* or STORE* */ + BSOCK *D_sock; +}; diff --git a/examples/nagios/check_bacula_pools.sh b/examples/nagios/check_bacula_pools.sh new file mode 100755 index 00000000..c745adb4 --- /dev/null +++ b/examples/nagios/check_bacula_pools.sh @@ -0,0 +1,159 @@ +#! /bin/sh +# Author : Ludovic Strappazon. l.strappazon@gmail.com +# Copyright 2004, Free Software Foundation Europe e.V. +# Any comment, advice or enhancement are welcome :-) + +PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin +MYSQL="/usr/bin/mysql -u bacula --password=mypassword" +TMP=/tmp +BACULA=/usr/local/bacula + +PROGNAME=`basename $0` +PROGPATH=`echo $0 | sed -e 's,[\\/][^\\/][^\\/]*$,,'` +STATUS="" + +. $PROGPATH/utils.sh + +print_usage() { + echo "Usage: $PROGNAME -P -M -w -c [-S]" +} + +print_help() { + echo "" + print_usage + echo "" + echo "This plugin checks the space available in the pool against the space required for the next scheduled backups" + echo "Example : $PROGNAME -P default -M LTO -w 20 -c 10 will check the default pool, return OK if (available space) > 1,20*(required space), WARNING if 1,20*(required space) > (available space) > 1,10*(required space), and CRITICAL else." + echo "" + echo "With the -S option, it will check the pool named Scratch and return WARNING instead of CRITICAL if the Scratch pool can save the situation." + echo "Example : $PROGNAME -P default -M LTO -w 20 -c 10 -S will check the default pool, return OK if (available space) > 1,20*(required space), WARNING if 1,20*(required space) > (available space) > 1,10*(required space) or if (available space in default and Scratch) > 1,10*(required space) > (available space in default), and CRITICAL else." 
+ echo "" + echo "The evaluation of the space required is done by adding the biggest backups of the same level than the scheduled jobs" + echo "The available space is evaluated by the number of out of retention tapes and the average VolBytes of these Full tapes" + echo "" + echo "The Information Status are : \"Required, Available, Volume Errors\" and \"Will use Scratch pool\" if necessary." + echo "" + echo "I think this plugin should be used in passive mode, and ran by a RunAfterJob" + exit 3 +} + +NB_ARGS=$# +SCRATCH=0 +while getopts :P:M:w:c:hS OPTION +do + case $OPTION in + P) POOL="$OPTARG" + ;; + M) MEDIA_TYPE="$OPTARG" + ;; + S) SCRATCH=1 + ;; + w) WARNING="$OPTARG" + ;; + c) CRITICAL="$OPTARG" + ;; + h) print_help + exit 3 + ;; + *) print_usage + exit 3 + ;; + esac +done +shift $(($OPTIND - 1)) + +if [ "$NB_ARGS" -ne 8 -a "$NB_ARGS" -ne 9 ]; then + print_revision $PROGNAME 25/05/2005 + print_usage + exit 3 +fi + +LAST_CHECK=`ps -ef | grep check_ba[Cc]ula_pools.sh | awk {'print $5'} | uniq | wc -l` +if [ "$LAST_CHECK" -gt 1 ]; then + echo "The last check was not complete, you should increase the check_period." + exit 3 +fi + + NB_VOLUMES_OUT_OF_RETENTION=`$MYSQL << EOF +USE bacula +SELECT COUNT(MediaId) from Media, Pool where Media.PoolId=Pool.PoolId and Pool.Name="$POOL" AND LastWritten <> "0000-00-00 00:00:00" AND UNIX_TIMESTAMP()-UNIX_TIMESTAMP(LastWritten)>Media.VolRetention AND Inchanger = "1"; +EOF +` + NB_VOLUMES_OUT_OF_RETENTION=`echo $NB_VOLUMES_OUT_OF_RETENTION | cut -f 2 -d ' '` + +NB_VOLUMES_ERROR=`$MYSQL << EOF +USE bacula +SELECT COUNT(MediaId) from Media, Pool where Media.PoolId=Pool.PoolId and Pool.Name="$POOL" AND VolStatus="Error" AND Inchanger = "1"; +EOF +` +NB_VOLUMES_ERROR=`echo $NB_VOLUMES_ERROR | cut -f 2 -d ' '` + +AVERAGE_CAPA_VOLUME=`$MYSQL << EOF +USE bacula +SELECT SUM(VolBytes)/COUNT(MediaId) FROM Media where VolStatus="Full" AND MediaType="$MEDIA_TYPE"; +EOF +` +AVERAGE_CAPA_VOLUME=`echo $AVERAGE_CAPA_VOLUME | cut -f 2 -d ' ' | cut -f 1 -d '.'` + +CAPA_VOLUMES_APPEND=`$MYSQL << EOF +USE bacula +SELECT SUM("$AVERAGE_CAPA_VOLUME"-VolBytes) from Media, Pool where Media.PoolId=Pool.PoolId and Pool.Name="$POOL" AND (VolStatus = "Append" OR VolStatus = "Recycle" OR VolStatus = "Purge") AND Inchanger = "1" AND MediaType="$MEDIA_TYPE"; +EOF +` +CAPA_VOLUMES_APPEND=`echo $CAPA_VOLUMES_APPEND | cut -f 2 -d ' '` + +if [ $SCRATCH -eq 1 ] +then +CAPA_VOLUMES_SCRATCH=`$MYSQL << EOF +USE bacula +SELECT SUM("$AVERAGE_CAPA_VOLUME"-VolBytes) from Media, Pool where Media.PoolId=Pool.PoolId and Pool.Name="Scratch" AND VolStatus = "Append" AND Inchanger = "1" AND MediaType="$MEDIA_TYPE"; +EOF +` +CAPA_VOLUMES_SCRATCH=`echo $CAPA_VOLUMES_SCRATCH | cut -f 2 -d ' '` +else +CAPA_VOLUMES_SCRATCH=0 +fi + +echo "st +1 +q" | $BACULA/etc/bconsole | sed -n /Scheduled/,/Running/p | grep Backup | tr -s [:blank:] | tr '[:blank:]' '@' > ${TMP}/Scheduled.txt + +CAPA_REQUIRED=0 +for LINE in `cat ${TMP}/Scheduled.txt` +do + SCHEDULED_JOB=`echo $LINE | awk -F@ '{print $6}'` + LEVEL=`echo $LINE | awk -F@ '{print $1}' | cut -c 1` + +MAX_VOLUME_JOB_FOR_LEVEL=`$MYSQL << EOF +USE bacula +SELECT MAX(JobBytes) from Job, Pool where Level="$LEVEL" AND Job.Name="$SCHEDULED_JOB" AND Job.PoolId=Pool.PoolId AND Pool.Name="$POOL"; +EOF +` +MAX_VOLUME_JOB_FOR_LEVEL=`echo $MAX_VOLUME_JOB_FOR_LEVEL | cut -f 2 -d ' ' ` + +CAPA_REQUIRED=$[CAPA_REQUIRED+MAX_VOLUME_JOB_FOR_LEVEL] +done + +rm ${TMP}/Scheduled.txt + +CAPA_WARNING=`echo $[(WARNING+100)*CAPA_REQUIRED]/100 | bc | cut -f 1 -d '.'` 
+CAPA_CRITICAL=`echo $[(CRITICAL+100)*CAPA_REQUIRED]/100 | bc | cut -f 1 -d '.'` +CAPA_DISP=$[NB_VOLUMES_OUT_OF_RETENTION*AVERAGE_CAPA_VOLUME+CAPA_VOLUMES_APPEND] +CAPA_DISP_INCLUDING_SCRATCH=$[CAPA_DISP+CAPA_VOLUMES_SCRATCH] + +MESSAGE="Required : $[CAPA_REQUIRED/1000000000] Go, available : $[CAPA_DISP/1000000000] Go, Volumes Error : $NB_VOLUMES_ERROR" + +if [ "$CAPA_DISP" -gt $CAPA_WARNING ]; then + echo $MESSAGE + exit 0 +elif [ "$CAPA_DISP" -gt $CAPA_CRITICAL ];then + echo $MESSAGE + exit 1 +elif [ "$CAPA_DISP_INCLUDING_SCRATCH" -gt $CAPA_CRITICAL ];then + MESSAGE="${MESSAGE}. Will use Scratch Pool !" + echo $MESSAGE + exit 1 +else + exit 2 +fi +exit 3 diff --git a/examples/nagios/nagios.txt b/examples/nagios/nagios.txt new file mode 100644 index 00000000..03af1401 --- /dev/null +++ b/examples/nagios/nagios.txt @@ -0,0 +1,205 @@ +Subject: RE: [Bacula-users] monitoring bacula with Nagios +From: "Julian Hein" +To: + +Hi, + +> Anyway: I would really like to write such a check_bacula +> plugin. I just +> don't know what I need to implement to achive a successful +> authentication. And maybe to get some infos out. Like current +> number of +> jobs, runtime or so. + +We are checking bacula with Nagios in two ways: First we check all servers if the neccessary services are running, like the fd on all bacula clients (windows & linux), directors, sd, etc. And the second check is to look in baculas mysql database if there is a successful job for every host within the last 24 hours: + + +1. Check if the fd is running +============================= + +Services: +--------- + +# bacula-fd linux +check_command check_spezial_procs_by_ssh!2:!1:!bacula-fd + +# bacula-sd +check_command check_spezial_procs_by_ssh!2:!1:!bacula-sd + +# bacula-dir +check_command check_spezial_procs_by_ssh!2:!1:!bacula-dir + +# bacula-fd windows +check_command check_nt_service!bacula + +Commands: +--------- + +# check for services by name with ssh +define command { + command_name check_spezial_procs_by_ssh + command_line $USER1$/check_by_ssh -t 60 -H $HOSTADDRESS$ -C "/opt/nagios/libexec/check_procs -w $ARG1$ -c $ARG2$ -C $ARG3$" +} + +# check for the bacula-fd on windows with nsclient +define command { + command_name check_nt_service + command_line $USER1$/check_nt -H $HOSTADDRESS$ -p portno. -s password -v SERVICESTATE -l $ARG1$ +} + +2. Is there a successful job in the database +============================================ + +Services: +--------- + +# bacula jobs +check_command check_bacula_by_ssh!27!1!1 + +Commands: +--------- +The name of our backup jobs have to match the hostname in Nagios. 
So we can check on the backup server, for a job called $HOSTNAME$: + +define command { + command_name check_bacula_by_ssh + command_line $USER1$/check_by_ssh -t 60 -H my.backup.server -C "/opt/nagios/libexec/check_bacula.pl -H $ARG1$ -w $ +ARG2$ -c $ARG3$ -j $HOSTNAME$" +} + +check_bacula.pl: +---------------- + +#!/usr/bin/perl -w +use strict; +use POSIX; +use File::Basename; +use DBI; +use Getopt::Long; +use vars qw( + $opt_help + $opt_job + $opt_critical + $opt_warning + $opt_hours + $opt_usage + $opt_version + $out + $sql + $date_start + $date_stop + $state + $count + ); + +sub print_help(); +sub print_usage(); +sub get_now(); +sub get_date; + +my $progname = basename($0); + +my %ERRORS = ( 'UNKNOWN' => '-1', + 'OK' => '0', + 'WARNING' => '1', + 'CRITICAL' => '2'); + +Getopt::Long::Configure('bundling'); +GetOptions + ( + "c=s" => \$opt_critical, "critical=s" => \$opt_critical, + "w=s" => \$opt_warning, "warning=s" => \$opt_warning, + "H=s" => \$opt_hours, "hours=s" => \$opt_hours, + "j=s" => \$opt_job, "job=s" => \$opt_job, + "h" => \$opt_help, "help" => \$opt_help, + "usage" => \$opt_usage, + "V" => \$opt_version, "version" => \$opt_version + ) || die "Try '$progname --help' for more information.\n"; + +sub print_help() { +print "\n"; +print "PRINT HELP...\n"; +print "\n"; +} + +sub print_usage() { + print "PRINT USAGE...\n"; + print "\n"; +} + +sub get_now() { + my $now = defined $_[0] ? $_[0] : time; + my $out = strftime("%Y-%m-%d %X", localtime($now)); + return($out); +} + +sub get_date { + my $day = shift; + my $now = defined $_[0] ? $_[0] : time; + my $new = $now - ((60*60*1) * $day); + my $out = strftime("%Y-%m-%d %X", localtime($new)); + return ($out); +} + +if ($opt_help) { + print_help(); + exit $ERRORS{'UNKNOWN'}; +} + +if ($opt_usage) { + print_usage(); + exit $ERRORS{'UNKNOWN'}; +} + +if ($opt_version) { + print "$progname 0.0.1\n"; + exit $ERRORS{'UNKNOWN'}; +} + + +if ($opt_job && $opt_warning && $opt_critical) { + my $dsn = "DBI:mysql:database=bacula;host=localhost"; + my $dbh = DBI->connect( $dsn,'root','' ) or die "Error connecting to: '$dsn': $DBI::errstr\n"; + + if ($opt_hours) + { + $date_stop = get_date($opt_hours); + } + else + { + $date_stop = '1970-01-01 01:00:00'; + } + + $date_start = get_now(); + + $sql = "SELECT count(*) as 'count' from Job where (Name='$opt_job') and (JobStatus='T') and (EndTime <> '') and ((EndTime <= '$date_start') and (EndTime >= '$date_stop'));"; + + my $sth = $dbh->prepare($sql) or die "Error preparing statemment",$dbh->errstr; + $sth->execute; + + while (my @row = $sth->fetchrow_array()) { + ($count) = @row; + } +$state = 'OK'; +if ($count<$opt_warning) { $state='WARNING' } +if ($count<$opt_critical) { $state='CRITICAL' } + +print "Bacula $state: Found $count successfull jobs\n"; +exit $ERRORS{$state}; + $dbh->disconnect(); +} + else { + print_usage(); + } + +Well, this script is not really finished, but it works for us. Maybe it is helpful for you. If somebody makes enhancements, I would be happy to recieve a copy. + +cu, +Julian + +-- +Julian Hein NETWAYS GmbH +Managing Director Deutschherrnstr. 47a +Fon.0911/92885-0 D-90429 Nrnberg +Fax.0911/92885-31 +jhein@netways.de www.netways.de + diff --git a/examples/recover.pl b/examples/recover.pl new file mode 100755 index 00000000..1476f424 --- /dev/null +++ b/examples/recover.pl @@ -0,0 +1,2886 @@ +#!/usr/bin/perl -w + +=head1 NAME + +recover.pl - a script to provide an interface for restore files similar +to Legatto Networker's recover program. 
+ +=cut + +use strict; +use Getopt::Std; +use DBI; +use Term::ReadKey; +use Term::ReadLine; +use Fcntl ':mode'; +use Time::ParseDate; +use Date::Format; +use Text::ParseWords; + +# Location of config file. +my $CONF_FILE = "$ENV{HOME}/.recoverrc"; +my $HIST_FILE = "$ENV{HOME}/.recover.hist"; + +######################################################################## +### Queries needed to gather files from directory. +######################################################################## + +my %queries = ( + 'postgres' => { + 'dir' => + "( + select + distinct on (name) + Filename.name, + Path.path, + File.lstat, + File.fileid, + File.fileindex, + Job.jobtdate - ? as visible, + Job.jobid + from + Path, + File, + Filename, + Job + where + clientid = ? and + Job.name = ? and + Job.jobtdate <= ? and + Path.path = ? and + File.pathid = Path.pathid and + Filename.filenameid = File.filenameid and + Filename.name != '' and + File.jobid = Job.jobid + order by + name, + jobid desc + ) + union + ( + select + distinct on (name) + substring(Path.path from ? + 1) as name, + substring(Path.path from 1 for ?) as path, + File.lstat, + File.fileid, + File.fileindex, + Job.jobtdate - ? as visible, + Job.jobid + from + Path, + File, + Filename, + Job + where + clientid = ? and + Job.name = ? and + Job.jobtdate <= ? and + File.jobid = Job.jobid and + Filename.name = '' and + Filename.filenameid = File.filenameid and + File.pathid = Path.pathid and + Path.path ~ ('^' || ? || '[^/]*/\$') + order by + name, + jobid desc + ) + order by + name + ", + 'sel' => + "( + select + distinct on (name) + Path.path || Filename.name as name, + File.fileid, + File.lstat, + File.fileindex, + Job.jobid + from + Path, + File, + Filename, + Job + where + clientid = ? and + Job.name = ? and + Job.jobtdate <= ? and + Job.jobtdate >= ? and + Path.path like ? || '%' and + File.pathid = Path.pathid and + Filename.filenameid = File.filenameid and + Filename.name != '' and + File.jobid = Job.jobid + order by + name, jobid desc + ) + union + ( + select + distinct on (name) + Path.path as name, + File.fileid, + File.lstat, + File.fileindex, + Job.jobid + from + Path, + File, + Filename, + Job + where + clientid = ? and + Job.name = ? and + Job.jobtdate <= ? and + Job.jobtdate >= ? and + File.jobid = Job.jobid and + Filename.name = '' and + Filename.filenameid = File.filenameid and + File.pathid = Path.pathid and + Path.path like ? || '%' + order by + name, jobid desc + ) + ", + 'cache' => + "select + distinct on (path, name) + Path.path, + Filename.name, + File.fileid, + File.lstat, + File.fileindex, + Job.jobtdate - ? as visible, + Job.jobid + from + Path, + File, + Filename, + Job + where + clientid = ? and + Job.name = ? and + Job.jobtdate <= ? and + Job.jobtdate >= ? and + File.pathid = Path.pathid and + File.filenameid = Filename.filenameid and + File.jobid = Job.jobid + order by + path, name, jobid desc + ", + 'ver' => + "select + Path.path, + Filename.name, + File.fileid, + File.fileindex, + File.lstat, + Job.jobtdate, + Job.jobid, + Job.jobtdate - ? as visible, + Media.volumename + from + Job, Path, Filename, File, JobMedia, Media + where + File.pathid = Path.pathid and + File.filenameid = Filename.filenameid and + File.jobid = Job.jobid and + File.Jobid = JobMedia.jobid and + File.fileindex >= JobMedia.firstindex and + File.fileindex <= JobMedia.lastindex and + Job.jobtdate <= ? and + JobMedia.mediaid = Media.mediaid and + Path.path = ? and + Filename.name = ? and + Job.clientid = ? and + Job.name = ? 
+ order by job + " + }, + 'mysql' => { + 'dir' => + " + ( + select + distinct(Filename.name), + Path.path, + File.lstat, + File.fileid, + File.fileindex, + Job.jobtdate - ? as visible, + Job.jobid + from + Path, + File, + Filename, + Job + where + clientid = ? and + Job.name = ? and + Job.jobtdate <= ? and + Path.path = ? and + File.pathid = Path.pathid and + Filename.filenameid = File.filenameid and + Filename.name != '' and + File.jobid = Job.jobid + group by + name + order by + name, + jobid desc + ) + union + ( + select + distinct(substring(Path.path from ? + 1)) as name, + substring(Path.path from 1 for ?) as path, + File.lstat, + File.fileid, + File.fileindex, + Job.jobtdate - ? as visible, + Job.jobid + from + Path, + File, + Filename, + Job + where + clientid = ? and + Job.name = ? and + Job.jobtdate <= ? and + File.jobid = Job.jobid and + Filename.name = '' and + Filename.filenameid = File.filenameid and + File.pathid = Path.pathid and + Path.path rlike concat('^', ?, '[^/]*/\$') + group by + name + order by + name, + jobid desc + ) + order by + name + ", + 'sel' => + " + ( + select + distinct(concat(Path.path, Filename.name)) as name, + File.fileid, + File.lstat, + File.fileindex, + Job.jobid + from + Path, + File, + Filename, + Job + where + Job.clientid = ? and + Job.name = ? and + Job.jobtdate <= ? and + Job.jobtdate >= ? and + Path.path like concat(?, '%') and + File.pathid = Path.pathid and + Filename.filenameid = File.filenameid and + Filename.name != '' and + File.jobid = Job.jobid + group by + path, name + order by + name, + jobid desc + ) + union + ( + select + distinct(Path.path) as name, + File.fileid, + File.lstat, + File.fileindex, + Job.jobid + from + Path, + File, + Filename, + Job + where + Job.clientid = ? and + Job.name = ? and + Job.jobtdate <= ? and + Job.jobtdate >= ? and + File.jobid = Job.jobid and + Filename.name = '' and + Filename.filenameid = File.filenameid and + File.pathid = Path.pathid and + Path.path like concat(?, '%') + group by + path + order by + name, + jobid desc + ) + ", + 'cache' => + "select + distinct path, + Filename.name, + File.fileid, + File.lstat, + File.fileindex, + Job.jobtdate - ? as visible, + Job.jobid + from + Path, + File, + Filename, + Job + where + clientid = ? and + Job.name = ? and + Job.jobtdate <= ? and + Job.jobtdate >= ? and + File.pathid = Path.pathid and + File.filenameid = Filename.filenameid and + File.jobid = Job.jobid + group by + path, name + order by + path, name, jobid desc + ", + 'ver' => + "select + Path.path, + Filename.name, + File.fileid, + File.fileindex, + File.lstat, + Job.jobtdate, + Job.jobid, + Job.jobtdate - ? as visible, + Media.volumename + from + Job, Path, Filename, File, JobMedia, Media + where + File.pathid = Path.pathid and + File.filenameid = Filename.filenameid and + File.jobid = Job.jobid and + File.Jobid = JobMedia.jobid and + File.fileindex >= JobMedia.firstindex and + File.fileindex <= JobMedia.lastindex and + Job.jobtdate <= ? and + JobMedia.mediaid = Media.mediaid and + Path.path = ? and + Filename.name = ? and + Job.clientid = ? and + Job.name = ? 
+ order by job + " + } +); + +############################################################################ +### Command lists for help and file completion +############################################################################ + +my %COMMANDS = ( + 'add' => '(add files) - Add files recursively to restore list', + 'bootstrap' => 'print bootstrap file', + 'cd' => '(cd dir) - Change working directory', + 'changetime', '(changetime date/time) - Change database view to date', + 'client' => '(client client-name) - change client to view', + 'debug' => 'toggle debug flag', + 'delete' => 'Remove files from restore list.', + 'help' => 'Display this list', + 'history', 'Print command history', + 'info', '(info files) - Print stat and tape information about files', + 'ls' => '(ls [opts] files) - List files in current directory', + 'pwd' => 'Print current working directory', + 'quit' => 'Exit program', + 'recover', 'Create table for bconsole to use in recover', + 'relocate', '(relocate dir) - specify new location for recovered files', + 'show', '(show item) - Display information about item', + 'verbose' => 'toggle verbose flag', + 'versions', '(versions files) - Show all versions of file on tape', + 'volumes', 'Show volumes needed for restore.' +); + +my %SHOW = ( + 'cache' => 'Display cached directories', + 'catalog' => 'Display name of current catalog from config file', + 'client' => 'Display current client', + 'clients' => 'Display clients available in this catalog', + 'restore' => 'Display information about pending restore', + 'volumes' => 'Show volumes needed for restore.' +); + +############################################################################## +### Read config and command line. +############################################################################## + +my %catalogs; +my $catalog; # Current catalog + +## Globals + +my %restore; +my $rnum = 0; +my $rbytes = 0; +my $debug = 0; +my $verbose = 0; +my $rtime; +my $cwd; +my $lwd; +my $files; +my $restore_to = '/'; +my $start_dir; +my $preload; +my $dircache = {}; +my $usecache = 1; + +=head1 SYNTAX + +B [B<-b> I] [B<-c> I B<-j> I] +[B<-i> I] [B<-p>] [B<-t> I] + +B [B<-h>] + +Most of the command line arguments can be specified in the init file +B<$HOME/.recoverrc> (see CONFIG FILE FORMAT below). The command +line arguments will override the options in the init file. If no +I is specified, the first one found in the init file will +be used. + +=head1 DESCRIPTION + +B will read the specified catalog and provide a shell like +environment from which a time based view of the specified client/jobname +and be exampled and selected for restoration. + +The command line option B<-b> specified the DBI compatible connect +script to use when connecting to the catalog database. The B<-c> and +B<-j> options specify the client and jobname respectively to view from +the catalog database. The B<-i> option will set the initial directory +you are viewing to the specified directory. if B<-i> is not specified, +it will default to /. You can set the initial time to view the catalog +from using the B<-t> option. + +The B<-p> option will pre-load the entire catalog into memory. This +could take a lot of memory, so use it with caution. + +The B<-d> option turns on debugging and the B<-v> option turns on +verbose output. + +By specifying a I, the default options for connecting to +the catalog database will be taken from the section of the init file +specified by that name. + +The B<-h> option will display this document. 
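+
+As a rough illustration only (the database host, client name and job
+name below are made-up examples rather than defaults of this script),
+an invocation might look like:
+
+    recover.pl -b 'dbi:Pg:dbname=bacula;host=backuphost' \
+               -c myclient-fd -j MyClientJob -i /home -t yesterday
+
+This connects to the named catalog database, shows the files backed up
+for client myclient-fd by the job MyClientJob as of yesterday, and
+starts the session in /home.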
+ +In order for this program to have a chance of not being painfully slow, +the following indexs should be added to your database. + +B + +B + +=cut + +my $vars = {}; +getopts("c:b:hi:j:pt:vd", $vars) || die "Usage: bad arguments\n"; + +if ($vars->{'h'}) { + system("perldoc $0"); + exit; +} + +$preload = $vars->{'p'} if ($vars->{'p'}); +$debug = $vars->{'d'} if ($vars->{'d'}); +$verbose = $vars->{'v'} if ($vars->{'v'}); + +# Set initial time to view the catalog + +if ($vars->{'t'}) { + $rtime = parsedate($vars->{'t'}, FUZZY => 1, PREFER_PAST => 1); +} +else { + $rtime = time(); +} + +my $dbconnect; +my $username = ""; +my $password = ""; +my $db; +my $client; +my $jobname; +my $jobs; +my $ftime; + +my $cstr; + +# Read config file (if available). + +&read_config($CONF_FILE); + +# Set defaults + +$catalog = $ARGV[0] if (@ARGV); + +if ($catalog) { + $cstr = ${catalogs{$catalog}}->{'client'} + if (${catalogs{$catalog}}->{'client'}); + + $jobname = $catalogs{$catalog}->{'jobname'} + if ($catalogs{$catalog}->{'jobname'}); + + $dbconnect = $catalogs{$catalog}->{'dbconnect'} + if ($catalogs{$catalog}->{'dbconnect'}); + + $username = $catalogs{$catalog}->{'username'} + if ($catalogs{$catalog}->{'username'}); + + $password = $catalogs{$catalog}->{'password'} + if ($catalogs{$catalog}->{'password'}); + + $start_dir = $catalogs{$catalog}->{'cd'} + if ($catalogs{$catalog}->{'cd'}); + + $preload = $catalogs{$catalog}->{'preload'} + if ($catalogs{$catalog}->{'preload'} && !defined($vars->{'p'})); + + $verbose = $catalogs{$catalog}->{'verbose'} + if ($catalogs{$catalog}->{'verbose'} && !defined($vars->{'v'})); + + $debug = $catalogs{$catalog}->{'debug'} + if ($catalogs{$catalog}->{'debug'} && !defined($vars->{'d'})); +} + +#### Command line overries config file + +$start_dir = $vars->{'i'} if ($vars->{'i'}); +$start_dir = '/' if (!$start_dir); + +$start_dir .= '/' if (substr($start_dir, length($start_dir) - 1, 1) ne '/'); + +if ($vars->{'b'}) { + $dbconnect = $vars->{'b'}; +} + +die "You must supply a db connect string.\n" if (!defined($dbconnect)); + +if ($dbconnect =~ /^dbi:Pg/) { + $db = 'postgres'; +} +elsif ($dbconnect =~ /^dbi:mysql/) { + $db = 'mysql'; +} +else { + die "Unknown database type specified in $dbconnect\n"; +} + +# Initialize database connection + +print STDERR "DBG: Connect using: $dbconnect\n" if ($debug); + +my $dbh = DBI->connect($dbconnect, $username, $password) || + die "Can't open bacula database\nDatabase connect string '$dbconnect'"; + +die "Client id required.\n" if (!($cstr || $vars->{'c'})); + +$cstr = $vars->{'c'} if ($vars->{'c'}); +$client = &lookup_client($cstr); + +# Set job information +$jobname = $vars->{'j'} if ($vars->{'j'}); + +die "You need to specify a job name.\n" if (!$jobname); + +&setjob; + +die "Failed to set client\n" if (!$client); + +# Prepare our query +my $dir_sth = $dbh->prepare($queries{$db}->{'dir'}) + || die "Can't prepare $queries{$db}->{'dir'}\n"; + +my $sel_sth = $dbh->prepare($queries{$db}->{'sel'}) + || die "Can't prepare $queries{$db}->{'sel'}\n"; + +my $ver_sth = $dbh->prepare($queries{$db}->{'ver'}) + || die "Can't prepare $queries{$db}->{'ver'}\n"; + +my $clients; + +# Initialize readline. 
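+# Term::ReadLine picks up Term::ReadLine::Gnu when it is installed,
+# which is what enables the command completion configured further below
+# (see the DEPENDENCIES section at the end of this script).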
+my $term = new Term::ReadLine('Bacula Recover'); +$term->ornaments(0); + +my $readline = $term->ReadLine; +my $tty_attribs = $term->Attribs; + +# Needed for base64 decode + +my @base64_digits = ( + 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', + 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', + 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', + 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', + '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', '/' +); +my @base64_map = (0) x 128; + +for (my $i=0; $i<64; $i++) { + $base64_map[ord($base64_digits[$i])] = $i; +} + +############################################################################## +### Support routines +############################################################################## + +=head1 FILES + +B<$HOME/.recoverrc> Configuration file for B. + +=head1 CONFIG FILE FORMAT + +The config file will allow you to specify the defaults for your +catalog(s). Each catalog definition starts with B<[>IB<]>. +Blank lines and lines starting with # are ignored. + +The first catalog specified will be used as the default catalog. + +All values are specified in I B<=> I format. You can +specify the following Is for each catalog. + +=cut + +sub read_config { + my $conf_file = shift; + my $c; + + # No nothing if config file can't be read. + + if (-r $conf_file) { + open(CONF, "<$conf_file") || die "$!: Can't open $conf_file\n"; + + while () { + chomp; + # Skip comments and blank links + next if (/^\s*#/); + next if (/^\s*$/); + + if (/^\[(\w+)\]$/) { + $c = $1; + $catalog = $c if (!$catalog); + + if ($catalogs{$c}) { + die "Duplicate catalog definition in $conf_file\n"; + } + + $catalogs{$c} = {}; + } + elsif (!$c) { + die "Conf file must start with catalog definition [catname]\n"; + } + else { + + if (/^(\w+)\s*=\s*(.*)/) { + my $item = $1; + my $value = $2; + +=head2 client + +The name of the default client to view when connecting to this +catalog. This can be changed later with the B command. + +=cut + + if ($item eq 'client') { + $catalogs{$c}->{'client'} = $value; + } + +=head2 dbconnect + +The DBI compatible database string to use to connect to this catalog. + +=over 4 + +=item B + +dbi:Pg:dbname=bacula;host=backuphost + +=back + +=cut + elsif ($item eq 'dbconnect') { + $catalogs{$c}->{'dbconnect'} = $value; + } + +=head2 jobname + +The name of the default job to view when connecting to the catalog. This +can be changed later with the B command. + +=cut + elsif ($item eq 'jobname') { + $catalogs{$c}->{'jobname'} = $value; + } + +=head2 password + +The password to use when connecing to the catalog database. + +=cut + elsif ($item eq 'password') { + $catalogs{$c}->{'password'} = $value; + } + +=head2 preload + +Set the preload flag. A preload flag of 1 or on will load the entire +catalog when recover.pl is start. This is a memory hog, so use with +caution. + +=cut + elsif ($item eq 'preload') { + + if ($value =~ /^(1|on)$/i) { + $catalogs{$c}->{'preload'} = 1; + } + elsif ($value =~ /^(0|off)$/i) { + $catalogs{$c}->{'preload'} = 0; + } + else { + die "$value: Unknown value for preload.\n"; + } + + } + +=head2 username + +The username to use when connecing to the catalog database. 
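+
+Putting the options together, a minimal B<$HOME/.recoverrc> might look
+like this (the catalog name, host, credentials, client and job name are
+made-up examples):
+
+    [mycatalog]
+    dbconnect = dbi:Pg:dbname=bacula;host=backuphost
+    username = bacula
+    password = secret
+    client = myclient-fd
+    jobname = MyClientJob
+    preload = off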
+ +=cut + elsif ($item eq 'username') { + $catalogs{$c}->{'username'} = $value; + } + else { + die "Unknown opton $item in $conf_file.\n"; + } + + } + else { + die "Bad line $_ in $conf_file.\n"; + } + + } + + } + + close(CONF); + } + +} + +sub create_file_entry { + my $name = shift; + my $fileid = shift; + my $fileindex = shift; + my $jobid = shift; + my $visible = shift; + my $lstat = shift; + + print STDERR "DBG: name = $name\n" if ($debug); + print STDERR "DBG: fileid = $fileid\n" if ($debug); + print STDERR "DBG: fileindex = $fileindex\n" if ($debug); + print STDERR "DBG: jobid = $jobid\n" if ($debug); + print STDERR "DBG: visible = $visible\n" if ($debug); + print STDERR "DBG: lstat = $lstat\n" if ($debug); + + my $data = { + fileid => $fileid, + fileindex => $fileindex, + jobid => $jobid, + visible => ($visible >= 0) ? 1 : 0 + }; + + # decode file stat + my @stat = (); + + foreach my $s (split(' ', $lstat)) { + print STDERR "DBG: Add $s to stat array.\n" if ($debug); + push(@stat, from_base64($s)); + } + + $data->{'lstat'} = { + 'st_dev' => $stat[0], + 'st_ino' => $stat[1], + 'st_mode' => $stat[2], + 'st_nlink' => $stat[3], + 'st_uid' => $stat[4], + 'st_gid' => $stat[5], + 'st_rdev' => $stat[6], + 'st_size' => $stat[7], + 'st_blksize' => $stat[8], + 'st_blocks' => $stat[9], + 'st_atime' => $stat[10], + 'st_mtime' => $stat[11], + 'st_ctime' => $stat[12], + 'LinkFI' => $stat[13], + 'st_flags' => $stat[14], + 'data_stream' => $stat[15] + }; + + # Create mode string. + my $sstr = &mode2str($stat[2]); + $data->{'lstat'}->{'statstr'} = $sstr; + return $data; +} +# Read directory data, return hash reference. + +sub fetch_dir { + my $dir = shift; + + return $dircache->{$dir} if ($dircache->{$dir}); + + print "$dir not cached, fetching from database.\n" if ($verbose); + my $data = {}; + my $fmax = 0; + + my $dl = length($dir); + + print STDERR "? - 1: ftime = $ftime\n" if ($debug); + print STDERR "? - 2: client = $client\n" if ($debug); + print STDERR "? - 3: jobname = $jobname\n" if ($debug); + print STDERR "? - 4: rtime = $rtime\n" if ($debug); + print STDERR "? - 5: dir = $dir\n" if ($debug); + print STDERR "? - 6, 7: dl = $dl, $dl\n" if ($debug); + print STDERR "? - 8: ftime = $ftime\n" if ($debug); + print STDERR "? - 9: client = $client\n" if ($debug); + print STDERR "? - 10: jobname = $jobname\n" if ($debug); + print STDERR "? - 11: rtime = $rtime\n" if ($debug); + print STDERR "? 
- 12: dir = $dir\n" if ($debug); + + print STDERR "DBG: Execute - $queries{$db}->{'dir'}\n" if ($debug); + $dir_sth->execute( + $ftime, + $client, + $jobname, + $rtime, + $dir, + $dl, $dl, + $ftime, + $client, + $jobname, + $rtime, + $dir + ) || die "Can't execute $queries{$db}->{'dir'}\n"; + + while (my $ref = $dir_sth->fetchrow_hashref) { + my $file = $$ref{name}; + print STDERR "DBG: File $file found in database.\n" if ($debug); + my $l = length($file); + $fmax = $l if ($l > $fmax); + + $data->{$file} = &create_file_entry( + $file, + $ref->{'fileid'}, + $ref->{'fileindex'}, + $ref->{'jobid'}, + $ref->{'visible'}, + $ref->{'lstat'} + ); + } + + return undef if (!$fmax); + + $dircache->{$dir} = $data if ($usecache); + return $data; +} + +sub cache_catalog { + print "Loading entire catalog, please wait...\n"; + my $sth = $dbh->prepare($queries{$db}->{'cache'}) + || die "Can't prepare $queries{$db}->{'cache'}\n"; + print STDERR "DBG: Execute - $queries{$db}->{'cache'}\n" if ($debug); + $sth->execute($ftime, $client, $jobname, $rtime, $ftime) + || die "Can't execute $queries{$db}->{'cache'}\n"; + + print "Query complete, building catalog cache...\n" if ($verbose); + + while (my $ref = $sth->fetchrow_hashref) { + my $dir = $ref->{path}; + my $file = $ref->{name}; + print STDERR "DBG: File $dir$file found in database.\n" if ($debug); + + next if ($dir eq '/' and $file eq ''); # Skip data for / + + # Rearrange directory + + if ($file eq '' and $dir =~ m|(.*/)([^/]+/)$|) { + $dir = $1; + $file = $2; + } + + my $data = &create_file_entry( + $file, + $ref->{'fileid'}, + $ref->{'fileindex'}, + $ref->{'jobid'}, + $ref->{'visible'}, + $ref->{'lstat'} + ); + + $dircache->{$dir} = {} if (!$dircache->{$dir}); + $dircache->{$dir}->{$file} = $data; + } + + $sth->finish(); +} + +# Break a path up into dir and file. + +sub path_parts { + my $path = shift; + my $fqdir; + my $dir; + my $file; + + if (substr($path, 0, 1) eq '/') { + + # Find dir vs. file + if ($path =~ m|^(/.*/)([^/]*$)|) { + $fqdir = $dir = $1; + $file = $2; + } + else { # Must be in / + $fqdir = $dir = '/'; + $file = substr($path, 1); + } + + print STDERR "DBG: / Dir - $dir; file = $file\n" if ($debug); + } + # relative path + elsif ($path =~ m|^(.*/)([^/]*)$|) { + $fqdir = "$cwd$1"; + $dir = $1; + $file = $2; + print STDERR "DBG: Dir - $dir; file = $file\n" if ($debug); + } + # File is in our current directory. 
+ else { + $fqdir = $cwd; + $dir = ''; + $file = $path; + print STDERR "DBG: Set dir to $dir\n" if ($debug); + } + + return ($fqdir, $dir, $file); +} + +sub lookup_client { + my $c = shift; + + if (!$clients) { + $clients = {}; + my $query = "select clientid, name from Client"; + my $sth = $dbh->prepare($query) || die "Can't prepare $query\n"; + $sth->execute || die "Can't execute $query\n"; + + while (my $ref = $sth->fetchrow_hashref) { + $clients->{$ref->{'name'}} = $ref->{'clientid'}; + } + + $sth->finish; + } + + if ($c !~ /^\d+$/) { + + if ($clients->{$c}) { + $c = $clients->{$c}; + } + else { + warn "Could not find client $c\n"; + $c = $client; + } + + } + + return $c; +} + +sub setjob { + + if (!$jobs) { + $jobs = {}; + my $query = "select distinct name from Job order by name"; + my $sth = $dbh->prepare($query) || die "Can't prepare $query\n"; + $sth->execute || die "Can't execute $query\n"; + + while (my $ref = $sth->fetchrow_hashref) { + $jobs->{$$ref{'name'}} = $$ref{'name'}; + } + + $sth->finish; + } + + my $query = "select + jobtdate + from + Job + where + jobtdate <= $rtime and + name = '$jobname' and + level = 'F' + order by jobtdate desc + limit 1 + "; + + my $sth = $dbh->prepare($query) || die "Can't prepare $query\n"; + $sth->execute || die "Can't execute $query\n"; + + if ($sth->rows == 1) { + my $ref = $sth->fetchrow_hashref; + $ftime = $$ref{jobtdate}; + } + else { + warn "Could not find full backup. Setting full time to 0.\n"; + $ftime = 0; + } + + $sth->finish; +} + +sub select_files { + my $mark = shift; + my $opts = shift; + my $dir = shift; + my @flist = @_; + + if (!@flist) { + + if ($cwd eq '/') { + my $finfo = &fetch_dir('/'); + @flist = keys %$finfo; + } + else { + @flist = ($cwd); + } + + } + + foreach my $f (@flist) { + $f =~ s|/+$||; + my $path = (substr($f, 0, 1) eq '/') ? $f : "$dir$f"; + my ($fqdir, $dir, $file) = &path_parts($path); + my $finfo = &fetch_dir($fqdir); + + if (!$finfo->{$file}) { + + if (!$finfo->{"$file/"}) { + warn "$f: File not found.\n"; + next; + } + + $file .= '/'; + } + + my $info = $finfo->{$file}; + + my $fid = $info->{'fileid'}; + my $fidx = $info->{'fileindex'}; + my $jid = $info->{'jobid'}; + my $size = $info->{'lstat'}->{'st_size'}; + + if ($opts->{'all'} || $info->{'visible'}) { + print STDERR "DBG: $file - $size bytes\n" + if ($debug); + + if ($mark) { + + if (!$restore{$fid}) { + print "Adding $fqdir$file\n" if (!$opts->{'quiet'}); + $restore{$fid} = [$jid, $fidx]; + $rnum++; + $rbytes += $size; + } + + } + else { + + if ($restore{$fid}) { + print "Removing $fqdir$file\n" if (!$opts->{'quiet'}); + delete $restore{$fid}; + $rnum--; + $rbytes -= $size; + } + + } + + if ($file =~ m|/$|) { + + # Use preloaded files if we already retrieved them. + if ($preload) { + my $newdir = "$dir$file"; + my $finfo = &fetch_dir($newdir); + &select_files($mark, $opts, $newdir, keys %$finfo); + next; + } + else { + my $newdir = "$fqdir$file"; + my $begin = ($opts->{'all'}) ? 
0 : $ftime; + + print STDERR "DBG: Execute - $queries{$db}->{'sel'}\n" + if ($debug); + + $sel_sth->execute( + $client, + $jobname, + $rtime, + $begin, + $newdir, + $client, + $jobname, + $rtime, + $begin, + $newdir + ) || die "Can't execute $queries{$db}->{'sel'}\n"; + + while (my $ref = $sel_sth->fetchrow_hashref) { + my $file = $$ref{'name'}; + my $fid = $$ref{'fileid'}; + my $fidx = $$ref{'fileindex'}; + my $jid = $$ref{'jobid'}; + my @stat_enc = split(' ', $$ref{'lstat'}); + my $size = &from_base64($stat_enc[7]); + + if ($mark) { + + if (!$restore{$fid}) { + print "Adding $file\n" if (!$opts->{'quiet'}); + $restore{$fid} = [$jid, $fidx]; + $rnum++; + $rbytes += $size; + } + + } + else { + + if ($restore{$fid}) { + print "Removing $file\n" if (!$opts->{'quiet'}); + delete $restore{$fid}; + $rnum--; + $rbytes -= $size; + } + + } + + } + + } + + } + + } + + } + +} + +# Expand shell wildcards + +sub expand_files { + my $path = shift; + my ($fqdir, $dir, $file) = &path_parts($path); + my $finfo = &fetch_dir($fqdir); + return ($path) if (!$finfo); + + my $pat = "^$file\$"; + + # Add / for dir match + my $dpat = $file; + $dpat =~ s|/+$||; + $dpat = "^$dpat/\$"; + + my @match; + + $pat =~ s/\./\\./g; + $dpat =~ s/\./\\./g; + $pat =~ s/\?/./g; + $dpat =~ s/\?/./g; + $pat =~ s/\*/.*/g; + $dpat =~ s/\*/.*/g; + + foreach my $f (sort keys %$finfo) { + + if ($f =~ /$pat/) { + push (@match, ($fqdir eq $cwd) ? $f : "$fqdir$f"); + } + elsif ($f =~ /$dpat/) { + push (@match, ($fqdir eq $cwd) ? $f : "$fqdir$f"); + } + + } + + return ($path) if (!@match); + return @match; +} + +sub expand_dirs { + my $path = shift; + my ($fqdir, $dir, $file) = &path_parts($path, 1); + + print STDERR "Expand $path\n" if ($debug); + + my $finfo = &fetch_dir($fqdir); + return ($path) if (!$finfo); + + $file =~ s|/+$||; + + my $pat = "^$file/\$"; + my @match; + + $pat =~ s/\./\\./g; + $pat =~ s/\?/./g; + $pat =~ s/\*/.*/g; + + foreach my $f (sort keys %$finfo) { + print STDERR "Match $f to $pat\n" if ($debug); + push (@match, ($fqdir eq $cwd) ? $f : "$fqdir$f") if ($f =~ /$pat/); + } + + return ($path) if (!@match); + return @match; +} + +sub mode2str { + my $mode = shift; + my $sstr = ''; + + if (S_ISDIR($mode)) { + $sstr = 'd'; + } + elsif (S_ISCHR($mode)) { + $sstr = 'c'; + } + elsif (S_ISBLK($mode)) { + $sstr = 'b'; + } + elsif (S_ISREG($mode)) { + $sstr = '-'; + } + elsif (S_ISFIFO($mode)) { + $sstr = 'f'; + } + elsif (S_ISLNK($mode)) { + $sstr = 'l'; + } + elsif (S_ISSOCK($mode)) { + $sstr = 's'; + } + else { + $sstr = '?'; + } + + $sstr .= ($mode&S_IRUSR) ? 'r' : '-'; + $sstr .= ($mode&S_IWUSR) ? 'w' : '-'; + $sstr .= ($mode&S_IXUSR) ? + (($mode&S_ISUID) ? 's' : 'x') : + (($mode&S_ISUID) ? 'S' : '-'); + $sstr .= ($mode&S_IRGRP) ? 'r' : '-'; + $sstr .= ($mode&S_IWGRP) ? 'w' : '-'; + $sstr .= ($mode&S_IXGRP) ? + (($mode&S_ISGID) ? 's' : 'x') : + (($mode&S_ISGID) ? 'S' : '-'); + $sstr .= ($mode&S_IROTH) ? 'r' : '-'; + $sstr .= ($mode&S_IWOTH) ? 'w' : '-'; + $sstr .= ($mode&S_IXOTH) ? + (($mode&S_ISVTX) ? 't' : 'x') : + (($mode&S_ISVTX) ? 'T' : '-'); + + return $sstr; +} + +# Base 64 decoder +# Algorithm copied from bacula source + +sub from_base64 { + my $where = shift; + my $val = 0; + my $i = 0; + my $neg = 0; + + if (substr($where, 0, 1) eq '-') { + $neg = 1; + $where = substr($where, 1); + } + + while ($where ne '') { + $val <<= 6; + my $d = substr($where, 0, 1); + #print STDERR "\n$d - " . ord($d) . " - " . $base64_map[ord($d)] . 
"\n"; + $val += $base64_map[ord(substr($where, 0, 1))]; + $where = substr($where, 1); + } + + return $val; +} + +### Command completion code + +sub get_match { + my @m = @_; + my $r = ''; + + for (my $i = 0, my $matched = 1; $i < length($m[0]) && $matched; $i++) { + my $c = substr($m[0], $i, 1); + + for (my $j = 1; $j < @m; $j++) { + + if ($c ne substr($m[$j], $i, 1)) { + $matched = 0; + last; + } + + } + + $r .= $c if ($matched); + } + + return $r; +} + +sub complete { + my $text = shift; + my $line = shift; + my $start = shift; + my $end = shift; + + $tty_attribs->{'completion_append_character'} = ' '; + $tty_attribs->{completion_entry_function} = \&nocomplete; + print STDERR "\nDBG: text - $text; line - $line; start - $start; end = $end\n" + if ($debug); + + # Complete command if we are at start of line. + + if ($start == 0 || substr($line, 0, $start) =~ /^\s*$/) { + my @list = grep (/^$text/, sort keys %COMMANDS); + return () if (!@list); + my $match = (@list > 1) ? &get_match(@list) : ''; + return $match, @list; + } + else { + # Count arguments + my $cstr = $line; + $cstr =~ s/^\s+//; # Remove leading spaces + + my ($cmd, @args) = shellwords($cstr); + return () if (!defined($cmd)); + + # Complete dirs for cd + if ($cmd eq 'cd') { + return () if (@args > 1); + return &complete_files($text, 1); + } + # Complete files/dirs for info and ls + elsif ($cmd =~ /^(add|delete|info|ls|mark|unmark|versions)$/) { + return &complete_files($text, 0); + } + # Complete clients for client + elsif ($cmd eq 'client') { + return () if (@args > 2); + my $pat = $text; + $pat =~ s/\./\\./g; + my @flist; + + print STDERR "DBG: " . (@args) . " arguments found.\n" if ($debug); + + if (@args < 1 || (@args == 1 and $line =~ /[^\s]$/)) { + @flist = grep (/^$pat/, sort keys %$clients); + } + else { + @flist = grep (/^$pat/, sort keys %$jobs); + } + + return () if (!@flist); + my $match = (@flist > 1) ? &get_match(@flist) : ''; + + #return $match, map {s/ /\\ /g; $_} @flist; + return $match, @flist; + } + # Complete show options for show + elsif ($cmd eq 'show') { + return () if (@args > 1); + # attempt to suggest match. + my @list = grep (/^$text/, sort keys %SHOW); + return () if (!@list); + my $match = (@list > 1) ? &get_match(@list) : ''; + return $match, @list; + } + elsif ($cmd =~ /^(bsr|bootstrap|relocate)$/) { + $tty_attribs->{completion_entry_function} = + $tty_attribs->{filename_completion_function}; + } + + } + + return (); +} + +sub complete_files { + my $path = shift; + my $dironly = shift; + my $finfo; + my @flist; + + my ($fqdir, $dir, $pat) = &path_parts($path, 1); + + $pat =~ s/([.\[\]\\])/\\$1/g; + # First check for absolute name. + + $finfo = &fetch_dir($fqdir); + print STDERR "DBG: " . join(', ', keys %$finfo) . "\n" if ($debug); + return () if (!$finfo); # Nothing if dir not found. + + if ($dironly) { + @flist = grep (m|^$pat.*/$|, sort keys %$finfo); + } + else { + @flist = grep (/^$pat/, sort keys %$finfo); + } + + return undef if (!@flist); + + print STDERR "DBG: Files found\n" if ($debug); + + if (@flist == 1 && $flist[0] =~ m|/$|) { + $tty_attribs->{'completion_append_character'} = ''; + } + + @flist = map {s/ /\\ /g; ($fqdir eq $cwd) ? $_ : "$dir$_"} @flist; + my $match = (@flist > 1) ? &get_match(@flist) : ''; + + print STDERR "DBG: Dir - $dir; cwd - $cwd\n" if ($debug); + # Fill in dir if necessary. 
+ return $match, @flist; +} + +sub nocomplete { + return (); +} + +# subroutine to create printf format for long listing of ls + +sub long_fmt { + my $flist = shift; + my $fmax = 0; + my $lmax = 0; + my $umax = 0; + my $gmax = 0; + my $smax = 0; + + foreach my $f (@$flist) { + my $file = $f->[0]; + my $info = $f->[1]; + my $lstat = $info->{'lstat'}; + + my $l = length($file); + $fmax = $l if ($l > $fmax); + + $l = length($lstat->{'st_nlink'}); + $lmax = $l if ($l > $lmax); + $l = length($lstat->{'st_uid'}); + $umax = $l if ($l > $umax); + $l = length($lstat->{'st_gid'}); + $gmax = $l if ($l > $gmax); + $l = length($lstat->{'st_size'}); + $smax = $l if ($l > $smax); + } + + return "%s %${lmax}d %${umax}d %${gmax}d %${smax}d %s %s\n"; +} + +sub print_by_cols { + my @list = @_; + my $l = @list; + my $w = $term->get_screen_size; + my @wds = (1); + my $m = $w/3 + 1; + my $max_cols = ($m < @list) ? $w : @list; + my $fpc = 1; + my $cols = 1; + + print STDERR "Need to print $l files\n" if ($debug); + + while($max_cols > 1) { + my $used = 0; + + # Initialize array of widths + @wds = 0 x $max_cols; + + for ($cols = 0; $cols < $max_cols && $used < $w; $cols++) { + my $cw = 0; + + for (my $j = $cols*$fpc; $j < ($cols + 1)*$fpc && $j < $l; $j++ ) { + my $fl = length($list[$j]->[0]); + $cw = $fl if ($fl > $cw); + } + + $wds[$cols] = $cw; + $used += $cw; + print STDERR "DBG: Total so far is $used\n" if ($debug); + + if ($used >= $w) { + $cols++; + last; + } + + $used += 3; + } + + print STDERR "DBG: $cols of $max_cols columns uses $used space.\n" + if ($debug); + + print STDERR "DBG: Print $fpc files per column\n" + if ($debug); + + last if ($used <= $w && $cols == $max_cols); + $fpc = int($l/$cols); + $fpc++ if ($l % $cols); + $max_cols = $cols - 1; + } + + if ($max_cols == 1) { + $cols = 1; + $fpc = $l; + } + + print STDERR "Print out $fpc rows with $cols columns\n" + if ($debug); + + for (my $i = 0; $i < $fpc; $i++) { + + for (my $j = $i; $j < $fpc*$cols; $j += $fpc) { + my $cw = $wds[($j - $i)/$fpc]; + my $fmt = "%s%-${cw}s"; + my $file; + my $r; + + if ($j < @list) { + $file = $list[$j]->[0]; + my $fdata = $list[$j]->[1]; + $r = ($restore{$fdata->{'fileid'}}) ? '+' : ' '; + } + else { + $file = ''; + $r = ' '; + } + + print ' ' if ($i != $j); + printf $fmt, $r, $file; + } + + print "\n"; + } + +} + +sub ls_date { + my $seconds = shift; + my $date; + + if (abs(time() - $seconds) > 15724800) { + $date = time2str('%b %e %Y', $seconds); + } + else { + $date = time2str('%b %e %R', $seconds); + } + + return $date; +} + +# subroutine to load entire bacula database. +=head1 SHELL + +Once running, B will present the user with a shell like +environment where file can be exampled and selected for recover. The +shell will provide command history and editing and if you have the +Gnu readline module installed on your system, it will also provide +command completion. When interacting with files, wildcards should work +as expected. + +The following commands are understood. + +=cut + +sub parse_command { + my $cstr = shift; + my @command; + my $cmd; + my @args; + + # Nop on blank or commented lines + return ('nop') if ($cstr =~ /^\s*$/); + return ('nop') if ($cstr =~ /^\s*#/); + + # Get rid of leading white space to make shellwords work better + $cstr =~ s/^\s*//; + + ($cmd, @args) = shellwords($cstr); + + if (!defined($cmd)) { + warn "Could not warse $cstr\n"; + return ('nop'); + } + +=head2 add [I] + +Mark I for recovery. If I is not specified, mark all +files in the current directory. 
B is an alias for this command. + +=cut + elsif ($cmd eq 'add' || $cmd eq 'mark') { + my $options = {}; + @ARGV = @args; + + # Parse ls options + my $vars = {}; + getopts("aq", $vars) || return ('error', 'Add: Usage add [-q|-a] files'); + $options->{'all'} = $vars->{'a'}; + $options->{'quiet'} =$vars->{'q'}; + + + @command = ('add', $options); + + foreach my $a (@ARGV) { + push(@command, &expand_files($a)); + } + + } + +=head2 bootstrap I + +Create a bootstrap file suitable for use with the bacula B +command. B is an alias for this command. + +=cut + elsif ($cmd eq 'bootstrap' || $cmd eq 'bsr') { + return ('error', 'bootstrap takes single argument (file to write to)') + if (@args != 1); + @command = ('bootstrap', $args[0]); + } + +=head2 cd I + +Allows you to set your current directory. This command understands . for +the current directory and .. for the parent. Also, cd - will change you +back to the previous directory you were in. + +=cut + elsif ($cmd eq 'cd') { + # Cd with no args goes to / + @args = ('/') if (!@args); + + if (@args != 1) { + return ('error', 'Bad cd. cd requires 1 and only 1 argument.'); + } + + my $todir = $args[0]; + + # cd - should cd to previous directory. It is handled later. + return ('cd', '-') if ($todir eq '-'); + + # Expand wilecards + my @e = expand_dirs($todir); + + if (@e > 1) { + return ('error', 'Bad cd. Wildcard expands to more than 1 dir.'); + } + + $todir = $e[0]; + + print STDERR "Initial target is $todir\n" if ($debug); + + # remove prepended . + + while ($todir =~ m|^\./(.*)|) { + $todir = $1; + $todir = '.' if (!$todir); + } + + # If only . is left, replace with current directory. + $todir = $cwd if ($todir eq '.'); + print STDERR "target after . processing is $todir\n" if ($debug); + + # Now deal with .. + my $prefix = $cwd; + + while ($todir =~ m|^\.\./(.*)|) { + $todir = $1; + print STDERR "DBG: ../ found, new todir - $todir\n" if ($debug); + $prefix =~ s|/[^/]*/$|/|; + } + + if ($todir eq '..') { + $prefix =~ s|/[^/]*/$|/|; + $todir = ''; + } + + print STDERR "target after .. processing is $todir\n" if ($debug); + print STDERR "DBG: Final prefix - $prefix\n" if ($debug); + + $todir = "$prefix$todir" if ($prefix ne $cwd); + + print STDERR "DBG: todir after .. handling - $todir\n" if ($debug); + + # Turn relative directories into absolute directories. + + if (substr($todir, 0, 1) ne '/') { + print STDERR "DBG: $todir has no leading /, prepend $cwd\n" if ($debug); + $todir = "$cwd$todir"; + } + + # Make sure we have a trailing / + + if (substr($todir, length($todir) - 1) ne '/') { + print STDERR "DBG: No trailing /, append /\n" if ($debug); + $todir .= '/'; + } + + @command = ('cd', $todir); + } + +=head2 changetime I + +This command changes the time used in generating the view of the +filesystem. Files that were backed up before the specified time +(optionally until the next full backup) will be the only files seen. + +The time can be specifed in almost any reasonable way. Here are a few +examples: + +=over 4 + +=item 1/1/2006 + +=item yesterday + +=item sunday + +=item 5 days ago + +=item last month + +=back + +=cut + elsif ($cmd eq 'changetime') { + @command = ($cmd, join(' ', @args)); + } + +=head2 client I I + +Specify the client and jobname to view. + +=cut + elsif ($cmd eq 'client') { + + if (@args != 2) { + return ('error', 'client takes a two arguments client-name job-name'); + } + + @command = ('client', @args); + } + +=head2 debug + +Toggle debug flag. 
+ +=cut + elsif ($cmd eq 'debug') { + @command = ('debug'); + } + +=head2 delete [I] + +Un-mark file that were previous marked for recovery. If I is +not specified, mark all files in the current directory. B is an +alias for this command. + +=cut + elsif ($cmd eq 'delete' || $cmd eq 'unmark') { + @command = ('delete'); + + foreach my $a (@args) { + push(@command, &expand_files($a)); + } + + } + +=head2 help + +Show list of command with brief description of what they do. + +=cut + elsif ($cmd eq 'help') { + @command = ('help'); + } + +=head2 history + +Display command line history. B is an alias for this command. + +=cut + elsif ($cmd eq 'h' || $cmd eq 'history') { + @command = ('history'); + } + +=head2 info [I] + +Display information about the specified files. The format of the +information provided is reminiscent of the bootstrap file. + +=cut + elsif ($cmd eq 'info') { + push(@command, 'info'); + + foreach my $a (@args) { + push(@command, &expand_files($a)); + } + + } + +=head2 ls [I] + +This command will list the specified files (defaults to all files in +the current directory). Files are sorted alphabetically be default. It +understand the following options. + +=over 4 + +=item -a + +Causes ls to list files even if they are only on backups preceding the +closest full backup to the currently selected date/time. + +=item -l + +List files in long format (like unix ls command). + +=item -r + +reverse direction of sort. + +=item -S + +Sort files by size. + +=item -t + +Sort files by time + +=back + +=cut + elsif ($cmd eq 'ls' || $cmd eq 'dir' || $cmd eq 'll') { + my $options = {}; + @ARGV = @args; + + # Parse ls options + my $vars = {}; + getopts("altSr", $vars) || return ('error', 'Bad ls usage.'); + $options->{'all'} = $vars->{'a'}; + $options->{'long'} = $vars->{'l'}; + $options->{'long'} = 1 if ($cmd eq 'dir' || $cmd eq 'll'); + + $options->{'sort'} = 'time' if ($vars->{'t'}); + + return ('error', 'Only one sort at a time allowed.') + if ($options->{'sort'} && ($vars->{'S'})); + + $options->{'sort'} = 'size' if ($vars->{'S'}); + $options->{'sort'} = 'alpha' if (!$options->{'sort'}); + + $options->{'sort'} = 'r' . $options->{'sort'} if ($vars->{'r'}); + + @command = ('ls', $options); + + foreach my $a (@ARGV) { + push(@command, &expand_files($a)); + } + + } + +=head2 pwd + +Show current directory. + +=cut + elsif ($cmd eq 'pwd') { + @command = ('pwd'); + } + +=head2 quit + +Exit program. + +B, B and B are all aliases for this command. + +=cut + elsif ($cmd eq 'quit' || $cmd eq 'q' || $cmd eq 'exit' || $cmd eq 'x') { + @command = ('quit'); + } + +=head2 recover + +This command creates a table in the bacula catalog that case be used to +restore the selected files. It will also display the command to enter +into bconsole to start the restore. + +=cut + elsif ($cmd eq 'recover') { + @command = ('recover'); + } + +=head2 relocate I + +Specify the directory to restore files to. Defaults to /. + +=cut + elsif ($cmd eq 'relocate') { + return ('error', 'relocate required a single directory to relocate to') + if (@args != 1); + + my $todir = $args[0]; + $todir = `pwd` . $todir if (substr($todir, 0, 1) ne '/'); + @command = ('relocate', $todir); + } + +=head2 show I + +Show various information about B. The following items can be specified. + +=over 4 + +=item cache + +Display's a list of cached directories. + +=item catalog + +Displays the name of the catalog we are talking to. + +=item client + +Display current client and job named that are being viewed. 
+ +=item restore + +Display the number of files and size to be restored. + +=item volumes + +Display the volumes that will be required to perform a restore on the +selected files. + +=back + +=cut + elsif ($cmd eq 'show') { + return ('error', 'show takes a single argument') if (@args != 1); + @command = ('show', $args[0]); + } + +=head2 verbose + +Toggle verbose flag. + +=cut + elsif ($cmd eq 'verbose') { + @command = ('verbose'); + } + +=head2 versions [I] + +View all version of specified files available from the current +time. B is an alias for this command. + +=cut + elsif ($cmd eq 'versions' || $cmd eq 'ver') { + push(@command, 'versions'); + + foreach my $a (@args) { + push(@command, &expand_files($a)); + } + + } + +=head2 volumes + +Display the volumes that will be required to perform a restore on the +selected files. + +=cut + elsif ($cmd eq 'volumes') { + @command = ('volumes'); + } + else { + @command = ('error', "$cmd: Unknown command"); + } + + return @command; +} + +############################################################################## +### Command processing +############################################################################## + +# Add files to restore list. + +sub cmd_add { + my $opts = shift; + my @flist = @_; + + my $save_rnum = $rnum; + &select_files(1, $opts, $cwd, @flist); + print "" . ($rnum - $save_rnum) . " files marked for restore\n"; +} + +sub cmd_bootstrap { + my $bsrfile = shift; + my %jobs; + my @media; + my %bootstrap; + + # Get list of job ids to restore from. + + foreach my $fid (keys %restore) { + $jobs{$restore{$fid}->[0]} = 1; + } + + my $jlist = join(', ', sort keys %jobs); + + if (!$jlist) { + print "Nothing to restore.\n"; + return; + } + + # Read in media info + + my $query = "select + Job.jobid, + volumename, + mediatype, + volsessionid, + volsessiontime, + firstindex, + lastindex, + startfile as volfile, + JobMedia.startblock, + JobMedia.endblock, + volindex + from + Job, + Media, + JobMedia + where + Job.jobid in ($jlist) and + Job.jobid = JobMedia.jobid and + JobMedia.mediaid = Media.mediaid + order by + volumename, + volsessionid, + volindex + "; + + my $sth = $dbh->prepare($query) || die "Can't prepare $query\n"; + $sth->execute || die "Can't execute $query\n"; + + while (my $ref = $sth->fetchrow_hashref) { + push(@media, { + 'jobid' => $ref->{'jobid'}, + 'volumename' => $ref->{'volumename'}, + 'mediatype' => $ref->{'mediatype'}, + 'volsessionid' => $ref->{'volsessionid'}, + 'volsessiontime' => $ref->{'volsessiontime'}, + 'firstindex' => $ref->{'firstindex'}, + 'lastindex' => $ref->{'lastindex'}, + 'volfile' => $ref->{'volfile'}, + 'startblock' => $ref->{'startblock'}, + 'endblock' => $ref->{'endblock'}, + 'volindex' => $ref->{'volindex'} + }); + } + +# Gather bootstrap info +# +# key - jobid.volumename.volumesession.volindex +# job +# name +# type +# session +# time +# file +# startblock +# endblock +# array of file indexes. 
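+#
+# As a purely hypothetical illustration (all values invented), one entry
+# gathered here is later written out by the print BSR statements below
+# as a block along these lines:
+#
+#   Volume="Vol0001"
+#   MediaType="LTO-4"
+#   VolSessionId=12
+#   VolSessionTime=1136242000
+#   VolFile=3
+#   VolBlock=0-125903
+#   FileIndex=1-42
+#   Count=42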
+ + for my $info (values %restore) { + my $jobid = $info->[0]; + my $fidx = $info->[1]; + + foreach my $m (@media) { + + if ($jobid == $m->{'jobid'} && $fidx >= $m->{'firstindex'} && $fidx <= $m->{'lastindex'}) { + my $key = "$jobid."; + $key .= "$m->{volumename}.$m->{volsessionid}.$m->{volindex}"; + + $bootstrap{$key} = { + 'job' => $jobid, + 'name' => $m->{'volumename'}, + 'type' => $m->{'mediatype'}, + 'session' => $m->{'volsessionid'}, + 'index' => $m->{'volindex'}, + 'time' => $m->{'volsessiontime'}, + 'file' => $m->{'volfile'}, + 'startblock' => $m->{'startblock'}, + 'endblock' => $m->{'endblock'} + } + if (!$bootstrap{$key}); + + $bootstrap{$key}->{'files'} = [] + if (!$bootstrap{$key}->{'files'}); + push(@{$bootstrap{$key}->{'files'}}, $fidx); + } + + } + + } + + # print bootstrap + + print STDERR "DBG: Keys = " . join(', ', keys %bootstrap) . "\n" + if ($debug); + + my @keys = sort { + return $bootstrap{$a}->{'time'} <=> $bootstrap{$b}->{'time'} + if ($bootstrap{$a}->{'time'} != $bootstrap{$b}->{'time'}); + return $bootstrap{$a}->{'name'} cmp $bootstrap{$b}->{'name'} + if ($bootstrap{$a}->{'name'} ne $bootstrap{$b}->{'name'}); + return $bootstrap{$a}->{'session'} <=> $bootstrap{$b}->{'session'} + if ($bootstrap{$a}->{'session'} != $bootstrap{$b}->{'session'}); + return $bootstrap{$a}->{'index'} <=> $bootstrap{$b}->{'index'}; + } keys %bootstrap; + + if (!open(BSR, ">$bsrfile")) { + warn "$bsrfile: $|\n"; + return; + } + + foreach my $key (@keys) { + my $info = $bootstrap{$key}; + print BSR "Volume=\"$info->{name}\"\n"; + print BSR "MediaType=\"$info->{type}\"\n"; + print BSR "VolSessionId=$info->{session}\n"; + print BSR "VolSessionTime=$info->{time}\n"; + print BSR "VolFile=$info->{file}\n"; + print BSR "VolBlock=$info->{startblock}-$info->{endblock}\n"; + + my @fids = sort { $a <=> $b} @{$bootstrap{$key}->{'files'}}; + my $first; + my $prev; + + for (my $i = 0; $i < @fids; $i++) { + $first = $fids[$i] if (!$first); + + if ($prev) { + + if ($fids[$i] != $prev + 1) { + print BSR "FileIndex=$first"; + print BSR "-$prev" if ($first != $prev); + print BSR "\n"; + $first = $fids[$i]; + } + + } + + $prev = $fids[$i]; + } + + print BSR "FileIndex=$first"; + print BSR "-$prev" if ($first != $prev); + print BSR "\n"; + print BSR "Count=" . (@fids) . "\n"; + } + + close(BSR); +} + +# Change directory + +sub cmd_cd { + my $dir = shift; + + my $save = $files; + + $dir = $lwd if ($dir eq '-' && defined($lwd)); + + if ($dir ne '-') { + $files = &fetch_dir($dir); + } + else { + warn "Previous director not defined.\n"; + } + + if ($files) { + $lwd = $cwd; + $cwd = $dir; + } + else { + print STDERR "Could not locate directory $dir\n"; + $files = $save; + } + + $cwd = '/' if (!$cwd); +} + +sub cmd_changetime { + my $tstr = shift; + + if (!$tstr) { + print "Time currently set to " . localtime($rtime) . "\n"; + return; + } + + my $newtime = parsedate($tstr, FUZZY => 1, PREFER_PAST => 1); + + if (defined($newtime)) { + print STDERR "Time evaluated to $newtime\n" if ($debug); + $rtime = $newtime; + print "Setting date/time to " . localtime($rtime) . "\n"; + &setjob; + + # Clean cache. + $dircache = {}; + &cache_catalog if ($preload); + + # Get directory based on new time. + $files = &fetch_dir($cwd); + } + else { + print STDERR "Could not parse $tstr as date/time\n"; + } + +} + +# Change client + +sub cmd_client { + my $c = shift; + $jobname = shift; # Set global job name + + # Lookup client id. 
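+    # (lookup_client accepts either a client name or a numeric clientid
+    # and returns the clientid; an unknown name falls back to the current
+    # client with a warning.)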
+ $client = &lookup_client($c); + + # Clear cache, we changed machines/jobs + $dircache = {}; + &cache_catalog if ($preload); + + # Find last full backup time. + &setjob; + + # Get current directory on new client. + $files = &fetch_dir($cwd); + + # Clear restore info + $rnum = 0; + $rbytes = 0; + %restore = (); +} + +sub cmd_debug { + $debug = 1 - $debug; +} + +sub cmd_delete { + my @flist = @_; + my $opts = {quiet=>1}; + + my $save_rnum = $rnum; + &select_files(0, $opts, $cwd, @flist); + print "" . ($save_rnum - $rnum) . " files un-marked for restore\n"; +} + +sub cmd_help { + + foreach my $h (sort keys %COMMANDS) { + printf "%-12s %s\n", $h, $COMMANDS{$h}; + } + +} + +sub cmd_history { + + foreach my $h ($term->GetHistory) { + print "$h\n"; + } + +} + +# Print catalog/tape info about files + +sub cmd_info { + my @flist = @_; + @flist = ($cwd) if (!@flist); + + foreach my $f (@flist) { + $f =~ s|/+$||; + my ($fqdir, $dir, $file) = &path_parts($f); + my $finfo = &fetch_dir($fqdir); + + if (!$finfo->{$file}) { + + if (!$finfo->{"$file/"}) { + warn "$f: File not found.\n"; + next; + } + + $file .= '/'; + } + + my $fileid = $finfo->{$file}->{fileid}; + my $fileindex = $finfo->{$file}->{fileindex}; + my $jobid = $finfo->{$file}->{jobid}; + + print "#$f -\n"; + print "#FileID : $finfo->{$file}->{fileid}\n"; + print "#JobID : $jobid\n"; + print "#Visible : $finfo->{$file}->{visible}\n"; + + my $query = "select + volumename, + mediatype, + volsessionid, + volsessiontime, + startfile, + JobMedia.startblock, + JobMedia.endblock + from + Job, + Media, + JobMedia + where + Job.jobid = $jobid and + Job.jobid = JobMedia.jobid and + $fileindex >= firstindex and + $fileindex <= lastindex and + JobMedia.mediaid = Media.mediaid + "; + + my $sth = $dbh->prepare($query) || die "Can't prepare $query\n"; + $sth->execute || die "Can't execute $query\n"; + + while (my $ref = $sth->fetchrow_hashref) { + print "Volume=\"$ref->{volumename}\"\n"; + print "MediaType=\"$ref->{mediatype}\"\n"; + print "VolSessionId=$ref->{volsessionid}\n"; + print "VolSessionTime=$ref->{volsessiontime}\n"; + print "VolFile=$ref->{startfile}\n"; + print "VolBlock=$ref->{startblock}-$ref->{endblock}\n"; + print "FileIndex=$finfo->{$file}->{fileindex}\n"; + print "Count=1\n"; + } + + $sth->finish; + } + +} + +# List files. + +sub cmd_ls { + my $opts = shift; + my @flist = @_; + my @keys; + + print STDERR "DBG: " . (@flist) . " files to list.\n" if ($debug); + + if (!@flist) { + @flist = keys %$files; + } + + # Sort files as specified. 
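+    # (The sort key comes from each file's decoded lstat: st_mtime for -t,
+    # st_size for -S; a leading 'r' on the sort name, set by -r, reverses
+    # the order, and ties fall back to comparing file names.)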
+ + if ($opts->{sort} eq 'alpha') { + print STDERR "DBG: Sort by alpha\n" if ($debug); + @keys = sort @flist; + } + elsif ($opts->{sort} eq 'ralpha') { + print STDERR "DBG: Sort by reverse alpha\n" if ($debug); + @keys = sort {$b cmp $a} @flist; + } + elsif ($opts->{sort} eq 'time') { + print STDERR "DBG: Sort by time\n" if ($debug); + @keys = sort { + return $a cmp $b + if ($files->{$b}->{'lstat'}->{'st_mtime'} == + $files->{$a}->{'lstat'}->{'st_mtime'}); + $files->{$b}->{'lstat'}->{'st_mtime'} <=> + $files->{$a}->{'lstat'}->{'st_mtime'} + } @flist; + } + elsif ($opts->{sort} eq 'rtime') { + print STDERR "DBG: Sort by reverse time\n" if ($debug); + @keys = sort { + return $b cmp $a + if ($files->{$a}->{'lstat'}->{'st_mtime'} == + $files->{$b}->{'lstat'}->{'st_mtime'}); + $files->{$a}->{'lstat'}->{'st_mtime'} <=> + $files->{$b}->{'lstat'}->{'st_mtime'} + } @flist; + } + elsif ($opts->{sort} eq 'size') { + print STDERR "DBG: Sort by size\n" if ($debug); + @keys = sort { + return $a cmp $b + if ($files->{$a}->{'lstat'}->{'st_size'} == + $files->{$b}->{'lstat'}->{'st_size'}); + $files->{$b}->{'lstat'}->{'st_size'} <=> + $files->{$a}->{'lstat'}->{'st_size'} + } @flist; + } + elsif ($opts->{sort} eq 'rsize') { + print STDERR "DBG: Sort by reverse size\n" if ($debug); + @keys = sort { + return $b cmp $a + if ($files->{$a}->{'lstat'}->{'st_size'} == + $files->{$b}->{'lstat'}->{'st_size'}); + $files->{$a}->{'lstat'}->{'st_size'} <=> + $files->{$b}->{'lstat'}->{'st_size'} + } @flist; + } + else { + print STDERR "DBG: $opts->{sort}, no sort\n" if ($debug); + @keys = @flist; + } + + @flist = (); + + foreach my $f (@keys) { + print STDERR "DBG: list $f\n" if ($debug); + $f =~ s|/+$||; + my ($fqdir, $dir, $file) = &path_parts($f); + my $finfo = &fetch_dir($fqdir); + + if (!$finfo->{$file}) { + + if (!$finfo->{"$file/"}) { + warn "$f: File not found.\n"; + next; + } + + $file .= '/'; + } + + my $fdata = $finfo->{$file}; + + if ($opts->{'all'} || $fdata->{'visible'}) { + push(@flist, ["$dir$file", $fdata]); + } + + } + + if ($opts->{'long'}) { + my $lfmt = &long_fmt(\@flist) if ($opts->{'long'}); + + foreach my $f (@flist) { + my $file = $f->[0]; + my $fdata = $f->[1]; + my $r = ($restore{$fdata->{'fileid'}}) ? '+' : ' '; + my $lstat = $fdata->{'lstat'}; + + printf $lfmt, $lstat->{'statstr'}, $lstat->{'st_nlink'}, + $lstat->{'st_uid'}, $lstat->{'st_gid'}, $lstat->{'st_size'}, + ls_date($lstat->{'st_mtime'}), "$r$file"; + } + } + else { + &print_by_cols(@flist); + } + +} + +sub cmd_pwd { + print "$cwd\n"; +} + +# Create restore data for bconsole + +sub cmd_recover { + my $query = "create table recover (jobid int, fileindex int)"; + + $dbh->do($query) + || warn "Could not create recover table. 
Hope it's already there.\n"; + + if ($db eq 'postgres') { + $query = "COPY recover FROM STDIN"; + + $dbh->do($query) || die "Can't execute $query\n"; + + foreach my $finfo (values %restore) { + $dbh->pg_putline("$finfo->[0]\t$finfo->[1]\n"); + } + + $dbh->pg_endcopy; + } + else { + + foreach my $finfo (values %restore) { + $query = "insert into recover ( + 'jobid', 'fileindex' + ) + values ( + $finfo->[0], $finfo->[1] + )"; + $dbh->do($query) || die "Can't execute $query\n"; + } + + } + + $query = "GRANT all on recover to bacula"; + $dbh->do($query) || die "Can't execute $query\n"; + + $query = "select name from Client where clientid = $client"; + my $sth = $dbh->prepare($query) || die "Can't prepare $query\n"; + $sth->execute || die "Can't execute $query\n"; + + my $ref = $sth->fetchrow_hashref; + print "Restore prepared. Run bconsole and enter the following command\n"; + print "restore client=$$ref{name} where=$restore_to file=\?recover\n"; + $sth->finish; +} + +sub cmd_relocate { + $restore_to = shift; +} + +# Display information about recover's state + +sub cmd_show { + my $what = shift; + + if ($what eq 'clients') { + + foreach my $c (sort keys %$clients) { + print "$c\n"; + } + + } + elsif ($what eq 'catalog') { + print "$catalog\n"; + } + elsif ($what eq 'client') { + my $query = "select name from Client where clientid = $client"; + my $sth = $dbh->prepare($query) || die "Can't prepare $query\n"; + $sth->execute || die "Can't execute $query\n"; + + my $ref = $sth->fetchrow_hashref; + print "$$ref{name}; $jobname\n"; + $sth->finish; + } + elsif ($what eq 'cache') { + print "The following directories are cached\n"; + + foreach my $d (sort keys %$dircache) { + print "$d\n"; + } + + } + elsif ($what eq 'restore') { + print "There are $rnum files marked for restore.\n"; + + print STDERR "DBG: Bytes = $rbytes\n" if ($debug); + + if ($rbytes < 1024) { + print "The restore will require $rbytes bytes.\n"; + } + elsif ($rbytes < 1024*1024) { + my $rk = $rbytes/1024; + printf "The restore will require %.2f KB.\n", $rk; + } + elsif ($rbytes < 1024*1024*1024) { + my $rm = $rbytes/1024/1024; + printf "The restore will require %.2f MB.\n", $rm; + } + else { + my $rg = $rbytes/1024/1024/1024; + printf "The restore will require %.2f GB.\n", $rg; + } + + print "Restores will be placed in $restore_to\n"; + } + elsif ($what eq 'volumes') { + &cmd_volumes; + } + elsif ($what eq 'qinfo') { + my $dl = length($cwd); + print "? - 1: ftime = $ftime\n"; + print "? - 2: client = $client\n"; + print "? - 3: jobname = $jobname\n"; + print "? - 4: rtime = $rtime\n"; + print "? - 5: dir = $cwd\n"; + print "? - 6, 7: dl = $dl\n"; + print "? - 8: ftime = $ftime\n"; + print "? - 9: client = $client\n"; + print "? - 10: jobname = $jobname\n"; + print "? - 11: rtime = $rtime\n"; + print "? 
- 12: dir = $cwd\n"; + } + else { + warn "Don't know how to show $what\n"; + } + +} + +sub cmd_verbose { + $verbose = 1 - $verbose; +} + +sub cmd_versions { + my @flist = @_; + + @flist = ($cwd) if (!@flist); + + foreach my $f (@flist) { + my $path; + my $data = {}; + + print STDERR "DBG: Get versions for $f\n" if ($debug); + + $f =~ s|/+$||; + my ($fqdir, $dir, $file) = &path_parts($f); + my $finfo = &fetch_dir($fqdir); + + if (!$finfo->{$file}) { + + if (!$finfo->{"$file/"}) { + warn "$f: File not found.\n"; + next; + } + + $file .= '/'; + } + + if ($file =~ m|/$|) { + $path = "$fqdir$file"; + $file = ''; + } + else { + $path = $fqdir; + } + + print STDERR "DBG: Use $ftime, $path, $file, $client, $jobname\n" + if ($debug); + + $ver_sth->execute($ftime, $rtime, $path, $file, $client, $jobname) + || die "Can't execute $queries{$db}->{'ver'}\n"; + + # Gather stats + + while (my $ref = $ver_sth->fetchrow_hashref) { + my $f = "$ref->{name};$ref->{jobtdate}"; + $data->{$f} = &create_file_entry( + $f, + $ref->{'fileid'}, + $ref->{'fileindex'}, + $ref->{'jobid'}, + $ref->{'visible'}, + $ref->{'lstat'} + ); + + $data->{$f}->{'jobtdate'} = $ref->{'jobtdate'}; + $data->{$f}->{'volume'} = $ref->{'volumename'}; + } + + my @keys = sort { + $data->{$a}->{'jobtdate'} <=> + $data->{$b}->{'jobtdate'} + } keys %$data; + + my @list = (); + + foreach my $f (@keys) { + push(@list, [$file, $data->{$f}]); + } + + my $lfmt = &long_fmt(\@list); + print "\nVersions of \`$path$file' earlier than "; + print localtime($rtime) . ":\n\n"; + + foreach my $f (@keys) { + my $lstat = $data->{$f}->{'lstat'}; + printf $lfmt, $lstat->{'statstr'}, $lstat->{'st_nlink'}, + $lstat->{'st_uid'}, $lstat->{'st_gid'}, $lstat->{'st_size'}, + time2str('%c', $lstat->{'st_mtime'}), $file; + print "save time: " . localtime($data->{$f}->{'jobtdate'}) . "\n"; + print " location: $data->{$f}->{volume}\n\n"; + } + + } + +} + +# List volumes needed for restore. + +sub cmd_volumes { + my %media; + my @jobmedia; + my %volumes; + + # Get media. + my $query = "select mediaid, volumename from Media"; + my $sth = $dbh->prepare($query) || die "Can't prepare $query\n"; + + $sth->execute || die "Can't execute $query\n"; + + while (my $ref = $sth->fetchrow_hashref) { + $media{$$ref{'mediaid'}} = $$ref{'volumename'}; + } + + $sth->finish(); + + # Get media usage. 
+ $query = "select mediaid, jobid, firstindex, lastindex from JobMedia"; + $sth = $dbh->prepare($query) || die "Can't prepare $query\n"; + + $sth->execute || die "Can't execute $query\n"; + + while (my $ref = $sth->fetchrow_hashref) { + push(@jobmedia, { + 'mediaid' => $$ref{'mediaid'}, + 'jobid' => $$ref{'jobid'}, + 'firstindex' => $$ref{'firstindex'}, + 'lastindex' => $$ref{'lastindex'} + }); + } + + $sth->finish(); + + # Find needed volumes + + foreach my $fileid (keys %restore) { + my ($jobid, $idx) = @{$restore{$fileid}}; + + foreach my $jm (@jobmedia) { + next if ($jm->{'jobid'}) != $jobid; + + if ($idx >= $jm->{'firstindex'} && $idx <= $jm->{'lastindex'}) { + $volumes{$media{$jm->{'mediaid'}}} = 1; + } + + } + + } + + print "The following volumes are needed for restore.\n"; + + foreach my $v (sort keys %volumes) { + print "$v\n"; + } + +} + +sub cmd_error { + my $msg = shift; + print STDERR "$msg\n"; +} + +############################################################################## +### Start of program +############################################################################## + +&cache_catalog if ($preload); + +print "Using $readline for command processing\n" if ($verbose); + +# Initialize command completion + +# Add binding for Perl readline. Issue warning. +if ($readline eq 'Term::ReadLine::Gnu') { + $term->ReadHistory($HIST_FILE); + print STDERR "DBG: FCD - $tty_attribs->{filename_completion_desired}\n" + if ($debug); + $tty_attribs->{attempted_completion_function} = \&complete; + $tty_attribs->{attempted_completion_function} = \&complete; + print STDERR "DBG: Quote chars = '$tty_attribs->{filename_quote_characters}'\n" if ($debug); +} +elsif ($readline eq 'Term::ReadLine::Perl') { + readline::rl_bind('TAB', 'ViComplete'); + warn "Command completion disabled. $readline is seriously broken\n"; +} +else { + warn "Can't deal with $readline, Command completion disabled.\n"; +} + +&cmd_cd($start_dir); + +while (defined($cstr = $term->readline('recover> '))) { + print "\n" if ($readline eq 'Term::ReadLine::Perl'); + my @command = parse_command($cstr); + last if ($command[0] eq 'quit'); + next if ($command[0] eq 'nop'); + + print STDERR "Execute $command[0] command.\n" if ($debug); + + my $cmd = \&{"cmd_$command[0]"}; + + # The following line will call the subroutine named cmd_ prepended to + # the name of the command returned by parse_command. + + &$cmd(@command[1..$#command]); +}; + +$dir_sth->finish(); +$sel_sth->finish(); +$ver_sth->finish(); +$dbh->disconnect(); + +print "\n" if (!defined($cstr)); + +$term->WriteHistory($HIST_FILE) if ($readline eq 'Term::ReadLine::Gnu'); + +=head1 DEPENDENCIES + +The following CPAN modules are required to run this program. + +DBI, Term::ReadKey, Time::ParseDate, Date::Format, Text::ParseWords + +Additionally, you will only get command line completion if you also have + +Term::ReadLine::Gnu + +=head1 AUTHOR + +Karl Hakimian + +=head1 LICENSE + +Copyright (C) 2006 Karl Hakimian + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +=cut diff --git a/examples/reports/bacula_mail_summary.sh b/examples/reports/bacula_mail_summary.sh new file mode 100755 index 00000000..901f2efc --- /dev/null +++ b/examples/reports/bacula_mail_summary.sh @@ -0,0 +1,73 @@ +#!/bin/sh +# This script is to create a summary of the job notifications from bacula +# and send it to people who care. +# +# For it to work, you need to have all Bacula job report +# loggin to file, edit path for Your needs +# This should be run after all backup jobs have finished. +# Tested with bacula-1.38.0 + +# Some improvements by: Andrey Yakovlev (ISP Farlep) +# Contributed by Andrew J. Millar +# Patched by Andrey A. Yakovlev + +# Use awk to create the report, pass to column to be +# formatted nicely, then on to mail to be sent to +# people who care. + +EMAIL_LIST="freedom@kiev.farlep.net" + +LOG='/var/db/bacula/log' + + +#--------------------------------------------------------------------- + +awk -F\:\ 'BEGIN { + print "Client Status Type StartTime EndTime Files Bytes" + } + + /orion-dir: New file:/ { + print $3 + } + + /orion-dir: File:/ { + print $3 + } + + /Client/ { + CLIENT=$2; sub(/"/, "", CLIENT) ; sub(/".*$/, "", CLIENT) + } + /Backup Level/ { + TYPE=$2 ; sub(/,.*$/, "", TYPE) + } + /Start time/ { + STARTTIME=$2; sub(/.*-.*-.* /, "", STARTTIME) + } + /End time/ { + ENDTIME=$2; sub(/.*-.*-.* /, "", ENDTIME) + } + /Files Examined/ { + SDFILES=$2 + SDBYTES=0 + } + /SD Files Written/ { + SDFILES=$2 + } + /SD Bytes Written/ { + SDBYTES=$2 + } + /Termination/ { + TERMINATION=$2 ; + sub(/Backup/, "", TERMINATION) ; + gsub(/\*\*\*/, "", TERMINATION) ; + sub(/Verify OK/, "OK-Verify", TERMINATION) ; + sub(/y[ ]/, "y-", TERMINATION) ; + printf "%s %s %s %s %s %s %s \n", CLIENT,TERMINATION,TYPE,STARTTIME,ENDTIME,SDFILES,SDBYTES}' ${LOG} | \ + column -t -x | \ + mail -s "Bacula Summary for `date -v -1d +'%a, %F'`" ${EMAIL_LIST} +# +# Empty the LOG +cat ${LOG} > ${LOG}.old +cat /dev/null > ${LOG} +# +# That's all folks diff --git a/examples/reports/baculareport.pl b/examples/reports/baculareport.pl new file mode 100755 index 00000000..bf014a03 --- /dev/null +++ b/examples/reports/baculareport.pl @@ -0,0 +1,1161 @@ +#!/usr/bin/perl -w +# +# bacula report generation +# +# (C) Arno Lehmann 2005 +# IT-Service Lehmann +# + +# +# Usage: See funtion print_usage +# or use this script with option --help +# +# Version history: +# +# 0.2 publicly available, works reliable +# 0.3 increasing weight of No. of tapes in guess reliability +# and including tape capacity guessing when no volumes in subpool +# using default values from temp. table + +use strict; +use DBI; +use Getopt::Long; +use Math::BigInt; + +my $version="0.3"; +$0 =~ /.*\/([^\/]*)$/; +my $ME = $1; + +my $debug = 0; +my $db_host = ""; +my $db_user = "bacula"; +my $db_database = "mysql:bacula"; +my $db_pass = ""; + +my $do_usage = ""; +my $do_version = ""; + +my @temp_tables; + +my @the_pools; + +my $out_pooldetails = ""; +my $out_bargraph = 1; +my $out_bargraphlen = 70; +my $out_subpools = ""; +my $out_subpooldetails = ""; +my $out_subbargraph = ""; +my $out_cutmarks = ""; + +# This is the data we're interested in: +# In this array we have a hash reference to each Pool. 
+# A pool consists of a hash having +# Name +# Id +# BytesTotal +# VolumesTotal +# VolumesFull (This is State Full +# VolumesEmpty (This is Purged and Recycle) +# VolumesPartly (Append) +# VolumesAway (Archive, Read-Only) +# VolumesOther (Busy, Used) +# VolumesOff (Disabled, Error) +# VolumesCleaning +# BytesFree +# GuessReliability (This is the weighted average of the Reliability +# of all the Media Type Guesses in this Pool) +# MediaTypes is an array of references to hashes for collected +# information for all the Media Types in this pool. +# This has the same as the pools summary and adds +# MediaType The String +# AvgFullBytes (The Avg. Number of Bytes per full Volume) +# BytesFreeEmpty (The estimated Free Bytes on Empty Volumes) +# BytesFreePartly +# +# We use: $the_pools[0]->MediaTypes[0]->{MediaType} or +# $the_pools[1]->Id +# I hope you get the point. I hope I do. + +Getopt::Long::Configure("bundling"); +GetOptions("host=s"=>\$db_host, + "user|U=s"=>\$db_user, + "database|D=s"=>\$db_database, + "password|P=s"=>\$db_pass, + "debug=i"=>\$debug, + "help|h"=>\$do_usage, + "version|V"=>\$do_version, + "subpools|s"=>\$out_subpools, + "subpool-details"=>\$out_subpooldetails, + "pool-details|d"=>\$out_pooldetails, + "pool-bargraph!"=>\$out_bargraph, + "bar-length|l=i"=>\$out_bargraphlen, + "cutmarks|c"=>\$out_cutmarks, + "subpool-bargraph"=>\$out_subbargraph + ); + +debug_out(100, "I've got +host: $db_host +user: $db_user +database: $db_database +password: $db_pass +debug: $debug +help: $do_usage +version: $do_version +output requested: + pool details: $out_pooldetails + subpools: $out_subpools + subpool details: $out_subpooldetails + bargraph: $out_bargraph + subpool bargraph: $out_subbargraph + bar length: $out_bargraphlen + cutmarks: $out_cutmarks +I was called as $0 and am version $version. +Was that helpful?"); + +if ($do_usage) { + do_usage(); + exit 1; +} +if ($do_version) { + do_version(); + exit 1; +} + +$out_subpools = 1 if ($out_subpooldetails); +$out_subpools = 1 if ($out_subbargraph); +$out_bargraphlen = 70 if (15 > $out_bargraphlen); +$out_bargraphlen = 70 if (200 < $out_bargraphlen); +$out_bargraph = 1 if (! $out_pooldetails); + +debug_out(100, "Output options after dependencies: + pool details: $out_pooldetails + subpools: $out_subpools + subpool details: $out_subpooldetails + bargraph: $out_bargraph + subpool bargraph: $out_subbargraph + bar length: $out_bargraphlen + cutmarks: $out_cutmarks +"); + +my (undef, $min, $hour, $mday, $mon, $year) = localtime(); +$year += 1900; +$mon = sprintf("%02i", $mon+1); +$mday = sprintf("%02i", $mday); +$min = sprintf("%02i", $min); +$hour = sprintf("%02i", $hour); +print "bacula volume / pool status report $year-$mon-$mday $hour:$min\n", + "Volumes Are Full, Other, Append, Empty, aWay or X (error)\n"; +my $dbconn = "dbi:" . $db_database; +$dbconn .= "\@" . 
$db_host if $db_host; +debug_out(40, "DBI connect with $dbconn"); + +my $h_db = DBI->connect($dbconn, + $db_user, $db_pass, + { PrintError => 0, + AutoCommit => 1 } + ) || die DBI::errstr; +debug_out(10, "Have database connection $h_db"); + +debug_out(100, "creating temp tables..."); + +$h_db->do("CREATE TABLE alrep_M(PoolId INT(10) UNSIGNED,MediaType TINYBLOB)") || debug_abort(0, "Can't create temp table alrep_M - another script running?"); +unshift @temp_tables, "alrep_M"; +debug_out(45, "Table alrep_M created."); + + +debug_out(40, "All tables done."); + +debug_out(40, "Filling temp tables..."); +if ($h_db->do("INSERT INTO alrep_M SELECT Pool.PoolId,Media.MediaType FROM Pool,Media WHERE Pool.PoolId=Media.PoolId GROUP BY PoolId,MediaType")) { + debug_out(45, "PoolId-MediaType table populated."); +} else { + debug_abort(0, "Couldn't populate PoolId and MediaType table alrep_M."); +} + +debug_out(40, "All tables done."); + +debug_out(40, "Getting Pool Names."); +my $h_st = $h_db->prepare("SELECT Name,PoolId FROM Pool ORDER BY Name") || + debug_abort(0, "Couldn't get Pool Information.", $h_db->errstr()); +$h_st->execute() || debug_abort(0, "Couldn't query Pool information.", + $h_db->errstr()); +my $pools; +while ($pools=$h_st->fetchrow_hashref()) { + process_pool($pools->{Name}, $pools->{PoolId}) +} +debug_out(10, "All Pool data collected."); +debug_out(7, "Pools analyzed: $#the_pools."); +debug_out(10, "Going to print..."); + +my $pi; +for $pi (@the_pools) { + output_pool($pi); +} + +debug_out(10, "Program terminates normally."); +do_closedb(); +debug_out(10, "Finishing."); +exit 0; + +=pod + +=head1 NAME + +baculareport.pl - a script to produce some bacula reports out of +the catalog database. + +=head1 SYNTAX + +B B<--help>|B<-h> + +B B<--version>|B<-V> + +B [B<--host> I] [B<--user>|B<-U> I] +[B<--database>|B<-D> I] [B<--password>|B<-P> I] +[B<--debug> I] [B<--pool-details>|B<-d>] +[B<--pool-bargraph>|B<--nopool-bargraph>] [B<--subpools>|B<-s>] +[B<--subpool-details>] [B<--subpool-bargraph>] [B<--bar-length>|B<-l> +I] [B<--cutmarks>|B<-c>] + +The long options can be abbreviated, as long as they remain unique. +Short options (and values) can be grouped, for more information see +B. + +=head1 DESCRIPTION + +B accesses the catalog used by the backup program bacula +to produce some report about pool and volume usage. + +The command line options B<--host> I, B<--user> or B<-U> +I, B<--database> or B<-D> and B<--password> or B<-P> define the +database to query. See below for security considerations concerning +databse passwords. + +The I must be given in perl's B-syntax, as in +I. Currently, only MySQL is supported, though PostgreSQL +should work with only minor modifications to B. + +Output of reports is controlled using the command-line switches +B<--*pool*>, B<--bar-length> and B<--cutmarks> or there one-letter +equivalents. + +The report for a pool can contain a one-line overview of the volumes +in that pool, giving the numbers of volumes in different states, the +total bytes stored and an estimate of the available capacity. + +The estimated consists of a percentage describing the reliability of +this estimate and the guessed free capacity. + +A visual representation of the pools state represented as a bar graph, +together with the number of full, appendable and free volumes is the +default report. + +The length of this graph can be set with B<--bar-length> or B<-l> +I. 
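+
+As an aside on the I<database> option described earlier: its value is
+pasted verbatim into a DBI data source name, exactly as in the connection
+code near the top of this script. A minimal sketch:
+
+    # e.g. --database mysql:bacula --host dbhost  ->  dbi:mysql:bacula@dbhost
+    my $dbconn = "dbi:" . $db_database;
+    $dbconn .= "\@" . $db_host if $db_host;
+    my $h_db = DBI->connect($dbconn, $db_user, $db_pass,
+                            { PrintError => 0, AutoCommit => 1 })
+        || die DBI::errstr;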
+ +As a pool can contain volumes of different media type, the report's +output can include the information about those collections of volumes +called subpools in Bs documentation. + +The subpool overview data presents the same information about the +volumes the pool details have, but includes the media type and excludes +the free capacity guess. + +Subpool details report the average amount of data on full volumes, +together with what is estimated to be available on appendable and empty +volumes. A measurement on the reliability of this estimate is given as a +percent value. See below in L<"CAPACITY GUESSING"> for more +information. + +Finally, a bar graph representing this subpools fill-level can be printed. +For easier overview it is scaled like the pools bargraph. + +B<--cutmarks> or B<-c> prints some marks above each pool report to +make cutting the report easier if you want to file it. + +Sample reports are in L<"SAMPLE REPORTS">. + +The B<--debug>-option activates debug output. Without understanding the +source code this will not be helpful. See below L<"DEBUG OUTPUT">. + +=head1 DATABASE ACCESS AND SECURITY + +baculareport.pl needs access to baculas catalog. This might introduce +a security risk if the database access password is published to people who +shouldn't know it, but need to create reports. + +The solution is to set up a database account which can only read from +baculas catalog. Use your favorite database administration tool for +this. + +Command line passing of the password is also not really secure - anybody +with sufficient access rights can read the command line etc. So, if you use this script on a multi-user machine, you are well advised to + +=over 4 + +=item 1. + +I, or + +=item 2. + +I + +=back + +This should limit security risks to a minimum. + +If B is used by your backup admin only, don't bother +- she has access to all your data anyway. (B) + +=head1 SAMPLE REPORTS + +The reports can be customized using the above explained command line switches. +Some examples are: + + bacula volume / pool status report 2005-01-18 23:40 + Volumes Are Full, Other, Append, Empty, aWay or X (error) + + Pool Diff + ######################################################---------------- + |0% |20% |40% |60% |80% 100%| + 48.38GB used Rel: 24% free 13.88GB + 17 F Volumes 3 A and 4 E Volumes + + Pool Full + #######################################------------------------------- + |0% |20% |40% |60% |80% 100%| + 310.66GB used Rel: 58% free 241.64GB + 43 F Volumes 2 A and 14 E Volumes + + Pool Incr + #######################################################--------------- + |0% |20% |40% |60% |80% 100%| + 28.51GB used Rel: 0% (def.) free 7.61GB + 0 F Volumes 3 A and 4 E Volumes + + Pool TMPDisk + Nothing to report. + +This is the sort of report you get when you use this script without +any special output options. After a short header, for all pools in +the catalog a graphic representation of its usage is +printed. Below that, you find some essential information: The +capacity used, a guess of the remaining capacity (see +L<"CAPACITY GUESSING"> below), and +an overview of the volumes: Here, in pool Incr we have no full +volumes, 3 appendable ones and 4 empty volumes. + +In this example, the pool TMPDisk does not contain anything which can +be reported. + +Following you have an example with all output options set. 
+ + - - + Pool Incr + ###################################################---- + |0% |25% |50% |75% 100%| + 10 Volumes (2 F, 0 O, 2 A, 6 E, 0 W, 0 X) Total 59.64GB Rel: 29% avail.: 4.57GB + Details by Mediatype: + DDS1 (0 F, 0 O, 1 A, 4 E, 0 W, 0 X) Total 4.53GB + #### + |0% |25% |50% |75% 100%| + Avg, avail. Partly, Empty, Total, Rel.: N/A N/A N/A N/A 0% + DDS2 (0 F, 0 O, 0 A, 2 E, 0 W, 0 X) Total 0.00B + Avg, avail. Partly, Empty, Total, Rel.: N/A N/A N/A N/A 0% + DLTIV (2 F, 0 O, 1 A, 0 E, 0 W, 0 X) Total 55.11GB + #############################################---- + |0% |25% |50% |75% 100%| + Avg, avail. Partly, Empty, Total, Rel.: 19.89GB 4.57GB N/A 4.57GB 96% + - - + Pool TMPDisk + Nothing to report. + 1 Volumes (0 F, 0 O, 0 A, 1 E, 0 W, 0 X) Total 0.00B Rel: 0% avail.: 0.00B + Details by Mediatype: + File (0 F, 0 O, 0 A, 1 E, 0 W, 0 X) Total 0.00B + Nothing to report. + Avg, avail. Partly, Empty, Total, Rel.: N/A N/A N/A N/A 0% + +Cut marks are included for easier cutting in case you want to file the +printed report. Then, the length of the bar graphs was changed. + +More detail for the pools is shown: Not only the overwiev graphics, +but also a listing of the status of all media in this +pool, followed by the reliability of the guess of available +capacity and the probable available capacity itself. + +After this summary you find a similar report for all media types in +this pool. Here, the media type starts the details line. The next +line is a breakdown of the capacity inside this subpool: The +average capacity of the full volumes, followed by the probable +available capacity on appendable and empty volumes. Total is the +probable free capacity on these volumes, and Rel is the +reliability of the capacity guessing. + +Note that some of the items are not always displayed: A pool or +subpool with no bytes in it will not have a bar graph, and some of +the statistical data is marked as N/A for not available. + +The above output was generated with the following command: + +B<< C<< + baculareport.pl --password \ + --pool-bargraph --pool-details --subpools\ + --subpool-details --subpool-bargraph --bar-length 55\ + --cutmarks >> >> + +The following command would have given the same output: + +B<< C<< + baculareport.pl -P -csdl55\ + --subpool-d --subpool-b >> >> + +=head1 CAPACITY GUESSING + +For empty and appendable volumes, the average capacity of the full +volumes is used as the base for estimating what can be +stored. This usually depends heavily on the type of data to store, +and of course this works only with volumes of the same nominal +capacity. + +The reliability of all this guesswork is expressed based on the +standard deviation among the full volumes, scaled to percent. 100% +is a very reliable estimate (Note: NOT absolutely reliable!) while +a small percentage (from personal experience: below 60-70 percent) +means that you shouldn't rely on the reported available data storage. + +To determine the overall reliability in a pool, the reliabilites of +the subpools are weighted - a subpool with many volumes has a higer +influence on overall reliability. + +Keep in mind that the reported free capacities and reliabilities can +only be a help and don't rely on these figures alone. Keep enough +spare tapes available! + +Default capacities for some media types are included now. Consider this +feature a temporarily kludge - At the moment, there is a very simple +media capacity guessing implemented. Search for the function +`get_default_bytes' and modify it to your needs. 
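+
+Condensed into code (this is only a sketch of what the C<process_pool>
+function below computes; the variable names are illustrative), the
+per-media-type reliability guess is:
+
+    # $n full volumes with mean size $avg and standard deviation $std
+    sub guess_reliability {
+        my ($n, $avg, $std) = @_;
+        return 0 unless $n && $avg;     # no full volumes: no reliable guess
+        return 100 - 100 * ($std / $avg) * (1 - 1 / $n);
+    }
+
+Empty and appendable volumes are then assumed to hold as much as an average
+full volume, and the pool-wide reliability is the per-media-type value
+weighted by the number of volumes of each media type.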
+ +In the future, I expect some nominal volume capacity knowledge inside +baculas catalog, and when this is available, that data will be used. + +Capacity estimates with defaults in the calculation are marked with +B<(def.)> after the reliability percentage. If you see B<0% (def.)> +only the defaults are used because no full tapes were available. + +=head1 DEBUG OUTPUT + +Debugging, or more generally verbose output, is activated by the +--debug command switch. + +The higher the level, the more output you get. + +Currently, levels 10 and up are real debugging output. Levels above +100 are not used. I + +The debug levels used are: + +=over 4 + +=item 1 + +Some warnings are printed. + +=item 10 + +Program Flow is reported. + +=item 15 + +More detailed Program flow, for example loops. + +=item 40 + +Database actions are printed. + +=item 45 + +Table actions are reported. + +=item 48 + +Even more database activity. + +=item 100 + +All internal state data is printed. Beware: This includes the database +password! + +=back + +=head1 BUGS + +Probably many. If you find one, notify the author. Better: notify me +how to correct it. + +Currently this script works only with MySQL and catalog version 8 +(probably older versions as well, but that is untested). + +=head1 AUTHOR + +Arno Lehmann al@its-lehmann.de + +=head1 LICENSE + +This is copyrighted work: (C) 2005 Arno Lehmann IT-Service Lehmann + +Use, modification and (re-)distribution are allowed provided this +license and the names of all contributing authors are included. + +No author or contributor gives any warranty on this script. If you +want to use it, you are all on your own. Please read the documentation, +and, if you feel unsure, read and understand the sourcecode. + +The terms and idea of the GNU GPL, version 2 or, at you option, any +later version, apply. See http://www.fsf.org. + +You can contact the author using the above email address. I will try to +answer any question concerning this script, but still - no promises! + +Bacula is (C) copyright 2000-2006 Free Software Foundation Europe e.V. See http://www.bacula.org. + +(Bacula consulting available.) + +=cut + +sub process_pool { + my %pool = (BytesTotal=>0, + VolumesTotal=>0, + VolumesFull=>0, + VolumesEmpty=>0, + VolumesPartly=>0, + VolumesAway=>0, + VolumesOther=>0, + VolumesOff=>0, + VolumesCleaning=>"Not counted", + BytesFree=>0, + GuessReliability=>0, + AvgFullUsesDefaults=>"" + ); + debug_out(10, "Working on Pool $pools->{Name}."); + $pool{Name} = shift; + $pool{Id} = shift; + my @subpools; + + debug_out(30, "Pool $pool{Name} is Id $pool{Id}."); + my $h_st = $h_db->prepare("SELECT MediaType FROM alrep_M WHERE + PoolId = $pool{Id} ORDER BY MediaType") || + debug_abort(0, + "Can't query Media table.", $h_st->errstr()); + $h_st->execute() || + debug_abort(0, + "Can't get Media Information", $h_st->errstr()); + while (my $mt=$h_st->fetchrow_hashref()) { +# In this loop, we process one media type in a pool + my %subpool = (MediaType=>$mt->{MediaType}); + debug_out(45, "Working on MediaType $mt->{MediaType}."); + my $h_qu = + $h_db->prepare("SELECT COUNT(*) AS Nr,SUM(VolBytes) AS Bytes," . + "STD(VolBytes) AS Std,AVG(VolBytes) AS Avg " . + "FROM Media WHERE (PoolId=$pool{Id}) AND " . + "(MediaType=" . $h_db->quote($mt->{MediaType}) . 
+ ") AND (VolStatus=\'Full\')") + || debug_abort(0, + "Can't query Media Summary Information by MediaType.", + $h_db->errstr()); + debug_out(48, "Query active: ", $h_qu->{Active}?"Yes":"No"); + debug_out(45, "Now selecting Summary Information for $pool{Name}:$mt->{MediaType}:Full"); + debug_out(48, "Query: ", $h_qu->{Statement}, "Params: ", + $h_qu->{NUM_OF_PARAMS}, " Rows: ", $h_qu->rows); + $h_qu->execute(); + debug_out(48, "Result:", $h_qu->rows(), "Rows."); +# Don't know why, but otherwise the handle access +# methods result in a warning... + $^W = 0; + if (1 == $h_qu->rows()) { + if (my $qr = $h_qu->fetchrow_hashref) { + debug_out(45, "Got $qr->{Nr} and $qr->{Bytes}."); + $subpool{VolumesFull} = $qr->{Nr}; + $subpool{VolumesTotal} += $qr->{Nr}; + $subpool{BytesTotal} = $qr->{Bytes} if (defined($qr->{Bytes})); + if (defined($qr->{Bytes}) && (0 < $qr->{Bytes}) && + (0 < $qr->{Nr})) { + $subpool{AvgFullBytes} = int($qr->{Bytes} / $qr->{Nr}); + } else { + $subpool{AvgFullBytes} = get_default_bytes($mt->{MediaType}); + $subpool{AvgFullUsesDefaults} = 1; + } + if (defined($qr->{Std}) && + defined($qr->{Avg}) && + (0 < $qr->{Avg})) { +# $subpool{GuessReliability} = 100-(100*$qr->{Std}/$qr->{Avg}); + $subpool{GuessReliability} = + 100 - # 100 Percent minus... + ( 100 * # Percentage of + ( $qr->{Std}/$qr->{Avg} ) * # V + ( 1 - 1 / $qr->{Nr} ) # ... the more tapes + # the better the guess + ); + } else { + $subpool{GuessReliability} = 0; + } + } else { + debug_out(1, "Can't get Media Summary Information by MediaType.", + $h_qu->errstr()); + $subpool{VolumesFull} = 0; + $subpool{BytesTotal} = 0; + $subpool{GuessReliability} = 0; + $subpool{AvgFullBytes} = -1; + } + } else { + debug_out(45, "Got nothing: ", (defined($h_qu->errstr()))?$h_qu->errstr():"No error."); + } + $^W = 1; +# Here, Full Media are done + debug_out(15, "Full Media done. Now Empty ones."); + $h_qu = + $h_db->prepare("SELECT COUNT(*) AS Nr " . + "FROM Media WHERE (PoolId=$pool{Id}) AND " . + "(MediaType=" . $h_db->quote($mt->{MediaType}) . + ") AND ((VolStatus=\'Purged\') OR " . + "(VolStatus=\'Recycle\'))") + || debug_abort(0, + "Can't query Media Summary Information by MediaType.", + $h_db->errstr()); + debug_out(48, "Query active: ", $h_qu->{Active}?"Yes":"No"); + debug_out(45, "Now selecting Summary Information for $pool{Name}:$mt->{MediaType}:Recycle OR Purged"); + debug_out(48, "Query: ", $h_qu->{Statement}, "Params: ", + $h_qu->{NUM_OF_PARAMS}, " Rows: ", $h_qu->rows); + $h_qu->execute(); + debug_out(48, "Result:", $h_qu->rows(), "Rows."); +# Don't know why, but otherwise the handle access +# methods result in a warning... + $^W = 0; + if (1 == $h_qu->rows()) { + if (my $qr = $h_qu->fetchrow_hashref) { + debug_out(45, "Got $qr->{Nr} and $qr->{Bytes}."); + $subpool{VolumesEmpty} = $qr->{Nr}; + $subpool{VolumesTotal} += $qr->{Nr}; + if (($subpool{AvgFullBytes} > 0) && ($qr->{Nr} > 0)) { + $subpool{BytesFreeEmpty} = $qr->{Nr} * $subpool{AvgFullBytes}; + } else { + $subpool{BytesFreeEmpty} = -1; + } + } else { + debug_out(1, "Can't get Media Summary Information by MediaType.", + $h_qu->errstr()); + $subpool{VolumesEmpty} = 0; + $subpool{BytesFreeEmpty} = 0; + } + } else { + debug_out(45, "Got nothing: ", (defined($h_qu->errstr()))?$h_qu->errstr():"No error."); + } + $^W = 1; +# Here, Empty Volumes are processed. + + debug_out(15, "Empty Media done. Now Partly filled ones."); + $h_qu = + $h_db->prepare("SELECT COUNT(*) AS Nr,SUM(VolBytes) AS Bytes " . + "FROM Media WHERE (PoolId=$pool{Id}) AND " . + "(MediaType=" . 
$h_db->quote($mt->{MediaType}) . + ") AND (VolStatus=\'Append\')") + || debug_abort(0, + "Can't query Media Summary Information by MediaType.", + $h_db->errstr()); + debug_out(48, "Query active: ", $h_qu->{Active}?"Yes":"No"); + debug_out(45, "Now selecting Summary Information for $pool{Name}:$mt->{MediaType}:Append"); + debug_out(48, "Query: ", $h_qu->{Statement}, "Params: ", + $h_qu->{NUM_OF_PARAMS}, " Rows: ", $h_qu->rows); + $h_qu->execute(); + debug_out(48, "Result:", $h_qu->rows(), "Rows."); +# Don't know why, but otherwise the handle access +# methods result in a warning... + $^W = 0; + if (1 == $h_qu->rows()) { + if (my $qr = $h_qu->fetchrow_hashref) { + debug_out(45, "Got $qr->{Nr} and $qr->{Bytes}."); + $subpool{VolumesPartly} = $qr->{Nr}; + $subpool{VolumesTotal} += $qr->{Nr}; + $subpool{BytesTotal} += $qr->{Bytes}; + if (($subpool{AvgFullBytes} > 0) && ($qr->{Nr} > 0)) { + $subpool{BytesFreePartly} = $qr->{Nr} * $subpool{AvgFullBytes} - $qr->{Bytes}; + $subpool{BytesFreePartly} = $qr->{Nr} if $subpool{BytesFreePartly} < 1; + } else { + $subpool{BytesFreePartly} = -1; + } + } else { + debug_out(1, "Can't get Media Summary Information by MediaType.", + $h_qu->errstr()); + $subpool{VolumesPartly} = 0; + $subpool{BytesFreePartly} = 0; + } + } else { + debug_out(45, "Got nothing: ", (defined($h_qu->errstr()))?$h_qu->errstr():"No error."); + } + $^W = 1; +# Here, Partly filled volumes are done + + debug_out(15, "Partly Media done. Now Away ones."); + $h_qu = + $h_db->prepare("SELECT COUNT(*) AS Nr,SUM(VolBytes) AS Bytes " . + "FROM Media WHERE (PoolId=$pool{Id}) AND " . + "(MediaType=" . $h_db->quote($mt->{MediaType}) . + ") AND ((VolStatus=\'Archive\') OR " . + "(VolStatus=\'Read-Only\'))") + || debug_abort(0, + "Can't query Media Summary Information by MediaType.", + $h_db->errstr()); + debug_out(48, "Query active: ", $h_qu->{Active}?"Yes":"No"); + debug_out(45, "Now selecting Summary Information for $pool{Name}:$mt->{MediaType}:Recycle OR Purged"); + debug_out(48, "Query: ", $h_qu->{Statement}, "Params: ", + $h_qu->{NUM_OF_PARAMS}, " Rows: ", $h_qu->rows); + $h_qu->execute(); + debug_out(48, "Result:", $h_qu->rows(), "Rows."); +# Don't know why, but otherwise the handle access +# methods result in a warning... + $^W = 0; + if (1 == $h_qu->rows()) { + if (my $qr = $h_qu->fetchrow_hashref) { + debug_out(45, "Got $qr->{Nr} and $qr->{Bytes}."); + $subpool{VolumesAway} = $qr->{Nr}; + $subpool{VolumesTotal} += $qr->{Nr}; + $subpool{BytesTotal} += $qr->{Bytes}; + } else { + debug_out(1, "Can't get Media Summary Information by MediaType.", + $h_qu->errstr()); + $subpool{VolumesAway} = 0; + } + } else { + debug_out(45, "Got nothing: ", (defined($h_qu->errstr()))?$h_qu->errstr():"No error."); + } + $^W = 1; +# Here, Away Volumes are processed. + + debug_out(15, "Away Media done. Now Other ones."); + $h_qu = + $h_db->prepare("SELECT COUNT(*) AS Nr,SUM(VolBytes) AS Bytes " . + "FROM Media WHERE (PoolId=$pool{Id}) AND " . + "(MediaType=" . $h_db->quote($mt->{MediaType}) . + ") AND ((VolStatus=\'Busy\') OR " . 
+ "(VolStatus=\'Used\'))") + || debug_abort(0, + "Can't query Media Summary Information by MediaType.", + $h_db->errstr()); + debug_out(48, "Query active: ", $h_qu->{Active}?"Yes":"No"); + debug_out(45, "Now selecting Summary Information for $pool{Name}:$mt->{MediaType}:Recycle OR Purged"); + debug_out(48, "Query: ", $h_qu->{Statement}, "Params: ", + $h_qu->{NUM_OF_PARAMS}, " Rows: ", $h_qu->rows); + $h_qu->execute(); + debug_out(48, "Result:", $h_qu->rows(), "Rows."); +# Don't know why, but otherwise the handle access +# methods result in a warning... + $^W = 0; + if (1 == $h_qu->rows()) { + if (my $qr = $h_qu->fetchrow_hashref) { + debug_out(45, "Got $qr->{Nr} and $qr->{Bytes}."); + $subpool{VolumesOther} = $qr->{Nr}; + $subpool{VolumesTotal} += $qr->{Nr}; + $subpool{BytesTotal} += $qr->{Bytes}; + } else { + debug_out(1, "Can't get Media Summary Information by MediaType.", + $h_qu->errstr()); + $subpool{VolumesOther} = 0; + } + } else { + debug_out(45, "Got nothing: ", (defined($h_qu->errstr()))?$h_qu->errstr():"No error."); + } + $^W = 1; +# Here, Other Volumes are processed. + + debug_out(15, "Other Media done. Now Off ones."); + $h_qu = + $h_db->prepare("SELECT COUNT(*) AS Nr,SUM(VolBytes) AS Bytes " . + "FROM Media WHERE (PoolId=$pool{Id}) AND " . + "(MediaType=" . $h_db->quote($mt->{MediaType}) . + ") AND ((VolStatus=\'Disabled\') OR " . + "(VolStatus=\'Error\'))") + || debug_abort(0, + "Can't query Media Summary Information by MediaType.", + $h_db->errstr()); + debug_out(48, "Query active: ", $h_qu->{Active}?"Yes":"No"); + debug_out(45, "Now selecting Summary Information for $pool{Name}:$mt->{MediaType}:Recycle OR Purged"); + debug_out(48, "Query: ", $h_qu->{Statement}, "Params: ", + $h_qu->{NUM_OF_PARAMS}, " Rows: ", $h_qu->rows); + $h_qu->execute(); + debug_out(48, "Result:", $h_qu->rows(), "Rows."); +# Don't know why, but otherwise the handle access +# methods result in a warning... + $^W = 0; + if (1 == $h_qu->rows()) { + if (my $qr = $h_qu->fetchrow_hashref) { + debug_out(45, "Got $qr->{Nr} and $qr->{Bytes}."); + $subpool{VolumesOff} = $qr->{Nr}; + $subpool{VolumesTotal} += $qr->{Nr}; + } else { + debug_out(1, "Can't get Media Summary Information by MediaType.", + $h_qu->errstr()); + $subpool{VolumesOff} = 0; + } + } else { + debug_out(45, "Got nothing: ", (defined($h_qu->errstr()))?$h_qu->errstr():"No error."); + } + $^W = 1; +# Here, Off Volumes are processed. + + if ((0 < $subpool{BytesFreeEmpty}) || + (0 < $subpool{BytesFreePartly})) { + debug_out(15, "We have a guess."); + $subpool{BytesFree} = 0; + $subpool{BytesFree} += $subpool{BytesFreeEmpty} if + (0 < $subpool{BytesFreeEmpty}); + $subpool{BytesFree} += $subpool{BytesFreePartly} if + (0 < $subpool{BytesFreePartly}); + } else { + debug_out(15, "Neither Empty nor Partly BytesFree available - no guess!"); + $subpool{BytesFree} = -1; + } + if ($subpool{AvgFullUsesDefaults}) { + debug_out(15, "Average Full Capacity calculation included defaults."); + $pool{AvgFullUsesDefaults} = 1; + } + $pool{BytesTotal} += $subpool{BytesTotal}; + $pool{VolumesTotal} += $subpool{VolumesTotal}; + $pool{VolumesFull} += $subpool{VolumesFull}; + $pool{VolumesEmpty} += $subpool{VolumesEmpty}; + $pool{VolumesPartly} += $subpool{VolumesPartly}; + $pool{VolumesAway} += $subpool{VolumesAway}; + $pool{VolumesOther} += $subpool{VolumesOther}; + $pool{VolumesOff} += $subpool{VolumesOff}; +# not counted! 
+# $pool{VolumesCleaning} += $subpool{VolumesCleaning}; + + $pool{BytesFree} += $subpool{BytesFree} if ($subpool{BytesFree} > 0); + + debug_out(10, "Now storing sub-pool with MediaType", $subpool{MediaType}); + push @subpools, \%subpool; + } + $pool{MediaTypes} = \@subpools; +# GuessReliability + my $allrels = 0; + my $subcnt = scalar(@{$pool{MediaTypes}}); + my $guess_includes_defaults = 0; + debug_out(10, "Summarizing Reliabilities from $subcnt sub-pools."); + foreach my $rel (@{$pool{MediaTypes}}) { + $allrels += $rel->{GuessReliability} * $rel->{VolumesTotal}; + } + debug_out(15, "We have $allrels summed/weighted reliabilites and $pool{VolumesTotal} Volumes."); + if ($pool{VolumesTotal} > 0) { + $pool{GuessReliability} = $allrels / $pool{VolumesTotal}; + } else { + $pool{GuessReliability} = "N/A"; + } + push @the_pools, \%pool; +} + +sub output_pool { + debug_out(10, "Printing pool data."); + my $pool = shift; + $pool->{GuessReliability} += 1000.0 if + (($pool->{GuessReliability} ne "N/A") && + $pool->{AvgFullUsesDefaults}); + printf((($out_cutmarks)?" -" . " " x ($out_bargraphlen - 6) . "-\n": + "\n") . + "Pool%15.15s%s\n", "$pool->{Name}", + ($debug>=5)?sprintf(" %5.9s", "(" . $pool->{Id} . ")"):""); + my $poolbarbytes = $pool->{BytesTotal} + $pool->{BytesFree}; + if ($out_bargraph) { + print bargraph($out_bargraphlen, 2, + $poolbarbytes, + $pool->{BytesTotal}, $pool->{BytesFree}); + } + if ($out_pooldetails) { + print(" $pool->{VolumesTotal} Volumes ($pool->{VolumesFull} F, ", + "$pool->{VolumesOther} O, $pool->{VolumesPartly} A, ", + "$pool->{VolumesEmpty} E, $pool->{VolumesAway} W, ", + "$pool->{VolumesOff} X) Total ", + human_readable("B", $pool->{BytesTotal}), + " Rel: ", human_readable("P", $pool->{GuessReliability}), + " avail.: ", human_readable("B", $pool->{BytesFree}), "\n"); + } else { + print bargraph_legend($out_bargraphlen, 2, + $pool->{BytesTotal} + $pool->{BytesFree}, + $pool->{BytesTotal}, $pool->{BytesFree}, + $pool->{VolumesFull}, $pool->{VolumesPartly}, + $pool->{VolumesEmpty}, $pool->{GuessReliability}); + } + if ($out_subpools) { + debug_out(10, "Printing details:", $#{$pool->{MediaTypes}}+1, "MediaTypes"); + if (0 < scalar($pool->{MediaTypes})) { + print " Details by Mediatype:\n"; + foreach my $i (@{$pool->{MediaTypes}}) { + debug_out(15, "Media Type $i->{MediaType}"); + $i->{GuessReliability} += 1000.0 if ($i->{AvgFullUsesDefaults}); + print(" $i->{MediaType} ($i->{VolumesFull} F, ", + "$i->{VolumesOther} O, $i->{VolumesPartly} A, ", + "$i->{VolumesEmpty} E, $i->{VolumesAway} W, " , + "$i->{VolumesOff} X) Total ", + human_readable("B", $i->{BytesTotal}), "\n"); + if ($out_subbargraph) { + print bargraph($out_bargraphlen - 3, 5, + $poolbarbytes, + $i->{BytesTotal}, + $i->{BytesFree}); + } + if ($out_subpooldetails) { + print " Avg, avail. 
Partly, Empty, Total, Rel.: ", + ($i->{AvgFullBytes} > 0)?human_readable("B", $i->{AvgFullBytes}):"N/A", " ", + ($i->{BytesFreePartly} > 0)?human_readable("B", $i->{BytesFreePartly}):"N/A", " ", + ($i->{BytesFreeEmpty} > 0)?human_readable("B", $i->{BytesFreeEmpty}):"N/A", " ", + ($i->{BytesFree} > 0)?human_readable("B", $i->{BytesFree}):"N/A", " ", + human_readable("P", $i->{GuessReliability}), "\n"; + } else { + print bargraph_legend($out_bargraphlen - 3, 5, + $poolbarbytes, + $i->{BytesTotal}, + $i->{BytesFree}, + $i->{VolumesFull}, + $i->{VolumesPartly}, + $i->{VolumesEmpty}, + $i->{GuessReliability} + ) if ($out_subbargraph); + } + } + } + } +} + +sub bargraph_legend { + debug_out(15, "bargraph_legend called with ", join(":", @_)); + my ($len, $pad, $b_all, $b_tot, $b_free, $v_total, $v_app, + $v_empty, $g_r) = @_; + if ((9 == scalar(@_)) && + defined($len) && ($len >= 0) && ($len =~ /^\d+$/) && + defined($pad) && ($pad >= 0) && ($pad =~ /^\d+$/) && + defined($b_all) && ($b_all =~ /^\d+$/) && + defined($b_tot) && ($b_tot =~ /^-?\d+$/) && + defined($b_free) && ($b_free =~ /^-?\d+$/) && + defined($v_total) && ($v_total =~ /^\d+$/) && + defined($v_app) && ($v_app =~ /^\d+$/) && + defined($v_empty) && ($v_empty =~ /^\d+$/) && + ($g_r =~ /^([+-]?)(?=\d|\.\d)\d*(\.\d*)?([Ee]([+-]?\d+))?/) + ) { + return "" if ( 0 == $b_all); + $b_tot = 0 if ($b_tot < 0); + $b_free = 0 if ($b_free < 0); + return "" if (0 == ($b_tot + $b_free)); + my ($ll, $lm); + my $l1 = human_readable("B", $b_tot) . " used "; + my $l2 = "Rel: " . human_readable("P", $g_r) . " free " . human_readable("B", $b_free); + $ll = $l1 . " " x ($len - length($l1) - length($l2)) . $l2; + $l1 = $v_total . " F Volumes "; + $l2 = $v_app . " A and " . $v_empty . " E Volumes"; + $lm = $l1 . " " x ($len - length($l1) - length($l2)) . $l2; + return " " x $pad . $ll . "\n" . + " " x $pad . $lm . "\n"; + } else { + debug_out(1, "bargraph_legend called without proper parameters"); + return ""; + } +} + +sub bargraph { + debug_out(15, "bargraph called with ", join(":", @_)); + my ($len, $pad, $p_all, $p_full, $p_empty) = @_; + if ((5 == scalar(@_)) && + defined($len) && ($len >= 0) && ($len =~ /^\d+$/) && + defined($pad) && ($pad >= 0) && ($pad =~ /^\d+$/) && + defined($p_full) && ($p_full =~ /^-?\d+$/) && + defined($p_empty) && ($p_empty =~ /^-?\d+$/) && + defined($p_all) && ($p_all >= $p_full + $p_empty) && + ($p_all =~ /^\d+$/) + ) { + $len = 12 if ($len < 12); + $p_full = 0 if ($p_full < 0); + $p_empty = 0 if ($p_empty < 0); + debug_out(15, "bargraph: len $len all $p_all full $p_full empty $p_empty"); + return " " x $pad . "Nothing to report.\n" if (0 == $p_all); + return "" if (0 == ($p_full + $p_empty)); + my $contperbox = $p_all / $len; + my $boxfull = sprintf("%u", ($p_full / $contperbox) + 0.5); + my $boxempty = sprintf("%u", ($p_empty / $contperbox) + 0.5); + my $boxnon = $len - $boxfull - $boxempty; + debug_out(15, "bargraph: output $boxfull $boxempty $boxnon"); + $contperbox = sprintf("%f", $len / 100.0); + my $leg = "|0%"; + my $ticks = sprintf("%u", ($len-12) / 12.5); + my $be = 0; + my $now = 4; + for my $i (1..$ticks) { + debug_out(15, "Tick loop. Previous pos: $now Previous Tick: ", $i-1); + my $pct = sprintf("%f", 100.0 / ($ticks+1.0) * $i); + $be = sprintf("%u", 0.5 + ($pct * $contperbox)); + debug_out(15, "Tick $i ($pct percent) goes to pos $be. Chars per Percent: $contperbox"); + my $bl = $be - $now; + debug_out(15, "Need $bl blanks to fill up."); + $leg .= " " x $bl . 
sprintf("|%2u%%", 0.5 + $pct); + $now = $be + 4; + } + debug_out(15, "Fillup... Now at pos $now and $contperbox char/pct."); + $be = $len - $now - 4; + $leg .= " " x $be . "100%|"; + return " " x $pad . "#" x $boxfull . "-" x $boxempty . + " " x $boxnon . "\n" . " " x $pad . "$leg\n"; + } else { + debug_out(1, "bargrahp called without proper parameters."); + return ""; + } +} + +sub human_readable { + debug_out(15, "human_readable called with ", join(":", @_)); + if (2 == scalar(@_)) { + debug_out(15, "2 Params - let's see what we've got."); + my ($t, $v) = @_; + SWITCH: for ($t) { + /B/ && do { + debug_out(15, "Working with Bytes."); + my $d = 'B'; + if ($v > 1024) { + $v /= 1024; + $d = 'kB'; + } + if ($v > 1024) { + $v /= 1024; + $d = 'MB'; + } + if ($v > 1024) { + $v /= 1024; + $d = 'GB'; + } + if ($v > 1024) { + $v /= 1024; + $d = 'TB'; + } + return sprintf("%0.2f%s", $v, $d); + last SWITCH; + }; + /P/ && do { + debug_out(15, "Working with Percent value."); + my $ret = $v; + if ($v =~ /^([+-]?)(?=\d|\.\d)\d*(\.\d*)?([Ee]([+-]?\d+))?/) { + if ($v >= 1000.0) { + $ret = " (def.)"; + $v -= 1000.0; + } else { + $ret = ""; + } + $ret = sprintf("%1.0f%%", $v) . $ret; + } + return $ret; + last SWITCH; + }; + return $v; + } + } else { + return join("", @_); + } +} + +sub get_default_bytes { + debug_out(15, "get_default_bytes called with ", join(":", @_)); + if (1 == scalar(@_)) { + debug_out(15, "1 Param - let's see what we've got."); + SWITCH: for (@_) { + /DDS/ && return 2000000000; + /DDS1/ && return 2000000000; + /DDS2/ && return 4000000000; + /DLTIV/ && return 20000000000; + /DC6525/ && return 525000000; + /File/ && return 128*1024*1024; + { + debug_out(0, "$_ is not a known Media Type. Assuming 1 kBytes"); + return 1024; + }; + }; + } else { + debug_out(0, "This is not right..."); + return 999; + } +} + +sub debug_out { + if ($debug >= shift) { + print "@_\n"; + } +} + +sub debug_abort { + debug_out(@_); + do_closedb(); + exit 1; +} + +sub do_closedb { + my $t; + debug_out(40, "Closing database connection..."); + while ($t=shift @temp_tables) { + debug_out(40, "Now dropping table $t"); + $h_db->do("DROP TABLE $t") || debug_out(0, "Can't drop $t."); + } + $h_db->disconnect(); + debug_out(40, "Database disconnected."); +} + +sub do_usage { + print< -M -w -c [-S]" +} + +print_help() { + echo "" + print_usage + echo "" + echo "This plugin checks the space available in the pool against the space required for the next scheduled backups" + echo "Example : $PROGNAME -P default -M LTO -w 20 -c 10 will check the default pool, return OK if (available space) > 1,20*(required space), WARNING if 1,20*(required space) > (available space) > 1,10*(required space), and CRITICAL else." + echo "" + echo "With the -S option, it will check the pool named Scratch and return WARNING instead of CRITICAL if the Scratch pool can save the situation." + echo "Example : $PROGNAME -P default -M LTO -w 20 -c 10 -S will check the default pool, return OK if (available space) > 1,20*(required space), WARNING if 1,20*(required space) > (available space) > 1,10*(required space) or if (available space in default and Scratch) > 1,10*(required space) > (available space in default), and CRITICAL else." 
+ echo "" + echo "The evaluation of the space required is done by adding the biggest backups of the same level than the scheduled jobs" + echo "The available space is evaluated by the number of out of retention tapes and the average VolBytes of these Full tapes" + echo "" + echo "The Information Status are : \"Required, Available, Volume Errors\" and \"Will use Scratch pool\" if necessary." + echo "" + echo "I think this plugin should be used in passive mode, and ran by a RunAfterJob" + exit 3 +} + +NB_ARGS=$# +SCRATCH=0 +while getopts :P:M:w:c:hS OPTION +do + case $OPTION in + P) POOL="$OPTARG" + ;; + M) MEDIA_TYPE="$OPTARG" + ;; + S) SCRATCH=1 + ;; + w) WARNING="$OPTARG" + ;; + c) CRITICAL="$OPTARG" + ;; + h) print_help + exit 3 + ;; + *) print_usage + exit 3 + ;; + esac +done +shift $(($OPTIND - 1)) + +if [ "$NB_ARGS" -ne 8 -a "$NB_ARGS" -ne 9 ]; then + print_revision $PROGNAME 13/06/2006 + print_usage + exit 3 +fi + +LAST_CHECK=`ps -ef | grep check_ba[Cc]ula_pools.sh | awk {'print $5'} | uniq | wc -l` +if [ "$LAST_CHECK" -gt 1 ]; then + echo "The last check was not complete, you should increase the check_period." + exit 3 +fi + +NB_VOLUMES=`$MYSQL << EOF +USE bacula +SELECT COUNT(MediaId) from Media, Pool where Media.PoolId=Pool.PoolId and Pool.Name="$POOL" AND Inchanger = "1"; +EOF` + +NB_VOLUMES=`echo $NB_VOLUMES | cut -f 2 -d ' '` + +echo "st +1 +q" | $BACULA/etc/bconsole | sed -n /Scheduled/,/Running/p | grep Backup | tr -s [:blank:] | tr '[:blank:]' '@' > ${TMP}/Scheduled.txt + +CAPA_REQUIRED=0 +for LINE in `cat ${TMP}/Scheduled.txt` +do + SCHEDULED_JOB=`echo $LINE | awk -F@ '{print $6}'` + LEVEL=`echo $LINE | awk -F@ '{print $1}' | cut -c 1` + +MAX_VOLUME_JOB_FOR_LEVEL=`$MYSQL << EOF +USE bacula +SELECT MAX(JobBytes) from Job, Pool where Level="$LEVEL" AND Job.Name="$SCHEDULED_JOB" AND Job.PoolId=Pool.PoolId AND Pool.Name="$POOL"; +EOF +` +MAX_VOLUME_JOB_FOR_LEVEL=`echo $MAX_VOLUME_JOB_FOR_LEVEL | cut -f 2 -d ' ' ` + +CAPA_REQUIRED=$[CAPA_REQUIRED+MAX_VOLUME_JOB_FOR_LEVEL] +done + +rm ${TMP}/Scheduled.txt + + +if [ $NB_VOLUMES -gt 0 ] +then + + NB_VOLUMES_OUT_OF_RETENTION=`$MYSQL << EOF +USE bacula +SELECT COUNT(MediaId) from Media, Pool where Media.PoolId=Pool.PoolId and Pool.Name="$POOL" AND LastWritten <> "0000-00-00 00:00:00" AND UNIX_TIMESTAMP()-UNIX_TIMESTAMP(LastWritten)>Media.VolRetention AND Inchanger = "1"; +EOF +` + NB_VOLUMES_OUT_OF_RETENTION=`echo $NB_VOLUMES_OUT_OF_RETENTION | cut -f 2 -d ' '` + +NB_VOLUMES_ERROR=`$MYSQL << EOF +USE bacula +SELECT COUNT(MediaId) from Media, Pool where Media.PoolId=Pool.PoolId and Pool.Name="$POOL" AND VolStatus="Error" AND Inchanger = "1"; +EOF +` +NB_VOLUMES_ERROR=`echo $NB_VOLUMES_ERROR | cut -f 2 -d ' '` + +AVERAGE_CAPA_VOLUME=`$MYSQL << EOF +USE bacula +SELECT SUM(VolBytes)/COUNT(MediaId) FROM Media where VolStatus="Full" AND MediaType="$MEDIA_TYPE"; +EOF +` +AVERAGE_CAPA_VOLUME=`echo $AVERAGE_CAPA_VOLUME | cut -f 2 -d ' ' | cut -f 1 -d '.'` + +CAPA_VOLUMES_APPEND=`$MYSQL << EOF +USE bacula +SELECT SUM("$AVERAGE_CAPA_VOLUME"-VolBytes) from Media, Pool where Media.PoolId=Pool.PoolId and Pool.Name="$POOL" AND (VolStatus = "Append" OR VolStatus = "Recycle" OR VolStatus = "Purge") AND Inchanger = "1" AND MediaType="$MEDIA_TYPE"; +EOF +` +CAPA_VOLUMES_APPEND=`echo $CAPA_VOLUMES_APPEND | cut -f 2 -d ' '` + +if [ $SCRATCH -eq 1 ] +then +CAPA_VOLUMES_SCRATCH=`$MYSQL << EOF +USE bacula +SELECT SUM("$AVERAGE_CAPA_VOLUME"-VolBytes) from Media, Pool where Media.PoolId=Pool.PoolId and Pool.Name="Scratch" AND VolStatus = "Append" AND 
Inchanger = "1" AND MediaType="$MEDIA_TYPE"; +EOF +` +CAPA_VOLUMES_SCRATCH=`echo $CAPA_VOLUMES_SCRATCH | cut -f 2 -d ' '` +else +CAPA_VOLUMES_SCRATCH=0 +fi + +CAPA_WARNING=`echo $[(WARNING+100)*CAPA_REQUIRED]/100 | bc | cut -f 1 -d '.'` +CAPA_CRITICAL=`echo $[(CRITICAL+100)*CAPA_REQUIRED]/100 | bc | cut -f 1 -d '.'` +CAPA_DISP=$[NB_VOLUMES_OUT_OF_RETENTION*AVERAGE_CAPA_VOLUME+CAPA_VOLUMES_APPEND] +CAPA_DISP_INCLUDING_SCRATCH=$[CAPA_DISP+CAPA_VOLUMES_SCRATCH] + +MESSAGE="Required : $[CAPA_REQUIRED/1000000000] Go, available : $[CAPA_DISP/1000000000] Go, Volumes Error : $NB_VOLUMES_ERROR" + +if [ "$CAPA_DISP" -gt $CAPA_WARNING ]; then + echo $MESSAGE + exit 0 +elif [ "$CAPA_DISP" -gt $CAPA_CRITICAL ];then + echo $MESSAGE + exit 1 +elif [ "$CAPA_DISP_INCLUDING_SCRATCH" -gt $CAPA_CRITICAL ];then + MESSAGE="${MESSAGE}. Will use Scratch Pool !" + echo $MESSAGE + exit 1 +else + exit 2 +fi +exit 3 + +else + echo "No volume in pool ${POOL}" + if [ "$CAPA_REQUIRED" -gt 0 ] + then exit 2 + else exit 0 + fi +fi diff --git a/examples/reports/checkhost b/examples/reports/checkhost new file mode 100755 index 00000000..b201798a --- /dev/null +++ b/examples/reports/checkhost @@ -0,0 +1,53 @@ +#!/usr/bin/perl + +use strict; +use Net::Ping; +use Net::Telnet (); +use Getopt::Long; +use IPC::Open2; + +# +# Check if a Bacula client is alive. By Phil Stracchino. +# +# Return values: +# -1 Program error or no host specified +# 0 Success, FD found and responding +# 1 Client alive but FD not listening +# 2 Client not found on network + +my $ret = -1; +my ($host, $p, $ret, $verbose); +GetOptions('verbose' => \$verbose, + 'v' => \$verbose); + +$host = shift || die "No host specified!\n"; + +$p = Net::Ping->new(); +if ($p->ping($host)) +{ + print "Host $host is alive\n" if ($verbose); + my $t = new Net::Telnet (Timeout => 10, + Port => 9102, + Prompt => '/bash\$ $/'); + if ($t->open($host)) + { + print "Bacula-FD listening on port 9102\n" if ($verbose); + $ret = 0; + } + else + { + print "Bacula-FD not found\n" if ($verbose); + $ret = 1; + } + $t->close; +} +else +{ + print "$host is unreachable\n" if ($verbose); + $ret = 2; +} +$p->close(); + +print "Returning value $ret\n" if ($verbose); + +exit ($ret); diff --git a/examples/reports/is_client_alive b/examples/reports/is_client_alive new file mode 100755 index 00000000..3bd25047 --- /dev/null +++ b/examples/reports/is_client_alive @@ -0,0 +1,21 @@ +#!/bin/sh +# +# Test if Bacula Client is alive +# exit 0 if specified client (arg1) is alive +# exit 1 if specified client (arg1) is NOT alive +# exit 2 if console could NOT connect to Director +# +OUTF=/tmp/client-${1}-alive +rm -f ${OUTF} +./console >/dev/null <<__EOD +@output /dev/null +messages +@output ${OUTF} +status client=${1} +messages +quit +__EOD +grep -s "Daemon started" ${OUTF} 2>&1 >/dev/null +rtn=$? +rm -f ${OUTF} +exit ${rtn} diff --git a/examples/reports/next_tape.sh b/examples/reports/next_tape.sh new file mode 100755 index 00000000..d09f44a6 --- /dev/null +++ b/examples/reports/next_tape.sh @@ -0,0 +1,78 @@ +#!/bin/bash +# +# A script which kicks out messages if a new tape is required for the next job. +# It may be used as RunAfterJob script and it works fine for me. +# Maybe someone considers it useful or has some ideas to improve it. 
+# +# Contributed by Dirk grosse Osterhues +# +# select language: english (en) or german (de) +LANG="en" +# reciepient-address for notification +MAILTO_ADDR="your-email-address" +# bcc-address for notification +BCC_ADDR="email-address" +# directory for temp-files +TEMP_DIR="/tmp/bacula" +# bacula's console.conf +CONSOLE_CONF=/etc/bacula/bconsole.conf +############################################ + +# test if console.conf exists +if [ ! -f $CONSOLE_CONF ]; then + echo "You need to reconfigure varible \$CONSOLE_CONF" + exit 1 +fi +# get todays tape +director_output() { +/usr/sbin/bacula-console -c $CONSOLE_CONF <$TEMP_DIR/NEXT-TAPE-$YESTERDAY +fi +echo $TAPE_TODAY>$TEMP_DIR/NEXT-TAPE-$TODAY + +# definition of language-dependent variables +case $LANG in + de) + MAIL_SUBJECT="[Bacula] Bitte Tape wechslen!" + MAIL_BODY="Nachricht von Bacula-Backup-System auf $HOST:\ + \n\n Tape entfernen:\t\""$TAPE_YESTERDAY"\"\ + \n Tape einlegen: \t\""$TAPE_TODAY"\"" + ;; + en) + MAIL_SUBJECT="[Bacula] Please replace Tape tonight!" + MAIL_BODY="Message from bacula-backup-service on $HOST:\ + \n\n Remove Tape:\t\""$TAPE_YESTERDAY"\"\ + \n Insert Tape:\t\""$TAPE_TODAY"\"" + ;; +esac + +# send notification +if [ $TAPE_TODAY != $TAPE_YESTERDAY ] ; then + echo -e $MAIL_BODY | mail -a "X-Bacula: Tape-Notifier on $HOST" -s "`echo $MAIL_SUBJECT`" -b $BCC_ADDR $MAILTO_ADDR +fi + +# remove older temp-files +find $TEMP_DIR -type f -name NEXT-TAPE-\*| while read I; do + TAPE_FILE=${I##/tmp/bacula/} + if [ $TAPE_FILE ]; then + if [ $TAPE_FILE != NEXT-TAPE-$TODAY ] && [ $TAPE_FILE != NEXT-TAPE-$YESTERDAY ]; then + rm $TEMP_DIR/$TAPE_FILE + fi + fi +done diff --git a/examples/reports/pool-query.txt b/examples/reports/pool-query.txt new file mode 100644 index 00000000..5e233546 --- /dev/null +++ b/examples/reports/pool-query.txt @@ -0,0 +1,108 @@ +From: Arno Lehmann +Organization: IT-Service Lehmann +Subject: [Bacula-users] Pool information +Date: Wed, 15 Dec 2004 23:00:50 +0100 + +Hi all, + +I've been playing around a bit and created a small SQL program which +tries to give some useful information on pool usage in bacula. + +It gives you information like this: ++--------+----+------------+---------+----------+------------+------+ +| Pool | Nr | GB_Total | Nr_Full | Nr_Avail | GB_Avail | V | ++--------+----+------------+---------+----------+------------+------+ +| D-Full | 10 | 130.002 | 5 | 4 | 90.364 | 87% | +| Diff | 5 | 16.217 | 2 | 3 | 12.773 | 52% | +| Full | 29 | 63.994 | 23 | 6 | 14.284 | 25% | +| Incr | 9 | 32.844 | 7 | 2 | 6.838 | 91% | +| QIC | 15 | 3.978 | 1 | 14 | 3.657 | 0% | ++--------+----+------------+---------+----------+------------+------+ +and doesn't break the catalog :-) +It's in no way optimized, but the impact on the database should not be +too big. + +Might be helpful sometimes, for example before a holiday. + +Here, it runs with MySQL 3.23.33. I'm not sure, but the function STD is +probably not ANSI-SQL. According to the MySQL manual, STDDEV is Oracles +version, so probably PostgreSQL has something similar... Implementing +Standard Deviation is otherwise quite inefficient, I'm afraid... + +If someone can improve or enhance the script - go on! + +Simply add this to he end of the query.sql file, usually found in +/etc/bacula under linux. + +Oh, and to make this as clearly as possible: +Anybody may use, modify, distribute or ignore this script without any +limitations. 
+ +Arno + + +# 20 +:Show Pool usage +CREATE TABLE tempal_F(Pool TINYBLOB NOT NULL, + Nr_Full INTEGER NOT NULL,GB_Full DECIMAL(9,3) NOT NULL, + Cap_Avg DECIMAL(15,0),V DECIMAL(3,2)); +CREATE TABLE tempal_E(Pool TINYBLOB NOT NULL, + Nr_Empty INTEGER NOT NULL); +CREATE TABLE tempal_P(Pool TINYBLOB NOT NULL,Nr_Partly INTEGER NOT NULL, + GB_Partly DECIMAL(9,3) NOT NULL); +CREATE TABLE tempal_T(Pool TINYBLOB NOT NULL,Nr INTEGER NOT NULL, + GB_Total DECIMAL(9,3) NOT NULL); +INSERT INTO tempal_F + SELECT Pool.Name,COUNT(*),ROUND(SUM(VolBytes)/1024/1024/1024,3), + AVG(VolBytes),STD(VolBytes)/AVG(VolBytes) FROM Media,Pool + WHERE Media.VolStatus='Full' AND Media.PoolId=Pool.PoolId + GROUP BY Pool.PoolId; +INSERT INTO tempal_P + SELECT Pool.Name,COUNT(*),ROUND(SUM(VolBytes)/1024/1024/1024,3) + FROM Media,Pool + WHERE (Media.VolStatus='Append' OR Media.VolStatus='Busy') + AND Media.PoolId=Pool.PoolId + GROUP BY Pool.PoolId; +INSERT INTO tempal_E + SELECT Pool.Name,COUNT(*) + FROM Media,Pool + WHERE (Media.VolStatus='Recycle' OR Media.VolStatus='Purged') + AND Media.PoolId=Pool.PoolId + GROUP BY Pool.PoolId; +INSERT INTO tempal_T + SELECT Pool.Name AS Pool,COUNT(*), + ROUND(SUM(VolBytes)/1024/1024/1024,3) + FROM Media,Pool + WHERE (Media.VolStatus='Full' OR (Media.Volstatus='Archive') + OR (Media.Volstatus='Append') OR (Media.Volstatus='Read-Only') + OR (Media.Volstatus='Busy') OR (Media.Volstatus='Used') + OR (Media.VolStatus='Disabled') OR (Media.VolStatus='Error')) + AND Media.PoolId=Pool.PoolId + GROUP BY Pool.PoolId; +CREATE TABLE tempal_N(Note TINYBLOB); +INSERT INTO tempal_N + VALUES("Only Pools with full and appendable volumes are shown!"); +INSERT INTO tempal_N + VALUES("V is a measurement for the reliability of the *guess*"); +INSERT INTO tempal_N + VALUES("of average volume capacity."); +SELECT * FROM tempal_N; +DROP TABLE IF EXISTS tempal_N; +SELECT tempal_F.Pool,Nr+Nr_Empty AS Nr,LPAD(GB_Total,10,' ') AS GB_Total, + Nr_Full,Nr_Partly+Nr_Empty AS Nr_Avail, + LPAD(ROUND(GREATEST(0.0007,(Nr_Partly+Nr_Empty)* + (GB_Full/Nr_Full)-GB_Partly),3),10,' ') AS GB_Avail, + CONCAT(LPAD(ROUND( + 100-(100*(V+1/(Nr_Full*Nr_Full*Nr_Full))),0),3,' '),'%') + AS V + FROM tempal_P,tempal_F,tempal_T,tempal_E + WHERE tempal_F.Pool=tempal_T.Pool + AND tempal_F.Pool=tempal_P.Pool + AND tempal_E.Pool=tempal_T.Pool + GROUP BY Pool + ORDER BY Pool; +!DROP TABLE tempal_P,tempal_E,tempal_T,tempal_F; + +-- +IT-Service Lehmann al@its-lehmann.de +Arno Lehmann http://www.its-lehmann.de diff --git a/examples/reports/report.pl b/examples/reports/report.pl new file mode 100755 index 00000000..7859d8f4 --- /dev/null +++ b/examples/reports/report.pl @@ -0,0 +1,116 @@ +#!/usr/bin/perl +# +# A bacula job report generator. 
+# It require MySQL 4.1.x or later +# +# If you have any comments question feel free to contact me, jb@soe.se +# +# /Jonas Bjrklund +# + +use DBI; + +$db_host = "localhost"; +$database = "bacula"; +$db_username = "bacula"; +$db_password = "bacula"; +$email = "$ARGV[0]"; +$from = "backup\@example.net"; +$when = "$ARGV[1]"; + +if (!@ARGV) { + print "\n report.pl email@hostname.com (TODAY|YESTERDAY|WEEK|MONTH)\n\n"; + exit; +} + + +if ($when eq "MONTH") { + $where = "StartTime > DATE_FORMAT(now() - INTERVAL 1 MONTH, '%Y-%m-%d')"; + $order = "ORDER BY StartTime DESC"; +} elsif ($when eq "WEEK") { + $where = "StartTime > DATE_FORMAT(now() - INTERVAL 7 DAY, '%Y-%m-%d')"; + $order = "ORDER BY StartTime DESC"; +} elsif ($when eq "YESTERDAY") { + $where = "StartTime > DATE_FORMAT(now() - INTERVAL 1 DAY, '%Y-%m-%d') AND StartTime < DATE_FORMAT(now(), '%Y-%m-%d')"; + $order = "ORDER BY JobStatus,Time DESC"; +} else { + $when = "TODAY"; + $where = "StartTime > curdate()"; + $order = "ORDER BY JobStatus,Time DESC"; +} + +$sqlquery = "SELECT JobStatus,Name,Level,JobBytes,JobFiles,DATE_FORMAT(StartTime, '%Y-%m-%d %H:%i') AS Start, TIMEDIFF(EndTime,StartTime) AS Time,PoolId + FROM Job WHERE + $where + $order"; + +$dbh = DBI->connect("DBI:mysql:database=$database:host=$db_host", $db_username,$db_password) or die; + +my $sth = $dbh->prepare("$sqlquery"); $sth->execute() or die "Can't execute SQL statement : $dbh->errstr"; +while(($jobstatus,$name,$level,$jobbytes,$jobfiles,$start,$time,$poolid) = $sth->fetchrow_array()) { + my $sth2 = $dbh->prepare("SELECT Name FROM Pool WHERE PoolId = $poolid"); $sth2->execute() or die "Can't execute SQL statement : $dbh->errstr"; + ($poolname) = $sth2->fetchrow_array(); + ($hours,$minutes,$seconds) = split(":", $time); + $seconds = sprintf("%.1f", $seconds + ($minutes * 60) + ($hours * 60 * 60)); + $time = sprintf("%.1f", ($seconds + ($minutes * 60) + ($hours * 60 * 60)) / 60); + $bytesANDfiles = sprintf "%7.0f/%d", $jobbytes/1024/1024,$jobfiles; + $kbs = 0; + if ($jobbytes != 0) { + $kbs = ($jobbytes/$seconds)/1024; + } + + $text .= sprintf "%s %18.18s %1s %14s %16s %5sm %4.0f %9.9s\n", $jobstatus,$name,$level,$bytesANDfiles,$start,$time,$kbs,$poolname; + $totalfiles = $totalfiles + $jobfiles; + $totalbytes = $totalbytes + $jobbytes; +} +$totalbytes = sprintf("%.1f",$totalbytes / 1024 / 1024 / 1024); + +my $sth = $dbh->prepare("SELECT count(*) FROM Job WHERE $where"); $sth->execute() or die "Can't execute SQL statement : $dbh->errstr"; +($count_total) = $sth->fetchrow_array(); +my $sth = $dbh->prepare("SELECT count(*) FROM Job WHERE $where AND JobStatus = 'T'"); $sth->execute() or die "Can't execute SQL statement : $dbh->errstr"; +($count_ok) = $sth->fetchrow_array(); +$count_fail = $count_total - $count_ok; +$counts = sprintf("%.1f", 100- (($count_fail/$count_total)*100)); + + +open(MAIL,"|/usr/lib/sendmail -f$from -t"); +print MAIL "From: $from\n"; +print MAIL "To: $email\n"; +print MAIL "Subject: Backup ($when) $counts% OK - Total $count_total jobs, $count_fail failed\n"; +print MAIL "\n"; +print MAIL "Total $count_total jobs - $count_ok jobs are OK.\n"; +print MAIL "Total $totalbytes GB / $totalfiles files\n"; +print MAIL "\n"; + +print MAIL "Status JobName Lvl MBytes/Files Start Time KB/s Pool\n"; +print MAIL "============================================================================\n"; +print MAIL $text; + +print MAIL "============================================================================\n"; +print MAIL <= '%2' + ORDER BY Job.StartTime; +# 5 +:List all 
backups for a Client +*Enter Client Name: +SELECT DISTINCT Job.JobId as JobId,Client.Name as Client, + FileSet.FileSet AS FileSet,Level,StartTime, + JobFiles,JobBytes,VolumeName + FROM Client,Job,JobMedia,Media,FileSet + WHERE Client.Name='%1' + AND Client.ClientId=Job.ClientId AND Job.Type='B' + AND Job.JobStatus IN ('T','W') AND Job.FileSetId=FileSet.FileSetId + AND JobMedia.JobId=Job.JobId AND JobMedia.MediaId=Media.MediaId + ORDER BY Job.StartTime; +# 6 +:List Volume Attributes for a selected Volume +*Enter Volume name: +SELECT Slot,MaxVolBytes,VolCapacityBytes,VolStatus,Recycle,VolRetention, + VolUseDuration,MaxVolJobs,MaxVolFiles + FROM Media + WHERE VolumeName='%1'; +# 7 +:List Volumes used by selected JobId +*Enter JobId: +SELECT DISTINCT Job.JobId,VolumeName + FROM Job,JobMedia,Media + WHERE Job.JobId=%1 + AND Job.JobId=JobMedia.JobId + AND JobMedia.MediaId=Media.MediaId; +# 8 +:List Volumes to Restore All Files +*Enter Client Name: +!DROP TABLE temp; +!DROP TABLE temp2; +CREATE TABLE temp (JobId BIGINT NOT NULL, + JobTDate BIGINT, + ClientId BIGINT, + Level CHAR, + StartTime TEXT, + VolumeName TEXT, + StartFile BIGINT, + VolSessionId BIGINT, + VolSessionTime BIGINT ); +CREATE TABLE temp2 (JobId BIGINT NOT NULL, + StartTime TEXT, + VolumeName TEXT, + Level CHAR, + StartFile BIGINT, + VolSessionId BIGINT, + VolSessionTime BIGINT); +# Select last Full save +INSERT INTO temp SELECT Job.JobId,JobTDate,Job.ClientId,Job.Level, + StartTime,VolumeName,JobMedia.StartFile,VolSessionId,VolSessionTime + FROM Client,Job,JobMedia,Media WHERE Client.Name='%1' + AND Client.ClientId=Job.ClientId + AND Level='F' AND JobStatus IN ('T', 'W') + AND JobMedia.JobId=Job.JobId + AND JobMedia.MediaId=Media.MediaId + ORDER BY Job.JobTDate DESC LIMIT 1; +# Copy into temp 2 getting all volumes of Full save +INSERT INTO temp2 SELECT Job.JobId,Job.StartTime,Media.VolumeName,Job.Level, + JobMedia.StartFile,Job.VolSessionId,Job.VolSessionTime + FROM temp,Job,JobMedia,Media WHERE temp.JobId=Job.JobId + AND Job.Level='F' AND Job.JobStatus IN ('T', 'W') + AND JobMedia.JobId=Job.JobId + AND JobMedia.MediaId=Media.MediaId; +# Now add subsequent incrementals +INSERT INTO temp2 SELECT DISTINCT Job.JobId,Job.StartTime,Media.VolumeName, + Job.Level,JobMedia.StartFile,Job.VolSessionId,Job.VolSessionTime + FROM Job,temp,JobMedia,Media + WHERE Job.JobTDate>temp.JobTDate + AND Job.ClientId=temp.ClientId + AND Job.Level IN ('I','D') AND JobStatus IN ('T', 'W') + AND JobMedia.JobId=Job.JobId + AND JobMedia.MediaId=Media.MediaId; +# list results +SELECT DISTINCT VolumeName from temp2; +!DROP TABLE temp; +!DROP TABLE temp2; +# 9 +:List Pool Attributes for a selected Pool +*Enter Pool name: +SELECT Recycle,VolRetention,VolUseDuration,MaxVolJobs,MaxVolFiles,MaxVolBytes + FROM Pool + WHERE Name='%1'; +# 10 +:List total files/bytes by Job +SELECT count(*) AS Jobs,sum(JobFiles) AS Files,sum(JobBytes) AS Bytes,Name AS Job + FROM Job GROUP by Name; +# 11 +:List total files/bytes by Volume +SELECT count(*) AS Jobs,sum(JobFiles) AS Files,sum(JobBytes) AS Bytes,VolumeName + FROM Job,JobMedia,Media + WHERE JobMedia.JobId=Job.JobId + AND JobMedia.MediaId=Media.MediaId + GROUP by VolumeName; +# 12 +:List Files for a selected JobId +*Enter JobId: +SELECT Path.Path,Filename.Name FROM File,Filename,Path WHERE File.JobId=%1 + AND Filename.FilenameId=File.FilenameId + AND Path.PathId=File.PathId ORDER BY + Path.Path,Filename.Name; +# 13 +:List Jobs stored on a selected MediaId +*Enter MediaId: +SELECT DISTINCT 
Job.JobId,Job.Name,Job.StartTime,Job.Type, + Job.Level,Job.JobFiles,Job.JobBytes,Job.JobStatus + FROM JobMedia,Job + WHERE JobMedia.JobId=Job.JobId + AND JobMedia.MediaId=%1 + ORDER by Job.StartTime; +# 14 +:List Jobs stored for a given Volume name +*Enter Volume name: +SELECT DISTINCT Job.JobId as JobId,Job.Name as Name,Job.StartTime as StartTime, + Job.Type as Type,Job.Level as Level,Job.JobFiles as Files, + Job.JobBytes as Bytes,Job.JobStatus as Status + FROM Media,JobMedia,Job + WHERE Media.VolumeName='%1' + AND Media.MediaId=JobMedia.MediaId + AND JobMedia.JobId=Job.JobId + ORDER by Job.StartTime; +# 15 +:List Volumes Bacula thinks are in changer +SELECT MediaId,VolumeName,VolBytes/(1024*1024*1024) AS GB,Storage.Name + AS Storage,Slot,Pool.Name AS Pool,MediaType,VolStatus + FROM Media,Pool,Storage + WHERE Media.PoolId=Pool.PoolId + AND Slot>0 AND InChanger=1 + AND Media.StorageId=Storage.StorageId + ORDER BY MediaType ASC, Slot ASC; +# 16 +:List Volumes likely to need replacement from age or errors +SELECT VolumeName AS Volume,VolMounts AS Mounts,VolErrors AS Errors, + VolWrites AS Writes,VolStatus AS Status + FROM Media + WHERE (VolErrors>0) OR (VolStatus='Error') OR (VolMounts>50) OR + (VolStatus='Disabled') OR (VolWrites>3999999) + ORDER BY VolStatus ASC, VolErrors,VolMounts,VolumeName DESC; +# 17 +:List Volumes Bacula thinks are eligible for the changer +SELECT VolumeName,VolStatus,Storage.Name AS Location, + VolBytes/(1024*1024*1024) AS GB,MediaId,MediaType,Pool.Name AS Pool + FROM Media,Pool,Storage + WHERE Media.PoolId=Pool.PoolId + AND Media.StorageId=Storage.StorageId + AND InChanger=0 + AND ((VolStatus='Purged') OR (VolStatus='Append') OR (VolStatus='Recycle')) + ORDER BY VolMounts ASC, Pool.Name ASC, VolumeName ASC +# 18 +:List Volumes by Volume: +SELECT VolumeName, Job.JobId as JobID, Job.Name as JobName, Job.StartTime as +Start, sum(JobFiles) AS Files,sum(JobBytes) AS Bytes + FROM Job,JobMedia,Media + WHERE JobMedia.JobId=Job.JobId + AND JobMedia.MediaId=Media.MediaId + GROUP by VolumeName, Job.JobID, Job.Name, Job.StartTime + ORDER by VolumeName; +# 19 +:List Volumes by Jobs: +SELECT Job.Name as JobName, Job.JobId as JobID, VolumeName, Job.StartTime as +Start, sum(JobFiles) AS Files,sum(JobBytes) AS Bytes + FROM Job,JobMedia,Media + WHERE JobMedia.JobId=Job.JobId + AND JobMedia.MediaId=Media.MediaId + GROUP by VolumeName, Job.JobID, Job.Name, Job.StartTime + ORDER by JobName, Start; +# 20 +:List Volumes for a jobname: +*Enter Job name: +SELECT Job.Name as JobName, Job.JobId as JobID, VolumeName, Job.StartTime as +Start, sum(JobFiles) AS Files,sum(JobBytes) AS Bytes + FROM Job,JobMedia,Media + WHERE Job.Name='%1' + AND JobMedia.JobId=Job.JobId + AND JobMedia.MediaId=Media.MediaId + GROUP by VolumeName, Job.JobID, Job.Name, Job.StartTime + ORDER by JobName, Start; + diff --git a/examples/ssh-tunnel-README.txt b/examples/ssh-tunnel-README.txt new file mode 100644 index 00000000..6da62508 --- /dev/null +++ b/examples/ssh-tunnel-README.txt @@ -0,0 +1,112 @@ +From: Joshua Kugler +To: bacula-users@lists.sourceforge.net +Subject: [Bacula-users] SSH tunneling mini-howto +Date: Tue, 13 Dec 2005 16:59:47 -0900 + +Here is an outline of the steps I took to get ssh tunneling to work for me. + +NOTES: +I modified to ssh-tunnel.sh file from CVS because: +1) I didn't need director->client communications encrypted. My main reason +for using SSH tunneling was so the clients in the DMZ could get back through +the firewall to connect to the storage server. 
+2) There was a bug in the method it used to get the PID of the tunnel.
+It used 'cut -d" " -f1'. The problem was that ps sometimes has a leading space
+in front of the PID if PID < 10,000, so cut would return a blank PID.
+Instead I used awk '{ print $1 }' and that worked even with leading spaces.
+3) I also took out ssh's 'v' option for production work.
+4) I added '> /dev/null 2> /dev/null' because for some reason ssh wasn't fully
+disconnecting from the terminal, thus the ssh-tunnel script would actually
+hang the job.
+5) I changed it to exit with the status of the SSH command, so the job would
+fail right away if the tunnel didn't go up.
+6) The $CLIENT is now specified on the command line so it can be specified in
+the Run Before Job directive. As a result, you must specify the client when
+you start *and* stop the tunnel.
+
+OK, on to the how-to:
+
+1. I placed the attached script in /usr/local/bacula/scripts
+
+2. I modified bacula-dir.conf to have a second Storage directive entry that
+referenced the same storage resource in bacula-sd.conf. (Based on a recent
+e-mail, might this be dangerous? Testing will tell.)
+
+The modified Storage entry looked like this:
+
+Storage {
+ Name = herodotus-sd-ops
+ Address = localhost
+ SDPort = 9103
+ Password = "Apassword"
+ Device = AdicFastStor22
+ Media Type = DLT8000
+ Autochanger = yes
+ Maximum Concurrent Jobs = 30
+}
+
+The Address is set to localhost, because when the tunnel is up, the client
+will connect to localhost:9103 in order to connect to the Storage daemon.
+
+3. In the client configuration, each client that uses this configuration has
+these lines added:
+
+Run Before Job = "/usr/local/bacula/scripts/ssh-tunnel start FQDN"
+Run After Job = "/usr/local/bacula/scripts/ssh-tunnel stop FQDN"
+
+FQDN = fully qualified domain name (i.e. full host name)
+
+And their storage is set to "herodotus-sd-ops" (in our case, OPS is the name
+of our DMZ).
+
+4. Now, ssh keys must be created in order for all this to go on unattended.
+
+At the prompt, type:
+
+ssh-keygen -b 2048 -t dsa
+
+(if you want less bit strength, you can use a number less than 2048)
+
+When asked where to save the key, specify a location, or accept the default.
+Just remember the location, because you will have to put it in the script
+(replace "/usr/local/bacula/ssh/id_dsa" with your file's location). Make
+sure the user that bacula runs as can read the file.
+
+When asked for a passphrase, leave it blank, as this will be running
+unattended.
+
+After it generates the key, it will save a file called id_dsa, and in that
+same directory, there will be a file called id_dsa.pub, which is your public
+SSH key.
+
+On your backup client, create a user ('bacula' is probably a good choice).
+In that user's home directory, create a directory named '.ssh' (note the
+leading dot). In that directory, copy the id_dsa.pub file you created
+earlier. Once that file is in that user's directory, copy it to a file named
+"authorized_keys" in that same directory. If you're not doing this as the
+user, make sure the directory and files are owned by that user. And for good
+measure make sure only the user can read them.
+
+5. Now, the test. Your keys are generated. They are in place on the client.
+You've pointed your script to your private key's file (id_dsa). Now, at the
+prompt on your server type:
+
+location_of_script/ssh-tunnel start client.host.name
+
+then type
+
+echo $?
+
+That should be 0, which will mean everything went well.
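+
+Condensed into commands, the key setup in step 4 looks roughly like this
+(the id_dsa path matches the script's default and the 'bacula' client user
+is the one suggested above; adjust both to your own layout):
+
+  # on the backup server, as the user the Bacula director runs as
+  ssh-keygen -b 2048 -t dsa -f /usr/local/bacula/ssh/id_dsa   # empty passphrase
+
+  # on the backup client, as the 'bacula' user created above,
+  # with id_dsa.pub copied over from the server
+  mkdir -p ~/.ssh
+  cat id_dsa.pub >> ~/.ssh/authorized_keys
+  chmod 700 ~/.ssh; chmod 600 ~/.ssh/authorized_keys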
+ +If you need to debug, remove the redirection on the ssh command and add 'v' to +the switches for verbose output. + +If the test went well, reload your modified config, and try running a job. If +all goes well, the job report will look like it always does, save notices at +the top and bottom letting you know that the tunnel went up and down. + +I've also gotten this to work on a Windows box using CopSSH (and OpenSSH +server for Windows), so this isn't a Unix-only solution. + +The script is in /examples/ssh-tunnel.sh diff --git a/examples/ssh-tunnel.sh b/examples/ssh-tunnel.sh new file mode 100755 index 00000000..2e084448 --- /dev/null +++ b/examples/ssh-tunnel.sh @@ -0,0 +1,41 @@ +#!/bin/sh +# script for creating / stopping a ssh-tunnel to a backupclient +# Stephan Holl +# Modified by Joshua Kugler +# +# + +# variables +USER=bacula +CLIENT=$2 +LOCAL=your.backup.server.host.name +SSH=/usr/bin/ssh + +case "$1" in + start) + # create ssh-tunnel + echo "Starting SSH-tunnel to $CLIENT..." + $SSH -fnCN2 -o PreferredAuthentications=publickey -i /usr/local/bacula/ssh/id_dsa -l $USER -R 9101:$LOCAL:9101 -R 9103:$LOCAL:9103 $CLIENT > /dev/null 2> /dev/null + exit $? + ;; + + stop) + # remove tunnel + echo "Stopping SSH-tunnel to $CLIENT..." + # find PID killem + PID=`ps ax | grep "ssh -fnCN2 -o PreferredAuthentications=publickey -i /usr/local/bacula/ssh/id_dsa" | grep "$CLIENT" | awk '{ print $1 }'` + kill $PID + exit $? + ;; + *) + # usage: + echo " " + echo " Start SSH-tunnel to client-host" + echo " to bacula-director and storage-daemon" + echo " " + echo " USAGE:" + echo " ssh-tunnel.sh {start|stop} client.fqdn" + echo "" + exit 1 + ;; +esac diff --git a/examples/upgrade-win32-client.txt b/examples/upgrade-win32-client.txt new file mode 100644 index 00000000..ef110a6c --- /dev/null +++ b/examples/upgrade-win32-client.txt @@ -0,0 +1,75 @@ +From: "Michel Meyers" +To: "bacula-users" +Subject: [Bacula-users] Script for pushing new clients to Windows boxes +Date: Mon, 2 Feb 2004 16:10:48 +0100 + +Hello, + +Some of you may remember my document on how to remotely push a Win32 bacula +client onto a WinNT/2k/XP box. Well, I've written a script to do it for me +and thought I'd share it with you: +- ---------------------------------------------------------------- +#!/bin/bash +# +# Remote Win32 client upgrade script +# written by Michel Meyers (last update 2006-09-25 11:34) +# +# WARNING: Make sure that no bacula-fd.conf exists in the source directory! +# You will destroy/overwrite all your client's configs if you don't +# be careful with this. +# +# The upgrade function does the following: +# - Shutdown Bacula service on remote machine +# - Wait 30 seconds (to allow proper shutdown) +# - Mount C: drive of remote box +# - Copy new client to remote machine +# - Unmount C; +# - Startup the new Bacula service +# +# To upgrade a machine append the following at the bottom of this file: +# +# SERVERNAME= +# USERNAME= +# PASSWORD= +# upgrade + +upgrade() { +net rpc -S $SERVERNAME -U $USERNAME%"$PASSWORD" service stop bacula +sleep 30 +smbmount //$SERVERNAME/c$ /mnt -o username=$USERNAME,password="$PASSWORD" +cp /home/michel/winbacula/bin/* /mnt/bacula/bin +umount /mnt +net rpc -S $SERVERNAME -U $USERNAME%"$PASSWORD" service start bacula +} + +SERVERNAME=xerxes +USERNAME=administrator +PASSWORD=secret +upgrade + +SERVERNAME=shodan +USERNAME=teh_one +PASSWORD="" +upgrade +- ---------------------------------------------------------------- + +It should be pretty self-explanatory. 
I'm not good at shell programming and +I don't know whether there's any notion of arrays or 'for' loops that could +make it cleaner so I simply wrote a function which references some variables +and then call that repeatedly (once per machine). You can of course change +the values according to your system and liking (if 30 secs seem to much for +you, just reduce the value after sleep, make sure to check on the paths and +mountpoint /mnt may not be usable on your system, ...) + +Note: The requirements are the same as described in my other document +(Samba-TNG clients among others, otherwise you'll be missing rpcclient). + +Update 2006-09-25: Samba-TNG is no longer required, the 'net' command from +Samba 3 works for starting and stopping services. Paths may need to be +updated with quotation marks as the new Bacula Win32 Installer no longer +installs into C:\bacula but into 'C:\Program Files\bacula' (on English +Windows versions). + + +Enjoy! + diff --git a/examples/vm/bacula.data b/examples/vm/bacula.data new file mode 100644 index 00000000..a316f7ee --- /dev/null +++ b/examples/vm/bacula.data @@ -0,0 +1,2 @@ +Bacula Data Tape + diff --git a/examples/vm/blabela.exec b/examples/vm/blabela.exec new file mode 100644 index 00000000..5ffa6d47 --- /dev/null +++ b/examples/vm/blabela.exec @@ -0,0 +1,83 @@ +/* REXX ****************************************************/ +/* BLABELA EXEC -- Example VM procedure for labeling tapes */ +/* to be used with Bacula in ANSI label */ +/* mode. */ +/* */ +/* Author: David Boyes */ +/* */ +/* Prereq: Tape drive attached at virtual addr */ +/* 181 (TAP1). */ +/* */ +/* Blank tape inserted in drive. */ +/* */ +/* External file BACULA DATA (used as a */ +/* dummy input file for MOVEFILE, since */ +/* MOVEFILE insists on having a real file */ +/* for input if we want actual output */ +/* by the CMS OS sim routines). */ +/* */ +/* Usage: BLABELA volumeid ownerid */ +/* */ +/* where: */ +/* */ +/* volumeid = ANSI volume id to be */ +/* written in VOL1 label and made */ +/* visible to the library automation */ +/* and other OSes. */ +/* */ +/* ownerid = local userid to "own" the */ +/* tape from the TMS viewpoint. Used */ +/* to validate mount requests and for */ +/* TMS housekeeping purposes. */ +/* */ +/* Maintenance Log: */ +/* */ +/* 16 Feb 2005 -- DB: Created procedure and released to */ +/* Bacula development list. */ +/* */ +/* */ +/* */ +/* */ +/* */ +/* */ +/***********************************************************/ + +/***********************************************************/ +/* Parse command line arguments */ +/***********************************************************/ + +arg volser owner + +/***********************************************************/ +/* Rewind tape and write ANSI VOL1 label and logical EOT */ +/***********************************************************/ + +'TAPE REW (TAP1' +'TAPE WVOL1' volser owner '( TAP1 AL' +'TAPE WTM 2' + +/***********************************************************/ +/* Rewind tape and write Bacula ANSI label signature file */ +/* (HDR1 file containing BACULA.DATA FID as 1st file on */ +/* tape). Note that for some reason the LABELDEF command */ +/* requires FIDs longer than 8 chars to be passed via the */ +/* program stack, even if it would not cause the resulting */ +/* command to exceed the 255 char maximum. This is (IMHO) */ +/* an APARable bug, but c'est la vie. 
*/ +/***********************************************************/ + +'TAPE REW ( TAP1' +'SET CMSTYPE HT' /* supress output to hide prompt for FID */ +'FILEDEF INMOVE DISK BACULA DATA A' +'FILEDEF OUTMOVE TAP1 AL ( RECFM F LRECL 80' +queue "BACULA.DATA" +'LABELDEF OUTMOVE FID ? VOLID' volser 'VOLSEQ 1 FSEQ 1' +'MOVEFILE' +'SET CMSTYPE RT' /* resume normal console output */ + +/***********************************************************/ +/* Print nice exit message and exit */ +/***********************************************************/ + +say "Labeled ANSI" volser "for use with Bacula." +exit diff --git a/examples/vm/blabeli.exec b/examples/vm/blabeli.exec new file mode 100644 index 00000000..16d678e7 --- /dev/null +++ b/examples/vm/blabeli.exec @@ -0,0 +1,83 @@ +/* REXX ****************************************************/ +/* BLABELI EXEC -- Example VM procedure for labeling tapes */ +/* to be used with Bacula in IBM SL label */ +/* mode. */ +/* */ +/* Author: David Boyes */ +/* */ +/* Prereq: Tape drive attached at virtual addr */ +/* 181 (TAP1). */ +/* */ +/* Blank tape inserted in drive. */ +/* */ +/* External file BACULA DATA (used as a */ +/* dummy input file for MOVEFILE, since */ +/* MOVEFILE insists on having a real file */ +/* for input if we want actual output */ +/* by the CMS OS sim routines). */ +/* */ +/* Usage: BLABELI volumeid ownerid */ +/* */ +/* where: */ +/* */ +/* volumeid = IBM SL volume id to be */ +/* written in VOL1 label and made */ +/* visible to the library automation */ +/* and other OSes. */ +/* */ +/* ownerid = local userid to "own" the */ +/* tape from the TMS viewpoint. Used */ +/* to validate mount requests and for */ +/* TMS housekeeping purposes. */ +/* */ +/* Maintenance Log: */ +/* */ +/* 16 Feb 2005 -- DB: Created procedure and released to */ +/* Bacula development list. */ +/* */ +/* */ +/* */ +/* */ +/* */ +/* */ +/***********************************************************/ + +/***********************************************************/ +/* Parse command line arguments */ +/***********************************************************/ + +arg volser owner + +/***********************************************************/ +/* Rewind tape and write IBM VOL1 label and logical EOT */ +/***********************************************************/ + +'TAPE REW (TAP1' +'TAPE WVOL1' volser owner '( TAP1 SL' +'TAPE WTM 2' + +/***********************************************************/ +/* Rewind tape and write Bacula IBM label signature file */ +/* (HDR1 file containing BACULA.DATA FID as 1st file on */ +/* tape). Note that for some reason the LABELDEF command */ +/* requires FIDs longer than 8 chars to be passed via the */ +/* program stack, even if it would not cause the resulting */ +/* command to exceed the 255 char maximum. This is (IMHO) */ +/* an APARable bug, but c'est la vie. */ +/***********************************************************/ + +'TAPE REW ( TAP1' +'SET CMSTYPE HT' /* supress output to hide prompt for FID */ +'FILEDEF INMOVE DISK BACULA DATA A' +'FILEDEF OUTMOVE TAP1 SL ( RECFM F LRECL 80' +queue "BACULA.DATA" +'LABELDEF OUTMOVE FID ? VOLID' volser 'VOLSEQ 1 FSEQ 1' +'MOVEFILE' +'SET CMSTYPE RT' /* resume normal console output */ + +/***********************************************************/ +/* Print nice exit message and exit */ +/***********************************************************/ + +say "Labeled IBM SL" volser "for use with Bacula." 
+exit diff --git a/examples/vm/tape-label-tools.txt b/examples/vm/tape-label-tools.txt new file mode 100644 index 00000000..5ef73407 --- /dev/null +++ b/examples/vm/tape-label-tools.txt @@ -0,0 +1,27 @@ +From: David Boyes +To: "'kern@sibbald.com'" , +Cc: "'bacula-devel'" +Subject: RE: [Bacula-devel] ANSI label tape creation tools for VM +Date: Thu, 17 Feb 2005 11:03:26 -0500 + +I don't seem to have a way to update the CVS (nor really a lot of +experience with *update* access to the beastie), so appended below are +some example scripts to use on a z/VM system to create labeled tapes +suitable for use with the ANSI label support in Bacula. I suggest that +we put these in the "examples" section of the tree as contributed tools +-- I'll leave it to your discretion as to where/how to tag these. + +I'm finishing up the documentation and packaging for my z/VM tape mount +helper daemon and mtx-changer variant, so you may want to think about +where those should go as well. + +File BLABELA EXEC is the script for creating ANSI labeled tapes with the +proper signature. + +File BLABELI EXEC is a variation of the above script for creating IBM SL +label tapes with the proper signature. + +File BACULA.DATA is a dummy file required by the annoying fact that the +OS simulation routines in CMS require an actual file as input to the OS +sim file creation utility (grr). + diff --git a/examples/vm/vmbacula.tgz b/examples/vm/vmbacula.tgz new file mode 100644 index 00000000..0309fff5 Binary files /dev/null and b/examples/vm/vmbacula.tgz differ diff --git a/examples/vm/vmbacula.txt b/examples/vm/vmbacula.txt new file mode 100644 index 00000000..f706054c --- /dev/null +++ b/examples/vm/vmbacula.txt @@ -0,0 +1,28 @@ +Tape Mount Client/Server for z/VM + +This package provides a simple Linux client and a VM disconnected +virtual machine allowing Linux applications to mount and manage +tapes hosted by a z/VM systems. + +The tools were written to support running Bacula, a open-source +file level backup tool, as a Linux appliance running in a z/VM +virtual machine, however they are general purpose enough to +employ with other applications with some minor thought. The +application consists of a Perl script to run within the Linux +guest, and a REXX/CMS Pipelines-based server to interact with a +CMS-based tape management system. + +This package contains only the support for basic VM tape +operations, and does not include support for the popular +commercial TMS systems such as CA-VM:Tape or others. A +commercially supported version is available for a fee from Sine +Nomine and includes detailed documentation, 24x7 support, and +additional features. Please contact Sine Nomine at info (at) +sinenomine.net for information and pricing for the full +commercial version of the software. + + +This software is distributed according to the Artistic License. +Please send bugs or suggestions to deb390 (at) sinenomine.net. 
+ +Web site: http://sinenomine.net/vm/tapemount diff --git a/manpages/Makefile.in b/manpages/Makefile.in new file mode 100644 index 00000000..dab1f180 --- /dev/null +++ b/manpages/Makefile.in @@ -0,0 +1,50 @@ +# +# Copyright (C) 2000-2018 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +@MCOMMON@ + +.PHONY: dummy + +MAN8 = bacula.8 bacula-dir.8 bacula-fd.8 bacula-sd.8 \ + bconsole.8 bcopy.8 bextract.8 bls.8 bscan.8 btape.8 \ + btraceback.8 dbcheck.8 bwild.8 bregex.8 + +MAN1 = bsmtp.1 bat.1 + +all: + +nothing: + +depend: + +install: + $(MKDIR) $(DESTDIR)/$(mandir)/man8 + for I in ${MAN8}; \ + do ($(RMF) $$I.gz; gzip -c $$I >$$I.gz; \ + $(INSTALL_DATA) $$I.gz $(DESTDIR)$(mandir)/man8/$$I.gz; \ + rm -f $$I.gz); \ + done + $(MKDIR) $(DESTDIR)/$(mandir)/man1 + for I in ${MAN1}; \ + do ($(RMF) $$I.gz; gzip -c $$I >$$I.gz; \ + $(INSTALL_DATA) $$I.gz $(DESTDIR)$(mandir)/man1/$$I.gz; \ + rm -f $$I.gz); \ + done + +uninstall: + for I in ${MAN8}; \ + do (rm -f $(DESTDIR)$(mandir)/man8/$$I.gz); \ + done + for I in ${MAN1}; \ + do (rm -f $(DESTDIR)$(mandir)/man1/$$I.gz); \ + done + +clean: + @$(RMF) *~ 1 2 3 *.bak + @find . -name .#* -exec $(RMF) {} \; + +depend: + +distclean: clean + $(RMF) Makefile diff --git a/manpages/bacula-dir.8 b/manpages/bacula-dir.8 new file mode 100644 index 00000000..662bc4e8 --- /dev/null +++ b/manpages/bacula-dir.8 @@ -0,0 +1,104 @@ +.\" Hey, EMACS: -*- nroff -*- +.\" First parameter, NAME, should be all caps +.\" Second parameter, SECTION, should be 1-8, maybe w/ subsection +.\" other parameters are allowed: see man(7), man(1) +.TH BACULA\-DIR 8 "28 October 2017" "Kern Sibbald" "Network backup, recovery&verification" +.\" Please adjust this date whenever revising the manpage. +.\" +.SH NAME +.B bacula\-dir +\- Bacula Director +.SH SYNOPSIS +.B bacula\-dir +.RI [ options ] +.br +.SH DESCRIPTION +This manual page documents briefly the +.B bacula\-dir +command. +.br +Bacula's Director Daemon acts as the controller of the +network backup system: it is responsible for scheduling and +coordinating backups across the network. +.SH OPTIONS +.TP +.BI \-c\ file +Specify the configuration file to use. +.TP +.BI \-d\ nn +Set debug level to \fInn\fP. +.TP +.BI \-dt +Print timestamp in debug output. +.TP +.BI \-T +Send debug messages to the trace file. +.TP +.BI \-f +Run in foreground (for debugging). +.TP +.BI \-g\ group +Set the group/gid to run as. +.TP +.BI \-m +Print kaboom output (for debugging). +.TP +.BI \-P +Do not create a PID file. +.TP +.BI \-r\ job +Run . +.TP +.BI \-s +No signals (for debugging). +.TP +.B \-t +Test the configuration file and report errors. +.TP +.BI \-u\ user +Set the username/uid to run as. +.TP +.BI \-v +Set verbose mode. +.TP +.BI \-? +Show version and usage of program. +.SH TCP-WRAPPERS CONFIGURATION +Tcpwrappers looks for the service name of the bacula daemons in +.I hosts.allow +, and the service names of these daemons is configured to be different from the +binary. +The service names are configured to be +.B %hostname%-%component% +rather than +.B bacula-dir +(As defined in the bacula-dir.conf.in file) + + +So the hosts.allow entry has to match +.B %hostname%-%component% +(servername-dir for example) instead of bacula-%component% + +.B WARNING: +This means that if the hosts.allow file has the entry: + +.B bacula-dir: ALL + +you will not be able to run bconsole to connect to the local director! + +The entry would have to read: + +.B server-dir: ALL + +and this will allow the console to connect to the director. 
+(The process running is bacula-dir.) +.SH SEE ALSO +.BR bacula-fd (8), +.BR bacula-sd (8). + +.SH AUTHOR +This manual page was written by Jose Luis Tallon +.nh +. +.SH COPYRIGHT +This man page document is released under the BSD 2-Clause license. diff --git a/manpages/bacula-fd.8 b/manpages/bacula-fd.8 new file mode 100644 index 00000000..c5f21946 --- /dev/null +++ b/manpages/bacula-fd.8 @@ -0,0 +1,75 @@ +.\" Hey, EMACS: -*- nroff -*- +.\" First parameter, NAME, should be all caps +.\" Second parameter, SECTION, should be 1-8, maybe w/ subsection +.\" other parameters are allowed: see man(7), man(1) +.TH BACULA\-SD 8 "28 October 2017" "Kern Sibbald" "Network backup, recovery & verification" +.\" Please adjust this date whenever revising the manpage. +.\" +.SH NAME +.B bacula\-fd +\- Bacula's File Daemon +.SH SYNOPSIS +.B bacula\-fd +.RI [ options ] +.br +.SH DESCRIPTION +This manual page documents briefly the +.B bacula +command. +.br +Bacula's File Daemon acts as the interface between the Bacula +network backup system and the filesystems to be backed up: it is +responsible for reading/writing/verifying the files to be +backup'd/verified/restored. Network transfer can optionally be +compressed. +.SH OPTIONS +.TP +.BI \-c\ file +Specify the configuration file to use. +.TP +.BI \-d\ nn +Set debug level to \fInn\fP. +.TP +.BI \-dt +Print timestamp in debug output. +.TP +.BI \-T +Send debug messages to the trace file. +.TP +.BI \-f +Run in foreground (for debugging). +.TP +.BI \-g\ group +Set the group/gid to run as. +.TP +.BI \-k +Keep readall permission when dropping privileges. +.TP +.BI \-m +Print kaboom output (for debugging). +.TP +.BI \-P +Do not create a PID file. +.TP +.BI \-s +No signals (for debugging). +.TP +.B \-t +Test the configuration file and report errors. +.TP +.BI \-u\ user +Set the username/uid to run as. +.TP +.BI \-v +Set verbose mode. +.TP +.B \-? +Show version and usage of program. +.SH SEE ALSO +.BR bacula\-dir (8), +.BR bacula\-sd (8). +.br +.SH AUTHOR +This manual page was written by Jose Luis Tallon . +.SH COPYRIGHT +This man page document is released under the BSD 2-Clause license. diff --git a/manpages/bacula-sd.8 b/manpages/bacula-sd.8 new file mode 100644 index 00000000..4a54c26f --- /dev/null +++ b/manpages/bacula-sd.8 @@ -0,0 +1,75 @@ +.\" Hey, EMACS: -*- nroff -*- +.\" First parameter, NAME, should be all caps +.\" Second parameter, SECTION, should be 1-8, maybe w/ subsection +.\" other parameters are allowed: see man(7), man(1) +.TH BACULA\-SD 8 "28 October 2017" "Kern Sibbald" "Network backup, recovery & verification" +.\" Please adjust this date whenever revising the manpage. +.\" +.SH NAME +.B bacula\-sd +\- Bacula's Storage Daemon +.SH SYNOPSIS +.B bacula\-sd +.RI [ options ] +.br +.SH DESCRIPTION +This manual page documents briefly the +.B bacula\-sd +command. +.br +Bacula's Storage Daemon acts as the interface between the Bacula +network backup system and a tape drive/autochanger or filesystem where +the backups will be stored. +.SH OPTIONS +.TP +.BI \-c\ file +Specify the configuration file to use. +.TP +.BI \-d\ nn +Set debug level to \fInn\fP. +.TP +.BI \-dt +Print timestamp in debug output. +.TP +.BI \-T +Send debug messages to the trace file. +.TP +.BI \-f +Run in foreground (for debugging). +.TP +.BI \-g\ group +Set the group/gid to run as. +.TP +.BI \-P +Do not create a PID file. +.TP +.BI \-p +Proceed in spite of I/O errors +.TP +.BI \-m +Print kaboom output (for debugging) +.TP +.BI \-s +No signals (for debugging). 
+.TP +.B \-t +Test the configuration file and report errors. +.TP +.BI \-u\ user +Set the username/uid to run as. +.TP +.BI \-v +Set verbose mode. +.TP +.B \-? +Show version and usage of program. +.SH SEE ALSO +.BR bacula\-dir (8), +.BR bacula\-fd (8). +.br +.SH AUTHOR +This manual page was written by Jose Luis Tallon +.nh +. +.SH COPYRIGHT +This man page document is released under the BSD 2-Clause license. diff --git a/manpages/bacula-tray-monitor.1 b/manpages/bacula-tray-monitor.1 new file mode 100644 index 00000000..a07cb596 --- /dev/null +++ b/manpages/bacula-tray-monitor.1 @@ -0,0 +1,46 @@ +.\" Hey, EMACS: -*- nroff -*- +.\" First parameter, NAME, should be all caps +.\" Second parameter, SECTION, should be 1-8, maybe w/ subsection +.\" other parameters are allowed: see man(7), man(1) +.TH BACULA-TRAY-MONITOR 1 "May 10, 2006" "Kern Sibbald" "Network backup, recovery and verification" +.\" Please adjust this date whenever revising the manpage. +.\" +.SH NAME + bacula-tray-monitor \- Bacula's 'System Tray' monitor +.SH SYNOPSIS +.B bacula-tray-monitor +.RI [options] +.br +.SH DESCRIPTION +This manual page documents briefly the +.B bacula-tray-monitor +command, a simple monitor for the 'system tray' in KDE/GNOME +.PP +.SH OPTIONS +bacula-tray-monitor [\-c config_file] [\-d debug_level] [\-t] +.TP +.B \-c +Specify configuration file. +.TP +.B \-d +Set debug level to \fInn\fP. +.TP +.B \-dt +Print timestamp in debug output. +.TP +.B \-t +Test config mode: read configuration and exit. +.TP +.B \-? +Show version and usage of program. +.SH SEE ALSO +.BR bacula-dir (8), +.BR bls (1), +.BR bextract (1). +.br +.SH AUTHOR +This manual page was written by Jose Luis Tallon +.nh +. +.SH COPYRIGHT +This man page document is released under the BSD 2-Clause license. diff --git a/manpages/bacula.8 b/manpages/bacula.8 new file mode 100644 index 00000000..3066f02c --- /dev/null +++ b/manpages/bacula.8 @@ -0,0 +1,122 @@ +.\" manual page [] for Bacula +.\" SH section heading +.\" SS subsection heading +.\" LP paragraph +.\" IP indented paragraph +.\" TP hanging label +.TH Bacula 8 "The Network Backup Solution" +.SH NAME +Bacula \- The Network Backup Solution +.SH SYNOPSIS +.B bacula-dir \- Director +.br +.B bacula-fd \- File daemon or Client +.br +.B bacula-sd \- Storage daemon +.br +.B bconsole \- Console to control Bacula +.br +.SH DESCRIPTION +.LP +Bacula is a set of computer programs that permits you (or the +system administrator) to manage backup, recovery, and +verification of computer data across a network of computers of +different kinds. In technical terms, it is a network +Client/Server based backup program. Bacula is relatively easy to +use and efficient, while offering many advanced storage +management features that make it easy to find and recover lost or +damaged files. Due to its modular design, Bacula is scalable +from small single computer systems to systems consisting of +hundreds of computers located over a large network. + +.LP +Bacula Director service consists of the program that supervises +all the backup, restore, verify and archive operations. The +system administrator uses the Bacula Director to schedule backups +and to recover files. For more details see the Director Services +Daemon Design Document in the Bacula Developer's Guild. The +Director runs as a daemon or a service (i.e. in the background). + +.LP +Bacula Console services is the program that allows the +administrator or user to communicate with the Bacula Director +(see above). 
Currently, the Bacula Console is available in two +versions. The first and simplest is to run the Console program +in a shell window (i.e. TTY interface). Most system +administrators will find this completely adequate. The second +version is a Qt 4.2 GUI interface named bat that has +more features than the bconsole program. + +.LP +Bacula File services (or Client program) is the software program +that is installed on the machine to be backed up. It is specific +to the operating system on which it runs and is responsible for +providing the file attributes and data when requested by the +Director. The File services are also responsible for the file +system dependent part of restoring the file attributes and data +during a recovery operation. For more details see the File +Services Daemon Design Document in the Bacula Developer's Guide. +This program runs as a daemon on the machine to be backed up, and +in some of the documentation, the File daemon is referred to as +the Client (for example in Bacula's configuration file). In +addition to Unix/Linux File daemons, there is a Windows File +daemon (normally distributed in binary format). The Windows File +daemon runs on all currently known Windows versions (2K, 2003, XP, +and Vista). + +.LP +Bacula Storage services consist of the software programs that +perform the storage and recovery of the file attributes and data +to the physical backup media or volumes. In other words, the +Storage daemon is responsible for reading and writing your tapes +(or other storage media, e.g. files). For more details see the +Storage Services Daemon Design Document in the Bacula Developer's +Guide. The Storage services runs as a daemon on the machine that +has the backup device (usually a tape drive). + +.LP +Catalog services are comprised of the software programs +responsible for maintaining the file indexes and volume databases +for all files backed up. The Catalog services permit the System +Administrator or user to quickly locate and restore any desired +file. The Catalog services sets Bacula apart from simple backup +programs like tar and bru, because the catalog maintains a record +of all Volumes used, all Jobs run, and all Files saved, +permitting efficient restoration and Volume management. Bacula +currently supports three different databases, MySQL, PostgreSQL, +and SQLite3, one of which must be chosen when building Bacula. + +.SH OPTIONS +See the HTML/PDF documentation at: +.br + +.br +for details of the command line options. + + +.SH CONFIGURATION +Each daemon has its own configuration file which must be +tailored for each particular installation. Please see the HTML/PDF +documentation for the details. + +.SH SEE ALSO +The HTML manual installed on your system (typically found in +.br +/usr/share/doc/bacula-) or the online manual at: +.br + + +.SH BUGS +See + +.SH AUTHOR +Kern Sibbald +.SS Current maintainer +Kern Sibbald +.SS Contributors +An enormous list of past and former persons who have devoted +their time and energy to this project -- thanks. See the AUTHORS +file in the main Bacula source directory. + +.SH COPYRIGHT +This man page document is released under the BSD 2-Clause license. 
diff --git a/manpages/bat.1 b/manpages/bat.1 new file mode 100644 index 00000000..87eefb87 --- /dev/null +++ b/manpages/bat.1 @@ -0,0 +1,46 @@ +.\" Hey, EMACS: -*- nroff -*- +.\" First parameter, NAME, should be all caps +.\" Second parameter, SECTION, should be 1-8, maybe w/ subsection +.\" other parameters are allowed: see man(7), man(1) +.TH BAT 1 "26 September 2009" "Kern Sibbald" "Network backup, recovery and verification" +.\" Please adjust this date whenever revising the manpage. +.\" +.SH NAME + bat \- Bacula Administration Tool Console +.SH SYNOPSIS +.B bat +.RI [options] +.br +.SH DESCRIPTION +This manual page documents briefly the +.B bat +command, the Qt4 version of the Bacula Administration Tool console. +This is a GUI full featured program similar the bconsole program, +but it is graphical oriented and more features. +.PP +.SH OPTIONS +bat [\-s] [\-c config_file] [\-d debug_level] [\-t] +.TP +.B \-c +Specify configuration file. Default is bat.conf. +.TP +.B \-d +Set debug level to \fInn\fP. +.TP +.B \-s +No signals. Used in debugging only. +.TP +.B \-t +Test config mode: read configuration and exit. +.TP +.B \-? +Show version and usage of program. +.SH SEE ALSO +.BR bacula-dir (8), +.BR bls (1), +.BR bextract (1). +.br +.SH AUTHOR +This manual page was written by Kern Sibbald. +.SH COPYRIGHT +This man page document is released under the BSD 2-Clause license. diff --git a/manpages/bconsole.8 b/manpages/bconsole.8 new file mode 100644 index 00000000..478f02c1 --- /dev/null +++ b/manpages/bconsole.8 @@ -0,0 +1,60 @@ +.\" Hey, EMACS: -*- nroff -*- +.\" First parameter, NAME, should be all caps +.\" Second parameter, SECTION, should be 1-8, maybe w/ subsection +.\" other parameters are allowed: see man(7), man(1) +.TH BCONSOLE 8 "4 December 2009" "Kern Sibbald" "Network backup, recovery and verification" +.\" Please adjust this date whenever revising the manpage. +.\" +.SH NAME + bconsole \- Bacula's management Console +.SH SYNOPSIS +.B bconsole +.RI [options] +.br +.SH DESCRIPTION +This manual page documents briefly the +.B bconsole +command. +.PP +.SH OPTIONS +.TP +.BI \-D\ dir +Select a Director. +.TP +.BI \-l +List defined Directors. +.TP +.BI \-c\ config +Specify configuration file. +.TP +.BI \-d\ nn +Set debug level to \fInn\fP. +.TP +.BI \-dt +Print timestamp in debug output. +.TP +.B \-n +No conio (for scripting). +.TP +.B \-s +No signals (for debugging). +.TP +.B \-u\ nn +Set command execution timeout to \fInn\fP seconds. +.TP +.B \-t +Test the configuration file and report errors. +.TP +.B \-? +Show version and usage of program. +.SH SEE ALSO +.BR bacula\-dir (8), +.BR bls (1), +.BR bextract (1). +.br +.SH AUTHOR +This manual page was written by Jose Luis Tallon +.nh +. +.SH COPYRIGHT +This man page document is released under the BSD 2-Clause license. diff --git a/manpages/bcopy.8 b/manpages/bcopy.8 new file mode 100644 index 00000000..c6aebfe1 --- /dev/null +++ b/manpages/bcopy.8 @@ -0,0 +1,65 @@ +.\" Hey, EMACS: -*- nroff -*- +.\" First parameter, NAME, should be all caps +.\" Second parameter, SECTION, should be 1-8, maybe w/ subsection +.\" other parameters are allowed: see man(7), man(1) +.TH BCOPY 8 "26 November 2009" "Kern Sibbald" "Network backup, recovery and verification" +.\" Please adjust this date whenever revising the manpage. +.\" +.SH NAME + bcopy \- Bacula's 'Copy from tape' +.SH SYNOPSIS +.B bcopy +.RI [ options ] +.I input-archive +.I output-archive +.br +.SH DESCRIPTION +This manual page documents briefly the +.B bcopy +command. 
+.PP +.\" TeX users may be more comfortable with the \fB\fP and +.\" \fI\fP escape sequences to invoke bold face and italics, +.\" respectively. +.SH OPTIONS +A summary of options is included below. +.TP +.B \-? +Show version and usage of program. +.TP +.BI \-b\ bootstrap +Specify a bootstrap file. +.TP +.BI \-c\ config +Specify configuration file. +.TP +.BI \-d\ nn +Set debug level to \fInn\fP. +.TP +.BI \-dt +Print timestamp in debug output. +.TP +.BI \-i\ input +Specify input Volume names (separated by '|') +.TP +.BI \-o\ output +Specify output Volume names (separated by '|') +.TP +.BI \-p +Proceed in spite of I/O errors. +.TP +.BI \-w\ directory +Specify working directory (default \fI/tmp\fP). +.TP +.B \-v +Set verbose mode. +.SH SEE ALSO +.BR bls (1), +.BR bextract (1). +.br +.SH AUTHOR +This manual page was written by Jose Luis Tallon +.nh +. +.SH COPYRIGHT +This man page document is released under the BSD 2-Clause license. diff --git a/manpages/bextract.8 b/manpages/bextract.8 new file mode 100644 index 00000000..4cd9157d --- /dev/null +++ b/manpages/bextract.8 @@ -0,0 +1,68 @@ +.\" Hey, EMACS: -*- nroff -*- +.\" First parameter, NAME, should be all caps +.\" Second parameter, SECTION, should be 1-8, maybe w/ subsection +.\" other parameters are allowed: see man(7), man(1) +.TH BEXTRACT 8 "26 November 2009" "Kern Sibbald" "Network backup, recovery and verification" +.\" Please adjust this date whenever revising the manpage. +.\" +.SH NAME + bextract \- Bacula's 'Extract from tape' +.SH SYNOPSIS +.B bextract +.RI [ options ] +.I bacula-archive-device-name +.I output-directory +.br +.SH DESCRIPTION +This manual page documents briefly the +.B bextract +command. +.PP +.\" TeX users may be more comfortable with the \fB\fP and +.\" \fI\fP escape sequences to invoke bold face and italics, +.\" respectively. +.SH OPTIONS +A summary of options is included below. +.TP +.B \-? +Show version and usage of program. +.TP +.BI \-b\ bootstrap +Specify a bootstrap file. +.TP +.BI \-c\ config +Specify configuration file. +.TP +.BI \-d\ nn +Set debug level to \fInn\fP. +.TP +.BI \-dt +Print timestamp in debug output. +.TP +.BI \-e\ file +Specify exclude list. +.TP +.BI \-i\ file +Specify include list. +.TP +.BI \-p +Proceed in spite of I/O errors. +.TP +.BI \-t\ +Read data from volume, do not write anything +.TP +.B \-v +Set verbose mode. +.TP +.BI \-V\ volume-name +Specify volume names. +.SH SEE ALSO +.BR bls (1), +.BR bextract (1). +.br +.SH AUTHOR +This manual page was written by Jose Luis Tallon +.nh +. +.SH COPYRIGHT +This man page document is released under the BSD 2-Clause license. diff --git a/manpages/bls.8 b/manpages/bls.8 new file mode 100644 index 00000000..41d695bc --- /dev/null +++ b/manpages/bls.8 @@ -0,0 +1,86 @@ +.\" Hey, EMACS: -*- nroff -*- +.\" First parameter, NAME, should be all caps +.\" Second parameter, SECTION, should be 1-8, maybe w/ subsection +.\" other parameters are allowed: see man(7), man(1) +.TH BLS 8 "26 November 2009" "Kern Sibbald" "Network backup, recovery and verification" +.\" Please adjust this date whenever revising the manpage. 
+.\" +.\" Some roff macros, for reference: +.\" .nh disable hyphenation +.\" .hy enable hyphenation +.\" .ad l left justify +.\" .ad b justify to both left and right margins +.\" .nf disable filling +.\" .fi enable filling +.\" .br insert line break +.\" .sp insert n+1 empty lines +.\" for manpage-specific macros, see man(7) +.SH NAME + bls \- Bacula's 'Tape LS' +.SH SYNOPSIS +.B bls +.RI [ options ] +.I +.br +.SH DESCRIPTION +This manual page documents briefly the +.B bls +command. +.PP +.\" TeX users may be more comfortable with the \fB\fP and +.\" \fI\fP escape sequences to invoke bold face and italics, +.\" respectively. +.SH OPTIONS +A summary of options is included below. +.TP +.B \-? +Show version and usage of program. +.TP +.BI \-b\ bootstrap +Specify a bootstrap file. +.TP +.BI \-c\ config +Specify configuration file. +.TP +.BI \-d\ nn +Set debug level to \fInn\fP. +.TP +.BI \-dt +Print timestamp in debug output +.TP +.BI \-e\ +Specify exclude list file. +.TP +.BI \-i\ +Specify include list file. +.TP +.BI \-j +List jobs. +.TP +.BI \-k +List blocks. +.TP +.I (no \-j or \-k option) +List saved files +.TP +.BI -L +Dump label. +.TP +.BI \-p +Proceed in spite of errors. +.TP +.BI \-V\ volumes +Specify volume names (separated by '|'). +.TP +.B \-v +Set verbose mode. +.SH SEE ALSO +.BR bscan (8), +.BR bextract (8). +.br +.SH AUTHOR +This manual page was written by Jose Luis Tallon +.nh +. +.SH COPYRIGHT +This man page document is released under the BSD 2-Clause license. diff --git a/manpages/bpluginfo.8 b/manpages/bpluginfo.8 new file mode 100644 index 00000000..bc6e313c --- /dev/null +++ b/manpages/bpluginfo.8 @@ -0,0 +1,121 @@ +.\" Hey, EMACS: -*- nroff -*- +.\" First parameter, NAME, should be all caps +.\" Second parameter, SECTION, should be 1-8, maybe w/ subsection +.\" other parameters are allowed: see man(7), man(1) +.TH bpluginfo "8" "July 2012" "bpluginfo" "Network backup, recovery and verification" +.\" Please adjust this date whenever revising the manpage. +.\" +.SH NAME +bpluginfo \- Bacula Plugin information utility +.SH SYNOPSIS +.B bplufinfo +.RI [ options ] +.I plugin_filename.so +.br +.SH DESCRIPTION +.LP +The main purpose of +.B bpluginfo +is to display different information about Bacula plugin. You can use it to +check a plugin name, author, license and short description. You can use +'-f' option to display API implemented by the plugin. Some plugins may require +additional '-a' option for validating a Bacula Daemons API. In most cases it +is not required. +.PP +./ Bacula is a set of programs for performing a +./ .PP +./ - +./ .BR bpluginfo + +.PP +.SH OPTIONS +A summary of options is included below. +.TP +.B \-h +Show usage of the program. +.TP +.BI \-v +Verbose information printing all available data from the plugin, including +plugin header and implemented API. +.TP +.BI \-i +Display short information from plugin header only. This is a default option. +Option incompatible with +.B -f +option. +.TP +.BI \-f +Display information about implemented API functions. +.TP +.BI \-a\ +You can supply the plugin initialization function with a particular Bacula +API number. Without this option a default API number is '1'. Option require +a numeric argument. +.SH RETURN CODE +.BR bpluginfo +returns 0 on success, and non-zero on error. +.TP +You can check return code to find what was a cause of the error. 
+ * 0 - success + * 1 - cannot load a plugin + * 2 - cannot find a loadPlugin function + * 3 - cannot find an unloadPlugin function + * 10 - not enough memory +.SH EXAMPLE USAGE +This is an example of bplufinfo usage with verbose option (-v) and default plugin. +.LP +.sp +.RS +.nf + +\fB$ bpluginfo -v bpipe-fd.so + +Plugin type: File Daemon plugin +Plugin magic: *FDPluginData* +Plugin version: 1 +Plugin release date: January 2008 +Plugin author: Kern Sibbald +Plugin license: Bacula or Bacula Enterprise +Plugin description: Bacula Pipe File Daemon Plugin +Plugin API version: 6 + +Plugin functions: + newPlugin() + freePlugin() + getPluginValue() + setPluginValue() + handlePluginEvent() + startBackupFile() + endBackupFile() + startRestoreFile() + endRestoreFile() + pluginIO() + createFile() + setFileAttributes() +.fi +.RE + +.SH AUTHOR +Written by Radoslaw Korzeniewski (c) Inteos Sp. z o.o. +.SH BUGS +Does not handle all required bacula functions callbacks which can lead to +utility crash. +.\".SH TODO" + +.PP + +.PP + +.SH "REPORTING BUGS" +Report bugs to . +.SH COPYRIGHT +Copyright \(co 2012 Free Software Foundation Europe e.V. +.br +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +.SH "SEE ALSO" +.BR bacula-dir, +.BR bacula-sd, +.BR bacula-fd, +.BR "Bacula Plugins API" +This man page document is released under the BSD 2-Clause license. diff --git a/manpages/bregex.8 b/manpages/bregex.8 new file mode 100644 index 00000000..b2314e0f --- /dev/null +++ b/manpages/bregex.8 @@ -0,0 +1,62 @@ +.\" Hey, EMACS: -*- nroff -*- +.\" First parameter, NAME, should be all caps +.\" Second parameter, SECTION, should be 1-8, maybe w/ subsection +.\" other parameters are allowed: see man(7), man(1) +.TH BREGEX 8 "30 October 2011" "Kern Sibbald" "Network backup, utilities" +.\" Please adjust this date whenever revising the manpage. +.\" +.\" Some roff macros, for reference: +.\" .nh disable hyphenation +.\" .hy enable hyphenation +.\" .ad l left justify +.\" .ad b justify to both left and right margins +.\" .nf disable filling +.\" .fi enable filling +.\" .br insert line break +.\" .sp insert n+1 empty lines +.\" for manpage-specific macros, see man(7) +.SH NAME + bregex \- Bacula's 'regex' engine +.SH SYNOPSIS +.B bregex +.RI [ options ] +.I -f +.br +.SH DESCRIPTION +This manual page documents briefly the +.B bregex +command. +.br +This program can be useful for testing regex expressions to be applied against a list of filenames. +.PP +.\" TeX users may be more comfortable with the \fB\fP and +.\" \fI\fP escape sequences to invoke bold face and italics, +.\" respectively. +.SH OPTIONS +A summary of options is included below. +.TP +.B \-? +Show version and usage of program. +.TP +.BI \-d\ nn +Set debug level to \fInn\fP. +.TP +.BI \-dt +Print timestamp in debug output +.TP +.BI \-f\ +The data-file is a filename that contains lines of data to be matched (or not) against one or more patterns. When the program is run, it will prompt you for a regular expression pattern, then apply it one line at a time against the data in the file. Each line that matches will be printed preceded by its line number. You will then be prompted again for another pattern. +.TP +.BI \-n +Print lines that do not match +.TP +.BI \-l +Suppress lines numbers. +.SH SEE ALSO +.BR regex(7) +.br +.SH AUTHOR +This manual page was written by Bruno Friedmann +.nh +. +This man page document is released under the BSD 2-Clause license. 
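+.SH EXAMPLE
+A minimal session, using an illustrative file name, might look like:
+.nf
+  # build a small data file of lines to match (name is arbitrary)
+  echo notes.txt > /tmp/files.lst
+  echo report.pdf >> /tmp/files.lst
+  echo README >> /tmp/files.lst
+  bregex \-f /tmp/files.lst
+  # at the prompt, enter a regular expression such as txt$ ;
+  # each matching line is printed preceded by its line number
+.fi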
diff --git a/manpages/bscan.8 b/manpages/bscan.8 new file mode 100644 index 00000000..cdd1d24f --- /dev/null +++ b/manpages/bscan.8 @@ -0,0 +1,106 @@ +.\" Hey, EMACS: -*- nroff -*- +.\" First parameter, NAME, should be all caps +.\" Second parameter, SECTION, should be 1-8, maybe w/ subsection +.\" other parameters are allowed: see man(7), man(1) +.TH BSCAN 8 "26 November 2009" "Kern Sibbald" "Network backup, recovery and verification" +.\" Please adjust this date whenever revising the manpage. +.\" +.SH NAME + bscan \- Bacula's 'Scan tape' +.SH SYNOPSIS +.B bscan +.RI [ options ] +.I bacula-archive +.br +.SH DESCRIPTION +.LP +The purpose of bscan is to read (scan) a Bacula Volume and to recreate +or update the database contents with the information found on the Volume. +This is done in a non-destructive way. This permits restoring database +entries that have been lost by pruning, purging, deleting, or a database +corruption problem. + +.LP +Normally, it should not be necessary to run the bscan command because +the database is self maintaining, and most corrupted databases can be +repaired by the tools provided by the database vendors. +In addition, if you have maintained bootstrap files during backups, you +should be able to recover all your data from the bootstrap file +without needed an up to date catalog. + +.B bscan +command. +.PP +.\" TeX users may be more comfortable with the \fB\fP and +.\" \fI\fP escape sequences to invoke bold face and italics, +.\" respectively. +.SH OPTIONS +A summary of options is included below. +.TP +.B \-? +Show version and usage of program. +.TP +.BI \-b\ bootstrap +Specify a bootstrap file. +.TP +.BI \-c\ config +Specify configuration file. +.TP +.BI \-d\ nn +Set debug level to \fInn\fP. +.TP +.BI \-dt +Print timestamp in debug output. +.TP +.B \-m +Update media info in database. +.TP +.B \-D +Specify the driver database name (default: \fINULL\fP) +.TP +.BI \-n\ name +Specify the database name (default: \fIbacula\fP) +.TP +.BI \-u\ username +Specify database username (default: \fIbacula\fP) +.TP +.BI \-P\ password +Specify database password (default: \fInone\fP) +.TP +.BI \-h\ host +Specify database host (default: \fINULL\fP) +.TP +.BI \-t\ port +Specify database port (default: 0) +.TP +.B \-p +Proceed in spite of I/O errors. +.TP +.B \-r +List records. +.TP +.B \-s +Synchronize or store in Database. +.TP +.B \-S +Show scan progress periodically. +.TP +.B \-v +Verbose output mode. +.TP +.BI \-V\ volume +Specify volume names (separated by '|') +.TP +.BI \-w\ dir +Specify working directory (default from conf file) +.SH SEE ALSO +.BR bls (8), +.BR bextract (8). +.br +.SH AUTHOR +This manual page was written by Jose Luis Tallon +.nh +, +for the Debian GNU/Linux system (but may be used by others). +.SH COPYRIGHT +This man page document is released under the BSD 2-Clause license. diff --git a/manpages/bsmtp.1 b/manpages/bsmtp.1 new file mode 100644 index 00000000..4d3801af --- /dev/null +++ b/manpages/bsmtp.1 @@ -0,0 +1,120 @@ +.\" Hey, EMACS: -*- nroff -*- +.\" First parameter, NAME, should be all caps +.\" Second parameter, SECTION, should be 1-8, maybe w/ subsection +.\" other parameters are allowed: see man(7), man(1) +.TH BSMTP 1 "6 December 2009" "Kern Sibbald" "Network backup, recovery and verification" +.\" Please adjust this date whenever revising the manpage. 
+.\" +.SH NAME + bsmtp \- Bacula's SMTP client (mail submission program) + +.SH SYNOPSIS +.B bsmtp +.RI [ options ] +.I <...> + +.SH DESCRIPTION +.B bsmtp +is a simple mail user agent designed to permit more flexibility +than the standard mail programs typically found on Unix systems, and to +ease portability. It can even run on Windows machines. It is used +by the Director daemon to send notifications and requests to the +operator. + +.SH OPTIONS +.TP +.B \-8 +Encode the mail in UTF-8. +.TP +.B \-c +Set the \fBCc:\fR header. +.TP +.BI \-d\ nn +Set debug level to \fInn\fP. +.TP +.BI \-dt +Print timestamp in debug output. +.TP +.B \-f +Set the \fBFrom:\fR header. If not specified, +.B bsmtp +will try to use your username. +.TP +.BI \-h\ mailhost:port +Use mailhost:port as the SMTP server. (default port: 25) +.TP +.B \-s +Set the \fBSubject:\fR header. +.TP +.B \-r +Set the \fBReply-To:\fR: header. +.TP +.B \-l +Set the maximum number of lines to be sent. (default: unlimited) +.TP +.B \-? +Show version and usage of program. + +.SH USAGE +\fIrecipients\fR is a space separated list of email addresses. + +The body of the email message is read from standard input. Message is +ended by sending the EOF character (Ctrl-D on many systems) on the +start of a new line, much like many 'mail' commands. + +The actual, automated behavior of \fBbsmtp\fR will depend on the +mail-related configuration of the Director in the \fIMessages\fR resource +of \fIbacula-dir.conf\fR. + +Interactive use of \fBbsmtp\fR is pertinent to manually test and ensure these +configuration bits are valid. This is highly recommended. + +.SH CONFIGURATION +These commands should each appear on a single line in the configuration +file. + +Messages { + Name = Standard + mailcommand = "/home/bacula/bin/bsmtp \-h mail.domain.com \-f \\"\\(Bacula\\) \\<%r\\>\\" + \-s \\"Bacula: %t %e of %c %l\\" %r" + operatorcommand = "/home/bacula/bin/bsmtp \-h mail.domain.com \-f \\"\\(Bacula\\) \\<%r\\>\\" + \-s \\"Bacula: Intervention needed for %j\\" %r" + mail = sysadmin@site.domain.com = all, !skipped + operator = sysop@site.domain.com = mount + console = all, !skipped, !saved + } + +\fIhome/bacula/bin\fR is replaced with the path to the Bacula +binary directory, and mail.domain.com is replaced with the fully +qualified name of an SMTP server, which usually listen on port 25. + +.SH ENVIRONMENT +If the \fB-h\fR option is not specified, \fBbsmtp\fR will use environment variable \fBSMTPSERVER\fR, or 'localhost' if not set. + +.SH NOTES +Since \fBbsmtp\fR always uses a TCP connection rather than writing to a +spool file, you may find that your \fBFrom:\fR address is being rejected +because it does not contain a valid domain, or because your +message has gotten caught in spam filtering rules. Generally, you +should specify a fully qualified domain name in the from field, and +depending on whether your SMTP gateway is Exim or Sendmail, you may +need to modify the syntax of the from part of the message. + +If \fBbsmtp\fR cannot connect to the specified mail host, it will retry +to connect to \fBlocalhost\fR. + +.SH BUGS +If you are getting incorrect dates (e.g. 1970) and you are +running with a non-English locale, you might try setting the +\fBLANG="en_US"\fR environment variable. + +.SH AUTHOR +This manual page was written by Jose Luis Tallon +.nh +, revised and edited by Lucas B. Cohen +.nh +. +.SH SEE ALSO +.BR "bacula-dir" "(8) " +.SH COPYRIGHT +This man page document is released under the BSD 2-Clause license. 
diff --git a/manpages/btape.8 b/manpages/btape.8 new file mode 100644 index 00000000..6ea914e9 --- /dev/null +++ b/manpages/btape.8 @@ -0,0 +1,142 @@ +.\" Hey, EMACS: -*- nroff -*- +.\" First parameter, NAME, should be all caps +.\" Second parameter, SECTION, should be 1-8, maybe w/ subsection +.\" other parameters are allowed: see man(7), man(1) +.TH BTAPE 8 "26 November 2009" "Kern Sibbald" "Network backup, recovery and verification" +.\" Please adjust this date whenever revising the manpage. +.\" +.SH NAME + btape \- Bacula's Tape interface test program +.SH SYNOPSIS +.B btape +.RI [ options ] +.I device-name +.br +.SH DESCRIPTION +This manual page documents briefly the +.B btape +command. +.PP +.\" TeX users may be more comfortable with the \fB\fP and +.\" \fI\fP escape sequences to invoke bold face and italics, +.\" respectively. +.SH OPTIONS +A summary of options is included below. +.TP +.B \-? +Show summary of options and commands. +.TP +.BI \-b\ bootstrap +Specify a bootstrap file. +.TP +.BI \-c\ config +Specify configuration file. +.TP +.BI \-d\ nn +Set debug level to \fInn\fP. +.TP +.BI \-p +Proceed inspite of I/O errors. +.TP +.B \-s +No signals (for debugging). +.TP +.B \-v +Set verbose mode. +.sp 3 +.SH COMMANDS +.TP +.B autochanger +test autochanger +.TP +.B bsf +backspace file +.TP +.B bsr +backspace record +.TP +.B cap +list device capabilities +.TP +.B clear +clear tape errors +.TP +.B eod +go to end of Bacula data for append +.TP +.B eom +go to the physical end of medium +.TP +.B fill +fill tape, write onto second volume +.TP +.B unfill +read filled tape +.TP +.B fsf +forward space a file +.TP +.B fsr +forward space a record +.TP +.B help +print this reference +.TP +.B label +write a Bacula label to the tape +.TP +.B load +load a tape +.TP +.B quit +quit btape +.TP +.B rawfill +use write() to fill tape +.TP +.B readlabel +read and print the Bacula tape label +.TP +.B rectest +test record handling functions +.TP +.B rewind +rewind the tape +.TP +.B scan +read() tape block by block to EOT and report +.TP +.B scanblocks +Bacula read block by block to EOT and report +.TP +.B status +print tape status +.TP +.B test +General test Bacula tape functions +.TP +.B weof +write an EOF on the tape +.TP +.B wr +write a single Bacula block +.TP +.B rr +read a single record +.TP +.B rb +read a single bacula block +.TP +.B qfill +quick fill command +.br +.SH SEE ALSO +.BR bscan (1), +.BR bextract (1). +.br +.SH AUTHOR +This manual page was written by Jose Luis Tallon +.nh +. +.SH COPYRIGHT +This man page document is released under the BSD 2-Clause license. diff --git a/manpages/btraceback.8 b/manpages/btraceback.8 new file mode 100644 index 00000000..c17b2872 --- /dev/null +++ b/manpages/btraceback.8 @@ -0,0 +1,69 @@ +.\" Hey, EMACS: -*- nroff -*- +.\" First parameter, NAME, should be all caps +.\" Second parameter, SECTION, should be 1-8, maybe w/ subsection +.\" other parameters are allowed: see man(7), man(1) +.TH BTRACEBACK 1 "6 December 2009" "Kern Sibbald" "Network backup, recovery and verification" +.\" Please adjust this date whenever revising the manpage. +.\" +.SH NAME + btraceback \- wrapper script around gdb and bsmtp + +.SH SYNOPSIS +.B btraceback +.I /path/to/binary +.I pid + +.SH DESCRIPTION +\fBbtraceback\fR is a wrapper shell script around the \fBgdb\fR debugger +(or \fBdbx\fR on Solaris systems) and \fBbsmtp\fR, provided for debugging purposes. + +.SH USAGE +\fBbtraceback\fR is called by the exception handlers of the Bacula +daemons during a crash. 
It can also be called interactively to view
+the current state of the threads belonging to a process, but this is
+not recommended unless you are trying to debug a problem (see below).
+
+.SH NOTES
+In order to work properly, debugging symbols must be available to the
+debugger on the system, and gdb (or dbx on Solaris systems) must be
+available in the \fB$PATH\fR.
+
+If the Director or Storage daemon runs under a non-root uid, you will
+probably need to modify the \fBbtraceback\fR script to elevate
+privileges for the call to \fBgdb\fR/\fBdbx\fR, to ensure it has the proper
+permissions to debug when called by the daemon.
+
+Although Bacula's use of \fBbtraceback\fR within its exception handlers is
+always safe, manual or interactive use of \fBbtraceback\fR is subject to the
+same risks as live debugging of any program, which means it could cause
+Bacula to crash under rare and abnormal circumstances. Consequently, we
+do not recommend manual use of \fBbtraceback\fR in production environments
+unless it is required for debugging a problem.
+
+.SH ENVIRONMENT
+\fBbtraceback\fR relies on \fB$PATH\fR to find the debugger.
+
+.SH FILES
+.TP
+.I /usr/lib/bacula/btraceback
+.RS
+The script itself.
+.RE
+.TP
+.I /usr/sbin/btraceback
+.RS
+Symbolic link to \fI/usr/lib/bacula/btraceback\fR.
+.RE
+.TP
+.I /etc/bacula/scripts/btraceback.gdb
+.RS
+The GDB command batch used to output a stack trace.
+.RE
+
+.SH AUTHOR
+This manual page was written by Lucas B. Cohen
+.nh
+
+.SH SEE ALSO
+.BR "bsmtp" "(1) "
+This man page document is released under the BSD 2-Clause license.
diff --git a/manpages/bwild.8 b/manpages/bwild.8
new file mode 100644
index 00000000..aa097f9f
--- /dev/null
+++ b/manpages/bwild.8
@@ -0,0 +1,67 @@
+.\" Hey, EMACS: -*- nroff -*-
+.\" First parameter, NAME, should be all caps
+.\" Second parameter, SECTION, should be 1-8, maybe w/ subsection
+.\" other parameters are allowed: see man(7), man(1)
+.TH BWILD 8 "30 October 2011" "Kern Sibbald" "Network backup, utilities"
+.\" Please adjust this date whenever revising the manpage.
+.\"
+.\" Some roff macros, for reference:
+.\" .nh        disable hyphenation
+.\" .hy        enable hyphenation
+.\" .ad l      left justify
+.\" .ad b      justify to both left and right margins
+.\" .nf        disable filling
+.\" .fi        enable filling
+.\" .br        insert line break
+.\" .sp        insert n+1 empty lines
+.\" for manpage-specific macros, see man(7)
+.SH NAME
+ bwild \- Bacula's 'wildcard' engine
+.SH SYNOPSIS
+.B bwild
+.RI [ options ]
+.I -f data-file
+.br
+.SH DESCRIPTION
+This manual page documents briefly the
+.B bwild
+command.
+.br
+This is a simple program that will allow you to test wild-card expressions against a file of data.
+.PP
+.\" TeX users may be more comfortable with the \fB\fP and
+.\" \fI\fP escape sequences to invoke bold face and italics,
+.\" respectively.
+.SH OPTIONS
+A summary of options is included below.
+.TP
+.B \-?
+Show version and usage of program.
+.TP
+.BI \-d\ nn
+Set debug level to \fInn\fP.
+.TP
+.BI \-dt
+Print timestamp in debug output.
+.TP
+.BI \-f\ data-file
+The data-file is a filename that contains lines of data to be matched (or not) against one or more patterns. When the program is run, it will prompt you for a wild-card pattern, then apply it one line at a time against the data in the file. Each line that matches will be printed preceded by its line number. You will then be prompted again for another pattern.
+.br
+Enter an empty line for a pattern to terminate the program. You can print only lines that do not match by using the \-n option, and you can suppress printing of line numbers with the \-l option.
+.TP
+.BI \-n
+Print lines that do not match.
+.TP
+.BI \-l
+Suppress line numbers.
+.TP
+.BI \-i
+Use case-insensitive matching.
+.SH SEE ALSO
+.BR fnmatch (3)
+.br
+.SH AUTHOR
+This manual page was written by Bruno Friedmann
+.nh
+.
+This man page document is released under the BSD 2-Clause license.
diff --git a/manpages/dbcheck.8 b/manpages/dbcheck.8
new file mode 100644
index 00000000..691c3dc4
--- /dev/null
+++ b/manpages/dbcheck.8
@@ -0,0 +1,204 @@
+.\" Hey, EMACS: -*- nroff -*-
+.\" First parameter, NAME, should be all caps
+.\" Second parameter, SECTION, should be 1-8, maybe w/ subsection
+.\" other parameters are allowed: see man(7), man(1)
+.TH DBCHECK 8 "26 September 2009" "Kern Sibbald" "Network backup, recovery and verification"
+.\" Please adjust this date whenever revising the manpage.
+.\"
+.SH NAME
+ dbcheck \- Bacula's Catalog Database Check/Clean program
+.SH SYNOPSIS
+.B dbcheck
+.RI [ options ]
+.I working-directory
+.I bacula-database
+.I user
+.I password
+.RI [ dbhost ]
+.RI [ dbport ]
+.br
+.SH DESCRIPTION
+This manual page documents briefly the
+.B dbcheck
+command.
+.PP
+dbcheck will not repair your database if it is broken. Please see your
+vendor's instructions for fixing a broken database.
+
+dbcheck is a simple program that will search for logical
+inconsistencies in the Bacula tables in your database, and optionally fix them.
+It is a database maintenance routine, in the sense that it can
+detect and remove unused rows, but it is not a database repair
+routine. To repair a database, see the tools furnished by the
+database vendor. Normally dbcheck should never need to be run,
+but if Bacula has crashed or you have a lot of Clients, Pools, or
+Jobs that you have removed, it could be useful.
+.SH OPTIONS
+A summary of options is included below.
+.TP
+.B \-?
+Show version and usage of program.
+.TP
+.BI \-b
+If specified, dbcheck will run in batch mode, and it will proceed to examine
+and fix (if \-f is set) all programmed inconsistency checks. By default,
+dbcheck will enter interactive mode (see below).
+.TP
+.BI \-C\ catalog
+Catalog name in the Director's conf file.
+.TP
+.BI \-c\ config
+If the \-c option is given with the Director's conf file, there is no need to
+enter any of the command line arguments, in particular the working directory,
+as dbcheck will read them from the file.
+.TP
+.BI \-B
+Print catalog configuration and exit.
+.TP
+.BI -d\ nn
+Set debug level to \fInn\fP.
+.TP
+.BI \-dt
+Print timestamp in debug output.
+.TP
+.BI \-f
+If specified, dbcheck will repair (fix) the inconsistencies it finds.
+Otherwise, it will report only.
+.TP
+.BI \-v
+Set verbose mode.
+.SH INTERACTIVE MODE
+In interactive mode dbcheck will prompt with the following:
+.PP
+Hello, this is the database check/correct program.
+Please select the function you want to perform.
+ 1) Toggle modify database flag + 2) Toggle verbose flag + 3) Repair bad Filename records + 4) Repair bad Path records + 5) Eliminate duplicate Filename records + 6) Eliminate duplicate Path records + 7) Eliminate orphaned Jobmedia records + 8) Eliminate orphaned File records + 9) Eliminate orphaned Path records + 10) Eliminate orphaned Filename records + 11) Eliminate orphaned FileSet records + 12) Eliminate orphaned Client records + 13) Eliminate orphaned Job records + 14) Eliminate all Admin records + 15) Eliminate all Restore records + 16) All (3-15) + 17) Quit +Select function number: + +By entering 1 or 2, you can toggle the modify database flag (\-f option) and +the verbose flag (\-v). It can be helpful and reassuring to turn off the +modify database flag, then select one or more of the consistency checks +(items 3 through 9) to see what will be done, then toggle the modify flag +on and re-run the check. + +The inconsistencies examined are the following: + +.BR +Duplicate filename records. This can happen if you accidentally run two + copies of Bacula at the same time, and they are both adding filenames + simultaneously. It is a rare occurrence, but will create an + inconsistent database. If this is the case, you will receive error + messages during Jobs warning of duplicate database records. If you are + not getting these error messages, there is no reason to run this check. + +.BR +Repair bad Filename records. This checks and corrects filenames that have + a trailing slash. They should not. + +.BR +Repair bad Path records. This checks and corrects path names that do not + have a trailing slash. They should. + +.BR +Duplicate path records. This can happen if you accidentally run two copies + of Bacula at the same time, and they are both adding filenames + simultaneously. It is a rare occurrence, but will create an + inconsistent database. See the item above for why this occurs and how + you know it is happening. + +.BR +Orphaned JobMedia records. This happens when a Job record is deleted + (perhaps by a user issued SQL statement), but the corresponding JobMedia + record (one for each Volume used in the Job) was not deleted. Normally, + this should not happen, and even if it does, these records generally do + not take much space in your database. However, by running this check, + you can eliminate any such orphans. + +.BR +Orphaned File records. This happens when a Job record is deleted (perhaps + by a user issued SQL statement), but the corresponding File record (one + for each Volume used in the Job) was not deleted. Note, searching for + these records can be very time consuming (i.e. it may take hours) for a + large database. Normally this should not happen as Bacula takes care to + prevent it. Just the same, this check can remove any orphaned File + records. It is recommended that you run this once a year since orphaned + File records can take a large amount of space in your database. You + might want to ensure that you have indexes on JobId, FilenameId, and + PathId for the File table in your catalog before running this command. + +.BR +Orphaned Path records. This condition happens any time a directory is + deleted from your system and all associated Job records have been + purged. During standard purging (or pruning) of Job records, Bacula + does not check for orphaned Path records. As a consequence, over a + period of time, old unused Path records will tend to accumulate and use + space in your database. This check will eliminate them. 
It is
+ recommended that you run this check at least once a year.
+
+.BR
+Orphaned Filename records. This condition happens any time a file is
+ deleted from your system and all associated Job records have been
+ purged. This can happen quite frequently as there are quite a large
+ number of files that are created and then deleted. In addition, if you
+ do a system update or delete an entire directory, there can be a very
+ large number of Filename records that remain in the catalog but are no
+ longer used.
+
+ During standard purging (or pruning) of Job records, Bacula does not
+ check for orphaned Filename records. As a consequence, over a period of
+ time, old unused Filename records will accumulate and use space in your
+ database. This check will eliminate them. It is strongly recommended
+ that you run this check at least once a year, and for large databases
+ (more than 200 megabytes), it is probably better to run this once every
+ 6 months.
+
+.BR
+Orphaned Client records. These records can remain in the database long
+ after you have removed a client.
+
+.BR
+Orphaned Job records. If no client is defined for a job or you do not run
+ a job for a long time, you can accumulate old job records. This option
+ allows you to remove jobs that are not attached to any client (and thus
+ useless).
+
+.BR
+All Admin records. This command will remove all Admin records,
+ regardless of their age.
+
+.BR
+All Restore records. This command will remove all Restore records,
+ regardless of their age.
+
+By the way, I personally run dbcheck only when I have messed up
+my database due to a bug in developing Bacula code, so normally
+you should never need to run dbcheck in spite of the
+recommendations given above, which are given so that users don't
+waste their time running dbcheck too often.
+
+.SH SEE ALSO
+.BR bls (1),
+.BR bextract (1).
+.br
+.SH AUTHOR
+This manual page was written by Jose Luis Tallon
+.nh
+.
+.SH COPYRIGHT
+This man page document is released under the BSD 2-Clause license.
diff --git a/platforms/Makefile.in b/platforms/Makefile.in
new file mode 100644
index 00000000..afdb9c86
--- /dev/null
+++ b/platforms/Makefile.in
@@ -0,0 +1,109 @@
+#
+# Copyright (C) 2000-2015 Kern Sibbald
+# License: BSD 2-Clause; see file LICENSE-FOSS
+#
+# This is the makefile template for the platform directory
+# which contains general platform installation.
+# +# 15 November 2001 -- Kern Sibbald +# +# for Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + + +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL = @INSTALL@ + +SUBDIRS = hurd freebsd redhat solaris unknown openbsd osx irix gentoo \ + debian darwin aix bsdi mandrake slackware alpha ubuntu systemd + +MAKE = make + +DISTNAME=@DISTNAME@ +DISTVER=@DISTVER@ + +all: + @for subdir in ${SUBDIRS}; do \ + if [ -f $${subdir}/Makefile ]; then \ + (cd $${subdir}; $(MAKE) DESTDIR=$(DESTDIR);) \ + fi; \ + done + + +install: install-autostart + +install-autostart: + @if test x$(DISTNAME) != x ; then \ + (cd $(DISTNAME); \ + $(MAKE) DESTDIR=$(DESTDIR) "DISTNAME=$(DISTNAME)" "DISTVER=$(DISTVER)" $@) \ + fi + +install-autostart-dir: + @if test x$(DISTNAME) != x ; then \ + (cd $(DISTNAME); \ + $(MAKE) DESTDIR=$(DESTDIR) "DISTNAME=$(DISTNAME)" "DISTVER=$(DISTVER)" $@) \ + fi + +install-autostart-fd: + @if test x$(DISTNAME) != x ; then \ + (cd $(DISTNAME); \ + $(MAKE) DESTDIR=$(DESTDIR) "DISTNAME=$(DISTNAME)" "DISTVER=$(DISTVER)" $@) \ + fi + +install-autostart-sd: + @if test x$(DISTNAME) != x ; then \ + (cd $(DISTNAME); \ + $(MAKE) DESTDIR=$(DESTDIR) "DISTNAME=$(DISTNAME)" "DISTVER=$(DISTVER)" $@) \ + fi + +uninstall: uninstall-autostart + +uninstall-autostart: + @if test x$(DISTNAME) != x ; then \ + (cd $(DISTNAME); \ + $(MAKE) DESTDIR=$(DESTDIR) "DISTNAME=$(DISTNAME)" "DISTVER=$(DISTVER)" $@) \ + fi + +uninstall-autostart-dir: + @if test x$(DISTNAME) != x ; then \ + (cd $(DISTNAME); \ + $(MAKE) DESTDIR=$(DESTDIR) "DISTNAME=$(DISTNAME)" "DISTVER=$(DISTVER)" $@) \ + fi + +uninstall-autostart-fd: + @if test x$(DISTNAME) != x ; then \ + (cd $(DISTNAME); \ + $(MAKE) DESTDIR=$(DESTDIR) "DISTNAME=$(DISTNAME)" "DISTVER=$(DISTVER)" $@) \ + fi + +uninstall-autostart-sd: + @if test x$(DISTNAME) != x ; then \ + (cd $(DISTNAME); \ + $(MAKE) DESTDIR=$(DESTDIR) "DISTNAME=$(DISTNAME)" "DISTVER=$(DISTVER)" $@) \ + fi + +depend: + +clean: + @for subdir in ${SUBDIRS}; do \ + if [ -f $${subdir}/Makefile ]; then \ + (cd $${subdir}; $(MAKE) clean) \ + fi; \ + done + @rm -f 1 2 3 + +distclean: + @rm -f Makefile + @for subdir in ${SUBDIRS}; do \ + if [ -f $${subdir}/Makefile ]; then \ + (cd $${subdir}; $(MAKE) distclean) \ + fi; \ + done + +devclean: + @rm -f Makefile + @for subdir in ${SUBDIRS}; do \ + if [ -f $${subdir}/Makefile ]; then \ + (cd $${subdir}; $(MAKE) devclean) \ + fi; \ + done diff --git a/platforms/README b/platforms/README new file mode 100644 index 00000000..e8d21529 --- /dev/null +++ b/platforms/README @@ -0,0 +1,5 @@ + +This directory, /platforms, contains the platform +specific installation files. Files that are common to all +platforms are in this directory, and files that are specific +to a particular platform are contained in subdirectories. diff --git a/platforms/aix/Makefile.in b/platforms/aix/Makefile.in new file mode 100644 index 00000000..65663837 --- /dev/null +++ b/platforms/aix/Makefile.in @@ -0,0 +1,88 @@ +# +# This file is used as the template to create the +# Makefile for the AXI specific installation. 
+# +# 1 March 2003 -- Kern Sibbald +# +# Copyright (C) 2003-2015, Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# for Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +INSTALL = @INSTALL@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ + +nothing: + +install: install-autostart + +install-autostart: install-autostart-fd install-autostart-sd install-autostart-dir + + +install-autostart-fd: + @rm -f /etc/rc0.d/K20bacula-fd + @rm -f /etc/rc1.d/S99bacula-fd + @rm -f /etc/rc2.d/S99bacula-fd + @$(INSTALL_PROGRAM) -m 744 bacula-fd /etc/init.d/bacula-fd + # set symlinks for script at startup and shutdown + @ln -f -s /etc/init.d/bacula-fd /etc/rc0.d/K20bacula-fd + @ln -f -s /etc/init.d/bacula-fd /etc/rc1.d/S99bacula-fd + @ln -f -s /etc/init.d/bacula-fd /etc/rc2.d/S99bacula-fd + + +install-autostart-sd: + @rm -f /etc/rc0.d/K20bacula-sd + @rm -f /etc/rc1.d/S99bacula-sd + @rm -f /etc/rc2.d/S99bacula-sd + @$(INSTALL_PROGRAM) -m 744 bacula-sd /etc/rc.d/init.d/bacula-sd + # set symlinks for script at startup and shutdown + @ln -f -s /etc/init.d/bacula-sd /etc/rc0.d/K20bacula-sd + @ln -f -s /etc/init.d/bacula-sd /etc/rc1.d/S99bacula-sd + @ln -f -s /etc/init.d/bacula-sd /etc/rc2.d/S99bacula-sd + + +install-autostart-dir: + @rm -f /etc/rc0.d/K20bacula-dir + @rm -f /etc/rc1.d/S99bacula-dir + @rm -f /etc/rc2.d/S99bacula-dir + @$(INSTALL_PROGRAM) -m 744 bacula-dir /etc/rc.d/init.d/bacula-dir + # set symlinks for script at startup and shutdown + @ln -f -s /etc/init.d/bacula-dir /etc/rc0.d/K20bacula-dir + @ln -f -s /etc/init.d/bacula-dir /etc/rc1.d/S99bacula-dir + @ln -f -s /etc/init.d/bacula-dir /etc/rc2.d/S99bacula-dir + + +uninstall: uninstall-autostart + +uninstall-autostart: uninstall-autostart-fd uninstall-autostart-sd uninstall-autostart-dir + +uninstall-autostart-fd: + @rm -f /etc/rc0.d/K20bacula-fd + @rm -f /etc/rc1.d/S99bacula-fd + @rm -f /etc/rc2.d/S99bacula-fd + @rm -f /etc/rc.d/init.d/bacula-fd + + +uninstall-autostart-sd: + @rm -f /etc/rc0.d/K20bacula-sd + @rm -f /etc/rc1.d/S99bacula-sd + @rm -f /etc/rc2.d/S99bacula-sd + @rm -f /etc/rc.d/init.d/bacula-sd + +uninstall-autostart-dir: + @rm -f /etc/rc0.d/K20bacula-dir + @rm -f /etc/rc1.d/S99bacula-dir + @rm -f /etc/rc2.d/S99bacula-dir + @rm -f /etc/rc.d/init.d/bacula-dir + +clean: + @rm -f 1 2 3 + +distclean: clean + @rm -f bacula-sd bacula-fd bacula-dir + @rm -f Makefile bacula-*.spec + +devclean: clean + @rm -f bacula-sd bacula-fd bacula-dir + @rm -f Makefile bacula-*.spec diff --git a/platforms/alpha/Makefile.in b/platforms/alpha/Makefile.in new file mode 100644 index 00000000..3e2d9c54 --- /dev/null +++ b/platforms/alpha/Makefile.in @@ -0,0 +1,51 @@ +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# This file is used as the template to create the +# Makefile for the Tru64 specific installation. 
+# +# 28 May 2004 -- Kern Sibbald +# +# for Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +INSTALL = @INSTALL@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ + +nothing: + +install: install-autostart + +install-autostart: install-autostart-fd + + +install-autostart-fd: + @rm -f /sbin/rc0.d/K20bacula-fd + @rm -f /sbin/rc3.d/S99bacula-fd + @$(INSTALL_PROGRAM) -m 744 bacula-fd /sbin/init.d/bacula-fd + # set symlinks for script at startup and shutdown + @ln -f -s /sbin/init.d/bacula-fd /sbin/rc0.d/K20bacula-fd + @ln -f -s /sbin/init.d/bacula-fd /sbin/rc3.d/S99bacula-fd + + +uninstall: uninstall-autostart + +uninstall-autostart: uninstall-autostart-fd + +uninstall-autostart-fd: + @rm -f /sbin/rc0.d/K20bacula-fd + @rm -f /sbin/rc3.d/S99bacula-fd + @rm -f /sbin/init.d/bacula-fd + + +clean: + @rm -f 1 2 3 + +distclean: clean + @rm -f bacula-fd + @rm -f Makefile + +devclean: clean + @rm -f bacula-fd + @rm -f Makefile diff --git a/platforms/alpha/bacula-fd.in b/platforms/alpha/bacula-fd.in new file mode 100644 index 00000000..b162cb49 --- /dev/null +++ b/platforms/alpha/bacula-fd.in @@ -0,0 +1,44 @@ +#!/bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# bacula This shell script takes care of starting and stopping +# the bacula File daemon. +# +# chkconfig: 2345 90 9 +# description: The Leading Open Source Backup Solution. +# +# for Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +RETVAL=0 +case "$1" in + start) + echo "Starting the Bacula File daemon: " + /sbin/bacula-fd $2 -c /etc/bacula/bacula-fd.conf + RETVAL=$? + echo +## [ $RETVAL -eq 0 ] && touch @subsysdir@/bacula-fd + ;; + stop) + echo "Stopping the Bacula File daemon: " +# killproc @sbindir@/bacula-fd + ID=`ps -ef | grep -F bacula-fd | grep -Fv grep | awk '{print $2}'` + [ -n "$ID" ] && kill $ID + RETVAL=$? + echo +## [ $RETVAL -eq 0 ] && rm -f @subsysdir@/bacula-fd + ;; + restart) + $0 stop + sleep 5 + $0 start + RETVAL=$? + ;; + *) + echo "Usage: $0 {start|stop|restart}" + exit 1 + ;; +esac +exit $RETVAL diff --git a/platforms/bsdi/Makefile.in b/platforms/bsdi/Makefile.in new file mode 100644 index 00000000..d228a8b6 --- /dev/null +++ b/platforms/bsdi/Makefile.in @@ -0,0 +1,143 @@ +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# This file is used as the template to create the +# Makefile for the Solaris specific installation. +# +# 15 November 2001 -- Kern Sibbald +# +# for Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +VPATH = @srcdir@ +INSTALL = @INSTALL@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +SED = /usr/bin/sed + +nothing: + +install: install-autostart + +install-autostart: install-autostart-fd install-autostart-sd install-autostart-dir + + +install-autostart-fd: + @echo "FreeBSD platform installation" + $(INSTALL_PROGRAM) -m 744 bacula-fd /etc/rc.bacula-fd + @-today="`date +%Y%m%d%H%M`"; \ + grep -q /etc/rc.bacula-fd /etc/rc.local; \ + if [ $$? -eq 0 ]; then \ + echo "/etc/rc.local already patched"; \ + else \ + rm -f /etc/rc.local.$$today; \ + cp -p /etc/rc.local /etc/rc.local.$$today; \ + ( echo "Start the Bacula File daemon. 
Do not remove the 'TAG_BACULA_FD' text"; \ + echo "if [ -x /etc/rc.bacula-fd ]; then # TAG_BACULA_FD"; \ + echo " /etc/rc.bacula-fd start # TAG_BACULA_FD"; \ + echo "fi # TAG_BACULA_FD"; \ + ) >> /etc/rc.local; \ + echo ""; \ + fi + + +install-autostart-sd: + @echo "FreeBSD platform installation" + $(INSTALL_PROGRAM) -m 744 bacula-sd /etc/rc.bacula-sd + @-today="`date +%Y%m%d%H%M`"; \ + grep -q /etc/rc.bacula-sd /etc/rc.local; \ + if [ $$? -eq 0 ]; then \ + echo "/etc/rc.local already patched"; \ + else \ + rm -f /etc/rc.local.$$today; \ + cp -p /etc/rc.local /etc/rc.local.$$today; \ + ( echo "Start the Bacula Storage daemon. Do not remove the 'TAG_BACULA_SD' text"; \ + echo "if [ -x /etc/rc.bacula-fd ]; then # TAG_BACULA_SD"; \ + echo " /etc/rc.bacula-fd start # TAG_BACULA_SD"; \ + echo "fi # TAG_BACULA_SD"; \ + ) >> /etc/rc.local; \ + echo ""; \ + fi + +install-autostart-dir: + @echo "FreeBSD platform installation" + $(INSTALL_PROGRAM) -m 744 bacula-dir /etc/rc.bacula-dir + @-today="`date +%Y%m%d%H%M`"; \ + grep -q /etc/rc.bacula-dir /etc/rc.local; \ + if [ $$? -eq 0 ]; then \ + echo "/etc/rc.local already patched"; \ + else \ + rm -f /etc/rc.local.$$today; \ + cp -p /etc/rc.local /etc/rc.local.$$today; \ + ( echo "Start the Bacula Director. Do not remove the 'TAG_BACULA_DIR' text"; \ + echo "if [ -x /etc/rc.bacula-dir ]; then # TAG_BACULA_DIR"; \ + echo " /etc/rc.bacula-dir start # TAG_BACULA_DIR"; \ + echo "fi # TAG_BACULA_DIR"; \ + ) >> /etc/rc.local; \ + echo ""; \ + fi + + +uninstall: uninstall-autostart + +uninstall-autostart: uninstall-autostart-fd uninstall-autostart-sd uninstall-autostart-dir + +uninstall-autostart-fd: + @echo "FreeBSD platform uninstall" + rm -f /etc/rc.bacula-fd + @-today="`date +%Y%m%d%H%M`"; \ + for f in /etc/rc.local ; do \ + grep -q '# TAG_BACULA_FD' $$f; \ + if [ $$? -eq 0 ]; then \ + echo "removing Bacula lines from $$f"; \ + rm -f $$f.$$today; \ + cp -p $$f $$f.$$today; \ + $(SED) -e '/TAG_BACULA_FD/d;' \ + < $$f.$$today > $$f; \ + chmod 644 $$f; \ + fi; \ + done + + +uninstall-autostart-sd: + @echo "FreeBSD platform uninstall" + rm -f /etc/rc.bacula-sd + @-today="`date +%Y%m%d%H%M`"; \ + for f in /etc/rc.local ; do \ + grep -q '# TAG_BACULA_SD' $$f; \ + if [ $$? -eq 0 ]; then \ + echo "removing Bacula lines from $$f"; \ + rm -f $$f.$$today; \ + cp -p $$f $$f.$$today; \ + $(SED) -e '/TAG_BACULA_SD/d;' \ + < $$f.$$today > $$f; \ + chmod 644 $$f; \ + fi; \ + done + +uninstall-autostart-dir: + @echo "FreeBSD platform uninstall" + rm -f /etc/rc.bacula-dir + @-today="`date +%Y%m%d%H%M`"; \ + for f in /etc/rc.local ; do \ + grep -q '# TAG_BACULA_DIR' $$f; \ + if [ $$? -eq 0 ]; then \ + echo "removing Bacula lines from $$f"; \ + rm -f $$f.$$today; \ + cp -p $$f $$f.$$today; \ + $(SED) -e '/TAG_BACULA_DIR/d;' \ + < $$f.$$today > $$f; \ + chmod 644 $$f; \ + fi; \ + done + +clean: + @rm -f 1 2 3 + +distclean: clean + @rm -f bacula-sd bacula-fd bacula-dir + @rm -f Makefile bacula-*.spec + +devclean: clean + @rm -f bacula-sd bacula-fd bacula-dir + @rm -f Makefile bacula-*.spec diff --git a/platforms/bsdi/bacula-dir.in b/platforms/bsdi/bacula-dir.in new file mode 100755 index 00000000..342a0638 --- /dev/null +++ b/platforms/bsdi/bacula-dir.in @@ -0,0 +1,41 @@ +#! /bin/sh +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# bacula This shell script takes care of starting and stopping +# the bacula Director daemon +# +# chkconfig: 2345 90 9 +# description: The Leading Open Source Backup Solution. 
+# +# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +RETVAL=0 +case "$1" in + start) + echo "Starting the Bacula Director: " + @sbindir@/bacula-dir $2 -c @sysconfdir@/bacula-dir.conf + RETVAL=$? + echo + [ $RETVAL -eq 0 ] && touch @subsysdir@/bacula-dir + ;; + stop) + echo "Stopping the Director daemon: " +# killproc @sbindir@/bacula-dir + RETVAL=$? + echo + [ $RETVAL -eq 0 ] && rm -f @subsysdir@/bacula-dir + ;; + restart) + $0 stop + sleep 5 + $0 start + RETVAL=$? + ;; + *) + echo "Usage: $0 {start|stop|restart}" + exit 1 + ;; +esac +exit $RETVAL diff --git a/platforms/bsdi/bacula-fd.in b/platforms/bsdi/bacula-fd.in new file mode 100755 index 00000000..e98f7ad2 --- /dev/null +++ b/platforms/bsdi/bacula-fd.in @@ -0,0 +1,42 @@ +#! /bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# bacula This shell script takes care of starting and stopping +# the bacula File daemon. +# +# chkconfig: 2345 90 9 +# description: The Leading Open Source Backup Solution. +# +# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +RETVAL=0 +case "$1" in + start) + echo "Starting the Bacula File daemon: " + @sbindir@/bacula-fd $2 -c @sysconfdir@/bacula-fd.conf + RETVAL=$? + echo + [ $RETVAL -eq 0 ] && touch @subsysdir@/bacula-fd + ;; + stop) + echo "Stopping the Bacula File daemon: " +# killproc @sbindir@/bacula-fd + RETVAL=$? + echo + [ $RETVAL -eq 0 ] && rm -f @subsysdir@/bacula-fd + ;; + restart) + $0 stop + sleep 5 + $0 start + RETVAL=$? + ;; + *) + echo "Usage: $0 {start|stop|restart}" + exit 1 + ;; +esac +exit $RETVAL diff --git a/platforms/bsdi/bacula-sd.in b/platforms/bsdi/bacula-sd.in new file mode 100755 index 00000000..7aafe60b --- /dev/null +++ b/platforms/bsdi/bacula-sd.in @@ -0,0 +1,42 @@ +#! /bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# bacula This shell script takes care of starting and stopping +# the bacula Storage daemon. +# +# chkconfig: 2345 90 9 +# description: The Leading Open Source Backup Solution. +# +# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +RETVAL=0 +case "$1" in + start) + echo "Starting the Bacula Storage daemon: " + @sbindir@/bacula-sd $2 -c @sysconfdir@/bacula-sd.conf + RETVAL=$? + echo + [ $RETVAL -eq 0 ] && touch @subsysdir@/bacula-sd + ;; + stop) + echo "Stopping the Bacula Storage daemon: " +# killproc @sbindir@/bacula-sd + RETVAL=$? + echo + [ $RETVAL -eq 0 ] && rm -f @subsysdir@/bacula-sd + ;; + restart) + $0 stop + sleep 5 + $0 start + RETVAL=$? + ;; + *) + echo "Usage: $0 {start|stop|restart}" + exit 1 + ;; +esac +exit $RETVAL diff --git a/platforms/contrib-rpm/README b/platforms/contrib-rpm/README new file mode 100644 index 00000000..498ac9bc --- /dev/null +++ b/platforms/contrib-rpm/README @@ -0,0 +1,61 @@ +README file for bacula third party rpm contributors + +Sun Jul 16 2006 +D. Scott Barninger + +This document outlines the procedures to create rpm packages for +bacula for platforms supported in the rpm spec file but not published +on sourceforge. Contributors wishing to build and supply such rpm +packages for release on the sourceforge project page should read this +documentation. Contributors should contact either Kern Sibbald +or Scott Barninger . + +The general requirements to have contrib rpm packages published on the project +page are: + +1. Packages must be created using the current released source rpm and the +shell script build_rpm.sh in this directory. +2. 
The packager must sign all rpm packages with his/her personal gpg key +and supply a copy of the public key as both a text file and as an rpm +using the spec file rpmkey.spec in this directory. +3. No modifications to either the bacula source code or spec file are +permitted without consulting the project admins. +4. There will be only one sanctioned packager for a given distribution. +5. Contributors who submit two or more successful releases may be given +release permissions to release their files directly to sourceforge. Prior to +that you will need to coordinate the upload of your files to sourceforge with +Kern or Scott to get them posted to the project page. + +How to create an rpmkey package: + +1. Create a plaintext copy of your gpg public key in a file named yourname.asc +where yourname is in the form first initial and last name, ie. sbarninger.asc +2. Edit the rpmkey.spec file and edit the line + %define pubkeyname yourname +replacing yourname with your name as in step 1 above. +3. Edit the rpmkey.spec file and edit the line + Packager: Your Name +inserting your name and email information. +4. Copy your key file to your SOURCES directory and the spec file to your SPECS +directory and create an rpm package. +5. Both the key text file and the rpm will be published on sourceforge in your +release package rpms-contrib-yourname. + +How to build a release: +1. Copy the file build_rpm.sh to a temporary working directory and open it in a +text editor. Examine and edit the configuration section of the script to match +your platform and build options. Set permissions on the script to 755. +3. Download the srpm you wish to build to the same directory. +4. Execute the script by ./build_rpm.sh + +The script will build all the necessary packages, move them into the current +working directory, rename them for your platform, and sign them with your key. + +Note: you must have a file named .rpmmacros in your home directory containing +at least the following 2 lines: + +%_signature gpg +%_gpg_name Your Name + +The name and email information in the above line must correspond to the information +in the key used to sign the packages when you generated the key. diff --git a/platforms/contrib-rpm/build_rpm.sh b/platforms/contrib-rpm/build_rpm.sh new file mode 100755 index 00000000..07f4efe4 --- /dev/null +++ b/platforms/contrib-rpm/build_rpm.sh @@ -0,0 +1,203 @@ +#!/bin/bash + +# shell script to build bacula rpm release +# copy this script into a working directory with the src rpm to build and execute +# 19 Aug 2006 D. 
Scott Barninger +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS + + +# signing rpms +# Make sure you have a .rpmmacros file in your home directory containing the following: +# +# %_signature gpg +# %_gpg_name Your Name +# +# the %_gpg_name information must match your key + + +# usage: ./build_rpm.sh + +########################################################################################### +# script configuration section + +VERSION=5.0.2 +RELEASE=1 + +# build platform for spec +# set to one of rh7,rh8,rh9,fc1,fc3,fc4,fc5,fc6,fc7,fc8,fc9,wb3,rhel3,rhel4,rhel5,centos3,centos4,centos5,sl3, sl4,sl5,su9,su10,su102,su103,su110,su111,su112,mdk,mdv +PLATFORM=su111 + +# platform designator for file names +# for RedHat/Fedora set to one of rh7,rh8,rh9,fc1,fc3,fc4,fc5,fc6,fc7,fc8,fc9 OR +# for RHEL3/clones wb3, rhel3, sl3 & centos3 set to el3 OR +# for RHEL4/clones rhel4, sl4 & centos4 set to el4 OR +# for RHEL5/clones rhel5, sl5 & centos5 set to el5 OR +# for SuSE set to su90, su91, su92, su100 or su101 or su102 or su103 or su110 or su111 or su112 OR +# for Mandrake set to 101mdk or 20060mdk +FILENAME=su111 + +# MySQL version +# set to empty (for MySQL 3), 4 or 5 +MYSQL= + +# enter your name and email address here +PACKAGER="D. Scott Barninger " + +# enter the full path to your RPMS output directory +RPMDIR=/usr/src/packages/RPMS/i586 +RPMDIR2=/usr/src/packages/RPMS/noarch + +# enter the full path to your rpm BUILD directory +RPMBUILD=/usr/src/packages/BUILD + +# enter your arch string here (i386, i586, i686, x86_64) +ARCH=i586 + +# if the src rpm is not in the current working directory enter the directory location +# with trailing slash where it is found. +SRPMDIR= + +# to build the mtx package set to 1, else 0 +BUILDMTX=0 + +# to build the bat package set to 1, else 0 +BUILDBAT=1 + +# set to 1 to sign packages, 0 not to sign if you want to sign on another machine. +SIGN=0 + +# to save the bacula-updatedb package set to 1, else 0 +# only one updatedb package is required per release so normally this should be 0 +# for all contrib packagers +SAVEUPDATEDB=0 + +# to override your language shell variable uncomment and edit this +# export LANG=en_US.UTF-8 + +# this is now in the spec file but when building bat on older versions uncomment +#export QTDIR=$(pkg-config --variable=prefix QtCore) +#export QTINC=$(pkg-config --variable=includedir QtCore) +#export QTLIB=$(pkg-config --variable=libdir QtCore) +#export PATH=${QTDIR}/bin/:${PATH} + +# Make no changes below this point without consensus + +############################################################################################ + +SRPM=${SRPMDIR}bacula-$VERSION-$RELEASE.src.rpm +SRPM2=${SRPMDIR}bacula-bat-$VERSION-$RELEASE.src.rpm +SRPM3=${SRPMDIR}bacula-docs-$VERSION-$RELEASE.src.rpm +SRPM4=${SRPMDIR}bacula-mtx-$VERSION-$RELEASE.src.rpm + +echo Building MySQL packages for "$PLATFORM"... +sleep 2 +rpmbuild --rebuild --define "build_${PLATFORM} 1" \ +--define "build_mysql${MYSQL} 1" \ +--define "build_python 1" \ +--define "contrib_packager ${PACKAGER}" ${SRPM} +rm -rf ${RPMBUILD}/* + +echo Building PostgreSQL packages for "$PLATFORM"... +sleep 2 +rpmbuild --rebuild --define "build_${PLATFORM} 1" \ +--define "build_postgresql 1" \ +--define "contrib_packager ${PACKAGER}" \ +--define "build_python 1" ${SRPM} +rm -rf ${RPMBUILD}/* + +echo Building SQLite packages for "$PLATFORM"... 
+sleep 2 +rpmbuild --rebuild --define "build_${PLATFORM} 1" \ +--define "build_sqlite 1" \ +--define "contrib_packager ${PACKAGER}" \ +--define "build_python 1" ${SRPM} +rm -rf ${RPMBUILD}/* + +if [ "$BUILDBAT" = "1" ]; then + echo Building Bat package for "$PLATFORM"... + sleep 2 + rpmbuild --rebuild ${SRPM2} + rm -rf ${RPMBUILD}/* +fi + +echo Building Docs package for "$PLATFORM"... +sleep 2 +rpmbuild --rebuild ${SRPM3} +rm -rf ${RPMBUILD}/* + +if [ "$BUILDMTX" = "1" ]; then + echo Building mtx package for "$PLATFORM"... + sleep 2 + rpmbuild --rebuild ${SRPM4} + rm -rf ${RPMBUILD}/* +fi + +# delete the updatedb package and any debuginfo packages built +rm -f ${RPMDIR}/bacula*debug* +if [ "$SAVEUPDATEDB" = "1" ]; then + mv -f ${RPMDIR}/bacula-updatedb* ./; +else + rm -f ${RPMDIR}/bacula-updatedb*; +fi + +# copy files to cwd and rename files to final upload names + +mv -f ${RPMDIR}/bacula-mysql-${VERSION}-${RELEASE}.${ARCH}.rpm \ +./bacula-mysql-${VERSION}-${RELEASE}.${FILENAME}.${ARCH}.rpm + +mv -f ${RPMDIR}/bacula-postgresql-${VERSION}-${RELEASE}.${ARCH}.rpm \ +./bacula-postgresql-${VERSION}-${RELEASE}.${FILENAME}.${ARCH}.rpm + +mv -f ${RPMDIR}/bacula-sqlite-${VERSION}-${RELEASE}.${ARCH}.rpm \ +./bacula-sqlite-${VERSION}-${RELEASE}.${FILENAME}.${ARCH}.rpm + +if [ "$BUILDMTX" = "1" ]; then + mv -f ${RPMDIR}/bacula-mtx-${VERSION}-${RELEASE}.${ARCH}.rpm \ + ./bacula-mtx-${VERSION}-${RELEASE}.${FILENAME}.${ARCH}.rpm +fi + +mv -f ${RPMDIR}/bacula-client-${VERSION}-${RELEASE}.${ARCH}.rpm \ +./bacula-client-${VERSION}-${RELEASE}.${FILENAME}.${ARCH}.rpm + +mv -f ${RPMDIR}/bacula-libs-${VERSION}-${RELEASE}.${ARCH}.rpm \ +./bacula-libs-${VERSION}-${RELEASE}.${FILENAME}.${ARCH}.rpm + +if [ "$BUILDBAT" = "1" ]; then + mv -f ${RPMDIR}/bacula-bat-${VERSION}-${RELEASE}.${ARCH}.rpm \ + ./bacula-bat-${VERSION}-${RELEASE}.${FILENAME}.${ARCH}.rpm +fi + +mv -f ${RPMDIR2}/bacula-docs-${VERSION}-${RELEASE}.noarch.rpm . + +# now sign the packages +if [ "$SIGN" = "1" ]; then + echo Ready to sign packages...; + sleep 2; + rpm --addsign ./*.rpm; +fi + +echo +echo Finished. +echo +ls + +# changelog +# 16 Jul 2006 initial release +# 05 Aug 2006 add python support +# 06 Aug 2006 add remote source directory, add switch for signing, refine file names +# 19 Aug 2006 add $LANG override to config section per request Felix Schwartz +# 27 Jan 2007 add fc6 target +# 29 Apr 2007 add sl3 & sl4 target and bat package +# 06 May 2007 add fc7 target +# 15 Sep 2007 add rhel5 and clones +# 10 Nov 2007 add su103 +# 12 Jan 2008 add fc8 +# 23 May 2008 add fc9 +# 28 Jun 2008 add su110 +# 08 Nov 2008 add use of pkgconfig to obtain QT4 paths +# 31 Dec 2008 add su111 +# 05 Apr 2009 deprecate gconsole and wxconsole, bat built by default +# 30 Jan 2010 adjust for mtx, bat and docs in separate srpm +# 02 May 2010 add bacula-libs package diff --git a/platforms/contrib-rpm/rpm_wizard.sh b/platforms/contrib-rpm/rpm_wizard.sh new file mode 100755 index 00000000..0ff3628f --- /dev/null +++ b/platforms/contrib-rpm/rpm_wizard.sh @@ -0,0 +1,121 @@ +#!/bin/sh + +# shell script wizard to build bacula rpm using gnome dialogs +# requires zenity to be installed +# 30 Jul 2006 D. Scott Barninger +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS + + +# usage ./rpm_wizard.sh + +# check for zenity +HAVE_ZENITY=`which zenity` +if [ -z $HAVE_ZENITY ]; +then + echo You need zenity installed to run this script; + exit; +fi + +zenity --question --text "Bacula rpm rebuilding wizard. Do you wish to continue?" + +RESULT="$?" 
+if [ "$RESULT" = "1" ]; +then + exit; +fi + +# get packager name and email adddress +PACKAGER=`zenity --text-info --editable --height=25 --width=300 --title="Enter Your Name "` + +# get location of src rpm +SELECTED_FILE=`zenity --file-selection --title "Choose SRPM file to rebuild"` + +RESULT="$?" +if [ "$RESULT" = "1" ]; +then + exit; +fi + +# select build platform +PLATFORM=`zenity --title "Select Platform" --text "Please choose a build platform." --list --radiolist --column "Select" --column "Platform" False rh7 False rh8 False rh9 False fc1 False fc3 False fc4 False fc5 False fc6 False fc7 False fc8 False fc9 False wb3 False rhel3 False rhel4 False rhel5 False centos3 False centos4 False centos5 False sl3 False sl4 False sl5 False su9 False su10 False su102 False su103 False su110 False su111 False mdk False mdv` + +RESULT="$?" +if [ "$RESULT" = "1" ]; +then + exit; +fi + +# select database support +DATABASE=`zenity --title "Select Database" --text "Please choose database support." --list --radiolist --column "Select" --column "Database" False sqlite False mysql False mysql4 False mysql5 False postgresql False client_only` + +RESULT="$?" +if [ "$RESULT" = "1" ]; +then + exit; +fi + +# select other build options +OPTIONS=`zenity --title "Select Options" --text "Please choose other options." --list --checklist --column "Select" --column "Other" False build_bat False build_wxconsole False nobuild_gconsole False build_x86_64 False build_python` + +RESULT="$?" +if [ "$RESULT" = "1" ]; +then + exit; +fi + +OPTION1=`echo $OPTIONS|cut --delimiter=\| -f1` +OPTION2=`echo $OPTIONS|cut --delimiter=\| -f2` +OPTION3=`echo $OPTIONS|cut --delimiter=\| -f3` +OPTION4=`echo $OPTIONS|cut --delimiter=\| -f4` +OPTION5=`echo $OPTIONS|cut --delimiter=\| -f5` + +# construct rpmbuild command +COMMAND="rpmbuild --rebuild --define 'build_$PLATFORM 1' --define 'build_$DATABASE 1' --define 'contrib_packager ${PACKAGER}'" + +if [ ! -z $OPTION1 ]; +then + COMMAND="${COMMAND} --define '$OPTION1 1'"; +fi +if [ ! -z $OPTION2 ]; +then + COMMAND="${COMMAND} --define '$OPTION2 1'"; +fi +if [ ! -z $OPTION3 ]; +then + COMMAND="${COMMAND} --define '$OPTION3 1'"; +fi +if [ ! -z $OPTION4 ]; +then + COMMAND="${COMMAND} --define '$OPTION4 1'"; +fi +if [ ! -z $OPTION5 ]; +then + COMMAND="${COMMAND} --define '$OPTION5 1'"; +fi + +COMMAND="${COMMAND} ${SELECTED_FILE}" + +zenity --question --text "Ready to rebuild the src rpm with $COMMAND. Do you wish to continue?" + +RESULT="$?" +if [ "$RESULT" = "1" ]; +then + exit; +fi + +# execute the build +echo $COMMAND | sh + +# ChangeLog +# 30 Jul 2006 initial release +# 05 Aug 2006 add option for build_python +# 27 Jan 2007 add fc6 +# 29 Apr 2007 add sl3 & sl4 target and bat option +# 06 May 2007 add fc7 target +# 10 Nov 2007 add rhel5 and su103 targets +# 12 Jan 2008 add fc8 target +# 23 May 2008 add fc9 target +# 28 Jun 2008 add su110 target +# 31 Dec 2008 add su111 target diff --git a/platforms/contrib-rpm/rpmkey.spec b/platforms/contrib-rpm/rpmkey.spec new file mode 100644 index 00000000..662e8e39 --- /dev/null +++ b/platforms/contrib-rpm/rpmkey.spec @@ -0,0 +1,52 @@ +# rpm public key package +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# + +# replace the string yournam with your name +# first initial and lastname, ie. 
sbarninger +%define pubkeyname yourname + +# replace below with your name and email address +Packager: Your Name + +Summary: The %{pubkeyname} rpm public key +Name: rpmkey-%{pubkeyname} +Version: 0.1 +Release: 1 +License: BSD 2-Clause; see file LICENSE-FOSS +Group: System/Packages +Source0: %{pubkeyname}.asc +BuildArch: noarch +BuildRoot: %{_tmppath}/%{name}-root + +%define gpgkeypath /etc/bacula/pubkeys + +%description +The %{pubkeyname} rpm public key. If you trust %{pubkeyname} component +and you want to import this key to the RPM database, then install this +RPM. After installing this package you may import the key into your rpm +database, as root: + +rpm --import %{gpgkeypath}/%{pubkeyname}.asc + +%prep +%setup -c -T a1 + +%build + +%install +mkdir -p %{buildroot}%{gpgkeypath} +cp -a %{SOURCE0} %{buildroot}%{gpgkeypath}/ + +%files +%defattr(-, root, root) +%{gpgkeypath}/%{pubkeyname}.asc + + +%changelog +* Sat Aug 19 2006 D. Scott Barninger &2 + exit 1 + ;; +esac +exit $RETVAL diff --git a/platforms/debian/bacula-fd.in b/platforms/debian/bacula-fd.in new file mode 100644 index 00000000..86568cb7 --- /dev/null +++ b/platforms/debian/bacula-fd.in @@ -0,0 +1,79 @@ +#! /bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# bacula This shell script takes care of starting and stopping +# the bacula Director daemon on Debian/Ubuntu/Kubuntu +# systems. +# +# Kern E. Sibbald - 21 March 2008 +# +# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# +### BEGIN INIT INFO +# Provides: bacula-fd +# Required-Start: $network +# Required-Stop: $network +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Start @BACULA@ Client daemon at boot time +# Description: Enable @BACULA@ Client. +### END INIT INFO + + +NAME="bacula-fd" +DESC="@BACULA@ File Daemon" +DAEMON=@sbindir@/${NAME} +BUSER=@fd_user@ +BGROUP=@fd_group@ +BOPTIONS="-c @sysconfdir@/${NAME}.conf" +BPORT=@fd_port@ + +PATH=/sbin:/bin:/usr/sbin:/usr/bin + +test -f $DAEMON || exit 0 + +if [ -n "`getent services ${NAME}`" ]; then + BPORT=`getent services ${NAME} | awk '{ gsub("/tcp","",$2); print $2; }'` +fi + +if [ -f /etc/default/$NAME ]; then + . /etc/default/$NAME +fi + +PIDFILE=@piddir@/${NAME}.${BPORT}.pid + +if [ "x${BUSER}" != "x" ]; then + USERGRP="--chuid ${BUSER}" + if [ "x${BGROUP}" != "x" ]; then + USERGRP="${USERGRP}:${BGROUP}" + fi +fi + +RETVAL=0 +case "$1" in + start) + echo -n "Starting ${DESC}: " + start-stop-daemon --start --quiet --pidfile ${PIDFILE} ${USERGRP} --exec ${DAEMON} -- ${BOPTIONS} + RETVAL=$? + echo "${NAME}" + ;; + stop) + echo -n "Stopping ${DESC}: " + start-stop-daemon --oknodo --stop --quiet ${USERGRP} --exec ${DAEMON} -- ${BOPTIONS} + RETVAL=$? + echo "${NAME}" + ;; + restart|force-reload) + $0 stop + sleep 5 + $0 start + RETVAL=$? + ;; + *) + echo "Usage: /etc/init.d/${NAME} {start|stop|restart|force-reload}" >&2 + exit 1 + ;; +esac +exit $RETVAL diff --git a/platforms/debian/bacula-sd.in b/platforms/debian/bacula-sd.in new file mode 100644 index 00000000..13b726e5 --- /dev/null +++ b/platforms/debian/bacula-sd.in @@ -0,0 +1,79 @@ +#! /bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# bacula This shell script takes care of starting and stopping +# the bacula Director daemon on Debian/Ubuntu/Kubuntu +# systems. +# +# Kern E. 
Sibbald - 21 March 2008 +# +# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# +### BEGIN INIT INFO +# Provides: bacula-sd +# Required-Start: $network +# Required-Stop: $network +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Start @BACULA@ Storage daemon at boot time +# Description: Enable @BACULA@ Storage daemon. +### END INIT INFO + + +NAME="bacula-sd" +DESC="@BACULA@ Storage Daemon" +DAEMON=@sbindir@/${NAME} +BUSER=@sd_user@ +BGROUP=@sd_group@ +BOPTIONS="-c @sysconfdir@/${NAME}.conf" +BPORT=@sd_port@ + +PATH=/sbin:/bin:/usr/sbin:/usr/bin + +test -f $DAEMON || exit 0 + +if [ -n "`getent services ${NAME}`" ]; then + BPORT=`getent services ${NAME} | awk '{ gsub("/tcp","",$2); print $2; }'` +fi + +if [ -f /etc/default/$NAME ]; then + . /etc/default/$NAME +fi + +PIDFILE=@piddir@/${NAME}.${BPORT}.pid + +if [ "x${BUSER}" != "x" ]; then + USERGRP="--chuid ${BUSER}" + if [ "x${BGROUP}" != "x" ]; then + USERGRP="${USERGRP}:${BGROUP}" + fi +fi + +RETVAL=0 +case "$1" in + start) + echo -n "Starting ${DESC}: " + start-stop-daemon --start --quiet --pidfile ${PIDFILE} ${USERGRP} --exec ${DAEMON} -- ${BOPTIONS} + RETVAL=$? + echo "${NAME}" + ;; + stop) + echo -n "Stopping ${DESC}: " + start-stop-daemon --oknodo --stop --quiet ${USERGRP} --exec ${DAEMON} -- ${BOPTIONS} + RETVAL=$? + echo "${NAME}" + ;; + restart|force-reload) + $0 stop + sleep 5 + $0 start + RETVAL=$? + ;; + *) + echo "Usage: /etc/init.d/${NAME} {start|stop|restart|force-reload}" >&2 + exit 1 + ;; +esac +exit $RETVAL diff --git a/platforms/freebsd/Makefile.in b/platforms/freebsd/Makefile.in new file mode 100644 index 00000000..9aab6943 --- /dev/null +++ b/platforms/freebsd/Makefile.in @@ -0,0 +1,143 @@ +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# This file is used as the template to create the +# Makefile for the Solaris specific installation. +# +# 15 November 2001 -- Kern Sibbald +# +# for Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +VPATH = @srcdir@ +INSTALL = @INSTALL@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +SED = /usr/bin/sed + +nothing: + +install: install-autostart + +install-autostart: install-autostart-fd install-autostart-sd install-autostart-dir + + +install-autostart-fd: + @echo "FreeBSD platform installation" + $(INSTALL_PROGRAM) -m 744 bacula-fd /etc/rc.bacula-fd + @-today="`date +%Y%m%d%H%M`"; \ + grep -q /etc/rc.bacula-fd /etc/rc.conf; \ + if [ $$? -eq 0 ]; then \ + echo "/etc/rc.conf already patched"; \ + else \ + rm -f /etc/rc.conf.$$today; \ + cp -p /etc/rc.conf /etc/rc.conf.$$today; \ + ( echo "Start the Bacula File daemon. Do not remove the 'TAG_BACULA_FD' text"; \ + echo "if [ -x /etc/rc.bacula-fd ]; then # TAG_BACULA_FD"; \ + echo " /etc/rc.bacula-fd start # TAG_BACULA_FD"; \ + echo "fi # TAG_BACULA_FD"; \ + ) >> /etc/rc.conf; \ + echo ""; \ + fi + + +install-autostart-sd: + @echo "FreeBSD platform installation" + $(INSTALL_PROGRAM) -m 744 bacula-sd /etc/rc.bacula-sd + @-today="`date +%Y%m%d%H%M`"; \ + grep -q /etc/rc.bacula-sd /etc/rc.conf; \ + if [ $$? -eq 0 ]; then \ + echo "/etc/rc.conf already patched"; \ + else \ + rm -f /etc/rc.conf.$$today; \ + cp -p /etc/rc.conf /etc/rc.conf.$$today; \ + ( echo "Start the Bacula Storage daemon. 
Do not remove the 'TAG_BACULA_SD' text"; \ + echo "if [ -x /etc/rc.bacula-sd ]; then # TAG_BACULA_SD"; \ + echo " /etc/rc.bacula-sd start # TAG_BACULA_SD"; \ + echo "fi # TAG_BACULA_SD"; \ + ) >> /etc/rc.conf; \ + echo ""; \ + fi + +install-autostart-dir: + @echo "FreeBSD platform installation" + $(INSTALL_PROGRAM) -m 744 bacula-dir /etc/rc.bacula-dir + @-today="`date +%Y%m%d%H%M`"; \ + grep -q /etc/rc.bacula-dir /etc/rc.conf; \ + if [ $$? -eq 0 ]; then \ + echo "/etc/rc.conf already patched"; \ + else \ + rm -f /etc/rc.conf.$$today; \ + cp -p /etc/rc.conf /etc/rc.conf.$$today; \ + ( echo "Start the Bacula Director. Do not remove the 'TAG_BACULA_DIR' text"; \ + echo "if [ -x /etc/rc.bacula-dir ]; then # TAG_BACULA_DIR"; \ + echo " /etc/rc.bacula-dir start # TAG_BACULA_DIR"; \ + echo "fi # TAG_BACULA_DIR"; \ + ) >> /etc/rc.conf; \ + echo ""; \ + fi + + +uninstall: uninstall-autostart + +uninstall-autostart: uninstall-autostart-fd uninstall-autostart-sd uninstall-autostart-dir + +uninstall-autostart-fd: + @echo "FreeBSD platform uninstall" + rm -f /etc/rc.bacula-fd + @-today="`date +%Y%m%d%H%M`"; \ + for f in /etc/rc.conf ; do \ + grep -q '# TAG_BACULA_FD' $$f; \ + if [ $$? -eq 0 ]; then \ + echo "removing Bacula lines from $$f"; \ + rm -f $$f.$$today; \ + cp -p $$f $$f.$$today; \ + $(SED) -e '/TAG_BACULA_FD/d;' \ + < $$f.$$today > $$f; \ + chmod 644 $$f; \ + fi; \ + done + + +uninstall-autostart-sd: + @echo "FreeBSD platform uninstall" + rm -f /etc/rc.bacula-sd + @-today="`date +%Y%m%d%H%M`"; \ + for f in /etc/rc.conf ; do \ + grep -q '# TAG_BACULA_SD' $$f; \ + if [ $$? -eq 0 ]; then \ + echo "removing Bacula lines from $$f"; \ + rm -f $$f.$$today; \ + cp -p $$f $$f.$$today; \ + $(SED) -e '/TAG_BACULA_SD/d;' \ + < $$f.$$today > $$f; \ + chmod 644 $$f; \ + fi; \ + done + +uninstall-autostart-dir: + @echo "FreeBSD platform uninstall" + rm -f /etc/rc.bacula-dir + @-today="`date +%Y%m%d%H%M`"; \ + for f in /etc/rc.conf ; do \ + grep -q '# TAG_BACULA_DIR' $$f; \ + if [ $$? -eq 0 ]; then \ + echo "removing Bacula lines from $$f"; \ + rm -f $$f.$$today; \ + cp -p $$f $$f.$$today; \ + $(SED) -e '/TAG_BACULA_DIR/d;' \ + < $$f.$$today > $$f; \ + chmod 644 $$f; \ + fi; \ + done + +clean: + @rm -f 1 2 3 + +distclean: clean + @rm -f bacula-sd bacula-fd bacula-dir + @rm -f Makefile bacula-*.spec + +devclean: clean + @rm -f bacula-sd bacula-fd bacula-dir + @rm -f Makefile bacula-*.spec diff --git a/platforms/freebsd/bacula-dir.in b/platforms/freebsd/bacula-dir.in new file mode 100755 index 00000000..78dde036 --- /dev/null +++ b/platforms/freebsd/bacula-dir.in @@ -0,0 +1,42 @@ +#! /bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# bacula This shell script takes care of starting and stopping +# the bacula Director daemon +# +# chkconfig: 2345 90 9 +# description: The Leading Open Source Backup Solution. +# +# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +RETVAL=0 +case "$1" in + start) + echo "Starting the Bacula Director: " + @sbindir@/bacula-dir $2 -c @sysconfdir@/bacula-dir.conf + RETVAL=$? + echo + [ $RETVAL -eq 0 ] && touch @subsysdir@/bacula-dir + ;; + stop) + echo "Stopping the Director daemon: " +# killproc @sbindir@/bacula-dir + RETVAL=$? + echo + [ $RETVAL -eq 0 ] && rm -f @subsysdir@/bacula-dir + ;; + restart) + $0 stop + sleep 5 + $0 start + RETVAL=$? 
+ ;; + *) + echo "Usage: $0 {start|stop|restart}" + exit 1 + ;; +esac +exit $RETVAL diff --git a/platforms/freebsd/bacula-fd.in b/platforms/freebsd/bacula-fd.in new file mode 100755 index 00000000..e98f7ad2 --- /dev/null +++ b/platforms/freebsd/bacula-fd.in @@ -0,0 +1,42 @@ +#! /bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# bacula This shell script takes care of starting and stopping +# the bacula File daemon. +# +# chkconfig: 2345 90 9 +# description: The Leading Open Source Backup Solution. +# +# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +RETVAL=0 +case "$1" in + start) + echo "Starting the Bacula File daemon: " + @sbindir@/bacula-fd $2 -c @sysconfdir@/bacula-fd.conf + RETVAL=$? + echo + [ $RETVAL -eq 0 ] && touch @subsysdir@/bacula-fd + ;; + stop) + echo "Stopping the Bacula File daemon: " +# killproc @sbindir@/bacula-fd + RETVAL=$? + echo + [ $RETVAL -eq 0 ] && rm -f @subsysdir@/bacula-fd + ;; + restart) + $0 stop + sleep 5 + $0 start + RETVAL=$? + ;; + *) + echo "Usage: $0 {start|stop|restart}" + exit 1 + ;; +esac +exit $RETVAL diff --git a/platforms/freebsd/bacula-sd.in b/platforms/freebsd/bacula-sd.in new file mode 100755 index 00000000..7aafe60b --- /dev/null +++ b/platforms/freebsd/bacula-sd.in @@ -0,0 +1,42 @@ +#! /bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# bacula This shell script takes care of starting and stopping +# the bacula Storage daemon. +# +# chkconfig: 2345 90 9 +# description: The Leading Open Source Backup Solution. +# +# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +RETVAL=0 +case "$1" in + start) + echo "Starting the Bacula Storage daemon: " + @sbindir@/bacula-sd $2 -c @sysconfdir@/bacula-sd.conf + RETVAL=$? + echo + [ $RETVAL -eq 0 ] && touch @subsysdir@/bacula-sd + ;; + stop) + echo "Stopping the Bacula Storage daemon: " +# killproc @sbindir@/bacula-sd + RETVAL=$? + echo + [ $RETVAL -eq 0 ] && rm -f @subsysdir@/bacula-sd + ;; + restart) + $0 stop + sleep 5 + $0 start + RETVAL=$? + ;; + *) + echo "Usage: $0 {start|stop|restart}" + exit 1 + ;; +esac +exit $RETVAL diff --git a/platforms/freebsd/pthreads-fix.txt b/platforms/freebsd/pthreads-fix.txt new file mode 100644 index 00000000..3dda1308 --- /dev/null +++ b/platforms/freebsd/pthreads-fix.txt @@ -0,0 +1,206 @@ +From: "Dan Langille" +To: bacula-users@lists.sourceforge.net +Subject: [Bacula-users] FreeBSD - large backups to tape +Date: Mon, 20 Oct 2003 15:29:18 -0400 + +Kern and I have been working on a FreeBSD/Bacula problem. +He's asked me to post this to the list. The problem was within the +FreeBSD pthreads library. A solution has been found. + +PROBLEM DESCRIPTION: + +The FreeBSD pthreads library does not properly handle End Of Tape. +This problem will be fixed in FreeBSD 4.9. + +UPDATE 2004/02/24: Note, the problem was apparently not fixed in + 4.9-RELEASE. 4.9-RELEASE contained a partial patch that did not + prevent data loss. To date, the latest FreeBSD -RELEASE versions + (4.9-RELEASE and 5.2.1-RELEASE) are *broken* as shipped. If + you are running one of these systems, please either patch + your system as described below or upgrade to -STABLE or + -CURRENT immediately. + + We expect 4.10-RELEASE to be available within a few weeks + (written 24 Apr 2004) and 5.3-RELEASE to be available in a + few months. 4.10 and 5.3 *should* contain the fix, but we + can't know for sure until we've had a chance to test them. 
+ +The bug results in more data being written than the tape will +hold because of a lost status code. Any backup which involving +more than one tape would have data lost. + +DEMONSTRATION: + +To demonstrate the problem, tapetest.c can be obtained from +http://www.freebsd.org/cgi/query-pr.cgi?pr=56274 + +tapetest.c can also be found in the Bacula source distribution +in /platforms/freebsd/tapetest.c + +This tests without pthreads: + + * If you build this program with: + * + * c++ -g -O2 -Wall -c tapetest.c + * c++ -g -O2 -Wall tapetest.o -o tapetest + * + * Procedure for testing tape + * ./tapetest /dev/your-tape-device + * rewind + * rawfill + * rewind + * scan + * + * The output will be something like: + * + * ======== + * Rewound /dev/nsa0 + * *Begin writing blocks of 64512 bytes. + * ++++++++++++++++++++ ... + * Write failed. Last block written=17294. stat=0 ERR=Unknown error: 0 + * weof_dev + * Wrote EOF to /dev/nsa0 + * *Rewound /dev/nsa0 + * *Starting scan at file 0 + * 17294 blocks of 64512 bytes in file 0 + * End of File mark. + * End of File mark. + * End of tape + * Total files=1, blocks=17294, bytes = 1115670528 + * ======== + * + * which is correct. Notice that the return status is + * 0, while in the example below, which fails, the return + * status is -1. + +This tests with pthreads: + + * If you build this program with: + * + * c++ -g -O2 -Wall -pthread -c tapetest.c + * c++ -g -O2 -Wall -pthread tapetest.o -o tapetest + * Note, we simply added -pthread compared to the + * previous example. + * + * Procedure for testing tape + * ./tapetest /dev/your-tape-device + * rewind + * rawfill + * rewind + * scan + * + * The output will be something like: + * + * ======== + * Rewound /dev/nsa0 + * *Begin writing blocks of 64512 bytes. + * +++++++++++++++++++++++++++++ ... + * Write failed. Last block written=17926. stat=-1 ERR=No space left on device + * weof_dev + * Wrote EOF to /dev/nsa0 + * *Rewound /dev/nsa0 + * *Starting scan at file 0 + * 17913 blocks of 64512 bytes in file 0 + * End of File mark. + * End of File mark. + * End of tape + * Total files=1, blocks=17913, bytes = 1155603456 + * ======== + * + * which is incorrect because it wrote 17,926 blocks and the + * status on the last block written is stat=-1, which is incorrect. + * In addition only 17,913 blocks were read back. + * + * Similarly, if you ran this test on 4.9-RELEASE or 5.2.1-RELEASE + * (these versions contain an incomplete patch) then you would + * probably see something like this: + * + * ======== + * Rewound /dev/nsa0 + * *Begin writing blocks of 64512 bytes. + * +++++++++++++++ [...] + * weof_dev + * Wrote EOF to /dev/nsa0 + * Write failed. Last block written=271163. stat=-1 ERR=No space left on device + * *Rewound /dev/nsa0 + * *Starting scan at file 0 + * Bad status from read -1. ERR=Input/output error + * 271163 blocks of 64512 bytes in file 0 + * ======== + * + * The above output is also incorrect. The block counts match, + * but note the -1 error code on the read and write. This is + * just as dangerous as the first example. If you see this + * output then you should patch or upgrade to -STABLE or -CURRENT + * immediately. + +If you get the same number of blocks written and read WHEN using +pthreads, AND the test with pthreads enabled returns a stat=0 +on the last write, and the scan operation returns no error +code, then you've been correctly patched. It is important that +stat=0 rather than -1 even if the correct number of blocks +are read back. 
If the status is -1 on the pthreads test, you +will lose data. + +SOLUTION: + +For FreeBSD versions prior to 4.10-RELEASE and 5.3-RELEASE you +have two choices to ensure proper backups. These instructions +assume you are familiar with patching FreeBSD and already have +the FreeBSD source code installed on your machine. + +For FreeBSD 4.x: + +Do one of the following: + +- cvsup and build your system to FreeBSD 4.x-STABLE after the + date Mon Dec 29 15:18:01 2003 UTC + +- Apply this patch. + + http://www.freebsd.org/cgi/cvsweb.cgi/src/lib/libc_r/uthread/uthread_write.c.diff?r1=1.16.2.6&r2=1.16.2.8 + + To apply the patch, follow these instructions as root. + + cd /usr/src/lib/libc_r/uthread/ + fetch -o pthread.diff 'http://www.freebsd.org/cgi/cvsweb.cgi/src/lib/libc_r/uthread/uthread_write.c.diff?r1=1.16.2.6&r2=1.16.2.8' + patch < pthread.diff + cd .. + make all install + +For FreeBSD 5.x: + +Do one of the following: + +- cvsup and build your system to FreeBSD -CURRENT after the + date Wed Dec 17 16:44:03 2003 UTC + +- Apply this patch. + + http://www.freebsd.org/cgi/cvsweb.cgi/src/lib/libc_r/uthread/uthread_write.c.diff?r1=1.22&r2=1.23 + + Wed Dec 17 16:44:03 2003 UTC + + To apply the patch, follow these instructions as root. + + cd /usr/src/lib/libc_r/uthread/ + fetch -o pthread.diff 'http://www.freebsd.org/cgi/cvsweb.cgi/src/lib/libc_r/uthread/uthread_write.c.diff?r1=1.22&r2=1.23' + patch < pthread.diff + cd .. + make all install + +After patching your system as shown above, +you should then recompile Bacula to get the new library +code included by doing: + + cd + make clean + make + ... + + +TESTING: + +I suggest running tapetest on your patched system and then +conducting a backup which spans two tapes. Restore the data +and compare to the original. If not identical, please let us know. diff --git a/platforms/freebsd/tapetest.c b/platforms/freebsd/tapetest.c new file mode 100644 index 00000000..d3a5b1c4 --- /dev/null +++ b/platforms/freebsd/tapetest.c @@ -0,0 +1,605 @@ +/* + * + * Program to test loss of data at EOM on + * FreeBSD systems. + * + * Kern Sibbald, August 2003 + * + * If you build this program with: + * + * c++ -g -O2 -Wall -c tapetest.c + * c++ -g -O2 -Wall tapetest.o -o tapetest + * + * Procedure for testing tape + * ./tapetest /dev/your-tape-device + * rewind + * rawfill + * rewind + * scan + * quit + * + * The output will be: + * + * ======== + * Rewound /dev/nsa0 + * *Begin writing blocks of 64512 bytes. + * ++++++++++++++++++++ ... + * Write failed. Last block written=17294. stat=0 ERR=Unknown error: 0 + * weof_dev + * Wrote EOF to /dev/nsa0 + * *Rewound /dev/nsa0 + * *Starting scan at file 0 + * 17294 blocks of 64512 bytes in file 0 + * End of File mark. + * End of File mark. + * End of tape + * Total files=1, blocks=17294, bytes = 1115670528 + * ======== + * + * which is correct. Notice that the return status is + * 0, while in the example below, which fails, the return + * status is -1. + * + + * If you build this program with: + * + * c++ -g -O2 -Wall -pthread -c tapetest.c + * c++ -g -O2 -Wall -pthread tapetest.o -o tapetest + * Note, we simply added -pthread compared to the + * previous example. + * + * Procedure for testing tape + * ./tapetest /dev/your-tape-device + * rewind + * rawfill + * rewind + * scan + * quit + * + * The output will be: + * + * ======== + * Rewound /dev/nsa0 + * *Begin writing blocks of 64512 bytes. + * +++++++++++++++++++++++++++++ ... + * Write failed. Last block written=17926. 
stat=-1 ERR=No space left on device + * weof_dev + * Wrote EOF to /dev/nsa0 + * *Rewound /dev/nsa0 + * *Starting scan at file 0 + * 17913 blocks of 64512 bytes in file 0 + * End of File mark. + * End of File mark. + * End of tape + * Total files=1, blocks=17913, bytes = 1155603456 + * ======== + * + * which is incroorect because it wrote 17,926 blocks but read + * back only 17,913 blocks, AND because the return status on + * the last block written was -1 when it should have been + * 0 (ie. stat=0 above). + * + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define FALSE 0 +#define TRUE 1 + +#define dev_state(dev, state) ((dev)->state & (state)) + +/* Device state bits */ +#define ST_OPENED (1<<0) /* set when device opened */ +#define ST_TAPE (1<<1) /* is a tape device */ +#define ST_FILE (1<<2) /* is a file device */ +#define ST_FIFO (1<<3) /* is a fifo device */ +#define ST_PROG (1<<4) /* is a program device */ +#define ST_LABEL (1<<5) /* label found */ +#define ST_MALLOC (1<<6) /* dev packet malloc'ed in init_dev() */ +#define ST_APPEND (1<<7) /* ready for Bacula append */ +#define ST_READ (1<<8) /* ready for Bacula read */ +#define ST_EOT (1<<9) /* at end of tape */ +#define ST_WEOT (1<<10) /* Got EOT on write */ +#define ST_EOF (1<<11) /* Read EOF i.e. zero bytes */ +#define ST_NEXTVOL (1<<12) /* Start writing on next volume */ +#define ST_SHORT (1<<13) /* Short block read */ + +#define BLOCK_SIZE (512 * 126) + + +/* Exported variables */ +int quit = 0; +char buf[100000]; +int verbose = 0; +int debug_level = 0; +int fd = 0; + +struct DEVICE { + int fd; + int dev_errno; + int file; + int block_num; + int state; + char *buf; + int buf_len; + char *dev_name; + int file_addr; +}; + +DEVICE *dev; + +#define uint32_t unsigned long +#define uint64_t unsigned long long + +/* Forward referenced subroutines */ +static void do_tape_cmds(); +static void helpcmd(); +static void scancmd(); +static void rewindcmd(); +static void rawfill_cmd(); + + +/* Static variables */ + +static char cmd[1000]; + +static void usage(); +int get_cmd(char *prompt); + + +/********************************************************************* + * + * Main Bacula Pool Creation Program + * + */ +int main(int argc, char *argv[]) +{ + int ch; + + while ((ch = getopt(argc, argv, "d:v?")) != -1) { + switch (ch) { + case 'd': /* set debug level */ + debug_level = atoi(optarg); + if (debug_level <= 0) { + debug_level = 1; + } + break; + + case 'v': + verbose++; + break; + + case '?': + default: + helpcmd(); + exit(0); + + } + } + argc -= optind; + argv += optind; + + + /* See if we can open a device */ + if (argc == 0) { + printf("No archive name specified.\n"); + usage(); + exit(1); + } else if (argc != 1) { + printf("Improper number of arguments specified.\n"); + usage(); + exit(1); + } + + fd = open(argv[0], O_RDWR); + if (fd < 0) { + printf("Error opening %s ERR=%s\n", argv[0], strerror(errno)); + exit(1); + } + dev = (DEVICE *)malloc(sizeof(DEVICE)); + memset(dev, 0, sizeof(DEVICE)); + dev->fd = fd; + dev->dev_name = strdup(argv[0]); + dev->buf_len = BLOCK_SIZE; + dev->buf = (char *)malloc(BLOCK_SIZE); + + do_tape_cmds(); + return 0; +} + + +int rewind_dev(DEVICE *dev) +{ + struct mtop mt_com; + + if (dev->fd < 0) { + dev->dev_errno = EBADF; + printf("Bad call to rewind_dev. 
Device %s not open\n", + dev->dev_name); + return 0; + } + dev->state &= ~(ST_APPEND|ST_READ|ST_EOT|ST_EOF|ST_WEOT); /* remove EOF/EOT flags */ + dev->block_num = dev->file = 0; + mt_com.mt_op = MTREW; + mt_com.mt_count = 1; + if (ioctl(dev->fd, MTIOCTOP, (char *)&mt_com) < 0) { + dev->dev_errno = errno; + printf("Rewind error on %s. ERR=%s.\n", + dev->dev_name, strerror(dev->dev_errno)); + return 0; + } + return 1; +} + +/* + * Write an end of file on the device + * Returns: 0 on success + * non-zero on failure + */ +int +weof_dev(DEVICE *dev, int num) +{ + struct mtop mt_com; + int stat; + + if (dev->fd < 0) { + dev->dev_errno = EBADF; + printf("Bad call to fsf_dev. Archive not open\n"); + return -1; + } + + dev->state &= ~(ST_EOT | ST_EOF); /* remove EOF/EOT flags */ + dev->block_num = 0; + printf("weof_dev\n"); + mt_com.mt_op = MTWEOF; + mt_com.mt_count = num; + stat = ioctl(dev->fd, MTIOCTOP, (char *)&mt_com); + if (stat == 0) { + dev->file++; + dev->file_addr = 0; + } else { + dev->dev_errno = errno; + printf("ioctl MTWEOF error on %s. ERR=%s.\n", + dev->dev_name, strerror(dev->dev_errno)); + } + return stat; +} + + + + + +void quitcmd() +{ + quit = 1; +} + + +/* + * Rewind the tape. + */ +static void rewindcmd() +{ + if (!rewind_dev(dev)) { + printf("Bad status from rewind. ERR=%s\n", strerror(dev->dev_errno)); + } else { + printf("Rewound %s\n", dev->dev_name); + } +} + +/* + * Write and end of file on the tape + */ +static void weofcmd() +{ + int stat; + + if ((stat = weof_dev(dev, 1)) < 0) { + printf("Bad status from weof %d. ERR=%s\n", stat, strerror(dev->dev_errno)); + return; + } else { + printf("Wrote EOF to %s\n", dev->dev_name); + } +} + + +/* + * Read a record from the tape + */ +static void rrcmd() +{ + char *buf; + int stat, len; + + if (!get_cmd("Enter length to read: ")) { + return; + } + len = atoi(cmd); + if (len < 0 || len > 1000000) { + printf("Bad length entered, using default of 1024 bytes.\n"); + len = 1024; + } + buf = (char *)malloc(len); + stat = read(fd, buf, len); + if (stat > 0 && stat <= len) { + errno = 0; + } + printf("Read of %d bytes gives stat=%d. ERR=%s\n", + len, stat, strerror(errno)); + free(buf); +} + +/* + * Write a record to the tape + */ +static void wrcmd() +{ + int stat; + int rfd; + + rfd = open("/dev/urandom", O_RDONLY); + if (rfd) { + read(rfd, dev->buf, dev->buf_len); + } else { + printf("Cannot open /dev/urandom.\n"); + return; + } + printf("Write one block of %u bytes.\n", dev->buf_len); + stat = write(dev->fd, dev->buf, dev->buf_len); + if (stat != (int)dev->buf_len) { + if (stat == -1) { + printf("Bad status from write. ERR=%s\n", strerror(errno)); + } else { + printf("Expected to write %d bytes but wrote only %d.\n", + dev->buf_len, stat); + } + } +} + + + +/* + * Scan tape by reading block by block. Report what is + * on the tape. Note, this command does raw reads, and as such + * will not work with fixed block size devices. + */ +static void scancmd() +{ + int stat; + int blocks, tot_blocks, tot_files; + int block_size; + uint64_t bytes; + + + blocks = block_size = tot_blocks = 0; + bytes = 0; + if (dev->state & ST_EOT) { + printf("End of tape\n"); + return; + } + tot_files = dev->file; + printf("Starting scan at file %u\n", dev->file); + for (;;) { + if ((stat = read(dev->fd, buf, sizeof(buf))) < 0) { + dev->dev_errno = errno; + printf("Bad status from read %d. 
ERR=%s\n", stat, strerror(dev->dev_errno)); + if (blocks > 0) + printf("%d block%s of %d bytes in file %d\n", + blocks, blocks>1?"s":"", block_size, dev->file); + return; + } + if (stat != block_size) { + if (blocks > 0) { + printf("%d block%s of %d bytes in file %d\n", + blocks, blocks>1?"s":"", block_size, dev->file); + blocks = 0; + } + block_size = stat; + } + if (stat == 0) { /* EOF */ + printf("End of File mark.\n"); + /* Two reads of zero means end of tape */ + if (dev->state & ST_EOF) + dev->state |= ST_EOT; + else { + dev->state |= ST_EOF; + dev->file++; + } + if (dev->state & ST_EOT) { + printf("End of tape\n"); + break; + } + } else { /* Got data */ + dev->state &= ~ST_EOF; + blocks++; + tot_blocks++; + bytes += stat; + } + } + tot_files = dev->file - tot_files; + printf("Total files=%d, blocks=%d, bytes = %d\n", tot_files, tot_blocks, + (int)bytes); +} + + +static void rawfill_cmd() +{ + int stat; + int rfd; + uint32_t block_num = 0; + uint32_t *p; + int my_errno; + + rfd = open("/dev/urandom", O_RDONLY); + if (rfd) { + read(rfd, dev->buf, dev->buf_len); + } else { + printf("Cannot open /dev/urandom.\n"); + return; + } + p = (uint32_t *)dev->buf; + printf("Begin writing blocks of %u bytes.\n", dev->buf_len); + for ( ;; ) { + *p = block_num; + stat = write(dev->fd, dev->buf, dev->buf_len); + if (stat == (int)dev->buf_len) { + if ((block_num++ % 100) == 0) { + printf("+"); + fflush(stdout); + } + continue; + } + break; + } + my_errno = errno; + printf("\n"); + weofcmd(); + printf("Write failed. Last block written=%d. stat=%d ERR=%s\n", (int)block_num, stat, + strerror(my_errno)); + +} + +/* Strip any trailing junk from the command */ +void strip_trailing_junk(char *cmd) +{ + char *p; + p = cmd + strlen(cmd) - 1; + + /* strip trailing junk from command */ + while ((p >= cmd) && (*p == '\n' || *p == '\r' || *p == ' ')) + *p-- = 0; +} + +/* folded search for string - case insensitive */ +int +fstrsch(char *a, char *b) /* folded case search */ +{ + register char *s1,*s2; + register char c1=0, c2=0; + + s1=a; + s2=b; + while (*s1) { /* do it the fast way */ + if ((*s1++ | 0x20) != (*s2++ | 0x20)) + return 0; /* failed */ + } + while (*a) { /* do it over the correct slow way */ + if (isupper(c1 = *a)) { + c1 = tolower((int)c1); + } + if (isupper(c2 = *b)) { + c2 = tolower((int)c2); + } + if (c1 != c2) { + return 0; + } + a++; + b++; + } + return 1; +} + + +struct cmdstruct { char *key; void (*func)(); char *help; }; +static struct cmdstruct commands[] = { + {"help", helpcmd, "print this command"}, + {"quit", quitcmd, "quit tapetest"}, + {"rawfill", rawfill_cmd, "use write() to fill tape"}, + {"rewind", rewindcmd, "rewind the tape"}, + {"rr", rrcmd, "raw read the tape"}, + {"wr", wrcmd, "raw write one block to the tape"}, + {"scan", scancmd, "read() tape block by block to EOT and report"}, + {"weof", weofcmd, "write an EOF on the tape"}, + }; +#define comsize (sizeof(commands)/sizeof(struct cmdstruct)) + +static void +do_tape_cmds() +{ + unsigned int i; + int found; + + while (get_cmd("*")) { + found = 0; + for (i=0; i 0) + cmd[--i] = 0; + continue; + } + + cmd[i++] = ch; + cmd[i] = 0; + } + quit = 1; + return 0; +} diff --git a/platforms/gentoo/1.36.1-cdrecord-configure.patch b/platforms/gentoo/1.36.1-cdrecord-configure.patch new file mode 100644 index 00000000..573e7bd1 --- /dev/null +++ b/platforms/gentoo/1.36.1-cdrecord-configure.patch @@ -0,0 +1,18 @@ +--- configure.old 2005-02-06 07:44:05.221997769 -0500 ++++ configure 2005-02-06 07:45:18.300994158 -0500 +@@ -7478,15 +7478,6 @@ + 
# get scsibus,target,lun + # ------------------------------------------- + CDSTL="3,0,0" +-if test ! x$CDRECORD = x ; then +- CDSTL=`${CDRECORD} -scanbus 2>/dev/null | grep CD-RW | ${AWK} '{print $1}'` +- if test x${CDSTL} = x ; then +- CDSTL=`${CDRECORD} -scanbus 2>/dev/null | grep CD+RW | ${AWK} '{print $1}'` +- fi +- if test x${CDSTL} = x ; then +- CDSTL="3,0,0" +- fi +-fi + + + diff --git a/platforms/gentoo/1.36.2-cdrecord-configure.patch b/platforms/gentoo/1.36.2-cdrecord-configure.patch new file mode 100644 index 00000000..6fa85114 --- /dev/null +++ b/platforms/gentoo/1.36.2-cdrecord-configure.patch @@ -0,0 +1,19 @@ +diff -uNr bacula-1.36.2/configure bacula-1.36.2-fixed/configure +--- bacula-1.36.2/configure 2005-02-25 04:46:49.000000000 -0500 ++++ bacula-1.36.2-fixed/configure 2005-03-06 10:11:23.905848861 -0500 +@@ -7301,15 +7301,6 @@ + # get scsibus,target,lun + # ------------------------------------------- + CDSTL="3,0,0" +-if test ! x$CDRECORD = x ; then +- CDSTL=`${CDRECORD} -scanbus 2>/dev/null | grep CD-RW | ${AWK} '{print $1}'` +- if test x${CDSTL} = x ; then +- CDSTL=`${CDRECORD} -scanbus 2>/dev/null | grep CD+RW | ${AWK} '{print $1}'` +- fi +- if test x${CDSTL} = x ; then +- CDSTL="3,0,0" +- fi +-fi + + + diff --git a/platforms/gentoo/Makefile.in b/platforms/gentoo/Makefile.in new file mode 100644 index 00000000..ee90620f --- /dev/null +++ b/platforms/gentoo/Makefile.in @@ -0,0 +1,60 @@ +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# This file is used as the template to create the +# Makefile for the Gentoo specific installation. +# +# 22 January 2003 -- Kern Sibbald +# and corrected for Gentoo by +# Patrick Naubert 25 Jan 2003 +# +# for Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +INSTALL = @INSTALL@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ + +nothing: + +install: install-autostart + +install-autostart: install-autostart-fd install-autostart-sd install-autostart-dir + + +install-autostart-fd: + @$(INSTALL) -m 744 bacula-fd $(DESTDIR)/etc/init.d/bacula-fd + + +install-autostart-sd: + @$(INSTALL) -m 744 bacula-sd $(DESTDIR)/etc/init.d/bacula-sd + + +install-autostart-dir: + @$(INSTALL) -m 744 bacula-dir $(DESTDIR)/etc/init.d/bacula-dir + + +uninstall: uninstall-autostart + +uninstall-autostart: uninstall-autostart-fd uninstall-autostart-sd uninstall-autostart-dir + +uninstall-autostart-fd: + @rm -f $(DESTDIR)/etc/init.d/bacula-fd + + +uninstall-autostart-sd: + @rm -f $(DESTDIR)/etc/init.d/bacula-sd + +uninstall-autostart-dir: + @rm -f $(DESTDIR)/etc/init.d/bacula-dir + +clean: + @rm -f 1 2 3 + +distclean: clean + @rm -f bacula-sd bacula-fd bacula-dir + @rm -f Makefile + +devclean: clean + @rm -f bacula-sd bacula-fd bacula-dir + @rm -f Makefile diff --git a/platforms/gentoo/bacula-dir.in b/platforms/gentoo/bacula-dir.in new file mode 100755 index 00000000..391cbf63 --- /dev/null +++ b/platforms/gentoo/bacula-dir.in @@ -0,0 +1,31 @@ +#!/sbin/runscript +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# bacula This shell script takes care of starting and stopping +# the bacula Director daemon for the Gentoo release +# +# chkconfig: 2345 90 9 +# description: The Leading Open Source Backup Solution. +# +# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +start() { + ebegin "Starting the Bacula Director" + start-stop-daemon --start --quiet --exec @sbindir@/bacula-dir -- $2 -c @sysconfdir@/bacula-dir.conf + eend $? 
+} + +stop() { + ebegin "Stopping the Director daemon" + start-stop-daemon --stop --quiet --exec @sbindir@/bacula-dir + eend $? +} + +restart() { + stop + sleep 5 + start +} diff --git a/platforms/gentoo/bacula-fd.in b/platforms/gentoo/bacula-fd.in new file mode 100755 index 00000000..fdf15555 --- /dev/null +++ b/platforms/gentoo/bacula-fd.in @@ -0,0 +1,31 @@ +#!/sbin/runscript +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# bacula This shell script takes care of starting and stopping +# the bacula File daemon for the Gentoo release. +# +# chkconfig: 2345 90 9 +# description: The Leading Open Source Backup Solution. +# +# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +start() { + ebegin "Starting the Bacula File daemon" + start-stop-daemon --start --quiet --exec @sbindir@/bacula-fd -- $2 -c @sysconfdir@/bacula-fd.conf + eend $? +} + +stop() { + ebegin "Stopping the Bacula File daemon" + start-stop-daemon --stop --quiet --exec @sbindir@/bacula-fd + eend $? +} + +restart() { + stop + sleep 5 + start +} diff --git a/platforms/gentoo/bacula-init.in b/platforms/gentoo/bacula-init.in new file mode 100755 index 00000000..0c78153f --- /dev/null +++ b/platforms/gentoo/bacula-init.in @@ -0,0 +1,94 @@ +#!/sbin/runscript +# Copyright 1999-2004 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 +# Modified for user/group information +# 24 Oct 2004 D. Scott Barninger +# +# added cdrom rescue for 1.36.1 +# init script now comes from source package not ${FILES} dir +# 26 Nov 2004 D. Scott Barninger +# +# fix symlink creation in rescue package in post script +# remove mask on x86 keyword +# fix post script so it doesn't talk about server config for client-only build +# bug #181 - unable to reproduce on 2.4 kernel system so add FEATURES="-sandbox" +# 04 Dec 2004 D. Scott Barninger +# +# more on bug #181 - another user has reported a sandbox violation trying to +# write to /dev/sg0 - still can't reproduce this behavior +# add an 'addpredict /dev/sg0' +# 08 Dec 2004 D. Scott Barninger +# +# resolve bug #181 - problem is caused by configure calling cdrecord to scan +# the scsi bus. patch configure to remove this. add logrotate script. +# 06 Feb 2005 D. Scott Barninger +# +# fix documentation bug +# 07 Feb 2005 D. Scott Barninger +# +# new USE keywords bacula-clientonly bacula-split +# add new logwatch scripts +# 06 Mar 2005 D. Scott Barninger +# +# 1.36.3 doc changes +# 17 Apr 2005 D. Scott Barninger + +DESCRIPTION="featureful client/server network backup suite" +HOMEPAGE="http://www.bacula.org/" +SRC_URI="mirror://sourceforge/bacula/${P}.tar.gz" + +LICENSE="BSD 2-Clause" +SLOT="0" +KEYWORDS="x86 ~ppc ~sparc ~amd64" +IUSE="readline tcpd gnome mysql sqlite X static postgres wxwindows bacula-clientonly bacula-split" + +inherit eutils + +# there is a local sqlite use flag. use it -OR- mysql, not both. +# mysql is the recommended choice ... +# may need sys-libs/libtermcap-compat but try without first +DEPEND=">=sys-libs/zlib-1.1.4 + readline? ( >=sys-libs/readline-4.1 ) + tcpd? ( >=sys-apps/tcp-wrappers-7.6 ) + gnome? ( gnome-base/libgnome ) + gnome? ( app-admin/gnomesu ) + !bacula-clientonly? ( + sqlite? ( =dev-db/sqlite-2* ) + mysql? ( >=dev-db/mysql-3.23 ) + postgres? ( >=dev-db/postgresql-7.4.0 ) + sys-apps/mtx + ) + X? ( virtual/x11 ) + wxwindows? ( >=x11-libs/wxGTK-2.4.2 ) + virtual/mta + dev-libs/gmp + app-text/tetex + dev-tex/latex2html" +RDEPEND="${DEPEND} + !bacula-clientonly? 
( + sys-apps/mtx + app-arch/mt-st + )" + +src_compile() { + + # this resolves bug #181 + epatch ${FILESDIR}/1.36.2-cdrecord-configure.patch + + local myconf="" + + myconf=" + `use_enable readline` + `use_enable gnome` + `use_enable tcpd tcp-wrappers` + `use_enable X x`" + + # define this to skip building the other daemons ... + if use bacula-clientonly + then + myconf="${myconf} --enable-client-only" + fi + + # select database support + if ! use bacula-clientonly + then + # mysql is the recomended choice ... + if use mysql + then + myconf="${myconf} --with-mysql=/usr" + elif use postgres + then + myconf="${myconf} --with-postgresql=/usr" + elif use sqlite + then + myconf="${myconf} --with-sqlite=/usr" + elif use sqlite && use mysql + then + myconf="${myconf/--with-sqlite/}" + fi + fi + + if use wxwindows + then + myconf="${myconf} --enable-wx-console" + fi + + if use readline + then + myconf="${myconf} --enable-readline" + fi + + if use gnome + then + myconf="${myconf} --enable-tray-monitor" + fi + + ./configure \ + --enable-smartalloc \ + --prefix=/usr \ + --mandir=/usr/share/man \ + --with-pid-dir=/var/run \ + --sysconfdir=/etc/bacula \ + --infodir=/usr/share/info \ + --with-subsys-dir=/var/lock/subsys \ + --with-working-dir=/var/bacula \ + --with-scriptdir=/etc/bacula \ + --with-dir-user=root \ + --with-dir-group=bacula \ + --with-sd-user=root \ + --with-sd-group=bacula \ + --with-fd-user=root \ + --with-fd-group=bacula \ + --host=${CHOST} ${myconf} || die "bad ./configure" + + emake || die "compile problem" + + # for the rescue package regardless of use static + cd ${S}/src/filed + make static-bacula-fd + cd ${S} + + # make the docs + cd ${S}/doc/latex + make + cd ${S} + + if use static + then + cd ${S}/src/console + make static-console + cd ${S}/src/dird + make static-bacula-dir + if use gnome + then + cd ${S}/src/gnome-console + make static-gnome-console + fi + if use wxwindows + then + cd ${S}/src/wx-console + make static-wx-console + fi + cd ${S}/src/stored + make static-bacula-sd + fi +} + +src_install() { + make DESTDIR=${D} install || die + + if use static + then + cd ${S}/src/filed + cp static-bacula-fd ${D}/usr/sbin/bacula-fd + cd ${S}/src/console + cp static-console ${D}/usr/sbin/console + cd ${S}/src/dird + cp static-bacula-dir ${D}/usr/sbin/bacula-dir + if use gnome + then + cd ${S}/src/gnome-console + cp static-gnome-console ${D}/usr/sbin/gnome-console + fi + if use wxwindows + then + cd ${S}/src/wx-console + cp static-wx-console ${D}/usr/sbin/wx-console + fi + cd ${S}/src/stored + cp static-bacula-sd ${D}/usr/sbin/bacula-sd + fi + + # the menu stuff + if use gnome + then + mkdir -p ${D}/usr/share/pixmaps + mkdir -p ${D}/usr/share/applications + cp ${S}/scripts/bacula.png ${D}/usr/share/pixmaps/bacula.png + cp ${S}/scripts/bacula.desktop.gnome2.xsu ${D}/usr/share/applications/bacula.desktop + cp ${S}/src/tray-monitor/generic.xpm ${D}/usr/share/pixmaps/bacula-tray-monitor.xpm + cp ${S}/scripts/bacula-tray-monitor.desktop \ + ${D}/usr/share/applications/bacula-tray-monitor.desktop + chmod 755 ${D}/usr/sbin/bacula-tray-monitor + chmod 644 ${D}/etc/bacula/tray-monitor.conf + fi + + if ! 
use bacula-clientonly + then + # the database update scripts + mkdir -p ${D}/etc/bacula/updatedb + cp ${S}/updatedb/* ${D}/etc/bacula/updatedb/ + chmod 754 ${D}/etc/bacula/updatedb/* + + # the logrotate configuration + mkdir -p ${D}/etc/logrotate.d + cp ${S}/scripts/logrotate ${D}/etc/logrotate.d/bacula + chmod 644 ${D}/etc/logrotate.d/bacula + + # the logwatch scripts + mkdir -p ${D}/etc/log.d/conf/logfiles + mkdir -p ${D}/etc/log.d/conf/services + mkdir -p ${D}/etc/log.d/scripts/services + cp ${S}/scripts/logwatch/bacula ${D}/etc/log.d/scripts/services/bacula + cp ${S}/scripts/logwatch/logfile.bacula.conf ${D}/etc/log.d/conf/logfiles/bacula.conf + cp ${S}/scripts/logwatch/services.bacula.conf ${D}/etc/log.d/conf/services/bacula.conf + chmod 755 ${D}/etc/log.d/scripts/services/bacula + chmod 644 ${D}/etc/log.d/conf/logfiles/bacula.conf + chmod 644 ${D}/etc/log.d/conf/services/bacula.conf + + fi + + # the cdrom rescue package + mkdir -p ${D}/etc/bacula/rescue/cdrom + cp -R ${S}/rescue/linux/cdrom/* ${D}/etc/bacula/rescue/cdrom/ + mkdir ${D}/etc/bacula/rescue/cdrom/bin + cp ${S}/src/filed/static-bacula-fd ${D}/etc/bacula/rescue/cdrom/bin/bacula-fd + chmod 754 ${D}/etc/bacula/rescue/cdrom/bin/bacula-fd + + # documentation + for a in ${S}/{ChangeLog,README,ReleaseNotes,kernstodo,LICENSE,doc/latex/bacula.pdf} + do + dodoc $a + done + + dohtml -r ${S}/doc/latex/bacula + + # clean up permissions left broken by install + chmod o-r ${D}/etc/bacula/query.sql + + # remove the working dir so we can add it postinst with group + rmdir ${D}/var/bacula + + # init scripts + exeinto /etc/init.d + if use bacula-clientonly + then + newexe ${S}/platforms/gentoo/bacula-fd bacula-fd + else + if use bacula-split + then + newexe ${S}/platforms/gentoo/bacula-fd bacula-fd + newexe ${S}/platforms/gentoo/bacula-sd bacula-sd + newexe ${S}/platforms/gentoo/bacula-dir bacula-dir + else + newexe ${S}/platforms/gentoo/bacula-init bacula + fi + fi +} + +pkg_postinst() { + # create the daemon group + HAVE_BACULA=`cat /etc/group | grep bacula 2>/dev/null` + if [ -z $HAVE_BACULA ]; then + enewgroup bacula + einfo + einfo "The group bacula has been created. Any users you add to" + einfo "this group have access to files created by the daemons." + fi + + # the working directory + install -m0750 -o root -g bacula -d ${ROOT}/var/bacula + + # link installed bacula-fd.conf into rescue directory + #ln -s /etc/bacula/bacula-fd.conf /etc/bacula/rescue/cdrom/bacula-fd.conf + # no longer necessary after 1.36.2 + + einfo + einfo "The CDRom rescue disk package has been installed into the" + einfo "/etc/bacula/rescue/cdrom/ directory. Please examine the manual" + einfo "for information on creating a rescue CD. CDR device detection" + einfo "during build has been disabled to prevent sandbox violations." + einfo "You need to examine /etc/bacula/rescue/cdrom/Makefile and adjust" + einfo "the device information for your CD recorder." + einfo + + if ! use bacula-clientonly; then + einfo + einfo "Please note either/or nature of database USE flags for" + einfo "Bacula. If mysql is set, it will be used, else postgres" + einfo "else finally SQLite. If you wish to have multiple DBs on" + einfo "one system, you may wish to unset auxillary DBs for this" + einfo "build." 
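+        # A compact sketch of the precedence described above, as applied in
+        # src_compile() earlier in this file (only the first match is used):
+        #   USE contains mysql          -> --with-mysql=/usr
+        #   else USE contains postgres  -> --with-postgresql=/usr
+        #   else USE contains sqlite    -> --with-sqlite=/usr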
+    einfo
+
+    if use mysql
+    then
+        # test for an existing database
+        # note: this ASSUMES no password has been set for bacula database
+        DB_VER=`mysql 2>/dev/null bacula -e 'select * from Version;'|tail -n 1`
+        if [ -z "$DB_VER" ]; then
+            einfo "This appears to be a new install and you plan to use mysql"
+            einfo "for your catalog database. You should now create it by doing"
+            einfo "these commands:"
+            einfo " sh /etc/bacula/grant_mysql_privileges"
+            einfo " sh /etc/bacula/create_mysql_database"
+            einfo " sh /etc/bacula/make_mysql_tables"
+        elif [ "$DB_VER" -lt "8" ]; then
+            einfo "This release requires an upgrade to your bacula database"
+            einfo "as the database format has changed. Please read the"
+            einfo "manual chapter for how to upgrade your database!!!"
+            einfo
+            einfo "Backup your database with the command:"
+            einfo " mysqldump -f --opt bacula | bzip2 > /var/bacula/bacula_backup.sql.bz2"
+            einfo
+            einfo "Then update your database using the scripts found in"
+            einfo "/etc/bacula/updatedb/ from your current version $DB_VER to"
+            einfo "version 8. Note that scripts must be run in order from your"
+            einfo "version to the current version."
+        fi
+    fi
+
+    if use postgres
+    then
+        # test for an existing database
+        # note: this ASSUMES no password has been set for bacula database
+        DB_VER=`echo 'select * from Version;' | psql bacula 2>/dev/null | tail -3 | head -1`
+        if [ -z "$DB_VER" ]; then
+            einfo "This appears to be a new install and you plan to use postgresql"
+            einfo "for your catalog database. You should now create it by doing"
+            einfo "these commands:"
+            einfo " sh /etc/bacula/create_postgresql_database"
+            einfo " sh /etc/bacula/make_postgresql_tables"
+            einfo " sh /etc/bacula/grant_postgresql_privileges"
+        elif [ "$DB_VER" -lt "8" ]; then
+            einfo "This release requires an upgrade to your bacula database"
+            einfo "as the database format has changed. Please read the"
+            einfo "manual chapter for how to upgrade your database!!!"
+            einfo
+            einfo "Backup your database with the command:"
+            einfo " pg_dump bacula | bzip2 > /var/bacula/bacula_backup.sql.bz2"
+            einfo
+            einfo "Then update your database using the scripts found in"
+            einfo "/etc/bacula/updatedb/ from your current version $DB_VER to"
+            einfo "version 8. Note that scripts must be run in order from your"
+            einfo "version to the current version."
+        fi
+    fi
+
+    if use sqlite
+    then
+        # test for an existing database
+        # note: this ASSUMES no password has been set for bacula database
+        DB_VER=`echo "select * from Version;" | sqlite 2>/dev/null /var/bacula/bacula.db | tail -n 1`
+        if [ -z "$DB_VER" ]; then
+            einfo "This appears to be a new install and you plan to use sqlite"
+            einfo "for your catalog database. You should now create it by doing"
+            einfo "these commands:"
+            einfo " sh /etc/bacula/grant_sqlite_privileges"
+            einfo " sh /etc/bacula/create_sqlite_database"
+            einfo " sh /etc/bacula/make_sqlite_tables"
+        elif [ "$DB_VER" -lt "8" ]; then
+            einfo "This release requires an upgrade to your bacula database"
+            einfo "as the database format has changed. Please read the"
+            einfo "manual chapter for how to upgrade your database!!!"
+            einfo
+            einfo "Backup your database with the command:"
+            einfo " echo .dump | sqlite /var/bacula/bacula.db | bzip2 > \\"
+            einfo " /var/bacula/bacula_backup.sql.bz2"
+            einfo
+            einfo "Then update your database using the scripts found in"
+            einfo "/etc/bacula/updatedb/ from your current version $DB_VER to"
+            einfo "version 8.
Note that scripts must be run in order from your" + einfo "version to the current version." + fi + fi + fi + + einfo + einfo "Review your configuration files in /etc/bacula and" + einfo "start the daemons:" + if use bacula-clientonly; then + einfo " /etc/init.d/bacula-fd start" + else + if use bacula-split; then + einfo " /etc/init.d/bacula-sd start" + einfo " /etc/init.d/bacula-dir start" + einfo " /etc/init.d/bacula-fd start" + einfo " or /etc/bacula/bacula will start all three." + else + einfo " /etc/init.d/bacula start" + fi + fi + einfo + einfo "You may also wish to:" + if use bacula-clientonly; then + einfo " rc-update add bacula-fd default" + else + if use bacula-split; then + einfo " rc-update add bacula-sd default" + einfo " rc-update add bacula-dir default" + einfo " rc-update add bacula-fd default" + else + einfo " rc-update add bacula default" + fi + fi + einfo +} diff --git a/platforms/hurd/Makefile.in b/platforms/hurd/Makefile.in new file mode 100644 index 00000000..9f272482 --- /dev/null +++ b/platforms/hurd/Makefile.in @@ -0,0 +1,91 @@ +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# This file is used as the template to create the +# Makefile for the Debian GNU Hurd specific installation. +# +# 21 March 2008 -- Kern Sibbald +# +# for Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +INSTALL = @INSTALL@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ -m 754 + +nothing: + +install: install-autostart + +install-autostart: install-autostart-fd install-autostart-sd install-autostart-dir + +install_logrotate: + @$(INSTALL_PROGRAM) ../../scripts/logrotate $(DESTDIR)/etc/logrotate.d/bacula + +install-autostart-fd: uninstall-autostart-fd + @echo "Installing bacula-fd boot script ..." + @$(INSTALL_PROGRAM) bacula-fd $(DESTDIR)/etc/init.d/bacula-fd + @echo "Installing bacula-fd symlinks ..." + @if test x$(DESTDIR) = x ; then \ + /usr/sbin/update-rc.d bacula-fd start 91 2 3 4 5 . stop 9 0 1 6 .; \ + fi + + +install-autostart-sd: uninstall-autostart-sd + @echo "Installing bacula-sd boot script ..." + @$(INSTALL_PROGRAM) bacula-sd $(DESTDIR)/etc/init.d/bacula-sd + @echo "Installing bacula-sd symlinks ..." + @if test "x$(DESTDIR)" = "x" ; then \ + /usr/sbin/update-rc.d bacula-sd start 91 2 3 4 5 . stop 9 0 1 6 .; \ + fi + + +install-autostart-dir: uninstall-autostart-dir + @echo "Installing bacula-dir boot script ..." + @$(INSTALL_PROGRAM) bacula-dir $(DESTDIR)/etc/init.d/bacula-dir + @echo "Installing bacula-dir symlinks ..." + @if test "x$(DESTDIR)" = "x" ; then \ + /usr/sbin/update-rc.d bacula-dir start 90 2 3 4 5 . 
stop 9 0 1 6 .; \ + fi + + +uninstall: uninstall-autostart + +uninstall-autostart: uninstall-autostart-fd uninstall-autostart-sd uninstall-autostart-dir + +uninstall-logrotate: + @rm -f $(DESTDIR)/etc/logrotate.d/bacula + +uninstall-autostart-fd: + @if test "x$(DESTDIR)" = "x" -a -f /etc/init.d/bacula-fd; then \ + /etc/init.d/bacula-fd stop; \ + rm -f $(DESTDIR)/etc/init.d/bacula-fd; \ + /usr/sbin/update-rc.d bacula-fd remove; \ + fi + + +uninstall-autostart-sd: + @if test "x$(DESTDIR)" = "x" -a -f /etc/init.d/bacula-sd; then \ + /etc/init.d/bacula-sd stop; \ + rm -f $(DESTDIR)/etc/init.d/bacula-sd; \ + /usr/sbin/update-rc.d bacula-sd remove; \ + fi + +uninstall-autostart-dir: + @if test "x$(DESTDIR)" = "x" -a -f /etc/init.d/bacula-dir; then \ + /etc/init.d/bacula-dir stop; \ + rm -f $(DESTDIR)/etc/init.d/bacula-dir; \ + /usr/sbin/update-rc.d bacula-dir remove; \ + fi + @rm -f $(DESTDIR)/etc/init.d/bacula-dir + +clean: + @rm -f 1 2 3 + +distclean: clean + @rm -f Makefile + @rm -f bacula-sd bacula-fd bacula-dir + +devclean: clean + @rm -f Makefile + @rm -f bacula-sd bacula-fd bacula-dir diff --git a/platforms/hurd/bacula-dir.in b/platforms/hurd/bacula-dir.in new file mode 100644 index 00000000..8c4535a4 --- /dev/null +++ b/platforms/hurd/bacula-dir.in @@ -0,0 +1,78 @@ +#! /bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# bacula This shell script takes care of starting and stopping +# the bacula Director daemon on Debian GNU Hurd systems. +# +# Kern E. Sibbald - 21 March 2008 +# +# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# +### BEGIN INIT INFO +# Provides: bacula-dir +# Required-Start: $network +# Required-Stop: $network +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Start @BACULA@ Director daemon at boot time +# Description: Enable @BACULA@ Director. +### END INIT INFO +# + +NAME="bacula-dir" +DESC="@BACULA@ Director" +DAEMON=@sbindir@/${NAME} +BUSER=@dir_user@ +BGROUP=@dir_group@ +BOPTIONS="-c @sysconfdir@/${NAME}.conf" +BPORT=@dir_port@ + +PATH=/sbin:/bin:/usr/sbin:/usr/bin + +test -f $DAEMON || exit 0 + +if [ -n "`getent services ${NAME}`" ]; then + BPORT=`getent services ${NAME} | awk '{ gsub("/tcp","",$2); print $2; }'` +fi + +if [ -f /etc/default/$NAME ]; then + . /etc/default/$NAME +fi + +PIDFILE=@piddir@/${NAME}.${BPORT}.pid + +if [ "x${BUSER}" != "x" ]; then + USERGRP="--chuid ${BUSER}" + if [ "x${BGROUP}" != "x" ]; then + USERGRP="${USERGRP}:${BGROUP}" + fi +fi + +RETVAL=0 +case "$1" in + start) + echo -n "Starting ${DESC}: " + start-stop-daemon --start --quiet --pidfile ${PIDFILE} ${USERGRP} --exec ${DAEMON} -- ${BOPTIONS} + RETVAL=$? + echo "${NAME}" + ;; + stop) + echo -n "Stopping ${DESC}: " + start-stop-daemon --oknodo --stop --quiet ${USERGRP} --exec ${DAEMON} -- ${BOPTIONS} + RETVAL=$? + echo "${NAME}" + ;; + restart|force-reload) + $0 stop + sleep 5 + $0 start + RETVAL=$? + ;; + *) + echo "Usage: /etc/init.d/${NAME} {start|stop|restart|force-reload}" >&2 + exit 1 + ;; +esac +exit $RETVAL diff --git a/platforms/hurd/bacula-fd.in b/platforms/hurd/bacula-fd.in new file mode 100644 index 00000000..b2578c59 --- /dev/null +++ b/platforms/hurd/bacula-fd.in @@ -0,0 +1,78 @@ +#! /bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# bacula This shell script takes care of starting and stopping +# the bacula Director daemon on Debian GNU Hurd systems. +# +# Kern E. 
Sibbald - 21 March 2008 +# +# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# +### BEGIN INIT INFO +# Provides: bacula-fd +# Required-Start: $network +# Required-Stop: $network +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Start @BACULA@ Client daemon at boot time +# Description: Enable @BACULA@ Client. +### END INIT INFO + + +NAME="bacula-fd" +DESC="@BACULA@ File Daemon" +DAEMON=@sbindir@/${NAME} +BUSER=@fd_user@ +BGROUP=@fd_group@ +BOPTIONS="-c @sysconfdir@/${NAME}.conf" +BPORT=@fd_port@ + +PATH=/sbin:/bin:/usr/sbin:/usr/bin + +test -f $DAEMON || exit 0 + +if [ -n "`getent services ${NAME}`" ]; then + BPORT=`getent services ${NAME} | awk '{ gsub("/tcp","",$2); print $2; }'` +fi + +if [ -f /etc/default/$NAME ]; then + . /etc/default/$NAME +fi + +PIDFILE=@piddir@/${NAME}.${BPORT}.pid + +if [ "x${BUSER}" != "x" ]; then + USERGRP="--chuid ${BUSER}" + if [ "x${BGROUP}" != "x" ]; then + USERGRP="${USERGRP}:${BGROUP}" + fi +fi + +RETVAL=0 +case "$1" in + start) + echo -n "Starting ${DESC}: " + start-stop-daemon --start --quiet --pidfile ${PIDFILE} ${USERGRP} --exec ${DAEMON} -- ${BOPTIONS} + RETVAL=$? + echo "${NAME}" + ;; + stop) + echo -n "Stopping ${DESC}: " + start-stop-daemon --oknodo --stop --quiet ${USERGRP} --exec ${DAEMON} -- ${BOPTIONS} + RETVAL=$? + echo "${NAME}" + ;; + restart|force-reload) + $0 stop + sleep 5 + $0 start + RETVAL=$? + ;; + *) + echo "Usage: /etc/init.d/${NAME} {start|stop|restart|force-reload}" >&2 + exit 1 + ;; +esac +exit $RETVAL diff --git a/platforms/hurd/bacula-sd.in b/platforms/hurd/bacula-sd.in new file mode 100644 index 00000000..28920782 --- /dev/null +++ b/platforms/hurd/bacula-sd.in @@ -0,0 +1,78 @@ +#! /bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# bacula This shell script takes care of starting and stopping +# the bacula Director daemon on Debian GNU Hurd systems. +# +# Kern E. Sibbald - 21 March 2008 +# +# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# +### BEGIN INIT INFO +# Provides: bacula-sd +# Required-Start: $network +# Required-Stop: $network +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Start @BACULA@ Storage daemon at boot time +# Description: Enable @BACULA@ Storage daemon. +### END INIT INFO + + +NAME="bacula-sd" +DESC="@BACULA@ Storage Daemon" +DAEMON=@sbindir@/${NAME} +BUSER=@sd_user@ +BGROUP=@sd_group@ +BOPTIONS="-c @sysconfdir@/${NAME}.conf" +BPORT=@sd_port@ + +PATH=/sbin:/bin:/usr/sbin:/usr/bin + +test -f $DAEMON || exit 0 + +if [ -n "`getent services ${NAME}`" ]; then + BPORT=`getent services ${NAME} | awk '{ gsub("/tcp","",$2); print $2; }'` +fi + +if [ -f /etc/default/$NAME ]; then + . /etc/default/$NAME +fi + +PIDFILE=@piddir@/${NAME}.${BPORT}.pid + +if [ "x${BUSER}" != "x" ]; then + USERGRP="--chuid ${BUSER}" + if [ "x${BGROUP}" != "x" ]; then + USERGRP="${USERGRP}:${BGROUP}" + fi +fi + +RETVAL=0 +case "$1" in + start) + echo -n "Starting ${DESC}: " + start-stop-daemon --start --quiet --pidfile ${PIDFILE} ${USERGRP} --exec ${DAEMON} -- ${BOPTIONS} + RETVAL=$? + echo "${NAME}" + ;; + stop) + echo -n "Stopping ${DESC}: " + start-stop-daemon --oknodo --stop --quiet ${USERGRP} --exec ${DAEMON} -- ${BOPTIONS} + RETVAL=$? + echo "${NAME}" + ;; + restart|force-reload) + $0 stop + sleep 5 + $0 start + RETVAL=$? 
+      ;;
+   *)
+      echo "Usage: /etc/init.d/${NAME} {start|stop|restart|force-reload}" >&2
+      exit 1
+      ;;
+esac
+exit $RETVAL
diff --git a/platforms/install-symlinks b/platforms/install-symlinks
new file mode 100755
index 00000000..ed91cd62
--- /dev/null
+++ b/platforms/install-symlinks
@@ -0,0 +1,99 @@
+#!/bin/sh
+#
+# install-symlinks.sh - shell script for installing symbolic links
+# for system startup
+#
+# Copyright (C) 1999-2000 Riccardo Facchetti
+#
+# Modified for use with Bacula 15 November 2001, Kern Sibbald
+#
+#
+# Theory of operation:
+# this script attempts to detect which runlevels are appropriate for
+# apcupsd startup and consequently installs the OS startup symbolic links
+# in the correct locations.
+#
+# For example, the suse distribution uses sysvinit, so the script will:
+# 1. search for the init scripts directory
+# 2. try to detect on which runlevels it is appropriate to run apcupsd
+# (presumably all the runlevels at which syslogd also runs)
+# 3. install the symbolic links into the previously detected runlevels
+
+action=$1
+dist=$2
+
+if [ -z "$action" -o -z "$dist" ]
+then
+   echo "Missing parameter on command line."
+   exit 1
+fi
+
+case $action in
+   install)
+      echo "Generic symlinks installation..."
+      case $dist in
+         suse)
+
+            if [ -d /etc/rc.d ]
+            then
+               initrcd="/etc/rc.d"
+            elif [ -d /sbin/init.d ]
+            then
+               initrcd="/sbin/init.d"
+            else
+               echo "Cannot find init scripts directory."
+               exit 1
+            fi
+
+            for runlevel in 1 2 3 4 5
+            do
+               if [ -L $initrcd/rc$runlevel.d/S*syslog ]
+               then
+                  echo " Installing runlevel $runlevel..."
+                  ln -sf $initrcd/apcupsd $initrcd/rc$runlevel.d/K20apcupsd
+                  ln -sf $initrcd/apcupsd $initrcd/rc$runlevel.d/S20apcupsd
+               fi
+            done
+            ;;
+         *)
+            echo " relying on $dist-specific Makefile for symlink installation"
+            ;;
+      esac
+      ;;
+   uninstall)
+      echo "Generic symlinks uninstallation..."
+      case $dist in
+         suse)
+
+            if [ -d /etc/rc.d ]
+            then
+               initrcd="/etc/rc.d"
+            elif [ -d /sbin/init.d ]
+            then
+               initrcd="/sbin/init.d"
+            else
+               echo "Cannot detect init scripts directory."
+               exit 1
+            fi
+
+            for runlevel in 1 2 3 4 5
+            do
+               if [ -L $initrcd/rc$runlevel.d/S20apcupsd ]
+               then
+                  echo " Removing runlevel $runlevel..."
+                  rm -f $initrcd/rc$runlevel.d/K20apcupsd
+                  rm -f $initrcd/rc$runlevel.d/S20apcupsd
+               fi
+            done
+            ;;
+         *)
+            echo " relying on $dist-specific Makefile for symlink uninstallation"
+            ;;
+      esac
+      ;;
+   *)
+      echo "Wrong parameter $action."
+      exit 1
+esac
+
+exit 0
diff --git a/platforms/irix/Makefile.in b/platforms/irix/Makefile.in
new file mode 100644
index 00000000..ac0250d7
--- /dev/null
+++ b/platforms/irix/Makefile.in
@@ -0,0 +1,88 @@
+#
+# Copyright (C) 2000-2015 Kern Sibbald
+# License: BSD 2-Clause; see file LICENSE-FOSS
+#
+# This file is used as the template to create the
+# Makefile for the Irix specific installation.
+# +# 15 November 2001 -- Kern Sibbald +# +# for Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +INSTALL = @INSTALL@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ + +nothing: + +install: install-autostart + +install-autostart: install-autostart-fd install-autostart-sd install-autostart-dir + + +install-autostart-fd: + @rm -f /etc/rc0.d/K20bacula-fd + @rm -f /etc/rc1.d/S99bacula-fd + @rm -f /etc/rc2.d/S99bacula-fd + @$(INSTALL_PROGRAM) -m 744 bacula-fd /etc/init.d/bacula-fd + # set symlinks for script at startup and shutdown + @ln -f -s /etc/init.d/bacula-fd /etc/rc0.d/K20bacula-fd + @ln -f -s /etc/init.d/bacula-fd /etc/rc1.d/S99bacula-fd + @ln -f -s /etc/init.d/bacula-fd /etc/rc2.d/S99bacula-fd + + +install-autostart-sd: + @rm -f /etc/rc0.d/K20bacula-sd + @rm -f /etc/rc1.d/S99bacula-sd + @rm -f /etc/rc2.d/S99bacula-sd + @$(INSTALL_PROGRAM) -m 744 bacula-sd /etc/rc.d/init.d/bacula-sd + # set symlinks for script at startup and shutdown + @ln -f -s /etc/init.d/bacula-sd /etc/rc0.d/K20bacula-sd + @ln -f -s /etc/init.d/bacula-sd /etc/rc1.d/S99bacula-sd + @ln -f -s /etc/init.d/bacula-sd /etc/rc2.d/S99bacula-sd + + +install-autostart-dir: + @rm -f /etc/rc0.d/K20bacula-dir + @rm -f /etc/rc1.d/S99bacula-dir + @rm -f /etc/rc2.d/S99bacula-dir + @$(INSTALL_PROGRAM) -m 744 bacula-dir /etc/rc.d/init.d/bacula-dir + # set symlinks for script at startup and shutdown + @ln -f -s /etc/init.d/bacula-dir /etc/rc0.d/K20bacula-dir + @ln -f -s /etc/init.d/bacula-dir /etc/rc1.d/S99bacula-dir + @ln -f -s /etc/init.d/bacula-dir /etc/rc2.d/S99bacula-dir + + +uninstall: uninstall-autostart + +uninstall-autostart: uninstall-autostart-fd uninstall-autostart-sd uninstall-autostart-dir + +uninstall-autostart-fd: + @rm -f /etc/rc0.d/K20bacula-fd + @rm -f /etc/rc1.d/S99bacula-fd + @rm -f /etc/rc2.d/S99bacula-fd + @rm -f /etc/rc.d/init.d/bacula-fd + + +uninstall-autostart-sd: + @rm -f /etc/rc0.d/K20bacula-sd + @rm -f /etc/rc1.d/S99bacula-sd + @rm -f /etc/rc2.d/S99bacula-sd + @rm -f /etc/rc.d/init.d/bacula-sd + +uninstall-autostart-dir: + @rm -f /etc/rc0.d/K20bacula-dir + @rm -f /etc/rc1.d/S99bacula-dir + @rm -f /etc/rc2.d/S99bacula-dir + @rm -f /etc/rc.d/init.d/bacula-dir + +clean: + @rm -f 1 2 3 + +distclean: clean + @rm -f bacula-sd bacula-fd bacula-dir + @rm -f Makefile bacula-*.spec + +devclean: clean + @rm -f bacula-sd bacula-fd bacula-dir + @rm -f Makefile bacula-*.spec diff --git a/platforms/irix/bacula-dir.in b/platforms/irix/bacula-dir.in new file mode 100755 index 00000000..78dde036 --- /dev/null +++ b/platforms/irix/bacula-dir.in @@ -0,0 +1,42 @@ +#! /bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# bacula This shell script takes care of starting and stopping +# the bacula Director daemon +# +# chkconfig: 2345 90 9 +# description: The Leading Open Source Backup Solution. +# +# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +RETVAL=0 +case "$1" in + start) + echo "Starting the Bacula Director: " + @sbindir@/bacula-dir $2 -c @sysconfdir@/bacula-dir.conf + RETVAL=$? + echo + [ $RETVAL -eq 0 ] && touch @subsysdir@/bacula-dir + ;; + stop) + echo "Stopping the Director daemon: " +# killproc @sbindir@/bacula-dir + RETVAL=$? + echo + [ $RETVAL -eq 0 ] && rm -f @subsysdir@/bacula-dir + ;; + restart) + $0 stop + sleep 5 + $0 start + RETVAL=$? 
+      ;;
+   *)
+      echo "Usage: $0 {start|stop|restart}"
+      exit 1
+      ;;
+esac
+exit $RETVAL
diff --git a/platforms/irix/bacula-fd.in b/platforms/irix/bacula-fd.in
new file mode 100755
index 00000000..678d4af1
--- /dev/null
+++ b/platforms/irix/bacula-fd.in
@@ -0,0 +1,44 @@
+#! /bin/sh
+#
+# Copyright (C) 2000-2015 Kern Sibbald
+# License: BSD 2-Clause; see file LICENSE-FOSS
+#
+# bacula This shell script takes care of starting and stopping
+# the bacula File daemon.
+#
+# chkconfig: 2345 90 9
+# description: The Leading Open Source Backup Solution.
+#
+# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@
+#
+
+RETVAL=0
+case "$1" in
+   'start')
+      echo "Starting the Bacula File daemon: "
+      if test -x @sbindir@/bacula-fd && test -f @sysconfdir@/bacula-fd.conf; then
+         exec @sbindir@/bacula-fd $2 -c @sysconfdir@/bacula-fd.conf
+      fi
+      RETVAL=$?
+      echo
+      [ $RETVAL -eq 0 ] && touch @subsysdir@/bacula-fd
+      ;;
+   'stop')
+      echo "Stopping the Bacula File daemon: "
+      /sbin/killall -k 10 -TERM @sbindir@/bacula-fd
+      RETVAL=$?
+      echo
+      [ $RETVAL -eq 0 ] && rm -f @subsysdir@/bacula-fd
+      ;;
+   'restart')
+      $0 stop
+      sleep 5
+      $0 start
+      RETVAL=$?
+      ;;
+   *)
+      echo "Usage: $0 {start|stop|restart}"
+      exit 1
+      ;;
+esac
+exit $RETVAL
diff --git a/platforms/irix/bacula-sd.in b/platforms/irix/bacula-sd.in
new file mode 100755
index 00000000..7aafe60b
--- /dev/null
+++ b/platforms/irix/bacula-sd.in
@@ -0,0 +1,42 @@
+#! /bin/sh
+#
+# Copyright (C) 2000-2015 Kern Sibbald
+# License: BSD 2-Clause; see file LICENSE-FOSS
+#
+# bacula This shell script takes care of starting and stopping
+# the bacula Storage daemon.
+#
+# chkconfig: 2345 90 9
+# description: The Leading Open Source Backup Solution.
+#
+# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@
+#
+
+RETVAL=0
+case "$1" in
+   start)
+      echo "Starting the Bacula Storage daemon: "
+      @sbindir@/bacula-sd $2 -c @sysconfdir@/bacula-sd.conf
+      RETVAL=$?
+      echo
+      [ $RETVAL -eq 0 ] && touch @subsysdir@/bacula-sd
+      ;;
+   stop)
+      echo "Stopping the Bacula Storage daemon: "
+# killproc @sbindir@/bacula-sd
+      RETVAL=$?
+      echo
+      [ $RETVAL -eq 0 ] && rm -f @subsysdir@/bacula-sd
+      ;;
+   restart)
+      $0 stop
+      sleep 5
+      $0 start
+      RETVAL=$?
+      ;;
+   *)
+      echo "Usage: $0 {start|stop|restart}"
+      exit 1
+      ;;
+esac
+exit $RETVAL
diff --git a/platforms/mandrake/Makefile.in b/platforms/mandrake/Makefile.in
new file mode 100644
index 00000000..7a80062f
--- /dev/null
+++ b/platforms/mandrake/Makefile.in
@@ -0,0 +1,88 @@
+#
+# Copyright (C) 2000-2015 Kern Sibbald
+# License: BSD 2-Clause; see file LICENSE-FOSS
+#
+# This file is used as the template to create the
+# Makefile for the Mandrake specific installation.
+# +# 15 November 2001 -- Kern Sibbald +# +# for Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +INSTALL = @INSTALL@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ + +nothing: + +install: install-autostart + +install-autostart: install-autostart-fd install-autostart-sd install-autostart-dir + + +install-autostart-fd: + @if test x$(DESTDIR) = x -a -f /etc/rc.d/init.d/bacula-fd; then \ + /sbin/chkconfig --del bacula-fd; \ + fi + @$(INSTALL_PROGRAM) -m 744 bacula-fd $(DESTDIR)/etc/rc.d/init.d/bacula-fd + # set symlinks for script at startup and shutdown + @if test x$(DESTDIR) = x ; then \ + /sbin/chkconfig --add bacula-fd; \ + fi + + +install-autostart-sd: + @if test x$(DESTDIR) = x -a -f /etc/rc.d/init.d/bacula-sd; then \ + /sbin/chkconfig --del bacula-sd; \ + fi + @$(INSTALL_PROGRAM) -m 744 bacula-sd $(DESTDIR)/etc/rc.d/init.d/bacula-sd + # set symlinks for script at startup and shutdown + @if test x$(DESTDIR) = x ; then \ + /sbin/chkconfig --add bacula-sd; \ + fi + + +install-autostart-dir: + @if test x$(DESTDIR) = x -a -f /etc/rc.d/init.d/bacula-dir; then \ + /sbin/chkconfig --del bacula-dir; \ + fi + @$(INSTALL_PROGRAM) -m 744 bacula-dir $(DESTDIR)/etc/rc.d/init.d/bacula-dir + # set symlinks for script at startup and shutdown + @if test x$(DESTDIR) = x ; then \ + /sbin/chkconfig --add bacula-dir; \ + fi + + +uninstall: uninstall-autostart + +uninstall-autostart: uninstall-autostart-fd uninstall-autostart-sd uninstall-autostart-dir + +uninstall-autostart-fd: + @if test x$(DESTDIR) = x -a -f /etc/rc.d/init.d/bacula-fd; then \ + /sbin/chkconfig --del bacula-fd; \ + fi + @rm -f $(DESTDIR)/etc/rc.d/init.d/bacula-fd + + +uninstall-autostart-sd: + @if test x$(DESTDIR) = x -a -f /etc/rc.d/init.d/bacula-sd; then \ + /sbin/chkconfig --del bacula-sd; \ + fi + @rm -f $(DESTDIR)/etc/rc.d/init.d/bacula-sd + +uninstall-autostart-dir: + @if test x$(DESTDIR) = x -a -f /etc/rc.d/init.d/bacula-dir; then \ + /sbin/chkconfig --del bacula-dir; \ + fi + @rm -f $(DESTDIR)/etc/rc.d/init.d/bacula-dir + +clean: + @rm -f 1 2 3 + +distclean: clean + @rm -f Makefile bacula-*.spec bacula.*.spec bacula.spec + @rm -f bacula-sd bacula-fd bacula-dir + +devclean: clean + @rm -f Makefile bacula-*.spec bacula.*.spec bacula.spec + @rm -f bacula-sd bacula-fd bacula-dir diff --git a/platforms/mandrake/bacula-dir.in b/platforms/mandrake/bacula-dir.in new file mode 100755 index 00000000..e1f7bdbd --- /dev/null +++ b/platforms/mandrake/bacula-dir.in @@ -0,0 +1,48 @@ +#! /bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# bacula This shell script takes care of starting and stopping +# the bacula Director daemon +# +# chkconfig: 2345 90 9 +# description: The Leading Open Source Backup Solution. +# +# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +# Source function library +. /etc/rc.d/init.d/functions + +RETVAL=0 +case "$1" in + start) + echo -n "Starting the Bacula Director: " + daemon @sbindir@/bacula-dir $2 -c @sysconfdir@/bacula-dir.conf + RETVAL=$? + echo + [ $RETVAL -eq 0 ] && touch @subsysdir@/bacula-dir + ;; + stop) + echo -n "Stopping the Director daemon: " + killproc @sbindir@/bacula-dir + RETVAL=$? + echo + [ $RETVAL -eq 0 ] && rm -f @subsysdir@/bacula-dir + ;; + restart) + $0 stop + sleep 5 + $0 start + RETVAL=$? 
+ ;; + status) + status @sbindir@/bacula-dir + ;; + *) + echo "Usage: $0 {start|stop|restart|status}" + exit 1 + ;; +esac +exit $RETVAL diff --git a/platforms/mandrake/bacula-fd.in b/platforms/mandrake/bacula-fd.in new file mode 100755 index 00000000..2b7ff01a --- /dev/null +++ b/platforms/mandrake/bacula-fd.in @@ -0,0 +1,48 @@ +#! /bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# bacula This shell script takes care of starting and stopping +# the bacula File daemon. +# +# chkconfig: 2345 90 9 +# description: The Leading Open Source Backup Solution. +# +# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +# Source function library +. /etc/rc.d/init.d/functions + +RETVAL=0 +case "$1" in + start) + echo -n "Starting the Bacula File daemon: " + daemon @sbindir@/bacula-fd $2 -c @sysconfdir@/bacula-fd.conf + RETVAL=$? + echo + [ $RETVAL -eq 0 ] && touch @subsysdir@/bacula-fd + ;; + stop) + echo -n "Stopping the Bacula File daemon: " + killproc @sbindir@/bacula-fd + RETVAL=$? + echo + [ $RETVAL -eq 0 ] && rm -f @subsysdir@/bacula-fd + ;; + restart) + $0 stop + sleep 5 + $0 start + RETVAL=$? + ;; + status) + status @sbindir@/bacula-fd + ;; + *) + echo "Usage: $0 {start|stop|restart|status}" + exit 1 + ;; +esac +exit $RETVAL diff --git a/platforms/mandrake/bacula-sd.in b/platforms/mandrake/bacula-sd.in new file mode 100755 index 00000000..61de07b7 --- /dev/null +++ b/platforms/mandrake/bacula-sd.in @@ -0,0 +1,48 @@ +#! /bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# bacula This shell script takes care of starting and stopping +# the bacula Storage daemon. +# +# chkconfig: 2345 90 9 +# description: The Leading Open Source Backup Solution. +# +# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +# Source function library +. /etc/rc.d/init.d/functions + +RETVAL=0 +case "$1" in + start) + echo -n "Starting the Bacula Storage daemon: " + daemon @sbindir@/bacula-sd $2 -c @sysconfdir@/bacula-sd.conf + RETVAL=$? + echo + [ $RETVAL -eq 0 ] && touch @subsysdir@/bacula-sd + ;; + stop) + echo -n "Stopping the Bacula Storage daemon: " + killproc @sbindir@/bacula-sd + RETVAL=$? + echo + [ $RETVAL -eq 0 ] && rm -f @subsysdir@/bacula-sd + ;; + restart) + $0 stop + sleep 5 + $0 start + RETVAL=$? + ;; + status) + status @sbindir@/bacula-sd + ;; + *) + echo "Usage: $0 {start|stop|restart|status}" + exit 1 + ;; +esac +exit $RETVAL diff --git a/platforms/mandrake/bacula.spec.bluca.in b/platforms/mandrake/bacula.spec.bluca.in new file mode 100644 index 00000000..01ac1315 --- /dev/null +++ b/platforms/mandrake/bacula.spec.bluca.in @@ -0,0 +1,501 @@ +%define name bacula +%define version 1.32d +%define release 1mdk + +%define MYSQL 0 +%define GNOME 0 +%define TCPW 1 + +%{?_with_mysql: %{expand: %%global MYSQL 1}} +%{?_without_mysql: %{expand: %%global MYSQL 0}} +%{?_with_gnome: %{expand: %%global GNOME 1}} +%{?_without_gnome: %{expand: %%global GNOME 0}} +%{?_with_wrap: %{expand: %%global TCPW 1}} +%{?_without_wrap: %{expand: %%global TCPW 0}} + +%define blurb Bacula - The Leading Open Source Backup Solution. 
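+# The MYSQL/GNOME/TCPW defaults above can normally be flipped at build time
+# with rpm's conditional build options (a sketch; adjust the spec file name
+# and options to your rpm/rpmbuild setup):
+#   rpmbuild -ba bacula.spec --with mysql --without gnome --with wrap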
+ +Summary: Bacula - The Network Backup Solution +Name: %{name} +Version: %{version} +Release: %{release} +Group: Archiving/Backup +License: BSD 2-Clause; see file LICENSE-FOSS +Source: http://www.prdownloads.sourceforge.net/bacula/%{name}-%{version}.tar.bz2 +Source1: %{name}-icons.tar.bz2 +BuildRoot: %{_tmppath}/%{name}-root +URL: http://www.bacula.org/ +BuildRequires: readline-devel, perl-base, zlib-devel +%if %{TCPW} +BuildRequires: tcp_wrappers-devel +%endif +Patch0: bacula-1.32d-config.diff.bz2 +Patch1: bacula-1.30a-prompt.diff.bz2 + +%description +%{blurb} +Bacula is a set of computer programs that permit you (or the system +administrator) to manage backup, recovery, and verification of computer +data across a network of computers of different kinds. In technical terms, +it is a network client/server based backup program. Bacula is relatively +easy to use and efficient, while offering many advanced storage management +features that make it easy to find and recover lost or damaged files. + +%package dir +Summary: Bacula Director and Catalog services +Group: Archiving/Backup +Prereq: rpm-helper, perl-base +%if %{MYSQL} +Requires: MySQL >= 3.23 +BuildRequires: MySQL-devel >= 3.23 +%else +Requires: sqlite-tools +BuildRequires: sqlite-devel +%endif +Conflicts: bacula-fd < %{version} +Conflicts: bacula-sd < %{version} + +%description dir +%{blurb} +Bacula Director is the program that supervises all the backup, restore, verify +and archive operations. The system administrator uses the Bacula Director to +schedule backups and to recover files. +Catalog services are comprised of the software programs responsible for +maintaining the file indexes and volume databases for all files backed up. +The Catalog services permit the System Administrator or user to quickly locate +and restore any desired file, since it maintains a record of all Volumes used, +all Jobs run, and all Files saved. +%if %{MYSQL} +This build requires MySQL to be installed separately as the catalog database. +%else +This build requires sqlite to be installed separately as the catalog database. +%endif + +%package console +Summary: Bacula Console +Group: Archiving/Backup +Prereq: sed + +%description console +%{blurb} +Bacula Console is the program that allows the administrator or user to +communicate with the Bacula Director. +This is the text only console interface. + +%if %{GNOME} +%package console-gnome +Summary: Bacula Gnome Console +Group: Archiving/Backup +#BuildRequires: libgnome32-devel +BuildRequires: libgnomeui2-devel +Prereq: sed + +%description console-gnome +%{blurb} +Bacula Console is the program that allows the administrator or user to +communicate with the Bacula Director. +This is the GNOME GUI interface. +%endif + +%package fd +Summary: Bacula File services (Client) +Group: Archiving/Backup +Prereq: rpm-helper +Prereq: sed +Conflicts: bacula-dir < %{version} + +%description fd +%{blurb} +Bacula File services (or Client program) is the software program that is +installed on the machine to be backed up. It is specific to the operating +system on which it runs and is responsible for providing the file attributes +and data when requested by the Director. The File services are also responsible +for the file system dependent part of restoring the file attributes and data +during a recovery operation. +This program runs as a daemon on the machine to be backed up, and in some of +the documentation, the File daemon is referred to as the Client (for example in +Bacula configuration file). 
+ +%package sd +Summary: Bacula Storage services +Group: Archiving/Backup +Prereq: rpm-helper +Prereq: sed +Conflicts: bacula-dir < %{version} + +%description sd +%{blurb} +Bacula Storage services consist of the software programs that perform the +storage and recovery of the file attributes and data to the physical backup +media or volumes. In other words, the Storage daemon is responsible for reading +and writing your tapes (or other storage media, e.g. files). +The Storage services runs as a daemon on the machine that has the backup +device (usually a tape drive). + +%prep +%setup -q -c +cd %{name}-%{version} +%patch0 -p1 -b .config +%patch1 -p1 -b .prompt +mkdir doc/manual +mv doc/html-manual/*.html doc/manual +mv doc/html-manual/*.css doc/manual +mv doc/html-manual/*.gif doc/manual + +%build +cd %{name}-%{version} +%serverbuild +%configure \ + --enable-smartalloc \ +%if %{MYSQL} + --with-mysql \ +%else + --with-sqlite \ +%endif +%if %{GNOME} + --enable-gnome \ +%endif +%if %{TCPW} + --with-tcp-wrappers \ +%endif + --sysconfdir=%{_sysconfdir}/%{name} \ + --with-scriptdir=%{_libexecdir}/%{name} \ + --with-working-dir=%{_localstatedir}/%{name} \ + --with-subsys-dir=/var/lock/subsys \ + --with-dir-password="#FAKE#DIR#PASSWORD#" \ + --with-fd-password="#FAKE#FD#PASSWORD#" \ + --with-sd-password="#FAKE#SD#PASSWORD#" + +%make + +%install +cd %{name}-%{version} +rm -rf %{buildroot} +%makeinstall sysconfdir=%{buildroot}%{_sysconfdir}/%{name} scriptdir=%{buildroot}%{_libexecdir}/%{name} working_dir=%{buildroot}%{_localstatedir}/%{name} + +# install the upgrade scripts +%if %{MYSQL} +install -m 755 src/cats/alter_mysql_tables %{buildroot}%{_libexecdir}/%{name} +%else +install -m 755 src/cats/alter_sqlite_tables %{buildroot}%{_libexecdir}/%{name} +%endif + +# install the init scripts +mkdir -p %{buildroot}%{_initrddir} +install -m 755 platforms/mandrake/bacula-dir %{buildroot}%{_initrddir}/bacula-dir +install -m 755 platforms/mandrake/bacula-fd %{buildroot}%{_initrddir}/bacula-fd +install -m 755 platforms/mandrake/bacula-sd %{buildroot}%{_initrddir}/bacula-sd + +# install the logrotate file +mkdir -p %{buildroot}%{_sysconfdir}/logrotate.d +cp scripts/logrotate %{buildroot}%{_sysconfdir}/logrotate.d/bacula-dir + +mkdir -p %{buildroot}%{_localstatedir}/%{name} + +%if %{GNOME} +# install the menu stuff +mkdir -p $RPM_BUILD_ROOT%{_menudir} +cat << EOF > $RPM_BUILD_ROOT%{_menudir}/%{name}-console-gnome +?package(%{name}-console-gnome): command="%{_sbindir}/gnome-console" icon="%{name}.png" needs="x11" title="Bacula Console" longtitle="Bacula Director Console" section="Applications/Archiving/Backup" +EOF +mkdir -p %{buildroot}%{_iconsdir} +tar jxvf %{SOURCE1} -C %{buildroot}%{_iconsdir} +%endif + +perl -spi -e 's/"#FAKE#(\w+)#PASSWORD#"/#YOU MUST SET THE $1 PASSWORD#/' %{buildroot}%{_sysconfdir}/%{name}/*.conf +touch %{buildroot}%{_sysconfdir}/%{name}/.pw.sed + +%clean +rm -rf %{buildroot} + +%files dir +%defattr(644, root, root, 755) +%doc %{name}-%{version}/ChangeLog %{name}-%{version}/CheckList %{name}-%{version}/ReleaseNotes %{name}-%{version}/kernstodo +%doc %{name}-%{version}/doc/*.pdf %{name}-%{version}/doc/manual %{name}-%{version}/examples +%attr(600, root, root) %config(noreplace) %{_sysconfdir}/%{name}/bacula-dir.conf +%ghost %{_sysconfdir}/%{name}/.pw.sed +%config(noreplace) %{_sysconfdir}/logrotate.d/bacula-dir +%defattr (755, root, root) +%config(noreplace) %{_initrddir}/bacula-dir +%{_sbindir}/bacula-dir +%{_sbindir}/btraceback +%{_sbindir}/dbcheck +%{_sbindir}/smtp +%dir 
%{_libexecdir}/%{name} +%attr(644, root, root) %{_libexecdir}/%{name}/btraceback.gdb +%if %{MYSQL} +%{_libexecdir}/%{name}/create_mysql_database +%{_libexecdir}/%{name}/drop_mysql_tables +%{_libexecdir}/%{name}/grant_mysql_privileges +%{_libexecdir}/%{name}/make_mysql_tables +%{_libexecdir}/%{name}/alter_mysql_tables +%else +%{_libexecdir}/%{name}/create_sqlite_database +%{_libexecdir}/%{name}/drop_sqlite_tables +%exclude %{_libexecdir}/%{name}/grant_mysql_privileges +%{_libexecdir}/%{name}/make_sqlite_tables +%{_libexecdir}/%{name}/alter_sqlite_tables +%endif +%{_libexecdir}/%{name}/delete_catalog_backup +%{_libexecdir}/%{name}/drop_bacula_tables +%{_libexecdir}/%{name}/make_bacula_tables +%{_libexecdir}/%{name}/make_catalog_backup +%attr(644, root, root) %{_libexecdir}/%{name}/query.sql +%attr(700, root, root) %dir %{_localstatedir}/%{name} + +%pre dir -p /usr/bin/perl +umask(0077); +if ( -f "%{_sysconfdir}/%{name}/.pw.sed") { + if ( -f "%{_sysconfdir}/%{name}/bacula-dir.conf") { + system "sed -e 's/s!\(.*\)!\(.*\)!/s!\2!\1!/' %{_sysconfdir}/%{name}/.pw.sed > %{_sysconfdir}/%{name}/.upw.sed"; + system "sed -f %{_sysconfdir}/%{name}/.upw.sed %{_sysconfdir}/%{name}/bacula-dir.conf > %{_sysconfdir}/%{name}/bacula-dir.conf.tmp"; + unlink "%{_sysconfdir}/%{name}/bacula-dir.conf"; + rename "%{_sysconfdir}/%{name}/bacula-dir.conf.tmp", "%{_sysconfdir}/%{name}/bacula-dir.conf"; + unlink "%{_sysconfdir}/%{name}/.upw.sed"; + } +} else { + mkdir("%{_sysconfdir}/%{name}"); + open(IN, "/dev/random") or die "$!"; + open(OUT, ">%{_sysconfdir}/%{name}/.pw.sed") or die "$!"; + foreach $c ("DIR","SD","FD") { + read(IN, $buf, 32); + my $res = pack("u", $buf); + $res =~ s/^.//mg; + $res =~ s/\n//g; + $res =~ tr|` -_|AA-Za-z0-9+/|; + print OUT "s!#YOU MUST SET THE $c PASSWORD#!\"$res\"!\n"; + } + close (IN); + close (OUT); +} + +%post dir +%if %{MYSQL} +# NOTE: IF THIS FAILS DUE TO MYSQL NEEDING A PASSWORD YOU ARE ON YOUR OWN +DB_VER=`mysql bacula -e 'select * from Version;'|tail -n 1 2>/dev/null` +if [ -z "$DB_VER" ]; then +# grant privileges and create tables + echo "Granting privileges for MySQL user bacula..." + %{_libexecdir}/%{name}/grant_mysql_privileges > dev/null + echo "Creating MySQL bacula database..." + %{_libexecdir}/%{name}/create_mysql_database > dev/null + echo "Creating bacula tables..." + %{_libexecdir}/%{name}/make_mysql_tables > dev/null +elif [ "$DB_VER" -lt "6" ]; then + echo "Backing up bacula tables" + mysqldump -f --opt bacula | bzip2 > %{_localstatedir}/%{name}/bacula_backup.sql.bz2 + echo "Upgrading bacula tables" + %{_libexecdir}/%{name}/alter_mysql_tables + echo "If bacula works correctly you can remove the backup file %{_localstatedir}/%{name}/bacula_backup.sql.bz2" +fi +%else +if [ -s %{_localstatedir}/%{name}/bacula.db ]; then +DB_VER=`echo "select * from Version;" | sqlite %{_localstatedir}/%{name}/bacula.db|tail -n 1 2>/dev/null` + if [ "$DB_VER" -lt "6" ]; then + echo "Backing up bacula tables" + echo ".dump" | sqlite %{_localstatedir}/%{name}/bacula.db | bzip2 > %{_localstatedir}/%{name}/bacula_backup.sql.bz2 + echo "Upgrading bacula tables" + %{_libexecdir}/%{name}/alter_sqlite_tables + echo "If bacula works correctly you can remove the backup file %{_localstatedir}/%{name}/bacula_backup.sql.bz2" + fi +else +# create the tables + echo "Creating bacula tables..." 
+ %{_libexecdir}/%{name}/make_sqlite_tables > dev/null +fi +%endif +chmod -R 600 %{_localstatedir}/%{name}/* +for i in %{_sysconfdir}/%{name}/*.conf %{_sysconfdir}/%{name}/*.conf.rpmnew %{_sysconfdir}/%{name}/*.conf.rpmsave; do + if [ -s $i ]; then + sed -f %{_sysconfdir}/%{name}/.pw.sed $i > $i.tmp +# this is needed if upgrading from 1.30a or lower + sed -e '/SubSys[[:space:]]*Directory/I d' $i.tmp > $i + rm -f $i.tmp + fi +done +%_post_service bacula-dir + +%preun dir +%_preun_service bacula-dir + +%files fd +%defattr(755, root, root) +%attr(600, root, root) %config(noreplace) %{_sysconfdir}/%{name}/bacula-fd.conf +%config(noreplace) %{_initrddir}/bacula-fd +%{_sbindir}/bacula-fd +%{_sbindir}/btraceback +%{_sbindir}/smtp +%dir %{_libexecdir}/%{name} +%attr(644, root, root) %{_libexecdir}/%{name}/btraceback.gdb +%attr(700, root, root) %dir %{_localstatedir}/%{name} + +%pre fd +umask 077 +i=%{_sysconfdir}/%{name}/bacula-fd.conf +if [ -s %{_sysconfdir}/%{name}/.pw.sed -a -s $i ]; then + sed -e 's/s!\(.*\)!\(.*\)!/s!\2!\1!/' %{_sysconfdir}/%{name}/.pw.sed > %{_sysconfdir}/%{name}/.upw.sed + sed -f %{_sysconfdir}/%{name}/.upw.sed $i > $i.tmp + mv -f $i.tmp $i + rm -f %{_sysconfdir}/%{name}/.upw.sed +fi + +%post fd +%_post_service bacula-fd +if [ -s %{_sysconfdir}/%{name}/.pw.sed ]; then + for i in %{_sysconfdir}/%{name}/bacula-fd.conf %{_sysconfdir}/%{name}/bacula-fd.conf.rpmnew %{_sysconfdir}/%{name}/bacula-fd.conf.rpmsave; do + if [ -s $i ]; then + sed -f %{_sysconfdir}/%{name}/.pw.sed $i > $i.tmp +# this is needed if upgrading from 1.30a or lower + sed -e '/SubSys[[:space:]]*Directory/I d' $i.tmp > $i + rm -f $i.tmp + fi + done +fi + +%preun fd +%_preun_service bacula-fd + +%files sd +%defattr(755, root, root) +%attr(600, root, root) %config(noreplace) %{_sysconfdir}/%{name}/bacula-sd.conf +%config(noreplace) %{_initrddir}/bacula-sd +%{_sbindir}/bacula-sd +%{_sbindir}/bcopy +%{_sbindir}/bextract +%{_sbindir}/bls +%{_sbindir}/bscan +%{_sbindir}/btape +%{_sbindir}/btraceback +%{_sbindir}/smtp +%dir %{_libexecdir}/%{name} +%attr(644, root, root) %{_libexecdir}/%{name}/btraceback.gdb +%{_libexecdir}/%{name}/mtx-changer +%attr(700, root, root) %dir %{_localstatedir}/%{name} + +%pre sd +umask 077 +i=%{_sysconfdir}/%{name}/bacula-sd.conf +if [ -s %{_sysconfdir}/%{name}/.pw.sed -a -s $i ]; then + sed -e 's/s!\(.*\)!\(.*\)!/s!\2!\1!/' %{_sysconfdir}/%{name}/.pw.sed > %{_sysconfdir}/%{name}/.upw.sed + sed -f %{_sysconfdir}/%{name}/.upw.sed $i > $i.tmp + mv -f $i.tmp $i + rm -f %{_sysconfdir}/%{name}/.upw.sed +fi + +%post sd +%_post_service bacula-sd +if [ -s %{_sysconfdir}/%{name}/.pw.sed ]; then + for i in %{_sysconfdir}/%{name}/bacula-sd.conf %{_sysconfdir}/%{name}/bacula-sd.conf.rpmnew %{_sysconfdir}/%{name}/bacula-sd.conf.rpmsave; do + if [ -s $i ]; then + sed -f %{_sysconfdir}/%{name}/.pw.sed $i > $i.tmp +# this is needed if upgrading from 1.30a or lower + sed -e '/SubSys[[:space:]]*Directory/I d' $i.tmp > $i + rm -f $i.tmp + fi + done +fi + +%preun sd +%_preun_service bacula-sd + +%files console +%defattr(755, root, root) +%attr(600, root, root) %config(noreplace) %{_sysconfdir}/%{name}/console.conf +%{_sbindir}/console +%{_sbindir}/btraceback +%{_sbindir}/smtp +%dir %{_libexecdir}/%{name} +%attr(644, root, root) %{_libexecdir}/%{name}/btraceback.gdb + +%pre console +umask 077 +i=%{_sysconfdir}/%{name}/console.conf +if [ -s %{_sysconfdir}/%{name}/.pw.sed -a -s $i ]; then + sed -e 's/s!\(.*\)!\(.*\)!/s!\2!\1!/' %{_sysconfdir}/%{name}/.pw.sed > %{_sysconfdir}/%{name}/.upw.sed + sed -f 
%{_sysconfdir}/%{name}/.upw.sed $i > $i.tmp + mv -f $i.tmp $i + rm -f %{_sysconfdir}/%{name}/.upw.sed +fi + +%post console +if [ -s %{_sysconfdir}/%{name}/.pw.sed ]; then + for i in %{_sysconfdir}/%{name}/console.conf %{_sysconfdir}/%{name}/console.conf.rpmnew %{_sysconfdir}/%{name}/console.conf.rpmsave; do + if [ -s $i ]; then + sed -f %{_sysconfdir}/%{name}/.pw.sed $i > $i.tmp +# this is needed if upgrading from 1.30a or lower + sed -e '/SubSys[[:space:]]*Directory/I d' $i.tmp > $i + rm -f $i.tmp + fi + done +fi + +%if %{GNOME} +%files console-gnome +%defattr(644, root, root, 755) +%{_iconsdir}/bacula.png +%{_iconsdir}/mini/bacula.png +%{_iconsdir}/large/bacula.png +%{_menudir}/bacula-console-gnome +%attr(600, root, root) %config(noreplace) %{_sysconfdir}/%{name}/gnome-console.conf +%attr(755, root, root) %{_sbindir}/gnome-console +%attr(755, root, root) %{_sbindir}/btraceback +%attr(755, root, root) %{_sbindir}/smtp +%dir %{_libexecdir}/%{name} +%{_libexecdir}/%{name}/btraceback.gdb + +%pre console-gnome +umask 077 +i=%{_sysconfdir}/%{name}/gnome-console.conf +if [ -s %{_sysconfdir}/%{name}/.pw.sed -a -s $i ]; then + sed -e 's/s!\(.*\)!\(.*\)!/s!\2!\1!/' %{_sysconfdir}/%{name}/.pw.sed > %{_sysconfdir}/%{name}/.upw.sed + sed -f %{_sysconfdir}/%{name}/.upw.sed $i > $i.tmp + mv -f $i.tmp $i + rm -f %{_sysconfdir}/%{name}/.upw.sed +fi + +%post console-gnome +%update_menus +if [ -s %{_sysconfdir}/%{name}/.pw.sed ]; then + for i in %{_sysconfdir}/%{name}/gnome-console.conf %{_sysconfdir}/%{name}/gnome-console.conf.rpmnew %{_sysconfdir}/%{name}/gnome-console.conf.rpmsave; do + if [ -s $i ]; then + sed -f %{_sysconfdir}/%{name}/.pw.sed $i > $i.tmp +# this is needed if upgrading from 1.30a or lower + sed -e '/SubSys[[:space:]]*Directory/I d' $i.tmp > $i + rm -f $i.tmp + fi + done +fi + +%postun console-gnome +%clean_menus + +%endif + +%changelog +* Tue Nov 04 2003 Luca Berra 1.32d-1mdk +- 1.32d +- use gnome2 for console +- use tcp_wrappers +- try not to create unneeded .rpmnew in configuration + +* Fri Aug 12 2003 Luca Berra 1.31a-1mdk +- 1.31a +- modified %%post script to upgrade database if needed and do not try to create if it already exists. +- %%post_service should be last in %%post script +- changed sql upgrade script to remove multiple Version rows introduced by %%post +- trying to create the tables multiple times in previous releases. +- smtp is required by btraceback which is required by every daemon. +- added conflicts for lower version + +* Wed Aug 6 2003 Luca Berra 1.30a-3mdk +- changed sqlite requires to match package in contrib +- set passwords for single components to produce an error until user changes them +- try to change passwords automatically if dir package is installed + +* Sun Aug 3 2003 Luca Berra 1.30a-2mdk +- many spec changes +- almost please rpmlint +- fixed readline eats last message bug in console + +* Fri Aug 1 2003 Luca Berra 1.30a-1mdk +- Initial spec file from spec by D. Scott Barninger diff --git a/platforms/openbsd/Makefile.in b/platforms/openbsd/Makefile.in new file mode 100644 index 00000000..d228a8b6 --- /dev/null +++ b/platforms/openbsd/Makefile.in @@ -0,0 +1,143 @@ +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# This file is used as the template to create the +# Makefile for the Solaris specific installation. 
+# +# 15 November 2001 -- Kern Sibbald +# +# for Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +VPATH = @srcdir@ +INSTALL = @INSTALL@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +SED = /usr/bin/sed + +nothing: + +install: install-autostart + +install-autostart: install-autostart-fd install-autostart-sd install-autostart-dir + + +install-autostart-fd: + @echo "FreeBSD platform installation" + $(INSTALL_PROGRAM) -m 744 bacula-fd /etc/rc.bacula-fd + @-today="`date +%Y%m%d%H%M`"; \ + grep -q /etc/rc.bacula-fd /etc/rc.local; \ + if [ $$? -eq 0 ]; then \ + echo "/etc/rc.local already patched"; \ + else \ + rm -f /etc/rc.local.$$today; \ + cp -p /etc/rc.local /etc/rc.local.$$today; \ + ( echo "Start the Bacula File daemon. Do not remove the 'TAG_BACULA_FD' text"; \ + echo "if [ -x /etc/rc.bacula-fd ]; then # TAG_BACULA_FD"; \ + echo " /etc/rc.bacula-fd start # TAG_BACULA_FD"; \ + echo "fi # TAG_BACULA_FD"; \ + ) >> /etc/rc.local; \ + echo ""; \ + fi + + +install-autostart-sd: + @echo "FreeBSD platform installation" + $(INSTALL_PROGRAM) -m 744 bacula-sd /etc/rc.bacula-sd + @-today="`date +%Y%m%d%H%M`"; \ + grep -q /etc/rc.bacula-sd /etc/rc.local; \ + if [ $$? -eq 0 ]; then \ + echo "/etc/rc.local already patched"; \ + else \ + rm -f /etc/rc.local.$$today; \ + cp -p /etc/rc.local /etc/rc.local.$$today; \ + ( echo "Start the Bacula Storage daemon. Do not remove the 'TAG_BACULA_SD' text"; \ + echo "if [ -x /etc/rc.bacula-fd ]; then # TAG_BACULA_SD"; \ + echo " /etc/rc.bacula-fd start # TAG_BACULA_SD"; \ + echo "fi # TAG_BACULA_SD"; \ + ) >> /etc/rc.local; \ + echo ""; \ + fi + +install-autostart-dir: + @echo "FreeBSD platform installation" + $(INSTALL_PROGRAM) -m 744 bacula-dir /etc/rc.bacula-dir + @-today="`date +%Y%m%d%H%M`"; \ + grep -q /etc/rc.bacula-dir /etc/rc.local; \ + if [ $$? -eq 0 ]; then \ + echo "/etc/rc.local already patched"; \ + else \ + rm -f /etc/rc.local.$$today; \ + cp -p /etc/rc.local /etc/rc.local.$$today; \ + ( echo "Start the Bacula Director. Do not remove the 'TAG_BACULA_DIR' text"; \ + echo "if [ -x /etc/rc.bacula-dir ]; then # TAG_BACULA_DIR"; \ + echo " /etc/rc.bacula-dir start # TAG_BACULA_DIR"; \ + echo "fi # TAG_BACULA_DIR"; \ + ) >> /etc/rc.local; \ + echo ""; \ + fi + + +uninstall: uninstall-autostart + +uninstall-autostart: uninstall-autostart-fd uninstall-autostart-sd uninstall-autostart-dir + +uninstall-autostart-fd: + @echo "FreeBSD platform uninstall" + rm -f /etc/rc.bacula-fd + @-today="`date +%Y%m%d%H%M`"; \ + for f in /etc/rc.local ; do \ + grep -q '# TAG_BACULA_FD' $$f; \ + if [ $$? -eq 0 ]; then \ + echo "removing Bacula lines from $$f"; \ + rm -f $$f.$$today; \ + cp -p $$f $$f.$$today; \ + $(SED) -e '/TAG_BACULA_FD/d;' \ + < $$f.$$today > $$f; \ + chmod 644 $$f; \ + fi; \ + done + + +uninstall-autostart-sd: + @echo "FreeBSD platform uninstall" + rm -f /etc/rc.bacula-sd + @-today="`date +%Y%m%d%H%M`"; \ + for f in /etc/rc.local ; do \ + grep -q '# TAG_BACULA_SD' $$f; \ + if [ $$? -eq 0 ]; then \ + echo "removing Bacula lines from $$f"; \ + rm -f $$f.$$today; \ + cp -p $$f $$f.$$today; \ + $(SED) -e '/TAG_BACULA_SD/d;' \ + < $$f.$$today > $$f; \ + chmod 644 $$f; \ + fi; \ + done + +uninstall-autostart-dir: + @echo "FreeBSD platform uninstall" + rm -f /etc/rc.bacula-dir + @-today="`date +%Y%m%d%H%M`"; \ + for f in /etc/rc.local ; do \ + grep -q '# TAG_BACULA_DIR' $$f; \ + if [ $$? 
-eq 0 ]; then \ + echo "removing Bacula lines from $$f"; \ + rm -f $$f.$$today; \ + cp -p $$f $$f.$$today; \ + $(SED) -e '/TAG_BACULA_DIR/d;' \ + < $$f.$$today > $$f; \ + chmod 644 $$f; \ + fi; \ + done + +clean: + @rm -f 1 2 3 + +distclean: clean + @rm -f bacula-sd bacula-fd bacula-dir + @rm -f Makefile bacula-*.spec + +devclean: clean + @rm -f bacula-sd bacula-fd bacula-dir + @rm -f Makefile bacula-*.spec diff --git a/platforms/openbsd/bacula-dir.in b/platforms/openbsd/bacula-dir.in new file mode 100755 index 00000000..78dde036 --- /dev/null +++ b/platforms/openbsd/bacula-dir.in @@ -0,0 +1,42 @@ +#! /bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# bacula This shell script takes care of starting and stopping +# the bacula Director daemon +# +# chkconfig: 2345 90 9 +# description: The Leading Open Source Backup Solution. +# +# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +RETVAL=0 +case "$1" in + start) + echo "Starting the Bacula Director: " + @sbindir@/bacula-dir $2 -c @sysconfdir@/bacula-dir.conf + RETVAL=$? + echo + [ $RETVAL -eq 0 ] && touch @subsysdir@/bacula-dir + ;; + stop) + echo "Stopping the Director daemon: " +# killproc @sbindir@/bacula-dir + RETVAL=$? + echo + [ $RETVAL -eq 0 ] && rm -f @subsysdir@/bacula-dir + ;; + restart) + $0 stop + sleep 5 + $0 start + RETVAL=$? + ;; + *) + echo "Usage: $0 {start|stop|restart}" + exit 1 + ;; +esac +exit $RETVAL diff --git a/platforms/openbsd/bacula-fd.in b/platforms/openbsd/bacula-fd.in new file mode 100755 index 00000000..e98f7ad2 --- /dev/null +++ b/platforms/openbsd/bacula-fd.in @@ -0,0 +1,42 @@ +#! /bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# bacula This shell script takes care of starting and stopping +# the bacula File daemon. +# +# chkconfig: 2345 90 9 +# description: The Leading Open Source Backup Solution. +# +# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +RETVAL=0 +case "$1" in + start) + echo "Starting the Bacula File daemon: " + @sbindir@/bacula-fd $2 -c @sysconfdir@/bacula-fd.conf + RETVAL=$? + echo + [ $RETVAL -eq 0 ] && touch @subsysdir@/bacula-fd + ;; + stop) + echo "Stopping the Bacula File daemon: " +# killproc @sbindir@/bacula-fd + RETVAL=$? + echo + [ $RETVAL -eq 0 ] && rm -f @subsysdir@/bacula-fd + ;; + restart) + $0 stop + sleep 5 + $0 start + RETVAL=$? + ;; + *) + echo "Usage: $0 {start|stop|restart}" + exit 1 + ;; +esac +exit $RETVAL diff --git a/platforms/openbsd/bacula-sd.in b/platforms/openbsd/bacula-sd.in new file mode 100755 index 00000000..7aafe60b --- /dev/null +++ b/platforms/openbsd/bacula-sd.in @@ -0,0 +1,42 @@ +#! /bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# bacula This shell script takes care of starting and stopping +# the bacula Storage daemon. +# +# chkconfig: 2345 90 9 +# description: The Leading Open Source Backup Solution. +# +# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +RETVAL=0 +case "$1" in + start) + echo "Starting the Bacula Storage daemon: " + @sbindir@/bacula-sd $2 -c @sysconfdir@/bacula-sd.conf + RETVAL=$? + echo + [ $RETVAL -eq 0 ] && touch @subsysdir@/bacula-sd + ;; + stop) + echo "Stopping the Bacula Storage daemon: " +# killproc @sbindir@/bacula-sd + RETVAL=$? + echo + [ $RETVAL -eq 0 ] && rm -f @subsysdir@/bacula-sd + ;; + restart) + $0 stop + sleep 5 + $0 start + RETVAL=$? 
+ ;; + *) + echo "Usage: $0 {start|stop|restart}" + exit 1 + ;; +esac +exit $RETVAL diff --git a/platforms/osx/Makefile.in b/platforms/osx/Makefile.in new file mode 100644 index 00000000..c51ba18d --- /dev/null +++ b/platforms/osx/Makefile.in @@ -0,0 +1,166 @@ +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# This is the makefile template for the platform directory +# which contains general platform installation. +# +# 17 August 2009 -- Lorenz Schori +# +# for Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + + +# bacula version and download site +BACULA_VERSION:=@VERSION@ +BACULA_DL_URL:=http://downloads.sourceforge.net/project/bacula/bacula/${BACULA_VERSION}/bacula-${BACULA_VERSION}.tar.gz + +# Build universal binary. Comment out when building versions of bacula < 3.0.0 +ARCHFLAGS:= +MACOSX_SDK_SYSROOT:= +MACOSX_VERSION_FLAGS:= + +# Tools +PB:=/usr/bin/pkgbuild +MAKE:=/usr/bin/make +CURL:=/usr/bin/curl +TAR:=/usr/bin/tar + +########### you should not have to edit anything beyond this line ########### + +# Build paths +DL_DIR:=dl +BUILD_DIR:=build +PRODUCTS_DIR:=products + +WORKING_DIR:=${BUILD_DIR}/${BACULA_VERSION} +BACULA_TAR:=${DL_DIR}/bacula-${BACULA_VERSION}.tar.gz +BACULA_SOURCE:=${WORKING_DIR}/bacula-${BACULA_VERSION} +BACULA_DESTDIR:=${WORKING_DIR}/destdir +BACULA_PREFIX:=/usr/local/bacula-${BACULA_VERSION} +BACULA_FD_CONF:=/Library/Preferences/bacula/bacula-fd.conf +BACULA_WORKING_DIR:=/private/var/bacula/working +BACULA_PMDOC:=${WORKING_DIR}/installer.pmdoc + +# Detect whether we sit inside the Bacula source tree. In this case we won't +# download the tar from sourceforge but instead work with what is there +# already +CURSUB:=$(CURDIR:%/platforms/osx=%) +ifneq ($(CURDIR),$(CURSUB)) + BACULA_TAR:= + BACULA_SOURCE:=../../ +# BACULA_VERSION:=$(shell sed -n 's,^VERSION=,,p' $(CURSUB)/autoconf/Make.common) +endif + +PACKAGE_TITLE:=Bacula File Daemon ${BACULA_VERSION} +PACKAGE_ID:=org.bacula.bacula-fd.pkg +PACKAGE_DIR:=${PRODUCTS_DIR}/${PACKAGE_TITLE} +PACKAGE_BUNDLE:=${PACKAGE_DIR}/${PACKAGE_TITLE}.pkg +PACKAGE_DMG:=${PRODUCTS_DIR}/${PACKAGE_TITLE}.dmg +PACKAGE_RESOURCES:=ReadMe.html postflight preupgrade +PACKAGE_XRESOURCES:=postflight preupgrade + +# Flags for the toolchain +CONFIGFLAGS:= \ + --enable-client-only \ + --prefix=${BACULA_PREFIX} \ + --with-dir-password=@DIR_PW@ \ + --with-fd-password=@FD_PW@ \ + --with-sd-password=@SD_PW@ \ + --with-mon-dir-password=@MON_DIR_PW@ \ + --with-mon-fd-password=@MON_FD_PW@ \ + --with-mon-sd-password=@MON_SD_PW@ \ + --with-basename=@BASENAME@ \ + --with-hostname=@HOSTNAME@ \ + --with-working-dir=${BACULA_WORKING_DIR} +CPPFLAGS:= +CFLAGS:=-O -g +CXXFLAGS:=${CFLAGS} +LDFLAGS:= + +# required on snow leopard: compiling for 10.4 requires usage of gcc 4.0 +# system defaults to version 4.2 +CC:=gcc +CPP:=cpp +CXX:=g++ +CXXPP:=cpp + +# Placeholders for *.in files +INFILE_SUBST=\ + -e "s,@PREFIX@,${BACULA_PREFIX},g" \ + -e "s,@BACULA_VERSION@,${BACULA_VERSION},g" \ + -e "s,@FD_CONF@,${BACULA_FD_CONF},g" \ + -e "s,@BACULA_DESTDIR@,${BACULA_DESTDIR},g" \ + -e "s,@PACKAGE_ID@,${PACKAGE_ID},g" + +dmg: pkg + hdiutil create -srcfolder "${PACKAGE_DIR}" "${PACKAGE_DMG}" + +pkg: ${BACULA_DESTDIR} ${BACULA_PMDOC} ${WORKING_DIR}/resources + mkdir -p "${PACKAGE_DIR}" + + mkdir -p "${CURDIR}/${BACULA_DESTDIR}${WORKING_DIR}" + + ${PB} --identifier "${PACKAGE_ID}" --root "${CURDIR}/${BACULA_DESTDIR}" "Bacula Enterprise File Daemon-${BACULA_VERSION}.pkg" + + cp "Bacula Enterprise File Daemon-${BACULA_VERSION}.pkg" 
"${PACKAGE_DIR}" + cp ${WORKING_DIR}/resources/ReadMe.html "${PACKAGE_DIR}/ReadMe.html" + + sed ${INFILE_SUBST} \ + files/uninstall.command.in > "${PACKAGE_DIR}/uninstall.command"; + chmod 0775 "${PACKAGE_DIR}/uninstall.command" + +${BACULA_PMDOC}: ${BACULA_DESTDIR} ${WORKING_DIR}/resources + mkdir -p "${BACULA_PMDOC}" + + for f in index.xml 01destdir.xml; do \ + sed ${INFILE_SUBST} \ + files/installer.pmdoc.in/$$f > "${BACULA_PMDOC}/$$f"; \ + done + + python installer-gencontents.py ${BACULA_DESTDIR} > ${BACULA_PMDOC}/01destdir-contents.xml + +${WORKING_DIR}/resources: ${BACULA_DESTDIR} + mkdir -p "${WORKING_DIR}/resources" + + for res in ${PACKAGE_RESOURCES}; do \ + sed ${INFILE_SUBST} \ + resources/$$res.in > "${WORKING_DIR}/resources/$$res"; \ + done + + for xres in ${PACKAGE_XRESOURCES}; do \ + chmod +x "${WORKING_DIR}/resources/$$xres"; \ + done + + cp "${BACULA_SOURCE}/LICENSE" "${WORKING_DIR}/resources/License.txt" + +${BACULA_DESTDIR}: ${BACULA_SOURCE} + (cd ${BACULA_SOURCE} && ./configure ${CONFIGFLAGS} CPPFLAGS="${CPPFLAGS}" CFLAGS="${CFLAGS}" CXXFLAGS="${CXXFLAGS}" LDFLAGS="${LDFLAGS}" CC="${CC}" CPP="${CPP}" CXX="${CXX}" CXXPP="${CXXPP}") + ${MAKE} -C ${BACULA_SOURCE} + ${MAKE} -C ${BACULA_SOURCE} install DESTDIR="${CURDIR}/${BACULA_DESTDIR}" + + rm -rf "${BACULA_DESTDIR}/tmp" + + for conffile in ${BACULA_DESTDIR}${BACULA_PREFIX}/etc/*.conf; do \ + mv $$conffile $$conffile.example; \ + done + + mkdir -p "${BACULA_DESTDIR}${BACULA_PREFIX}/Library/LaunchDaemons" + sed ${INFILE_SUBST} files/org.bacula.bacula-fd.plist.in \ + > "${BACULA_DESTDIR}${BACULA_PREFIX}/Library/LaunchDaemons/org.bacula.bacula-fd.plist" + +${BACULA_SOURCE}: ${BACULA_TAR} + mkdir -p "${WORKING_DIR}" + ${TAR} -xzf "${BACULA_TAR}" -C "${WORKING_DIR}" + +${BACULA_TAR}: + mkdir -p "${DL_DIR}" + ${CURL} -L -o "${BACULA_TAR}" "${BACULA_DL_URL}" + +.PHONY: distclean +distclean: clean + rm -rf "${DL_DIR}" "${PRODUCTS_DIR}" + +.PHONY: clean +clean: + rm -rf "${BUILD_DIR}" "${PACKAGE_DIR}" "${PACKAGE_DMG}" diff --git a/platforms/osx/README b/platforms/osx/README new file mode 100644 index 00000000..4b1391d6 --- /dev/null +++ b/platforms/osx/README @@ -0,0 +1,55 @@ +Bacula file daemon package builder for Mac OS X +=============================================== + +This package build script lets you download, compile and package the bacula +file daemon easily with a single command line. In addition to the Bacula file daemon +the resulting installer package contains a short ReadMe file with instructions +on how to install and use the software. Also a basic launchd property list is +included along with preupgrade and postflight installer scripts to stop and +restart the daemon while upgrading. To ensure the security of the users the +passwords in the configuration files are generated during a first time +installation and file permissions are checked and corrected on upgrades. + +Requirements: +* Mac OS X 10.5 or later (building/packaging), 10.4 or later (deployment) +* Mac OS X developer tools installed + +Example (compile and create package from within Bacula source tree): +$ ./configure --enable-client-only +$ make -C platforms/osx + +By moving the contents of platforms/osx to some other directory (e.g. +~/bacula-standalone-bulder), it is possible to create installer packages from +older bacula versions and for specific platforms. + +Examples (standalone mode): + 1. Create an installer package from the newest supported bacula source + containing the bacula file daemon as a PPC/Intel universal binary. + $ make dmg + + 2. 
Create an installer package from a specified version of the bacula source + containing the bacula file daemon as a PPC/Intel universal binary. + $ make dmg BACULA_VERSION=3.0.0 + + 3. Create an installer package from the newest supported bacula source + containing the bacula file daemon for PPC architecture only. + $ make dmg ARCHFLAGS="-arch ppc" PACKAGE_TITLE="Bacula File Daemon PPC x.y.z" + + 4. Create an installer package from a specified version of the bacula source + containing the bacula file daemon for the current architecture. + $ make dmg BACULA_VERSION=2.4.4 ARCHFLAGS="" + +You find the built disk images in the products folder. + +Misc commands: + 1. Cleanup the build directory + $ make clean + 2. Additionally remove the downloads and products: + $ make distclean + +Additional notes on the build-script: + * The following *FLAGS are used tu build universal binary with 10.4 SDK: + CPPFLAGS=-isysroot /Developer/SDKs/MacOSX10.4u.sdk -mmacosx-version-min=10.4 + CFLAGS=-O -g -arch i386 -arch ppc + CXXFLAGS=${CFLAGS} + LDFLAGS=-Wl,-syslibroot,/Developer/SDKs/MacOSX10.4u.sdk -mmacosx-version-min=10.4 -arch i386 -arch ppc diff --git a/platforms/osx/files/installer.pmdoc.in/01destdir.xml b/platforms/osx/files/installer.pmdoc.in/01destdir.xml new file mode 100644 index 00000000..b8a8cfad --- /dev/null +++ b/platforms/osx/files/installer.pmdoc.in/01destdir.xml @@ -0,0 +1,27 @@ + + + + @PACKAGE_ID@ + @BACULA_VERSION@ + + + + destdir + / + + + + + version + identifier + parent + + + resources/postflight + resources/preupgrade + + + 01destdir-contents.xml + /\.DS_Store$ + + diff --git a/platforms/osx/files/installer.pmdoc.in/index.xml b/platforms/osx/files/installer.pmdoc.in/index.xml new file mode 100644 index 00000000..04db0671 --- /dev/null +++ b/platforms/osx/files/installer.pmdoc.in/index.xml @@ -0,0 +1,30 @@ + + + + Bacula File Daemon @BACULA_VERSION@ + org.bacula + + + + + + + + + + + + + + + + resources/License.txt + resources/ReadMe.html + + + + 01destdir.xml + properties.title + properties.anywhereDomain + properties.systemDomain + diff --git a/platforms/osx/files/org.bacula.bacula-fd.plist.in b/platforms/osx/files/org.bacula.bacula-fd.plist.in new file mode 100644 index 00000000..47a8da15 --- /dev/null +++ b/platforms/osx/files/org.bacula.bacula-fd.plist.in @@ -0,0 +1,17 @@ + + + + + Label + org.bacula.bacula-fd + ProgramArguments + + @PREFIX@/sbin/bacula-fd + -f + -c + @FD_CONF@ + + RunAtLoad + + + diff --git a/platforms/osx/files/uninstall.command.in b/platforms/osx/files/uninstall.command.in new file mode 100644 index 00000000..90b1efaa --- /dev/null +++ b/platforms/osx/files/uninstall.command.in @@ -0,0 +1,29 @@ +#!/bin/sh + +echo "Bacula file daemon @BACULA_VERSION@ uninstaller" + +# Remove startup item +echo "* Bacula startup item... " +if [ -f /Library/LaunchDaemons/org.bacula.bacula-fd.plist ]; then + sudo launchctl unload /Library/LaunchDaemons/org.bacula.bacula-fd.plist + sudo rm /Library/LaunchDaemons/org.bacula.bacula-fd.plist + echo " + removed successfully" +else + echo " - not found, nothing to remove" +fi + +echo "* Bacula file daemon... " +if [ -d "/usr/local/bacula-@BACULA_VERSION@" ]; then + sudo rm -r "/usr/local/bacula-@BACULA_VERSION@" + echo " + removed successfully" +else + echo " - not found, nothing to remove" +fi + +echo "* Installer receipt... 
" +if [ -d "/Library/Receipts/Bacula File Daemon @BACULA_VERSION@.pkg" ]; then + sudo rm -r "/Library/Receipts/Bacula File Daemon @BACULA_VERSION@.pkg" + echo " + removed successfully" +else + echo " - not found, nothing to remove" +fi diff --git a/platforms/osx/installer-gencontents.py b/platforms/osx/installer-gencontents.py new file mode 100644 index 00000000..a061946c --- /dev/null +++ b/platforms/osx/installer-gencontents.py @@ -0,0 +1,56 @@ +import os +import stat +import sys +from xml.dom.minidom import Document + +def createFtags(doc, path, isrootpath=True): + """ + create f-tags for packagemaker's contents.xml files recursively replacing + owner with "root" and group with "wheel" in each entry + """ + + statinfo = os.lstat(path) + isdir = stat.S_ISDIR(statinfo[0]) + + ftag = doc.createElement("f") + ftag.setAttribute("n",os.path.split(path)[1]) + ftag.setAttribute("p","%d" % statinfo[0]) + ftag.setAttribute("o","root") + ftag.setAttribute("g","wheel") + + # we additionally have to create owner and group + # within each f-tag + ftag.appendChild(doc.createElement("mod").appendChild(doc.createTextNode("owner"))) + ftag.appendChild(doc.createElement("mod").appendChild(doc.createTextNode("group"))) + + if isrootpath: + # needs to be the full path + ftag.setAttribute("pt",os.path.abspath(path)) + # no idea what those attributes mean: + ftag.setAttribute("m","false") + ftag.setAttribute("t","file") + + if isdir: + for item in os.listdir(path): + ftag.appendChild(createFtags(doc, os.path.join(path,item), False)) + + return ftag + +def generateContentsDocument(path): + """ + create new minidom document and generate contents by recursively traver- + sing the given path. + """ + + doc = Document() + root = doc.createElement("pkg-contents") + root.setAttribute("spec","1.12") + root.appendChild(createFtags(doc, path)) + doc.appendChild(root) + + return doc + +if __name__ == "__main__": + # construct document + doc = generateContentsDocument(sys.argv[1]) + print doc.toprettyxml(indent=" "), diff --git a/platforms/osx/resources/ReadMe.html.in b/platforms/osx/resources/ReadMe.html.in new file mode 100644 index 00000000..41b396fe --- /dev/null +++ b/platforms/osx/resources/ReadMe.html.in @@ -0,0 +1,62 @@ + + + + + + Bacula File Daemon @BACULA_VERSION@ + + + + + +

+ Bacula File Daemon @BACULA_VERSION@
+
+ Bacula is an Open Source, enterprise ready, network based backup program.
+ This installer package contains the bacula file daemon for Mac OS X 10.4
+ or later, built as a universal binary for PPC and Intel processors.
+
+ Requirements
+
+ The bacula file daemon is only the client component of the backup system.
+ For proper operation the file daemon needs to have access to a bacula
+ director and storage daemon, typically installed on a server machine in
+ the local network.
+
+ Installation
+
+ Open the Bacula File Daemon @BACULA_VERSION@ installer package and follow
+ the directions given to you.
+
+ Configuration
+
+ After the installation is complete you have to adapt the configuration
+ file to your needs. The file is located in the system-wide Preferences
+ folder:
+ /Library/Preferences/bacula/bacula-fd.conf
+
+ Note: The configuration file contains passwords and therefore must not be
+ accessible to any user except root. Use the following command line to
+ edit the file as the root user:
+ sudo /Applications/TextEdit.app/Contents/MacOS/TextEdit /Library/Preferences/bacula/bacula-fd.conf
+
+ Operating the File Daemon
+
+ Use launchctl to enable and disable the bacula file daemon.
+ sudo launchctl load -w /Library/LaunchDaemons/org.bacula.bacula-fd.plist
+ sudo launchctl unload -w /Library/LaunchDaemons/org.bacula.bacula-fd.plist
+
+ Uninstalling the File Daemon
+
+ Double-click the script uninstall.command to remove the bacula file
+ daemon completely from your system.
+
+ Resources
+
+ Refer to the bacula website for more information:
+
+ http://bacula.org + + diff --git a/platforms/osx/resources/postflight.in b/platforms/osx/resources/postflight.in new file mode 100644 index 00000000..f0d18ef2 --- /dev/null +++ b/platforms/osx/resources/postflight.in @@ -0,0 +1,36 @@ +#!/bin/sh + +function genpw() { + openssl rand -base64 33 +} + +# copy example config files and fix permissions +if [ ! -f $3@FD_CONF@ ]; then + DIR_PW=$(genpw) + FD_PW=$(genpw) + SD_PW=$(genpw) + MON_DIR_PW=$(genpw) + MON_FD_PW=$(genpw) + MON_SD_PW=$(genpw) + HOSTNAME=$(hostname -s) + mkdir -p "$(dirname $3@FD_CONF@)" + sed \ + -e "s,@DIR_PW@,$DIR_PW,g" \ + -e "s,@FD_PW@,$FD_PW,g" \ + -e "s,@SD_PW@,$SD_PW,g" \ + -e "s,@MON_DIR_PW@,$MON_DIR_PW,g" \ + -e "s,@MON_FD_PW@,$MON_FD_PW,g" \ + -e "s,@MON_SD_PW@,$MON_SD_PW,g" \ + -e "s,@BASENAME@,$HOSTNAME,g" \ + -e "s,@HOSTNAME@,$HOSTNAME,g" \ + "$3@PREFIX@/etc/bacula-fd.conf.example" > "$3@FD_CONF@" +fi +chmod 0600 "$3@FD_CONF@" + +# install startup item +mkdir -p -m 0755 "$3/Library/LaunchDaemons" +chmod 0644 "$3@PREFIX@/Library/LaunchDaemons/org.bacula.bacula-fd.plist" +ln -fs "$3@PREFIX@/Library/LaunchDaemons/org.bacula.bacula-fd.plist" "$3/Library/LaunchDaemons/org.bacula.bacula-fd.plist" + +# Load startup item +/bin/launchctl load "$3/Library/LaunchDaemons/org.bacula.bacula-fd.plist" diff --git a/platforms/osx/resources/preupgrade.in b/platforms/osx/resources/preupgrade.in new file mode 100644 index 00000000..99f8d30d --- /dev/null +++ b/platforms/osx/resources/preupgrade.in @@ -0,0 +1,7 @@ +#!/bin/sh +# unload bacula file daemon before upgrading + +if [ -f "$3/Library/LaunchDaemons/org.bacula.bacula-fd.plist" ]; then + /bin/launchctl unload "$3/Library/LaunchDaemons/org.bacula.bacula-fd.plist" +fi + diff --git a/platforms/redhat/Makefile.in b/platforms/redhat/Makefile.in new file mode 100644 index 00000000..2cd8f1d7 --- /dev/null +++ b/platforms/redhat/Makefile.in @@ -0,0 +1,93 @@ +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# This file is used as the template to create the +# Makefile for the RedHat specific installation. 
+# +# 15 November 2001 -- Kern Sibbald +# +# for Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +INSTALL = @INSTALL@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ -m 754 + +nothing: + +install: install-autostart + +install-autostart: install-autostart-fd install-autostart-sd install-autostart-dir + +install_logrotate: + @$(INSTALL_PROGRAM) ../../scripts/logrotate $(DESTDIR)/etc/logrotate.d/bacula + +install-autostart-fd: + @if test x$(DESTDIR) = x -a -f /etc/rc.d/init.d/bacula-fd; then \ + /sbin/chkconfig --del bacula-fd; \ + fi + @$(INSTALL_PROGRAM) bacula-fd $(DESTDIR)/etc/rc.d/init.d/bacula-fd + # set symlinks for script at startup and shutdown + @if test x$(DESTDIR) = x ; then \ + /sbin/chkconfig --add bacula-fd; \ + fi + + +install-autostart-sd: + @if test x$(DESTDIR) = x -a -f /etc/rc.d/init.d/bacula-sd; then \ + /sbin/chkconfig --del bacula-sd; \ + fi + @$(INSTALL_PROGRAM) bacula-sd $(DESTDIR)/etc/rc.d/init.d/bacula-sd + # set symlinks for script at startup and shutdown + @if test x$(DESTDIR) = x ; then \ + /sbin/chkconfig --add bacula-sd; \ + fi + + +install-autostart-dir: + @if test x$(DESTDIR) = x -a -f /etc/rc.d/init.d/bacula-dir; then \ + /sbin/chkconfig --del bacula-dir; \ + fi + @$(INSTALL_PROGRAM) bacula-dir $(DESTDIR)/etc/rc.d/init.d/bacula-dir + # set symlinks for script at startup and shutdown + @if test x$(DESTDIR) = x ; then \ + /sbin/chkconfig --add bacula-dir; \ + fi + + +uninstall: uninstall-autostart + +uninstall-autostart: uninstall-autostart-fd uninstall-autostart-sd uninstall-autostart-dir + +uninstall-logrotate: + @rm -f $(DESTDIR)/etc/logrotate.d/bacula + +uninstall-autostart-fd: + @if test x$(DESTDIR) = x -a -f /etc/rc.d/init.d/bacula-fd; then \ + /sbin/chkconfig --del bacula-fd; \ + fi + @rm -f $(DESTDIR)/etc/rc.d/init.d/bacula-fd + + +uninstall-autostart-sd: + @if test x$(DESTDIR) = x -a -f /etc/rc.d/init.d/bacula-sd; then \ + /sbin/chkconfig --del bacula-sd; \ + fi + @rm -f $(DESTDIR)/etc/rc.d/init.d/bacula-sd + +uninstall-autostart-dir: + @if test x$(DESTDIR) = x -a -f /etc/rc.d/init.d/bacula-dir; then \ + /sbin/chkconfig --del bacula-dir; \ + fi + @rm -f $(DESTDIR)/etc/rc.d/init.d/bacula-dir + +clean: + @rm -f 1 2 3 + +distclean: clean + @rm -f Makefile + @rm -f bacula-sd bacula-fd bacula-dir + +devclean: clean + @rm -f Makefile + @rm -f bacula-sd bacula-fd bacula-dir diff --git a/platforms/redhat/bacula-dir.in b/platforms/redhat/bacula-dir.in new file mode 100755 index 00000000..edbf1ea3 --- /dev/null +++ b/platforms/redhat/bacula-dir.in @@ -0,0 +1,93 @@ +#! /bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# bacula This shell script takes care of starting and stopping +# the bacula Director daemon +# +# chkconfig: 2345 92 9 +# description: The Leading Open Source Backup Solution. +# +# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +# Source function library +. /etc/rc.d/init.d/functions + +DAEMON_OPTIONS='' +DAEMON_USER='yes' +DIR_USER=@dir_user@ +DIR_GROUP=@dir_group@ +DIR_OPTIONS='' +OS=`uname -s` + +# if /lib/tls exists, force Bacula to use the glibc pthreads instead +if [ -d "/lib/tls" -a $OS = "Linux" -a `uname -r | cut -c1-3` = "2.4" ] ; then + export LD_ASSUME_KERNEL=2.4.19 +fi + +# pull in any user defined DIR_DIR_OPTIONS, DIR_USER, DIR_GROUP or DAEMON_USER +[ -f /etc/sysconfig/bacula ] && . 
/etc/sysconfig/bacula + +# +# Disable Glibc malloc checks, it doesn't help and it keeps from getting +# good dumps +MALLOC_CHECK_=0 +export MALLOC_CHECK_ + +RETVAL=0 +case "$1" in + start) + if [ "${DIR_USER}" != '' ]; then + DIR_OPTIONS="${DIR_OPTIONS} -u ${DIR_USER}" + fi + + if [ "${DIR_GROUP}" != '' ]; then + DIR_OPTIONS="${DIR_OPTIONS} -g ${DIR_GROUP}" + fi + + if [ "${DAEMON_USER}" != '' -a "${DIR_USER}" != '' ]; then + DIR_OPTIONS="" + if [ "${DIR_GROUP}" != '' ]; then + chown ${DIR_USER}:${DIR_GROUP} @working_dir@/bacula-dir* 2> /dev/null + else + chown ${DIR_USER} @working_dir@/bacula-dir* 2> /dev/null + fi + DAEMON_OPTIONS="--user ${DIR_USER}" + fi + echo -n "Starting Bacula Director services: " + daemon $DAEMON_OPTIONS @sbindir@/bacula-dir $2 ${DIR_OPTIONS} -c @sysconfdir@/bacula-dir.conf + RETVAL=$? + echo + if [ $RETVAL -eq 0 ]; then + touch @subsysdir@/bacula-dir + logger -p daemon.info "bacula-dir started" >/dev/null 2>/dev/null + fi + ;; + stop) + echo -n "Stopping Bacula Director services: " + killproc @sbindir@/bacula-dir + RETVAL=$? + echo + if [ $RETVAL -eq 0 ]; then + rm -f @subsysdir@/bacula-dir + logger -p daemon.info "bacula-dir stopped" >/dev/null 2>/dev/null + fi + ;; + restart) + $0 stop + sleep 5 + $0 start + RETVAL=$? + ;; + status) + status @sbindir@/bacula-dir + RETVAL=$? + ;; + *) + echo "Usage: $0 {start|stop|restart|status}" + exit 1 + ;; +esac +exit $RETVAL diff --git a/platforms/redhat/bacula-fd.in b/platforms/redhat/bacula-fd.in new file mode 100755 index 00000000..72a897ae --- /dev/null +++ b/platforms/redhat/bacula-fd.in @@ -0,0 +1,88 @@ +#! /bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# bacula This shell script takes care of starting and stopping +# the bacula File daemon. +# +# chkconfig: 2345 91 9 +# description: The Leading Open Source Backup Solution. +# +# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +# Source function library +. /etc/rc.d/init.d/functions + +DAEMON_OPTIONS='' +DAEMON_USER='yes' +FD_USER=@fd_user@ +FD_GROUP=@fd_group@ +FD_OPTIONS='' +OS=`uname -s` + +# if /lib/tls exists, force Bacula to use the glibc pthreads instead +if [ -d "/lib/tls" -a $OS = "Linux" -a `uname -r | cut -c1-3` = "2.4" ] ; then + export LD_ASSUME_KERNEL=2.4.19 +fi + +# pull in any user defined FD_OPTIONS, FD_USER, FD_GROUP +[ -f /etc/sysconfig/bacula ] && . /etc/sysconfig/bacula + +# +# Disable Glibc malloc checks, it doesn't help and it keeps from getting +# good dumps +MALLOC_CHECK_=0 +export MALLOC_CHECK_ + +RETVAL=0 +case "$1" in + start) + if [ "${FD_USER}" != '' ]; then + FD_OPTIONS="${FD_OPTIONS} -u ${FD_USER}" + fi + + if [ "${FD_GROUP}" != '' ]; then + FD_OPTIONS="${FD_OPTIONS} -g ${FD_GROUP}" + fi + + if [ "${DAEMON_USER}" != '' -a "${FD_USER}" != '' ]; then + FD_OPTIONS="" + if [ "${FD_GROUP}" != '' ]; then + chown ${FD_USER}:${FD_GROUP} @working_dir@/bacula-fd* 2> /dev/null + else + chown ${FD_USER} @working_dir@/bacula-fd* 2> /dev/null + fi + DAEMON_OPTIONS="--user ${FD_USER}" + fi + + echo -n "Starting Bacula File services: " + daemon ${DAEMON_OPTIONS} @sbindir@/bacula-fd $2 ${FD_OPTIONS} -c @sysconfdir@/bacula-fd.conf + RETVAL=$? + echo + [ $RETVAL -eq 0 ] && touch @subsysdir@/bacula-fd + ;; + stop) + echo -n "Stopping Bacula File services: " + killproc @sbindir@/bacula-fd + RETVAL=$? + echo + [ $RETVAL -eq 0 ] && rm -f @subsysdir@/bacula-fd + ;; + restart) + $0 stop + sleep 5 + $0 start + RETVAL=$? + ;; + status) + status @sbindir@/bacula-fd + RETVAL=$? 
+ ;; + *) + echo "Usage: $0 {start|stop|restart|status}" + exit 1 + ;; +esac +exit $RETVAL diff --git a/platforms/redhat/bacula-sd.in b/platforms/redhat/bacula-sd.in new file mode 100755 index 00000000..e8abce3f --- /dev/null +++ b/platforms/redhat/bacula-sd.in @@ -0,0 +1,94 @@ +#! /bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# bacula This shell script takes care of starting and stopping +# the bacula Storage daemon. +# +# chkconfig: 2345 90 9 +# description: The Leading Open Source Backup Solution. +# +# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +# Source function library +. /etc/rc.d/init.d/functions + +DAEMON_OPTIONS='' +DAEMON_USER=yes +SD_USER=@sd_user@ +SD_GROUP=@sd_group@ +SD_OPTIONS='' +OS=`uname -s` + +# if /lib/tls exists, force Bacula to use the glibc pthreads instead +if [ -d "/lib/tls" -a $OS = "Linux" -a `uname -r | cut -c1-3` = "2.4" ] ; then + export LD_ASSUME_KERNEL=2.4.19 +fi + +# pull in any user defined SD_OPTIONS, SD_USER, SD_GROUP or DAEMON_USER +[ -f /etc/sysconfig/bacula ] && . /etc/sysconfig/bacula + +# +# Disable Glibc malloc checks, it doesn't help and it keeps from getting +# good dumps +MALLOC_CHECK_=0 +export MALLOC_CHECK_ + +RETVAL=0 +case "$1" in + start) + if [ "${SD_USER}" != '' ]; then + SD_OPTIONS="${SD_OPTIONS} -u ${SD_USER}" + fi + + if [ "${SD_GROUP}" != '' ]; then + SD_OPTIONS="${SD_OPTIONS} -g ${SD_GROUP}" + fi + + if [ "${DAEMON_USER}" != '' -a "${SD_USER}" != '' ]; then + SD_OPTIONS="" + if [ "${SD_GROUP}" != '' ]; then + chown ${SD_USER}:${SD_GROUP} @working_dir@/bacula-sd* 2> /dev/null + else + chown ${SD_USER} @working_dir@/bacula-sd* 2> /dev/null + fi + DAEMON_OPTIONS="--user ${SD_USER}" + fi + + echo -n "Starting Bacula Storage services: " + daemon $DAEMON_OPTIONS @sbindir@/bacula-sd $2 ${SD_OPTIONS} -c @sysconfdir@/bacula-sd.conf + RETVAL=$? + echo + if [ $RETVAL -eq 0 ]; then + touch @subsysdir@/bacula-sd + logger -p daemon.info "bacula-sd started" >/dev/null 2>/dev/null + fi + ;; + stop) + echo -n "Stopping Bacula Storage services: " + killproc @sbindir@/bacula-sd + RETVAL=$? + echo + if [ $RETVAL -eq 0 ]; then + rm -f @subsysdir@/bacula-sd + logger -p daemon.info "bacula-sd stopped" >/dev/null 2>/dev/null + fi + ;; + restart) + $0 stop + sleep 5 + $0 start + RETVAL=$? + ;; + status) + status @sbindir@/bacula-sd + RETVAL=$? 
+ ;; + *) + echo "Usage: $0 {start|stop|restart|status}" + exit 1 + ;; +esac +exit $RETVAL diff --git a/platforms/rpms/redhat/bacula-aligned.spec.in b/platforms/rpms/redhat/bacula-aligned.spec.in new file mode 100644 index 00000000..e6906a5e --- /dev/null +++ b/platforms/rpms/redhat/bacula-aligned.spec.in @@ -0,0 +1,142 @@ +# Bacula RPM spec file +# +# Copyright (C) 2013-2014 Bacula Systems SA + +# Platform Build Configuration +# TODO: merge all plugins into one spec file + +# basic defines for every build +%define _release @RELEASE@ +%define _version @VERSION@ +%define product bacula +%define _lsm @LSMDATE@ +%define _packager Kern Sibbald +%define manpage_ext gz + +# Don't strip binaries +%define __os_install_post %{nil} +%define __debug_install_post %{nil} +%define debug_package %{nil} + +# Installation Directory locations +%define _prefix /opt/bacula +%define _sbindir /opt/bacula/bin +%define _bindir /opt/bacula/bin +%define _subsysdir /opt/bacula/working +%define sqlite_bindir /opt/bacula/sqlite +%define _mandir /usr/share/man +%define sysconf_dir /opt/bacula/etc +%define scripts_dir /opt/bacula/scripts +%define working_dir /opt/bacula/working +%define pid_dir /opt/bacula/working +%define plugin_dir /opt/bacula/plugins +%define lib_dir /opt/bacula/lib + +# Daemon user:group Don't change them unless you know what you are doing +%define director_daemon_user bacula +%define storage_daemon_user bacula +%define file_daemon_user root +%define daemon_group bacula +# group that has write access to tape devices, usually disk on Linux +%define storage_daemon_group disk + +#-------------------------------------------------------------------------- +# it should not be necessary to change anything below here for a release +# except for patch macros in the setup section +#-------------------------------------------------------------------------- + +%define base_package_name bacula + +%{?contrib_packager:%define _packager %{contrib_packager}} + +Summary: Bacula - The Network Backup Solution +Name: %{base_package_name}-aligned +Version: %{_version} +Release: %{_release} +Group: System Environment/Daemons +License: AGPLv3 +BuildRoot: %{_tmppath}/%{name}-root +URL: http://www.bacula.org/ +Vendor: The Bacula Team +Packager: %{_packager} +Prefix: %{_prefix} +Distribution: Bacula Aligned Volumes + +Source0: http://www.prdownloads.sourceforge.net/bacula/%{product}-%{version}.tar.gz + +Requires: bacula-sd, %{base_package_name}-libs = %{_version} + +# define the basic package description +%define blurb Bacula Aligned - The Network Backup Solution. +%define blurb2 Bacula Aligned plugin allows you to backup to a +%define blurb3 Volume with aligned blocks permitting easier deduplication. + +Summary: Bacula Aligned Volume Storage - The Network Backup Solution +Group: System Environment/Daemons + +%description +%{blurb} + +%{blurb2} +%{blurb3} + +This is Bacula Aligned Volumes Storage plugin. 
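Because the only payload of this package is the storage-daemon plugin listed in %files below, a quick post-install check is simply to confirm that the versioned driver landed in the plugin directory defined above; for example (package name per the Name: tag, path per %define plugin_dir):

$ rpm -ql bacula-aligned
$ ls -l /opt/bacula/plugins/bacula-sd-aligned-driver-*.so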
+ +%prep +%setup -T -D -n bacula-%{_version} -b 0 + +%build + +cd ../bacula-%{_version} +./configure \ + --prefix=%{_prefix} \ + --sbindir=%{_sbindir} \ + --sysconfdir=%{sysconf_dir} \ + --mandir=%{_mandir} \ + --with-scriptdir=%{scripts_dir} \ + --with-working-dir=%{working_dir} \ + --with-plugindir=%{plugin_dir} \ + --with-pid-dir=%{pid_dir} \ + --with-subsys-dir=%{_subsysdir} \ + --enable-smartalloc \ + --disable-bat \ + --enable-client-only \ + --with-dir-user=%{director_daemon_user} \ + --with-dir-group=%{daemon_group} \ + --with-sd-user=%{storage_daemon_user} \ + --with-sd-group=%{storage_daemon_group} \ + --with-fd-user=%{file_daemon_user} \ + --with-fd-group=%{daemon_group} \ + --with-basename="XXX_HOSTNAME_XXX" \ + --with-hostname="XXX_HOSTNAME_XXX" \ + --with-dir-password="XXX_REPLACE_WITH_DIRECTOR_PASSWORD_XXX" \ + --with-fd-password="XXX_REPLACE_WITH_CLIENT_PASSWORD_XXX" \ + --with-sd-password="XXX_REPLACE_WITH_STORAGE_PASSWORD_XXX" \ + --with-mon-dir-password="XXX_REPLACE_WITH_DIRECTOR_MONITOR_PASSWORD_XXX" \ + --with-mon-fd-password="XXX_REPLACE_WITH_CLIENT_MONITOR_PASSWORD_XXX" \ + --with-mon-sd-password="XXX_REPLACE_WITH_STORAGE_MONITOR_PASSWORD_XXX" + +make + +%install + +mkdir -p $RPM_BUILD_ROOT%{_sbindir} +mkdir -p $RPM_BUILD_ROOT%{plugin_dir} +mkdir -p $RPM_BUILD_ROOT%{scripts_dir} +mkdir -p $RPM_BUILD_ROOT%{lib_dir} + +make DESTDIR=$RPM_BUILD_ROOT -C ../bacula-%{_version}/src/stored install-aligned + +# Remove unneeded file(s) +rm -f $RPM_BUILD_ROOT%{plugin_dir}/bacula-sd-aligned-driver.so + +%files +%defattr(-,root,%{daemon_group}) +%attr(-, root, %{daemon_group}) %{plugin_dir}/bacula-sd-aligned-driver-%{_version}.so + +%clean +[ "$RPM_BUILD_ROOT" != "/" ] && rm -rf "$RPM_BUILD_ROOT" + +%changelog +* Mon Jul 3 2017 Davide Franco +- First version diff --git a/platforms/rpms/redhat/bacula-bat.spec.in b/platforms/rpms/redhat/bacula-bat.spec.in new file mode 100644 index 00000000..1b4a3bb1 --- /dev/null +++ b/platforms/rpms/redhat/bacula-bat.spec.in @@ -0,0 +1,348 @@ +# +# Bacula RPM spec file +# +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# + +# Platform Build Configuration + +# basic defines for every build +%define _release @RELEASE@ +%define _version @VERSION@ +%define depkgs_qt_version @DEPKGS_QT_VERSION@ +%define product bacula + +# Don't strip binaries +%define __os_install_post %{nil} +%define __debug_install_post %{nil} +%define debug_package %{nil} + +# this is the Qt version in depkgs_qt +%define qt4ver @BQT_VERSION@ + +%define _packager Kern Sibbald + +%define manpage_ext gz + +# Force single file build +%define single_dir 1 +%{?single_dir_install:%define single_dir 1} + +# Installation Directory locations +%if %{single_dir} +%define _prefix /opt/bacula +%define _sbindir /opt/bacula/bin +%define _bindir /opt/bacula/bin +%define _subsysdir /opt/bacula/working +%define sqlite_bindir /opt/bacula/sqlite +%define _mandir /usr/share/man +%define docs_dir /opt/bacula/docs +%define archive_dir /opt/bacula/archive +%define sysconf_dir /opt/bacula/etc +%define script_dir /opt/bacula/scripts +%define working_dir /opt/bacula/working +%define pid_dir /opt/bacula/working +%define plugin_dir /opt/bacula/plugins +%define lib_dir /opt/bacula/lib +%else +%define _prefix /usr +%define _sbindir %_prefix/sbin +%define _bindir %_prefix/bin +%define _subsysdir /var/lock/subsys +%define sqlite_bindir %_libdir/bacula/sqlite +%define _mandir %_prefix/share/man +%define sysconf_dir /etc/bacula +%define script_dir %_libdir/bacula +%define 
working_dir /var/lib/bacula +%define pid_dir /var/run +%define plugin_dir %_libdir/bacula/plugins +%define lib_dir %_libdir/bacula/lib +%endif + +# Daemon user:group Don't change them unless you know what you are doing +%define director_daemon_user bacula +%define daemon_group bacula + +#-------------------------------------------------------------------------- +# it should not be necessary to change anything below here for a release +# except for patch macros in the setup section +#-------------------------------------------------------------------------- + +%{?contrib_packager:%define _packager %{contrib_packager}} + +%{expand: %%define gccver %(rpm -q --queryformat %%{version} gcc)} +%{expand: %%define gccrel %(rpm -q --queryformat %%{release} gcc)} + +%define staticqt 1 +%{?nobuild_staticqt:%define staticqt 0} + +# determine what platform we are building on +%define fedora 0 +%define suse 0 +%define mdk 0 + +%if %{_vendor} == redhat + %define fedora 1 + %define _dist %(cat /etc/redhat-release) +%endif +%if %{_vendor} == suse + %define suse 1 + %define _dist %(grep -i SuSE /etc/SuSE-release) +%endif +%if %{_vendor} == Mandriva + %define mdk 1 + %define _dist %(grep Mand /etc/mandrake-release) +%endif +%if ! %{fedora} && ! %{suse} && ! %{mdk} +%{error: Unknown platform. Please examine the spec file.} +exit 1 +%endif + +%define sqlite 0 +%{?build_sqlite:%define sqlite 1} + +%define base_package_name bacula + +Summary: Bacula - The Network Backup Solution +Name: %{base_package_name}-bat +Version: %{_version} +Release: %{_release} +Group: System Environment/Daemons +License: AGPLv3 +BuildRoot: %{_tmppath}/%{name}-root +URL: http://www.bacula.org/ +Vendor: The Bacula Team +Packager: %{_packager} +Prefix: %{_prefix} +Distribution: %{_dist} + +Source0: http://www.prdownloads.sourceforge.net/bacula/%{product}-%{version}.tar.gz +Source1: http://www.prdownloads.sourceforge.net/bacula/depkgs-qt-%{depkgs_qt_version}.tar.gz + +BuildRequires: gcc, gcc-c++, make, autoconf +BuildRequires: libstdc++-devel, zlib-devel +BuildRequires: openssl-devel, fontconfig-devel, libpng-devel, libstdc++-devel, zlib-devel + +Requires: openssl +Requires: fontconfig +Requires: libgcc +Requires: libpng +Requires: libstdc++ +Requires: zlib +Requires: %{base_package_name}-libs + +%if %{suse} +Requires: freetype2 +BuildRequires: freetype2-devel +%else +Requires: usermode +Requires: freetype +BuildRequires: freetype-devel +%endif + +# Source directory locations +%define depkgs_qt ../depkgs-qt + +# define the basic package description +%define blurb Bacula - The Network Backup Solution. +%define blurb2 Bacula is a set of computer programs that permit you (or the system +%define blurb3 administrator) to manage backup, recovery, and verification of computer +%define blurb4 data across a network of computers of different kinds. In technical terms, +%define blurb5 it is a network client/server based backup program. Bacula is relatively +%define blurb6 easy to use and efficient, while offering many advanced storage management +%define blurb7 features that make it easy to find and recover lost or damaged files. + +%define group_file /etc/group +%define groupadd /usr/sbin/groupadd + +Summary: Bacula - The Network Backup Solution +Group: System Environment/Daemons + +%description +%{blurb} + +%{blurb2} +%{blurb3} +%{blurb4} +%{blurb5} +%{blurb6} +%{blurb7} + +This is the Bacula Administration Tool (bat) graphical user interface package. +It is an add-on to the client or server packages. 
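This spec also honours a handful of optional macros referenced above (%{?contrib_packager}, %{?single_dir_install}, %{?nobuild_staticqt}, %{?build_sqlite}); they are plain rpm macros, so they can be supplied on the command line. A sketch, assuming the configured spec has been saved as bacula-bat.spec:

$ rpmbuild -ba bacula-bat.spec \
      --define 'contrib_packager J. Random Packager <jrp@example.org>' \
      --define 'nobuild_staticqt 1' \
      --define 'build_sqlite 1'

Here nobuild_staticqt switches the build to the system Qt (skipping the static depkgs-qt build) and build_sqlite enables the sqlite variant, as the %define lines above describe.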
+ +# Don't strip symbols +%define debug_package %{nil} + +# Must explicitly enable debug pkg on SuSE +# but not in opensuse_bs +#%if %{suse} && ! 0%{?opensuse_bs} +#%debug_package +#%endif + +%prep +%setup -T -n %{product}-%{_version} -b 0 +%setup -T -D -n %{product}-%{_version} -b 1 + +%build + + +cwd=${PWD} +%if ! %{staticqt} +export QTDIR=$(pkg-config --variable=prefix QtCore) +export QTINC=$(pkg-config --variable=includedir QtCore) +export QTLIB=$(pkg-config --variable=libdir QtCore) +export PATH=${QTDIR}/bin/:${PATH} +%else +# You can use a cache for depkgs-qt +# tar xfz depkgs-qt*gz -C ~/ +# cd ~/depkgs-qt +# echo yes | make qt4 +# touch %{depkgs_qt_version} # depkgs version +if [ -f $HOME/depkgs-qt/%{depkgs_qt_version} ]; then + rm -rf %{depkgs_qt} + ln -s $HOME/depkgs-qt %{depkgs_qt} + cd %{depkgs_qt} +else + cd %{depkgs_qt} + make qt4 </dev/null` +if [ -z "$HAVE_BACULA" ]; then + %{groupadd} -r %{daemon_group} > /dev/null 2>&1 + echo "The group %{daemon_group} has been added to %{group_file}." + echo "See the manual chapter \"Running Bacula\" for details." +fi + + +%post +if [ -d %{sysconf_dir} ]; then + cd %{sysconf_dir} + if [ ! -f .rpm.sed ]; then + (umask 0177 + echo "# This file is used to ensure that all passwords will" > .rpm.sed + echo "# match between configuration files" >> .rpm.sed + ) + for string in XXX_REPLACE_WITH_DIRECTOR_PASSWORD_XXX XXX_REPLACE_WITH_CLIENT_PASSWORD_XXX XXX_REPLACE_WITH_STORAGE_PASSWORD_XXX XXX_REPLACE_WITH_DIRECTOR_MONITOR_PASSWORD_XXX XXX_REPLACE_WITH_CLIENT_MONITOR_PASSWORD_XXX XXX_REPLACE_WITH_STORAGE_MONITOR_PASSWORD_XXX; do + pass=`openssl rand -base64 33` + echo "s@${string}@${pass}@g" >> .rpm.sed + done + fi + host=`hostname -s` + for file in *.conf; do + sed -f .rpm.sed $file > $file.new + sed "s@XXX_HOSTNAME_XXX@${host}@g" $file.new > $file + rm -f $file.new + done +fi +/sbin/ldconfig + +%clean +[ "$RPM_BUILD_ROOT" != "/" ] && rm -rf "$RPM_BUILD_ROOT" +rm -rf $RPM_BUILD_DIR/depkgs-qt + +%changelog +* Sat Aug 1 2009 Kern Sibbald +- Split bat into separate bacula-bat.spec diff --git a/platforms/rpms/redhat/bacula-client-static.spec.in b/platforms/rpms/redhat/bacula-client-static.spec.in new file mode 100644 index 00000000..d3bff82c --- /dev/null +++ b/platforms/rpms/redhat/bacula-client-static.spec.in @@ -0,0 +1,286 @@ +# Bacula RPM spec file +# +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# + +# Platform Build Configuration +# TODO: merge all plugins into one spec file + +# basic defines for every build +%define _release @RELEASE@ +%define _version @VERSION@ +%define product bacula +%define _packager Kern Sibbald +%define depkgs_version @DEPKGS_VERSION@ + +# Don't strip binaries +%define __os_install_post %{nil} +%define __debug_install_post %{nil} +%define debug_package %{nil} + +%define single_dir 1 +%{?single_dir_install:%define single_dir 1} + +# Installation Directory locations +%if %{single_dir} +# Installation Directory locations +%define _prefix /opt/bacula +%define _sbindir /opt/bacula/sbin +%define _bindir /opt/bacula/bin +%define _subsysdir /opt/bacula/working +%define sqlite_bindir /opt/bacula/sqlite +%define _mandir /usr/share/man +%define _man_bacula /opt/bacula/share/man/ +%define sysconf_dir /opt/bacula/etc +%define scripts_dir /opt/bacula/scripts +%define working_dir /opt/bacula/working +%define pid_dir /opt/bacula/working +%define plugin_dir /opt/bacula/plugins +%define lib_dir /opt/bacula/lib +%define script_dir /opt/bacula/scripts +%define doc_dir /opt/bacula/share/doc 
+%define var_dir /opt/bacula/var +%else +%define _prefix /usr +%define _sbindir %_prefix/sbin +%define _bindir %_prefix/bin +%define _subsysdir /var/lock/subsys +%define sqlite_bindir %_libdir/bacula/sqlite +%define _mandir %_prefix/share/man +%define sysconf_dir /etc/bacula +%define script_dir %_libdir/bacula +%define working_dir /var/lib/bacula +%define pid_dir /var/run +%define plugin_dir %_libdir/bacula/plugins +%define lib_dir %_libdir/bacula/lib +%endif + +#-------------------------------------------------------------------------- +# it should not be necessary to change anything below here for a release +# except for patch macros in the setup section +#-------------------------------------------------------------------------- + +%define file_daemon_user root +%define daemon_group bacula + +%define base_package_name bacula + +%{?contrib_packager:%define _packager %{contrib_packager}} + +Summary: Bacula - The Network Backup Solution +Name: %{base_package_name}-client-static +Version: %{_version} +Release: %{_release} +Group: System Environment/Daemons +License: AGPLv3 +BuildRoot: %{_tmppath}/%{name}-root +URL: http://www.baculasystems.com/ +Vendor: Bacula Project +Packager: %{_packager} +Prefix: %{_prefix} +Distribution: Bacula Static Client +Source0: https://www.baculasystems.com/dl/download/bacula-%{_version}.tar.gz + +# define the basic package description +%define blurb Bacula static FD - The Network Backup Solution. +%define blurb2 Bacula static FD is a statically linked File daemon. + +%description +%{blurb} This is the Bacula static FD package. + +BuildRequires: gcc, gcc-c++, make, autoconf +BuildRequires: glibc, glibc-devel +BuildRequires: ncurses-devel, perl, readline-devel +BuildRequires: libstdc++-devel, zlib-devel +BuildRequires: libacl-devel +BuildRequires: pkgconfig + +Provides: bacula-fd +Conflicts: bacula-client + +Summary: Bacula static fd package - The Network Backup Solution +Group: System Environment/Daemons + +%prep +%setup -T -D -n bacula-%{_version} -b 0 + +%build + +cd ../bacula-%{_version} +./configure \ + --prefix=%{_prefix} \ + --sbindir=%{_sbindir} \ + --sysconfdir=%{sysconf_dir} \ + --with-scriptdir=%{scripts_dir} \ + --with-working-dir=%{working_dir} \ + --with-plugindir=%{plugin_dir} \ + --with-pid-dir=%{pid_dir} \ + --with-subsys-dir=%{_subsysdir} \ + --enable-smartalloc \ + --disable-bat \ + --enable-client-only \ + --with-basename="XXX_HOSTNAME_XXX" \ + --with-hostname="XXX_HOSTNAME_XXX" \ + --with-fd-password="XXX_REPLACE_WITH_CLIENT_PASSWORD_XXX" \ + --with-mon-fd-password="XXX_REPLACE_WITH_CLIENT_MONITOR_PASSWORD_XXX" \ + --enable-static-fd=yes \ + --enable-static-tools=yes \ + --enable-static-cons=yes \ + --without-openssl \ + --disable-libtool +make -j3 + +%install + +cwd=${PWD} +[ "$RPM_BUILD_ROOT" != "/" ] && rm -rf "$RPM_BUILD_ROOT" +mkdir -p $RPM_BUILD_ROOT/etc/init.d +mkdir -p $RPM_BUILD_ROOT%{_sbindir} +mkdir -p $RPM_BUILD_ROOT%{_bindir} +mkdir -p $RPM_BUILD_ROOT%{plugin_dir} +mkdir -p $RPM_BUILD_ROOT%{sysconf_dir} +mkdir -p $RPM_BUILD_ROOT%{working_dir} +mkdir -p $RPM_BUILD_ROOT/etc/logrotate.d/bacula + +cp -p platforms/redhat/bacula-fd $RPM_BUILD_ROOT/etc/init.d/static-bacula-fd +chmod 0754 $RPM_BUILD_ROOT/etc/init.d/* + +make DESTDIR=$RPM_BUILD_ROOT install + +# fix me - building enable-client-only installs files not included in bacula-client package +# Program docs not installed on client + +cp $RPM_BUILD_ROOT%{sysconf_dir}/bacula-fd.conf $RPM_BUILD_ROOT%{sysconf_dir}/static-bacula-fd.conf + +rm -f $RPM_BUILD_ROOT%{_sbindir}/bconsole +rm -f
$RPM_BUILD_ROOT%{_sbindir}/bbconsjson +rm -f $RPM_BUILD_ROOT%{_sbindir}/btraceback +rm -f $RPM_BUILD_ROOT%{sysconf_dir}/mtx-changer.conf.new +rm -f $RPM_BUILD_ROOT%{sysconf_dir}/btraceback.mdb +rm -f $RPM_BUILD_ROOT%{sysconf_dir}/dvd-handler.old +rm -f $RPM_BUILD_ROOT%{sysconf_dir}/storage-ctl.conf.new +rm -f $RPM_BUILD_ROOT%{sysconf_dir}/disk-changer.old +rm -f $RPM_BUILD_ROOT%{sysconf_dir}/bacula-ctl-sd +rm -f $RPM_BUILD_ROOT%{sysconf_dir}/bconsole +rm -f $RPM_BUILD_ROOT%{sysconf_dir}/btraceback.gdb +rm -f $RPM_BUILD_ROOT%{sysconf_dir}/disk-changer +rm -f $RPM_BUILD_ROOT%{sysconf_dir}/dvd-handler +rm -f $RPM_BUILD_ROOT%{sysconf_dir}/storage-ctl +rm -f $RPM_BUILD_ROOT%{sysconf_dir}/mtx-changer +rm -f $RPM_BUILD_ROOT%{sysconf_dir}/mtx-changer.conf +rm -f $RPM_BUILD_ROOT%{sysconf_dir}/bacula-ctl-dir +rm -f $RPM_BUILD_ROOT%{sysconf_dir}/bconsole.conf.new +rm -f $RPM_BUILD_ROOT%{sysconf_dir}/storage-ctl.conf +rm -f $RPM_BUILD_ROOT%{sysconf_dir}/btraceback.dbx +rm -f $RPM_BUILD_ROOT%{sysconf_dir}/bacula-fd.conf +rm -f $RPM_BUILD_ROOT%{_bindir}/bfdjson +rm -f $RPM_BUILD_ROOT%{_bindir}/bacula +rm -f $RPM_BUILD_ROOT%{_sbindir}/bacula +rm -f $RPM_BUILD_ROOT%{_sbindir}/bacula-fd +rm -f $RPM_BUILD_ROOT%{_sbindir}/bfdjson +rm -f $RPM_BUILD_ROOT%{_man_bacula}/man1/bacula-tray-monitor.1.gz +rm -f $RPM_BUILD_ROOT%{_man_bacula}/man1/bat.1.gz +rm -f $RPM_BUILD_ROOT%{_man_bacula}/man1/bsmtp.1.gz +rm -f $RPM_BUILD_ROOT%{_man_bacula}/man8/bacula-dir.8.gz +rm -f $RPM_BUILD_ROOT%{_man_bacula}/man8/bacula-sd.8.gz +rm -f $RPM_BUILD_ROOT%{_man_bacula}/man8/bacula.8.gz +rm -f $RPM_BUILD_ROOT%{_man_bacula}/man8/bcopy.8.gz +rm -f $RPM_BUILD_ROOT%{_man_bacula}/man8/bextract.8.gz +rm -f $RPM_BUILD_ROOT%{_man_bacula}/man8/bls.8.gz +rm -f $RPM_BUILD_ROOT%{_man_bacula}/man8/bregex.8.gz +rm -f $RPM_BUILD_ROOT%{_man_bacula}/man8/bscan.8.gz +rm -f $RPM_BUILD_ROOT%{_man_bacula}/man8/btape.8.gz +rm -f $RPM_BUILD_ROOT%{_man_bacula}/man8/btraceback.8.gz +rm -f $RPM_BUILD_ROOT%{_man_bacula}/man8/bwild.8.gz +rm -f $RPM_BUILD_ROOT%{_man_bacula}/man8/dbcheck.8.gz +rm -f $RPM_BUILD_ROOT%{_man_bacula}/man1/bacula-bwxconsole.1.gz +rm -f $RPM_BUILD_ROOT%{script_dir}/bacula +rm -f $RPM_BUILD_ROOT%{script_dir}/bacula-ctl-dir +rm -f $RPM_BUILD_ROOT%{script_dir}/bacula-ctl-sd +rm -f $RPM_BUILD_ROOT%{script_dir}/bacula_config +rm -f $RPM_BUILD_ROOT%{script_dir}/bconsole +rm -f $RPM_BUILD_ROOT%{script_dir}/breload +rm -f $RPM_BUILD_ROOT%{script_dir}/btraceback.dbx +rm -f $RPM_BUILD_ROOT%{script_dir}/btraceback.gdb +rm -f $RPM_BUILD_ROOT%{script_dir}/btraceback.mdb +rm -f $RPM_BUILD_ROOT%{script_dir}/disk-changer +rm -f $RPM_BUILD_ROOT%{script_dir}/dvd-handler +rm -f $RPM_BUILD_ROOT%{script_dir}/manual_prune.pl +rm -f $RPM_BUILD_ROOT%{script_dir}/mtx-changer +rm -f $RPM_BUILD_ROOT%{script_dir}/mtx-changer.conf +rm -f $RPM_BUILD_ROOT%{script_dir}/storage-ctl +rm -f $RPM_BUILD_ROOT%{script_dir}/storage-ctl.conf +rm -f $RPM_BUILD_ROOT%{doc_dir}/bacula/ChangeLog +rm -f $RPM_BUILD_ROOT%{doc_dir}/bacula/INSTALL +rm -f $RPM_BUILD_ROOT%{doc_dir}/bacula/LICENSE +rm -f $RPM_BUILD_ROOT%{doc_dir}/bacula/README +rm -f $RPM_BUILD_ROOT%{doc_dir}/bacula/ReleaseNotes +rm -f $RPM_BUILD_ROOT%{doc_dir}/bacula/VERIFYING +rm -f $RPM_BUILD_ROOT%/opt/bacula/share/man/man1/bacula-bwxconsole.1.gz + +%files +%defattr(-,root,root) + +%attr(-, root, %{daemon_group}) %dir %{script_dir} +%attr(-, root, %{daemon_group}) %dir %{sysconf_dir} +%attr(-, root, %{daemon_group}) %dir %{working_dir} +%attr(-, root, %{daemon_group}) %{script_dir}/bacula-ctl-fd +%attr(-, root, 
%{daemon_group}) %dir %{plugin_dir} +%attr(-, root, %{daemon_group}) %config(noreplace) %{sysconf_dir}/static-bacula-fd.conf +%attr(-, root, %{daemon_group}) %config(noreplace) %{sysconf_dir}/bconsole.conf +%attr(-, root, %{daemon_group}) %{_sbindir}/static-bconsole +%attr(-, root, %{daemon_group}) %{_sbindir}/static-bacula-fd + +%attr(-, root, %{daemon_group}) /etc/init.d/static-bacula-fd +%attr(-, root, %{daemon_group}) %dir /etc/logrotate.d/bacula +%attr(-, root, %{daemon_group}) %{_man_bacula}/man8/bconsole.8.gz +%attr(-, root, %{daemon_group}) %{_man_bacula}/man8/bacula-fd.8.gz + +%post +# add our link + +if [ "$1" -ge 1 ] ; then + /sbin/chkconfig --add static-bacula-fd +fi + +if [ -d %{sysconf_dir} ]; then + cd %{sysconf_dir} + if [ ! -f .rpm.sed ]; then + (umask 0177 + echo "# This file is used to ensure that all passwords will" > .rpm.sed + echo "# match between configuration files" >> .rpm.sed + ) + for string in XXX_REPLACE_WITH_DIRECTOR_PASSWORD_XXX XXX_REPLACE_WITH_CLIENT_PASSWORD_XXX XXX_REPLACE_WITH_STORAGE_PASSWORD_XXX XXX_REPLACE_WITH_DIRECTOR_MONITOR_PASSWORD_XXX XXX_REPLACE_WITH_CLIENT_MONITOR_PASSWORD_XXX XXX_REPLACE_WITH_STORAGE_MONITOR_PAS\ +SWORD_XXX; do + pass=`openssl rand -base64 33` + echo "s@${string}@${pass}@g" >> .rpm.sed + done + fi + host=`hostname -s` + echo "s@XXX_HOSTNAME_XXX@${host}@g" >> .rpm.sed + for file in *.conf; do + sed -f .rpm.sed $file > $file.new + sed "s@XXX_HOSTNAME_XXX@${host}@g" $file.new > $file + rm -f $file.new + done + rm .rpm.sed + cd /tmp + cat /etc/init.d/static-bacula-fd | sed s@bacula-fd@static-bacula-fd@g > .rpm.sed + cp .rpm.sed /etc/init.d/static-bacula-fd + chmod 755 /etc/init.d/static-bacula-fd + rm .rpm.sed + cd %{sysconf_dir} +fi + +%preun +# delete our link +if [ $1 = 0 ]; then + /sbin/chkconfig --del static-bacula-fd +fi + +%clean +[ "$RPM_BUILD_ROOT" != "/" ] && rm -rf "$RPM_BUILD_ROOT" + +%changelog diff --git a/platforms/rpms/redhat/bacula-docs.spec.in b/platforms/rpms/redhat/bacula-docs.spec.in new file mode 100644 index 00000000..afe26823 --- /dev/null +++ b/platforms/rpms/redhat/bacula-docs.spec.in @@ -0,0 +1,90 @@ +# +# Bacula RPM spec file +# + +# Platform Build Configuration + +# basic defines for every build +%define _release @RELEASE@ +%define _version @VERSION@ +%define product bacula +%define _packager Kern Sibbald + +%define _prefix /usr +%define _sbindir %_prefix/sbin +%define _mandir %_prefix/share/man + + +#-------------------------------------------------------------------------- +# it should not be necessary to change anything below here for a release +# except for patch macros in the setup section +#-------------------------------------------------------------------------- + +%{?contrib_packager:%define _packager %{contrib_packager}} + +Summary: Bacula - The Network Backup Solution +Name: %{product}-docs +Version: %{_version} +Release: %{_release} +Group: System Environment/Daemons +License: AGPLv3 +BuildRoot: %{_tmppath}/%{name}-root +URL: http://www.bacula.org/ +Vendor: The Bacula Team +Packager: %{_packager} +BuildArchitectures: noarch +Prefix: %{_prefix} +Distribution: Bacula Documentation + +Source: %{name}-%{_version}.tar.bz2 + +# Source directory locations +%define _docsrc . + +# define the basic package description +%define blurb Bacula - The Leading Open Source Backup Solution. 
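Once the static client package is installed, the %post scriptlet above has copied, renamed and registered the init script. A small sanity check on a SysV-init host might look like the following; it is purely illustrative and not part of the packaging.

# Illustrative post-install check for the renamed SysV service set up above.
chkconfig --list static-bacula-fd                        # runlevel links present?
grep -c static-bacula-fd /etc/init.d/static-bacula-fd    # non-zero: the sed rename ran
service static-bacula-fd status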
+%define blurb2 Bacula is a set of computer programs that permit you (or the system +%define blurb3 administrator) to manage backup, recovery, and verification of computer +%define blurb4 data across a network of computers of different kinds. In technical terms, +%define blurb5 it is a network client/server based backup program. Bacula is relatively +%define blurb6 easy to use and efficient, while offering many advanced storage management +%define blurb7 features that make it easy to find and recover lost or damaged files. +%define blurb8 Bacula source code has been released under the AGPL version 3 license. + +Summary: Bacula - The Network Backup Solution +Group: System Environment/Daemons + +%description +%{blurb} + +%{blurb2} +%{blurb3} +%{blurb4} +%{blurb5} +%{blurb6} +%{blurb7} +%{blurb8} + +This package installs the Bacula pdf and html documentation. + +%prep +%setup + + +%clean +[ "$RPM_BUILD_ROOT" != "/" ] && rm -rf "$RPM_BUILD_ROOT" +rm -rf $RPM_BUILD_DIR/%{name}-%{_version} + +%files +%doc %{_docsrc}/manuals/en/console/console %{_docsrc}/manuals/en/console/console.pdf +%doc %{_docsrc}/manuals/en/developers/developers %{_docsrc}/manuals/en/developers/developers.pdf +%doc %{_docsrc}/manuals/en/main/main %{_docsrc}/manuals/en/main/main.pdf +%doc %{_docsrc}/manuals/en/misc/misc %{_docsrc}/manuals/en/misc/misc.pdf +%doc %{_docsrc}/manuals/en/problems/problems %{_docsrc}/manuals/en/problems/problems.pdf +%doc %{_docsrc}/manuals/en/utility/utility %{_docsrc}/manuals/en/utility/utility.pdf + +%changelog +* Sat Jan 30 2010 D. Scott Barninger +- change source file to bz2, update for new doc structure +* Sat Aug 1 2009 Kern Sibbald +- Split docs into separate bacula-docs.spec diff --git a/platforms/rpms/redhat/bacula-mtx.spec.in b/platforms/rpms/redhat/bacula-mtx.spec.in new file mode 100644 index 00000000..b53067cb --- /dev/null +++ b/platforms/rpms/redhat/bacula-mtx.spec.in @@ -0,0 +1,126 @@ +# +# Bacula RPM spec file +# +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# + +# Platform Build Configuration + +# basic defines for every build +%define _release @RELEASE@ +%define _version @VERSION@ +%define product bacula +%define depkgs_version @DEPKGS_VERSION@ +%define _packager Kern Sibbald +%define manpage_ext gz + +# Don't strip binaries +%define __os_install_post %{nil} +%define __debug_install_post %{nil} +%define debug_package %{nil} + +%define single_dir 0 +%{?single_dir_install:%define single_dir 1} + +# Installation Directory locations +%define _prefix /usr +%define _sbindir %_prefix/sbin +%define _bindir %_prefix/bin +%define _subsysdir /var/lock/subsys +%define sqlite_bindir %_libdir/bacula/sqlite +%define _mandir %_prefix/share/man +%define sysconf_dir /etc/bacula +%define script_dir %_libdir/bacula +%define working_dir /var/lib/bacula +%define pid_dir /var/run +%define plugin_dir %_libdir/bacula/plugins +%define lib_dir %_libdir/bacula/lib + +#-------------------------------------------------------------------------- +# it should not be necessary to change anything below here for a release +# except for patch macros in the setup section +#-------------------------------------------------------------------------- + +%{?contrib_packager:%define _packager %{contrib_packager}} + +Summary: Bacula - The Network Backup Solution +Name: %{product}-mtx +Version: %{_version} +Release: %{_release} +Group: System Environment/Daemons +License: AGPLv3 +BuildRoot: %{_tmppath}/%{name}-root +URL: http://www.bacula.org/ +Vendor: The Bacula Team +Packager: 
%{_packager} +Prefix: %{_prefix} +Distribution: Bacula Bat + +Source: http://www.prdownloads.sourceforge.net/bacula/depkgs-%{depkgs_version}.tar.gz + +# define the basic package description +%define blurb Bacula - The Network Backup Solution. +%define blurb2 Bacula is a set of computer programs that permit you (or the system +%define blurb3 administrator) to manage backup, recovery, and verification of computer +%define blurb4 data across a network of computers of different kinds. In technical terms, +%define blurb5 it is a network client/server based backup program. Bacula is relatively +%define blurb6 easy to use and efficient, while offering many advanced storage management +%define blurb7 features that make it easy to find and recover lost or damaged files. + +Summary: Bacula - The Network Backup Solution +Group: System Environment/Daemons + +%description +%{blurb} + +%{blurb2} +%{blurb3} +%{blurb4} +%{blurb5} +%{blurb6} +%{blurb7} + +This is Bacula's version of mtx tape utilities for Linux distributions that +do not provide their own mtx package + +%prep +%setup -T -n depkgs -b 0 + +%build + +make mtx + +%install +make \ + prefix=$RPM_BUILD_ROOT%{_prefix} \ + sbindir=$RPM_BUILD_ROOT%{_sbindir} \ + sysconfdir=$RPM_BUILD_ROOT%{sysconf_dir} \ + scriptdir=$RPM_BUILD_ROOT%{script_dir} \ + working_dir=$RPM_BUILD_ROOT%{working_dir} \ + piddir=$RPM_BUILD_ROOT%{pid_dir} \ + mandir=$RPM_BUILD_ROOT%{_mandir} \ + mtx-install + +%files +%defattr(-,root,root) +%attr(-, root, %{storage_daemon_group}) %{_sbindir}/loaderinfo +%attr(-, root, %{storage_daemon_group}) %{_sbindir}/mtx +%attr(-, root, %{storage_daemon_group}) %{_sbindir}/scsitape +%attr(-, root, %{storage_daemon_group}) %{_sbindir}/tapeinfo +%attr(-, root, %{storage_daemon_group}) %{_sbindir}/scsieject +%{_mandir}/man1/loaderinfo.1.%{manpage_ext} +%{_mandir}/man1/mtx.1.%{manpage_ext} +%{_mandir}/man1/scsitape.1.%{manpage_ext} +%{_mandir}/man1/tapeinfo.1.%{manpage_ext} +%{_mandir}/man1/scsieject.1.%{manpage_ext} + + +%clean +[ "$RPM_BUILD_ROOT" != "/" ] && rm -rf "$RPM_BUILD_ROOT" +rm -rf $RPM_BUILD_DIR/depkgs + +%changelog +* Sat Aug 1 2009 Kern Sibbald +- Split mtx out into bacula-mtx.spec diff --git a/platforms/rpms/redhat/bacula.spec.in b/platforms/rpms/redhat/bacula.spec.in new file mode 100644 index 00000000..7563c275 --- /dev/null +++ b/platforms/rpms/redhat/bacula.spec.in @@ -0,0 +1,1361 @@ +# +# Bacula RPM spec file +# + +# Platform Build Configuration + +# basic defines for every build +%define _release @RELEASE@ +%define _version @VERSION@ +%define product bacula +%define _packager Kern Sibbald +%define depkgs_version @DEPKGS_VERSION@ + +# Don't strip binaries +%define __os_install_post %{nil} +%define __debug_install_post %{nil} +%define debug_package %{nil} + +%define postgres_version 8 +%define pgre84 0 +%{?build_rhel5:%define pgre84 1} +%{?build_el5:%define pgre84 1} +%if %{pgre84} +%define postgres_package postgresql84 +%define postgres_server_package postgresql84-server +%define postgres_devel_package postgresql84-devel +%else +%define postgres_package postgresql +%define postgres_server_package postgresql-server +%define postgres_devel_package postgresql-devel +%endif + +%define single_dir 1 +%{?single_dir_install:%define single_dir 1} + +# Installation Directory locations +%define _prefix /opt/bacula +%define _sbindir /opt/bacula/bin +%define _bindir /opt/bacula/bin +%define _subsysdir /opt/bacula/working +%define sqlite_bindir /opt/bacula/sqlite +%define _mandir /usr/share/man +%define docs_dir /opt/bacula/docs +%define 
archive_dir /opt/bacula/archive +%define sysconf_dir /opt/bacula/etc +%define script_dir /opt/bacula/scripts +%define working_dir /opt/bacula/working +%define pid_dir /opt/bacula/working +%define plugin_dir /opt/bacula/plugins +%define lib_dir /opt/bacula/lib +%define log_dir /opt/bacula/log +%define systemd_dir /lib/systemd/system + +# Daemon user:group Don't change them unless you know what you are doing +%define director_daemon_user bacula +%define storage_daemon_user bacula +%define file_daemon_user root +%define daemon_group bacula +# group that has write access to tape devices, usually disk on Linux +%define storage_daemon_group disk + +%define depkgs ../depkgs + +# problems with mandriva build: +# nothing provides libbonobo2_0-devel, nothing provides libbonoboui2_0-devel + +#-------------------------------------------------------------------------- +# it should not be necessary to change anything below here for a release +# except for patch macros in the setup section +#-------------------------------------------------------------------------- + +%define base_package_name %{product} + +%{?contrib_packager:%define _packager %{contrib_packager}} + +Summary: Bacula - The Network Backup Solution +Name: %{base_package_name} +Version: %{_version} +Release: %{_release} +Group: System Environment/Daemons +License: AGPLv3 +BuildRoot: %{_tmppath}/%{name}-root +URL: http://www.bacula.org/ +Vendor: The Bacula Team +Packager: %{_packager} +Prefix: %{_prefix} + +# opensuse build service changes the release itself +# what happens if the release is not 1? DSB +Source0: http://www.prdownloads.sourceforge.net/bacula/depkgs-%{depkgs_version}.tar.gz +Source1: http://www.prdownloads.sourceforge.net/bacula/%{product}-%{version}.tar.gz +Source2: Release_Notes-%{version}-%{release}.tar.gz + +# define the basic package description +%define blurb Bacula - The Leading Open Source Backup Solution. +%define blurb2 Bacula is a set of computer programs that permit you (or the system +%define blurb3 administrator) to manage backup, recovery, and verification of computer +%define blurb4 data across a network of computers of different kinds. In technical terms, +%define blurb5 it is a network client/server based backup program. Bacula is relatively +%define blurb6 easy to use and efficient, while offering many advanced storage management +%define blurb7 features that make it easy to find and recover lost or damaged files. +%define blurb8 Bacula source code has been released under the AGPL version 3 license. 
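The ownership model above runs the Director and Storage daemon as the bacula user, the File daemon as root, and relies on the Storage daemon's supplementary membership in the tape-access group (disk on most Linux systems); the accounts themselves are created by the %pre scriptlets further down. A hedged post-install check, where the tape device names are only examples:

# Sanity-check the accounts and tape-group membership the spec relies on;
# /dev/st0 and /dev/nst0 are example device nodes, adjust for your hardware.
getent group bacula disk
getent passwd bacula
id -nG bacula                          # director/SD user: expect "disk" listed as well
ls -l /dev/st0 /dev/nst0 2>/dev/null   # tape nodes are normally group "disk" on Linux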
+ +%define user_file /etc/passwd +%define group_file /etc/group + +# program locations +%define useradd /usr/sbin/useradd +%define groupadd /usr/sbin/groupadd +%define usermod /usr/sbin/usermod + +# platform defines - set one below or define the build_xxx on the command line +# RedHat builds +%define rh7 0 +%{?build_rh7:%define rh7 1} +%define rh8 0 +%{?build_rh8:%define rh8 1} +%define rh9 0 +%{?build_rh9:%define rh9 1} +# Fedora Core build +%define fc16 0 +%{?build_fc16:%define fc16 1} +%define fc17 0 +%{?build_fc17:%define fc17 1} +%define fc18 0 +%{?build_fc18:%define fc18 1} +%define fc22 0 +%{?build_fc22:%define fc22 1} +%define fc25 0 +%{?build_fc25:%define fc25 1} +%define fc26 0 +%{?build_fc26:%define fc26 1} +%define fc27 0 +%{?build_fc27:%define fc27 1} +# RedHat Enterprise builds +%define rhel3 0 +%{?build_rhel3:%define rhel3 1} +%define rhel4 0 +%{?build_rhel4:%define rhel4 1} +%{?build_rhel4:%define fc3 1} +%{?build_el4:%define rhel4 1} +%{?build_el4:%define fc3 1} +%define rhel5 0 +%{?build_rhel5:%define rhel5 1} +%{?build_rhel5:%define fc6 1} +%{?build_el5:%define rhel5 1} +%{?build_el5:%define fc6 1} +%define rhel6 0 +%{?build_rhel6:%define rhel6 1} +%{?build_el6:%define rhel6 1} +%define rhel7 0 +%{?build_rhel7:%define rhel7 1} +%{?build_el7:%define rhel7 1} +# CentOS build +%define centos3 0 +%{?build_centos3:%define centos3 1} +%define centos4 0 +%{?build_centos4:%define centos4 1} +%{?build_centos4:%define fc3 1} +%define centos5 0 +%{?build_centos5:%define centos5 1} +%{?build_centos5:%define fc6 1} +%define centos6 0 +%{?build_centos6:%define centos6 1} +%define centos7 0 +%{?build_centos7:%define centos7 1} +# SL build +%define sl3 0 +%{?build_sl3:%define sl3 1} +%define sl4 0 +%{?build_sl4:%define sl4 1} +%{?build_sl4:%define fc3 1} +%define sl5 0 +%{?build_sl5:%define sl5 1} +%{?build_sl5:%define fc6 1} +# SuSE build +%define su9 0 +%{?build_su9:%define su9 1} +%define su10 0 +%{?build_su10:%define su10 1} +%define su102 0 +%{?build_su102:%define su102 1} +%define su103 0 +%{?build_su103:%define su103 1} +%define su110 0 +%{?build_su110:%define su110 1} +%define su111 0 +%{?build_su111:%define su111 1} +%define su112 0 +%{?build_su112:%define su112 1} +%define su113 0 +%{?build_su113:%define su113 1} +%define su120 0 +%{?build_su120:%define su120 1} +%define su131 0 +%{?build_su131:%define su131 1} +# Mandrake builds +%define mdk 0 +%{?build_mdk:%define mdk 1} +%define mdv 0 +%{?build_mdv:%define mdv 1} +%{?build_mdv:%define mdk 1} + +# client only build +%define client_only 0 +%{?build_client_only:%define client_only 1} + +# if the platform is using systemd +%define usesystemd 0 + +%if %{rhel7} || %{fc22}|| %{fc25} || %{fc26} || %{fc27} || %{centos7} +%define usesystemd 1 +%endif + +# depending if we use systemd or not, we use chkconfig or systemctl +%if %{usesystemd} +%define service_enable systemctl enable +%define service_disable systemctl disable +%else +%define service_enable /sbin/chkconfig --add +%define service_disable /sbin/chkconfig --del +%endif + +# Setup some short cuts +%define rhat 0 +%if %{rh7} || %{rh8} || %{rh9} +%define rhat 1 +%endif +%define fed 0 +%if %{fc16} || %{fc17} || %{fc18} || %{fc22} || %{fc25} || %{fc26} || %{fc27} +%define fed 1 +%endif +%define suse 0 +%if %{su9} || %{su10} || %{su102} || %{su103} || %{su110} || %{su111} || %{su112} || %{su120} || %{su131} +%define suse 1 +%endif +%define rhel 0 +%if %{rhel3} || %{rhel4} || %{rhel5} || %{rhel6} || %{rhel7} || %{centos3} || %{centos4} || %{centos5} || %{centos6} || %{centos7} 
+%define rhel 1 +%endif +%define scil 0 +%if %{sl3} || %{sl4} || %{sl5} +%define scil 1 +%endif + + +# test for a platform definition +%if !%{rhat} && !%{rhel} && !%{fed} && !%{suse} && !%{mdk} && !%{scil} +%{error: You must specify a platform. Please examine the spec file.} +exit 1 +%endif + +# distribution-specific directory for logwatch +%if %{rh7} || %{rh8} || %{rh9} +%define logwatch_dir /etc/log.d +%else +%define logwatch_dir /etc/logwatch +%endif + +# database defines +# set for database support desired or define the build_xxx on the command line +%define mysql 0 +%{?build_mysql:%define mysql 1} +%define sqlite 0 +%{?build_sqlite:%define sqlite 1} +%define postgresql 0 +%{?build_postgresql:%define postgresql 1} + +# test for a database definition +%if ! %{mysql} && ! %{sqlite} && ! %{postgresql} && ! %{client_only} +%{error: You must specify database support, by passing one of the following to rpmbuild:} +%{error: --define build_postgresql=1} +%{error: --define build_sqlite=1} +%{error: --define build_mysql=1} +exit 1 +%endif + +%if %{mysql} +%define db_backend mysql +%endif +%if %{sqlite} +%define db_backend sqlite3 +%endif +%if %{postgresql} +%define db_backend postgresql +%endif + +# 64 bit support +%define x86_64 0 +%{?build_x86_64:%define x86_64 1} + +# check what distribution we are +%if %{fc16} || %{fc17} || %{fc18} || %{fc22} || %{fc25} || %{fc26} || %{fc27} +%define _dist %(grep Fedora /etc/redhat-release) +%endif +%if %{centos7} || %{centos6} +%define _dist %(grep CentOS /etc/redhat-release) +%endif +%if %{centos5} || %{centos4} || %{centos3} +%define _dist %(grep CentOS /etc/redhat-release) +%endif +%if %{sl5} ||%{sl4} || %{sl3} +%define _dist %(grep 'Scientific Linux' /etc/redhat-release) +%endif +%if %{suse} +%define _dist %(grep -i SuSE /etc/SuSE-release) +%endif + +Distribution: %_dist + +# should we enable tcp wrappers support +%define tcpwrappers 1 +%{?build_tcpwrappers:%define tcpwrappers 1} + +# do we need to patch for old postgresql version? +%define old_pgsql 0 +%{?build_old_pgsql:%define old_pgsql 1} + +# Mandriva somehow forces the manpage file extension to bz2 rather than gz +%if %{mdk} +%define manpage_ext bz2 +%else +%define manpage_ext gz +%endif + +# for client only build +%if %{client_only} +%define mysql 0 +%define postgresql 0 +%define sqlite 0 +%endif + +BuildRequires: gcc, gcc-c++, make, autoconf +BuildRequires: glibc, glibc-devel +BuildRequires: ncurses-devel, readline-devel +BuildRequires: libstdc++-devel, zlib-devel +BuildRequires: openssl-devel +BuildRequires: libacl-devel +BuildRequires: pkgconfig +BuildRequires: bzip2-devel +%if ! %{rh7} +BuildRequires: libxml2-devel +%endif + +%if %{rh7} +BuildRequires: libxml-devel +%endif +%if %{mdk} +BuildRequires: libstdc++-static-devel +BuildRequires: glibc-static-devel +%endif + +%if %{mysql} && ! 
%{suse} +BuildRequires: mysql-devel +%endif + +%if %{postgresql} +BuildRequires: %{postgres_devel_package} >= %{postgres_version} +%endif + +%description +%{blurb} + +%{blurb2} +%{blurb3} +%{blurb4} +%{blurb5} +%{blurb6} +%{blurb7} +%{blurb8} + +# +# =========================================================== +# Generate mysql, sqlite, or postgresql rpm +# =========================================================== +# +%if %{mysql} +%package mysql +Provides: bacula-mysql +# +# The following provides is to work around an +# auto generated requires from src/scripts/logwatch/applybaculadate +# this is a gross kludge to keep it from being a requirement +# +Provides: perl(Logwatch) +%endif +%if %{postgresql} +%package postgresql +Provides: bacula-postgresql +%endif + +Summary: Bacula - The Network Backup Solution +Group: System Environment/Daemons +Provides: bacula-dir, bacula-sd, bacula-fd, bacula-server +Provides: libbaccats-%{version}.so()(64bit) +Conflicts: bacula-client + +Requires: ncurses, libstdc++, zlib, openssl +Requires: glibc, readline, %{name}-libs + +%if %{suse} +Conflicts: bacula +%endif + +%if %{mysql} +Requires: mysql +%endif + +%if %{postgresql} +Requires: postgresql >= 7 +%endif + +%if %{mysql} +%description mysql +%endif +%if %{sqlite} +%description sqlite +%endif +%if %{postgresql} +%description postgresql +%endif + +%{blurb} + +%{blurb2} +%{blurb3} +%{blurb4} +%{blurb5} +%{blurb6} +%{blurb7} +%{blurb8} + +%if %{mysql} +This build requires MySQL to be installed separately as the catalog database. +%endif +%if %{postgresql} +This build requires PostgreSQL to be installed separately as the catalog database. +%endif +%if %{sqlite} +This build incorporates sqlite3 as the catalog database, statically compiled. +%endif +%if %{tcpwrappers} +This build includes tcp-wrappers support. +%endif + + + +# +# =========================================================== +# Client -- bacula-fd rpm +# =========================================================== +# +%package client +Summary: Bacula - The Network Backup Solution +Group: System Environment/Daemons +Provides: bacula-fd +Conflicts: bacula-mysql +Conflicts: bacula-sqlite +Conflicts: bacula-postgresql + +%if %{suse} +Provides: %{product} +Provides: %{product}-libs +%endif + +Requires: libstdc++, zlib, openssl, bzip2-libs +Requires: glibc, readline, %{name}-libs + +%description client +%{blurb} + +%{blurb2} +%{blurb3} +%{blurb4} +%{blurb5} +%{blurb6} +%{blurb7} +%{blurb8} + +This is the File daemon (Client) only package. It includes the command line +console program. +%if %{tcpwrappers} +This build includes tcp-wrappers support. +%endif + +# +# =========================================================== +# Generate updatedb rpm +# =========================================================== +# +%if ! %{client_only} +%package updatedb + +Summary: Bacula - The Network Backup Solution +Group: System Environment/Daemons + +%description updatedb +%{blurb} + +%{blurb2} +%{blurb3} +%{blurb4} +%{blurb5} +%{blurb6} +%{blurb7} +%{blurb8} + +This package installs scripts for updating older versions of the bacula +database. 
+%endif + +# +# =========================================================== +# Generate libs rpm +# =========================================================== +# +%package libs + +Summary: Bacula - The Network Backup Solution +Group: System Environment/Daemons + +%description libs +%{blurb} + +%{blurb2} +%{blurb3} +%{blurb4} +%{blurb5} +%{blurb6} +%{blurb7} +%{blurb8} + +This package installs the shared libraries used by many bacula programs. + +# Must explicitly enable debug pkg on SuSE +# but not in opensuse_bs +#%if %{suse} && ! 0%{?opensuse_bs} +#%debug_package +#%endif + +%prep +%setup -T -b 0 -n depkgs +%setup -T -b 1 -n %{product}-%{version} +%setup -T -D -b 2 -n %{product}-%{version} +# extract depkgs + + +%build + +%if %{suse} +#export LDFLAGS="${LDFLAGS} -L/usr/lib/termcap -L/usr/lib64/termcap" +%endif + +# You can use a cache for depkgs +# tar xfz depkgs*gz -C ~/ +# cd ~/depkgs +# touch %{depkgs_version} # depkgs version + +if [ -f $HOME/depkgs/%{depkgs_version} ]; then + rm -rf %{depkgs} + ln -s $HOME/depkgs %{depkgs} +fi + +cwd=${PWD} +%if %{sqlite} +cd %{depkgs} +make sqlite3 +cd ${cwd} +%endif + +cd %{depkgs} +make lzo +export LDFLAGS="${LDFLAGS} -L${PWD}/lzo/lib" +export CPPFLAGS="${CPPFLAGS} -I${PWD}/lzo/include" +cd ${cwd} + +# hostname is the build hostname, so use XXX_HOSTNAME_XXX for Address parameter +sed -i s/hostname/basename/ src/console/bconsole.conf.in src/dird/bacula-dir.conf.in src/qt-console/bat.conf.in + +%if %{sqlite} +# patches for the bundled sqlite scripts +sed -i s:.SQLITE_BINDIR.:/opt/bacula/sqlite: src/cats/*_sqlite3_*.in + +# patch the bacula-dir init script to remove sqlite service +%if %{suse} +sed -i 's/network .DB_TYPE./network/' platforms/suse/bacula-dir.in +%endif +%endif + +# 64 bit lib location hacks +# as of 1.39.18 it should not be necessary to enable x86_64 as configure is +# reported to be fixed to properly detect lib locations. 
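Because the platform and database guards near the top of this spec abort the build unless exactly one build_* platform switch and one database switch are defined, the selection is normally driven entirely from the rpmbuild command line. One plausible invocation, assuming the generated bacula.spec, a CentOS 7 host and a PostgreSQL catalog; the chosen defines and the packager string are merely examples.

# Example only: one valid combination of the build_* switches tested above.
rpmbuild -ba bacula.spec \
    --define 'build_centos7 1' \
    --define 'build_postgresql 1' \
    --define 'contrib_packager Example Packager <packager@example.com>'
#   --define 'build_x86_64 1'   # per the 1.39.18 note above, normally unnecessary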
+%if %{x86_64} +export LDFLAGS="${LDFLAGS} -L/usr/lib64" +%endif +%if %{mysql} && %{x86_64} +export LDFLAGS="${LDFLAGS} -L/usr/lib64/mysql" +%endif + +export BACULA="Bacula" + +# Main Bacula configuration +%configure \ + --prefix=%{_prefix} \ + --sbindir=%{_sbindir} \ + --sysconfdir=%{sysconf_dir} \ + --mandir=%{_mandir} \ + --with-scriptdir=%{script_dir} \ + --with-working-dir=%{working_dir} \ + --with-plugindir=%{plugin_dir} \ + --with-logdir=%{log_dir} \ + --with-pid-dir=%{pid_dir} \ + --with-subsys-dir=%{_subsysdir} \ + --enable-smartalloc \ + --disable-conio \ + --enable-readline \ +%if %{mysql} + --with-mysql \ +%endif +%if %{postgresql} + --with-postgresql \ +%endif + --disable-bat \ +%if %{client_only} + --enable-client-only \ +%endif +%if %{rh7} || %{rh8} || %{rh9} + --disable-batch-insert \ +%endif +%if %{usesystemd} + --with-systemd=%{systemd_dir} \ +%endif + --with-tcp-wrappers \ + --with-dir-user=%{director_daemon_user} \ + --with-dir-group=%{daemon_group} \ + --with-sd-user=%{storage_daemon_user} \ + --with-sd-group=%{storage_daemon_group} \ + --with-fd-user=%{file_daemon_user} \ + --with-fd-group=%{daemon_group} \ + --with-basename="XXX_HOSTNAME_XXX" \ + --with-hostname="XXX_HOSTNAME_XXX" \ + --with-dir-password="XXX_REPLACE_WITH_DIRECTOR_PASSWORD_XXX" \ + --with-fd-password="XXX_REPLACE_WITH_CLIENT_PASSWORD_XXX" \ + --with-sd-password="XXX_REPLACE_WITH_STORAGE_PASSWORD_XXX" \ + --with-mon-dir-password="XXX_REPLACE_WITH_DIRECTOR_MONITOR_PASSWORD_XXX" \ + --with-mon-fd-password="XXX_REPLACE_WITH_CLIENT_MONITOR_PASSWORD_XXX" \ + --with-mon-sd-password="XXX_REPLACE_WITH_STORAGE_MONITOR_PASSWORD_XXX" \ + --with-openssl + +make -j3 + +%install + +cwd=${PWD} +[ "$RPM_BUILD_ROOT" != "/" ] && rm -rf "$RPM_BUILD_ROOT" +mkdir -p $RPM_BUILD_ROOT/etc/init.d +mkdir -p $RPM_BUILD_ROOT/etc/logrotate.d +mkdir -p $RPM_BUILD_ROOT%{logwatch_dir}/conf/logfiles +mkdir -p $RPM_BUILD_ROOT%{logwatch_dir}/conf/services +mkdir -p $RPM_BUILD_ROOT%{logwatch_dir}/scripts/services +mkdir -p $RPM_BUILD_ROOT%{logwatch_dir}/scripts/shared +mkdir -p $RPM_BUILD_ROOT%{script_dir}/updatedb + +mkdir -p $RPM_BUILD_ROOT/etc/pam.d +mkdir -p $RPM_BUILD_ROOT%{_sbindir} + +%if %{sqlite} +mkdir -p $RPM_BUILD_ROOT%{sqlite_bindir} +%endif + +make DESTDIR=$RPM_BUILD_ROOT install + +# bsnapshot +make DESTDIR=$RPM_BUILD_ROOT -C ../bacula-%{_version}/src/tools/ install-bsnapshot + +%if %{usesystemd} +make DESTDIR=$RPM_BUILD_ROOT -C platforms/systemd install-dir install-service +%endif + +# Remove docs for programs that are depreciated +rm -f $RPM_BUILD_ROOT%{_mandir}/man1/bacula-bgnome-console.1.%{manpage_ext} +rm -f $RPM_BUILD_ROOT%{_mandir}/man1/bacula-bwxconsole.1.%{manpage_ext} +rm -f $RPM_BUILD_ROOT%{_mandir}/man1/bacula-tray-monitor.1.%{manpage_ext} +rm -f $RPM_BUILD_ROOT%{script_dir}/gconsole + +# Remove docs for programs we do not distribute +rm -f $RPM_BUILD_ROOT%{_mandir}/man8/bpluginfo.8.%{manpage_ext} + +# Remove storage-ctl packaged in shstore rpm +rm -f $RPM_BUILD_ROOT%{script_dir}/storage-ctl +rm -f $RPM_BUILD_ROOT%{script_dir}/storage-ctl.conf + +# fixme - make installs the mysql scripts for sqlite build +%if %{sqlite} +rm -f $RPM_BUILD_ROOT%{script_dir}/startmysql +rm -f $RPM_BUILD_ROOT%{script_dir}/stopmysql +rm -f $RPM_BUILD_ROOT%{script_dir}/grant_mysql_privileges +%endif + +# fixme - make installs the mysql scripts for postgresql build +%if %{postgresql} +rm -f $RPM_BUILD_ROOT%{script_dir}/startmysql +rm -f $RPM_BUILD_ROOT%{script_dir}/stopmysql +%endif + +# Remove symlinks +#rm -f 
$RPM_BUILD_ROOT%{_libdir}/libbaccats.so +#rm -f $RPM_BUILD_ROOT%{_libdir}/libbaccats-%{version}.so +rm -f $RPM_BUILD_ROOT%{_libdir}/libbacsd.la + +# install the init scripts +%if !%{usesystemd} +%if %{suse} +cp -p platforms/suse/bacula-dir $RPM_BUILD_ROOT/etc/init.d/bacula-dir +cp -p platforms/suse/bacula-fd $RPM_BUILD_ROOT/etc/init.d/bacula-fd +cp -p platforms/suse/bacula-sd $RPM_BUILD_ROOT/etc/init.d/bacula-sd +%endif # suse +%if %{mdk} +cp -p platforms/mandrake/bacula-dir $RPM_BUILD_ROOT/etc/init.d/bacula-dir +cp -p platforms/mandrake/bacula-fd $RPM_BUILD_ROOT/etc/init.d/bacula-fd +cp -p platforms/mandrake/bacula-sd $RPM_BUILD_ROOT/etc/init.d/bacula-sd +%endif # mdk +%if ! %{suse} && ! %{mdk} +cp -p platforms/redhat/bacula-dir $RPM_BUILD_ROOT/etc/init.d/bacula-dir +cp -p platforms/redhat/bacula-fd $RPM_BUILD_ROOT/etc/init.d/bacula-fd +cp -p platforms/redhat/bacula-sd $RPM_BUILD_ROOT/etc/init.d/bacula-sd +%endif # !suse && !mdk +chmod 0754 $RPM_BUILD_ROOT/etc/init.d/* +%endif # ! usesystemd + +%if %{client_only} +rm -f $RPM_BUILD_ROOT/etc/init.d/bacula-dir +rm -f $RPM_BUILD_ROOT/etc/init.d/bacula-sd +rm -f $RPM_BUILD_ROOT%{script_dir}/breload +rm -f $RPM_BUILD_ROOT%{script_dir}/manual_prune.pl +rm -f $RPM_BUILD_ROOT%{systemd_dir}/bacula-dir.service +rm -f $RPM_BUILD_ROOT%{systemd_dir}/bacula-sd.service +%endif + +# install sqlite +%if %{sqlite} +cp -p %{depkgs}/sqlite3/sqlite3 $RPM_BUILD_ROOT%{sqlite_bindir}/sqlite3 +cp -p %{depkgs}/sqlite3/sqlite3.h $RPM_BUILD_ROOT%{sqlite_bindir}/sqlite3.h +cp -p %{depkgs}/sqlite3/libsqlite3.a $RPM_BUILD_ROOT%{sqlite_bindir}/libsqlite3.a +%endif + + +# install the logrotate file +cp -p scripts/logrotate $RPM_BUILD_ROOT/etc/logrotate.d/bacula + +# install the updatedb scripts +cp -p updatedb/* $RPM_BUILD_ROOT%{script_dir}/updatedb/ + +# install specific scripts + +%if ! 
%{client_only} +# install the sample-query.sql file +cp -p examples/sample-query.sql $RPM_BUILD_ROOT%{script_dir}/sample-query.sql + +# install the logwatch scripts +cp -p scripts/logwatch/bacula $RPM_BUILD_ROOT%{logwatch_dir}/scripts/services/bacula +cp -p scripts/logwatch/applybaculadate $RPM_BUILD_ROOT%{logwatch_dir}/scripts/shared/applybaculadate +cp -p scripts/logwatch/logfile.bacula.conf $RPM_BUILD_ROOT%{logwatch_dir}/conf/logfiles/bacula.conf +cp -p scripts/logwatch/services.bacula.conf $RPM_BUILD_ROOT%{logwatch_dir}/conf/services/bacula.conf +chmod 755 $RPM_BUILD_ROOT%{logwatch_dir}/scripts/services/bacula +chmod 755 $RPM_BUILD_ROOT%{logwatch_dir}/scripts/shared/applybaculadate +chmod 644 $RPM_BUILD_ROOT%{logwatch_dir}/conf/logfiles/bacula.conf +chmod 644 $RPM_BUILD_ROOT%{logwatch_dir}/conf/services/bacula.conf +%endif + +# now clean up permissions that are left broken by the install +chmod o-rwx $RPM_BUILD_ROOT%{working_dir} + +# fix me - building enable-client-only installs files not included in bacula-client package +%if %{client_only} +# Program docs not installed on client +rm -f $RPM_BUILD_ROOT%{_mandir}/man8/bacula-dir.8.%{manpage_ext} +rm -f $RPM_BUILD_ROOT%{_mandir}/man8/bacula-sd.8.%{manpage_ext} +rm -f $RPM_BUILD_ROOT%{_mandir}/man8/bcopy.8.%{manpage_ext} +rm -f $RPM_BUILD_ROOT%{_mandir}/man8/bextract.8.%{manpage_ext} +rm -f $RPM_BUILD_ROOT%{_mandir}/man8/bls.8.%{manpage_ext} +rm -f $RPM_BUILD_ROOT%{_mandir}/man8/bscan.8.%{manpage_ext} +rm -f $RPM_BUILD_ROOT%{_mandir}/man8/btape.8.%{manpage_ext} +rm -f $RPM_BUILD_ROOT%{_mandir}/man8/dbcheck.8.%{manpage_ext} +rm -f $RPM_BUILD_ROOT%{_mandir}/man8/bregex.8.%{manpage_ext} +rm -f $RPM_BUILD_ROOT%{_mandir}/man8/bwild.8.%{manpage_ext} +rm -f $RPM_BUILD_ROOT%{_mandir}/man1/bsmtp.1.%{manpage_ext} +rm -f $RPM_BUILD_ROOT%{script_dir}/bacula +rm -f $RPM_BUILD_ROOT%{script_dir}/bacula_config +rm -f $RPM_BUILD_ROOT%{script_dir}/bacula-ctl-dir +rm -f $RPM_BUILD_ROOT%{script_dir}/bacula-ctl-sd +rm -f $RPM_BUILD_ROOT%{script_dir}/disk-changer +rm -f $RPM_BUILD_ROOT%{script_dir}/dvd-handler +rm -f $RPM_BUILD_ROOT%{script_dir}/mtx-changer +rm -f $RPM_BUILD_ROOT%{script_dir}/startmysql +rm -f $RPM_BUILD_ROOT%{script_dir}/stopmysql +rm -rf $RPM_BUILD_ROOT%{script_dir}/updatedb +rm -f $RPM_BUILD_ROOT%{script_dir}/bconsole +rm -f $RPM_BUILD_ROOT%{script_dir}/mtx-changer.conf +rm -f $RPM_BUILD_ROOT%{_sbindir}/bacula + +%endif + +%clean +[ "$RPM_BUILD_ROOT" != "/" ] && rm -rf "$RPM_BUILD_ROOT" +rm -f $RPM_BUILD_DIR/Release_Notes-%{version}-%{release}.txt + + +%if %{mysql} +# MySQL specific files +%files mysql +%defattr(-, root, root) +%attr(-, root, %{daemon_group}) %{script_dir}/create_mysql_database +%attr(-, root, %{daemon_group}) %{script_dir}/drop_mysql_database +%attr(-, root, %{daemon_group}) %{script_dir}/make_mysql_tables +%attr(-, root, %{daemon_group}) %{script_dir}/drop_mysql_tables +%attr(-, root, %{daemon_group}) %{script_dir}/update_mysql_tables +%attr(-, root, %{daemon_group}) %{script_dir}/grant_mysql_privileges +%{_libdir}/libbaccats* +%{_libdir}/libbacsql* +%{_libdir}/libbacsd*.so +%endif + + +%if %{postgresql} +%files postgresql +%defattr(-,root,root) +%attr(755, root, %{daemon_group}) %{script_dir}/create_postgresql_database +%attr(755, root, %{daemon_group}) %{script_dir}/drop_postgresql_database +%attr(755, root, %{daemon_group}) %{script_dir}/make_postgresql_tables +%attr(755, root, %{daemon_group}) %{script_dir}/drop_postgresql_tables +%attr(755, root, %{daemon_group}) %{script_dir}/update_postgresql_tables +%attr(755, 
root, %{daemon_group}) %{script_dir}/grant_postgresql_privileges +%{_libdir}/libbaccats* +%{_libdir}/libbacsql* +%{_libdir}/libbacsd*.so +%endif + +# The rest is DB backend independent + +%if ! %{client_only} +%attr(-, root, %{daemon_group}) %dir %{script_dir} +%attr(-, root, %{daemon_group}) %dir %{sysconf_dir} +%attr(-, root, %{daemon_group}) %{script_dir}/bacula +%attr(-, root, %{daemon_group}) %{script_dir}/bacula_config +%attr(-, root, %{daemon_group}) %{script_dir}/bconsole +%attr(755, root, %{daemon_group}) %{script_dir}/create_bacula_database +%attr(755, root, %{daemon_group}) %{script_dir}/drop_bacula_database +%attr(755, root, %{daemon_group}) %{script_dir}/grant_bacula_privileges +%attr(755, root, %{daemon_group}) %{script_dir}/make_bacula_tables +%attr(755, root, %{daemon_group}) %{script_dir}/drop_bacula_tables +%attr(755, root, %{daemon_group}) %{script_dir}/update_bacula_tables +%attr(-, root, %{daemon_group}) %{script_dir}/make_catalog_backup +%attr(-, root, %{daemon_group}) %{script_dir}/make_catalog_backup.pl +%attr(-, root, %{daemon_group}) %{script_dir}/delete_catalog_backup +%attr(-, root, %{daemon_group}) %{script_dir}/btraceback.dbx +%attr(-, root, %{daemon_group}) %{script_dir}/btraceback.gdb +%attr(-, root, %{daemon_group}) %{script_dir}/btraceback.mdb +%attr(-, root, %{daemon_group}) %{script_dir}/disk-changer +%attr(-, root, %{daemon_group}) %{script_dir}/bacula-ctl-dir +%attr(-, root, %{daemon_group}) %{script_dir}/bacula-ctl-fd +%attr(-, root, %{daemon_group}) %{script_dir}/bacula-ctl-sd +%attr(-, root, %{daemon_group}) %{script_dir}/tapealert +%attr(-, root, %{daemon_group}) %{script_dir}/baculabackupreport + +%attr(-, root, %{daemon_group}) %{plugin_dir}/bpipe-fd.so +%attr(-, root, %{daemon_group}) %{_sbindir}/dbcheck +%attr(-, root, %{storage_daemon_group}) %{script_dir}/mtx-changer +%attr(-, root, %{storage_daemon_group}) %config(noreplace) %{script_dir}/mtx-changer.conf + +%if %{usesystemd} +%attr(-, root, %{daemon_group}) %{systemd_dir}/bacula-dir.service +%attr(-, root, %{daemon_group}) %{systemd_dir}/bacula-fd.service +%attr(-, root, %{daemon_group}) %{systemd_dir}/bacula-sd.service +%else +%attr(-, root, %{daemon_group}) /etc/init.d/bacula-dir +%attr(-, root, %{daemon_group}) /etc/init.d/bacula-fd +%attr(-, root, %{storage_daemon_group}) /etc/init.d/bacula-sd +%endif + +/etc/logrotate.d/bacula +%{logwatch_dir}/scripts/services/bacula +%{logwatch_dir}/scripts/shared/applybaculadate +%attr(-, root, %{daemon_group}) %config(noreplace) %{sysconf_dir}/bacula-dir.conf +%attr(-, root, %{daemon_group}) %config(noreplace) %{sysconf_dir}/bacula-fd.conf +%attr(-, root, %{storage_daemon_group}) %config(noreplace) %{sysconf_dir}/bacula-sd.conf +%attr(-, root, %{daemon_group}) %config(noreplace) %{sysconf_dir}/bconsole.conf +%attr(-, root, %{daemon_group}) %config(noreplace) %{logwatch_dir}/conf/logfiles/bacula.conf +%attr(-, root, %{daemon_group}) %config(noreplace) %{logwatch_dir}/conf/services/bacula.conf +%attr(-, root, %{daemon_group}) %{script_dir}/sample-query.sql +%attr(-, root, %{daemon_group}) %{script_dir}/query.sql + +%attr(-, %{storage_daemon_user}, %{daemon_group}) %dir %{working_dir} + +%attr(-, root, %{daemon_group}) %{_sbindir}/bacula-dir +%attr(-, root, %{daemon_group}) %{_sbindir}/bacula-sd +%attr(-, root, %{daemon_group}) %{_sbindir}/btraceback +%attr(-, root, %{daemon_group}) %{_sbindir}/bconsole +%attr(-, root, %{daemon_group}) %{_sbindir}/bbconsjson +%attr(-, root, %{daemon_group}) %{_sbindir}/bsmtp +%attr(-, root, %{daemon_group}) 
%{_sbindir}/bscan +%attr(-, root, %{daemon_group}) %{_sbindir}/btape +%attr(-, root, %{daemon_group}) %{_sbindir}/bdirjson +%attr(-, root, %{daemon_group}) %{_sbindir}/bsdjson + +%attr(755, root, root) %{_sbindir}/bsnapshot +%attr(755, root, root) %{_sbindir}/bfdjson + +%{_sbindir}/bacula-fd +%{_sbindir}/bacula +%{_sbindir}/bcopy +%{_sbindir}/bextract +%{_sbindir}/bls +%{_sbindir}/bregex +%{_sbindir}/bwild +%{_mandir}/man8/bacula-fd.8.%{manpage_ext} +%{_mandir}/man8/bacula-dir.8.%{manpage_ext} +%{_mandir}/man8/bacula-sd.8.%{manpage_ext} +%{_mandir}/man8/bacula.8.%{manpage_ext} +%{_mandir}/man8/bconsole.8.%{manpage_ext} +%{_mandir}/man8/bcopy.8.%{manpage_ext} +%{_mandir}/man8/bextract.8.%{manpage_ext} +%{_mandir}/man8/bls.8.%{manpage_ext} +%{_mandir}/man8/bscan.8.%{manpage_ext} +%{_mandir}/man8/btape.8.%{manpage_ext} +%{_mandir}/man8/btraceback.8.%{manpage_ext} +%{_mandir}/man8/dbcheck.8.%{manpage_ext} +%{_mandir}/man8/bregex.8.%{manpage_ext} +%{_mandir}/man8/bwild.8.%{manpage_ext} +%{_mandir}/man1/bsmtp.1.%{manpage_ext} +%{_mandir}/man1/bat.1.%{manpage_ext} +%_prefix/share/doc/* + +%attr(-, root, %{daemon_group}) %{script_dir}/bacula-tray-monitor.desktop + +%doc ../Release_Notes-%{version}-%{release}.txt + +%endif + +%if %{mysql} +%pre mysql +# test for bacula database older than version 13 +# note: this ASSUMES no password has been set for bacula database +DB_VER=`mysql 2>/dev/null bacula -e 'select * from Version;'|tail -n 1` +%endif + +%if %{sqlite} +%pre sqlite +# are we upgrading from sqlite to sqlite3? +if [ -s %{working_dir}/bacula.db ] && [ -s %{sqlite_bindir}/sqlite ];then + echo "This version of bacula-sqlite involves an upgrade to sqlite3." + echo "Your catalog database file is not compatible with sqlite3, thus" + echo "you will need to dump the data, delete the old file, and re-run" + echo "this rpm upgrade." + echo "" + echo "Backing up your current database..." + echo ".dump" | %{sqlite_bindir}/sqlite %{working_dir}/bacula.db > %{working_dir}/bacula_backup.sql + mv %{working_dir}/bacula.db %{working_dir}/bacula.db.old + echo "Your catalog data has been saved in %{working_dir}/bacula_backup.sql and your" + echo "catalog file has been renamed %{working_dir}/bacula.db.old." + echo "" + echo "Please re-run this rpm package upgrade." + echo "After the upgrade is complete, restore your catalog" + echo "with the following commands:" + echo "%{script_dir}/drop_sqlite3_tables" + echo "cd %{working_dir}" + echo "%{sqlite_bindir}/sqlite3 $* bacula.db < bacula_backup.sql" + echo "chown bacula.bacula bacula.db" + exit 1 +fi +# test for bacula database older than version 12 and sqlite3 +if [ -s %{working_dir}/bacula.db ] && [ -s %{sqlite_bindir}/sqlite3 ];then + DB_VER=`echo "select * from Version;" | %{sqlite_bindir}/sqlite3 2>/dev/null %{working_dir}/bacula.db | tail -n 1` +%endif + +%if %{postgresql} +%pre postgresql +DB_VER=`echo 'SELECT * FROM Version LIMIT 1;' | su - postgres -c 'psql bacula' 2>/dev/null | tail -3 | head -1` +%endif + +%if ! %{client_only} +if [ -n "$DB_VER" ] && [ "$DB_VER" -lt "12" ]; then + echo "This bacula upgrade will update a bacula database from version 12 to 1014." + echo "You appear to be running database version $DB_VER. You must first update" + echo "your database to version 12 and then install this upgrade. The alternative" + echo "is to use %{script_dir}/drop_%{db_backend}_tables to delete all your your current" + echo "catalog information, then do the upgrade. 
Information on updating a" + echo "database older than version 12 can be found in the release notes." + exit 1 +fi +%endif + +%if %{sqlite} +fi +%endif + +%if ! %{client_only} +# check for and copy %{sysconf_dir}/console.conf to bconsole.conf +if [ -s %{sysconf_dir}/console.conf ];then + cp -p %{sysconf_dir}/console.conf %{sysconf_dir}/bconsole.conf +fi + +# create the daemon users and groups +# first create the groups if they don't exist +HAVE_BACULA=`grep %{daemon_group} %{group_file} 2>/dev/null` +if [ -z "$HAVE_BACULA" ]; then + %{groupadd} -r %{daemon_group} > /dev/null 2>&1 + echo "The group %{daemon_group} has been added to %{group_file}." + echo "See the manual chapter \"Running Bacula\" for details." +fi +HAVE_BACULA=`grep %{storage_daemon_group} %{group_file} 2>/dev/null` +if [ -z "$HAVE_BACULA" ]; then + %{groupadd} -r %{storage_daemon_group} > /dev/null 2>&1 + echo "The group %{storage_daemon_group} has been added to %{group_file}." + echo "See the manual chapter \"Running Bacula\" for details." +fi +# now create the users if they do not exist +# we do not use the -g option allowing the primary group to be set to system default +# this will be a unique group on redhat type systems or the group users on some systems +HAVE_BACULA=`grep %{storage_daemon_user} %{user_file} 2>/dev/null` +if [ -z "$HAVE_BACULA" ]; then + %{useradd} -r -c "Bacula" -d %{working_dir} -g %{storage_daemon_group} -M -s /sbin/nologin %{storage_daemon_user} > /dev/null 2>&1 + echo "The user %{storage_daemon_user} has been added to %{user_file}." + echo "See the manual chapter \"Running Bacula\" for details." +fi +HAVE_BACULA=`grep %{director_daemon_user} %{user_file} 2>/dev/null` +if [ -z "$HAVE_BACULA" ]; then + %{useradd} -r -c "Bacula" -d %{working_dir} -g %{daemon_group} -M -s /sbin/nologin %{director_daemon_user} > /dev/null 2>&1 + echo "The user %{director_daemon_user} has been added to %{user_file}." + echo "See the manual chapter \"Running Bacula\" for details." +fi +HAVE_BACULA=`grep %{file_daemon_user} %{user_file} 2>/dev/null` +if [ -z "$HAVE_BACULA" ]; then + %{useradd} -r -c "Bacula" -d %{working_dir} -g %{daemon_group} -M -s /sbin/nologin %{file_daemon_user} > /dev/null 2>&1 + echo "The user %{file_daemon_user} has been added to %{user_file}." + echo "See the manual chapter \"Running Bacula\" for details." +fi +# now we add the supplementary groups, this is ok to call even if the users already exist +# we only do this if the user is NOT root +IS_ROOT=%{director_daemon_user} +if [ "$IS_ROOT" != "root" ]; then +%{usermod} -G %{daemon_group} %{director_daemon_user} +fi +IS_ROOT=%{storage_daemon_user} +if [ "$IS_ROOT" != "root" ]; then +%{usermod} -G %{daemon_group},%{storage_daemon_group} %{storage_daemon_user} +fi +IS_ROOT=%{file_daemon_user} +if [ "$IS_ROOT" != "root" ]; then +%{usermod} -G %{daemon_group} %{file_daemon_user} +fi +%endif + +%if %{mysql} +%post mysql +%endif +%if %{sqlite} +%post sqlite +%endif +%if %{postgresql} +%post postgresql +%endif +%if ! %{client_only} +# add our links +if [ "$1" -ge 1 ] ; then +%if %{suse} && %{mysql} + %{service_enable} mysql +%endif +%if %{suse} && %{postgresql} + %{service_enable} postgresql +%endif + %{service_enable} bacula-dir + %{service_enable} bacula-fd + %{service_enable} bacula-sd +fi +%endif + +if [ -d %{sysconf_dir} ]; then + cd %{sysconf_dir} + if [ ! 
-f .rpm.sed ]; then + (umask 0177 + echo "# This file is used to ensure that all passwords will" > .rpm.sed + echo "# match between configuration files" >> .rpm.sed + ) + for string in XXX_REPLACE_WITH_DIRECTOR_PASSWORD_XXX XXX_REPLACE_WITH_CLIENT_PASSWORD_XXX XXX_REPLACE_WITH_STORAGE_PASSWORD_XXX XXX_REPLACE_WITH_DIRECTOR_MONITOR_PASSWORD_XXX XXX_REPLACE_WITH_CLIENT_MONITOR_PASSWORD_XXX XXX_REPLACE_WITH_STORAGE_MONITOR_PASSWORD_XXX; do + pass=`openssl rand -base64 33` + echo "s@${string}@${pass}@g" >> .rpm.sed + done + fi + host=`hostname -s` + if [ "$host" = "" ]; then + host=localhost + fi + for file in *.conf; do + sed -f .rpm.sed $file > $file.new + sed "s@XXX_HOSTNAME_XXX@${host}@g" $file.new > $file + rm -f $file.new + done +fi + + +%if %{mysql} + +#check, if mysql can be called successfully at all +if mysql 2>/dev/null bacula -e 'select * from Version;' ; then + + # test for an existing database + # note: this ASSUMES no password has been set for bacula database + DB_VER=`mysql 2>/dev/null bacula -e 'select * from Version;'|tail -n 1` + + # grant privileges and create tables if they do not exist + if [ -z "$DB_VER" ]; then + echo "Hmm, it doesn't look like you have an existing database." + echo "Granting privileges for MySQL user bacula..." + %{script_dir}/grant_mysql_privileges + echo "Creating MySQL bacula database..." + %{script_dir}/create_mysql_database + echo "Creating bacula tables..." + %{script_dir}/make_mysql_tables + + elif [ "$DB_VER" -ge "12" -a "$DB_VER" -lt "1015" ]; then + echo "This release requires an upgrade to your bacula database." + echo "Backing up your current database..." + mysqldump -f --opt bacula | bzip2 > %{working_dir}/bacula_backup.sql.bz2 + echo "Upgrading bacula database ..." + %{script_dir}/update_mysql_tables + echo "If bacula works correctly you can remove the backup file %{working_dir}/bacula_backup.sql.bz2" + + fi +fi +%endif + +%if %{sqlite} +# test for an existing database +if [ -s %{working_dir}/bacula.db ]; then + DB_VER=`echo "select * from Version;" | %{sqlite_bindir}/sqlite3 2>/dev/null %{working_dir}/bacula.db | tail -n 1` + # check to see if we need to upgrade a 3.x database + if [ "$DB_VER" -le "13" ] && [ "$DB_VER" -ge "12" ]; then + echo "This release requires an upgrade to your bacula database." + echo "Backing up your current database..." + echo ".dump" | %{sqlite_bindir}/sqlite3 %{working_dir}/bacula.db | bzip2 > %{working_dir}/bacula_backup.sql.bz2 + echo "Upgrading bacula database ..." + %{script_dir}/update_sqlite3_tables + echo "If bacula works correctly you can remove the backup file %{working_dir}/bacula_backup.sql.bz2" + fi +else + # create the database and tables + echo "Hmm, doesn't look like you have an existing database." + echo "Creating SQLite database..." + %{script_dir}/create_sqlite3_database + echo "Creating the SQLite tables..." + %{script_dir}/make_sqlite3_tables + chown %{director_daemon_user}.%{daemon_group} %{working_dir}/bacula.db +fi +%endif + +%if %{postgresql} +# check if psql can be called successfully at all +if echo 'select * from Version;' | su - postgres -c 'psql bacula' 2>/dev/null; then + + # test for an existing database + # note: this ASSUMES no password has been set for bacula database + DB_VER=`echo 'SELECT * FROM Version LIMIT 1;' | su - postgres -c 'psql bacula' 2>/dev/null | tail -3 | head -1` + + # grant privileges and create tables if they do not exist + if [ -z "$DB_VER" ]; then + echo "Hmm, doesn't look like you have an existing database." + echo "Creating PostgreSQL bacula database..." 
+ su - postgres -c %{script_dir}/create_postgresql_database + echo "Creating bacula tables..." + su - postgres -c %{script_dir}/make_postgresql_tables + echo "Granting privileges for PostgreSQL user bacula..." + su - postgres -c %{script_dir}/grant_postgresql_privileges + + elif [ "$DB_VER" -ge "12" -a "$DB_VER" -lt "1015" ]; then + echo "This release requires an upgrade to your bacula database." + echo "Backing up your current database..." + su - postgres -c 'pg_dump bacula' | bzip2 > %{working_dir}/bacula_backup.sql.bz2 + echo "Upgrading bacula database ..." + su - postgres -c %{script_dir}/update_postgresql_tables + echo "If bacula works correctly you can remove the backup file %{working_dir}/bacula_backup.sql.bz2" + echo "Granting privileges for PostgreSQL user bacula..." + su - postgres -c %{script_dir}/grant_postgresql_privileges + + fi +fi +%endif + +%if ! %{client_only} +if [ -d %{sysconf_dir} ]; then + cd %{sysconf_dir} + if [ ! -f .rpm.sed ]; then + (umask 0177 + echo "# This file is used to ensure that all passwords will" > .rpm.sed + echo "# match between configuration files" >> .rpm.sed + ) + for string in XXX_REPLACE_WITH_DIRECTOR_PASSWORD_XXX XXX_REPLACE_WITH_CLIENT_PASSWORD_XXX XXX_REPLACE_WITH_STORAGE_PASSWORD_XXX XXX_REPLACE_WITH_DIRECTOR_MONITOR_PASSWORD_XXX XXX_REPLACE_WITH_CLIENT_MONITOR_PASSWORD_XXX XXX_REPLACE_WITH_STORAGE_MONITOR_PASSWORD_XXX; do + pass=`openssl rand -base64 33` + echo "s@${string}@${pass}@g" >> .rpm.sed + done + fi + host=`hostname -s` + if [ "$host" = "" ]; then + host=localhost + fi + for file in *.conf; do + sed -f .rpm.sed $file > $file.new + sed "s@XXX_HOSTNAME_XXX@${host}@g" $file.new > $file + rm -f $file.new + done +fi +%endif + + +%if %{mysql} +%preun mysql +%endif +%if %{sqlite} +%preun sqlite +%endif +%if %{postgresql} +%preun postgresql +%endif + +%if ! %{client_only} +# delete our links +if [ $1 = 0 ]; then + %{service_disable} bacula-dir + %{service_disable} bacula-fd + %{service_disable} bacula-sd +fi +%endif + +%files client +%defattr(-,root,root) +%attr(-, root, %{daemon_group}) %dir %{script_dir} +%attr(-, root, %{daemon_group}) %dir %{plugin_dir} +%attr(-, root, %{daemon_group}) %dir %{sysconf_dir} + +# SD/DIR might write here +%attr(-, %{storage_daemon_user}, %{daemon_group}) %dir %{log_dir} + +%if %{usesystemd} +%attr(-, root, %{daemon_group}) %{systemd_dir}/bacula-fd.service +%else +%{script_dir}/bacula-ctl-fd +/etc/init.d/bacula-fd +%endif + +/etc/logrotate.d/bacula + +%attr(-, root, %{daemon_group}) %config(noreplace) %{sysconf_dir}/bacula-fd.conf +%attr(-, root, %{daemon_group}) %config(noreplace) %{sysconf_dir}/bconsole.conf +%attr(-, root, %{daemon_group}) %dir %{working_dir} + +%{_sbindir}/bacula-fd +%{_sbindir}/btraceback +%attr(-, root, %{daemon_group}) %{script_dir}/btraceback.gdb +%attr(-, root, %{daemon_group}) %{script_dir}/btraceback.dbx +%attr(-, root, %{daemon_group}) %{script_dir}/btraceback.mdb +%attr(-, root, %{daemon_group}) %{plugin_dir}/bpipe-fd.so +%{_sbindir}/bconsole +%{_mandir}/man8/bacula-fd.8.%{manpage_ext} +%{_mandir}/man8/bacula.8.%{manpage_ext} +%{_mandir}/man8/bconsole.8.%{manpage_ext} +%{_mandir}/man8/btraceback.8.%{manpage_ext} +%{_mandir}/man1/bat.1.%{manpage_ext} +%_prefix/share/doc/* + +%pre client +# create the daemon group and user +HAVE_BACULA=`grep %{daemon_group} %{group_file} 2>/dev/null` +if [ -z "$HAVE_BACULA" ]; then + %{groupadd} -r %{daemon_group} > /dev/null 2>&1 + echo "The group %{daemon_group} has been added to %{group_file}." 
+ echo "See the manual chapter \"Running Bacula\" for details." +fi +# we do not use the -g option allowing the primary group to be set to system default +# this will be a unique group on redhat type systems or the group users on some systems +HAVE_BACULA=`grep %{file_daemon_user} %{user_file} 2>/dev/null` +if [ -z "$HAVE_BACULA" ]; then + %{useradd} -r -c "Bacula" -d %{working_dir} -g %{daemon_group} -M -s /sbin/nologin %{file_daemon_user} > /dev/null 2>&1 + echo "The user %{file_daemon_user} has been added to %{user_file}." + echo "See the manual chapter \"Running Bacula\" for details." +fi +# now we add the supplementary group, this is ok to call even if the user already exists +# we only do this if the user is NOT root +IS_ROOT=%{file_daemon_user} +if [ "$IS_ROOT" != "root" ]; then +%{usermod} -G %{daemon_group} %{file_daemon_user} +fi + +%post client +# add our link +if [ "$1" -ge 1 ] ; then + %{service_enable} bacula-fd +fi + +if [ -d %{sysconf_dir} ]; then + cd %{sysconf_dir} + if [ ! -f .rpm.sed ]; then + (umask 0177 + echo "# This file is used to ensure that all passwords will" > .rpm.sed + echo "# match between configuration files" >> .rpm.sed + ) + for string in XXX_REPLACE_WITH_DIRECTOR_PASSWORD_XXX XXX_REPLACE_WITH_CLIENT_PASSWORD_XXX XXX_REPLACE_WITH_STORAGE_PASSWORD_XXX XXX_REPLACE_WITH_DIRECTOR_MONITOR_PASSWORD_XXX XXX_REPLACE_WITH_CLIENT_MONITOR_PASSWORD_XXX XXX_REPLACE_WITH_STORAGE_MONITOR_PASSWORD_XXX; do + pass=`openssl rand -base64 33` + echo "s@${string}@${pass}@g" >> .rpm.sed + done + fi + host=`hostname -s` + if [ "$host" = "" ]; then + host=localhost + fi + for file in *.conf; do + sed -f .rpm.sed $file > $file.new + sed "s@XXX_HOSTNAME_XXX@${host}@g" $file.new > $file + rm -f $file.new + done +fi + +%preun client +# delete our link +if [ $1 = 0 ]; then + %{service_disable} bacula-fd +fi + +%files libs +%defattr(-,root,root) +%{_libdir}/libbac-* +%{_libdir}/libbac.* +%{_libdir}/libbaccfg* +%{_libdir}/libbacfind* + +%post libs +/sbin/ldconfig +exit 0 + +%postun libs +/sbin/ldconfig +exit 0 + +%if ! %{client_only} +%files updatedb +%defattr(-,root,%{daemon_group}) +%{script_dir}/updatedb/* + +%pre updatedb +# create the daemon group +HAVE_BACULA=`grep %{daemon_group} %{group_file} 2>/dev/null` +if [ -z "$HAVE_BACULA" ]; then + %{groupadd} -r %{daemon_group} > /dev/null 2>&1 + echo "The group %{daemon_group} has been added to %{group_file}." + echo "See the manual chapter \"Running Bacula\" for details." 
+fi + +%post updatedb +echo "The database update scripts were installed to %{script_dir}/updatedb" +%endif + +%changelog diff --git a/platforms/rpms/suse/bacula-aligned.spec.in b/platforms/rpms/suse/bacula-aligned.spec.in new file mode 100644 index 00000000..b09bebc8 --- /dev/null +++ b/platforms/rpms/suse/bacula-aligned.spec.in @@ -0,0 +1,142 @@ +# Bacula RPM spec file +# +# Copyright (C) 2013-2014 Bacula Systems SA + +# Platform Build Configuration +# TODO: merge all plugins into one spec file + +# basic defines for every build +%define _release @RELEASE@ +%define _version @VERSION@ +%define product bacula +%define _lsm @LSMDATE@ +%define _packager Kern Sibbald +%define manpage_ext gz + +# Don't strip binaries +%define __os_install_post %{nil} +%define __debug_install_post %{nil} +%define debug_package %{nil} + +# Installation Directory locations +%define _prefix /opt/bacula +%define _sbindir /opt/bacula/bin +%define _bindir /opt/bacula/bin +%define _subsysdir /opt/bacula/working +%define sqlite_bindir /opt/bacula/sqlite +%define _mandir /usr/share/man +%define sysconf_dir /opt/bacula/etc +%define scripts_dir /opt/bacula/scripts +%define working_dir /opt/bacula/working +%define pid_dir /opt/bacula/working +%define plugin_dir /opt/bacula/plugins +%define lib_dir /opt/bacula/lib + +# Daemon user:group Don't change them unless you know what you are doing +%define director_daemon_user bacula +%define storage_daemon_user bacula +%define file_daemon_user root +%define daemon_group bacula +# group that has write access to tape devices, usually disk on Linux +%define storage_daemon_group disk + +#-------------------------------------------------------------------------- +# it should not be necessary to change anything below here for a release +# except for patch macros in the setup section +#-------------------------------------------------------------------------- + +%define base_package_name bacula + +%{?contrib_packager:%define _packager %{contrib_packager}} + +Summary: Bacula - The Network Backup Solution +Name: %{base_package_name}-aligned +Version: %{_version} +Release: %{_release} +Group: System Environment/Daemons +License: AGPLv3 +BuildRoot: %{_tmppath}/%{name}-root +URL: http://www.bacula.org/ +Vendor: The Bacula Team +Packager: %{_packager} +Prefix: %{_prefix} +Distribution: Bacula Aligned Volumes + +Source0: http://www.prdownloads.sourceforge.net/bacula/%{product}-%{version}.tar.gz + +Requires: bacula-sd, %{base_package_name}-libs = %{_version} + +# define the basic package description +%define blurb Bacula Aligned - The Network Backup Solution. +%define blurb2 Bacula Aligned plugin allows you to backup to a +%define blurb3 Volume with aligned blocks permitting easier deduplication. + +Summary: Bacula Aligned Volume Storage - The Network Backup Solution +Group: System Environment/Daemons + +%description +%{blurb} + +%{blurb2} +%{blurb3} + +This is Bacula Aligned Volumes Storage plugin. 
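Note on the scriptlets above: every %post in these specs repeats the same placeholder-substitution pattern. Random passwords are generated once into a sed script (.rpm.sed) so that all *.conf files end up with matching values, and XXX_HOSTNAME_XXX is then replaced with the short hostname of the installation host. A condensed standalone sketch of that pattern follows (plain sh, with /tmp/bacula-etc as a hypothetical stand-in for %{sysconf_dir}; it is not part of the spec itself):

    #!/bin/sh
    cd /tmp/bacula-etc                      # stand-in for %{sysconf_dir}
    if [ ! -f .rpm.sed ]; then
        ( umask 0177; : > .rpm.sed )        # sed script created readable by root only
        for tag in XXX_REPLACE_WITH_DIRECTOR_PASSWORD_XXX XXX_REPLACE_WITH_CLIENT_PASSWORD_XXX; do
            pass=`openssl rand -base64 33`  # one random value per placeholder
            echo "s@${tag}@${pass}@g" >> .rpm.sed
        done
    fi
    host=`hostname -s`
    [ -z "$host" ] && host=localhost
    for f in *.conf; do
        sed -f .rpm.sed "$f" > "$f.new"                      # identical passwords in every file
        sed "s@XXX_HOSTNAME_XXX@${host}@g" "$f.new" > "$f"   # fill in the build-time hostname placeholder
        rm -f "$f.new"
    done

Because the sed script is kept in %{sysconf_dir}, a later package upgrade reuses the existing passwords instead of generating new ones.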
+ +%prep +%setup -T -D -n bacula-%{_version} -b 0 + +%build + +cd ../bacula-%{_version} +./configure \ + --prefix=%{_prefix} \ + --sbindir=%{_sbindir} \ + --sysconfdir=%{sysconf_dir} \ + --mandir=%{_mandir} \ + --with-scriptdir=%{scripts_dir} \ + --with-working-dir=%{working_dir} \ + --with-plugindir=%{plugin_dir} \ + --with-pid-dir=%{pid_dir} \ + --with-subsys-dir=%{_subsysdir} \ + --enable-smartalloc \ + --disable-bat \ + --enable-client-only \ + --with-dir-user=%{director_daemon_user} \ + --with-dir-group=%{daemon_group} \ + --with-sd-user=%{storage_daemon_user} \ + --with-sd-group=%{storage_daemon_group} \ + --with-fd-user=%{file_daemon_user} \ + --with-fd-group=%{daemon_group} \ + --with-basename="XXX_HOSTNAME_XXX" \ + --with-hostname="XXX_HOSTNAME_XXX" \ + --with-dir-password="XXX_REPLACE_WITH_DIRECTOR_PASSWORD_XXX" \ + --with-fd-password="XXX_REPLACE_WITH_CLIENT_PASSWORD_XXX" \ + --with-sd-password="XXX_REPLACE_WITH_STORAGE_PASSWORD_XXX" \ + --with-mon-dir-password="XXX_REPLACE_WITH_DIRECTOR_MONITOR_PASSWORD_XXX" \ + --with-mon-fd-password="XXX_REPLACE_WITH_CLIENT_MONITOR_PASSWORD_XXX" \ + --with-mon-sd-password="XXX_REPLACE_WITH_STORAGE_MONITOR_PASSWORD_XXX" + +make + +%install + +mkdir -p $RPM_BUILD_ROOT%{_sbindir} +mkdir -p $RPM_BUILD_ROOT%{plugin_dir} +mkdir -p $RPM_BUILD_ROOT%{scripts_dir} +mkdir -p $RPM_BUILD_ROOT%{lib_dir} + +make DESTDIR=$RPM_BUILD_ROOT -C ../bacula-%{_version}/src/stored install-aligned + +# Remove unneeded file(s) +rm -f $RPM_BUILD_ROOT%{plugin_dir}/bacula-sd-aligned-driver.so + +%files +%defattr(-,root,%{daemon_group}) +%attr(-, root, %{daemon_group}) %{plugin_dir}/bacula-sd-aligned-driver-%{_version}.so + +%clean +[ "$RPM_BUILD_ROOT" != "/" ] && rm -rf "$RPM_BUILD_ROOT" + +%changelog +* Mon Jul 3 2017 Davide Franco - 7.4.7-1 +- First version of Bacula aligned plugin rpm spec file diff --git a/platforms/rpms/suse/bacula-bat.spec.in b/platforms/rpms/suse/bacula-bat.spec.in new file mode 100644 index 00000000..b4eed6db --- /dev/null +++ b/platforms/rpms/suse/bacula-bat.spec.in @@ -0,0 +1,351 @@ +# +# Bacula RPM spec file +# +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# + +# Platform Build Configuration + +# basic defines for every build +%define _release @RELEASE@ +%define _version @VERSION@ +%define depkgs_qt_version @DEPKGS_QT_VERSION@ +%define product bacula + +# Don't strip binaries +%define __os_install_post %{nil} +%define __debug_install_post %{nil} +%define debug_package %{nil} + +# this is the Qt version in depkgs_qt +%define qt4ver @BQT_VERSION@ + +%define _packager Kern Sibbald + +%define manpage_ext gz + +# Force single file build +%define single_dir 1 +%{?single_dir_install:%define single_dir 1} + +# Installation Directory locations +%if %{single_dir} +%define _prefix /opt/bacula +%define _sbindir /opt/bacula/bin +%define _bindir /opt/bacula/bin +%define _subsysdir /opt/bacula/working +%define sqlite_bindir /opt/bacula/sqlite +%define _mandir /usr/share/man +%define docs_dir /opt/bacula/docs +%define archive_dir /opt/bacula/archive +%define sysconf_dir /opt/bacula/etc +%define script_dir /opt/bacula/scripts +%define working_dir /opt/bacula/working +%define pid_dir /opt/bacula/working +%define plugin_dir /opt/bacula/plugins +%define lib_dir /opt/bacula/lib +%else +%define _prefix /usr +%define _sbindir %_prefix/sbin +%define _bindir %_prefix/bin +%define _subsysdir /var/lock/subsys +%define sqlite_bindir %_libdir/bacula/sqlite +%define _mandir %_prefix/share/man +%define sysconf_dir /etc/bacula 
+%define script_dir %_libdir/bacula +%define working_dir /var/lib/bacula +%define pid_dir /var/run +%define plugin_dir %_libdir/bacula/plugins +%define lib_dir %_libdir/bacula/lib +%endif + +# Daemon user:group Don't change them unless you know what you are doing +%define director_daemon_user bacula +%define daemon_group bacula + +#-------------------------------------------------------------------------- +# it should not be necessary to change anything below here for a release +# except for patch macros in the setup section +#-------------------------------------------------------------------------- + +%{?contrib_packager:%define _packager %{contrib_packager}} + +%{expand: %%define gccver %(rpm -q --queryformat %%{version} gcc)} +%{expand: %%define gccrel %(rpm -q --queryformat %%{release} gcc)} + +%define staticqt 1 +%{?nobuild_staticqt:%define staticqt 0} + +# determine what platform we are building on +%define fedora 0 +%define suse 0 +%define mdk 0 + +%if %{_vendor} == redhat + %define fedora 1 + %define _dist %(cat /etc/redhat-release) +%endif +%if %{_vendor} == suse + %define suse 1 + %define _dist %(grep -i SuSE /etc/SuSE-release) +%endif +%if %{_vendor} == Mandriva + %define mdk 1 + %define _dist %(grep Mand /etc/mandrake-release) +%endif +%if ! %{fedora} && ! %{suse} && ! %{mdk} +%{error: Unknown platform. Please examine the spec file.} +exit 1 +%endif + +%define sqlite 0 +%{?build_sqlite:%define sqlite 1} + +%define base_package_name bacula + +Summary: Bacula - The Network Backup Solution +Name: %{base_package_name}-bat +Version: %{_version} +Release: %{_release} +Group: System Environment/Daemons +License: AGPLv3 +BuildRoot: %{_tmppath}/%{name}-root +URL: http://www.bacula.org/ +Vendor: The Bacula Team +Packager: %{_packager} +Prefix: %{_prefix} +Distribution: %{_dist} + +Source0: http://www.prdownloads.sourceforge.net/bacula/%{product}-%{version}.tar.gz +Source1: http://www.prdownloads.sourceforge.net/bacula/depkgs-qt-%{depkgs_qt_version}.tar.gz + +BuildRequires: gcc, gcc-c++, make, autoconf +BuildRequires: libstdc++-devel, zlib-devel +BuildRequires: openssl-devel, fontconfig-devel, libpng-devel, libstdc++-devel, zlib-devel + +Requires: openssl +Requires: fontconfig +Requires: libpng +%if 0%{?suse_version} > 1210 +Requires: libstdc++.6 +%else +Requires: libstdc++ +%endif +Requires: zlib +Requires: %{base_package_name}-libs + +%if %{suse} +Requires: freetype2 +BuildRequires: freetype2-devel +%else +Requires: usermode +Requires: freetype +BuildRequires: freetype-devel +%endif + +# Source directory locations +%define depkgs_qt ../depkgs-qt + +# define the basic package description +%define blurb Bacula - The Network Backup Solution. +%define blurb2 Bacula is a set of computer programs that permit you (or the system +%define blurb3 administrator) to manage backup, recovery, and verification of computer +%define blurb4 data across a network of computers of different kinds. In technical terms, +%define blurb5 it is a network client/server based backup program. Bacula is relatively +%define blurb6 easy to use and efficient, while offering many advanced storage management +%define blurb7 features that make it easy to find and recover lost or damaged files. + +%define group_file /etc/group +%define groupadd /usr/sbin/groupadd + +Summary: Bacula - The Network Backup Solution +Group: System Environment/Daemons + +%description +%{blurb} + +%{blurb2} +%{blurb3} +%{blurb4} +%{blurb5} +%{blurb6} +%{blurb7} + +This is the Bacula Administration Tool (bat) graphical user interface package. 
+It is an add-on to the client or server packages. + +# Don't strip symbols +%define debug_package %{nil} + +# Must explicitly enable debug pkg on SuSE +# but not in opensuse_bs +#%if %{suse} && ! 0%{?opensuse_bs} +#%debug_package +#%endif + +%prep +%setup -T -n %{product}-%{_version} -b 0 +%setup -T -D -n %{product}-%{_version} -b 1 + +%build + + +cwd=${PWD} +%if ! %{staticqt} +export QTDIR=$(pkg-config --variable=prefix QtCore) +export QTINC=$(pkg-config --variable=includedir QtCore) +export QTLIB=$(pkg-config --variable=libdir QtCore) +export PATH=${QTDIR}/bin/:${PATH} +%else +# You can use a cache for depkgs-qt +# tar xfz depkgs-qt*gz -C ~/ +# cd ~/depkgs-qt +# echo yes | make qt4 +# touch %{depkgs_qt_version} # depkgs version +if [ -f $HOME/depkgs-qt/%{depkgs_qt_version} ]; then + rm -rf %{depkgs_qt} + ln -s $HOME/depkgs-qt %{depkgs_qt} + cd %{depkgs_qt} +else + cd %{depkgs_qt} + make qt4 </dev/null` +if [ -z "$HAVE_BACULA" ]; then + %{groupadd} -r %{daemon_group} > /dev/null 2>&1 + echo "The group %{daemon_group} has been added to %{group_file}." + echo "See the manual chapter \"Running Bacula\" for details." +fi + + +%post +if [ -d %{sysconf_dir} ]; then + cd %{sysconf_dir} + if [ ! -f .rpm.sed ]; then + (umask 0177 + echo "# This file is used to ensure that all passwords will" > .rpm.sed + echo "# match between configuration files" >> .rpm.sed + ) + for string in XXX_REPLACE_WITH_DIRECTOR_PASSWORD_XXX XXX_REPLACE_WITH_CLIENT_PASSWORD_XXX XXX_REPLACE_WITH_STORAGE_PASSWORD_XXX XXX_REPLACE_WITH_DIRECTOR_MONITOR_PASSWORD_XXX XXX_REPLACE_WITH_CLIENT_MONITOR_PASSWORD_XXX XXX_REPLACE_WITH_STORAGE_MONITOR_PASSWORD_XXX; do + pass=`openssl rand -base64 33` + echo "s@${string}@${pass}@g" >> .rpm.sed + done + fi + host=`hostname -s` + for file in *.conf; do + sed -f .rpm.sed $file > $file.new + sed "s@XXX_HOSTNAME_XXX@${host}@g" $file.new > $file + rm -f $file.new + done +fi +/sbin/ldconfig + +%clean +[ "$RPM_BUILD_ROOT" != "/" ] && rm -rf "$RPM_BUILD_ROOT" +rm -rf $RPM_BUILD_DIR/depkgs-qt + +%changelog +* Sat Aug 1 2009 Kern Sibbald +- Split bat into separate bacula-bat.spec diff --git a/platforms/rpms/suse/bacula-docs.spec.in b/platforms/rpms/suse/bacula-docs.spec.in new file mode 100644 index 00000000..605b9cea --- /dev/null +++ b/platforms/rpms/suse/bacula-docs.spec.in @@ -0,0 +1,94 @@ +# +# Bacula RPM spec file +# +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# + +# Platform Build Configuration + +# basic defines for every build +%define _release @RELEASE@ +%define _version @VERSION@ +%define product bacula +%define _packager Kern Sibbald + +%define _prefix /usr +%define _sbindir %_prefix/sbin +%define _mandir %_prefix/share/man + + +#-------------------------------------------------------------------------- +# it should not be necessary to change anything below here for a release +# except for patch macros in the setup section +#-------------------------------------------------------------------------- + +%{?contrib_packager:%define _packager %{contrib_packager}} + +Summary: Bacula - The Network Backup Solution +Name: %{product}-docs +Version: %{_version} +Release: %{_release} +Group: System Environment/Daemons +License: AGPLv3 +BuildRoot: %{_tmppath}/%{name}-root +URL: http://www.bacula.org/ +Vendor: The Bacula Team +Packager: %{_packager} +BuildArchitectures: noarch +Prefix: %{_prefix} +Distribution: Bacula Documentation + +Source: %{name}-%{_version}.tar.bz2 + +# Source directory locations +%define _docsrc . 
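As in the other specs in this directory, the %{?contrib_packager:...} line above only overrides _packager when a contrib_packager macro is supplied at build time; otherwise the default packager stands. A quick illustration of that conditional-macro behaviour (the packager string and invocation below are hypothetical, not taken from the spec):

    # Undefined macro: %{?...} expands to nothing, %{!?...} supplies the fallback
    rpm --eval '%{?contrib_packager:%{contrib_packager}}%{!?contrib_packager:Kern Sibbald}'

    # Defining it on the rpmbuild command line makes the spec pick it up
    rpmbuild -ba bacula-docs.spec --define 'contrib_packager Jane Doe <jane@example.org>'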
+ +# define the basic package description +%define blurb Bacula - The Leading Open Source Backup Solution. +%define blurb2 Bacula is a set of computer programs that permit you (or the system +%define blurb3 administrator) to manage backup, recovery, and verification of computer +%define blurb4 data across a network of computers of different kinds. In technical terms, +%define blurb5 it is a network client/server based backup program. Bacula is relatively +%define blurb6 easy to use and efficient, while offering many advanced storage management +%define blurb7 features that make it easy to find and recover lost or damaged files. +%define blurb8 Bacula source code has been released under the AGPL version 3 license. + +Summary: Bacula - The Network Backup Solution +Group: System Environment/Daemons + +%description +%{blurb} + +%{blurb2} +%{blurb3} +%{blurb4} +%{blurb5} +%{blurb6} +%{blurb7} +%{blurb8} + +This package installs the Bacula pdf and html documentation. + +%prep +%setup + + +%clean +[ "$RPM_BUILD_ROOT" != "/" ] && rm -rf "$RPM_BUILD_ROOT" +rm -rf $RPM_BUILD_DIR/%{name}-%{_version} + +%files +%doc %{_docsrc}/manuals/en/console/console %{_docsrc}/manuals/en/console/console.pdf +%doc %{_docsrc}/manuals/en/developers/developers %{_docsrc}/manuals/en/developers/developers.pdf +%doc %{_docsrc}/manuals/en/main/main %{_docsrc}/manuals/en/main/main.pdf +%doc %{_docsrc}/manuals/en/misc/misc %{_docsrc}/manuals/en/misc/misc.pdf +%doc %{_docsrc}/manuals/en/problems/problems %{_docsrc}/manuals/en/problems/problems.pdf +%doc %{_docsrc}/manuals/en/utility/utility %{_docsrc}/manuals/en/utility/utility.pdf + +%changelog +* Sat Jan 30 2010 D. Scott Barninger +- change source file to bz2, update for new doc structure +* Sat Aug 1 2009 Kern Sibbald +- Split docs into separate bacula-docs.spec diff --git a/platforms/rpms/suse/bacula-mtx.spec.in b/platforms/rpms/suse/bacula-mtx.spec.in new file mode 100644 index 00000000..b53067cb --- /dev/null +++ b/platforms/rpms/suse/bacula-mtx.spec.in @@ -0,0 +1,126 @@ +# +# Bacula RPM spec file +# +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# + +# Platform Build Configuration + +# basic defines for every build +%define _release @RELEASE@ +%define _version @VERSION@ +%define product bacula +%define depkgs_version @DEPKGS_VERSION@ +%define _packager Kern Sibbald +%define manpage_ext gz + +# Don't strip binaries +%define __os_install_post %{nil} +%define __debug_install_post %{nil} +%define debug_package %{nil} + +%define single_dir 0 +%{?single_dir_install:%define single_dir 1} + +# Installation Directory locations +%define _prefix /usr +%define _sbindir %_prefix/sbin +%define _bindir %_prefix/bin +%define _subsysdir /var/lock/subsys +%define sqlite_bindir %_libdir/bacula/sqlite +%define _mandir %_prefix/share/man +%define sysconf_dir /etc/bacula +%define script_dir %_libdir/bacula +%define working_dir /var/lib/bacula +%define pid_dir /var/run +%define plugin_dir %_libdir/bacula/plugins +%define lib_dir %_libdir/bacula/lib + +#-------------------------------------------------------------------------- +# it should not be necessary to change anything below here for a release +# except for patch macros in the setup section +#-------------------------------------------------------------------------- + +%{?contrib_packager:%define _packager %{contrib_packager}} + +Summary: Bacula - The Network Backup Solution +Name: %{product}-mtx +Version: %{_version} +Release: %{_release} +Group: System Environment/Daemons +License: AGPLv3 
+BuildRoot: %{_tmppath}/%{name}-root +URL: http://www.bacula.org/ +Vendor: The Bacula Team +Packager: %{_packager} +Prefix: %{_prefix} +Distribution: Bacula Bat + +Source: http://www.prdownloads.sourceforge.net/bacula/depkgs-%{depkgs_version}.tar.gz + +# define the basic package description +%define blurb Bacula - The Network Backup Solution. +%define blurb2 Bacula is a set of computer programs that permit you (or the system +%define blurb3 administrator) to manage backup, recovery, and verification of computer +%define blurb4 data across a network of computers of different kinds. In technical terms, +%define blurb5 it is a network client/server based backup program. Bacula is relatively +%define blurb6 easy to use and efficient, while offering many advanced storage management +%define blurb7 features that make it easy to find and recover lost or damaged files. + +Summary: Bacula - The Network Backup Solution +Group: System Environment/Daemons + +%description +%{blurb} + +%{blurb2} +%{blurb3} +%{blurb4} +%{blurb5} +%{blurb6} +%{blurb7} + +This is Bacula's version of mtx tape utilities for Linux distributions that +do not provide their own mtx package + +%prep +%setup -T -n depkgs -b 0 + +%build + +make mtx + +%install +make \ + prefix=$RPM_BUILD_ROOT%{_prefix} \ + sbindir=$RPM_BUILD_ROOT%{_sbindir} \ + sysconfdir=$RPM_BUILD_ROOT%{sysconf_dir} \ + scriptdir=$RPM_BUILD_ROOT%{script_dir} \ + working_dir=$RPM_BUILD_ROOT%{working_dir} \ + piddir=$RPM_BUILD_ROOT%{pid_dir} \ + mandir=$RPM_BUILD_ROOT%{_mandir} \ + mtx-install + +%files +%defattr(-,root,root) +%attr(-, root, %{storage_daemon_group}) %{_sbindir}/loaderinfo +%attr(-, root, %{storage_daemon_group}) %{_sbindir}/mtx +%attr(-, root, %{storage_daemon_group}) %{_sbindir}/scsitape +%attr(-, root, %{storage_daemon_group}) %{_sbindir}/tapeinfo +%attr(-, root, %{storage_daemon_group}) %{_sbindir}/scsieject +%{_mandir}/man1/loaderinfo.1.%{manpage_ext} +%{_mandir}/man1/mtx.1.%{manpage_ext} +%{_mandir}/man1/scsitape.1.%{manpage_ext} +%{_mandir}/man1/tapeinfo.1.%{manpage_ext} +%{_mandir}/man1/scsieject.1.%{manpage_ext} + + +%clean +[ "$RPM_BUILD_ROOT" != "/" ] && rm -rf "$RPM_BUILD_ROOT" +rm -rf $RPM_BUILD_DIR/depkgs + +%changelog +* Sat Aug 1 2009 Kern Sibbald +- Split mtx out into bacula-mtx.spec diff --git a/platforms/rpms/suse/bacula.spec.in b/platforms/rpms/suse/bacula.spec.in new file mode 100644 index 00000000..1eaef010 --- /dev/null +++ b/platforms/rpms/suse/bacula.spec.in @@ -0,0 +1,1384 @@ +# Bacula RPM spec file +# +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# + +# Platform Build Configuration + +# basic defines for every build +%define _release @RELEASE@ +%define _version @VERSION@ +%define product bacula +%define _packager Kern Sibbald +%define depkgs_version @DEPKGS_VERSION@ + +# Don't strip binaries +%define __os_install_post %{nil} +%define __debug_install_post %{nil} +%define debug_package %{nil} + +%define postgres_version 8 +%define pgre84 0 +%{?build_rhel5:%define pgre84 1} +%{?build_el5:%define pgre84 1} +%if %{pgre84} +%define postgres_package postgresql84 +%define postgres_server_package postgresql84-server +%define postgres_devel_package postgresql84-devel +%else +%define postgres_package postgresql +%define postgres_server_package postgresql-server +%define postgres_devel_package postgresql-devel +%endif + +%define single_dir 1 +%{?single_dir_install:%define single_dir 1} + +# Installation Directory locations +%if %{single_dir} +%define _prefix /opt/bacula +%define _sbindir 
/opt/bacula/bin +%define _bindir /opt/bacula/bin +%define _subsysdir /opt/bacula/working +%define sqlite_bindir /opt/bacula/sqlite +%define _mandir /usr/share/man +%define docs_dir /opt/bacula/docs +%define archive_dir /opt/bacula/archive +%define sysconf_dir /opt/bacula/etc +%define script_dir /opt/bacula/scripts +%define working_dir /opt/bacula/working +%define pid_dir /opt/bacula/working +%define plugin_dir /opt/bacula/plugins +%define lib_dir /opt/bacula/lib +%define log_dir /opt/bacula/log +%else +%define _prefix /usr +%define _sbindir %_prefix/sbin +%define _bindir %_prefix/bin +%define _subsysdir /var/lock/subsys +%define sqlite_bindir %_libdir/bacula/sqlite +%define _mandir %_prefix/share/man +%define sysconf_dir /etc/bacula +%define script_dir %_libdir/bacula +%define working_dir /var/lib/bacula +%define pid_dir /var/run +%define plugin_dir %_libdir/bacula/plugins +%define lib_dir %_libdir/bacula/lib +%define log_dir %_libdir/bacula/log +%endif + +# Daemon user:group Don't change them unless you know what you are doing +%define director_daemon_user bacula +%define storage_daemon_user bacula +%define file_daemon_user root +%define daemon_group bacula +# group that has write access to tape devices, usually disk on Linux +%define storage_daemon_group disk + +%define depkgs ../depkgs + +# probems with mandriva build: +# nothing provides libbonobo2_0-devel, nothing provides libbonoboui2_0-devel + +#-------------------------------------------------------------------------- +# it should not be necessary to change anything below here for a release +# except for patch macros in the setup section +#-------------------------------------------------------------------------- + +%define base_package_name %{product} + +%{?contrib_packager:%define _packager %{contrib_packager}} + +Summary: Bacula - The Network Backup Solution +Name: %{base_package_name} +Version: %{_version} +Release: %{_release} +Group: System Environment/Daemons +License: AGPLv3 +BuildRoot: %{_tmppath}/%{name}-root +URL: http://www.bacula.org/ +Vendor: The Bacula Team +Packager: %{_packager} +Prefix: %{_prefix} + +# opensuse build service changes the release itself +# what happens if the release is not 1? DSB +Source0: http://www.prdownloads.sourceforge.net/bacula/depkgs-%{depkgs_version}.tar.gz +Source1: http://www.prdownloads.sourceforge.net/bacula/%{product}-%{version}.tar.gz +%if 0%{?opensuse_bs} +Source2: Release_Notes-%{version}-1.tar.gz +%else +Source2: Release_Notes-%{version}-%{release}.tar.gz +%endif + +# define the basic package description +%define blurb Bacula - The Leading Open Source Backup Solution. +%define blurb2 Bacula is a set of computer programs that permit you (or the system +%define blurb3 administrator) to manage backup, recovery, and verification of computer +%define blurb4 data across a network of computers of different kinds. In technical terms, +%define blurb5 it is a network client/server based backup program. Bacula is relatively +%define blurb6 easy to use and efficient, while offering many advanced storage management +%define blurb7 features that make it easy to find and recover lost or damaged files. +%define blurb8 Bacula source code has been released under the AGPL version 3 license. 
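The long block of platform and database defines that follows is driven by rpmbuild command-line defines; the platform and the catalog backend must be chosen explicitly, and the spec errors out if either is missing. Typical invocations (hypothetical platform choice, shown only to illustrate which build_xxx macros the %if tests below expect):

    # Server packages for SuSE 12.1 with a PostgreSQL catalog
    rpmbuild -ba bacula.spec --define 'build_su121 1' --define 'build_postgresql 1'

    # Client-only packages, which skip the catalog database requirement
    rpmbuild -ba bacula.spec --define 'build_su121 1' --define 'build_client_only 1'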
+ +%define user_file /etc/passwd +%define group_file /etc/group + +# program locations +%define useradd /usr/sbin/useradd +%define groupadd /usr/sbin/groupadd +%define usermod /usr/sbin/usermod + +# platform defines - set one below or define the build_xxx on the command line +# RedHat builds +%define rh7 0 +%{?build_rh7:%define rh7 1} +%define rh8 0 +%{?build_rh8:%define rh8 1} +%define rh9 0 +%{?build_rh9:%define rh9 1} +# Fedora Core build +%define fc16 0 +%{?build_fc16:%define fc16 1} +%define fc17 0 +%{?build_fc17:%define fc17 1} +%define fc18 0 +%{?build_fc18:%define fc18 1} +# Whitebox Enterprise build +%define wb3 0 +%{?build_wb3:%define wb3 1} +# RedHat Enterprise builds +%define rhel3 0 +%{?build_rhel3:%define rhel3 1} +%{?build_rhel3:%define wb3 1} +%define rhel4 0 +%{?build_rhel4:%define rhel4 1} +%{?build_rhel4:%define fc3 1} +%{?build_el4:%define rhel4 1} +%{?build_el4:%define fc3 1} +%define rhel5 0 +%{?build_rhel5:%define rhel5 1} +%{?build_rhel5:%define fc6 1} +%{?build_el5:%define rhel5 1} +%{?build_el5:%define fc6 1} +%define rhel6 0 +%{?build_rhel6:%define rhel6 1} +%{?build_el6:%define rhel6 1} +# CentOS build +%define centos3 0 +%{?build_centos3:%define centos3 1} +%{?build_centos3:%define wb3 1} +%define centos4 0 +%{?build_centos4:%define centos4 1} +%{?build_centos4:%define fc3 1} +%define centos5 0 +%{?build_centos5:%define centos5 1} +%{?build_centos5:%define fc6 1} +%define centos6 0 +%{?build_centos6:%define centos6 1} +# SL build +%define sl3 0 +%{?build_sl3:%define sl3 1} +%{?build_sl3:%define wb3 1} +%define sl4 0 +%{?build_sl4:%define sl4 1} +%{?build_sl4:%define fc3 1} +%define sl5 0 +%{?build_sl5:%define sl5 1} +%{?build_sl5:%define fc6 1} +# SuSE build +%define su9 0 +%{?build_su9:%define su9 1} +%define su10 0 +%{?build_su10:%define su10 1} +%define su102 0 +%{?build_su102:%define su102 1} +%define su103 0 +%{?build_su103:%define su103 1} +%define su110 0 +%{?build_su110:%define su110 1} +%define su111 0 +%{?build_su111:%define su111 1} +%define su112 0 +%{?build_su112:%define su112 1} +%define su121 0 +%{?build_su121:%define su121 1} +%define su423 0 +%{?build_su423:%define su423 1} +# Mandrake builds +%define mdk 0 +%{?build_mdk:%define mdk 1} +%define mdv 0 +%{?build_mdv:%define mdv 1} +%{?build_mdv:%define mdk 1} + +# client only build +%define client_only 0 +%{?build_client_only:%define client_only 1} + +# Setup some short cuts +%define rhat 0 +%if %{rh7} || %{rh8} || %{rh9} +%define rhat 1 +%endif +%define fed 0 +%if %{fc16} || %{fc17} || %{fc18} +%define fed 1 +%endif +%define suse 0 +%if %{su9} || %{su10} || %{su102} || %{su103} || %{su110} || %{su111} || %{su112} || %{su121} || %{su423} +%define suse 1 +%endif +%define rhel 0 +%if %{rhel3} || %{rhel4} || %{rhel5} || %{rhel6} || %{centos3} || %{centos4} || %{centos5} || %{centos6} +%define rhel 1 +%endif +%define scil 0 +%if %{sl3} || %{sl4} || %{sl5} +%define scil 1 +%endif + + +# test for a platform definition +%if !%{rhat} && !%{rhel} && !%{fed} && !%{wb3} && !%{suse} && !%{mdk} && !%{scil} +%{error: You must specify a platform. 
Please examine the spec file.} +exit 1 +%endif + +# distribution-specific directory for logwatch +%if %{wb3} || %{rh7} || %{rh8} || %{rh9} +%define logwatch_dir /etc/log.d +%else +%define logwatch_dir /etc/logwatch +%endif + +# database defines +# set for database support desired or define the build_xxx on the command line +%define mysql 0 +%{?build_mysql:%define mysql 1} +%define sqlite 0 +%{?build_sqlite:%define sqlite 1} +%define postgresql 0 +%{?build_postgresql:%define postgresql 1} + +# test for a database definition +%if ! %{mysql} && ! %{sqlite} && ! %{postgresql} && ! %{client_only} +%{error: You must specify database support, by passing one of the following to rpmbuild:} +%{error: --define build_postgresql=1} +%{error: --define build_sqlite=1} +%{error: --define build_mysql=1} +exit 1 +%endif + +%if %{mysql} +%define db_backend mysql +%endif +%if %{sqlite} +%define db_backend sqlite3 +%endif +%if %{postgresql} +%define db_backend postgresql +%endif + +# 64 bit support +%define x86_64 0 +%{?build_x86_64:%define x86_64 1} + +# check what distribution we are +%if %{fc16} || %{fc17} || %{fc18} +%define _dist %(grep Fedora /etc/redhat-release) +%endif +%if %{centos5} || %{centos4} || %{centos3} +%define _dist %(grep CentOS /etc/redhat-release) +%endif +%if %{sl5} ||%{sl4} || %{sl3} +%define _dist %(grep 'Scientific Linux' /etc/redhat-release) +%endif +%if %{wb3} && ! %{rhel3} && ! %{centos3} && ! %{sl3} +%define _dist %(grep White /etc/whitebox-release) +%endif +%if %{suse} +%define _dist %(grep -i SuSE /etc/SuSE-release) +%endif +%if %{mdk} +%define _dist %(grep Mand /etc/mandrake-release) +%endif +%if %{rhat} || %{rhel} +%define _dist %(grep Red /etc/redhat-release) +%endif +%{?DISTNAME:%define _dist %{DISTNAME}} + +# only set Disribution if not in opensuse build service, as it sets it itself +%if ! 0%{?opensuse_bs} +Distribution: %{_dist} +%endif + +%if 0%{?opensuse_bs} && %{mysql} && %{suse} +# needed in opensuse_bs, as rpm is installed during build process +BuildRequires: libmysqlclient-devel +BuildRequires: mysql-client +BuildRequires: mysql +%endif +%if 0%{?opensuse_bs} && %{suse} && %{postgresql} +BuildRequires: %{postgres_package} +BuildRequires: %{postgres_server_package} +%endif +BuildRequires: openssl + +%if 0%{?opensuse_bs} && %{suse} +BuildRequires: pwdutils +BuildRequires: sysconfig +%endif + +# should we turn on python support +%define python 0 +%{?build_python:%define python 1} + +# should we enable tcp wrappers support +%define tcpwrappers 1 +%{?build_tcpwrappers:%define tcpwrappers 1} + +# do we need to patch for old postgresql version? +%define old_pgsql 0 +%{?build_old_pgsql:%define old_pgsql 1} + +# Mandriva somehow forces the manpage file extension to bz2 rather than gz +%if %{mdk} +%define manpage_ext bz2 +%else +%define manpage_ext gz +%endif + +# for client only build +%if %{client_only} +%define mysql 0 +%define postgresql 0 +%define sqlite 0 +%endif + +BuildRequires: gcc, gcc-c++, make, autoconf +BuildRequires: glibc, glibc-devel +BuildRequires: ncurses-devel, readline-devel +BuildRequires: libstdc++-devel, zlib-devel +BuildRequires: openssl-devel +BuildRequires: libacl-devel +BuildRequires: pkgconfig +%if ! %{rh7} +BuildRequires: libxml2-devel +%endif +%if %{python} +BuildRequires: python, python-devel +%{expand: %%define pyver %(python -c 'import sys;print(sys.version[0:3])')} +%endif + +%if %{rh7} +BuildRequires: libxml-devel +%endif +%if %{mdk} +BuildRequires: libstdc++-static-devel +BuildRequires: glibc-static-devel +%endif + +%if %{mysql} && ! 
%{suse} +BuildRequires: mysql-devel +%endif + +%if %{postgresql} && %{wb3} +BuildRequires: rh-postgresql-devel >= %{postgres_version} +%endif + +%if %{postgresql} && ! %{wb3} +BuildRequires: %{postgres_devel_package} >= %{postgres_version} +%endif + +%description +%{blurb} + +%{blurb2} +%{blurb3} +%{blurb4} +%{blurb5} +%{blurb6} +%{blurb7} +%{blurb8} + +%if %{mysql} +%package mysql +%endif +%if %{sqlite} +%package sqlite +%endif +%if %{postgresql} +%package postgresql +%endif + +Summary: Bacula - The Network Backup Solution +Group: System Environment/Daemons +Provides: bacula-dir, bacula-sd, bacula-fd, bacula-server +Conflicts: bacula-client + +Requires: ncurses, libstdc++, zlib, openssl +Requires: glibc, readline, %{name}-libs + +%if %{suse} +Conflicts: bacula +%endif + +%if %{mysql} +Requires: mysql +%endif + +%if %{postgresql} && %{wb3} +Requires: rh-postgresql >= 7 +%endif +%if %{postgresql} && ! %{wb3} +Requires: postgresql >= 7 +%endif + +%if %{mysql} +%description mysql +%endif +%if %{sqlite} +%description sqlite +%endif +%if %{postgresql} +%description postgresql +%endif + +%if %{python} +Requires: python >= %{pyver} +%endif + +%{blurb} + +%{blurb2} +%{blurb3} +%{blurb4} +%{blurb5} +%{blurb6} +%{blurb7} +%{blurb8} + +%if %{mysql} +This build requires MySQL to be installed separately as the catalog database. +%endif +%if %{postgresql} +This build requires PostgreSQL to be installed separately as the catalog database. +%endif +%if %{sqlite} +This build incorporates sqlite3 as the catalog database, statically compiled. +%endif +%if %{python} +This build includes python scripting support. +%endif +%if %{tcpwrappers} +This build includes tcp-wrappers support. +%endif + +%package client +Summary: Bacula - The Network Backup Solution +Group: System Environment/Daemons +Provides: bacula-fd +Conflicts: bacula-mysql +Conflicts: bacula-sqlite +Conflicts: bacula-postgresql + +%if %{suse} +Provides: %{product} +Provides: %{product}-libs +%endif + +Requires: libstdc++, zlib, openssl +Requires: glibc, readline, %{name}-libs + +%if %{python} +Requires: python >= %{pyver} +%endif + +%description client +%{blurb} + +%{blurb2} +%{blurb3} +%{blurb4} +%{blurb5} +%{blurb6} +%{blurb7} +%{blurb8} + +This is the File daemon (Client) only package. It includes the command line +console program. +%if %{python} +This build includes python scripting support. +%endif +%if %{tcpwrappers} +This build includes tcp-wrappers support. +%endif + +%if ! %{client_only} +%package updatedb + +Summary: Bacula - The Network Backup Solution +Group: System Environment/Daemons + +%description updatedb +%{blurb} + +%{blurb2} +%{blurb3} +%{blurb4} +%{blurb5} +%{blurb6} +%{blurb7} +%{blurb8} + +This package installs scripts for updating older versions of the bacula +database. +%endif + +%package libs + +Summary: Bacula - The Network Backup Solution +Group: System Environment/Daemons + +%description libs +%{blurb} + +%{blurb2} +%{blurb3} +%{blurb4} +%{blurb5} +%{blurb6} +%{blurb7} +%{blurb8} + +This package installs the shared libraries used by many bacula programs. + +# Must explicitly enable debug pkg on SuSE +# but not in opensuse_bs +#%if %{suse} && ! 
0%{?opensuse_bs} +#%debug_package +#%endif + +%prep +%setup -T -b 0 -n depkgs +%setup -T -b 1 -n %{product}-%{version} +%setup -T -D -b 2 -n %{product}-%{version} +# extract depkgs + + +%build + +%if %{suse} +#export LDFLAGS="${LDFLAGS} -L/usr/lib/termcap -L/usr/lib64/termcap" +%endif + +# You can use a cache for depkgs +# tar xfz depkgs*gz -C ~/ +# cd ~/depkgs +# touch %{depkgs_version} # depkgs version + +if [ -f $HOME/depkgs/%{depkgs_version} ]; then + rm -rf %{depkgs} + ln -s $HOME/depkgs %{depkgs} +fi + +cwd=${PWD} +%if %{sqlite} +cd %{depkgs} +make sqlite3 +cd ${cwd} +%endif + +cd %{depkgs} +make lzo +export LDFLAGS="${LDFLAGS} -L${PWD}/lzo/lib" +export CPPFLAGS="${CPPFLAGS} -I${PWD}/lzo/include" +cd ${cwd} + +# hostname is the build hostname, so use XXX_HOSTNAME_XXX for Address parameter +sed -i s/hostname/basename/ src/console/bconsole.conf.in src/dird/bacula-dir.conf.in src/qt-console/bat.conf.in + +%if %{sqlite} +# patches for the bundled sqlite scripts +sed -i s:.SQLITE_BINDIR.:/opt/bacula/sqlite: src/cats/*_sqlite3_*.in + +# patch the bacula-dir init script to remove sqlite service +%if %{suse} +sed -i 's/network .DB_TYPE./network/' platforms/suse/bacula-dir.in +%endif +%endif + +# 64 bit lib location hacks +# as of 1.39.18 it should not be necessary to enable x86_64 as configure is +# reported to be fixed to properly detect lib locations. +%if %{x86_64} +export LDFLAGS="${LDFLAGS} -L/usr/lib64" +%endif +%if %{mysql} && %{x86_64} +export LDFLAGS="${LDFLAGS} -L/usr/lib64/mysql" +%endif +%if %{python} && %{x86_64} +export LDFLAGS="${LDFLAGS} -L/usr/lib64/python%{pyver}" +%endif + +export BACULA="Bacula" + +# Main Bacula configuration +%configure \ + --prefix=%{_prefix} \ + --sbindir=%{_sbindir} \ + --sysconfdir=%{sysconf_dir} \ + --mandir=%{_mandir} \ + --with-scriptdir=%{script_dir} \ + --with-working-dir=%{working_dir} \ + --with-plugindir=%{plugin_dir} \ + --with-logdir=%{log_dir} \ + --with-pid-dir=%{pid_dir} \ + --with-subsys-dir=%{_subsysdir} \ + --enable-smartalloc \ + --disable-bwx-console \ + --disable-tray-monitor \ + --disable-conio \ + --enable-readline \ +%if %{mysql} + --with-mysql \ +%endif +%if %{sqlite} + --with-sqlite3=${cwd}/%{depkgs}/sqlite3 \ +%endif +%if %{postgresql} + --with-postgresql \ +%endif + --disable-bat \ +%if %{python} + --with-python \ +%endif +%if %{client_only} + --enable-client-only \ +%endif +%if %{rh7} || %{rh8} || %{rh9} + --disable-batch-insert \ +%endif + --with-tcp-wrappers \ + --with-dir-user=%{director_daemon_user} \ + --with-dir-group=%{daemon_group} \ + --with-sd-user=%{storage_daemon_user} \ + --with-sd-group=%{storage_daemon_group} \ + --with-fd-user=%{file_daemon_user} \ + --with-fd-group=%{daemon_group} \ + --with-basename="XXX_HOSTNAME_XXX" \ + --with-hostname="XXX_HOSTNAME_XXX" \ + --with-dir-password="XXX_REPLACE_WITH_DIRECTOR_PASSWORD_XXX" \ + --with-fd-password="XXX_REPLACE_WITH_CLIENT_PASSWORD_XXX" \ + --with-sd-password="XXX_REPLACE_WITH_STORAGE_PASSWORD_XXX" \ + --with-mon-dir-password="XXX_REPLACE_WITH_DIRECTOR_MONITOR_PASSWORD_XXX" \ + --with-mon-fd-password="XXX_REPLACE_WITH_CLIENT_MONITOR_PASSWORD_XXX" \ + --with-mon-sd-password="XXX_REPLACE_WITH_STORAGE_MONITOR_PASSWORD_XXX" \ + --with-openssl + +make -j3 + +%install + +cwd=${PWD} +[ "$RPM_BUILD_ROOT" != "/" ] && rm -rf "$RPM_BUILD_ROOT" +mkdir -p $RPM_BUILD_ROOT/etc/init.d +mkdir -p $RPM_BUILD_ROOT/etc/logrotate.d +mkdir -p $RPM_BUILD_ROOT%{logwatch_dir}/conf/logfiles +mkdir -p $RPM_BUILD_ROOT%{logwatch_dir}/conf/services +mkdir -p 
$RPM_BUILD_ROOT%{logwatch_dir}/scripts/services +mkdir -p $RPM_BUILD_ROOT%{logwatch_dir}/scripts/shared +mkdir -p $RPM_BUILD_ROOT%{script_dir}/updatedb + +mkdir -p $RPM_BUILD_ROOT/etc/pam.d +mkdir -p $RPM_BUILD_ROOT%{_sbindir} + +%if %{sqlite} +mkdir -p $RPM_BUILD_ROOT%{sqlite_bindir} +%endif + +make DESTDIR=$RPM_BUILD_ROOT install + +# Remove docs for programs that are depreciated +rm -f $RPM_BUILD_ROOT%{_mandir}/man1/bacula-bgnome-console.1.%{manpage_ext} +rm -f $RPM_BUILD_ROOT%{_mandir}/man1/bacula-bwxconsole.1.%{manpage_ext} +rm -f $RPM_BUILD_ROOT%{_mandir}/man1/bacula-tray-monitor.1.%{manpage_ext} +rm -f $RPM_BUILD_ROOT%{script_dir}/gconsole + +# Storage-ctl packaged in shstore rpm +rm -f $RPM_BUILD_ROOT%{script_dir}/storage-ctl +rm -f $RPM_BUILD_ROOT%{script_dir}/storage-ctl.conf + +# Remove libtool unneeded file +rm -f $RPM_BUILD_ROOT%{_libdir}/libbacsd.la + +# fixme - make installs the mysql scripts for sqlite build +%if %{sqlite} +rm -f $RPM_BUILD_ROOT%{script_dir}/startmysql +rm -f $RPM_BUILD_ROOT%{script_dir}/stopmysql +rm -f $RPM_BUILD_ROOT%{script_dir}/grant_mysql_privileges +%endif + +# fixme - make installs the mysql scripts for postgresql build +%if %{postgresql} +rm -f $RPM_BUILD_ROOT%{script_dir}/startmysql +rm -f $RPM_BUILD_ROOT%{script_dir}/stopmysql +%endif + +# install the init scripts +%if %{suse} +cp -p platforms/suse/bacula-dir $RPM_BUILD_ROOT/etc/init.d/bacula-dir +cp -p platforms/suse/bacula-fd $RPM_BUILD_ROOT/etc/init.d/bacula-fd +cp -p platforms/suse/bacula-sd $RPM_BUILD_ROOT/etc/init.d/bacula-sd +%endif +%if %{mdk} +cp -p platforms/mandrake/bacula-dir $RPM_BUILD_ROOT/etc/init.d/bacula-dir +cp -p platforms/mandrake/bacula-fd $RPM_BUILD_ROOT/etc/init.d/bacula-fd +cp -p platforms/mandrake/bacula-sd $RPM_BUILD_ROOT/etc/init.d/bacula-sd +%endif +%if ! %{suse} && ! %{mdk} +cp -p platforms/redhat/bacula-dir $RPM_BUILD_ROOT/etc/init.d/bacula-dir +cp -p platforms/redhat/bacula-fd $RPM_BUILD_ROOT/etc/init.d/bacula-fd +cp -p platforms/redhat/bacula-sd $RPM_BUILD_ROOT/etc/init.d/bacula-sd +%endif +chmod 0754 $RPM_BUILD_ROOT/etc/init.d/* +%if %{client_only} +rm -f $RPM_BUILD_ROOT/etc/init.d/bacula-dir +rm -f $RPM_BUILD_ROOT/etc/init.d/bacula-sd +%endif + +# install sqlite +%if %{sqlite} +cp -p %{depkgs}/sqlite3/sqlite3 $RPM_BUILD_ROOT%{sqlite_bindir}/sqlite3 +cp -p %{depkgs}/sqlite3/sqlite3.h $RPM_BUILD_ROOT%{sqlite_bindir}/sqlite3.h +cp -p %{depkgs}/sqlite3/libsqlite3.a $RPM_BUILD_ROOT%{sqlite_bindir}/libsqlite3.a +%endif + + +# install the logrotate file +cp -p scripts/logrotate $RPM_BUILD_ROOT/etc/logrotate.d/bacula + +# install the updatedb scripts +cp -p updatedb/* $RPM_BUILD_ROOT%{script_dir}/updatedb/ + +# install specific scripts + +%if ! 
%{client_only} +# install the sample-query.sql file +cp -p examples/sample-query.sql $RPM_BUILD_ROOT%{script_dir}/sample-query.sql + +# install the logwatch scripts +cp -p scripts/logwatch/bacula $RPM_BUILD_ROOT%{logwatch_dir}/scripts/services/bacula +cp -p scripts/logwatch/applybaculadate $RPM_BUILD_ROOT%{logwatch_dir}/scripts/shared/applybaculadate +cp -p scripts/logwatch/logfile.bacula.conf $RPM_BUILD_ROOT%{logwatch_dir}/conf/logfiles/bacula.conf +cp -p scripts/logwatch/services.bacula.conf $RPM_BUILD_ROOT%{logwatch_dir}/conf/services/bacula.conf +chmod 755 $RPM_BUILD_ROOT%{logwatch_dir}/scripts/services/bacula +chmod 755 $RPM_BUILD_ROOT%{logwatch_dir}/scripts/shared/applybaculadate +chmod 644 $RPM_BUILD_ROOT%{logwatch_dir}/conf/logfiles/bacula.conf +chmod 644 $RPM_BUILD_ROOT%{logwatch_dir}/conf/services/bacula.conf +%endif + +# now clean up permissions that are left broken by the install +chmod o-rwx $RPM_BUILD_ROOT%{working_dir} + +# fix me - building enable-client-only installs files not included in bacula-client package +%if %{client_only} +# Program docs not installed on client +rm -f $RPM_BUILD_ROOT%{_mandir}/man8/bacula-dir.8.%{manpage_ext} +rm -f $RPM_BUILD_ROOT%{_mandir}/man8/bacula-sd.8.%{manpage_ext} +rm -f $RPM_BUILD_ROOT%{_mandir}/man8/bcopy.8.%{manpage_ext} +rm -f $RPM_BUILD_ROOT%{_mandir}/man8/bextract.8.%{manpage_ext} +rm -f $RPM_BUILD_ROOT%{_mandir}/man8/bls.8.%{manpage_ext} +rm -f $RPM_BUILD_ROOT%{_mandir}/man8/bscan.8.%{manpage_ext} +rm -f $RPM_BUILD_ROOT%{_mandir}/man8/btape.8.%{manpage_ext} +rm -f $RPM_BUILD_ROOT%{_mandir}/man8/dbcheck.8.%{manpage_ext} +rm -f $RPM_BUILD_ROOT%{_mandir}/man8/bregex.8.%{manpage_ext} +rm -f $RPM_BUILD_ROOT%{_mandir}/man8/bwild.8.%{manpage_ext} +rm -f $RPM_BUILD_ROOT%{_mandir}/man1/bsmtp.1.%{manpage_ext} + +rm -f $RPM_BUILD_ROOT%{script_dir}/bacula +rm -f $RPM_BUILD_ROOT%{script_dir}/bacula_config +rm -f $RPM_BUILD_ROOT%{script_dir}/bacula-ctl-dir +rm -f $RPM_BUILD_ROOT%{script_dir}/bacula-ctl-sd +rm -f $RPM_BUILD_ROOT%{script_dir}/disk-changer +rm -f $RPM_BUILD_ROOT%{script_dir}/dvd-handler +rm -f $RPM_BUILD_ROOT%{script_dir}/mtx-changer +rm -f $RPM_BUILD_ROOT%{script_dir}/startmysql +rm -f $RPM_BUILD_ROOT%{script_dir}/stopmysql +rm -rf $RPM_BUILD_ROOT%{script_dir}/updatedb +rm -f $RPM_BUILD_ROOT%{script_dir}/bconsole +rm -f $RPM_BUILD_ROOT%{script_dir}/mtx-changer.conf +rm -f $RPM_BUILD_ROOT%{_sbindir}/bacula +%endif + +%clean +[ "$RPM_BUILD_ROOT" != "/" ] && rm -rf "$RPM_BUILD_ROOT" +%if 0%{?opensuse_bs} +rm -f $RPM_BUILD_DIR/Release_Notes-%{version}-1.txt +%else +rm -f $RPM_BUILD_DIR/Release_Notes-%{version}-%{release}.txt +%endif + + +%if %{mysql} +# MySQL specific files +%files mysql +%defattr(-, root, root) +%attr(-, root, %{daemon_group}) %{script_dir}/create_mysql_database +%attr(-, root, %{daemon_group}) %{script_dir}/drop_mysql_database +%attr(-, root, %{daemon_group}) %{script_dir}/make_mysql_tables +%attr(-, root, %{daemon_group}) %{script_dir}/drop_mysql_tables +%attr(-, root, %{daemon_group}) %{script_dir}/update_mysql_tables +%attr(-, root, %{daemon_group}) %{script_dir}/grant_mysql_privileges +%{_libdir}/libbaccats* +%{_libdir}/libbacsql* +%{_libdir}/libbacsd*.so +%endif + +%if %{sqlite} +%files sqlite +%defattr(-,root,root) +%attr(-, root, %{daemon_group}) %{script_dir}/create_sqlite3_database +%attr(-, root, %{daemon_group}) %{script_dir}/drop_sqlite3_database +%attr(-, root, %{daemon_group}) %{script_dir}/grant_sqlite3_privileges +%attr(-, root, %{daemon_group}) %{script_dir}/make_sqlite3_tables +%attr(-, root, 
%{daemon_group}) %{script_dir}/drop_sqlite3_tables +%attr(-, root, %{daemon_group}) %{script_dir}/update_sqlite3_tables +%{sqlite_bindir}/libsqlite3.a +%{sqlite_bindir}/sqlite3.h +%{sqlite_bindir}/sqlite3 +%{_libdir}/libbaccats* +%{_libdir}/libbacsql* +%{_libdir}/libbacsd*.so +%endif + + + +%if %{postgresql} +%files postgresql +%defattr(-,root,root) +%attr(755, root, %{daemon_group}) %{script_dir}/create_postgresql_database +%attr(755, root, %{daemon_group}) %{script_dir}/drop_postgresql_database +%attr(755, root, %{daemon_group}) %{script_dir}/make_postgresql_tables +%attr(755, root, %{daemon_group}) %{script_dir}/drop_postgresql_tables +%attr(755, root, %{daemon_group}) %{script_dir}/update_postgresql_tables +%attr(755, root, %{daemon_group}) %{script_dir}/grant_postgresql_privileges +%{_libdir}/libbaccats* +%{_libdir}/libbacsql* +%{_libdir}/libbacsd*.so +%endif + +# The rest is DB backend independent + +%if ! %{client_only} +%attr(-, root, %{daemon_group}) %dir %{script_dir} +%attr(-, root, %{daemon_group}) %dir %{sysconf_dir} +#%attr(-, %{director_daemon_user}, %{daemon_group}) %dir %{log_dir} +%attr(-, root, %{daemon_group}) %{script_dir}/bacula +%attr(-, root, %{daemon_group}) %{script_dir}/bacula_config +%attr(-, root, %{daemon_group}) %{script_dir}/bconsole +%attr(-, root, %{daemon_group}) %{script_dir}/create_bacula_database +%attr(755, root, %{daemon_group}) %{script_dir}/drop_bacula_database +%attr(755, root, %{daemon_group}) %{script_dir}/grant_bacula_privileges +%attr(755, root, %{daemon_group}) %{script_dir}/make_bacula_tables +%attr(755, root, %{daemon_group}) %{script_dir}/drop_bacula_tables +%attr(755, root, %{daemon_group}) %{script_dir}/update_bacula_tables +%attr(-, root, %{daemon_group}) %{script_dir}/make_catalog_backup +%attr(-, root, %{daemon_group}) %{script_dir}/make_catalog_backup.pl +%attr(-, root, %{daemon_group}) %{script_dir}/delete_catalog_backup +%attr(-, root, %{daemon_group}) %{script_dir}/btraceback.dbx +%attr(-, root, %{daemon_group}) %{script_dir}/btraceback.gdb +%attr(-, root, %{daemon_group}) %{script_dir}/btraceback.mdb +%attr(-, root, %{daemon_group}) %{script_dir}/disk-changer +%attr(-, root, %{daemon_group}) %{script_dir}/bacula-ctl-dir +%attr(-, root, %{daemon_group}) %{script_dir}/bacula-ctl-fd +%attr(-, root, %{daemon_group}) %{script_dir}/bacula-ctl-sd +%attr(-, root, %{daemon_group}) %{script_dir}/tapealert +%attr(-, root, %{daemon_group}) %{script_dir}/baculabackupreport +%attr(-, root, %{daemon_group}) %{script_dir}/query.sql + +%attr(-, root, %{daemon_group}) %{plugin_dir}/bpipe-fd.so + +%attr(-, root, %{daemon_group}) /etc/init.d/bacula-dir +%attr(-, root, %{daemon_group}) /etc/init.d/bacula-fd + +%attr(-, root, %{daemon_group}) %{_sbindir}/dbcheck +%attr(-, root, %{storage_daemon_group}) /etc/init.d/bacula-sd +%attr(-, root, %{storage_daemon_group}) %{script_dir}/mtx-changer +%attr(-, root, %{storage_daemon_group}) %config(noreplace) %{script_dir}/mtx-changer.conf + +/etc/logrotate.d/bacula +%{logwatch_dir}/scripts/services/bacula +%{logwatch_dir}/scripts/shared/applybaculadate +%attr(-, root, %{daemon_group}) %config(noreplace) %{sysconf_dir}/bacula-dir.conf +%attr(-, root, %{daemon_group}) %config(noreplace) %{sysconf_dir}/bacula-fd.conf +%attr(-, root, %{storage_daemon_group}) %config(noreplace) %{sysconf_dir}/bacula-sd.conf +%attr(-, root, %{daemon_group}) %config(noreplace) %{sysconf_dir}/bconsole.conf +%attr(-, root, %{daemon_group}) %config(noreplace) %{logwatch_dir}/conf/logfiles/bacula.conf +%attr(-, root, %{daemon_group}) 
%config(noreplace) %{logwatch_dir}/conf/services/bacula.conf +%attr(-, root, %{daemon_group}) %{script_dir}/sample-query.sql + +# Bacula tray-monitor shortcut +%attr(-, root, %{daemon_group}) %{script_dir}/bacula-tray-monitor.desktop + +%attr(-, %{storage_daemon_user}, %{daemon_group}) %dir %{working_dir} + +%attr(-, root, %{daemon_group}) %{_sbindir}/bacula-dir +%attr(-, root, %{daemon_group}) %{_sbindir}/bacula-sd +%attr(-, root, %{daemon_group}) %{_sbindir}/btraceback +%attr(-, root, %{daemon_group}) %{_sbindir}/bconsole +%attr(-, root, %{daemon_group}) %{_sbindir}/bsmtp +%attr(-, root, %{daemon_group}) %{_sbindir}/bscan +%attr(-, root, %{daemon_group}) %{_sbindir}/btape +%attr(-, root, %{daemon_group}) %{_sbindir}/bbconsjson +%attr(-, root, %{daemon_group}) %{_sbindir}/bdirjson +%attr(-, root, %{daemon_group}) %{_sbindir}/bsdjson + +%attr(755, root, %{daemon_group}) %{_sbindir}/bfdjson + +%{_sbindir}/bacula-fd +%{_sbindir}/bacula +%{_sbindir}/bcopy +%{_sbindir}/bextract +%{_sbindir}/bls +%{_sbindir}/bregex +%{_sbindir}/bwild +%{_mandir}/man8/bacula-fd.8.%{manpage_ext} +%{_mandir}/man8/bacula-dir.8.%{manpage_ext} +%{_mandir}/man8/bacula-sd.8.%{manpage_ext} +%{_mandir}/man8/bacula.8.%{manpage_ext} +%{_mandir}/man8/bconsole.8.%{manpage_ext} +%{_mandir}/man8/bcopy.8.%{manpage_ext} +%{_mandir}/man8/bextract.8.%{manpage_ext} +%{_mandir}/man8/bls.8.%{manpage_ext} +%{_mandir}/man8/bscan.8.%{manpage_ext} +%{_mandir}/man8/btape.8.%{manpage_ext} +%{_mandir}/man8/btraceback.8.%{manpage_ext} +%{_mandir}/man8/dbcheck.8.%{manpage_ext} +%{_mandir}/man8/bregex.8.%{manpage_ext} +%{_mandir}/man8/bwild.8.%{manpage_ext} +%{_mandir}/man1/bsmtp.1.%{manpage_ext} +%{_mandir}/man1/bat.1.%{manpage_ext} +%_prefix/share/doc/* + +# opensuse build service changes the release itself +%if 0%{?opensuse_bs} +%doc ../Release_Notes-%{version}-1.txt +%else +%doc ../Release_Notes-%{version}-%{release}.txt +%endif +%endif + +%if %{mysql} +%pre mysql +# test for bacula database older than version 13 +# note: this ASSUMES no password has been set for bacula database +DB_VER=`mysql 2>/dev/null bacula -e 'select * from Version;'|tail -n 1` +%endif + +%if %{sqlite} +%pre sqlite +# are we upgrading from sqlite to sqlite3? +if [ -s %{working_dir}/bacula.db ] && [ -s %{sqlite_bindir}/sqlite ];then + echo "This version of bacula-sqlite involves an upgrade to sqlite3." + echo "Your catalog database file is not compatible with sqlite3, thus" + echo "you will need to dump the data, delete the old file, and re-run" + echo "this rpm upgrade." + echo "" + echo "Backing up your current database..." + echo ".dump" | %{sqlite_bindir}/sqlite %{working_dir}/bacula.db > %{working_dir}/bacula_backup.sql + mv %{working_dir}/bacula.db %{working_dir}/bacula.db.old + echo "Your catalog data has been saved in %{working_dir}/bacula_backup.sql and your" + echo "catalog file has been renamed %{working_dir}/bacula.db.old." + echo "" + echo "Please re-run this rpm package upgrade." 
+ echo "After the upgrade is complete, restore your catalog" + echo "with the following commands:" + echo "%{script_dir}/drop_sqlite3_tables" + echo "cd %{working_dir}" + echo "%{sqlite_bindir}/sqlite3 $* bacula.db < bacula_backup.sql" + echo "chown bacula.bacula bacula.db" + exit 1 +fi +# test for bacula database older than version 12 and sqlite3 +if [ -s %{working_dir}/bacula.db ] && [ -s %{sqlite_bindir}/sqlite3 ];then + DB_VER=`echo "select * from Version;" | %{sqlite_bindir}/sqlite3 2>/dev/null %{working_dir}/bacula.db | tail -n 1` +%endif + +%if %{postgresql} +%pre postgresql +DB_VER=`echo 'SELECT * FROM Version LIMIT 1;' | su - postgres -c 'psql bacula' 2>/dev/null | tail -3 | head -1` +%endif + +%if ! %{client_only} +if [ -n "$DB_VER" ] && [ "$DB_VER" -lt "12" ]; then + echo "This bacula upgrade will update a bacula database from version 12 to 1014." + echo "You appear to be running database version $DB_VER. You must first update" + echo "your database to version 12 and then install this upgrade. The alternative" + echo "is to use %{script_dir}/drop_%{db_backend}_tables to delete all your your current" + echo "catalog information, then do the upgrade. Information on updating a" + echo "database older than version 12 can be found in the release notes." + exit 1 +fi +%endif + +%if %{sqlite} +fi +%endif + +%if ! %{client_only} +# check for and copy %{sysconf_dir}/console.conf to bconsole.conf +if [ -s %{sysconf_dir}/console.conf ];then + cp -p %{sysconf_dir}/console.conf %{sysconf_dir}/bconsole.conf +fi + +# create the daemon users and groups +# first create the groups if they don't exist +HAVE_BACULA=`grep %{daemon_group} %{group_file} 2>/dev/null` +if [ -z "$HAVE_BACULA" ]; then + %{groupadd} -r %{daemon_group} > /dev/null 2>&1 + echo "The group %{daemon_group} has been added to %{group_file}." + echo "See the manual chapter \"Running Bacula\" for details." +fi +HAVE_BACULA=`grep %{storage_daemon_group} %{group_file} 2>/dev/null` +if [ -z "$HAVE_BACULA" ]; then + %{groupadd} -r %{storage_daemon_group} > /dev/null 2>&1 + echo "The group %{storage_daemon_group} has been added to %{group_file}." + echo "See the manual chapter \"Running Bacula\" for details." +fi +# now create the users if they do not exist +# we do not use the -g option allowing the primary group to be set to system default +# this will be a unique group on redhat type systems or the group users on some systems +HAVE_BACULA=`grep %{storage_daemon_user} %{user_file} 2>/dev/null` +if [ -z "$HAVE_BACULA" ]; then + %{useradd} -r -c "Bacula" -d %{working_dir} -g %{storage_daemon_group} -M -s /sbin/nologin %{storage_daemon_user} > /dev/null 2>&1 + echo "The user %{storage_daemon_user} has been added to %{user_file}." + echo "See the manual chapter \"Running Bacula\" for details." +fi +HAVE_BACULA=`grep %{director_daemon_user} %{user_file} 2>/dev/null` +if [ -z "$HAVE_BACULA" ]; then + %{useradd} -r -c "Bacula" -d %{working_dir} -g %{daemon_group} -M -s /sbin/nologin %{director_daemon_user} > /dev/null 2>&1 + echo "The user %{director_daemon_user} has been added to %{user_file}." + echo "See the manual chapter \"Running Bacula\" for details." +fi +HAVE_BACULA=`grep %{file_daemon_user} %{user_file} 2>/dev/null` +if [ -z "$HAVE_BACULA" ]; then + %{useradd} -r -c "Bacula" -d %{working_dir} -g %{daemon_group} -M -s /sbin/nologin %{file_daemon_user} > /dev/null 2>&1 + echo "The user %{file_daemon_user} has been added to %{user_file}." + echo "See the manual chapter \"Running Bacula\" for details." 
+fi +# now we add the supplementary groups, this is ok to call even if the users already exist +# we only do this if the user is NOT root +IS_ROOT=%{director_daemon_user} +if [ "$IS_ROOT" != "root" ]; then +%{usermod} -G %{daemon_group} %{director_daemon_user} +fi +IS_ROOT=%{storage_daemon_user} +if [ "$IS_ROOT" != "root" ]; then +%{usermod} -G %{daemon_group},%{storage_daemon_group} %{storage_daemon_user} +fi +IS_ROOT=%{file_daemon_user} +if [ "$IS_ROOT" != "root" ]; then +%{usermod} -G %{daemon_group} %{file_daemon_user} +fi +%endif + +%if %{mysql} +%post mysql +%endif +%if %{sqlite} +%post sqlite +%endif +%if %{postgresql} +%post postgresql +%endif +%if ! %{client_only} +# add our links +if [ "$1" -ge 1 ] ; then +%if %{suse} && %{mysql} + /sbin/chkconfig --add mysql +%endif +%if %{suse} && %{postgresql} + /sbin/chkconfig --add postgresql +%endif + /sbin/chkconfig --add bacula-dir + /sbin/chkconfig --add bacula-fd + /sbin/chkconfig --add bacula-sd +fi +%endif + +if [ -d %{sysconf_dir} ]; then + cd %{sysconf_dir} + if [ ! -f .rpm.sed ]; then + (umask 0177 + echo "# This file is used to ensure that all passwords will" > .rpm.sed + echo "# match between configuration files" >> .rpm.sed + ) + for string in XXX_REPLACE_WITH_DIRECTOR_PASSWORD_XXX XXX_REPLACE_WITH_CLIENT_PASSWORD_XXX XXX_REPLACE_WITH_STORAGE_PASSWORD_XXX XXX_REPLACE_WITH_DIRECTOR_MONITOR_PASSWORD_XXX XXX_REPLACE_WITH_CLIENT_MONITOR_PASSWORD_XXX XXX_REPLACE_WITH_STORAGE_MONITOR_PASSWORD_XXX; do + pass=`openssl rand -base64 33` + echo "s@${string}@${pass}@g" >> .rpm.sed + done + fi + host=`hostname -s` + for file in *.conf; do + sed -f .rpm.sed $file > $file.new + sed "s@XXX_HOSTNAME_XXX@${host}@g" $file.new > $file + rm -f $file.new + done +fi + + +%if %{mysql} + +#check, if mysql can be called successfully at all +if mysql 2>/dev/null bacula -e 'select * from Version;' ; then + + # test for an existing database + # note: this ASSUMES no password has been set for bacula database + DB_VER=`mysql 2>/dev/null bacula -e 'select * from Version;'|tail -n 1` + + # grant privileges and create tables if they do not exist + if [ -z "$DB_VER" ]; then + echo "Hmm, it doesn't look like you have an existing database." + echo "Granting privileges for MySQL user bacula..." + %{script_dir}/grant_mysql_privileges + echo "Creating MySQL bacula database..." + %{script_dir}/create_mysql_database + echo "Creating bacula tables..." + %{script_dir}/make_mysql_tables + + elif [ "$DB_VER" -ge "12" -a "$DB_VER" -lt "1015" ]; then + echo "This release requires an upgrade to your bacula database." + echo "Backing up your current database..." + mysqldump -f --opt bacula | bzip2 > %{working_dir}/bacula_backup.sql.bz2 + echo "Upgrading bacula database ..." + %{script_dir}/update_mysql_tables + echo "If bacula works correctly you can remove the backup file %{working_dir}/bacula_backup.sql.bz2" + + fi +fi +%endif + +%if %{sqlite} +# test for an existing database +if [ -s %{working_dir}/bacula.db ]; then + DB_VER=`echo "select * from Version;" | %{sqlite_bindir}/sqlite3 2>/dev/null %{working_dir}/bacula.db | tail -n 1` + # check to see if we need to upgrade a 3.x database + if [ "$DB_VER" -le "13" ] && [ "$DB_VER" -ge "12" ]; then + echo "This release requires an upgrade to your bacula database." + echo "Backing up your current database..." + echo ".dump" | %{sqlite_bindir}/sqlite3 %{working_dir}/bacula.db | bzip2 > %{working_dir}/bacula_backup.sql.bz2 + echo "Upgrading bacula database ..." 
+ %{script_dir}/update_sqlite3_tables + echo "If bacula works correctly you can remove the backup file %{working_dir}/bacula_backup.sql.bz2" + fi +else + # create the database and tables + echo "Hmm, doesn't look like you have an existing database." + echo "Creating SQLite database..." + %{script_dir}/create_sqlite3_database + echo "Creating the SQLite tables..." + %{script_dir}/make_sqlite3_tables + chown %{director_daemon_user}.%{daemon_group} %{working_dir}/bacula.db +fi +%endif + +%if %{postgresql} +# check if psql can be called successfully at all +if echo 'select * from Version;' | su - postgres -c 'psql bacula' 2>/dev/null; then + + # test for an existing database + # note: this ASSUMES no password has been set for bacula database + DB_VER=`echo 'SELECT * FROM Version LIMIT 1;' | su - postgres -c 'psql bacula' 2>/dev/null | tail -3 | head -1` + + # grant privileges and create tables if they do not exist + if [ -z "$DB_VER" ]; then + echo "Hmm, doesn't look like you have an existing database." + echo "Creating PostgreSQL bacula database..." + su - postgres -c %{script_dir}/create_postgresql_database + echo "Creating bacula tables..." + su - postgres -c %{script_dir}/make_postgresql_tables + echo "Granting privileges for PostgreSQL user bacula..." + su - postgres -c %{script_dir}/grant_postgresql_privileges + + elif [ "$DB_VER" -ge "12" -a "$DB_VER" -lt "1015" ]; then + echo "This release requires an upgrade to your bacula database." + echo "Backing up your current database..." + su - postgres -c 'pg_dump bacula' | bzip2 > %{working_dir}/bacula_backup.sql.bz2 + echo "Upgrading bacula database ..." + su - postgres -c %{script_dir}/update_postgresql_tables + echo "If bacula works correctly you can remove the backup file %{working_dir}/bacula_backup.sql.bz2" + echo "Granting privileges for PostgreSQL user bacula..." + su - postgres -c %{script_dir}/grant_postgresql_privileges + + fi +fi +%endif + +%if ! %{client_only} +if [ -d %{sysconf_dir} ]; then + cd %{sysconf_dir} + if [ ! -f .rpm.sed ]; then + (umask 0177 + echo "# This file is used to ensure that all passwords will" > .rpm.sed + echo "# match between configuration files" >> .rpm.sed + ) + for string in XXX_REPLACE_WITH_DIRECTOR_PASSWORD_XXX XXX_REPLACE_WITH_CLIENT_PASSWORD_XXX XXX_REPLACE_WITH_STORAGE_PASSWORD_XXX XXX_REPLACE_WITH_DIRECTOR_MONITOR_PASSWORD_XXX XXX_REPLACE_WITH_CLIENT_MONITOR_PASSWORD_XXX XXX_REPLACE_WITH_STORAGE_MONITOR_PASSWORD_XXX; do + pass=`openssl rand -base64 33` + echo "s@${string}@${pass}@g" >> .rpm.sed + done + fi + host=`hostname -s` + for file in *.conf; do + sed -f .rpm.sed $file > $file.new + sed "s@XXX_HOSTNAME_XXX@${host}@g" $file.new > $file + rm -f $file.new + done +fi +%endif + + +%if %{mysql} +%preun mysql +%endif +%if %{sqlite} +%preun sqlite +%endif +%if %{postgresql} +%preun postgresql +%endif + +%if ! 
%{client_only} +# delete our links +if [ $1 = 0 ]; then + /sbin/chkconfig --del bacula-dir + /sbin/chkconfig --del bacula-fd + /sbin/chkconfig --del bacula-sd +fi +%endif + +%files client +%defattr(-,root,root) +%attr(-, root, %{daemon_group}) %dir %{script_dir} +%attr(-, root, %{daemon_group}) %dir %{plugin_dir} +#%attr(-, root, %{daemon_group}) %dir %{log_dir} +%attr(-, root, %{daemon_group}) %dir %{sysconf_dir} +%{script_dir}/bacula-ctl-fd +/etc/init.d/bacula-fd + +/etc/logrotate.d/bacula + +%attr(-, root, %{daemon_group}) %config(noreplace) %{sysconf_dir}/bacula-fd.conf +%attr(-, root, %{daemon_group}) %config(noreplace) %{sysconf_dir}/bconsole.conf +%attr(-, root, %{daemon_group}) %dir %{working_dir} + +%{_sbindir}/bacula-fd +%{_sbindir}/btraceback +%attr(-, root, %{daemon_group}) %{script_dir}/btraceback.gdb +%attr(-, root, %{daemon_group}) %{script_dir}/btraceback.dbx +%attr(-, root, %{daemon_group}) %{script_dir}/btraceback.mdb +%attr(-, root, %{daemon_group}) %{plugin_dir}/bpipe-fd.so +%{_sbindir}/bconsole +%{_mandir}/man8/bacula-fd.8.%{manpage_ext} +%{_mandir}/man8/bacula.8.%{manpage_ext} +%{_mandir}/man8/bconsole.8.%{manpage_ext} +%{_mandir}/man8/btraceback.8.%{manpage_ext} +%{_mandir}/man1/bat.1.%{manpage_ext} +%_prefix/share/doc/* + +%pre client +# create the daemon group and user +HAVE_BACULA=`grep %{daemon_group} %{group_file} 2>/dev/null` +if [ -z "$HAVE_BACULA" ]; then + %{groupadd} -r %{daemon_group} > /dev/null 2>&1 + echo "The group %{daemon_group} has been added to %{group_file}." + echo "See the manual chapter \"Running Bacula\" for details." +fi +# we do not use the -g option allowing the primary group to be set to system default +# this will be a unique group on redhat type systems or the group users on some systems +HAVE_BACULA=`grep %{file_daemon_user} %{user_file} 2>/dev/null` +if [ -z "$HAVE_BACULA" ]; then + %{useradd} -r -c "Bacula" -d %{working_dir} -g %{daemon_group} -M -s /sbin/nologin %{file_daemon_user} > /dev/null 2>&1 + echo "The user %{file_daemon_user} has been added to %{user_file}." + echo "See the manual chapter \"Running Bacula\" for details." +fi +# now we add the supplementary group, this is ok to call even if the user already exists +# we only do this if the user is NOT root +IS_ROOT=%{file_daemon_user} +if [ "$IS_ROOT" != "root" ]; then +%{usermod} -G %{daemon_group} %{file_daemon_user} +fi + +%post client +# add our link +if [ "$1" -ge 1 ] ; then + /sbin/chkconfig --add bacula-fd +fi + +if [ -d %{sysconf_dir} ]; then + cd %{sysconf_dir} + if [ ! -f .rpm.sed ]; then + (umask 0177 + echo "# This file is used to ensure that all passwords will" > .rpm.sed + echo "# match between configuration files" >> .rpm.sed + ) + for string in XXX_REPLACE_WITH_DIRECTOR_PASSWORD_XXX XXX_REPLACE_WITH_CLIENT_PASSWORD_XXX XXX_REPLACE_WITH_STORAGE_PASSWORD_XXX XXX_REPLACE_WITH_DIRECTOR_MONITOR_PASSWORD_XXX XXX_REPLACE_WITH_CLIENT_MONITOR_PASSWORD_XXX XXX_REPLACE_WITH_STORAGE_MONITOR_PASSWORD_XXX; do + pass=`openssl rand -base64 33` + echo "s@${string}@${pass}@g" >> .rpm.sed + done + fi + host=`hostname -s` + for file in *.conf; do + sed -f .rpm.sed $file > $file.new + sed "s@XXX_HOSTNAME_XXX@${host}@g" $file.new > $file + rm -f $file.new + done +fi + +%preun client +# delete our link +if [ $1 = 0 ]; then + /sbin/chkconfig --del bacula-fd +fi + +%files libs +%defattr(-,root,root) +%{_libdir}/libbac-* +%{_libdir}/libbac.* +%{_libdir}/libbaccfg* +%{_libdir}/libbacfind* + +%post libs +/sbin/ldconfig +exit 0 + +%postun libs +/sbin/ldconfig +exit 0 + +%if ! 
%{client_only} +%files updatedb +%defattr(-,root,%{daemon_group}) +%{script_dir}/updatedb/* +#oensuse_bs: directories not owned by any package +#%{script_dir}/updatedb + +%pre updatedb +# create the daemon group +HAVE_BACULA=`grep %{daemon_group} %{group_file} 2>/dev/null` +if [ -z "$HAVE_BACULA" ]; then + %{groupadd} -r %{daemon_group} > /dev/null 2>&1 + echo "The group %{daemon_group} has been added to %{group_file}." + echo "See the manual chapter \"Running Bacula\" for details." +fi + +%post updatedb +echo "The database update scripts were installed to %{script_dir}/updatedb" +%endif + +%changelog +* Thu Mar 27 2014 Kern Sibbald 7.0.0-1 +- New rpm spec file for Suse diff --git a/platforms/slackware/Makefile.in b/platforms/slackware/Makefile.in new file mode 100644 index 00000000..908aa548 --- /dev/null +++ b/platforms/slackware/Makefile.in @@ -0,0 +1,69 @@ +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# This file is used as the template to create the +# Makefile for the Slackware specific installation. +# +# 22 January 2003 -- Kern Sibbald +# and corrected for Gentoo by +# Patrick Naubert 25 Jan 2003 +# and reworked for Slackware by +# Matt Howard 09 Mar 2004 +# further reworked for Slackware without Perl dependency by +# Phil Stracchino 13 Mar 2004 +# +# for Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +INSTALL = @INSTALL@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ + +nothing: + +install: install-autostart + +install-autostart: install-autostart-fd install-autostart-sd install-autostart-dir + +install-autostart-fd: install-autostart-rc + @$(INSTALL) -m 744 rc.bacula-fd $(DESTDIR)/etc/rc.d/rc.bacula-fd + +install-autostart-sd: install-autostart-rc + @$(INSTALL) -m 744 rc.bacula-sd $(DESTDIR)/etc/rc.d/rc.bacula-sd + +install-autostart-dir: install-autostart-rc + @$(INSTALL) -m 744 rc.bacula-dir $(DESTDIR)/etc/rc.d/rc.bacula-dir + +install-autostart-rc: + @$(INSTALL) -m 744 functions.bacula $(DESTDIR)/etc/rc.d + sh ./local-install.sh install $(DESTDIR) + + +uninstall: uninstall-autostart + +uninstall-autostart: uninstall-autostart-fd uninstall-autostart-sd uninstall-autostart-dir + +uninstall-autostart-fd: uninstall-autostart-rc + @rm -f $(DESTDIR)/etc/rc.d/rc.bacula-fd + +uninstall-autostart-sd: uninstall-autostart-rc + @rm -f $(DESTDIR)/etc/rc.d/rc.bacula-sd + +uninstall-autostart-dir: uninstall-autostart-rc + @rm -f $(DESTDIR)/etc/rc.d/rc.bacula-dir + +uninstall-autostart-rc: + @rm -f $(DESTDIR)/etc/rc.d/functions.bacula + sh ./local-install.sh remove $(DESTDIR) + + +clean: + @rm -f 1 2 3 + +distclean: clean + @rm -f rc.bacula-sd rc.bacula-fd rc.bacula-dir + @rm -f Makefile + +devclean: clean + @rm -f rc.bacula-sd rc.bacula-fd rc.bacula-dir + @rm -f Makefile diff --git a/platforms/slackware/functions.bacula.in b/platforms/slackware/functions.bacula.in new file mode 100644 index 00000000..dc956087 --- /dev/null +++ b/platforms/slackware/functions.bacula.in @@ -0,0 +1,191 @@ +#! /bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# bacula This shell script takes care of starting and stopping +# the bacula daemons. +# +# This is pretty much watered down version of the RedHat script +# that works on Solaris as well as Linux, but it won't work everywhere. +# +# description: The Leading Open Source Backup Solution. +# + +PSCMD="@PSCMD@" + +# All these are not *really* needed but it makes it +# easier to "steal" this code for the development +# environment where they are different. 
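The %post/%preun scriptlets above hang their chkconfig calls on the scriptlet argument: RPM passes in $1 the number of copies of the package that will remain installed once the transaction finishes, so it is 1 or more in %post (first install or upgrade) and 0 in %preun only on final removal. A condensed, illustrative sketch of that convention, using bacula-fd as one of the three services handled above:
    # illustrative sketch only -- same convention the scriptlets above rely on
    if [ "$1" -ge 1 ] ; then            # %post: first install or an upgrade
        /sbin/chkconfig --add bacula-fd
    fi
    if [ "$1" = 0 ] ; then              # %preun: the last copy is being erased
        /sbin/chkconfig --del bacula-fd
    fi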
+# +BACFDBIN=@sbindir@ +BACFDCFG=@sysconfdir@ +BACSDBIN=@sbindir@ +BACSDCFG=@sysconfdir@ +BACDIRBIN=@sbindir@ +BACDIRCFG=@sysconfdir@ +PIDDIR=@piddir@ +SUBSYSDIR=@subsysdir@ + +DIR_PORT=@dir_port@ +FD_PORT=@fd_port@ +SD_PORT=@sd_port@ + +DIR_USER=@dir_user@ +DIR_GROUP=@dir_group@ +FD_USER=@fd_user@ +FD_GROUP=@fd_group@ +SD_USER=@sd_user@ +SD_GROUP=@sd_group@ + +# A function to stop a program. +killproc() { + RC=0 + # Test syntax. + if [ $# = 0 ]; then + echo "Usage: killproc {program} [signal]" + return 1 + fi + + notset=0 + # check for third arg to be kill level + if [ "$3" != "" ] ; then + killlevel=$3 + else + notset=1 + killlevel="-9" + fi + + # Get base program name + base=`basename $1` + + # Find pid. + pid=`pidofproc $base $2` + + # Kill it. + if [ "$pid" != "" ] ; then + if [ "$notset" = "1" ] ; then + if ps -p $pid>/dev/null 2>&1; then + # TERM first, then KILL if not dead + kill -TERM $pid 2>/dev/null + sleep 1 + if ps -p $pid >/dev/null 2>&1 ; then + sleep 1 + if ps -p $pid >/dev/null 2>&1 ; then + sleep 3 + if ps -p $pid >/dev/null 2>&1 ; then + kill -KILL $pid 2>/dev/null + fi + fi + fi + fi + ps -p $pid >/dev/null 2>&1 + RC=$? + [ $RC -eq 0 ] && failure "$base shutdown" || success "$base shutdown" + # RC=$((! $RC)) + # use specified level only + else + if ps -p $pid >/dev/null 2>&1; then + kill $killlevel $pid 2>/dev/null + RC=$? + [ $RC -eq 0 ] && success "$base $killlevel" || failure "$base $killlevel" + fi + fi + else + failure "$base shutdown" + fi + # Remove pid file if any. + if [ "$notset" = "1" ]; then + rm -f ${PIDDIR}/$base.$2.pid + fi + return $RC +} + +# A function to find the pid of a program. +pidofproc() { + pid="" + # Test syntax. + if [ $# = 0 ] ; then + echo "Usage: pidofproc {program}" + return 1 + fi + + # Get base program name + base=`basename $1` + + # First try PID file + if [ -f ${PIDDIR}/$base.$2.pid ] ; then + pid=`head -1 ${PIDDIR}/$base.$2.pid` + if [ "$pid" != "" ] ; then + echo $pid + return 0 + fi + fi + + # Next try "pidof" + if [ -x /sbin/pidof ] ; then + pid=`/sbin/pidof $1` + fi + if [ "$pid" != "" ] ; then + echo $pid + return 0 + fi + + # Finally try to extract it from ps + ${PSCMD} | grep $1 | awk '{ print $1 }' | tr '\n' ' ' + return 0 +} + +status() { + # Test syntax. + if [ $# = 0 ] ; then + echo "Usage: status {program}" + return 1 + fi + + # Get base program name + base=`basename $1` + + # First try "pidof" + if [ -x /sbin/pidof ] ; then + pid=`/sbin/pidof $1` + fi + if [ "$pid" != "" ] ; then + echo "$base (pid $pid) is running..." + return 0 + else + pid=`${PSCMD} | awk 'BEGIN { prog=ARGV[1]; ARGC=1 } + { if ((prog == $2) || (("(" prog ")") == $2) || + (("[" prog "]") == $2) || + ((prog ":") == $2)) { print $1 ; exit 0 } }' $1` + if [ "$pid" != "" ] ; then + echo "$base (pid $pid) is running..." + return 0 + fi + fi + + # Next try the PID files + if [ -f ${PIDDIR}/$base.$2.pid ] ; then + pid=`head -1 ${PIDDIR}/$base.$2.pid` + if [ "$pid" != "" ] ; then + echo "$base not running, but pid file exists" + return 1 + fi + fi + # See if the subsys lock exists + if [ -f ${SUBSYSDIR}/$base ] ; then + echo "$base not running, but subsys locked" + return 2 + fi + echo "$base is stopped" + return 3 +} + +success() { + return 0 +} + +failure() { + rc=$? 
+ return $rc +} diff --git a/platforms/slackware/local-install.sh b/platforms/slackware/local-install.sh new file mode 100755 index 00000000..68a17c11 --- /dev/null +++ b/platforms/slackware/local-install.sh @@ -0,0 +1,89 @@ +#!/bin/sh +# local-install.sh +# for Bacula on Slackware platform +# Phil Stracchino 13 Mar 2004 +# +# Installs and removes Bacula install section into /etc/rc.d/rc.local +# provided /etc/rc.d/rc.local is writeable. Creates a backup copy of +# /etc/rc.d/rc.local in /etc/rc.d/rc.local.bak if /etc/rc.d is writeable. +# +# Usage: local-install.sh install|remove [destdir] +# +# uncomment for debugging: +#set -x + +if [ -n "$2" ] ; then + TARG=$DESTDIR/etc/rc.d/rc.local +else + TARG=/etc/rc.d/rc.local +fi + +if [ ! -f $TARG ] ; then + echo $TARG does not appear to exist. Bailing out. + exit -1 +fi + +if [ "$1" = "install" ] ; then + echo Installing Bacula autostart into $TARG: + COUNT=`grep -c "Bacula section @@@@" $TARG` + if [ ! "$COUNT" == "0" ] ; then + echo -e "\tBacula autostart section appears to be already installed.\n\tIf you have changed the configuration, make uninstall-autostart\n\tthen make install-autostart again.\n" + else + if [ -w $TARG ] ; then + if [ -w `dirname $TARG` ] ; then + cp -p $TARG $TARG.bak + echo -e "\tBackup copy of $TARG saved in $TARG.bak." + else + echo -e "\tWARNING: Unable to create backup copy of $TARG.\n\tAttempting to continue anyway."; + fi + cat >> $TARG << EOF +# @@@@ Start Bacula section @@@@ +# The line above is needed to automatically remove bacula. + +if [ -x /etc/rc.d/rc.bacula-sd ]; then + /etc/rc.d/rc.bacula-sd start +fi +if [ -x /etc/rc.d/rc.bacula-fd ]; then + /etc/rc.d/rc.bacula-fd start +fi +if [ -x /etc/rc.d/rc.bacula-dir ]; then + /etc/rc.d/rc.bacula-dir start +fi + +# @@@@ End Bacula section @@@@ +EOF + echo -e "\tBacula autostart section has been installed in $TARG.\n"; + else + echo -e "\tERROR! Cannot write to $TARG.\n\tBailing out.\n" + exit -1 + fi + fi +elif [ "$1" = "remove" ] ; then + echo Removing Bacula autostart from $TARG: + COUNT=`grep -c "Bacula section @@@@" $TARG` + if [ ! "$COUNT" == "2" ] ; then + echo -e "\tCould not find Bacula autostart section in $TARG. Bailing out.\n" + exit -1 + else + if [ -w $TARG ] ; then + if [ -w `dirname $TARG` ] ; then + cp -p $TARG $TARG.bak + echo -e "\tBackup copy of $TARG saved in $TARG.bak." + else + echo -e "\tWARNING: Unable to create backup copy of $TARG.\n\tAttempting to continue anyway."; + fi + FIRST=`grep -n "@@@@ Start Bacula section @@@@" $TARG | cut -d: -f1` + LAST=`grep -n "@@@@ End Bacula section @@@@" $TARG | cut -d: -f1` + FIRST=`expr $FIRST - 1` + LAST=`expr $LAST + 1` + head -$FIRST $TARG > ./installtmp + tail +$LAST $TARG >> ./installtmp + cat ./installtmp > $TARG + rm ./installtmp + echo -e "\tBacula autostart section has been removed from $TARG.\n"; + fi + fi +else + echo -e "\tUSAGE: $0 install|remove [destdir]" +fi +exit 0 diff --git a/platforms/slackware/rc.bacula-dir.in b/platforms/slackware/rc.bacula-dir.in new file mode 100644 index 00000000..8c9745bc --- /dev/null +++ b/platforms/slackware/rc.bacula-dir.in @@ -0,0 +1,61 @@ +#! /bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# bacula This shell script takes care of starting and stopping +# the bacula Director daemon +# +# chkconfig: 2345 92 99 +# description: The Leading Open Source Backup Solution. +# +# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +# Source function library +. 
/etc/rc.d/functions.bacula + +RETVAL=0 +case "$1" in + start) + [ -x ${BACDIRBIN}/bacula-dir ] && { + sleep 2 + echo -n "Starting the Director daemon: " + OPTIONS='' + if [ "${DIR_USER}" != '' ]; then + OPTIONS="${OPTIONS} -u ${DIR_USER}" + fi + + if [ "${DIR_GROUP}" != '' ]; then + OPTIONS="${OPTIONS} -g ${DIR_GROUP}" + fi + + ${BACDIRBIN}/bacula-dir $2 ${OPTIONS} -v -c ${BACDIRCFG}/bacula-dir.conf + RETVAL=$? + echo Done. + } + ;; + stop) + [ -x ${BACDIRBIN}/bacula-dir ] && { + echo -n "Stopping the Director daemon: " + killproc ${BACDIRBIN}/bacula-dir ${DIR_PORT} + RETVAL=$? + echo Done. + } + ;; + restart) + $0 stop + sleep 5 + $0 start + RETVAL=$? + ;; + status) + [ -x ${BACDIRBIN}/bacula-dir ] && status ${BACDIRBIN}/bacula-dir ${DIR_PORT} + RETVAL=$? + ;; + *) + echo "Usage: $0 {start|stop|restart|status}" + exit 1 + ;; +esac +exit $RETVAL diff --git a/platforms/slackware/rc.bacula-fd.in b/platforms/slackware/rc.bacula-fd.in new file mode 100644 index 00000000..b5f0b672 --- /dev/null +++ b/platforms/slackware/rc.bacula-fd.in @@ -0,0 +1,61 @@ +#! /bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# bacula This shell script takes care of starting and stopping +# the bacula File daemon. +# +# chkconfig: 2345 91 99 +# description: The Leading Open Source Backup Solution. +# +# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +# Source function library +. /etc/rc.d/functions.bacula + +RETVAL=0 +case "$1" in + start) + [ -x ${BACFDBIN}/bacula-fd ] && { + sleep 2 + echo -n "Starting the File daemon: " + OPTIONS='' + if [ "${FD_USER}" != '' ]; then + OPTIONS="${OPTIONS} -u ${FD_USER}" + fi + + if [ "${FD_GROUP}" != '' ]; then + OPTIONS="${OPTIONS} -g ${FD_GROUP}" + fi + + ${BACFDBIN}/bacula-fd $2 ${OPTIONS} -v -c ${BACFDCFG}/bacula-fd.conf + RETVAL=$? + echo Done. + } + ;; + stop) + [ -x ${BACFDBIN}/bacula-fd ] && { + echo -n "Stopping the File daemon: " + killproc ${BACFDBIN}/bacula-fd ${FD_PORT} + RETVAL=$? + echo Done. + } + ;; + restart) + $0 stop + sleep 5 + $0 start + RETVAL=$? + ;; + status) + [ -x ${BACFDBIN}/bacula-fd ] && status ${BACFDBIN}/bacula-fd ${FD_PORT} + RETVAL=$? + ;; + *) + echo "Usage: $0 {start|stop|restart|status}" + exit 1 + ;; +esac +exit $RETVAL diff --git a/platforms/slackware/rc.bacula-sd.in b/platforms/slackware/rc.bacula-sd.in new file mode 100644 index 00000000..ff7a687a --- /dev/null +++ b/platforms/slackware/rc.bacula-sd.in @@ -0,0 +1,61 @@ +#! /bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# bacula This shell script takes care of starting and stopping +# the bacula Storage daemon. +# +# chkconfig: 2345 90 99 +# description: The Leading Open Source Backup Solution. +# +# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +# Source function library +. /etc/rc.d/functions.bacula + +RETVAL=0 +case "$1" in + start) + [ -x ${BACSDBIN}/bacula-sd ] && { + sleep 2 + echo -n "Starting the Storage daemon: " + OPTIONS='' + if [ "${SD_USER}" != '' ]; then + OPTIONS="${OPTIONS} -u ${SD_USER}" + fi + + if [ "${SD_GROUP}" != '' ]; then + OPTIONS="${OPTIONS} -g ${SD_GROUP}" + fi + + ${BACSDBIN}/bacula-sd $2 ${OPTIONS} -v -c ${BACSDCFG}/bacula-sd.conf + RETVAL=$? + echo Done. + } + ;; + stop) + [ -x ${BACSDBIN}/bacula-sd ] && { + echo -n "Stopping the Storage daemon: " + killproc ${BACSDBIN}/bacula-sd ${SD_PORT} + RETVAL=$? + echo Done. + } + ;; + restart) + $0 stop + sleep 5 + $0 start + RETVAL=$? 
+ ;; + status) + [ -x ${BACSDBIN}/bacula-sd ] && status ${BACSDBIN}/bacula-sd ${SD_PORT} + RETVAL=$? + ;; + *) + echo "Usage: $0 {start|stop|restart|status}" + exit 1 + ;; +esac +exit $RETVAL diff --git a/platforms/solaris/Makefile.in b/platforms/solaris/Makefile.in new file mode 100644 index 00000000..fae0ff0c --- /dev/null +++ b/platforms/solaris/Makefile.in @@ -0,0 +1,91 @@ +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# This file is used as the template to create the +# Makefile for the Solaris specific installation. +# +# 15 November 2001 -- Kern Sibbald +# +# 03 November 2003 corrections to the paths made by +# Kenneth ragnor at virtualsd dot net +# +# for Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +INSTALL = @INSTALL@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ + +nothing: + +install: install-autostart + +install-autostart: install-autostart-fd install-autostart-sd install-autostart-dir + + +install-autostart-fd: + @rm -f /etc/rc0.d/K20bacula-fd + @rm -f /etc/rc1.d/S99bacula-fd + @rm -f /etc/rc2.d/S99bacula-fd + @$(INSTALL_PROGRAM) -m 744 bacula-fd /etc/init.d/bacula-fd + # set symlinks for script at startup and shutdown + @ln -f -s /etc/init.d/bacula-fd /etc/rc0.d/K20bacula-fd + @ln -f -s /etc/init.d/bacula-fd /etc/rc1.d/S99bacula-fd + @ln -f -s /etc/init.d/bacula-fd /etc/rc2.d/S99bacula-fd + + +install-autostart-sd: + @rm -f /etc/rc0.d/K20bacula-sd + @rm -f /etc/rc1.d/S99bacula-sd + @rm -f /etc/rc2.d/S99bacula-sd + @$(INSTALL_PROGRAM) -m 744 bacula-sd /etc/init.d/bacula-sd + # set symlinks for script at startup and shutdown + @ln -f -s /etc/init.d/bacula-sd /etc/rc0.d/K20bacula-sd + @ln -f -s /etc/init.d/bacula-sd /etc/rc1.d/S99bacula-sd + @ln -f -s /etc/init.d/bacula-sd /etc/rc2.d/S99bacula-sd + + +install-autostart-dir: + @rm -f /etc/rc0.d/K20bacula-dir + @rm -f /etc/rc1.d/S99bacula-dir + @rm -f /etc/rc2.d/S99bacula-dir + @$(INSTALL_PROGRAM) -m 744 bacula-dir /etc/init.d/bacula-dir + # set symlinks for script at startup and shutdown + @ln -f -s /etc/init.d/bacula-dir /etc/rc0.d/K20bacula-dir + @ln -f -s /etc/init.d/bacula-dir /etc/rc1.d/S99bacula-dir + @ln -f -s /etc/init.d/bacula-dir /etc/rc2.d/S99bacula-dir + + +uninstall: uninstall-autostart + +uninstall-autostart: uninstall-autostart-fd uninstall-autostart-sd uninstall-autostart-dir + +uninstall-autostart-fd: + @rm -f /etc/rc0.d/K20bacula-fd + @rm -f /etc/rc1.d/S99bacula-fd + @rm -f /etc/rc2.d/S99bacula-fd + @rm -f /etc/init.d/bacula-fd + + +uninstall-autostart-sd: + @rm -f /etc/rc0.d/K20bacula-sd + @rm -f /etc/rc1.d/S99bacula-sd + @rm -f /etc/rc2.d/S99bacula-sd + @rm -f /etc/init.d/bacula-sd + +uninstall-autostart-dir: + @rm -f /etc/rc0.d/K20bacula-dir + @rm -f /etc/rc1.d/S99bacula-dir + @rm -f /etc/rc2.d/S99bacula-dir + @rm -f /etc/init.d/bacula-dir + +clean: + @rm -f 1 2 3 + +distclean: clean + @rm -f bacula-sd bacula-fd bacula-dir + @rm -f Makefile bacula-*.spec + +devclean: clean + @rm -f bacula-sd bacula-fd bacula-dir + @rm -f Makefile bacula-*.spec diff --git a/platforms/solaris/bacula-dir.in b/platforms/solaris/bacula-dir.in new file mode 100755 index 00000000..bc78e595 --- /dev/null +++ b/platforms/solaris/bacula-dir.in @@ -0,0 +1,57 @@ +#!/bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# bacula This shell script takes care of starting and stopping +# the bacula Director daemon +# +# description: The Leading Open Source Backup Solution. 
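The killproc/pidofproc/status helpers above locate a daemon through a pid file named daemon.port.pid under the configured pid directory. A minimal standalone illustration of that convention (the /var/run path and the file daemon's port 9102 are examples; the real values come from configure's @piddir@ and port substitutions):
    PIDDIR=/var/run                      # example; normally @piddir@
    base=bacula-fd
    pidfile=${PIDDIR}/${base}.9102.pid   # daemon.port.pid, as read by pidofproc
    if [ -f "$pidfile" ]; then
        pid=`head -1 "$pidfile"`
        ps -p "$pid" >/dev/null 2>&1 && echo "$base (pid $pid) is running"
    fi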
+# +# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +DIR_USER=@dir_user@ +DIR_GROUP=@dir_group@ +DIR_OPTIONS='' + +case "$1" in + start) + if [ ! -z "${DIR_USER}" ]; then + [ -z "${DIR_OPTIONS}" ] && DIR_OPTIONS="-u ${DIR_USER}" || \ + DIR_OPTIONS="${DIR_OPTIONS} -u ${DIR_USER}" + fi + if [ ! -z "${DIR_GROUP}" ]; then + [ -z "${DIR_OPTIONS}" ] && DIR_OPTIONS="-g ${DIR_GROUP}" || \ + DIR_OPTIONS="${DIR_OPTIONS} -g ${DIR_GROUP}" + fi + + echo "Starting the Bacula Director: " + @sbindir@/bacula-dir $2 ${DIR_OPTIONS} -c @sysconfdir@/bacula-dir.conf + ;; + stop) + echo "Stopping the Director daemon: " + if [ -x /usr/bin/zonename ]; then + case `/usr/bin/zonename` in + global) + pkill -z global -x bacula-dir + ;; + *) + pkill -x bacula-dir + ;; + esac + else + pkill -x bacula-dir + fi + ;; + restart) + $0 stop + sleep 5 + $0 start + ;; + *) + echo "Usage: $0 {start|stop|restart}" + exit 1 + ;; +esac +exit 0 diff --git a/platforms/solaris/bacula-fd.in b/platforms/solaris/bacula-fd.in new file mode 100755 index 00000000..36fb431c --- /dev/null +++ b/platforms/solaris/bacula-fd.in @@ -0,0 +1,57 @@ +#!/bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# bacula This shell script takes care of starting and stopping +# the bacula File daemon. +# +# description: The Leading Open Source Backup Solution. +# +# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +FD_USER=@fd_user@ +FD_GROUP=@fd_group@ +FD_OPTIONS='' + +case "$1" in + start) + if [ ! -z "${DIR_USER}" ]; then + [ -z "${DIR_OPTIONS}" ] && DIR_OPTIONS="-u ${DIR_USER}" || \ + DIR_OPTIONS="${DIR_OPTIONS} -u ${DIR_USER}" + fi + if [ ! -z "${DIR_GROUP}" ]; then + [ -z "${DIR_OPTIONS}" ] && DIR_OPTIONS="-g ${DIR_GROUP}" || \ + DIR_OPTIONS="${DIR_OPTIONS} -g ${DIR_GROUP}" + fi + + echo "Starting the Bacula File daemon: " + @sbindir@/bacula-fd $2 ${FD_OPTIONS} -c @sysconfdir@/bacula-fd.conf + ;; + stop) + echo "Stopping the Bacula File daemon: " + if [ -x /usr/bin/zonename ]; then + case `/usr/bin/zonename` in + global) + pkill -z global -x bacula-fd + ;; + *) + pkill -x bacula-fd + ;; + esac + else + pkill -x bacula-fd + fi + ;; + restart) + $0 stop + sleep 5 + $0 start + ;; + *) + echo "Usage: $0 {start|stop|restart}" + exit 1 + ;; +esac +exit 0 diff --git a/platforms/solaris/bacula-sd.in b/platforms/solaris/bacula-sd.in new file mode 100755 index 00000000..9ffff5cf --- /dev/null +++ b/platforms/solaris/bacula-sd.in @@ -0,0 +1,57 @@ +#!/bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# bacula This shell script takes care of starting and stopping +# the bacula Storage daemon. +# +# description: The Leading Open Source Backup Solution. +# +# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +SD_USER=@sd_user@ +SD_GROUP=@sd_group@ +SD_OPTIONS='' + +case "$1" in + start) + if [ ! -z "${DIR_USER}" ]; then + [ -z "${DIR_OPTIONS}" ] && DIR_OPTIONS="-u ${DIR_USER}" || \ + DIR_OPTIONS="${DIR_OPTIONS} -u ${DIR_USER}" + fi + if [ ! 
-z "${DIR_GROUP}" ]; then + [ -z "${DIR_OPTIONS}" ] && DIR_OPTIONS="-g ${DIR_GROUP}" || \ + DIR_OPTIONS="${DIR_OPTIONS} -g ${DIR_GROUP}" + fi + + echo "Starting the Bacula Storage daemon: " + @sbindir@/bacula-sd $2 ${SD_OPTIONS} -c @sysconfdir@/bacula-sd.conf + ;; + stop) + echo "Stopping the Bacula Storage daemon: " + if [ -x /usr/bin/zonename ]; then + case `/usr/bin/zonename` in + global) + pkill -z global -x bacula-sd + ;; + *) + pkill -x bacula-sd + ;; + esac + else + pkill -x bacula-sd + fi + ;; + restart) + $0 stop + sleep 5 + $0 start + ;; + *) + echo "Usage: $0 {start|stop|restart}" + exit 1 + ;; +esac +exit 0 diff --git a/platforms/solaris/copyright b/platforms/solaris/copyright new file mode 100644 index 00000000..ee235b21 --- /dev/null +++ b/platforms/solaris/copyright @@ -0,0 +1,18 @@ + + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2015 Kern Sibbald + Copyright (C) 2000-2014 Free Software Foundation Europe e.V. + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. diff --git a/platforms/solaris/makepkg.sh b/platforms/solaris/makepkg.sh new file mode 100644 index 00000000..536b9d89 --- /dev/null +++ b/platforms/solaris/makepkg.sh @@ -0,0 +1,82 @@ +#!/bin/bash +# +# + +# make use of Studio 11 or 12 compiler +# +export CC=cc +export CXX=CC + +INSTALL_BASE=/opt/bacula +SBIN_DIR=$INSTALL_BASE/sbin +MAN_DIR=$INSTALL_BASE/man +SYSCONF_DIR=$INSTALL_BASE/etc +SCRIPT_DIR=$INSTALL_BASE/etc +WORKING_DIR=/var/bacula + +VERSION=2.2.5 + +CWD=`pwd` +# Try to guess the distribution base +DISTR_BASE=`dirname \`pwd\` | sed -e 's@/platforms$@@'` +echo "Distribution base: $DISTR_BASE" + +TMPINSTALLDIR=/tmp/`basename $DISTR_BASE`-build +echo "Temp install dir: $TMPINSTALLDIR" +echo "Install directory: $INSTALL_BASE" + +cd $DISTR_BASE + +if [ "x$1" = "xbuild" ]; then + ./configure --prefix=$INSTALL_BASE \ + --sbindir=$SBIN_DIR \ + --sysconfdir=$SYSCONF_DIR \ + --mandir=$MAN_DIR \ + --with-scriptdir=$SCRIPT_DIR \ + --with-working-dir=$WORKING_DIR \ + --with-subsys-dir=/var/lock/subsys \ + --with-pid-dir=/var/run \ + --enable-smartalloc \ + --enable-conio \ + --enable-readline \ + --enable-client-only \ + --disable-ipv6 + + make +fi + +if [ -d $TMPINSTALLDIR ]; then + rm -rf $TMPINSTALLDIR +fi +mkdir $TMPINSTALLDIR + +make DESTDIR=$TMPINSTALLDIR install + +# copy additional files to install-dir +# + + +# change conf-files that they won't be overwritten by install +# +cd $TMPINSTALLDIR/$SYSCONF_DIR +for x in *.conf; do + mv ${x} ${x}-dist +done + + +# cd back to my start-dir +# +cd $CWD + +#cp prototype.master prototype +sed -e "s|__PKGSOURCE__|$CWD|" prototype.master > prototype + +pkgproto $TMPINSTALLDIR/$INSTALL_BASE=. >> prototype + +pkgmk -o -d /tmp -b $TMPINSTALLDIR/$INSTALL_BASE -f prototype + +if [ $? 
= 0 ]; then + pkgtrans /tmp bacula-$VERSION.pkg Bacula + echo "Package has been created in /tmp" +fi + diff --git a/platforms/solaris/pkginfo b/platforms/solaris/pkginfo new file mode 100644 index 00000000..88256636 --- /dev/null +++ b/platforms/solaris/pkginfo @@ -0,0 +1,12 @@ +PKG=Bacula +NAME=Bacula Client +ARCH=sparc +VERSION=2.2.5 +MAXINST=1 +CATEGORY=application +DESC=Bacula - The Leading Open Source Backup Solution. +VENDOR=http://www.bacula.org +HOTLINE=none +EMAIL=christian.masopust@siemens.com +CLASSES=none +BASEDIR=/opt/bacula diff --git a/platforms/solaris/postinstall b/platforms/solaris/postinstall new file mode 100644 index 00000000..c05972d4 --- /dev/null +++ b/platforms/solaris/postinstall @@ -0,0 +1,9 @@ +#!/bin/sh +# +# postinstall for Bacula +# +if [ ! -d /var/bacula ]; then + mkdir /var/bacula +fi + +exit 0 diff --git a/platforms/solaris/postremove b/platforms/solaris/postremove new file mode 100644 index 00000000..ea142ec8 --- /dev/null +++ b/platforms/solaris/postremove @@ -0,0 +1,9 @@ +#!/bin/sh +# +# postremove for Bacula +# +if [ -d /var/bacula ]; then + rm -rf /var/bacula +fi + +exit 0 diff --git a/platforms/solaris/prototype.master b/platforms/solaris/prototype.master new file mode 100644 index 00000000..7f5d76a7 --- /dev/null +++ b/platforms/solaris/prototype.master @@ -0,0 +1,24 @@ +# +# Information files +# +i pkginfo=./pkginfo +i copyright=./copyright +i postinstall=./postinstall +i postremove=./postremove +# +# Stuff that goes into the system areas +# +d none /etc ? ? ? +d none /etc/init.d ? ? ? +f none /etc/init.d/bacula=__PKGSOURCE__/bacula-fd 0754 root bin +d none /etc/rc2.d ? ? ? +s none /etc/rc2.d/S99bacula=/etc/init.d/bacula +d none /etc/rc0.d ? ? ? +s none /etc/rc0.d/K01bacula=/etc/init.d/bacula +# +d none /var ? ? ? +d none /var/bacula 0755 root root +# +# +# Dynamically added entries (by pkgproto) +# diff --git a/platforms/suse/Makefile.in b/platforms/suse/Makefile.in new file mode 100644 index 00000000..cb782dd0 --- /dev/null +++ b/platforms/suse/Makefile.in @@ -0,0 +1,87 @@ +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# This file is used as the template to create the +# Makefile for the SuSE specific installation.
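Once makepkg.sh above has produced the datastream package in /tmp, the standard SVR4 tools can inspect, install, and remove it. A hypothetical follow-up session, assuming the VERSION=2.2.5 and PKG=Bacula values shown above (the package installs under the BASEDIR of /opt/bacula):
    pkginfo -d /tmp/bacula-2.2.5.pkg           # list the packages in the datastream
    pkgadd  -d /tmp/bacula-2.2.5.pkg Bacula    # install the Bacula client package
    pkgrm Bacula                               # remove it again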
+# +# +# for Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +INSTALL = @INSTALL@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ + +nothing: + +install: install-autostart + +install-autostart: install-autostart-fd install-autostart-sd install-autostart-dir + + +install-autostart-fd: + @if test x$(DESTDIR) = x -a -f /etc/init.d/bacula-fd; then \ + /sbin/chkconfig --del bacula-fd; \ + fi + @$(INSTALL_PROGRAM) -m 744 bacula-fd $(DESTDIR)/etc/init.d/bacula-fd + # set symlinks for script at startup and shutdown + @if test x$(DESTDIR) = x ; then \ + /sbin/chkconfig --add bacula-fd; \ + fi + + +install-autostart-sd: + @if test x$(DESTDIR) = x -a -f /etc/init.d/bacula-sd; then \ + /sbin/chkconfig --del bacula-sd; \ + fi + @$(INSTALL_PROGRAM) -m 744 bacula-sd $(DESTDIR)/etc/init.d/bacula-sd + # set symlinks for script at startup and shutdown + @if test x$(DESTDIR) = x ; then \ + /sbin/chkconfig --add bacula-sd; \ + fi + + +install-autostart-dir: + @if test x$(DESTDIR) = x -a -f /etc/init.d/bacula-dir; then \ + /sbin/chkconfig --del bacula-dir; \ + fi + @$(INSTALL_PROGRAM) -m 744 bacula-dir $(DESTDIR)/etc/init.d/bacula-dir + # set symlinks for script at startup and shutdown + @if test x$(DESTDIR) = x ; then \ + /sbin/chkconfig --add bacula-dir; \ + fi + + +uninstall: uninstall-autostart + +uninstall-autostart: uninstall-autostart-fd uninstall-autostart-sd uninstall-autostart-dir + +uninstall-autostart-fd: + @if test x$(DESTDIR) = x -a -f /etc/init.d/bacula-fd; then \ + /sbin/chkconfig --del bacula-fd; \ + fi + @rm -f $(DESTDIR)/etc/init.d/bacula-fd + + +uninstall-autostart-sd: + @if test x$(DESTDIR) = x -a -f /etc/init.d/bacula-sd; then \ + /sbin/chkconfig --del bacula-sd; \ + fi + @rm -f $(DESTDIR)/etc/init.d/bacula-sd + +uninstall-autostart-dir: + @if test x$(DESTDIR) = x -a -f /etc/init.d/bacula-dir; then \ + /sbin/chkconfig --del bacula-dir; \ + fi + @rm -f $(DESTDIR)/etc/init.d/bacula-dir + +clean: + @rm -f 1 2 3 + +distclean: clean + @rm -f Makefile bacula-*.spec bacula.*.spec bacula.spec + @rm -f bacula-sd bacula-fd bacula-dir + +devclean: clean + @rm -f Makefile bacula-*.spec bacula.*.spec bacula.spec + @rm -f bacula-sd bacula-fd bacula-dir diff --git a/platforms/suse/bacula-dir-suse-sqlite.patch b/platforms/suse/bacula-dir-suse-sqlite.patch new file mode 100644 index 00000000..f5b00860 --- /dev/null +++ b/platforms/suse/bacula-dir-suse-sqlite.patch @@ -0,0 +1,13 @@ +--- ../../../bacula-2.1.10/platforms/suse/bacula-dir.in 2007-03-30 17:46:04.000000000 -0400 ++++ bacula-dir.in 2007-05-19 12:42:30.000000000 -0400 +@@ -13,8 +13,8 @@ + # + ### BEGIN INIT INFO + # Provides: bacula-dir +-# Required-Start: $local_fs $network @DEFAULT_DB_TYPE@ +-# Required-Stop: $local_fs $network @DEFAULT_DB_TYPE@ ++# Required-Start: $local_fs $network ++# Required-Stop: $local_fs $network + # Default-Start: 3 5 + # Default-Stop: 0 1 2 6 + # Short-Description: bacula director diff --git a/platforms/suse/bacula-dir.in b/platforms/suse/bacula-dir.in new file mode 100755 index 00000000..16e9edda --- /dev/null +++ b/platforms/suse/bacula-dir.in @@ -0,0 +1,65 @@ +#! /bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# bacula This shell script takes care of starting and stopping +# the bacula Director daemon +# +# chkconfig: 2345 90 9 +# description: The Leading Open Source Backup Solution. +# +# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# Modified to work on SuSE 1/31/2004 D. Scott Barninger +# Added rc.status functions 3/13/2004 D. 
Scott Barninger +# Added LSB init info 10/14/2004 D. Scott Barninger +# +### BEGIN INIT INFO +# Provides: bacula-dir +# Required-Start: $local_fs $network @DEFAULT_DB_TYPE@ +# Required-Stop: $local_fs $network @DEFAULT_DB_TYPE@ +# Default-Start: 3 5 +# Default-Stop: 0 1 2 6 +# Short-Description: bacula director +# Description: Bacula network backup system director daemon +### END INIT INFO + +# source process status functions +# this gives us funtion rc_status -v to tell us if we succeed or fail +. /etc/rc.status + +RETVAL=0 +case "$1" in + start) + echo -n "Starting the Bacula Director: " + /sbin/startproc @sbindir@/bacula-dir $2 -c @sysconfdir@/bacula-dir.conf + RETVAL=$? + rc_status -v + echo + [ $RETVAL -eq 0 ] && touch @subsysdir@/bacula-dir + ;; + stop) + echo -n "Stopping the Director daemon: " + /sbin/killproc @sbindir@/bacula-dir + RETVAL=$? + rc_status -v + echo + [ $RETVAL -eq 0 ] && rm -f @subsysdir@/bacula-dir + ;; + restart) + $0 stop + sleep 5 + $0 start + RETVAL=$? + ;; + status) + /sbin/checkproc @sbindir@/bacula-dir + RETVAL=$? + rc_status -v + ;; + *) + echo "Usage: $0 {start|stop|restart|status}" + exit 1 + ;; +esac +exit $RETVAL diff --git a/platforms/suse/bacula-fd.in b/platforms/suse/bacula-fd.in new file mode 100755 index 00000000..b1c770fc --- /dev/null +++ b/platforms/suse/bacula-fd.in @@ -0,0 +1,65 @@ +#! /bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# bacula This shell script takes care of starting and stopping +# the bacula File daemon. +# +# chkconfig: 2345 90 9 +# description: The Leading Open Source Backup Solution. +# +# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# Modified to work on SuSE 1/31/2004 D. Scott Barninger +# Added rc.status functions 3/13/2004 D. Scott Barninger +# Added LSB init info 10/14/2004 D. Scott Barninger +# +### BEGIN INIT INFO +# Provides: bacula-fd +# Required-Start: $local_fs $network +# Required-Stop: $local_fs $network +# Default-Start: 3 5 +# Default-Stop: 0 1 2 6 +# Short-Description: bacula file daemon +# Description: Bacula network backup system file daemon +### END INIT INFO + +# source process status functions +# this gives us funtion rc_status -v to tell us if we succeed or fail +. /etc/rc.status + +RETVAL=0 +case "$1" in + start) + echo -n "Starting the Bacula File daemon: " + /sbin/startproc @sbindir@/bacula-fd $2 -c @sysconfdir@/bacula-fd.conf + RETVAL=$? + rc_status -v + echo + [ $RETVAL -eq 0 ] && touch @subsysdir@/bacula-fd + ;; + stop) + echo -n "Stopping the Bacula File daemon: " + /sbin/killproc @sbindir@/bacula-fd + RETVAL=$? + rc_status -v + echo + [ $RETVAL -eq 0 ] && rm -f @subsysdir@/bacula-fd + ;; + restart) + $0 stop + sleep 5 + $0 start + RETVAL=$? + ;; + status) + /sbin/checkproc @sbindir@/bacula-fd + RETVAL=$? + rc_status -v + ;; + *) + echo "Usage: $0 {start|stop|restart|status}" + exit 1 + ;; +esac +exit $RETVAL diff --git a/platforms/suse/bacula-sd.in b/platforms/suse/bacula-sd.in new file mode 100755 index 00000000..0444ff49 --- /dev/null +++ b/platforms/suse/bacula-sd.in @@ -0,0 +1,65 @@ +#! /bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# bacula This shell script takes care of starting and stopping +# the bacula Storage daemon. +# +# chkconfig: 2345 90 9 +# description: The Leading Open Source Backup Solution. +# +# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# Modified to work on SuSE 1/31/2004 D. Scott Barninger +# Added rc.status functions 3/13/2004 D. 
Scott Barninger +# Added LSB init info 10/14/2004 D. Scott Barninger +# +### BEGIN INIT INFO +# Provides: bacula-sd +# Required-Start: $local_fs $network +# Required-Stop: $local_fs $network +# Default-Start: 3 5 +# Default-Stop: 0 1 2 6 +# Short-Description: bacula storage daemon +# Description: Bacula network backup system storage daemon +### END INIT INFO + +# source process status functions +# this gives us funtion rc_status -v to tell us if we succeed or fail +. /etc/rc.status + +RETVAL=0 +case "$1" in + start) + echo -n "Starting the Bacula Storage daemon: " + /sbin/startproc @sbindir@/bacula-sd $2 -c @sysconfdir@/bacula-sd.conf + RETVAL=$? + rc_status -v + echo + [ $RETVAL -eq 0 ] && touch @subsysdir@/bacula-sd + ;; + stop) + echo -n "Stopping the Bacula Storage daemon: " + /sbin/killproc @sbindir@/bacula-sd + RETVAL=$? + rc_status -v + echo + [ $RETVAL -eq 0 ] && rm -f @subsysdir@/bacula-sd + ;; + restart) + $0 stop + sleep 5 + $0 start + RETVAL=$? + ;; + status) + /sbin/checkproc @sbindir@/bacula-sd + RETVAL=$? + rc_status -v + ;; + *) + echo "Usage: $0 {start|stop|restart|status}" + exit 1 + ;; +esac +exit $RETVAL diff --git a/platforms/suse/bacula.in b/platforms/suse/bacula.in new file mode 100644 index 00000000..2bd9685f --- /dev/null +++ b/platforms/suse/bacula.in @@ -0,0 +1,216 @@ +#! /bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# bacula This shell script takes care of starting and stopping +# the bacula daemons. +# +# This is pretty much watered down version of the RedHat script +# that works on Solaris as well as Linux, but it won't work everywhere. +# +# Submitted by Volker Sauer 21Feb04 +# Tweaked a bit by Kern to convert it to a .in file +# +# description: The Leading Open Source Backup Solution. +# +### BEGIN INIT INFO +# Provides: bacula +# Required-Start: network mysql +# Required-Stop: +# Default-Start: 3 5 +# Default-Stop: +# Description: run bacula daemon(s) +### END INIT INFO + +PSCMD="@PSCMD@" +PIDDIR=@piddir@ +SUBSYSDIR=@subsysdir@ + +# A function to stop a program. +killproc() { + RC=0 + # Test syntax. + if [ $# = 0 ]; then + echo "Usage: killproc {program} [signal]" + return 1 + fi + + notset=0 + # check for third arg to be kill level + if [ "$3" != "" ] ; then + killlevel=$3 + else + notset=1 + killlevel="-9" + fi + + # Get base program name + base=`basename $1` + + # Find pid. + pid=`pidofproc $base $2` + + # Kill it. + if [ "$pid" != "" ] ; then + if [ "$notset" = "1" ] ; then + if ps -p $pid>/dev/null 2>&1; then + # TERM first, then KILL if not dead + kill -TERM $pid 2>/dev/null + sleep 1 + if ps -p $pid >/dev/null 2>&1 ; then + sleep 1 + if ps -p $pid >/dev/null 2>&1 ; then + sleep 3 + if ps -p $pid >/dev/null 2>&1 ; then + kill -KILL $pid 2>/dev/null + fi + fi + fi + fi + ps -p $pid >/dev/null 2>&1 + RC=$? + [ $RC -eq 0 ] && failure "$base shutdown" || success "$base shutdown" + # RC=$((! $RC)) + # use specified level only + else + if ps -p $pid >/dev/null 2>&1; then + kill $killlevel $pid 2>/dev/null + RC=$? + [ $RC -eq 0 ] && success "$base $killlevel" || failure "$base $killlevel" + fi + fi + else + failure "$base shutdown" + fi + # Remove pid file if any. + if [ "$notset" = "1" ]; then + rm -f ${PIDDIR}/$base.$2.pid + fi + return $RC +} + +# A function to find the pid of a program. +pidofproc() { + pid="" + # Test syntax. 
+ if [ $# = 0 ] ; then + echo "Usage: pidofproc {program}" + return 1 + fi + + # Get base program name + base=`basename $1` + + # First try PID file + if [ -f ${PIDDIR}/$base.$2.pid ] ; then + pid=`head -1 ${PIDDIR}/$base.$2.pid` + if [ "$pid" != "" ] ; then + echo $pid + return 0 + fi + fi + + # Next try "pidof" + if [ -x /sbin/pidof ] ; then + pid=`/sbin/pidof $1` + fi + if [ "$pid" != "" ] ; then + echo $pid + return 0 + fi + + # Finally try to extract it from ps + ${PSCMD} | grep $1 | awk '{ print $1 }' | tr '\n' ' ' + return 0 +} + +status() { + # Test syntax. + if [ $# = 0 ] ; then + echo "Usage: status {program}" + return 1 + fi + + # Get base program name + base=`basename $1` + + # First try "pidof" + if [ -x /sbin/pidof ] ; then + pid=`/sbin/pidof $1` + fi + if [ "$pid" != "" ] ; then + echo "$base (pid $pid) is running..." + return 0 + else + pid=`${PSCMD} | awk 'BEGIN { prog=ARGV[1]; ARGC=1 } + { if ((prog == $2) || (("(" prog ")") == $2) || + (("[" prog "]") == $2) || + ((prog ":") == $2)) { print $1 ; exit 0 } }' $1` + if [ "$pid" != "" ] ; then + echo "$base (pid $pid) is running..." + return 0 + fi + fi + + # Next try the PID files + if [ -f ${PIDDIR}/$base.$2.pid ] ; then + pid=`head -1 ${PIDDIR}/$base.$2.pid` + if [ "$pid" != "" ] ; then + echo "$base not running, but pid file exists" + return 1 + fi + fi + # See if the subsys lock exists + if [ -f ${SUBSYSDIR}/$base ] ; then + echo "$base not running, but subsys locked" + return 2 + fi + echo "$base is stopped" + return 3 +} + +success() { + return 0 +} + +failure() { + rc=$? + return $rc +} + +case "$1" in + start) + echo "Starting the Storage daemon" + @sbindir@/bacula-sd $2 -v -c @sysconfdir@//bacula-sd.conf + echo "Starting the File daemon" + @sbindir@/bacula-fd $2 -v -c @sysconfdir@//bacula-fd.conf + sleep 2 + echo "Starting the Director daemon" + @sbindir@/bacula-dir $2 -v -c @sysconfdir@//bacula-dir.conf + ;; + stop) + echo "Stopping the File daemon" + killproc @sbindir@/bacula-fd 9102 + echo "Stopping the Storage daemon" + killproc @sbindir@/bacula-sd 9103 + echo "Stopping the Director daemon" + killproc @sbindir@/bacula-dir 9101 + echo + ;; + restart) + $0 stop + sleep 5 + $0 start + ;; + status) + status @sbindir@/bacula-sd 9103 + status @sbindir@/bacula-fd 9102 + status @sbindir@/bacula-dir 9101 + ;; + *) + echo "Usage: $0 {start|stop|restart|status}" + exit 1 + ;; +esac +exit 0 diff --git a/platforms/systemd/Makefile.in b/platforms/systemd/Makefile.in new file mode 100644 index 00000000..516c0159 --- /dev/null +++ b/platforms/systemd/Makefile.in @@ -0,0 +1,114 @@ +# +# Copyright (C) 2000-2018 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# This file is used as the template to create the +# Makefile for the systemd specific installation. 
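The status() helper above follows the usual init-script convention for its return code: 0 running, 1 dead but pid file present, 2 dead but subsys lock present, 3 stopped. A small sketch of how a caller could branch on it, assuming the functions above have been sourced and using the file daemon with its port 9102 (as in the stop/status calls above) as the example:
    status @sbindir@/bacula-fd 9102
    case $? in
      0) echo "bacula-fd is up" ;;
      1|2) echo "bacula-fd died without cleaning up" ;;
      3) echo "bacula-fd is stopped" ;;
    esac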
+# +# +# for Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +INSTALL = @INSTALL@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +SYSTEMD_UNITDIR = @SYSTEMD_UNITDIR@ +SYSTEMD_TMPFILES = /etc/tmpfiles.d + +nothing: + +install: install-dir install-conf install-autostart + +install-dir: + mkdir -p $(DESTDIR)/$(SYSTEMD_UNITDIR) $(DESTDIR)/$(SYSTEMD_TMPFILES) + +install-conf: + @$(INSTALL_PROGRAM) -m 644 bacula.conf $(DESTDIR)/$(SYSTEMD_TMPFILES)/bacula.conf + +install-autostart: install-autostart-fd install-autostart-sd install-autostart-dir + +install-service: + @$(INSTALL_PROGRAM) -m 644 bacula-fd.service $(DESTDIR)/$(SYSTEMD_UNITDIR) + @$(INSTALL_PROGRAM) -m 644 bacula-dir.service $(DESTDIR)/$(SYSTEMD_UNITDIR) + @$(INSTALL_PROGRAM) -m 644 bacula-sd.service $(DESTDIR)/$(SYSTEMD_UNITDIR) + +install-autostart-fd: + @if test x$(DESTDIR) = x -a -f $(SYSTEMD_UNITDIR)/bacula-fd.service; then \ + /bin/systemctl stop bacula-fd.service; \ + /bin/systemctl disable bacula-fd.service; \ + fi + @$(INSTALL_PROGRAM) -m 644 bacula-fd.service $(DESTDIR)/$(SYSTEMD_UNITDIR)/bacula-fd.service + # set bacula-fd symlinks for script at startup and shutdown + @if test x$(DESTDIR) = x ; then \ + /bin/systemctl disable bacula-fd.service; \ + /bin/systemctl enable bacula-fd.service; \ + /bin/systemctl start bacula-fd.service; \ + fi + + +install-autostart-sd: + @if test x$(DESTDIR) = x -a -f $(SYSTEMD_UNITDIR)/bacula-sd.service; then \ + /bin/systemctl stop bacula-sd.service; \ + /bin/systemctl disable bacula-sd.service; \ + fi + @$(INSTALL_PROGRAM) -m 644 bacula-sd.service $(DESTDIR)/$(SYSTEMD_UNITDIR)/bacula-sd.service + # set bacula-sd symlinks for script at startup and shutdown + @if test x$(DESTDIR) = x ; then \ + /bin/systemctl disable bacula-sd.service; \ + /bin/systemctl enable bacula-sd.service; \ + /bin/systemctl start bacula-sd.service; \ + fi + + +install-autostart-dir: + @if test x$(DESTDIR) = x -a -f $(SYSTEMD_UNITDIR)/bacula-dir.service; then \ + /bin/systemctl stop bacula-dir.service; \ + /bin/systemctl disable bacula-dir.service; \ + fi + @$(INSTALL_PROGRAM) -m 644 bacula-dir.service $(DESTDIR)/$(SYSTEMD_UNITDIR)/bacula-dir.service + # set bacula-dir symlinks for script at startup and shutdown + @if test x$(DESTDIR) = x ; then \ + /bin/systemctl disable bacula-dir.service; \ + /bin/systemctl enable bacula-dir.service; \ + /bin/systemctl start bacula-dir.service; \ + fi + + +uninstall: uninstall-autostart uninstall-conf + +uninstall-autostart: uninstall-autostart-fd uninstall-autostart-sd uninstall-autostart-dir + +uninstall-autostart-fd: + @if test x$(DESTDIR) = x -a -f $(SYSTEMD_UNITDIR)/bacula-fd.service; then \ + /bin/systemctl stop bacula-fd.service; \ + /bin/systemctl disable bacula-fd.service; \ + fi + @rm -f $(DESTDIR)$(SYSTEMD_UNITDIR)/bacula-fd.service + + +uninstall-autostart-sd: + @if test x$(DESTDIR) = x -a -f $(SYSTEMD_UNITDIR)/bacula-sd.service; then \ + /bin/systemctl stop bacula-sd.service; \ + /bin/systemctl disable bacula-sd.service; \ + fi + @rm -f $(DESTDIR)$(SYSTEMD_UNITDIR)/bacula-sd.service + +uninstall-autostart-dir: + @if test x$(DESTDIR) = x -a -f $(SYSTEMD_UNITDIR)/bacula-dir.service; then \ + /bin/systemctl stop bacula-dir.service; \ + /bin/systemctl disable bacula-dir.service; \ + fi + @rm -f $(DESTDIR)$(SYSTEMD_UNITDIR)/bacula-dir.service + +uninstall-conf: + @rm -f $(DESTDIR)/$(SYSTEMD_TMPFILES)/bacula.conf + +clean: + @rm -f 1 2 3 + +distclean: clean + @rm -f Makefile bacula-*.spec bacula.*.spec bacula.spec + @rm -f bacula.conf bacula-sd.service bacula-fd.service 
bacula-dir.service + +devclean: clean + @rm -f Makefile bacula-*.spec bacula.*.spec bacula.spec + @rm -f bacula.conf bacula-sd.service bacula-fd.service bacula-dir.service diff --git a/platforms/systemd/bacula-dir.service.in b/platforms/systemd/bacula-dir.service.in new file mode 100644 index 00000000..e8d39b0a --- /dev/null +++ b/platforms/systemd/bacula-dir.service.in @@ -0,0 +1,34 @@ +# Systemd Bacula service file +# +# Copyright (C) 2000-2017 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# /lib/systemd/system/bacula-dir.service +# +# Description: +# Used to start/stop/reload the bacula director daemon service (bacula-dir) +# +# enable : systemctl enable bacula-dir +# start : systemctl start bacula-dir +# +# + +# From http://www.freedesktop.org/software/systemd/man/systemd.unit.html +[Unit] +Description=Bacula Director Daemon service +Requires=network.target +After=network.target multi-user.target +RequiresMountsFor=@working_dir@ @sysconfdir@ @sbindir@ + +# From http://www.freedesktop.org/software/systemd/man/systemd.service.html +[Service] +Type=simple +User=@dir_user@ +Group=@dir_group@ +ExecStart=@sbindir@/bacula-dir -fP -c @sysconfdir@/bacula-dir.conf +ExecReload=@sbindir@/bacula-dir -t -c @sysconfdir@/bacula-dir.conf +ExecReload=/bin/kill -HUP $MAINPID +SuccessExitStatus=15 + +[Install] +WantedBy=multi-user.target diff --git a/platforms/systemd/bacula-fd.service.in b/platforms/systemd/bacula-fd.service.in new file mode 100644 index 00000000..48ca58e5 --- /dev/null +++ b/platforms/systemd/bacula-fd.service.in @@ -0,0 +1,32 @@ +# Systemd Bacula service file +# +# Copyright (C) 2000-2017 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# /lib/systemd/system/bacula-fd.service +# +# Description: +# Used to start the bacula file daemon service (bacula-fd) +# +# enable : systemctl enable bacula-fd +# start : systemctl start bacula-fd +# +# + +# from http://www.freedesktop.org/software/systemd/man/systemd.unit.html +[Unit] +Description=Bacula File Daemon service +Requires=network.target +After=network.target +RequiresMountsFor=@working_dir@ @sysconfdir@ @sbindir@ + +# from http://www.freedesktop.org/software/systemd/man/systemd.service.html +[Service] +Type=simple +User=@fd_user@ +Group=@fd_group@ +ExecStart=@sbindir@/bacula-fd -fP -c @sysconfdir@/bacula-fd.conf +SuccessExitStatus=15 + +[Install] +WantedBy=multi-user.target diff --git a/platforms/systemd/bacula-sd.service.in b/platforms/systemd/bacula-sd.service.in new file mode 100644 index 00000000..ba15a664 --- /dev/null +++ b/platforms/systemd/bacula-sd.service.in @@ -0,0 +1,32 @@ +# Systemd Bacula service file +# +# Copyright (C) 2000-2017 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# /lib/systemd/system/bacula-sd.service +# +# Description: +# Used to start the bacula storage daemon service (bacula-sd) +# enable : systemctl enable bacula-sd +# start : systemctl start bacula-sd +# +# + +# from http://www.freedesktop.org/software/systemd/man/systemd.unit.html +[Unit] +Description=Bacula Storage Daemon service +Requires=network.target +After=network.target +RequiresMountsFor=@working_dir@ @sysconfdir@ @sbindir@ + +# from http://www.freedesktop.org/software/systemd/man/systemd.service.html +[Service] +Type=simple +User=@sd_user@ +Group=@sd_group@ +ExecStart=@sbindir@/bacula-sd -fP -c @sysconfdir@/bacula-sd.conf +SuccessExitStatus=15 +LimitMEMLOCK=infinity + +[Install] +WantedBy=multi-user.target diff --git a/platforms/systemd/bacula.conf.in b/platforms/systemd/bacula.conf.in new file 
mode 100644 index 00000000..6266f989 --- /dev/null +++ b/platforms/systemd/bacula.conf.in @@ -0,0 +1,3 @@ +# +# See tmpfiles.d(5) for details +d @piddir@ 2775 bacula bacula - diff --git a/platforms/ubuntu/Makefile.in b/platforms/ubuntu/Makefile.in new file mode 100644 index 00000000..7f0aa2dc --- /dev/null +++ b/platforms/ubuntu/Makefile.in @@ -0,0 +1,91 @@ +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# This file is used as the template to create the +# Makefile for the Debian/Ubuntu/Kubuntu specific installation. +# +# 21 March 2008 -- Kern Sibbald +# +# for Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +INSTALL = @INSTALL@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ -m 754 + +nothing: + +install: install-autostart + +install-autostart: install-autostart-fd install-autostart-sd install-autostart-dir + +install_logrotate: + @$(INSTALL_PROGRAM) ../../scripts/logrotate $(DESTDIR)/etc/logrotate.d/bacula + +install-autostart-fd: uninstall-autostart-fd + @echo "Installing bacula-fd boot script ..." + @$(INSTALL_PROGRAM) bacula-fd $(DESTDIR)/etc/init.d/bacula-fd + @echo "Installing bacula-fd symlinks ..." + @if test x$(DESTDIR) = x ; then \ + /usr/sbin/update-rc.d bacula-fd start 91 2 3 4 5 . stop 9 0 1 6 .; \ + fi + + +install-autostart-sd: uninstall-autostart-sd + @echo "Installing bacula-sd boot script ..." + @$(INSTALL_PROGRAM) bacula-sd $(DESTDIR)/etc/init.d/bacula-sd + @echo "Installing bacula-sd symlinks ..." + @if test "x$(DESTDIR)" = "x" ; then \ + /usr/sbin/update-rc.d bacula-sd start 91 2 3 4 5 . stop 9 0 1 6 .; \ + fi + + +install-autostart-dir: uninstall-autostart-dir + @echo "Installing bacula-dir boot script ..." + @$(INSTALL_PROGRAM) bacula-dir $(DESTDIR)/etc/init.d/bacula-dir + @echo "Installing bacula-dir symlinks ..." + @if test "x$(DESTDIR)" = "x" ; then \ + /usr/sbin/update-rc.d bacula-dir start 90 2 3 4 5 . stop 9 0 1 6 .; \ + fi + + +uninstall: uninstall-autostart + +uninstall-autostart: uninstall-autostart-fd uninstall-autostart-sd uninstall-autostart-dir + +uninstall-logrotate: + @rm -f $(DESTDIR)/etc/logrotate.d/bacula + +uninstall-autostart-fd: + @if test "x$(DESTDIR)" = "x" -a -f /etc/init.d/bacula-fd; then \ + /etc/init.d/bacula-fd stop; \ + rm -f $(DESTDIR)/etc/init.d/bacula-fd; \ + /usr/sbin/update-rc.d bacula-fd remove; \ + fi + + +uninstall-autostart-sd: + @if test "x$(DESTDIR)" = "x" -a -f /etc/init.d/bacula-sd; then \ + /etc/init.d/bacula-sd stop; \ + rm -f $(DESTDIR)/etc/init.d/bacula-sd; \ + /usr/sbin/update-rc.d bacula-sd remove; \ + fi + +uninstall-autostart-dir: + @if test "x$(DESTDIR)" = "x" -a -f /etc/init.d/bacula-dir; then \ + /etc/init.d/bacula-dir stop; \ + rm -f $(DESTDIR)/etc/init.d/bacula-dir; \ + /usr/sbin/update-rc.d bacula-dir remove; \ + fi + @rm -f $(DESTDIR)/etc/init.d/bacula-dir + +clean: + @rm -f 1 2 3 + +distclean: clean + @rm -f Makefile + @rm -f bacula-sd bacula-fd bacula-dir + +devclean: clean + @rm -f Makefile + @rm -f bacula-sd bacula-fd bacula-dir diff --git a/platforms/ubuntu/bacula-dir.in b/platforms/ubuntu/bacula-dir.in new file mode 100644 index 00000000..798d49e6 --- /dev/null +++ b/platforms/ubuntu/bacula-dir.in @@ -0,0 +1,87 @@ +#! /bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# bacula This shell script takes care of starting and stopping +# the bacula Director daemon on Debian/Ubuntu/Kubuntu +# systems. +# +# Kern E. 
Sibbald - 21 March 2008 +# +# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +### BEGIN INIT INFO +# Provides: bacula-dir +# Required-Start: $local_fs $remote_fs $network $time +# Required-Stop: $local_fs $remote_fs $network $time +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Bacula Director +# Description: Bacula is a network backup and restore program +### END INIT INFO + + +NAME="bacula-dir" +DESC="Bacula Director" +DAEMON=@sbindir@/${NAME} +BUSER=@dir_user@ +BGROUP=@dir_group@ +BOPTIONS="-c @sysconfdir@/${NAME}.conf" +BPORT=@dir_port@ + +PATH=/sbin:/bin:/usr/sbin:/usr/bin + +test -f $DAEMON || exit 0 + +# +# Disable Glibc malloc checks, it doesn't help and it keeps from getting +# good dumps +MALLOC_CHECK_=0 +export MALLOC_CHECK_ + +if [ -n "`getent services ${NAME}`" ]; then + BPORT=`getent services ${NAME} | awk '{ gsub("/tcp","",$2); print $2; }'` +fi + +if [ -f /etc/default/$NAME ]; then + . /etc/default/$NAME +fi + +mkdir -p @piddir@ +PIDFILE=@piddir@/${NAME}.${BPORT}.pid + +if [ "x${BUSER}" != "x" ]; then + USERGRP="--chuid ${BUSER}" + if [ "x${BGROUP}" != "x" ]; then + USERGRP="${USERGRP}:${BGROUP}" + fi +fi + +RETVAL=0 +case "$1" in + start) + echo -n "Starting ${DESC}: " + start-stop-daemon --start --quiet --pidfile ${PIDFILE} ${USERGRP} --exec ${DAEMON} -- ${BOPTIONS} + RETVAL=$? + echo "${NAME}" + ;; + stop) + echo -n "Stopping ${DESC}: " + start-stop-daemon --oknodo --stop --quiet ${USERGRP} --exec ${DAEMON} -- ${BOPTIONS} + RETVAL=$? + echo "${NAME}" + ;; + restart|force-reload) + $0 stop + sleep 5 + $0 start + RETVAL=$? + ;; + *) + echo "Usage: /etc/init.d/${NAME} {start|stop|restart|force-reload}" >&2 + exit 1 + ;; +esac +exit $RETVAL diff --git a/platforms/ubuntu/bacula-fd.in b/platforms/ubuntu/bacula-fd.in new file mode 100644 index 00000000..ebdb4bfe --- /dev/null +++ b/platforms/ubuntu/bacula-fd.in @@ -0,0 +1,87 @@ +#! /bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# bacula This shell script takes care of starting and stopping +# the bacula Director daemon on Debian/Ubuntu/Kubuntu +# systems. +# +# Kern E. Sibbald - 21 March 2008 +# +# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +### BEGIN INIT INFO +# Provides: bacula-fd +# Required-Start: $local_fs $remote_fs $network $time +# Required-Stop: $local_fs $remote_fs $network $time +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Bacula File Daemon +# Description: Bacula is a network backup and restore program +### END INIT INFO + + +NAME="bacula-fd" +DESC="Bacula File Daemon" +DAEMON=@sbindir@/${NAME} +BUSER=@fd_user@ +BGROUP=@fd_group@ +BOPTIONS="-c @sysconfdir@/${NAME}.conf" +BPORT=@fd_port@ + +PATH=/sbin:/bin:/usr/sbin:/usr/bin + +test -f $DAEMON || exit 0 + +# +# Disable Glibc malloc checks, it doesn't help and it keeps from getting +# good dumps +MALLOC_CHECK_=0 +export MALLOC_CHECK_ + +if [ -n "`getent services ${NAME}`" ]; then + BPORT=`getent services ${NAME} | awk '{ gsub("/tcp","",$2); print $2; }'` +fi + +if [ -f /etc/default/$NAME ]; then + . /etc/default/$NAME +fi + +mkdir -p @piddir@ +PIDFILE=@piddir@/${NAME}.${BPORT}.pid + +if [ "x${BUSER}" != "x" ]; then + USERGRP="--chuid ${BUSER}" + if [ "x${BGROUP}" != "x" ]; then + USERGRP="${USERGRP}:${BGROUP}" + fi +fi + +RETVAL=0 +case "$1" in + start) + echo -n "Starting ${DESC}: " + start-stop-daemon --start --quiet --pidfile ${PIDFILE} ${USERGRP} --exec ${DAEMON} -- ${BOPTIONS} + RETVAL=$? 
+ echo "${NAME}" + ;; + stop) + echo -n "Stopping ${DESC}: " + start-stop-daemon --oknodo --stop --quiet ${USERGRP} --exec ${DAEMON} -- ${BOPTIONS} + RETVAL=$? + echo "${NAME}" + ;; + restart|force-reload) + $0 stop + sleep 5 + $0 start + RETVAL=$? + ;; + *) + echo "Usage: /etc/init.d/${NAME} {start|stop|restart|force-reload}" >&2 + exit 1 + ;; +esac +exit $RETVAL diff --git a/platforms/ubuntu/bacula-sd.in b/platforms/ubuntu/bacula-sd.in new file mode 100644 index 00000000..16763b7e --- /dev/null +++ b/platforms/ubuntu/bacula-sd.in @@ -0,0 +1,87 @@ +#! /bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# bacula This shell script takes care of starting and stopping +# the bacula Director daemon on Debian/Ubuntu/Kubuntu +# systems. +# +# Kern E. Sibbald - 21 March 2008 +# +# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +### BEGIN INIT INFO +# Provides: bacula-sdr +# Required-Start: $local_fs $remote_fs $network $time +# Required-Stop: $local_fs $remote_fs $network $time +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Bacula Storage Daemon +# Description: Bacula is a network backup and restore program +### END INIT INFO + + +NAME="bacula-sd" +DESC="Bacula Storage Daemon" +DAEMON=@sbindir@/${NAME} +BUSER=@sd_user@ +BGROUP=@sd_group@ +BOPTIONS="-c @sysconfdir@/${NAME}.conf" +BPORT=@sd_port@ + +PATH=/sbin:/bin:/usr/sbin:/usr/bin + +test -f $DAEMON || exit 0 + +# +# Disable Glibc malloc checks, it doesn't help and it keeps from getting +# good dumps +MALLOC_CHECK_=0 +export MALLOC_CHECK_ + +if [ -n "`getent services ${NAME}`" ]; then + BPORT=`getent services ${NAME} | awk '{ gsub("/tcp","",$2); print $2; }'` +fi + +if [ -f /etc/default/$NAME ]; then + . /etc/default/$NAME +fi + +mkdir -p @piddir@ +PIDFILE=@piddir@/${NAME}.${BPORT}.pid + +if [ "x${BUSER}" != "x" ]; then + USERGRP="--chuid ${BUSER}" + if [ "x${BGROUP}" != "x" ]; then + USERGRP="${USERGRP}:${BGROUP}" + fi +fi + +RETVAL=0 +case "$1" in + start) + echo -n "Starting ${DESC}: " + start-stop-daemon --start --quiet --pidfile ${PIDFILE} ${USERGRP} --exec ${DAEMON} -- ${BOPTIONS} + RETVAL=$? + echo "${NAME}" + ;; + stop) + echo -n "Stopping ${DESC}: " + start-stop-daemon --oknodo --stop --quiet ${USERGRP} --exec ${DAEMON} -- ${BOPTIONS} + RETVAL=$? + echo "${NAME}" + ;; + restart|force-reload) + $0 stop + sleep 5 + $0 start + RETVAL=$? + ;; + *) + echo "Usage: /etc/init.d/${NAME} {start|stop|restart|force-reload}" >&2 + exit 1 + ;; +esac +exit $RETVAL diff --git a/platforms/unknown/Makefile.in b/platforms/unknown/Makefile.in new file mode 100644 index 00000000..519527b3 --- /dev/null +++ b/platforms/unknown/Makefile.in @@ -0,0 +1,49 @@ +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# This file is used as the template to create the +# Makefile for the unknown specific installation. 
+# +# 15 November 2001 -- Kern Sibbald +# +# for Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ +# + +INSTALL = @INSTALL@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ + +install: install-autostart + +all: install-autostart + +install-autostart: install-autostart-FD install-autostart-SD install-autostart-DIR + + +install-autostart-FD: + + +install-autostart-SD: + + +install-autostart-DIR: + + +uninstall: uninstall-autostart + +uninstall-autostart: uninstall-autostart-FD uninstall-autostart-SD uninstall-autostart-DIR + +uninstall-autostart-FD: + + +uninstall-autostart-SD: + +uninstall-autostart-DIR: + +clean: + +distclean: + @rm -f bacula-SD bacula-FD bacula-DIR Makefile bacula-*.spec + +devclean: + @rm -f bacula-SD bacula-FD bacula-DIR Makefile bacula-*.spec diff --git a/po/ChangeLog b/po/ChangeLog new file mode 100644 index 00000000..e69de29b diff --git a/po/LINGUAS b/po/LINGUAS new file mode 100644 index 00000000..8d3a0ace --- /dev/null +++ b/po/LINGUAS @@ -0,0 +1,6 @@ +de +es +fr +nl +sv +uk diff --git a/po/Makefile.in.in b/po/Makefile.in.in new file mode 100644 index 00000000..61e4f526 --- /dev/null +++ b/po/Makefile.in.in @@ -0,0 +1,393 @@ +# Makefile for PO directory in any package using GNU gettext. +# Copyright (C) 1995-1997, 2000-2005 by Ulrich Drepper +# +# This file can be copied and used freely without restrictions. It can +# be used in projects which are not available under the GNU General Public +# License but which still want to provide support for the GNU gettext +# functionality. +# Please note that the actual code of GNU gettext is covered by the GNU +# General Public License and is *not* in the public domain. +# +# Origin: gettext-0.14.4 + +PACKAGE = bacula +VERSION = @VERSION@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ + +SHELL = /bin/sh +@SET_MAKE@ + +srcdir = @srcdir@ +top_srcdir = @top_srcdir@ +top_builddir = @top_builddir@ +VPATH = @srcdir@ + +prefix = @prefix@ +exec_prefix = @exec_prefix@ +datadir = @datadir@ +datarootdir = @datarootdir@ +localedir = $(datadir)/locale +gettextsrcdir = $(datadir)/gettext/po + +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +MKINSTALLDIRS = @MKINSTALLDIRS@ +mkinstalldirs = $(SHELL) $(MKINSTALLDIRS) + +GMSGFMT = @GMSGFMT@ +MSGFMT = @MSGFMT@ +XGETTEXT = @XGETTEXT@ +MSGMERGE = msgmerge +MSGMERGE_UPDATE = @MSGMERGE@ --update +MSGINIT = msginit +MSGCONV = msgconv +MSGFILTER = msgfilter + +POFILES = @POFILES@ +GMOFILES = @GMOFILES@ +UPDATEPOFILES = @UPDATEPOFILES@ +DUMMYPOFILES = @DUMMYPOFILES@ +DISTFILES.common = Makefile.in.in remove-potcdate.sin \ +$(DISTFILES.common.extra1) $(DISTFILES.common.extra2) $(DISTFILES.common.extra3) +DISTFILES = $(DISTFILES.common) Makevars POTFILES.in \ +$(POFILES) $(GMOFILES) \ +$(DISTFILES.extra1) $(DISTFILES.extra2) $(DISTFILES.extra3) + +POTFILES = \ + +CATALOGS = @CATALOGS@ + +# Makevars gets inserted here. (Don't remove this line!) + +.SUFFIXES: +.SUFFIXES: .po .gmo .mo .sed .sin .nop .po-create .po-update + +.po.mo: + @echo "$(MSGFMT) -c -o $@ $<"; \ + $(MSGFMT) -c -o t-$@ $< && mv t-$@ $@ + +.po.gmo: + @lang=`echo $* | sed -e 's,.*/,,'`; \ + test "$(srcdir)" = . && cdcmd="" || cdcmd="cd $(srcdir) && "; \ + echo "$${cdcmd}rm -f $${lang}.gmo && $(GMSGFMT) -c --statistics -o $${lang}.gmo $${lang}.po"; \ + cd $(srcdir) && rm -f $${lang}.gmo && $(GMSGFMT) -c --statistics -o t-$${lang}.gmo $${lang}.po && mv t-$${lang}.gmo $${lang}.gmo + +.sin.sed: + sed -e '/^#/d' $< > t-$@ + mv t-$@ $@ + + +all: all-@USE_NLS@ + +all-yes: stamp-po +all-no: + +# $(srcdir)/$(DOMAIN).pot is only created when needed. 
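
In po/Makefile.in.in above, the .po.gmo suffix rule compiles each catalog with $(GMSGFMT), i.e. GNU gettext's msgfmt. A sketch of the same step done by hand for a single catalog (msgfmt assumed to be on PATH; de.po is just an example name taken from LINGUAS):

    # sketch only -- what the .po.gmo rule runs for one language
    cd po && rm -f de.gmo && \
      msgfmt -c --statistics -o t-de.gmo de.po && mv t-de.gmo de.gmo
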
When xgettext finds no +# internationalized messages, no $(srcdir)/$(DOMAIN).pot is created (because +# we don't want to bother translators with empty POT files). We assume that +# LINGUAS is empty in this case, i.e. $(POFILES) and $(GMOFILES) are empty. +# In this case, stamp-po is a nop (i.e. a phony target). + +# stamp-po is a timestamp denoting the last time at which the CATALOGS have +# been loosely updated. Its purpose is that when a developer or translator +# checks out the package via CVS, and the $(DOMAIN).pot file is not in CVS, +# "make" will update the $(DOMAIN).pot and the $(CATALOGS), but subsequent +# invocations of "make" will do nothing. This timestamp would not be necessary +# if updating the $(CATALOGS) would always touch them; however, the rule for +# $(POFILES) has been designed to not touch files that don't need to be +# changed. +stamp-po: $(srcdir)/$(DOMAIN).pot + test ! -f $(srcdir)/$(DOMAIN).pot || \ + test -z "$(GMOFILES)" || $(MAKE) $(GMOFILES) + @test ! -f $(srcdir)/$(DOMAIN).pot || { \ + echo "touch stamp-po" && \ + echo timestamp > stamp-poT && \ + mv stamp-poT stamp-po; \ + } + +# Note: Target 'all' must not depend on target '$(DOMAIN).pot-update', +# otherwise packages like GCC can not be built if only parts of the source +# have been downloaded. + +# This target rebuilds $(DOMAIN).pot; it is an expensive operation. +# Note that $(DOMAIN).pot is not touched if it doesn't need to be changed. +$(DOMAIN).pot-update: $(POTFILES) $(srcdir)/POTFILES.in remove-potcdate.sed + if test -n '$(MSGID_BUGS_ADDRESS)' || test '$(PACKAGE_BUGREPORT)' = '@'PACKAGE_BUGREPORT'@'; then \ + msgid_bugs_address='$(MSGID_BUGS_ADDRESS)'; \ + else \ + msgid_bugs_address='$(PACKAGE_BUGREPORT)'; \ + fi; \ + $(XGETTEXT) --default-domain=$(DOMAIN) --directory=$(top_srcdir) \ + --add-comments=TRANSLATORS: $(XGETTEXT_OPTIONS) \ + --files-from=$(srcdir)/POTFILES.in \ + --copyright-holder='$(COPYRIGHT_HOLDER)' \ + --msgid-bugs-address="$$msgid_bugs_address" + test ! -f $(DOMAIN).po || { \ + if test -f $(srcdir)/$(DOMAIN).pot; then \ + sed -f remove-potcdate.sed < $(srcdir)/$(DOMAIN).pot > $(DOMAIN).1po && \ + sed -f remove-potcdate.sed < $(DOMAIN).po > $(DOMAIN).2po && \ + if cmp $(DOMAIN).1po $(DOMAIN).2po >/dev/null 2>&1; then \ + rm -f $(DOMAIN).1po $(DOMAIN).2po $(DOMAIN).po; \ + else \ + rm -f $(DOMAIN).1po $(DOMAIN).2po $(srcdir)/$(DOMAIN).pot && \ + mv $(DOMAIN).po $(srcdir)/$(DOMAIN).pot; \ + fi; \ + else \ + mv $(DOMAIN).po $(srcdir)/$(DOMAIN).pot; \ + fi; \ + } + +# This rule has no dependencies: we don't need to update $(DOMAIN).pot at +# every "make" invocation, only create it when it is missing. +# Only "make $(DOMAIN).pot-update" or "make dist" will force an update. +$(srcdir)/$(DOMAIN).pot: + $(MAKE) $(DOMAIN).pot-update + +# This target rebuilds a PO file if $(DOMAIN).pot has changed. +# Note that a PO file is not touched if it doesn't need to be changed. +$(POFILES): $(srcdir)/$(DOMAIN).pot + @lang=`echo $@ | sed -e 's,.*/,,' -e 's/\.po$$//'`; \ + if test -f "$(srcdir)/$${lang}.po"; then \ + test "$(srcdir)" = . 
&& cdcmd="" || cdcmd="cd $(srcdir) && "; \ + echo "$${cdcmd}$(MSGMERGE_UPDATE) $${lang}.po $(DOMAIN).pot"; \ + cd $(srcdir) && $(MSGMERGE_UPDATE) $${lang}.po $(DOMAIN).pot; \ + else \ + $(MAKE) $${lang}.po-create; \ + fi + + +install: install-exec install-data +install-exec: +install-data: install-data-@USE_NLS@ + if test "$(PACKAGE)" = "gettext-tools"; then \ + $(mkinstalldirs) $(DESTDIR)$(gettextsrcdir); \ + for file in $(DISTFILES.common) Makevars.template; do \ + $(INSTALL_DATA) $(srcdir)/$$file \ + $(DESTDIR)$(gettextsrcdir)/$$file; \ + done; \ + for file in Makevars; do \ + rm -f $(DESTDIR)$(gettextsrcdir)/$$file; \ + done; \ + else \ + : ; \ + fi +install-data-no: all +install-data-yes: all + $(mkinstalldirs) $(DESTDIR)$(datadir) + @catalogs='$(CATALOGS)'; \ + for cat in $$catalogs; do \ + cat=`basename $$cat`; \ + lang=`echo $$cat | sed -e 's/\.gmo$$//'`; \ + dir=$(localedir)/$$lang/LC_MESSAGES; \ + $(mkinstalldirs) $(DESTDIR)$$dir; \ + if test -r $$cat; then realcat=$$cat; else realcat=$(srcdir)/$$cat; fi; \ + $(INSTALL_DATA) $$realcat $(DESTDIR)$$dir/$(DOMAIN).mo; \ + echo "installing $$realcat as $(DESTDIR)$$dir/$(DOMAIN).mo"; \ + for lc in '' $(EXTRA_LOCALE_CATEGORIES); do \ + if test -n "$$lc"; then \ + if (cd $(DESTDIR)$(localedir)/$$lang && LC_ALL=C ls -l -d $$lc 2>/dev/null) | grep ' -> ' >/dev/null; then \ + link=`cd $(DESTDIR)$(localedir)/$$lang && LC_ALL=C ls -l -d $$lc | sed -e 's/^.* -> //'`; \ + mv $(DESTDIR)$(localedir)/$$lang/$$lc $(DESTDIR)$(localedir)/$$lang/$$lc.old; \ + mkdir $(DESTDIR)$(localedir)/$$lang/$$lc; \ + (cd $(DESTDIR)$(localedir)/$$lang/$$lc.old && \ + for file in *; do \ + if test -f $$file; then \ + ln -s ../$$link/$$file $(DESTDIR)$(localedir)/$$lang/$$lc/$$file; \ + fi; \ + done); \ + rm -f $(DESTDIR)$(localedir)/$$lang/$$lc.old; \ + else \ + if test -d $(DESTDIR)$(localedir)/$$lang/$$lc; then \ + :; \ + else \ + rm -f $(DESTDIR)$(localedir)/$$lang/$$lc; \ + mkdir $(DESTDIR)$(localedir)/$$lang/$$lc; \ + fi; \ + fi; \ + rm -f $(DESTDIR)$(localedir)/$$lang/$$lc/$(DOMAIN).mo; \ + ln -s ../LC_MESSAGES/$(DOMAIN).mo $(DESTDIR)$(localedir)/$$lang/$$lc/$(DOMAIN).mo 2>/dev/null || \ + ln $(DESTDIR)$(localedir)/$$lang/LC_MESSAGES/$(DOMAIN).mo $(DESTDIR)$(localedir)/$$lang/$$lc/$(DOMAIN).mo 2>/dev/null || \ + cp -p $(DESTDIR)$(localedir)/$$lang/LC_MESSAGES/$(DOMAIN).mo $(DESTDIR)$(localedir)/$$lang/$$lc/$(DOMAIN).mo; \ + echo "installing $$realcat link as $(DESTDIR)$(localedir)/$$lang/$$lc/$(DOMAIN).mo"; \ + fi; \ + done; \ + done + +install-strip: install + +installdirs: installdirs-exec installdirs-data +installdirs-exec: +installdirs-data: installdirs-data-@USE_NLS@ + if test "$(PACKAGE)" = "gettext-tools"; then \ + $(mkinstalldirs) $(DESTDIR)$(gettextsrcdir); \ + else \ + : ; \ + fi +installdirs-data-no: +installdirs-data-yes: + $(mkinstalldirs) $(DESTDIR)$(datadir) + @catalogs='$(CATALOGS)'; \ + for cat in $$catalogs; do \ + cat=`basename $$cat`; \ + lang=`echo $$cat | sed -e 's/\.gmo$$//'`; \ + dir=$(localedir)/$$lang/LC_MESSAGES; \ + $(mkinstalldirs) $(DESTDIR)$$dir; \ + for lc in '' $(EXTRA_LOCALE_CATEGORIES); do \ + if test -n "$$lc"; then \ + if (cd $(DESTDIR)$(localedir)/$$lang && LC_ALL=C ls -l -d $$lc 2>/dev/null) | grep ' -> ' >/dev/null; then \ + link=`cd $(DESTDIR)$(localedir)/$$lang && LC_ALL=C ls -l -d $$lc | sed -e 's/^.* -> //'`; \ + mv $(DESTDIR)$(localedir)/$$lang/$$lc $(DESTDIR)$(localedir)/$$lang/$$lc.old; \ + mkdir $(DESTDIR)$(localedir)/$$lang/$$lc; \ + (cd $(DESTDIR)$(localedir)/$$lang/$$lc.old && \ + for file in *; do \ + if test 
-f $$file; then \ + ln -s ../$$link/$$file $(DESTDIR)$(localedir)/$$lang/$$lc/$$file; \ + fi; \ + done); \ + rm -f $(DESTDIR)$(localedir)/$$lang/$$lc.old; \ + else \ + if test -d $(DESTDIR)$(localedir)/$$lang/$$lc; then \ + :; \ + else \ + rm -f $(DESTDIR)$(localedir)/$$lang/$$lc; \ + mkdir $(DESTDIR)$(localedir)/$$lang/$$lc; \ + fi; \ + fi; \ + fi; \ + done; \ + done + +# Define this as empty until I found a useful application. +installcheck: + +uninstall: uninstall-exec uninstall-data +uninstall-exec: +uninstall-data: uninstall-data-@USE_NLS@ + if test "$(PACKAGE)" = "gettext-tools"; then \ + for file in $(DISTFILES.common) Makevars.template; do \ + rm -f $(DESTDIR)$(gettextsrcdir)/$$file; \ + done; \ + else \ + : ; \ + fi +uninstall-data-no: +uninstall-data-yes: + catalogs='$(CATALOGS)'; \ + for cat in $$catalogs; do \ + cat=`basename $$cat`; \ + lang=`echo $$cat | sed -e 's/\.gmo$$//'`; \ + for lc in LC_MESSAGES $(EXTRA_LOCALE_CATEGORIES); do \ + rm -f $(DESTDIR)$(localedir)/$$lang/$$lc/$(DOMAIN).mo; \ + done; \ + done + +check: all + +info dvi ps pdf html tags TAGS ctags CTAGS ID: + +mostlyclean: + rm -f remove-potcdate.sed + rm -f stamp-poT + rm -f core core.* $(DOMAIN).po $(DOMAIN).1po $(DOMAIN).2po *.new.po + rm -fr *.o + +clean: mostlyclean + +distclean: clean + rm -f Makefile Makefile.in POTFILES *.mo + +maintainer-clean: distclean + @echo "This command is intended for maintainers to use;" + @echo "it deletes files that may require special tools to rebuild." + rm -f stamp-po $(GMOFILES) + +distdir = $(top_builddir)/$(PACKAGE)-$(VERSION)/$(subdir) +dist distdir: + $(MAKE) update-po + @$(MAKE) dist2 +# This is a separate target because 'update-po' must be executed before. +dist2: stamp-po $(DISTFILES) + dists="$(DISTFILES)"; \ + if test "$(PACKAGE)" = "gettext-tools"; then \ + dists="$$dists Makevars.template"; \ + fi; \ + if test -f $(srcdir)/$(DOMAIN).pot; then \ + dists="$$dists $(DOMAIN).pot stamp-po"; \ + fi; \ + if test -f $(srcdir)/ChangeLog; then \ + dists="$$dists ChangeLog"; \ + fi; \ + for i in 0 1 2 3 4 5 6 7 8 9; do \ + if test -f $(srcdir)/ChangeLog.$$i; then \ + dists="$$dists ChangeLog.$$i"; \ + fi; \ + done; \ + if test -f $(srcdir)/LINGUAS; then dists="$$dists LINGUAS"; fi; \ + for file in $$dists; do \ + if test -f $$file; then \ + cp -p $$file $(distdir) || exit 1; \ + else \ + cp -p $(srcdir)/$$file $(distdir) || exit 1; \ + fi; \ + done + +update-po: Makefile + $(MAKE) $(DOMAIN).pot-update + test -z "$(UPDATEPOFILES)" || $(MAKE) $(UPDATEPOFILES) + $(MAKE) update-gmo + +# General rule for creating PO files. + +.nop.po-create: + @lang=`echo $@ | sed -e 's/\.po-create$$//'`; \ + echo "File $$lang.po does not exist. If you are a translator, you can create it through 'msginit'." 1>&2; \ + exit 1 + +# General rule for updating PO files. + +.nop.po-update: + @lang=`echo $@ | sed -e 's/\.po-update$$//'`; \ + if test "$(PACKAGE)" = "gettext-tools"; then PATH=`pwd`/../src:$$PATH; fi; \ + tmpdir=`pwd`; \ + echo "$$lang:"; \ + test "$(srcdir)" = . 
&& cdcmd="" || cdcmd="cd $(srcdir) && "; \ + echo "$${cdcmd}$(MSGMERGE) $$lang.po $(DOMAIN).pot -o $$lang.new.po"; \ + cd $(srcdir); \ + if $(MSGMERGE) $$lang.po $(DOMAIN).pot -o $$tmpdir/$$lang.new.po; then \ + if cmp $$lang.po $$tmpdir/$$lang.new.po >/dev/null 2>&1; then \ + rm -f $$tmpdir/$$lang.new.po; \ + else \ + if mv -f $$tmpdir/$$lang.new.po $$lang.po; then \ + :; \ + else \ + echo "msgmerge for $$lang.po failed: cannot move $$tmpdir/$$lang.new.po to $$lang.po" 1>&2; \ + exit 1; \ + fi; \ + fi; \ + else \ + echo "msgmerge for $$lang.po failed!" 1>&2; \ + rm -f $$tmpdir/$$lang.new.po; \ + fi + +$(DUMMYPOFILES): + +update-gmo: Makefile $(GMOFILES) + @: + +Makefile: Makefile.in.in Makevars $(top_builddir)/config.status @POMAKEFILEDEPS@ + cd $(top_builddir) \ + && CONFIG_FILES=$(subdir)/$@.in CONFIG_HEADERS= \ + $(SHELL) ./config.status + +force: + +depend: + +gen-potfiles: + echo "# List of source files containing translatable strings." > POTFILES.in + echo "# To generate this file, type 'make gen-potfiles'" >> POTFILES.in + cd .. && git ls-files *.c *.h *.cpp >> po/POTFILES.in + +# Tell versions [3.59,3.63) of GNU make not to export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/po/Makevars b/po/Makevars new file mode 100644 index 00000000..eb97304b --- /dev/null +++ b/po/Makevars @@ -0,0 +1,41 @@ +# Makefile variables for PO directory in any package using GNU gettext. + +# Usually the message domain is the same as the package name. +DOMAIN = bacula + +# These two variables depend on the location of this directory. +subdir = po +top_builddir = .. + +# These options get passed to xgettext. +XGETTEXT_OPTIONS = --keyword=_ + +# This is the copyright holder that gets inserted into the header of the +# $(DOMAIN).pot file. Set this to the copyright holder of the surrounding +# package. (Note that the msgstr strings, extracted from the package's +# sources, belong to the copyright holder of the package.) Translators are +# expected to transfer the copyright for their translations to this person +# or entity, or to disclaim their copyright. The empty string stands for +# the public domain; in this case the translators are expected to disclaim +# their copyright. +COPYRIGHT_HOLDER = Kern Sibbald + +# This is the email address or URL to which the translators shall report +# bugs in the untranslated strings: +# - Strings which are not entire sentences, see the maintainer guidelines +# in the GNU gettext documentation, section 'Preparing Strings'. +# - Strings which use unclear terms or require additional context to be +# understood. +# - Strings which make invalid assumptions about notation of date, time or +# money. +# - Pluralisation problems. +# - Incorrect English spelling. +# - Incorrect formatting. +# It can be your email address, or a mailing list address where translators +# can write to without being subscribed, or the URL of a web page through +# which the translators can contact you. +MSGID_BUGS_ADDRESS = bacula-devel@lists.sourceforge.net + +# This is the list of locale categories, beyond LC_MESSAGES, for which the +# message catalogs shall be used. It is usually empty. +EXTRA_LOCALE_CATEGORIES = diff --git a/po/POTFILES.in b/po/POTFILES.in new file mode 100644 index 00000000..2a3ad625 --- /dev/null +++ b/po/POTFILES.in @@ -0,0 +1,334 @@ +# List of source files containing translatable strings. 
+# To generate this file, type 'make gen-potfiles' +autoconf/acconfig.h +autoconf/confdefs.h +examples/nagios/check_bacula/check_bacula.h +src/baconfig.h +src/bacula.h +src/bc_types.h +src/cats/bdb.h +src/cats/bdb_mysql.h +src/cats/bdb_postgresql.h +src/cats/bdb_sqlite.h +src/cats/bvfs.h +src/cats/cats.h +src/cats/protos.h +src/cats/sql_cmds.h +src/ch.h +src/console/conio.h +src/console/console_conf.h +src/console/func.h +src/dird/bsr.h +src/dird/dir_plugins.h +src/dird/dird.h +src/dird/dird_conf.h +src/dird/jobq.h +src/dird/protos.h +src/dird/ua.h +src/filed/backup.h +src/filed/bacl.h +src/filed/bacl_freebsd.h +src/filed/bacl_linux.h +src/filed/bacl_osx.h +src/filed/bacl_solaris.h +src/filed/bxattr.h +src/filed/bxattr_freebsd.h +src/filed/bxattr_linux.h +src/filed/bxattr_osx.h +src/filed/bxattr_solaris.h +src/filed/fd_plugins.h +src/filed/fd_snapshot.h +src/filed/filed.h +src/filed/filed_conf.h +src/filed/protos.h +src/filed/restore.h +src/fileopts.h +src/filetypes.h +src/findlib/bfile.h +src/findlib/find.h +src/findlib/namedpipe.h +src/findlib/protos.h +src/findlib/savecwd.h +src/findlib/win32filter.h +src/jcr.h +src/lib/address_conf.h +src/lib/alist.h +src/lib/attr.h +src/lib/base64.h +src/lib/berrno.h +src/lib/bget_msg.h +src/lib/bits.h +src/lib/bjson.h +src/lib/bmtio.h +src/lib/bpipe.h +src/lib/breg.h +src/lib/bregex.h +src/lib/bsock.h +src/lib/bsockcore.h +src/lib/btime.h +src/lib/btimers.h +src/lib/bwlimit.h +src/lib/cmd_parser.h +src/lib/crypto.h +src/lib/devlock.h +src/lib/dlist.h +src/lib/flist.h +src/lib/fnmatch.h +src/lib/guid_to_name.h +src/lib/htable.h +src/lib/ini.h +src/lib/lex.h +src/lib/lib.h +src/lib/lockmgr.h +src/lib/lz4.h +src/lib/lz4_encoder.h +src/lib/md5.h +src/lib/mem_pool.h +src/lib/message.h +src/lib/mutex_list.h +src/lib/openssl-compat.h +src/lib/openssl.h +src/lib/output.h +src/lib/parse_conf.h +src/lib/plugins.h +src/lib/protos.h +src/lib/queue.h +src/lib/rblist.h +src/lib/runscript.h +src/lib/rwlock.h +src/lib/sellist.h +src/lib/serial.h +src/lib/sha1.h +src/lib/sha2.h +src/lib/smartall.h +src/lib/status.h +src/lib/tcpd.h +src/lib/tls.h +src/lib/tree.h +src/lib/unittests.h +src/lib/var.h +src/lib/waitq.h +src/lib/watchdog.h +src/lib/worker.h +src/lib/workq.h +src/plugins/fd/fd_common.h +src/qt-console/bat.h +src/qt-console/bat_conf.cpp +src/qt-console/bat_conf.h +src/qt-console/bcomm/dircomm.cpp +src/qt-console/bcomm/dircomm.h +src/qt-console/bcomm/dircomm_auth.cpp +src/qt-console/clients/clients.cpp +src/qt-console/clients/clients.h +src/qt-console/console/console.cpp +src/qt-console/console/console.h +src/qt-console/fileset/fileset.cpp +src/qt-console/fileset/fileset.h +src/qt-console/help/help.cpp +src/qt-console/help/help.h +src/qt-console/job/job.cpp +src/qt-console/job/job.h +src/qt-console/jobgraphs/jobplot.cpp +src/qt-console/jobgraphs/jobplot.h +src/qt-console/joblist/joblist.cpp +src/qt-console/joblist/joblist.h +src/qt-console/joblog/joblog.cpp +src/qt-console/joblog/joblog.h +src/qt-console/jobs/jobs.cpp +src/qt-console/jobs/jobs.h +src/qt-console/label/label.cpp +src/qt-console/label/label.h +src/qt-console/main.cpp +src/qt-console/mainwin.cpp +src/qt-console/mainwin.h +src/qt-console/mediaedit/mediaedit.cpp +src/qt-console/mediaedit/mediaedit.h +src/qt-console/mediainfo/mediainfo.cpp +src/qt-console/mediainfo/mediainfo.h +src/qt-console/medialist/medialist.cpp +src/qt-console/medialist/medialist.h +src/qt-console/medialist/mediaview.cpp +src/qt-console/medialist/mediaview.h +src/qt-console/mount/mount.cpp +src/qt-console/mount/mount.h 
+src/qt-console/pages.cpp +src/qt-console/pages.h +src/qt-console/qstd.cpp +src/qt-console/qstd.h +src/qt-console/relabel/relabel.cpp +src/qt-console/relabel/relabel.h +src/qt-console/restore/brestore.cpp +src/qt-console/restore/prerestore.cpp +src/qt-console/restore/restore.cpp +src/qt-console/restore/restore.h +src/qt-console/restore/restoretree.cpp +src/qt-console/restore/restoretree.h +src/qt-console/run/estimate.cpp +src/qt-console/run/prune.cpp +src/qt-console/run/run.cpp +src/qt-console/run/run.h +src/qt-console/run/runcmd.cpp +src/qt-console/select/select.cpp +src/qt-console/select/select.h +src/qt-console/select/textinput.cpp +src/qt-console/select/textinput.h +src/qt-console/status/clientstat.cpp +src/qt-console/status/clientstat.h +src/qt-console/status/dirstat.cpp +src/qt-console/status/dirstat.h +src/qt-console/status/storstat.cpp +src/qt-console/status/storstat.h +src/qt-console/storage/content.cpp +src/qt-console/storage/content.h +src/qt-console/storage/storage.cpp +src/qt-console/storage/storage.h +src/qt-console/testprogs/examp/main.cpp +src/qt-console/testprogs/examp/mainwindow.cpp +src/qt-console/testprogs/examp/mainwindow.h +src/qt-console/testprogs/putz/main.cpp +src/qt-console/testprogs/putz/putz.cpp +src/qt-console/testprogs/putz/putz.h +src/qt-console/tray-monitor/authenticate.cpp +src/qt-console/tray-monitor/clientselectwizardpage.cpp +src/qt-console/tray-monitor/clientselectwizardpage.h +src/qt-console/tray-monitor/common.h +src/qt-console/tray-monitor/conf.cpp +src/qt-console/tray-monitor/conf.h +src/qt-console/tray-monitor/dirstatus.cpp +src/qt-console/tray-monitor/dirstatus.h +src/qt-console/tray-monitor/fdstatus.cpp +src/qt-console/tray-monitor/fdstatus.h +src/qt-console/tray-monitor/fileselectwizardpage.cpp +src/qt-console/tray-monitor/fileselectwizardpage.h +src/qt-console/tray-monitor/filesmodel.h +src/qt-console/tray-monitor/jobselectwizardpage.cpp +src/qt-console/tray-monitor/jobselectwizardpage.h +src/qt-console/tray-monitor/jobsmodel.cpp +src/qt-console/tray-monitor/jobsmodel.h +src/qt-console/tray-monitor/pluginmodel.h +src/qt-console/tray-monitor/pluginwizardpage.cpp +src/qt-console/tray-monitor/pluginwizardpage.h +src/qt-console/tray-monitor/restoreoptionswizardpage.cpp +src/qt-console/tray-monitor/restoreoptionswizardpage.h +src/qt-console/tray-monitor/restorewizard.cpp +src/qt-console/tray-monitor/restorewizard.h +src/qt-console/tray-monitor/runjob.cpp +src/qt-console/tray-monitor/runjob.h +src/qt-console/tray-monitor/sdstatus.cpp +src/qt-console/tray-monitor/sdstatus.h +src/qt-console/tray-monitor/status.cpp +src/qt-console/tray-monitor/status.h +src/qt-console/tray-monitor/task.cpp +src/qt-console/tray-monitor/task.h +src/qt-console/tray-monitor/tray-monitor.cpp +src/qt-console/tray-monitor/tray-monitor.h +src/qt-console/tray-monitor/tray-ui.h +src/qt-console/tray-monitor/tray_conf.cpp +src/qt-console/tray-monitor/tray_conf.h +src/qt-console/tray-monitor/win32/qplatformdefs.h +src/qt-console/util/comboutil.cpp +src/qt-console/util/comboutil.h +src/qt-console/util/fmtwidgetitem.cpp +src/qt-console/util/fmtwidgetitem.h +src/qt-console/win32/qplatformdefs.h +src/stored/aligned_dev.h +src/stored/block.h +src/stored/bsr.h +src/stored/cloud_dev.h +src/stored/cloud_driver.h +src/stored/cloud_parts.h +src/stored/cloud_transfer_mgr.h +src/stored/dev.h +src/stored/fifo_dev.h +src/stored/file_dev.h +src/stored/file_driver.h +src/stored/lock.h +src/stored/null_dev.h +src/stored/protos.h +src/stored/record.h +src/stored/reserve.h +src/stored/s3_driver.h 
+src/stored/sd_plugins.h +src/stored/stored.h +src/stored/stored_conf.h +src/stored/tape_alert_msgs.h +src/stored/tape_dev.h +src/stored/vol_mgr.h +src/stored/vtape_dev.h +src/stored/win_file_dev.h +src/stored/win_tape_dev.h +src/streams.h +src/version.h +src/win32/compat/alloca.h +src/win32/compat/arpa/inet.h +src/win32/compat/compat.cpp +src/win32/compat/compat.h +src/win32/compat/dirent.h +src/win32/compat/dlfcn.h +src/win32/compat/getopt.h +src/win32/compat/grp.h +src/win32/compat/mingwconfig.h +src/win32/compat/ms_atl.h +src/win32/compat/mswinver.h +src/win32/compat/netdb.h +src/win32/compat/netinet/in.h +src/win32/compat/netinet/tcp.h +src/win32/compat/print.cpp +src/win32/compat/pwd.h +src/win32/compat/stdint.h +src/win32/compat/strings.h +src/win32/compat/sys/file.h +src/win32/compat/sys/ioctl.h +src/win32/compat/sys/mtio.h +src/win32/compat/sys/socket.h +src/win32/compat/sys/stat.h +src/win32/compat/sys/time.h +src/win32/compat/sys/wait.h +src/win32/compat/syslog.h +src/win32/compat/unistd.h +src/win32/compat/winapi.h +src/win32/compat/winhdrs.h +src/win32/compat/winhost.h +src/win32/compat/winsock.h +src/win32/dird/main.cpp +src/win32/dird/service.cpp +src/win32/dird/who.h +src/win32/filed/main.cpp +src/win32/filed/plugins/api.h +src/win32/filed/plugins/comadmin.h +src/win32/filed/plugins/exch_api.h +src/win32/filed/plugins/exch_node.h +src/win32/filed/plugins/exchange-fd.h +src/win32/filed/plugins/node.h +src/win32/filed/service.cpp +src/win32/filed/trayMonitor.cpp +src/win32/filed/vss.cpp +src/win32/filed/vss.h +src/win32/filed/vss_Vista.cpp +src/win32/filed/vss_W2K3.cpp +src/win32/filed/vss_XP.cpp +src/win32/filed/vss_generic.cpp +src/win32/filed/who.h +src/win32/libwin32/aboutDialog.cpp +src/win32/libwin32/aboutDialog.h +src/win32/libwin32/main.cpp +src/win32/libwin32/protos.h +src/win32/libwin32/res.h +src/win32/libwin32/service.cpp +src/win32/libwin32/statusDialog.cpp +src/win32/libwin32/statusDialog.h +src/win32/libwin32/trayMonitor.cpp +src/win32/libwin32/trayMonitor.h +src/win32/libwin32/win32.h +src/win32/stored/main.cpp +src/win32/stored/mtops.cpp +src/win32/stored/postest/postest.cpp +src/win32/stored/service.cpp +src/win32/stored/trayMonitor.cpp +src/win32/stored/who.h +src/win32/stored/win_tape_device.cpp +src/win32/tools/ScsiDeviceList.cpp +src/win32/tools/ScsiDeviceList.h +src/win32/tools/scsilist.cpp +src/win32/winapi.h +src/win32/wx-console/w32api.h diff --git a/po/README b/po/README new file mode 100644 index 00000000..d6e9fb83 --- /dev/null +++ b/po/README @@ -0,0 +1,66 @@ + +Notes about Bacula translations. +-------------------------------- + +--- + +To edit .po files, I recommend poedit, downloadable at http://www.poedit.org/ . + +--- + +To refresh bacula.pot and *.po, when some strings have been added, modified or +removed from the sources files, run: + +# make update-po + +--- + +To refresh Bacula source files list (POTFILES.in), when a source file is +added or removed from the repository, run: + +# make gen-potfiles && make update-po + +Note, the gen-potfiles pulls in all files found under the top +directory, so it is much better to do something like: + +cd bacula +rm -rf x +svn checkout https://bacula.svn.sourceforge.net/svnroot/bacula/trunk/bacula x +cd x +./configure +cd po +make gen-potfiles +cp POTFILES.in +cd .. +./configure + +This avoids picking up any files that are not in the SVN. + +--- + +To add a new translation language (e.g. German), add a new line to LINGUAS +containing the language code (e.g. de), then run: +# msginit -l (e.g. 
"# msginit -l de_DE") + +Open the newly created file in an editor (e.g. de.po), and look for this line: +"Project-Id-Version: ...\n" +If it still look like this: +"Project-Id-Version: PACKAGE VERSION\n" +Correct it to: +"Project-Id-Version: Bacula 1.38\n" + +You may also want to correct the language team to: +"Language-Team: German \n" + +Finally, add .mo (e.g. de.mo) to .cvsignore, and don't forget to +run: +# svn add .po .gmo + +--- + +For more information, see the gettext manual: +http://www.gnu.org/software/gettext/manual/ + +--- + +Nicolas Boichat , August 2005 diff --git a/po/Rules-quot b/po/Rules-quot new file mode 100644 index 00000000..9c2a995e --- /dev/null +++ b/po/Rules-quot @@ -0,0 +1,47 @@ +# Special Makefile rules for English message catalogs with quotation marks. + +DISTFILES.common.extra1 = quot.sed boldquot.sed en@quot.header en@boldquot.header insert-header.sin Rules-quot + +.SUFFIXES: .insert-header .po-update-en + +en@quot.po-create: + $(MAKE) en@quot.po-update +en@boldquot.po-create: + $(MAKE) en@boldquot.po-update + +en@quot.po-update: en@quot.po-update-en +en@boldquot.po-update: en@boldquot.po-update-en + +.insert-header.po-update-en: + @lang=`echo $@ | sed -e 's/\.po-update-en$$//'`; \ + if test "$(PACKAGE)" = "gettext"; then PATH=`pwd`/../src:$$PATH; GETTEXTLIBDIR=`cd $(top_srcdir)/src && pwd`; export GETTEXTLIBDIR; fi; \ + tmpdir=`pwd`; \ + echo "$$lang:"; \ + ll=`echo $$lang | sed -e 's/@.*//'`; \ + LC_ALL=C; export LC_ALL; \ + cd $(srcdir); \ + if $(MSGINIT) -i $(DOMAIN).pot --no-translator -l $$ll -o - 2>/dev/null | sed -f $$tmpdir/$$lang.insert-header | $(MSGCONV) -t UTF-8 | $(MSGFILTER) sed -f `echo $$lang | sed -e 's/.*@//'`.sed 2>/dev/null > $$tmpdir/$$lang.new.po; then \ + if cmp $$lang.po $$tmpdir/$$lang.new.po >/dev/null 2>&1; then \ + rm -f $$tmpdir/$$lang.new.po; \ + else \ + if mv -f $$tmpdir/$$lang.new.po $$lang.po; then \ + :; \ + else \ + echo "creation of $$lang.po failed: cannot move $$tmpdir/$$lang.new.po to $$lang.po" 1>&2; \ + exit 1; \ + fi; \ + fi; \ + else \ + echo "creation of $$lang.po failed!" 1>&2; \ + rm -f $$tmpdir/$$lang.new.po; \ + fi + +en@quot.insert-header: insert-header.sin + sed -e '/^#/d' -e 's/HEADER/en@quot.header/g' $(srcdir)/insert-header.sin > en@quot.insert-header + +en@boldquot.insert-header: insert-header.sin + sed -e '/^#/d' -e 's/HEADER/en@boldquot.header/g' $(srcdir)/insert-header.sin > en@boldquot.insert-header + +mostlyclean: mostlyclean-quot +mostlyclean-quot: + rm -f *.insert-header diff --git a/po/bacula.pot b/po/bacula.pot new file mode 100644 index 00000000..35211cec --- /dev/null +++ b/po/bacula.pot @@ -0,0 +1,610 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR Kern Sibbald +# This file is distributed under the same license as the PACKAGE package. +# FIRST AUTHOR , YEAR. 
+# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: PACKAGE VERSION\n" +"Report-Msgid-Bugs-To: bacula-devel@lists.sourceforge.net\n" +"POT-Creation-Date: 2018-08-11 21:43+0200\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"Language: \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=CHARSET\n" +"Content-Transfer-Encoding: 8bit\n" + +#: src/baconfig.h:62 src/baconfig.h:63 src/baconfig.h:68 src/baconfig.h:69 +#: src/baconfig.h:80 src/baconfig.h:81 +#, c-format +msgid "Failed ASSERT: %s\n" +msgstr "" + +#: src/baconfig.h:89 +msgid "*None*" +msgstr "" + +#: src/lib/status.h:84 +msgid "" +"\n" +"Terminated Jobs:\n" +msgstr "" + +#: src/lib/status.h:91 +msgid " JobId Level Files Bytes Status Finished Name \n" +msgstr "" + +#: src/lib/status.h:93 +msgid "===================================================================\n" +msgstr "" + +#: src/lib/status.h:119 +msgid "Created" +msgstr "" + +#: src/lib/status.h:123 +msgid "Error" +msgstr "" + +#: src/lib/status.h:126 +msgid "Diffs" +msgstr "" + +#: src/lib/status.h:129 +msgid "Cancel" +msgstr "" + +#: src/lib/status.h:132 +msgid "OK" +msgstr "" + +#: src/lib/status.h:135 +msgid "OK -- with warnings" +msgstr "" + +#: src/lib/status.h:138 +msgid "Incomplete" +msgstr "" + +#: src/lib/status.h:141 +msgid "Other" +msgstr "" + +#: src/lib/status.h:153 +#, c-format +msgid "%6d\t%-6s\t%8s\t%10s\t%-7s\t%-8s\t%s\n" +msgstr "" + +#: src/lib/status.h:182 +#, c-format +msgid "%6d %-6s %8s %10s %-7s %-8s %s\n" +msgstr "" + +#: src/lib/status.h:214 src/lib/status.h:225 src/lib/status.h:239 +#: src/lib/status.h:243 src/lib/status.h:247 +msgid "Bacula " +msgstr "" + +#: src/qt-console/bat_conf.cpp:133 +#, c-format +msgid "No record for %d %s\n" +msgstr "" + +#: src/qt-console/bat_conf.cpp:142 +#, c-format +msgid "Director: name=%s address=%s DIRport=%d\n" +msgstr "" + +#: src/qt-console/bat_conf.cpp:146 +#, c-format +msgid "Console: name=%s\n" +msgstr "" + +#: src/qt-console/bat_conf.cpp:149 +#, c-format +msgid "ConsoleFont: name=%s font face=%s\n" +msgstr "" + +#: src/qt-console/bat_conf.cpp:153 src/qt-console/bat_conf.cpp:235 +#: src/qt-console/bat_conf.cpp:282 src/qt-console/bat_conf.cpp:312 +#, c-format +msgid "Unknown resource type %d\n" +msgstr "" + +#: src/qt-console/bat_conf.cpp:259 +#: src/qt-console/tray-monitor/tray_conf.cpp:311 +#, c-format +msgid "\"%s\" directive is required in \"%s\" resource, but not found.\n" +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:88 +#, c-format +msgid "Already connected\"%s\".\n" +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:99 +#, c-format +msgid "Connecting to Director %s:%d" +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:101 +#, c-format +msgid "" +"Connecting to Director %s:%d\n" +"\n" +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:153 +#, c-format +msgid "Failed to initialize TLS context for Console \"%s\".\n" +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:176 +#, c-format +msgid "Failed to initialize TLS context for Director \"%s\".\n" +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:198 +#: src/qt-console/tray-monitor/task.cpp:233 +msgid "Director daemon" +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:236 +msgid "Initializing ..." +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:252 src/qt-console/console/console.cpp:133 +msgid "Connected" +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:377 +msgid "Command completed ..." 
+msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:384 src/qt-console/console/console.cpp:370 +msgid "Processing command ..." +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:391 +msgid "At main prompt waiting for input ..." +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:398 src/qt-console/bcomm/dircomm.cpp:408 +msgid "At prompt waiting for input ..." +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:416 +msgid "Command failed." +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:488 +msgid "Director disconnected." +msgstr "" + +#: src/qt-console/bcomm/dircomm_auth.cpp:110 +#, c-format +msgid "Director authorization problem at \"%s:%d\"\n" +msgstr "" + +#: src/qt-console/bcomm/dircomm_auth.cpp:117 +#, c-format +msgid "" +"Authorization problem: Remote server at \"%s:%d\" did not advertise required " +"TLS support.\n" +msgstr "" + +#: src/qt-console/bcomm/dircomm_auth.cpp:125 +#, c-format +msgid "" +"Authorization problem with Director at \"%s:%d\": Remote server requires " +"TLS.\n" +msgstr "" + +#: src/qt-console/bcomm/dircomm_auth.cpp:136 +#, c-format +msgid "TLS negotiation failed with Director at \"%s:%d\"\n" +msgstr "" + +#: src/qt-console/bcomm/dircomm_auth.cpp:148 +#, c-format +msgid "" +"Bad response to Hello command: ERR=%s\n" +"The Director at \"%s:%d\" is probably not running.\n" +msgstr "" + +#: src/qt-console/bcomm/dircomm_auth.cpp:165 +#, c-format +msgid "Director at \"%s:%d\" rejected Hello command\n" +msgstr "" + +#: src/qt-console/bcomm/dircomm_auth.cpp:182 +#, c-format +msgid "" +"Authorization problem with Director at \"%s:%d\"\n" +"Most likely the passwords do not agree.\n" +"If you are using TLS, there may have been a certificate validation error " +"during the TLS handshake.\n" +"For help, please see " +msgstr "" + +#: src/qt-console/main.cpp:160 +msgid "Cryptography library initialization failed.\n" +msgstr "" + +#: src/qt-console/main.cpp:164 +#, c-format +msgid "Please correct configuration file: %s\n" +msgstr "" + +#: src/qt-console/main.cpp:188 +#, c-format +msgid "" +"\n" +"%sVersion: %s (%s) %s %s %s\n" +"\n" +"Usage: bat [-s] [-c config_file] [-d debug_level] [config_file]\n" +" -c set configuration file to file\n" +" -dnn set debug level to nn\n" +" -s no signals\n" +" -t test - read configuration and exit\n" +" -? print this message.\n" +"\n" +msgstr "" + +#: src/qt-console/main.cpp:221 src/qt-console/main.cpp:251 +msgid "TLS required but not configured in Bacula.\n" +msgstr "" + +#: src/qt-console/main.cpp:229 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Director \"%s\" in %s. 
At least one CA certificate store is required.\n" +msgstr "" + +#: src/qt-console/main.cpp:238 +#, c-format +msgid "" +"No Director resource defined in %s\n" +"Without that I don't how to speak to the Director :-(\n" +msgstr "" + +#: src/qt-console/main.cpp:259 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Console \"%s\" in %s.\n" +msgstr "" + +#: src/qt-console/tray-monitor/authenticate.cpp:86 +msgid "" +"Authorization problem.\n" +"Most likely the passwords do not agree.\n" +"For help, please see " +msgstr "" + +#: src/qt-console/tray-monitor/authenticate.cpp:94 +msgid "" +"Authorization problem: Remote server did not advertise required TLS " +"support.\n" +msgstr "" + +#: src/qt-console/tray-monitor/authenticate.cpp:110 +msgid "TLS negotiation failed\n" +msgstr "" + +#: src/qt-console/tray-monitor/authenticate.cpp:117 +#, c-format +msgid "Bad response to Hello command: ERR=%s\n" +msgstr "" + +#: src/qt-console/tray-monitor/authenticate.cpp:134 +msgid "Daemon rejected Hello command\n" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:89 +msgid "The Name of the Monitor should be set" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:129 +msgid "The name of the Resource should be set" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:138 +#, c-format +msgid "The address of the Resource should be set for resource %s" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:147 +#, c-format +msgid "The Password of should be set for resource %s" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:171 +#, c-format +msgid "The TLS CA Certificate File should be a PEM file for resource %s" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:182 +#, c-format +msgid "The TLS CA Certificate Directory should be a directory for resource %s" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:193 +#, c-format +msgid "The TLS Certificate File should be a file for resource %s" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:204 +#, c-format +msgid "The TLS Key File should be a file for resource %s" +msgstr "" + +#: src/qt-console/tray-monitor/runjob.cpp:45 +msgid "This restricted console does not have access to Backup jobs" +msgstr "" + +#: src/qt-console/tray-monitor/runjob.cpp:123 +msgid "Nothing selected" +msgstr "" + +#: src/qt-console/tray-monitor/task.cpp:97 +msgid "Bandwidth can set only set on Client" +msgstr "" + +#: src/qt-console/tray-monitor/task.cpp:102 +msgid "Bandwidth parameter is invalid" +msgstr "" + +#: src/qt-console/tray-monitor/task.cpp:177 +msgid "Client daemon" +msgstr "" + +#: src/qt-console/tray-monitor/task.cpp:205 +msgid "Storage daemon" +msgstr "" + +#: src/qt-console/tray-monitor/tray-monitor.cpp:45 +#, c-format +msgid "" +"\n" +"%sVersion: %s (%s) %s %s %s\n" +"\n" +"Usage: tray-monitor [-c config_file] [-d debug_level]\n" +" -c set configuration file to file\n" +" -d set debug level to \n" +" -dt print timestamp in debug output\n" +" -t test - read configuration and exit\n" +" -W 0/1 force the detection of the systray\n" +" -? print this message.\n" +"\n" +msgstr "" + +#: src/qt-console/tray-monitor/tray-monitor.cpp:118 +msgid "TLS PassPhrase" +msgstr "" + +#: src/qt-console/tray-monitor/tray-monitor.cpp:164 +#, c-format +msgid "" +"Error: %d Monitor resources defined in %s. 
You must define one Monitor " +"resource.\n" +msgstr "" + +#: src/qt-console/tray-monitor/tray-ui.h:105 +#, c-format +msgid "Failed to initialize TLS context for \"%s\".\n" +msgstr "" + +#: src/qt-console/tray-monitor/tray-ui.h:320 +msgid "Select a Director" +msgstr "" + +#: src/qt-console/tray-monitor/tray_conf.cpp:172 +#, c-format +msgid "No %s resource defined\n" +msgstr "" + +#: src/qt-console/tray-monitor/tray_conf.cpp:181 +#, c-format +msgid "Monitor: name=%s\n" +msgstr "" + +#: src/qt-console/tray-monitor/tray_conf.cpp:184 +#, c-format +msgid "Director: name=%s address=%s port=%d\n" +msgstr "" + +#: src/qt-console/tray-monitor/tray_conf.cpp:188 +#, c-format +msgid "Client: name=%s address=%s port=%d\n" +msgstr "" + +#: src/qt-console/tray-monitor/tray_conf.cpp:192 +#, c-format +msgid "Storage: name=%s address=%s port=%d\n" +msgstr "" + +#: src/qt-console/tray-monitor/tray_conf.cpp:196 +#, c-format +msgid "Unknown resource type %d in dump_resource.\n" +msgstr "" + +#: src/qt-console/tray-monitor/tray_conf.cpp:284 +#, c-format +msgid "Unknown resource type %d in free_resource.\n" +msgstr "" + +#: src/qt-console/tray-monitor/tray_conf.cpp:318 +#, c-format +msgid "Too many directives in \"%s\" resource\n" +msgstr "" + +#: src/qt-console/tray-monitor/tray_conf.cpp:338 +#: src/qt-console/tray-monitor/tray_conf.cpp:372 +#, c-format +msgid "Unknown resource type %d in save_resource.\n" +msgstr "" + +#: src/win32/compat/compat.cpp:2879 +msgid "" +"\n" +"\n" +"Bacula ERROR: " +msgstr "" + +#: src/win32/filed/vss.cpp:244 src/win32/filed/vss.cpp:259 +#, c-format +msgid "pthread key create failed: ERR=%s\n" +msgstr "" + +#: src/win32/filed/vss.cpp:267 +#, c-format +msgid "pthread_setspecific failed: ERR=%s\n" +msgstr "" + +#: src/win32/filed/vss_generic.cpp:725 +#, c-format +msgid "Unable to find volume %ls in the device list\n" +msgstr "" + +#: src/win32/libwin32/main.cpp:227 +msgid "Bad Command Line Option" +msgstr "" + +#: src/win32/libwin32/service.cpp:98 +msgid "RegisterServiceCtlHandler failed" +msgstr "" + +#: src/win32/libwin32/service.cpp:99 +msgid "Failure contacting the Service Handler" +msgstr "" + +#: src/win32/libwin32/service.cpp:110 +msgid "Service start report failed" +msgstr "" + +#: src/win32/libwin32/service.cpp:163 +msgid "StartServiceCtrlDispatcher failed." +msgstr "" + +#: src/win32/libwin32/service.cpp:170 +msgid "KERNEL32.DLL not found: Bacula service not started" +msgstr "" + +#: src/win32/libwin32/service.cpp:180 +msgid "Registry service not found: Bacula service not started" +msgstr "" + +#: src/win32/libwin32/service.cpp:182 +msgid "Registry service entry point not found" +msgstr "" + +#: src/win32/libwin32/service.cpp:204 +msgid "Report Service failure" +msgstr "" + +#: src/win32/libwin32/service.cpp:235 +msgid "Unable to install the service" +msgstr "" + +#: src/win32/libwin32/service.cpp:243 +msgid "Service command length too long" +msgstr "" + +#: src/win32/libwin32/service.cpp:244 +msgid "Service command length too long. Service not registered." +msgstr "" + +#: src/win32/libwin32/service.cpp:257 +msgid "" +"The Service Control Manager could not be contacted - the service was not " +"installed" +msgstr "" + +#: src/win32/libwin32/service.cpp:280 src/win32/libwin32/service.cpp:309 +#: src/win32/libwin32/service.cpp:355 src/win32/libwin32/service.cpp:362 +#: src/win32/libwin32/service.cpp:366 +msgid "The Bacula service: " +msgstr "" + +#: src/win32/libwin32/service.cpp:287 +msgid "" +"Provides file backup and restore services. 
Bacula -- the network backup " +"solution." +msgstr "" + +#: src/win32/libwin32/service.cpp:298 +msgid "Cannot write System Registry for " +msgstr "" + +#: src/win32/libwin32/service.cpp:299 +msgid "" +"The System Registry could not be updated - the Bacula service was not " +"installed" +msgstr "" + +#: src/win32/libwin32/service.cpp:308 +msgid "Cannot add Bacula key to System Registry" +msgstr "" + +#: src/win32/libwin32/service.cpp:319 +msgid "The " +msgstr "" + +#: src/win32/libwin32/service.cpp:373 +msgid "An existing Bacula service: " +msgstr "" + +#: src/win32/libwin32/service.cpp:381 +msgid "" +"The service Manager could not be contacted - the Bacula service was not " +"removed" +msgstr "" + +#: src/win32/libwin32/service.cpp:394 +msgid "" +"Could not find registry entry.\n" +"Service probably not registerd - the Bacula service was not removed" +msgstr "" + +#: src/win32/libwin32/service.cpp:401 +msgid "Could not delete Registry key for " +msgstr "" + +#: src/win32/libwin32/service.cpp:411 +msgid "Bacula could not be contacted, probably not running" +msgstr "" + +#: src/win32/libwin32/service.cpp:418 +msgid "The Bacula service has been removed" +msgstr "" + +#: src/win32/libwin32/service.cpp:459 +msgid "SetServiceStatus failed" +msgstr "" + +#: src/win32/libwin32/service.cpp:485 +#, c-format +msgid "" +"\n" +"\n" +"%s error: %ld at %s:%d" +msgstr "" + +#: src/win32/libwin32/service.cpp:561 +#, c-format +msgid "Locked by: %s, duration: %ld seconds\n" +msgstr "" + +#: src/win32/libwin32/service.cpp:565 +#, c-format +msgid "No longer locked\n" +msgstr "" + +#: src/win32/libwin32/service.cpp:569 +msgid "Could not lock database" +msgstr "" diff --git a/po/boldquot.sed b/po/boldquot.sed new file mode 100644 index 00000000..4b937aa5 --- /dev/null +++ b/po/boldquot.sed @@ -0,0 +1,10 @@ +s/"\([^"]*\)"/“\1”/g +s/`\([^`']*\)'/‘\1’/g +s/ '\([^`']*\)' / ‘\1’ /g +s/ '\([^`']*\)'$/ ‘\1’/g +s/^'\([^`']*\)' /‘\1’ /g +s/“”/""/g +s/“/“/g +s/”/”/g +s/‘/‘/g +s/’/’/g diff --git a/po/de.po b/po/de.po new file mode 100644 index 00000000..3e3df94e --- /dev/null +++ b/po/de.po @@ -0,0 +1,9259 @@ +# +# German translations for Bacula package +# Copyright (C) 2000-2016 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +msgid "" +msgstr "" +"Project-Id-Version: de\n" +"Report-Msgid-Bugs-To: bacula-devel@lists.sourceforge.net\n" +"POT-Creation-Date: 2018-08-11 21:43+0200\n" +"PO-Revision-Date: 2007-06-25 20:38+0200\n" +"Last-Translator: Kern Sibbald \n" +"Language-Team: \n" +"Language: German\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=ISO-8859-1\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +#: src/baconfig.h:62 src/baconfig.h:63 src/baconfig.h:68 src/baconfig.h:69 +#: src/baconfig.h:80 src/baconfig.h:81 +#, c-format +msgid "Failed ASSERT: %s\n" +msgstr "" + +#: src/baconfig.h:89 +msgid "*None*" +msgstr "" + +#: src/lib/status.h:84 +msgid "" +"\n" +"Terminated Jobs:\n" +msgstr "" + +#: src/lib/status.h:91 +msgid " JobId Level Files Bytes Status Finished Name \n" +msgstr "" + +#: src/lib/status.h:93 +msgid "===================================================================\n" +msgstr "" + +#: src/lib/status.h:119 +msgid "Created" +msgstr "" + +#: src/lib/status.h:123 +msgid "Error" +msgstr "" + +#: src/lib/status.h:126 +msgid "Diffs" +msgstr "" + +#: src/lib/status.h:129 +msgid "Cancel" +msgstr "" + +#: src/lib/status.h:132 +msgid "OK" +msgstr "" + +#: src/lib/status.h:135 +#, fuzzy +msgid "OK -- with warnings" +msgstr "%s OK -- mit 
Warnungen" + +#: src/lib/status.h:138 +msgid "Incomplete" +msgstr "" + +#: src/lib/status.h:141 +msgid "Other" +msgstr "" + +#: src/lib/status.h:153 +#, fuzzy, c-format +msgid "%6d\t%-6s\t%8s\t%10s\t%-7s\t%-8s\t%s\n" +msgstr "1000 OK: %s Version: %s (%s)\n" + +#: src/lib/status.h:182 +#, fuzzy, c-format +msgid "%6d %-6s %8s %10s %-7s %-8s %s\n" +msgstr "1000 OK: %s Version: %s (%s)\n" + +#: src/lib/status.h:214 src/lib/status.h:225 src/lib/status.h:239 +#: src/lib/status.h:243 src/lib/status.h:247 +msgid "Bacula " +msgstr "Bacula " + +#: src/qt-console/bat_conf.cpp:133 +#, c-format +msgid "No record for %d %s\n" +msgstr "" + +#: src/qt-console/bat_conf.cpp:142 +#, c-format +msgid "Director: name=%s address=%s DIRport=%d\n" +msgstr "" + +#: src/qt-console/bat_conf.cpp:146 +#, c-format +msgid "Console: name=%s\n" +msgstr "" + +#: src/qt-console/bat_conf.cpp:149 +#, c-format +msgid "ConsoleFont: name=%s font face=%s\n" +msgstr "" + +#: src/qt-console/bat_conf.cpp:153 src/qt-console/bat_conf.cpp:235 +#: src/qt-console/bat_conf.cpp:282 src/qt-console/bat_conf.cpp:312 +#, c-format +msgid "Unknown resource type %d\n" +msgstr "" + +#: src/qt-console/bat_conf.cpp:259 +#: src/qt-console/tray-monitor/tray_conf.cpp:311 +#, fuzzy, c-format +msgid "\"%s\" directive is required in \"%s\" resource, but not found.\n" +msgstr "%s item wird in %s resource bentigt, wurde aber nicht gefunden.\n" + +#: src/qt-console/bcomm/dircomm.cpp:88 +#, c-format +msgid "Already connected\"%s\".\n" +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:99 +#, c-format +msgid "Connecting to Director %s:%d" +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:101 +#, c-format +msgid "" +"Connecting to Director %s:%d\n" +"\n" +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:153 +#, c-format +msgid "Failed to initialize TLS context for Console \"%s\".\n" +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:176 +#, c-format +msgid "Failed to initialize TLS context for Director \"%s\".\n" +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:198 +#: src/qt-console/tray-monitor/task.cpp:233 +msgid "Director daemon" +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:236 +msgid "Initializing ..." +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:252 src/qt-console/console/console.cpp:133 +msgid "Connected" +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:377 +msgid "Command completed ..." +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:384 src/qt-console/console/console.cpp:370 +msgid "Processing command ..." +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:391 +msgid "At main prompt waiting for input ..." +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:398 src/qt-console/bcomm/dircomm.cpp:408 +msgid "At prompt waiting for input ..." +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:416 +msgid "Command failed." +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:488 +msgid "Director disconnected." 
+msgstr "" + +#: src/qt-console/bcomm/dircomm_auth.cpp:110 +#, fuzzy, c-format +msgid "Director authorization problem at \"%s:%d\"\n" +msgstr "Authorisationsproblem: FD an \"%s:%d\" erfordert TLS.\n" + +#: src/qt-console/bcomm/dircomm_auth.cpp:117 +#, fuzzy, c-format +msgid "" +"Authorization problem: Remote server at \"%s:%d\" did not advertise required " +"TLS support.\n" +msgstr "" +"Authorisationsproblem: Entfernter Server hat bentigte TLS Untersttzung " +"nicht angeboten.\n" + +#: src/qt-console/bcomm/dircomm_auth.cpp:125 +#, fuzzy, c-format +msgid "" +"Authorization problem with Director at \"%s:%d\": Remote server requires " +"TLS.\n" +msgstr "Authorisationsproblem: Enfernter Server erfordert TLS.\n" + +#: src/qt-console/bcomm/dircomm_auth.cpp:136 +#, fuzzy, c-format +msgid "TLS negotiation failed with Director at \"%s:%d\"\n" +msgstr "TLS Aushandlung fehlgeschlagen mit SD an \"%s:%d\"\n" + +#: src/qt-console/bcomm/dircomm_auth.cpp:148 +#, c-format +msgid "" +"Bad response to Hello command: ERR=%s\n" +"The Director at \"%s:%d\" is probably not running.\n" +msgstr "" + +#: src/qt-console/bcomm/dircomm_auth.cpp:165 +#, fuzzy, c-format +msgid "Director at \"%s:%d\" rejected Hello command\n" +msgstr "File daemon auf \"%s:%d\" hat Hello Kommando abgelehnt\n" + +#: src/qt-console/bcomm/dircomm_auth.cpp:182 +#, fuzzy, c-format +msgid "" +"Authorization problem with Director at \"%s:%d\"\n" +"Most likely the passwords do not agree.\n" +"If you are using TLS, there may have been a certificate validation error " +"during the TLS handshake.\n" +"For help, please see " +msgstr "" +"Kann mit File daemon on \"%s:%d\" nicht authentisieren. Mgliche Ursachen:\n" +"Passworte oder Namen nicht gleich oder\n" +"Maximum Concurrent Jobs berschritten auf dem FD oder\n" +"FD Netzwerk durcheinander (Daemon neustarten).\n" +"Fr Hilfe bitte unter http://www.bacula.org/rel-manual/faq." +"html#AuthorizationErrors nachsehen.\n" + +#: src/qt-console/main.cpp:160 +msgid "Cryptography library initialization failed.\n" +msgstr "Initialisierung der Verschlsselungsbibliothek fehlgeschlagen.\n" + +#: src/qt-console/main.cpp:164 +#, c-format +msgid "Please correct configuration file: %s\n" +msgstr "Bitte die Konfigurationsdatei korrigieren: %s\n" + +#: src/qt-console/main.cpp:188 +#, fuzzy, c-format +msgid "" +"\n" +"%sVersion: %s (%s) %s %s %s\n" +"\n" +"Usage: bat [-s] [-c config_file] [-d debug_level] [config_file]\n" +" -c set configuration file to file\n" +" -dnn set debug level to nn\n" +" -s no signals\n" +" -t test - read configuration and exit\n" +" -? print this message.\n" +"\n" +msgstr "" +"\n" +"Version: %s (%s)\n" +"\n" +"Usage: dird [-f -s] [-c config_file] [-d debug_level] [config_file]\n" +" -c setze Konfigurationsdatei auf Datei\n" +" -dnn setze debug level auf nn\n" +" -f starte im Vordergrund (fr debugging Zwecke)\n" +" -g groupid\n" +" -r starte jetzt\n" +" -s no signals\n" +" -t test - Konfiguration Lesen und beenden\n" +" -u userid\n" +" -v ausfhrliche Benutzer Meldungen\n" +" -? diese Meldung ausgeben.\n" +"\n" + +#: src/qt-console/main.cpp:221 src/qt-console/main.cpp:251 +msgid "TLS required but not configured in Bacula.\n" +msgstr "TLS bentigt aber nicht konfiguriert in Bacula.\n" + +#: src/qt-console/main.cpp:229 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Director \"%s\" in %s. 
At least one CA certificate store is required.\n" +msgstr "" + +#: src/qt-console/main.cpp:238 +#, c-format +msgid "" +"No Director resource defined in %s\n" +"Without that I don't how to speak to the Director :-(\n" +msgstr "" + +#: src/qt-console/main.cpp:259 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Console \"%s\" in %s.\n" +msgstr "" + +#: src/qt-console/tray-monitor/authenticate.cpp:86 +#, fuzzy +msgid "" +"Authorization problem.\n" +"Most likely the passwords do not agree.\n" +"For help, please see " +msgstr "" +"Kann mit File daemon on \"%s:%d\" nicht authentisieren. Mgliche Ursachen:\n" +"Passworte oder Namen nicht gleich oder\n" +"Maximum Concurrent Jobs berschritten auf dem FD oder\n" +"FD Netzwerk durcheinander (Daemon neustarten).\n" +"Fr Hilfe bitte unter http://www.bacula.org/rel-manual/faq." +"html#AuthorizationErrors nachsehen.\n" + +#: src/qt-console/tray-monitor/authenticate.cpp:94 +msgid "" +"Authorization problem: Remote server did not advertise required TLS " +"support.\n" +msgstr "" +"Authorisationsproblem: Entfernter Server hat bentigte TLS Untersttzung " +"nicht angeboten.\n" + +#: src/qt-console/tray-monitor/authenticate.cpp:110 +#, fuzzy +msgid "TLS negotiation failed\n" +msgstr "TLS Aushandlung gescheitert.\n" + +#: src/qt-console/tray-monitor/authenticate.cpp:117 +#, c-format +msgid "Bad response to Hello command: ERR=%s\n" +msgstr "" + +#: src/qt-console/tray-monitor/authenticate.cpp:134 +#, fuzzy +msgid "Daemon rejected Hello command\n" +msgstr "File daemon hat Hello Kommando abgelehnt\n" + +#: src/qt-console/tray-monitor/conf.cpp:89 +msgid "The Name of the Monitor should be set" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:129 +msgid "The name of the Resource should be set" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:138 +#, c-format +msgid "The address of the Resource should be set for resource %s" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:147 +#, c-format +msgid "The Password of should be set for resource %s" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:171 +#, fuzzy, c-format +msgid "The TLS CA Certificate File should be a PEM file for resource %s" +msgstr "\"TLS Certificate\" Datei nicht fr Director definiert \"%s\" in %s.\n" + +#: src/qt-console/tray-monitor/conf.cpp:182 +#, c-format +msgid "The TLS CA Certificate Directory should be a directory for resource %s" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:193 +#, fuzzy, c-format +msgid "The TLS Certificate File should be a file for resource %s" +msgstr "\"TLS Certificate\" Datei nicht fr Director definiert \"%s\" in %s.\n" + +#: src/qt-console/tray-monitor/conf.cpp:204 +#, c-format +msgid "The TLS Key File should be a file for resource %s" +msgstr "" + +#: src/qt-console/tray-monitor/runjob.cpp:45 +msgid "This restricted console does not have access to Backup jobs" +msgstr "" + +#: src/qt-console/tray-monitor/runjob.cpp:123 +#, fuzzy +msgid "Nothing selected" +msgstr "Kein storage angegeben.\n" + +#: src/qt-console/tray-monitor/task.cpp:97 +msgid "Bandwidth can set only set on Client" +msgstr "" + +#: src/qt-console/tray-monitor/task.cpp:102 +msgid "Bandwidth parameter is invalid" +msgstr "" + +#: src/qt-console/tray-monitor/task.cpp:177 +#, fuzzy +msgid "Client daemon" +msgstr "File daemon" + +#: src/qt-console/tray-monitor/task.cpp:205 +msgid "Storage daemon" +msgstr "Storage daemon" + +#: src/qt-console/tray-monitor/tray-monitor.cpp:45 +#, fuzzy, c-format +msgid "" +"\n" +"%sVersion: %s 
(%s) %s %s %s\n" +"\n" +"Usage: tray-monitor [-c config_file] [-d debug_level]\n" +" -c set configuration file to file\n" +" -d set debug level to \n" +" -dt print timestamp in debug output\n" +" -t test - read configuration and exit\n" +" -W 0/1 force the detection of the systray\n" +" -? print this message.\n" +"\n" +msgstr "" +"\n" +"Version: %s (%s)\n" +"\n" +"Usage: dird [-f -s] [-c config_file] [-d debug_level] [config_file]\n" +" -c setze Konfigurationsdatei auf Datei\n" +" -dnn setze debug level auf nn\n" +" -f starte im Vordergrund (fr debugging Zwecke)\n" +" -g groupid\n" +" -r starte jetzt\n" +" -s no signals\n" +" -t test - Konfiguration Lesen und beenden\n" +" -u userid\n" +" -v ausfhrliche Benutzer Meldungen\n" +" -? diese Meldung ausgeben.\n" +"\n" + +#: src/qt-console/tray-monitor/tray-monitor.cpp:118 +msgid "TLS PassPhrase" +msgstr "" + +#: src/qt-console/tray-monitor/tray-monitor.cpp:164 +#, c-format +msgid "" +"Error: %d Monitor resources defined in %s. You must define one Monitor " +"resource.\n" +msgstr "" + +#: src/qt-console/tray-monitor/tray-ui.h:105 +#, fuzzy, c-format +msgid "Failed to initialize TLS context for \"%s\".\n" +msgstr "Konnte TLS context fr Storage \"%s\" in %s nicht initialisieren.\n" + +#: src/qt-console/tray-monitor/tray-ui.h:320 +#, fuzzy +msgid "Select a Director" +msgstr "Pool Ressource" + +#: src/qt-console/tray-monitor/tray_conf.cpp:172 +#, c-format +msgid "No %s resource defined\n" +msgstr "Keine %s resource definiert\n" + +#: src/qt-console/tray-monitor/tray_conf.cpp:181 +#, fuzzy, c-format +msgid "Monitor: name=%s\n" +msgstr "FileSet: name=%s\n" + +#: src/qt-console/tray-monitor/tray_conf.cpp:184 +#, fuzzy, c-format +msgid "Director: name=%s address=%s port=%d\n" +msgstr "Client: name=%s address=%s FDport=%d MaxJobs=%u\n" + +#: src/qt-console/tray-monitor/tray_conf.cpp:188 +#, fuzzy, c-format +msgid "Client: name=%s address=%s port=%d\n" +msgstr "Client: name=%s address=%s FDport=%d MaxJobs=%u\n" + +#: src/qt-console/tray-monitor/tray_conf.cpp:192 +#, fuzzy, c-format +msgid "Storage: name=%s address=%s port=%d\n" +msgstr "Client: name=%s address=%s FDport=%d MaxJobs=%u\n" + +#: src/qt-console/tray-monitor/tray_conf.cpp:196 +#, c-format +msgid "Unknown resource type %d in dump_resource.\n" +msgstr "Unbekannter resource type %d in dump_resource.\n" + +#: src/qt-console/tray-monitor/tray_conf.cpp:284 +#, c-format +msgid "Unknown resource type %d in free_resource.\n" +msgstr "Unbekannter resource type %d in free_resource.\n" + +#: src/qt-console/tray-monitor/tray_conf.cpp:318 +#, fuzzy, c-format +msgid "Too many directives in \"%s\" resource\n" +msgstr "zu viele items in %s resource\n" + +#: src/qt-console/tray-monitor/tray_conf.cpp:338 +#: src/qt-console/tray-monitor/tray_conf.cpp:372 +#, c-format +msgid "Unknown resource type %d in save_resource.\n" +msgstr "Unbekannter resource type %d in save_resource.\n" + +#: src/win32/compat/compat.cpp:2879 +#, fuzzy +msgid "" +"\n" +"\n" +"Bacula ERROR: " +msgstr "Bacula " + +#: src/win32/filed/vss.cpp:244 src/win32/filed/vss.cpp:259 +#, fuzzy, c-format +msgid "pthread key create failed: ERR=%s\n" +msgstr "pthread_create: ERR=%s\n" + +#: src/win32/filed/vss.cpp:267 +#, fuzzy, c-format +msgid "pthread_setspecific failed: ERR=%s\n" +msgstr "Media id select fehlgeschlagen: ERR=%s\n" + +#: src/win32/filed/vss_generic.cpp:725 +#, fuzzy, c-format +msgid "Unable to find volume %ls in the device list\n" +msgstr "Neues Volume \"%s\" in catalog erzeugt.\n" + +#: src/win32/libwin32/main.cpp:227 +msgid "Bad Command 
Line Option" +msgstr "" + +#: src/win32/libwin32/service.cpp:98 +msgid "RegisterServiceCtlHandler failed" +msgstr "" + +#: src/win32/libwin32/service.cpp:99 +msgid "Failure contacting the Service Handler" +msgstr "" + +#: src/win32/libwin32/service.cpp:110 +msgid "Service start report failed" +msgstr "" + +#: src/win32/libwin32/service.cpp:163 +msgid "StartServiceCtrlDispatcher failed." +msgstr "" + +#: src/win32/libwin32/service.cpp:170 +#, fuzzy +msgid "KERNEL32.DLL not found: Bacula service not started" +msgstr "Pool Ressource \"%s\" nicht gefunden.\n" + +#: src/win32/libwin32/service.cpp:180 +#, fuzzy +msgid "Registry service not found: Bacula service not started" +msgstr "Pool Ressource \"%s\" nicht gefunden.\n" + +#: src/win32/libwin32/service.cpp:182 +#, fuzzy +msgid "Registry service entry point not found" +msgstr "Pool Ressource \"%s\" nicht gefunden.\n" + +#: src/win32/libwin32/service.cpp:204 +msgid "Report Service failure" +msgstr "" + +#: src/win32/libwin32/service.cpp:235 +#, fuzzy +msgid "Unable to install the service" +msgstr "Konnte bootstrap Datei %s nicht erzeugen. ERR=%s\n" + +#: src/win32/libwin32/service.cpp:243 +#, fuzzy +msgid "Service command length too long" +msgstr "Job nicht gefunden: %s\n" + +#: src/win32/libwin32/service.cpp:244 +#, fuzzy +msgid "Service command length too long. Service not registered." +msgstr "Job nicht gefunden: %s\n" + +#: src/win32/libwin32/service.cpp:257 +msgid "" +"The Service Control Manager could not be contacted - the service was not " +"installed" +msgstr "" + +#: src/win32/libwin32/service.cpp:280 src/win32/libwin32/service.cpp:309 +#: src/win32/libwin32/service.cpp:355 src/win32/libwin32/service.cpp:362 +#: src/win32/libwin32/service.cpp:366 +msgid "The Bacula service: " +msgstr "" + +#: src/win32/libwin32/service.cpp:287 +msgid "" +"Provides file backup and restore services. Bacula -- the network backup " +"solution." 
+msgstr "" + +#: src/win32/libwin32/service.cpp:298 +#, fuzzy +msgid "Cannot write System Registry for " +msgstr "Kann Storage resource %s nicht finden\n" + +#: src/win32/libwin32/service.cpp:299 +#, fuzzy +msgid "" +"The System Registry could not be updated - the Bacula service was not " +"installed" +msgstr "Pool Ressource \"%s\" nicht gefunden.\n" + +#: src/win32/libwin32/service.cpp:308 +msgid "Cannot add Bacula key to System Registry" +msgstr "" + +#: src/win32/libwin32/service.cpp:319 +msgid "The " +msgstr "" + +#: src/win32/libwin32/service.cpp:373 +msgid "An existing Bacula service: " +msgstr "" + +#: src/win32/libwin32/service.cpp:381 +#, fuzzy +msgid "" +"The service Manager could not be contacted - the Bacula service was not " +"removed" +msgstr "Pool Ressource \"%s\" nicht gefunden.\n" + +#: src/win32/libwin32/service.cpp:394 +msgid "" +"Could not find registry entry.\n" +"Service probably not registerd - the Bacula service was not removed" +msgstr "" + +#: src/win32/libwin32/service.cpp:401 +#, fuzzy +msgid "Could not delete Registry key for " +msgstr "Kann Storage resource %s nicht finden\n" + +#: src/win32/libwin32/service.cpp:411 +msgid "Bacula could not be contacted, probably not running" +msgstr "" + +#: src/win32/libwin32/service.cpp:418 +msgid "The Bacula service has been removed" +msgstr "" + +#: src/win32/libwin32/service.cpp:459 +msgid "SetServiceStatus failed" +msgstr "" + +#: src/win32/libwin32/service.cpp:485 +#, c-format +msgid "" +"\n" +"\n" +"%s error: %ld at %s:%d" +msgstr "" + +#: src/win32/libwin32/service.cpp:561 +#, c-format +msgid "Locked by: %s, duration: %ld seconds\n" +msgstr "" + +#: src/win32/libwin32/service.cpp:565 +#, c-format +msgid "No longer locked\n" +msgstr "" + +#: src/win32/libwin32/service.cpp:569 +#, fuzzy +msgid "Could not lock database" +msgstr "Konnte Datenbank \"%s\" nicht ffen.\n" + +#~ msgid "Query failed: %s: ERR=%s\n" +#~ msgstr "Abfrage gescheitert: %s: ERR=%s\n" + +#~ msgid "A user name for MySQL must be supplied.\n" +#~ msgstr "Ein Benutzername fr MySQL muss angegeben werden.\n" + +#~ msgid "Unable to initialize DB lock. ERR=%s\n" +#~ msgstr "Kann DB lock nicht initialisieren. ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "Unable to connect to MySQL server.\n" +#~ "Database=%s User=%s\n" +#~ "MySQL connect failed either server not running or your authorization is " +#~ "incorrect.\n" +#~ msgstr "" +#~ "Kann Verbindung zu MySQL Server nicht aufbauen. \n" +#~ "Datenbank=%s Benutzer=%s\n" +#~ "Der Datenbankserver luft mglicherweise nicht oder das Passwort ist " +#~ "nicht korrekt.\n" + +#~ msgid "Attribute create error. %s" +#~ msgstr "Attribute create error. %s" + +#~ msgid "A user name for PostgreSQL must be supplied.\n" +#~ msgstr "Ein Benutzername fr PostgreSQL muss angegeben werden.\n" + +#~ msgid "error fetching row: %s\n" +#~ msgstr "Fehler beim holen von Zeile: %s\n" + +#, fuzzy +#~ msgid "" +#~ "Unable to connect to PostgreSQL server. 
Database=%s User=%s\n" +#~ "Possible causes: SQL server not running; password incorrect; " +#~ "max_connections exceeded.\n" +#~ msgstr "" +#~ "Kann Verbindung zu PostgreSQL Server nicht aufbauen.\n" +#~ "Datenbank=%s Benutzer=%s\n" +#~ "Der Datenbankserver luft mglicherweise nicht oder das Passwort ist " +#~ "nicht korrekt.\n" + +#, fuzzy +#~ msgid "Fetch failed: ERR=%s\n" +#~ msgstr "pthread_create: ERR=%s\n" + +#~ msgid "error fetching currval: %s\n" +#~ msgstr "Fehler beim Holen des aktuellen Wertes: %s\n" + +#, fuzzy +#~ msgid "error starting batch mode: %s" +#~ msgstr "Fehler beim holen von Zeile: %s\n" + +#, fuzzy +#~ msgid "error ending batch mode: %s" +#~ msgstr "Fehler beim holen von Zeile: %s\n" + +#, fuzzy +#~ msgid "error copying in batch mode: %s" +#~ msgstr "Fehler beim holen von Zeile: %s\n" + +#~ msgid "" +#~ "query %s failed:\n" +#~ "%s\n" +#~ msgstr "" +#~ "Abfrage %s gescheitert:\n" +#~ "%s\n" + +#~ msgid "" +#~ "insert %s failed:\n" +#~ "%s\n" +#~ msgstr "" +#~ "einfgen %s gescheitert:\n" +#~ "%s\n" + +#~ msgid "Insertion problem: affected_rows=%s\n" +#~ msgstr "Problem beim Einfgen: affected_rows=%s\n" + +#~ msgid "" +#~ "update %s failed:\n" +#~ "%s\n" +#~ msgstr "" +#~ "Aktualisierung %s gescheitert:\n" +#~ "%s\n" + +#, fuzzy +#~ msgid "Update failed: affected_rows=%s for %s\n" +#~ msgstr "Aktualisierungsproblem: affected_rows=%s\n" + +#~ msgid "" +#~ "delete %s failed:\n" +#~ "%s\n" +#~ msgstr "" +#~ "lschen von %s fehlgeschlagen:\n" +#~ "%s\n" + +#~ msgid "Path length is zero. File=%s\n" +#~ msgstr "Pfadlnge ist null. Datei=%s\n" + +#~ msgid "No results to list.\n" +#~ msgstr "Keine Ergebnisse zu \"list\" Kommando.\n" + +#, fuzzy +#~ msgid "Could not init database batch connection\n" +#~ msgstr "Konnte Datenbank \"%s\" nicht ffen.\n" + +#, fuzzy +#~ msgid "Could not open database \"%s\": ERR=%s\n" +#~ msgstr "Konnte Datenbank \"%s\" nicht ffen.\n" + +#~ msgid "Create DB Job record %s failed. ERR=%s\n" +#~ msgstr "Erzeugung von DB Job Eintrag %s fehlgeschlagen. ERR=%s\n" + +#~ msgid "Create JobMedia record %s failed: ERR=%s\n" +#~ msgstr "Erzeugung von JobMedia Eintrag %s fehlgeschlagen: ERR=%s\n" + +#~ msgid "Update Media record %s failed: ERR=%s\n" +#~ msgstr "Aktualisierung von Media Eintrag %s fehlgeschlagen: ERR=%s\n" + +#~ msgid "pool record %s already exists\n" +#~ msgstr "pool Eintrag %s bereits vorhanden\n" + +#~ msgid "Create db Pool record %s failed: ERR=%s\n" +#~ msgstr "Erzeugung von db Pool Eintrag %s fehlgeschlagen: ERR=%s\n" + +#~ msgid "Device record %s already exists\n" +#~ msgstr "Device Eintrag %s bereits vorhanden\n" + +#~ msgid "Create db Device record %s failed: ERR=%s\n" +#~ msgstr "Erzeugung von db Device Eintrag %s fehlgeschlagen: ERR=%s\n" + +#~ msgid "More than one Storage record!: %d\n" +#~ msgstr "Mehr als ein Storage Eintrag!: %d\n" + +#~ msgid "error fetching Storage row: %s\n" +#~ msgstr "Fehler beim holen der Storage Zeile: %s\n" + +#~ msgid "Create DB Storage record %s failed. ERR=%s\n" +#~ msgstr "Erzeugung des DB Storage Eintrags %s fehlgeschlagen. ERR=%s\n" + +#~ msgid "mediatype record %s already exists\n" +#~ msgstr "Medientyp Eintrag %s bereits vorhanden\n" + +#~ msgid "Create db mediatype record %s failed: ERR=%s\n" +#~ msgstr "Erzeuge db Medientyp Eintrag %s fehlgeschlagen: ERR=%s\n" + +#~ msgid "Volume \"%s\" already exists.\n" +#~ msgstr "Volume \"%s\" bereits vorhanden.\n" + +#~ msgid "Create DB Media record %s failed. ERR=%s\n" +#~ msgstr "Erzeugung DB Media Eintrag %s fehlgeschlagen. 
ERR=%s\n"
+
+#~ msgid "More than one Client!: %d\n"
+#~ msgstr "Mehr als ein Client!: %d\n"
+
+#~ msgid "error fetching Client row: %s\n"
+#~ msgstr "Fehler beim holen der Client Zeile: %s\n"
+
+#~ msgid "Create DB Client record %s failed. ERR=%s\n"
+#~ msgstr "Erzeugung des DB Client Eintrags %s fehlgeschlagen. ERR=%s\n"
+
+#~ msgid "More than one Path!: %s for path: %s\n"
+#~ msgstr "Mehr als ein Pfad!: %s für Pfad: %s\n"
+
+#~ msgid "Create db Path record %s failed. ERR=%s\n"
+#~ msgstr "Erzeugung des db Path Eintrags %s fehlgeschlagen. ERR=%s\n"
+
+#~ msgid "Create DB Counters record %s failed. ERR=%s\n"
+#~ msgstr "Erzeugung des DB Counters Eintrag %s fehlgeschlagen. ERR=%s\n"
+
+#~ msgid "More than one FileSet!: %d\n"
+#~ msgstr "Mehr als ein FileSet!: %d\n"
+
+#~ msgid "error fetching FileSet row: ERR=%s\n"
+#~ msgstr "Fehler beim holen der FileSet Zeile: ERR=%s\n"
+
+#~ msgid "Create DB FileSet record %s failed. ERR=%s\n"
+#~ msgstr "Erzeugung des DB FileSet Eintrags %s fehlgeschlagen. ERR=%s\n"
+
+#~ msgid "Create db File record %s failed. ERR=%s"
+#~ msgstr "Erzeugung des db File Eintrags %s fehlgeschlagen. ERR=%s"
+
+#~ msgid "More than one Filename! %s for file: %s\n"
+#~ msgstr "Mehr als ein Dateiname! %s für Datei: %s\n"
+
+#~ msgid "Error fetching row for file=%s: ERR=%s\n"
+#~ msgstr "Fehler beim Holen der Zeile für Datei=%s: ERR=%s\n"
+
+#~ msgid "Create db Filename record %s failed. ERR=%s\n"
+#~ msgstr "Erzeugung des db Filename Eintrags %s fehlgeschlagen. ERR=%s\n"
+
+#~ msgid "Attempt to put non-attributes into catalog. Stream=%d\n"
+#~ msgstr "Versuche \"non-attributes\" in catalog einzufügen. Stream=%d\n"
+
+#, fuzzy
+#~ msgid "Create db Object record %s failed. ERR=%s"
+#~ msgstr "Erzeugung des db File Eintrags %s fehlgeschlagen. 
ERR=%s" + +#~ msgid "No pool record %s exists\n" +#~ msgstr "Kein pool Eintrag %s vorhanden\n" + +#~ msgid "Expecting one pool record, got %d\n" +#~ msgstr "Erwartete einen \"pool\" Eintrag, erhielt %d\n" + +#~ msgid "Error fetching row %s\n" +#~ msgstr "Fehler beim Holen der Zeile %s\n" + +#, fuzzy +#~ msgid "" +#~ "Query error for end time request: ERR=%s\n" +#~ "CMD=%s\n" +#~ msgstr "" +#~ "Fehler bei Abfrage von Startzeit: ERR=%s\n" +#~ "CMD=%s\n" + +#, fuzzy +#~ msgid "No prior backup Job record found.\n" +#~ msgstr "Keinen vorherigen \"Full backup\" Job Eintrag gefunden.\n" + +#~ msgid "" +#~ "Query error for start time request: ERR=%s\n" +#~ "CMD=%s\n" +#~ msgstr "" +#~ "Fehler bei Abfrage von Startzeit: ERR=%s\n" +#~ "CMD=%s\n" + +#~ msgid "No prior Full backup Job record found.\n" +#~ msgstr "Keinen vorherigen \"Full backup\" Job Eintrag gefunden.\n" + +#~ msgid "Unknown level=%d\n" +#~ msgstr "Unbekannter level=%d\n" + +#~ msgid "" +#~ "No Job record found: ERR=%s\n" +#~ "CMD=%s\n" +#~ msgstr "" +#~ "Kein \"Job\" Eintrag gefunden: ERR=%s\n" +#~ "CMD=%s\n" + +#~ msgid "Unknown Job level=%d\n" +#~ msgstr "Unbekannter Job level=%d\n" + +#~ msgid "No Job found for: %s.\n" +#~ msgstr "Kein Job gefunden fr: %s.\n" + +#~ msgid "No Job found for: %s\n" +#~ msgstr "Keinen Job gefunden fr: %s\n" + +#~ msgid "Request for Volume item %d greater than max %d or less than 1\n" +#~ msgstr "" +#~ "Anforderung von \"Volume item\" %d grer als Max %d oder weniger als 1\n" + +#, fuzzy +#~ msgid "No Volume record found for item %d.\n" +#~ msgstr "Kein Volume Eintrag gefunden fr item %d.\n" + +#~ msgid "Error fetching row: %s\n" +#~ msgstr "Fehler beim holen der Zeile: %s\n" + +#, fuzzy +#~ msgid "get_file_record want 1 got rows=%d PathId=%s FilenameId=%s\n" +#~ msgstr "get_file_record erwartet 1 erhalten rows=%d\n" + +#, fuzzy +#~ msgid "File record for PathId=%s FilenameId=%s not found.\n" +#~ msgstr "File Eintrag fr PathId=%s FilenameId=%s nicht gefunden.\n" + +#~ msgid "File record not found in Catalog.\n" +#~ msgstr "File Eintrag nicht im Catalog gefunden.\n" + +#~ msgid "More than one Filename!: %s for file: %s\n" +#~ msgstr "Mehr als einen Dateinamen gefunden! 
: %s fr Datei: %s\n" + +#, fuzzy +#~ msgid "Get DB Filename record %s found bad record: %d\n" +#~ msgstr "Get DB Filename record %s hat fehlerhaften Eintrag gefunden: %d\n" + +#~ msgid "Filename record: %s not found.\n" +#~ msgstr "Filename record: %s nicht gefunden.\n" + +#~ msgid "Filename record: %s not found in Catalog.\n" +#~ msgstr "Filename record: %s nicht in Catalog gefunden.\n" + +#~ msgid "Get DB path record %s found bad record: %s\n" +#~ msgstr "Get DB path record %s fehlerhaften Eintrag gefunden: %s\n" + +#~ msgid "Path record: %s not found.\n" +#~ msgstr "Path record: %s nicht gefunden.\n" + +#~ msgid "Path record: %s not found in Catalog.\n" +#~ msgstr "Path record: %s nicht in Catalog gefunden.\n" + +#~ msgid "No Job found for JobId %s\n" +#~ msgstr "Kein Job fr JobId %s gefunden\n" + +#~ msgid "No volumes found for JobId=%d\n" +#~ msgstr "Keine volumes fr JobId=%d gefunden\n" + +#~ msgid "Error fetching row %d: ERR=%s\n" +#~ msgstr "Fehler beim Holen von Zeile %d: ERR=%s\n" + +#~ msgid "No Volume for JobId %d found in Catalog.\n" +#~ msgstr "Kein Volume fr JobId %d in Catalog gefunden.\n" + +#~ msgid "Pool id select failed: ERR=%s\n" +#~ msgstr "Pool id select fehlgeschlagen: ERR=%s\n" + +#~ msgid "Client id select failed: ERR=%s\n" +#~ msgstr "Client id select fehlgeschlagen: ERR=%s\n" + +#, fuzzy +#~ msgid "More than one Pool! Num=%s\n" +#~ msgstr "Mehr als ein Pool!: %s\n" + +#~ msgid "Pool record not found in Catalog.\n" +#~ msgstr "Pool Eintrag in Catalog nicht gefunden.\n" + +#, fuzzy +#~ msgid "Error got %s RestoreObjects but expected only one!\n" +#~ msgstr "Fehler erhalten %s FileSets aber nur einen erwartet!\n" + +#, fuzzy +#~ msgid "RestoreObject record \"%d\" not found.\n" +#~ msgstr "FileSet Eintrag \"%s\" nicht gefunden\n" + +#, fuzzy +#~ msgid "RestoreObject record not found in Catalog.\n" +#~ msgstr "FileSet Eintrag in Catalog nicht gefunden.\n" + +#~ msgid "More than one Client!: %s\n" +#~ msgstr "Mehr als ein Client!: %s\n" + +#~ msgid "Client record not found in Catalog.\n" +#~ msgstr "Client Eintrag nicht in Catalog gefunden\n" + +#~ msgid "More than one Counter!: %d\n" +#~ msgstr "Mehr als ein Counter!: %d\n" + +#~ msgid "error fetching Counter row: %s\n" +#~ msgstr "Fehler beim Holen der Counter Zeile: %s\n" + +#~ msgid "Counter record: %s not found in Catalog.\n" +#~ msgstr "Counter Eintrag: %s in Catalog nicht gefunden.\n" + +#~ msgid "Error got %s FileSets but expected only one!\n" +#~ msgstr "Fehler erhalten %s FileSets aber nur einen erwartet!\n" + +#~ msgid "FileSet record \"%s\" not found.\n" +#~ msgstr "FileSet Eintrag \"%s\" nicht gefunden\n" + +#~ msgid "FileSet record not found in Catalog.\n" +#~ msgstr "FileSet Eintrag in Catalog nicht gefunden.\n" + +#~ msgid "Media id select failed: ERR=%s\n" +#~ msgstr "Media id select fehlgeschlagen: ERR=%s\n" + +#, fuzzy +#~ msgid "query dbids failed: ERR=%s\n" +#~ msgstr "Abfrage gescheitert: %s: ERR=%s\n" + +#~ msgid "More than one Volume!: %s\n" +#~ msgstr "Mehr als ein Volume!: %s\n" + +#, fuzzy +#~ msgid "Media record with MediaId=%s not found.\n" +#~ msgstr "Media Eintrag MediaId=%s nicht gefunden.\n" + +#, fuzzy +#~ msgid "Media record for Volume name \"%s\" not found.\n" +#~ msgstr "Media Eintrag fr Volume \"%s\" nicht gefunden.\n" + +#~ msgid "Media record for MediaId=%u not found in Catalog.\n" +#~ msgstr "Media Eintrag fr MediaId=%u in Catalog nicht gefunden.\n" + +#, fuzzy +#~ msgid "Media record for Volume Name \"%s\" not found in Catalog.\n" +#~ msgstr "Media Eintrag fr Vol=%s in Catalog 
nicht gefunden.\n" + +#, fuzzy +#~ msgid "More than one Snapshot!: %s\n" +#~ msgstr "Mehr als ein Client!: %s\n" + +#, fuzzy +#~ msgid "Snapshot record with SnapshotId=%s not found.\n" +#~ msgstr "Media Eintrag MediaId=%s nicht gefunden.\n" + +#, fuzzy +#~ msgid "Snapshot record for Snapshot name \"%s\" not found.\n" +#~ msgstr "Media Eintrag fr Volume \"%s\" nicht gefunden.\n" + +#, fuzzy +#~ msgid "More than one Result!: %s\n" +#~ msgstr "Mehr als ein Client!: %s\n" + +#~ msgid "Query failed: %s\n" +#~ msgstr "Abfrage fehlgeschlagen: %s\n" + +#~ msgid "Database %s does not exist, please create it.\n" +#~ msgstr "Datenbank %s existiert nicht, bitte erzeugen.\n" + +#~ msgid "Unable to open Database=%s. ERR=%s\n" +#~ msgstr "Kann Datenbank=%s.nicht ffen. ERR=%s\n" + +#~ msgid "unknown" +#~ msgstr "unbekannt" + +#~ msgid "Authorization problem: Remote server requires TLS.\n" +#~ msgstr "Authorisationsproblem: Enfernter Server erfordert TLS.\n" + +#, fuzzy +#~ msgid "Director rejected Hello command\n" +#~ msgstr "File daemon hat Hello Kommando abgelehnt\n" + +#, fuzzy +#~ msgid "" +#~ "Director authorization problem.\n" +#~ "Most likely the passwords do not agree.\n" +#~ "If you are using TLS, there may have been a certificate validation error " +#~ "during the TLS handshake.\n" +#~ "For help, please see " +#~ msgstr "" +#~ "Kann mit File daemon on \"%s:%d\" nicht authentisieren. Mgliche " +#~ "Ursachen:\n" +#~ "Passworte oder Namen nicht gleich oder\n" +#~ "Maximum Concurrent Jobs berschritten auf dem FD oder\n" +#~ "FD Netzwerk durcheinander (Daemon neustarten).\n" +#~ "Fr Hilfe bitte unter http://www.bacula.org/rel-manual/faq." +#~ "html#AuthorizationErrors nachsehen.\n" + +#, fuzzy +#~ msgid "Please use valid -l argument: %s\n" +#~ msgstr "FileSet: name=%s\n" + +#, fuzzy +#~ msgid "sleep specified time" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "Illegal separator character.\n" +#~ msgstr "Illegales Zeichen in \"Volume name\" \"%s\"\n" + +#, fuzzy +#~ msgid "Can't find %s in Director list\n" +#~ msgstr "Kann Director resource %s nicht finden\n" + +#, fuzzy +#~ msgid "Can't find %s in Console list\n" +#~ msgstr "Kann Director resource %s nicht finden\n" + +#, fuzzy +#~ msgid "Connecting to Director %s:%d\n" +#~ msgstr "Storage daemon" + +#, fuzzy +#~ msgid "Cannot open file %s for input. ERR=%s\n" +#~ msgstr "Kann bootstrap Datei nicht ffnen: %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot open file %s for output. ERR=%s\n" +#~ msgstr "Kann bootstrap Datei nicht ffnen: %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot popen(\"%s\", \"r\"): ERR=%s\n" +#~ msgstr "Kann Ausdruck\"%s\"nicht auflsen: ERR=%s\n" + +#, fuzzy +#~ msgid "@exec error: ERR=%s\n" +#~ msgstr "Attribute create error. %s" + +#, fuzzy +#~ msgid "Console: name=%s rcfile=%s histfile=%s\n" +#~ msgstr "Console: name=%s SSL=%d\n" + +#~ msgid "Start Admin JobId %d, Job=%s\n" +#~ msgstr "Starte Admin JobId %d, Job=%s\n" + +#, fuzzy +#~ msgid "Error getting Job record for Job report: ERR=%s" +#~ msgstr "Fehler beim Holen des job Eintrags fr den job Bericht: %s" + +#~ msgid "Admin OK" +#~ msgstr "Admin OK" + +#~ msgid "*** Admin Error ***" +#~ msgstr "*** Admin Fehler ***" + +#~ msgid "Admin Canceled" +#~ msgstr "Admin abgebrochen" + +#~ msgid "Inappropriate term code: %c\n" +#~ msgstr "Unangebrachter Beendigungskode: %c\n" + +#~ msgid "Error sending Hello to Storage daemon. ERR=%s\n" +#~ msgstr "Fehler beim senden von \"Hello\" an Storage daemon. 
ERR=%s\n"
+
+#~ msgid "Director and Storage daemon passwords or names not the same.\n"
+#~ msgstr "Director und Storage daemon Passworte oder Namen nicht gleich.\n"
+
+#, fuzzy
+#~ msgid ""
+#~ "Director unable to authenticate with Storage daemon at \"%s:%d\". "
+#~ "Possible causes:\n"
+#~ "Passwords or names not the same or\n"
+#~ "Maximum Concurrent Jobs exceeded on the SD or\n"
+#~ "SD networking messed up (restart daemon).\n"
+#~ "For help, please see: "
+#~ msgstr ""
+#~ "Director kann sich nicht an Storage daemon an \"%s:%d\" authentisieren. "
+#~ "Mögliche Ursachen:\n"
+#~ "Passworte oder Namen nicht gleich oder\n"
+#~ "Maximum Concurrent Jobs überschritten am SD oder\n"
+#~ "SD Netzwerk durcheinander (daemon neustarten).\n"
+#~ "Für Hilfe bitte unter http://www.bacula.org/rel-manual/faq."
+#~ "html#AuthorizationErrors nachsehen.\n"
+
+#, fuzzy
+#~ msgid "TLS negotiation failed with SD at \"%s:%d\"\n"
+#~ msgstr "TLS Aushandlung fehlgeschlagen mit SD an \"%s:%d\"\n"
+
+#~ msgid "bdird] [config_file]\n"
+#~ " -r get resource type \n"
+#~ " -n get resource \n"
+#~ " -l get only directives matching dirs (use with -r)\n"
+#~ " -D get only data\n"
+#~ " -R do not apply JobDefs to Job\n"
+#~ " -c set configuration file to file\n"
+#~ " -d set debug level to \n"
+#~ " -dt print timestamp in debug output\n"
+#~ " -t test - read configuration and exit\n"
+#~ " -s output in show text format\n"
+#~ " -v verbose user messages\n"
+#~ " -? print this message.\n"
+#~ "\n"
+#~ msgstr ""
+#~ "\n"
+#~ "Version: %s (%s)\n"
+#~ "\n"
+#~ "Usage: dird [-f -s] [-c config_file] [-d debug_level] [config_file]\n"
+#~ " -c setze Konfigurationsdatei auf Datei\n"
+#~ " -dnn setze debug level auf nn\n"
+#~ " -f starte im Vordergrund (für debugging Zwecke)\n"
+#~ " -g groupid\n"
+#~ " -r starte jetzt\n"
+#~ " -s no signals\n"
+#~ " -t test - Konfiguration Lesen und beenden\n"
+#~ " -u userid\n"
+#~ " -v ausführliche Benutzer Meldungen\n"
+#~ " -? diese Meldung ausgeben.\n"
+#~ "\n"
+
+#~ msgid ""
+#~ "No Director resource defined in %s\n"
+#~ "Without that I don't know who I am :-(\n"
+#~ msgstr ""
+#~ "Keine Director resource definiert in %s\n"
+#~ "Ohne dies weiss ich nicht wer ich bin :-(\n"
+
+#~ msgid "No Messages resource defined in %s\n"
+#~ msgstr "Keine Messages resource definiert in %s\n"
+
+#~ msgid "Only one Director resource permitted in %s\n"
+#~ msgstr "Nur eine Director resource erlaubt in %s\n"
+
+#, fuzzy
+#~ msgid "\"TLS Certificate\" file not defined for Director \"%s\" in %s.\n"
+#~ msgstr "\"TLS Key\" Datei nicht für Director definiert \"%s\" in %s.\n"
+
+#~ msgid "\"TLS Key\" file not defined for Director \"%s\" in %s.\n"
+#~ msgstr "\"TLS Key\" Datei nicht für Director definiert \"%s\" in %s.\n"
+
+#~ msgid ""
+#~ "Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined "
+#~ "for Director \"%s\" in %s. At least one CA certificate store is required "
+#~ "when using \"TLS Verify Peer\".\n"
+#~ msgstr ""
+#~ "Weder \"TLS CA Certificate\" noch \"TLS CA Certificate Dir\" sind für "
+#~ "Director \"%s\" in %s definiert. 
Mindestens ein CA certificate store wird "
+#~ "benötigt wenn \"TLS Verify Peer\" eingesetzt wird.\n"
+
+#~ msgid "\"TLS Certificate\" file not defined for Console \"%s\" in %s.\n"
+#~ msgstr ""
+#~ "\"TLS Certificate\" Datei nicht definiert für Console \"%s\" in %s.\n"
+
+#~ msgid "\"TLS Key\" file not defined for Console \"%s\" in %s.\n"
+#~ msgstr "\"TLS Key\" Datei nicht definiert für Console \"%s\" in %s.\n"
+
+#~ msgid ""
+#~ "Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined "
+#~ "for Console \"%s\" in %s. At least one CA certificate store is required "
+#~ "when using \"TLS Verify Peer\".\n"
+#~ msgstr ""
+#~ "Weder \"TLS CA Certificate\" noch \"TLS CA Certificate Dir\" sind "
+#~ "definiert für Console \"%s\" in %s. Mindestens ein CA certificate store "
+#~ "wird benötigt bei Einsatz von \"TLS Verify Peer\".\n"
+
+#~ msgid "Failed to initialize TLS context for File daemon \"%s\" in %s.\n"
+#~ msgstr ""
+#~ "Konnte TLS context für File daemon \"%s\" in %s nicht initialisieren.\n"
+
+#~ msgid ""
+#~ "Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined "
+#~ "for File daemon \"%s\" in %s.\n"
+#~ msgstr ""
+#~ "Weder \"TLS CA Certificate\" noch \"TLS CA Certificate Dir\" sind "
+#~ "definiert für File daemon \"%s\" in %s.\n"
+
+#~ msgid "No Job records defined in %s\n"
+#~ msgstr "Keine Job records definiert in %s\n"
+
+#~ msgid "Hey something is wrong. p=0x%lu\n"
+#~ msgstr "Hey etwas läuft schief. p=0x%lu\n"
+
+#~ msgid ""
+#~ "\"%s\" directive in Job \"%s\" resource is required, but not found.\n"
+#~ msgstr ""
+#~ "\"%s\" Directive in Job \"%s\" resource wird benötigt, wurde aber nicht "
+#~ "gefunden.\n"
+
+#~ msgid "Too many items in Job resource\n"
+#~ msgstr "zu viele items in Job resource\n"
+
+#~ msgid "No storage specified in Job \"%s\" nor in Pool.\n"
+#~ msgstr "Weder storage noch Pool in Job \"%s\" definiert.\n"
+
+#~ msgid "Unable to get Job record. ERR=%s\n"
+#~ msgstr "Konnte Job Eintrag nicht holen. ERR=%s\n"
+
+#~ msgid "Unable to get Job Volume Parameters. ERR=%s\n"
+#~ msgstr "Konnte Job Volume Parameter nicht holen. ERR=%s\n"
+
+#~ msgid "Unable to create bootstrap file %s. ERR=%s\n"
+#~ msgstr "Konnte bootstrap Datei %s nicht erzeugen. ERR=%s\n"
+
+#, fuzzy
+#~ msgid "No files found to read. No bootstrap file written.\n"
+#~ msgstr ""
+#~ "Keine Dateien für Wiederherstellung/Migration gefunden. 
Keine Bootstrap " +#~ "Datei geschrieben.\n" + +#~ msgid "Error writing bsr file.\n" +#~ msgstr "Fehler beim schreiben der bsr Datei.\n" + +#, fuzzy +#~ msgid "" +#~ "The Job will require the following (*=>InChanger):\n" +#~ " Volume(s) Storage(s) SD Device(s)\n" +#~ "===========================================================================\n" +#~ msgstr "" +#~ "Der Job wird folgendes bentigen:\n" +#~ " Volume(s) Storage(s) SD Device(s)\n" +#~ "===========================================================================\n" + +#~ msgid "No Volumes found to restore.\n" +#~ msgstr "Keine Volumes zum Wiederherstellen gefunden.\n" + +#~ msgid "1990 Invalid Catalog Request: %s" +#~ msgstr "1990 Ungltige Catalog Anfrage: %s" + +#~ msgid "Invalid Catalog request; DB not open: %s" +#~ msgstr "Ungltige Catalog Anfrage; DB nicht offen: %s" + +#~ msgid "1901 No Media.\n" +#~ msgstr "1901 Keine Medien.\n" + +#~ msgid "not in Pool" +#~ msgstr "nicht in Pool" + +#~ msgid "not correct MediaType" +#~ msgstr "nicht korrekter Medientyp" + +#~ msgid "is not Enabled" +#~ msgstr "ist nicht aktiviert" + +#, fuzzy +#~ msgid "1998 Volume \"%s\" catalog status is %s, %s.\n" +#~ msgstr "1998 Volume \"%s\" status ist %s, %s.\n" + +#~ msgid "1997 Volume \"%s\" not in catalog.\n" +#~ msgstr "1997 Volume \"%s\" Nicht in Catalog.\n" + +#~ msgid "Unable to get Media record for Volume %s: ERR=%s\n" +#~ msgstr "Konnte Media record fr Volume %s nicht holen: ERR=%s\n" + +#~ msgid "1991 Catalog Request for vol=%s failed: %s" +#~ msgstr "1991 Catalog Anfrage fr vol=%s fehlgeschlagen: %s" + +#, fuzzy +#~ msgid "" +#~ "Attempt to set Volume Files from %u to %u for Volume \"%s\". Ignored.\n" +#~ msgstr "" +#~ "Volume Files bei %u werden auf %u fr Volumen \"%s\" gesetzt. Dies ist " +#~ "nicht Korrekt.\n" + +#~ msgid "Catalog error updating Media record. %s" +#~ msgstr "Catalog Fehler beim Aktualisieren des Media Eintrags. %s" + +#~ msgid "1993 Update Media error\n" +#~ msgstr "1993 Update Media Fehler\n" + +#~ msgid "Catalog error creating JobMedia record. %s" +#~ msgstr "Catalog Fehler beim Erzeugen des JobMedia Eintrags. %s" + +#, fuzzy +#~ msgid "1992 Create JobMedia error\n" +#~ msgstr "1991 Update JobMedia Fehler\n" + +#~ msgid "Invalid Catalog request: %s" +#~ msgstr "Ungltige Catalog Anfrage: %s" + +#, fuzzy +#~ msgid "Attribute create error: ERR=%s" +#~ msgstr "Attribute create error. %s" + +#, fuzzy +#~ msgid "Restore object create error. %s" +#~ msgstr "Attribute create error. %s" + +#, fuzzy +#~ msgid "%s not same FileIndex=%d as attributes FI=%d\n" +#~ msgstr "Erhielt %s aber nicht die gleiche Datei wie Attribute\n" + +#~ msgid "" +#~ "Catalog error updating file digest. Unsupported digest stream type: %d" +#~ msgstr "" +#~ "Catalog Fehler beim Aktualisieren des file digest. Nicht untersttzter " +#~ "digest stream typ: %d" + +#, fuzzy +#~ msgid "attribute create error. ERR=%s" +#~ msgstr "Attribute create error. %s" + +#~ msgid "Catalog error updating file digest. %s" +#~ msgstr "Catalog Fehler beim Aktualisieren des file digest. %s" + +#, fuzzy +#~ msgid "1994 Invalid Catalog Update: %s" +#~ msgstr "1991 Ungltige Catalog Aktualisierung: %s" + +#~ msgid "Invalid Catalog Update; DB not open: %s" +#~ msgstr "Ungltige Catalog Aktualisierung; DB nicht geffnet: %s" + +#, fuzzy +#~ msgid "" +#~ "fread attr spool error. Wanted %ld bytes, maximum permitted 10000000 " +#~ "bytes\n" +#~ msgstr "Regex bersetzungsfehler. ERR=%s\n" + +#, fuzzy +#~ msgid "fread attr spool error. 
Wanted %ld bytes but got %lld ERR=%s\n" +#~ msgstr "Regex bersetzungsfehler. ERR=%s\n" + +#, fuzzy +#~ msgid "fread attr spool error. ERR=%s\n" +#~ msgstr "Regex bersetzungsfehler. ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: bacula-dir [-f -s] [-c config_file] [-d debug_level] " +#~ "[config_file]\n" +#~ " -c set configuration file to file\n" +#~ " -d [,] set debug level to , debug tags to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -T set trace on\n" +#~ " -f run in foreground (for debugging)\n" +#~ " -g groupid\n" +#~ " -m print kaboom output (for debugging)\n" +#~ " -r run now\n" +#~ " -s no signals\n" +#~ " -t test - read configuration and exit\n" +#~ " -u userid\n" +#~ " -v verbose user messages\n" +#~ " -? print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Version: %s (%s)\n" +#~ "\n" +#~ "Usage: dird [-f -s] [-c config_file] [-d debug_level] [config_file]\n" +#~ " -c setze Konfigurationsdatei auf Datei\n" +#~ " -dnn setze debug level auf nn\n" +#~ " -f starte im Vordergrund (fr debugging Zwecke)\n" +#~ " -g groupid\n" +#~ " -r starte jetzt\n" +#~ " -s no signals\n" +#~ " -t test - Konfiguration Lesen und beenden\n" +#~ " -u userid\n" +#~ " -v ausfhrliche Benutzer Meldungen\n" +#~ " -? diese Meldung ausgeben.\n" +#~ "\n" + +#, fuzzy +#~ msgid "Already doing a reload request, request ignored.\n" +#~ msgstr "zu viele offene reload Anforderungen, Anforderung ignoriert.\n" + +#~ msgid "Too many open reload requests. Request ignored.\n" +#~ msgstr "zu viele offene reload Anforderungen, Anforderung ignoriert.\n" + +#~ msgid "Out of reload table entries. Giving up.\n" +#~ msgstr "Keine reload table Eintrge brig. Gebe auf.\n" + +#~ msgid "Resetting previous configuration.\n" +#~ msgstr "Vorherige Konfiguration zurckgesetzt.\n" + +#~ msgid "Failed to initialize TLS context for Director \"%s\" in %s.\n" +#~ msgstr "" +#~ "Konnte TLS context fr Director nicht initialisieren \"%s\" in %s.\n" + +#, fuzzy +#~ msgid "PoolType required in Pool resource \"%s\".\n" +#~ msgstr "Kann Pool resource %s nicht finden\n" + +#, fuzzy +#~ msgid "Invalid PoolType \"%s\" in Pool resource \"%s\".\n" +#~ msgstr "Kann Pool resource %s nicht finden\n" + +#, fuzzy +#~ msgid "NextPool \"Scratch\" not valid in Pool \"%s\".\n" +#~ msgstr "Keine \"Next Pool\" Spezifikation in Pool \"%s\" gefunden.\n" + +#~ msgid "Could not open Catalog \"%s\", database \"%s\".\n" +#~ msgstr "Konnte Catalog \"%s\", database \"%s\" nicht ffnen.\n" + +#, fuzzy +#~ msgid "Could not create storage record for %s\n" +#~ msgstr "Kann Storage resource %s nicht finden\n" + +#, fuzzy +#~ msgid "Could not update storage record for %s\n" +#~ msgstr "Kann Storage resource %s nicht finden\n" + +#~ msgid "" +#~ "Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined " +#~ "for Storage \"%s\" in %s.\n" +#~ msgstr "" +#~ "Weder \"TLS CA Certificate\" noch \"TLS CA Certificate Dir\" sind " +#~ "definiert frStorage \"%s\" in %s.\n" + +#, fuzzy +#~ msgid "Failed to initialize TLS context for Storage \"%s\" in %s.\n" +#~ msgstr "" +#~ "Konnte TLS context fr Director nicht initialisieren \"%s\" in %s.\n" + +#~ msgid "Could not compile regex pattern \"%s\" ERR=%s\n" +#~ msgstr "Konnte regex pattern \"%s\" nicht kompilieren ERR=%s\n" + +#, fuzzy +#~ msgid "Director: name=%s MaxJobs=%d FDtimeout=%s SDtimeout=%s\n" +#~ msgstr "Director: name=%s MaxJobs=%d FDtimeout=%s SDtimeout=%s\n" + +#~ msgid " query_file=%s\n" +#~ msgstr " query_file=%s\n" + +#~ msgid " --> " +#~ msgstr " --> 
" + +#~ msgid "Console: name=%s SSL=%d\n" +#~ msgstr "Console: name=%s SSL=%d\n" + +#~ msgid "Counter: name=%s min=%d max=%d cur=%d wrapcntr=%s\n" +#~ msgstr "Counter: name=%s min=%d max=%d cur=%d wrapcntr=%s\n" + +#~ msgid "Counter: name=%s min=%d max=%d\n" +#~ msgstr "Counter: name=%s min=%d max=%d\n" + +#, fuzzy +#~ msgid "" +#~ "Client: Name=%s Enabled=%d Address=%s FDport=%d MaxJobs=%u NumJobs=%u\n" +#~ msgstr "Client: name=%s address=%s FDport=%d MaxJobs=%u\n" + +#~ msgid " JobRetention=%s FileRetention=%s AutoPrune=%d\n" +#~ msgstr " JobRetention=%s FileRetention=%s AutoPrune=%d\n" + +#~ msgid "" +#~ "Device: name=%s ok=%d num_writers=%d max_writers=%d\n" +#~ " reserved=%d open=%d append=%d read=%d labeled=%d offline=%d " +#~ "autochgr=%d\n" +#~ " poolid=%s volname=%s MediaType=%s\n" +#~ msgstr "" +#~ "Device: name=%s ok=%d num_writers=%d max_writers=%d\n" +#~ " reserved=%d open=%d append=%d read=%d labeled=%d offline=%d " +#~ "autochgr=%d\n" +#~ " poolid=%s volname=%s MediaType=%s\n" + +#, fuzzy +#~ msgid "" +#~ "%s: name=%s address=%s SDport=%d MaxJobs=%u NumJobs=%u\n" +#~ " DeviceName=%s MediaType=%s StorageId=%s Autochanger=%d\n" +#~ msgstr "" +#~ "Storage: name=%s address=%s SDport=%d MaxJobs=%u\n" +#~ " DeviceName=%s MediaType=%s StorageId=%s\n" + +#, fuzzy +#~ msgid " Parent --> " +#~ msgstr " --> " + +#, fuzzy +#~ msgid "" +#~ "Catalog: name=%s address=%s DBport=%d db_name=%s\n" +#~ " db_driver=%s db_user=%s MutliDBConn=%d\n" +#~ msgstr "" +#~ "Catalog: name=%s address=%s DBport=%d db_name=%s\n" +#~ " db_user=%s MutliDBConn=%d\n" + +#~ msgid "%s: name=%s JobType=%d level=%s Priority=%d Enabled=%d\n" +#~ msgstr "%s: name=%s JobType=%d level=%s Priority=%d Enabled=%d\n" + +#~ msgid "Job" +#~ msgstr "Job" + +#~ msgid "JobDefs" +#~ msgstr "JobDefs" + +#, fuzzy +#~ msgid "" +#~ " MaxJobs=%u NumJobs=%u Resched=%d Times=%d Interval=%s Spool=%d " +#~ "WritePartAfterJob=%d\n" +#~ msgstr "" +#~ " MaxJobs=%u Resched=%d Times=%d Interval=%s Spool=%d " +#~ "WritePartAfterJob=%d\n" + +#, fuzzy +#~ msgid " SpoolSize=%s\n" +#~ msgstr " opcmd=%s\n" + +#, fuzzy +#~ msgid " Accurate=%d\n" +#~ msgstr " SelectionType=%d\n" + +#~ msgid " SelectionType=%d\n" +#~ msgstr " SelectionType=%d\n" + +#, fuzzy +#~ msgid " PrefixLinks=%d\n" +#~ msgstr " mins=%d\n" + +#~ msgid " --> Where=%s\n" +#~ msgstr " --> Where=%s\n" + +#, fuzzy +#~ msgid " --> RegexWhere=%s\n" +#~ msgstr " --> Where=%s\n" + +#~ msgid " --> Bootstrap=%s\n" +#~ msgstr " --> Bootstrap=%s\n" + +#~ msgid " --> WriteBootstrap=%s\n" +#~ msgstr " --> WriteBootstrap=%s\n" + +#, fuzzy +#~ msgid " --> PluginOptions=%s\n" +#~ msgstr " --> Run=%s\n" + +#, fuzzy +#~ msgid " --> MaxRunTime=%u\n" +#~ msgstr " --> RunWhen=%u\n" + +#, fuzzy +#~ msgid " --> MaxWaitTime=%u\n" +#~ msgstr " --> RunWhen=%u\n" + +#, fuzzy +#~ msgid " --> MaxStartDelay=%u\n" +#~ msgstr " --> Target=%s\n" + +#, fuzzy +#~ msgid " --> MaxRunSchedTime=%u\n" +#~ msgstr " --> RunWhen=%u\n" + +#, fuzzy +#~ msgid " --> Base %s\n" +#~ msgstr " --> Target=%s\n" + +#~ msgid " --> RunScript\n" +#~ msgstr " --> RunScript\n" + +#~ msgid " --> Command=%s\n" +#~ msgstr " --> Command=%s\n" + +#~ msgid " --> Target=%s\n" +#~ msgstr " --> Target=%s\n" + +#~ msgid " --> RunOnSuccess=%u\n" +#~ msgstr " --> RunOnSuccess=%u\n" + +#~ msgid " --> RunOnFailure=%u\n" +#~ msgstr " --> RunOnFailure=%u\n" + +#, fuzzy +#~ msgid " --> FailJobOnError=%u\n" +#~ msgstr " --> AbortJobOnError=%u\n" + +#~ msgid " --> RunWhen=%u\n" +#~ msgstr " --> RunWhen=%u\n" + +#, fuzzy +#~ msgid " --> Next" +#~ msgstr " --> 
" + +#~ msgid " --> Run=%s\n" +#~ msgstr " --> Run=%s\n" + +#~ msgid " --> SelectionPattern=%s\n" +#~ msgstr " --> SelectionPattern=%s\n" + +#, fuzzy +#~ msgid "Schedule: Name=%s Enabled=%d\n" +#~ msgstr "Schedule: name=%s\n" + +#~ msgid " --> Run Level=%s\n" +#~ msgstr " --> Run Level=%s\n" + +#, fuzzy +#~ msgid " MaxRunSchedTime=%u\n" +#~ msgstr " --> RunWhen=%u\n" + +#, fuzzy +#~ msgid " Priority=%u\n" +#~ msgstr " woy=" + +#~ msgid " hour=" +#~ msgstr " hour=" + +#~ msgid " mday=" +#~ msgstr " mday=" + +#~ msgid " month=" +#~ msgstr " month=" + +#~ msgid " wday=" +#~ msgstr " wday=" + +#~ msgid " wom=" +#~ msgstr " wom=" + +#~ msgid " woy=" +#~ msgstr " woy=" + +#~ msgid " mins=%d\n" +#~ msgstr " mins=%d\n" + +#~ msgid " --> " +#~ msgstr " --> " + +#, fuzzy +#~ msgid " --> Next" +#~ msgstr " --> " + +#~ msgid "Schedule: name=%s\n" +#~ msgstr "Schedule: name=%s\n" + +#~ msgid "Pool: name=%s PoolType=%s\n" +#~ msgstr "Pool: name=%s PoolType=%s\n" + +#~ msgid " use_cat=%d use_once=%d cat_files=%d\n" +#~ msgstr " use_cat=%d use_once=%d cat_files=%d\n" + +#~ msgid " max_vols=%d auto_prune=%d VolRetention=%s\n" +#~ msgstr " max_vols=%d auto_prune=%d VolRetention=%s\n" + +#~ msgid " VolUse=%s recycle=%d LabelFormat=%s\n" +#~ msgstr " VolUse=%s recycle=%d LabelFormat=%s\n" + +#~ msgid " CleaningPrefix=%s LabelType=%d\n" +#~ msgstr " CleaningPrefix=%s LabelType=%d\n" + +#, fuzzy +#~ msgid " RecyleOldest=%d PurgeOldest=%d ActionOnPurge=%d\n" +#~ msgstr " RecyleOldest=%d PurgeOldest=%d MaxVolJobs=%d MaxVolFiles=%d\n" + +#, fuzzy +#~ msgid " MaxVolJobs=%d MaxVolFiles=%d MaxVolBytes=%s\n" +#~ msgstr " max_vols=%d auto_prune=%d VolRetention=%s\n" + +#~ msgid " MigTime=%s MigHiBytes=%s MigLoBytes=%s\n" +#~ msgstr " MigTime=%s MigHiBytes=%s MigLoBytes=%s\n" + +#, fuzzy +#~ msgid " CacheRetention=%s\n" +#~ msgstr " mailcmd=%s\n" + +#, fuzzy +#~ msgid " JobRetention=%s FileRetention=%s\n" +#~ msgstr " JobRetention=%s FileRetention=%s AutoPrune=%d\n" + +#, fuzzy +#~ msgid " NextPool=%s\n" +#~ msgstr " opcmd=%s\n" + +#, fuzzy +#~ msgid " RecyclePool=%s\n" +#~ msgstr " opcmd=%s\n" + +#, fuzzy +#~ msgid " ScratchPool=%s\n" +#~ msgstr " opcmd=%s\n" + +#, fuzzy +#~ msgid " Catalog=%s\n" +#~ msgstr " mailcmd=%s\n" + +#~ msgid "Messages: name=%s\n" +#~ msgstr "Messages: name=%s\n" + +#~ msgid " mailcmd=%s\n" +#~ msgstr " mailcmd=%s\n" + +#~ msgid " opcmd=%s\n" +#~ msgstr " opcmd=%s\n" + +#~ msgid "Cannot find Pool resource %s\n" +#~ msgstr "Kann Pool resource %s nicht finden\n" + +#~ msgid "Cannot find Console resource %s\n" +#~ msgstr "Kann Console resource %s nicht finden\n" + +#~ msgid "Cannot find Director resource %s\n" +#~ msgstr "Kann Director resource %s nicht finden\n" + +#~ msgid "Cannot find Storage resource %s\n" +#~ msgstr "Kann Storage resource %s nicht finden\n" + +#~ msgid "Cannot find Job resource %s\n" +#~ msgstr "Kann Job resource %s nicht finden\n" + +#~ msgid "Cannot find Counter resource %s\n" +#~ msgstr "Kann Counter resource %s nicht finden\n" + +#~ msgid "Cannot find Client resource %s\n" +#~ msgstr "Kann Client resource %s nicht finden\n" + +#~ msgid "Cannot find Schedule resource %s\n" +#~ msgstr "Kann Schedule resource %s nicht finden\n" + +#, fuzzy +#~ msgid "Expected one of: %s, got: %s" +#~ msgstr "Erwartet: %s, erhalten: %s" + +#, fuzzy +#~ msgid "Could not find Storage Resource %s referenced on line %d : %s\n" +#~ msgstr "" +#~ "Konnte config Resource %s , referenziert in Zeile %d : %s nicht finden\n" + +#, fuzzy +#~ msgid "" +#~ "Attempt to redefine Storage resource \"%s\" 
referenced on line %d : %s\n" +#~ msgstr "" +#~ "Konnte config Resource %s , referenziert in Zeile %d : %s nicht finden\n" + +#~ msgid "Expected a Migration Job Type keyword, got: %s" +#~ msgstr " Migration Job Type Schlsselwort erwartet, erhalten: %s" + +#~ msgid "Expected a Job Type keyword, got: %s" +#~ msgstr "Job Type Schlsselwort erwartet, erhalten: %s" + +#~ msgid "Expected a Job Level keyword, got: %s" +#~ msgstr "Job Level Schlsselwort erwartet, erhalten: %s" + +#~ msgid "Expected a Restore replacement option, got: %s" +#~ msgstr "Restore replacement Option erwartet, erhalten: %s" + +#~ msgid "Expect %s, got: %s" +#~ msgstr "Erwartet: %s, erhalten: %s" + +#~ msgid "Could not find config Resource %s referenced on line %d : %s\n" +#~ msgstr "" +#~ "Konnte config Resource %s , referenziert in Zeile %d : %s nicht finden\n" + +#~ msgid "Expecting open brace. Got %s" +#~ msgstr "Geschweifte Klammer auf erwartet, erhalten: %s" + +#~ msgid "Expecting keyword, got: %s\n" +#~ msgstr "Schlsselwort erwartet, erhalten:%s\n" + +#~ msgid "expected an equals, got: %s" +#~ msgstr "Erwartete ein \"ist gleich\", erhalten: %s" + +#~ msgid "Keyword %s not permitted in this resource" +#~ msgstr "Schlsselwort %s ist in dieser Ressource nicht erlaubt." + +#~ msgid "Count not update counter %s: ERR=%s\n" +#~ msgstr "Konnte counter %s: nicht aktualisieren: ERR=%s\n" + +#~ msgid "Cannot create var context: ERR=%s\n" +#~ msgstr "Kann var context nicht erzeugen: ERR=%s\n" + +#~ msgid "Cannot set var callback: ERR=%s\n" +#~ msgstr "kann Variable callback nicht setzen: ERR=%s\n" + +#~ msgid "Cannot set var operate: ERR=%s\n" +#~ msgstr "kann Variable operate nicht setzen: ERR=%s\n" + +#~ msgid "Cannot unescape string: ERR=%s\n" +#~ msgstr "Cannot unescape string: ERR=%s\n" + +#~ msgid "Cannot expand expression \"%s\": ERR=%s\n" +#~ msgstr "Kann Ausdruck\"%s\"nicht auflsen: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot destroy var context: ERR=%s\n" +#~ msgstr "kann Variable context nicht zerstren: ERR=%s\n" + +#, fuzzy +#~ msgid "Client: " +#~ msgstr "File daemon" + +#~ msgid "File daemon \"%s\" rejected Job command: %s\n" +#~ msgstr "File daemon \"%s\" hat Job Kommando abgewiesen: %s\n" + +#~ msgid "Error updating Client record. ERR=%s\n" +#~ msgstr "Fehler beim Aktualisieren des Client Eintrags. ERR=%s\n" + +#~ msgid "FD gave bad response to JobId command: %s\n" +#~ msgstr "FD hat fehlerhafte Antwort auf JobId Kommando zurckgegeben: %s\n" + +#~ msgid ", since=" +#~ msgstr ", seit=" + +#~ msgid "" +#~ "No prior or suitable Full backup found in catalog. Doing FULL backup.\n" +#~ msgstr "" +#~ "Kein vorheriges oder passendes Full backup in catalog gefunden. Fhre " +#~ "FULL backup durch.\n" + +#~ msgid " (upgraded from %s)" +#~ msgstr " (erweitert von %s)" + +#, fuzzy +#~ msgid "" +#~ "No prior or suitable Full backup found in catalog. Doing Virtual FULL " +#~ "backup.\n" +#~ msgstr "" +#~ "Kein vorheriges oder passendes Full backup in catalog gefunden. Fhre " +#~ "FULL backup durch.\n" + +#, fuzzy +#~ msgid "" +#~ "No prior or suitable Differential backup found in catalog. Doing " +#~ "Differential backup.\n" +#~ msgstr "" +#~ "Kein vorheriges oder passendes Full backup in catalog gefunden. Fhre " +#~ "FULL backup durch.\n" + +#~ msgid "Prior failed job found in catalog. Upgrading to %s.\n" +#~ msgstr "" +#~ "Vorheriger fehlgeschlagener job in catalog gefunden. 
Erweitere auf %s.\n" + +#~ msgid "Unimplemented backup level %d %c\n" +#~ msgstr "Nicht implementierter backup level %d %c\n" + +#~ msgid "Cannot run program: %s. ERR=%s\n" +#~ msgstr "Kann Programm: %s nicht starten. ERR=%s\n" + +#~ msgid ">filed: write error on socket\n" +#~ msgstr ">filed: Schreibfehler auf socket\n" + +#~ msgid "Error running program: %s. ERR=%s\n" +#~ msgstr "Fehler beim Start des Programms: %s. ERR=%s\n" + +#~ msgid "Cannot open included file: %s. ERR=%s\n" +#~ msgstr "Kann inkludierte Datei nicht ffnen: %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Client \"%s\" RunScript failed.\n" +#~ msgstr "Pool Ressource \"%s\" nicht gefunden.\n" + +#, fuzzy +#~ msgid "RestoreObject failed.\n" +#~ msgstr "Wiederherstellung luft..." + +#, fuzzy +#~ msgid "ComponentInfo failed.\n" +#~ msgstr "Job Einrichtung Fehlgeschlagen.\n" + +#~ msgid "" +#~ " Run=%s\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Can't find %s command.\n" +#~ "\n" +#~ msgstr "Kann Programm: %s nicht starten. ERR=%s\n" + +#, fuzzy +#~ msgid "%s Version: %s (%s) %s %s %s %s\n" +#~ msgstr "1000 OK: %s Version: %s (%s)\n" + +#, fuzzy +#~ msgid "No authorization for Catalog \"%s\"\n" +#~ msgstr "Storage daemon" + +#, fuzzy +#~ msgid "Could not find a Catalog resource\n" +#~ msgstr "Kann Storage resource %s nicht finden\n" + +#, fuzzy +#~ msgid "Could not open catalog database \"%s\".\n" +#~ msgstr "Konnte Catalog \"%s\", database \"%s\" nicht ffnen.\n" + +#, fuzzy +#~ msgid "Using Catalog \"%s\"\n" +#~ msgstr " mailcmd=%s\n" + +#, fuzzy +#~ msgid "path name missing.\n" +#~ msgstr "FileSet: name=%s\n" + +#, fuzzy +#~ msgid "Failed to send command to Client.\n" +#~ msgstr "Verbindung zu File daemon fehlgeschlagen.\n" + +#, fuzzy +#~ msgid "Unable to get Job record for Job=%s\n" +#~ msgstr "Konnte Job Eintrag nicht holen. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to get last Job record for Job=%s\n" +#~ msgstr "Konnte Job Eintrag nicht holen. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to get Client record for Client=%s\n" +#~ msgstr "Konnte Job Eintrag nicht holen. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to get last Job record for Client=%s\n" +#~ msgstr "Konnte Job Eintrag nicht holen. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to get Job record for JobId=%s: ERR=%s\n" +#~ msgstr "Konnte Job Eintrag nicht holen. ERR=%s\n" + +#, fuzzy +#~ msgid "Unknown command: %s\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "Invalid argument for %s\n" +#~ msgstr "Ungltige JobId gefunden.\n" + +#, fuzzy +#~ msgid "Invalid argument for job\n" +#~ msgstr "Ungltige JobId gefunden.\n" + +#, fuzzy +#~ msgid "Unable to open the catalog.\n" +#~ msgstr "Konnte Job Eintrag nicht holen. ERR=%s\n" + +#, fuzzy +#~ msgid "Query failed: %s. 
ERR=%s\n" +#~ msgstr "Abfrage gescheitert: %s: ERR=%s\n" + +#, fuzzy +#~ msgid "query keyword not found.\n" +#~ msgstr "Path record: %s nicht gefunden.\n" + +#, fuzzy +#~ msgid "List MediaType failed: ERR=%s\n" +#~ msgstr "Media id select fehlgeschlagen: ERR=%s\n" + +#, fuzzy +#~ msgid "List Media failed: ERR=%s\n" +#~ msgstr "Media id select fehlgeschlagen: ERR=%s\n" + +#, fuzzy +#~ msgid "List Location failed: ERR=%s\n" +#~ msgstr "Client id select fehlgeschlagen: ERR=%s\n" + +#, fuzzy +#~ msgid "Expected a positive integer, got: %s\n" +#~ msgstr "fstype Ausdruck erwartet, erhalten: %s\n" + +#, fuzzy +#~ msgid "Illegal character \"%c\" in a comment.\n" +#~ msgstr "Illegales Zeichen in \"Volume name\" \"%s\"\n" + +#, fuzzy +#~ msgid "Comment too long.\n" +#~ msgstr "Job nicht gefunden: %s\n" + +#, fuzzy +#~ msgid "Slot too large.\n" +#~ msgstr "Job nicht gefunden: %s\n" + +#, fuzzy +#~ msgid "No Volumes found to label, or no barcodes.\n" +#~ msgstr "Keine Volumes zum Wiederherstellen gefunden.\n" + +#, fuzzy +#~ msgid "Slot %d greater than max %d ignored.\n" +#~ msgstr "" +#~ "Anforderung von \"Volume item\" %d grer als Max %d oder weniger als 1\n" + +#, fuzzy +#~ msgid "Catalog record for Volume \"%s\" updated to reference slot %d.\n" +#~ msgstr "Media Eintrag fr Volume \"%s\" nicht gefunden.\n" + +#, fuzzy +#~ msgid "Catalog record for Volume \"%s\" is up to date.\n" +#~ msgstr "Media Eintrag fr Volume \"%s\" nicht gefunden.\n" + +#, fuzzy +#~ msgid "Media record for new Volume \"%s\" already exists.\n" +#~ msgstr "Volume \"%s\" bereits vorhanden.\n" + +#, fuzzy +#~ msgid "Delete of Volume \"%s\" failed. ERR=%s" +#~ msgstr "Erzeugung von db Device Eintrag %s fehlgeschlagen: ERR=%s\n" + +#, fuzzy +#~ msgid "Old volume \"%s\" deleted from catalog.\n" +#~ msgstr "1997 Volume \"%s\" Nicht in Catalog.\n" + +#, fuzzy +#~ msgid "Media record for Slot %d Volume \"%s\" already exists.\n" +#~ msgstr "Volume \"%s\" bereits vorhanden.\n" + +#, fuzzy +#~ msgid "Error setting InChanger: ERR=%s" +#~ msgstr "Fehler beim Holen von Zeile %d: ERR=%s\n" + +#, fuzzy +#~ msgid "Catalog error on cleaning tape: %s" +#~ msgstr "Catalog Fehler beim Erzeugen des JobMedia Eintrags. %s" + +#, fuzzy +#~ msgid "Illegal character \"%c\" in a volume name.\n" +#~ msgstr "Illegales Zeichen in \"Volume name\" \"%s\"\n" + +#, fuzzy +#~ msgid "Sending label command for Volume \"%s\" Slot %d ...\n" +#~ msgstr "Media Eintrag fr Volume \"%s\" nicht gefunden.\n" + +#, fuzzy +#~ msgid "Catalog record for Volume \"%s\", Slot %d successfully created.\n" +#~ msgstr "Media Eintrag fr Volume \"%s\" nicht gefunden.\n" + +#, fuzzy +#~ msgid "Label command failed for Volume %s.\n" +#~ msgstr "Mehr als ein Dateiname! 
%s fr Datei: %s\n" + +#, fuzzy +#~ msgid "Could not open SD socket.\n" +#~ msgstr "Konnte Datenbank \"%s\" nicht ffen.\n" + +#, fuzzy +#~ msgid "Invalid Slot number: %s\n" +#~ msgstr "Ungltige Catalog Anfrage: %s" + +#, fuzzy +#~ msgid "Device \"%s\" has %d slots.\n" +#~ msgstr "Pool Ressource \"%s\" nicht gefunden.\n" + +#, fuzzy +#~ msgid "Pool \"%s\" resource not found for volume \"%s\"!\n" +#~ msgstr "Job Ressource fr \"%s\"nicht gefunden.\n" + +#, fuzzy +#~ msgid "No Volumes found, or no barcodes.\n" +#~ msgstr "Keine Volumes zum Wiederherstellen gefunden.\n" + +#, fuzzy +#~ msgid "Disabled Jobs:\n" +#~ msgstr "ist nicht aktiviert" + +#, fuzzy +#~ msgid "No disabled Jobs.\n" +#~ msgstr "ist nicht aktiviert" + +#, fuzzy +#~ msgid "%s resource %s not found.\n" +#~ msgstr "Pool Ressource \"%s\" nicht gefunden.\n" + +#, fuzzy +#~ msgid "Resource %s not found\n" +#~ msgstr "Pool Ressource \"%s\" nicht gefunden.\n" + +#, fuzzy +#~ msgid "Unknown order type %s\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "Invalid jobid argument\n" +#~ msgstr "Ungltige JobId gefunden.\n" + +#, fuzzy +#~ msgid "Unknown ObjectType %s\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "No Pool specified.\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "Error obtaining pool ids. ERR=%s\n" +#~ msgstr "Fehler beim Start des Programms: %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Unknown list keyword: %s\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "Could not find Pool for Job %s\n" +#~ msgstr "Konnte Datenbank \"%s\" nicht ffen.\n" + +#, fuzzy +#~ msgid "Could not find next Volume for Job %s (Pool=%s, Level=%s).\n" +#~ msgstr "Konnte Datenbank \"%s\" nicht ffen.\n" + +#, fuzzy +#~ msgid "Could not find next Volume for Job %s.\n" +#~ msgstr "Konnte Datenbank \"%s\" nicht ffen.\n" + +#~ msgid "Pool %s not in database. %s" +#~ msgstr "Pool %s nicht in der Datenbank. %s" + +#~ msgid "Pool %s created in database.\n" +#~ msgstr "Pool %s in der Datenbank angelegt.\n" + +#, fuzzy +#~ msgid "Cannot prune Volume \"%s\" because it is archived.\n" +#~ msgstr "Konnte FileSet \"%s\" Eintrag nicht erzeugen. 
ERR=%s\n" + +#, fuzzy +#~ msgid "Pruned Jobs from JobHisto catalog.\n" +#~ msgstr "File Eintrag nicht im Catalog gefunden.\n" + +#, fuzzy +#~ msgid "Begin pruning Files.\n" +#~ msgstr "Beginn automatische Suberung von Dateien.\n" + +#, fuzzy +#~ msgid "No Files found to prune.\n" +#~ msgstr "Keine Volumes zum Wiederherstellen gefunden.\n" + +#, fuzzy +#~ msgid "Begin pruning Jobs older than %s.\n" +#~ msgstr "Beginn automatische Suberung von Auftrgen.\n" + +#, fuzzy +#~ msgid "No Jobs found to prune.\n" +#~ msgstr "Kein Job gefunden fr: %s.\n" + +#, fuzzy +#~ msgid "%d expired volume%s found\n" +#~ msgstr "Keinen vorherigen Job zum migrieren gefunden.\n" + +#, fuzzy +#~ msgid "Begin purging files for Client \"%s\"\n" +#~ msgstr "Beginn automatische Suberung von Auftrgen.\n" + +#, fuzzy +#~ msgid "Begin purging jobs from Client \"%s\"\n" +#~ msgstr "Beginn automatische Suberung von Auftrgen.\n" + +#, fuzzy +#~ msgid "%d Job%s on Volume \"%s\" purged from catalog.\n" +#~ msgstr "1997 Volume \"%s\" Nicht in Catalog.\n" + +#, fuzzy +#~ msgid "Can't update volume size in the catalog\n" +#~ msgstr "Neues Volume \"%s\" in catalog erzeugt.\n" + +#, fuzzy +#~ msgid "Unable to truncate volume \"%s\"\n" +#~ msgstr "Kann Konsole \"%s\" an %s:%s:%d nicht authentisieren.\n" + +#, fuzzy +#~ msgid "Could not open %s: ERR=%s\n" +#~ msgstr "Kann bootstrap Datei nicht ffnen: %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Available queries:\n" +#~ msgstr "FileSet: name=%s\n" + +#, fuzzy +#~ msgid "Invalid command line query item specified.\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "Could not find query.\n" +#~ msgstr "Konnte Datenbank \"%s\" nicht ffen.\n" + +#, fuzzy +#~ msgid "Restore not done.\n" +#~ msgstr "Pool Ressource \"%s\" nicht gefunden.\n" + +#, fuzzy +#~ msgid "No files selected to be restored.\n" +#~ msgstr "Keine Volumes zum Wiederherstellen gefunden.\n" + +#~ msgid "Bootstrap records written to %s\n" +#~ msgstr "Bootstrap Eintrge geschrieben nach %s\n" + +#, fuzzy +#~ msgid "No Client resource found!\n" +#~ msgstr "Pool Ressource \"%s\" nicht gefunden.\n" + +#, fuzzy +#~ msgid "Missing value for keyword: %s\n" +#~ msgstr "Ungltige JobId gefunden.\n" + +#, fuzzy +#~ msgid "Unknown keyword: %s\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "Improper date format: %s\n" +#~ msgstr "Unangebrachter Beendigungskode: %c\n" + +#, fuzzy +#~ msgid "Error: Pool resource \"%s\" does not exist.\n" +#~ msgstr "Pool Ressource \"%s\" nicht gefunden.\n" + +#, fuzzy +#~ msgid "Error: Pool resource \"%s\" access not allowed.\n" +#~ msgstr "Pool Ressource \"%s\" nicht gefunden.\n" + +#~ msgid "" +#~ "\n" +#~ "First you select one or more JobIds that contain files\n" +#~ "to be restored. You will be presented several methods\n" +#~ "of specifying the JobIds. Then you will be allowed to\n" +#~ "select which files from those JobIds are to be restored.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Zuerst whlen Sie eine oder mehrere JobIds die Dateien enthalten\n" +#~ "die wiederhergestellt werden sollen. Es werden mehrere Methoden\n" +#~ "zur ermittlung der JobIds angezeigt. 
Dann knnen Sie auswhlen, \n" +#~ "welche Dateien von diesen JobIds wiederhergestellt werden sollen.\n" +#~ "\n" + +#, fuzzy +#~ msgid "Select item: " +#~ msgstr "Pool Ressource" + +#, fuzzy +#~ msgid "Enter SQL list command: " +#~ msgstr "Storage daemon hat \"Job command\": %s abgelehnt\n" + +#, fuzzy +#~ msgid "Enter full filename: " +#~ msgstr "Dateiname erwartet, erhalten: %s" + +#, fuzzy +#~ msgid "Invalid JobId in list.\n" +#~ msgstr "Ungltige JobId gefunden.\n" + +#, fuzzy +#~ msgid "Cannot open file %s: ERR=%s\n" +#~ msgstr "Kann bootstrap Datei nicht ffnen: %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Error occurred on line %d of file \"%s\"\n" +#~ msgstr "Fehler beim schreiben der bsr Datei.\n" + +#, fuzzy +#~ msgid "No database record found for: %s\n" +#~ msgstr "Keinen Job gefunden fr: %s\n" + +#, fuzzy +#~ msgid "No JobId specified cannot continue.\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "No table found: %s\n" +#~ msgstr "Keinen Job gefunden fr: %s\n" + +#, fuzzy +#~ msgid "Regex compile error: %s\n" +#~ msgstr "Regex bersetzungsfehler. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to create component file %s. ERR=%s\n" +#~ msgstr "Konnte bootstrap Datei %s nicht erzeugen. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to fdopen component file %s. ERR=%s\n" +#~ msgstr "Kann Datenbank=%s.nicht ffen. ERR=%s\n" + +#, fuzzy +#~ msgid "Error writing component file.\n" +#~ msgstr "Fehler beim schreiben der bsr Datei.\n" + +#, fuzzy +#~ msgid "Error getting FileSet \"%s\": ERR=%s\n" +#~ msgstr "Fehler beim holen der FileSet Zeile: ERR=%s\n" + +#, fuzzy +#~ msgid "FileSet argument: %s\n" +#~ msgstr "FileSet: name=%s\n" + +#, fuzzy +#~ msgid "The defined FileSet resources are:\n" +#~ msgstr "Kann Schedule resource %s nicht finden\n" + +#, fuzzy +#~ msgid "Select FileSet resource" +#~ msgstr "Kann Schedule resource %s nicht finden\n" + +#, fuzzy +#~ msgid "No FileSet found for client \"%s\".\n" +#~ msgstr "Keine Messages resource definiert in %s\n" + +#, fuzzy +#~ msgid "Error getting FileSet record: %s\n" +#~ msgstr "Fehler beim holen der FileSet Zeile: ERR=%s\n" + +#, fuzzy +#~ msgid "Pool \"%s\" not found, using any pool.\n" +#~ msgstr "FileSet Eintrag \"%s\" nicht gefunden\n" + +#, fuzzy +#~ msgid "No Full backup before %s found.\n" +#~ msgstr "Keinen vorherigen \"Full backup\" Job Eintrag gefunden.\n" + +#, fuzzy +#~ msgid "No jobs found.\n" +#~ msgstr "Kein Job gefunden fr: %s.\n" + +#, fuzzy +#~ msgid "Job %s failed.\n" +#~ msgstr "%s Abgebrochen" + +#, fuzzy +#~ msgid "Job queued. JobId=%s\n" +#~ msgstr "Kein Job fr JobId %s gefunden\n" + +#, fuzzy +#~ msgid "Job \"%s\" not found\n" +#~ msgstr "FileSet Eintrag \"%s\" nicht gefunden\n" + +#, fuzzy +#~ msgid "A job name must be specified.\n" +#~ msgstr "Ein Benutzername fr MySQL muss angegeben werden.\n" + +#, fuzzy +#~ msgid "No authorization. Job \"%s\".\n" +#~ msgstr "Storage daemon" + +#, fuzzy +#~ msgid "Pool \"%s\" not found.\n" +#~ msgstr "FileSet Eintrag \"%s\" nicht gefunden\n" + +#, fuzzy +#~ msgid "No authorization. Pool \"%s\".\n" +#~ msgstr "Storage daemon" + +#, fuzzy +#~ msgid "NextPool \"%s\" not found.\n" +#~ msgstr "FileSet Eintrag \"%s\" nicht gefunden\n" + +#, fuzzy +#~ msgid "No authorization. NextPool \"%s\".\n" +#~ msgstr "Storage daemon" + +#, fuzzy +#~ msgid "Restore Client \"%s\" not found.\n" +#~ msgstr "Pool Ressource \"%s\" nicht gefunden.\n" + +#, fuzzy +#~ msgid "No authorization. 
Client \"%s\".\n" +#~ msgstr "Storage daemon" + +#, fuzzy +#~ msgid "FileSet \"%s\" not found.\n" +#~ msgstr "FileSet Eintrag \"%s\" nicht gefunden\n" + +#, fuzzy +#~ msgid "No authorization. FileSet \"%s\".\n" +#~ msgstr "Storage daemon" + +#, fuzzy +#~ msgid "Storage \"%s\" not found.\n" +#~ msgstr "FileSet Eintrag \"%s\" nicht gefunden\n" + +#, fuzzy +#~ msgid "user selection" +#~ msgstr "File daemon" + +#, fuzzy +#~ msgid "No authorization. Storage \"%s\".\n" +#~ msgstr "Storage daemon" + +#, fuzzy +#~ msgid "No JobId specified.\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "Invalid or no Job name specified.\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "Could not get job record for selected JobId=%d. ERR=%s" +#~ msgstr "Konnte job record fr JobId %s zum migrieren nicht holen. ERR=%s" + +#, fuzzy +#~ msgid "Unable to use current plugin configuration, discarding it." +#~ msgstr "Konnte Job Eintrag nicht holen. ERR=%s\n" + +#, fuzzy +#~ msgid "Plugin Restore Options\n" +#~ msgstr " --> Run=%s\n" + +#, fuzzy +#~ msgid "No plugin to configure\n" +#~ msgstr "Pool Ressource \"%s\" nicht gefunden.\n" + +#, fuzzy +#~ msgid "Plugins to configure:\n" +#~ msgstr "Pool Ressource \"%s\" nicht gefunden.\n" + +#, fuzzy +#~ msgid "Select plugin to configure" +#~ msgstr "Pool Ressource \"%s\" nicht gefunden.\n" + +#, fuzzy +#~ msgid "Can't configure %32s\n" +#~ msgstr "Kann inkludierte Datei nicht ffnen: %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Restore Client" +#~ msgstr "Wiederherstellung luft..." + +#, fuzzy +#~ msgid "Priority" +#~ msgstr " woy=" + +#, fuzzy +#~ msgid "Pool" +#~ msgstr " opcmd=%s\n" + +#, fuzzy +#~ msgid "NextPool" +#~ msgstr " opcmd=%s\n" + +#, fuzzy +#~ msgid "Bootstrap" +#~ msgstr " --> Bootstrap=%s\n" + +#, fuzzy +#~ msgid "File Relocation" +#~ msgstr "File daemon" + +#, fuzzy +#~ msgid "Plugin Options" +#~ msgstr " --> Run=%s\n" + +#, fuzzy +#~ msgid "Please enter the Bootstrap file name: " +#~ msgstr "Konnte bootstrap Datei %s nicht erzeugen. ERR=%s\n" + +#, fuzzy +#~ msgid "Warning cannot open %s: ERR=%s\n" +#~ msgstr "Kann Ausdruck\"%s\"nicht auflsen: ERR=%s\n" + +#, fuzzy +#~ msgid "Select replace option" +#~ msgstr "Pool Ressource" + +#, fuzzy +#~ msgid "Invalid replace option: %s\n" +#~ msgstr "Ungltige JobId gefunden.\n" + +#, fuzzy +#~ msgid "regexwhere=%s\n" +#~ msgstr " --> Where=%s\n" + +#, fuzzy +#~ msgid "Cannot use your regexp\n" +#~ msgstr "Kann inkludierte Datei nicht ffnen: %s. ERR=%s\n" + +#, fuzzy +#~ msgid "%s -> %s\n" +#~ msgstr " --> Run=%s\n" + +#, fuzzy +#~ msgid "Cannot use your regexp.\n" +#~ msgstr "Kann inkludierte Datei nicht ffnen: %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Since" +#~ msgstr ", seit=" + +#, fuzzy +#~ msgid "Could not get job record for selected JobId. ERR=%s" +#~ msgstr "Konnte job record fr JobId %s zum migrieren nicht holen. 
ERR=%s" + +#, fuzzy +#~ msgid "User specified" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "RegexWhere: %s\n" +#~ msgstr " --> Where=%s\n" + +#, fuzzy +#~ msgid "Where: %s\n" +#~ msgstr " opcmd=%s\n" + +#, fuzzy +#~ msgid "Run Copy job\n" +#~ msgstr "Konnte Migrationsjob nicht starten.\n" + +#, fuzzy +#~ msgid "Run Migration job\n" +#~ msgstr "Konnte Migrationsjob nicht starten.\n" + +#, fuzzy +#~ msgid "Unknown Job Type=%d\n" +#~ msgstr "Unbekannter Job level=%d\n" + +#, fuzzy +#~ msgid "JobId specified twice.\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "Client specified twice.\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "FileSet specified twice.\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "Level specified twice.\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "Storage specified twice.\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "RegexWhere or Where specified twice.\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "No authorization for \"regexwhere\" specification.\n" +#~ msgstr "Storage daemon" + +#, fuzzy +#~ msgid "Where or RegexWhere specified twice.\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "No authoriztion for \"where\" specification.\n" +#~ msgstr "Storage daemon" + +#, fuzzy +#~ msgid "Bootstrap specified twice.\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "Replace specified twice.\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "When specified twice.\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "Priority specified twice.\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "Verify Job specified twice.\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "Migration Job specified twice.\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "Pool specified twice.\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "Restore Client specified twice.\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "Plugin Options not yet implemented.\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "Plugin Options specified twice.\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "No authoriztion for \"PluginOptions\" specification.\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "Spool flag specified twice.\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "Invalid spooldata flag.\n" +#~ msgstr "Ungltige JobId gefunden.\n" + +#, fuzzy +#~ msgid "IgnoreDuplicateCheck flag specified twice.\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "Invalid ignoreduplicatecheck flag.\n" +#~ msgstr "Ungltige JobId gefunden.\n" + +#, fuzzy +#~ msgid "Accurate flag specified twice.\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "Invalid accurate flag.\n" +#~ msgstr "Ungltige JobId gefunden.\n" + +#, fuzzy +#~ msgid "Job name specified twice.\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "Media Type specified twice.\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "NextPool specified twice.\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "Invalid keyword: %s\n" +#~ msgstr "Ungltige JobId gefunden.\n" + +#, fuzzy +#~ msgid "Catalog \"%s\" not found\n" +#~ msgstr "Pool Ressource \"%s\" nicht gefunden.\n" + +#, fuzzy +#~ msgid "No authorization. 
Catalog \"%s\".\n" +#~ msgstr "Storage daemon" + +#, fuzzy +#~ msgid "Verify Job \"%s\" not found.\n" +#~ msgstr "FileSet Eintrag \"%s\" nicht gefunden\n" + +#, fuzzy +#~ msgid "Migration Job \"%s\" not found.\n" +#~ msgstr "Pool Ressource \"%s\" nicht gefunden.\n" + +#, fuzzy +#~ msgid "Invalid period.\n" +#~ msgstr "Ungltige MediaId gefunden.\n" + +#, fuzzy +#~ msgid "The defined Storage resources are:\n" +#~ msgstr "Kann Schedule resource %s nicht finden\n" + +#, fuzzy +#~ msgid "Select Storage resource" +#~ msgstr "Kann Schedule resource %s nicht finden\n" + +#, fuzzy +#~ msgid "The defined Catalog resources are:\n" +#~ msgstr "Kann Schedule resource %s nicht finden\n" + +#, fuzzy +#~ msgid "Select Catalog resource" +#~ msgstr "Kann Schedule resource %s nicht finden\n" + +#, fuzzy +#~ msgid "The disabled Job resources are:\n" +#~ msgstr "Kann Job resource %s nicht finden\n" + +#, fuzzy +#~ msgid "The enabled Job resources are:\n" +#~ msgstr "Kann Job resource %s nicht finden\n" + +#, fuzzy +#~ msgid "Select Job resource" +#~ msgstr "Job Ressource" + +#, fuzzy +#~ msgid "The defined Job resources are:\n" +#~ msgstr "Kann Job resource %s nicht finden\n" + +#, fuzzy +#~ msgid "Error: Restore Job resource \"%s\" does not exist.\n" +#~ msgstr "Pool Ressource \"%s\" nicht gefunden.\n" + +#, fuzzy +#~ msgid "The defined Restore Job resources are:\n" +#~ msgstr "Kann Schedule resource %s nicht finden\n" + +#, fuzzy +#~ msgid "Select Restore Job" +#~ msgstr "Pool Ressource" + +#, fuzzy +#~ msgid "The defined Client resources are:\n" +#~ msgstr "Kann Schedule resource %s nicht finden\n" + +#, fuzzy +#~ msgid "Select Client resource" +#~ msgstr "Pool Ressource" + +#, fuzzy +#~ msgid "Select Client (File daemon) resource" +#~ msgstr "Kann Schedule resource %s nicht finden\n" + +#, fuzzy +#~ msgid "Error: Client resource %s does not exist.\n" +#~ msgstr "Pool Ressource \"%s\" nicht gefunden.\n" + +#, fuzzy +#~ msgid "The defined Schedule resources are:\n" +#~ msgstr "Kann Schedule resource %s nicht finden\n" + +#, fuzzy +#~ msgid "Schedule" +#~ msgstr "Schedule: name=%s\n" + +#, fuzzy +#~ msgid "Select Schedule resource" +#~ msgstr "Kann Schedule resource %s nicht finden\n" + +#, fuzzy +#~ msgid "Could not find Client %s: ERR=%s" +#~ msgstr "Konnte Client Eintrag nicht anlegen. ERR=%s\n" + +#, fuzzy +#~ msgid "Could not find Client \"%s\": ERR=%s" +#~ msgstr "Konnte regex pattern \"%s\" nicht kompilieren ERR=%s\n" + +#, fuzzy +#~ msgid "Error obtaining client ids. ERR=%s\n" +#~ msgstr "Fehler beim Aktualisieren des Client Eintrags. 
ERR=%s\n" + +#, fuzzy +#~ msgid "Select the Client" +#~ msgstr "Pool Ressource" + +#, fuzzy +#~ msgid "Could not find Pool \"%s\": ERR=%s" +#~ msgstr "Konnte regex pattern \"%s\" nicht kompilieren ERR=%s\n" + +#, fuzzy +#~ msgid "No access to Pool \"%s\"\n" +#~ msgstr "Storage daemon" + +#, fuzzy +#~ msgid "The defined Pool resources are:\n" +#~ msgstr "Kann Schedule resource %s nicht finden\n" + +#, fuzzy +#~ msgid "Select Pool resource" +#~ msgstr "Pool Ressource" + +#, fuzzy +#~ msgid "Could not find Job \"%s\": ERR=%s" +#~ msgstr "Konnte Datenbank \"%s\" nicht ffen.\n" + +#, fuzzy +#~ msgid "Expecting jobid=nn command, got: %s\n" +#~ msgstr "Schlsselwort erwartet, erhalten:%s\n" + +#, fuzzy +#~ msgid "Expecting job=xxx, got: %s.\n" +#~ msgstr "Schlsselwort erwartet, erhalten:%s\n" + +#, fuzzy +#~ msgid "Job \"%s\" is not running.\n" +#~ msgstr "FileSet Eintrag \"%s\" nicht gefunden\n" + +#, fuzzy +#~ msgid "Expecting ujobid=xxx, got: %s.\n" +#~ msgstr "Schlsselwort erwartet, erhalten:%s\n" + +#, fuzzy +#~ msgid "Storage resource \"%s\": not found\n" +#~ msgstr "Pool Ressource \"%s\" nicht gefunden.\n" + +#, fuzzy +#~ msgid "Media Types defined in conf file:\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "Select the Media Type" +#~ msgstr "nicht korrekter Medientyp" + +#, fuzzy +#~ msgid "No Jobs running.\n" +#~ msgstr "Kein Job gefunden fr: %s.\n" + +#, fuzzy +#~ msgid "No value given for \"jobid\".\n" +#~ msgstr "Keine volumes fr JobId=%d gefunden\n" + +#, fuzzy +#~ msgid "No value given for \"job\".\n" +#~ msgstr "Keine volumes fr JobId=%d gefunden\n" + +#, fuzzy +#~ msgid "No value given for \"ujobid\".\n" +#~ msgstr "Keine volumes fr JobId=%d gefunden\n" + +#, fuzzy +#~ msgid "Select Job(s):\n" +#~ msgstr "ist nicht aktiviert" + +#, fuzzy +#~ msgid "JobId=%s Job=%s" +#~ msgstr "Migration benutzt JobId=%s Job=%s\n" + +#, fuzzy +#~ msgid "Invalid argument \"action\".\n" +#~ msgstr "Ungltige JobId gefunden.\n" + +#, fuzzy +#~ msgid "No Volumes found to perform the command.\n" +#~ msgstr "Keine Volumes zum Wiederherstellen gefunden.\n" + +#, fuzzy +#~ msgid "Cannot create UA thread: %s\n" +#~ msgstr "Kann \"message thread\" nicht erzeugen: %s\n" + +#, fuzzy +#~ msgid "Connecting to Storage %s at %s:%d\n" +#~ msgstr "Storage daemon" + +#, fuzzy +#~ msgid "Failed to connect to Storage.\n" +#~ msgstr "Verbindung zu Storage daemon fehlgeschlagen.\n" + +#, fuzzy +#~ msgid "%s %sVersion: %s (%s) %s %s %s\n" +#~ msgstr "1000 OK: %s Version: %s (%s)\n" + +#, fuzzy +#~ msgid "No authorization for Storage \"%s\"\n" +#~ msgstr "Storage daemon" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Failed to connect to Storage daemon %s.\n" +#~ "====\n" +#~ msgstr "Verbindung zu Storage daemon fehlgeschlagen.\n" + +#, fuzzy +#~ msgid "" +#~ "Failed to connect to Client %s.\n" +#~ "====\n" +#~ msgstr "Verbindung zu File daemon fehlgeschlagen.\n" + +#, fuzzy +#~ msgid "Connected to file daemon\n" +#~ msgstr "Verbindung zu File daemon fehlgeschlagen.\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Scheduled Jobs:\n" +#~ msgstr "ist nicht aktiviert" + +#, fuzzy +#~ msgid "%-14s\t%-8s\t%3d\t%-18s\t%-18s\t%s\n" +#~ msgstr "1000 OK: %s Version: %s (%s)\n" + +#, fuzzy +#~ msgid "%-14s %-8s %3d %-18s %-18s %s\n" +#~ msgstr "1000 OK: %s Version: %s (%s)\n" + +#, fuzzy +#~ msgid "Console connected %sat %s\n" +#~ msgstr "Console: name=%s SSL=%d\n" + +#, fuzzy +#~ msgid "is blocked" +#~ msgstr "Job Einrichtung Fehlgeschlagen.\n" + +#, fuzzy +#~ msgid "has terminated" +#~ msgstr "%s OK -- mit Warnungen" + +#, fuzzy 
+#~ msgid "has terminated with warnings" +#~ msgstr "%s OK -- mit Warnungen" + +#, fuzzy +#~ msgid "has terminated in incomplete state" +#~ msgstr "%s OK -- mit Warnungen" + +#, fuzzy +#~ msgid "has errors" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "has a fatal error" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "has been canceled" +#~ msgstr "%s Abgebrochen" + +#, fuzzy +#~ msgid "is waiting on Client" +#~ msgstr "Storage daemon" + +#, fuzzy +#~ msgid "is waiting on Client %s" +#~ msgstr "Storage daemon" + +#, fuzzy +#~ msgid "is waiting on Storage \"%s\"" +#~ msgstr "Storage daemon" + +#, fuzzy +#~ msgid "is waiting on Storage" +#~ msgstr "Storage daemon" + +#, fuzzy +#~ msgid "is waiting on max Storage jobs" +#~ msgstr "Storage daemon" + +#, fuzzy +#~ msgid "is waiting on max Client jobs" +#~ msgstr "Storage daemon" + +#, fuzzy +#~ msgid "is waiting on max Job jobs" +#~ msgstr "Storage daemon" + +#, fuzzy +#~ msgid "is waiting on max total jobs" +#~ msgstr "Storage daemon" + +#, fuzzy +#~ msgid "is waiting for its start time (%s)" +#~ msgstr "Storage daemon" + +#, fuzzy +#~ msgid "is waiting for a Shared Storage device" +#~ msgstr "Storage daemon" + +#, fuzzy +#~ msgid "is in unknown state %c" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "is waiting for a mount request" +#~ msgstr "Storage daemon" + +#, fuzzy +#~ msgid "is waiting for an appendable Volume" +#~ msgstr "Storage daemon" + +#, fuzzy +#~ msgid "is waiting for Client to connect to Storage daemon" +#~ msgstr "Verbindung zu Storage daemon fehlgeschlagen.\n" + +#, fuzzy +#~ msgid "is waiting for Client %s to connect to Storage %s" +#~ msgstr "Verbindung zu Storage daemon fehlgeschlagen.\n" + +#, fuzzy +#~ msgid "%6d\t%-6s\t%-20s\t%s\t%s\n" +#~ msgstr "1000 OK: %s Version: %s (%s)\n" + +#, fuzzy +#~ msgid "%6d %-4s %-3s %10s %10s %-17s %s\n" +#~ msgstr "1000 OK: %s Version: %s (%s)\n" + +#, fuzzy +#~ msgid "change current directory" +#~ msgstr "kann Variable operate nicht setzen: ERR=%s\n" + +#, fuzzy +#~ msgid "print current working directory" +#~ msgstr "Kann inkludierte Datei nicht ffnen: %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Invalid command \"%s\". Enter \"done\" to exit.\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "No file specification given.\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "Invalid path given.\n" +#~ msgstr "Ungltige JobId gefunden.\n" + +#, fuzzy +#~ msgid "Volume parameters" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "Pool from resource" +#~ msgstr "Pool Ressource" + +#, fuzzy +#~ msgid "Slots from autochanger" +#~ msgstr "pthread_cond_wait: ERR=%s\n" + +#, fuzzy +#~ msgid "Snapshot parameters" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "Invalid VolStatus specified: %s\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "New Volume status is: %s\n" +#~ msgstr "1998 Volume \"%s\" status ist %s, %s.\n" + +#, fuzzy +#~ msgid "Invalid cache retention period specified: %s\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "Invalid use duration specified: %s\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "Error updating media record Slot: ERR=%s" +#~ msgstr "Fehler beim Aktualisieren des Client Eintrags. ERR=%s\n" + +#, fuzzy +#~ msgid "New RecyclePool is: %s\n" +#~ msgstr " opcmd=%s\n" + +#, fuzzy +#~ msgid "Error updating Volume record: ERR=%s" +#~ msgstr "Fehler beim Aktualisieren des Client Eintrags. ERR=%s\n" + +#, fuzzy +#~ msgid "Error updating Volume records: ERR=%s" +#~ msgstr "Fehler beim Aktualisieren des Client Eintrags. 
ERR=%s\n" + +#, fuzzy +#~ msgid "Error updating media record Enabled: ERR=%s" +#~ msgstr "Fehler beim Aktualisieren des Client Eintrags. ERR=%s\n" + +#, fuzzy +#~ msgid "Error updating media record ActionOnPurge: ERR=%s" +#~ msgstr "Fehler beim Aktualisieren des Client Eintrags. ERR=%s\n" + +#, fuzzy +#~ msgid "Enabled" +#~ msgstr "ist nicht aktiviert" + +#, fuzzy +#~ msgid "RecyclePool" +#~ msgstr " opcmd=%s\n" + +#, fuzzy +#~ msgid "Updating Volume \"%s\"\n" +#~ msgstr "Kann Konsole \"%s\" an %s:%s:%d nicht authentisieren.\n" + +#, fuzzy +#~ msgid "Current max files is: %u\n" +#~ msgstr "Kann bootstrap Datei nicht ffnen: %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Current value is: %s\n" +#~ msgstr "FileSet: name=%s\n" + +#, fuzzy +#~ msgid "Enter new Enabled: " +#~ msgstr "ist nicht aktiviert" + +#, fuzzy +#~ msgid "Current RecyclePool is: %s\n" +#~ msgstr " opcmd=%s\n" + +#, fuzzy +#~ msgid "No current RecyclePool\n" +#~ msgstr " opcmd=%s\n" + +#, fuzzy +#~ msgid "Expect JobId keyword, not found.\n" +#~ msgstr "Job Type Schlsselwort erwartet, erhalten: %s" + +#, fuzzy +#~ msgid "Neither Client, StartTime or Priority specified.\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "Job not found.\n" +#~ msgstr "FileSet Eintrag \"%s\" nicht gefunden\n" + +#, fuzzy +#~ msgid "Start Virtual Backup JobId %s, Job=%s\n" +#~ msgstr "Start Sicherung JobId %s, Job=%s\n" + +#, fuzzy +#~ msgid "No valid Jobs found from user selection.\n" +#~ msgstr "Kein Job gefunden fr: %s.\n" + +#, fuzzy +#~ msgid "Using user supplied JobIds=%s\n" +#~ msgstr "Migration benutzt JobId=%s Job=%s\n" + +#, fuzzy +#~ msgid "No previous Jobs found.\n" +#~ msgstr "Keinen vorherigen Job zum migrieren gefunden.\n" + +#, fuzzy +#~ msgid "Error getting Job record for previous Job: ERR=%s" +#~ msgstr "Fehler beim Holen des job Eintrags fr den job Bericht: %s" + +#~ msgid "Backup OK -- with warnings" +#~ msgstr "Sicherung OK -- mit Warnungen" + +#, fuzzy +#~ msgid "" +#~ "%s %s %s (%s):\n" +#~ " Build OS: %s %s %s\n" +#~ " JobId: %d\n" +#~ " Job: %s\n" +#~ " Backup Level: Virtual Full\n" +#~ " Client: \"%s\" %s\n" +#~ " FileSet: \"%s\" %s\n" +#~ " Pool: \"%s\" (From %s)\n" +#~ " Catalog: \"%s\" (From %s)\n" +#~ " Storage: \"%s\" (From %s)\n" +#~ " Scheduled time: %s\n" +#~ " Start time: %s\n" +#~ " End time: %s\n" +#~ " Elapsed time: %s\n" +#~ " Priority: %d\n" +#~ " SD Files Written: %s\n" +#~ " SD Bytes Written: %s (%sB)\n" +#~ " Rate: %.1f KB/s\n" +#~ " Volume name(s): %s\n" +#~ " Volume Session Id: %d\n" +#~ " Volume Session Time: %d\n" +#~ " Last Volume Bytes: %s (%sB)\n" +#~ " SD Errors: %d\n" +#~ " SD termination status: %s\n" +#~ " Termination: %s\n" +#~ "\n" +#~ msgstr "" +#~ "%s %s (%s): %s\n" +#~ " vorheriges Backup JobId:%s\n" +#~ " neues Backup JobId: %s\n" +#~ " Migration JobId: %s\n" +#~ " Migration Job: %s\n" +#~ " Backup Level: %s%s\n" +#~ " Client: %s\n" +#~ " FileSet: \"%s\" %s\n" +#~ " Lese Pool: \"%s\" (Von %s)\n" +#~ " Lese Storage: \"%s\" (Von %s)\n" +#~ " Schreib Pool: \"%s\" (Von %s)\n" +#~ " Schreib Storage: \"%s\" (Von %s)\n" +#~ " Start Zeit: %s\n" +#~ " End Zeit: %s\n" +#~ " Verstichene Zeit: %s\n" +#~ " Prioritt: %d\n" +#~ " SD Dateien geschrieben: %s\n" +#~ " SD Bytes geschrieben: %s (%sB)\n" +#~ " Geschwindigkeit: %.1f KB/s\n" +#~ " Volume name(s): %s\n" +#~ " Volume Session Id: %d\n" +#~ " Volume Session Time: %d\n" +#~ " Last Volume Bytes: %s (%sB)\n" +#~ " SD Fehler: %d\n" +#~ " SD Beendigungsstatus : %s\n" +#~ " Beendigungsstatus: %s\n" +#~ "\n" + +#, fuzzy +#~ msgid "Unimplemented 
Verify level %d(%c)\n" +#~ msgstr "Nicht implementierter backup level %d %c\n" + +#, fuzzy +#~ msgid "Could not get job record for previous Job. ERR=%s" +#~ msgstr "Konnte job record fr JobId %s zum migrieren nicht holen. ERR=%s" + +#, fuzzy +#~ msgid "Verifying against JobId=%d Job=%s\n" +#~ msgstr "Migration benutzt JobId=%s Job=%s\n" + +#, fuzzy +#~ msgid "Could not get fileset record from previous Job. ERR=%s" +#~ msgstr "Konnte job record fr JobId %s zum migrieren nicht holen. ERR=%s" + +#, fuzzy +#~ msgid "Could not find FileSet resource \"%s\" from previous Job\n" +#~ msgstr "" +#~ "Konnte config Resource %s , referenziert in Zeile %d : %s nicht finden\n" + +#, fuzzy +#~ msgid "Could not get FileSet resource for verify Job." +#~ msgstr "Kann Storage resource %s nicht finden\n" + +#, fuzzy +#~ msgid "Start Verify JobId=%s Level=%s Job=%s\n" +#~ msgstr "Start Sicherung JobId %s, Job=%s\n" + +#, fuzzy +#~ msgid "Unimplemented verify level %d\n" +#~ msgstr "Nicht implementierter backup level %d %c\n" + +#, fuzzy +#~ msgid "Verify OK -- with warnings" +#~ msgstr "%s OK -- mit Warnungen" + +#, fuzzy +#~ msgid "*** Verify Error ***" +#~ msgstr "*** %s Fehler ***" + +#, fuzzy +#~ msgid "Verify Canceled" +#~ msgstr "%s Abgebrochen" + +#, fuzzy +#~ msgid "Inappropriate term code: %d %c\n" +#~ msgstr "Unangebrachter Beendigungskode: %c\n" + +#, fuzzy +#~ msgid "" +#~ "%s %s %s (%s):\n" +#~ " Build OS: %s %s %s\n" +#~ " JobId: %d\n" +#~ " Job: %s\n" +#~ " FileSet: %s\n" +#~ " Verify Level: %s\n" +#~ " Client: %s\n" +#~ " Verify JobId: %d\n" +#~ " Verify Job: %s\n" +#~ " Start time: %s\n" +#~ " End time: %s\n" +#~ " Elapsed time: %s\n" +#~ " Accurate: %s\n" +#~ " Files Expected: %s\n" +#~ " Files Examined: %s\n" +#~ " Non-fatal FD errors: %d\n" +#~ " SD Errors: %d\n" +#~ " FD termination status: %s\n" +#~ " SD termination status: %s\n" +#~ " Termination: %s\n" +#~ "\n" +#~ msgstr "" +#~ "%s %s (%s): %s\n" +#~ " JobId: %d\n" +#~ " Job: %s\n" +#~ " Client: %s\n" +#~ " Start time: %s\n" +#~ " End time: %s\n" +#~ " Dateien erwartet: %s\n" +#~ " Dateien. wiederhergestellt: %s\n" +#~ " Bytes wiederhergestellt: %s\n" +#~ " Geschwindigkeit: %.1f KB/s\n" +#~ " FD Fehler: %d\n" +#~ " FD Beendigungsstatus: %s\n" +#~ " SD Beendigungsstatus: %s\n" +#~ " Beendigungsstatus: %s\n" +#~ "\n" + +#, fuzzy +#~ msgid "" +#~ "%s %s %s (%s):\n" +#~ " Build: %s %s %s\n" +#~ " JobId: %d\n" +#~ " Job: %s\n" +#~ " FileSet: %s\n" +#~ " Verify Level: %s\n" +#~ " Client: %s\n" +#~ " Verify JobId: %d\n" +#~ " Verify Job: %s\n" +#~ " Start time: %s\n" +#~ " End time: %s\n" +#~ " Elapsed time: %s\n" +#~ " Files Examined: %s\n" +#~ " Non-fatal FD errors: %d\n" +#~ " FD termination status: %s\n" +#~ " Termination: %s\n" +#~ "\n" +#~ msgstr "" +#~ "%s %s (%s): %s\n" +#~ " JobId: %d\n" +#~ " Job: %s\n" +#~ " Client: %s\n" +#~ " Start time: %s\n" +#~ " End time: %s\n" +#~ " Dateien erwartet: %s\n" +#~ " Dateien. 
wiederhergestellt: %s\n" +#~ " Bytes wiederhergestellt: %s\n" +#~ " Geschwindigkeit: %.1f KB/s\n" +#~ " FD Fehler: %d\n" +#~ " FD Beendigungsstatus: %s\n" +#~ " SD Beendigungsstatus: %s\n" +#~ " Beendigungsstatus: %s\n" +#~ "\n" + +#, fuzzy +#~ msgid "" +#~ "bird Target=%s\n" + +#, fuzzy +#~ msgid "Cannot verify checksum for %s\n" +#~ msgstr "Kann Schedule resource %s nicht finden\n" + +#, fuzzy +#~ msgid "%s digest initialization failed\n" +#~ msgstr "Initialisierung der Verschlsselungsbibliothek fehlgeschlagen.\n" + +#, fuzzy +#~ msgid "" +#~ "Authorization problem: Remote server did not advertize required TLS " +#~ "support.\n" +#~ msgstr "" +#~ "Authorisationsproblem: Entfernter Server hat bentigte TLS Untersttzung " +#~ "nicht angeboten.\n" + +#, fuzzy +#~ msgid " Could not access \"%s\": ERR=%s\n" +#~ msgstr "Konnte counter %s: nicht aktualisieren: ERR=%s\n" + +#, fuzzy +#~ msgid " Could not follow link \"%s\": ERR=%s\n" +#~ msgstr "Konnte regex pattern \"%s\" nicht kompilieren ERR=%s\n" + +#, fuzzy +#~ msgid " Could not stat \"%s\": ERR=%s\n" +#~ msgstr "Kann bootstrap Datei nicht ffnen: %s: ERR=%s\n" + +#, fuzzy +#~ msgid " Could not open directory \"%s\": ERR=%s\n" +#~ msgstr "Konnte regex pattern \"%s\" nicht kompilieren ERR=%s\n" + +#, fuzzy +#~ msgid " Unknown file type %d; not saved: %s\n" +#~ msgstr "Unbekannter resource type %d in save_resource.\n" + +#, fuzzy +#~ msgid " Cannot open \"%s\": ERR=%s.\n" +#~ msgstr "Kann Ausdruck\"%s\"nicht auflsen: ERR=%s\n" + +#, fuzzy +#~ msgid "Network send error to SD. ERR=%s\n" +#~ msgstr "Netzwerkfehler mit FD bei %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Read error on file %s. ERR=%s\n" +#~ msgstr "Catalog Fehler beim Aktualisieren von volume \"%s\". ERR=%s" + +#, fuzzy +#~ msgid "Too many errors. JobErrors=%d.\n" +#~ msgstr "zu viele items in Job resource\n" + +#, fuzzy +#~ msgid "Encryption error\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "Invalid file flags, no supported data stream type.\n" +#~ msgstr "" +#~ "Catalog Fehler beim Aktualisieren des file digest. Nicht untersttzter " +#~ "digest stream typ: %d" + +#, fuzzy +#~ msgid "Network send error to SD. Data=%s ERR=%s\n" +#~ msgstr "Netzwerkfehler mit FD bei %s: ERR=%s\n" + +#, fuzzy +#~ msgid " Cannot open resource fork for \"%s\": ERR=%s.\n" +#~ msgstr "Kann Ausdruck\"%s\"nicht auflsen: ERR=%s\n" + +#, fuzzy +#~ msgid "Compression deflate error: %d\n" +#~ msgstr "Regex bersetzungsfehler. ERR=%s\n" + +#, fuzzy +#~ msgid "Compression LZO error: %d\n" +#~ msgstr "Regex bersetzungsfehler. ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: bfdjson [options] [config_file]\n" +#~ " -r get resource type \n" +#~ " -n get resource \n" +#~ " -l get only directives matching dirs (use with -r)\n" +#~ " -D get only data\n" +#~ " -c use as configuration file\n" +#~ " -d set debug level to \n" +#~ " -dt print a timestamp in debug output\n" +#~ " -t test configuration file and exit\n" +#~ " -v verbose user messages\n" +#~ " -? print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Version: %s (%s)\n" +#~ "\n" +#~ "Usage: bacula-fd [-f -s] [-c config_datei] [-d debug_level]\n" +#~ " -c benutze als Konfigurationsdatei\n" +#~ " -dnn setze debug level auf nn\n" +#~ " -f starte in Vordergrund (fr debugging)\n" +#~ " -g groupid\n" +#~ " -s no signals (fr debugging)\n" +#~ " -t Konfigurationsdatei testen und beenden\n" +#~ " -u userid\n" +#~ " -v Ausfhrliche Benutzermeldungen\n" +#~ " -? 
diese Meldung ausgeben.\n" +#~ "\n" + +#, fuzzy +#~ msgid "" +#~ "No File daemon resource defined in %s\n" +#~ "Without that I don't know who I am :-(\n" +#~ msgstr "" +#~ "Keine Director resource definiert in %s\n" +#~ "Ohne dies weiss ich nicht wer ich bin :-(\n" + +#, fuzzy +#~ msgid "Only one Client resource permitted in %s\n" +#~ msgstr "Nur eine Director resource erlaubt in %s\n" + +#, fuzzy +#~ msgid "" +#~ "Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined " +#~ "for File daemon in %s.\n" +#~ msgstr "" +#~ "Weder \"TLS CA Certificate\" noch \"TLS CA Certificate Dir\" sind " +#~ "definiert fr File daemon \"%s\" in %s.\n" + +#, fuzzy +#~ msgid "No Director resource defined in %s\n" +#~ msgstr "Keine Messages resource definiert in %s\n" + +#, fuzzy +#~ msgid "Failed to initialize encryption context.\n" +#~ msgstr "Konnte TLS context fr Storage \"%s\" in %s nicht initialisieren.\n" + +#, fuzzy +#~ msgid "%s signature digest initialization failed\n" +#~ msgstr "Initialisierung der Verschlsselungsbibliothek fehlgeschlagen.\n" + +#, fuzzy +#~ msgid "Unsupported cipher on this system.\n" +#~ msgstr "Kann Catalog DB Steuerdatei nicht ffnen %s: ERR=%s\n" + +#, fuzzy +#~ msgid "An error occurred while encrypting the stream.\n" +#~ msgstr "Fehler beim holen von Zeile: %s\n" + +#, fuzzy +#~ msgid "An error occurred while adding signer the stream.\n" +#~ msgstr "Fehler beim holen von Zeile: %s\n" + +#, fuzzy +#~ msgid "An error occurred while signing the stream.\n" +#~ msgstr "Fehler beim holen von Zeile: %s\n" + +#, fuzzy +#~ msgid "An error occurred finalizing signing the stream.\n" +#~ msgstr "Fehler beim holen von Zeile: %s\n" + +#, fuzzy +#~ msgid "Plugin save packet not found.\n" +#~ msgstr "Path record: %s nicht gefunden.\n" + +#, fuzzy +#~ msgid "Plugin=%s not found.\n" +#~ msgstr "Pool Ressource \"%s\" nicht gefunden.\n" + +#, fuzzy +#~ msgid "Could not create %s: ERR=%s\n" +#~ msgstr "Kann bootstrap Datei nicht ffnen: %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Error while creating command string %s.\n" +#~ msgstr "Fehler beim schreiben der bsr Datei.\n" + +#, fuzzy +#~ msgid "Error while executing \"%s\" %s. %s %s\n" +#~ msgstr "Fehler beim schreiben der bsr Datei.\n" + +#, fuzzy +#~ msgid "Unable to parse snapshot command output\n" +#~ msgstr "Verbindung zu File daemon fehlgeschlagen.\n" + +#, fuzzy +#~ msgid "Unable to create snapshot record. ERR=%s\n" +#~ msgstr "Konnte Job Eintrag nicht holen. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to create snapshot record, got %s\n" +#~ msgstr "Konnte Job Eintrag nicht holen. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to delete snapshot record. ERR=%s\n" +#~ msgstr "Konnte Job Eintrag nicht holen. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to delete snapshot record, got %s\n" +#~ msgstr "Konnte Job Eintrag nicht holen. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to get snapshot record. ERR=%s\n" +#~ msgstr "Konnte Job Eintrag nicht holen. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to get snapshot record, got %s\n" +#~ msgstr "Konnte Job Eintrag nicht holen. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to parse command output\n" +#~ msgstr "Verbindung zu File daemon fehlgeschlagen.\n" + +#, fuzzy +#~ msgid " Delete Snapshot for %s\n" +#~ msgstr "Erzeugung des DB Storage Eintrags %s fehlgeschlagen. ERR=%s\n" + +#, fuzzy +#~ msgid " Unable to delete snapshot of %s ERR=%s\n" +#~ msgstr "Konnte bootstrap Datei %s nicht erzeugen. ERR=%s\n" + +#, fuzzy +#~ msgid " Create Snapshot for %s\n" +#~ msgstr "Erzeugung des DB Storage Eintrags %s fehlgeschlagen. 
ERR=%s\n" + +#, fuzzy +#~ msgid " Unable to create snapshot of %s ERR=%s\n" +#~ msgstr "Konnte bootstrap Datei %s nicht erzeugen. ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Version: %s (%s)\n" +#~ "\n" +#~ "Usage: bacula-fd [-f -s] [-c config_file] [-d debug_level]\n" +#~ " -c use as configuration file\n" +#~ " -d [,] set debug level to , debug tags to \n" +#~ " -dt print a timestamp in debug output\n" +#~ " -f run in foreground (for debugging)\n" +#~ " -g groupid\n" +#~ " -k keep readall capabilities\n" +#~ " -m print kaboom output (for debugging)\n" +#~ " -s no signals (for debugging)\n" +#~ " -t test configuration file and exit\n" +#~ " -T set trace on\n" +#~ " -u userid\n" +#~ " -v verbose user messages\n" +#~ " -? print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Version: %s (%s)\n" +#~ "\n" +#~ "Usage: bacula-fd [-f -s] [-c config_datei] [-d debug_level]\n" +#~ " -c benutze als Konfigurationsdatei\n" +#~ " -dnn setze debug level auf nn\n" +#~ " -f starte in Vordergrund (fr debugging)\n" +#~ " -g groupid\n" +#~ " -s no signals (fr debugging)\n" +#~ " -t Konfigurationsdatei testen und beenden\n" +#~ " -u userid\n" +#~ " -v Ausfhrliche Benutzermeldungen\n" +#~ " -? diese Meldung ausgeben.\n" +#~ "\n" + +#, fuzzy +#~ msgid "Disable Command \"%s\" not found.\n" +#~ msgstr "FileSet Eintrag \"%s\" nicht gefunden\n" + +#, fuzzy +#~ msgid "Failed to load public certificate for File daemon \"%s\" in %s.\n" +#~ msgstr "" +#~ "Konnte TLS context fr File daemon \"%s\" in %s nicht initialisieren.\n" + +#, fuzzy +#~ msgid "Failed to load private key for File daemon \"%s\" in %s.\n" +#~ msgstr "" +#~ "Konnte TLS context fr File daemon \"%s\" in %s nicht initialisieren.\n" + +#, fuzzy +#~ msgid "" +#~ "Failed to load private key from file %s for File daemon \"%s\" in %s.\n" +#~ msgstr "" +#~ "Konnte TLS context fr File daemon \"%s\" in %s nicht initialisieren.\n" + +#, fuzzy +#~ msgid "" +#~ "Failed to load trusted signer certificate from file %s for File daemon " +#~ "\"%s\" in %s.\n" +#~ msgstr "" +#~ "Konnte TLS context fr File daemon \"%s\" in %s nicht initialisieren.\n" + +#, fuzzy +#~ msgid "" +#~ "Failed to load master key certificate from file %s for File daemon \"%s\" " +#~ "in %s.\n" +#~ msgstr "" +#~ "Konnte TLS context fr File daemon \"%s\" in %s nicht initialisieren.\n" + +#, fuzzy +#~ msgid "Failed to initialize TLS context for Console \"%s\" in %s.\n" +#~ msgstr "" +#~ "Konnte TLS context fr Director nicht initialisieren \"%s\" in %s.\n" + +#, fuzzy +#~ msgid "Expected a Cipher Type keyword, got: %s" +#~ msgstr "Job Type Schlsselwort erwartet, erhalten: %s" + +#, fuzzy +#~ msgid "Cannot find any Console resource for remote access\n" +#~ msgstr "Kann Console resource %s nicht finden\n" + +#, fuzzy +#~ msgid "Bad Hello command from Director at %s. Len=%d.\n" +#~ msgstr "UA Hello von %s:%s:%d ist ungltig. Len=%d\n" + +#, fuzzy +#~ msgid "Connection from unknown Director %s at %s rejected.\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "SD connect failed: Bad Hello command\n" +#~ msgstr "Job nicht gefunden: %s\n" + +#, fuzzy +#~ msgid "SD connect failed: Job name not found: %s\n" +#~ msgstr "Job nicht gefunden: %s\n" + +#, fuzzy +#~ msgid "SD \"%s\" tried to connect two times.\n" +#~ msgstr "Verbindung zu File daemon fehlgeschlagen.\n" + +#, fuzzy +#~ msgid "Command: \"%s\" is disabled.\n" +#~ msgstr "Pool Ressource \"%s\" nicht gefunden.\n" + +#, fuzzy +#~ msgid "Bad command from %s. Len=%d.\n" +#~ msgstr "UA Hello von %s:%s:%d ist ungltig. 
Len=%d\n" + +#, fuzzy +#~ msgid "2901 Job %s not found.\n" +#~ msgstr "Path record: %s nicht gefunden.\n" + +#, fuzzy +#~ msgid "2001 Job \"%s\" marked to be %s.\n" +#~ msgstr "JobId %s, Job %s zum Abbruch markiert.\n" + +#, fuzzy +#~ msgid "2991 Bad setbandwidth command: %s\n" +#~ msgstr "Storage daemon hat \"Job command\": %s abgelehnt\n" + +#, fuzzy +#~ msgid "2991 Bad setdebug command: %s\n" +#~ msgstr "Storage daemon hat \"Job command\": %s abgelehnt\n" + +#, fuzzy +#~ msgid "Bad estimate command: %s" +#~ msgstr "Storage daemon hat \"Job command\": %s abgelehnt\n" + +#, fuzzy +#~ msgid "Bad Job Command: %s" +#~ msgstr "Storage daemon hat \"Job command\": %s abgelehnt\n" + +#, fuzzy +#~ msgid "Bad RunBeforeJob command: %s\n" +#~ msgstr "Storage daemon hat \"Job command\": %s abgelehnt\n" + +#, fuzzy +#~ msgid "2905 Bad RunBeforeJob command.\n" +#~ msgstr "Storage daemon hat \"Job command\": %s abgelehnt\n" + +#, fuzzy +#~ msgid "Bad RunAfter command: %s\n" +#~ msgstr "Storage daemon hat \"Job command\": %s abgelehnt\n" + +#, fuzzy +#~ msgid "Bad RunScript command: %s\n" +#~ msgstr "Storage daemon hat \"Job command\": %s abgelehnt\n" + +#, fuzzy +#~ msgid "2905 Bad RunScript command.\n" +#~ msgstr "Storage daemon hat \"Job command\": %s abgelehnt\n" + +#, fuzzy +#~ msgid "Bad RestoreObject command: %s\n" +#~ msgstr "Storage daemon hat \"Job command\": %s abgelehnt\n" + +#, fuzzy +#~ msgid "2909 Bad RestoreObject command.\n" +#~ msgstr "Storage daemon hat \"Job command\": %s abgelehnt\n" + +#, fuzzy +#~ msgid "Error running program: %s. stat=%d: ERR=%s\n" +#~ msgstr "Fehler beim Start des Programms: %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot open FileSet input file: %s. ERR=%s\n" +#~ msgstr "Kann inkludierte Datei nicht ffnen: %s. ERR=%s\n" + +#, fuzzy +#~ msgid "REGEX %s compile error. ERR=%s\n" +#~ msgstr "Regex bersetzungsfehler. ERR=%s\n" + +#, fuzzy +#~ msgid "Invalid FileSet command: %s\n" +#~ msgstr "Storage daemon hat \"Job command\": %s abgelehnt\n" + +#, fuzzy +#~ msgid "Unknown backup level: %s\n" +#~ msgstr "Unbekannter Job level=%d\n" + +#, fuzzy +#~ msgid "Bad level command: %s\n" +#~ msgstr "Storage daemon hat \"Job command\": %s abgelehnt\n" + +#, fuzzy +#~ msgid "Bad session command: %s" +#~ msgstr "Storage daemon hat \"Job command\": %s abgelehnt\n" + +#, fuzzy +#~ msgid "Bad storage command: %s" +#~ msgstr "Storage daemon hat \"Job command\": %s abgelehnt\n" + +#, fuzzy +#~ msgid "Failed to connect to Storage daemon: %s:%d\n" +#~ msgstr "Verbindung zu Storage daemon fehlgeschlagen.\n" + +#, fuzzy +#~ msgid "Failed connect from Storage daemon. SD bsock=NULL.\n" +#~ msgstr "Verbindung zu Storage daemon fehlgeschlagen.\n" + +#, fuzzy +#~ msgid "ACL support not configured for Client.\n" +#~ msgstr "TLS bentigt aber nicht konfiguriert in Bacula.\n" + +#, fuzzy +#~ msgid "Cannot contact Storage daemon\n" +#~ msgstr "Verbindung zu Storage daemon fehlgeschlagen.\n" + +#, fuzzy +#~ msgid "Bad response to append open: %s\n" +#~ msgstr "FD hat fehlerhafte Antwort auf JobId Kommando zurckgegeben: %s\n" + +#, fuzzy +#~ msgid "Bad response from stored to open command\n" +#~ msgstr "Fehlerhafte Antwort von File daemon auf Hello Kommando: ERR=%s\n" + +#, fuzzy +#~ msgid "Bad status %d %c returned from Storage Daemon.\n" +#~ msgstr "FD hat keinen Jobstatus zurckgegeben.\n" + +#, fuzzy +#~ msgid "2994 Bad verify command: %s\n" +#~ msgstr "Storage daemon hat \"Job command\": %s abgelehnt\n" + +#, fuzzy +#~ msgid "Bad replace command. 
CMD=%s\n" +#~ msgstr "Storage daemon hat \"Job command\": %s abgelehnt\n" + +#, fuzzy +#~ msgid "Bad response to SD read open: %s\n" +#~ msgstr "FD hat fehlerhafte Antwort auf JobId Kommando zurckgegeben: %s\n" + +#, fuzzy +#~ msgid "Bad response from stored to read open command\n" +#~ msgstr "Fehlerhafte Antwort von File daemon auf Hello Kommando: ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "Bad response from SD to %s command. Wanted %s, got len=%ld msg=\"%s\"\n" +#~ msgstr "Schlechte Antwort auf %s Kommando: erwartet %s, erhalten %s\n" + +#, fuzzy +#~ msgid "Bad response from SD to %s command. Wanted %s, got SIGNAL %s\n" +#~ msgstr "Schlechte Antwort auf %s Kommando: erwartet %s, erhalten %s\n" + +#, fuzzy +#~ msgid "Error setting Finder Info on \"%s\"\n" +#~ msgstr "Konnte Datenbank \"%s\" nicht ffen.\n" + +#, fuzzy +#~ msgid "LZO init failed\n" +#~ msgstr "TLS Aushandlung gescheitert.\n" + +#, fuzzy +#~ msgid "Record header scan error: %s\n" +#~ msgstr "Regex bersetzungsfehler. ERR=%s\n" + +#, fuzzy +#~ msgid "Data record error. ERR=%s\n" +#~ msgstr "Attribute create error. %s" + +#, fuzzy +#~ msgid "Actual data size %d not same as header %d\n" +#~ msgstr "%s index %d ist nicht identisch mit attributen %d\n" + +#, fuzzy +#~ msgid "Could not create digest.\n" +#~ msgstr "Konnte Client Eintrag nicht anlegen. ERR=%s\n" + +#, fuzzy +#~ msgid "Signer not found. Decryption failed.\n" +#~ msgstr "FileSet MD5 Prfsumme nicht gefunden.\n" + +#, fuzzy +#~ msgid "" +#~ "An error=%d occurred while decoding encrypted session data stream: ERR=" +#~ "%s\n" +#~ msgstr "Fehler beim holen von Zeile: %s\n" + +#, fuzzy +#~ msgid "Missing encryption session data stream for %s\n" +#~ msgstr "Fehler beim holen von Zeile: %s\n" + +#, fuzzy +#~ msgid "Failed to initialize decryption context for %s\n" +#~ msgstr "Konnte TLS context fr Storage \"%s\" in %s nicht initialisieren.\n" + +#, fuzzy +#~ msgid "Cannot open resource fork for %s.\n" +#~ msgstr "Kann Ausdruck\"%s\"nicht auflsen: ERR=%s\n" + +#, fuzzy +#~ msgid "Failed to decode message signature for %s\n" +#~ msgstr "Konnte Job Eintrag nicht holen. ERR=%s\n" + +#, fuzzy +#~ msgid "Zlib data error" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "Seek to %s error on %s: ERR=%s\n" +#~ msgstr "Catalog Fehler beim Aktualisieren von volume \"%s\". ERR=%s" + +#, fuzzy +#~ msgid "LZO uncompression error on file %s. ERR=%d\n" +#~ msgstr "Catalog Fehler beim Aktualisieren von volume \"%s\". ERR=%s" + +#, fuzzy +#~ msgid "Uncompression error on file %s. ERR=%s\n" +#~ msgstr "Catalog Fehler beim Aktualisieren von volume \"%s\". ERR=%s" + +#, fuzzy +#~ msgid "" +#~ "Write error at byte=%lld block=%d write_len=%d lerror=%d on %s: ERR=%s\n" +#~ msgstr "Catalog Fehler beim Aktualisieren von volume \"%s\". ERR=%s" + +#, fuzzy +#~ msgid "Signature validation failed for file %s: ERR=%s\n" +#~ msgstr "Konnte bootstrap Datei %s nicht erzeugen. ERR=%s\n" + +#, fuzzy +#~ msgid "Digest one file failed for file: %s\n" +#~ msgstr "Mehr als ein Dateiname! %s fr Datei: %s\n" + +#, fuzzy +#~ msgid "Signature validation failed for %s: %s\n" +#~ msgstr "Konnte bootstrap Datei %s nicht erzeugen. 
ERR=%s\n" + +#, fuzzy +#~ msgid "Director connected %sat: %s\n" +#~ msgstr "Verbindung zu File daemon fehlgeschlagen.\n" + +#, fuzzy +#~ msgid "JobId %d Job %s is running.\n" +#~ msgstr "JobId %s, Job %s zum Abbruch markiert.\n" + +#, fuzzy +#~ msgid " %s %s Job started: %s\n" +#~ msgstr "Clone JobId %d gestartet.\n" + +#, fuzzy +#~ msgid "" +#~ " Files=%s Bytes=%s AveBytes/sec=%s LastBytes/sec=%s Errors=%d\n" +#~ " Bwlimit=%s ReadBytes=%s\n" +#~ msgstr " MigTime=%s MigHiBytes=%s MigLoBytes=%s\n" + +#, fuzzy +#~ msgid " Files: Examined=%s Backed up=%s\n" +#~ msgstr "FileSet: name=%s\n" + +#, fuzzy +#~ msgid " Processing file: %s\n" +#~ msgstr "Fehler beim holen von Zeile: %s\n" + +#, fuzzy +#~ msgid "Bad .status command: %s\n" +#~ msgstr "Storage daemon hat \"Job command\": %s abgelehnt\n" + +#, fuzzy +#~ msgid " Could not access %s: ERR=%s\n" +#~ msgstr "Konnte counter %s: nicht aktualisieren: ERR=%s\n" + +#, fuzzy +#~ msgid " Could not follow link %s: ERR=%s\n" +#~ msgstr "Konnte regex pattern \"%s\" nicht kompilieren ERR=%s\n" + +#, fuzzy +#~ msgid " Could not stat %s: ERR=%s\n" +#~ msgstr "Kann bootstrap Datei nicht ffnen: %s: ERR=%s\n" + +#, fuzzy +#~ msgid " Could not open directory %s: ERR=%s\n" +#~ msgstr "Konnte regex pattern \"%s\" nicht kompilieren ERR=%s\n" + +#, fuzzy +#~ msgid " Unknown file type %d: %s\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "Network error in send to Director: ERR=%s\n" +#~ msgstr "Netzwerkfehler mit FD bei %s: ERR=%s\n" + +#, fuzzy +#~ msgid " Cannot open %s: ERR=%s.\n" +#~ msgstr "Kann Ausdruck\"%s\"nicht auflsen: ERR=%s\n" + +#, fuzzy +#~ msgid " Cannot open resource fork for %s: ERR=%s.\n" +#~ msgstr "Kann Ausdruck\"%s\"nicht auflsen: ERR=%s\n" + +#, fuzzy +#~ msgid "Error reading file %s: ERR=%s\n" +#~ msgstr "Fehler beim Holen der Zeile fr Datei=%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to stat file \"%s\": ERR=%s\n" +#~ msgstr "Kann Datenbank=%s.nicht ffen. ERR=%s\n" + +#, fuzzy +#~ msgid "Failed to serialize extended attributes on file \"%s\"\n" +#~ msgstr " %s: ERR=%s\n" +#~ msgstr "Kann bootstrap Datei nicht ffnen: %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not restore file flags for file %s: ERR=%s\n" +#~ msgstr "Kann bootstrap Datei nicht ffnen: %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not hard link %s -> %s: ERR=%s\n" +#~ msgstr "Kann bootstrap Datei nicht ffnen: %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not reset file flags for file %s: ERR=%s\n" +#~ msgstr "Kann bootstrap Datei nicht ffnen: %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unknown file type %d; not restored: %s\n" +#~ msgstr "Unbekannter resource type %d in dump_resource.\n" + +#, fuzzy +#~ msgid "Zero length filename: %s\n" +#~ msgstr "Fehler beim holen von Zeile: %s\n" + +#, fuzzy +#~ msgid "Plugin: \"%s\" not found.\n" +#~ msgstr "Pool Ressource \"%s\" nicht gefunden.\n" + +#, fuzzy +#~ msgid "Cannot stat file %s: ERR=%s\n" +#~ msgstr "Kann bootstrap Datei nicht ffnen: %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot create directory %s: ERR=%s\n" +#~ msgstr "kann Variable operate nicht setzen: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot change owner and/or group of %s: ERR=%s\n" +#~ msgstr "Kann bootstrap Datei nicht ffnen: %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot change permissions of %s: ERR=%s\n" +#~ msgstr "Kann Ausdruck\"%s\"nicht auflsen: ERR=%s\n" + +#, fuzzy +#~ msgid "Too many subdirectories. 
Some permissions not reset.\n" +#~ msgstr "zu viele items in %s resource\n" + +#, fuzzy +#~ msgid "Cannot open current directory: ERR=%s\n" +#~ msgstr "Kann inkludierte Datei nicht ffnen: %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot get current directory: ERR=%s\n" +#~ msgstr "kann Variable operate nicht setzen: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot reset current directory: ERR=%s\n" +#~ msgstr "kann Variable operate nicht setzen: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot resolve service(%s)" +#~ msgstr "Kann Console resource %s nicht finden\n" + +#, fuzzy +#~ msgid "Cannot resolve hostname(%s) %s" +#~ msgstr "Kann Console resource %s nicht finden\n" + +#, fuzzy +#~ msgid "Expected a block to begin with { but got: %s" +#~ msgstr "wild-card Ausdruck erwartet, erhalten: %s\n" + +#, fuzzy +#~ msgid "Expected a string but got: %s" +#~ msgstr "regulren Ausdruck erwartet, erhalten: %s\n" + +#, fuzzy +#~ msgid "Expected a string [ip|ipv4|ipv6] but got: %s" +#~ msgstr "regulren Ausdruck erwartet, erhalten: %s\n" + +#, fuzzy +#~ msgid "Expected a string [ip|ipv4] but got: %s" +#~ msgstr "regulren Ausdruck erwartet, erhalten: %s\n" + +#, fuzzy +#~ msgid "Expected an equal = but got: %s" +#~ msgstr "Erwartete ein \"ist gleich\", erhalten: %s" + +#, fuzzy +#~ msgid "Expected an identifier [addr|port] but got: %s" +#~ msgstr "Dateiname erwartet, erhalten: %s" + +#, fuzzy +#~ msgid "Expected a identifier [addr|port] but got: %s" +#~ msgstr "Dateiname erwartet, erhalten: %s" + +#, fuzzy +#~ msgid "Expected a equal =, got: %s" +#~ msgstr "Erwartete ein \"ist gleich\", erhalten: %s" + +#, fuzzy +#~ msgid "Expected a number or a string but got: %s" +#~ msgstr "regulren Ausdruck erwartet, erhalten: %s\n" + +#, fuzzy +#~ msgid "Expected an IP number or a hostname but got: %s" +#~ msgstr "Dateiname erwartet, erhalten: %s" + +#, fuzzy +#~ msgid "Expected a end of block with } but got: %s" +#~ msgstr "Erwartet: %s, erhalten: %s" + +#, fuzzy +#~ msgid "Expected an end of block with } but got: %s" +#~ msgstr "Erwartet: %s, erhalten: %s" + +#, fuzzy +#~ msgid "Expected an IP number or a hostname, got: %s" +#~ msgstr "Dateiname erwartet, erhalten: %s" + +#, fuzzy +#~ msgid "Expected a port number or string, got: %s" +#~ msgstr "regulren Ausdruck erwartet, erhalten: %s\n" + +#, fuzzy +#~ msgid "Error scanning attributes: %s\n" +#~ msgstr "Fehler beim holen von Zeile: %s\n" + +#, fuzzy +#~ msgid "bget_msg: unknown signal %d\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "Attr spool write error. wrote=%d wanted=%d bytes. ERR=%s\n" +#~ msgstr "Socket Fehler auf %s Kommando: ERR=%s\n" + +#, fuzzy +#~ msgid "TLS connection initialization failed.\n" +#~ msgstr "TLS Aushandlung gescheitert.\n" + +#, fuzzy +#~ msgid "TLS Negotiation failed.\n" +#~ msgstr "TLS Aushandlung gescheitert.\n" + +#, fuzzy +#~ msgid "TLS enabled but not configured.\n" +#~ msgstr "TLS bentigt aber nicht konfiguriert in Bacula.\n" + +#, fuzzy +#~ msgid "TLS enable but not configured.\n" +#~ msgstr "TLS bentigt aber nicht konfiguriert in Bacula.\n" + +#, fuzzy +#~ msgid "Unknown error." +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "Unknown sig %d" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "Cannot bind port %d: ERR=%s: Retrying ...\n" +#~ msgstr "Konnte FileSet \"%s\" Eintrag nicht erzeugen. ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot bind port %d: ERR=%s.\n" +#~ msgstr "Kann Programm: %s nicht starten. 
ERR=%s\n" + +#, fuzzy +#~ msgid "No addr/port found to listen on.\n" +#~ msgstr "Keine JobIds zum migrieren gefunden.\n" + +#, fuzzy +#~ msgid "Could not init client queue: ERR=%s\n" +#~ msgstr "Konnte job queue nicht initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Error in select: %s\n" +#~ msgstr "Fehler beim holen von Zeile: %s\n" + +#, fuzzy +#~ msgid "Could not create client BSOCK.\n" +#~ msgstr "Konnte Client Eintrag nicht anlegen. ERR=%s\n" + +#, fuzzy +#~ msgid "Could not add job to client queue: ERR=%s\n" +#~ msgstr "Konnte job queue nicht hinzufgen: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not destroy client queue: ERR=%s\n" +#~ msgstr "Konnte job queue nicht initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "Could not connect to %s on %s:%d. ERR=%s\n" +#~ "Retrying ...\n" +#~ msgstr "Konnte FileSet \"%s\" Eintrag nicht erzeugen. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to connect to %s on %s:%d. ERR=%s\n" +#~ msgstr "Konnte Job Eintrag nicht holen. ERR=%s\n" + +#, fuzzy +#~ msgid "gethostbyname() for host \"%s\" failed: ERR=%s\n" +#~ msgstr "Erzeugung des db Filename Eintrags %s fehlgeschlagen. ERR=%s\n" + +#, fuzzy +#~ msgid "Socket open error. proto=%d port=%d. ERR=%s\n" +#~ msgstr "Socket Fehler auf %s Kommando: ERR=%s\n" + +#, fuzzy +#~ msgid "Source address bind error. proto=%d. ERR=%s\n" +#~ msgstr "Socket Fehler auf %s Kommando: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not init bsock read mutex. ERR=%s\n" +#~ msgstr "Konnte job queue nicht initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not init bsock write mutex. ERR=%s\n" +#~ msgstr "Konnte job queue nicht initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not init bsock attribute mutex. ERR=%s\n" +#~ msgstr "Konnte job queue nicht initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Write error sending %d bytes to %s:%s:%d: ERR=%s\n" +#~ msgstr "" +#~ "Fehler beim senden des Kommandos Hello an File daemon auf \"%s:%d\". ERR=" +#~ "%s\n" + +#, fuzzy +#~ msgid "Read expected %d got %d from %s:%s:%d\n" +#~ msgstr "Socket Fehler auf %s Kommando: ERR=%s\n" + +#, fuzzy +#~ msgid "Read error from %s:%s:%d: ERR=%s\n" +#~ msgstr "Socket Fehler auf %s Kommando: ERR=%s\n" + +#, fuzzy +#~ msgid "fread attr spool error. Wanted=%d got=%d bytes.\n" +#~ msgstr "Regex bersetzungsfehler. ERR=%s\n" + +#, fuzzy +#~ msgid "fread attr spool I/O error.\n" +#~ msgstr "Regex bersetzungsfehler. ERR=%s\n" + +#, fuzzy +#~ msgid "Could not malloc BSOCK data buffer\n" +#~ msgstr "Konnte Datenbank \"%s\" nicht ffen.\n" + +#, fuzzy +#~ msgid "sockopt error: %s\n" +#~ msgstr "Regex bersetzungsfehler. ERR=%s\n" + +#, fuzzy +#~ msgid "fcntl F_GETFL error. ERR=%s\n" +#~ msgstr "Regex bersetzungsfehler. ERR=%s\n" + +#, fuzzy +#~ msgid "fcntl F_SETFL error. ERR=%s\n" +#~ msgstr "Regex bersetzungsfehler. 
ERR=%s\n" + +#, fuzzy +#~ msgid "Director authorization error at \"%s:%d\"\n" +#~ msgstr "Authorisationsproblem: FD an \"%s:%d\" erfordert TLS.\n" + +#, fuzzy +#~ msgid "" +#~ "Authorization error: Remote server at \"%s:%d\" did not advertise " +#~ "required TLS support.\n" +#~ msgstr "" +#~ "Authorisationsproblem: Entfernter Server hat benötigte TLS Unterstützung " +#~ "nicht angeboten.\n" + +#, fuzzy +#~ msgid "" +#~ "Authorization error with Director at \"%s:%d\": Remote server requires " +#~ "TLS.\n" +#~ msgstr "Authorisationsproblem: Entfernter Server erfordert TLS.\n" + +#, fuzzy +#~ msgid "" +#~ "Authorization error with Director at \"%s:%d\"\n" +#~ "Most likely the passwords do not agree.\n" +#~ "If you are using TLS, there may have been a certificate validation error " +#~ "during the TLS handshake.\n" +#~ "For help, please see: " +#~ msgstr "" +#~ "Kann mit File daemon on \"%s:%d\" nicht authentisieren. Mögliche " +#~ "Ursachen:\n" +#~ "Passworte oder Namen nicht gleich oder\n" +#~ "Maximum Concurrent Jobs überschritten auf dem FD oder\n" +#~ "FD Netzwerk durcheinander (Daemon neustarten).\n" +#~ "Für Hilfe bitte unter http://www.bacula.org/rel-manual/faq." +#~ "html#AuthorizationErrors nachsehen.\n" + +#, fuzzy +#~ msgid "safe_unlink could not compile regex pattern \"%s\" ERR=%s\n" +#~ msgstr "Konnte regex pattern \"%s\" nicht kompilieren ERR=%s\n" + +#, fuzzy +#~ msgid "Out of memory: ERR=%s\n" +#~ msgstr "Attribute create error. %s" + +#, fuzzy +#~ msgid "Bad errno" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "Cannot open %s file. %s ERR=%s\n" +#~ msgstr "Kann bootstrap Datei nicht öffnen: %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not open %s file. %s ERR=%s\n" +#~ msgstr "Kann bootstrap Datei nicht öffnen: %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot lock %s file. %s ERR=%s\n" +#~ msgstr "Kann bootstrap Datei nicht öffnen: %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot not open %s file. %s ERR=%s\n" +#~ msgstr "Kann bootstrap Datei nicht öffnen: %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not create state file. %s ERR=%s\n" +#~ msgstr "Kann bootstrap Datei nicht öffnen: %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Write final hdr error: ERR=%s\n" +#~ msgstr "Catalog Fehler beim Aktualisieren von volume \"%s\". ERR=%s" + +#, fuzzy +#~ msgid "1999 Authorization failed.\n" +#~ msgstr "TLS Aushandlung gescheitert.\n" + +#, fuzzy +#~ msgid "Unable to open certificate file" +#~ msgstr "Kann Datenbank=%s.nicht öffnen. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to read certificate from file" +#~ msgstr "Konnte bootstrap Datei %s nicht erzeugen. 
ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to open private key file" +#~ msgstr "Kann Konsole \"%s\" an %s:%s:%d nicht authentisieren.\n" + +#, fuzzy +#~ msgid "Unable to read private key from file" +#~ msgstr "Kann Konsole \"%s\" an %s:%s:%d nicht authentisieren.\n" + +#, fuzzy +#~ msgid "Unsupported digest type: %d\n" +#~ msgstr "nicht implementierter job Typ: %d\n" + +#, fuzzy +#~ msgid "OpenSSL digest initialization failed" +#~ msgstr "Initialisierung der Verschlsselungsbibliothek fehlgeschlagen.\n" + +#, fuzzy +#~ msgid "No signers found for crypto verify.\n" +#~ msgstr "Keine %ss zum Migrieren gefunden.\n" + +#, fuzzy +#~ msgid "Signature creation failed" +#~ msgstr "FileSet MD5 Prfsumme nicht gefunden.\n" + +#, fuzzy +#~ msgid "SHA1Update() returned an error: %d\n" +#~ msgstr "1993 Update Media Fehler\n" + +#, fuzzy +#~ msgid "No error" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "Signer not found" +#~ msgstr "FileSet MD5 Prfsumme nicht gefunden.\n" + +#, fuzzy +#~ msgid "Recipient not found" +#~ msgstr "FileSet MD5 Prfsumme nicht gefunden.\n" + +#, fuzzy +#~ msgid "Internal error" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "Unknown error" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "Cannot fork to become daemon: ERR=%s\n" +#~ msgstr "Kann var context nicht erzeugen: ERR=%s\n" + +#, fuzzy +#~ msgid "Illegal character \"%c\" in name.\n" +#~ msgstr "Illegales Zeichen in \"Volume name\" \"%s\"\n" + +#, fuzzy +#~ msgid "Name too long.\n" +#~ msgstr "Job nicht gefunden: %s\n" + +#, fuzzy +#~ msgid "Cannot open config file %s: %s\n" +#~ msgstr "Kann inkludierte Datei nicht ffnen: %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot open lex\n" +#~ msgstr "Kann inkludierte Datei nicht ffnen: %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Backup" +#~ msgstr "Sicherung OK" + +#, fuzzy +#~ msgid "Restoring" +#~ msgstr "Wiederherstellung luft..." + +#, fuzzy +#~ msgid "Migration" +#~ msgstr "Konnte Migrationsjob nicht starten.\n" + +#, fuzzy +#~ msgid "Unknown operation" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "backup" +#~ msgstr "Sicherung OK" + +#, fuzzy +#~ msgid "verified" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "restored" +#~ msgstr "Job Ressource" + +#, fuzzy +#~ msgid "restore" +#~ msgstr "Job Ressource" + +#, fuzzy +#~ msgid "migrated" +#~ msgstr "Konnte Migrationsjob nicht starten.\n" + +#, fuzzy +#~ msgid "migrate" +#~ msgstr "Konnte Migrationsjob nicht starten.\n" + +#, fuzzy +#~ msgid "unknown action" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "pthread_once failed. ERR=%s\n" +#~ msgstr "pthread_cond_wait: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not init msg_queue mutex. ERR=%s\n" +#~ msgstr "Konnte job queue nicht initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Config error: %s\n" +#~ msgstr "Regex bersetzungsfehler. ERR=%s\n" + +#, fuzzy +#~ msgid "string" +#~ msgstr "Wiederherstellung luft..." + +#, fuzzy +#~ msgid "quoted_string" +#~ msgstr "Wiederherstellung luft..." + +#, fuzzy +#~ msgid "expected a positive integer number, got: %s" +#~ msgstr "Dateiname erwartet, erhalten: %s" + +#, fuzzy +#~ msgid "Cannot open included config file %s: %s\n" +#~ msgstr "Kann inkludierte Datei nicht ffnen: %s. 
ERR=%s\n" + +#, fuzzy +#~ msgid "expected an integer or a range, got %s: %s" +#~ msgstr "regulren Ausdruck erwartet, erhalten: %s\n" + +#, fuzzy +#~ msgid "expected an integer number, got %s: %s" +#~ msgstr "Dateiname erwartet, erhalten: %s" + +#, fuzzy +#~ msgid "expected a name, got %s: %s" +#~ msgstr "Dateiname erwartet, erhalten: %s" + +#, fuzzy +#~ msgid "expected a string, got %s: %s" +#~ msgstr "regulren Ausdruck erwartet, erhalten: %s\n" + +#, fuzzy +#~ msgid "Mutex lock failure. ERR=%s\n" +#~ msgstr "pthread_cond_wait: ERR=%s\n" + +#, fuzzy +#~ msgid "Mutex unlock failure. ERR=%s\n" +#~ msgstr "pthread_cond_wait: ERR=%s\n" + +#, fuzzy +#~ msgid "pthread_create failed: ERR=%s\n" +#~ msgstr "pthread_create: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not open console message file %s: ERR=%s\n" +#~ msgstr "Kann bootstrap Datei nicht ffnen: %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not get con mutex: ERR=%s\n" +#~ msgstr "Konnte job queue nicht initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Bacula Message" +#~ msgstr "Bacula " + +#, fuzzy +#~ msgid "open mail pipe %s failed: ERR=%s\n" +#~ msgstr "Client id select fehlgeschlagen: ERR=%s\n" + +#, fuzzy +#~ msgid "close error: ERR=%s\n" +#~ msgstr "Attribute create error. %s" + +#, fuzzy +#~ msgid "fopen %s failed: ERR=%s\n" +#~ msgstr "Client id select fehlgeschlagen: ERR=%s\n" + +#, fuzzy +#~ msgid "Msg delivery error: fopen %s failed: ERR=%s\n" +#~ msgstr "Catalog Fehler beim Aktualisieren von volume \"%s\". ERR=%s" + +#, fuzzy +#~ msgid "%s JobId %u: Warning: " +#~ msgstr "Clone JobId %d gestartet.\n" + +#, fuzzy +#~ msgid "Unable to init mutex: ERR=%s\n" +#~ msgstr "Kann DB lock nicht initialisieren. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to destroy mutex: ERR=%s\n" +#~ msgstr "Kann DB lock nicht initialisieren. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to init OpenSSL threading: ERR=%s\n" +#~ msgstr "Kann DB lock nicht initialisieren. ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "Attempt to define second \"%s\" resource named \"%s\" is not permitted.\n" +#~ msgstr "" +#~ "Der Versuch der Definition einer zweiten %s resource mit dem Namen \"%s" +#~ "\" ist nicht erlaubt.\n" + +#, fuzzy +#~ msgid "Inserted res: %s index=%d\n" +#~ msgstr "Inserting %s res: %s index=%d pass=%d\n" + +#, fuzzy +#~ msgid "expected an =, got: %s" +#~ msgstr "Erwartete ein \"ist gleich\", erhalten: %s" + +#, fuzzy +#~ msgid "Unknown item code: %d\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "message type: %s not found" +#~ msgstr "FileSet Eintrag \"%s\" nicht gefunden\n" + +#, fuzzy +#~ msgid "" +#~ "Attempt to redefine \"%s\" from \"%s\" to \"%s\" referenced on line %d : " +#~ "%s\n" +#~ msgstr "" +#~ "Konnte config Resource %s , referenziert in Zeile %d : %s nicht finden\n" + +#, fuzzy +#~ msgid "Could not find config Resource \"%s\" referenced on line %d : %s\n" +#~ msgstr "" +#~ "Konnte config Resource %s , referenziert in Zeile %d : %s nicht finden\n" + +#, fuzzy +#~ msgid "Attempt to redefine resource \"%s\" referenced on line %d : %s\n" +#~ msgstr "" +#~ "Konnte config Resource %s , referenziert in Zeile %d : %s nicht finden\n" + +#, fuzzy +#~ msgid "Too many %s directives. Max. is %d. 
line %d: %s\n" +#~ msgstr "zu viele items in %s resource\n" + +#, fuzzy +#~ msgid "Missing config Resource \"%s\" referenced on line %d : %s\n" +#~ msgstr "" +#~ "Konnte config Resource %s , referenziert in Zeile %d : %s nicht finden\n" + +#, fuzzy +#~ msgid "expected a size number, got: %s" +#~ msgstr "Dateiname erwartet, erhalten: %s" + +#, fuzzy +#~ msgid "expected a speed number, got: %s" +#~ msgstr "Dateiname erwartet, erhalten: %s" + +#, fuzzy +#~ msgid "expected a %s, got: %s" +#~ msgstr "Erwartete ein \"ist gleich\", erhalten: %s" + +#, fuzzy +#~ msgid "Expected a Tape Label keyword, got: %s" +#~ msgstr "Job Level Schlsselwort erwartet, erhalten: %s" + +#, fuzzy +#~ msgid "Unable to initialize resource lock. ERR=%s\n" +#~ msgstr "Kann DB lock nicht initialisieren. ERR=%s\n" + +#, fuzzy +#~ msgid "Config filename too long.\n" +#~ msgstr "Job nicht gefunden: %s\n" + +#, fuzzy +#~ msgid "Cannot open config file \"%s\": %s\n" +#~ msgstr "Kann inkludierte Datei nicht ffnen: %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Expected a Resource name identifier, got: %s" +#~ msgstr "Restore replacement Option erwartet, erhalten: %s" + +#, fuzzy +#~ msgid "expected resource name, got: %s" +#~ msgstr "Dateiname erwartet, erhalten: %s" + +#, fuzzy +#~ msgid "not in resource definition: %s" +#~ msgstr "Keine Messages resource definiert in %s\n" + +#, fuzzy +#~ msgid "Name not specified for resource" +#~ msgstr "Schlsselwort %s ist in dieser Ressource nicht erlaubt." + +#, fuzzy +#~ msgid "Unknown parser state %d\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "Failed to open Plugin directory %s: ERR=%s\n" +#~ msgstr "Kann Catalog DB Steuerdatei nicht ffnen %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Failed to find any plugins in %s\n" +#~ msgstr "Kann Catalog DB Steuerdatei nicht ffnen %s: ERR=%s\n" + +#, fuzzy +#~ msgid "dlopen plugin %s failed: ERR=%s\n" +#~ msgstr "Client id select fehlgeschlagen: ERR=%s\n" + +#, fuzzy +#~ msgid "Lookup of loadPlugin in plugin %s failed: ERR=%s\n" +#~ msgstr "Client id select fehlgeschlagen: ERR=%s\n" + +#, fuzzy +#~ msgid "Lookup of unloadPlugin in plugin %s failed: ERR=%s\n" +#~ msgstr "Client id select fehlgeschlagen: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not find userid=%s: ERR=%s\n" +#~ msgstr "Kann bootstrap Datei nicht ffnen: %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not find password entry. 
ERR=%s\n" +#~ msgstr "Konnte job queue nicht initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not find group=%s: ERR=%s\n" +#~ msgstr "Kann bootstrap Datei nicht ffnen: %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not initgroups for group=%s, userid=%s: ERR=%s\n" +#~ msgstr "Konnte job queue nicht initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not initgroups for userid=%s: ERR=%s\n" +#~ msgstr "Konnte job queue nicht initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not set group=%s: ERR=%s\n" +#~ msgstr "Kann bootstrap Datei nicht ffnen: %s: ERR=%s\n" + +#, fuzzy +#~ msgid "prctl failed: ERR=%s\n" +#~ msgstr "pthread_create: ERR=%s\n" + +#, fuzzy +#~ msgid "setreuid failed: ERR=%s\n" +#~ msgstr "Abfrage gescheitert: %s: ERR=%s\n" + +#, fuzzy +#~ msgid "cap_from_text failed: ERR=%s\n" +#~ msgstr "pthread_create: ERR=%s\n" + +#, fuzzy +#~ msgid "cap_set_proc failed: ERR=%s\n" +#~ msgstr "Media id select fehlgeschlagen: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not set specified userid: %s\n" +#~ msgstr "Konnte Datenbank \"%s\" nicht ffen.\n" + +#, fuzzy +#~ msgid "rwl_writelock failure at %s:%d: ERR=%s\n" +#~ msgstr "pthread_cond_wait: ERR=%s\n" + +#, fuzzy +#~ msgid "rwl_writeunlock failure at %s:%d:. ERR=%s\n" +#~ msgstr "pthread_cond_wait: ERR=%s\n" + +#, fuzzy +#~ msgid "%s: %s\n" +#~ msgstr " --> Run=%s\n" + +#, fuzzy +#~ msgid "Fork error: ERR=%s\n" +#~ msgstr "Attribute create error. %s" + +#, fuzzy +#~ msgid "Calling: %s %s %s %s\n" +#~ msgstr "1000 OK: %s Version: %s (%s)\n" + +#, fuzzy +#~ msgid "execv: %s failed: ERR=%s\n" +#~ msgstr "pthread_create: ERR=%s\n" + +#, fuzzy +#~ msgid "BUS error" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "%6d\t%-7s\t%8s\t%10s\t%-7s\t%-8s\t%s\n" +#~ msgstr "1000 OK: %s Version: %s (%s)\n" + +#, fuzzy +#~ msgid "Error loading certificate file" +#~ msgstr "Fehler beim schreiben der bsr Datei.\n" + +#, fuzzy +#~ msgid "Unable to open DH parameters file" +#~ msgstr "Kann Datenbank=%s.nicht ffen. 
ERR=%s\n" + +#, fuzzy +#~ msgid "Non-fatal error" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "Canceled" +#~ msgstr "%s Abgebrochen" + +#, fuzzy +#~ msgid "Waiting on FD" +#~ msgstr "File daemon" + +#, fuzzy +#~ msgid "Waiting for mount" +#~ msgstr "File daemon" + +#, fuzzy +#~ msgid "Waiting for Storage resource" +#~ msgstr "Storage daemon" + +#, fuzzy +#~ msgid "Waiting for Job resource" +#~ msgstr "Job Ressource" + +#, fuzzy +#~ msgid "Waiting for Client resource" +#~ msgstr "Pool Ressource" + +#, fuzzy +#~ msgid "Waiting for Start Time" +#~ msgstr "Storage daemon" + +#, fuzzy +#~ msgid "Unknown Job termination status=%d" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "Completed with warnings" +#~ msgstr "%s OK -- mit Warnungen" + +#, fuzzy +#~ msgid "Terminated with errors" +#~ msgstr "%s OK -- mit Warnungen" + +#, fuzzy +#~ msgid "Fatal error" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "Waiting for File daemon" +#~ msgstr "File daemon" + +#, fuzzy +#~ msgid "Waiting for Storage daemon" +#~ msgstr "Storage daemon" + +#, fuzzy +#~ msgid "Fatal Error" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "Unknown term code" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "Migrated Job" +#~ msgstr "Konnte Migrationsjob nicht starten.\n" + +#, fuzzy +#~ msgid "Restore" +#~ msgstr "Job Ressource" + +#, fuzzy +#~ msgid "Admin" +#~ msgstr "Admin OK" + +#, fuzzy +#~ msgid "Migrate" +#~ msgstr "Konnte Migrationsjob nicht starten.\n" + +#, fuzzy +#~ msgid "Unknown Type" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "Unknown Job Level" +#~ msgstr "Unbekannter Job level=%d\n" + +#, fuzzy +#~ msgid "Disabled" +#~ msgstr "ist nicht aktiviert" + +#, fuzzy +#~ msgid "Working Directory: \"%s\" not found. Cannot continue.\n" +#~ msgstr "Counter Eintrag: %s in Catalog nicht gefunden.\n" + +#, fuzzy +#~ msgid "invalid expansion configuration" +#~ msgstr "Vorherige Konfiguration zurckgesetzt.\n" + +#, fuzzy +#~ msgid "unknown command character in variable" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "malformatted search and replace operation" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "unknown flag in search and replace operation" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "invalid regex in search and replace operation" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "empty search string in search and replace operation" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "offsets in cut operation delimited by unknown character" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "unknown quoted pair in search and replace operation" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "invalid argument" +#~ msgstr "Ungltige JobId gefunden.\n" + +#, fuzzy +#~ msgid "undefined operation" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "unknown error" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "Unable to initialize watchdog lock. ERR=%s\n" +#~ msgstr "Kann DB lock nicht initialisieren. ERR=%s\n" + +#, fuzzy +#~ msgid "rwl_writelock failure. ERR=%s\n" +#~ msgstr "pthread_cond_wait: ERR=%s\n" + +#, fuzzy +#~ msgid "rwl_writeunlock failure. 
ERR=%s\n" +#~ msgstr "pthread_cond_wait: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not init work queue: ERR=%s\n" +#~ msgstr "Konnte job queue nicht initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not add work to queue: ERR=%s\n" +#~ msgstr "Konnte job queue nicht hinzufgen: ERR=%s\n" + +#, fuzzy +#~ msgid "Waiting for workq to be empty: ERR=%s\n" +#~ msgstr "Kann var context nicht erzeugen: ERR=%s\n" + +#, fuzzy +#~ msgid "Error in workq_destroy: ERR=%s\n" +#~ msgstr "Fehler beim Holen der Zeile fr Datei=%s: ERR=%s\n" + +#, fuzzy +#~ msgid "No volumes specified for reading. Job %s canceled.\n" +#~ msgstr "Weder storage noch Pool in Job \"%s\" definiert.\n" + +#, fuzzy +#~ msgid "Job %s canceled.\n" +#~ msgstr "%s Abgebrochen" + +#, fuzzy +#~ msgid "Read open %s device %s Volume \"%s\" failed: ERR=%s\n" +#~ msgstr "Erzeugung von db Device Eintrag %s fehlgeschlagen: ERR=%s\n" + +#, fuzzy +#~ msgid "Ready to read from volume \"%s\" on %s device %s.\n" +#~ msgstr "Neues Volume \"%s\" in catalog erzeugt.\n" + +#, fuzzy +#~ msgid "Could not ready %s device %s for append.\n" +#~ msgstr "Konnte FileSet \"%s\" Eintrag nicht erzeugen. ERR=%s\n" + +#, fuzzy +#~ msgid "Could not create JobMedia record for Volume=\"%s\" Job=%s\n" +#~ msgstr "Kann Storage resource %s nicht finden\n" + +#, fuzzy +#~ msgid "Read error on device %s in ANSI label. ERR=%s\n" +#~ msgstr "Socket Fehler auf %s Kommando: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not write ANSI HDR1 label. ERR=%s\n" +#~ msgstr "Kann bootstrap Datei nicht ffnen: %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Error writing EOF to tape. ERR=%s" +#~ msgstr "Fehler beim Aktualisieren der DB Media Datei. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to set network buffer size.\n" +#~ msgstr "Konnte Job Eintrag nicht holen. ERR=%s\n" + +#, fuzzy +#~ msgid "Write session label failed. ERR=%s\n" +#~ msgstr "Fehler beim Aktualisieren der DB Media Datei. ERR=%s\n" + +#, fuzzy +#~ msgid "Network send error to FD. ERR=%s\n" +#~ msgstr "Netzwerkfehler mit FD bei %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Error reading data header from FD. n=%d msglen=%d ERR=%s\n" +#~ msgstr "Fehler beim Lesen der catalog DB Steuerdatei. ERR=%s\n" + +#, fuzzy +#~ msgid "Malformed data header from FD: %s\n" +#~ msgstr "Nicht wohlgeformte Nachricht: %s\n" + +#, fuzzy +#~ msgid "Network error reading from FD. ERR=%s\n" +#~ msgstr "Netzwerkfehler mit FD bei %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Fatal append error on device %s: ERR=%s\n" +#~ msgstr "Socket Fehler auf %s Kommando: ERR=%s\n" + +#, fuzzy +#~ msgid "Set ok=FALSE after write_block_to_device.\n" +#~ msgstr "Fehler beim schreiben der bsr Datei.\n" + +#, fuzzy +#~ msgid "Error writing end session label. ERR=%s\n" +#~ msgstr "Fehler beim Aktualisieren der DB Media Datei. ERR=%s\n" + +#, fuzzy +#~ msgid "Set ok=FALSE after write_final_block_to_device.\n" +#~ msgstr "Fehler beim schreiben der bsr Datei.\n" + +#, fuzzy +#~ msgid "Error updating file attributes. ERR=%s\n" +#~ msgstr "Fehler beim Aktualisieren des Client Eintrags. ERR=%s\n" + +#, fuzzy +#~ msgid "Error getting Volume info: %s" +#~ msgstr "Keine Volumes zum Wiederherstellen gefunden.\n" + +#, fuzzy +#~ msgid "Error creating JobMedia records: ERR=%s\n" +#~ msgstr "Catalog Fehler beim Erzeugen des JobMedia Eintrags. %s" + +#, fuzzy +#~ msgid "Error creating JobMedia records: %s\n" +#~ msgstr "Catalog Fehler beim Erzeugen des JobMedia Eintrags. 
%s" + +#, fuzzy +#~ msgid "" +#~ "Incorrect password given by Director.\n" +#~ "For help, please see: " +#~ msgstr "Director und Storage daemon Passworte or Namen nicht gleich.\n" + +#, fuzzy +#~ msgid "TLS negotiation failed with DIR at \"%s:%d\"\n" +#~ msgstr "TLS Aushandlung fehlgeschlagen mit SD an \"%s:%d\"\n" + +#, fuzzy +#~ msgid "Unable to authenticate Director at %s.\n" +#~ msgstr "Kann Konsole \"%s\" an %s:%s:%d nicht authentisieren.\n" + +#, fuzzy +#~ msgid "" +#~ "Incorrect authorization key from File daemon at %s rejected.\n" +#~ "For help, please see: " +#~ msgstr "Director und File daemon Passworte oder Namen sind nicht gleich.\n" + +#, fuzzy +#~ msgid "TLS negotiation failed with FD at \"%s:%d\"\n" +#~ msgstr "TLS Aushandlung fehlgeschlagen mit FD an \"%s:%d\".\n" + +#, fuzzy +#~ msgid "Lock failure on autochanger. ERR=%s\n" +#~ msgstr "pthread_cond_wait: ERR=%s\n" + +#, fuzzy +#~ msgid "Unlock failure on autochanger. ERR=%s\n" +#~ msgstr "pthread_cond_wait: ERR=%s\n" + +#, fuzzy +#~ msgid "Volume \"%s\" wanted on %s is in use by device %s\n" +#~ msgstr "1997 Volume \"%s\" Nicht in Catalog.\n" + +#, fuzzy +#~ msgid "3993 Device %s not an autochanger device.\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "Autochanger error: ERR=%s\n" +#~ msgstr "Attribute create error. %s" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: bcopy [-d debug_level] \n" +#~ " -b bootstrap specify a bootstrap file\n" +#~ " -c specify a Storage configuration file\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -i specify input Volume names (separated by |)\n" +#~ " -o specify output Volume names (separated by |)\n" +#~ " -p proceed inspite of errors\n" +#~ " -v verbose\n" +#~ " -w specify working directory (default /tmp)\n" +#~ " -? print this message\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Version: %s (%s)\n" +#~ "\n" +#~ "Usage: dird [-f -s] [-c config_file] [-d debug_level] [config_file]\n" +#~ " -c setze Konfigurationsdatei auf Datei\n" +#~ " -dnn setze debug level auf nn\n" +#~ " -f starte im Vordergrund (fr debugging Zwecke)\n" +#~ " -g groupid\n" +#~ " -r starte jetzt\n" +#~ " -s no signals\n" +#~ " -t test - Konfiguration Lesen und beenden\n" +#~ " -u userid\n" +#~ " -v ausfhrliche Benutzer Meldungen\n" +#~ " -? diese Meldung ausgeben.\n" +#~ "\n" + +#, fuzzy +#~ msgid "dev open failed: %s\n" +#~ msgstr "Abfrage fehlgeschlagen: %s\n" + +#, fuzzy +#~ msgid "Write of last block failed.\n" +#~ msgstr "Job Einrichtung Fehlgeschlagen.\n" + +#, fuzzy +#~ msgid "Cannot fixup device error. %s\n" +#~ msgstr "Kann Client resource %s nicht finden\n" + +#, fuzzy +#~ msgid "Unknown" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: bextract \n" +#~ " -b specify a bootstrap file\n" +#~ " -c specify a Storage configuration file\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -T send debug traces to trace file (stored in /tmp)\n" +#~ " -e exclude list\n" +#~ " -i include list\n" +#~ " -p proceed inspite of I/O errors\n" +#~ " -t read data from volume, do not write anything\n" +#~ " -v verbose\n" +#~ " -V specify Volume names (separated by |)\n" +#~ " -? 
print this message\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Version: %s (%s)\n" +#~ "\n" +#~ "Usage: bacula-fd [-f -s] [-c config_datei] [-d debug_level]\n" +#~ " -c benutze als Konfigurationsdatei\n" +#~ " -dnn setze debug level auf nn\n" +#~ " -f starte in Vordergrund (fr debugging)\n" +#~ " -g groupid\n" +#~ " -s no signals (fr debugging)\n" +#~ " -t Konfigurationsdatei testen und beenden\n" +#~ " -u userid\n" +#~ " -v Ausfhrliche Benutzermeldungen\n" +#~ " -? diese Meldung ausgeben.\n" +#~ "\n" + +#, fuzzy +#~ msgid "Could not open exclude file: %s, ERR=%s\n" +#~ msgstr "Kann bootstrap Datei nicht ffnen: %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not open include file: %s, ERR=%s\n" +#~ msgstr "Kann inkludierte Datei nicht ffnen: %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot stat %s. It must exist. ERR=%s\n" +#~ msgstr "Kann bootstrap Datei nicht ffnen: %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Write error on %s: %s\n" +#~ msgstr "Catalog Fehler beim Aktualisieren von volume \"%s\". ERR=%s" + +#, fuzzy +#~ msgid "Cannot continue.\n" +#~ msgstr "Kann inkludierte Datei nicht ffnen: %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Seek error Addr=%llu on %s: %s\n" +#~ msgstr "Catalog Fehler beim Aktualisieren von volume \"%s\". ERR=%s" + +#, fuzzy +#~ msgid "Uncompression error. ERR=%d\n" +#~ msgstr "Regex bersetzungsfehler. ERR=%s\n" + +#, fuzzy +#~ msgid "LZO uncompression error. ERR=%d\n" +#~ msgstr "Regex bersetzungsfehler. ERR=%s\n" + +#, fuzzy +#~ msgid "Error writing JobMedia record to catalog.\n" +#~ msgstr "Fehler beim schreiben der bsr Datei.\n" + +#, fuzzy +#~ msgid "Error writing final JobMedia record to catalog.\n" +#~ msgstr "Fehler beim schreiben der bsr Datei.\n" + +#, fuzzy +#~ msgid "Attempt to write on closed device=%s\n" +#~ msgstr "Konnte bootstrap Datei %s nicht erzeugen. ERR=%s\n" + +#, fuzzy +#~ msgid "Write error at %s on device %s Vol=%s. ERR=%s.\n" +#~ msgstr "Socket Fehler auf %s Kommando: ERR=%s\n" + +#, fuzzy +#~ msgid "Job failed or canceled.\n" +#~ msgstr "%s Abgebrochen" + +#, fuzzy +#~ msgid "The %sVolume=%s on device=%s appears to be unlabeled.%s\n" +#~ msgstr "Neues Volume \"%s\" in catalog erzeugt.\n" + +#, fuzzy +#~ msgid "Read error on fd=%d at addr=%s on device %s. ERR=%s.\n" +#~ msgstr "Socket Fehler auf %s Kommando: ERR=%s\n" + +#, fuzzy +#~ msgid "Read zero %sbytes Vol=%s at %s on device %s.\n" +#~ msgstr "Neues Volume \"%s\" in catalog erzeugt.\n" + +#, fuzzy +#~ msgid "" +#~ "User defined maximum volume size %s will be exceeded on device %s.\n" +#~ " Marking Volume \"%s\" as Full.\n" +#~ msgstr "\"Max Volume bytes\"erreicht. Markiere Volume \"%s\" als Voll.\n" + +#, fuzzy +#~ msgid "Backspace file at EOT failed. ERR=%s\n" +#~ msgstr "pthread_create: ERR=%s\n" + +#, fuzzy +#~ msgid "Backspace record at EOT failed. ERR=%s\n" +#~ msgstr "Aktualisierung von Media Eintrag %s fehlgeschlagen: ERR=%s\n" + +#, fuzzy +#~ msgid "Re-read last block at EOT failed. ERR=%s" +#~ msgstr "Erzeugung des DB Client Eintrags %s fehlgeschlagen. 
ERR=%s\n" + +#, fuzzy +#~ msgid "Error sending Volume info to Director.\n" +#~ msgstr "Keine Volumes zum Wiederherstellen gefunden.\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: bls [options] \n" +#~ " -b specify a bootstrap file\n" +#~ " -c specify a Storage configuration file\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -e exclude list\n" +#~ " -i include list\n" +#~ " -j list jobs\n" +#~ " -k list blocks\n" +#~ " (no j or k option) list saved files\n" +#~ " -L dump label\n" +#~ " -p proceed inspite of errors\n" +#~ " -V specify Volume names (separated by |)\n" +#~ " -E Check records to detect errors\n" +#~ " -v be verbose\n" +#~ " -? print this message\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Version: %s (%s)\n" +#~ "\n" +#~ "Usage: dird [-f -s] [-c config_file] [-d debug_level] [config_file]\n" +#~ " -c setze Konfigurationsdatei auf Datei\n" +#~ " -dnn setze debug level auf nn\n" +#~ " -f starte im Vordergrund (fr debugging Zwecke)\n" +#~ " -g groupid\n" +#~ " -r starte jetzt\n" +#~ " -s no signals\n" +#~ " -t test - Konfiguration Lesen und beenden\n" +#~ " -u userid\n" +#~ " -v ausfhrliche Benutzer Meldungen\n" +#~ " -? diese Meldung ausgeben.\n" +#~ "\n" + +#, fuzzy +#~ msgid "No archive name specified\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "Got EOM at file %u on device %s, Volume \"%s\"\n" +#~ msgstr "Erzeugung von db Device Eintrag %s fehlgeschlagen: ERR=%s\n" + +#, fuzzy +#~ msgid "Mounted Volume \"%s\".\n" +#~ msgstr "Kann Konsole \"%s\" an %s:%s:%d nicht authentisieren.\n" + +#, fuzzy +#~ msgid "End of file %u on device %s, Volume \"%s\"\n" +#~ msgstr "Erzeugung von db Device Eintrag %s fehlgeschlagen: ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: bscan [ options ] \n" +#~ " -b bootstrap specify a bootstrap file\n" +#~ " -c specify configuration file\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -m update media info in database\n" +#~ " -D specify the driver database name (default NULL)\n" +#~ " -n specify the database name (default bacula)\n" +#~ " -u specify database user name (default bacula)\n" +#~ " -P specify database password (default none)\n" +#~ " -h specify database host (default NULL)\n" +#~ " -t specify database port (default 0)\n" +#~ " -p proceed inspite of I/O errors\n" +#~ " -r list records\n" +#~ " -s synchronize or store in database\n" +#~ " -S show scan progress periodically\n" +#~ " -v verbose\n" +#~ " -V specify Volume names (separated by |)\n" +#~ " -w specify working directory (default from conf " +#~ "file)\n" +#~ " -? print this message\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Version: %s (%s)\n" +#~ "\n" +#~ "Usage: dird [-f -s] [-c config_file] [-d debug_level] [config_file]\n" +#~ " -c setze Konfigurationsdatei auf Datei\n" +#~ " -dnn setze debug level auf nn\n" +#~ " -f starte im Vordergrund (fr debugging Zwecke)\n" +#~ " -g groupid\n" +#~ " -r starte jetzt\n" +#~ " -s no signals\n" +#~ " -t test - Konfiguration Lesen und beenden\n" +#~ " -u userid\n" +#~ " -v ausfhrliche Benutzer Meldungen\n" +#~ " -? diese Meldung ausgeben.\n" +#~ "\n" + +#, fuzzy +#~ msgid "No Working Directory defined in %s. Cannot continue.\n" +#~ msgstr "Keine Job records definiert in %s\n" + +#, fuzzy +#~ msgid "Working Directory: %s not found. 
Cannot continue.\n" +#~ msgstr "Counter Eintrag: %s in Catalog nicht gefunden.\n" + +#, fuzzy +#~ msgid "Create JobMedia for Job %s\n" +#~ msgstr "1991 Update JobMedia Fehler\n" + +#, fuzzy +#~ msgid "Could not create JobMedia record for Volume=%s Job=%s\n" +#~ msgstr "Kann Storage resource %s nicht finden\n" + +#, fuzzy +#~ msgid "Pool record for %s found in DB.\n" +#~ msgstr "Pool Eintrag in Catalog nicht gefunden.\n" + +#, fuzzy +#~ msgid "VOL_LABEL: Pool record not found for Pool: %s\n" +#~ msgstr "Pool Eintrag in Catalog nicht gefunden.\n" + +#, fuzzy +#~ msgid "Media record for %s found in DB.\n" +#~ msgstr "Media Eintrag fr MediaId=%u in Catalog nicht gefunden.\n" + +#, fuzzy +#~ msgid "SOS_LABEL: Found Job record for JobId: %d\n" +#~ msgstr "Kein Job fr JobId %s gefunden\n" + +#, fuzzy +#~ msgid "SOS_LABEL: Job record not found for JobId: %d\n" +#~ msgstr "Kein Job fr JobId %s gefunden\n" + +#, fuzzy +#~ msgid "Could not update job record. ERR=%s\n" +#~ msgstr "Konnte Client Eintrag nicht anlegen. ERR=%s\n" + +#, fuzzy +#~ msgid "Got Prog Names Stream: %s\n" +#~ msgstr "Kann \"message thread\" nicht erzeugen: %s\n" + +#, fuzzy +#~ msgid "Could not create File Attributes record. ERR=%s\n" +#~ msgstr "Konnte FileSet \"%s\" Eintrag nicht erzeugen. ERR=%s\n" + +#, fuzzy +#~ msgid "Created File record: %s\n" +#~ msgstr "Erzeugung des db File Eintrags %s fehlgeschlagen. ERR=%s" + +#, fuzzy +#~ msgid "Could not create media record. ERR=%s\n" +#~ msgstr "Konnte Client Eintrag nicht anlegen. ERR=%s\n" + +#, fuzzy +#~ msgid "Could not update media record. ERR=%s\n" +#~ msgstr "Konnte Client Eintrag nicht anlegen. ERR=%s\n" + +#, fuzzy +#~ msgid "Created Media record for Volume: %s\n" +#~ msgstr "Konnte Media record fr Volume %s nicht holen: ERR=%s\n" + +#, fuzzy +#~ msgid "Updated Media record at end of Volume: %s\n" +#~ msgstr "Aktualisierung von Media Eintrag %s fehlgeschlagen: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not create pool record. ERR=%s\n" +#~ msgstr "Konnte Client Eintrag nicht anlegen. ERR=%s\n" + +#, fuzzy +#~ msgid "Created Pool record for Pool: %s\n" +#~ msgstr "Erzeuge db Medientyp Eintrag %s fehlgeschlagen: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not get Client record. ERR=%s\n" +#~ msgstr "Konnte Client Eintrag nicht anlegen. ERR=%s\n" + +#, fuzzy +#~ msgid "Created Client record for Client: %s\n" +#~ msgstr "Konnte Job Eintrag nicht holen. ERR=%s\n" + +#, fuzzy +#~ msgid "Fileset \"%s\" already exists.\n" +#~ msgstr "Volume \"%s\" bereits vorhanden.\n" + +#, fuzzy +#~ msgid "Could not create FileSet record \"%s\". ERR=%s\n" +#~ msgstr "Konnte FileSet \"%s\" Eintrag nicht erzeugen. ERR=%s\n" + +#, fuzzy +#~ msgid "Created FileSet record \"%s\"\n" +#~ msgstr "Erzeugung des DB FileSet Eintrags %s fehlgeschlagen. ERR=%s\n" + +#, fuzzy +#~ msgid "Could not create JobId record. ERR=%s\n" +#~ msgstr "Konnte Client Eintrag nicht anlegen. ERR=%s\n" + +#, fuzzy +#~ msgid "Could not update job start record. ERR=%s\n" +#~ msgstr "Konnte Client Eintrag nicht anlegen. ERR=%s\n" + +#, fuzzy +#~ msgid "Created new JobId=%u record for original JobId=%u\n" +#~ msgstr "Konnte Job Eintrag nicht holen. ERR=%s\n" + +#, fuzzy +#~ msgid "Could not update JobId=%u record. ERR=%s\n" +#~ msgstr "Konnte Client Eintrag nicht anlegen. 
ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "%s\n" +#~ "JobId: %d\n" +#~ "Job: %s\n" +#~ "FileSet: %s\n" +#~ "Backup Level: %s\n" +#~ "Client: %s\n" +#~ "Start time: %s\n" +#~ "End time: %s\n" +#~ "Files Written: %s\n" +#~ "Bytes Written: %s\n" +#~ "Volume Session Id: %d\n" +#~ "Volume Session Time: %d\n" +#~ "Last Volume Bytes: %s\n" +#~ "Termination: %s\n" +#~ "\n" +#~ msgstr "" +#~ "%s %s (%s): %s\n" +#~ " JobId: %d\n" +#~ " Job: %s\n" +#~ " Client: %s\n" +#~ " Start time: %s\n" +#~ " End time: %s\n" +#~ " Dateien erwartet: %s\n" +#~ " Dateien. wiederhergestellt: %s\n" +#~ " Bytes wiederhergestellt: %s\n" +#~ " Geschwindigkeit: %.1f KB/s\n" +#~ " FD Fehler: %d\n" +#~ " FD Beendigungsstatus: %s\n" +#~ " SD Beendigungsstatus: %s\n" +#~ " Beendigungsstatus: %s\n" +#~ "\n" + +#, fuzzy +#~ msgid "Could not create JobMedia record. ERR=%s\n" +#~ msgstr "Konnte Client Eintrag nicht anlegen. ERR=%s\n" + +#, fuzzy +#~ msgid "Created JobMedia record JobId %d, MediaId %d\n" +#~ msgstr "Erzeugung von JobMedia Eintrag %s fehlgeschlagen: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not add MD5/SHA1 to File record. ERR=%s\n" +#~ msgstr "Konnte Client Eintrag nicht anlegen. ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: bsdjson [options] [config_file]\n" +#~ " -r get resource type \n" +#~ " -n get resource \n" +#~ " -l get only directives matching dirs (use with -r)\n" +#~ " -D get only data\n" +#~ " -c use as configuration file\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -t test - read config and exit\n" +#~ " -v verbose user messages\n" +#~ " -? print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Version: %s (%s)\n" +#~ "\n" +#~ "Usage: bacula-fd [-f -s] [-c config_datei] [-d debug_level]\n" +#~ " -c benutze als Konfigurationsdatei\n" +#~ " -dnn setze debug level auf nn\n" +#~ " -f starte in Vordergrund (fr debugging)\n" +#~ " -g groupid\n" +#~ " -s no signals (fr debugging)\n" +#~ " -t Konfigurationsdatei testen und beenden\n" +#~ " -u userid\n" +#~ " -v Ausfhrliche Benutzermeldungen\n" +#~ " -? diese Meldung ausgeben.\n" +#~ "\n" + +#, fuzzy +#~ msgid "No Storage resource defined in %s. Cannot continue.\n" +#~ msgstr "Keine Messages resource definiert in %s\n" + +#, fuzzy +#~ msgid "Only one Storage resource permitted in %s\n" +#~ msgstr "Nur eine Director resource erlaubt in %s\n" + +#, fuzzy +#~ msgid "No Director resource defined in %s. Cannot continue.\n" +#~ msgstr "" +#~ "Keine Director resource definiert in %s\n" +#~ "Ohne dies weiss ich nicht wer ich bin :-(\n" + +#, fuzzy +#~ msgid "No Device resource defined in %s. Cannot continue.\n" +#~ msgstr "Keine Messages resource definiert in %s\n" + +#, fuzzy +#~ msgid "No Messages resource defined in %s. Cannot continue.\n" +#~ msgstr "Keine Messages resource definiert in %s\n" + +#, fuzzy +#~ msgid "\"TLS Certificate\" file not defined for Storage \"%s\" in %s.\n" +#~ msgstr "" +#~ "\"TLS Certificate\" Datei nicht definiert fr Console \"%s\" in %s.\n" + +#, fuzzy +#~ msgid "\"TLS Key\" file not defined for Storage \"%s\" in %s.\n" +#~ msgstr "\"TLS Key\" Datei nicht fr Director definiert \"%s\" in %s.\n" + +#, fuzzy +#~ msgid "" +#~ "Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined " +#~ "for Storage \"%s\" in %s. At least one CA certificate store is required " +#~ "when using \"TLS Verify Peer\".\n" +#~ msgstr "" +#~ "Weder \"TLS CA Certificate\" noch \"TLS CA Certificate Dir\" sind fr " +#~ "Director \"%s\" in %s definert. 
Mindestens ein CA certificate store wird " +#~ "bentigt wenn \"TLS Verify Peer\" eingesetzt wird.\n" + +#, fuzzy +#~ msgid "No archive name specified.\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "Improper number of arguments specified.\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "Device open failed. ERR=%s\n" +#~ msgstr "Erzeugung des DB Client Eintrags %s fehlgeschlagen. ERR=%s\n" + +#, fuzzy +#~ msgid "Wrote Volume label for volume \"%s\".\n" +#~ msgstr "Keine Volumes zum Wiederherstellen gefunden.\n" + +#, fuzzy +#~ msgid "Volume has no label.\n" +#~ msgstr "volume ist abgelaufen" + +#, fuzzy +#~ msgid "Volume label read correctly.\n" +#~ msgstr "Volume \"%s\" bereits vorhanden.\n" + +#, fuzzy +#~ msgid "I/O error on device: ERR=%s" +#~ msgstr "Socket Fehler auf %s Kommando: ERR=%s\n" + +#, fuzzy +#~ msgid "Volume type error: ERR=%s\n" +#~ msgstr "Attribute create error. %s" + +#, fuzzy +#~ msgid "Volume name error\n" +#~ msgstr "Attribute create error. %s" + +#, fuzzy +#~ msgid "Error creating label. ERR=%s" +#~ msgstr "Fehler beim Aktualisieren der DB Media Datei. ERR=%s\n" + +#, fuzzy +#~ msgid "Volume version error.\n" +#~ msgstr "Attribute create error. %s" + +#, fuzzy +#~ msgid "Unknown error.\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "Bad status from load. ERR=%s\n" +#~ msgstr "pthread_create: ERR=%s\n" + +#, fuzzy +#~ msgid "Bad status from rewind. ERR=%s\n" +#~ msgstr "pthread_create: ERR=%s\n" + +#, fuzzy +#~ msgid "Bad status from weof. ERR=%s\n" +#~ msgstr "Regex bersetzungsfehler. ERR=%s\n" + +#, fuzzy +#~ msgid "Wrote 1 EOF to %s\n" +#~ msgstr " opcmd=%s\n" + +#, fuzzy +#~ msgid "Bad status from bsf. ERR=%s\n" +#~ msgstr "Kann bootstrap Datei nicht ffnen: %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Bad status from bsr. ERR=%s\n" +#~ msgstr "Regex bersetzungsfehler. ERR=%s\n" + +#, fuzzy +#~ msgid "Error writing record to block.\n" +#~ msgstr "Fehler beim schreiben der bsr Datei.\n" + +#, fuzzy +#~ msgid "Error writing block to device.\n" +#~ msgstr "Fehler beim schreiben der bsr Datei.\n" + +#, fuzzy +#~ msgid "Backspace file failed! ERR=%s\n" +#~ msgstr "pthread_create: ERR=%s\n" + +#, fuzzy +#~ msgid "Backspace record failed! ERR=%s\n" +#~ msgstr "Aktualisierung von Media Eintrag %s fehlgeschlagen: ERR=%s\n" + +#, fuzzy +#~ msgid "Read block failed! ERR=%s\n" +#~ msgstr "pthread_cond_wait: ERR=%s\n" + +#, fuzzy +#~ msgid "Bad data in record. Test failed!\n" +#~ msgstr "Aktualisierung von Media Eintrag %s fehlgeschlagen: ERR=%s\n" + +#, fuzzy +#~ msgid "Write failed at block %u. stat=%d ERR=%s\n" +#~ msgstr "Catalog Fehler beim Aktualisieren von volume \"%s\". ERR=%s" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Error writing record to block.\n" +#~ msgstr "Fehler beim schreiben der bsr Datei.\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Error writing block to device.\n" +#~ msgstr "Fehler beim schreiben der bsr Datei.\n" + +#, fuzzy +#~ msgid "Read block %d failed! ERR=%s\n" +#~ msgstr "pthread_cond_wait: ERR=%s\n" + +#, fuzzy +#~ msgid "Read record failed. Block %d! ERR=%s\n" +#~ msgstr "Erzeugung des db Path Eintrags %s fehlgeschlagen. ERR=%s\n" + +#, fuzzy +#~ msgid "Read record failed! 
ERR=%s\n" +#~ msgstr "Erzeugung von db Pool Eintrag %s fehlgeschlagen: ERR=%s\n" + +#, fuzzy +#~ msgid "3991 Bad autochanger command: %s\n" +#~ msgstr "Storage daemon hat \"Job command\": %s abgelehnt\n" + +#, fuzzy +#~ msgid "3992 Bad autochanger command: %s\n" +#~ msgstr "Storage daemon hat \"Job command\": %s abgelehnt\n" + +#, fuzzy +#~ msgid "3993 Bad autochanger command: %s\n" +#~ msgstr "Storage daemon hat \"Job command\": %s abgelehnt\n" + +#, fuzzy +#~ msgid "Now forward spacing 1 file.\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "Bad status from fsr. ERR=%s\n" +#~ msgstr "Regex bersetzungsfehler. ERR=%s\n" + +#, fuzzy +#~ msgid "Now forward spacing 2 files.\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "Now forward spacing 4 files.\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "Bad status from fsf. ERR=%s\n" +#~ msgstr "Kann bootstrap Datei nicht ffnen: %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Wrote block to device.\n" +#~ msgstr "Fehler beim schreiben der bsr Datei.\n" + +#, fuzzy +#~ msgid "End of tape\n" +#~ msgstr "" +#~ "Ende automatische Suberung.\n" +#~ "\n" + +#, fuzzy +#~ msgid "read error on %s. ERR=%s.\n" +#~ msgstr "Catalog Fehler beim Aktualisieren von volume \"%s\". ERR=%s" + +#, fuzzy +#~ msgid "Bad status from read %d. ERR=%s\n" +#~ msgstr "Konnte bootstrap Datei %s nicht erzeugen. ERR=%s\n" + +#, fuzzy +#~ msgid "Short block read.\n" +#~ msgstr "Job Einrichtung Fehlgeschlagen.\n" + +#, fuzzy +#~ msgid "Error reading block. ERR=%s\n" +#~ msgstr "Fehler beim aktualisieren der DB Job Datei . ERR=%s\n" + +#, fuzzy +#~ msgid "Wrote Start of Session label.\n" +#~ msgstr "Fehler beim Aktualisieren der DB Media Datei. ERR=%s\n" + +#, fuzzy +#~ msgid "Flush block failed.\n" +#~ msgstr "Job Einrichtung Fehlgeschlagen.\n" + +#, fuzzy +#~ msgid "%s Flush block, write EOF\n" +#~ msgstr "Job Einrichtung Fehlgeschlagen.\n" + +#, fuzzy +#~ msgid "Job canceled.\n" +#~ msgstr "%s Abgebrochen" + +#, fuzzy +#~ msgid "Set ok=false after write_block_to_device.\n" +#~ msgstr "Fehler beim schreiben der bsr Datei.\n" + +#, fuzzy +#~ msgid "Wrote End of Session label.\n" +#~ msgstr "Fehler beim Aktualisieren der DB Media Datei. ERR=%s\n" + +#, fuzzy +#~ msgid "Could not create state file: %s ERR=%s\n" +#~ msgstr "Kann bootstrap Datei nicht ffnen: %s: ERR=%s\n" + +#, fuzzy +#~ msgid "do_unfill failed.\n" +#~ msgstr "Job Einrichtung Fehlgeschlagen.\n" + +#, fuzzy +#~ msgid "Reposition error. ERR=%s\n" +#~ msgstr "Regex bersetzungsfehler. ERR=%s\n" + +#, fuzzy +#~ msgid "Error reading block: ERR=%s\n" +#~ msgstr "Fehler beim Holen von Zeile %d: ERR=%s\n" + +#, fuzzy +#~ msgid "clear tape errors" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "forward space a record" +#~ msgstr "Job Ressource" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: btape \n" +#~ " -b specify bootstrap file\n" +#~ " -c set configuration file to file\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -p proceed inspite of I/O errors\n" +#~ " -s turn off signals\n" +#~ " -w set working directory to dir\n" +#~ " -v be verbose\n" +#~ " -? 
print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Version: %s (%s)\n" +#~ "\n" +#~ "Usage: dird [-f -s] [-c config_file] [-d debug_level] [config_file]\n" +#~ " -c setze Konfigurationsdatei auf Datei\n" +#~ " -dnn setze debug level auf nn\n" +#~ " -f starte im Vordergrund (fr debugging Zwecke)\n" +#~ " -g groupid\n" +#~ " -r starte jetzt\n" +#~ " -s no signals\n" +#~ " -t test - Konfiguration Lesen und beenden\n" +#~ " -u userid\n" +#~ " -v ausfhrliche Benutzer Meldungen\n" +#~ " -? diese Meldung ausgeben.\n" +#~ "\n" + +#, fuzzy +#~ msgid "End of Volume \"%s\" %d records.\n" +#~ msgstr "Neues Volume \"%s\" in catalog erzeugt.\n" + +#, fuzzy +#~ msgid "Cannot open Dev=%s, Vol=%s\n" +#~ msgstr "Kann Ausdruck\"%s\"nicht auflsen: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot find device \"%s\" in config file %s.\n" +#~ msgstr "Kann inkludierte Datei nicht ffnen: %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot init device %s\n" +#~ msgstr "Kann Client resource %s nicht finden\n" + +#, fuzzy +#~ msgid "Cannot open %s\n" +#~ msgstr "Kann inkludierte Datei nicht ffnen: %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Could not find device \"%s\" in config file %s.\n" +#~ msgstr "Konnte FileSet \"%s\" Eintrag nicht erzeugen. ERR=%s\n" + +#, fuzzy +#~ msgid "Using device: \"%s\" for writing.\n" +#~ msgstr "Pool Ressource \"%s\" nicht gefunden.\n" + +#, fuzzy +#~ msgid "Using device: \"%s\" for reading.\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "Unexpected End of Tape\n" +#~ msgstr "Unerwartete Client Job Nachricht: %s\n" + +#, fuzzy +#~ msgid "Seek error: ERR=%s\n" +#~ msgstr "Attribute create error. %s" + +#, fuzzy +#~ msgid "lseek error on %s. ERR=%s.\n" +#~ msgstr "Socket Fehler auf %s Kommando: ERR=%s\n" + +#, fuzzy +#~ msgid "Error closing device %s. ERR=%s.\n" +#~ msgstr "Socket Fehler auf %s Kommando: ERR=%s\n" + +#, fuzzy +#~ msgid "No FreeSpace command defined.\n" +#~ msgstr "Keine %s resource definiert\n" + +#, fuzzy +#~ msgid "Cannot run free space command. Results=%s ERR=%s\n" +#~ msgstr "Kann Programm: %s nicht starten. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to write EOF. ERR=%s\n" +#~ msgstr "Konnte bootstrap Datei %s nicht erzeugen. ERR=%s\n" + +#, fuzzy +#~ msgid "New volume \"%s\" mounted on device %s at %s.\n" +#~ msgstr "1997 Volume \"%s\" Nicht in Catalog.\n" + +#, fuzzy +#~ msgid "write_block_to_device Volume label failed. ERR=%s" +#~ msgstr "Erzeugung von db Device Eintrag %s fehlgeschlagen: ERR=%s\n" + +#, fuzzy +#~ msgid "Connection request from %s failed.\n" +#~ msgstr "Erzeugung des DB Client Eintrags %s fehlgeschlagen. 
ERR=%s\n" + +#, fuzzy +#~ msgid "[SF0101] Bad client command: %s" +#~ msgstr "Storage daemon hat \"Job command\": %s abgelehnt\n" + +#, fuzzy +#~ msgid "[SF0102] Failed to connect to Client daemon: %s:%d\n" +#~ msgstr "Verbindung zu File daemon fehlgeschlagen.\n" + +#, fuzzy +#~ msgid "[SF0103] Bad storage command: %s" +#~ msgstr "Storage daemon hat \"Job command\": %s abgelehnt\n" + +#, fuzzy +#~ msgid "[SF0104] Failed to connect to Storage daemon: %s:%d\n" +#~ msgstr "Verbindung zu Storage daemon fehlgeschlagen.\n" + +#, fuzzy +#~ msgid "3991 Bad setdebug command: %s\n" +#~ msgstr "Storage daemon hat \"Job command\": %s abgelehnt\n" + +#, fuzzy +#~ msgid "3904 Job %s not found.\n" +#~ msgstr "Path record: %s nicht gefunden.\n" + +#, fuzzy +#~ msgid "3000 JobId=%ld Job=\"%s\" marked to be %s.\n" +#~ msgstr "JobId %s, Job %s zum Abbruch markiert.\n" + +#, fuzzy +#~ msgid "3999 Device \"%s\" not found or could not be opened.\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "3900 Truncate cache for volume \"%s\" failed. ERR=%s\n" +#~ msgstr "Erzeugung von db Device Eintrag %s fehlgeschlagen: ERR=%s\n" + +#, fuzzy +#~ msgid "3900 Not yet implemented\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "3912 Error scanning upload command: ERR=%s\n" +#~ msgstr "Fehler beim Start des Programms: %s. ERR=%s\n" + +#, fuzzy +#~ msgid "3999 Error with the upload: ERR=%s\n" +#~ msgstr "Fehler beim Holen von Zeile %d: ERR=%s\n" + +#, fuzzy +#~ msgid "3929 Unable to open device \"%s\": ERR=%s\n" +#~ msgstr "Kann Datenbank=%s.nicht ffen. ERR=%s\n" + +#, fuzzy +#~ msgid "3912 Failed to label Volume %s: ERR=%s\n" +#~ msgstr "Konnte Job Eintrag nicht holen. ERR=%s\n" + +#, fuzzy +#~ msgid "3913 Failed to open next part: ERR=%s\n" +#~ msgstr "Konnte Job Eintrag nicht holen. ERR=%s\n" + +#, fuzzy +#~ msgid "3917 Failed to label Volume: ERR=%s\n" +#~ msgstr "Konnte Job Eintrag nicht holen. ERR=%s\n" + +#, fuzzy +#~ msgid "3918 Failed to label Volume (no media): ERR=%s\n" +#~ msgstr "Konnte Job Eintrag nicht holen. ERR=%s\n" + +#, fuzzy +#~ msgid "3001 Mounted Volume: %s\n" +#~ msgstr "Mehr als ein Volume!: %s\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "[SW0106] Device \"%s\" requested by DIR could not be opened or does not " +#~ "exist.\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "[SW0107] Device \"%s\" in changer \"%s\" requested by DIR could not be " +#~ "opened or does not exist.\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "[SW0108] Device \"%s\" requested by DIR could not be opened or does not " +#~ "exist.\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "[SW0109] Device \"%s\" in changer \"%s\" requested by DIR could not be " +#~ "opened or does not exist.\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "Specified slot ignored. " +#~ msgstr "SQL fehlgeschlagen ERR=%s\n" + +#, fuzzy +#~ msgid "3901 Unable to open device \"%s\": ERR=%s\n" +#~ msgstr "Kann Datenbank=%s.nicht ffen. 
ERR=%s\n" + +#, fuzzy +#~ msgid "3001 Device \"%s\" is mounted with Volume \"%s\"\n" +#~ msgstr "Pool Ressource \"%s\" nicht gefunden.\n" + +#, fuzzy +#~ msgid "3001 Device \"%s\" is doing acquire.\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "3903 Device \"%s\" is being labeled.\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "3001 Device \"%s\" is already mounted with Volume \"%s\"\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "3002 Device \"%s\" is mounted.\n" +#~ msgstr "Pool Ressource \"%s\" nicht gefunden.\n" + +#, fuzzy +#~ msgid "3906 File device \"%s\" is always mounted.\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "3930 Device \"%s\" is being released.\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "3905 Unknown wait state %d\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "3909 Error scanning mount command: %s\n" +#~ msgstr "Fehler beim Holen der Counter Zeile: %s\n" + +#, fuzzy +#~ msgid "3003 Device \"%s\" already enabled.\n" +#~ msgstr "Pool Ressource \"%s\" nicht gefunden.\n" + +#, fuzzy +#~ msgid "3002 Device \"%s\" enabled.\n" +#~ msgstr "Pool Ressource \"%s\" nicht gefunden.\n" + +#, fuzzy +#~ msgid "3004 Device \"%s\" deleted %d alert%s.\n" +#~ msgstr "Pool Ressource \"%s\" nicht gefunden.\n" + +#, fuzzy +#~ msgid "3002 Device \"%s\" disabled.\n" +#~ msgstr "Pool Ressource \"%s\" nicht gefunden.\n" + +#, fuzzy +#~ msgid "3002 Device \"%s\" unmounted.\n" +#~ msgstr "Pool Ressource \"%s\" nicht gefunden.\n" + +#, fuzzy +#~ msgid "3901 Device \"%s\" is already unmounted.\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "3001 Device \"%s\" unmounted.\n" +#~ msgstr "Pool Ressource \"%s\" nicht gefunden.\n" + +#, fuzzy +#~ msgid "3902 Device \"%s\" is busy in acquire.\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "3921 Device \"%s\" already released.\n" +#~ msgstr "Device Eintrag %s bereits vorhanden\n" + +#, fuzzy +#~ msgid "3922 Device \"%s\" waiting for sysop.\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "3922 Device \"%s\" waiting for mount.\n" +#~ msgstr "Pool Ressource \"%s\" nicht gefunden.\n" + +#, fuzzy +#~ msgid "3923 Device \"%s\" is busy in acquire.\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "3914 Device \"%s\" is being labeled.\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "3022 Device \"%s\" released.\n" +#~ msgstr "Pool Ressource \"%s\" nicht gefunden.\n" + +#, fuzzy +#~ msgid "[SF0110] Could not create bootstrap file %s: ERR=%s\n" +#~ msgstr "Kann bootstrap Datei nicht ffnen: %s: ERR=%s\n" + +#, fuzzy +#~ msgid "[SF0111] Error parsing bootstrap file.\n" +#~ msgstr "Fehler beim schreiben der bsr Datei.\n" + +#, fuzzy +#~ msgid "3998 Device \"%s\" is not an autochanger.\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "3931 Device \"%s\" is BLOCKED. user unmounted.\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "3933 Device \"%s\" is BLOCKED waiting for media.\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "3934 Device \"%s\" is being initialized.\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "3935 Device \"%s\" is blocked labeling a Volume.\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "3935 Device \"%s\" is blocked for unknown reason.\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "3936 Device \"%s\" is busy reading.\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "3937 Device \"%s\" is busy with writers=%d reserved=%d.\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "Command error with FD msg=\"%s\", SD hanging up. 
ERR=%s\n" +#~ msgstr "Netzwerkfehler mit FD bei %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Command error with FD msg=\"%s\", SD hanging up.\n" +#~ msgstr "Netzwerkfehler mit FD bei %s: ERR=%s\n" + +#, fuzzy +#~ msgid "FD command not found: %s\n" +#~ msgstr "Job nicht gefunden: %s\n" + +#, fuzzy +#~ msgid "Cannot open session, received bad parameters.\n" +#~ msgstr "Kann Ausdruck\"%s\"nicht auflsen: ERR=%s\n" + +#, fuzzy +#~ msgid "Rewind failed: device %s is not open.\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "Could not open file device %s. No Volume name given.\n" +#~ msgstr "Konnte FileSet \"%s\" Eintrag nicht erzeugen. ERR=%s\n" + +#, fuzzy +#~ msgid "Could not open(%s,%s,0640): ERR=%s\n" +#~ msgstr "Kann bootstrap Datei nicht ffnen: %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to truncate device %s. ERR=%s\n" +#~ msgstr "Konnte bootstrap Datei %s nicht erzeugen. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to stat device %s. ERR=%s\n" +#~ msgstr "Konnte bootstrap Datei %s nicht erzeugen. ERR=%s\n" + +#, fuzzy +#~ msgid "Could not reopen: %s, ERR=%s\n" +#~ msgstr "Kann bootstrap Datei nicht ffnen: %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Device %s cannot be %smounted. ERR=%s\n" +#~ msgstr "Pool Ressource \"%s\" nicht gefunden.\n" + +#, fuzzy +#~ msgid "Ready to append to end of Volume \"%s\" size=%s\n" +#~ msgstr "Erzeugung von db Device Eintrag %s fehlgeschlagen: ERR=%s\n" + +#, fuzzy +#~ msgid "Error updating Catalog\n" +#~ msgstr "Fehler beim holen von Zeile: %s\n" + +#, fuzzy +#~ msgid "" +#~ "Connection from unknown Director %s at %s rejected.\n" +#~ "Please see " +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "Invalid connection from %s. Len=%d\n" +#~ msgstr "UA Hello von %s:%s:%d ist ungltig. Len=%d\n" + +#, fuzzy +#~ msgid "Invalid Hello from %s. Len=%d\n" +#~ msgstr "UA Hello von %s:%s:%d ist ungltig. Len=%d\n" + +#, fuzzy +#~ msgid "Client connect failed: Job name not found: %s\n" +#~ msgstr "Job nicht gefunden: %s\n" + +#, fuzzy +#~ msgid "Unable to authenticate File daemon\n" +#~ msgstr "Kann Konsole \"%s\" an %s:%s:%d nicht authentisieren.\n" + +#, fuzzy +#~ msgid "Recv request to Client failed. ERR=%s\n" +#~ msgstr "Erzeugung des DB Client Eintrags %s fehlgeschlagen. ERR=%s\n" + +#, fuzzy +#~ msgid "Bad Hello from Client: %s.\n" +#~ msgstr "UA Hello von %s:%s:%d ist ungltig. Len=%d\n" + +#, fuzzy +#~ msgid "[SE0001] Unable to stat device %s at %s: ERR=%s\n" +#~ msgstr "Konnte bootstrap Datei %s nicht erzeugen. ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "[SE0002] %s is an unknown device type. Must be tape or directory. st_mode=" +#~ "%x\n" +#~ msgstr "unbekannt\n" + +#, fuzzy +#~ msgid "[SA0003] Unable to stat mount point %s: ERR=%s\n" +#~ msgstr "Konnte bootstrap Datei %s nicht erzeugen. ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0009] Unable to init mutex: ERR=%s\n" +#~ msgstr "Kann DB lock nicht initialisieren. ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0010] Unable to init cond variable: ERR=%s\n" +#~ msgstr "konnte job cond Variable nicht initialisieren: ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0011] Unable to init cond variable: ERR=%s\n" +#~ msgstr "konnte job cond Variable nicht initialisieren: ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0012] Unable to init spool mutex: ERR=%s\n" +#~ msgstr "Kann DB lock nicht initialisieren. ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0013] Unable to init acquire mutex: ERR=%s\n" +#~ msgstr "Kann DB lock nicht initialisieren. ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0014] Unable to init freespace mutex: ERR=%s\n" +#~ msgstr "Kann DB lock nicht initialisieren. 
ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0015] Unable to init read acquire mutex: ERR=%s\n" +#~ msgstr "Kann DB lock nicht initialisieren. ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0016] Unable to init volcat mutex: ERR=%s\n" +#~ msgstr "Kann DB lock nicht initialisieren. ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0017] Unable to init dcrs mutex: ERR=%s\n" +#~ msgstr "Kann DB lock nicht initialisieren. ERR=%s\n" + +#, fuzzy +#~ msgid "[SF0020] dlopen of SD driver=%s at %s failed: ERR=%s\n" +#~ msgstr "Erzeugung von db Device Eintrag %s fehlgeschlagen: ERR=%s\n" + +#, fuzzy +#~ msgid "Couldn't rewind %s device %s: ERR=%s\n" +#~ msgstr "Kann bootstrap Datei nicht ffnen: %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not unserialize Volume label: ERR=%s\n" +#~ msgstr "Kann bootstrap Datei nicht ffnen: %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Volume on %s device %s has bad Bacula label type: %ld\n" +#~ msgstr "Neues Volume \"%s\" in catalog erzeugt.\n" + +#, fuzzy +#~ msgid "Could not reserve volume %s on %s device %s\n" +#~ msgstr "Konnte FileSet \"%s\" Eintrag nicht erzeugen. ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot write Volume label to block for %s device %s\n" +#~ msgstr "Konnte FileSet \"%s\" Eintrag nicht erzeugen. ERR=%s\n" + +#, fuzzy +#~ msgid "Open %s device %s Volume \"%s\" failed: ERR=%s" +#~ msgstr "Erzeugung von db Device Eintrag %s fehlgeschlagen: ERR=%s\n" + +#, fuzzy +#~ msgid "Open %s device %s Volume \"%s\" failed: ERR=%s\n" +#~ msgstr "Erzeugung von db Device Eintrag %s fehlgeschlagen: ERR=%s\n" + +#, fuzzy +#~ msgid "Rewind error on %s device %s: ERR=%s\n" +#~ msgstr "Socket Fehler auf %s Kommando: ERR=%s\n" + +#, fuzzy +#~ msgid "Truncate error on %s device %s: ERR=%s\n" +#~ msgstr "Socket Fehler auf %s Kommando: ERR=%s\n" + +#, fuzzy +#~ msgid "Failed to re-open device after truncate on %s device %s: ERR=%s" +#~ msgstr "Kann Catalog DB Steuerdatei nicht ffnen %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to write %s device %s: ERR=%s\n" +#~ msgstr "Konnte bootstrap Datei %s nicht erzeugen. ERR=%s\n" + +#, fuzzy +#~ msgid "Recycled volume \"%s\" on %s device %s, all previous data lost.\n" +#~ msgstr "Neues Volume \"%s\" in catalog erzeugt.\n" + +#, fuzzy +#~ msgid "Wrote label to prelabeled Volume \"%s\" on %s device %s\n" +#~ msgstr "Neues Volume \"%s\" in catalog erzeugt.\n" + +#, fuzzy +#~ msgid "Unknown %d" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "Date written : %s\n" +#~ msgstr " opcmd=%s\n" + +#, fuzzy +#~ msgid "pthread_cond_wait failure. ERR=%s\n" +#~ msgstr "pthread_cond_wait: ERR=%s\n" + +#, fuzzy +#~ msgid "unknown blocked code" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "Too many errors trying to mount %s device %s.\n" +#~ msgstr "1997 Volume \"%s\" Nicht in Catalog.\n" + +#, fuzzy +#~ msgid "Job %d canceled.\n" +#~ msgstr "%s Abgebrochen" + +#, fuzzy +#~ msgid "Open of %s device %s Volume \"%s\" failed: ERR=%s\n" +#~ msgstr "Erzeugung von db Device Eintrag %s fehlgeschlagen: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to position to end of data on %s device %s: ERR=%s\n" +#~ msgstr "Konnte bootstrap Datei %s nicht erzeugen. 
ERR=%s\n" + +#, fuzzy +#~ msgid "Volume \"%s\" not loaded on %s device %s.\n" +#~ msgstr "1997 Volume \"%s\" Nicht in Catalog.\n" + +#, fuzzy +#~ msgid "%s device %s not configured to autolabel Volumes.\n" +#~ msgstr "TLS bentigt aber nicht konfiguriert in Bacula.\n" + +#, fuzzy +#~ msgid "Marking Volume \"%s\" in Error in Catalog.\n" +#~ msgstr "1997 Volume \"%s\" Nicht in Catalog.\n" + +#, fuzzy +#~ msgid "Cannot open %s Dev=%s, Vol=%s for reading.\n" +#~ msgstr "Kann bootstrap Datei nicht ffnen: %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to set eotmodel on device %s: ERR=%s\n" +#~ msgstr "Konnte bootstrap Datei %s nicht erzeugen. ERR=%s\n" + +#, fuzzy +#~ msgid " Bacula status:" +#~ msgstr "Bacula " + +#, fuzzy +#~ msgid "ioctl MTIOCGET error on %s. ERR=%s.\n" +#~ msgstr "Catalog Fehler beim Aktualisieren von volume \"%s\". ERR=%s" + +#, fuzzy +#~ msgid "unknown func code %d" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "I/O function \"%s\" not supported on this device.\n" +#~ msgstr "1997 Volume \"%s\" Nicht in Catalog.\n" + +#, fuzzy +#~ msgid "Cannot open bootstrap file %s: %s\n" +#~ msgstr "Kann bootstrap Datei nicht ffnen: %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Device \"%s\" in bsr at inappropriate place.\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "REGEX '%s' compile error. ERR=%s\n" +#~ msgstr "Regex bersetzungsfehler. ERR=%s\n" + +#, fuzzy +#~ msgid "JobType not yet implemented\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "JobLevel not yet implemented\n" +#~ msgstr "Kein storage angegeben.\n" + +#, fuzzy +#~ msgid "VolumeName : %s\n" +#~ msgstr "Attribute create error. %s" + +#, fuzzy +#~ msgid " MediaType : %s\n" +#~ msgstr "Media id select fehlgeschlagen: ERR=%s\n" + +#, fuzzy +#~ msgid " Device : %s\n" +#~ msgstr " opcmd=%s\n" + +#, fuzzy +#~ msgid "Client : %s\n" +#~ msgstr " opcmd=%s\n" + +#, fuzzy +#~ msgid "done : %s\n" +#~ msgstr " opcmd=%s\n" + +#, fuzzy +#~ msgid "No Volume names found for restore.\n" +#~ msgstr "Keine Volumes zum Wiederherstellen gefunden.\n" + +#, fuzzy +#~ msgid "Error sending header to Client. ERR=%s\n" +#~ msgstr "Fehler beim senden von \"Hello\" an Storage daemon. ERR=%s\n" + +#, fuzzy +#~ msgid "Error sending to FD. ERR=%s\n" +#~ msgstr "Fehler beim senden von \"Hello\" an Storage daemon. ERR=%s\n" + +#, fuzzy +#~ msgid "Error sending data to Client. ERR=%s\n" +#~ msgstr "Fehler beim senden von \"Hello\" an Storage daemon. ERR=%s\n" + +#, fuzzy +#~ msgid "Error sending to File daemon. ERR=%s\n" +#~ msgstr "Fehler beim senden von \"Hello\" an Storage daemon. ERR=%s\n" + +#, fuzzy +#~ msgid "End of Volume \"%s\" at addr=%s on device %s.\n" +#~ msgstr "1997 Volume \"%s\" Nicht in Catalog.\n" + +#, fuzzy +#~ msgid "Unknown code %d\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "unknown: %d" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "Unable to initialize reservation lock. ERR=%s\n" +#~ msgstr "Kann DB lock nicht initialisieren. ERR=%s\n" + +#, fuzzy +#~ msgid "3939 Could not get dcr\n" +#~ msgstr "Konnte Client Eintrag nicht anlegen. ERR=%s\n" + +#, fuzzy +#~ msgid "Device reservation failed for JobId=%d: %s\n" +#~ msgstr "Mehr als ein Dateiname! 
%s fr Datei: %s\n" + +#, fuzzy +#~ msgid "Failed command: %s\n" +#~ msgstr "Storage daemon hat \"Job command\": %s abgelehnt\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ " Device \"%s\" in changer \"%s\" requested by DIR could not be opened " +#~ "or does not exist.\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ " Device \"%s\" requested by DIR could not be opened or does not " +#~ "exist.\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ " Device \"%s\" requested by DIR is disabled.\n" +#~ msgstr "Pool Ressource \"%s\" nicht gefunden.\n" + +#, fuzzy +#~ msgid "3926 Could not get dcr for device: %s\n" +#~ msgstr "Konnte Datenbank \"%s\" nicht ffen.\n" + +#, fuzzy +#~ msgid "3603 JobId=%u %s device %s is busy reading.\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "3604 JobId=%u %s device %s is BLOCKED due to user unmount.\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "3601 JobId=%u %s device %s is BLOCKED due to user unmount.\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "" +#~ "3602 JobId=%u %s device %s is busy (already reading/writing). read=%d, " +#~ "writers=%d reserved=%d\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "3609 JobId=%u Max concurrent jobs=%d exceeded on %s device %s.\n" +#~ msgstr "JobId %s, Job %s zum Abbruch markiert.\n" + +#, fuzzy +#~ msgid "3611 JobId=%u Volume max jobs=%d exceeded on %s device %s.\n" +#~ msgstr "JobId %s, Job %s zum Abbruch markiert.\n" + +#, fuzzy +#~ msgid "" +#~ "3608 JobId=%u wants Pool=\"%s\" but have Pool=\"%s\" nreserve=%d on %s " +#~ "device %s.\n" +#~ msgstr "JobId %s, Job %s zum Abbruch markiert.\n" + +#, fuzzy +#~ msgid "3605 JobId=%u wants free drive but %s device %s is busy.\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "" +#~ "3606 JobId=%u prefers mounted drives, but %s device %s has no Volume.\n" +#~ msgstr "JobId %s, Job %s zum Abbruch markiert.\n" + +#, fuzzy +#~ msgid "" +#~ "3607 JobId=%u wants Vol=\"%s\" drive has Vol=\"%s\" on %s device %s.\n" +#~ msgstr "JobId %s, Job %s zum Abbruch markiert.\n" + +#, fuzzy +#~ msgid "3911 JobId=%u failed reserve %s device %s.\n" +#~ msgstr "JobId %s, Job %s zum Abbruch markiert.\n" + +#, fuzzy +#~ msgid "Job cancelled.\n" +#~ msgstr "%s Abgebrochen" + +#, fuzzy +#~ msgid "Open data spool file %s failed: ERR=%s\n" +#~ msgstr "Client id select fehlgeschlagen: ERR=%s\n" + +#, fuzzy +#~ msgid "Ftruncate spool file failed: ERR=%s\n" +#~ msgstr "Abfrage gescheitert: %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Spool header read error. ERR=%s\n" +#~ msgstr "Attribute create error. %s" + +#, fuzzy +#~ msgid "Spool read error. Wanted %u bytes, got %d\n" +#~ msgstr "Socket Fehler auf %s Kommando: ERR=%s\n" + +#, fuzzy +#~ msgid "Fatal despooling error." +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "Error writing block to spool file. ERR=%s\n" +#~ msgstr "Fehler beim schreiben der bsr Datei.\n" + +#, fuzzy +#~ msgid "Network error on BlastAttributes.\n" +#~ msgstr " use as configuration file\n" +#~ " -d [,] set debug level to , debug tags to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -T set trace on\n" +#~ " -f run in foreground (for debugging)\n" +#~ " -g set groupid to group\n" +#~ " -m print kaboom output (for debugging)\n" +#~ " -p proceed despite I/O errors\n" +#~ " -s no signals (for debugging)\n" +#~ " -t test - read config and exit\n" +#~ " -u userid to \n" +#~ " -v verbose user messages\n" +#~ " -? 
print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Version: %s (%s)\n" +#~ "\n" +#~ "Usage: bacula-fd [-f -s] [-c config_datei] [-d debug_level]\n" +#~ " -c benutze als Konfigurationsdatei\n" +#~ " -dnn setze debug level auf nn\n" +#~ " -f starte in Vordergrund (fr debugging)\n" +#~ " -g groupid\n" +#~ " -s no signals (fr debugging)\n" +#~ " -t Konfigurationsdatei testen und beenden\n" +#~ " -u userid\n" +#~ " -v Ausfhrliche Benutzermeldungen\n" +#~ " -? diese Meldung ausgeben.\n" +#~ "\n" + +#, fuzzy +#~ msgid "Unable to create thread. ERR=%s\n" +#~ msgstr "Konnte Job Eintrag nicht holen. ERR=%s\n" + +#, fuzzy +#~ msgid "Could not initialize SD device \"%s\"\n" +#~ msgstr "Konnte TLS context fr Storage \"%s\" in %s nicht initialisieren.\n" + +#, fuzzy +#~ msgid "Unable to stat ControlDevice %s: ERR=%s\n" +#~ msgstr "Konnte bootstrap Datei %s nicht erzeugen. ERR=%s\n" + +#, fuzzy +#~ msgid "Could not open device %s\n" +#~ msgstr "Konnte Datenbank \"%s\" nicht ffen.\n" + +#, fuzzy +#~ msgid "Could not mount device %s\n" +#~ msgstr "Konnte FileSet \"%s\" Eintrag nicht erzeugen. ERR=%s\n" + +#, fuzzy +#~ msgid "Expected a Device Type keyword, got: %s" +#~ msgstr "Job Type Schlsselwort erwartet, erhalten: %s" + +#, fuzzy +#~ msgid "Expected a Cloud driver keyword, got: %s" +#~ msgstr "Job Level Schlsselwort erwartet, erhalten: %s" + +#, fuzzy +#~ msgid "Expected a Truncate Cache option keyword, got: %s" +#~ msgstr "FileSet option Schlsselwort erwartet, erhalten:%s:" + +#, fuzzy +#~ msgid "Expected a Cloud Upload option keyword, got: %s" +#~ msgstr "FileSet option Schlsselwort erwartet, erhalten:%s:" + +#, fuzzy +#~ msgid "Expected a Cloud communications protocol option keyword, got: %s" +#~ msgstr " Migration Job Type Schlsselwort erwartet, erhalten: %s" + +#, fuzzy +#~ msgid "Expected a Cloud Uri Style option keyword, got: %s" +#~ msgstr "FileSet option Schlsselwort erwartet, erhalten:%s:" + +#, fuzzy +#~ msgid "Warning: no \"%s\" resource (%d) defined.\n" +#~ msgstr "Keine %s resource definiert\n" + +#, fuzzy +#~ msgid "dump_resource type=%d\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "Warning: unknown resource type %d\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "Cannot find AutoChanger resource %s\n" +#~ msgstr "Kann Storage resource %s nicht finden\n" + +#, fuzzy +#~ msgid "Unable to init lock for Autochanger=%s: ERR=%s\n" +#~ msgstr "Kann DB lock nicht initialisieren. ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot find Device resource %s\n" +#~ msgstr "Kann Director resource %s nicht finden\n" + +#, fuzzy +#~ msgid "Alert: Volume=\"%s\" alert=%d: ERR=%s\n" +#~ msgstr "Erzeugung von db Device Eintrag %s fehlgeschlagen: ERR=%s\n" + +#, fuzzy +#~ msgid "3997 Bad alert command: %s: ERR=%s.\n" +#~ msgstr "Kann Programm: %s nicht starten. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to open device %s: ERR=%s\n" +#~ msgstr "Kann Datenbank=%s.nicht ffen. ERR=%s\n" + +#, fuzzy +#~ msgid "Rewind error on %s. ERR=%s.\n" +#~ msgstr "Socket Fehler auf %s Kommando: ERR=%s\n" + +#, fuzzy +#~ msgid "Ready to append to end of Volume \"%s\" at file=%d.\n" +#~ msgstr "Erzeugung von db Device Eintrag %s fehlgeschlagen: ERR=%s\n" + +#, fuzzy +#~ msgid "ioctl MTEOM error on %s. ERR=%s.\n" +#~ msgstr "Catalog Fehler beim Aktualisieren von volume \"%s\". ERR=%s" + +#, fuzzy +#~ msgid "ioctl MTLOAD error on %s. ERR=%s.\n" +#~ msgstr "Catalog Fehler beim Aktualisieren von volume \"%s\". ERR=%s" + +#, fuzzy +#~ msgid "ioctl MTOFFL error on %s. 
ERR=%s.\n" +#~ msgstr "Catalog Fehler beim Aktualisieren von volume \"%s\". ERR=%s" + +#, fuzzy +#~ msgid "Device %s at End of Tape.\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "ioctl MTFSF error on %s. ERR=%s.\n" +#~ msgstr "Regex bersetzungsfehler. ERR=%s\n" + +#, fuzzy +#~ msgid "Device %s cannot BSF because it is not a tape.\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "ioctl MTBSF error on %s. ERR=%s.\n" +#~ msgstr "Catalog Fehler beim Aktualisieren von volume \"%s\". ERR=%s" + +#, fuzzy +#~ msgid "ioctl MTFSR %d error on %s. ERR=%s.\n" +#~ msgstr "Catalog Fehler beim Aktualisieren von volume \"%s\". ERR=%s" + +#, fuzzy +#~ msgid "ioctl MTBSR error on %s. ERR=%s.\n" +#~ msgstr "Catalog Fehler beim Aktualisieren von volume \"%s\". ERR=%s" + +#, fuzzy +#~ msgid "ioctl MTWEOF error on %s. ERR=%s.\n" +#~ msgstr "Catalog Fehler beim Aktualisieren von volume \"%s\". ERR=%s" + +#, fuzzy +#~ msgid "No Volume names found for %s.\n" +#~ msgstr "Keine Volumes zum Wiederherstellen gefunden.\n" + +#, fuzzy +#~ msgid "Unable to initialize volume list lock. ERR=%s\n" +#~ msgstr "Kann DB lock nicht initialisieren. ERR=%s\n" + +#, fuzzy +#~ msgid "Could not reserve volume \"%s\", because job canceled.\n" +#~ msgstr "Konnte FileSet \"%s\" Eintrag nicht erzeugen. ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "Could not reserve volume \"%s\" for append, because it will be read.\n" +#~ msgstr "Konnte FileSet \"%s\" Eintrag nicht erzeugen. ERR=%s\n" + +#, fuzzy +#~ msgid "Volume %s is busy swapping from %s to %s\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "Volume %s is busy swapping.\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "%s device %s is busy.\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "pthread timedwait error. ERR=%s\n" +#~ msgstr "Regex bersetzungsfehler. ERR=%s\n" + +#, fuzzy +#~ msgid "JobId=%s, Job %s waiting to reserve a device.\n" +#~ msgstr "JobId %s, Job %s zum Abbruch markiert.\n" + +#, fuzzy +#~ msgid "JobId=%s, Job %s waiting device %s.\n" +#~ msgstr "JobId %s, Job %s zum Abbruch markiert.\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "Example : bbatch -w /path/to/workdir -h localhost -f dat1 -f dat -f datx\n" +#~ " will start 3 thread and load dat1, dat and datx in your catalog\n" +#~ "See bbatch.c to generate datafile\n" +#~ "\n" +#~ "Usage: bbatch [ options ] -w working/dir -f datafile\n" +#~ " -b with batch mode\n" +#~ " -B without batch mode\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -n specify the database name (default bacula)\n" +#~ " -u specify database user name (default bacula)\n" +#~ " -P specify database host (default NULL)\n" +#~ " -k path name to the key file (default NULL)\n" +#~ " -e path name to the certificate file (default " +#~ "NULL)\n" +#~ " -a path name to the CA certificate file (default " +#~ "NULL)\n" +#~ " -w specify working directory\n" +#~ " -r call restore code with given jobids\n" +#~ " -v verbose\n" +#~ " -f specify data file\n" +#~ " -? print this message\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Version: %s (%s)\n" +#~ "\n" +#~ "Usage: dird [-f -s] [-c config_file] [-d debug_level] [config_file]\n" +#~ " -c setze Konfigurationsdatei auf Datei\n" +#~ " -dnn setze debug level auf nn\n" +#~ " -f starte im Vordergrund (fr debugging Zwecke)\n" +#~ " -g groupid\n" +#~ " -r starte jetzt\n" +#~ " -s no signals\n" +#~ " -t test - Konfiguration Lesen und beenden\n" +#~ " -u userid\n" +#~ " -v ausfhrliche Benutzer Meldungen\n" +#~ " -? 
diese Meldung ausgeben.\n" +#~ "\n" + +#, fuzzy +#~ msgid "Could not init Bacula database\n" +#~ msgstr "Konnte Datenbank \"%s\" nicht ffen.\n" + +#, fuzzy +#~ msgid "Error opening datafile %s\n" +#~ msgstr "Fehler beim holen von Zeile: %s\n" + +#, fuzzy +#~ msgid "Error while inserting file\n" +#~ msgstr "Fehler beim schreiben der bsr Datei.\n" + +#, fuzzy +#~ msgid "Could not open data file: %s\n" +#~ msgstr "Konnte Datenbank \"%s\" nicht ffen.\n" + +#, fuzzy +#~ msgid "Fatal fgets error: ERR=%s\n" +#~ msgstr "Attribute create error. %s" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Usage: %s [-f from] [-h mailhost] [-s subject] [-c copy] [recipient ...]\n" +#~ " -4 forces bsmtp to use IPv4 addresses only.\n" +#~ " -6 forces bsmtp to use IPv6 addresses only.\n" +#~ " -8 set charset to UTF-8\n" +#~ " -a use any ip protocol for address resolution\n" +#~ " -c set the Cc: field\n" +#~ " -d set debug level to \n" +#~ " -dt print a timestamp in debug output\n" +#~ " -f set the From: field\n" +#~ " -h use mailhost:port as the SMTP server\n" +#~ " -s set the Subject: field\n" +#~ " -r set the Reply-To: field\n" +#~ " -l set the maximum number of lines to send (default: " +#~ "unlimited)\n" +#~ " -? print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Version: %s (%s)\n" +#~ "\n" +#~ "Usage: dird [-f -s] [-c config_file] [-d debug_level] [config_file]\n" +#~ " -c setze Konfigurationsdatei auf Datei\n" +#~ " -dnn setze debug level auf nn\n" +#~ " -f starte im Vordergrund (fr debugging Zwecke)\n" +#~ " -g groupid\n" +#~ " -r starte jetzt\n" +#~ " -s no signals\n" +#~ " -t test - Konfiguration Lesen und beenden\n" +#~ " -u userid\n" +#~ " -v ausfhrliche Benutzer Meldungen\n" +#~ " -? diese Meldung ausgeben.\n" +#~ "\n" + +#, fuzzy +#~ msgid "Fatal gethostname error: ERR=%s\n" +#~ msgstr "Attribute create error. %s" + +#, fuzzy +#~ msgid "Fatal getaddrinfo for myself failed \"%s\": ERR=%s\n" +#~ msgstr "Catalog Fehler beim Aktualisieren von volume \"%s\". ERR=%s" + +#, fuzzy +#~ msgid "Fatal gethostbyname for myself failed \"%s\": ERR=%s\n" +#~ msgstr "Catalog Fehler beim Aktualisieren von volume \"%s\". ERR=%s" + +#, fuzzy +#~ msgid "Error unknown mail host \"%s\": ERR=%s\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "Failed to connect to mailhost %s\n" +#~ msgstr "Verbindung zu File daemon fehlgeschlagen.\n" + +#, fuzzy +#~ msgid "Fatal socket error: ERR=%s\n" +#~ msgstr "Attribute create error. %s" + +#, fuzzy +#~ msgid "Fatal connect error to %s: ERR=%s\n" +#~ msgstr "Catalog Fehler beim Aktualisieren von volume \"%s\". ERR=%s" + +#, fuzzy +#~ msgid "Fatal _open_osfhandle error: ERR=%s\n" +#~ msgstr "Attribute create error. %s" + +#, fuzzy +#~ msgid "Fatal fdopen error: ERR=%s\n" +#~ msgstr "Attribute create error. %s" + +#, fuzzy +#~ msgid "Fatal dup error: ERR=%s\n" +#~ msgstr "Attribute create error. %s" + +#, fuzzy +#~ msgid "Unable to open -p argument for reading" +#~ msgstr "Konnte Job Eintrag nicht holen. 
ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -n specify the database name (default bacula)\n" +#~ " -u specify database user name (default bacula)\n" +#~ " -P specify database host (default NULL)\n" +#~ " -k path name to the key file (default NULL)\n" +#~ " -e path name to the certificate file (default " +#~ "NULL)\n" +#~ " -a path name to the CA certificate file (default " +#~ "NULL)\n" +#~ " -w specify working directory\n" +#~ " -j specify jobids\n" +#~ " -p specify path\n" +#~ " -f specify file\n" +#~ " -l maximum tuple to fetch\n" +#~ " -T truncate cache table before starting\n" +#~ " -v verbose\n" +#~ " -? print this message\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Version: %s (%s)\n" +#~ "\n" +#~ "Usage: dird [-f -s] [-c config_file] [-d debug_level] [config_file]\n" +#~ " -c setze Konfigurationsdatei auf Datei\n" +#~ " -dnn setze debug level auf nn\n" +#~ " -f starte im Vordergrund (fr debugging Zwecke)\n" +#~ " -g groupid\n" +#~ " -r starte jetzt\n" +#~ " -s no signals\n" +#~ " -t test - Konfiguration Lesen und beenden\n" +#~ " -u userid\n" +#~ " -v ausfhrliche Benutzer Meldungen\n" +#~ " -? diese Meldung ausgeben.\n" +#~ "\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -n specify the database name (default bacula)\n" +#~ " -u specify database user name (default bacula)\n" +#~ " -P specify database host (default NULL)\n" +#~ " -w specify working directory\n" +#~ " -p specify path\n" +#~ " -f specify file\n" +#~ " -l maximum tuple to fetch\n" +#~ " -q print only errors\n" +#~ " -v verbose\n" +#~ " -? print this message\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Version: %s (%s)\n" +#~ "\n" +#~ "Usage: dird [-f -s] [-c config_file] [-d debug_level] [config_file]\n" +#~ " -c setze Konfigurationsdatei auf Datei\n" +#~ " -dnn setze debug level auf nn\n" +#~ " -f starte im Vordergrund (fr debugging Zwecke)\n" +#~ " -g groupid\n" +#~ " -r starte jetzt\n" +#~ " -s no signals\n" +#~ " -t test - Konfiguration Lesen und beenden\n" +#~ " -u userid\n" +#~ " -v ausfhrliche Benutzer Meldungen\n" +#~ " -? diese Meldung ausgeben.\n" +#~ "\n" + +#, fuzzy +#~ msgid "Could not open, database \"%s\".\n" +#~ msgstr "Konnte Datenbank \"%s\" nicht ffen.\n" + +#, fuzzy +#~ msgid "Error no Director resource defined.\n" +#~ msgstr "Keine %s resource definiert\n" + +#, fuzzy +#~ msgid "Deleting: %s\n" +#~ msgstr "Fehler beim holen von Zeile: %s\n" + +#, fuzzy +#~ msgid "Found %d for: %s\n" +#~ msgstr "Keinen Job gefunden fr: %s\n" + +#, fuzzy +#~ msgid "Found %d orphaned File records.\n" +#~ msgstr "Konnte FileSet Eintrag weder holen noch erzeugen.\n" + +#, fuzzy +#~ msgid "Found %d orphaned Filename records.\n" +#~ msgstr "Konnte FileSet Eintrag weder holen noch erzeugen.\n" + +#, fuzzy +#~ msgid "Found %d orphaned FileSet records.\n" +#~ msgstr "Konnte FileSet Eintrag weder holen noch erzeugen.\n" + +#, fuzzy +#~ msgid "Deleting %d orphaned FileSet records.\n" +#~ msgstr "Konnte FileSet Eintrag weder holen noch erzeugen.\n" + +#, fuzzy +#~ msgid "Found %d orphaned Client records.\n" +#~ msgstr "Konnte Client Eintrag nicht anlegen. ERR=%s\n" + +#, fuzzy +#~ msgid "Deleting %d orphaned Client records.\n" +#~ msgstr "Konnte Client Eintrag nicht anlegen. 
ERR=%s\n" + +#, fuzzy +#~ msgid "Deleting JobMedia records of orphaned Job records.\n" +#~ msgstr "Fehler beim Holen des job Eintrags fr den job Bericht: %s" + +#, fuzzy +#~ msgid "Deleting Log records of orphaned Job records.\n" +#~ msgstr "Get DB path record %s fehlerhaften Eintrag gefunden: %s\n" + +#, fuzzy +#~ msgid "Found %d Restore Job records.\n" +#~ msgstr "Konnte keinen Pool Eintrag holen oder erzeugen.\n" + +#, fuzzy +#~ msgid "Reparing %d bad Filename records.\n" +#~ msgstr "Erzeugung des db Filename Eintrags %s fehlgeschlagen. ERR=%s\n" + +#, fuzzy +#~ msgid "%s: unknown\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "%s: unknown file system type\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "Err: Could not access %s: %s\n" +#~ msgstr "Konnte counter %s: nicht aktualisieren: ERR=%s\n" + +#, fuzzy +#~ msgid "Err: Could not follow ff->link %s: %s\n" +#~ msgstr "Konnte regex pattern \"%s\" nicht kompilieren ERR=%s\n" + +#, fuzzy +#~ msgid "Err: Could not stat %s: %s\n" +#~ msgstr "Kann bootstrap Datei nicht ffnen: %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Err: Could not open directory %s: %s\n" +#~ msgstr "Konnte regex pattern \"%s\" nicht kompilieren ERR=%s\n" + +#, fuzzy +#~ msgid "Err: Unknown file ff->type %d: %s\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "========== Path length is zero. File=%s\n" +#~ msgstr "Pfadlnge ist null. Datei=%s\n" + +#, fuzzy +#~ msgid "Could not open include file: %s\n" +#~ msgstr "Kann inkludierte Datei nicht ffnen: %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Could not open exclude file: %s\n" +#~ msgstr "Kann bootstrap Datei nicht ffnen: %s: ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "No Client, Storage or Director resource defined in %s\n" +#~ "Without that I don't how to get status from the File, Storage or Director " +#~ "Daemon :-(\n" +#~ msgstr "" +#~ "Keine Director resource definiert in %s\n" +#~ "Ohne dies weiss ich nicht wer ich bin :-(\n" + +#, fuzzy +#~ msgid "attribute create error. %s" +#~ msgstr "Attribute create error. %s" + +#, fuzzy +#~ msgid " Shared --> " +#~ msgstr " --> " + +#~ msgid "%s OK -- with warnings" +#~ msgstr "%s OK -- mit Warnungen" + +#, fuzzy +#~ msgid "aclx_get error on file \"%s\": ERR=%s\n" +#~ msgstr "Socket Fehler auf %s Kommando: ERR=%s\n" + +#, fuzzy +#~ msgid "Failed to convert acl into text on file \"%s\"\n" +#~ msgstr "Konnte bootstrap Datei %s nicht erzeugen. ERR=%s\n" + +#, fuzzy +#~ msgid "aclx_scanStr error on file \"%s\": ERR=%s\n" +#~ msgstr "Socket Fehler auf %s Kommando: ERR=%s\n" + +#, fuzzy +#~ msgid "aclx_put error on file \"%s\": ERR=%s\n" +#~ msgstr "Socket Fehler auf %s Kommando: ERR=%s\n" + +#, fuzzy +#~ msgid "Generate VSS snapshots. Driver=\"%s\"\n" +#~ msgstr "Erzeugung des DB Storage Eintrags %s fehlgeschlagen. ERR=%s\n" + +#, fuzzy +#~ msgid "VSS CreateSnapshots failed. ERR=%s\n" +#~ msgstr "Erzeugung des DB Storage Eintrags %s fehlgeschlagen. ERR=%s\n" + +#, fuzzy +#~ msgid "VSS was not initialized properly. ERR=%s\n" +#~ msgstr "Kann DB lock nicht initialisieren. ERR=%s\n" + +#, fuzzy +#~ msgid "WriteEncryptedFileRaw failure: ERR=%s\n" +#~ msgstr "Media id select fehlgeschlagen: ERR=%s\n" + +#, fuzzy +#~ msgid "llistea error on file \"%s\": ERR=%s\n" +#~ msgstr "Socket Fehler auf %s Kommando: ERR=%s\n" + +#, fuzzy +#~ msgid "lgetea error on file \"%s\": ERR=%s\n" +#~ msgstr "Catalog Fehler beim Aktualisieren von volume \"%s\". ERR=%s" + +#, fuzzy +#~ msgid "attr_list error on file \"%s\": ERR=%s\n" +#~ msgstr "Catalog Fehler beim Aktualisieren von volume \"%s\". 
ERR=%s" + +#, fuzzy +#~ msgid "attr_set error on file \"%s\": ERR=%s\n" +#~ msgstr "Socket Fehler auf %s Kommando: ERR=%s\n" + +#, fuzzy +#~ msgid "getproplist error on file \"%s\": ERR=%s\n" +#~ msgstr "Catalog Fehler beim Aktualisieren von volume \"%s\". ERR=%s" + +#, fuzzy +#~ msgid "Unable create proper proplist to restore xattrs on file \"%s\"\n" +#~ msgstr "Konnte bootstrap Datei %s nicht erzeugen. ERR=%s\n" + +#, fuzzy +#~ msgid "setproplist error on file \"%s\": ERR=%s\n" +#~ msgstr "Catalog Fehler beim Aktualisieren von volume \"%s\". ERR=%s" + +#, fuzzy +#~ msgid "Unable to open xattr %s on \"%s\": ERR=%s\n" +#~ msgstr "Konnte Job Eintrag nicht holen. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to read symlin %s on \"%s\": ERR=%s\n" +#~ msgstr "Konnte Job Eintrag nicht holen. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to chdir to xattr space of file \"%s\": ERR=%s\n" +#~ msgstr "Konnte bootstrap Datei %s nicht erzeugen. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to open xattr space %s on file \"%s\": ERR=%s\n" +#~ msgstr "Kann Catalog DB Steuerdatei nicht ffnen %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to chdir to xattr space on file \"%s\": ERR=%s\n" +#~ msgstr "Konnte bootstrap Datei %s nicht erzeugen. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to chdir to xattr space %s on file \"%s\": ERR=%s\n" +#~ msgstr "Konnte bootstrap Datei %s nicht erzeugen. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to mkfifo xattr %s on file \"%s\": ERR=%s\n" +#~ msgstr "Kann Catalog DB Steuerdatei nicht ffnen %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to mknod xattr %s on file \"%s\": ERR=%s\n" +#~ msgstr "Kann Catalog DB Steuerdatei nicht ffnen %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to mkdir xattr %s on file \"%s\": ERR=%s\n" +#~ msgstr "Socket Fehler auf %s Kommando: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to symlink xattr %s to %s on file \"%s\": ERR=%s\n" +#~ msgstr "Konnte Job Eintrag nicht holen. ERR=%s\n" + +#, fuzzy +#~ msgid "Can't use replace=ifnewer with Delta plugin on %s\n" +#~ msgstr "Cannot unescape string: ERR=%s\n" + +#, fuzzy +#~ msgid "Can't use replace=ifolder with Delta plugin on %s\n" +#~ msgstr "Cannot unescape string: ERR=%s\n" + +#, fuzzy +#~ msgid "Can't delete working directory %s. ERR=%s\n" +#~ msgstr "Kann inkludierte Datei nicht ffnen: %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Unknown parameter for %s. Expecting block or file\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "Unknown parameter %s.\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "Unable to access guest volume\n" +#~ msgstr "Kann Konsole \"%s\" an %s:%s:%d nicht authentisieren.\n" + +#, fuzzy +#~ msgid "Unable to create temporary file %s. ERR=%s\n" +#~ msgstr "Konnte bootstrap Datei %s nicht erzeugen. ERR=%s\n" + +#, fuzzy +#~ msgid "Can't use mode=%s in MySQL plugin\n" +#~ msgstr "Cannot unescape string: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to fdopen file %s. ERR=%s\n" +#~ msgstr "Kann Datenbank=%s.nicht ffen. ERR=%s\n" + +#, fuzzy +#~ msgid " Dumping database \"%s\"\n" +#~ msgstr "Konnte Datenbank \"%s\" nicht ffen.\n" + +#, fuzzy +#~ msgid "Unable to detect the MySQL data_directory on this system.\n" +#~ msgstr "Kann Catalog DB Steuerdatei nicht ffnen %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Can't get server configuration.\n" +#~ msgstr "Vorherige Konfiguration zurckgesetzt.\n" + +#, fuzzy +#~ msgid "Unable to get the BINLOG list.\n" +#~ msgstr "Konnte Job Eintrag nicht holen. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to determine the last LSN for %s (Previous job is %s)\n" +#~ msgstr "Konnte Job Eintrag nicht holen. 
ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to detect datadir from MySQL\n" +#~ msgstr "Kann Catalog DB Steuerdatei nicht ffnen %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to get last LSN from the backup\n" +#~ msgstr "Konnte Job Eintrag nicht holen. ERR=%s\n" + +#, fuzzy +#~ msgid "Restoring target database \"%s\"\n" +#~ msgstr "Konnte Datenbank \"%s\" nicht ffen.\n" + +#, fuzzy +#~ msgid "Creating target database \"%s\"\n" +#~ msgstr "Konnte Datenbank \"%s\" nicht ffen.\n" + +#, fuzzy +#~ msgid "Database \"%s\" already exists. Skipping creation.\n" +#~ msgstr "Datenbank %s existiert nicht, bitte erzeugen.\n" + +#, fuzzy +#~ msgid "Can't use mode=%s in postgresql plugin\n" +#~ msgstr "Cannot unescape string: ERR=%s\n" + +#, fuzzy +#~ msgid "Can't use service=%s in postgresql plugin ERR=%s\n" +#~ msgstr "Cannot unescape string: ERR=%s\n" + +#, fuzzy +#~ msgid "Can't get cluster configuration.\n" +#~ msgstr "Vorherige Konfiguration zurckgesetzt.\n" + +#, fuzzy +#~ msgid "Can't determine the last WAL file\n" +#~ msgstr "Konnte Job Eintrag nicht holen. ERR=%s\n" + +#, fuzzy +#~ msgid "Can't determine WAL directory\n" +#~ msgstr "Kann inkludierte Datei nicht ffnen: %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Can't open WAL directory %s. ERR=%s\n" +#~ msgstr "Kann inkludierte Datei nicht ffnen: %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to detect the PostgreSQL data_directory on this system.\n" +#~ msgstr "Kann Catalog DB Steuerdatei nicht ffnen %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to find data_directory=%s on this system. ERR=%s\n" +#~ msgstr "Kann Catalog DB Steuerdatei nicht ffnen %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to determine the first WAL file on this system.\n" +#~ msgstr "Kann Catalog DB Steuerdatei nicht ffnen %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to analyse tablespaces %s on this system. ERR=%s\n" +#~ msgstr "Kann Catalog DB Steuerdatei nicht ffnen %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to analyse data_directory %s on this system. ERR=%s\n" +#~ msgstr "Kann Catalog DB Steuerdatei nicht ffnen %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Can't create the %s file for recovery. ERR=%s\n" +#~ msgstr "Konnte FileSet \"%s\" Eintrag nicht erzeugen. ERR=%s\n" + +#, fuzzy +#~ msgid "3612 JobId=%u waiting because device %s is reserved by: %s.\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "DDE commit failed. ERR=%s\n" +#~ msgstr "pthread_create: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot create DedupDirectory: %s" +#~ msgstr "kann Variable operate nicht setzen: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot create DedupIndexDirectory: %s" +#~ msgstr "kann Variable operate nicht setzen: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot create recovery directory: %s" +#~ msgstr "kann Variable operate nicht setzen: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot delete temporary recovery directory: %s" +#~ msgstr "kann Variable operate nicht setzen: ERR=%s\n" + +#, fuzzy +#~ msgid "Socket error or stop during rehydration. ERR=%d\n" +#~ msgstr "Socket Fehler auf %s Kommando: ERR=%s\n" + +#, fuzzy +#~ msgid "Unexpected message from FD, n=%d msglen=%d msg=%s\n" +#~ msgstr "Fehler beim Lesen der catalog DB Steuerdatei. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to init adata mutex: ERR=%s\n" +#~ msgstr "Kann DB lock nicht initialisieren. ERR=%s\n" + +#, fuzzy +#~ msgid "Send caps to Client failed. ERR=%s\n" +#~ msgstr "Erzeugung des DB Client Eintrags %s fehlgeschlagen. ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "Got Aligned or File type Volume %s on Dedup device %s. 
Wanted File.\n" +#~ msgstr "Neues Volume \"%s\" in catalog erzeugt.\n" + +#, fuzzy +#~ msgid "" +#~ "Got Aligned or Dedup type Volume %s on File device %s. Wanted File.\n" +#~ msgstr "Neues Volume \"%s\" in catalog erzeugt.\n" + +#, fuzzy +#~ msgid "" +#~ "Got File or Dedup type Volume %s on Aligned device %s. Wanted Aligned.\n" +#~ msgstr "Neues Volume \"%s\" in catalog erzeugt.\n" + +#, fuzzy +#~ msgid "Unable to parse user supplied restore configuration\n" +#~ msgstr "Konnte Job Eintrag nicht holen. ERR=%s\n" + +#~ msgid "Expected an fstype string, got: %s\n" +#~ msgstr "fstype Ausdruck erwartet, erhalten: %s\n" + +#~ msgid "Expected an drivetype string, got: %s\n" +#~ msgstr "drivetype Ausdruck erwartet, erhalten: %s\n" + +#, fuzzy +#~ msgid "Storage from Run NextPool override" +#~ msgstr "Storage aus der \"NextPool\" Ressource des Pools." + +#, fuzzy +#~ msgid "Storage from Job's NextPool resource" +#~ msgstr "Storage aus der \"NextPool\" Ressource des Pools." + +#~ msgid "Storage from Pool's NextPool resource" +#~ msgstr "Storage aus der \"NextPool\" Ressource des Pools." + +#, fuzzy +#~ msgid "Storage from NextPool override" +#~ msgstr "Storage aus der \"NextPool\" Ressource des Pools." + +#, fuzzy +#~ msgid "FD connect failed: Job name not found: %s\n" +#~ msgstr "Job nicht gefunden: %s\n" + +#, fuzzy +#~ msgid "" +#~ "Unable to connect to Ingres server.\n" +#~ "Database=%s User=%s\n" +#~ "It is probably not running or your password is incorrect.\n" +#~ msgstr "" +#~ "Kann Verbindung zu MySQL Server nicht aufbauen. \n" +#~ "Datenbank=%s Benutzer=%s\n" +#~ "Der Datenbankserver luft mglicherweise nicht oder das Passwort ist " +#~ "nicht korrekt.\n" + +#, fuzzy +#~ msgid "A user name for Ingres must be supplied.\n" +#~ msgstr "Ein Benutzername fr MySQL muss angegeben werden.\n" + +#, fuzzy +#~ msgid "" +#~ "Unable to connect to DBI interface. Type=%s Database=%s User=%s\n" +#~ "Possible causes: SQL server not running; password incorrect; " +#~ "max_connections exceeded.\n" +#~ msgstr "" +#~ "Kann Verbindung zu PostgreSQL Server nicht aufbauen.\n" +#~ "Datenbank=%s Benutzer=%s\n" +#~ "Der Datenbankserver luft mglicherweise nicht oder das Passwort ist " +#~ "nicht korrekt.\n" + +#, fuzzy +#~ msgid "error inserting batch mode: %s" +#~ msgstr "Fehler beim holen von Zeile: %s\n" + +#, fuzzy +#~ msgid "Illegal JobId %s ignored\n" +#~ msgstr "Clone JobId %d gestartet.\n" + +#~ msgid "Restoring..." +#~ msgstr "Wiederherstellung luft..." + +#, fuzzy +#~ msgid "# Bacula bwx-console Configuration File\n" +#~ msgstr "Bitte die Konfigurationsdatei korrigieren: %s\n" + +#~ msgid "1992 Update Media error. VolFiles=%u, CatFiles=%u\n" +#~ msgstr "1992 Update Media Fehler. VolFiles=%u, CatFiles=%u\n" + +#, fuzzy +#~ msgid "Unable to get the Full job for %s\n" +#~ msgstr "Konnte Job Eintrag nicht holen. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to write to %s to save full job name. ERR=%s\n" +#~ msgstr "Konnte bootstrap Datei %s nicht erzeugen. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to %s exitcode=%d\n" +#~ msgstr "Konnte bootstrap Datei %s nicht erzeugen. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to save last controlfile into file %s. ERR=%s\n" +#~ msgstr "Konnte bootstrap Datei %s nicht erzeugen. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to save last SCN into file %s. ERR=%s\n" +#~ msgstr "Konnte bootstrap Datei %s nicht erzeugen. ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot find last control file ret=%d\n" +#~ msgstr "Kann Console resource %s nicht finden\n" + +#, fuzzy +#~ msgid "Can't open /etc/oratab. 
ERR=%s\n" +#~ msgstr "Kann inkludierte Datei nicht ffnen: %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to list schema for %s. exitcode=%d\n" +#~ msgstr "Konnte bootstrap Datei %s nicht erzeugen. ERR=%s\n" + +#, fuzzy +#~ msgid "Error occured while selecting instance.\n" +#~ msgstr "Fehler beim schreiben der bsr Datei.\n" + +#, fuzzy +#~ msgid "Unable to open %s to save RMAN output. ERR=%s\n" +#~ msgstr "Konnte Job Eintrag nicht holen. ERR=%s\n" + +#, fuzzy +#~ msgid "Error occured while dumping tempfiles ERR=%s\n" +#~ msgstr "Fehler beim schreiben der bsr Datei.\n" + +#, fuzzy +#~ msgid "" +#~ "Unable to copy schema \"%s\" to \"%s\" ret=%d\n" +#~ "%s\n" +#~ msgstr "Konnte Job Eintrag nicht holen. ERR=%s\n" + +#, fuzzy +#~ msgid "Disable a job" +#~ msgstr "ist nicht aktiviert" + +#, fuzzy +#~ msgid "Failed to initialize database backend\n" +#~ msgstr "Konnte Datenbank \"%s\" nicht ffen.\n" + +#, fuzzy +#~ msgid "disabled" +#~ msgstr "ist nicht aktiviert" + +#~ msgid "Name item is required in %s resource, but not found.\n" +#~ msgstr "" +#~ "Name item wird bentigt in %s resource, wurde aber nicht gefunden.\n" + +#, fuzzy +#~ msgid "Append data error.\n" +#~ msgstr "unbekannt" + +#, fuzzy +#~ msgid "A dbi driver for DBI must be supplied.\n" +#~ msgstr "Ein Benutzername fr MySQL muss angegeben werden.\n" + +#, fuzzy +#~ msgid "Max sched run time exceeded. Job canceled.\n" +#~ msgstr "\"Max run time\" berschritten, Job abgebrochen.\n" + +#, fuzzy +#~ msgid "" +#~ "Connection from unknown Director %s at %s rejected.\n" +#~ "Please see http://www.bacula.org/en/rel-manual/Bacula_Freque_Asked_Questi." +#~ "html#SECTION003760000000000000000 for help.\n" +#~ msgstr "unbekannthttp://www.bacula.org/rel-manual/faq.html" + +#, fuzzy +#~ msgid "" +#~ "Incorrect authorization key from File daemon at %s rejected.\n" +#~ "Please see http://www.bacula.org/en/rel-manual/Bacula_Freque_Asked_Questi." +#~ "html#SECTION003760000000000000000 for help.\n" +#~ msgstr "unbekannthttp://www.bacula.org/rel-manual/faq.html" + +#~ msgid "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" +#~ msgstr "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" + +#~ msgid "WARNING!!!! The Internal Database is NOT OPERATIONAL!\n" +#~ msgstr "WARNUNG!!!! Die Interne Datenbank ist NICHT BETRIEBSBEREIT!\n" + +#~ msgid "You should use SQLite, PostgreSQL, or MySQL\n" +#~ msgstr "Sie sollten SQLite, PostgreSQL, oder MySQL benutzen\n" + +#~ msgid "Unable to open Catalog DB control file %s: ERR=%s\n" +#~ msgstr "Kann Catalog DB Steuerdatei nicht ffnen %s: ERR=%s\n" + +#~ msgid "" +#~ "Error, catalog DB control file wrong version. 
Wanted %d, got %d\n" +#~ "Please reinitialize the working directory.\n" +#~ msgstr "" +#~ "Fehler, catalog DB steuerdatei hat die falsche Version: erwartet: %d, " +#~ "erhalten %d\n" +#~ "Bitte das Arbeitsverzeichnis neu initialisieren.\n" + +#~ msgid "Read storage \"%s\" same as write storage.\n" +#~ msgstr "Lese storage \"%s\" ist gleich mit Schreib storage.\n" + +#, fuzzy +#~ msgid "No Client record defined for job %s\n" +#~ msgstr "Keine Job records definiert in %s\n" + +#, fuzzy +#~ msgid "No FileSet record defined for job %s\n" +#~ msgstr "Keine Job records definiert in %s\n" + +#, fuzzy +#~ msgid "No Pool resource defined for job %s\n" +#~ msgstr "Keine %s resource definiert\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Version: %s (%s) %s %s %s\n" +#~ "\n" +#~ "Usage: bgnome-console [-s] [-c config_file] [-d debug_level] " +#~ "[config_file]\n" +#~ " -c set configuration file to file\n" +#~ " -dnn set debug level to nn\n" +#~ " -s no signals\n" +#~ " -t test - read configuration and exit\n" +#~ " -? print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Version: %s (%s)\n" +#~ "\n" +#~ "Usage: dird [-f -s] [-c config_file] [-d debug_level] [config_file]\n" +#~ " -c setze Konfigurationsdatei auf Datei\n" +#~ " -dnn setze debug level auf nn\n" +#~ " -f starte im Vordergrund (fr debugging Zwecke)\n" +#~ " -g groupid\n" +#~ " -r starte jetzt\n" +#~ " -s no signals\n" +#~ " -t test - Konfiguration Lesen und beenden\n" +#~ " -u userid\n" +#~ " -v ausfhrliche Benutzer Meldungen\n" +#~ " -? diese Meldung ausgeben.\n" +#~ "\n" + +#~ msgid "pthread_mutex_lock: ERR=%s\n" +#~ msgstr "pthread_mutex_lock: ERR=%s\n" + +#~ msgid "pthread_mutex_unlock: ERR=%s\n" +#~ msgstr "pthread_mutex_unlock: ERR=%s\n" + +#~ msgid "Start Migration JobId %s, Job=%s\n" +#~ msgstr "Starte Migration JobId %s, Job=%s\n" + +#~ msgid "No Volumes found to migrate.\n" +#~ msgstr "Keine Volumes zum Migrieren gefunden.\n" + +#~ msgid "Migration JobId %d started.\n" +#~ msgstr "Migration JobId %d gestartet.\n" + +#, fuzzy +#~ msgid "" +#~ "Wanted to append to Volume \"%s\", but device %s is busy writing on \"%s" +#~ "\" .\n" +#~ msgstr "" +#~ "Wollte Volume \"%s\"erzeugen, aber , but it already exists. Trying " +#~ "again.\n" diff --git a/po/en@boldquot.header b/po/en@boldquot.header new file mode 100644 index 00000000..fedb6a06 --- /dev/null +++ b/po/en@boldquot.header @@ -0,0 +1,25 @@ +# All this catalog "translates" are quotation characters. +# The msgids must be ASCII and therefore cannot contain real quotation +# characters, only substitutes like grave accent (0x60), apostrophe (0x27) +# and double quote (0x22). These substitutes look strange; see +# http://www.cl.cam.ac.uk/~mgk25/ucs/quotes.html +# +# This catalog translates grave accent (0x60) and apostrophe (0x27) to +# left single quotation mark (U+2018) and right single quotation mark (U+2019). +# It also translates pairs of apostrophe (0x27) to +# left single quotation mark (U+2018) and right single quotation mark (U+2019) +# and pairs of quotation mark (0x22) to +# left double quotation mark (U+201C) and right double quotation mark (U+201D). +# +# When output to an UTF-8 terminal, the quotation characters appear perfectly. +# When output to an ISO-8859-1 terminal, the single quotation marks are +# transliterated to apostrophes (by iconv in glibc 2.2 or newer) or to +# grave/acute accent (by libiconv), and the double quotation marks are +# transliterated to 0x22. 
+# When output to an ASCII terminal, the single quotation marks are +# transliterated to apostrophes, and the double quotation marks are +# transliterated to 0x22. +# +# This catalog furthermore displays the text between the quotation marks in +# bold face, assuming the VT100/XTerm escape sequences. +# diff --git a/po/en@quot.header b/po/en@quot.header new file mode 100644 index 00000000..a9647fc3 --- /dev/null +++ b/po/en@quot.header @@ -0,0 +1,22 @@ +# All this catalog "translates" are quotation characters. +# The msgids must be ASCII and therefore cannot contain real quotation +# characters, only substitutes like grave accent (0x60), apostrophe (0x27) +# and double quote (0x22). These substitutes look strange; see +# http://www.cl.cam.ac.uk/~mgk25/ucs/quotes.html +# +# This catalog translates grave accent (0x60) and apostrophe (0x27) to +# left single quotation mark (U+2018) and right single quotation mark (U+2019). +# It also translates pairs of apostrophe (0x27) to +# left single quotation mark (U+2018) and right single quotation mark (U+2019) +# and pairs of quotation mark (0x22) to +# left double quotation mark (U+201C) and right double quotation mark (U+201D). +# +# When output to an UTF-8 terminal, the quotation characters appear perfectly. +# When output to an ISO-8859-1 terminal, the single quotation marks are +# transliterated to apostrophes (by iconv in glibc 2.2 or newer) or to +# grave/acute accent (by libiconv), and the double quotation marks are +# transliterated to 0x22. +# When output to an ASCII terminal, the single quotation marks are +# transliterated to apostrophes, and the double quotation marks are +# transliterated to 0x22. +# diff --git a/po/es.po b/po/es.po new file mode 100644 index 00000000..19d24145 --- /dev/null +++ b/po/es.po @@ -0,0 +1,16931 @@ +# Spanish translations for branch package +# Traducciones al español para el paquete branch. 
+# Copyright (C) 2000-2016 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +msgid "" +msgstr "" +"Project-Id-Version: Bacula 3.0.2\n" +"Report-Msgid-Bugs-To: bacula-devel@lists.sourceforge.net\n" +"POT-Creation-Date: 2018-08-11 21:43+0200\n" +"PO-Revision-Date: 2009-12-01 16:04-0300\n" +"Last-Translator: Victor Hugo dos Santos \n" +"Language-Team: Bacula Spanish Team \n" +"Language: Spanish\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" +"X-Poedit-Language: Spanish\n" +"X-Poedit-SourceCharset: utf-8\n" + +#: src/baconfig.h:62 src/baconfig.h:63 src/baconfig.h:68 src/baconfig.h:69 +#: src/baconfig.h:80 src/baconfig.h:81 +#, c-format +msgid "Failed ASSERT: %s\n" +msgstr "Fallo ASSERT: %s\n" + +#: src/baconfig.h:89 +msgid "*None*" +msgstr "*Ninguno*" + +#: src/lib/status.h:84 +msgid "" +"\n" +"Terminated Jobs:\n" +msgstr "" +"\n" +"Jobs No Terminados:\n" + +#: src/lib/status.h:91 +#, fuzzy +msgid " JobId Level Files Bytes Status Finished Name \n" +msgstr "JobId Nivel Archivos Bytes Estado Finalizado Nombre \n" + +#: src/lib/status.h:93 +msgid "===================================================================\n" +msgstr "===================================================================\n" + +#: src/lib/status.h:119 +msgid "Created" +msgstr "Creado" + +#: src/lib/status.h:123 +msgid "Error" +msgstr "Error" + +#: src/lib/status.h:126 +msgid "Diffs" +msgstr "Diferencias" + +#: src/lib/status.h:129 +msgid "Cancel" +msgstr "Cancelar" + +#: src/lib/status.h:132 +msgid "OK" +msgstr "OK" + +#: src/lib/status.h:135 +msgid "OK -- with warnings" +msgstr "OK - con advertencias" + +#: src/lib/status.h:138 +msgid "Incomplete" +msgstr "" + +#: src/lib/status.h:141 +msgid "Other" +msgstr "Otros" + +#: src/lib/status.h:153 +#, fuzzy, c-format +msgid "%6d\t%-6s\t%8s\t%10s\t%-7s\t%-8s\t%s\n" +msgstr "%6d\t%-6s\t%8s\t%10s\t%-7s\t%-8s\t%s\n" + +#: src/lib/status.h:182 +#, fuzzy, c-format +msgid "%6d %-6s %8s %10s %-7s %-8s %s\n" +msgstr "%6d %-6s %8s %10s %-7s %-8s %s\n" + +#: src/lib/status.h:214 src/lib/status.h:225 src/lib/status.h:239 +#: src/lib/status.h:243 src/lib/status.h:247 +msgid "Bacula " +msgstr "Bacula" + +#: src/qt-console/bat_conf.cpp:133 +#, c-format +msgid "No record for %d %s\n" +msgstr "Ningún registro para %d %s\n" + +#: src/qt-console/bat_conf.cpp:142 +#, c-format +msgid "Director: name=%s address=%s DIRport=%d\n" +msgstr "Director: nombre=%s dirección=%s DIRport=%d\n" + +#: src/qt-console/bat_conf.cpp:146 +#, c-format +msgid "Console: name=%s\n" +msgstr "Console: nombre=%s\n" + +#: src/qt-console/bat_conf.cpp:149 +#, c-format +msgid "ConsoleFont: name=%s font face=%s\n" +msgstr "ConsoleFont: nombre=%s font face=%s\n" + +#: src/qt-console/bat_conf.cpp:153 src/qt-console/bat_conf.cpp:235 +#: src/qt-console/bat_conf.cpp:282 src/qt-console/bat_conf.cpp:312 +#, c-format +msgid "Unknown resource type %d\n" +msgstr "Tipo de recurso desconocido %d\n" + +#: src/qt-console/bat_conf.cpp:259 +#: src/qt-console/tray-monitor/tray_conf.cpp:311 +#, fuzzy, c-format +msgid "\"%s\" directive is required in \"%s\" resource, but not found.\n" +msgstr "ítem \"%s\" es necesario en recurso \"%s\", pero no se encuentra.\n" + +#: src/qt-console/bcomm/dircomm.cpp:88 +#, c-format +msgid "Already connected\"%s\".\n" +msgstr "Ya conectado\"%s\".\n" + +#: src/qt-console/bcomm/dircomm.cpp:99 +#, c-format +msgid "Connecting to Director %s:%d" +msgstr "Conectando con Director %s:%d" + +#: 
src/qt-console/bcomm/dircomm.cpp:101 +#, c-format +msgid "" +"Connecting to Director %s:%d\n" +"\n" +msgstr "" +"Conectando con Director %s:%d\n" +"\n" + +#: src/qt-console/bcomm/dircomm.cpp:153 +#, c-format +msgid "Failed to initialize TLS context for Console \"%s\".\n" +msgstr "Fallo al inicializar el contexto TLS para la consola \"%s\".\n" + +#: src/qt-console/bcomm/dircomm.cpp:176 +#, c-format +msgid "Failed to initialize TLS context for Director \"%s\".\n" +msgstr "Fallo al inicializar el contexto TLS para el Director \"%s\".\n" + +#: src/qt-console/bcomm/dircomm.cpp:198 +#: src/qt-console/tray-monitor/task.cpp:233 +msgid "Director daemon" +msgstr "Servicio Director" + +#: src/qt-console/bcomm/dircomm.cpp:236 +msgid "Initializing ..." +msgstr "Inicializando ..." + +#: src/qt-console/bcomm/dircomm.cpp:252 src/qt-console/console/console.cpp:133 +msgid "Connected" +msgstr "Conectado" + +#: src/qt-console/bcomm/dircomm.cpp:377 +msgid "Command completed ..." +msgstr "Comando completado ..." + +#: src/qt-console/bcomm/dircomm.cpp:384 src/qt-console/console/console.cpp:370 +msgid "Processing command ..." +msgstr "Procesando comando ..." + +#: src/qt-console/bcomm/dircomm.cpp:391 +msgid "At main prompt waiting for input ..." +msgstr "En prompt principal esperando por una entrada..." + +#: src/qt-console/bcomm/dircomm.cpp:398 src/qt-console/bcomm/dircomm.cpp:408 +msgid "At prompt waiting for input ..." +msgstr "En prompt esperando por una entrada..." + +#: src/qt-console/bcomm/dircomm.cpp:416 +msgid "Command failed." +msgstr "Comando fallido." + +#: src/qt-console/bcomm/dircomm.cpp:488 +msgid "Director disconnected." +msgstr "Director desconectado." + +#: src/qt-console/bcomm/dircomm_auth.cpp:110 +#, c-format +msgid "Director authorization problem at \"%s:%d\"\n" +msgstr "Problema de autorización de Director en \"%s:%d\"\n" + +#: src/qt-console/bcomm/dircomm_auth.cpp:117 +#, c-format +msgid "" +"Authorization problem: Remote server at \"%s:%d\" did not advertise required " +"TLS support.\n" +msgstr "" +"Problema de autorización: El servidor remoto en \"%s:%d\" no anuncio soporte " +"TLS requiere.\n" + +#: src/qt-console/bcomm/dircomm_auth.cpp:125 +#, c-format +msgid "" +"Authorization problem with Director at \"%s:%d\": Remote server requires " +"TLS.\n" +msgstr "" +"Problema de autorización con el Director en \"%s:%d\": El servidor remoto " +"requiere TLS.\n" + +#: src/qt-console/bcomm/dircomm_auth.cpp:136 +#, c-format +msgid "TLS negotiation failed with Director at \"%s:%d\"\n" +msgstr "Fallo negociación TLS no con el Director en \"%s:%d\"\n" + +#: src/qt-console/bcomm/dircomm_auth.cpp:148 +#, c-format +msgid "" +"Bad response to Hello command: ERR=%s\n" +"The Director at \"%s:%d\" is probably not running.\n" +msgstr "" +"Mala respuesta al comando Hello: ERR=%s\n" +"El director en \"%s:%d\" probablemente no esta corriendo.\n" + +#: src/qt-console/bcomm/dircomm_auth.cpp:165 +#, c-format +msgid "Director at \"%s:%d\" rejected Hello command\n" +msgstr "Director en \"%s:%d\" rechazó comando Hello\n" + +#: src/qt-console/bcomm/dircomm_auth.cpp:182 +#, fuzzy, c-format +msgid "" +"Authorization problem with Director at \"%s:%d\"\n" +"Most likely the passwords do not agree.\n" +"If you are using TLS, there may have been a certificate validation error " +"during the TLS handshake.\n" +"For help, please see " +msgstr "" +"Problema de autorización con el Director en \"%s:%d\"\n" +"Lo mas probable es que las contraseñas no están de acuerdo.\n" +"Si está usando TLS, puede haber habido un error de 
validación de " +"certificados durante la negociación TLS.\n" +" Por favor vea http://www.bacula.org/en/rel-manual/" +"Bacula_Freque_Asked_Questi.html#SECTION003760000000000000000 para ayuda.\n" + +#: src/qt-console/main.cpp:160 +msgid "Cryptography library initialization failed.\n" +msgstr "Inicialización de la librería de criptografía ha fallado.\n" + +#: src/qt-console/main.cpp:164 +#, c-format +msgid "Please correct configuration file: %s\n" +msgstr "Por favor, corrija el archivo de configuración: %s\n" + +#: src/qt-console/main.cpp:188 +#, fuzzy, c-format +msgid "" +"\n" +"%sVersion: %s (%s) %s %s %s\n" +"\n" +"Usage: bat [-s] [-c config_file] [-d debug_level] [config_file]\n" +" -c set configuration file to file\n" +" -dnn set debug level to nn\n" +" -s no signals\n" +" -t test - read configuration and exit\n" +" -? print this message.\n" +"\n" +msgstr "" +"\n" +"Versión: %s (%s) %s %s %s\n" +"\n" +"Utilice: bat [-s] [-c archivo_de_configuración] [-d nivel_depuración " +"[archivo_de_configuración]\n" +" -c establecer archivo de configuración para el archivo\n" +" -dnn establecer el nivel de depuración a nn\n" +" -s no hay señales\n" +" -t prueba - leer la configuración y salir\n" +" -? imprimir este mensaje.\n" +"\n" + +#: src/qt-console/main.cpp:221 src/qt-console/main.cpp:251 +msgid "TLS required but not configured in Bacula.\n" +msgstr "Se requiere TLS pero no está configurado en Bacula.\n" + +#: src/qt-console/main.cpp:229 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Director \"%s\" in %s. At least one CA certificate store is required.\n" +msgstr "" +"Ni \"Certificado TLS CA\" o \"Directorio de Certificado TLS CA\" están " +"definidos para Director \"%s\" en %s. Por lo menos un almacén de " +"Certificados CA es necesario.\n" + +#: src/qt-console/main.cpp:238 +#, c-format +msgid "" +"No Director resource defined in %s\n" +"Without that I don't how to speak to the Director :-(\n" +msgstr "" +"Recurso Director no definido en %s\n" +"Sin eso no sé cómo hablar con el Director :-(\n" + +#: src/qt-console/main.cpp:259 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Console \"%s\" in %s.\n" +msgstr "" +"Ni \"Certificado TLS CA\" o \"Directorio de Certificado TLS CA\" están " +"definidos para Console \"%s\" en %s.\n" + +#: src/qt-console/tray-monitor/authenticate.cpp:86 +#, fuzzy +msgid "" +"Authorization problem.\n" +"Most likely the passwords do not agree.\n" +"For help, please see " +msgstr "" +"Director problema de autorización.\n" +"Lo mas probable es que las contraseñas no están de acuerdo.\n" +"Por favor vea http://www.bacula.org/en/rel-manual/Bacula_Freque_Asked_Questi." 
+"html#SECTION003760000000000000000 para ayuda.\n" + +#: src/qt-console/tray-monitor/authenticate.cpp:94 +msgid "" +"Authorization problem: Remote server did not advertise required TLS " +"support.\n" +msgstr "" +"Problema de Autorización: El servidor remoto no anuncio soporte TLS " +"requerido.\n" + +#: src/qt-console/tray-monitor/authenticate.cpp:110 +msgid "TLS negotiation failed\n" +msgstr "Negociación TLS fallida\n" + +#: src/qt-console/tray-monitor/authenticate.cpp:117 +#, c-format +msgid "Bad response to Hello command: ERR=%s\n" +msgstr "mala respuesta al comando Hello: ERR=%s\n" + +#: src/qt-console/tray-monitor/authenticate.cpp:134 +#, fuzzy +msgid "Daemon rejected Hello command\n" +msgstr "El demonio File rechazó el comando Hello\n" + +#: src/qt-console/tray-monitor/conf.cpp:89 +msgid "The Name of the Monitor should be set" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:129 +msgid "The name of the Resource should be set" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:138 +#, c-format +msgid "The address of the Resource should be set for resource %s" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:147 +#, c-format +msgid "The Password of should be set for resource %s" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:171 +#, fuzzy, c-format +msgid "The TLS CA Certificate File should be a PEM file for resource %s" +msgstr "\"Certificado TLS\" archivo no definido para director \"%s\" en %s.\n" + +#: src/qt-console/tray-monitor/conf.cpp:182 +#, c-format +msgid "The TLS CA Certificate Directory should be a directory for resource %s" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:193 +#, fuzzy, c-format +msgid "The TLS Certificate File should be a file for resource %s" +msgstr "\"Certificado TLS\" archivo no definido para director \"%s\" en %s.\n" + +#: src/qt-console/tray-monitor/conf.cpp:204 +#, c-format +msgid "The TLS Key File should be a file for resource %s" +msgstr "" + +#: src/qt-console/tray-monitor/runjob.cpp:45 +msgid "This restricted console does not have access to Backup jobs" +msgstr "" + +#: src/qt-console/tray-monitor/runjob.cpp:123 +#, fuzzy +msgid "Nothing selected" +msgstr "Jobs no seleccionados.\n" + +#: src/qt-console/tray-monitor/task.cpp:97 +msgid "Bandwidth can set only set on Client" +msgstr "" + +#: src/qt-console/tray-monitor/task.cpp:102 +#, fuzzy +msgid "Bandwidth parameter is invalid" +msgstr "parámetro de anchura ausente en la operación de relleno" + +#: src/qt-console/tray-monitor/task.cpp:177 +#, fuzzy +msgid "Client daemon" +msgstr "demonio File" + +#: src/qt-console/tray-monitor/task.cpp:205 +msgid "Storage daemon" +msgstr "Demonio Storage" + +#: src/qt-console/tray-monitor/tray-monitor.cpp:45 +#, fuzzy, c-format +msgid "" +"\n" +"%sVersion: %s (%s) %s %s %s\n" +"\n" +"Usage: tray-monitor [-c config_file] [-d debug_level]\n" +" -c set configuration file to file\n" +" -d set debug level to \n" +" -dt print timestamp in debug output\n" +" -t test - read configuration and exit\n" +" -W 0/1 force the detection of the systray\n" +" -? print this message.\n" +"\n" +msgstr "" +"Escrito por Nicolas Boichat (2004)\n" +"\n" +"Versión: %s (%s) %s %s %s\n" +"\n" +"Utilice: tray-monitor [-c archivo_configuración] [-d nivel_depuración]\n" +" -c establece archivo de configuración para archivo\n" +" -d establece nivel de depuración para \n" +" -dt imprime timestamp en salida de depuración\n" +" -t prueba - leer configuración y salir\n" +" -? 
imprimir este mensaje.\n" +"\n" + +#: src/qt-console/tray-monitor/tray-monitor.cpp:118 +msgid "TLS PassPhrase" +msgstr "" + +#: src/qt-console/tray-monitor/tray-monitor.cpp:164 +#, fuzzy, c-format +msgid "" +"Error: %d Monitor resources defined in %s. You must define one Monitor " +"resource.\n" +msgstr "" +"Error: %d Monitor de recursos definidos en %s. Usted debe definir un único " +"monitor de recursos.\n" + +#: src/qt-console/tray-monitor/tray-ui.h:105 +#, fuzzy, c-format +msgid "Failed to initialize TLS context for \"%s\".\n" +msgstr "Fallo al inicializar el contexto TLS para la consola \"%s\".\n" + +#: src/qt-console/tray-monitor/tray-ui.h:320 +#, fuzzy +msgid "Select a Director" +msgstr "Seleccione recurso Job" + +#: src/qt-console/tray-monitor/tray_conf.cpp:172 +#, c-format +msgid "No %s resource defined\n" +msgstr "Recurso %s no definido\n" + +#: src/qt-console/tray-monitor/tray_conf.cpp:181 +#, fuzzy, c-format +msgid "Monitor: name=%s\n" +msgstr "Console: nombre=%s\n" + +#: src/qt-console/tray-monitor/tray_conf.cpp:184 +#, fuzzy, c-format +msgid "Director: name=%s address=%s port=%d\n" +msgstr "Director: nombre=%s dirección=%s FDport=%d\n" + +#: src/qt-console/tray-monitor/tray_conf.cpp:188 +#, fuzzy, c-format +msgid "Client: name=%s address=%s port=%d\n" +msgstr "Cliente: nombre=%s dirección=%s FDport=%d\n" + +#: src/qt-console/tray-monitor/tray_conf.cpp:192 +#, fuzzy, c-format +msgid "Storage: name=%s address=%s port=%d\n" +msgstr "Storage: nombre=%s dirección=%s SDport=%d\n" + +#: src/qt-console/tray-monitor/tray_conf.cpp:196 +#, c-format +msgid "Unknown resource type %d in dump_resource.\n" +msgstr "Tipo de recurso desconocido %d en dump_resource.\n" + +#: src/qt-console/tray-monitor/tray_conf.cpp:284 +#, c-format +msgid "Unknown resource type %d in free_resource.\n" +msgstr "Tipo de recurso desconocido %d en free_resource.\n" + +#: src/qt-console/tray-monitor/tray_conf.cpp:318 +#, fuzzy, c-format +msgid "Too many directives in \"%s\" resource\n" +msgstr "Demasiados elementos en recursos \"%s\"\n" + +#: src/qt-console/tray-monitor/tray_conf.cpp:338 +#: src/qt-console/tray-monitor/tray_conf.cpp:372 +#, c-format +msgid "Unknown resource type %d in save_resource.\n" +msgstr "Tipo de recurso desconocido %d en save_resource.\n" + +#: src/win32/compat/compat.cpp:2879 +msgid "" +"\n" +"\n" +"Bacula ERROR: " +msgstr "" +"\n" +"\n" +"Bacula ERROR: " + +#: src/win32/filed/vss.cpp:244 src/win32/filed/vss.cpp:259 +#, c-format +msgid "pthread key create failed: ERR=%s\n" +msgstr "fallo crear clave pthread: ERR=%s\n" + +#: src/win32/filed/vss.cpp:267 +#, c-format +msgid "pthread_setspecific failed: ERR=%s\n" +msgstr "fallo pthread_setspecific: ERR=%s\n" + +#: src/win32/filed/vss_generic.cpp:725 +#, fuzzy, c-format +msgid "Unable to find volume %ls in the device list\n" +msgstr "Etiquetada nuevo Volumen \"%s\" en el dispositivo %s.\n" + +#: src/win32/libwin32/main.cpp:227 +msgid "Bad Command Line Option" +msgstr "Mala Opción de Línea de Comandos" + +#: src/win32/libwin32/service.cpp:98 +msgid "RegisterServiceCtlHandler failed" +msgstr "RegisterServiceCtlHandler fallido" + +#: src/win32/libwin32/service.cpp:99 +msgid "Failure contacting the Service Handler" +msgstr "Error al contactar con el Manejador de Servicio" + +#: src/win32/libwin32/service.cpp:110 +msgid "Service start report failed" +msgstr "Fallo en inicio de Servicio de Reporte" + +#: src/win32/libwin32/service.cpp:163 +msgid "StartServiceCtrlDispatcher failed." +msgstr "StartServiceCtrlDispatcher fallido." 
+ +#: src/win32/libwin32/service.cpp:170 +msgid "KERNEL32.DLL not found: Bacula service not started" +msgstr "KERNEL32.DLL no encontrado: Servicio Bacula no iniciado" + +#: src/win32/libwin32/service.cpp:180 +msgid "Registry service not found: Bacula service not started" +msgstr "Servicio Registry no encontrado: Servicio Bacula no iniciado" + +#: src/win32/libwin32/service.cpp:182 +msgid "Registry service entry point not found" +msgstr "Registro de servicio no se encuentra punto de entrada" + +#: src/win32/libwin32/service.cpp:204 +msgid "Report Service failure" +msgstr "Reporte de Servicio fallido" + +#: src/win32/libwin32/service.cpp:235 +msgid "Unable to install the service" +msgstr "No se puede instalar el servicio" + +#: src/win32/libwin32/service.cpp:243 +msgid "Service command length too long" +msgstr "Longitud de comandos de servicio demasiado largo" + +#: src/win32/libwin32/service.cpp:244 +msgid "Service command length too long. Service not registered." +msgstr "" +"Longitud de comandos de servicio demasiado largo. Servicio no registrado." + +#: src/win32/libwin32/service.cpp:257 +msgid "" +"The Service Control Manager could not be contacted - the service was not " +"installed" +msgstr "" +"El Manejador de Control de Servicios no puede ser contactado - el servicio " +"no fue instalado" + +#: src/win32/libwin32/service.cpp:280 src/win32/libwin32/service.cpp:309 +#: src/win32/libwin32/service.cpp:355 src/win32/libwin32/service.cpp:362 +#: src/win32/libwin32/service.cpp:366 +msgid "The Bacula service: " +msgstr "El servicio Bacula:" + +#: src/win32/libwin32/service.cpp:287 +msgid "" +"Provides file backup and restore services. Bacula -- the network backup " +"solution." +msgstr "" +"Proporciona servicios de copia de seguridad y restauración. Bacula - la " +"solución de copia de seguridad de red." 
+ +#: src/win32/libwin32/service.cpp:298 +msgid "Cannot write System Registry for " +msgstr "No se puede escribir Registro de Sistema para" + +#: src/win32/libwin32/service.cpp:299 +msgid "" +"The System Registry could not be updated - the Bacula service was not " +"installed" +msgstr "" +"El Registro del Sistema no ha podido ser actualizado - el servicio Bacula no " +"se ha instalado" + +#: src/win32/libwin32/service.cpp:308 +msgid "Cannot add Bacula key to System Registry" +msgstr "No se puede agregar clave Bacula al Registro del Sistema" + +#: src/win32/libwin32/service.cpp:319 +msgid "The " +msgstr "El" + +#: src/win32/libwin32/service.cpp:373 +#, fuzzy +msgid "An existing Bacula service: " +msgstr "Un servicio Bacula existente:" + +#: src/win32/libwin32/service.cpp:381 +msgid "" +"The service Manager could not be contacted - the Bacula service was not " +"removed" +msgstr "" +"El Administrador de Servicio no puedo ser contactado - El Servicio Bacula no " +"se elimino" + +#: src/win32/libwin32/service.cpp:394 +msgid "" +"Could not find registry entry.\n" +"Service probably not registerd - the Bacula service was not removed" +msgstr "" +"No se pudo encontrar la entrada del Registro.\n" +"Servicio probablemente no Registrado - el servicio de Bacula no se ha quitado" + +#: src/win32/libwin32/service.cpp:401 +msgid "Could not delete Registry key for " +msgstr "No se pudo borrar la clave del Registro para" + +#: src/win32/libwin32/service.cpp:411 +msgid "Bacula could not be contacted, probably not running" +msgstr "Bacula no pudo ser contactado, probablemente no se está ejecutando" + +#: src/win32/libwin32/service.cpp:418 +msgid "The Bacula service has been removed" +msgstr "El servicio Bacula se ha eliminado" + +#: src/win32/libwin32/service.cpp:459 +msgid "SetServiceStatus failed" +msgstr "SetServiceStatus fallido" + +#: src/win32/libwin32/service.cpp:485 +#, c-format +msgid "" +"\n" +"\n" +"%s error: %ld at %s:%d" +msgstr "" +"\n" +"\n" +"%s error: %ld en %s:%d" + +#: src/win32/libwin32/service.cpp:561 +#, c-format +msgid "Locked by: %s, duration: %ld seconds\n" +msgstr "Bloqueado por: %s, duración: %ld segundos\n" + +#: src/win32/libwin32/service.cpp:565 +#, c-format +msgid "No longer locked\n" +msgstr "Ya no está bloqueado\n" + +#: src/win32/libwin32/service.cpp:569 +msgid "Could not lock database" +msgstr "No se pudo bloquear la base de datos" + +#~ msgid "Query failed: %s: ERR=%s\n" +#~ msgstr "Consulta fallida: %s: ERR=%s\n" + +#~ msgid "A user name for MySQL must be supplied.\n" +#~ msgstr "Un nombre de usuario para MySQL debe de ser suministrado.\n" + +#~ msgid "Unable to initialize DB lock. ERR=%s\n" +#~ msgstr "No se puede inicializar el bloqueo de la BD. ERR=%s\n" + +#~ msgid "" +#~ "Unable to connect to MySQL server.\n" +#~ "Database=%s User=%s\n" +#~ "MySQL connect failed either server not running or your authorization is " +#~ "incorrect.\n" +#~ msgstr "" +#~ "No se puede conectar al servidor MySQL.\n" +#~ "Base de Datos=%s Usuario=%s\n" + +#~ msgid "Attribute create error. %s" +#~ msgstr "error al crear Atributo.%s" + +#~ msgid "A user name for PostgreSQL must be supplied.\n" +#~ msgstr "Un nombre de usuario para PostgreSQL debe de ser suministrado.\n" + +#~ msgid "error fetching row: %s\n" +#~ msgstr "error al obtener la fila:%s\n" + +#~ msgid "Encoding error for database \"%s\". Wanted SQL_ASCII, got %s\n" +#~ msgstr "" +#~ "Error de codificación de la base de datos \"%s\". 
Busco SQL_ASCII, obtuvo " +#~ "%s\n" + +#, fuzzy +#~ msgid "" +#~ "Unable to connect to PostgreSQL server. Database=%s User=%s\n" +#~ "Possible causes: SQL server not running; password incorrect; " +#~ "max_connections exceeded.\n" +#~ msgstr "" +#~ "No se puede conectar al servidor PostgreSQL.\n" +#~ "Base de datos=%s Usuario=%s\n" +#~ ", probablemente no funciona o tu contraseña es incorrecta.\n" + +#~ msgid "PQescapeStringConn returned non-zero.\n" +#~ msgstr "PQescapeStringConn devuelto no-cero.\n" + +#, fuzzy +#~ msgid "PQescapeByteaConn returned NULL.\n" +#~ msgstr "PQescapeStringConn devuelto no-cero.\n" + +#, fuzzy +#~ msgid "PQunescapeByteaConn returned NULL.\n" +#~ msgstr "PQescapeStringConn devuelto no-cero.\n" + +#, fuzzy +#~ msgid "Fetch failed: ERR=%s\n" +#~ msgstr "prctl fallido: ERR=%s\n" + +#~ msgid "error fetching currval: %s\n" +#~ msgstr "error al obtener el valor: %s\n" + +#~ msgid "error starting batch mode: %s" +#~ msgstr "Error iniciando modo batch: %s" + +#~ msgid "error ending batch mode: %s" +#~ msgstr "Error finalizando modo batch: %s" + +#~ msgid "error copying in batch mode: %s" +#~ msgstr "Error copiando en modo batch: %s" + +#~ msgid "" +#~ "query %s failed:\n" +#~ "%s\n" +#~ msgstr "" +#~ "consulta %s fallida:\n" +#~ "%s\n" + +#~ msgid "" +#~ "insert %s failed:\n" +#~ "%s\n" +#~ msgstr "" +#~ "Inserción %s fallida:\n" +#~ "%s\n" + +#~ msgid "Insertion problem: affected_rows=%s\n" +#~ msgstr "Problemas con la inserción: filas afectadas=%s\n" + +#~ msgid "" +#~ "update %s failed:\n" +#~ "%s\n" +#~ msgstr "" +#~ "Actualizar %s fallida:\n" +#~ "%s\n" + +#~ msgid "Update failed: affected_rows=%s for %s\n" +#~ msgstr "Actualización fallida: celdas afectadas =%s por %s\n" + +#~ msgid "" +#~ "delete %s failed:\n" +#~ "%s\n" +#~ msgstr "" +#~ "Borrado %s fallido:\n" +#~ "%s\n" + +#~ msgid "Path length is zero. File=%s\n" +#~ msgstr "La longitud de la ruta es nula. Archivo=%s\n" + +#~ msgid "No results to list.\n" +#~ msgstr "No hay resultados para listar.\n" + +#, fuzzy +#~ msgid "Could not init database batch connection\n" +#~ msgstr "No se pudo iniciar base de datos de Bacula\n" + +#~ msgid "Could not open database \"%s\": ERR=%s\n" +#~ msgstr "No se pudo abrir la base de datos \"%s\": ERR=%s\n" + +#~ msgid "Create DB Job record %s failed. ERR=%s\n" +#~ msgstr "Creación de registro de Job %s en BD fallido. ERR=%s\n" + +#~ msgid "Create JobMedia record %s failed: ERR=%s\n" +#~ msgstr "Creación de registro de JobMedia %s fallido. ERR=%s\n" + +#~ msgid "Update Media record %s failed: ERR=%s\n" +#~ msgstr "Actualización del registro de Media %s fallido: ERR=%s\n" + +#~ msgid "pool record %s already exists\n" +#~ msgstr "registro del pool %s ya existe\n" + +#~ msgid "Create db Pool record %s failed: ERR=%s\n" +#~ msgstr "Creación del registro BD Pool %s fallido: ERR=%s\n" + +#~ msgid "Device record %s already exists\n" +#~ msgstr "Registro de Dispositivo %s ya existe\n" + +#~ msgid "Create db Device record %s failed: ERR=%s\n" +#~ msgstr "Creación del registro de BD Device %s fallido: ERR=%s\n" + +#~ msgid "More than one Storage record!: %d\n" +#~ msgstr "Mas de un registro de almacenamiento!: %d\n" + +#~ msgid "error fetching Storage row: %s\n" +#~ msgstr "error obteniendo fila del Almacenamiento:%s\n" + +#~ msgid "Create DB Storage record %s failed. ERR=%s\n" +#~ msgstr "Creación del registro BD Almacenamiento %s fallido. 
ERR=%s\n" + +#~ msgid "mediatype record %s already exists\n" +#~ msgstr "registro de tipo de media %s ya existe\n" + +#~ msgid "Create db mediatype record %s failed: ERR=%s\n" +#~ msgstr "Fallo al crear la db_mediatype_record %s: ERR=%s\n" + +#~ msgid "Volume \"%s\" already exists.\n" +#~ msgstr "Volumen \"%s\" ya existe.\n" + +#~ msgid "Create DB Media record %s failed. ERR=%s\n" +#~ msgstr "Creación del registro BD Almacenamiento %s fallido. ERR=%s\n" + +#~ msgid "More than one Client!: %d\n" +#~ msgstr "Mas de un cliente!: %d\n" + +#~ msgid "error fetching Client row: %s\n" +#~ msgstr "error al obtener la fila Cliente:%s\n" + +#~ msgid "Create DB Client record %s failed. ERR=%s\n" +#~ msgstr "Creación del registro BD Cliente %s fallido. ERR=%s\n" + +#~ msgid "More than one Path!: %s for path: %s\n" +#~ msgstr "Mas de un Path!: %s para path: %s\n" + +#~ msgid "Create db Path record %s failed. ERR=%s\n" +#~ msgstr "Creación del registro Path db %s fallido. ERR=%s\n" + +#~ msgid "Create DB Counters record %s failed. ERR=%s\n" +#~ msgstr "Creación del registro BD Contadores %s fallido. ERR=%s\n" + +#~ msgid "More than one FileSet!: %d\n" +#~ msgstr "Más de un FileSet!: %d\n" + +#~ msgid "error fetching FileSet row: ERR=%s\n" +#~ msgstr "error al obtener la fila FileSet: ERR=%s\n" + +#~ msgid "Create DB FileSet record %s failed. ERR=%s\n" +#~ msgstr "Creación del registro BD FileSet %s fallido. ERR=%s\n" + +#~ msgid "Create db File record %s failed. ERR=%s" +#~ msgstr "Creación del registro File db %s fallido. ERR=%s" + +#~ msgid "More than one Filename! %s for file: %s\n" +#~ msgstr "Mas de un nombre de Archivo! %s para el archivo: %s\n" + +#~ msgid "Error fetching row for file=%s: ERR=%s\n" +#~ msgstr "Error al obtener fila para el archivo=%s: ERR=%s\n" + +#~ msgid "Create db Filename record %s failed. ERR=%s\n" +#~ msgstr "Creación del registro Filename db %s fallido. ERR=%s\n" + +#~ msgid "Attempt to put non-attributes into catalog. Stream=%d\n" +#~ msgstr "Intento de poner non-atributos en el catálogo. Stream=%d\n" + +#~ msgid "ERR=JobIds are empty\n" +#~ msgstr "ERR=JobIds están vacíos\n" + +#, fuzzy +#~ msgid "Create db Object record %s failed. ERR=%s" +#~ msgstr "Creación del registro File db %s fallido. 
ERR=%s" + +#~ msgid "No pool record %s exists\n" +#~ msgstr "Registro de pool %s inexistente\n" + +#~ msgid "Expecting one pool record, got %d\n" +#~ msgstr "Esperando un registro pool, tiene %d\n" + +#~ msgid "Error fetching row %s\n" +#~ msgstr "Error obteniendo fila %s\n" + +#, fuzzy +#~ msgid "" +#~ "Query error for end time request: ERR=%s\n" +#~ "CMD=%s\n" +#~ msgstr "" +#~ "Error de consulta al solicitar tiempo inicial: ERR=%s\n" +#~ "CMD=%s\n" + +#, fuzzy +#~ msgid "No prior backup Job record found.\n" +#~ msgstr "No encontrado registro anterior de Job de respaldo completo.\n" + +#~ msgid "" +#~ "Query error for start time request: ERR=%s\n" +#~ "CMD=%s\n" +#~ msgstr "" +#~ "Error de consulta al solicitar tiempo inicial: ERR=%s\n" +#~ "CMD=%s\n" + +#~ msgid "No prior Full backup Job record found.\n" +#~ msgstr "No encontrado registro anterior de Job de respaldo completo.\n" + +#~ msgid "Unknown level=%d\n" +#~ msgstr "Nivel desconocido=%d\n" + +#~ msgid "" +#~ "No Job record found: ERR=%s\n" +#~ "CMD=%s\n" +#~ msgstr "" +#~ "No se encuentra el registro de trabajo: ERR=%s\n" +#~ " CMD=%s\n" + +#~ msgid "Unknown Job level=%d\n" +#~ msgstr "Nivel del Job desconocido=%d\n" + +#~ msgid "No Job found for: %s.\n" +#~ msgstr "No se encontró ningún Job para: %s.\n" + +#~ msgid "No Job found for: %s\n" +#~ msgstr "Job no encontrado para:%s\n" + +#~ msgid "Request for Volume item %d greater than max %d or less than 1\n" +#~ msgstr "Solicitud de ítem Volumen %d mayor que el máximo %d o menor que 1\n" + +#~ msgid "No Volume record found for item %d.\n" +#~ msgstr "Registro de ítem Volumen no encontrado %d.\n" + +#~ msgid "Error fetching row: %s\n" +#~ msgstr "Error obteniendo fila: %s\n" + +#, fuzzy +#~ msgid "get_file_record want 1 got rows=%d PathId=%s FilenameId=%s\n" +#~ msgstr "get_file_record falta 1 obtuvo filas=%d\n" + +#, fuzzy +#~ msgid "File record for PathId=%s FilenameId=%s not found.\n" +#~ msgstr "Registro File para PathID=%s FilenameID=%s no encontrado.\n" + +#~ msgid "File record not found in Catalog.\n" +#~ msgstr "Registro File no se encuentra en Catalogo.\n" + +#~ msgid "More than one Filename!: %s for file: %s\n" +#~ msgstr "Más de un Filename!: %s en archivo: %s\n" + +#~ msgid "Get DB Filename record %s found bad record: %d\n" +#~ msgstr "Obtener registro BD Filename %s encuentro registro malo: %d\n" + +#~ msgid "Filename record: %s not found.\n" +#~ msgstr "Registro Filename: %s no encontrado.\n" + +#~ msgid "Filename record: %s not found in Catalog.\n" +#~ msgstr "Registro Filename: %s no encontrado en Catalogo.\n" + +#~ msgid "Get DB path record %s found bad record: %s\n" +#~ msgstr "Obtener registro ruta %s de BD encontró malo registro: %s\n" + +#~ msgid "Path record: %s not found.\n" +#~ msgstr "Registro Path: %s no encontrado.\n" + +#~ msgid "Path record: %s not found in Catalog.\n" +#~ msgstr "Registro Path: %s no encontrado en el Catalogo.\n" + +#~ msgid "No Job found for JobId %s\n" +#~ msgstr "No se encontró un job para el JobId %s\n" + +#~ msgid "No volumes found for JobId=%d\n" +#~ msgstr "Volúmenes no encontrados para JobId=%d\n" + +#~ msgid "Error fetching row %d: ERR=%s\n" +#~ msgstr "Error obteniendo fila %d: ERR=%s\n" + +#~ msgid "No Volume for JobId %d found in Catalog.\n" +#~ msgstr "Volúmenes para JobId=%d no encontrado en el Catalogo.\n" + +#~ msgid "Pool id select failed: ERR=%s\n" +#~ msgstr "Fallo al selecciona id del Pool: ERR=%s\n" + +#~ msgid "Client id select failed: ERR=%s\n" +#~ msgstr "Fallo al seleccionar ID del Cliente: ERR=%s\n" + +#, 
fuzzy +#~ msgid "More than one Pool! Num=%s\n" +#~ msgstr "Mas de un Poll!: %s\n" + +#~ msgid "Pool record not found in Catalog.\n" +#~ msgstr "Registro del Pool no encontrado en Catalogo.\n" + +#, fuzzy +#~ msgid "Error got %s RestoreObjects but expected only one!\n" +#~ msgstr "Error al obtener %s FileSets pero se esperaba sólo uno!\n" + +#, fuzzy +#~ msgid "RestoreObject record \"%d\" not found.\n" +#~ msgstr "Registro FileSet \"%s\" no encontrado.\n" + +#, fuzzy +#~ msgid "RestoreObject record not found in Catalog.\n" +#~ msgstr "Registro FileSet no encontrado en Catalogo.\n" + +#~ msgid "More than one Client!: %s\n" +#~ msgstr "Mas de un Cliente!: %s\n" + +#~ msgid "Client record not found in Catalog.\n" +#~ msgstr "Registro de cliente no encontrado en catalogo.\n" + +#~ msgid "More than one Counter!: %d\n" +#~ msgstr "Mas de un Contador!: %d\n" + +#~ msgid "error fetching Counter row: %s\n" +#~ msgstr "error al obtener fila Contador: %s\n" + +#~ msgid "Counter record: %s not found in Catalog.\n" +#~ msgstr "registro Contador: %s no encontrado en Catalogo.\n" + +#~ msgid "Error got %s FileSets but expected only one!\n" +#~ msgstr "Error al obtener %s FileSets pero se esperaba sólo uno!\n" + +#~ msgid "FileSet record \"%s\" not found.\n" +#~ msgstr "Registro FileSet \"%s\" no encontrado.\n" + +#~ msgid "FileSet record not found in Catalog.\n" +#~ msgstr "Registro FileSet no encontrado en Catalogo.\n" + +#~ msgid "Media id select failed: ERR=%s\n" +#~ msgstr "Fallo al seleccionar ID del Medio: ERR=%s\n" + +#~ msgid "query dbids failed: ERR=%s\n" +#~ msgstr "Consulta fallida a dbids: ERR=%s\n" + +#~ msgid "More than one Volume!: %s\n" +#~ msgstr "Mas de un Volumen!: %s\n" + +#, fuzzy +#~ msgid "Media record with MediaId=%s not found.\n" +#~ msgstr "Registro Media MediaID=%s no encontrado.\n" + +#, fuzzy +#~ msgid "Media record for Volume name \"%s\" not found.\n" +#~ msgstr "Registro Media para Volumen \"%s\" no encontrado.\n" + +#~ msgid "Media record for MediaId=%u not found in Catalog.\n" +#~ msgstr "Registro Media para MediaId=%u no encontrado en el Catalogo.\n" + +#, fuzzy +#~ msgid "Media record for Volume Name \"%s\" not found in Catalog.\n" +#~ msgstr "Registro Media para Volumen=%s no encontrado en el Catalogo.\n" + +#, fuzzy +#~ msgid "More than one Snapshot!: %s\n" +#~ msgstr "Mas de un Cliente!: %s\n" + +#, fuzzy +#~ msgid "Snapshot record with SnapshotId=%s not found.\n" +#~ msgstr "Registro Media MediaID=%s no encontrado.\n" + +#, fuzzy +#~ msgid "Snapshot record for Snapshot name \"%s\" not found.\n" +#~ msgstr "Registro Media para Volumen \"%s\" no encontrado.\n" + +#, fuzzy +#~ msgid "More than one Result!: %s\n" +#~ msgstr "Mas de un Cliente!: %s\n" + +#~ msgid "Query failed: %s\n" +#~ msgstr "Consulta fallida: %s\n" + +#~ msgid "These JobIds have copies as follows:\n" +#~ msgstr "Estos JobIds tienen copias de la siguiente manera:\n" + +#~ msgid "The catalog contains copies as follows:\n" +#~ msgstr "El catálogo contiene copias de la siguiente manera:\n" + +#~ msgid "Database %s does not exist, please create it.\n" +#~ msgstr "No existe base de datos %s, por favor crearla.\n" + +#~ msgid "Unable to open Database=%s. ERR=%s\n" +#~ msgstr "No se puede abrir la base de datos=%s. ERR=%s\n" + +#~ msgid "unknown" +#~ msgstr "desconocido" + +#~ msgid "Authorization problem: Remote server requires TLS.\n" +#~ msgstr "Problema de autorización. 
Servidor remoto requiere TLS.\n" + +#~ msgid "Director rejected Hello command\n" +#~ msgstr "Director rechazo comando Hello\n" + +#, fuzzy +#~ msgid "" +#~ "Director authorization problem.\n" +#~ "Most likely the passwords do not agree.\n" +#~ "If you are using TLS, there may have been a certificate validation error " +#~ "during the TLS handshake.\n" +#~ "For help, please see " +#~ msgstr "" +#~ "Problema de Autorización de Director.\n" +#~ "El más probable es que las contraseñas no están de acuerdo.\n" +#~ "Si usted esta utilizando TLS, puede haber habido un error de validación " +#~ "de certificado durante el apretón de manos TLS.\n" +#~ "Por favor, consulte http://www.bacula.org/en/rel-manual/" +#~ "Bacula_Freque_Asked_Questi.html#SECTION003760000000000000000 para ayuda.\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: " +#~ msgstr "" +#~ "\n" +#~ "Versión: " + +#, fuzzy +#~ msgid "Please use valid -l argument: %s\n" +#~ msgstr "FileSet: nombre=%s\n" + +#~ msgid "input from file" +#~ msgstr "entrada desde archivo" + +#~ msgid "output to file" +#~ msgstr "salida a archivo" + +#~ msgid "quit" +#~ msgstr "salir" + +#~ msgid "output to file and terminal" +#~ msgstr "salida a archivo y pantalla" + +#, fuzzy +#~ msgid "output everything to file and terminal (tee all)" +#~ msgstr "salida a archivo y pantalla" + +#~ msgid "sleep specified time" +#~ msgstr "tiempo de espera especificado" + +#~ msgid "print current time" +#~ msgstr "imprimir hora actual" + +#~ msgid "print Console's version" +#~ msgstr "imprimir la versión de la Consola" + +#~ msgid "echo command string" +#~ msgstr "cadena de comando echo" + +#~ msgid "execute an external command" +#~ msgstr "ejecutar un comando externo" + +#~ msgid "exit = quit" +#~ msgstr "exit = salir" + +#, fuzzy +#~ msgid "send a file to the director" +#~ msgstr "Reconectar al director" + +#~ msgid "zed_keys = use zed keys instead of bash keys" +#~ msgstr "zed_keys = usar las teclas zed en lugar de teclas bash" + +#~ msgid "help listing" +#~ msgstr "listado de ayuda" + +#~ msgid "set command separator" +#~ msgstr "configurar separador de comandos" + +#~ msgid ": is an invalid command\n" +#~ msgstr ": es un comando invalido\n" + +#~ msgid "Illegal separator character.\n" +#~ msgstr "Ilegal carácter de separación.\n" + +#~ msgid "Command logic problem\n" +#~ msgstr "Problema lógico de comando\n" + +#, fuzzy +#~ msgid "Can't find %s in Director list\n" +#~ msgstr "No se puede encontrar el recurso Director %s\n" + +#~ msgid "Available Directors:\n" +#~ msgstr "Directors disponibles:\n" + +#~ msgid "%2d: %s at %s:%d\n" +#~ msgstr "%2d: %s en %s:%d\n" + +#~ msgid "Select Director by entering a number: " +#~ msgstr "Seleccione Director introduciendo un numero:" + +#~ msgid "%s is not a number. You must enter a number between 1 and %d\n" +#~ msgstr "%s no es un número. 
Debe introducir un número entre 1 y %d\n" + +#~ msgid "You must enter a number between 1 and %d\n" +#~ msgstr "Debe de introducir un numero entre 1 y %d\n" + +#, fuzzy +#~ msgid "Can't find %s in Console list\n" +#~ msgstr "No se puede encontrar el recurso Director %s\n" + +#~ msgid "Connecting to Director %s:%d\n" +#~ msgstr "Conectando al Director %s:%d\n" + +#~ msgid "Enter a period to cancel a command.\n" +#~ msgstr "Introduzca un período para cancelar un comando.\n" + +#~ msgid "Too many arguments on input command.\n" +#~ msgstr "Demasiados argumentos en comando de entrada.\n" + +#~ msgid "First argument to input command must be a filename.\n" +#~ msgstr "" +#~ "Primer argumento para comandos de entrada debe ser un nombre de archivo.\n" + +#~ msgid "Cannot open file %s for input. ERR=%s\n" +#~ msgstr "No se puede abrir el archivo %s para entrada. ERR=%s\n" + +#~ msgid "Too many arguments on output/tee command.\n" +#~ msgstr "Demasiados argumentos en la salida del comando output/tee.\n" + +#~ msgid "Cannot open file %s for output. ERR=%s\n" +#~ msgstr "No se puede abrir el archivo %s para salida. ERR=%s\n" + +#~ msgid "Too many arguments. Enclose command in double quotes.\n" +#~ msgstr "Demasiados argumentos. Incluya comando entre comillas dobles.\n" + +#~ msgid "Cannot popen(\"%s\", \"r\"): ERR=%s\n" +#~ msgstr "No puede popen(\"%s\", \"r\"): ERR=%s\n" + +#, fuzzy +#~ msgid "@exec error: ERR=%s\n" +#~ msgstr "Seek error: ERR=%s\n" + +#~ msgid "Console: name=%s rcfile=%s histfile=%s\n" +#~ msgstr "Console: nombre=%s rcfile=%s histfile=%s\n" + +#~ msgid "Start Admin JobId %d, Job=%s\n" +#~ msgstr "Inicio Admin JobId %d, Job=%s\n" + +#~ msgid "Error getting Job record for Job report: ERR=%s" +#~ msgstr "" +#~ "Error al obtener el registro del Job para reporte de trabajo: ERR=%s" + +#~ msgid "Admin OK" +#~ msgstr "Administración Ok" + +#~ msgid "*** Admin Error ***" +#~ msgstr "***Administración Error***" + +#~ msgid "Admin Canceled" +#~ msgstr "Administración Cancelada" + +#~ msgid "Inappropriate term code: %c\n" +#~ msgstr "Inadecuado código de terminación: %c\n" + +#~ msgid "Error sending Hello to Storage daemon. ERR=%s\n" +#~ msgstr "Error enviando Hello al servicio de Almacenamiento. ERR=%s\n" + +#~ msgid "Director and Storage daemon passwords or names not the same.\n" +#~ msgstr "Nombres o contraseñas en el Director o Storage no son las mismas.\n" + +#, fuzzy +#~ msgid "" +#~ "Director unable to authenticate with Storage daemon at \"%s:%d\". " +#~ "Possible causes:\n" +#~ "Passwords or names not the same or\n" +#~ "Maximum Concurrent Jobs exceeded on the SD or\n" +#~ "SD networking messed up (restart daemon).\n" +#~ "For help, please see: " +#~ msgstr "" +#~ "Director no se puede autenticar con el demonio Storage en \"%s:%d\". 
" +#~ "Posibles causas:\n" +#~ "Contraseñas o nombres no son los mismos o\n" +#~ "el Máximo de Concurrentes Jobs ha superado en el SD o\n" +#~ "red SD desordenada (reiniciar demonio).\n" +#~ "Por favor, consulte http://www.bacula.org/en/rel-manual/" +#~ "Bacula_Freque_Asked_Questi.html#SECTION003760000000000000000 por ayuda.\n" + +#~ msgid "TLS negotiation failed with SD at \"%s:%d\"\n" +#~ msgstr "Negociación TLS fallida con SD en \"%s:%d\"\n" + +#~ msgid "bdird] [config_file]\n" +#~ " -r get resource type \n" +#~ " -n get resource \n" +#~ " -l get only directives matching dirs (use with -r)\n" +#~ " -D get only data\n" +#~ " -R do not apply JobDefs to Job\n" +#~ " -c set configuration file to file\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -t test - read configuration and exit\n" +#~ " -s output in show text format\n" +#~ " -v verbose user messages\n" +#~ " -? print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Versión: %s (%s)\n" +#~ "\n" +#~ "Utilice: dird [-f -s] [-c archivo_configuración] [-d nivel_depuración] " +#~ "[archivo_configuración]\n" +#~ " -c establecer archivo de configuración para archivo \n" +#~ " -d establecer el nivel de depuración para \n" +#~ " -dt imprimir timestamp en salida de depuración\n" +#~ " -f ejecutar en primer plano (para depuración)\n" +#~ " -g groupid\n" +#~ " -m imprimir salida kaboom para depuración)\n" +#~ " -r ejecutar ahora\n" +#~ " -s sin señales\n" +#~ " -t prueba - leer la configuración y salir\n" +#~ " -u userid\n" +#~ " -v mensajes de usuario detallados\n" +#~ " -? imprimir este mensaje.\n" +#~ "\n" + +#~ msgid "" +#~ "No Director resource defined in %s\n" +#~ "Without that I don't know who I am :-(\n" +#~ msgstr "" +#~ "Recurso Director no definido en %s\n" +#~ "Sin eso no sé quién soy :-(\n" + +#~ msgid "No Messages resource defined in %s\n" +#~ msgstr "Recursos de Mensajes no definidos en %s\n" + +#~ msgid "Only one Director resource permitted in %s\n" +#~ msgstr "Sólo un recurso Director permitido en %s\n" + +#, fuzzy +#~ msgid "\"TLS Certificate\" file not defined for Director \"%s\" in %s.\n" +#~ msgstr "" +#~ "Archivo \"TLS Certificate\" no definido para Storage \"%s\" en %s.\n" + +#~ msgid "\"TLS Key\" file not defined for Director \"%s\" in %s.\n" +#~ msgstr "\"Clave TLS\" archivo no definido para director \"%s\" en %s.\n" + +#~ msgid "" +#~ "Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined " +#~ "for Director \"%s\" in %s. At least one CA certificate store is required " +#~ "when using \"TLS Verify Peer\".\n" +#~ msgstr "" +#~ "Ninguno \"TLS CA Certificate\" o \"TLS CA Certificate Dir\" esta definido " +#~ "para Director \"%s\" en %s. Al menos un almacén de certificado CA es " +#~ "requerido cuando se utiliza \"TLS Verify Peer\".\n" + +#~ msgid "\"TLS Certificate\" file not defined for Console \"%s\" in %s.\n" +#~ msgstr "" +#~ "Archivo de \"Certificado TLS\" no definido para Console \"%s\" en %s.\n" + +#~ msgid "\"TLS Key\" file not defined for Console \"%s\" in %s.\n" +#~ msgstr "Archivo de \"Llave TLS\" no definido para Console \"%s\" en %s.\n" + +#~ msgid "" +#~ "Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined " +#~ "for Console \"%s\" in %s. At least one CA certificate store is required " +#~ "when using \"TLS Verify Peer\".\n" +#~ msgstr "" +#~ "Ninguno \"TLS CA Certificate\" o \"TLS CA Certificate Dir\" esta definido " +#~ "para Console \"%s\" en %s. 
Al menos un almacén de certificado CA es " +#~ "requerido cuando se utiliza \"TLS Verify Peer\".\n" + +#~ msgid "Failed to initialize TLS context for File daemon \"%s\" in %s.\n" +#~ msgstr "Fallo al inicializar contexto TLS para demonio File \"%s\" en %s.\n" + +#~ msgid "" +#~ "Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined " +#~ "for File daemon \"%s\" in %s.\n" +#~ msgstr "" +#~ "Ninguno \"TLS CA Certificate\" o \"TLS CA Certificate Dir\" esta definido " +#~ "para demonio File \"%s\" en %s.\n" + +#~ msgid "No Job records defined in %s\n" +#~ msgstr "Registros de Job no definidos en %s\n" + +#~ msgid "Hey something is wrong. p=0x%lu\n" +#~ msgstr "Oye, algo está mal. p=0x%lu\n" + +#~ msgid "" +#~ "\"%s\" directive in Job \"%s\" resource is required, but not found.\n" +#~ msgstr "" +#~ "Directiva \"%s\" en recurso Job \"%s\" es requerida, pero no se " +#~ "encuentra.\n" + +#~ msgid "Too many items in Job resource\n" +#~ msgstr "Demasiados elementos en el recurso Job\n" + +#~ msgid "No storage specified in Job \"%s\" nor in Pool.\n" +#~ msgstr "Ningún storage especificado en Job \"%s\" ni en Pool.\n" + +#~ msgid "Unable to get Job record. ERR=%s\n" +#~ msgstr "No se pudo obtener registro de Job. ERR=%s\n" + +#~ msgid "Unable to get Job Volume Parameters. ERR=%s\n" +#~ msgstr "No se pudo obtener Parámetros de Volumen de Job. ERR=%s\n" + +#~ msgid "Unable to create bootstrap file %s. ERR=%s\n" +#~ msgstr "No se puede crear el archivo bootstrap %s. ERR=%s\n" + +#~ msgid "No files found to read. No bootstrap file written.\n" +#~ msgstr "" +#~ "Ningún archivo encontrado para leer. Ningún archivo bootstrap escrito.\n" + +#~ msgid "Error writing bsr file.\n" +#~ msgstr "Error escribiendo archivo bsr.\n" + +#, fuzzy +#~ msgid "" +#~ "The Job will require the following (*=>InChanger):\n" +#~ " Volume(s) Storage(s) SD Device(s)\n" +#~ "===========================================================================\n" +#~ msgstr "" +#~ "El job irá requerir los siguientes\n" +#~ " Storage(s) Dispositivo(s) SD Volumen(es)\n" +#~ "===========================================================================\n" + +#~ msgid "No Volumes found to restore.\n" +#~ msgstr "Volúmenes no encontrados para restaurar.\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Volumes marked with \"*\" are in the Autochanger.\n" +#~ msgstr "" +#~ "\n" +#~ "Volúmenes marcados con \"*\" están en línea.\n" + +#~ msgid "1990 Invalid Catalog Request: %s" +#~ msgstr "1990 Solicitud de Catalogo no válido: %s" + +#~ msgid "Invalid Catalog request; DB not open: %s" +#~ msgstr "Solicitud de Catalogo no válido; BD no abierta: %s" + +#, fuzzy +#~ msgid "Pool \"%s\" not found for SD find media request.\n" +#~ msgstr "Pool \"%s\" no encontrado, utilizando cualquier pool.\n" + +#~ msgid "1901 No Media.\n" +#~ msgstr "1901 Ninguna Media.\n" + +#~ msgid "not in Pool" +#~ msgstr "no está en Pool" + +#~ msgid "not correct MediaType" +#~ msgstr "Tipo de Medio incorrecto" + +#~ msgid "is not Enabled" +#~ msgstr "no está Habilitado" + +#, fuzzy +#~ msgid "1998 Volume \"%s\" catalog status is %s, %s.\n" +#~ msgstr "1998 Volumen \"%s\" en estado %s, %s.\n" + +#~ msgid "1997 Volume \"%s\" not in catalog.\n" +#~ msgstr "1997 Volumen \"%s\" no está en catálogo.\n" + +#~ msgid "Unable to get Media record for Volume %s: ERR=%s\n" +#~ msgstr "No es posible obtener registro Media para el Volumen %s: ERR=%s\n" + +#~ msgid "1991 Catalog Request for vol=%s failed: %s" +#~ msgstr "1991 Solicitud de Catalogo para vol=%s fallida: %s" + +#, fuzzy 
+#~ msgid "" +#~ "Attempt to set Volume Files from %u to %u for Volume \"%s\". Ignored.\n" +#~ msgstr "" +#~ "Archivos de Volumen en %u se establece en %u para Volumen \"%s\". Esto es " +#~ "incorrecto.\n" + +#~ msgid "Catalog error updating Media record. %s" +#~ msgstr "Error de catalogo actualizando registro Media. %s" + +#~ msgid "1993 Update Media error\n" +#~ msgstr "1993 Error de actualización de Media\n" + +#~ msgid "Catalog error creating JobMedia record. %s" +#~ msgstr "Error de Catalogo al crear registro JobMedia. %s" + +#~ msgid "1992 Create JobMedia error\n" +#~ msgstr "1992 Error al crear JobMedia\n" + +#~ msgid "Invalid Catalog request: %s" +#~ msgstr "Invalida petición de Catalogo: %s" + +#, fuzzy +#~ msgid "Attribute create error: ERR=%s" +#~ msgstr "error al crear Atributo.%s" + +#, fuzzy +#~ msgid "Restore object create error. %s" +#~ msgstr "error al crear Atributo.%s" + +#, fuzzy +#~ msgid "%s not same FileIndex=%d as attributes FI=%d\n" +#~ msgstr "Obtuvo %s, pero no igual que los atributos de File\n" + +#~ msgid "" +#~ "Catalog error updating file digest. Unsupported digest stream type: %d" +#~ msgstr "" +#~ "Error al actualizar el resumen de archivo de catálogo. Incompatible " +#~ "resumen de tipo de flujo: %d" + +#, fuzzy +#~ msgid "attribute create error. ERR=%s" +#~ msgstr "error al crear Atributo.%s" + +#~ msgid "Catalog error updating file digest. %s" +#~ msgstr "Error de catalogo actualizando archivo de resumen. %s" + +#~ msgid "1994 Invalid Catalog Update: %s" +#~ msgstr "1994 Invalido Catálogo de Actualizaciones: %s" + +#~ msgid "Invalid Catalog Update; DB not open: %s" +#~ msgstr "Actualización de Catalogo Inválida; La Base de Datos no se abre: %s" + +#, fuzzy +#~ msgid "" +#~ "fread attr spool error. Wanted %ld bytes, maximum permitted 10000000 " +#~ "bytes\n" +#~ msgstr "Error fread attr spool. ERR=%s\n" + +#, fuzzy +#~ msgid "fread attr spool error. Wanted %ld bytes but got %lld ERR=%s\n" +#~ msgstr "Error fread attr spool. ERR=%s\n" + +#~ msgid "fread attr spool error. ERR=%s\n" +#~ msgstr "Error fread attr spool. ERR=%s\n" + +#~ msgid "Loaded plugin: %s\n" +#~ msgstr "Cargado el plugin: %s\n" + +#~ msgid "Plugin magic wrong. Plugin=%s wanted=%s got=%s\n" +#~ msgstr "Incorrecto plugin mágico. Plugin=%s esperaba=%s obtuvo=%s\n" + +#~ msgid "Plugin version incorrect. Plugin=%s wanted=%d got=%d\n" +#~ msgstr "Plugin versión incorrecta. Plugin=%s quería=%d obtuvo=%d\n" + +#~ msgid "Plugin license incompatible. Plugin=%s license=%s\n" +#~ msgstr "Plugin licencia incompatible. Plugin=%s licencia=%s\n" + +#, fuzzy +#~ msgid "Plugin size incorrect. Plugin=%s wanted=%d got=%d\n" +#~ msgstr "Plugin versión incorrecta. Plugin=%s quería=%d obtuvo=%d\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: bacula-dir [-f -s] [-c config_file] [-d debug_level] " +#~ "[config_file]\n" +#~ " -c set configuration file to file\n" +#~ " -d [,] set debug level to , debug tags to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -T set trace on\n" +#~ " -f run in foreground (for debugging)\n" +#~ " -g groupid\n" +#~ " -m print kaboom output (for debugging)\n" +#~ " -r run now\n" +#~ " -s no signals\n" +#~ " -t test - read configuration and exit\n" +#~ " -u userid\n" +#~ " -v verbose user messages\n" +#~ " -? 
print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Versión: %s (%s)\n" +#~ "\n" +#~ "Utilice: dird [-f -s] [-c archivo_configuración] [-d nivel_depuración] " +#~ "[archivo_configuración]\n" +#~ " -c establecer archivo de configuración para archivo \n" +#~ " -d establecer el nivel de depuración para \n" +#~ " -dt imprimir timestamp en salida de depuración\n" +#~ " -f ejecutar en primer plano (para depuración)\n" +#~ " -g groupid\n" +#~ " -m imprimir salida kaboom para depuración)\n" +#~ " -r ejecutar ahora\n" +#~ " -s sin señales\n" +#~ " -t prueba - leer la configuración y salir\n" +#~ " -u userid\n" +#~ " -v mensajes de usuario detallados\n" +#~ " -? imprimir este mensaje.\n" +#~ "\n" + +#, fuzzy +#~ msgid "Already doing a reload request, request ignored.\n" +#~ msgstr "Demasiadas solicitudes de recarga abiertas. Solicitud ignorada.\n" + +#~ msgid "Too many open reload requests. Request ignored.\n" +#~ msgstr "Demasiadas solicitudes de recarga abiertas. Solicitud ignorada.\n" + +#~ msgid "Out of reload table entries. Giving up.\n" +#~ msgstr "Fuera de entradas de las tablas recargadas. Abandonando.\n" + +#~ msgid "Resetting previous configuration.\n" +#~ msgstr "Restablecimiento de la configuración anterior.\n" + +#, fuzzy +#~ msgid "Storage=%s not found. Assuming it was removed!!!\n" +#~ msgstr "" +#~ "Storage \"%s\" no encontrado, usando Storage \"%s\" desde MediaType\"%s" +#~ "\".\n" + +#~ msgid "Failed to initialize TLS context for Director \"%s\" in %s.\n" +#~ msgstr "Fallo al inicializar contexto TLS para Director \"%s\" en %s.\n" + +#, fuzzy +#~ msgid "PoolType required in Pool resource \"%s\".\n" +#~ msgstr "Los recursos Pool definidos son:\n" + +#, fuzzy +#~ msgid "Invalid PoolType \"%s\" in Pool resource \"%s\".\n" +#~ msgstr "No se puede encontrar el recurso Pool %s\n" + +#, fuzzy +#~ msgid "NextPool \"Scratch\" not valid in Pool \"%s\".\n" +#~ msgstr "Especificación Next Pool no encontrada en Pool \"%s\".\n" + +#~ msgid "Could not open Catalog \"%s\", database \"%s\".\n" +#~ msgstr "No se pudo abrir Catálogo \"%s\", base de datos \"%s\".\n" + +#~ msgid "%s" +#~ msgstr "%s" + +#, fuzzy +#~ msgid "Could not create storage record for %s\n" +#~ msgstr "No es posible crear el registro Pool. ERR=%s\n" + +#, fuzzy +#~ msgid "Could not update storage record for %s\n" +#~ msgstr "No se pudo actualizar el registro de trabajo. 
ERR=%s\n" + +#~ msgid "" +#~ "Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined " +#~ "for Storage \"%s\" in %s.\n" +#~ msgstr "" +#~ "Ninguno \"TLS CA Certificate\" o \"TLS CA Certificate Dir\" esta definido " +#~ "para Storage \"%s\" en %s.\n" + +#~ msgid "Failed to initialize TLS context for Storage \"%s\" in %s.\n" +#~ msgstr "Fallo al inicializar contexto TLS para Storage \"%s\" en %s.\n" + +#~ msgid "Could not compile regex pattern \"%s\" ERR=%s\n" +#~ msgstr "No se ha podido compilar patrón regex \"%s\" ERR=%s\n" + +#~ msgid "Director: name=%s MaxJobs=%d FDtimeout=%s SDtimeout=%s\n" +#~ msgstr "Director: nombre=%s MaxJobs=%d FDtimeout=%s SDtimeout=%s\n" + +#~ msgid " query_file=%s\n" +#~ msgstr "query_file=%s\n" + +#~ msgid " --> " +#~ msgstr "--> " + +#~ msgid "Console: name=%s SSL=%d\n" +#~ msgstr "Console: nombre=%s SSL=%d\n" + +#~ msgid "Counter: name=%s min=%d max=%d cur=%d wrapcntr=%s\n" +#~ msgstr "Counter: nombre=%s mínimo=%d máximo=%d cur=%d wrapcntr=%s\n" + +#~ msgid "Counter: name=%s min=%d max=%d\n" +#~ msgstr "Counter: nombre=%s mínimo=%d máximo=%d\n" + +#, fuzzy +#~ msgid "" +#~ "Client: Name=%s Enabled=%d Address=%s FDport=%d MaxJobs=%u NumJobs=%u\n" +#~ msgstr "Cliente: nombre=%s dirección=%s FDport=%d MaxJobs=%u\n" + +#~ msgid " JobRetention=%s FileRetention=%s AutoPrune=%d\n" +#~ msgstr "JobRetention=%s FileRetention=%s AutoPrune=%d\n" + +#~ msgid "" +#~ "Device: name=%s ok=%d num_writers=%d max_writers=%d\n" +#~ " reserved=%d open=%d append=%d read=%d labeled=%d offline=%d " +#~ "autochgr=%d\n" +#~ " poolid=%s volname=%s MediaType=%s\n" +#~ msgstr "" +#~ "Device: nombre=%s ok=%d num_writers=%d max_writers=%d\n" +#~ " reservado=%d abierto=%d append=%d leer=%d etiquetado=%d offline=%d " +#~ "autochgr=%d\n" +#~ " poolid=%s volname=%s MediaType=%s\n" + +#, fuzzy +#~ msgid "" +#~ "%s: name=%s address=%s SDport=%d MaxJobs=%u NumJobs=%u\n" +#~ " DeviceName=%s MediaType=%s StorageId=%s Autochanger=%d\n" +#~ msgstr "" +#~ "Storage: nombre=%s dirección=%s SDport=%d MaxJobs=%u\n" +#~ " DeviceName=%s MediaType=%s StorageId=%s\n" + +#, fuzzy +#~ msgid " Parent --> " +#~ msgstr "--> " + +#~ msgid "" +#~ "Catalog: name=%s address=%s DBport=%d db_name=%s\n" +#~ " db_driver=%s db_user=%s MutliDBConn=%d\n" +#~ msgstr "" +#~ "Catálogo: nombre=%s dirección=%s DBport=%d db_nombre=%s\n" +#~ " db_driver=%s db_user=%s MutliDBConn=%d\n" + +#~ msgid "%s: name=%s JobType=%d level=%s Priority=%d Enabled=%d\n" +#~ msgstr "%s: nombre=%s JobType=%d nivel=%s Prioridad=%d Activo=%d\n" + +#~ msgid "Job" +#~ msgstr "Job" + +#~ msgid "JobDefs" +#~ msgstr "JobDefs" + +#, fuzzy +#~ msgid "" +#~ " MaxJobs=%u NumJobs=%u Resched=%d Times=%d Interval=%s Spool=%d " +#~ "WritePartAfterJob=%d\n" +#~ msgstr "" +#~ "MaxJobs=%u Resched=%d Times=%d Intervalo=%s Spool=%d WritePartAfterJob=" +#~ "%d\n" + +#~ msgid " SpoolSize=%s\n" +#~ msgstr "SpoolSize=%s\n" + +#~ msgid " Accurate=%d\n" +#~ msgstr "Preciso=%d\n" + +#~ msgid " SelectionType=%d\n" +#~ msgstr "SelectionType=%d\n" + +#, fuzzy +#~ msgid " PrefixLinks=%d\n" +#~ msgstr "mins=%d\n" + +#~ msgid " --> Where=%s\n" +#~ msgstr "--> Donde=%s\n" + +#~ msgid " --> RegexWhere=%s\n" +#~ msgstr "--> RegexDonde=%s\n" + +#~ msgid " --> Bootstrap=%s\n" +#~ msgstr "--> Bootstrap=%s\n" + +#~ msgid " --> WriteBootstrap=%s\n" +#~ msgstr "--> WriteBootstrap=%s\n" + +#~ msgid " --> PluginOptions=%s\n" +#~ msgstr "--> PluginOptions=%s\n" + +#~ msgid " --> MaxRunTime=%u\n" +#~ msgstr "--> MaxRunTime=%u\n" + +#~ msgid " --> MaxWaitTime=%u\n" +#~ msgstr 
"--> MaxWaitTime=%u\n" + +#~ msgid " --> MaxStartDelay=%u\n" +#~ msgstr "--> MaxStartDelay=%u\n" + +#, fuzzy +#~ msgid " --> MaxRunSchedTime=%u\n" +#~ msgstr "--> MaxRunTime=%u\n" + +#, fuzzy +#~ msgid " --> Base %s\n" +#~ msgstr "--> Objetivo=%s\n" + +#~ msgid " --> RunScript\n" +#~ msgstr "--> EjecutarScript\n" + +#~ msgid " --> Command=%s\n" +#~ msgstr "--> Comando=%s\n" + +#~ msgid " --> Target=%s\n" +#~ msgstr "--> Objetivo=%s\n" + +#~ msgid " --> RunOnSuccess=%u\n" +#~ msgstr "--> RunOnSuccess=%u\n" + +#~ msgid " --> RunOnFailure=%u\n" +#~ msgstr "--> RunOnFailure=%u\n" + +#~ msgid " --> FailJobOnError=%u\n" +#~ msgstr "--> FailJobOnError=%u\n" + +#~ msgid " --> RunWhen=%u\n" +#~ msgstr "--> EjecutarCuando=%u\n" + +#, fuzzy +#~ msgid " --> VFullBackup" +#~ msgstr "Incremental" + +#, fuzzy +#~ msgid " --> FullBackup" +#~ msgstr "Incremental" + +#, fuzzy +#~ msgid " --> IncrementalBackup" +#~ msgstr "Incremental" + +#, fuzzy +#~ msgid " --> DifferentialBackup" +#~ msgstr "Diferencial" + +#, fuzzy +#~ msgid " --> Next" +#~ msgstr "--> " + +#~ msgid " --> Run=%s\n" +#~ msgstr "--> Ejecutar=%s\n" + +#~ msgid " --> SelectionPattern=%s\n" +#~ msgstr "--> SeleccionPatron=%s\n" + +#, fuzzy +#~ msgid "Schedule: Name=%s Enabled=%d\n" +#~ msgstr "Schedule: nombre=%s\n" + +#~ msgid " --> Run Level=%s\n" +#~ msgstr "--> Ejecutar Nivel=%s\n" + +#, fuzzy +#~ msgid " MaxRunSchedTime=%u\n" +#~ msgstr "--> MaxRunTime=%u\n" + +#, fuzzy +#~ msgid " Priority=%u\n" +#~ msgstr "woy=" + +#~ msgid " hour=" +#~ msgstr "hora=" + +#~ msgid " mday=" +#~ msgstr "mdia=" + +#~ msgid " month=" +#~ msgstr "mes=" + +#~ msgid " wday=" +#~ msgstr "wdia=" + +#~ msgid " wom=" +#~ msgstr "wom=" + +#~ msgid " woy=" +#~ msgstr "woy=" + +#~ msgid " mins=%d\n" +#~ msgstr "mins=%d\n" + +#~ msgid " --> " +#~ msgstr "--> " + +#, fuzzy +#~ msgid " --> Next" +#~ msgstr "--> " + +#~ msgid "Schedule: name=%s\n" +#~ msgstr "Schedule: nombre=%s\n" + +#~ msgid "Pool: name=%s PoolType=%s\n" +#~ msgstr "Pool: nombre=%s PoolType=%s\n" + +#~ msgid " use_cat=%d use_once=%d cat_files=%d\n" +#~ msgstr "use_cat=%d use_once=%d cat_files=%d\n" + +#~ msgid " max_vols=%d auto_prune=%d VolRetention=%s\n" +#~ msgstr "max_vols=%d auto_prune=%d VolRetention=%s\n" + +#~ msgid " VolUse=%s recycle=%d LabelFormat=%s\n" +#~ msgstr "VolUse=%s recycle=%d LabelFormat=%s\n" + +#~ msgid " CleaningPrefix=%s LabelType=%d\n" +#~ msgstr "CleaningPrefix=%s LabelType=%d\n" + +#~ msgid " RecyleOldest=%d PurgeOldest=%d ActionOnPurge=%d\n" +#~ msgstr "RecyleOldest=%d PurgeOldest=%d ActionOnPurge=%d\n" + +#~ msgid " MaxVolJobs=%d MaxVolFiles=%d MaxVolBytes=%s\n" +#~ msgstr "MaxVolJobs=%d MaxVolFiles=%d MaxVolBytes=%s\n" + +#~ msgid " MigTime=%s MigHiBytes=%s MigLoBytes=%s\n" +#~ msgstr "MigTime=%s MigHiBytes=%s MigLoBytes=%s\n" + +#, fuzzy +#~ msgid " CacheRetention=%s\n" +#~ msgstr "Catálogo=%s\n" + +#, fuzzy +#~ msgid " JobRetention=%s FileRetention=%s\n" +#~ msgstr "JobRetention=%s FileRetention=%s AutoPrune=%d\n" + +#~ msgid " NextPool=%s\n" +#~ msgstr "NextPool=%s\n" + +#~ msgid " RecyclePool=%s\n" +#~ msgstr "RecyclePool=%s\n" + +#~ msgid " ScratchPool=%s\n" +#~ msgstr "ScratchPool=%s\n" + +#~ msgid " Catalog=%s\n" +#~ msgstr "Catálogo=%s\n" + +#~ msgid "Messages: name=%s\n" +#~ msgstr "Mensajes: nombre=%s\n" + +#~ msgid " mailcmd=%s\n" +#~ msgstr "mailcmd=%s\n" + +#~ msgid " opcmd=%s\n" +#~ msgstr "opcmd=%s\n" + +#~ msgid "Cannot find Pool resource %s\n" +#~ msgstr "No se puede encontrar el recurso Pool %s\n" + +#~ msgid "Cannot find Console resource %s\n" +#~ 
msgstr "No se puede encontrar el recurso Console %s\n" + +#~ msgid "Cannot find Director resource %s\n" +#~ msgstr "No se puede encontrar el recurso Director %s\n" + +#~ msgid "Cannot find Storage resource %s\n" +#~ msgstr "No se puede encontrar el recurso Storage %s\n" + +#~ msgid "Cannot find Job resource %s\n" +#~ msgstr "No se puede encontrar el recurso Job %s\n" + +#~ msgid "Cannot find Counter resource %s\n" +#~ msgstr "No se puede encontrar el recurso Counter %s\n" + +#~ msgid "Cannot find Client resource %s\n" +#~ msgstr "No se puede encontrar el recurso Cliente %s\n" + +#~ msgid "Cannot find Schedule resource %s\n" +#~ msgstr "No se puede encontrar el recurso Schedule %s\n" + +#~ msgid "Expected one of: %s, got: %s" +#~ msgstr "Esperaba uno de: %s, obtuvo: %s" + +#, fuzzy +#~ msgid "Could not find Storage Resource %s referenced on line %d : %s\n" +#~ msgstr "" +#~ "No se pudo encontrar Recursos de configuración %s referenciado en la " +#~ "línea %d: %s\n" + +#, fuzzy +#~ msgid "" +#~ "Attempt to redefine Storage resource \"%s\" referenced on line %d : %s\n" +#~ msgstr "" +#~ "Intento de redefinir recurso \"%s\" referenciado en la línea %d: %s\n" + +#~ msgid "Expected a Migration Job Type keyword, got: %s" +#~ msgstr "Espera una palabra clave Tipo Job de Migración, obtuvo: %s" + +#~ msgid "Expected a Job Type keyword, got: %s" +#~ msgstr "Esperaba una palabra clave Tipo Job, obtuvo: %s" + +#~ msgid "Expected a Job Level keyword, got: %s" +#~ msgstr "Esperaba una palabra clave Tipo Level, obtuvo: %s" + +#~ msgid "Expected a Restore replacement option, got: %s" +#~ msgstr "Esperaba una opción de reemplazo de Restauración, obtuvo: %s" + +#~ msgid "Expect %s, got: %s" +#~ msgstr "Esperaba %s, obtuvo: %s" + +#~ msgid "Could not find config Resource %s referenced on line %d : %s\n" +#~ msgstr "" +#~ "No se pudo encontrar Recursos de configuración %s referenciado en la " +#~ "línea %d: %s\n" + +#~ msgid "Expecting open brace. Got %s" +#~ msgstr "Esperando abrir paréntesis. Obtuvo %s" + +#~ msgid "Expecting keyword, got: %s\n" +#~ msgstr "Esperando palabra clave, obtuvo: %s\n" + +#~ msgid "expected an equals, got: %s" +#~ msgstr "esperaba una igual, obtuvo: %s" + +#~ msgid "Keyword %s not permitted in this resource" +#~ msgstr "Palabra clave %s no esta permitido en este recurso" + +#~ msgid "Count not update counter %s: ERR=%s\n" +#~ msgstr "Conteo no actualizo contador %s: ERR=%s\n" + +#~ msgid "Cannot create var context: ERR=%s\n" +#~ msgstr "No se puede crear contexto var: ERR=%s\n" + +#~ msgid "Cannot set var callback: ERR=%s\n" +#~ msgstr "No se puede establecer llamada var: ERR=%s\n" + +#~ msgid "Cannot set var operate: ERR=%s\n" +#~ msgstr "No se puede establecer variable operar: ERR=%s\n" + +#~ msgid "Cannot unescape string: ERR=%s\n" +#~ msgstr "No se puede unescapar cadena: ERR=%s\n" + +#~ msgid "Cannot expand expression \"%s\": ERR=%s\n" +#~ msgstr "No se puede ampliar la expresión \"%s\": ERR=%s\n" + +#~ msgid "Cannot destroy var context: ERR=%s\n" +#~ msgstr "No se puede destruir variable contexto: ERR=%s\n" + +#~ msgid "Client: " +#~ msgstr "Cliente:" + +#~ msgid "File daemon \"%s\" rejected Job command: %s\n" +#~ msgstr "Demonio File \"%s\" rechazó comando Job: %s\n" + +#~ msgid "Error updating Client record. 
ERR=%s\n" +#~ msgstr "Error al actualizar el registro Cliente:ERR=%s\n" + +#~ msgid "FD gave bad response to JobId command: %s\n" +#~ msgstr "FD dio mala respuesta al comando jobId: %s\n" + +#~ msgid ", since=" +#~ msgstr ", desde=" + +#~ msgid "" +#~ "No prior or suitable Full backup found in catalog. Doing FULL backup.\n" +#~ msgstr "" +#~ "Ninguna copia de seguridad Completa anterior encontrada en el catálogo. " +#~ "Haciendo copia de seguridad COMPLETA.\n" + +#~ msgid " (upgraded from %s)" +#~ msgstr "(actualizar desde %s)" + +#, fuzzy +#~ msgid "" +#~ "No prior or suitable Full backup found in catalog. Doing Virtual FULL " +#~ "backup.\n" +#~ msgstr "" +#~ "Ninguna copia de seguridad Completa anterior encontrada en el catálogo. " +#~ "Haciendo copia de seguridad COMPLETA.\n" + +#~ msgid "" +#~ "No prior or suitable Differential backup found in catalog. Doing " +#~ "Differential backup.\n" +#~ msgstr "" +#~ "Ninguna copia de seguridad Diferencial previa o adecuada encontrada en el " +#~ "catálogo. Haciendo copia de seguridad Diferencial.\n" + +#~ msgid "Prior failed job found in catalog. Upgrading to %s.\n" +#~ msgstr "" +#~ "Trabajo anterior fallido encontrado en el catálogo. Actualizando a %s.\n" + +#~ msgid "Unimplemented backup level %d %c\n" +#~ msgstr "Nivel %d %c de respaldo no implementado\n" + +#~ msgid "Cannot run program: %s. ERR=%s\n" +#~ msgstr "No se puede ejecutar el programa: %s. ERR=%s\n" + +#~ msgid ">filed: write error on socket\n" +#~ msgstr ">filed: error de escritura en socket\n" + +#~ msgid "Error running program: %s. ERR=%s\n" +#~ msgstr "Error ejecutando el programa: %s. ERR=%s\n" + +#~ msgid "Cannot open included file: %s. ERR=%s\n" +#~ msgstr "No se puede abrir el archivo incluido: %s. ERR=%s\n" + +#~ msgid "Client \"%s\" RunScript failed.\n" +#~ msgstr "Cliente \"%s\" RunScript fallido.\n" + +#, fuzzy +#~ msgid "RestoreObject failed.\n" +#~ msgstr "Restaurar Archivos" + +#, fuzzy +#~ msgid "ComponentInfo failed.\n" +#~ msgstr "Comando fallido." + +#~ msgid "" +#~ "fixed name. Max=%d: " +#~ msgstr "" +#~ "Introduzca el número de volúmenes para crear. 0=>nombre fijo. Máximo=%d:" + +#~ msgid "The number must be between 0 and %d\n" +#~ msgstr "El número debe estar comprendido entre 0 y %d\n" + +#~ msgid "Enter Volume name: " +#~ msgstr "Introduzca nombre de Volumen:" + +#~ msgid "Enter base volume name: " +#~ msgstr "Introduzca nombre de Volumen base:" + +#~ msgid "Volume name too long.\n" +#~ msgstr "Nombre de Volumen demasiado largo.\n" + +#~ msgid "Volume name must be at least one character long.\n" +#~ msgstr "Nombre de volumen debe ser de al menos un carácter de largo.\n" + +#~ msgid "Enter the starting number: " +#~ msgstr "Introduzca el número inicial:" + +#~ msgid "Start number must be greater than zero.\n" +#~ msgstr "Número de inicio debe ser mayor que cero.\n" + +#~ msgid "Enter slot (0 for none): " +#~ msgstr "Introduzca ranura (0 para ninguno):" + +#~ msgid "InChanger? yes/no: " +#~ msgstr "InChanger? si/no: " + +#~ msgid "%d Volumes created in pool %s\n" +#~ msgstr "%d Volúmenes creados en el pool %s\n" + +#~ msgid "Turn on or off? " +#~ msgstr "Encender o apagar?" 
+ +#~ msgid "" +#~ "Can't set %s RecyclePool to %s, %s is not in database.\n" +#~ "Try to update it with 'update pool=%s'\n" +#~ msgstr "" +#~ "No se puede establecer %s RecyclePool para %s, %s no está¡ en la base de " +#~ "datos.\n" +#~ "Intente actualizarlo con 'update pool=%s'\n" + +#~ msgid "" +#~ "Can't set %s ScratchPool to %s, %s is not in database.\n" +#~ "Try to update it with 'update pool=%s'\n" +#~ msgstr "" +#~ "No se puede establecer %s ScratchPool para %s, %s no está en la base de " +#~ "datos.\n" +#~ "Intente actualizarlo con 'update pool=%s'\n" + +#~ msgid "" +#~ "Error: Pool %s already exists.\n" +#~ "Use update to change it.\n" +#~ msgstr "" +#~ "Error: Pool %s ya existe.\n" +#~ "Use update para cambiarlo.\n" + +#~ msgid "Pool %s created.\n" +#~ msgstr "Pool %s creado.\n" + +#, fuzzy +#~ msgid "Failed to set bandwidth limit to Client.\n" +#~ msgstr "Fallo al conectar con el cliente.\n" + +#, fuzzy +#~ msgid "Set Bandwidth choice:\n" +#~ msgstr "Actualizar selección:\n" + +#, fuzzy +#~ msgid "Running Job" +#~ msgstr "" +#~ "\n" +#~ "Jobs Ejecutando:\n" + +#, fuzzy +#~ msgid "Invalid value for limit parameter. Expecting speed.\n" +#~ msgstr "Valor no válido para exacto. Debe ser sí o no.\n" + +#, fuzzy +#~ msgid "Enter new bandwidth limit: " +#~ msgstr "Introduzca nuevo Máximo de Trabajos" + +#~ msgid "Unauthorized command from this console.\n" +#~ msgstr "Comando no autorizado desde esta consola.\n" + +#~ msgid "Client \"%s\" not found.\n" +#~ msgstr "Cliente \"%s\" no encontrado.\n" + +#~ msgid "Client \"%s\" address set to %s\n" +#~ msgstr "Cliente \"%s\" dirección configurada para %s\n" + +#~ msgid "Job \"%s\" %sabled\n" +#~ msgstr "Job \"%s\" %sabled\n" + +#, fuzzy +#~ msgid "Client \"%s\" %sabled\n" +#~ msgstr "Job \"%s\" %sabled\n" + +#, fuzzy +#~ msgid "Schedule \"%s\" %sabled\n" +#~ msgstr "Job \"%s\" %sabled\n" + +#~ msgid "Connecting to Storage daemon %s at %s:%d\n" +#~ msgstr "Conectando al demonio Storage %s en %s:%d\n" + +#~ msgid "Connected to storage daemon\n" +#~ msgstr "Conectado al demonio Storage\n" + +#~ msgid "Enter new debug level: " +#~ msgstr "Introduzca el nuevo nivel de depuración:" + +#, fuzzy +#~ msgid "Incorrect tags found on command line %s\n" +#~ msgstr "No se puede usar comando %s en un runscript" + +#~ msgid "Available daemons are: \n" +#~ msgstr "Demonios disponible son: \n" + +#~ msgid "Director" +#~ msgstr "Director" + +#~ msgid "Storage" +#~ msgstr "Storage" + +#~ msgid "Client" +#~ msgstr "Client" + +#~ msgid "All" +#~ msgstr "All" + +#~ msgid "Select daemon type to set debug level" +#~ msgstr "" +#~ "Seleccione el tipo de Demonio para establecer el nivel de depuración" + +#~ msgid "No authorization for Client \"%s\"\n" +#~ msgstr "No autorización para Cliente \"%s\"\n" + +#~ msgid "Client name missing.\n" +#~ msgstr "Falta el nombre del cliente.\n" + +#~ msgid "Job \"%s\" not found.\n" +#~ msgstr "Job \"%s\" no encontrado.\n" + +#~ msgid "No authorization for Job \"%s\"\n" +#~ msgstr "No autorización para Job \"%s\"\n" + +#~ msgid "Job name missing.\n" +#~ msgstr "Falta el nombre del Job.\n" + +#~ msgid "Fileset \"%s\" not found.\n" +#~ msgstr "FileSet \"%s\" no encontrado.\n" + +#~ msgid "No authorization for FileSet \"%s\"\n" +#~ msgstr "No autorización para FileSet \"%s\"\n" + +#~ msgid "Fileset name missing.\n" +#~ msgstr "Falta Nombre del Fileset.\n" + +#~ msgid "Level \"%s\" not valid.\n" +#~ msgstr "Nivel \"%s\" no es válido.\n" + +#~ msgid "Level value missing.\n" +#~ msgstr "Valor del nivel ausente.\n" + +#~ msgid "Invalid 
value for accurate. It must be yes or no.\n" +#~ msgstr "Valor no válido para exacto. Debe ser sí o no.\n" + +#, fuzzy +#~ msgid "Accurate value missing.\n" +#~ msgstr "Valor del nivel ausente.\n" + +#~ msgid "No job specified.\n" +#~ msgstr "Job no especificado.\n" + +#~ msgid "Error sending include list.\n" +#~ msgstr "Error al enviar lista incluir.\n" + +#~ msgid "Error sending exclude list.\n" +#~ msgstr "Error al enviar lista excluir.\n" + +#~ msgid "" +#~ "In general it is not a good idea to delete either a\n" +#~ "Pool or a Volume since they may contain data.\n" +#~ "\n" +#~ msgstr "" +#~ "En general, esto no es una buena idea para eliminar un\n" +#~ "Pool o un Volumen ya que pueden contener datos.\n" +#~ "\n" + +#~ msgid "Choose catalog item to delete" +#~ msgstr "Seleccione el ítem del catalogo para eliminar" + +#~ msgid "Nothing done.\n" +#~ msgstr "Nada hecho.\n" + +#, fuzzy +#~ msgid "Are you sure you want to delete %d JobIds ? (yes/no): " +#~ msgstr "¿Esta usted seguro de que desea eliminar el Pool \"%s\"? (si/no):" + +#~ msgid "Enter JobId to delete: " +#~ msgstr "Introduzca jobId para eliminar:" + +#, fuzzy +#~ msgid "JobId=%s and associated records deleted from the catalog.\n" +#~ msgstr "Job %s y los registros asociados eliminados del catálogo.\n" + +#~ msgid "" +#~ "\n" +#~ "This command will delete volume %s\n" +#~ "and all Jobs saved on that volume from the Catalog\n" +#~ msgstr "" +#~ "\n" +#~ "Este comando eliminara los volúmenes %s\n" +#~ "y todos los Jobs guardados en este volumen desde el Catalogo\n" + +#~ msgid "Are you sure you want to delete Volume \"%s\"? (yes/no): " +#~ msgstr "" +#~ "¿Estas usted seguro de que desea eliminar el Volumen \"%s\"? (si/no):" + +#~ msgid "Are you sure you want to delete Pool \"%s\"? (yes/no): " +#~ msgstr "¿Esta usted seguro de que desea eliminar el Pool \"%s\"? (si/no):" + +#, fuzzy +#~ msgid "Invalid device name. %s" +#~ msgstr "Opción replace no valida: %s\n" + +#, fuzzy +#~ msgid "Unable to %s for volume \"%s\"\n" +#~ msgstr "No se puede truncar el dispositivo %s. 
ERR=%s\n" + +#, fuzzy +#~ msgid "+----------+---------------+----------------------+\n" +#~ msgstr "" +#~ "------+------------------+-----------+----------------------" +#~ "+--------------------|\n" + +#, fuzzy +#~ msgid "| Part | Size | MTime |\n" +#~ msgstr "Ranura | Nombre Volumen | Estado | Tipo de Media | Pool |\n" + +#, fuzzy +#~ msgid "" +#~ "+--------------------+-----------+----------------------" +#~ "+----------------------+---------------+\n" +#~ msgstr "" +#~ "------+------------------+-----------+----------------------" +#~ "+--------------------|\n" + +#, fuzzy +#~ msgid "" +#~ "| Volume Name | Status | Media Type | " +#~ "Pool | VolBytes |\n" +#~ msgstr "Ranura | Nombre Volumen | Estado | Tipo de Media | Pool |\n" + +#, fuzzy +#~ msgid "Cloud choice: \n" +#~ msgstr "Actualizar selección:\n" + +#, fuzzy +#~ msgid "Truncate a Volume Cache" +#~ msgstr "Creado Registro Media para Volumen: %s\n" + +#, fuzzy +#~ msgid "Select action to perform on Cloud" +#~ msgstr "Seleccione el tipo de demonio para estado" + +#~ msgid "Using Catalog name=%s DB=%s\n" +#~ msgstr "Utilizando Catalogo nombre=%s BD=%s\n" + +#~ msgid "ERR: Can't open db\n" +#~ msgstr "ERR: No se puede abrir db\n" + +#~ msgid "Wait on mount timed out\n" +#~ msgstr "Tiempo agotado en esperar mount \n" + +#~ msgid "ERR: Job was not found\n" +#~ msgstr "ERR: Job no se ha encontrado\n" + +#~ msgid "" +#~ " Command Description\n" +#~ " ======= ===========\n" +#~ msgstr "" +#~ "Comando Descripción\n" +#~ " ======== ==============\n" + +#~ msgid "" +#~ " %-13s %s\n" +#~ "\n" +#~ "Arguments:\n" +#~ "\t%s\n" +#~ msgstr "" +#~ "%-13s %s\n" +#~ "\n" +#~ "Argumentos:\n" +#~ "\t%s\n" + +#~ msgid " %-13s %s\n" +#~ msgstr " %-13s %s\n" + +#~ msgid "" +#~ "\n" +#~ "Can't find %s command.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "No se puede encontrar comando %s. 
\n" +#~ "\n" + +#~ msgid "" +#~ "\n" +#~ "When at a prompt, entering a period cancels the command.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Cuando en un prompt, entrando un período cancela el comando.\n" +#~ "\n" + +#~ msgid "%s Version: %s (%s) %s %s %s %s\n" +#~ msgstr "%s Versión: %s (%s) %s %s %s %s\n" + +#~ msgid "No authorization for Catalog \"%s\"\n" +#~ msgstr "No autorización para Catalogo \"%s\"\n" + +#~ msgid "Could not find a Catalog resource\n" +#~ msgstr "No pudo encontrar un Catalogo de recursos\n" + +#~ msgid "Could not open catalog database \"%s\".\n" +#~ msgstr "No se pudo abrir la base de datos de catálogo \"%s\".\n" + +#~ msgid "Using Catalog \"%s\"\n" +#~ msgstr "Usando Catalogo \"%s\"\n" + +#~ msgid ": is an invalid command.\n" +#~ msgstr ": es un comando inválido.\n" + +#, fuzzy +#~ msgid "path name missing.\n" +#~ msgstr "Falta el nombre del Job.\n" + +#, fuzzy +#~ msgid "Failed to send command to Client.\n" +#~ msgstr "Fallo al conectar con el cliente.\n" + +#, fuzzy +#~ msgid "Unable to get Job record for Job=%s\n" +#~ msgstr "No se puede obtener el registro Job para JobId=%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to get last Job record for Job=%s\n" +#~ msgstr "No se puede obtener el registro Job para JobId=%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to get Client record for Client=%s\n" +#~ msgstr "Creado registro Cliente para Cliente: %s\n" + +#, fuzzy +#~ msgid "Unable to get last Job record for Client=%s\n" +#~ msgstr "No se puede obtener el registro Job para JobId=%s: ERR=%s\n" + +#~ msgid "Unable to get Job record for JobId=%s: ERR=%s\n" +#~ msgstr "No se puede obtener el registro Job para JobId=%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unknown command: %s\n" +#~ msgstr "Comando desconocido." + +#~ msgid "Select daemon type to make die" +#~ msgstr "Seleccione tipo de demonio para matar" + +#, fuzzy +#~ msgid "The Director will generate a deadlock.\n" +#~ msgstr "El Director tendrá una violación de segmento.\n" + +#~ msgid "The Director will segment fault.\n" +#~ msgstr "El Director tendrá una violación de segmento.\n" + +#, fuzzy +#~ msgid "Invalid argument for %s\n" +#~ msgstr "argumento invalido" + +#, fuzzy +#~ msgid "Invalid argument for job\n" +#~ msgstr "argumento invalido" + +#, fuzzy +#~ msgid "Access to specified Job, FileSet or Client not allowed.\n" +#~ msgstr "El acceso a determinado Cliente o FileSet no permitido.\n" + +#, fuzzy +#~ msgid "Unable to open the catalog.\n" +#~ msgstr "No se puede escribir en %s\n" + +#~ msgid "Access to specified Client or FileSet not allowed.\n" +#~ msgstr "El acceso a determinado Cliente o FileSet no permitido.\n" + +#~ msgid "Query failed: %s. ERR=%s\n" +#~ msgstr "Consulta fallida: %s. ERR=%s\n" + +#~ msgid "query keyword not found.\n" +#~ msgstr "consulta de palabra clave no encontrada.\n" + +#~ msgid "List MediaType failed: ERR=%s\n" +#~ msgstr "Fallo al listar MediaType: ERR=%s\n" + +#~ msgid "List Media failed: ERR=%s\n" +#~ msgstr "Fallo al listar Media: ERR=%s\n" + +#~ msgid "List Location failed: ERR=%s\n" +#~ msgstr "Fallo al listar Ubicación: ERR=%s\n" + +#~ msgid "Enter slot" +#~ msgstr "Introduzca ranura" + +#~ msgid "Expected a positive integer, got: %s\n" +#~ msgstr "Esperaba un número entero positivo, obtuvo: %s\n" + +#~ msgid "Invalid response. You must answer yes or no.\n" +#~ msgstr "Respuesta no válida. 
Usted tiene que contestar sí o no.\n" + +#~ msgid "Invalid Enabled value, it must be yes, no, archived, 0, 1, or 2\n" +#~ msgstr "Inválido valor Habilitado, debe ser sí, no, archivado, 0, 1 o 2\n" + +#, fuzzy +#~ msgid "Illegal character \"%c\" in a comment.\n" +#~ msgstr "Carácter ilegal \"%c\" en el nombre.\n" + +#, fuzzy +#~ msgid "Comment too long.\n" +#~ msgstr "Nombre demasiado largo.\n" + +#, fuzzy +#~ msgid "Comment must be at least one character long.\n" +#~ msgstr "Nombre de volumen debe ser de al menos un carácter de largo.\n" + +#~ msgid "Negative numbers not permitted\n" +#~ msgstr "Números negativos no permitidos.\n" + +#~ msgid "Range end is not integer.\n" +#~ msgstr "Rango final no es entero.\n" + +#~ msgid "Range start is not an integer.\n" +#~ msgstr "Rango inicial no es entero.\n" + +#~ msgid "Range end not bigger than start.\n" +#~ msgstr "Rango final no más grande que inicial.\n" + +#~ msgid "Input value is not an integer.\n" +#~ msgstr "Valor de entrada no es un entero.\n" + +#~ msgid "Values must be be greater than zero.\n" +#~ msgstr "Los valores deben ser ser mayor que cero.\n" + +#~ msgid "Slot too large.\n" +#~ msgstr "Ranura demasiado grande.\n" + +#, fuzzy +#~ msgid "Command input" +#~ msgstr "línea de comandos" + +#~ msgid "No slots in changer to scan.\n" +#~ msgstr "No hay ranuras en cambiador para analizar.\n" + +#~ msgid "No Volumes found to label, or no barcodes.\n" +#~ msgstr "No encontraron volúmenes para etiquetar, o sin códigos de barras.\n" + +#~ msgid "Slot %d greater than max %d ignored.\n" +#~ msgstr "Ranura %d mayor que el máximo %d ignorado.\n" + +#~ msgid "No VolName for Slot=%d InChanger set to zero.\n" +#~ msgstr "Ninguno VolName para Ranura=%d InChanger establecido en cero.\n" + +#~ msgid "Catalog record for Volume \"%s\" updated to reference slot %d.\n" +#~ msgstr "" +#~ "Registro de catálogo para Volumen \"%s\" actualizado para referenciar " +#~ "ranura %d.\n" + +#~ msgid "Catalog record for Volume \"%s\" is up to date.\n" +#~ msgstr "Registro de catálogo para Volumen \"%s\" esta actualizado.\n" + +#~ msgid "Volume \"%s\" not found in catalog. Slot=%d InChanger set to zero.\n" +#~ msgstr "" +#~ "Volumen \"%s\" no se encuentra en el catálogo. Ranura=%d InChanger " +#~ "configurado para cero.\n" + +#~ msgid "" +#~ "Volume \"%s\" has VolStatus %s. It must be Purged or Recycled before " +#~ "relabeling.\n" +#~ msgstr "" +#~ "Volumen \"%s\" hay VolStatus %s. Debe ser purgado o reciclado antes de " +#~ "volver a etiquetar.\n" + +#~ msgid "Enter new Volume name: " +#~ msgstr "Introduzca el nuevo nombre de Volumen:" + +#~ msgid "Media record for new Volume \"%s\" already exists.\n" +#~ msgstr "Registro de Medios para nuevo volumen \"%s\" ya existe.\n" + +#~ msgid "Enter slot (0 or Enter for none): " +#~ msgstr "Introduzca ranura (0 o Enter para ninguno):" + +#~ msgid "Delete of Volume \"%s\" failed. ERR=%s" +#~ msgstr "Fallo al eliminar el Volumen \"%s\". ERR=%s" + +#~ msgid "Old volume \"%s\" deleted from catalog.\n" +#~ msgstr "Antiguo volumen \"%s\" borrado del catalogo.\n" + +#~ msgid "Requesting to mount %s ...\n" +#~ msgstr "Solicitando para montar %s ...\n" + +#~ msgid "Do not forget to mount the drive!!!\n" +#~ msgstr "No se olvide de montar la unidad!!!\n" + +#~ msgid "" +#~ "The following Volumes will be labeled:\n" +#~ "Slot Volume\n" +#~ "==============\n" +#~ msgstr "" +#~ "Los siguientes volúmenes serán etiquetados:\n" +#~ "Ranura Volumen\n" +#~ "==============\n" + +#~ msgid "Do you want to label these Volumes? 
(yes|no): " +#~ msgstr "¿Quieres etiquetar estos volúmenes? (sí | no):" + +#~ msgid "Media record for Slot %d Volume \"%s\" already exists.\n" +#~ msgstr "Registro de Medios para ranura %d Volumen \"%s\" ya existe.\n" + +#~ msgid "Error setting InChanger: ERR=%s" +#~ msgstr "Error al configurar InChanger: ERR=%s" + +#~ msgid "Maximum pool Volumes=%d reached.\n" +#~ msgstr "Máximo pool Volúmenes=%d alcanzado.\n" + +#~ msgid "Catalog record for cleaning tape \"%s\" successfully created.\n" +#~ msgstr "" +#~ "Registro de catálogo para la limpieza de la cinta \"%s\" se ha creado " +#~ "correctamente.\n" + +#~ msgid "Catalog error on cleaning tape: %s" +#~ msgstr "Error de catálogo en la limpieza de la cinta: %s" + +#~ msgid "Illegal character \"%c\" in a volume name.\n" +#~ msgstr "Carácter ilegal \"%c\" en un nombre de volumen.\n" + +#~ msgid "Sending relabel command from \"%s\" to \"%s\" ...\n" +#~ msgstr "Enviando comando relabel desde \"%s\" para \"%s\" ...\n" + +#~ msgid "Sending label command for Volume \"%s\" Slot %d ...\n" +#~ msgstr "Enviando comando label para Volumen \"%s\" Ranura %d ...\n" + +#~ msgid "Catalog record for Volume \"%s\", Slot %d successfully created.\n" +#~ msgstr "" +#~ "Registro Catalogo para Volumen \"%s\", Ranura %d creado correctamente.\n" + +#~ msgid "Label command failed for Volume %s.\n" +#~ msgstr "Comando Label fallido para Volumen %s.\n" + +#~ msgid "Could not open SD socket.\n" +#~ msgstr "No se pudo abrir socket SD.\n" + +#~ msgid "Invalid Slot number: %s\n" +#~ msgstr "Número de Ranura no válido: %s\n" + +#, fuzzy +#~ msgid "Invalid Volume name: %s. Volume skipped.\n" +#~ msgstr "Nombre de Volumen no válido: %s\n" + +#~ msgid "Device \"%s\" has %d slots.\n" +#~ msgstr "Dispositivo \"%s\" tiene %d ranuras.\n" + +#~ msgid "Pool \"%s\" resource not found for volume \"%s\"!\n" +#~ msgstr "Recurso Pool \"%s\" no encontrado para volumen \"%s\"!\n" + +#~ msgid "No Volumes found, or no barcodes.\n" +#~ msgstr "Volúmenes no encontrados, o sin códigos de barras.\n" + +#, fuzzy +#~ msgid "" +#~ "+------+----------------------+-----------+-----------------" +#~ "+--------------------+\n" +#~ msgstr "" +#~ "------+------------------+-----------+----------------------" +#~ "+--------------------|\n" + +#, fuzzy +#~ msgid "" +#~ "| Slot | Volume Name | Status | Media Type | " +#~ "Pool |\n" +#~ msgstr "Ranura | Nombre Volumen | Estado | Tipo de Media | Pool |\n" + +#~ msgid "ON or OFF keyword missing.\n" +#~ msgstr "Falta la palabra clave ON u OFF.\n" + +#~ msgid "Disabled Jobs:\n" +#~ msgstr "Jobs Deshabilitados:\n" + +#~ msgid "No disabled Jobs.\n" +#~ msgstr "Ningún Job deshabilitado.\n" + +#~ msgid "Keywords for the show command are:\n" +#~ msgstr "Palabras clave para el comando show son:\n" + +#~ msgid "%s resource %s not found.\n" +#~ msgstr "%s recurso %s no encontrado.\n" + +#~ msgid "Resource %s not found\n" +#~ msgstr "Recurso %s no encontrado\n" + +#~ msgid "Hey! DB is NULL\n" +#~ msgstr "Hey! BD esta VACÍA\n" + +#, fuzzy +#~ msgid "Unknown order type %s\n" +#~ msgstr "Tipo de recurso desconocido %d\n" + +#, fuzzy +#~ msgid "Invalid jobid argument\n" +#~ msgstr "argumento invalido" + +#, fuzzy +#~ msgid "Unknown ObjectType %s\n" +#~ msgstr "Tipo de base de datos desconocido: %s\n" + +#~ msgid "Jobid %d used %d Volume(s): %s\n" +#~ msgstr "Jobid %d usado %d Volumen(s): %s\n" + +#~ msgid "No Pool specified.\n" +#~ msgstr "Ningún Pool especificado.\n" + +#~ msgid "Error obtaining pool ids. ERR=%s\n" +#~ msgstr "Error al obtener pool ids. 
ERR=%s\n" + +#~ msgid "Pool: %s\n" +#~ msgstr "Pool: %s\n" + +#~ msgid "Ignoring invalid value for days. Max is 50.\n" +#~ msgstr "Ignorando valor invalido para días. Máximo es 50.\n" + +#~ msgid "Unknown list keyword: %s\n" +#~ msgstr "Lista de palabras clave desconocida: %s\n" + +#~ msgid "%s is not a job name.\n" +#~ msgstr "%s no es un nombre de Job.\n" + +#~ msgid "Could not find Pool for Job %s\n" +#~ msgstr "No se pudo encontrar Pool para Job %s\n" + +#~ msgid "Could not find next Volume for Job %s (Pool=%s, Level=%s).\n" +#~ msgstr "" +#~ "No se pudo encontrar el siguiente Volumen para Job %s (Pool=%s, Nivel=" +#~ "%s).\n" + +#~ msgid "" +#~ "The next Volume to be used by Job \"%s\" (Pool=%s, Level=%s) will be %s\n" +#~ msgstr "" +#~ "El próximo Volumen que se utilizará por Job \"%s\" (Pool=%s, Nivel=%s) " +#~ "será %s\n" + +#~ msgid "Could not find next Volume for Job %s.\n" +#~ msgstr "No se pudo encontrar el siguiente Volumen para Job %s.\n" + +#~ msgid "Pool %s not in database. %s" +#~ msgstr "Pool %s no creado en la base de datos. %s" + +#~ msgid "Pool %s created in database.\n" +#~ msgstr "Pool %s creado en la base de datos.\n" + +#~ msgid "You have no messages.\n" +#~ msgstr "Usted no tiene mensajes.\n" + +#~ msgid "Message too long to display.\n" +#~ msgstr "Mensaje demasiado largo para mostrar.\n" + +#~ msgid "Choose item to prune" +#~ msgstr "Elija el ítem para podar" + +#~ msgid "Cannot prune Volume \"%s\" because it is archived.\n" +#~ msgstr "No se puede podar Volumen \"%s\", porque el esta archivo.\n" + +#~ msgid "Pruned Jobs from JobHisto catalog.\n" +#~ msgstr "Podar Jobs del catálogo JobHisto.\n" + +#, fuzzy +#~ msgid "Begin pruning Files.\n" +#~ msgstr "Comenzar poda de Archivos.\n" + +#~ msgid "No Files found to prune.\n" +#~ msgstr "No se encontraron archivos para podar.\n" + +#~ msgid "Pruned Files from %s Jobs for client %s from catalog.\n" +#~ msgstr "" +#~ "Archivos podados desde Jobs %s para el cliente %s desde el catálogo.\n" + +#, fuzzy +#~ msgid "Begin pruning Jobs older than %s.\n" +#~ msgstr "Comenzar poda de Jobs.\n" + +#~ msgid "Pruned %d %s for client %s from catalog.\n" +#~ msgstr "Podados %d %s para el cliente %s desde el catálogo.\n" + +#~ msgid "Jobs" +#~ msgstr "Jobs" + +#~ msgid "No Jobs found to prune.\n" +#~ msgstr "No encontraron Jobs para podar.\n" + +#, fuzzy +#~ msgid "Volume \"%s\"" +#~ msgstr "Volumen" + +#, fuzzy +#~ msgid "%d expired volume%s found\n" +#~ msgstr "%s recurso %s no encontrado.\n" + +#, fuzzy +#~ msgid "" +#~ "Found %d Job(s) associated with the Volume \"%s\" that will be pruned\n" +#~ msgstr "" +#~ "No hay más Jobs relacionados con Volumen \"%s\". Marcando el para " +#~ "purgar.\n" + +#, fuzzy +#~ msgid "Found no Job associated with the Volume \"%s\" to prune\n" +#~ msgstr "" +#~ "No hay más Jobs relacionados con Volumen \"%s\". Marcando el para " +#~ "purgar.\n" + +#~ msgid "" +#~ "\n" +#~ "This command can be DANGEROUS!!!\n" +#~ "\n" +#~ "It purges (deletes) all Files from a Job,\n" +#~ "JobId, Client or Volume; or it purges (deletes)\n" +#~ "all Jobs from a Client or Volume without regard\n" +#~ "to retention periods. Normally you should use the\n" +#~ "PRUNE command, which respects retention periods.\n" +#~ msgstr "" +#~ "\n" +#~ "Este comando puede ser PELIGROSO!!!\n" +#~ "\n" +#~ "El purgas (elimina) todos los archivos de un Job,\n" +#~ "JobId, Cliente o Volumen; o el purgas (elimina)\n" +#~ "Todos los Jobs de un Cliente o Volumen sin tener en cuenta\n" +#~ "los periodos de retención. 
Normalmente debería utilizar el\n" +#~ "comando PRUNE, que respecta los plazos de retención.\n" + +#~ msgid "Choose item to purge" +#~ msgstr "Elija el ítem para purgar" + +#~ msgid "Begin purging files for Client \"%s\"\n" +#~ msgstr "Iniciando purga de archivos para Cliente \"%s\"\n" + +#~ msgid "No Files found for client %s to purge from %s catalog.\n" +#~ msgstr "" +#~ "No encuentra los archivos del cliente %s para purgar %s del catálogo.\n" + +#~ msgid "Files for %d Jobs for client \"%s\" purged from %s catalog.\n" +#~ msgstr "" +#~ "Archivos para Jobs %d para cliente \"%s\" purgado del catalogo %s.\n" + +#~ msgid "Begin purging jobs from Client \"%s\"\n" +#~ msgstr "Iniciando purga de jobs para Cliente \"%s\"\n" + +#, fuzzy +#~ msgid "No Jobs found for client %s to purge from %s catalog.\n" +#~ msgstr "" +#~ "No encuentra los archivos del cliente %s para purgar %s del catálogo.\n" + +#~ msgid "%d Jobs for client %s purged from %s catalog.\n" +#~ msgstr "Jobs %d para cliente \"%s\" purgado del catalogo %s.\n" + +#~ msgid "" +#~ "\n" +#~ "Volume \"%s\" has VolStatus \"%s\" and cannot be purged.\n" +#~ "The VolStatus must be: Append, Full, Used, or Error to be purged.\n" +#~ msgstr "" +#~ "\n" +#~ "Volumen \"%s\" tiene VolStatus \"%s\" y no puede ser purgado.\n" +#~ "El VolStatus debe ser: Añadir, Lleno, Usado, o Error para ser purgado.\n" + +#, fuzzy +#~ msgid "%d Job%s on Volume \"%s\" purged from catalog.\n" +#~ msgstr "%d Archivo%s en Volumen \"%s\" purgado desde el catalogo.\n" + +#~ msgid "" +#~ "There are no more Jobs associated with Volume \"%s\". Marking it purged.\n" +#~ msgstr "" +#~ "No hay más Jobs relacionados con Volumen \"%s\". Marcando el para " +#~ "purgar.\n" + +#, fuzzy +#~ msgid "Can't update volume size in the catalog\n" +#~ msgstr "Se creó un Volumen nuevo \"%s\" en el catálogo.\n" + +#, fuzzy +#~ msgid "Unable to truncate volume \"%s\"\n" +#~ msgstr "No se puede truncar el dispositivo %s. 
ERR=%s\n" + +#~ msgid "Unable move recycled Volume in full Pool \"%s\" MaxVols=%d\n" +#~ msgstr "" +#~ "No se puede mover el Volumen reciclado en full Pool \"%s\" MaxVols=%d\n" + +#~ msgid "All records pruned from Volume \"%s\"; marking it \"Purged\"\n" +#~ msgstr "" +#~ "Todos los registros del Volumen \"%s\" podados; marcando el \"Purgados\"\n" + +#~ msgid "Cannot purge Volume with VolStatus=%s\n" +#~ msgstr "No se puede purgar Volumen con VolStatus=%s\n" + +#~ msgid "Could not open %s: ERR=%s\n" +#~ msgstr "No se pudo abrir %s: ERR=%s\n" + +#~ msgid "Available queries:\n" +#~ msgstr "Consultas disponibles:\n" + +#, fuzzy +#~ msgid "Invalid command line query item specified.\n" +#~ msgstr "Nombre del Job especificado dos veces.\n" + +#~ msgid "Choose a query" +#~ msgstr "Elija una consulta" + +#~ msgid "Could not find query.\n" +#~ msgstr "No se pudo encontrar la consulta.\n" + +#~ msgid "Too many prompts in query, max is 9.\n" +#~ msgstr "Demasiadas consolas en consulta, máximo es 9.\n" + +#~ msgid "Warning prompt %d missing.\n" +#~ msgstr "Alerta de consola %d perdida.\n" + +#~ msgid "" +#~ "Entering SQL query mode.\n" +#~ "Terminate each query with a semicolon.\n" +#~ "Terminate query mode with a blank line.\n" +#~ msgstr "" +#~ "Entrando en modo de consulta SQL.\n" +#~ "Termine cada consulta con un punto-coma.\n" +#~ "Termine el modo de consulta con una linea en blanco.\n" + +#~ msgid "Enter SQL query: " +#~ msgstr "Ingrese una consulta SQL:" + +#~ msgid "Add to SQL query: " +#~ msgstr "Agregar a la consulta SQL:" + +#~ msgid "End query mode.\n" +#~ msgstr "Fin modo de consulta.\n" + +#~ msgid "\"RegexWhere\" specification not authorized.\n" +#~ msgstr "\"RegexWhere\" especificación no autorizada.\n" + +#~ msgid "\"where\" specification not authorized.\n" +#~ msgstr "especificación \"where\" no autorizada.\n" + +#~ msgid "" +#~ "No Restore Job Resource found in bacula-dir.conf.\n" +#~ "You must create at least one before running this command.\n" +#~ msgstr "" +#~ "Recurso Job de Restauración no encontrado en bacula-dir.conf.\n" +#~ "Usted debe crear al menos uno antes de ejecutar este comando.\n" + +#~ msgid "Restore not done.\n" +#~ msgstr "Restauración no hecha.\n" + +#~ msgid "Unable to construct a valid BSR. Cannot continue.\n" +#~ msgstr "No se puede construir un BSR válido. 
No puede continuar.\n" + +#~ msgid "No files selected to be restored.\n" +#~ msgstr "No hay archivos seleccionados para ser restaurado.\n" + +#~ msgid "Bootstrap records written to %s\n" +#~ msgstr "Registros Bootstrap escritos para %s\n" + +#~ msgid "" +#~ "\n" +#~ "1 file selected to be restored.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "1 archivo seleccionado para ser restaurado.\n" +#~ "\n" + +#~ msgid "" +#~ "\n" +#~ "%s files selected to be restored.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "%s archivos seleccionados para ser restaurado.\n" +#~ "\n" + +#~ msgid "No Client resource found!\n" +#~ msgstr "Ningún recurso Cliente encontrado!\n" + +#, fuzzy +#~ msgid "The restore will use the following job(s) as Base\n" +#~ msgstr "Usted ha seleccionado los siguientes JobIds: %s\n" + +#~ msgid "Missing value for keyword: %s\n" +#~ msgstr "Falta el valor de palabra clave: %s\n" + +#~ msgid "List last 20 Jobs run" +#~ msgstr "Listar los Últimos 20 Jobs ejecutados" + +#~ msgid "List Jobs where a given File is saved" +#~ msgstr "Listado de Jobs donde un determinado archivo se ha guardado" + +#~ msgid "Enter list of comma separated JobIds to select" +#~ msgstr "Introduzca lista de JobIds separados por comas para seleccionar" + +#~ msgid "Enter SQL list command" +#~ msgstr "Introduzca lista de comandos SQL" + +#~ msgid "Select the most recent backup for a client" +#~ msgstr "Seleccionar el respaldo mas reciente para un cliente" + +#~ msgid "Select backup for a client before a specified time" +#~ msgstr "" +#~ "Seleccione un respaldo de un cliente antes de un período de tiempo " +#~ "especificado" + +#~ msgid "Enter a list of files to restore" +#~ msgstr "Introduzca una lista de archivos para restaurar" + +#~ msgid "Enter a list of files to restore before a specified time" +#~ msgstr "" +#~ "Introduzca una lista de archivos para restaurar antes de un período de " +#~ "tiempo especificado" + +#~ msgid "Find the JobIds of the most recent backup for a client" +#~ msgstr "Encuentre el JobIds del respaldo más reciente para un cliente" + +#~ msgid "Find the JobIds for a backup for a client before a specified time" +#~ msgstr "" +#~ "Encuentre el JobIds del respaldo de un cliente antes de un período de " +#~ "tiempo especificado" + +#~ msgid "Enter a list of directories to restore for found JobIds" +#~ msgstr "" +#~ "Introduzca una lista de directorios para restaurar por JobIds encontrado" + +#~ msgid "Select full restore to a specified Job date" +#~ msgstr "Seleccione restauración completa para una fecha especifica de Job" + +#~ msgid "Unknown keyword: %s\n" +#~ msgstr "Palabra clave desconocida: %s\n" + +#~ msgid "Improper date format: %s\n" +#~ msgstr "Inadecuado formato de fecha: %s\n" + +#~ msgid "Error: Pool resource \"%s\" does not exist.\n" +#~ msgstr "Error: recurso Pool \"%s\" no existe.\n" + +#~ msgid "Error: Pool resource \"%s\" access not allowed.\n" +#~ msgstr "Error: recurso Pool \"%s\" acceso no permitido.\n" + +#~ msgid "" +#~ "\n" +#~ "First you select one or more JobIds that contain files\n" +#~ "to be restored. You will be presented several methods\n" +#~ "of specifying the JobIds. Then you will be allowed to\n" +#~ "select which files from those JobIds are to be restored.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Primero debe seleccionar una o más JobIds que contienen archivos\n" +#~ "para ser restaurado.Se le presentará varios métodos\n" +#~ "para especificar los JobIDs. 
Luego se le permitirá seleccionar los " +#~ "archivos de los JobIds que deben ser restaurados.\n" +#~ "\n" + +#~ msgid "To select the JobIds, you have the following choices:\n" +#~ msgstr "" +#~ "Para seleccionar el JobIds, usted dispone de las siguientes opciones:\n" + +#~ msgid "Select item: " +#~ msgstr "Seleccione un ítem:" + +#~ msgid "SQL query not authorized.\n" +#~ msgstr "Consulta SQL no autorizada.\n" + +#~ msgid "Enter Filename (no path):" +#~ msgstr "Introduzca Nombre de Archivo (sin ruta):" + +#~ msgid "Enter JobId(s), comma separated, to restore: " +#~ msgstr "Introduzca JobId(s), separados por comas, para restaurar:" + +#~ msgid "Enter SQL list command: " +#~ msgstr "Introduzca lista de comandos SQL:" + +#~ msgid "" +#~ "Enter file names with paths, or < to enter a filename\n" +#~ "containing a list of file names with paths, and terminate\n" +#~ "them with a blank line.\n" +#~ msgstr "" +#~ "Escriba los nombres de archivo con las rutas, o < para introducir un " +#~ "nombre de archivo\n" +#~ "conteniendo una lista de nombres de archivo con las rutas, y terminado\n" +#~ "con una línea en blanco.\n" + +#~ msgid "Enter full filename: " +#~ msgstr "Introduzca el nombre de archivo completo:" + +#~ msgid "You have already selected the following JobIds: %s\n" +#~ msgstr "Usted ya ha seleccionado los siguientes JobIds: %s\n" + +#~ msgid "" +#~ "Enter full directory names or start the name\n" +#~ "with a < to indicate it is a filename containing a list\n" +#~ "of directories and terminate them with a blank line.\n" +#~ msgstr "" +#~ "Escriba los nombres de directorio completo o inicie el nombre\n" +#~ " con un < para indicar que es un nombre de archivo que contiene una " +#~ "lista\n" +#~ "de directorios y terminado con una línea en blanco.\n" + +#~ msgid "Enter directory name: " +#~ msgstr "Introduzca nombre de directorio:" + +#~ msgid "Enter JobId to get the state to restore: " +#~ msgstr "Introduzca JobId para obtener el estado para restaurar:" + +#~ msgid "Selecting jobs to build the Full state at %s\n" +#~ msgstr "Seleccionando trabajos para construir el estado completo en %s\n" + +#~ msgid "Invalid JobId in list.\n" +#~ msgstr "JobId inválido en la lista.\n" + +#~ msgid "Access to JobId=%s (Job \"%s\") not authorized. Not selected.\n" +#~ msgstr "Acceso al JobId=%s (Job \"%s\") no autorizado. 
No seleccionado.\n" + +#~ msgid "You have selected the following JobIds: %s\n" +#~ msgstr "Usted ha seleccionado los siguientes JobIds: %s\n" + +#~ msgid "You have selected the following JobId: %s\n" +#~ msgstr "Usted ha seleccionado el siguiente JobId: %s\n" + +#~ msgid "" +#~ "The restored files will the most current backup\n" +#~ "BEFORE the date you specify below.\n" +#~ "\n" +#~ msgstr "" +#~ "Los archivos restaurados serán los más actuales respaldados\n" +#~ "ANTES de la fecha especificada a continuación.\n" +#~ "\n" + +#~ msgid "Enter date as YYYY-MM-DD HH:MM:SS :" +#~ msgstr "Introduzca la fecha en formato YYYY-MM-DD HH:MM:SS :" + +#~ msgid "Improper date format.\n" +#~ msgstr "Formato de fecha inadecuado.\n" + +#~ msgid "Cannot open file %s: ERR=%s\n" +#~ msgstr "No se puede abrir el archivo %s: ERR=%s\n" + +#~ msgid "Error occurred on line %d of file \"%s\"\n" +#~ msgstr "Se ha producido un error en la línea %d del archivo \"%s\"\n" + +#~ msgid "No database record found for: %s\n" +#~ msgstr "No se encontró registro en la base de datos para: %s\n" + +#~ msgid "No JobId specified cannot continue.\n" +#~ msgstr "JobId no especificado, imposible continuar.\n" + +#~ msgid "No table found: %s\n" +#~ msgstr "Tabla no encontrada: %s\n" + +#~ msgid "" +#~ "\n" +#~ "\n" +#~ "For one or more of the JobIds selected, no files were found,\n" +#~ "so file selection is not possible.\n" +#~ "Most likely your retention policy pruned the files.\n" +#~ msgstr "" +#~ "\n" +#~ "\n" +#~ "Para uno o más de los JobIds seleccionados, no se encontraron archivos,\n" +#~ "por lo tanto, la selección de archivos no es posible.\n" +#~ "Lo más probable es que su política de retención podó los archivos.\n" + +#~ msgid "" +#~ "\n" +#~ "Do you want to restore all the files? (yes|no): " +#~ msgstr "" +#~ "\n" +#~ "¿Desea restaurar todos los archivos? (sí|no):" + +#~ msgid "" +#~ "\n" +#~ "Regexp matching files to restore? (empty to abort): " +#~ msgstr "" +#~ "\n" +#~ "¿Regexp para restaurar los archivos que coinciden? (vacío para abortar):" + +#~ msgid "Regex compile error: %s\n" +#~ msgstr "Error de compilación Regex: %s\n" + +#, fuzzy +#~ msgid "Unable to create component file %s. ERR=%s\n" +#~ msgstr "No se puede crear el archivo bootstrap %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to fdopen component file %s. ERR=%s\n" +#~ msgstr "No se puede abrir el archivo \"%s\": ERR=%s\n" + +#, fuzzy +#~ msgid "Error writing component file.\n" +#~ msgstr "Error escribiendo archivo bsr.\n" + +#~ msgid "" +#~ "\n" +#~ "Building directory tree for JobId(s) %s ... " +#~ msgstr "" +#~ "\n" +#~ "Construyendo árbol de directorios para JobId(s) %s ..."
+ +#~ msgid "" +#~ "\n" +#~ "%s files inserted into the tree and marked for extraction.\n" +#~ msgstr "" +#~ "\n" +#~ "%s archivos insertados en el árbol y marcados para la extracción.\n" + +#~ msgid "" +#~ "\n" +#~ "%s files inserted into the tree.\n" +#~ msgstr "" +#~ "\n" +#~ "%s archivos insertados en el árbol.\n" + +#~ msgid "Error getting FileSet \"%s\": ERR=%s\n" +#~ msgstr "Error al obtener FileSet \"%s\": ERR=%s\n" + +#, fuzzy +#~ msgid "FileSet argument: %s\n" +#~ msgstr "FileSet: nombre=%s\n" + +#~ msgid "The defined FileSet resources are:\n" +#~ msgstr "Los recursos FileSet definidos son:\n" + +#~ msgid "FileSet" +#~ msgstr "FileSet" + +#~ msgid "Select FileSet resource" +#~ msgstr "Seleccionar recurso FileSet" + +#~ msgid "No FileSet found for client \"%s\".\n" +#~ msgstr "FileSet para cliente \"%s\" no encontrado.\n" + +#~ msgid "Error getting FileSet record: %s\n" +#~ msgstr "Error al obtener el registro FileSet: %s\n" + +#~ msgid "" +#~ "This probably means you modified the FileSet.\n" +#~ "Continuing anyway.\n" +#~ msgstr "" +#~ "Esto probablemente significa que usted modifico el FileSet.\n" +#~ "Continuando de todos modos.\n" + +#~ msgid "Pool \"%s\" not found, using any pool.\n" +#~ msgstr "Pool \"%s\" no encontrado, utilizando cualquier pool.\n" + +#~ msgid "No Full backup before %s found.\n" +#~ msgstr "Full Backup no encontrado antes de %s.\n" + +#~ msgid "No jobs found.\n" +#~ msgstr "Jobs no encontrados.\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Warning Storage is overridden by \"%s\" on the command line.\n" +#~ msgstr "" +#~ "Advertencia, storage por defecto reemplazada por \"%s\" en la línea de " +#~ "comandos.\n" + +#, fuzzy +#~ msgid "Using Storage \"%s\" from MediaType \"%s\".\n" +#~ msgstr "" +#~ "Storage \"%s\" no encontrado, usando Storage \"%s\" desde MediaType\"%s" +#~ "\".\n" + +#~ msgid "" +#~ "Storage \"%s\" not found, using Storage \"%s\" from MediaType \"%s\".\n" +#~ msgstr "" +#~ "Storage \"%s\" no encontrado, usando Storage \"%s\" desde MediaType\"%s" +#~ "\".\n" + +#~ msgid "" +#~ "\n" +#~ "Unable to find Storage resource for\n" +#~ "MediaType \"%s\", needed by the Jobs you selected.\n" +#~ msgstr "" +#~ "\n" +#~ "No se puede encontrar los recursos Storage para\n" +#~ "MediaType \"%s\", necesario por los Jobs que ha seleccionado.\n" + +#~ msgid "OK to run? (yes/mod/no): " +#~ msgstr "OK para ejecutar? (si/mod/no): " + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Bad response: %s. You must answer yes, mod, or no.\n" +#~ "\n" +#~ msgstr "Respuesta no válida. Usted tiene que contestar sí o no.\n" + +#~ msgid "Job not run.\n" +#~ msgstr "Job no ejecutando.\n" + +#, fuzzy +#~ msgid "Job failed. Client \"%s\" not authorized on this console\n" +#~ msgstr "I/O función \"%s\" no es compatible con este dispositivo. \n" + +#, fuzzy +#~ msgid "Job %s failed.\n" +#~ msgstr "Job fallido.\n" + +#~ msgid "Job queued. JobId=%s\n" +#~ msgstr "Cola de Job. JobId=%s\n" + +#~ msgid "Job \"%s\" not found\n" +#~ msgstr "Job \"%s\" no encontrado\n" + +#~ msgid "A job name must be specified.\n" +#~ msgstr "Un nombre de job debe de ser especificado.\n" + +#~ msgid "No authorization. Job \"%s\".\n" +#~ msgstr "Sin autorización. Job \"%s\".\n" + +#~ msgid "Pool \"%s\" not found.\n" +#~ msgstr "Pool \"%s\" no encontrado.\n" + +#~ msgid "No authorization. Pool \"%s\".\n" +#~ msgstr "Sin autorización. Pool \"%s\".\n" + +#, fuzzy +#~ msgid "NextPool \"%s\" not found.\n" +#~ msgstr "Pool \"%s\" no encontrado.\n" + +#, fuzzy +#~ msgid "No authorization. 
NextPool \"%s\".\n" +#~ msgstr "Sin autorización. Pool \"%s\".\n" + +#~ msgid "Restore Client \"%s\" not found.\n" +#~ msgstr "Restaurar Cliente \"%s\" no encontrado.\n" + +#~ msgid "No authorization. Client \"%s\".\n" +#~ msgstr "Sin autorización. Cliente \"%s\".\n" + +#~ msgid "FileSet \"%s\" not found.\n" +#~ msgstr "FileSet \"%s\" no encontrado.\n" + +#~ msgid "No authorization. FileSet \"%s\".\n" +#~ msgstr "Sin autorización. FileSet \"%s\".\n" + +#~ msgid "Storage \"%s\" not found.\n" +#~ msgstr "Storage \"%s\" no encontrado\n" + +#~ msgid "user selection" +#~ msgstr "Selección de usuario" + +#~ msgid "No authorization. Storage \"%s\".\n" +#~ msgstr "Sin autorización. Storage \"%s\".\n" + +#, fuzzy +#~ msgid "No JobId specified.\n" +#~ msgstr "Job no especificado.\n" + +#, fuzzy +#~ msgid "Invalid or no Job name specified.\n" +#~ msgstr "Nombre del Job especificado dos veces.\n" + +#, fuzzy +#~ msgid "Enter the JobId list to select: " +#~ msgstr "Introduzca el JobId para seleccionar:" + +#, fuzzy +#~ msgid "Could not get job record for selected JobId=%d. ERR=%s" +#~ msgstr "No se pudo obtener el registro Job para JobId %s para %s. ERR=%s" + +#~ msgid "You have the following choices:\n" +#~ msgstr "Usted tiene las siguientes opciones:\n" + +#, fuzzy +#~ msgid "Select termination code: " +#~ msgstr "Código de Terminación del Job: %d" + +#, fuzzy +#~ msgid "Unable to use current plugin configuration, discarding it." +#~ msgstr "No es posible leer el archivo de configuración" + +#, fuzzy +#~ msgid "Plugin Restore Options\n" +#~ msgstr "Opciones de Plug-in" + +#, fuzzy +#~ msgid "Use above plugin configuration? (yes/mod/no): " +#~ msgstr "¿Continuar? (sí/mod/no):" + +#~ msgid "mod" +#~ msgstr "mod" + +#, fuzzy +#~ msgid "Please enter a value for %s: " +#~ msgstr "Por favor, introduzca un JobId para restaurar:" + +#, fuzzy +#~ msgid "No plugin to configure\n" +#~ msgstr "Plugin=%s no encontrado.\n" + +#, fuzzy +#~ msgid "Plugins to configure:\n" +#~ msgstr "Plugin=%s no encontrado.\n" + +#, fuzzy +#~ msgid "Select plugin to configure" +#~ msgstr "TLS permitido, pero no configurado.\n" + +#, fuzzy +#~ msgid "Can't configure %32s\n" +#~ msgstr "No se puede continuar.\n" + +#~ msgid "Level" +#~ msgstr "Level" + +#~ msgid "Restore Client" +#~ msgstr "Restaurar Client" + +#~ msgid "When" +#~ msgstr "Cuando " + +#~ msgid "Priority" +#~ msgstr "Prioridad" + +#~ msgid "Pool" +#~ msgstr "Pool" + +#, fuzzy +#~ msgid "NextPool" +#~ msgstr "Pool" + +#~ msgid "Verify Job" +#~ msgstr "Verificar Job" + +#~ msgid "Bootstrap" +#~ msgstr "Bootstrap" + +#~ msgid "Where" +#~ msgstr "Donde" + +#~ msgid "File Relocation" +#~ msgstr "Reubicar Archivo" + +#~ msgid "Replace" +#~ msgstr "Reemplazar" + +#~ msgid "JobId" +#~ msgstr "JobId" + +#~ msgid "Plugin Options" +#~ msgstr "Opciones de Plug-in" + +#, fuzzy +#~ msgid "" +#~ "Please enter start time as a duration or YYYY-MM-DD HH:MM:SS or return " +#~ "for now: " +#~ msgstr "" +#~ "Por favor, introduzca hora de inicio deseada YYYY-MM-DD HH:MM:SS (Enter " +#~ "para ahora)" + +#~ msgid "Invalid time, using current time.\n" +#~ msgstr "Hora inválida, usando hora actual.\n" + +#~ msgid "Enter new Priority: " +#~ msgstr "Introduzca nueva prioridad:" + +#~ msgid "Priority must be a positive integer.\n" +#~ msgstr "Prioridad debe ser un entero positivo.\n" + +#~ msgid "Please enter the Bootstrap file name: " +#~ msgstr "Por favor, introduzca nombre de archivo Bootstrap:" + +#~ msgid "Warning cannot open %s: ERR=%s\n" +#~ msgstr "Advertencia no puede abrir %s: ERR=%s\n" 
+ +#, fuzzy +#~ msgid "Please enter the full path prefix for restore (/ for none): " +#~ msgstr "" +#~ "Por favor, introduzca el prefijo de ruta para restaurar (/ para ninguno):" + +#~ msgid "Replace:\n" +#~ msgstr "Reemplazar:\n" + +#~ msgid "Select replace option" +#~ msgstr "Seleccione la opción sustituir" + +#~ msgid "" +#~ "You must set the bootstrap file to NULL to be able to specify a JobId.\n" +#~ msgstr "" +#~ "Usted debe configurar el archivo bootstrap a NULL para poder especificar " +#~ "un jobId.\n" + +#~ msgid "Please Plugin Options string: " +#~ msgstr "Por favor, cadena de Opciones de Plugin:" + +#~ msgid "User input" +#~ msgstr "Entrada del usuario" + +#~ msgid "Invalid replace option: %s\n" +#~ msgstr "Opción replace no valida: %s\n" + +#~ msgid "strip_prefix=%s add_prefix=%s add_suffix=%s\n" +#~ msgstr "strip_prefix=%s add_prefix=%s add_suffix=%s\n" + +#~ msgid "This will replace your current Where value\n" +#~ msgstr "Este sustituirá a su actual valor Donde\n" + +#~ msgid "Strip prefix" +#~ msgstr "Eliminar prefijo" + +#~ msgid "Add prefix" +#~ msgstr "Añadir prefijo" + +#~ msgid "Add file suffix" +#~ msgstr "Añadir sufijo de archivo" + +#~ msgid "Enter a regexp" +#~ msgstr "Introduzca una expresión regular" + +#~ msgid "Test filename manipulation" +#~ msgstr "Prueba de la manipulación de nombre de archivo" + +#~ msgid "Use this ?" +#~ msgstr "Usar esto ?" + +#, fuzzy +#~ msgid "Please enter the path prefix to strip: " +#~ msgstr "Por favor, introduzca la ruta de prefijo para eliminar:" + +#, fuzzy +#~ msgid "Please enter the path prefix to add (/ for none): " +#~ msgstr "" +#~ "Por favor, introduzca el prefijo de la ruta para añadir (/ para ninguno):" + +#, fuzzy +#~ msgid "Please enter the file suffix to add: " +#~ msgstr "Por favor, introduzca el sufijo de archivo para añadir:" + +#~ msgid "Please enter a valid regexp (!from!to!): " +#~ msgstr "Por favor, introduzca una expresión regular válida (!from!to!):" + +#~ msgid "regexwhere=%s\n" +#~ msgstr "regexdonde=%s\n" + +#~ msgid "strip_prefix=%s add_prefix=%s add_suffix=%s result=%s\n" +#~ msgstr "strip_prefix=%s add_prefix=%s add_suffix=%s resultado=%s\n" + +#~ msgid "Cannot use your regexp\n" +#~ msgstr "No se pudo utilizar su regexp\n" + +#~ msgid "Please enter filename to test: " +#~ msgstr "Por favor, introduzca el nombre de archivo de prueba:" + +#~ msgid "%s -> %s\n" +#~ msgstr "%s -> %s\n" + +#~ msgid "Cannot use your regexp.\n" +#~ msgstr "No puede utilizar su expresión regular.\n" + +#~ msgid "Levels:\n" +#~ msgstr "Niveles:\n" + +#~ msgid "Full" +#~ msgstr "Completo" + +#~ msgid "Incremental" +#~ msgstr "Incremental" + +#~ msgid "Differential" +#~ msgstr "Diferencial" + +#~ msgid "Since" +#~ msgstr "Desde" + +#~ msgid "VirtualFull" +#~ msgstr "VirtualFull" + +#~ msgid "Select level" +#~ msgstr "Seleccionar Nivel" + +#~ msgid "Initialize Catalog" +#~ msgstr "Iniciar Catálogo" + +#~ msgid "Verify Catalog" +#~ msgstr "Verificar Catálogo" + +#~ msgid "Verify Volume to Catalog" +#~ msgstr "Verificar volumen de catalogo" + +#~ msgid "Verify Disk to Catalog" +#~ msgstr "Verificar disco de catalogo" + +#, fuzzy +#~ msgid "Verify Volume Data" +#~ msgstr "Verificar volumen de catalogo" + +#~ msgid "Level not appropriate for this Job. Cannot be changed.\n" +#~ msgstr "Nivel no adecuado para este trabajo. 
No se puede cambiar.\n" + +#, fuzzy +#~ msgid "" +#~ "Run Admin Job\n" +#~ "JobName: %s\n" +#~ "FileSet: %s\n" +#~ "Client: %s\n" +#~ "Storage: %s\n" +#~ "When: %s\n" +#~ "Priority: %d\n" +#~ msgstr "" +#~ "Ejecutar %s job\n" +#~ "JobName: %s\n" +#~ "FileSet: %s\n" +#~ "Cliente: %s\n" +#~ "Storage: %s\n" +#~ "Cuando: %s\n" +#~ "Prioridad: %d\n" + +#, fuzzy +#~ msgid "" +#~ "Run Backup job\n" +#~ "JobName: %s\n" +#~ "Level: %s\n" +#~ "Client: %s\n" +#~ "FileSet: %s\n" +#~ "Pool: %s (From %s)\n" +#~ "%sStorage: %s (From %s)\n" +#~ "When: %s\n" +#~ "Priority: %d\n" +#~ "%s%s%s" +#~ msgstr "" +#~ "Ejecutar %s job\n" +#~ "Nombre del Job: %s\n" +#~ "Nivel: %s\n" +#~ "Cliente: %s\n" +#~ "FileSet: %s\n" +#~ "Pool: %s (Desde %s)\n" +#~ "Storage: %s (Desde %s)\n" +#~ "Cuando: %s\n" +#~ "Prioridad: %d\n" +#~ "%s%s%s" + +#, fuzzy +#~ msgid "Could not get job record for selected JobId. ERR=%s" +#~ msgstr "No se ha podido obtener el registro Job para Job anterior. ERR=%s" + +#, fuzzy +#~ msgid "" +#~ "Run Verify Job\n" +#~ "JobName: %s\n" +#~ "Level: %s\n" +#~ "Client: %s\n" +#~ "FileSet: %s\n" +#~ "Pool: %s (From %s)\n" +#~ "Storage: %s (From %s)\n" +#~ "Verify Job: %s\n" +#~ "Verify List: %s\n" +#~ "When: %s\n" +#~ "Priority: %d\n" +#~ msgstr "" +#~ "Ejecutar %s job\n" +#~ "JobName: %s\n" +#~ "Level: %s\n" +#~ "Cliente: %s\n" +#~ "FileSet: %s\n" +#~ "Pool: %s (Desde %s)\n" +#~ "Storage: %s (Desde %s)\n" +#~ "Verify Job: %s\n" +#~ "Verify List: %s\n" +#~ "Cuando: %s\n" +#~ "Prioridad: %d\n" + +#~ msgid "Please enter a JobId for restore: " +#~ msgstr "Por favor, introduzca un JobId para restaurar:" + +#, fuzzy +#~ msgid "User specified" +#~ msgstr "tiempo de espera especificado" + +#~ msgid "" +#~ "Run Restore job\n" +#~ "JobName: %s\n" +#~ "Bootstrap: %s\n" +#~ "RegexWhere: %s\n" +#~ "Replace: %s\n" +#~ "FileSet: %s\n" +#~ "Backup Client: %s\n" +#~ "Restore Client: %s\n" +#~ "Storage: %s\n" +#~ "When: %s\n" +#~ "Catalog: %s\n" +#~ "Priority: %d\n" +#~ "Plugin Options: %s\n" +#~ msgstr "" +#~ "Ejecutar Job de Restauración\n" +#~ "Nombre del Job: %s\n" +#~ "Bootstrap: %s\n" +#~ "RegexWhere: %s\n" +#~ "Reemplazar: %s\n" +#~ "FileSet: %s\n" +#~ "Cliente de Respaldo: %s\n" +#~ "Cliente de Restauración: %s\n" +#~ "Storage: %s\n" +#~ "Cuando: %s\n" +#~ "Catalogo: %s\n" +#~ "Prioridad: %d\n" +#~ "Opciones de Plugin: %s\n" + +#~ msgid "" +#~ "Run Restore job\n" +#~ "JobName: %s\n" +#~ "Bootstrap: %s\n" +#~ "Where: %s\n" +#~ "Replace: %s\n" +#~ "FileSet: %s\n" +#~ "Backup Client: %s\n" +#~ "Restore Client: %s\n" +#~ "Storage: %s\n" +#~ "When: %s\n" +#~ "Catalog: %s\n" +#~ "Priority: %d\n" +#~ "Plugin Options: %s\n" +#~ msgstr "" +#~ "Ejecutar Job de Restauración\n" +#~ "Nombre del Job: %s\n" +#~ "Bootstrap: %s\n" +#~ "Donde: %s\n" +#~ "Reemplazar: %s\n" +#~ "FileSet: %s\n" +#~ "Respaldo Cliente: %s\n" +#~ "Restaurar Cliente: %s\n" +#~ "Storage: %s\n" +#~ "Cuando: %s\n" +#~ "Catalogo: %s\n" +#~ "Prioridad: %d\n" +#~ "Opciones de Plugin: %s\n" + +#~ msgid "" +#~ "Run Restore job\n" +#~ "JobName: %s\n" +#~ "Bootstrap: %s\n" +#~ msgstr "" +#~ "Ejecutar Job Restaurar\n" +#~ "JobName: %s\n" +#~ "Bootstrap: %s\n" + +#~ msgid "RegexWhere: %s\n" +#~ msgstr "RegexWhere: %s\n" + +#~ msgid "Where: %s\n" +#~ msgstr "Donde: %s\n" + +#~ msgid "" +#~ "Replace: %s\n" +#~ "Client: %s\n" +#~ "Storage: %s\n" +#~ "JobId: %s\n" +#~ "When: %s\n" +#~ "Catalog: %s\n" +#~ "Priority: %d\n" +#~ "Plugin Options: %s\n" +#~ msgstr "" +#~ "Remplazar: %s\n" +#~ "Cliente: %s\n" +#~ "Storage: %s\n" +#~ "JobId: %s\n" +#~ "Cuando: %s\n" +#~ 
"Catalogo: %s\n" +#~ "Prioridad: %d\n" +#~ "Opciones de Plugin: %s\n" + +#~ msgid "Run Copy job\n" +#~ msgstr "Ejecutar Job de Copia\n" + +#~ msgid "Run Migration job\n" +#~ msgstr "Ejecutar Job de Migración\n" + +#~ msgid "Unknown Job Type=%d\n" +#~ msgstr "Tipo de Job desconocido=%d\n" + +#~ msgid "Value missing for keyword %s\n" +#~ msgstr "Falta valor para la palabra clave %s\n" + +#~ msgid "JobId specified twice.\n" +#~ msgstr "JobId especificada dos veces.\n" + +#~ msgid "Client specified twice.\n" +#~ msgstr "Cliente especificado dos veces.\n" + +#~ msgid "FileSet specified twice.\n" +#~ msgstr "FileSet especificado dos veces.\n" + +#~ msgid "Level specified twice.\n" +#~ msgstr "Level especificado dos veces.\n" + +#~ msgid "Storage specified twice.\n" +#~ msgstr "Storage especificado dos veces.\n" + +#~ msgid "RegexWhere or Where specified twice.\n" +#~ msgstr "RegexDónde o Dónde especificado dos veces.\n" + +#~ msgid "No authorization for \"regexwhere\" specification.\n" +#~ msgstr "Sin autorización para especificación \"regexwhere\".\n" + +#~ msgid "Where or RegexWhere specified twice.\n" +#~ msgstr "Dónde o RegexDónde especificado dos veces.\n" + +#~ msgid "No authoriztion for \"where\" specification.\n" +#~ msgstr "Sin autorización para especificación \"where\".\n" + +#~ msgid "Bootstrap specified twice.\n" +#~ msgstr "Bootstrap especificado dos veces.\n" + +#~ msgid "Replace specified twice.\n" +#~ msgstr "Replace especificado dos veces.\n" + +#~ msgid "When specified twice.\n" +#~ msgstr "Cuando especificado dos veces.\n" + +#~ msgid "Priority specified twice.\n" +#~ msgstr "Prioridad especificada dos veces.\n" + +#~ msgid "Priority must be positive nonzero setting it to 10.\n" +#~ msgstr "" +#~ "Prioridad debe ser positivo y distinto a cero, configurando en 10.\n" + +#~ msgid "Verify Job specified twice.\n" +#~ msgstr "Job Verificar especificado dos veces.\n" + +#~ msgid "Migration Job specified twice.\n" +#~ msgstr "Job Migración especificado dos veces.\n" + +#~ msgid "Pool specified twice.\n" +#~ msgstr "Pool especificado dos veces.\n" + +#~ msgid "Restore Client specified twice.\n" +#~ msgstr "Restaurar Cliente especificado dos veces.\n" + +#~ msgid "Plugin Options not yet implemented.\n" +#~ msgstr "Opciones de Plugin todavía no se ha implementado.\n" + +#~ msgid "Plugin Options specified twice.\n" +#~ msgstr "Opciones de Plugin especificado dos veces.\n" + +#~ msgid "No authoriztion for \"PluginOptions\" specification.\n" +#~ msgstr "Sin autorización para especificación \"PluginOptions\".\n" + +#~ msgid "Spool flag specified twice.\n" +#~ msgstr "Bandera de cola especificada dos veces.\n" + +#~ msgid "Invalid spooldata flag.\n" +#~ msgstr "Invalida bandera spooldata.\n" + +#, fuzzy +#~ msgid "IgnoreDuplicateCheck flag specified twice.\n" +#~ msgstr "Bandera de cola especificada dos veces.\n" + +#, fuzzy +#~ msgid "Invalid ignoreduplicatecheck flag.\n" +#~ msgstr "Invalida bandera spooldata.\n" + +#, fuzzy +#~ msgid "Accurate flag specified twice.\n" +#~ msgstr "Bandera de cola especificada dos veces.\n" + +#, fuzzy +#~ msgid "Invalid accurate flag.\n" +#~ msgstr "Invalida bandera spooldata.\n" + +#~ msgid "Job name specified twice.\n" +#~ msgstr "Nombre del Job especificado dos veces.\n" + +#, fuzzy +#~ msgid "Media Type specified twice.\n" +#~ msgstr "Replace especificado dos veces.\n" + +#, fuzzy +#~ msgid "NextPool specified twice.\n" +#~ msgstr "Pool especificado dos veces.\n" + +#~ msgid "Invalid keyword: %s\n" +#~ msgstr "Palabra clave inválida: %s\n" + +#~ msgid 
"Catalog \"%s\" not found\n" +#~ msgstr "Catalogo \"%s\" no encontrado\n" + +#~ msgid "No authorization. Catalog \"%s\".\n" +#~ msgstr "Sin autorización. Catalogo \"%s\".\n" + +#~ msgid "Verify Job \"%s\" not found.\n" +#~ msgstr "Job Verificar \"%s\" no encontrado.\n" + +#~ msgid "Migration Job \"%s\" not found.\n" +#~ msgstr "Job Migración \"%s\" no encontrado.\n" + +#~ msgid "The current %s retention period is: %s\n" +#~ msgstr "El período actual %s de retención es: %s\n" + +#, fuzzy +#~ msgid "Continue? (yes/no): " +#~ msgstr "¿Continuar? (sí/mod/no):" + +#~ msgid "Continue? (yes/mod/no): " +#~ msgstr "¿Continuar? (sí/mod/no):" + +#~ msgid "Enter new retention period: " +#~ msgstr "Introduzca el nuevo periodo de retención:" + +#~ msgid "Invalid period.\n" +#~ msgstr "Período no válido.\n" + +#~ msgid "The defined Storage resources are:\n" +#~ msgstr "Los recursos Storage definidos son:\n" + +#~ msgid "Select Storage resource" +#~ msgstr "Seleccione recurso Storage" + +#~ msgid "" +#~ "You must specify a \"use \" command before continuing.\n" +#~ msgstr "" +#~ "Usted debe especificar un comando \"use \" antes de " +#~ "continuar.\n" + +#~ msgid "The defined Catalog resources are:\n" +#~ msgstr "Los recursos Catalogo definidos son:\n" + +#~ msgid "Catalog" +#~ msgstr "Catalogo" + +#~ msgid "Select Catalog resource" +#~ msgstr "Seleccione recurso Catalogo" + +#, fuzzy +#~ msgid "The disabled Job resources are:\n" +#~ msgstr "Los recursos Job definidos son:\n" + +#, fuzzy +#~ msgid "The enabled Job resources are:\n" +#~ msgstr "Los recursos Job definidos son:\n" + +#, fuzzy +#~ msgid "Select Job resource" +#~ msgstr "Seleccione recurso Pool" + +#~ msgid "The defined Job resources are:\n" +#~ msgstr "Los recursos Job definidos son:\n" + +#, fuzzy +#~ msgid "Error: Restore Job resource \"%s\" does not exist.\n" +#~ msgstr "Error: recurso Pool \"%s\" no existe.\n" + +#~ msgid "The defined Restore Job resources are:\n" +#~ msgstr "Los recursos Restore definidos son:\n" + +#~ msgid "Select Restore Job" +#~ msgstr "Seleccione recurso Restore" + +#~ msgid "The defined Client resources are:\n" +#~ msgstr "Los recursos Clientes definidos son:\n" + +#, fuzzy +#~ msgid "Select Client resource" +#~ msgstr "Seleccionar recurso FileSet" + +#~ msgid "Select Client (File daemon) resource" +#~ msgstr "Seleccione recurso Cliente (File Daemon)" + +#~ msgid "Error: Client resource %s does not exist.\n" +#~ msgstr "Error: Recurso Cliente %s no existe.\n" + +#, fuzzy +#~ msgid "The defined Schedule resources are:\n" +#~ msgstr "Los recursos Clientes definidos son:\n" + +#, fuzzy +#~ msgid "Schedule" +#~ msgstr "" +#~ "\n" +#~ "Scheduled Jobs:\n" + +#, fuzzy +#~ msgid "Select Schedule resource" +#~ msgstr "Seleccione recurso Pool" + +#~ msgid "Could not find Client %s: ERR=%s" +#~ msgstr "No se pudo encontrar el Cliente %s: ERR=%s" + +#~ msgid "Could not find Client \"%s\": ERR=%s" +#~ msgstr "No se pudo encontrar Cliente \"%s\": ERR=%s" + +#~ msgid "Error obtaining client ids. ERR=%s\n" +#~ msgstr "Error al obtener el ID del cliente. ERR=%s\n" + +#~ msgid "No clients defined. You must run a job before using this command.\n" +#~ msgstr "" +#~ "Clientes no definidos. Usted debe ejecutar un Job antes de usar este " +#~ "comando.\n" + +#~ msgid "Defined Clients:\n" +#~ msgstr "Clientes definidos:\n" + +#~ msgid "Select the Client" +#~ msgstr "Seleccione el Cliente" + +#~ msgid "Could not find Pool \"%s\": ERR=%s" +#~ msgstr "No se pudo encontrar Pool \"%s\": ERR=%s" + +#~ msgid "No pools defined. 
Use the \"create\" command to create one.\n" +#~ msgstr "Pools no definidos. Utilice el comando \"create\" para crear uno.\n" + +#~ msgid "Defined Pools:\n" +#~ msgstr "Pools definidos:\n" + +#~ msgid "Select the Pool" +#~ msgstr "Seleccione el Pool" + +#~ msgid "No access to Pool \"%s\"\n" +#~ msgstr "No tienen acceso al Pool \"%s\"\n" + +#, fuzzy +#~ msgid "Enter a Volume name or *MediaId: " +#~ msgstr "Introduzca nombre de Volumen:" + +#~ msgid "The defined Pool resources are:\n" +#~ msgstr "Los recursos Pool definidos son:\n" + +#~ msgid "Select Pool resource" +#~ msgstr "Seleccione recurso Pool" + +#~ msgid "Enter the JobId to select: " +#~ msgstr "Introduzca el JobId para seleccionar:" + +#~ msgid "Could not find Job \"%s\": ERR=%s" +#~ msgstr "No pudo encontrar Job \"%s\": ERR=%s" + +#~ msgid "Automatically selected %s: %s\n" +#~ msgstr "Seleccionado automáticamente %s: %s\n" + +#~ msgid "" +#~ "Your request has multiple choices for \"%s\". Selection is not possible " +#~ "in batch mode.\n" +#~ msgstr "" +#~ "Su petición ha múltiples opciones para \"%s\". La selección no es posible " +#~ "en modo batch.\n" + +#~ msgid "Selection list for \"%s\" is empty!\n" +#~ msgstr "Lista de selección para \"%s\" está vacía!\n" + +#~ msgid "Automatically selected: %s\n" +#~ msgstr "Seleccionado automáticamente: %s\n" + +#~ msgid "Selection aborted, nothing done.\n" +#~ msgstr "Selección abortada, no hay nada hecho.\n" + +#~ msgid "Please enter a number between 1 and %d\n" +#~ msgstr "Por favor, introduzca un número entre 1 y %d\n" + +#~ msgid "Storage name given twice.\n" +#~ msgstr "Nombre de Storage especificado dos veces.\n" + +#~ msgid "Expecting jobid=nn command, got: %s\n" +#~ msgstr "Esperando comando jobid=nn, obtuvo: %s.\n" + +#~ msgid "JobId %s is not running.\n" +#~ msgstr "JobId %s no está en ejecución.\n" + +#~ msgid "Expecting job=xxx, got: %s.\n" +#~ msgstr "Esperando job=xxx, obtuvo: %s.\n" + +#~ msgid "Job \"%s\" is not running.\n" +#~ msgstr "Job \"%s\" no está en ejecución.\n" + +#~ msgid "Expecting ujobid=xxx, got: %s.\n" +#~ msgstr "Esperando ujobid=xxx, obtuvo: %s.\n" + +#~ msgid "Storage resource \"%s\": not found\n" +#~ msgstr "Recurso Storage \"%s\": No encontrado\n" + +#~ msgid "Enter autochanger drive[0]: " +#~ msgstr "Introduzca unidad Autochanger [0]:" + +#~ msgid "Enter autochanger slot: " +#~ msgstr "Introduzca ranura Autochanger:" + +#~ msgid "Media Types defined in conf file:\n" +#~ msgstr "Tipos de Media definidos en el archivo de configuración:\n" + +#~ msgid "Media Type" +#~ msgstr "Tipo de Media" + +#~ msgid "Select the Media Type" +#~ msgstr "Seleccione el Tipo de Media" + +#~ msgid "No Jobs running.\n" +#~ msgstr "No hay Jobs en ejecución.\n" + +#~ msgid "None of your jobs are running.\n" +#~ msgstr "Ninguno de sus trabajos se está ejecutando.\n" + +#, fuzzy +#~ msgid "No value given for \"jobid\".\n" +#~ msgstr "Volúmenes no encontrados para JobId=%d\n" + +#, fuzzy +#~ msgid "Unauthorized command from this console for JobId=%d.\n" +#~ msgstr "Comando no autorizado desde esta consola.\n" + +#, fuzzy +#~ msgid "Warning Job JobId=%d is not running.\n" +#~ msgstr "" +#~ "Advertencia Job %s no está¡ en ejecución. 
Continuar de todos modos ...\n" + +#, fuzzy +#~ msgid "Confirm %s of %d Job%s (yes/no): " +#~ msgstr "Confirmar cancelar(si/no): " + +#, fuzzy +#~ msgid "No value given for \"job\".\n" +#~ msgstr "Volúmenes no encontrados para JobId=%d\n" + +#, fuzzy +#~ msgid "Unauthorized command from this console for job=%s.\n" +#~ msgstr "Comando no autorizado desde esta consola.\n" + +#, fuzzy +#~ msgid "Warning Job %s is not running.\n" +#~ msgstr "JobId %s no está en ejecución.\n" + +#, fuzzy +#~ msgid "No value given for \"ujobid\".\n" +#~ msgstr "Volúmenes no encontrados para JobId=%d\n" + +#, fuzzy +#~ msgid "Unauthorized command from this console for ujobid=%s.\n" +#~ msgstr "Comando no autorizado desde esta consola.\n" + +#, fuzzy +#~ msgid "Select Job(s):\n" +#~ msgstr "Seleccione Job:\n" + +#~ msgid "JobId=%s Job=%s" +#~ msgstr "JobId=%s Job=%s" + +#, fuzzy +#~ msgid "Choose Job list to %s" +#~ msgstr "Elija Job para cancelar" + +#, fuzzy +#~ msgid "Invalid argument \"action\".\n" +#~ msgstr "argumento invalido" + +#, fuzzy +#~ msgid "No Volumes found to perform the command.\n" +#~ msgstr "No encontraron volúmenes para etiquetar, o sin códigos de barras.\n" + +#~ msgid "Cannot create UA thread: %s\n" +#~ msgstr "No se puede crear hilo UA: %s\n" + +#~ msgid "You have messages.\n" +#~ msgstr "Usted tiene mensajes.\n" + +#, fuzzy +#~ msgid "Connecting to Storage %s at %s:%d\n" +#~ msgstr "Conectando al demonio Storage %s en %s:%d\n" + +#, fuzzy +#~ msgid "Failed to connect to Storage.\n" +#~ msgstr "Error al conectar con demonio Storage.\n" + +#~ msgid "Status available for:\n" +#~ msgstr "Estado disponible para:\n" + +#~ msgid "Select daemon type for status" +#~ msgstr "Seleccione el tipo de demonio para estado" + +#, fuzzy +#~ msgid "%s %sVersion: %s (%s) %s %s %s\n" +#~ msgstr "%s Versión: %s (%s) %s %s %s\n" + +#, fuzzy +#~ msgid "Daemon started %s, conf reloaded %s\n" +#~ msgstr "Demonio iniciado %s, 1 Job ejecutando desde el inicio.\n" + +#, fuzzy +#~ msgid " Jobs: run=%d, running=%d mode=%d,%d\n" +#~ msgstr "Demonio iniciado %s, %d Job ejecutando desde el inicio.\n" + +#~ msgid " Heap: heap=%s smbytes=%s max_bytes=%s bufs=%s max_bufs=%s\n" +#~ msgstr "Heap: heap=%s smbytes=%s max_bytes=%s bufs=%s max_bufs=%s\n" + +#, fuzzy +#~ msgid "No authorization for Storage \"%s\"\n" +#~ msgstr "Sin autorización. 
Storage \"%s\".\n" + +#~ msgid "" +#~ "\n" +#~ "Failed to connect to Storage daemon %s.\n" +#~ "====\n" +#~ msgstr "" +#~ "\n" +#~ "Fallo al conectar con el demonio Storage %s.\n" +#~ "====\n" + +#~ msgid "" +#~ "Failed to connect to Client %s.\n" +#~ "====\n" +#~ msgstr "" +#~ "Fallo al conectar con el Cliente %s.\n" +#~ "====\n" + +#~ msgid "Connected to file daemon\n" +#~ msgstr "Conectado al demonio file\n" + +#~ msgid "" +#~ "\n" +#~ "Scheduled Jobs:\n" +#~ msgstr "" +#~ "\n" +#~ "Scheduled Jobs:\n" + +#, fuzzy +#~ msgid "" +#~ "Level Type Pri Scheduled Job Name " +#~ "Volume\n" +#~ msgstr "Nivel Tipo Pri Scheduled Nombre Volumen\n" + +#~ msgid "===================================================================================\n" +#~ msgstr "===================================================================================\n" + +#, fuzzy +#~ msgid "" +#~ "Level Type Pri Scheduled Job Name " +#~ "Schedule\n" +#~ msgstr "Nivel Tipo Pri Scheduled Nombre Volumen\n" + +#, fuzzy +#~ msgid "=====================================================================================\n" +#~ msgstr "===================================================================================\n" + +#~ msgid "%-14s\t%-8s\t%3d\t%-18s\t%-18s\t%s\n" +#~ msgstr "%-14s\t%-8s\t%3d\t%-18s\t%-18s\t%s\n" + +#~ msgid "%-14s %-8s %3d %-18s %-18s %s\n" +#~ msgstr "%-14s %-8s %3d %-18s %-18s %s\n" + +#, fuzzy +#~ msgid "Ignoring invalid value for days. Max is 3000.\n" +#~ msgstr "Ignorando valores inválidos para el día. Máximo es de 500.\n" + +#, fuzzy +#~ msgid "Ignoring invalid value for limit. Max is 2000.\n" +#~ msgstr "Ignorando valores inválidos para el día. Máximo es de 500.\n" + +#, fuzzy +#~ msgid "Ignoring invalid time.\n" +#~ msgstr "Ruta dada no válida.\n" + +#~ msgid "No Scheduled Jobs.\n" +#~ msgstr "No hay Jobs Programados.\n" + +#~ msgid "Ignoring invalid value for days. Max is 500.\n" +#~ msgstr "Ignorando valores inválidos para el día. 
Máximo es de 500.\n" + +#~ msgid "" +#~ "\n" +#~ "Running Jobs:\n" +#~ msgstr "" +#~ "\n" +#~ "Jobs Ejecutando:\n" + +#, fuzzy +#~ msgid "Console connected %sat %s\n" +#~ msgstr "Consola conectada en %s\n" + +#, fuzzy +#~ msgid " JobId Type Level Files Bytes Name Status\n" +#~ msgstr "JobId Nivel Nombre Estado\n" + +#~ msgid "======================================================================\n" +#~ msgstr "======================================================================\n" + +#~ msgid "is waiting execution" +#~ msgstr "esta esperando ejecución" + +#~ msgid "is running" +#~ msgstr "esta ejecutando" + +#~ msgid "is blocked" +#~ msgstr "esta bloqueado" + +#~ msgid "has terminated" +#~ msgstr "ha terminado" + +#~ msgid "has terminated with warnings" +#~ msgstr "ha terminado con advertencias" + +#, fuzzy +#~ msgid "has terminated in incomplete state" +#~ msgstr "ha terminado con advertencias" + +#~ msgid "has erred" +#~ msgstr "ha errado" + +#~ msgid "has errors" +#~ msgstr "tiene errores" + +#~ msgid "has a fatal error" +#~ msgstr "tiene un error fatal" + +#~ msgid "has verify differences" +#~ msgstr "ha verificar diferencias" + +#~ msgid "has been canceled" +#~ msgstr "ha sido cancelado" + +#~ msgid "is waiting on Client" +#~ msgstr "esta esperando por Client" + +#~ msgid "is waiting on Client %s" +#~ msgstr "esta esperando por Cliente %s" + +#, fuzzy +#~ msgid "is waiting on Storage \"%s\"" +#~ msgstr "esta esperando por Storage %s" + +#~ msgid "is waiting on Storage" +#~ msgstr "esta esperando por Storage" + +#~ msgid "is waiting on max Storage jobs" +#~ msgstr "esta esperando por máximo Storage jobs" + +#~ msgid "is waiting on max Client jobs" +#~ msgstr "esta esperando por máximo Cliente jobs" + +#~ msgid "is waiting on max Job jobs" +#~ msgstr "esta esperando por máximo Jobs jobs" + +#~ msgid "is waiting on max total jobs" +#~ msgstr "esta esperando por máximo total jobs" + +#, fuzzy +#~ msgid "is waiting for its start time (%s)" +#~ msgstr "esta esperando por su hora de inicio" + +#~ msgid "is waiting for higher priority jobs to finish" +#~ msgstr "está esperando por jobs de una mayor prioridad para terminar" + +#, fuzzy +#~ msgid "is waiting for a Shared Storage device" +#~ msgstr "esta esperando por Storage" + +#~ msgid "SD committing Data" +#~ msgstr "SD perpetrando Datos" + +#~ msgid "SD despooling Data" +#~ msgstr "SD desencolando Datos" + +#~ msgid "SD despooling Attributes" +#~ msgstr "SD desencolando Atributos" + +#~ msgid "Dir inserting Attributes" +#~ msgstr "Insertando Atributos Dir" + +#~ msgid "is in unknown state %c" +#~ msgstr "esta en estado desconocido %c" + +#~ msgid "is waiting for a mount request" +#~ msgstr "esta esperando por petición de montaje" + +#~ msgid "is waiting for an appendable Volume" +#~ msgstr "está esperando un Volumen appendable" + +#~ msgid "is waiting for Client to connect to Storage daemon" +#~ msgstr "está esperando por cliente para conectarse al demonio Storage" + +#~ msgid "is waiting for Client %s to connect to Storage %s" +#~ msgstr "está aguardando por el Cliente %s para conectarse al Storage %s" + +#, fuzzy +#~ msgid "%6d\t%-6s\t%-20s\t%s\t%s\n" +#~ msgstr "%6d\t%-6s\t%-20s\t%s\n" + +#, fuzzy +#~ msgid "%6d %-4s %-3s %10s %10s %-17s %s\n" +#~ msgstr "%6d %-6s %8s %10s %-7s %-8s %s\n" + +#~ msgid "" +#~ "No Jobs running.\n" +#~ "====\n" +#~ msgstr "" +#~ "Ningún Jobs ejecutando.\n" +#~ "====\n" + +#~ msgid "No Terminated Jobs.\n" +#~ msgstr "Jobs No Terminados.\n" + +#~ msgid 
"====================================================================\n" +#~ msgstr "====================================================================\n" + +#~ msgid "\n" +#~ msgstr "\n" + +#~ msgid "add dir/file to be restored recursively, wildcards allowed" +#~ msgstr "" +#~ "añadir directorio/archivo a ser restaurado de forma recursiva, comodines " +#~ "permitido" + +#~ msgid "change current directory" +#~ msgstr "cambio del directorio actual" + +#~ msgid "count marked files in and below the cd" +#~ msgstr "contar archivos marcados dentro y por debajo de CD" + +#~ msgid "delete dir/file to be restored recursively in dir" +#~ msgstr "eliminar dir/archivo a restaurar recursivamente en dir" + +#~ msgid "long list current directory, wildcards allowed" +#~ msgstr "larga lista directorio actual, comodines permitido" + +#~ msgid "leave file selection mode" +#~ msgstr "abandonar el modo de selección de archivos" + +#~ msgid "estimate restore size" +#~ msgstr "tamaño estimado de restauración" + +#~ msgid "same as done command" +#~ msgstr "mismo que el comando done" + +#~ msgid "find files, wildcards allowed" +#~ msgstr "encontrar los archivos, comodines permitidos" + +#~ msgid "print help" +#~ msgstr "imprimir ayuda" + +#~ msgid "list current directory, wildcards allowed" +#~ msgstr "lista el directorio actual, comodines permitidos" + +#, fuzzy +#~ msgid "list subdir in current directory, wildcards allowed" +#~ msgstr "lista el directorio actual, comodines permitidos" + +#~ msgid "list the marked files in and below the cd" +#~ msgstr "lista los archivos marcados dentro y por debajo de CD" + +#, fuzzy +#~ msgid "list the marked files in" +#~ msgstr "lista los archivos marcados dentro y por debajo de CD" + +#~ msgid "mark dir/file to be restored recursively, wildcards allowed" +#~ msgstr "" +#~ "marca directorio/archivo para restaurar recursivamente, comodines " +#~ "permitido" + +#~ msgid "mark directory name to be restored (no files)" +#~ msgstr "marca nombre del directorio para ser restaurado (sin archivos)" + +#~ msgid "print current working directory" +#~ msgstr "imprimir directorio de trabajo actual" + +#~ msgid "unmark dir/file to be restored recursively in dir" +#~ msgstr "" +#~ "desmarcar directorio/archivo para ser restaurado en el directorio " +#~ "recursivamente" + +#~ msgid "unmark directory name only no recursion" +#~ msgstr "desmarcar solo nombre del directorio sin recursividad" + +#~ msgid "quit and do not do restore" +#~ msgstr "salir y no restaurar" + +#~ msgid "" +#~ "\n" +#~ "You are now entering file selection mode where you add (mark) and\n" +#~ "remove (unmark) files to be restored. No files are initially added, " +#~ "unless\n" +#~ "you used the \"all\" keyword on the command line.\n" +#~ "Enter \"done\" to leave this mode.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Usted está entrando ahora en el modo de selección de archivo donde usted " +#~ "agrega(mark) y\n" +#~ "elimina (unmark) los archivos que va a restaurar. No hay archivos añadido " +#~ "inicialmente, a menos que\n" +#~ "usted utilice la palabra clave \"all\" en la línea de comandos. " +#~ "Introduzca \"done\" para salir de este modo.\n" +#~ "\n" + +#~ msgid "cwd is: %s\n" +#~ msgstr "cwd es: %s\n" + +#, fuzzy +#~ msgid "Invalid command \"%s\". Enter \"done\" to exit.\n" +#~ msgstr "Invalido comando \"%s\". 
Introduzca \"done\" para salir.\n" + +#~ msgid "No files marked.\n" +#~ msgstr "Ningún archivo marcado.\n" + +#~ msgid "1 file marked.\n" +#~ msgstr "1 archivo marcado.\n" + +#~ msgid "%s files marked.\n" +#~ msgstr "%s archivos marcados.\n" + +#~ msgid "No directories marked.\n" +#~ msgstr "Ningún directorio marcado.\n" + +#~ msgid "1 directory marked.\n" +#~ msgstr "1 directorio marcado.\n" + +#~ msgid "%s directories marked.\n" +#~ msgstr "%s directorios marcados.\n" + +#~ msgid "%s total files/dirs. %s marked to be restored.\n" +#~ msgstr "" +#~ "%s total de los archivos/directorios. %s marcado para ser restaurado.\n" + +#~ msgid "No file specification given.\n" +#~ msgstr "Especificación de archivo no dado.\n" + +#~ msgid "Node %s has no children.\n" +#~ msgstr "Nodo %s no tiene hijos.\n" + +#~ msgid "%d total files; %d marked to be restored; %s bytes.\n" +#~ msgstr "" +#~ "%d total de los archivos; %d marcado para ser restaurado; %s bytes.\n" + +#~ msgid "" +#~ " Command Description\n" +#~ " ======= ===========\n" +#~ msgstr "" +#~ "Comando Descripción\n" +#~ " ======= ===========\n" + +#~ msgid "Too few or too many arguments. Try using double quotes.\n" +#~ msgstr "Muy pocos o demasiados argumentos. Trate de usar comillas dobles.\n" + +#~ msgid "Invalid path given.\n" +#~ msgstr "Ruta dada no válida.\n" + +#, fuzzy +#~ msgid "Invalid path given. Permission denied.\n" +#~ msgstr "Ruta dada no válida.\n" + +#~ msgid "No files unmarked.\n" +#~ msgstr "No hay archivos sin marcar.\n" + +#~ msgid "1 file unmarked.\n" +#~ msgstr "1 archivo sin marcar.\n" + +#~ msgid "%s files unmarked.\n" +#~ msgstr "%s archivos sin marcar.\n" + +#~ msgid "No directories unmarked.\n" +#~ msgstr "No hay directorios sin marcar.\n" + +#~ msgid "1 directory unmarked.\n" +#~ msgstr "1 directorio sin marcar.\n" + +#~ msgid "%d directories unmarked.\n" +#~ msgstr "%d directorios sin marcar.\n" + +#~ msgid "Update choice:\n" +#~ msgstr "Actualizar selección:\n" + +#~ msgid "Volume parameters" +#~ msgstr "Parámetros del Volumen" + +#~ msgid "Pool from resource" +#~ msgstr "Pool de recursos" + +#~ msgid "Slots from autochanger" +#~ msgstr "Slots de cargador" + +#~ msgid "Long term statistics" +#~ msgstr "Estadísticas a largo plazo" + +#, fuzzy +#~ msgid "Snapshot parameters" +#~ msgstr "Parámetros del Volumen" + +#~ msgid "item" +#~ msgstr "í­tem" + +#~ msgid "Choose catalog item to update" +#~ msgstr "Seleccione el ítem del catalogo para ser actualizado" + +#~ msgid "Invalid VolStatus specified: %s\n" +#~ msgstr "Invalido VolStatus especificado: %s\n" + +#~ msgid "New Volume status is: %s\n" +#~ msgstr "Nuevo estado del Volumen es: %s\n" + +#, fuzzy +#~ msgid "Invalid cache retention period specified: %s\n" +#~ msgstr "Periodo de retención especificado no valido: %s\n" + +#, fuzzy +#~ msgid "New Cache Retention period is: %s\n" +#~ msgstr "Nuevo periodo de retención es: %s\n" + +#~ msgid "Invalid use duration specified: %s\n" +#~ msgstr "Duración de uso especificado no válido: %s\n" + +#~ msgid "New use duration is: %s\n" +#~ msgstr "Nueva duración de uso es: %s\n" + +#~ msgid "New max jobs is: %s\n" +#~ msgstr "Nuevo máximo jobs es: %s\n" + +#~ msgid "New max files is: %s\n" +#~ msgstr "Nuevo máximo files es: %s\n" + +#~ msgid "Invalid max. bytes specification: %s\n" +#~ msgstr "Invalido máximo bytes especificación: %s\n" + +#~ msgid "New Max bytes is: %s\n" +#~ msgstr "Nuevo máximo bytes es: %s\n" + +#~ msgid "Invalid value. It must be yes or no.\n" +#~ msgstr "Valor no válido. 
Debe ser sí o no.\n" + +#~ msgid "New Recycle flag is: %s\n" +#~ msgstr "Nueva Recycle flag es: %s\n" + +#~ msgid "New InChanger flag is: %s\n" +#~ msgstr "Nueva InChanger flag es: %s\n" + +#~ msgid "Invalid slot, it must be between 0 and MaxVols=%d\n" +#~ msgstr "Ranura no válido, debe estar entre 0 y MaxVols=%d\n" + +#~ msgid "Error updating media record Slot: ERR=%s" +#~ msgstr "Error actualizando registro de medios de Ranuras: ERR=%s" + +#~ msgid "New Slot is: %d\n" +#~ msgstr "Nueva Ranura es: %d\n" + +#~ msgid "New Pool is: %s\n" +#~ msgstr "Nuevo Pool es: %s\n" + +#~ msgid "New RecyclePool is: %s\n" +#~ msgstr "Nuevo RecyclePool es: %s\n" + +#~ msgid "Error updating Volume record: ERR=%s" +#~ msgstr "Error actualizando registro Volumen: ERR=%s" + +#~ msgid "Volume defaults updated from \"%s\" Pool record.\n" +#~ msgstr "Volumen por defecto actualizado desde registro \"%s\" Pool.\n" + +#~ msgid "Error updating Volume records: ERR=%s" +#~ msgstr "Error actualizando registros Volumen: ERR=%s" + +#~ msgid "All Volume defaults updated from \"%s\" Pool record.\n" +#~ msgstr "" +#~ "Todos Volúmenes por defectos actualizado desde registro \"%s\" Pool.\n" + +#~ msgid "Updating all pools, but skipped PoolId=%d. ERR=%s\n" +#~ msgstr "Actualizando todos los pools, pero saltando PoolId=%d. ERR=%s\n" + +#~ msgid "Error updating media record Enabled: ERR=%s" +#~ msgstr "Error actualizando registro media Activado: ERR=%s" + +#~ msgid "New Enabled is: %d\n" +#~ msgstr "Nuevo Activado es: %d\n" + +#~ msgid "Error updating media record ActionOnPurge: ERR=%s" +#~ msgstr "Error actualizando registro de medios ActionOnPurge: ERR=%s" + +#~ msgid "New ActionOnPurge is: %s\n" +#~ msgstr "Nueva ActionOnPurge es: %s\n" + +#~ msgid "Volume Status" +#~ msgstr "Estado del Volumen" + +#~ msgid "Volume Retention Period" +#~ msgstr "Periodo de Retención del Volumen" + +#~ msgid "Volume Use Duration" +#~ msgstr "Duración de uso del Volumen" + +#~ msgid "Maximum Volume Jobs" +#~ msgstr "Máximo Volumen Jobs" + +#~ msgid "Maximum Volume Files" +#~ msgstr "Máximo Archivos de Volumen" + +#~ msgid "Maximum Volume Bytes" +#~ msgstr "Máximo Volumen Bytes" + +#~ msgid "Recycle Flag" +#~ msgstr "Recycle Flag" + +#~ msgid "Slot" +#~ msgstr "Ranura" + +#~ msgid "InChanger Flag" +#~ msgstr "InChanger Flag" + +#~ msgid "Volume Files" +#~ msgstr "Archivos de Volumen" + +#~ msgid "Volume from Pool" +#~ msgstr "Volumen de Pool" + +#~ msgid "All Volumes from Pool" +#~ msgstr "Todos Volúmenes de Pool" + +#~ msgid "All Volumes from all Pools" +#~ msgstr "Todos los Volúmenes de todas los Pools" + +#~ msgid "Enabled" +#~ msgstr "Activado" + +#~ msgid "RecyclePool" +#~ msgstr "RecyclePool" + +#~ msgid "Action On Purge" +#~ msgstr "Action On Purge" + +#, fuzzy +#~ msgid "Cache Retention" +#~ msgstr "Periodo de Retención del Volumen" + +#~ msgid "Updating Volume \"%s\"\n" +#~ msgstr "Actualizando Volumen \"%s\"\n" + +#~ msgid "Current Volume status is: %s\n" +#~ msgstr "Actual estado del Volumen es: %s\n" + +#~ msgid "Possible Values are:\n" +#~ msgstr "Posibles Valores son:\n" + +#~ msgid "Choose new Volume Status" +#~ msgstr "Seleccione el nuevo estado del Volumen" + +#~ msgid "Enter Volume Retention period: " +#~ msgstr "Introduzca periodo de Retención del Volumen:" + +#~ msgid "Current use duration is: %s\n" +#~ msgstr "Duración de uso actual es: %s\n" + +#~ msgid "Enter Volume Use Duration: " +#~ msgstr "Introduzca Duración de Uso de Volumen:" + +#~ msgid "Current max jobs is: %u\n" +#~ msgstr "Máximo jobs actual es: %u\n" + +#~ msgid 
"Enter new Maximum Jobs: " +#~ msgstr "Introduzca nuevo Máximo de Trabajos" + +#~ msgid "Current max files is: %u\n" +#~ msgstr "Actual máximo de archivos es: %u\n" + +#~ msgid "Enter new Maximum Files: " +#~ msgstr "Introduzca nuevo máximo de archivos:" + +#~ msgid "Current value is: %s\n" +#~ msgstr "Valor actual es: %s\n" + +#~ msgid "Enter new Maximum Bytes: " +#~ msgstr "Introduzca nuevo Máximo de Bytes:" + +#~ msgid "Current recycle flag is: %s\n" +#~ msgstr "Recycle flag actual es: %s\n" + +#~ msgid "Enter new Recycle status: " +#~ msgstr "Introduzca el nuevo estado de Reciclaje:" + +#~ msgid "Current Slot is: %d\n" +#~ msgstr "Ranura Actual es: %d\n" + +#~ msgid "Enter new Slot: " +#~ msgstr "Introduzca nueva Ranura:" + +#~ msgid "Current InChanger flag is: %d\n" +#~ msgstr "Bandera InChanger actual es: %d\n" + +#~ msgid "Set InChanger flag for Volume \"%s\": yes/no: " +#~ msgstr "Configurar bandera InChanger para Volumen \"%s\": sí/no: " + +#~ msgid "New InChanger flag is: %d\n" +#~ msgstr "Nueva InChanger flag es: %d\n" + +#~ msgid "" +#~ "Warning changing Volume Files can result\n" +#~ "in loss of data on your Volume\n" +#~ "\n" +#~ msgstr "" +#~ "Advertencia, cambiar Archivos de Volumen puede resultar\n" +#~ "en pérdida de datos en el Volumen\n" +#~ "\n" + +#~ msgid "Current Volume Files is: %u\n" +#~ msgstr "Archivos de Volumen actual es: %u\n" + +#~ msgid "Enter new number of Files for Volume: " +#~ msgstr "Introduzca el nuevo número de Archivos para el Volumen:" + +#~ msgid "Normally, you should only increase Volume Files by one!\n" +#~ msgstr "Normalmente, sólo debería aumentar el Archivo de Volumen en uno!\n" + +#~ msgid "Increase Volume Files? (yes/no): " +#~ msgstr "Incrementar Archivo de Volumen? (sí/no):" + +#~ msgid "New Volume Files is: %u\n" +#~ msgstr "Nuevo Archivo de Volumen es: %u\n" + +#~ msgid "Current Pool is: %s\n" +#~ msgstr "Pool actual es: %s\n" + +#~ msgid "Enter new Pool name: " +#~ msgstr "Introduzca el nuevo nombre del Pool:" + +#~ msgid "Current Enabled is: %d\n" +#~ msgstr "Habilitado(Enabled) actual es: %d\n" + +#~ msgid "Enter new Enabled: " +#~ msgstr "Introduzca nuevo Habilitado(Enabled):" + +#~ msgid "Current RecyclePool is: %s\n" +#~ msgstr "RecyclePool actual es: %s\n" + +#~ msgid "No current RecyclePool\n" +#~ msgstr "No actual RecyclePool\n" + +#~ msgid "Current ActionOnPurge is: %s\n" +#~ msgstr "Actual ActionOnPurge es: %s\n" + +#~ msgid "Enter new ActionOnPurge (one of: Truncate, None): " +#~ msgstr "Introduzca nuevo ActionOnPurge (uno de: Truncar, Ninguno):" + +#, fuzzy +#~ msgid "Current Cache Retention period is: %s\n" +#~ msgstr "Actual periodo de retención es: %s\n" + +#, fuzzy +#~ msgid "Enter Cache Retention period: " +#~ msgstr "Introduzca el nuevo periodo de retención:" + +#~ msgid "Updating %i job(s).\n" +#~ msgstr "Actualizando %i job(s).\n" + +#~ msgid "db_update_pool_record returned %d. ERR=%s\n" +#~ msgstr "db_update_pool_record regreso %d. ERR=%s\n" + +#~ msgid "Pool DB record updated from resource.\n" +#~ msgstr "registro BD Pool actualizado desde recursos.\n" + +#~ msgid "Expect JobId keyword, not found.\n" +#~ msgstr "Esperaba palabra clave JobId, no encontrada.\n" + +#, fuzzy +#~ msgid "Update failed. 
Job not authorized on this console\n" +#~ msgstr "Comando no autorizado desde esta consola.\n" + +#, fuzzy +#~ msgid "Neither Client, StartTime or Priority specified.\n" +#~ msgstr "Ni el Cliente ni StartTime especificado.\n" + +#, fuzzy +#~ msgid "Job not found.\n" +#~ msgstr "Job %s no encontrado\n" + +#~ msgid "Start Virtual Backup JobId %s, Job=%s\n" +#~ msgstr "Respaldo Virtual iniciado JobId %s, Job=%s\n" + +#~ msgid "" +#~ "This Job is not an Accurate backup so is not equivalent to a Full " +#~ "backup.\n" +#~ msgstr "" +#~ "Este trabajo no es una copia de seguridad exacta, por lo que no es " +#~ "equivalente a una copia de seguridad completa.\n" + +#, fuzzy +#~ msgid "No valid Jobs found from user selection.\n" +#~ msgstr "No se encontró ningún Job para: %s.\n" + +#, fuzzy +#~ msgid "Using user supplied JobIds=%s\n" +#~ msgstr "%s utilizando JobId=%s Job=%s\n" + +#~ msgid "No previous Jobs found.\n" +#~ msgstr "Jobs previos no encontrados.\n" + +#~ msgid "Error getting Job record for previous Job: ERR=%s" +#~ msgstr "Error obteniendo registro Job para Job anterior: ERR=%s" + +#~ msgid "Backup OK -- with warnings" +#~ msgstr "Respaldo OK -- con advertencias" + +#, fuzzy +#~ msgid "" +#~ "%s %s %s (%s):\n" +#~ " Build OS: %s %s %s\n" +#~ " JobId: %d\n" +#~ " Job: %s\n" +#~ " Backup Level: Virtual Full\n" +#~ " Client: \"%s\" %s\n" +#~ " FileSet: \"%s\" %s\n" +#~ " Pool: \"%s\" (From %s)\n" +#~ " Catalog: \"%s\" (From %s)\n" +#~ " Storage: \"%s\" (From %s)\n" +#~ " Scheduled time: %s\n" +#~ " Start time: %s\n" +#~ " End time: %s\n" +#~ " Elapsed time: %s\n" +#~ " Priority: %d\n" +#~ " SD Files Written: %s\n" +#~ " SD Bytes Written: %s (%sB)\n" +#~ " Rate: %.1f KB/s\n" +#~ " Volume name(s): %s\n" +#~ " Volume Session Id: %d\n" +#~ " Volume Session Time: %d\n" +#~ " Last Volume Bytes: %s (%sB)\n" +#~ " SD Errors: %d\n" +#~ " SD termination status: %s\n" +#~ " Termination: %s\n" +#~ "\n" +#~ msgstr "" +#~ "%s %s %s (%s): %s\n" +#~ " Build OS: %s %s %s\n" +#~ " JobId: %d\n" +#~ " Job: %s\n" +#~ " Nivel del Respaldo: Virtual Completo\n" +#~ " Cliente: \"%s\" %s\n" +#~ " FileSet: \"%s\" %s\n" +#~ " Pool: \"%s\" (Desde %s)\n" +#~ " Catalogo: \"%s\" (Desde %s)\n" +#~ " Storage: \"%s\" (Desde %s)\n" +#~ " Hora programada: %s\n" +#~ " Hora de inicio: %s\n" +#~ " Hora de finalización: %s\n" +#~ " Tiempo transcurrido: %s\n" +#~ " Prioridad: %d\n" +#~ " SD Archivos Escritos: %s\n" +#~ " SD Bytes Escritos: %s (%sB)\n" +#~ " Tasa: %.1f KB/s\n" +#~ " Nombre del Volumen(es): %s\n" +#~ " Id de Sesión de Volumen: %d\n" +#~ " Tiempo de Sesión de Volumen: %d\n" +#~ " Ultimo Bytes del Volumen: %s (%sB)\n" +#~ " Errores del SD: %d\n" +#~ " Estado de terminación del SD: %s\n" +#~ " Terminación: %s\n" +#~ "\n" + +#~ msgid "Unimplemented Verify level %d(%c)\n" +#~ msgstr "Nivel Verify no implementado %d(%c)\n" + +#~ msgid "" +#~ "Unable to find JobId of previous InitCatalog Job.\n" +#~ "Please run a Verify with Level=InitCatalog before\n" +#~ "running the current Job.\n" +#~ msgstr "" +#~ "No se puede encontrar JobId de Job InitCatalog anterior.\n" +#~ "Por favor, ejecutar Verificar con Nivel=InitCatalog antes de\n" +#~ " ejecutar el Job actual.\n" + +#~ msgid "Unable to find JobId of previous Job for this client.\n" +#~ msgstr "" +#~ "No se puede encontrar JobId del trabajo anterior por este cliente.\n" + +#~ msgid "Could not get job record for previous Job. ERR=%s" +#~ msgstr "No se ha podido obtener el registro Job para Job anterior. ERR=%s" + +#~ msgid "Last Job %d did not terminate normally. 
JobStatus=%c\n" +#~ msgstr "Último Job %d no termino normalmente. JobStatus=%c\n" + +#~ msgid "Verifying against JobId=%d Job=%s\n" +#~ msgstr "Verificar contra JobId=%d Job=%s\n" + +#, fuzzy +#~ msgid "Could not get fileset record from previous Job. ERR=%s" +#~ msgstr "No se ha podido obtener el registro Job para Job anterior. ERR=%s" + +#, fuzzy +#~ msgid "Could not find FileSet resource \"%s\" from previous Job\n" +#~ msgstr "No se pudo encontrar el siguiente Volumen para Job %s.\n" + +#, fuzzy +#~ msgid "Using FileSet \"%\"\n" +#~ msgstr "Usando Dispositivo \"%s\"\n" + +#, fuzzy +#~ msgid "Could not get FileSet resource for verify Job." +#~ msgstr "No se ha podido obtener el registro Job para Job anterior. ERR=%s" + +#~ msgid "Start Verify JobId=%s Level=%s Job=%s\n" +#~ msgstr "Inicio Verificar JobId=%s Nivel=%s Job=%s\n" + +#~ msgid "Deprecated feature ... use bootstrap.\n" +#~ msgstr "Función obsoleta ... use bootstrap.\n" + +#~ msgid "Unimplemented verify level %d\n" +#~ msgstr "Nivel Verify no implementado %d\n" + +#, fuzzy +#~ msgid "Verify OK -- with warnings" +#~ msgstr "OK - con advertencias" + +#~ msgid "Verify OK" +#~ msgstr "Verificar OK" + +#~ msgid "*** Verify Error ***" +#~ msgstr "*** Verificar Error ***" + +#~ msgid "Verify warnings" +#~ msgstr "Verificar advertencias" + +#~ msgid "Verify Canceled" +#~ msgstr "Verificar Cancelada" + +#~ msgid "Verify Differences" +#~ msgstr "Verificar Diferencias" + +#~ msgid "Inappropriate term code: %d %c\n" +#~ msgstr "Inadecuado código de terminación: %d %c\n" + +#, fuzzy +#~ msgid "" +#~ "%s %s %s (%s):\n" +#~ " Build OS: %s %s %s\n" +#~ " JobId: %d\n" +#~ " Job: %s\n" +#~ " FileSet: %s\n" +#~ " Verify Level: %s\n" +#~ " Client: %s\n" +#~ " Verify JobId: %d\n" +#~ " Verify Job: %s\n" +#~ " Start time: %s\n" +#~ " End time: %s\n" +#~ " Elapsed time: %s\n" +#~ " Accurate: %s\n" +#~ " Files Expected: %s\n" +#~ " Files Examined: %s\n" +#~ " Non-fatal FD errors: %d\n" +#~ " SD Errors: %d\n" +#~ " FD termination status: %s\n" +#~ " SD termination status: %s\n" +#~ " Termination: %s\n" +#~ "\n" +#~ msgstr "" +#~ "%s %s %s (%s): %s\n" +#~ " Build OS: %s %s %s\n" +#~ " JobId: %d\n" +#~ " Job: %s\n" +#~ " FileSet: %s\n" +#~ " Verificar Nivel: %s\n" +#~ " Cliente: %s\n" +#~ " Verificar JobId: %d\n" +#~ " Verificar Job: %s\n" +#~ " Hora de inicio: %s\n" +#~ " Hora de finalización: %s\n" +#~ " Archivos Esperado: %s\n" +#~ " Archivos Examinados: %s\n" +#~ " No-fatal FD errores: %d\n" +#~ " Estado de terminación del FD: %s\n" +#~ " Estado de terminación del SD: %s\n" +#~ " Terminación: %s\n" +#~ "\n" + +#, fuzzy +#~ msgid "" +#~ "%s %s %s (%s):\n" +#~ " Build: %s %s %s\n" +#~ " JobId: %d\n" +#~ " Job: %s\n" +#~ " FileSet: %s\n" +#~ " Verify Level: %s\n" +#~ " Client: %s\n" +#~ " Verify JobId: %d\n" +#~ " Verify Job: %s\n" +#~ " Start time: %s\n" +#~ " End time: %s\n" +#~ " Elapsed time: %s\n" +#~ " Files Examined: %s\n" +#~ " Non-fatal FD errors: %d\n" +#~ " FD termination status: %s\n" +#~ " Termination: %s\n" +#~ "\n" +#~ msgstr "" +#~ "%s %s %s (%s): %s\n" +#~ " Build: %s %s %s\n" +#~ " JobId: %d\n" +#~ " Job: %s\n" +#~ " FileSet: %s\n" +#~ " Verificar Nivel: %s\n" +#~ " Cliente: %s\n" +#~ " Verificar JobId: %d\n" +#~ " Verificar Job: %s\n" +#~ " Hora de Inicio: %s\n" +#~ " Hora de Finalización: %s\n" +#~ " Archivos Examinados: %s\n" +#~ " No-fatal FD errores: %d\n" +#~ " FD estado de finalización: %s\n" +#~ " Terminación: %s\n" +#~ "\n" + +#~ msgid "" +#~ "bird Objetivo=%s\n" + +#, fuzzy +#~ msgid "Cannot verify checksum for %s\n" +#~ msgstr 
"No se puede encontrar el recurso Schedule %s\n" + +#~ msgid "%s digest initialization failed\n" +#~ msgstr "Inicialización de %s Digest ha fallado\n" + +#~ msgid "2991 Bad accurate command\n" +#~ msgstr "2991 Malo comando accurate\n" + +#~ msgid "Incorrect password given by Director at %s.\n" +#~ msgstr "Contraseña incorrecta dada por el Director en %s.\n" + +#~ msgid "" +#~ "Authorization problem: Remote server did not advertize required TLS " +#~ "support.\n" +#~ msgstr "" +#~ "Problema de autorización: El servidor remoto no anunció suporte TLS " +#~ "requerido.\n" + +#~ msgid "Cannot set buffer size FD->SD.\n" +#~ msgstr "No puede establecer el tamaño del búfer FD-> SD.\n" + +#, fuzzy +#~ msgid "Had %ld acl errors while doing backup\n" +#~ msgstr "Detectado %ld errores de acl al hacer copia de seguridad\n" + +#, fuzzy +#~ msgid "Had %ld xattr errors while doing backup\n" +#~ msgstr "Detectado %ld errores de xattr al hacer copia de seguridad\n" + +#~ msgid " Recursion turned off. Will not descend from %s into %s\n" +#~ msgstr "Recursión desactivado. No descenderá de %s dentro de %s\n" + +#, fuzzy +#~ msgid "" +#~ " %s is a different filesystem. Will not descend from %s into it.\n" +#~ msgstr "" +#~ "%s es un sistema de ficheros diferentes. No descenderá de %s dentro de " +#~ "%s\n" + +#~ msgid " Disallowed filesystem. Will not descend from %s into %s\n" +#~ msgstr "Sistema de Archivos no permitido. No descenderá de %s en %s\n" + +#~ msgid " Disallowed drive type. Will not descend into %s\n" +#~ msgstr "Tipo de unidad no permitido. No descenderá en %s\n" + +#~ msgid " Socket file skipped: %s\n" +#~ msgstr "Archivo de Socket omitido: %s\n" + +#~ msgid " Could not access \"%s\": ERR=%s\n" +#~ msgstr "No se pudo acceder a \"%s\": ERR=%s\n" + +#~ msgid " Could not follow link \"%s\": ERR=%s\n" +#~ msgstr "No se pudo seguir el enlace \"%s\": ERR=%s\n" + +#~ msgid " Could not stat \"%s\": ERR=%s\n" +#~ msgstr "No se pudo stat \"%s\": ERR=%s\n" + +#~ msgid " Unchanged file skipped: %s\n" +#~ msgstr "Archivo sin modificar omitido: %s\n" + +#~ msgid " Archive file not saved: %s\n" +#~ msgstr "Archivo no guardado: %s\n" + +#~ msgid " Could not open directory \"%s\": ERR=%s\n" +#~ msgstr "No se puede abrir directorio \"%s\": ERR=%s.\n" + +#~ msgid " Unknown file type %d; not saved: %s\n" +#~ msgstr " Tipo de archivo %d desconocido; no ha sido guardado: %s\n" + +#~ msgid " Cannot open \"%s\": ERR=%s.\n" +#~ msgstr "No se puede abrir \"%s\": ERR=%s.\n" + +#~ msgid "Network send error to SD. ERR=%s\n" +#~ msgstr "Error de red al enviar para SD. ERR=%s\n" + +#, fuzzy +#~ msgid "Windows Encrypted data not supported on this OS.\n" +#~ msgstr "flujo %s no suportado en este Cliente.\n" + +#~ msgid "Read error on file %s. ERR=%s\n" +#~ msgstr "Error de lectura en el archivo %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Too many errors. JobErrors=%d.\n" +#~ msgstr "Demasiados errores.\n" + +#~ msgid "Encryption padding error\n" +#~ msgstr "Error de relleno de cifrado\n" + +#~ msgid "Encryption error\n" +#~ msgstr "Error de cifrado\n" + +#~ msgid "Invalid file flags, no supported data stream type.\n" +#~ msgstr "" +#~ "Invalidas banderas de archivo, tipo de flujo de datos no soportado.\n" + +#, fuzzy +#~ msgid "Network send error to SD. Data=%s ERR=%s\n" +#~ msgstr "Error de red al enviar para SD. 
ERR=%s\n" + +#~ msgid "Compression deflateParams error: %d\n" +#~ msgstr "Error de compresión DeflateParams: %d\n" + +#~ msgid " Cannot open resource fork for \"%s\": ERR=%s.\n" +#~ msgstr "No se puede abrir recurso fork para \"%s\": ERR=%s.\n" + +#~ msgid "Compression deflate error: %d\n" +#~ msgstr "Error de compresión Deflate: %d\n" + +#~ msgid "Compression deflateReset error: %d\n" +#~ msgstr "Error de compresión DeflateReset: %d\n" + +#, fuzzy +#~ msgid "Compression LZO error: %d\n" +#~ msgstr "Error de compresión Deflate: %d\n" + +#~ msgid "VSS Writer (BackupComplete): %s\n" +#~ msgstr "VSS Writer (BackupComplete): %s\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: bfdjson [options] [config_file]\n" +#~ " -r get resource type \n" +#~ " -n get resource \n" +#~ " -l get only directives matching dirs (use with -r)\n" +#~ " -D get only data\n" +#~ " -c use as configuration file\n" +#~ " -d set debug level to \n" +#~ " -dt print a timestamp in debug output\n" +#~ " -t test configuration file and exit\n" +#~ " -v verbose user messages\n" +#~ " -? print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Version: %s (%s)\n" +#~ "\n" +#~ "Utilice: bacula-fd [-f -s] [-c archivo_de_configuración] [-d " +#~ "nivel_depuración]\n" +#~ " -c establecer archivo de configuración para archivo \n" +#~ " -d establecer el nivel de depuración para \n" +#~ " -dt imprimir timestamp en salida de depuración\n" +#~ " -f ejecutar en primer plano (para depuración)\n" +#~ " -g groupid\n" +#~ " -k keep readall capabilities\n" +#~ " -m imprimir salida kaboom para depuración)\n" +#~ " -s sin señales(para depuración)\n" +#~ " -t prueba - leer la configuración y salir\n" +#~ " -u userid\n" +#~ " -v mensajes de usuario detallados\n" +#~ " -? 
imprimir este mensaje.\n" +#~ "\n" + +#~ msgid "" +#~ "No File daemon resource defined in %s\n" +#~ "Without that I don't know who I am :-(\n" +#~ msgstr "" +#~ "Ninguno recurso File Daemon definido en %s\n" +#~ "Sin eso yo no sé quién soy :-(\n" + +#~ msgid "Only one Client resource permitted in %s\n" +#~ msgstr "Sólo un recurso de cliente permitido en %s\n" + +#~ msgid "" +#~ "Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined " +#~ "for File daemon in %s.\n" +#~ msgstr "" +#~ "Ninguno \"Certificado TLS de CA\" o \"Directorio de Certificado TLS de CA" +#~ "\" están definidos para el demonio File en %s.\n" + +#~ msgid "" +#~ "\"PKI Key Pair\" must be defined for File daemon \"%s\" in %s if either " +#~ "\"PKI Sign\" or \"PKI Encrypt\" are enabled.\n" +#~ msgstr "" +#~ "\"Par de Claves PKI\" debe estar definido para el demonio File \"%s\" en " +#~ "%s, si uno o otro \"Firma PKI\" o \"Cifrar PKI\" están habilitados.\n" + +#~ msgid "No Director resource defined in %s\n" +#~ msgstr "Recurso Director no definido en %s\n" + +#, fuzzy +#~ msgid "Encrypting sparse or offset data not supported.\n" +#~ msgstr "Cifrado de datos dispersos no soportado.\n" + +#~ msgid "Failed to initialize encryption context.\n" +#~ msgstr "Fallo al inicializar el contexto de cifrado.\n" + +#~ msgid "%s signature digest initialization failed\n" +#~ msgstr "Fallo la inicialización de la firma digest %s\n" + +#, fuzzy +#~ msgid "Unsupported cipher on this system.\n" +#~ msgstr "Tipo de cifrado especificados no soportado\n" + +#~ msgid "An error occurred while encrypting the stream.\n" +#~ msgstr "A ocurrido un error al cifrar el stream.\n" + +#~ msgid "Failed to allocate memory for crypto signature.\n" +#~ msgstr "No se pudo asignar memoria para la firma de cifrado.\n" + +#, fuzzy +#~ msgid "An error occurred while adding signer the stream.\n" +#~ msgstr "Se produjo un error al firmar el stream.\n" + +#~ msgid "An error occurred while signing the stream.\n" +#~ msgstr "Se produjo un error al firmar el stream.\n" + +#~ msgid "An error occurred finalizing signing the stream.\n" +#~ msgstr "Se produjo un error concluir la firma del stream.\n" + +#, fuzzy +#~ msgid "Command plugin \"%s\": no type in startBackupFile packet.\n" +#~ msgstr "Comando plugin \"%s\" regresó malo paquete startBackupFile.\n" + +#, fuzzy +#~ msgid "Command plugin \"%s\": no object_name in startBackupFile packet.\n" +#~ msgstr "Comando plugin \"%s\" regresó malo paquete startBackupFile.\n" + +#, fuzzy +#~ msgid "Command plugin \"%s\": no fname in startBackupFile packet.\n" +#~ msgstr "Comando plugin \"%s\" regresó malo paquete startBackupFile.\n" + +#~ msgid "Plugin save packet not found.\n" +#~ msgstr "Plugin guardar paquetes no encontrado.\n" + +#~ msgid "Plugin=%s not found.\n" +#~ msgstr "Plugin=%s no encontrado.\n" + +#~ msgid "Plugin createFile call failed. Stat=%d file=%s\n" +#~ msgstr "Fallo llamada plugin createFile. Stat=%d archivo=%s\n" + +#~ msgid "Plugin createFile call failed. Returned CF_ERROR file=%s\n" +#~ msgstr "Fallo llamada plugin createFile. 
Retorno CF_ERROR archivo=%s\n" + +#~ msgid "Could not create %s: ERR=%s\n" +#~ msgstr "No se pudo crear %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Command plugin: no fname in baculaCheckChanges packet.\n" +#~ msgstr "Comando plugin \"%s\" regresó malo paquete startBackupFile.\n" + +#, fuzzy +#~ msgid "Error while creating command string %s.\n" +#~ msgstr "" +#~ "Error al analizar los argumentos de línea de comandos, usando valores por " +#~ "defecto.\n" + +#, fuzzy +#~ msgid "Error while executing \"%s\" %s. %s %s\n" +#~ msgstr "A ocurrido un error al cifrar el stream.\n" + +#, fuzzy +#~ msgid "Unable to parse snapshot command output\n" +#~ msgstr "Fallo al conectar con el cliente.\n" + +#, fuzzy +#~ msgid "Unable to create snapshot record. ERR=%s\n" +#~ msgstr "No se puede crear hilo. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to create snapshot record, got %s\n" +#~ msgstr "No se puede obtener el registro Job para JobId=%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to delete snapshot record. ERR=%s\n" +#~ msgstr "No se pudo obtener registro de Job. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to delete snapshot record, got %s\n" +#~ msgstr "No se puede obtener el registro Job para JobId=%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to get snapshot record. ERR=%s\n" +#~ msgstr "No se pudo obtener registro de Job. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to get snapshot record, got %s\n" +#~ msgstr "No se puede obtener el registro Job para JobId=%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to parse command output\n" +#~ msgstr "Fallo al conectar con el cliente.\n" + +#, fuzzy +#~ msgid " Delete Snapshot for %s\n" +#~ msgstr "Fallo al generar VSS snapshots.\n" + +#, fuzzy +#~ msgid " Unable to delete snapshot of %s ERR=%s\n" +#~ msgstr "No se puede escribir última en %s: ERR=%s\n" + +#, fuzzy +#~ msgid " Create Snapshot for %s\n" +#~ msgstr "Fallo al generar VSS snapshots.\n" + +#, fuzzy +#~ msgid " Unable to create snapshot of %s ERR=%s\n" +#~ msgstr "No se puede escribir última en %s: ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Version: %s (%s)\n" +#~ "\n" +#~ "Usage: bacula-fd [-f -s] [-c config_file] [-d debug_level]\n" +#~ " -c use as configuration file\n" +#~ " -d [,] set debug level to , debug tags to \n" +#~ " -dt print a timestamp in debug output\n" +#~ " -f run in foreground (for debugging)\n" +#~ " -g groupid\n" +#~ " -k keep readall capabilities\n" +#~ " -m print kaboom output (for debugging)\n" +#~ " -s no signals (for debugging)\n" +#~ " -t test configuration file and exit\n" +#~ " -T set trace on\n" +#~ " -u userid\n" +#~ " -v verbose user messages\n" +#~ " -? print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Version: %s (%s)\n" +#~ "\n" +#~ "Utilice: bacula-fd [-f -s] [-c archivo_de_configuración] [-d " +#~ "nivel_depuración]\n" +#~ " -c establecer archivo de configuración para archivo \n" +#~ " -d establecer el nivel de depuración para \n" +#~ " -dt imprimir timestamp en salida de depuración\n" +#~ " -f ejecutar en primer plano (para depuración)\n" +#~ " -g groupid\n" +#~ " -k keep readall capabilities\n" +#~ " -m imprimir salida kaboom para depuración)\n" +#~ " -s sin señales(para depuración)\n" +#~ " -t prueba - leer la configuración y salir\n" +#~ " -u userid\n" +#~ " -v mensajes de usuario detallados\n" +#~ " -? 
imprimir este mensaje.\n" +#~ "\n" + +#~ msgid "-k option has no meaning without -u option.\n" +#~ msgstr "la opción -k no tiene sentido sin la opción -u.\n" + +#, fuzzy +#~ msgid "Disable Command \"%s\" not found.\n" +#~ msgstr "Registro FileSet \"%s\" no encontrado.\n" + +#~ msgid "PKI encryption/signing enabled but not compiled into Bacula.\n" +#~ msgstr "Cifrado o Firma PKI habilitado, pero no compilado en Bacula.\n" + +#~ msgid "Failed to allocate a new keypair object.\n" +#~ msgstr "No se ha podido asignar un nuevo objeto keypair.\n" + +#~ msgid "Failed to load public certificate for File daemon \"%s\" in %s.\n" +#~ msgstr "" +#~ "Fallo al cargar certificado publico para File Daemon \"%s\" en %s.\n" + +#~ msgid "Failed to load private key for File daemon \"%s\" in %s.\n" +#~ msgstr "Fallo al cargar llave privada para File Daemon \"%s\" en %s.\n" + +#~ msgid "" +#~ "Failed to load private key from file %s for File daemon \"%s\" in %s.\n" +#~ msgstr "" +#~ "Fallo al cargar llave privada desde el archivo %s para File Daemon \"%s\" " +#~ "en %s.\n" + +#~ msgid "" +#~ "Failed to load trusted signer certificate from file %s for File daemon " +#~ "\"%s\" in %s.\n" +#~ msgstr "" +#~ "Fallo al cargar certificado del firmante desde el archivo %s para File " +#~ "Daemon \"%s\" en %s.\n" + +#~ msgid "" +#~ "Failed to load master key certificate from file %s for File daemon \"%s\" " +#~ "in %s.\n" +#~ msgstr "" +#~ "Fallo al cargar certificado de llave maestro desde el archivo %s para " +#~ "File Daemon \"%s\" en %s.\n" + +#, fuzzy +#~ msgid "Failed to initialize TLS context for Console \"%s\" in %s.\n" +#~ msgstr "Fallo al inicializar el contexto TLS para la consola \"%s\".\n" + +#, fuzzy +#~ msgid "Expected a Cipher Type keyword, got: %s" +#~ msgstr "Esperaba una palabra clave Tipo Device, obtuvo: %s" + +#, fuzzy +#~ msgid "Cannot find any Console resource for remote access\n" +#~ msgstr "No se puede encontrar el recurso Console %s\n" + +#~ msgid "Bad Hello command from Director at %s. Len=%d.\n" +#~ msgstr "Malo comando Hello desde Director en %s. Len=%d\n" + +#~ msgid "Bad Hello command from Director at %s: %s\n" +#~ msgstr "Malo comando Hello desde Director en %s: %s\n" + +#~ msgid "Connection from unknown Director %s at %s rejected.\n" +#~ msgstr "Conexión desde Director %s desconocido en %s rechazada.\n" + +#, fuzzy +#~ msgid "SD connect failed: Bad Hello command\n" +#~ msgstr "Director rechazo comando Hello\n" + +#, fuzzy +#~ msgid "SD connect failed: Job name not found: %s\n" +#~ msgstr "Fallo al conectar a FD: Nombre del Job no encontrado: %s\n" + +#, fuzzy +#~ msgid "SD \"%s\" tried to connect two times.\n" +#~ msgstr "Fallo al conectar con el cliente.\n" + +#, fuzzy +#~ msgid "Command: \"%s\" is disabled.\n" +#~ msgstr "Job \"%s\" %sabled\n" + +#, fuzzy +#~ msgid "Bad command from %s. Len=%d.\n" +#~ msgstr "Malo comando Hello desde Director en %s. 
Len=%d\n" + +#~ msgid "2902 Error scanning cancel command.\n" +#~ msgstr "2902 Error escaneando comando cancelar.\n" + +#~ msgid "2901 Job %s not found.\n" +#~ msgstr "2901 Job %s no encontrado.\n" + +#, fuzzy +#~ msgid "2001 Job \"%s\" marked to be %s.\n" +#~ msgstr "2001 Job %s marcado para ser cancelado.\n" + +#, fuzzy +#~ msgid "2991 Bad setbandwidth command: %s\n" +#~ msgstr "2991 Comando setdebug malo: %s\n" + +#~ msgid "2991 Bad setdebug command: %s\n" +#~ msgstr "2991 Comando setdebug malo: %s\n" + +#~ msgid "Bad estimate command: %s" +#~ msgstr "Malo comando estimate: %s" + +#~ msgid "2992 Bad estimate command.\n" +#~ msgstr "2992 Malo comando estimación.\n" + +#~ msgid "Bad Job Command: %s" +#~ msgstr "Malo Comando Job: %s" + +#~ msgid "Bad RunBeforeJob command: %s\n" +#~ msgstr "Malo comando RunBeforeJob: %s\n" + +#~ msgid "2905 Bad RunBeforeJob command.\n" +#~ msgstr "2905 Malo comando RunBeforeJob.\n" + +#~ msgid "2905 Bad RunBeforeNow command.\n" +#~ msgstr "2905 Malo comando RunBeforeNow.\n" + +#~ msgid "Bad RunAfter command: %s\n" +#~ msgstr "Malo comando RunAfter: %s\n" + +#~ msgid "2905 Bad RunAfterJob command.\n" +#~ msgstr "2905 Malo comando RunAfterJob.\n" + +#~ msgid "Bad RunScript command: %s\n" +#~ msgstr "Malo comando RunScript: %s\n" + +#~ msgid "2905 Bad RunScript command.\n" +#~ msgstr "2905 Malo comando RunScript.\n" + +#, fuzzy +#~ msgid "Bad RestoreObject command: %s\n" +#~ msgstr "Malo comando storage: %s" + +#, fuzzy +#~ msgid "2909 Bad RestoreObject command.\n" +#~ msgstr "2905 Malo comando RunBeforeJob.\n" + +#, fuzzy +#~ msgid "Plugin Directory not defined. Cannot use plugin: \"%s\"\n" +#~ msgstr "Directorio de Plugin no definido. No se puede usar plug-in: \"%\"\n" + +#~ msgid "Error running program: %s. stat=%d: ERR=%s\n" +#~ msgstr "Error al ejecutar el programa: %s. stat=%d: ERR=%s\n" + +#~ msgid "Cannot open FileSet input file: %s. ERR=%s\n" +#~ msgstr "No se puede abrir archivo de entrada FileSet: %s. ERR=%s\n" + +#~ msgid "REGEX %s compile error. ERR=%s\n" +#~ msgstr "Error de compilación REGEX %s. ERR=%s\n" + +#~ msgid "Invalid FileSet command: %s\n" +#~ msgstr "Comando FilseSet invalido: %s\n" + +#~ msgid "Unknown include/exclude option: %c\n" +#~ msgstr "Opción incluir/excluir desconocida: %c\n" + +#~ msgid "" +#~ "DIR and FD clocks differ by %lld seconds, FD automatically compensating.\n" +#~ msgstr "" +#~ "Relojes de DIR y FD difieren por %lld segundos, FD compensando " +#~ "automáticamente.\n" + +#~ msgid "Unknown backup level: %s\n" +#~ msgstr "Nivel de copia de seguridad desconocido: %s\n" + +#~ msgid "Bad level command: %s\n" +#~ msgstr "Malo comando nivel: %s\n" + +#~ msgid "Bad session command: %s" +#~ msgstr "Malo comando sesión: %s" + +#~ msgid "Bad storage command: %s" +#~ msgstr "Malo comando storage: %s" + +#~ msgid "Failed to connect to Storage daemon: %s:%d\n" +#~ msgstr "Error al conectar con el demonio de Storage: %s:%d\n" + +#, fuzzy +#~ msgid "Failed connect from Storage daemon. SD bsock=NULL.\n" +#~ msgstr "Error al conectar con demonio Storage.\n" + +#, fuzzy +#~ msgid "ACL support not configured for Client.\n" +#~ msgstr "Soporte ACL no configurado para su máquina. 
\n" + +#, fuzzy +#~ msgid "XATTR support not configured for Client.\n" +#~ msgstr "Soporte XATTR no está configurado para su máquina.\n" + +#~ msgid "Cannot contact Storage daemon\n" +#~ msgstr "No puede contactar demonio Storage\n" + +#~ msgid "Bad response to append open: %s\n" +#~ msgstr "Mala respuesta para añadir abierto: %s\n" + +#~ msgid "Bad response from stored to open command\n" +#~ msgstr "Mala respuesta desde almacén para comando abrir\n" + +#~ msgid "Append Close with SD failed.\n" +#~ msgstr "Añadir Cierre con SD fallido.\n" + +#, fuzzy +#~ msgid "Bad status %d %c returned from Storage Daemon.\n" +#~ msgstr "Mal estado %d regresado desde demonio Storage.\n" + +#~ msgid "2994 Bad verify command: %s\n" +#~ msgstr "2994 Malo comando verificar: %s\n" + +#~ msgid "2994 Bad verify level: %s\n" +#~ msgstr "2994 Malo nivel de verificar: %s\n" + +#~ msgid "Bad replace command. CMD=%s\n" +#~ msgstr "Comando replace malo. CMD=%s\n" + +#~ msgid "Bad where regexp. where=%s\n" +#~ msgstr "Mala regexp where. where=%s\n" + +#~ msgid "Improper calling sequence.\n" +#~ msgstr "Secuencia de llamada impropia.\n" + +#~ msgid "Bad response to SD read open: %s\n" +#~ msgstr "Mala respuesta para SD leer abrir: %s\n" + +#~ msgid "Bad response from stored to read open command\n" +#~ msgstr "Mala respuesta desde storage para comando leer abir\n" + +#, fuzzy +#~ msgid "" +#~ "Bad response from SD to %s command. Wanted %s, got len=%ld msg=\"%s\"\n" +#~ msgstr "Mala respuesta al comando %s. Esperaba %s, obtuvo %s\n" + +#, fuzzy +#~ msgid "Bad response from SD to %s command. Wanted %s, got SIGNAL %s\n" +#~ msgstr "Mala respuesta al comando %s. Esperaba %s, obtuvo %s\n" + +#~ msgid "" +#~ "Size of data or stream of %s not correct. Original %s, restored %s.\n" +#~ msgstr "" +#~ "Tamaño de datos o flujo de %s no es correcto. Original %s, restaurado %s\n" + +#, fuzzy +#~ msgid "Invalid length of Finder Info (got %d, wanted 32)\n" +#~ msgstr "Longitud no válida de Buscador de Información (obtuvo %d, no 32)\n" + +#, fuzzy +#~ msgid "Error setting Finder Info on \"%s\"\n" +#~ msgstr "No se pudo establecer Finder Info en %s\n" + +#~ msgid "Unknown stream=%d ignored. This shouldn't happen!\n" +#~ msgstr "Flujo desconocido=%d ignorado. Esto no debería suceder!\n" + +#, fuzzy +#~ msgid "LZO init failed\n" +#~ msgstr "Negociación TLS fallida\n" + +#~ msgid "Record header scan error: %s\n" +#~ msgstr "Error analizando registro de cabecera: %s\n" + +#~ msgid "Data record error. ERR=%s\n" +#~ msgstr "Error de registro de datos. 
ERR=%s\n" + +#~ msgid "Actual data size %d not same as header %d\n" +#~ msgstr "Tamaño actual de los datos %d no igual a cabecera %d\n" + +#~ msgid "%s stream not supported on this Client.\n" +#~ msgstr "flujo %s no suportado en este Cliente.\n" + +#~ msgid "Unexpected cryptographic session data stream.\n" +#~ msgstr "Inesperado flujo de datos de sesión criptográfica.\n" + +#~ msgid "" +#~ "No private decryption keys have been defined to decrypt encrypted backup " +#~ "data.\n" +#~ msgstr "" +#~ "Ninguna claves privadas de descifrado se ha definido para descifrar los " +#~ "datos cifrados de las copia de seguridad.\n" + +#~ msgid "Could not create digest.\n" +#~ msgstr "No es posible crear sumario.\n" + +#~ msgid "Missing private key required to decrypt encrypted backup data.\n" +#~ msgstr "" +#~ "Falta la clave privada requerida para descifrar los datos del respaldo " +#~ "cifrado.\n" + +#~ msgid "Decrypt of the session key failed.\n" +#~ msgstr "Fallo al descifrar la clave de sesión.\n" + +#, fuzzy +#~ msgid "Signer not found. Decryption failed.\n" +#~ msgstr "Fallo en creación de firma" + +#, fuzzy +#~ msgid "Unsupported digest algorithm. Decrypt failed.\n" +#~ msgstr "Algoritmo de resumen no soportado" + +#, fuzzy +#~ msgid "Unsupported encryption algorithm. Decrypt failed.\n" +#~ msgstr "Algoritmo de cifrado no soportado" + +#, fuzzy +#~ msgid "" +#~ "An error=%d occurred while decoding encrypted session data stream: ERR=" +#~ "%s\n" +#~ msgstr "" +#~ "Se produjo un error al descodificar flujo de datos de sesión encriptados: " +#~ "%s\n" + +#~ msgid "Missing encryption session data stream for %s\n" +#~ msgstr "Falta cifrado de flujo de datos de sesión para %s\n" + +#~ msgid "Failed to initialize decryption context for %s\n" +#~ msgstr "Fallo al inicializar el contexto de descifrado para %s\n" + +#~ msgid "Cannot open resource fork for %s.\n" +#~ msgstr "No se puede abrir recursos fork para %s.\n" + +#~ msgid "Unexpected cryptographic signature data stream.\n" +#~ msgstr "Inesperado signatura de datos de sesión criptográfica.\n" + +#~ msgid "Failed to decode message signature for %s\n" +#~ msgstr "No se ha podido descifrar la firma del mensaje para %s\n" + +#~ msgid "Encountered %ld acl errors while doing restore\n" +#~ msgstr "Detectado %ld errores de acl mientras que hace restaurar\n" + +#~ msgid "Encountered %ld xattr errors while doing restore\n" +#~ msgstr "Detectado %ld errores de xattr al hacer restauración\n" + +#~ msgid "" +#~ "%d non-supported data streams and %d non-supported attrib streams " +#~ "ignored.\n" +#~ msgstr "" +#~ "%d incompatible flujos de datos y %d incompatible atributos de flujos " +#~ "ignorados.\n" + +#~ msgid "%d non-supported resource fork streams ignored.\n" +#~ msgstr "%d incompatible flujo de recurso fork ignorado.\n" + +#~ msgid "%d non-supported Finder Info streams ignored.\n" +#~ msgstr "%d incompatible flujo Finder Info ignorado.\n" + +#~ msgid "%d non-supported acl streams ignored.\n" +#~ msgstr "%d incompatible flujo de acl ignorado.\n" + +#~ msgid "%d non-supported crypto streams ignored.\n" +#~ msgstr "%d incompatible flujo de cifrado ignorado.\n" + +#~ msgid "%d non-supported xattr streams ignored.\n" +#~ msgstr "ignorado %d no soportado flujo xattr.\n" + +#~ msgid "None" +#~ msgstr "Ninguno" + +#~ msgid "Zlib errno" +#~ msgstr "Zlib error" + +#~ msgid "Zlib stream error" +#~ msgstr "Zlib error de stream" + +#~ msgid "Zlib data error" +#~ msgstr "Zlib error de datos" + +#~ msgid "Zlib memory error" +#~ msgstr "Zlib error de memoria" + +#~ 
msgid "Zlib buffer error" +#~ msgstr "Zlib error de buffer" + +#~ msgid "Zlib version error" +#~ msgstr "Zlib error de version" + +#~ msgid "*none*" +#~ msgstr "*ninguno*" + +#~ msgid "Seek to %s error on %s: ERR=%s\n" +#~ msgstr "Tratar de %s error en %s: ERR=%s\n" + +#, fuzzy +#~ msgid "LZO uncompression error on file %s. ERR=%d\n" +#~ msgstr "Error de descompresión en el archivo %s. ERR=%s\n" + +#~ msgid "Uncompression error on file %s. ERR=%s\n" +#~ msgstr "Error de descompresión en el archivo %s. ERR=%s\n" + +#~ msgid "GZIP data stream found, but GZIP not configured!\n" +#~ msgstr "Flujo de datos GZIP encontrado, pero GZIP no está configurado!\n" + +#~ msgid "Write error in Win32 Block Decomposition on %s: %s\n" +#~ msgstr "Error de escritura en Bloque de Descomposición Win32 en %s: %s\n" + +#, fuzzy +#~ msgid "Wrong write size error at byte=%lld block=%d wanted=%d wrote=%d\n" +#~ msgstr "Error de escritura en Bloque de Descomposición Win32 en %s: %s\n" + +#, fuzzy +#~ msgid "" +#~ "Write error at byte=%lld block=%d write_len=%d lerror=%d on %s: ERR=%s\n" +#~ msgstr "Error de escritura en Bloque de Descomposición Win32 en %s: %s\n" + +#~ msgid "Decryption error\n" +#~ msgstr "Error de descifrado\n" + +#~ msgid "Logic error: output file should be open\n" +#~ msgstr "Error lógico: el archivo de salida debe estar abierto\n" + +#~ msgid "Logic error: output file should not be open\n" +#~ msgstr "Error lógica: archivo de salida no debe estar abierto\n" + +#~ msgid "Decryption error. buf_len=%d decrypt_len=%d on file %s\n" +#~ msgstr "Error de descifrado. buf_len=%d decrypt_len=%d en el archivo %s\n" + +#~ msgid "Missing cryptographic signature for %s\n" +#~ msgstr "Falta la firma criptográfica para %s\n" + +#~ msgid "Signature validation failed for file %s: ERR=%s\n" +#~ msgstr "Validación de la firma fallida para el archivo %s: ERR=%s\n" + +#~ msgid "Digest one file failed for file: %s\n" +#~ msgstr "Digest de un archivo fallido para el archivo: %s\n" + +#~ msgid "Signature validation failed for %s: %s\n" +#~ msgstr "Validación de la firma fallida para %s: %s\n" + +#, fuzzy +#~ msgid "Daemon started %s. 
Jobs: run=%d running=%d.\n" +#~ msgstr "Demonio iniciado %s, %d Job ejecutando desde el inicio.\n" + +#, fuzzy +#~ msgid "" +#~ " Sizes: boffset_t=%d size_t=%d debug=%s trace=%d mode=%d,%d bwlimit=%skB/" +#~ "s\n" +#~ msgstr "Sizeof: boffset_t=%d size_t=%d debug=%d trace=%d\n" + +#, fuzzy +#~ msgid "Director connected %sat: %s\n" +#~ msgstr "Director conectado en: %s\n" + +#~ msgid "JobId %d Job %s is running.\n" +#~ msgstr "JobId %d Job %s se está ejecutando.\n" + +#, fuzzy +#~ msgid " %s %s Job started: %s\n" +#~ msgstr " %s%s Job iniciado: %s\n" + +#, fuzzy +#~ msgid "" +#~ " Files=%s Bytes=%s AveBytes/sec=%s LastBytes/sec=%s Errors=%d\n" +#~ " Bwlimit=%s ReadBytes=%s\n" +#~ msgstr "Archivos=%s Bytes=%s Bytes/sec=%s Errores=%d\n" + +#, fuzzy +#~ msgid " Files: Examined=%s Backed up=%s\n" +#~ msgstr "Archivos examinados=%s\n" + +#~ msgid " Processing file: %s\n" +#~ msgstr "Procesando archivo: %s\n" + +#~ msgid " SDSocket closed.\n" +#~ msgstr "SDSocket cerrado.\n" + +#~ msgid "====\n" +#~ msgstr "====\n" + +#~ msgid "Bad .status command: %s\n" +#~ msgstr "Comando .status malo: %s\n" + +#~ msgid "2900 Bad .status command, missing argument.\n" +#~ msgstr "2900 Malo comando .status, falta argumento.\n" + +#~ msgid "2900 Bad .status command, wrong argument.\n" +#~ msgstr "2900 Malo comando .status, malo argumento.\n" + +#~ msgid "Cannot malloc %d network read buffer\n" +#~ msgstr "No se pudo leer búfer de red malloc %d\n" + +#~ msgid " Could not access %s: ERR=%s\n" +#~ msgstr "No es posible acceder %s: ERR=%s\n" + +#~ msgid " Could not follow link %s: ERR=%s\n" +#~ msgstr " No es posible seguir el enlace %s: ERR=%s\n" + +#~ msgid " Could not stat %s: ERR=%s\n" +#~ msgstr "No se pudo stat %s: ERR=%s\n" + +#~ msgid " Archive file skipped: %s\n" +#~ msgstr "Archivo Archive omitido: %s\n" + +#~ msgid " Recursion turned off. Directory skipped: %s\n" +#~ msgstr "Recursión apagado. Directorio omitido: %s\n" + +#~ msgid " File system change prohibited. Directory skipped: %s\n" +#~ msgstr "" +#~ "Prohibido el cambio de sistema de archivos. Directorio omitido: %s\n" + +#~ msgid " Could not open directory %s: ERR=%s\n" +#~ msgstr "No se pudo abrir el directorio %s: ERR=%s\n" + +#~ msgid " Unknown file type %d: %s\n" +#~ msgstr "Tipo de archivo desconocido %d: %s\n" + +#~ msgid "Network error in send to Director: ERR=%s\n" +#~ msgstr "Error de red en enviar al Director: ERR=%s\n" + +#~ msgid " Cannot open %s: ERR=%s.\n" +#~ msgstr "No se puede abrir %s: ERR=%s.\n" + +#~ msgid " Cannot open resource fork for %s: ERR=%s.\n" +#~ msgstr "No se puede abrir recursos fork para %s: ERR=%s.\n" + +#~ msgid "Error reading file %s: ERR=%s\n" +#~ msgstr "Error leyendo archivo %s: ERR=%s\n" + +#, fuzzy +#~ msgid " st_size differs on \"%s\". Vol: %s File: %s\n" +#~ msgstr "st_size diferentes. Cat: %s Archivo: %s\n" + +#~ msgid "Storage command not issued before Verify.\n" +#~ msgstr "Comando Storage no publicado antes de Verificar.\n" + +#, fuzzy +#~ msgid " %s differs on \"%s\". File=%s Vol=%s\n" +#~ msgstr "%s es diferente. 
Archivo=%s Cat=%s\n" + +#, fuzzy +#~ msgid "Verification of encrypted file data is not supported.\n" +#~ msgstr "flujo %s no suportado en este Cliente.\n" + +#, fuzzy +#~ msgid "Unable to stat file \"%s\": ERR=%s\n" +#~ msgstr "No se puede abrir el archivo \"%s\": ERR=%s\n" + +#~ msgid "" +#~ "Can't restore ACLs of %s - incompatible acl stream encountered - %d\n" +#~ msgstr "" +#~ "No puede restaurar ACL de %s - incompatible flujo acl encontrado - %d\n" + +#~ msgid "" +#~ "Can't restore Extended Attributes of %s - incompatible xattr stream " +#~ "encountered - %d\n" +#~ msgstr "" +#~ "No se puede restaurar los atributos extendidos de %s - incompatible flujo " +#~ "xattr encontrado - %d\n" + +#~ msgid "Failed to serialize extended attributes on file \"%s\"\n" +#~ msgstr "" +#~ "No se pudo serializar los atributos extendidos en el archivo \"%s\"\n" + +#, fuzzy +#~ msgid "Illegal empty xattr attribute name\n" +#~ msgstr "Carácter ilegal en el nombre del Volumen \"%s\"\n" + +#~ msgid "Illegal xattr stream, no XATTR_MAGIC on file \"%s\"\n" +#~ msgstr "Flujo xattr ilegal, no hay XATTR_MAGIC en el archivo \"%s\"\n" + +#, fuzzy +#~ msgid "Illegal xattr stream, xattr name length <= 0 on file \"%s\"\n" +#~ msgstr "" +#~ "Ilegal flujo xattr, fallo al analizar flujo xattr en el archivo \"%s\"\n" + +#, fuzzy +#~ msgid "pioctl VIOCGETAL error on file \"%s\": ERR=%s\n" +#~ msgstr "acl(SETACL) error en el archivo \"%s\": ERR=%s\n" + +#, fuzzy +#~ msgid "pioctl VIOCSETAL error on file \"%s\": ERR=%s\n" +#~ msgstr "acl(SETACL) error en el archivo \"%s\": ERR=%s\n" + +#~ msgid "pathconf error on file \"%s\": ERR=%s\n" +#~ msgstr "error de pathconf en el archivo \"%s\": ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "Trying to restore acl on file \"%s\" on filesystem without %s acl " +#~ "support\n" +#~ msgstr "" +#~ "Tratando de restaurar acl en el archivo \"%s\" en sistema de ficheros sin " +#~ "soporte a acl\n" + +#~ msgid "Failed to convert %d into namespace on file \"%s\"\n" +#~ msgstr "No se pudo convertir %d al namespace en el archivo \"%s\"\n" + +#~ msgid "acl_to_text error on file \"%s\": ERR=%s\n" +#~ msgstr "acl_to_text error en el archivo \"%s\": ERR=%s\n" + +#~ msgid "acl_get_file error on file \"%s\": ERR=%s\n" +#~ msgstr "error de acl_get_file en el archivo \"%s\": ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "acl_delete_def_file error on file \"%s\": filesystem doesn't support " +#~ "ACLs\n" +#~ msgstr "acl_delete_def_file error en el archivo \"%s\": ERR=%s\n" + +#~ msgid "acl_delete_def_file error on file \"%s\": ERR=%s\n" +#~ msgstr "acl_delete_def_file error en el archivo \"%s\": ERR=%s\n" + +#~ msgid "acl_from_text error on file \"%s\": ERR=%s\n" +#~ msgstr "acl_from_text error en el archivo \"%s\": ERR=%s\n" + +#, fuzzy +#~ msgid "acl_set_file error on file \"%s\": filesystem doesn't support ACLs\n" +#~ msgstr "acl_set_file error en el archivo \"%s\": ERR=%s\n" + +#~ msgid "acl_set_file error on file \"%s\": ERR=%s\n" +#~ msgstr "acl_set_file error en el archivo \"%s\": ERR=%s\n" + +#~ msgid "extattr_list_link error on file \"%s\": ERR=%s\n" +#~ msgstr "error de extattr_list_link en el archivo \"%s\": ERR=%s\n" + +#~ msgid "extattr_get_link error on file \"%s\": ERR=%s\n" +#~ msgstr "error de extattr_get_link en el archivo \"%s\": ERR=%s\n" + +#~ msgid "Failed to split %s into namespace and name part on file \"%s\"\n" +#~ msgstr "" +#~ "No se ha podido separar %s en el namespace y parte del nombre en el " +#~ "archivo \"%s\"\n" + +#~ msgid "Failed to convert %s into namespace on file \"%s\"\n" +#~ 
msgstr "No se pudo convertir %s al namespace en el archivo \"%s\"\n" + +#~ msgid "extattr_set_link error on file \"%s\": ERR=%s\n" +#~ msgstr "error de extattr_set_link en el archivo \"%s\": ERR=%s\n" + +#~ msgid "acl_valid error on file \"%s\": ERR=%s\n" +#~ msgstr "error de acl_valid en el archivo \"%s\": ERR=%s\n" + +#~ msgid "llistxattr error on file \"%s\": ERR=%s\n" +#~ msgstr "error de llistxattr en el archivo \"%s\": ERR=%s\n" + +#~ msgid "lgetxattr error on file \"%s\": ERR=%s\n" +#~ msgstr "error de lgetxattr en el archivo \"%s\": ERR=%s\n" + +#, fuzzy +#~ msgid "setxattr error on file \"%s\": filesystem doesn't support XATTR\n" +#~ msgstr "" +#~ "Tratando de restaurar acl en el archivo \"%s\" en sistema de ficheros sin " +#~ "soporte a acl\n" + +#, fuzzy +#~ msgid "setxattr error on file \"%s\": ERR=%s\n" +#~ msgstr "error de lsetxattr en el archivo \"%s\": ERR=%s\n" + +#~ msgid "" +#~ "Trying to restore acl on file \"%s\" on filesystem without acl support\n" +#~ msgstr "" +#~ "Tratando de restaurar acl en el archivo \"%s\" en sistema de ficheros sin " +#~ "soporte a acl\n" + +#, fuzzy +#~ msgid "" +#~ "Trying to restore POSIX acl on file \"%s\" on filesystem without aclent " +#~ "acl support\n" +#~ msgstr "" +#~ "Tratando de restaurar acl en el archivo \"%s\" en sistema de ficheros sin " +#~ "soporte a acl aclent\n" + +#, fuzzy +#~ msgid "" +#~ "Trying to restore NFSv4 acl on file \"%s\" on filesystem without ace acl " +#~ "support\n" +#~ msgstr "" +#~ "Tratando de restaurar acl en el archivo \"%s\" en sistema de ficheros sin " +#~ "soporte a acl ace\n" + +#, fuzzy +#~ msgid "Unable to get status on xattr \"%s\" on file \"%s\": ERR=%s\n" +#~ msgstr "" +#~ "No se puede obtener estado en xattr %s en el archivo \"%s\": ERR=%s\n" + +#, fuzzy +#~ msgid "Unsupported extended attribute type: %i for \"%s\" on file \"%s\"\n" +#~ msgstr "Fallo al restablecer atributos extendidos en el archivo \"%s\"\n" + +#, fuzzy +#~ msgid "Failed to send extended attribute \"%s\" on file \"%s\"\n" +#~ msgstr "Fallo al restablecer atributos extendidos en el archivo \"%s\"\n" + +#~ msgid "Failed to restore extensible attributes on file \"%s\"\n" +#~ msgstr "Fallo al restablecer atributos extensible en el archivo \"%s\"\n" + +#~ msgid "Failed to restore extended attributes on file \"%s\"\n" +#~ msgstr "Fallo al restablecer atributos extendidos en el archivo \"%s\"\n" + +#~ msgid "acl_get error on file \"%s\": ERR=%s\n" +#~ msgstr "error de acl_get en el archivo \"%s\": ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to get xattr acl on file \"%s\": ERR=%s\n" +#~ msgstr "No se puede abrir xattr espacio en el archivo \"%s\": ERR=%s\n" + +#~ msgid "Unable to get acl on xattr %s on file \"%s\": ERR=%s\n" +#~ msgstr "No se puede obtener acl en xattr %s en el archivo \"%s\": ERR=%s\n" + +#~ msgid "Unable to get acl text on xattr %s on file \"%s\": ERR=%s\n" +#~ msgstr "" +#~ "No se puede obtener acl texto en xattr %s en el archivo \"%s\": ERR=%s\n" + +#~ msgid "Unable to convert acl from text on file \"%s\"\n" +#~ msgstr "No se puede convertir acl de texto en el archivo \"%s\"\n" + +#~ msgid "Unable to restore acl of xattr %s on file \"%s\": ERR=%s\n" +#~ msgstr "" +#~ "No se puede restablecer acl de xattr %s en el archivo \"%s\": ERR=%s\n" + +#~ msgid "acl_fromtext error on file \"%s\": ERR=%s\n" +#~ msgstr "error de acl_fromtext en el archivo \"%s\": ERR=%s\n" + +#~ msgid "wrong encoding of acl type in acl stream on file \"%s\"\n" +#~ msgstr "" +#~ "codificación errónea del tipo de ACL en el flujo de ACL en el 
archivo \"%s" +#~ "\" \n" + +#~ msgid "acl_set error on file \"%s\": ERR=%s\n" +#~ msgstr "error de acl_set en el archivo \"%s\": ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to open xattr on file \"%s\": ERR=%s\n" +#~ msgstr "No se puede abrir xattr %s en el archivo \"%s\": ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to list the xattr on file \"%s\": ERR=%s\n" +#~ msgstr "No se puede listar el xattr espacio en el archivo \"%s\": ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to close xattr list on file \"%s\": ERR=%s\n" +#~ msgstr "No se puede abrir xattr %s en el archivo \"%s\": ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to stat xattr \"%s\" on file \"%s\": ERR=%s\n" +#~ msgstr "No se puede abrir xattr %s en el archivo \"%s\": ERR=%s\n" + +#~ msgid "Unable to open file \"%s\": ERR=%s\n" +#~ msgstr "No se puede abrir el archivo \"%s\": ERR=%s\n" + +#~ msgid "Unable to link xattr %s to %s on file \"%s\": ERR=%s\n" +#~ msgstr "No se puede link xattr %s para %s en el archivo \"%s\": ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to open attribute \"%s\" at file \"%s\": ERR=%s\n" +#~ msgstr "No se puede abrir xattr %s en el archivo \"%s\": ERR=%s\n" + +#~ msgid "Unable to restore data of xattr %s on file \"%s\": ERR=%s\n" +#~ msgstr "" +#~ "No se puede restaurar los datos de xattr %s en el archivo \"%s\": ERR=%s\n" + +#, fuzzy +#~ msgid "Unsupported xattr type %s on file \"%s\"\n" +#~ msgstr "No es posible leer el contenido de xattr %s en el archivo \"%s\"\n" + +#~ msgid "Unable to restore owner of xattr %s on file \"%s\": ERR=%s\n" +#~ msgstr "" +#~ "No es posible restablecer el propietario de xattr %s en el archivo \"%s" +#~ "\": ERR=%s\n" + +#~ msgid "Unable to restore filetimes of xattr %s on file \"%s\": ERR=%s\n" +#~ msgstr "" +#~ "No es posible restablecer filetimes de xattr %s en el archivo \"%s\": ERR=" +#~ "%s\n" + +#~ msgid "Unable to set file owner %s: ERR=%s\n" +#~ msgstr "No se pudo establecer propietario del archivo %s: ERR=%s\n" + +#~ msgid "Unable to set file modes %s: ERR=%s\n" +#~ msgstr "No se pudo establecer modos del archivo %s: ERR=%s\n" + +#~ msgid "Unable to set file times %s: ERR=%s\n" +#~ msgstr "No se pudo establecer tiempos del archivo %s: ERR=%s\n" + +#~ msgid "" +#~ "File size of restored file %s not correct. Original %s, restored %s.\n" +#~ msgstr "" +#~ "Tamaño del archivo %s restaurado no es correcto. 
Original %s, restaurado " +#~ "%s.\n" + +#~ msgid "Unable to set file flags %s: ERR=%s\n" +#~ msgstr "No es posible establecer banderas en archivo %s: ERR=%s\n" + +#~ msgid "Unix attributes" +#~ msgstr "Atributos Unix" + +#~ msgid "File data" +#~ msgstr "Archivo de datos" + +#~ msgid "MD5 digest" +#~ msgstr "Sumario MD5" + +#~ msgid "GZIP data" +#~ msgstr "GZIP datos" + +#, fuzzy +#~ msgid "Compressed data" +#~ msgstr "Datos dispersos" + +#~ msgid "Extended attributes" +#~ msgstr "Atributos extendidos" + +#~ msgid "Sparse data" +#~ msgstr "Datos dispersos" + +#~ msgid "GZIP sparse data" +#~ msgstr "Datos GZIP dispersos" + +#, fuzzy +#~ msgid "Compressed sparse data" +#~ msgstr "Datos GZIP dispersos" + +#~ msgid "Program names" +#~ msgstr "Programa de nombres" + +#~ msgid "Program data" +#~ msgstr "Datos de programa" + +#~ msgid "SHA1 digest" +#~ msgstr "Sumario SHA1" + +#~ msgid "Win32 data" +#~ msgstr "Win32 datos" + +#~ msgid "Win32 GZIP data" +#~ msgstr "Win32 GZIP datos" + +#, fuzzy +#~ msgid "Win32 compressed data" +#~ msgstr "Win32 datos" + +#~ msgid "MacOS Fork data" +#~ msgstr "Datos rama MacOS" + +#~ msgid "HFS+ attribs" +#~ msgstr "HFS+ attribs" + +#~ msgid "Standard Unix ACL attribs" +#~ msgstr "ACL estándar de Unix attribs" + +#~ msgid "Default Unix ACL attribs" +#~ msgstr "ACL por defecto de Unix attribs" + +#~ msgid "SHA256 digest" +#~ msgstr "Sumario SHA256" + +#~ msgid "SHA512 digest" +#~ msgstr "Sumario SAH512" + +#~ msgid "Signed digest" +#~ msgstr "Sumario Firmado" + +#~ msgid "Encrypted File data" +#~ msgstr "Cifrado de Archivo de datos" + +#~ msgid "Encrypted Win32 data" +#~ msgstr "Cifrado de datos Win32" + +#~ msgid "Encrypted session data" +#~ msgstr "Cifrado de datos de sesiones " + +#~ msgid "Encrypted GZIP data" +#~ msgstr "Cifrado de datos GZIP" + +#, fuzzy +#~ msgid "Encrypted compressed data" +#~ msgstr "Cifrado de datos de sesiones " + +#~ msgid "Encrypted Win32 GZIP data" +#~ msgstr "Cifrado de datos Win32 GZIP" + +#, fuzzy +#~ msgid "Encrypted Win32 Compressed data" +#~ msgstr "Cifrado de datos Win32" + +#~ msgid "Encrypted MacOS fork data" +#~ msgstr "Datos encriptados rama MacOS" + +#, fuzzy +#~ msgid "Plugin Name" +#~ msgstr "Opciones de Plug-in" + +#, fuzzy +#~ msgid "Plugin Data" +#~ msgstr "Opciones de Plug-in" + +#, fuzzy +#~ msgid "Restore Object" +#~ msgstr "Restauración OK" + +#, fuzzy +#~ msgid "AIX ACL attribs" +#~ msgstr "Atributos ACL específicos de AIX" + +#, fuzzy +#~ msgid "Darwin ACL attribs" +#~ msgstr "Atributos ACL específicos de Darwin" + +#, fuzzy +#~ msgid "FreeBSD Default ACL attribs" +#~ msgstr "Atributos ACL por defecto específicos de FreeBSD" + +#, fuzzy +#~ msgid "FreeBSD Access ACL attribs" +#~ msgstr "Atributos ACL de acceso específicos de FreeBSD" + +#, fuzzy +#~ msgid "HPUX ACL attribs" +#~ msgstr "Atributos ACL específicos de HPUX" + +#, fuzzy +#~ msgid "Irix Default ACL attribs" +#~ msgstr "Atributos ACL por defecto específicos de Irix" + +#, fuzzy +#~ msgid "Irix Access ACL attribs" +#~ msgstr "Atributos ACL de acceso específicos de Irix" + +#, fuzzy +#~ msgid "Linux Default ACL attribs" +#~ msgstr "Atributos ACL por defecto específicos de Linux" + +#, fuzzy +#~ msgid "Linux Access ACL attribs" +#~ msgstr "Atributos ACL de acceso específicos de Linux" + +#, fuzzy +#~ msgid "TRU64 Default ACL attribs" +#~ msgstr "Atributos ACL por defecto específicos de Irix" + +#, fuzzy +#~ msgid "TRU64 Access ACL attribs" +#~ msgstr "Atributos ACL de acceso específicos de Irix" + +#, fuzzy +#~ msgid "Solaris POSIX ACL attribs" +#~ msgstr 
"Atributos ACL específicos de Solaris" + +#, fuzzy +#~ msgid "Solaris NFSv4/ZFS ACL attribs" +#~ msgstr "Atributos ACL específicos de Solaris" + +#, fuzzy +#~ msgid "AFS ACL attribs" +#~ msgstr "Atributos ACL específicos de AIX" + +#, fuzzy +#~ msgid "AIX POSIX ACL attribs" +#~ msgstr "Atributos ACL específicos de AIX" + +#, fuzzy +#~ msgid "AIX NFSv4 ACL attribs" +#~ msgstr "Atributos ACL específicos de AIX" + +#, fuzzy +#~ msgid "FreeBSD NFSv4/ZFS ACL attribs" +#~ msgstr "Atributos ACL de acceso específicos de FreeBSD" + +#, fuzzy +#~ msgid "GNU Hurd Default ACL attribs" +#~ msgstr "Atributos ACL por defecto específicos de Irix" + +#, fuzzy +#~ msgid "GNU Hurd Access ACL attribs" +#~ msgstr "Atributos ACL de acceso específicos de Irix" + +#, fuzzy +#~ msgid "GNU Hurd Extended attribs" +#~ msgstr "Atributos Extendidos Específicos de Linux" + +#, fuzzy +#~ msgid "IRIX Extended attribs" +#~ msgstr "Atributos Extendidos Específicos de Linux" + +#, fuzzy +#~ msgid "TRU64 Extended attribs" +#~ msgstr "Atributos Extendidos Específicos de Linux" + +#, fuzzy +#~ msgid "AIX Extended attribs" +#~ msgstr "Atributos extendidos" + +#, fuzzy +#~ msgid "OpenBSD Extended attribs" +#~ msgstr "Atributos Extendidos específicos de OpenBSD" + +#, fuzzy +#~ msgid "Solaris Extensible attribs or System Extended attribs" +#~ msgstr "" +#~ "Atributos extensible específicos de Solaris o atributos de Sistema de " +#~ "Extensión" + +#, fuzzy +#~ msgid "Solaris Extended attribs" +#~ msgstr "Atributos Extendidos Específicos de Solaris" + +#, fuzzy +#~ msgid "Darwin Extended attribs" +#~ msgstr "Atributos Extendidos Específicos de Darwin" + +#, fuzzy +#~ msgid "FreeBSD Extended attribs" +#~ msgstr "Atributos Extendidos Específicos de FreeBSD" + +#, fuzzy +#~ msgid "Linux Extended attribs" +#~ msgstr "Atributos Extendidos Específicos de Linux" + +#, fuzzy +#~ msgid "NetBSD Extended attribs" +#~ msgstr "Atributos Extendidos Específicos de NetBSD" + +#~ msgid "File skipped. Not newer: %s\n" +#~ msgstr "Archivo omitido. No más reciente: %s\n" + +#~ msgid "File skipped. Not older: %s\n" +#~ msgstr "Archivo omitido. No más viejo: %s\n" + +#~ msgid "File skipped. Already exists: %s\n" +#~ msgstr "Archivo omitido. Ya existe: %s\n" + +#~ msgid "File %s already exists and could not be replaced. ERR=%s.\n" +#~ msgstr "El archivo %s ya existe y no puede ser reemplazado. 
ERR=%s.\n" + +#~ msgid "bpkt already open fid=%d\n" +#~ msgstr "bpkt ya abierto fid=%d\n" + +#~ msgid "Cannot make fifo %s: ERR=%s\n" +#~ msgstr "No puede crear fifo %s: ERR=%s\n" + +#~ msgid "Cannot make node %s: ERR=%s\n" +#~ msgstr "No puede crear nodo %s: ERR=%s\n" + +#~ msgid "Could not symlink %s -> %s: ERR=%s\n" +#~ msgstr "No se pudo crear enlace simbólico %s -> %s: ERR=%s\n" + +#~ msgid "Could not restore file flags for file %s: ERR=%s\n" +#~ msgstr "" +#~ "No se pudo restaurar el archivo de banderas para el archivo% s: ERR=%s\n" + +#~ msgid "Could not hard link %s -> %s: ERR=%s\n" +#~ msgstr "No se pudo crear enlace duro %s -> %s: ERR=%s\n" + +#~ msgid "Could not reset file flags for file %s: ERR=%s\n" +#~ msgstr "" +#~ "No se pudo restablecer el archivo de banderas para el archivo %s: ERR=%s\n" + +#~ msgid "Original file %s have been deleted: type=%d\n" +#~ msgstr "Archivo %s original se han eliminado: tipo=%d\n" + +#~ msgid "Original file %s not saved: type=%d\n" +#~ msgstr "Archivo original %s no guardado: tipo=%d\n" + +#~ msgid "Unknown file type %d; not restored: %s\n" +#~ msgstr "Tipo de archivo desconocido %d; no restaurado: %s\n" + +#~ msgid "Zero length filename: %s\n" +#~ msgstr "Nombre de archivo con longitud cero: %s\n" + +#, fuzzy +#~ msgid "Plugin: \"%s\" not found.\n" +#~ msgstr "Plugin=%s no encontrado.\n" + +#~ msgid " NODUMP flag set - will not process %s\n" +#~ msgstr "Indicador NODUMP establecido - no procesará %s\n" + +#~ msgid "Cannot stat file %s: ERR=%s\n" +#~ msgstr "No puede stat archivo %s: ERR=%s\n" + +#~ msgid "%s mtime changed during backup.\n" +#~ msgstr "%s mtime cambiado durante la copia de seguridad.\n" + +#~ msgid "%s ctime changed during backup.\n" +#~ msgstr "%s ctime cambiado durante la copia de seguridad.\n" + +#, fuzzy +#~ msgid "%s size of %lld changed during backup to %lld.n" +#~ msgstr "%s tamaño cambiado durante la copia de seguridad.\n" + +#~ msgid "Top level directory \"%s\" has unlisted fstype \"%s\"\n" +#~ msgstr "" +#~ "Directorio de nivel superior \"%s\" tiene fstype \"%s\" no listado\n" + +#~ msgid "Top level directory \"%s\" has an unlisted drive type \"%s\"\n" +#~ msgstr "" +#~ "Directorio de nivel superior \"%s\" tiene un tipo de unidad \"%s\" no " +#~ "listado\n" + +#~ msgid "Cannot create directory %s: ERR=%s\n" +#~ msgstr "No se puede crear directorio %s: ERR=%s\n" + +#~ msgid "%s exists but is not a directory.\n" +#~ msgstr "%s existe pero no es un directorio.\n" + +#~ msgid "Cannot change owner and/or group of %s: ERR=%s\n" +#~ msgstr "No se puede cambiar el propietario y/o grupo de %s: ERR=%s\n" + +#~ msgid "Cannot change permissions of %s: ERR=%s\n" +#~ msgstr "No puede cambiar los permisos de %s: ERR=%s\n" + +#~ msgid "Too many subdirectories. Some permissions not reset.\n" +#~ msgstr "Demasiados subdirectorios. 
Algunos permisos no se restablece.\n" + +#~ msgid "Cannot open current directory: ERR=%s\n" +#~ msgstr "No se puede abrir el directorio actual: ERR=%s\n" + +#~ msgid "Cannot get current directory: ERR=%s\n" +#~ msgstr "No se puede obtener el directorio actual: ERR=%s\n" + +#~ msgid "Cannot reset current directory: ERR=%s\n" +#~ msgstr "No se puede restablecer el directorio actual: ERR=%s\n" + +#~ msgid "Only ipv4 and ipv6 are supported (%d)\n" +#~ msgstr "Solo ipv4 y ipv6 estan soportado (%d)\n" + +#~ msgid "Only ipv4 is supported (%d)\n" +#~ msgstr "Solo ipv4 esta soportado (%d)\n" + +#, fuzzy +#~ msgid "You tried to assign a ipv6 address to an ipv4(%d)\n" +#~ msgstr "Se trató de asignar una dirección IPv4 a IPv6(%d)\n" + +#, fuzzy +#~ msgid "You tried to assign an ipv4 address to an ipv6(%d)\n" +#~ msgstr "Se trató de asignar una dirección IPv4 a IPv6(%d)\n" + +#, fuzzy +#~ msgid "Can't add default IPv4 address (%s)\n" +#~ msgstr "No se puede agregar dirección por defecto (%s)\n" + +#, fuzzy +#~ msgid "" +#~ "Old style addresses cannot be mixed with new style. Try removing Port=nnn." +#~ msgstr "" +#~ "el viejo estilo de las direcciones no se pueden mezclar con el nuevo " +#~ "estilo" + +#, fuzzy +#~ msgid "Cannot resolve service(%s)" +#~ msgstr "no se puede resolver el servicio(%s)" + +#, fuzzy +#~ msgid "Cannot resolve hostname(%s) %s" +#~ msgstr "no se puede resolver el hostname(%s) %s" + +#, fuzzy +#~ msgid "Expected a block to begin with { but got: %s" +#~ msgstr "Esperaba un inicio de bloque {, obtuvo: %s" + +#~ msgid "Empty addr block is not allowed" +#~ msgstr "Bloque de direcciones vacío no está permitido" + +#, fuzzy +#~ msgid "Expected a string but got: %s" +#~ msgstr "Esperaba una cadena, obtuvo: %s" + +#, fuzzy +#~ msgid "Expected a string [ip|ipv4|ipv6] but got: %s" +#~ msgstr "Espera una cadena [ip|ipv4|ipv6], obtuvo: %s" + +#, fuzzy +#~ msgid "Expected a string [ip|ipv4] but got: %s" +#~ msgstr "Espera una cadena [ip|ipv4], obtuvo: %s" + +#, fuzzy +#~ msgid "Expected an equal = but got: %s" +#~ msgstr "Esperaba una igual =, obtuvo: %s" + +#, fuzzy +#~ msgid "Expected an identifier [addr|port] but got: %s" +#~ msgstr "Esperaba un identificador [addr|port], obtuvo: %s" + +#~ msgid "Only one port per address block" +#~ msgstr "Sólo un puerto por bloque de direcciones" + +#~ msgid "Only one addr per address block" +#~ msgstr "Sólo una dirección por bloque de direcciones" + +#, fuzzy +#~ msgid "Expected a identifier [addr|port] but got: %s" +#~ msgstr "Esperaba un identificador [addr|port], obtuvo: %s" + +#~ msgid "Expected a equal =, got: %s" +#~ msgstr "Esperaba una igual =, obtuvo: %s" + +#, fuzzy +#~ msgid "Expected a number or a string but got: %s" +#~ msgstr "Esperaba un numero o una cadena , obtuvo: %s" + +#, fuzzy +#~ msgid "Expected an IP number or a hostname but got: %s" +#~ msgstr "Esperaba un numero IP o un hostname, obtuvo: %s" + +#, fuzzy +#~ msgid "State machine mismatch" +#~ msgstr "Estado de la máquina desajustado" + +#, fuzzy +#~ msgid "Expected a end of block with } but got: %s" +#~ msgstr "Esperaba un fin de bloque }, obtuvo: %s" + +#, fuzzy +#~ msgid "Cannot add hostname(%s) and port(%s) to addrlist (%s)" +#~ msgstr "" +#~ "No se puede agregar el nombre de maquina(%s) y puerto(%s) a addrlist(%s)" + +#, fuzzy +#~ msgid "Expected an end of block with } but got: %s" +#~ msgstr "Esperaba un fin de bloque }, obtuvo: %s" + +#~ msgid "Expected an IP number or a hostname, got: %s" +#~ msgstr "Esperaba un numero IP o un hostname, obtuvo: %s" + +#, fuzzy +#~ msgid 
"Cannot add port (%s) to (%s)" +#~ msgstr "no puede agregar el puerto (%s) a (%s)" + +#~ msgid "Expected a port number or string, got: %s" +#~ msgstr "Esperaba un numero de puerto o cadena, obtuvo: %s" + +#~ msgid "Error scanning attributes: %s\n" +#~ msgstr "Error escaneando atributos: %s\n" + +#~ msgid "Child exited normally." +#~ msgstr "Hijo salió normalmente." + +#~ msgid "Unknown error during program execvp" +#~ msgstr "Error desconocido durante el programa execvp" + +#~ msgid "Child exited with code %d" +#~ msgstr "Hijo salió con el código %d" + +#~ msgid "Child died from signal %d: %s" +#~ msgstr "Niño muerto con la señal %d: %s" + +#~ msgid "Invalid errno. No error message possible." +#~ msgstr "Invalido errorno. Imposible mensaje de error." + +#~ msgid "Status OK\n" +#~ msgstr "Estado OK\n" + +#~ msgid "bget_msg: unknown signal %d\n" +#~ msgstr "bget_msg: señal desconocida %d\n" + +#, fuzzy +#~ msgid "Attr spool write error. wrote=%d wanted=%d bytes. ERR=%s\n" +#~ msgstr "Error de escritura en cola de atributo. ERR=%s\n" + +#~ msgid "TLS connection initialization failed.\n" +#~ msgstr "Ha fallado la conexión de inicialización TLS.\n" + +#~ msgid "TLS Negotiation failed.\n" +#~ msgstr "Negociación TLS fallida.\n" + +#~ msgid "" +#~ "TLS certificate verification failed. Peer certificate did not match a " +#~ "required commonName\n" +#~ msgstr "" +#~ "Comprobación de certificados TLS fallido. Certificado equivalente no " +#~ "corresponde con el commonName requerido\n" + +#~ msgid "" +#~ "TLS host certificate verification failed. Host name \"%s\" did not match " +#~ "presented certificate\n" +#~ msgstr "" +#~ "Fallo en verificación en certificado TLS de la maquina. Nombre de la " +#~ "maquina \"%s\" no coincide con el certificado presentado\n" + +#~ msgid "TLS enabled but not configured.\n" +#~ msgstr "TLS activado, pero no configurado.\n" + +#~ msgid "TLS enable but not configured.\n" +#~ msgstr "TLS permitido, pero no configurado.\n" + +#~ msgid "No problem." +#~ msgstr "No hay problema." + +#~ msgid "Authoritative answer for host not found." +#~ msgstr "Respuesta autoritativa para el host no encontrado." + +#~ msgid "Non-authoritative for host not found, or ServerFail." +#~ msgstr "Non-autoritativa para la maquina no encontrada, o ServerFail." + +#~ msgid "Non-recoverable errors, FORMERR, REFUSED, or NOTIMP." +#~ msgstr "Errores no recuperables, FORMERR, RECHAZADO o NOTIMP." + +#~ msgid "Valid name, no data record of resquested type." +#~ msgstr "Nombre válido, ningún registro de datos del tipo solicitado." + +#~ msgid "Unknown error." +#~ msgstr "Error desconocido." + +#~ msgid "Unknown sig %d" +#~ msgstr "Sig desconocido %d" + +#~ msgid "Cannot open stream socket. ERR=%s. Current %s All %s\n" +#~ msgstr "No se puede abrir el socket de flujo. ERR=%s. 
Actual %s Todos %s\n" + +#~ msgid "Cannot set SO_REUSEADDR on socket: %s\n" +#~ msgstr "No se puede establecer SO_REUSEADDR en el socket: %s\n" + +#~ msgid "Cannot bind port %d: ERR=%s: Retrying ...\n" +#~ msgstr "No se puede enlazar a puerto %d: ERR=%s: Reintentando ...\n" + +#~ msgid "Cannot bind port %d: ERR=%s.\n" +#~ msgstr "No se puede enlazar a puerto %d: ERR=%s.\n" + +#, fuzzy +#~ msgid "No addr/port found to listen on.\n" +#~ msgstr "Ninguno %s encontrado para %s.\n" + +#~ msgid "Could not init client queue: ERR=%s\n" +#~ msgstr "No se pudo iniciar cola cliente: ERR=%s\n" + +#~ msgid "Error in select: %s\n" +#~ msgstr "Error al seleccionar: %s\n" + +#~ msgid "Connection from %s:%d refused by hosts.access\n" +#~ msgstr "Conexión desde %s:%d rechazada por hosts.access\n" + +#~ msgid "Cannot set SO_KEEPALIVE on socket: %s\n" +#~ msgstr "No se puede establecer SO_KEEPALIVE en el socket: %s\n" + +#~ msgid "Could not create client BSOCK.\n" +#~ msgstr "No es posible crear cliente BSOCK. \n" + +#~ msgid "Could not add job to client queue: ERR=%s\n" +#~ msgstr "No se pudo agregar job a la cola de cliente: ERR=%s\n" + +#~ msgid "Could not destroy client queue: ERR=%s\n" +#~ msgstr "No es posible destruir la cola de cliente: ERR=%s\n" + +#~ msgid "Program killed by Bacula (timeout)\n" +#~ msgstr "Programa finalizado(killed) por Bacula (timeout) \n" + +#~ msgid "" +#~ "Could not connect to %s on %s:%d. ERR=%s\n" +#~ "Retrying ...\n" +#~ msgstr "" +#~ "No se pudo conectar a %s en %s:%d. ERR=%s\n" +#~ "Reintentando ...\n" + +#~ msgid "Unable to connect to %s on %s:%d. ERR=%s\n" +#~ msgstr "No se puede conectar a %s en %s:%d. ERR=%s\n" + +#~ msgid "gethostbyname() for host \"%s\" failed: ERR=%s\n" +#~ msgstr "fallo gethostbyname() para la maquina \"%s\": ERR=%s\n" + +#~ msgid "Socket open error. proto=%d port=%d. ERR=%s\n" +#~ msgstr "Error al abrir socket. proto=%d puerto=%d. ERR=%s\n" + +#~ msgid "Source address bind error. proto=%d. ERR=%s\n" +#~ msgstr "Error al enlazar dirección de origen. proto=%d. ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot set TCP_KEEPIDLE on socket: %s\n" +#~ msgstr "No se puede establecer SO_KEEPIDLE en el socket: %s\n" + +#, fuzzy +#~ msgid "Could not init bsock read mutex. ERR=%s\n" +#~ msgstr "No se pudo iniciar bsock mutex. ERR=%s\n" + +#, fuzzy +#~ msgid "Could not init bsock write mutex. ERR=%s\n" +#~ msgstr "No se pudo iniciar bsock mutex. ERR=%s\n" + +#, fuzzy +#~ msgid "Could not init bsock attribute mutex. ERR=%s\n" +#~ msgstr "No se pudo iniciar bsock mutex. ERR=%s\n" + +#, fuzzy +#~ msgid "Socket is closed\n" +#~ msgstr "SDSocket cerrado.\n" + +#~ msgid "Write error sending %d bytes to %s:%s:%d: ERR=%s\n" +#~ msgstr "Error de escritura enviando %d bytes para %s:%s:%d: ERR=%s\n" + +#~ msgid "Wrote %d bytes to %s:%s:%d, but only %d accepted.\n" +#~ msgstr "Escribió %d bytes para %s:%s:%d, pero solo %d aceptado.\n" + +#~ msgid "Read expected %d got %d from %s:%s:%d\n" +#~ msgstr "Esperaba leer %d obtuvo %d desde %s:%s:%d\n" + +#, fuzzy +#~ msgid "" +#~ "Packet size=%d too big from \"%s:%s:%d\". Maximum permitted 1000000. " +#~ "Terminating connection.\n" +#~ msgstr "" +#~ "Tamaño de paquete muy grande de \"%s:%s:%d. Conexión de terminación.\n" + +#~ msgid "Read error from %s:%s:%d: ERR=%s\n" +#~ msgstr "Error de lectura desde %s:%s:%d: ERR=%s\n" + +#, fuzzy +#~ msgid "fread attr spool error. Wanted=%d got=%d bytes.\n" +#~ msgstr "Error fread attr spool. 
ERR=%s\n" + +#~ msgid "fread attr spool I/O error.\n" +#~ msgstr "Error fread attr spool I/O.\n" + +#~ msgid "Could not malloc BSOCK data buffer\n" +#~ msgstr "No se pudo malloc datos BSOCK buffer.\n" + +#~ msgid "sockopt error: %s\n" +#~ msgstr "error de sockopt: %s\n" + +#~ msgid "Warning network buffer = %d bytes not max size.\n" +#~ msgstr "Alerta, búfer de red=%d bytes no tamaño máximo.\n" + +#~ msgid "fcntl F_GETFL error. ERR=%s\n" +#~ msgstr "error F_GETFL fcntl. ERR=%s\n" + +#~ msgid "fcntl F_SETFL error. ERR=%s\n" +#~ msgstr "error F_SETFL fcntl. ERR=%s\n" + +#, fuzzy +#~ msgid "Director authorization error at \"%s:%d\"\n" +#~ msgstr "Problema de autorización de Director en \"%s:%d\"\n" + +#, fuzzy +#~ msgid "" +#~ "Authorization error: Remote server at \"%s:%d\" did not advertise " +#~ "required TLS support.\n" +#~ msgstr "" +#~ "Problema de autorización: El servidor remoto en \"%s:%d\" no anuncio " +#~ "soporte TLS requiere.\n" + +#, fuzzy +#~ msgid "" +#~ "Authorization error with Director at \"%s:%d\": Remote server requires " +#~ "TLS.\n" +#~ msgstr "" +#~ "Problema de autorización con el Director en \"%s:%d\": El servidor remoto " +#~ "requiere TLS.\n" + +#, fuzzy +#~ msgid "" +#~ "Bad errmsg to Hello command: ERR=%s\n" +#~ "The Director at \"%s:%d\" may not be running.\n" +#~ msgstr "" +#~ "Mala respuesta al comando Hello: ERR=%s\n" +#~ "El director en \"%s:%d\" probablemente no esta corriendo.\n" + +#, fuzzy +#~ msgid "" +#~ "Authorization error with Director at \"%s:%d\"\n" +#~ "Most likely the passwords do not agree.\n" +#~ "If you are using TLS, there may have been a certificate validation error " +#~ "during the TLS handshake.\n" +#~ "For help, please see: " +#~ msgstr "" +#~ "Problema de autorización con el Director en \"%s:%d\"\n" +#~ "Lo mas probable es que las contraseñas no están de acuerdo.\n" +#~ "Si está usando TLS, puede haber habido un error de validación de " +#~ "certificados durante la negociación TLS.\n" +#~ " Por favor vea http://www.bacula.org/en/rel-manual/" +#~ "Bacula_Freque_Asked_Questi.html#SECTION003760000000000000000 para ayuda.\n" + +#, fuzzy +#~ msgid "safe_unlink could not compile regex pattern \"%s\" ERR=%s\n" +#~ msgstr "No se ha podido compilar patrón regex \"%s\" ERR=%s\n" + +#~ msgid "Out of memory: ERR=%s\n" +#~ msgstr "Fuera de memoria: ERR=%s\n" + +#~ msgid "Buffer overflow.\n" +#~ msgstr "Desbordamiento de búfer.\n" + +#~ msgid "Bad errno" +#~ msgstr "Malo errno" + +#~ msgid "Memset for %d bytes at %s:%d\n" +#~ msgstr "Memset para %d bytes en %s:%d\n" + +#, fuzzy +#~ msgid "Cannot open %s file. %s ERR=%s\n" +#~ msgstr "No se puede abrir el archivo pid. %s ERR=%s\n" + +#~ msgid "" +#~ "%s is already running. pid=%d\n" +#~ "Check file %s\n" +#~ msgstr "" +#~ "%s ya está en ejecución. pid=%d\n" +#~ "Compruebe el archivo %s\n" + +#, fuzzy +#~ msgid "Could not open %s file. %s ERR=%s\n" +#~ msgstr "No se pudo abrir el archivo pid. %s ERR=%s\n" + +#, fuzzy +#~ msgid "%s is already running. pid=%d, check file %s\n" +#~ msgstr "" +#~ "%s ya está en ejecución. pid=%d\n" +#~ "Compruebe el archivo %s\n" + +#, fuzzy +#~ msgid "Cannot lock %s file. %s ERR=%s\n" +#~ msgstr "No se puede abrir el archivo pid. %s ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot not open %s file. %s ERR=%s\n" +#~ msgstr "No se puede abrir el archivo pid. %s ERR=%s\n" + +#~ msgid "Could not create state file. %s ERR=%s\n" +#~ msgstr "No es posible crear el archivo de estado. 
%s ERR=%s\n" + +#~ msgid "Write final hdr error: ERR=%s\n" +#~ msgstr "Error de escritura HDR final: ERR=%s\n" + +#~ msgid "stop_btimer called with NULL btimer_id\n" +#~ msgstr "stop_btimer llamado con NULL btimer_id\n" + +#~ msgid "1999 Authorization failed.\n" +#~ msgstr "1999 Fallo de Autorización.\n" + +#~ msgid "Unable to open certificate file" +#~ msgstr "No se puede abrir el archivo de certificado" + +#~ msgid "Unable to read certificate from file" +#~ msgstr "No es posible leer el archivo de certificado" + +#~ msgid "Unable to extract public key from certificate" +#~ msgstr "No se puede extraer la clave pública del certificado" + +#~ msgid "" +#~ "Provided certificate does not include the required subjectKeyIdentifier " +#~ "extension." +#~ msgstr "" +#~ "Certificado suministrado no incluye la extensión subjectKeyIdentifier " +#~ "necesaria." + +#~ msgid "Unsupported key type provided: %d\n" +#~ msgstr "Tipo de clave prevista no soportada: %d\n" + +#~ msgid "Unable to open private key file" +#~ msgstr "No se puede abrir archivo de clave privada" + +#~ msgid "Unable to read private key from file" +#~ msgstr "No se puede leer la clave privada del archivo" + +#~ msgid "Unsupported digest type: %d\n" +#~ msgstr "Tipo Digest no soportado: %d\n" + +#~ msgid "OpenSSL digest initialization failed" +#~ msgstr "Fallo al inicializar resume OpenSSL" + +#~ msgid "OpenSSL digest update failed" +#~ msgstr "Fallo al actualizar resume OpenSSL" + +#~ msgid "OpenSSL digest finalize failed" +#~ msgstr "Fallo al finalizar resume OpenSSL" + +#~ msgid "OpenSSL digest_new failed" +#~ msgstr "fallo digest_new OpenSSL" + +#~ msgid "OpenSSL sign get digest failed" +#~ msgstr "Fallo OpenSSL al obtener firma digest " + +#~ msgid "OpenSSL digest Verify final failed" +#~ msgstr "Fallo OpenSSL Verificación final digest " + +#~ msgid "No signers found for crypto verify.\n" +#~ msgstr "Firmantes no encontrados para verificar el cifrado.\n" + +#~ msgid "Signature creation failed" +#~ msgstr "Fallo en creación de firma" + +#~ msgid "Signature decoding failed" +#~ msgstr "Fallo en decodificación de firma" + +#~ msgid "Unsupported cipher type specified\n" +#~ msgstr "Tipo de cifrado especificados no soportado\n" + +#~ msgid "CryptoData decoding failed" +#~ msgstr "decodificación CryptoData fallida" + +#~ msgid "Failure decrypting the session key" +#~ msgstr "Error al descifrar la clave de sesión" + +#~ msgid "Unsupported contentEncryptionAlgorithm: %d\n" +#~ msgstr "ContentEncryptionAlgorithm no soportado: %d\n" + +#~ msgid "OpenSSL cipher context initialization failed" +#~ msgstr "Fallo al inicializar contexto cipher OpenSSl" + +#~ msgid "Encryption session provided an invalid symmetric key" +#~ msgstr "Cifrado de sesión suministro una clave simétrica inválida" + +#~ msgid "Encryption session provided an invalid IV" +#~ msgstr "Cifrado de sesión suministro una IV inválida" + +#~ msgid "OpenSSL cipher context key/IV initialization failed" +#~ msgstr "Fallido inicialización de contexto OpenSSL de cifrado de clave/IV " + +#~ msgid "Unsupported digest type=%d specified\n" +#~ msgstr "Incompatible resume tipo=%d especificado\n" + +#~ msgid "SHA1Update() returned an error: %d\n" +#~ msgstr "SHA1Update() retorno un error: %d\n" + +#~ msgid "No error" +#~ msgstr "Ningún error" + +#~ msgid "Signer not found" +#~ msgstr "Firmante no encontrado" + +#~ msgid "Recipient not found" +#~ msgstr "Recipiente no encontrado" + +#~ msgid "Unsupported digest algorithm" +#~ msgstr "Algoritmo de resumen no soportado" + +#~ msgid "Unsupported 
encryption algorithm" +#~ msgstr "Algoritmo de cifrado no soportado" + +#~ msgid "Signature is invalid" +#~ msgstr "La firma no es válida" + +#~ msgid "Decryption error" +#~ msgstr "Error de Descifrado" + +#~ msgid "Internal error" +#~ msgstr "Error interno" + +#~ msgid "Unknown error" +#~ msgstr "Error desconocido" + +#~ msgid "Cannot fork to become daemon: ERR=%s\n" +#~ msgstr "No se puede fork para convertirse en demonio: ERR =%s\n" + +#, fuzzy +#~ msgid "writeunlock called too many times.\n" +#~ msgstr "rwl_writeunlock llamado muchas veces.\n" + +#, fuzzy +#~ msgid "writeunlock by non-owner.\n" +#~ msgstr "rwl_writeunlock por no-propietario.\n" + +#~ msgid "Thread %d found unchanged elements %d times\n" +#~ msgstr "Hilo %d encontró elementos sin cambios %d veces\n" + +#~ msgid "%02d: interval %d, writes %d, reads %d\n" +#~ msgstr "%02d: intervalo %d, escritos %d, leídos %d\n" + +#~ msgid "data %02d: value %d, %d writes\n" +#~ msgstr "datos %02d: valor %d, %d escritos\n" + +#~ msgid "Total: %d thread writes, %d data writes\n" +#~ msgstr "Total: %d hilos escritos, %d datos escritos\n" + +#~ msgid "Try write lock" +#~ msgstr "Intente escribir bloqueo" + +#~ msgid "Try read lock" +#~ msgstr "Intente leer bloqueo" + +#~ msgid "Create thread" +#~ msgstr "Crear hilo" + +#~ msgid "Join thread" +#~ msgstr "Unir hilo" + +#~ msgid "%02d: interval %d, updates %d, r_collisions %d, w_collisions %d\n" +#~ msgstr "" +#~ "%02d: intervalo %d, actualizados %d, r_collisions %d, w_collisions %d\n" + +#~ msgid "data %02d: value %d, %d updates\n" +#~ msgstr "datos %02d: valor %d, %d actualizados\n" + +#, fuzzy +#~ msgid "Empty name not allowed.\n" +#~ msgstr "Bloque de direcciones vacío no está permitido" + +#~ msgid "Illegal character \"%c\" in name.\n" +#~ msgstr "Carácter ilegal \"%c\" en el nombre.\n" + +#~ msgid "Name too long.\n" +#~ msgstr "Nombre demasiado largo.\n" + +#, fuzzy +#~ msgid "" +#~ "Config file error: %s\n" +#~ " : Line %d, col %d of file %s\n" +#~ "%s\n" +#~ msgstr "" +#~ "Error de configuración: %s\n" +#~ " : línea %d, columna %d en el archivo %s\n" +#~ "%s\n" +#~ "%s" + +#, fuzzy +#~ msgid "Cannot open config file %s: %s\n" +#~ msgstr "No se pudo abrir archivo de configuración \"%s\": %s\n" + +#, fuzzy +#~ msgid "Cannot open lex\n" +#~ msgstr "No se pudo abrir %s\n" + +#~ msgid "Backup" +#~ msgstr "Backup" + +#~ msgid "Verifying" +#~ msgstr "Verificando" + +#~ msgid "Restoring" +#~ msgstr "Restaurando" + +#~ msgid "Archiving" +#~ msgstr "Archivando" + +#~ msgid "Copying" +#~ msgstr "Copiando" + +#~ msgid "Migration" +#~ msgstr "Migración" + +#~ msgid "Scanning" +#~ msgstr "Escaneando" + +#~ msgid "Unknown operation" +#~ msgstr "Operación desconocida" + +#~ msgid "backup" +#~ msgstr "backup" + +#~ msgid "verified" +#~ msgstr "verificado" + +#~ msgid "verify" +#~ msgstr "verificar" + +#~ msgid "restored" +#~ msgstr "restaurado" + +#~ msgid "restore" +#~ msgstr "restaurar" + +#~ msgid "archived" +#~ msgstr "archivado" + +#~ msgid "archive" +#~ msgstr "archivo" + +#~ msgid "copied" +#~ msgstr "copiado" + +#~ msgid "copy" +#~ msgstr "copia" + +#~ msgid "migrated" +#~ msgstr "migrado" + +#~ msgid "migrate" +#~ msgstr "migrar" + +#~ msgid "scanned" +#~ msgstr "escaneado" + +#~ msgid "scan" +#~ msgstr "escanear" + +#~ msgid "unknown action" +#~ msgstr "acción desconocida" + +#~ msgid "pthread_once failed. ERR=%s\n" +#~ msgstr "fallo pthread_once. ERR=%s\n" + +#~ msgid "Could not init msg_queue mutex. ERR=%s\n" +#~ msgstr "No se pudo iniciar msg_queue mutex. 
ERR=%s\n" + +#~ msgid "NULL jcr.\n" +#~ msgstr "NULL jcr.\n" + +#~ msgid "" +#~ "Watchdog sending kill after %d secs to thread stalled reading Storage " +#~ "daemon.\n" +#~ msgstr "" +#~ "Watchdog envío kill después de %d segundos para hilo estancado leyendo " +#~ "demonio Storage.\n" + +#~ msgid "" +#~ "Watchdog sending kill after %d secs to thread stalled reading File " +#~ "daemon.\n" +#~ msgstr "" +#~ "Watchdog envío kill después de %d segundos para hilo estancado leyendo " +#~ "demonio File.\n" + +#~ msgid "" +#~ "Watchdog sending kill after %d secs to thread stalled reading Director.\n" +#~ msgstr "" +#~ "Watchdog envío kill después de %d segundos para hilo estancado leyendo " +#~ "demonio Director.\n" + +#~ msgid "Problem probably begins at line %d.\n" +#~ msgstr "Probablemente comienza el problema en la línea %d\n" + +#~ msgid "" +#~ "Config error: %s\n" +#~ " : line %d, col %d of file %s\n" +#~ "%s\n" +#~ "%s" +#~ msgstr "" +#~ "Error de configuración: %s\n" +#~ " : línea %d, columna %d en el archivo %s\n" +#~ "%s\n" +#~ "%s" + +#~ msgid "Config error: %s\n" +#~ msgstr "Error de configuración: %s\n" + +#~ msgid "Close of NULL file\n" +#~ msgstr "Cierre de archivo NULL\n" + +#~ msgid "" +#~ "get_char: called after EOF. You may have a open double quote without the " +#~ "closing double quote.\n" +#~ msgstr "" +#~ "get_char: llamado después de EOF. Usted puede tener una comilla doble " +#~ "abierta sin el cierre de comillas dobles.\n" + +#~ msgid "Config token too long, file: %s, line %d, begins at line %d\n" +#~ msgstr "" +#~ "Configuración de señal demasiado largo, archivo: %s, línea %d, se inicia " +#~ "en la línea %d\n" + +#~ msgid "none" +#~ msgstr "ninguno" + +#~ msgid "comment" +#~ msgstr "comentario" + +#~ msgid "number" +#~ msgstr "numero" + +#~ msgid "ip_addr" +#~ msgstr "ip_addr" + +#~ msgid "identifier" +#~ msgstr "identificar" + +#~ msgid "string" +#~ msgstr "cadena" + +#~ msgid "quoted_string" +#~ msgstr "quoted_string" + +#~ msgid "include" +#~ msgstr "incluir" + +#~ msgid "include_quoted_string" +#~ msgstr "include_quoted_string" + +#~ msgid "UTF-8 Byte Order Mark" +#~ msgstr "Marca de Orden de UTF-8 Byte" + +#~ msgid "UTF-16le Byte Order Mark" +#~ msgstr "Marca de Orden de UTF-16le Byte" + +#~ msgid "expected a positive integer number, got: %s" +#~ msgstr "esperaba un numero entero positivo, obtuvo: %s" + +#~ msgid "" +#~ "This config file appears to be in an unsupported Unicode format " +#~ "(UTF-16be). Please resave as UTF-8\n" +#~ msgstr "" +#~ "Este archivo de configuración parece estar en un formato no compatible " +#~ "con Unicode (UTF-16be). Por favor, vuelva a guardar como UTF-8\n" + +#~ msgid "Cannot open included config file %s: %s\n" +#~ msgstr "No se puede abrir el archivo de configuración incluido %s: %s\n" + +#~ msgid "expected an integer or a range, got %s: %s" +#~ msgstr "esperaba un numero entero o un rango, obtuvo %s: %s" + +#~ msgid "expected an integer number, got %s: %s" +#~ msgstr "esperaba un numero entero, obtuvo %s: %s" + +#~ msgid "expected a name, got %s: %s" +#~ msgstr "esperaba un nombre, obtuvo %s: %s" + +#~ msgid "name %s length %d too long, max is %d\n" +#~ msgstr "nombre %s longitud %d demasiado largo, el máximo es %d\n" + +#~ msgid "expected a string, got %s: %s" +#~ msgstr "esperaba una cadena, obtuvo %s: %s" + +#~ msgid "Mutex lock failure. ERR=%s\n" +#~ msgstr "Error bloqueando Mutex. ERR=%s\n" + +#~ msgid "Mutex unlock failure. ERR=%s\n" +#~ msgstr "Error desbloqueando Mutex. 
ERR=%s\n" + +#~ msgid "pthread_create failed: ERR=%s\n" +#~ msgstr "pthread_create fallido: ERR=%s\n" + +#~ msgid "MemPool index %d larger than max %d\n" +#~ msgstr "MemPool índice %d mayor que máximo %d\n" + +#~ msgid "Out of memory requesting %d bytes\n" +#~ msgstr "Fuera de memoria solicitando %d bytes\n" + +#~ msgid "obuf is NULL\n" +#~ msgstr "obuf es NULL\n" + +#~ msgid "Could not open console message file %s: ERR=%s\n" +#~ msgstr "No puede abrir el archivo de mensajes de la consola %s: ERR=%s\n" + +#~ msgid "Could not get con mutex: ERR=%s\n" +#~ msgstr "No se pudo obtener con mutex: ERR=%s\n" + +#~ msgid "Bacula Message" +#~ msgstr "Bacula Message" + +#~ msgid "open mail pipe %s failed: ERR=%s\n" +#~ msgstr "Fallo al abrir tubería de correo %s: ERR=%s\n" + +#~ msgid "open mail pipe failed.\n" +#~ msgstr "Fallo al abrir tubería de correo .\n" + +#~ msgid "close error: ERR=%s\n" +#~ msgstr "Error al cerrar: ERR=%s\n" + +#~ msgid "Mail prog: %s" +#~ msgstr "Programa de Correo: %s" + +#~ msgid "" +#~ "Mail program terminated in error.\n" +#~ "CMD=%s\n" +#~ "ERR=%s\n" +#~ msgstr "" +#~ "Programa de correo terminado en error.\n" +#~ "CMD=%s\n" +#~ "ERR=%s\n" + +#~ msgid "fopen %s failed: ERR=%s\n" +#~ msgstr "fopen %s ha fallado: ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "Msg delivery error: Operator mail program terminated in error.\n" +#~ "CMD=%s\n" +#~ "ERR=%s\n" +#~ msgstr "" +#~ "Operador de programa de correo terminado en error.\n" +#~ "CMD=%s\n" +#~ "ERR=%s\n" + +#, fuzzy +#~ msgid "Msg delivery error: fopen %s failed: ERR=%s\n" +#~ msgstr "fopen %s ha fallado: ERR=%s\n" + +#~ msgid "%s: ABORTING due to ERROR in %s:%d\n" +#~ msgstr "%s: ABORTADO debido a un ERROR en %s:%d\n" + +#~ msgid "%s: ERROR TERMINATION at %s:%d\n" +#~ msgstr "%s: ERROR DE TERMINACIÓN en %s:%d\n" + +#~ msgid "%s: Fatal Error because: " +#~ msgstr "%s: Error Fatal porque: " + +#~ msgid "%s: Fatal Error at %s:%d because:\n" +#~ msgstr "%s: Error Fatal en %s:%d porque:\n" + +#~ msgid "%s: ERROR: " +#~ msgstr "%s: ERROR: " + +#~ msgid "%s: ERROR in %s:%d " +#~ msgstr "%s: ERROR en %s:%d " + +#~ msgid "%s: Warning: " +#~ msgstr "%s: Advertencia: " + +#~ msgid "%s: Security violation: " +#~ msgstr "%s: Violación de seguridad: " + +#~ msgid "%s ABORTING due to ERROR\n" +#~ msgstr "%s: ABORTADO debido a un ERROR\n" + +#~ msgid "%s ERROR TERMINATION\n" +#~ msgstr "%s: ERROR DE TERMINACIÓN\n" + +#~ msgid "%s JobId %u: Fatal error: " +#~ msgstr "%s JobId %u: Error Fatal: " + +#~ msgid "%s JobId %u: Error: " +#~ msgstr "%s JobId %u: Error: " + +#~ msgid "%s JobId %u: Warning: " +#~ msgstr "%s JobId %u: Advertencia: " + +#~ msgid "%s JobId %u: Security violation: " +#~ msgstr "%s JobId %u: Violación de seguridad: " + +#, fuzzy +#~ msgid "Debug lock information" +#~ msgstr "Instrucción ilegal" + +#, fuzzy +#~ msgid "Debug network information" +#~ msgstr "Instrucción ilegal" + +#, fuzzy +#~ msgid "Debug plugin information" +#~ msgstr "Instrucción ilegal" + +#, fuzzy +#~ msgid "Debug volume information" +#~ msgstr "Instrucción ilegal" + +#, fuzzy +#~ msgid "Debug memory allocation" +#~ msgstr "Instrucción ilegal" + +#, fuzzy +#~ msgid "Debug scheduler information" +#~ msgstr "Instrucción ilegal" + +#, fuzzy +#~ msgid "Debug protocol information" +#~ msgstr "Instrucción ilegal" + +#, fuzzy +#~ msgid "Debug all information" +#~ msgstr "Instrucción ilegal" + +#~ msgid "Unable to init mutex: ERR=%s\n" +#~ msgstr "No se puede iniciar mutex: ERR=%s\n" + +#~ msgid "Unable to destroy mutex: ERR=%s\n" +#~ msgstr "No se puede destruir 
mutex: ERR=%s\n" + +#~ msgid "Unable to init OpenSSL threading: ERR=%s\n" +#~ msgstr "No se puede inicializar hilo OpenSSL: ERR=%s\n" + +#~ msgid "Failed to seed OpenSSL PRNG\n" +#~ msgstr "Fallo al seed OpenSSL PRNG\n" + +#~ msgid "Failed to save OpenSSL PRNG\n" +#~ msgstr "Fallo al guardar OpenSSL PRNG\n" + +#~ msgid "***UNKNOWN***" +#~ msgstr "***DESCONOCIDO***" + +#~ msgid "" +#~ "Attempt to define second \"%s\" resource named \"%s\" is not permitted.\n" +#~ msgstr "" +#~ "Intento para definir secundo recurso \"%s\" denominado \"%s\" no está " +#~ "permitido.\n" + +#, fuzzy +#~ msgid "Inserted res: %s index=%d\n" +#~ msgstr "Insertando %s res: %s index=%d pass=%d\n" + +#~ msgid "expected an =, got: %s" +#~ msgstr "esperaba un =, obtuvo: %s" + +#~ msgid "Unknown item code: %d\n" +#~ msgstr "Código de ítem desconocido: %d\n" + +#~ msgid "message type: %s not found" +#~ msgstr "tipo de mensaje: %s no encontrado" + +#, fuzzy +#~ msgid "" +#~ "Attempt to redefine \"%s\" from \"%s\" to \"%s\" referenced on line %d : " +#~ "%s\n" +#~ msgstr "" +#~ "Intento de redefinir recurso \"%s\" referenciado en la línea %d: %s\n" + +#~ msgid "Could not find config Resource \"%s\" referenced on line %d : %s\n" +#~ msgstr "" +#~ "No pudo encontrar recurso de configuración \"%s\" referenciado en linea " +#~ "%d : %s\n" + +#~ msgid "Attempt to redefine resource \"%s\" referenced on line %d : %s\n" +#~ msgstr "" +#~ "Intento de redefinir recurso \"%s\" referenciado en la línea %d: %s\n" + +#~ msgid "Too many %s directives. Max. is %d. line %d: %s\n" +#~ msgstr "Demasiadas directivas %s. Máximo es %d. linea %d: %s\n" + +#~ msgid "Missing config Resource \"%s\" referenced on line %d : %s\n" +#~ msgstr "" +#~ "Falta la configuración de Recurso \"%s\" referenciado en linea %d : %s\n" + +#~ msgid "expected a size number, got: %s" +#~ msgstr "esperaba un numero de tamaño, obtuvo: %s" + +#, fuzzy +#~ msgid "expected a speed number, got: %s" +#~ msgstr "esperaba un numero de tamaño, obtuvo: %s" + +#, fuzzy +#~ msgid "expected a %s, got: %s" +#~ msgstr "esperaba un tamaño, obtuvo: %s" + +#~ msgid "Expected a Tape Label keyword, got: %s" +#~ msgstr "esperaba una etiqueta Tape Label , obtuvo: %s" + +#~ msgid "Unable to initialize resource lock. ERR=%s\n" +#~ msgstr "No se puede inicializar recurso de bloqueo. ERR=%s\n" + +#~ msgid "Config filename too long.\n" +#~ msgstr "Nombre de archivo de configuración muy largo.\n" + +#~ msgid "Cannot open config file \"%s\": %s\n" +#~ msgstr "No se pudo abrir archivo de configuración \"%s\": %s\n" + +#~ msgid "" +#~ "Currently we cannot handle UTF-16 source files. Please convert the conf " +#~ "file to UTF-8\n" +#~ msgstr "" +#~ "Actualmente no podemos manejar archivos de origen UTF-16 . Por favor, " +#~ "convierta el archivo de configuración a UTF-8\n" + +#~ msgid "Expected a Resource name identifier, got: %s" +#~ msgstr "esperaba un nombre de identificación de Recurso, obtuvo: %s" + +#~ msgid "expected resource name, got: %s" +#~ msgstr "esperaba un nombre de recurso, obtuvo: %s" + +#~ msgid "not in resource definition: %s" +#~ msgstr "no en la definición de recurso: %s" + +#~ msgid "" +#~ "Keyword \"%s\" not permitted in this resource.\n" +#~ "Perhaps you left the trailing brace off of the previous resource." +#~ msgstr "" +#~ "Palabra clave \"%s\" no permitida en este recurso.\n" +#~ "Tal vez a la izquierda de la llave de los recursos anteriores." 
+ +#~ msgid "Name not specified for resource" +#~ msgstr "Nombre no especificado para el recurso" + +#~ msgid "unexpected token %d %s in resource definition" +#~ msgstr "símbolo %d no soporte %s en la definición de los recursos" + +#~ msgid "Unknown parser state %d\n" +#~ msgstr "Estado del Analizador %d desconocido\n" + +#~ msgid "End of conf file reached with unclosed resource." +#~ msgstr "Fin de archivo de configuración alcanzado con recursos sin cerrar." + +#~ msgid "Failed to open Plugin directory %s: ERR=%s\n" +#~ msgstr "No se pudo abrir el directorio de Plugin %s: ERR=%s\n" + +#~ msgid "Failed to find any plugins in %s\n" +#~ msgstr "No se pudo encontrar ningún plugin en %s\n" + +#, fuzzy +#~ msgid "dlopen plugin %s failed: ERR=%s\n" +#~ msgstr "fopen %s ha fallado: ERR=%s\n" + +#~ msgid "Lookup of loadPlugin in plugin %s failed: ERR=%s\n" +#~ msgstr "Fallo buscando por loadPlugin en plugin %s: ERR=%s\n" + +#~ msgid "Lookup of unloadPlugin in plugin %s failed: ERR=%s\n" +#~ msgstr "Fallo buscando por unloadPlugin en plugin %s: ERR=%s\n" + +#~ msgid "Could not find userid=%s: ERR=%s\n" +#~ msgstr "No se pudo encontrar userid=%s: ERR=%s\n" + +#~ msgid "Could not find password entry. ERR=%s\n" +#~ msgstr "No se pudo encontrar contraseña de entrada. ERR=%s\n" + +#~ msgid "Could not find group=%s: ERR=%s\n" +#~ msgstr "No se pudo encontrar grupo=%s: ERR=%s\n" + +#~ msgid "Could not initgroups for group=%s, userid=%s: ERR=%s\n" +#~ msgstr "Podría no initgroups para grupo=%s, userid=%s: ERR=%s\n" + +#~ msgid "Could not initgroups for userid=%s: ERR=%s\n" +#~ msgstr "Podría no initgroups para userid=%s: ERR=%s\n" + +#~ msgid "Could not set group=%s: ERR=%s\n" +#~ msgstr "No se pudo establecer grupo=%s: ERR=%s\n" + +#~ msgid "prctl failed: ERR=%s\n" +#~ msgstr "prctl fallido: ERR=%s\n" + +#~ msgid "setreuid failed: ERR=%s\n" +#~ msgstr "setreuid fallido: ERR=%s\n" + +#~ msgid "cap_from_text failed: ERR=%s\n" +#~ msgstr "cap_from_text fallido: ERR=%s\n" + +#~ msgid "cap_set_proc failed: ERR=%s\n" +#~ msgstr "cap_set_proc fallido: ERR=%s\n" + +#~ msgid "Keep readall caps not implemented this OS or missing libraries.\n" +#~ msgstr "" +#~ "seguir readall caps no implementado en este SO o faltan bibliotecas.\n" + +#~ msgid "Could not set specified userid: %s\n" +#~ msgstr "No se pudo establecer userid especificado: %s\n" + +#~ msgid "rwl_writelock failure at %s:%d: ERR=%s\n" +#~ msgstr "Fallo rwl_writelock en %s:%d: ERR=%s\n" + +#~ msgid "rwl_writeunlock failure at %s:%d:. ERR=%s\n" +#~ msgstr "Fallo \trwl_writeunlock en %s:%d: ERR=%s\n" + +#~ msgid "%s: run %s \"%s\"\n" +#~ msgstr "%s: ejecutar %s \"%s\"\n" + +#~ msgid "Runscript: %s could not execute. ERR=%s\n" +#~ msgstr "Runscript: No pudo ejecutar %s. ERR=%s\n" + +#~ msgid "%s: %s\n" +#~ msgstr "%s: %s\n" + +#~ msgid "Runscript: %s returned non-zero status=%d. ERR=%s\n" +#~ msgstr "Runscript: %s devolvió estado=%d distinto de cero. 
ERR=%s\n" + +#~ msgid "rwl_writeunlock called too many times.\n" +#~ msgstr "rwl_writeunlock llamado muchas veces.\n" + +#~ msgid "rwl_writeunlock by non-owner.\n" +#~ msgstr "rwl_writeunlock por no-propietario.\n" + +#, fuzzy +#~ msgid "Negative numbers not permitted.\n" +#~ msgstr "Números negativos no permitidos.\n" + +#, fuzzy +#~ msgid "User cancel requested.\n" +#~ msgstr "Restauración cancelada.\n" + +#, fuzzy +#~ msgid "Selection items must be be greater than zero.\n" +#~ msgstr "Los valores deben ser ser mayor que cero.\n" + +#~ msgid "Invalid signal number" +#~ msgstr "Número de la señal no válida" + +#~ msgid "Bacula interrupted by signal %d: %s\n" +#~ msgstr "Bacula interrumpido por señal %d: %s\n" + +#, fuzzy +#~ msgid "Kaboom! %s, %s got signal %d - %s at %s. Attempting traceback.\n" +#~ msgstr "Kaboom! %s, %s obtuvo la señal %d - %s. Intentando rastreo.\n" + +#~ msgid "Kaboom! exepath=%s\n" +#~ msgstr "Kaboom! exepath=%s\n" + +#~ msgid "Fork error: ERR=%s\n" +#~ msgstr "Fork error: ERR=%s\n" + +#~ msgid "Calling: %s %s %s %s\n" +#~ msgstr "Llamando: %s %s %s %s\n" + +#~ msgid "execv: %s failed: ERR=%s\n" +#~ msgstr "execv: %s ha fallado: ERR=%s\n" + +#, fuzzy +#~ msgid "It looks like the traceback worked...\n" +#~ msgstr "Parece que el rastreo trabaja ...\n" + +#~ msgid "BA_NSIG too small (%d) should be (%d)\n" +#~ msgstr "BA_NSIG demasiado pequeño (%d) debe ser (%d)\n" + +#~ msgid "UNKNOWN SIGNAL" +#~ msgstr "SEÑAL DESCONOCIDA" + +#~ msgid "Hangup" +#~ msgstr "Colgar" + +#~ msgid "Interrupt" +#~ msgstr "Interrumpir" + +#~ msgid "Quit" +#~ msgstr "Salir" + +#~ msgid "Illegal instruction" +#~ msgstr "Instrucción ilegal" + +#~ msgid "Trace/Breakpoint trap" +#~ msgstr "Trace/Trampa de Punto de Interrupción" + +#~ msgid "Abort" +#~ msgstr "Anular" + +#~ msgid "EMT instruction (Emulation Trap)" +#~ msgstr "Instrucción EMT (Emulación de Trampa)" + +#~ msgid "IOT trap" +#~ msgstr "trampa IOT" + +#~ msgid "BUS error" +#~ msgstr "BUS error" + +#~ msgid "Floating-point exception" +#~ msgstr "Excepción de punto flotante" + +#~ msgid "Kill, unblockable" +#~ msgstr "Matar, imbloqueable" + +#~ msgid "User-defined signal 1" +#~ msgstr "Señal 1 definida por el usuario" + +#~ msgid "Segmentation violation" +#~ msgstr "Violación de segmento" + +#~ msgid "User-defined signal 2" +#~ msgstr "Señal definida por el usuario 2" + +#~ msgid "Broken pipe" +#~ msgstr "Tuberí­a rota" + +#~ msgid "Alarm clock" +#~ msgstr "Alarma" + +#~ msgid "Termination" +#~ msgstr "Terminación" + +#~ msgid "Stack fault" +#~ msgstr "Error de pila" + +#~ msgid "Child status has changed" +#~ msgstr "El estado de hijo ha cambiado" + +#~ msgid "Continue" +#~ msgstr "Continue" + +#~ msgid "Stop, unblockable" +#~ msgstr "Detener, imbloqueable" + +#~ msgid "Keyboard stop" +#~ msgstr "Teclado detenido" + +#~ msgid "Background read from tty" +#~ msgstr "Leer en segundo plan desde tty" + +#~ msgid "Background write to tty" +#~ msgstr "Escribir en segundo plan al tty" + +#~ msgid "Urgent condition on socket" +#~ msgstr "Condición de urgencia en el socket" + +#~ msgid "CPU limit exceeded" +#~ msgstr "Limite de CPU superado" + +#~ msgid "File size limit exceeded" +#~ msgstr "Superado el límite de tamaño de archivo" + +#~ msgid "Virtual alarm clock" +#~ msgstr "Despertador virtual" + +#~ msgid "Profiling alarm clock" +#~ msgstr "Perfiles de alarma" + +#~ msgid "Window size change" +#~ msgstr "Cambiar el tamaño de la ventana" + +#~ msgid "I/O now possible" +#~ msgstr "I/O posible ahora" + +#~ msgid "Power failure restart" +#~ msgstr 
"Fallo reiniciar energía" + +#~ msgid "No runnable lwp" +#~ msgstr "LWP no ejecutable" + +#~ msgid "SIGLWP special signal used by thread library" +#~ msgstr "La señal especial SIGLWP utilizado por la biblioteca del hilo" + +#~ msgid "Checkpoint Freeze" +#~ msgstr "Checkpoint de Congelación" + +#~ msgid "Checkpoint Thaw" +#~ msgstr "Checkpoint de Thaw" + +#~ msgid "Thread Cancellation" +#~ msgstr "Cancelación de Hilo" + +#~ msgid "Resource Lost (e.g. record-lock lost)" +#~ msgstr "Recurso Perdido (por ejemplo, registro de bloqueo perdido)" + +#~ msgid "Out of memory\n" +#~ msgstr "Fuera de memoria\n" + +#~ msgid "Too much memory used." +#~ msgstr "Demasiada memoria utilizada." + +#~ msgid "Attempt to free NULL called from %s:%d\n" +#~ msgstr "Intento para liberar NULL llamado desde %s:%d\n" + +#, fuzzy +#~ msgid "in-use bit not set: double free from %s:%d\n" +#~ msgstr "doble libre desde %s:%d\n" + +#~ msgid "qp->qnext->qprev != qp called from %s:%d\n" +#~ msgstr "qp->qnext->qprev != qp llamado desde %s:%d\n" + +#~ msgid "qp->qprev->qnext != qp called from %s:%d\n" +#~ msgstr "qp->qprev->qnext != qp llamado desde %s:%d\n" + +#~ msgid "sm_realloc size: %d\n" +#~ msgstr "sm_realloc tamaño: %d\n" + +#~ msgid "sm_realloc %d at %p from %s:%d\n" +#~ msgstr "sm_realloc %d en %p desde %s:%d\n" + +#~ msgid "" +#~ "\n" +#~ "Orphaned buffers exist. Dump terminated following\n" +#~ " discovery of bad links in chain of orphaned buffers.\n" +#~ " Buffer address with bad links: %p\n" +#~ msgstr "" +#~ "\n" +#~ "Buffers huérfanos existen. Volcado terminado tras el\n" +#~ "descubrimiento de malos vínculos en la cadena de buffers huérfanos.\n" +#~ " Dirección del buffer con enlaces malos: %p\n" + +#~ msgid "Damaged buffer found. Called from %s:%d\n" +#~ msgstr "Búfer dañado encontrado. 
Llamado desde %s:%d\n" + +#~ msgid "" +#~ "\n" +#~ "Damaged buffers found at %s:%d\n" +#~ msgstr "" +#~ "\n" +#~ "Búfers dañado encontrado en %s:%d\n" + +#~ msgid " discovery of bad prev link.\n" +#~ msgstr "descubrimiento de un malo vínculo anterior.\n" + +#~ msgid " discovery of bad next link.\n" +#~ msgstr "descubrimiento del siguiente malo enlace malo.\n" + +#~ msgid " discovery of data overrun.\n" +#~ msgstr "descubrimiento de datos de rebosamiento.\n" + +#~ msgid " NULL pointer.\n" +#~ msgstr "Puntero NULL.\n" + +#~ msgid " Buffer address: %p\n" +#~ msgstr "Dirección de buffer : %p\n" + +#~ msgid "Damaged buffer: %6u bytes allocated at line %d of %s %s\n" +#~ msgstr "Búfer dañados: %6u bytes asignados en la línea %d de %s %s\n" + +#, fuzzy +#~ msgid "%6d\t%-7s\t%8s\t%10s\t%-7s\t%-8s\t%s\n" +#~ msgstr "%6d\t%-6s\t%8s\t%10s\t%-7s\t%-8s\t%s\n" + +#~ msgid "" +#~ "Error with certificate at depth: %d, issuer = %s, subject = %s, ERR=%d:" +#~ "%s\n" +#~ msgstr "" +#~ "Error con el certificado en profundidad: %d, emisor=%s, asunto=%s, ERR=%d:" +#~ "%s\n" + +#~ msgid "Error initializing SSL context" +#~ msgstr "Error inicializando contexto SSL" + +#~ msgid "Error loading certificate verification stores" +#~ msgstr "Error al cargar los almacenes de verificación del certificado" + +#~ msgid "" +#~ "Either a certificate file or a directory must be specified as a " +#~ "verification store\n" +#~ msgstr "" +#~ "Un certificado de archivo o un directorio debe especificarse como un " +#~ "almacenes de verificación\n" + +#~ msgid "Error loading certificate file" +#~ msgstr "Error cargando archivos de certificados" + +#~ msgid "Error loading private key" +#~ msgstr "Error cargando llaves privadas" + +#~ msgid "Unable to open DH parameters file" +#~ msgstr "No se puede abrir el archivo de parámetros de DH" + +#~ msgid "Unable to load DH parameters from specified file" +#~ msgstr "" +#~ "No es posible cargar los parámetros de DH desde el archivo especificado" + +#~ msgid "Failed to set TLS Diffie-Hellman parameters" +#~ msgstr "No se pudo establecer parámetros TLS de Diffie-Hellman" + +#~ msgid "Error setting cipher list, no valid ciphers available\n" +#~ msgstr "" +#~ "Error configurando lista de cipher, no hay ciphers válidos disponibles\n" + +#~ msgid "Peer failed to present a TLS certificate\n" +#~ msgstr "Par ha fallado al presentar un certificado TLS\n" + +#~ msgid "Peer %s failed to present a TLS certificate\n" +#~ msgstr "Par %s ha fallado al presentar un certificado TLS\n" + +#~ msgid "Error creating file descriptor-based BIO" +#~ msgstr "Error al crear archivo descriptor basado en BIO" + +#~ msgid "Error creating new SSL object" +#~ msgstr "Error creando nuevo objeto SSL" + +#~ msgid "Connect failure" +#~ msgstr "Fallo al conectar" + +#~ msgid "TLS shutdown failure." +#~ msgstr "Error de apagado TLS." + +#~ msgid "TLS read/write failure." +#~ msgstr "Error de lectura/escritura TLS." 
+ +#~ msgid "Running" +#~ msgstr "Ejecutando" + +#~ msgid "Blocked" +#~ msgstr "Bloqueado" + +#, fuzzy +#~ msgid "Incomplete job" +#~ msgstr "Job insertado" + +#~ msgid "Non-fatal error" +#~ msgstr "No Fatal Error" + +#~ msgid "Canceled" +#~ msgstr "Cancelado" + +#~ msgid "Verify differences" +#~ msgstr "Verificar diferencias" + +#~ msgid "Waiting on FD" +#~ msgstr "Esperando al FD" + +#~ msgid "Wait on SD" +#~ msgstr "Espere al SD" + +#~ msgid "Wait for new Volume" +#~ msgstr "Espere por un nuevo Volumen" + +#~ msgid "Waiting for mount" +#~ msgstr "Esperando por montaje" + +#~ msgid "Waiting for Storage resource" +#~ msgstr "Esperando al recurso Storage" + +#~ msgid "Waiting for Job resource" +#~ msgstr "Esperando al recurso Job" + +#~ msgid "Waiting for Client resource" +#~ msgstr "Esperando al recurso Cliente" + +#~ msgid "Waiting on Max Jobs" +#~ msgstr "Esperando el máximo Jobs" + +#~ msgid "Waiting for Start Time" +#~ msgstr "Esperando a Hora de Inicio" + +#~ msgid "Waiting on Priority" +#~ msgstr "Esperando por prioridad" + +#~ msgid "Unknown Job termination status=%d" +#~ msgstr "Estado de terminación de Job desconocido=%d" + +#~ msgid "Completed successfully" +#~ msgstr "Se ha completado con éxito" + +#~ msgid "Completed with warnings" +#~ msgstr "Completado con advertencias" + +#~ msgid "Terminated with errors" +#~ msgstr "Terminado con errores" + +#~ msgid "Fatal error" +#~ msgstr "Error fatal" + +#~ msgid "Created, not yet running" +#~ msgstr "Creado, aún no se ejecuta" + +#~ msgid "Canceled by user" +#~ msgstr "Cancelada por el usuario" + +#~ msgid "Verify found differences" +#~ msgstr "Verificar diferencias encontradas" + +#~ msgid "Waiting for File daemon" +#~ msgstr "Esperando por demonio File" + +#~ msgid "Waiting for Storage daemon" +#~ msgstr "Esperando por demonio Storage" + +#~ msgid "Waiting for higher priority jobs" +#~ msgstr "Esperando por trabajos de mayor prioridad" + +#~ msgid "Batch inserting file records" +#~ msgstr "Insertando registros de archivo en lote" + +#~ msgid "Fatal Error" +#~ msgstr "Error Fatal" + +#~ msgid "Differences" +#~ msgstr "Diferencias" + +#~ msgid "Unknown term code" +#~ msgstr "Código del término desconocido" + +#~ msgid "Migrated Job" +#~ msgstr "Jod de Migración" + +#~ msgid "Verify" +#~ msgstr "Verificar" + +#~ msgid "Restore" +#~ msgstr "Restaurar" + +#~ msgid "Console" +#~ msgstr "Consola" + +#~ msgid "System or Console" +#~ msgstr "Sistema o Consola" + +#~ msgid "Admin" +#~ msgstr "Admin" + +#~ msgid "Archive" +#~ msgstr "Archivo" + +#~ msgid "Job Copy" +#~ msgstr "Job de Copia" + +#~ msgid "Copy" +#~ msgstr "Copiar" + +#~ msgid "Migrate" +#~ msgstr "Migrar" + +#~ msgid "Scan" +#~ msgstr "Escanear" + +#~ msgid "Unknown Type" +#~ msgstr "Tipo Desconocido" + +#~ msgid "Truncate" +#~ msgstr "Truncar" + +#~ msgid "Base" +#~ msgstr "Base" + +#~ msgid "Verify Init Catalog" +#~ msgstr "Verificar Catálogo Inicial" + +#~ msgid "Verify Data" +#~ msgstr "Verificar Datos" + +#~ msgid "Virtual Full" +#~ msgstr "Virtual completa" + +#~ msgid "Unknown Job Level" +#~ msgstr "Nivel del Job desconocido" + +#~ msgid "Append" +#~ msgstr "Añadir" + +#~ msgid "Disabled" +#~ msgstr "Inhabilitado" + +#~ msgid "Used" +#~ msgstr "Usado" + +#~ msgid "Cleaning" +#~ msgstr "Limpieza" + +#~ msgid "Purged" +#~ msgstr "Purga" + +#~ msgid "Recycle" +#~ msgstr "Reciclar" + +#~ msgid "Read-Only" +#~ msgstr "Sólo-Lectura" + +#~ msgid "Invalid volume status" +#~ msgstr "Invalido estado de volumen" + +#~ msgid "Working directory not defined. 
Cannot continue.\n" +#~ msgstr "Directorio de trabajo no definido. No se puede continuar.\n" + +#~ msgid "Working Directory: \"%s\" not found. Cannot continue.\n" +#~ msgstr "" +#~ "Directorio de Trabajo: \"%s\" no encontrado. No se puede continuar.\n" + +#~ msgid "Working Directory: \"%s\" is not a directory. Cannot continue.\n" +#~ msgstr "" +#~ "Directorio de Trabajo: \"%s\" no es un directorio. No se puede " +#~ "continuar.\n" + +#~ msgid "everything ok" +#~ msgstr "todo bien" + +#~ msgid "incomplete named character" +#~ msgstr "carácter nombrado incompleto" + +#~ msgid "incomplete hexadecimal value" +#~ msgstr "valor hexadecimal incompleto" + +#~ msgid "invalid hexadecimal value" +#~ msgstr "valor hexadecimal inválido" + +#~ msgid "octal value too large" +#~ msgstr "valor octal demasiado grande" + +#~ msgid "invalid octal value" +#~ msgstr "valor octal inválido" + +#~ msgid "incomplete octal value" +#~ msgstr "valor octal incompleto" + +#~ msgid "incomplete grouped hexadecimal value" +#~ msgstr "valor hexadecimal agrupado incompleto" + +#~ msgid "incorrect character class specification" +#~ msgstr "incorrecta especificación de clase de caracteres" + +#~ msgid "invalid expansion configuration" +#~ msgstr "configuración de expansión inválido" + +#~ msgid "out of memory" +#~ msgstr "sin memoria" + +#~ msgid "incomplete variable specification" +#~ msgstr "especificación de la variable incompleta" + +#~ msgid "undefined variable" +#~ msgstr "variable no definida" + +#~ msgid "input is neither text nor variable" +#~ msgstr "de entrada no es ni texto ni variable" + +#~ msgid "unknown command character in variable" +#~ msgstr "carácter de comando desconocido en la variable" + +#~ msgid "malformatted search and replace operation" +#~ msgstr "mal-formada operación de búsqueda y reemplazo" + +#~ msgid "unknown flag in search and replace operation" +#~ msgstr "bandera desconocida en operación de búsqueda y reemplazo" + +#~ msgid "invalid regex in search and replace operation" +#~ msgstr "regex no válida en operación de búsqueda y reemplazo" + +#~ msgid "missing parameter in command" +#~ msgstr "faltan parámetros en el comando" + +#~ msgid "empty search string in search and replace operation" +#~ msgstr "palabra de búsqueda vacía en operación de búsqueda y reemplazo" + +#~ msgid "start offset missing in cut operation" +#~ msgstr "inicio offset ausente en operación de corte" + +#~ msgid "offsets in cut operation delimited by unknown character" +#~ msgstr "offset en operación de corte delimitado por carácter desconocido" + +#~ msgid "range out of bounds in cut operation" +#~ msgstr "rango fuera de límites en operaciones de corte" + +#~ msgid "offset out of bounds in cut operation" +#~ msgstr "desplazamiento fuera de límites en operaciones de corte" + +#~ msgid "logic error in cut operation" +#~ msgstr "error lógico en la operación de corte" + +#~ msgid "malformatted transpose operation" +#~ msgstr "malformación en operación de transposición" + +#~ msgid "source and target class mismatch in transpose operation" +#~ msgstr "" +#~ "clase de origen y de destino desajustada en operación de transposición" + +#~ msgid "empty character class in transpose operation" +#~ msgstr "clase de caracteres vacía en operación de transposición" + +#~ msgid "incorrect character class in transpose operation" +#~ msgstr "clase de caracteres incorrectos en la operación adaptación" + +#~ msgid "malformatted padding operation" +#~ msgstr "malformación en operación de relleno" + +#~ msgid "fill string missing in padding 
operation" +#~ msgstr "cadena de llenado ausente en la operación de relleno" + +#~ msgid "unknown quoted pair in search and replace operation" +#~ msgstr "par de citado desconocido en operación de búsqueda y reemplazo" + +#~ msgid "sub-matching reference out of range" +#~ msgstr "referencia de sub-coincidente fuera de rango" + +#~ msgid "invalid argument" +#~ msgstr "argumento invalido" + +#~ msgid "incomplete quoted pair" +#~ msgstr "par de la cita incompleta" + +#~ msgid "lookup function does not support variable arrays" +#~ msgstr "función de búsqueda no soporta conjuntos de variables" + +#~ msgid "index of array variable contains an invalid character" +#~ msgstr "índice de la variable de matriz contiene un carácter inválido" + +#~ msgid "index of array variable is incomplete" +#~ msgstr "índice de la variable de matriz está incompleta" + +#~ msgid "bracket expression in array variable's index not closed" +#~ msgstr "expresión de corchetes en el índice variable de matriz no cerrado" + +#~ msgid "division by zero error in index specification" +#~ msgstr "Error de división por cero en especificación del índice" + +#~ msgid "unterminated loop construct" +#~ msgstr "no terminado la construcción de bucle" + +#~ msgid "invalid character in loop limits" +#~ msgstr "carácter no válido en el bucle de límites" + +#~ msgid "malformed operation argument list" +#~ msgstr "lista de argumentos de operación mal formada " + +#~ msgid "undefined operation" +#~ msgstr "operación no definida" + +#~ msgid "formatting failure" +#~ msgstr "Error de formato" + +#~ msgid "unknown error" +#~ msgstr "error desconocido" + +#~ msgid "Unable to initialize watchdog lock. ERR=%s\n" +#~ msgstr "No se puede inicializar bloqueo de vigilancia. ERR=%s\n" + +#~ msgid "BUG! register_watchdog called before start_watchdog\n" +#~ msgstr "BUG! register_watchdog llamado antes de start_watchdog\n" + +#~ msgid "BUG! Watchdog %p has NULL callback\n" +#~ msgstr "BUG! Watchdog %p tiene llamada NULL\n" + +#~ msgid "BUG! Watchdog %p has zero interval\n" +#~ msgstr "BUG! Watchdog %p tiene intervalo cero \n" + +#~ msgid "BUG! unregister_watchdog_unlocked called before start_watchdog\n" +#~ msgstr "ERROR! unregister_watchdog_unlocked llamado antes start_watchdog\n" + +#~ msgid "rwl_writelock failure. ERR=%s\n" +#~ msgstr "rwl_writelock failure. ERR=%s\n" + +#~ msgid "rwl_writeunlock failure. ERR=%s\n" +#~ msgstr "rwl_writeunlock failure. ERR=%s\n" + +#, fuzzy +#~ msgid "Could not init work queue: ERR=%s\n" +#~ msgstr "No se pudo iniciar la cola de trabajos: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not add work to queue: ERR=%s\n" +#~ msgstr "No se ha podido añadir la cola de trabajo: ERR=%s\n" + +#, fuzzy +#~ msgid "Waiting for workq to be empty: ERR=%s\n" +#~ msgstr "No se puede fork para convertirse en demonio: ERR =%s\n" + +#, fuzzy +#~ msgid "Error in workq_destroy: ERR=%s\n" +#~ msgstr "Error en %s: ERR=%s\n" + +#~ msgid "Acquire read: num_writers=%d not zero. Job %d canceled.\n" +#~ msgstr "Adquirir leer: num_writers=%d no es cero. Empleo %d cancelado.\n" + +#~ msgid "No volumes specified for reading. Job %s canceled.\n" +#~ msgstr "No se especifica el volumen de lectura. Job %s cancelado.\n" + +#~ msgid "Logic error: no next volume to read. Numvol=%d Curvol=%d\n" +#~ msgstr "" +#~ "Error logico: no hay prójimo volumen para leer. Numvol=%d Curvol=%d\n" + +#, fuzzy +#~ msgid "" +#~ "Changing read device. Want Media Type=\"%s\" have=\"%s\"\n" +#~ " %s device=%s\n" +#~ msgstr "" +#~ "Cambiando dispositivo de lectura. 
Media Type=\"%s\" tiene=\"%s\"\n" +#~ " dispositivo=%s\n" + +#, fuzzy +#~ msgid "Media Type change. New read %s device %s chosen.\n" +#~ msgstr "Cambio Media Type. Nuevo dispositivo de lectura %s elegido.\n" + +#~ msgid "No suitable device found to read Volume \"%s\"\n" +#~ msgstr "No encuentra dispositivo adecuado para leer Volumen \"%s\"\n" + +#~ msgid "Job %s canceled.\n" +#~ msgstr "Job %s cancelado.\n" + +#, fuzzy +#~ msgid "Read open %s device %s Volume \"%s\" failed: ERR=%s\n" +#~ msgstr "Fallo al leer dispositivo %s abierto, Volumen \"%s\": ERR=%s\n" + +#, fuzzy +#~ msgid "Too many errors trying to mount %s device %s for reading.\n" +#~ msgstr "" +#~ "Demasiados errores tratando de montar el dispositivo %s para la lectura.\n" + +#, fuzzy +#~ msgid "Ready to read from volume \"%s\" on %s device %s.\n" +#~ msgstr "Listo para leer desde volumen \"%s\" en el dispositivo %s.\n" + +#, fuzzy +#~ msgid "Could not ready %s device %s for append.\n" +#~ msgstr "Dispositivo %s puede no está listo para anexar.\n" + +#~ msgid "Could not create JobMedia record for Volume=\"%s\" Job=%s\n" +#~ msgstr "" +#~ "No es posible crear el registro JobMedia para Volumen=\"%s\" Job=%s\n" + +#~ msgid "Read error on device %s in ANSI label. ERR=%s\n" +#~ msgstr "Error de lectura en el dispositivo %s en la etiqueta ANSI. ERR=%s\n" + +#~ msgid "Insane! End of tape while reading ANSI label.\n" +#~ msgstr "Insano! Fin de la cinta mientras leía la etiqueta ANSI.\n" + +#~ msgid "No VOL1 label while reading ANSI/IBM label.\n" +#~ msgstr "Ninguna etiqueta VOL1 al leer etiqueta ANSI/IBM.\n" + +#~ msgid "Wanted ANSI Volume \"%s\" got \"%s\"\n" +#~ msgstr "Buscando volumen ANSI \"%s\" obtuvo \"%s\"\n" + +#~ msgid "No HDR1 label while reading ANSI label.\n" +#~ msgstr "Ninguna etiqueta HDR1 al leer etiqueta ANSI.\n" + +#~ msgid "ANSI/IBM Volume \"%s\" does not belong to Bacula.\n" +#~ msgstr "Volumen \"%s\" ANSI/IBM, no pertenece a Bacula.\n" + +#~ msgid "No HDR2 label while reading ANSI/IBM label.\n" +#~ msgstr "Ninguna etiqueta HDR2 al leer etiqueta ANSI/IBM.\n" + +#~ msgid "Unknown or bad ANSI/IBM label record.\n" +#~ msgstr "Registro de etiqueta ANSI/IBM desconocido o malo.\n" + +#~ msgid "Too many records in while reading ANSI/IBM label.\n" +#~ msgstr "Demasiados registros mientras leía etiqueta ANSI/IBM.\n" + +#~ msgid "ANSI Volume label name \"%s\" longer than 6 chars.\n" +#~ msgstr "Nombre de etiqueta de Volumen ANSI \"%s\" más de 6 caracteres.\n" + +#, fuzzy +#~ msgid "Could not write ANSI VOL1 label. Wanted size=%d got=%d ERR=%s\n" +#~ msgstr "No ha podido escribir etiqueta ANSI VOL1. ERR=%s\n" + +#~ msgid "Could not write ANSI HDR1 label. ERR=%s\n" +#~ msgstr "No ha podido escribir etiqueta ANSI HDR1. ERR=%s\n" + +#~ msgid "Could not write ANSI HDR1 label.\n" +#~ msgstr "No se puede escribir la etiqueta ANSI HDR1.\n" + +#~ msgid "Error writing EOF to tape. ERR=%s" +#~ msgstr "Error al escribir EOF a la cinta. ERR=%s" + +#~ msgid "write_ansi_ibm_label called for non-ANSI/IBM type\n" +#~ msgstr "write_ansi_ibm_label llamado para tipo non-ANSI/IBM\n" + +#~ msgid "DCR is NULL!!!\n" +#~ msgstr "DCR es NULL!!!\n" + +#~ msgid "DEVICE is NULL!!!\n" +#~ msgstr "DEVICE es NULL!!!\n" + +#~ msgid "Unable to set network buffer size.\n" +#~ msgstr "No se puede establecer el tamaño del búfer de red.\n" + +#~ msgid "NULL Volume name. This shouldn't happen!!!\n" +#~ msgstr "Nombre de volumen VACÍO. Esto no debería suceder!\n" + +#~ msgid "Write session label failed. ERR=%s\n" +#~ msgstr "Fallo al escribir la etiqueta de sesión. 
ERR=%s\n" + +#~ msgid "Network send error to FD. ERR=%s\n" +#~ msgstr "Error de red al enviar a FD. ERR=%s\n" + +#, fuzzy +#~ msgid "Error reading data header from FD. n=%d msglen=%d ERR=%s\n" +#~ msgstr "Error al leer datos de cabecera de FD. ERR=%s\n" + +#~ msgid "Malformed data header from FD: %s\n" +#~ msgstr "Datos de cabecera mal formados desde FD: %s\n" + +#, fuzzy +#~ msgid "FI=%d from FD not positive or last_FI=%d\n" +#~ msgstr "Archivo de índice de FD no es positivo o secuencial\n" + +#~ msgid "Network error reading from FD. ERR=%s\n" +#~ msgstr "Error al leer la red desde FD. ERR=%s\n" + +#~ msgid "Fatal append error on device %s: ERR=%s\n" +#~ msgstr "Error Fatal añadiendo en el dispositivo %s: ERR=%s\n" + +#~ msgid "Set ok=FALSE after write_block_to_device.\n" +#~ msgstr "Set ok=FALSE después de write_block_to_device.\n" + +#~ msgid "Error writing end session label. ERR=%s\n" +#~ msgstr "Error escribiendo etiqueta de fin de sesión. ERR=%s\n" + +#, fuzzy +#~ msgid "Set ok=FALSE after write_final_block_to_device.\n" +#~ msgstr "Set ok=FALSE después de write_block_to_device.\n" + +#, fuzzy +#~ msgid "Elapsed time=%02d:%02d:%02d, Transfer rate=%s Bytes/second\n" +#~ msgstr "" +#~ "Tiempo transcurrido de escritura del Job = %02d:%02d:%02d, Tasa de " +#~ "transferencia = %s Bytes/segundo\n" + +#~ msgid "Error updating file attributes. ERR=%s\n" +#~ msgstr "Error al actualizar los atributos de archivo. ERR=%s\n" + +#~ msgid "Mount Volume \"%s\" on device %s and press return when ready: " +#~ msgstr "" +#~ "Monte Volumen \"%s\" en el dispositivo %s y presione ENTER cuando esté " +#~ "preparado:" + +#~ msgid "Network error on bnet_recv in req_vol_info.\n" +#~ msgstr "Error de red en bnet_recv en req_vol_info.\n" + +#~ msgid "Error getting Volume info: %s" +#~ msgstr "Error al obtener Volumen información: %s" + +#~ msgid "Didn't get vol info vol=%s: ERR=%s" +#~ msgstr "no recibió información de volumen vol=%s: ERR=%s" + +#, fuzzy +#~ msgid "Error creating JobMedia records: ERR=%s\n" +#~ msgstr "Error al crear registro JobMedia: ERR=%s\n" + +#, fuzzy +#~ msgid "Error creating JobMedia records: %s\n" +#~ msgstr "Error al crear registro JobMedia: %s\n" + +#~ msgid "Job %s canceled while waiting for mount on Storage Device \"%s\".\n" +#~ msgstr "" +#~ "Job %s cancelado a la espera de montar el dispositivo Storage \"%s\".\n" + +#, fuzzy +#~ msgid "" +#~ "Job %s is waiting. Cannot find any appendable volumes.\n" +#~ "Please use the \"label\" command to create a new Volume for:\n" +#~ " Storage: %s\n" +#~ " Pool: %s\n" +#~ " Media type: %s\n" +#~ msgstr "" +#~ "Job %s en espera. 
No se puede encontrar ningún volumen grabable.\n" +#~ "Por favor utilice el comando \"label\" para crear un nuevo Volumen para:\n" +#~ "Storage: %s\n" +#~ "Pool: %s\n" +#~ "Tipo de Media: %s\n" + +#~ msgid "Max time exceeded waiting to mount Storage Device %s for Job %s\n" +#~ msgstr "" +#~ "Tiempo máximo de espera excedido para montar dispositivo Storage %s para " +#~ "el Job %s\n" + +#~ msgid "pthread error in mount_next_volume.\n" +#~ msgstr "pthread error en mount_next_volume.\n" + +#~ msgid "Cannot request another volume: no volume name given.\n" +#~ msgstr "No puede solicitar otro volumen: nombre de volumen no entregado.\n" + +#, fuzzy +#~ msgid "" +#~ "%sPlease mount append Volume \"%s\" or label a new one for:\n" +#~ " Job: %s\n" +#~ " Storage: %s\n" +#~ " Pool: %s\n" +#~ " Media type: %s\n" +#~ msgstr "" +#~ "Por favor, montar Volumen \"%s\" o etiquete uno nuevo para :\n" +#~ " Job: %s\n" +#~ " Storage: %s\n" +#~ " Pool: %s\n" +#~ " Media type: %s\n" + +#, fuzzy +#~ msgid "" +#~ "%sPlease mount read Volume \"%s\" for:\n" +#~ " Job: %s\n" +#~ " Storage: %s\n" +#~ " Pool: %s\n" +#~ " Media type: %s\n" +#~ msgstr "" +#~ "Por favor, montar el volumen \"%s\" for:\n" +#~ " Job: %s\n" +#~ " Storage: %s\n" +#~ " Pool: %s\n" +#~ " Media type: %s\n" + +#~ msgid "pthread error in mount_volume\n" +#~ msgstr "pthread error en mount_volume\n" + +#~ msgid "Job %s canceled while waiting for mount on Storage Device %s.\n" +#~ msgstr "Job %s cancelado a la espera de montar el dispositivo Storage %s.\n" + +#, fuzzy +#~ msgid "" +#~ "Incorrect password given by Director.\n" +#~ "For help, please see: " +#~ msgstr "Contraseña incorrecta dada por el Director en %s.\n" + +#~ msgid "TLS negotiation failed with DIR at \"%s:%d\"\n" +#~ msgstr "Fallida la negociación TLS con DIR en \"%s:%d\"\n" + +#~ msgid "Unable to authenticate Director at %s.\n" +#~ msgstr "No se puede autenticar Director en %s.\n" + +#, fuzzy +#~ msgid "" +#~ "Incorrect authorization key from File daemon at %s rejected.\n" +#~ "For help, please see: " +#~ msgstr "Contraseña incorrecta dada por el Director en %s.\n" + +#~ msgid "TLS negotiation failed with FD at \"%s:%d\"\n" +#~ msgstr "Fallida la negociación TLS con FD en \"%s:%d\"\n" + +#~ msgid "No Changer Name given for device %s. Cannot continue.\n" +#~ msgstr "" +#~ "Ningún Nombre Changer obtenido para el dispositivo %s. No se puede " +#~ "continuar.\n" + +#~ msgid "No Changer Command given for device %s. Cannot continue.\n" +#~ msgstr "" +#~ "Ningún Comando Changer obtenido para el dispositivo %s. No se puede " +#~ "continuar.\n" + +#, fuzzy +#~ msgid "No slot defined in catalog (slot=%d) for Volume \"%s\" on %s.\n" +#~ msgstr "" +#~ "Invalida ranura=%d definida en el catálogo para volumen \"%s\" en %s. " +#~ "Carga manual puede ser requerida.\n" + +#~ msgid "" +#~ "No \"Changer Device\" for %s. Manual load of Volume may be required.\n" +#~ msgstr "" +#~ "Ninguno \"Dispositivo Changer\" para %s. Carga manual de volumen puede " +#~ "ser requerido.\n" + +#~ msgid "" +#~ "No \"Changer Command\" for %s. Manual load of Volume may be requird.\n" +#~ msgstr "" +#~ "Ninguno \"Comando Changer\" para %s. 
Carga manual de volumen puede ser " +#~ "requerido.\n" + +#, fuzzy +#~ msgid "" +#~ "3304 Issuing autochanger \"load Volume %s, Slot %d, Drive %d\" command.\n" +#~ msgstr "" +#~ "3304 Emitiendo comando auto-cambiador \"cargar ranura %d, unidad %d\".\n" + +#, fuzzy +#~ msgid "" +#~ "3305 Autochanger \"load Volume %s, Slot %d, Drive %d\", status is OK.\n" +#~ msgstr "3305 Auto-cambiador \"carga ranura %d, unidad %d\", estado es OK.\n" + +#, fuzzy +#~ msgid "" +#~ "3992 Bad autochanger \"load Volume %s Slot %d, Drive %d\": ERR=%s.\n" +#~ "Results=%s\n" +#~ msgstr "" +#~ "2992 Malo Auto-cambiador \"carga ranura %d, unidad %d\": ERR=%s.\n" +#~ "Resultados=%s\n" + +#~ msgid "3301 Issuing autochanger \"loaded? drive %d\" command.\n" +#~ msgstr "3301 Emitiendo comando auto-cambiador \"¿cargado? unidad %d\".\n" + +#~ msgid "3302 Autochanger \"loaded? drive %d\", result is Slot %d.\n" +#~ msgstr "" +#~ "3302 Auto-cambiador \"¿cargado? unidad %d\", el resultado es Ranura %d.\n" + +#~ msgid "3302 Autochanger \"loaded? drive %d\", result: nothing loaded.\n" +#~ msgstr "" +#~ "3302 Auto-cambiador \"¿cargado? unidad %d\", resultado: nada cargado.\n" + +#~ msgid "" +#~ "3991 Bad autochanger \"loaded? drive %d\" command: ERR=%s.\n" +#~ "Results=%s\n" +#~ msgstr "" +#~ "3991 Malo comando auto-cambiador comando \"cargar? unidad %d\": ERR=%s\n" +#~ "Resultados=%s\n" + +#, fuzzy +#~ msgid "Lock failure on autochanger. ERR=%s\n" +#~ msgstr "Error bloqueando Mutex. ERR=%s\n" + +#, fuzzy +#~ msgid "Unlock failure on autochanger. ERR=%s\n" +#~ msgstr "Error desbloqueando Mutex. ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "3307 Issuing autochanger \"unload Volume %s, Slot %d, Drive %d\" " +#~ "command.\n" +#~ msgstr "" +#~ "3307 Emitiendo comando auto-cambiador \"descargar ranura %d, unidad %d\"\n" + +#, fuzzy +#~ msgid "" +#~ "3995 Bad autochanger \"unload Volume %s, Slot %d, Drive %d\": ERR=%s\n" +#~ "Results=%s\n" +#~ msgstr "" +#~ "3995 Malo comando auto-cambiador \"descargar ranura %d, unidad %d\": ERR=" +#~ "%s\n" +#~ "Resultados=%s\n" + +#~ msgid "Volume \"%s\" wanted on %s is in use by device %s\n" +#~ msgstr "Volumen \"%s\" buscado en %s está en uso por el dispositivo %s\n" + +#, fuzzy +#~ msgid "" +#~ "3997 Bad autochanger \"unload Volume %s, Slot %d, Drive %d\": ERR=%s.\n" +#~ msgstr "" +#~ "3995 Malo comando auto-cambiador \"descargar ranura %d, unidad %d\": ERR=" +#~ "%s.\n" + +#~ msgid "3993 Device %s not an autochanger device.\n" +#~ msgstr "3993 Dispositivo %s no es un dispositivo auto-cargador.\n" + +#~ msgid "3306 Issuing autochanger \"%s\" command.\n" +#~ msgstr "3306 Emitiendo comando \"%s\" al auto-cambiador.\n" + +#~ msgid "3996 Open bpipe failed.\n" +#~ msgstr "3996 Fallo al abrir bpipe.\n" + +#~ msgid "Autochanger error: ERR=%s\n" +#~ msgstr "Auto-cambiador error: ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: bcopy [-d debug_level] \n" +#~ " -b bootstrap specify a bootstrap file\n" +#~ " -c specify a Storage configuration file\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -i specify input Volume names (separated by |)\n" +#~ " -o specify output Volume names (separated by |)\n" +#~ " -p proceed inspite of errors\n" +#~ " -v verbose\n" +#~ " -w specify working directory (default /tmp)\n" +#~ " -? 
print this message\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Versión: %s (%s)\n" +#~ "\n" +#~ "Utilice: bcopy [-d nivel_ depuración] \n" +#~ " -b bootstrap especifica un archivo bootstrap\n" +#~ " -c especifica un archivo Storage de configuración\n" +#~ " -d establece nivel de depuración para \n" +#~ " -dt imprime timestamp en salida de depuración\n" +#~ " -i especifica nombres de Volúmenes de entrada (separados por |)\n" +#~ " -o especifica nombres de Volúmenes de salida (separados por |)\n" +#~ " -p proceder a pesar de los errores\n" +#~ "\n" +#~ " -v detallado\n" +#~ " -w especifica directorio de trabajo (por defecto /tmp)\n" +#~ " -? imprime esta mensaje\n" +#~ "\n" + +#~ msgid "Wrong number of arguments: \n" +#~ msgstr "Incorrecto numero de argumentos: \n" + +#~ msgid "dev open failed: %s\n" +#~ msgstr "Fallo al abrir dev: %s\n" + +#~ msgid "Write of last block failed.\n" +#~ msgstr "Fallo al escribir ultimo bloque.\n" + +#~ msgid "%u Jobs copied. %u records copied.\n" +#~ msgstr "%u Jobs copiado. %u registros copiados.\n" + +#~ msgid "Record: SessId=%u SessTim=%u FileIndex=%d Stream=%d len=%u\n" +#~ msgstr "Registro: SessId=%u SessTim=%u FileIndex=%d Stream=%d len=%u\n" + +#~ msgid "Volume is prelabeled. This volume cannot be copied.\n" +#~ msgstr "Volumen esta pre-etiquetado. Este volumen no puede ser copiado.\n" + +#~ msgid "Volume label not copied.\n" +#~ msgstr "Etiqueta Volumen no copiada.\n" + +#~ msgid "Copy skipped. Record does not match BSR filter.\n" +#~ msgstr "Copia omitida. Registro no coincide con filtro de BSR.\n" + +#~ msgid "Cannot fixup device error. %s\n" +#~ msgstr "No se puede corregir error de dispositivo. %s\n" + +#~ msgid "EOM label not copied.\n" +#~ msgstr "Etiqueta EOM no copiada.\n" + +#~ msgid "EOT label not copied.\n" +#~ msgstr "Etiqueta EOT no copiada.\n" + +#~ msgid "Fresh Volume Label" +#~ msgstr "Nueva Etiqueta de Volumen" + +#~ msgid "Volume Label" +#~ msgstr "Etiqueta de Volumen" + +#~ msgid "Begin Job Session" +#~ msgstr "Inicio Job Sesión" + +#~ msgid "End Job Session" +#~ msgstr "Fin Job Sesión" + +#~ msgid "End of Medium" +#~ msgstr "Fin de Medio" + +#~ msgid "Unknown" +#~ msgstr "Desconocido" + +#~ msgid "%s Record: VolSessionId=%d VolSessionTime=%d JobId=%d DataLen=%d\n" +#~ msgstr "" +#~ "%s Registro: VolSessionId=%d VolSessionTime=%d JobId=%d DataLen=%d\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: bextract \n" +#~ " -b specify a bootstrap file\n" +#~ " -c specify a Storage configuration file\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -T send debug traces to trace file (stored in /tmp)\n" +#~ " -e exclude list\n" +#~ " -i include list\n" +#~ " -p proceed inspite of I/O errors\n" +#~ " -t read data from volume, do not write anything\n" +#~ " -v verbose\n" +#~ " -V specify Volume names (separated by |)\n" +#~ " -? print this message\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Versión: %s (%s)\n" +#~ "\n" +#~ "Utilice: bextract " +#~ "\n" +#~ " -b especifica un archivo bootstrap\n" +#~ " -c especifica un archivo Storage de configuración\n" +#~ " -d establece nivel de depuración para \n" +#~ " -dt imprime timestamp en salida de depuración\n" +#~ " -e lista de exclusión\n" +#~ " -i lista de inclusión\n" +#~ " -p proceder a pesar de los errores de E/S\n" +#~ " -v detallado\n" +#~ " -V especifica nombres de Volúmenes (separados por |)\n" +#~ " -? 
imprime esta mensaje\n" +#~ "\n" + +#~ msgid "Could not open exclude file: %s, ERR=%s\n" +#~ msgstr "No se pudo abrir archivo excluir: %s, ERR=%s\n" + +#~ msgid "Could not open include file: %s, ERR=%s\n" +#~ msgstr "No se pudo abrir archivo incluir: %s, ERR=%s\n" + +#~ msgid "%d Program Name and/or Program Data Stream records ignored.\n" +#~ msgstr "" +#~ "Nombre del Programa %d y/o registros de Datos de Flujo del Programa " +#~ "ignorado.\n" + +#~ msgid "%d Win32 data or Win32 gzip data stream records. Ignored.\n" +#~ msgstr "" +#~ "%d registros de flujo de datos Win32 or Win32 datos gzip. Ignorados.\n" + +#~ msgid "Cannot stat %s. It must exist. ERR=%s\n" +#~ msgstr "No se puede stat %s. El debe existir. ERR=%s\n" + +#~ msgid "%s must be a directory.\n" +#~ msgstr "%s debe ser un directorio.\n" + +#~ msgid "%u files restored.\n" +#~ msgstr "%u archivos restaurados.\n" + +#, fuzzy +#~ msgid "Found %s error%s\n" +#~ msgstr "Encontrados %d para: %s\n" + +#~ msgid "Write error on %s: %s\n" +#~ msgstr "Error de escritura en %s: %s\n" + +#~ msgid "Cannot continue.\n" +#~ msgstr "No se puede continuar.\n" + +#~ msgid "Logic error output file should be open but is not.\n" +#~ msgstr "" +#~ "Error lógico, archivo de salida debería estar abierto, pero no esta.\n" + +#~ msgid "%s was deleted.\n" +#~ msgstr "%s se ha eliminado.\n" + +#, fuzzy +#~ msgid "Seek error Addr=%llu on %s: %s\n" +#~ msgstr "Buscar error en %s: %s\n" + +#~ msgid "Uncompression error. ERR=%d\n" +#~ msgstr "Error de descompresión. ERR=%d\n" + +#, fuzzy +#~ msgid "LZO uncompression error. ERR=%d\n" +#~ msgstr "Error de descompresión. ERR=%d\n" + +#~ msgid "Got Program Name or Data Stream. Ignored.\n" +#~ msgstr "Obtener Nombre de Programa o Secuencia de Datos. Ignorado.\n" + +#, fuzzy +#~ msgid "Error writing JobMedia record to catalog.\n" +#~ msgstr "Error al escribir registro al bloque.\n" + +#, fuzzy +#~ msgid "Error writing final JobMedia record to catalog.\n" +#~ msgstr "Error al escribir registro al bloque.\n" + +#, fuzzy +#~ msgid "Cannot write block. Device is disabled. dev=%s\n" +#~ msgstr "No se pudo escribir bloque. Dispositivo en EOM.\n" + +#, fuzzy +#~ msgid "Cannot write block. Device at EOM. dev=%s\n" +#~ msgstr "No se pudo escribir bloque. Dispositivo en EOM.\n" + +#, fuzzy +#~ msgid "Attempt to write on read-only Volume. dev=%s\n" +#~ msgstr "Intento de escritura en volumen de sólo lectura.\n" + +#, fuzzy +#~ msgid "Attempt to write on closed device=%s\n" +#~ msgstr "Intento de escritura en volumen de sólo lectura.\n" + +#~ msgid "Write block header zeroed.\n" +#~ msgstr "Escribir cabecera de bloque cero.\n" + +#, fuzzy +#~ msgid "Write error at %s on device %s Vol=%s. ERR=%s.\n" +#~ msgstr "Error de escritura en %u:%u en el dispositivo %s. ERR=%s.\n" + +#, fuzzy +#~ msgid "" +#~ "Out of freespace caused End of Volume \"%s\" at %s on device %s. Write of " +#~ "%u bytes got %d.\n" +#~ msgstr "" +#~ "Fin de Volumen \"%s\" en %u:%u en el dispositivo %s. Escribió %u bytes " +#~ "obtuvo %d.\n" + +#, fuzzy +#~ msgid "End of Volume \"%s\" at %s on device %s. Write of %u bytes got %d.\n" +#~ msgstr "" +#~ "Fin de Volumen \"%s\" en %u:%u en el dispositivo %s. 
Escribió %u bytes " +#~ "obtuvo %d.\n" + +#, fuzzy +#~ msgid "Job failed or canceled.\n" +#~ msgstr "Job %d cancelado.\n" + +#, fuzzy +#~ msgid "At EOT: attempt to read past end of Volume.\n" +#~ msgstr "Intento de leer sesión no abierta.\n" + +#, fuzzy +#~ msgid "" +#~ "Attempt to read closed device: fd=%d at file:blk %u:%u on device %s\n" +#~ msgstr "" +#~ "Error de lectura en fd=%d desde archivo:blk %u:%u en el dispositivo %s. " +#~ "ERR=%s.\n" + +#~ msgid "Block buffer size looping problem on device %s\n" +#~ msgstr "Problema de tamaño de búfer del bloque en el dispositivo %s\n" + +#, fuzzy +#~ msgid "The %sVolume=%s on device=%s appears to be unlabeled.%s\n" +#~ msgstr "Volumen en %s tiene una malo tipo de etiqueta Bacula: %x\n" + +#, fuzzy +#~ msgid "Read error on fd=%d at addr=%s on device %s. ERR=%s.\n" +#~ msgstr "" +#~ "Error de lectura en fd=%d desde archivo:blk %u:%u en el dispositivo %s. " +#~ "ERR=%s.\n" + +#, fuzzy +#~ msgid "Read zero %sbytes Vol=%s at %s on device %s.\n" +#~ msgstr "Leer cero bytes en %u:%u en el dispositivo %s.\n" + +#, fuzzy +#~ msgid "" +#~ "Volume data error at %s! Very short block of %d bytes on device %s " +#~ "discarded.\n" +#~ msgstr "" +#~ "Error datos de volumen en %u:%u! Bloque muy corto de %d bytes en " +#~ "dispositivo %s descartado.\n" + +#~ msgid "Block length %u is greater than buffer %u. Attempting recovery.\n" +#~ msgstr "" +#~ "Longitud del bloque %u es mayor que el buffer %u. Intentando " +#~ "recuperación.\n" + +#~ msgid "Setting block buffer size to %u bytes.\n" +#~ msgstr "Configurando tamaño del bufer del bloque para %u bytes.\n" + +#~ msgid "" +#~ "Volume data error at %u:%u! Short block of %d bytes on device %s " +#~ "discarded.\n" +#~ msgstr "" +#~ "Error datos de volumen en %u:%u! Bloque corto de %d bytes en dispositivo " +#~ "%s descartado.\n" + +#, fuzzy +#~ msgid "" +#~ "Dump block %s %p: adata=%d size=%d BlkNum=%d\n" +#~ " Hdrcksum=%x cksum=%x\n" +#~ msgstr "" +#~ "Volcado de bloque %s %x: tamaño=%d BlkNum=%d\n" +#~ " Hdrcksum=%x cksum=%x\n" + +#, fuzzy +#~ msgid " Rec: VId=%u VT=%u FI=%s Strm=%s len=%d reclen=%d\n" +#~ msgstr "Rec: VId=%u VT=%u FI=%s Strm=%s len=%d p=%x\n" + +#~ msgid "%d block read errors not printed.\n" +#~ msgstr "%d errores de lectura de bloques no impresos.\n" + +#, fuzzy +#~ msgid "" +#~ "Volume data error at %lld!\n" +#~ "Adata block checksum mismatch in block=%u len=%d: calc=%x blk=%x\n" +#~ msgstr "" +#~ "Error de datos de Volumen en %u:%u!\n" +#~ "Bloque checksum desajustado en bloque=%u len=%d: calc=%x blk=%x\n" + +#~ msgid "" +#~ "Volume data error at %u:%u! Wanted ID: \"%s\", got \"%s\". Buffer " +#~ "discarded.\n" +#~ msgstr "" +#~ "Error de volumen de datos en %u:%u! Se busca ID: \"%s\",se obtiene \"%s" +#~ "\". Buffer descartado.\n" + +#~ msgid "" +#~ "Volume data error at %u:%u! Block length %u is insane (too large), " +#~ "probably due to a bad archive.\n" +#~ msgstr "" +#~ "Error de datos de volumen en %u:%u! 
Longitud de bloque %u es demente " +#~ "(demasiado grande), probablemente debido a un archivo malo.\n" + +#~ msgid "" +#~ "Volume data error at %u:%u!\n" +#~ "Block checksum mismatch in block=%u len=%d: calc=%x blk=%x\n" +#~ msgstr "" +#~ "Error de datos de Volumen en %u:%u!\n" +#~ "Bloque checksum desajustado en bloque=%u len=%d: calc=%x blk=%x\n" + +#, fuzzy +#~ msgid "" +#~ "User defined maximum volume size %s will be exceeded on device %s.\n" +#~ " Marking Volume \"%s\" as Full.\n" +#~ msgstr "" +#~ "Capacidad máxima %s de volumen definido por el usuario superado en el " +#~ "dispositivo %s.\n" + +#~ msgid "Backspace file at EOT failed. ERR=%s\n" +#~ msgstr "Fallo en retroceso de archivo desde EOT. ERR=%s\n" + +#~ msgid "Backspace record at EOT failed. ERR=%s\n" +#~ msgstr "Fallo en retroceso de registro desde EOT. ERR=%s\n" + +#~ msgid "Re-read last block at EOT failed. ERR=%s" +#~ msgstr "Fallo en re-lectura del último bloque en EOT. ERR=%s" + +#~ msgid "" +#~ "Re-read of last block: block numbers differ by more than one.\n" +#~ "Probable tape misconfiguration and data loss. Read block=%u Want block=" +#~ "%u.\n" +#~ msgstr "" +#~ "Re-lectura del último bloque: número de bloque difieren en más de uno.\n" +#~ "Probable mala configuración de la cinta y pérdida de datos. Leer bloque=" +#~ "%u Espera bloque=%u.\n" + +#~ msgid "" +#~ "Re-read of last block OK, but block numbers differ. Read block=%u Want " +#~ "block=%u.\n" +#~ msgstr "" +#~ "Re-lectura del último bloque OK, pero número de bloque difieren. Leer " +#~ "bloque=%u Espera bloque=%u.\n" + +#~ msgid "Re-read of last block succeeded.\n" +#~ msgstr "Éxito en re-lectura del último bloque.\n" + +#, fuzzy +#~ msgid "" +#~ "Error writing final EOF to tape. Volume %s may not be readable.\n" +#~ "%s" +#~ msgstr "" +#~ "Error al escribir EOF final a la cinta. Este volumen puede no ser " +#~ "legible.\n" +#~ "%s" + +#, fuzzy +#~ msgid "Error sending Volume info to Director.\n" +#~ msgstr "Error al obtener Volumen información: %s" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: bls [options] \n" +#~ " -b specify a bootstrap file\n" +#~ " -c specify a Storage configuration file\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -e exclude list\n" +#~ " -i include list\n" +#~ " -j list jobs\n" +#~ " -k list blocks\n" +#~ " (no j or k option) list saved files\n" +#~ " -L dump label\n" +#~ " -p proceed inspite of errors\n" +#~ " -V specify Volume names (separated by |)\n" +#~ " -E Check records to detect errors\n" +#~ " -v be verbose\n" +#~ " -? print this message\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Versión: %s (%s)\n" +#~ "\n" +#~ "Utilice: bls [opciones] \n" +#~ " -b especifica un archivo bootstrap\n" +#~ " -c especifica un archivo Storage de configuración\n" +#~ " -d establece nivel de depuración para \n" +#~ " -dt imprime timestamp en salida de depuración\n" +#~ " -e lista de exclusión\n" +#~ " -i lista de inclusión\n" +#~ " -j lista de trabajos\n" +#~ " -k lista de bloques\n" +#~ " (no j o k opción) lista de archivos guardados\n" +#~ " -L dump label\n" +#~ " -p proceder a pesar de los errores\n" +#~ " -v detallado\n" +#~ " -V especifica nombres de Volumen (separados por |)\n" +#~ " -? 
imprime esta mensaje\n" +#~ "\n" + +#~ msgid "No archive name specified\n" +#~ msgstr "Nombre de archivo no especificado\n" + +#~ msgid "" +#~ "\n" +#~ "Warning, this Volume is a continuation of Volume %s\n" +#~ msgstr "" +#~ "\n" +#~ "Advertencia, este Volumen es una continuación del Volumen %s\n" + +#~ msgid "Got EOM at file %u on device %s, Volume \"%s\"\n" +#~ msgstr "Obtuvo EOM en archivo %u en el dispositivo %s, Volumen \"%s\"\n" + +#~ msgid "Mounted Volume \"%s\".\n" +#~ msgstr "Volumen Montado \"%s\".\n" + +#~ msgid "End of file %u on device %s, Volume \"%s\"\n" +#~ msgstr "Fin de archivo %u en el dispositivo %s, Volumen \"%s\"\n" + +#~ msgid "" +#~ "FileIndex=%d VolSessionId=%d VolSessionTime=%d Stream=%d DataLen=%d\n" +#~ msgstr "" +#~ "FileIndex=%d VolSessionId=%d VolSessionTime=%d Stream=%d DataLen=%d\n" + +#, fuzzy +#~ msgid "End of Physical Medium" +#~ msgstr "Fin de Medio" + +#, fuzzy +#~ msgid "Start of object" +#~ msgstr "Fin de la Cinta" + +#, fuzzy +#~ msgid "End of object" +#~ msgstr "Fin de la Cinta" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: bscan [ options ] \n" +#~ " -b bootstrap specify a bootstrap file\n" +#~ " -c specify configuration file\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -m update media info in database\n" +#~ " -D specify the driver database name (default NULL)\n" +#~ " -n specify the database name (default bacula)\n" +#~ " -u specify database user name (default bacula)\n" +#~ " -P specify database password (default none)\n" +#~ " -h specify database host (default NULL)\n" +#~ " -t specify database port (default 0)\n" +#~ " -p proceed inspite of I/O errors\n" +#~ " -r list records\n" +#~ " -s synchronize or store in database\n" +#~ " -S show scan progress periodically\n" +#~ " -v verbose\n" +#~ " -V specify Volume names (separated by |)\n" +#~ " -w specify working directory (default from conf " +#~ "file)\n" +#~ " -? print this message\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Versión: %s (%s)\n" +#~ "\n" +#~ "Utilice: bscan [opciones] \n" +#~ " -b especifica un archivo bootstrap\n" +#~ " -c especifica un archivo de configuración\n" +#~ " -d establece nivel de depuración para \n" +#~ " -dt imprime timestamp en salida de depuración\n" +#~ " -m Actualiza información de los medios en la base de datos\n" +#~ " -D especifica el nombre del driver de base de datos " +#~ "(por defecto NULL)\n" +#~ " -n especifica el nombre de la base de datos (por defecto " +#~ "bacula)\n" +#~ " -u especifica el nombre de usuario de la base de datos (por " +#~ "defecto bacula)\n" +#~ " -P especifica la contraseña de la base de datos (por " +#~ "defecto none)\n" +#~ " -h especifica servidor de la base de datos (por defecto " +#~ "NULL)\n" +#~ " -t especifica el puerto de la base de datos (por defecto 0)\n" +#~ " -p proceder a pesar de los errores\n" +#~ " -r lista de registros\n" + +#~ msgid "No Working Directory defined in %s. Cannot continue.\n" +#~ msgstr "Directorio de Trabajo no definido en %s. No se puede continuar.\n" + +#~ msgid "Working Directory: %s not found. Cannot continue.\n" +#~ msgstr "Directorio de Trabajo: %s no encontrado. No se puede continuar.\n" + +#~ msgid "Working Directory: %s is not a directory. Cannot continue.\n" +#~ msgstr "" +#~ "Directorio de Trabajo: %s no es un directorio. 
No se puede continuar.\n" + +#~ msgid "First Volume Size = %s\n" +#~ msgstr "Tamaño Primero Volumen = %s\n" + +#~ msgid "Using Database: %s, User: %s\n" +#~ msgstr "Usando Base de Datos: %s, Usuario: %s\n" + +#~ msgid "Create JobMedia for Job %s\n" +#~ msgstr "Crear JobMedia para Job %s\n" + +#~ msgid "Could not create JobMedia record for Volume=%s Job=%s\n" +#~ msgstr "No es posible crear registro JobMedia para Volumen=%s Job=%s\n" + +#~ msgid "done: %d%%\n" +#~ msgstr "Hecho: %d%%\n" + +#~ msgid "Volume is prelabeled. This tape cannot be scanned.\n" +#~ msgstr "El volumen esta prelabeled. Esta cinta no puede ser escaneada. \n" + +#~ msgid "Pool record for %s found in DB.\n" +#~ msgstr "Registro Pool para %s encontrado en BD.\n" + +#~ msgid "VOL_LABEL: Pool record not found for Pool: %s\n" +#~ msgstr "VOL_LABEL: Registro Pool no encontrado para Pool: %s\n" + +#~ msgid "VOL_LABEL: PoolType mismatch. DB=%s Vol=%s\n" +#~ msgstr "VOL_LABEL: PoolType desajustado. BD=%s Vol=%s\n" + +#~ msgid "Pool type \"%s\" is OK.\n" +#~ msgstr "Tipo de Pool \"%s\" esta OK.\n" + +#~ msgid "Media record for %s found in DB.\n" +#~ msgstr "Registro Media para %s encontrado en BD.\n" + +#~ msgid "VOL_LABEL: Media record not found for Volume: %s\n" +#~ msgstr "VOL_LABEL: Registro Media no encontrado para Volumen: %s\n" + +#~ msgid "VOL_LABEL: MediaType mismatch. DB=%s Vol=%s\n" +#~ msgstr "VOL_LABEL: MediaType desajustada. BD=%s Vol=%s\n" + +#~ msgid "Media type \"%s\" is OK.\n" +#~ msgstr "Tipo de Media \"%s\" esta OK.\n" + +#~ msgid "VOL_LABEL: OK for Volume: %s\n" +#~ msgstr "VOL_LABEL: OK para Volumen: %s\n" + +#~ msgid "%d \"errors\" ignored before first Start of Session record.\n" +#~ msgstr "" +#~ "%d \"errores\" ignorados antes del primero registro de Inicio de Sesión.\n" + +#~ msgid "SOS_LABEL: Found Job record for JobId: %d\n" +#~ msgstr "SOS_LABEL: Encontrado registro Job para JobId: %d\n" + +#~ msgid "SOS_LABEL: Job record not found for JobId: %d\n" +#~ msgstr "SOS_LABEL: Registro Job no encontrado para JobId: %d\n" + +#~ msgid "SOS_LABEL: VolSessId mismatch for JobId=%u. DB=%d Vol=%d\n" +#~ msgstr "SOS_LABEL: VolSessId desajustado para JobId=%u. BD=%d Vol=%d\n" + +#~ msgid "SOS_LABEL: VolSessTime mismatch for JobId=%u. DB=%d Vol=%d\n" +#~ msgstr "SOS_LABEL: VolSessTime desajustado para JobId=%u. BD=%d Vol=%d\n" + +#~ msgid "SOS_LABEL: PoolId mismatch for JobId=%u. DB=%d Vol=%d\n" +#~ msgstr "SOS_LABEL: PoolId diferente para JobId=%u. BD=%d Vol=%d\n" + +#~ msgid "Could not find SessId=%d SessTime=%d for EOS record.\n" +#~ msgstr "No se pudo encontrar SessId=%d SessTime=%d para registro EOS.\n" + +#~ msgid "Could not update job record. ERR=%s\n" +#~ msgstr "No se pudo actualizar el registro de trabajo. ERR=%s\n" + +#~ msgid "End of all Volumes. VolFiles=%u VolBlocks=%u VolBytes=%s\n" +#~ msgstr "" +#~ "Final de todos los volúmenes. VolFiles=%u VolBlocks=%u VolBytes=%s\n" + +#~ msgid "Could not find Job for SessId=%d SessTime=%d record.\n" +#~ msgstr "No se pudo encontrar registro trabajo para SessId=%d SessTime=%d.\n" + +#, fuzzy +#~ msgid "%s file records. At addr=%s bytes=%s\n" +#~ msgstr "%s registros de archivo. 
En el archivo:blk=%s:%s bytes=%s\n" + +#~ msgid "Got MD5 record: %s\n" +#~ msgstr "Obtuvo registro MD5: %s\n" + +#~ msgid "Got SHA1 record: %s\n" +#~ msgstr "Obtuvo registro SHA1: %s\n" + +#~ msgid "Got SHA256 record: %s\n" +#~ msgstr "Obtuvo registro SHA256: %s\n" + +#~ msgid "Got SHA512 record: %s\n" +#~ msgstr "Obtuvo registro SHA512: %s\n" + +#~ msgid "Got signed digest record\n" +#~ msgstr "Obtuvo registro resume firmado: %s\n" + +#~ msgid "Got Prog Names Stream: %s\n" +#~ msgstr "Obtuvo Stream Nombres de Programas: %s\n" + +#~ msgid "Got Prog Data Stream record.\n" +#~ msgstr "Obtuvo Registro Stream Datos de Programas: %s\n" + +#~ msgid "Unknown stream type!!! stream=%d len=%i\n" +#~ msgstr "Tipo de flujo desconocido!!! stream=%d len=%i\n" + +#~ msgid "Could not create File Attributes record. ERR=%s\n" +#~ msgstr "No se pudo crear el registro de Archivos de Atributos. ERR=%s\n" + +#~ msgid "Created File record: %s\n" +#~ msgstr "Registro Archivo creado: %s\n" + +#~ msgid "Could not create media record. ERR=%s\n" +#~ msgstr "No es posible crear registro media. ERR=%s\n" + +#~ msgid "Could not update media record. ERR=%s\n" +#~ msgstr "No es posible actualizar registro media. ERR=%s\n" + +#~ msgid "Created Media record for Volume: %s\n" +#~ msgstr "Creado Registro Media para Volumen: %s\n" + +#~ msgid "Updated Media record at end of Volume: %s\n" +#~ msgstr "Actualizado el registro Media al final del Volumen: %s\n" + +#~ msgid "Could not create pool record. ERR=%s\n" +#~ msgstr "No es posible crear el registro Pool. ERR=%s\n" + +#~ msgid "Created Pool record for Pool: %s\n" +#~ msgstr "Creado el registro Pool para Pool: %s\n" + +#~ msgid "Could not get Client record. ERR=%s\n" +#~ msgstr "No se ha podido obtener el registro del cliente. ERR=%s\n" + +#~ msgid "Created Client record for Client: %s\n" +#~ msgstr "Creado registro Cliente para Cliente: %s\n" + +#~ msgid "Fileset \"%s\" already exists.\n" +#~ msgstr "Fileset \"%s\" ya existe.\n" + +#~ msgid "Could not create FileSet record \"%s\". ERR=%s\n" +#~ msgstr "No es posible crear registro FileSet \"%s\". ERR=%s\n" + +#~ msgid "Created FileSet record \"%s\"\n" +#~ msgstr "Creado registro FileSet \"%s\"\n" + +#~ msgid "Could not create JobId record. ERR=%s\n" +#~ msgstr "No es posible crear registro JobId. ERR=%s\n" + +#~ msgid "Could not update job start record. ERR=%s\n" +#~ msgstr "No se pudo actualizar el registro job de inicio. ERR=%s\n" + +#~ msgid "Created new JobId=%u record for original JobId=%u\n" +#~ msgstr "Creado nuevo registro JobId=%u para JobId=%u original\n" + +#, fuzzy +#~ msgid "Could not find JobStatus for SessId=%d SessTime=%d in EOS record.\n" +#~ msgstr "No se pudo encontrar registro trabajo para SessId=%d SessTime=%d.\n" + +#~ msgid "Could not update JobId=%u record. ERR=%s\n" +#~ msgstr "No se puede actualizar registro JobId=%u. 
ERR=%s\n" + +#~ msgid "Updated Job termination record for JobId=%u Level=%s TermStat=%c\n" +#~ msgstr "" +#~ "Actualizado el registro de terminación de Job para JobId=%u Nivel=%s " +#~ "TermStat=%c\n" + +#~ msgid "Job Termination code: %d" +#~ msgstr "Código de Terminación del Job: %d" + +#~ msgid "" +#~ "%s\n" +#~ "JobId: %d\n" +#~ "Job: %s\n" +#~ "FileSet: %s\n" +#~ "Backup Level: %s\n" +#~ "Client: %s\n" +#~ "Start time: %s\n" +#~ "End time: %s\n" +#~ "Files Written: %s\n" +#~ "Bytes Written: %s\n" +#~ "Volume Session Id: %d\n" +#~ "Volume Session Time: %d\n" +#~ "Last Volume Bytes: %s\n" +#~ "Termination: %s\n" +#~ "\n" +#~ msgstr "" +#~ "%s\n" +#~ "JobId: %d\n" +#~ "Job: %s\n" +#~ "FileSet: %s\n" +#~ "Nivel de Respaldo: %s\n" +#~ "Cliente: %s\n" +#~ "Hora de Inicio: %s\n" +#~ "Hora de Finalización: %s\n" +#~ "Archivos Escritos: %s\n" +#~ "Bytes Escritos: %s\n" +#~ "Id Volumen Sesión: %d\n" +#~ "Tiempo de la Sesión del Volumen: %d\n" +#~ "Últimos Bytes del Volumen: %s\n" +#~ "Terminación: %s\n" +#~ "\n" + +#~ msgid "Could not create JobMedia record. ERR=%s\n" +#~ msgstr "No es posible crear registro JobMedia. ERR=%s\n" + +#~ msgid "Created JobMedia record JobId %d, MediaId %d\n" +#~ msgstr "Creado registro JobMedia JobID %d, MediaID %d\n" + +#~ msgid "Could not find SessId=%d SessTime=%d for MD5/SHA1 record.\n" +#~ msgstr "" +#~ "No se pudo encontrar SessId=%d SessTime=%d para registro MD5/SHA1.\n" + +#~ msgid "Could not add MD5/SHA1 to File record. ERR=%s\n" +#~ msgstr "No se pudo agregar MD5/SHA1 al registro File. ERR=%s\n" + +#~ msgid "Updated MD5/SHA1 record\n" +#~ msgstr "Actualizado registro MD5/SHA1\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: bsdjson [options] [config_file]\n" +#~ " -r get resource type \n" +#~ " -n get resource \n" +#~ " -l get only directives matching dirs (use with -r)\n" +#~ " -D get only data\n" +#~ " -c use as configuration file\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -t test - read config and exit\n" +#~ " -v verbose user messages\n" +#~ " -? print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Versión: %s (%s)\n" +#~ "\n" +#~ "Utilice: stored [opciones] [-c archivo_configuración] " +#~ "[archivo_configuración]\n" +#~ " -c usar como archivo de configuración\n" +#~ " -d establecer el nivel de depuración para \n" +#~ " -dt imprimir timestamp en salida de depuración\n" +#~ " -f ejecutar en primer plano (para depuración)\n" +#~ " -g establecer groupid para grupo\n" +#~ " -m imprimir salida kaboom para depuración)\n" +#~ " -p continuar a pesar de errores de E/S\n" +#~ " -s sin señales (para depuración)\n" +#~ " -t prueba - leer la configuración y salir\n" +#~ " -u establecer userid para \n" +#~ " -v mensajes de usuario detallados\n" +#~ " -? imprimir este mensaje.\n" +#~ "\n" + +#~ msgid "No Storage resource defined in %s. Cannot continue.\n" +#~ msgstr "Recurso Storage no definido en %s. No se puede continuar.\n" + +#~ msgid "Only one Storage resource permitted in %s\n" +#~ msgstr "Sólo un recurso Storage permitido en %s\n" + +#~ msgid "No Director resource defined in %s. Cannot continue.\n" +#~ msgstr "Recurso Director no definido en %s. No se puede continuar.\n" + +#~ msgid "No Device resource defined in %s. Cannot continue.\n" +#~ msgstr "Recurso Device no definido en %s. No se puede continuar.\n" + +#~ msgid "No Messages resource defined in %s. Cannot continue.\n" +#~ msgstr "Recurso Mensajes no definido en %s. 
No se puede continuar.\n" + +#~ msgid "\"TLS Certificate\" file not defined for Storage \"%s\" in %s.\n" +#~ msgstr "" +#~ "Archivo \"TLS Certificate\" no definido para Storage \"%s\" en %s.\n" + +#~ msgid "\"TLS Key\" file not defined for Storage \"%s\" in %s.\n" +#~ msgstr "Archivo \"TLS Key\" no definido para Storage \"%s\" en %s.\n" + +#~ msgid "" +#~ "Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined " +#~ "for Storage \"%s\" in %s. At least one CA certificate store is required " +#~ "when using \"TLS Verify Peer\".\n" +#~ msgstr "" +#~ "Ni \"Certificado TLS CA \" o \"Directorio del Certificado TLS CA\" están " +#~ "definidos para el Storage \"%s\" en %s. Por lo menos un almacén de " +#~ "Certificados CA es necesario cuando se utiliza \"Verificar TLS Peer\".\n" + +#~ msgid "Tape block size (%d) not multiple of system size (%d)\n" +#~ msgstr "" +#~ "Tamaño de bloque de cinta (%d) no es múltiplo del tamaño de sistema (% " +#~ "d)\n" + +#~ msgid "Tape block size (%d) is not a power of 2\n" +#~ msgstr "Tamaño de bloque de cinta (%d) no es una potencia de 2\n" + +#~ msgid "" +#~ "\n" +#~ "\n" +#~ "!!!! Warning large disk addressing disabled. boffset_t=%d should be 8 or " +#~ "more !!!!!\n" +#~ "\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "\n" +#~ "!!!! Advertencia, deshabilitado direccionamiento de discos largos. " +#~ "boffset_t=%d debe ser de 8 o más !!!!!\n" +#~ "\n" +#~ "\n" + +#~ msgid "32 bit printf/scanf problem. i=%d x32=%u y32=%u\n" +#~ msgstr "32 bit printf/scanf problema. i=%d x32=%u y32=%u\n" + +#~ msgid "64 bit printf/scanf problem. i=%d x64=%" +#~ msgstr "64 bit printf/scanf problema. i=%d x64=%" + +#~ msgid "Tape block granularity is %d bytes.\n" +#~ msgstr "Granularidad del bloque de cinta es %d bytes.\n" + +#~ msgid "No archive name specified.\n" +#~ msgstr "Nombre de archivo no especificado.\n" + +#~ msgid "Improper number of arguments specified.\n" +#~ msgstr "Número incorrecto de argumentos especificados.\n" + +#~ msgid "btape only works with tape storage.\n" +#~ msgstr "btape sólo funciona con almacenamiento en cinta.\n" + +#~ msgid "Total Volume bytes=%sB. Total Write rate = %sB/s\n" +#~ msgstr "Volumen total bytes=%sB. Tasa total de Escritura = %sB/s\n" + +#~ msgid "Volume bytes=%sB. Write rate = %sB/s\n" +#~ msgstr "Volumen bytes=%sB. Tasa de Escritura = %sB/s\n" + +#~ msgid "open device %s: OK\n" +#~ msgstr "abrir dispositivo %s: OK\n" + +#~ msgid "Enter Volume Name: " +#~ msgstr "Ingrese nombre de Volumen:" + +#~ msgid "Device open failed. ERR=%s\n" +#~ msgstr "Fallo al abrir dispositivo. ERR=%s\n" + +#~ msgid "Wrote Volume label for volume \"%s\".\n" +#~ msgstr "Escribe la etiqueta de Volumen para el volumen \"%s\".\n" + +#~ msgid "Volume has no label.\n" +#~ msgstr "Volumen no tiene etiqueta.\n" + +#~ msgid "Volume label read correctly.\n" +#~ msgstr "Etiqueta de volumen leída correctamente.\n" + +#~ msgid "I/O error on device: ERR=%s" +#~ msgstr "error de E/S en el dispositivo: ERR=%s" + +#, fuzzy +#~ msgid "Volume type error: ERR=%s\n" +#~ msgstr "Seek error: ERR=%s\n" + +#~ msgid "Volume name error\n" +#~ msgstr "Nombre de Volumen error\n" + +#~ msgid "Error creating label. ERR=%s" +#~ msgstr "Error creando etiqueta. ERR=%s" + +#~ msgid "Volume version error.\n" +#~ msgstr "Volumen versión error.\n" + +#~ msgid "Bad Volume label type.\n" +#~ msgstr "Malo tipo de etiqueta de volumen.\n" + +#~ msgid "Unknown error.\n" +#~ msgstr "Error desconocido.\n" + +#~ msgid "Bad status from load. ERR=%s\n" +#~ msgstr "Malo estado de carga. 
ERR=%s\n" + +#~ msgid "Loaded %s\n" +#~ msgstr "Cargado %s\n" + +#~ msgid "Bad status from rewind. ERR=%s\n" +#~ msgstr "Malo estado de rebobinado. ERR=%s\n" + +#~ msgid "Rewound %s\n" +#~ msgstr "Rebobinado %s\n" + +#~ msgid "Bad status from weof. ERR=%s\n" +#~ msgstr "Malo estado de weof. ERR=%s\n" + +#~ msgid "Wrote 1 EOF to %s\n" +#~ msgstr "Escribir 1 EOF para %s\n" + +#~ msgid "Wrote %d EOFs to %s\n" +#~ msgstr "Escribir %d EOFs para %s\n" + +#~ msgid "Moved to end of medium.\n" +#~ msgstr "Movido para el final de la media.\n" + +#~ msgid "Bad status from bsf. ERR=%s\n" +#~ msgstr "Mal estado desde BSF. ERR=%s\n" + +#~ msgid "Backspaced %d file%s.\n" +#~ msgstr "Backspaced %d archivo%s.\n" + +#~ msgid "Bad status from bsr. ERR=%s\n" +#~ msgstr "Malo estado de BSR. ERR=%s\n" + +#~ msgid "Backspaced %d record%s.\n" +#~ msgstr "Backspaced %d registro%s.\n" + +#~ msgid "Configured device capabilities:\n" +#~ msgstr "Capacidades del dispositivo configuradas:\n" + +#~ msgid "Device status:\n" +#~ msgstr "Estado del Dispositivo:\n" + +#~ msgid "Device parameters:\n" +#~ msgstr "Parámetros del Dispositivo:\n" + +#~ msgid "Status:\n" +#~ msgstr "Estados:\n" + +#~ msgid "" +#~ "Test writing larger and larger records.\n" +#~ "This is a torture test for records.\n" +#~ "I am going to write\n" +#~ "larger and larger records. It will stop when the record size\n" +#~ "plus the header exceeds the block size (by default about 64K)\n" +#~ msgstr "" +#~ "Prueba de escritura de registros cada vez mayores.\n" +#~ "Esta es una prueba de tortura para los registros.\n" +#~ "Voy a escribir\n" +#~ "registros cada vez mayores. Se detendrá cuando el tamaño de registro " +#~ "además de la cabecera excede el tamaño de bloque (por defecto sobre 64K)\n" + +#~ msgid "Do you want to continue? (y/n): " +#~ msgstr "Desea continuar? (y/n)" + +#~ msgid "Command aborted.\n" +#~ msgstr "Comando Abortado.\n" + +#~ msgid "Block %d i=%d\n" +#~ msgstr "Bloque %d i=%d\n" + +#~ msgid "Skipping read backwards test because BSR turned off.\n" +#~ msgstr "" +#~ "Saltar prueba de lectura hacia atrás, porque BSR esta desactivado.\n" + +#~ msgid "" +#~ "\n" +#~ "=== Write, backup, and re-read test ===\n" +#~ "\n" +#~ "I'm going to write three records and an EOF\n" +#~ "then backup over the EOF and re-read the last record.\n" +#~ "Bacula does this after writing the last block on the\n" +#~ "tape to verify that the block was written correctly.\n" +#~ "\n" +#~ "This is not an *essential* feature ...\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "=== Prueba de escritura, copia de seguridad y re-lectura ===\n" +#~ "\n" +#~ "Yo voy a escribir 3 registros y un EOF\n" +#~ "a continuación respaldar sobre el EOF y re-leer el ultimo registro.\n" +#~ " Bacula hace esto después de escribir el último bloque en la\n" +#~ "cinta para comprobar que el bloque ha sido escrito correctamente.\n" +#~ "\n" +#~ "Esta es una característica *esencial* ...\n" +#~ "\n" + +#~ msgid "Error writing record to block.\n" +#~ msgstr "Error al escribir registro al bloque.\n" + +#~ msgid "Error writing block to device.\n" +#~ msgstr "Error al escribir bloque al dispositivo.\n" + +#~ msgid "Wrote first record of %d bytes.\n" +#~ msgstr "Escribió primer registro de %d bytes.\n" + +#~ msgid "Wrote second record of %d bytes.\n" +#~ msgstr "Escribió segundo registro de %d bytes.\n" + +#~ msgid "Wrote third record of %d bytes.\n" +#~ msgstr "Escribió tercer registro de %d bytes.\n" + +#~ msgid "Backspace file failed! ERR=%s\n" +#~ msgstr "Fallo en retroceso de archivo! 
ERR=%s\n" + +#~ msgid "Backspaced over EOF OK.\n" +#~ msgstr "Retroceso sobre EOF OK.\n" + +#~ msgid "Backspace record failed! ERR=%s\n" +#~ msgstr "Fallo en retroceso de registro! ERR=%s\n" + +#~ msgid "Backspace record OK.\n" +#~ msgstr "Retroceso de registro OK.\n" + +#~ msgid "Read block failed! ERR=%s\n" +#~ msgstr "Lectura de bloque fallida! ERR=%s\n" + +#~ msgid "Bad data in record. Test failed!\n" +#~ msgstr "Datos erróneos en el registro. Prueba fallida!\n" + +#~ msgid "" +#~ "\n" +#~ "Block re-read correct. Test succeeded!\n" +#~ msgstr "" +#~ "\n" +#~ "Re-lectura de bloque correcta. Prueba Satisfactoria!\n" + +#~ msgid "" +#~ "=== End Write, backup, and re-read test ===\n" +#~ "\n" +#~ msgstr "" +#~ "=== Fin prueba de escritura, copia de seguridad, y re-lectura ===\n" +#~ "\n" + +#~ msgid "" +#~ "This is not terribly serious since Bacula only uses\n" +#~ "this function to verify the last block written to the\n" +#~ "tape. Bacula will skip the last block verification\n" +#~ "if you add:\n" +#~ "\n" +#~ "Backward Space Record = No\n" +#~ "\n" +#~ "to your Storage daemon's Device resource definition.\n" +#~ msgstr "" +#~ "Esto no es terriblemente grave, desde que sólo utilices Bacula\n" +#~ "esta función para verificar el último bloque por escrito en la\n" +#~ "cinta. Bacula saltará la verificación del último bloque\n" +#~ "si usted agrega:\n" +#~ "\n" +#~ "Backward Space Record = NO\n" +#~ "\n" +#~ " en la definición de los recursos de Dispositivos de su demonio Storage.\n" + +#~ msgid "Begin writing %i files of %sB with raw blocks of %u bytes.\n" +#~ msgstr "" +#~ "Inicia escribiendo %i archivos de %sB con raw bloques de %u bytes.\n" + +#~ msgid "Write failed at block %u. stat=%d ERR=%s\n" +#~ msgstr "Error al escritura en el bloque %u. stat=%d ERR=%s\n" + +#~ msgid "Begin writing %i files of %sB with blocks of %u bytes.\n" +#~ msgstr "Inicia escribiendo %i archivos de %sB con bloques de %u bytes.\n" + +#~ msgid "" +#~ "\n" +#~ "Error writing record to block.\n" +#~ msgstr "" +#~ "\n" +#~ "Error escribiendo registro al bloque.\n" + +#~ msgid "" +#~ "\n" +#~ "Error writing block to device.\n" +#~ msgstr "" +#~ "\n" +#~ "Error escribiendo bloque al dispositivo.\n" + +#~ msgid "The file_size is too big, stop this test with Ctrl-c.\n" +#~ msgstr "" +#~ "El tamaño_archivo es demasiado grande, detenga esta prueba con Ctrl-c.\n" + +#~ msgid "Test with zero data, should give the maximum throughput.\n" +#~ msgstr "Prueba con cero de datos, debe dar el máximo rendimiento.\n" + +#~ msgid "Test with random data, should give the minimum throughput.\n" +#~ msgstr "Prueba con datos aleatorios, deben dar el rendimiento mínimo.\n" + +#~ msgid "Test with zero data and bacula block structure.\n" +#~ msgstr "Prueba con cero de datos y estructura del bloque de Bacula.\n" + +#~ msgid "" +#~ "\n" +#~ "=== Write, rewind, and re-read test ===\n" +#~ "\n" +#~ "I'm going to write %d records and an EOF\n" +#~ "then write %d records and an EOF, then rewind,\n" +#~ "and re-read the data to verify that it is correct.\n" +#~ "\n" +#~ "This is an *essential* feature ...\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "=== Probar Escritura, Rebobinado y Re-lectura === \n" +#~ "\n" +#~ "Voy a escribir %d registros y un EOF\n" +#~ "a continuación escribiré %d registros y un EOF, a continuación, " +#~ "rebobinar,\n" +#~ "y re-leer los datos para verificar que son correctos.\n" +#~ "\n" +#~ "Este es una característica *esencial*...\n" +#~ "\n" + +#~ msgid "Wrote %d blocks of %d bytes.\n" +#~ msgstr "Escribió %d bloques de %d 
bytes.\n" + +#~ msgid "Rewind OK.\n" +#~ msgstr "Rebobinar OK.\n" + +#~ msgid "Got EOF on tape.\n" +#~ msgstr "Obtuvo EOF en la cinta.\n" + +#~ msgid "Read block %d failed! ERR=%s\n" +#~ msgstr "Fallo al leer bloque %d! ERR=%s\n" + +#~ msgid "Read record failed. Block %d! ERR=%s\n" +#~ msgstr "Fallo al leer registro. Bloque %d! ERR =%s\n" + +#~ msgid "Bad data in record. Expected %d, got %d at byte %d. Test failed!\n" +#~ msgstr "" +#~ "Datos incorrectos en el registro. Esperaba %d, obtuvo %d en byte %d. " +#~ "Prueba Fallida!\n" + +#~ msgid "%d blocks re-read correctly.\n" +#~ msgstr "%d bloques releídos correctamente.\n" + +#~ msgid "" +#~ "=== Test Succeeded. End Write, rewind, and re-read test ===\n" +#~ "\n" +#~ msgstr "" +#~ "=== Prueba Satisfactoria. Fin prueba de Escritura, Rebobinado y Re-" +#~ "lectura ===\n" +#~ "\n" + +#~ msgid "Block position test\n" +#~ msgstr "Prueba de posición del bloque\n" + +#~ msgid "Reposition to file:block %d:%d\n" +#~ msgstr "Reposición para archivo:bloque %d:%d\n" + +#~ msgid "Reposition error.\n" +#~ msgstr "Error de reposición.\n" + +#~ msgid "" +#~ "Read block %d failed! file=%d blk=%d. ERR=%s\n" +#~ "\n" +#~ msgstr "" +#~ "Fallo al leer bloque %d! archivo=%d blk=%d. ERR=%s\n" +#~ "\n" + +#~ msgid "" +#~ "This may be because the tape drive block size is not\n" +#~ " set to variable blocking as normally used by Bacula.\n" +#~ " Please see the Tape Testing chapter in the manual and \n" +#~ " look for using mt with defblksize and setoptions\n" +#~ "If your tape drive block size is correct, then perhaps\n" +#~ " your SCSI driver is *really* stupid and does not\n" +#~ " correctly report the file:block after a FSF. In this\n" +#~ " case try setting:\n" +#~ " Fast Forward Space File = no\n" +#~ " in your Device resource.\n" +#~ msgstr "" +#~ "Esto puede ser porque el tamaño del bloque de la\n" +#~ "unidad de cinta no está establecido en la variable\n" +#~ "de bloqueo como utilizado normalmente por Bacula.\n" +#~ "Consulte el capítulo Probando Cintas en el manual y\n" +#~ "busque por usando mt con defblksize y setoptions.\n" +#~ "Si el tamaño del bloque de la unidad de cinta es correcto,\n" +#~ "entonces quizá el controlador SCSI es *realmente* estúpido\n" +#~ "y no informa correctamente el archivo:bloque después de un FSF.\n" +#~ "En este caso, intente configurar:\n" +#~ " Fast Forward Space File = no\n" +#~ "en su recurso de Dispositivos.\n" + +#~ msgid "Read record failed! ERR=%s\n" +#~ msgstr "Fallo al leer registro! ERR=%s\n" + +#~ msgid "Block %d re-read correctly.\n" +#~ msgstr "Bloque %d re-leído correctamente.\n" + +#~ msgid "" +#~ "\n" +#~ "\n" +#~ "=== Append files test ===\n" +#~ "\n" +#~ "This test is essential to Bacula.\n" +#~ "\n" +#~ "I'm going to write one record in file 0,\n" +#~ " two records in file 1,\n" +#~ " and three records in file 2\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "\n" +#~ "=== Prueba de Añadir Archivos===\n" +#~ "\n" +#~ "Esta prueba es esencial para Bacula.\n" +#~ "\n" +#~ "Voy a escribir un registro en el archivo 0,\n" +#~ " dos registros en el archivo 1,\n" +#~ " y tres registros en el archivo de 2\n" +#~ "\n" + +#~ msgid "Now moving to end of medium.\n" +#~ msgstr "Moviendo ahora a final de medio.\n" + +#~ msgid "We should be in file 3. I am at file %d. %s\n" +#~ msgstr "Deberíamos estar en el archivo 3. Estoy en el archivo %d. %s\n" + +#~ msgid "This is correct!" +#~ msgstr "Esto es correcto!" + +#~ msgid "This is NOT correct!!!!" +#~ msgstr "Esto NO es correcto!" 
+ +#~ msgid "" +#~ "\n" +#~ "Now the important part, I am going to attempt to append to the tape.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Ahora la parte importante, voy a tratar de anexar a la cinta.\n" +#~ "\n" + +#~ msgid "" +#~ "Done appending, there should be no I/O errors\n" +#~ "\n" +#~ msgstr "" +#~ "Añadiendo hecho, no debería haber ningún error de E/S\n" +#~ "\n" + +#~ msgid "Doing Bacula scan of blocks:\n" +#~ msgstr "Hacer Bacula exploración de los bloques:\n" + +#~ msgid "End scanning the tape.\n" +#~ msgstr "Fin del escaneo de la cinta.\n" + +#~ msgid "We should be in file 4. I am at file %d. %s\n" +#~ msgstr "Deberíamos estar en el archivo 4. Estoy en el archivo %d. %s\n" + +#~ msgid "" +#~ "\n" +#~ "Autochanger enabled, but no name or no command device specified.\n" +#~ msgstr "" +#~ "\n" +#~ "Auto-cambiador habilitado, pero ningún nombre o comando de dispositivo " +#~ "especificado.\n" + +#~ msgid "" +#~ "\n" +#~ "Ah, I see you have an autochanger configured.\n" +#~ "To test the autochanger you must have a blank tape\n" +#~ " that I can write on in Slot 1.\n" +#~ msgstr "" +#~ "\n" +#~ "Ah, veo que tienen un auto-cargador configurado.\n" +#~ "Para probar el auto-cargador debe tener una cinta en blanco\n" +#~ " que yo pueda escribir en la Ranura 1.\n" + +#~ msgid "" +#~ "\n" +#~ "Do you wish to continue with the Autochanger test? (y/n): " +#~ msgstr "" +#~ "\n" +#~ "¿Desea continuar con la prueba del auto-cambiador? (y/n):" + +#~ msgid "" +#~ "\n" +#~ "\n" +#~ "=== Autochanger test ===\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "\n" +#~ "=== Prueba Auto-cambiador ===\n" +#~ "\n" + +#~ msgid "3301 Issuing autochanger \"loaded\" command.\n" +#~ msgstr "3301 Emitiendo comando auto-cambiador \"cargado\".\n" + +#~ msgid "3991 Bad autochanger command: %s\n" +#~ msgstr "3991 Malo comando cargador: %s\n" + +#~ msgid "3991 result=\"%s\": ERR=%s\n" +#~ msgstr "3991 resultado=\"%s\": ERR=%s\n" + +#~ msgid "Slot %d loaded. I am going to unload it.\n" +#~ msgstr "Ranura %d cargada. Voy a descargarla.\n" + +#~ msgid "Nothing loaded in the drive. OK.\n" +#~ msgstr "Nada cargado en la unidad. 
OK.\n" + +#~ msgid "3302 Issuing autochanger \"unload %d %d\" command.\n" +#~ msgstr "3302 Emitiendo comando auto-cambiador \"descargar %d %d\".\n" + +#~ msgid "unload status=%s %d\n" +#~ msgstr "estado descargar=%s %d\n" + +#~ msgid "Bad" +#~ msgstr "Malo" + +#~ msgid "3992 Bad autochanger command: %s\n" +#~ msgstr "3992 Malo comando cargador: %s\n" + +#~ msgid "3992 result=\"%s\": ERR=%s\n" +#~ msgstr "3992 resultado=\"%s\": ERR=%s\n" + +#~ msgid "3303 Issuing autochanger \"load %d %d\" command.\n" +#~ msgstr "3303 Emitiendo comando auto-cambiador \"cargar %d %d\".\n" + +#~ msgid "3303 Autochanger \"load %d %d\" status is OK.\n" +#~ msgstr "3303 Estado auto-cambiador \"carga %d %d\" está OK.\n" + +#~ msgid "3993 Bad autochanger command: %s\n" +#~ msgstr "3993 Malo comando cargador: %s\n" + +#~ msgid "3993 result=\"%s\": ERR=%s\n" +#~ msgstr "3993 resultado=\"%s\": ERR=%s\n" + +#~ msgid "" +#~ "\n" +#~ "The test failed, probably because you need to put\n" +#~ "a longer sleep time in the mtx-script in the load) case.\n" +#~ "Adding a 30 second sleep and trying again ...\n" +#~ msgstr "" +#~ "\n" +#~ "Fallo la prueba, probablemente porque es necesario poner\n" +#~ "un mayor tiempo de espera en su mtx-script en la clausula load).\n" +#~ "Añadiendo 30 segundos de espera y volviendo a intentarlo...\n" + +#~ msgid "Wrote EOF to %s\n" +#~ msgstr "Escribió EOF para %s\n" + +#~ msgid "" +#~ "\n" +#~ "The test worked this time. Please add:\n" +#~ "\n" +#~ " sleep %d\n" +#~ "\n" +#~ "to your mtx-changer script in the load) case.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "La prueba funcionó esta vez. Por favor, añada:\n" +#~ "\n" +#~ " sleep %d\n" +#~ "\n" +#~ "en su script mtx-cambiador dentro de de la clausula load).\n" +#~ "\n" + +#~ msgid "" +#~ "\n" +#~ "The test autochanger worked!!\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "La prueba del auto-cargador funcionó!!\n" +#~ "\n" + +#~ msgid "You must correct this error or the Autochanger will not work.\n" +#~ msgstr "Usted debe corregir este error o la Auto-cambiador no funcionará.\n" + +#~ msgid "" +#~ "\n" +#~ "\n" +#~ "=== Forward space files test ===\n" +#~ "\n" +#~ "This test is essential to Bacula.\n" +#~ "\n" +#~ "I'm going to write five files then test forward spacing\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "\n" +#~ "=== Probar espaciar archivos hacia adelante ===\n" +#~ "\n" +#~ "Esta prueba es esencial para Bacula.\n" +#~ "\n" +#~ "Voy a escribir cinco archivo, a continuación probar espaciado hacia " +#~ "adelante\n" +#~ "\n" + +#~ msgid "Now forward spacing 1 file.\n" +#~ msgstr "Ahora espaciando 1 archivo hacia adelante.\n" + +#~ msgid "Bad status from fsr. ERR=%s\n" +#~ msgstr "Malo estado desde FSR. ERR=%s\n" + +#~ msgid "We should be in file 1. I am at file %d. %s\n" +#~ msgstr "Debemos estar en el archivo 1. Estoy en el archivo %d. %s\n" + +#~ msgid "Now forward spacing 2 files.\n" +#~ msgstr "Ahora espaciando 2 archivo hacia adelante.\n" + +#~ msgid "Now forward spacing 4 files.\n" +#~ msgstr "Ahora espaciando 4 archivo hacia adelante.\n" + +#~ msgid "" +#~ "The test worked this time. Please add:\n" +#~ "\n" +#~ " Fast Forward Space File = no\n" +#~ "\n" +#~ "to your Device resource for this drive.\n" +#~ msgstr "" +#~ "La prueba funcionó esta vez. Por favor, añadir:\n" +#~ "\n" +#~ " Fast Forward Space File = no\n" +#~ "\n" +#~ "a su recurso Device para esta unidad.\n" + +#~ msgid "Now forward spacing 1 more file.\n" +#~ msgstr "Ahora espaciando un archivo mas hacia adelante.\n" + +#~ msgid "We should be in file 5. I am at file %d. 
%s\n" +#~ msgstr "Debemos estar en el archivo 5. Estoy en el archivo %d. %s\n" + +#~ msgid "" +#~ "\n" +#~ "=== End Forward space files test ===\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "=== Finalizo prueba de Espaciar archivos hacia adelante ===\n" +#~ "\n" + +#~ msgid "" +#~ "\n" +#~ "The forward space file test failed.\n" +#~ msgstr "" +#~ "\n" +#~ "Fallida la prueba de espaciar archivos hacia adelante.\n" + +#~ msgid "" +#~ "You have Fast Forward Space File enabled.\n" +#~ "I am turning it off then retrying the test.\n" +#~ msgstr "" +#~ "usted tiene habilitado Espaciar Archivos Hacia Adelante Rápido.\n" +#~ "Voy deshabilitarlo y luego volver a intentar la prueba.\n" + +#~ msgid "" +#~ "You must correct this error or Bacula will not work.\n" +#~ "Some systems, e.g. OpenBSD, require you to set\n" +#~ " Use MTIOCGET= no\n" +#~ "in your device resource. Use with caution.\n" +#~ msgstr "" +#~ "Usted debe corregir este error o Bacula no funcionará.\n" +#~ "Algunos Sistemas, por ejemplo OpenBSD, requiere que usted establezca\n" +#~ "Use MTIOCGET = no\n" +#~ "en su recurso de dispositivo. Utilizar con precaución.\n" + +#~ msgid "" +#~ "\n" +#~ "Append test failed. Attempting again.\n" +#~ "Setting \"Hardware End of Medium = no\n" +#~ " and \"Fast Forward Space File = no\n" +#~ "and retrying append test.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Prueba de Añadir fallida. Intentando de nuevo.\n" +#~ "Configurando \"Hardware End of Medium = no\n" +#~ " y \"Fast Forward Space File = no\n" +#~ "y volviendo a intentar la prueba de añadir.\n" +#~ "\n" + +#~ msgid "" +#~ "\n" +#~ "\n" +#~ "It looks like the test worked this time, please add:\n" +#~ "\n" +#~ " Hardware End of Medium = No\n" +#~ "\n" +#~ " Fast Forward Space File = No\n" +#~ "to your Device resource in the Storage conf file.\n" +#~ msgstr "" +#~ "\n" +#~ "\n" +#~ "Parece que la prueba funcionó esta vez, por favor, añadir:\n" +#~ "\n" +#~ " Hardware End of Medium = No\n" +#~ "\n" +#~ " Fast Forward Space File = No\n" +#~ "a su recurso de Device en el archivo configuración del Storage.\n" + +#~ msgid "" +#~ "\n" +#~ "\n" +#~ "That appears *NOT* to have corrected the problem.\n" +#~ msgstr "" +#~ "\n" +#~ "\n" +#~ "Parece que *NO* se ha corregido el problema.\n" + +#~ msgid "" +#~ "\n" +#~ "\n" +#~ "It looks like the append failed. Attempting again.\n" +#~ "Setting \"BSF at EOM = yes\" and retrying append test.\n" +#~ msgstr "" +#~ "\n" +#~ "\n" +#~ "Parece que ha fallado el añadir.Intentando de nuevo.\n" +#~ "Configurando \"BSF en EOM = yes\" y reintentando probar añadir.\n" + +#~ msgid "" +#~ "\n" +#~ "\n" +#~ "It looks like the test worked this time, please add:\n" +#~ "\n" +#~ " Hardware End of Medium = No\n" +#~ " Fast Forward Space File = No\n" +#~ " BSF at EOM = yes\n" +#~ "\n" +#~ "to your Device resource in the Storage conf file.\n" +#~ msgstr "" +#~ "\n" +#~ "\n" +#~ "Parece que la prueba funcionó esta vez, por favor agregue:\n" +#~ "\n" +#~ "Hardware End of Medium = No\n" +#~ " Fast Forward Space File = No\n" +#~ " BSF at EOM = yes\n" +#~ "\n" +#~ "en su recurso Device en el archivo de configuración del Storage.\n" + +#~ msgid "" +#~ "\n" +#~ "Append test failed.\n" +#~ "\n" +#~ "\n" +#~ "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" +#~ "Unable to correct the problem. 
You MUST fix this\n" +#~ "problem before Bacula can use your tape drive correctly\n" +#~ "\n" +#~ "Perhaps running Bacula in fixed block mode will work.\n" +#~ "Do so by setting:\n" +#~ "\n" +#~ "Minimum Block Size = nnn\n" +#~ "Maximum Block Size = nnn\n" +#~ "\n" +#~ "in your Storage daemon's Device definition.\n" +#~ "nnn must match your tape driver's block size, which\n" +#~ "can be determined by reading your tape manufacturers\n" +#~ "information, and the information on your kernel dirver.\n" +#~ "Fixed block sizes, however, are not normally an ideal solution.\n" +#~ "\n" +#~ "Some systems, e.g. OpenBSD, require you to set\n" +#~ " Use MTIOCGET= no\n" +#~ "in your device resource. Use with caution.\n" +#~ msgstr "" +#~ "\n" +#~ "Fallo prueba Anexar.\n" +#~ "\n" +#~ "\n" +#~ "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" +#~ "No se puede corregir el problema. Usted DEBE corregir este\n" +#~ "problema antes de que Bacula pueda utilizar su unidad de cinta " +#~ "correctamente \n" +#~ "\n" +#~ "Tal vez ejecutando Bacula en modo de bloque fijo va a funcionar.\n" +#~ "Hacerlo estableciendo:\n" +#~ "\n" +#~ "Minimum Block Size = nnn\n" +#~ "Maximum Block Size = nnn\n" +#~ "\n" +#~ "en la definición de dispositivo del demonio Storage.\n" +#~ "nnn debe coincidir con el tamaño de bloque controlador de su cinta, que\n" +#~ "puede determinarse mediante lectura de información del fabricante de la " +#~ "cinta, y la información en su driver del núcleo.\n" +#~ "Tamaños de bloque fijo, sin embargo, normalmente no son una solución " +#~ "ideal.\n" +#~ "\n" +#~ "Algunos sistemas, por ejemplo OpenBSD, exigen que se establezcan\n" +#~ "Use MTIOCGET= no\n" +#~ "en su recurso de dispositivo. Utilice con precaución.\n" + +#~ msgid "" +#~ "\n" +#~ "The above Bacula scan should have output identical to what follows.\n" +#~ "Please double check it ...\n" +#~ "=== Sample correct output ===\n" +#~ "1 block of 64448 bytes in file 1\n" +#~ "End of File mark.\n" +#~ "2 blocks of 64448 bytes in file 2\n" +#~ "End of File mark.\n" +#~ "3 blocks of 64448 bytes in file 3\n" +#~ "End of File mark.\n" +#~ "1 block of 64448 bytes in file 4\n" +#~ "End of File mark.\n" +#~ "Total files=4, blocks=7, bytes = 451,136\n" +#~ "=== End sample correct output ===\n" +#~ "\n" +#~ "If the above scan output is not identical to the\n" +#~ "sample output, you MUST correct the problem\n" +#~ "or Bacula will not be able to write multiple Jobs to \n" +#~ "the tape.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "La anterior Bacula análisis debe tener una salida idéntica a lo que " +#~ "sigue.\n" +#~ "Por favor, verifique bien ...\n" +#~ "Ejemplo de la salida correcta === n1 bloque de 64448 bytes en el " +#~ "archivo 1\n" +#~ "Fin de la marca de archivo.\n" +#~ "2 bloques de 64.448 bytes en el archivo 2\n" +#~ "Fin de la marca de archivo.\n" +#~ "3 bloques de 64448 bytes en el archivo 3\n" +#~ "Fin de la marca de archivo.\n" +#~ "1 bloque de 64448 bytes en el archivo 4\n" +#~ "Fin de la marca de archivo.\n" +#~ "Total de archivos = 4, bloques = 7, bytes = 451.136\n" +#~ "===Fin ejemplo de la salida correcta===\n" +#~ "\n" +#~ "Si la anterior salida de escaneo no es idéntica a la \n" +#~ "salida de ejemplo, usted DEBE corregir el problema\n" +#~ "o Bacula no será capaz de escribir varios Jobs en\n" +#~ "la cinta.\n" +#~ "\n" + +#~ msgid "Bad status from fsf. ERR=%s\n" +#~ msgstr "Malo estado desde FSF. 
ERR=%s\n" + +#~ msgid "Forward spaced 1 file.\n" +#~ msgstr "Espaciado 1 archivo hacia adelante.\n" + +#~ msgid "Forward spaced %d files.\n" +#~ msgstr "Espaciados %d archivos hacia adelante.\n" + +#~ msgid "Forward spaced 1 record.\n" +#~ msgstr "Espaciado 1 registro hacia adelante.\n" + +#~ msgid "Forward spaced %d records.\n" +#~ msgstr "Espaciados %d registros hacia adelante.\n" + +#~ msgid "Wrote one record of %d bytes.\n" +#~ msgstr "Escribió un registro de %d bytes.\n" + +#~ msgid "Wrote block to device.\n" +#~ msgstr "Escribió bloques al dispositivo.\n" + +#~ msgid "Enter length to read: " +#~ msgstr "Introduzca la longitud para leer:" + +#~ msgid "Bad length entered, using default of 1024 bytes.\n" +#~ msgstr "Mala longitud introducida, utilizando 1024 bytes por defecto.\n" + +#~ msgid "Read of %d bytes gives stat=%d. ERR=%s\n" +#~ msgstr "Lectura de %d bytes obtuvo stat=%d. ERR=%s\n" + +#~ msgid "End of tape\n" +#~ msgstr "Fin de la cinta\n" + +#~ msgid "Starting scan at file %u\n" +#~ msgstr "Iniciando escaneo en el archivo %u\n" + +#~ msgid "read error on %s. ERR=%s.\n" +#~ msgstr "error de lectura en %s. ERR=%s.\n" + +#~ msgid "Bad status from read %d. ERR=%s\n" +#~ msgstr "Malo estado desde lectura %d. ERR=%s\n" + +#~ msgid "1 block of %d bytes in file %d\n" +#~ msgstr "1 bloque de %d bytes en el archivo %d\n" + +#~ msgid "%d blocks of %d bytes in file %d\n" +#~ msgstr "%d bloques de %d bytes en archivo %d\n" + +#~ msgid "End of File mark.\n" +#~ msgstr "Fin de la marca de archivo.\n" + +#~ msgid "Total files=%d, blocks=%d, bytes = %s\n" +#~ msgstr "Total de archivos=%d, bloques=%d, bytes = %s\n" + +#~ msgid "Short block read.\n" +#~ msgstr "Leer bloque corto.\n" + +#~ msgid "Error reading block. ERR=%s\n" +#~ msgstr "Error leyendo el bloque. ERR=%s\n" + +#~ msgid "" +#~ "Block=%u file,blk=%u,%u blen=%u First rec FI=%s SessId=%u SessTim=%u Strm=" +#~ "%s rlen=%d\n" +#~ msgstr "" +#~ "Bloque=%u archivo,blk=%u,%u blen=%u Primero rec FI=%s SessId=%u SessTim=" +#~ "%u Strm=%s rlen=%d\n" + +#~ msgid "Device status: %u. ERR=%s\n" +#~ msgstr "Estado del dispositivo: %u. ERR=%s\n" + +#~ msgid "" +#~ "\n" +#~ "This command simulates Bacula writing to a tape.\n" +#~ "It requires either one or two blank tapes, which it\n" +#~ "will label and write.\n" +#~ "\n" +#~ "If you have an autochanger configured, it will use\n" +#~ "the tapes that are in slots 1 and 2, otherwise, you will\n" +#~ "be prompted to insert the tapes when necessary.\n" +#~ "\n" +#~ "It will print a status approximately\n" +#~ "every 322 MB, and write an EOF every %s. If you have\n" +#~ "selected the simple test option, after writing the first tape\n" +#~ "it will rewind it and re-read the last block written.\n" +#~ "\n" +#~ "If you have selected the multiple tape test, when the first tape\n" +#~ "fills, it will ask for a second, and after writing a few more \n" +#~ "blocks, it will stop. Then it will begin re-reading the\n" +#~ "two tapes.\n" +#~ "\n" +#~ "This may take a long time -- hours! 
...\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Este comando simula Bacula escrito en una cinta.\n" +#~ "Se requiere de una o dos cintas en blanco, que se pueda etiquetar y " +#~ "escribir.\n" +#~ "\n" +#~ "Si usted tiene un auto-cargador configurado, se usará\n" +#~ "las cintas que se encuentran en las ranuras 1 y 2, de lo contrario,\n" +#~ "se le pedirá que inserte las cintas cuando sea necesario.\n" +#~ "\n" +#~ "Se imprimirá un estado a aproximadamente cada 322 MB, y escribirá un EOF " +#~ "cada %s.Si ha seleccionado\n" +#~ "la opción de prueba simple, después de escribir la primera cinta\n" +#~ "se rebobinará y volverá a leer el último bloque escrito.\n" +#~ "\n" +#~ "Si ha seleccionado\n" +#~ "la prueba de la cinta múltiple, cuando la primera cinta se llena, se le " +#~ "preguntará por la segunda, y después de escribir unos pocos\n" +#~ "bloques más, se detendrá. Entonces comenzará a releer las\n" +#~ "dos cintas.\n" +#~ "\n" +#~ "Esto puede tomar mucho tiempo - horas! ...\n" + +#~ msgid "" +#~ "Do you want to run the simplified test (s) with one tape\n" +#~ "or the complete multiple tape (m) test: (s/m) " +#~ msgstr "" +#~ "¿Desea ejecutar la prueba simplificada (s) con una cinta\n" +#~ "o la prueba completa con múltiples (m) cintas?: (s/m)" + +#~ msgid "Simple test (single tape) selected.\n" +#~ msgstr "Prueba simple (una sola cinta) seleccionada.\n" + +#~ msgid "Multiple tape test selected.\n" +#~ msgstr "Múltiples cintas de prueba seleccionados.\n" + +#~ msgid "Wrote Start of Session label.\n" +#~ msgstr "Escribe la etiqueta de Inicio de Sesión.\n" + +#~ msgid "%s Begin writing Bacula records to tape ...\n" +#~ msgstr "%s Empezar a escribir registros Bacula en cinta ...\n" + +#~ msgid "%s Begin writing Bacula records to first tape ...\n" +#~ msgstr "%s Empezar a escribir registros Bacula en la primera cinta ...\n" + +#~ msgid "Flush block failed.\n" +#~ msgstr "Fallo Flush bloque.\n" + +#~ msgid "Wrote block=%u, file,blk=%u,%u VolBytes=%s rate=%sB/s\n" +#~ msgstr "Escribió bloque=%u, archivo,blk=%u,%u VolBytes=%s tasa=%sB/s\n" + +#~ msgid "%s Flush block, write EOF\n" +#~ msgstr "%s Flush bloque, escribir EOF\n" + +#~ msgid "Wrote 1000 blocks on second tape. Done.\n" +#~ msgstr "Escribió 1000 bloques en segunda cinta. Hecho.\n" + +#~ msgid "Not OK\n" +#~ msgstr "No OK\n" + +#~ msgid "Job canceled.\n" +#~ msgstr "Job cancelado.\n" + +#~ msgid "Set ok=false after write_block_to_device.\n" +#~ msgstr "Establecer ok=false después de write_block_to_device.\n" + +#~ msgid "Wrote End of Session label.\n" +#~ msgstr "Escribió etiqueta de Fin de Sesión.\n" + +#~ msgid "Wrote state file last_block_num1=%d last_block_num2=%d\n" +#~ msgstr "Escribió archivo de estado last_block_num1=%d last_block_num2=%d\n" + +#~ msgid "Could not create state file: %s ERR=%s\n" +#~ msgstr "No se puede crear archivo de estado: %s ERR=%s\n" + +#~ msgid "" +#~ "\n" +#~ "\n" +#~ "%s Done filling tape at %d:%d. Now beginning re-read of tape ...\n" +#~ msgstr "" +#~ "\n" +#~ "\n" +#~ "%s Hecho llenado de cinta en %d:%d. Ahora empezando a releer la " +#~ "cinta ...\n" + +#~ msgid "" +#~ "\n" +#~ "\n" +#~ "%s Done filling tapes at %d:%d. Now beginning re-read of first tape ...\n" +#~ msgstr "" +#~ "\n" +#~ "\n" +#~ "%s Hecho llenado de cintas en %d:%d. Ahora empezando a releer la primera " +#~ "cinta ...\n" + +#~ msgid "do_unfill failed.\n" +#~ msgstr "do_unfill fallido.\n" + +#~ msgid "%s: Error during test.\n" +#~ msgstr "%s: Error durante la prueba.\n" + +#~ msgid "" +#~ "\n" +#~ "The state file level has changed. 
You must redo\n" +#~ "the fill command.\n" +#~ msgstr "" +#~ "\n" +#~ "El nivel del archivo de estado ha cambiado. Usted debe rehacer\n" +#~ "el comando llenar.\n" + +#~ msgid "" +#~ "\n" +#~ "Could not find the state file: %s ERR=%s\n" +#~ "You must redo the fill command.\n" +#~ msgstr "" +#~ "\n" +#~ "No se pudo encontrar el archivo de estado: %s ERR=%s\n" +#~ "Usted debe rehacer el comando llenar.\n" + +#~ msgid "Mount first tape. Press enter when ready: " +#~ msgstr "Monte la primera cinta. Cuando esté listo, presione ENTER: " + +#~ msgid "Rewinding.\n" +#~ msgstr "Rebobinando.\n" + +#~ msgid "Reading the first 10000 records from %u:%u.\n" +#~ msgstr "Leyendo los primeros 10000 registros desde %u:%u.\n" + +#~ msgid "Reposition from %u:%u to %u:%u\n" +#~ msgstr "Reposicionando desde %u:%u hasta %u:%u\n" + +#~ msgid "Reposition error. ERR=%s\n" +#~ msgstr "Error de reposición. ERR=%s\n" + +#~ msgid "Reading block %u.\n" +#~ msgstr "Leyendo bloque %u.\n" + +#~ msgid "Error reading block: ERR=%s\n" +#~ msgstr "Error leyendo bloque: ERR=%s\n" + +#~ msgid "" +#~ "\n" +#~ "The last block on the tape matches. Test succeeded.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "El último bloque de la cinta concuerda. Prueba satisfactoria.\n" +#~ "\n" + +#~ msgid "" +#~ "\n" +#~ "The last block of the first tape matches.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "El último bloque de la primera cinta concuerda.\n" +#~ "\n" + +#~ msgid "Mount second tape. Press enter when ready: " +#~ msgstr "Monte la segunda cinta. Cuando esté listo, presione ENTER: " + +#~ msgid "Reposition from %u:%u to 0:1\n" +#~ msgstr "Reposicionando desde %u:%u hasta 0:1\n" + +#~ msgid "Reading block %d.\n" +#~ msgstr "Leyendo bloque %d.\n" + +#~ msgid "" +#~ "\n" +#~ "The first block on the second tape matches.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "El primer bloque de la segunda cinta concuerda.\n" +#~ "\n" + +#~ msgid "" +#~ "\n" +#~ "The last block on the second tape matches. Test succeeded.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "El último bloque de la segunda cinta concuerda. Prueba satisfactoria.\n" +#~ "\n" + +#~ msgid "10000 records read now at %d:%d\n" +#~ msgstr "10000 registros leídos, ahora en %d:%d\n" + +#~ msgid "Last block written" +#~ msgstr "Último bloque escrito" + +#~ msgid "Block read back" +#~ msgstr "Bloque releído" + +#~ msgid "" +#~ "\n" +#~ "\n" +#~ "The blocks differ at byte %u\n" +#~ msgstr "" +#~ "\n" +#~ "\n" +#~ "Los bloques difieren en el byte %u\n" + +#~ msgid "" +#~ "\n" +#~ "\n" +#~ "!!!! The last block written and the block\n" +#~ "that was read back differ. The test FAILED !!!!\n" +#~ "This must be corrected before you use Bacula\n" +#~ "to write multi-tape Volumes.!!!!\n" +#~ msgstr "" +#~ "\n" +#~ "\n" +#~ "!!!! El último bloque escrito y el bloque\n" +#~ "que se vuelve a leer difieren. La prueba FALLÓ !!!!\n" +#~ "Esto debe ser corregido antes de utilizar Bacula\n" +#~ "para escribir volúmenes multi-cinta !!!!\n" + +#~ msgid "Last block at: %u:%u this_dev_block_num=%d\n" +#~ msgstr "Último bloque en: %u:%u this_dev_block_num=%d\n" + +#~ msgid "Block not written: FileIndex=%u blk_block=%u Size=%u\n" +#~ msgstr "Bloque no escrito: FileIndex=%u blk_block=%u Tamaño=%u\n" + +#~ msgid "Block not written" +#~ msgstr "Bloque no escrito" + +#~ msgid "End of tape %d:%d. Volume Bytes=%s. Write rate = %sB/s\n" +#~ msgstr "" +#~ "Fin de la cinta %d:%d. Volumen Bytes=%s. 
Velocidad de Escritura = %sB/s\n" + +#~ msgid "Test writing blocks of 64512 bytes to tape.\n" +#~ msgstr "Prueba de escritura de bloques de 64512 bytes a cinta.\n" + +#~ msgid "How many blocks do you want to write? (1000): " +#~ msgstr "¿Cuántos bloques usted quiere escribir? (1000):" + +#~ msgid "Begin writing %d Bacula blocks to tape ...\n" +#~ msgstr "Empezando a escribir %d bloques Bacula en la cinta ...\n" + +#~ msgid "Begin writing raw blocks of %u bytes.\n" +#~ msgstr "Empezando a escribir bloques raw de %u bytes.\n" + +#~ msgid "test autochanger" +#~ msgstr "prueba Autochanger" + +#~ msgid "backspace file" +#~ msgstr "archivo de retroceso" + +#~ msgid "backspace record" +#~ msgstr "registro de retroceso" + +#~ msgid "list device capabilities" +#~ msgstr "lista de las capacidades del dispositivo" + +#~ msgid "clear tape errors" +#~ msgstr "Errores Cinta de Limpieza" + +#~ msgid "go to end of Bacula data for append" +#~ msgstr "ir al final de los datos de Bacula para añadir" + +#~ msgid "go to the physical end of medium" +#~ msgstr "ir al final del medio físico" + +#~ msgid "fill tape, write onto second volume" +#~ msgstr "llenar cinta, escribir en segundo volumen" + +#~ msgid "read filled tape" +#~ msgstr "leer cinta llena" + +#~ msgid "forward space a file" +#~ msgstr "espaciar un archivo hacia adelante" + +#~ msgid "forward space a record" +#~ msgstr "espaciar un registro hacia adelante" + +#~ msgid "print this command" +#~ msgstr "imprimir este comando" + +#~ msgid "write a Bacula label to the tape" +#~ msgstr "escribir una etiqueta Bacula en la cinta" + +#~ msgid "load a tape" +#~ msgstr "cargar una cinta" + +#~ msgid "quit btape" +#~ msgstr "salir btape" + +#~ msgid "use write() to fill tape" +#~ msgstr "usar write() para llenar la cinta" + +#~ msgid "read and print the Bacula tape label" +#~ msgstr "leer e imprimir la etiqueta Bacula de la cinta" + +#~ msgid "test record handling functions" +#~ msgstr "prueba de manejo de registro de funciones" + +#~ msgid "rewind the tape" +#~ msgstr "rebobinar la cinta" + +#~ msgid "read() tape block by block to EOT and report" +#~ msgstr "leer() cinta bloque por bloque para EOT y reportar" + +#~ msgid "Bacula read block by block to EOT and report" +#~ msgstr "Bacula leer bloque por bloque para EOT y reportar" + +#~ msgid "" +#~ "[file_size=n(GB)|nb_file=3|skip_zero|skip_random|skip_raw|skip_block] " +#~ "report drive speed" +#~ msgstr "" +#~ "[file_size=n(GB)|nb_file=3|skip_zero|skip_random|skip_raw|skip_block] " +#~ "informe de velocidad de la unidad" + +#~ msgid "print tape status" +#~ msgstr "imprimir estado de la cinta" + +#~ msgid "General test Bacula tape functions" +#~ msgstr "Prueba general de las funciones de cinta Bacula" + +#~ msgid "write an EOF on the tape" +#~ msgstr "escribir un EOF en la cinta" + +#~ msgid "write a single Bacula block" +#~ msgstr "escribir un único bloque de Bacula" + +#~ msgid "read a single record" +#~ msgstr "leer un solo registro" + +#~ msgid "read a single Bacula block" +#~ msgstr "leer un único bloque de Bacula" + +#~ msgid "quick fill command" +#~ msgstr "comando de llenado rápido" + +#~ msgid "\"%s\" is an invalid command\n" +#~ msgstr "\"%s\" es un comando inválido\n" + +#~ msgid "Interactive commands:\n" +#~ msgstr "Comandos interactivos:\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: btape \n" +#~ " -b specify bootstrap file\n" +#~ " -c set configuration file to file\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -p 
proceed inspite of I/O errors\n" +#~ " -s turn off signals\n" +#~ " -w set working directory to dir\n" +#~ " -v be verbose\n" +#~ " -? print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Versión: %s (%s)\n" +#~ "\n" +#~ "Utilice: btape \n" +#~ " -b especifica un archivo bootstrap\n" +#~ " -c especifica un archivo File de configuración\n" +#~ " -d establece nivel de depuración para \n" +#~ " -dt imprime timestamp en salida de depuración\n" +#~ " -p proceder a pesar de los errores de E/S\n" +#~ " -s desactivar señales\n" +#~ " -v detallado\n" +#~ " -? imprime esta mensaje.\n" +#~ "\n" + +#~ msgid "Mount second Volume on device %s and press return when ready: " +#~ msgstr "" +#~ "Monte el segundo volumen en el dispositivo %s y pulse ENTER cuando esté " +#~ "listo:" + +#~ msgid "Mount blank Volume on device %s and press return when ready: " +#~ msgstr "" +#~ "Monte un volumen en blanco en el dispositivo %s y pulse ENTER cuando esté " +#~ "listo:" + +#~ msgid "End of Volume \"%s\" %d records.\n" +#~ msgstr "Fin del Volumen \"%s\" %d registros.\n" + +#~ msgid "Read block=%u, VolBytes=%s rate=%sB/s\n" +#~ msgstr "Leer bloque=%u, VolBytes=%s velocidad=%sB/s\n" + +#~ msgid "Cannot open Dev=%s, Vol=%s\n" +#~ msgstr "No se puede abrir Dev=%s, Vol=%s\n" + +#~ msgid "Nohdr," +#~ msgstr "Nohdr," + +#~ msgid "partial," +#~ msgstr "parcial," + +#~ msgid "empty," +#~ msgstr "vacío," + +#~ msgid "Nomatch," +#~ msgstr "Nomatch," + +#~ msgid "cont," +#~ msgstr "cont," + +#~ msgid "Volume name or names is too long. Please use a .bsr file.\n" +#~ msgstr "" +#~ "Nombre de Volumen o nombres es demasiado largo. Por favor, use un " +#~ "archivo .BSR.\n" + +#~ msgid "Cannot find device \"%s\" in config file %s.\n" +#~ msgstr "" +#~ "No se puede encontrar el dispositivo \"%s\" en el archivo de " +#~ "configuración %s.\n" + +#~ msgid "Cannot init device %s\n" +#~ msgstr "No se pudo iniciar dispositivo %s\n" + +#~ msgid "Cannot open %s\n" +#~ msgstr "No se pudo abrir %s\n" + +#~ msgid "Could not find device \"%s\" in config file %s.\n" +#~ msgstr "" +#~ "No se pudo encontrar dispositivo \"%s\" en el archivo de configuración " +#~ "%s.\n" + +#~ msgid "Using device: \"%s\" for writing.\n" +#~ msgstr "Utilizando el dispositivo: \"%s\" para escritura.\n" + +#~ msgid "Using device: \"%s\" for reading.\n" +#~ msgstr "Utilizando el dispositivo: \"%s\" para lectura.\n" + +#~ msgid "Unexpected End of Data\n" +#~ msgstr "Fin Inesperado de los Datos\n" + +#~ msgid "Unexpected End of Tape\n" +#~ msgstr "Fin Inesperado de la Cinta\n" + +#~ msgid "Unexpected End of File\n" +#~ msgstr "Fin Inesperado del Archivo\n" + +#~ msgid "Tape Door is Open\n" +#~ msgstr "Puerta de la cinta está abierta\n" + +#~ msgid "Unexpected Tape is Off-line\n" +#~ msgstr "Inesperado Cinta esta off-line\n" + +#~ msgid "Illegal mode given to open dev.\n" +#~ msgstr "Modo ilegal dado para abrir dev.\n" + +#~ msgid "Bad device call. Device not open\n" +#~ msgstr "Mala llamada de dispositivo. Dispositivo no abierto\n" + +#~ msgid "Seek error: ERR=%s\n" +#~ msgstr "Seek error: ERR=%s\n" + +#~ msgid "lseek error on %s. ERR=%s.\n" +#~ msgstr "Iseek error en %s. ERR=%s.\n" + +#, fuzzy +#~ msgid "Error closing device %s. ERR=%s.\n" +#~ msgstr "Error leyendo archivo %s: ERR=%s\n" + +#~ msgid "No FreeSpace command defined.\n" +#~ msgstr "Comando FreeSpace no definido.\n" + +#~ msgid "Cannot run free space command. Results=%s ERR=%s\n" +#~ msgstr "" +#~ "No se puede ejecutar comando de espacio libre. 
Resultados=%s ERR=%s\n" + +#, fuzzy +#~ msgid "Bad call to weof_dev. Device %s not open\n" +#~ msgstr "Mala llamada a weof_dev. El dispositivo no abre\n" + +#, fuzzy +#~ msgid "Attempt to WEOF on non-appendable Volume %s\n" +#~ msgstr "Intento de WEOF en Volumen no-appendable\n" + +#~ msgid "Bad call to eod. Device %s not open\n" +#~ msgstr "Mala llamada para eod. Dispositivo %s no abierto\n" + +#~ msgid "Unable to write EOF. ERR=%s\n" +#~ msgstr "No se pudo escribir EOF. ERR=%s\n" + +#~ msgid "End of medium on Volume \"%s\" Bytes=%s Blocks=%s at %s.\n" +#~ msgstr "Fin de medio en Volumen \"%s\" Bytes=%s Bloques=%s en %s.\n" + +#~ msgid "New volume \"%s\" mounted on device %s at %s.\n" +#~ msgstr "Nuevo volumen \"%s\" montado en el dispositivo %s en %s.\n" + +#~ msgid "write_block_to_device Volume label failed. ERR=%s" +#~ msgstr "Fallo de etiqueta de volumen write_block_to_device. ERR=%s" + +#~ msgid "write_block_to_device overflow block failed. ERR=%s" +#~ msgstr "Fallo de desbordamiento de bloque write_block_to_device. ERR=%s" + +#~ msgid "Catastrophic error. Cannot write overflow block to device %s. ERR=%s" +#~ msgstr "" +#~ "Error catastrófico. No se puede escribir bloque de desbordamiento al " +#~ "dispositivo %s. ERR=%s" + +#~ msgid "Connection request from %s failed.\n" +#~ msgstr "Fallo solicitud de conexión desde %s.\n" + +#, fuzzy +#~ msgid "[SF0100] Unable to authenticate Director\n" +#~ msgstr "No se puede autentificar el Director\n" + +#, fuzzy +#~ msgid "[SF0101] Bad client command: %s" +#~ msgstr "Malo comando nivel: %s\n" + +#, fuzzy +#~ msgid "[SF0102] Failed to connect to Client daemon: %s:%d\n" +#~ msgstr "Error al conectar con el demonio de Storage: %s:%d\n" + +#, fuzzy +#~ msgid "[SF0103] Bad storage command: %s" +#~ msgstr "Malo comando storage: %s" + +#, fuzzy +#~ msgid "[SF0104] Failed to connect to Storage daemon: %s:%d\n" +#~ msgstr "Error al conectar con el demonio de Storage: %s:%d\n" + +#~ msgid "3991 Bad setdebug command: %s\n" +#~ msgstr "3991 Malo comando setdebug: %s\n" + +#~ msgid "3903 Error scanning cancel command.\n" +#~ msgstr "3903 Error escaneando comando cancel.\n" + +#~ msgid "3904 Job %s not found.\n" +#~ msgstr "3904 Job %s no encontrado.\n" + +#, fuzzy +#~ msgid "3000 JobId=%ld Job=\"%s\" marked to be %s.\n" +#~ msgstr "JobId %s, Job %s marcado para ser cancelado.\n" + +#, fuzzy +#~ msgid "3908 Error reserving Volume=\"%s\": %s" +#~ msgstr "Error al obtener Volumen información: %s" + +#~ msgid "3999 Device \"%s\" not found or could not be opened.\n" +#~ msgstr "3999 Dispositivo \"%s\" no encontrado o no pudo ser abierto.\n" + +#~ msgid "3903 Error scanning label command: %s\n" +#~ msgstr "3903 Error escaneando comando cancel: %s\n" + +#, fuzzy +#~ msgid "3900 Truncate cache for volume \"%s\" failed. ERR=%s\n" +#~ msgstr "Fallo al eliminar el Volumen \"%s\". 
ERR=%s" + +#, fuzzy +#~ msgid "3911 Error scanning truncate command: %s\n" +#~ msgstr "3909 Error escaneando comando mount: %s\n" + +#, fuzzy +#~ msgid "3900 Not yet implemented\n" +#~ msgstr "JobType aún no se han implementado\n" + +#, fuzzy +#~ msgid "3912 Error scanning upload command: ERR=%s\n" +#~ msgstr "3927 Error escaneando comando de liberación: %s\n" + +#, fuzzy +#~ msgid "3999 Error with the upload: ERR=%s\n" +#~ msgstr "Error en %s: ERR=%s\n" + +#, fuzzy +#~ msgid "3929 Unable to open device \"%s\": ERR=%s\n" +#~ msgstr "3910 No se puede abrir el dispositivo %s: ERR=%s\n" + +#~ msgid "3920 Cannot label Volume because it is already labeled: \"%s\"\n" +#~ msgstr "" +#~ "3920 No se puede etiquetar el volumen porque ya está etiquetado: \"%s\"\n" + +#~ msgid "3921 Wrong volume mounted.\n" +#~ msgstr "3921 Volumen incorrecto montado.\n" + +#~ msgid "3922 Cannot relabel an ANSI/IBM labeled Volume.\n" +#~ msgstr "3922 No se puede renombrar un volumen ANSI/IBM etiquetado.\n" + +#, fuzzy +#~ msgid "3912 Failed to label Volume %s: ERR=%s\n" +#~ msgstr "3912 Fallo al etiquetar el Volumen: ERR=%s\n" + +#, fuzzy +#~ msgid "3913 Failed to open next part: ERR=%s\n" +#~ msgstr "No se puede abrir próxima parte %s del dispositivo: ERR=%s\n" + +#, fuzzy +#~ msgid "3917 Failed to label Volume: ERR=%s\n" +#~ msgstr "3912 Fallo al etiquetar el Volumen: ERR=%s\n" + +#, fuzzy +#~ msgid "3918 Failed to label Volume (no media): ERR=%s\n" +#~ msgstr "3914 No se ha podido etiquetar el volumen (no hay medios): ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "3919 Cannot label Volume. Unknown status %d from read_volume_label()\n" +#~ msgstr "" +#~ "3913 No se puede etiquetar el volumen. Estado desconocido %d de " +#~ "read_volume_label()\n" + +#~ msgid "3001 Mounted Volume: %s\n" +#~ msgstr "3001 Volumen Montado: %s\n" + +#, fuzzy +#~ msgid "" +#~ "3902 Cannot mount Volume on Storage Device \"%s\" because:\n" +#~ "%s" +#~ msgstr "" +#~ "3902 No se puede montar el volumen en Storage Device %s debido a que:\n" +#~ "%s" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "[SW0106] Device \"%s\" requested by DIR could not be opened or does not " +#~ "exist.\n" +#~ msgstr "" +#~ "\n" +#~ "Dispositivo \"%s\" solicitado por el DIR no se pudo abrir o no existe.\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "[SW0107] Device \"%s\" in changer \"%s\" requested by DIR could not be " +#~ "opened or does not exist.\n" +#~ msgstr "" +#~ "\n" +#~ "Dispositivo \"%s\" en el cambiador \"%s\" solicitado por el DIR no se " +#~ "pudo abrir o no existe.\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "[SW0108] Device \"%s\" requested by DIR could not be opened or does not " +#~ "exist.\n" +#~ msgstr "" +#~ "\n" +#~ "Dispositivo \"%s\" solicitado por el DIR no se pudo abrir o no existe.\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "[SW0109] Device \"%s\" in changer \"%s\" requested by DIR could not be " +#~ "opened or does not exist.\n" +#~ msgstr "" +#~ "\n" +#~ "Dispositivo \"%s\" en el cambiador \"%s\" solicitado por el DIR no se " +#~ "pudo abrir o no existe.\n" + +#~ msgid "Specified slot ignored. " +#~ msgstr "Ranura especificado ignorado." 
+ +#, fuzzy +#~ msgid "3901 Unable to open device \"%s\": ERR=%s\n" +#~ msgstr "3901 No se puede abrir el dispositivo %s: ERR=%s\n" + +#, fuzzy +#~ msgid "3001 Device \"%s\" is mounted with Volume \"%s\"\n" +#~ msgstr "3001 Dispositivo %s esta montado con volumen \"%s\"\n" + +#, fuzzy +#~ msgid "" +#~ "3905 Device \"%s\" open but no Bacula volume is mounted.\n" +#~ "If this is not a blank tape, try unmounting and remounting the Volume.\n" +#~ msgstr "" +#~ "3905 Dispositivo %s abierto, pero ninguno volumen Bacula está montado.\n" +#~ "Si esto no es una cinta en blanco, trate de desmontar y volver a montar " +#~ "el volumen.\n" + +#, fuzzy +#~ msgid "3001 Device \"%s\" is doing acquire.\n" +#~ msgstr "3001% s dispositivo está haciendo adquirir.\n" + +#, fuzzy +#~ msgid "3903 Device \"%s\" is being labeled.\n" +#~ msgstr "3903 Dispositivo %s ha sido etiquetada.\n" + +#, fuzzy +#~ msgid "3001 Device \"%s\" is already mounted with Volume \"%s\"\n" +#~ msgstr "3001 Dispositivo %s ya está montado con el volumen \"%s\"\n" + +#, fuzzy +#~ msgid "3002 Device \"%s\" is mounted.\n" +#~ msgstr "3002 Dispositivo %s está montado.\n" + +#~ msgid "3907 %s" +#~ msgstr "3907 %s" + +#, fuzzy +#~ msgid "3906 File device \"%s\" is always mounted.\n" +#~ msgstr "3906 Dispositivo de Archivo %s está siempre montado.\n" + +#, fuzzy +#~ msgid "3930 Device \"%s\" is being released.\n" +#~ msgstr "3903 Dispositivo %s ha sido etiquetada.\n" + +#, fuzzy +#~ msgid "3905 Unknown wait state %d\n" +#~ msgstr "Estado del Analizador %d desconocido\n" + +#~ msgid "3909 Error scanning mount command: %s\n" +#~ msgstr "3909 Error escaneando comando mount: %s\n" + +#, fuzzy +#~ msgid "3003 Device \"%s\" already enabled.\n" +#~ msgstr "3022 Dispositivo %s liberado.\n" + +#, fuzzy +#~ msgid "3002 Device \"%s\" enabled.\n" +#~ msgstr "3022 Dispositivo %s liberado.\n" + +#, fuzzy +#~ msgid "3004 Device \"%s\" deleted %d alert%s.\n" +#~ msgstr "3022 Dispositivo %s liberado.\n" + +#, fuzzy +#~ msgid "3907 Error scanning \"enable\" command: %s\n" +#~ msgstr "3903 Error escaneando comando cancel: %s\n" + +#, fuzzy +#~ msgid "3002 Device \"%s\" disabled.\n" +#~ msgstr "3002 Dispositivo %s está montado.\n" + +#, fuzzy +#~ msgid "3907 Error scanning \"disable\" command: %s\n" +#~ msgstr "3903 Error escaneando comando cancel: %s\n" + +#, fuzzy +#~ msgid "3002 Device \"%s\" unmounted.\n" +#~ msgstr "3002 Dispositivo %s desmontado.\n" + +#, fuzzy +#~ msgid "3901 Device \"%s\" is already unmounted.\n" +#~ msgstr "3901 Dispositivos %s ya está desmontado.\n" + +#, fuzzy +#~ msgid "3001 Device \"%s\" unmounted.\n" +#~ msgstr "3001 Dispositivo %s desmontado.\n" + +#, fuzzy +#~ msgid "3902 Device \"%s\" is busy in acquire.\n" +#~ msgstr "3902 Dispositivo %s está ocupado en adquirir.\n" + +#~ msgid "3907 Error scanning unmount command: %s\n" +#~ msgstr "3907 Error escaneando comando unmount: %s\n" + +#~ msgid "3916 Error scanning action_on_purge command\n" +#~ msgstr "3916 Error de escaneando comando action_on_purge\n" + +#, fuzzy +#~ msgid "3921 Device \"%s\" already released.\n" +#~ msgstr "3921 Dispositivo %s ya liberado.\n" + +#, fuzzy +#~ msgid "3922 Device \"%s\" waiting for sysop.\n" +#~ msgstr "3922 Dispositivo %s aguardando por sysop.\n" + +#, fuzzy +#~ msgid "3922 Device \"%s\" waiting for mount.\n" +#~ msgstr "3922 Dispositivo %s aguardando por montar.\n" + +#, fuzzy +#~ msgid "3923 Device \"%s\" is busy in acquire.\n" +#~ msgstr "3923 Dispositivo %s está ocupado en adquirir.\n" + +#, fuzzy +#~ msgid "3914 Device \"%s\" is being 
labeled.\n" +#~ msgstr "3914 Dispositivo %s ha sido etiquetada.\n" + +#, fuzzy +#~ msgid "3022 Device \"%s\" released.\n" +#~ msgstr "3022 Dispositivo %s liberado.\n" + +#~ msgid "3927 Error scanning release command: %s\n" +#~ msgstr "3927 Error escaneando comando de liberación: %s\n" + +#, fuzzy +#~ msgid "[SF0110] Could not create bootstrap file %s: ERR=%s\n" +#~ msgstr "No se pudo crear el archivo de arranque %s: ERR=%s\n" + +#, fuzzy +#~ msgid "[SF0111] Error parsing bootstrap file.\n" +#~ msgstr "Error analizando archivo bootstrap.\n" + +#, fuzzy +#~ msgid "3998 Device \"%s\" is not an autochanger.\n" +#~ msgstr "3995 Dispositivo %s no es un auto-cargador.\n" + +#, fuzzy +#~ msgid "3909 Error scanning autochanger drives/list/slots command: %s\n" +#~ msgstr "" +#~ "3908 Error comando scanning auto-cambiador drives/list/ranuras: %s\n" + +#~ msgid "3909 Error scanning readlabel command: %s\n" +#~ msgstr "3909 Error comando scanning readlabel: %s\n" + +#~ msgid "3001 Volume=%s Slot=%d\n" +#~ msgstr "3001 Volumen=%s Ranura=%d\n" + +#, fuzzy +#~ msgid "3931 Device \"%s\" is BLOCKED. user unmounted.\n" +#~ msgstr "3931 Dispositivo %s está BLOQUEADO. Usuario sin montar.\n" + +#, fuzzy +#~ msgid "" +#~ "3932 Device \"%s\" is BLOCKED. user unmounted during wait for media/" +#~ "mount.\n" +#~ msgstr "" +#~ "3932 Dispositivo %s está BLOQUEADO. Usuario sin montar en espera por " +#~ "medios/montar.\n" + +#, fuzzy +#~ msgid "3933 Device \"%s\" is BLOCKED waiting for media.\n" +#~ msgstr "3933 Dispositivo %s está BLOQUEADO esperando por media.\n" + +#, fuzzy +#~ msgid "3934 Device \"%s\" is being initialized.\n" +#~ msgstr "3934 Dispositivo %s se está inicializado.\n" + +#, fuzzy +#~ msgid "3935 Device \"%s\" is blocked labeling a Volume.\n" +#~ msgstr "3935 Dispositivo %s está BLOQUEADO etiquetando un Volumen.\n" + +#, fuzzy +#~ msgid "3935 Device \"%s\" is blocked for unknown reason.\n" +#~ msgstr "3935 Dispositivo %s está BLOQUEADO por razón desconocida.\n" + +#, fuzzy +#~ msgid "3936 Device \"%s\" is busy reading.\n" +#~ msgstr "3936 Dispositivo %s está ocupado leyendo.\n" + +#, fuzzy +#~ msgid "3937 Device \"%s\" is busy with writers=%d reserved=%d.\n" +#~ msgstr "3937 Dispositivo %s está ocupado con %d escritura(s).\n" + +#, fuzzy +#~ msgid "Command error with FD msg=\"%s\", SD hanging up. ERR=%s\n" +#~ msgstr "Error de comando con FD, colgando. %s\n" + +#, fuzzy +#~ msgid "Command error with FD msg=\"%s\", SD hanging up.\n" +#~ msgstr "Error de comando con FD, colgando.\n" + +#~ msgid "FD command not found: %s\n" +#~ msgstr "Comando FD no encontrado:% s \n" + +#~ msgid "Attempt to append on non-open session.\n" +#~ msgstr "Intento de anexar en sesión no abierta.\n" + +#~ msgid "Attempt to close non-open session.\n" +#~ msgstr "Intento de cerrar sesión no abierta.\n" + +#~ msgid "Attempt to open already open session.\n" +#~ msgstr "Intento de abrir sesión ya abierta.\n" + +#~ msgid "Attempt to read on non-open session.\n" +#~ msgstr "Intento de leer sesión no abierta.\n" + +#, fuzzy +#~ msgid "Attempt to open an already open session.\n" +#~ msgstr "Intento de abrir sesión ya abierta.\n" + +#, fuzzy +#~ msgid "Cannot open session, received bad parameters.\n" +#~ msgstr "No se puede abrir recursos fork para %s.\n" + +#, fuzzy +#~ msgid "Rewind failed: device %s is not open.\n" +#~ msgstr "Mala llamada para eod. Dispositivo %s no abierto\n" + +#~ msgid "Bad call to reposition. Device not open\n" +#~ msgstr "Mala llamada a reposición. 
El dispositivo no abre\n" + +#~ msgid "Could not open file device %s. No Volume name given.\n" +#~ msgstr "" +#~ "No se pudo abrir dispositivo de archivo %s. No hay nombre volumen " +#~ "determinado.\n" + +#, fuzzy +#~ msgid "Could not open(%s,%s,0640): ERR=%s\n" +#~ msgstr "No se pudo abrir %s: ERR=%s\n" + +#~ msgid "Unable to truncate device %s. ERR=%s\n" +#~ msgstr "No se puede truncar el dispositivo %s. ERR=%s\n" + +#~ msgid "Unable to stat device %s. ERR=%s\n" +#~ msgstr "No se pudo stat dispositivo %s. ERR=%s\n" + +#~ msgid "Device %s doesn't support ftruncate(). Recreating file %s.\n" +#~ msgstr "Dispositivo %s no admite ftruncate(). Recreando el archivo %s.\n" + +#~ msgid "Could not reopen: %s, ERR=%s\n" +#~ msgstr "No se pudo reabrir: %s, ERR=%s\n" + +#~ msgid "Device %s cannot be %smounted. ERR=%s\n" +#~ msgstr "Dispositivo %s no puede ser %smounted. ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "Ready to append to end of Volumes \"%s\" ameta size=%s adata size=%s\n" +#~ msgstr "Listo para anexar al final del Volumen \"%s\" parte=%d tamaño=%s\n" + +#~ msgid "Ready to append to end of Volume \"%s\" size=%s\n" +#~ msgstr "Listo para anexar al final del volumen \"%s\" tamaño=%s\n" + +#, fuzzy +#~ msgid "" +#~ "For Volume \"%s\":\n" +#~ " The sizes do not match! Metadata Volume=%s Catalog=%s\n" +#~ " Correcting Catalog\n" +#~ msgstr "" +#~ "Bacula no puedo escribir en el volumen DVD \"%s\" porque: Los tamaños no " +#~ "coinciden! Volumen=%s Catálogo=%s\n" + +#, fuzzy +#~ msgid "" +#~ "For aligned Volume \"%s\":\n" +#~ " Aligned sizes do not match! Aligned Volume=%s Catalog=%s\n" +#~ " Correcting Catalog\n" +#~ msgstr "" +#~ "Bacula no puedo escribir en el volumen DVD \"%s\" porque: Los tamaños no " +#~ "coinciden! Volumen=%s Catálogo=%s\n" + +#, fuzzy +#~ msgid "Error updating Catalog\n" +#~ msgstr "Error abriendo archivo de datos %s\n" + +#~ msgid "" +#~ "Bacula cannot write on disk Volume \"%s\" because: The sizes do not " +#~ "match! Volume=%s Catalog=%s\n" +#~ msgstr "" +#~ "Bacula no puede escribir en el Volumen de cinta \"%s\" porque:Los tamaños " +#~ "no coinciden! Volumen=%s Catalogo=%s\n" + +#, fuzzy +#~ msgid "" +#~ "Connection from unknown Director %s at %s rejected.\n" +#~ "Please see " +#~ msgstr "Conexión desde Director %s desconocido en %s rechazada.\n" + +#~ msgid "Invalid connection from %s. Len=%d\n" +#~ msgstr "Inválida conexión desde %s. Len=%d\n" + +#, fuzzy +#~ msgid "Invalid Hello from %s. Len=%d\n" +#~ msgstr "Inválida conexión desde %s. Len=%d\n" + +#, fuzzy +#~ msgid "Client connect failed: Job name not found: %s\n" +#~ msgstr "Fallo al conectar a FD: Nombre del Job no encontrado: %s\n" + +#~ msgid "Unable to authenticate File daemon\n" +#~ msgstr "No se puede autenticar demonio File\n" + +#, fuzzy +#~ msgid "Client socket not open. Could not connect to Client.\n" +#~ msgstr "Fallo al conectar con el cliente.\n" + +#, fuzzy +#~ msgid "Recv request to Client failed. ERR=%s\n" +#~ msgstr "Fallo al abrir dispositivo. ERR=%s\n" + +#, fuzzy +#~ msgid "Bad Hello from Client: %s.\n" +#~ msgstr "Malo comando Hello desde Director en %s: %s\n" + +#, fuzzy +#~ msgid "[SE0001] Unable to stat device %s at %s: ERR=%s\n" +#~ msgstr "No se puede stat dispositivo %s: ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "[SE0002] %s is an unknown device type. Must be tape or directory. st_mode=" +#~ "%x\n" +#~ msgstr "" +#~ "%s es un tipo de dispositivo desconocido. Debe ser cinta o directorio\n" +#~ " o tener RequiresMount=yes para DVD. 
st_mode=%x\n" + +#, fuzzy +#~ msgid "[SF0001] Invalid device type=%d name=\"%s\"\n" +#~ msgstr "Opción replace no valida: %s\n" + +#, fuzzy +#~ msgid "[SA0003] Unable to stat mount point %s: ERR=%s\n" +#~ msgstr "No se puede stat punto de montaje %s: ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "[SA0004] Mount and unmount commands must defined for a device which " +#~ "requires mount.\n" +#~ msgstr "" +#~ "Comandos mount y unmount deben ser definidos para un dispositivo que " +#~ "requiere montaje.\n" + +#, fuzzy +#~ msgid "[SA0005] Min block size > max on device %s\n" +#~ msgstr "Mínimo tamaño de bloque > máximo en el dispositivo %s\n" + +#, fuzzy +#~ msgid "[SA0006] Block size %u on device %s is too large, using default %u\n" +#~ msgstr "" +#~ "Tamaño de bloque %u en el dispositivo %s es demasiado grande, usando %u " +#~ "omisión\n" + +#, fuzzy +#~ msgid "" +#~ "[SW0007] Max block size %u not multiple of device %s block size=%d.\n" +#~ msgstr "" +#~ "Tamaño máximo de bloque %u no es múltiplo de tamaño de bloque en " +#~ "dispositivo %s.\n" + +#, fuzzy +#~ msgid "[SA0008] Max Vol Size < 8 * Max Block Size for device %s\n" +#~ msgstr "" +#~ "Máximo Tamaño de Volumen < 8 * Máximo Tamaño de Bloque en dispositivo %s\n" + +#, fuzzy +#~ msgid "[SA0009] Unable to init mutex: ERR=%s\n" +#~ msgstr "No se puede iniciar mutex: ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0010] Unable to init cond variable: ERR=%s\n" +#~ msgstr "No se puede iniciar variable cond: ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0011] Unable to init cond variable: ERR=%s\n" +#~ msgstr "No se puede iniciar variable cond: ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0012] Unable to init spool mutex: ERR=%s\n" +#~ msgstr "No se puede iniciar mutex: ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0013] Unable to init acquire mutex: ERR=%s\n" +#~ msgstr "No se puede iniciar mutex: ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0014] Unable to init freespace mutex: ERR=%s\n" +#~ msgstr "No se puede iniciar mutex: ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0015] Unable to init read acquire mutex: ERR=%s\n" +#~ msgstr "No se puede iniciar mutex: ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0016] Unable to init volcat mutex: ERR=%s\n" +#~ msgstr "No se puede iniciar mutex: ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0017] Unable to init dcrs mutex: ERR=%s\n" +#~ msgstr "No se puede iniciar mutex: ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "[SF0018] Plugin directory not defined. Cannot load SD %s driver for " +#~ "device %s.\n" +#~ msgstr "Directorio de Plugin no definido. No se puede usar plug-in: \"%\"\n" + +#, fuzzy +#~ msgid "[SF0019] Plugin directory not defined. Cannot load drivers.\n" +#~ msgstr "Directorio de trabajo no definido. 
No se puede continuar.\n" + +#, fuzzy +#~ msgid "[SF0020] dlopen of SD driver=%s at %s failed: ERR=%s\n" +#~ msgstr "Fallo al abrir dispositivo %s Volumen \"%s\": ERR=%s\n" + +#~ msgid "In free_jcr(), but still attached to device!!!!\n" +#~ msgstr "En free_jcr(), pero todavía conectado al dispositivo!!!!\n" + +#, fuzzy +#~ msgid "Couldn't rewind %s device %s: ERR=%s\n" +#~ msgstr "No se puede rebobinar dispositivo %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Wrong Volume mounted on %s device %s: Wanted %s have %s\n" +#~ msgstr "" +#~ "Volumen incorrecto montado en el dispositivo %s: Esperaba %s tiene %s\n" + +#~ msgid "Too many tries: %s" +#~ msgstr "Demasiados intentos: %s" + +#, fuzzy +#~ msgid "" +#~ "Read label block failed: requested Volume \"%s\" on %s device %s is not a " +#~ "Bacula labeled Volume, because: ERR=%s" +#~ msgstr "" +#~ "Volumen requerido \"%s\" en %s no es un volumen etiquetado Bacula, debido " +#~ "a que: ERR=%s" + +#~ msgid "Could not read Volume label from block.\n" +#~ msgstr "No se pudo leer la etiqueta de volumen desde bloque.\n" + +#~ msgid "Could not unserialize Volume label: ERR=%s\n" +#~ msgstr "No se pudo unserialize etiqueta del Volumen: ERR=%s\n" + +#~ msgid "Volume Header Id bad: %s\n" +#~ msgstr "Malo Id de cabecera de Volumen: %s\n" + +#, fuzzy +#~ msgid "Volume on %s device %s has wrong Bacula version. Wanted %d got %d\n" +#~ msgstr "" +#~ "Volumen en %s tiene la versión incorrecta de Bacula. Busco %d tiene %d\n" + +#, fuzzy +#~ msgid "Volume on %s device %s has bad Bacula label type: %ld\n" +#~ msgstr "Volumen en %s tiene una malo tipo de etiqueta Bacula: %x\n" + +#, fuzzy +#~ msgid "" +#~ "Wrong Volume Type. Wanted an Aligned Volume %s on device %s, but got: %s\n" +#~ msgstr "" +#~ "Volumen incorrecto montado en el dispositivo %s: Esperaba %s tiene %s\n" + +#, fuzzy +#~ msgid "" +#~ "Wrong Volume Type. 
Wanted a Cloud Volume %s on device %s, but got: %s\n" +#~ msgstr "" +#~ "Volumen incorrecto montado en el dispositivo %s: Esperaba %s tiene %s\n" + +#, fuzzy +#~ msgid "Could not reserve volume %s on %s device %s\n" +#~ msgstr "No es posible reservar volumen %s en %s\n" + +#, fuzzy +#~ msgid "Cannot write Volume label to block for %s device %s\n" +#~ msgstr "" +#~ "No se puede escribir la etiqueta de volumen para bloquear el dispositivo " +#~ "%s\n" + +#, fuzzy +#~ msgid "Open %s device %s Volume \"%s\" failed: ERR=%s" +#~ msgstr "Fallo al abrir dispositivo %s Volumen \"%s\": ERR=%s\n" + +#, fuzzy +#~ msgid "Open %s device %s Volume \"%s\" failed: ERR=%s\n" +#~ msgstr "Fallo al abrir dispositivo %s Volumen \"%s\": ERR=%s\n" + +#, fuzzy +#~ msgid "Rewind error on %s device %s: ERR=%s\n" +#~ msgstr "Error de rebobinado en el dispositivo %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Truncate error on %s device %s: ERR=%s\n" +#~ msgstr "Error al truncar en el dispositivo %s :ERR=%s\n" + +#, fuzzy +#~ msgid "Failed to re-open device after truncate on %s device %s: ERR=%s" +#~ msgstr "" +#~ "No se ha podido re-abrir el DVD después de truncar el dispositivo %s: ERR=" +#~ "%s\n" + +#, fuzzy +#~ msgid "Unable to write %s device %s: ERR=%s\n" +#~ msgstr "No es posible escribir el dispositivo %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Recycled volume \"%s\" on %s device %s, all previous data lost.\n" +#~ msgstr "" +#~ "Volumen \"%s\" reciclado en el dispositivo %s, todos los datos anteriores " +#~ "perdidos.\n" + +#, fuzzy +#~ msgid "Wrote label to prelabeled Volume \"%s\" on %s device %s\n" +#~ msgstr "" +#~ "Escribió etiqueta de volumen \"%s\" pre-etiquetada en el dispositivo %s\n" + +#, fuzzy +#~ msgid "Bad Volume session label request=%d\n" +#~ msgstr "Mala etiqueta de Volumen de sesión = %d\n" + +#~ msgid "Expecting Volume Label, got FI=%s Stream=%s len=%d\n" +#~ msgstr "Esperando Etiqueta de Volumen, obtuvo FI=%s Stream=%s len=%d\n" + +#~ msgid "Unknown %d" +#~ msgstr "Desconocido %d" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Volume Label:\n" +#~ "Adata : %d\n" +#~ "Id : %sVerNo : %d\n" +#~ "VolName : %s\n" +#~ "PrevVolName : %s\n" +#~ "VolFile : %d\n" +#~ "LabelType : %s\n" +#~ "LabelSize : %d\n" +#~ "PoolName : %s\n" +#~ "MediaType : %s\n" +#~ "PoolType : %s\n" +#~ "HostName : %s\n" +#~ msgstr "" +#~ "\n" +#~ "Volumen Label:\n" +#~ "Id : %sVerNo : %d\n" +#~ "VolName : %s\n" +#~ "PrevVolName : %s\n" +#~ "VolFile : %d\n" +#~ "LabelType : %s\n" +#~ "LabelSize : %d\n" +#~ "PoolName : %s\n" +#~ "MediaType : %s\n" +#~ "PoolType : %s\n" +#~ "HostName : %s\n" + +#~ msgid "Date label written: %s\n" +#~ msgstr "Fecha de etiqueta escrito: %s\n" + +#~ msgid "Date label written: %04d-%02d-%02d at %02d:%02d\n" +#~ msgstr "Fecha de etiqueta escrito: %04d-%02d-%02d at %02d:%02d\n" + +#~ msgid "" +#~ "\n" +#~ "%s Record:\n" +#~ "JobId : %d\n" +#~ "VerNum : %d\n" +#~ "PoolName : %s\n" +#~ "PoolType : %s\n" +#~ "JobName : %s\n" +#~ "ClientName : %s\n" +#~ msgstr "" +#~ "\n" +#~ "%s Registro:\n" +#~ "JobId : %d\n" +#~ "VerNum : %d\n" +#~ "Nombre Pool : %s\n" +#~ "Tipo del Pool : %s\n" +#~ "Nombre del Job : %s\n" +#~ "Nombre del Cliente : %s\n" + +#~ msgid "" +#~ "Job (unique name) : %s\n" +#~ "FileSet : %s\n" +#~ "JobType : %c\n" +#~ "JobLevel : %c\n" +#~ msgstr "" +#~ "Job (nombre único) : %s\n" +#~ "FileSet : %s\n" +#~ "Tipo de Job : %c\n" +#~ "JobLevel : %c\n" + +#~ msgid "" +#~ "JobFiles : %s\n" +#~ "JobBytes : %s\n" +#~ "StartBlock : %s\n" +#~ "EndBlock : %s\n" +#~ "StartFile : %s\n" +#~ "EndFile : %s\n" +#~ "JobErrors : 
%s\n" +#~ "JobStatus : %c\n" +#~ msgstr "" +#~ "JobFiles : %s\n" +#~ "JobBytes : %s\n" +#~ "StartBlock : %s\n" +#~ "EndBlock : %s\n" +#~ "StartFile : %s\n" +#~ "EndFile : %s\n" +#~ "JobErrors : %s\n" +#~ "JobStatus : %c\n" + +#~ msgid "Date written : %s\n" +#~ msgstr "Fecha escrito: %s\n" + +#~ msgid "Date written : %04d-%02d-%02d at %02d:%02d\n" +#~ msgstr "Fecha escrito: %04d-%02d-%02d at %02d:%02d\n" + +#~ msgid "Fresh Volume" +#~ msgstr "Volumen fresco" + +#~ msgid "Volume" +#~ msgstr "Volumen" + +#~ msgid "End of Media" +#~ msgstr "Fin de la Media" + +#~ msgid "End of Tape" +#~ msgstr "Fin de la Cinta" + +#~ msgid "" +#~ "%s Record: File:blk=%u:%u SessId=%d SessTime=%d JobId=%d DataLen=%d\n" +#~ msgstr "" +#~ "%s Registro: Archivo:blk=%u:%u SessId=%d SessTime=%d JobId=%d DataLen=%d\n" + +#~ msgid "%s Record: File:blk=%u:%u SessId=%d SessTime=%d JobId=%d\n" +#~ msgstr "Registro %s: Archivo:blk=%u:%u SessId=%d SessTime=%d JobId=%d\n" + +#~ msgid " Job=%s Date=%s Level=%c Type=%c\n" +#~ msgstr "Job=%s Fecha=%s Nivel=%c Tipo=%c\n" + +#~ msgid " Date=%s Level=%c Type=%c Files=%s Bytes=%s Errors=%d Status=%c\n" +#~ msgstr "" +#~ "Fecha=%s Nivel=%c Tipo=%c Archivos=%s Bytes=%s Errores=%d Estado=%c\n" + +#~ msgid "pthread_cond_wait failure. ERR=%s\n" +#~ msgstr "pthread_cond_wait fallido. ERR=%s\n" + +#~ msgid "unknown blocked code" +#~ msgstr "Código desconocido bloqueado" + +#, fuzzy +#~ msgid "Too many errors trying to mount %s device %s.\n" +#~ msgstr "Demasiados errores tratando de montar el dispositivo %s.\n" + +#~ msgid "Job %d canceled.\n" +#~ msgstr "Job %d cancelado.\n" + +#, fuzzy +#~ msgid "Open of %s device %s Volume \"%s\" failed: ERR=%s\n" +#~ msgstr "Fallo al abrir dispositivo %s Volumen \"%s\": ERR=%s\n" + +#~ msgid "Volume \"%s\" previously written, moving to end of data.\n" +#~ msgstr "" +#~ "Volumen \"%s\" previamente escrito, moviendo a finales de los datos.\n" + +#, fuzzy +#~ msgid "Unable to position to end of data on %s device %s: ERR=%s\n" +#~ msgstr "" +#~ "No se puede posicionar al final los datos en el dispositivo %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Volume \"%s\" not loaded on %s device %s.\n" +#~ msgstr "Volumen \"%s\" no en dispositivo %s.\n" + +#~ msgid "" +#~ "Director wanted Volume \"%s\".\n" +#~ " Current Volume \"%s\" not acceptable because:\n" +#~ " %s" +#~ msgstr "" +#~ "Director quería Volumen \"%s\".\n" +#~ " Volumen actual \"%s\" no es aceptable porque:\n" +#~ " %s" + +#, fuzzy +#~ msgid "%s device %s not configured to autolabel Volumes.\n" +#~ msgstr "Dispositivo %s no configurado para volúmenes autolabel.\n" + +#~ msgid "Marking Volume \"%s\" in Error in Catalog.\n" +#~ msgstr "Marcado volumen \"%s\" en Error en el Catálogo.\n" + +#~ msgid "" +#~ "Autochanger Volume \"%s\" not found in slot %d.\n" +#~ " Setting InChanger to zero in catalog.\n" +#~ msgstr "" +#~ "Volumen auto-cambiador \"%s\" no se encuentra en la ranura %d\n" +#~ "Estableciendo InChanger a cero en el catálogo.\n" + +#~ msgid "Hey!!!!! WroteVol non-zero !!!!!\n" +#~ msgstr "Hey !!!!! No WroteVol cero !!!!!\n" + +#~ msgid "" +#~ "Invalid tape position on volume \"%s\" on device %s. Expected %d, got %d\n" +#~ msgstr "" +#~ "Posición de la cinta no válida en el volumen \"%s\" en el dispositivo %s. 
" +#~ "Esperaba %d, obtuvo %d\n" + +#, fuzzy +#~ msgid "Cannot open %s Dev=%s, Vol=%s for reading.\n" +#~ msgstr "No se puede abrir Dev=%s, Vol=%s\n" + +#~ msgid "Unable to set eotmodel on device %s: ERR=%s\n" +#~ msgstr "No se puede establecer eotmodel en el dispositivo %s: ERR=%s\n" + +#~ msgid " Bacula status:" +#~ msgstr "Estado del Bacula:" + +#~ msgid " file=%d block=%d\n" +#~ msgstr "archivo=%d bloque=%d\n" + +#~ msgid "ioctl MTIOCGET error on %s. ERR=%s.\n" +#~ msgstr "ioctl MTIOCGET error en %s. ERR=%s.\n" + +#~ msgid " Device status:" +#~ msgstr "Estado del Dispositivo:" + +#~ msgid "unknown func code %d" +#~ msgstr "código de función %d desconocido" + +#~ msgid "I/O function \"%s\" not supported on this device.\n" +#~ msgstr "I/O función \"%s\" no es compatible con este dispositivo. \n" + +#~ msgid "Cannot open bootstrap file %s: %s\n" +#~ msgstr "No se puede abrir el archivo bootstrap %s: %s\n" + +#~ msgid "Device \"%s\" in bsr at inappropriate place.\n" +#~ msgstr "Dispositivo %s en BSR en el lugar inadecuado.\n" + +#~ msgid "REGEX '%s' compile error. ERR=%s\n" +#~ msgstr "error de compilación REGEX '%s'. ERR=%s\n" + +#~ msgid "JobType not yet implemented\n" +#~ msgstr "JobType aún no se han implementado\n" + +#~ msgid "JobLevel not yet implemented\n" +#~ msgstr "JobLevel aún no se han implementado\n" + +#~ msgid "MediaType %s in bsr at inappropriate place.\n" +#~ msgstr "MediaType %s en BSR en el lugar inadecuado.\n" + +#~ msgid "Slot %d in bsr at inappropriate place.\n" +#~ msgstr "Ranura %d en BSR en el lugar inadecuado.\n" + +#~ msgid "VolFile : %u-%u\n" +#~ msgstr "VolFile : %u-%u\n" + +#~ msgid "VolBlock : %u-%u\n" +#~ msgstr "VolBlock : %u-%u\n" + +#, fuzzy +#~ msgid "VolAddr : %s-%llu\n" +#~ msgstr "VolAddr : %llu-%llu\n" + +#~ msgid "VolAddr : %llu-%llu\n" +#~ msgstr "VolAddr : %llu-%llu\n" + +#~ msgid "FileIndex : %u\n" +#~ msgstr "FileIndex : %u\n" + +#~ msgid "FileIndex : %u-%u\n" +#~ msgstr "FileIndex : %u-%u\n" + +#~ msgid "JobId : %u\n" +#~ msgstr "JobId : %u\n" + +#~ msgid "JobId : %u-%u\n" +#~ msgstr "JobId : %u-%u\n" + +#~ msgid "SessId : %u\n" +#~ msgstr "SessId : %u\n" + +#~ msgid "SessId : %u-%u\n" +#~ msgstr "SessId : %u-%u\n" + +#~ msgid "VolumeName : %s\n" +#~ msgstr "VolumeName : %s\n" + +#~ msgid " MediaType : %s\n" +#~ msgstr "MediaType : %s\n" + +#~ msgid " Device : %s\n" +#~ msgstr "Dispositivo : %s\n" + +#~ msgid " Slot : %d\n" +#~ msgstr "Ranura : %d\n" + +#~ msgid "Client : %s\n" +#~ msgstr "Cliente : %s\n" + +#~ msgid "Job : %s\n" +#~ msgstr "Job : %s\n" + +#~ msgid "SessTime : %u\n" +#~ msgstr "SessTime : %u\n" + +#~ msgid "BSR is NULL\n" +#~ msgstr "BSR is NULL\n" + +#~ msgid "Next : 0x%x\n" +#~ msgstr "Siguiente : 0x%x\n" + +#~ msgid "Root bsr : 0x%x\n" +#~ msgstr "Root bsr : 0x%x\n" + +#~ msgid "count : %u\n" +#~ msgstr "contado : %u\n" + +#~ msgid "found : %u\n" +#~ msgstr "encontrado : %u\n" + +#~ msgid "done : %s\n" +#~ msgstr "realizado : %s\n" + +#~ msgid "positioning : %d\n" +#~ msgstr "posicionamiento : %d\n" + +#~ msgid "fast_reject : %d\n" +#~ msgstr "fast_reject : %d\n" + +#~ msgid "" +#~ "Bootstrap file error: %s\n" +#~ " : Line %d, col %d of file %s\n" +#~ "%s\n" +#~ msgstr "" +#~ "Error archivo Bootstrap: %s\n" +#~ " : Linea %d, columna %d del archivo %s\n" +#~ "%s\n" + +#~ msgid "No Volume names found for restore.\n" +#~ msgstr "Nombres de Volumen no encontrados para restaurar.\n" + +#~ msgid ">filed: Error Hdr=%s\n" +#~ msgstr ">filed: Error Hdr=%s\n" + +#, fuzzy +#~ msgid "Error sending header to Client. 
ERR=%s\n" +#~ msgstr "Error escribiendo encabezado para archivo de cola. ERR=%s\n" + +#~ msgid "Error sending to FD. ERR=%s\n" +#~ msgstr "Error enviando para FD. ERR=%s\n" + +#, fuzzy +#~ msgid "Error sending data to Client. ERR=%s\n" +#~ msgstr "Error enviando para FD. ERR=%s\n" + +#~ msgid "Error sending to File daemon. ERR=%s\n" +#~ msgstr "Error enviando para demonio File. ERR=%s\n" + +#, fuzzy +#~ msgid "End of Volume \"%s\" at addr=%s on device %s.\n" +#~ msgstr "Volumen \"%s\" no en dispositivo %s.\n" + +#~ msgid "Did fsr in attemp to skip bad record.\n" +#~ msgstr "fsr hizo en un intento para saltar malo registro.\n" + +#, fuzzy +#~ msgid "Forward spacing Volume \"%s\" to addr=%s\n" +#~ msgstr "" +#~ "Espaciando hacia adelante Volumen \"%s\" para archivo:bloque %u:%u.\n" + +#~ msgid "Begin Session" +#~ msgstr "Inicio de Sesión" + +#~ msgid "End Session" +#~ msgstr "Fin de Sesión" + +#~ msgid "Unknown code %d\n" +#~ msgstr "Código desconocido %d\n" + +#~ msgid "Sanity check failed. maxlen=%d datalen=%d. Block discarded.\n" +#~ msgstr "" +#~ "Chequeo de Sanidad fracasado. maxlen=%d datalen=%d. Bloque descartado.\n" + +#~ msgid "unknown: %d" +#~ msgstr "desconocido: %d" + +#~ msgid "Unable to initialize reservation lock. ERR=%s\n" +#~ msgstr "No se puede inicializar bloqueo de reserva. ERR=%s\n" + +#~ msgid "Hey! num_writers=%d!!!!\n" +#~ msgstr "Hey! num_writers=%d!!!!\n" + +#~ msgid "3939 Could not get dcr\n" +#~ msgstr "3939 No se pudo obtener DCR\n" + +#~ msgid "Device reservation failed for JobId=%d: %s\n" +#~ msgstr "Reservación del Dispositivo fallida para JobId=%d: %s\n" + +#~ msgid "Failed command: %s\n" +#~ msgstr "Comando fallido: %s\n" + +#~ msgid "" +#~ "\n" +#~ " Device \"%s\" in changer \"%s\" requested by DIR could not be opened " +#~ "or does not exist.\n" +#~ msgstr "" +#~ "\n" +#~ "Dispositivo \"%s\" en el cambiador \"%s\" solicitado por el DIR no se " +#~ "pudo abrir o no existe.\n" + +#~ msgid "" +#~ "\n" +#~ " Device \"%s\" requested by DIR could not be opened or does not " +#~ "exist.\n" +#~ msgstr "" +#~ "\n" +#~ "Dispositivo \"%s\" solicitado por el DIR no se pudo abrir o no existe.\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ " Device \"%s\" requested by DIR is disabled.\n" +#~ msgstr "" +#~ "\n" +#~ "Dispositivo \"%s\" solicitado por el DIR no se pudo abrir o no existe.\n" + +#~ msgid "3926 Could not get dcr for device: %s\n" +#~ msgstr "3926 No se pudo obtener dcr para el dispositivo: %s\n" + +#, fuzzy +#~ msgid "3603 JobId=%u %s device %s is busy reading.\n" +#~ msgstr "3603 JobId=%u dispositivo %s está ocupado leyendo.\n" + +#, fuzzy +#~ msgid "3604 JobId=%u %s device %s is BLOCKED due to user unmount.\n" +#~ msgstr "" +#~ "3604 JobId=%u dispositivo %s está BLOQUEADO debido al desmonte de " +#~ "usuario.\n" + +#, fuzzy +#~ msgid "3601 JobId=%u %s device %s is BLOCKED due to user unmount.\n" +#~ msgstr "" +#~ "3601 JobId=%u dispositivo %s está BLOQUEADO debido al desmontar por el " +#~ "usuario.\n" + +#, fuzzy +#~ msgid "" +#~ "3602 JobId=%u %s device %s is busy (already reading/writing). 
read=%d, " +#~ "writers=%d reserved=%d\n" +#~ msgstr "" +#~ "3602 JobId=%u dispositivo %s está ocupado (ya leyendo/escribiendo).\n" + +#, fuzzy +#~ msgid "3609 JobId=%u Max concurrent jobs=%d exceeded on %s device %s.\n" +#~ msgstr "" +#~ "3609 JobId=%u Máximo trabajos simultáneos superado en la unidad %s.\n" + +#, fuzzy +#~ msgid "3611 JobId=%u Volume max jobs=%d exceeded on %s device %s.\n" +#~ msgstr "3610 JobId=%u máximo volumen jobs excedido en la unidad %s.\n" + +#, fuzzy +#~ msgid "" +#~ "3608 JobId=%u wants Pool=\"%s\" but have Pool=\"%s\" nreserve=%d on %s " +#~ "device %s.\n" +#~ msgstr "" +#~ "3608 JobId=%u requiere Pool=\"%s\", pero hay Pool=\"%s\" nreserve=%d en " +#~ "la unidad %s.\n" + +#, fuzzy +#~ msgid "3605 JobId=%u wants free drive but %s device %s is busy.\n" +#~ msgstr "" +#~ "3605 JobId=%u requiere la unidad disponible, pero el dispositivo %s está " +#~ "ocupado.\n" + +#, fuzzy +#~ msgid "" +#~ "3606 JobId=%u prefers mounted drives, but %s device %s has no Volume.\n" +#~ msgstr "" +#~ "3606 JobId=%u prefiere unidades montadas, pero la unidad %s no tiene " +#~ "Volumen.\n" + +#, fuzzy +#~ msgid "" +#~ "3607 JobId=%u wants Vol=\"%s\" drive has Vol=\"%s\" on %s device %s.\n" +#~ msgstr "" +#~ "3607 JobId=%u quiere Vol=\"%s\" la unidad tiene Vol=\"%s\" en la unidad " +#~ "%s.\n" + +#~ msgid "Logic error!!!! JobId=%u Should not get here.\n" +#~ msgstr "Error lógico!! JobId=%u No debería llegar hasta aquí.\n" + +#, fuzzy +#~ msgid "3910 JobId=%u Logic error!!!! %s device %s Should not get here.\n" +#~ msgstr "" +#~ "3910 JobId=%u Error de lógica!!! la unidad %s no debería llegar aquí.\n" + +#~ msgid "Logic error!!!! Should not get here.\n" +#~ msgstr "Error lógico!!! No debe llegar aquí.\n" + +#, fuzzy +#~ msgid "3911 JobId=%u failed reserve %s device %s.\n" +#~ msgstr "3911 JobId=%u fallo al reservar unidad %s.\n" + +#, fuzzy +#~ msgid "Job cancelled.\n" +#~ msgstr "Job cancelado.\n" + +#~ msgid "Spooling statistics:\n" +#~ msgstr "Estadísticas de cola:\n" + +#~ msgid "" +#~ "Data spooling: %u active jobs, %s bytes; %u total jobs, %s max bytes/" +#~ "job.\n" +#~ msgstr "" +#~ "Encolando datos: %u jobs activos, %s bytes; %u total de jobs, %s máximo " +#~ "bytes/job.\n" + +#~ msgid "" +#~ "Attr spooling: %u active jobs, %s bytes; %u total jobs, %s max bytes.\n" +#~ msgstr "" +#~ "Encolando Atributos: %u jobs activos, %s bytes; %u jobs total, %s bytes " +#~ "máximos.\n" + +#~ msgid "Spooling data ...\n" +#~ msgstr "Datos en cola ...\n" + +#~ msgid "Bad return from despool WroteVol=%d\n" +#~ msgstr "Mal retorno de despool WroteVol=%d\n" + +#~ msgid "Open data spool file %s failed: ERR=%s\n" +#~ msgstr "Fallo al abrir archivo %s de datos de cola: ERR=%s\n" + +#~ msgid "Despooling zero bytes. Your disk is probably FULL!\n" +#~ msgstr "Desencolando cero bytes. El disco probablemente esta LLENO!\n" + +#~ msgid "Committing spooled data to Volume \"%s\". Despooling %s bytes ...\n" +#~ msgstr "" +#~ "Perpetrando datos encolados al volumen \"%s\". Desencolando %s bytes ...\n" + +#~ msgid "Writing spooled data to Volume. Despooling %s bytes ...\n" +#~ msgstr "Escribiendo datos encolados al volumen. 
Desencolando %s bytes ...\n" + +#~ msgid "" +#~ "Despooling elapsed time = %02d:%02d:%02d, Transfer rate = %s Bytes/" +#~ "second\n" +#~ msgstr "" +#~ "Tiempo transcurrido desencolando = %02d:%02d:%02d, Tasa de transferencia " +#~ "= %s Bytes/segundo\n" + +#~ msgid "Ftruncate spool file failed: ERR=%s\n" +#~ msgstr "Fallo ftruncate archivo de cola: ERR=%s\n" + +#~ msgid "Spool header read error. ERR=%s\n" +#~ msgstr "Error de lectura de la cola. ERR=%s\n" + +#~ msgid "Spool read error. Wanted %u bytes, got %d\n" +#~ msgstr "Error de lectura de la cola. Esperaba %u bytes, obtuvo %d\n" + +#~ msgid "Spool header read error. Wanted %u bytes, got %d\n" +#~ msgstr "" +#~ "Error de lectura de cabecera de la cola. Esperaba %u bytes, obtuvo %d\n" + +#~ msgid "Spool block too big. Max %u bytes, got %u\n" +#~ msgstr "Bloque de la cola demasiado grande. Máximo %u bytes, obtuvo %u\n" + +#~ msgid "Spool data read error. Wanted %u bytes, got %d\n" +#~ msgstr "" +#~ "Error de lectura de datos de la cola. Esperaba %u bytes, obtuvo %d\n" + +#, fuzzy +#~ msgid "" +#~ "User specified Job spool size reached: JobSpoolSize=%s MaxJobSpoolSize=" +#~ "%s\n" +#~ msgstr "Tamaño de cola especificada por el usuario alcanzado.\n" + +#, fuzzy +#~ msgid "" +#~ "User specified Device spool size reached: DevSpoolSize=%s MaxDevSpoolSize=" +#~ "%s\n" +#~ msgstr "Tamaño de cola especificada por el usuario alcanzado.\n" + +#~ msgid "Bad return from despool in write_block.\n" +#~ msgstr "Malo retorno desde despool en write_block.\n" + +#~ msgid "Spooling data again ...\n" +#~ msgstr "Encolando datos de nuevo ...\n" + +#~ msgid "" +#~ "Error writing header to spool file. Disk probably full. Attempting " +#~ "recovery. Wanted to write=%d got=%d\n" +#~ msgstr "" +#~ "Error al escribir encabezado al archivo de cola. Probablemente disco " +#~ "lleno. Intentando recuperación. Esperaba escribir=%d tiene=%d\n" + +#~ msgid "Fatal despooling error." +#~ msgstr "Error fatal desencolando." + +#, fuzzy +#~ msgid "Error writing block to spool file. ERR=%s\n" +#~ msgstr "Error escribiendo datos en archivo de cola. ERR=%s\n" + +#~ msgid "Network error on BlastAttributes.\n" +#~ msgstr "Error de red en BlastAttributes.\n" + +#~ msgid "Fseek on attributes file failed: ERR=%s\n" +#~ msgstr "Fallo fseek en los atributos de archivo: ERR=%s\n" + +#, fuzzy +#~ msgid "Truncate on attributes file failed: ERR=%s\n" +#~ msgstr "Fallo fseek en los atributos de archivo: ERR=%s\n" + +#~ msgid "Sending spooled attrs to the Director. Despooling %s bytes ...\n" +#~ msgstr "Enviando attrs encolados al Director. 
Desencolando %s bytes ...\n" + +#~ msgid "fopen attr spool file %s failed: ERR=%s\n" +#~ msgstr "Fallo fopen en archivo %s de atributo de cola: ERR=%s\n" + +#, fuzzy +#~ msgid " %s Alert: at %s Volume=\"%s\" alert=%s\n" +#~ msgstr "Fallo al abrir dispositivo %s Volumen \"%s\": ERR=%s\n" + +#~ msgid "Used Volume status:\n" +#~ msgstr "Estado de Volumen Usados:\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Device \"%s\" is not open or does not exist.\n" +#~ msgstr "Dispositivo \"%s\" no está abierto o no existe.\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Device %s is %s %s:\n" +#~ " Volume: %s\n" +#~ " Pool: %s\n" +#~ " Media type: %s\n" +#~ msgstr "" +#~ "Dispositivo %s está montado con:\n" +#~ " Volumen: %s\n" +#~ " Pool: %s\n" +#~ " Tipo de Media: %s\n" + +#, fuzzy +#~ msgid "waiting for" +#~ msgstr "Esperando por montaje" + +#, fuzzy +#~ msgid "*unknown*" +#~ msgstr "desconocido" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Device %s: %s open but no Bacula volume is currently mounted.\n" +#~ msgstr "Dispositivo %s abierto, pero volumen actual montado no es Bacula.\n" + +#~ msgid " Total Bytes=%s Blocks=%s Bytes/block=%s\n" +#~ msgstr "Total de Bytes=%s Bloques=%s Bytes/bloques=%s\n" + +#~ msgid " Total Bytes Read=%s Blocks Read=%s Bytes/block=%s\n" +#~ msgstr "Total de Bytes Leídos=%s Bloques Leídos=%s Bytes/bloque=%s\n" + +#~ msgid " Positioned at File=%s Block=%s\n" +#~ msgstr "Situado en el archivo=%s Bloque=%s\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Device %s: %s is not open.\n" +#~ msgstr "Dispositivo %s no está abierto.\n" + +#, fuzzy +#~ msgid " Available %sSpace=%sB\n" +#~ msgstr "Archivos examinados=%s\n" + +#~ msgid "Autochanger \"%s\" with devices:\n" +#~ msgstr "Auto-cambiador \"%s\" con los dispositivos:\n" + +#~ msgid "" +#~ "\n" +#~ "Device status:\n" +#~ msgstr "" +#~ "\n" +#~ "Estado del dispositivo:\n" + +#, fuzzy +#~ msgid "Daemon started %s. Jobs: run=%d, running=%d.\n" +#~ msgstr "Demonio iniciado %s, %d Job ejecutando desde el inicio.\n" + +#~ msgid "" +#~ "No DEVICE structure.\n" +#~ "\n" +#~ msgstr "" +#~ "Ninguna estructura del DISPOSITIVO.\n" +#~ "\n" + +#, fuzzy +#~ msgid " Device is disabled. User command.\n" +#~ msgstr "Dispositivo está BLOQUEADO. Usuario desmontado.\n" + +#, fuzzy +#~ msgid " Device is BLOCKED. User unmounted.\n" +#~ msgstr "Dispositivo está BLOQUEADO. Usuario desmontado.\n" + +#, fuzzy +#~ msgid " Device is BLOCKED. User unmounted during wait for media/mount.\n" +#~ msgstr "" +#~ " Dispositivo está BLOQUEADO. 
Usuario desmontado durante espera por " +#~ "media/mount.\n" + +#, fuzzy +#~ msgid "" +#~ " Device is BLOCKED waiting for mount of volume \"%s\",\n" +#~ " Pool: %s\n" +#~ " Media type: %s\n" +#~ msgstr "" +#~ "Dispositivo está BLOQUEADO esperando por montaje de volumen \"%s\",\n" +#~ " Pool: %s\n" +#~ " Tipo de Media: %s\n" + +#, fuzzy +#~ msgid "" +#~ " Device is BLOCKED waiting to create a volume for:\n" +#~ " Pool: %s\n" +#~ " Media type: %s\n" +#~ msgstr "" +#~ "Dispositivo está BLOQUEADO esperando para crear un volumen para:\n" +#~ " Pool: %s\n" +#~ " Tipo de Media: %s\n" + +#, fuzzy +#~ msgid " Device is BLOCKED waiting for media.\n" +#~ msgstr "Dispositivo está bloqueado esperando por medios.\n" + +#, fuzzy +#~ msgid " Device is being initialized.\n" +#~ msgstr "Dispositivo se está inicializando.\n" + +#, fuzzy +#~ msgid " Device is blocked labeling a Volume.\n" +#~ msgstr "El dispositivo está bloqueado etiquetando un volumen.\n" + +#, fuzzy +#~ msgid " Slot %d %s loaded in drive %d.\n" +#~ msgstr "Ranura %d esta cargado en la unidad %d.\n" + +#, fuzzy +#~ msgid " Drive %d is not loaded.\n" +#~ msgstr "Unidad %d no está cargada.\n" + +#~ msgid "Device state:\n" +#~ msgstr "Estado del dispositivo:\n" + +#, fuzzy +#~ msgid " Writers=%d reserves=%d blocked=%d enabled=%d usage=%s\n" +#~ msgstr "" +#~ "num_writers=%d reservado=%d bloque=%d\n" +#~ "\n" + +#, fuzzy +#~ msgid "Attached JobIds: " +#~ msgstr "" +#~ "\n" +#~ "Scheduled Jobs:\n" + +#, fuzzy +#~ msgid " Archive name: %s Device name: %s\n" +#~ msgstr "Nombre del archivo: %s Nombre del dispositivo: %s\n" + +#, fuzzy +#~ msgid " File=%u block=%u\n" +#~ msgstr "Archivo=%u bloque=%u\n" + +#, fuzzy +#~ msgid " Min block=%u Max block=%u\n" +#~ msgstr "Bloque Min=%u Bloque Max=%u\n" + +#~ msgid "%s Job %s waiting for Client connection.\n" +#~ msgstr "%s Job %s esperando por conexión de cliente.\n" + +#, fuzzy +#~ msgid "" +#~ "Reading: %s %s job %s JobId=%d Volume=\"%s\"\n" +#~ " pool=\"%s\" device=%s newbsr=%d\n" +#~ msgstr "" +#~ "Leyendo: %s %s trabajo %s JobId=%d Volumen=\"%s\"\n" +#~ " pool=\"%s\" dispositivo=%s\n" + +#~ msgid "" +#~ "Writing: %s %s job %s JobId=%d Volume=\"%s\"\n" +#~ " pool=\"%s\" device=%s\n" +#~ msgstr "" +#~ "Escribiendo: %s %s trabajo %s JobId=%d Volumen=\"%s\"\n" +#~ " pool=\"%s\" dispositivo=%s\n" + +#~ msgid " spooling=%d despooling=%d despool_wait=%d\n" +#~ msgstr "encolando=%d desencolando=%d despool_wait=%d\n" + +#, fuzzy +#~ msgid " Files=%s Bytes=%s AveBytes/sec=%s LastBytes/sec=%s\n" +#~ msgstr "Archivos=%s Bytes=%s Bytes/sec=%s\n" + +#~ msgid " FDReadSeqNo=%s in_msg=%u out_msg=%d fd=%d\n" +#~ msgstr " FDReadSeqNo=%s in_msg=%u out_msg=%d fd=%d\n" + +#~ msgid " FDSocket closed\n" +#~ msgstr "FDSocket cerrado\n" + +#~ msgid "" +#~ "\n" +#~ "Jobs waiting to reserve a drive:\n" +#~ msgstr "" +#~ "\n" +#~ "Jobs esperando para reservar una unidad:\n" + +#, fuzzy +#~ msgid "3900 No arg in .status command: %s\n" +#~ msgstr "Comando .status malo: %s\n" + +#, fuzzy +#~ msgid "3900 Unknown arg in .status command: %s\n" +#~ msgstr "Comando .status malo: %s\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: bacula-sd [options] [-c config_file] [config_file]\n" +#~ " -c use as configuration file\n" +#~ " -d [,] set debug level to , debug tags to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -T set trace on\n" +#~ " -f run in foreground (for debugging)\n" +#~ " -g set groupid to group\n" +#~ " -m print kaboom output (for debugging)\n" +#~ " -p proceed despite I/O errors\n" 
+#~ " -s no signals (for debugging)\n" +#~ " -t test - read config and exit\n" +#~ " -u userid to \n" +#~ " -v verbose user messages\n" +#~ " -? print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Versión: %s (%s)\n" +#~ "\n" +#~ "Utilice: stored [opciones] [-c archivo_configuración] " +#~ "[archivo_configuración]\n" +#~ " -c usar como archivo de configuración\n" +#~ " -d establecer el nivel de depuración para \n" +#~ " -dt imprimir timestamp en salida de depuración\n" +#~ " -f ejecutar en primer plano (para depuración)\n" +#~ " -g establecer groupid para grupo\n" +#~ " -m imprimir salida kaboom para depuración)\n" +#~ " -p continuar a pesar de errores de E/S\n" +#~ " -s sin señales (para depuración)\n" +#~ " -t prueba - leer la configuración y salir\n" +#~ " -u establecer userid para \n" +#~ " -v mensajes de usuario detallados\n" +#~ " -? imprimir este mensaje.\n" +#~ "\n" + +#~ msgid "Volume Session Time is ZERO!\n" +#~ msgstr "Tiempo de Sesión de Volumen es CERO!\n" + +#~ msgid "Unable to create thread. ERR=%s\n" +#~ msgstr "No se puede crear hilo. ERR=%s\n" + +#, fuzzy +#~ msgid "Could not initialize SD device \"%s\"\n" +#~ msgstr "No se puede iniciar %s\n" + +#, fuzzy +#~ msgid "Unable to stat ControlDevice %s: ERR=%s\n" +#~ msgstr "No se puede stat dispositivo %s: ERR=%s\n" + +#~ msgid "Could not open device %s\n" +#~ msgstr "No se pudo abrir el dispositivo %s\n" + +#~ msgid "Could not mount device %s\n" +#~ msgstr "No se pudo montar el dispositivo %s\n" + +#~ msgid "Expected a Device Type keyword, got: %s" +#~ msgstr "Esperaba una palabra clave Tipo Device, obtuvo: %s" + +#, fuzzy +#~ msgid "Expected a Cloud driver keyword, got: %s" +#~ msgstr "Esperaba una palabra clave Tipo Level, obtuvo: %s" + +#, fuzzy +#~ msgid "Expected a Truncate Cache option keyword, got: %s" +#~ msgstr "Esperaba una palabra clave Tipo FileSet, obtuvo: %s" + +#, fuzzy +#~ msgid "Expected a Cloud Upload option keyword, got: %s" +#~ msgstr "Esperaba una palabra clave Tipo FileSet, obtuvo: %s" + +#, fuzzy +#~ msgid "Expected a Cloud communications protocol option keyword, got: %s" +#~ msgstr "Espera una palabra clave Tipo Job de Migración, obtuvo: %s" + +#, fuzzy +#~ msgid "Expected a Cloud Uri Style option keyword, got: %s" +#~ msgstr "Esperaba una palabra clave Tipo FileSet, obtuvo: %s" + +#~ msgid "" +#~ "Maximum Block Size configured value %u is greater than allowed maximum: %u" +#~ msgstr "" +#~ "Valor máximo de tamaño bloque configurado %u es mayor de lo máximo " +#~ "permitido: %u" + +#~ msgid "Warning: no \"%s\" resource (%d) defined.\n" +#~ msgstr "Advertencia: Recurso \"%s\" (%d) no definido.\n" + +#~ msgid "dump_resource type=%d\n" +#~ msgstr "dump_resource tipo=%d\n" + +#~ msgid "Warning: unknown resource type %d\n" +#~ msgstr "Advertencia: Tipo de recurso %d desconocido\n" + +#~ msgid "Cannot find AutoChanger resource %s\n" +#~ msgstr "No puede encontrar recurso Auto-cambiador %s\n" + +#, fuzzy +#~ msgid "Unable to init lock for Autochanger=%s: ERR=%s\n" +#~ msgstr "No se puede iniciar mutex: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot find Device resource %s\n" +#~ msgstr "No se puede encontrar el recurso Director %s\n" + +#, fuzzy +#~ msgid "Alert: Volume=\"%s\" alert=%d: ERR=%s\n" +#~ msgstr "Fallo al eliminar el Volumen \"%s\". 
ERR=%s" + +#~ msgid "3997 Bad alert command: %s: ERR=%s.\n" +#~ msgstr "3997 Malo comando alerta: %s: ERR=%s.\n" + +#~ msgid "Unable to open device %s: ERR=%s\n" +#~ msgstr "No se puede abrir el dispositivo %s: ERR=%s\n" + +#~ msgid "No tape loaded or drive offline on %s.\n" +#~ msgstr "Ninguna cinta cargada o unidad offline en %s.\n" + +#~ msgid "Rewind error on %s. ERR=%s.\n" +#~ msgstr "Rebobinar error en %s. ERR=%s.\n" + +#~ msgid "Ready to append to end of Volume \"%s\" at file=%d.\n" +#~ msgstr "Listo para anexar al final del volumen \"%s\" en el archivo=%d\n" + +#, fuzzy +#~ msgid "" +#~ "For Volume \"%s\":\n" +#~ "The number of files mismatch! Volume=%u Catalog=%u\n" +#~ "Correcting Catalog\n" +#~ msgstr "" +#~ "Bacula no puede escribir en el Volumen de cinta \"%s\" porque:\n" +#~ "El número de archivos de desfasa! Volumen=%u Catalogo=%u\n" + +#~ msgid "" +#~ "Bacula cannot write on tape Volume \"%s\" because:\n" +#~ "The number of files mismatch! Volume=%u Catalog=%u\n" +#~ msgstr "" +#~ "Bacula no puede escribir en el Volumen de cinta \"%s\" porque:\n" +#~ "El número de archivos de desfasa! Volumen=%u Catalogo=%u\n" + +#~ msgid "ioctl MTEOM error on %s. ERR=%s.\n" +#~ msgstr "ioctl MTEOM error en %s. ERR=%s.\n" + +#~ msgid "Bad call to load_dev. Device not open\n" +#~ msgstr "Mala llamada a load_dev. Dispositivo no abierto\n" + +#~ msgid "ioctl MTLOAD error on %s. ERR=%s.\n" +#~ msgstr "ioctl MTLOAD error en %s. ERR=%s.\n" + +#~ msgid "ioctl MTOFFL error on %s. ERR=%s.\n" +#~ msgstr "ioctl MTOFFL error en %s. ERR=%s.\n" + +#~ msgid "Bad call to fsf. Device not open\n" +#~ msgstr "Mala llamada a fsf. Dispositivo no abierto\n" + +#~ msgid "Device %s at End of Tape.\n" +#~ msgstr "Dispositivo %s en el final de la cinta.\n" + +#~ msgid "ioctl MTFSF error on %s. ERR=%s.\n" +#~ msgstr "ioctl MTFSF error en %s. ERR=%s.\n" + +#~ msgid "Bad call to bsf. Device not open\n" +#~ msgstr "Mala llamada a bsf. Dispositivo no abierto\n" + +#~ msgid "Device %s cannot BSF because it is not a tape.\n" +#~ msgstr "Dispositivo %s no puede BSF, porque no es una cinta.\n" + +#~ msgid "ioctl MTBSF error on %s. ERR=%s.\n" +#~ msgstr "ioctl MTBSF error en %s. ERR=%s.\n" + +#~ msgid "Bad call to fsr. Device not open\n" +#~ msgstr "Mala llamada a FSR. El dispositivo no abre\n" + +#~ msgid "ioctl MTFSR not permitted on %s.\n" +#~ msgstr "ioctl MTFSR no permitido en %s.\n" + +#~ msgid "ioctl MTFSR %d error on %s. ERR=%s.\n" +#~ msgstr "ioctl MTFSR %d error en %s. ERR=%s.\n" + +#~ msgid "Bad call to bsr_dev. Device not open\n" +#~ msgstr "Mala llamada a bsr_dev. El dispositivo no abre\n" + +#~ msgid "ioctl MTBSR not permitted on %s.\n" +#~ msgstr "ioctl MTBSR no permitido en %s.\n" + +#~ msgid "ioctl MTBSR error on %s. ERR=%s.\n" +#~ msgstr "ioctl MTBSR error en %s. ERR=%s.\n" + +#~ msgid "Bad call to weof_dev. Device not open\n" +#~ msgstr "Mala llamada a weof_dev. El dispositivo no abre\n" + +#~ msgid "Attempt to WEOF on non-appendable Volume\n" +#~ msgstr "Intento de WEOF en Volumen no-appendable\n" + +#~ msgid "ioctl MTWEOF error on %s. ERR=%s.\n" +#~ msgstr "ioctl MTWEOF error en %s. ERR=%s.\n" + +#~ msgid "Read and write devices not properly initialized.\n" +#~ msgstr "" +#~ "Dispositivos de lectura y escritura no se han iniciado correctamente.\n" + +#~ msgid "No Volume names found for %s.\n" +#~ msgstr "Nombres de Volúmenes no encontrados para %s.\n" + +#~ msgid "Unable to initialize volume list lock. ERR=%s\n" +#~ msgstr "No se puede inicializar la lista de bloqueo de volumen. 
ERR=%s\n" + +#, fuzzy +#~ msgid "Could not reserve volume \"%s\", because job canceled.\n" +#~ msgstr "No es posible reservar volumen %s en %s\n" + +#, fuzzy +#~ msgid "" +#~ "Could not reserve volume \"%s\" for append, because it will be read.\n" +#~ msgstr "No es posible reservar volumen %s en %s\n" + +#, fuzzy +#~ msgid "" +#~ "Cannot reserve Volume=%s because drive is busy with Volume=%s (JobId=" +#~ "%ld).\n" +#~ msgstr "No se puede podar Volumen \"%s\", porque el esta archivo.\n" + +#, fuzzy +#~ msgid "Volume %s is busy swapping from %s to %s\n" +#~ msgstr "3936 Dispositivo %s está ocupado leyendo.\n" + +#, fuzzy +#~ msgid "Volume %s is busy swapping.\n" +#~ msgstr "3936 Dispositivo %s está ocupado leyendo.\n" + +#, fuzzy +#~ msgid "%s device %s is busy.\n" +#~ msgstr "3936 Dispositivo %s está ocupado leyendo.\n" + +#~ msgid "pthread timedwait error. ERR=%s\n" +#~ msgstr "error timedwait pthread. ERR=%s\n" + +#~ msgid "JobId=%s, Job %s waiting to reserve a device.\n" +#~ msgstr "JobId=%s, Job %s esperando para reservar un dispositivo.\n" + +#, fuzzy +#~ msgid "JobId=%s, Job %s waiting device %s.\n" +#~ msgstr "JobId=%s, Job %s esperando para reservar un dispositivo.\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "Example : bbatch -w /path/to/workdir -h localhost -f dat1 -f dat -f datx\n" +#~ " will start 3 thread and load dat1, dat and datx in your catalog\n" +#~ "See bbatch.c to generate datafile\n" +#~ "\n" +#~ "Usage: bbatch [ options ] -w working/dir -f datafile\n" +#~ " -b with batch mode\n" +#~ " -B without batch mode\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -n specify the database name (default bacula)\n" +#~ " -u specify database user name (default bacula)\n" +#~ " -P specify database host (default NULL)\n" +#~ " -k path name to the key file (default NULL)\n" +#~ " -e path name to the certificate file (default " +#~ "NULL)\n" +#~ " -a path name to the CA certificate file (default " +#~ "NULL)\n" +#~ " -w specify working directory\n" +#~ " -r call restore code with given jobids\n" +#~ " -v verbose\n" +#~ " -f specify data file\n" +#~ " -? print this message\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Versión: %s (%s)\n" +#~ "Ejemplo : bbatch -w /ruta/para/directorio/trabajo -h localhost -f dat1 -f " +#~ "dat -f datx\n" +#~ " inicializará 3 hilos y cargará dat1, dat y datx en su catalogo\n" +#~ "Vea bbatch.c para generar archivos de datos\n" +#~ "\n" +#~ "Utilice: bbatch [opciones] -w directorio/trabajo -f archivo_de_datos\n" +#~ " -d establece nivel de depuración para \n" +#~ " -dt imprime timestamp en salida de depuración\n" +#~ " -n especifica el nombre de la base de datos (por defecto " +#~ "bacula)\n" +#~ " -u especifica el nombre de usuario de la base de datos (por " +#~ "defecto bacula)\n" +#~ " -P especifica la contraseña de la base de datos (por " +#~ "defecto none)\n" +#~ " -h especifica servidor de la base de datos (por defecto " +#~ "NULL)\n" +#~ " -w especifica el directorio de trabajo\n" +#~ " -v detallado\n" +#~ " -f especifica archivo de datos\n" +#~ " -? 
imprime esta mensaje\n" +#~ "\n" + +#~ msgid "Could not init Bacula database\n" +#~ msgstr "No se pudo iniciar base de datos de Bacula\n" + +#~ msgid "Error opening datafile %s\n" +#~ msgstr "Error abriendo archivo de datos %s\n" + +#~ msgid "Error while inserting file\n" +#~ msgstr "Error insertando el archivo\n" + +#~ msgid "Could not open data file: %s\n" +#~ msgstr "No se pudo abrir el archivo de datos: %s\n" + +#~ msgid "Fatal malformed reply from %s: %s\n" +#~ msgstr "Fatal malformación de respuesta desde %s: %s\n" + +#~ msgid "Fatal fgets error: ERR=%s\n" +#~ msgstr "fgets fatal error: ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Usage: %s [-f from] [-h mailhost] [-s subject] [-c copy] [recipient ...]\n" +#~ " -4 forces bsmtp to use IPv4 addresses only.\n" +#~ " -6 forces bsmtp to use IPv6 addresses only.\n" +#~ " -8 set charset to UTF-8\n" +#~ " -a use any ip protocol for address resolution\n" +#~ " -c set the Cc: field\n" +#~ " -d set debug level to \n" +#~ " -dt print a timestamp in debug output\n" +#~ " -f set the From: field\n" +#~ " -h use mailhost:port as the SMTP server\n" +#~ " -s set the Subject: field\n" +#~ " -r set the Reply-To: field\n" +#~ " -l set the maximum number of lines to send (default: " +#~ "unlimited)\n" +#~ " -? print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Utilice: %s [-f desde] [-h servidor de correo] [-s asunto] [-c con copia] " +#~ "[destinatario ...]\n" +#~ " -8 conjunto charset a UTF-8\n" +#~ " -c establece el campo Cc:\n" +#~ " -d establece el nivel de depuración para \n" +#~ " -dt imprime un timestamp en salida de depuración\n" +#~ " -f establece el campo Desde:\n" +#~ " -h use servidor de correo:puerto como servidor SMTP\n" +#~ " -s establece el campo Asunto:\n" +#~ " -r establece el campo Responder-Para:\n" +#~ " -l establece el número máximo de líneas a enviar (por defecto: sin " +#~ "límite)\n" +#~ " -? 
imprimir este mensaje.\n" +#~ "\n" + +#~ msgid "Fatal error: no recipient given.\n" +#~ msgstr "Fatal error: destinatario no determinado.\n" + +#~ msgid "Fatal gethostname error: ERR=%s\n" +#~ msgstr "gethostname fatal error: ERR=%s\n" + +#, fuzzy +#~ msgid "Fatal getaddrinfo for myself failed \"%s\": ERR=%s\n" +#~ msgstr "Fallo fatal gethostbyname para mi mismo \"%s\": ERR=%s\n" + +#~ msgid "Fatal gethostbyname for myself failed \"%s\": ERR=%s\n" +#~ msgstr "Fallo fatal gethostbyname para mi mismo \"%s\": ERR=%s\n" + +#~ msgid "Error unknown mail host \"%s\": ERR=%s\n" +#~ msgstr "Error servidor de correo \"%s\" desconocido: ERR=%s\n" + +#~ msgid "Retrying connection using \"localhost\".\n" +#~ msgstr "Reintentando la conexión usando \"localhost\".\n" + +#, fuzzy +#~ msgid "Failed to connect to mailhost %s\n" +#~ msgstr "Fallo al conectar con el cliente.\n" + +#~ msgid "Fatal error: Unknown address family for smtp host: %d\n" +#~ msgstr "" +#~ "Fatal error: Desconocida familia de direcciones para servidor smtp: %d\n" + +#~ msgid "Fatal socket error: ERR=%s\n" +#~ msgstr "Fatal error de socket: ERR=%s\n" + +#~ msgid "Fatal connect error to %s: ERR=%s\n" +#~ msgstr "Fatal error de conexión para %s: ERR=%s\n" + +#~ msgid "Fatal _open_osfhandle error: ERR=%s\n" +#~ msgstr "Fatal error _open_osfhandle: ERR=%s\n" + +#~ msgid "Fatal fdopen error: ERR=%s\n" +#~ msgstr "Fatal error fdopen: ERR=%s\n" + +#~ msgid "Fatal dup error: ERR=%s\n" +#~ msgstr "Fatal error dup: ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "ERROR %s\n" +#~ "\n" +#~ msgstr "\n" + +#, fuzzy +#~ msgid "Unable to open -p argument for reading" +#~ msgstr "No se puede abrir el archivo de parámetros de DH" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -n specify the database name (default bacula)\n" +#~ " -u specify database user name (default bacula)\n" +#~ " -P specify database host (default NULL)\n" +#~ " -k path name to the key file (default NULL)\n" +#~ " -e path name to the certificate file (default " +#~ "NULL)\n" +#~ " -a path name to the CA certificate file (default " +#~ "NULL)\n" +#~ " -w specify working directory\n" +#~ " -j specify jobids\n" +#~ " -p specify path\n" +#~ " -f specify file\n" +#~ " -l maximum tuple to fetch\n" +#~ " -T truncate cache table before starting\n" +#~ " -v verbose\n" +#~ " -? print this message\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Versión: %s (%s)\n" +#~ " -d establece nivel de depuración para \n" +#~ " -dt imprime timestamp en salida de depuración\n" +#~ " -n especifica el nombre de la base de datos (por defecto " +#~ "bacula)\n" +#~ " -u especifica el nombre de usuario de la base de datos (por " +#~ "defecto bacula)\n" +#~ " -P especifica la contraseña de la base de datos (por " +#~ "defecto none)\n" +#~ " -h especifica servidor de la base de datos (por defecto " +#~ "NULL)\n" +#~ " -w especifica el directorio de trabajo\n" +#~ " -j especifica jobids\n" +#~ " -p especifica la ruta\n" +#~ " -f especifica el archivo\n" +#~ " -l tupla máxima a buscar\n" +#~ " -T Truncar tabla de caché antes de empezar\n" +#~ " -v detallado\n" +#~ " -? 
imprime esta mensaje\n" +#~ "\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -n specify the database name (default bacula)\n" +#~ " -u specify database user name (default bacula)\n" +#~ " -P specify database host (default NULL)\n" +#~ " -w specify working directory\n" +#~ " -p specify path\n" +#~ " -f specify file\n" +#~ " -l maximum tuple to fetch\n" +#~ " -q print only errors\n" +#~ " -v verbose\n" +#~ " -? print this message\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Versión: %s (%s)\n" +#~ " -d establece nivel de depuración para \n" +#~ " -dt imprime timestamp en salida de depuración\n" +#~ " -n especifica el nombre de la base de datos (por defecto " +#~ "bacula)\n" +#~ " -u especifica el nombre de usuario de la base de datos (por " +#~ "defecto bacula)\n" +#~ " -P especifica la contraseña de la base de datos (por " +#~ "defecto none)\n" +#~ " -h especifica servidor de la base de datos (por defecto " +#~ "NULL)\n" +#~ " -w especifica el directorio de trabajo\n" +#~ " -j especifica jobids\n" +#~ " -p especifica la ruta\n" +#~ " -f especifica el archivo\n" +#~ " -l tupla máxima a buscar\n" +#~ " -T Truncar tabla de caché antes de empezar\n" +#~ " -v detallado\n" +#~ " -? imprime esta mensaje\n" +#~ "\n" + +#, fuzzy +#~ msgid "Could not open, database \"%s\".\n" +#~ msgstr "No se puede abrir la base de datos \"%s\".\n" + +#~ msgid "" +#~ "Warning skipping the additional parameters for working directory/dbname/" +#~ "user/password/host.\n" +#~ msgstr "" +#~ "Advertencia, saltando los parámetros adicionales para el directorio de " +#~ "trabajo/dbname/usuario/contraseña/maquina.\n" + +#~ msgid "" +#~ "Error can not find the Catalog name[%s] in the given config file [%s]\n" +#~ msgstr "" +#~ "Error, no puede encontrar el nombre del Catálogo [%s] en el archivo [%s] " +#~ "de configuración dado\n" + +#~ msgid "Error there is no Catalog section in the given config file [%s]\n" +#~ msgstr "" +#~ "Error, no hay una sección de Catálogo en el archivo de configuración dado " +#~ "[% s]\n" + +#~ msgid "Error no Director resource defined.\n" +#~ msgstr "Error, recurso Director no definido.\n" + +#~ msgid "Wrong number of arguments.\n" +#~ msgstr "Número incorrecto de argumentos.\n" + +#~ msgid "Working directory not supplied.\n" +#~ msgstr "Directorio de trabajo no suministrado.\n" + +#~ msgid "Database port must be a numeric value.\n" +#~ msgstr "Puerto de la BD debe ser un valor numérico.\n" + +#~ msgid "Database port must be a int value.\n" +#~ msgstr "Puerto de la BD debe ser un valor entero.\n" + +#~ msgid "Hello, this is the database check/correct program.\n" +#~ msgstr "" +#~ "Hola, este es el programa de comprobación/corrección de la base de " +#~ "datos.\n" + +#~ msgid "Modify database is on." +#~ msgstr "Modificación de base de datos esta activada." + +#~ msgid "Modify database is off." +#~ msgstr "Modificación base de datos esta desactivada." 
+ +#~ msgid " Verbose is on.\n" +#~ msgstr "Detallado esta activado.\n" + +#~ msgid " Verbose is off.\n" +#~ msgstr "Detallado está apagado.\n" + +#~ msgid "Please select the function you want to perform.\n" +#~ msgstr "Por favor seleccione la función que desea realizar.\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ " 1) Toggle modify database flag\n" +#~ " 2) Toggle verbose flag\n" +#~ " 3) Repair bad Filename records\n" +#~ " 4) Repair bad Path records\n" +#~ " 5) Eliminate duplicate Filename records\n" +#~ " 6) Eliminate duplicate Path records\n" +#~ " 7) Eliminate orphaned Jobmedia records\n" +#~ " 8) Eliminate orphaned File records\n" +#~ " 9) Eliminate orphaned Path records\n" +#~ " 10) Eliminate orphaned Filename records\n" +#~ " 11) Eliminate orphaned FileSet records\n" +#~ " 12) Eliminate orphaned Client records\n" +#~ " 13) Eliminate orphaned Job records\n" +#~ " 14) Eliminate all Admin records\n" +#~ " 15) Eliminate all Restore records\n" +#~ " 16) All (3-15)\n" +#~ " 17) Quit\n" +#~ msgstr "" +#~ "\n" +#~ " 1) Activar bandera modificar base de datos\n" +#~ " 2) Activar bandera detallado\n" +#~ " 3) Reparar malos registros de Nombre de Archivo\n" +#~ " 4) Reparar malos registros de Rutas\n" +#~ " 5) Eliminar registros de Nombre de Archivo duplicados\n" +#~ " 6) Eliminar registros de Rutas duplicados\n" +#~ " 7) Eliminar registros de Jobmedia huérfanos\n" +#~ " 8) Eliminar registros de Archivo huérfanos\n" +#~ " 9) Eliminar registros de Rutas huérfanos\n" +#~ " 10) Eliminar registros de nombre de archivo huérfanos\n" +#~ " 11) Eliminar registros de FileSet huérfanos\n" +#~ " 12) Eliminar registros de Cliente huérfanos\n" +#~ " 13) Eliminar registros de Job huérfanos\n" +#~ " 14) Eliminar todos los registros de Administración\n" +#~ " 15) Eliminar todos los registros de Restauración\n" +#~ " 16) Todos (3-15)\n" +#~ " 17) Salir\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ " 1) Toggle modify database flag\n" +#~ " 2) Toggle verbose flag\n" +#~ " 3) Check for bad Filename records\n" +#~ " 4) Check for bad Path records\n" +#~ " 5) Check for duplicate Filename records\n" +#~ " 6) Check for duplicate Path records\n" +#~ " 7) Check for orphaned Jobmedia records\n" +#~ " 8) Check for orphaned File records\n" +#~ " 9) Check for orphaned Path records\n" +#~ " 10) Check for orphaned Filename records\n" +#~ " 11) Check for orphaned FileSet records\n" +#~ " 12) Check for orphaned Client records\n" +#~ " 13) Check for orphaned Job records\n" +#~ " 14) Check for all Admin records\n" +#~ " 15) Check for all Restore records\n" +#~ " 16) All (3-15)\n" +#~ " 17) Quit\n" +#~ msgstr "" +#~ "\n" +#~ " 1) Activar bandera modificar base de datos\n" +#~ " 2) Activar bandera detallado\n" +#~ " 3) Verificar malos registros de Nombre de Archivo\n" +#~ " 4) Verificar malos registros de Rutas\n" +#~ " 5) Verificar registros de Nombre de Archivo duplicados\n" +#~ " 6) Verificar registros de Rutas duplicados\n" +#~ " 7) Verificar registros de Jobmedia huérfanos\n" +#~ " 8) Verificar registros de Archivo huérfanos\n" +#~ " 9) Verificar registros de Rutas huérfanos\n" +#~ " 10) Verificar registros de nombre de archivo huérfanos\n" +#~ " 11) Verificar registros de FileSet huérfanos\n" +#~ " 12) Verificar registros de Cliente huérfanos\n" +#~ " 13) Verificar registros de Job huérfanos\n" +#~ " 14) Verificar todos los registros de Administración\n" +#~ " 15) Verificar todos los registros de Restauración\n" +#~ " 16) Todos (3-15)\n" +#~ " 17) Salir\n" + +#~ msgid "Select function number: " +#~ msgstr "Seleccione número de 
función:" + +#~ msgid "Database will be modified.\n" +#~ msgstr "Base de datos será modificada.\n" + +#~ msgid "Database will NOT be modified.\n" +#~ msgstr "Base de datos NO será modificada.\n" + +#~ msgid "JobId=%s Name=\"%s\" StartTime=%s\n" +#~ msgstr "JobId=%s Nombre=\"%s\" HoraInicio=%s\n" + +#~ msgid "Orphaned JobMediaId=%s JobId=%s Volume=\"%s\"\n" +#~ msgstr "Huérfanos JobMediaId=%s JobId=%s Volumen=\"%s\"\n" + +#~ msgid "Orphaned FileId=%s JobId=%s Volume=\"%s\"\n" +#~ msgstr "Huérfanos FileId=%s JobId=%s Volumen=\"%s\"\n" + +#~ msgid "Orphaned FileSetId=%s FileSet=\"%s\" MD5=%s\n" +#~ msgstr "Huérfanos FileSetId=%s FileSet=\"%s\" MD5=%s\n" + +#~ msgid "Orphaned ClientId=%s Name=\"%s\"\n" +#~ msgstr "Huérfanos ClientId=%s Nombre=\"%s\"\n" + +#~ msgid "Deleting: %s\n" +#~ msgstr "Eliminando: %s\n" + +#~ msgid "Checking for duplicate Filename entries.\n" +#~ msgstr "Comprobando entradas Nombre de Archivo(Filename) duplicadas.\n" + +#~ msgid "Found %d duplicate Filename records.\n" +#~ msgstr "Encontrados %d registros Nombre de Archivo(Filename) duplicados.\n" + +#~ msgid "Print the list? (yes/no): " +#~ msgstr "Imprimir la lista? (sí/no):" + +#~ msgid "Found %d for: %s\n" +#~ msgstr "Encontrados %d para: %s\n" + +#~ msgid "Checking for duplicate Path entries.\n" +#~ msgstr "Comprobando entradas Path duplicadas.\n" + +#~ msgid "Found %d duplicate Path records.\n" +#~ msgstr "Encontrados %d registros de Rutas duplicados.\n" + +#~ msgid "Print them? (yes/no): " +#~ msgstr "Imprimirlos? (sí/no):" + +#~ msgid "Checking for orphaned JobMedia entries.\n" +#~ msgstr "Comprobando entradas JobMedia huérfanas.\n" + +#~ msgid "Found %d orphaned JobMedia records.\n" +#~ msgstr "Encontrados %d registros JobMedia huérfanos.\n" + +#~ msgid "Deleting %d orphaned JobMedia records.\n" +#~ msgstr "Eliminando %d registros JobMedia huérfanos.\n" + +#~ msgid "Checking for orphaned File entries. This may take some time!\n" +#~ msgstr "" +#~ "Comprobando entradas Files huérfanas. Esto puede tomar algún tiempo.\n" + +#~ msgid "Found %d orphaned File records.\n" +#~ msgstr "Encontrados %d registros File huérfanos.\n" + +#~ msgid "Deleting %d orphaned File records.\n" +#~ msgstr "Eliminando %d registros File huérfanos.\n" + +#~ msgid "Create temporary index? (yes/no): " +#~ msgstr "Crear índice temporal? (sí/no):" + +#~ msgid "Checking for orphaned Path entries. This may take some time!\n" +#~ msgstr "" +#~ "Comprobando de entradas huérfanas de Ruta. Esto puede tomar algún " +#~ "tiempo!\n" + +#~ msgid "Found %d orphaned Path records.\n" +#~ msgstr "Se han encontrado %d registros huérfanos de Ruta.\n" + +#~ msgid "Deleting %d orphaned Path records.\n" +#~ msgstr "Eliminando %d registros huérfanos de Ruta.\n" + +#~ msgid "Checking for orphaned Filename entries. This may take some time!\n" +#~ msgstr "" +#~ "Comprobando de entradas de Nombre de Archivo huérfanos. Esto puede tomar " +#~ "algún tiempo!\n" + +#~ msgid "Found %d orphaned Filename records.\n" +#~ msgstr "Se han encontrado %d registros de Nombre de Archivos huérfanos.\n" + +#~ msgid "Deleting %d orphaned Filename records.\n" +#~ msgstr "Eliminando %d registros de Nombre de Archivos huérfanos.\n" + +#~ msgid "Checking for orphaned FileSet entries. This takes some time!\n" +#~ msgstr "" +#~ "Comprobando de entradas FileSet huérfanos. 
Esto puede tomar algún " +#~ "tiempo!\n" + +#~ msgid "Found %d orphaned FileSet records.\n" +#~ msgstr "Se han encontrado %d registros FileSet huérfanos.\n" + +#~ msgid "Deleting %d orphaned FileSet records.\n" +#~ msgstr "Eliminando %d registros FileSet huérfanos.\n" + +#~ msgid "Checking for orphaned Client entries.\n" +#~ msgstr "" +#~ "Comprobando entradas de Clientes huérfanos. Esto puede tomar algún " +#~ "tiempo!\n" + +#~ msgid "Found %d orphaned Client records.\n" +#~ msgstr "Se han encontrado %d registros de Clientes huérfanos.\n" + +#~ msgid "Deleting %d orphaned Client records.\n" +#~ msgstr "Eliminando %d registros de Clientes huérfanos.\n" + +#~ msgid "Checking for orphaned Job entries.\n" +#~ msgstr "" +#~ "Comprobando entradas de Job huérfanos. Esto puede tomar algún tiempo!\n" + +#~ msgid "Found %d orphaned Job records.\n" +#~ msgstr "Se han encontrado %d registros de Job huérfanos.\n" + +#~ msgid "Deleting %d orphaned Job records.\n" +#~ msgstr "Eliminando %d registros de Job huérfanos.\n" + +#~ msgid "Deleting JobMedia records of orphaned Job records.\n" +#~ msgstr "Eliminando registros JobMedia de registros de Job huérfanos.\n" + +#~ msgid "Deleting Log records of orphaned Job records.\n" +#~ msgstr "Eliminando registros Log de registros de Job huérfanos.\n" + +#~ msgid "Checking for Admin Job entries.\n" +#~ msgstr "Comprobando entradas para Job Administrativo.\n" + +#~ msgid "Found %d Admin Job records.\n" +#~ msgstr "Se han encontrado %d registros de Job Administrativo.\n" + +#~ msgid "Deleting %d Admin Job records.\n" +#~ msgstr "Eliminando %d registros de Job Administrativo.\n" + +#~ msgid "Checking for Restore Job entries.\n" +#~ msgstr "Comprobando entradas para Job de Restauración.\n" + +#~ msgid "Found %d Restore Job records.\n" +#~ msgstr "Se han encontrado %d registros de Job de Restauración.\n" + +#~ msgid "Deleting %d Restore Job records.\n" +#~ msgstr "Eliminando %d registros de Job de Restauración.\n" + +#~ msgid "Checking for Filenames with a trailing slash\n" +#~ msgstr "Comprobando Nombres de Archivo con una barra diagonal\n" + +#~ msgid "Found %d bad Filename records.\n" +#~ msgstr "Se han encontrado %d malos registros de Nombre de Archivo.\n" + +#~ msgid "Reparing %d bad Filename records.\n" +#~ msgstr "Reparando %d malos registros de Nombre de Archivo.\n" + +#~ msgid "Checking for Paths without a trailing slash\n" +#~ msgstr "Comprobando Rutas sin una barra diagonal\n" + +#~ msgid "Found %d bad Path records.\n" +#~ msgstr "Se han encontrado %d malos registros de Rutas.\n" + +#~ msgid "" +#~ "Ok. Index over the %s column already exists and dbcheck will work " +#~ "faster.\n" +#~ msgstr "" +#~ "Listo. Índice sobre la columna %s ya existe y dbcheck funcionará más " +#~ "rápido.\n" + +#~ msgid "" +#~ "Note. Index over the %s column not found, that can greatly slow down " +#~ "dbcheck.\n" +#~ msgstr "" +#~ "Nota. Índice sobre la columna %s no encontrado, esto puede en gran medida " +#~ "ralentizar dbcheck.\n" + +#~ msgid "Create temporary index... This may take some time!\n" +#~ msgstr "Crear índice temporal... 
Esto puede tomar algún tiempo!\n" + +#~ msgid "Temporary index created.\n" +#~ msgstr "Índice temporal creado.\n" + +#~ msgid "Drop temporary index.\n" +#~ msgstr "Eliminar índice temporal.\n" + +#~ msgid "Temporary index %s deleted.\n" +#~ msgstr "Índice temporal %s eliminado.\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Usage: drivetype [-v] path ...\n" +#~ "\n" +#~ " Print the drive type a given file/directory is on.\n" +#~ " The following options are supported:\n" +#~ "\n" +#~ " -l print local fixed hard drive\n" +#~ " -a display information on all drives\n" +#~ " -v print both path and file system type.\n" +#~ " -? print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Utilice: drivetype [-v] ruta ...\n" +#~ "\n" +#~ " Imprime el tipo de unidad dado a archivo/directorio esta activada.\n" +#~ " Las siguientes opciones están soportados:\n" +#~ "\n" +#~ " -v imprime ambos tipos de rutas y archivo de sistemas.\n" +#~ " -? imprime esta mensaje.\n" +#~ "\n" + +#~ msgid "%s: unknown\n" +#~ msgstr "%s: desconocido\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Usage: fstype [-v] path ...\n" +#~ "\n" +#~ " Print the file system type for each file/directory argument " +#~ "given.\n" +#~ " The following options are supported:\n" +#~ "\n" +#~ " -l print all file system types in mtab.\n" +#~ " -m print full entries in mtab.\n" +#~ " -v print both path and file system type of each argument.\n" +#~ " -? print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Utilice: drivetype [-v] ruta ...\n" +#~ "\n" +#~ " Imprime el tipo de unidad dado a archivo/directorio esta activada.\n" +#~ " Las siguientes opciones están soportados:\n" +#~ "\n" +#~ " -v imprime ambos tipos de rutas y archivo de sistemas.\n" +#~ " -? imprime esta mensaje.\n" +#~ "\n" + +#, fuzzy +#~ msgid "%s: unknown file system type\n" +#~ msgstr "Tipo de archivo desconocido %d: %s\n" + +#~ msgid "" +#~ "\n" +#~ "Usage: testfind [-d debug_level] [-] [pattern1 ...]\n" +#~ " -a print extended attributes (Win32 debug)\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -c specify config file containing FileSet resources\n" +#~ " -f specify which FileSet to use\n" +#~ " -? print this message.\n" +#~ "\n" +#~ "Patterns are used for file inclusion -- normally directories.\n" +#~ "Debug level >= 1 prints each file found.\n" +#~ "Debug level >= 10 prints path/file for catalog.\n" +#~ "Errors are always printed.\n" +#~ "Files/paths truncated is the number of files/paths with len > 255.\n" +#~ "Truncation is only in the catalog.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Utilice: testfind [-d nivel_ depuración] [-] [patrón1 ...]\n" +#~ " -a imprime atributos extendidos (depuración de Win32)\n" +#~ " -d establece nivel de depuración para \n" +#~ " -dt imprime timestamp en salida de depuración\n" +#~ " -c especifica archivo de conteniendo recursos FileSet\n" +#~ " -f especifica cual FileSet para usar\n" +#~ " -? 
imprime esta mensaje.\n" +#~ "\n" +#~ "Los patrones son archivos de inclusión - normalmente directorios.\n" +#~ "Nivel de depuración >= 1 imprime cada archivo encontrado.\n" +#~ "Nivel de depuración >= 10 imprime ruta/archivo para catalogo.\n" +#~ "Los errores siempre se imprimen.\n" +#~ "Archivos/rutas truncados es numero con longitud > 255.\n" +#~ "Truncamiento es sólo en catálogo..\n" +#~ "\n" + +#~ msgid "" +#~ "\n" +#~ "Total files : %d\n" +#~ "Max file length: %d\n" +#~ "Max path length: %d\n" +#~ "Files truncated: %d\n" +#~ "Paths truncated: %d\n" +#~ "Hard links : %d\n" +#~ msgstr "" +#~ "\n" +#~ "Total de Archivos : %d\n" +#~ "Longitud máxima de archivo: %d\n" +#~ "Longitud máxima de ruta: %d\n" +#~ "Archivos truncados: %d\n" +#~ "Rutas truncadas: %d\n" +#~ "Enlaces duros : %d\n" + +#~ msgid "Reg: %s\n" +#~ msgstr "Reg: %s\n" + +#~ msgid "\t[will not descend: recursion turned off]" +#~ msgstr "\t[no descenderá: recursividad desactivado]" + +#~ msgid "\t[will not descend: file system change not allowed]" +#~ msgstr "\t[no descenderá: no permitido cambio de sistema de archivo]" + +#~ msgid "\t[will not descend: disallowed file system]" +#~ msgstr "\t[no descenderá: sistema de archivo no permitido]" + +#~ msgid "\t[will not descend: disallowed drive type]" +#~ msgstr "\t[no descenderá: tipo de unidad no permitido]" + +#~ msgid "Err: Could not access %s: %s\n" +#~ msgstr "Err: No es posible acceder %s: %s\n" + +#~ msgid "Err: Could not follow ff->link %s: %s\n" +#~ msgstr "Err: no podía seguir ff->link %s: %s\n" + +#~ msgid "Err: Could not stat %s: %s\n" +#~ msgstr "Err: no se pudo stat %s: %s\n" + +#~ msgid "Skip: File not saved. No change. %s\n" +#~ msgstr "Saltar: Archivo no guardado. Sin cambios. %s\n" + +#~ msgid "Err: Attempt to backup archive. Not saved. %s\n" +#~ msgstr "Err: Intento de copia de seguridad. No guardado. %s\n" + +#~ msgid "Err: Could not open directory %s: %s\n" +#~ msgstr "Err: no se pudo abrir el directorio %s: %s\n" + +#~ msgid "Err: Unknown file ff->type %d: %s\n" +#~ msgstr "Err: Archivo desconocido ff->tipo %d: %s\n" + +#~ msgid "===== Filename truncated to 255 chars: %s\n" +#~ msgstr "===== Nombre de Archivo truncado para 255 caracteres: %s\n" + +#~ msgid "========== Path name truncated to 255 chars: %s\n" +#~ msgstr "========== Nombre de Ruta truncado para 255 caracteres: %s\n" + +#~ msgid "========== Path length is zero. File=%s\n" +#~ msgstr "========== La longitud de la ruta es nula. Archivo=%s\n" + +#~ msgid "Path: %s\n" +#~ msgstr "Ruta: %s\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Usage: testls [-d debug_level] [-] [pattern1 ...]\n" +#~ " -a print extended attributes (Win32 debug)\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -e specify file of exclude patterns\n" +#~ " -i specify file of include patterns\n" +#~ " -q quiet, don't print filenames (debug)\n" +#~ " - read pattern(s) from stdin\n" +#~ " -? 
print this message.\n" +#~ "\n" +#~ "Patterns are file inclusion -- normally directories.\n" +#~ "Debug level >= 1 prints each file found.\n" +#~ "Debug level >= 10 prints path/file for catalog.\n" +#~ "Errors always printed.\n" +#~ "Files/paths truncated is number with len > 255.\n" +#~ "Truncation is only in catalog.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Utilice: testls [-d nivel_ depuración] [-] [patrón1 ...]\n" +#~ " -a imprime atributos extendidos (depuración de Win32)\n" +#~ " -d establece nivel de depuración para \n" +#~ " -dt imprime timestamp en salida de depuración\n" +#~ " -e especifica archivo de exclusión de patrones\n" +#~ " -i especifica archivo de inclusión de patrones\n" +#~ " - leer patrón(es) desde stdin\n" + +#~ msgid "Could not open include file: %s\n" +#~ msgstr "No se pudo abrir el archivo incluir: %s\n" + +#~ msgid "Could not open exclude file: %s\n" +#~ msgstr "No se pudo abrir el archivo excluir: %s\n" + +#~ msgid "Recursion turned off. Directory not entered. %s\n" +#~ msgstr "Recursión deshabilitada. No entró al directorio. %s\n" + +#~ msgid "Skip: File system change prohibited. Directory not entered. %s\n" +#~ msgstr "" +#~ "Saltar: Cambio de sistema de archivos prohibido. No entró al directorio. " +#~ "%s\n" + +#, fuzzy +#~ msgid "" +#~ "Director and Storage daemon passwords or names not the same.\n" +#~ "For help, please see " +#~ msgstr "Nombres o contraseñas en el Director o Storage no son las mismas.\n" + +#~ msgid "bdird " +#~ msgstr "--> " + +#~ msgid "FileSet: name=%s\n" +#~ msgstr "FileSet: nombre=%s\n" + +#~ msgid "%s OK -- with warnings" +#~ msgstr "%s OK -- con alertas" + +#, fuzzy +#~ msgid "Command line" +#~ msgstr "línea de comandos" + +#, fuzzy +#~ msgid "Invalid keyword found: %s\n" +#~ msgstr "Palabra clave inválida: %s\n" + +#, fuzzy +#~ msgid "Display data files usage" +#~ msgstr "Mostrar mensajes pendientes" + +#~ msgid "command line" +#~ msgstr "línea de comandos" + +#~ msgid "Verify Volume Data (not yet implemented)" +#~ msgstr "Verificar volumen de datos (aún no implementado)" + +#, fuzzy +#~ msgid "aclx_get error on file \"%s\": ERR=%s\n" +#~ msgstr "error de acl_get en el archivo \"%s\": ERR=%s\n" + +#, fuzzy +#~ msgid "Unknown acl type encountered on file \"%s\": %ld\n" +#~ msgstr "" +#~ "codificación errónea del tipo de ACL en el flujo de ACL en el archivo \"%s" +#~ "\" \n" + +#, fuzzy +#~ msgid "Failed to convert acl into text on file \"%s\"\n" +#~ msgstr "No se puede convertir acl de texto en el archivo \"%s\"\n" + +#, fuzzy +#~ msgid "" +#~ "Trying to restore POSIX acl on file \"%s\" on filesystem without AIXC acl " +#~ "support\n" +#~ msgstr "" +#~ "Tratando de restaurar acl en el archivo \"%s\" en sistema de ficheros sin " +#~ "soporte a acl\n" + +#, fuzzy +#~ msgid "" +#~ "Trying to restore NFSv4 acl on file \"%s\" on filesystem without NFS4 acl " +#~ "support\n" +#~ msgstr "" +#~ "Tratando de restaurar acl en el archivo \"%s\" en sistema de ficheros sin " +#~ "soporte a acl\n" + +#, fuzzy +#~ msgid "aclx_scanStr error on file \"%s\": ERR=%s\n" +#~ msgstr "error de acl_set en el archivo \"%s\": ERR=%s\n" + +#, fuzzy +#~ msgid "aclx_put error on file \"%s\": ERR=%s\n" +#~ msgstr "error de acl_get en el archivo \"%s\": ERR=%s\n" + +#~ msgid "getacl error on file \"%s\": ERR=%s\n" +#~ msgstr "error de getacl en el archivo \"%s\": ERR=%s\n" + +#~ msgid "acltostr error on file \"%s\": ERR=%s\n" +#~ msgstr "altostr error en el archivo \"%s\": ERR=%s\n" + +#~ msgid "strtoacl error on file \"%s\": ERR=%s\n" +#~ msgstr "strtoacl error 
en el archivo \"%s\": ERR=%s\n" + +#~ msgid "setacl error on file \"%s\": ERR=%s\n" +#~ msgstr "setacl error en el archivo \"%s\": ERR=%s\n" + +#~ msgid "acltotext error on file \"%s\": ERR=%s\n" +#~ msgstr "acltotext error en el archivo \"%s\": ERR=%s\n" + +#~ msgid "aclfromtext error on file \"%s\": ERR=%s\n" +#~ msgstr "aclfromtext error en el archivo \"%s\": ERR=%s\n" + +#~ msgid "acl(SETACL) error on file \"%s\": ERR=%s\n" +#~ msgstr "acl(SETACL) error en el archivo \"%s\": ERR=%s\n" + +#, fuzzy +#~ msgid "Recv caps from SD failed. ERR=%s\n" +#~ msgstr "cap_from_text fallido: ERR=%s\n" + +#, fuzzy +#~ msgid "Bad caps from SD: %s.\n" +#~ msgstr "Malo comando Hello desde Director en %s: %s\n" + +#, fuzzy +#~ msgid "Bad caps from SD: %s\n" +#~ msgstr "Mal estado desde BSF. ERR=%s\n" + +#, fuzzy +#~ msgid "Generate VSS snapshots. Driver=\"%s\"\n" +#~ msgstr "" +#~ "Generar instantáneas(snapshots) VSS. Driver=\"%s\", Drive(s)=\"%s\"\n" + +#, fuzzy +#~ msgid "VSS CreateSnapshots failed. ERR=%s\n" +#~ msgstr "Fallo al generar VSS snapshots.\n" + +#~ msgid "VSS Writer (PrepareForBackup): %s\n" +#~ msgstr "VSS Writer (PrepareForBackup): %s\n" + +#~ msgid "No drive letters found for generating VSS snapshots.\n" +#~ msgstr "" +#~ "No encuentra las letras de unidad para la generación de " +#~ "instantáneas(snapshots) VSS.\n" + +#, fuzzy +#~ msgid "VSS was not initialized properly. ERR=%s\n" +#~ msgstr "" +#~ "VSS no se inicializo correctamente. Suporte VSS está desactivado. ERR=%s\n" + +#~ msgid "VSS was not initialized properly. VSS support is disabled. ERR=%s\n" +#~ msgstr "" +#~ "VSS no se inicializo correctamente. Suporte VSS está desactivado. ERR=%s\n" + +#, fuzzy +#~ msgid "VSS Writer (RestoreComplete): %s\n" +#~ msgstr "VSS Writer (BackupComplete): %s\n" + +#, fuzzy +#~ msgid "Write write error on %s: ERR=%s\n" +#~ msgstr "Error de escritura en %s: %s\n" + +#, fuzzy +#~ msgid "%s %sVersion: %s (%s) %s %s %s %s\n" +#~ msgstr "%s Versión: %s (%s) %s %s %s %s\n" + +#~ msgid "Error scanning record header: %s\n" +#~ msgstr "Error escaneando registro de cabecera: %s\n" + +#, fuzzy +#~ msgid "WriteEncryptedFileRaw failure: ERR=%s\n" +#~ msgstr "Fallo al listar Media: ERR=%s\n" + +#, fuzzy +#~ msgid "llistea error on file \"%s\": ERR=%s\n" +#~ msgstr "error de llistxattr en el archivo \"%s\": ERR=%s\n" + +#, fuzzy +#~ msgid "lgetea error on file \"%s\": ERR=%s\n" +#~ msgstr "error de getacl en el archivo \"%s\": ERR=%s\n" + +#~ msgid "Xattr stream on file \"%s\" exceeds maximum size of %d bytes\n" +#~ msgstr "" +#~ "Flujo Xattr en el archivo \"%s\" excede el máximo tamaño de %d bytes\n" + +#, fuzzy +#~ msgid "lsetea error on file \"%s\": ERR=%s\n" +#~ msgstr "setacl error en el archivo \"%s\": ERR=%s\n" + +#, fuzzy +#~ msgid "attr_list error on file \"%s\": ERR=%s\n" +#~ msgstr "error de extattr_list_link en el archivo \"%s\": ERR=%s\n" + +#, fuzzy +#~ msgid "Received illegal xattr named %s on file \"%s\"\n" +#~ msgstr "Flujo xattr ilegal, no hay XATTR_MAGIC en el archivo \"%s\"\n" + +#, fuzzy +#~ msgid "attr_set error on file \"%s\": ERR=%s\n" +#~ msgstr "error de acl_set en el archivo \"%s\": ERR=%s\n" + +#, fuzzy +#~ msgid "getproplist error on file \"%s\": ERR=%s\n" +#~ msgstr "error de getacl en el archivo \"%s\": ERR=%s\n" + +#, fuzzy +#~ msgid "Unable create proper proplist to restore xattrs on file \"%s\"\n" +#~ msgstr "No es posible leer el contenido de xattr %s en el archivo \"%s\"\n" + +#, fuzzy +#~ msgid "setproplist error on file \"%s\": ERR=%s\n" +#~ msgstr "strtoacl error en el 
archivo \"%s\": ERR=%s\n" + +#~ msgid "Unable to open xattr %s on \"%s\": ERR=%s\n" +#~ msgstr "No se puede abrir xattr %s en \"%s\": ERR=%s\n" + +#~ msgid "Unable to read symlin %s on \"%s\": ERR=%s\n" +#~ msgstr "No se puede leer symlin %s en \"%s\": ERR=%s\n" + +#~ msgid "Unable to chdir to xattr space of file \"%s\": ERR=%s\n" +#~ msgstr "No se puede chdir a xattr espacio de archivo \"%s\": ERR=%s\n" + +#~ msgid "Unable to open xattr space %s on file \"%s\": ERR=%s\n" +#~ msgstr "No se puede abrir espacio xattr %s en el archivo \"%s\": ERR=%s\n" + +#~ msgid "Unable to chdir to xattr space on file \"%s\": ERR=%s\n" +#~ msgstr "No se puede chdir a xattr espacio en el archivo \"%s\": ERR=%s\n" + +#~ msgid "Unable to chdir to xattr space %s on file \"%s\": ERR=%s\n" +#~ msgstr "" +#~ "No se puede chdir a xattr espacio de %s en el archivo \"%s\": ERR=%s\n" + +#~ msgid "Unable to mkfifo xattr %s on file \"%s\": ERR=%s\n" +#~ msgstr "No se puede mkfifo xattr %s en el archivo \"%s\": ERR=%s\n" + +#~ msgid "Unable to mknod xattr %s on file \"%s\": ERR=%s\n" +#~ msgstr "No se puede mknode xattr %s en el archivo \"%s\": ERR=%s\n" + +#~ msgid "Unable to mkdir xattr %s on file \"%s\": ERR=%s\n" +#~ msgstr "No se puede mkdir xattr %s en el archivo \"%s\": ERR=%s\n" + +#~ msgid "" +#~ "Unable to restore data of xattr %s on file \"%s\": Not all data available " +#~ "in xattr stream\n" +#~ msgstr "" +#~ "No se puede restaurar los datos de xattr %s en el archivo \"%s\": No " +#~ "todos los datos disponibles en flujo xattr\n" + +#~ msgid "Unable to symlink xattr %s to %s on file \"%s\": ERR=%s\n" +#~ msgstr "No se puede symlink xattr %s para %s en el archivo \"%s\": ERR=%s\n" + +#~ msgid "Illegal xattr stream, failed to parse xattr stream on file \"%s\"\n" +#~ msgstr "" +#~ "Ilegal flujo xattr, fallo al analizar flujo xattr en el archivo \"%s\"\n" + +#~ msgid "Error in %s file %s: ERR=%s\n" +#~ msgstr "Error en %s archivo %s: ERR=%s\n" + +#~ msgid "AdjustTokenPrivileges set " +#~ msgstr "Establecer AdjustTokenPrivileges " + +#~ msgid "%c: is not a valid drive.\n" +#~ msgstr "%c: no es una unidad válida.\n" + +#~ msgid "Attempt to redefine name \"%s\" to \"%s\"." +#~ msgstr "Intento de redefinir el nombre \"%s\" para \"%s\"." + +#~ msgid "Could not initialize Python\n" +#~ msgstr "No se pudo inicializar Python\n" + +#~ msgid "Could not Run Python string %s\n" +#~ msgstr "No se pudo ejecutar Python cadena %s\n" + +#~ msgid "Could not initialize Python Job type.\n" +#~ msgstr "No se pudo inicializar Job tipo Python.\n" + +#~ msgid "Could not import Python script %s/%s. Python disabled.\n" +#~ msgstr "No se puede importar script Python %s/%s. Python deshabilitado.\n" + +#~ msgid "Could not create Python Job Object.\n" +#~ msgstr "No es posible crear objeto Job Python.\n" + +#~ msgid "Python function \"%s\" not found.\n" +#~ msgstr "Python función \"%s\" no encontrada.\n" + +#~ msgid "Unknown Python daemon event %s\n" +#~ msgstr "Demonio Python evento %s desconocido\n" + +#~ msgid "Unable to initialize the Python lock. ERR=%s\n" +#~ msgstr "No se puede inicializar el bloqueo de Python. ERR=%s\n" + +#, fuzzy +#~ msgid "Can't use replace=ifnewer with Delta plugin on %s\n" +#~ msgstr "No se puede unescapar cadena: ERR=%s\n" + +#, fuzzy +#~ msgid "Can't use replace=ifolder with Delta plugin on %s\n" +#~ msgstr "No se puede unescapar cadena: ERR=%s\n" + +#, fuzzy +#~ msgid "Can't run command %s. ERR=%s\n" +#~ msgstr "No se puede ejecutar el programa: %s. 
ERR=%s\n" + +#, fuzzy +#~ msgid "Can't create working directory %s. ERR=%s\n" +#~ msgstr "No se puede crear directorio %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Can't delete working directory %s. ERR=%s\n" +#~ msgstr "No se puede crear directorio %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unknown parameter or missing argument for %s.\n" +#~ msgstr "Estado del Analizador %d desconocido\n" + +#, fuzzy +#~ msgid "Unknown parameter for %s. Expecting block or file\n" +#~ msgstr "Estado del Analizador %d desconocido\n" + +#, fuzzy +#~ msgid "Unknown parameter %s.\n" +#~ msgstr "Estado del Analizador %d desconocido\n" + +#, fuzzy +#~ msgid "Can't analyse plugin command line\n" +#~ msgstr "No se puede usar comando %s en un runscript" + +#, fuzzy +#~ msgid "Unable to access guest volume\n" +#~ msgstr "No se puede truncar el dispositivo %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to create temporary file %s. ERR=%s\n" +#~ msgstr "No se puede crear el archivo bootstrap %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Can't use mode=%s in MySQL plugin\n" +#~ msgstr "No se puede unescapar cadena: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to fdopen file %s. ERR=%s\n" +#~ msgstr "No se puede abrir el archivo \"%s\": ERR=%s\n" + +#, fuzzy +#~ msgid " Dumping database \"%s\"\n" +#~ msgstr "No se puede abrir la base de datos \"%s\".\n" + +#, fuzzy +#~ msgid "Unable to detect the MySQL data_directory on this system.\n" +#~ msgstr "No se puede mknode xattr %s en el archivo \"%s\": ERR=%s\n" + +#, fuzzy +#~ msgid "Can't get server configuration.\n" +#~ msgstr "Cambio del archivo de configuración" + +#, fuzzy +#~ msgid "Unable to get the BINLOG list.\n" +#~ msgstr "No se puede crear hilo. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to determine the last LSN for %s (Previous job is %s)\n" +#~ msgstr "No se puede escribir en %s\n" + +#, fuzzy +#~ msgid "Unable to detect datadir from MySQL\n" +#~ msgstr "No se puede crear hilo. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to get last LSN from the backup\n" +#~ msgstr "No se puede obtener el registro Job para JobId=%s: ERR=%s\n" + +#, fuzzy +#~ msgid "The current LSN is %s\n" +#~ msgstr "Pool actual es: %s\n" + +#, fuzzy +#~ msgid "Restoring target database \"%s\"\n" +#~ msgstr "No se puede abrir la base de datos \"%s\".\n" + +#, fuzzy +#~ msgid "Creating target database \"%s\"\n" +#~ msgstr "No se puede abrir la base de datos \"%s\".\n" + +#, fuzzy +#~ msgid "Database \"%s\" already exists. Skipping creation.\n" +#~ msgstr "No existe base de datos %s, por favor crearla.\n" + +#, fuzzy +#~ msgid "Unable to parse or to use plugin options, %s\n" +#~ msgstr "No se puede abrir xattr %s en \"%s\": ERR=%s\n" + +#, fuzzy +#~ msgid "Unknown parameter or bad argument for %s.\n" +#~ msgstr "Estado del Analizador %d desconocido\n" + +#, fuzzy +#~ msgid "Can't use mode=%s in postgresql plugin\n" +#~ msgstr "No se puede unescapar cadena: ERR=%s\n" + +#, fuzzy +#~ msgid "Can't use service=%s in postgresql plugin ERR=%s\n" +#~ msgstr "No se puede unescapar cadena: ERR=%s\n" + +#, fuzzy +#~ msgid "Can't get cluster configuration.\n" +#~ msgstr "Cambio del archivo de configuración" + +#, fuzzy +#~ msgid "Can't determine the last WAL file\n" +#~ msgstr "No se puede escribir en %s\n" + +#, fuzzy +#~ msgid "Can't determine WAL directory\n" +#~ msgstr "No se puede crear directorio %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Can't open WAL directory %s. 
ERR=%s\n" +#~ msgstr "No se puede crear directorio %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to detect the PostgreSQL data_directory on this system.\n" +#~ msgstr "No se puede mknode xattr %s en el archivo \"%s\": ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to find data_directory=%s on this system. ERR=%s\n" +#~ msgstr "No se puede mknode xattr %s en el archivo \"%s\": ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to start the PITR backup on this system.\n" +#~ msgstr "No se puede mknode xattr %s en el archivo \"%s\": ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to determine the first WAL file on this system.\n" +#~ msgstr "No se puede mknode xattr %s en el archivo \"%s\": ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to analyse tablespaces %s on this system. ERR=%s\n" +#~ msgstr "No se puede abrir espacio xattr %s en el archivo \"%s\": ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to analyse data_directory %s on this system. ERR=%s\n" +#~ msgstr "No se puede abrir xattr %s en el archivo \"%s\": ERR=%s\n" + +#, fuzzy +#~ msgid "Can't create the %s file for recovery. ERR=%s\n" +#~ msgstr "No es posible crear registro media. ERR=%s\n" + +#, fuzzy +#~ msgid "3612 JobId=%u waiting because device %s is reserved by: %s.\n" +#~ msgstr "" +#~ "3605 JobId=%u requiere la unidad disponible, pero el dispositivo %s está " +#~ "ocupado.\n" + +#, fuzzy +#~ msgid "" +#~ "3998 Bad return from storage \"%s\" command: ERR=%s.\n" +#~ "Results=%s\n" +#~ msgstr "" +#~ "3991 Malo comando auto-cambiador comando \"cargar? unidad %d\": ERR=%s\n" +#~ "Resultados=%s\n" + +#~ msgid "Alert: %s" +#~ msgstr "Alerta: %s" + +#, fuzzy +#~ msgid "DDE commit failed. ERR=%s\n" +#~ msgstr "prctl fallido: ERR=%s\n" + +#~ msgid "Unable to open device part=%d %s: ERR=%s\n" +#~ msgstr "No se puede abrir el dispositivo parte =%d %s: ERR=%s\n" + +#~ msgid "" +#~ "Error while writing, current part number is less than the total number of " +#~ "parts (%d/%d, device=%s)\n" +#~ msgstr "" +#~ "Error al escribir, número de parte actual es menor que el número total de " +#~ "partes (%d/%d, dispositivo=%s)\n" + +#~ msgid "" +#~ "End of Volume \"%s\" at %u:%u on device %s (part_size=%s, free_space=%s, " +#~ "free_space_errno=%d, errmsg=%s).\n" +#~ msgstr "" +#~ "Fin de Volumen \"%s\" en %u:%u en el dispositivo %s (part_size=%s, " +#~ "free_space=%s, free_space_errno=%d, errmsg=%s).\n" + +#~ msgid "" +#~ "End of Volume \"%s\" at %u:%u on device %s (part_size=%s, free_space=%s, " +#~ "free_space_errno=%d).\n" +#~ msgstr "" +#~ "Fin de Volumen \"%s\" en %u:%u en el dispositivo %s (part_size=%s, " +#~ "free_space=%s, free_space_errno=%d).\n" + +#~ msgid "" +#~ "File:blk=%u:%u blk_num=%u blen=%u First rec FI=%s SessId=%u SessTim=%u " +#~ "Strm=%s rlen=%d\n" +#~ msgstr "" +#~ "Archivo:blk=%u:%u blk_num=%u blen=%u Primer registro FI=%s SessId=%u " +#~ "SessTim=%u Strm=%s rlen=%d\n" + +#~ msgid "Block: %d size=%d\n" +#~ msgstr "Bloque: %d tamaño=%d\n" + +#~ msgid "btape does not work with DVD storage.\n" +#~ msgstr "btape no funciona con el almacenamiento de DVD.\n" + +#, fuzzy +#~ msgid "Dedupengine status:\n" +#~ msgstr "Estado del Dispositivo:\n" + +#, fuzzy +#~ msgid "Initializing DDE." +#~ msgstr "Inicializando ..." 
+ +#, fuzzy +#~ msgid "Cannot create DedupDirectory: %s" +#~ msgstr "No se puede crear directorio %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot create DedupIndexDirectory: %s" +#~ msgstr "No se puede crear directorio %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot create recovery directory: %s" +#~ msgstr "No se puede crear directorio %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot delete temporary recovery directory: %s" +#~ msgstr "No se puede restablecer el directorio actual: ERR=%s\n" + +#, fuzzy +#~ msgid "Socket error or stop during rehydration. ERR=%d\n" +#~ msgstr "Error de Socket en comando %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unexpected message from FD, n=%d msglen=%d msg=%s\n" +#~ msgstr "Error al leer datos de cabecera de FD. ERR=%s\n" + +#, fuzzy +#~ msgid "Ignore unknown inter-daemon command: %ld\n" +#~ msgstr "Código de ítem desconocido: %d\n" + +#, fuzzy +#~ msgid "Error sending chunk request to client\n" +#~ msgstr "Error escribiendo encabezado para archivo de cola. ERR=%s\n" + +#, fuzzy +#~ msgid "Deduplication device not properly configured.\n" +#~ msgstr "" +#~ "Dispositivos de lectura y escritura no se han iniciado correctamente.\n" + +#, fuzzy +#~ msgid "Using default block size %u on dedup device %s\n" +#~ msgstr "Mínimo tamaño de bloque > máximo en el dispositivo %s\n" + +#, fuzzy +#~ msgid "Unable to init adata mutex: ERR=%s\n" +#~ msgstr "No se puede iniciar mutex: ERR=%s\n" + +#~ msgid "Unable to open archive %s: ERR=%s\n" +#~ msgstr "No se puede abrir archivo %s: ERR=%s\n" + +#, fuzzy +#~ msgid "3000 Deduplication vacuum marked to be canceled.\n" +#~ msgstr "3000 Job %s marcados para ser cancelados.\n" + +#, fuzzy +#~ msgid "3915 Failed to label Volume: ERR=%s\n" +#~ msgstr "3912 Fallo al etiquetar el Volumen: ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "3999 Device \"%s\" requested by DIR could not be locked for shared " +#~ "storage.\n" +#~ msgstr "" +#~ "\n" +#~ "Dispositivo \"%s\" solicitado por el DIR no se pudo abrir o no existe.\n" + +#~ msgid "Error writing part %d to the DVD: ERR=%s\n" +#~ msgstr "Error escribiendo parte %d en el DVD: ERR=%s\n" + +#~ msgid "Error while writing current part to the DVD: %s" +#~ msgstr "Error al escribir parte actual al DVD: %s" + +#~ msgid "Part %d (%lld bytes) written to DVD.\n" +#~ msgstr "Parte %d (%lld bytes) escrito al DVD.\n" + +#~ msgid "Remaining free space %s on %s\n" +#~ msgstr "Espacio libre restante %s en %s\n" + +#~ msgid "Next Volume part already exists on DVD. Cannot continue: %s\n" +#~ msgstr "" +#~ "Siguiente parte del volumen ya existe en DVD. No se puede continuar: %s\n" + +#~ msgid "" +#~ "Error writing. Current part less than total number of parts (%d/%d, " +#~ "device=%s)\n" +#~ msgstr "" +#~ "Error al escribir. Parte actual menor que número total de partes (%d/%d, " +#~ "dispositivo=%s)\n" + +#~ msgid "Unable to write last on %s: ERR=%s\n" +#~ msgstr "No se puede escribir última en %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not open aligned volume: %s, ERR=%s\n" +#~ msgstr "No se pudo abrir archivo incluir: %s, ERR=%s\n" + +#, fuzzy +#~ msgid "Send caps to Client failed. ERR=%s\n" +#~ msgstr "Fallo al abrir dispositivo. ERR=%s\n" + +#, fuzzy +#~ msgid "Recv caps from Client failed. ERR=%s\n" +#~ msgstr "Fallo al abrir dispositivo. 
ERR=%s\n" + +#, fuzzy +#~ msgid "Recv bad caps from Client: %s.\n" +#~ msgstr "Malo comando Hello desde Director en %s: %s\n" + +#, fuzzy +#~ msgid "Recv bad caps from Client %s\n" +#~ msgstr "Malo comando Hello desde Director en %s: %s\n" + +#, fuzzy +#~ msgid "" +#~ "Got Aligned or File type Volume %s on Dedup device %s. Wanted File.\n" +#~ msgstr "Etiquetada nuevo Volumen \"%s\" en el dispositivo %s.\n" + +#, fuzzy +#~ msgid "" +#~ "Got Aligned or Dedup type Volume %s on File device %s. Wanted File.\n" +#~ msgstr "Etiquetada nuevo Volumen \"%s\" en el dispositivo %s.\n" + +#, fuzzy +#~ msgid "" +#~ "Got File or Dedup type Volume %s on Aligned device %s. Wanted Aligned.\n" +#~ msgstr "" +#~ "Escribió etiqueta de volumen \"%s\" pre-etiquetada en el dispositivo %s\n" + +#~ msgid "Ready to append to end of Volume \"%s\" part=%d size=%s\n" +#~ msgstr "Listo para anexar al final del Volumen \"%s\" parte=%d tamaño=%s\n" + +#~ msgid "" +#~ "Bacula cannot write on DVD Volume \"%s\" because: The sizes do not match! " +#~ "Volume=%s Catalog=%s\n" +#~ msgstr "" +#~ "Bacula no puedo escribir en el volumen DVD \"%s\" porque: Los tamaños no " +#~ "coinciden! Volumen=%s Catálogo=%s\n" + +#~ msgid "End of Volume at file %u on device %s, Volume \"%s\"\n" +#~ msgstr "Fin del Volumen en archivo %u en dispositivo %s, Volumen \"%s\"\n" + +#~ msgid "End of all volumes.\n" +#~ msgstr "Fin de todos los volúmenes.\n" + +#, fuzzy +#~ msgid "" +#~ "3610 JobId=%u Aligned volume max bytes does not allow concurrency on " +#~ "drive %s.\n" +#~ msgstr "3610 JobId=%u máximo volumen jobs excedido en la unidad %s.\n" + +#~ msgid "Error writing header to spool file. ERR=%s\n" +#~ msgstr "Error escribiendo encabezado para archivo de cola. ERR=%s\n" + +#~ msgid "Retrying after header spooling error failed.\n" +#~ msgstr "Fallido reintento después de error encolando encabezado.\n" + +#~ msgid "Retrying after data spooling error failed.\n" +#~ msgstr "Fallido reintento después de error encolando dato.\n" + +#, fuzzy +#~ msgid "3900 missing args in .status command: %s\n" +#~ msgstr "Comando .status malo: %s\n" + +#, fuzzy +#~ msgid " Device is BLOCKED by another SD=%s\n" +#~ msgstr "Dispositivo está BLOQUEADO. Usuario desmontado.\n" + +#, fuzzy +#~ msgid "Unable to resolve parent path for %ls\n" +#~ msgstr "No se pudo obtener registro de Job. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to parse user supplied restore configuration\n" +#~ msgstr "No es posible leer el archivo de configuración" + +#~ msgid "" +#~ "\n" +#~ "Usage: fstype [-v] path ...\n" +#~ "\n" +#~ " Print the file system type a given file/directory is on.\n" +#~ " The following options are supported:\n" +#~ "\n" +#~ " -v print both path and file system type.\n" +#~ " -? print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Utilice: fstype [-v] ruta ...\n" +#~ "\n" +#~ " Imprime el tipo de sistema de archivo de un determinado archivo/" +#~ "directorio.\n" +#~ "Las siguientes opciones son compatibles:\n" +#~ "\n" +#~ " -v imprimir tanto tipo de rutas y sistema de archivos.\n" +#~ " -? imprimir este mensaje.\n" +#~ "\n" + +#~ msgid "Open File Manager paused\n" +#~ msgstr "Administrador Open File pausado\n" + +#~ msgid "FAILED to pause Open File Manager\n" +#~ msgstr "Fallo al pausar Administrador Open File\n" + +#~ msgid "Running as '%s'. Privmask=%#08x\n" +#~ msgstr "Ejecutando como '%s'. 
Privmask=%#08x\n" + +#~ msgid "Failed to retrieve current UserName\n" +#~ msgstr "Fallo al recuperar UserName actual\n" + +#, fuzzy +#~ msgid "AIX Specific Extended attribs" +#~ msgstr "Atributos Extendidos Específicos de Linux" + +#~ msgid "Attempt to open read on non-open session.\n" +#~ msgstr "Intento de abrir leer sesión no abierta.\n" + +#~ msgid "Expected an fstype string, got: %s\n" +#~ msgstr "Se esperaba una cadena fstype, se obtuvo: %s\n" + +#~ msgid "Expected an drivetype string, got: %s\n" +#~ msgstr "Se esperaba una cadena drivetype, se obtuvo: %s\n" + +#, fuzzy +#~ msgid "Storage from Run NextPool override" +#~ msgstr "Storage desde recurso Pool's NextPool " + +#, fuzzy +#~ msgid "Storage from Job's NextPool resource" +#~ msgstr "Storage desde recurso Pool's NextPool " + +#~ msgid "Storage from Pool's NextPool resource" +#~ msgstr "Storage desde recurso Pool's NextPool " + +#, fuzzy +#~ msgid "Storage from NextPool override" +#~ msgstr "Storage desde recurso Pool's NextPool " + +#~ msgid "Enter *MediaId or Volume name: " +#~ msgstr "Introduzca MediaId o nombre del Volumen:" + +#~ msgid "Warning Job %s is not running. Continuing anyway ...\n" +#~ msgstr "" +#~ "Advertencia Job %s no está¡ en ejecución. Continuar de todos modos ...\n" + +#~ msgid "I only authenticate directors, not %d\n" +#~ msgstr "Yo sólo autentifico directores, no %d\n" + +#, fuzzy +#~ msgid "Generate VSS snapshot of drive \"%c:\\\" failed.\n" +#~ msgstr "Fallo al generar VSS snapshots.\n" + +#, fuzzy +#~ msgid "Selection item too large.\n" +#~ msgstr "Ranura demasiado grande.\n" + +#, fuzzy +#~ msgid "No input string given.\n" +#~ msgstr "Especificación de archivo no dado.\n" + +#~ msgid "I only authenticate Directors, not %d\n" +#~ msgstr "Solo autentico Directores, no %d\n" + +#~ msgid "FD connect failed: Job name not found: %s\n" +#~ msgstr "Fallo al conectar a FD: Nombre del Job no encontrado: %s\n" + +#~ msgid "Hey!!!! JobId %u Job %s already authenticated.\n" +#~ msgstr "Hey!!!! JobId %u Job %s ya autenticada.\n" + +#~ msgid "Attribute %s not found." +#~ msgstr "Atributo %s no encontrado." + +#~ msgid "Cannot delete attribute %s" +#~ msgstr "No se puedo eliminar atributo %s" + +#~ msgid "Read-only attribute" +#~ msgstr "Atributo Solo-Lectura" + +#~ msgid "Cannot find attribute %s" +#~ msgstr "No se puedo encontrar atributo %s" + +#~ msgid "Error in ParseTuple\n" +#~ msgstr "Error en ParseTuple\n" + +#~ msgid "Parse tuple error in job_write\n" +#~ msgstr "Error de análisis de tupla en job_write\n" + +#~ msgid "Error in Python method %s\n" +#~ msgstr "Error en el método Python %s\n" + +#~ msgid "" +#~ "Written by Nicolas Boichat (2004)\n" +#~ "\n" +#~ "Version: %s (%s) %s %s %s\n" +#~ "\n" +#~ "Usage: tray-monitor [-c config_file] [-d debug_level]\n" +#~ " -c set configuration file to file\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -t test - read configuration and exit\n" +#~ " -? print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "Escrito por Nicolas Boichat (2004)\n" +#~ "\n" +#~ "Versión: %s (%s) %s %s %s\n" +#~ "\n" +#~ "Utilice: tray-monitor [-c archivo_configuración] [-d nivel_depuración]\n" +#~ " -c establece archivo de configuración para archivo\n" +#~ " -d establece nivel de depuración para \n" +#~ " -dt imprime timestamp en salida de depuración\n" +#~ " -t prueba - leer configuración y salir\n" +#~ " -? 
imprimir este mensaje.\n" +#~ "\n" + +#~ msgid "Bacula daemon status monitor" +#~ msgstr "Monitor de Estado del demonio Bacula" + +#~ msgid "Open status window..." +#~ msgstr "Abrir la ventana de estado..." + +#~ msgid "Exit" +#~ msgstr "Salir" + +#~ msgid "Bacula tray monitor" +#~ msgstr "Bacula tray monitor" + +#~ msgid " (DIR)" +#~ msgstr "(DIR)" + +#~ msgid " (FD)" +#~ msgstr "(FD)" + +#~ msgid " (SD)" +#~ msgstr "(SD)" + +#~ msgid "Unknown status." +#~ msgstr "Estado desconocido." + +#~ msgid "Refresh interval in seconds: " +#~ msgstr "Intervalo de actualización en segundos:" + +#~ msgid "Refresh now" +#~ msgstr "Actualizar Ahora" + +#~ msgid "About" +#~ msgstr "Sobre" + +#~ msgid "Close" +#~ msgstr "Cerrar" + +#~ msgid "Disconnecting from Director %s:%d\n" +#~ msgstr "Desconectando del Director %s:%d\n" + +#~ msgid "Disconnecting from Client %s:%d\n" +#~ msgstr "Desconectando del Cliente %s:%d\n" + +#~ msgid "Disconnecting from Storage %s:%d\n" +#~ msgstr "Desconectando del Almacenamiento %s:%d\n" + +#~ msgid "Bacula Tray Monitor" +#~ msgstr "Bacula Tray Monitor" + +#~ msgid "Written by Nicolas Boichat\n" +#~ msgstr "Escrito por Nicolás Boichat\n" + +#~ msgid "Version" +#~ msgstr "Version" + +#~ msgid "" +#~ "Current job: %s\n" +#~ "Last job: %s" +#~ msgstr "" +#~ "Job actual: %s\n" +#~ "Ultimo job: %s" + +#~ msgid " (%d errors)" +#~ msgstr "(%d errores)" + +#~ msgid " (%d error)" +#~ msgstr "(%d error)" + +#~ msgid "No current job." +#~ msgstr "Ningún trabajo actual." + +#~ msgid "No last job." +#~ msgstr "Ningún ultimo trabajo." + +#~ msgid "Job status: Created" +#~ msgstr "Estado del trabajo: Creado" + +#~ msgid "Job status: Running" +#~ msgstr "Estado del trabajo: Ejecutando" + +#~ msgid "Job status: Blocked" +#~ msgstr "Estado del trabajo: Bloqueado" + +#~ msgid "Job status: Terminated" +#~ msgstr "Estado del trabajo: Terminado" + +#~ msgid "Job status: Terminated in error" +#~ msgstr "Estado del trabajo: Terminado con error" + +#~ msgid "Job status: Error" +#~ msgstr "Estado del trabajo: Error" + +#~ msgid "Job status: Fatal error" +#~ msgstr "Estado del trabajo: Fatal error" + +#~ msgid "Job status: Verify differences" +#~ msgstr "Estado del Job: Verificar las diferencias" + +#~ msgid "Job status: Canceled" +#~ msgstr "Estado del Job: Cancelado" + +#~ msgid "Job status: Waiting on File daemon" +#~ msgstr "Estado del Job: Esperando en demonio File" + +#~ msgid "Job status: Waiting on the Storage daemon" +#~ msgstr "Estado del Job: Esperando en demonio Storage" + +#~ msgid "Job status: Waiting for new media" +#~ msgstr "Estado del Job: Esperando por nuevo medio" + +#~ msgid "Job status: Waiting for Mount" +#~ msgstr "Estado del Job: Esperando por montar" + +#~ msgid "Job status: Waiting for storage resource" +#~ msgstr "Estado del Job: Esperando por recurso storage" + +#~ msgid "Job status: Waiting for job resource" +#~ msgstr "Estado del Job: Esperando por recurso job" + +#~ msgid "Job status: Waiting for Client resource" +#~ msgstr "Estado del Job: Esperando por recurso Cliente" + +#~ msgid "Job status: Waiting for maximum jobs" +#~ msgstr "Estado del Job: Esperando por jobs máximos " + +#~ msgid "Job status: Waiting for start time" +#~ msgstr "Estado del Job: Esperando por hora de inicio " + +#~ msgid "Job status: Waiting for higher priority jobs to finish" +#~ msgstr "Estado del Job: Esperando por jobs de prioridad mayor por finalizar" + +#~ msgid "Unknown job status %c." +#~ msgstr "Estado del job desconocido %c." 
+ +#~ msgid "Job status: Unknown(%c)" +#~ msgstr "Estado del job: Desconocido (%c)" + +#~ msgid "Bad scan : '%s' %d\n" +#~ msgstr "Mala análisis: '%s' %d\n" + +#~ msgid "Connecting to Client %s:%d\n" +#~ msgstr "Conectando con Cliente %s:%d\n" + +#~ msgid "Connecting to Storage %s:%d\n" +#~ msgstr "Conectando con Storage %s:%d\n" + +#~ msgid "Cannot connect to daemon.\n" +#~ msgstr "No se puede conectar al demonio.\n" + +#~ msgid "Opened connection with Director daemon.\n" +#~ msgstr "Conexión abierta con demonio Director.\n" + +#~ msgid "Opened connection with File daemon.\n" +#~ msgstr "Conexión abierta con demonio File.\n" + +#~ msgid "Opened connection with Storage daemon.\n" +#~ msgstr "Conexión abierta con demonio Storage.\n" + +#, fuzzy +#~ msgid "<< Error: BNET_SUB_PROMPT signal received. >>\n" +#~ msgstr "<< Error: BNET_PROMPT señal recibida. >>\n" + +#~ msgid "<< Heartbeat signal received, answered. >>\n" +#~ msgstr "<< Heartbeat señal recibida, respondió. >>\n" + +#~ msgid "<< Unexpected signal received : %s >>\n" +#~ msgstr "<< Inesperada señal recibida: %s >>\n" + +#~ msgid "\n" +#~ msgstr "\n" + +#, fuzzy +#~ msgid "Failed to allocate space for query filter.\n" +#~ msgstr "No se pudo asignar memoria para la firma de cifrado.\n" + +#, fuzzy +#~ msgid "Failed to allocate space for query filters.\n" +#~ msgstr "No se pudo asignar memoria para la firma de cifrado.\n" + +#, fuzzy +#~ msgid "" +#~ "Unable to connect to Ingres server.\n" +#~ "Database=%s User=%s\n" +#~ "It is probably not running or your password is incorrect.\n" +#~ msgstr "" +#~ "No se puede conectar al servidor MySQL.\n" +#~ "Base de Datos=%s Usuario=%s\n" + +#, fuzzy +#~ msgid "A user name for Ingres must be supplied.\n" +#~ msgstr "Un nombre de usuario para DBI debe ser suministrado.\n" + +#~ msgid "Unknown database type: %s\n" +#~ msgstr "Tipo de base de datos desconocido: %s\n" + +#~ msgid "" +#~ "Unable to locate the DBD drivers to DBI interface in: \n" +#~ "db_driverdir=%s. It is probaly not found any drivers\n" +#~ msgstr "" +#~ "Incapaz de localizar los controladores de la interfaz DBD para DBI en: \n" +#~ "db_driverdir=%s. Es probable que no se encuentra ningún controlador\n" + +#, fuzzy +#~ msgid "" +#~ "Unable to connect to DBI interface. 
Type=%s Database=%s User=%s\n" +#~ "Possible causes: SQL server not running; password incorrect; " +#~ "max_connections exceeded.\n" +#~ msgstr "" +#~ "No se pudo conectar a la interfaz DBI.\n" +#~ "Tipo=%s Base de Datos=%s Usuario=%s\n" +#~ "Probablemente no este ejecutando o su contraseña es incorrecta.\n" + +#~ msgid "error inserting batch mode: %s" +#~ msgstr "Error insertando en modo batch: %s" + +#~ msgid "Driver type not specified in Catalog resource.\n" +#~ msgstr "Tipo de controlador no especificado en los recursos de Catálogo.\n" + +#~ msgid "Invalid driver type, must be \"dbi:\"\n" +#~ msgstr "Tipo de controlador no válido, debe ser \"dbi:\"\n" + +#~ msgid "A user name for DBI must be supplied.\n" +#~ msgstr "Un nombre de usuario para DBI debe ser suministrado.\n" + +#, fuzzy +#~ msgid "Read zero bytes Vol=%s at %u:%u on device %s.\n" +#~ msgstr "Leer cero bytes en %u:%u en el dispositivo %s.\n" + +#~ msgid "Init Catalog" +#~ msgstr "Catálogo de inicio" + +#~ msgid "Volume to Catalog" +#~ msgstr "Volumen para Catalogo" + +#~ msgid "Disk to Catalog" +#~ msgstr "Disco para Catalogo" + +#~ msgid "Data" +#~ msgstr "Datos" + +#~ msgid "Bacula Storage: Idle" +#~ msgstr "Bacula Storage: Libre" + +#~ msgid "Bacula Storage: Running" +#~ msgstr "Bacula Storage: Ejecutando" + +#~ msgid "Bacula Storage: Last Job Canceled" +#~ msgstr "Bacula Storage: Último Job Cancelado" + +#~ msgid "Bacula Storage: Last Job Failed" +#~ msgstr "Bacula Storage: Último Job Fallido" + +#~ msgid "Bacula Storage: Last Job had Warnings" +#~ msgstr "Bacula Storage: Último Job con Advertencias" + +#~ msgid "JCR use_count=%d JobId=%d\n" +#~ msgstr "JCR use_count=%d JobId=%d\n" + +#~ msgid "It was tried to assign a ipv6 address to a ipv4(%d)\n" +#~ msgstr "Se trató de asignar una dirección IPv6 a IPv4(%d)\n" + +#~ msgid "Bad response to Hello command: ERR=" +#~ msgstr "mala respuesta al comando Hello: ERR =" + +#, fuzzy +#~ msgid "JobId %s is not running. Use Job name to %s inactive jobs.\n" +#~ msgstr "" +#~ "JobId %s no está en ejecución. Utilice el nombre del Job para cancelar " +#~ "jobs inactivo.\n" + +#, fuzzy +#~ msgid "Confirm %s (yes/no): " +#~ msgstr "Confirmar cancelar(si/no): " + +#~ msgid "Pool record not found." 
+#~ msgstr "Registro Pool no encontrado" + +#~ msgid "Priority must be 1-100" +#~ msgstr "Prioridad debe ser 1-100" + +#~ msgid "Job Level can be set only during JobInit" +#~ msgstr "Nivel de Job sólo se puede ajustar durante JobInit" + +#~ msgid "Bad JobLevel string" +#~ msgstr "Mala cadena JobLevel" + +#~ msgid "Python control commands" +#~ msgstr "Comandos de control de Python" + +#~ msgid "Python interpreter restarted.\n" +#~ msgstr "Interprete Python reiniciado.\n" + +#, fuzzy +#~ msgid "Illegal JobId %s ignored\n" +#~ msgstr "Clonar JobId %d iniciado.\n" + +#~ msgid "Failed to authenticate Storage daemon.\n" +#~ msgstr "Fallo al autenticar demonio Storage.\n" + +#, fuzzy +#~ msgid " SDSocket=closed\n" +#~ msgstr "SDSocket cerrado.\n" + +#~ msgid "Bacula Client: Idle" +#~ msgstr "Bacula Cliente: Inactivo" + +#~ msgid "Bacula Client: Running" +#~ msgstr "Bacula Cliente: Funcionando" + +#~ msgid "Bacula Client: Last Job Canceled" +#~ msgstr "Bacula Cliente: Ultimo Job Cancelado" + +#~ msgid "Bacula Client: Last Job Failed" +#~ msgstr "Bacula Cliente: Ultimo Job Fallido" + +#~ msgid "Bacula Client: Last Job had Warnings" +#~ msgstr "Bacula Cliente: Ultimo Job con Advertencias" + +#~ msgid "Enter restore mode" +#~ msgstr "Introduzca el modo de restauración" + +#~ msgid "Cancel restore" +#~ msgstr "Cancelar restauración" + +#~ msgid "Add" +#~ msgstr "Agregar" + +#~ msgid "Remove" +#~ msgstr "Eliminar" + +#~ msgid "Refresh" +#~ msgstr "Actualizar" + +#~ msgid "M" +#~ msgstr "M" + +#~ msgid "Filename" +#~ msgstr "Nombre de Archivo" + +#~ msgid "Size" +#~ msgstr "Tamaño" + +#~ msgid "Date" +#~ msgstr "Fecha" + +#~ msgid "Perm." +#~ msgstr "Permiso" + +#~ msgid "User" +#~ msgstr "Usuario" + +#~ msgid "Group" +#~ msgstr "Grupo" + +#~ msgid "Job Name" +#~ msgstr "Nombre del Job" + +#~ msgid "Fileset" +#~ msgstr "Fileset" + +#~ msgid "Before" +#~ msgstr "Antes de" + +#~ msgid "Please configure parameters concerning files to restore :" +#~ msgstr "" +#~ "Por favor, configure los parámetros relativos a los archivos a restaurar:" + +#~ msgid "always" +#~ msgstr "siempre" + +#~ msgid "if newer" +#~ msgstr "si los nuevos" + +#~ msgid "if older" +#~ msgstr "si los viejos" + +#~ msgid "never" +#~ msgstr "nunca" + +#~ msgid "Please configure parameters concerning files restoration :" +#~ msgstr "" +#~ "Por favor, configure los parámetros relativos a los archivos de " +#~ "restauración" + +#~ msgid "Getting parameters list." +#~ msgstr "Obteniendo los parámetros de la lista." + +#~ msgid "Error : no clients returned by the director." +#~ msgstr "Error: el director no devolvió ningún cliente" + +#~ msgid "Error : no filesets returned by the director." +#~ msgstr "Error: el director no devolvió ningún fileset" + +#~ msgid "Error : no storage returned by the director." +#~ msgstr "Error: el director no devolvió ningún storage" + +#~ msgid "Error : no jobs returned by the director." +#~ msgstr "Error: no hay Jobs devuelto por el director." + +#~ msgid "RestoreFiles" +#~ msgstr "RestoreFiles" + +#~ msgid "Please configure your restore parameters." +#~ msgstr "Por favor, configure los parámetros de restauración." + +#~ msgid "Please select a client." +#~ msgstr "Por favor seleccione un cliente." + +#~ msgid "Please select a restore date." +#~ msgstr "Por favor seleccione una fecha de restauración." + +#~ msgid "Building restore tree..." +#~ msgstr "Construyendo árbol de restauración..." 
+ +#~ msgid "Error while starting restore: " +#~ msgstr "Error al iniciar restauración:" + +#~ msgid "" +#~ "Right click on a file or on a directory, or double-click on its mark to " +#~ "add it to the restore list." +#~ msgstr "" +#~ "Haga clic derecho sobre un archivo o un directorio, o haga doble clic en " +#~ "su marca para añadirlo a la lista de restauración." + +#~ msgid "Unexpected question has been received.\n" +#~ msgstr "Pregunta inesperada ha sido recibida.\n" + +#~ msgid "bwx-console: unexpected restore question." +#~ msgstr "bwx-console: inesperada consulta de restauración." + +#~ msgid " files selected to be restored." +#~ msgstr "archivos seleccionados para ser restaurado." + +#~ msgid " file selected to be restored." +#~ msgstr "archivo seleccionado para ser restaurado." + +#~ msgid "Please configure your restore (%ld files selected to be restored)..." +#~ msgstr "" +#~ "Por favor, configure su restauración (%ld archivos seleccionados para ser " +#~ "restaurado)..." + +#~ msgid "Restore failed : no file selected.\n" +#~ msgstr "Restauración fallida: ningún archivo seleccionado.\n" + +#~ msgid "Restore failed : no file selected." +#~ msgstr "Restauración fallida: ningún archivo seleccionado." + +#~ msgid "Restoring, please wait..." +#~ msgstr "Restaurando, por favor espere..." + +#~ msgid "Job queued. JobId=" +#~ msgstr "Cola de Job. JobId=" + +#~ msgid "Restore queued, jobid=" +#~ msgstr "Cola de Restauración, JobID =" + +#~ msgid "Job failed." +#~ msgstr "Job fallido." + +#~ msgid "Restore failed, please look at messages.\n" +#~ msgstr "Fallo en restauración, por favor, mirar los mensajes.\n" + +#~ msgid "Restore failed, please look at messages in console." +#~ msgstr "Fallo en restauración, por favor, mirar los mensajes en la consola." + +#~ msgid "Failed to retrieve jobid.\n" +#~ msgstr "No se ha podido recuperar jobId.\n" + +#~ msgid "" +#~ "Restore is scheduled to run. bwx-console will not wait for its " +#~ "completion.\n" +#~ msgstr "" +#~ "Restaurar está programado para ejecutarse. bwx-consola no esperara a su " +#~ "conclusión.\n" + +#~ msgid "" +#~ "Restore is scheduled to run. bwx-console will not wait for its completion." +#~ msgstr "" +#~ "Restaurar está programado para ejecutarse. bwx-consola no esperara a su " +#~ "conclusión." + +#~ msgid "Restore job created, but not yet running." +#~ msgstr "Job de restauración creado, pero aún no se ejecuta." + +#~ msgid "Restore job running, please wait (%ld of %ld files restored)..." +#~ msgstr "" +#~ "Job de restauración en ejecución, por favor espere (%ld de %ld archivos " +#~ "restaurados) ..." + +#~ msgid "Restore job terminated successfully." +#~ msgstr "Trabajo de restauración terminado correctamente." + +#~ msgid "Restore job terminated successfully.\n" +#~ msgstr "Trabajo de restauración terminado correctamente.\n" + +#~ msgid "Restore job terminated in error, see messages in console." +#~ msgstr "" +#~ "Trabajo de restauración terminado con error, ver los mensajes en la " +#~ "consola." + +#~ msgid "Restore job terminated in error, see messages.\n" +#~ msgstr "Trabajo de restauración terminado con error, ver los mensajes.\n" + +#~ msgid "Restore job reported a non-fatal error." +#~ msgstr "Trabajo de restauración no reporto error fatal." + +#~ msgid "Restore job reported a fatal error." +#~ msgstr "Trabajo de restauración reporto un error fatal." + +#~ msgid "Restore job cancelled by user." +#~ msgstr "Trabajo de restauración cancelado por el usuario." 
+ +#~ msgid "Restore job cancelled by user.\n" +#~ msgstr "Trabajo de restauración cancelado por el usuario.\n" + +#~ msgid "Restore job is waiting on File daemon." +#~ msgstr "Trabajo de restauración esta esperando demonio File." + +#~ msgid "Restore job is waiting for new media." +#~ msgstr "Trabajo de restauración esta esperando por un nuevo medio." + +#~ msgid "Restore job is waiting for storage resource." +#~ msgstr "" +#~ "Trabajo de restauración esta esperando por recurso de almacenamiento." + +#~ msgid "Restore job is waiting for job resource." +#~ msgstr "Job de restauración está esperando por recurso job." + +#~ msgid "Restore job is waiting for Client resource." +#~ msgstr "Trabajo de restauración esta esperando por recurso de Cliente." + +#~ msgid "Restore job is waiting for maximum jobs." +#~ msgstr "Trabajo de restauración esta esperando por trabajo máximo." + +#~ msgid "Restore job is waiting for start time." +#~ msgstr "Trabajo de restauración esta esperando por hora de inicio." + +#~ msgid "Restore job is waiting for higher priority jobs to finish." +#~ msgstr "" +#~ "Job de restauración está esperando por jobs de mayor prioridad para " +#~ "finalizar." + +#~ msgid "" +#~ "The restore job has not been started within one minute, bwx-console will " +#~ "not wait for its completion anymore.\n" +#~ msgstr "" +#~ "El Job de restauración no se ha iniciado en el plazo de un minuto, bwx-" +#~ "console no va a esperar para su realización más.\n" + +#~ msgid "" +#~ "The restore job has not been started within one minute, bwx-console will " +#~ "not wait for its completion anymore." +#~ msgstr "" +#~ "El Job de restauración no se ha iniciado en el plazo de un minuto, bwx-" +#~ "console no va a esperar para su realización más." + +#~ msgid "Restore done successfully.\n" +#~ msgstr "Restauración finalizada con suceso.\n" + +#~ msgid "Restore done successfully." +#~ msgstr "Restauración finalizada con suceso." + +#~ msgid "Applying restore configuration changes..." +#~ msgstr "Aplicando cambios de configuración de restauración..." + +#~ msgid "Failed to find the selected client." +#~ msgstr "Fallo al encontrar el cliente seleccionado." + +#~ msgid "Failed to find the selected fileset." +#~ msgstr "Fallo al encontrar el fileset seleccionado." + +#~ msgid "Failed to find the selected storage." +#~ msgstr "Fallo al encontrar el almacenamiento seleccionado." + +#~ msgid "Run Restore job" +#~ msgstr "Ejecutando Job de restauración" + +#~ msgid "Restore configuration changes were applied." +#~ msgstr "Restaurar los cambios de configuración aplicados." + +#~ msgid "Restore cancelled." +#~ msgstr "Restauración cancelada." + +#~ msgid "No results to list." +#~ msgstr "No hay resultados para listar." + +#~ msgid "No backup found for this client." +#~ msgstr "Respaldos no encontrados para este cliente." + +#~ msgid "ERROR" +#~ msgstr "ERROR" + +#~ msgid "Query failed" +#~ msgstr "Consulta fallida" + +#~ msgid "Cannot get previous backups list, see console." +#~ msgstr "" +#~ "No se puede obtener lista de copias de seguridad anteriores, ver la " +#~ "consola." 
+ +#~ msgid "JobName:" +#~ msgstr "JobName:" + +#~ msgid "Bootstrap:" +#~ msgstr "Bootstrap:" + +#~ msgid "Where:" +#~ msgstr "Donde:" + +#~ msgid "Replace:" +#~ msgstr "Reemplazar:" + +#~ msgid "ifnewer" +#~ msgstr "ifnewer" + +#~ msgid "ifolder" +#~ msgstr "ifolder" + +#~ msgid "FileSet:" +#~ msgstr "FileSet:" + +#~ msgid "Client:" +#~ msgstr "Cliente:" + +#~ msgid "Storage:" +#~ msgstr "Storage:" + +#~ msgid "When:" +#~ msgstr "Cuando:" + +#~ msgid "Priority:" +#~ msgstr "Prioridad:" + +#~ msgid "Restoring..." +#~ msgstr "Restaurando..." + +#~ msgid "Type your command below:" +#~ msgstr "Escriba su comando a continuación:" + +#~ msgid "Unknown command." +#~ msgstr "Comando desconocido." + +#~ msgid "Possible completions: " +#~ msgstr "Posibles complementos:" + +#~ msgid "&About...\tF1" +#~ msgstr "&Acerca de...\tF1" + +#~ msgid "Show about dialog" +#~ msgstr "Mostrar diálogo Acerca de" + +#~ msgid "Connect" +#~ msgstr "Conectar" + +#~ msgid "Connect to the director" +#~ msgstr "Conectar con director" + +#~ msgid "Disconnect" +#~ msgstr "Desconectar" + +#~ msgid "Disconnect of the director" +#~ msgstr "Desconectar del director" + +#~ msgid "Change of configuration file" +#~ msgstr "Cambio del archivo de configuración" + +#~ msgid "Change your default configuration file" +#~ msgstr "Cambiar el archivo de configuración predeterminado" + +#~ msgid "Edit your configuration file" +#~ msgstr "Edite su archivo de configuración" + +#~ msgid "E&xit\tAlt-X" +#~ msgstr "E&xit\tAlt-X" + +#~ msgid "Quit this program" +#~ msgstr "Salga de este programa" + +#~ msgid "&File" +#~ msgstr "&Archivo" + +#~ msgid "&Help" +#~ msgstr "&Ayuda" + +#~ msgid "Welcome to bacula bwx-console %s (%s)!\n" +#~ msgstr "Bienvenido a bacula bwx-console %s (%s)!\n" + +#~ msgid "" +#~ "Warning : Unicode is disabled because you are using wxWidgets for GTK+ " +#~ "1.2.\n" +#~ msgstr "" +#~ "Advertencia: Unicode está deshabilitado porque está utilizando wxWidgets " +#~ "para GTK+ 1.2.\n" + +#~ msgid "" +#~ "Warning : There is a problem with wxWidgets for GTK+ 2.0 without Unicode " +#~ "support when handling non-ASCII filenames: Every non-ASCII character in " +#~ "such filenames will be replaced by an interrogation mark.\n" +#~ "If this behaviour disturbs you, please build bwx-console against a " +#~ "Unicode version of wxWidgets for GTK+ 2.0.\n" +#~ "---\n" +#~ msgstr "" +#~ "Advertencia: Hay un problema con wxWidgets para GTK+ 2.0 sin el soporte " +#~ "Unicode al manejar nombres de archivos no-ASCII: Cada carácter no-ASCII " +#~ "en nombres de archivos será sustituido por un signo de interrogación.\n" +#~ "Si este comportamiento te molesta, por favor construya bwx-consola para " +#~ "una versión Unicode de wxWidgets para GTK+ 2.0.\n" +#~ "---\n" + +#~ msgid "Send" +#~ msgstr "Enviar" + +#~ msgid "Usage: bwx-console [-c configfile] [-w tmp]\n" +#~ msgstr "Utilice: bwx-console [-c archivo_configuración] [-w tmp]\n" + +#~ msgid "" +#~ "It seems that it is the first time you run bwx-console.\n" +#~ "This file (%s) has been choosen as default configuration file.\n" +#~ "Do you want to edit it? (if you click No you will have to select another " +#~ "file)" +#~ msgstr "" +#~ "Parece que es la primera vez que ejecute bwx-consola.\n" +#~ "Este archivo (%s) ha sido elegido como el archivo de configuración por " +#~ "defecto.\n" +#~ "¿Desea editar lo? 
(si hace clic en NO usted tendrá que seleccionar otro " +#~ "archivo)" + +#~ msgid "First run" +#~ msgstr "Primera ejecución" + +#~ msgid "" +#~ "Unable to read %s\n" +#~ "Error: %s\n" +#~ "Do you want to choose another one? (Press no to edit this file)" +#~ msgstr "" +#~ "No se puede leer %s\n" +#~ "Error: %s\n" +#~ " ¿Deseas seleccionar otro? (Presione no para editar este archivo)" + +#~ msgid "Please choose a configuration file to use" +#~ msgstr "Por favor, elija un archivo de configuración para usar" + +#~ msgid "" +#~ "This configuration file has been successfully read, use it as default?" +#~ msgstr "" +#~ "Este archivo de configuración se ha leído correctamente, utilizar el por " +#~ "defecto?" + +#~ msgid "Configuration file read successfully" +#~ msgstr "Archivo de configuración leído correctamente" + +#~ msgid "Using this configuration file: %s\n" +#~ msgstr "Usando este fichero de configuración: %s\n" + +#~ msgid "Connecting to the director..." +#~ msgstr "Conectando con director..." + +#~ msgid "Failed to unregister a data parser !" +#~ msgstr "Fallo al anular el registro del analizador de datos!" + +#~ msgid "Quitting.\n" +#~ msgstr "Saliendo.\n" + +#~ msgid "" +#~ "Welcome to Bacula bwx-console.\n" +#~ "Written by Nicolas Boichat \n" +#~ "Copyright (C), 2005-2007 Free Software Foundation Europe, e.V.\n" +#~ msgstr "" +#~ "Bienvenido a Bacula bwx-console.\n" +#~ "Escrito por Nicolas Boichat \n" +#~ "Copyright (C), 2005-2007 Free Software Foundation Europe, e.V.\n" + +#~ msgid "About Bacula bwx-console" +#~ msgstr "Acerca de Bacula bwx-console" + +#~ msgid "Please choose your default configuration file" +#~ msgstr "Por favor, seleccione su archivo de configuración por defecto" + +#~ msgid "Use this configuration file as default?" +#~ msgstr "Utilizar este archivo de configuración por defecto?" + +#~ msgid "Configuration file" +#~ msgstr "Archivo de configuración" + +#~ msgid "Console thread terminated." +#~ msgstr "Hilo de Consola terminado." + +#~ msgid "Connection to the director lost. Quit program?" +#~ msgstr "Conexión perdida con el director. Salir del programa?" + +#~ msgid "Connection lost" +#~ msgstr "Conexión perdida" + +#~ msgid "Connected to the director." +#~ msgstr "Conectado al director." + +#~ msgid "Reconnect" +#~ msgstr "Reconectar" + +#~ msgid "Reconnect to the director" +#~ msgstr "Reconectar al director" + +#~ msgid "Disconnected of the director." +#~ msgstr "Desconectado al director." + +#~ msgid "bwx-console: unexpected director's question." +#~ msgstr "bwx-console: inesperada consulta del director." 
+ +#~ msgid "Config file editor" +#~ msgstr "Editor de archivos de configuración" + +#~ msgid "# Bacula bwx-console Configuration File\n" +#~ msgstr "# Bacula bwx-console Archivo de Configuración\n" + +#~ msgid "Save and close" +#~ msgstr "Guardar y cerrar" + +#~ msgid "Close without saving" +#~ msgstr "Cerrar sin guardar" + +#~ msgid "Unable to write to %s\n" +#~ msgstr "No se puede escribir en %s\n" + +#~ msgid "Error while saving" +#~ msgstr "Error al guardar" + +#~ msgid "Bacula bwx-console" +#~ msgstr "Bacula bwx-console" + +#~ msgid "Apply" +#~ msgstr "Aplicar" + +#~ msgid "" +#~ "Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined " +#~ "for Director \"%s\" in config file.\n" +#~ "At least one CA certificate store is required.\n" +#~ msgstr "" +#~ "Ni \"Certificado TLS CA \" o \"Directorio del Certificado TLS CA\" están " +#~ "definidos para el Director \"%s\" en el archivo de configuración.\n" +#~ "Por lo menos un almacén de certificados CA es necesario.\n" + +#~ msgid "" +#~ "No Director resource defined in config file.\n" +#~ "Without that I don't how to speak to the Director :-(\n" +#~ msgstr "" +#~ "Recurso Director no definido en el archivo de configuración.\n" +#~ "Sin eso, yo no sé cómo hablar con el Director :-(\n" + +#~ msgid "" +#~ "Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined " +#~ "for Console \"%s\" in config file.\n" +#~ msgstr "" +#~ "Ni \"Certificado TLS CA \" o \"Directorio del Certificado TLS CA\" están " +#~ "definidos para la Consola \"%s\" en el archivo de configuración.\n" + +#~ msgid "Error while initializing windows sockets...\n" +#~ msgstr "Error durante la inicialización de Windows Sockets ...\n" + +#~ msgid "Error while cleaning up windows sockets...\n" +#~ msgstr "Error durante la limpieza de Windows Sockets ...\n" + +#~ msgid "Error while initializing library." +#~ msgstr "Error al inicializar la librería." 
+ +#~ msgid "Cryptographic library initialization failed.\n" +#~ msgstr "Fallo en inicialización de la librería criptográfica.\n" + +#~ msgid "Please correct configuration file.\n" +#~ msgstr "Por favor, corrija el archivo de configuración.\n" + +#~ msgid "Error : Library not initialized\n" +#~ msgstr "Error: Librería no inicializada\n" + +#~ msgid "Error : No configuration file loaded\n" +#~ msgstr "Error: No hay archivo de configuración cargado\n" + +#~ msgid "Connecting...\n" +#~ msgstr "Conectando...\n" + +#~ msgid "Error : No director defined in config file.\n" +#~ msgstr "Error: Director no definido en el archivo de configuración.\n" + +#~ msgid "Multiple directors found in your config file.\n" +#~ msgstr "Varios directores encontrados en su archivo de configuración.\n" + +#~ msgid "Please choose a director (1-%d): " +#~ msgstr "Por favor, elija un director (1-%d): " + +#~ msgid "Passphrase for Console \"%s\" TLS private key: " +#~ msgstr "Frase de contraseña para Console \"%s\" TLS clave privada:" + +#~ msgid "Passphrase for Director \"%s\" TLS private key: " +#~ msgstr "Frase de contraseña para Director \"%s\" TLS clave privada:" + +#~ msgid "Failed to connect to the director\n" +#~ msgstr "Error al conectar con el director\n" + +#~ msgid "Connected\n" +#~ msgstr "Conectado\n" + +#~ msgid "<< Unexpected signal received : " +#~ msgstr "<\n" +"Language-Team: Spanish\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=ISO-8859-1\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +#: src/cats/bdb.c:128 +msgid "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" +msgstr "" + +#: src/cats/bdb.c:129 +msgid "WARNING!!!! The Internal Database is NOT OPERATIONAL!\n" +msgstr "" + +#: src/cats/bdb.c:130 +msgid "You should use SQLite, PostgreSQL, or MySQL\n" +msgstr "" + +#: src/cats/bdb.c:152 src/cats/mysql.c:124 src/cats/postgresql.c:128 +#: src/cats/sqlite.c:122 +#, c-format +msgid "Unable to initialize DB lock. ERR=%s\n" +msgstr "" + +#: src/cats/bdb.c:162 +#, c-format +msgid "Unable to open Catalog DB control file %s: ERR=%s\n" +msgstr "" + +#: src/cats/bdb.c:217 +#, c-format +msgid "Error reading catalog DB control file. ERR=%s\n" +msgstr "" + +#: src/cats/bdb.c:220 +#, c-format +msgid "" +"Error, catalog DB control file wrong version. Wanted %d, got %d\n" +"Please reinitialize the working directory.\n" +msgstr "" + +#: src/cats/bdb_update.c:83 src/cats/bdb_update.c:114 +#, c-format +msgid "Error updating DB Job file. ERR=%s\n" +msgstr "" + +#: src/cats/bdb_update.c:153 src/cats/bdb_update.c:189 +#, c-format +msgid "Error updating DB Media file. ERR=%s\n" +msgstr "" + +#: src/cats/mysql.c:60 +msgid "A user name for MySQL must be supplied.\n" +msgstr "" + +#: src/cats/mysql.c:161 +#, c-format +msgid "" +"Unable to connect to MySQL server. 
\n" +"Database=%s User=%s\n" +"It is probably not running or your password is incorrect.\n" +msgstr "" + +#: src/cats/mysql.c:320 src/cats/postgresql.c:264 src/cats/sqlite.c:323 +#, c-format +msgid "Query failed: %s: ERR=%s\n" +msgstr "" + +#: src/cats/postgresql.c:62 +msgid "A user name for PostgreSQL must be supplied.\n" +msgstr "" + +#: src/cats/postgresql.c:165 +#, c-format +msgid "" +"Unable to connect to PostgreSQL server.\n" +"Database=%s User=%s\n" +"It is probably not running or your password is incorrect.\n" +msgstr "" + +#: src/cats/postgresql.c:506 +#, c-format +msgid "error fetching currval: %s\n" +msgstr "" + +#: src/cats/sql.c:96 +#, c-format +msgid "" +"query %s failed:\n" +"%s\n" +msgstr "" + +#: src/cats/sql.c:118 +#, c-format +msgid "" +"insert %s failed:\n" +"%s\n" +msgstr "" + +#: src/cats/sql.c:132 +#, c-format +msgid "Insertion problem: affected_rows=%s\n" +msgstr "" + +#: src/cats/sql.c:152 +#, c-format +msgid "" +"update %s failed:\n" +"%s\n" +msgstr "" + +#: src/cats/sql.c:162 +#, c-format +msgid "Update problem: affected_rows=%s\n" +msgstr "" + +#: src/cats/sql.c:183 +#, c-format +msgid "" +"delete %s failed:\n" +"%s\n" +msgstr "" + +#: src/cats/sql.c:209 src/cats/sql.c:216 src/cats/sql_create.c:773 +#: src/cats/sql_get.c:180 src/cats/sql_get.c:231 src/cats/sql_get.c:564 +#: src/cats/sql_get.c:639 src/cats/sql_get.c:866 +#, c-format +msgid "error fetching row: %s\n" +msgstr "" + +#: src/cats/sql.c:326 src/dird/catreq.c:369 src/dird/catreq.c:401 +#: src/dird/catreq.c:426 +#, c-format +msgid "Attribute create error. %s" +msgstr "" + +#: src/cats/sql.c:407 +#, c-format +msgid "Path length is zero. File=%s\n" +msgstr "" + +#: src/cats/sql.c:451 +msgid "No results to list.\n" +msgstr "" + +#: src/cats/sql_create.c:86 +#, c-format +msgid "Create DB Job record %s failed. ERR=%s\n" +msgstr "" + +#: src/cats/sql_create.c:132 +#, c-format +msgid "Create JobMedia record %s failed: ERR=%s\n" +msgstr "" + +#: src/cats/sql_create.c:141 +#, c-format +msgid "Update Media record %s failed: ERR=%s\n" +msgstr "" + +#: src/cats/sql_create.c:171 +#, c-format +msgid "pool record %s already exists\n" +msgstr "" + +#: src/cats/sql_create.c:197 +#, c-format +msgid "Create db Pool record %s failed: ERR=%s\n" +msgstr "" + +#: src/cats/sql_create.c:228 +#, c-format +msgid "Device record %s already exists\n" +msgstr "" + +#: src/cats/sql_create.c:244 +#, c-format +msgid "Create db Device record %s failed: ERR=%s\n" +msgstr "" + +#: src/cats/sql_create.c:277 +#, c-format +msgid "More than one Storage record!: %d\n" +msgstr "" + +#: src/cats/sql_create.c:282 +#, c-format +msgid "error fetching Storage row: %s\n" +msgstr "" + +#: src/cats/sql_create.c:302 +#, c-format +msgid "Create DB Storage record %s failed. ERR=%s\n" +msgstr "" + +#: src/cats/sql_create.c:334 +#, c-format +msgid "mediatype record %s already exists\n" +msgstr "" + +#: src/cats/sql_create.c:350 +#, c-format +msgid "Create db mediatype record %s failed: ERR=%s\n" +msgstr "" + +#: src/cats/sql_create.c:384 +#, c-format +msgid "Volume \"%s\" already exists.\n" +msgstr "" + +#: src/cats/sql_create.c:422 +#, c-format +msgid "Create DB Media record %s failed. ERR=%s\n" +msgstr "" + +#: src/cats/sql_create.c:470 +#, c-format +msgid "More than one Client!: %d\n" +msgstr "" + +#: src/cats/sql_create.c:475 +#, c-format +msgid "error fetching Client row: %s\n" +msgstr "" + +#: src/cats/sql_create.c:502 +#, c-format +msgid "Create DB Client record %s failed. 
ERR=%s\n" +msgstr "" + +#: src/cats/sql_create.c:545 +#, c-format +msgid "Create DB Counters record %s failed. ERR=%s\n" +msgstr "" + +#: src/cats/sql_create.c:578 +#, c-format +msgid "More than one FileSet!: %d\n" +msgstr "" + +#: src/cats/sql_create.c:583 +#, c-format +msgid "error fetching FileSet row: ERR=%s\n" +msgstr "" + +#: src/cats/sql_create.c:613 +#, c-format +msgid "Create DB FileSet record %s failed. ERR=%s\n" +msgstr "" + +#: src/cats/sql_create.c:670 +#, c-format +msgid "Attempt to put non-attributes into catalog. Stream=%d\n" +msgstr "" + +#: src/cats/sql_create.c:733 +#, c-format +msgid "Create db File record %s failed. ERR=%s" +msgstr "" + +#: src/cats/sql_create.c:766 src/cats/sql_get.c:224 +#, c-format +msgid "More than one Path!: %s for path: %s\n" +msgstr "" + +#: src/cats/sql_create.c:797 +#, c-format +msgid "Create db Path record %s failed. ERR=%s\n" +msgstr "" + +#: src/cats/sql_create.c:830 +#, c-format +msgid "More than one Filename! %s for file: %s\n" +msgstr "" + +#: src/cats/sql_create.c:836 +#, c-format +msgid "Error fetching row for file=%s: ERR=%s\n" +msgstr "" + +#: src/cats/sql_create.c:852 +#, c-format +msgid "Create db Filename record %s failed. ERR=%s\n" +msgstr "" + +#: src/cats/sql_delete.c:79 +#, c-format +msgid "No pool record %s exists\n" +msgstr "" + +#: src/cats/sql_delete.c:84 +#, c-format +msgid "Expecting one pool record, got %d\n" +msgstr "" + +#: src/cats/sql_delete.c:90 +#, c-format +msgid "Error fetching row %s\n" +msgstr "" + +#: src/cats/sql_find.c:90 src/cats/sql_find.c:119 +#, c-format +msgid "" +"Query error for start time request: ERR=%s\n" +"CMD=%s\n" +msgstr "" + +#: src/cats/sql_find.c:96 +msgid "No prior Full backup Job record found.\n" +msgstr "" + +#: src/cats/sql_find.c:108 +#, c-format +msgid "Unknown level=%d\n" +msgstr "" + +#: src/cats/sql_find.c:125 +#, c-format +msgid "" +"No Job record found: ERR=%s\n" +"CMD=%s\n" +msgstr "" + +#: src/cats/sql_find.c:224 +#, c-format +msgid "Unknown Job level=%d\n" +msgstr "" + +#: src/cats/sql_find.c:234 +#, c-format +msgid "No Job found for: %s.\n" +msgstr "" + +#: src/cats/sql_find.c:245 +#, c-format +msgid "No Job found for: %s\n" +msgstr "" + +#: src/cats/sql_find.c:317 +#, c-format +msgid "Request for Volume item %d greater than max %d\n" +msgstr "" + +#: src/cats/sql_find.c:329 +#, c-format +msgid "No Volume record found for item %d.\n" +msgstr "" + +#: src/cats/sql_get.c:130 +#, c-format +msgid "get_file_record want 1 got rows=%d\n" +msgstr "" + +#: src/cats/sql_get.c:135 +#, c-format +msgid "Error fetching row: %s\n" +msgstr "" + +#: src/cats/sql_get.c:143 +#, c-format +msgid "File record for PathId=%s FilenameId=%s not found.\n" +msgstr "" + +#: src/cats/sql_get.c:149 +msgid "File record not found in Catalog.\n" +msgstr "" + +#: src/cats/sql_get.c:174 +#, c-format +msgid "More than one Filename!: %s for file: %s\n" +msgstr "" + +#: src/cats/sql_get.c:184 +#, c-format +msgid "Get DB Filename record %s found bad record: %d\n" +msgstr "" + +#: src/cats/sql_get.c:190 +#, c-format +msgid "Filename record: %s not found.\n" +msgstr "" + +#: src/cats/sql_get.c:194 +#, c-format +msgid "Filename record: %s not found in Catalog.\n" +msgstr "" + +#: src/cats/sql_get.c:235 +#, c-format +msgid "Get DB path record %s found bad record: %s\n" +msgstr "" + +#: src/cats/sql_get.c:248 +#, c-format +msgid "Path record: %s not found.\n" +msgstr "" + +#: src/cats/sql_get.c:252 +#, c-format +msgid "Path record: %s not found in Catalog.\n" +msgstr "" + +#: src/cats/sql_get.c:287 +#, c-format +msgid 
"No Job found for JobId %s\n" +msgstr "" + +#: src/cats/sql_get.c:344 src/cats/sql_get.c:399 +#, c-format +msgid "No volumes found for JobId=%d\n" +msgstr "" + +#: src/cats/sql_get.c:350 src/cats/sql_get.c:408 +#, c-format +msgid "Error fetching row %d: ERR=%s\n" +msgstr "" + +#: src/cats/sql_get.c:364 +#, c-format +msgid "No Volume for JobId %d found in Catalog.\n" +msgstr "" + +#: src/cats/sql_get.c:480 +#, c-format +msgid "Pool id select failed: ERR=%s\n" +msgstr "" + +#: src/cats/sql_get.c:517 +#, c-format +msgid "Client id select failed: ERR=%s\n" +msgstr "" + +#: src/cats/sql_get.c:559 +#, c-format +msgid "More than one Pool!: %s\n" +msgstr "" + +#: src/cats/sql_get.c:600 +msgid "Pool record not found in Catalog.\n" +msgstr "" + +#: src/cats/sql_get.c:634 +#, c-format +msgid "More than one Client!: %s\n" +msgstr "" + +#: src/cats/sql_get.c:651 src/cats/sql_get.c:655 +msgid "Client record not found in Catalog.\n" +msgstr "" + +#: src/cats/sql_get.c:680 +#, c-format +msgid "More than one Counter!: %d\n" +msgstr "" + +#: src/cats/sql_get.c:685 +#, c-format +msgid "error fetching Counter row: %s\n" +msgstr "" + +#: src/cats/sql_get.c:705 +#, c-format +msgid "Counter record: %s not found in Catalog.\n" +msgstr "" + +#: src/cats/sql_get.c:741 +#, c-format +msgid "Error got %s FileSets but expected only one!\n" +msgstr "" + +#: src/cats/sql_get.c:746 +#, c-format +msgid "FileSet record \"%s\" not found.\n" +msgstr "" + +#: src/cats/sql_get.c:756 +msgid "FileSet record not found in Catalog.\n" +msgstr "" + +#: src/cats/sql_get.c:813 +#, c-format +msgid "Media id select failed: ERR=%s\n" +msgstr "" + +#: src/cats/sql_get.c:861 +#, c-format +msgid "More than one Volume!: %s\n" +msgstr "" + +#: src/cats/sql_get.c:906 +#, c-format +msgid "Media record MediaId=%s not found.\n" +msgstr "" + +#: src/cats/sql_get.c:909 +#, c-format +msgid "Media record for Volume \"%s\" not found.\n" +msgstr "" + +#: src/cats/sql_get.c:916 +#, c-format +msgid "Media record for MediaId=%u not found in Catalog.\n" +msgstr "" + +#: src/cats/sql_get.c:919 +#, c-format +msgid "Media record for Vol=%s not found in Catalog.\n" +msgstr "" + +#: src/cats/sql_list.c:53 +#, c-format +msgid "Query failed: %s\n" +msgstr "" + +#: src/cats/sqlite.c:136 +#, c-format +msgid "Database %s does not exist, please create it.\n" +msgstr "" + +#: src/cats/sqlite.c:161 +#, c-format +msgid "Unable to open Database=%s. 
ERR=%s\n" +msgstr "" + +#: src/cats/sqlite.c:162 src/lib/bnet_server.c:371 +msgid "unknown" +msgstr "" + +#: src/cats/sqlite.c:221 +#, c-format +msgid "next_index query error: ERR=%s\n" +msgstr "" + +#: src/cats/sqlite.c:226 +#, c-format +msgid "Error fetching index: ERR=%s\n" +msgstr "" + +#: src/cats/sqlite.c:236 +#, c-format +msgid "next_index update error: ERR=%s\n" +msgstr "" + +#: src/console/authenticate.c:100 src/dird/authenticate.c:106 +#: src/dird/authenticate.c:206 src/filed/authenticate.c:119 +#: src/filed/authenticate.c:215 src/stored/authenticate.c:128 +#: src/stored/authenticate.c:232 src/wx-console/authenticate.c:106 +msgid "" +"Authorization problem: Remote server did not advertise required TLS " +"support.\n" +msgstr "" + +#: src/console/authenticate.c:107 src/dird/authenticate.c:113 +#: src/dird/authenticate.c:213 src/filed/authenticate.c:127 +#: src/filed/authenticate.c:223 src/stored/authenticate.c:136 +#: src/stored/authenticate.c:240 src/wx-console/authenticate.c:112 +msgid "Authorization problem: Remote server requires TLS.\n" +msgstr "" + +#: src/console/authenticate.c:117 src/wx-console/authenticate.c:121 +msgid "TLS negotiation failed\n" +msgstr "" + +#: src/console/authenticate.c:129 src/gnome2-console/authenticate.c:85 +#: src/tray-monitor/authenticate.c:87 +#, c-format +msgid "Bad response to Hello command: ERR=%s\n" +msgstr "" + +#: src/console/authenticate.c:136 src/gnome2-console/authenticate.c:95 +#: src/tray-monitor/authenticate.c:94 src/wx-console/authenticate.c:136 +msgid "Director rejected Hello command\n" +msgstr "" + +#: src/console/authenticate.c:146 src/wx-console/authenticate.c:146 +msgid "" +"Director authorization problem.\n" +"Most likely the passwords do not agree.\n" +"If you are using TLS, there may have been a certificate validation error " +"during the TLS handshake.\n" +"Please see http://www.bacula.org/rel-manual/faq.html#AuthorizationErrors for " +"help.\n" +msgstr "" + +#: src/console/console.c:102 +#, c-format +msgid "" +"Copyright (C) 2000-2016 Kern Sibbald +"\n" +"Version: " +msgstr "" + +#: src/console/console.c:103 src/dird/admin.c:108 +#, c-format +msgid " (" +msgstr "" + +#: src/console/console.c:103 +#, c-format +msgid "" +") %s %s %s\n" +"\n" +"Usage: bconsole [-s] [-c config_file] [-d debug_level]\n" +" -c set configuration file to file\n" +" -dnn set debug level to nn\n" +" -s no signals\n" +" -t test - read configuration and exit\n" +" -? 
print this message.\n" +"\n" +msgstr "" + +#: src/console/console.c:150 +msgid "input from file" +msgstr "" + +#: src/console/console.c:151 +msgid "output to file" +msgstr "" + +#: src/console/console.c:152 src/dird/ua_cmds.c:110 +msgid "quit" +msgstr "" + +#: src/console/console.c:153 +msgid "output to file and terminal" +msgstr "" + +#: src/console/console.c:154 +msgid "sleep specified time" +msgstr "" + +#: src/console/console.c:155 src/dird/ua_cmds.c:122 +msgid "print current time" +msgstr "" + +#: src/console/console.c:156 +msgid "print Console's version" +msgstr "" + +#: src/console/console.c:157 src/dird/ua_cmds.c:99 +msgid "exit = quit" +msgstr "" + +#: src/console/console.c:158 +msgid "zed_keys = use zed keys instead of bash keys" +msgstr "" + +#: src/console/console.c:191 src/dird/ua_dotcmds.c:108 +msgid ": is an illegal command\n" +msgstr "" + +#: src/console/console.c:400 src/filed/filed.c:183 +#: src/gnome2-console/console.c:271 +msgid "TLS library initialization failed.\n" +msgstr "" + +#: src/console/console.c:404 src/dird/dird.c:200 src/dird/dird.c:410 +#: src/dird/dird.c:413 src/filed/filed.c:188 src/gnome2-console/console.c:275 +#: src/stored/stored.c:193 +#, c-format +msgid "Please correct configuration file: %s\n" +msgstr "" + +#: src/console/console.c:421 +msgid "Available Directors:\n" +msgstr "" + +#: src/console/console.c:425 +#, c-format +msgid "%d %s at %s:%d\n" +msgstr "" + +#: src/console/console.c:429 +msgid "Select Director: " +msgstr "" + +#: src/console/console.c:435 +#, c-format +msgid "You must enter a number between 1 and %d\n" +msgstr "" + +#: src/console/console.c:455 src/tray-monitor/tray-monitor.c:858 +#, c-format +msgid "Connecting to Director %s:%d\n" +msgstr "" + +#: src/console/console.c:471 src/gnome2-console/console.c:504 +#: src/wx-console/console_thread.cpp:370 +#, c-format +msgid "Failed to initialize TLS context for Console \"%s\".\n" +msgstr "" + +#: src/console/console.c:492 src/gnome2-console/console.c:526 +#: src/wx-console/console_thread.cpp:391 +#, c-format +msgid "Failed to initialize TLS context for Director \"%s\".\n" +msgstr "" + +#: src/console/console.c:515 +msgid "Enter a period to cancel a command.\n" +msgstr "" + +#: src/console/console.c:582 src/console/console.c:611 src/dird/dird.c:496 +#: src/dird/dird.c:711 src/dird/dird.c:777 src/dird/dird.c:829 +#: src/filed/filed.c:302 src/filed/filed.c:348 +#: src/gnome2-console/console.c:140 src/gnome2-console/console.c:169 +#: src/stored/stored.c:309 src/wx-console/console_thread.cpp:94 +#: src/wx-console/console_thread.cpp:120 +msgid "TLS required but not configured in Bacula.\n" +msgstr "" + +#: src/console/console.c:589 src/gnome2-console/console.c:147 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Director \"%s\" in %s. 
At least one CA certificate store is required.\n" +msgstr "" + +#: src/console/console.c:598 src/gnome2-console/console.c:156 +#, c-format +msgid "" +"No Director resource defined in %s\n" +"Without that I don't how to speak to the Director :-(\n" +msgstr "" + +#: src/console/console.c:618 src/gnome2-console/console.c:176 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Console \"%s\" in %s.\n" +msgstr "" + +#: src/console/console.c:763 +msgid "Too many arguments on input command.\n" +msgstr "" + +#: src/console/console.c:767 +msgid "First argument to input command must be a filename.\n" +msgstr "" + +#: src/console/console.c:772 +#, c-format +msgid "Cannot open file %s for input. ERR=%s\n" +msgstr "" + +#: src/console/console.c:802 +msgid "Too many arguments on output/tee command.\n" +msgstr "" + +#: src/console/console.c:818 +#, c-format +msgid "Cannot open file %s for output. ERR=%s\n" +msgstr "" + +#: src/console/console_conf.c:123 src/gnome2-console/console_conf.c:122 +#: src/wx-console/console_conf.c:128 +#, c-format +msgid "No record for %d %s\n" +msgstr "" + +#: src/console/console_conf.c:132 src/wx-console/console_conf.c:137 +#, c-format +msgid "Console: name=%s rcfile=%s histfile=%s\n" +msgstr "" + +#: src/console/console_conf.c:136 src/gnome2-console/console_conf.c:131 +#: src/wx-console/console_conf.c:141 +#, c-format +msgid "Director: name=%s address=%s DIRport=%d\n" +msgstr "" + +#: src/console/console_conf.c:140 src/console/console_conf.c:216 +#: src/console/console_conf.c:261 src/console/console_conf.c:288 +#: src/filed/filed_conf.c:268 src/filed/filed_conf.c:327 +#: src/filed/filed_conf.c:357 src/gnome2-console/console_conf.c:142 +#: src/gnome2-console/console_conf.c:220 src/gnome2-console/console_conf.c:268 +#: src/gnome2-console/console_conf.c:298 src/stored/stored_conf.c:510 +#: src/stored/stored_conf.c:598 src/stored/stored_conf.c:633 +#: src/wx-console/console_conf.c:145 src/wx-console/console_conf.c:220 +#: src/wx-console/console_conf.c:265 src/wx-console/console_conf.c:292 +#, c-format +msgid "Unknown resource type %d\n" +msgstr "" + +#: src/console/console_conf.c:242 src/dird/dird_conf.c:1112 +#: src/dird/dird_conf.c:1127 src/filed/filed_conf.c:296 +#: src/gnome2-console/console_conf.c:246 src/tray-monitor/tray_conf.c:262 +#: src/wx-console/console_conf.c:246 +#, c-format +msgid "%s item is required in %s resource, but not found.\n" +msgstr "" + +#: src/console/console_conf.c:304 src/dird/dird_conf.c:1299 +#: src/filed/filed_conf.c:374 src/gnome2-console/console_conf.c:314 +#: src/tray-monitor/tray_conf.c:341 src/wx-console/console_conf.c:308 +#, c-format +msgid "Attempt to define second %s resource named \"%s\" is not permitted.\n" +msgstr "" + +#: src/dird/admin.c:55 +#, c-format +msgid "Start Admin JobId %d, Job=%s\n" +msgstr "" + +#: src/dird/admin.c:82 src/dird/backup.c:343 src/dird/mac.c:287 +#, c-format +msgid "Error getting job record for stats: %s" +msgstr "" + +#: src/dird/admin.c:90 +msgid "Admin OK" +msgstr "" + +#: src/dird/admin.c:94 +msgid "*** Admin Error ***" +msgstr "" + +#: src/dird/admin.c:98 +msgid "Admin Canceled" +msgstr "" + +#: src/dird/admin.c:102 src/dird/backup.c:455 src/dird/mac.c:389 +#: src/dird/restore.c:246 +#, c-format +msgid "Inappropriate term code: %c\n" +msgstr "" + +#: src/dird/admin.c:108 +msgid "Bacula " +msgstr "" + +#: src/dird/admin.c:108 +#, c-format +msgid "" +"): %s\n" +" JobId: %d\n" +" Job: %s\n" +" Start time: %s\n" +" End time: %s\n" +" Termination: %s\n" 
+"\n" +msgstr "" + +#: src/dird/authenticate.c:67 src/dird/authenticate.c:68 +#: src/tray-monitor/authenticate.c:121 +#, c-format +msgid "Error sending Hello to Storage daemon. ERR=%s\n" +msgstr "" + +#: src/dird/authenticate.c:93 +msgid "Director and Storage daemon passwords or names not the same.\n" +msgstr "" + +#: src/dird/authenticate.c:95 +msgid "" +"Director unable to authenticate with Storage daemon. Possible causes:\n" +"Passwords or names not the same or\n" +"Maximum Concurrent Jobs exceeded on the SD or\n" +"SD networking messed up (restart daemon).\n" +"Please see http://www.bacula.org/rel-manual/faq.html#AuthorizationErrors for " +"help.\n" +msgstr "" + +#: src/dird/authenticate.c:123 src/dird/authenticate.c:223 +#: src/dird/authenticate.c:356 src/filed/authenticate.c:136 +#: src/filed/authenticate.c:232 src/stored/authenticate.c:145 +#: src/stored/authenticate.c:249 +msgid "TLS negotiation failed.\n" +msgstr "" + +#: src/dird/authenticate.c:132 src/tray-monitor/authenticate.c:134 +#, c-format +msgid "bdird set configuration file to file\n" +" -dnn set debug level to nn\n" +" -f run in foreground (for debugging)\n" +" -g groupid\n" +" -r run now\n" +" -s no signals\n" +" -t test - read configuration and exit\n" +" -u userid\n" +" -v verbose user messages\n" +" -? print this message.\n" +"\n" +msgstr "" + +#: src/dird/dird.c:196 src/stored/stored.c:189 +msgid "Cryptography library initialization failed.\n" +msgstr "" + +#: src/dird/dird.c:396 +msgid "Too many open reload requests. Request ignored.\n" +msgstr "" + +#: src/dird/dird.c:411 +msgid "Out of reload table entries. Giving up.\n" +msgstr "" + +#: src/dird/dird.c:414 +msgid "Resetting previous configuration.\n" +msgstr "" + +#: src/dird/dird.c:474 +#, c-format +msgid "" +"No Director resource defined in %s\n" +"Without that I don't know who I am :-(\n" +msgstr "" + +#: src/dird/dird.c:482 src/filed/filed.c:295 +#, c-format +msgid "No Messages resource defined in %s\n" +msgstr "" + +#: src/dird/dird.c:487 +#, c-format +msgid "Only one Director resource permitted in %s\n" +msgstr "" + +#: src/dird/dird.c:502 src/filed/filed.c:357 src/stored/stored.c:361 +#, c-format +msgid "\"TLS Certificate\" file not defined for Director \"%s\" in %s.\n" +msgstr "" + +#: src/dird/dird.c:508 src/filed/filed.c:363 src/stored/stored.c:367 +#, c-format +msgid "\"TLS Key\" file not defined for Director \"%s\" in %s.\n" +msgstr "" + +#: src/dird/dird.c:514 src/filed/filed.c:369 src/stored/stored.c:373 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Director \"%s\" in %s. At least one CA certificate store is required when " +"using \"TLS Verify Peer\".\n" +msgstr "" + +#: src/dird/dird.c:533 src/filed/filed.c:388 src/stored/stored.c:392 +#, c-format +msgid "Failed to initialize TLS context for Director \"%s\" in %s.\n" +msgstr "" + +#: src/dird/dird.c:541 +#, c-format +msgid "No Job records defined in %s\n" +msgstr "" + +#: src/dird/dird.c:585 src/dird/dird.c:598 +#, c-format +msgid "Hey something is wrong. 
p=0x%lu\n" +msgstr "" + +#: src/dird/dird.c:647 +#, c-format +msgid "\"%s\" directive in Job \"%s\" resource is required, but not found.\n" +msgstr "" + +#: src/dird/dird.c:654 +msgid "Too many items in Job resource\n" +msgstr "" + +#: src/dird/dird.c:672 src/dird/job.c:101 src/dird/ua_cmds.c:1480 +#: src/dird/ua_output.c:600 +#, c-format +msgid "Could not open database \"%s\".\n" +msgstr "" + +#: src/dird/dird.c:675 +#, c-format +msgid "%s" +msgstr "" + +#: src/dird/dird.c:717 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Storage \"%s\" in %s.\n" +msgstr "" + +#: src/dird/dird.c:733 src/stored/stored.c:347 +#, c-format +msgid "Failed to initialize TLS context for Storage \"%s\" in %s.\n" +msgstr "" + +#: src/dird/dird.c:784 +#, c-format +msgid "\"TLS Certificate\" file not defined for Console \"%s\" in %s.\n" +msgstr "" + +#: src/dird/dird.c:790 +#, c-format +msgid "\"TLS Key\" file not defined for Console \"%s\" in %s.\n" +msgstr "" + +#: src/dird/dird.c:796 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Console \"%s\" in %s. At least one CA certificate store is required when " +"using \"TLS Verify Peer\".\n" +msgstr "" + +#: src/dird/dird.c:813 src/dird/dird.c:853 src/filed/filed.c:326 +#, c-format +msgid "Failed to initialize TLS context for File daemon \"%s\" in %s.\n" +msgstr "" + +#: src/dird/dird.c:836 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"File daemon \"%s\" in %s.\n" +msgstr "" + +#: src/dird/dird_conf.c:443 src/tray-monitor/tray_conf.c:152 +#, c-format +msgid "No %s resource defined\n" +msgstr "" + +#: src/dird/dird_conf.c:452 +#, c-format +msgid "Director: name=%s MaxJobs=%d FDtimeout=%s SDtimeout=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:457 +#, c-format +msgid " query_file=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:460 src/dird/dird_conf.c:485 src/dird/dird_conf.c:499 +#: src/dird/dird_conf.c:544 src/dird/dird_conf.c:548 src/dird/dird_conf.c:552 +#: src/dird/dird_conf.c:576 src/dird/dird_conf.c:581 src/dird/dird_conf.c:585 +#: src/dird/dird_conf.c:589 src/dird/dird_conf.c:593 src/dird/dird_conf.c:597 +#: src/dird/dird_conf.c:607 +msgid " --> " +msgstr "" + +#: src/dird/dird_conf.c:466 src/dird/dird_conf.c:469 +#, c-format +msgid "Console: name=%s SSL=%d\n" +msgstr "" + +#: src/dird/dird_conf.c:475 +#, c-format +msgid "Counter: name=%s min=%d max=%d cur=%d wrapcntr=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:480 +#, c-format +msgid "Counter: name=%s min=%d max=%d\n" +msgstr "" + +#: src/dird/dird_conf.c:491 +#, c-format +msgid "Client: name=%s address=%s FDport=%d MaxJobs=%u\n" +msgstr "" + +#: src/dird/dird_conf.c:494 +#, c-format +msgid " JobRetention=%s FileRetention=%s AutoPrune=%d\n" +msgstr "" + +#: src/dird/dird_conf.c:506 +#, c-format +msgid "" +"Device: name=%s ok=%d num_writers=%d max_writers=%d\n" +" reserved=%d open=%d append=%d read=%d labeled=%d offline=%d autochgr=%" +"d\n" +" poolid=%s volname=%s MediaType=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:516 +#, c-format +msgid "" +"Storage: name=%s address=%s SDport=%d MaxJobs=%u\n" +" DeviceName=%s MediaType=%s StorageId=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:525 +#, c-format +msgid "" +"Catalog: name=%s address=%s DBport=%d db_name=%s\n" +" db_user=%s MutliDBConn=%d\n" +msgstr "" + +#: src/dird/dird_conf.c:533 +#, c-format +msgid "%s: name=%s JobType=%d level=%s Priority=%d Enabled=%d\n" +msgstr "" + +#: 
src/dird/dird_conf.c:534 src/dird/ua_cmds.c:430 src/dird/ua_prune.c:454 +#: src/dird/ua_run.c:643 src/dird/ua_select.c:234 src/dird/ua_select.c:257 +msgid "Job" +msgstr "" + +#: src/dird/dird_conf.c:534 +msgid "JobDefs" +msgstr "" + +#: src/dird/dird_conf.c:538 +#, c-format +msgid "" +" MaxJobs=%u Resched=%d Times=%d Interval=%s Spool=%d WritePartAfterJob=%" +"d\n" +msgstr "" + +#: src/dird/dird_conf.c:556 +#, c-format +msgid " --> Where=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:559 +#, c-format +msgid " --> Bootstrap=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:562 +#, c-format +msgid " --> RunBefore=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:565 +#, c-format +msgid " --> RunAfter=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:568 +#, c-format +msgid " --> RunAfterFailed=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:571 +#, c-format +msgid " --> WriteBootstrap=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:603 +#, c-format +msgid " --> Run=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:614 +#, c-format +msgid "FileSet: name=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:676 src/dird/dird_conf.c:755 +#, c-format +msgid "Schedule: name=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:681 +#, c-format +msgid " --> Run Level=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:682 +msgid " hour=" +msgstr "" + +#: src/dird/dird_conf.c:691 +msgid " mday=" +msgstr "" + +#: src/dird/dird_conf.c:700 +msgid " month=" +msgstr "" + +#: src/dird/dird_conf.c:709 +msgid " wday=" +msgstr "" + +#: src/dird/dird_conf.c:718 +msgid " wom=" +msgstr "" + +#: src/dird/dird_conf.c:727 +msgid " woy=" +msgstr "" + +#: src/dird/dird_conf.c:736 +#, c-format +msgid " mins=%d\n" +msgstr "" + +#: src/dird/dird_conf.c:738 src/dird/dird_conf.c:742 src/dird/dird_conf.c:746 +msgid " --> " +msgstr "" + +#: src/dird/dird_conf.c:759 +#, c-format +msgid "Pool: name=%s PoolType=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:761 +#, c-format +msgid " use_cat=%d use_once=%d acpt_any=%d cat_files=%d\n" +msgstr "" + +#: src/dird/dird_conf.c:764 +#, c-format +msgid " max_vols=%d auto_prune=%d VolRetention=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:767 +#, c-format +msgid " VolUse=%s recycle=%d LabelFormat=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:771 +#, c-format +msgid " CleaningPrefix=%s LabelType=%d\n" +msgstr "" + +#: src/dird/dird_conf.c:773 +#, c-format +msgid " RecyleOldest=%d PurgeOldest=%d MaxVolJobs=%d MaxVolFiles=%d\n" +msgstr "" + +#: src/dird/dird_conf.c:779 +#, c-format +msgid "Messages: name=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:781 +#, c-format +msgid " mailcmd=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:783 +#, c-format +msgid " opcmd=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:786 src/tray-monitor/tray_conf.c:179 +#, c-format +msgid "Unknown resource type %d in dump_resource.\n" +msgstr "" + +#: src/dird/dird_conf.c:1080 src/tray-monitor/tray_conf.c:232 +#, c-format +msgid "Unknown resource type %d in free_resource.\n" +msgstr "" + +#: src/dird/dird_conf.c:1118 src/lib/parse_conf.c:211 +#: src/tray-monitor/tray_conf.c:268 +#, c-format +msgid "Too many items in %s resource\n" +msgstr "" + +#: src/dird/dird_conf.c:1152 +#, c-format +msgid "Cannot find Console resource %s\n" +msgstr "" + +#: src/dird/dird_conf.c:1158 src/filed/filed_conf.c:316 +#: src/stored/stored_conf.c:566 +#, c-format +msgid "Cannot find Director resource %s\n" +msgstr "" + +#: src/dird/dird_conf.c:1165 src/stored/stored_conf.c:572 +#, c-format +msgid "Cannot find Storage resource %s\n" +msgstr "" + +#: src/dird/dird_conf.c:1174 +#, c-format +msgid "Cannot find Job 
resource %s\n" +msgstr "" + +#: src/dird/dird_conf.c:1192 +#, c-format +msgid "Cannot find Counter resource %s\n" +msgstr "" + +#: src/dird/dird_conf.c:1200 src/filed/filed_conf.c:322 +#, c-format +msgid "Cannot find Client resource %s\n" +msgstr "" + +#: src/dird/dird_conf.c:1212 +#, c-format +msgid "Cannot find Schedule resource %s\n" +msgstr "" + +#: src/dird/dird_conf.c:1217 src/tray-monitor/tray_conf.c:287 +#, c-format +msgid "Unknown resource type %d in save_resource.\n" +msgstr "" + +#: src/dird/dird_conf.c:1277 src/tray-monitor/tray_conf.c:322 +#, c-format +msgid "Unknown resource type %d in save_resrouce.\n" +msgstr "" + +#: src/dird/dird_conf.c:1292 +#, c-format +msgid "Name item is required in %s resource, but not found.\n" +msgstr "" + +#: src/dird/dird_conf.c:1304 +#, c-format +msgid "Inserting %s res: %s index=%d pass=%d\n" +msgstr "" + +#: src/dird/dird_conf.c:1377 +#, c-format +msgid "Expected a Job Type keyword, got: %s" +msgstr "" + +#: src/dird/dird_conf.c:1401 +#, c-format +msgid "Expected a Job Level keyword, got: %s" +msgstr "" + +#: src/dird/dird_conf.c:1421 +#, c-format +msgid "Expected a Restore replacement option, got: %s" +msgstr "" + +#: src/dird/expand.c:240 +#, c-format +msgid "Count not update counter %s: ERR=%s\n" +msgstr "" + +#: src/dird/expand.c:412 +#, c-format +msgid "Cannot create var context: ERR=%s\n" +msgstr "" + +#: src/dird/expand.c:417 +#, c-format +msgid "Cannot set var callback: ERR=%s\n" +msgstr "" + +#: src/dird/expand.c:423 +#, c-format +msgid "Cannot set var operate: ERR=%s\n" +msgstr "" + +#: src/dird/expand.c:429 src/dird/expand.c:444 +#, c-format +msgid "Cannot unescape string: ERR=%s\n" +msgstr "" + +#: src/dird/expand.c:437 +#, c-format +msgid "Cannot expand expression \"%s\": ERR=%s\n" +msgstr "" + +#: src/dird/expand.c:455 +#, c-format +msgid "Cannot destroy var context: ERR=%s\n" +msgstr "" + +#: src/dird/fd_cmds.c:73 src/tray-monitor/tray-monitor.c:867 +msgid "File daemon" +msgstr "" + +#: src/dird/fd_cmds.c:104 +#, c-format +msgid "File daemon \"%s\" rejected Job command: %s\n" +msgstr "" + +#: src/dird/fd_cmds.c:117 +#, c-format +msgid "Error updating Client record. ERR=%s\n" +msgstr "" + +#: src/dird/fd_cmds.c:122 +#, c-format +msgid "FD gave bad response to JobId command: %s\n" +msgstr "" + +#: src/dird/fd_cmds.c:144 src/dird/fd_cmds.c:180 +msgid ", since=" +msgstr "" + +#: src/dird/fd_cmds.c:164 +msgid "No prior or suitable Full backup found. Doing FULL backup.\n" +msgstr "" + +#: src/dird/fd_cmds.c:165 src/dird/fd_cmds.c:173 +#, c-format +msgid " (upgraded from %s)" +msgstr "" + +#: src/dird/fd_cmds.c:171 +#, c-format +msgid "Prior failed job found. Upgrading to %s.\n" +msgstr "" + +#: src/dird/fd_cmds.c:196 +msgid "since_utime " +msgstr "" + +#: src/dird/fd_cmds.c:232 +#, c-format +msgid "Unimplemented backup level %d %c\n" +msgstr "" + +#: src/dird/fd_cmds.c:320 src/filed/job.c:566 +#, c-format +msgid "Cannot run program: %s. ERR=%s\n" +msgstr "" + +#: src/dird/fd_cmds.c:331 src/dird/fd_cmds.c:356 src/dird/fd_cmds.c:370 +msgid ">filed: write error on socket\n" +msgstr "" + +#: src/dird/fd_cmds.c:337 +#, c-format +msgid "Error running program: %s. ERR=%s\n" +msgstr "" + +#: src/dird/fd_cmds.c:346 +#, c-format +msgid "Cannot open included file: %s. 
ERR=%s\n" +msgstr "" + +#: src/dird/fd_cmds.c:441 src/filed/job.c:1622 +#, c-format +msgid "Could not open bootstrap file %s: ERR=%s\n" +msgstr "" + +#: src/dird/fd_cmds.c:526 +#, c-format +msgid "" +" | ] -- cancel a job" +msgstr "" + +#: src/dird/ua_cmds.c:94 +msgid "create DB Pool from resource" +msgstr "" + +#: src/dird/ua_cmds.c:95 +msgid "delete [pool= | media volume=]" +msgstr "" + +#: src/dird/ua_cmds.c:96 +msgid "disable -- disable a job" +msgstr "" + +#: src/dird/ua_cmds.c:97 +msgid "enable -- enable a job" +msgstr "" + +#: src/dird/ua_cmds.c:98 +msgid "performs FileSet estimate, listing gives full listing" +msgstr "" + +#: src/dird/ua_cmds.c:100 +msgid "gui [on|off] -- non-interactive gui mode" +msgstr "" + +#: src/dird/ua_cmds.c:101 src/stored/btape.c:2540 +msgid "print this command" +msgstr "" + +#: src/dird/ua_cmds.c:102 +msgid "" +"list [pools | jobs | jobtotals | media | files ]; " +"from catalog" +msgstr "" + +#: src/dird/ua_cmds.c:103 +msgid "label a tape" +msgstr "" + +#: src/dird/ua_cmds.c:104 +msgid "full or long list like list command" +msgstr "" + +#: src/dird/ua_cmds.c:105 +msgid "messages" +msgstr "" + +#: src/dird/ua_cmds.c:106 +msgid "mount " +msgstr "" + +#: src/dird/ua_cmds.c:107 +msgid "prune expired records from catalog" +msgstr "" + +#: src/dird/ua_cmds.c:108 +msgid "purge records from catalog" +msgstr "" + +#: src/dird/ua_cmds.c:109 +msgid "python control commands" +msgstr "" + +#: src/dird/ua_cmds.c:111 +msgid "query catalog" +msgstr "" + +#: src/dird/ua_cmds.c:112 +msgid "restore files" +msgstr "" + +#: src/dird/ua_cmds.c:113 +msgid "relabel a tape" +msgstr "" + +#: src/dird/ua_cmds.c:114 +msgid "release " +msgstr "" + +#: src/dird/ua_cmds.c:115 +msgid "reload conf file" +msgstr "" + +#: src/dird/ua_cmds.c:116 +msgid "run " +msgstr "" + +#: src/dird/ua_cmds.c:117 +msgid "status [storage | client]=" +msgstr "" + +#: src/dird/ua_cmds.c:118 +msgid "sets debug level" +msgstr "" + +#: src/dird/ua_cmds.c:119 +msgid "sets new client address -- if authorized" +msgstr "" + +#: src/dird/ua_cmds.c:120 +msgid "show (resource records) [jobs | pools | ... | all]" +msgstr "" + +#: src/dird/ua_cmds.c:121 +msgid "use SQL to query catalog" +msgstr "" + +#: src/dird/ua_cmds.c:123 +msgid "turn on/off trace to file" +msgstr "" + +#: src/dird/ua_cmds.c:124 +msgid "unmount " +msgstr "" + +#: src/dird/ua_cmds.c:125 +msgid "umount for old-time Unix guys" +msgstr "" + +#: src/dird/ua_cmds.c:126 +msgid "update Volume, Pool or slots" +msgstr "" + +#: src/dird/ua_cmds.c:127 +msgid "use catalog xxx" +msgstr "" + +#: src/dird/ua_cmds.c:128 +msgid "does variable expansion" +msgstr "" + +#: src/dird/ua_cmds.c:129 +msgid "print Director version" +msgstr "" + +#: src/dird/ua_cmds.c:130 +msgid "wait until no jobs are running" +msgstr "" + +#: src/dird/ua_cmds.c:168 +#, c-format +msgid "%s: is an illegal command.\n" +msgstr "" + +#: src/dird/ua_cmds.c:206 +msgid "" +"You probably don't want to be using this command since it\n" +"creates database records without labeling the Volumes.\n" +"You probably want to use the \"label\" command.\n" +"\n" +msgstr "" + +#: src/dird/ua_cmds.c:225 +#, c-format +msgid "Pool already has maximum volumes = %d\n" +msgstr "" + +#: src/dird/ua_cmds.c:227 +msgid "Enter new maximum (zero for unlimited): " +msgstr "" + +#: src/dird/ua_cmds.c:248 +#, c-format +msgid "Enter number of Volumes to create. 0=>fixed name. 
Max=%d: " +msgstr "" + +#: src/dird/ua_cmds.c:254 +#, c-format +msgid "The number must be between 0 and %d\n" +msgstr "" + +#: src/dird/ua_cmds.c:261 +msgid "Enter Volume name: " +msgstr "" + +#: src/dird/ua_cmds.c:265 +msgid "Enter base volume name: " +msgstr "" + +#: src/dird/ua_cmds.c:274 src/dird/ua_label.c:605 +msgid "Volume name too long.\n" +msgstr "" + +#: src/dird/ua_cmds.c:278 src/dird/ua_label.c:611 src/lib/edit.c:413 +msgid "Volume name must be at least one character long.\n" +msgstr "" + +#: src/dird/ua_cmds.c:287 +msgid "Enter the starting number: " +msgstr "" + +#: src/dird/ua_cmds.c:292 +msgid "Start number must be greater than zero.\n" +msgstr "" + +#: src/dird/ua_cmds.c:303 +msgid "Enter slot (0 for none): " +msgstr "" + +#: src/dird/ua_cmds.c:307 +msgid "InChanger? yes/no: " +msgstr "" + +#: src/dird/ua_cmds.c:334 +#, c-format +msgid "%d Volumes created in pool %s\n" +msgstr "" + +#: src/dird/ua_cmds.c:350 src/dird/ua_cmds.c:917 +msgid "Turn on or off? " +msgstr "" + +#: src/dird/ua_cmds.c:358 src/dird/ua_cmds.c:925 +msgid "off" +msgstr "" + +#: src/dird/ua_cmds.c:378 +msgid "jobid" +msgstr "" + +#: src/dird/ua_cmds.c:385 +#, c-format +msgid "JobId %s is not running. Use Job name to cancel inactive jobs.\n" +msgstr "" + +#: src/dird/ua_cmds.c:389 +msgid "job" +msgstr "" + +#: src/dird/ua_cmds.c:394 +#, c-format +msgid "Warning Job %s is not running. Continuing anyway ...\n" +msgstr "" + +#: src/dird/ua_cmds.c:416 src/filed/status.c:195 src/stored/status.c:343 +msgid "No Jobs running.\n" +msgstr "" + +#: src/dird/ua_cmds.c:419 +msgid "Select Job:\n" +msgstr "" + +#: src/dird/ua_cmds.c:425 +#, c-format +msgid "JobId=%s Job=%s" +msgstr "" + +#: src/dird/ua_cmds.c:430 +msgid "Choose Job to cancel" +msgstr "" + +#: src/dird/ua_cmds.c:434 +msgid "Confirm cancel (yes/no): " +msgstr "" + +#: src/dird/ua_cmds.c:442 +#, c-format +msgid "Job %s not found.\n" +msgstr "" + +#: src/dird/ua_cmds.c:547 +#, c-format +msgid "" +"Error: Pool %s already exists.\n" +"Use update to change it.\n" +msgstr "" + +#: src/dird/ua_cmds.c:558 +#, c-format +msgid "Pool %s created.\n" +msgstr "" + +#: src/dird/ua_cmds.c:571 +msgid "restart" +msgstr "" + +#: src/dird/ua_cmds.c:575 +msgid "Python interpreter restarted.\n" +msgstr "" + +#: src/dird/ua_cmds.c:577 src/dird/ua_cmds.c:1145 +msgid "Nothing done.\n" +msgstr "" + +#: src/dird/ua_cmds.c:593 src/dird/ua_cmds.c:639 +msgid "Illegal command from this console.\n" +msgstr "" + +#: src/dird/ua_cmds.c:600 src/dird/ua_run.c:353 +#, c-format +msgid "Client \"%s\" not found.\n" +msgstr "" + +#: src/dird/ua_cmds.c:609 +#, c-format +msgid "Client \"%s\" address set to %s\n" +msgstr "" + +#: src/dird/ua_cmds.c:634 +#, c-format +msgid "Job \"%s\" not found.\n" +msgstr "" + +#: src/dird/ua_cmds.c:643 +#, c-format +msgid "Job \"%s\" %sabled\n" +msgstr "" + +#: src/dird/ua_cmds.c:667 src/dird/ua_status.c:286 +#, c-format +msgid "Connecting to Storage daemon %s at %s:%d\n" +msgstr "" + +#: src/dird/ua_cmds.c:673 src/dird/ua_status.c:297 +msgid "Connected to storage daemon\n" +msgstr "" + +#: src/dird/ua_cmds.c:693 src/dird/ua_cmds.c:1032 src/dird/ua_status.c:324 +#, c-format +msgid "Connecting to Client %s at %s:%d\n" +msgstr "" + +#: src/dird/ua_cmds.c:696 src/dird/ua_cmds.c:1035 +msgid "Failed to connect to Client.\n" +msgstr "" + +#: src/dird/ua_cmds.c:812 +msgid "Enter new debug level: " +msgstr "" + +#: src/dird/ua_cmds.c:878 +msgid "Available daemons are: \n" +msgstr "" + +#: src/dird/ua_cmds.c:883 +msgid "Select daemon type to set debug level" +msgstr "" + 
+#: src/dird/ua_cmds.c:987 src/dird/ua_run.c:478 +#, c-format +msgid "Level %s not valid.\n" +msgstr "" + +#: src/dird/ua_cmds.c:1000 +msgid "No job specified.\n" +msgstr "" + +#: src/dird/ua_cmds.c:1040 +msgid "Error sending include list.\n" +msgstr "" + +#: src/dird/ua_cmds.c:1045 +msgid "Error sending exclude list.\n" +msgstr "" + +#: src/dird/ua_cmds.c:1131 +msgid "" +"In general it is not a good idea to delete either a\n" +"Pool or a Volume since they may contain data.\n" +"\n" +msgstr "" + +#: src/dird/ua_cmds.c:1134 +msgid "Choose catalog item to delete" +msgstr "" + +#: src/dird/ua_cmds.c:1202 +msgid "Enter JobId to delete: " +msgstr "" + +#: src/dird/ua_cmds.c:1245 +#, c-format +msgid "Job %s and associated records deleted from the catalog.\n" +msgstr "" + +#: src/dird/ua_cmds.c:1258 +#, c-format +msgid "" +"\n" +"This command will delete volume %s\n" +"and all Jobs saved on that volume from the Catalog\n" +msgstr "" + +#: src/dird/ua_cmds.c:1262 +msgid "Are you sure you want to delete this Volume? (yes/no): " +msgstr "" + +#: src/dird/ua_cmds.c:1283 +msgid "Are you sure you want to delete this Pool? (yes/no): " +msgstr "" + +#: src/dird/ua_cmds.c:1380 +#, c-format +msgid "Using Catalog name=%s DB=%s\n" +msgstr "" + +#: src/dird/ua_cmds.c:1421 src/dird/ua_tree.c:622 src/stored/btape.c:2587 +#, c-format +msgid "" +" Command Description\n" +" ======= ===========\n" +msgstr "" + +#: src/dird/ua_cmds.c:1423 +#, c-format +msgid " %-10s %s\n" +msgstr "" + +#: src/dird/ua_cmds.c:1425 +msgid "" +"\n" +"When at a prompt, entering a period cancels the command.\n" +"\n" +msgstr "" + +#: src/dird/ua_cmds.c:1441 +#, c-format +msgid "%s Version: %s (%s)\n" +msgstr "" + +#: src/dird/ua_cmds.c:1460 +msgid "Could not find a Catalog resource\n" +msgstr "" + +#: src/dird/ua_cmds.c:1463 +msgid "You must specify a \"use \" command before continuing.\n" +msgstr "" + +#: src/dird/ua_cmds.c:1467 +#, c-format +msgid "Using default Catalog name=%s DB=%s\n" +msgstr "" + +#: src/dird/ua_dotcmds.c:131 +msgid "The Director will segment fault.\n" +msgstr "" + +#: src/dird/ua_dotcmds.c:231 src/dird/ua_restore.c:740 +#: src/dird/ua_restore.c:778 src/dird/ua_restore.c:809 +#, c-format +msgid "Query failed: %s. ERR=%s\n" +msgstr "" + +#: src/dird/ua_input.c:82 +msgid "Enter slot" +msgstr "" + +#: src/dird/ua_input.c:86 src/dird/ua_input.c:92 +#, c-format +msgid "Expected a positive integer, got: %s\n" +msgstr "" + +#: src/dird/ua_input.c:120 src/dird/ua_run.c:862 src/dird/ua_select.c:54 +#: src/dird/ua_update.c:223 src/dird/ua_update.c:237 src/dird/ua_update.c:247 +#: src/dird/ua_update.c:261 src/dird/ua_update.c:551 +#: src/stored/parse_bsr.c:741 src/tools/dbcheck.c:1098 +msgid "yes" +msgstr "" + +#: src/dird/ua_input.c:124 src/dird/ua_select.c:57 src/dird/ua_update.c:225 +#: src/dird/ua_update.c:237 src/dird/ua_update.c:249 src/dird/ua_update.c:261 +#: src/dird/ua_update.c:551 src/stored/parse_bsr.c:741 +msgid "no" +msgstr "" + +#: src/dird/ua_input.c:127 +msgid "Invalid response. 
You must answer yes or no.\n" +msgstr "" + +#: src/dird/ua_label.c:89 +msgid "Negative numbers not permitted\n" +msgstr "" + +#: src/dird/ua_label.c:95 +msgid "Range end is not integer.\n" +msgstr "" + +#: src/dird/ua_label.c:100 +msgid "Range start is not an integer.\n" +msgstr "" + +#: src/dird/ua_label.c:106 +msgid "Range end not bigger than start.\n" +msgstr "" + +#: src/dird/ua_label.c:112 +msgid "Input value is not an integer.\n" +msgstr "" + +#: src/dird/ua_label.c:118 +msgid "Values must be be greater than zero.\n" +msgstr "" + +#: src/dird/ua_label.c:122 +msgid "Slot too large.\n" +msgstr "" + +#: src/dird/ua_label.c:176 src/dird/ua_label.c:473 +msgid "No slots in changer to scan.\n" +msgstr "" + +#: src/dird/ua_label.c:188 src/dird/ua_label.c:484 +msgid "No Volumes found to label, or no barcodes.\n" +msgstr "" + +#: src/dird/ua_label.c:198 +#, c-format +msgid "Slot %d greater than max %d ignored.\n" +msgstr "" + +#: src/dird/ua_label.c:227 +#, c-format +msgid "No VolName for Slot=%d InChanger set to zero.\n" +msgstr "" + +#: src/dird/ua_label.c:242 +#, c-format +msgid "Catalog record for Volume \"%s\" updated to reference slot %d.\n" +msgstr "" + +#: src/dird/ua_label.c:246 +#, c-format +msgid "Catalog record for Volume \"%s\" is up to date.\n" +msgstr "" + +#: src/dird/ua_label.c:252 +#, c-format +msgid "Volume \"%s\" not found in catalog. Slot=%d InChanger set to zero.\n" +msgstr "" + +#: src/dird/ua_label.c:344 +#, c-format +msgid "" +"Volume \"%s\" has VolStatus %s. It must be Purged or Recycled before " +"relabeling.\n" +msgstr "" + +#: src/dird/ua_label.c:360 +msgid "Enter new Volume name: " +msgstr "" + +#: src/dird/ua_label.c:373 +#, c-format +msgid "Media record for new Volume \"%s\" already exists.\n" +msgstr "" + +#: src/dird/ua_label.c:388 +msgid "Enter slot (0 or Enter for none): " +msgstr "" + +#: src/dird/ua_label.c:413 +#, c-format +msgid "Delete of Volume \"%s\" failed. ERR=%s" +msgstr "" + +#: src/dird/ua_label.c:416 +#, c-format +msgid "Old volume \"%s\" deleted from catalog.\n" +msgstr "" + +#: src/dird/ua_label.c:427 +#, c-format +msgid "Requesting to mount %s ...\n" +msgstr "" + +#: src/dird/ua_label.c:449 +msgid "Do not forget to mount the drive!!!\n" +msgstr "" + +#: src/dird/ua_label.c:489 +msgid "" +"The following Volumes will be labeled:\n" +"Slot Volume\n" +"==============\n" +msgstr "" + +#: src/dird/ua_label.c:498 src/stored/btape.c:606 +msgid "Do you want to continue? 
(y/n): " +msgstr "" + +#: src/dird/ua_label.c:519 +#, c-format +msgid "Media record for Slot %d Volume \"%s\" already exists.\n" +msgstr "" + +#: src/dird/ua_label.c:525 +#, c-format +msgid "Error setting InChanger: ERR=%s" +msgstr "" + +#: src/dird/ua_label.c:548 +#, c-format +msgid "Maximum pool Volumes=%d reached.\n" +msgstr "" + +#: src/dird/ua_label.c:555 +#, c-format +msgid "Catalog record for cleaning tape \"%s\" successfully created.\n" +msgstr "" + +#: src/dird/ua_label.c:562 +#, c-format +msgid "Catalog error on cleaning tape: %s" +msgstr "" + +#: src/dird/ua_label.c:598 +#, c-format +msgid "Illegal character \"%c\" in a volume name.\n" +msgstr "" + +#: src/dird/ua_label.c:643 +#, c-format +msgid "Sending relabel command from \"%s\" to \"%s\" ...\n" +msgstr "" + +#: src/dird/ua_label.c:650 +#, c-format +msgid "Sending label command for Volume \"%s\" Slot %d ...\n" +msgstr "" + +#: src/dird/ua_label.c:682 +#, c-format +msgid "Catalog record for Volume \"%s\", Slot %d successfully created.\n" +msgstr "" + +#: src/dird/ua_label.c:695 +#, c-format +msgid "Label command failed for Volume %s.\n" +msgstr "" + +#: src/dird/ua_label.c:705 +#, c-format +msgid "Connecting to Storage daemon %s at %s:%d ...\n" +msgstr "" + +#: src/dird/ua_label.c:733 +msgid "Could not open SD socket.\n" +msgstr "" + +#: src/dird/ua_label.c:739 +#, c-format +msgid "readlabel %s Slot=%d drive=%d\n" +msgstr "" + +#: src/dird/ua_label.c:781 +#, c-format +msgid "autochanger list %s \n" +msgstr "" + +#: src/dird/ua_label.c:805 src/dird/ua_label.c:815 +#, c-format +msgid "Invalid Slot number: %s\n" +msgstr "" + +#: src/dird/ua_label.c:824 +#, c-format +msgid "Invalid Volume name: %s\n" +msgstr "" + +#: src/dird/ua_label.c:893 +#, c-format +msgid "autochanger slots %s\n" +msgstr "" + +#: src/dird/ua_label.c:903 +#, c-format +msgid "Device \"%s\" has %d slots.\n" +msgstr "" + +#: src/dird/ua_label.c:925 +#, c-format +msgid "autochanger drives %s\n" +msgstr "" + +#: src/dird/ua_label.c:952 +#, c-format +msgid "Pool \"%s\" resource not found!\n" +msgstr "" + +#: src/dird/ua_output.c:64 src/dird/ua_output.c:88 +msgid "ON or OFF keyword missing.\n" +msgstr "" + +#: src/dird/ua_output.c:176 +msgid "Keywords for the show command are:\n" +msgstr "" + +#: src/dird/ua_output.c:182 +#, c-format +msgid "%s resource %s not found.\n" +msgstr "" + +#: src/dird/ua_output.c:185 +#, c-format +msgid "Resource %s not found\n" +msgstr "" + +#: src/dird/ua_output.c:251 +msgid "Hey! DB is NULL\n" +msgstr "" + +#: src/dird/ua_output.c:358 +#, c-format +msgid "Jobid %d used %d Volume(s): %s\n" +msgstr "" + +#: src/dird/ua_output.c:376 +msgid "No Pool specified.\n" +msgstr "" + +#: src/dird/ua_output.c:387 src/dird/ua_select.c:458 +#, c-format +msgid "Error obtaining pool ids. 
ERR=%s\n" +msgstr "" + +#: src/dird/ua_output.c:397 +#, c-format +msgid "Pool: %s\n" +msgstr "" + +#: src/dird/ua_output.c:413 src/dird/ua_status.c:452 +msgid "Ignoring illegal value for days.\n" +msgstr "" + +#: src/dird/ua_output.c:422 +#, c-format +msgid "Unknown list keyword: %s\n" +msgstr "" + +#: src/dird/ua_output.c:448 +#, c-format +msgid "%s is not a job name.\n" +msgstr "" + +#: src/dird/ua_output.c:469 +#, c-format +msgid "Could not find next Volume for Job %s (%s, %s).\n" +msgstr "" + +#: src/dird/ua_output.c:473 +#, c-format +msgid "The next Volume to be used by Job \"%s\" (%s, %s) will be %s\n" +msgstr "" + +#: src/dird/ua_output.c:483 +#, c-format +msgid "Could not find next Volume for Job %s.\n" +msgstr "" + +#: src/dird/ua_output.c:673 +msgid "You have no messages.\n" +msgstr "" + +#: src/dird/ua_prune.c:173 +msgid "Choose item to prune" +msgstr "" + +#: src/dird/ua_prune.c:254 +msgid "No Files found to prune.\n" +msgstr "" + +#: src/dird/ua_prune.c:286 +#, c-format +msgid "Pruned Files from %s Jobs for client %s from catalog.\n" +msgstr "" + +#: src/dird/ua_prune.c:392 +msgid "No Jobs found to prune.\n" +msgstr "" + +#: src/dird/ua_prune.c:453 +#, c-format +msgid "Pruned %d %s for client %s from catalog.\n" +msgstr "" + +#: src/dird/ua_prune.c:454 +msgid "Jobs" +msgstr "" + +#: src/dird/ua_prune.c:501 +#, c-format +msgid "There are no Jobs associated with Volume \"%s\". Prune not needed.\n" +msgstr "" + +#: src/dird/ua_prune.c:508 src/dird/ua_purge.c:482 +#, c-format +msgid "There are no Jobs associated with Volume \"%s\". Marking it purged.\n" +msgstr "" + +#: src/dird/ua_prune.c:566 +#, c-format +msgid "Pruned %d %s on Volume \"%s\" from catalog.\n" +msgstr "" + +#: src/dird/ua_purge.c:177 +msgid "" +"\n" +"This command is can be DANGEROUS!!!\n" +"\n" +"It purges (deletes) all Files from a Job,\n" +"JobId, Client or Volume; or it purges (deletes)\n" +"all Jobs from a Client or Volume without regard\n" +"for retention periods. Normally you should use the\n" +"PRUNE command, which respects retention periods.\n" +msgstr "" + +#: src/dird/ua_purge.c:237 +msgid "Choose item to purge" +msgstr "" + +#: src/dird/ua_purge.c:282 +#, c-format +msgid "Begin purging files for Client \"%s\"\n" +msgstr "" + +#: src/dird/ua_purge.c:294 +#, c-format +msgid "No Files found for client %s to purge from %s catalog.\n" +msgstr "" + +#: src/dird/ua_purge.c:325 +#, c-format +msgid "%d Files for client \"%s\" purged from %s catalog.\n" +msgstr "" + +#: src/dird/ua_purge.c:362 +#, c-format +msgid "Begin purging jobs from Client \"%s\"\n" +msgstr "" + +#: src/dird/ua_purge.c:373 +#, c-format +msgid "No Jobs found for client %s to purge from %s catalog.\n" +msgstr "" + +#: src/dird/ua_purge.c:413 +#, c-format +msgid "%d Jobs for client %s purged from %s catalog.\n" +msgstr "" + +#: src/dird/ua_purge.c:464 +#, c-format +msgid "" +"Volume \"%s\" has VolStatus \"%s\" and cannot be purged.\n" +"The VolStatus must be: Append, Full, Used, or Error to be purged.\n" +msgstr "" + +#: src/dird/ua_purge.c:535 +#, c-format +msgid "%d File%s on Volume \"%s\" purged from catalog.\n" +msgstr "" + +#: src/dird/ua_purge.c:549 +#, c-format +msgid "" +"There are no more Jobs associated with Volume \"%s\". 
Marking it purged.\n" +msgstr "" + +#: src/dird/ua_purge.c:581 +#, c-format +msgid "Cannot purge Volume with VolStatus=%s\n" +msgstr "" + +#: src/dird/ua_query.c:59 src/findlib/create_file.c:282 +#: src/findlib/create_file.c:339 +#, c-format +msgid "Could not open %s: ERR=%s\n" +msgstr "" + +#: src/dird/ua_query.c:64 +msgid "Available queries:\n" +msgstr "" + +#: src/dird/ua_query.c:71 +msgid "Choose a query" +msgstr "" + +#: src/dird/ua_query.c:85 +msgid "Could not find query.\n" +msgstr "" + +#: src/dird/ua_query.c:103 +msgid "Too many prompts in query, max is 9.\n" +msgstr "" + +#: src/dird/ua_query.c:206 +#, c-format +msgid "Warning prompt %d missing.\n" +msgstr "" + +#: src/dird/ua_query.c:252 +msgid "" +"Entering SQL query mode.\n" +"Terminate each query with a semicolon.\n" +"Terminate query mode with a blank line.\n" +msgstr "" + +#: src/dird/ua_query.c:255 src/dird/ua_query.c:272 +msgid "Enter SQL query: " +msgstr "" + +#: src/dird/ua_query.c:274 +msgid "Add to SQL query: " +msgstr "" + +#: src/dird/ua_query.c:278 +msgid "End query mode.\n" +msgstr "" + +#: src/dird/ua_restore.c:115 +msgid "" +"No Restore Job Resource found in bacula-dir.conf.\n" +"You must create at least one before running this command.\n" +msgstr "" + +#: src/dird/ua_restore.c:131 +msgid "Restore not done.\n" +msgstr "" + +#: src/dird/ua_restore.c:142 +msgid "Unable to construct a valid BSR. Cannot continue.\n" +msgstr "" + +#: src/dird/ua_restore.c:146 src/dird/ua_restore.c:160 +msgid "No files selected to be restored.\n" +msgstr "" + +#: src/dird/ua_restore.c:154 +msgid "" +"\n" +"1 file selected to be restored.\n" +"\n" +msgstr "" + +#: src/dird/ua_restore.c:157 +#, c-format +msgid "" +"\n" +"%u files selected to be restored.\n" +"\n" +msgstr "" + +#: src/dird/ua_restore.c:175 +msgid "No Restore Job resource found!\n" +msgstr "" + +#: src/dird/ua_restore.c:237 +#, c-format +msgid "Missing value for keyword: %s\n" +msgstr "" + +#: src/dird/ua_restore.c:286 +msgid "List last 20 Jobs run" +msgstr "" + +#: src/dird/ua_restore.c:287 +msgid "List Jobs where a given File is saved" +msgstr "" + +#: src/dird/ua_restore.c:288 +msgid "Enter list of comma separated JobIds to select" +msgstr "" + +#: src/dird/ua_restore.c:289 +msgid "Enter SQL list command" +msgstr "" + +#: src/dird/ua_restore.c:290 +msgid "Select the most recent backup for a client" +msgstr "" + +#: src/dird/ua_restore.c:291 +msgid "Select backup for a client before a specified time" +msgstr "" + +#: src/dird/ua_restore.c:292 +msgid "Enter a list of files to restore" +msgstr "" + +#: src/dird/ua_restore.c:293 +msgid "Enter a list of files to restore before a specified time" +msgstr "" + +#: src/dird/ua_restore.c:294 +msgid "Find the JobIds of the most recent backup for a client" +msgstr "" + +#: src/dird/ua_restore.c:295 +msgid "Find the JobIds for a backup for a client before a specified time" +msgstr "" + +#: src/dird/ua_restore.c:296 +msgid "Enter a list of directories to restore for found JobIds" +msgstr "" + +#: src/dird/ua_restore.c:297 src/dird/ua_status.c:713 src/filed/status.c:251 +#: src/stored/status.c:412 src/wx-console/wxbconfigpanel.cpp:191 +msgid "Cancel" +msgstr "" + +#: src/dird/ua_restore.c:333 +#, c-format +msgid "Unknown keyword: %s\n" +msgstr "" + +#: src/dird/ua_restore.c:357 +#, c-format +msgid "Improper date format: %s\n" +msgstr "" + +#: src/dird/ua_restore.c:397 src/dird/ua_select.c:590 +#, c-format +msgid "Error: Pool resource \"%s\" does not exist.\n" +msgstr "" + +#: src/dird/ua_restore.c:402 +#, c-format +msgid "Error: Pool 
resource \"%s\" access not allowed.\n" +msgstr "" + +#: src/dird/ua_restore.c:421 +msgid "" +"\n" +"First you select one or more JobIds that contain files\n" +"to be restored. You will be presented several methods\n" +"of specifying the JobIds. Then you will be allowed to\n" +"select which files from those JobIds are to be restored.\n" +"\n" +msgstr "" + +#: src/dird/ua_restore.c:433 +msgid "To select the JobIds, you have the following choices:\n" +msgstr "" + +#: src/dird/ua_restore.c:438 +msgid "Select item: " +msgstr "" + +#: src/dird/ua_restore.c:452 +msgid "Enter Filename (no path):" +msgstr "" + +#: src/dird/ua_restore.c:467 src/dird/ua_restore.c:567 +msgid "Enter JobId(s), comma separated, to restore: " +msgstr "" + +#: src/dird/ua_restore.c:473 +msgid "Enter SQL list command: " +msgstr "" + +#: src/dird/ua_restore.c:501 src/dird/ua_restore.c:526 +msgid "" +"Enter file names with paths, or < to enter a filename\n" +"containg a list of file names with paths, and terminate\n" +"them with a blank line.\n" +msgstr "" + +#: src/dird/ua_restore.c:505 src/dird/ua_restore.c:530 +msgid "Enter full filename: " +msgstr "" + +#: src/dird/ua_restore.c:565 +#, c-format +msgid "You have already seleted the following JobIds: %s\n" +msgstr "" + +#: src/dird/ua_restore.c:580 +msgid "" +"Enter full directory names or start the name\n" +"with a < to indicate it is a filename containg a list\n" +"of directories and terminate them with a blank line.\n" +msgstr "" + +#: src/dird/ua_restore.c:584 +msgid "Enter directory name: " +msgstr "" + +#: src/dird/ua_restore.c:609 +msgid "No Jobs selected.\n" +msgstr "" + +#: src/dird/ua_restore.c:613 +#, c-format +msgid "You have selected the following JobIds: %s\n" +msgstr "" + +#: src/dird/ua_restore.c:616 +#, c-format +msgid "You have selected the following JobId: %s\n" +msgstr "" + +#: src/dird/ua_restore.c:624 +msgid "Invalid JobId in list.\n" +msgstr "" + +#: src/dird/ua_restore.c:637 +#, c-format +msgid "Unable to get Job record for JobId=%s: ERR=%s\n" +msgstr "" + +#: src/dird/ua_restore.c:642 +#, c-format +msgid "No authorization. Job \"%s\" not selected.\n" +msgstr "" + +#: src/dird/ua_restore.c:656 +msgid "" +"The restored files will the most current backup\n" +"BEFORE the date you specify below.\n" +"\n" +msgstr "" + +#: src/dird/ua_restore.c:659 +msgid "Enter date as YYYY-MM-DD HH:MM:SS :" +msgstr "" + +#: src/dird/ua_restore.c:665 +msgid "Improper date format.\n" +msgstr "" + +#: src/dird/ua_restore.c:686 +#, c-format +msgid "Cannot open file %s: ERR=%s\n" +msgstr "" + +#: src/dird/ua_restore.c:694 src/dird/ua_restore.c:698 +#, c-format +msgid "Error occurred on line %d of %s\n" +msgstr "" + +#: src/dird/ua_restore.c:744 src/dird/ua_restore.c:782 +#, c-format +msgid "No database record found for: %s\n" +msgstr "" + +#: src/dird/ua_restore.c:769 +msgid "No JobId specified cannot continue.\n" +msgstr "" + +#: src/dird/ua_restore.c:813 +#, c-format +msgid "No table found: %s\n" +msgstr "" + +#: src/dird/ua_restore.c:918 +#, c-format +msgid "" +"\n" +"Building directory tree for JobId %s ... " +msgstr "" + +#: src/dird/ua_restore.c:937 +msgid "" +"\n" +"There were no files inserted into the tree, so file selection\n" +"is not possible.Most likely your retention policy pruned the files\n" +msgstr "" + +#: src/dird/ua_restore.c:939 +msgid "" +"\n" +"Do you want to restore all the files? 
(yes|no): " +msgstr "" + +#: src/dird/ua_restore.c:955 +#, c-format +msgid "" +"\n" +"1 Job, %s files inserted into the tree and marked for extraction.\n" +msgstr "" + +#: src/dird/ua_restore.c:959 +#, c-format +msgid "" +"\n" +"1 Job, %s files inserted into the tree.\n" +msgstr "" + +#: src/dird/ua_restore.c:965 +#, c-format +msgid "" +"\n" +"%d Jobs, %s files inserted into the tree and marked for extraction.\n" +msgstr "" + +#: src/dird/ua_restore.c:969 +#, c-format +msgid "" +"\n" +"%d Jobs, %s files inserted into the tree.\n" +msgstr "" + +#: src/dird/ua_restore.c:1046 +#, c-format +msgid "Error getting FileSet \"%s\": ERR=%s\n" +msgstr "" + +#: src/dird/ua_restore.c:1054 src/dird/ua_select.c:167 +msgid "The defined FileSet resources are:\n" +msgstr "" + +#: src/dird/ua_restore.c:1058 src/dird/ua_run.c:644 src/dird/ua_select.c:175 +msgid "FileSet" +msgstr "" + +#: src/dird/ua_restore.c:1058 src/dird/ua_select.c:175 +msgid "Select FileSet resource" +msgstr "" + +#: src/dird/ua_restore.c:1065 +#, c-format +msgid "Error getting FileSet record: %s\n" +msgstr "" + +#: src/dird/ua_restore.c:1066 +msgid "" +"This probably means you modified the FileSet.\n" +"Continuing anyway.\n" +msgstr "" + +#: src/dird/ua_restore.c:1081 +#, c-format +msgid "Pool \"%s\" not found, using any pool.\n" +msgstr "" + +#: src/dird/ua_restore.c:1107 src/dird/ua_restore.c:1123 +#, c-format +msgid "No Full backup before %s found.\n" +msgstr "" + +#: src/dird/ua_restore.c:1146 +msgid "No jobs found.\n" +msgstr "" + +#: src/dird/ua_restore.c:1312 +msgid "" +"Warning, the JobIds that you selected refer to more than one MediaType.\n" +"Restore is not possible. The MediaTypes used are:\n" +msgstr "" + +#: src/dird/ua_restore.c:1320 +msgid "No MediaType found for your JobIds.\n" +msgstr "" + +#: src/dird/ua_restore.c:1352 +#, c-format +msgid "Warning default storage overridden by %s on command line.\n" +msgstr "" + +#: src/dird/ua_restore.c:1363 +#, c-format +msgid "" +"\n" +"Warning. 
Unable to find Storage resource for\n" +"MediaType \"%s\", needed by the Jobs you selected.\n" +"You will be allowed to select a Storage device later.\n" +msgstr "" + +#: src/dird/ua_run.c:112 +#, c-format +msgid "Value missing for keyword %s\n" +msgstr "" + +#: src/dird/ua_run.c:119 +msgid "Job name specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:127 +msgid "JobId specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:136 +msgid "Client specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:144 +msgid "FileSet specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:152 +msgid "Level specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:161 +msgid "Storage specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:169 +msgid "Pool specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:177 +msgid "Where specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:185 +msgid "Bootstrap specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:193 +msgid "Replace specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:201 +msgid "When specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:209 +msgid "Priority specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:214 +msgid "Priority must be positive nonzero setting it to 10.\n" +msgstr "" + +#: src/dird/ua_run.c:224 +msgid "Verify Job specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:268 +#, c-format +msgid "Invalid keyword: %s\n" +msgstr "" + +#: src/dird/ua_run.c:280 +#, c-format +msgid "Catalog \"%s\" not found\n" +msgstr "" + +#: src/dird/ua_run.c:291 +#, c-format +msgid "Job \"%s\" not found\n" +msgstr "" + +#: src/dird/ua_run.c:298 +msgid "A job name must be specified.\n" +msgstr "" + +#: src/dird/ua_run.c:304 +#, c-format +msgid "No authorization. Job \"%s\".\n" +msgstr "" + +#: src/dird/ua_run.c:313 +#, c-format +msgid "Storage \"%s\" not found.\n" +msgstr "" + +#: src/dird/ua_run.c:323 +#, c-format +msgid "No authorization. Storage \"%s\".\n" +msgstr "" + +#: src/dird/ua_run.c:333 +#, c-format +msgid "Pool \"%s\" not found.\n" +msgstr "" + +#: src/dird/ua_run.c:343 +#, c-format +msgid "No authorization. Pool \"%s\".\n" +msgstr "" + +#: src/dird/ua_run.c:363 +#, c-format +msgid "No authorization. Client \"%s\".\n" +msgstr "" + +#: src/dird/ua_run.c:372 +#, c-format +msgid "FileSet \"%s\" not found.\n" +msgstr "" + +#: src/dird/ua_run.c:381 +#, c-format +msgid "No authorization. 
FileSet \"%s\".\n" +msgstr "" + +#: src/dird/ua_run.c:389 +#, c-format +msgid "Verify Job \"%s\" not found.\n" +msgstr "" + +#: src/dird/ua_run.c:422 src/dird/ua_run.c:764 +msgid "Invalid time, using current time.\n" +msgstr "" + +#: src/dird/ua_run.c:442 +#, c-format +msgid "Invalid replace option: %s\n" +msgstr "" + +#: src/dird/ua_run.c:500 +#, c-format +msgid "" +"Run %s job\n" +"JobName: %s\n" +"FileSet: %s\n" +"Client: %s\n" +"Storage: %s\n" +"When: %s\n" +"Priority: %d\n" +msgstr "" + +#: src/dird/ua_run.c:507 src/lib/util.c:295 +msgid "Admin" +msgstr "" + +#: src/dird/ua_run.c:519 +#, c-format +msgid "" +"Run %s job\n" +"JobName: %s\n" +"FileSet: %s\n" +"Level: %s\n" +"Client: %s\n" +"Storage: %s\n" +"Pool: %s\n" +"When: %s\n" +"Priority: %d\n" +msgstr "" + +#: src/dird/ua_run.c:528 src/lib/util.c:286 +msgid "Backup" +msgstr "" + +#: src/dird/ua_run.c:544 +#, c-format +msgid "" +"Run %s job\n" +"JobName: %s\n" +"FileSet: %s\n" +"Level: %s\n" +"Client: %s\n" +"Storage: %s\n" +"Pool: %s\n" +"Verify Job: %s\n" +"When: %s\n" +"Priority: %d\n" +msgstr "" + +#: src/dird/ua_run.c:554 src/lib/util.c:289 +msgid "Verify" +msgstr "" + +#: src/dird/ua_run.c:571 +msgid "Please enter a JobId for restore: " +msgstr "" + +#: src/dird/ua_run.c:580 +#, c-format +msgid "" +"Run Restore job\n" +"JobName: %s\n" +"Bootstrap: %s\n" +"Where: %s\n" +"Replace: %s\n" +"FileSet: %s\n" +"Client: %s\n" +"Storage: %s\n" +"When: %s\n" +"Catalog: %s\n" +"Priority: %d\n" +msgstr "" + +#: src/dird/ua_run.c:602 +#, c-format +msgid "" +"Run Restore job\n" +"JobName: %s\n" +"Bootstrap: %s\n" +"Where: %s\n" +"Replace: %s\n" +"Client: %s\n" +"Storage: %s\n" +"JobId: %s\n" +"When: %s\n" +"Catalog: %s\n" +"Priority: %d\n" +msgstr "" + +#: src/dird/ua_run.c:626 +#, c-format +msgid "Unknown Job Type=%d\n" +msgstr "" + +#: src/dird/ua_run.c:631 +msgid "OK to run? 
(yes/mod/no): " +msgstr "" + +#: src/dird/ua_run.c:637 src/dird/ua_select.c:44 +msgid "mod" +msgstr "" + +#: src/dird/ua_run.c:640 src/dird/ua_update.c:464 +msgid "Parameters to modify:\n" +msgstr "" + +#: src/dird/ua_run.c:641 +msgid "Level" +msgstr "" + +#: src/dird/ua_run.c:642 src/dird/ua_select.c:152 +#: src/wx-console/wxbrestorepanel.cpp:321 +#: src/wx-console/wxbrestorepanel.cpp:337 +#: src/wx-console/wxbrestorepanel.cpp:458 +#: src/wx-console/wxbrestorepanel.cpp:459 +#: src/wx-console/wxbrestorepanel.cpp:469 +#: src/wx-console/wxbrestorepanel.cpp:470 +#: src/wx-console/wxbrestorepanel.cpp:1133 +#: src/wx-console/wxbrestorepanel.cpp:1794 +#: src/wx-console/wxbrestorepanel.cpp:1865 +msgid "Storage" +msgstr "" + +#: src/dird/ua_run.c:645 src/dird/ua_select.c:282 src/dird/ua_select.c:391 +#: src/wx-console/wxbrestorepanel.cpp:318 +#: src/wx-console/wxbrestorepanel.cpp:336 +#: src/wx-console/wxbrestorepanel.cpp:410 +#: src/wx-console/wxbrestorepanel.cpp:411 +#: src/wx-console/wxbrestorepanel.cpp:421 +#: src/wx-console/wxbrestorepanel.cpp:422 +#: src/wx-console/wxbrestorepanel.cpp:669 +#: src/wx-console/wxbrestorepanel.cpp:1103 +#: src/wx-console/wxbrestorepanel.cpp:1190 +#: src/wx-console/wxbrestorepanel.cpp:1787 +#: src/wx-console/wxbrestorepanel.cpp:1789 +#: src/wx-console/wxbrestorepanel.cpp:1863 +#: src/wx-console/wxbrestorepanel.cpp:1918 +msgid "Client" +msgstr "" + +#: src/dird/ua_run.c:646 src/wx-console/wxbrestorepanel.cpp:338 +#: src/wx-console/wxbrestorepanel.cpp:823 +#: src/wx-console/wxbrestorepanel.cpp:1867 +msgid "When" +msgstr "" + +#: src/dird/ua_run.c:647 src/wx-console/wxbrestorepanel.cpp:339 +#: src/wx-console/wxbrestorepanel.cpp:1096 +#: src/wx-console/wxbrestorepanel.cpp:1869 +msgid "Priority" +msgstr "" + +#: src/dird/ua_run.c:650 src/dird/ua_select.c:476 src/dird/ua_select.c:566 +#: src/dird/ua_update.c:400 src/dird/ua_update.c:475 +#: src/wx-console/wxbrestorepanel.cpp:320 +#: src/wx-console/wxbrestorepanel.cpp:506 +#: src/wx-console/wxbrestorepanel.cpp:516 +#: src/wx-console/wxbrestorepanel.cpp:1783 +msgid "Pool" +msgstr "" + +#: src/dird/ua_run.c:652 +msgid "Verify Job" +msgstr "" + +#: src/dird/ua_run.c:655 src/wx-console/wxbrestorepanel.cpp:331 +#: src/wx-console/wxbrestorepanel.cpp:1848 +msgid "Bootstrap" +msgstr "" + +#: src/dird/ua_run.c:656 src/wx-console/wxbrestorepanel.cpp:332 +#: src/wx-console/wxbrestorepanel.cpp:1072 +#: src/wx-console/wxbrestorepanel.cpp:1850 +msgid "Where" +msgstr "" + +#: src/dird/ua_run.c:657 src/wx-console/wxbrestorepanel.cpp:334 +#: src/wx-console/wxbrestorepanel.cpp:1080 +#: src/wx-console/wxbrestorepanel.cpp:1854 +#: src/wx-console/wxbrestorepanel.cpp:1855 +#: src/wx-console/wxbrestorepanel.cpp:1856 +#: src/wx-console/wxbrestorepanel.cpp:1857 +#: src/wx-console/wxbrestorepanel.cpp:1858 +msgid "Replace" +msgstr "" + +#: src/dird/ua_run.c:658 +msgid "JobId" +msgstr "" + +#: src/dird/ua_run.c:660 src/dird/ua_update.c:479 +msgid "Select parameter to modify" +msgstr "" + +#: src/dird/ua_run.c:664 src/dird/ua_run.c:691 +msgid "Levels:\n" +msgstr "" + +#: src/dird/ua_run.c:665 src/filed/status.c:368 src/lib/util.c:319 +#: src/stored/status.c:451 +msgid "Base" +msgstr "" + +#: src/dird/ua_run.c:666 src/filed/status.c:370 src/lib/util.c:321 +#: src/stored/status.c:453 +msgid "Full" +msgstr "" + +#: src/dird/ua_run.c:667 src/filed/status.c:373 src/lib/util.c:324 +#: src/stored/status.c:456 +msgid "Incremental" +msgstr "" + +#: src/dird/ua_run.c:668 src/filed/status.c:376 src/lib/util.c:327 +#: src/stored/status.c:459 +msgid 
"Differential" +msgstr "" + +#: src/dird/ua_run.c:669 src/filed/status.c:379 src/lib/util.c:330 +#: src/stored/status.c:462 +msgid "Since" +msgstr "" + +#: src/dird/ua_run.c:670 src/dird/ua_run.c:697 +msgid "Select level" +msgstr "" + +#: src/dird/ua_run.c:692 +msgid "Initialize Catalog" +msgstr "" + +#: src/dird/ua_run.c:693 src/filed/status.c:382 src/lib/util.c:333 +#: src/stored/status.c:465 +msgid "Verify Catalog" +msgstr "" + +#: src/dird/ua_run.c:694 src/lib/util.c:339 +msgid "Verify Volume to Catalog" +msgstr "" + +#: src/dird/ua_run.c:695 src/lib/util.c:342 +msgid "Verify Disk to Catalog" +msgstr "" + +#: src/dird/ua_run.c:696 +msgid "Verify Volume Data (not yet implemented)" +msgstr "" + +#: src/dird/ua_run.c:718 +msgid "Level not appropriate for this Job. Cannot be changed.\n" +msgstr "" + +#: src/dird/ua_run.c:756 +msgid "" +"Please enter desired start time as YYYY-MM-DD HH:MM:SS (return for now): " +msgstr "" + +#: src/dird/ua_run.c:771 +msgid "Enter new Priority: " +msgstr "" + +#: src/dird/ua_run.c:775 +msgid "Priority must be a positive integer.\n" +msgstr "" + +#: src/dird/ua_run.c:793 +msgid "Please enter the Bootstrap file name: " +msgstr "" + +#: src/dird/ua_run.c:804 +#, c-format +msgid "Warning cannot open %s: ERR=%s\n" +msgstr "" + +#: src/dird/ua_run.c:823 +msgid "Please enter path prefix for restore (/ for none): " +msgstr "" + +#: src/dird/ua_run.c:837 +msgid "Replace:\n" +msgstr "" + +#: src/dird/ua_run.c:841 +msgid "Select replace option" +msgstr "" + +#: src/dird/ua_run.c:851 +msgid "" +"You must set the bootstrap file to NULL to be able to specify a JobId.\n" +msgstr "" + +#: src/dird/ua_run.c:869 +msgid "Job failed.\n" +msgstr "" + +#: src/dird/ua_run.c:872 +#, c-format +msgid "Job started. JobId=%s\n" +msgstr "" + +#: src/dird/ua_run.c:878 +msgid "Job not run.\n" +msgstr "" + +#: src/dird/ua_select.c:39 +#, c-format +msgid "The current %s retention period is: %s\n" +msgstr "" + +#: src/dird/ua_select.c:41 +msgid "Continue? 
(yes/mod/no): " +msgstr "" + +#: src/dird/ua_select.c:45 +msgid "Enter new retention period: " +msgstr "" + +#: src/dird/ua_select.c:49 +msgid "Invalid period.\n" +msgstr "" + +#: src/dird/ua_select.c:128 +msgid "You have the following choices:\n" +msgstr "" + +#: src/dird/ua_select.c:144 +msgid "The defined Storage resources are:\n" +msgstr "" + +#: src/dird/ua_select.c:152 +msgid "Select Storage resource" +msgstr "" + +#: src/dird/ua_select.c:193 +msgid "catalog" +msgstr "" + +#: src/dird/ua_select.c:201 +msgid "The defined Catalog resources are:\n" +msgstr "" + +#: src/dird/ua_select.c:209 +msgid "Catalog" +msgstr "" + +#: src/dird/ua_select.c:209 +msgid "Select Catalog resource" +msgstr "" + +#: src/dird/ua_select.c:226 +msgid "The defined Job resources are:\n" +msgstr "" + +#: src/dird/ua_select.c:234 +msgid "Select Job resource" +msgstr "" + +#: src/dird/ua_select.c:249 +msgid "The defined Restore Job resources are:\n" +msgstr "" + +#: src/dird/ua_select.c:257 +msgid "Select Restore Job" +msgstr "" + +#: src/dird/ua_select.c:274 +msgid "The defined Client resources are:\n" +msgstr "" + +#: src/dird/ua_select.c:282 +msgid "Select Client (File daemon) resource" +msgstr "" + +#: src/dird/ua_select.c:309 +#, c-format +msgid "Error: Client resource %s does not exist.\n" +msgstr "" + +#: src/dird/ua_select.c:334 +#, c-format +msgid "Could not find Client %s: ERR=%s" +msgstr "" + +#: src/dird/ua_select.c:337 src/lib/bnet_server.c:284 +#: src/lib/bnet_server.c:378 +msgid "client" +msgstr "" + +#: src/dird/ua_select.c:338 +msgid "fd" +msgstr "" + +#: src/dird/ua_select.c:344 src/dird/ua_select.c:398 +#, c-format +msgid "Could not find Client \"%s\": ERR=%s" +msgstr "" + +#: src/dird/ua_select.c:373 +#, c-format +msgid "Error obtaining client ids. ERR=%s\n" +msgstr "" + +#: src/dird/ua_select.c:377 +msgid "No clients defined. You must run a job before using this command.\n" +msgstr "" + +#: src/dird/ua_select.c:381 +msgid "Defined Clients:\n" +msgstr "" + +#: src/dird/ua_select.c:391 +msgid "Select the Client" +msgstr "" + +#: src/dird/ua_select.c:424 src/dird/ua_select.c:447 src/dird/ua_select.c:483 +#, c-format +msgid "Could not find Pool \"%s\": ERR=%s" +msgstr "" + +#: src/dird/ua_select.c:462 +msgid "No pools defined. 
Use the \"create\" command to create one.\n" +msgstr "" + +#: src/dird/ua_select.c:466 +msgid "Defined Pools:\n" +msgstr "" + +#: src/dird/ua_select.c:476 +msgid "Select the Pool" +msgstr "" + +#: src/dird/ua_select.c:506 +#, c-format +msgid "No access to Pool \"%s\"\n" +msgstr "" + +#: src/dird/ua_select.c:532 +msgid "Enter MediaId or Volume name: " +msgstr "" + +#: src/dird/ua_select.c:558 +msgid "The defined Pool resources are:\n" +msgstr "" + +#: src/dird/ua_select.c:566 +msgid "Select Pool resource" +msgstr "" + +#: src/dird/ua_select.c:601 +msgid "Enter the JobId to select: " +msgstr "" + +#: src/dird/ua_select.c:638 +#, c-format +msgid "Could not find Job \"%s\": ERR=%s" +msgstr "" + +#: src/dird/ua_select.c:705 +#, c-format +msgid "Automatically selected %s: %s\n" +msgstr "" + +#: src/dird/ua_select.c:710 +#, c-format +msgid "Cannot select %s in batch mode.\n" +msgstr "" + +#: src/dird/ua_select.c:722 +msgid "Selection is empty!\n" +msgstr "" + +#: src/dird/ua_select.c:728 +msgid "Item 1 selected automatically.\n" +msgstr "" + +#: src/dird/ua_select.c:739 +msgid "Selection aborted, nothing done.\n" +msgstr "" + +#: src/dird/ua_select.c:744 +#, c-format +msgid "Please enter a number between 1 and %d\n" +msgstr "" + +#: src/dird/ua_select.c:793 +msgid "Storage name given twice.\n" +msgstr "" + +#: src/dird/ua_select.c:810 +#, c-format +msgid "Expecting jobid=nn command, got: %s\n" +msgstr "" + +#: src/dird/ua_select.c:814 +#, c-format +msgid "JobId %s is not running.\n" +msgstr "" + +#: src/dird/ua_select.c:823 +#, c-format +msgid "Expecting job=xxx, got: %s.\n" +msgstr "" + +#: src/dird/ua_select.c:827 +#, c-format +msgid "Job \"%s\" is not running.\n" +msgstr "" + +#: src/dird/ua_select.c:843 +#, c-format +msgid "Storage resource \"%s\": not found\n" +msgstr "" + +#: src/dird/ua_select.c:875 +msgid "Enter autochanger drive[0]: " +msgstr "" + +#: src/dird/ua_select.c:905 +msgid "Media Types defined in conf file:\n" +msgstr "" + +#: src/dird/ua_select.c:911 +msgid "Media Type" +msgstr "" + +#: src/dird/ua_select.c:911 +msgid "Select the Media Type" +msgstr "" + +#: src/dird/ua_server.c:61 +#, c-format +msgid "Cannot create UA thread: %s\n" +msgstr "" + +#: src/dird/ua_server.c:142 +msgid "You have messages.\n" +msgstr "" + +#: src/dird/ua_status.c:126 +msgid "Status available for:\n" +msgstr "" + +#: src/dird/ua_status.c:132 +msgid "Select daemon type for status" +msgstr "" + +#: src/dird/ua_status.c:245 +#, c-format +msgid "%s Version: %s (%s) %s %s %s\n" +msgstr "" + +#: src/dird/ua_status.c:249 src/stored/status.c:71 +#, c-format +msgid "Daemon started %s, 1 Job run since started.\n" +msgstr "" + +#: src/dird/ua_status.c:252 src/stored/status.c:74 +#, c-format +msgid "Daemon started %s, %d Jobs run since started.\n" +msgstr "" + +#: src/dird/ua_status.c:257 src/filed/status.c:121 src/stored/status.c:78 +#, c-format +msgid " Heap: bytes=%s max_bytes=%s bufs=%s max_bufs=%s\n" +msgstr "" + +#: src/dird/ua_status.c:277 src/dird/ua_status.c:493 src/dird/ua_status.c:655 +#: src/filed/status.c:198 src/filed/status.c:277 src/stored/status.c:345 +#: src/stored/status.c:361 src/stored/status.c:438 +msgid "====\n" +msgstr "" + +#: src/dird/ua_status.c:289 +#, c-format +msgid "" +"\n" +"Failed to connect to Storage daemon %s.\n" +"====\n" +msgstr "" + +#: src/dird/ua_status.c:327 +#, c-format +msgid "" +"Failed to connect to Client %s.\n" +"====\n" +msgstr "" + +#: src/dird/ua_status.c:335 +msgid "Connected to file daemon\n" +msgstr "" + +#: src/dird/ua_status.c:350 +msgid "" +"\n" 
+"Scheduled Jobs:\n" +msgstr "" + +#: src/dird/ua_status.c:351 +msgid "" +"Level Type Pri Scheduled Name Volume\n" +msgstr "" + +#: src/dird/ua_status.c:352 +msgid "===================================================================================\n" +msgstr "" + +#: src/dird/ua_status.c:399 +#, c-format +msgid "%-14s %-8s %3d %-18s %-18s %s\n" +msgstr "" + +#: src/dird/ua_status.c:491 +msgid "No Scheduled Jobs.\n" +msgstr "" + +#: src/dird/ua_status.c:508 src/stored/status.c:277 +msgid "" +"\n" +"Running Jobs:\n" +msgstr "" + +#: src/dird/ua_status.c:516 +#, c-format +msgid "Console connected at %s\n" +msgstr "" + +#: src/dird/ua_status.c:526 +msgid "" +"No Jobs running.\n" +"====\n" +msgstr "" + +#: src/dird/ua_status.c:531 +msgid " JobId Level Name Status\n" +msgstr "" + +#: src/dird/ua_status.c:532 src/filed/status.c:221 src/stored/status.c:382 +msgid "======================================================================\n" +msgstr "" + +#: src/dird/ua_status.c:540 +msgid "is waiting execution" +msgstr "" + +#: src/dird/ua_status.c:543 +msgid "is running" +msgstr "" + +#: src/dird/ua_status.c:546 +msgid "is blocked" +msgstr "" + +#: src/dird/ua_status.c:549 +msgid "has terminated" +msgstr "" + +#: src/dird/ua_status.c:552 +msgid "has erred" +msgstr "" + +#: src/dird/ua_status.c:555 +msgid "has errors" +msgstr "" + +#: src/dird/ua_status.c:558 +msgid "has a fatal error" +msgstr "" + +#: src/dird/ua_status.c:561 +msgid "has verify differences" +msgstr "" + +#: src/dird/ua_status.c:564 +msgid "has been canceled" +msgstr "" + +#: src/dird/ua_status.c:568 +#, c-format +msgid "is waiting on Client %s" +msgstr "" + +#: src/dird/ua_status.c:574 +#, c-format +msgid "is waiting on Storage %s" +msgstr "" + +#: src/dird/ua_status.c:579 +msgid "is waiting on max Storage jobs" +msgstr "" + +#: src/dird/ua_status.c:582 +msgid "is waiting on max Client jobs" +msgstr "" + +#: src/dird/ua_status.c:585 +msgid "is waiting on max Job jobs" +msgstr "" + +#: src/dird/ua_status.c:588 +msgid "is waiting on max total jobs" +msgstr "" + +#: src/dird/ua_status.c:591 +msgid "is waiting for its start time" +msgstr "" + +#: src/dird/ua_status.c:594 +msgid "is waiting for higher priority jobs to finish" +msgstr "" + +#: src/dird/ua_status.c:599 +#, c-format +msgid "is in unknown state %c" +msgstr "" + +#: src/dird/ua_status.c:613 +msgid "is waiting for a mount request" +msgstr "" + +#: src/dird/ua_status.c:620 +msgid "is waiting for an appendable Volume" +msgstr "" + +#: src/dird/ua_status.c:627 +#, c-format +msgid "is waiting for Client %s to connect to Storage %s" +msgstr "" + +#: src/dird/ua_status.c:643 +#, c-format +msgid "%6d %-6s %-20s %s\n" +msgstr "" + +#: src/dird/ua_status.c:665 src/filed/status.c:211 src/stored/status.c:373 +msgid "No Terminated Jobs.\n" +msgstr "" + +#: src/dird/ua_status.c:670 src/stored/status.c:378 +msgid "" +"\n" +"Terminated Jobs:\n" +msgstr "" + +#: src/dird/ua_status.c:671 +msgid " JobId Level Files Bytes Status Finished Name \n" +msgstr "" + +#: src/dird/ua_status.c:672 +msgid "========================================================================\n" +msgstr "" + +#: src/dird/ua_status.c:703 src/filed/status.c:241 src/lib/util.c:178 +#: src/stored/status.c:402 +msgid "Created" +msgstr "" + +#: src/dird/ua_status.c:707 src/filed/status.c:245 src/lib/util.c:191 +#: src/lib/util.c:258 src/stored/status.c:406 +msgid "Error" +msgstr "" + +#: src/dird/ua_status.c:710 src/filed/status.c:248 src/stored/status.c:409 +msgid "Diffs" +msgstr "" + +#: src/dird/ua_status.c:716 
src/filed/status.c:254 src/lib/util.c:187 +#: src/lib/util.c:254 src/stored/btape.c:1158 src/stored/status.c:415 +#: src/wx-console/wxbconfigpanel.cpp:180 +msgid "OK" +msgstr "" + +#: src/dird/ua_status.c:719 src/filed/status.c:257 src/stored/status.c:418 +msgid "Other" +msgstr "" + +#: src/dird/ua_status.c:722 src/filed/status.c:268 src/stored/status.c:429 +#, c-format +msgid "%6d %-6s %8s %14s %-7s %-8s %s\n" +msgstr "" + +#: src/dird/ua_status.c:730 src/stored/btape.c:180 +msgid "\n" +msgstr "" + +#: src/dird/ua_tree.c:58 +msgid "change current directory" +msgstr "" + +#: src/dird/ua_tree.c:59 +msgid "count marked files in and below the cd" +msgstr "" + +#: src/dird/ua_tree.c:60 src/dird/ua_tree.c:61 +msgid "long list current directory, wildcards allowed" +msgstr "" + +#: src/dird/ua_tree.c:62 +msgid "leave file selection mode" +msgstr "" + +#: src/dird/ua_tree.c:63 +msgid "estimate restore size" +msgstr "" + +#: src/dird/ua_tree.c:64 +msgid "same as done command" +msgstr "" + +#: src/dird/ua_tree.c:65 +msgid "find files, wildcards allowed" +msgstr "" + +#: src/dird/ua_tree.c:66 src/dird/ua_tree.c:75 +msgid "print help" +msgstr "" + +#: src/dird/ua_tree.c:67 +msgid "list current directory, wildcards allowed" +msgstr "" + +#: src/dird/ua_tree.c:68 +msgid "list the marked files in and below the cd" +msgstr "" + +#: src/dird/ua_tree.c:69 +msgid "mark dir/file to be restored recursively, wildcards allowed" +msgstr "" + +#: src/dird/ua_tree.c:70 +msgid "mark directory name to be restored (no files)" +msgstr "" + +#: src/dird/ua_tree.c:71 +msgid "print current working directory" +msgstr "" + +#: src/dird/ua_tree.c:72 +msgid "unmark dir/file to be restored recursively in dir" +msgstr "" + +#: src/dird/ua_tree.c:73 +msgid "unmark directory name only no recursion" +msgstr "" + +#: src/dird/ua_tree.c:74 +msgid "quit and do not do restore" +msgstr "" + +#: src/dird/ua_tree.c:94 +msgid "" +"\n" +"You are now entering file selection mode where you add (mark) and\n" +"remove (unmark) files to be restored. No files are initially added, unless\n" +"you used the \"all\" keyword on the command line.\n" +"Enter \"done\" to leave this mode.\n" +"\n" +msgstr "" + +#: src/dird/ua_tree.c:104 src/dird/ua_tree.c:664 src/dird/ua_tree.c:672 +#, c-format +msgid "cwd is: %s\n" +msgstr "" + +#: src/dird/ua_tree.c:125 +msgid "Illegal command. Enter \"done\" to exit.\n" +msgstr "" + +#: src/dird/ua_tree.c:298 src/dird/ua_tree.c:309 src/dird/ua_tree.c:326 +msgid "No files marked.\n" +msgstr "" + +#: src/dird/ua_tree.c:311 +msgid "1 file marked.\n" +msgstr "" + +#: src/dird/ua_tree.c:313 +#, c-format +msgid "%s files marked.\n" +msgstr "" + +#: src/dird/ua_tree.c:340 +msgid "No directories marked.\n" +msgstr "" + +#: src/dird/ua_tree.c:342 +msgid "1 directory marked.\n" +msgstr "" + +#: src/dird/ua_tree.c:344 +#, c-format +msgid "%s directories marked.\n" +msgstr "" + +#: src/dird/ua_tree.c:365 +#, c-format +msgid "%s total files/dirs. 
%s marked to be restored.\n" +msgstr "" + +#: src/dird/ua_tree.c:376 +msgid "No file specification given.\n" +msgstr "" + +#: src/dird/ua_tree.c:520 +#, c-format +msgid "Node %s has no children.\n" +msgstr "" + +#: src/dird/ua_tree.c:611 +#, c-format +msgid "%d total files; %d marked to be restored; %s bytes.\n" +msgstr "" + +#: src/dird/ua_tree.c:656 +msgid "Invalid path given.\n" +msgstr "" + +#: src/dird/ua_tree.c:683 src/dird/ua_tree.c:694 +msgid "No files unmarked.\n" +msgstr "" + +#: src/dird/ua_tree.c:696 +msgid "1 file unmarked.\n" +msgstr "" + +#: src/dird/ua_tree.c:698 +#, c-format +msgid "%d files unmarked.\n" +msgstr "" + +#: src/dird/ua_tree.c:709 src/dird/ua_tree.c:725 +msgid "No directories unmarked.\n" +msgstr "" + +#: src/dird/ua_tree.c:727 +msgid "1 directory unmarked.\n" +msgstr "" + +#: src/dird/ua_tree.c:729 +#, c-format +msgid "%d directories unmarked.\n" +msgstr "" + +#: src/dird/ua_update.c:79 +msgid "Update choice:\n" +msgstr "" + +#: src/dird/ua_update.c:80 +msgid "Volume parameters" +msgstr "" + +#: src/dird/ua_update.c:81 +msgid "Pool from resource" +msgstr "" + +#: src/dird/ua_update.c:82 +msgid "Slots from autochanger" +msgstr "" + +#: src/dird/ua_update.c:83 +msgid "item" +msgstr "" + +#: src/dird/ua_update.c:83 +msgid "Choose catalog item to update" +msgstr "" + +#: src/dird/ua_update.c:122 +#, c-format +msgid "Invalid VolStatus specified: %s\n" +msgstr "" + +#: src/dird/ua_update.c:131 +#, c-format +msgid "New Volume status is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:141 +#, c-format +msgid "Invalid retention period specified: %s\n" +msgstr "" + +#: src/dird/ua_update.c:149 +#, c-format +msgid "New retention period is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:160 +#, c-format +msgid "Invalid use duration specified: %s\n" +msgstr "" + +#: src/dird/ua_update.c:168 +#, c-format +msgid "New use duration is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:182 +#, c-format +msgid "New max jobs is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:195 +#, c-format +msgid "New max files is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:206 +#, c-format +msgid "Invalid max. bytes specification: %s\n" +msgstr "" + +#: src/dird/ua_update.c:214 +#, c-format +msgid "New Max bytes is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:228 src/dird/ua_update.c:252 +msgid "Invalid value. 
It must be yes or no.\n" +msgstr "" + +#: src/dird/ua_update.c:236 +#, c-format +msgid "New Recycle flag is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:260 +#, c-format +msgid "New InChanger flag is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:278 +#, c-format +msgid "Invalid slot, it must be between 0 and MaxVols=%d\n" +msgstr "" + +#: src/dird/ua_update.c:287 src/dird/ua_update.c:577 +#, c-format +msgid "Error updating media record Slot: ERR=%s" +msgstr "" + +#: src/dird/ua_update.c:289 +#, c-format +msgid "New Slot is: %d\n" +msgstr "" + +#: src/dird/ua_update.c:316 +#, c-format +msgid "New Pool is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:345 +#, c-format +msgid "Error updating Volume record: ERR=%s" +msgstr "" + +#: src/dird/ua_update.c:347 +#, c-format +msgid "Volume defaults updated from \"%s\" Pool record.\n" +msgstr "" + +#: src/dird/ua_update.c:369 +#, c-format +msgid "Error updating Volume records: ERR=%s" +msgstr "" + +#: src/dird/ua_update.c:371 +msgid "All Volume defaults updated from Pool record.\n" +msgstr "" + +#: src/dird/ua_update.c:391 +msgid "VolStatus" +msgstr "" + +#: src/dird/ua_update.c:392 +msgid "VolRetention" +msgstr "" + +#: src/dird/ua_update.c:393 +msgid "VolUse" +msgstr "" + +#: src/dird/ua_update.c:394 +msgid "MaxVolJobs" +msgstr "" + +#: src/dird/ua_update.c:395 +msgid "MaxVolFiles" +msgstr "" + +#: src/dird/ua_update.c:396 +msgid "MaxVolBytes" +msgstr "" + +#: src/dird/ua_update.c:397 +msgid "Recycle" +msgstr "" + +#: src/dird/ua_update.c:398 +msgid "InChanger" +msgstr "" + +#: src/dird/ua_update.c:399 src/dird/ua_update.c:472 +msgid "Slot" +msgstr "" + +#: src/dird/ua_update.c:401 +msgid "FromPool" +msgstr "" + +#: src/dird/ua_update.c:402 +msgid "AllFromPool" +msgstr "" + +#: src/dird/ua_update.c:463 +#, c-format +msgid "Updating Volume \"%s\"\n" +msgstr "" + +#: src/dird/ua_update.c:465 +msgid "Volume Status" +msgstr "" + +#: src/dird/ua_update.c:466 +msgid "Volume Retention Period" +msgstr "" + +#: src/dird/ua_update.c:467 +msgid "Volume Use Duration" +msgstr "" + +#: src/dird/ua_update.c:468 +msgid "Maximum Volume Jobs" +msgstr "" + +#: src/dird/ua_update.c:469 +msgid "Maximum Volume Files" +msgstr "" + +#: src/dird/ua_update.c:470 +msgid "Maximum Volume Bytes" +msgstr "" + +#: src/dird/ua_update.c:471 +msgid "Recycle Flag" +msgstr "" + +#: src/dird/ua_update.c:473 +msgid "InChanger Flag" +msgstr "" + +#: src/dird/ua_update.c:474 +msgid "Volume Files" +msgstr "" + +#: src/dird/ua_update.c:476 +msgid "Volume from Pool" +msgstr "" + +#: src/dird/ua_update.c:477 +msgid "All Volumes from Pool" +msgstr "" + +#: src/dird/ua_update.c:478 +msgid "Done" +msgstr "" + +#: src/dird/ua_update.c:489 +#, c-format +msgid "Current Volume status is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:490 +msgid "Possible Values are:\n" +msgstr "" + +#: src/dird/ua_update.c:501 +msgid "Choose new Volume Status" +msgstr "" + +#: src/dird/ua_update.c:507 +#, c-format +msgid "Current retention period is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:509 +msgid "Enter Volume Retention period: " +msgstr "" + +#: src/dird/ua_update.c:516 +#, c-format +msgid "Current use duration is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:518 +msgid "Enter Volume Use Duration: " +msgstr "" + +#: src/dird/ua_update.c:525 +#, c-format +msgid "Current max jobs is: %u\n" +msgstr "" + +#: src/dird/ua_update.c:526 +msgid "Enter new Maximum Jobs: " +msgstr "" + +#: src/dird/ua_update.c:533 +#, c-format +msgid "Current max files is: %u\n" +msgstr "" + +#: src/dird/ua_update.c:534 +msgid "Enter new 
Maximum Files: " +msgstr "" + +#: src/dird/ua_update.c:541 +#, c-format +msgid "Current value is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:542 +msgid "Enter new Maximum Bytes: " +msgstr "" + +#: src/dird/ua_update.c:550 +#, c-format +msgid "Current recycle flag is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:552 +msgid "Enter new Recycle status: " +msgstr "" + +#: src/dird/ua_update.c:559 +#, c-format +msgid "Current Slot is: %d\n" +msgstr "" + +#: src/dird/ua_update.c:560 +msgid "Enter new Slot: " +msgstr "" + +#: src/dird/ua_update.c:567 +#, c-format +msgid "Current InChanger flag is: %d\n" +msgstr "" + +#: src/dird/ua_update.c:568 +msgid "Set InChanger flag? yes/no: " +msgstr "" + +#: src/dird/ua_update.c:579 +#, c-format +msgid "New InChanger flag is: %d\n" +msgstr "" + +#: src/dird/ua_update.c:586 +msgid "" +"Warning changing Volume Files can result\n" +"in loss of data on your Volume\n" +"\n" +msgstr "" + +#: src/dird/ua_update.c:588 +#, c-format +msgid "Current Volume Files is: %u\n" +msgstr "" + +#: src/dird/ua_update.c:589 +msgid "Enter new number of Files for Volume: " +msgstr "" + +#: src/dird/ua_update.c:594 +msgid "Normally, you should only increase Volume Files by one!\n" +msgstr "" + +#: src/dird/ua_update.c:595 +msgid "Continue? (yes/no): " +msgstr "" + +#: src/dird/ua_update.c:605 +#, c-format +msgid "New Volume Files is: %u\n" +msgstr "" + +#: src/dird/ua_update.c:617 +#, c-format +msgid "Current Pool is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:618 +msgid "Enter new Pool name: " +msgstr "" + +#: src/dird/ua_update.c:631 +msgid "Selection terminated.\n" +msgstr "" + +#: src/dird/ua_update.c:664 +#, c-format +msgid "db_update_pool_record returned %d. ERR=%s\n" +msgstr "" + +#: src/dird/ua_update.c:671 +msgid "Pool DB record updated from resource.\n" +msgstr "" + +#: src/dird/verify.c:87 +msgid "" +"Unable to find JobId of previous InitCatalog Job.\n" +"Please run a Verify with Level=InitCatalog before\n" +"running the current Job.\n" +msgstr "" + +#: src/dird/verify.c:117 +#, c-format +msgid "Verifying against JobId=%d Job=%s\n" +msgstr "" + +#: src/dird/verify.c:187 +#, c-format +msgid "Start Verify JobId=%s Level=%s Job=%s\n" +msgstr "" + +#: src/dird/verify.c:266 +msgid "Deprecated feature ... 
use bootstrap.\n" +msgstr "" + +#: src/dird/verify.c:279 +#, c-format +msgid "Unimplemented Verify level %d(%c)\n" +msgstr "" + +#: src/dird/verify.c:331 +#, c-format +msgid "Unimplemented verify level %d\n" +msgstr "" + +#: src/dird/verify.c:386 +msgid "Verify OK" +msgstr "" + +#: src/dird/verify.c:390 +msgid "*** Verify Error ***" +msgstr "" + +#: src/dird/verify.c:394 +msgid "Verify warnings" +msgstr "" + +#: src/dird/verify.c:397 +msgid "Verify Canceled" +msgstr "" + +#: src/dird/verify.c:400 +msgid "Verify Differences" +msgstr "" + +#: src/dird/verify.c:405 +#, c-format +msgid "Inappropriate term code: %d %c\n" +msgstr "" + +#: src/dird/verify.c:419 +#, c-format +msgid "" +"Bacula %s (%s): %s\n" +" JobId: %d\n" +" Job: %s\n" +" FileSet: %s\n" +" Verify Level: %s\n" +" Client: %s\n" +" Verify JobId: %d\n" +" Verify Job: %s\n" +" Start time: %s\n" +" End time: %s\n" +" Files Expected: %s\n" +" Files Examined: %s\n" +" Non-fatal FD errors: %d\n" +" FD termination status: %s\n" +" SD termination status: %s\n" +" Termination: %s\n" +"\n" +msgstr "" + +#: src/dird/verify.c:454 +#, c-format +msgid "" +"Bacula %s (%s): %s\n" +" JobId: %d\n" +" Job: %s\n" +" FileSet: %s\n" +" Verify Level: %s\n" +" Client: %s\n" +" Verify JobId: %d\n" +" Verify Job: %s\n" +" Start time: %s\n" +" End time: %s\n" +" Files Examined: %s\n" +" Non-fatal FD errors: %d\n" +" FD termination status: %s\n" +" Termination: %s\n" +"\n" +msgstr "" + +#: src/dird/verify.c:530 +#, c-format +msgid "" +"birdSD.\n" +msgstr "" + +#: src/filed/backup.c:155 +#, c-format +msgid " Recursion turned off. Will not descend into %s\n" +msgstr "" + +#: src/filed/backup.c:162 +#, c-format +msgid " Filesystem change prohibited. Will not descend into %s\n" +msgstr "" + +#: src/filed/backup.c:168 +#, c-format +msgid " Disallowed filesystem. Will not descend into %s\n" +msgstr "" + +#: src/filed/backup.c:186 src/filed/verify.c:113 +#, c-format +msgid " Could not access %s: ERR=%s\n" +msgstr "" + +#: src/filed/backup.c:193 src/filed/verify.c:120 +#, c-format +msgid " Could not follow link %s: ERR=%s\n" +msgstr "" + +#: src/filed/backup.c:200 src/filed/verify.c:127 +#, c-format +msgid " Could not stat %s: ERR=%s\n" +msgstr "" + +#: src/filed/backup.c:207 src/filed/verify.c:133 +#, c-format +msgid " Unchanged file skipped: %s\n" +msgstr "" + +#: src/filed/backup.c:210 +#, c-format +msgid " Archive file not saved: %s\n" +msgstr "" + +#: src/filed/backup.c:214 src/filed/verify.c:147 +#, c-format +msgid " Could not open directory %s: ERR=%s\n" +msgstr "" + +#: src/filed/backup.c:220 +#, c-format +msgid " Unknown file type %d; not saved: %s\n" +msgstr "" + +#: src/filed/backup.c:240 +#, c-format +msgid "Python reader program \"%s\" not found.\n" +msgstr "" + +#: src/filed/backup.c:269 src/filed/verify.c:214 +#, c-format +msgid " Cannot open %s: ERR=%s.\n" +msgstr "" + +#: src/filed/backup.c:298 src/filed/verify.c:229 +#, c-format +msgid " Cannot open resource fork for %s: ERR=%s.\n" +msgstr "" + +#: src/filed/backup.c:349 +#, c-format +msgid "Unknown signature type %i.\n" +msgstr "" + +#: src/filed/backup.c:414 src/filed/backup.c:499 src/filed/backup.c:525 +#: src/filed/backup.c:557 src/filed/backup.c:570 src/filed/backup.c:578 +#: src/filed/backup.c:619 src/filed/backup.c:653 +#, c-format +msgid "Network send error to SD. ERR=%s\n" +msgstr "" + +#: src/filed/backup.c:479 +#, c-format +msgid "Compression error: %d\n" +msgstr "" + +#: src/filed/backup.c:516 +#, c-format +msgid "Read error on file %s. 
ERR=%s\n" +msgstr "" + +#: src/filed/backup.c:519 +msgid "Too many errors.\n" +msgstr "" + +#: src/filed/backup.c:548 +#, c-format +msgid "Error reading ACL of %s\n" +msgstr "" + +#: src/filed/filed.c:62 +#, c-format +msgid "" +"Copyright (C) 2000-2016 Kern Sibbald\n" +"\n" +"Version: %s (%s)\n" +"\n" +"Usage: bacula-fd [-f -s] [-c config_file] [-d debug_level]\n" +" -c use as configuration file\n" +" -dnn set debug level to nn\n" +" -f run in foreground (for debugging)\n" +" -g groupid\n" +" -i inetd request\n" +" -s no signals (for debugging)\n" +" -t test configuration file and exit\n" +" -u userid\n" +" -v verbose user messages\n" +" -? print this message.\n" +"\n" +msgstr "" + +#: src/filed/filed.c:282 +#, c-format +msgid "" +"No File daemon resource defined in %s\n" +"Without that I don't know who I am :-(\n" +msgstr "" + +#: src/filed/filed.c:287 +#, c-format +msgid "Only one Client resource permitted in %s\n" +msgstr "" + +#: src/filed/filed.c:310 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"File daemon in %s.\n" +msgstr "" + +#: src/filed/filed.c:339 +#, c-format +msgid "No Director resource defined in %s\n" +msgstr "" + +#: src/filed/job.c:324 +#, c-format +msgid "2901 Job %s not found.\n" +msgstr "" + +#: src/filed/job.c:340 +#, c-format +msgid "2001 Job %s marked to be canceled.\n" +msgstr "" + +#: src/filed/job.c:343 +msgid "2902 Error scanning cancel command.\n" +msgstr "" + +#: src/filed/job.c:362 +#, c-format +msgid "2991 Bad setdebug command: %s\n" +msgstr "" + +#: src/filed/job.c:378 +#, c-format +msgid "Bad estimate command: %s" +msgstr "" + +#: src/filed/job.c:379 +msgid "2992 Bad estimate command.\n" +msgstr "" + +#: src/filed/job.c:402 +#, c-format +msgid "Bad Job Command: %s" +msgstr "" + +#: src/filed/job.c:422 +#, c-format +msgid "Bad RunBeforeJob command: %s\n" +msgstr "" + +#: src/filed/job.c:423 src/filed/job.c:436 +msgid "2905 Bad RunBeforeJob command.\n" +msgstr "" + +#: src/filed/job.c:449 +#, c-format +msgid "Bad RunAfter command: %s\n" +msgstr "" + +#: src/filed/job.c:450 +msgid "2905 Bad RunAfterJob command.\n" +msgstr "" + +#: src/filed/job.c:476 +#, c-format +msgid "%s could not execute. ERR=%s\n" +msgstr "" + +#: src/filed/job.c:485 +#, c-format +msgid "%s: %s\n" +msgstr "" + +#: src/filed/job.c:490 +#, c-format +msgid "%s returned non-zero status=%d. ERR=%s\n" +msgstr "" + +#: src/filed/job.c:575 +#, c-format +msgid "Error running program: %s. RtnStat=%d ERR=%s\n" +msgstr "" + +#: src/filed/job.c:585 +#, c-format +msgid "Cannot open FileSet input file: %s. ERR=%s\n" +msgstr "" + +#: src/filed/job.c:679 +#, c-format +msgid "REGEX %s compile error.
ERR=%s\n" +msgstr "" + +#: src/filed/job.c:733 +#, c-format +msgid "Invalid FileSet command: %s\n" +msgstr "" + +#: src/filed/job.c:902 src/findlib/match.c:184 +#, c-format +msgid "Unknown include/exclude option: %c\n" +msgstr "" + +#: src/filed/job.c:962 src/stored/fd_cmds.c:329 +#, c-format +msgid "Could not create bootstrap file %s: ERR=%s\n" +msgstr "" + +#: src/filed/job.c:1061 +#, c-format +msgid "DIR and FD clocks differ by %d seconds, FD automatically adjusting.\n" +msgstr "" + +#: src/filed/job.c:1069 +#, c-format +msgid "Unknown backup level: %s\n" +msgstr "" + +#: src/filed/job.c:1081 +#, c-format +msgid "Bad level command: %s\n" +msgstr "" + +#: src/filed/job.c:1102 +#, c-format +msgid "Bad session command: %s" +msgstr "" + +#: src/filed/job.c:1123 +#, c-format +msgid "Bad storage command: %s" +msgstr "" + +#: src/filed/job.c:1132 +#, c-format +msgid "Failed to connect to Storage daemon: %s:%d\n" +msgstr "" + +#: src/filed/job.c:1144 +msgid "Failed to authenticate Storage daemon.\n" +msgstr "" + +#: src/filed/job.c:1170 +msgid "Cannot contact Storage daemon\n" +msgstr "" + +#: src/filed/job.c:1188 +#, c-format +msgid "Bad response to append open: %s\n" +msgstr "" + +#: src/filed/job.c:1193 +msgid "Bad response from stored to open command\n" +msgstr "" + +#: src/filed/job.c:1222 +#, c-format +msgid "Generate VSS snapshots. Driver=\"%s\", Drive(s)=\"%s\"\n" +msgstr "" + +#: src/filed/job.c:1225 +#, c-format +msgid "Generate VSS snapshots failed. ERR=%s\n" +msgstr "" + +#: src/filed/job.c:1232 +#, c-format +msgid "Generate VSS snapshot of drive \"%c:\\\" failed\n" +msgstr "" + +#: src/filed/job.c:1241 +#, c-format +msgid "VSS Writer: %s\n" +msgstr "" + +#: src/filed/job.c:1245 +msgid "No drive letters found for generating VSS snapshots.\n" +msgstr "" + +#: src/filed/job.c:1248 +msgid "VSS was not initialized properly. VSS support is disabled.\n" +msgstr "" + +#: src/filed/job.c:1297 +msgid "Append Close with SD failed.\n" +msgstr "" + +#: src/filed/job.c:1301 +#, c-format +msgid "Bad status %d returned from Storage Daemon.\n" +msgstr "" + +#: src/filed/job.c:1336 +#, c-format +msgid "2994 Bad verify command: %s\n" +msgstr "" + +#: src/filed/job.c:1351 src/filed/job.c:1390 +#, c-format +msgid "2994 Bad verify level: %s\n" +msgstr "" + +#: src/filed/job.c:1430 +#, c-format +msgid "Bad replace command. CMD=%s\n" +msgstr "" + +#: src/filed/job.c:1507 +msgid "Improper calling sequence.\n" +msgstr "" + +#: src/filed/job.c:1527 +#, c-format +msgid "Bad response to SD read open: %s\n" +msgstr "" + +#: src/filed/job.c:1532 +msgid "Bad response from stored to read open command\n" +msgstr "" + +#: src/filed/job.c:1598 +#, c-format +msgid "Comm error with SD. bad response to %s. ERR=%s\n" +msgstr "" + +#: src/filed/job.c:1601 +#, c-format +msgid "Bad response to %s command. Wanted %s, got %s\n" +msgstr "" + +#: src/filed/pythonfd.c:150 src/stored/pythonsd.c:154 +#, c-format +msgid "Cannot delete attribute %s" +msgstr "" + +#: src/filed/pythonfd.c:168 src/filed/pythonfd.c:184 src/stored/pythonsd.c:187 +#, c-format +msgid "Cannot find attribute %s" +msgstr "" + +#: src/filed/restore.c:55 +#, c-format +msgid "Size of data or stream of %s not correct. Original %s, restored %s.\n" +msgstr "" + +#: src/filed/restore.c:165 src/filed/verify_vol.c:90 +#, c-format +msgid "Record header scan error: %s\n" +msgstr "" + +#: src/filed/restore.c:172 src/filed/verify_vol.c:99 +#, c-format +msgid "Data record error. 
ERR=%s\n" +msgstr "" + +#: src/filed/restore.c:176 src/filed/verify_vol.c:103 +#, c-format +msgid "Actual data size %d not same as header %d\n" +msgstr "" + +#: src/filed/restore.c:201 src/filed/restore.c:383 +msgid "Logic error: output file should be open\n" +msgstr "" + +#: src/filed/restore.c:207 src/filed/restore.c:388 +msgid "Logic error: output file should not be open\n" +msgstr "" + +#: src/filed/restore.c:218 src/filed/verify_vol.c:142 +#: src/stored/bextract.c:289 src/stored/bls.c:371 src/stored/bscan.c:651 +#, c-format +msgid "Record header file index %ld not equal record index %ld\n" +msgstr "" + +#: src/filed/restore.c:231 src/stored/bextract.c:298 +#, c-format +msgid "%s stream not supported on this Client.\n" +msgstr "" + +#: src/filed/restore.c:308 +#, c-format +msgid " Cannot open resource fork for %s.\n" +msgstr "" + +#: src/filed/restore.c:331 +#, c-format +msgid " Invalid length of Finder Info (got %d, not 32)\n" +msgstr "" + +#: src/filed/restore.c:335 +#, c-format +msgid " Could not set Finder Info on %s\n" +msgstr "" + +#: src/filed/restore.c:347 +#, c-format +msgid "Can't restore ACL of %s\n" +msgstr "" + +#: src/filed/restore.c:359 +#, c-format +msgid "Can't restore default ACL of %s\n" +msgstr "" + +#: src/filed/restore.c:391 src/stored/bextract.c:455 +#, c-format +msgid "Unknown stream=%d ignored. This shouldn't happen!\n" +msgstr "" + +#: src/filed/restore.c:428 +#, c-format +msgid "" +"%d non-supported data streams and %d non-supported attrib streams ignored.\n" +msgstr "" + +#: src/filed/restore.c:432 +#, c-format +msgid "%d non-supported resource fork streams ignored.\n" +msgstr "" + +#: src/filed/restore.c:435 +#, c-format +msgid "%d non-supported Finder Info streams ignored.\n" +msgstr "" + +#: src/filed/restore.c:438 +#, c-format +msgid "%d non-supported acl streams ignored.\n" +msgstr "" + +#: src/filed/restore.c:450 +msgid "None" +msgstr "" + +#: src/filed/restore.c:454 +msgid "Zlib errno" +msgstr "" + +#: src/filed/restore.c:456 +msgid "Zlib stream error" +msgstr "" + +#: src/filed/restore.c:458 +msgid "Zlib data error" +msgstr "" + +#: src/filed/restore.c:460 +msgid "Zlib memory error" +msgstr "" + +#: src/filed/restore.c:462 +msgid "Zlib buffer error" +msgstr "" + +#: src/filed/restore.c:464 +msgid "Zlib version error" +msgstr "" + +#: src/filed/restore.c:466 src/lib/util.c:546 src/lib/util.c:556 +#: src/lib/util.c:564 src/lib/util.c:571 src/lib/util.c:578 src/lib/util.c:592 +#: src/lib/util.c:602 src/lib/util.c:609 src/lib/util.c:620 +msgid "*none*" +msgstr "" + +#: src/filed/restore.c:498 src/stored/bextract.c:387 +#, c-format +msgid "Seek to %s error on %s: ERR=%s\n" +msgstr "" + +#: src/filed/restore.c:522 +#, c-format +msgid "Uncompression error on file %s. 
ERR=%s\n" +msgstr "" + +#: src/filed/restore.c:530 src/stored/bextract.c:421 +msgid "GZIP data stream found, but GZIP not configured!\n" +msgstr "" + +#: src/filed/restore.c:539 src/stored/bextract.c:359 src/stored/bextract.c:409 +#, c-format +msgid "Write error on %s: %s\n" +msgstr "" + +#: src/filed/status.c:67 +#, c-format +msgid "%s Version: %s (%s) %s %s %s %s\n" +msgstr "" + +#: src/filed/status.c:71 +#, c-format +msgid "Daemon started %s, %d Job%s run since started.\n" +msgstr "" + +#: src/filed/status.c:127 +#, c-format +msgid " Sizeof: off_t=%d size_t=%d debug=%d trace=%d\n" +msgstr "" + +#: src/filed/status.c:138 +msgid "Running Jobs:\n" +msgstr "" + +#: src/filed/status.c:149 +#, c-format +msgid "Director connected at: %s\n" +msgstr "" + +#: src/filed/status.c:151 +#, c-format +msgid "JobId %d Job %s is running.\n" +msgstr "" + +#: src/filed/status.c:154 +#, c-format +msgid " %s%s Job started: %s\n" +msgstr "" + +#: src/filed/status.c:166 src/stored/status.c:323 +#, c-format +msgid " Files=%s Bytes=%s Bytes/sec=%s\n" +msgstr "" + +#: src/filed/status.c:171 +#, c-format +msgid " Files Examined=%s\n" +msgstr "" + +#: src/filed/status.c:176 +#, c-format +msgid " Processing file: %s\n" +msgstr "" + +#: src/filed/status.c:187 +msgid " SDSocket closed.\n" +msgstr "" + +#: src/filed/status.c:217 +msgid "Terminated Jobs:\n" +msgstr "" + +#: src/filed/status.c:219 +msgid " JobId Level Files Bytes Status Finished Name \n" +msgstr "" + +#: src/filed/status.c:323 src/filed/status.c:347 src/stored/status.c:513 +#: src/stored/status.c:536 +#, c-format +msgid "Bad .status command: %s\n" +msgstr "" + +#: src/filed/status.c:324 +msgid "2900 Bad .status command, missing argument.\n" +msgstr "" + +#: src/filed/status.c:348 +msgid "2900 Bad .status command, wrong argument.\n" +msgstr "" + +#: src/filed/status.c:385 src/stored/status.c:468 +msgid "Init Catalog" +msgstr "" + +#: src/filed/status.c:388 src/stored/status.c:471 +msgid "Volume to Catalog" +msgstr "" + +#: src/filed/status.c:391 src/stored/status.c:474 +msgid "Disk to Catalog" +msgstr "" + +#: src/filed/status.c:394 src/stored/status.c:477 +msgid "Data" +msgstr "" + +#: src/filed/status.c:400 src/lib/util.c:351 src/stored/status.c:483 +msgid "Unknown Job Level" +msgstr "" + +#: src/filed/status.c:451 +msgid "Bacula Idle" +msgstr "" + +#: src/filed/status.c:462 +msgid "Bacula Running" +msgstr "" + +#: src/filed/status.c:476 +msgid "Last Job Canceled" +msgstr "" + +#: src/filed/status.c:480 +msgid "Last Job Failed" +msgstr "" + +#: src/filed/status.c:484 +msgid "Last Job had Warnings" +msgstr "" + +#: src/filed/verify.c:45 +#, c-format +msgid "Cannot malloc %d network read buffer\n" +msgstr "" + +#: src/filed/verify.c:136 +#, c-format +msgid " Archive file skipped: %s\n" +msgstr "" + +#: src/filed/verify.c:139 +#, c-format +msgid " Recursion turned off. Directory skipped: %s\n" +msgstr "" + +#: src/filed/verify.c:142 +#, c-format +msgid " File system change prohibited. 
Directory skipped: %s\n" +msgstr "" + +#: src/filed/verify.c:152 +#, c-format +msgid " Unknown file type %d: %s\n" +msgstr "" + +#: src/filed/verify.c:195 src/filed/verify_vol.c:200 +#, c-format +msgid "Network error in send to Director: ERR=%s\n" +msgstr "" + +#: src/filed/verify.c:284 +#, c-format +msgid "Error reading file %s: ERR=%s\n" +msgstr "" + +#: src/filed/verify_vol.c:56 +msgid "Storage command not issued before Verify.\n" +msgstr "" + +#: src/filed/verify_vol.c:136 +#, c-format +msgid "Error scanning record header: %s\n" +msgstr "" + +#: src/findlib/attribs.c:335 +#, c-format +msgid "File size of restored file %s not correct. Original %s, restored %s.\n" +msgstr "" + +#: src/findlib/attribs.c:353 src/findlib/attribs.c:360 +#, c-format +msgid "Unable to set file owner %s: ERR=%s\n" +msgstr "" + +#: src/findlib/attribs.c:366 +#, c-format +msgid "Unable to set file modes %s: ERR=%s\n" +msgstr "" + +#: src/findlib/attribs.c:376 +#, c-format +msgid "Unable to set file times %s: ERR=%s\n" +msgstr "" + +#: src/findlib/attribs.c:390 +#, c-format +msgid "Unable to set file flags %s: ERR=%s\n" +msgstr "" + +#: src/findlib/attribs.c:635 +#, c-format +msgid "Error in %s file %s: ERR=%s\n" +msgstr "" + +#: src/findlib/attribs.c:652 +#, c-format +msgid "Error in %s: ERR=%s\n" +msgstr "" + +#: src/findlib/bfile.c:67 +msgid "GZIP data" +msgstr "" + +#: src/findlib/bfile.c:69 +msgid "GZIP sparse data" +msgstr "" + +#: src/findlib/bfile.c:71 +msgid "Win32 data" +msgstr "" + +#: src/findlib/bfile.c:73 +msgid "Win32 GZIP data" +msgstr "" + +#: src/findlib/bfile.c:75 +msgid "File attributes" +msgstr "" + +#: src/findlib/bfile.c:77 +msgid "File data" +msgstr "" + +#: src/findlib/bfile.c:79 +msgid "MD5 signature" +msgstr "" + +#: src/findlib/bfile.c:81 +msgid "Extended attributes" +msgstr "" + +#: src/findlib/bfile.c:83 +msgid "Sparse data" +msgstr "" + +#: src/findlib/bfile.c:85 +msgid "Program names" +msgstr "" + +#: src/findlib/bfile.c:87 +msgid "Program data" +msgstr "" + +#: src/findlib/bfile.c:89 +msgid "SHA1 signature" +msgstr "" + +#: src/findlib/bfile.c:91 +msgid "HFS+ resource fork" +msgstr "" + +#: src/findlib/bfile.c:93 +msgid "HFS+ Finder Info" +msgstr "" + +#: src/findlib/create_file.c:86 +#, c-format +msgid "File skipped. Not newer: %s\n" +msgstr "" + +#: src/findlib/create_file.c:93 +#, c-format +msgid "File skipped. Not older: %s\n" +msgstr "" + +#: src/findlib/create_file.c:99 +#, c-format +msgid "File skipped. Already exists: %s\n" +msgstr "" + +#: src/findlib/create_file.c:124 +#, c-format +msgid "File %s already exists and could not be replaced. 
ERR=%s.\n" +msgstr "" + +#: src/findlib/create_file.c:176 src/findlib/create_file.c:277 +#: src/findlib/create_file.c:328 +#, c-format +msgid "bpkt already open fid=%d\n" +msgstr "" + +#: src/findlib/create_file.c:198 +msgid "Could not save_dirn" +msgstr "" + +#: src/findlib/create_file.c:207 src/findlib/create_file.c:220 +#, c-format +msgid "Could not chdir to %s: ERR=%s\n" +msgstr "" + +#: src/findlib/create_file.c:240 +#, c-format +msgid "Could not create %s: ERR=%s\n" +msgstr "" + +#: src/findlib/create_file.c:253 +#, c-format +msgid "Cannot make fifo %s: ERR=%s\n" +msgstr "" + +#: src/findlib/create_file.c:261 +#, c-format +msgid "Cannot make node %s: ERR=%s\n" +msgstr "" + +#: src/findlib/create_file.c:297 +#, c-format +msgid "Could not symlink %s -> %s: ERR=%s\n" +msgstr "" + +#: src/findlib/create_file.c:307 +#, c-format +msgid "Could not hard link %s -> %s: ERR=%s\n" +msgstr "" + +#: src/findlib/create_file.c:358 +#, c-format +msgid "Original file %s not saved: type=%d\n" +msgstr "" + +#: src/findlib/create_file.c:361 +#, c-format +msgid "Unknown file type %d; not restored: %s\n" +msgstr "" + +#: src/findlib/create_file.c:392 +#, c-format +msgid "Zero length filename: %s\n" +msgstr "" + +#: src/findlib/enable_priv.c:85 +msgid "AdjustTokenPrivileges set " +msgstr "" + +#: src/findlib/find_one.c:168 +#, c-format +msgid "Top level directory \"%s\" has an unlisted fstype\n" +msgstr "" + +#: src/findlib/makepath.c:117 +#, c-format +msgid "Cannot create directory %s: ERR=%s\n" +msgstr "" + +#: src/findlib/makepath.c:121 src/findlib/makepath.c:378 +#, c-format +msgid "%s exists but is not a directory\n" +msgstr "" + +#: src/findlib/makepath.c:276 src/findlib/makepath.c:337 +#: src/findlib/makepath.c:397 +#, c-format +msgid "Cannot change owner and/or group of %s: ERR=%s\n" +msgstr "" + +#: src/findlib/makepath.c:297 +#, c-format +msgid "Cannot chdir to directory, %s: ERR=%s\n" +msgstr "" + +#: src/findlib/makepath.c:352 src/findlib/makepath.c:368 +#: src/findlib/makepath.c:402 +#, c-format +msgid "Cannot change permissions of %s: ERR=%s\n" +msgstr "" + +#: src/findlib/save-cwd.c:48 +#, c-format +msgid "Cannot open current directory: %s\n" +msgstr "" + +#: src/findlib/save-cwd.c:62 +#, c-format +msgid "Current directory: %s\n" +msgstr "" + +#: src/findlib/save-cwd.c:84 +#, c-format +msgid "Cannot get current directory: %s\n" +msgstr "" + +#: src/findlib/save-cwd.c:105 +#, c-format +msgid "Cannot return to %s from %s: %s\n" +msgstr "" + +#: src/findlib/save-cwd.c:109 +#, c-format +msgid "Cannot return to saved working directory from %s: %s\n" +msgstr "" + +#: src/findlib/save-cwd.c:115 +#, c-format +msgid "Cannot return to %s: %s\n" +msgstr "" + +#: src/findlib/save-cwd.c:119 +#, c-format +msgid "Cannot return to saved working directory: %s\n" +msgstr "" + +#: src/gnome2-console/authenticate.c:74 +#, c-format +msgid "%s: Director authorization problem.\n" +msgstr "" + +#: src/gnome2-console/authenticate.c:75 +msgid "Director authorization problem.\n" +msgstr "" + +#: src/gnome2-console/authenticate.c:77 +msgid "" +"Please see http://www.bacula.org/rel-manual/faq.html#AuthorizationErrors for " +"help.\n" +msgstr "" + +#: src/gnome2-console/authenticate.c:87 +#, c-format +msgid "%s: Bad response to Hello command: ERR=%s\n" +msgstr "" + +#: src/gnome2-console/authenticate.c:89 +msgid "The Director is probably not running.\n" +msgstr "" + +#: src/gnome2-console/console.c:88 +#, c-format +msgid "" +"Copyright (C) 2000-2016 Kern Sibbald\n" +"\n" +"Version: %s (%s) %s %s %s\n" +"\n" +"Usage: 
gnome-console [-s] [-c config_file] [-d debug_level] [config_file]\n" +" -c set configuration file to file\n" +" -dnn set debug level to nn\n" +" -s no signals\n" +" -t test - read configuration and exit\n" +" -? print this message.\n" +"\n" +msgstr "" + +#: src/gnome2-console/console.c:219 +#, c-format +msgid "Pthread cond init error = %s\n" +msgstr "" + +#: src/gnome2-console/console.c:357 +msgid " Not Connected" +msgstr "" + +#: src/gnome2-console/console.c:478 +#, c-format +msgid " Connecting to Director %s:%d" +msgstr "" + +#: src/gnome2-console/console.c:479 +#, c-format +msgid "" +"Connecting to Director %s:%d\n" +"\n" +msgstr "" + +#: src/gnome2-console/console.c:494 src/wx-console/console_thread.cpp:360 +#, c-format +msgid "Passphrase for Console \"%s\" TLS private key: " +msgstr "" + +#: src/gnome2-console/console.c:516 src/wx-console/console_thread.cpp:381 +#, c-format +msgid "Passphrase for Director \"%s\" TLS private key: " +msgstr "" + +#: src/gnome2-console/console.c:535 src/tray-monitor/tray-monitor.c:860 +#: src/wx-console/console_thread.cpp:399 +msgid "Director daemon" +msgstr "" + +#: src/gnome2-console/console.c:547 +msgid " Initializing ..." +msgstr "" + +#: src/gnome2-console/console.c:583 +msgid " Connected" +msgstr "" + +#: src/gnome2-console/console.c:591 +msgid " Processing command ..." +msgstr "" + +#: src/gnome2-console/console.c:626 +msgid " At prompt waiting for input ..." +msgstr "" + +#: src/gnome2-console/console.c:746 +msgid " Ready" +msgstr "" + +#: src/gnome2-console/console_conf.c:135 +#, c-format +msgid "Console: name=%s\n" +msgstr "" + +#: src/gnome2-console/console_conf.c:138 +#, c-format +msgid "ConsoleFont: name=%s font face=%s\n" +msgstr "" + +#: src/gnome2-console/interface.c:32 +msgid "_Connect" +msgstr "" + +#: src/gnome2-console/interface.c:33 src/gnome2-console/interface.c:232 +msgid "Connect to Director" +msgstr "" + +#: src/gnome2-console/interface.c:39 +msgid "_Disconnect" +msgstr "" + +#: src/gnome2-console/interface.c:40 +msgid "Disconnect from Director" +msgstr "" + +#: src/gnome2-console/interface.c:93 +msgid "_Display Messages" +msgstr "" + +#: src/gnome2-console/interface.c:129 +msgid "_File" +msgstr "" + +#: src/gnome2-console/interface.c:136 +msgid "_Edit" +msgstr "" + +#: src/gnome2-console/interface.c:143 +msgid "_View" +msgstr "" + +#: src/gnome2-console/interface.c:144 src/gnome2-console/interface.c:252 +msgid "Display Messages" +msgstr "" + +#: src/gnome2-console/interface.c:150 +msgid "_Settings" +msgstr "" + +#: src/gnome2-console/interface.c:157 +msgid "_Help" +msgstr "" + +#: src/gnome2-console/interface.c:197 +msgid "Bacula Console" +msgstr "" + +#: src/gnome2-console/interface.c:231 src/wx-console/wxbmainframe.cpp:229 +#: src/wx-console/wxbmainframe.cpp:597 +msgid "Connect" +msgstr "" + +#: src/gnome2-console/interface.c:241 +msgid "Run" +msgstr "" + +#: src/gnome2-console/interface.c:242 src/gnome2-console/interface.c:677 +#: src/gnome2-console/interface.c:690 +msgid "Run a Job" +msgstr "" + +#: src/gnome2-console/interface.c:251 +msgid "Msgs" +msgstr "" + +#: src/gnome2-console/interface.c:261 src/lib/util.c:292 +#: src/wx-console/wxbrestorepanel.cpp:384 +#: src/wx-console/wxbrestorepanel.cpp:1949 +msgid "Restore" +msgstr "" + +#: src/gnome2-console/interface.c:271 +msgid "Label" +msgstr "" + +#: src/gnome2-console/interface.c:299 +msgid " Command: " +msgstr "" + +#: src/gnome2-console/interface.c:307 +msgid "Enter Commands Here" +msgstr "" + +#: src/gnome2-console/interface.c:318 +msgid " Status: " +msgstr "" + +#: 
src/gnome2-console/interface.c:326 +msgid " " +msgstr "" + +#: src/gnome2-console/interface.c:422 +msgid "About Bacula Console" +msgstr "" + +#: src/gnome2-console/interface.c:436 +msgid "Bacula Console\n" +msgstr "" + +#: src/gnome2-console/interface.c:444 +msgid "Copyright (c) 2000 - 2004, Kern Sibbald and John Walker" +msgstr "" + +#: src/gnome2-console/interface.c:450 +msgid "Authors: Kern Sibbald and John Walker" +msgstr "" + +#: src/gnome2-console/interface.c:456 +msgid "The Leading Open Source Backup Solution." +msgstr "" + +#: src/gnome2-console/interface.c:521 src/gnome2-console/interface.c:536 +msgid "Select Director" +msgstr "" + +#: src/gnome2-console/interface.c:708 src/gnome2-console/interface.c:1616 +msgid "Job:" +msgstr "" + +#: src/gnome2-console/interface.c:728 +msgid " Type:" +msgstr "" + +#: src/gnome2-console/interface.c:757 src/gnome2-console/interface.c:1644 +#: src/wx-console/wxbrestorepanel.cpp:1862 +msgid "Client:" +msgstr "" + +#: src/gnome2-console/interface.c:787 src/gnome2-console/interface.c:1672 +msgid "FileSet: " +msgstr "" + +#: src/gnome2-console/interface.c:813 src/wx-console/wxbrestorepanel.cpp:1868 +msgid "Priority:" +msgstr "" + +#: src/gnome2-console/interface.c:836 +msgid "Level:" +msgstr "" + +#: src/gnome2-console/interface.c:866 src/gnome2-console/interface.c:1418 +#: src/gnome2-console/interface.c:1700 +msgid "Pool:" +msgstr "" + +#: src/gnome2-console/interface.c:895 src/gnome2-console/interface.c:1393 +#: src/gnome2-console/interface.c:1728 src/wx-console/wxbrestorepanel.cpp:1864 +msgid "Storage:" +msgstr "" + +#: src/gnome2-console/interface.c:924 +msgid "Messages:" +msgstr "" + +#: src/gnome2-console/interface.c:952 +msgid "Where: " +msgstr "" + +#: src/gnome2-console/interface.c:970 src/wx-console/wxbrestorepanel.cpp:1866 +msgid "When:" +msgstr "" + +#: src/gnome2-console/interface.c:988 src/wx-console/wxbrestorepanel.cpp:1847 +msgid "Bootstrap:" +msgstr "" + +#: src/gnome2-console/interface.c:1108 +msgid "_New" +msgstr "" + +#: src/gnome2-console/interface.c:1177 +msgid "Restore File Selection" +msgstr "" + +#: src/gnome2-console/interface.c:1226 +msgid "Current dir:" +msgstr "" + +#: src/gnome2-console/interface.c:1252 +msgid "Files Selected: " +msgstr "" + +#: src/gnome2-console/interface.c:1373 src/gnome2-console/interface.c:1383 +msgid "Label a Volume" +msgstr "" + +#: src/gnome2-console/interface.c:1444 +msgid "Volume Name:" +msgstr "" + +#: src/gnome2-console/interface.c:1459 +msgid "Slot:" +msgstr "" + +#: src/gnome2-console/interface.c:1585 +msgid "Restore Files Dialog" +msgstr "" + +#: src/gnome2-console/interface.c:1599 +msgid "Restore Files" +msgstr "" + +#: src/gnome2-console/interface.c:1756 +msgid "Before:" +msgstr "" + +#: src/gnome2-console/interface.c:1799 +msgid "Select Files" +msgstr "" + +#: src/gnome2-console/interface.c:1883 +msgid "Progress" +msgstr "" + +#: src/gnome2-console/restore.c:121 +msgid "Mark" +msgstr "" + +#: src/gnome2-console/restore.c:121 +msgid "File" +msgstr "" + +#: src/gnome2-console/restore.c:121 +msgid "Mode" +msgstr "" + +#: src/gnome2-console/restore.c:121 src/wx-console/wxbrestorepanel.cpp:288 +msgid "User" +msgstr "" + +#: src/gnome2-console/restore.c:121 src/wx-console/wxbrestorepanel.cpp:292 +msgid "Group" +msgstr "" + +#: src/gnome2-console/restore.c:121 src/wx-console/wxbrestorepanel.cpp:276 +msgid "Size" +msgstr "" + +#: src/gnome2-console/restore.c:121 src/wx-console/wxbrestorepanel.cpp:280 +msgid "Date" +msgstr "" + +#: src/gnome2-console/support.c:41 +#, c-format +msgid "Widget not 
found: %s" +msgstr "" + +#: src/gnome2-console/support.c:60 src/gnome2-console/support.c:85 +#, c-format +msgid "Couldn't find pixmap file: %s" +msgstr "" + +#: src/gnome2-console/support.c:92 +#, c-format +msgid "Failed to load pixbuf file: %s: %s\n" +msgstr "" + +#: src/lib/tls.c:105 +#, c-format +msgid "" +"Error with certificate at depth: %d, issuer = %s, subject = %s, ERR=%d:%s\n" +msgstr "" + +#: src/lib/tls.c:152 +msgid "Error initializing SSL context" +msgstr "" + +#: src/lib/tls.c:173 +msgid "Error loading certificate verification stores" +msgstr "" + +#: src/lib/tls.c:178 +msgid "" +"Either a certificate file or a directory must be specified as a verification " +"store\n" +msgstr "" + +#: src/lib/tls.c:189 +msgid "Error loading certificate file" +msgstr "" + +#: src/lib/tls.c:197 +msgid "Error loading private key" +msgstr "" + +#: src/lib/tls.c:205 +msgid "Unable to open DH parameters file" +msgstr "" + +#: src/lib/tls.c:211 +msgid "Unable to load DH parameters from specified file" +msgstr "" + +#: src/lib/tls.c:215 +msgid "Failed to set TLS Diffie-Hellman parameters" +msgstr "" + +#: src/lib/tls.c:224 +msgid "Error setting cipher list, no valid ciphers available\n" +msgstr "" + +#: src/lib/tls.c:272 +msgid "Peer failed to present a TLS certificate\n" +msgstr "" + +#: src/lib/tls.c:314 +#, c-format +msgid "Peer %s failed to present a TLS certificate\n" +msgstr "" + +#: src/lib/tls.c:417 +msgid "Error creating file descriptor-based BIO" +msgstr "" + +#: src/lib/tls.c:428 +msgid "Error creating new SSL object" +msgstr "" + +#: src/lib/tls.c:492 src/lib/tls.c:515 +msgid "Connect failure" +msgstr "" + +#: src/lib/tls.c:592 src/lib/tls.c:596 +msgid "TLS shutdown failure." +msgstr "" + +#: src/lib/tls.c:645 src/lib/tls.c:665 +msgid "TLS read/write failure." 
+msgstr "" + +#: src/lib/tls.c:723 src/lib/tls.c:780 src/stored/dev.c:209 +#: src/stored/dev.c:227 src/stored/dev.c:233 src/stored/stored_conf.c:593 +#, c-format +msgid "Unable to init mutex: ERR=%s\n" +msgstr "" + +#: src/lib/tls.c:743 src/lib/tls.c:812 +#, c-format +msgid "Unable to destroy mutex: ERR=%s\n" +msgstr "" + +#: src/lib/tls.c:877 +#, c-format +msgid "Unable to init OpenSSL threading: ERR=%s\n" +msgstr "" + +#: src/lib/tls.c:887 +msgid "Failed to seed OpenSSL PRNG\n" +msgstr "" + +#: src/lib/tls.c:913 +msgid "Failed to save OpenSSL PRNG\n" +msgstr "" + +#: src/lib/address_conf.c:50 +#, c-format +msgid "Only ipv4 and ipv6 are supported (%d)\n" +msgstr "" + +#: src/lib/address_conf.c:54 +#, c-format +msgid "Only ipv4 is supported (%d)\n" +msgstr "" + +#: src/lib/address_conf.c:163 +#, c-format +msgid "It was tried to assign a ipv6 address to a ipv4(%d)\n" +msgstr "" + +#: src/lib/address_conf.c:172 +#, c-format +msgid "It was tried to assign a ipv4 address to a ipv6(%d)\n" +msgstr "" + +#: src/lib/address_conf.c:251 +#, c-format +msgid "Can't add default address (%s)\n" +msgstr "" + +#: src/lib/address_conf.c:281 +msgid "the old style addresses cannot be mixed with new style" +msgstr "" + +#: src/lib/address_conf.c:304 +#, c-format +msgid "can't resolve service(%s)" +msgstr "" + +#: src/lib/address_conf.c:314 +#, c-format +msgid "can't resolve hostname(%s) %s" +msgstr "" + +#: src/lib/address_conf.c:403 +#, c-format +msgid "Expected a block begin { , got: %s" +msgstr "" + +#: src/lib/address_conf.c:408 +msgid "Empty addr block is not allowed" +msgstr "" + +#: src/lib/address_conf.c:412 +#, c-format +msgid "Expected a string, got: %s" +msgstr "" + +#: src/lib/address_conf.c:421 +#, c-format +msgid "Expected a string [ip|ipv4|ipv6], got: %s" +msgstr "" + +#: src/lib/address_conf.c:425 +#, c-format +msgid "Expected a string [ip|ipv4], got: %s" +msgstr "" + +#: src/lib/address_conf.c:430 src/lib/address_conf.c:460 +#, c-format +msgid "Expected a equal =, got: %s" +msgstr "" + +#: src/lib/address_conf.c:434 +#, c-format +msgid "Expected a block beginn { , got: %s" +msgstr "" + +#: src/lib/address_conf.c:441 src/lib/address_conf.c:456 +#, c-format +msgid "Expected a identifier [addr|port], got: %s" +msgstr "" + +#: src/lib/address_conf.c:446 +msgid "Only one port per address block" +msgstr "" + +#: src/lib/address_conf.c:452 +msgid "Only one addr per address block" +msgstr "" + +#: src/lib/address_conf.c:468 +#, c-format +msgid "Expected a number or a string, got: %s" +msgstr "" + +#: src/lib/address_conf.c:474 +#, c-format +msgid "Expected an IP number or a hostname, got: %s" +msgstr "" + +#: src/lib/address_conf.c:480 +msgid "State machine missmatch" +msgstr "" + +#: src/lib/address_conf.c:486 src/lib/address_conf.c:499 +#, c-format +msgid "Expected a end of block }, got: %s" +msgstr "" + +#: src/lib/address_conf.c:492 +#, c-format +msgid "Can't add hostname(%s) and port(%s) to addrlist (%s)" +msgstr "" + +#: src/lib/address_conf.c:508 +#, c-format +msgid "Expected a hostname or IP nummer, got: %s" +msgstr "" + +#: src/lib/address_conf.c:513 src/lib/address_conf.c:527 +#, c-format +msgid "can't add port (%s) to (%s)" +msgstr "" + +#: src/lib/address_conf.c:522 +#, c-format +msgid "Expected a port number or string, got: %s" +msgstr "" + +#: src/lib/attr.c:64 +#, c-format +msgid "Error scanning attributes: %s\n" +msgstr "" + +#: src/lib/berrno.c:48 +msgid "Child exited normally." 
+msgstr "" + +#: src/lib/berrno.c:55 +msgid "Unknown error during program execvp" +msgstr "" + +#: src/lib/berrno.c:58 +#, c-format +msgid "Child exited with code %d" +msgstr "" + +#: src/lib/berrno.c:66 +#, c-format +msgid "Child died from signal %d: %s" +msgstr "" + +#: src/lib/berrno.c:72 +msgid "Invalid errno. No error message possible." +msgstr "" + +#: src/lib/bget_msg.c:86 +msgid "Status OK\n" +msgstr "" + +#: src/lib/bget_msg.c:90 +#, c-format +msgid "bget_msg: unknown signal %d\n" +msgstr "" + +#: src/lib/bnet.c:109 +#, c-format +msgid "Attr spool write error. ERR=%s\n" +msgstr "" + +#: src/lib/bnet.c:209 src/lib/bnet.c:266 +#, c-format +msgid "Read expected %d got %d from %s:%s:%d\n" +msgstr "" + +#: src/lib/bnet.c:227 +#, c-format +msgid "Packet size too big from \"%s:%s:%d. Terminating connection.\n" +msgstr "" + +#: src/lib/bnet.c:256 +#, c-format +msgid "Read error from %s:%s:%d: ERR=%s\n" +msgstr "" + +#: src/lib/bnet.c:332 src/lib/bnet.c:348 +#, c-format +msgid "fread attr spool error. ERR=%s\n" +msgstr "" + +#: src/lib/bnet.c:392 +#, c-format +msgid "Write error sending len to %s:%s:%d: ERR=%s\n" +msgstr "" + +#: src/lib/bnet.c:397 src/lib/bnet.c:430 +#, c-format +msgid "Wrote %d bytes to %s:%s:%d, but only %d accepted.\n" +msgstr "" + +#: src/lib/bnet.c:424 +#, c-format +msgid "Write error sending %d bytes to %s:%s:%d: ERR=%s\n" +msgstr "" + +#: src/lib/bnet.c:450 src/lib/bnet.c:490 +msgid "TLS connection initialization failed.\n" +msgstr "" + +#: src/lib/bnet.c:458 +msgid "TLS Negotiation failed.\n" +msgstr "" + +#: src/lib/bnet.c:464 +msgid "" +"TLS certificate verification failed. Peer certificate did not match a " +"required commonName\n" +msgstr "" + +#: src/lib/bnet.c:502 +#, c-format +msgid "" +"TLS host certificate verification failed. Host %s did not match presented " +"certificate\n" +msgstr "" + +#: src/lib/bnet.c:516 src/lib/bnet.c:521 +msgid "TLS not configured.\n" +msgstr "" + +#: src/lib/bnet.c:617 +msgid "No problem." +msgstr "" + +#: src/lib/bnet.c:620 +msgid "Authoritative answer for host not found." +msgstr "" + +#: src/lib/bnet.c:623 +msgid "Non-authoritative for host not found, or ServerFail." +msgstr "" + +#: src/lib/bnet.c:626 +msgid "Non-recoverable errors, FORMERR, REFUSED, or NOTIMP." +msgstr "" + +#: src/lib/bnet.c:629 +msgid "Valid name, no data record of resquested type." +msgstr "" + +#: src/lib/bnet.c:632 +msgid "Unknown error." +msgstr "" + +#: src/lib/bnet.c:767 +#, c-format +msgid "gethostbyname() for host \"%s\" failed: ERR=%s\n" +msgstr "" + +#: src/lib/bnet.c:787 +#, c-format +msgid "Socket open error. proto=%d port=%d. ERR=%s\n" +msgstr "" + +#: src/lib/bnet.c:796 src/lib/bnet.c:821 src/lib/bnet_server.c:193 +#: src/lib/bnet_server.c:351 +#, c-format +msgid "Cannot set SO_KEEPALIVE on socket: %s\n" +msgstr "" + +#: src/lib/bnet.c:852 +#, c-format +msgid "" +"Could not connect to %s on %s:%d. ERR=%s\n" +"Retrying ...\n" +msgstr "" + +#: src/lib/bnet.c:858 +#, c-format +msgid "Unable to connect to %s on %s:%d. 
ERR=%s\n" +msgstr "" + +#: src/lib/bnet.c:936 +msgid "Could not malloc BSOCK data buffer\n" +msgstr "" + +#: src/lib/bnet.c:943 src/lib/bnet.c:967 +#, c-format +msgid "sockopt error: %s\n" +msgstr "" + +#: src/lib/bnet.c:949 src/lib/bnet.c:973 +#, c-format +msgid "Warning network buffer = %d bytes not max size.\n" +msgstr "" + +#: src/lib/bnet.c:953 src/lib/bnet.c:977 +#, c-format +msgid "Network buffer size %d not multiple of tape block size.\n" +msgstr "" + +#: src/lib/bnet.c:997 src/lib/bnet.c:1031 +#, c-format +msgid "fcntl F_GETFL error. ERR=%s\n" +msgstr "" + +#: src/lib/bnet.c:1003 src/lib/bnet.c:1037 src/lib/bnet.c:1062 +#, c-format +msgid "fcntl F_SETFL error. ERR=%s\n" +msgstr "" + +#: src/lib/bnet.c:1116 +#, c-format +msgid "Unknown sig %d" +msgstr "" + +#: src/lib/bnet_pkt.c:89 src/lib/bnet_pkt.c:147 +#, c-format +msgid "Unknown BPKT type: %d\n" +msgstr "" + +#: src/lib/bnet_server.c:96 +#, c-format +msgid "Cannot open stream socket. ERR=%s. Current %s All %s\n" +msgstr "" + +#: src/lib/bnet_server.c:109 src/lib/bnet_server.c:258 +#, c-format +msgid "Cannot set SO_REUSEADDR on socket: %s\n" +msgstr "" + +#: src/lib/bnet_server.c:118 +#, c-format +msgid "Cannot bind port %d: ERR=%s: Retrying ...\n" +msgstr "" + +#: src/lib/bnet_server.c:123 +#, c-format +msgid "Cannot bind port %d: ERR=%s.\n" +msgstr "" + +#: src/lib/bnet_server.c:134 +#, c-format +msgid "Could not init client queue: ERR=%s\n" +msgstr "" + +#: src/lib/bnet_server.c:157 src/lib/bnet_server.c:320 +#, c-format +msgid "Error in select: %s\n" +msgstr "" + +#: src/lib/bnet_server.c:178 src/lib/bnet_server.c:339 +#, c-format +msgid "Connection from %s:%d refused by hosts.access\n" +msgstr "" + +#: src/lib/bnet_server.c:204 +msgid "Could not create client BSOCK.\n" +msgstr "" + +#: src/lib/bnet_server.c:211 +#, c-format +msgid "Could not add job to client queue: ERR=%s\n" +msgstr "" + +#: src/lib/bnet_server.c:222 +#, c-format +msgid "Could not destroy client queue: ERR=%s\n" +msgstr "" + +#: src/lib/bnet_server.c:249 +#, c-format +msgid "Cannot open stream socket: %s\n" +msgstr "" + +#: src/lib/bnet_server.c:278 +#, c-format +msgid "Cannot bind port %d: ERR=%s: retrying ...\n" +msgstr "" + +#: src/lib/bnet_server.c:284 +msgid "Server socket" +msgstr "" + +#: src/lib/bnet_server.c:366 +#, c-format +msgid "Socket accept error for %s. ERR=%s\n" +msgstr "" + +#: src/lib/bpipe.c:282 src/lib/bpipe.c:361 +msgid "Program killed by Bacula watchdog (timeout)\n" +msgstr "" + +#: src/lib/bshm.c:69 +#, c-format +msgid "shmget failure key = %x\n" +msgstr "" + +#: src/lib/bshm.c:77 +#, c-format +msgid "Could not get %d bytes of shared memory: %s\n" +msgstr "" + +#: src/lib/bshm.c:102 +#, c-format +msgid "Could not attach shared memory: %s\n" +msgstr "" + +#: src/lib/bshm.c:123 +#, c-format +msgid "Error detaching shared memory: %s\n" +msgstr "" + +#: src/lib/bshm.c:139 +#, c-format +msgid "Could not destroy shared memory: %s\n" +msgstr "" + +#: src/lib/bsys.c:176 src/lib/bsys.c:192 src/lib/bsys.c:202 src/lib/bsys.c:214 +#, c-format +msgid "Out of memory: ERR=%s\n" +msgstr "" + +#: src/lib/bsys.c:256 +msgid "Buffer overflow.\n" +msgstr "" + +#: src/lib/bsys.c:322 +msgid "Bad errno" +msgstr "" + +#: src/lib/bsys.c:339 +msgid "Possible mutex deadlock.\n" +msgstr "" + +#: src/lib/bsys.c:343 src/lib/bsys.c:375 +#, c-format +msgid "Mutex lock failure. ERR=%s\n" +msgstr "" + +#: src/lib/bsys.c:346 +msgid "Possible mutex deadlock resolved.\n" +msgstr "" + +#: src/lib/bsys.c:358 +#, c-format +msgid "Mutex unlock not locked. 
ERR=%s\n" +msgstr "" + +#: src/lib/bsys.c:363 src/lib/bsys.c:385 +#, c-format +msgid "Mutex unlock failure. ERR=%s\n" +msgstr "" + +#: src/lib/bsys.c:399 +#, c-format +msgid "Memset for %d bytes at %s:%d\n" +msgstr "" + +#: src/lib/bsys.c:428 +#, c-format +msgid "Cannot open pid file. %s ERR=%s\n" +msgstr "" + +#: src/lib/bsys.c:432 +#, c-format +msgid "" +"%s is already running. pid=%d\n" +"Check file %s\n" +msgstr "" + +#: src/lib/bsys.c:445 +#, c-format +msgid "Could not open pid file. %s ERR=%s\n" +msgstr "" + +#: src/lib/bsys.c:565 +#, c-format +msgid "Could not create state file. %s ERR=%s\n" +msgstr "" + +#: src/lib/bsys.c:584 +#, c-format +msgid "Write final hdr error: ERR=%s\n" +msgstr "" + +#: src/lib/bsys.c:611 +#, c-format +msgid "Could not find specified group: %s\n" +msgstr "" + +#: src/lib/bsys.c:614 src/lib/bsys.c:618 +#, c-format +msgid "Could not set specified group: %s\n" +msgstr "" + +#: src/lib/bsys.c:627 +#, c-format +msgid "Could not find specified userid: %s\n" +msgstr "" + +#: src/lib/bsys.c:630 +#, c-format +msgid "Could not set specified userid: %s\n" +msgstr "" + +#: src/lib/btimers.c:241 +msgid "stop_btimer called with NULL btimer_id\n" +msgstr "" + +#: src/lib/cram-md5.c:76 src/lib/cram-md5.c:101 +msgid "1999 Authorization failed.\n" +msgstr "" + +#: src/lib/daemon.c:53 +#, c-format +msgid "Cannot fork to become daemon: %s\n" +msgstr "" + +#: src/lib/edit.c:400 +#, c-format +msgid "Illegal character \"%c\" in name.\n" +msgstr "" + +#: src/lib/edit.c:407 +msgid "Name too long.\n" +msgstr "" + +#: src/lib/events.c:161 +msgid "Events not available" +msgstr "" + +#: src/lib/jcr.c:278 +msgid "NULL jcr.\n" +msgstr "" + +#: src/lib/jcr.c:399 +#, c-format +msgid "JCR use_count=%d JobId=%d\n" +msgstr "" + +#: src/lib/jcr.c:680 +#, c-format +msgid "" +"Watchdog sending kill after %d secs to thread stalled reading Storage " +"daemon.\n" +msgstr "" + +#: src/lib/jcr.c:692 +#, c-format +msgid "" +"Watchdog sending kill after %d secs to thread stalled reading File daemon.\n" +msgstr "" + +#: src/lib/jcr.c:704 +#, c-format +msgid "" +"Watchdog sending kill after %d secs to thread stalled reading Director.\n" +msgstr "" + +#: src/lib/lex.c:79 src/wx-console/console_thread.cpp:186 +#, c-format +msgid "Problem probably begins at line %d.\n" +msgstr "" + +#: src/lib/lex.c:84 src/wx-console/console_thread.cpp:191 +#, c-format +msgid "" +"Config error: %s\n" +" : line %d, col %d of file %s\n" +"%s\n" +"%s" +msgstr "" + +#: src/lib/lex.c:88 +#, c-format +msgid "Config error: %s\n" +msgstr "" + +#: src/lib/lex.c:108 +msgid "Close of NULL file\n" +msgstr "" + +#: src/lib/lex.c:181 +msgid "get_char: called after EOF\n" +msgstr "" + +#: src/lib/lex.c:220 +#, c-format +msgid "Config token too long, file: %s, line %d, begins at line %d\n" +msgstr "" + +#: src/lib/lex.c:244 +msgid "none" +msgstr "" + +#: src/lib/lex.c:245 +msgid "comment" +msgstr "" + +#: src/lib/lex.c:246 +msgid "number" +msgstr "" + +#: src/lib/lex.c:247 +msgid "ip_addr" +msgstr "" + +#: src/lib/lex.c:248 +msgid "identifier" +msgstr "" + +#: src/lib/lex.c:249 +msgid "string" +msgstr "" + +#: src/lib/lex.c:250 +msgid "quoted_string" +msgstr "" + +#: src/lib/lex.c:286 +#, c-format +msgid "expected a positive integer number, got: %s" +msgstr "" + +#: src/lib/lex.c:292 +#, c-format +msgid "expected a postive integer number, got: %s" +msgstr "" + +#: src/lib/lex.c:504 +#, c-format +msgid "Cannot open included config file %s: %s\n" +msgstr "" + +#: src/lib/lex.c:539 +#, c-format +msgid "expected an integer or a range, got %s: 
%s" +msgstr "" + +#: src/lib/lex.c:553 src/lib/lex.c:561 src/lib/lex.c:572 src/lib/lex.c:580 +#, c-format +msgid "expected an integer number, got %s: %s" +msgstr "" + +#: src/lib/lex.c:590 +#, c-format +msgid "expected a name, got %s: %s" +msgstr "" + +#: src/lib/lex.c:594 +#, c-format +msgid "name %s length %d too long, max is %d\n" +msgstr "" + +#: src/lib/lex.c:602 +#, c-format +msgid "expected a string, got %s: %s" +msgstr "" + +#: src/lib/mem_pool.c:95 +#, c-format +msgid "MemPool index %d larger than max %d\n" +msgstr "" + +#: src/lib/mem_pool.c:113 src/lib/mem_pool.c:133 src/lib/mem_pool.c:168 +#: src/lib/mem_pool.c:239 src/lib/mem_pool.c:259 src/lib/mem_pool.c:297 +#: src/lib/mem_pool.c:550 +#, c-format +msgid "Out of memory requesting %d bytes\n" +msgstr "" + +#: src/lib/message.c:253 src/lib/message.c:263 +#, c-format +msgid "Could not open console message file %s: ERR=%s\n" +msgstr "" + +#: src/lib/message.c:268 +#, c-format +msgid "Could not get con mutex: ERR=%s\n" +msgstr "" + +#: src/lib/message.c:372 +#, c-format +msgid "open mail pipe %s failed: ERR=%s\n" +msgstr "" + +#: src/lib/message.c:378 +msgid "Bacula Message" +msgstr "" + +#: src/lib/message.c:431 +msgid "open mail pipe failed.\n" +msgstr "" + +#: src/lib/message.c:443 +#, c-format +msgid "close error: ERR=%s\n" +msgstr "" + +#: src/lib/message.c:454 +#, c-format +msgid "Mail prog: %s" +msgstr "" + +#: src/lib/message.c:463 +#, c-format +msgid "" +"Mail program terminated in error.\n" +"CMD=%s\n" +"ERR=%s\n" +msgstr "" + +#: src/lib/message.c:650 +#, c-format +msgid "" +"Operator mail program terminated in error.\n" +"CMD=%s\n" +"ERR=%s\n" +msgstr "" + +#: src/lib/message.c:667 src/lib/message.c:689 src/lib/message.c:705 +#, c-format +msgid "fopen %s failed: ERR=%s\n" +msgstr "" + +#: src/lib/message.c:933 +#, c-format +msgid "%s: ABORTING due to ERROR in %s:%d\n" +msgstr "" + +#: src/lib/message.c:937 +#, c-format +msgid "%s: ERROR TERMINATION at %s:%d\n" +msgstr "" + +#: src/lib/message.c:942 +#, c-format +msgid "%s: Fatal Error because: " +msgstr "" + +#: src/lib/message.c:944 +#, c-format +msgid "%s: Fatal Error at %s:%d because:\n" +msgstr "" + +#: src/lib/message.c:948 +#, c-format +msgid "%s: ERROR: " +msgstr "" + +#: src/lib/message.c:950 +#, c-format +msgid "%s: ERROR in %s:%d " +msgstr "" + +#: src/lib/message.c:953 +#, c-format +msgid "%s: Warning: " +msgstr "" + +#: src/lib/message.c:956 +#, c-format +msgid "%s: Security violation: " +msgstr "" + +#: src/lib/message.c:1032 +#, c-format +msgid "%s ABORTING due to ERROR\n" +msgstr "" + +#: src/lib/message.c:1035 +#, c-format +msgid "%s ERROR TERMINATION\n" +msgstr "" + +#: src/lib/message.c:1038 +#, c-format +msgid "%s: %s Fatal error: " +msgstr "" + +#: src/lib/message.c:1044 +#, c-format +msgid "%s: %s Error: " +msgstr "" + +#: src/lib/message.c:1050 +#, c-format +msgid "%s: %s Warning: " +msgstr "" + +#: src/lib/message.c:1053 +#, c-format +msgid "%s: %s Security violation: " +msgstr "" + +#: src/lib/parse_conf.c:160 +msgid "***UNKNOWN***" +msgstr "" + +#: src/lib/parse_conf.c:179 +#, c-format +msgid "Unable to initialize resource lock. ERR=%s\n" +msgstr "" + +#: src/lib/parse_conf.c:262 src/lib/parse_conf.c:281 +#, c-format +msgid "expected an =, got: %s" +msgstr "" + +#: src/lib/parse_conf.c:289 +#, c-format +msgid "Unknown item code: %d\n" +msgstr "" + +#: src/lib/parse_conf.c:328 +#, c-format +msgid "message type: %s not found" +msgstr "" + +#: src/lib/parse_conf.c:367 +#, c-format +msgid "Attempt to redefine name \"%s\" to \"%s\"." 
+msgstr "" + +#: src/lib/parse_conf.c:458 +#, c-format +msgid "Could not find config Resource %s referenced on line %d : %s\n" +msgstr "" + +#: src/lib/parse_conf.c:462 +#, c-format +msgid "Attempt to redefine resource \"%s\" referenced on line %d : %s\n" +msgstr "" + +#: src/lib/parse_conf.c:497 +#, c-format +msgid "Too many %s directives. Max. is %d. line %d: %s\n" +msgstr "" + +#: src/lib/parse_conf.c:507 +#, c-format +msgid "Could not find config Resource \"%s\" referenced on line %d : %s\n" +msgstr "" + +#: src/lib/parse_conf.c:569 +#, c-format +msgid "Missing config Resource \"%s\" referenced on line %d : %s\n" +msgstr "" + +#: src/lib/parse_conf.c:633 +#, c-format +msgid "expected a size number, got: %s" +msgstr "" + +#: src/lib/parse_conf.c:638 +#, c-format +msgid "expected a size, got: %s" +msgstr "" + +#: src/lib/parse_conf.c:675 src/lib/parse_conf.c:680 +#, c-format +msgid "expected a time period, got: %s" +msgstr "" + +#: src/lib/parse_conf.c:699 +#, c-format +msgid "Expect a %s or %s, got: %s" +msgstr "" + +#: src/lib/parse_conf.c:723 +#, c-format +msgid "Expected a Tape Label keyword, got: %s" +msgstr "" + +#: src/lib/parse_conf.c:772 +#, c-format +msgid "Cannot open config file \"%s\": %s\n" +msgstr "" + +#: src/lib/parse_conf.c:785 +#, c-format +msgid "Expected a Resource name identifier, got: %s" +msgstr "" + +#: src/lib/parse_conf.c:797 +#, c-format +msgid "expected resource name, got: %s" +msgstr "" + +#: src/lib/parse_conf.c:808 +#, c-format +msgid "not in resource definition: %s" +msgstr "" + +#: src/lib/parse_conf.c:833 +#, c-format +msgid "" +"Keyword \"%s\" not permitted in this resource.\n" +"Perhaps you left the trailing brace off of the previous resource." +msgstr "" + +#: src/lib/parse_conf.c:850 +#, c-format +msgid "unexpected token %d %s in resource definition" +msgstr "" + +#: src/lib/parse_conf.c:856 +#, c-format +msgid "Unknown parser state %d\n" +msgstr "" + +#: src/lib/parse_conf.c:861 +msgid "End of conf file reached with unclosed resource." +msgstr "" + +#: src/lib/pythonlib.c:108 +msgid "Could not initialize Python\n" +msgstr "" + +#: src/lib/pythonlib.c:113 +#, c-format +msgid "Could not Run Python string %s\n" +msgstr "" + +#: src/lib/pythonlib.c:125 +msgid "Could not initialize Python Job type.\n" +msgstr "" + +#: src/lib/pythonlib.c:130 +#, c-format +msgid "Could not import Python script %s/%s. 
Python disabled.\n" +msgstr "" + +#: src/lib/pythonlib.c:230 +msgid "Could not create Python Job Object.\n" +msgstr "" + +#: src/lib/pythonlib.c:243 src/lib/pythonlib.c:267 +#, c-format +msgid "Python function \"%s\" not found.\n" +msgstr "" + +#: src/lib/pythonlib.c:282 +#, c-format +msgid "Unknown Python daemon event %s\n" +msgstr "" + +#: src/lib/regex.c:1040 +msgid "Success" +msgstr "" + +#: src/lib/regex.c:1043 +msgid "No match" +msgstr "" + +#: src/lib/regex.c:1046 +msgid "Invalid regular expression" +msgstr "" + +#: src/lib/regex.c:1049 +msgid "Invalid collation character" +msgstr "" + +#: src/lib/regex.c:1052 +msgid "Invalid character class name" +msgstr "" + +#: src/lib/regex.c:1055 +msgid "Trailing backslash" +msgstr "" + +#: src/lib/regex.c:1058 +msgid "Invalid back reference" +msgstr "" + +#: src/lib/regex.c:1061 +msgid "Unmatched [ or [^" +msgstr "" + +#: src/lib/regex.c:1064 +msgid "Unmatched ( or \\(" +msgstr "" + +#: src/lib/regex.c:1067 +msgid "Unmatched \\{" +msgstr "" + +#: src/lib/regex.c:1070 +msgid "Invalid content of \\{\\}" +msgstr "" + +#: src/lib/regex.c:1073 +msgid "Invalid range end" +msgstr "" + +#: src/lib/regex.c:1076 +msgid "Memory exhausted" +msgstr "" + +#: src/lib/regex.c:1079 +msgid "Invalid preceding regular expression" +msgstr "" + +#: src/lib/regex.c:1082 +msgid "Premature end of regular expression" +msgstr "" + +#: src/lib/regex.c:1085 +msgid "Regular expression too big" +msgstr "" + +#: src/lib/regex.c:1088 +msgid "Unmatched ) or \\)" +msgstr "" + +#: src/lib/regex.c:5874 +msgid "No previous regular expression" +msgstr "" + +#: src/lib/res.c:66 +#, c-format +msgid "rwl_writelock failure at %s:%d: ERR=%s\n" +msgstr "" + +#: src/lib/res.c:76 +#, c-format +msgid "rwl_writeunlock failure at %s:%d:. ERR=%s\n" +msgstr "" + +#: src/lib/rwlock.c:289 +msgid "rwl_writeunlock called too many times.\n" +msgstr "" + +#: src/lib/rwlock.c:293 +msgid "rwl_writeunlock by non-owner.\n" +msgstr "" + +#: src/lib/rwlock.c:358 src/lib/semlock.c:248 +#, c-format +msgid "Write lock failed. ERR=%s\n" +msgstr "" + +#: src/lib/rwlock.c:365 src/lib/semlock.c:255 +#, c-format +msgid "Write unlock failed. ERR=%s\n" +msgstr "" + +#: src/lib/rwlock.c:375 src/lib/semlock.c:265 +#, c-format +msgid "Read lock failed. ERR=%s\n" +msgstr "" + +#: src/lib/rwlock.c:382 src/lib/semlock.c:272 +#, c-format +msgid "Read unlock failed. ERR=%s\n" +msgstr "" + +#: src/lib/rwlock.c:391 src/lib/semlock.c:281 +#, c-format +msgid "Thread %d found unchanged elements %d times\n" +msgstr "" + +#: src/lib/rwlock.c:423 src/lib/semlock.c:313 +#, c-format +msgid "Init rwlock failed. ERR=%s\n" +msgstr "" + +#: src/lib/rwlock.c:438 src/lib/semlock.c:328 +#, c-format +msgid "Create thread failed. ERR=%s\n" +msgstr "" + +#: src/lib/rwlock.c:449 src/lib/semlock.c:339 +#, c-format +msgid "Join thread failed. 
ERR=%s\n" +msgstr "" + +#: src/lib/rwlock.c:452 src/lib/semlock.c:342 +#, c-format +msgid "%02d: interval %d, writes %d, reads %d\n" +msgstr "" + +#: src/lib/rwlock.c:462 src/lib/semlock.c:352 +#, c-format +msgid "data %02d: value %d, %d writes\n" +msgstr "" + +#: src/lib/rwlock.c:467 src/lib/semlock.c:357 +#, c-format +msgid "Total: %d thread writes, %d data writes\n" +msgstr "" + +#: src/lib/rwlock.c:539 src/lib/semlock.c:429 +msgid "Try write lock" +msgstr "" + +#: src/lib/rwlock.c:545 src/lib/semlock.c:435 +msgid "Try read lock" +msgstr "" + +#: src/lib/rwlock.c:600 src/lib/semlock.c:490 +msgid "Create thread" +msgstr "" + +#: src/lib/rwlock.c:610 src/lib/semlock.c:500 +msgid "Join thread" +msgstr "" + +#: src/lib/rwlock.c:612 src/lib/semlock.c:502 +#, c-format +msgid "%02d: interval %d, updates %d, r_collisions %d, w_collisions %d\n" +msgstr "" + +#: src/lib/rwlock.c:624 src/lib/semlock.c:514 +#, c-format +msgid "data %02d: value %d, %d updates\n" +msgstr "" + +#: src/lib/semlock.c:185 +msgid "sem_unlock by non-owner.\n" +msgstr "" + +#: src/lib/signal.c:61 +msgid "Invalid signal number" +msgstr "" + +#: src/lib/signal.c:87 +#, c-format +msgid "Bacula interrupted by signal %d: %s\n" +msgstr "" + +#: src/lib/signal.c:100 +#, c-format +msgid "Kaboom! %s, %s got signal %d. Attempting traceback.\n" +msgstr "" + +#: src/lib/signal.c:102 +#, c-format +msgid "Kaboom! exepath=%s\n" +msgstr "" + +#: src/lib/signal.c:136 +#, c-format +msgid "Fork error: ERR=%s\n" +msgstr "" + +#: src/lib/signal.c:143 +#, c-format +msgid "Calling: %s %s %s\n" +msgstr "" + +#: src/lib/signal.c:145 +#, c-format +msgid "execv: %s failed: ERR=%s\n" +msgstr "" + +#: src/lib/signal.c:160 +#, c-format +msgid "Traceback complete, attempting cleanup ...\n" +msgstr "" + +#: src/lib/signal.c:168 +#, c-format +msgid "It looks like the traceback worked ...\n" +msgstr "" + +#: src/lib/signal.c:197 +#, c-format +msgid "BA_NSIG too small (%d) should be (%d)\n" +msgstr "" + +#: src/lib/signal.c:203 +msgid "UNKNOWN SIGNAL" +msgstr "" + +#: src/lib/signal.c:204 +msgid "Hangup" +msgstr "" + +#: src/lib/signal.c:205 +msgid "Interrupt" +msgstr "" + +#: src/lib/signal.c:206 +msgid "Quit" +msgstr "" + +#: src/lib/signal.c:207 +msgid "Illegal instruction" +msgstr "" + +#: src/lib/signal.c:208 +msgid "Trace/Breakpoint trap" +msgstr "" + +#: src/lib/signal.c:209 +msgid "Abort" +msgstr "" + +#: src/lib/signal.c:211 +msgid "EMT instruction (Emulation Trap)" +msgstr "" + +#: src/lib/signal.c:214 +msgid "IOT trap" +msgstr "" + +#: src/lib/signal.c:216 +msgid "BUS error" +msgstr "" + +#: src/lib/signal.c:217 +msgid "Floating-point exception" +msgstr "" + +#: src/lib/signal.c:218 +msgid "Kill, unblockable" +msgstr "" + +#: src/lib/signal.c:219 +msgid "User-defined signal 1" +msgstr "" + +#: src/lib/signal.c:220 +msgid "Segmentation violation" +msgstr "" + +#: src/lib/signal.c:221 +msgid "User-defined signal 2" +msgstr "" + +#: src/lib/signal.c:222 +msgid "Broken pipe" +msgstr "" + +#: src/lib/signal.c:223 +msgid "Alarm clock" +msgstr "" + +#: src/lib/signal.c:224 +msgid "Termination" +msgstr "" + +#: src/lib/signal.c:226 +msgid "Stack fault" +msgstr "" + +#: src/lib/signal.c:228 +msgid "Child status has changed" +msgstr "" + +#: src/lib/signal.c:229 +msgid "Continue" +msgstr "" + +#: src/lib/signal.c:230 +msgid "Stop, unblockable" +msgstr "" + +#: src/lib/signal.c:231 +msgid "Keyboard stop" +msgstr "" + +#: src/lib/signal.c:232 +msgid "Background read from tty" +msgstr "" + +#: src/lib/signal.c:233 +msgid "Background write to tty" +msgstr "" + 
+#: src/lib/signal.c:234 +msgid "Urgent condition on socket" +msgstr "" + +#: src/lib/signal.c:235 +msgid "CPU limit exceeded" +msgstr "" + +#: src/lib/signal.c:236 +msgid "File size limit exceeded" +msgstr "" + +#: src/lib/signal.c:237 +msgid "Virtual alarm clock" +msgstr "" + +#: src/lib/signal.c:238 +msgid "Profiling alarm clock" +msgstr "" + +#: src/lib/signal.c:239 +msgid "Window size change" +msgstr "" + +#: src/lib/signal.c:240 +msgid "I/O now possible" +msgstr "" + +#: src/lib/signal.c:242 +msgid "Power failure restart" +msgstr "" + +#: src/lib/signal.c:245 +msgid "No runnable lwp" +msgstr "" + +#: src/lib/signal.c:248 +msgid "SIGLWP special signal used by thread library" +msgstr "" + +#: src/lib/signal.c:251 +msgid "Checkpoint Freeze" +msgstr "" + +#: src/lib/signal.c:254 +msgid "Checkpoint Thaw" +msgstr "" + +#: src/lib/signal.c:257 +msgid "Thread Cancellation" +msgstr "" + +#: src/lib/signal.c:260 +msgid "Resource Lost (e.g. record-lock lost)" +msgstr "" + +#: src/lib/smartall.c:132 src/lib/smartall.c:223 src/lib/smartall.c:238 +msgid "Out of memory\n" +msgstr "" + +#: src/lib/smartall.c:160 +#, c-format +msgid "Attempt to free NULL called from %s:%d\n" +msgstr "" + +#: src/lib/smartall.c:176 +#, c-format +msgid "qp->qnext->qprev != qp called from %s:%d\n" +msgstr "" + +#: src/lib/smartall.c:180 +#, c-format +msgid "qp->qprev->qnext != qp called from %s:%d\n" +msgstr "" + +#: src/lib/smartall.c:189 +#, c-format +msgid "Buffer overrun called from %s:%d\n" +msgstr "" + +#: src/lib/smartall.c:260 +#, c-format +msgid "sm_realloc size: %d\n" +msgstr "" + +#: src/lib/smartall.c:301 +#, c-format +msgid "sm_realloc %d at %x from %s:%d\n" +msgstr "" + +#: src/lib/smartall.c:363 +#, c-format +msgid "" +"\n" +"Orphaned buffers exist. Dump terminated following\n" +" discovery of bad links in chain of orphaned buffers.\n" +" Buffer address with bad links: %lx\n" +msgstr "" + +#: src/lib/smartall.c:374 +#, c-format +msgid "Orphaned buffer: %6u bytes allocated at line %d of %s %s\n" +msgstr "" + +#: src/lib/smartall.c:410 +#, c-format +msgid "Damaged buffer found. 
Called from %s:%d\n" +msgstr "" + +#: src/lib/smartall.c:440 +#, c-format +msgid "" +"\n" +"Damaged buffers found at %s:%d\n" +msgstr "" + +#: src/lib/smartall.c:443 +#, c-format +msgid " discovery of bad prev link.\n" +msgstr "" + +#: src/lib/smartall.c:446 +#, c-format +msgid " discovery of bad next link.\n" +msgstr "" + +#: src/lib/smartall.c:449 +#, c-format +msgid " discovery of data overrun.\n" +msgstr "" + +#: src/lib/smartall.c:452 +#, c-format +msgid " Buffer address: %lx\n" +msgstr "" + +#: src/lib/smartall.c:459 +#, c-format +msgid "Damaged buffer: %6u bytes allocated at line %d of %s %s\n" +msgstr "" + +#: src/lib/util.c:181 +msgid "Running" +msgstr "" + +#: src/lib/util.c:184 +msgid "Blocked" +msgstr "" + +#: src/lib/util.c:194 +msgid "Non-fatal error" +msgstr "" + +#: src/lib/util.c:197 src/lib/util.c:264 +msgid "Canceled" +msgstr "" + +#: src/lib/util.c:200 +msgid "Verify differences" +msgstr "" + +#: src/lib/util.c:203 +msgid "Waiting on FD" +msgstr "" + +#: src/lib/util.c:206 +msgid "Wait on SD" +msgstr "" + +#: src/lib/util.c:209 +msgid "Wait for new Volume" +msgstr "" + +#: src/lib/util.c:212 +msgid "Waiting for mount" +msgstr "" + +#: src/lib/util.c:215 +msgid "Waiting for Storage resource" +msgstr "" + +#: src/lib/util.c:218 +msgid "Waiting for Job resource" +msgstr "" + +#: src/lib/util.c:221 +msgid "Waiting for Client resource" +msgstr "" + +#: src/lib/util.c:224 +msgid "Waiting on Max Jobs" +msgstr "" + +#: src/lib/util.c:227 +msgid "Waiting for Start Time" +msgstr "" + +#: src/lib/util.c:230 +msgid "Waiting on Priority" +msgstr "" + +#: src/lib/util.c:237 +#, c-format +msgid "Unknown Job termination status=%d" +msgstr "" + +#: src/lib/util.c:261 +msgid "Fatal Error" +msgstr "" + +#: src/lib/util.c:267 +msgid "Differences" +msgstr "" + +#: src/lib/util.c:270 +msgid "Unknown term code" +msgstr "" + +#: src/lib/util.c:298 +msgid "Migrate" +msgstr "" + +#: src/lib/util.c:301 +msgid "Copy" +msgstr "" + +#: src/lib/util.c:304 +msgid "Unknown Type" +msgstr "" + +#: src/lib/util.c:336 +msgid "Verify Init Catalog" +msgstr "" + +#: src/lib/util.c:345 +msgid "Verify Data" +msgstr "" + +#: src/lib/util.c:647 +msgid "Working directory not defined. Cannot continue.\n" +msgstr "" + +#: src/lib/util.c:650 +#, c-format +msgid "Working Directory: \"%s\" not found. Cannot continue.\n" +msgstr "" + +#: src/lib/util.c:654 +#, c-format +msgid "Working Directory: \"%s\" is not a directory. 
Cannot continue.\n" +msgstr "" + +#: src/lib/var.c:2659 +msgid "everything ok" +msgstr "" + +#: src/lib/var.c:2660 +msgid "incomplete named character" +msgstr "" + +#: src/lib/var.c:2661 +msgid "incomplete hexadecimal value" +msgstr "" + +#: src/lib/var.c:2662 +msgid "invalid hexadecimal value" +msgstr "" + +#: src/lib/var.c:2663 +msgid "octal value too large" +msgstr "" + +#: src/lib/var.c:2664 +msgid "invalid octal value" +msgstr "" + +#: src/lib/var.c:2665 +msgid "incomplete octal value" +msgstr "" + +#: src/lib/var.c:2666 +msgid "incomplete grouped hexadecimal value" +msgstr "" + +#: src/lib/var.c:2667 +msgid "incorrect character class specification" +msgstr "" + +#: src/lib/var.c:2668 +msgid "invalid expansion configuration" +msgstr "" + +#: src/lib/var.c:2669 +msgid "out of memory" +msgstr "" + +#: src/lib/var.c:2670 +msgid "incomplete variable specification" +msgstr "" + +#: src/lib/var.c:2671 +msgid "undefined variable" +msgstr "" + +#: src/lib/var.c:2672 +msgid "input is neither text nor variable" +msgstr "" + +#: src/lib/var.c:2673 +msgid "unknown command character in variable" +msgstr "" + +#: src/lib/var.c:2674 +msgid "malformatted search and replace operation" +msgstr "" + +#: src/lib/var.c:2675 +msgid "unknown flag in search and replace operation" +msgstr "" + +#: src/lib/var.c:2676 +msgid "invalid regex in search and replace operation" +msgstr "" + +#: src/lib/var.c:2677 +msgid "missing parameter in command" +msgstr "" + +#: src/lib/var.c:2678 +msgid "empty search string in search and replace operation" +msgstr "" + +#: src/lib/var.c:2679 +msgid "start offset missing in cut operation" +msgstr "" + +#: src/lib/var.c:2680 +msgid "offsets in cut operation delimited by unknown character" +msgstr "" + +#: src/lib/var.c:2681 +msgid "range out of bounds in cut operation" +msgstr "" + +#: src/lib/var.c:2682 +msgid "offset out of bounds in cut operation" +msgstr "" + +#: src/lib/var.c:2683 +msgid "logic error in cut operation" +msgstr "" + +#: src/lib/var.c:2684 +msgid "malformatted transpose operation" +msgstr "" + +#: src/lib/var.c:2685 +msgid "source and target class mismatch in transpose operation" +msgstr "" + +#: src/lib/var.c:2686 +msgid "empty character class in transpose operation" +msgstr "" + +#: src/lib/var.c:2687 +msgid "incorrect character class in transpose operation" +msgstr "" + +#: src/lib/var.c:2688 +msgid "malformatted padding operation" +msgstr "" + +#: src/lib/var.c:2689 +msgid "width parameter missing in padding operation" +msgstr "" + +#: src/lib/var.c:2690 +msgid "fill string missing in padding operation" +msgstr "" + +#: src/lib/var.c:2691 +msgid "unknown quoted pair in search and replace operation" +msgstr "" + +#: src/lib/var.c:2692 +msgid "sub-matching reference out of range" +msgstr "" + +#: src/lib/var.c:2693 +msgid "invalid argument" +msgstr "" + +#: src/lib/var.c:2694 +msgid "incomplete quoted pair" +msgstr "" + +#: src/lib/var.c:2695 +msgid "lookup function does not support variable arrays" +msgstr "" + +#: src/lib/var.c:2696 +msgid "index of array variable contains an invalid character" +msgstr "" + +#: src/lib/var.c:2697 +msgid "index of array variable is incomplete" +msgstr "" + +#: src/lib/var.c:2698 +msgid "bracket expression in array variable's index not closed" +msgstr "" + +#: src/lib/var.c:2699 +msgid "division by zero error in index specification" +msgstr "" + +#: src/lib/var.c:2700 +msgid "unterminated loop construct" +msgstr "" + +#: src/lib/var.c:2701 +msgid "invalid character in loop limits" +msgstr "" + +#: src/lib/var.c:2702 +msgid 
"malformed operation argument list" +msgstr "" + +#: src/lib/var.c:2703 +msgid "undefined operation" +msgstr "" + +#: src/lib/var.c:2704 +msgid "formatting failure" +msgstr "" + +#: src/lib/var.c:2713 +msgid "unknown error" +msgstr "" + +#: src/lib/watchdog.c:74 +#, c-format +msgid "Unable to initialize watchdog lock. ERR=%s\n" +msgstr "" + +#: src/lib/watchdog.c:170 +msgid "BUG! register_watchdog called before start_watchdog\n" +msgstr "" + +#: src/lib/watchdog.c:173 +#, c-format +msgid "BUG! Watchdog %p has NULL callback\n" +msgstr "" + +#: src/lib/watchdog.c:176 +#, c-format +msgid "BUG! Watchdog %p has zero interval\n" +msgstr "" + +#: src/lib/watchdog.c:196 +msgid "BUG! unregister_watchdog_unlocked called before start_watchdog\n" +msgstr "" + +#: src/lib/watchdog.c:313 +#, c-format +msgid "rwl_writelock failure. ERR=%s\n" +msgstr "" + +#: src/lib/watchdog.c:327 +#, c-format +msgid "rwl_writeunlock failure. ERR=%s\n" +msgstr "" + +#: src/stored/acquire.c:96 +#, c-format +msgid "Hey! num_writers=%d!!!!\n" +msgstr "" + +#: src/stored/acquire.c:146 +#, c-format +msgid "Num_writers=%d not zero. Job %d canceled.\n" +msgstr "" + +#: src/stored/acquire.c:154 +#, c-format +msgid "No volumes specified. Job %d canceled.\n" +msgstr "" + +#: src/stored/acquire.c:182 src/stored/mount.c:73 +#, c-format +msgid "Job %d canceled.\n" +msgstr "" + +#: src/stored/acquire.c:193 +#, c-format +msgid "Open device %s Volume \"%s\" failed (EIO): ERR=%s\n" +msgstr "" + +#: src/stored/acquire.c:198 +#, c-format +msgid "Open device %s Volume \"%s\" failed: ERR=%s\n" +msgstr "" + +#: src/stored/acquire.c:263 src/stored/mount.c:67 +#, c-format +msgid "Too many errors trying to mount device %s.\n" +msgstr "" + +#: src/stored/acquire.c:272 +#, c-format +msgid "Ready to read from volume \"%s\" on device %s.\n" +msgstr "" + +#: src/stored/acquire.c:311 +#, c-format +msgid "Device %s is busy reading.\n" +msgstr "" + +#: src/stored/acquire.c:341 +#, c-format +msgid "Wanted Volume \"%s\", but device %s is busy writing on \"%s\" .\n" +msgstr "" + +#: src/stored/acquire.c:360 +#, c-format +msgid "" +"Cannot recycle volume \"%s\" on device %s because it is in use by another " +"job.\n" +msgstr "" + +#: src/stored/acquire.c:382 +#, c-format +msgid "Could not ready device %s for append.\n" +msgstr "" + +#: src/stored/acquire.c:459 src/stored/block.c:354 src/stored/block.c:677 +#: src/stored/block.c:746 +#, c-format +msgid "Could not create JobMedia record for Volume=\"%s\" Job=%s\n" +msgstr "" + +#: src/stored/acquire.c:501 +#, c-format +msgid "Alert: %s" +msgstr "" + +#: src/stored/acquire.c:509 +#, c-format +msgid "3997 Bad alert command: %s: ERR=%s.\n" +msgstr "" + +#: src/stored/ansi_label.c:83 +#, c-format +msgid "Read error on device %s in ANSI label. ERR=%s\n" +msgstr "" + +#: src/stored/ansi_label.c:93 +msgid "Insane! 
End of tape while reading ANSI label.\n" +msgstr "" + +#: src/stored/ansi_label.c:117 +msgid "No VOL1 label while reading ANSI/IBM label.\n" +msgstr "" + +#: src/stored/ansi_label.c:137 +#, c-format +msgid "Wanted ANSI Volume \"%s\" got \"%s\"\n" +msgstr "" + +#: src/stored/ansi_label.c:148 +msgid "No HDR1 label while reading ANSI label.\n" +msgstr "" + +#: src/stored/ansi_label.c:154 +#, c-format +msgid "ANSI/IBM Volume \"%s\" does not belong to Bacula.\n" +msgstr "" + +#: src/stored/ansi_label.c:165 +msgid "No HDR2 label while reading ANSI/IBM label.\n" +msgstr "" + +#: src/stored/ansi_label.c:179 +msgid "Unknown or bad ANSI/IBM label record.\n" +msgstr "" + +#: src/stored/ansi_label.c:186 +msgid "Too many records in while reading ANSI/IBM label.\n" +msgstr "" + +#: src/stored/ansi_label.c:285 +#, c-format +msgid "ANSI Volume label name \"%s\" longer than 6 chars.\n" +msgstr "" + +#: src/stored/ansi_label.c:302 +#, c-format +msgid "Could not write ANSI VOL1 label. ERR=%s\n" +msgstr "" + +#: src/stored/ansi_label.c:340 src/stored/ansi_label.c:369 +#, c-format +msgid "Could not write ANSI HDR1 label. ERR=%s\n" +msgstr "" + +#: src/stored/ansi_label.c:345 src/stored/ansi_label.c:376 +msgid "Could not write ANSI HDR1 label.\n" +msgstr "" + +#: src/stored/ansi_label.c:381 +#, c-format +msgid "Error writing EOF to tape. ERR=%s" +msgstr "" + +#: src/stored/ansi_label.c:386 +msgid "write_ansi_ibm_label called for non-ANSI/IBM type\n" +msgstr "" + +#: src/stored/append.c:49 +msgid "DCR is NULL!!!\n" +msgstr "" + +#: src/stored/append.c:54 +msgid "DEVICE is NULL!!!\n" +msgstr "" + +#: src/stored/append.c:66 +msgid "Unable to set network buffer size.\n" +msgstr "" + +#: src/stored/append.c:79 src/stored/append.c:88 src/stored/append.c:100 +#: src/stored/append.c:256 src/stored/append.c:271 src/stored/append.c:283 +#: src/stored/askdir.c:294 src/stored/askdir.c:295 +msgid "NULL Volume name. This shouldn't happen!!!\n" +msgstr "" + +#: src/stored/append.c:94 src/stored/btape.c:1854 +#, c-format +msgid "Write session label failed. ERR=%s\n" +msgstr "" + +#: src/stored/append.c:106 +#, c-format +msgid "Network send error to FD. ERR=%s\n" +msgstr "" + +#: src/stored/append.c:143 +#, c-format +msgid "Error reading data header from FD. ERR=%s\n" +msgstr "" + +#: src/stored/append.c:165 +#, c-format +msgid "Malformed data header from FD: %s\n" +msgstr "" + +#: src/stored/append.c:175 +msgid "File index from FD not positive or sequential\n" +msgstr "" + +#: src/stored/append.c:206 src/stored/append.c:276 src/stored/spool.c:248 +#, c-format +msgid "Fatal append error on device %s: ERR=%s\n" +msgstr "" + +#: src/stored/append.c:231 +#, c-format +msgid "Error updating file attributes. ERR=%s\n" +msgstr "" + +#: src/stored/append.c:244 +#, c-format +msgid "Network error on data channel. ERR=%s\n" +msgstr "" + +#: src/stored/append.c:265 src/stored/btape.c:1976 +#, c-format +msgid "Error writting end session label. 
ERR=%s\n" +msgstr "" + +#: src/stored/append.c:278 +msgid "Set ok=FALSE after write_block_to_device.\n" +msgstr "" + +#: src/stored/askdir.c:161 +msgid "Network error on bnet_recv in req_vol_info.\n" +msgstr "" + +#: src/stored/askdir.c:178 +#, c-format +msgid "Error getting Volume info: %s" +msgstr "" + +#: src/stored/askdir.c:299 src/stored/askdir.c:300 +msgid "Attempt to update_volume_info in read mode!!!\n" +msgstr "" + +#: src/stored/askdir.c:328 +#, c-format +msgid "Didn't get vol info vol=%s: ERR=%s" +msgstr "" + +#: src/stored/askdir.c:359 +#, c-format +msgid "Error creating JobMedia record: ERR=%s\n" +msgstr "" + +#: src/stored/askdir.c:366 +#, c-format +msgid "Error creating JobMedia record: %s\n" +msgstr "" + +#: src/stored/askdir.c:432 +#, c-format +msgid "Job %s canceled while waiting for mount on Storage Device \"%s\".\n" +msgstr "" + +#: src/stored/askdir.c:445 +#, c-format +msgid "" +"Job %s waiting. Cannot find any appendable volumes.\n" +"Please use the \"label\" command to create a new Volume for:\n" +" Storage: %s\n" +" Media type: %s\n" +" Pool: %s\n" +msgstr "" + +#: src/stored/askdir.c:469 src/stored/askdir.c:551 +#, c-format +msgid "Max time exceeded waiting to mount Storage Device %s for Job %s\n" +msgstr "" + +#: src/stored/askdir.c:479 +msgid "pthread error in mount_next_volume.\n" +msgstr "" + +#: src/stored/askdir.c:511 +msgid "Cannot request another volume: no volume name given.\n" +msgstr "" + +#: src/stored/askdir.c:517 +#, c-format +msgid "Job %s canceled while waiting for mount on Storage Device %s.\n" +msgstr "" + +#: src/stored/askdir.c:532 +#, c-format +msgid "Please mount Volume \"%s\" on Storage Device %s for Job %s\n" +msgstr "" + +#: src/stored/askdir.c:561 +msgid "pthread error in mount_volume\n" +msgstr "" + +#: src/stored/authenticate.c:53 +#, c-format +msgid "I only authenticate Directors, not %d\n" +msgstr "" + +#: src/stored/authenticate.c:120 +msgid "" +"Incorrect password given by Director.\n" +"Please see http://www.bacula.org/rel-manual/faq.html#AuthorizationErrors for " +"help.\n" +msgstr "" + +#: src/stored/authenticate.c:178 +#, c-format +msgid "Unable to authenticate Director at %s.\n" +msgstr "" + +#: src/stored/authenticate.c:223 src/stored/authenticate.c:259 +#, c-format +msgid "" +"Incorrect authorization key from File daemon at %s rejected.\n" +"Please see http://www.bacula.org/rel-manual/faq.html#AuthorizationErrors for " +"help.\n" +msgstr "" + +#: src/stored/autochanger.c:53 +#, c-format +msgid "No Changer Name given for device %s. Cannot continue.\n" +msgstr "" + +#: src/stored/autochanger.c:59 +#, c-format +msgid "No Changer Command given for device %s. Cannot continue.\n" +msgstr "" + +#: src/stored/autochanger.c:72 +#, c-format +msgid "" +"Media Type not the same for all devices in changer %s. 
Cannot continue.\n" +msgstr "" + +#: src/stored/autochanger.c:152 +#, c-format +msgid "3304 Issuing autochanger \"load slot %d, drive %d\" command.\n" +msgstr "" + +#: src/stored/autochanger.c:161 +#, c-format +msgid "3305 Autochanger \"load slot %d, drive %d\", status is OK.\n" +msgstr "" + +#: src/stored/autochanger.c:167 +#, c-format +msgid "3992 Bad autochanger \"load slot %d, drive %d\": ERR=%s.\n" +msgstr "" + +#: src/stored/autochanger.c:208 +msgid "3992 Missing Changer command.\n" +msgstr "" + +#: src/stored/autochanger.c:218 +#, c-format +msgid "3301 Issuing autochanger \"loaded drive %d\" command.\n" +msgstr "" + +#: src/stored/autochanger.c:227 +#, c-format +msgid "3302 Autochanger \"loaded drive %d\", result is Slot %d.\n" +msgstr "" + +#: src/stored/autochanger.c:231 +#, c-format +msgid "3302 Autochanger \"loaded drive %d\", result: nothing loaded.\n" +msgstr "" + +#: src/stored/autochanger.c:238 +#, c-format +msgid "3991 Bad autochanger \"loaded drive %d\" command: ERR=%s.\n" +msgstr "" + +#: src/stored/autochanger.c:297 src/stored/autochanger.c:380 +#, c-format +msgid "3307 Issuing autochanger \"unload slot %d, drive %d\" command.\n" +msgstr "" + +#: src/stored/autochanger.c:310 src/stored/autochanger.c:401 +#, c-format +msgid "3995 Bad autochanger \"unload slot %d, drive %d\": ERR=%s.\n" +msgstr "" + +#: src/stored/autochanger.c:369 +#, c-format +msgid "Volume %s is in use by device %s\n" +msgstr "" + +#: src/stored/autochanger.c:439 +#, c-format +msgid "3993 Device %s not an autochanger device.\n" +msgstr "" + +#: src/stored/autochanger.c:464 +#, c-format +msgid "3306 Issuing autochanger \"%s\" command.\n" +msgstr "" + +#: src/stored/autochanger.c:467 +msgid "3996 Open bpipe failed.\n" +msgstr "" + +#: src/stored/autochanger.c:494 +#, c-format +msgid "Autochanger error: ERR=%s\n" +msgstr "" + +#: src/stored/bcopy.c:58 +#, c-format +msgid "" +"Copyright (C) 2000-2016 Kern Sibbald +"\n" +"Version: %s (%s)\n" +"\n" +"Usage: bcopy [-d debug_level] \n" +" -b bootstrap specify a bootstrap file\n" +" -c specify configuration file\n" +" -d set debug level to nn\n" +" -i specify input Volume names (separated by |)\n" +" -o specify output Volume names (separated by |)\n" +" -p proceed inspite of errors\n" +" -v verbose\n" +" -w specify working directory (default /tmp)\n" +" -? print this message\n" +"\n" +msgstr "" + +#: src/stored/bcopy.c:137 src/stored/bextract.c:177 src/stored/bscan.c:221 +msgid "Wrong number of arguments: \n" +msgstr "" + +#: src/stored/bcopy.c:172 src/stored/btape.c:333 src/stored/device.c:266 +#, c-format +msgid "dev open failed: %s\n" +msgstr "" + +#: src/stored/bcopy.c:185 +msgid "Write of last block failed.\n" +msgstr "" + +#: src/stored/bcopy.c:188 +#, c-format +msgid "%u Jobs copied. %u records copied.\n" +msgstr "" + +#: src/stored/bcopy.c:205 src/stored/bscan.c:390 +#, c-format +msgid "Record: SessId=%u SessTim=%u FileIndex=%d Stream=%d len=%u\n" +msgstr "" + +#: src/stored/bcopy.c:220 +msgid "Volume is prelabeled. This volume cannot be copied.\n" +msgstr "" + +#: src/stored/bcopy.c:223 +msgid "Volume label not copied.\n" +msgstr "" + +#: src/stored/bcopy.c:235 src/stored/bcopy.c:242 src/stored/bcopy.c:265 +#: src/stored/btape.c:2342 +#, c-format +msgid "Cannot fixup device error. 
%s\n" +msgstr "" + +#: src/stored/bcopy.c:247 +msgid "EOM label not copied.\n" +msgstr "" + +#: src/stored/bcopy.c:250 +msgid "EOT label not copied.\n" +msgstr "" + +#: src/stored/bcopy.c:290 src/stored/bextract.c:479 src/stored/bls.c:443 +#: src/stored/bscan.c:1234 src/stored/btape.c:2690 +#, c-format +msgid "Mount Volume \"%s\" on device %s and press return when ready: " +msgstr "" + +#: src/stored/bextract.c:70 +#, c-format +msgid "" +"Copyright (C) 2000-2016 Kern Sibbald +"\n" +"Version: %s (%s)\n" +"\n" +"Usage: bextract \n" +" -b specify a bootstrap file\n" +" -c specify a configuration file\n" +" -d set debug level to nn\n" +" -e exclude list\n" +" -i include list\n" +" -p proceed inspite of I/O errors\n" +" -v verbose\n" +" -V specify Volume names (separated by |)\n" +" -? print this message\n" +"\n" +msgstr "" + +#: src/stored/bextract.c:127 src/stored/bls.c:128 +#, c-format +msgid "Could not open exclude file: %s, ERR=%s\n" +msgstr "" + +#: src/stored/bextract.c:142 src/stored/bls.c:142 +#, c-format +msgid "Could not open include file: %s, ERR=%s\n" +msgstr "" + +#: src/stored/bextract.c:198 +#, c-format +msgid "%d Program Name and/or Program Data Stream records ignored.\n" +msgstr "" + +#: src/stored/bextract.c:202 +#, c-format +msgid "%d Win32 data or Win32 gzip data stream records. Ignored.\n" +msgstr "" + +#: src/stored/bextract.c:226 +#, c-format +msgid "Cannot stat %s. It must exist. ERR=%s\n" +msgstr "" + +#: src/stored/bextract.c:230 +#, c-format +msgid "%s must be a directory.\n" +msgstr "" + +#: src/stored/bextract.c:251 +#, c-format +msgid "%u files restored.\n" +msgstr "" + +#: src/stored/bextract.c:278 src/stored/bextract.c:450 +msgid "Logic error output file should be open but is not.\n" +msgstr "" + +#: src/stored/bextract.c:285 src/stored/bls.c:364 src/stored/bscan.c:647 +msgid "Cannot continue.\n" +msgstr "" + +#: src/stored/bextract.c:347 +#, c-format +msgid "Seek error on %s: %s\n" +msgstr "" + +#: src/stored/bextract.c:400 +#, c-format +msgid "Uncompression error. ERR=%d\n" +msgstr "" + +#: src/stored/bextract.c:408 +msgid "===Write error===\n" +msgstr "" + +#: src/stored/bextract.c:441 +msgid "Got Program Name or Data Stream. Ignored.\n" +msgstr "" + +#: src/stored/block.c:79 +#, c-format +msgid "" +"Dump block %s %x: size=%d BlkNum=%d\n" +" Hdrcksum=%x cksum=%x\n" +msgstr "" + +#: src/stored/block.c:92 +#, c-format +msgid " Rec: VId=%u VT=%u FI=%s Strm=%s len=%d p=%x\n" +msgstr "" + +#: src/stored/block.c:148 +#, c-format +msgid "%d block read errors not printed.\n" +msgstr "" + +#: src/stored/block.c:236 src/stored/block.c:252 src/stored/block.c:262 +#, c-format +msgid "" +"Volume data error at %u:%u! Wanted ID: \"%s\", got \"%s\". Buffer " +"discarded.\n" +msgstr "" + +#: src/stored/block.c:276 +#, c-format +msgid "" +"Volume data error at %u:%u! Block length %u is insane (too large), probably " +"due to a bad archive.\n" +msgstr "" + +#: src/stored/block.c:302 +#, c-format +msgid "" +"Volume data error at %u:%u!\n" +"Block checksum mismatch in block=%u len=%d: calc=%x blk=%x\n" +msgstr "" + +#: src/stored/block.c:410 +msgid "Cannot write block. Device at EOM.\n" +msgstr "" + +#: src/stored/block.c:415 +msgid "Attempt to write on read-only Volume.\n" +msgstr "" + +#: src/stored/block.c:467 +#, c-format +msgid "User defined maximum volume capacity %s exceeded on device %s.\n" +msgstr "" + +#: src/stored/block.c:482 +#, c-format +msgid "Unable to write EOF. 
ERR=%s\n" +msgstr "" + +#: src/stored/block.c:508 src/stored/block.c:519 +msgid "Write block header zeroed.\n" +msgstr "" + +#: src/stored/block.c:537 +#, c-format +msgid "Write error at %u:%u on device %s. ERR=%s.\n" +msgstr "" + +#: src/stored/block.c:544 +#, c-format +msgid "End of Volume \"%s\" at %u:%u on device %s. Write of %u bytes got %d.\n" +msgstr "" + +#: src/stored/block.c:619 src/stored/block.c:625 +#, c-format +msgid "Backspace file at EOT failed. ERR=%s\n" +msgstr "" + +#: src/stored/block.c:632 +#, c-format +msgid "Backspace record at EOT failed. ERR=%s\n" +msgstr "" + +#: src/stored/block.c:649 +#, c-format +msgid "Re-read last block at EOT failed. ERR=%s" +msgstr "" + +#: src/stored/block.c:653 +msgid "Re-read of last block succeeded.\n" +msgstr "" + +#: src/stored/block.c:656 +#, c-format +msgid "Re-read of last block failed. Last block=%u Current block=%u.\n" +msgstr "" + +#: src/stored/block.c:685 +#, c-format +msgid "" +"Error writing final EOF to tape. This Volume may not be readable.\n" +"%s" +msgstr "" + +#: src/stored/block.c:795 src/stored/dvd.c:689 +#, c-format +msgid "" +"Error while writing, current part number is less than the total number of " +"parts (%d/%d, device=%s)\n" +msgstr "" + +#: src/stored/block.c:803 src/stored/block.c:913 +#, c-format +msgid "Unable to open device next part %s: ERR=%s\n" +msgstr "" + +#: src/stored/block.c:821 +#, c-format +msgid "" +"End of Volume \"%s\" at %u:%u on device %s (part_size=%s, free_space=%s, " +"free_space_errno=%d, errmsg=%s).\n" +msgstr "" + +#: src/stored/block.c:834 +#, c-format +msgid "" +"End of Volume \"%s\" at %u:%u on device %s (part_size=%s, free_space=%s, " +"free_space_errno=%d).\n" +msgstr "" + +#: src/stored/block.c:888 +#, c-format +msgid "Block buffer size looping problem on device %s\n" +msgstr "" + +#: src/stored/block.c:939 +#, c-format +msgid "Read error at file:blk %u:%u on device %s. ERR=%s.\n" +msgstr "" + +#: src/stored/block.c:952 +#, c-format +msgid "Read zero bytes at %u:%u on device %s.\n" +msgstr "" + +#: src/stored/block.c:965 +#, c-format +msgid "" +"Volume data error at %u:%u! Very short block of %d bytes on device %s " +"discarded.\n" +msgstr "" + +#: src/stored/block.c:990 +#, c-format +msgid "Block length %u is greater than buffer %u. Attempting recovery.\n" +msgstr "" + +#: src/stored/block.c:1009 +#, c-format +msgid "Setting block buffer size to %u bytes.\n" +msgstr "" + +#: src/stored/block.c:1024 +#, c-format +msgid "" +"Volume data error at %u:%u! Short block of %d bytes on device %s discarded.\n" +msgstr "" + +#: src/stored/bls.c:68 +#, c-format +msgid "" +"Copyright (C) 2000-2016 Kern Sibbald +"\n" +"Version: %s (%s)\n" +"\n" +"Usage: bls [options] \n" +" -b specify a bootstrap file\n" +" -c specify a config file\n" +" -d specify debug level\n" +" -e exclude list\n" +" -i include list\n" +" -j list jobs\n" +" -k list blocks\n" +" (no j or k option) list saved files\n" +" -L dump label\n" +" -p proceed inspite of errors\n" +" -v be verbose\n" +" -V specify Volume names (separated by |)\n" +" -? 
print this message\n" +"\n" +msgstr "" + +#: src/stored/bls.c:189 +msgid "No archive name specified\n" +msgstr "" + +#: src/stored/bls.c:224 +#, c-format +msgid "" +"\n" +"Warning, this Volume is a continuation of Volume %s\n" +msgstr "" + +#: src/stored/bls.c:267 +#, c-format +msgid "Got EOM at file %u on device %s, Volume \"%s\"\n" +msgstr "" + +#: src/stored/bls.c:278 +#, c-format +msgid "Mounted Volume \"%s\".\n" +msgstr "" + +#: src/stored/bls.c:280 +#, c-format +msgid "Got EOF at file %u on device %s, Volume \"%s\"\n" +msgstr "" + +#: src/stored/bls.c:304 +#, c-format +msgid "" +"File:blk=%u:%u blk_num=%u blen=%u First rec FI=%s SessId=%u SessTim=%u Strm=%" +"s rlen=%d\n" +msgstr "" + +#: src/stored/bls.c:313 +#, c-format +msgid "Block: %d size=%d\n" +msgstr "" + +#: src/stored/bls.c:380 +#, c-format +msgid "FileIndex=%d VolSessionId=%d VolSessionTime=%d Stream=%d DataLen=%d\n" +msgstr "" + +#: src/stored/bls.c:397 src/stored/read_record.c:322 +msgid "Fresh Volume Label" +msgstr "" + +#: src/stored/bls.c:400 src/stored/read_record.c:325 +msgid "Volume Label" +msgstr "" + +#: src/stored/bls.c:404 src/stored/label.c:969 +msgid "Begin Job Session" +msgstr "" + +#: src/stored/bls.c:408 src/stored/label.c:972 +msgid "End Job Session" +msgstr "" + +#: src/stored/bls.c:411 +msgid "End of Medium" +msgstr "" + +#: src/stored/bls.c:414 src/stored/label.c:981 +msgid "Unknown" +msgstr "" + +#: src/stored/bls.c:420 src/stored/read_record.c:343 +#, c-format +msgid "%s Record: VolSessionId=%d VolSessionTime=%d JobId=%d DataLen=%d\n" +msgstr "" + +#: src/stored/bscan.c:109 +#, c-format +msgid "" +"Copyright (C) 2000-2016 Kern Sibbald +"\n" +"Version: %s (%s)\n" +"\n" +"Usage: bscan [ options ] \n" +" -b bootstrap specify a bootstrap file\n" +" -c specify configuration file\n" +" -d set debug level to nn\n" +" -m update media info in database\n" +" -n specify the database name (default bacula)\n" +" -u specify database user name (default bacula)\n" +" -P specify database host (default NULL)\n" +" -p proceed inspite of I/O errors\n" +" -r list records\n" +" -s synchronize or store in database\n" +" -S show scan progress periodically\n" +" -v verbose\n" +" -V specify Volume names (separated by |)\n" +" -w specify working directory (default from conf file)\n" +" -? print this message\n" +"\n" +msgstr "" + +#: src/stored/bscan.c:234 src/stored/stored.c:265 +#, c-format +msgid "No Storage resource defined in %s. Cannot continue.\n" +msgstr "" + +#: src/stored/bscan.c:242 src/stored/stored.c:296 +#, c-format +msgid "No Working Directory defined in %s. Cannot continue.\n" +msgstr "" + +#: src/stored/bscan.c:250 +#, c-format +msgid "Working Directory: %s not found. Cannot continue.\n" +msgstr "" + +#: src/stored/bscan.c:254 +#, c-format +msgid "Working Directory: %s is not a directory. Cannot continue.\n" +msgstr "" + +#: src/stored/bscan.c:268 src/stored/bscan.c:341 +#, c-format +msgid "First Volume Size = %sn" +msgstr "" + +#: src/stored/bscan.c:274 +msgid "Could not init Bacula database\n" +msgstr "" + +#: src/stored/bscan.c:281 +#, c-format +msgid "Using Database: %s, User: %s\n" +msgstr "" + +#: src/stored/bscan.c:315 +#, c-format +msgid "Create JobMedia for Job %s\n" +msgstr "" + +#: src/stored/bscan.c:326 +#, c-format +msgid "Could not create JobMedia record for Volume=%s Job=%s\n" +msgstr "" + +#: src/stored/bscan.c:382 +#, c-format +msgid "done: %d%%\n" +msgstr "" + +#: src/stored/bscan.c:406 +msgid "Volume is prelabeled. 
This tape cannot be scanned.\n" +msgstr "" + +#: src/stored/bscan.c:418 +#, c-format +msgid "Pool record for %s found in DB.\n" +msgstr "" + +#: src/stored/bscan.c:422 +#, c-format +msgid "VOL_LABEL: Pool record not found for Pool: %s\n" +msgstr "" + +#: src/stored/bscan.c:428 +#, c-format +msgid "VOL_LABEL: PoolType mismatch. DB=%s Vol=%s\n" +msgstr "" + +#: src/stored/bscan.c:432 +#, c-format +msgid "Pool type \"%s\" is OK.\n" +msgstr "" + +#: src/stored/bscan.c:442 +#, c-format +msgid "Media record for %s found in DB.\n" +msgstr "" + +#: src/stored/bscan.c:449 +#, c-format +msgid "VOL_LABEL: Media record not found for Volume: %s\n" +msgstr "" + +#: src/stored/bscan.c:456 +#, c-format +msgid "VOL_LABEL: MediaType mismatch. DB=%s Vol=%s\n" +msgstr "" + +#: src/stored/bscan.c:460 +#, c-format +msgid "Media type \"%s\" is OK.\n" +msgstr "" + +#: src/stored/bscan.c:469 +#, c-format +msgid "VOL_LABEL: OK for Volume: %s\n" +msgstr "" + +#: src/stored/bscan.c:476 +#, c-format +msgid "%d \"errors\" ignored before first Start of Session record.\n" +msgstr "" + +#: src/stored/bscan.c:487 +#, c-format +msgid "SOS_LABEL: Found Job record for JobId: %d\n" +msgstr "" + +#: src/stored/bscan.c:492 +#, c-format +msgid "SOS_LABEL: Job record not found for JobId: %d\n" +msgstr "" + +#: src/stored/bscan.c:532 +#, c-format +msgid "SOS_LABEL: VolSessId mismatch for JobId=%u. DB=%d Vol=%d\n" +msgstr "" + +#: src/stored/bscan.c:538 +#, c-format +msgid "SOS_LABEL: VolSessTime mismatch for JobId=%u. DB=%d Vol=%d\n" +msgstr "" + +#: src/stored/bscan.c:544 +#, c-format +msgid "SOS_LABEL: PoolId mismatch for JobId=%u. DB=%d Vol=%d\n" +msgstr "" + +#: src/stored/bscan.c:562 src/stored/bscan.c:1013 +#, c-format +msgid "Could not find SessId=%d SessTime=%d for EOS record.\n" +msgstr "" + +#: src/stored/bscan.c:606 +#, c-format +msgid "Could not update job record. ERR=%s\n" +msgstr "" + +#: src/stored/bscan.c:617 +#, c-format +msgid "End of all Volumes. VolFiles=%u VolBlocks=%u VolBytes=%s\n" +msgstr "" + +#: src/stored/bscan.c:629 +#, c-format +msgid "Could not find Job for SessId=%d SessTime=%d record.\n" +msgstr "" + +#: src/stored/bscan.c:665 +#, c-format +msgid "%s file records. At file:blk=%s:%s bytes=%s\n" +msgstr "" + +#: src/stored/bscan.c:708 +#, c-format +msgid "Got MD5 record: %s\n" +msgstr "" + +#: src/stored/bscan.c:717 +#, c-format +msgid "Got SHA1 record: %s\n" +msgstr "" + +#: src/stored/bscan.c:725 +#, c-format +msgid "Got Prog Names Stream: %s\n" +msgstr "" + +#: src/stored/bscan.c:731 +msgid "Got Prog Data Stream record.\n" +msgstr "" + +#: src/stored/bscan.c:735 +#, c-format +msgid "Unknown stream type!!! stream=%d data=%s\n" +msgstr "" + +#: src/stored/bscan.c:799 +#, c-format +msgid "Could not create File Attributes record. ERR=%s\n" +msgstr "" + +#: src/stored/bscan.c:805 +#, c-format +msgid "Created File record: %s\n" +msgstr "" + +#: src/stored/bscan.c:842 +#, c-format +msgid "Could not create media record. ERR=%s\n" +msgstr "" + +#: src/stored/bscan.c:846 src/stored/bscan.c:867 +#, c-format +msgid "Could not update media record. ERR=%s\n" +msgstr "" + +#: src/stored/bscan.c:850 +#, c-format +msgid "Created Media record for Volume: %s\n" +msgstr "" + +#: src/stored/bscan.c:871 +#, c-format +msgid "Updated Media record at end of Volume: %s\n" +msgstr "" + +#: src/stored/bscan.c:888 +#, c-format +msgid "Could not create pool record. 
ERR=%s\n" +msgstr "" + +#: src/stored/bscan.c:892 +#, c-format +msgid "Created Pool record for Pool: %s\n" +msgstr "" + +#: src/stored/bscan.c:912 +#, c-format +msgid "Created Client record for Client: %s\n" +msgstr "" + +#: src/stored/bscan.c:929 +#, c-format +msgid "Fileset \"%s\" already exists.\n" +msgstr "" + +#: src/stored/bscan.c:933 +#, c-format +msgid "Could not create FileSet record \"%s\". ERR=%s\n" +msgstr "" + +#: src/stored/bscan.c:938 +#, c-format +msgid "Created FileSet record \"%s\"\n" +msgstr "" + +#: src/stored/bscan.c:985 +#, c-format +msgid "Could not create JobId record. ERR=%s\n" +msgstr "" + +#: src/stored/bscan.c:991 +#, c-format +msgid "Could not update job start record. ERR=%s\n" +msgstr "" + +#: src/stored/bscan.c:994 +#, c-format +msgid "Created new JobId=%u record for original JobId=%u\n" +msgstr "" + +#: src/stored/bscan.c:1044 +#, c-format +msgid "Could not update JobId=%u record. ERR=%s\n" +msgstr "" + +#: src/stored/bscan.c:1049 +#, c-format +msgid "Updated Job termination record for JobId=%u TermStat=%c\n" +msgstr "" + +#: src/stored/bscan.c:1071 +#, c-format +msgid "Job Termination code: %d" +msgstr "" + +#: src/stored/bscan.c:1076 +#, c-format +msgid "" +"%s\n" +"JobId: %d\n" +"Job: %s\n" +"FileSet: %s\n" +"Backup Level: %s\n" +"Client: %s\n" +"Start time: %s\n" +"End time: %s\n" +"Files Written: %s\n" +"Bytes Written: %s\n" +"Volume Session Id: %d\n" +"Volume Session Time: %d\n" +"Last Volume Bytes: %s\n" +"Termination: %s\n" +"\n" +msgstr "" + +#: src/stored/bscan.c:1140 +#, c-format +msgid "Could not create JobMedia record. ERR=%s\n" +msgstr "" + +#: src/stored/bscan.c:1144 +#, c-format +msgid "Created JobMedia record JobId %d, MediaId %d\n" +msgstr "" + +#: src/stored/bscan.c:1160 +#, c-format +msgid "Could not find SessId=%d SessTime=%d for MD5/SHA1 record.\n" +msgstr "" + +#: src/stored/bscan.c:1174 +#, c-format +msgid "Could not add MD5/SHA1 to File record. ERR=%s\n" +msgstr "" + +#: src/stored/bscan.c:1179 +msgid "Updated MD5/SHA1 record\n" +msgstr "" + +#: src/stored/btape.c:157 src/stored/stored.c:107 +#, c-format +msgid "Tape block size (%d) not multiple of system size (%d)\n" +msgstr "" + +#: src/stored/btape.c:161 src/stored/stored.c:111 +#, c-format +msgid "Tape block size (%d) is not a power of 2\n" +msgstr "" + +#: src/stored/btape.c:164 +#, c-format +msgid "" +"\n" +"\n" +"!!!! Warning large disk addressing disabled. off_t=%d should be 8 or " +"more !!!!!\n" +"\n" +"\n" +msgstr "" + +#: src/stored/btape.c:171 +#, c-format +msgid "32 bit printf/scanf problem. i=%d x32=%u y32=%u\n" +msgstr "" + +#: src/stored/btape.c:180 +msgid "64 bit printf/scanf problem. i=%d x64=%" +msgstr "" + +#: src/stored/btape.c:180 +msgid " y64=%" +msgstr "" + +#: src/stored/btape.c:184 +#, c-format +msgid "Tape block granularity is %d bytes.\n" +msgstr "" + +#: src/stored/btape.c:251 +msgid "No archive name specified.\n" +msgstr "" + +#: src/stored/btape.c:255 +msgid "Improper number of arguments specified.\n" +msgstr "" + +#: src/stored/btape.c:338 +#, c-format +msgid "open device %s: OK\n" +msgstr "" + +#: src/stored/btape.c:359 +msgid "Enter Volume Name: " +msgstr "" + +#: src/stored/btape.c:366 +#, c-format +msgid "Device open failed. 
ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:371 +#, c-format +msgid "Wrote Volume label for volume \"%s\".\n" +msgstr "" + +#: src/stored/btape.c:385 +msgid "Volume has no label.\n" +msgstr "" + +#: src/stored/btape.c:388 +msgid "Volume label read correctly.\n" +msgstr "" + +#: src/stored/btape.c:391 +#, c-format +msgid "I/O error on device: ERR=%s" +msgstr "" + +#: src/stored/btape.c:394 +msgid "Volume name error\n" +msgstr "" + +#: src/stored/btape.c:397 +#, c-format +msgid "Error creating label. ERR=%s" +msgstr "" + +#: src/stored/btape.c:400 +msgid "Volume version error.\n" +msgstr "" + +#: src/stored/btape.c:403 +msgid "Bad Volume label type.\n" +msgstr "" + +#: src/stored/btape.c:406 +msgid "Unknown error.\n" +msgstr "" + +#: src/stored/btape.c:424 +#, c-format +msgid "Bad status from load. ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:426 +#, c-format +msgid "Loaded %s\n" +msgstr "" + +#: src/stored/btape.c:435 src/stored/btape.c:775 src/stored/btape.c:817 +#: src/stored/btape.c:887 src/stored/btape.c:929 src/stored/btape.c:1198 +#, c-format +msgid "Bad status from rewind. ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:438 src/stored/btape.c:1206 +#, c-format +msgid "Rewound %s\n" +msgstr "" + +#: src/stored/btape.c:465 src/stored/btape.c:1210 +#, c-format +msgid "Bad status from weof %d. ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:469 +#, c-format +msgid "Wrote 1 EOF to %s\n" +msgstr "" + +#: src/stored/btape.c:472 +#, c-format +msgid "Wrote %d EOFs to %s\n" +msgstr "" + +#: src/stored/btape.c:490 +msgid "Moved to end of medium.\n" +msgstr "" + +#: src/stored/btape.c:517 +#, c-format +msgid "Bad status from bsf. ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:519 +#, c-format +msgid "Backspaced %d file%s.\n" +msgstr "" + +#: src/stored/btape.c:536 +#, c-format +msgid "Bad status from bsr. ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:538 +#, c-format +msgid "Backspaced %d record%s.\n" +msgstr "" + +#: src/stored/btape.c:548 src/stored/status.c:227 +#, c-format +msgid "Configured device capabilities:\n" +msgstr "" + +#: src/stored/btape.c:566 +#, c-format +msgid "Device status:\n" +msgstr "" + +#: src/stored/btape.c:580 src/stored/status.c:259 +#, c-format +msgid "Device parameters:\n" +msgstr "" + +#: src/stored/btape.c:585 +#, c-format +msgid "Status:\n" +msgstr "" + +#: src/stored/btape.c:600 +msgid "" +"Test writting larger and larger records.\n" +"This is a torture test for records.\n" +"I am going to write\n" +"larger and larger records. 
It will stop when the record size\n" +"plus the header exceeds the block size (by default about 64K)\n" +msgstr "" + +#: src/stored/btape.c:608 src/stored/btape.c:1815 +msgid "Command aborted.\n" +msgstr "" + +#: src/stored/btape.c:624 +#, c-format +msgid "Block %d i=%d\n" +msgstr "" + +#: src/stored/btape.c:650 +msgid "Skipping read backwards test because BSR turned off.\n" +msgstr "" + +#: src/stored/btape.c:654 +msgid "" +"\n" +"=== Write, backup, and re-read test ===\n" +"\n" +"I'm going to write three records and an EOF\n" +"then backup over the EOF and re-read the last record.\n" +"Bacula does this after writing the last block on the\n" +"tape to verify that the block was written correctly.\n" +"\n" +"This is not an *essential* feature ...\n" +"\n" +msgstr "" + +#: src/stored/btape.c:667 src/stored/btape.c:678 src/stored/btape.c:689 +#: src/stored/btape.c:787 src/stored/btape.c:803 src/stored/btape.c:899 +#: src/stored/btape.c:915 src/stored/btape.c:1524 src/stored/btape.c:2407 +msgid "Error writing record to block.\n" +msgstr "" + +#: src/stored/btape.c:671 src/stored/btape.c:682 src/stored/btape.c:693 +#: src/stored/btape.c:791 src/stored/btape.c:807 src/stored/btape.c:903 +#: src/stored/btape.c:919 src/stored/btape.c:1528 src/stored/btape.c:2411 +msgid "Error writing block to device.\n" +msgstr "" + +#: src/stored/btape.c:674 +#, c-format +msgid "Wrote first record of %d bytes.\n" +msgstr "" + +#: src/stored/btape.c:685 +#, c-format +msgid "Wrote second record of %d bytes.\n" +msgstr "" + +#: src/stored/btape.c:696 +#, c-format +msgid "Wrote third record of %d bytes.\n" +msgstr "" + +#: src/stored/btape.c:703 src/stored/btape.c:708 +#, c-format +msgid "Backspace file failed! ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:712 +msgid "Backspaced over EOF OK.\n" +msgstr "" + +#: src/stored/btape.c:714 +#, c-format +msgid "Backspace record failed! ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:717 +msgid "Backspace record OK.\n" +msgstr "" + +#: src/stored/btape.c:720 src/stored/btape.c:726 +#, c-format +msgid "Read block failed! ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:731 +msgid "Bad data in record. Test failed!\n" +msgstr "" + +#: src/stored/btape.c:735 +msgid "" +"\n" +"Block re-read correct. Test succeeded!\n" +msgstr "" + +#: src/stored/btape.c:736 +msgid "" +"=== End Write, backup, and re-read test ===\n" +"\n" +msgstr "" + +#: src/stored/btape.c:743 +msgid "" +"This is not terribly serious since Bacula only uses\n" +"this function to verify the last block written to the\n" +"tape. Bacula will skip the last block verification\n" +"if you add:\n" +"\n" +"Backward Space Record = No\n" +"\n" +"to your Storage daemon's Device resource definition.\n" +msgstr "" + +#: src/stored/btape.c:767 +msgid "" +"\n" +"=== Write, rewind, and re-read test ===\n" +"\n" +"I'm going to write 1000 records and an EOF\n" +"then write 1000 records and an EOF, then rewind,\n" +"and re-read the data to verify that it is correct.\n" +"\n" +"This is an *essential* feature ...\n" +"\n" +msgstr "" + +#: src/stored/btape.c:795 src/stored/btape.c:811 src/stored/btape.c:907 +#: src/stored/btape.c:923 +#, c-format +msgid "Wrote 1000 blocks of %d bytes.\n" +msgstr "" + +#: src/stored/btape.c:820 src/stored/btape.c:932 +msgid "Rewind OK.\n" +msgstr "" + +#: src/stored/btape.c:827 src/stored/btape.c:981 +msgid "Got EOF on tape.\n" +msgstr "" + +#: src/stored/btape.c:832 +#, c-format +msgid "Read block %d failed! ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:838 +#, c-format +msgid "Read record failed. Block %d! 
ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:844 src/stored/btape.c:1011 +#, c-format +msgid "Bad data in record. Expected %d, got %d at byte %d. Test failed!\n" +msgstr "" + +#: src/stored/btape.c:851 +msgid "1000 blocks re-read correctly.\n" +msgstr "" + +#: src/stored/btape.c:854 src/stored/btape.c:1018 +msgid "" +"=== Test Succeeded. End Write, rewind, and re-read test ===\n" +"\n" +msgstr "" + +#: src/stored/btape.c:879 +msgid "" +"\n" +"=== Write, rewind, and position test ===\n" +"\n" +"I'm going to write 1000 records and an EOF\n" +"then write 1000 records and an EOF, then rewind,\n" +"and position to a few blocks and verify that it is correct.\n" +"\n" +"This is an *essential* feature ...\n" +"\n" +msgstr "" + +#: src/stored/btape.c:972 +#, c-format +msgid "Reposition to file:block %d:%d\n" +msgstr "" + +#: src/stored/btape.c:974 +msgid "Reposition error.\n" +msgstr "" + +#: src/stored/btape.c:987 +#, c-format +msgid "" +"Read block %d failed! file=%d blk=%d. ERR=%s\n" +"\n" +msgstr "" + +#: src/stored/btape.c:989 +msgid "" +"This may be because the tape drive block size is not\n" +" set to variable blocking as normally used by Bacula.\n" +" Please see the Tape Testing chapter in the manual and \n" +" look for using mt with defblksize and setoptions\n" +"If your tape drive block size is correct, then perhaps\n" +" your SCSI driver is *really* stupid and does not\n" +" correctly report the file:block after a FSF. In this\n" +" case try setting:\n" +" Fast Forward Space File = no\n" +" in your Device resource.\n" +msgstr "" + +#: src/stored/btape.c:1005 +#, c-format +msgid "Read record failed! ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:1016 +#, c-format +msgid "Block %d re-read correctly.\n" +msgstr "" + +#: src/stored/btape.c:1037 +msgid "" +"\n" +"\n" +"=== Append files test ===\n" +"\n" +"This test is essential to Bacula.\n" +"\n" +"I'm going to write one record in file 0,\n" +" two records in file 1,\n" +" and three records in file 2\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1061 +msgid "Now moving to end of medium.\n" +msgstr "" + +#: src/stored/btape.c:1063 src/stored/btape.c:1293 +#, c-format +msgid "We should be in file 3. I am at file %d. %s\n" +msgstr "" + +#: src/stored/btape.c:1064 src/stored/btape.c:1082 src/stored/btape.c:1282 +#: src/stored/btape.c:1294 src/stored/btape.c:1307 src/stored/btape.c:1324 +msgid "This is correct!" +msgstr "" + +#: src/stored/btape.c:1064 src/stored/btape.c:1082 src/stored/btape.c:1282 +#: src/stored/btape.c:1294 src/stored/btape.c:1307 src/stored/btape.c:1324 +msgid "This is NOT correct!!!!" +msgstr "" + +#: src/stored/btape.c:1070 +msgid "" +"\n" +"Now the important part, I am going to attempt to append to the tape.\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1077 +msgid "" +"Done appending, there should be no I/O errors\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1078 +msgid "Doing Bacula scan of blocks:\n" +msgstr "" + +#: src/stored/btape.c:1080 +msgid "End scanning the tape.\n" +msgstr "" + +#: src/stored/btape.c:1081 src/stored/btape.c:1306 +#, c-format +msgid "We should be in file 4. I am at file %d. 
%s\n" +msgstr "" + +#: src/stored/btape.c:1106 +msgid "" +"\n" +"Autochanger enabled, but no name or no command device specified.\n" +msgstr "" + +#: src/stored/btape.c:1110 +msgid "" +"\n" +"Ah, I see you have an autochanger configured.\n" +"To test the autochanger you must have a blank tape\n" +" that I can write on in Slot 1.\n" +msgstr "" + +#: src/stored/btape.c:1113 +msgid "" +"\n" +"Do you wish to continue with the Autochanger test? (y/n): " +msgstr "" + +#: src/stored/btape.c:1120 +msgid "" +"\n" +"\n" +"=== Autochanger test ===\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1129 +msgid "3301 Issuing autochanger \"loaded\" command.\n" +msgstr "" + +#: src/stored/btape.c:1138 +#, c-format +msgid "3991 Bad autochanger command: %s\n" +msgstr "" + +#: src/stored/btape.c:1139 +#, c-format +msgid "3991 result=\"%s\": ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:1143 +#, c-format +msgid "Slot %d loaded. I am going to unload it.\n" +msgstr "" + +#: src/stored/btape.c:1145 +msgid "Nothing loaded in the drive. OK.\n" +msgstr "" + +#: src/stored/btape.c:1153 +#, c-format +msgid "3302 Issuing autochanger \"unload %d %d\" command.\n" +msgstr "" + +#: src/stored/btape.c:1158 +#, c-format +msgid "unload status=%s %d\n" +msgstr "" + +#: src/stored/btape.c:1158 +msgid "Bad" +msgstr "" + +#: src/stored/btape.c:1161 +#, c-format +msgid "3992 Bad autochanger command: %s\n" +msgstr "" + +#: src/stored/btape.c:1162 +#, c-format +msgid "3992 result=\"%s\": ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:1172 +#, c-format +msgid "3303 Issuing autochanger \"load %d %d\" command.\n" +msgstr "" + +#: src/stored/btape.c:1180 +#, c-format +msgid "3303 Autochanger \"load %d %d\" status is OK.\n" +msgstr "" + +#: src/stored/btape.c:1184 +#, c-format +msgid "3993 Bad autochanger command: %s\n" +msgstr "" + +#: src/stored/btape.c:1185 +#, c-format +msgid "3993 result=\"%s\": ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:1200 +msgid "" +"\n" +"The test failed, probably because you need to put\n" +"a longer sleep time in the mtx-script in the load) case.\n" +"Adding a 30 second sleep and trying again ...\n" +msgstr "" + +#: src/stored/btape.c:1213 +#, c-format +msgid "Wrote EOF to %s\n" +msgstr "" + +#: src/stored/btape.c:1217 +#, c-format +msgid "" +"\n" +"The test worked this time. Please add:\n" +"\n" +" sleep %d\n" +"\n" +"to your mtx-changer script in the load) case.\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1222 +msgid "" +"\n" +"The test autochanger worked!!\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1233 +msgid "You must correct this error or the Autochanger will not work.\n" +msgstr "" + +#: src/stored/btape.c:1251 +msgid "" +"\n" +"\n" +"=== Forward space files test ===\n" +"\n" +"This test is essential to Bacula.\n" +"\n" +"I'm going to write five files then test forward spacing\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1276 +msgid "Now forward spacing 1 file.\n" +msgstr "" + +#: src/stored/btape.c:1278 src/stored/btape.c:1290 src/stored/btape.c:1303 +#: src/stored/btape.c:1321 src/stored/btape.c:1490 +#, c-format +msgid "Bad status from fsr. ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:1281 +#, c-format +msgid "We should be in file 1. I am at file %d. %s\n" +msgstr "" + +#: src/stored/btape.c:1288 +msgid "Now forward spacing 2 files.\n" +msgstr "" + +#: src/stored/btape.c:1301 +msgid "Now forward spacing 4 files.\n" +msgstr "" + +#: src/stored/btape.c:1313 +msgid "" +"The test worked this time. 
Please add:\n" +"\n" +" Fast Forward Space File = no\n" +"\n" +"to your Device resource for this drive.\n" +msgstr "" + +#: src/stored/btape.c:1319 +msgid "Now forward spacing 1 more file.\n" +msgstr "" + +#: src/stored/btape.c:1323 +#, c-format +msgid "We should be in file 5. I am at file %d. %s\n" +msgstr "" + +#: src/stored/btape.c:1328 +msgid "" +"\n" +"=== End Forward space files test ===\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1332 +msgid "" +"\n" +"The forward space file test failed.\n" +msgstr "" + +#: src/stored/btape.c:1334 +msgid "" +"You have Fast Forward Space File enabled.\n" +"I am turning it off then retrying the test.\n" +msgstr "" + +#: src/stored/btape.c:1340 +msgid "" +"You must correct this error or Bacula will not work.\n" +"Some systems, e.g. OpenBSD, require you to set\n" +" Use MTIOCGET= no\n" +"in your device resource. Use with caution.\n" +msgstr "" + +#: src/stored/btape.c:1372 +msgid "" +"\n" +"Append test failed. Attempting again.\n" +"Setting \"Hardware End of Medium = no\n" +" and \"Fast Forward Space File = no\n" +"and retrying append test.\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1380 +msgid "" +"\n" +"\n" +"It looks like the test worked this time, please add:\n" +"\n" +" Hardware End of Medium = No\n" +"\n" +" Fast Forward Space File = No\n" +"to your Device resource in the Storage conf file.\n" +msgstr "" + +#: src/stored/btape.c:1387 +msgid "" +"\n" +"\n" +"That appears *NOT* to have corrected the problem.\n" +msgstr "" + +#: src/stored/btape.c:1392 +msgid "" +"\n" +"\n" +"It looks like the append failed. Attempting again.\n" +"Setting \"BSF at EOM = yes\" and retrying append test.\n" +msgstr "" + +#: src/stored/btape.c:1397 +msgid "" +"\n" +"\n" +"It looks like the test worked this time, please add:\n" +"\n" +" Hardware End of Medium = No\n" +" Fast Forward Space File = No\n" +" BSF at EOM = yes\n" +"\n" +"to your Device resource in the Storage conf file.\n" +msgstr "" + +#: src/stored/btape.c:1408 +msgid "" +"\n" +"Append test failed.\n" +"\n" +"\n" +"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" +"Unable to correct the problem. You MUST fix this\n" +"problem before Bacula can use your tape drive correctly\n" +"\n" +"Perhaps running Bacula in fixed block mode will work.\n" +"Do so by setting:\n" +"\n" +"Minimum Block Size = nnn\n" +"Maximum Block Size = nnn\n" +"\n" +"in your Storage daemon's Device definition.\n" +"nnn must match your tape driver's block size, which\n" +"can be determined by reading your tape manufacturers\n" +"information, and the information on your kernel dirver.\n" +"Fixed block sizes, however, are not normally an ideal solution.\n" +"\n" +"Some systems, e.g. OpenBSD, require you to set\n" +" Use MTIOCGET= no\n" +"in your device resource. 
Use with caution.\n" +msgstr "" + +#: src/stored/btape.c:1429 +msgid "" +"\n" +"The above Bacula scan should have output identical to what follows.\n" +"Please double check it ...\n" +"=== Sample correct output ===\n" +"1 block of 64448 bytes in file 1\n" +"End of File mark.\n" +"2 blocks of 64448 bytes in file 2\n" +"End of File mark.\n" +"3 blocks of 64448 bytes in file 3\n" +"End of File mark.\n" +"1 block of 64448 bytes in file 4\n" +"End of File mark.\n" +"Total files=4, blocks=7, bytes = 451,136\n" +"=== End sample correct output ===\n" +"\n" +"If the above scan output is not identical to the\n" +"sample output, you MUST correct the problem\n" +"or Bacula will not be able to write multiple Jobs to \n" +"the tape.\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1468 +#, c-format +msgid "Bad status from fsf. ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:1472 +msgid "Forward spaced 1 file.\n" +msgstr "" + +#: src/stored/btape.c:1475 +#, c-format +msgid "Forward spaced %d files.\n" +msgstr "" + +#: src/stored/btape.c:1494 +msgid "Forward spaced 1 record.\n" +msgstr "" + +#: src/stored/btape.c:1497 +#, c-format +msgid "Forward spaced %d records.\n" +msgstr "" + +#: src/stored/btape.c:1531 +#, c-format +msgid "Wrote one record of %d bytes.\n" +msgstr "" + +#: src/stored/btape.c:1533 +msgid "Wrote block to device.\n" +msgstr "" + +#: src/stored/btape.c:1548 +msgid "Enter length to read: " +msgstr "" + +#: src/stored/btape.c:1553 +msgid "Bad length entered, using default of 1024 bytes.\n" +msgstr "" + +#: src/stored/btape.c:1562 +#, c-format +msgid "Read of %d bytes gives stat=%d. ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:1585 src/stored/btape.c:1634 +#, c-format +msgid "End of tape\n" +msgstr "" + +#: src/stored/btape.c:1590 +#, c-format +msgid "Starting scan at file %u\n" +msgstr "" + +#: src/stored/btape.c:1595 src/stored/dev.c:1229 +#, c-format +msgid "read error on %s. ERR=%s.\n" +msgstr "" + +#: src/stored/btape.c:1597 +#, c-format +msgid "Bad status from read %d. ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:1600 src/stored/btape.c:1614 src/stored/btape.c:1678 +#: src/stored/btape.c:1690 src/stored/btape.c:1703 src/stored/btape.c:1719 +#, c-format +msgid "1 block of %d bytes in file %d\n" +msgstr "" + +#: src/stored/btape.c:1603 src/stored/btape.c:1617 src/stored/btape.c:1681 +#: src/stored/btape.c:1693 src/stored/btape.c:1706 src/stored/btape.c:1722 +#, c-format +msgid "%d blocks of %d bytes in file %d\n" +msgstr "" + +#: src/stored/btape.c:1625 src/stored/btape.c:1697 +#, c-format +msgid "End of File mark.\n" +msgstr "" + +#: src/stored/btape.c:1646 src/stored/btape.c:1750 +#, c-format +msgid "Total files=%d, blocks=%d, bytes = %s\n" +msgstr "" + +#: src/stored/btape.c:1710 +#, c-format +msgid "Short block read.\n" +msgstr "" + +#: src/stored/btape.c:1713 +#, c-format +msgid "Error reading block. ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:1737 +#, c-format +msgid "" +"Blk_block: %u dev_blk=%u blen=%u First rec FI=%s SessId=%u SessTim=%u Strm=%" +"s rlen=%d\n" +msgstr "" + +#: src/stored/btape.c:1759 +#, c-format +msgid "Device status: %u. 
ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:1789 +msgid "" +"\n" +"This command simulates Bacula writing to a tape.\n" +"It requires either one or two blank tapes, which it\n" +"will label and write.\n" +"\n" +"If you have an autochanger configured, it will use\n" +"the tapes that are in slots 1 and 2, otherwise, you will\n" +"be prompted to insert the tapes when necessary.\n" +"\n" +"It will print a status approximately\n" +"every 322 MB, and write an EOF every 3.2 GB. If you have\n" +"selected the simple test option, after writing the first tape\n" +"it will rewind it and re-read the last block written.\n" +"\n" +"If you have selected the multiple tape test, when the first tape\n" +"fills, it will ask for a second, and after writing a few more \n" +"blocks, it will stop. Then it will begin re-reading the\n" +"two tapes.\n" +"\n" +"This may take a long time -- hours! ...\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1806 +msgid "" +"Do you want to run the simplified test (s) with one tape\n" +"or the complete multiple tape (m) test: (s/m) " +msgstr "" + +#: src/stored/btape.c:1809 +msgid "Simple test (single tape) selected.\n" +msgstr "" + +#: src/stored/btape.c:1812 +msgid "Multiple tape test selected.\n" +msgstr "" + +#: src/stored/btape.c:1828 +msgid "Rewind failed.\n" +msgstr "" + +#: src/stored/btape.c:1831 +msgid "Write EOF failed.\n" +msgstr "" + +#: src/stored/btape.c:1858 +msgid "Wrote Start of Session label.\n" +msgstr "" + +#: src/stored/btape.c:1889 +#, c-format +msgid "%s Begin writing Bacula records to tape ...\n" +msgstr "" + +#: src/stored/btape.c:1891 +#, c-format +msgid "%s Begin writing Bacula records to first tape ...\n" +msgstr "" + +#: src/stored/btape.c:1932 +#, c-format +msgid "Wrote blk_block=%u, dev_blk_num=%u VolBytes=%s rate=%.1f KB/s\n" +msgstr "" + +#: src/stored/btape.c:1942 +#, c-format +msgid "%s Flush block, write EOF\n" +msgstr "" + +#: src/stored/btape.c:1953 +msgid "Not OK\n" +msgstr "" + +#: src/stored/btape.c:1981 +msgid "Set ok=false after write_block_to_device.\n" +msgstr "" + +#: src/stored/btape.c:1984 +msgid "Wrote End of Session label.\n" +msgstr "" + +#: src/stored/btape.c:2008 +#, c-format +msgid "Wrote state file last_block_num1=%d last_block_num2=%d\n" +msgstr "" + +#: src/stored/btape.c:2012 +#, c-format +msgid "Could not create state file: %s ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:2020 +#, c-format +msgid "" +"\n" +"\n" +"%s Done filling tape at %d:%d. Now beginning re-read of tape ...\n" +msgstr "" + +#: src/stored/btape.c:2024 +#, c-format +msgid "" +"\n" +"\n" +"%s Done filling tapes at %d:%d. Now beginning re-read of first tape ...\n" +msgstr "" + +#: src/stored/btape.c:2063 +msgid "" +"\n" +"The state file level has changed. You must redo\n" +"the fill command.\n" +msgstr "" + +#: src/stored/btape.c:2069 +#, c-format +msgid "" +"\n" +"Could not find the state file: %s ERR=%s\n" +"You must redo the fill command.\n" +msgstr "" + +#: src/stored/btape.c:2111 +msgid "Mount first tape. Press enter when ready: " +msgstr "" + +#: src/stored/btape.c:2131 +msgid "Rewinding.\n" +msgstr "" + +#: src/stored/btape.c:2136 +#, c-format +msgid "Reading the first 10000 records from %u:%u.\n" +msgstr "" + +#: src/stored/btape.c:2140 src/stored/btape.c:2207 +#, c-format +msgid "Reposition from %u:%u to %u:%u\n" +msgstr "" + +#: src/stored/btape.c:2143 src/stored/btape.c:2194 src/stored/btape.c:2210 +#, c-format +msgid "Reposition error. 
ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:2146 +#, c-format +msgid "Reading block %u.\n" +msgstr "" + +#: src/stored/btape.c:2148 src/stored/btape.c:2199 src/stored/btape.c:2215 +#, c-format +msgid "Error reading block: ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:2153 +msgid "" +"\n" +"The last block on the tape matches. Test succeeded.\n" +"\n" +msgstr "" + +#: src/stored/btape.c:2155 +msgid "" +"\n" +"The last block of the first tape matches.\n" +"\n" +msgstr "" + +#: src/stored/btape.c:2180 +msgid "Mount second tape. Press enter when ready: " +msgstr "" + +#: src/stored/btape.c:2192 +#, c-format +msgid "Reposition from %u:%u to 0:1\n" +msgstr "" + +#: src/stored/btape.c:2197 src/stored/btape.c:2213 +#, c-format +msgid "Reading block %d.\n" +msgstr "" + +#: src/stored/btape.c:2203 +msgid "" +"\n" +"The first block on the second tape matches.\n" +"\n" +msgstr "" + +#: src/stored/btape.c:2219 +msgid "" +"\n" +"The last block on the second tape matches. Test succeeded.\n" +"\n" +msgstr "" + +#: src/stored/btape.c:2234 +#, c-format +msgid "10000 records read now at %d:%d\n" +msgstr "" + +#: src/stored/btape.c:2257 src/stored/btape.c:2268 src/stored/btape.c:2316 +msgid "Last block written" +msgstr "" + +#: src/stored/btape.c:2259 src/stored/btape.c:2269 +msgid "Block read back" +msgstr "" + +#: src/stored/btape.c:2260 +#, c-format +msgid "" +"\n" +"\n" +"The blocks differ at byte %u\n" +msgstr "" + +#: src/stored/btape.c:2261 +msgid "" +"\n" +"\n" +"!!!! The last block written and the block\n" +"that was read back differ. The test FAILED !!!!\n" +"This must be corrected before you use Bacula\n" +"to write multi-tape Volumes.!!!!\n" +msgstr "" + +#: src/stored/btape.c:2300 +#, c-format +msgid "Last block at: %u:%u this_dev_block_num=%d\n" +msgstr "" + +#: src/stored/btape.c:2314 +#, c-format +msgid "Block not written: FileIndex=%u blk_block=%u Size=%u\n" +msgstr "" + +#: src/stored/btape.c:2318 +msgid "Block not written" +msgstr "" + +#: src/stored/btape.c:2333 +#, c-format +msgid "End of tape %d:%d. VolumeCapacity=%s. Write rate = %.1f KB/s\n" +msgstr "" + +#: src/stored/btape.c:2383 +msgid "Test writing blocks of 64512 bytes to tape.\n" +msgstr "" + +#: src/stored/btape.c:2385 +msgid "How many blocks do you want to write? (1000): " +msgstr "" + +#: src/stored/btape.c:2400 +#, c-format +msgid "Begin writing %d Bacula blocks to tape ...\n" +msgstr "" + +#: src/stored/btape.c:2452 +#, c-format +msgid "Begin writing raw blocks of %u bytes.\n" +msgstr "" + +#: src/stored/btape.c:2472 +#, c-format +msgid "Write failed at block %u. 
stat=%d ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:2502 +#, c-format +msgid "Begin writing Bacula blocks of %u bytes.\n" +msgstr "" + +#: src/stored/btape.c:2521 +#, c-format +msgid "Write failed at block %u.\n" +msgstr "" + +#: src/stored/btape.c:2528 +msgid "test autochanger" +msgstr "" + +#: src/stored/btape.c:2529 +msgid "backspace file" +msgstr "" + +#: src/stored/btape.c:2530 +msgid "backspace record" +msgstr "" + +#: src/stored/btape.c:2531 +msgid "fill tape using Bacula writes" +msgstr "" + +#: src/stored/btape.c:2532 +msgid "list device capabilities" +msgstr "" + +#: src/stored/btape.c:2533 +msgid "clear tape errors" +msgstr "" + +#: src/stored/btape.c:2534 +msgid "go to end of Bacula data for append" +msgstr "" + +#: src/stored/btape.c:2535 +msgid "go to the physical end of medium" +msgstr "" + +#: src/stored/btape.c:2536 +msgid "fill tape, write onto second volume" +msgstr "" + +#: src/stored/btape.c:2537 +msgid "read filled tape" +msgstr "" + +#: src/stored/btape.c:2538 +msgid "forward space a file" +msgstr "" + +#: src/stored/btape.c:2539 +msgid "forward space a record" +msgstr "" + +#: src/stored/btape.c:2541 +msgid "write a Bacula label to the tape" +msgstr "" + +#: src/stored/btape.c:2542 +msgid "load a tape" +msgstr "" + +#: src/stored/btape.c:2543 +msgid "quit btape" +msgstr "" + +#: src/stored/btape.c:2544 +msgid "use write() to fill tape" +msgstr "" + +#: src/stored/btape.c:2545 +msgid "read and print the Bacula tape label" +msgstr "" + +#: src/stored/btape.c:2546 +msgid "test record handling functions" +msgstr "" + +#: src/stored/btape.c:2547 +msgid "rewind the tape" +msgstr "" + +#: src/stored/btape.c:2548 +msgid "read() tape block by block to EOT and report" +msgstr "" + +#: src/stored/btape.c:2549 +msgid "Bacula read block by block to EOT and report" +msgstr "" + +#: src/stored/btape.c:2550 +msgid "print tape status" +msgstr "" + +#: src/stored/btape.c:2551 +msgid "General test Bacula tape functions" +msgstr "" + +#: src/stored/btape.c:2552 +msgid "write an EOF on the tape" +msgstr "" + +#: src/stored/btape.c:2553 +msgid "write a single Bacula block" +msgstr "" + +#: src/stored/btape.c:2554 +msgid "read a single record" +msgstr "" + +#: src/stored/btape.c:2555 +msgid "quick fill command" +msgstr "" + +#: src/stored/btape.c:2576 +#, c-format +msgid "%s is an illegal command\n" +msgstr "" + +#: src/stored/btape.c:2586 +#, c-format +msgid "Interactive commands:\n" +msgstr "" + +#: src/stored/btape.c:2596 +#, c-format +msgid "" +"Copyright (C) 2000-2016 Kern Sibbald +"\n" +"Version: %s (%s)\n" +"\n" +"Usage: btape \n" +" -b specify bootstrap file\n" +" -c set configuration file to file\n" +" -d set debug level to nn\n" +" -p proceed inspite of I/O errors\n" +" -s turn off signals\n" +" -v be verbose\n" +" -? 
print this message.\n" +"\n" +msgstr "" + +#: src/stored/btape.c:2687 +#, c-format +msgid "Mount second Volume on device %s and press return when ready: " +msgstr "" + +#: src/stored/btape.c:2714 +#, c-format +msgid "Mount blank Volume on device %s and press return when ready: " +msgstr "" + +#: src/stored/btape.c:2732 +#, c-format +msgid "End of Volume \"%s\"\n" +msgstr "" + +#: src/stored/btape.c:2744 +#, c-format +msgid "Read block=%u, VolBytes=%s rate=%.1f KB/s\n" +msgstr "" + +#: src/stored/btape.c:2759 src/stored/mount.c:500 +#, c-format +msgid "Cannot open Dev=%s, Vol=%s\n" +msgstr "" + +#: src/stored/butil.c:46 +msgid "Nohdr," +msgstr "" + +#: src/stored/butil.c:49 +msgid "partial," +msgstr "" + +#: src/stored/butil.c:52 +msgid "empty," +msgstr "" + +#: src/stored/butil.c:55 +msgid "Nomatch," +msgstr "" + +#: src/stored/butil.c:58 +msgid "cont," +msgstr "" + +#: src/stored/butil.c:130 +msgid "Volume name or names is too long. Please use a .bsr file.\n" +msgstr "" + +#: src/stored/butil.c:150 +#, c-format +msgid "Cannot find device \"%s\" in config file %s.\n" +msgstr "" + +#: src/stored/butil.c:157 +#, c-format +msgid "Cannot init device %s\n" +msgstr "" + +#: src/stored/butil.c:178 +#, c-format +msgid "Cannot open %s\n" +msgstr "" + +#: src/stored/butil.c:261 +#, c-format +msgid "Could not find device \"%s\" in config file %s.\n" +msgstr "" + +#: src/stored/butil.c:266 +#, c-format +msgid "Using device: \"%s\" for reading.\n" +msgstr "" + +#: src/stored/butil.c:269 +#, c-format +msgid "Using device: \"%s\" for writing.\n" +msgstr "" + +#: src/stored/butil.c:285 +msgid "Unexpected End of Data\n" +msgstr "" + +#: src/stored/butil.c:287 +msgid "Unexpected End of Tape\n" +msgstr "" + +#: src/stored/butil.c:289 +msgid "Unexpected End of File\n" +msgstr "" + +#: src/stored/butil.c:291 +msgid "Tape Door is Open\n" +msgstr "" + +#: src/stored/butil.c:293 +msgid "Unexpected Tape is Off-line\n" +msgstr "" + +#: src/stored/dev.c:112 +#, c-format +msgid "Unable to stat device %s: ERR=%s\n" +msgstr "" + +#: src/stored/dev.c:123 +#, c-format +msgid "" +"%s is an unknown device type. Must be tape or directory\n" +" or have RequiresMount=yes for DVD. st_mode=%x\n" +msgstr "" + +#: src/stored/dev.c:179 +#, c-format +msgid "Unable to stat mount point %s: ERR=%s\n" +msgstr "" + +#: src/stored/dev.c:186 +msgid "" +"Mount and unmount commands must defined for a device which requires mount.\n" +msgstr "" + +#: src/stored/dev.c:189 +msgid "Write part command must be defined for a device which requires mount.\n" +msgstr "" + +#: src/stored/dev.c:194 +#, c-format +msgid "Block size %u on device %s is too large, using default %u\n" +msgstr "" + +#: src/stored/dev.c:199 +#, c-format +msgid "Max block size %u not multiple of device %s block size.\n" +msgstr "" + +#: src/stored/dev.c:215 src/stored/dev.c:221 +#, c-format +msgid "Unable to init cond variable: ERR=%s\n" +msgstr "" + +#: src/stored/dev.c:312 +msgid "Illegal mode given to open dev.\n" +msgstr "" + +#: src/stored/dev.c:355 src/stored/device.c:295 +#, c-format +msgid "Unable to open device %s: ERR=%s\n" +msgstr "" + +#: src/stored/dev.c:423 src/stored/dev.c:481 +#, c-format +msgid "Could not open file device %s. 
No Volume name given.\n" +msgstr "" + +#: src/stored/dev.c:449 src/stored/dev.c:562 +#, c-format +msgid "Could not open: %s, ERR=%s\n" +msgstr "" + +#: src/stored/dev.c:506 +#, c-format +msgid "" +"The media in the device %s is not empty, please blank it before writing " +"anything to it.\n" +msgstr "" + +#: src/stored/dev.c:523 +#, c-format +msgid "There is no valid media in the device %s.\n" +msgstr "" + +#: src/stored/dev.c:530 +#, c-format +msgid "Could not mount device %s.\n" +msgstr "" + +#: src/stored/dev.c:588 +#, c-format +msgid "Could not fstat: %s, ERR=%s\n" +msgstr "" + +#: src/stored/dev.c:631 +#, c-format +msgid "Bad call to rewind. Device %s not open\n" +msgstr "" + +#: src/stored/dev.c:677 +#, c-format +msgid "Rewind error on %s. ERR=%s.\n" +msgstr "" + +#: src/stored/dev.c:687 src/stored/dev.c:802 src/stored/dev.c:938 +#: src/stored/dev.c:1457 +#, c-format +msgid "lseek_dev error on %s. ERR=%s.\n" +msgstr "" + +#: src/stored/dev.c:727 +msgid "unknown blocked code" +msgstr "" + +#: src/stored/dev.c:772 +#, c-format +msgid "Bad call to eod_dev. Device %s not open\n" +msgstr "" + +#: src/stored/dev.c:838 +#, c-format +msgid "ioctl MTEOM error on %s. ERR=%s.\n" +msgstr "" + +#: src/stored/dev.c:846 src/stored/dev.c:978 +#, c-format +msgid "ioctl MTIOCGET error on %s. ERR=%s.\n" +msgstr "" + +#: src/stored/dev.c:924 +msgid "Bad device call. Device not open\n" +msgstr "" + +#: src/stored/dev.c:937 +#, c-format +msgid "Seek error: ERR=%s\n" +msgstr "" + +#: src/stored/dev.c:973 +msgid " Bacula status:" +msgstr "" + +#: src/stored/dev.c:974 src/stored/dev.c:1023 src/stored/dev.c:1025 +#, c-format +msgid " file=%d block=%d\n" +msgstr "" + +#: src/stored/dev.c:982 +msgid " Device status:" +msgstr "" + +#: src/stored/dev.c:1047 +msgid "Bad call to load_dev. Device not open\n" +msgstr "" + +#: src/stored/dev.c:1058 src/stored/dev.c:1071 +#, c-format +msgid "ioctl MTLOAD error on %s. ERR=%s.\n" +msgstr "" + +#: src/stored/dev.c:1107 +#, c-format +msgid "ioctl MTOFFL error on %s. ERR=%s.\n" +msgstr "" + +#: src/stored/dev.c:1148 +msgid "Bad call to fsf_dev. Device not open\n" +msgstr "" + +#: src/stored/dev.c:1158 src/stored/dev.c:1279 +#, c-format +msgid "Device %s at End of Tape.\n" +msgstr "" + +#: src/stored/dev.c:1183 src/stored/dev.c:1259 +#, c-format +msgid "ioctl MTFSF error on %s. ERR=%s.\n" +msgstr "" + +#: src/stored/dev.c:1308 +msgid "Bad call to bsf_dev. Device not open\n" +msgstr "" + +#: src/stored/dev.c:1314 +#, c-format +msgid "Device %s cannot BSF because it is not a tape.\n" +msgstr "" + +#: src/stored/dev.c:1329 +#, c-format +msgid "ioctl MTBSF error on %s. ERR=%s.\n" +msgstr "" + +#: src/stored/dev.c:1349 +msgid "Bad call to fsr. Device not open\n" +msgstr "" + +#: src/stored/dev.c:1358 +#, c-format +msgid "ioctl MTFSR not permitted on %s.\n" +msgstr "" + +#: src/stored/dev.c:1386 +#, c-format +msgid "ioctl MTFSR %d error on %s. ERR=%s.\n" +msgstr "" + +#: src/stored/dev.c:1406 +msgid "Bad call to bsr_dev. Device not open\n" +msgstr "" + +#: src/stored/dev.c:1416 +#, c-format +msgid "ioctl MTBSR not permitted on %s.\n" +msgstr "" + +#: src/stored/dev.c:1429 +#, c-format +msgid "ioctl MTBSR error on %s. ERR=%s.\n" +msgstr "" + +#: src/stored/dev.c:1446 +msgid "Bad call to reposition_dev. Device not open\n" +msgstr "" + +#: src/stored/dev.c:1514 +msgid "Bad call to weof_dev. Device not open\n" +msgstr "" + +#: src/stored/dev.c:1524 +msgid "Attempt to WEOF on non-appendable Volume\n" +msgstr "" + +#: src/stored/dev.c:1541 +#, c-format +msgid "ioctl MTWEOF error on %s. 
ERR=%s.\n" +msgstr "" + +#: src/stored/dev.c:1583 +msgid "Got ENOTTY on read/write!\n" +msgstr "" + +#: src/stored/dev.c:1630 +#, c-format +msgid "unknown func code %d" +msgstr "" + +#: src/stored/dev.c:1636 +#, c-format +msgid "I/O function \"%s\" not supported on this device.\n" +msgstr "" + +#: src/stored/dev.c:1761 src/stored/dvd.c:734 +#, c-format +msgid "Unable to truncate device %s. ERR=%s\n" +msgstr "" + +#: src/stored/dev.c:1800 +msgid "Bad call to term_dev. Device not open\n" +msgstr "" + +#: src/stored/device.c:100 +#, c-format +msgid "End of medium on Volume \"%s\" Bytes=%s Blocks=%s at %s.\n" +msgstr "" + +#: src/stored/device.c:114 +#, c-format +msgid "New volume \"%s\" mounted on device %s at %s.\n" +msgstr "" + +#: src/stored/device.c:126 +#, c-format +msgid "write_block_to_device Volume label failed. ERR=%s" +msgstr "" + +#: src/stored/device.c:162 +#, c-format +msgid "write_block_to_device overflow block failed. ERR=%s" +msgstr "" + +#: src/stored/device.c:297 +#, c-format +msgid "Unable to open archive %s: ERR=%s\n" +msgstr "" + +#: src/stored/device.c:332 +#, c-format +msgid "Device write lock failure. ERR=%s\n" +msgstr "" + +#: src/stored/device.c:340 +#, c-format +msgid "Device write unlock failure. ERR=%s\n" +msgstr "" + +#: src/stored/device.c:359 +#, c-format +msgid "pthread_cond_wait failure. ERR=%s\n" +msgstr "" + +#: src/stored/dircmd.c:140 +msgid "Connection request failed.\n" +msgstr "" + +#: src/stored/dircmd.c:149 +#, c-format +msgid "Invalid connection. Len=%d\n" +msgstr "" + +#: src/stored/dircmd.c:241 +#, c-format +msgid "3991 Bad setdebug command: %s\n" +msgstr "" + +#: src/stored/dircmd.c:262 +#, c-format +msgid "3904 Job %s not found.\n" +msgstr "" + +#: src/stored/dircmd.c:283 +#, c-format +msgid "3000 Job %s marked to be canceled.\n" +msgstr "" + +#: src/stored/dircmd.c:287 +msgid "3903 Error scanning cancel command.\n" +msgstr "" + +#: src/stored/dircmd.c:364 src/stored/dircmd.c:680 src/stored/dircmd.c:756 +#: src/stored/dircmd.c:819 src/stored/dircmd.c:874 src/stored/dircmd.c:918 +#, c-format +msgid "3999 Device \"%s\" not found or could not be opened.\n" +msgstr "" + +#: src/stored/dircmd.c:369 +#, c-format +msgid "3903 Error scanning label command: %s\n" +msgstr "" + +#: src/stored/dircmd.c:412 +#, c-format +msgid "3920 Cannot label Volume because it is already labeled: \"%s\"\n" +msgstr "" + +#: src/stored/dircmd.c:419 +msgid "3921 Wrong volume mounted.\n" +msgstr "" + +#: src/stored/dircmd.c:423 +msgid "3922 Cannot relabel an ANSI/IBM labeled Volume.\n" +msgstr "" + +#: src/stored/dircmd.c:431 src/stored/dircmd.c:440 +#, c-format +msgid "3912 Failed to label Volume: ERR=%s\n" +msgstr "" + +#: src/stored/dircmd.c:443 +#, c-format +msgid "3913 Cannot label Volume. 
Unknown status %d from read_volume_label()\n" +msgstr "" + +#: src/stored/dircmd.c:476 +#, c-format +msgid "3001 Mounted Volume: %s\n" +msgstr "" + +#: src/stored/dircmd.c:480 src/stored/dircmd.c:954 +#, c-format +msgid "" +"3902 Cannot mount Volume on Storage Device %s because:\n" +"%s" +msgstr "" + +#: src/stored/dircmd.c:508 src/stored/reserve.c:634 +#, c-format +msgid "" +"\n" +" Device \"%s\" requested by DIR could not be opened or does not exist.\n" +msgstr "" + +#: src/stored/dircmd.c:529 src/stored/reserve.c:630 +#, c-format +msgid "" +"\n" +" Device \"%s\" in changer \"%s\" requested by DIR could not be opened or " +"does not exist.\n" +msgstr "" + +#: src/stored/dircmd.c:593 src/stored/dircmd.c:646 +#, c-format +msgid "3901 open device failed: ERR=%s\n" +msgstr "" + +#: src/stored/dircmd.c:613 src/stored/dircmd.c:637 +#, c-format +msgid "3001 Device %s is mounted with Volume \"%s\"\n" +msgstr "" + +#: src/stored/dircmd.c:616 src/stored/dircmd.c:640 src/stored/dircmd.c:655 +#, c-format +msgid "" +"3905 Device %s open but no Bacula volume is mounted.\n" +"If this is not a blank tape, try unmounting and remounting the Volume.\n" +msgstr "" + +#: src/stored/dircmd.c:625 +#, c-format +msgid "3001 Device %s is doing acquire.\n" +msgstr "" + +#: src/stored/dircmd.c:630 src/stored/dircmd.c:730 +#, c-format +msgid "3903 Device %s is being labeled.\n" +msgstr "" + +#: src/stored/dircmd.c:652 +#, c-format +msgid "3001 Device %s is already mounted with Volume \"%s\"\n" +msgstr "" + +#: src/stored/dircmd.c:661 +#, c-format +msgid "3002 Device %s is mounted.\n" +msgstr "" + +#: src/stored/dircmd.c:664 +#, c-format +msgid "3907 %s" +msgstr "" + +#: src/stored/dircmd.c:667 +#, c-format +msgid "3906 File device %s is always mounted.\n" +msgstr "" + +#: src/stored/dircmd.c:673 +#, c-format +msgid "3905 Bizarre wait state %d\n" +msgstr "" + +#: src/stored/dircmd.c:684 +#, c-format +msgid "3909 Error scanning mount command: %s\n" +msgstr "" + +#: src/stored/dircmd.c:711 +#, c-format +msgid "3901 Device %s is already unmounted.\n" +msgstr "" + +#: src/stored/dircmd.c:722 +#, c-format +msgid "3001 Device %s unmounted.\n" +msgstr "" + +#: src/stored/dircmd.c:726 +#, c-format +msgid "3902 Device %s is busy in acquire.\n" +msgstr "" + +#: src/stored/dircmd.c:749 +#, c-format +msgid "3002 Device %s unmounted.\n" +msgstr "" + +#: src/stored/dircmd.c:761 +#, c-format +msgid "3907 Error scanning unmount command: %s\n" +msgstr "" + +#: src/stored/dircmd.c:789 +#, c-format +msgid "3921 Device %s already released.\n" +msgstr "" + +#: src/stored/dircmd.c:796 +#, c-format +msgid "3922 Device %s waiting for mount.\n" +msgstr "" + +#: src/stored/dircmd.c:800 +#, c-format +msgid "3923 Device %s is busy in acquire.\n" +msgstr "" + +#: src/stored/dircmd.c:804 +#, c-format +msgid "3914 Device %s is being labeled.\n" +msgstr "" + +#: src/stored/dircmd.c:812 +#, c-format +msgid "3022 Device %s released.\n" +msgstr "" + +#: src/stored/dircmd.c:824 +#, c-format +msgid "3927 Error scanning release command: %s\n" +msgstr "" + +#: src/stored/dircmd.c:860 +#, c-format +msgid "3995 Device %s is not an autochanger.\n" +msgstr "" + +#: src/stored/dircmd.c:878 +#, c-format +msgid "3908 Error scanning autocharger drives/list/slots command: %s\n" +msgstr "" + +#: src/stored/dircmd.c:922 +#, c-format +msgid "3909 Error scanning readlabel command: %s\n" +msgstr "" + +#: src/stored/dircmd.c:950 +#, c-format +msgid "3001 Volume=%s Slot=%d\n" +msgstr "" + +#: src/stored/dircmd.c:979 +#, c-format +msgid "3910 Unable to open device %s: 
ERR=%s\n" +msgstr "" + +#: src/stored/dircmd.c:991 +#, c-format +msgid "3931 Device %s is BLOCKED. user unmounted.\n" +msgstr "" + +#: src/stored/dircmd.c:995 +#, c-format +msgid "" +"3932 Device %s is BLOCKED. user unmounted during wait for media/mount.\n" +msgstr "" + +#: src/stored/dircmd.c:999 +#, c-format +msgid "3933 Device %s is BLOCKED waiting for media.\n" +msgstr "" + +#: src/stored/dircmd.c:1003 +#, c-format +msgid "3934 Device %s is being initialized.\n" +msgstr "" + +#: src/stored/dircmd.c:1007 +#, c-format +msgid "3935 Device %s is blocked labeling a Volume.\n" +msgstr "" + +#: src/stored/dircmd.c:1011 +#, c-format +msgid "3935 Device %s is blocked for unknown reason.\n" +msgstr "" + +#: src/stored/dircmd.c:1016 +#, c-format +msgid "3936 Device %s is busy reading.\n" +msgstr "" + +#: src/stored/dircmd.c:1019 +#, c-format +msgid "3937 Device %s is busy with %d writer(s).\n" +msgstr "" + +#: src/stored/dvd.c:153 +#, c-format +msgid "Device %s cannot be mounted. ERR=%s\n" +msgstr "" + +#: src/stored/dvd.c:271 +#, c-format +msgid "Cannot run free space command (%s)\n" +msgstr "" + +#: src/stored/dvd.c:374 +#, c-format +msgid "Error while writing current part to the DVD: %s" +msgstr "" + +#: src/stored/dvd.c:394 +#, c-format +msgid "Remaining free space %s on %s\n" +msgstr "" + +#: src/stored/dvd.c:464 +#, c-format +msgid "Next Volume part already exists on DVD. Cannot continue: %s\n" +msgstr "" + +#: src/stored/dvd.c:481 +#, c-format +msgid "open_next_part can't unlink existing part %s, ERR=%s\n" +msgstr "" + +#: src/stored/dvd.c:700 +#, c-format +msgid "Unable to write part %s: ERR=%s\n" +msgstr "" + +#: src/stored/fd_cmds.c:340 +msgid "Error parsing bootstrap file.\n" +msgstr "" + +#: src/stored/job.c:79 +#, c-format +msgid "Bad Job Command from Director: %s\n" +msgstr "" + +#: src/stored/job.c:189 +#, c-format +msgid "Job name not found: %s\n" +msgstr "" + +#: src/stored/job.c:200 +#, c-format +msgid "Hey!!!! JobId %u Job %s already authenticated.\n" +msgstr "" + +#: src/stored/job.c:211 +msgid "Unable to authenticate File daemon\n" +msgstr "" + +#: src/stored/job.c:338 +msgid "In free_jcr(), but still attached to device!!!!\n" +msgstr "" + +#: src/stored/label.c:74 +msgid "BAD call to read_dev_volume_label\n" +msgstr "" + +#: src/stored/label.c:79 src/stored/label.c:120 src/stored/label.c:208 +#, c-format +msgid "Wrong Volume mounted on device %s: Wanted %s have %s\n" +msgstr "" + +#: src/stored/label.c:86 src/stored/label.c:123 src/stored/label.c:195 +#, c-format +msgid "Too many tries: %s" +msgstr "" + +#: src/stored/label.c:102 +#, c-format +msgid "Couldn't rewind device %s: ERR=%s\n" +msgstr "" + +#: src/stored/label.c:140 +#, c-format +msgid "" +"Requested Volume \"%s\" on %s is not a Bacula labeled Volume, because: ERR=%s" +msgstr "" + +#: src/stored/label.c:145 +msgid "Could not read Volume label from block.\n" +msgstr "" + +#: src/stored/label.c:148 +#, c-format +msgid "Could not unserialize Volume label: ERR=%s\n" +msgstr "" + +#: src/stored/label.c:153 +#, c-format +msgid "Volume Header Id bad: %s\n" +msgstr "" + +#: src/stored/label.c:180 +#, c-format +msgid "Volume on %s has wrong Bacula version. 
Wanted %d got %d\n" +msgstr "" + +#: src/stored/label.c:191 +#, c-format +msgid "Volume on %s has bad Bacula label type: %x\n" +msgstr "" + +#: src/stored/label.c:272 +#, c-format +msgid "Cannot write Volume label to block for device %s\n" +msgstr "" + +#: src/stored/label.c:400 +#, c-format +msgid "Rewind error on device %s: ERR=%s\n" +msgstr "" + +#: src/stored/label.c:405 +#, c-format +msgid "Truncate error on device %s: ERR=%s\n" +msgstr "" + +#: src/stored/label.c:427 +#, c-format +msgid "Unable to write device %s: ERR=%s\n" +msgstr "" + +#: src/stored/label.c:455 +#, c-format +msgid "Recycled volume \"%s\" on device %s, all previous data lost.\n" +msgstr "" + +#: src/stored/label.c:458 +#, c-format +msgid "Wrote label to prelabeled Volume \"%s\" on device %s\n" +msgstr "" + +#: src/stored/label.c:657 +#, c-format +msgid "Bad session label = %d\n" +msgstr "" + +#: src/stored/label.c:675 src/stored/label.c:682 +#, c-format +msgid "Error writing Session label to %s: %s\n" +msgstr "" + +#: src/stored/label.c:717 +#, c-format +msgid "Expecting Volume Label, got FI=%s Stream=%s len=%d\n" +msgstr "" + +#: src/stored/label.c:844 +#, c-format +msgid "Unknown %d" +msgstr "" + +#: src/stored/label.c:848 +#, c-format +msgid "" +"\n" +"Volume Label:\n" +"Id : %sVerNo : %d\n" +"VolName : %s\n" +"PrevVolName : %s\n" +"VolFile : %d\n" +"LabelType : %s\n" +"LabelSize : %d\n" +"PoolName : %s\n" +"MediaType : %s\n" +"PoolType : %s\n" +"HostName : %s\n" +msgstr "" + +#: src/stored/label.c:870 +#, c-format +msgid "Date label written: %s\n" +msgstr "" + +#: src/stored/label.c:876 +#, c-format +msgid "Date label written: %04d-%02d-%02d at %02d:%02d\n" +msgstr "" + +#: src/stored/label.c:896 +#, c-format +msgid "" +"\n" +"%s Record:\n" +"JobId : %d\n" +"VerNum : %d\n" +"PoolName : %s\n" +"PoolType : %s\n" +"JobName : %s\n" +"ClientName : %s\n" +msgstr "" + +#: src/stored/label.c:909 +#, c-format +msgid "" +"Job (unique name) : %s\n" +"FileSet : %s\n" +"JobType : %c\n" +"JobLevel : %c\n" +msgstr "" + +#: src/stored/label.c:918 +#, c-format +msgid "" +"JobFiles : %s\n" +"JobBytes : %s\n" +"StartBlock : %s\n" +"EndBlock : %s\n" +"StartFile : %s\n" +"EndFile : %s\n" +"JobErrors : %s\n" +"JobStatus : %c\n" +msgstr "" + +#: src/stored/label.c:939 +#, c-format +msgid "Date written : %s\n" +msgstr "" + +#: src/stored/label.c:944 +#, c-format +msgid "Date written : %04d-%02d-%02d at %02d:%02d\n" +msgstr "" + +#: src/stored/label.c:963 +msgid "Fresh Volume" +msgstr "" + +#: src/stored/label.c:966 +msgid "Volume" +msgstr "" + +#: src/stored/label.c:975 src/stored/read_record.c:336 +msgid "End of Media" +msgstr "" + +#: src/stored/label.c:978 +msgid "End of Tape" +msgstr "" + +#: src/stored/label.c:998 src/stored/label.c:1006 src/stored/label.c:1039 +#, c-format +msgid "%s Record: File:blk=%u:%u SessId=%d SessTime=%d JobId=%d DataLen=%d\n" +msgstr "" + +#: src/stored/label.c:1003 +msgid "End of physical tape.\n" +msgstr "" + +#: src/stored/label.c:1018 src/stored/label.c:1027 +#, c-format +msgid "%s Record: File:blk=%u:%u SessId=%d SessTime=%d JobId=%d\n" +msgstr "" + +#: src/stored/label.c:1020 +#, c-format +msgid " Job=%s Date=%s Level=%c Type=%c\n" +msgstr "" + +#: src/stored/label.c:1029 +#, c-format +msgid " Date=%s Level=%c Type=%c Files=%s Bytes=%s Errors=%d Status=%c\n" +msgstr "" + +#: src/stored/mount.c:198 src/stored/mount.c:295 +#, c-format +msgid "Volume \"%s\" not on device %s.\n" +msgstr "" + +#: src/stored/mount.c:231 +#, c-format +msgid "" +"Director wanted Volume \"%s\".\n" +" Current Volume \"%s\" 
not acceptable because:\n" +" %s" +msgstr "" + +#: src/stored/mount.c:285 +#, c-format +msgid "Labeled new Volume \"%s\" on device %s.\n" +msgstr "" + +#: src/stored/mount.c:290 +#, c-format +msgid "Warning device %s not configured to autolabel Volumes.\n" +msgstr "" + +#: src/stored/mount.c:342 +#, c-format +msgid "Volume \"%s\" previously written, moving to end of data.\n" +msgstr "" + +#: src/stored/mount.c:345 +#, c-format +msgid "Unable to position to end of data on device %s: ERR=%s\n" +msgstr "" + +#: src/stored/mount.c:357 +#, c-format +msgid "Ready to append to end of Volume \"%s\" at file=%d.\n" +msgstr "" + +#: src/stored/mount.c:360 +#, c-format +msgid "" +"I cannot write on Volume \"%s\" because:\n" +"The number of files mismatch! Volume=%u Catalog=%u\n" +msgstr "" + +#: src/stored/mount.c:388 +#, c-format +msgid "Ready to append to end of Volume \"%s\" at file address=%u.\n" +msgstr "" + +#: src/stored/mount.c:392 +#, c-format +msgid "" +"I cannot write on Volume \"%s\" because:\n" +"The EOD file address is wrong: Volume file address=%u != Catalog Endblock=%u" +"(+1)\n" +"You probably removed DVD last part in spool directory.\n" +msgstr "" + +#: src/stored/mount.c:418 +#, c-format +msgid "Marking Volume \"%s\" in Error in Catalog.\n" +msgstr "" + +#: src/stored/mount.c:434 +#, c-format +msgid "" +"Autochanger Volume \"%s\" not found in slot %d.\n" +" Setting InChanger to zero in catalog.\n" +msgstr "" + +#: src/stored/mount.c:453 +msgid "Hey!!!!! WroteVol non-zero !!!!!\n" +msgstr "" + +#: src/stored/parse_bsr.c:104 src/stored/parse_bsr.c:108 +#, c-format +msgid "" +"Bootstrap file error: %s\n" +" : Line %d, col %d of file %s\n" +"%s\n" +msgstr "" + +#: src/stored/parse_bsr.c:130 +#, c-format +msgid "Cannot open bootstrap file %s: %s\n" +msgstr "" + +#: src/stored/parse_bsr.c:260 +#, c-format +msgid "MediaType %s in bsr at inappropriate place.\n" +msgstr "" + +#: src/stored/parse_bsr.c:416 +msgid "JobType not yet implemented\n" +msgstr "" + +#: src/stored/parse_bsr.c:424 +msgid "JobLevel not yet implemented\n" +msgstr "" + +#: src/stored/parse_bsr.c:626 +#, c-format +msgid "VolFile : %u-%u\n" +msgstr "" + +#: src/stored/parse_bsr.c:634 +#, c-format +msgid "VolBlock : %u-%u\n" +msgstr "" + +#: src/stored/parse_bsr.c:644 +#, c-format +msgid "FileIndex : %u\n" +msgstr "" + +#: src/stored/parse_bsr.c:646 +#, c-format +msgid "FileIndex : %u-%u\n" +msgstr "" + +#: src/stored/parse_bsr.c:656 +#, c-format +msgid "JobId : %u\n" +msgstr "" + +#: src/stored/parse_bsr.c:658 +#, c-format +msgid "JobId : %u-%u\n" +msgstr "" + +#: src/stored/parse_bsr.c:668 +#, c-format +msgid "SessId : %u\n" +msgstr "" + +#: src/stored/parse_bsr.c:670 +#, c-format +msgid "SessId : %u-%u\n" +msgstr "" + +#: src/stored/parse_bsr.c:679 +#, c-format +msgid "VolumeName : %s\n" +msgstr "" + +#: src/stored/parse_bsr.c:688 +#, c-format +msgid "Client : %s\n" +msgstr "" + +#: src/stored/parse_bsr.c:696 +#, c-format +msgid "Job : %s\n" +msgstr "" + +#: src/stored/parse_bsr.c:704 +#, c-format +msgid "SessTime : %u\n" +msgstr "" + +#: src/stored/parse_bsr.c:718 +msgid "BSR is NULL\n" +msgstr "" + +#: src/stored/parse_bsr.c:722 +#, c-format +msgid "Next : 0x%x\n" +msgstr "" + +#: src/stored/parse_bsr.c:723 +#, c-format +msgid "Root bsr : 0x%x\n" +msgstr "" + +#: src/stored/parse_bsr.c:734 +#, c-format +msgid "Slot : %u\n" +msgstr "" + +#: src/stored/parse_bsr.c:737 +#, c-format +msgid "count : %u\n" +msgstr "" + +#: src/stored/parse_bsr.c:738 +#, c-format +msgid "found : %u\n" +msgstr "" + +#: 
src/stored/parse_bsr.c:741 +#, c-format +msgid "done : %s\n" +msgstr "" + +#: src/stored/parse_bsr.c:742 +#, c-format +msgid "positioning : %d\n" +msgstr "" + +#: src/stored/parse_bsr.c:743 +#, c-format +msgid "fast_reject : %d\n" +msgstr "" + +#: src/stored/pythonsd.c:202 +msgid "Error in ParseTuple\n" +msgstr "" + +#: src/stored/pythonsd.c:218 +msgid "Parse tuple error in job_write\n" +msgstr "" + +#: src/stored/pythonsd.c:254 +#, c-format +msgid "Error in Python method %s\n" +msgstr "" + +#: src/stored/read.c:55 +msgid "No Volume names found for restore.\n" +msgstr "" + +#: src/stored/read.c:109 +#, c-format +msgid ">filed: Error Hdr=%s\n" +msgstr "" + +#: src/stored/read.c:110 src/stored/read.c:125 +#, c-format +msgid "Error sending to File daemon. ERR=%s\n" +msgstr "" + +#: src/stored/read.c:124 +#, c-format +msgid "Error sending to FD. ERR=%s\n" +msgstr "" + +#: src/stored/read_record.c:68 +#, c-format +msgid "End of Volume at file %u on device %s, Volume \"%s\"\n" +msgstr "" + +#: src/stored/read_record.c:71 +msgid "End of all volumes.\n" +msgstr "" + +#: src/stored/read_record.c:105 +#, c-format +msgid "Got EOF at file %u on device %s, Volume \"%s\"\n" +msgstr "" + +#: src/stored/read_record.c:119 +msgid "Did fsr\n" +msgstr "" + +#: src/stored/read_record.c:276 +#, c-format +msgid "Reposition from (file:block) %u:%u to %u:%u\n" +msgstr "" + +#: src/stored/read_record.c:303 +#, c-format +msgid "Forward spacing to file:block %u:%u.\n" +msgstr "" + +#: src/stored/read_record.c:329 +msgid "Begin Session" +msgstr "" + +#: src/stored/read_record.c:333 +msgid "End Session" +msgstr "" + +#: src/stored/read_record.c:339 +#, c-format +msgid "Unknown code %d\n" +msgstr "" + +#: src/stored/record.c:60 +#, c-format +msgid "unknown: %d" +msgstr "" + +#: src/stored/record.c:343 +msgid "Damaged buffer\n" +msgstr "" + +#: src/stored/reserve.c:467 src/stored/reserve.c:479 +#, c-format +msgid "Failed command: %s\n" +msgstr "" + +#: src/stored/reserve.c:469 +#, c-format +msgid "" +"\n" +" Device \"%s\" with MediaType \"%s\" requested by DIR not found in SD " +"Device resources.\n" +msgstr "" + +#: src/stored/reserve.c:647 +#, c-format +msgid "3926 Could not get dcr for device: %s\n" +msgstr "" + +#: src/stored/reserve.c:709 +#, c-format +msgid "3601 JobId=%u device %s is BLOCKED due to user unmount.\n" +msgstr "" + +#: src/stored/reserve.c:718 +#, c-format +msgid "3602 JobId=%u device %s is busy (already reading/writing).\n" +msgstr "" + +#: src/stored/reserve.c:761 +#, c-format +msgid "3603 JobId=%u device %s is busy reading.\n" +msgstr "" + +#: src/stored/reserve.c:770 +#, c-format +msgid "3604 JobId=%u device %s is BLOCKED due to user unmount.\n" +msgstr "" + +#: src/stored/reserve.c:836 +#, c-format +msgid "3605 JobId=%u wants free drive but device %s is busy.\n" +msgstr "" + +#: src/stored/reserve.c:844 +#, c-format +msgid "3606 JobId=%u wants mounted, but drive %s has no Volume.\n" +msgstr "" + +#: src/stored/reserve.c:854 +#, c-format +msgid "3607 JobId=%u wants Vol=\"%s\" drive has Vol=\"%s\" on drive %s.\n" +msgstr "" + +#: src/stored/reserve.c:890 +#, c-format +msgid "3608 JobId=%u wants Pool=\"%s\" but have Pool=\"%s\" on drive %s.\n" +msgstr "" + +#: src/stored/reserve.c:933 +#, c-format +msgid "3609 JobId=%u wants Pool=\"%s\" but have Pool=\"%s\" on drive %s.\n" +msgstr "" + +#: src/stored/reserve.c:941 src/stored/reserve.c:945 +msgid "Logic error!!!! Should not get here.\n" +msgstr "" + +#: src/stored/reserve.c:942 +#, c-format +msgid "3910 JobId=%u Logic error!!!! 
drive %s Should not get here.\n" +msgstr "" + +#: src/stored/reserve.c:948 +#, c-format +msgid "3911 JobId=%u failed reserve drive %s.\n" +msgstr "" + +#: src/stored/spool.c:69 +#, c-format +msgid "" +"Data spooling: %u active jobs, %s bytes; %u total jobs, %s max bytes/job.\n" +msgstr "" + +#: src/stored/spool.c:75 +#, c-format +msgid "Attr spooling: %u active jobs, %s bytes; %u total jobs, %s max bytes.\n" +msgstr "" + +#: src/stored/spool.c:91 +msgid "Spooling data ...\n" +msgstr "" + +#: src/stored/spool.c:117 +#, c-format +msgid "Bad return from despool WroteVol=%d\n" +msgstr "" + +#: src/stored/spool.c:150 +#, c-format +msgid "Open data spool file %s failed: ERR=%s\n" +msgstr "" + +#: src/stored/spool.c:199 +#, c-format +msgid "Committing spooled data to Volume \"%s\". Despooling %s bytes ...\n" +msgstr "" + +#: src/stored/spool.c:204 +#, c-format +msgid "Writing spooled data to Volume. Despooling %s bytes ...\n" +msgstr "" + +#: src/stored/spool.c:258 src/stored/spool.c:432 src/stored/spool.c:470 +#, c-format +msgid "Ftruncate spool file failed: ERR=%s\n" +msgstr "" + +#: src/stored/spool.c:260 +#, c-format +msgid "Bad return from ftruncate. ERR=%s\n" +msgstr "" + +#: src/stored/spool.c:309 +#, c-format +msgid "Spool header read error. ERR=%s\n" +msgstr "" + +#: src/stored/spool.c:312 +#, c-format +msgid "Spool read error. Wanted %u bytes, got %d\n" +msgstr "" + +#: src/stored/spool.c:313 +#, c-format +msgid "Spool header read error. Wanted %u bytes, got %d\n" +msgstr "" + +#: src/stored/spool.c:319 src/stored/spool.c:320 +#, c-format +msgid "Spool block too big. Max %u bytes, got %u\n" +msgstr "" + +#: src/stored/spool.c:325 src/stored/spool.c:326 +#, c-format +msgid "Spool data read error. Wanted %u bytes, got %d\n" +msgstr "" + +#: src/stored/spool.c:383 +msgid "User specified spool size reached.\n" +msgstr "" + +#: src/stored/spool.c:385 +msgid "Bad return from despool in write_block.\n" +msgstr "" + +#: src/stored/spool.c:393 +msgid "Spooling data again ...\n" +msgstr "" + +#: src/stored/spool.c:424 +#, c-format +msgid "Error writing header to spool file. ERR=%s\n" +msgstr "" + +#: src/stored/spool.c:438 src/stored/spool.c:476 +msgid "Fatal despooling error." +msgstr "" + +#: src/stored/spool.c:445 +msgid "Retrying after header spooling error failed.\n" +msgstr "" + +#: src/stored/spool.c:459 +#, c-format +msgid "Error writing data to spool file. ERR=%s\n" +msgstr "" + +#: src/stored/spool.c:486 +msgid "Retrying after data spooling error failed.\n" +msgstr "" + +#: src/stored/spool.c:541 src/stored/spool.c:548 +#, c-format +msgid "Fseek on attributes file failed: ERR=%s\n" +msgstr "" + +#: src/stored/spool.c:558 +#, c-format +msgid "Sending spooled attrs to the Director. 
Despooling %s bytes ...\n" +msgstr "" + +#: src/stored/spool.c:585 +#, c-format +msgid "fopen attr spool file %s failed: ERR=%s\n" +msgstr "" + +#: src/stored/status.c:67 +#, c-format +msgid "" +"\n" +"%s Version: %s (%s) %s %s %s\n" +msgstr "" + +#: src/stored/status.c:103 +msgid "" +"\n" +"Device status:\n" +msgstr "" + +#: src/stored/status.c:105 +#, c-format +msgid "Autochanger \"%s\" with devices:\n" +msgstr "" + +#: src/stored/status.c:119 +#, c-format +msgid "Device %s is mounted with Volume=\"%s\" Pool=\"%s\"\n" +msgstr "" + +#: src/stored/status.c:123 +#, c-format +msgid "Device %s open but no Bacula volume is mounted.\n" +msgstr "" + +#: src/stored/status.c:133 +#, c-format +msgid " Total Bytes=%s Blocks=%s Bytes/block=%s\n" +msgstr "" + +#: src/stored/status.c:147 +#, c-format +msgid " Total Bytes Read=%s Blocks Read=%s Bytes/block=%s\n" +msgstr "" + +#: src/stored/status.c:152 +#, c-format +msgid " Positioned at File=%s Block=%s\n" +msgstr "" + +#: src/stored/status.c:158 +#, c-format +msgid "Device %s is not open or does not exist.\n" +msgstr "" + +#: src/stored/status.c:160 +#, c-format +msgid "Device \"%s\" is not open or does not exist.\n" +msgstr "" + +#: src/stored/status.c:165 src/stored/status.c:168 src/stored/status.c:172 +#: src/stored/status.c:174 +msgid "" +"====\n" +"\n" +msgstr "" + +#: src/stored/status.c:166 +msgid "In Use Volume status:\n" +msgstr "" + +#: src/stored/status.c:190 +msgid "" +"No DEVICE structure.\n" +"\n" +msgstr "" + +#: src/stored/status.c:195 +msgid " Device is BLOCKED. User unmounted.\n" +msgstr "" + +#: src/stored/status.c:198 +msgid " Device is BLOCKED. User unmounted during wait for media/mount.\n" +msgstr "" + +#: src/stored/status.c:202 +#, c-format +msgid " Device is BLOCKED waiting for mount of volume \"%s\".\n" +msgstr "" + +#: src/stored/status.c:205 +msgid " Device is BLOCKED waiting for media.\n" +msgstr "" + +#: src/stored/status.c:209 +msgid " Device is being initialized.\n" +msgstr "" + +#: src/stored/status.c:212 +msgid " Device is blocked labeling a Volume.\n" +msgstr "" + +#: src/stored/status.c:220 +#, c-format +msgid " Slot %d is loaded in drive %d.\n" +msgstr "" + +#: src/stored/status.c:223 +#, c-format +msgid " Drive %d is not loaded.\n" +msgstr "" + +#: src/stored/status.c:242 +msgid "Device state:\n" +msgstr "" + +#: src/stored/status.c:256 +#, c-format +msgid "" +"num_writers=%d JobStatus=%c block=%d\n" +"\n" +msgstr "" + +#: src/stored/status.c:260 +#, c-format +msgid "Archive name: %s Device name: %s\n" +msgstr "" + +#: src/stored/status.c:262 +#, c-format +msgid "File=%u block=%u\n" +msgstr "" + +#: src/stored/status.c:263 +#, c-format +msgid "Min block=%u Max block=%u\n" +msgstr "" + +#: src/stored/status.c:280 +#, c-format +msgid "%s Job %s waiting for Client connection.\n" +msgstr "" + +#: src/stored/status.c:295 +#, c-format +msgid "" +"Reading: %s %s job %s JobId=%d Volume=\"%s\"\n" +" pool=\"%s\" device=\"%s\"\n" +msgstr "" + +#: src/stored/status.c:307 +#, c-format +msgid "" +"Writing: %s %s job %s JobId=%d Volume=\"%s\"\n" +" pool=\"%s\" device=\"%s\"\n" +msgstr "" + +#: src/stored/status.c:330 +#, c-format +msgid " FDReadSeqNo=%s in_msg=%u out_msg=%d fd=%d\n" +msgstr "" + +#: src/stored/status.c:335 +msgid " FDSocket closed\n" +msgstr "" + +#: src/stored/status.c:352 +msgid "" +"\n" +"Jobs waiting to reserve a drive:\n" +msgstr "" + +#: src/stored/status.c:380 +msgid " JobId Level Files Bytes Status Finished Name \n" +msgstr "" + +#: src/stored/status.c:514 +msgid "3900 Bad .status command, missing 
argument.\n" +msgstr "" + +#: src/stored/status.c:537 +msgid "3900 Bad .status command, wrong argument.\n" +msgstr "" + +#: src/stored/stored.c:65 +#, c-format +msgid "" +"Copyright (C) 2000-2016 Kern Sibbald +"\n" +"Version: %s (%s)\n" +"\n" +"Usage: stored [options] [-c config_file] [config_file]\n" +" -c use as configuration file\n" +" -dnn set debug level to nn\n" +" -f run in foreground (for debugging)\n" +" -g set groupid to group\n" +" -p proceed despite I/O errors\n" +" -s no signals (for debugging)\n" +" -t test - read config and exit\n" +" -u userid to \n" +" -v verbose user messages\n" +" -? print this message.\n" +"\n" +msgstr "" + +#: src/stored/stored.c:220 +msgid "Volume Session Time is ZERO!\n" +msgstr "" + +#: src/stored/stored.c:233 +#, c-format +msgid "Unable to create thread. ERR=%s\n" +msgstr "" + +#: src/stored/stored.c:271 +#, c-format +msgid "Only one Storage resource permitted in %s\n" +msgstr "" + +#: src/stored/stored.c:276 +#, c-format +msgid "No Director resource defined in %s. Cannot continue.\n" +msgstr "" + +#: src/stored/stored.c:281 +#, c-format +msgid "No Device resource defined in %s. Cannot continue.\n" +msgstr "" + +#: src/stored/stored.c:289 +#, c-format +msgid "No Messages resource defined in %s. Cannot continue.\n" +msgstr "" + +#: src/stored/stored.c:316 +#, c-format +msgid "\"TLS Certificate\" file not defined for Storage \"%s\" in %s.\n" +msgstr "" + +#: src/stored/stored.c:322 +#, c-format +msgid "\"TLS Key\" file not defined for Storage \"%s\" in %s.\n" +msgstr "" + +#: src/stored/stored.c:328 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Storage \"%s\" in %s. At least one CA certificate store is required when " +"using \"TLS Verify Peer\".\n" +msgstr "" + +#: src/stored/stored.c:455 +#, c-format +msgid "Could not initialize %s\n" +msgstr "" + +#: src/stored/stored.c:468 +#, c-format +msgid "Could not open device %s\n" +msgstr "" + +#: src/stored/stored.c:481 +#, c-format +msgid "Could not mount device %s\n" +msgstr "" + +#: src/stored/stored_conf.c:216 +#, c-format +msgid "Expected a Device Type keyword, got: %s" +msgstr "" + +#: src/stored/stored_conf.c:231 +#, c-format +msgid "Warning: no \"%s\" resource (%d) defined.\n" +msgstr "" + +#: src/stored/stored_conf.c:234 +#, c-format +msgid "dump_resource type=%d\n" +msgstr "" + +#: src/stored/stored_conf.c:350 +#, c-format +msgid "Warning: unknown resource type %d\n" +msgstr "" + +#: src/stored/stored_conf.c:539 +#, c-format +msgid "\"%s\" item is required in \"%s\" resource, but not found.\n" +msgstr "" + +#: src/stored/stored_conf.c:545 +#, c-format +msgid "Too many items in \"%s\" resource\n" +msgstr "" + +#: src/stored/stored_conf.c:579 +#, c-format +msgid "Cannot find AutoChanger resource %s\n" +msgstr "" + +#: src/stored/stored_conf.c:650 +#, c-format +msgid "" +"Attempt to define second \"%s\" resource named \"%s\" is not permitted.\n" +msgstr "" + +#: src/stored/wait.c:114 +#, c-format +msgid "pthread timedwait error. 
ERR=%s\n" +msgstr "" + +#: src/stored/wait.c:199 +#, c-format +msgid "Job %s waiting to reserve a device.\n" +msgstr "" + +#: src/tools/bsmtp.c:85 +#, c-format +msgid "Fatal malformed reply from %s: %s\n" +msgstr "" + +#: src/tools/bsmtp.c:121 +#, c-format +msgid "" +"\n" +"Usage: %s [-f from] [-h mailhost] [-s subject] [-c copy] [recipient ...]\n" +" -c set the Cc: field\n" +" -dnn set debug level to nn\n" +" -f set the From: field\n" +" -h use mailhost:port as the SMTP server\n" +" -s set the Subject: field\n" +" -? print this message.\n" +"\n" +msgstr "" + +#: src/tools/bsmtp.c:204 +msgid "Fatal error: no recipient given.\n" +msgstr "" + +#: src/tools/bsmtp.c:225 +#, c-format +msgid "Fatal gethostname error: ERR=%s\n" +msgstr "" + +#: src/tools/bsmtp.c:229 +#, c-format +msgid "Fatal gethostbyname for myself failed \"%s\": ERR=%s\n" +msgstr "" + +#: src/tools/bsmtp.c:254 +#, c-format +msgid "Error unknown mail host \"%s\": ERR=%s\n" +msgstr "" + +#: src/tools/bsmtp.c:257 +msgid "Retrying connection using \"localhost\".\n" +msgstr "" + +#: src/tools/bsmtp.c:265 +#, c-format +msgid "Fatal error: Unknown address family for smtp host: %d\n" +msgstr "" + +#: src/tools/bsmtp.c:273 +#, c-format +msgid "Fatal socket error: ERR=%s\n" +msgstr "" + +#: src/tools/bsmtp.c:277 +#, c-format +msgid "Fatal connect error to %s: ERR=%s\n" +msgstr "" + +#: src/tools/bsmtp.c:282 +#, c-format +msgid "Fatal dup error: ERR=%s\n" +msgstr "" + +#: src/tools/bsmtp.c:286 src/tools/bsmtp.c:290 +#, c-format +msgid "Fatal fdopen error: ERR=%s\n" +msgstr "" + +#: src/tools/dbcheck.c:158 +msgid "" +"Warning skipping the additional parameters for working directory/dbname/user/" +"password/host.\n" +msgstr "" + +#: src/tools/dbcheck.c:174 +#, c-format +msgid "Error can not find the Catalog name[%s] in the given config file [%s]\n" +msgstr "" + +#: src/tools/dbcheck.c:176 +#, c-format +msgid "Error there is no Catalog section in the given config file [%s]\n" +msgstr "" + +#: src/tools/dbcheck.c:185 +msgid "Error no Director resource defined.\n" +msgstr "" + +#: src/tools/dbcheck.c:199 +msgid "Wrong number of arguments.\n" +msgstr "" + +#: src/tools/dbcheck.c:204 +msgid "Working directory not supplied.\n" +msgstr "" + +#: src/tools/dbcheck.c:269 +#, c-format +msgid "Hello, this is the database check/correct program.\n" +msgstr "" + +#: src/tools/dbcheck.c:271 +#, c-format +msgid "Modify database is on." +msgstr "" + +#: src/tools/dbcheck.c:273 +#, c-format +msgid "Modify database is off." 
+msgstr "" + +#: src/tools/dbcheck.c:275 src/tools/dbcheck.c:336 +#, c-format +msgid " Verbose is on.\n" +msgstr "" + +#: src/tools/dbcheck.c:277 src/tools/dbcheck.c:338 +#, c-format +msgid " Verbose is off.\n" +msgstr "" + +#: src/tools/dbcheck.c:279 +#, c-format +msgid "Please select the function you want to perform.\n" +msgstr "" + +#: src/tools/dbcheck.c:283 +#, c-format +msgid "" +"\n" +" 1) Toggle modify database flag\n" +" 2) Toggle verbose flag\n" +" 3) Repair bad Filename records\n" +" 4) Repair bad Path records\n" +" 5) Eliminate duplicate Filename records\n" +" 6) Eliminate duplicate Path records\n" +" 7) Eliminate orphaned Jobmedia records\n" +" 8) Eliminate orphaned File records\n" +" 9) Eliminate orphaned Path records\n" +" 10) Eliminate orphaned Filename records\n" +" 11) Eliminate orphaned FileSet records\n" +" 12) Eliminate orphaned Client records\n" +" 13) Eliminate orphaned Job records\n" +" 14) Eliminate all Admin records\n" +" 15) Eliminate all Restore records\n" +" 16) All (3-15)\n" +" 17) Quit\n" +msgstr "" + +#: src/tools/dbcheck.c:302 +#, c-format +msgid "" +"\n" +" 1) Toggle modify database flag\n" +" 2) Toggle verbose flag\n" +" 3) Check for bad Filename records\n" +" 4) Check for bad Path records\n" +" 5) Check for duplicate Filename records\n" +" 6) Check for duplicate Path records\n" +" 7) Check for orphaned Jobmedia records\n" +" 8) Check for orphaned File records\n" +" 9) Check for orphaned Path records\n" +" 10) Check for orphaned Filename records\n" +" 11) Check for orphaned FileSet records\n" +" 12) Check for orphaned Client records\n" +" 13) Check for orphaned Job records\n" +" 14) Check for all Admin records\n" +" 15) Check for all Restore records\n" +" 16) All (3-15)\n" +" 17) Quit\n" +msgstr "" + +#: src/tools/dbcheck.c:322 +msgid "Select function number: " +msgstr "" + +#: src/tools/dbcheck.c:329 +#, c-format +msgid "Database will be modified.\n" +msgstr "" + +#: src/tools/dbcheck.c:331 +#, c-format +msgid "Database will NOT be modified.\n" +msgstr "" + +#: src/tools/dbcheck.c:421 +#, c-format +msgid "JobId=%s Name=\"%s\" StartTime=%s\n" +msgstr "" + +#: src/tools/dbcheck.c:429 +#, c-format +msgid "Orphaned JobMediaId=%s JobId=%s Volume=\"%s\"\n" +msgstr "" + +#: src/tools/dbcheck.c:436 +#, c-format +msgid "Orphaned FileId=%s JobId=%s Volume=\"%s\"\n" +msgstr "" + +#: src/tools/dbcheck.c:443 +#, c-format +msgid "Orphaned FileSetId=%s FileSet=\"%s\" MD5=%s\n" +msgstr "" + +#: src/tools/dbcheck.c:450 +#, c-format +msgid "Orphaned ClientId=%s Name=\"%s\"\n" +msgstr "" + +#: src/tools/dbcheck.c:503 +#, c-format +msgid "Deleting: %s\n" +msgstr "" + +#: src/tools/dbcheck.c:577 +#, c-format +msgid "Checking for duplicate Filename entries.\n" +msgstr "" + +#: src/tools/dbcheck.c:586 +#, c-format +msgid "Found %d duplicate Filename records.\n" +msgstr "" + +#: src/tools/dbcheck.c:587 +msgid "Print the list? (yes/no): " +msgstr "" + +#: src/tools/dbcheck.c:603 src/tools/dbcheck.c:657 +#, c-format +msgid "Found %d for: %s\n" +msgstr "" + +#: src/tools/dbcheck.c:630 +#, c-format +msgid "Checking for duplicate Path entries.\n" +msgstr "" + +#: src/tools/dbcheck.c:640 +#, c-format +msgid "Found %d duplicate Path records.\n" +msgstr "" + +#: src/tools/dbcheck.c:641 src/tools/dbcheck.c:691 src/tools/dbcheck.c:723 +#: src/tools/dbcheck.c:755 src/tools/dbcheck.c:783 src/tools/dbcheck.c:811 +#: src/tools/dbcheck.c:849 src/tools/dbcheck.c:887 src/tools/dbcheck.c:918 +#: src/tools/dbcheck.c:948 src/tools/dbcheck.c:982 src/tools/dbcheck.c:1040 +msgid "Print them? 
(yes/no): " +msgstr "" + +#: src/tools/dbcheck.c:683 +#, c-format +msgid "Checking for orphaned JobMedia entries.\n" +msgstr "" + +#: src/tools/dbcheck.c:690 +#, c-format +msgid "Found %d orphaned JobMedia records.\n" +msgstr "" + +#: src/tools/dbcheck.c:703 +#, c-format +msgid "Deleting %d orphaned JobMedia records.\n" +msgstr "" + +#: src/tools/dbcheck.c:712 +#, c-format +msgid "Checking for orphaned File entries. This may take some time!\n" +msgstr "" + +#: src/tools/dbcheck.c:722 +#, c-format +msgid "Found %d orphaned File records.\n" +msgstr "" + +#: src/tools/dbcheck.c:735 +#, c-format +msgid "Deleting %d orphaned File records.\n" +msgstr "" + +#: src/tools/dbcheck.c:744 +#, c-format +msgid "Checking for orphaned Path entries. This may take some time!\n" +msgstr "" + +#: src/tools/dbcheck.c:754 +#, c-format +msgid "Found %d orphaned Path records.\n" +msgstr "" + +#: src/tools/dbcheck.c:763 +#, c-format +msgid "Deleting %d orphaned Path records.\n" +msgstr "" + +#: src/tools/dbcheck.c:772 +#, c-format +msgid "Checking for orphaned Filename entries. This may take some time!\n" +msgstr "" + +#: src/tools/dbcheck.c:782 +#, c-format +msgid "Found %d orphaned Filename records.\n" +msgstr "" + +#: src/tools/dbcheck.c:791 +#, c-format +msgid "Deleting %d orphaned Filename records.\n" +msgstr "" + +#: src/tools/dbcheck.c:800 +#, c-format +msgid "Checking for orphaned FileSet entries. This takes some time!\n" +msgstr "" + +#: src/tools/dbcheck.c:810 +#, c-format +msgid "Found %d orphaned FileSet records.\n" +msgstr "" + +#: src/tools/dbcheck.c:822 +#, c-format +msgid "Deleting %d orphaned FileSet records.\n" +msgstr "" + +#: src/tools/dbcheck.c:831 +#, c-format +msgid "Checking for orphaned Client entries.\n" +msgstr "" + +#: src/tools/dbcheck.c:848 +#, c-format +msgid "Found %d orphaned Client records.\n" +msgstr "" + +#: src/tools/dbcheck.c:860 +#, c-format +msgid "Deleting %d orphaned Client records.\n" +msgstr "" + +#: src/tools/dbcheck.c:869 +#, c-format +msgid "Checking for orphaned Job entries.\n" +msgstr "" + +#: src/tools/dbcheck.c:886 +#, c-format +msgid "Found %d orphaned Job records.\n" +msgstr "" + +#: src/tools/dbcheck.c:898 +#, c-format +msgid "Deleting %d orphaned Job records.\n" +msgstr "" + +#: src/tools/dbcheck.c:908 +#, c-format +msgid "Checking for Admin Job entries.\n" +msgstr "" + +#: src/tools/dbcheck.c:917 +#, c-format +msgid "Found %d Admin Job records.\n" +msgstr "" + +#: src/tools/dbcheck.c:929 +#, c-format +msgid "Deleting %d Admin Job records.\n" +msgstr "" + +#: src/tools/dbcheck.c:938 +#, c-format +msgid "Checking for Restore Job entries.\n" +msgstr "" + +#: src/tools/dbcheck.c:947 +#, c-format +msgid "Found %d Restore Job records.\n" +msgstr "" + +#: src/tools/dbcheck.c:959 +#, c-format +msgid "Deleting %d Restore Job records.\n" +msgstr "" + +#: src/tools/dbcheck.c:972 +#, c-format +msgid "Checking for Filenames with a trailing slash\n" +msgstr "" + +#: src/tools/dbcheck.c:981 +#, c-format +msgid "Found %d bad Filename records.\n" +msgstr "" + +#: src/tools/dbcheck.c:995 src/tools/dbcheck.c:1053 +#, c-format +msgid "Reparing %d bad Filename records.\n" +msgstr "" + +#: src/tools/dbcheck.c:1030 +#, c-format +msgid "Checking for Paths without a trailing slash\n" +msgstr "" + +#: src/tools/dbcheck.c:1039 +#, c-format +msgid "Found %d bad Path records.\n" +msgstr "" + +#: src/tools/fstype.c:34 +#, c-format +msgid "" +"\n" +"Usage: fstype [-d debug_level] path ...\n" +"\n" +" Print the file system type a given file/directory is on.\n" +" The following options are 
supported:\n" +"\n" +" -v print both path and file system type.\n" +" -? print this message.\n" +"\n" +msgstr "" + +#: src/tools/fstype.c:86 +#, c-format +msgid "%s: unknown\n" +msgstr "" + +#: src/tools/testfind.c:49 +#, c-format +msgid "" +"\n" +"Usage: testfind [-d debug_level] [-] [pattern1 ...]\n" +" -a print extended attributes (Win32 debug)\n" +" -dnn set debug level to nn\n" +" -e specify file of exclude patterns\n" +" -i specify file of include patterns\n" +" - read pattern(s) from stdin\n" +" -? print this message.\n" +"\n" +"Patterns are used for file inclusion -- normally directories.\n" +"Debug level >= 1 prints each file found.\n" +"Debug level >= 10 prints path/file for catalog.\n" +"Errors are always printed.\n" +"Files/paths truncated is the number of files/paths with len > 255.\n" +"Truncation is only in the catalog.\n" +"\n" +msgstr "" + +#: src/tools/testfind.c:134 src/tools/testls.c:130 +#, c-format +msgid "Could not open include file: %s\n" +msgstr "" + +#: src/tools/testfind.c:147 src/tools/testls.c:143 +#, c-format +msgid "Could not open exclude file: %s\n" +msgstr "" + +#: src/tools/testfind.c:160 +#, c-format +msgid "" +"Total files : %d\n" +"Max file length: %d\n" +"Max path length: %d\n" +"Files truncated: %d\n" +"Paths truncated: %d\n" +"Hard links : %d\n" +msgstr "" + +#: src/tools/testfind.c:199 +#, c-format +msgid "Reg: %s\n" +msgstr "" + +#: src/tools/testfind.c:220 +msgid "\t[will not descend: recursion turned off]" +msgstr "" + +#: src/tools/testfind.c:222 +msgid "\t[will not descend: file system change not allowed]" +msgstr "" + +#: src/tools/testfind.c:224 +msgid "\t[will not descend: disallowed file system]" +msgstr "" + +#: src/tools/testfind.c:240 src/tools/testls.c:178 +#, c-format +msgid "Err: Could not access %s: %s\n" +msgstr "" + +#: src/tools/testfind.c:243 src/tools/testls.c:181 +#, c-format +msgid "Err: Could not follow ff->link %s: %s\n" +msgstr "" + +#: src/tools/testfind.c:246 src/tools/testls.c:184 +#, c-format +msgid "Err: Could not stat %s: %s\n" +msgstr "" + +#: src/tools/testfind.c:249 src/tools/testls.c:187 +#, c-format +msgid "Skip: File not saved. No change. %s\n" +msgstr "" + +#: src/tools/testfind.c:252 src/tools/testls.c:190 +#, c-format +msgid "Err: Attempt to backup archive. Not saved. %s\n" +msgstr "" + +#: src/tools/testfind.c:255 src/tools/testls.c:199 +#, c-format +msgid "Err: Could not open directory %s: %s\n" +msgstr "" + +#: src/tools/testfind.c:258 src/tools/testls.c:202 +#, c-format +msgid "Err: Unknown file ff->type %d: %s\n" +msgstr "" + +#: src/tools/testfind.c:308 +#, c-format +msgid "===== Filename truncated to 255 chars: %s\n" +msgstr "" + +#: src/tools/testfind.c:325 +#, c-format +msgid "========== Path name truncated to 255 chars: %s\n" +msgstr "" + +#: src/tools/testfind.c:334 +#, c-format +msgid "========== Path length is zero. File=%s\n" +msgstr "" + +#: src/tools/testfind.c:337 +#, c-format +msgid "Path: %s\n" +msgstr "" + +#: src/tools/testls.c:45 +#, c-format +msgid "" +"\n" +"Usage: testls [-d debug_level] [-] [pattern1 ...]\n" +" -a print extended attributes (Win32 debug)\n" +" -dnn set debug level to nn\n" +" -e specify file of exclude patterns\n" +" -i specify file of include patterns\n" +" - read pattern(s) from stdin\n" +" -? 
print this message.\n" +"\n" +"Patterns are file inclusion -- normally directories.\n" +"Debug level >= 1 prints each file found.\n" +"Debug level >= 10 prints path/file for catalog.\n" +"Errors always printed.\n" +"Files/paths truncated is number with len > 255.\n" +"Truncation is only in catalog.\n" +"\n" +msgstr "" + +#: src/tools/testls.c:193 +#, c-format +msgid "Recursion turned off. Directory not entered. %s\n" +msgstr "" + +#: src/tools/testls.c:196 +#, c-format +msgid "Skip: File system change prohibited. Directory not entered. %s\n" +msgstr "" + +#: src/tray-monitor/authenticate.c:78 +msgid "" +"Director authorization problem.\n" +"Most likely the passwords do not agree.\n" +"Please see http://www.bacula.org/rel-manual/faq.html#AuthorizationErrors " +"for help.\n" +msgstr "" + +#: src/tray-monitor/authenticate.c:127 +msgid "" +"Director and Storage daemon passwords or names not the same.\n" +"Please see http://www.bacula.org/rel-manual/faq.html#AuthorizationErrors " +"for help.\n" +msgstr "" + +#: src/tray-monitor/authenticate.c:172 +msgid "" +"Director and File daemon passwords or names not the same.\n" +"Please see http://www.bacula.org/rel-manual/faq.html#AuthorizationErrors " +"for help.\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:102 +#, c-format +msgid "" +"Copyright (C) 2000-2016 Kern Sibbald\n" +"Written by Nicolas Boichat (2004)\n" +"\n" +"Version: %s (%s) %s %s %s\n" +"\n" +"Usage: tray-monitor [-c config_file] [-d debug_level]\n" +" -c set configuration file to file\n" +" -dnn set debug level to nn\n" +" -t test - read configuration and exit\n" +" -? print this message.\n" +"\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:251 +#, c-format +msgid "" +"Error: %d Monitor resource defined in %s. You must define one and only one " +"Monitor resource.\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:282 +#, c-format +msgid "" +"No Client, Storage nor Director resource defined in %s\n" +"Without that I don't how to get status from the File, Storage or Director " +"Daemon :-(\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:304 +#, c-format +msgid "" +"Invalid refresh interval defined in %s\n" +"This value must be greater or equal to 1 second and less or equal to 10 " +"minutes (read value: %d).\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:319 +msgid "Open status window..." +msgstr "" + +#: src/tray-monitor/tray-monitor.c:325 +msgid "Exit" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:337 +msgid "Bacula tray monitor" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:362 +msgid " (DIR)" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:366 +msgid " (FD)" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:370 +msgid " (SD)" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:383 +msgid "Unknown status."
+msgstr "" + +#: src/tray-monitor/tray-monitor.c:424 +msgid "Refresh interval in seconds: " +msgstr "" + +#: src/tray-monitor/tray-monitor.c:432 +msgid "Refresh now" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:436 +msgid "About" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:440 +msgid "Close" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:460 +#, c-format +msgid "Disconnecting from Director %s:%d\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:463 +#, c-format +msgid "Disconnecting from Client %s:%d\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:466 +#, c-format +msgid "Disconnecting from Storage %s:%d\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:502 src/tray-monitor/tray-monitor.c:512 +msgid "Bacula Tray Monitor" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:503 src/tray-monitor/tray-monitor.c:513 +msgid "" +"Copyright (C) 2000-2016 Kern Sibbald +"Written by Nicolas Boichat\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:505 src/tray-monitor/tray-monitor.c:515 +msgid "Version:" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:583 +#, c-format +msgid "Error, currentitem is not a Client or a Storage..\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:656 +#, c-format +msgid "" +"Current job: %s\n" +"Last job: %s" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:668 +#, c-format +msgid " (%d errors)" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:671 +#, c-format +msgid " (%d error)" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:708 +msgid "No current job." +msgstr "" + +#: src/tray-monitor/tray-monitor.c:711 +msgid "No last job." +msgstr "" + +#: src/tray-monitor/tray-monitor.c:719 +msgid "Job status: Created" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:724 +msgid "Job status: Running" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:728 +msgid "Job status: Blocked" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:733 +msgid "Job status: Terminated" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:738 +msgid "Job status: Terminated in error" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:744 +msgid "Job status: Error" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:748 +msgid "Job status: Fatal error" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:753 +msgid "Job status: Verify differences" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:758 +msgid "Job status: Canceled" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:763 +msgid "Job status: Waiting on File daemon" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:768 +msgid "Job status: Waiting on the Storage daemon" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:773 +msgid "Job status: Waiting for new media" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:778 +msgid "Job status: Waiting for Mount" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:783 +msgid "Job status: Waiting for storage resource" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:788 +msgid "Job status: Waiting for job resource" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:793 +msgid "Job status: Waiting for Client resource" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:798 +msgid "Job status: Waiting for maximum jobs" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:803 +msgid "Job status: Waiting for start time" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:808 +msgid "Job status: Waiting for higher priority jobs to finish" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:813 +#, c-format +msgid "Unknown job status %c." 
+msgstr "" + +#: src/tray-monitor/tray-monitor.c:814 +#, c-format +msgid "Job status: Unknown(%c)" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:821 +#, c-format +msgid "Bad scan : '%s' %d\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:859 +#, c-format +msgid "Connecting to Director %s:%d" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:865 +#, c-format +msgid "Connecting to Client %s:%d\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:866 +#, c-format +msgid "Connecting to Client %s:%d" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:872 +#, c-format +msgid "Connecting to Storage %s:%d\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:873 +#, c-format +msgid "Connecting to Storage %s:%d" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:878 src/tray-monitor/tray-monitor.c:916 +#, c-format +msgid "Error, currentitem is not a Client, a Storage or a Director..\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:884 +msgid "Cannot connect to daemon.\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:885 +msgid "Cannot connect to daemon." +msgstr "" + +#: src/tray-monitor/tray-monitor.c:897 +#, c-format +msgid "Authentication error : %s" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:904 +msgid "Opened connection with Director daemon.\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:905 +msgid "Opened connection with Director daemon." +msgstr "" + +#: src/tray-monitor/tray-monitor.c:908 +msgid "Opened connection with File daemon.\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:909 +msgid "Opened connection with File daemon." +msgstr "" + +#: src/tray-monitor/tray-monitor.c:912 +msgid "Opened connection with Storage daemon.\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:913 +msgid "Opened connection with Storage daemon." +msgstr "" + +#: src/tray-monitor/tray-monitor.c:948 +msgid "<< Error: BNET_PROMPT signal received. >>\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:953 src/wx-console/console_thread.cpp:465 +msgid "<< Heartbeat signal received, answered. >>\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:957 +#, c-format +msgid "<< Unexpected signal received : %s >>\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:962 +msgid "\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:966 +msgid "Error : BNET_HARDEOF or BNET_ERROR" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:972 +msgid "\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:976 +msgid "Error : Connection closed." 
+msgstr "" + +#: src/tray-monitor/tray_conf.c:161 +#, c-format +msgid "Monitor: name=%s FDtimeout=%s SDtimeout=%s\n" +msgstr "" + +#: src/tray-monitor/tray_conf.c:167 +#, c-format +msgid "Director: name=%s address=%s FDport=%d\n" +msgstr "" + +#: src/tray-monitor/tray_conf.c:171 +#, c-format +msgid "Client: name=%s address=%s FDport=%d\n" +msgstr "" + +#: src/tray-monitor/tray_conf.c:175 +#, c-format +msgid "Storage: name=%s address=%s SDport=%d\n" +msgstr "" + +#: src/wx-console/authenticate.c:129 +msgid "Bad response to Hello command: ERR=" +msgstr "" + +#: src/baconfig.h:55 +#, c-format +msgid "Failed ASSERT: %s\n" +msgstr "" + +#: src/baconfig.h:62 +msgid "*None*" +msgstr "" + +#: src/filed/win32/winmain.cpp:228 src/filed/win32/winmain.cpp:236 +msgid "Bacula Usage" +msgstr "" + +#: src/filed/win32/winmain.cpp:233 +msgid "Bad Command Line Options" +msgstr "" + +#: src/filed/win32/winmain.cpp:326 +msgid "Another instance of Bacula is already running" +msgstr "" + +#: src/filed/win32/winservice.cpp:131 src/filed/win32/winservice.cpp:145 +msgid "No existing instance of Bacula could be contacted" +msgstr "" + +#: src/filed/win32/winservice.cpp:218 +msgid "KERNEL32.DLL not found: Bacula service not started" +msgstr "" + +#: src/filed/win32/winservice.cpp:228 +msgid "Registry service not found: Bacula service not started" +msgstr "" + +#: src/filed/win32/winservice.cpp:230 +msgid "Registry service not found" +msgstr "" + +#: src/filed/win32/winservice.cpp:260 +msgid "StartServiceCtrlDispatcher failed." +msgstr "" + +#: src/filed/win32/winservice.cpp:278 +msgid "RegisterServiceCtlHandler failed" +msgstr "" + +#: src/filed/win32/winservice.cpp:279 +msgid "Contact Register Service Handler failure" +msgstr "" + +#: src/filed/win32/winservice.cpp:295 +msgid "ReportStatus STOPPED failed 1" +msgstr "" + +#: src/filed/win32/winservice.cpp:318 +msgid "Report Service failure" +msgstr "" + +#: src/filed/win32/winservice.cpp:355 +msgid "Unable to install Bacula service" +msgstr "" + +#: src/filed/win32/winservice.cpp:373 +msgid "Service command length too long" +msgstr "" + +#: src/filed/win32/winservice.cpp:374 +msgid "Service command length too long. Service not registered." +msgstr "" + +#: src/filed/win32/winservice.cpp:389 +msgid "Cannot write System Registry" +msgstr "" + +#: src/filed/win32/winservice.cpp:390 +msgid "" +"The System Registry could not be updated - the Bacula service was not " +"installed" +msgstr "" + +#: src/filed/win32/winservice.cpp:397 +msgid "Cannot add Bacula key to System Registry" +msgstr "" + +#: src/filed/win32/winservice.cpp:398 src/filed/win32/winservice.cpp:449 +msgid "The Bacula service could not be installed" +msgstr "" + +#: src/filed/win32/winservice.cpp:406 +msgid "" +"The Bacula File service was successfully installed.\n" +"The service may be started by double clicking on the\n" +"Bacula \"Start\" icon and will be automatically\n" +"be run the next time this machine is rebooted. " +msgstr "" + +#: src/filed/win32/winservice.cpp:424 +msgid "" +"The Service Control Manager could not be contacted - the Bacula service was " +"not installed" +msgstr "" + +#: src/filed/win32/winservice.cpp:455 +msgid "" +"Provides file backup and restore services. Bacula -- the network backup " +"solution." +msgstr "" + +#: src/filed/win32/winservice.cpp:462 +msgid "" +"The Bacula File service was successfully installed.\n" +"The service may be started from the Control Panel and will\n" +"automatically be run the next time this machine is rebooted." 
+msgstr "" + +#: src/filed/win32/winservice.cpp:471 +msgid "" +"Unknown Windows operating system.\n" +"Cannot install Bacula service.\n" +msgstr "" + +#: src/filed/win32/winservice.cpp:496 +msgid "" +"Could not find registry entry.\n" +"Service probably not registerd - the Bacula service was not removed" +msgstr "" + +#: src/filed/win32/winservice.cpp:501 +msgid "" +"Could not delete Registry key.\n" +"The Bacula service could not be removed" +msgstr "" + +#: src/filed/win32/winservice.cpp:511 +msgid "Bacula could not be contacted, probably not running" +msgstr "" + +#: src/filed/win32/winservice.cpp:517 src/filed/win32/winservice.cpp:553 +msgid "The Bacula service has been removed" +msgstr "" + +#: src/filed/win32/winservice.cpp:547 +msgid "The Bacula service could not be stopped" +msgstr "" + +#: src/filed/win32/winservice.cpp:555 +msgid "The Bacula service could not be removed" +msgstr "" + +#: src/filed/win32/winservice.cpp:560 +msgid "The Bacula service could not be found" +msgstr "" + +#: src/filed/win32/winservice.cpp:565 +msgid "The SCM could not be contacted - the Bacula service was not removed" +msgstr "" + +#: src/filed/win32/winservice.cpp:629 +msgid "SetServiceStatus failed" +msgstr "" + +#: src/filed/win32/winservice.cpp:657 +#, c-format +msgid "" +"\n" +"\n" +"%s error: %ld at %s:%d" +msgstr "" + +#: src/filed/win32/winservice.cpp:732 +#, c-format +msgid "Locked by: %s, duration: %ld seconds\n" +msgstr "" + +#: src/filed/win32/winservice.cpp:736 +#, c-format +msgid "No longer locked\n" +msgstr "" + +#: src/filed/win32/winservice.cpp:740 +msgid "Could not lock database" +msgstr "" + +#: src/wx-console/console_thread.cpp:101 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Director \"%s\" in config file.\n" +"At least one CA certificate store is required.\n" +msgstr "" + +#: src/wx-console/console_thread.cpp:108 +msgid "" +"No Director resource defined in config file.\n" +"Without that I don't how to speak to the Director :-(\n" +msgstr "" + +#: src/wx-console/console_thread.cpp:127 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Console \"%s\" in config file.\n" +msgstr "" + +#: src/wx-console/console_thread.cpp:148 +msgid "Error while initializing windows sockets...\n" +msgstr "" + +#: src/wx-console/console_thread.cpp:163 +msgid "Error while cleaning up windows sockets...\n" +msgstr "" + +#: src/wx-console/console_thread.cpp:201 +msgid "Error while initializing library." 
+msgstr "" + +#: src/wx-console/console_thread.cpp:227 +msgid "Cryptographic library initialization failed.\n" +msgstr "" + +#: src/wx-console/console_thread.cpp:231 +msgid "Please correct configuration file.\n" +msgstr "" + +#: src/wx-console/console_thread.cpp:273 +msgid "Error : Library not initialized\n" +msgstr "" + +#: src/wx-console/console_thread.cpp:284 +msgid "Error : No configuration file loaded\n" +msgstr "" + +#: src/wx-console/console_thread.cpp:294 +msgid "Connecting...\n" +msgstr "" + +#: src/wx-console/console_thread.cpp:310 +msgid "Error : No director defined in config file.\n" +msgstr "" + +#: src/wx-console/console_thread.cpp:322 +msgid "Multiple directors found in your config file.\n" +msgstr "" + +#: src/wx-console/console_thread.cpp:331 +#, c-format +msgid "Please choose a director (1-%d): " +msgstr "" + +#: src/wx-console/console_thread.cpp:403 +msgid "Failed to connect to the director\n" +msgstr "" + +#: src/wx-console/console_thread.cpp:413 +msgid "Connected\n" +msgstr "" + +#: src/wx-console/console_thread.cpp:470 +msgid "<< Unexpected signal received : " +msgstr "" + +#: src/wx-console/console_thread.cpp:490 +msgid "Connection terminated\n" +msgstr "" + +#: src/wx-console/main.cpp:101 +msgid "Bacula wx-console" +msgstr "" + +#: src/wx-console/main.cpp:106 src/wx-console/wxbmainframe.cpp:248 +#, c-format +msgid "Welcome to bacula wx-console %s (%s)!\n" +msgstr "" + +#: src/wx-console/wxbconfigfileeditor.cpp:44 +msgid "Config file editor" +msgstr "" + +#: src/wx-console/wxbconfigfileeditor.cpp:54 +msgid "# Bacula wx-console Configuration File\n" +msgstr "" + +#: src/wx-console/wxbconfigfileeditor.cpp:90 +msgid "Save and close" +msgstr "" + +#: src/wx-console/wxbconfigfileeditor.cpp:91 +msgid "Close without saving" +msgstr "" + +#: src/wx-console/wxbconfigfileeditor.cpp:118 +#, c-format +msgid "Unable to write to %s\n" +msgstr "" + +#: src/wx-console/wxbconfigfileeditor.cpp:119 +msgid "Error while saving" +msgstr "" + +#: src/wx-console/wxbconfigpanel.cpp:184 +msgid "Apply" +msgstr "" + +#: src/wx-console/wxbhistorytextctrl.cpp:61 +#: src/wx-console/wxbhistorytextctrl.cpp:132 +#: src/wx-console/wxbmainframe.cpp:272 +msgid "Type your command below:" +msgstr "" + +#: src/wx-console/wxbhistorytextctrl.cpp:96 +msgid "Unknown command." 
+msgstr "" + +#: src/wx-console/wxbhistorytextctrl.cpp:105 +msgid "Possible completions: " +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:227 +msgid "&About...\tF1" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:227 +msgid "Show about dialog" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:229 src/wx-console/wxbmainframe.cpp:598 +msgid "Connect to the director" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:230 +msgid "Disconnect" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:230 +msgid "Disconnect of the director" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:232 +msgid "Change of configuration file" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:232 +msgid "Change your default configuration file" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:233 +msgid "Edit your configuration file" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:235 +msgid "E&xit\tAlt-X" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:235 +msgid "Quit this program" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:239 +msgid "&File" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:240 +msgid "&Help" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:257 +msgid "Console" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:264 +msgid "" +"Warning : Unicode is disabled because you are using wxWidgets for GTK+ 1.2.\n" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:268 +msgid "" +"Warning : There is a problem with wxWidgets for GTK+ 2.0 without Unicode " +"support when handling non-ASCII filenames: Every non-ASCII character in such " +"filenames will be replaced by an interrogation mark.\n" +"If this behaviour disturbs you, please build wx-console against a Unicode " +"version of wxWidgets for GTK+ 2.0.\n" +"---\n" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:279 +msgid "Send" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:356 src/wx-console/wxbmainframe.cpp:368 +msgid "Error while parsing command line arguments, using defaults.\n" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:357 src/wx-console/wxbmainframe.cpp:369 +msgid "Usage: wx-console [-c configfile] [-w tmp]\n" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:397 +#, c-format +msgid "" +"It seems that it is the first time you run wx-console.\n" +"This file (%s) has been choosen as default configuration file.\n" +"Do you want to edit it? (if you click No you will have to select another " +"file)" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:399 +msgid "First run" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:416 +#, c-format +msgid "" +"Unable to read %s\n" +"Error: %s\n" +"Do you want to choose another one? (Press no to edit this file)" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:418 +msgid "Unable to read configuration file" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:430 +msgid "Please choose a configuration file to use" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:443 +msgid "This configuration file has been successfully read, use it as default?" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:444 +msgid "Configuration file read successfully" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:454 +#, c-format +msgid "Using this configuration file: %s\n" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:459 +msgid "Connecting to the director..." +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:474 +msgid "Failed to unregister a data parser !" 
+msgstr "" + +#: src/wx-console/wxbmainframe.cpp:482 +msgid "Quitting.\n" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:497 +msgid "" +"Welcome to Bacula wx-console.\n" +"Written by Nicolas Boichat \n" +"(C) 2005 Kern Sibbald\n" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:499 +msgid "About Bacula wx-console" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:505 +msgid "Please choose your default configuration file" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:509 +msgid "Use this configuration file as default?" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:510 +msgid "Configuration file" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:581 +msgid "Console thread terminated." +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:589 +msgid "Connection to the director lost. Quit program?" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:590 +msgid "Connection lost" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:606 +msgid "Connected to the director." +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:629 +msgid "Reconnect" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:630 +msgid "Reconnect to the director" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:644 +msgid "Disconnected of the director." +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:663 src/wx-console/wxbrestorepanel.cpp:689 +msgid "Unexpected question has been received.\n" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:686 src/wx-console/wxbmainframe.cpp:703 +msgid "wx-console: unexpected director's question." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:216 +#: src/wx-console/wxbrestorepanel.cpp:1895 +#: src/wx-console/wxbrestorepanel.cpp:1924 +msgid "Enter restore mode" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:219 +msgid "Cancel restore" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:245 +#: src/wx-console/wxbrestorepanel.cpp:299 +msgid "Add" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:247 +#: src/wx-console/wxbrestorepanel.cpp:301 +msgid "Remove" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:249 +#: src/wx-console/wxbrestorepanel.cpp:303 +msgid "Refresh" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:268 +msgid "M" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:272 +msgid "Filename" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:284 +msgid "Perm." 
+msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:317 +#: src/wx-console/wxbrestorepanel.cpp:330 +#: src/wx-console/wxbrestorepanel.cpp:482 +#: src/wx-console/wxbrestorepanel.cpp:492 +#: src/wx-console/wxbrestorepanel.cpp:495 +#: src/wx-console/wxbrestorepanel.cpp:1760 +#: src/wx-console/wxbrestorepanel.cpp:1846 +msgid "Job Name" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:319 +#: src/wx-console/wxbrestorepanel.cpp:335 +#: src/wx-console/wxbrestorepanel.cpp:434 +#: src/wx-console/wxbrestorepanel.cpp:435 +#: src/wx-console/wxbrestorepanel.cpp:445 +#: src/wx-console/wxbrestorepanel.cpp:446 +#: src/wx-console/wxbrestorepanel.cpp:1118 +#: src/wx-console/wxbrestorepanel.cpp:1191 +#: src/wx-console/wxbrestorepanel.cpp:1798 +#: src/wx-console/wxbrestorepanel.cpp:1800 +#: src/wx-console/wxbrestorepanel.cpp:1861 +msgid "Fileset" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:322 +#: src/wx-console/wxbrestorepanel.cpp:1185 +#: src/wx-console/wxbrestorepanel.cpp:1201 +#: src/wx-console/wxbrestorepanel.cpp:1203 +#: src/wx-console/wxbrestorepanel.cpp:1211 +#: src/wx-console/wxbrestorepanel.cpp:1213 +#: src/wx-console/wxbrestorepanel.cpp:1232 +#: src/wx-console/wxbrestorepanel.cpp:1239 +#: src/wx-console/wxbrestorepanel.cpp:1788 +#: src/wx-console/wxbrestorepanel.cpp:1799 +#: src/wx-console/wxbrestorepanel.cpp:1919 +msgid "Before" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:324 +msgid "Please configure parameters concerning files to restore :" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:333 +#: src/wx-console/wxbrestorepanel.cpp:1854 +msgid "always" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:333 +msgid "if newer" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:333 +msgid "if older" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:333 +#: src/wx-console/wxbrestorepanel.cpp:1857 +msgid "never" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:341 +msgid "Please configure parameters concerning files restoration :" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:406 +msgid "Getting parameters list." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:414 +msgid "Error : no clients returned by the director." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:438 +msgid "Error : no filesets returned by the director." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:462 +msgid "Error : no storage returned by the director." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:485 +#: src/wx-console/wxbrestorepanel.cpp:509 +msgid "Error : no jobs returned by the director." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:495 +msgid "RestoreFiles" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:530 +msgid "Please configure your restore parameters." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:541 +msgid "Building restore tree..." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:584 +msgid "Error while starting restore: " +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:674 +msgid "" +"Right click on a file or on a directory, or double-click on its mark to add " +"it to the restore list." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:711 +#: src/wx-console/wxbrestorepanel.cpp:733 +msgid "wx-console: unexpected restore question." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:744 +msgid " files selected to be restored." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:749 +msgid " file selected to be restored." 
+msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:756 +#, c-format +msgid "Please configure your restore (%ld files selected to be restored)..." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:766 +msgid "Restore failed : no file selected.\n" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:767 +msgid "Restore failed : no file selected." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:777 +msgid "Restoring, please wait..." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:790 +msgid "Job started. JobId=" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:792 +msgid "Restore started, jobid=" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:796 +msgid "Job failed." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:797 +msgid "Restore failed, please look at messages.\n" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:798 +msgid "Restore failed, please look at messages in console." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:804 +#: src/wx-console/wxbrestorepanel.cpp:805 +msgid "Failed to retrieve jobid.\n" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:831 +msgid "" +"Restore is scheduled in more than two minutes, wx-console will not wait for " +"its completion.\n" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:832 +msgid "" +"Restore is scheduled in more than two minutes, wx-console will not wait for " +"its completion." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:858 +msgid "Restore job created, but not yet running." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:863 +#, c-format +msgid "Restore job running, please wait (%ld of %ld files restored)..." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:867 +msgid "Restore job terminated successfully." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:868 +msgid "Restore job terminated successfully.\n" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:873 +msgid "Restore job terminated in error, see messages in console." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:874 +msgid "Restore job terminated in error, see messages.\n" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:879 +msgid "Restore job reported a non-fatal error." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:883 +msgid "Restore job reported a fatal error." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:888 +msgid "Restore job cancelled by user." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:889 +msgid "Restore job cancelled by user.\n" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:894 +msgid "Restore job is waiting on File daemon." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:898 +msgid "Restore job is waiting for new media." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:902 +msgid "Restore job is waiting for storage resource." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:906 +msgid "Restore job is waiting for job resource." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:910 +msgid "Restore job is waiting for Client resource." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:914 +msgid "Restore job is waiting for maximum jobs." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:918 +msgid "Restore job is waiting for start time." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:922 +msgid "Restore job is waiting for higher priority jobs to finish." 
+msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:971 +msgid "" +"The restore job has not been started within one minute, wx-console will not " +"wait for its completion anymore.\n" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:972 +msgid "" +"The restore job has not been started within one minute, wx-console will not " +"wait for its completion anymore." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:982 +msgid "Restore done successfully.\n" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:983 +msgid "Restore done successfully." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1054 +msgid "Applying restore configuration changes..." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1105 +msgid "Failed to find the selected client." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1120 +msgid "Failed to find the selected fileset." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1135 +msgid "Failed to find the selected storage." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1152 +#: src/wx-console/wxbrestorepanel.cpp:1835 +msgid "Run Restore job" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1168 +msgid "Restore configuration changes were applied." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1177 +msgid "Restore cancelled.\n" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1178 +msgid "Restore cancelled." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1200 +msgid "No results to list." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1202 +msgid "No backup found for this client." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1209 +msgid "ERROR" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1210 +msgid "Query failed" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1212 +msgid "Cannot get previous backups list, see console." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1845 +msgid "JobName:" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1849 +msgid "Where:" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1852 +msgid "Replace:" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1855 +msgid "ifnewer" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1856 +msgid "ifolder" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1860 +msgid "FileSet:" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1976 +msgid "Restoring..." +msgstr "" diff --git a/po/fi.po b/po/fi.po new file mode 100644 index 00000000..62887e93 --- /dev/null +++ b/po/fi.po @@ -0,0 +1,15929 @@ +# Finnish translations for branch package. +# Copyright (C) 2000-2016 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +msgid "" +msgstr "" +"Project-Id-Version: branch 2.0\n" +"Report-Msgid-Bugs-To: bacula-devel@lists.sourceforge.net\n" +"POT-Creation-Date: 2007-08-10 23:23+0200\n" +"PO-Revision-Date: 2007-02-03 19:16+0100\n" +"Last-Translator: Kern Sibbald \n" +"Language-Team: Finnish \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=ISO-8859-1\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +#: src/cats/bdb.c:161 +msgid "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" +msgstr "" + +#: src/cats/bdb.c:162 +msgid "WARNING!!!! The Internal Database is NOT OPERATIONAL!\n" +msgstr "" + +#: src/cats/bdb.c:163 +msgid "You should use SQLite, PostgreSQL, or MySQL\n" +msgstr "" + +#: src/cats/bdb.c:185 src/cats/mysql.c:152 src/cats/postgresql.c:161 +#: src/cats/sqlite.c:160 +#, c-format +msgid "Unable to initialize DB lock. 
ERR=%s\n" +msgstr "" + +#: src/cats/bdb.c:195 +#, c-format +msgid "Unable to open Catalog DB control file %s: ERR=%s\n" +msgstr "" + +#: src/cats/bdb.c:250 +#, c-format +msgid "Error reading catalog DB control file. ERR=%s\n" +msgstr "" + +#: src/cats/bdb.c:253 +#, c-format +msgid "" +"Error, catalog DB control file wrong version. Wanted %d, got %d\n" +"Please reinitialize the working directory.\n" +msgstr "" + +#: src/cats/bdb_update.c:88 src/cats/bdb_update.c:119 +#, c-format +msgid "Error updating DB Job file. ERR=%s\n" +msgstr "" + +#: src/cats/bdb_update.c:158 src/cats/bdb_update.c:194 +#, c-format +msgid "Error updating DB Media file. ERR=%s\n" +msgstr "" + +#: src/cats/mysql.c:82 +msgid "A user name for MySQL must be supplied.\n" +msgstr "" + +#: src/cats/mysql.c:190 +#, c-format +msgid "" +"Unable to connect to MySQL server.\n" +"Database=%s User=%s\n" +"MySQL connect failed either server not running or your authorization is " +"incorrect.\n" +msgstr "" + +#: src/cats/mysql.c:367 src/cats/postgresql.c:304 src/cats/sqlite.c:351 +#, c-format +msgid "Query failed: %s: ERR=%s\n" +msgstr "" + +#: src/cats/postgresql.c:85 +msgid "A user name for PostgreSQL must be supplied.\n" +msgstr "" + +#: src/cats/postgresql.c:148 +msgid "" +"PostgreSQL configuration problem. PostgreSQL library is not thread safe. " +"Connot continue.\n" +msgstr "" + +#: src/cats/postgresql.c:198 +#, c-format +msgid "" +"Unable to connect to PostgreSQL server.\n" +"Database=%s User=%s\n" +"It is probably not running or your password is incorrect.\n" +msgstr "" + +#: src/cats/postgresql.c:590 +#, c-format +msgid "error fetching currval: %s\n" +msgstr "" + +#: src/cats/postgresql.c:682 src/cats/postgresql.c:729 +#, c-format +msgid "error ending batch mode: %s\n" +msgstr "" + +#: src/cats/sql.c:139 +#, c-format +msgid "" +"query %s failed:\n" +"%s\n" +msgstr "" + +#: src/cats/sql.c:161 +#, c-format +msgid "" +"insert %s failed:\n" +"%s\n" +msgstr "" + +#: src/cats/sql.c:175 +#, c-format +msgid "Insertion problem: affected_rows=%s\n" +msgstr "" + +#: src/cats/sql.c:195 +#, c-format +msgid "" +"update %s failed:\n" +"%s\n" +msgstr "" + +#: src/cats/sql.c:205 +#, c-format +msgid "Update failed: affected_rows=%s for %s\n" +msgstr "" + +#: src/cats/sql.c:226 +#, c-format +msgid "" +"delete %s failed:\n" +"%s\n" +msgstr "" + +#: src/cats/sql.c:252 src/cats/sql.c:259 src/cats/sql_create.c:1018 +#: src/cats/sql_get.c:185 src/cats/sql_get.c:236 src/cats/sql_get.c:604 +#: src/cats/sql_get.c:680 src/cats/sql_get.c:951 +#, c-format +msgid "error fetching row: %s\n" +msgstr "" + +#: src/cats/sql.c:369 src/dird/catreq.c:404 src/dird/catreq.c:478 +#: src/dird/fd_cmds.c:638 src/dird/fd_cmds.c:696 +#, c-format +msgid "Attribute create error. %s" +msgstr "" + +#: src/cats/sql.c:450 +#, c-format +msgid "Path length is zero. File=%s\n" +msgstr "" + +#: src/cats/sql.c:494 +msgid "No results to list.\n" +msgstr "" + +#: src/cats/sql_create.c:95 +#, c-format +msgid "Create DB Job record %s failed. 
ERR=%s\n" +msgstr "" + +#: src/cats/sql_create.c:146 +#, c-format +msgid "Create JobMedia record %s failed: ERR=%s\n" +msgstr "" + +#: src/cats/sql_create.c:155 +#, c-format +msgid "Update Media record %s failed: ERR=%s\n" +msgstr "" + +#: src/cats/sql_create.c:185 +#, c-format +msgid "pool record %s already exists\n" +msgstr "" + +#: src/cats/sql_create.c:212 +#, c-format +msgid "Create db Pool record %s failed: ERR=%s\n" +msgstr "" + +#: src/cats/sql_create.c:243 +#, c-format +msgid "Device record %s already exists\n" +msgstr "" + +#: src/cats/sql_create.c:259 +#, c-format +msgid "Create db Device record %s failed: ERR=%s\n" +msgstr "" + +#: src/cats/sql_create.c:292 +#, c-format +msgid "More than one Storage record!: %d\n" +msgstr "" + +#: src/cats/sql_create.c:297 +#, c-format +msgid "error fetching Storage row: %s\n" +msgstr "" + +#: src/cats/sql_create.c:317 +#, c-format +msgid "Create DB Storage record %s failed. ERR=%s\n" +msgstr "" + +#: src/cats/sql_create.c:349 +#, c-format +msgid "mediatype record %s already exists\n" +msgstr "" + +#: src/cats/sql_create.c:365 +#, c-format +msgid "Create db mediatype record %s failed: ERR=%s\n" +msgstr "" + +#: src/cats/sql_create.c:400 +#, c-format +msgid "Volume \"%s\" already exists.\n" +msgstr "" + +#: src/cats/sql_create.c:445 +#, c-format +msgid "Create DB Media record %s failed. ERR=%s\n" +msgstr "" + +#: src/cats/sql_create.c:493 +#, c-format +msgid "More than one Client!: %d\n" +msgstr "" + +#: src/cats/sql_create.c:498 +#, c-format +msgid "error fetching Client row: %s\n" +msgstr "" + +#: src/cats/sql_create.c:525 +#, c-format +msgid "Create DB Client record %s failed. ERR=%s\n" +msgstr "" + +#: src/cats/sql_create.c:568 +#, c-format +msgid "Create DB Counters record %s failed. ERR=%s\n" +msgstr "" + +#: src/cats/sql_create.c:601 +#, c-format +msgid "More than one FileSet!: %d\n" +msgstr "" + +#: src/cats/sql_create.c:606 +#, c-format +msgid "error fetching FileSet row: ERR=%s\n" +msgstr "" + +#: src/cats/sql_create.c:636 +#, c-format +msgid "Create DB FileSet record %s failed. ERR=%s\n" +msgstr "" + +#: src/cats/sql_create.c:850 src/dird/job.c:131 src/dird/ua_output.c:628 +#, c-format +msgid "Could not open database \"%s\".\n" +msgstr "" + +#: src/cats/sql_create.c:873 src/cats/sql_create.c:914 +#, c-format +msgid "Attempt to put non-attributes into catalog. Stream=%d\n" +msgstr "" + +#: src/cats/sql_create.c:978 +#, c-format +msgid "Create db File record %s failed. ERR=%s" +msgstr "" + +#: src/cats/sql_create.c:1011 src/cats/sql_get.c:229 +#, c-format +msgid "More than one Path!: %s for path: %s\n" +msgstr "" + +#: src/cats/sql_create.c:1042 +#, c-format +msgid "Create db Path record %s failed. ERR=%s\n" +msgstr "" + +#: src/cats/sql_create.c:1075 +#, c-format +msgid "More than one Filename! %s for file: %s\n" +msgstr "" + +#: src/cats/sql_create.c:1081 +#, c-format +msgid "Error fetching row for file=%s: ERR=%s\n" +msgstr "" + +#: src/cats/sql_create.c:1097 +#, c-format +msgid "Create db Filename record %s failed. 
ERR=%s\n" +msgstr "" + +#: src/cats/sql_delete.c:80 +#, c-format +msgid "No pool record %s exists\n" +msgstr "" + +#: src/cats/sql_delete.c:85 +#, c-format +msgid "Expecting one pool record, got %d\n" +msgstr "" + +#: src/cats/sql_delete.c:91 +#, c-format +msgid "Error fetching row %s\n" +msgstr "" + +#: src/cats/sql_find.c:98 src/cats/sql_find.c:127 +#, c-format +msgid "" +"Query error for start time request: ERR=%s\n" +"CMD=%s\n" +msgstr "" + +#: src/cats/sql_find.c:104 +msgid "No prior Full backup Job record found.\n" +msgstr "" + +#: src/cats/sql_find.c:116 +#, c-format +msgid "Unknown level=%d\n" +msgstr "" + +#: src/cats/sql_find.c:133 +#, c-format +msgid "" +"No Job record found: ERR=%s\n" +"CMD=%s\n" +msgstr "" + +#: src/cats/sql_find.c:232 +#, c-format +msgid "Unknown Job level=%d\n" +msgstr "" + +#: src/cats/sql_find.c:242 +#, c-format +msgid "No Job found for: %s.\n" +msgstr "" + +#: src/cats/sql_find.c:253 +#, c-format +msgid "No Job found for: %s\n" +msgstr "" + +#: src/cats/sql_find.c:332 +#, c-format +msgid "Request for Volume item %d greater than max %d or less than 1\n" +msgstr "" + +#: src/cats/sql_find.c:347 +#, c-format +msgid "No Volume record found for item %d.\n" +msgstr "" + +#: src/cats/sql_get.c:135 +#, c-format +msgid "get_file_record want 1 got rows=%d\n" +msgstr "" + +#: src/cats/sql_get.c:140 +#, c-format +msgid "Error fetching row: %s\n" +msgstr "" + +#: src/cats/sql_get.c:148 +#, c-format +msgid "File record for PathId=%s FilenameId=%s not found.\n" +msgstr "" + +#: src/cats/sql_get.c:154 +msgid "File record not found in Catalog.\n" +msgstr "" + +#: src/cats/sql_get.c:179 +#, c-format +msgid "More than one Filename!: %s for file: %s\n" +msgstr "" + +#: src/cats/sql_get.c:189 +#, c-format +msgid "Get DB Filename record %s found bad record: %d\n" +msgstr "" + +#: src/cats/sql_get.c:195 +#, c-format +msgid "Filename record: %s not found.\n" +msgstr "" + +#: src/cats/sql_get.c:199 +#, c-format +msgid "Filename record: %s not found in Catalog.\n" +msgstr "" + +#: src/cats/sql_get.c:240 +#, c-format +msgid "Get DB path record %s found bad record: %s\n" +msgstr "" + +#: src/cats/sql_get.c:253 +#, c-format +msgid "Path record: %s not found.\n" +msgstr "" + +#: src/cats/sql_get.c:257 +#, c-format +msgid "Path record: %s not found in Catalog.\n" +msgstr "" + +#: src/cats/sql_get.c:294 +#, c-format +msgid "No Job found for JobId %s\n" +msgstr "" + +#: src/cats/sql_get.c:363 src/cats/sql_get.c:419 +#, c-format +msgid "No volumes found for JobId=%d\n" +msgstr "" + +#: src/cats/sql_get.c:369 src/cats/sql_get.c:430 +#, c-format +msgid "Error fetching row %d: ERR=%s\n" +msgstr "" + +#: src/cats/sql_get.c:383 +#, c-format +msgid "No Volume for JobId %d found in Catalog.\n" +msgstr "" + +#: src/cats/sql_get.c:520 +#, c-format +msgid "Pool id select failed: ERR=%s\n" +msgstr "" + +#: src/cats/sql_get.c:557 +#, c-format +msgid "Client id select failed: ERR=%s\n" +msgstr "" + +#: src/cats/sql_get.c:599 +#, c-format +msgid "More than one Pool!: %s\n" +msgstr "" + +#: src/cats/sql_get.c:641 +msgid "Pool record not found in Catalog.\n" +msgstr "" + +#: src/cats/sql_get.c:675 +#, c-format +msgid "More than one Client!: %s\n" +msgstr "" + +#: src/cats/sql_get.c:692 src/cats/sql_get.c:696 +msgid "Client record not found in Catalog.\n" +msgstr "" + +#: src/cats/sql_get.c:721 +#, c-format +msgid "More than one Counter!: %d\n" +msgstr "" + +#: src/cats/sql_get.c:726 +#, c-format +msgid "error fetching Counter row: %s\n" +msgstr "" + +#: src/cats/sql_get.c:746 +#, c-format +msgid "Counter 
record: %s not found in Catalog.\n" +msgstr "" + +#: src/cats/sql_get.c:782 +#, c-format +msgid "Error got %s FileSets but expected only one!\n" +msgstr "" + +#: src/cats/sql_get.c:787 +#, c-format +msgid "FileSet record \"%s\" not found.\n" +msgstr "" + +#: src/cats/sql_get.c:797 +msgid "FileSet record not found in Catalog.\n" +msgstr "" + +#: src/cats/sql_get.c:855 +#, c-format +msgid "Media id select failed: ERR=%s\n" +msgstr "" + +#: src/cats/sql_get.c:893 +#, c-format +msgid "query dbids failed: ERR=%s\n" +msgstr "" + +#: src/cats/sql_get.c:946 +#, c-format +msgid "More than one Volume!: %s\n" +msgstr "" + +#: src/cats/sql_get.c:1001 +#, c-format +msgid "Media record MediaId=%s not found.\n" +msgstr "" + +#: src/cats/sql_get.c:1004 +#, c-format +msgid "Media record for Volume \"%s\" not found.\n" +msgstr "" + +#: src/cats/sql_get.c:1011 +#, c-format +msgid "Media record for MediaId=%u not found in Catalog.\n" +msgstr "" + +#: src/cats/sql_get.c:1014 +#, c-format +msgid "Media record for Vol=%s not found in Catalog.\n" +msgstr "" + +#: src/cats/sql_list.c:62 +#, c-format +msgid "Query failed: %s\n" +msgstr "" + +#: src/cats/sqlite.c:174 +#, c-format +msgid "Database %s does not exist, please create it.\n" +msgstr "" + +#: src/cats/sqlite.c:204 +#, c-format +msgid "Unable to open Database=%s. ERR=%s\n" +msgstr "" + +#: src/cats/sqlite.c:205 src/lib/bnet_server.c:391 +msgid "unknown" +msgstr "" + +#: src/dird/admin.c:63 +#, c-format +msgid "Start Admin JobId %d, Job=%s\n" +msgstr "" + +#: src/dird/admin.c:89 src/dird/backup.c:355 src/dird/migrate.c:1077 +#, c-format +msgid "Error getting Job record for Job report: ERR=%s" +msgstr "" + +#: src/dird/admin.c:97 +msgid "Admin OK" +msgstr "" + +#: src/dird/admin.c:101 +msgid "*** Admin Error ***" +msgstr "" + +#: src/dird/admin.c:105 +msgid "Admin Canceled" +msgstr "" + +#: src/dird/admin.c:109 src/dird/backup.c:405 src/dird/restore.c:279 +#, c-format +msgid "Inappropriate term code: %c\n" +msgstr "" + +#: src/dird/admin.c:115 +msgid "Bacula " +msgstr "" + +#: src/dird/admin.c:115 src/console/console.c:114 +#, c-format +msgid " (" +msgstr "" + +#: src/dird/admin.c:115 +#, c-format +msgid "" +"): %s\n" +" JobId: %d\n" +" Job: %s\n" +" Start time: %s\n" +" End time: %s\n" +" Termination: %s\n" +"\n" +msgstr "" + +#: src/dird/authenticate.c:80 src/dird/authenticate.c:81 +#: src/tray-monitor/authenticate.c:132 +#, c-format +msgid "Error sending Hello to Storage daemon. ERR=%s\n" +msgstr "" + +#: src/dird/authenticate.c:106 +msgid "Director and Storage daemon passwords or names not the same.\n" +msgstr "" + +#: src/dird/authenticate.c:108 +#, c-format +msgid "" +"Director unable to authenticate with Storage daemon at \"%s:%d\". 
Possible " +"causes:\n" +"Passwords or names not the same or\n" +"Maximum Concurrent Jobs exceeded on the SD or\n" +"SD networking messed up (restart daemon).\n" +"Please see http://www.bacula.org/rel-manual/faq.html#AuthorizationErrors for " +"help.\n" +msgstr "" + +#: src/dird/authenticate.c:120 src/console/authenticate.c:114 +#: src/filed/authenticate.c:251 src/stored/authenticate.c:131 +#: src/stored/authenticate.c:232 src/wx-console/authenticate.c:127 +msgid "" +"Authorization problem: Remote server did not advertise required TLS " +"support.\n" +msgstr "" + +#: src/dird/authenticate.c:127 src/console/authenticate.c:121 +#: src/filed/authenticate.c:147 src/filed/authenticate.c:259 +#: src/stored/authenticate.c:139 src/stored/authenticate.c:240 +#: src/wx-console/authenticate.c:133 +msgid "Authorization problem: Remote server requires TLS.\n" +msgstr "" + +#: src/dird/authenticate.c:136 +#, c-format +msgid "TLS negotiation failed with SD at \"%s:%d\"\n" +msgstr "" + +#: src/dird/authenticate.c:145 +#, c-format +msgid "bdird set configuration file to file\n" +" -dnn set debug level to nn\n" +" -f run in foreground (for debugging)\n" +" -g groupid\n" +" -r run now\n" +" -s no signals\n" +" -t test - read configuration and exit\n" +" -u userid\n" +" -v verbose user messages\n" +" -? print this message.\n" +"\n" +msgstr "" + +#: src/dird/dird.c:224 src/console/console.c:426 src/filed/filed.c:182 +#: src/gnome2-console/console.c:286 src/stored/stored.c:210 +msgid "Cryptography library initialization failed.\n" +msgstr "" + +#: src/dird/dird.c:228 src/dird/dird.c:234 src/dird/dird.c:474 +#: src/dird/dird.c:477 src/console/console.c:430 src/filed/filed.c:187 +#: src/gnome2-console/console.c:290 src/stored/stored.c:214 +#, c-format +msgid "Please correct configuration file: %s\n" +msgstr "" + +#: src/dird/dird.c:460 +msgid "Too many open reload requests. Request ignored.\n" +msgstr "" + +#: src/dird/dird.c:475 +msgid "Out of reload table entries. Giving up.\n" +msgstr "" + +#: src/dird/dird.c:478 +msgid "Resetting previous configuration.\n" +msgstr "" + +#: src/dird/dird.c:541 +#, c-format +msgid "" +"No Director resource defined in %s\n" +"Without that I don't know who I am :-(\n" +msgstr "" + +#: src/dird/dird.c:549 src/filed/filed.c:281 +#, c-format +msgid "No Messages resource defined in %s\n" +msgstr "" + +#: src/dird/dird.c:554 +#, c-format +msgid "Only one Director resource permitted in %s\n" +msgstr "" + +#: src/dird/dird.c:563 src/dird/dird.c:764 src/dird/dird.c:816 +#: src/dird/dird.c:920 src/console/console.c:657 src/console/console.c:686 +#: src/filed/filed.c:288 src/filed/filed.c:446 +#: src/gnome2-console/console.c:153 src/gnome2-console/console.c:182 +#: src/stored/stored.c:332 src/wx-console/console_thread.cpp:114 +#: src/wx-console/console_thread.cpp:140 +msgid "TLS required but not configured in Bacula.\n" +msgstr "" + +#: src/dird/dird.c:569 src/filed/filed.c:455 src/stored/stored.c:384 +#, c-format +msgid "\"TLS Certificate\" file not defined for Director \"%s\" in %s.\n" +msgstr "" + +#: src/dird/dird.c:575 src/filed/filed.c:461 src/stored/stored.c:390 +#, c-format +msgid "\"TLS Key\" file not defined for Director \"%s\" in %s.\n" +msgstr "" + +#: src/dird/dird.c:581 src/filed/filed.c:467 src/stored/stored.c:396 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Director \"%s\" in %s. 
At least one CA certificate store is required when " +"using \"TLS Verify Peer\".\n" +msgstr "" + +#: src/dird/dird.c:600 src/filed/filed.c:486 src/stored/stored.c:415 +#, c-format +msgid "Failed to initialize TLS context for Director \"%s\" in %s.\n" +msgstr "" + +#: src/dird/dird.c:608 +#, c-format +msgid "No Job records defined in %s\n" +msgstr "" + +#: src/dird/dird.c:666 src/dird/dird.c:679 +#, c-format +msgid "Hey something is wrong. p=0x%lu\n" +msgstr "" + +#: src/dird/dird.c:738 +#, c-format +msgid "\"%s\" directive in Job \"%s\" resource is required, but not found.\n" +msgstr "" + +#: src/dird/dird.c:745 +msgid "Too many items in Job resource\n" +msgstr "" + +#: src/dird/dird.c:749 +#, c-format +msgid "No storage specified in Job \"%s\" nor in Pool.\n" +msgstr "" + +#: src/dird/dird.c:771 +#, c-format +msgid "\"TLS Certificate\" file not defined for Console \"%s\" in %s.\n" +msgstr "" + +#: src/dird/dird.c:777 +#, c-format +msgid "\"TLS Key\" file not defined for Console \"%s\" in %s.\n" +msgstr "" + +#: src/dird/dird.c:783 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Console \"%s\" in %s. At least one CA certificate store is required when " +"using \"TLS Verify Peer\".\n" +msgstr "" + +#: src/dird/dird.c:800 src/dird/dird.c:840 src/filed/filed.c:312 +#, c-format +msgid "Failed to initialize TLS context for File daemon \"%s\" in %s.\n" +msgstr "" + +#: src/dird/dird.c:823 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"File daemon \"%s\" in %s.\n" +msgstr "" + +#: src/dird/dird.c:872 src/dird/dird.c:874 +#, c-format +msgid "Could not open Catalog \"%s\", database \"%s\".\n" +msgstr "" + +#: src/dird/dird.c:877 +#, c-format +msgid "%s" +msgstr "" + +#: src/dird/dird.c:926 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Storage \"%s\" in %s.\n" +msgstr "" + +#: src/dird/dird.c:942 src/stored/stored.c:370 +#, c-format +msgid "Failed to initialize TLS context for Storage \"%s\" in %s.\n" +msgstr "" + +#: src/dird/dird_conf.c:508 src/tray-monitor/tray_conf.c:168 +#, c-format +msgid "No %s resource defined\n" +msgstr "" + +#: src/dird/dird_conf.c:517 +#, c-format +msgid "Director: name=%s MaxJobs=%d FDtimeout=%s SDtimeout=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:522 +#, c-format +msgid " query_file=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:525 src/dird/dird_conf.c:545 src/dird/dird_conf.c:559 +#: src/dird/dird_conf.c:607 src/dird/dird_conf.c:611 src/dird/dird_conf.c:615 +#: src/dird/dird_conf.c:633 src/dird/dird_conf.c:650 src/dird/dird_conf.c:654 +#: src/dird/dird_conf.c:658 src/dird/dird_conf.c:662 src/dird/dird_conf.c:666 +#: src/dird/dird_conf.c:679 src/dird/dird_conf.c:880 +msgid " --> " +msgstr "" + +#: src/dird/dird_conf.c:530 +#, c-format +msgid "Console: name=%s SSL=%d\n" +msgstr "" + +#: src/dird/dird_conf.c:535 +#, c-format +msgid "Counter: name=%s min=%d max=%d cur=%d wrapcntr=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:540 +#, c-format +msgid "Counter: name=%s min=%d max=%d\n" +msgstr "" + +#: src/dird/dird_conf.c:551 +#, c-format +msgid "Client: name=%s address=%s FDport=%d MaxJobs=%u\n" +msgstr "" + +#: src/dird/dird_conf.c:554 +#, c-format +msgid " JobRetention=%s FileRetention=%s AutoPrune=%d\n" +msgstr "" + +#: src/dird/dird_conf.c:566 +#, c-format +msgid "" +"Device: name=%s ok=%d num_writers=%d max_writers=%d\n" +" reserved=%d open=%d append=%d read=%d labeled=%d offline=%d autochgr=%" 
+"d\n" +" poolid=%s volname=%s MediaType=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:576 +#, c-format +msgid "" +"Storage: name=%s address=%s SDport=%d MaxJobs=%u\n" +" DeviceName=%s MediaType=%s StorageId=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:585 +#, c-format +msgid "" +"Catalog: name=%s address=%s DBport=%d db_name=%s\n" +" db_user=%s MutliDBConn=%d\n" +msgstr "" + +#: src/dird/dird_conf.c:593 +#, c-format +msgid "%s: name=%s JobType=%d level=%s Priority=%d Enabled=%d\n" +msgstr "" + +#: src/dird/dird_conf.c:594 src/dird/ua_cmds.c:478 src/dird/ua_prune.c:365 +#: src/dird/ua_run.c:266 src/dird/ua_select.c:263 src/dird/ua_select.c:286 +msgid "Job" +msgstr "" + +#: src/dird/dird_conf.c:594 +msgid "JobDefs" +msgstr "" + +#: src/dird/dird_conf.c:598 +#, c-format +msgid "" +" MaxJobs=%u Resched=%d Times=%d Interval=%s Spool=%d WritePartAfterJob=%" +"d\n" +msgstr "" + +#: src/dird/dird_conf.c:604 +#, c-format +msgid " SelectionType=%d\n" +msgstr "" + +#: src/dird/dird_conf.c:619 +#, c-format +msgid " --> Where=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:622 +#, c-format +msgid " --> RegexWhere=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:625 +#, c-format +msgid " --> Bootstrap=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:628 +#, c-format +msgid " --> WriteBootstrap=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:640 src/lib/runscript.c:261 +msgid " --> RunScript\n" +msgstr "" + +#: src/dird/dird_conf.c:641 src/lib/runscript.c:262 +#, c-format +msgid " --> Command=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:642 src/lib/runscript.c:263 +#, c-format +msgid " --> Target=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:643 src/lib/runscript.c:264 +#, c-format +msgid " --> RunOnSuccess=%u\n" +msgstr "" + +#: src/dird/dird_conf.c:644 src/lib/runscript.c:265 +#, c-format +msgid " --> RunOnFailure=%u\n" +msgstr "" + +#: src/dird/dird_conf.c:645 src/lib/runscript.c:266 +#, c-format +msgid " --> FailJobOnError=%u\n" +msgstr "" + +#: src/dird/dird_conf.c:646 src/lib/runscript.c:267 +#, c-format +msgid " --> RunWhen=%u\n" +msgstr "" + +#: src/dird/dird_conf.c:672 +#, c-format +msgid " --> Run=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:676 +#, c-format +msgid " --> SelectionPattern=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:686 +#, c-format +msgid "FileSet: name=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:763 src/dird/dird_conf.c:842 +#, c-format +msgid "Schedule: name=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:768 +#, c-format +msgid " --> Run Level=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:769 +msgid " hour=" +msgstr "" + +#: src/dird/dird_conf.c:778 +msgid " mday=" +msgstr "" + +#: src/dird/dird_conf.c:787 +msgid " month=" +msgstr "" + +#: src/dird/dird_conf.c:796 +msgid " wday=" +msgstr "" + +#: src/dird/dird_conf.c:805 +msgid " wom=" +msgstr "" + +#: src/dird/dird_conf.c:814 +msgid " woy=" +msgstr "" + +#: src/dird/dird_conf.c:823 +#, c-format +msgid " mins=%d\n" +msgstr "" + +#: src/dird/dird_conf.c:825 src/dird/dird_conf.c:829 src/dird/dird_conf.c:833 +msgid " --> " +msgstr "" + +#: src/dird/dird_conf.c:846 +#, c-format +msgid "Pool: name=%s PoolType=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:848 +#, c-format +msgid " use_cat=%d use_once=%d cat_files=%d\n" +msgstr "" + +#: src/dird/dird_conf.c:851 +#, c-format +msgid " max_vols=%d auto_prune=%d VolRetention=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:854 +#, c-format +msgid " VolUse=%s recycle=%d LabelFormat=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:858 +#, c-format +msgid " CleaningPrefix=%s LabelType=%d\n" +msgstr "" + +#: 
src/dird/dird_conf.c:860 +#, c-format +msgid " RecyleOldest=%d PurgeOldest=%d\n" +msgstr "" + +#: src/dird/dird_conf.c:863 +#, c-format +msgid " MaxVolJobs=%d MaxVolFiles=%d MaxVolBytes=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:867 +#, c-format +msgid " MigTime=%s MigHiBytes=%s MigLoBytes=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:872 +#, c-format +msgid " NextPool=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:875 +#, c-format +msgid " RecyclePool=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:886 +#, c-format +msgid "Messages: name=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:888 +#, c-format +msgid " mailcmd=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:890 +#, c-format +msgid " opcmd=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:893 src/tray-monitor/tray_conf.c:199 +#, c-format +msgid "Unknown resource type %d in dump_resource.\n" +msgstr "" + +#: src/dird/dird_conf.c:1199 src/tray-monitor/tray_conf.c:257 +#, c-format +msgid "Unknown resource type %d in free_resource.\n" +msgstr "" + +#: src/dird/dird_conf.c:1231 src/dird/dird_conf.c:1246 +#: src/dird/dird_conf.c:1842 src/console/console_conf.c:257 +#: src/filed/filed_conf.c:347 src/gnome2-console/console_conf.c:258 +#: src/tray-monitor/tray_conf.c:288 src/wx-console/console_conf.c:265 +#, c-format +msgid "%s item is required in %s resource, but not found.\n" +msgstr "" + +#: src/dird/dird_conf.c:1237 src/lib/parse_conf.c:234 +#: src/tray-monitor/tray_conf.c:294 +#, c-format +msgid "Too many items in %s resource\n" +msgstr "" + +#: src/dird/dird_conf.c:1277 +#, c-format +msgid "Cannot find Pool resource %s\n" +msgstr "" + +#: src/dird/dird_conf.c:1286 +#, c-format +msgid "Cannot find Console resource %s\n" +msgstr "" + +#: src/dird/dird_conf.c:1292 src/filed/filed_conf.c:367 +#: src/stored/stored_conf.c:584 +#, c-format +msgid "Cannot find Director resource %s\n" +msgstr "" + +#: src/dird/dird_conf.c:1299 src/stored/stored_conf.c:590 +#, c-format +msgid "Cannot find Storage resource %s\n" +msgstr "" + +#: src/dird/dird_conf.c:1308 +#, c-format +msgid "Cannot find Job resource %s\n" +msgstr "" + +#: src/dird/dird_conf.c:1357 +#, c-format +msgid "Cannot find Counter resource %s\n" +msgstr "" + +#: src/dird/dird_conf.c:1365 src/filed/filed_conf.c:373 +#, c-format +msgid "Cannot find Client resource %s\n" +msgstr "" + +#: src/dird/dird_conf.c:1378 +#, c-format +msgid "Cannot find Schedule resource %s\n" +msgstr "" + +#: src/dird/dird_conf.c:1383 src/dird/dird_conf.c:1443 +#: src/tray-monitor/tray_conf.c:314 src/tray-monitor/tray_conf.c:352 +#, c-format +msgid "Unknown resource type %d in save_resource.\n" +msgstr "" + +#: src/dird/dird_conf.c:1458 +#, c-format +msgid "Name item is required in %s resource, but not found.\n" +msgstr "" + +#: src/dird/dird_conf.c:1466 src/console/console_conf.c:320 +#: src/filed/filed_conf.c:432 src/gnome2-console/console_conf.c:327 +#: src/tray-monitor/tray_conf.c:372 src/wx-console/console_conf.c:328 +#, c-format +msgid "Attempt to define second %s resource named \"%s\" is not permitted.\n" +msgstr "" + +#: src/dird/dird_conf.c:1471 +#, c-format +msgid "Inserting %s res: %s index=%d pass=%d\n" +msgstr "" + +#: src/dird/dird_conf.c:1543 +#, c-format +msgid "Expected a Migration Job Type keyword, got: %s" +msgstr "" + +#: src/dird/dird_conf.c:1569 +#, c-format +msgid "Expected a Job Type keyword, got: %s" +msgstr "" + +#: src/dird/dird_conf.c:1593 +#, c-format +msgid "Expected a Job Level keyword, got: %s" +msgstr "" + +#: src/dird/dird_conf.c:1613 +#, c-format +msgid "Expected a Restore replacement option, got: %s" 
+msgstr "" + +#: src/dird/dird_conf.c:1661 src/dird/dird_conf.c:1771 +#: src/lib/parse_conf.c:723 src/lib/parse_conf.c:738 +#, c-format +msgid "Expect %s, got: %s" +msgstr "" + +#: src/dird/dird_conf.c:1683 src/lib/parse_conf.c:482 +#, c-format +msgid "Could not find config Resource %s referenced on line %d : %s\n" +msgstr "" + +#: src/dird/dird_conf.c:1811 src/dird/inc_conf.c:645 +#, c-format +msgid "Expecting open brace. Got %s" +msgstr "" + +#: src/dird/dird_conf.c:1819 src/dird/inc_conf.c:360 src/dird/inc_conf.c:660 +#, c-format +msgid "Expecting keyword, got: %s\n" +msgstr "" + +#: src/dird/dird_conf.c:1825 src/dird/inc_conf.c:368 src/dird/inc_conf.c:666 +#: src/lib/parse_conf.c:874 +#, c-format +msgid "expected an equals, got: %s" +msgstr "" + +#: src/dird/dird_conf.c:1836 src/dird/inc_conf.c:378 src/dird/inc_conf.c:675 +#, c-format +msgid "Keyword %s not permitted in this resource" +msgstr "" + +#: src/dird/expand.c:255 +#, c-format +msgid "Count not update counter %s: ERR=%s\n" +msgstr "" + +#: src/dird/expand.c:427 +#, c-format +msgid "Cannot create var context: ERR=%s\n" +msgstr "" + +#: src/dird/expand.c:432 +#, c-format +msgid "Cannot set var callback: ERR=%s\n" +msgstr "" + +#: src/dird/expand.c:438 +#, c-format +msgid "Cannot set var operate: ERR=%s\n" +msgstr "" + +#: src/dird/expand.c:444 src/dird/expand.c:459 +#, c-format +msgid "Cannot unescape string: ERR=%s\n" +msgstr "" + +#: src/dird/expand.c:452 +#, c-format +msgid "Cannot expand expression \"%s\": ERR=%s\n" +msgstr "" + +#: src/dird/expand.c:470 +#, c-format +msgid "Cannot destroy var context: ERR=%s\n" +msgstr "" + +#: src/dird/fd_cmds.c:94 src/tray-monitor/tray-monitor.c:914 +msgid "File daemon" +msgstr "" + +#: src/dird/fd_cmds.c:125 +#, c-format +msgid "File daemon \"%s\" rejected Job command: %s\n" +msgstr "" + +#: src/dird/fd_cmds.c:138 +#, c-format +msgid "Error updating Client record. ERR=%s\n" +msgstr "" + +#: src/dird/fd_cmds.c:143 +#, c-format +msgid "FD gave bad response to JobId command: %s\n" +msgstr "" + +#: src/dird/fd_cmds.c:165 src/dird/fd_cmds.c:201 +msgid ", since=" +msgstr "" + +#: src/dird/fd_cmds.c:185 +msgid "No prior or suitable Full backup found in catalog. Doing FULL backup.\n" +msgstr "" + +#: src/dird/fd_cmds.c:186 src/dird/fd_cmds.c:194 +#, c-format +msgid " (upgraded from %s)" +msgstr "" + +#: src/dird/fd_cmds.c:192 +#, c-format +msgid "Prior failed job found in catalog. Upgrading to %s.\n" +msgstr "" + +#: src/dird/fd_cmds.c:253 +#, c-format +msgid "Unimplemented backup level %d %c\n" +msgstr "" + +#: src/dird/fd_cmds.c:356 src/filed/job.c:640 +#, c-format +msgid "Cannot run program: %s. ERR=%s\n" +msgstr "" + +#: src/dird/fd_cmds.c:367 src/dird/fd_cmds.c:392 src/dird/fd_cmds.c:406 +msgid ">filed: write error on socket\n" +msgstr "" + +#: src/dird/fd_cmds.c:373 +#, c-format +msgid "Error running program: %s. ERR=%s\n" +msgstr "" + +#: src/dird/fd_cmds.c:382 +#, c-format +msgid "Cannot open included file: %s. 
ERR=%s\n" +msgstr "" + +#: src/dird/fd_cmds.c:477 src/filed/job.c:1831 +#, c-format +msgid "Could not open bootstrap file %s: ERR=%s\n" +msgstr "" + +#: src/dird/fd_cmds.c:583 +#, c-format +msgid "Client \"%s\" RunScript failed.\n" +msgstr "" + +#: src/dird/fd_cmds.c:619 +#, c-format +msgid "" +" | ] -- cancel a job" +msgstr "" + +#: src/dird/ua_cmds.c:108 +msgid "create DB Pool from resource" +msgstr "" + +#: src/dird/ua_cmds.c:109 +msgid "delete [pool= | media volume=]" +msgstr "" + +#: src/dird/ua_cmds.c:110 +msgid "disable -- disable a job" +msgstr "" + +#: src/dird/ua_cmds.c:111 +msgid "enable -- enable a job" +msgstr "" + +#: src/dird/ua_cmds.c:112 +msgid "performs FileSet estimate, listing gives full listing" +msgstr "" + +#: src/dird/ua_cmds.c:113 src/console/console.c:171 +msgid "exit = quit" +msgstr "" + +#: src/dird/ua_cmds.c:114 +msgid "gui [on|off] -- non-interactive gui mode" +msgstr "" + +#: src/dird/ua_cmds.c:115 src/stored/btape.c:2540 +msgid "print this command" +msgstr "" + +#: src/dird/ua_cmds.c:116 +msgid "" +"list [pools | jobs | jobtotals | media | files ]; " +"from catalog" +msgstr "" + +#: src/dird/ua_cmds.c:117 +msgid "label a tape" +msgstr "" + +#: src/dird/ua_cmds.c:118 +msgid "full or long list like list command" +msgstr "" + +#: src/dird/ua_cmds.c:119 +msgid "print current memory usage" +msgstr "" + +#: src/dird/ua_cmds.c:120 +msgid "messages" +msgstr "" + +#: src/dird/ua_cmds.c:121 +msgid "mount " +msgstr "" + +#: src/dird/ua_cmds.c:122 +msgid "prune expired records from catalog" +msgstr "" + +#: src/dird/ua_cmds.c:123 +msgid "purge records from catalog" +msgstr "" + +#: src/dird/ua_cmds.c:124 +msgid "python control commands" +msgstr "" + +#: src/dird/ua_cmds.c:125 src/console/console.c:164 +msgid "quit" +msgstr "" + +#: src/dird/ua_cmds.c:126 +msgid "query catalog" +msgstr "" + +#: src/dird/ua_cmds.c:127 +msgid "restore files" +msgstr "" + +#: src/dird/ua_cmds.c:128 +msgid "relabel a tape" +msgstr "" + +#: src/dird/ua_cmds.c:129 +msgid "release " +msgstr "" + +#: src/dird/ua_cmds.c:130 +msgid "reload conf file" +msgstr "" + +#: src/dird/ua_cmds.c:131 +msgid "run " +msgstr "" + +#: src/dird/ua_cmds.c:132 +msgid "status [storage | client]=" +msgstr "" + +#: src/dird/ua_cmds.c:133 +msgid "sets debug level" +msgstr "" + +#: src/dird/ua_cmds.c:134 +msgid "sets new client address -- if authorized" +msgstr "" + +#: src/dird/ua_cmds.c:135 +msgid "show (resource records) [jobs | pools | ... 
| all]" +msgstr "" + +#: src/dird/ua_cmds.c:136 +msgid "use SQL to query catalog" +msgstr "" + +#: src/dird/ua_cmds.c:137 src/console/console.c:167 +msgid "print current time" +msgstr "" + +#: src/dird/ua_cmds.c:138 +msgid "turn on/off trace to file" +msgstr "" + +#: src/dird/ua_cmds.c:139 +msgid "unmount " +msgstr "" + +#: src/dird/ua_cmds.c:140 +msgid "umount for old-time Unix guys" +msgstr "" + +#: src/dird/ua_cmds.c:141 +msgid "update Volume, Pool or slots" +msgstr "" + +#: src/dird/ua_cmds.c:142 +msgid "use catalog xxx" +msgstr "" + +#: src/dird/ua_cmds.c:143 +msgid "does variable expansion" +msgstr "" + +#: src/dird/ua_cmds.c:144 +msgid "print Director version" +msgstr "" + +#: src/dird/ua_cmds.c:145 +msgid "" +"wait until no jobs are running [ | | " +"]" +msgstr "" + +#: src/dird/ua_cmds.c:186 +#, c-format +msgid "%s: is an invalid command.\n" +msgstr "" + +#: src/dird/ua_cmds.c:227 +msgid "" +"You probably don't want to be using this command since it\n" +"creates database records without labeling the Volumes.\n" +"You probably want to use the \"label\" command.\n" +"\n" +msgstr "" + +#: src/dird/ua_cmds.c:246 +#, c-format +msgid "Pool already has maximum volumes=%d\n" +msgstr "" + +#: src/dird/ua_cmds.c:248 +msgid "Enter new maximum (zero for unlimited): " +msgstr "" + +#: src/dird/ua_cmds.c:269 +#, c-format +msgid "Enter number of Volumes to create. 0=>fixed name. Max=%d: " +msgstr "" + +#: src/dird/ua_cmds.c:275 +#, c-format +msgid "The number must be between 0 and %d\n" +msgstr "" + +#: src/dird/ua_cmds.c:282 +msgid "Enter Volume name: " +msgstr "" + +#: src/dird/ua_cmds.c:286 +msgid "Enter base volume name: " +msgstr "" + +#: src/dird/ua_cmds.c:295 src/dird/ua_label.c:645 +msgid "Volume name too long.\n" +msgstr "" + +#: src/dird/ua_cmds.c:299 src/dird/ua_label.c:651 src/lib/edit.c:459 +msgid "Volume name must be at least one character long.\n" +msgstr "" + +#: src/dird/ua_cmds.c:308 +msgid "Enter the starting number: " +msgstr "" + +#: src/dird/ua_cmds.c:313 +msgid "Start number must be greater than zero.\n" +msgstr "" + +#: src/dird/ua_cmds.c:324 +msgid "Enter slot (0 for none): " +msgstr "" + +#: src/dird/ua_cmds.c:328 +msgid "InChanger? yes/no: " +msgstr "" + +#: src/dird/ua_cmds.c:356 +#, c-format +msgid "%d Volumes created in pool %s\n" +msgstr "" + +#: src/dird/ua_cmds.c:372 src/dird/ua_cmds.c:1035 +msgid "Turn on or off? " +msgstr "" + +#: src/dird/ua_cmds.c:403 +#, c-format +msgid "JobId %s is not running. Use Job name to cancel inactive jobs.\n" +msgstr "" + +#: src/dird/ua_cmds.c:412 src/dird/ua_cmds.c:422 +#, c-format +msgid "Warning Job %s is not running. Continuing anyway ...\n" +msgstr "" + +#: src/dird/ua_cmds.c:432 src/dird/ua_cmds.c:708 src/dird/ua_cmds.c:754 +msgid "Unauthorized command from this console.\n" +msgstr "" + +#: src/dird/ua_cmds.c:457 src/filed/status.c:201 src/stored/status.c:447 +msgid "No Jobs running.\n" +msgstr "" + +#: src/dird/ua_cmds.c:459 +msgid "None of your jobs are running.\n" +msgstr "" + +#: src/dird/ua_cmds.c:464 +msgid "Select Job:\n" +msgstr "" + +#: src/dird/ua_cmds.c:473 +#, c-format +msgid "JobId=%s Job=%s" +msgstr "" + +#: src/dird/ua_cmds.c:478 +msgid "Choose Job to cancel" +msgstr "" + +#: src/dird/ua_cmds.c:483 +#, c-format +msgid "" +"Cancel: %s\n" +"\n" +"%s" +msgstr "" + +#: src/dird/ua_cmds.c:484 +msgid "Confirm cancel?" 
+msgstr "" + +#: src/dird/ua_cmds.c:490 +msgid "Confirm cancel (yes/no): " +msgstr "" + +#: src/dird/ua_cmds.c:498 src/dird/ua_cmds.c:749 +#, c-format +msgid "Job \"%s\" not found.\n" +msgstr "" + +#: src/dird/ua_cmds.c:595 +#, c-format +msgid "" +"Can't set %s RecyclePool to %s, %s is not in database.\n" +"Try to update it with 'update pool=%s'\n" +msgstr "" + +#: src/dird/ua_cmds.c:662 +#, c-format +msgid "" +"Error: Pool %s already exists.\n" +"Use update to change it.\n" +msgstr "" + +#: src/dird/ua_cmds.c:673 +#, c-format +msgid "Pool %s created.\n" +msgstr "" + +#: src/dird/ua_cmds.c:690 +msgid "Python interpreter restarted.\n" +msgstr "" + +#: src/dird/ua_cmds.c:692 src/dird/ua_cmds.c:1275 +msgid "Nothing done.\n" +msgstr "" + +#: src/dird/ua_cmds.c:715 src/dird/ua_run.c:1224 +#, c-format +msgid "Client \"%s\" not found.\n" +msgstr "" + +#: src/dird/ua_cmds.c:724 +#, c-format +msgid "Client \"%s\" address set to %s\n" +msgstr "" + +#: src/dird/ua_cmds.c:758 +#, c-format +msgid "Job \"%s\" %sabled\n" +msgstr "" + +#: src/dird/ua_cmds.c:785 src/dird/ua_dotcmds.c:177 src/dird/ua_status.c:311 +#, c-format +msgid "Connecting to Storage daemon %s at %s:%d\n" +msgstr "" + +#: src/dird/ua_cmds.c:791 src/dird/ua_dotcmds.c:183 src/dird/ua_status.c:322 +msgid "Connected to storage daemon\n" +msgstr "" + +#: src/dird/ua_cmds.c:811 src/dird/ua_cmds.c:1162 src/dird/ua_dotcmds.c:203 +#: src/dird/ua_status.c:349 +#, c-format +msgid "Connecting to Client %s at %s:%d\n" +msgstr "" + +#: src/dird/ua_cmds.c:814 src/dird/ua_cmds.c:1165 src/dird/ua_dotcmds.c:206 +msgid "Failed to connect to Client.\n" +msgstr "" + +#: src/dird/ua_cmds.c:930 +msgid "Enter new debug level: " +msgstr "" + +#: src/dird/ua_cmds.c:996 src/dird/ua_dotcmds.c:282 +msgid "Available daemons are: \n" +msgstr "" + +#: src/dird/ua_cmds.c:997 src/dird/ua_dotcmds.c:283 +msgid "Director" +msgstr "" + +#: src/dird/ua_cmds.c:998 src/dird/ua_dotcmds.c:284 src/dird/ua_run.c:265 +#: src/dird/ua_select.c:168 src/wx-console/wxbrestorepanel.cpp:339 +#: src/wx-console/wxbrestorepanel.cpp:355 +#: src/wx-console/wxbrestorepanel.cpp:479 +#: src/wx-console/wxbrestorepanel.cpp:480 +#: src/wx-console/wxbrestorepanel.cpp:490 +#: src/wx-console/wxbrestorepanel.cpp:491 +#: src/wx-console/wxbrestorepanel.cpp:1154 +#: src/wx-console/wxbrestorepanel.cpp:1818 +#: src/wx-console/wxbrestorepanel.cpp:1889 +msgid "Storage" +msgstr "" + +#: src/dird/ua_cmds.c:999 src/dird/ua_dotcmds.c:285 src/dird/ua_run.c:271 +#: src/dird/ua_select.c:311 src/dird/ua_select.c:420 +#: src/wx-console/wxbrestorepanel.cpp:336 +#: src/wx-console/wxbrestorepanel.cpp:354 +#: src/wx-console/wxbrestorepanel.cpp:431 +#: src/wx-console/wxbrestorepanel.cpp:432 +#: src/wx-console/wxbrestorepanel.cpp:442 +#: src/wx-console/wxbrestorepanel.cpp:443 +#: src/wx-console/wxbrestorepanel.cpp:690 +#: src/wx-console/wxbrestorepanel.cpp:1124 +#: src/wx-console/wxbrestorepanel.cpp:1211 +#: src/wx-console/wxbrestorepanel.cpp:1811 +#: src/wx-console/wxbrestorepanel.cpp:1813 +#: src/wx-console/wxbrestorepanel.cpp:1887 +#: src/wx-console/wxbrestorepanel.cpp:1943 +msgid "Client" +msgstr "" + +#: src/dird/ua_cmds.c:1000 +msgid "All" +msgstr "" + +#: src/dird/ua_cmds.c:1001 +msgid "Select daemon type to set debug level" +msgstr "" + +#: src/dird/ua_cmds.c:1091 src/dird/ua_cmds.c:1130 src/dird/ua_cmds.c:1798 +#, c-format +msgid "No authorization for Job \"%s\"\n" +msgstr "" + +#: src/dird/ua_cmds.c:1101 +#, c-format +msgid "No authorization for FileSet \"%s\"\n" +msgstr "" + +#: src/dird/ua_cmds.c:1113 
src/dird/ua_run.c:217 +#, c-format +msgid "Level %s not valid.\n" +msgstr "" + +#: src/dird/ua_cmds.c:1126 +msgid "No job specified.\n" +msgstr "" + +#: src/dird/ua_cmds.c:1170 +msgid "Error sending include list.\n" +msgstr "" + +#: src/dird/ua_cmds.c:1175 +msgid "Error sending exclude list.\n" +msgstr "" + +#: src/dird/ua_cmds.c:1261 +msgid "" +"In general it is not a good idea to delete either a\n" +"Pool or a Volume since they may contain data.\n" +"\n" +msgstr "" + +#: src/dird/ua_cmds.c:1264 +msgid "Choose catalog item to delete" +msgstr "" + +#: src/dird/ua_cmds.c:1332 +msgid "Enter JobId to delete: " +msgstr "" + +#: src/dird/ua_cmds.c:1367 +#, c-format +msgid "Job %s and associated records deleted from the catalog.\n" +msgstr "" + +#: src/dird/ua_cmds.c:1381 +#, c-format +msgid "" +"\n" +"This command will delete volume %s\n" +"and all Jobs saved on that volume from the Catalog\n" +msgstr "" + +#: src/dird/ua_cmds.c:1385 +#, c-format +msgid "Are you sure you want to delete Volume \"%s\"? (yes/no): " +msgstr "" + +#: src/dird/ua_cmds.c:1409 +#, c-format +msgid "Are you sure you want to delete Pool \"%s\"? (yes/no): " +msgstr "" + +#: src/dird/ua_cmds.c:1523 +#, c-format +msgid "Using Catalog name=%s DB=%s\n" +msgstr "" + +#: src/dird/ua_cmds.c:1583 +msgid "ERR: Can't open db\n" +msgstr "" + +#: src/dird/ua_cmds.c:1619 +msgid "ERR: Job was not found\n" +msgstr "" + +#: src/dird/ua_cmds.c:1695 src/dird/ua_tree.c:664 src/stored/btape.c:2587 +#, c-format +msgid "" +" Command Description\n" +" ======= ===========\n" +msgstr "" + +#: src/dird/ua_cmds.c:1697 +#, c-format +msgid " %-10s %s\n" +msgstr "" + +#: src/dird/ua_cmds.c:1699 +msgid "" +"\n" +"When at a prompt, entering a period cancels the command.\n" +"\n" +msgstr "" + +#: src/dird/ua_cmds.c:1716 src/dird/ua_status.c:263 src/stored/status.c:79 +#, c-format +msgid "%s Version: %s (%s) %s %s %s\n" +msgstr "" + +#: src/dird/ua_cmds.c:1759 src/dird/ua_cmds.c:1786 src/dird/ua_cmds.c:1808 +#, c-format +msgid "No authorization for Catalog \"%s\"\n" +msgstr "" + +#: src/dird/ua_cmds.c:1776 +#, c-format +msgid "No authorization for Client \"%s\"\n" +msgstr "" + +#: src/dird/ua_cmds.c:1831 src/dird/ua_select.c:221 +msgid "Could not find a Catalog resource\n" +msgstr "" + +#: src/dird/ua_cmds.c:1844 +#, c-format +msgid "Could not open catalog database \"%s\".\n" +msgstr "" + +#: src/dird/ua_cmds.c:1854 +#, c-format +msgid "Using Catalog \"%s\"\n" +msgstr "" + +#: src/dird/ua_dotcmds.c:138 +msgid ": is an invalid command.\n" +msgstr "" + +#: src/dird/ua_dotcmds.c:238 src/dird/ua_dotcmds.c:288 +msgid "The Director will segment fault.\n" +msgstr "" + +#: src/dird/ua_dotcmds.c:286 +msgid "Select daemon type to make die" +msgstr "" + +#: src/dird/ua_dotcmds.c:453 +msgid "Access to specified Client or FileSet not allowed.\n" +msgstr "" + +#: src/dird/ua_dotcmds.c:458 src/dird/ua_dotcmds.c:494 +#: src/dird/ua_restore.c:882 src/dird/ua_restore.c:911 +#: src/dird/ua_restore.c:932 +#, c-format +msgid "Query failed: %s. 
ERR=%s\n" +msgstr "" + +#: src/dird/ua_dotcmds.c:489 +msgid "query keyword not found.\n" +msgstr "" + +#: src/dird/ua_input.c:95 +msgid "Enter slot" +msgstr "" + +#: src/dird/ua_input.c:99 src/dird/ua_input.c:105 +#, c-format +msgid "Expected a positive integer, got: %s\n" +msgstr "" + +#: src/dird/ua_input.c:123 src/dird/ua_run.c:446 src/dird/ua_update.c:245 +#: src/dird/ua_update.c:265 src/dird/ua_update.c:609 +#: src/stored/parse_bsr.c:779 src/tools/dbcheck.c:1202 +msgid "yes" +msgstr "" + +#: src/dird/ua_input.c:127 src/dird/ua_update.c:245 src/dird/ua_update.c:265 +#: src/dird/ua_update.c:609 src/stored/parse_bsr.c:779 +msgid "no" +msgstr "" + +#: src/dird/ua_input.c:162 +msgid "Invalid response. You must answer yes or no.\n" +msgstr "" + +#: src/dird/ua_input.c:185 +msgid "Invalid Enabled value, it must be yes, no, archived, 0, 1, or 2\n" +msgstr "" + +#: src/dird/ua_label.c:102 +msgid "Negative numbers not permitted\n" +msgstr "" + +#: src/dird/ua_label.c:108 +msgid "Range end is not integer.\n" +msgstr "" + +#: src/dird/ua_label.c:113 +msgid "Range start is not an integer.\n" +msgstr "" + +#: src/dird/ua_label.c:119 +msgid "Range end not bigger than start.\n" +msgstr "" + +#: src/dird/ua_label.c:125 +msgid "Input value is not an integer.\n" +msgstr "" + +#: src/dird/ua_label.c:131 +msgid "Values must be be greater than zero.\n" +msgstr "" + +#: src/dird/ua_label.c:135 +msgid "Slot too large.\n" +msgstr "" + +#: src/dird/ua_label.c:184 src/dird/ua_label.c:349 src/dird/ua_run.c:1199 +msgid "command line" +msgstr "" + +#: src/dird/ua_label.c:202 src/dird/ua_label.c:513 +msgid "No slots in changer to scan.\n" +msgstr "" + +#: src/dird/ua_label.c:214 src/dird/ua_label.c:524 +msgid "No Volumes found to label, or no barcodes.\n" +msgstr "" + +#: src/dird/ua_label.c:224 +#, c-format +msgid "Slot %d greater than max %d ignored.\n" +msgstr "" + +#: src/dird/ua_label.c:253 +#, c-format +msgid "No VolName for Slot=%d InChanger set to zero.\n" +msgstr "" + +#: src/dird/ua_label.c:271 +#, c-format +msgid "Catalog record for Volume \"%s\" updated to reference slot %d.\n" +msgstr "" + +#: src/dird/ua_label.c:275 +#, c-format +msgid "Catalog record for Volume \"%s\" is up to date.\n" +msgstr "" + +#: src/dird/ua_label.c:281 +#, c-format +msgid "Volume \"%s\" not found in catalog. Slot=%d InChanger set to zero.\n" +msgstr "" + +#: src/dird/ua_label.c:378 +#, c-format +msgid "" +"Volume \"%s\" has VolStatus %s. It must be Purged or Recycled before " +"relabeling.\n" +msgstr "" + +#: src/dird/ua_label.c:394 +msgid "Enter new Volume name: " +msgstr "" + +#: src/dird/ua_label.c:407 +#, c-format +msgid "Media record for new Volume \"%s\" already exists.\n" +msgstr "" + +#: src/dird/ua_label.c:425 +msgid "Enter slot (0 or Enter for none): " +msgstr "" + +#: src/dird/ua_label.c:453 +#, c-format +msgid "Delete of Volume \"%s\" failed. ERR=%s" +msgstr "" + +#: src/dird/ua_label.c:456 +#, c-format +msgid "Old volume \"%s\" deleted from catalog.\n" +msgstr "" + +#: src/dird/ua_label.c:467 +#, c-format +msgid "Requesting to mount %s ...\n" +msgstr "" + +#: src/dird/ua_label.c:489 +msgid "Do not forget to mount the drive!!!\n" +msgstr "" + +#: src/dird/ua_label.c:529 +msgid "" +"The following Volumes will be labeled:\n" +"Slot Volume\n" +"==============\n" +msgstr "" + +#: src/dird/ua_label.c:538 +msgid "Do you want to label these Volumes? 
(yes|no): " +msgstr "" + +#: src/dird/ua_label.c:559 +#, c-format +msgid "Media record for Slot %d Volume \"%s\" already exists.\n" +msgstr "" + +#: src/dird/ua_label.c:565 +#, c-format +msgid "Error setting InChanger: ERR=%s" +msgstr "" + +#: src/dird/ua_label.c:588 +#, c-format +msgid "Maximum pool Volumes=%d reached.\n" +msgstr "" + +#: src/dird/ua_label.c:595 +#, c-format +msgid "Catalog record for cleaning tape \"%s\" successfully created.\n" +msgstr "" + +#: src/dird/ua_label.c:602 +#, c-format +msgid "Catalog error on cleaning tape: %s" +msgstr "" + +#: src/dird/ua_label.c:638 +#, c-format +msgid "Illegal character \"%c\" in a volume name.\n" +msgstr "" + +#: src/dird/ua_label.c:685 +#, c-format +msgid "Sending relabel command from \"%s\" to \"%s\" ...\n" +msgstr "" + +#: src/dird/ua_label.c:692 +#, c-format +msgid "Sending label command for Volume \"%s\" Slot %d ...\n" +msgstr "" + +#: src/dird/ua_label.c:733 +#, c-format +msgid "Catalog record for Volume \"%s\", Slot %d successfully created.\n" +msgstr "" + +#: src/dird/ua_label.c:746 +#, c-format +msgid "Label command failed for Volume %s.\n" +msgstr "" + +#: src/dird/ua_label.c:756 +#, c-format +msgid "Connecting to Storage daemon %s at %s:%d ...\n" +msgstr "" + +#: src/dird/ua_label.c:784 +msgid "Could not open SD socket.\n" +msgstr "" + +#: src/dird/ua_label.c:856 src/dird/ua_label.c:866 +#, c-format +msgid "Invalid Slot number: %s\n" +msgstr "" + +#: src/dird/ua_label.c:875 +#, c-format +msgid "Invalid Volume name: %s\n" +msgstr "" + +#: src/dird/ua_label.c:954 +#, c-format +msgid "Device \"%s\" has %d slots.\n" +msgstr "" + +#: src/dird/ua_label.c:1003 +#, c-format +msgid "Pool \"%s\" resource not found for volume \"%s\"!\n" +msgstr "" + +#: src/dird/ua_output.c:73 src/dird/ua_output.c:97 +msgid "ON or OFF keyword missing.\n" +msgstr "" + +#: src/dird/ua_output.c:185 +msgid "Keywords for the show command are:\n" +msgstr "" + +#: src/dird/ua_output.c:191 +#, c-format +msgid "%s resource %s not found.\n" +msgstr "" + +#: src/dird/ua_output.c:194 +#, c-format +msgid "Resource %s not found\n" +msgstr "" + +#: src/dird/ua_output.c:262 +msgid "Hey! DB is NULL\n" +msgstr "" + +#: src/dird/ua_output.c:376 +#, c-format +msgid "Jobid %d used %d Volume(s): %s\n" +msgstr "" + +#: src/dird/ua_output.c:394 +msgid "No Pool specified.\n" +msgstr "" + +#: src/dird/ua_output.c:405 src/dird/ua_select.c:488 +#, c-format +msgid "Error obtaining pool ids. ERR=%s\n" +msgstr "" + +#: src/dird/ua_output.c:415 +#, c-format +msgid "Pool: %s\n" +msgstr "" + +#: src/dird/ua_output.c:431 src/dird/ua_status.c:481 +msgid "Ignoring invalid value for days. 
Max is 50.\n" +msgstr "" + +#: src/dird/ua_output.c:440 +#, c-format +msgid "Unknown list keyword: %s\n" +msgstr "" + +#: src/dird/ua_output.c:466 +#, c-format +msgid "%s is not a job name.\n" +msgstr "" + +#: src/dird/ua_output.c:477 +#, c-format +msgid "Could not Pool Job %s\n" +msgstr "" + +#: src/dird/ua_output.c:489 +#, c-format +msgid "Could not find next Volume for Job %s (Pool=%s, Level=%s).\n" +msgstr "" + +#: src/dird/ua_output.c:493 +#, c-format +msgid "" +"The next Volume to be used by Job \"%s\" (Pool=%s, Level=%s) will be %s\n" +msgstr "" + +#: src/dird/ua_output.c:503 +#, c-format +msgid "Could not find next Volume for Job %s.\n" +msgstr "" + +#: src/dird/ua_output.c:702 +msgid "You have no messages.\n" +msgstr "" + +#: src/dird/ua_prune.c:132 +msgid "Choose item to prune" +msgstr "" + +#: src/dird/ua_prune.c:156 +#, c-format +msgid "Cannot prune Volume \"%s\" because it is archived.\n" +msgstr "" + +#: src/dird/ua_prune.c:218 +msgid "No Files found to prune.\n" +msgstr "" + +#: src/dird/ua_prune.c:240 +#, c-format +msgid "Pruned Files from %s Jobs for client %s from catalog.\n" +msgstr "" + +#: src/dird/ua_prune.c:364 +#, c-format +msgid "Pruned %d %s for client %s from catalog.\n" +msgstr "" + +#: src/dird/ua_prune.c:365 +msgid "Jobs" +msgstr "" + +#: src/dird/ua_prune.c:367 +msgid "No Jobs found to prune.\n" +msgstr "" + +#: src/dird/ua_purge.c:90 +msgid "" +"\n" +"This command is can be DANGEROUS!!!\n" +"\n" +"It purges (deletes) all Files from a Job,\n" +"JobId, Client or Volume; or it purges (deletes)\n" +"all Jobs from a Client or Volume without regard\n" +"for retention periods. Normally you should use the\n" +"PRUNE command, which respects retention periods.\n" +msgstr "" + +#: src/dird/ua_purge.c:152 +msgid "Choose item to purge" +msgstr "" + +#: src/dird/ua_purge.c:199 +#, c-format +msgid "Begin purging files for Client \"%s\"\n" +msgstr "" + +#: src/dird/ua_purge.c:208 src/dird/ua_purge.c:258 +#, c-format +msgid "No Files found for client %s to purge from %s catalog.\n" +msgstr "" + +#: src/dird/ua_purge.c:211 +#, c-format +msgid "Files for %d Jobs for client \"%s\" purged from %s catalog.\n" +msgstr "" + +#: src/dird/ua_purge.c:249 +#, c-format +msgid "Begin purging jobs from Client \"%s\"\n" +msgstr "" + +#: src/dird/ua_purge.c:261 +#, c-format +msgid "%d Jobs for client %s purged from %s catalog.\n" +msgstr "" + +#: src/dird/ua_purge.c:407 +#, c-format +msgid "" +"\n" +"Volume \"%s\" has VolStatus \"%s\" and cannot be purged.\n" +"The VolStatus must be: Append, Full, Used, or Error to be purged.\n" +msgstr "" + +#: src/dird/ua_purge.c:440 +#, c-format +msgid "%d File%s on Volume \"%s\" purged from catalog.\n" +msgstr "" + +#: src/dird/ua_purge.c:481 +#, c-format +msgid "" +"There are no more Jobs associated with Volume \"%s\". 
Marking it purged.\n" +msgstr "" + +#: src/dird/ua_purge.c:522 +#, c-format +msgid "Unable move recycled Volume in full Pool \"%s\" MaxVols=%d\n" +msgstr "" + +#: src/dird/ua_purge.c:535 +#, c-format +msgid "All records pruned from Volume \"%s\"; marking it \"Purged\"\n" +msgstr "" + +#: src/dird/ua_purge.c:540 +#, c-format +msgid "Cannot purge Volume with VolStatus=%s\n" +msgstr "" + +#: src/dird/ua_query.c:72 src/findlib/create_file.c:283 +#: src/findlib/create_file.c:383 +#, c-format +msgid "Could not open %s: ERR=%s\n" +msgstr "" + +#: src/dird/ua_query.c:77 +msgid "Available queries:\n" +msgstr "" + +#: src/dird/ua_query.c:84 +msgid "Choose a query" +msgstr "" + +#: src/dird/ua_query.c:98 +msgid "Could not find query.\n" +msgstr "" + +#: src/dird/ua_query.c:116 +msgid "Too many prompts in query, max is 9.\n" +msgstr "" + +#: src/dird/ua_query.c:219 +#, c-format +msgid "Warning prompt %d missing.\n" +msgstr "" + +#: src/dird/ua_query.c:264 +msgid "" +"Entering SQL query mode.\n" +"Terminate each query with a semicolon.\n" +"Terminate query mode with a blank line.\n" +msgstr "" + +#: src/dird/ua_query.c:267 src/dird/ua_query.c:283 +msgid "Enter SQL query: " +msgstr "" + +#: src/dird/ua_query.c:285 +msgid "Add to SQL query: " +msgstr "" + +#: src/dird/ua_query.c:288 +msgid "End query mode.\n" +msgstr "" + +#: src/dird/ua_restore.c:133 +msgid "\"RegexWhere\" specification not authorized.\n" +msgstr "" + +#: src/dird/ua_restore.c:140 +msgid "\"where\" specification not authorized.\n" +msgstr "" + +#: src/dird/ua_restore.c:162 +msgid "" +"No Restore Job Resource found in bacula-dir.conf.\n" +"You must create at least one before running this command.\n" +msgstr "" + +#: src/dird/ua_restore.c:178 +msgid "Restore not done.\n" +msgstr "" + +#: src/dird/ua_restore.c:190 +msgid "Unable to construct a valid BSR. 
Cannot continue.\n" +msgstr "" + +#: src/dird/ua_restore.c:194 src/dird/ua_restore.c:209 +msgid "No files selected to be restored.\n" +msgstr "" + +#: src/dird/ua_restore.c:202 +msgid "" +"\n" +"1 file selected to be restored.\n" +"\n" +msgstr "" + +#: src/dird/ua_restore.c:205 +#, c-format +msgid "" +"\n" +"%s files selected to be restored.\n" +"\n" +msgstr "" + +#: src/dird/ua_restore.c:224 +msgid "No Client resource found!\n" +msgstr "" + +#: src/dird/ua_restore.c:329 +#, c-format +msgid "Missing value for keyword: %s\n" +msgstr "" + +#: src/dird/ua_restore.c:407 +msgid "List last 20 Jobs run" +msgstr "" + +#: src/dird/ua_restore.c:408 +msgid "List Jobs where a given File is saved" +msgstr "" + +#: src/dird/ua_restore.c:409 +msgid "Enter list of comma separated JobIds to select" +msgstr "" + +#: src/dird/ua_restore.c:410 +msgid "Enter SQL list command" +msgstr "" + +#: src/dird/ua_restore.c:411 +msgid "Select the most recent backup for a client" +msgstr "" + +#: src/dird/ua_restore.c:412 +msgid "Select backup for a client before a specified time" +msgstr "" + +#: src/dird/ua_restore.c:413 +msgid "Enter a list of files to restore" +msgstr "" + +#: src/dird/ua_restore.c:414 +msgid "Enter a list of files to restore before a specified time" +msgstr "" + +#: src/dird/ua_restore.c:415 +msgid "Find the JobIds of the most recent backup for a client" +msgstr "" + +#: src/dird/ua_restore.c:416 +msgid "Find the JobIds for a backup for a client before a specified time" +msgstr "" + +#: src/dird/ua_restore.c:417 +msgid "Enter a list of directories to restore for found JobIds" +msgstr "" + +#: src/dird/ua_restore.c:418 src/dird/ua_status.c:760 src/filed/status.c:256 +#: src/stored/status.c:521 src/wx-console/wxbconfigpanel.cpp:212 +msgid "Cancel" +msgstr "" + +#: src/dird/ua_restore.c:459 +#, c-format +msgid "Unknown keyword: %s\n" +msgstr "" + +#: src/dird/ua_restore.c:488 src/dird/ua_update.c:833 +#, c-format +msgid "Improper date format: %s\n" +msgstr "" + +#: src/dird/ua_restore.c:523 src/dird/ua_select.c:620 +#, c-format +msgid "Error: Pool resource \"%s\" does not exist.\n" +msgstr "" + +#: src/dird/ua_restore.c:528 +#, c-format +msgid "Error: Pool resource \"%s\" access not allowed.\n" +msgstr "" + +#: src/dird/ua_restore.c:544 +msgid "" +"\n" +"First you select one or more JobIds that contain files\n" +"to be restored. You will be presented several methods\n" +"of specifying the JobIds. 
Then you will be allowed to\n" +"select which files from those JobIds are to be restored.\n" +"\n" +msgstr "" + +#: src/dird/ua_restore.c:556 +msgid "To select the JobIds, you have the following choices:\n" +msgstr "" + +#: src/dird/ua_restore.c:561 +msgid "Select item: " +msgstr "" + +#: src/dird/ua_restore.c:566 src/dird/ua_restore.c:601 +msgid "SQL query not authorized.\n" +msgstr "" + +#: src/dird/ua_restore.c:579 +msgid "Enter Filename (no path):" +msgstr "" + +#: src/dird/ua_restore.c:594 src/dird/ua_restore.c:702 +msgid "Enter JobId(s), comma separated, to restore: " +msgstr "" + +#: src/dird/ua_restore.c:604 +msgid "Enter SQL list command: " +msgstr "" + +#: src/dird/ua_restore.c:638 src/dird/ua_restore.c:661 +msgid "" +"Enter file names with paths, or < to enter a filename\n" +"containing a list of file names with paths, and terminate\n" +"them with a blank line.\n" +msgstr "" + +#: src/dird/ua_restore.c:642 src/dird/ua_restore.c:665 +msgid "Enter full filename: " +msgstr "" + +#: src/dird/ua_restore.c:700 +#, c-format +msgid "You have already selected the following JobIds: %s\n" +msgstr "" + +#: src/dird/ua_restore.c:717 +msgid "" +"Enter full directory names or start the name\n" +"with a < to indicate it is a filename containing a list\n" +"of directories and terminate them with a blank line.\n" +msgstr "" + +#: src/dird/ua_restore.c:721 +msgid "Enter directory name: " +msgstr "" + +#: src/dird/ua_restore.c:752 +msgid "Invalid JobId in list.\n" +msgstr "" + +#: src/dird/ua_restore.c:765 +#, c-format +msgid "Unable to get Job record for JobId=%s: ERR=%s\n" +msgstr "" + +#: src/dird/ua_restore.c:771 +#, c-format +msgid "Access to JobId=%s (Job \"%s\") not authorized. Not selected.\n" +msgstr "" + +#: src/dird/ua_restore.c:784 +msgid "No Jobs selected.\n" +msgstr "" + +#: src/dird/ua_restore.c:788 +#, c-format +msgid "You have selected the following JobIds: %s\n" +msgstr "" + +#: src/dird/ua_restore.c:790 +#, c-format +msgid "You have selected the following JobId: %s\n" +msgstr "" + +#: src/dird/ua_restore.c:800 +msgid "" +"The restored files will the most current backup\n" +"BEFORE the date you specify below.\n" +"\n" +msgstr "" + +#: src/dird/ua_restore.c:803 +msgid "Enter date as YYYY-MM-DD HH:MM:SS :" +msgstr "" + +#: src/dird/ua_restore.c:809 +msgid "Improper date format.\n" +msgstr "" + +#: src/dird/ua_restore.c:830 +#, c-format +msgid "Cannot open file %s: ERR=%s\n" +msgstr "" + +#: src/dird/ua_restore.c:838 src/dird/ua_restore.c:842 +#, c-format +msgid "Error occurred on line %d of file \"%s\"\n" +msgstr "" + +#: src/dird/ua_restore.c:886 src/dird/ua_restore.c:915 +#, c-format +msgid "No database record found for: %s\n" +msgstr "" + +#: src/dird/ua_restore.c:902 +msgid "No JobId specified cannot continue.\n" +msgstr "" + +#: src/dird/ua_restore.c:936 +#, c-format +msgid "No table found: %s\n" +msgstr "" + +#: src/dird/ua_restore.c:1033 +#, c-format +msgid "" +"\n" +"Building directory tree for JobId %s ... " +msgstr "" + +#: src/dird/ua_restore.c:1045 +msgid "" +"\n" +"There were no files inserted into the tree, so file selection\n" +"is not possible.Most likely your retention policy pruned the files\n" +msgstr "" + +#: src/dird/ua_restore.c:1047 +msgid "" +"\n" +"Do you want to restore all the files? 
(yes|no): " +msgstr "" + +#: src/dird/ua_restore.c:1063 +#, c-format +msgid "" +"\n" +"1 Job, %s files inserted into the tree and marked for extraction.\n" +msgstr "" + +#: src/dird/ua_restore.c:1067 +#, c-format +msgid "" +"\n" +"1 Job, %s files inserted into the tree.\n" +msgstr "" + +#: src/dird/ua_restore.c:1073 +#, c-format +msgid "" +"\n" +"%d Jobs, %s files inserted into the tree and marked for extraction.\n" +msgstr "" + +#: src/dird/ua_restore.c:1077 +#, c-format +msgid "" +"\n" +"%d Jobs, %s files inserted into the tree.\n" +msgstr "" + +#: src/dird/ua_restore.c:1150 +#, c-format +msgid "Error getting FileSet \"%s\": ERR=%s\n" +msgstr "" + +#: src/dird/ua_restore.c:1158 src/dird/ua_select.c:183 +msgid "The defined FileSet resources are:\n" +msgstr "" + +#: src/dird/ua_restore.c:1162 src/dird/ua_run.c:267 src/dird/ua_select.c:191 +msgid "FileSet" +msgstr "" + +#: src/dird/ua_restore.c:1162 src/dird/ua_select.c:191 +msgid "Select FileSet resource" +msgstr "" + +#: src/dird/ua_restore.c:1164 +#, c-format +msgid "No FileSet found for client \"%s\".\n" +msgstr "" + +#: src/dird/ua_restore.c:1170 +#, c-format +msgid "Error getting FileSet record: %s\n" +msgstr "" + +#: src/dird/ua_restore.c:1171 +msgid "" +"This probably means you modified the FileSet.\n" +"Continuing anyway.\n" +msgstr "" + +#: src/dird/ua_restore.c:1186 +#, c-format +msgid "Pool \"%s\" not found, using any pool.\n" +msgstr "" + +#: src/dird/ua_restore.c:1213 src/dird/ua_restore.c:1229 +#, c-format +msgid "No Full backup before %s found.\n" +msgstr "" + +#: src/dird/ua_restore.c:1252 +msgid "No jobs found.\n" +msgstr "" + +#: src/dird/ua_restore.c:1411 +#, c-format +msgid "Warning default storage overridden by \"%s\" on command line.\n" +msgstr "" + +#: src/dird/ua_restore.c:1427 +#, c-format +msgid "Storage \"%s\" not found, using Storage \"%s\" from MediaType \"%s\".\n" +msgstr "" + +#: src/dird/ua_restore.c:1435 +#, c-format +msgid "" +"\n" +"Unable to find Storage resource for\n" +"MediaType \"%s\", needed by the Jobs you selected.\n" +msgstr "" + +#: src/dird/ua_run.c:152 src/dird/ua_run.c:338 +msgid "Invalid time, using current time.\n" +msgstr "" + +#: src/dird/ua_run.c:174 +#, c-format +msgid "Invalid replace option: %s\n" +msgstr "" + +#: src/dird/ua_run.c:242 +msgid "OK to run? 
(yes/mod/no): " +msgstr "" + +#: src/dird/ua_run.c:260 src/dird/ua_select.c:63 +msgid "mod" +msgstr "" + +#: src/dird/ua_run.c:263 src/dird/ua_update.c:518 +msgid "Parameters to modify:\n" +msgstr "" + +#: src/dird/ua_run.c:264 +msgid "Level" +msgstr "" + +#: src/dird/ua_run.c:269 +msgid "Restore Client" +msgstr "" + +#: src/dird/ua_run.c:273 src/wx-console/wxbrestorepanel.cpp:356 +#: src/wx-console/wxbrestorepanel.cpp:844 +#: src/wx-console/wxbrestorepanel.cpp:1891 +msgid "When" +msgstr "" + +#: src/dird/ua_run.c:274 src/wx-console/wxbrestorepanel.cpp:357 +#: src/wx-console/wxbrestorepanel.cpp:1117 +#: src/wx-console/wxbrestorepanel.cpp:1894 +msgid "Priority" +msgstr "" + +#: src/dird/ua_run.c:278 src/dird/ua_select.c:506 src/dird/ua_select.c:596 +#: src/dird/ua_update.c:529 src/wx-console/wxbrestorepanel.cpp:338 +#: src/wx-console/wxbrestorepanel.cpp:527 +#: src/wx-console/wxbrestorepanel.cpp:537 +#: src/wx-console/wxbrestorepanel.cpp:1807 +msgid "Pool" +msgstr "" + +#: src/dird/ua_run.c:280 +msgid "Verify Job" +msgstr "" + +#: src/dird/ua_run.c:283 src/wx-console/wxbrestorepanel.cpp:349 +#: src/wx-console/wxbrestorepanel.cpp:1872 +msgid "Bootstrap" +msgstr "" + +#: src/dird/ua_run.c:284 src/wx-console/wxbrestorepanel.cpp:350 +#: src/wx-console/wxbrestorepanel.cpp:1093 +#: src/wx-console/wxbrestorepanel.cpp:1874 +msgid "Where" +msgstr "" + +#: src/dird/ua_run.c:285 +msgid "File Relocation" +msgstr "" + +#: src/dird/ua_run.c:286 src/wx-console/wxbrestorepanel.cpp:352 +#: src/wx-console/wxbrestorepanel.cpp:1101 +#: src/wx-console/wxbrestorepanel.cpp:1878 +#: src/wx-console/wxbrestorepanel.cpp:1879 +#: src/wx-console/wxbrestorepanel.cpp:1880 +#: src/wx-console/wxbrestorepanel.cpp:1881 +#: src/wx-console/wxbrestorepanel.cpp:1882 +msgid "Replace" +msgstr "" + +#: src/dird/ua_run.c:287 +msgid "JobId" +msgstr "" + +#: src/dird/ua_run.c:289 src/dird/ua_run.c:487 src/dird/ua_update.c:535 +msgid "Select parameter to modify" +msgstr "" + +#: src/dird/ua_run.c:298 src/dird/ua_run.c:1205 +msgid "user selection" +msgstr "" + +#: src/dird/ua_run.c:330 +msgid "" +"Please enter desired start time as YYYY-MM-DD HH:MM:SS (return for now): " +msgstr "" + +#: src/dird/ua_run.c:345 +msgid "Enter new Priority: " +msgstr "" + +#: src/dird/ua_run.c:349 +msgid "Priority must be a positive integer.\n" +msgstr "" + +#: src/dird/ua_run.c:369 +msgid "Please enter the Bootstrap file name: " +msgstr "" + +#: src/dird/ua_run.c:380 +#, c-format +msgid "Warning cannot open %s: ERR=%s\n" +msgstr "" + +#: src/dird/ua_run.c:399 +msgid "Please enter path prefix for restore (/ for none): " +msgstr "" + +#: src/dird/ua_run.c:421 +msgid "Replace:\n" +msgstr "" + +#: src/dird/ua_run.c:425 +msgid "Select replace option" +msgstr "" + +#: src/dird/ua_run.c:435 +msgid "" +"You must set the bootstrap file to NULL to be able to specify a JobId.\n" +msgstr "" + +#: src/dird/ua_run.c:455 +msgid "Job failed.\n" +msgstr "" + +#: src/dird/ua_run.c:458 +#, c-format +msgid "Job queued. 
JobId=%s\n" +msgstr "" + +#: src/dird/ua_run.c:464 +msgid "Job not run.\n" +msgstr "" + +#: src/dird/ua_run.c:476 +#, c-format +msgid "strip_prefix=%s add_prefix=%s add_suffix=%s\n" +msgstr "" + +#: src/dird/ua_run.c:479 +msgid "This will replace your current Where value\n" +msgstr "" + +#: src/dird/ua_run.c:480 +msgid "Strip prefix" +msgstr "" + +#: src/dird/ua_run.c:481 +msgid "Add prefix" +msgstr "" + +#: src/dird/ua_run.c:482 +msgid "Add file suffix" +msgstr "" + +#: src/dird/ua_run.c:483 +msgid "Enter a regexp" +msgstr "" + +#: src/dird/ua_run.c:484 +msgid "Test filename manipulation" +msgstr "" + +#: src/dird/ua_run.c:485 +msgid "Use this ?" +msgstr "" + +#: src/dird/ua_run.c:490 +msgid "Please enter path prefix to strip: " +msgstr "" + +#: src/dird/ua_run.c:498 +msgid "Please enter path prefix to add (/ for none): " +msgstr "" + +#: src/dird/ua_run.c:509 +msgid "Please enter file suffix to add: " +msgstr "" + +#: src/dird/ua_run.c:516 +msgid "Please enter a valid regexp (!from!to!): " +msgstr "" + +#: src/dird/ua_run.c:529 +#, c-format +msgid "regexwhere=%s\n" +msgstr "" + +#: src/dird/ua_run.c:535 +#, c-format +msgid "strip_prefix=%s add_prefix=%s add_suffix=%s result=%s\n" +msgstr "" + +#: src/dird/ua_run.c:542 +msgid "Cannot use your regexp\n" +msgstr "" + +#: src/dird/ua_run.c:546 +msgid "Please enter filename to test: " +msgstr "" + +#: src/dird/ua_run.c:548 +#, c-format +msgid "%s -> %s\n" +msgstr "" + +#: src/dird/ua_run.c:592 +msgid "Cannot use your regexp.\n" +msgstr "" + +#: src/dird/ua_run.c:605 src/dird/ua_run.c:631 +msgid "Levels:\n" +msgstr "" + +#: src/dird/ua_run.c:606 src/filed/status.c:373 src/lib/util.c:329 +#: src/stored/status.c:560 +msgid "Base" +msgstr "" + +#: src/dird/ua_run.c:607 src/filed/status.c:375 src/lib/util.c:331 +#: src/stored/status.c:562 +msgid "Full" +msgstr "" + +#: src/dird/ua_run.c:608 src/filed/status.c:378 src/lib/util.c:334 +#: src/stored/status.c:565 +msgid "Incremental" +msgstr "" + +#: src/dird/ua_run.c:609 src/filed/status.c:381 src/lib/util.c:337 +#: src/stored/status.c:568 +msgid "Differential" +msgstr "" + +#: src/dird/ua_run.c:610 src/filed/status.c:384 src/lib/util.c:340 +#: src/stored/status.c:571 +msgid "Since" +msgstr "" + +#: src/dird/ua_run.c:611 src/dird/ua_run.c:637 +msgid "Select level" +msgstr "" + +#: src/dird/ua_run.c:632 +msgid "Initialize Catalog" +msgstr "" + +#: src/dird/ua_run.c:633 src/filed/status.c:387 src/lib/util.c:343 +#: src/stored/status.c:574 +msgid "Verify Catalog" +msgstr "" + +#: src/dird/ua_run.c:634 src/lib/util.c:349 +msgid "Verify Volume to Catalog" +msgstr "" + +#: src/dird/ua_run.c:635 src/lib/util.c:352 +msgid "Verify Disk to Catalog" +msgstr "" + +#: src/dird/ua_run.c:636 +msgid "Verify Volume Data (not yet implemented)" +msgstr "" + +#: src/dird/ua_run.c:657 +msgid "Level not appropriate for this Job. 
Cannot be changed.\n" +msgstr "" + +#: src/dird/ua_run.c:671 +#, c-format +msgid "" +"Run %s job\n" +"JobName: %s\n" +"FileSet: %s\n" +"Client: %s\n" +"Storage: %s\n" +"When: %s\n" +"Priority: %d\n" +msgstr "" + +#: src/dird/ua_run.c:678 src/lib/util.c:296 +msgid "Admin" +msgstr "" + +#: src/dird/ua_run.c:691 +#, c-format +msgid "" +"Run %s job\n" +"JobName: %s\n" +"Level: %s\n" +"Client: %s\n" +"FileSet: %s\n" +"Pool: %s (From %s)\n" +"Storage: %s (From %s)\n" +"When: %s\n" +"Priority: %d\n" +msgstr "" + +#: src/dird/ua_run.c:700 src/lib/util.c:287 +msgid "Backup" +msgstr "" + +#: src/dird/ua_run.c:723 +#, c-format +msgid "" +"Run %s job\n" +"JobName: %s\n" +"Level: %s\n" +"Client: %s\n" +"FileSet: %s\n" +"Pool: %s (From %s)\n" +"Storage: %s (From %s)\n" +"Verify Job: %s\n" +"Verify List: %s\n" +"When: %s\n" +"Priority: %d\n" +msgstr "" + +#: src/dird/ua_run.c:734 src/lib/util.c:290 +msgid "Verify" +msgstr "" + +#: src/dird/ua_run.c:752 +msgid "Please enter a JobId for restore: " +msgstr "" + +#: src/dird/ua_run.c:764 +#, c-format +msgid "" +"Run Restore job\n" +"JobName: f%s\n" +"Bootstrap: %s\n" +"RegexWhere: %s\n" +"Replace: %s\n" +"FileSet: %s\n" +"Backup Client: %s\n" +"Restore Client: %s\n" +"Storage: %s\n" +"When: %s\n" +"Catalog: %s\n" +"Priority: %d\n" +msgstr "" + +#: src/dird/ua_run.c:789 +#, c-format +msgid "" +"Run Restore job\n" +"JobName: %s\n" +"Bootstrap: %s\n" +"Where: %s\n" +"Replace: %s\n" +"FileSet: %s\n" +"Backup Client: %s\n" +"Restore Client: %s\n" +"Storage: %s\n" +"When: %s\n" +"Catalog: %s\n" +"Priority: %d\n" +msgstr "" + +#: src/dird/ua_run.c:816 +#, c-format +msgid "" +"Run Restore job\n" +"JobName: %s\n" +"Bootstrap: %s\n" +msgstr "" + +#: src/dird/ua_run.c:824 +#, c-format +msgid "RegexWhere: %s\n" +msgstr "" + +#: src/dird/ua_run.c:827 +#, c-format +msgid "Where: %s\n" +msgstr "" + +#: src/dird/ua_run.c:831 +#, c-format +msgid "" +"Replace: %s\n" +"Client: %s\n" +"Storage: %s\n" +"JobId: %s\n" +"When: %s\n" +"Catalog: %s\n" +"Priority: %d\n" +msgstr "" + +#: src/dird/ua_run.c:850 +#, c-format +msgid "" +"Run Migration job\n" +"JobName: %s\n" +"Bootstrap: %s\n" +"Client: %s\n" +"FileSet: %s\n" +"Pool: %s (From %s)\n" +"Read Storage: %s (From %s)\n" +"Write Storage: %s (From %s)\n" +"JobId: %s\n" +"When: %s\n" +"Catalog: %s\n" +"Priority: %d\n" +msgstr "" + +#: src/dird/ua_run.c:875 +#, c-format +msgid "Unknown Job Type=%d\n" +msgstr "" + +#: src/dird/ua_run.c:935 +#, c-format +msgid "Value missing for keyword %s\n" +msgstr "" + +#: src/dird/ua_run.c:942 +msgid "Job name specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:950 +msgid "JobId specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:959 src/dird/ua_run.c:1103 +msgid "Client specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:967 +msgid "FileSet specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:975 +msgid "Level specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:984 +msgid "Storage specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:992 +msgid "RegexWhere or Where specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:997 +msgid "No authorization for \"regexwhere\" specification.\n" +msgstr "" + +#: src/dird/ua_run.c:1004 +msgid "Where or RegexWhere specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:1009 +msgid "No authoriztion for \"where\" specification.\n" +msgstr "" + +#: src/dird/ua_run.c:1016 +msgid "Bootstrap specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:1024 +msgid "Replace specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:1032 +msgid "When specified 
twice.\n" +msgstr "" + +#: src/dird/ua_run.c:1040 +msgid "Priority specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:1045 +msgid "Priority must be positive nonzero setting it to 10.\n" +msgstr "" + +#: src/dird/ua_run.c:1055 +msgid "Verify Job specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:1087 +msgid "Migration Job specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:1095 +msgid "Pool specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:1111 +msgid "Restore Client specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:1135 +#, c-format +msgid "Invalid keyword: %s\n" +msgstr "" + +#: src/dird/ua_run.c:1146 +#, c-format +msgid "Catalog \"%s\" not found\n" +msgstr "" + +#: src/dird/ua_run.c:1150 +#, c-format +msgid "No authorization. Catalog \"%s\".\n" +msgstr "" + +#: src/dird/ua_run.c:1161 +#, c-format +msgid "Job \"%s\" not found\n" +msgstr "" + +#: src/dird/ua_run.c:1168 +msgid "A job name must be specified.\n" +msgstr "" + +#: src/dird/ua_run.c:1174 +#, c-format +msgid "No authorization. Job \"%s\".\n" +msgstr "" + +#: src/dird/ua_run.c:1182 +#, c-format +msgid "Pool \"%s\" not found.\n" +msgstr "" + +#: src/dird/ua_run.c:1192 +#, c-format +msgid "No authorization. Pool \"%s\".\n" +msgstr "" + +#: src/dird/ua_run.c:1202 +#, c-format +msgid "Storage \"%s\" not found.\n" +msgstr "" + +#: src/dird/ua_run.c:1214 +#, c-format +msgid "No authorization. Storage \"%s\".\n" +msgstr "" + +#: src/dird/ua_run.c:1234 src/dird/ua_run.c:1254 +#, c-format +msgid "No authorization. Client \"%s\".\n" +msgstr "" + +#: src/dird/ua_run.c:1244 +#, c-format +msgid "Restore Client \"%s\" not found.\n" +msgstr "" + +#: src/dird/ua_run.c:1264 +#, c-format +msgid "FileSet \"%s\" not found.\n" +msgstr "" + +#: src/dird/ua_run.c:1273 +#, c-format +msgid "No authorization. FileSet \"%s\".\n" +msgstr "" + +#: src/dird/ua_run.c:1281 +#, c-format +msgid "Verify Job \"%s\" not found.\n" +msgstr "" + +#: src/dird/ua_run.c:1291 +#, c-format +msgid "Migration Job \"%s\" not found.\n" +msgstr "" + +#: src/dird/ua_select.c:55 +#, c-format +msgid "The current %s retention period is: %s\n" +msgstr "" + +#: src/dird/ua_select.c:60 +msgid "Continue? 
(yes/mod/no): " +msgstr "" + +#: src/dird/ua_select.c:64 +msgid "Enter new retention period: " +msgstr "" + +#: src/dird/ua_select.c:68 +msgid "Invalid period.\n" +msgstr "" + +#: src/dird/ua_select.c:144 +msgid "You have the following choices:\n" +msgstr "" + +#: src/dird/ua_select.c:160 +msgid "The defined Storage resources are:\n" +msgstr "" + +#: src/dird/ua_select.c:168 +msgid "Select Storage resource" +msgstr "" + +#: src/dird/ua_select.c:224 +msgid "You must specify a \"use \" command before continuing.\n" +msgstr "" + +#: src/dird/ua_select.c:230 +msgid "The defined Catalog resources are:\n" +msgstr "" + +#: src/dird/ua_select.c:238 +msgid "Catalog" +msgstr "" + +#: src/dird/ua_select.c:238 +msgid "Select Catalog resource" +msgstr "" + +#: src/dird/ua_select.c:255 +msgid "The defined Job resources are:\n" +msgstr "" + +#: src/dird/ua_select.c:263 +msgid "Select Job resource" +msgstr "" + +#: src/dird/ua_select.c:278 +msgid "The defined Restore Job resources are:\n" +msgstr "" + +#: src/dird/ua_select.c:286 +msgid "Select Restore Job" +msgstr "" + +#: src/dird/ua_select.c:303 +msgid "The defined Client resources are:\n" +msgstr "" + +#: src/dird/ua_select.c:311 +msgid "Select Client (File daemon) resource" +msgstr "" + +#: src/dird/ua_select.c:338 +#, c-format +msgid "Error: Client resource %s does not exist.\n" +msgstr "" + +#: src/dird/ua_select.c:363 +#, c-format +msgid "Could not find Client %s: ERR=%s" +msgstr "" + +#: src/dird/ua_select.c:373 src/dird/ua_select.c:427 +#, c-format +msgid "Could not find Client \"%s\": ERR=%s" +msgstr "" + +#: src/dird/ua_select.c:402 +#, c-format +msgid "Error obtaining client ids. ERR=%s\n" +msgstr "" + +#: src/dird/ua_select.c:406 +msgid "No clients defined. You must run a job before using this command.\n" +msgstr "" + +#: src/dird/ua_select.c:410 +msgid "Defined Clients:\n" +msgstr "" + +#: src/dird/ua_select.c:420 +msgid "Select the Client" +msgstr "" + +#: src/dird/ua_select.c:453 src/dird/ua_select.c:477 src/dird/ua_select.c:513 +#, c-format +msgid "Could not find Pool \"%s\": ERR=%s" +msgstr "" + +#: src/dird/ua_select.c:492 +msgid "No pools defined. 
Use the \"create\" command to create one.\n" +msgstr "" + +#: src/dird/ua_select.c:496 +msgid "Defined Pools:\n" +msgstr "" + +#: src/dird/ua_select.c:506 +msgid "Select the Pool" +msgstr "" + +#: src/dird/ua_select.c:536 +#, c-format +msgid "No access to Pool \"%s\"\n" +msgstr "" + +#: src/dird/ua_select.c:562 +msgid "Enter MediaId or Volume name: " +msgstr "" + +#: src/dird/ua_select.c:588 +msgid "The defined Pool resources are:\n" +msgstr "" + +#: src/dird/ua_select.c:596 +msgid "Select Pool resource" +msgstr "" + +#: src/dird/ua_select.c:631 +msgid "Enter the JobId to select: " +msgstr "" + +#: src/dird/ua_select.c:669 +#, c-format +msgid "Could not find Job \"%s\": ERR=%s" +msgstr "" + +#: src/dird/ua_select.c:749 +#, c-format +msgid "Automatically selected %s: %s\n" +msgstr "" + +#: src/dird/ua_select.c:754 +#, c-format +msgid "Cannot select %s in batch mode.\n" +msgstr "" + +#: src/dird/ua_select.c:772 +#, c-format +msgid "Selection list for \"%s\" is empty!\n" +msgstr "" + +#: src/dird/ua_select.c:778 +#, c-format +msgid "Automatically selected: %s\n" +msgstr "" + +#: src/dird/ua_select.c:790 +msgid "Selection aborted, nothing done.\n" +msgstr "" + +#: src/dird/ua_select.c:795 +#, c-format +msgid "Please enter a number between 1 and %d\n" +msgstr "" + +#: src/dird/ua_select.c:844 +msgid "Storage name given twice.\n" +msgstr "" + +#: src/dird/ua_select.c:861 +#, c-format +msgid "Expecting jobid=nn command, got: %s\n" +msgstr "" + +#: src/dird/ua_select.c:865 +#, c-format +msgid "JobId %s is not running.\n" +msgstr "" + +#: src/dird/ua_select.c:875 +#, c-format +msgid "Expecting job=xxx, got: %s.\n" +msgstr "" + +#: src/dird/ua_select.c:879 src/dird/ua_select.c:891 +#, c-format +msgid "Job \"%s\" is not running.\n" +msgstr "" + +#: src/dird/ua_select.c:887 +#, c-format +msgid "Expecting ujobid=xxx, got: %s.\n" +msgstr "" + +#: src/dird/ua_select.c:907 +#, c-format +msgid "Storage resource \"%s\": not found\n" +msgstr "" + +#: src/dird/ua_select.c:939 +msgid "Enter autochanger drive[0]: " +msgstr "" + +#: src/dird/ua_select.c:960 +msgid "Enter autochanger slot: " +msgstr "" + +#: src/dird/ua_select.c:990 +msgid "Media Types defined in conf file:\n" +msgstr "" + +#: src/dird/ua_select.c:996 +msgid "Media Type" +msgstr "" + +#: src/dird/ua_select.c:996 +msgid "Select the Media Type" +msgstr "" + +#: src/dird/ua_server.c:72 +#, c-format +msgid "Cannot create UA thread: %s\n" +msgstr "" + +#: src/dird/ua_server.c:159 +msgid "You have messages.\n" +msgstr "" + +#: src/dird/ua_status.c:143 +msgid "Status available for:\n" +msgstr "" + +#: src/dird/ua_status.c:149 +msgid "Select daemon type for status" +msgstr "" + +#: src/dird/ua_status.c:267 +#, c-format +msgid "Daemon started %s, 1 Job run since started.\n" +msgstr "" + +#: src/dird/ua_status.c:270 +#, c-format +msgid "Daemon started %s, %d Jobs run since started.\n" +msgstr "" + +#: src/dird/ua_status.c:273 src/filed/status.c:129 src/stored/status.c:90 +#, c-format +msgid " Heap: heap=%s smbytes=%s max_bytes=%s bufs=%s max_bufs=%s\n" +msgstr "" + +#: src/dird/ua_status.c:299 src/dird/ua_status.c:526 src/dird/ua_status.c:702 +#: src/filed/status.c:204 src/filed/status.c:220 src/filed/status.c:282 +msgid "====\n" +msgstr "" + +#: src/dird/ua_status.c:314 +#, c-format +msgid "" +"\n" +"Failed to connect to Storage daemon %s.\n" +"====\n" +msgstr "" + +#: src/dird/ua_status.c:352 +#, c-format +msgid "" +"Failed to connect to Client %s.\n" +"====\n" +msgstr "" + +#: src/dird/ua_status.c:360 +msgid "Connected to file daemon\n" +msgstr "" + +#: 
src/dird/ua_status.c:375 +msgid "" +"\n" +"Scheduled Jobs:\n" +msgstr "" + +#: src/dird/ua_status.c:376 +msgid "" +"Level Type Pri Scheduled Name Volume\n" +msgstr "" + +#: src/dird/ua_status.c:377 +msgid "===================================================================================\n" +msgstr "" + +#: src/dird/ua_status.c:429 +#, c-format +msgid "%-14s %-8s %3d %-18s %-18s %s\n" +msgstr "" + +#: src/dird/ua_status.c:524 +msgid "No Scheduled Jobs.\n" +msgstr "" + +#: src/dird/ua_status.c:541 src/filed/status.c:144 src/stored/status.c:370 +msgid "" +"\n" +"Running Jobs:\n" +msgstr "" + +#: src/dird/ua_status.c:549 +#, c-format +msgid "Console connected at %s\n" +msgstr "" + +#: src/dird/ua_status.c:559 +msgid "" +"No Jobs running.\n" +"====\n" +msgstr "" + +#: src/dird/ua_status.c:564 +msgid " JobId Level Name Status\n" +msgstr "" + +#: src/dird/ua_status.c:565 src/filed/status.c:226 +msgid "======================================================================\n" +msgstr "" + +#: src/dird/ua_status.c:573 +msgid "is waiting execution" +msgstr "" + +#: src/dird/ua_status.c:576 +msgid "is running" +msgstr "" + +#: src/dird/ua_status.c:579 +msgid "is blocked" +msgstr "" + +#: src/dird/ua_status.c:582 +msgid "has terminated" +msgstr "" + +#: src/dird/ua_status.c:585 +msgid "has erred" +msgstr "" + +#: src/dird/ua_status.c:588 +msgid "has errors" +msgstr "" + +#: src/dird/ua_status.c:591 +msgid "has a fatal error" +msgstr "" + +#: src/dird/ua_status.c:594 +msgid "has verify differences" +msgstr "" + +#: src/dird/ua_status.c:597 +msgid "has been canceled" +msgstr "" + +#: src/dird/ua_status.c:602 +msgid "is waiting on Client" +msgstr "" + +#: src/dird/ua_status.c:604 +#, c-format +msgid "is waiting on Client %s" +msgstr "" + +#: src/dird/ua_status.c:612 src/dird/ua_status.c:614 +#, c-format +msgid "is waiting on Storage %s" +msgstr "" + +#: src/dird/ua_status.c:616 +msgid "is waiting on Storage" +msgstr "" + +#: src/dird/ua_status.c:622 +msgid "is waiting on max Storage jobs" +msgstr "" + +#: src/dird/ua_status.c:625 +msgid "is waiting on max Client jobs" +msgstr "" + +#: src/dird/ua_status.c:628 +msgid "is waiting on max Job jobs" +msgstr "" + +#: src/dird/ua_status.c:631 +msgid "is waiting on max total jobs" +msgstr "" + +#: src/dird/ua_status.c:634 +msgid "is waiting for its start time" +msgstr "" + +#: src/dird/ua_status.c:637 +msgid "is waiting for higher priority jobs to finish" +msgstr "" + +#: src/dird/ua_status.c:642 +#, c-format +msgid "is in unknown state %c" +msgstr "" + +#: src/dird/ua_status.c:656 +msgid "is waiting for a mount request" +msgstr "" + +#: src/dird/ua_status.c:663 +msgid "is waiting for an appendable Volume" +msgstr "" + +#: src/dird/ua_status.c:671 +msgid "is waiting for Client to connect to Storage daemon" +msgstr "" + +#: src/dird/ua_status.c:673 +#, c-format +msgid "is waiting for Client %s to connect to Storage %s" +msgstr "" + +#: src/dird/ua_status.c:690 +#, c-format +msgid "%6d %-6s %-20s %s\n" +msgstr "" + +#: src/dird/ua_status.c:712 +msgid "No Terminated Jobs.\n" +msgstr "" + +#: src/dird/ua_status.c:717 src/filed/status.c:216 src/stored/status.c:482 +msgid "" +"\n" +"Terminated Jobs:\n" +msgstr "" + +#: src/dird/ua_status.c:718 src/filed/status.c:224 src/stored/status.c:489 +msgid " JobId Level Files Bytes Status Finished Name \n" +msgstr "" + +#: src/dird/ua_status.c:719 +msgid "====================================================================\n" +msgstr "" + +#: src/dird/ua_status.c:750 src/filed/status.c:246 src/lib/util.c:179 +#: 
src/stored/status.c:511 +msgid "Created" +msgstr "" + +#: src/dird/ua_status.c:754 src/filed/status.c:250 src/lib/util.c:192 +#: src/lib/util.c:259 src/stored/status.c:515 +msgid "Error" +msgstr "" + +#: src/dird/ua_status.c:757 src/filed/status.c:253 src/stored/status.c:518 +msgid "Diffs" +msgstr "" + +#: src/dird/ua_status.c:763 src/filed/status.c:259 src/lib/util.c:188 +#: src/lib/util.c:255 src/stored/btape.c:1189 src/stored/status.c:524 +#: src/wx-console/wxbconfigpanel.cpp:201 +msgid "OK" +msgstr "" + +#: src/dird/ua_status.c:766 src/filed/status.c:262 src/stored/status.c:527 +msgid "Other" +msgstr "" + +#: src/dird/ua_status.c:769 src/filed/status.c:273 src/stored/status.c:538 +#, c-format +msgid "%6d %-6s %8s %10s %-7s %-8s %s\n" +msgstr "" + +#: src/dird/ua_status.c:777 src/stored/btape.c:195 +msgid "\n" +msgstr "" + +#: src/dird/ua_tree.c:72 +msgid "change current directory" +msgstr "" + +#: src/dird/ua_tree.c:73 +msgid "count marked files in and below the cd" +msgstr "" + +#: src/dird/ua_tree.c:74 src/dird/ua_tree.c:75 +msgid "long list current directory, wildcards allowed" +msgstr "" + +#: src/dird/ua_tree.c:76 +msgid "leave file selection mode" +msgstr "" + +#: src/dird/ua_tree.c:77 +msgid "estimate restore size" +msgstr "" + +#: src/dird/ua_tree.c:78 +msgid "same as done command" +msgstr "" + +#: src/dird/ua_tree.c:79 +msgid "find files, wildcards allowed" +msgstr "" + +#: src/dird/ua_tree.c:80 src/dird/ua_tree.c:90 +msgid "print help" +msgstr "" + +#: src/dird/ua_tree.c:81 +msgid "list current directory, wildcards allowed" +msgstr "" + +#: src/dird/ua_tree.c:82 +msgid "list the marked files in and below the cd" +msgstr "" + +#: src/dird/ua_tree.c:83 +msgid "mark dir/file to be restored recursively, wildcards allowed" +msgstr "" + +#: src/dird/ua_tree.c:84 +msgid "mark directory name to be restored (no files)" +msgstr "" + +#: src/dird/ua_tree.c:85 src/dird/ua_tree.c:86 +msgid "print current working directory" +msgstr "" + +#: src/dird/ua_tree.c:87 +msgid "unmark dir/file to be restored recursively in dir" +msgstr "" + +#: src/dird/ua_tree.c:88 +msgid "unmark directory name only no recursion" +msgstr "" + +#: src/dird/ua_tree.c:89 +msgid "quit and do not do restore" +msgstr "" + +#: src/dird/ua_tree.c:110 +msgid "" +"\n" +"You are now entering file selection mode where you add (mark) and\n" +"remove (unmark) files to be restored. No files are initially added, unless\n" +"you used the \"all\" keyword on the command line.\n" +"Enter \"done\" to leave this mode.\n" +"\n" +msgstr "" + +#: src/dird/ua_tree.c:120 src/dird/ua_tree.c:716 +#, c-format +msgid "cwd is: %s\n" +msgstr "" + +#: src/dird/ua_tree.c:129 src/dird/ua_tree.c:144 +msgid "Invalid command. Enter \"done\" to exit.\n" +msgstr "" + +#: src/dird/ua_tree.c:331 src/dird/ua_tree.c:343 src/dird/ua_tree.c:360 +msgid "No files marked.\n" +msgstr "" + +#: src/dird/ua_tree.c:345 +msgid "1 file marked.\n" +msgstr "" + +#: src/dird/ua_tree.c:347 +#, c-format +msgid "%s files marked.\n" +msgstr "" + +#: src/dird/ua_tree.c:375 +msgid "No directories marked.\n" +msgstr "" + +#: src/dird/ua_tree.c:377 +msgid "1 directory marked.\n" +msgstr "" + +#: src/dird/ua_tree.c:379 +#, c-format +msgid "%s directories marked.\n" +msgstr "" + +#: src/dird/ua_tree.c:400 +#, c-format +msgid "%s total files/dirs. 
%s marked to be restored.\n" +msgstr "" + +#: src/dird/ua_tree.c:411 +msgid "No file specification given.\n" +msgstr "" + +#: src/dird/ua_tree.c:562 +#, c-format +msgid "Node %s has no children.\n" +msgstr "" + +#: src/dird/ua_tree.c:653 +#, c-format +msgid "%d total files; %d marked to be restored; %s bytes.\n" +msgstr "" + +#: src/dird/ua_tree.c:687 +msgid "Too few or too many arguments. Try using double quotes.\n" +msgstr "" + +#: src/dird/ua_tree.c:699 +msgid "Invalid path given.\n" +msgstr "" + +#: src/dird/ua_tree.c:735 src/dird/ua_tree.c:747 +msgid "No files unmarked.\n" +msgstr "" + +#: src/dird/ua_tree.c:749 +msgid "1 file unmarked.\n" +msgstr "" + +#: src/dird/ua_tree.c:752 +#, c-format +msgid "%s files unmarked.\n" +msgstr "" + +#: src/dird/ua_tree.c:763 src/dird/ua_tree.c:780 +msgid "No directories unmarked.\n" +msgstr "" + +#: src/dird/ua_tree.c:782 +msgid "1 directory unmarked.\n" +msgstr "" + +#: src/dird/ua_tree.c:784 +#, c-format +msgid "%d directories unmarked.\n" +msgstr "" + +#: src/dird/ua_update.c:89 +msgid "Update choice:\n" +msgstr "" + +#: src/dird/ua_update.c:90 +msgid "Volume parameters" +msgstr "" + +#: src/dird/ua_update.c:91 +msgid "Pool from resource" +msgstr "" + +#: src/dird/ua_update.c:92 +msgid "Slots from autochanger" +msgstr "" + +#: src/dird/ua_update.c:93 +msgid "item" +msgstr "" + +#: src/dird/ua_update.c:93 +msgid "Choose catalog item to update" +msgstr "" + +#: src/dird/ua_update.c:133 +#, c-format +msgid "Invalid VolStatus specified: %s\n" +msgstr "" + +#: src/dird/ua_update.c:142 +#, c-format +msgid "New Volume status is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:152 +#, c-format +msgid "Invalid retention period specified: %s\n" +msgstr "" + +#: src/dird/ua_update.c:160 +#, c-format +msgid "New retention period is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:171 +#, c-format +msgid "Invalid use duration specified: %s\n" +msgstr "" + +#: src/dird/ua_update.c:179 +#, c-format +msgid "New use duration is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:193 +#, c-format +msgid "New max jobs is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:206 +#, c-format +msgid "New max files is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:217 +#, c-format +msgid "Invalid max. bytes specification: %s\n" +msgstr "" + +#: src/dird/ua_update.c:225 +#, c-format +msgid "New Max bytes is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:236 src/dird/ua_update.c:256 +msgid "Invalid value. 
It must be yes or no.\n" +msgstr "" + +#: src/dird/ua_update.c:244 +#, c-format +msgid "New Recycle flag is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:264 +#, c-format +msgid "New InChanger flag is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:282 +#, c-format +msgid "Invalid slot, it must be between 0 and MaxVols=%d\n" +msgstr "" + +#: src/dird/ua_update.c:291 src/dird/ua_update.c:637 +#, c-format +msgid "Error updating media record Slot: ERR=%s" +msgstr "" + +#: src/dird/ua_update.c:293 +#, c-format +msgid "New Slot is: %d\n" +msgstr "" + +#: src/dird/ua_update.c:318 +#, c-format +msgid "New Pool is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:352 +#, c-format +msgid "New RecyclePool is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:372 +#, c-format +msgid "Error updating Volume record: ERR=%s" +msgstr "" + +#: src/dird/ua_update.c:374 +#, c-format +msgid "Volume defaults updated from \"%s\" Pool record.\n" +msgstr "" + +#: src/dird/ua_update.c:398 +#, c-format +msgid "Error updating Volume records: ERR=%s" +msgstr "" + +#: src/dird/ua_update.c:400 +#, c-format +msgid "All Volume defaults updated from \"%s\" Pool record.\n" +msgstr "" + +#: src/dird/ua_update.c:412 +#, c-format +msgid "Error updating media record Enabled: ERR=%s" +msgstr "" + +#: src/dird/ua_update.c:414 +#, c-format +msgid "New Enabled is: %d\n" +msgstr "" + +#: src/dird/ua_update.c:519 +msgid "Volume Status" +msgstr "" + +#: src/dird/ua_update.c:520 +msgid "Volume Retention Period" +msgstr "" + +#: src/dird/ua_update.c:521 +msgid "Volume Use Duration" +msgstr "" + +#: src/dird/ua_update.c:522 +msgid "Maximum Volume Jobs" +msgstr "" + +#: src/dird/ua_update.c:523 +msgid "Maximum Volume Files" +msgstr "" + +#: src/dird/ua_update.c:524 +msgid "Maximum Volume Bytes" +msgstr "" + +#: src/dird/ua_update.c:525 +msgid "Recycle Flag" +msgstr "" + +#: src/dird/ua_update.c:526 +msgid "Slot" +msgstr "" + +#: src/dird/ua_update.c:527 +msgid "InChanger Flag" +msgstr "" + +#: src/dird/ua_update.c:528 +msgid "Volume Files" +msgstr "" + +#: src/dird/ua_update.c:530 +msgid "Volume from Pool" +msgstr "" + +#: src/dird/ua_update.c:531 +msgid "All Volumes from Pool" +msgstr "" + +#: src/dird/ua_update.c:532 +msgid "Enabled" +msgstr "" + +#: src/dird/ua_update.c:533 +msgid "RecyclePool" +msgstr "" + +#: src/dird/ua_update.c:534 +msgid "Done" +msgstr "" + +#: src/dird/ua_update.c:542 +#, c-format +msgid "Updating Volume \"%s\"\n" +msgstr "" + +#: src/dird/ua_update.c:547 +#, c-format +msgid "Current Volume status is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:548 +msgid "Possible Values are:\n" +msgstr "" + +#: src/dird/ua_update.c:559 +msgid "Choose new Volume Status" +msgstr "" + +#: src/dird/ua_update.c:565 +#, c-format +msgid "Current retention period is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:567 +msgid "Enter Volume Retention period: " +msgstr "" + +#: src/dird/ua_update.c:574 +#, c-format +msgid "Current use duration is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:576 +msgid "Enter Volume Use Duration: " +msgstr "" + +#: src/dird/ua_update.c:583 +#, c-format +msgid "Current max jobs is: %u\n" +msgstr "" + +#: src/dird/ua_update.c:584 +msgid "Enter new Maximum Jobs: " +msgstr "" + +#: src/dird/ua_update.c:591 +#, c-format +msgid "Current max files is: %u\n" +msgstr "" + +#: src/dird/ua_update.c:592 +msgid "Enter new Maximum Files: " +msgstr "" + +#: src/dird/ua_update.c:599 +#, c-format +msgid "Current value is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:600 +msgid "Enter new Maximum Bytes: " +msgstr "" + +#: 
src/dird/ua_update.c:608 +#, c-format +msgid "Current recycle flag is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:610 +msgid "Enter new Recycle status: " +msgstr "" + +#: src/dird/ua_update.c:617 +#, c-format +msgid "Current Slot is: %d\n" +msgstr "" + +#: src/dird/ua_update.c:618 +msgid "Enter new Slot: " +msgstr "" + +#: src/dird/ua_update.c:625 +#, c-format +msgid "Current InChanger flag is: %d\n" +msgstr "" + +#: src/dird/ua_update.c:626 +#, c-format +msgid "Set InChanger flag for Volume \"%s\": yes/no: " +msgstr "" + +#: src/dird/ua_update.c:639 +#, c-format +msgid "New InChanger flag is: %d\n" +msgstr "" + +#: src/dird/ua_update.c:646 +msgid "" +"Warning changing Volume Files can result\n" +"in loss of data on your Volume\n" +"\n" +msgstr "" + +#: src/dird/ua_update.c:648 +#, c-format +msgid "Current Volume Files is: %u\n" +msgstr "" + +#: src/dird/ua_update.c:649 +msgid "Enter new number of Files for Volume: " +msgstr "" + +#: src/dird/ua_update.c:654 +msgid "Normally, you should only increase Volume Files by one!\n" +msgstr "" + +#: src/dird/ua_update.c:655 +msgid "Increase Volume Files? (yes/no): " +msgstr "" + +#: src/dird/ua_update.c:665 +#, c-format +msgid "New Volume Files is: %u\n" +msgstr "" + +#: src/dird/ua_update.c:677 +#, c-format +msgid "Current Pool is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:678 +msgid "Enter new Pool name: " +msgstr "" + +#: src/dird/ua_update.c:695 +#, c-format +msgid "Current Enabled is: %d\n" +msgstr "" + +#: src/dird/ua_update.c:696 +msgid "Enter new Enabled: " +msgstr "" + +#: src/dird/ua_update.c:715 +#, c-format +msgid "Current RecyclePool is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:717 +msgid "No current RecyclePool\n" +msgstr "" + +#: src/dird/ua_update.c:719 +msgid "Enter new RecyclePool name: " +msgstr "" + +#: src/dird/ua_update.c:726 +msgid "Selection terminated.\n" +msgstr "" + +#: src/dird/ua_update.c:760 +#, c-format +msgid "db_update_pool_record returned %d. ERR=%s\n" +msgstr "" + +#: src/dird/ua_update.c:767 +msgid "Pool DB record updated from resource.\n" +msgstr "" + +#: src/dird/ua_update.c:794 +msgid "Expect JobId keyword, not found.\n" +msgstr "" + +#: src/dird/ua_update.c:819 +msgid "Neither Client nor StartTime specified.\n" +msgstr "" + +#: src/dird/verify.c:120 +msgid "" +"Unable to find JobId of previous InitCatalog Job.\n" +"Please run a Verify with Level=InitCatalog before\n" +"running the current Job.\n" +msgstr "" + +#: src/dird/verify.c:125 +msgid "Unable to find JobId of previous Job for this client.\n" +msgstr "" + +#: src/dird/verify.c:141 +#, c-format +msgid "Could not get job record for previous Job. ERR=%s" +msgstr "" + +#: src/dird/verify.c:146 +#, c-format +msgid "Last Job %d did not terminate normally. JobStatus=%c\n" +msgstr "" + +#: src/dird/verify.c:150 +#, c-format +msgid "Verifying against JobId=%d Job=%s\n" +msgstr "" + +#: src/dird/verify.c:179 +#, c-format +msgid "Start Verify JobId=%s Level=%s Job=%s\n" +msgstr "" + +#: src/dird/verify.c:263 +msgid "Deprecated feature ... 
use bootstrap.\n" +msgstr "" + +#: src/dird/verify.c:276 +#, c-format +msgid "Unimplemented Verify level %d(%c)\n" +msgstr "" + +#: src/dird/verify.c:330 +#, c-format +msgid "Unimplemented verify level %d\n" +msgstr "" + +#: src/dird/verify.c:384 +msgid "Verify OK" +msgstr "" + +#: src/dird/verify.c:388 +msgid "*** Verify Error ***" +msgstr "" + +#: src/dird/verify.c:392 +msgid "Verify warnings" +msgstr "" + +#: src/dird/verify.c:395 +msgid "Verify Canceled" +msgstr "" + +#: src/dird/verify.c:398 +msgid "Verify Differences" +msgstr "" + +#: src/dird/verify.c:403 +#, c-format +msgid "Inappropriate term code: %d %c\n" +msgstr "" + +#: src/dird/verify.c:417 +#, c-format +msgid "" +"Bacula %s %s (%s): %s\n" +" Build OS: %s %s %s\n" +" JobId: %d\n" +" Job: %s\n" +" FileSet: %s\n" +" Verify Level: %s\n" +" Client: %s\n" +" Verify JobId: %d\n" +" Verify Job: %s\n" +" Start time: %s\n" +" End time: %s\n" +" Files Expected: %s\n" +" Files Examined: %s\n" +" Non-fatal FD errors: %d\n" +" FD termination status: %s\n" +" SD termination status: %s\n" +" Termination: %s\n" +"\n" +msgstr "" + +#: src/dird/verify.c:452 +#, c-format +msgid "" +"Bacula %s %s (%s): %s\n" +" Build: %s %s %s\n" +" JobId: %d\n" +" Job: %s\n" +" FileSet: %s\n" +" Verify Level: %s\n" +" Client: %s\n" +" Verify JobId: %d\n" +" Verify Job: %s\n" +" Start time: %s\n" +" End time: %s\n" +" Files Examined: %s\n" +" Non-fatal FD errors: %d\n" +" FD termination status: %s\n" +" Termination: %s\n" +"\n" +msgstr "" + +#: src/dird/verify.c:531 +#, c-format +msgid "" +"bird set configuration file to file\n" +" -dnn set debug level to nn\n" +" -n no conio\n" +" -s no signals\n" +" -t test - read configuration and exit\n" +" -? print this message.\n" +"\n" +msgstr "" + +#: src/console/console.c:162 +msgid "input from file" +msgstr "" + +#: src/console/console.c:163 +msgid "output to file" +msgstr "" + +#: src/console/console.c:165 +msgid "output to file and terminal" +msgstr "" + +#: src/console/console.c:166 +msgid "sleep specified time" +msgstr "" + +#: src/console/console.c:168 +msgid "print Console's version" +msgstr "" + +#: src/console/console.c:169 +msgid "echo command string" +msgstr "" + +#: src/console/console.c:170 +msgid "execute an external command" +msgstr "" + +#: src/console/console.c:172 +msgid "zed_keys = use zed keys instead of bash keys" +msgstr "" + +#: src/console/console.c:205 +msgid ": is an invalid command\n" +msgstr "" + +#: src/console/console.c:462 +msgid "Available Directors:\n" +msgstr "" + +#: src/console/console.c:466 +#, c-format +msgid "%2d: %s at %s:%d\n" +msgstr "" + +#: src/console/console.c:470 +msgid "Select Director by entering a number: " +msgstr "" + +#: src/console/console.c:475 +#, c-format +msgid "%s is not a number. 
You must enter a number between 1 and %d\n" +msgstr "" + +#: src/console/console.c:481 +#, c-format +msgid "You must enter a number between 1 and %d\n" +msgstr "" + +#: src/console/console.c:521 src/tray-monitor/tray-monitor.c:905 +#, c-format +msgid "Connecting to Director %s:%d\n" +msgstr "" + +#: src/console/console.c:538 src/gnome2-console/console.c:526 +#: src/wx-console/console_thread.cpp:391 +#, c-format +msgid "Failed to initialize TLS context for Console \"%s\".\n" +msgstr "" + +#: src/console/console.c:558 src/gnome2-console/console.c:548 +#: src/wx-console/console_thread.cpp:412 +#, c-format +msgid "Failed to initialize TLS context for Director \"%s\".\n" +msgstr "" + +#: src/console/console.c:588 +msgid "Enter a period to cancel a command.\n" +msgstr "" + +#: src/console/console.c:664 src/gnome2-console/console.c:160 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Director \"%s\" in %s. At least one CA certificate store is required.\n" +msgstr "" + +#: src/console/console.c:673 src/gnome2-console/console.c:169 +#, c-format +msgid "" +"No Director resource defined in %s\n" +"Without that I don't how to speak to the Director :-(\n" +msgstr "" + +#: src/console/console.c:693 src/gnome2-console/console.c:189 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Console \"%s\" in %s.\n" +msgstr "" + +#: src/console/console.c:849 +msgid "Too many arguments on input command.\n" +msgstr "" + +#: src/console/console.c:853 +msgid "First argument to input command must be a filename.\n" +msgstr "" + +#: src/console/console.c:858 +#, c-format +msgid "Cannot open file %s for input. ERR=%s\n" +msgstr "" + +#: src/console/console.c:888 +msgid "Too many arguments on output/tee command.\n" +msgstr "" + +#: src/console/console.c:905 +#, c-format +msgid "Cannot open file %s for output. ERR=%s\n" +msgstr "" + +#: src/console/console.c:924 +msgid "Too many arguments. 
Enclose command in double quotes.\n" +msgstr "" + +#: src/console/console.c:933 +#, c-format +msgid "Cannot popen(\"%s\", \"r\"): ERR=%s\n" +msgstr "" + +#: src/console/console.c:945 src/stored/autochanger.c:549 +#, c-format +msgid "Autochanger error: ERR=%s\n" +msgstr "" + +#: src/console/console_conf.c:138 src/gnome2-console/console_conf.c:134 +#: src/wx-console/console_conf.c:147 +#, c-format +msgid "No record for %d %s\n" +msgstr "" + +#: src/console/console_conf.c:147 src/wx-console/console_conf.c:156 +#, c-format +msgid "Console: name=%s rcfile=%s histfile=%s\n" +msgstr "" + +#: src/console/console_conf.c:151 src/gnome2-console/console_conf.c:143 +#: src/wx-console/console_conf.c:160 +#, c-format +msgid "Director: name=%s address=%s DIRport=%d\n" +msgstr "" + +#: src/console/console_conf.c:155 src/console/console_conf.c:231 +#: src/console/console_conf.c:276 src/console/console_conf.c:303 +#: src/filed/filed_conf.c:319 src/filed/filed_conf.c:384 +#: src/filed/filed_conf.c:414 src/gnome2-console/console_conf.c:154 +#: src/gnome2-console/console_conf.c:232 src/gnome2-console/console_conf.c:280 +#: src/gnome2-console/console_conf.c:310 src/stored/stored_conf.c:528 +#: src/stored/stored_conf.c:616 src/stored/stored_conf.c:651 +#: src/wx-console/console_conf.c:164 src/wx-console/console_conf.c:239 +#: src/wx-console/console_conf.c:284 src/wx-console/console_conf.c:311 +#, c-format +msgid "Unknown resource type %d\n" +msgstr "" + +#: src/filed/acl.c:104 src/filed/acl.c:110 +msgid "ACL support not configured for your machine.\n" +msgstr "" + +#: src/filed/acl.c:186 +#, c-format +msgid "acl_to_text error on file \"%s\": ERR=%s\n" +msgstr "" + +#: src/filed/acl.c:215 +#, c-format +msgid "acl_delete_def_file error on file \"%s\": ERR=%s\n" +msgstr "" + +#: src/filed/acl.c:223 +#, c-format +msgid "acl_from_text error on file \"%s\": ERR=%s\n" +msgstr "" + +#: src/filed/acl.c:237 +#, c-format +msgid "ac_valid error on file \"%s\": ERR=%s\n" +msgstr "" + +#: src/filed/acl.c:252 +#, c-format +msgid "acl_set_file error on file \"%s\": ERR=%s\n" +msgstr "" + +#: src/filed/acl.c:286 +#, c-format +msgid "acltostr error on file \"%s\": ERR=%s\n" +msgstr "" + +#: src/filed/acl.c:303 src/filed/acl.c:311 +#, c-format +msgid "strtoacl error on file \"%s\": ERR=%s\n" +msgstr "" + +#: src/filed/acl.c:323 +#, c-format +msgid "setacl error on file \"%s\": ERR=%s\n" +msgstr "" + +#: src/filed/acl.c:359 +#, c-format +msgid "acltotext error on file \"%s\": ERR=%s\n" +msgstr "" + +#: src/filed/acl.c:376 +#, c-format +msgid "aclfromtext error on file \"%s\": ERR=%s\n" +msgstr "" + +#: src/filed/acl.c:388 +#, c-format +msgid "acl(SETACL) error on file \"%s\": ERR=%s\n" +msgstr "" + +#: src/filed/authenticate.c:60 +#, c-format +msgid "I only authenticate directors, not %d\n" +msgstr "" + +#: src/filed/authenticate.c:68 src/stored/authenticate.c:66 +#, c-format +msgid "Bad Hello command from Director at %s. 
Len=%d.\n" +msgstr "" + +#: src/filed/authenticate.c:80 src/stored/authenticate.c:77 +#, c-format +msgid "Bad Hello command from Director at %s: %s\n" +msgstr "" + +#: src/filed/authenticate.c:92 +#, c-format +msgid "Connection from unknown Director %s at %s rejected.\n" +msgstr "" + +#: src/filed/authenticate.c:132 +#, c-format +msgid "Incorrect password given by Director at %s.\n" +msgstr "" + +#: src/filed/authenticate.c:139 +msgid "" +"Authorization problem: Remote server did not advertize required TLS " +"support.\n" +msgstr "" + +#: src/filed/authenticate.c:193 src/stored/dircmd.c:199 +msgid "Unable to authenticate Director\n" +msgstr "" + +#: src/filed/authenticate.c:244 +msgid "" +"Authorization key rejected by Storage daemon.\n" +"Please see http://www.bacula.org/rel-manual/faq.html#AuthorizationErrors for " +"help.\n" +msgstr "" + +#: src/filed/backup.c:86 +msgid "Cannot set buffer size FD->SD.\n" +msgstr "" + +#: src/filed/backup.c:129 src/filed/backup.c:138 +msgid "An error occurred while encrypting the stream.\n" +msgstr "" + +#: src/filed/backup.c:255 +#, c-format +msgid " Recursion turned off. Will not descend from %s into %s\n" +msgstr "" + +#: src/filed/backup.c:262 +#, c-format +msgid " %s is a different filesystem. Will not descend from %s into %s\n" +msgstr "" + +#: src/filed/backup.c:268 +#, c-format +msgid " Disallowed filesystem. Will not descend from %s into %s\n" +msgstr "" + +#: src/filed/backup.c:273 +#, c-format +msgid " Disallowed drive type. Will not descend into %s\n" +msgstr "" + +#: src/filed/backup.c:292 src/filed/verify.c:123 +#, c-format +msgid " Could not access %s: ERR=%s\n" +msgstr "" + +#: src/filed/backup.c:299 src/filed/verify.c:130 +#, c-format +msgid " Could not follow link %s: ERR=%s\n" +msgstr "" + +#: src/filed/backup.c:306 src/filed/verify.c:137 +#, c-format +msgid " Could not stat %s: ERR=%s\n" +msgstr "" + +#: src/filed/backup.c:313 src/filed/verify.c:143 +#, c-format +msgid " Unchanged file skipped: %s\n" +msgstr "" + +#: src/filed/backup.c:316 +#, c-format +msgid " Archive file not saved: %s\n" +msgstr "" + +#: src/filed/backup.c:320 src/filed/verify.c:158 +#, c-format +msgid " Could not open directory %s: ERR=%s\n" +msgstr "" + +#: src/filed/backup.c:326 +#, c-format +msgid " Unknown file type %d; not saved: %s\n" +msgstr "" + +#: src/filed/backup.c:366 src/filed/verify.c:239 +#, c-format +msgid "%s digest initialization failed\n" +msgstr "" + +#: src/filed/backup.c:380 +#, c-format +msgid "%s signature digest initialization failed\n" +msgstr "" + +#: src/filed/backup.c:400 +#, c-format +msgid "Python reader program \"%s\" not found.\n" +msgstr "" + +#: src/filed/backup.c:462 src/filed/verify.c:300 +#, c-format +msgid " Cannot open %s: ERR=%s.\n" +msgstr "" + +#: src/filed/backup.c:499 src/filed/verify.c:314 +#, c-format +msgid " Cannot open resource fork for %s: ERR=%s.\n" +msgstr "" + +#: src/filed/backup.c:556 +msgid "Failed to allocate memory for crypto signature.\n" +msgstr "" + +#: src/filed/backup.c:561 src/filed/backup.c:567 src/filed/backup.c:582 +msgid "An error occurred while signing the stream.\n" +msgstr "" + +#: src/filed/backup.c:606 +msgid "An error occurred finalizing signing the stream.\n" +msgstr "" + +#: src/filed/backup.c:693 +#, c-format +msgid "Compression deflateParams error: %d\n" +msgstr "" + +#: src/filed/backup.c:705 +msgid "Encrypting sparse data not supported.\n" +msgstr "" + +#: src/filed/backup.c:712 +msgid "Failed to initialize encryption context.\n" +msgstr "" + +#: src/filed/backup.c:735 
src/filed/backup.c:891 src/filed/backup.c:926 +#: src/filed/backup.c:937 src/filed/backup.c:983 src/filed/backup.c:996 +#: src/filed/backup.c:1004 src/filed/backup.c:1050 src/filed/backup.c:1086 +#, c-format +msgid "Network send error to SD. ERR=%s\n" +msgstr "" + +#: src/filed/backup.c:814 +#, c-format +msgid "Compression deflate error: %d\n" +msgstr "" + +#: src/filed/backup.c:821 +#, c-format +msgid "Compression deflateReset error: %d\n" +msgstr "" + +#: src/filed/backup.c:864 src/filed/backup.c:880 +msgid "Encryption error\n" +msgstr "" + +#: src/filed/backup.c:904 +#, c-format +msgid "Read error on file %s. ERR=%s\n" +msgstr "" + +#: src/filed/backup.c:907 +msgid "Too many errors.\n" +msgstr "" + +#: src/filed/backup.c:917 +msgid "Encryption padding error\n" +msgstr "" + +#: src/filed/backup.c:974 +#, c-format +msgid "Error reading ACL of %s\n" +msgstr "" + +#: src/filed/backup.c:1029 +msgid "Invalid file flags, no supported data stream type.\n" +msgstr "" + +#: src/filed/filed.c:65 +#, c-format +msgid "" +"\n" +"Version: %s (%s)\n" +"\n" +"Usage: bacula-fd [-f -s] [-c config_file] [-d debug_level]\n" +" -c use as configuration file\n" +" -dnn set debug level to nn\n" +" -f run in foreground (for debugging)\n" +" -g groupid\n" +" -s no signals (for debugging)\n" +" -t test configuration file and exit\n" +" -u userid\n" +" -v verbose user messages\n" +" -? print this message.\n" +"\n" +msgstr "" + +#: src/filed/filed.c:268 +#, c-format +msgid "" +"No File daemon resource defined in %s\n" +"Without that I don't know who I am :-(\n" +msgstr "" + +#: src/filed/filed.c:273 +#, c-format +msgid "Only one Client resource permitted in %s\n" +msgstr "" + +#: src/filed/filed.c:296 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"File daemon in %s.\n" +msgstr "" + +#: src/filed/filed.c:320 +msgid "PKI encryption/signing enabled but not compiled into Bacula.\n" +msgstr "" + +#: src/filed/filed.c:331 +#, c-format +msgid "" +"\"PKI Key Pair\" must be defined for File daemon \"%s\" in %s if either " +"\"PKI Sign\" or \"PKI Encrypt\" are enabled.\n" +msgstr "" + +#: src/filed/filed.c:343 src/filed/filed.c:374 src/filed/filed.c:415 +msgid "Failed to allocate a new keypair object.\n" +msgstr "" + +#: src/filed/filed.c:347 +#, c-format +msgid "Failed to load public certificate for File daemon \"%s\" in %s.\n" +msgstr "" + +#: src/filed/filed.c:353 +#, c-format +msgid "Failed to load private key for File daemon \"%s\" in %s.\n" +msgstr "" + +#: src/filed/filed.c:383 +#, c-format +msgid "Failed to load private key from file %s for File daemon \"%s\" in %s.\n" +msgstr "" + +#: src/filed/filed.c:390 +#, c-format +msgid "" +"Failed to load trusted signer certificate from file %s for File daemon \"%s" +"\" in %s.\n" +msgstr "" + +#: src/filed/filed.c:421 +#, c-format +msgid "" +"Failed to load master key certificate from file %s for File daemon \"%s\" in " +"%s.\n" +msgstr "" + +#: src/filed/filed.c:437 +#, c-format +msgid "No Director resource defined in %s\n" +msgstr "" + +#: src/filed/job.c:373 +#, c-format +msgid "2901 Job %s not found.\n" +msgstr "" + +#: src/filed/job.c:382 +#, c-format +msgid "2001 Job %s marked to be canceled.\n" +msgstr "" + +#: src/filed/job.c:385 +msgid "2902 Error scanning cancel command.\n" +msgstr "" + +#: src/filed/job.c:404 +#, c-format +msgid "2991 Bad setdebug command: %s\n" +msgstr "" + +#: src/filed/job.c:420 +#, c-format +msgid "Bad estimate command: %s" +msgstr "" + +#: src/filed/job.c:421 +msgid "2992 Bad estimate 
command.\n" +msgstr "" + +#: src/filed/job.c:444 +#, c-format +msgid "Bad Job Command: %s" +msgstr "" + +#: src/filed/job.c:465 +#, c-format +msgid "Bad RunBeforeJob command: %s\n" +msgstr "" + +#: src/filed/job.c:466 src/filed/job.c:484 +msgid "2905 Bad RunBeforeJob command.\n" +msgstr "" + +#: src/filed/job.c:495 +msgid "2905 Bad RunBeforeNow command.\n" +msgstr "" + +#: src/filed/job.c:514 +#, c-format +msgid "Bad RunAfter command: %s\n" +msgstr "" + +#: src/filed/job.c:515 +msgid "2905 Bad RunAfterJob command.\n" +msgstr "" + +#: src/filed/job.c:549 +#, c-format +msgid "Bad RunScript command: %s\n" +msgstr "" + +#: src/filed/job.c:550 +msgid "2905 Bad RunScript command.\n" +msgstr "" + +#: src/filed/job.c:652 +#, c-format +msgid "Error running program: %s. stat=%d: ERR=%s\n" +msgstr "" + +#: src/filed/job.c:662 +#, c-format +msgid "Cannot open FileSet input file: %s. ERR=%s\n" +msgstr "" + +#: src/filed/job.c:758 +#, c-format +msgid "REGEX %s compile error. ERR=%s\n" +msgstr "" + +#: src/filed/job.c:820 +#, c-format +msgid "Invalid FileSet command: %s\n" +msgstr "" + +#: src/filed/job.c:1054 src/findlib/match.c:199 src/tools/testfind.c:635 +#, c-format +msgid "Unknown include/exclude option: %c\n" +msgstr "" + +#: src/filed/job.c:1123 src/stored/fd_cmds.c:355 +#, c-format +msgid "Could not create bootstrap file %s: ERR=%s\n" +msgstr "" + +#: src/filed/job.c:1232 +#, c-format +msgid "DIR and FD clocks differ by %d seconds, FD automatically adjusting.\n" +msgstr "" + +#: src/filed/job.c:1240 +#, c-format +msgid "Unknown backup level: %s\n" +msgstr "" + +#: src/filed/job.c:1252 +#, c-format +msgid "Bad level command: %s\n" +msgstr "" + +#: src/filed/job.c:1273 +#, c-format +msgid "Bad session command: %s" +msgstr "" + +#: src/filed/job.c:1294 +#, c-format +msgid "Bad storage command: %s" +msgstr "" + +#: src/filed/job.c:1303 +#, c-format +msgid "Failed to connect to Storage daemon: %s:%d\n" +msgstr "" + +#: src/filed/job.c:1315 +msgid "Failed to authenticate Storage daemon.\n" +msgstr "" + +#: src/filed/job.c:1353 +msgid "Cannot contact Storage daemon\n" +msgstr "" + +#: src/filed/job.c:1371 +#, c-format +msgid "Bad response to append open: %s\n" +msgstr "" + +#: src/filed/job.c:1376 +msgid "Bad response from stored to open command\n" +msgstr "" + +#: src/filed/job.c:1403 +#, c-format +msgid "Generate VSS snapshots. Driver=\"%s\", Drive(s)=\"%s\"\n" +msgstr "" + +#: src/filed/job.c:1405 +msgid "Generate VSS snapshots failed.\n" +msgstr "" + +#: src/filed/job.c:1412 +#, c-format +msgid "" +"Generate VSS snapshot of drive \"%c:\\\" failed. VSS support is disabled on " +"this drive.\n" +msgstr "" + +#: src/filed/job.c:1419 +#, c-format +msgid "VSS Writer (PrepareForBackup): %s\n" +msgstr "" + +#: src/filed/job.c:1424 +msgid "No drive letters found for generating VSS snapshots.\n" +msgstr "" + +#: src/filed/job.c:1428 +#, c-format +msgid "VSS was not initialized properly. VSS support is disabled. ERR=%s\n" +msgstr "" + +#: src/filed/job.c:1478 +msgid "Append Close with SD failed.\n" +msgstr "" + +#: src/filed/job.c:1482 +#, c-format +msgid "Bad status %d returned from Storage Daemon.\n" +msgstr "" + +#: src/filed/job.c:1500 +#, c-format +msgid "VSS Writer (BackupComplete): %s\n" +msgstr "" + +#: src/filed/job.c:1528 +#, c-format +msgid "2994 Bad verify command: %s\n" +msgstr "" + +#: src/filed/job.c:1543 src/filed/job.c:1582 +#, c-format +msgid "2994 Bad verify level: %s\n" +msgstr "" + +#: src/filed/job.c:1626 +#, c-format +msgid "Bad replace command. 
CMD=%s\n" +msgstr "" + +#: src/filed/job.c:1644 +#, c-format +msgid "Bad where regexp. where=%s\n" +msgstr "" + +#: src/filed/job.c:1718 +msgid "Improper calling sequence.\n" +msgstr "" + +#: src/filed/job.c:1738 +#, c-format +msgid "Bad response to SD read open: %s\n" +msgstr "" + +#: src/filed/job.c:1743 +msgid "Bad response from stored to read open command\n" +msgstr "" + +#: src/filed/job.c:1807 +#, c-format +msgid "Comm error with SD. bad response to %s. ERR=%s\n" +msgstr "" + +#: src/filed/job.c:1810 +#, c-format +msgid "Bad response to %s command. Wanted %s, got %s\n" +msgstr "" + +#: src/filed/pythonfd.c:157 src/stored/pythonsd.c:162 +#, c-format +msgid "Cannot delete attribute %s" +msgstr "" + +#: src/filed/pythonfd.c:175 src/filed/pythonfd.c:191 src/stored/pythonsd.c:195 +#, c-format +msgid "Cannot find attribute %s" +msgstr "" + +#: src/filed/restore.c:133 +#, c-format +msgid "Size of data or stream of %s not correct. Original %s, restored %s.\n" +msgstr "" + +#: src/filed/restore.c:255 src/filed/verify_vol.c:99 +#, c-format +msgid "Record header scan error: %s\n" +msgstr "" + +#: src/filed/restore.c:263 src/filed/verify_vol.c:108 +#, c-format +msgid "Data record error. ERR=%s\n" +msgstr "" + +#: src/filed/restore.c:267 src/filed/verify_vol.c:112 +#, c-format +msgid "Actual data size %d not same as header %d\n" +msgstr "" + +#: src/filed/restore.c:294 src/filed/restore.c:639 +msgid "Logic error: output file should be open\n" +msgstr "" + +#: src/filed/restore.c:315 src/filed/restore.c:652 +msgid "Logic error: output file should not be open\n" +msgstr "" + +#: src/filed/restore.c:326 src/filed/verify_vol.c:151 +#: src/stored/bextract.c:302 src/stored/bls.c:383 src/stored/bscan.c:663 +#, c-format +msgid "Record header file index %ld not equal record index %ld\n" +msgstr "" + +#: src/filed/restore.c:339 src/stored/bextract.c:311 +#, c-format +msgid "%s stream not supported on this Client.\n" +msgstr "" + +#: src/filed/restore.c:391 +msgid "Unexpected cryptographic session data stream.\n" +msgstr "" + +#: src/filed/restore.c:399 +msgid "" +"No private decryption keys have been defined to decrypt encrypted backup " +"data.\n" +msgstr "" + +#: src/filed/restore.c:410 +msgid "Could not create digest.\n" +msgstr "" + +#: src/filed/restore.c:424 +msgid "Missing private key required to decrypt encrypted backup data.\n" +msgstr "" + +#: src/filed/restore.c:427 +msgid "Decrypt of the session key failed.\n" +msgstr "" + +#: src/filed/restore.c:431 +#, c-format +msgid "An error occurred while decoding encrypted session data stream: %s\n" +msgstr "" + +#: src/filed/restore.c:480 src/filed/restore.c:526 +#, c-format +msgid "Missing encryption session data stream for %s\n" +msgstr "" + +#: src/filed/restore.c:488 src/filed/restore.c:533 +#, c-format +msgid "Failed to initialize decryption context for %s\n" +msgstr "" + +#: src/filed/restore.c:545 +#, c-format +msgid " Cannot open resource fork for %s.\n" +msgstr "" + +#: src/filed/restore.c:571 +#, c-format +msgid " Invalid length of Finder Info (got %d, not 32)\n" +msgstr "" + +#: src/filed/restore.c:575 +#, c-format +msgid " Could not set Finder Info on %s\n" +msgstr "" + +#: src/filed/restore.c:588 +#, c-format +msgid "Can't restore ACL of %s\n" +msgstr "" + +#: src/filed/restore.c:600 +#, c-format +msgid "Can't restore default ACL of %s\n" +msgstr "" + +#: src/filed/restore.c:610 +msgid "Unexpected cryptographic signature data stream.\n" +msgstr "" + +#: src/filed/restore.c:616 +#, c-format +msgid "Failed to decode message signature for 
%s\n" +msgstr "" + +#: src/filed/restore.c:655 src/stored/bextract.c:469 +#, c-format +msgid "Unknown stream=%d ignored. This shouldn't happen!\n" +msgstr "" + +#: src/filed/restore.c:732 +#, c-format +msgid "" +"%d non-supported data streams and %d non-supported attrib streams ignored.\n" +msgstr "" + +#: src/filed/restore.c:736 +#, c-format +msgid "%d non-supported resource fork streams ignored.\n" +msgstr "" + +#: src/filed/restore.c:739 +#, c-format +msgid "%d non-supported Finder Info streams ignored.\n" +msgstr "" + +#: src/filed/restore.c:742 +#, c-format +msgid "%d non-supported acl streams ignored.\n" +msgstr "" + +#: src/filed/restore.c:745 +#, c-format +msgid "%d non-supported crypto streams ignored.\n" +msgstr "" + +#: src/filed/restore.c:757 +msgid "None" +msgstr "" + +#: src/filed/restore.c:761 +msgid "Zlib errno" +msgstr "" + +#: src/filed/restore.c:763 +msgid "Zlib stream error" +msgstr "" + +#: src/filed/restore.c:765 +msgid "Zlib data error" +msgstr "" + +#: src/filed/restore.c:767 +msgid "Zlib memory error" +msgstr "" + +#: src/filed/restore.c:769 +msgid "Zlib buffer error" +msgstr "" + +#: src/filed/restore.c:771 +msgid "Zlib version error" +msgstr "" + +#: src/filed/restore.c:773 src/lib/util.c:591 src/lib/util.c:601 +#: src/lib/util.c:609 src/lib/util.c:616 src/lib/util.c:623 src/lib/util.c:637 +#: src/lib/util.c:647 src/lib/util.c:654 src/lib/util.c:665 +msgid "*none*" +msgstr "" + +#: src/filed/restore.c:809 +#, c-format +msgid "Missing cryptographic signature for %s\n" +msgstr "" + +#: src/filed/restore.c:838 src/filed/restore.c:862 +#, c-format +msgid "Signature validation failed for file %s: ERR=%s\n" +msgstr "" + +#: src/filed/restore.c:852 +#, c-format +msgid "Digest one file failed for file: %s\n" +msgstr "" + +#: src/filed/restore.c:883 +#, c-format +msgid "Signature validation failed for %s: %s\n" +msgstr "" + +#: src/filed/restore.c:909 src/stored/bextract.c:400 +#, c-format +msgid "Seek to %s error on %s: ERR=%s\n" +msgstr "" + +#: src/filed/restore.c:936 +#, c-format +msgid "Uncompression error on file %s. ERR=%s\n" +msgstr "" + +#: src/filed/restore.c:945 src/stored/bextract.c:434 +msgid "GZIP data stream found, but GZIP not configured!\n" +msgstr "" + +#: src/filed/restore.c:968 +#, c-format +msgid "Write error in Win32 Block Decomposition on %s: %s\n" +msgstr "" + +#: src/filed/restore.c:974 src/stored/bextract.c:372 src/stored/bextract.c:422 +#, c-format +msgid "Write error on %s: %s\n" +msgstr "" + +#: src/filed/restore.c:1023 +msgid "Decryption error\n" +msgstr "" + +#: src/filed/restore.c:1113 +#, c-format +msgid "Decryption error. buf_len=%d decrypt_len=%d on file %s\n" +msgstr "" + +#: src/filed/restore.c:1217 +msgid "Open File Manager paused\n" +msgstr "" + +#: src/filed/restore.c:1221 +msgid "FAILED to pause Open File Manager\n" +msgstr "" + +#: src/filed/restore.c:1229 +#, c-format +msgid "Running as '%s'. 
Privmask=%#08x\n" +msgstr "" + +#: src/filed/restore.c:1231 +msgid "Failed to retrieve current UserName\n" +msgstr "" + +#: src/filed/status.c:77 +#, c-format +msgid "%s Version: %s (%s) %s %s %s %s\n" +msgstr "" + +#: src/filed/status.c:81 src/stored/status.c:86 +#, c-format +msgid "Daemon started %s, %d Job%s run since started.\n" +msgstr "" + +#: src/filed/status.c:136 +#, c-format +msgid " Sizeof: boffset_t=%d size_t=%d debug=%d trace=%d\n" +msgstr "" + +#: src/filed/status.c:155 +#, c-format +msgid "Director connected at: %s\n" +msgstr "" + +#: src/filed/status.c:157 +#, c-format +msgid "JobId %d Job %s is running.\n" +msgstr "" + +#: src/filed/status.c:160 +#, c-format +msgid " %s%s Job started: %s\n" +msgstr "" + +#: src/filed/status.c:172 +#, c-format +msgid " Files=%s Bytes=%s Bytes/sec=%s Errors=%d\n" +msgstr "" + +#: src/filed/status.c:178 +#, c-format +msgid " Files Examined=%s\n" +msgstr "" + +#: src/filed/status.c:183 +#, c-format +msgid " Processing file: %s\n" +msgstr "" + +#: src/filed/status.c:194 +msgid " SDSocket closed.\n" +msgstr "" + +#: src/filed/status.c:328 src/filed/status.c:352 src/stored/status.c:636 +#: src/stored/status.c:659 +#, c-format +msgid "Bad .status command: %s\n" +msgstr "" + +#: src/filed/status.c:329 +msgid "2900 Bad .status command, missing argument.\n" +msgstr "" + +#: src/filed/status.c:353 +msgid "2900 Bad .status command, wrong argument.\n" +msgstr "" + +#: src/filed/status.c:390 src/stored/status.c:577 +msgid "Init Catalog" +msgstr "" + +#: src/filed/status.c:393 src/stored/status.c:580 +msgid "Volume to Catalog" +msgstr "" + +#: src/filed/status.c:396 src/stored/status.c:583 +msgid "Disk to Catalog" +msgstr "" + +#: src/filed/status.c:399 src/stored/status.c:586 +msgid "Data" +msgstr "" + +#: src/filed/status.c:405 src/lib/util.c:361 src/stored/status.c:592 +msgid "Unknown Job Level" +msgstr "" + +#: src/filed/status.c:421 +msgid "Bacula Client: Idle" +msgstr "" + +#: src/filed/status.c:432 +msgid "Bacula Client: Running" +msgstr "" + +#: src/filed/status.c:446 +msgid "Bacula Client: Last Job Canceled" +msgstr "" + +#: src/filed/status.c:450 +msgid "Bacula Client: Last Job Failed" +msgstr "" + +#: src/filed/status.c:454 +msgid "Bacula Client: Last Job had Warnings" +msgstr "" + +#: src/filed/verify.c:53 +#, c-format +msgid "Cannot malloc %d network read buffer\n" +msgstr "" + +#: src/filed/verify.c:146 +#, c-format +msgid " Archive file skipped: %s\n" +msgstr "" + +#: src/filed/verify.c:149 +#, c-format +msgid " Recursion turned off. Directory skipped: %s\n" +msgstr "" + +#: src/filed/verify.c:153 +#, c-format +msgid " File system change prohibited. Directory skipped: %s\n" +msgstr "" + +#: src/filed/verify.c:163 +#, c-format +msgid " Unknown file type %d: %s\n" +msgstr "" + +#: src/filed/verify.c:206 src/filed/verify_vol.c:209 +#, c-format +msgid "Network error in send to Director: ERR=%s\n" +msgstr "" + +#: src/filed/verify.c:352 +#, c-format +msgid "Error reading file %s: ERR=%s\n" +msgstr "" + +#: src/filed/verify_vol.c:65 +msgid "Storage command not issued before Verify.\n" +msgstr "" + +#: src/filed/verify_vol.c:145 +#, c-format +msgid "Error scanning record header: %s\n" +msgstr "" + +#: src/findlib/attribs.c:408 +#, c-format +msgid "File size of restored file %s not correct. 
Original %s, restored %s.\n" +msgstr "" + +#: src/findlib/attribs.c:434 src/findlib/attribs.c:441 +#, c-format +msgid "Unable to set file owner %s: ERR=%s\n" +msgstr "" + +#: src/findlib/attribs.c:447 +#, c-format +msgid "Unable to set file modes %s: ERR=%s\n" +msgstr "" + +#: src/findlib/attribs.c:457 +#, c-format +msgid "Unable to set file times %s: ERR=%s\n" +msgstr "" + +#: src/findlib/attribs.c:471 +#, c-format +msgid "Unable to set file flags %s: ERR=%s\n" +msgstr "" + +#: src/findlib/attribs.c:714 +#, c-format +msgid "Error in %s file %s: ERR=%s\n" +msgstr "" + +#: src/findlib/attribs.c:731 +#, c-format +msgid "Error in %s: ERR=%s\n" +msgstr "" + +#: src/findlib/bfile.c:82 +msgid "Unix attributes" +msgstr "" + +#: src/findlib/bfile.c:84 +msgid "File data" +msgstr "" + +#: src/findlib/bfile.c:86 +msgid "MD5 digest" +msgstr "" + +#: src/findlib/bfile.c:88 +msgid "GZIP data" +msgstr "" + +#: src/findlib/bfile.c:90 +msgid "Extended attributes" +msgstr "" + +#: src/findlib/bfile.c:92 +msgid "Sparse data" +msgstr "" + +#: src/findlib/bfile.c:94 +msgid "GZIP sparse data" +msgstr "" + +#: src/findlib/bfile.c:96 +msgid "Program names" +msgstr "" + +#: src/findlib/bfile.c:98 +msgid "Program data" +msgstr "" + +#: src/findlib/bfile.c:100 +msgid "SHA1 digest" +msgstr "" + +#: src/findlib/bfile.c:102 +msgid "Win32 data" +msgstr "" + +#: src/findlib/bfile.c:104 +msgid "Win32 GZIP data" +msgstr "" + +#: src/findlib/bfile.c:106 +msgid "MacOS Fork data" +msgstr "" + +#: src/findlib/bfile.c:108 +msgid "HFS+ attribs" +msgstr "" + +#: src/findlib/bfile.c:110 +msgid "Standard Unix ACL attribs" +msgstr "" + +#: src/findlib/bfile.c:112 +msgid "Default Unix ACL attribs" +msgstr "" + +#: src/findlib/bfile.c:114 +msgid "SHA256 digest" +msgstr "" + +#: src/findlib/bfile.c:116 +msgid "SHA512 digest" +msgstr "" + +#: src/findlib/bfile.c:118 +msgid "Signed digest" +msgstr "" + +#: src/findlib/bfile.c:120 +msgid "Encrypted File data" +msgstr "" + +#: src/findlib/bfile.c:122 +msgid "Encrypted Win32 data" +msgstr "" + +#: src/findlib/bfile.c:124 +msgid "Encrypted session data" +msgstr "" + +#: src/findlib/bfile.c:126 +msgid "Encrypted GZIP data" +msgstr "" + +#: src/findlib/bfile.c:128 +msgid "Encrypted Win32 GZIP data" +msgstr "" + +#: src/findlib/bfile.c:130 +msgid "Encrypted MacOS fork data" +msgstr "" + +#: src/findlib/create_file.c:123 +#, c-format +msgid "File skipped. Not newer: %s\n" +msgstr "" + +#: src/findlib/create_file.c:130 +#, c-format +msgid "File skipped. Not older: %s\n" +msgstr "" + +#: src/findlib/create_file.c:136 +#, c-format +msgid "File skipped. Already exists: %s\n" +msgstr "" + +#: src/findlib/create_file.c:162 +#, c-format +msgid "File %s already exists and could not be replaced. 
ERR=%s.\n" +msgstr "" + +#: src/findlib/create_file.c:214 src/findlib/create_file.c:277 +#: src/findlib/create_file.c:370 +#, c-format +msgid "bpkt already open fid=%d\n" +msgstr "" + +#: src/findlib/create_file.c:222 +#, c-format +msgid "Could not create %s: ERR=%s\n" +msgstr "" + +#: src/findlib/create_file.c:236 +#, c-format +msgid "Cannot make fifo %s: ERR=%s\n" +msgstr "" + +#: src/findlib/create_file.c:254 +#, c-format +msgid "Cannot make node %s: ERR=%s\n" +msgstr "" + +#: src/findlib/create_file.c:299 +#, c-format +msgid "Could not symlink %s -> %s: ERR=%s\n" +msgstr "" + +#: src/findlib/create_file.c:324 src/findlib/create_file.c:335 +#, c-format +msgid "Could not restore file flags for file %s: ERR=%s\n" +msgstr "" + +#: src/findlib/create_file.c:328 src/findlib/create_file.c:343 +#, c-format +msgid "Could not hard link %s -> %s: ERR=%s\n" +msgstr "" + +#: src/findlib/create_file.c:339 +#, c-format +msgid "Could not reset file flags for file %s: ERR=%s\n" +msgstr "" + +#: src/findlib/create_file.c:402 +#, c-format +msgid "Original file %s not saved: type=%d\n" +msgstr "" + +#: src/findlib/create_file.c:405 +#, c-format +msgid "Unknown file type %d; not restored: %s\n" +msgstr "" + +#: src/findlib/create_file.c:449 +#, c-format +msgid "Zero length filename: %s\n" +msgstr "" + +#: src/findlib/enable_priv.c:92 +msgid "AdjustTokenPrivileges set " +msgstr "" + +#: src/findlib/find_one.c:228 +#, c-format +msgid "Cannot stat file %s: ERR=%s\n" +msgstr "" + +#: src/findlib/find_one.c:234 +#, c-format +msgid "%s mtime changed during backup.\n" +msgstr "" + +#: src/findlib/find_one.c:240 +#, c-format +msgid "%s ctime changed during backup.\n" +msgstr "" + +#: src/findlib/find_one.c:246 src/findlib/find_one.c:253 +#, c-format +msgid "%s size changed during backup.\n" +msgstr "" + +#: src/findlib/find_one.c:310 +#, c-format +msgid "Top level directory \"%s\" has unlisted fstype \"%s\"\n" +msgstr "" + +#: src/findlib/find_one.c:325 +#, c-format +msgid "Top level directory \"%s\" has an unlisted drive type \"%s\"\n" +msgstr "" + +#: src/findlib/makepath.c:116 +#, c-format +msgid "Cannot create directory %s: ERR=%s\n" +msgstr "" + +#: src/findlib/makepath.c:120 src/findlib/makepath.c:398 +#, c-format +msgid "%s exists but is not a directory\n" +msgstr "" + +#: src/findlib/makepath.c:229 +#, c-format +msgid "%c: is not a valid drive\n" +msgstr "" + +#: src/findlib/makepath.c:296 src/findlib/makepath.c:357 +#: src/findlib/makepath.c:417 +#, c-format +msgid "Cannot change owner and/or group of %s: ERR=%s\n" +msgstr "" + +#: src/findlib/makepath.c:317 +#, c-format +msgid "Cannot chdir to directory, %s: ERR=%s\n" +msgstr "" + +#: src/findlib/makepath.c:372 src/findlib/makepath.c:388 +#: src/findlib/makepath.c:422 +#, c-format +msgid "Cannot change permissions of %s: ERR=%s\n" +msgstr "" + +#: src/findlib/save-cwd.c:48 +#, c-format +msgid "Cannot open current directory: %s\n" +msgstr "" + +#: src/findlib/save-cwd.c:62 +#, c-format +msgid "Current directory: %s\n" +msgstr "" + +#: src/findlib/save-cwd.c:84 +#, c-format +msgid "Cannot get current directory: %s\n" +msgstr "" + +#: src/findlib/save-cwd.c:105 +#, c-format +msgid "Cannot return to %s from %s: %s\n" +msgstr "" + +#: src/findlib/save-cwd.c:109 +#, c-format +msgid "Cannot return to saved working directory from %s: %s\n" +msgstr "" + +#: src/findlib/save-cwd.c:115 +#, c-format +msgid "Cannot return to %s: %s\n" +msgstr "" + +#: src/findlib/save-cwd.c:119 +#, c-format +msgid "Cannot return to saved working directory: %s\n" +msgstr "" + +#: 
src/gnome2-console/authenticate.c:87 +#, c-format +msgid "%s: Director authorization problem.\n" +msgstr "" + +#: src/gnome2-console/authenticate.c:88 +msgid "Director authorization problem.\n" +msgstr "" + +#: src/gnome2-console/authenticate.c:90 +msgid "" +"Please see http://www.bacula.org/rel-manual/faq.html#AuthorizationErrors for " +"help.\n" +msgstr "" + +#: src/gnome2-console/authenticate.c:100 +#, c-format +msgid "%s: Bad response to Hello command: ERR=%s\n" +msgstr "" + +#: src/gnome2-console/authenticate.c:102 +msgid "The Director is probably not running.\n" +msgstr "" + +#: src/gnome2-console/console.c:102 +#, c-format +msgid "" +"\n" +"Version: %s (%s) %s %s %s\n" +"\n" +"Usage: bgnome-console [-s] [-c config_file] [-d debug_level] [config_file]\n" +" -c set configuration file to file\n" +" -dnn set debug level to nn\n" +" -s no signals\n" +" -t test - read configuration and exit\n" +" -? print this message.\n" +"\n" +msgstr "" + +#: src/gnome2-console/console.c:234 +#, c-format +msgid "Pthread cond init error = %s\n" +msgstr "" + +#: src/gnome2-console/console.c:379 +msgid " Not Connected" +msgstr "" + +#: src/gnome2-console/console.c:500 +#, c-format +msgid " Connecting to Director %s:%d" +msgstr "" + +#: src/gnome2-console/console.c:501 +#, c-format +msgid "" +"Connecting to Director %s:%d\n" +"\n" +msgstr "" + +#: src/gnome2-console/console.c:516 src/wx-console/console_thread.cpp:381 +#, c-format +msgid "Passphrase for Console \"%s\" TLS private key: " +msgstr "" + +#: src/gnome2-console/console.c:538 src/wx-console/console_thread.cpp:402 +#, c-format +msgid "Passphrase for Director \"%s\" TLS private key: " +msgstr "" + +#: src/gnome2-console/console.c:557 src/tray-monitor/tray-monitor.c:907 +#: src/wx-console/console_thread.cpp:420 +msgid "Director daemon" +msgstr "" + +#: src/gnome2-console/console.c:569 +msgid " Initializing ..." +msgstr "" + +#: src/gnome2-console/console.c:605 +msgid " Connected" +msgstr "" + +#: src/gnome2-console/console.c:613 +msgid " Processing command ..." +msgstr "" + +#: src/gnome2-console/console.c:648 +msgid " At prompt waiting for input ..." 
+msgstr "" + +#: src/gnome2-console/console.c:768 +msgid " Ready" +msgstr "" + +#: src/gnome2-console/console_conf.c:147 +#, c-format +msgid "Console: name=%s\n" +msgstr "" + +#: src/gnome2-console/console_conf.c:150 src/tray-monitor/tray_conf.c:195 +#, c-format +msgid "ConsoleFont: name=%s font face=%s\n" +msgstr "" + +#: src/gnome2-console/interface.c:202 +msgid "Bacula Console" +msgstr "" + +#: src/gnome2-console/interface.c:235 src/wx-console/wxbmainframe.cpp:248 +#: src/wx-console/wxbmainframe.cpp:619 +msgid "Connect" +msgstr "" + +#: src/gnome2-console/interface.c:238 +msgid "Connect to Director" +msgstr "" + +#: src/gnome2-console/interface.c:242 +msgid "Run" +msgstr "" + +#: src/gnome2-console/interface.c:245 src/gnome2-console/interface.c:682 +#: src/gnome2-console/interface.c:696 +msgid "Run a Job" +msgstr "" + +#: src/gnome2-console/interface.c:249 +msgid "Dir Status" +msgstr "" + +#: src/gnome2-console/interface.c:255 src/lib/util.c:293 +#: src/wx-console/wxbrestorepanel.cpp:404 +#: src/wx-console/wxbrestorepanel.cpp:1974 +msgid "Restore" +msgstr "" + +#: src/gnome2-console/interface.c:261 +msgid "Label" +msgstr "" + +#: src/gnome2-console/interface.c:267 +msgid "Msgs" +msgstr "" + +#: src/gnome2-console/interface.c:270 +msgid "Display Messages" +msgstr "" + +#: src/gnome2-console/interface.c:294 +msgid " Command: " +msgstr "" + +#: src/gnome2-console/interface.c:303 +msgid "Enter Commands Here" +msgstr "" + +#: src/gnome2-console/interface.c:314 +msgid " Status: " +msgstr "" + +#: src/gnome2-console/interface.c:323 src/gnome2-console/interface.c:1709 +msgid " " +msgstr "" + +#: src/gnome2-console/interface.c:424 +msgid "About Bacula Console" +msgstr "" + +#: src/gnome2-console/interface.c:439 +msgid "Bacula Console\n" +msgstr "" + +#: src/gnome2-console/interface.c:448 +msgid "Copyright (c) 2000 - 2004, Kern Sibbald and John Walker" +msgstr "" + +#: src/gnome2-console/interface.c:453 +msgid "Authors: Kern Sibbald and John Walker" +msgstr "" + +#: src/gnome2-console/interface.c:458 +msgid "The Leading Open Source Backup Solution." 
+msgstr "" + +#: src/gnome2-console/interface.c:523 src/gnome2-console/interface.c:540 +msgid "Select Director" +msgstr "" + +#: src/gnome2-console/interface.c:570 +msgid " " +msgstr "" + +#: src/gnome2-console/interface.c:715 src/gnome2-console/interface.c:1631 +msgid "Job:" +msgstr "" + +#: src/gnome2-console/interface.c:736 +msgid " Type:" +msgstr "" + +#: src/gnome2-console/interface.c:756 src/gnome2-console/interface.c:785 +#: src/gnome2-console/interface.c:1284 src/gnome2-console/interface.c:1680 +msgid " " +msgstr "" + +#: src/gnome2-console/interface.c:765 src/gnome2-console/interface.c:1660 +#: src/wx-console/wxbrestorepanel.cpp:1886 +msgid "Client:" +msgstr "" + +#: src/gnome2-console/interface.c:795 src/gnome2-console/interface.c:1689 +msgid "FileSet: " +msgstr "" + +#: src/gnome2-console/interface.c:820 src/wx-console/wxbrestorepanel.cpp:1893 +msgid "Priority:" +msgstr "" + +#: src/gnome2-console/interface.c:836 +msgid "Level:" +msgstr "" + +#: src/gnome2-console/interface.c:857 +msgid " " +msgstr "" + +#: src/gnome2-console/interface.c:867 src/gnome2-console/interface.c:1433 +#: src/gnome2-console/interface.c:1718 +msgid "Pool:" +msgstr "" + +#: src/gnome2-console/interface.c:888 src/gnome2-console/interface.c:919 +#: src/gnome2-console/interface.c:949 src/gnome2-console/interface.c:969 +#: src/gnome2-console/interface.c:989 src/gnome2-console/interface.c:1009 +#: src/gnome2-console/interface.c:1014 src/gnome2-console/interface.c:1483 +#: src/gnome2-console/interface.c:1739 src/gnome2-console/interface.c:1769 +#: src/gnome2-console/interface.c:1788 src/gnome2-console/interface.c:1793 +msgid " " +msgstr "" + +#: src/gnome2-console/interface.c:898 src/gnome2-console/interface.c:1407 +#: src/gnome2-console/interface.c:1748 src/wx-console/wxbrestorepanel.cpp:1888 +msgid "Storage:" +msgstr "" + +#: src/gnome2-console/interface.c:929 +msgid "Messages:" +msgstr "" + +#: src/gnome2-console/interface.c:959 +msgid "Where: " +msgstr "" + +#: src/gnome2-console/interface.c:979 src/wx-console/wxbrestorepanel.cpp:1890 +msgid "When:" +msgstr "" + +#: src/gnome2-console/interface.c:999 src/wx-console/wxbrestorepanel.cpp:1871 +msgid "Bootstrap:" +msgstr "" + +#: src/gnome2-console/interface.c:1194 +msgid "Restore File Selection" +msgstr "" + +#: src/gnome2-console/interface.c:1238 +msgid "Current dir:" +msgstr "" + +#: src/gnome2-console/interface.c:1265 +msgid "Files Selected: " +msgstr "" + +#: src/gnome2-console/interface.c:1385 src/gnome2-console/interface.c:1396 +msgid "Label a Volume" +msgstr "" + +#: src/gnome2-console/interface.c:1458 +msgid "Volume Name:" +msgstr "" + +#: src/gnome2-console/interface.c:1472 +msgid "Slot:" +msgstr "" + +#: src/gnome2-console/interface.c:1598 +msgid "Restore Files Dialog" +msgstr "" + +#: src/gnome2-console/interface.c:1613 +msgid "Restore Files" +msgstr "" + +#: src/gnome2-console/interface.c:1778 +msgid "Before:" +msgstr "" + +#: src/gnome2-console/interface.c:1824 +msgid "Select Files" +msgstr "" + +#: src/gnome2-console/interface.c:1907 +msgid "Progress" +msgstr "" + +#: src/gnome2-console/restore.c:149 +msgid "Mark" +msgstr "" + +#: src/gnome2-console/restore.c:149 +msgid "File" +msgstr "" + +#: src/gnome2-console/restore.c:149 +msgid "Mode" +msgstr "" + +#: src/gnome2-console/restore.c:149 src/wx-console/wxbrestorepanel.cpp:306 +msgid "User" +msgstr "" + +#: src/gnome2-console/restore.c:149 src/wx-console/wxbrestorepanel.cpp:310 +msgid "Group" +msgstr "" + +#: src/gnome2-console/restore.c:149 src/wx-console/wxbrestorepanel.cpp:294 +msgid "Size" 
+msgstr "" + +#: src/gnome2-console/restore.c:149 src/wx-console/wxbrestorepanel.cpp:298 +msgid "Date" +msgstr "" + +#: src/gnome2-console/support.c:60 src/gnome2-console/support.c:85 +#, c-format +msgid "Couldn't find pixmap file: %s" +msgstr "" + +#: src/lib/address_conf.c:63 +#, c-format +msgid "Only ipv4 and ipv6 are supported (%d)\n" +msgstr "" + +#: src/lib/address_conf.c:67 +#, c-format +msgid "Only ipv4 is supported (%d)\n" +msgstr "" + +#: src/lib/address_conf.c:176 +#, c-format +msgid "It was tried to assign a ipv6 address to a ipv4(%d)\n" +msgstr "" + +#: src/lib/address_conf.c:185 +#, c-format +msgid "It was tried to assign a ipv4 address to a ipv6(%d)\n" +msgstr "" + +#: src/lib/address_conf.c:264 +#, c-format +msgid "Can't add default address (%s)\n" +msgstr "" + +#: src/lib/address_conf.c:293 +msgid "the old style addresses cannot be mixed with new style" +msgstr "" + +#: src/lib/address_conf.c:314 +#, c-format +msgid "can't resolve service(%s)" +msgstr "" + +#: src/lib/address_conf.c:323 +#, c-format +msgid "can't resolve hostname(%s) %s" +msgstr "" + +#: src/lib/address_conf.c:413 src/lib/address_conf.c:444 +#, c-format +msgid "Expected a block begin { , got: %s" +msgstr "" + +#: src/lib/address_conf.c:418 +msgid "Empty addr block is not allowed" +msgstr "" + +#: src/lib/address_conf.c:422 +#, c-format +msgid "Expected a string, got: %s" +msgstr "" + +#: src/lib/address_conf.c:431 +#, c-format +msgid "Expected a string [ip|ipv4|ipv6], got: %s" +msgstr "" + +#: src/lib/address_conf.c:435 +#, c-format +msgid "Expected a string [ip|ipv4], got: %s" +msgstr "" + +#: src/lib/address_conf.c:440 src/lib/address_conf.c:470 +#, c-format +msgid "Expected a equal =, got: %s" +msgstr "" + +#: src/lib/address_conf.c:451 src/lib/address_conf.c:466 +#, c-format +msgid "Expected a identifier [addr|port], got: %s" +msgstr "" + +#: src/lib/address_conf.c:456 +msgid "Only one port per address block" +msgstr "" + +#: src/lib/address_conf.c:462 +msgid "Only one addr per address block" +msgstr "" + +#: src/lib/address_conf.c:478 +#, c-format +msgid "Expected a number or a string, got: %s" +msgstr "" + +#: src/lib/address_conf.c:484 src/lib/address_conf.c:517 +#, c-format +msgid "Expected an IP number or a hostname, got: %s" +msgstr "" + +#: src/lib/address_conf.c:490 +msgid "State machine missmatch" +msgstr "" + +#: src/lib/address_conf.c:496 src/lib/address_conf.c:508 +#, c-format +msgid "Expected a end of block }, got: %s" +msgstr "" + +#: src/lib/address_conf.c:502 +#, c-format +msgid "Can't add hostname(%s) and port(%s) to addrlist (%s)" +msgstr "" + +#: src/lib/address_conf.c:522 src/lib/address_conf.c:536 +#, c-format +msgid "can't add port (%s) to (%s)" +msgstr "" + +#: src/lib/address_conf.c:531 +#, c-format +msgid "Expected a port number or string, got: %s" +msgstr "" + +#: src/lib/attr.c:76 +#, c-format +msgid "Error scanning attributes: %s\n" +msgstr "" + +#: src/lib/berrno.c:62 +msgid "Child exited normally." +msgstr "" + +#: src/lib/berrno.c:69 +msgid "Unknown error during program execvp" +msgstr "" + +#: src/lib/berrno.c:72 +#, c-format +msgid "Child exited with code %d" +msgstr "" + +#: src/lib/berrno.c:80 +#, c-format +msgid "Child died from signal %d: %s" +msgstr "" + +#: src/lib/berrno.c:86 +msgid "Invalid errno. No error message possible." +msgstr "" + +#: src/lib/bget_msg.c:99 +msgid "Status OK\n" +msgstr "" + +#: src/lib/bget_msg.c:103 +#, c-format +msgid "bget_msg: unknown signal %d\n" +msgstr "" + +#: src/lib/bnet.c:118 +#, c-format +msgid "Attr spool write error. 
ERR=%s\n" +msgstr "" + +#: src/lib/bnet.c:244 src/lib/bnet.c:283 +msgid "TLS connection initialization failed.\n" +msgstr "" + +#: src/lib/bnet.c:252 +msgid "TLS Negotiation failed.\n" +msgstr "" + +#: src/lib/bnet.c:258 src/lib/bnet.c:298 +msgid "" +"TLS certificate verification failed. Peer certificate did not match a " +"required commonName\n" +msgstr "" + +#: src/lib/bnet.c:305 +#, c-format +msgid "" +"TLS host certificate verification failed. Host %s did not match presented " +"certificate\n" +msgstr "" + +#: src/lib/bnet.c:322 +msgid "TLS enabled but not configured.\n" +msgstr "" + +#: src/lib/bnet.c:328 +msgid "TLS enable but not configured.\n" +msgstr "" + +#: src/lib/bnet.c:386 +msgid "No problem." +msgstr "" + +#: src/lib/bnet.c:389 +msgid "Authoritative answer for host not found." +msgstr "" + +#: src/lib/bnet.c:392 +msgid "Non-authoritative for host not found, or ServerFail." +msgstr "" + +#: src/lib/bnet.c:395 +msgid "Non-recoverable errors, FORMERR, REFUSED, or NOTIMP." +msgstr "" + +#: src/lib/bnet.c:398 +msgid "Valid name, no data record of resquested type." +msgstr "" + +#: src/lib/bnet.c:401 +msgid "Unknown error." +msgstr "" + +#: src/lib/bnet.c:655 +#, c-format +msgid "Unknown sig %d" +msgstr "" + +#: src/lib/bnet_server.c:109 +#, c-format +msgid "Cannot open stream socket. ERR=%s. Current %s All %s\n" +msgstr "" + +#: src/lib/bnet_server.c:122 src/lib/bnet_server.c:275 +#, c-format +msgid "Cannot set SO_REUSEADDR on socket: %s\n" +msgstr "" + +#: src/lib/bnet_server.c:131 +#, c-format +msgid "Cannot bind port %d: ERR=%s: Retrying ...\n" +msgstr "" + +#: src/lib/bnet_server.c:136 +#, c-format +msgid "Cannot bind port %d: ERR=%s.\n" +msgstr "" + +#: src/lib/bnet_server.c:147 +#, c-format +msgid "Could not init client queue: ERR=%s\n" +msgstr "" + +#: src/lib/bnet_server.c:166 src/lib/bnet_server.c:338 +#, c-format +msgid "Error in select: %s\n" +msgstr "" + +#: src/lib/bnet_server.c:187 src/lib/bnet_server.c:357 +#, c-format +msgid "Connection from %s:%d refused by hosts.access\n" +msgstr "" + +#: src/lib/bnet_server.c:202 src/lib/bnet_server.c:370 +#, c-format +msgid "Cannot set SO_KEEPALIVE on socket: %s\n" +msgstr "" + +#: src/lib/bnet_server.c:213 +msgid "Could not create client BSOCK.\n" +msgstr "" + +#: src/lib/bnet_server.c:220 +#, c-format +msgid "Could not add job to client queue: ERR=%s\n" +msgstr "" + +#: src/lib/bnet_server.c:237 +#, c-format +msgid "Could not destroy client queue: ERR=%s\n" +msgstr "" + +#: src/lib/bnet_server.c:265 +#, c-format +msgid "Cannot open stream socket: %s\n" +msgstr "" + +#: src/lib/bnet_server.c:295 +#, c-format +msgid "Cannot bind port %d: ERR=%s: retrying ...\n" +msgstr "" + +#: src/lib/bnet_server.c:301 +msgid "Server socket" +msgstr "" + +#: src/lib/bnet_server.c:301 src/lib/bnet_server.c:398 +msgid "client" +msgstr "" + +#: src/lib/bnet_server.c:386 +#, c-format +msgid "Socket accept error for %s. ERR=%s\n" +msgstr "" + +#: src/lib/bpipe.c:362 src/lib/bpipe.c:452 +msgid "Program killed by Bacula watchdog (timeout)\n" +msgstr "" + +#: src/lib/bsys.c:208 src/lib/bsys.c:225 src/lib/bsys.c:249 src/lib/bsys.c:262 +#, c-format +msgid "Out of memory: ERR=%s\n" +msgstr "" + +#: src/lib/bsys.c:304 +msgid "Buffer overflow.\n" +msgstr "" + +#: src/lib/bsys.c:370 +msgid "Bad errno" +msgstr "" + +#: src/lib/bsys.c:387 +msgid "Possible mutex deadlock.\n" +msgstr "" + +#: src/lib/bsys.c:391 src/lib/bsys.c:424 +#, c-format +msgid "Mutex lock failure. 
ERR=%s\n" +msgstr "" + +#: src/lib/bsys.c:394 +msgid "Possible mutex deadlock resolved.\n" +msgstr "" + +#: src/lib/bsys.c:407 +#, c-format +msgid "Mutex unlock not locked. ERR=%s\n" +msgstr "" + +#: src/lib/bsys.c:412 src/lib/bsys.c:434 +#, c-format +msgid "Mutex unlock failure. ERR=%s\n" +msgstr "" + +#: src/lib/bsys.c:448 +#, c-format +msgid "Memset for %d bytes at %s:%d\n" +msgstr "" + +#: src/lib/bsys.c:478 +#, c-format +msgid "Cannot open pid file. %s ERR=%s\n" +msgstr "" + +#: src/lib/bsys.c:493 +#, c-format +msgid "" +"%s is already running. pid=%d\n" +"Check file %s\n" +msgstr "" + +#: src/lib/bsys.c:507 +#, c-format +msgid "Could not open pid file. %s ERR=%s\n" +msgstr "" + +#: src/lib/bsys.c:615 +#, c-format +msgid "Could not create state file. %s ERR=%s\n" +msgstr "" + +#: src/lib/bsys.c:634 +#, c-format +msgid "Write final hdr error: ERR=%s\n" +msgstr "" + +#: src/lib/bsys.c:670 +#, c-format +msgid "Could not find userid=%s: ERR=%s\n" +msgstr "" + +#: src/lib/bsys.c:676 +#, c-format +msgid "Could not find password entry. ERR=%s\n" +msgstr "" + +#: src/lib/bsys.c:689 +#, c-format +msgid "Could not find group=%s: ERR=%s\n" +msgstr "" + +#: src/lib/bsys.c:697 +#, c-format +msgid "Could not initgroups for group=%s, userid=%s: ERR=%s\n" +msgstr "" + +#: src/lib/bsys.c:700 +#, c-format +msgid "Could not initgroups for userid=%s: ERR=%s\n" +msgstr "" + +#: src/lib/bsys.c:707 +#, c-format +msgid "Could not set group=%s: ERR=%s\n" +msgstr "" + +#: src/lib/bsys.c:713 +#, c-format +msgid "Could not set specified userid: %s\n" +msgstr "" + +#: src/lib/btimers.c:254 +msgid "stop_btimer called with NULL btimer_id\n" +msgstr "" + +#: src/lib/cram-md5.c:109 src/lib/cram-md5.c:137 +msgid "1999 Authorization failed.\n" +msgstr "" + +#: src/lib/crypto.c:435 +msgid "Unable to open certificate file" +msgstr "" + +#: src/lib/crypto.c:442 +msgid "Unable to read certificate from file" +msgstr "" + +#: src/lib/crypto.c:448 +msgid "Unable to extract public key from certificate" +msgstr "" + +#: src/lib/crypto.c:455 +msgid "" +"Provided certificate does not include the required subjectKeyIdentifier " +"extension." 
+msgstr "" + +#: src/lib/crypto.c:462 +#, c-format +msgid "Unsupported key type provided: %d\n" +msgstr "" + +#: src/lib/crypto.c:499 src/lib/crypto.c:547 +msgid "Unable to open private key file" +msgstr "" + +#: src/lib/crypto.c:529 src/lib/crypto.c:563 +msgid "Unable to read private key from file" +msgstr "" + +#: src/lib/crypto.c:622 +#, c-format +msgid "Unsupported digest type: %d\n" +msgstr "" + +#: src/lib/crypto.c:636 +msgid "OpenSSL digest initialization failed" +msgstr "" + +#: src/lib/crypto.c:650 +msgid "OpenSSL digest update failed" +msgstr "" + +#: src/lib/crypto.c:668 +msgid "OpenSSL digest finalize failed" +msgstr "" + +#: src/lib/crypto.c:766 +msgid "OpenSSL digest_new failed" +msgstr "" + +#: src/lib/crypto.c:772 +msgid "OpenSSL sign get digest failed" +msgstr "" + +#: src/lib/crypto.c:811 src/lib/crypto.c:815 +msgid "OpenSSL digest Verify final failed" +msgstr "" + +#: src/lib/crypto.c:820 +msgid "No signers found for crypto verify.\n" +msgstr "" + +#: src/lib/crypto.c:881 +msgid "Signature creation failed" +msgstr "" + +#: src/lib/crypto.c:959 +msgid "Signature decoding failed" +msgstr "" + +#: src/lib/crypto.c:1036 +msgid "Unsupported cipher type specified\n" +msgstr "" + +#: src/lib/crypto.c:1185 +msgid "CryptoData decoding failed" +msgstr "" + +#: src/lib/crypto.c:1229 +msgid "Failure decrypting the session key" +msgstr "" + +#: src/lib/crypto.c:1280 +#, c-format +msgid "Unsupported contentEncryptionAlgorithm: %d\n" +msgstr "" + +#: src/lib/crypto.c:1290 src/lib/crypto.c:1296 +msgid "OpenSSL cipher context initialization failed" +msgstr "" + +#: src/lib/crypto.c:1303 +msgid "Encryption session provided an invalid symmetric key" +msgstr "" + +#: src/lib/crypto.c:1309 +msgid "Encryption session provided an invalid IV" +msgstr "" + +#: src/lib/crypto.c:1315 +msgid "OpenSSL cipher context key/IV initialization failed" +msgstr "" + +#: src/lib/crypto.c:1385 +#, c-format +msgid "Unable to init OpenSSL threading: ERR=%s\n" +msgstr "" + +#: src/lib/crypto.c:1398 +msgid "Failed to seed OpenSSL PRNG\n" +msgstr "" + +#: src/lib/crypto.c:1424 +msgid "Failed to save OpenSSL PRNG\n" +msgstr "" + +#: src/lib/crypto.c:1485 +#, c-format +msgid "Unsupported digest type=%d specified\n" +msgstr "" + +#: src/lib/crypto.c:1505 +#, c-format +msgid "SHA1Update() returned an error: %d\n" +msgstr "" + +#: src/lib/crypto.c:1648 +msgid "No error" +msgstr "" + +#: src/lib/crypto.c:1650 +msgid "Signer not found" +msgstr "" + +#: src/lib/crypto.c:1652 +msgid "Recipient not found" +msgstr "" + +#: src/lib/crypto.c:1654 +msgid "Unsupported digest algorithm" +msgstr "" + +#: src/lib/crypto.c:1656 +msgid "Unsupported encryption algorithm" +msgstr "" + +#: src/lib/crypto.c:1658 +msgid "Signature is invalid" +msgstr "" + +#: src/lib/crypto.c:1660 +msgid "Decryption error" +msgstr "" + +#: src/lib/crypto.c:1663 +msgid "Internal error" +msgstr "" + +#: src/lib/crypto.c:1665 +msgid "Unknown error" +msgstr "" + +#: src/lib/daemon.c:66 +#, c-format +msgid "Cannot fork to become daemon: %s\n" +msgstr "" + +#: src/lib/edit.c:446 +#, c-format +msgid "Illegal character \"%c\" in name.\n" +msgstr "" + +#: src/lib/edit.c:453 +msgid "Name too long.\n" +msgstr "" + +#: src/lib/jcr.c:297 +msgid "NULL jcr.\n" +msgstr "" + +#: src/lib/jcr.c:430 +#, c-format +msgid "JCR use_count=%d JobId=%d\n" +msgstr "" + +#: src/lib/jcr.c:762 +#, c-format +msgid "" +"Watchdog sending kill after %d secs to thread stalled reading Storage " +"daemon.\n" +msgstr "" + +#: src/lib/jcr.c:774 +#, c-format +msgid "" +"Watchdog sending kill 
after %d secs to thread stalled reading File daemon.\n" +msgstr "" + +#: src/lib/jcr.c:786 +#, c-format +msgid "" +"Watchdog sending kill after %d secs to thread stalled reading Director.\n" +msgstr "" + +#: src/lib/lex.c:93 src/wx-console/console_thread.cpp:208 +#, c-format +msgid "Problem probably begins at line %d.\n" +msgstr "" + +#: src/lib/lex.c:98 src/wx-console/console_thread.cpp:213 +#, c-format +msgid "" +"Config error: %s\n" +" : line %d, col %d of file %s\n" +"%s\n" +"%s" +msgstr "" + +#: src/lib/lex.c:102 +#, c-format +msgid "Config error: %s\n" +msgstr "" + +#: src/lib/lex.c:131 +msgid "Close of NULL file\n" +msgstr "" + +#: src/lib/lex.c:226 +msgid "get_char: called after EOF\n" +msgstr "" + +#: src/lib/lex.c:268 +#, c-format +msgid "Config token too long, file: %s, line %d, begins at line %d\n" +msgstr "" + +#: src/lib/lex.c:292 +msgid "none" +msgstr "" + +#: src/lib/lex.c:293 +msgid "comment" +msgstr "" + +#: src/lib/lex.c:294 +msgid "number" +msgstr "" + +#: src/lib/lex.c:295 +msgid "ip_addr" +msgstr "" + +#: src/lib/lex.c:296 +msgid "identifier" +msgstr "" + +#: src/lib/lex.c:297 +msgid "string" +msgstr "" + +#: src/lib/lex.c:298 +msgid "quoted_string" +msgstr "" + +#: src/lib/lex.c:299 +msgid "UTF-8 Byte Order Mark" +msgstr "" + +#: src/lib/lex.c:300 +msgid "UTF-16le Byte Order Mark" +msgstr "" + +#: src/lib/lex.c:338 src/lib/lex.c:344 +#, c-format +msgid "expected a positive integer number, got: %s" +msgstr "" + +#: src/lib/lex.c:454 +msgid "" +"This config file appears to be in an unsupported Unicode format (UTF-16be). " +"Please resave as UTF-8\n" +msgstr "" + +#: src/lib/lex.c:583 +#, c-format +msgid "Cannot open included config file %s: %s\n" +msgstr "" + +#: src/lib/lex.c:642 +#, c-format +msgid "expected an integer or a range, got %s: %s" +msgstr "" + +#: src/lib/lex.c:656 src/lib/lex.c:664 src/lib/lex.c:675 src/lib/lex.c:683 +#, c-format +msgid "expected an integer number, got %s: %s" +msgstr "" + +#: src/lib/lex.c:693 +#, c-format +msgid "expected a name, got %s: %s" +msgstr "" + +#: src/lib/lex.c:697 +#, c-format +msgid "name %s length %d too long, max is %d\n" +msgstr "" + +#: src/lib/lex.c:705 +#, c-format +msgid "expected a string, got %s: %s" +msgstr "" + +#: src/lib/mem_pool.c:108 +#, c-format +msgid "MemPool index %d larger than max %d\n" +msgstr "" + +#: src/lib/mem_pool.c:126 src/lib/mem_pool.c:146 src/lib/mem_pool.c:181 +#: src/lib/mem_pool.c:253 src/lib/mem_pool.c:273 src/lib/mem_pool.c:311 +#: src/lib/mem_pool.c:583 +#, c-format +msgid "Out of memory requesting %d bytes\n" +msgstr "" + +#: src/lib/message.c:268 src/lib/message.c:278 +#, c-format +msgid "Could not open console message file %s: ERR=%s\n" +msgstr "" + +#: src/lib/message.c:283 +#, c-format +msgid "Could not get con mutex: ERR=%s\n" +msgstr "" + +#: src/lib/message.c:387 +#, c-format +msgid "open mail pipe %s failed: ERR=%s\n" +msgstr "" + +#: src/lib/message.c:393 +msgid "Bacula Message" +msgstr "" + +#: src/lib/message.c:453 +msgid "open mail pipe failed.\n" +msgstr "" + +#: src/lib/message.c:465 +#, c-format +msgid "close error: ERR=%s\n" +msgstr "" + +#: src/lib/message.c:476 +#, c-format +msgid "Mail prog: %s" +msgstr "" + +#: src/lib/message.c:485 +#, c-format +msgid "" +"Mail program terminated in error.\n" +"CMD=%s\n" +"ERR=%s\n" +msgstr "" + +#: src/lib/message.c:584 src/lib/message.c:735 +#, c-format +msgid "fopen %s failed: ERR=%s\n" +msgstr "" + +#: src/lib/message.c:717 +#, c-format +msgid "" +"Operator mail program terminated in error.\n" +"CMD=%s\n" +"ERR=%s\n" +msgstr "" 
+ +#: src/lib/message.c:1015 +#, c-format +msgid "%s: ABORTING due to ERROR in %s:%d\n" +msgstr "" + +#: src/lib/message.c:1019 +#, c-format +msgid "%s: ERROR TERMINATION at %s:%d\n" +msgstr "" + +#: src/lib/message.c:1024 +#, c-format +msgid "%s: Fatal Error because: " +msgstr "" + +#: src/lib/message.c:1026 +#, c-format +msgid "%s: Fatal Error at %s:%d because:\n" +msgstr "" + +#: src/lib/message.c:1030 +#, c-format +msgid "%s: ERROR: " +msgstr "" + +#: src/lib/message.c:1032 +#, c-format +msgid "%s: ERROR in %s:%d " +msgstr "" + +#: src/lib/message.c:1035 +#, c-format +msgid "%s: Warning: " +msgstr "" + +#: src/lib/message.c:1038 +#, c-format +msgid "%s: Security violation: " +msgstr "" + +#: src/lib/message.c:1114 +#, c-format +msgid "%s ABORTING due to ERROR\n" +msgstr "" + +#: src/lib/message.c:1117 +#, c-format +msgid "%s ERROR TERMINATION\n" +msgstr "" + +#: src/lib/message.c:1120 +#, c-format +msgid "%s: %s Fatal error: " +msgstr "" + +#: src/lib/message.c:1126 +#, c-format +msgid "%s: %s Error: " +msgstr "" + +#: src/lib/message.c:1132 +#, c-format +msgid "%s: %s Warning: " +msgstr "" + +#: src/lib/message.c:1135 +#, c-format +msgid "%s: %s Security violation: " +msgstr "" + +#: src/lib/openssl.c:118 src/lib/openssl.c:179 src/stored/dev.c:218 +#: src/stored/dev.c:236 src/stored/dev.c:243 src/stored/stored_conf.c:611 +#, c-format +msgid "Unable to init mutex: ERR=%s\n" +msgstr "" + +#: src/lib/openssl.c:140 src/lib/openssl.c:213 +#, c-format +msgid "Unable to destroy mutex: ERR=%s\n" +msgstr "" + +#: src/lib/parse_conf.c:180 +msgid "***UNKNOWN***" +msgstr "" + +#: src/lib/parse_conf.c:200 +#, c-format +msgid "Unable to initialize resource lock. ERR=%s\n" +msgstr "" + +#: src/lib/parse_conf.c:287 src/lib/parse_conf.c:306 +#, c-format +msgid "expected an =, got: %s" +msgstr "" + +#: src/lib/parse_conf.c:314 +#, c-format +msgid "Unknown item code: %d\n" +msgstr "" + +#: src/lib/parse_conf.c:354 +#, c-format +msgid "message type: %s not found" +msgstr "" + +#: src/lib/parse_conf.c:391 +#, c-format +msgid "Attempt to redefine name \"%s\" to \"%s\"." +msgstr "" + +#: src/lib/parse_conf.c:486 +#, c-format +msgid "Attempt to redefine resource \"%s\" referenced on line %d : %s\n" +msgstr "" + +#: src/lib/parse_conf.c:521 +#, c-format +msgid "Too many %s directives. Max. is %d. line %d: %s\n" +msgstr "" + +#: src/lib/parse_conf.c:531 +#, c-format +msgid "Could not find config Resource \"%s\" referenced on line %d : %s\n" +msgstr "" + +#: src/lib/parse_conf.c:593 +#, c-format +msgid "Missing config Resource \"%s\" referenced on line %d : %s\n" +msgstr "" + +#: src/lib/parse_conf.c:657 +#, c-format +msgid "expected a size number, got: %s" +msgstr "" + +#: src/lib/parse_conf.c:662 +#, c-format +msgid "expected a size, got: %s" +msgstr "" + +#: src/lib/parse_conf.c:699 src/lib/parse_conf.c:704 +#, c-format +msgid "expected a time period, got: %s" +msgstr "" + +#: src/lib/parse_conf.c:763 +#, c-format +msgid "Expected a Tape Label keyword, got: %s" +msgstr "" + +#: src/lib/parse_conf.c:819 +#, c-format +msgid "Cannot open config file \"%s\": %s\n" +msgstr "" + +#: src/lib/parse_conf.c:835 +msgid "" +"Currently we cannot handle UTF-16 source files. 
Please convert the conf file " +"to UTF-8\n" +msgstr "" + +#: src/lib/parse_conf.c:839 +#, c-format +msgid "Expected a Resource name identifier, got: %s" +msgstr "" + +#: src/lib/parse_conf.c:852 +#, c-format +msgid "expected resource name, got: %s" +msgstr "" + +#: src/lib/parse_conf.c:863 +#, c-format +msgid "not in resource definition: %s" +msgstr "" + +#: src/lib/parse_conf.c:888 +#, c-format +msgid "" +"Keyword \"%s\" not permitted in this resource.\n" +"Perhaps you left the trailing brace off of the previous resource." +msgstr "" + +#: src/lib/parse_conf.c:899 +msgid "Name not specified for resource" +msgstr "" + +#: src/lib/parse_conf.c:908 +#, c-format +msgid "unexpected token %d %s in resource definition" +msgstr "" + +#: src/lib/parse_conf.c:914 +#, c-format +msgid "Unknown parser state %d\n" +msgstr "" + +#: src/lib/parse_conf.c:919 +msgid "End of conf file reached with unclosed resource." +msgstr "" + +#: src/lib/pythonlib.c:127 +msgid "Could not initialize Python\n" +msgstr "" + +#: src/lib/pythonlib.c:132 +#, c-format +msgid "Could not Run Python string %s\n" +msgstr "" + +#: src/lib/pythonlib.c:144 +msgid "Could not initialize Python Job type.\n" +msgstr "" + +#: src/lib/pythonlib.c:149 +#, c-format +msgid "Could not import Python script %s/%s. Python disabled.\n" +msgstr "" + +#: src/lib/pythonlib.c:252 +msgid "Could not create Python Job Object.\n" +msgstr "" + +#: src/lib/pythonlib.c:265 src/lib/pythonlib.c:289 +#, c-format +msgid "Python function \"%s\" not found.\n" +msgstr "" + +#: src/lib/pythonlib.c:304 +#, c-format +msgid "Unknown Python daemon event %s\n" +msgstr "" + +#: src/lib/pythonlib.c:329 +#, c-format +msgid "Unable to initialize the Python lock. ERR=%s\n" +msgstr "" + +#: src/lib/res.c:66 +#, c-format +msgid "rwl_writelock failure at %s:%d: ERR=%s\n" +msgstr "" + +#: src/lib/res.c:76 +#, c-format +msgid "rwl_writeunlock failure at %s:%d:. ERR=%s\n" +msgstr "" + +#: src/lib/runscript.c:212 +#, c-format +msgid "%s: run command \"%s\"\n" +msgstr "" + +#: src/lib/runscript.c:218 +#, c-format +msgid "Runscript: %s could not execute. ERR=%s\n" +msgstr "" + +#: src/lib/runscript.c:227 +#, c-format +msgid "%s: %s\n" +msgstr "" + +#: src/lib/runscript.c:232 +#, c-format +msgid "Runscript: %s returned non-zero status=%d. ERR=%s\n" +msgstr "" + +#: src/lib/rwlock.c:297 +msgid "rwl_writeunlock called too many times.\n" +msgstr "" + +#: src/lib/rwlock.c:301 +msgid "rwl_writeunlock by non-owner.\n" +msgstr "" + +#: src/lib/rwlock.c:367 +#, c-format +msgid "Write lock failed. ERR=%s\n" +msgstr "" + +#: src/lib/rwlock.c:375 +#, c-format +msgid "Write unlock failed. ERR=%s\n" +msgstr "" + +#: src/lib/rwlock.c:386 +#, c-format +msgid "Read lock failed. ERR=%s\n" +msgstr "" + +#: src/lib/rwlock.c:394 +#, c-format +msgid "Read unlock failed. ERR=%s\n" +msgstr "" + +#: src/lib/rwlock.c:403 +#, c-format +msgid "Thread %d found unchanged elements %d times\n" +msgstr "" + +#: src/lib/rwlock.c:436 +#, c-format +msgid "Init rwlock failed. ERR=%s\n" +msgstr "" + +#: src/lib/rwlock.c:452 +#, c-format +msgid "Create thread failed. ERR=%s\n" +msgstr "" + +#: src/lib/rwlock.c:464 +#, c-format +msgid "Join thread failed. 
ERR=%s\n" +msgstr "" + +#: src/lib/rwlock.c:467 +#, c-format +msgid "%02d: interval %d, writes %d, reads %d\n" +msgstr "" + +#: src/lib/rwlock.c:477 +#, c-format +msgid "data %02d: value %d, %d writes\n" +msgstr "" + +#: src/lib/rwlock.c:482 +#, c-format +msgid "Total: %d thread writes, %d data writes\n" +msgstr "" + +#: src/lib/rwlock.c:554 +msgid "Try write lock" +msgstr "" + +#: src/lib/rwlock.c:560 +msgid "Try read lock" +msgstr "" + +#: src/lib/rwlock.c:615 +msgid "Create thread" +msgstr "" + +#: src/lib/rwlock.c:625 +msgid "Join thread" +msgstr "" + +#: src/lib/rwlock.c:627 +#, c-format +msgid "%02d: interval %d, updates %d, r_collisions %d, w_collisions %d\n" +msgstr "" + +#: src/lib/rwlock.c:639 +#, c-format +msgid "data %02d: value %d, %d updates\n" +msgstr "" + +#: src/lib/signal.c:68 +msgid "Invalid signal number" +msgstr "" + +#: src/lib/signal.c:94 +#, c-format +msgid "Bacula interrupted by signal %d: %s\n" +msgstr "" + +#: src/lib/signal.c:107 +#, c-format +msgid "Kaboom! %s, %s got signal %d - %s. Attempting traceback.\n" +msgstr "" + +#: src/lib/signal.c:109 +#, c-format +msgid "Kaboom! exepath=%s\n" +msgstr "" + +#: src/lib/signal.c:143 +#, c-format +msgid "Fork error: ERR=%s\n" +msgstr "" + +#: src/lib/signal.c:150 +#, c-format +msgid "Calling: %s %s %s\n" +msgstr "" + +#: src/lib/signal.c:153 +#, c-format +msgid "execv: %s failed: ERR=%s\n" +msgstr "" + +#: src/lib/signal.c:168 +#, c-format +msgid "Traceback complete, attempting cleanup ...\n" +msgstr "" + +#: src/lib/signal.c:176 +#, c-format +msgid "It looks like the traceback worked ...\n" +msgstr "" + +#: src/lib/signal.c:205 +#, c-format +msgid "BA_NSIG too small (%d) should be (%d)\n" +msgstr "" + +#: src/lib/signal.c:211 +msgid "UNKNOWN SIGNAL" +msgstr "" + +#: src/lib/signal.c:212 +msgid "Hangup" +msgstr "" + +#: src/lib/signal.c:213 +msgid "Interrupt" +msgstr "" + +#: src/lib/signal.c:214 +msgid "Quit" +msgstr "" + +#: src/lib/signal.c:215 +msgid "Illegal instruction" +msgstr "" + +#: src/lib/signal.c:216 +msgid "Trace/Breakpoint trap" +msgstr "" + +#: src/lib/signal.c:217 +msgid "Abort" +msgstr "" + +#: src/lib/signal.c:219 +msgid "EMT instruction (Emulation Trap)" +msgstr "" + +#: src/lib/signal.c:222 +msgid "IOT trap" +msgstr "" + +#: src/lib/signal.c:224 +msgid "BUS error" +msgstr "" + +#: src/lib/signal.c:225 +msgid "Floating-point exception" +msgstr "" + +#: src/lib/signal.c:226 +msgid "Kill, unblockable" +msgstr "" + +#: src/lib/signal.c:227 +msgid "User-defined signal 1" +msgstr "" + +#: src/lib/signal.c:228 +msgid "Segmentation violation" +msgstr "" + +#: src/lib/signal.c:229 +msgid "User-defined signal 2" +msgstr "" + +#: src/lib/signal.c:230 +msgid "Broken pipe" +msgstr "" + +#: src/lib/signal.c:231 +msgid "Alarm clock" +msgstr "" + +#: src/lib/signal.c:232 +msgid "Termination" +msgstr "" + +#: src/lib/signal.c:234 +msgid "Stack fault" +msgstr "" + +#: src/lib/signal.c:236 +msgid "Child status has changed" +msgstr "" + +#: src/lib/signal.c:237 +msgid "Continue" +msgstr "" + +#: src/lib/signal.c:238 +msgid "Stop, unblockable" +msgstr "" + +#: src/lib/signal.c:239 +msgid "Keyboard stop" +msgstr "" + +#: src/lib/signal.c:240 +msgid "Background read from tty" +msgstr "" + +#: src/lib/signal.c:241 +msgid "Background write to tty" +msgstr "" + +#: src/lib/signal.c:242 +msgid "Urgent condition on socket" +msgstr "" + +#: src/lib/signal.c:243 +msgid "CPU limit exceeded" +msgstr "" + +#: src/lib/signal.c:244 +msgid "File size limit exceeded" +msgstr "" + +#: src/lib/signal.c:245 +msgid "Virtual alarm clock" 
+msgstr "" + +#: src/lib/signal.c:246 +msgid "Profiling alarm clock" +msgstr "" + +#: src/lib/signal.c:247 +msgid "Window size change" +msgstr "" + +#: src/lib/signal.c:248 +msgid "I/O now possible" +msgstr "" + +#: src/lib/signal.c:250 +msgid "Power failure restart" +msgstr "" + +#: src/lib/signal.c:253 +msgid "No runnable lwp" +msgstr "" + +#: src/lib/signal.c:256 +msgid "SIGLWP special signal used by thread library" +msgstr "" + +#: src/lib/signal.c:259 +msgid "Checkpoint Freeze" +msgstr "" + +#: src/lib/signal.c:262 +msgid "Checkpoint Thaw" +msgstr "" + +#: src/lib/signal.c:265 +msgid "Thread Cancellation" +msgstr "" + +#: src/lib/signal.c:268 +msgid "Resource Lost (e.g. record-lock lost)" +msgstr "" + +#: src/lib/smartall.c:146 src/lib/smartall.c:255 src/lib/smartall.c:270 +msgid "Out of memory\n" +msgstr "" + +#: src/lib/smartall.c:151 +msgid "Too much memory used." +msgstr "" + +#: src/lib/smartall.c:180 +#, c-format +msgid "Attempt to free NULL called from %s:%d\n" +msgstr "" + +#: src/lib/smartall.c:194 +#, c-format +msgid "double free from %s:%d\n" +msgstr "" + +#: src/lib/smartall.c:202 +#, c-format +msgid "qp->qnext->qprev != qp called from %s:%d\n" +msgstr "" + +#: src/lib/smartall.c:206 +#, c-format +msgid "qp->qprev->qnext != qp called from %s:%d\n" +msgstr "" + +#: src/lib/smartall.c:215 +#, c-format +msgid "Buffer overrun called from %s:%d\n" +msgstr "" + +#: src/lib/smartall.c:292 +#, c-format +msgid "sm_realloc size: %d\n" +msgstr "" + +#: src/lib/smartall.c:330 +#, c-format +msgid "sm_realloc %d at %x from %s:%d\n" +msgstr "" + +#: src/lib/smartall.c:392 +#, c-format +msgid "" +"\n" +"Orphaned buffers exist. Dump terminated following\n" +" discovery of bad links in chain of orphaned buffers.\n" +" Buffer address with bad links: %p\n" +msgstr "" + +#: src/lib/smartall.c:404 +#, c-format +msgid "%s buffer: %s %6u bytes buf=%p allocated at %s:%d\n" +msgstr "" + +#: src/lib/smartall.c:440 +#, c-format +msgid "Damaged buffer found. 
Called from %s:%d\n" +msgstr "" + +#: src/lib/smartall.c:470 +#, c-format +msgid "" +"\n" +"Damaged buffers found at %s:%d\n" +msgstr "" + +#: src/lib/smartall.c:473 +#, c-format +msgid " discovery of bad prev link.\n" +msgstr "" + +#: src/lib/smartall.c:476 +#, c-format +msgid " discovery of bad next link.\n" +msgstr "" + +#: src/lib/smartall.c:479 +#, c-format +msgid " discovery of data overrun.\n" +msgstr "" + +#: src/lib/smartall.c:482 +#, c-format +msgid " Buffer address: %p\n" +msgstr "" + +#: src/lib/smartall.c:489 +#, c-format +msgid "Damaged buffer: %6u bytes allocated at line %d of %s %s\n" +msgstr "" + +#: src/lib/tls.c:93 +#, c-format +msgid "" +"Error with certificate at depth: %d, issuer = %s, subject = %s, ERR=%d:%s\n" +msgstr "" + +#: src/lib/tls.c:130 +msgid "Error initializing SSL context" +msgstr "" + +#: src/lib/tls.c:151 +msgid "Error loading certificate verification stores" +msgstr "" + +#: src/lib/tls.c:156 +msgid "" +"Either a certificate file or a directory must be specified as a verification " +"store\n" +msgstr "" + +#: src/lib/tls.c:167 +msgid "Error loading certificate file" +msgstr "" + +#: src/lib/tls.c:175 +msgid "Error loading private key" +msgstr "" + +#: src/lib/tls.c:183 +msgid "Unable to open DH parameters file" +msgstr "" + +#: src/lib/tls.c:189 +msgid "Unable to load DH parameters from specified file" +msgstr "" + +#: src/lib/tls.c:193 +msgid "Failed to set TLS Diffie-Hellman parameters" +msgstr "" + +#: src/lib/tls.c:203 +msgid "Error setting cipher list, no valid ciphers available\n" +msgstr "" + +#: src/lib/tls.c:262 +msgid "Peer failed to present a TLS certificate\n" +msgstr "" + +#: src/lib/tls.c:305 +#, c-format +msgid "Peer %s failed to present a TLS certificate\n" +msgstr "" + +#: src/lib/tls.c:407 +msgid "Error creating file descriptor-based BIO" +msgstr "" + +#: src/lib/tls.c:418 +msgid "Error creating new SSL object" +msgstr "" + +#: src/lib/tls.c:481 src/lib/tls.c:504 +msgid "Connect failure" +msgstr "" + +#: src/lib/tls.c:576 src/lib/tls.c:580 +msgid "TLS shutdown failure." +msgstr "" + +#: src/lib/tls.c:639 +msgid "TLS read/write failure." 
+msgstr "" + +#: src/lib/util.c:182 +msgid "Running" +msgstr "" + +#: src/lib/util.c:185 +msgid "Blocked" +msgstr "" + +#: src/lib/util.c:195 +msgid "Non-fatal error" +msgstr "" + +#: src/lib/util.c:198 src/lib/util.c:265 +msgid "Canceled" +msgstr "" + +#: src/lib/util.c:201 +msgid "Verify differences" +msgstr "" + +#: src/lib/util.c:204 +msgid "Waiting on FD" +msgstr "" + +#: src/lib/util.c:207 +msgid "Wait on SD" +msgstr "" + +#: src/lib/util.c:210 +msgid "Wait for new Volume" +msgstr "" + +#: src/lib/util.c:213 +msgid "Waiting for mount" +msgstr "" + +#: src/lib/util.c:216 +msgid "Waiting for Storage resource" +msgstr "" + +#: src/lib/util.c:219 +msgid "Waiting for Job resource" +msgstr "" + +#: src/lib/util.c:222 +msgid "Waiting for Client resource" +msgstr "" + +#: src/lib/util.c:225 +msgid "Waiting on Max Jobs" +msgstr "" + +#: src/lib/util.c:228 +msgid "Waiting for Start Time" +msgstr "" + +#: src/lib/util.c:231 +msgid "Waiting on Priority" +msgstr "" + +#: src/lib/util.c:238 +#, c-format +msgid "Unknown Job termination status=%d" +msgstr "" + +#: src/lib/util.c:262 +msgid "Fatal Error" +msgstr "" + +#: src/lib/util.c:268 +msgid "Differences" +msgstr "" + +#: src/lib/util.c:271 +msgid "Unknown term code" +msgstr "" + +#: src/lib/util.c:299 +msgid "Migrate" +msgstr "" + +#: src/lib/util.c:302 +msgid "Copy" +msgstr "" + +#: src/lib/util.c:305 src/wx-console/wxbmainframe.cpp:276 +msgid "Console" +msgstr "" + +#: src/lib/util.c:308 +msgid "System or Console" +msgstr "" + +#: src/lib/util.c:311 +msgid "Scan" +msgstr "" + +#: src/lib/util.c:314 +msgid "Unknown Type" +msgstr "" + +#: src/lib/util.c:346 +msgid "Verify Init Catalog" +msgstr "" + +#: src/lib/util.c:355 +msgid "Verify Data" +msgstr "" + +#: src/lib/util.c:692 +msgid "Working directory not defined. Cannot continue.\n" +msgstr "" + +#: src/lib/util.c:695 +#, c-format +msgid "Working Directory: \"%s\" not found. Cannot continue.\n" +msgstr "" + +#: src/lib/util.c:699 +#, c-format +msgid "Working Directory: \"%s\" is not a directory. 
Cannot continue.\n" +msgstr "" + +#: src/lib/var.c:2669 +msgid "everything ok" +msgstr "" + +#: src/lib/var.c:2670 +msgid "incomplete named character" +msgstr "" + +#: src/lib/var.c:2671 +msgid "incomplete hexadecimal value" +msgstr "" + +#: src/lib/var.c:2672 +msgid "invalid hexadecimal value" +msgstr "" + +#: src/lib/var.c:2673 +msgid "octal value too large" +msgstr "" + +#: src/lib/var.c:2674 +msgid "invalid octal value" +msgstr "" + +#: src/lib/var.c:2675 +msgid "incomplete octal value" +msgstr "" + +#: src/lib/var.c:2676 +msgid "incomplete grouped hexadecimal value" +msgstr "" + +#: src/lib/var.c:2677 +msgid "incorrect character class specification" +msgstr "" + +#: src/lib/var.c:2678 +msgid "invalid expansion configuration" +msgstr "" + +#: src/lib/var.c:2679 +msgid "out of memory" +msgstr "" + +#: src/lib/var.c:2680 +msgid "incomplete variable specification" +msgstr "" + +#: src/lib/var.c:2681 +msgid "undefined variable" +msgstr "" + +#: src/lib/var.c:2682 +msgid "input is neither text nor variable" +msgstr "" + +#: src/lib/var.c:2683 +msgid "unknown command character in variable" +msgstr "" + +#: src/lib/var.c:2684 +msgid "malformatted search and replace operation" +msgstr "" + +#: src/lib/var.c:2685 +msgid "unknown flag in search and replace operation" +msgstr "" + +#: src/lib/var.c:2686 +msgid "invalid regex in search and replace operation" +msgstr "" + +#: src/lib/var.c:2687 +msgid "missing parameter in command" +msgstr "" + +#: src/lib/var.c:2688 +msgid "empty search string in search and replace operation" +msgstr "" + +#: src/lib/var.c:2689 +msgid "start offset missing in cut operation" +msgstr "" + +#: src/lib/var.c:2690 +msgid "offsets in cut operation delimited by unknown character" +msgstr "" + +#: src/lib/var.c:2691 +msgid "range out of bounds in cut operation" +msgstr "" + +#: src/lib/var.c:2692 +msgid "offset out of bounds in cut operation" +msgstr "" + +#: src/lib/var.c:2693 +msgid "logic error in cut operation" +msgstr "" + +#: src/lib/var.c:2694 +msgid "malformatted transpose operation" +msgstr "" + +#: src/lib/var.c:2695 +msgid "source and target class mismatch in transpose operation" +msgstr "" + +#: src/lib/var.c:2696 +msgid "empty character class in transpose operation" +msgstr "" + +#: src/lib/var.c:2697 +msgid "incorrect character class in transpose operation" +msgstr "" + +#: src/lib/var.c:2698 +msgid "malformatted padding operation" +msgstr "" + +#: src/lib/var.c:2699 +msgid "width parameter missing in padding operation" +msgstr "" + +#: src/lib/var.c:2700 +msgid "fill string missing in padding operation" +msgstr "" + +#: src/lib/var.c:2701 +msgid "unknown quoted pair in search and replace operation" +msgstr "" + +#: src/lib/var.c:2702 +msgid "sub-matching reference out of range" +msgstr "" + +#: src/lib/var.c:2703 +msgid "invalid argument" +msgstr "" + +#: src/lib/var.c:2704 +msgid "incomplete quoted pair" +msgstr "" + +#: src/lib/var.c:2705 +msgid "lookup function does not support variable arrays" +msgstr "" + +#: src/lib/var.c:2706 +msgid "index of array variable contains an invalid character" +msgstr "" + +#: src/lib/var.c:2707 +msgid "index of array variable is incomplete" +msgstr "" + +#: src/lib/var.c:2708 +msgid "bracket expression in array variable's index not closed" +msgstr "" + +#: src/lib/var.c:2709 +msgid "division by zero error in index specification" +msgstr "" + +#: src/lib/var.c:2710 +msgid "unterminated loop construct" +msgstr "" + +#: src/lib/var.c:2711 +msgid "invalid character in loop limits" +msgstr "" + +#: src/lib/var.c:2712 +msgid 
"malformed operation argument list" +msgstr "" + +#: src/lib/var.c:2713 +msgid "undefined operation" +msgstr "" + +#: src/lib/var.c:2714 +msgid "formatting failure" +msgstr "" + +#: src/lib/var.c:2723 +msgid "unknown error" +msgstr "" + +#: src/lib/watchdog.c:83 +#, c-format +msgid "Unable to initialize watchdog lock. ERR=%s\n" +msgstr "" + +#: src/lib/watchdog.c:180 +msgid "BUG! register_watchdog called before start_watchdog\n" +msgstr "" + +#: src/lib/watchdog.c:183 +#, c-format +msgid "BUG! Watchdog %p has NULL callback\n" +msgstr "" + +#: src/lib/watchdog.c:186 +#, c-format +msgid "BUG! Watchdog %p has zero interval\n" +msgstr "" + +#: src/lib/watchdog.c:206 +msgid "BUG! unregister_watchdog_unlocked called before start_watchdog\n" +msgstr "" + +#: src/lib/watchdog.c:325 +#, c-format +msgid "rwl_writelock failure. ERR=%s\n" +msgstr "" + +#: src/lib/watchdog.c:340 +#, c-format +msgid "rwl_writeunlock failure. ERR=%s\n" +msgstr "" + +#: src/stored/acquire.c:69 +#, c-format +msgid "Acquire read: num_writers=%d not zero. Job %d canceled.\n" +msgstr "" + +#: src/stored/acquire.c:78 +#, c-format +msgid "No volumes specified for reading. Job %s canceled.\n" +msgstr "" + +#: src/stored/acquire.c:87 +#, c-format +msgid "Logic error: no next volume to read. Numvol=%d Curvol=%d\n" +msgstr "" + +#: src/stored/acquire.c:113 +#, c-format +msgid "" +"Changing device. Want Media Type=\"%s\" have=\"%s\"\n" +" device=%s\n" +msgstr "" + +#: src/stored/acquire.c:151 +#, c-format +msgid "Media Type change. New device %s chosen.\n" +msgstr "" + +#: src/stored/acquire.c:162 +#, c-format +msgid "No suitable device found to read Volume \"%s\"\n" +msgstr "" + +#: src/stored/acquire.c:191 +#, c-format +msgid "Job %s canceled.\n" +msgstr "" + +#: src/stored/acquire.c:205 +#, c-format +msgid "Read open device %s Volume \"%s\" failed: ERR=%s\n" +msgstr "" + +#: src/stored/acquire.c:276 +#, c-format +msgid "Too many errors trying to mount device %s for reading.\n" +msgstr "" + +#: src/stored/acquire.c:285 +#, c-format +msgid "Ready to read from volume \"%s\" on device %s.\n" +msgstr "" + +#: src/stored/acquire.c:328 +#, c-format +msgid "Want to append, but device %s is busy reading.\n" +msgstr "" + +#: src/stored/acquire.c:358 +#, c-format +msgid "" +"Wanted to append to Volume \"%s\", but device %s is busy writing on \"%s" +"\" .\n" +msgstr "" + +#: src/stored/acquire.c:377 +#, c-format +msgid "" +"Cannot recycle volume \"%s\" on device %s because it is in use by another " +"job.\n" +msgstr "" + +#: src/stored/acquire.c:402 +#, c-format +msgid "" +"Invalid tape position on volume \"%s\" on device %s. Expected %d, got %d\n" +msgstr "" + +#: src/stored/acquire.c:422 +#, c-format +msgid "Could not ready device %s for append.\n" +msgstr "" + +#: src/stored/acquire.c:506 src/stored/block.c:367 src/stored/block.c:713 +#: src/stored/block.c:788 +#, c-format +msgid "Could not create JobMedia record for Volume=\"%s\" Job=%s\n" +msgstr "" + +#: src/stored/acquire.c:547 +#, c-format +msgid "Alert: %s" +msgstr "" + +#: src/stored/acquire.c:555 +#, c-format +msgid "3997 Bad alert command: %s: ERR=%s.\n" +msgstr "" + +#: src/stored/ansi_label.c:96 +#, c-format +msgid "Read error on device %s in ANSI label. ERR=%s\n" +msgstr "" + +#: src/stored/ansi_label.c:106 +msgid "Insane! 
End of tape while reading ANSI label.\n" +msgstr "" + +#: src/stored/ansi_label.c:130 +msgid "No VOL1 label while reading ANSI/IBM label.\n" +msgstr "" + +#: src/stored/ansi_label.c:150 +#, c-format +msgid "Wanted ANSI Volume \"%s\" got \"%s\"\n" +msgstr "" + +#: src/stored/ansi_label.c:161 +msgid "No HDR1 label while reading ANSI label.\n" +msgstr "" + +#: src/stored/ansi_label.c:167 +#, c-format +msgid "ANSI/IBM Volume \"%s\" does not belong to Bacula.\n" +msgstr "" + +#: src/stored/ansi_label.c:178 +msgid "No HDR2 label while reading ANSI/IBM label.\n" +msgstr "" + +#: src/stored/ansi_label.c:192 +msgid "Unknown or bad ANSI/IBM label record.\n" +msgstr "" + +#: src/stored/ansi_label.c:199 +msgid "Too many records in while reading ANSI/IBM label.\n" +msgstr "" + +#: src/stored/ansi_label.c:298 +#, c-format +msgid "ANSI Volume label name \"%s\" longer than 6 chars.\n" +msgstr "" + +#: src/stored/ansi_label.c:315 +#, c-format +msgid "Could not write ANSI VOL1 label. ERR=%s\n" +msgstr "" + +#: src/stored/ansi_label.c:353 src/stored/ansi_label.c:382 +#, c-format +msgid "Could not write ANSI HDR1 label. ERR=%s\n" +msgstr "" + +#: src/stored/ansi_label.c:358 src/stored/ansi_label.c:389 +msgid "Could not write ANSI HDR1 label.\n" +msgstr "" + +#: src/stored/ansi_label.c:394 +#, c-format +msgid "Error writing EOF to tape. ERR=%s" +msgstr "" + +#: src/stored/ansi_label.c:399 +msgid "write_ansi_ibm_label called for non-ANSI/IBM type\n" +msgstr "" + +#: src/stored/append.c:64 +msgid "DCR is NULL!!!\n" +msgstr "" + +#: src/stored/append.c:69 +msgid "DEVICE is NULL!!!\n" +msgstr "" + +#: src/stored/append.c:81 +msgid "Unable to set network buffer size.\n" +msgstr "" + +#: src/stored/append.c:94 src/stored/append.c:103 src/stored/append.c:115 +#: src/stored/append.c:298 src/stored/append.c:309 src/stored/askdir.c:332 +#: src/stored/askdir.c:333 +msgid "NULL Volume name. This shouldn't happen!!!\n" +msgstr "" + +#: src/stored/append.c:109 src/stored/btape.c:1889 +#, c-format +msgid "Write session label failed. ERR=%s\n" +msgstr "" + +#: src/stored/append.c:121 +#, c-format +msgid "Network send error to FD. ERR=%s\n" +msgstr "" + +#: src/stored/append.c:158 +#, c-format +msgid "Error reading data header from FD. ERR=%s\n" +msgstr "" + +#: src/stored/append.c:180 +#, c-format +msgid "Malformed data header from FD: %s\n" +msgstr "" + +#: src/stored/append.c:190 +msgid "File index from FD not positive or sequential\n" +msgstr "" + +#: src/stored/append.c:244 src/stored/mac.c:248 +#, c-format +msgid "Error updating file attributes. ERR=%s\n" +msgstr "" + +#: src/stored/append.c:258 +#, c-format +msgid "Network error on data channel. ERR=%s\n" +msgstr "" + +#: src/stored/append.c:279 +#, c-format +msgid "" +"Job write elapsed time = %02d:%02d:%02d, Transfer rate = %s bytes/second\n" +msgstr "" + +#: src/stored/append.c:292 src/stored/btape.c:2013 +#, c-format +msgid "Error writting end session label. 
ERR=%s\n" +msgstr "" + +#: src/stored/append.c:303 src/stored/mac.c:124 src/stored/mac.c:220 +#: src/stored/spool.c:293 +#, c-format +msgid "Fatal append error on device %s: ERR=%s\n" +msgstr "" + +#: src/stored/append.c:305 src/stored/mac.c:126 +msgid "Set ok=FALSE after write_block_to_device.\n" +msgstr "" + +#: src/stored/askdir.c:178 +msgid "Network error on bnet_recv in req_vol_info.\n" +msgstr "" + +#: src/stored/askdir.c:195 +#, c-format +msgid "Error getting Volume info: %s" +msgstr "" + +#: src/stored/askdir.c:363 +#, c-format +msgid "Didn't get vol info vol=%s: ERR=%s" +msgstr "" + +#: src/stored/askdir.c:405 +#, c-format +msgid "Error creating JobMedia record: ERR=%s\n" +msgstr "" + +#: src/stored/askdir.c:412 +#, c-format +msgid "Error creating JobMedia record: %s\n" +msgstr "" + +#: src/stored/askdir.c:478 +#, c-format +msgid "Job %s canceled while waiting for mount on Storage Device \"%s\".\n" +msgstr "" + +#: src/stored/askdir.c:491 +#, c-format +msgid "" +"Job %s waiting. Cannot find any appendable volumes.\n" +"Please use the \"label\" command to create a new Volume for:\n" +" Storage: %s\n" +" Pool: %s\n" +" Media type: %s\n" +msgstr "" + +#: src/stored/askdir.c:515 src/stored/askdir.c:605 +#, c-format +msgid "Max time exceeded waiting to mount Storage Device %s for Job %s\n" +msgstr "" + +#: src/stored/askdir.c:525 +msgid "pthread error in mount_next_volume.\n" +msgstr "" + +#: src/stored/askdir.c:557 +msgid "Cannot request another volume: no volume name given.\n" +msgstr "" + +#: src/stored/askdir.c:563 +#, c-format +msgid "Job %s canceled while waiting for mount on Storage Device %s.\n" +msgstr "" + +#: src/stored/askdir.c:578 +#, c-format +msgid "" +"Please mount Volume \"%s\" or label a new one for:\n" +" Job: %s\n" +" Storage: %s\n" +" Pool: %s\n" +" Media type: %s\n" +msgstr "" + +#: src/stored/askdir.c:615 +msgid "pthread error in mount_volume\n" +msgstr "" + +#: src/stored/authenticate.c:60 +#, c-format +msgid "I only authenticate Directors, not %d\n" +msgstr "" + +#: src/stored/authenticate.c:90 +#, c-format +msgid "" +"Connection from unknown Director %s at %s rejected.\n" +"Please see http://www.bacula.org/rel-manual/faq.html#AuthorizationErrors for " +"help.\n" +msgstr "" + +#: src/stored/authenticate.c:123 +msgid "" +"Incorrect password given by Director.\n" +"Please see http://www.bacula.org/rel-manual/faq.html#AuthorizationErrors for " +"help.\n" +msgstr "" + +#: src/stored/authenticate.c:179 +#, c-format +msgid "Unable to authenticate Director at %s.\n" +msgstr "" + +#: src/stored/authenticate.c:223 src/stored/authenticate.c:257 +#, c-format +msgid "" +"Incorrect authorization key from File daemon at %s rejected.\n" +"Please see http://www.bacula.org/rel-manual/faq.html#AuthorizationErrors for " +"help.\n" +msgstr "" + +#: src/stored/autochanger.c:66 +#, c-format +msgid "No Changer Name given for device %s. Cannot continue.\n" +msgstr "" + +#: src/stored/autochanger.c:72 +#, c-format +msgid "No Changer Command given for device %s. Cannot continue.\n" +msgstr "" + +#: src/stored/autochanger.c:85 +#, c-format +msgid "" +"Media Type not the same for all devices in changer %s. Cannot continue.\n" +msgstr "" + +#: src/stored/autochanger.c:149 +#, c-format +msgid "" +"Invalid slot=%d defined in catalog for Volume \"%s\" on %s. Manual load my " +"be required.\n" +msgstr "" + +#: src/stored/autochanger.c:154 +#, c-format +msgid "No \"Changer Device\" for %s. 
Manual load of Volume may be required.\n" +msgstr "" + +#: src/stored/autochanger.c:158 +#, c-format +msgid "No \"Changer Command\" for %s. Manual load of Volume may be requird.\n" +msgstr "" + +#: src/stored/autochanger.c:188 +#, c-format +msgid "3304 Issuing autochanger \"load slot %d, drive %d\" command.\n" +msgstr "" + +#: src/stored/autochanger.c:196 +#, c-format +msgid "3305 Autochanger \"load slot %d, drive %d\", status is OK.\n" +msgstr "" + +#: src/stored/autochanger.c:205 +#, c-format +msgid "" +"3992 Bad autochanger \"load slot %d, drive %d\": ERR=%s.\n" +"Results=%s\n" +msgstr "" + +#: src/stored/autochanger.c:251 +msgid "3992 Missing Changer command.\n" +msgstr "" + +#: src/stored/autochanger.c:265 +#, c-format +msgid "3301 Issuing autochanger \"loaded? drive %d\" command.\n" +msgstr "" + +#: src/stored/autochanger.c:275 +#, c-format +msgid "3302 Autochanger \"loaded? drive %d\", result is Slot %d.\n" +msgstr "" + +#: src/stored/autochanger.c:279 +#, c-format +msgid "3302 Autochanger \"loaded? drive %d\", result: nothing loaded.\n" +msgstr "" + +#: src/stored/autochanger.c:286 +#, c-format +msgid "" +"3991 Bad autochanger \"loaded? drive %d\" command: ERR=%s.\n" +"Results=%s\n" +msgstr "" + +#: src/stored/autochanger.c:345 src/stored/autochanger.c:436 +#, c-format +msgid "3307 Issuing autochanger \"unload slot %d, drive %d\" command.\n" +msgstr "" + +#: src/stored/autochanger.c:359 +#, c-format +msgid "" +"3995 Bad autochanger \"unload slot %d, drive %d\": ERR=%s\n" +"Results=%s\n" +msgstr "" + +#: src/stored/autochanger.c:423 +#, c-format +msgid "Volume \"%s\" is in use by device %s\n" +msgstr "" + +#: src/stored/autochanger.c:458 +#, c-format +msgid "3995 Bad autochanger \"unload slot %d, drive %d\": ERR=%s.\n" +msgstr "" + +#: src/stored/autochanger.c:498 +#, c-format +msgid "3993 Device %s not an autochanger device.\n" +msgstr "" + +#: src/stored/autochanger.c:519 +#, c-format +msgid "3306 Issuing autochanger \"%s\" command.\n" +msgstr "" + +#: src/stored/autochanger.c:522 +msgid "3996 Open bpipe failed.\n" +msgstr "" + +#: src/stored/bcopy.c:72 +#, c-format +msgid "" +"\n" +"Version: %s (%s)\n" +"\n" +"Usage: bcopy [-d debug_level] \n" +" -b bootstrap specify a bootstrap file\n" +" -c specify configuration file\n" +" -d set debug level to nn\n" +" -i specify input Volume names (separated by |)\n" +" -o specify output Volume names (separated by |)\n" +" -p proceed inspite of errors\n" +" -v verbose\n" +" -w specify working directory (default /tmp)\n" +" -? print this message\n" +"\n" +msgstr "" + +#: src/stored/bcopy.c:151 src/stored/bextract.c:187 src/stored/bscan.c:228 +msgid "Wrong number of arguments: \n" +msgstr "" + +#: src/stored/bcopy.c:191 src/stored/btape.c:364 src/stored/device.c:296 +#, c-format +msgid "dev open failed: %s\n" +msgstr "" + +#: src/stored/bcopy.c:204 +msgid "Write of last block failed.\n" +msgstr "" + +#: src/stored/bcopy.c:207 +#, c-format +msgid "%u Jobs copied. %u records copied.\n" +msgstr "" + +#: src/stored/bcopy.c:224 src/stored/bscan.c:402 +#, c-format +msgid "Record: SessId=%u SessTim=%u FileIndex=%d Stream=%d len=%u\n" +msgstr "" + +#: src/stored/bcopy.c:239 +msgid "Volume is prelabeled. This volume cannot be copied.\n" +msgstr "" + +#: src/stored/bcopy.c:242 +msgid "Volume label not copied.\n" +msgstr "" + +#: src/stored/bcopy.c:254 src/stored/bcopy.c:261 src/stored/bcopy.c:284 +#: src/stored/btape.c:2386 +#, c-format +msgid "Cannot fixup device error. 
%s\n" +msgstr "" + +#: src/stored/bcopy.c:266 +msgid "EOM label not copied.\n" +msgstr "" + +#: src/stored/bcopy.c:269 +msgid "EOT label not copied.\n" +msgstr "" + +#: src/stored/bcopy.c:305 src/stored/bextract.c:489 src/stored/bls.c:452 +#: src/stored/bscan.c:1283 src/stored/btape.c:2685 +#, c-format +msgid "Mount Volume \"%s\" on device %s and press return when ready: " +msgstr "" + +#: src/stored/bextract.c:78 +#, c-format +msgid "" +"\n" +"Version: %s (%s)\n" +"\n" +"Usage: bextract \n" +" -b specify a bootstrap file\n" +" -c specify a configuration file\n" +" -d set debug level to nn\n" +" -e exclude list\n" +" -i include list\n" +" -p proceed inspite of I/O errors\n" +" -v verbose\n" +" -V specify Volume names (separated by |)\n" +" -? print this message\n" +"\n" +msgstr "" + +#: src/stored/bextract.c:137 src/stored/bls.c:140 +#, c-format +msgid "Could not open exclude file: %s, ERR=%s\n" +msgstr "" + +#: src/stored/bextract.c:152 src/stored/bls.c:154 +#, c-format +msgid "Could not open include file: %s, ERR=%s\n" +msgstr "" + +#: src/stored/bextract.c:208 +#, c-format +msgid "%d Program Name and/or Program Data Stream records ignored.\n" +msgstr "" + +#: src/stored/bextract.c:212 +#, c-format +msgid "%d Win32 data or Win32 gzip data stream records. Ignored.\n" +msgstr "" + +#: src/stored/bextract.c:239 +#, c-format +msgid "Cannot stat %s. It must exist. ERR=%s\n" +msgstr "" + +#: src/stored/bextract.c:243 +#, c-format +msgid "%s must be a directory.\n" +msgstr "" + +#: src/stored/bextract.c:264 +#, c-format +msgid "%u files restored.\n" +msgstr "" + +#: src/stored/bextract.c:291 src/stored/bextract.c:464 +msgid "Logic error output file should be open but is not.\n" +msgstr "" + +#: src/stored/bextract.c:298 src/stored/bls.c:376 src/stored/bscan.c:659 +msgid "Cannot continue.\n" +msgstr "" + +#: src/stored/bextract.c:360 +#, c-format +msgid "Seek error on %s: %s\n" +msgstr "" + +#: src/stored/bextract.c:413 +#, c-format +msgid "Uncompression error. ERR=%d\n" +msgstr "" + +#: src/stored/bextract.c:421 +msgid "===Write error===\n" +msgstr "" + +#: src/stored/bextract.c:455 +msgid "Got Program Name or Data Stream. Ignored.\n" +msgstr "" + +#: src/stored/block.c:91 +#, c-format +msgid "" +"Dump block %s %x: size=%d BlkNum=%d\n" +" Hdrcksum=%x cksum=%x\n" +msgstr "" + +#: src/stored/block.c:104 +#, c-format +msgid " Rec: VId=%u VT=%u FI=%s Strm=%s len=%d p=%x\n" +msgstr "" + +#: src/stored/block.c:160 +#, c-format +msgid "%d block read errors not printed.\n" +msgstr "" + +#: src/stored/block.c:248 src/stored/block.c:264 src/stored/block.c:274 +#, c-format +msgid "" +"Volume data error at %u:%u! Wanted ID: \"%s\", got \"%s\". Buffer " +"discarded.\n" +msgstr "" + +#: src/stored/block.c:288 +#, c-format +msgid "" +"Volume data error at %u:%u! Block length %u is insane (too large), probably " +"due to a bad archive.\n" +msgstr "" + +#: src/stored/block.c:314 +#, c-format +msgid "" +"Volume data error at %u:%u!\n" +"Block checksum mismatch in block=%u len=%d: calc=%x blk=%x\n" +msgstr "" + +#: src/stored/block.c:425 +msgid "Cannot write block. Device at EOM.\n" +msgstr "" + +#: src/stored/block.c:430 +msgid "Attempt to write on read-only Volume.\n" +msgstr "" + +#: src/stored/block.c:482 +#, c-format +msgid "User defined maximum volume capacity %s exceeded on device %s.\n" +msgstr "" + +#: src/stored/block.c:497 +#, c-format +msgid "Unable to write EOF. 
ERR=%s\n" +msgstr "" + +#: src/stored/block.c:523 src/stored/block.c:548 +msgid "Write block header zeroed.\n" +msgstr "" + +#: src/stored/block.c:567 +#, c-format +msgid "Write error at %u:%u on device %s. ERR=%s.\n" +msgstr "" + +#: src/stored/block.c:574 +#, c-format +msgid "End of Volume \"%s\" at %u:%u on device %s. Write of %u bytes got %d.\n" +msgstr "" + +#: src/stored/block.c:650 src/stored/block.c:656 +#, c-format +msgid "Backspace file at EOT failed. ERR=%s\n" +msgstr "" + +#: src/stored/block.c:663 +#, c-format +msgid "Backspace record at EOT failed. ERR=%s\n" +msgstr "" + +#: src/stored/block.c:680 +#, c-format +msgid "Re-read last block at EOT failed. ERR=%s" +msgstr "" + +#: src/stored/block.c:690 +#, c-format +msgid "" +"Re-read of last block OK, but block numbers differ. Last block=%u Current " +"block=%u.\n" +msgstr "" + +#: src/stored/block.c:693 +msgid "Re-read of last block succeeded.\n" +msgstr "" + +#: src/stored/block.c:721 +#, c-format +msgid "" +"Error writing final EOF to tape. This Volume may not be readable.\n" +"%s" +msgstr "" + +#: src/stored/block.c:735 +#, c-format +msgid "" +"Error writing final part to DVD. This Volume may not be readable.\n" +"%s" +msgstr "" + +#: src/stored/block.c:837 +#, c-format +msgid "" +"Error while writing, current part number is less than the total number of " +"parts (%d/%d, device=%s)\n" +msgstr "" + +#: src/stored/block.c:845 +#, c-format +msgid "Unable to open device next part %s: ERR=%s\n" +msgstr "" + +#: src/stored/block.c:865 +#, c-format +msgid "" +"End of Volume \"%s\" at %u:%u on device %s (part_size=%s, free_space=%s, " +"free_space_errno=%d, errmsg=%s).\n" +msgstr "" + +#: src/stored/block.c:878 +#, c-format +msgid "" +"End of Volume \"%s\" at %u:%u on device %s (part_size=%s, free_space=%s, " +"free_space_errno=%d).\n" +msgstr "" + +#: src/stored/block.c:934 +#, c-format +msgid "Block buffer size looping problem on device %s\n" +msgstr "" + +#: src/stored/block.c:962 +#, c-format +msgid "Unable to open device part=%d %s: ERR=%s\n" +msgstr "" + +#: src/stored/block.c:988 +#, c-format +msgid "Read error on fd=%d at file:blk %u:%u on device %s. ERR=%s.\n" +msgstr "" + +#: src/stored/block.c:1001 +#, c-format +msgid "Read zero bytes at %u:%u on device %s.\n" +msgstr "" + +#: src/stored/block.c:1014 +#, c-format +msgid "" +"Volume data error at %u:%u! Very short block of %d bytes on device %s " +"discarded.\n" +msgstr "" + +#: src/stored/block.c:1039 +#, c-format +msgid "Block length %u is greater than buffer %u. Attempting recovery.\n" +msgstr "" + +#: src/stored/block.c:1058 +#, c-format +msgid "Setting block buffer size to %u bytes.\n" +msgstr "" + +#: src/stored/block.c:1073 +#, c-format +msgid "" +"Volume data error at %u:%u! Short block of %d bytes on device %s discarded.\n" +msgstr "" + +#: src/stored/bls.c:78 +#, c-format +msgid "" +"\n" +"Version: %s (%s)\n" +"\n" +"Usage: bls [options] \n" +" -b specify a bootstrap file\n" +" -c specify a config file\n" +" -d specify debug level\n" +" -e exclude list\n" +" -i include list\n" +" -j list jobs\n" +" -k list blocks\n" +" (no j or k option) list saved files\n" +" -L dump label\n" +" -p proceed inspite of errors\n" +" -v be verbose\n" +" -V specify Volume names (separated by |)\n" +" -? 
print this message\n" +"\n" +msgstr "" + +#: src/stored/bls.c:201 +msgid "No archive name specified\n" +msgstr "" + +#: src/stored/bls.c:236 +#, c-format +msgid "" +"\n" +"Warning, this Volume is a continuation of Volume %s\n" +msgstr "" + +#: src/stored/bls.c:279 +#, c-format +msgid "Got EOM at file %u on device %s, Volume \"%s\"\n" +msgstr "" + +#: src/stored/bls.c:290 +#, c-format +msgid "Mounted Volume \"%s\".\n" +msgstr "" + +#: src/stored/bls.c:292 +#, c-format +msgid "End of file %u on device %s, Volume \"%s\"\n" +msgstr "" + +#: src/stored/bls.c:316 +#, c-format +msgid "" +"File:blk=%u:%u blk_num=%u blen=%u First rec FI=%s SessId=%u SessTim=%u Strm=%" +"s rlen=%d\n" +msgstr "" + +#: src/stored/bls.c:325 +#, c-format +msgid "Block: %d size=%d\n" +msgstr "" + +#: src/stored/bls.c:392 +#, c-format +msgid "FileIndex=%d VolSessionId=%d VolSessionTime=%d Stream=%d DataLen=%d\n" +msgstr "" + +#: src/stored/bls.c:409 src/stored/read_record.c:388 +msgid "Fresh Volume Label" +msgstr "" + +#: src/stored/bls.c:412 src/stored/read_record.c:391 +msgid "Volume Label" +msgstr "" + +#: src/stored/bls.c:416 src/stored/label.c:1031 +msgid "Begin Job Session" +msgstr "" + +#: src/stored/bls.c:420 src/stored/label.c:1034 +msgid "End Job Session" +msgstr "" + +#: src/stored/bls.c:424 +msgid "End of Medium" +msgstr "" + +#: src/stored/bls.c:427 src/stored/label.c:1043 +msgid "Unknown" +msgstr "" + +#: src/stored/bls.c:433 src/stored/read_record.c:409 +#, c-format +msgid "%s Record: VolSessionId=%d VolSessionTime=%d JobId=%d DataLen=%d\n" +msgstr "" + +#: src/stored/bscan.c:115 +#, c-format +msgid "" +"\n" +"Version: %s (%s)\n" +"\n" +"Usage: bscan [ options ] \n" +" -b bootstrap specify a bootstrap file\n" +" -c specify configuration file\n" +" -d set debug level to nn\n" +" -m update media info in database\n" +" -n specify the database name (default bacula)\n" +" -u specify database user name (default bacula)\n" +" -P specify database host (default NULL)\n" +" -p proceed inspite of I/O errors\n" +" -r list records\n" +" -s synchronize or store in database\n" +" -S show scan progress periodically\n" +" -v verbose\n" +" -V specify Volume names (separated by |)\n" +" -w specify working directory (default from conf file)\n" +" -? print this message\n" +"\n" +msgstr "" + +#: src/stored/bscan.c:241 src/stored/stored.c:288 +#, c-format +msgid "No Storage resource defined in %s. Cannot continue.\n" +msgstr "" + +#: src/stored/bscan.c:249 src/stored/stored.c:319 +#, c-format +msgid "No Working Directory defined in %s. Cannot continue.\n" +msgstr "" + +#: src/stored/bscan.c:257 +#, c-format +msgid "Working Directory: %s not found. Cannot continue.\n" +msgstr "" + +#: src/stored/bscan.c:261 +#, c-format +msgid "Working Directory: %s is not a directory. Cannot continue.\n" +msgstr "" + +#: src/stored/bscan.c:275 src/stored/bscan.c:349 +#, c-format +msgid "First Volume Size = %sn" +msgstr "" + +#: src/stored/bscan.c:281 +msgid "Could not init Bacula database\n" +msgstr "" + +#: src/stored/bscan.c:288 +#, c-format +msgid "Using Database: %s, User: %s\n" +msgstr "" + +#: src/stored/bscan.c:323 +#, c-format +msgid "Create JobMedia for Job %s\n" +msgstr "" + +#: src/stored/bscan.c:331 +#, c-format +msgid "Could not create JobMedia record for Volume=%s Job=%s\n" +msgstr "" + +#: src/stored/bscan.c:394 +#, c-format +msgid "done: %d%%\n" +msgstr "" + +#: src/stored/bscan.c:418 +msgid "Volume is prelabeled. 
This tape cannot be scanned.\n" +msgstr "" + +#: src/stored/bscan.c:430 +#, c-format +msgid "Pool record for %s found in DB.\n" +msgstr "" + +#: src/stored/bscan.c:434 +#, c-format +msgid "VOL_LABEL: Pool record not found for Pool: %s\n" +msgstr "" + +#: src/stored/bscan.c:440 +#, c-format +msgid "VOL_LABEL: PoolType mismatch. DB=%s Vol=%s\n" +msgstr "" + +#: src/stored/bscan.c:444 +#, c-format +msgid "Pool type \"%s\" is OK.\n" +msgstr "" + +#: src/stored/bscan.c:454 +#, c-format +msgid "Media record for %s found in DB.\n" +msgstr "" + +#: src/stored/bscan.c:461 +#, c-format +msgid "VOL_LABEL: Media record not found for Volume: %s\n" +msgstr "" + +#: src/stored/bscan.c:468 +#, c-format +msgid "VOL_LABEL: MediaType mismatch. DB=%s Vol=%s\n" +msgstr "" + +#: src/stored/bscan.c:472 +#, c-format +msgid "Media type \"%s\" is OK.\n" +msgstr "" + +#: src/stored/bscan.c:481 +#, c-format +msgid "VOL_LABEL: OK for Volume: %s\n" +msgstr "" + +#: src/stored/bscan.c:488 +#, c-format +msgid "%d \"errors\" ignored before first Start of Session record.\n" +msgstr "" + +#: src/stored/bscan.c:499 +#, c-format +msgid "SOS_LABEL: Found Job record for JobId: %d\n" +msgstr "" + +#: src/stored/bscan.c:504 +#, c-format +msgid "SOS_LABEL: Job record not found for JobId: %d\n" +msgstr "" + +#: src/stored/bscan.c:544 +#, c-format +msgid "SOS_LABEL: VolSessId mismatch for JobId=%u. DB=%d Vol=%d\n" +msgstr "" + +#: src/stored/bscan.c:550 +#, c-format +msgid "SOS_LABEL: VolSessTime mismatch for JobId=%u. DB=%d Vol=%d\n" +msgstr "" + +#: src/stored/bscan.c:556 +#, c-format +msgid "SOS_LABEL: PoolId mismatch for JobId=%u. DB=%d Vol=%d\n" +msgstr "" + +#: src/stored/bscan.c:574 src/stored/bscan.c:1077 +#, c-format +msgid "Could not find SessId=%d SessTime=%d for EOS record.\n" +msgstr "" + +#: src/stored/bscan.c:618 +#, c-format +msgid "Could not update job record. ERR=%s\n" +msgstr "" + +#: src/stored/bscan.c:629 +#, c-format +msgid "End of all Volumes. VolFiles=%u VolBlocks=%u VolBytes=%s\n" +msgstr "" + +#: src/stored/bscan.c:641 +#, c-format +msgid "Could not find Job for SessId=%d SessTime=%d record.\n" +msgstr "" + +#: src/stored/bscan.c:677 +#, c-format +msgid "%s file records. At file:blk=%s:%s bytes=%s\n" +msgstr "" + +#: src/stored/bscan.c:731 +#, c-format +msgid "Got MD5 record: %s\n" +msgstr "" + +#: src/stored/bscan.c:739 +#, c-format +msgid "Got SHA1 record: %s\n" +msgstr "" + +#: src/stored/bscan.c:747 +#, c-format +msgid "Got SHA256 record: %s\n" +msgstr "" + +#: src/stored/bscan.c:755 +#, c-format +msgid "Got SHA512 record: %s\n" +msgstr "" + +#: src/stored/bscan.c:763 src/stored/bscan.c:770 +msgid "Got signed digest record\n" +msgstr "" + +#: src/stored/bscan.c:776 +#, c-format +msgid "Got Prog Names Stream: %s\n" +msgstr "" + +#: src/stored/bscan.c:782 +msgid "Got Prog Data Stream record.\n" +msgstr "" + +#: src/stored/bscan.c:792 +#, c-format +msgid "Unknown stream type!!! stream=%d len=%i\n" +msgstr "" + +#: src/stored/bscan.c:856 +#, c-format +msgid "Could not create File Attributes record. ERR=%s\n" +msgstr "" + +#: src/stored/bscan.c:862 +#, c-format +msgid "Created File record: %s\n" +msgstr "" + +#: src/stored/bscan.c:906 +#, c-format +msgid "Could not create media record. ERR=%s\n" +msgstr "" + +#: src/stored/bscan.c:910 src/stored/bscan.c:931 +#, c-format +msgid "Could not update media record. 
ERR=%s\n" +msgstr "" + +#: src/stored/bscan.c:914 +#, c-format +msgid "Created Media record for Volume: %s\n" +msgstr "" + +#: src/stored/bscan.c:935 +#, c-format +msgid "Updated Media record at end of Volume: %s\n" +msgstr "" + +#: src/stored/bscan.c:952 +#, c-format +msgid "Could not create pool record. ERR=%s\n" +msgstr "" + +#: src/stored/bscan.c:956 +#, c-format +msgid "Created Pool record for Pool: %s\n" +msgstr "" + +#: src/stored/bscan.c:976 +#, c-format +msgid "Created Client record for Client: %s\n" +msgstr "" + +#: src/stored/bscan.c:993 +#, c-format +msgid "Fileset \"%s\" already exists.\n" +msgstr "" + +#: src/stored/bscan.c:997 +#, c-format +msgid "Could not create FileSet record \"%s\". ERR=%s\n" +msgstr "" + +#: src/stored/bscan.c:1002 +#, c-format +msgid "Created FileSet record \"%s\"\n" +msgstr "" + +#: src/stored/bscan.c:1049 +#, c-format +msgid "Could not create JobId record. ERR=%s\n" +msgstr "" + +#: src/stored/bscan.c:1055 +#, c-format +msgid "Could not update job start record. ERR=%s\n" +msgstr "" + +#: src/stored/bscan.c:1058 +#, c-format +msgid "Created new JobId=%u record for original JobId=%u\n" +msgstr "" + +#: src/stored/bscan.c:1108 +#, c-format +msgid "Could not update JobId=%u record. ERR=%s\n" +msgstr "" + +#: src/stored/bscan.c:1113 +#, c-format +msgid "Updated Job termination record for JobId=%u Level=%s TermStat=%c\n" +msgstr "" + +#: src/stored/bscan.c:1135 +#, c-format +msgid "Job Termination code: %d" +msgstr "" + +#: src/stored/bscan.c:1140 +#, c-format +msgid "" +"%s\n" +"JobId: %d\n" +"Job: %s\n" +"FileSet: %s\n" +"Backup Level: %s\n" +"Client: %s\n" +"Start time: %s\n" +"End time: %s\n" +"Files Written: %s\n" +"Bytes Written: %s\n" +"Volume Session Id: %d\n" +"Volume Session Time: %d\n" +"Last Volume Bytes: %s\n" +"Termination: %s\n" +"\n" +msgstr "" + +#: src/stored/bscan.c:1197 +#, c-format +msgid "Could not create JobMedia record. ERR=%s\n" +msgstr "" + +#: src/stored/bscan.c:1201 +#, c-format +msgid "Created JobMedia record JobId %d, MediaId %d\n" +msgstr "" + +#: src/stored/bscan.c:1217 +#, c-format +msgid "Could not find SessId=%d SessTime=%d for MD5/SHA1 record.\n" +msgstr "" + +#: src/stored/bscan.c:1231 +#, c-format +msgid "Could not add MD5/SHA1 to File record. ERR=%s\n" +msgstr "" + +#: src/stored/bscan.c:1236 +msgid "Updated MD5/SHA1 record\n" +msgstr "" + +#: src/stored/btape.c:172 src/stored/stored.c:128 +#, c-format +msgid "Tape block size (%d) not multiple of system size (%d)\n" +msgstr "" + +#: src/stored/btape.c:176 src/stored/stored.c:132 +#, c-format +msgid "Tape block size (%d) is not a power of 2\n" +msgstr "" + +#: src/stored/btape.c:179 +#, c-format +msgid "" +"\n" +"\n" +"!!!! Warning large disk addressing disabled. boffset_t=%d should be 8 or " +"more !!!!!\n" +"\n" +"\n" +msgstr "" + +#: src/stored/btape.c:186 +#, c-format +msgid "32 bit printf/scanf problem. i=%d x32=%u y32=%u\n" +msgstr "" + +#: src/stored/btape.c:195 +msgid "64 bit printf/scanf problem. 
i=%d x64=%" +msgstr "" + +#: src/stored/btape.c:195 +msgid " y64=%" +msgstr "" + +#: src/stored/btape.c:200 +#, c-format +msgid "Tape block granularity is %d bytes.\n" +msgstr "" + +#: src/stored/btape.c:269 +msgid "No archive name specified.\n" +msgstr "" + +#: src/stored/btape.c:273 +msgid "Improper number of arguments specified.\n" +msgstr "" + +#: src/stored/btape.c:287 +msgid "btape does not work with DVD storage.\n" +msgstr "" + +#: src/stored/btape.c:292 +msgid "btape only works with tape storage.\n" +msgstr "" + +#: src/stored/btape.c:368 +#, c-format +msgid "open device %s: OK\n" +msgstr "" + +#: src/stored/btape.c:391 +msgid "Enter Volume Name: " +msgstr "" + +#: src/stored/btape.c:398 +#, c-format +msgid "Device open failed. ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:404 +#, c-format +msgid "Wrote Volume label for volume \"%s\".\n" +msgstr "" + +#: src/stored/btape.c:418 +msgid "Volume has no label.\n" +msgstr "" + +#: src/stored/btape.c:421 +msgid "Volume label read correctly.\n" +msgstr "" + +#: src/stored/btape.c:424 +#, c-format +msgid "I/O error on device: ERR=%s" +msgstr "" + +#: src/stored/btape.c:427 +msgid "Volume name error\n" +msgstr "" + +#: src/stored/btape.c:430 +#, c-format +msgid "Error creating label. ERR=%s" +msgstr "" + +#: src/stored/btape.c:433 +msgid "Volume version error.\n" +msgstr "" + +#: src/stored/btape.c:436 +msgid "Bad Volume label type.\n" +msgstr "" + +#: src/stored/btape.c:439 +msgid "Unknown error.\n" +msgstr "" + +#: src/stored/btape.c:457 +#, c-format +msgid "Bad status from load. ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:459 +#, c-format +msgid "Loaded %s\n" +msgstr "" + +#: src/stored/btape.c:468 src/stored/btape.c:807 src/stored/btape.c:849 +#: src/stored/btape.c:919 src/stored/btape.c:961 src/stored/btape.c:1229 +#, c-format +msgid "Bad status from rewind. ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:471 src/stored/btape.c:1237 +#, c-format +msgid "Rewound %s\n" +msgstr "" + +#: src/stored/btape.c:497 src/stored/btape.c:1241 +#, c-format +msgid "Bad status from weof. ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:501 +#, c-format +msgid "Wrote 1 EOF to %s\n" +msgstr "" + +#: src/stored/btape.c:504 +#, c-format +msgid "Wrote %d EOFs to %s\n" +msgstr "" + +#: src/stored/btape.c:522 +msgid "Moved to end of medium.\n" +msgstr "" + +#: src/stored/btape.c:549 +#, c-format +msgid "Bad status from bsf. ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:551 +#, c-format +msgid "Backspaced %d file%s.\n" +msgstr "" + +#: src/stored/btape.c:568 +#, c-format +msgid "Bad status from bsr. ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:570 +#, c-format +msgid "Backspaced %d record%s.\n" +msgstr "" + +#: src/stored/btape.c:580 src/stored/status.c:302 +#, c-format +msgid "Configured device capabilities:\n" +msgstr "" + +#: src/stored/btape.c:598 +#, c-format +msgid "Device status:\n" +msgstr "" + +#: src/stored/btape.c:612 src/stored/status.c:341 +#, c-format +msgid "Device parameters:\n" +msgstr "" + +#: src/stored/btape.c:617 +#, c-format +msgid "Status:\n" +msgstr "" + +#: src/stored/btape.c:632 +msgid "" +"Test writting larger and larger records.\n" +"This is a torture test for records.\n" +"I am going to write\n" +"larger and larger records. It will stop when the record size\n" +"plus the header exceeds the block size (by default about 64K)\n" +msgstr "" + +#: src/stored/btape.c:638 +msgid "Do you want to continue? 
(y/n): " +msgstr "" + +#: src/stored/btape.c:640 src/stored/btape.c:1857 +msgid "Command aborted.\n" +msgstr "" + +#: src/stored/btape.c:656 +#, c-format +msgid "Block %d i=%d\n" +msgstr "" + +#: src/stored/btape.c:682 +msgid "Skipping read backwards test because BSR turned off.\n" +msgstr "" + +#: src/stored/btape.c:686 +msgid "" +"\n" +"=== Write, backup, and re-read test ===\n" +"\n" +"I'm going to write three records and an EOF\n" +"then backup over the EOF and re-read the last record.\n" +"Bacula does this after writing the last block on the\n" +"tape to verify that the block was written correctly.\n" +"\n" +"This is not an *essential* feature ...\n" +"\n" +msgstr "" + +#: src/stored/btape.c:699 src/stored/btape.c:710 src/stored/btape.c:721 +#: src/stored/btape.c:819 src/stored/btape.c:835 src/stored/btape.c:931 +#: src/stored/btape.c:947 src/stored/btape.c:1566 src/stored/btape.c:2451 +msgid "Error writing record to block.\n" +msgstr "" + +#: src/stored/btape.c:703 src/stored/btape.c:714 src/stored/btape.c:725 +#: src/stored/btape.c:823 src/stored/btape.c:839 src/stored/btape.c:935 +#: src/stored/btape.c:951 src/stored/btape.c:1570 src/stored/btape.c:2455 +msgid "Error writing block to device.\n" +msgstr "" + +#: src/stored/btape.c:706 +#, c-format +msgid "Wrote first record of %d bytes.\n" +msgstr "" + +#: src/stored/btape.c:717 +#, c-format +msgid "Wrote second record of %d bytes.\n" +msgstr "" + +#: src/stored/btape.c:728 +#, c-format +msgid "Wrote third record of %d bytes.\n" +msgstr "" + +#: src/stored/btape.c:735 src/stored/btape.c:740 +#, c-format +msgid "Backspace file failed! ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:744 +msgid "Backspaced over EOF OK.\n" +msgstr "" + +#: src/stored/btape.c:746 +#, c-format +msgid "Backspace record failed! ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:749 +msgid "Backspace record OK.\n" +msgstr "" + +#: src/stored/btape.c:752 src/stored/btape.c:758 +#, c-format +msgid "Read block failed! ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:763 +msgid "Bad data in record. Test failed!\n" +msgstr "" + +#: src/stored/btape.c:767 +msgid "" +"\n" +"Block re-read correct. Test succeeded!\n" +msgstr "" + +#: src/stored/btape.c:768 +msgid "" +"=== End Write, backup, and re-read test ===\n" +"\n" +msgstr "" + +#: src/stored/btape.c:775 +msgid "" +"This is not terribly serious since Bacula only uses\n" +"this function to verify the last block written to the\n" +"tape. Bacula will skip the last block verification\n" +"if you add:\n" +"\n" +"Backward Space Record = No\n" +"\n" +"to your Storage daemon's Device resource definition.\n" +msgstr "" + +#: src/stored/btape.c:799 +msgid "" +"\n" +"=== Write, rewind, and re-read test ===\n" +"\n" +"I'm going to write 1000 records and an EOF\n" +"then write 1000 records and an EOF, then rewind,\n" +"and re-read the data to verify that it is correct.\n" +"\n" +"This is an *essential* feature ...\n" +"\n" +msgstr "" + +#: src/stored/btape.c:827 src/stored/btape.c:843 src/stored/btape.c:939 +#: src/stored/btape.c:955 +#, c-format +msgid "Wrote 1000 blocks of %d bytes.\n" +msgstr "" + +#: src/stored/btape.c:852 src/stored/btape.c:964 +msgid "Rewind OK.\n" +msgstr "" + +#: src/stored/btape.c:859 src/stored/btape.c:1013 +msgid "Got EOF on tape.\n" +msgstr "" + +#: src/stored/btape.c:864 +#, c-format +msgid "Read block %d failed! ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:870 +#, c-format +msgid "Read record failed. Block %d! 
ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:876 src/stored/btape.c:1043 +#, c-format +msgid "Bad data in record. Expected %d, got %d at byte %d. Test failed!\n" +msgstr "" + +#: src/stored/btape.c:883 +msgid "1000 blocks re-read correctly.\n" +msgstr "" + +#: src/stored/btape.c:886 src/stored/btape.c:1050 +msgid "" +"=== Test Succeeded. End Write, rewind, and re-read test ===\n" +"\n" +msgstr "" + +#: src/stored/btape.c:911 +msgid "" +"\n" +"=== Write, rewind, and position test ===\n" +"\n" +"I'm going to write 1000 records and an EOF\n" +"then write 1000 records and an EOF, then rewind,\n" +"and position to a few blocks and verify that it is correct.\n" +"\n" +"This is an *essential* feature ...\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1004 +#, c-format +msgid "Reposition to file:block %d:%d\n" +msgstr "" + +#: src/stored/btape.c:1006 +msgid "Reposition error.\n" +msgstr "" + +#: src/stored/btape.c:1019 +#, c-format +msgid "" +"Read block %d failed! file=%d blk=%d. ERR=%s\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1021 +msgid "" +"This may be because the tape drive block size is not\n" +" set to variable blocking as normally used by Bacula.\n" +" Please see the Tape Testing chapter in the manual and \n" +" look for using mt with defblksize and setoptions\n" +"If your tape drive block size is correct, then perhaps\n" +" your SCSI driver is *really* stupid and does not\n" +" correctly report the file:block after a FSF. In this\n" +" case try setting:\n" +" Fast Forward Space File = no\n" +" in your Device resource.\n" +msgstr "" + +#: src/stored/btape.c:1037 +#, c-format +msgid "Read record failed! ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:1048 +#, c-format +msgid "Block %d re-read correctly.\n" +msgstr "" + +#: src/stored/btape.c:1069 +msgid "" +"\n" +"\n" +"=== Append files test ===\n" +"\n" +"This test is essential to Bacula.\n" +"\n" +"I'm going to write one record in file 0,\n" +" two records in file 1,\n" +" and three records in file 2\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1093 +msgid "Now moving to end of medium.\n" +msgstr "" + +#: src/stored/btape.c:1095 src/stored/btape.c:1324 +#, c-format +msgid "We should be in file 3. I am at file %d. %s\n" +msgstr "" + +#: src/stored/btape.c:1096 src/stored/btape.c:1114 src/stored/btape.c:1313 +#: src/stored/btape.c:1325 src/stored/btape.c:1338 src/stored/btape.c:1355 +msgid "This is correct!" +msgstr "" + +#: src/stored/btape.c:1096 src/stored/btape.c:1114 src/stored/btape.c:1313 +#: src/stored/btape.c:1325 src/stored/btape.c:1338 src/stored/btape.c:1355 +msgid "This is NOT correct!!!!" +msgstr "" + +#: src/stored/btape.c:1102 +msgid "" +"\n" +"Now the important part, I am going to attempt to append to the tape.\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1109 +msgid "" +"Done appending, there should be no I/O errors\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1110 +msgid "Doing Bacula scan of blocks:\n" +msgstr "" + +#: src/stored/btape.c:1112 +msgid "End scanning the tape.\n" +msgstr "" + +#: src/stored/btape.c:1113 src/stored/btape.c:1337 +#, c-format +msgid "We should be in file 4. I am at file %d. 
%s\n" +msgstr "" + +#: src/stored/btape.c:1138 +msgid "" +"\n" +"Autochanger enabled, but no name or no command device specified.\n" +msgstr "" + +#: src/stored/btape.c:1142 +msgid "" +"\n" +"Ah, I see you have an autochanger configured.\n" +"To test the autochanger you must have a blank tape\n" +" that I can write on in Slot 1.\n" +msgstr "" + +#: src/stored/btape.c:1145 +msgid "" +"\n" +"Do you wish to continue with the Autochanger test? (y/n): " +msgstr "" + +#: src/stored/btape.c:1152 +msgid "" +"\n" +"\n" +"=== Autochanger test ===\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1161 +msgid "3301 Issuing autochanger \"loaded\" command.\n" +msgstr "" + +#: src/stored/btape.c:1170 +#, c-format +msgid "3991 Bad autochanger command: %s\n" +msgstr "" + +#: src/stored/btape.c:1171 +#, c-format +msgid "3991 result=\"%s\": ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:1175 +#, c-format +msgid "Slot %d loaded. I am going to unload it.\n" +msgstr "" + +#: src/stored/btape.c:1177 +msgid "Nothing loaded in the drive. OK.\n" +msgstr "" + +#: src/stored/btape.c:1184 +#, c-format +msgid "3302 Issuing autochanger \"unload %d %d\" command.\n" +msgstr "" + +#: src/stored/btape.c:1189 +#, c-format +msgid "unload status=%s %d\n" +msgstr "" + +#: src/stored/btape.c:1189 +msgid "Bad" +msgstr "" + +#: src/stored/btape.c:1192 +#, c-format +msgid "3992 Bad autochanger command: %s\n" +msgstr "" + +#: src/stored/btape.c:1193 +#, c-format +msgid "3992 result=\"%s\": ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:1203 +#, c-format +msgid "3303 Issuing autochanger \"load %d %d\" command.\n" +msgstr "" + +#: src/stored/btape.c:1211 +#, c-format +msgid "3303 Autochanger \"load %d %d\" status is OK.\n" +msgstr "" + +#: src/stored/btape.c:1215 +#, c-format +msgid "3993 Bad autochanger command: %s\n" +msgstr "" + +#: src/stored/btape.c:1216 +#, c-format +msgid "3993 result=\"%s\": ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:1231 +msgid "" +"\n" +"The test failed, probably because you need to put\n" +"a longer sleep time in the mtx-script in the load) case.\n" +"Adding a 30 second sleep and trying again ...\n" +msgstr "" + +#: src/stored/btape.c:1244 +#, c-format +msgid "Wrote EOF to %s\n" +msgstr "" + +#: src/stored/btape.c:1248 +#, c-format +msgid "" +"\n" +"The test worked this time. Please add:\n" +"\n" +" sleep %d\n" +"\n" +"to your mtx-changer script in the load) case.\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1253 +msgid "" +"\n" +"The test autochanger worked!!\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1264 +msgid "You must correct this error or the Autochanger will not work.\n" +msgstr "" + +#: src/stored/btape.c:1282 +msgid "" +"\n" +"\n" +"=== Forward space files test ===\n" +"\n" +"This test is essential to Bacula.\n" +"\n" +"I'm going to write five files then test forward spacing\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1307 +msgid "Now forward spacing 1 file.\n" +msgstr "" + +#: src/stored/btape.c:1309 src/stored/btape.c:1321 src/stored/btape.c:1334 +#: src/stored/btape.c:1352 src/stored/btape.c:1521 +#, c-format +msgid "Bad status from fsr. ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:1312 +#, c-format +msgid "We should be in file 1. I am at file %d. %s\n" +msgstr "" + +#: src/stored/btape.c:1319 +msgid "Now forward spacing 2 files.\n" +msgstr "" + +#: src/stored/btape.c:1332 +msgid "Now forward spacing 4 files.\n" +msgstr "" + +#: src/stored/btape.c:1344 +msgid "" +"The test worked this time. 
Please add:\n" +"\n" +" Fast Forward Space File = no\n" +"\n" +"to your Device resource for this drive.\n" +msgstr "" + +#: src/stored/btape.c:1350 +msgid "Now forward spacing 1 more file.\n" +msgstr "" + +#: src/stored/btape.c:1354 +#, c-format +msgid "We should be in file 5. I am at file %d. %s\n" +msgstr "" + +#: src/stored/btape.c:1359 +msgid "" +"\n" +"=== End Forward space files test ===\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1363 +msgid "" +"\n" +"The forward space file test failed.\n" +msgstr "" + +#: src/stored/btape.c:1365 +msgid "" +"You have Fast Forward Space File enabled.\n" +"I am turning it off then retrying the test.\n" +msgstr "" + +#: src/stored/btape.c:1371 +msgid "" +"You must correct this error or Bacula will not work.\n" +"Some systems, e.g. OpenBSD, require you to set\n" +" Use MTIOCGET= no\n" +"in your device resource. Use with caution.\n" +msgstr "" + +#: src/stored/btape.c:1403 +msgid "" +"\n" +"Append test failed. Attempting again.\n" +"Setting \"Hardware End of Medium = no\n" +" and \"Fast Forward Space File = no\n" +"and retrying append test.\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1411 +msgid "" +"\n" +"\n" +"It looks like the test worked this time, please add:\n" +"\n" +" Hardware End of Medium = No\n" +"\n" +" Fast Forward Space File = No\n" +"to your Device resource in the Storage conf file.\n" +msgstr "" + +#: src/stored/btape.c:1418 +msgid "" +"\n" +"\n" +"That appears *NOT* to have corrected the problem.\n" +msgstr "" + +#: src/stored/btape.c:1423 +msgid "" +"\n" +"\n" +"It looks like the append failed. Attempting again.\n" +"Setting \"BSF at EOM = yes\" and retrying append test.\n" +msgstr "" + +#: src/stored/btape.c:1428 +msgid "" +"\n" +"\n" +"It looks like the test worked this time, please add:\n" +"\n" +" Hardware End of Medium = No\n" +" Fast Forward Space File = No\n" +" BSF at EOM = yes\n" +"\n" +"to your Device resource in the Storage conf file.\n" +msgstr "" + +#: src/stored/btape.c:1439 +msgid "" +"\n" +"Append test failed.\n" +"\n" +"\n" +"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" +"Unable to correct the problem. You MUST fix this\n" +"problem before Bacula can use your tape drive correctly\n" +"\n" +"Perhaps running Bacula in fixed block mode will work.\n" +"Do so by setting:\n" +"\n" +"Minimum Block Size = nnn\n" +"Maximum Block Size = nnn\n" +"\n" +"in your Storage daemon's Device definition.\n" +"nnn must match your tape driver's block size, which\n" +"can be determined by reading your tape manufacturers\n" +"information, and the information on your kernel dirver.\n" +"Fixed block sizes, however, are not normally an ideal solution.\n" +"\n" +"Some systems, e.g. OpenBSD, require you to set\n" +" Use MTIOCGET= no\n" +"in your device resource. 
Use with caution.\n" +msgstr "" + +#: src/stored/btape.c:1460 +msgid "" +"\n" +"The above Bacula scan should have output identical to what follows.\n" +"Please double check it ...\n" +"=== Sample correct output ===\n" +"1 block of 64448 bytes in file 1\n" +"End of File mark.\n" +"2 blocks of 64448 bytes in file 2\n" +"End of File mark.\n" +"3 blocks of 64448 bytes in file 3\n" +"End of File mark.\n" +"1 block of 64448 bytes in file 4\n" +"End of File mark.\n" +"Total files=4, blocks=7, bytes = 451,136\n" +"=== End sample correct output ===\n" +"\n" +"If the above scan output is not identical to the\n" +"sample output, you MUST correct the problem\n" +"or Bacula will not be able to write multiple Jobs to \n" +"the tape.\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1499 +#, c-format +msgid "Bad status from fsf. ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:1503 +msgid "Forward spaced 1 file.\n" +msgstr "" + +#: src/stored/btape.c:1506 +#, c-format +msgid "Forward spaced %d files.\n" +msgstr "" + +#: src/stored/btape.c:1525 +msgid "Forward spaced 1 record.\n" +msgstr "" + +#: src/stored/btape.c:1528 +#, c-format +msgid "Forward spaced %d records.\n" +msgstr "" + +#: src/stored/btape.c:1573 +#, c-format +msgid "Wrote one record of %d bytes.\n" +msgstr "" + +#: src/stored/btape.c:1575 +msgid "Wrote block to device.\n" +msgstr "" + +#: src/stored/btape.c:1590 +msgid "Enter length to read: " +msgstr "" + +#: src/stored/btape.c:1595 +msgid "Bad length entered, using default of 1024 bytes.\n" +msgstr "" + +#: src/stored/btape.c:1604 +#, c-format +msgid "Read of %d bytes gives stat=%d. ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:1627 src/stored/btape.c:1676 +#, c-format +msgid "End of tape\n" +msgstr "" + +#: src/stored/btape.c:1632 +#, c-format +msgid "Starting scan at file %u\n" +msgstr "" + +#: src/stored/btape.c:1637 src/stored/dev.c:1314 +#, c-format +msgid "read error on %s. ERR=%s.\n" +msgstr "" + +#: src/stored/btape.c:1639 +#, c-format +msgid "Bad status from read %d. ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:1642 src/stored/btape.c:1656 src/stored/btape.c:1720 +#: src/stored/btape.c:1732 src/stored/btape.c:1745 src/stored/btape.c:1761 +#, c-format +msgid "1 block of %d bytes in file %d\n" +msgstr "" + +#: src/stored/btape.c:1645 src/stored/btape.c:1659 src/stored/btape.c:1723 +#: src/stored/btape.c:1735 src/stored/btape.c:1748 src/stored/btape.c:1764 +#, c-format +msgid "%d blocks of %d bytes in file %d\n" +msgstr "" + +#: src/stored/btape.c:1667 src/stored/btape.c:1739 +#, c-format +msgid "End of File mark.\n" +msgstr "" + +#: src/stored/btape.c:1688 src/stored/btape.c:1792 +#, c-format +msgid "Total files=%d, blocks=%d, bytes = %s\n" +msgstr "" + +#: src/stored/btape.c:1752 +#, c-format +msgid "Short block read.\n" +msgstr "" + +#: src/stored/btape.c:1755 +#, c-format +msgid "Error reading block. ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:1779 +#, c-format +msgid "" +"Blk_block: %u dev_blk=%u blen=%u First rec FI=%s SessId=%u SessTim=%u Strm=%" +"s rlen=%d\n" +msgstr "" + +#: src/stored/btape.c:1801 +#, c-format +msgid "Device status: %u. 
ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:1831 +msgid "" +"\n" +"This command simulates Bacula writing to a tape.\n" +"It requires either one or two blank tapes, which it\n" +"will label and write.\n" +"\n" +"If you have an autochanger configured, it will use\n" +"the tapes that are in slots 1 and 2, otherwise, you will\n" +"be prompted to insert the tapes when necessary.\n" +"\n" +"It will print a status approximately\n" +"every 322 MB, and write an EOF every 3.2 GB. If you have\n" +"selected the simple test option, after writing the first tape\n" +"it will rewind it and re-read the last block written.\n" +"\n" +"If you have selected the multiple tape test, when the first tape\n" +"fills, it will ask for a second, and after writing a few more \n" +"blocks, it will stop. Then it will begin re-reading the\n" +"two tapes.\n" +"\n" +"This may take a long time -- hours! ...\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1848 +msgid "" +"Do you want to run the simplified test (s) with one tape\n" +"or the complete multiple tape (m) test: (s/m) " +msgstr "" + +#: src/stored/btape.c:1851 +msgid "Simple test (single tape) selected.\n" +msgstr "" + +#: src/stored/btape.c:1854 +msgid "Multiple tape test selected.\n" +msgstr "" + +#: src/stored/btape.c:1893 +msgid "Wrote Start of Session label.\n" +msgstr "" + +#: src/stored/btape.c:1924 +#, c-format +msgid "%s Begin writing Bacula records to tape ...\n" +msgstr "" + +#: src/stored/btape.c:1926 +#, c-format +msgid "%s Begin writing Bacula records to first tape ...\n" +msgstr "" + +#: src/stored/btape.c:1967 +#, c-format +msgid "Wrote blk_block=%u, dev_blk_num=%u VolBytes=%s rate=%.1f KB/s\n" +msgstr "" + +#: src/stored/btape.c:1977 +#, c-format +msgid "%s Flush block, write EOF\n" +msgstr "" + +#: src/stored/btape.c:1988 +msgid "Not OK\n" +msgstr "" + +#: src/stored/btape.c:2018 +msgid "Set ok=false after write_block_to_device.\n" +msgstr "" + +#: src/stored/btape.c:2021 +msgid "Wrote End of Session label.\n" +msgstr "" + +#: src/stored/btape.c:2045 +#, c-format +msgid "Wrote state file last_block_num1=%d last_block_num2=%d\n" +msgstr "" + +#: src/stored/btape.c:2049 +#, c-format +msgid "Could not create state file: %s ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:2057 +#, c-format +msgid "" +"\n" +"\n" +"%s Done filling tape at %d:%d. Now beginning re-read of tape ...\n" +msgstr "" + +#: src/stored/btape.c:2061 +#, c-format +msgid "" +"\n" +"\n" +"%s Done filling tapes at %d:%d. Now beginning re-read of first tape ...\n" +msgstr "" + +#: src/stored/btape.c:2100 +msgid "" +"\n" +"The state file level has changed. You must redo\n" +"the fill command.\n" +msgstr "" + +#: src/stored/btape.c:2106 +#, c-format +msgid "" +"\n" +"Could not find the state file: %s ERR=%s\n" +"You must redo the fill command.\n" +msgstr "" + +#: src/stored/btape.c:2162 +msgid "Mount first tape. Press enter when ready: " +msgstr "" + +#: src/stored/btape.c:2177 +msgid "Rewinding.\n" +msgstr "" + +#: src/stored/btape.c:2182 +#, c-format +msgid "Reading the first 10000 records from %u:%u.\n" +msgstr "" + +#: src/stored/btape.c:2186 src/stored/btape.c:2251 +#, c-format +msgid "Reposition from %u:%u to %u:%u\n" +msgstr "" + +#: src/stored/btape.c:2189 src/stored/btape.c:2238 src/stored/btape.c:2254 +#, c-format +msgid "Reposition error. 
ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:2192 +#, c-format +msgid "Reading block %u.\n" +msgstr "" + +#: src/stored/btape.c:2194 src/stored/btape.c:2243 src/stored/btape.c:2259 +#, c-format +msgid "Error reading block: ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:2199 +msgid "" +"\n" +"The last block on the tape matches. Test succeeded.\n" +"\n" +msgstr "" + +#: src/stored/btape.c:2201 +msgid "" +"\n" +"The last block of the first tape matches.\n" +"\n" +msgstr "" + +#: src/stored/btape.c:2224 +msgid "Mount second tape. Press enter when ready: " +msgstr "" + +#: src/stored/btape.c:2236 +#, c-format +msgid "Reposition from %u:%u to 0:1\n" +msgstr "" + +#: src/stored/btape.c:2241 src/stored/btape.c:2257 +#, c-format +msgid "Reading block %d.\n" +msgstr "" + +#: src/stored/btape.c:2247 +msgid "" +"\n" +"The first block on the second tape matches.\n" +"\n" +msgstr "" + +#: src/stored/btape.c:2263 +msgid "" +"\n" +"The last block on the second tape matches. Test succeeded.\n" +"\n" +msgstr "" + +#: src/stored/btape.c:2278 +#, c-format +msgid "10000 records read now at %d:%d\n" +msgstr "" + +#: src/stored/btape.c:2301 src/stored/btape.c:2312 src/stored/btape.c:2360 +msgid "Last block written" +msgstr "" + +#: src/stored/btape.c:2303 src/stored/btape.c:2313 +msgid "Block read back" +msgstr "" + +#: src/stored/btape.c:2304 +#, c-format +msgid "" +"\n" +"\n" +"The blocks differ at byte %u\n" +msgstr "" + +#: src/stored/btape.c:2305 +msgid "" +"\n" +"\n" +"!!!! The last block written and the block\n" +"that was read back differ. The test FAILED !!!!\n" +"This must be corrected before you use Bacula\n" +"to write multi-tape Volumes.!!!!\n" +msgstr "" + +#: src/stored/btape.c:2344 +#, c-format +msgid "Last block at: %u:%u this_dev_block_num=%d\n" +msgstr "" + +#: src/stored/btape.c:2358 +#, c-format +msgid "Block not written: FileIndex=%u blk_block=%u Size=%u\n" +msgstr "" + +#: src/stored/btape.c:2362 +msgid "Block not written" +msgstr "" + +#: src/stored/btape.c:2377 +#, c-format +msgid "End of tape %d:%d. VolumeCapacity=%s. Write rate = %.1f KB/s\n" +msgstr "" + +#: src/stored/btape.c:2427 +msgid "Test writing blocks of 64512 bytes to tape.\n" +msgstr "" + +#: src/stored/btape.c:2429 +msgid "How many blocks do you want to write? (1000): " +msgstr "" + +#: src/stored/btape.c:2444 +#, c-format +msgid "Begin writing %d Bacula blocks to tape ...\n" +msgstr "" + +#: src/stored/btape.c:2496 +#, c-format +msgid "Begin writing raw blocks of %u bytes.\n" +msgstr "" + +#: src/stored/btape.c:2520 +#, c-format +msgid "Write failed at block %u. 
stat=%d ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:2529 +msgid "test autochanger" +msgstr "" + +#: src/stored/btape.c:2530 +msgid "backspace file" +msgstr "" + +#: src/stored/btape.c:2531 +msgid "backspace record" +msgstr "" + +#: src/stored/btape.c:2532 +msgid "list device capabilities" +msgstr "" + +#: src/stored/btape.c:2533 +msgid "clear tape errors" +msgstr "" + +#: src/stored/btape.c:2534 +msgid "go to end of Bacula data for append" +msgstr "" + +#: src/stored/btape.c:2535 +msgid "go to the physical end of medium" +msgstr "" + +#: src/stored/btape.c:2536 +msgid "fill tape, write onto second volume" +msgstr "" + +#: src/stored/btape.c:2537 +msgid "read filled tape" +msgstr "" + +#: src/stored/btape.c:2538 +msgid "forward space a file" +msgstr "" + +#: src/stored/btape.c:2539 +msgid "forward space a record" +msgstr "" + +#: src/stored/btape.c:2541 +msgid "write a Bacula label to the tape" +msgstr "" + +#: src/stored/btape.c:2542 +msgid "load a tape" +msgstr "" + +#: src/stored/btape.c:2543 +msgid "quit btape" +msgstr "" + +#: src/stored/btape.c:2544 +msgid "use write() to fill tape" +msgstr "" + +#: src/stored/btape.c:2545 +msgid "read and print the Bacula tape label" +msgstr "" + +#: src/stored/btape.c:2546 +msgid "test record handling functions" +msgstr "" + +#: src/stored/btape.c:2547 +msgid "rewind the tape" +msgstr "" + +#: src/stored/btape.c:2548 +msgid "read() tape block by block to EOT and report" +msgstr "" + +#: src/stored/btape.c:2549 +msgid "Bacula read block by block to EOT and report" +msgstr "" + +#: src/stored/btape.c:2550 +msgid "print tape status" +msgstr "" + +#: src/stored/btape.c:2551 +msgid "General test Bacula tape functions" +msgstr "" + +#: src/stored/btape.c:2552 +msgid "write an EOF on the tape" +msgstr "" + +#: src/stored/btape.c:2553 +msgid "write a single Bacula block" +msgstr "" + +#: src/stored/btape.c:2554 +msgid "read a single record" +msgstr "" + +#: src/stored/btape.c:2555 +msgid "read a single Bacula block" +msgstr "" + +#: src/stored/btape.c:2556 +msgid "quick fill command" +msgstr "" + +#: src/stored/btape.c:2577 +#, c-format +msgid "\"%s\" is an invalid command\n" +msgstr "" + +#: src/stored/btape.c:2586 +#, c-format +msgid "Interactive commands:\n" +msgstr "" + +#: src/stored/btape.c:2597 +#, c-format +msgid "" +"\n" +"Version: %s (%s)\n" +"\n" +"Usage: btape \n" +" -b specify bootstrap file\n" +" -c set configuration file to file\n" +" -d set debug level to nn\n" +" -p proceed inspite of I/O errors\n" +" -s turn off signals\n" +" -v be verbose\n" +" -? print this message.\n" +"\n" +msgstr "" + +#: src/stored/btape.c:2682 +#, c-format +msgid "Mount second Volume on device %s and press return when ready: " +msgstr "" + +#: src/stored/btape.c:2709 +#, c-format +msgid "Mount blank Volume on device %s and press return when ready: " +msgstr "" + +#: src/stored/btape.c:2727 +#, c-format +msgid "End of Volume \"%s\" %d records.\n" +msgstr "" + +#: src/stored/btape.c:2740 +#, c-format +msgid "Read block=%u, VolBytes=%s rate=%.1f KB/s\n" +msgstr "" + +#: src/stored/btape.c:2752 src/stored/mount.c:627 +#, c-format +msgid "Cannot open Dev=%s, Vol=%s\n" +msgstr "" + +#: src/stored/butil.c:59 +msgid "Nohdr," +msgstr "" + +#: src/stored/butil.c:62 +msgid "partial," +msgstr "" + +#: src/stored/butil.c:65 +msgid "empty," +msgstr "" + +#: src/stored/butil.c:68 +msgid "Nomatch," +msgstr "" + +#: src/stored/butil.c:71 +msgid "cont," +msgstr "" + +#: src/stored/butil.c:147 +msgid "Volume name or names is too long. 
Please use a .bsr file.\n" +msgstr "" + +#: src/stored/butil.c:167 +#, c-format +msgid "Cannot find device \"%s\" in config file %s.\n" +msgstr "" + +#: src/stored/butil.c:174 +#, c-format +msgid "Cannot init device %s\n" +msgstr "" + +#: src/stored/butil.c:194 +#, c-format +msgid "Cannot open %s\n" +msgstr "" + +#: src/stored/butil.c:277 +#, c-format +msgid "Could not find device \"%s\" in config file %s.\n" +msgstr "" + +#: src/stored/butil.c:282 +#, c-format +msgid "Using device: \"%s\" for reading.\n" +msgstr "" + +#: src/stored/butil.c:285 +#, c-format +msgid "Using device: \"%s\" for writing.\n" +msgstr "" + +#: src/stored/butil.c:301 +msgid "Unexpected End of Data\n" +msgstr "" + +#: src/stored/butil.c:303 +msgid "Unexpected End of Tape\n" +msgstr "" + +#: src/stored/butil.c:305 +msgid "Unexpected End of File\n" +msgstr "" + +#: src/stored/butil.c:307 +msgid "Tape Door is Open\n" +msgstr "" + +#: src/stored/butil.c:309 +msgid "Unexpected Tape is Off-line\n" +msgstr "" + +#: src/stored/dev.c:119 +#, c-format +msgid "Unable to stat device %s: ERR=%s\n" +msgstr "" + +#: src/stored/dev.c:130 +#, c-format +msgid "" +"%s is an unknown device type. Must be tape or directory\n" +" or have RequiresMount=yes for DVD. st_mode=%x\n" +msgstr "" + +#: src/stored/dev.c:189 +#, c-format +msgid "Unable to stat mount point %s: ERR=%s\n" +msgstr "" + +#: src/stored/dev.c:195 +msgid "" +"Mount and unmount commands must defined for a device which requires mount.\n" +msgstr "" + +#: src/stored/dev.c:198 +msgid "Write part command must be defined for a device which requires mount.\n" +msgstr "" + +#: src/stored/dev.c:203 +#, c-format +msgid "Block size %u on device %s is too large, using default %u\n" +msgstr "" + +#: src/stored/dev.c:208 +#, c-format +msgid "Max block size %u not multiple of device %s block size.\n" +msgstr "" + +#: src/stored/dev.c:224 src/stored/dev.c:230 +#, c-format +msgid "Unable to init cond variable: ERR=%s\n" +msgstr "" + +#: src/stored/dev.c:326 +msgid "Illegal mode given to open dev.\n" +msgstr "" + +#: src/stored/dev.c:421 src/stored/device.c:325 +#, c-format +msgid "Unable to open device %s: ERR=%s\n" +msgstr "" + +#: src/stored/dev.c:456 +#, c-format +msgid "Could not open file device %s. No Volume name given.\n" +msgstr "" + +#: src/stored/dev.c:479 src/stored/dev.c:647 +#, c-format +msgid "Could not open: %s, ERR=%s\n" +msgstr "" + +#: src/stored/dev.c:523 +#, c-format +msgid "Could not open DVD device %s. No Volume name given.\n" +msgstr "" + +#: src/stored/dev.c:572 +#, c-format +msgid "The DVD in device %s contains data, please blank it before writing.\n" +msgstr "" + +#: src/stored/dev.c:593 +#, c-format +msgid "Unable to stat DVD part 1 file %s: ERR=%s\n" +msgstr "" + +#: src/stored/dev.c:601 +#, c-format +msgid "DVD part 1 is not a regular file %s.\n" +msgstr "" + +#: src/stored/dev.c:621 +#, c-format +msgid "There is no valid DVD in device %s.\n" +msgstr "" + +#: src/stored/dev.c:627 +#, c-format +msgid "Could not mount DVD device %s.\n" +msgstr "" + +#: src/stored/dev.c:677 +#, c-format +msgid "Could not fstat: %s, ERR=%s\n" +msgstr "" + +#: src/stored/dev.c:711 +#, c-format +msgid "Bad call to rewind. Device %s not open\n" +msgstr "" + +#: src/stored/dev.c:750 +#, c-format +msgid "No tape loaded or drive offline on %s.\n" +msgstr "" + +#: src/stored/dev.c:760 +#, c-format +msgid "Rewind error on %s. ERR=%s.\n" +msgstr "" + +#: src/stored/dev.c:770 src/stored/dev.c:848 src/stored/dev.c:985 +#: src/stored/dev.c:1561 +#, c-format +msgid "lseek error on %s. 
ERR=%s.\n" +msgstr "" + +#: src/stored/dev.c:819 +#, c-format +msgid "Bad call to eod. Device %s not open\n" +msgstr "" + +#: src/stored/dev.c:884 +#, c-format +msgid "ioctl MTEOM error on %s. ERR=%s.\n" +msgstr "" + +#: src/stored/dev.c:893 src/stored/dev.c:1026 +#, c-format +msgid "ioctl MTIOCGET error on %s. ERR=%s.\n" +msgstr "" + +#: src/stored/dev.c:971 +msgid "Bad device call. Device not open\n" +msgstr "" + +#: src/stored/dev.c:984 +#, c-format +msgid "Seek error: ERR=%s\n" +msgstr "" + +#: src/stored/dev.c:1021 +msgid " Bacula status:" +msgstr "" + +#: src/stored/dev.c:1022 src/stored/dev.c:1105 src/stored/dev.c:1107 +#, c-format +msgid " file=%d block=%d\n" +msgstr "" + +#: src/stored/dev.c:1030 +msgid " Device status:" +msgstr "" + +#: src/stored/dev.c:1129 +msgid "Bad call to load_dev. Device not open\n" +msgstr "" + +#: src/stored/dev.c:1140 src/stored/dev.c:1153 +#, c-format +msgid "ioctl MTLOAD error on %s. ERR=%s.\n" +msgstr "" + +#: src/stored/dev.c:1184 +#, c-format +msgid "ioctl MTOFFL error on %s. ERR=%s.\n" +msgstr "" + +#: src/stored/dev.c:1225 +msgid "Bad call to fsf. Device not open\n" +msgstr "" + +#: src/stored/dev.c:1236 src/stored/dev.c:1363 +#, c-format +msgid "Device %s at End of Tape.\n" +msgstr "" + +#: src/stored/dev.c:1267 src/stored/dev.c:1343 +#, c-format +msgid "ioctl MTFSF error on %s. ERR=%s.\n" +msgstr "" + +#: src/stored/dev.c:1392 +msgid "Bad call to bsf. Device not open\n" +msgstr "" + +#: src/stored/dev.c:1398 +#, c-format +msgid "Device %s cannot BSF because it is not a tape.\n" +msgstr "" + +#: src/stored/dev.c:1415 +#, c-format +msgid "ioctl MTBSF error on %s. ERR=%s.\n" +msgstr "" + +#: src/stored/dev.c:1434 +msgid "Bad call to fsr. Device not open\n" +msgstr "" + +#: src/stored/dev.c:1444 +#, c-format +msgid "ioctl MTFSR not permitted on %s.\n" +msgstr "" + +#: src/stored/dev.c:1472 +#, c-format +msgid "ioctl MTFSR %d error on %s. ERR=%s.\n" +msgstr "" + +#: src/stored/dev.c:1490 +msgid "Bad call to bsr_dev. Device not open\n" +msgstr "" + +#: src/stored/dev.c:1500 +#, c-format +msgid "ioctl MTBSR not permitted on %s.\n" +msgstr "" + +#: src/stored/dev.c:1514 +#, c-format +msgid "ioctl MTBSR error on %s. ERR=%s.\n" +msgstr "" + +#: src/stored/dev.c:1550 +msgid "Bad call to reposition. Device not open\n" +msgstr "" + +#: src/stored/dev.c:1629 +msgid "Bad call to weof_dev. Device not open\n" +msgstr "" + +#: src/stored/dev.c:1639 +msgid "Attempt to WEOF on non-appendable Volume\n" +msgstr "" + +#: src/stored/dev.c:1657 +#, c-format +msgid "ioctl MTWEOF error on %s. ERR=%s.\n" +msgstr "" + +#: src/stored/dev.c:1756 +#, c-format +msgid "unknown func code %d" +msgstr "" + +#: src/stored/dev.c:1762 +#, c-format +msgid "I/O function \"%s\" not supported on this device.\n" +msgstr "" + +#: src/stored/dev.c:1917 +#, c-format +msgid "Unable to truncate device %s. ERR=%s\n" +msgstr "" + +#: src/stored/dev.c:2016 src/stored/dev.c:2021 +#, c-format +msgid "Device %s cannot be %smounted. ERR=%s\n" +msgstr "" + +#: src/stored/dev.c:2429 +#, c-format +msgid "Unable to set eotmodel on device %s: ERR=%s\n" +msgstr "" + +#: src/stored/device.c:120 +#, c-format +msgid "End of medium on Volume \"%s\" Bytes=%s Blocks=%s at %s.\n" +msgstr "" + +#: src/stored/device.c:136 +#, c-format +msgid "New volume \"%s\" mounted on device %s at %s.\n" +msgstr "" + +#: src/stored/device.c:148 +#, c-format +msgid "write_block_to_device Volume label failed. ERR=%s" +msgstr "" + +#: src/stored/device.c:183 +#, c-format +msgid "write_block_to_device overflow block failed. 
ERR=%s" +msgstr "" + +#: src/stored/device.c:327 +#, c-format +msgid "Unable to open archive %s: ERR=%s\n" +msgstr "" + +#: src/stored/dircmd.c:154 +msgid "Connection request failed.\n" +msgstr "" + +#: src/stored/dircmd.c:163 +#, c-format +msgid "Invalid connection. Len=%d\n" +msgstr "" + +#: src/stored/dircmd.c:274 +#, c-format +msgid "3991 Bad setdebug command: %s\n" +msgstr "" + +#: src/stored/dircmd.c:295 +#, c-format +msgid "3904 Job %s not found.\n" +msgstr "" + +#: src/stored/dircmd.c:321 +#, c-format +msgid "Job %s marked to be canceled.\n" +msgstr "" + +#: src/stored/dircmd.c:322 +#, c-format +msgid "3000 Job %s marked to be canceled.\n" +msgstr "" + +#: src/stored/dircmd.c:326 +msgid "3903 Error scanning cancel command.\n" +msgstr "" + +#: src/stored/dircmd.c:402 src/stored/dircmd.c:758 src/stored/dircmd.c:848 +#: src/stored/dircmd.c:920 src/stored/dircmd.c:983 src/stored/dircmd.c:1026 +#, c-format +msgid "3999 Device \"%s\" not found or could not be opened.\n" +msgstr "" + +#: src/stored/dircmd.c:407 +#, c-format +msgid "3903 Error scanning label command: %s\n" +msgstr "" + +#: src/stored/dircmd.c:457 +#, c-format +msgid "3910 Unable to open device %s: ERR=%s\n" +msgstr "" + +#: src/stored/dircmd.c:474 +#, c-format +msgid "3920 Cannot label Volume because it is already labeled: \"%s\"\n" +msgstr "" + +#: src/stored/dircmd.c:481 +msgid "3921 Wrong volume mounted.\n" +msgstr "" + +#: src/stored/dircmd.c:485 +msgid "3922 Cannot relabel an ANSI/IBM labeled Volume.\n" +msgstr "" + +#: src/stored/dircmd.c:493 +#, c-format +msgid "3912 Failed to label Volume: ERR=%s\n" +msgstr "" + +#: src/stored/dircmd.c:503 +#, c-format +msgid "3914 Failed to label Volume (no media): ERR=%s\n" +msgstr "" + +#: src/stored/dircmd.c:506 +#, c-format +msgid "3913 Cannot label Volume. 
Unknown status %d from read_volume_label()\n" +msgstr "" + +#: src/stored/dircmd.c:539 +#, c-format +msgid "3001 Mounted Volume: %s\n" +msgstr "" + +#: src/stored/dircmd.c:543 src/stored/dircmd.c:1062 +#, c-format +msgid "" +"3902 Cannot mount Volume on Storage Device %s because:\n" +"%s" +msgstr "" + +#: src/stored/dircmd.c:571 src/stored/reserve.c:1073 +#, c-format +msgid "" +"\n" +" Device \"%s\" requested by DIR could not be opened or does not exist.\n" +msgstr "" + +#: src/stored/dircmd.c:593 src/stored/reserve.c:1069 +#, c-format +msgid "" +"\n" +" Device \"%s\" in changer \"%s\" requested by DIR could not be opened or " +"does not exist.\n" +msgstr "" + +#: src/stored/dircmd.c:668 src/stored/dircmd.c:725 +#, c-format +msgid "3901 open device failed: ERR=%s\n" +msgstr "" + +#: src/stored/dircmd.c:688 src/stored/dircmd.c:716 +#, c-format +msgid "3001 Device %s is mounted with Volume \"%s\"\n" +msgstr "" + +#: src/stored/dircmd.c:691 src/stored/dircmd.c:719 src/stored/dircmd.c:734 +#, c-format +msgid "" +"3905 Device %s open but no Bacula volume is mounted.\n" +"If this is not a blank tape, try unmounting and remounting the Volume.\n" +msgstr "" + +#: src/stored/dircmd.c:701 +#, c-format +msgid "3001 Device %s is doing acquire.\n" +msgstr "" + +#: src/stored/dircmd.c:706 src/stored/dircmd.c:820 +#, c-format +msgid "3903 Device %s is being labeled.\n" +msgstr "" + +#: src/stored/dircmd.c:731 +#, c-format +msgid "3001 Device %s is already mounted with Volume \"%s\"\n" +msgstr "" + +#: src/stored/dircmd.c:740 +#, c-format +msgid "3002 Device %s is mounted.\n" +msgstr "" + +#: src/stored/dircmd.c:743 src/stored/dircmd.c:793 src/stored/dircmd.c:808 +#: src/stored/dircmd.c:839 +#, c-format +msgid "3907 %s" +msgstr "" + +#: src/stored/dircmd.c:746 +#, c-format +msgid "3906 File device %s is always mounted.\n" +msgstr "" + +#: src/stored/dircmd.c:752 +#, c-format +msgid "3905 Bizarre wait state %d\n" +msgstr "" + +#: src/stored/dircmd.c:762 +#, c-format +msgid "3909 Error scanning mount command: %s\n" +msgstr "" + +#: src/stored/dircmd.c:790 src/stored/dircmd.c:841 +#, c-format +msgid "3002 Device %s unmounted.\n" +msgstr "" + +#: src/stored/dircmd.c:797 +#, c-format +msgid "3901 Device %s is already unmounted.\n" +msgstr "" + +#: src/stored/dircmd.c:811 +#, c-format +msgid "3001 Device %s unmounted.\n" +msgstr "" + +#: src/stored/dircmd.c:816 +#, c-format +msgid "3902 Device %s is busy in acquire.\n" +msgstr "" + +#: src/stored/dircmd.c:853 +#, c-format +msgid "3907 Error scanning unmount command: %s\n" +msgstr "" + +#: src/stored/dircmd.c:884 +#, c-format +msgid "3921 Device %s already released.\n" +msgstr "" + +#: src/stored/dircmd.c:891 +#, c-format +msgid "3922 Device %s waiting for sysop.\n" +msgstr "" + +#: src/stored/dircmd.c:897 +#, c-format +msgid "3922 Device %s waiting for mount.\n" +msgstr "" + +#: src/stored/dircmd.c:901 +#, c-format +msgid "3923 Device %s is busy in acquire.\n" +msgstr "" + +#: src/stored/dircmd.c:905 +#, c-format +msgid "3914 Device %s is being labeled.\n" +msgstr "" + +#: src/stored/dircmd.c:914 +#, c-format +msgid "3022 Device %s released.\n" +msgstr "" + +#: src/stored/dircmd.c:925 +#, c-format +msgid "3927 Error scanning release command: %s\n" +msgstr "" + +#: src/stored/dircmd.c:970 +#, c-format +msgid "3995 Device %s is not an autochanger.\n" +msgstr "" + +#: src/stored/dircmd.c:987 +#, c-format +msgid "3908 Error scanning autocharger drives/list/slots command: %s\n" +msgstr "" + +#: src/stored/dircmd.c:1030 +#, c-format +msgid "3909 Error scanning 
readlabel command: %s\n" +msgstr "" + +#: src/stored/dircmd.c:1058 +#, c-format +msgid "3001 Volume=%s Slot=%d\n" +msgstr "" + +#: src/stored/dircmd.c:1090 +#, c-format +msgid "3931 Device %s is BLOCKED. user unmounted.\n" +msgstr "" + +#: src/stored/dircmd.c:1094 +#, c-format +msgid "" +"3932 Device %s is BLOCKED. user unmounted during wait for media/mount.\n" +msgstr "" + +#: src/stored/dircmd.c:1098 +#, c-format +msgid "3933 Device %s is BLOCKED waiting for media.\n" +msgstr "" + +#: src/stored/dircmd.c:1102 +#, c-format +msgid "3934 Device %s is being initialized.\n" +msgstr "" + +#: src/stored/dircmd.c:1106 +#, c-format +msgid "3935 Device %s is blocked labeling a Volume.\n" +msgstr "" + +#: src/stored/dircmd.c:1110 +#, c-format +msgid "3935 Device %s is blocked for unknown reason.\n" +msgstr "" + +#: src/stored/dircmd.c:1115 +#, c-format +msgid "3936 Device %s is busy reading.\n" +msgstr "" + +#: src/stored/dircmd.c:1118 +#, c-format +msgid "3937 Device %s is busy with %d writer(s).\n" +msgstr "" + +#: src/stored/dvd.c:112 +msgid "No FreeSpace command defined.\n" +msgstr "" + +#: src/stored/dvd.c:146 +#, c-format +msgid "Cannot run free space command. Results=%s ERR=%s\n" +msgstr "" + +#: src/stored/dvd.c:262 +#, c-format +msgid "Error writing part %d to the DVD: ERR=%s\n" +msgstr "" + +#: src/stored/dvd.c:264 +#, c-format +msgid "Error while writing current part to the DVD: %s" +msgstr "" + +#: src/stored/dvd.c:274 +#, c-format +msgid "Part %d (%lld bytes) written to DVD.\n" +msgstr "" + +#: src/stored/dvd.c:293 +#, c-format +msgid "Remaining free space %s on %s\n" +msgstr "" + +#: src/stored/dvd.c:359 +#, c-format +msgid "Next Volume part already exists on DVD. Cannot continue: %s\n" +msgstr "" + +#: src/stored/dvd.c:378 +#, c-format +msgid "open_next_part can't unlink existing part %s, ERR=%s\n" +msgstr "" + +#: src/stored/dvd.c:579 +#, c-format +msgid "" +"Error writing. Current part less than total number of parts (%d/%d, device=%" +"s)\n" +msgstr "" + +#: src/stored/dvd.c:586 +#, c-format +msgid "Unable to write last on %s: ERR=%s\n" +msgstr "" + +#: src/stored/fd_cmds.c:368 +msgid "Error parsing bootstrap file.\n" +msgstr "" + +#: src/stored/job.c:207 +#, c-format +msgid "Job name not found: %s\n" +msgstr "" + +#: src/stored/job.c:218 +#, c-format +msgid "Hey!!!! JobId %u Job %s already authenticated.\n" +msgstr "" + +#: src/stored/job.c:229 +msgid "Unable to authenticate File daemon\n" +msgstr "" + +#: src/stored/job.c:351 +msgid "In free_jcr(), but still attached to device!!!!\n" +msgstr "" + +#: src/stored/label.c:91 src/stored/label.c:132 src/stored/label.c:226 +#, c-format +msgid "Wrong Volume mounted on device %s: Wanted %s have %s\n" +msgstr "" + +#: src/stored/label.c:98 src/stored/label.c:135 src/stored/label.c:208 +#, c-format +msgid "Too many tries: %s" +msgstr "" + +#: src/stored/label.c:114 +#, c-format +msgid "Couldn't rewind device %s: ERR=%s\n" +msgstr "" + +#: src/stored/label.c:152 +#, c-format +msgid "" +"Requested Volume \"%s\" on %s is not a Bacula labeled Volume, because: ERR=%s" +msgstr "" + +#: src/stored/label.c:157 +msgid "Could not read Volume label from block.\n" +msgstr "" + +#: src/stored/label.c:160 +#, c-format +msgid "Could not unserialize Volume label: ERR=%s\n" +msgstr "" + +#: src/stored/label.c:165 +#, c-format +msgid "Volume Header Id bad: %s\n" +msgstr "" + +#: src/stored/label.c:193 +#, c-format +msgid "Volume on %s has wrong Bacula version. 
Wanted %d got %d\n" +msgstr "" + +#: src/stored/label.c:204 +#, c-format +msgid "Volume on %s has bad Bacula label type: %x\n" +msgstr "" + +#: src/stored/label.c:217 src/stored/label.c:402 +#, c-format +msgid "Could not reserve volume %s on %s\n" +msgstr "" + +#: src/stored/label.c:290 +#, c-format +msgid "Cannot write Volume label to block for device %s\n" +msgstr "" + +#: src/stored/label.c:449 +#, c-format +msgid "Rewind error on device %s: ERR=%s\n" +msgstr "" + +#: src/stored/label.c:455 +#, c-format +msgid "Truncate error on device %s: ERR=%s\n" +msgstr "" + +#: src/stored/label.c:461 +#, c-format +msgid "Failed to re-open DVD after truncate on device %s: ERR=%s\n" +msgstr "" + +#: src/stored/label.c:484 +#, c-format +msgid "Unable to write device %s: ERR=%s\n" +msgstr "" + +#: src/stored/label.c:512 +#, c-format +msgid "Recycled volume \"%s\" on device %s, all previous data lost.\n" +msgstr "" + +#: src/stored/label.c:515 +#, c-format +msgid "Wrote label to prelabeled Volume \"%s\" on device %s\n" +msgstr "" + +#: src/stored/label.c:719 +#, c-format +msgid "Bad session label = %d\n" +msgstr "" + +#: src/stored/label.c:737 src/stored/label.c:744 +#, c-format +msgid "Error writing Session label to %s: %s\n" +msgstr "" + +#: src/stored/label.c:779 +#, c-format +msgid "Expecting Volume Label, got FI=%s Stream=%s len=%d\n" +msgstr "" + +#: src/stored/label.c:906 +#, c-format +msgid "Unknown %d" +msgstr "" + +#: src/stored/label.c:910 +#, c-format +msgid "" +"\n" +"Volume Label:\n" +"Id : %sVerNo : %d\n" +"VolName : %s\n" +"PrevVolName : %s\n" +"VolFile : %d\n" +"LabelType : %s\n" +"LabelSize : %d\n" +"PoolName : %s\n" +"MediaType : %s\n" +"PoolType : %s\n" +"HostName : %s\n" +msgstr "" + +#: src/stored/label.c:932 +#, c-format +msgid "Date label written: %s\n" +msgstr "" + +#: src/stored/label.c:938 +#, c-format +msgid "Date label written: %04d-%02d-%02d at %02d:%02d\n" +msgstr "" + +#: src/stored/label.c:958 +#, c-format +msgid "" +"\n" +"%s Record:\n" +"JobId : %d\n" +"VerNum : %d\n" +"PoolName : %s\n" +"PoolType : %s\n" +"JobName : %s\n" +"ClientName : %s\n" +msgstr "" + +#: src/stored/label.c:971 +#, c-format +msgid "" +"Job (unique name) : %s\n" +"FileSet : %s\n" +"JobType : %c\n" +"JobLevel : %c\n" +msgstr "" + +#: src/stored/label.c:980 +#, c-format +msgid "" +"JobFiles : %s\n" +"JobBytes : %s\n" +"StartBlock : %s\n" +"EndBlock : %s\n" +"StartFile : %s\n" +"EndFile : %s\n" +"JobErrors : %s\n" +"JobStatus : %c\n" +msgstr "" + +#: src/stored/label.c:1001 +#, c-format +msgid "Date written : %s\n" +msgstr "" + +#: src/stored/label.c:1006 +#, c-format +msgid "Date written : %04d-%02d-%02d at %02d:%02d\n" +msgstr "" + +#: src/stored/label.c:1025 +msgid "Fresh Volume" +msgstr "" + +#: src/stored/label.c:1028 +msgid "Volume" +msgstr "" + +#: src/stored/label.c:1037 src/stored/read_record.c:402 +msgid "End of Media" +msgstr "" + +#: src/stored/label.c:1040 +msgid "End of Tape" +msgstr "" + +#: src/stored/label.c:1060 src/stored/label.c:1068 src/stored/label.c:1101 +#, c-format +msgid "%s Record: File:blk=%u:%u SessId=%d SessTime=%d JobId=%d DataLen=%d\n" +msgstr "" + +#: src/stored/label.c:1065 +msgid "End of physical tape.\n" +msgstr "" + +#: src/stored/label.c:1080 src/stored/label.c:1089 +#, c-format +msgid "%s Record: File:blk=%u:%u SessId=%d SessTime=%d JobId=%d\n" +msgstr "" + +#: src/stored/label.c:1082 +#, c-format +msgid " Job=%s Date=%s Level=%c Type=%c\n" +msgstr "" + +#: src/stored/label.c:1091 +#, c-format +msgid " Date=%s Level=%c Type=%c Files=%s Bytes=%s Errors=%d 
Status=%c\n" +msgstr "" + +#: src/stored/mac.c:79 +msgid "Read and write devices not properly initialized.\n" +msgstr "" + +#: src/stored/mac.c:87 +#, c-format +msgid "No Volume names found for %s.\n" +msgstr "" + +#: src/stored/mount.c:90 +#, c-format +msgid "Too many errors trying to mount device %s.\n" +msgstr "" + +#: src/stored/mount.c:96 +#, c-format +msgid "Job %d canceled.\n" +msgstr "" + +#: src/stored/mount.c:211 +#, c-format +msgid "Could not open device %s: ERR=%s\n" +msgstr "" + +#: src/stored/mount.c:254 src/stored/mount.c:533 +#, c-format +msgid "Volume \"%s\" not on device %s.\n" +msgstr "" + +#: src/stored/mount.c:292 +#, c-format +msgid "" +"Director wanted Volume \"%s\".\n" +" Current Volume \"%s\" not acceptable because:\n" +" %s" +msgstr "" + +#: src/stored/mount.c:375 +#, c-format +msgid "Volume \"%s\" previously written, moving to end of data.\n" +msgstr "" + +#: src/stored/mount.c:378 +#, c-format +msgid "Unable to position to end of data on device %s: ERR=%s\n" +msgstr "" + +#: src/stored/mount.c:386 +#, c-format +msgid "Ready to append to end of Volume \"%s\" part=%d size=%s\n" +msgstr "" + +#: src/stored/mount.c:390 +#, c-format +msgid "" +"Bacula cannot write on DVD Volume \"%s\" because: The sizes do not match! " +"Volume=%s Catalog=%s\n" +msgstr "" + +#: src/stored/mount.c:404 +#, c-format +msgid "Ready to append to end of Volume \"%s\" at file=%d.\n" +msgstr "" + +#: src/stored/mount.c:407 +#, c-format +msgid "" +"Bacula cannot write on tape Volume \"%s\" because:\n" +"The number of files mismatch! Volume=%u Catalog=%u\n" +msgstr "" + +#: src/stored/mount.c:418 +#, c-format +msgid "Ready to append to end of Volume \"%s\" size=%s\n" +msgstr "" + +#: src/stored/mount.c:422 +#, c-format +msgid "" +"Bacula cannot write on disk Volume \"%s\" because: The sizes do not match! " +"Volume=%s Catalog=%s\n" +msgstr "" + +#: src/stored/mount.c:452 +#, c-format +msgid "Ready to append to end of Volume \"%s\" at file address=%u.\n" +msgstr "" + +#: src/stored/mount.c:456 +#, c-format +msgid "" +"Bacula cannot write on Volume \"%s\" because:\n" +"The EOD file address is wrong: Volume file address=%u != Catalog Endblock=%u" +"(+1)\n" +"Perhaps You removed the DVD last part in spool directory.\n" +msgstr "" + +#: src/stored/mount.c:523 +#, c-format +msgid "Labeled new Volume \"%s\" on device %s.\n" +msgstr "" + +#: src/stored/mount.c:528 +#, c-format +msgid "Device %s not configured to autolabel Volumes.\n" +msgstr "" + +#: src/stored/mount.c:548 +#, c-format +msgid "Marking Volume \"%s\" in Error in Catalog.\n" +msgstr "" + +#: src/stored/mount.c:564 +#, c-format +msgid "" +"Autochanger Volume \"%s\" not found in slot %d.\n" +" Setting InChanger to zero in catalog.\n" +msgstr "" + +#: src/stored/mount.c:583 +msgid "Hey!!!!! 
WroteVol non-zero !!!!!\n" +msgstr "" + +#: src/stored/parse_bsr.c:118 src/stored/parse_bsr.c:122 +#, c-format +msgid "" +"Bootstrap file error: %s\n" +" : Line %d, col %d of file %s\n" +"%s\n" +msgstr "" + +#: src/stored/parse_bsr.c:144 +#, c-format +msgid "Cannot open bootstrap file %s: %s\n" +msgstr "" + +#: src/stored/parse_bsr.c:274 +#, c-format +msgid "MediaType %s in bsr at inappropriate place.\n" +msgstr "" + +#: src/stored/parse_bsr.c:295 +#, c-format +msgid "Device \"%s\" in bsr at inappropriate place.\n" +msgstr "" + +#: src/stored/parse_bsr.c:452 +msgid "JobType not yet implemented\n" +msgstr "" + +#: src/stored/parse_bsr.c:460 +msgid "JobLevel not yet implemented\n" +msgstr "" + +#: src/stored/parse_bsr.c:643 +#, c-format +msgid "Slot %d in bsr at inappropriate place.\n" +msgstr "" + +#: src/stored/parse_bsr.c:667 +#, c-format +msgid "VolFile : %u-%u\n" +msgstr "" + +#: src/stored/parse_bsr.c:675 +#, c-format +msgid "VolBlock : %u-%u\n" +msgstr "" + +#: src/stored/parse_bsr.c:685 +#, c-format +msgid "FileIndex : %u\n" +msgstr "" + +#: src/stored/parse_bsr.c:687 +#, c-format +msgid "FileIndex : %u-%u\n" +msgstr "" + +#: src/stored/parse_bsr.c:697 +#, c-format +msgid "JobId : %u\n" +msgstr "" + +#: src/stored/parse_bsr.c:699 +#, c-format +msgid "JobId : %u-%u\n" +msgstr "" + +#: src/stored/parse_bsr.c:709 +#, c-format +msgid "SessId : %u\n" +msgstr "" + +#: src/stored/parse_bsr.c:711 +#, c-format +msgid "SessId : %u-%u\n" +msgstr "" + +#: src/stored/parse_bsr.c:720 +#, c-format +msgid "VolumeName : %s\n" +msgstr "" + +#: src/stored/parse_bsr.c:721 +#, c-format +msgid " MediaType : %s\n" +msgstr "" + +#: src/stored/parse_bsr.c:722 +#, c-format +msgid " Device : %s\n" +msgstr "" + +#: src/stored/parse_bsr.c:723 +#, c-format +msgid " Slot : %d\n" +msgstr "" + +#: src/stored/parse_bsr.c:732 +#, c-format +msgid "Client : %s\n" +msgstr "" + +#: src/stored/parse_bsr.c:740 +#, c-format +msgid "Job : %s\n" +msgstr "" + +#: src/stored/parse_bsr.c:748 +#, c-format +msgid "SessTime : %u\n" +msgstr "" + +#: src/stored/parse_bsr.c:759 +msgid "BSR is NULL\n" +msgstr "" + +#: src/stored/parse_bsr.c:763 +#, c-format +msgid "Next : 0x%x\n" +msgstr "" + +#: src/stored/parse_bsr.c:764 +#, c-format +msgid "Root bsr : 0x%x\n" +msgstr "" + +#: src/stored/parse_bsr.c:775 +#, c-format +msgid "count : %u\n" +msgstr "" + +#: src/stored/parse_bsr.c:776 +#, c-format +msgid "found : %u\n" +msgstr "" + +#: src/stored/parse_bsr.c:779 +#, c-format +msgid "done : %s\n" +msgstr "" + +#: src/stored/parse_bsr.c:780 +#, c-format +msgid "positioning : %d\n" +msgstr "" + +#: src/stored/parse_bsr.c:781 +#, c-format +msgid "fast_reject : %d\n" +msgstr "" + +#: src/stored/pythonsd.c:210 +msgid "Error in ParseTuple\n" +msgstr "" + +#: src/stored/pythonsd.c:226 +msgid "Parse tuple error in job_write\n" +msgstr "" + +#: src/stored/pythonsd.c:263 +#, c-format +msgid "Error in Python method %s\n" +msgstr "" + +#: src/stored/read.c:68 +msgid "No Volume names found for restore.\n" +msgstr "" + +#: src/stored/read.c:122 +#, c-format +msgid ">filed: Error Hdr=%s\n" +msgstr "" + +#: src/stored/read.c:123 src/stored/read.c:138 +#, c-format +msgid "Error sending to File daemon. ERR=%s\n" +msgstr "" + +#: src/stored/read.c:137 +#, c-format +msgid "Error sending to FD. 
ERR=%s\n" +msgstr "" + +#: src/stored/read_record.c:83 +#, c-format +msgid "End of Volume at file %u on device %s, Volume \"%s\"\n" +msgstr "" + +#: src/stored/read_record.c:86 +msgid "End of all volumes.\n" +msgstr "" + +#: src/stored/read_record.c:129 +msgid "part" +msgstr "" + +#: src/stored/read_record.c:132 +msgid "file" +msgstr "" + +#: src/stored/read_record.c:135 +#, c-format +msgid "End of %s %u on device %s, Volume \"%s\"\n" +msgstr "" + +#: src/stored/read_record.c:149 +msgid "Did fsr in attemp to skip bad record.\n" +msgstr "" + +#: src/stored/read_record.c:342 +#, c-format +msgid "Reposition from (file:block) %u:%u to %u:%u\n" +msgstr "" + +#: src/stored/read_record.c:370 +#, c-format +msgid "Forward spacing Volume \"%s\" to file:block %u:%u.\n" +msgstr "" + +#: src/stored/read_record.c:395 +msgid "Begin Session" +msgstr "" + +#: src/stored/read_record.c:399 +msgid "End Session" +msgstr "" + +#: src/stored/read_record.c:405 +#, c-format +msgid "Unknown code %d\n" +msgstr "" + +#: src/stored/record.c:71 +#, c-format +msgid "unknown: %d" +msgstr "" + +#: src/stored/record.c:378 +msgid "Damaged buffer\n" +msgstr "" + +#: src/stored/record.c:549 +#, c-format +msgid "Sanity check failed. maxlen=%d datalen=%d. Block discarded.\n" +msgstr "" + +#: src/stored/reserve.c:93 +#, c-format +msgid "Unable to initialize reservation lock. ERR=%s\n" +msgstr "" + +#: src/stored/reserve.c:99 +#, c-format +msgid "Unable to initialize volume list lock. ERR=%s\n" +msgstr "" + +#: src/stored/reserve.c:429 +#, c-format +msgid "Hey! num_writers=%d!!!!\n" +msgstr "" + +#: src/stored/reserve.c:643 +msgid "3939 Could not get dcr\n" +msgstr "" + +#: src/stored/reserve.c:766 src/stored/reserve.c:777 +#, c-format +msgid "Failed command: %s\n" +msgstr "" + +#: src/stored/reserve.c:767 +#, c-format +msgid "" +"\n" +" Device \"%s\" with MediaType \"%s\" requested by DIR not found in SD " +"Device resources.\n" +msgstr "" + +#: src/stored/reserve.c:1085 +#, c-format +msgid "3926 Could not get dcr for device: %s\n" +msgstr "" + +#: src/stored/reserve.c:1180 +#, c-format +msgid "3601 JobId=%u device %s is BLOCKED due to user unmount.\n" +msgstr "" + +#: src/stored/reserve.c:1190 +#, c-format +msgid "3602 JobId=%u device %s is busy (already reading/writing).\n" +msgstr "" + +#: src/stored/reserve.c:1237 +#, c-format +msgid "3603 JobId=%u device %s is busy reading.\n" +msgstr "" + +#: src/stored/reserve.c:1246 +#, c-format +msgid "3604 JobId=%u device %s is BLOCKED due to user unmount.\n" +msgstr "" + +#: src/stored/reserve.c:1314 +#, c-format +msgid "3605 JobId=%u wants free drive but device %s is busy.\n" +msgstr "" + +#: src/stored/reserve.c:1322 +#, c-format +msgid "3606 JobId=%u prefers mounted drives, but drive %s has no Volume.\n" +msgstr "" + +#: src/stored/reserve.c:1345 +#, c-format +msgid "3607 JobId=%u wants Vol=\"%s\" drive has Vol=\"%s\" on drive %s.\n" +msgstr "" + +#: src/stored/reserve.c:1387 +#, c-format +msgid "" +"3608 JobId=%u wants Pool=\"%s\" but have Pool=\"%s\" nreserve=%d on drive %" +"s.\n" +msgstr "" + +#: src/stored/reserve.c:1431 +#, c-format +msgid "3609 JobId=%u wants Pool=\"%s\" but has Pool=\"%s\" on drive %s.\n" +msgstr "" + +#: src/stored/reserve.c:1439 +#, c-format +msgid "Logic error!!!! JobId=%u Should not get here.\n" +msgstr "" + +#: src/stored/reserve.c:1440 +#, c-format +msgid "3910 JobId=%u Logic error!!!! drive %s Should not get here.\n" +msgstr "" + +#: src/stored/reserve.c:1443 +msgid "Logic error!!!! 
Should not get here.\n" +msgstr "" + +#: src/stored/reserve.c:1446 +#, c-format +msgid "3911 JobId=%u failed reserve drive %s.\n" +msgstr "" + +#: src/stored/spool.c:84 +msgid "Spooling statistics:\n" +msgstr "" + +#: src/stored/spool.c:87 +#, c-format +msgid "" +"Data spooling: %u active jobs, %s bytes; %u total jobs, %s max bytes/job.\n" +msgstr "" + +#: src/stored/spool.c:95 +#, c-format +msgid "Attr spooling: %u active jobs, %s bytes; %u total jobs, %s max bytes.\n" +msgstr "" + +#: src/stored/spool.c:115 +msgid "Spooling data ...\n" +msgstr "" + +#: src/stored/spool.c:141 +#, c-format +msgid "Bad return from despool WroteVol=%d\n" +msgstr "" + +#: src/stored/spool.c:174 +#, c-format +msgid "Open data spool file %s failed: ERR=%s\n" +msgstr "" + +#: src/stored/spool.c:232 +#, c-format +msgid "Committing spooled data to Volume \"%s\". Despooling %s bytes ...\n" +msgstr "" + +#: src/stored/spool.c:236 +#, c-format +msgid "Writing spooled data to Volume. Despooling %s bytes ...\n" +msgstr "" + +#: src/stored/spool.c:306 +#, c-format +msgid "" +"Despooling elapsed time = %02d:%02d:%02d, Transfer rate = %s bytes/second\n" +msgstr "" + +#: src/stored/spool.c:315 src/stored/spool.c:504 src/stored/spool.c:546 +#, c-format +msgid "Ftruncate spool file failed: ERR=%s\n" +msgstr "" + +#: src/stored/spool.c:376 +#, c-format +msgid "Spool header read error. ERR=%s\n" +msgstr "" + +#: src/stored/spool.c:379 +#, c-format +msgid "Spool read error. Wanted %u bytes, got %d\n" +msgstr "" + +#: src/stored/spool.c:380 +#, c-format +msgid "Spool header read error. Wanted %u bytes, got %d\n" +msgstr "" + +#: src/stored/spool.c:386 src/stored/spool.c:387 +#, c-format +msgid "Spool block too big. Max %u bytes, got %u\n" +msgstr "" + +#: src/stored/spool.c:392 src/stored/spool.c:393 +#, c-format +msgid "Spool data read error. Wanted %u bytes, got %d\n" +msgstr "" + +#: src/stored/spool.c:450 +msgid "User specified spool size reached.\n" +msgstr "" + +#: src/stored/spool.c:452 +msgid "Bad return from despool in write_block.\n" +msgstr "" + +#: src/stored/spool.c:460 +msgid "Spooling data again ...\n" +msgstr "" + +#: src/stored/spool.c:491 +#, c-format +msgid "Error writing header to spool file. ERR=%s\n" +msgstr "" + +#: src/stored/spool.c:510 src/stored/spool.c:552 +msgid "Fatal despooling error." +msgstr "" + +#: src/stored/spool.c:517 +msgid "Retrying after header spooling error failed.\n" +msgstr "" + +#: src/stored/spool.c:531 +#, c-format +msgid "Error writing data to spool file. ERR=%s\n" +msgstr "" + +#: src/stored/spool.c:562 +msgid "Retrying after data spooling error failed.\n" +msgstr "" + +#: src/stored/spool.c:617 src/stored/spool.c:624 +#, c-format +msgid "Fseek on attributes file failed: ERR=%s\n" +msgstr "" + +#: src/stored/spool.c:634 +#, c-format +msgid "Sending spooled attrs to the Director. 
Despooling %s bytes ...\n" +msgstr "" + +#: src/stored/spool.c:661 +#, c-format +msgid "fopen attr spool file %s failed: ERR=%s\n" +msgstr "" + +#: src/stored/status.c:120 +msgid "" +"\n" +"Device status:\n" +msgstr "" + +#: src/stored/status.c:124 +#, c-format +msgid "Autochanger \"%s\" with devices:\n" +msgstr "" + +#: src/stored/status.c:142 +#, c-format +msgid "" +"Device %s is mounted with:\n" +" Volume: %s\n" +" Pool: %s\n" +" Media type: %s\n" +msgstr "" + +#: src/stored/status.c:152 +#, c-format +msgid "Device %s open but no Bacula volume is currently mounted.\n" +msgstr "" + +#: src/stored/status.c:163 +#, c-format +msgid " Total Bytes=%s Blocks=%s Bytes/block=%s\n" +msgstr "" + +#: src/stored/status.c:178 +#, c-format +msgid " Total Bytes Read=%s Blocks Read=%s Bytes/block=%s\n" +msgstr "" + +#: src/stored/status.c:184 +#, c-format +msgid " Positioned at File=%s Block=%s\n" +msgstr "" + +#: src/stored/status.c:191 +#, c-format +msgid "Device %s is not open.\n" +msgstr "" + +#: src/stored/status.c:195 +#, c-format +msgid "Device \"%s\" is not open or does not exist.\n" +msgstr "" + +#: src/stored/status.c:201 +msgid "In Use Volume status:\n" +msgstr "" + +#: src/stored/status.c:208 src/stored/status.c:210 +msgid "" +"====\n" +"\n" +msgstr "" + +#: src/stored/status.c:227 +msgid "" +"No DEVICE structure.\n" +"\n" +msgstr "" + +#: src/stored/status.c:234 +msgid " Device is BLOCKED. User unmounted.\n" +msgstr "" + +#: src/stored/status.c:238 +msgid " Device is BLOCKED. User unmounted during wait for media/mount.\n" +msgstr "" + +#: src/stored/status.c:250 +#, c-format +msgid "" +" Device is BLOCKED waiting for mount of volume \"%s\",\n" +" Pool: %s\n" +" Media type: %s\n" +msgstr "" + +#: src/stored/status.c:259 +#, c-format +msgid "" +" Device is BLOCKED waiting to create a volume for:\n" +" Pool: %s\n" +" Media type: %s\n" +msgstr "" + +#: src/stored/status.c:271 +msgid " Device is BLOCKED waiting for media.\n" +msgstr "" + +#: src/stored/status.c:277 +msgid " Device is being initialized.\n" +msgstr "" + +#: src/stored/status.c:281 +msgid " Device is blocked labeling a Volume.\n" +msgstr "" + +#: src/stored/status.c:290 +#, c-format +msgid " Slot %d is loaded in drive %d.\n" +msgstr "" + +#: src/stored/status.c:294 +#, c-format +msgid " Drive %d is not loaded.\n" +msgstr "" + +#: src/stored/status.c:297 +#, c-format +msgid " Drive %d status unknown.\n" +msgstr "" + +#: src/stored/status.c:320 +msgid "Device state:\n" +msgstr "" + +#: src/stored/status.c:338 +#, c-format +msgid "" +"num_writers=%d block=%d\n" +"\n" +msgstr "" + +#: src/stored/status.c:344 +#, c-format +msgid "Archive name: %s Device name: %s\n" +msgstr "" + +#: src/stored/status.c:348 +#, c-format +msgid "File=%u block=%u\n" +msgstr "" + +#: src/stored/status.c:351 +#, c-format +msgid "Min block=%u Max block=%u\n" +msgstr "" + +#: src/stored/status.c:375 +#, c-format +msgid "%s Job %s waiting for Client connection.\n" +msgstr "" + +#: src/stored/status.c:391 +#, c-format +msgid "" +"Reading: %s %s job %s JobId=%d Volume=\"%s\"\n" +" pool=\"%s\" device=%s\n" +msgstr "" + +#: src/stored/status.c:404 +#, c-format +msgid "" +"Writing: %s %s job %s JobId=%d Volume=\"%s\"\n" +" pool=\"%s\" device=%s\n" +msgstr "" + +#: src/stored/status.c:415 +#, c-format +msgid " spooling=%d despooling=%d despool_wait=%d\n" +msgstr "" + +#: src/stored/status.c:424 +#, c-format +msgid " Files=%s Bytes=%s Bytes/sec=%s\n" +msgstr "" + +#: src/stored/status.c:432 +#, c-format +msgid " FDReadSeqNo=%s in_msg=%u out_msg=%d fd=%d\n" +msgstr "" + 
+#: src/stored/status.c:438 +msgid " FDSocket closed\n" +msgstr "" + +#: src/stored/status.c:460 +msgid "" +"\n" +"Jobs waiting to reserve a drive:\n" +msgstr "" + +#: src/stored/status.c:491 +msgid "===================================================================\n" +msgstr "" + +#: src/stored/status.c:637 +msgid "3900 Bad .status command, missing argument.\n" +msgstr "" + +#: src/stored/status.c:660 +msgid "3900 Bad .status command, wrong argument.\n" +msgstr "" + +#: src/stored/status.c:674 +msgid "Bacula Storage: Idle" +msgstr "" + +#: src/stored/status.c:685 +msgid "Bacula Storage: Running" +msgstr "" + +#: src/stored/status.c:699 +msgid "Bacula Storage: Last Job Canceled" +msgstr "" + +#: src/stored/status.c:703 +msgid "Bacula Storage: Last Job Failed" +msgstr "" + +#: src/stored/status.c:707 +msgid "Bacula Storage: Last Job had Warnings" +msgstr "" + +#: src/stored/stored.c:82 +#, c-format +msgid "" +"\n" +"Version: %s (%s)\n" +"\n" +"Usage: stored [options] [-c config_file] [config_file]\n" +" -c use as configuration file\n" +" -dnn set debug level to nn\n" +" -f run in foreground (for debugging)\n" +" -g set groupid to group\n" +" -p proceed despite I/O errors\n" +" -s no signals (for debugging)\n" +" -t test - read config and exit\n" +" -u userid to \n" +" -v verbose user messages\n" +" -? print this message.\n" +"\n" +msgstr "" + +#: src/stored/stored.c:243 +msgid "Volume Session Time is ZERO!\n" +msgstr "" + +#: src/stored/stored.c:256 +#, c-format +msgid "Unable to create thread. ERR=%s\n" +msgstr "" + +#: src/stored/stored.c:294 +#, c-format +msgid "Only one Storage resource permitted in %s\n" +msgstr "" + +#: src/stored/stored.c:299 +#, c-format +msgid "No Director resource defined in %s. Cannot continue.\n" +msgstr "" + +#: src/stored/stored.c:304 +#, c-format +msgid "No Device resource defined in %s. Cannot continue.\n" +msgstr "" + +#: src/stored/stored.c:312 +#, c-format +msgid "No Messages resource defined in %s. Cannot continue.\n" +msgstr "" + +#: src/stored/stored.c:339 +#, c-format +msgid "\"TLS Certificate\" file not defined for Storage \"%s\" in %s.\n" +msgstr "" + +#: src/stored/stored.c:345 +#, c-format +msgid "\"TLS Key\" file not defined for Storage \"%s\" in %s.\n" +msgstr "" + +#: src/stored/stored.c:351 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Storage \"%s\" in %s. 
At least one CA certificate store is required when " +"using \"TLS Verify Peer\".\n" +msgstr "" + +#: src/stored/stored.c:482 +#, c-format +msgid "Could not initialize %s\n" +msgstr "" + +#: src/stored/stored.c:495 +#, c-format +msgid "Could not open device %s\n" +msgstr "" + +#: src/stored/stored.c:508 +#, c-format +msgid "Could not mount device %s\n" +msgstr "" + +#: src/stored/stored_conf.c:234 +#, c-format +msgid "Expected a Device Type keyword, got: %s" +msgstr "" + +#: src/stored/stored_conf.c:249 +#, c-format +msgid "Warning: no \"%s\" resource (%d) defined.\n" +msgstr "" + +#: src/stored/stored_conf.c:252 +#, c-format +msgid "dump_resource type=%d\n" +msgstr "" + +#: src/stored/stored_conf.c:368 +#, c-format +msgid "Warning: unknown resource type %d\n" +msgstr "" + +#: src/stored/stored_conf.c:557 +#, c-format +msgid "\"%s\" item is required in \"%s\" resource, but not found.\n" +msgstr "" + +#: src/stored/stored_conf.c:563 +#, c-format +msgid "Too many items in \"%s\" resource\n" +msgstr "" + +#: src/stored/stored_conf.c:597 +#, c-format +msgid "Cannot find AutoChanger resource %s\n" +msgstr "" + +#: src/stored/stored_conf.c:669 +#, c-format +msgid "" +"Attempt to define second \"%s\" resource named \"%s\" is not permitted.\n" +msgstr "" + +#: src/stored/wait.c:127 +#, c-format +msgid "pthread timedwait error. ERR=%s\n" +msgstr "" + +#: src/stored/wait.c:217 +#, c-format +msgid "JobId=%s, Job %s waiting to reserve a device.\n" +msgstr "" + +#: src/tools/bregex.c:147 src/tools/bwild.c:122 +#, c-format +msgid "Could not open data file: %s\n" +msgstr "" + +#: src/tools/bsmtp.c:117 +#, c-format +msgid "Fatal malformed reply from %s: %s\n" +msgstr "" + +#: src/tools/bsmtp.c:125 +#, c-format +msgid "Fatal fgets error: ERR=%s\n" +msgstr "" + +#: src/tools/bsmtp.c:156 +#, c-format +msgid "" +"\n" +"Usage: %s [-f from] [-h mailhost] [-s subject] [-c copy] [recipient ...]\n" +" -8 set charset utf-8\n" +" -c set the Cc: field\n" +" -dnn set debug level to nn\n" +" -f set the From: field\n" +" -h use mailhost:port as the SMTP server\n" +" -s set the Subject: field\n" +" -r set the Reply-To: field\n" +" -l set the maximum number of lines that should be sent " +"(default: unlimited)\n" +" -? 
print this message.\n" +"\n" +msgstr "" + +#: src/tools/bsmtp.c:287 +msgid "Fatal error: no recipient given.\n" +msgstr "" + +#: src/tools/bsmtp.c:316 +#, c-format +msgid "Fatal gethostname error: ERR=%s\n" +msgstr "" + +#: src/tools/bsmtp.c:320 +#, c-format +msgid "Fatal gethostbyname for myself failed \"%s\": ERR=%s\n" +msgstr "" + +#: src/tools/bsmtp.c:356 +#, c-format +msgid "Error unknown mail host \"%s\": ERR=%s\n" +msgstr "" + +#: src/tools/bsmtp.c:359 +msgid "Retrying connection using \"localhost\".\n" +msgstr "" + +#: src/tools/bsmtp.c:367 +#, c-format +msgid "Fatal error: Unknown address family for smtp host: %d\n" +msgstr "" + +#: src/tools/bsmtp.c:376 src/tools/bsmtp.c:381 +#, c-format +msgid "Fatal socket error: ERR=%s\n" +msgstr "" + +#: src/tools/bsmtp.c:386 +#, c-format +msgid "Fatal connect error to %s: ERR=%s\n" +msgstr "" + +#: src/tools/bsmtp.c:394 +#, c-format +msgid "Fatal _open_osfhandle error: ERR=%s\n" +msgstr "" + +#: src/tools/bsmtp.c:401 src/tools/bsmtp.c:405 src/tools/bsmtp.c:414 +#: src/tools/bsmtp.c:418 +#, c-format +msgid "Fatal fdopen error: ERR=%s\n" +msgstr "" + +#: src/tools/bsmtp.c:410 +#, c-format +msgid "Fatal dup error: ERR=%s\n" +msgstr "" + +#: src/tools/dbcheck.c:175 +msgid "" +"Warning skipping the additional parameters for working directory/dbname/user/" +"password/host.\n" +msgstr "" + +#: src/tools/dbcheck.c:191 +#, c-format +msgid "Error can not find the Catalog name[%s] in the given config file [%s]\n" +msgstr "" + +#: src/tools/dbcheck.c:193 +#, c-format +msgid "Error there is no Catalog section in the given config file [%s]\n" +msgstr "" + +#: src/tools/dbcheck.c:202 +msgid "Error no Director resource defined.\n" +msgstr "" + +#: src/tools/dbcheck.c:216 +msgid "Wrong number of arguments.\n" +msgstr "" + +#: src/tools/dbcheck.c:221 +msgid "Working directory not supplied.\n" +msgstr "" + +#: src/tools/dbcheck.c:285 +#, c-format +msgid "Hello, this is the database check/correct program.\n" +msgstr "" + +#: src/tools/dbcheck.c:287 +#, c-format +msgid "Modify database is on." +msgstr "" + +#: src/tools/dbcheck.c:289 +#, c-format +msgid "Modify database is off." 
+msgstr "" + +#: src/tools/dbcheck.c:291 src/tools/dbcheck.c:352 +#, c-format +msgid " Verbose is on.\n" +msgstr "" + +#: src/tools/dbcheck.c:293 src/tools/dbcheck.c:354 +#, c-format +msgid " Verbose is off.\n" +msgstr "" + +#: src/tools/dbcheck.c:295 +#, c-format +msgid "Please select the function you want to perform.\n" +msgstr "" + +#: src/tools/dbcheck.c:299 +#, c-format +msgid "" +"\n" +" 1) Toggle modify database flag\n" +" 2) Toggle verbose flag\n" +" 3) Repair bad Filename records\n" +" 4) Repair bad Path records\n" +" 5) Eliminate duplicate Filename records\n" +" 6) Eliminate duplicate Path records\n" +" 7) Eliminate orphaned Jobmedia records\n" +" 8) Eliminate orphaned File records\n" +" 9) Eliminate orphaned Path records\n" +" 10) Eliminate orphaned Filename records\n" +" 11) Eliminate orphaned FileSet records\n" +" 12) Eliminate orphaned Client records\n" +" 13) Eliminate orphaned Job records\n" +" 14) Eliminate all Admin records\n" +" 15) Eliminate all Restore records\n" +" 16) All (3-15)\n" +" 17) Quit\n" +msgstr "" + +#: src/tools/dbcheck.c:318 +#, c-format +msgid "" +"\n" +" 1) Toggle modify database flag\n" +" 2) Toggle verbose flag\n" +" 3) Check for bad Filename records\n" +" 4) Check for bad Path records\n" +" 5) Check for duplicate Filename records\n" +" 6) Check for duplicate Path records\n" +" 7) Check for orphaned Jobmedia records\n" +" 8) Check for orphaned File records\n" +" 9) Check for orphaned Path records\n" +" 10) Check for orphaned Filename records\n" +" 11) Check for orphaned FileSet records\n" +" 12) Check for orphaned Client records\n" +" 13) Check for orphaned Job records\n" +" 14) Check for all Admin records\n" +" 15) Check for all Restore records\n" +" 16) All (3-15)\n" +" 17) Quit\n" +msgstr "" + +#: src/tools/dbcheck.c:338 +msgid "Select function number: " +msgstr "" + +#: src/tools/dbcheck.c:345 +#, c-format +msgid "Database will be modified.\n" +msgstr "" + +#: src/tools/dbcheck.c:347 +#, c-format +msgid "Database will NOT be modified.\n" +msgstr "" + +#: src/tools/dbcheck.c:437 +#, c-format +msgid "JobId=%s Name=\"%s\" StartTime=%s\n" +msgstr "" + +#: src/tools/dbcheck.c:445 +#, c-format +msgid "Orphaned JobMediaId=%s JobId=%s Volume=\"%s\"\n" +msgstr "" + +#: src/tools/dbcheck.c:452 +#, c-format +msgid "Orphaned FileId=%s JobId=%s Volume=\"%s\"\n" +msgstr "" + +#: src/tools/dbcheck.c:459 +#, c-format +msgid "Orphaned FileSetId=%s FileSet=\"%s\" MD5=%s\n" +msgstr "" + +#: src/tools/dbcheck.c:466 +#, c-format +msgid "Orphaned ClientId=%s Name=\"%s\"\n" +msgstr "" + +#: src/tools/dbcheck.c:520 +#, c-format +msgid "Deleting: %s\n" +msgstr "" + +#: src/tools/dbcheck.c:594 +#, c-format +msgid "Checking for duplicate Filename entries.\n" +msgstr "" + +#: src/tools/dbcheck.c:603 +#, c-format +msgid "Found %d duplicate Filename records.\n" +msgstr "" + +#: src/tools/dbcheck.c:604 +msgid "Print the list? (yes/no): " +msgstr "" + +#: src/tools/dbcheck.c:623 src/tools/dbcheck.c:681 +#, c-format +msgid "Found %d for: %s\n" +msgstr "" + +#: src/tools/dbcheck.c:651 +#, c-format +msgid "Checking for duplicate Path entries.\n" +msgstr "" + +#: src/tools/dbcheck.c:661 +#, c-format +msgid "Found %d duplicate Path records.\n" +msgstr "" + +#: src/tools/dbcheck.c:662 src/tools/dbcheck.c:716 src/tools/dbcheck.c:760 +#: src/tools/dbcheck.c:803 src/tools/dbcheck.c:842 src/tools/dbcheck.c:880 +#: src/tools/dbcheck.c:921 src/tools/dbcheck.c:962 src/tools/dbcheck.c:1000 +#: src/tools/dbcheck.c:1033 src/tools/dbcheck.c:1070 src/tools/dbcheck.c:1134 +msgid "Print them? 
(yes/no): " +msgstr "" + +#: src/tools/dbcheck.c:709 +#, c-format +msgid "Checking for orphaned JobMedia entries.\n" +msgstr "" + +#: src/tools/dbcheck.c:715 +#, c-format +msgid "Found %d orphaned JobMedia records.\n" +msgstr "" + +#: src/tools/dbcheck.c:733 +#, c-format +msgid "Deleting %d orphaned JobMedia records.\n" +msgstr "" + +#: src/tools/dbcheck.c:750 +#, c-format +msgid "Checking for orphaned File entries. This may take some time!\n" +msgstr "" + +#: src/tools/dbcheck.c:759 +#, c-format +msgid "Found %d orphaned File records.\n" +msgstr "" + +#: src/tools/dbcheck.c:776 +#, c-format +msgid "Deleting %d orphaned File records.\n" +msgstr "" + +#: src/tools/dbcheck.c:793 +#, c-format +msgid "Checking for orphaned Path entries. This may take some time!\n" +msgstr "" + +#: src/tools/dbcheck.c:802 +#, c-format +msgid "Found %d orphaned Path records.\n" +msgstr "" + +#: src/tools/dbcheck.c:815 +#, c-format +msgid "Deleting %d orphaned Path records.\n" +msgstr "" + +#: src/tools/dbcheck.c:832 +#, c-format +msgid "Checking for orphaned Filename entries. This may take some time!\n" +msgstr "" + +#: src/tools/dbcheck.c:841 +#, c-format +msgid "Found %d orphaned Filename records.\n" +msgstr "" + +#: src/tools/dbcheck.c:854 +#, c-format +msgid "Deleting %d orphaned Filename records.\n" +msgstr "" + +#: src/tools/dbcheck.c:869 +#, c-format +msgid "Checking for orphaned FileSet entries. This takes some time!\n" +msgstr "" + +#: src/tools/dbcheck.c:879 +#, c-format +msgid "Found %d orphaned FileSet records.\n" +msgstr "" + +#: src/tools/dbcheck.c:894 +#, c-format +msgid "Deleting %d orphaned FileSet records.\n" +msgstr "" + +#: src/tools/dbcheck.c:903 +#, c-format +msgid "Checking for orphaned Client entries.\n" +msgstr "" + +#: src/tools/dbcheck.c:920 +#, c-format +msgid "Found %d orphaned Client records.\n" +msgstr "" + +#: src/tools/dbcheck.c:935 +#, c-format +msgid "Deleting %d orphaned Client records.\n" +msgstr "" + +#: src/tools/dbcheck.c:944 +#, c-format +msgid "Checking for orphaned Job entries.\n" +msgstr "" + +#: src/tools/dbcheck.c:961 +#, c-format +msgid "Found %d orphaned Job records.\n" +msgstr "" + +#: src/tools/dbcheck.c:976 +#, c-format +msgid "Deleting %d orphaned Job records.\n" +msgstr "" + +#: src/tools/dbcheck.c:978 +#, c-format +msgid "Deleting JobMedia records of orphaned Job records.\n" +msgstr "" + +#: src/tools/dbcheck.c:980 +#, c-format +msgid "Deleting Log records of orphaned Job records.\n" +msgstr "" + +#: src/tools/dbcheck.c:990 +#, c-format +msgid "Checking for Admin Job entries.\n" +msgstr "" + +#: src/tools/dbcheck.c:999 +#, c-format +msgid "Found %d Admin Job records.\n" +msgstr "" + +#: src/tools/dbcheck.c:1014 +#, c-format +msgid "Deleting %d Admin Job records.\n" +msgstr "" + +#: src/tools/dbcheck.c:1023 +#, c-format +msgid "Checking for Restore Job entries.\n" +msgstr "" + +#: src/tools/dbcheck.c:1032 +#, c-format +msgid "Found %d Restore Job records.\n" +msgstr "" + +#: src/tools/dbcheck.c:1047 +#, c-format +msgid "Deleting %d Restore Job records.\n" +msgstr "" + +#: src/tools/dbcheck.c:1060 +#, c-format +msgid "Checking for Filenames with a trailing slash\n" +msgstr "" + +#: src/tools/dbcheck.c:1069 +#, c-format +msgid "Found %d bad Filename records.\n" +msgstr "" + +#: src/tools/dbcheck.c:1087 src/tools/dbcheck.c:1150 +#, c-format +msgid "Reparing %d bad Filename records.\n" +msgstr "" + +#: src/tools/dbcheck.c:1124 +#, c-format +msgid "Checking for Paths without a trailing slash\n" +msgstr "" + +#: src/tools/dbcheck.c:1133 +#, c-format +msgid "Found %d 
bad Path records.\n" +msgstr "" + +#: src/tools/drivetype.c:47 +#, c-format +msgid "" +"\n" +"Usage: drivetype [-v] path ...\n" +"\n" +" Print the drive type a given file/directory is on.\n" +" The following options are supported:\n" +"\n" +" -v print both path and file system type.\n" +" -? print this message.\n" +"\n" +msgstr "" + +#: src/tools/drivetype.c:101 src/tools/fstype.c:101 +#, c-format +msgid "%s: unknown\n" +msgstr "" + +#: src/tools/fstype.c:47 +#, c-format +msgid "" +"\n" +"Usage: fstype [-v] path ...\n" +"\n" +" Print the file system type a given file/directory is on.\n" +" The following options are supported:\n" +"\n" +" -v print both path and file system type.\n" +" -? print this message.\n" +"\n" +msgstr "" + +#: src/tools/testfind.c:66 +#, c-format +msgid "" +"\n" +"Usage: testfind [-d debug_level] [-] [pattern1 ...]\n" +" -a print extended attributes (Win32 debug)\n" +" -dnn set debug level to nn\n" +" -c specify config file containing FileSet resources\n" +" -f specify which FileSet to use\n" +" -? print this message.\n" +"\n" +"Patterns are used for file inclusion -- normally directories.\n" +"Debug level >= 1 prints each file found.\n" +"Debug level >= 10 prints path/file for catalog.\n" +"Errors are always printed.\n" +"Files/paths truncated is the number of files/paths with len > 255.\n" +"Truncation is only in the catalog.\n" +"\n" +msgstr "" + +#: src/tools/testfind.c:225 +#, c-format +msgid "" +"\n" +"Total files : %d\n" +"Max file length: %d\n" +"Max path length: %d\n" +"Files truncated: %d\n" +"Paths truncated: %d\n" +"Hard links : %d\n" +msgstr "" + +#: src/tools/testfind.c:265 +#, c-format +msgid "Reg: %s\n" +msgstr "" + +#: src/tools/testfind.c:287 +msgid "\t[will not descend: recursion turned off]" +msgstr "" + +#: src/tools/testfind.c:289 +msgid "\t[will not descend: file system change not allowed]" +msgstr "" + +#: src/tools/testfind.c:291 +msgid "\t[will not descend: disallowed file system]" +msgstr "" + +#: src/tools/testfind.c:293 +msgid "\t[will not descend: disallowed drive type]" +msgstr "" + +#: src/tools/testfind.c:309 src/tools/testls.c:188 +#, c-format +msgid "Err: Could not access %s: %s\n" +msgstr "" + +#: src/tools/testfind.c:312 src/tools/testls.c:191 +#, c-format +msgid "Err: Could not follow ff->link %s: %s\n" +msgstr "" + +#: src/tools/testfind.c:315 src/tools/testls.c:194 +#, c-format +msgid "Err: Could not stat %s: %s\n" +msgstr "" + +#: src/tools/testfind.c:318 src/tools/testls.c:197 +#, c-format +msgid "Skip: File not saved. No change. %s\n" +msgstr "" + +#: src/tools/testfind.c:321 src/tools/testls.c:200 +#, c-format +msgid "Err: Attempt to backup archive. Not saved. %s\n" +msgstr "" + +#: src/tools/testfind.c:324 src/tools/testls.c:209 +#, c-format +msgid "Err: Could not open directory %s: %s\n" +msgstr "" + +#: src/tools/testfind.c:327 src/tools/testls.c:212 +#, c-format +msgid "Err: Unknown file ff->type %d: %s\n" +msgstr "" + +#: src/tools/testfind.c:377 +#, c-format +msgid "===== Filename truncated to 255 chars: %s\n" +msgstr "" + +#: src/tools/testfind.c:394 +#, c-format +msgid "========== Path name truncated to 255 chars: %s\n" +msgstr "" + +#: src/tools/testfind.c:403 +#, c-format +msgid "========== Path length is zero. 
File=%s\n" +msgstr "" + +#: src/tools/testfind.c:406 +#, c-format +msgid "Path: %s\n" +msgstr "" + +#: src/tools/testls.c:55 +#, c-format +msgid "" +"\n" +"Usage: testls [-d debug_level] [-] [pattern1 ...]\n" +" -a print extended attributes (Win32 debug)\n" +" -dnn set debug level to nn\n" +" -e specify file of exclude patterns\n" +" -i specify file of include patterns\n" +" - read pattern(s) from stdin\n" +" -? print this message.\n" +"\n" +"Patterns are file inclusion -- normally directories.\n" +"Debug level >= 1 prints each file found.\n" +"Debug level >= 10 prints path/file for catalog.\n" +"Errors always printed.\n" +"Files/paths truncated is number with len > 255.\n" +"Truncation is only in catalog.\n" +"\n" +msgstr "" + +#: src/tools/testls.c:140 +#, c-format +msgid "Could not open include file: %s\n" +msgstr "" + +#: src/tools/testls.c:153 +#, c-format +msgid "Could not open exclude file: %s\n" +msgstr "" + +#: src/tools/testls.c:203 +#, c-format +msgid "Recursion turned off. Directory not entered. %s\n" +msgstr "" + +#: src/tools/testls.c:206 +#, c-format +msgid "Skip: File system change prohibited. Directory not entered. %s\n" +msgstr "" + +#: src/tray-monitor/authenticate.c:88 +msgid "" +"Director authorization problem.\n" +"Most likely the passwords do not agree.\n" +"Please see http://www.bacula.org/rel-manual/faq.html#AuthorizationErrors for " +"help.\n" +msgstr "" + +#: src/tray-monitor/authenticate.c:138 +msgid "" +"Director and Storage daemon passwords or names not the same.\n" +"Please see http://www.bacula.org/rel-manual/faq.html#AuthorizationErrors for " +"help.\n" +msgstr "" + +#: src/tray-monitor/authenticate.c:145 +#, c-format +msgid "bdird set configuration file to file\n" +" -dnn set debug level to nn\n" +" -t test - read configuration and exit\n" +" -? print this message.\n" +"\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:262 +#, c-format +msgid "" +"Error: %d Monitor resources defined in %s. You must define one and only one " +"Monitor resource.\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:293 +#, c-format +msgid "" +"No Client, Storage or Director resource defined in %s\n" +"Without that I don't how to get status from the File, Storage or Director " +"Daemon :-(\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:315 +#, c-format +msgid "" +"Invalid refresh interval defined in %s\n" +"This value must be greater or equal to 1 second and less or equal to 10 " +"minutes (read value: %d).\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:330 +msgid "Open status window..." +msgstr "" + +#: src/tray-monitor/tray-monitor.c:336 +msgid "Exit" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:348 +msgid "Bacula tray monitor" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:373 +msgid " (DIR)" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:377 +msgid " (FD)" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:381 +msgid " (SD)" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:394 +msgid "Unknown status." 
+msgstr "" + +#: src/tray-monitor/tray-monitor.c:464 +msgid "Refresh interval in seconds: " +msgstr "" + +#: src/tray-monitor/tray-monitor.c:472 +msgid "Refresh now" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:476 +msgid "About" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:480 +msgid "Close" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:500 +#, c-format +msgid "Disconnecting from Director %s:%d\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:503 +#, c-format +msgid "Disconnecting from Client %s:%d\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:506 +#, c-format +msgid "Disconnecting from Storage %s:%d\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:543 src/tray-monitor/tray-monitor.c:554 +msgid "Bacula Tray Monitor" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:545 src/tray-monitor/tray-monitor.c:556 +msgid "Written by Nicolas Boichat\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:546 src/tray-monitor/tray-monitor.c:557 +msgid "Version" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:625 +#, c-format +msgid "Error, currentitem is not a Client or a Storage..\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:699 +#, c-format +msgid "" +"Current job: %s\n" +"Last job: %s" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:711 +#, c-format +msgid " (%d errors)" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:714 +#, c-format +msgid " (%d error)" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:752 +msgid "No current job." +msgstr "" + +#: src/tray-monitor/tray-monitor.c:755 +msgid "No last job." +msgstr "" + +#: src/tray-monitor/tray-monitor.c:763 +msgid "Job status: Created" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:768 +msgid "Job status: Running" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:772 +msgid "Job status: Blocked" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:777 +msgid "Job status: Terminated" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:782 +msgid "Job status: Terminated in error" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:788 +msgid "Job status: Error" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:792 +msgid "Job status: Fatal error" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:797 +msgid "Job status: Verify differences" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:802 +msgid "Job status: Canceled" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:807 +msgid "Job status: Waiting on File daemon" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:812 +msgid "Job status: Waiting on the Storage daemon" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:817 +msgid "Job status: Waiting for new media" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:822 +msgid "Job status: Waiting for Mount" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:827 +msgid "Job status: Waiting for storage resource" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:832 +msgid "Job status: Waiting for job resource" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:837 +msgid "Job status: Waiting for Client resource" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:842 +msgid "Job status: Waiting for maximum jobs" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:847 +msgid "Job status: Waiting for start time" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:852 +msgid "Job status: Waiting for higher priority jobs to finish" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:857 +#, c-format +msgid "Unknown job status %c." 
+msgstr "" + +#: src/tray-monitor/tray-monitor.c:858 +#, c-format +msgid "Job status: Unknown(%c)" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:865 +#, c-format +msgid "Bad scan : '%s' %d\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:906 +#, c-format +msgid "Connecting to Director %s:%d" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:912 +#, c-format +msgid "Connecting to Client %s:%d\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:913 +#, c-format +msgid "Connecting to Client %s:%d" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:919 +#, c-format +msgid "Connecting to Storage %s:%d\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:920 +#, c-format +msgid "Connecting to Storage %s:%d" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:925 src/tray-monitor/tray-monitor.c:963 +#, c-format +msgid "Error, currentitem is not a Client, a Storage or a Director..\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:931 +msgid "Cannot connect to daemon.\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:932 +msgid "Cannot connect to daemon." +msgstr "" + +#: src/tray-monitor/tray-monitor.c:944 +#, c-format +msgid "Authentication error : %s" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:951 +msgid "Opened connection with Director daemon.\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:952 +msgid "Opened connection with Director daemon." +msgstr "" + +#: src/tray-monitor/tray-monitor.c:955 +msgid "Opened connection with File daemon.\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:956 +msgid "Opened connection with File daemon." +msgstr "" + +#: src/tray-monitor/tray-monitor.c:959 +msgid "Opened connection with Storage daemon.\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:960 +msgid "Opened connection with Storage daemon." +msgstr "" + +#: src/tray-monitor/tray-monitor.c:997 +msgid "<< Error: BNET_PROMPT signal received. >>\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:1002 src/wx-console/console_thread.cpp:486 +msgid "<< Heartbeat signal received, answered. >>\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:1006 +#, c-format +msgid "<< Unexpected signal received : %s >>\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:1011 +msgid "\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:1015 +msgid "Error : BNET_HARDEOF or BNET_ERROR" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:1021 +msgid "\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:1025 +msgid "Error : Connection closed." 
+msgstr "" + +#: src/tray-monitor/tray_conf.c:177 +#, c-format +msgid "Monitor: name=%s FDtimeout=%s SDtimeout=%s\n" +msgstr "" + +#: src/tray-monitor/tray_conf.c:183 +#, c-format +msgid "Director: name=%s address=%s FDport=%d\n" +msgstr "" + +#: src/tray-monitor/tray_conf.c:187 +#, c-format +msgid "Client: name=%s address=%s FDport=%d\n" +msgstr "" + +#: src/tray-monitor/tray_conf.c:191 +#, c-format +msgid "Storage: name=%s address=%s SDport=%d\n" +msgstr "" + +#: src/wx-console/authenticate.c:150 +msgid "Bad response to Hello command: ERR=" +msgstr "" + +#: src/baconfig.h:72 +#, c-format +msgid "Failed ASSERT: %s\n" +msgstr "" + +#: src/baconfig.h:79 +msgid "*None*" +msgstr "" + +#: src/win32/dird/winmain.cpp:193 src/win32/dird/winmain.cpp:200 +#: src/win32/filed/winmain.cpp:224 src/win32/filed/winmain.cpp:231 +#: src/win32/stored/baculasd/winmain.cpp:225 +#: src/win32/stored/baculasd/winmain.cpp:232 +msgid "Bacula Usage" +msgstr "" + +#: src/win32/dird/winmain.cpp:197 src/win32/filed/winmain.cpp:228 +#: src/win32/stored/baculasd/winmain.cpp:229 +msgid "Bad Command Line Options" +msgstr "" + +#: src/win32/dird/winservice.cpp:119 src/win32/filed/winservice.cpp:238 +#: src/win32/stored/baculasd/winservice.cpp:169 +msgid "StartServiceCtrlDispatcher failed." +msgstr "" + +#: src/win32/dird/winservice.cpp:135 src/win32/filed/winservice.cpp:256 +#: src/win32/stored/baculasd/winservice.cpp:185 +msgid "RegisterServiceCtlHandler failed" +msgstr "" + +#: src/win32/dird/winservice.cpp:136 src/win32/filed/winservice.cpp:257 +#: src/win32/stored/baculasd/winservice.cpp:186 +msgid "Contact Register Service Handler failure" +msgstr "" + +#: src/win32/dird/winservice.cpp:152 src/win32/filed/winservice.cpp:273 +#: src/win32/stored/baculasd/winservice.cpp:202 +msgid "ReportStatus STOPPED failed 1" +msgstr "" + +#: src/win32/dird/winservice.cpp:175 src/win32/filed/winservice.cpp:296 +#: src/win32/stored/baculasd/winservice.cpp:225 +msgid "Report Service failure" +msgstr "" + +#: src/win32/dird/winservice.cpp:211 +msgid "Unable to install Bacula Director service" +msgstr "" + +#: src/win32/dird/winservice.cpp:219 src/win32/filed/winservice.cpp:340 +#: src/win32/stored/baculasd/winservice.cpp:269 +msgid "Service command length too long" +msgstr "" + +#: src/win32/dird/winservice.cpp:220 src/win32/filed/winservice.cpp:341 +#: src/win32/stored/baculasd/winservice.cpp:270 +msgid "Service command length too long. Service not registered." +msgstr "" + +#: src/win32/dird/winservice.cpp:233 +msgid "" +"The Service Control Manager could not be contacted - the Bacula Director " +"service was not installed" +msgstr "" + +#: src/win32/dird/winservice.cpp:258 +msgid "The Bacula Director service could not be installed" +msgstr "" + +#: src/win32/dird/winservice.cpp:264 +msgid "Provides director services. Bacula -- the network backup solution." +msgstr "" + +#: src/win32/dird/winservice.cpp:272 +msgid "" +"The Bacula Director service was successfully installed.\n" +"The service may be started from the Control Panel and will\n" +"automatically be run the next time this machine is rebooted." 
+msgstr "" + +#: src/win32/dird/winservice.cpp:311 +msgid "The Bacula Director service could not be stopped" +msgstr "" + +#: src/win32/dird/winservice.cpp:318 +msgid "The Bacula Director service has been removed" +msgstr "" + +#: src/win32/dird/winservice.cpp:321 +msgid "The Bacula Director service could not be removed" +msgstr "" + +#: src/win32/dird/winservice.cpp:326 +msgid "The Bacula Director service could not be found" +msgstr "" + +#: src/win32/dird/winservice.cpp:331 +msgid "" +"The SCM could not be contacted - the Bacula Director service was not removed" +msgstr "" + +#: src/win32/dird/winservice.cpp:393 src/win32/filed/winservice.cpp:604 +#: src/win32/stored/baculasd/winservice.cpp:443 +msgid "SetServiceStatus failed" +msgstr "" + +#: src/win32/dird/winservice.cpp:421 src/win32/filed/winservice.cpp:632 +#: src/win32/stored/baculasd/winservice.cpp:471 +#, c-format +msgid "" +"\n" +"\n" +"%s error: %ld at %s:%d" +msgstr "" + +#: src/win32/dird/winservice.cpp:496 src/win32/filed/winservice.cpp:706 +#: src/win32/stored/baculasd/winservice.cpp:546 +#, c-format +msgid "Locked by: %s, duration: %ld seconds\n" +msgstr "" + +#: src/win32/dird/winservice.cpp:500 src/win32/filed/winservice.cpp:710 +#: src/win32/stored/baculasd/winservice.cpp:550 +#, c-format +msgid "No longer locked\n" +msgstr "" + +#: src/win32/dird/winservice.cpp:504 src/win32/filed/winservice.cpp:714 +#: src/win32/stored/baculasd/winservice.cpp:554 +msgid "Could not lock database" +msgstr "" + +#: src/win32/filed/winmain.cpp:326 src/win32/stored/baculasd/winmain.cpp:326 +msgid "Another instance of Bacula is already running" +msgstr "" + +#: src/win32/filed/winservice.cpp:111 src/win32/filed/winservice.cpp:125 +msgid "No existing instance of Bacula File service could be contacted" +msgstr "" + +#: src/win32/filed/winservice.cpp:196 +msgid "KERNEL32.DLL not found: Bacula service not started" +msgstr "" + +#: src/win32/filed/winservice.cpp:206 +msgid "Registry service not found: Bacula service not started" +msgstr "" + +#: src/win32/filed/winservice.cpp:208 +msgid "Registry service not found" +msgstr "" + +#: src/win32/filed/winservice.cpp:332 +msgid "Unable to install Bacula File service" +msgstr "" + +#: src/win32/filed/winservice.cpp:356 +msgid "Cannot write System Registry" +msgstr "" + +#: src/win32/filed/winservice.cpp:357 +msgid "" +"The System Registry could not be updated - the Bacula service was not " +"installed" +msgstr "" + +#: src/win32/filed/winservice.cpp:364 +msgid "Cannot add Bacula key to System Registry" +msgstr "" + +#: src/win32/filed/winservice.cpp:365 src/win32/filed/winservice.cpp:418 +msgid "The Bacula service could not be installed" +msgstr "" + +#: src/win32/filed/winservice.cpp:374 +msgid "" +"The Bacula File service was successfully installed.\n" +"The service may be started by double clicking on the\n" +"Bacula \"Start\" icon and will be automatically\n" +"be run the next time this machine is rebooted. " +msgstr "" + +#: src/win32/filed/winservice.cpp:393 +msgid "" +"The Service Control Manager could not be contacted - the Bacula service was " +"not installed" +msgstr "" + +#: src/win32/filed/winservice.cpp:424 +msgid "" +"Provides file backup and restore services. Bacula -- the network backup " +"solution." +msgstr "" + +#: src/win32/filed/winservice.cpp:432 +msgid "" +"The Bacula File service was successfully installed.\n" +"The service may be started from the Control Panel and will\n" +"automatically be run the next time this machine is rebooted." 
+msgstr "" + +#: src/win32/filed/winservice.cpp:442 +msgid "" +"Unknown Windows operating system.\n" +"Cannot install Bacula service.\n" +msgstr "" + +#: src/win32/filed/winservice.cpp:467 +msgid "" +"Could not find registry entry.\n" +"Service probably not registerd - the Bacula service was not removed" +msgstr "" + +#: src/win32/filed/winservice.cpp:472 +msgid "" +"Could not delete Registry key.\n" +"The Bacula service could not be removed" +msgstr "" + +#: src/win32/filed/winservice.cpp:482 +msgid "Bacula could not be contacted, probably not running" +msgstr "" + +#: src/win32/filed/winservice.cpp:489 +msgid "The Bacula service has been removed" +msgstr "" + +#: src/win32/filed/winservice.cpp:520 +msgid "The Bacula file service could not be stopped" +msgstr "" + +#: src/win32/filed/winservice.cpp:527 +msgid "The Bacula file service has been removed" +msgstr "" + +#: src/win32/filed/winservice.cpp:530 +msgid "The Bacula file service could not be removed" +msgstr "" + +#: src/win32/filed/winservice.cpp:535 +msgid "The Bacula file service could not be found" +msgstr "" + +#: src/win32/filed/winservice.cpp:540 +msgid "" +"The SCM could not be contacted - the Bacula file service was not removed" +msgstr "" + +#: src/win32/stored/baculasd/winservice.cpp:85 +#: src/win32/stored/baculasd/winservice.cpp:99 +msgid "No existing instance of Bacula storage service could be contacted" +msgstr "" + +#: src/win32/stored/baculasd/winservice.cpp:261 +msgid "Unable to install Bacula Storage service" +msgstr "" + +#: src/win32/stored/baculasd/winservice.cpp:283 +msgid "" +"The Service Control Manager could not be contacted - the Bacula Storage " +"service was not installed" +msgstr "" + +#: src/win32/stored/baculasd/winservice.cpp:308 +msgid "The Bacula Storage service could not be installed" +msgstr "" + +#: src/win32/stored/baculasd/winservice.cpp:314 +msgid "Provides storage services. Bacula -- the network backup solution." +msgstr "" + +#: src/win32/stored/baculasd/winservice.cpp:322 +msgid "" +"The Bacula Storage service was successfully installed.\n" +"The service may be started from the Control Panel and will\n" +"automatically be run the next time this machine is rebooted." 
+msgstr "" + +#: src/win32/stored/baculasd/winservice.cpp:361 +msgid "The Bacula Storage service could not be stopped" +msgstr "" + +#: src/win32/stored/baculasd/winservice.cpp:368 +msgid "The Bacula Storage service has been removed" +msgstr "" + +#: src/win32/stored/baculasd/winservice.cpp:371 +msgid "The Bacula Storage service could not be removed" +msgstr "" + +#: src/win32/stored/baculasd/winservice.cpp:376 +msgid "The Bacula Storage service could not be found" +msgstr "" + +#: src/win32/stored/baculasd/winservice.cpp:381 +msgid "" +"The SCM could not be contacted - the Bacula Storage service was not removed" +msgstr "" + +#: src/wx-console/console_thread.cpp:121 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Director \"%s\" in config file.\n" +"At least one CA certificate store is required.\n" +msgstr "" + +#: src/wx-console/console_thread.cpp:128 +msgid "" +"No Director resource defined in config file.\n" +"Without that I don't how to speak to the Director :-(\n" +msgstr "" + +#: src/wx-console/console_thread.cpp:147 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Console \"%s\" in config file.\n" +msgstr "" + +#: src/wx-console/console_thread.cpp:169 +msgid "Error while initializing windows sockets...\n" +msgstr "" + +#: src/wx-console/console_thread.cpp:185 +msgid "Error while cleaning up windows sockets...\n" +msgstr "" + +#: src/wx-console/console_thread.cpp:224 +msgid "Error while initializing library." +msgstr "" + +#: src/wx-console/console_thread.cpp:248 +msgid "Cryptographic library initialization failed.\n" +msgstr "" + +#: src/wx-console/console_thread.cpp:252 +msgid "Please correct configuration file.\n" +msgstr "" + +#: src/wx-console/console_thread.cpp:294 +msgid "Error : Library not initialized\n" +msgstr "" + +#: src/wx-console/console_thread.cpp:305 +msgid "Error : No configuration file loaded\n" +msgstr "" + +#: src/wx-console/console_thread.cpp:315 +msgid "Connecting...\n" +msgstr "" + +#: src/wx-console/console_thread.cpp:331 +msgid "Error : No director defined in config file.\n" +msgstr "" + +#: src/wx-console/console_thread.cpp:343 +msgid "Multiple directors found in your config file.\n" +msgstr "" + +#: src/wx-console/console_thread.cpp:352 +#, c-format +msgid "Please choose a director (1-%d): " +msgstr "" + +#: src/wx-console/console_thread.cpp:424 +msgid "Failed to connect to the director\n" +msgstr "" + +#: src/wx-console/console_thread.cpp:434 +msgid "Connected\n" +msgstr "" + +#: src/wx-console/console_thread.cpp:491 +msgid "<< Unexpected signal received : " +msgstr "" + +#: src/wx-console/console_thread.cpp:511 +msgid "Connection terminated\n" +msgstr "" + +#: src/wx-console/main.cpp:119 +msgid "Bacula bwx-console" +msgstr "" + +#: src/wx-console/main.cpp:124 src/wx-console/wxbmainframe.cpp:267 +#, c-format +msgid "Welcome to bacula bwx-console %s (%s)!\n" +msgstr "" + +#: src/wx-console/wxbconfigfileeditor.cpp:65 +msgid "Config file editor" +msgstr "" + +#: src/wx-console/wxbconfigfileeditor.cpp:75 +msgid "# Bacula bwx-console Configuration File\n" +msgstr "" + +#: src/wx-console/wxbconfigfileeditor.cpp:111 +msgid "Save and close" +msgstr "" + +#: src/wx-console/wxbconfigfileeditor.cpp:112 +msgid "Close without saving" +msgstr "" + +#: src/wx-console/wxbconfigfileeditor.cpp:139 +#, c-format +msgid "Unable to write to %s\n" +msgstr "" + +#: src/wx-console/wxbconfigfileeditor.cpp:140 +msgid "Error while saving" +msgstr "" + +#: 
src/wx-console/wxbconfigpanel.cpp:205 +msgid "Apply" +msgstr "" + +#: src/wx-console/wxbhistorytextctrl.cpp:82 +#: src/wx-console/wxbhistorytextctrl.cpp:153 +#: src/wx-console/wxbmainframe.cpp:291 +msgid "Type your command below:" +msgstr "" + +#: src/wx-console/wxbhistorytextctrl.cpp:117 +msgid "Unknown command." +msgstr "" + +#: src/wx-console/wxbhistorytextctrl.cpp:126 +msgid "Possible completions: " +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:246 +msgid "&About...\tF1" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:246 +msgid "Show about dialog" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:248 src/wx-console/wxbmainframe.cpp:620 +msgid "Connect to the director" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:249 +msgid "Disconnect" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:249 +msgid "Disconnect of the director" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:251 +msgid "Change of configuration file" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:251 +msgid "Change your default configuration file" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:252 +msgid "Edit your configuration file" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:254 +msgid "E&xit\tAlt-X" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:254 +msgid "Quit this program" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:258 +msgid "&File" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:259 +msgid "&Help" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:283 +msgid "" +"Warning : Unicode is disabled because you are using wxWidgets for GTK+ 1.2.\n" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:287 +msgid "" +"Warning : There is a problem with wxWidgets for GTK+ 2.0 without Unicode " +"support when handling non-ASCII filenames: Every non-ASCII character in such " +"filenames will be replaced by an interrogation mark.\n" +"If this behaviour disturbs you, please build bwx-console against a Unicode " +"version of wxWidgets for GTK+ 2.0.\n" +"---\n" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:298 +msgid "Send" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:375 src/wx-console/wxbmainframe.cpp:387 +msgid "Error while parsing command line arguments, using defaults.\n" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:376 src/wx-console/wxbmainframe.cpp:388 +msgid "Usage: bwx-console [-c configfile] [-w tmp]\n" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:417 +#, c-format +msgid "" +"It seems that it is the first time you run bwx-console.\n" +"This file (%s) has been choosen as default configuration file.\n" +"Do you want to edit it? (if you click No you will have to select another " +"file)" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:419 +msgid "First run" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:436 +#, c-format +msgid "" +"Unable to read %s\n" +"Error: %s\n" +"Do you want to choose another one? (Press no to edit this file)" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:438 +msgid "Unable to read configuration file" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:450 +msgid "Please choose a configuration file to use" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:463 +msgid "This configuration file has been successfully read, use it as default?" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:464 +msgid "Configuration file read successfully" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:474 +#, c-format +msgid "Using this configuration file: %s\n" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:479 +msgid "Connecting to the director..." 
+msgstr "" + +#: src/wx-console/wxbmainframe.cpp:494 +msgid "Failed to unregister a data parser !" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:502 +msgid "Quitting.\n" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:517 +msgid "" +"Welcome to Bacula bwx-console.\n" +"Written by Nicolas Boichat \n" +"Copyright (C), 2005-2007 Free Software Foundation Europe, e.V.\n" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:521 +msgid "About Bacula bwx-console" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:527 +msgid "Please choose your default configuration file" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:531 +msgid "Use this configuration file as default?" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:532 +msgid "Configuration file" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:603 +msgid "Console thread terminated." +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:611 +msgid "Connection to the director lost. Quit program?" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:612 +msgid "Connection lost" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:628 +msgid "Connected to the director." +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:651 +msgid "Reconnect" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:652 +msgid "Reconnect to the director" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:666 +msgid "Disconnected of the director." +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:685 src/wx-console/wxbrestorepanel.cpp:710 +msgid "Unexpected question has been received.\n" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:708 src/wx-console/wxbmainframe.cpp:725 +msgid "bwx-console: unexpected director's question." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:234 +#: src/wx-console/wxbrestorepanel.cpp:1920 +#: src/wx-console/wxbrestorepanel.cpp:1949 +msgid "Enter restore mode" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:237 +msgid "Cancel restore" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:263 +#: src/wx-console/wxbrestorepanel.cpp:317 +msgid "Add" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:265 +#: src/wx-console/wxbrestorepanel.cpp:319 +msgid "Remove" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:267 +#: src/wx-console/wxbrestorepanel.cpp:321 +msgid "Refresh" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:286 +msgid "M" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:290 +msgid "Filename" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:302 +msgid "Perm." 
+msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:335 +#: src/wx-console/wxbrestorepanel.cpp:348 +#: src/wx-console/wxbrestorepanel.cpp:503 +#: src/wx-console/wxbrestorepanel.cpp:513 +#: src/wx-console/wxbrestorepanel.cpp:516 +#: src/wx-console/wxbrestorepanel.cpp:1784 +#: src/wx-console/wxbrestorepanel.cpp:1870 +msgid "Job Name" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:337 +#: src/wx-console/wxbrestorepanel.cpp:353 +#: src/wx-console/wxbrestorepanel.cpp:455 +#: src/wx-console/wxbrestorepanel.cpp:456 +#: src/wx-console/wxbrestorepanel.cpp:466 +#: src/wx-console/wxbrestorepanel.cpp:467 +#: src/wx-console/wxbrestorepanel.cpp:1139 +#: src/wx-console/wxbrestorepanel.cpp:1212 +#: src/wx-console/wxbrestorepanel.cpp:1822 +#: src/wx-console/wxbrestorepanel.cpp:1824 +#: src/wx-console/wxbrestorepanel.cpp:1885 +msgid "Fileset" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:340 +#: src/wx-console/wxbrestorepanel.cpp:1206 +#: src/wx-console/wxbrestorepanel.cpp:1222 +#: src/wx-console/wxbrestorepanel.cpp:1224 +#: src/wx-console/wxbrestorepanel.cpp:1232 +#: src/wx-console/wxbrestorepanel.cpp:1234 +#: src/wx-console/wxbrestorepanel.cpp:1253 +#: src/wx-console/wxbrestorepanel.cpp:1260 +#: src/wx-console/wxbrestorepanel.cpp:1812 +#: src/wx-console/wxbrestorepanel.cpp:1823 +#: src/wx-console/wxbrestorepanel.cpp:1944 +msgid "Before" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:342 +msgid "Please configure parameters concerning files to restore :" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:351 +#: src/wx-console/wxbrestorepanel.cpp:1878 +msgid "always" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:351 +msgid "if newer" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:351 +msgid "if older" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:351 +#: src/wx-console/wxbrestorepanel.cpp:1881 +msgid "never" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:359 +msgid "Please configure parameters concerning files restoration :" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:427 +msgid "Getting parameters list." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:435 +msgid "Error : no clients returned by the director." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:459 +msgid "Error : no filesets returned by the director." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:483 +msgid "Error : no storage returned by the director." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:506 +#: src/wx-console/wxbrestorepanel.cpp:530 +msgid "Error : no jobs returned by the director." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:516 +msgid "RestoreFiles" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:551 +msgid "Please configure your restore parameters." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:562 +msgid "Building restore tree..." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:605 +msgid "Error while starting restore: " +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:695 +msgid "" +"Right click on a file or on a directory, or double-click on its mark to add " +"it to the restore list." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:732 +#: src/wx-console/wxbrestorepanel.cpp:754 +msgid "bwx-console: unexpected restore question." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:765 +msgid " files selected to be restored." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:770 +msgid " file selected to be restored." 
+msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:777 +#, c-format +msgid "Please configure your restore (%ld files selected to be restored)..." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:787 +msgid "Restore failed : no file selected.\n" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:788 +msgid "Restore failed : no file selected." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:798 +msgid "Restoring, please wait..." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:811 +msgid "Job queued. JobId=" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:813 +msgid "Restore queued, jobid=" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:817 +msgid "Job failed." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:818 +msgid "Restore failed, please look at messages.\n" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:819 +msgid "Restore failed, please look at messages in console." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:825 +#: src/wx-console/wxbrestorepanel.cpp:826 +msgid "Failed to retrieve jobid.\n" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:852 +msgid "" +"Restore is scheduled in more than two minutes, bwx-console will not wait for " +"its completion.\n" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:853 +msgid "" +"Restore is scheduled in more than two minutes, bwx-console will not wait for " +"its completion." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:879 +msgid "Restore job created, but not yet running." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:884 +#, c-format +msgid "Restore job running, please wait (%ld of %ld files restored)..." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:888 +msgid "Restore job terminated successfully." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:889 +msgid "Restore job terminated successfully.\n" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:894 +msgid "Restore job terminated in error, see messages in console." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:895 +msgid "Restore job terminated in error, see messages.\n" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:900 +msgid "Restore job reported a non-fatal error." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:904 +msgid "Restore job reported a fatal error." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:909 +msgid "Restore job cancelled by user." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:910 +msgid "Restore job cancelled by user.\n" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:915 +msgid "Restore job is waiting on File daemon." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:919 +msgid "Restore job is waiting for new media." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:923 +msgid "Restore job is waiting for storage resource." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:927 +msgid "Restore job is waiting for job resource." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:931 +msgid "Restore job is waiting for Client resource." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:935 +msgid "Restore job is waiting for maximum jobs." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:939 +msgid "Restore job is waiting for start time." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:943 +msgid "Restore job is waiting for higher priority jobs to finish." 
+msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:992 +msgid "" +"The restore job has not been started within one minute, bwx-console will not " +"wait for its completion anymore.\n" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:993 +msgid "" +"The restore job has not been started within one minute, bwx-console will not " +"wait for its completion anymore." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1003 +msgid "Restore done successfully.\n" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1004 +msgid "Restore done successfully." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1075 +msgid "Applying restore configuration changes..." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1126 +msgid "Failed to find the selected client." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1141 +msgid "Failed to find the selected fileset." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1156 +msgid "Failed to find the selected storage." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1173 +#: src/wx-console/wxbrestorepanel.cpp:1859 +msgid "Run Restore job" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1189 +msgid "Restore configuration changes were applied." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1198 +msgid "Restore cancelled.\n" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1199 +msgid "Restore cancelled." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1221 +msgid "No results to list." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1223 +msgid "No backup found for this client." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1230 +msgid "ERROR" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1231 +msgid "Query failed" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1233 +msgid "Cannot get previous backups list, see console." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1869 +msgid "JobName:" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1873 +msgid "Where:" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1876 +msgid "Replace:" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1879 +msgid "ifnewer" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1880 +msgid "ifolder" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1884 +msgid "FileSet:" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:2001 +msgid "Restoring..." +msgstr "" diff --git a/po/fr.po b/po/fr.po new file mode 100644 index 00000000..29d289af --- /dev/null +++ b/po/fr.po @@ -0,0 +1,13630 @@ +# French translations for Bacula package +# Traduction anglaise du package Bacula. +# Copyright (C) 2000-2016 Kern Sibbald +# Nicolas Boichat , 2005. 
+# License: BSD 2-Clause; see file LICENSE-FOSS +# +msgid "" +msgstr "" +"Project-Id-Version: Bacula 1.38\n" +"Report-Msgid-Bugs-To: bacula-devel@lists.sourceforge.net\n" +"POT-Creation-Date: 2018-08-11 21:43+0200\n" +"PO-Revision-Date: 2009-10-18 14:04+0000\n" +"Last-Translator: Eric Bollengier \n" +"Language-Team: French \n" +"Language: fr\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=2; plural=(n > 1);\n" + +#: src/baconfig.h:62 src/baconfig.h:63 src/baconfig.h:68 src/baconfig.h:69 +#: src/baconfig.h:80 src/baconfig.h:81 +#, c-format +msgid "Failed ASSERT: %s\n" +msgstr "" + +#: src/baconfig.h:89 +msgid "*None*" +msgstr "" + +#: src/lib/status.h:84 +msgid "" +"\n" +"Terminated Jobs:\n" +msgstr "" +"\n" +"Job terminés :\n" + +#: src/lib/status.h:91 +#, fuzzy +msgid " JobId Level Files Bytes Status Finished Name \n" +msgstr " JobId Type Fichiers Octets Statut Terminé Nom\n" + +#: src/lib/status.h:93 +msgid "===================================================================\n" +msgstr "===================================================================\n" + +#: src/lib/status.h:119 +msgid "Created" +msgstr "Crée" + +#: src/lib/status.h:123 +msgid "Error" +msgstr "Erreur" + +#: src/lib/status.h:126 +msgid "Diffs" +msgstr "" + +#: src/lib/status.h:129 +msgid "Cancel" +msgstr "Annulé" + +#: src/lib/status.h:132 +msgid "OK" +msgstr "OK" + +#: src/lib/status.h:135 +msgid "OK -- with warnings" +msgstr "OK -- avec des avertissements" + +#: src/lib/status.h:138 +msgid "Incomplete" +msgstr "" + +#: src/lib/status.h:141 +msgid "Other" +msgstr "Autre" + +#: src/lib/status.h:153 +#, fuzzy, c-format +msgid "%6d\t%-6s\t%8s\t%10s\t%-7s\t%-8s\t%s\n" +msgstr "%6d %-6s %8s %10s %-7s %-8s %s\n" + +#: src/lib/status.h:182 +#, fuzzy, c-format +msgid "%6d %-6s %8s %10s %-7s %-8s %s\n" +msgstr "%6d %-6s %8s %10s %-7s %-8s %s\n" + +#: src/lib/status.h:214 src/lib/status.h:225 src/lib/status.h:239 +#: src/lib/status.h:243 src/lib/status.h:247 +msgid "Bacula " +msgstr "Bacula " + +#: src/qt-console/bat_conf.cpp:133 +#, c-format +msgid "No record for %d %s\n" +msgstr "" + +#: src/qt-console/bat_conf.cpp:142 +#, c-format +msgid "Director: name=%s address=%s DIRport=%d\n" +msgstr "" + +#: src/qt-console/bat_conf.cpp:146 +#, fuzzy, c-format +msgid "Console: name=%s\n" +msgstr "Console connecté à %s\n" + +#: src/qt-console/bat_conf.cpp:149 +#, c-format +msgid "ConsoleFont: name=%s font face=%s\n" +msgstr "" + +#: src/qt-console/bat_conf.cpp:153 src/qt-console/bat_conf.cpp:235 +#: src/qt-console/bat_conf.cpp:282 src/qt-console/bat_conf.cpp:312 +#, c-format +msgid "Unknown resource type %d\n" +msgstr "" + +#: src/qt-console/bat_conf.cpp:259 +#: src/qt-console/tray-monitor/tray_conf.cpp:311 +#, c-format +msgid "\"%s\" directive is required in \"%s\" resource, but not found.\n" +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:88 +#, fuzzy, c-format +msgid "Already connected\"%s\".\n" +msgstr "Console connecté à %s\n" + +#: src/qt-console/bcomm/dircomm.cpp:99 +#, fuzzy, c-format +msgid "Connecting to Director %s:%d" +msgstr "Connexion au Director %s:%d\n" + +#: src/qt-console/bcomm/dircomm.cpp:101 +#, fuzzy, c-format +msgid "" +"Connecting to Director %s:%d\n" +"\n" +msgstr "Connexion au Director %s:%d\n" + +#: src/qt-console/bcomm/dircomm.cpp:153 +#, c-format +msgid "Failed to initialize TLS context for Console \"%s\".\n" +msgstr "Impossible d'initialiser le contexte TLS pour la Console \"%s\".\n" + +#: 
src/qt-console/bcomm/dircomm.cpp:176 +#, c-format +msgid "Failed to initialize TLS context for Director \"%s\".\n" +msgstr "Impossible d'initialiser le contexte TLS pour le Director \"%s\".\n" + +#: src/qt-console/bcomm/dircomm.cpp:198 +#: src/qt-console/tray-monitor/task.cpp:233 +#, fuzzy +msgid "Director daemon" +msgstr "Director" + +#: src/qt-console/bcomm/dircomm.cpp:236 +msgid "Initializing ..." +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:252 src/qt-console/console/console.cpp:133 +#, fuzzy +msgid "Connected" +msgstr "Connexion...\n" + +#: src/qt-console/bcomm/dircomm.cpp:377 +#, fuzzy +msgid "Command completed ..." +msgstr "Commande annulée.\n" + +#: src/qt-console/bcomm/dircomm.cpp:384 src/qt-console/console/console.cpp:370 +msgid "Processing command ..." +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:391 +msgid "At main prompt waiting for input ..." +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:398 src/qt-console/bcomm/dircomm.cpp:408 +msgid "At prompt waiting for input ..." +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:416 +#, fuzzy +msgid "Command failed." +msgstr "Commande annulée.\n" + +#: src/qt-console/bcomm/dircomm.cpp:488 +#, fuzzy +msgid "Director disconnected." +msgstr "Connexion du director le %s\n" + +#: src/qt-console/bcomm/dircomm_auth.cpp:110 +#, fuzzy, c-format +msgid "Director authorization problem at \"%s:%d\"\n" +msgstr "Pas d'enregistrement trouvé en base pour : %s\n" + +#: src/qt-console/bcomm/dircomm_auth.cpp:117 +#, c-format +msgid "" +"Authorization problem: Remote server at \"%s:%d\" did not advertise required " +"TLS support.\n" +msgstr "" + +#: src/qt-console/bcomm/dircomm_auth.cpp:125 +#, c-format +msgid "" +"Authorization problem with Director at \"%s:%d\": Remote server requires " +"TLS.\n" +msgstr "" + +#: src/qt-console/bcomm/dircomm_auth.cpp:136 +#, fuzzy, c-format +msgid "TLS negotiation failed with Director at \"%s:%d\"\n" +msgstr "Négociation TLS échouée avec le SD \"%s:%d\".\n" + +#: src/qt-console/bcomm/dircomm_auth.cpp:148 +#, fuzzy, c-format +msgid "" +"Bad response to Hello command: ERR=%s\n" +"The Director at \"%s:%d\" is probably not running.\n" +msgstr "Mauvaise réponse à la commande Hello : ERR=%s\n" + +#: src/qt-console/bcomm/dircomm_auth.cpp:165 +#, fuzzy, c-format +msgid "Director at \"%s:%d\" rejected Hello command\n" +msgstr "Le File Daemon \"%s:%d\" a rejeté la commande Hello\n" + +#: src/qt-console/bcomm/dircomm_auth.cpp:182 +#, fuzzy, c-format +msgid "" +"Authorization problem with Director at \"%s:%d\"\n" +"Most likely the passwords do not agree.\n" +"If you are using TLS, there may have been a certificate validation error " +"during the TLS handshake.\n" +"For help, please see " +msgstr "" +"Problème d'authentification avec le director.\n" +"Le plus souvent, les mots de pass ne correspondent pas.\n" +"Si vous utilisez TLS, il peut y avoir une erreur de validation du " +"certificat\n" +"pendant l'initialisation de la connexion TLS.\n" +"Vous trouverez de l'aide sur\n" +"http://www.bacula.org/rel-manual/faq.html#AuthorizationErrors\n" + +#: src/qt-console/main.cpp:160 +msgid "Cryptography library initialization failed.\n" +msgstr "" + +#: src/qt-console/main.cpp:164 +#, c-format +msgid "Please correct configuration file: %s\n" +msgstr "Merci de corriger le fichier de configuration : %s\n" + +#: src/qt-console/main.cpp:188 +#, fuzzy, c-format +msgid "" +"\n" +"%sVersion: %s (%s) %s %s %s\n" +"\n" +"Usage: bat [-s] [-c config_file] [-d debug_level] [config_file]\n" +" -c set configuration file to file\n" +" -dnn 
set debug level to nn\n" +" -s no signals\n" +" -t test - read configuration and exit\n" +" -? print this message.\n" +"\n" +msgstr "" +") %s %s %s\n" +"\n" +"Usage : bconsole [-s] [-c config_file] [-d niveau_debug]\n" +" -c set configuration file to file\n" +" -dnn set debug level to nn\n" +" -s no signals\n" +" -t test - lecture de la configuration et sortie\n" +"\n" + +#: src/qt-console/main.cpp:221 src/qt-console/main.cpp:251 +msgid "TLS required but not configured in Bacula.\n" +msgstr "" + +#: src/qt-console/main.cpp:229 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Director \"%s\" in %s. At least one CA certificate store is required.\n" +msgstr "" + +#: src/qt-console/main.cpp:238 +#, c-format +msgid "" +"No Director resource defined in %s\n" +"Without that I don't how to speak to the Director :-(\n" +msgstr "" +"Pas de director défini pour %s\n" +"Sans cette définition, il n'est pas possible de se connecter à celui-ci.\n" + +#: src/qt-console/main.cpp:259 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Console \"%s\" in %s.\n" +msgstr "" + +#: src/qt-console/tray-monitor/authenticate.cpp:86 +#, fuzzy +msgid "" +"Authorization problem.\n" +"Most likely the passwords do not agree.\n" +"For help, please see " +msgstr "" +"Problème d'authentification avec le director.\n" +"Le plus souvent, les mots de pass ne correspondent pas.\n" +"Vous trouverez de l'aide sur\n" +"http://www.bacula.org/rel-manual/faq.html#AuthorizationErrors\n" + +#: src/qt-console/tray-monitor/authenticate.cpp:94 +msgid "" +"Authorization problem: Remote server did not advertise required TLS " +"support.\n" +msgstr "" + +#: src/qt-console/tray-monitor/authenticate.cpp:110 +#, fuzzy +msgid "TLS negotiation failed\n" +msgstr "Négociation TLS échouée.\n" + +#: src/qt-console/tray-monitor/authenticate.cpp:117 +#, c-format +msgid "Bad response to Hello command: ERR=%s\n" +msgstr "Mauvaise réponse à la commande Hello : ERR=%s\n" + +#: src/qt-console/tray-monitor/authenticate.cpp:134 +#, fuzzy +msgid "Daemon rejected Hello command\n" +msgstr "Le Client a rejeté la commande Hello\\n\n" + +#: src/qt-console/tray-monitor/conf.cpp:89 +msgid "The Name of the Monitor should be set" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:129 +msgid "The name of the Resource should be set" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:138 +#, c-format +msgid "The address of the Resource should be set for resource %s" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:147 +#, c-format +msgid "The Password of should be set for resource %s" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:171 +#, c-format +msgid "The TLS CA Certificate File should be a PEM file for resource %s" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:182 +#, c-format +msgid "The TLS CA Certificate Directory should be a directory for resource %s" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:193 +#, c-format +msgid "The TLS Certificate File should be a file for resource %s" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:204 +#, c-format +msgid "The TLS Key File should be a file for resource %s" +msgstr "" + +#: src/qt-console/tray-monitor/runjob.cpp:45 +msgid "This restricted console does not have access to Backup jobs" +msgstr "" + +#: src/qt-console/tray-monitor/runjob.cpp:123 +#, fuzzy +msgid "Nothing selected" +msgstr "Pas de job sélectionné.\n" + +#: src/qt-console/tray-monitor/task.cpp:97 +msgid 
"Bandwidth can set only set on Client" +msgstr "" + +#: src/qt-console/tray-monitor/task.cpp:102 +#, fuzzy +msgid "Bandwidth parameter is invalid" +msgstr "La création de la signature a échouée" + +#: src/qt-console/tray-monitor/task.cpp:177 +#, fuzzy +msgid "Client daemon" +msgstr "Director" + +#: src/qt-console/tray-monitor/task.cpp:205 +#, fuzzy +msgid "Storage daemon" +msgstr "En attente du Storage" + +#: src/qt-console/tray-monitor/tray-monitor.cpp:45 +#, fuzzy, c-format +msgid "" +"\n" +"%sVersion: %s (%s) %s %s %s\n" +"\n" +"Usage: tray-monitor [-c config_file] [-d debug_level]\n" +" -c set configuration file to file\n" +" -d set debug level to \n" +" -dt print timestamp in debug output\n" +" -t test - read configuration and exit\n" +" -W 0/1 force the detection of the systray\n" +" -? print this message.\n" +"\n" +msgstr "" +") %s %s %s\n" +"\n" +"Usage : bconsole [-s] [-c config_file] [-d niveau_debug]\n" +" -c set configuration file to file\n" +" -dnn set debug level to nn\n" +" -s no signals\n" +" -t test - lecture de la configuration et sortie\n" +"\n" + +#: src/qt-console/tray-monitor/tray-monitor.cpp:118 +msgid "TLS PassPhrase" +msgstr "" + +#: src/qt-console/tray-monitor/tray-monitor.cpp:164 +#, c-format +msgid "" +"Error: %d Monitor resources defined in %s. You must define one Monitor " +"resource.\n" +msgstr "" + +#: src/qt-console/tray-monitor/tray-ui.h:105 +#, fuzzy, c-format +msgid "Failed to initialize TLS context for \"%s\".\n" +msgstr "Impossible d'initialiser le contexte TLS pour la Console \"%s\".\n" + +#: src/qt-console/tray-monitor/tray-ui.h:320 +#, fuzzy +msgid "Select a Director" +msgstr "Director" + +#: src/qt-console/tray-monitor/tray_conf.cpp:172 +#, c-format +msgid "No %s resource defined\n" +msgstr "" + +#: src/qt-console/tray-monitor/tray_conf.cpp:181 +#, fuzzy, c-format +msgid "Monitor: name=%s\n" +msgstr "Console connecté à %s\n" + +#: src/qt-console/tray-monitor/tray_conf.cpp:184 +#, fuzzy, c-format +msgid "Director: name=%s address=%s port=%d\n" +msgstr "Client \"%s\" adresse positionné à %s\n" + +#: src/qt-console/tray-monitor/tray_conf.cpp:188 +#, fuzzy, c-format +msgid "Client: name=%s address=%s port=%d\n" +msgstr "Client \"%s\" adresse positionné à %s\n" + +#: src/qt-console/tray-monitor/tray_conf.cpp:192 +#, fuzzy, c-format +msgid "Storage: name=%s address=%s port=%d\n" +msgstr "Client \"%s\" adresse positionné à %s\n" + +#: src/qt-console/tray-monitor/tray_conf.cpp:196 +#, c-format +msgid "Unknown resource type %d in dump_resource.\n" +msgstr "" + +#: src/qt-console/tray-monitor/tray_conf.cpp:284 +#, c-format +msgid "Unknown resource type %d in free_resource.\n" +msgstr "" + +#: src/qt-console/tray-monitor/tray_conf.cpp:318 +#, fuzzy, c-format +msgid "Too many directives in \"%s\" resource\n" +msgstr "Trop d'éléments dans la ressource \"%s\"\n" + +#: src/qt-console/tray-monitor/tray_conf.cpp:338 +#: src/qt-console/tray-monitor/tray_conf.cpp:372 +#, c-format +msgid "Unknown resource type %d in save_resource.\n" +msgstr "" + +#: src/win32/compat/compat.cpp:2879 +#, fuzzy +msgid "" +"\n" +"\n" +"Bacula ERROR: " +msgstr "Bacula " + +#: src/win32/filed/vss.cpp:244 src/win32/filed/vss.cpp:259 +#, c-format +msgid "pthread key create failed: ERR=%s\n" +msgstr "erreur sur pthread_key_create. ERR=%s\n" + +#: src/win32/filed/vss.cpp:267 +#, fuzzy, c-format +msgid "pthread_setspecific failed: ERR=%s\n" +msgstr "Erreur sur l'ouverture du périphérique. 
ERR=%s\n" + +#: src/win32/filed/vss_generic.cpp:725 +#, fuzzy, c-format +msgid "Unable to find volume %ls in the device list\n" +msgstr "Le nouveau volume \"%s\" a été labélisé sur le device %s.\n" + +#: src/win32/libwin32/main.cpp:227 +#, fuzzy +msgid "Bad Command Line Option" +msgstr "Erreur sur la commande : %s\n" + +#: src/win32/libwin32/service.cpp:98 +msgid "RegisterServiceCtlHandler failed" +msgstr "" + +#: src/win32/libwin32/service.cpp:99 +#, fuzzy +msgid "Failure contacting the Service Handler" +msgstr "Impossible de décrypter la clef de session" + +#: src/win32/libwin32/service.cpp:110 +msgid "Service start report failed" +msgstr "" + +#: src/win32/libwin32/service.cpp:163 +msgid "StartServiceCtrlDispatcher failed." +msgstr "" + +#: src/win32/libwin32/service.cpp:170 +#, fuzzy +msgid "KERNEL32.DLL not found: Bacula service not started" +msgstr "Ressource %s introuvable\n" + +#: src/win32/libwin32/service.cpp:180 +#, fuzzy +msgid "Registry service not found: Bacula service not started" +msgstr "Ressource %s introuvable\n" + +#: src/win32/libwin32/service.cpp:182 +#, fuzzy +msgid "Registry service entry point not found" +msgstr "Ressource %s introuvable\n" + +#: src/win32/libwin32/service.cpp:204 +#, fuzzy +msgid "Report Service failure" +msgstr "Erreur de connexion" + +#: src/win32/libwin32/service.cpp:235 +#, fuzzy +msgid "Unable to install the service" +msgstr "Impossible d'écrire le marqueur EOF. ERR=%s\n" + +#: src/win32/libwin32/service.cpp:243 +#, fuzzy +msgid "Service command length too long" +msgstr "Nom trop long.\n" + +#: src/win32/libwin32/service.cpp:244 +#, fuzzy +msgid "Service command length too long. Service not registered." +msgstr "Nom trop long.\n" + +#: src/win32/libwin32/service.cpp:257 +#, fuzzy +msgid "" +"The Service Control Manager could not be contacted - the service was not " +"installed" +msgstr "Ressource %s introuvable\n" + +#: src/win32/libwin32/service.cpp:280 src/win32/libwin32/service.cpp:309 +#: src/win32/libwin32/service.cpp:355 src/win32/libwin32/service.cpp:362 +#: src/win32/libwin32/service.cpp:366 +#, fuzzy +msgid "The Bacula service: " +msgstr "Bacula Storage : Dernier Job annulé" + +#: src/win32/libwin32/service.cpp:287 +msgid "" +"Provides file backup and restore services. Bacula -- the network backup " +"solution." 
+msgstr "" + +#: src/win32/libwin32/service.cpp:298 +#, fuzzy +msgid "Cannot write System Registry for " +msgstr "Impossible de détruire la mémoire partagée : %s\n" + +#: src/win32/libwin32/service.cpp:299 +#, fuzzy +msgid "" +"The System Registry could not be updated - the Bacula service was not " +"installed" +msgstr "Ressource %s introuvable\n" + +#: src/win32/libwin32/service.cpp:308 +#, fuzzy +msgid "Cannot add Bacula key to System Registry" +msgstr "Impossible de détruire la mémoire partagée : %s\n" + +#: src/win32/libwin32/service.cpp:319 +msgid "The " +msgstr "" + +#: src/win32/libwin32/service.cpp:373 +#, fuzzy +msgid "An existing Bacula service: " +msgstr "Bacula Storage : Dernier Job annulé" + +#: src/win32/libwin32/service.cpp:381 +#, fuzzy +msgid "" +"The service Manager could not be contacted - the Bacula service was not " +"removed" +msgstr "Ressource %s introuvable\n" + +#: src/win32/libwin32/service.cpp:394 +#, fuzzy +msgid "" +"Could not find registry entry.\n" +"Service probably not registerd - the Bacula service was not removed" +msgstr "Ressource %s introuvable\n" + +#: src/win32/libwin32/service.cpp:401 +#, fuzzy +msgid "Could not delete Registry key for " +msgstr "Impossible de détruire la mémoire partagée : %s\n" + +#: src/win32/libwin32/service.cpp:411 +msgid "Bacula could not be contacted, probably not running" +msgstr "" + +#: src/win32/libwin32/service.cpp:418 +#, fuzzy +msgid "The Bacula service has been removed" +msgstr "Ressource %s introuvable\n" + +#: src/win32/libwin32/service.cpp:459 +msgid "SetServiceStatus failed" +msgstr "" + +#: src/win32/libwin32/service.cpp:485 +#, c-format +msgid "" +"\n" +"\n" +"%s error: %ld at %s:%d" +msgstr "" + +#: src/win32/libwin32/service.cpp:561 +#, c-format +msgid "Locked by: %s, duration: %ld seconds\n" +msgstr "" + +#: src/win32/libwin32/service.cpp:565 +#, c-format +msgid "No longer locked\n" +msgstr "" + +#: src/win32/libwin32/service.cpp:569 +#, fuzzy +msgid "Could not lock database" +msgstr "Impossible d'ouvrir la base de données \"%s\".\n" + +#, fuzzy +#~ msgid "Query failed: %s: ERR=%s\n" +#~ msgstr "Erreur sur la requête : %s\n" + +#~ msgid "A user name for MySQL must be supplied.\n" +#~ msgstr "Un nom d'utilisateur MySQL doit être fourni.\n" + +#~ msgid "Unable to initialize DB lock. ERR=%s\n" +#~ msgstr "Impossible d'initialiser le verrou sur la base. ERR=%s\n" + +#~ msgid "" +#~ "Unable to connect to MySQL server.\n" +#~ "Database=%s User=%s\n" +#~ "MySQL connect failed either server not running or your authorization is " +#~ "incorrect.\n" +#~ msgstr "" +#~ "Impossible de se connecter au serveur MySQL.\n" +#~ "Base=%s Utilisateur=%s\n" +#~ "Le serveur n'est pas démarré ou bien votre password est invalide.\n" + +#, fuzzy +#~ msgid "Attribute create error. %s" +#~ msgstr "Erreur sur l'autochangeur : ERR=%s\n" + +#~ msgid "A user name for PostgreSQL must be supplied.\n" +#~ msgstr "Un nom d'utilisateur PostgreSQL doit être fourni.\n" + +#, fuzzy +#~ msgid "error fetching row: %s\n" +#~ msgstr "erreur en terminant le mode batch: %s\n" + +#, fuzzy +#~ msgid "" +#~ "Unable to connect to PostgreSQL server. Database=%s User=%s\n" +#~ "Possible causes: SQL server not running; password incorrect; " +#~ "max_connections exceeded.\n" +#~ msgstr "" +#~ "Impossible de se connecter au serveur PostgreSQL.\n" +#~ "Base=%s Utilisateur=%s\n" +#~ "Le serveur n'est pas démarré ou bien votre password est invalide.\n" + +#, fuzzy +#~ msgid "Fetch failed: ERR=%s\n" +#~ msgstr "Erreur sur l'ouverture du périphérique. 
ERR=%s\n" + +#, fuzzy +#~ msgid "error fetching currval: %s\n" +#~ msgstr "Impossible d'ouvrir le fichier de données %s.\n" + +#, fuzzy +#~ msgid "error starting batch mode: %s" +#~ msgstr "erreur en terminant le mode batch: %s\n" + +#, fuzzy +#~ msgid "error ending batch mode: %s" +#~ msgstr "erreur en terminant le mode batch: %s\n" + +#, fuzzy +#~ msgid "error copying in batch mode: %s" +#~ msgstr "erreur en terminant le mode batch: %s\n" + +#, fuzzy +#~ msgid "" +#~ "query %s failed:\n" +#~ "%s\n" +#~ msgstr "Erreur sur la requête : %s\n" + +#, fuzzy +#~ msgid "" +#~ "insert %s failed:\n" +#~ "%s\n" +#~ msgstr "Erreur sur la requête : %s\n" + +#, fuzzy +#~ msgid "" +#~ "update %s failed:\n" +#~ "%s\n" +#~ msgstr "fopen %s en erreur : ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "delete %s failed:\n" +#~ "%s\n" +#~ msgstr "execv : %s en échec : ERR=%s\n" + +#~ msgid "No results to list.\n" +#~ msgstr "Liste vide.\n" + +#, fuzzy +#~ msgid "Could not init database batch connection\n" +#~ msgstr "Impossible d'initialiser le Python\n" + +#, fuzzy +#~ msgid "Could not open database \"%s\": ERR=%s\n" +#~ msgstr "Impossible d'ouvrir la base de données \"%s\".\n" + +#, fuzzy +#~ msgid "Create DB Job record %s failed. ERR=%s\n" +#~ msgstr "fopen %s en erreur : ERR=%s\n" + +#, fuzzy +#~ msgid "Create JobMedia record %s failed: ERR=%s\n" +#~ msgstr "fopen %s en erreur : ERR=%s\n" + +#, fuzzy +#~ msgid "Update Media record %s failed: ERR=%s\n" +#~ msgstr "fopen %s en erreur : ERR=%s\n" + +#~ msgid "pool record %s already exists\n" +#~ msgstr "Le pool %s existe déjà en base\n" + +#, fuzzy +#~ msgid "Create db Pool record %s failed: ERR=%s\n" +#~ msgstr "fopen %s en erreur : ERR=%s\n" + +#~ msgid "Device record %s already exists\n" +#~ msgstr "Le device %s existe déjà en base\n" + +#, fuzzy +#~ msgid "Create db Device record %s failed: ERR=%s\n" +#~ msgstr "fopen %s en erreur : ERR=%s\n" + +#, fuzzy +#~ msgid "More than one Storage record!: %d\n" +#~ msgstr "Connexion au Director %s:%d\n" + +#, fuzzy +#~ msgid "error fetching Storage row: %s\n" +#~ msgstr "Impossible de trouver la ressource Storage \"%s\"\n" + +#, fuzzy +#~ msgid "Create DB Storage record %s failed. ERR=%s\n" +#~ msgstr "fopen %s en erreur : ERR=%s\n" + +#, fuzzy +#~ msgid "mediatype record %s already exists\n" +#~ msgstr "Le device %s existe déjà en base\n" + +#, fuzzy +#~ msgid "Create db mediatype record %s failed: ERR=%s\n" +#~ msgstr "fopen %s en erreur : ERR=%s\n" + +#~ msgid "Volume \"%s\" already exists.\n" +#~ msgstr "Le volume \"%s\" existe déjà en base.\n" + +#, fuzzy +#~ msgid "Create DB Media record %s failed. ERR=%s\n" +#~ msgstr "fopen %s en erreur : ERR=%s\n" + +#, fuzzy +#~ msgid "More than one Client!: %d\n" +#~ msgstr "Connexion au client %s (%s:%d)\n" + +#, fuzzy +#~ msgid "error fetching Client row: %s\n" +#~ msgstr "erreur en terminant le mode batch: %s\n" + +#, fuzzy +#~ msgid "Create DB Client record %s failed. ERR=%s\n" +#~ msgstr "fopen %s en erreur : ERR=%s\n" + +#, fuzzy +#~ msgid "Create db Path record %s failed. ERR=%s\n" +#~ msgstr "fopen %s en erreur : ERR=%s\n" + +#, fuzzy +#~ msgid "Create DB Counters record %s failed. ERR=%s\n" +#~ msgstr "fopen %s en erreur : ERR=%s\n" + +#, fuzzy +#~ msgid "More than one FileSet!: %d\n" +#~ msgstr "Pas d'enregistrement trouvé en base pour : %s\n" + +#, fuzzy +#~ msgid "error fetching FileSet row: ERR=%s\n" +#~ msgstr "Impossible de positionner le flag InChanger : ERR=%s" + +#, fuzzy +#~ msgid "Create DB FileSet record %s failed. 
ERR=%s\n" +#~ msgstr "fopen %s en erreur : ERR=%s\n" + +#, fuzzy +#~ msgid "Create db File record %s failed. ERR=%s" +#~ msgstr "fopen %s en erreur : ERR=%s\n" + +#, fuzzy +#~ msgid "More than one Filename! %s for file: %s\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Error fetching row for file=%s: ERR=%s\n" +#~ msgstr "" +#~ "Erreur pendant l'écriture des données vers le fichier de spool. ERR=%s\n" + +#, fuzzy +#~ msgid "Create db Filename record %s failed. ERR=%s\n" +#~ msgstr "fopen %s en erreur : ERR=%s\n" + +#, fuzzy +#~ msgid "Create db Object record %s failed. ERR=%s" +#~ msgstr "fopen %s en erreur : ERR=%s\n" + +#, fuzzy +#~ msgid "No pool record %s exists\n" +#~ msgstr "Le pool %s existe déjà en base\n" + +#, fuzzy +#~ msgid "Expecting one pool record, got %d\n" +#~ msgstr "Attendait un mot clef, eu : %s\n" + +#, fuzzy +#~ msgid "Error fetching row %s\n" +#~ msgstr "Erreur pendant la récupération des informations sur un Volume : %s" + +#, fuzzy +#~ msgid "No prior backup Job record found.\n" +#~ msgstr "Pas de précédent backup Full en base.\n" + +#~ msgid "No prior Full backup Job record found.\n" +#~ msgstr "Pas de précédent backup Full en base.\n" + +#, fuzzy +#~ msgid "Unknown level=%d\n" +#~ msgstr "Niveau de job inconnu %d\n" + +#, fuzzy +#~ msgid "" +#~ "No Job record found: ERR=%s\n" +#~ "CMD=%s\n" +#~ msgstr "Pas d'enregistrement trouvé en base pour : %s\n" + +#~ msgid "Unknown Job level=%d\n" +#~ msgstr "Niveau de job inconnu %d\n" + +#~ msgid "No Job found for: %s.\n" +#~ msgstr "Pas de job trouvé pour : %s.\n" + +#~ msgid "No Job found for: %s\n" +#~ msgstr "Pas de job trouvé pour %s\n" + +#, fuzzy +#~ msgid "Request for Volume item %d greater than max %d or less than 1\n" +#~ msgstr "Le slot %d est ignoré car il est supérieur au maximum %d.\n" + +#~ msgid "No Volume record found for item %d.\n" +#~ msgstr "Pas de volume trouvé en base pour l'objet %d.\n" + +#, fuzzy +#~ msgid "Error fetching row: %s\n" +#~ msgstr "Erreur pendant la récupération des informations sur un Volume : %s" + +#, fuzzy +#~ msgid "File record for PathId=%s FilenameId=%s not found.\n" +#~ msgstr "Le nouveau volume \"%s\" existe déjà en base.\n" + +#, fuzzy +#~ msgid "File record not found in Catalog.\n" +#~ msgstr "Le nouveau volume \"%s\" existe déjà en base.\n" + +#, fuzzy +#~ msgid "More than one Filename!: %s for file: %s\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Filename record: %s not found.\n" +#~ msgstr "Le FileSet \"%s\" est introuvable.\n" + +#, fuzzy +#~ msgid "Filename record: %s not found in Catalog.\n" +#~ msgstr "Le nouveau volume \"%s\" existe déjà en base.\n" + +#, fuzzy +#~ msgid "Get DB path record %s found bad record: %s\n" +#~ msgstr "Pas d'enregistrement trouvé en base pour : %s\n" + +#, fuzzy +#~ msgid "Path record: %s not found.\n" +#~ msgstr "%s ressource %s introuvable.\n" + +#, fuzzy +#~ msgid "Path record: %s not found in Catalog.\n" +#~ msgstr "Le nouveau volume \"%s\" existe déjà en base.\n" + +#, fuzzy +#~ msgid "No Job found for JobId %s\n" +#~ msgstr "Pas de job trouvé pour %s\n" + +#, fuzzy +#~ msgid "No volumes found for JobId=%d\n" +#~ msgstr "Aucun volume trouvé pour la restauration.\n" + +#, fuzzy +#~ msgid "Error fetching row %d: ERR=%s\n" +#~ msgstr "Erreur pendant lors de la récupération du pool. 
ERR=%s\n" + +#, fuzzy +#~ msgid "No Volume for JobId %d found in Catalog.\n" +#~ msgstr "Pas de volume trouvé en base pour l'objet %d.\n" + +#, fuzzy +#~ msgid "Pool id select failed: ERR=%s\n" +#~ msgstr "fopen %s en erreur : ERR=%s\n" + +#, fuzzy +#~ msgid "Client id select failed: ERR=%s\n" +#~ msgstr "fopen %s en erreur : ERR=%s\n" + +#, fuzzy +#~ msgid "Pool record not found in Catalog.\n" +#~ msgstr "Le nouveau volume \"%s\" existe déjà en base.\n" + +#, fuzzy +#~ msgid "RestoreObject record \"%d\" not found.\n" +#~ msgstr "le client \"%s\" est introuvable.\n" + +#, fuzzy +#~ msgid "RestoreObject record not found in Catalog.\n" +#~ msgstr "le client \"%s\" est introuvable.\n" + +#, fuzzy +#~ msgid "More than one Client!: %s\n" +#~ msgstr "est en attente du client %s" + +#, fuzzy +#~ msgid "Client record not found in Catalog.\n" +#~ msgstr "Pas de ressource \"Restore Job\" trouvée !\n" + +#, fuzzy +#~ msgid "error fetching Counter row: %s\n" +#~ msgstr "Erreur pendant la récupération des informations sur un Volume : %s" + +#, fuzzy +#~ msgid "Counter record: %s not found in Catalog.\n" +#~ msgstr "Le nouveau volume \"%s\" existe déjà en base.\n" + +#, fuzzy +#~ msgid "FileSet record \"%s\" not found.\n" +#~ msgstr "Le FileSet \"%s\" est introuvable.\n" + +#, fuzzy +#~ msgid "FileSet record not found in Catalog.\n" +#~ msgstr "Le nouveau volume \"%s\" existe déjà en base.\n" + +#, fuzzy +#~ msgid "Media id select failed: ERR=%s\n" +#~ msgstr "fopen %s en erreur : ERR=%s\n" + +#, fuzzy +#~ msgid "query dbids failed: ERR=%s\n" +#~ msgstr "fopen %s en erreur : ERR=%s\n" + +#, fuzzy +#~ msgid "More than one Volume!: %s\n" +#~ msgstr "3001 Volume monté : %s\n" + +#, fuzzy +#~ msgid "Media record with MediaId=%s not found.\n" +#~ msgstr "%s ressource %s introuvable.\n" + +#, fuzzy +#~ msgid "Media record for Volume name \"%s\" not found.\n" +#~ msgstr "Le nouveau volume \"%s\" existe déjà en base.\n" + +#, fuzzy +#~ msgid "Media record for MediaId=%u not found in Catalog.\n" +#~ msgstr "Le nouveau volume \"%s\" existe déjà en base.\n" + +#, fuzzy +#~ msgid "Media record for Volume Name \"%s\" not found in Catalog.\n" +#~ msgstr "Le nouveau volume \"%s\" existe déjà en base.\n" + +#, fuzzy +#~ msgid "More than one Snapshot!: %s\n" +#~ msgstr "Erreur durant la création des snapshots VSS.\n" + +#, fuzzy +#~ msgid "Snapshot record with SnapshotId=%s not found.\n" +#~ msgstr "%s ressource %s introuvable.\n" + +#, fuzzy +#~ msgid "Snapshot record for Snapshot name \"%s\" not found.\n" +#~ msgstr "Le nouveau volume \"%s\" existe déjà en base.\n" + +#~ msgid "Query failed: %s\n" +#~ msgstr "Erreur sur la requête : %s\n" + +#, fuzzy +#~ msgid "Database %s does not exist, please create it.\n" +#~ msgstr "Impossible de créer le volume \"%s\" car il existe déjà.\n" + +#, fuzzy +#~ msgid "Unable to open Database=%s. 
ERR=%s\n" +#~ msgstr "3910 Impossible d'ouvrir le device %s : ERR=%s\n" + +#~ msgid "unknown" +#~ msgstr "inconnu" + +#~ msgid "Director rejected Hello command\n" +#~ msgstr "Le director a rejeté la commande Hello\n" + +#, fuzzy +#~ msgid "" +#~ "Director authorization problem.\n" +#~ "Most likely the passwords do not agree.\n" +#~ "If you are using TLS, there may have been a certificate validation error " +#~ "during the TLS handshake.\n" +#~ "For help, please see " +#~ msgstr "" +#~ "Problème d'authentification avec le director.\n" +#~ "Le plus souvent, les mots de pass ne correspondent pas.\n" +#~ "Si vous utilisez TLS, il peut y avoir une erreur de validation du " +#~ "certificat\n" +#~ "pendant l'initialisation de la connexion TLS.\n" +#~ "Vous trouverez de l'aide sur\n" +#~ "http://www.bacula.org/rel-manual/faq.html#AuthorizationErrors\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: " +#~ msgstr "" +#~ "\n" +#~ "Version : " + +#, fuzzy +#~ msgid "Please use valid -l argument: %s\n" +#~ msgstr "FileSet" + +#~ msgid "quit" +#~ msgstr "quit" + +#, fuzzy +#~ msgid "sleep specified time" +#~ msgstr "Le FileSet est déjà spécifié.\n" + +#~ msgid "print current time" +#~ msgstr "affiche la date courante" + +#, fuzzy +#~ msgid "print Console's version" +#~ msgstr "affiche la version du Director" + +#, fuzzy +#~ msgid "echo command string" +#~ msgstr "Erreur sur la commande : %s\n" + +#~ msgid "exit = quit" +#~ msgstr "exit = quit" + +#, fuzzy +#~ msgid "send a file to the director" +#~ msgstr "Impossible de se connecter au Director\n" + +#, fuzzy +#~ msgid "set command separator" +#~ msgstr "Erreur sur la commande : %s\n" + +#~ msgid ": is an invalid command\n" +#~ msgstr "%s : est une commande invalide.\n" + +#, fuzzy +#~ msgid "Illegal separator character.\n" +#~ msgstr "Caractère illégal \"%c\" dans le nom.\n" + +#, fuzzy +#~ msgid "Command logic problem\n" +#~ msgstr "Commande annulée.\n" + +#, fuzzy +#~ msgid "Can't find %s in Director list\n" +#~ msgstr "Impossible de trouver la ressource Director \"%s\"\n" + +#, fuzzy +#~ msgid "Available Directors:\n" +#~ msgstr "Requêtes disponibles :\n" + +#, fuzzy +#~ msgid "%s is not a number. You must enter a number between 1 and %d\n" +#~ msgstr "Vous devez saisir un nombre entre 1 et %d\n" + +#~ msgid "You must enter a number between 1 and %d\n" +#~ msgstr "Vous devez saisir un nombre entre 1 et %d\n" + +#, fuzzy +#~ msgid "Can't find %s in Console list\n" +#~ msgstr "Impossible de trouver la ressource Director \"%s\"\n" + +#~ msgid "Connecting to Director %s:%d\n" +#~ msgstr "Connexion au Director %s:%d\n" + +#~ msgid "Enter a period to cancel a command.\n" +#~ msgstr "Tapez un point (.) pour annuler une commande.\n" + +#~ msgid "Too many arguments on input command.\n" +#~ msgstr "Trop d'arguments sur la commande.\n" + +#~ msgid "First argument to input command must be a filename.\n" +#~ msgstr "Le premier argument de la commande doit être un fichier.\n" + +#~ msgid "Cannot open file %s for input. ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s pour lecture. ERR=%s\n" + +#, fuzzy +#~ msgid "Too many arguments on output/tee command.\n" +#~ msgstr "Trop d'arguments sur la commande.\n" + +#~ msgid "Cannot open file %s for output. ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s pour la sortie. ERR=%s\n" + +#~ msgid "Too many arguments. Enclose command in double quotes.\n" +#~ msgstr "" +#~ "Trop d'arguments sur la commande. 
Essayez d'utiliser des \"\"\" autour " +#~ "des commandes\n" + +#, fuzzy +#~ msgid "Cannot popen(\"%s\", \"r\"): ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "@exec error: ERR=%s\n" +#~ msgstr "erreur de fermeture : ERR=%s\n" + +#, fuzzy +#~ msgid "Console: name=%s rcfile=%s histfile=%s\n" +#~ msgstr "Client \"%s\" adresse positionné à %s\n" + +#, fuzzy +#~ msgid "Start Admin JobId %d, Job=%s\n" +#~ msgstr "Démarrage du backup JobId %s, Job=%s\n" + +#, fuzzy +#~ msgid "Error getting Job record for Job report: ERR=%s" +#~ msgstr "Impossible de récupérer le Job du JobId=%s : ERR=%s\n" + +#~ msgid "Admin OK" +#~ msgstr "Admin OK" + +#~ msgid "*** Admin Error ***" +#~ msgstr "*** Admin en Erreur ***" + +#~ msgid "Admin Canceled" +#~ msgstr "Admin Annulé" + +#, fuzzy +#~ msgid "Inappropriate term code: %c\n" +#~ msgstr "Format de date invalide : %s\n" + +#, fuzzy +#~ msgid "Error sending Hello to Storage daemon. ERR=%s\n" +#~ msgstr "Erreur pendant lors de la récupération du pool. ERR=%s\n" + +#, fuzzy +#~ msgid "Director and Storage daemon passwords or names not the same.\n" +#~ msgstr "" +#~ "Le mot de passe ou le nom du Director et du Client ne sont pas " +#~ "identiques.\n" + +#, fuzzy +#~ msgid "" +#~ "Director unable to authenticate with Storage daemon at \"%s:%d\". " +#~ "Possible causes:\n" +#~ "Passwords or names not the same or\n" +#~ "Maximum Concurrent Jobs exceeded on the SD or\n" +#~ "SD networking messed up (restart daemon).\n" +#~ "For help, please see: " +#~ msgstr "" +#~ "Le Director a rencontré un problème d'authentification avec le Storage " +#~ "Daemon \"%s:%d\".\n" +#~ "Les causes possibles sont :\n" +#~ "- Les mots de pass ne correspondent pas ;\n" +#~ "- Le nombre maximum de job concurrent est atteint sur le SD ;\n" +#~ "- La connexion réseau du SD est tombée (il faut le redémarrer).\n" +#~ "Vous trouverez de l'aide sur\n" +#~ "http://www.bacula.org/rel-manual/faq.html#AuthorizationErrors\n" + +#, fuzzy +#~ msgid "TLS negotiation failed with SD at \"%s:%d\"\n" +#~ msgstr "Négociation TLS échouée avec le SD \"%s:%d\".\n" + +#~ msgid "bdird] [config_file]\n" +#~ " -r get resource type \n" +#~ " -n get resource \n" +#~ " -l get only directives matching dirs (use with -r)\n" +#~ " -D get only data\n" +#~ " -R do not apply JobDefs to Job\n" +#~ " -c set configuration file to file\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -t test - read configuration and exit\n" +#~ " -s output in show text format\n" +#~ " -v verbose user messages\n" +#~ " -? print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Version : %s (%s)\n" +#~ "\n" +#~ "Usage : dird [-f -s] [-c config_file] [-d debug_level] [config_file]\n" +#~ " -c utilise fich comme fichier de configuration\n" +#~ " -d positionne le niveau de debug à nn\n" +#~ " -dt affiche un timestamp devant chaque ligne de debug\n" +#~ " -f reste en avant-plan (pour debugger)\n" +#~ " -g groupid\n" +#~ " -r lance maintenant\n" +#~ " -s pas de signaux\n" +#~ " -t test - lit seulement le fichier de configuration\n" +#~ " -u userid\n" +#~ " -v affiche les messages utilisateurs\n" +#~ " -? 
affiche ce message.\n" +#~ "\n" + +#, fuzzy +#~ msgid "" +#~ "No Director resource defined in %s\n" +#~ "Without that I don't know who I am :-(\n" +#~ msgstr "" +#~ "Pas de director défini pour %s\n" +#~ "Sans cette définition, il n'est pas possible de se connecter à celui-ci.\n" + +#, fuzzy +#~ msgid "No Messages resource defined in %s\n" +#~ msgstr "Pas d'enregistrement trouvé en base pour : %s\n" + +#, fuzzy +#~ msgid "Only one Director resource permitted in %s\n" +#~ msgstr "Impossible de trouver la ressource Director \"%s\"\n" + +#, fuzzy +#~ msgid "Failed to initialize TLS context for File daemon \"%s\" in %s.\n" +#~ msgstr "Impossible d'initialiser le contexte TLS pour le Director \"%s\".\n" + +#, fuzzy +#~ msgid "No Job records defined in %s\n" +#~ msgstr "Pas d'enregistrement trouvé en base pour : %s\n" + +#, fuzzy +#~ msgid "Too many items in Job resource\n" +#~ msgstr "Trop d'élément dans la ressource %s\n" + +#~ msgid "No storage specified in Job \"%s\" nor in Pool.\n" +#~ msgstr "Pas de Storage spécifié dans le job \"%s\" ni dans le Pool.\n" + +#, fuzzy +#~ msgid "Unable to get Job record. ERR=%s\n" +#~ msgstr "Impossible de récupérer le Pool depuis le catalogue : ERR=%s" + +#, fuzzy +#~ msgid "Unable to get Job Volume Parameters. ERR=%s\n" +#~ msgstr "Impossible de récupérer le Pool depuis le catalogue : ERR=%s" + +#~ msgid "Unable to create bootstrap file %s. ERR=%s\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s. ERR=%s\n" + +#, fuzzy +#~ msgid "No files found to read. No bootstrap file written.\n" +#~ msgstr "" +#~ "Aucun fichier trouvé pour la restauration/migration. Pas de fichier " +#~ "Bootstrap écrit.\n" + +#~ msgid "Error writing bsr file.\n" +#~ msgstr "Erreur pendant l'écriture du fichier bsr.\n" + +#, fuzzy +#~ msgid "" +#~ "The Job will require the following (*=>InChanger):\n" +#~ " Volume(s) Storage(s) SD Device(s)\n" +#~ "===========================================================================\n" +#~ msgstr "" +#~ "Ce job va utiliser les éléments suivants :\n" +#~ " Volume(s) Storage(s) SD Device(s)\n" +#~ "===========================================================================\n" + +#~ msgid "No Volumes found to restore.\n" +#~ msgstr "Aucun volume trouvé pour la restauration.\n" + +#~ msgid "1990 Invalid Catalog Request: %s" +#~ msgstr "1990 Requête sur le Catalogue Invalide : %s" + +#~ msgid "Invalid Catalog request; DB not open: %s" +#~ msgstr "Requête sur le Catalogue invalide ; la base n'est pas ouverte : %s" + +#, fuzzy +#~ msgid "Pool \"%s\" not found for SD find media request.\n" +#~ msgstr "La ressource Pool \"%s\" est introuvable pour le volume \"%s\"\n" + +#~ msgid "1901 No Media.\n" +#~ msgstr "1901 Pas de Media.\n" + +#~ msgid "not in Pool" +#~ msgstr "non présent dans le Pool" + +#, fuzzy +#~ msgid "not correct MediaType" +#~ msgstr "Choisissez le type de Media" + +#~ msgid "is not Enabled" +#~ msgstr "n'est pas activé (Enabled)" + +#, fuzzy +#~ msgid "1998 Volume \"%s\" catalog status is %s, %s.\n" +#~ msgstr "1998 Le statut du Volume \"%s\" est %s, %s.\n" + +#~ msgid "1997 Volume \"%s\" not in catalog.\n" +#~ msgstr "1997 le Volume \"%s\" n'est pas dans le catalogue.\n" + +#~ msgid "Unable to get Media record for Volume %s: ERR=%s\n" +#~ msgstr "" +#~ "Impossible de récupérer les informations du Media pour le Volume %s : ERR=" +#~ "%s\n" + +#, fuzzy +#~ msgid "1991 Catalog Request for vol=%s failed: %s" +#~ msgstr "1990 Requête sur le Catalogue Invalide : %s" + +#, fuzzy +#~ msgid "Catalog error updating Media record. 
%s" +#~ msgstr "Impossible de mettre à jour les informations du Volume : ERR=%s" + +#~ msgid "1993 Update Media error\n" +#~ msgstr "1993 Erreur sur la mise à jour du Media\n" + +#, fuzzy +#~ msgid "Catalog error creating JobMedia record. %s" +#~ msgstr "Erreur pendant l'écriture du fichier bsr.\n" + +#, fuzzy +#~ msgid "1992 Create JobMedia error\n" +#~ msgstr "1991 Erreur sur la mise à jour du JobMedia\n" + +#, fuzzy +#~ msgid "Invalid Catalog request: %s" +#~ msgstr "1990 Requête sur le Catalogue Invalide : %s" + +#, fuzzy +#~ msgid "Attribute create error: ERR=%s" +#~ msgstr "Erreur sur l'autochangeur : ERR=%s\n" + +#, fuzzy +#~ msgid "Restore object create error. %s" +#~ msgstr "Restauration annulée" + +#, fuzzy +#~ msgid "attribute create error. ERR=%s" +#~ msgstr "Erreur sur l'autochangeur : ERR=%s\n" + +#, fuzzy +#~ msgid "Catalog error updating file digest. %s" +#~ msgstr "" +#~ "Erreur pendant l'écriture des données vers le fichier de spool. ERR=%s\n" + +#, fuzzy +#~ msgid "1994 Invalid Catalog Update: %s" +#~ msgstr "1990 Requête sur le Catalogue Invalide : %s" + +#, fuzzy +#~ msgid "Invalid Catalog Update; DB not open: %s" +#~ msgstr "Requête sur le Catalogue invalide ; la base n'est pas ouverte : %s" + +#, fuzzy +#~ msgid "" +#~ "fread attr spool error. Wanted %ld bytes, maximum permitted 10000000 " +#~ "bytes\n" +#~ msgstr "Erreur pendant l'écriture des attributs dans le spool. ERR=%s\n" + +#, fuzzy +#~ msgid "fread attr spool error. Wanted %ld bytes but got %lld ERR=%s\n" +#~ msgstr "Erreur pendant l'écriture des attributs dans le spool. ERR=%s\n" + +#, fuzzy +#~ msgid "fread attr spool error. ERR=%s\n" +#~ msgstr "Erreur pendant l'écriture des attributs dans le spool. ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: bacula-dir [-f -s] [-c config_file] [-d debug_level] " +#~ "[config_file]\n" +#~ " -c set configuration file to file\n" +#~ " -d [,] set debug level to , debug tags to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -T set trace on\n" +#~ " -f run in foreground (for debugging)\n" +#~ " -g groupid\n" +#~ " -m print kaboom output (for debugging)\n" +#~ " -r run now\n" +#~ " -s no signals\n" +#~ " -t test - read configuration and exit\n" +#~ " -u userid\n" +#~ " -v verbose user messages\n" +#~ " -? print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Version : %s (%s)\n" +#~ "\n" +#~ "Usage : dird [-f -s] [-c config_file] [-d debug_level] [config_file]\n" +#~ " -c utilise fich comme fichier de configuration\n" +#~ " -d positionne le niveau de debug à nn\n" +#~ " -dt affiche un timestamp devant chaque ligne de debug\n" +#~ " -f reste en avant-plan (pour debugger)\n" +#~ " -g groupid\n" +#~ " -r lance maintenant\n" +#~ " -s pas de signaux\n" +#~ " -t test - lit seulement le fichier de configuration\n" +#~ " -u userid\n" +#~ " -v affiche les messages utilisateurs\n" +#~ " -? affiche ce message.\n" +#~ "\n" + +#~ msgid "Resetting previous configuration.\n" +#~ msgstr "Retour à la dernière configuration.\n" + +#, fuzzy +#~ msgid "Storage=%s not found. 
Assuming it was removed!!!\n" +#~ msgstr "" +#~ "Le Storage \"%s\" est introuvable, utilisation du Storage \"%s\" du " +#~ "MediaType \"%s\".\n" + +#, fuzzy +#~ msgid "Failed to initialize TLS context for Director \"%s\" in %s.\n" +#~ msgstr "Impossible d'initialiser le contexte TLS pour le Director \"%s\".\n" + +#, fuzzy +#~ msgid "PoolType required in Pool resource \"%s\".\n" +#~ msgstr "Sélectionnez le Pool" + +#, fuzzy +#~ msgid "Invalid PoolType \"%s\" in Pool resource \"%s\".\n" +#~ msgstr "Impossible de trouver la ressource Pool \"%s\"\n" + +#~ msgid "Could not open Catalog \"%s\", database \"%s\".\n" +#~ msgstr "" +#~ "Impossible d'ouvrir le catalogue \"%s\", sur la base de données \"%s\".\n" + +#~ msgid "%s" +#~ msgstr "%s" + +#, fuzzy +#~ msgid "Could not create storage record for %s\n" +#~ msgstr "Impossible de trouver la ressource Storage \"%s\"\n" + +#, fuzzy +#~ msgid "Could not update storage record for %s\n" +#~ msgstr "Impossible de trouver la ressource Storage \"%s\"\n" + +#, fuzzy +#~ msgid "Failed to initialize TLS context for Storage \"%s\" in %s.\n" +#~ msgstr "Impossible d'initialiser le contexte TLS pour le Director \"%s\".\n" + +#, fuzzy +#~ msgid "Could not compile regex pattern \"%s\" ERR=%s\n" +#~ msgstr "Impossible d'ouvrir %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Director: name=%s MaxJobs=%d FDtimeout=%s SDtimeout=%s\n" +#~ msgstr "Client \"%s\" adresse positionné à %s\n" + +#, fuzzy +#~ msgid " query_file=%s\n" +#~ msgstr "Erreur sur la requête : %s\n" + +#, fuzzy +#~ msgid " --> " +#~ msgstr " --> Target=%s\n" + +#, fuzzy +#~ msgid "Console: name=%s SSL=%d\n" +#~ msgstr "Console connecté à %s\n" + +#, fuzzy +#~ msgid "Counter: name=%s min=%d max=%d\n" +#~ msgstr "Client \"%s\" adresse positionné à %s\n" + +#, fuzzy +#~ msgid "" +#~ "Client: Name=%s Enabled=%d Address=%s FDport=%d MaxJobs=%u NumJobs=%u\n" +#~ msgstr "Client \"%s\" adresse positionné à %s\n" + +#~ msgid "Job" +#~ msgstr "Job" + +#~ msgid "JobDefs" +#~ msgstr "JobDefs" + +#, fuzzy +#~ msgid " SpoolSize=%s\n" +#~ msgstr "Utilisation du Catalogue \"%s\"\n" + +#, fuzzy +#~ msgid " Accurate=%d\n" +#~ msgstr "Utilisation du Catalogue \"%s\"\n" + +#, fuzzy +#~ msgid " SelectionType=%d\n" +#~ msgstr "Job du Type=%d inconnu\n" + +#, fuzzy +#~ msgid " PrefixLinks=%d\n" +#~ msgstr "Priorité" + +#, fuzzy +#~ msgid " --> Where=%s\n" +#~ msgstr " --> RegexWhere=%s\n" + +#~ msgid " --> RegexWhere=%s\n" +#~ msgstr " --> RegexWhere=%s\n" + +#, fuzzy +#~ msgid " --> Bootstrap=%s\n" +#~ msgstr " --> Target=%s\n" + +#, fuzzy +#~ msgid " --> WriteBootstrap=%s\n" +#~ msgstr " --> Target=%s\n" + +#, fuzzy +#~ msgid " --> PluginOptions=%s\n" +#~ msgstr " --> RunOnSuccess=%u\n" + +#~ msgid " --> MaxRunTime=%u\n" +#~ msgstr " --> MaxRunTime=%u\n" + +#~ msgid " --> MaxWaitTime=%u\n" +#~ msgstr " --> MaxWaitTime=%u\n" + +#~ msgid " --> MaxStartDelay=%u\n" +#~ msgstr " --> MaxStartDelay=%u\n" + +#, fuzzy +#~ msgid " --> MaxRunSchedTime=%u\n" +#~ msgstr " --> MaxRunTime=%u\n" + +#, fuzzy +#~ msgid " --> Base %s\n" +#~ msgstr " --> Target=%s\n" + +#~ msgid " --> RunScript\n" +#~ msgstr " --> RunScript\n" + +#~ msgid " --> Command=%s\n" +#~ msgstr " --> Command=%s\n" + +#~ msgid " --> Target=%s\n" +#~ msgstr " --> Target=%s\n" + +#~ msgid " --> RunOnSuccess=%u\n" +#~ msgstr " --> RunOnSuccess=%u\n" + +#~ msgid " --> RunOnFailure=%u\n" +#~ msgstr " --> RunOnFailure=%u\n" + +#, fuzzy +#~ msgid " --> FailJobOnError=%u\n" +#~ msgstr " --> RunOnFailure=%u\n" + +#~ msgid " --> RunWhen=%u\n" +#~ msgstr " --> RunWhen=%u\n" + +#, fuzzy 
+#~ msgid " --> VFullBackup" +#~ msgstr "Incrémental" + +#, fuzzy +#~ msgid " --> FullBackup" +#~ msgstr "Incrémental" + +#, fuzzy +#~ msgid " --> IncrementalBackup" +#~ msgstr "Incrémental" + +#, fuzzy +#~ msgid " --> DifferentialBackup" +#~ msgstr "Différentiel" + +#, fuzzy +#~ msgid " --> Next" +#~ msgstr " --> Target=%s\n" + +#, fuzzy +#~ msgid " --> Run=%s\n" +#~ msgstr " --> RunWhen=%u\n" + +#, fuzzy +#~ msgid " --> SelectionPattern=%s\n" +#~ msgstr " --> RunOnSuccess=%u\n" + +#, fuzzy +#~ msgid " --> Run Level=%s\n" +#~ msgstr " --> RunWhen=%u\n" + +#, fuzzy +#~ msgid " MaxRunSchedTime=%u\n" +#~ msgstr " --> MaxRunTime=%u\n" + +#, fuzzy +#~ msgid " Priority=%u\n" +#~ msgstr "Priorité" + +#, fuzzy +#~ msgid " hour=" +#~ msgstr "Priorité" + +#, fuzzy +#~ msgid " month=" +#~ msgstr "Priorité" + +#, fuzzy +#~ msgid " woy=" +#~ msgstr "Priorité" + +#, fuzzy +#~ msgid " mins=%d\n" +#~ msgstr "Priorité" + +#, fuzzy +#~ msgid "Schedule: name=%s\n" +#~ msgstr "Console connecté à %s\n" + +#, fuzzy +#~ msgid "Pool: name=%s PoolType=%s\n" +#~ msgstr "Console connecté à %s\n" + +#, fuzzy +#~ msgid " CacheRetention=%s\n" +#~ msgstr "Utilisation du Catalogue \"%s\"\n" + +#, fuzzy +#~ msgid " NextPool=%s\n" +#~ msgstr "Utilisation du Catalogue \"%s\"\n" + +#, fuzzy +#~ msgid " RecyclePool=%s\n" +#~ msgstr "Utilisation du Catalogue \"%s\"\n" + +#, fuzzy +#~ msgid " ScratchPool=%s\n" +#~ msgstr "Utilisation du Catalogue \"%s\"\n" + +#, fuzzy +#~ msgid " Catalog=%s\n" +#~ msgstr "Utilisation du Catalogue \"%s\"\n" + +#, fuzzy +#~ msgid "Messages: name=%s\n" +#~ msgstr "Console connecté à %s\n" + +#, fuzzy +#~ msgid " mailcmd=%s\n" +#~ msgstr "Utilisation du Catalogue \"%s\"\n" + +#, fuzzy +#~ msgid " opcmd=%s\n" +#~ msgstr "Utilisation du Catalogue \"%s\"\n" + +#~ msgid "Cannot find Pool resource %s\n" +#~ msgstr "Impossible de trouver la ressource Pool \"%s\"\n" + +#~ msgid "Cannot find Console resource %s\n" +#~ msgstr "Impossible de trouver la ressource Console \"%s\"\n" + +#~ msgid "Cannot find Director resource %s\n" +#~ msgstr "Impossible de trouver la ressource Director \"%s\"\n" + +#~ msgid "Cannot find Storage resource %s\n" +#~ msgstr "Impossible de trouver la ressource Storage \"%s\"\n" + +#~ msgid "Cannot find Job resource %s\n" +#~ msgstr "Impossible de trouver la ressource Job \"%s\"\n" + +#~ msgid "Cannot find Counter resource %s\n" +#~ msgstr "Impossible de trouver la ressource Counter \"%s\"\n" + +#~ msgid "Cannot find Client resource %s\n" +#~ msgstr "Impossible de trouver la ressource Client \"%s\"\n" + +#~ msgid "Cannot find Schedule resource %s\n" +#~ msgstr "Impossible de trouver la ressource Schedule \"%s\"\n" + +#, fuzzy +#~ msgid "Expected one of: %s, got: %s" +#~ msgstr "Attendait %s, a pas : %s" + +#, fuzzy +#~ msgid "Could not find Storage Resource %s referenced on line %d : %s\n" +#~ msgstr "Impossible de trouver la ressource \"%s\" utilisée ligne %d : %s\n" + +#, fuzzy +#~ msgid "" +#~ "Attempt to redefine Storage resource \"%s\" referenced on line %d : %s\n" +#~ msgstr "Impossible de trouver la ressource \"%s\" utilisée ligne %d : %s\n" + +#~ msgid "Expected a Migration Job Type keyword, got: %s" +#~ msgstr "Attendait un type de Job de Migration, eu : %s" + +#, fuzzy +#~ msgid "Expected a Job Type keyword, got: %s" +#~ msgstr "Attendait un niveau de sauvegarde, eu : %s" + +#~ msgid "Expected a Job Level keyword, got: %s" +#~ msgstr "Attendait un niveau de sauvegarde, eu : %s" + +#~ msgid "Expected a Restore replacement option, got: %s" +#~ msgstr "Attendait un niveau de 
remplacement, eu : %s" + +#~ msgid "Expect %s, got: %s" +#~ msgstr "Attendait %s, a pas : %s" + +#, fuzzy +#~ msgid "Could not find config Resource %s referenced on line %d : %s\n" +#~ msgstr "Impossible de trouver la ressource \"%s\" utilisée ligne %d : %s\n" + +#~ msgid "Expecting open brace. Got %s" +#~ msgstr "Attendait {, eu : %s" + +#~ msgid "Expecting keyword, got: %s\n" +#~ msgstr "Attendait un mot clef, eu : %s\n" + +#~ msgid "expected an equals, got: %s" +#~ msgstr "attendait un égale, eu : %s" + +#, fuzzy +#~ msgid "Keyword %s not permitted in this resource" +#~ msgstr "Impossible de trouver un Catalogue\n" + +#, fuzzy +#~ msgid "Count not update counter %s: ERR=%s\n" +#~ msgstr "Impossible d'ouvrir %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot create var context: ERR=%s\n" +#~ msgstr "change le répertoire courant" + +#, fuzzy +#~ msgid "Cannot set var callback: ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot set var operate: ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot unescape string: ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot expand expression \"%s\": ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot destroy var context: ERR=%s\n" +#~ msgstr "Impossible de détruire la queue cliente : ERR=%s\n" + +#, fuzzy +#~ msgid "Client: " +#~ msgstr "Client" + +#, fuzzy +#~ msgid "File daemon \"%s\" rejected Job command: %s\n" +#~ msgstr "Le File Daemon \"%s:%d\" a rejeté la commande Hello\n" + +#, fuzzy +#~ msgid "Error updating Client record. ERR=%s\n" +#~ msgstr "Impossible d'initialiser la queue cliente : ERR=%s\n" + +#, fuzzy +#~ msgid "FD gave bad response to JobId command: %s\n" +#~ msgstr "Mauvaise réponse à la commande Hello : ERR=%s\n" + +#~ msgid ", since=" +#~ msgstr ", depuis=" + +#~ msgid "" +#~ "No prior or suitable Full backup found in catalog. Doing FULL backup.\n" +#~ msgstr "" +#~ "Pas de précédent backup Full utilisable. Lancement d'un backup FULL.\n" + +#~ msgid " (upgraded from %s)" +#~ msgstr " (à la place de %s)" + +#, fuzzy +#~ msgid "" +#~ "No prior or suitable Full backup found in catalog. Doing Virtual FULL " +#~ "backup.\n" +#~ msgstr "" +#~ "Pas de précédent backup Full utilisable. Lancement d'un backup FULL.\n" + +#, fuzzy +#~ msgid "" +#~ "No prior or suitable Differential backup found in catalog. Doing " +#~ "Differential backup.\n" +#~ msgstr "" +#~ "Pas de précédent backup Full utilisable. Lancement d'un backup FULL.\n" + +#~ msgid "Prior failed job found in catalog. Upgrading to %s.\n" +#~ msgstr "Le job précédent était en erreur. Passage au type %s.\n" + +#~ msgid "Cannot run program: %s. ERR=%s\n" +#~ msgstr "Impossible de lancer la commande : %s. ERR=%s\n" + +#, fuzzy +#~ msgid ">filed: write error on socket\n" +#~ msgstr "erreur de déplacement (lseek) sur %s : ERR=%s\n" + +#~ msgid "Error running program: %s. ERR=%s\n" +#~ msgstr "Erreur dans l'exécution de la commande : %s. ERR=%s\n" + +#~ msgid "Cannot open included file: %s. ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier inclus : %s. 
ERR=%s\n" + +#, fuzzy +#~ msgid "Client \"%s\" RunScript failed.\n" +#~ msgstr "le client \"%s\" est introuvable.\n" + +#, fuzzy +#~ msgid "RestoreObject failed.\n" +#~ msgstr "restauration de fichier" + +#, fuzzy +#~ msgid "ComponentInfo failed.\n" +#~ msgstr "Commande annulée.\n" + +#, fuzzy +#~ msgid " -- estime un FileSet (listing donne la liste des " +#~ "fichiers)" + +#, fuzzy +#~ msgid "Non-interactive gui mode" +#~ msgstr "gui [on|off] -- mode non interactif (pour interface graphique)" + +#, fuzzy +#~ msgid "Print help on specific command" +#~ msgstr "affiche cette commande" + +#, fuzzy +#~ msgid "Label a tape" +#~ msgstr "labéliser une bande" + +#, fuzzy +#~ msgid "List objects from catalog" +#~ msgstr "purge les enregistrements du catalogue" + +#, fuzzy +#~ msgid "Display pending messages" +#~ msgstr "Message de Bacula" + +#, fuzzy +#~ msgid "Print current memory usage" +#~ msgstr "affiche la consommation mémoire courante" + +#, fuzzy +#~ msgid "Mount storage" +#~ msgstr "unmount -- démonte un lecteur" + +#, fuzzy +#~ msgid "Prune expired records from catalog" +#~ msgstr "purge les entrées expirées du catalogue" + +#, fuzzy +#~ msgid "Purge records from catalog" +#~ msgstr "purge les enregistrements du catalogue" + +#, fuzzy +#~ msgid "Query catalog" +#~ msgstr "interroger le catalogue" + +#, fuzzy +#~ msgid "Restore files" +#~ msgstr "restauration de fichier" + +#, fuzzy +#~ msgid "Relabel a tape" +#~ msgstr "re-labélise une bande" + +#, fuzzy +#~ msgid "Release storage" +#~ msgstr "Restaurer" + +#, fuzzy +#~ msgid "Reload conf file" +#~ msgstr "recharge la configuration" + +#, fuzzy +#~ msgid "Run a job" +#~ msgstr "Job en cours :\n" + +#, fuzzy +#~ msgid "Restart a job" +#~ msgstr "Sélectionnez le Job de restauration" + +#, fuzzy +#~ msgid "Resume a job" +#~ msgstr "Job en cours :\n" + +#, fuzzy +#~ msgid "Report status" +#~ msgstr "Statut :\n" + +#, fuzzy +#~ msgid "Stop a job" +#~ msgstr "Job en cours :\n" + +#, fuzzy +#~ msgid "Sets debug level" +#~ msgstr "positionne le niveau de debug" + +#, fuzzy +#~ msgid "Show resource records" +#~ msgstr "Pool à partir de sa définition" + +#, fuzzy +#~ msgid "Use SQL to query catalog" +#~ msgstr "passer des commandes SQL pour interroger le catalogue" + +#, fuzzy +#~ msgid "Print current time" +#~ msgstr "affiche la date courante" + +#, fuzzy +#~ msgid "Turn on/off trace to file" +#~ msgstr "active/désactive le fichier de trace" + +#, fuzzy +#~ msgid "Unmount storage" +#~ msgstr "unmount -- démonte un lecteur" + +#, fuzzy +#~ msgid "Umount - for old-time Unix guys, see unmount" +#~ msgstr "umount -- démonte un lecteur" + +#, fuzzy +#~ msgid "Update volume, pool or stats" +#~ msgstr "met à jour un volume, un pool ou bien des slots" + +#, fuzzy +#~ msgid "Use catalog xxx" +#~ msgstr "interroger le catalogue" + +#, fuzzy +#~ msgid "Print Director version" +#~ msgstr "affiche la version du Director" + +#, fuzzy +#~ msgid "Wait until no jobs are running" +#~ msgstr "Aucun de vos jobs ne sont en cours.\n" + +#~ msgid "%s: is an invalid command.\n" +#~ msgstr "%s : est une commande invalide.\n" + +#~ msgid "Pool already has maximum volumes=%d\n" +#~ msgstr "Le pool a déjà atteint le nombre maximum de volume=%d\n" + +#~ msgid "Enter new maximum (zero for unlimited): " +#~ msgstr "Entrez le nouveau maximum (zéro pour illimité) : " + +#~ msgid "Enter number of Volumes to create. 0=>fixed name. Max=%d: " +#~ msgstr "Entrez le nombre de Volume à créer. 0=>nom fixé. 
Max=%d : " + +#~ msgid "The number must be between 0 and %d\n" +#~ msgstr "Le nombre doit être entre 0 et %d\n" + +#~ msgid "Enter Volume name: " +#~ msgstr "Entrez le nom du Volume : " + +#~ msgid "Enter base volume name: " +#~ msgstr "Entrez le nom de base du volume : " + +#~ msgid "Volume name too long.\n" +#~ msgstr "Nom de Volume trop long.\n" + +#~ msgid "Volume name must be at least one character long.\n" +#~ msgstr "Le nom du volume doit comporter au moins un caractère\n" + +#~ msgid "Enter the starting number: " +#~ msgstr "Entrez le nombre de départ : " + +#~ msgid "Start number must be greater than zero.\n" +#~ msgstr "Le nombre de départ doit être supérieur à zéro.\n" + +#~ msgid "Enter slot (0 for none): " +#~ msgstr "Saisissez le slot (0 pour aucun) : " + +#~ msgid "InChanger? yes/no: " +#~ msgstr "InChanger ? oui/non : " + +#~ msgid "%d Volumes created in pool %s\n" +#~ msgstr "%d Volumes créés dans le pool %s\n" + +#~ msgid "Turn on or off? " +#~ msgstr "Activer ou désactiver ? (on/off) " + +#~ msgid "" +#~ "Error: Pool %s already exists.\n" +#~ "Use update to change it.\n" +#~ msgstr "" +#~ "Erreur : Pool %s est déjà défini.\n" +#~ "Utilisez update pour le changer\n" + +#~ msgid "Pool %s created.\n" +#~ msgstr "Pool %s créé.\n" + +#, fuzzy +#~ msgid "Failed to set bandwidth limit to Client.\n" +#~ msgstr "Impossible de se connecter au Client.\n" + +#, fuzzy +#~ msgid "Set Bandwidth choice:\n" +#~ msgstr "Elément à mettre à jour :\n" + +#, fuzzy +#~ msgid "Running Job" +#~ msgstr "" +#~ "\n" +#~ "Job en cours :\n" + +#, fuzzy +#~ msgid "Invalid value for limit parameter. Expecting speed.\n" +#~ msgstr "Saisie invalide. Veuillez répondre oui ou non.\n" + +#, fuzzy +#~ msgid "Enter new bandwidth limit: " +#~ msgstr "Saisissez la valeur du nombre maximum de Job : " + +#~ msgid "Unauthorized command from this console.\n" +#~ msgstr "Commande interdite depuis cette console.\n" + +#~ msgid "Client \"%s\" not found.\n" +#~ msgstr "le client \"%s\" est introuvable.\n" + +#~ msgid "Client \"%s\" address set to %s\n" +#~ msgstr "Client \"%s\" adresse positionné à %s\n" + +#, fuzzy +#~ msgid "Job \"%s\" %sabled\n" +#~ msgstr "le client \"%s\" est introuvable.\n" + +#, fuzzy +#~ msgid "Client \"%s\" %sabled\n" +#~ msgstr "le client \"%s\" est introuvable.\n" + +#, fuzzy +#~ msgid "Schedule \"%s\" %sabled\n" +#~ msgstr "le client \"%s\" est introuvable.\n" + +#~ msgid "Connecting to Storage daemon %s at %s:%d\n" +#~ msgstr "Connexion au Storage Daemon %s (%s:%d)\n" + +#~ msgid "Connected to storage daemon\n" +#~ msgstr "Connecté au Storage Daemon\n" + +#~ msgid "Enter new debug level: " +#~ msgstr "Saisissez le nouveau niveau de debug : " + +#~ msgid "Available daemons are: \n" +#~ msgstr "Les démons disponibles sont :\n" + +#~ msgid "Director" +#~ msgstr "Director" + +#~ msgid "Storage" +#~ msgstr "Stockage" + +#~ msgid "Client" +#~ msgstr "Client" + +#~ msgid "Select daemon type to set debug level" +#~ msgstr "Sélectionnez le composant a mettre à jour" + +#, fuzzy +#~ msgid "No authorization for Client \"%s\"\n" +#~ msgstr "Pas d'enregistrement trouvé en base pour : %s\n" + +#, fuzzy +#~ msgid "Client name missing.\n" +#~ msgstr "Director" + +#~ msgid "Job \"%s\" not found.\n" +#~ msgstr "Job \"%s\" non trouvé.\n" + +#, fuzzy +#~ msgid "No authorization for Job \"%s\"\n" +#~ msgstr "Pas d'enregistrement trouvé en base pour : %s\n" + +#, fuzzy +#~ msgid "Job name missing.\n" +#~ msgstr "La valeur actuelle est : %s\n" + +#, fuzzy +#~ msgid "Fileset \"%s\" not found.\n" +#~ msgstr "Le 
FileSet \"%s\" est introuvable.\n" + +#, fuzzy +#~ msgid "No authorization for FileSet \"%s\"\n" +#~ msgstr "Pas d'enregistrement trouvé en base pour : %s\n" + +#, fuzzy +#~ msgid "Fileset name missing.\n" +#~ msgstr "La valeur actuelle est : %s\n" + +#, fuzzy +#~ msgid "Level \"%s\" not valid.\n" +#~ msgstr "Le type %s est invalide.\n" + +#, fuzzy +#~ msgid "Level value missing.\n" +#~ msgstr "La valeur actuelle est : %s\n" + +#, fuzzy +#~ msgid "Invalid value for accurate. It must be yes or no.\n" +#~ msgstr "Saisie invalide. Veuillez répondre oui ou non.\n" + +#, fuzzy +#~ msgid "Accurate value missing.\n" +#~ msgstr "La valeur actuelle est : %s\n" + +#~ msgid "No job specified.\n" +#~ msgstr "Pas de job sélectionné.\n" + +#~ msgid "Error sending include list.\n" +#~ msgstr "Erreur pendant l'envoi de la liste d'inclusion.\n" + +#~ msgid "Error sending exclude list.\n" +#~ msgstr "Erreur pendant l'envoi de la liste d'exclusion.\n" + +#~ msgid "" +#~ "In general it is not a good idea to delete either a\n" +#~ "Pool or a Volume since they may contain data.\n" +#~ "\n" +#~ msgstr "" +#~ "Généralement supprimer un pool ou bien un volume\n" +#~ "n'est pas une bonne idée car ils peuvent contenir des données.\n" +#~ "\n" + +#~ msgid "Choose catalog item to delete" +#~ msgstr "Choisissez l'objet du catalogue à supprimer" + +#~ msgid "Nothing done.\n" +#~ msgstr "Rien de fait.\n" + +#, fuzzy +#~ msgid "Are you sure you want to delete %d JobIds ? (yes/no): " +#~ msgstr "Êtes vous certain de vouloir supprimer ce Pool ? (oui/non) : " + +#~ msgid "Enter JobId to delete: " +#~ msgstr "Saisissez le JobId à supprimer : " + +#, fuzzy +#~ msgid "JobId=%s and associated records deleted from the catalog.\n" +#~ msgstr "" +#~ "Le Job %s et les enregistrements associés ont été supprimés du " +#~ "catalogue.\n" + +#~ msgid "" +#~ "\n" +#~ "This command will delete volume %s\n" +#~ "and all Jobs saved on that volume from the Catalog\n" +#~ msgstr "" +#~ "\n" +#~ "Cette commande va supprimer le Volume %s\n" +#~ "et tous les Jobs sauvegardés sur celui-ci du Catalogue\n" + +#, fuzzy +#~ msgid "Are you sure you want to delete Volume \"%s\"? (yes/no): " +#~ msgstr "Êtes vous certain de vouloir supprimer ce Volume ? (oui/non) : " + +#, fuzzy +#~ msgid "Are you sure you want to delete Pool \"%s\"? (yes/no): " +#~ msgstr "Êtes vous certain de vouloir supprimer ce Pool ? (oui/non) : " + +#, fuzzy +#~ msgid "Invalid device name. %s" +#~ msgstr "Option d'écrasement (Replace) invalide : %s\n" + +#, fuzzy +#~ msgid "Unable to %s for volume \"%s\"\n" +#~ msgstr "Impossible d'écrire le marqueur EOF. 
ERR=%s\n" + +#, fuzzy +#~ msgid "Cloud choice: \n" +#~ msgstr "Elément à mettre à jour :\n" + +#, fuzzy +#~ msgid "Truncate a Volume Cache" +#~ msgstr "Entrez le nom du Volume : " + +#, fuzzy +#~ msgid "Select action to perform on Cloud" +#~ msgstr "Sélectionnez le composant a tuer" + +#~ msgid "Using Catalog name=%s DB=%s\n" +#~ msgstr "Utilisation du Catalogue name=%s DB=%s\n" + +#, fuzzy +#~ msgid "ERR: Can't open db\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#~ msgid "ERR: Job was not found\n" +#~ msgstr "ERR: Job %s non trouvé\n" + +#, fuzzy +#~ msgid "" +#~ " Command Description\n" +#~ " ======= ===========\n" +#~ msgstr "" +#~ " Commande Description\n" +#~ " ======== ===========\n" + +#, fuzzy +#~ msgid " %-13s %s\n" +#~ msgstr " %-10s %s\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Can't find %s command.\n" +#~ "\n" +#~ msgstr ": est une commande invalide.\n" + +#~ msgid "" +#~ "\n" +#~ "When at a prompt, entering a period cancels the command.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Sur une question, tapez un point (.) pour annuler la commande en cours.\n" + +#, fuzzy +#~ msgid "%s Version: %s (%s) %s %s %s %s\n" +#~ msgstr "%s Version : %s (%s) %s %s %s\n" + +#, fuzzy +#~ msgid "No authorization for Catalog \"%s\"\n" +#~ msgstr "Pas d'enregistrement trouvé en base pour : %s\n" + +#~ msgid "Could not find a Catalog resource\n" +#~ msgstr "Impossible de trouver un Catalogue\n" + +#~ msgid "Could not open catalog database \"%s\".\n" +#~ msgstr "Impossible d'ouvrir le catalogue \"%s\".\n" + +#~ msgid "Using Catalog \"%s\"\n" +#~ msgstr "Utilisation du Catalogue \"%s\"\n" + +#~ msgid ": is an invalid command.\n" +#~ msgstr ": est une commande invalide.\n" + +#, fuzzy +#~ msgid "path name missing.\n" +#~ msgstr "La valeur actuelle est : %s\n" + +#, fuzzy +#~ msgid "Failed to send command to Client.\n" +#~ msgstr "Impossible de se connecter au Client.\n" + +#, fuzzy +#~ msgid "Unable to get Job record for Job=%s\n" +#~ msgstr "Impossible de récupérer le Job du JobId=%s : ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to get last Job record for Job=%s\n" +#~ msgstr "Impossible de récupérer le Job du JobId=%s : ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to get Client record for Client=%s\n" +#~ msgstr "Impossible de récupérer le Job du JobId=%s : ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to get last Job record for Client=%s\n" +#~ msgstr "Impossible de récupérer le Job du JobId=%s : ERR=%s\n" + +#~ msgid "Unable to get Job record for JobId=%s: ERR=%s\n" +#~ msgstr "Impossible de récupérer le Job du JobId=%s : ERR=%s\n" + +#, fuzzy +#~ msgid "Unknown command: %s\n" +#~ msgstr "Erreur inconnue." + +#~ msgid "Select daemon type to make die" +#~ msgstr "Sélectionnez le composant a tuer" + +#, fuzzy +#~ msgid "Invalid argument for %s\n" +#~ msgstr "argument invalide" + +#, fuzzy +#~ msgid "Invalid argument for job\n" +#~ msgstr "argument invalide" + +#, fuzzy +#~ msgid "Unable to open the catalog.\n" +#~ msgstr "Impossible d'écrire le marqueur EOF. ERR=%s\n" + +#, fuzzy +#~ msgid "Query failed: %s. ERR=%s\n" +#~ msgstr "Erreur sur la requête : %s\n" + +#, fuzzy +#~ msgid "query keyword not found.\n" +#~ msgstr "%s ressource %s introuvable.\n" + +#, fuzzy +#~ msgid "List MediaType failed: ERR=%s\n" +#~ msgstr "fopen %s en erreur : ERR=%s\n" + +#, fuzzy +#~ msgid "List Media failed: ERR=%s\n" +#~ msgstr "fopen %s en erreur : ERR=%s\n" + +#, fuzzy +#~ msgid "List Location failed: ERR=%s\n" +#~ msgstr "Erreur sur l'ouverture du périphérique. 
ERR=%s\n" + +#~ msgid "Enter slot" +#~ msgstr "Saisissez le slot" + +#~ msgid "Expected a positive integer, got: %s\n" +#~ msgstr "Attendait un entier positif, pas : %s\n" + +#~ msgid "Invalid response. You must answer yes or no.\n" +#~ msgstr "Réponse invalide. Vous devez répondre oui ou non.\n" + +#, fuzzy +#~ msgid "Invalid Enabled value, it must be yes, no, archived, 0, 1, or 2\n" +#~ msgstr "Saisie invalide. Veuillez répondre oui ou non.\n" + +#, fuzzy +#~ msgid "Illegal character \"%c\" in a comment.\n" +#~ msgstr "Caractère illégal \"%c\" dans le nom.\n" + +#, fuzzy +#~ msgid "Comment too long.\n" +#~ msgstr "Nom trop long.\n" + +#, fuzzy +#~ msgid "Comment must be at least one character long.\n" +#~ msgstr "Le nom du volume doit comporter au moins un caractère\n" + +#~ msgid "Negative numbers not permitted\n" +#~ msgstr "Les nombres négatifs ne sont pas autorisés\n" + +#, fuzzy +#~ msgid "Range end is not integer.\n" +#~ msgstr "La valeur saisie n'est pas un nombre.\n" + +#, fuzzy +#~ msgid "Range start is not an integer.\n" +#~ msgstr "La valeur saisie n'est pas un nombre.\n" + +#~ msgid "Input value is not an integer.\n" +#~ msgstr "La valeur saisie n'est pas un nombre.\n" + +#~ msgid "Values must be be greater than zero.\n" +#~ msgstr "Les valeurs doivent être supérieurs à zéro.\n" + +#~ msgid "Slot too large.\n" +#~ msgstr "Slot trop grand.\n" + +#~ msgid "No slots in changer to scan.\n" +#~ msgstr "Pas de slot dans le magasin à scanner.\n" + +#~ msgid "No Volumes found to label, or no barcodes.\n" +#~ msgstr "Pas de volume à labéliser ou pas de codebar.\n" + +#~ msgid "Slot %d greater than max %d ignored.\n" +#~ msgstr "Le slot %d est ignoré car il est supérieur au maximum %d.\n" + +#~ msgid "No VolName for Slot=%d InChanger set to zero.\n" +#~ msgstr "Pas de volume sur le Slot %d. Mise à zéro de InChanger.\n" + +#~ msgid "Catalog record for Volume \"%s\" updated to reference slot %d.\n" +#~ msgstr "" +#~ "Mise à jour des informations du volume \"%s\" dans le catalogue (Slot=" +#~ "%d).\n" + +#~ msgid "Catalog record for Volume \"%s\" is up to date.\n" +#~ msgstr "Le volume \"%s\" est à jour dans le catalogue.\n" + +#~ msgid "Volume \"%s\" not found in catalog. Slot=%d InChanger set to zero.\n" +#~ msgstr "" +#~ "Volume \"%s\" absent du catalogue. mise à zéro de InChanger pour le Slot=" +#~ "%d.\n" + +#~ msgid "" +#~ "Volume \"%s\" has VolStatus %s. It must be Purged or Recycled before " +#~ "relabeling.\n" +#~ msgstr "" +#~ "Le volume \"%s\" (VolStatus) a le statut \"%s\". Il doit être purgé ou " +#~ "bien\n" +#~ "recyclé avant de pouvoir le re-labéliser.\n" + +#~ msgid "Enter new Volume name: " +#~ msgstr "Saisissez le nouveau nom du Volume : " + +#~ msgid "Media record for new Volume \"%s\" already exists.\n" +#~ msgstr "Le nouveau volume \"%s\" existe déjà en base.\n" + +#~ msgid "Enter slot (0 or Enter for none): " +#~ msgstr "Saisissez le slot (0 ou Entrée pour aucun) : " + +#~ msgid "Delete of Volume \"%s\" failed. ERR=%s" +#~ msgstr "Impossible de supprimer le volume \"%s\". 
ERR=%s" + +#~ msgid "Old volume \"%s\" deleted from catalog.\n" +#~ msgstr "L'ancien volume \"%s\" a été supprimé du catalogue.\n" + +#~ msgid "Requesting to mount %s ...\n" +#~ msgstr "Demande pour monter %s...\n" + +#~ msgid "Do not forget to mount the drive!!!\n" +#~ msgstr "N'oubliez pas de monter le lecteur.\n" + +#~ msgid "" +#~ "The following Volumes will be labeled:\n" +#~ "Slot Volume\n" +#~ "==============\n" +#~ msgstr "" +#~ "Les volumes suivants vont être labélisés :\n" +#~ "Slot Volume\n" +#~ "==============\n" + +#, fuzzy +#~ msgid "Do you want to label these Volumes? (yes|no): " +#~ msgstr "" +#~ "\n" +#~ "Voulez vous restaurer tous les fichiers ? (oui|non) : " + +#, fuzzy +#~ msgid "Media record for Slot %d Volume \"%s\" already exists.\n" +#~ msgstr "Le nouveau volume \"%s\" existe déjà en base.\n" + +#~ msgid "Error setting InChanger: ERR=%s" +#~ msgstr "Impossible de positionner le flag InChanger : ERR=%s" + +#~ msgid "Maximum pool Volumes=%d reached.\n" +#~ msgstr "Le nombre maximum de volume (%d) pour ce pool est atteint.\n" + +#, fuzzy +#~ msgid "Catalog record for cleaning tape \"%s\" successfully created.\n" +#~ msgstr "Le volume \"%s\" est à jour dans le catalogue.\n" + +#, fuzzy +#~ msgid "Catalog error on cleaning tape: %s" +#~ msgstr "Impossible d'ouvrir le fichier de données %s.\n" + +#~ msgid "Illegal character \"%c\" in a volume name.\n" +#~ msgstr "Caractères \"%c\" interdits dans le nom d'un volume.\n" + +#~ msgid "Sending relabel command from \"%s\" to \"%s\" ...\n" +#~ msgstr "Envoie de la commande pour re-labéliser de \"%s\" à \"%s\"...\n" + +#~ msgid "Sending label command for Volume \"%s\" Slot %d ...\n" +#~ msgstr "Demande de labélisation du volume \"%s\" Slot %d...\n" + +#, fuzzy +#~ msgid "Catalog record for Volume \"%s\", Slot %d successfully created.\n" +#~ msgstr "Le volume \"%s\" est à jour dans le catalogue.\n" + +#~ msgid "Label command failed for Volume %s.\n" +#~ msgstr "Impossible de labéliser le volume %s.\n" + +#~ msgid "Could not open SD socket.\n" +#~ msgstr "Impossible d'ouvrir la socket avec le SD.\n" + +#~ msgid "Invalid Slot number: %s\n" +#~ msgstr "Numéro de slot invalide : %s\n" + +#, fuzzy +#~ msgid "Invalid Volume name: %s. Volume skipped.\n" +#~ msgstr "Nom de Volume invalide : %s\n" + +#~ msgid "Device \"%s\" has %d slots.\n" +#~ msgstr "Le Device \"%s\" a %d slots.\n" + +#~ msgid "Pool \"%s\" resource not found for volume \"%s\"!\n" +#~ msgstr "La ressource Pool \"%s\" est introuvable pour le volume \"%s\"\n" + +#, fuzzy +#~ msgid "No Volumes found, or no barcodes.\n" +#~ msgstr "Pas de volume à labéliser ou pas de codebar.\n" + +#~ msgid "ON or OFF keyword missing.\n" +#~ msgstr "Mot clef ON ou OFF manquant.\n" + +#, fuzzy +#~ msgid "Disabled Jobs:\n" +#~ msgstr "est bloqué" + +#, fuzzy +#~ msgid "No disabled Jobs.\n" +#~ msgstr "Pas de job programmé.\n" + +#~ msgid "%s resource %s not found.\n" +#~ msgstr "%s ressource %s introuvable.\n" + +#~ msgid "Resource %s not found\n" +#~ msgstr "Ressource %s introuvable\n" + +#, fuzzy +#~ msgid "Unknown order type %s\n" +#~ msgstr "Mot clef inconnu : %s\n" + +#, fuzzy +#~ msgid "Invalid jobid argument\n" +#~ msgstr "argument invalide" + +#, fuzzy +#~ msgid "Unknown ObjectType %s\n" +#~ msgstr "Mot clef inconnu : %s\n" + +#~ msgid "Jobid %d used %d Volume(s): %s\n" +#~ msgstr "JobId %d a utilisé %d volume(s) : %s\n" + +#~ msgid "No Pool specified.\n" +#~ msgstr "Pas de Pool spécifié.\n" + +#~ msgid "Error obtaining pool ids. 
ERR=%s\n" +#~ msgstr "Erreur pendant lors de la récupération du pool. ERR=%s\n" + +#~ msgid "Pool: %s\n" +#~ msgstr "Pool : %s\n" + +#, fuzzy +#~ msgid "Unknown list keyword: %s\n" +#~ msgstr "Mot clef inconnu : %s\n" + +#~ msgid "%s is not a job name.\n" +#~ msgstr "%s n'est pas un nom de job.\n" + +#, fuzzy +#~ msgid "Could not find Pool for Job %s\n" +#~ msgstr "Impossible d'ouvrir le device %s\n" + +#, fuzzy +#~ msgid "Could not find next Volume for Job %s (Pool=%s, Level=%s).\n" +#~ msgstr "Impossible de trouver le prochain Volume pour le Job %s (%s, %s).\n" + +#, fuzzy +#~ msgid "" +#~ "The next Volume to be used by Job \"%s\" (Pool=%s, Level=%s) will be %s\n" +#~ msgstr "Le prochain Volume utilisé par le Job \"%s\" (%s, %s) sera %s\n" + +#~ msgid "Could not find next Volume for Job %s.\n" +#~ msgstr "Impossible de trouver le prochain volume pour le Job %s.\n" + +#~ msgid "Pool %s not in database. %s" +#~ msgstr "Pool %s introuvable en base. %s" + +#~ msgid "Pool %s created in database.\n" +#~ msgstr "Pool %s créé en base.\n" + +#~ msgid "You have no messages.\n" +#~ msgstr "Vous n'avez pas de messages.\n" + +#~ msgid "Choose item to prune" +#~ msgstr "Que voulez vous purger du catalogue (prune)" + +#~ msgid "Cannot prune Volume \"%s\" because it is archived.\n" +#~ msgstr "Impossible de pruner le Volume \"%s\" car il est archivé.\n" + +#, fuzzy +#~ msgid "Pruned Jobs from JobHisto catalog.\n" +#~ msgstr "Purge du catalogue (prune) de %d %s du client %s.\n" + +#, fuzzy +#~ msgid "Begin pruning Files.\n" +#~ msgstr "Début de purge des fichiers du catalogue (prune).\n" + +#~ msgid "No Files found to prune.\n" +#~ msgstr "Pas de fichier trouvé pour la purge du catalogue (prune).\n" + +#~ msgid "Pruned Files from %s Jobs for client %s from catalog.\n" +#~ msgstr "" +#~ "Purge du catalogue des fichiers (prune) de %s Jobs pour le client %s.\n" + +#, fuzzy +#~ msgid "Begin pruning Jobs older than %s.\n" +#~ msgstr "Début de purge des Jobs du catalogue (prune).\n" + +#~ msgid "Pruned %d %s for client %s from catalog.\n" +#~ msgstr "Purge du catalogue (prune) de %d %s du client %s.\n" + +#~ msgid "Jobs" +#~ msgstr "Jobs" + +#~ msgid "No Jobs found to prune.\n" +#~ msgstr "Pas de job trouvé pour la purge du catalogue (prune).\n" + +#, fuzzy +#~ msgid "Volume \"%s\"" +#~ msgstr "Fichiers du Volume" + +#, fuzzy +#~ msgid "%d expired volume%s found\n" +#~ msgstr "%s ressource %s introuvable.\n" + +#, fuzzy +#~ msgid "" +#~ "Found %d Job(s) associated with the Volume \"%s\" that will be pruned\n" +#~ msgstr "" +#~ "Il n'y a pas de job associé avec le volume \"%s\". Il doit être marqué\n" +#~ "comme purgé.\n" + +#, fuzzy +#~ msgid "Found no Job associated with the Volume \"%s\" to prune\n" +#~ msgstr "" +#~ "Il n'y a pas de job associé avec le volume \"%s\". Il doit être marqué\n" +#~ "comme purgé.\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "This command can be DANGEROUS!!!\n" +#~ "\n" +#~ "It purges (deletes) all Files from a Job,\n" +#~ "JobId, Client or Volume; or it purges (deletes)\n" +#~ "all Jobs from a Client or Volume without regard\n" +#~ "to retention periods. 
Normally you should use the\n" +#~ "PRUNE command, which respects retention periods.\n" +#~ msgstr "" +#~ "\n" +#~ "Cette commande peut être DANGEUREUSE !\n" +#~ "\n" +#~ "Elle supprime tous les enregistrements des fichiers d'un job, \n" +#~ "d'un client ou d'un volume ; ou bien elle supprime tous les jobs\n" +#~ "d'un client ou d'un volume sans s'occuper des périodes de rétention.\n" +#~ "\n" +#~ "Normalement vous devez utiliser la commande PRUNE qui respecte les " +#~ "périodes\n" +#~ "de rétention.\n" + +#~ msgid "Choose item to purge" +#~ msgstr "Choisissez l'élément à purger" + +#~ msgid "Begin purging files for Client \"%s\"\n" +#~ msgstr "Début de la purge des fichiers du client \"%s\"\n" + +#~ msgid "No Files found for client %s to purge from %s catalog.\n" +#~ msgstr "" +#~ "Pas de fichier à purger pour le client \"%s\" dans le catalogue %s.\n" + +#~ msgid "Files for %d Jobs for client \"%s\" purged from %s catalog.\n" +#~ msgstr "Fichiers de %d jobs du client \"%s\" purgé du catalogue %s.\n" + +#~ msgid "Begin purging jobs from Client \"%s\"\n" +#~ msgstr "Début de purge des jobs du client \"%s\"\n" + +#~ msgid "No Jobs found for client %s to purge from %s catalog.\n" +#~ msgstr "Pas de job à purger pour le client \"%s\" dans le catalogue %s.\n" + +#~ msgid "%d Jobs for client %s purged from %s catalog.\n" +#~ msgstr "%d jobs du client \"%s\" purgé du catalogue %s.\n" + +#~ msgid "" +#~ "\n" +#~ "Volume \"%s\" has VolStatus \"%s\" and cannot be purged.\n" +#~ "The VolStatus must be: Append, Full, Used, or Error to be purged.\n" +#~ msgstr "" +#~ "\n" +#~ "Le volume \"%s\" est en état \"%s\" et il ne peut pas être purgé.\n" +#~ "Son statut doit être : Append, Full, Used ou Error pour être purgé.\n" + +#, fuzzy +#~ msgid "%d Job%s on Volume \"%s\" purged from catalog.\n" +#~ msgstr "%d fichier%s du volume \"%s\" purgé du catalogue.\n" + +#~ msgid "" +#~ "There are no more Jobs associated with Volume \"%s\". Marking it purged.\n" +#~ msgstr "" +#~ "Il n'y a plus de job associé avec le volume \"%s\". Il est marqué purgé.\n" + +#, fuzzy +#~ msgid "Can't update volume size in the catalog\n" +#~ msgstr "Le Volume \"%s\" a été créé dans le catalogue.\n" + +#, fuzzy +#~ msgid "Unable to truncate volume \"%s\"\n" +#~ msgstr "Impossible d'écrire le marqueur EOF. ERR=%s\n" + +#~ msgid "Unable move recycled Volume in full Pool \"%s\" MaxVols=%d\n" +#~ msgstr "" +#~ "Impossible de déplacer le volume recyclé, le Pool \"%s\" est plein. " +#~ "MaxVols=%d\n" + +#~ msgid "All records pruned from Volume \"%s\"; marking it \"Purged\"\n" +#~ msgstr "" +#~ "Il n'y a pas de job associé avec le volume \"%s\". 
Il est marqué comme " +#~ "Purged.\n" + +#~ msgid "Cannot purge Volume with VolStatus=%s\n" +#~ msgstr "Impossible de purger un volume dans l'état (VolStatus) %s\n" + +#~ msgid "Could not open %s: ERR=%s\n" +#~ msgstr "Impossible d'ouvrir %s : ERR=%s\n" + +#~ msgid "Available queries:\n" +#~ msgstr "Requêtes disponibles :\n" + +#, fuzzy +#~ msgid "Invalid command line query item specified.\n" +#~ msgstr "Le job est déjà spécifié.\n" + +#~ msgid "Choose a query" +#~ msgstr "Choisissez une requête" + +#~ msgid "Could not find query.\n" +#~ msgstr "Impossible de trouver la requête.\n" + +#, fuzzy +#~ msgid "Warning prompt %d missing.\n" +#~ msgstr "JobId %s n'est pas en cours.\n" + +#~ msgid "Enter SQL query: " +#~ msgstr "Saisissez votre requête SQL : " + +#, fuzzy +#~ msgid "Add to SQL query: " +#~ msgstr "Saisissez votre requête SQL : " + +#, fuzzy +#~ msgid "\"RegexWhere\" specification not authorized.\n" +#~ msgstr "RegexWhere ou Where est déjà spécifiée.\n" + +#, fuzzy +#~ msgid "" +#~ "No Restore Job Resource found in bacula-dir.conf.\n" +#~ "You must create at least one before running this command.\n" +#~ msgstr "" +#~ "Pas de client défini. Vous devez lancer une sauvegarde avant d'utiliser " +#~ "cette commande.\n" + +#~ msgid "Restore not done.\n" +#~ msgstr "Restauration non effectuée.\n" + +#~ msgid "Unable to construct a valid BSR. Cannot continue.\n" +#~ msgstr "Impossible de générer un fichier bootstrap valide. Abandon.\n" + +#~ msgid "No files selected to be restored.\n" +#~ msgstr "Aucun fichier sélectionné pour la restauration.\n" + +#~ msgid "Bootstrap records written to %s\n" +#~ msgstr "Fichier bootstrap écrit sur %s\n" + +#~ msgid "" +#~ "\n" +#~ "1 file selected to be restored.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "1 fichier sélectionne pour la restauration.\n" +#~ "\n" + +#~ msgid "" +#~ "\n" +#~ "%s files selected to be restored.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "%s fichiers sélectionnés pour la restauration.\n" + +#, fuzzy +#~ msgid "No Client resource found!\n" +#~ msgstr "Pas de ressource \"Restore Job\" trouvée !\n" + +#, fuzzy +#~ msgid "The restore will use the following job(s) as Base\n" +#~ msgstr "Le job va utiliser les volumes suivants :\n" + +#, fuzzy +#~ msgid "Missing value for keyword: %s\n" +#~ msgstr "Argument invalide : %s\n" + +#~ msgid "List last 20 Jobs run" +#~ msgstr "Afficher les 20 derniers jobs lancés" + +#~ msgid "List Jobs where a given File is saved" +#~ msgstr "Afficher les jobs où un fichier donné a été sauvegardé" + +#~ msgid "Enter list of comma separated JobIds to select" +#~ msgstr "Saisir une liste de JobIds à sélectionner (ex : 12,4,3)" + +#~ msgid "Enter SQL list command" +#~ msgstr "Exécuter une requête SQL" + +#~ msgid "Select the most recent backup for a client" +#~ msgstr "Sélectionner la sauvegarde la plus récente pour un client" + +#~ msgid "Select backup for a client before a specified time" +#~ msgstr "" +#~ "Sélectionner la dernière sauvegarde pour un client avant une certaine date" + +#~ msgid "Enter a list of files to restore" +#~ msgstr "Saisir la liste des fichiers à restaurer" + +#~ msgid "Enter a list of files to restore before a specified time" +#~ msgstr "Saisir la liste des fichiers à restaurer avant une certaine date" + +#~ msgid "Find the JobIds of the most recent backup for a client" +#~ msgstr "Afficher les JobIds de sauvegarde les plus récents pour un client" + +#~ msgid "Find the JobIds for a backup for a client before a specified time" +#~ msgstr "Afficher les JobIds de sauvegarde avant une certaine 
date" + +#~ msgid "Enter a list of directories to restore for found JobIds" +#~ msgstr "Saisir la liste des répertoires à restaurer (pour un JobId)" + +#, fuzzy +#~ msgid "Select full restore to a specified Job date" +#~ msgstr "" +#~ "Sélectionner la dernière sauvegarde pour un client avant une certaine date" + +#~ msgid "Unknown keyword: %s\n" +#~ msgstr "Mot clef inconnu : %s\n" + +#~ msgid "Improper date format: %s\n" +#~ msgstr "Format de date invalide : %s\n" + +#~ msgid "Error: Pool resource \"%s\" does not exist.\n" +#~ msgstr "Erreur : le Pool \"%s\" n'existe pas.\n" + +#~ msgid "Error: Pool resource \"%s\" access not allowed.\n" +#~ msgstr "Erreur : l'utilisation du Pool \"%s\" n'est pas autorisé.\n" + +#~ msgid "" +#~ "\n" +#~ "First you select one or more JobIds that contain files\n" +#~ "to be restored. You will be presented several methods\n" +#~ "of specifying the JobIds. Then you will be allowed to\n" +#~ "select which files from those JobIds are to be restored.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "\n" +#~ "D'abord, vous devez sélectionner un ou plusieurs jobs (par leur JobId) " +#~ "qui\n" +#~ "contiennent les fichiers à restaurer. Il vous est présenté plusieurs " +#~ "méthodes\n" +#~ "pour choisir le bon JobId. Après, vous pourrez sélectionner les fichiers " +#~ "à\n" +#~ "restaurer parmi la liste totale des fichiers présents dans les jobs\n" +#~ "sélectionnés.\n" + +#~ msgid "To select the JobIds, you have the following choices:\n" +#~ msgstr "" +#~ "Pour sélectionner les JobIds, vous avez les possibilités suivantes :\n" + +#~ msgid "Select item: " +#~ msgstr "Choix : " + +#~ msgid "Enter Filename (no path):" +#~ msgstr "Saisissez le nom du fichier (sans le chemin) : " + +#~ msgid "Enter JobId(s), comma separated, to restore: " +#~ msgstr "Saisissez le ou les JobIds à restaurer (ex : id1,id2,id3) : " + +#~ msgid "Enter SQL list command: " +#~ msgstr "Exécuter une requête SQL : " + +#~ msgid "" +#~ "Enter file names with paths, or < to enter a filename\n" +#~ "containing a list of file names with paths, and terminate\n" +#~ "them with a blank line.\n" +#~ msgstr "" +#~ "Saisissez les répertoires complets ou bien < pour saisir le nom d'un\n" +#~ "fichier contenant la liste des répertoires et\n" +#~ "terminez la saisie par une ligne vide.\n" + +#~ msgid "Enter full filename: " +#~ msgstr "Saisissez le nom complet du fichier : " + +#~ msgid "You have already selected the following JobIds: %s\n" +#~ msgstr "Vous avez déjà sélectionné les JobIds suivants : %s\n" + +#~ msgid "" +#~ "Enter full directory names or start the name\n" +#~ "with a < to indicate it is a filename containing a list\n" +#~ "of directories and terminate them with a blank line.\n" +#~ msgstr "" +#~ "Saisissez les répertoires complets ou bien le nom d'un\n" +#~ "fichier (commençant par <) contenant la liste des répertoires et\n" +#~ "terminez la saisie par une ligne vide.\n" + +#~ msgid "Enter directory name: " +#~ msgstr "Saisissez le nom d'un répertoire : " + +#, fuzzy +#~ msgid "Enter JobId to get the state to restore: " +#~ msgstr "Saisissez le JobId à supprimer : " + +#, fuzzy +#~ msgid "Invalid JobId in list.\n" +#~ msgstr "Période invalide.\n" + +#~ msgid "You have selected the following JobIds: %s\n" +#~ msgstr "Vous avez sélectionné les JobIds suivants : %s\n" + +#~ msgid "You have selected the following JobId: %s\n" +#~ msgstr "Vous avez sélectionné le JobId suivant : %s\n" + +#~ msgid "Enter date as YYYY-MM-DD HH:MM:SS :" +#~ msgstr "Saisissez la date au format YYYY-MM-DD HH:MM:SS : " + +#~ 
msgid "Improper date format.\n" +#~ msgstr "Format de date invalide.\n" + +#~ msgid "Cannot open file %s: ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#~ msgid "Error occurred on line %d of file \"%s\"\n" +#~ msgstr "Une erreur est survenue à la ligne %d de \"%s\"\n" + +#~ msgid "No database record found for: %s\n" +#~ msgstr "Pas d'enregistrement trouvé en base pour : %s\n" + +#, fuzzy +#~ msgid "No JobId specified cannot continue.\n" +#~ msgstr "Pas de job sélectionné.\n" + +#, fuzzy +#~ msgid "No table found: %s\n" +#~ msgstr "Pas de job trouvé pour %s\n" + +#~ msgid "" +#~ "\n" +#~ "Do you want to restore all the files? (yes|no): " +#~ msgstr "" +#~ "\n" +#~ "Voulez vous restaurer tous les fichiers ? (oui|non) : " + +#, fuzzy +#~ msgid "Regex compile error: %s\n" +#~ msgstr "erreur sockopt : %s\n" + +#, fuzzy +#~ msgid "Unable to create component file %s. ERR=%s\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to fdopen component file %s. ERR=%s\n" +#~ msgstr "3910 Impossible d'ouvrir le device %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Error writing component file.\n" +#~ msgstr "Erreur pendant l'écriture du fichier bsr.\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Building directory tree for JobId(s) %s ... " +#~ msgstr "" +#~ "\n" +#~ "Analyse des répertoires pour le JobId %s..." + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%s files inserted into the tree and marked for extraction.\n" +#~ msgstr "" +#~ "\n" +#~ "1 Job, %s fichiers analysés et sélectionnés pour la restauration.\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%s files inserted into the tree.\n" +#~ msgstr "" +#~ "\n" +#~ "1 Job, %s fichiers analysés\n" + +#, fuzzy +#~ msgid "Error getting FileSet \"%s\": ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "FileSet argument: %s\n" +#~ msgstr "FileSet" + +#~ msgid "The defined FileSet resources are:\n" +#~ msgstr "Les FileSet définis sont :\n" + +#~ msgid "FileSet" +#~ msgstr "FileSet" + +#~ msgid "Select FileSet resource" +#~ msgstr "Sélectionnez le FileSet" + +#, fuzzy +#~ msgid "No FileSet found for client \"%s\".\n" +#~ msgstr "Pas d'enregistrement trouvé en base pour : %s\n" + +#, fuzzy +#~ msgid "Error getting FileSet record: %s\n" +#~ msgstr "Erreur pendant la récupération des informations sur un Volume : %s" + +#, fuzzy +#~ msgid "Pool \"%s\" not found, using any pool.\n" +#~ msgstr "Le pool \"%s\" est introuvable.\n" + +#~ msgid "No Full backup before %s found.\n" +#~ msgstr "Pas de backup Full trouvé avant %s.\n" + +#~ msgid "No jobs found.\n" +#~ msgstr "Pas de jobs trouvé.\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Warning Storage is overridden by \"%s\" on the command line.\n" +#~ msgstr "" +#~ "Attention, le storage par défaut est remplacé par \"%s\" en ligne de " +#~ "commande.\n" + +#, fuzzy +#~ msgid "Using Storage \"%s\" from MediaType \"%s\".\n" +#~ msgstr "" +#~ "Le Storage \"%s\" est introuvable, utilisation du Storage \"%s\" du " +#~ "MediaType \"%s\".\n" + +#~ msgid "" +#~ "Storage \"%s\" not found, using Storage \"%s\" from MediaType \"%s\".\n" +#~ msgstr "" +#~ "Le Storage \"%s\" est introuvable, utilisation du Storage \"%s\" du " +#~ "MediaType \"%s\".\n" + +#~ msgid "OK to run? (yes/mod/no): " +#~ msgstr "OK pour le lancement ? (oui/mod/non) : " + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Bad response: %s. You must answer yes, mod, or no.\n" +#~ "\n" +#~ msgstr "Réponse invalide. 
Vous devez répondre oui ou non.\n" + +#~ msgid "Job not run.\n" +#~ msgstr "Job non lancé.\n" + +#, fuzzy +#~ msgid "Job %s failed.\n" +#~ msgstr "Job échoué.\n" + +#~ msgid "Job queued. JobId=%s\n" +#~ msgstr "Job mis en queue. JobId=%s\n" + +#~ msgid "Job \"%s\" not found\n" +#~ msgstr "Le job \"%s\" est introuvable\n" + +#~ msgid "A job name must be specified.\n" +#~ msgstr "Un nom de Job doit être spécifié.\n" + +#, fuzzy +#~ msgid "No authorization. Job \"%s\".\n" +#~ msgstr "Pas d'enregistrement trouvé en base pour : %s\n" + +#~ msgid "Pool \"%s\" not found.\n" +#~ msgstr "Le pool \"%s\" est introuvable.\n" + +#, fuzzy +#~ msgid "No authorization. Pool \"%s\".\n" +#~ msgstr "Pas d'enregistrement trouvé en base pour : %s\n" + +#, fuzzy +#~ msgid "NextPool \"%s\" not found.\n" +#~ msgstr "Le pool \"%s\" est introuvable.\n" + +#, fuzzy +#~ msgid "No authorization. NextPool \"%s\".\n" +#~ msgstr "Pas d'enregistrement trouvé en base pour : %s\n" + +#, fuzzy +#~ msgid "Restore Client \"%s\" not found.\n" +#~ msgstr "le client \"%s\" est introuvable.\n" + +#, fuzzy +#~ msgid "No authorization. Client \"%s\".\n" +#~ msgstr "Pas d'enregistrement trouvé en base pour : %s\n" + +#~ msgid "FileSet \"%s\" not found.\n" +#~ msgstr "Le FileSet \"%s\" est introuvable.\n" + +#, fuzzy +#~ msgid "No authorization. FileSet \"%s\".\n" +#~ msgstr "Pas d'enregistrement trouvé en base pour : %s\n" + +#~ msgid "Storage \"%s\" not found.\n" +#~ msgstr "Le Storage \"%s\" est introuvable.\n" + +#, fuzzy +#~ msgid "user selection" +#~ msgstr "restauration de fichier" + +#, fuzzy +#~ msgid "No authorization. Storage \"%s\".\n" +#~ msgstr "Pas d'enregistrement trouvé en base pour : %s\n" + +#, fuzzy +#~ msgid "No JobId specified.\n" +#~ msgstr "Pas de job sélectionné.\n" + +#, fuzzy +#~ msgid "Invalid or no Job name specified.\n" +#~ msgstr "Le job est déjà spécifié.\n" + +#, fuzzy +#~ msgid "Enter the JobId list to select: " +#~ msgstr "Entrez le JobId à sélectionner : " + +#, fuzzy +#~ msgid "Could not get job record for selected JobId=%d. ERR=%s" +#~ msgstr "Impossible de récupérer le Job du JobId=%s : ERR=%s\n" + +#~ msgid "You have the following choices:\n" +#~ msgstr "Vous avez les choix suivants :\n" + +#, fuzzy +#~ msgid "Select termination code: " +#~ msgstr "Sélection terminée.\n" + +#, fuzzy +#~ msgid "Unable to use current plugin configuration, discarding it." +#~ msgstr "Impossible de lire le certificat à partir du fichier" + +#, fuzzy +#~ msgid "Plugin Restore Options\n" +#~ msgstr "Sélectionnez le Job de restauration" + +#, fuzzy +#~ msgid "Use above plugin configuration? (yes/mod/no): " +#~ msgstr "Continuer ? 
(oui/mod/non) : " + +#~ msgid "mod" +#~ msgstr "mod" + +#, fuzzy +#~ msgid "Please enter a value for %s: " +#~ msgstr "Saisissez le JobId pour la restauration : " + +#, fuzzy +#~ msgid "No plugin to configure\n" +#~ msgstr "Plugin=%s non trouvé.\n" + +#, fuzzy +#~ msgid "Plugins to configure:\n" +#~ msgstr "Plugin=%s non trouvé.\n" + +#, fuzzy +#~ msgid "Select plugin to configure" +#~ msgstr "TLS actif mais non configuré.\n" + +#, fuzzy +#~ msgid "Can't configure %32s\n" +#~ msgstr "Impossible d'ouvrir le fichier de configuration \"%s\" : %s\n" + +#~ msgid "Level" +#~ msgstr "Type" + +#, fuzzy +#~ msgid "Restore Client" +#~ msgstr "Restauration annulée" + +#~ msgid "When" +#~ msgstr "Quand" + +#~ msgid "Priority" +#~ msgstr "Priorité" + +#~ msgid "Pool" +#~ msgstr "Pool" + +#, fuzzy +#~ msgid "NextPool" +#~ msgstr "Pool" + +#~ msgid "Verify Job" +#~ msgstr "Job de vérification" + +#~ msgid "Bootstrap" +#~ msgstr "Bootstrap" + +#~ msgid "Where" +#~ msgstr "Destination" + +#, fuzzy +#~ msgid "File Relocation" +#~ msgstr "restauration de fichier" + +#~ msgid "Replace" +#~ msgstr "Ecrasement" + +#~ msgid "JobId" +#~ msgstr "JobId" + +#, fuzzy +#~ msgid "Plugin Options" +#~ msgstr "Sélectionnez le Job de restauration" + +#, fuzzy +#~ msgid "" +#~ "Please enter start time as a duration or YYYY-MM-DD HH:MM:SS or return " +#~ "for now: " +#~ msgstr "" +#~ "Saisissez la date de lancement (YYYY-MM-DD HH:MM:SS) (ou maintenant) : " + +#~ msgid "Enter new Priority: " +#~ msgstr "Saisissez la nouvelle priorité : " + +#~ msgid "Priority must be a positive integer.\n" +#~ msgstr "La priorité doit être un entier positif.\n" + +#~ msgid "Please enter the Bootstrap file name: " +#~ msgstr "Saisissez le nom du fichier Bootstrap : " + +# Impossible d'ouvrir %s : ERR=%s +#~ msgid "Warning cannot open %s: ERR=%s\n" +#~ msgstr "Impossible d'ouvrir %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Please enter the full path prefix for restore (/ for none): " +#~ msgstr "Saisissez le chemin (prefix) pour la restauration (/ pour aucun) : " + +#~ msgid "Replace:\n" +#~ msgstr "Ecrasement :\n" + +#~ msgid "Select replace option" +#~ msgstr "Saisissez l'option d'écrasement" + +#~ msgid "" +#~ "You must set the bootstrap file to NULL to be able to specify a JobId.\n" +#~ msgstr "" +#~ "Vous ne devez pas spécifié de fichier bootstrap pour pouvoir utiliser un " +#~ "JobId.\n" + +#, fuzzy +#~ msgid "Please Plugin Options string: " +#~ msgstr "Saisissez le début du chemin (prefix) à enlever : " + +#~ msgid "Invalid replace option: %s\n" +#~ msgstr "Option d'écrasement (Replace) invalide : %s\n" + +#, fuzzy +#~ msgid "Enter a regexp" +#~ msgstr "Exécuter une requête SQL : " + +#, fuzzy +#~ msgid "Please enter the path prefix to strip: " +#~ msgstr "Saisissez le début du chemin (prefix) à enlever : " + +#, fuzzy +#~ msgid "Please enter the path prefix to add (/ for none): " +#~ msgstr "Saisissez le chemin (prefix) à ajouter (/ pour aucun) : " + +#, fuzzy +#~ msgid "Please enter the file suffix to add: " +#~ msgstr "Saisissez une extention à ajouter aux fichiers : " + +#~ msgid "Please enter a valid regexp (!from!to!): " +#~ msgstr "Saisissez une regexp valide (!rechercher!remplacer!) 
: " + +#, fuzzy +#~ msgid "regexwhere=%s\n" +#~ msgstr " --> RegexWhere=%s\n" + +#~ msgid "Cannot use your regexp\n" +#~ msgstr "Impossible d'utiliser votre regexp\n" + +#~ msgid "Please enter filename to test: " +#~ msgstr "Saisissez un nom de fichier à tester : " + +#~ msgid "%s -> %s\n" +#~ msgstr "%s -> %s\n" + +#~ msgid "Cannot use your regexp.\n" +#~ msgstr "Impossible d'utiliser votre regexp.\n" + +#~ msgid "Levels:\n" +#~ msgstr "Types :\n" + +#~ msgid "Full" +#~ msgstr "Full" + +#~ msgid "Incremental" +#~ msgstr "Incrémental" + +#~ msgid "Differential" +#~ msgstr "Différentiel" + +#~ msgid "Since" +#~ msgstr "Depuis" + +#~ msgid "Select level" +#~ msgstr "Saisissez le type" + +#~ msgid "Initialize Catalog" +#~ msgstr "Initialisez le catalogue" + +#, fuzzy +#~ msgid "Verify Catalog" +#~ msgstr "interroger le catalogue" + +#, fuzzy +#~ msgid "Verify Volume Data" +#~ msgstr "Vérification des données sur le volume (pas encore implémenté)" + +#, fuzzy +#~ msgid "" +#~ "Run Admin Job\n" +#~ "JobName: %s\n" +#~ "FileSet: %s\n" +#~ "Client: %s\n" +#~ "Storage: %s\n" +#~ "When: %s\n" +#~ "Priority: %d\n" +#~ msgstr "" +#~ "Lancement du job %s\n" +#~ "JobName : %s\n" +#~ "FileSet : %s\n" +#~ "Client : %s\n" +#~ "Storage : %s\n" +#~ "Quand : %s\n" +#~ "Priorité : %d\n" + +#, fuzzy +#~ msgid "" +#~ "Run Backup job\n" +#~ "JobName: %s\n" +#~ "Level: %s\n" +#~ "Client: %s\n" +#~ "FileSet: %s\n" +#~ "Pool: %s (From %s)\n" +#~ "%sStorage: %s (From %s)\n" +#~ "When: %s\n" +#~ "Priority: %d\n" +#~ "%s%s%s" +#~ msgstr "" +#~ "Lancement du job %s\n" +#~ "JobName : %s\n" +#~ "Niveau : %s\n" +#~ "Client : %s\n" +#~ "FileSet : %s\n" +#~ "Pool : %s (Depuis %s)\n" +#~ "Storage : %s (Depuis %s)\n" +#~ "Quand : %s\n" +#~ "Priorité : %d\n" + +#, fuzzy +#~ msgid "Could not get job record for selected JobId. 
ERR=%s" +#~ msgstr "Impossible de récupérer le Job du JobId=%s : ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "Run Verify Job\n" +#~ "JobName: %s\n" +#~ "Level: %s\n" +#~ "Client: %s\n" +#~ "FileSet: %s\n" +#~ "Pool: %s (From %s)\n" +#~ "Storage: %s (From %s)\n" +#~ "Verify Job: %s\n" +#~ "Verify List: %s\n" +#~ "When: %s\n" +#~ "Priority: %d\n" +#~ msgstr "" +#~ "Lancement du job %s\n" +#~ "JobName : %s\n" +#~ "Niveau : %s\n" +#~ "Client : %s\n" +#~ "FileSet : %s\n" +#~ "Pool : %s (Depuis %s)\n" +#~ "Storage : %s (Depuis %s)\n" +#~ "Verify Job : %s\n" +#~ "Verify List: %s\n" +#~ "Quand : %s\n" +#~ "Priorité : %d\n" + +#~ msgid "Please enter a JobId for restore: " +#~ msgstr "Saisissez le JobId pour la restauration : " + +#, fuzzy +#~ msgid "User specified" +#~ msgstr "Pas de storage sélectionné.\n" + +#, fuzzy +#~ msgid "" +#~ "Run Restore job\n" +#~ "JobName: %s\n" +#~ "Bootstrap: %s\n" +#~ "RegexWhere: %s\n" +#~ "Replace: %s\n" +#~ "FileSet: %s\n" +#~ "Backup Client: %s\n" +#~ "Restore Client: %s\n" +#~ "Storage: %s\n" +#~ "When: %s\n" +#~ "Catalog: %s\n" +#~ "Priority: %d\n" +#~ "Plugin Options: %s\n" +#~ msgstr "" +#~ "Lancement de la restauration\n" +#~ "JobName : %s\n" +#~ "Bootstrap : %s\n" +#~ "RegexWhere : %s\n" +#~ "Ecrasement : %s\n" +#~ "FileSet : %s\n" +#~ "Backup Client : %s\n" +#~ "Restore Client : %s\n" +#~ "Storage : %s\n" +#~ "Quand : %s\n" +#~ "Catalogue : %s\n" +#~ "Priorité : %d\n" + +#~ msgid "" +#~ "Run Restore job\n" +#~ "JobName: %s\n" +#~ "Bootstrap: %s\n" +#~ "Where: %s\n" +#~ "Replace: %s\n" +#~ "FileSet: %s\n" +#~ "Backup Client: %s\n" +#~ "Restore Client: %s\n" +#~ "Storage: %s\n" +#~ "When: %s\n" +#~ "Catalog: %s\n" +#~ "Priority: %d\n" +#~ "Plugin Options: %s\n" +#~ msgstr "" +#~ "Lancement de la restauration\n" +#~ "JobName : %s\n" +#~ "Bootstrap : %s\n" +#~ "Déplacement : %s\n" +#~ "Ecrasement : %s\n" +#~ "FileSet : %s\n" +#~ "Backup Client : %s\n" +#~ "Restore Client : %s\n" +#~ "Storage : %s\n" +#~ "Quand : %s\n" +#~ "Catalogue : %s\n" +#~ "Priorité : %d\n" +#~ "Options Plugins: %s\n" + +#~ msgid "" +#~ "Run Restore job\n" +#~ "JobName: %s\n" +#~ "Bootstrap: %s\n" +#~ msgstr "" +#~ "Lancement de la restauration\n" +#~ "JobName : %s\n" +#~ "Bootstrap : %s\n" + +#, fuzzy +#~ msgid "RegexWhere: %s\n" +#~ msgstr " --> RegexWhere=%s\n" + +#~ msgid "Where: %s\n" +#~ msgstr "Where : %s\n" + +#~ msgid "" +#~ "Replace: %s\n" +#~ "Client: %s\n" +#~ "Storage: %s\n" +#~ "JobId: %s\n" +#~ "When: %s\n" +#~ "Catalog: %s\n" +#~ "Priority: %d\n" +#~ "Plugin Options: %s\n" +#~ msgstr "" +#~ "Ecrasement : %s\n" +#~ "Client : %s\n" +#~ "Storage : %s\n" +#~ "JobId : %s\n" +#~ "Quand : %s\n" +#~ "Catalogue : %s\n" +#~ "Priorité : %d\n" +#~ "Options Plugins : %s\n" + +#, fuzzy +#~ msgid "Run Copy job\n" +#~ msgstr "Sélectionnez le Job de restauration" + +#, fuzzy +#~ msgid "Run Migration job\n" +#~ msgstr "Sélectionnez le Job de restauration" + +#~ msgid "Unknown Job Type=%d\n" +#~ msgstr "Job du Type=%d inconnu\n" + +#, fuzzy +#~ msgid "Value missing for keyword %s\n" +#~ msgstr "Argument invalide : %s\n" + +#~ msgid "JobId specified twice.\n" +#~ msgstr "Le JobId est déjà spécifié.\n" + +#~ msgid "Client specified twice.\n" +#~ msgstr "Le client est déjà spécifié.\n" + +#~ msgid "FileSet specified twice.\n" +#~ msgstr "Le FileSet est déjà spécifié.\n" + +#~ msgid "Level specified twice.\n" +#~ msgstr "Le type (Level) est déjà spécifié.\n" + +#~ msgid "Storage specified twice.\n" +#~ msgstr "Le Storage est déjà spécifié.\n" + +#~ msgid "RegexWhere or Where specified twice.\n" +#~ 
msgstr "RegexWhere ou Where est déjà spécifiée.\n" + +#, fuzzy +#~ msgid "No authorization for \"regexwhere\" specification.\n" +#~ msgstr "La destination (Where) est déjà spécifiée.\n" + +#~ msgid "Where or RegexWhere specified twice.\n" +#~ msgstr "RegexWhere ou Where est déjà spécifiée.\n" + +#, fuzzy +#~ msgid "No authoriztion for \"where\" specification.\n" +#~ msgstr "Pas d'enregistrement trouvé en base pour : %s\n" + +#~ msgid "Bootstrap specified twice.\n" +#~ msgstr "Le bootstrap est déjà spécifié.\n" + +#~ msgid "Replace specified twice.\n" +#~ msgstr "L'option d'écrasement (Replace) est déjà spécifié.\n" + +#~ msgid "When specified twice.\n" +#~ msgstr "La planification (When) est déjà spécifiée.\n" + +#~ msgid "Priority specified twice.\n" +#~ msgstr "La priorité (Priority) est déjà spécifiée.\n" + +#~ msgid "Priority must be positive nonzero setting it to 10.\n" +#~ msgstr "" +#~ "La priorité doit être supérieure à zéro. Utilisation d'une priorité de " +#~ "10.\n" + +#, fuzzy +#~ msgid "Verify Job specified twice.\n" +#~ msgstr "Le JobId est déjà spécifié.\n" + +#, fuzzy +#~ msgid "Migration Job specified twice.\n" +#~ msgstr "Le JobId est déjà spécifié.\n" + +#~ msgid "Pool specified twice.\n" +#~ msgstr "Le pool est déjà spécifié.\n" + +#, fuzzy +#~ msgid "Restore Client specified twice.\n" +#~ msgstr "Le client est déjà spécifié.\n" + +#, fuzzy +#~ msgid "Plugin Options not yet implemented.\n" +#~ msgstr "Le client est déjà spécifié.\n" + +#, fuzzy +#~ msgid "Plugin Options specified twice.\n" +#~ msgstr "Le client est déjà spécifié.\n" + +#, fuzzy +#~ msgid "No authoriztion for \"PluginOptions\" specification.\n" +#~ msgstr "Pas d'enregistrement trouvé en base pour : %s\n" + +#, fuzzy +#~ msgid "Spool flag specified twice.\n" +#~ msgstr "Le pool est déjà spécifié.\n" + +#, fuzzy +#~ msgid "Invalid spooldata flag.\n" +#~ msgstr "Période invalide.\n" + +#, fuzzy +#~ msgid "IgnoreDuplicateCheck flag specified twice.\n" +#~ msgstr "Le pool est déjà spécifié.\n" + +#, fuzzy +#~ msgid "Invalid ignoreduplicatecheck flag.\n" +#~ msgstr "Période invalide.\n" + +#, fuzzy +#~ msgid "Accurate flag specified twice.\n" +#~ msgstr "Le pool est déjà spécifié.\n" + +#, fuzzy +#~ msgid "Invalid accurate flag.\n" +#~ msgstr "Période invalide.\n" + +#~ msgid "Job name specified twice.\n" +#~ msgstr "Le job est déjà spécifié.\n" + +#, fuzzy +#~ msgid "Media Type specified twice.\n" +#~ msgstr "L'option d'écrasement (Replace) est déjà spécifié.\n" + +#, fuzzy +#~ msgid "NextPool specified twice.\n" +#~ msgstr "Le pool est déjà spécifié.\n" + +#~ msgid "Invalid keyword: %s\n" +#~ msgstr "Argument invalide : %s\n" + +#~ msgid "Catalog \"%s\" not found\n" +#~ msgstr "Le catalogue \"%s\" est introuvable\n" + +#, fuzzy +#~ msgid "No authorization. Catalog \"%s\".\n" +#~ msgstr "Pas d'enregistrement trouvé en base pour : %s\n" + +#, fuzzy +#~ msgid "Verify Job \"%s\" not found.\n" +#~ msgstr "Job \"%s\" non trouvé.\n" + +#, fuzzy +#~ msgid "Migration Job \"%s\" not found.\n" +#~ msgstr "Job \"%s\" non trouvé.\n" + +#~ msgid "The current %s retention period is: %s\n" +#~ msgstr "La période de rétention courante %s est : %s\n" + +#, fuzzy +#~ msgid "Continue? (yes/no): " +#~ msgstr "Continuer ? (oui/mod/non) : " + +#~ msgid "Continue? (yes/mod/no): " +#~ msgstr "Continuer ? 
(oui/mod/non) : " + +#~ msgid "Enter new retention period: " +#~ msgstr "Saisissez une nouvelle période de rétention : " + +#~ msgid "Invalid period.\n" +#~ msgstr "Période invalide.\n" + +#~ msgid "The defined Storage resources are:\n" +#~ msgstr "Les ressources de Stockage définies sont :\n" + +#~ msgid "Select Storage resource" +#~ msgstr "Sélectionnez la ressource de Stockage" + +#~ msgid "The defined Catalog resources are:\n" +#~ msgstr "Les Catalogues définis sont :\n" + +#~ msgid "Catalog" +#~ msgstr "Catalogue" + +#~ msgid "Select Catalog resource" +#~ msgstr "Sélectionnez le Catalogue" + +#, fuzzy +#~ msgid "The disabled Job resources are:\n" +#~ msgstr "Les Job définis sont :\n" + +#, fuzzy +#~ msgid "The enabled Job resources are:\n" +#~ msgstr "Les Job définis sont :\n" + +#~ msgid "Select Job resource" +#~ msgstr "Sélectionnez le Job" + +#~ msgid "The defined Job resources are:\n" +#~ msgstr "Les Job définis sont :\n" + +#, fuzzy +#~ msgid "Error: Restore Job resource \"%s\" does not exist.\n" +#~ msgstr "Erreur : le Pool \"%s\" n'existe pas.\n" + +#~ msgid "The defined Restore Job resources are:\n" +#~ msgstr "Les Job de restauration sont :\n" + +#~ msgid "Select Restore Job" +#~ msgstr "Sélectionnez le Job de restauration" + +#~ msgid "The defined Client resources are:\n" +#~ msgstr "Les clients définis sont :\n" + +#, fuzzy +#~ msgid "Select Client resource" +#~ msgstr "Sélectionnez le FileSet" + +#~ msgid "Select Client (File daemon) resource" +#~ msgstr "Sélectionnez le client (File daemon)" + +#~ msgid "Error: Client resource %s does not exist.\n" +#~ msgstr "Erreur : le client %s n'est pas définie.\n" + +#, fuzzy +#~ msgid "The defined Schedule resources are:\n" +#~ msgstr "Les clients définis sont :\n" + +#, fuzzy +#~ msgid "Schedule" +#~ msgstr "" +#~ "\n" +#~ "Jobs planifiés :\n" + +#, fuzzy +#~ msgid "Select Schedule resource" +#~ msgstr "Sélectionnez le Pool" + +#~ msgid "Could not find Client %s: ERR=%s" +#~ msgstr "Impossible de trouver le client %s : ERR=%s" + +#~ msgid "Could not find Client \"%s\": ERR=%s" +#~ msgstr "Impossible de trouver le client \"%s\" : ERR=%s" + +#~ msgid "Error obtaining client ids. ERR=%s\n" +#~ msgstr "Erreur pendant l'obtention de l'identifiant du Client. ERR=%s\n" + +#~ msgid "No clients defined. You must run a job before using this command.\n" +#~ msgstr "" +#~ "Pas de client défini. Vous devez lancer une sauvegarde avant d'utiliser " +#~ "cette commande.\n" + +#~ msgid "Defined Clients:\n" +#~ msgstr "Clients définis :\n" + +#~ msgid "Select the Client" +#~ msgstr "Sélectionnez le client" + +#~ msgid "Could not find Pool \"%s\": ERR=%s" +#~ msgstr "Impossible de trouver le Pool \"%s\" : ERR=%s" + +#~ msgid "No pools defined. Use the \"create\" command to create one.\n" +#~ msgstr "" +#~ "Pas de Pool défini. 
Utilisez la commande \"create\" pour en créer un.\n" + +#~ msgid "Defined Pools:\n" +#~ msgstr "Pools définis :\n" + +#~ msgid "Select the Pool" +#~ msgstr "Sélectionnez le Pool" + +#~ msgid "No access to Pool \"%s\"\n" +#~ msgstr "Pas d'accès au Pool \"%s\"\n" + +#, fuzzy +#~ msgid "Enter a Volume name or *MediaId: " +#~ msgstr "Entrez le nom du Volume : " + +#~ msgid "The defined Pool resources are:\n" +#~ msgstr "Les Pools définis sont :\n" + +#~ msgid "Select Pool resource" +#~ msgstr "Sélectionnez le Pool" + +#~ msgid "Enter the JobId to select: " +#~ msgstr "Entrez le JobId à sélectionner : " + +#~ msgid "Could not find Job \"%s\": ERR=%s" +#~ msgstr "Impossible de trouver le Job \"%s\" : ERR=%s" + +#~ msgid "Automatically selected %s: %s\n" +#~ msgstr "Sélection automatique %s : %s\n" + +#~ msgid "Selection list for \"%s\" is empty!\n" +#~ msgstr "La sélection pour \"%s\" est vide !\n" + +#~ msgid "Automatically selected: %s\n" +#~ msgstr "Sélection automatique : %s\n" + +#~ msgid "Selection aborted, nothing done.\n" +#~ msgstr "Sélection annulée, rien de fait.\n" + +#~ msgid "Please enter a number between 1 and %d\n" +#~ msgstr "Merci de saisir un nombre entre 1 et %d\n" + +#, fuzzy +#~ msgid "Storage name given twice.\n" +#~ msgstr "Le Storage est déjà spécifié.\n" + +#~ msgid "Expecting jobid=nn command, got: %s\n" +#~ msgstr "Attendait l'option jobid=nn, pas : %s\n" + +#~ msgid "JobId %s is not running.\n" +#~ msgstr "JobId %s n'est pas en cours.\n" + +#~ msgid "Expecting job=xxx, got: %s.\n" +#~ msgstr "Attendait l'option job=xxx, pas : %s\n" + +#~ msgid "Job \"%s\" is not running.\n" +#~ msgstr "Job \"%s\" n'est pas en cours.\n" + +#~ msgid "Expecting ujobid=xxx, got: %s.\n" +#~ msgstr "Attendait l'option ujobid=xxx, pas : %s\n" + +#~ msgid "Storage resource \"%s\": not found\n" +#~ msgstr "Storage resource \"%s\" : non trouvé\n" + +#~ msgid "Enter autochanger drive[0]: " +#~ msgstr "Saisissez le numéro du lecteur de l'autochanger [0] : " + +#~ msgid "Enter autochanger slot: " +#~ msgstr "Saisissez le slot de l'autochanger [0] : " + +#, fuzzy +#~ msgid "Media Types defined in conf file:\n" +#~ msgstr "L'option d'écrasement (Replace) est déjà spécifié.\n" + +#, fuzzy +#~ msgid "Media Type" +#~ msgstr "Choisissez le type de Media" + +#~ msgid "Select the Media Type" +#~ msgstr "Choisissez le type de Media" + +#~ msgid "No Jobs running.\n" +#~ msgstr "Pas de job en cours.\n" + +#~ msgid "None of your jobs are running.\n" +#~ msgstr "Aucun de vos jobs ne sont en cours.\n" + +#, fuzzy +#~ msgid "Unauthorized command from this console for JobId=%d.\n" +#~ msgstr "Commande interdite depuis cette console.\n" + +#, fuzzy +#~ msgid "Warning Job JobId=%d is not running.\n" +#~ msgstr "Attention le Job %s n'est pas en cours. 
Continuons quand même...\n" + +#, fuzzy +#~ msgid "Confirm %s of %d Job%s (yes/no): " +#~ msgstr "Confirmez l'annulation (oui/non) : " + +#, fuzzy +#~ msgid "Unauthorized command from this console for job=%s.\n" +#~ msgstr "Commande interdite depuis cette console.\n" + +#, fuzzy +#~ msgid "Warning Job %s is not running.\n" +#~ msgstr "JobId %s n'est pas en cours.\n" + +#, fuzzy +#~ msgid "Unauthorized command from this console for ujobid=%s.\n" +#~ msgstr "Commande interdite depuis cette console.\n" + +#, fuzzy +#~ msgid "Select Job(s):\n" +#~ msgstr "Sélectionnez le Job :\n" + +#~ msgid "JobId=%s Job=%s" +#~ msgstr "JobId=%s Job=%s" + +#, fuzzy +#~ msgid "Choose Job list to %s" +#~ msgstr "Sélectionnez le Job à annuler" + +#, fuzzy +#~ msgid "Invalid argument \"action\".\n" +#~ msgstr "argument invalide" + +#, fuzzy +#~ msgid "No Volumes found to perform the command.\n" +#~ msgstr "Pas de volume à labéliser ou pas de codebar.\n" + +#, fuzzy +#~ msgid "Cannot create UA thread: %s\n" +#~ msgstr "change le répertoire courant" + +#~ msgid "You have messages.\n" +#~ msgstr "Vous avez des messages.\n" + +#, fuzzy +#~ msgid "Connecting to Storage %s at %s:%d\n" +#~ msgstr "Connexion au Storage Daemon %s (%s:%d)\n" + +#, fuzzy +#~ msgid "Failed to connect to Storage.\n" +#~ msgstr "Impossible de se connecter au Storage daemon.\n" + +#~ msgid "Status available for:\n" +#~ msgstr "Statut disponible pour :\n" + +#~ msgid "Select daemon type for status" +#~ msgstr "Saisissez le composant à afficher" + +#, fuzzy +#~ msgid "%s %sVersion: %s (%s) %s %s %s\n" +#~ msgstr "%s Version : %s (%s) %s %s %s\n" + +#, fuzzy +#~ msgid "Daemon started %s, conf reloaded %s\n" +#~ msgstr "Démon démarré depuis %s, 1 job lancé depuis cette date.\n" + +#, fuzzy +#~ msgid " Jobs: run=%d, running=%d mode=%d,%d\n" +#~ msgstr "Démon démarré depuis %s, %d jobs lancés depuis cette date.\n" + +#, fuzzy +#~ msgid " Heap: heap=%s smbytes=%s max_bytes=%s bufs=%s max_bufs=%s\n" +#~ msgstr " Heap: bytes=%s smbytes=%s max_bytes=%s bufs=%s max_bufs=%s\n" + +#, fuzzy +#~ msgid "No authorization for Storage \"%s\"\n" +#~ msgstr "Pas d'enregistrement trouvé en base pour : %s\n" + +#~ msgid "" +#~ "\n" +#~ "Failed to connect to Storage daemon %s.\n" +#~ "====\n" +#~ msgstr "" +#~ "\n" +#~ "Impossible de se connecter au Storage Daemon %s.\n" +#~ "====\n" + +#~ msgid "" +#~ "Failed to connect to Client %s.\n" +#~ "====\n" +#~ msgstr "" +#~ "Impossible de se connecter au client %s.\n" +#~ "====\n" + +#~ msgid "Connected to file daemon\n" +#~ msgstr "Connecté avec le File Daemon\n" + +#~ msgid "" +#~ "\n" +#~ "Scheduled Jobs:\n" +#~ msgstr "" +#~ "\n" +#~ "Jobs planifiés :\n" + +#, fuzzy +#~ msgid "" +#~ "Level Type Pri Scheduled Job Name " +#~ "Volume\n" +#~ msgstr "" +#~ "Type Action Pri Planification Nom " +#~ "Volume\n" + +#, fuzzy +#~ msgid "===================================================================================\n" +#~ msgstr "========================================================================\n" + +#, fuzzy +#~ msgid "" +#~ "Level Type Pri Scheduled Job Name " +#~ "Schedule\n" +#~ msgstr "" +#~ "Type Action Pri Planification Nom " +#~ "Volume\n" + +#, fuzzy +#~ msgid "=====================================================================================\n" +#~ msgstr "========================================================================\n" + +#, fuzzy +#~ msgid "%-14s\t%-8s\t%3d\t%-18s\t%-18s\t%s\n" +#~ msgstr "%-14s %-8s %3d %-18s %-18s %s\n" + +#~ msgid "%-14s %-8s %3d %-18s %-18s %s\n" +#~ msgstr "%-14s %-8s %3d 
%-18s %-18s %s\n" + +#~ msgid "No Scheduled Jobs.\n" +#~ msgstr "Pas de job programmé.\n" + +#~ msgid "" +#~ "\n" +#~ "Running Jobs:\n" +#~ msgstr "" +#~ "\n" +#~ "Job en cours :\n" + +#, fuzzy +#~ msgid "Console connected %sat %s\n" +#~ msgstr "Console connecté à %s\n" + +#, fuzzy +#~ msgid " JobId Type Level Files Bytes Name Status\n" +#~ msgstr " JobId Type Nom Statut\n" + +#~ msgid "======================================================================\n" +#~ msgstr "======================================================================\n" + +#~ msgid "is waiting execution" +#~ msgstr "est en attente d'exécution" + +#~ msgid "is running" +#~ msgstr "est en cours" + +#~ msgid "is blocked" +#~ msgstr "est bloqué" + +#~ msgid "has terminated" +#~ msgstr "est terminé" + +#, fuzzy +#~ msgid "has terminated with warnings" +#~ msgstr "Job terminés :\n" + +#, fuzzy +#~ msgid "has terminated in incomplete state" +#~ msgstr "Job terminés :\n" + +#, fuzzy +#~ msgid "has erred" +#~ msgstr "est en erreur" + +#~ msgid "has errors" +#~ msgstr "est en erreur" + +#~ msgid "has a fatal error" +#~ msgstr "est en erreur (fatale)" + +#, fuzzy +#~ msgid "has verify differences" +#~ msgstr "Vérification des différences" + +#~ msgid "has been canceled" +#~ msgstr "a été annulé" + +#, fuzzy +#~ msgid "is waiting on Client" +#~ msgstr "est en attente du client %s" + +#~ msgid "is waiting on Client %s" +#~ msgstr "est en attente du client %s" + +#, fuzzy +#~ msgid "is waiting on Storage \"%s\"" +#~ msgstr "est en attente du Storage %s" + +#, fuzzy +#~ msgid "is waiting on Storage" +#~ msgstr "est en attente du Storage %s" + +#, fuzzy +#~ msgid "is waiting on max Storage jobs" +#~ msgstr "est en attente du Storage %s" + +#, fuzzy +#~ msgid "is waiting on max Client jobs" +#~ msgstr "est en attente du client %s" + +#, fuzzy +#~ msgid "is waiting on max Job jobs" +#~ msgstr "est en attente du Storage %s" + +#, fuzzy +#~ msgid "is waiting on max total jobs" +#~ msgstr "est en attente du Storage %s" + +#, fuzzy +#~ msgid "is waiting for its start time (%s)" +#~ msgstr "attend son heure de démarrage" + +#~ msgid "is waiting for higher priority jobs to finish" +#~ msgstr "attend qu'un job plus prioritaire se termine" + +#, fuzzy +#~ msgid "is waiting for a Shared Storage device" +#~ msgstr "est en attente du Storage %s" + +#, fuzzy +#~ msgid "SD committing Data" +#~ msgstr "Spooling des données...\n" + +#, fuzzy +#~ msgid "SD despooling Data" +#~ msgstr "Spooling des données...\n" + +#, fuzzy +#~ msgid "SD despooling Attributes" +#~ msgstr "Spooling des données...\n" + +#, fuzzy +#~ msgid "Dir inserting Attributes" +#~ msgstr "Spooling des données...\n" + +#~ msgid "is in unknown state %c" +#~ msgstr "est dans un état inconnu %c" + +#~ msgid "is waiting for a mount request" +#~ msgstr "est en attente d'un montage" + +#~ msgid "is waiting for an appendable Volume" +#~ msgstr "est en attente d'un volume libre" + +#, fuzzy +#~ msgid "is waiting for Client to connect to Storage daemon" +#~ msgstr "attend que le client %s se connecte au Storage %s" + +#~ msgid "is waiting for Client %s to connect to Storage %s" +#~ msgstr "attend que le client %s se connecte au Storage %s" + +#, fuzzy +#~ msgid "%6d\t%-6s\t%-20s\t%s\t%s\n" +#~ msgstr "%6d %-6s %-20s %s\n" + +#, fuzzy +#~ msgid "%6d %-4s %-3s %10s %10s %-17s %s\n" +#~ msgstr "%6d %-6s %8s %10s %-7s %-8s %s\n" + +#~ msgid "" +#~ "No Jobs running.\n" +#~ "====\n" +#~ msgstr "" +#~ "Pas de job en cours.\n" +#~ "====\n" + +#~ msgid "No Terminated Jobs.\n" +#~ msgstr "Pas de job 
terminé.\n" + +#~ msgid "====================================================================\n" +#~ msgstr "=====================================================================\n" + +#~ msgid "\n" +#~ msgstr "\n" + +#, fuzzy +#~ msgid "add dir/file to be restored recursively, wildcards allowed" +#~ msgstr "" +#~ "marque récursivement les fichiers/répertoires pour être restaurés, les " +#~ "jokers (*) fonctionnent" + +#~ msgid "change current directory" +#~ msgstr "change le répertoire courant" + +#~ msgid "count marked files in and below the cd" +#~ msgstr "compte le nombre de fichiers marqués à partir du répertoire courant" + +#, fuzzy +#~ msgid "delete dir/file to be restored recursively in dir" +#~ msgstr "dé-sélectionne les fichiers/répertoires récursivement" + +#~ msgid "long list current directory, wildcards allowed" +#~ msgstr "liste détaillée du répertoire courant, les jocker (*) fonctionnent" + +#~ msgid "leave file selection mode" +#~ msgstr "sort de la sélection des fichiers" + +#~ msgid "estimate restore size" +#~ msgstr "estime la taille de la restauration" + +#~ msgid "same as done command" +#~ msgstr "synonyme de la commande \"done\"" + +#~ msgid "find files, wildcards allowed" +#~ msgstr "recherche des fichiers, les jokers (*) fonctionnent" + +#~ msgid "print help" +#~ msgstr "affiche l'aide" + +#~ msgid "list current directory, wildcards allowed" +#~ msgstr "" +#~ "affiche le contenu du répertoire courant, les jokers (*) fonctionnent" + +#, fuzzy +#~ msgid "list subdir in current directory, wildcards allowed" +#~ msgstr "" +#~ "affiche le contenu du répertoire courant, les jokers (*) fonctionnent" + +#~ msgid "list the marked files in and below the cd" +#~ msgstr "liste les fichiers marqués à partir du répertoire courant" + +#, fuzzy +#~ msgid "list the marked files in" +#~ msgstr "liste les fichiers marqués à partir du répertoire courant" + +#~ msgid "mark dir/file to be restored recursively, wildcards allowed" +#~ msgstr "" +#~ "marque récursivement les fichiers/répertoires pour être restaurés, les " +#~ "jokers (*) fonctionnent" + +#~ msgid "mark directory name to be restored (no files)" +#~ msgstr "marque un répertoire (seulement) pour la restauration" + +#~ msgid "print current working directory" +#~ msgstr "affiche le répertoire courant" + +#~ msgid "unmark dir/file to be restored recursively in dir" +#~ msgstr "dé-sélectionne les fichiers/répertoires récursivement" + +#~ msgid "unmark directory name only no recursion" +#~ msgstr "dé-sélectionne seulement un répertoire" + +#~ msgid "quit and do not do restore" +#~ msgstr "quitte et annule la restauration" + +#~ msgid "" +#~ "\n" +#~ "You are now entering file selection mode where you add (mark) and\n" +#~ "remove (unmark) files to be restored. No files are initially added, " +#~ "unless\n" +#~ "you used the \"all\" keyword on the command line.\n" +#~ "Enter \"done\" to leave this mode.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Vous êtes dans le mode de sélection des fichiers ou vous devez ajouter " +#~ "(mark)\n" +#~ "et supprimez (unmark) les fichiers à restaurer. Aucun fichier n'est " +#~ "sélectionné\n" +#~ "par défaut sauf si vous avez utilisé l'option \"all\" dans la commande.\n" +#~ "Terminez votre saisie par \"done\"\n" +#~ "\n" + +#~ msgid "cwd is: %s\n" +#~ msgstr "Le répertoire courant est : %s\n" + +#, fuzzy +#~ msgid "Invalid command \"%s\". Enter \"done\" to exit.\n" +#~ msgstr "Commande invalide. 
Tapez \"done\" pour quitter.\n" + +#~ msgid "No files marked.\n" +#~ msgstr "Aucun fichier sélectionné.\n" + +#~ msgid "1 file marked.\n" +#~ msgstr "1 fichier sélectionné.\n" + +#~ msgid "%s files marked.\n" +#~ msgstr "%s fichiers sélectionnés.\n" + +#~ msgid "No directories marked.\n" +#~ msgstr "Pas de répertoire sélectionné.\n" + +#~ msgid "1 directory marked.\n" +#~ msgstr "1 répertoire sélectionné.\n" + +#~ msgid "%s directories marked.\n" +#~ msgstr "%s répertoires sélectionnés.\n" + +#~ msgid "%s total files/dirs. %s marked to be restored.\n" +#~ msgstr "" +#~ "%s fichiers/répertoires au total. %s sélectionné pour la restauration.\n" + +#, fuzzy +#~ msgid "No file specification given.\n" +#~ msgstr "Le client est déjà spécifié.\n" + +#~ msgid "Node %s has no children.\n" +#~ msgstr "Le noeud %s n'a pas de fils.\n" + +#~ msgid "%d total files; %d marked to be restored; %s bytes.\n" +#~ msgstr "" +#~ "%d fichiers en tout ; %d marqués pour la restauration ; %s octets.\n" + +#~ msgid "" +#~ " Command Description\n" +#~ " ======= ===========\n" +#~ msgstr "" +#~ " Commande Description\n" +#~ " ======== ===========\n" + +#~ msgid "Too few or too many arguments. Try using double quotes.\n" +#~ msgstr "" +#~ "Trop ou pas assez d'arguments sur la commande. Essayez d'utiliser des " +#~ "\"\"\"\n" + +#, fuzzy +#~ msgid "Invalid path given.\n" +#~ msgstr "Période invalide.\n" + +#~ msgid "No files unmarked.\n" +#~ msgstr "Pas de fichier dé-sélectionné.\n" + +#~ msgid "1 file unmarked.\n" +#~ msgstr "1 fichier dé-sélectionné.\n" + +#~ msgid "%s files unmarked.\n" +#~ msgstr "%s fichiers dé-sélectionnés.\n" + +#~ msgid "No directories unmarked.\n" +#~ msgstr "Pas de répertoire dé-sélectionné\n" + +#~ msgid "1 directory unmarked.\n" +#~ msgstr "1 répertoire dé-sélectionné\n" + +#~ msgid "%d directories unmarked.\n" +#~ msgstr "%d répertoires dé-sélectionnés.\n" + +#~ msgid "Update choice:\n" +#~ msgstr "Elément à mettre à jour :\n" + +#~ msgid "Volume parameters" +#~ msgstr "Paramètres d'un volume" + +#~ msgid "Pool from resource" +#~ msgstr "Pool à partir de sa définition" + +#~ msgid "Slots from autochanger" +#~ msgstr "Slots d'un autochangeur" + +#, fuzzy +#~ msgid "Long term statistics" +#~ msgstr "Spooling des données...\n" + +#, fuzzy +#~ msgid "Snapshot parameters" +#~ msgstr "Paramètres d'un volume" + +#~ msgid "item" +#~ msgstr "item" + +#~ msgid "Choose catalog item to update" +#~ msgstr "Choisissez l'élément à mettre à jour" + +#, fuzzy +#~ msgid "Invalid VolStatus specified: %s\n" +#~ msgstr "Durée d'utilisation invalide : %s\n" + +#~ msgid "New Volume status is: %s\n" +#~ msgstr "Le statut du volume est : %s\n" + +#, fuzzy +#~ msgid "Invalid cache retention period specified: %s\n" +#~ msgstr "Période de rétention invalide : %s\n" + +#, fuzzy +#~ msgid "New Cache Retention period is: %s\n" +#~ msgstr "La nouvelle période de rétention est : %s\n" + +#~ msgid "Invalid use duration specified: %s\n" +#~ msgstr "Durée d'utilisation invalide : %s\n" + +#~ msgid "New use duration is: %s\n" +#~ msgstr "La nouvelle durée d'utilisation est : %s\n" + +#, fuzzy +#~ msgid "New max jobs is: %s\n" +#~ msgstr "Le nombre maximum de Job actuel est : %u\n" + +#, fuzzy +#~ msgid "New max files is: %s\n" +#~ msgstr "Le nombre maximum de fichier actuel est : %u\n" + +#, fuzzy +#~ msgid "Invalid max. bytes specification: %s\n" +#~ msgstr "Option d'écrasement (Replace) invalide : %s\n" + +#, fuzzy +#~ msgid "New Max bytes is: %s\n" +#~ msgstr "Le nouveau flag Enabled est : %d\n" + +#~ msgid "Invalid value. 
It must be yes or no.\n" +#~ msgstr "Saisie invalide. Veuillez répondre oui ou non.\n" + +#, fuzzy +#~ msgid "New Recycle flag is: %s\n" +#~ msgstr "Le nouveau RecyclePool est : %s\n" + +#, fuzzy +#~ msgid "New InChanger flag is: %s\n" +#~ msgstr "Le nouveau flag InChanger est : %d\n" + +#~ msgid "Invalid slot, it must be between 0 and MaxVols=%d\n" +#~ msgstr "Slot invalide, il doit être compris entre 0 et MaxVols=%d\n" + +#, fuzzy +#~ msgid "Error updating media record Slot: ERR=%s" +#~ msgstr "Impossible de mettre à jour les informations du Volume : ERR=%s" + +#~ msgid "New Slot is: %d\n" +#~ msgstr "Le nouveau slot est : %d\n" + +#~ msgid "New Pool is: %s\n" +#~ msgstr "Le nouveau pool est : %s\n" + +#~ msgid "New RecyclePool is: %s\n" +#~ msgstr "Le nouveau RecyclePool est : %s\n" + +#, fuzzy +#~ msgid "Error updating Volume record: ERR=%s" +#~ msgstr "Impossible de mettre à jour les informations du Volume : ERR=%s" + +#, fuzzy +#~ msgid "Error updating Volume records: ERR=%s" +#~ msgstr "Impossible de mettre à jour les informations du Volume : ERR=%s" + +#, fuzzy +#~ msgid "Error updating media record Enabled: ERR=%s" +#~ msgstr "Impossible de mettre à jour les informations du Volume : ERR=%s" + +#, fuzzy +#~ msgid "New Enabled is: %d\n" +#~ msgstr "Le nouveau flag Enabled est : %d\n" + +#, fuzzy +#~ msgid "Error updating media record ActionOnPurge: ERR=%s" +#~ msgstr "Impossible de mettre à jour les informations du Volume : ERR=%s" + +#, fuzzy +#~ msgid "New ActionOnPurge is: %s\n" +#~ msgstr "La nouvelle durée d'utilisation est : %s\n" + +#~ msgid "Volume Status" +#~ msgstr "Statut d'un volume" + +#~ msgid "Volume Retention Period" +#~ msgstr "Période de rétention d'un volume" + +#~ msgid "Volume Use Duration" +#~ msgstr "Durée d'utilisation d'un volume" + +#~ msgid "Maximum Volume Jobs" +#~ msgstr "Nombre maximum de job sur un volume" + +#~ msgid "Maximum Volume Files" +#~ msgstr "Nombre maximum de fichier sur un volume" + +#~ msgid "Maximum Volume Bytes" +#~ msgstr "Taille maximum d'un volume" + +#~ msgid "Recycle Flag" +#~ msgstr "Flag de recyclage" + +#~ msgid "InChanger Flag" +#~ msgstr "Flag InChanger" + +#~ msgid "Volume Files" +#~ msgstr "Fichiers du Volume" + +#, fuzzy +#~ msgid "Volume from Pool" +#~ msgstr "Nom de Volume trop long.\n" + +#, fuzzy +#~ msgid "Enabled" +#~ msgstr "n'est pas activé (Enabled)" + +#~ msgid "RecyclePool" +#~ msgstr "RecyclePool" + +#, fuzzy +#~ msgid "Action On Purge" +#~ msgstr "La nouvelle durée d'utilisation est : %s\n" + +#, fuzzy +#~ msgid "Cache Retention" +#~ msgstr "Période de rétention d'un volume" + +#~ msgid "Updating Volume \"%s\"\n" +#~ msgstr "Mise à jour du Volume \"%s\"\n" + +#~ msgid "Current Volume status is: %s\n" +#~ msgstr "Le statut actuel du volume (Volume status) est : %s\n" + +#~ msgid "Possible Values are:\n" +#~ msgstr "Les valeurs possibles sont :\n" + +#~ msgid "Choose new Volume Status" +#~ msgstr "Saisissez le nouveau statut du volume (Volume Status)" + +#~ msgid "Enter Volume Retention period: " +#~ msgstr "Saisissez la période de rétention du volume : " + +#~ msgid "Current use duration is: %s\n" +#~ msgstr "La durée d'utilisation actuelle est : %s\n" + +#~ msgid "Enter Volume Use Duration: " +#~ msgstr "Saisissez la durée d'utilisation du volume : " + +#~ msgid "Current max jobs is: %u\n" +#~ msgstr "Le nombre maximum de Job actuel est : %u\n" + +#~ msgid "Enter new Maximum Jobs: " +#~ msgstr "Saisissez la valeur du nombre maximum de Job : " + +#~ msgid "Current max files is: %u\n" +#~ msgstr "Le nombre maximum 
de fichier actuel est : %u\n" + +#~ msgid "Enter new Maximum Files: " +#~ msgstr "Saisissez la valeur du nombre maximum de fichier (Maximum Files) : " + +#~ msgid "Current value is: %s\n" +#~ msgstr "La valeur actuelle est : %s\n" + +#~ msgid "Enter new Maximum Bytes: " +#~ msgstr "Saisissez la nouvelle taille maximum (octets) : " + +#~ msgid "Current recycle flag is: %s\n" +#~ msgstr "Le flag de recyclage courant est : %s\n" + +#, fuzzy +#~ msgid "Enter new Recycle status: " +#~ msgstr "Saisissez le nouveau RecyclePool : " + +#~ msgid "Current Slot is: %d\n" +#~ msgstr "Le slot courant est : %d\n" + +#~ msgid "Enter new Slot: " +#~ msgstr "Saisissez le nouveau slot : " + +#~ msgid "Current InChanger flag is: %d\n" +#~ msgstr "Le flag InChanger courant est : %d\n" + +#, fuzzy +#~ msgid "Set InChanger flag for Volume \"%s\": yes/no: " +#~ msgstr "Positionner le flag InChanger ? oui/non : " + +#~ msgid "New InChanger flag is: %d\n" +#~ msgstr "Le nouveau flag InChanger est : %d\n" + +#~ msgid "" +#~ "Warning changing Volume Files can result\n" +#~ "in loss of data on your Volume\n" +#~ "\n" +#~ msgstr "" +#~ "Attention, changer le nombre de fichier du Volume peut\n" +#~ "vous faire perdre des données du Volume\n" +#~ "\n" + +#~ msgid "Current Volume Files is: %u\n" +#~ msgstr "Le nombre courant de fichier sur le Volume est : %u\n" + +#~ msgid "Enter new number of Files for Volume: " +#~ msgstr "Saisissez le nouveau nombre de fichiers du Volume : " + +#~ msgid "Normally, you should only increase Volume Files by one!\n" +#~ msgstr "" +#~ "Logiquement, vous devez augmenter le nombre de fichier du Volume d'un !\n" + +#, fuzzy +#~ msgid "Increase Volume Files? (yes/no): " +#~ msgstr "Le nouveau nombre de fichier du Volume est : %u\n" + +#~ msgid "New Volume Files is: %u\n" +#~ msgstr "Le nouveau nombre de fichier du Volume est : %u\n" + +#~ msgid "Current Pool is: %s\n" +#~ msgstr "Le pool courant est : %s\n" + +#~ msgid "Enter new Pool name: " +#~ msgstr "Saisissez le nouveau nom pour ce pool : " + +#~ msgid "Current Enabled is: %d\n" +#~ msgstr "La valeur actuelle de Enabled est : %d\n" + +#~ msgid "Enter new Enabled: " +#~ msgstr "Saisissez la nouvelle valeur pour Enabled : " + +#~ msgid "Current RecyclePool is: %s\n" +#~ msgstr "Le RecyclePool courant est : %s\n" + +#~ msgid "No current RecyclePool\n" +#~ msgstr "Pas de RecyclePool courant\n" + +#, fuzzy +#~ msgid "Current ActionOnPurge is: %s\n" +#~ msgstr "La valeur actuelle est : %s\n" + +#, fuzzy +#~ msgid "Current Cache Retention period is: %s\n" +#~ msgstr "La période de rétention actuelle est : %s\n" + +#, fuzzy +#~ msgid "Enter Cache Retention period: " +#~ msgstr "Saisissez une nouvelle période de rétention : " + +#~ msgid "db_update_pool_record returned %d. ERR=%s\n" +#~ msgstr "db_update_pool_record a retourné %d. ERR=%s\n" + +#~ msgid "Pool DB record updated from resource.\n" +#~ msgstr "" +#~ "Les paramètres du Pool en base ont été mis à jour depuis la " +#~ "configuration.\n" + +#, fuzzy +#~ msgid "Expect JobId keyword, not found.\n" +#~ msgstr "%s ressource %s introuvable.\n" + +#, fuzzy +#~ msgid "Update failed. 
Job not authorized on this console\n" +#~ msgstr "Commande interdite depuis cette console.\n" + +#, fuzzy +#~ msgid "Neither Client, StartTime or Priority specified.\n" +#~ msgstr "Pas de storage sélectionné.\n" + +#, fuzzy +#~ msgid "Job not found.\n" +#~ msgstr "Job %s non trouvé\n" + +#, fuzzy +#~ msgid "Start Virtual Backup JobId %s, Job=%s\n" +#~ msgstr "Démarrage du backup JobId %s, Job=%s\n" + +#, fuzzy +#~ msgid "No valid Jobs found from user selection.\n" +#~ msgstr "Pas de job trouvé pour : %s.\n" + +#, fuzzy +#~ msgid "Using user supplied JobIds=%s\n" +#~ msgstr "Migration utilisant JobId=%s Job=%s\n" + +#, fuzzy +#~ msgid "No previous Jobs found.\n" +#~ msgstr "Aucun Job trouvé pour la migration.\n" + +#, fuzzy +#~ msgid "Error getting Job record for previous Job: ERR=%s" +#~ msgstr "Impossible de récupérer le Job du JobId=%s : ERR=%s\n" + +#~ msgid "Backup OK -- with warnings" +#~ msgstr "Backup OK -- avec des erreurs" + +#, fuzzy +#~ msgid "" +#~ "%s %s %s (%s):\n" +#~ " Build OS: %s %s %s\n" +#~ " JobId: %d\n" +#~ " Job: %s\n" +#~ " Backup Level: Virtual Full\n" +#~ " Client: \"%s\" %s\n" +#~ " FileSet: \"%s\" %s\n" +#~ " Pool: \"%s\" (From %s)\n" +#~ " Catalog: \"%s\" (From %s)\n" +#~ " Storage: \"%s\" (From %s)\n" +#~ " Scheduled time: %s\n" +#~ " Start time: %s\n" +#~ " End time: %s\n" +#~ " Elapsed time: %s\n" +#~ " Priority: %d\n" +#~ " SD Files Written: %s\n" +#~ " SD Bytes Written: %s (%sB)\n" +#~ " Rate: %.1f KB/s\n" +#~ " Volume name(s): %s\n" +#~ " Volume Session Id: %d\n" +#~ " Volume Session Time: %d\n" +#~ " Last Volume Bytes: %s (%sB)\n" +#~ " SD Errors: %d\n" +#~ " SD termination status: %s\n" +#~ " Termination: %s\n" +#~ "\n" +#~ msgstr "" +#~ "Bacula %s %s (%s): %s\n" +#~ " Build OS : %s %s %s\n" +#~ " JobId : %d\n" +#~ " Job : %s\n" +#~ " Niveau de backup : %s%s\n" +#~ " Client : \"%s\" %s\n" +#~ " FileSet : \"%s\" %s\n" +#~ " Pool : \"%s\" (Depuis %s)\n" +#~ " Storage : \"%s\" (Depuis %s)\n" +#~ " Date prévue : %s\n" +#~ " Date de début : %s\n" +#~ " Date de fin : %s\n" +#~ " Temps écoulé : %s\n" +#~ " Priorité : %d\n" +#~ " Fichiers écrits FD : %s\n" +#~ " Fichiers écrits SD : %s\n" +#~ " Octets écrits FD : %s (%so)\n" +#~ " Octets écrits SD : %s (%so)\n" +#~ " Débit : %.1f Ko/s\n" +#~ " Compression logicielle : %s\n" +#~ " Nom des Volumes : %s\n" +#~ " Volume Session Id : %d\n" +#~ " Volume Session date : %d\n" +#~ " Taille du volume : %s (%so)\n" +#~ " Erreurs FD non fatales : %d\n" +#~ " Erreurs du SD : %d\n" +#~ " Statut de fin du FD : %s\n" +#~ " Statut de fin du SD : %s\n" +#~ " Statut de fin : %s\n" + +#~ msgid "" +#~ "Unable to find JobId of previous InitCatalog Job.\n" +#~ "Please run a Verify with Level=InitCatalog before\n" +#~ "running the current Job.\n" +#~ msgstr "" +#~ "Impossible de trouvé JobId d'un précédent Job \"InitCatalog.\n" +#~ "Il faut lancer un Job Verify avec l'option Level=InitCatalog avant\n" +#~ "de lancer le Job courant.\n" + +#~ msgid "Unable to find JobId of previous Job for this client.\n" +#~ msgstr "Impossible de trouver JobId d'un précédent Job pour ce client.\n" + +#, fuzzy +#~ msgid "Could not get job record for previous Job. ERR=%s" +#~ msgstr "Impossible de récupérer le Job du JobId=%s : ERR=%s\n" + +#~ msgid "Last Job %d did not terminate normally. JobStatus=%c\n" +#~ msgstr "Le dernier job %d ne s'est pas terminé correctement. 
JobStatus=%c\n" + +#, fuzzy +#~ msgid "Verifying against JobId=%d Job=%s\n" +#~ msgstr "Migration utilisant JobId=%s Job=%s\n" + +#, fuzzy +#~ msgid "Could not get fileset record from previous Job. ERR=%s" +#~ msgstr "Impossible de récupérer le Job du JobId=%s : ERR=%s\n" + +#, fuzzy +#~ msgid "Could not find FileSet resource \"%s\" from previous Job\n" +#~ msgstr "Impossible de trouver le prochain volume pour le Job %s.\n" + +#, fuzzy +#~ msgid "Using FileSet \"%\"\n" +#~ msgstr "Utilisation du Catalogue \"%s\"\n" + +#, fuzzy +#~ msgid "Could not get FileSet resource for verify Job." +#~ msgstr "Impossible de trouver la ressource Storage \"%s\"\n" + +#, fuzzy +#~ msgid "Start Verify JobId=%s Level=%s Job=%s\n" +#~ msgstr "Démarrage du backup JobId %s, Job=%s\n" + +#, fuzzy +#~ msgid "Verify OK -- with warnings" +#~ msgstr "OK -- avec des avertissements" + +#~ msgid "Verify OK" +#~ msgstr "Vérification OK" + +#~ msgid "*** Verify Error ***" +#~ msgstr "*** Erreur de Vérification ***" + +#, fuzzy +#~ msgid "Verify warnings" +#~ msgstr "Vérification" + +#~ msgid "Verify Canceled" +#~ msgstr "Vérification annulée" + +#, fuzzy +#~ msgid "Verify Differences" +#~ msgstr "Vérification des différences" + +#, fuzzy +#~ msgid "" +#~ "%s %s %s (%s):\n" +#~ " Build OS: %s %s %s\n" +#~ " JobId: %d\n" +#~ " Job: %s\n" +#~ " FileSet: %s\n" +#~ " Verify Level: %s\n" +#~ " Client: %s\n" +#~ " Verify JobId: %d\n" +#~ " Verify Job: %s\n" +#~ " Start time: %s\n" +#~ " End time: %s\n" +#~ " Elapsed time: %s\n" +#~ " Accurate: %s\n" +#~ " Files Expected: %s\n" +#~ " Files Examined: %s\n" +#~ " Non-fatal FD errors: %d\n" +#~ " SD Errors: %d\n" +#~ " FD termination status: %s\n" +#~ " SD termination status: %s\n" +#~ " Termination: %s\n" +#~ "\n" +#~ msgstr "" +#~ "%s %s %s (%s): %s\n" +#~ " Build OS: %s %s %s\n" +#~ " JobId : %d\n" +#~ " Job : %s\n" +#~ " FileSet: %s\n" +#~ " Client : %s\n" +#~ " Début : %s\n" +#~ " Fin : %s\n" +#~ " Fichiers attendus : %s\n" +#~ " Fichiers restaurés : %s\n" +#~ " Octets restaurés : %s\n" +#~ " Débit : %.1f Ko/s\n" +#~ " Erreurs du FD : %d\n" +#~ " Statut de fin du FD : %s\n" +#~ " Statut de fin du SD : %s\n" +#~ " Etat : %s\n" + +#, fuzzy +#~ msgid "" +#~ "%s %s %s (%s):\n" +#~ " Build: %s %s %s\n" +#~ " JobId: %d\n" +#~ " Job: %s\n" +#~ " FileSet: %s\n" +#~ " Verify Level: %s\n" +#~ " Client: %s\n" +#~ " Verify JobId: %d\n" +#~ " Verify Job: %s\n" +#~ " Start time: %s\n" +#~ " End time: %s\n" +#~ " Elapsed time: %s\n" +#~ " Files Examined: %s\n" +#~ " Non-fatal FD errors: %d\n" +#~ " FD termination status: %s\n" +#~ " Termination: %s\n" +#~ "\n" +#~ msgstr "" +#~ "%s %s (%s) : %s\n" +#~ " Build: %s %s %s\\n\"\n" +#~ " JobId : %d\n" +#~ " Job : %s\n" +#~ " FileSet : %s\n" +#~ " Client : %s\n" +#~ " Début : %s\n" +#~ " Fin : %s\n" +#~ " Fichiers attendus : %s\n" +#~ " Fichiers restaurés : %s\n" +#~ " Octets restaurés : %s\n" +#~ " Débit : %.1f Ko/s\n" +#~ " Erreurs du FD : %d\n" +#~ " Statut de fin du FD : %s\n" +#~ " Statut de fin du SD : %s\n" +#~ " Etat : %s\n" + +#~ msgid "New file: %s\n" +#~ msgstr "Nouveau Fichier : %s\n" + +#~ msgid "File not in catalog: %s\n" +#~ msgstr "Fichier absent du catalogue : %s\n" + +#, fuzzy +#~ msgid "The following files are in the Catalog but not on %s:\n" +#~ msgstr "" +#~ "Les fichiers suivants sont dans le catalogue mais absents du disque :\n" + +#~ msgid "File: %s\n" +#~ msgstr "Fichier : %s\n" + +#, fuzzy +#~ msgid "The following files were in the Catalog, but not in the Job data:\n" +#~ msgstr "" +#~ "Les fichiers suivants sont dans le 
catalogue mais absents du disque :\n" + +#, fuzzy +#~ msgid " %s\n" +#~ msgstr " --> Target=%s\n" + +#, fuzzy +#~ msgid "Cannot verify checksum for %s\n" +#~ msgstr "Impossible de trouver la ressource Schedule \"%s\"\n" + +#, fuzzy +#~ msgid "%s digest initialization failed\n" +#~ msgstr "Initialisation de la connexion TLS échouée.\n" + +#, fuzzy +#~ msgid "2991 Bad accurate command\n" +#~ msgstr "2991 Erreur dans la commande setdebug : %s\n" + +#~ msgid "Incorrect password given by Director at %s.\n" +#~ msgstr "Password incorrect donné par le Director à %s.\n" + +#, fuzzy +#~ msgid "Had %ld acl errors while doing backup\n" +#~ msgstr "Saisir la liste des fichiers à restaurer" + +#, fuzzy +#~ msgid "Had %ld xattr errors while doing backup\n" +#~ msgstr "Saisir la liste des fichiers à restaurer" + +#~ msgid " Could not access \"%s\": ERR=%s\n" +#~ msgstr " Impossible d'acceder à \"%s\" : ERR=%s\n" + +#~ msgid " Could not follow link \"%s\": ERR=%s\n" +#~ msgstr " Impossible de suivre le lien \"%s\" : ERR=%s\n" + +#~ msgid " Could not stat \"%s\": ERR=%s\n" +#~ msgstr " Impossible d'acceder à \"%s\" : ERR=%s\\n\n" + +#, fuzzy +#~ msgid " Unchanged file skipped: %s\n" +#~ msgstr " Type de fichier inconnu %d ; non sauvé : %s\n" + +#, fuzzy +#~ msgid " Archive file not saved: %s\n" +#~ msgstr " Type de fichier inconnu %d ; non sauvé : %s\n" + +#~ msgid " Could not open directory \"%s\": ERR=%s\n" +#~ msgstr " Impossible d'ouvrir le répertoire \"%s\" : ERR=%s\n" + +#~ msgid " Unknown file type %d; not saved: %s\n" +#~ msgstr " Type de fichier inconnu %d ; non sauvé : %s\n" + +#~ msgid " Cannot open \"%s\": ERR=%s.\n" +#~ msgstr " Impossible d'ouvrir \"%s\" : ERR=%s.\n" + +#, fuzzy +#~ msgid "Network send error to SD. ERR=%s\n" +#~ msgstr "Erreur dans l'exécution de la commande : %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Read error on file %s. ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Encryption error\n" +#~ msgstr "Erreur inconnue." + +#, fuzzy +#~ msgid "Network send error to SD. Data=%s ERR=%s\n" +#~ msgstr "Erreur dans l'exécution de la commande : %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Compression deflateParams error: %d\n" +#~ msgstr "Erreur de décompression. ERR=%d\n" + +#, fuzzy +#~ msgid " Cannot open resource fork for \"%s\": ERR=%s.\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Compression deflate error: %d\n" +#~ msgstr "Erreur de décompression. ERR=%d\n" + +#, fuzzy +#~ msgid "Compression deflateReset error: %d\n" +#~ msgstr "Erreur de décompression. ERR=%d\n" + +#, fuzzy +#~ msgid "Compression LZO error: %d\n" +#~ msgstr "Erreur de décompression. ERR=%d\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: bfdjson [options] [config_file]\n" +#~ " -r get resource type \n" +#~ " -n get resource \n" +#~ " -l get only directives matching dirs (use with -r)\n" +#~ " -D get only data\n" +#~ " -c use as configuration file\n" +#~ " -d set debug level to \n" +#~ " -dt print a timestamp in debug output\n" +#~ " -t test configuration file and exit\n" +#~ " -v verbose user messages\n" +#~ " -? 
print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Version : %s (%s)\n" +#~ "\n" +#~ "Usage : bacula-fd [-f -s] [-c config_file] [-d debug_level]\n" +#~ " -c utilise fich comme fichier de configuration\n" +#~ " -d positionne le niveau de debug à nn\n" +#~ " -dt affiche un timestamp sur chaque ligne de debug\n" +#~ " -f reste en avant-plan (pour debugger)\n" +#~ " -g groupid\n" +#~ " -s pas de signaux\n" +#~ " -t test - lit seulement le fichier de configuration\n" +#~ " -u userid\n" +#~ " -v affiche les messages utilisateurs\n" +#~ " -? affiche ce message.\n" +#~ "\n" + +#, fuzzy +#~ msgid "" +#~ "No File daemon resource defined in %s\n" +#~ "Without that I don't know who I am :-(\n" +#~ msgstr "" +#~ "Pas de director défini pour %s\n" +#~ "Sans cette définition, il n'est pas possible de se connecter à celui-ci.\n" + +#, fuzzy +#~ msgid "Only one Client resource permitted in %s\n" +#~ msgstr "Impossible de trouver la ressource Client \"%s\"\n" + +#, fuzzy +#~ msgid "No Director resource defined in %s\n" +#~ msgstr "Pas d'enregistrement trouvé en base pour : %s\n" + +#, fuzzy +#~ msgid "Failed to initialize encryption context.\n" +#~ msgstr "Impossible d'initialiser le contexte TLS pour la Console \"%s\".\n" + +#, fuzzy +#~ msgid "%s signature digest initialization failed\n" +#~ msgstr "Initialisation de la connexion TLS échouée.\n" + +#, fuzzy +#~ msgid "Unsupported cipher on this system.\n" +#~ msgstr "Le cipher spécifié est non supporté\n" + +#, fuzzy +#~ msgid "An error occurred while encrypting the stream.\n" +#~ msgstr "Impossible d'ouvrir le fichier de données %s.\n" + +#, fuzzy +#~ msgid "Failed to allocate memory for crypto signature.\n" +#~ msgstr "Impossible de se connecter au Director\n" + +#, fuzzy +#~ msgid "An error occurred while adding signer the stream.\n" +#~ msgstr "Impossible d'ouvrir le fichier de données %s.\n" + +#, fuzzy +#~ msgid "An error occurred while signing the stream.\n" +#~ msgstr "Impossible d'ouvrir le fichier de données %s.\n" + +#, fuzzy +#~ msgid "An error occurred finalizing signing the stream.\n" +#~ msgstr "Impossible d'ouvrir le fichier de données %s.\n" + +#, fuzzy +#~ msgid "Plugin save packet not found.\n" +#~ msgstr "le client \"%s\" est introuvable.\n" + +#~ msgid "Plugin=%s not found.\n" +#~ msgstr "Plugin=%s non trouvé.\n" + +#, fuzzy +#~ msgid "Could not create %s: ERR=%s\n" +#~ msgstr "Impossible d'ouvrir %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Error while creating command string %s.\n" +#~ msgstr "Erreur pendant l'initialisation du contexte SSL" + +#, fuzzy +#~ msgid "Error while executing \"%s\" %s. %s %s\n" +#~ msgstr "Entrez le nombre de départ : " + +#, fuzzy +#~ msgid "Unable to parse snapshot command output\n" +#~ msgstr "Impossible de se connecter au Client.\n" + +#, fuzzy +#~ msgid "Unable to create snapshot record. ERR=%s\n" +#~ msgstr "Impossible de récupérer le Pool depuis le catalogue : ERR=%s" + +#, fuzzy +#~ msgid "Unable to create snapshot record, got %s\n" +#~ msgstr "Impossible de récupérer le Job du JobId=%s : ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to delete snapshot record. ERR=%s\n" +#~ msgstr "Impossible de récupérer le Pool depuis le catalogue : ERR=%s" + +#, fuzzy +#~ msgid "Unable to delete snapshot record, got %s\n" +#~ msgstr "Impossible de récupérer le Job du JobId=%s : ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to get snapshot record. 
ERR=%s\n" +#~ msgstr "Impossible de récupérer le Pool depuis le catalogue : ERR=%s" + +#, fuzzy +#~ msgid "Unable to get snapshot record, got %s\n" +#~ msgstr "Impossible de récupérer le Job du JobId=%s : ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to parse command output\n" +#~ msgstr "Impossible de se connecter au Client.\n" + +#, fuzzy +#~ msgid " Delete Snapshot for %s\n" +#~ msgstr "Erreur durant la création des snapshots VSS.\n" + +#, fuzzy +#~ msgid " Unable to delete snapshot of %s ERR=%s\n" +#~ msgstr "Impossible de se connecter à %s sur %s:%d. ERR=%s\n" + +#, fuzzy +#~ msgid " Create Snapshot for %s\n" +#~ msgstr "Erreur durant la création des snapshots VSS.\n" + +#, fuzzy +#~ msgid " Unable to create snapshot of %s ERR=%s\n" +#~ msgstr "Impossible de se connecter à %s sur %s:%d. ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Version: %s (%s)\n" +#~ "\n" +#~ "Usage: bacula-fd [-f -s] [-c config_file] [-d debug_level]\n" +#~ " -c use as configuration file\n" +#~ " -d [,] set debug level to , debug tags to \n" +#~ " -dt print a timestamp in debug output\n" +#~ " -f run in foreground (for debugging)\n" +#~ " -g groupid\n" +#~ " -k keep readall capabilities\n" +#~ " -m print kaboom output (for debugging)\n" +#~ " -s no signals (for debugging)\n" +#~ " -t test configuration file and exit\n" +#~ " -T set trace on\n" +#~ " -u userid\n" +#~ " -v verbose user messages\n" +#~ " -? print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Version : %s (%s)\n" +#~ "\n" +#~ "Usage : bacula-fd [-f -s] [-c config_file] [-d debug_level]\n" +#~ " -c utilise fich comme fichier de configuration\n" +#~ " -d positionne le niveau de debug à nn\n" +#~ " -dt affiche un timestamp sur chaque ligne de debug\n" +#~ " -f reste en avant-plan (pour debugger)\n" +#~ " -g groupid\n" +#~ " -s pas de signaux\n" +#~ " -t test - lit seulement le fichier de configuration\n" +#~ " -u userid\n" +#~ " -v affiche les messages utilisateurs\n" +#~ " -? affiche ce message.\n" +#~ "\n" + +#, fuzzy +#~ msgid "Disable Command \"%s\" not found.\n" +#~ msgstr "le client \"%s\" est introuvable.\n" + +#, fuzzy +#~ msgid "Failed to allocate a new keypair object.\n" +#~ msgstr "Impossible de se connecter au Director\n" + +#, fuzzy +#~ msgid "Failed to load public certificate for File daemon \"%s\" in %s.\n" +#~ msgstr "Impossible de se connecter au client.\n" + +#, fuzzy +#~ msgid "Failed to load private key for File daemon \"%s\" in %s.\n" +#~ msgstr "Impossible de se connecter au client.\n" + +#, fuzzy +#~ msgid "Failed to initialize TLS context for Console \"%s\" in %s.\n" +#~ msgstr "Impossible d'initialiser le contexte TLS pour la Console \"%s\".\n" + +#, fuzzy +#~ msgid "Expected a Cipher Type keyword, got: %s" +#~ msgstr "Attendait le mot clef FileSet, eu : %s" + +#, fuzzy +#~ msgid "Cannot find any Console resource for remote access\n" +#~ msgstr "Impossible de trouver la ressource Console \"%s\"\n" + +#, fuzzy +#~ msgid "Bad Hello command from Director at %s. Len=%d.\n" +#~ msgstr "Connexion invalide. 
Len=%d\n" + +#, fuzzy +#~ msgid "Bad Hello command from Director at %s: %s\n" +#~ msgstr "Début de purge des jobs du client \"%s\"\n" + +#~ msgid "Connection from unknown Director %s at %s rejected.\n" +#~ msgstr "" +#~ "Connexion d'un Director inconnu %s à %s rejeté.\n" +#~ "\n" + +#, fuzzy +#~ msgid "SD connect failed: Bad Hello command\n" +#~ msgstr "Le director a rejeté la commande Hello\n" + +#, fuzzy +#~ msgid "SD connect failed: Job name not found: %s\n" +#~ msgstr "Job non trouvé : %s\n" + +#, fuzzy +#~ msgid "SD \"%s\" tried to connect two times.\n" +#~ msgstr "Impossible de se connecter au Client.\n" + +#, fuzzy +#~ msgid "Command: \"%s\" is disabled.\n" +#~ msgstr "Commande annulée.\n" + +#, fuzzy +#~ msgid "Bad command from %s. Len=%d.\n" +#~ msgstr "Connexion invalide. Len=%d\n" + +#~ msgid "2902 Error scanning cancel command.\n" +#~ msgstr "2902 Erreur dans le décodage de la commande d'annulation.\n" + +#~ msgid "2901 Job %s not found.\n" +#~ msgstr "2901 Le job %s est introuvable.\n" + +#, fuzzy +#~ msgid "2001 Job \"%s\" marked to be %s.\n" +#~ msgstr "2001 Le job %s va être annulé.\n" + +#, fuzzy +#~ msgid "2991 Bad setbandwidth command: %s\n" +#~ msgstr "2991 Erreur dans la commande setdebug : %s\n" + +#~ msgid "2991 Bad setdebug command: %s\n" +#~ msgstr "2991 Erreur dans la commande setdebug : %s\n" + +#, fuzzy +#~ msgid "Bad estimate command: %s" +#~ msgstr "Erreur dans la commande RunScript : %s\n" + +#, fuzzy +#~ msgid "2992 Bad estimate command.\n" +#~ msgstr "2991 Erreur dans la commande setdebug : %s\n" + +#, fuzzy +#~ msgid "Bad Job Command: %s" +#~ msgstr "Erreur dans la commande RunScript : %s\n" + +#, fuzzy +#~ msgid "Bad RunBeforeJob command: %s\n" +#~ msgstr "Erreur dans la commande RunScript : %s\n" + +#, fuzzy +#~ msgid "2905 Bad RunBeforeJob command.\n" +#~ msgstr "2905 Erreur sur la commande RunScript.\n" + +#, fuzzy +#~ msgid "2905 Bad RunBeforeNow command.\n" +#~ msgstr "2905 Erreur sur la commande RunScript.\n" + +#, fuzzy +#~ msgid "Bad RunAfter command: %s\n" +#~ msgstr "Erreur dans la commande RunScript : %s\n" + +#, fuzzy +#~ msgid "2905 Bad RunAfterJob command.\n" +#~ msgstr "2905 Erreur sur la commande RunScript.\n" + +#~ msgid "Bad RunScript command: %s\n" +#~ msgstr "Erreur dans la commande RunScript : %s\n" + +#~ msgid "2905 Bad RunScript command.\n" +#~ msgstr "2905 Erreur sur la commande RunScript.\n" + +#, fuzzy +#~ msgid "Bad RestoreObject command: %s\n" +#~ msgstr "Erreur dans la commande RunScript : %s\n" + +#, fuzzy +#~ msgid "2909 Bad RestoreObject command.\n" +#~ msgstr "2905 Erreur sur la commande RunScript.\n" + +#~ msgid "Error running program: %s. stat=%d: ERR=%s\n" +#~ msgstr "Erreur dans l'exécution de la commande : %s. stat=%d: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot open FileSet input file: %s. ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier inclus : %s. ERR=%s\n" + +#, fuzzy +#~ msgid "REGEX %s compile error. 
ERR=%s\n" +#~ msgstr "erreur de fermeture : ERR=%s\n" + +#, fuzzy +#~ msgid "Invalid FileSet command: %s\n" +#~ msgstr "Erreur sur la commande : %s\n" + +#, fuzzy +#~ msgid "" +#~ "DIR and FD clocks differ by %lld seconds, FD automatically compensating.\n" +#~ msgstr "" +#~ "L'horloge du client et du director ont %d secondes d'écart, le client " +#~ "s'est ajusté automatiquement.\n" + +#, fuzzy +#~ msgid "Unknown backup level: %s\n" +#~ msgstr "Niveau de job inconnu %d\n" + +#, fuzzy +#~ msgid "Bad level command: %s\n" +#~ msgstr "Erreur dans la commande RunScript : %s\n" + +#, fuzzy +#~ msgid "Bad session command: %s" +#~ msgstr "Erreur dans la commande RunScript : %s\n" + +#, fuzzy +#~ msgid "Bad storage command: %s" +#~ msgstr "Erreur dans la commande RunScript : %s\n" + +#, fuzzy +#~ msgid "Failed to connect to Storage daemon: %s:%d\n" +#~ msgstr "Impossible de se connecter au Storage daemon.\n" + +#, fuzzy +#~ msgid "Failed connect from Storage daemon. SD bsock=NULL.\n" +#~ msgstr "Impossible de se connecter au Storage daemon.\n" + +#, fuzzy +#~ msgid "ACL support not configured for Client.\n" +#~ msgstr "TLS actif mais non configuré.\n" + +#~ msgid "Cannot contact Storage daemon\n" +#~ msgstr "Impossible de se connecter au démon Storage\n" + +#, fuzzy +#~ msgid "Bad response to append open: %s\n" +#~ msgstr "Mauvaise réponse à la commande Hello : ERR=%s\n" + +#, fuzzy +#~ msgid "Bad response from stored to open command\n" +#~ msgstr "Mauvaise réponse à la commande Hello : ERR=%s\n" + +#, fuzzy +#~ msgid "Bad status %d %c returned from Storage Daemon.\n" +#~ msgstr "Impossible de récupérer le statut du Job depuis le FD.\n" + +#, fuzzy +#~ msgid "2994 Bad verify command: %s\n" +#~ msgstr "2991 Erreur dans la commande setdebug : %s\n" + +#, fuzzy +#~ msgid "Bad replace command. CMD=%s\n" +#~ msgstr "Erreur dans la commande RunScript : %s\n" + +#, fuzzy +#~ msgid "Bad response to SD read open: %s\n" +#~ msgstr "Mauvaise réponse à la commande Hello : ERR=%s\n" + +#, fuzzy +#~ msgid "Bad response from stored to read open command\n" +#~ msgstr "" +#~ "Mauvaise réponse du File Daemon \"%s:%d\" à la commande Hello : ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "Bad response from SD to %s command. Wanted %s, got len=%ld msg=\"%s\"\n" +#~ msgstr "Mauvaise réponse à la commande %s : voulait %s, pas %s\n" + +#, fuzzy +#~ msgid "Bad response from SD to %s command. Wanted %s, got SIGNAL %s\n" +#~ msgstr "Mauvaise réponse à la commande %s : voulait %s, pas %s\n" + +#, fuzzy +#~ msgid "Error setting Finder Info on \"%s\"\n" +#~ msgstr "Impossible d'ouvrir le device %s\n" + +#, fuzzy +#~ msgid "LZO init failed\n" +#~ msgstr "Impossible de Rembobiner.\n" + +#, fuzzy +#~ msgid "Record header scan error: %s\n" +#~ msgstr "erreur sockopt : %s\n" + +#, fuzzy +#~ msgid "Data record error. ERR=%s\n" +#~ msgstr "erreur de fermeture : ERR=%s\n" + +#, fuzzy +#~ msgid "Could not create digest.\n" +#~ msgstr "Impossible de créer la structure BSOCK cliente.\n" + +#~ msgid "Decrypt of the session key failed.\n" +#~ msgstr "Impossible de décrypter la clef de session.\n" + +#, fuzzy +#~ msgid "Signer not found. Decryption failed.\n" +#~ msgstr "La création de la signature a échouée" + +#, fuzzy +#~ msgid "Unsupported digest algorithm. Decrypt failed.\n" +#~ msgstr "Le digest spécifié n'est pas supporté : %d\n" + +#, fuzzy +#~ msgid "Unsupported encryption algorithm. 
Decrypt failed.\n" +#~ msgstr "contentEncryptionAlgorithm non supporté : %d\n" + +#, fuzzy +#~ msgid "" +#~ "An error=%d occurred while decoding encrypted session data stream: ERR=" +#~ "%s\n" +#~ msgstr "Impossible d'ouvrir le fichier de données %s.\n" + +#, fuzzy +#~ msgid "Missing encryption session data stream for %s\n" +#~ msgstr "Impossible d'ouvrir le fichier de données %s.\n" + +#, fuzzy +#~ msgid "Failed to initialize decryption context for %s\n" +#~ msgstr "Impossible d'initialiser le contexte TLS pour la Console \"%s\".\n" + +#, fuzzy +#~ msgid "Cannot open resource fork for %s.\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Failed to decode message signature for %s\n" +#~ msgstr "Impossible de mettre à jour les informations du Volume : ERR=%s" + +#, fuzzy +#~ msgid "Encountered %ld acl errors while doing restore\n" +#~ msgstr "Saisir la liste des fichiers à restaurer" + +#, fuzzy +#~ msgid "Encountered %ld xattr errors while doing restore\n" +#~ msgstr "Saisir la liste des fichiers à restaurer" + +#, fuzzy +#~ msgid "" +#~ "%d non-supported data streams and %d non-supported attrib streams " +#~ "ignored.\n" +#~ msgstr "contentEncryptionAlgorithm non supporté : %d\n" + +#, fuzzy +#~ msgid "%d non-supported resource fork streams ignored.\n" +#~ msgstr "contentEncryptionAlgorithm non supporté : %d\n" + +#, fuzzy +#~ msgid "%d non-supported Finder Info streams ignored.\n" +#~ msgstr "contentEncryptionAlgorithm non supporté : %d\n" + +#, fuzzy +#~ msgid "%d non-supported acl streams ignored.\n" +#~ msgstr "contentEncryptionAlgorithm non supporté : %d\n" + +#, fuzzy +#~ msgid "%d non-supported crypto streams ignored.\n" +#~ msgstr "contentEncryptionAlgorithm non supporté : %d\n" + +#, fuzzy +#~ msgid "%d non-supported xattr streams ignored.\n" +#~ msgstr "contentEncryptionAlgorithm non supporté : %d\n" + +#, fuzzy +#~ msgid "Zlib errno" +#~ msgstr "Mauvais errno" + +#, fuzzy +#~ msgid "Zlib data error" +#~ msgstr "Erreur fatale" + +#, fuzzy +#~ msgid "*none*" +#~ msgstr "none" + +#, fuzzy +#~ msgid "Seek to %s error on %s: ERR=%s\n" +#~ msgstr "erreur de déplacement (lseek) sur %s : ERR=%s\n" + +#, fuzzy +#~ msgid "LZO uncompression error on file %s. ERR=%d\n" +#~ msgstr "Erreur de décompression. ERR=%d\n" + +#, fuzzy +#~ msgid "Uncompression error on file %s. ERR=%s\n" +#~ msgstr "Erreur de décompression. ERR=%d\n" + +#, fuzzy +#~ msgid "Wrong write size error at byte=%lld block=%d wanted=%d wrote=%d\n" +#~ msgstr "Erreur d'écriture à %u:%u sur le device %s. ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "Write error at byte=%lld block=%d write_len=%d lerror=%d on %s: ERR=%s\n" +#~ msgstr "Erreur d'écriture à %u:%u sur le device %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Decryption error\n" +#~ msgstr "Erreur pendant le re-positionnement. ERR=%s\n" + +#, fuzzy +#~ msgid "Signature validation failed for file %s: ERR=%s\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Digest one file failed for file: %s\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Signature validation failed for %s: %s\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Daemon started %s. 
Jobs: run=%d running=%d.\n" +#~ msgstr "Démon démarré depuis %s, %d jobs lancés depuis cette date.\n" + +#, fuzzy +#~ msgid "Director connected %sat: %s\n" +#~ msgstr "Connexion du director le %s\n" + +#, fuzzy +#~ msgid "JobId %d Job %s is running.\n" +#~ msgstr "JobId %s n'est pas en cours.\n" + +#, fuzzy +#~ msgid " %s %s Job started: %s\n" +#~ msgstr "Le job %d est annulé.\n" + +#, fuzzy +#~ msgid "" +#~ " Files=%s Bytes=%s AveBytes/sec=%s LastBytes/sec=%s Errors=%d\n" +#~ " Bwlimit=%s ReadBytes=%s\n" +#~ msgstr " Fichiers=%s Octets=%s Octets/sec=%s Erreurs=%d\n" + +#, fuzzy +#~ msgid " Processing file: %s\n" +#~ msgstr "Impossible d'ouvrir le fichier de données %s.\n" + +#~ msgid "====\n" +#~ msgstr "====\n" + +#, fuzzy +#~ msgid "Bad .status command: %s\n" +#~ msgstr "3909 Erreur pendant la lecture de la commande de montage : %s\n" + +#, fuzzy +#~ msgid "2900 Bad .status command, missing argument.\n" +#~ msgstr "3909 Erreur pendant la lecture de la commande de montage : %s\n" + +#, fuzzy +#~ msgid "Cannot malloc %d network read buffer\n" +#~ msgstr "Impossible d'ouvrir la base de données \"%s\".\n" + +#, fuzzy +#~ msgid " Could not access %s: ERR=%s\n" +#~ msgstr " Impossible d'acceder à \"%s\" : ERR=%s\n" + +#, fuzzy +#~ msgid " Could not follow link %s: ERR=%s\n" +#~ msgstr " Impossible de suivre le lien \"%s\" : ERR=%s\n" + +#, fuzzy +#~ msgid " Could not stat %s: ERR=%s\n" +#~ msgstr " Impossible d'acceder à \"%s\" : ERR=%s\\n\n" + +#, fuzzy +#~ msgid " Could not open directory %s: ERR=%s\n" +#~ msgstr " Impossible d'ouvrir le répertoire \"%s\" : ERR=%s\n" + +#, fuzzy +#~ msgid " Unknown file type %d: %s\n" +#~ msgstr " Type de fichier inconnu %d ; non sauvé : %s\n" + +#, fuzzy +#~ msgid "Network error in send to Director: ERR=%s\n" +#~ msgstr "Erreur dans l'exécution de la commande : %s. ERR=%s\n" + +#, fuzzy +#~ msgid " Cannot open %s: ERR=%s.\n" +#~ msgstr " Impossible d'ouvrir \"%s\" : ERR=%s.\n" + +#, fuzzy +#~ msgid " Cannot open resource fork for %s: ERR=%s.\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Error reading file %s: ERR=%s\n" +#~ msgstr "Erreur de lecture de %s:%s:%d : ERR=%s\n" + +#, fuzzy +#~ msgid " %s differs on \"%s\". File=%s Vol=%s\n" +#~ msgstr " Positionné sur Fichier=%s Bloc=%s\n" + +#, fuzzy +#~ msgid "Unable to stat file \"%s\": ERR=%s\n" +#~ msgstr "3910 Impossible d'ouvrir le device %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Failed to serialize extended attributes on file \"%s\"\n" +#~ msgstr "Impossible d'initialiser le contexte TLS pour la Console \"%s\".\n" + +#, fuzzy +#~ msgid "Illegal empty xattr attribute name\n" +#~ msgstr "Caractère interdit dans le nom du Volume \"%s\"\n" + +#, fuzzy +#~ msgid "Illegal xattr stream, no XATTR_MAGIC on file \"%s\"\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Illegal xattr stream, xattr name length <= 0 on file \"%s\"\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s. 
ERR=%s\n" + +#, fuzzy +#~ msgid "pioctl VIOCGETAL error on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "pioctl VIOCSETAL error on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "pathconf error on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "Trying to restore acl on file \"%s\" on filesystem without %s acl " +#~ "support\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Failed to convert %d into namespace on file \"%s\"\n" +#~ msgstr "Impossible de lire le certificat à partir du fichier" + +#, fuzzy +#~ msgid "acl_to_text error on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "acl_get_file error on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "acl_delete_def_file error on file \"%s\": filesystem doesn't support " +#~ "ACLs\n" +#~ msgstr "" +#~ "Impossible de récupérer les informations du Media pour le Volume %s : ERR=" +#~ "%s\n" + +#, fuzzy +#~ msgid "acl_delete_def_file error on file \"%s\": ERR=%s\n" +#~ msgstr "" +#~ "Impossible de récupérer les informations du Media pour le Volume %s : ERR=" +#~ "%s\n" + +#, fuzzy +#~ msgid "acl_from_text error on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "acl_set_file error on file \"%s\": filesystem doesn't support ACLs\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "acl_set_file error on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "extattr_list_link error on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "extattr_get_link error on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Failed to split %s into namespace and name part on file \"%s\"\n" +#~ msgstr "" +#~ "Impossible de se positionner à la fin du média sur le device %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Failed to convert %s into namespace on file \"%s\"\n" +#~ msgstr "Impossible de lire le certificat à partir du fichier" + +#, fuzzy +#~ msgid "extattr_set_link error on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "acl_valid error on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "llistxattr error on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "lgetxattr error on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "setxattr error on file \"%s\": filesystem doesn't support XATTR\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "setxattr error on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "Trying to restore acl on file \"%s\" on filesystem without acl support\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "Trying to restore POSIX acl on file \"%s\" on filesystem without aclent " +#~ "acl support\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to get status on xattr \"%s\" on file \"%s\": ERR=%s\n" +#~ 
msgstr "Impossible de créer le fichier bootstrap %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Unsupported extended attribute type: %i for \"%s\" on file \"%s\"\n" +#~ msgstr "" +#~ "Impossible de se positionner à la fin du média sur le device %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Failed to send extended attribute \"%s\" on file \"%s\"\n" +#~ msgstr "" +#~ "Impossible de se positionner à la fin du média sur le device %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Failed to restore extensible attributes on file \"%s\"\n" +#~ msgstr "" +#~ "Impossible de se positionner à la fin du média sur le device %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Failed to restore extended attributes on file \"%s\"\n" +#~ msgstr "" +#~ "Impossible de se positionner à la fin du média sur le device %s : ERR=%s\n" + +#, fuzzy +#~ msgid "acl_get error on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to get xattr acl on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to get acl on xattr %s on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to get acl text on xattr %s on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to convert acl from text on file \"%s\"\n" +#~ msgstr "Impossible de lire le certificat à partir du fichier" + +#, fuzzy +#~ msgid "Unable to restore acl of xattr %s on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s. ERR=%s\n" + +#, fuzzy +#~ msgid "acl_fromtext error on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "wrong encoding of acl type in acl stream on file \"%s\"\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s. ERR=%s\n" + +#, fuzzy +#~ msgid "acl_set error on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to open xattr on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to list the xattr on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to close xattr list on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to stat xattr \"%s\" on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to open file \"%s\": ERR=%s\n" +#~ msgstr "3910 Impossible d'ouvrir le device %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to link xattr %s to %s on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible de se connecter à %s sur %s:%d. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to open attribute \"%s\" at file \"%s\": ERR=%s\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to restore data of xattr %s on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s. 
ERR=%s\n" + +#, fuzzy +#~ msgid "Unsupported xattr type %s on file \"%s\"\n" +#~ msgstr "Impossible de lire le certificat à partir du fichier" + +#, fuzzy +#~ msgid "Unable to restore owner of xattr %s on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to restore filetimes of xattr %s on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to set file owner %s: ERR=%s\n" +#~ msgstr "3910 Impossible d'ouvrir le device %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to set file modes %s: ERR=%s\n" +#~ msgstr "3910 Impossible d'ouvrir le device %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to set file times %s: ERR=%s\n" +#~ msgstr "3910 Impossible d'ouvrir le device %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to set file flags %s: ERR=%s\n" +#~ msgstr "3910 Impossible d'ouvrir le device %s : ERR=%s\n" + +#, fuzzy +#~ msgid "File data" +#~ msgstr "FileSet" + +#, fuzzy +#~ msgid "Plugin Name" +#~ msgstr "Saisissez un nom de Volume : " + +#, fuzzy +#~ msgid "Plugin Data" +#~ msgstr "Saisissez un nom de Volume : " + +#, fuzzy +#~ msgid "Restore Object" +#~ msgstr "Restauration OK" + +#, fuzzy +#~ msgid "File skipped. Already exists: %s\n" +#~ msgstr "Le volume \"%s\" existe déjà en base.\n" + +#, fuzzy +#~ msgid "Cannot make fifo %s: ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot make node %s: ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Could not symlink %s -> %s: ERR=%s\n" +#~ msgstr "Impossible d'ouvrir %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Could not restore file flags for file %s: ERR=%s\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Could not hard link %s -> %s: ERR=%s\n" +#~ msgstr "Impossible de trouver le client %s : ERR=%s" + +#, fuzzy +#~ msgid "Could not reset file flags for file %s: ERR=%s\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Unknown file type %d; not restored: %s\n" +#~ msgstr " Type de fichier inconnu %d ; non sauvé : %s\n" + +#, fuzzy +#~ msgid "Zero length filename: %s\n" +#~ msgstr "Saisissez le nom complet du fichier : " + +#, fuzzy +#~ msgid "Plugin: \"%s\" not found.\n" +#~ msgstr "Plugin=%s non trouvé.\n" + +#, fuzzy +#~ msgid "Cannot stat file %s: ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot create directory %s: ERR=%s\n" +#~ msgstr "change le répertoire courant" + +#, fuzzy +#~ msgid "%s exists but is not a directory.\n" +#~ msgstr "%s doit être un répertoire.\n" + +#, fuzzy +#~ msgid "Cannot change owner and/or group of %s: ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot change permissions of %s: ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Too many subdirectories. Some permissions not reset.\n" +#~ msgstr "Trop d'éléments dans la ressource \"%s\"\n" + +#, fuzzy +#~ msgid "Cannot open current directory: ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier inclus : %s. 
ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot get current directory: ERR=%s\n" +#~ msgstr "change le répertoire courant" + +#, fuzzy +#~ msgid "Cannot reset current directory: ERR=%s\n" +#~ msgstr "change le répertoire courant" + +#~ msgid "Only ipv4 and ipv6 are supported (%d)\n" +#~ msgstr "Seulement l'ipv4 et l'ipv6 sont supportés (%d)\n" + +#~ msgid "Only ipv4 is supported (%d)\n" +#~ msgstr "Seulement l'ipv4 est supporté (%d)\n" + +#, fuzzy +#~ msgid "Can't add default IPv4 address (%s)\n" +#~ msgstr "Impossible d'ajouter l'adresse par défaut (%s)\n" + +#, fuzzy +#~ msgid "Cannot resolve service(%s)" +#~ msgstr "impossible de trouver une correspondance pour le service (%s)" + +#, fuzzy +#~ msgid "Cannot resolve hostname(%s) %s" +#~ msgstr "impossible de résoudre le hostname (%s) %s" + +#, fuzzy +#~ msgid "Expected a block to begin with { but got: %s" +#~ msgstr "Attendait un début de bloc {, pas : %s" + +#, fuzzy +#~ msgid "Expected a string but got: %s" +#~ msgstr "Attendait une chaîne, pas : %s" + +#, fuzzy +#~ msgid "Expected a string [ip|ipv4|ipv6] but got: %s" +#~ msgstr "Attentait la chaîne [ip|ipv4|ipv6], pas : %s" + +#, fuzzy +#~ msgid "Expected a string [ip|ipv4] but got: %s" +#~ msgstr "Attendait la chaîne [ip|ipv4], pas : %s" + +#, fuzzy +#~ msgid "Expected an equal = but got: %s" +#~ msgstr "Attendait un égal =, pas : %s" + +#, fuzzy +#~ msgid "Expected an identifier [addr|port] but got: %s" +#~ msgstr "Attendait un identifiant [addr|port], pas : %s" + +#~ msgid "Only one port per address block" +#~ msgstr "Seulement un port par bloc d'adresse" + +#~ msgid "Only one addr per address block" +#~ msgstr "Seulement une adresse par bloc d'adresse" + +#, fuzzy +#~ msgid "Expected a identifier [addr|port] but got: %s" +#~ msgstr "Attendait un identifiant [addr|port], pas : %s" + +#~ msgid "Expected a equal =, got: %s" +#~ msgstr "Attendait un égal =, pas : %s" + +#, fuzzy +#~ msgid "Expected a number or a string but got: %s" +#~ msgstr "Attendait un nombre ou une chaîne, pas : %s" + +#, fuzzy +#~ msgid "Expected an IP number or a hostname but got: %s" +#~ msgstr "Attendait une adresse IP ou un nom de machine, pas : %s" + +#, fuzzy +#~ msgid "Expected a end of block with } but got: %s" +#~ msgstr "Attendait une fin de bloc }, pas : %s" + +#, fuzzy +#~ msgid "Cannot add hostname(%s) and port(%s) to addrlist (%s)" +#~ msgstr "" +#~ "Impossible d'ajouter le hostname (%s) et le port (%s) à la liste " +#~ "d'adresse (%s)" + +#, fuzzy +#~ msgid "Expected an end of block with } but got: %s" +#~ msgstr "Attendait une fin de bloc }, pas : %s" + +#~ msgid "Expected an IP number or a hostname, got: %s" +#~ msgstr "Attendait une adresse IP ou un nom de machine, pas : %s" + +#, fuzzy +#~ msgid "Cannot add port (%s) to (%s)" +#~ msgstr "impossible d'ajouter le port (%s) à (%s)" + +#~ msgid "Expected a port number or string, got: %s" +#~ msgstr "Attendait un numéro de port ou une chaîne, pas : %s" + +#~ msgid "Error scanning attributes: %s\n" +#~ msgstr "Erreur pendant la lecture des attributs : %s\n" + +#, fuzzy +#~ msgid "Child exited normally." +#~ msgstr "Commande annulée.\n" + +#~ msgid "Child died from signal %d: %s" +#~ msgstr "Le processus fils est mort par le signal %d : %s" + +#~ msgid "Status OK\n" +#~ msgstr "Statut OK\n" + +#, fuzzy +#~ msgid "bget_msg: unknown signal %d\n" +#~ msgstr "bget_dirmsg : signal bnet inconnu %d\n" + +#, fuzzy +#~ msgid "Attr spool write error. wrote=%d wanted=%d bytes. ERR=%s\n" +#~ msgstr "Erreur pendant l'écriture des attributs dans le spool. 
ERR=%s\n" + +#~ msgid "TLS connection initialization failed.\n" +#~ msgstr "Initialisation de la connexion TLS échouée.\n" + +#, fuzzy +#~ msgid "TLS Negotiation failed.\n" +#~ msgstr "Négociation TLS échouée.\n" + +#~ msgid "TLS enabled but not configured.\n" +#~ msgstr "TLS activé mais non configuré.\n" + +#~ msgid "TLS enable but not configured.\n" +#~ msgstr "TLS actif mais non configuré.\n" + +#~ msgid "No problem." +#~ msgstr "Pas de problème." + +#, fuzzy +#~ msgid "Authoritative answer for host not found." +#~ msgstr "Attribut %s non trouvé." + +#~ msgid "Unknown error." +#~ msgstr "Erreur inconnue." + +#~ msgid "Unknown sig %d" +#~ msgstr "sig inconnu %d" + +#~ msgid "Cannot set SO_REUSEADDR on socket: %s\n" +#~ msgstr "" +#~ "Impossible de positionner l'option SO_REUSEADDR sur la socket : %s\n" + +#~ msgid "Cannot bind port %d: ERR=%s: Retrying ...\n" +#~ msgstr "Impossible de s'attacher au port %d : ERR=%s : Réessaie...\n" + +#~ msgid "Cannot bind port %d: ERR=%s.\n" +#~ msgstr "Impossible de s'attacher au port %d : ERR=%s.\n" + +#, fuzzy +#~ msgid "No addr/port found to listen on.\n" +#~ msgstr "Aucun volume trouvé pour la restauration.\n" + +#~ msgid "Could not init client queue: ERR=%s\n" +#~ msgstr "Impossible d'initialiser la queue cliente : ERR=%s\n" + +#~ msgid "Error in select: %s\n" +#~ msgstr "Erreur sur le select : %s\n" + +#~ msgid "Connection from %s:%d refused by hosts.access\n" +#~ msgstr "Connexion depuis %s:%d refusée par hosts.access\n" + +#~ msgid "Cannot set SO_KEEPALIVE on socket: %s\n" +#~ msgstr "Impossible de positionner SO_KEEPALIVE sur la socket : %s\n" + +#~ msgid "Could not create client BSOCK.\n" +#~ msgstr "Impossible de créer la structure BSOCK cliente.\n" + +#~ msgid "Could not add job to client queue: ERR=%s\n" +#~ msgstr "Impossible d'ajouter le job à la queue cliente : ERR=%s\n" + +#~ msgid "Could not destroy client queue: ERR=%s\n" +#~ msgstr "Impossible de détruire la queue cliente : ERR=%s\n" + +#~ msgid "" +#~ "Could not connect to %s on %s:%d. ERR=%s\n" +#~ "Retrying ...\n" +#~ msgstr "" +#~ "Impossible de se connecter à %s sur %s:%d. ERR=%s\n" +#~ "Réessaie...\n" + +#~ msgid "Unable to connect to %s on %s:%d. ERR=%s\n" +#~ msgstr "Impossible de se connecter à %s sur %s:%d. ERR=%s\n" + +#, fuzzy +#~ msgid "gethostbyname() for host \"%s\" failed: ERR=%s\n" +#~ msgstr "Impossible de supprimer le volume \"%s\". ERR=%s" + +#~ msgid "Socket open error. proto=%d port=%d. ERR=%s\n" +#~ msgstr "Ouverture de la socket en erreur. proto=%d port=%d. ERR=%s\n" + +#, fuzzy +#~ msgid "Source address bind error. proto=%d. ERR=%s\n" +#~ msgstr "Ouverture de la socket en erreur. proto=%d port=%d. ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot set TCP_KEEPIDLE on socket: %s\n" +#~ msgstr "Impossible de positionner SO_KEEPALIVE sur la socket : %s\n" + +#, fuzzy +#~ msgid "Could not init bsock read mutex. ERR=%s\n" +#~ msgstr "Impossible d'initialiser la queue cliente : ERR=%s\n" + +#, fuzzy +#~ msgid "Could not init bsock write mutex. ERR=%s\n" +#~ msgstr "Impossible d'initialiser la queue cliente : ERR=%s\n" + +#, fuzzy +#~ msgid "Could not init bsock attribute mutex. 
ERR=%s\n" +#~ msgstr "Impossible d'initialiser la queue cliente : ERR=%s\n" + +#, fuzzy +#~ msgid "Write error sending %d bytes to %s:%s:%d: ERR=%s\n" +#~ msgstr "Erreur de lecture de %s:%s:%d : ERR=%s\n" + +#~ msgid "Read expected %d got %d from %s:%s:%d\n" +#~ msgstr "Attendait %d en lecture, eu %d de %s:%s:%d\n" + +#~ msgid "Read error from %s:%s:%d: ERR=%s\n" +#~ msgstr "Erreur de lecture de %s:%s:%d : ERR=%s\n" + +#, fuzzy +#~ msgid "fread attr spool error. Wanted=%d got=%d bytes.\n" +#~ msgstr "Erreur pendant l'écriture des attributs dans le spool. ERR=%s\n" + +#, fuzzy +#~ msgid "fread attr spool I/O error.\n" +#~ msgstr "Erreur pendant l'écriture des attributs dans le spool. ERR=%s\n" + +#, fuzzy +#~ msgid "Could not malloc BSOCK data buffer\n" +#~ msgstr "Impossible d'ouvrir la base de données \"%s\".\n" + +#~ msgid "sockopt error: %s\n" +#~ msgstr "erreur sockopt : %s\n" + +#, fuzzy +#~ msgid "fcntl F_GETFL error. ERR=%s\n" +#~ msgstr "erreur de fermeture : ERR=%s\n" + +#, fuzzy +#~ msgid "fcntl F_SETFL error. ERR=%s\n" +#~ msgstr "erreur de fermeture : ERR=%s\n" + +#, fuzzy +#~ msgid "Director authorization error at \"%s:%d\"\n" +#~ msgstr "Pas d'enregistrement trouvé en base pour : %s\n" + +#, fuzzy +#~ msgid "" +#~ "Bad errmsg to Hello command: ERR=%s\n" +#~ "The Director at \"%s:%d\" may not be running.\n" +#~ msgstr "Mauvaise réponse à la commande Hello : ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "Authorization error with Director at \"%s:%d\"\n" +#~ "Most likely the passwords do not agree.\n" +#~ "If you are using TLS, there may have been a certificate validation error " +#~ "during the TLS handshake.\n" +#~ "For help, please see: " +#~ msgstr "" +#~ "Problème d'authentification avec le director.\n" +#~ "Le plus souvent, les mots de pass ne correspondent pas.\n" +#~ "Si vous utilisez TLS, il peut y avoir une erreur de validation du " +#~ "certificat\n" +#~ "pendant l'initialisation de la connexion TLS.\n" +#~ "Vous trouverez de l'aide sur\n" +#~ "http://www.bacula.org/rel-manual/faq.html#AuthorizationErrors\n" + +#, fuzzy +#~ msgid "safe_unlink could not compile regex pattern \"%s\" ERR=%s\n" +#~ msgstr "Impossible d'ouvrir %s : ERR=%s\n" + +#~ msgid "Out of memory: ERR=%s\n" +#~ msgstr "Plus de mémoire : ERR=%s\n" + +#~ msgid "Buffer overflow.\n" +#~ msgstr "Buffer overflow.\n" + +#~ msgid "Bad errno" +#~ msgstr "Mauvais errno" + +#, fuzzy +#~ msgid "Cannot open %s file. %s ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier contenant le pid. %s ERR=%s\n" + +#, fuzzy +#~ msgid "Could not open %s file. %s ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier pid. %s ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot lock %s file. %s ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier contenant le pid. %s ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot not open %s file. %s ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier contenant le pid. %s ERR=%s\n" + +#~ msgid "Could not create state file. %s ERR=%s\n" +#~ msgstr "Impossible de créer le fichier d'état. 
%s ERR=%s\n" + +#, fuzzy +#~ msgid "Write final hdr error: ERR=%s\n" +#~ msgstr "erreur de déplacement (lseek) sur %s : ERR=%s\n" + +#, fuzzy +#~ msgid "1999 Authorization failed.\n" +#~ msgstr "Négociation TLS échouée.\n" + +#~ msgid "Unable to open certificate file" +#~ msgstr "Impossible d'ouvrir de fichier de certificat" + +#~ msgid "Unable to read certificate from file" +#~ msgstr "Impossible de lire le certificat à partir du fichier" + +#~ msgid "Unable to extract public key from certificate" +#~ msgstr "Impossible d'extraire la clef publique à partir du certificat" + +#~ msgid "" +#~ "Provided certificate does not include the required subjectKeyIdentifier " +#~ "extension." +#~ msgstr "" +#~ "Le certificat fournis n'inclus pas l'extension subjectKeyIdentifier " +#~ "requise" + +#~ msgid "Unsupported key type provided: %d\n" +#~ msgstr "Type de clef fourni non supporté : %d\n" + +#~ msgid "Unable to open private key file" +#~ msgstr "Impossible d'ouvrir le fichier de clef privée" + +#~ msgid "Unable to read private key from file" +#~ msgstr "Impossible de lire la clef privée à partir du fichier" + +#~ msgid "Unsupported digest type: %d\n" +#~ msgstr "Le digest spécifié n'est pas supporté : %d\n" + +#, fuzzy +#~ msgid "OpenSSL digest initialization failed" +#~ msgstr "Initialisation du digest OpenSSL à échoué" + +#, fuzzy +#~ msgid "OpenSSL digest update failed" +#~ msgstr "Initialisation du digest OpenSSL à échoué" + +#~ msgid "OpenSSL digest finalize failed" +#~ msgstr "Initialisation du digest OpenSSL à échoué" + +#, fuzzy +#~ msgid "OpenSSL digest_new failed" +#~ msgstr "Initialisation du digest OpenSSL à échoué" + +#, fuzzy +#~ msgid "OpenSSL sign get digest failed" +#~ msgstr "Initialisation du digest OpenSSL à échoué" + +#, fuzzy +#~ msgid "OpenSSL digest Verify final failed" +#~ msgstr "Initialisation du contexte clef/IV du cipher OpenSSL à échoué" + +#, fuzzy +#~ msgid "No signers found for crypto verify.\n" +#~ msgstr "Aucun volume trouvé pour la restauration.\n" + +#~ msgid "Signature creation failed" +#~ msgstr "La création de la signature a échouée" + +#~ msgid "Signature decoding failed" +#~ msgstr "Le décodage de la signature a échoué" + +#~ msgid "Unsupported cipher type specified\n" +#~ msgstr "Le cipher spécifié est non supporté\n" + +#~ msgid "CryptoData decoding failed" +#~ msgstr "Le décodage du CryptoData a échoué" + +#~ msgid "Failure decrypting the session key" +#~ msgstr "Impossible de décrypter la clef de session" + +#~ msgid "Unsupported contentEncryptionAlgorithm: %d\n" +#~ msgstr "contentEncryptionAlgorithm non supporté : %d\n" + +#~ msgid "OpenSSL cipher context initialization failed" +#~ msgstr "Initialisation du contexte clef/IV du cipher OpenSSL à échoué" + +#, fuzzy +#~ msgid "OpenSSL cipher context key/IV initialization failed" +#~ msgstr "Initialisation du contexte clef/IV du cipher OpenSSL à échoué" + +#, fuzzy +#~ msgid "Unsupported digest type=%d specified\n" +#~ msgstr "Le cipher spécifié est non supporté\n" + +#, fuzzy +#~ msgid "SHA1Update() returned an error: %d\n" +#~ msgstr "1993 Erreur sur la mise à jour du Media\n" + +#~ msgid "No error" +#~ msgstr "Pas d'erreur" + +#, fuzzy +#~ msgid "Signer not found" +#~ msgstr "Le Storage \"%s\" est introuvable.\n" + +#, fuzzy +#~ msgid "Recipient not found" +#~ msgstr "Ressource %s introuvable\n" + +#, fuzzy +#~ msgid "Unsupported digest algorithm" +#~ msgstr "Le digest spécifié n'est pas supporté : %d\n" + +#, fuzzy +#~ msgid "Unsupported encryption algorithm" +#~ msgstr "contentEncryptionAlgorithm 
non supporté : %d\n" + +#, fuzzy +#~ msgid "Decryption error" +#~ msgstr "Erreur interne" + +#~ msgid "Internal error" +#~ msgstr "Erreur interne" + +#~ msgid "Unknown error" +#~ msgstr "Erreur inconnue." + +#, fuzzy +#~ msgid "Cannot fork to become daemon: ERR=%s\n" +#~ msgstr "Impossible de forker pour passer en mode démon : %s\n" + +#, fuzzy +#~ msgid "Create thread" +#~ msgstr "Crée" + +#~ msgid "Illegal character \"%c\" in name.\n" +#~ msgstr "Caractère illégal \"%c\" dans le nom.\n" + +#~ msgid "Name too long.\n" +#~ msgstr "Nom trop long.\n" + +#, fuzzy +#~ msgid "" +#~ "Config file error: %s\n" +#~ " : Line %d, col %d of file %s\n" +#~ "%s\n" +#~ msgstr "" +#~ "Erreur de config : %s\n" +#~ " : ligne %d, col %d du fichier %s\n" +#~ "%s\n" +#~ "%s" + +#, fuzzy +#~ msgid "Cannot open config file %s: %s\n" +#~ msgstr "Impossible d'ouvrir le fichier de configuration \"%s\" : %s\n" + +#, fuzzy +#~ msgid "Cannot open lex\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#~ msgid "Backup" +#~ msgstr "Backup" + +#~ msgid "Verifying" +#~ msgstr "Vérification" + +#~ msgid "Restoring" +#~ msgstr "Restauration" + +#~ msgid "Archiving" +#~ msgstr "Archivage" + +#~ msgid "Copying" +#~ msgstr "Copier" + +#~ msgid "Migration" +#~ msgstr "Migrer" + +#, fuzzy +#~ msgid "Scanning" +#~ msgstr "En cours" + +#~ msgid "Unknown operation" +#~ msgstr "Opération inconnue" + +#~ msgid "backup" +#~ msgstr "backup" + +#~ msgid "verified" +#~ msgstr "vérifié" + +#~ msgid "verify" +#~ msgstr "Vérifier" + +#~ msgid "restored" +#~ msgstr "Restauré" + +#~ msgid "restore" +#~ msgstr "restaurer" + +#~ msgid "archived" +#~ msgstr "archivé" + +#~ msgid "archive" +#~ msgstr "archiver" + +#~ msgid "copied" +#~ msgstr "copié" + +#~ msgid "copy" +#~ msgstr "copier" + +#~ msgid "migrated" +#~ msgstr "migrer" + +#~ msgid "migrate" +#~ msgstr "migré" + +#~ msgid "scanned" +#~ msgstr "scanné" + +#~ msgid "scan" +#~ msgstr "scanner" + +#~ msgid "unknown action" +#~ msgstr "action inconnue" + +#~ msgid "pthread_once failed. ERR=%s\n" +#~ msgstr "erreur sur pthread_once. ERR=%s\n" + +#, fuzzy +#~ msgid "Could not init msg_queue mutex. 
ERR=%s\n" +#~ msgstr "Impossible d'initialiser la queue cliente : ERR=%s\n" + +#~ msgid "NULL jcr.\n" +#~ msgstr "NULL jcr.\n" + +#~ msgid "" +#~ "Watchdog sending kill after %d secs to thread stalled reading Storage " +#~ "daemon.\n" +#~ msgstr "" +#~ "Le watchdog a envoyé un signal après %d secs au thread bloqué en écoute " +#~ "du SD.\n" + +#~ msgid "" +#~ "Watchdog sending kill after %d secs to thread stalled reading File " +#~ "daemon.\n" +#~ msgstr "" +#~ "Le watchdog a envoyé un signal après %d secs au thread bloqué en écoute " +#~ "du FD.\n" + +#~ msgid "" +#~ "Watchdog sending kill after %d secs to thread stalled reading Director.\n" +#~ msgstr "" +#~ "Le watchdog a envoyé un signal après %d secs au thread bloqué en écoute " +#~ "du Director.\n" + +#~ msgid "Problem probably begins at line %d.\n" +#~ msgstr "Le problème commence sûrement au début de la ligne %d.\n" + +#~ msgid "" +#~ "Config error: %s\n" +#~ " : line %d, col %d of file %s\n" +#~ "%s\n" +#~ "%s" +#~ msgstr "" +#~ "Erreur de config : %s\n" +#~ " : ligne %d, col %d du fichier %s\n" +#~ "%s\n" +#~ "%s" + +#~ msgid "Config error: %s\n" +#~ msgstr "Erreur de config : %s\n" + +#~ msgid "Config token too long, file: %s, line %d, begins at line %d\n" +#~ msgstr "Config token trop long, fichier : %s, ligne %d, débutant ligne %d\n" + +#~ msgid "none" +#~ msgstr "none" + +#~ msgid "comment" +#~ msgstr "comment" + +#~ msgid "number" +#~ msgstr "number" + +#~ msgid "ip_addr" +#~ msgstr "ip_addr" + +# identifiant +#~ msgid "identifier" +#~ msgstr "identifier" + +#~ msgid "string" +#~ msgstr "string" + +#~ msgid "quoted_string" +#~ msgstr "quoted_string" + +#, fuzzy +#~ msgid "include" +#~ msgstr "Depuis" + +#, fuzzy +#~ msgid "include_quoted_string" +#~ msgstr "quoted_string" + +#~ msgid "expected a positive integer number, got: %s" +#~ msgstr "attendait un nombre entier positif, pas : %s" + +#~ msgid "Cannot open included config file %s: %s\n" +#~ msgstr "Impossible d'ouvrir le fichier de configuration inclus %s : %s\n" + +#~ msgid "expected an integer or a range, got %s: %s" +#~ msgstr "attendait un entier ou bien un intervalle, pas %s : %s" + +#~ msgid "expected an integer number, got %s: %s" +#~ msgstr "attendait un nombre entier, pas %s : %s" + +#~ msgid "expected a name, got %s: %s" +#~ msgstr "attendait un nom, pas %s : %s" + +#~ msgid "name %s length %d too long, max is %d\n" +#~ msgstr "la longueur du nom %s (%d) est trop grande, le max est %d\n" + +#~ msgid "expected a string, got %s: %s" +#~ msgstr "attendait une chaîne, pas %s : %s" + +#, fuzzy +#~ msgid "Mutex lock failure. ERR=%s\n" +#~ msgstr "Erreur sur l'ouverture du périphérique. ERR=%s\n" + +#, fuzzy +#~ msgid "Mutex unlock failure. ERR=%s\n" +#~ msgstr "rwl_writeunlock en échec sur %s:%d :. ERR=%s\n" + +#, fuzzy +#~ msgid "pthread_create failed: ERR=%s\n" +#~ msgstr "Erreur sur l'ouverture du périphérique. 
ERR=%s\n" + +#~ msgid "Out of memory requesting %d bytes\n" +#~ msgstr "Plus de mémoire à l'allocation de %d octets\n" + +#, fuzzy +#~ msgid "Could not open console message file %s: ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier bootstrap %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Could not get con mutex: ERR=%s\n" +#~ msgstr "Impossible d'initialiser la queue cliente : ERR=%s\n" + +#~ msgid "Bacula Message" +#~ msgstr "Message de Bacula" + +#, fuzzy +#~ msgid "open mail pipe %s failed: ERR=%s\n" +#~ msgstr "fopen %s en erreur : ERR=%s\n" + +#, fuzzy +#~ msgid "open mail pipe failed.\n" +#~ msgstr "Job échoué.\n" + +#~ msgid "close error: ERR=%s\n" +#~ msgstr "erreur de fermeture : ERR=%s\n" + +#~ msgid "" +#~ "Mail program terminated in error.\n" +#~ "CMD=%s\n" +#~ "ERR=%s\n" +#~ msgstr "" +#~ "La commande mail s'est terminée en erreur.\n" +#~ "CMD=%s\n" +#~ "ERR=%s\n" + +#~ msgid "fopen %s failed: ERR=%s\n" +#~ msgstr "fopen %s en erreur : ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "Msg delivery error: Operator mail program terminated in error.\n" +#~ "CMD=%s\n" +#~ "ERR=%s\n" +#~ msgstr "" +#~ "La commande mail s'est terminée en erreur.\n" +#~ "CMD=%s\n" +#~ "ERR=%s\n" + +#, fuzzy +#~ msgid "Msg delivery error: fopen %s failed: ERR=%s\n" +#~ msgstr "fopen %s en erreur : ERR=%s\n" + +#, fuzzy +#~ msgid "%s: ABORTING due to ERROR in %s:%d\n" +#~ msgstr "%s : ERREUR dans %s:%d " + +#, fuzzy +#~ msgid "%s: ERROR TERMINATION at %s:%d\n" +#~ msgstr "%s : ERREUR dans %s:%d " + +#~ msgid "%s: Fatal Error because: " +#~ msgstr "%s : Erreur Fatale car : " + +#~ msgid "%s: Fatal Error at %s:%d because:\n" +#~ msgstr "%s : Erreur Fatale à %s:%d car :\n" + +#~ msgid "%s: ERROR: " +#~ msgstr "%s : ERREUR : " + +#~ msgid "%s: ERROR in %s:%d " +#~ msgstr "%s : ERREUR dans %s:%d " + +#~ msgid "%s: Warning: " +#~ msgstr "%s : Attention : " + +#, fuzzy +#~ msgid "%s JobId %u: Fatal error: " +#~ msgstr "%s : %s Erreur fatale : " + +#, fuzzy +#~ msgid "%s JobId %u: Error: " +#~ msgstr "%s : %s Erreur : " + +#, fuzzy +#~ msgid "%s JobId %u: Warning: " +#~ msgstr "%s : Attention : " + +#, fuzzy +#~ msgid "%s JobId %u: Security violation: " +#~ msgstr "%s : Attention : " + +#~ msgid "Unable to init mutex: ERR=%s\n" +#~ msgstr "Impossible d'initialiser le muxtex : ERR=%s\n" + +#~ msgid "Unable to destroy mutex: ERR=%s\n" +#~ msgstr "Impossible de détruire le mutex : ERR=%s\n" + +#~ msgid "Unable to init OpenSSL threading: ERR=%s\n" +#~ msgstr "Impossible d'initialiser le thread OpenSSL : ERR=%s\n" + +#, fuzzy +#~ msgid "Failed to seed OpenSSL PRNG\n" +#~ msgstr "Impossible de sauvegarder le PRNG OpenSSL\n" + +#~ msgid "Failed to save OpenSSL PRNG\n" +#~ msgstr "Impossible de sauvegarder le PRNG OpenSSL\n" + +#~ msgid "expected an =, got: %s" +#~ msgstr "attendait un =, eu : %s" + +#, fuzzy +#~ msgid "Unknown item code: %d\n" +#~ msgstr "Erreur inconnue." 
+ +#, fuzzy +#~ msgid "message type: %s not found" +#~ msgstr "Le FileSet \"%s\" est introuvable.\n" + +#, fuzzy +#~ msgid "" +#~ "Attempt to redefine \"%s\" from \"%s\" to \"%s\" referenced on line %d : " +#~ "%s\n" +#~ msgstr "Impossible de trouver la ressource \"%s\" utilisée ligne %d : %s\n" + +#~ msgid "Could not find config Resource \"%s\" referenced on line %d : %s\n" +#~ msgstr "Impossible de trouver la ressource \"%s\" utilisée ligne %d : %s\n" + +#, fuzzy +#~ msgid "Attempt to redefine resource \"%s\" referenced on line %d : %s\n" +#~ msgstr "Impossible de trouver la ressource \"%s\" utilisée ligne %d : %s\n" + +#, fuzzy +#~ msgid "Too many %s directives. Max. is %d. line %d: %s\n" +#~ msgstr "Trop d'éléments dans la ressource \"%s\"\n" + +#~ msgid "Missing config Resource \"%s\" referenced on line %d : %s\n" +#~ msgstr "Impossible de trouver la ressource \"%s\" utilisée ligne %d : %s\n" + +#~ msgid "expected a size number, got: %s" +#~ msgstr "attendait une taille, eu : %s" + +#, fuzzy +#~ msgid "expected a speed number, got: %s" +#~ msgstr "attendait une taille, eu : %s" + +#, fuzzy +#~ msgid "expected a %s, got: %s" +#~ msgstr "attendait un =, eu : %s" + +#~ msgid "Expected a Tape Label keyword, got: %s" +#~ msgstr "Attendait un Label de lecteur, a pas : %s" + +#, fuzzy +#~ msgid "Unable to initialize resource lock. ERR=%s\n" +#~ msgstr "Impossible d'initialiser le verrou sur la base. ERR=%s\n" + +#, fuzzy +#~ msgid "Config filename too long.\n" +#~ msgstr "Nom de Volume trop long.\n" + +#~ msgid "Cannot open config file \"%s\": %s\n" +#~ msgstr "Impossible d'ouvrir le fichier de configuration \"%s\" : %s\n" + +#~ msgid "Expected a Resource name identifier, got: %s" +#~ msgstr "Attendait un identifiant de Ressource, a pas : %s" + +#~ msgid "expected resource name, got: %s" +#~ msgstr "attendait un nom de ressource, eu : %s" + +#, fuzzy +#~ msgid "not in resource definition: %s" +#~ msgstr "mot clés inattendu %d %s dans la définition de la ressource" + +#, fuzzy +#~ msgid "Name not specified for resource" +#~ msgstr "Impossible de trouver un Catalogue\n" + +#~ msgid "unexpected token %d %s in resource definition" +#~ msgstr "mot clés inattendu %d %s dans la définition de la ressource" + +#, fuzzy +#~ msgid "Unknown parser state %d\n" +#~ msgstr "est dans un état inconnu %c" + +#, fuzzy +#~ msgid "Failed to open Plugin directory %s: ERR=%s\n" +#~ msgstr "3910 Impossible d'ouvrir le device %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Failed to find any plugins in %s\n" +#~ msgstr "3910 Impossible d'ouvrir le device %s : ERR=%s\n" + +#, fuzzy +#~ msgid "dlopen plugin %s failed: ERR=%s\n" +#~ msgstr "fopen %s en erreur : ERR=%s\n" + +#, fuzzy +#~ msgid "Lookup of loadPlugin in plugin %s failed: ERR=%s\n" +#~ msgstr "fopen %s en erreur : ERR=%s\n" + +# Impossible d'ouvrir le fichier de spool des attributs : ERR=%s +#, fuzzy +#~ msgid "Lookup of unloadPlugin in plugin %s failed: ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier de spool des attributs %s : ERR=%s\n" + +#~ msgid "Could not find userid=%s: ERR=%s\n" +#~ msgstr "Impossible de trouver le userid %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Could not find password entry. 
ERR=%s\n" +#~ msgstr "Impossible de trouver le client %s : ERR=%s\n" + +#~ msgid "Could not find group=%s: ERR=%s\n" +#~ msgstr "Impossible de trouver le groupe=%s : ERR=%s\n" + +#~ msgid "Could not initgroups for group=%s, userid=%s: ERR=%s\n" +#~ msgstr "" +#~ "Impossible d'utiliser initgroups pour le groupe=%s, userid=%s: ERR=%s\n" +#~ "\n" + +#, fuzzy +#~ msgid "Could not initgroups for userid=%s: ERR=%s\n" +#~ msgstr "Impossible d'ouvrir %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Could not set group=%s: ERR=%s\n" +#~ msgstr "Impossible d'ouvrir %s : ERR=%s\n" + +#, fuzzy +#~ msgid "prctl failed: ERR=%s\n" +#~ msgstr "Erreur sur l'ouverture du périphérique. ERR=%s\n" + +#, fuzzy +#~ msgid "setreuid failed: ERR=%s\n" +#~ msgstr "fopen %s en erreur : ERR=%s\n" + +#, fuzzy +#~ msgid "cap_from_text failed: ERR=%s\n" +#~ msgstr "fopen %s en erreur : ERR=%s\n" + +#, fuzzy +#~ msgid "cap_set_proc failed: ERR=%s\n" +#~ msgstr "Erreur sur l'ouverture du périphérique. ERR=%s\n" + +#, fuzzy +#~ msgid "Could not set specified userid: %s\n" +#~ msgstr "Impossible de trouver le userid %s : ERR=%s\n" + +#~ msgid "rwl_writelock failure at %s:%d: ERR=%s\n" +#~ msgstr "rwl_writelock en échec sur %s:%d : ERR=%s\n" + +#~ msgid "rwl_writeunlock failure at %s:%d:. ERR=%s\n" +#~ msgstr "rwl_writeunlock en échec sur %s:%d :. ERR=%s\n" + +#~ msgid "%s: run %s \"%s\"\n" +#~ msgstr "%s: exécution %s de la commande \"%s\"\n" + +#~ msgid "Runscript: %s could not execute. ERR=%s\n" +#~ msgstr "Runscript : impossible d'exécuter %s. ERR=%s\n" + +#~ msgid "%s: %s\n" +#~ msgstr "%s: %s\n" + +#~ msgid "Runscript: %s returned non-zero status=%d. ERR=%s\n" +#~ msgstr "" +#~ "Runscript : %s s'est terminé avec un statut différent de 0 statut=%d. ERR=" +#~ "%s\n" + +#, fuzzy +#~ msgid "rwl_writeunlock called too many times.\n" +#~ msgstr "rwl_writeunlock en échec sur %s:%d :. ERR=%s\n" + +#, fuzzy +#~ msgid "Negative numbers not permitted.\n" +#~ msgstr "Les nombres négatifs ne sont pas autorisés\n" + +#, fuzzy +#~ msgid "User cancel requested.\n" +#~ msgstr "Restauration annulée" + +#, fuzzy +#~ msgid "Selection items must be be greater than zero.\n" +#~ msgstr "Les valeurs doivent être supérieurs à zéro.\n" + +#, fuzzy +#~ msgid "Invalid signal number" +#~ msgstr "Numéro de slot invalide : %s\n" + +#~ msgid "Bacula interrupted by signal %d: %s\n" +#~ msgstr "Bacula a reçu le signal %d : %s\n" + +#, fuzzy +#~ msgid "Kaboom! %s, %s got signal %d - %s at %s. Attempting traceback.\n" +#~ msgstr "" +#~ "Kaboom ! %s, %s a reçu le signal %d - %s. Tentative de dump des traces.\n" + +#~ msgid "Kaboom! exepath=%s\n" +#~ msgstr "Kaboom ! 
exepath=%s\n" + +#~ msgid "Fork error: ERR=%s\n" +#~ msgstr "Fork en erreur : ERR=%s\n" + +#, fuzzy +#~ msgid "Calling: %s %s %s %s\n" +#~ msgstr "Exécution : %s %s %s\n" + +#~ msgid "execv: %s failed: ERR=%s\n" +#~ msgstr "execv : %s en échec : ERR=%s\n" + +#~ msgid "BA_NSIG too small (%d) should be (%d)\n" +#~ msgstr "BA_NSIG trop petit (%d) devrait être (%d)\n" + +#~ msgid "UNKNOWN SIGNAL" +#~ msgstr "SIGNAL INCONNU" + +#~ msgid "Hangup" +#~ msgstr "Hangup" + +#~ msgid "Quit" +#~ msgstr "Quit" + +#~ msgid "Abort" +#~ msgstr "Abort" + +#~ msgid "IOT trap" +#~ msgstr "IOT trap" + +#~ msgid "BUS error" +#~ msgstr "BUS error" + +#~ msgid "Segmentation violation" +#~ msgstr "Erreur de segmentation" + +#~ msgid "Broken pipe" +#~ msgstr "Tube brisé" + +#~ msgid "Alarm clock" +#~ msgstr "Alarm clock" + +#, fuzzy +#~ msgid "Termination" +#~ msgstr "" +#~ "\n" +#~ "Job terminés :\n" + +#~ msgid "Continue" +#~ msgstr "Continue" + +#, fuzzy +#~ msgid "Virtual alarm clock" +#~ msgstr "Alarm clock" + +#, fuzzy +#~ msgid "Profiling alarm clock" +#~ msgstr "Alarm clock" + +#~ msgid "Out of memory\n" +#~ msgstr "Plus de mémoire\n" + +#, fuzzy +#~ msgid "sm_realloc size: %d\n" +#~ msgstr "Attendait %d en lecture, eu %d de %s:%s:%d\n" + +#, fuzzy +#~ msgid "sm_realloc %d at %p from %s:%d\n" +#~ msgstr "Attendait %d en lecture, eu %d de %s:%s:%d\n" + +#~ msgid " NULL pointer.\n" +#~ msgstr " pointeur NULL.\n" + +#~ msgid " Buffer address: %p\n" +#~ msgstr " Adresse du buffer : %p\n" + +#, fuzzy +#~ msgid "%6d\t%-7s\t%8s\t%10s\t%-7s\t%-8s\t%s\n" +#~ msgstr "%6d %-6s %8s %10s %-7s %-8s %s\n" + +#~ msgid "Error initializing SSL context" +#~ msgstr "Erreur pendant l'initialisation du contexte SSL" + +#, fuzzy +#~ msgid "Error loading certificate verification stores" +#~ msgstr "Erreur dans le chargement du certificat" + +#~ msgid "Error loading certificate file" +#~ msgstr "Erreur dans le chargement du certificat" + +#~ msgid "Error loading private key" +#~ msgstr "Erreur dans le chargement de la clef privée" + +#~ msgid "Unable to open DH parameters file" +#~ msgstr "Impossible d'ouvrir le fichier de paramètre DH" + +#~ msgid "Unable to load DH parameters from specified file" +#~ msgstr "" +#~ "Impossible de charger les paramètres DH à partir du fichier spécifié" + +#~ msgid "Failed to set TLS Diffie-Hellman parameters" +#~ msgstr "Impossible de positionner les paramètres TLS Diffie-Hellman" + +#, fuzzy +#~ msgid "Peer failed to present a TLS certificate\n" +#~ msgstr "Impossible d'ouvrir de fichier de certificat" + +#~ msgid "Error creating new SSL object" +#~ msgstr "Erreur pendant la création d'un nouvel objet SSL" + +#~ msgid "Connect failure" +#~ msgstr "Erreur de connexion" + +#~ msgid "Running" +#~ msgstr "En cours" + +#~ msgid "Blocked" +#~ msgstr "Bloqué" + +#, fuzzy +#~ msgid "Incomplete job" +#~ msgstr "valeur octal incomplète" + +#~ msgid "Non-fatal error" +#~ msgstr "Erreur non fatale" + +#~ msgid "Canceled" +#~ msgstr "Annulé" + +#~ msgid "Verify differences" +#~ msgstr "Vérification des différences" + +#~ msgid "Waiting on FD" +#~ msgstr "En attente du FD" + +#~ msgid "Wait on SD" +#~ msgstr "En attente du SD" + +#~ msgid "Wait for new Volume" +#~ msgstr "En attente d'un nouveau Volume" + +#~ msgid "Waiting for mount" +#~ msgstr "En attente d'un montage" + +#~ msgid "Waiting for Storage resource" +#~ msgstr "En attente du Storage" + +#, fuzzy +#~ msgid "Waiting for Job resource" +#~ msgstr "En attente du Storage" + +#, fuzzy +#~ msgid "Waiting for Client resource" +#~ msgstr "%s Job %s est en 
attente de la connexion du Client.\n" + +#, fuzzy +#~ msgid "Waiting on Max Jobs" +#~ msgstr "En attente du FD" + +#, fuzzy +#~ msgid "Waiting for Start Time" +#~ msgstr "attend son heure de démarrage" + +#, fuzzy +#~ msgid "Waiting on Priority" +#~ msgstr "En attente du FD" + +#, fuzzy +#~ msgid "Unknown Job termination status=%d" +#~ msgstr "est dans un état inconnu %c" + +#, fuzzy +#~ msgid "Completed successfully" +#~ msgstr "Restauration effectuée." + +#~ msgid "Completed with warnings" +#~ msgstr "Terminé avec des avertissements" + +#~ msgid "Terminated with errors" +#~ msgstr "Terminé avec des erreurs" + +#~ msgid "Fatal error" +#~ msgstr "Erreur fatale" + +#~ msgid "Created, not yet running" +#~ msgstr "Créé, mais non démarré" + +#~ msgid "Canceled by user" +#~ msgstr "Annulé par l'utilisateur" + +#, fuzzy +#~ msgid "Verify found differences" +#~ msgstr "Vérification des différences" + +#~ msgid "Waiting for File daemon" +#~ msgstr "En attente du client" + +#~ msgid "Waiting for Storage daemon" +#~ msgstr "En attente du Storage" + +#~ msgid "Waiting for higher priority jobs" +#~ msgstr "Attend qu'un job plus prioritaire se termine" + +#~ msgid "Batch inserting file records" +#~ msgstr "Mise à jour du catalogue" + +#~ msgid "Fatal Error" +#~ msgstr "Erreur Fatale" + +#, fuzzy +#~ msgid "Differences" +#~ msgstr "Différentiel" + +#, fuzzy +#~ msgid "Unknown term code" +#~ msgstr "Erreur inconnue." + +#, fuzzy +#~ msgid "Migrated Job" +#~ msgstr "Migrer" + +#~ msgid "Verify" +#~ msgstr "Vérifier" + +#~ msgid "Restore" +#~ msgstr "Restaurer" + +#, fuzzy +#~ msgid "Console" +#~ msgstr "Message de Bacula" + +#~ msgid "Admin" +#~ msgstr "Admin" + +#~ msgid "Archive" +#~ msgstr "Archiver" + +#, fuzzy +#~ msgid "Job Copy" +#~ msgstr "Copier" + +#~ msgid "Copy" +#~ msgstr "Copier" + +#~ msgid "Migrate" +#~ msgstr "Migrer" + +#, fuzzy +#~ msgid "Scan" +#~ msgstr "En cours" + +#, fuzzy +#~ msgid "Unknown Type" +#~ msgstr "Job du Type=%d inconnu\n" + +#~ msgid "Base" +#~ msgstr "Base" + +#, fuzzy +#~ msgid "Verify Init Catalog" +#~ msgstr "Initialisez le catalogue" + +#, fuzzy +#~ msgid "Verify Data" +#~ msgstr "Vérifier" + +#, fuzzy +#~ msgid "Unknown Job Level" +#~ msgstr "Niveau de job inconnu %d\n" + +#, fuzzy +#~ msgid "Disabled" +#~ msgstr "est bloqué" + +#, fuzzy +#~ msgid "Cleaning" +#~ msgstr "En cours" + +#, fuzzy +#~ msgid "Recycle" +#~ msgstr "RecyclePool" + +#, fuzzy +#~ msgid "Invalid volume status" +#~ msgstr "Nom de Volume invalide : %s\n" + +#~ msgid "everything ok" +#~ msgstr "tout est ok" + +#, fuzzy +#~ msgid "incomplete named character" +#~ msgstr "valeur hexadécimale incomplète" + +#~ msgid "incomplete hexadecimal value" +#~ msgstr "valeur hexadécimale incomplète" + +#~ msgid "invalid hexadecimal value" +#~ msgstr "valeur hexadécimale invalide" + +#~ msgid "octal value too large" +#~ msgstr "valeur octal trop grande" + +#~ msgid "invalid octal value" +#~ msgstr "valeur octal invalide" + +#~ msgid "incomplete octal value" +#~ msgstr "valeur octal incomplète" + +#, fuzzy +#~ msgid "incomplete grouped hexadecimal value" +#~ msgstr "valeur hexadécimale incomplète" + +#, fuzzy +#~ msgid "invalid expansion configuration" +#~ msgstr "Retour à la dernière configuration.\n" + +#~ msgid "out of memory" +#~ msgstr "plus de mémoire" + +#~ msgid "undefined variable" +#~ msgstr "variable non définie" + +#~ msgid "input is neither text nor variable" +#~ msgstr "l'entrée n'est ni du texte ni une variable" + +#, fuzzy +#~ msgid "missing parameter in command" +#~ msgstr "3909 Erreur pendant 
la lecture de la commande de montage : %s\n" + +#~ msgid "invalid argument" +#~ msgstr "argument invalide" + +#, fuzzy +#~ msgid "incomplete quoted pair" +#~ msgstr "valeur octal incomplète" + +#~ msgid "undefined operation" +#~ msgstr "opération indéfinie" + +#~ msgid "unknown error" +#~ msgstr "erreur inconnue" + +#, fuzzy +#~ msgid "Unable to initialize watchdog lock. ERR=%s\n" +#~ msgstr "Impossible d'initialiser le verrou sur la base. ERR=%s\n" + +#, fuzzy +#~ msgid "rwl_writelock failure. ERR=%s\n" +#~ msgstr "rwl_writeunlock en échec sur %s:%d :. ERR=%s\n" + +#, fuzzy +#~ msgid "rwl_writeunlock failure. ERR=%s\n" +#~ msgstr "rwl_writeunlock en échec sur %s:%d :. ERR=%s\n" + +#, fuzzy +#~ msgid "Could not init work queue: ERR=%s\n" +#~ msgstr "Impossible d'initialiser la queue cliente : ERR=%s\n" + +#, fuzzy +#~ msgid "Could not add work to queue: ERR=%s\n" +#~ msgstr "Impossible d'ajouter le job à la queue cliente : ERR=%s\n" + +#, fuzzy +#~ msgid "Waiting for workq to be empty: ERR=%s\n" +#~ msgstr "Impossible de forker pour passer en mode démon : %s\n" + +#, fuzzy +#~ msgid "Error in workq_destroy: ERR=%s\n" +#~ msgstr "Erreur sur le select : %s\n" + +#~ msgid "No volumes specified for reading. Job %s canceled.\n" +#~ msgstr "Pas de volume spécifié pour la lecture. Abandon du job %s.\n" + +#, fuzzy +#~ msgid "" +#~ "Changing read device. Want Media Type=\"%s\" have=\"%s\"\n" +#~ " %s device=%s\n" +#~ msgstr "" +#~ "Changement du Device de lecture. Want Media Type=\"%s\" have=\"%s\"\n" +#~ " device=%s\n" + +#, fuzzy +#~ msgid "No suitable device found to read Volume \"%s\"\n" +#~ msgstr "Pas d'enregistrement trouvé en base pour : %s\n" + +#, fuzzy +#~ msgid "Job %s canceled.\n" +#~ msgstr "Le job %s est annulé.\n" + +#, fuzzy +#~ msgid "Read open %s device %s Volume \"%s\" failed: ERR=%s\n" +#~ msgstr "Impossible de supprimer le volume \"%s\". ERR=%s" + +#, fuzzy +#~ msgid "Too many errors trying to mount %s device %s for reading.\n" +#~ msgstr "Le volume \"%s\" n'est pas dans le device %s.\n" + +#, fuzzy +#~ msgid "Ready to read from volume \"%s\" on %s device %s.\n" +#~ msgstr "Prêt à lire les données du volume \"%s\" depuis le device %s.\n" + +#, fuzzy +#~ msgid "Could not ready %s device %s for append.\n" +#~ msgstr "Impossible d'ouvrir le device %s\n" + +#~ msgid "Could not create JobMedia record for Volume=\"%s\" Job=%s\n" +#~ msgstr "Impossible de créer un JobMedia en base pour le Volume=%s Job=%s\n" + +#, fuzzy +#~ msgid "Read error on device %s in ANSI label. ERR=%s\n" +#~ msgstr "Erreur de lecture de %s:%s:%d : ERR=%s\n" + +#, fuzzy +#~ msgid "Unknown or bad ANSI/IBM label record.\n" +#~ msgstr "3922 Impossible de re-labéliser un Volume ANSI/IBM.\n" + +#, fuzzy +#~ msgid "Could not write ANSI VOL1 label. Wanted size=%d got=%d ERR=%s\n" +#~ msgstr "Impossible de créer le fichier d'état. %s ERR=%s\n" + +#, fuzzy +#~ msgid "Could not write ANSI HDR1 label. ERR=%s\n" +#~ msgstr "Impossible de créer le fichier d'état. %s ERR=%s\n" + +#, fuzzy +#~ msgid "Could not write ANSI HDR1 label.\n" +#~ msgstr "Impossible de créer le fichier d'état. %s ERR=%s\n" + +#, fuzzy +#~ msgid "Error writing EOF to tape. ERR=%s" +#~ msgstr "" +#~ "Erreur pendant l'écriture des données vers le fichier de spool. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to set network buffer size.\n" +#~ msgstr "Impossible de récupérer le Job du JobId=%s : ERR=%s\n" + +#~ msgid "Write session label failed. ERR=%s\n" +#~ msgstr "Impossible d'écrire le label de session. 
ERR=%s\n" + +#, fuzzy +#~ msgid "Network send error to FD. ERR=%s\n" +#~ msgstr "Erreur dans l'exécution de la commande : %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Error reading data header from FD. n=%d msglen=%d ERR=%s\n" +#~ msgstr "" +#~ "Erreur pendant l'écriture des données vers le fichier de spool. ERR=%s\n" + +#, fuzzy +#~ msgid "Malformed data header from FD: %s\n" +#~ msgstr "Message mal formé : %s\n" + +#, fuzzy +#~ msgid "Network error reading from FD. ERR=%s\n" +#~ msgstr "Erreur dans l'exécution de la commande : %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Fatal append error on device %s: ERR=%s\n" +#~ msgstr "Erreur de lecture de %s:%s:%d : ERR=%s\n" + +#, fuzzy +#~ msgid "Set ok=FALSE after write_block_to_device.\n" +#~ msgstr "Erreur pendant l'écriture du fichier bsr.\n" + +#, fuzzy +#~ msgid "Error writing end session label. ERR=%s\n" +#~ msgstr "" +#~ "Erreur pendant l'écriture des données vers le fichier de spool. ERR=%s\n" + +#, fuzzy +#~ msgid "Set ok=FALSE after write_final_block_to_device.\n" +#~ msgstr "Erreur pendant l'écriture du fichier bsr.\n" + +#, fuzzy +#~ msgid "Elapsed time=%02d:%02d:%02d, Transfer rate=%s Bytes/second\n" +#~ msgstr "" +#~ "Temps d'écriture du job = %02d:%02d:%02d, Taux de transfert = %s o/s\n" + +#, fuzzy +#~ msgid "Error updating file attributes. ERR=%s\n" +#~ msgstr "" +#~ "Erreur pendant l'écriture des données vers le fichier de spool. ERR=%s\n" + +#, fuzzy +#~ msgid "Mount Volume \"%s\" on device %s and press return when ready: " +#~ msgstr "Chargez une deuxième bande et appuyez sur \"Entrée\" : " + +#~ msgid "Error getting Volume info: %s" +#~ msgstr "Erreur pendant la récupération des informations sur un Volume : %s" + +#, fuzzy +#~ msgid "Didn't get vol info vol=%s: ERR=%s" +#~ msgstr "Impossible de récupérer le Job du JobId=%s : ERR=%s\n" + +#, fuzzy +#~ msgid "Error creating JobMedia records: ERR=%s\n" +#~ msgstr "Impossible de mettre à jour les informations du Volume : ERR=%s" + +#, fuzzy +#~ msgid "Error creating JobMedia records: %s\n" +#~ msgstr "Erreur pendant l'écriture du fichier bsr.\n" + +#~ msgid "Job %s canceled while waiting for mount on Storage Device \"%s\".\n" +#~ msgstr "" +#~ "Le job %s a été annulé alors qu'il attendait un montage sur le Storage " +#~ "Device \"%s\".\n" + +#, fuzzy +#~ msgid "" +#~ "Job %s is waiting. Cannot find any appendable volumes.\n" +#~ "Please use the \"label\" command to create a new Volume for:\n" +#~ " Storage: %s\n" +#~ " Pool: %s\n" +#~ " Media type: %s\n" +#~ msgstr "" +#~ "Le Job %s est en attente. Bacula n'a pas pu trouver de media disponible.\n" +#~ "Merci de créer un nouveau volume via la commande \"label\" pour :\n" +#~ " Storage : %s\n" +#~ " Type du Media : %s\n" +#~ " Pool : %s\n" + +#, fuzzy +#~ msgid "Max time exceeded waiting to mount Storage Device %s for Job %s\n" +#~ msgstr "" +#~ "Le Job %s a été annulé pendant qu'il attendait le montage sur le Storage " +#~ "Device %s.\n" + +#, fuzzy +#~ msgid "Cannot request another volume: no volume name given.\n" +#~ msgstr "Le volume \"%s\" n'est pas dans le device %s.\n" + +#, fuzzy +#~ msgid "" +#~ "%sPlease mount append Volume \"%s\" or label a new one for:\n" +#~ " Job: %s\n" +#~ " Storage: %s\n" +#~ " Pool: %s\n" +#~ " Media type: %s\n" +#~ msgstr "" +#~ "Le Job %s est en attente. 
Bacula n'a pas pu trouver de media disponible.\n" +#~ "Merci de créer un nouveau volume via la commande \"label\" pour :\n" +#~ " Storage : %s\n" +#~ " Type du Media : %s\n" +#~ " Pool : %s\n" + +#, fuzzy +#~ msgid "" +#~ "%sPlease mount read Volume \"%s\" for:\n" +#~ " Job: %s\n" +#~ " Storage: %s\n" +#~ " Pool: %s\n" +#~ " Media type: %s\n" +#~ msgstr "" +#~ "Le Job %s est en attente. Bacula n'a pas pu trouver de media disponible.\n" +#~ "Merci de créer un nouveau volume via la commande \"label\" pour :\n" +#~ " Storage : %s\n" +#~ " Type du Media : %s\n" +#~ " Pool : %s\n" + +#~ msgid "Job %s canceled while waiting for mount on Storage Device %s.\n" +#~ msgstr "" +#~ "Le Job %s a été annulé pendant qu'il attendait le montage sur le Storage " +#~ "Device %s.\n" + +#, fuzzy +#~ msgid "" +#~ "Incorrect password given by Director.\n" +#~ "For help, please see: " +#~ msgstr "Password incorrect donné par le Director à %s.\n" + +#, fuzzy +#~ msgid "TLS negotiation failed with DIR at \"%s:%d\"\n" +#~ msgstr "Négociation TLS échouée avec le SD \"%s:%d\".\n" + +#, fuzzy +#~ msgid "Unable to authenticate Director at %s.\n" +#~ msgstr "Impossible de récupérer le Job du JobId=%s : ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "Incorrect authorization key from File daemon at %s rejected.\n" +#~ "For help, please see: " +#~ msgstr "Password incorrect donné par le Director à %s.\n" + +#, fuzzy +#~ msgid "TLS negotiation failed with FD at \"%s:%d\"\n" +#~ msgstr "Négociation TLS échouée avec le FD \"%s:%d\".\n" + +#, fuzzy +#~ msgid "No slot defined in catalog (slot=%d) for Volume \"%s\" on %s.\n" +#~ msgstr "Demande de labélisation du volume \"%s\" Slot %d...\n" + +#, fuzzy +#~ msgid "" +#~ "3304 Issuing autochanger \"load Volume %s, Slot %d, Drive %d\" command.\n" +#~ msgstr "" +#~ "3304 Envoi de la commande \"load slot %d, drive %d\" à l'autochangeur.\n" + +#, fuzzy +#~ msgid "" +#~ "3305 Autochanger \"load Volume %s, Slot %d, Drive %d\", status is OK.\n" +#~ msgstr "3305 Autochangeur \"load slot %d, drive %d\", le résultat est OK.\n" + +#, fuzzy +#~ msgid "" +#~ "3992 Bad autochanger \"load Volume %s Slot %d, Drive %d\": ERR=%s.\n" +#~ "Results=%s\n" +#~ msgstr "" +#~ "3992 Erreur sur l'autochangeur \"load slot %d, drive %d\" : ERR=%s.\n" +#~ "Resultat=%s\n" + +#~ msgid "3301 Issuing autochanger \"loaded? drive %d\" command.\n" +#~ msgstr "3301 Envoi de la commande \"loaded? drive %d\" à l'autochangeur.\n" + +#~ msgid "3302 Autochanger \"loaded? drive %d\", result is Slot %d.\n" +#~ msgstr "3302 Autochangeur \"loaded drive %d\", le resultat est Slot %d.\n" + +#~ msgid "3302 Autochanger \"loaded? drive %d\", result: nothing loaded.\n" +#~ msgstr "3302 Autochangeur \"loaded drive %d\", résultat : lecteur vide.\n" + +#~ msgid "" +#~ "3991 Bad autochanger \"loaded? drive %d\" command: ERR=%s.\n" +#~ "Results=%s\n" +#~ msgstr "" +#~ "3991 Erreur sur l'autochangeur \"loaded drive %d\" : ERR=%s.\n" +#~ "Resultat=%s\n" + +#, fuzzy +#~ msgid "Lock failure on autochanger. ERR=%s\n" +#~ msgstr "Erreur sur l'ouverture du périphérique. ERR=%s\n" + +#, fuzzy +#~ msgid "Unlock failure on autochanger. ERR=%s\n" +#~ msgstr "rwl_writeunlock en échec sur %s:%d :. 
ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "3307 Issuing autochanger \"unload Volume %s, Slot %d, Drive %d\" " +#~ "command.\n" +#~ msgstr "" +#~ "3307 Envoi de la commande \"unload slot %d, drive %d\" à l'autochangeur.\n" + +#, fuzzy +#~ msgid "" +#~ "3995 Bad autochanger \"unload Volume %s, Slot %d, Drive %d\": ERR=%s\n" +#~ "Results=%s\n" +#~ msgstr "" +#~ "3995 Erreur sur l'autochangeur \"unload slot %d, drive %d\" : ERR=%s.\n" +#~ "Resultat=%s\n" + +#, fuzzy +#~ msgid "Volume \"%s\" wanted on %s is in use by device %s\n" +#~ msgstr "Le volume \"%s\" est utilisé par le device %s\n" + +#, fuzzy +#~ msgid "" +#~ "3997 Bad autochanger \"unload Volume %s, Slot %d, Drive %d\": ERR=%s.\n" +#~ msgstr "" +#~ "3995 Erreur sur l'autochangeur \"unload slot %d, drive %d\" : ERR=%s.\n" + +#~ msgid "3993 Device %s not an autochanger device.\n" +#~ msgstr "3993 Le Device %s n'est pas un autochangeur.\n" + +#~ msgid "3306 Issuing autochanger \"%s\" command.\n" +#~ msgstr "3306 Envoi de la commande \"%s\" à l'autochangeur.\n" + +#, fuzzy +#~ msgid "3996 Open bpipe failed.\n" +#~ msgstr "3901 l'ouverture du device a échoué : ERR=%s\n" + +#~ msgid "Autochanger error: ERR=%s\n" +#~ msgstr "Erreur sur l'autochangeur : ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: bcopy [-d debug_level] \n" +#~ " -b bootstrap specify a bootstrap file\n" +#~ " -c specify a Storage configuration file\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -i specify input Volume names (separated by |)\n" +#~ " -o specify output Volume names (separated by |)\n" +#~ " -p proceed inspite of errors\n" +#~ " -v verbose\n" +#~ " -w specify working directory (default /tmp)\n" +#~ " -? print this message\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Version : %s (%s)\n" +#~ "\n" +#~ "Usage : dird [-f -s] [-c config_file] [-d debug_level] [config_file]\n" +#~ " -c utilise fich comme fichier de configuration\n" +#~ " -dnn positionne le niveau de debug à nn\n" +#~ " -f reste en avant-plan (pour debugger)\n" +#~ " -g groupid\n" +#~ " -r lance maintenant\n" +#~ " -s pas de signaux\n" +#~ " -t test - lit seulement le fichier de configuration\n" +#~ " -u userid\n" +#~ " -v affiche les messages utilisateurs\n" +#~ " -? affiche ce message.\n" +#~ "\n" + +#, fuzzy +#~ msgid "dev open failed: %s\n" +#~ msgstr "Erreur sur l'ouverture du périphérique. ERR=%s\n" + +#, fuzzy +#~ msgid "Write of last block failed.\n" +#~ msgstr "Flush de %s blocs, écriture de EOF\n" + +#, fuzzy +#~ msgid "Volume label not copied.\n" +#~ msgstr "Le Volume n'a pas de label.\n" + +#, fuzzy +#~ msgid "Cannot fixup device error. 
%s\n" +#~ msgstr "Impossible de trouver la ressource Client \"%s\"\n" + +#, fuzzy +#~ msgid "EOM label not copied.\n" +#~ msgstr "TLS actif mais non configuré.\n" + +#, fuzzy +#~ msgid "EOT label not copied.\n" +#~ msgstr "TLS actif mais non configuré.\n" + +#, fuzzy +#~ msgid "Volume Label" +#~ msgstr "Le Volume n'a pas de label.\n" + +#, fuzzy +#~ msgid "End Job Session" +#~ msgstr "Ecriture du label de fin de session.\n" + +#, fuzzy +#~ msgid "Unknown" +#~ msgstr "inconnu" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: bextract \n" +#~ " -b specify a bootstrap file\n" +#~ " -c specify a Storage configuration file\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -T send debug traces to trace file (stored in /tmp)\n" +#~ " -e exclude list\n" +#~ " -i include list\n" +#~ " -p proceed inspite of I/O errors\n" +#~ " -t read data from volume, do not write anything\n" +#~ " -v verbose\n" +#~ " -V specify Volume names (separated by |)\n" +#~ " -? print this message\n" +#~ "\n" +#~ msgstr "" +#~ "Copyright (C) 2000-2016 Kern Sibbald\n" +#~ "Version : %s (%s)\n" +#~ "\n" +#~ "Usage : dird [-f -s] [-c config_file] [-d debug_level] [config_file]\n" +#~ " -c utilise fich comme fichier de configuration\n" +#~ " -dnn positionne le niveau de debug à nn\n" +#~ " -f reste en avant-plan (pour debugger)\n" +#~ " -g groupid\n" +#~ " -r lance maintenant\n" +#~ " -s pas de signaux\n" +#~ " -t test - lit seulement le fichier de configuration\n" +#~ " -u userid\n" +#~ " -v affiche les messages utilisateurs\n" +#~ " -? affiche ce message.\n" +#~ "\n" + +#, fuzzy +#~ msgid "Could not open exclude file: %s, ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier pid. %s ERR=%s\n" + +#, fuzzy +#~ msgid "Could not open include file: %s, ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier inclus : %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot stat %s. It must exist. ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#~ msgid "%s must be a directory.\n" +#~ msgstr "%s doit être un répertoire.\n" + +#~ msgid "%u files restored.\n" +#~ msgstr "%u fichiers restaurés.\n" + +#, fuzzy +#~ msgid "Found %s error%s\n" +#~ msgstr "Erreur de config : %s\n" + +#, fuzzy +#~ msgid "Write error on %s: %s\n" +#~ msgstr "erreur de déplacement (lseek) sur %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot continue.\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "%s was deleted.\n" +#~ msgstr "Pas de job sélectionné.\n" + +#, fuzzy +#~ msgid "Seek error Addr=%llu on %s: %s\n" +#~ msgstr "erreur de déplacement (lseek) sur %s : ERR=%s\n" + +#~ msgid "Uncompression error. ERR=%d\n" +#~ msgstr "Erreur de décompression. ERR=%d\n" + +#, fuzzy +#~ msgid "LZO uncompression error. ERR=%d\n" +#~ msgstr "Erreur de décompression. ERR=%d\n" + +#, fuzzy +#~ msgid "Error writing JobMedia record to catalog.\n" +#~ msgstr "Erreur pendant l'écriture du fichier bsr.\n" + +#, fuzzy +#~ msgid "Error writing final JobMedia record to catalog.\n" +#~ msgstr "Erreur pendant l'écriture du fichier bsr.\n" + +#, fuzzy +#~ msgid "Cannot write block. Device is disabled. dev=%s\n" +#~ msgstr "Impossible d'écrire un bloc. Le Device est au bout EOM.\n" + +#, fuzzy +#~ msgid "Cannot write block. Device at EOM. dev=%s\n" +#~ msgstr "Impossible d'écrire un bloc. Le Device est au bout EOM.\n" + +#, fuzzy +#~ msgid "Attempt to write on closed device=%s\n" +#~ msgstr "Impossible de se connecter à %s sur %s:%d. 
ERR=%s\n" + +#, fuzzy +#~ msgid "Write error at %s on device %s Vol=%s. ERR=%s.\n" +#~ msgstr "Erreur d'écriture à %u:%u sur le device %s. ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "Out of freespace caused End of Volume \"%s\" at %s on device %s. Write of " +#~ "%u bytes got %d.\n" +#~ msgstr "" +#~ "Fin du volume \"%s\" à %u:%u sur le device %s. Ecriture de %u octets, eu " +#~ "%d.\n" + +#, fuzzy +#~ msgid "End of Volume \"%s\" at %s on device %s. Write of %u bytes got %d.\n" +#~ msgstr "" +#~ "Fin du volume \"%s\" à %u:%u sur le device %s. Ecriture de %u octets, eu " +#~ "%d.\n" + +#, fuzzy +#~ msgid "Job failed or canceled.\n" +#~ msgstr "Le job %d est annulé.\n" + +#, fuzzy +#~ msgid "The %sVolume=%s on device=%s appears to be unlabeled.%s\n" +#~ msgstr "Le volume sur %s possède un mauvais label Bacula : %x\n" + +#, fuzzy +#~ msgid "Read error on fd=%d at addr=%s on device %s. ERR=%s.\n" +#~ msgstr "Erreur de lecture de %s:%s:%d : ERR=%s\n" + +#, fuzzy +#~ msgid "Read zero %sbytes Vol=%s at %s on device %s.\n" +#~ msgstr "Prêt à lire les données du volume \"%s\" depuis le device %s.\n" + +#, fuzzy +#~ msgid "%d block read errors not printed.\n" +#~ msgstr "1000 blocs relus correctement.\n" + +#, fuzzy +#~ msgid "" +#~ "User defined maximum volume size %s will be exceeded on device %s.\n" +#~ " Marking Volume \"%s\" as Full.\n" +#~ msgstr "" +#~ "Nombre maximum de job sur le volume atteind. Marquage du volume \"%s\" " +#~ "comme Used.\n" + +#, fuzzy +#~ msgid "Backspace file at EOT failed. ERR=%s\n" +#~ msgstr "Erreur sur la re-lecture du dernier bloc en EOT. ERR=%s" + +#, fuzzy +#~ msgid "Backspace record at EOT failed. ERR=%s\n" +#~ msgstr "Erreur sur la re-lecture du dernier bloc en EOT. ERR=%s" + +#~ msgid "Re-read last block at EOT failed. ERR=%s" +#~ msgstr "Erreur sur la re-lecture du dernier bloc en EOT. ERR=%s" + +#~ msgid "Re-read of last block succeeded.\n" +#~ msgstr "La re-lecture du dernier bloc écrit a réussi.\n" + +#, fuzzy +#~ msgid "Error sending Volume info to Director.\n" +#~ msgstr "Erreur pendant la récupération des informations sur un Volume : %s" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: bls [options] \n" +#~ " -b specify a bootstrap file\n" +#~ " -c specify a Storage configuration file\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -e exclude list\n" +#~ " -i include list\n" +#~ " -j list jobs\n" +#~ " -k list blocks\n" +#~ " (no j or k option) list saved files\n" +#~ " -L dump label\n" +#~ " -p proceed inspite of errors\n" +#~ " -V specify Volume names (separated by |)\n" +#~ " -E Check records to detect errors\n" +#~ " -v be verbose\n" +#~ " -? print this message\n" +#~ "\n" +#~ msgstr "" +#~ "Copyright (C) 2000-2016 Kern Sibbald\n" +#~ "Version : %s (%s)\n" +#~ "\n" +#~ "Usage : dird [-f -s] [-c config_file] [-d debug_level] [config_file]\n" +#~ " -c utilise fich comme fichier de configuration\n" +#~ " -dnn positionne le niveau de debug à nn\n" +#~ " -f reste en avant-plan (pour debugger)\n" +#~ " -g groupid\n" +#~ " -r lance maintenant\n" +#~ " -s pas de signaux\n" +#~ " -t test - lit seulement le fichier de configuration\n" +#~ " -u userid\n" +#~ " -v affiche les messages utilisateurs\n" +#~ " -? 
affiche ce message.\n" +#~ "\n" + +#, fuzzy +#~ msgid "No archive name specified\n" +#~ msgstr "Le client est déjà spécifié.\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Warning, this Volume is a continuation of Volume %s\n" +#~ msgstr "" +#~ "Attention, changer le nombre de fichier du Volume peut\n" +#~ "vous faire perdre des données du Volume\n" +#~ "\n" + +#, fuzzy +#~ msgid "Got EOM at file %u on device %s, Volume \"%s\"\n" +#~ msgstr "Fin de Volume au fichier %u sur le Device %s, Volume \"%s\"\n" + +#, fuzzy +#~ msgid "Mounted Volume \"%s\".\n" +#~ msgstr "3001 Volume monté : %s\n" + +#, fuzzy +#~ msgid "End of file %u on device %s, Volume \"%s\"\n" +#~ msgstr "Fin de %s %u sur le device %s, Volume \"%s\"\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: bscan [ options ] \n" +#~ " -b bootstrap specify a bootstrap file\n" +#~ " -c specify configuration file\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -m update media info in database\n" +#~ " -D specify the driver database name (default NULL)\n" +#~ " -n specify the database name (default bacula)\n" +#~ " -u specify database user name (default bacula)\n" +#~ " -P specify database password (default none)\n" +#~ " -h specify database host (default NULL)\n" +#~ " -t specify database port (default 0)\n" +#~ " -p proceed inspite of I/O errors\n" +#~ " -r list records\n" +#~ " -s synchronize or store in database\n" +#~ " -S show scan progress periodically\n" +#~ " -v verbose\n" +#~ " -V specify Volume names (separated by |)\n" +#~ " -w specify working directory (default from conf " +#~ "file)\n" +#~ " -? print this message\n" +#~ "\n" +#~ msgstr "" +#~ "Copyright (C) 2000-2016 Kern Sibbald\n" +#~ "Version : %s (%s)\n" +#~ "\n" +#~ "Usage : dird [-f -s] [-c config_file] [-d debug_level] [config_file]\n" +#~ " -c utilise fich comme fichier de configuration\n" +#~ " -dnn positionne le niveau de debug à nn\n" +#~ " -f reste en avant-plan (pour debugger)\n" +#~ " -g groupid\n" +#~ " -r lance maintenant\n" +#~ " -s pas de signaux\n" +#~ " -t test - lit seulement le fichier de configuration\n" +#~ " -u userid\n" +#~ " -v affiche les messages utilisateurs\n" +#~ " -? 
affiche ce message.\n" +#~ "\n" + +#, fuzzy +#~ msgid "First Volume Size = %s\n" +#~ msgstr "Le nombre courant de fichier sur le Volume est : %u\n" + +#, fuzzy +#~ msgid "Using Database: %s, User: %s\n" +#~ msgstr "Mot clef inconnu : %s\n" + +#, fuzzy +#~ msgid "Create JobMedia for Job %s\n" +#~ msgstr "1991 Erreur sur la mise à jour du JobMedia\n" + +#, fuzzy +#~ msgid "Could not create JobMedia record for Volume=%s Job=%s\n" +#~ msgstr "Impossible de créer un JobMedia en base pour le Volume=%s Job=%s\n" + +#, fuzzy +#~ msgid "Pool record for %s found in DB.\n" +#~ msgstr "Pas de volume trouvé en base pour l'objet %d.\n" + +#, fuzzy +#~ msgid "VOL_LABEL: Pool record not found for Pool: %s\n" +#~ msgstr "La ressource Pool \"%s\" est introuvable pour le volume \"%s\"\n" + +#, fuzzy +#~ msgid "Pool type \"%s\" is OK.\n" +#~ msgstr "Le pool \"%s\" est introuvable.\n" + +#, fuzzy +#~ msgid "Media record for %s found in DB.\n" +#~ msgstr "Le nouveau volume \"%s\" existe déjà en base.\n" + +#, fuzzy +#~ msgid "VOL_LABEL: Media record not found for Volume: %s\n" +#~ msgstr "La ressource Pool \"%s\" est introuvable pour le volume \"%s\"\n" + +#, fuzzy +#~ msgid "SOS_LABEL: Found Job record for JobId: %d\n" +#~ msgstr "Impossible de récupérer le Job du JobId=%s : ERR=%s\n" + +#, fuzzy +#~ msgid "SOS_LABEL: Job record not found for JobId: %d\n" +#~ msgstr "La ressource Pool \"%s\" est introuvable !\n" + +#, fuzzy +#~ msgid "Could not update job record. ERR=%s\n" +#~ msgstr "Impossible d'initialiser la queue cliente : ERR=%s\n" + +#, fuzzy +#~ msgid "End of all Volumes. VolFiles=%u VolBlocks=%u VolBytes=%s\n" +#~ msgstr "Fin de média sur le Volume \"%s\" Octets=%s Blocs=%s à %s.\n" + +#, fuzzy +#~ msgid "Could not create File Attributes record. ERR=%s\n" +#~ msgstr "Impossible d'initialiser la queue cliente : ERR=%s\n" + +#, fuzzy +#~ msgid "Created File record: %s\n" +#~ msgstr "lit un seul enregistrement" + +#, fuzzy +#~ msgid "Could not create media record. ERR=%s\n" +#~ msgstr "Impossible d'initialiser la queue cliente : ERR=%s\n" + +#, fuzzy +#~ msgid "Could not update media record. ERR=%s\n" +#~ msgstr "Impossible d'initialiser la queue cliente : ERR=%s\n" + +#, fuzzy +#~ msgid "Created Media record for Volume: %s\n" +#~ msgstr "" +#~ "Impossible de récupérer les informations du Media pour le Volume %s : ERR=" +#~ "%s\n" + +#, fuzzy +#~ msgid "Updated Media record at end of Volume: %s\n" +#~ msgstr "" +#~ "Impossible de récupérer les informations du Media pour le Volume %s : ERR=" +#~ "%s\n" + +#, fuzzy +#~ msgid "Could not create pool record. ERR=%s\n" +#~ msgstr "Impossible d'initialiser la queue cliente : ERR=%s\n" + +#, fuzzy +#~ msgid "Created Pool record for Pool: %s\n" +#~ msgstr "Pas d'enregistrement trouvé en base pour : %s\n" + +#, fuzzy +#~ msgid "Could not get Client record. ERR=%s\n" +#~ msgstr "Impossible d'initialiser la queue cliente : ERR=%s\n" + +#, fuzzy +#~ msgid "Created Client record for Client: %s\n" +#~ msgstr "Impossible de récupérer le Job du JobId=%s : ERR=%s\n" + +#, fuzzy +#~ msgid "Fileset \"%s\" already exists.\n" +#~ msgstr "Le volume \"%s\" existe déjà en base.\n" + +#, fuzzy +#~ msgid "Could not create FileSet record \"%s\". ERR=%s\n" +#~ msgstr "Impossible d'initialiser la queue cliente : ERR=%s\n" + +#, fuzzy +#~ msgid "Created FileSet record \"%s\"\n" +#~ msgstr "Pas d'enregistrement trouvé en base pour : %s\n" + +#, fuzzy +#~ msgid "Could not create JobId record. 
ERR=%s\n" +#~ msgstr "Impossible d'initialiser la queue cliente : ERR=%s\n" + +#, fuzzy +#~ msgid "Could not update job start record. ERR=%s\n" +#~ msgstr "Impossible de trouver la ressource Storage \"%s\"\n" + +#, fuzzy +#~ msgid "Created new JobId=%u record for original JobId=%u\n" +#~ msgstr "Impossible de récupérer le Job du JobId=%s : ERR=%s\n" + +#, fuzzy +#~ msgid "Could not update JobId=%u record. ERR=%s\n" +#~ msgstr "Impossible d'initialiser la queue cliente : ERR=%s\n" + +#, fuzzy +#~ msgid "Job Termination code: %d" +#~ msgstr "Sélection terminée.\n" + +#, fuzzy +#~ msgid "" +#~ "%s\n" +#~ "JobId: %d\n" +#~ "Job: %s\n" +#~ "FileSet: %s\n" +#~ "Backup Level: %s\n" +#~ "Client: %s\n" +#~ "Start time: %s\n" +#~ "End time: %s\n" +#~ "Files Written: %s\n" +#~ "Bytes Written: %s\n" +#~ "Volume Session Id: %d\n" +#~ "Volume Session Time: %d\n" +#~ "Last Volume Bytes: %s\n" +#~ "Termination: %s\n" +#~ "\n" +#~ msgstr "" +#~ "%s %s (%s) : %s\n" +#~ " Build: %s %s %s\\n\"\n" +#~ " JobId : %d\n" +#~ " Job : %s\n" +#~ " FileSet : %s\n" +#~ " Client : %s\n" +#~ " Début : %s\n" +#~ " Fin : %s\n" +#~ " Fichiers attendus : %s\n" +#~ " Fichiers restaurés : %s\n" +#~ " Octets restaurés : %s\n" +#~ " Débit : %.1f Ko/s\n" +#~ " Erreurs du FD : %d\n" +#~ " Statut de fin du FD : %s\n" +#~ " Statut de fin du SD : %s\n" +#~ " Etat : %s\n" + +#, fuzzy +#~ msgid "Could not create JobMedia record. ERR=%s\n" +#~ msgstr "Impossible d'initialiser la queue cliente : ERR=%s\n" + +#, fuzzy +#~ msgid "Could not add MD5/SHA1 to File record. ERR=%s\n" +#~ msgstr "Impossible d'initialiser la queue cliente : ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: bsdjson [options] [config_file]\n" +#~ " -r get resource type \n" +#~ " -n get resource \n" +#~ " -l get only directives matching dirs (use with -r)\n" +#~ " -D get only data\n" +#~ " -c use as configuration file\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -t test - read config and exit\n" +#~ " -v verbose user messages\n" +#~ " -? print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Version : %s (%s)\n" +#~ "\n" +#~ "Usage : stored [options] [-c config_file] [config_file]\n" +#~ " -c utilise fich comme fichier de configuration\n" +#~ " -dnn positionne le niveau de debug à nn\n" +#~ " -dt affiche un timestamp devant chaque ligne de debug\n" +#~ " -f reste en avant-plan (pour debugger)\n" +#~ " -g groupid\n" +#~ " -p continue même en cas d'erreurs E/S\n" +#~ " -s pas de signaux\n" +#~ " -t test - lit seulement le fichier de configuration\n" +#~ " -u userid\n" +#~ " -v affiche les messages utilisateurs\n" +#~ " -? affiche ce message.\n" +#~ "\n" + +#, fuzzy +#~ msgid "No Storage resource defined in %s. Cannot continue.\n" +#~ msgstr "Pas d'enregistrement trouvé en base pour : %s\n" + +#, fuzzy +#~ msgid "Only one Storage resource permitted in %s\n" +#~ msgstr "Pas d'enregistrement trouvé en base pour : %s\n" + +#, fuzzy +#~ msgid "No Director resource defined in %s. Cannot continue.\n" +#~ msgstr "" +#~ "Pas de director défini pour %s\n" +#~ "Sans cette définition, il n'est pas possible de se connecter à celui-ci.\n" + +#, fuzzy +#~ msgid "No Messages resource defined in %s. 
Cannot continue.\n" +#~ msgstr "Pas d'enregistrement trouvé en base pour : %s\n" + +#, fuzzy +#~ msgid "Tape block size (%d) not multiple of system size (%d)\n" +#~ msgstr "" +#~ "La taille du buffer réseau %d n'est pas un multiple de la taille de bloc " +#~ "du lecteur.\n" + +#, fuzzy +#~ msgid "No archive name specified.\n" +#~ msgstr "Le client est déjà spécifié.\n" + +#, fuzzy +#~ msgid "Improper number of arguments specified.\n" +#~ msgstr "Le job est déjà spécifié.\n" + +#, fuzzy +#~ msgid "Total Volume bytes=%sB. Total Write rate = %sB/s\n" +#~ msgstr "Fin de média sur le Volume \"%s\" Octets=%s Blocs=%s à %s.\n" + +#, fuzzy +#~ msgid "Volume bytes=%sB. Write rate = %sB/s\n" +#~ msgstr "Fin de média sur le Volume \"%s\" Octets=%s Blocs=%s à %s.\n" + +#, fuzzy +#~ msgid "open device %s: OK\n" +#~ msgstr "Impossible d'ouvrir le device %s\n" + +#~ msgid "Enter Volume Name: " +#~ msgstr "Saisissez un nom de Volume : " + +#~ msgid "Device open failed. ERR=%s\n" +#~ msgstr "Erreur sur l'ouverture du périphérique. ERR=%s\n" + +#, fuzzy +#~ msgid "Wrote Volume label for volume \"%s\".\n" +#~ msgstr "Impossible de lire le label du Volume depuis le média.\n" + +#~ msgid "Volume has no label.\n" +#~ msgstr "Le Volume n'a pas de label.\n" + +#, fuzzy +#~ msgid "Volume label read correctly.\n" +#~ msgstr "1000 blocs relus correctement.\n" + +#, fuzzy +#~ msgid "I/O error on device: ERR=%s" +#~ msgstr "Erreur de lecture de %s:%s:%d : ERR=%s\n" + +#, fuzzy +#~ msgid "Volume type error: ERR=%s\n" +#~ msgstr "erreur de fermeture : ERR=%s\n" + +#, fuzzy +#~ msgid "Volume name error\n" +#~ msgstr "Nom de Volume trop long.\n" + +#, fuzzy +#~ msgid "Error creating label. ERR=%s" +#~ msgstr "" +#~ "Erreur pendant l'écriture des données vers le fichier de spool. ERR=%s\n" + +#, fuzzy +#~ msgid "Volume version error.\n" +#~ msgstr "Période de rétention d'un volume" + +#, fuzzy +#~ msgid "Bad Volume label type.\n" +#~ msgstr "Le Volume n'a pas de label.\n" + +#, fuzzy +#~ msgid "Unknown error.\n" +#~ msgstr "Erreur inconnue." + +#, fuzzy +#~ msgid "Bad status from load. ERR=%s\n" +#~ msgstr "fopen %s en erreur : ERR=%s\n" + +#, fuzzy +#~ msgid "Bad status from rewind. ERR=%s\n" +#~ msgstr "fopen %s en erreur : ERR=%s\n" + +#, fuzzy +#~ msgid "Bad status from weof. ERR=%s\n" +#~ msgstr "Début de purge des jobs du client \"%s\"\n" + +#, fuzzy +#~ msgid "Wrote 1 EOF to %s\n" +#~ msgstr "Where : %s\n" + +#, fuzzy +#~ msgid "Wrote %d EOFs to %s\n" +#~ msgstr "Ecriture de 1000 blocs de %d octets.\n" + +#, fuzzy +#~ msgid "Bad status from bsf. ERR=%s\n" +#~ msgstr "Début de purge des jobs du client \"%s\"\n" + +#, fuzzy +#~ msgid "Bad status from bsr. ERR=%s\n" +#~ msgstr "Début de purge des jobs du client \"%s\"\n" + +#, fuzzy +#~ msgid "Device status:\n" +#~ msgstr "" +#~ "\n" +#~ "Statut du Device :\n" + +#, fuzzy +#~ msgid "Device parameters:\n" +#~ msgstr "" +#~ "\n" +#~ "Statut du Device :\n" + +#~ msgid "Status:\n" +#~ msgstr "Statut :\n" + +#~ msgid "Do you want to continue? (y/n): " +#~ msgstr "Voulez vous continuer ? 
(y/n) : " + +#~ msgid "Command aborted.\n" +#~ msgstr "Commande annulée.\n" + +#, fuzzy +#~ msgid "Block %d i=%d\n" +#~ msgstr "Bloc : %d taille=%d\n" + +#, fuzzy +#~ msgid "Error writing record to block.\n" +#~ msgstr "Erreur pendant l'écriture du fichier bsr.\n" + +#, fuzzy +#~ msgid "Error writing block to device.\n" +#~ msgstr "Erreur pendant l'écriture du fichier bsr.\n" + +#, fuzzy +#~ msgid "Wrote first record of %d bytes.\n" +#~ msgstr "Ecriture de 1000 blocs de %d octets.\n" + +#, fuzzy +#~ msgid "Wrote second record of %d bytes.\n" +#~ msgstr "Ecriture de 1000 blocs de %d octets.\n" + +#, fuzzy +#~ msgid "Wrote third record of %d bytes.\n" +#~ msgstr "Ecriture de 1000 blocs de %d octets.\n" + +#, fuzzy +#~ msgid "Backspace file failed! ERR=%s\n" +#~ msgstr "Erreur sur l'ouverture du périphérique. ERR=%s\n" + +#, fuzzy +#~ msgid "Backspace record failed! ERR=%s\n" +#~ msgstr "fopen %s en erreur : ERR=%s\n" + +#, fuzzy +#~ msgid "Read block failed! ERR=%s\n" +#~ msgstr "Erreur sur l'ouverture du périphérique. ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Block re-read correct. Test succeeded!\n" +#~ msgstr "1000 blocs relus correctement.\n" + +#, fuzzy +#~ msgid "Write failed at block %u. stat=%d ERR=%s\n" +#~ msgstr "Impossible d'écrire au bloc %u.\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Error writing record to block.\n" +#~ msgstr "Erreur pendant l'écriture du fichier bsr.\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Error writing block to device.\n" +#~ msgstr "Erreur pendant l'écriture du fichier bsr.\n" + +#, fuzzy +#~ msgid "Wrote %d blocks of %d bytes.\n" +#~ msgstr "Ecriture de 1000 blocs de %d octets.\n" + +#, fuzzy +#~ msgid "Rewind OK.\n" +#~ msgstr "Rembobinage.\n" + +#, fuzzy +#~ msgid "Got EOF on tape.\n" +#~ msgstr "écrit un EOF sur la bande" + +#, fuzzy +#~ msgid "Read block %d failed! ERR=%s\n" +#~ msgstr "Erreur sur la re-lecture du dernier bloc en EOT. ERR=%s" + +#, fuzzy +#~ msgid "Read record failed. Block %d! ERR=%s\n" +#~ msgstr "fopen %s en erreur : ERR=%s\n" + +#, fuzzy +#~ msgid "%d blocks re-read correctly.\n" +#~ msgstr "1000 blocs relus correctement.\n" + +#, fuzzy +#~ msgid "Reposition to file:block %d:%d\n" +#~ msgstr "Repositionnement de (fichier:bloc) %u:%u à %u:%u\n" + +#, fuzzy +#~ msgid "Reposition error.\n" +#~ msgstr "Erreur pendant le re-positionnement. ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "Read block %d failed! file=%d blk=%d. ERR=%s\n" +#~ "\n" +#~ msgstr "Erreur sur la re-lecture du dernier bloc en EOT. ERR=%s" + +#, fuzzy +#~ msgid "Read record failed! ERR=%s\n" +#~ msgstr "fopen %s en erreur : ERR=%s\n" + +#, fuzzy +#~ msgid "Block %d re-read correctly.\n" +#~ msgstr "1000 blocs relus correctement.\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Do you wish to continue with the Autochanger test? (y/n): " +#~ msgstr "Voulez vous continuer ? (y/n) : " + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "\n" +#~ "=== Autochanger test ===\n" +#~ "\n" +#~ msgstr "autochanger list %s \n" + +#, fuzzy +#~ msgid "3301 Issuing autochanger \"loaded\" command.\n" +#~ msgstr "3301 Envoi de la commande \"loaded? drive %d\" à l'autochangeur.\n" + +#, fuzzy +#~ msgid "3991 Bad autochanger command: %s\n" +#~ msgstr "2991 Erreur dans la commande setdebug : %s\n" + +#, fuzzy +#~ msgid "Nothing loaded in the drive. OK.\n" +#~ msgstr " Le slot %d est chargé dans le lecteur %d.\n" + +#, fuzzy +#~ msgid "3302 Issuing autochanger \"unload %d %d\" command.\n" +#~ msgstr "3301 Envoi de la commande \"loaded? 
drive %d\" à l'autochangeur.\n" + +#, fuzzy +#~ msgid "3992 Bad autochanger command: %s\n" +#~ msgstr "2991 Erreur dans la commande setdebug : %s\n" + +#, fuzzy +#~ msgid "3303 Issuing autochanger \"load %d %d\" command.\n" +#~ msgstr "3301 Envoi de la commande \"loaded? drive %d\" à l'autochangeur.\n" + +#, fuzzy +#~ msgid "3303 Autochanger \"load %d %d\" status is OK.\n" +#~ msgstr "3305 Autochangeur \"load slot %d, drive %d\", le résultat est OK.\n" + +#, fuzzy +#~ msgid "3993 Bad autochanger command: %s\n" +#~ msgstr "2991 Erreur dans la commande setdebug : %s\n" + +#, fuzzy +#~ msgid "Now forward spacing 1 file.\n" +#~ msgstr "Pas de job sélectionné.\n" + +#, fuzzy +#~ msgid "Bad status from fsr. ERR=%s\n" +#~ msgstr "Début de purge des jobs du client \"%s\"\n" + +#, fuzzy +#~ msgid "Now forward spacing 2 files.\n" +#~ msgstr "Pas de job sélectionné.\n" + +#, fuzzy +#~ msgid "Now forward spacing 4 files.\n" +#~ msgstr "Pas de job sélectionné.\n" + +#, fuzzy +#~ msgid "Bad status from fsf. ERR=%s\n" +#~ msgstr "Début de purge des jobs du client \"%s\"\n" + +#, fuzzy +#~ msgid "Wrote one record of %d bytes.\n" +#~ msgstr "Ecriture de 1000 blocs de %d octets.\n" + +#, fuzzy +#~ msgid "Wrote block to device.\n" +#~ msgstr "Erreur pendant l'écriture du fichier bsr.\n" + +#, fuzzy +#~ msgid "Enter length to read: " +#~ msgstr "Entrez le nom du Volume : " + +#, fuzzy +#~ msgid "End of tape\n" +#~ msgstr "Fin de tous les Volumes.\n" + +#, fuzzy +#~ msgid "read error on %s. ERR=%s.\n" +#~ msgstr "erreur de déplacement (lseek) sur %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Bad status from read %d. ERR=%s\n" +#~ msgstr "Impossible d'écrire le marqueur EOF. ERR=%s\n" + +#, fuzzy +#~ msgid "1 block of %d bytes in file %d\n" +#~ msgstr "Ecriture de 1000 blocs de %d octets.\n" + +#, fuzzy +#~ msgid "%d blocks of %d bytes in file %d\n" +#~ msgstr "Ecriture de 1000 blocs de %d octets.\n" + +#, fuzzy +#~ msgid "End of File mark.\n" +#~ msgstr "1 fichier sélectionné.\n" + +#, fuzzy +#~ msgid "Total files=%d, blocks=%d, bytes = %s\n" +#~ msgstr " Total Octets=%s Blocs=%s Octets/Bloc=%s\n" + +#, fuzzy +#~ msgid "Short block read.\n" +#~ msgstr "Flush de %s blocs, écriture de EOF\n" + +#, fuzzy +#~ msgid "Error reading block. ERR=%s\n" +#~ msgstr "" +#~ "Erreur pendant l'écriture des données vers le fichier de spool. ERR=%s\n" + +#, fuzzy +#~ msgid "Device status: %u. ERR=%s\n" +#~ msgstr "" +#~ "\n" +#~ "Statut du Device :\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "This command simulates Bacula writing to a tape.\n" +#~ "It requires either one or two blank tapes, which it\n" +#~ "will label and write.\n" +#~ "\n" +#~ "If you have an autochanger configured, it will use\n" +#~ "the tapes that are in slots 1 and 2, otherwise, you will\n" +#~ "be prompted to insert the tapes when necessary.\n" +#~ "\n" +#~ "It will print a status approximately\n" +#~ "every 322 MB, and write an EOF every %s. If you have\n" +#~ "selected the simple test option, after writing the first tape\n" +#~ "it will rewind it and re-read the last block written.\n" +#~ "\n" +#~ "If you have selected the multiple tape test, when the first tape\n" +#~ "fills, it will ask for a second, and after writing a few more \n" +#~ "blocks, it will stop. Then it will begin re-reading the\n" +#~ "two tapes.\n" +#~ "\n" +#~ "This may take a long time -- hours! 
...\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Cette commande simule l'écriture d'une bande par Bacula.\n" +#~ "Ce test requiert une ou deux bandes vierges qui vont être\n" +#~ "labélisées et écrites.\n" +#~ "\n" +#~ "Si vous avez configuré un autochangeur, il utilisera les bandes\n" +#~ "des slots 1 et 2, sinon, le programme vous demandera d'insérer\n" +#~ "les bandes quand cela sera nécessaire.\n" +#~ "\n" +#~ "L'état d'avancement sera affiché tous les 322 Mo, et un EOF sera \n" +#~ "écrit tous les 3,2 Go. Si vous avez choisi le test simple, après avoir\n" +#~ "rempli la bande, elle sera rembobinée et le dernier bloc écrit sera\n" +#~ "relu.\n" +#~ "\n" +#~ "Si vous avez choisi le test multi-bande, quand la première bande sera\n" +#~ "remplie vous devrez insérer la nouvelle et après l'écriture de quelques\n" +#~ "blocs les deux bandes seront relues.\n" +#~ "\n" +#~ "Ce test peut durer longtemps (voir des heures).\n" + +#~ msgid "" +#~ "Do you want to run the simplified test (s) with one tape\n" +#~ "or the complete multiple tape (m) test: (s/m) " +#~ msgstr "" +#~ "Voulez vous lancer le test simplifié (s) utilisant une seule bande\n" +#~ "ou bien le test multi-bande complet (m) : (s/m) " + +#~ msgid "Simple test (single tape) selected.\n" +#~ msgstr "Sélection du test simplifié (utilisant une seule bande).\n" + +#~ msgid "Multiple tape test selected.\n" +#~ msgstr "Sélection du test multiple.\n" + +#~ msgid "Wrote Start of Session label.\n" +#~ msgstr "Ecriture du label de début de session.\n" + +#, fuzzy +#~ msgid "Flush block failed.\n" +#~ msgstr "Flush de %s blocs, écriture de EOF\n" + +#, fuzzy +#~ msgid "Wrote block=%u, file,blk=%u,%u VolBytes=%s rate=%sB/s\n" +#~ msgstr "Ecriture blk_block=%u, dev_blk_num=%u VolBytes=%s rate=%.1f KB/s\n" + +#~ msgid "%s Flush block, write EOF\n" +#~ msgstr "Flush de %s blocs, écriture de EOF\n" + +#, fuzzy +#~ msgid "Wrote 1000 blocks on second tape. Done.\n" +#~ msgstr "Ecriture de 1000 blocs de %d octets.\n" + +#, fuzzy +#~ msgid "Job canceled.\n" +#~ msgstr "Le job %s est annulé.\n" + +#, fuzzy +#~ msgid "Set ok=false after write_block_to_device.\n" +#~ msgstr "Erreur pendant l'écriture du fichier bsr.\n" + +#~ msgid "Wrote End of Session label.\n" +#~ msgstr "Ecriture du label de fin de session.\n" + +#~ msgid "Wrote state file last_block_num1=%d last_block_num2=%d\n" +#~ msgstr "Ecriture du fichier d'état last_block_num1=%d last_block_num2=%d\n" + +#~ msgid "Could not create state file: %s ERR=%s\n" +#~ msgstr "Impossible de créer le fichier d'état : %s ERR=%s\n" + +#, fuzzy +#~ msgid "do_unfill failed.\n" +#~ msgstr "Job échoué.\n" + +#, fuzzy +#~ msgid "%s: Error during test.\n" +#~ msgstr "Erreur pendant l'envoi de la liste d'inclusion.\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "The state file level has changed. You must redo\n" +#~ "the fill command.\n" +#~ msgstr "" +#~ "\n" +#~ "Impossible de trouver le fichier d'état : %s ERR=%s\n" +#~ "Vous devez relancer la commande \"fill\".\n" + +#~ msgid "" +#~ "\n" +#~ "Could not find the state file: %s ERR=%s\n" +#~ "You must redo the fill command.\n" +#~ msgstr "" +#~ "\n" +#~ "Impossible de trouver le fichier d'état : %s ERR=%s\n" +#~ "Vous devez relancer la commande \"fill\".\n" + +#~ msgid "Mount first tape. 
Press enter when ready: " +#~ msgstr "Chargez la première bande et appuyez sur \"Entrée\" : " + +#~ msgid "Rewinding.\n" +#~ msgstr "Rembobinage.\n" + +#~ msgid "Reading the first 10000 records from %u:%u.\n" +#~ msgstr "Lecture des 10000 premiers enregistrements depuis %u:%u.\n" + +#~ msgid "Reposition from %u:%u to %u:%u\n" +#~ msgstr "Re-positionnement de %u:%u à %u:%u\n" + +#~ msgid "Reposition error. ERR=%s\n" +#~ msgstr "Erreur pendant le re-positionnement. ERR=%s\n" + +#~ msgid "Reading block %u.\n" +#~ msgstr "Lecture du bloc %u.\n" + +#, fuzzy +#~ msgid "Error reading block: ERR=%s\n" +#~ msgstr "Erreur dans l'exécution de la commande : %s. ERR=%s\n" + +#~ msgid "Mount second tape. Press enter when ready: " +#~ msgstr "Chargez une deuxième bande et appuyez sur \"Entrée\" : " + +#~ msgid "Reposition from %u:%u to 0:1\n" +#~ msgstr "Re-positionnement de %u:%u à 0:1\n" + +#~ msgid "Reading block %d.\n" +#~ msgstr "Lecture du bloc %d.\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "The first block on the second tape matches.\n" +#~ "\n" +#~ msgstr "Ecriture de 1000 blocs de %d octets.\n" + +#~ msgid "10000 records read now at %d:%d\n" +#~ msgstr "10000 enregistrements lus maintenant à %d:%d\n" + +#~ msgid "Last block written" +#~ msgstr "Dernier bloc écrit" + +#, fuzzy +#~ msgid "Block read back" +#~ msgstr "Bloqué" + +#~ msgid "Block not written" +#~ msgstr "Bloc non écrit" + +#, fuzzy +#~ msgid "End of tape %d:%d. Volume Bytes=%s. Write rate = %sB/s\n" +#~ msgstr "Fin de média sur le Volume \"%s\" Octets=%s Blocs=%s à %s.\n" + +#, fuzzy +#~ msgid "Begin writing raw blocks of %u bytes.\n" +#~ msgstr "Ecriture de 1000 blocs de %d octets.\n" + +#, fuzzy +#~ msgid "test autochanger" +#~ msgstr "Slots d'un autochangeur" + +#, fuzzy +#~ msgid "clear tape errors" +#~ msgstr "Erreur fatale" + +#, fuzzy +#~ msgid "read filled tape" +#~ msgstr "rembobine la bande" + +#, fuzzy +#~ msgid "forward space a record" +#~ msgstr "lit un seul enregistrement" + +#~ msgid "print this command" +#~ msgstr "affiche cette commande" + +#, fuzzy +#~ msgid "write a Bacula label to the tape" +#~ msgstr "écrit un EOF sur la bande" + +#, fuzzy +#~ msgid "load a tape" +#~ msgstr "re-labélise une bande" + +#, fuzzy +#~ msgid "read and print the Bacula tape label" +#~ msgstr "lit un seul bloc bacula" + +#~ msgid "rewind the tape" +#~ msgstr "rembobine la bande" + +#, fuzzy +#~ msgid "read() tape block by block to EOT and report" +#~ msgstr "Bacula lit bloc par bloc jusqu'à la fin de la bande (EOT) et résume" + +#~ msgid "Bacula read block by block to EOT and report" +#~ msgstr "Bacula lit bloc par bloc jusqu'à la fin de la bande (EOT) et résume" + +#, fuzzy +#~ msgid "print tape status" +#~ msgstr "Statut :\n" + +#~ msgid "General test Bacula tape functions" +#~ msgstr "test général des fonctions Bacula sur un lecteur de bande" + +#~ msgid "write an EOF on the tape" +#~ msgstr "écrit un EOF sur la bande" + +#~ msgid "write a single Bacula block" +#~ msgstr "écrit un seul bloc bacula" + +#~ msgid "read a single record" +#~ msgstr "lit un seul enregistrement" + +#~ msgid "read a single Bacula block" +#~ msgstr "lit un seul bloc bacula" + +#, fuzzy +#~ msgid "quick fill command" +#~ msgstr "affiche cette commande" + +#~ msgid "\"%s\" is an invalid command\n" +#~ msgstr "\"%s\" est une commande invalide.\n" + +#, fuzzy +#~ msgid "Interactive commands:\n" +#~ msgstr "Erreur sur la commande : %s\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: btape \n" +#~ " -b specify bootstrap file\n" +#~ 
" -c set configuration file to file\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -p proceed inspite of I/O errors\n" +#~ " -s turn off signals\n" +#~ " -w set working directory to dir\n" +#~ " -v be verbose\n" +#~ " -? print this message.\n" +#~ "\n" +#~ msgstr "" +#~ ") %s %s %s\n" +#~ "\n" +#~ "Usage : bconsole [-s] [-c config_file] [-d niveau_debug]\n" +#~ " -c set configuration file to file\n" +#~ " -dnn set debug level to nn\n" +#~ " -s no signals\n" +#~ " -t test - lecture de la configuration et sortie\n" +#~ "\n" + +#, fuzzy +#~ msgid "Mount second Volume on device %s and press return when ready: " +#~ msgstr "Chargez une deuxième bande et appuyez sur \"Entrée\" : " + +#, fuzzy +#~ msgid "Mount blank Volume on device %s and press return when ready: " +#~ msgstr "Chargez une deuxième bande et appuyez sur \"Entrée\" : " + +#, fuzzy +#~ msgid "End of Volume \"%s\" %d records.\n" +#~ msgstr "Le volume \"%s\" existe déjà en base.\n" + +#, fuzzy +#~ msgid "Read block=%u, VolBytes=%s rate=%sB/s\n" +#~ msgstr "Ecriture blk_block=%u, dev_blk_num=%u VolBytes=%s rate=%.1f KB/s\n" + +#, fuzzy +#~ msgid "Cannot open Dev=%s, Vol=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s pour lecture. ERR=%s\n" + +#, fuzzy +#~ msgid "Nomatch," +#~ msgstr "Aucune correspondance" + +#, fuzzy +#~ msgid "Cannot find device \"%s\" in config file %s.\n" +#~ msgstr "Impossible d'ouvrir le fichier de configuration \"%s\" : %s\n" + +#, fuzzy +#~ msgid "Cannot init device %s\n" +#~ msgstr "Impossible de monter le device %s\n" + +#, fuzzy +#~ msgid "Cannot open %s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Could not find device \"%s\" in config file %s.\n" +#~ msgstr "Impossible de trouver la ressource \"%s\" utilisée ligne %d : %s\n" + +#, fuzzy +#~ msgid "Using device: \"%s\" for writing.\n" +#~ msgstr "Using Device \"%s\"\n" + +#, fuzzy +#~ msgid "Using device: \"%s\" for reading.\n" +#~ msgstr "Using Device \"%s\"\n" + +#, fuzzy +#~ msgid "Bad device call. Device not open\n" +#~ msgstr "Le Device %s n'est pas ouvert.\n" + +#, fuzzy +#~ msgid "Seek error: ERR=%s\n" +#~ msgstr "erreur de fermeture : ERR=%s\n" + +#~ msgid "lseek error on %s. ERR=%s.\n" +#~ msgstr "erreur de déplacement (lseek) sur %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Error closing device %s. ERR=%s.\n" +#~ msgstr "Erreur de lecture de %s:%s:%d : ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot run free space command. Results=%s ERR=%s\n" +#~ msgstr "Impossible de lancer la commande : %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Bad call to weof_dev. Device %s not open\n" +#~ msgstr "Le Device %s n'est pas ouvert.\n" + +#, fuzzy +#~ msgid "Bad call to eod. Device %s not open\n" +#~ msgstr "Le Device %s n'est pas ouvert.\n" + +#~ msgid "Unable to write EOF. ERR=%s\n" +#~ msgstr "Impossible d'écrire le marqueur EOF. ERR=%s\n" + +#~ msgid "End of medium on Volume \"%s\" Bytes=%s Blocks=%s at %s.\n" +#~ msgstr "Fin de média sur le Volume \"%s\" Octets=%s Blocs=%s à %s.\n" + +#~ msgid "New volume \"%s\" mounted on device %s at %s.\n" +#~ msgstr "Nouveau volume \"%s\" monté sur le device %s à %s.\n" + +#, fuzzy +#~ msgid "write_block_to_device Volume label failed. ERR=%s" +#~ msgstr "Impossible d'écrire le label de session. 
ERR=%s\n" + +#, fuzzy +#~ msgid "Connection request from %s failed.\n" +#~ msgstr "Demande de connexion échouée.\n" + +#, fuzzy +#~ msgid "[SF0101] Bad client command: %s" +#~ msgstr "Erreur dans la commande RunScript : %s\n" + +#, fuzzy +#~ msgid "[SF0102] Failed to connect to Client daemon: %s:%d\n" +#~ msgstr "Impossible de se connecter au client.\n" + +#, fuzzy +#~ msgid "[SF0103] Bad storage command: %s" +#~ msgstr "2991 Erreur dans la commande setdebug : %s\n" + +#, fuzzy +#~ msgid "[SF0104] Failed to connect to Storage daemon: %s:%d\n" +#~ msgstr "Impossible de se connecter au Storage daemon.\n" + +#, fuzzy +#~ msgid "3991 Bad setdebug command: %s\n" +#~ msgstr "2991 Erreur dans la commande setdebug : %s\n" + +#, fuzzy +#~ msgid "3903 Error scanning cancel command.\n" +#~ msgstr "2902 Erreur dans le décodage de la commande d'annulation.\n" + +#~ msgid "3904 Job %s not found.\n" +#~ msgstr "3904 Job %s non trouvé.\n" + +#, fuzzy +#~ msgid "3000 JobId=%ld Job=\"%s\" marked to be %s.\n" +#~ msgstr "JobId %s, Job %s marqué pour être annulé.\n" + +#, fuzzy +#~ msgid "3908 Error reserving Volume=\"%s\": %s" +#~ msgstr "Erreur pendant la récupération des informations sur un Volume : %s" + +#, fuzzy +#~ msgid "3999 Device \"%s\" not found or could not be opened.\n" +#~ msgstr "Le Device \"%s\" n'est pas ouvert ou il n'existe pas.\n" + +#, fuzzy +#~ msgid "3903 Error scanning label command: %s\n" +#~ msgstr "3909 Erreur pendant la lecture de la commande de montage : %s\n" + +#, fuzzy +#~ msgid "3900 Truncate cache for volume \"%s\" failed. ERR=%s\n" +#~ msgstr "Impossible de supprimer le volume \"%s\". ERR=%s" + +#, fuzzy +#~ msgid "3911 Error scanning truncate command: %s\n" +#~ msgstr "3909 Erreur pendant la lecture de la commande de montage : %s\n" + +#, fuzzy +#~ msgid "3900 Not yet implemented\n" +#~ msgstr "Le client est déjà spécifié.\n" + +#, fuzzy +#~ msgid "3912 Error scanning upload command: ERR=%s\n" +#~ msgstr "3909 Erreur pendant la lecture de la commande de montage : %s\n" + +#, fuzzy +#~ msgid "3999 Error with the upload: ERR=%s\n" +#~ msgstr "Erreur dans l'exécution de la commande : %s. ERR=%s\n" + +#, fuzzy +#~ msgid "3929 Unable to open device \"%s\": ERR=%s\n" +#~ msgstr "3910 Impossible d'ouvrir le device %s : ERR=%s\n" + +#~ msgid "3920 Cannot label Volume because it is already labeled: \"%s\"\n" +#~ msgstr "" +#~ "3920 Impossible de labéliser le Volume car il possède déjà le label : \"%s" +#~ "\"\n" + +#~ msgid "3921 Wrong volume mounted.\n" +#~ msgstr "3921 Mauvais volume monté.\n" + +#~ msgid "3922 Cannot relabel an ANSI/IBM labeled Volume.\n" +#~ msgstr "3922 Impossible de re-labéliser un Volume ANSI/IBM.\n" + +#, fuzzy +#~ msgid "3912 Failed to label Volume %s: ERR=%s\n" +#~ msgstr "3912 Impossible de labéliser le Volume : ERR=%s\n" + +#, fuzzy +#~ msgid "3913 Failed to open next part: ERR=%s\n" +#~ msgstr "3912 Impossible de labéliser le Volume : ERR=%s\n" + +#, fuzzy +#~ msgid "3917 Failed to label Volume: ERR=%s\n" +#~ msgstr "3912 Impossible de labéliser le Volume : ERR=%s\n" + +#, fuzzy +#~ msgid "3918 Failed to label Volume (no media): ERR=%s\n" +#~ msgstr "3912 Impossible de labéliser le Volume : ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "3919 Cannot label Volume. 
Unknown status %d from read_volume_label()\n" +#~ msgstr "" +#~ "3920 Impossible de labéliser le Volume car il possède déjà le label : \"%s" +#~ "\"\n" + +#~ msgid "3001 Mounted Volume: %s\n" +#~ msgstr "3001 Volume monté : %s\n" + +#, fuzzy +#~ msgid "" +#~ "3902 Cannot mount Volume on Storage Device \"%s\" because:\n" +#~ "%s" +#~ msgstr "" +#~ "3902 Impossible de monté le volume dans le storage device %s car :\n" +#~ "%s" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "[SW0106] Device \"%s\" requested by DIR could not be opened or does not " +#~ "exist.\n" +#~ msgstr "Le Device \"%s\" n'est pas ouvert ou il n'existe pas.\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "[SW0107] Device \"%s\" in changer \"%s\" requested by DIR could not be " +#~ "opened or does not exist.\n" +#~ msgstr "Le Device \"%s\" n'est pas ouvert ou il n'existe pas.\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "[SW0108] Device \"%s\" requested by DIR could not be opened or does not " +#~ "exist.\n" +#~ msgstr "Le Device \"%s\" n'est pas ouvert ou il n'existe pas.\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "[SW0109] Device \"%s\" in changer \"%s\" requested by DIR could not be " +#~ "opened or does not exist.\n" +#~ msgstr "Le Device \"%s\" n'est pas ouvert ou il n'existe pas.\n" + +#, fuzzy +#~ msgid "Specified slot ignored. " +#~ msgstr "fopen %s en erreur : ERR=%s\n" + +#, fuzzy +#~ msgid "3901 Unable to open device \"%s\": ERR=%s\n" +#~ msgstr "3910 Impossible d'ouvrir le device %s : ERR=%s\n" + +#, fuzzy +#~ msgid "3001 Device \"%s\" is mounted with Volume \"%s\"\n" +#~ msgstr "3001 Device %s est monté avec le volume \"%s\"\n" + +#, fuzzy +#~ msgid "" +#~ "3905 Device \"%s\" open but no Bacula volume is mounted.\n" +#~ "If this is not a blank tape, try unmounting and remounting the Volume.\n" +#~ msgstr "" +#~ "3905 Le device %s est ouvert mais il n'y a pas de volume Bacula monté.\n" +#~ "Si ce n'est pas une cartouche vierge, essayer de la démonter puis de la " +#~ "remonter.\n" + +#, fuzzy +#~ msgid "3001 Device \"%s\" is doing acquire.\n" +#~ msgstr "3902 Le Device %s est occupé en acquisition.\n" + +#, fuzzy +#~ msgid "3903 Device \"%s\" is being labeled.\n" +#~ msgstr "3934 Device %s est en cours d'initialisation.\n" + +#, fuzzy +#~ msgid "3001 Device \"%s\" is already mounted with Volume \"%s\"\n" +#~ msgstr "3001 Device %s est déjà monté avec le volume \"%s\"\n" + +#, fuzzy +#~ msgid "3002 Device \"%s\" is mounted.\n" +#~ msgstr "3002 Le device %s est monté.\n" + +#~ msgid "3907 %s" +#~ msgstr "3907 %s" + +#, fuzzy +#~ msgid "3906 File device \"%s\" is always mounted.\n" +#~ msgstr "3906 Le device fichier %s est toujours monté.\n" + +#, fuzzy +#~ msgid "3930 Device \"%s\" is being released.\n" +#~ msgstr "3934 Device %s est en cours d'initialisation.\n" + +#, fuzzy +#~ msgid "3905 Unknown wait state %d\n" +#~ msgstr "est dans un état inconnu %c" + +#~ msgid "3909 Error scanning mount command: %s\n" +#~ msgstr "3909 Erreur pendant la lecture de la commande de montage : %s\n" + +#, fuzzy +#~ msgid "3003 Device \"%s\" already enabled.\n" +#~ msgstr "3022 Le device %s est libéré.\n" + +#, fuzzy +#~ msgid "3002 Device \"%s\" enabled.\n" +#~ msgstr "3022 Le device %s est libéré.\n" + +#, fuzzy +#~ msgid "3004 Device \"%s\" deleted %d alert%s.\n" +#~ msgstr "3022 Le device %s est libéré.\n" + +#, fuzzy +#~ msgid "3907 Error scanning \"enable\" command: %s\n" +#~ msgstr "3909 Erreur pendant la lecture de la commande de montage : %s\n" + +#, fuzzy +#~ msgid "3002 Device \"%s\" disabled.\n" +#~ msgstr "3002 Le device %s est monté.\n" + 
+#, fuzzy +#~ msgid "3907 Error scanning \"disable\" command: %s\n" +#~ msgstr "3909 Erreur pendant la lecture de la commande de montage : %s\n" + +#, fuzzy +#~ msgid "3002 Device \"%s\" unmounted.\n" +#~ msgstr "3002 Le device %s est démonté.\n" + +#, fuzzy +#~ msgid "3901 Device \"%s\" is already unmounted.\n" +#~ msgstr "3901 Le device %s est déjà démonté.\n" + +#, fuzzy +#~ msgid "3001 Device \"%s\" unmounted.\n" +#~ msgstr "3001 Le device %s est démonté.\n" + +#, fuzzy +#~ msgid "3902 Device \"%s\" is busy in acquire.\n" +#~ msgstr "3902 Le Device %s est occupé en acquisition.\n" + +#, fuzzy +#~ msgid "3907 Error scanning unmount command: %s\n" +#~ msgstr "3909 Erreur pendant la lecture de la commande de montage : %s\n" + +#, fuzzy +#~ msgid "3916 Error scanning action_on_purge command\n" +#~ msgstr "2902 Erreur dans le décodage de la commande d'annulation.\n" + +#, fuzzy +#~ msgid "3921 Device \"%s\" already released.\n" +#~ msgstr "3921 Le Device %s est déjà libéré.\n" + +#, fuzzy +#~ msgid "3922 Device \"%s\" waiting for sysop.\n" +#~ msgstr "3922 Device %s est en attente d'une intervention sysop.\n" + +#, fuzzy +#~ msgid "3922 Device \"%s\" waiting for mount.\n" +#~ msgstr "3922 Le Device %s est en atttente d'un montage.\n" + +#, fuzzy +#~ msgid "3923 Device \"%s\" is busy in acquire.\n" +#~ msgstr "3902 Le Device %s est occupé en acquisition.\n" + +#, fuzzy +#~ msgid "3914 Device \"%s\" is being labeled.\n" +#~ msgstr "3934 Device %s est en cours d'initialisation.\n" + +#, fuzzy +#~ msgid "3022 Device \"%s\" released.\n" +#~ msgstr "3022 Le device %s est libéré.\n" + +#, fuzzy +#~ msgid "3927 Error scanning release command: %s\n" +#~ msgstr "3909 Erreur pendant la lecture de la commande de montage : %s\n" + +#, fuzzy +#~ msgid "[SF0110] Could not create bootstrap file %s: ERR=%s\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s : ERR=%s\n" + +#, fuzzy +#~ msgid "[SF0111] Error parsing bootstrap file.\n" +#~ msgstr "Erreur pendant l'écriture du fichier bsr.\n" + +#, fuzzy +#~ msgid "3998 Device \"%s\" is not an autochanger.\n" +#~ msgstr "3995 Le Device %s n'est pas un autochangeur.\n" + +#, fuzzy +#~ msgid "3909 Error scanning autochanger drives/list/slots command: %s\n" +#~ msgstr "3909 Erreur pendant la lecture de la commande de montage : %s\n" + +#, fuzzy +#~ msgid "3909 Error scanning readlabel command: %s\n" +#~ msgstr "3909 Erreur pendant la lecture de la commande de montage : %s\n" + +#~ msgid "3001 Volume=%s Slot=%d\n" +#~ msgstr "3001 Volume=%s Slot=%d\n" + +#, fuzzy +#~ msgid "3931 Device \"%s\" is BLOCKED. user unmounted.\n" +#~ msgstr "3931 Device %s est BLOQUE, démonté par l'utilisateur.\n" + +#, fuzzy +#~ msgid "" +#~ "3932 Device \"%s\" is BLOCKED. 
user unmounted during wait for media/" +#~ "mount.\n" +#~ msgstr "" +#~ "3932 Device %s est BLOQUE, démonté par l'utilisateur alors que bacula " +#~ "était en attente d'un média.\n" + +#, fuzzy +#~ msgid "3933 Device \"%s\" is BLOCKED waiting for media.\n" +#~ msgstr "3933 Device %s est bloqué en attente d'un media.\n" + +#, fuzzy +#~ msgid "3934 Device \"%s\" is being initialized.\n" +#~ msgstr "3934 Device %s est en cours d'initialisation.\n" + +#, fuzzy +#~ msgid "3935 Device \"%s\" is blocked labeling a Volume.\n" +#~ msgstr "3935 Device %s est bloqué par une labélisation de volume.\n" + +#, fuzzy +#~ msgid "3935 Device \"%s\" is blocked for unknown reason.\n" +#~ msgstr "3935 Device %s est bloqué pour une raison inconnue.\n" + +#, fuzzy +#~ msgid "3936 Device \"%s\" is busy reading.\n" +#~ msgstr "3936 Device %s est occupé en lecture.\n" + +#, fuzzy +#~ msgid "3937 Device \"%s\" is busy with writers=%d reserved=%d.\n" +#~ msgstr "3937 Device %s est occupé avec %d flux en écriture.\n" + +#, fuzzy +#~ msgid "FD command not found: %s\n" +#~ msgstr "Job non trouvé : %s\n" + +#, fuzzy +#~ msgid "Cannot open session, received bad parameters.\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Rewind failed: device %s is not open.\n" +#~ msgstr "Le Device %s n'est pas ouvert.\n" + +#, fuzzy +#~ msgid "Bad call to reposition. Device not open\n" +#~ msgstr "Le Device %s n'est pas ouvert.\n" + +#, fuzzy +#~ msgid "Could not open file device %s. No Volume name given.\n" +#~ msgstr "Le volume \"%s\" n'est pas dans le device %s.\n" + +#, fuzzy +#~ msgid "Could not open(%s,%s,0640): ERR=%s\n" +#~ msgstr "Impossible d'ouvrir %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to truncate device %s. ERR=%s\n" +#~ msgstr "3910 Impossible d'ouvrir le device %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to stat device %s. ERR=%s\n" +#~ msgstr "3910 Impossible d'ouvrir le device %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Could not reopen: %s, ERR=%s\n" +#~ msgstr "Impossible d'ouvrir %s : ERR=%s\n" + +#~ msgid "Device %s cannot be %smounted. ERR=%s\n" +#~ msgstr "Le Device %s ne peut pas être %smounted. ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "Ready to append to end of Volumes \"%s\" ameta size=%s adata size=%s\n" +#~ msgstr "" +#~ "Prêt à ajouter des données à la fin du volume \"%s\" part=%d size=%s\n" +#~ "\n" + +#, fuzzy +#~ msgid "Ready to append to end of Volume \"%s\" size=%s\n" +#~ msgstr "" +#~ "Prêt à ajouter des données à la fin du volume \"%s\" part=%d size=%s\n" +#~ "\n" + +#, fuzzy +#~ msgid "" +#~ "For Volume \"%s\":\n" +#~ " The sizes do not match! Metadata Volume=%s Catalog=%s\n" +#~ " Correcting Catalog\n" +#~ msgstr "" +#~ "Impossible d'écrire sur le volume \"%s\" car :\n" +#~ "Les tailles ne correspondent pas. Volume=%s Catalogue=%s\n" + +#, fuzzy +#~ msgid "" +#~ "For aligned Volume \"%s\":\n" +#~ " Aligned sizes do not match! Aligned Volume=%s Catalog=%s\n" +#~ " Correcting Catalog\n" +#~ msgstr "" +#~ "Impossible d'écrire sur le volume \"%s\" car :\n" +#~ "Les tailles ne correspondent pas. Volume=%s Catalogue=%s\n" + +#, fuzzy +#~ msgid "Error updating Catalog\n" +#~ msgstr "Impossible d'ouvrir le fichier de données %s.\n" + +#, fuzzy +#~ msgid "" +#~ "Bacula cannot write on disk Volume \"%s\" because: The sizes do not " +#~ "match! Volume=%s Catalog=%s\n" +#~ msgstr "" +#~ "Impossible d'écrire sur le volume \"%s\" car :\n" +#~ "Les tailles ne correspondent pas. 
Volume=%s Catalogue=%s\n" + +#, fuzzy +#~ msgid "" +#~ "Connection from unknown Director %s at %s rejected.\n" +#~ "Please see " +#~ msgstr "" +#~ "Connexion d'un Director inconnu %s à %s rejeté.\n" +#~ "\n" + +#, fuzzy +#~ msgid "Invalid connection from %s. Len=%d\n" +#~ msgstr "Connexion invalide. Len=%d\n" + +#, fuzzy +#~ msgid "Invalid Hello from %s. Len=%d\n" +#~ msgstr "Connexion invalide. Len=%d\n" + +#, fuzzy +#~ msgid "Client connect failed: Job name not found: %s\n" +#~ msgstr "Job non trouvé : %s\n" + +#, fuzzy +#~ msgid "Unable to authenticate File daemon\n" +#~ msgstr "Impossible de se connecter au client.\n" + +#, fuzzy +#~ msgid "Client socket not open. Could not connect to Client.\n" +#~ msgstr "Impossible de se connecter au Client.\n" + +#, fuzzy +#~ msgid "Recv request to Client failed. ERR=%s\n" +#~ msgstr "Erreur sur l'ouverture du périphérique. ERR=%s\n" + +#, fuzzy +#~ msgid "Bad Hello from Client: %s.\n" +#~ msgstr "Début de purge des jobs du client \"%s\"\n" + +#, fuzzy +#~ msgid "[SE0001] Unable to stat device %s at %s: ERR=%s\n" +#~ msgstr "3910 Impossible d'ouvrir le device %s : ERR=%s\n" + +#, fuzzy +#~ msgid "[SF0001] Invalid device type=%d name=\"%s\"\n" +#~ msgstr "Option d'écrasement (Replace) invalide : %s\n" + +#, fuzzy +#~ msgid "[SA0003] Unable to stat mount point %s: ERR=%s\n" +#~ msgstr "3910 Impossible d'ouvrir le device %s : ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0006] Block size %u on device %s is too large, using default %u\n" +#~ msgstr "" +#~ "La taille de bloc %u sur le Device %s est trop grande, utilisation de la " +#~ "valeur par défaut %u\n" + +#, fuzzy +#~ msgid "" +#~ "[SW0007] Max block size %u not multiple of device %s block size=%d.\n" +#~ msgstr "" +#~ "La taille du buffer réseau %d n'est pas un multiple de la taille de bloc " +#~ "du lecteur.\n" + +#, fuzzy +#~ msgid "[SA0009] Unable to init mutex: ERR=%s\n" +#~ msgstr "Impossible d'initialiser le muxtex : ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0010] Unable to init cond variable: ERR=%s\n" +#~ msgstr "Impossible d'initialiser le muxtex : ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0011] Unable to init cond variable: ERR=%s\n" +#~ msgstr "Impossible d'initialiser le muxtex : ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0012] Unable to init spool mutex: ERR=%s\n" +#~ msgstr "Impossible d'initialiser le muxtex : ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0013] Unable to init acquire mutex: ERR=%s\n" +#~ msgstr "Impossible d'initialiser le muxtex : ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0014] Unable to init freespace mutex: ERR=%s\n" +#~ msgstr "Impossible d'initialiser le muxtex : ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0015] Unable to init read acquire mutex: ERR=%s\n" +#~ msgstr "Impossible d'initialiser le muxtex : ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0016] Unable to init volcat mutex: ERR=%s\n" +#~ msgstr "Impossible d'initialiser le muxtex : ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0017] Unable to init dcrs mutex: ERR=%s\n" +#~ msgstr "Impossible d'initialiser le muxtex : ERR=%s\n" + +#, fuzzy +#~ msgid "[SF0020] dlopen of SD driver=%s at %s failed: ERR=%s\n" +#~ msgstr "Impossible de supprimer le volume \"%s\". 
ERR=%s" + +#, fuzzy +#~ msgid "Couldn't rewind %s device %s: ERR=%s\n" +#~ msgstr "Impossible de trouver le userid %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Wrong Volume mounted on %s device %s: Wanted %s have %s\n" +#~ msgstr "Nouveau volume \"%s\" monté sur le device %s à %s.\n" + +#, fuzzy +#~ msgid "Too many tries: %s" +#~ msgstr "Trop d'élément dans la ressource %s\n" + +#~ msgid "Could not read Volume label from block.\n" +#~ msgstr "Impossible de lire le label du Volume depuis le média.\n" + +#, fuzzy +#~ msgid "Could not unserialize Volume label: ERR=%s\n" +#~ msgstr "Impossible d'initialiser la queue cliente : ERR=%s\n" + +#, fuzzy +#~ msgid "Volume on %s device %s has wrong Bacula version. Wanted %d got %d\n" +#~ msgstr "Le volume sur %s possède un mauvais label Bacula : %x\n" + +#, fuzzy +#~ msgid "Volume on %s device %s has bad Bacula label type: %ld\n" +#~ msgstr "Le volume sur %s possède un mauvais label Bacula : %x\n" + +#, fuzzy +#~ msgid "" +#~ "Wrong Volume Type. Wanted an Aligned Volume %s on device %s, but got: %s\n" +#~ msgstr "Nouveau volume \"%s\" monté sur le device %s à %s.\n" + +#, fuzzy +#~ msgid "" +#~ "Wrong Volume Type. Wanted a Cloud Volume %s on device %s, but got: %s\n" +#~ msgstr "Nouveau volume \"%s\" monté sur le device %s à %s.\n" + +#, fuzzy +#~ msgid "Could not reserve volume %s on %s device %s\n" +#~ msgstr "Impossible de trouver le prochain volume pour le Job %s.\n" + +#, fuzzy +#~ msgid "Cannot write Volume label to block for %s device %s\n" +#~ msgstr "Impossible d'écrire le label du Volume sur le Device %s\n" + +#, fuzzy +#~ msgid "Open %s device %s Volume \"%s\" failed: ERR=%s" +#~ msgstr "Impossible de supprimer le volume \"%s\". ERR=%s" + +#, fuzzy +#~ msgid "Open %s device %s Volume \"%s\" failed: ERR=%s\n" +#~ msgstr "Impossible de supprimer le volume \"%s\". ERR=%s" + +#, fuzzy +#~ msgid "Rewind error on %s device %s: ERR=%s\n" +#~ msgstr "Erreur de lecture de %s:%s:%d : ERR=%s\n" + +#, fuzzy +#~ msgid "Truncate error on %s device %s: ERR=%s\n" +#~ msgstr "Erreur d'écriture à %u:%u sur le device %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Failed to re-open device after truncate on %s device %s: ERR=%s" +#~ msgstr "" +#~ "Impossible de se positionner à la fin du média sur le device %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to write %s device %s: ERR=%s\n" +#~ msgstr "Impossible de se connecter à %s sur %s:%d. 
ERR=%s\n" + +#, fuzzy +#~ msgid "Recycled volume \"%s\" on %s device %s, all previous data lost.\n" +#~ msgstr "" +#~ "Recyclage du volume \"%s\" sur le lecteur %s, les précédentes données " +#~ "sont perdues.\n" + +#, fuzzy +#~ msgid "Wrote label to prelabeled Volume \"%s\" on %s device %s\n" +#~ msgstr "" +#~ "Ecriture du label sur le Volume pré-labélisé \"%s\" sur le lecteur %s\n" + +#, fuzzy +#~ msgid "Bad Volume session label request=%d\n" +#~ msgstr "Le Volume n'a pas de label.\n" + +#, fuzzy +#~ msgid "Unknown %d" +#~ msgstr "sig inconnu %d" + +#, fuzzy +#~ msgid "Date label written: %s\n" +#~ msgstr "Dernier bloc écrit" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%s Record:\n" +#~ "JobId : %d\n" +#~ "VerNum : %d\n" +#~ "PoolName : %s\n" +#~ "PoolType : %s\n" +#~ "JobName : %s\n" +#~ "ClientName : %s\n" +#~ msgstr "" +#~ ") : %s\n" +#~ " JobId : %d\n" +#~ " Job : %s\n" +#~ " Début : %s\n" +#~ " Fin : %s\n" +#~ " Statut de fin : %s\n" +#~ "\n" + +#, fuzzy +#~ msgid "" +#~ "JobFiles : %s\n" +#~ "JobBytes : %s\n" +#~ "StartBlock : %s\n" +#~ "EndBlock : %s\n" +#~ "StartFile : %s\n" +#~ "EndFile : %s\n" +#~ "JobErrors : %s\n" +#~ "JobStatus : %c\n" +#~ msgstr "" +#~ ") : %s\n" +#~ " JobId : %d\n" +#~ " Job : %s\n" +#~ " Début : %s\n" +#~ " Fin : %s\n" +#~ " Statut de fin : %s\n" +#~ "\n" + +#, fuzzy +#~ msgid "Date written : %s\n" +#~ msgstr "Where : %s\n" + +#, fuzzy +#~ msgid "Fresh Volume" +#~ msgstr "labéliser une bande" + +#, fuzzy +#~ msgid "Volume" +#~ msgstr "Fichiers du Volume" + +#, fuzzy +#~ msgid "pthread_cond_wait failure. ERR=%s\n" +#~ msgstr "Erreur sur l'ouverture du périphérique. ERR=%s\n" + +#, fuzzy +#~ msgid "unknown blocked code" +#~ msgstr "source inconnue" + +#, fuzzy +#~ msgid "Too many errors trying to mount %s device %s.\n" +#~ msgstr "Le volume \"%s\" n'est pas dans le device %s.\n" + +#~ msgid "Job %d canceled.\n" +#~ msgstr "Le job %d est annulé.\n" + +#, fuzzy +#~ msgid "Open of %s device %s Volume \"%s\" failed: ERR=%s\n" +#~ msgstr "Impossible de supprimer le volume \"%s\". ERR=%s" + +#~ msgid "Volume \"%s\" previously written, moving to end of data.\n" +#~ msgstr "" +#~ "Le volume \"%s\" contient des données, re-positionnement à la fin.\n" + +#, fuzzy +#~ msgid "Unable to position to end of data on %s device %s: ERR=%s\n" +#~ msgstr "" +#~ "Impossible de se positionner à la fin du média sur le device %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Volume \"%s\" not loaded on %s device %s.\n" +#~ msgstr "Le volume \"%s\" n'est pas dans le device %s.\n" + +#~ msgid "" +#~ "Director wanted Volume \"%s\".\n" +#~ " Current Volume \"%s\" not acceptable because:\n" +#~ " %s" +#~ msgstr "" +#~ "Le director voulait utiliser le volume \"%s\".\n" +#~ " Le volume courant \"%s\" n'est pas utilisable car :\n" +#~ " %s" + +#, fuzzy +#~ msgid "%s device %s not configured to autolabel Volumes.\n" +#~ msgstr "" +#~ "Attention, le device %s n'est pas configuré pour labéliser " +#~ "automatiquement les volumes.\n" + +#~ msgid "Marking Volume \"%s\" in Error in Catalog.\n" +#~ msgstr "Le volume \"%s\" est marqué en Erreur dans le catalogue.\n" + +#, fuzzy +#~ msgid "" +#~ "Autochanger Volume \"%s\" not found in slot %d.\n" +#~ " Setting InChanger to zero in catalog.\n" +#~ msgstr "" +#~ "Volume \"%s\" absent du catalogue. mise à zéro de InChanger pour le Slot=" +#~ "%d.\n" + +#, fuzzy +#~ msgid "" +#~ "Invalid tape position on volume \"%s\" on device %s. Expected %d, got %d\n" +#~ msgstr "" +#~ "Fin du volume \"%s\" à %u:%u sur le device %s. 
Ecriture de %u octets, eu " +#~ "%d.\n" + +#, fuzzy +#~ msgid "Cannot open %s Dev=%s, Vol=%s for reading.\n" +#~ msgstr "Impossible d'ouvrir le fichier %s pour lecture. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to set eotmodel on device %s: ERR=%s\n" +#~ msgstr "3910 Impossible d'ouvrir le device %s : ERR=%s\n" + +#, fuzzy +#~ msgid " Bacula status:" +#~ msgstr "Message de Bacula" + +#~ msgid " file=%d block=%d\n" +#~ msgstr " fichier=%d bloc=%d\n" + +#, fuzzy +#~ msgid "ioctl MTIOCGET error on %s. ERR=%s.\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid " Device status:" +#~ msgstr "" +#~ "\n" +#~ "Statut du Device :\n" + +#, fuzzy +#~ msgid "unknown func code %d" +#~ msgstr "source inconnue" + +#, fuzzy +#~ msgid "I/O function \"%s\" not supported on this device.\n" +#~ msgstr "Le volume \"%s\" n'est pas dans le device %s.\n" + +#, fuzzy +#~ msgid "Cannot open bootstrap file %s: %s\n" +#~ msgstr "Impossible d'ouvrir le fichier bootstrap %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Device \"%s\" in bsr at inappropriate place.\n" +#~ msgstr "3902 Le Device %s est occupé en acquisition.\n" + +#, fuzzy +#~ msgid "REGEX '%s' compile error. ERR=%s\n" +#~ msgstr "erreur de fermeture : ERR=%s\n" + +#, fuzzy +#~ msgid "JobType not yet implemented\n" +#~ msgstr "Le client est déjà spécifié.\n" + +#, fuzzy +#~ msgid "JobLevel not yet implemented\n" +#~ msgstr "Le client est déjà spécifié.\n" + +#, fuzzy +#~ msgid "FileIndex : %u\n" +#~ msgstr "Fichier : %s\n" + +#, fuzzy +#~ msgid "VolumeName : %s\n" +#~ msgstr "Saisissez un nom de Volume : " + +#, fuzzy +#~ msgid " MediaType : %s\n" +#~ msgstr "fopen %s en erreur : ERR=%s\n" + +#, fuzzy +#~ msgid " Device : %s\n" +#~ msgstr "Where : %s\n" + +#, fuzzy +#~ msgid " Slot : %d\n" +#~ msgstr "Le nouveau slot est : %d\n" + +#, fuzzy +#~ msgid "Client : %s\n" +#~ msgstr "Client" + +#, fuzzy +#~ msgid "done : %s\n" +#~ msgstr "Where : %s\n" + +#, fuzzy +#~ msgid "" +#~ "Bootstrap file error: %s\n" +#~ " : Line %d, col %d of file %s\n" +#~ "%s\n" +#~ msgstr "" +#~ "Erreur de config : %s\n" +#~ " : ligne %d, col %d du fichier %s\n" +#~ "%s\n" +#~ "%s" + +#, fuzzy +#~ msgid "No Volume names found for restore.\n" +#~ msgstr "Aucun volume trouvé pour la restauration.\n" + +#, fuzzy +#~ msgid "Error sending header to Client. ERR=%s\n" +#~ msgstr "Erreur pendant lors de la récupération du pool. ERR=%s\n" + +#, fuzzy +#~ msgid "Error sending to FD. ERR=%s\n" +#~ msgstr "" +#~ "Erreur pendant l'écriture des données vers le fichier de spool. ERR=%s\n" + +#, fuzzy +#~ msgid "Error sending data to Client. ERR=%s\n" +#~ msgstr "" +#~ "Erreur pendant l'écriture des données vers le fichier de spool. ERR=%s\n" + +#, fuzzy +#~ msgid "Error sending to File daemon. ERR=%s\n" +#~ msgstr "Erreur pendant lors de la récupération du pool. ERR=%s\n" + +#, fuzzy +#~ msgid "End of Volume \"%s\" at addr=%s on device %s.\n" +#~ msgstr "Le volume \"%s\" n'est pas dans le device %s.\n" + +#, fuzzy +#~ msgid "Forward spacing Volume \"%s\" to addr=%s\n" +#~ msgstr "" +#~ "Postionnement en avant du Volume \"%s\" sur le fichier:bloc %u:%u.\n" + +#, fuzzy +#~ msgid "Unknown code %d\n" +#~ msgstr "Erreur inconnue." + +#, fuzzy +#~ msgid "unknown: %d" +#~ msgstr "inconnu" + +#, fuzzy +#~ msgid "Unable to initialize reservation lock. ERR=%s\n" +#~ msgstr "Impossible d'initialiser le verrou sur la base. 
ERR=%s\n" + +#, fuzzy +#~ msgid "3939 Could not get dcr\n" +#~ msgstr "Impossible de créer la structure BSOCK cliente.\n" + +#, fuzzy +#~ msgid "Device reservation failed for JobId=%d: %s\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s : ERR=%s\n" + +#~ msgid "Failed command: %s\n" +#~ msgstr "Erreur sur la commande : %s\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ " Device \"%s\" in changer \"%s\" requested by DIR could not be opened " +#~ "or does not exist.\n" +#~ msgstr "Le Device \"%s\" n'est pas ouvert ou il n'existe pas.\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ " Device \"%s\" requested by DIR could not be opened or does not " +#~ "exist.\n" +#~ msgstr "Le Device \"%s\" n'est pas ouvert ou il n'existe pas.\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ " Device \"%s\" requested by DIR is disabled.\n" +#~ msgstr "3934 Device %s est en cours d'initialisation.\n" + +#, fuzzy +#~ msgid "3926 Could not get dcr for device: %s\n" +#~ msgstr "Impossible d'ouvrir le device %s\n" + +#, fuzzy +#~ msgid "3603 JobId=%u %s device %s is busy reading.\n" +#~ msgstr "3603 JobId=%u device %s est occupé en lecture.\n" + +#, fuzzy +#~ msgid "3604 JobId=%u %s device %s is BLOCKED due to user unmount.\n" +#~ msgstr "" +#~ "3604 JobId=%u device %s est bloqué car il a été démonté par l'utilisateur " +#~ "(unmount).\n" + +#, fuzzy +#~ msgid "3601 JobId=%u %s device %s is BLOCKED due to user unmount.\n" +#~ msgstr "" +#~ "3601 JobId=%u device %s est BLOQUE car il a été demonté par " +#~ "l'utilisateur.\n" + +#, fuzzy +#~ msgid "" +#~ "3602 JobId=%u %s device %s is busy (already reading/writing). read=%d, " +#~ "writers=%d reserved=%d\n" +#~ msgstr "3602 JobId=%u device %s est occupé (à lire ou écrire).\n" + +#, fuzzy +#~ msgid "3609 JobId=%u Max concurrent jobs=%d exceeded on %s device %s.\n" +#~ msgstr "" +#~ "3607 JobId=%u voulait Vol=\"%s\", c'est le Vol=\"%s\" qui est dans le " +#~ "drive %s.\n" + +#, fuzzy +#~ msgid "3611 JobId=%u Volume max jobs=%d exceeded on %s device %s.\n" +#~ msgstr "" +#~ "3607 JobId=%u voulait Vol=\"%s\", c'est le Vol=\"%s\" qui est dans le " +#~ "drive %s.\n" + +#, fuzzy +#~ msgid "" +#~ "3608 JobId=%u wants Pool=\"%s\" but have Pool=\"%s\" nreserve=%d on %s " +#~ "device %s.\n" +#~ msgstr "" +#~ "3608 JobId=%u voulait le Pool=\"%s\", mais c'est le Pool=\"%s\" qui est " +#~ "dans le drive %s.\n" + +#, fuzzy +#~ msgid "3605 JobId=%u wants free drive but %s device %s is busy.\n" +#~ msgstr "" +#~ "3605 JobId=%u voulait libérer le lecteur, mais le device %s est occupé.\n" + +#, fuzzy +#~ msgid "" +#~ "3606 JobId=%u prefers mounted drives, but %s device %s has no Volume.\n" +#~ msgstr "" +#~ "3606 JobId=%u voulait un lecteur monté, mais le lecteur %s est vide.\n" + +#, fuzzy +#~ msgid "" +#~ "3607 JobId=%u wants Vol=\"%s\" drive has Vol=\"%s\" on %s device %s.\n" +#~ msgstr "" +#~ "3607 JobId=%u voulait Vol=\"%s\", c'est le Vol=\"%s\" qui est dans le " +#~ "drive %s.\n" + +#, fuzzy +#~ msgid "3911 JobId=%u failed reserve %s device %s.\n" +#~ msgstr "" +#~ "3605 JobId=%u voulait libérer le lecteur, mais le device %s est occupé.\n" + +#, fuzzy +#~ msgid "Job cancelled.\n" +#~ msgstr "Le job %s est annulé.\n" + +#, fuzzy +#~ msgid "Spooling statistics:\n" +#~ msgstr "Spooling des données...\n" + +#~ msgid "Spooling data ...\n" +#~ msgstr "Spooling des données...\n" + +#~ msgid "Open data spool file %s failed: ERR=%s\n" +#~ msgstr "Erreur pendant l'ouverture fichier de spool %s. ERR=%s\n" + +#~ msgid "Committing spooled data to Volume \"%s\". 
Despooling %s bytes ...\n" +#~ msgstr "" +#~ "Transfert des données spoolées sur le Volume \"%s\". Transfert de %s " +#~ "octets...\n" + +#~ msgid "Writing spooled data to Volume. Despooling %s bytes ...\n" +#~ msgstr "" +#~ "Ecriture des données spoolées sur le Volume. Transfert de %s octets...\n" + +#, fuzzy +#~ msgid "" +#~ "Despooling elapsed time = %02d:%02d:%02d, Transfer rate = %s Bytes/" +#~ "second\n" +#~ msgstr "" +#~ "Temps du transfert des données spoolées = %02d:%02d:%02d, Taux de " +#~ "transfert = %s o/s\n" + +#, fuzzy +#~ msgid "Ftruncate spool file failed: ERR=%s\n" +#~ msgstr "Erreur pendant l'ouverture fichier de spool %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Spool header read error. ERR=%s\n" +#~ msgstr "Erreur sur l'autochangeur : ERR=%s\n" + +#, fuzzy +#~ msgid "Spool read error. Wanted %u bytes, got %d\n" +#~ msgstr "Erreur pendant l'écriture des attributs dans le spool. ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "User specified Job spool size reached: JobSpoolSize=%s MaxJobSpoolSize=" +#~ "%s\n" +#~ msgstr "Taille du spool spécifiée par l'utlisateur atteinte.\n" + +#, fuzzy +#~ msgid "" +#~ "User specified Device spool size reached: DevSpoolSize=%s MaxDevSpoolSize=" +#~ "%s\n" +#~ msgstr "Taille du spool spécifiée par l'utlisateur atteinte.\n" + +#~ msgid "Spooling data again ...\n" +#~ msgstr "Reprise du spool des données...\n" + +#, fuzzy +#~ msgid "Fatal despooling error." +#~ msgstr "Erreur fatale" + +#, fuzzy +#~ msgid "Error writing block to spool file. ERR=%s\n" +#~ msgstr "" +#~ "Erreur pendant l'écriture des données vers le fichier de spool. ERR=%s\n" + +# Impossible d'ouvrir le fichier de spool des attributs : ERR=%s +#, fuzzy +#~ msgid "Fseek on attributes file failed: ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier de spool des attributs %s : ERR=%s\n" + +# Impossible d'ouvrir le fichier de spool des attributs : ERR=%s +#, fuzzy +#~ msgid "Truncate on attributes file failed: ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier de spool des attributs %s : ERR=%s\n" + +#~ msgid "Sending spooled attrs to the Director. Despooling %s bytes ...\n" +#~ msgstr "" +#~ "Transfert des attributs spoolés au Director. Transfert de %s octets...\n" + +# Impossible d'ouvrir le fichier de spool des attributs : ERR=%s +#~ msgid "fopen attr spool file %s failed: ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier de spool des attributs %s : ERR=%s\n" + +#, fuzzy +#~ msgid " %s Alert: at %s Volume=\"%s\" alert=%s\n" +#~ msgstr "Impossible de supprimer le volume \"%s\". 
ERR=%s" + +#~ msgid "Used Volume status:\n" +#~ msgstr "Volume en cours d'utilisation :\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Device \"%s\" is not open or does not exist.\n" +#~ msgstr "Le Device \"%s\" n'est pas ouvert ou il n'existe pas.\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Device %s is %s %s:\n" +#~ " Volume: %s\n" +#~ " Pool: %s\n" +#~ " Media type: %s\n" +#~ msgstr "" +#~ "Le Device %s est monté avec :\n" +#~ " Volume : %s\n" +#~ " Pool : %s\n" +#~ " Type du Media : %s\n" + +#, fuzzy +#~ msgid "waiting for" +#~ msgstr "En attente d'un montage" + +#, fuzzy +#~ msgid "*unknown*" +#~ msgstr "inconnu" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Device %s: %s open but no Bacula volume is currently mounted.\n" +#~ msgstr "" +#~ "Le Device %s est ouvert, mais il n'y a pas de Volume Bacula monté.\n" + +#~ msgid " Total Bytes=%s Blocks=%s Bytes/block=%s\n" +#~ msgstr " Total Octets=%s Blocs=%s Octets/Bloc=%s\n" + +#~ msgid " Total Bytes Read=%s Blocks Read=%s Bytes/block=%s\n" +#~ msgstr " Total des Octets lu=%s Blocs lu=%s Octets/Bloc=%s\n" + +#~ msgid " Positioned at File=%s Block=%s\n" +#~ msgstr " Positionné sur Fichier=%s Bloc=%s\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Device %s: %s is not open.\n" +#~ msgstr "Le Device %s n'est pas ouvert.\n" + +#, fuzzy +#~ msgid " Available %sSpace=%sB\n" +#~ msgstr "Requêtes disponibles :\n" + +#~ msgid "Autochanger \"%s\" with devices:\n" +#~ msgstr "Autochangeur \"%s\" avec les Devices :\n" + +#~ msgid "" +#~ "\n" +#~ "Device status:\n" +#~ msgstr "" +#~ "\n" +#~ "Statut du Device :\n" + +#, fuzzy +#~ msgid "Daemon started %s. Jobs: run=%d, running=%d.\n" +#~ msgstr "Démon démarré depuis %s, %d jobs lancés depuis cette date.\n" + +#, fuzzy +#~ msgid " Device is disabled. User command.\n" +#~ msgstr " Le Device est BLOQUE. Démonté par l'utilisateur.\n" + +#, fuzzy +#~ msgid " Device is BLOCKED. User unmounted.\n" +#~ msgstr " Le Device est BLOQUE. Démonté par l'utilisateur.\n" + +#, fuzzy +#~ msgid " Device is BLOCKED. User unmounted during wait for media/mount.\n" +#~ msgstr "" +#~ " Le Device est BLOQUE. 
Démonté par l'utilisateur à cause d'un " +#~ "chargement de média.\n" + +#, fuzzy +#~ msgid "" +#~ " Device is BLOCKED waiting for mount of volume \"%s\",\n" +#~ " Pool: %s\n" +#~ " Media type: %s\n" +#~ msgstr "" +#~ " Le Device est BLOQUE en attente du montage du volume \"%s\",\n" +#~ " Pool : %s\n" +#~ " Media type : %s\n" + +#, fuzzy +#~ msgid "" +#~ " Device is BLOCKED waiting to create a volume for:\n" +#~ " Pool: %s\n" +#~ " Media type: %s\n" +#~ msgstr "" +#~ " Le Device est BLOQUE en attente de création d'un volume :\n" +#~ " Pool : %s\n" +#~ " Media type : %s\n" + +#, fuzzy +#~ msgid " Device is BLOCKED waiting for media.\n" +#~ msgstr " Le Device est BLOQUE en attente d'un média.\n" + +#, fuzzy +#~ msgid " Device is being initialized.\n" +#~ msgstr " Le Device est en cours d'initialisation.\n" + +#, fuzzy +#~ msgid " Device is blocked labeling a Volume.\n" +#~ msgstr " Le Device est occupé à labéliser un Volume.\n" + +#, fuzzy +#~ msgid " Slot %d %s loaded in drive %d.\n" +#~ msgstr " Le slot %d est chargé dans le lecteur %d.\n" + +#, fuzzy +#~ msgid " Drive %d is not loaded.\n" +#~ msgstr " Le lecteur %d n'est pas chargé.\n" + +#, fuzzy +#~ msgid "Device state:\n" +#~ msgstr "" +#~ "\n" +#~ "Statut du Device :\n" + +#, fuzzy +#~ msgid "Attached JobIds: " +#~ msgstr "" +#~ "\n" +#~ "Jobs planifiés :\n" + +#, fuzzy +#~ msgid " File=%u block=%u\n" +#~ msgstr "Fichier=%u bloc=%u\n" + +#, fuzzy +#~ msgid " Min block=%u Max block=%u\n" +#~ msgstr "Min bloc=%u Max bloc=%u\n" + +#~ msgid "%s Job %s waiting for Client connection.\n" +#~ msgstr "%s Job %s est en attente de la connexion du Client.\n" + +#, fuzzy +#~ msgid "" +#~ "Reading: %s %s job %s JobId=%d Volume=\"%s\"\n" +#~ " pool=\"%s\" device=%s newbsr=%d\n" +#~ msgstr "" +#~ "Lecture : %s %s job %s JobId=%d Volume=\"%s\"\n" +#~ " pool=\"%s\" device=%s\n" + +#, fuzzy +#~ msgid "" +#~ "Writing: %s %s job %s JobId=%d Volume=\"%s\"\n" +#~ " pool=\"%s\" device=%s\n" +#~ msgstr "" +#~ "Ecriture : %s %s job %s JobId=%d Volume=\"%s\"\n" +#~ " pool=\"%s\" device=%s\n" + +#~ msgid " spooling=%d despooling=%d despool_wait=%d\n" +#~ msgstr " spooling=%d despooling=%d despool_wait=%d\n" + +#, fuzzy +#~ msgid " Files=%s Bytes=%s AveBytes/sec=%s LastBytes/sec=%s\n" +#~ msgstr " Fichiers=%s Octets=%s Octets/sec=%s\n" + +#~ msgid "" +#~ "\n" +#~ "Jobs waiting to reserve a drive:\n" +#~ msgstr "" +#~ "\n" +#~ "Jobs en attente de réservation de lecteur :\n" + +#, fuzzy +#~ msgid "3900 No arg in .status command: %s\n" +#~ msgstr "3909 Erreur pendant la lecture de la commande de montage : %s\n" + +#, fuzzy +#~ msgid "3900 Unknown arg in .status command: %s\n" +#~ msgstr "3909 Erreur pendant la lecture de la commande de montage : %s\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: bacula-sd [options] [-c config_file] [config_file]\n" +#~ " -c use as configuration file\n" +#~ " -d [,] set debug level to , debug tags to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -T set trace on\n" +#~ " -f run in foreground (for debugging)\n" +#~ " -g set groupid to group\n" +#~ " -m print kaboom output (for debugging)\n" +#~ " -p proceed despite I/O errors\n" +#~ " -s no signals (for debugging)\n" +#~ " -t test - read config and exit\n" +#~ " -u userid to \n" +#~ " -v verbose user messages\n" +#~ " -? 
print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Version : %s (%s)\n" +#~ "\n" +#~ "Usage : stored [options] [-c config_file] [config_file]\n" +#~ " -c utilise fich comme fichier de configuration\n" +#~ " -dnn positionne le niveau de debug à nn\n" +#~ " -dt affiche un timestamp devant chaque ligne de debug\n" +#~ " -f reste en avant-plan (pour debugger)\n" +#~ " -g groupid\n" +#~ " -p continue même en cas d'erreurs E/S\n" +#~ " -s pas de signaux\n" +#~ " -t test - lit seulement le fichier de configuration\n" +#~ " -u userid\n" +#~ " -v affiche les messages utilisateurs\n" +#~ " -? affiche ce message.\n" +#~ "\n" + +#, fuzzy +#~ msgid "Unable to create thread. ERR=%s\n" +#~ msgstr "Impossible de récupérer le Pool depuis le catalogue : ERR=%s" + +#, fuzzy +#~ msgid "Could not initialize SD device \"%s\"\n" +#~ msgstr "Impossible d'initialiser %s\n" + +#, fuzzy +#~ msgid "Unable to stat ControlDevice %s: ERR=%s\n" +#~ msgstr "3910 Impossible d'ouvrir le device %s : ERR=%s\n" + +#~ msgid "Could not open device %s\n" +#~ msgstr "Impossible d'ouvrir le device %s\n" + +#~ msgid "Could not mount device %s\n" +#~ msgstr "Impossible de monter le device %s\n" + +#, fuzzy +#~ msgid "Expected a Device Type keyword, got: %s" +#~ msgstr "Attendait le mot clef FileSet, eu : %s" + +#, fuzzy +#~ msgid "Expected a Cloud driver keyword, got: %s" +#~ msgstr "Attendait un niveau de sauvegarde, eu : %s" + +#, fuzzy +#~ msgid "Expected a Truncate Cache option keyword, got: %s" +#~ msgstr "Attendait une option de FileSet, eu : %s:" + +#, fuzzy +#~ msgid "Expected a Cloud Upload option keyword, got: %s" +#~ msgstr "Attendait une option de FileSet, eu : %s:" + +#, fuzzy +#~ msgid "Expected a Cloud communications protocol option keyword, got: %s" +#~ msgstr "Attendait un type de Job de Migration, eu : %s" + +#, fuzzy +#~ msgid "Expected a Cloud Uri Style option keyword, got: %s" +#~ msgstr "Attendait une option de FileSet, eu : %s:" + +#~ msgid "Cannot find AutoChanger resource %s\n" +#~ msgstr "Impossible de trouver la ressource AutoChanger %s\n" + +#, fuzzy +#~ msgid "Unable to init lock for Autochanger=%s: ERR=%s\n" +#~ msgstr "Impossible d'initialiser le muxtex : ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot find Device resource %s\n" +#~ msgstr "Impossible de trouver la ressource Director \"%s\"\n" + +#, fuzzy +#~ msgid "Alert: Volume=\"%s\" alert=%d: ERR=%s\n" +#~ msgstr "Impossible de supprimer le volume \"%s\". ERR=%s" + +#, fuzzy +#~ msgid "3997 Bad alert command: %s: ERR=%s.\n" +#~ msgstr "Impossible de lancer la commande : %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to open device %s: ERR=%s\n" +#~ msgstr "3910 Impossible d'ouvrir le device %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Rewind error on %s. ERR=%s.\n" +#~ msgstr "erreur de déplacement (lseek) sur %s : ERR=%s\n" + +#~ msgid "Ready to append to end of Volume \"%s\" at file=%d.\n" +#~ msgstr "Prêt à ajouter des données à la fin du volume \"%s\" file=%d.\n" + +#, fuzzy +#~ msgid "" +#~ "For Volume \"%s\":\n" +#~ "The number of files mismatch! Volume=%u Catalog=%u\n" +#~ "Correcting Catalog\n" +#~ msgstr "" +#~ "Impossible d'écrire sur le volume \"%s\" \n" +#~ "car le nombre de fichiers ne correspond pas. Volume=%u Catalogue=%u\n" + +#, fuzzy +#~ msgid "" +#~ "Bacula cannot write on tape Volume \"%s\" because:\n" +#~ "The number of files mismatch! Volume=%u Catalog=%u\n" +#~ msgstr "" +#~ "Impossible d'écrire sur le volume \"%s\" \n" +#~ "car le nombre de fichiers ne correspond pas. 
Volume=%u Catalogue=%u\n" + +#, fuzzy +#~ msgid "ioctl MTEOM error on %s. ERR=%s.\n" +#~ msgstr "erreur de déplacement (lseek) sur %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Bad call to load_dev. Device not open\n" +#~ msgstr "Le Device %s n'est pas ouvert.\n" + +#, fuzzy +#~ msgid "ioctl MTLOAD error on %s. ERR=%s.\n" +#~ msgstr "erreur de déplacement (lseek) sur %s : ERR=%s\n" + +#, fuzzy +#~ msgid "ioctl MTOFFL error on %s. ERR=%s.\n" +#~ msgstr "erreur de déplacement (lseek) sur %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Bad call to fsf. Device not open\n" +#~ msgstr "Le Device %s n'est pas ouvert.\n" + +#, fuzzy +#~ msgid "Device %s at End of Tape.\n" +#~ msgstr "Le Device %s n'est pas ouvert.\n" + +#, fuzzy +#~ msgid "ioctl MTFSF error on %s. ERR=%s.\n" +#~ msgstr "erreur de déplacement (lseek) sur %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Bad call to bsf. Device not open\n" +#~ msgstr "Le Device %s n'est pas ouvert.\n" + +#, fuzzy +#~ msgid "Device %s cannot BSF because it is not a tape.\n" +#~ msgstr "Le Device %s n'est pas ouvert.\n" + +#, fuzzy +#~ msgid "ioctl MTBSF error on %s. ERR=%s.\n" +#~ msgstr "erreur de déplacement (lseek) sur %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Bad call to fsr. Device not open\n" +#~ msgstr "Le Device %s n'est pas ouvert.\n" + +#, fuzzy +#~ msgid "ioctl MTFSR %d error on %s. ERR=%s.\n" +#~ msgstr "erreur de déplacement (lseek) sur %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Bad call to bsr_dev. Device not open\n" +#~ msgstr "Le Device %s n'est pas ouvert.\n" + +#, fuzzy +#~ msgid "ioctl MTBSR error on %s. ERR=%s.\n" +#~ msgstr "erreur de déplacement (lseek) sur %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Bad call to weof_dev. Device not open\n" +#~ msgstr "Le Device %s n'est pas ouvert.\n" + +#, fuzzy +#~ msgid "Attempt to WEOF on non-appendable Volume\n" +#~ msgstr "est en attente d'un volume libre" + +#, fuzzy +#~ msgid "ioctl MTWEOF error on %s. ERR=%s.\n" +#~ msgstr "erreur de déplacement (lseek) sur %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Read and write devices not properly initialized.\n" +#~ msgstr " Le Device est en cours d'initialisation.\n" + +#, fuzzy +#~ msgid "No Volume names found for %s.\n" +#~ msgstr "Aucun volume trouvé pour la restauration.\n" + +#, fuzzy +#~ msgid "Unable to initialize volume list lock. ERR=%s\n" +#~ msgstr "Impossible d'initialiser le verrou sur la base. ERR=%s\n" + +#, fuzzy +#~ msgid "Could not reserve volume \"%s\", because job canceled.\n" +#~ msgstr "Impossible de trouver le prochain volume pour le Job %s.\n" + +#, fuzzy +#~ msgid "" +#~ "Could not reserve volume \"%s\" for append, because it will be read.\n" +#~ msgstr "Impossible de trouver le prochain volume pour le Job %s.\n" + +#, fuzzy +#~ msgid "" +#~ "Cannot reserve Volume=%s because drive is busy with Volume=%s (JobId=" +#~ "%ld).\n" +#~ msgstr "Impossible de pruner le Volume \"%s\" car il est archivé.\n" + +#, fuzzy +#~ msgid "Volume %s is busy swapping from %s to %s\n" +#~ msgstr "Le device %s est occupé en lecture.\n" + +#, fuzzy +#~ msgid "Volume %s is busy swapping.\n" +#~ msgstr "Le device %s est occupé en lecture.\n" + +#, fuzzy +#~ msgid "%s device %s is busy.\n" +#~ msgstr "Le device %s est occupé en lecture.\n" + +#, fuzzy +#~ msgid "pthread timedwait error. ERR=%s\n" +#~ msgstr "Erreur pendant l'écriture des attributs dans le spool. 
ERR=%s\n" + +#, fuzzy +#~ msgid "JobId=%s, Job %s waiting to reserve a device.\n" +#~ msgstr "Le job %s est en attente de réservation d'un device.\n" + +#, fuzzy +#~ msgid "JobId=%s, Job %s waiting device %s.\n" +#~ msgstr "Le job %s est en attente de réservation d'un device.\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "Example : bbatch -w /path/to/workdir -h localhost -f dat1 -f dat -f datx\n" +#~ " will start 3 thread and load dat1, dat and datx in your catalog\n" +#~ "See bbatch.c to generate datafile\n" +#~ "\n" +#~ "Usage: bbatch [ options ] -w working/dir -f datafile\n" +#~ " -b with batch mode\n" +#~ " -B without batch mode\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -n specify the database name (default bacula)\n" +#~ " -u specify database user name (default bacula)\n" +#~ " -P specify database host (default NULL)\n" +#~ " -k path name to the key file (default NULL)\n" +#~ " -e path name to the certificate file (default " +#~ "NULL)\n" +#~ " -a path name to the CA certificate file (default " +#~ "NULL)\n" +#~ " -w specify working directory\n" +#~ " -r call restore code with given jobids\n" +#~ " -v verbose\n" +#~ " -f specify data file\n" +#~ " -? print this message\n" +#~ "\n" +#~ msgstr "" +#~ "Copyright (C) 2000-2016 Kern Sibbald\n" +#~ "Version : %s (%s)\n" +#~ "\n" +#~ "Usage : dird [-f -s] [-c config_file] [-d debug_level] [config_file]\n" +#~ " -c utilise fich comme fichier de configuration\n" +#~ " -dnn positionne le niveau de debug à nn\n" +#~ " -f reste en avant-plan (pour debugger)\n" +#~ " -g groupid\n" +#~ " -r lance maintenant\n" +#~ " -s pas de signaux\n" +#~ " -t test - lit seulement le fichier de configuration\n" +#~ " -u userid\n" +#~ " -v affiche les messages utilisateurs\n" +#~ " -? affiche ce message.\n" +#~ "\n" + +#, fuzzy +#~ msgid "Could not init Bacula database\n" +#~ msgstr "Impossible d'ouvrir la base de données \"%s\".\n" + +#, fuzzy +#~ msgid "Error opening datafile %s\n" +#~ msgstr "Impossible d'ouvrir le fichier de données %s.\n" + +#, fuzzy +#~ msgid "Error while inserting file\n" +#~ msgstr "Entrez le nombre de départ : " + +#~ msgid "Could not open data file: %s\n" +#~ msgstr "Impossible d'ouvrir le fichier de données %s.\n" + +#, fuzzy +#~ msgid "Fatal fgets error: ERR=%s\n" +#~ msgstr "erreur de fermeture : ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Usage: %s [-f from] [-h mailhost] [-s subject] [-c copy] [recipient ...]\n" +#~ " -4 forces bsmtp to use IPv4 addresses only.\n" +#~ " -6 forces bsmtp to use IPv6 addresses only.\n" +#~ " -8 set charset to UTF-8\n" +#~ " -a use any ip protocol for address resolution\n" +#~ " -c set the Cc: field\n" +#~ " -d set debug level to \n" +#~ " -dt print a timestamp in debug output\n" +#~ " -f set the From: field\n" +#~ " -h use mailhost:port as the SMTP server\n" +#~ " -s set the Subject: field\n" +#~ " -r set the Reply-To: field\n" +#~ " -l set the maximum number of lines to send (default: " +#~ "unlimited)\n" +#~ " -? 
print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Version : %s (%s)\n" +#~ "\n" +#~ "Usage : dird [-f -s] [-c config_file] [-d debug_level] [config_file]\n" +#~ " -c utilise fich comme fichier de configuration\n" +#~ " -d positionne le niveau de debug à nn\n" +#~ " -dt affiche un timestamp devant chaque ligne de debug\n" +#~ " -f reste en avant-plan (pour debugger)\n" +#~ " -g groupid\n" +#~ " -r lance maintenant\n" +#~ " -s pas de signaux\n" +#~ " -t test - lit seulement le fichier de configuration\n" +#~ " -u userid\n" +#~ " -v affiche les messages utilisateurs\n" +#~ " -? affiche ce message.\n" +#~ "\n" + +#, fuzzy +#~ msgid "Fatal gethostname error: ERR=%s\n" +#~ msgstr "erreur de fermeture : ERR=%s\n" + +#, fuzzy +#~ msgid "Fatal getaddrinfo for myself failed \"%s\": ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Fatal gethostbyname for myself failed \"%s\": ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Error unknown mail host \"%s\": ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Failed to connect to mailhost %s\n" +#~ msgstr "Impossible de se connecter au Client.\n" + +#, fuzzy +#~ msgid "Fatal socket error: ERR=%s\n" +#~ msgstr "erreur de fermeture : ERR=%s\n" + +#, fuzzy +#~ msgid "Fatal connect error to %s: ERR=%s\n" +#~ msgstr "erreur de fermeture : ERR=%s\n" + +#, fuzzy +#~ msgid "Fatal _open_osfhandle error: ERR=%s\n" +#~ msgstr "erreur de fermeture : ERR=%s\n" + +#, fuzzy +#~ msgid "Fatal fdopen error: ERR=%s\n" +#~ msgstr "erreur de fermeture : ERR=%s\n" + +#, fuzzy +#~ msgid "Fatal dup error: ERR=%s\n" +#~ msgstr "erreur de fermeture : ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "ERROR %s\n" +#~ "\n" +#~ msgstr "%s : ERREUR : " + +#, fuzzy +#~ msgid "Unable to open -p argument for reading" +#~ msgstr "Impossible d'ouvrir le fichier de paramètre DH" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -n specify the database name (default bacula)\n" +#~ " -u specify database user name (default bacula)\n" +#~ " -P specify database host (default NULL)\n" +#~ " -k path name to the key file (default NULL)\n" +#~ " -e path name to the certificate file (default " +#~ "NULL)\n" +#~ " -a path name to the CA certificate file (default " +#~ "NULL)\n" +#~ " -w specify working directory\n" +#~ " -j specify jobids\n" +#~ " -p specify path\n" +#~ " -f specify file\n" +#~ " -l maximum tuple to fetch\n" +#~ " -T truncate cache table before starting\n" +#~ " -v verbose\n" +#~ " -? print this message\n" +#~ "\n" +#~ msgstr "" +#~ "Copyright (C) 2000-2016 Kern Sibbald\n" +#~ "Version : %s (%s)\n" +#~ "\n" +#~ "Usage : dird [-f -s] [-c config_file] [-d debug_level] [config_file]\n" +#~ " -c utilise fich comme fichier de configuration\n" +#~ " -dnn positionne le niveau de debug à nn\n" +#~ " -f reste en avant-plan (pour debugger)\n" +#~ " -g groupid\n" +#~ " -r lance maintenant\n" +#~ " -s pas de signaux\n" +#~ " -t test - lit seulement le fichier de configuration\n" +#~ " -u userid\n" +#~ " -v affiche les messages utilisateurs\n" +#~ " -? 
affiche ce message.\n" +#~ "\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -n specify the database name (default bacula)\n" +#~ " -u specify database user name (default bacula)\n" +#~ " -P specify database host (default NULL)\n" +#~ " -w specify working directory\n" +#~ " -p specify path\n" +#~ " -f specify file\n" +#~ " -l maximum tuple to fetch\n" +#~ " -q print only errors\n" +#~ " -v verbose\n" +#~ " -? print this message\n" +#~ "\n" +#~ msgstr "" +#~ "Copyright (C) 2000-2016 Kern Sibbald\n" +#~ "Version : %s (%s)\n" +#~ "\n" +#~ "Usage : dird [-f -s] [-c config_file] [-d debug_level] [config_file]\n" +#~ " -c utilise fich comme fichier de configuration\n" +#~ " -dnn positionne le niveau de debug à nn\n" +#~ " -f reste en avant-plan (pour debugger)\n" +#~ " -g groupid\n" +#~ " -r lance maintenant\n" +#~ " -s pas de signaux\n" +#~ " -t test - lit seulement le fichier de configuration\n" +#~ " -u userid\n" +#~ " -v affiche les messages utilisateurs\n" +#~ " -? affiche ce message.\n" +#~ "\n" + +#, fuzzy +#~ msgid "Could not open, database \"%s\".\n" +#~ msgstr "Impossible d'ouvrir la base de données \"%s\".\n" + +#, fuzzy +#~ msgid "Error no Director resource defined.\n" +#~ msgstr "Impossible de trouver la ressource Director \"%s\"\n" + +#, fuzzy +#~ msgid "Select function number: " +#~ msgstr "Sélection terminée.\n" + +#, fuzzy +#~ msgid "Deleting: %s\n" +#~ msgstr "Fichier : %s\n" + +#, fuzzy +#~ msgid "Print the list? (yes/no): " +#~ msgstr "Continuer ? (oui/mod/non) : " + +#, fuzzy +#~ msgid "Found %d for: %s\n" +#~ msgstr "Erreur de config : %s\n" + +#, fuzzy +#~ msgid "Print them? (yes/no): " +#~ msgstr "Continuer ? (oui/mod/non) : " + +#, fuzzy +#~ msgid "Found %d orphaned File records.\n" +#~ msgstr "Impossible de créer la structure BSOCK cliente.\n" + +#, fuzzy +#~ msgid "Create temporary index? (yes/no): " +#~ msgstr "Continuez ? (oui/non) : " + +#, fuzzy +#~ msgid "Found %d orphaned Filename records.\n" +#~ msgstr "Impossible de créer la structure BSOCK cliente.\n" + +#, fuzzy +#~ msgid "Found %d orphaned FileSet records.\n" +#~ msgstr "Impossible de créer la structure BSOCK cliente.\n" + +#, fuzzy +#~ msgid "Deleting %d orphaned FileSet records.\n" +#~ msgstr "Impossible de créer la structure BSOCK cliente.\n" + +#, fuzzy +#~ msgid "Found %d orphaned Client records.\n" +#~ msgstr "Impossible d'initialiser la queue cliente : ERR=%s\n" + +#, fuzzy +#~ msgid "Deleting JobMedia records of orphaned Job records.\n" +#~ msgstr "Impossible de récupérer le Job du JobId=%s : ERR=%s\n" + +#, fuzzy +#~ msgid "Deleting Log records of orphaned Job records.\n" +#~ msgstr "Impossible de récupérer le Job du JobId=%s : ERR=%s\n" + +#, fuzzy +#~ msgid "Checking for Restore Job entries.\n" +#~ msgstr "Les Job de restauration sont :\n" + +#, fuzzy +#~ msgid "Found %d Restore Job records.\n" +#~ msgstr "Erreur de config : %s\n" + +#, fuzzy +#~ msgid "Deleting %d Restore Job records.\n" +#~ msgstr "Les Job de restauration sont :\n" + +#, fuzzy +#~ msgid "Create temporary index... This may take some time!\n" +#~ msgstr "Continuez ? (oui/non) : " + +#, fuzzy +#~ msgid "Drop temporary index.\n" +#~ msgstr "Continuez ? 
(oui/non) : " + +#, fuzzy +#~ msgid "%s: unknown\n" +#~ msgstr "inconnu" + +#, fuzzy +#~ msgid "Reg: %s\n" +#~ msgstr "Fichier : %s\n" + +#, fuzzy +#~ msgid "Err: Could not access %s: %s\n" +#~ msgstr " Impossible d'acceder à \"%s\" : ERR=%s\n" + +#, fuzzy +#~ msgid "Err: Could not follow ff->link %s: %s\n" +#~ msgstr " Impossible de suivre le lien \"%s\" : ERR=%s\n" + +#, fuzzy +#~ msgid "Err: Could not stat %s: %s\n" +#~ msgstr " Impossible d'acceder à \"%s\" : ERR=%s\\n\n" + +#, fuzzy +#~ msgid "Err: Could not open directory %s: %s\n" +#~ msgstr " Impossible d'ouvrir le répertoire \"%s\" : ERR=%s\n" + +#, fuzzy +#~ msgid "Err: Unknown file ff->type %d: %s\n" +#~ msgstr " Type de fichier inconnu %d ; non sauvé : %s\n" + +#, fuzzy +#~ msgid "Path: %s\n" +#~ msgstr "Pool : %s\n" + +#, fuzzy +#~ msgid "Could not open include file: %s\n" +#~ msgstr "Impossible d'ouvrir le fichier de données %s.\n" + +#, fuzzy +#~ msgid "Could not open exclude file: %s\n" +#~ msgstr "Impossible d'ouvrir le fichier de données %s.\n" + +#, fuzzy +#~ msgid "" +#~ "Director and Storage daemon passwords or names not the same.\n" +#~ "For help, please see " +#~ msgstr "" +#~ "Le mot de passe ou le nom du Director et du Client ne sont pas " +#~ "identiques.\n" + +#, fuzzy +#~ msgid "" +#~ "Director and File daemon passwords or names not the same.\n" +#~ "For help, please see " +#~ msgstr "" +#~ "Le mot de passe ou le nom du Director et du Client ne sont pas " +#~ "identiques.\n" + +#, fuzzy +#~ msgid "" +#~ "No Client, Storage or Director resource defined in %s\n" +#~ "Without that I don't how to get status from the File, Storage or Director " +#~ "Daemon :-(\n" +#~ msgstr "" +#~ "Pas de director défini pour %s\n" +#~ "Sans cette définition, il n'est pas possible de se connecter à celui-ci.\n" + +#, fuzzy +#~ msgid "Connecting to Client %s:%d" +#~ msgstr "Connexion au client %s (%s:%d)\n" + +#, fuzzy +#~ msgid "Connecting to Storage %s:%d" +#~ msgstr "Connexion au Director %s:%d\n" + +#, fuzzy +#~ msgid "Cannot connect to daemon." +#~ msgstr "Impossible de se connecter au démon Storage\n" + +#, fuzzy +#~ msgid "Authentication error : %s" +#~ msgstr "Erreur sur l'autochangeur : ERR=%s\n" + +#, fuzzy +#~ msgid "Opened connection with Director daemon." +#~ msgstr "Impossible de se connecter au Storage daemon.\n" + +#, fuzzy +#~ msgid "Opened connection with File daemon." +#~ msgstr "Impossible de se connecter au client.\n" + +#, fuzzy +#~ msgid "Opened connection with Storage daemon." +#~ msgstr "Impossible de se connecter au Storage daemon.\n" + +#, fuzzy +#~ msgid "Error : Connection closed." 
+#~ msgstr "Connexion...\n" + +#~ msgid "%s OK -- with warnings" +#~ msgstr "%s OK -- avec des avertissements" + +#, fuzzy +#~ msgid "Command line" +#~ msgstr "Erreur sur la commande : %s\n" + +#, fuzzy +#~ msgid "Invalid keyword found: %s\n" +#~ msgstr "Argument invalide : %s\n" + +#, fuzzy +#~ msgid "Display data files usage" +#~ msgstr "Message de Bacula" + +#, fuzzy +#~ msgid "aclx_get error on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Failed to convert acl into text on file \"%s\"\n" +#~ msgstr "Impossible de lire le certificat à partir du fichier" + +#, fuzzy +#~ msgid "aclx_scanStr error on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "aclx_put error on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "acltostr error on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "strtoacl error on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "setacl error on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "acltotext error on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "aclfromtext error on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "acl(SETACL) error on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Bad caps from SD: %s.\n" +#~ msgstr "Début de purge des jobs du client \"%s\"\n" + +#, fuzzy +#~ msgid "Generate VSS snapshots. Driver=\"%s\"\n" +#~ msgstr "Création des snapshot VSS. Driver=\"%s\", Lecteur(s)=\"%s\"\n" + +#, fuzzy +#~ msgid "VSS CreateSnapshots failed. ERR=%s\n" +#~ msgstr "Erreur durant la création des snapshots VSS.\n" + +#, fuzzy +#~ msgid "VSS was not initialized properly. ERR=%s\n" +#~ msgstr "Impossible d'initialiser le verrou sur la base. ERR=%s\n" + +#, fuzzy +#~ msgid "WriteEncryptedFileRaw failure: ERR=%s\n" +#~ msgstr "fopen %s en erreur : ERR=%s\n" + +#, fuzzy +#~ msgid "llistea error on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "lgetea error on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "lsetea error on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "attr_list error on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "attr_set error on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "getproplist error on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Unable create proper proplist to restore xattrs on file \"%s\"\n" +#~ msgstr "Impossible de lire le certificat à partir du fichier" + +#, fuzzy +#~ msgid "setproplist error on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to open xattr %s on \"%s\": ERR=%s\n" +#~ msgstr "Impossible de se connecter à %s sur %s:%d. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to read symlin %s on \"%s\": ERR=%s\n" +#~ msgstr "Impossible de se connecter à %s sur %s:%d. 
ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to chdir to xattr space of file \"%s\": ERR=%s\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to open xattr space %s on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to chdir to xattr space on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to chdir to xattr space %s on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to mkfifo xattr %s on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to mknod xattr %s on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to mkdir xattr %s on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to symlink xattr %s to %s on file \"%s\": ERR=%s\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s. ERR=%s\n" + +#, fuzzy +#~ msgid "%c: is not a valid drive.\n" +#~ msgstr "%s : est une commande invalide.\n" + +#~ msgid "Could not initialize Python\n" +#~ msgstr "Impossible d'initialiser le Python\n" + +#~ msgid "Could not Run Python string %s\n" +#~ msgstr "Impossible de lancer la commande Python %s\n" + +#, fuzzy +#~ msgid "Unable to initialize the Python lock. ERR=%s\n" +#~ msgstr "Impossible d'initialiser le verrou sur la base. ERR=%s\n" + +#, fuzzy +#~ msgid "Can't create working directory %s. ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier inclus : %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Can't delete working directory %s. ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier inclus : %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Unknown parameter or missing argument for %s.\n" +#~ msgstr "Mot clef inconnu : %s\n" + +#, fuzzy +#~ msgid "Unknown parameter for %s. Expecting block or file\n" +#~ msgstr "Mot clef inconnu : %s\n" + +#, fuzzy +#~ msgid "Unknown parameter %s.\n" +#~ msgstr "Mot clef inconnu : %s\n" + +#, fuzzy +#~ msgid "Unable to access guest volume\n" +#~ msgstr "Impossible d'écrire le marqueur EOF. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to create temporary file %s. ERR=%s\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s. ERR=%s\n" + +#, fuzzy +#~ msgid " Dumping database \"%s\"\n" +#~ msgstr "Impossible d'ouvrir la base de données \"%s\".\n" + +#, fuzzy +#~ msgid "Unable to detect the MySQL data_directory on this system.\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Can't get server configuration.\n" +#~ msgstr "La création de la signature a échouée" + +#, fuzzy +#~ msgid "Unable to get the BINLOG list.\n" +#~ msgstr "Impossible de se connecter au Client.\n" + +#, fuzzy +#~ msgid "Unable to determine the last LSN for %s (Previous job is %s)\n" +#~ msgstr "Impossible d'écrire le marqueur EOF. 
ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to detect datadir from MySQL\n" +#~ msgstr "Impossible de se connecter au Client.\n" + +#, fuzzy +#~ msgid "Unable to get last LSN from the backup\n" +#~ msgstr "Impossible de récupérer le Job du JobId=%s : ERR=%s\n" + +#, fuzzy +#~ msgid "The current LSN is %s\n" +#~ msgstr "Le pool courant est : %s\n" + +#, fuzzy +#~ msgid "Restoring target database \"%s\"\n" +#~ msgstr "Impossible d'ouvrir la base de données \"%s\".\n" + +#, fuzzy +#~ msgid "Creating target database \"%s\"\n" +#~ msgstr "Impossible d'ouvrir la base de données \"%s\".\n" + +#, fuzzy +#~ msgid "Unable to parse or to use plugin options, %s\n" +#~ msgstr "Impossible de se connecter à %s sur %s:%d. ERR=%s\n" + +#, fuzzy +#~ msgid "Unknown parameter or bad argument for %s.\n" +#~ msgstr "Mot clef inconnu : %s\n" + +#, fuzzy +#~ msgid "Can't get cluster configuration.\n" +#~ msgstr "La création de la signature a échouée" + +#, fuzzy +#~ msgid "Can't determine the last WAL file\n" +#~ msgstr "Impossible d'écrire le marqueur EOF. ERR=%s\n" + +#, fuzzy +#~ msgid "Can't determine WAL directory\n" +#~ msgstr "Impossible d'ouvrir le fichier inclus : %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Can't open WAL directory %s. ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier inclus : %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to detect the PostgreSQL data_directory on this system.\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to find data_directory=%s on this system. ERR=%s\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to start the PITR backup on this system.\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to determine the first WAL file on this system.\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to analyse tablespaces %s on this system. ERR=%s\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to analyse data_directory %s on this system. ERR=%s\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Can't create the %s file for recovery. ERR=%s\n" +#~ msgstr "Impossible de créer le fichier d'état : %s ERR=%s\n" + +#, fuzzy +#~ msgid "3612 JobId=%u waiting because device %s is reserved by: %s.\n" +#~ msgstr "" +#~ "3605 JobId=%u voulait libérer le lecteur, mais le device %s est occupé.\n" + +#, fuzzy +#~ msgid "" +#~ "3998 Bad return from storage \"%s\" command: ERR=%s.\n" +#~ "Results=%s\n" +#~ msgstr "" +#~ "3991 Erreur sur l'autochangeur \"loaded drive %d\" : ERR=%s.\n" +#~ "Resultat=%s\n" + +#~ msgid "Alert: %s" +#~ msgstr "Alert: %s" + +#, fuzzy +#~ msgid "DDE commit failed. ERR=%s\n" +#~ msgstr "Erreur sur l'ouverture du périphérique. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to open device part=%d %s: ERR=%s\n" +#~ msgstr "3910 Impossible d'ouvrir le device %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Dedupengine status:\n" +#~ msgstr "" +#~ "\n" +#~ "Statut du Device :\n" + +#, fuzzy +#~ msgid "Cannot create DedupIndexDirectory: %s" +#~ msgstr "change le répertoire courant" + +#, fuzzy +#~ msgid "Cannot create recovery directory: %s" +#~ msgstr "change le répertoire courant" + +#, fuzzy +#~ msgid "Cannot delete temporary recovery directory: %s" +#~ msgstr "change le répertoire courant" + +#, fuzzy +#~ msgid "Socket error or stop during rehydration. 
ERR=%d\n" +#~ msgstr "Erreur de socket sur la commande %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Unexpected message from FD, n=%d msglen=%d msg=%s\n" +#~ msgstr "" +#~ "Erreur pendant l'écriture des données vers le fichier de spool. ERR=%s\n" + +#, fuzzy +#~ msgid "Error sending chunk request to client\n" +#~ msgstr "Erreur pendant lors de la récupération du pool. ERR=%s\n" + +#, fuzzy +#~ msgid "3000 Deduplication vacuum marked to be canceled.\n" +#~ msgstr "3000 Job %s marqué pour être annulé.\n" + +#, fuzzy +#~ msgid "3915 Failed to label Volume: ERR=%s\n" +#~ msgstr "3912 Impossible de labéliser le Volume : ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to write last on %s: ERR=%s\n" +#~ msgstr "Impossible de se connecter à %s sur %s:%d. ERR=%s\n" + +#, fuzzy +#~ msgid "Could not open aligned volume: %s, ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier pid. %s ERR=%s\n" + +#, fuzzy +#~ msgid "Send caps to Client failed. ERR=%s\n" +#~ msgstr "Erreur sur l'ouverture du périphérique. ERR=%s\n" + +#, fuzzy +#~ msgid "Recv caps from Client failed. ERR=%s\n" +#~ msgstr "Erreur sur l'ouverture du périphérique. ERR=%s\n" + +#, fuzzy +#~ msgid "Recv bad caps from Client: %s.\n" +#~ msgstr "Début de purge des jobs du client \"%s\"\n" + +#, fuzzy +#~ msgid "Recv bad caps from Client %s\n" +#~ msgstr "Début de purge des jobs du client \"%s\"\n" + +#, fuzzy +#~ msgid "" +#~ "Got Aligned or File type Volume %s on Dedup device %s. Wanted File.\n" +#~ msgstr "Le nouveau volume \"%s\" a été labélisé sur le device %s.\n" + +#, fuzzy +#~ msgid "" +#~ "Got Aligned or Dedup type Volume %s on File device %s. Wanted File.\n" +#~ msgstr "Le nouveau volume \"%s\" a été labélisé sur le device %s.\n" + +#, fuzzy +#~ msgid "" +#~ "Got File or Dedup type Volume %s on Aligned device %s. Wanted Aligned.\n" +#~ msgstr "" +#~ "Ecriture du label sur le Volume pré-labélisé \"%s\" sur le lecteur %s\n" + +#~ msgid "Ready to append to end of Volume \"%s\" part=%d size=%s\n" +#~ msgstr "" +#~ "Prêt à ajouter des données à la fin du volume \"%s\" part=%d size=%s\n" +#~ "\n" + +#, fuzzy +#~ msgid "" +#~ "Bacula cannot write on DVD Volume \"%s\" because: The sizes do not match! " +#~ "Volume=%s Catalog=%s\n" +#~ msgstr "" +#~ "Impossible d'écrire sur le volume \"%s\" car :\n" +#~ "Les tailles ne correspondent pas. Volume=%s Catalogue=%s\n" + +#, fuzzy +#~ msgid "" +#~ "3610 JobId=%u Aligned volume max bytes does not allow concurrency on " +#~ "drive %s.\n" +#~ msgstr "" +#~ "3607 JobId=%u voulait Vol=\"%s\", c'est le Vol=\"%s\" qui est dans le " +#~ "drive %s.\n" + +#, fuzzy +#~ msgid " Device is BLOCKED by another SD=%s\n" +#~ msgstr " Le Device est BLOQUE. Démonté par l'utilisateur.\n" + +#, fuzzy +#~ msgid "Unable to parse user supplied restore configuration\n" +#~ msgstr "Impossible de lire le certificat à partir du fichier" + +#, fuzzy +#~ msgid "Storage from Job's NextPool resource" +#~ msgstr "Sélectionnez le Pool" + +#, fuzzy +#~ msgid "Storage from Pool's NextPool resource" +#~ msgstr "Sélectionnez le Pool" + +#, fuzzy +#~ msgid "Enter *MediaId or Volume name: " +#~ msgstr "Saisissez le MediaId ou le nom du Volume : " + +#~ msgid "Warning Job %s is not running. Continuing anyway ...\n" +#~ msgstr "Attention le Job %s n'est pas en cours. 
Continuons quand même...\n" + +#, fuzzy +#~ msgid "Generate VSS snapshot of drive \"%c:\\\" failed.\n" +#~ msgstr "Erreur durant la création des snapshots VSS.\n" + +#, fuzzy +#~ msgid "Selection item too large.\n" +#~ msgstr "Slot trop grand.\n" + +#, fuzzy +#~ msgid "FD connect failed: Job name not found: %s\n" +#~ msgstr "Job non trouvé : %s\n" + +#, fuzzy +#~ msgid "" +#~ "Written by Nicolas Boichat (2004)\n" +#~ "\n" +#~ "Version: %s (%s) %s %s %s\n" +#~ "\n" +#~ "Usage: tray-monitor [-c config_file] [-d debug_level]\n" +#~ " -c set configuration file to file\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -t test - read configuration and exit\n" +#~ " -? print this message.\n" +#~ "\n" +#~ msgstr "" +#~ ") %s %s %s\n" +#~ "\n" +#~ "Usage : bconsole [-s] [-c config_file] [-d niveau_debug]\n" +#~ " -c set configuration file to file\n" +#~ " -dnn set debug level to nn\n" +#~ " -s no signals\n" +#~ " -t test - lecture de la configuration et sortie\n" +#~ "\n" + +#, fuzzy +#~ msgid "Bacula daemon status monitor" +#~ msgstr "Bacula Storage : En cours" + +#, fuzzy +#~ msgid "Bacula tray monitor" +#~ msgstr "Bacula Storage : En cours" + +#, fuzzy +#~ msgid "Unknown status." +#~ msgstr "Erreur inconnue." + +#, fuzzy +#~ msgid "About" +#~ msgstr "Abort" + +#, fuzzy +#~ msgid "Disconnecting from Director %s:%d\n" +#~ msgstr "Connexion au Director %s:%d\n" + +#, fuzzy +#~ msgid "Disconnecting from Client %s:%d\n" +#~ msgstr "Connexion au client %s (%s:%d)\n" + +#, fuzzy +#~ msgid "Disconnecting from Storage %s:%d\n" +#~ msgstr "Connexion au Director %s:%d\n" + +#~ msgid "Version" +#~ msgstr "Version" + +#, fuzzy +#~ msgid "" +#~ "Current job: %s\n" +#~ "Last job: %s" +#~ msgstr "Le statut actuel du volume (Volume status) est : %s\n" + +#, fuzzy +#~ msgid " (%d errors)" +#~ msgstr "est en erreur" + +#, fuzzy +#~ msgid " (%d error)" +#~ msgstr "BUS error" + +#, fuzzy +#~ msgid "No current job." 
+#~ msgstr "Pas de RecyclePool courant\n" + +#, fuzzy +#~ msgid "Job status: Running" +#~ msgstr "Pas de job en cours.\n" + +#, fuzzy +#~ msgid "Job status: Terminated" +#~ msgstr "est terminé" + +#, fuzzy +#~ msgid "Job status: Fatal error" +#~ msgstr "%s : %s Erreur fatale : " + +#, fuzzy +#~ msgid "Job status: Verify differences" +#~ msgstr "Vérification des différences" + +#, fuzzy +#~ msgid "Job status: Canceled" +#~ msgstr "Le job %s est annulé.\n" + +#, fuzzy +#~ msgid "Job status: Waiting on File daemon" +#~ msgstr "%s Job %s est en attente de la connexion du Client.\n" + +#, fuzzy +#~ msgid "Job status: Waiting on the Storage daemon" +#~ msgstr "est en attente du Storage %s" + +#, fuzzy +#~ msgid "Job status: Waiting for new media" +#~ msgstr " Le Device est BLOQUE en attente d'un média.\n" + +#, fuzzy +#~ msgid "Job status: Waiting for Mount" +#~ msgstr "En attente d'un montage" + +#, fuzzy +#~ msgid "Job status: Waiting for storage resource" +#~ msgstr "En attente du Storage" + +#, fuzzy +#~ msgid "Job status: Waiting for job resource" +#~ msgstr "En attente du Storage" + +#, fuzzy +#~ msgid "Job status: Waiting for higher priority jobs to finish" +#~ msgstr "attend qu'un job plus prioritaire se termine" + +#, fuzzy +#~ msgid "Cannot connect to daemon.\n" +#~ msgstr "Impossible de se connecter au démon Storage\n" + +#, fuzzy +#~ msgid "Opened connection with Director daemon.\n" +#~ msgstr "Impossible de se connecter au Storage daemon.\n" + +#, fuzzy +#~ msgid "Opened connection with File daemon.\n" +#~ msgstr "Impossible de se connecter au client.\n" + +#, fuzzy +#~ msgid "Opened connection with Storage daemon.\n" +#~ msgstr "Impossible de se connecter au Storage daemon.\n" + +#, fuzzy +#~ msgid "Failed to allocate space for query filter.\n" +#~ msgstr "Impossible de se connecter au Director\n" + +#, fuzzy +#~ msgid "Failed to allocate space for query filters.\n" +#~ msgstr "Impossible de se connecter au Director\n" + +#, fuzzy +#~ msgid "" +#~ "Unable to connect to Ingres server.\n" +#~ "Database=%s User=%s\n" +#~ "It is probably not running or your password is incorrect.\n" +#~ msgstr "" +#~ "Impossible de se connecter au serveur MySQL.\n" +#~ "Base=%s Utilisateur=%s\n" +#~ "Le serveur n'est pas démarré ou bien votre password est invalide.\n" + +#, fuzzy +#~ msgid "A user name for Ingres must be supplied.\n" +#~ msgstr "Un nom d'utilisateur MySQL doit être fourni.\n" + +#, fuzzy +#~ msgid "" +#~ "Unable to connect to DBI interface. Type=%s Database=%s User=%s\n" +#~ "Possible causes: SQL server not running; password incorrect; " +#~ "max_connections exceeded.\n" +#~ msgstr "" +#~ "Impossible de se connecter au serveur PostgreSQL.\n" +#~ "Base=%s Utilisateur=%s\n" +#~ "Le serveur n'est pas démarré ou bien votre password est invalide.\n" + +#, fuzzy +#~ msgid "Invalid driver type, must be \"dbi:\"\n" +#~ msgstr "Saisie invalide. 
Veuillez répondre oui ou non.\n" + +#, fuzzy +#~ msgid "A user name for DBI must be supplied.\n" +#~ msgstr "Un nom d'utilisateur MySQL doit être fourni.\n" + +#, fuzzy +#~ msgid "Read zero bytes Vol=%s at %u:%u on device %s.\n" +#~ msgstr "Prêt à lire les données du volume \"%s\" depuis le device %s.\n" + +#~ msgid "Bacula Storage: Idle" +#~ msgstr "Bacula Storage : En attente" + +#~ msgid "Bacula Storage: Running" +#~ msgstr "Bacula Storage : En cours" + +#~ msgid "Bacula Storage: Last Job Canceled" +#~ msgstr "Bacula Storage : Dernier Job annulé" + +#~ msgid "Bacula Storage: Last Job Failed" +#~ msgstr "Bacula Storage : Dernier Job en erreur" + +#~ msgid "Bacula Storage: Last Job had Warnings" +#~ msgstr "Bacula Storage : Le dernier Job avait des erreurs" + +#~ msgid "JCR use_count=%d JobId=%d\n" +#~ msgstr "JCR use_count=%d JobId=%d\n" + +#, fuzzy +#~ msgid "Bad response to Hello command: ERR=" +#~ msgstr "Mauvaise réponse à la commande Hello : ERR=%s\n" + +#, fuzzy +#~ msgid "JobId %s is not running. Use Job name to %s inactive jobs.\n" +#~ msgstr "" +#~ "JobId %s n'est pas en cours. Utilisez le nom du Job pour annuler un job " +#~ "inactif.\n" + +#, fuzzy +#~ msgid "Confirm %s (yes/no): " +#~ msgstr "Confirmez l'annulation (oui/non) : " + +#~ msgid "Priority must be 1-100" +#~ msgstr "La priorité doit être comprise entre 1 et 100" + +#, fuzzy +#~ msgid "Enter restore mode" +#~ msgstr "Saisissez le nom d'un répertoire : " + +#, fuzzy +#~ msgid "Cancel restore" +#~ msgstr "Annulé" + +#, fuzzy +#~ msgid "Remove" +#~ msgstr "Restaurer" + +#, fuzzy +#~ msgid "Refresh" +#~ msgstr "Restaurer" + +#, fuzzy +#~ msgid "Filename" +#~ msgstr "FileSet" + +#, fuzzy +#~ msgid "Size" +#~ msgstr "Depuis" + +#, fuzzy +#~ msgid "Job Name" +#~ msgstr "Job échoué.\n" + +#, fuzzy +#~ msgid "Fileset" +#~ msgstr "FileSet" + +#, fuzzy +#~ msgid "Before" +#~ msgstr "Restaurer" + +#, fuzzy +#~ msgid "Please configure parameters concerning files to restore :" +#~ msgstr "Aucun fichier sélectionné pour la restauration.\n" + +#, fuzzy +#~ msgid "if newer" +#~ msgstr "Type" + +#, fuzzy +#~ msgid "never" +#~ msgstr "Type" + +#, fuzzy +#~ msgid "Error : no clients returned by the director." +#~ msgstr "Impossible de se connecter au Director\n" + +#, fuzzy +#~ msgid "Error : no storage returned by the director." +#~ msgstr "Impossible de se connecter au Director\n" + +#, fuzzy +#~ msgid "Error : no jobs returned by the director." +#~ msgstr "Impossible de se connecter au Director\n" + +#, fuzzy +#~ msgid "RestoreFiles" +#~ msgstr "restauration de fichier" + +#, fuzzy +#~ msgid "Please configure your restore parameters." +#~ msgstr "Aucun fichier sélectionné pour la restauration.\n" + +#, fuzzy +#~ msgid "Please select a client." +#~ msgstr "Impossible de se connecter au Client.\n" + +#, fuzzy +#~ msgid "Please select a restore date." +#~ msgstr "Aucun fichier sélectionné pour la restauration.\n" + +#, fuzzy +#~ msgid "Building restore tree..." +#~ msgstr "" +#~ "\n" +#~ "Analyse des répertoires pour le JobId %s..." + +#, fuzzy +#~ msgid "Error while starting restore: " +#~ msgstr "Entrez le nombre de départ : " + +#, fuzzy +#~ msgid " files selected to be restored." +#~ msgstr "Aucun fichier sélectionné pour la restauration.\n" + +#, fuzzy +#~ msgid " file selected to be restored." +#~ msgstr "Aucun fichier sélectionné pour la restauration.\n" + +#, fuzzy +#~ msgid "Please configure your restore (%ld files selected to be restored)..." 
+#~ msgstr "Aucun fichier sélectionné pour la restauration.\n" + +#, fuzzy +#~ msgid "Restore failed : no file selected.\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Restore failed : no file selected." +#~ msgstr "Impossible de se connecter au Client.\n" + +#, fuzzy +#~ msgid "Restoring, please wait..." +#~ msgstr "string" + +#, fuzzy +#~ msgid "Job queued. JobId=" +#~ msgstr "Job mis en queue. JobId=%s\n" + +#, fuzzy +#~ msgid "Job failed." +#~ msgstr "Job échoué.\n" + +#, fuzzy +#~ msgid "Failed to retrieve jobid.\n" +#~ msgstr "Impossible de se connecter au Client.\n" + +#, fuzzy +#~ msgid "Restore job created, but not yet running." +#~ msgstr "Restauration annulée" + +#, fuzzy +#~ msgid "Restore job running, please wait (%ld of %ld files restored)..." +#~ msgstr "%s Job %s est en attente de la connexion du Client.\n" + +#, fuzzy +#~ msgid "Restore job terminated successfully." +#~ msgstr "Restauration non effectuée.\n" + +#, fuzzy +#~ msgid "Restore job terminated successfully.\n" +#~ msgstr "Restauration non effectuée.\n" + +#, fuzzy +#~ msgid "Restore job terminated in error, see messages.\n" +#~ msgstr " Le Device est BLOQUE en attente d'un média.\n" + +#, fuzzy +#~ msgid "Restore job reported a fatal error." +#~ msgstr "Restauration annulée" + +#, fuzzy +#~ msgid "Restore job cancelled by user." +#~ msgstr "Restauration annulée" + +#, fuzzy +#~ msgid "Restore job cancelled by user.\n" +#~ msgstr "Restauration annulée" + +#, fuzzy +#~ msgid "Restore job is waiting on File daemon." +#~ msgstr "%s Job %s est en attente de la connexion du Client.\n" + +#, fuzzy +#~ msgid "Restore job is waiting for new media." +#~ msgstr " Le Device est BLOQUE en attente d'un média.\n" + +#, fuzzy +#~ msgid "Restore job is waiting for storage resource." +#~ msgstr "En attente du Storage" + +#, fuzzy +#~ msgid "Restore job is waiting for job resource." +#~ msgstr "En attente du Storage" + +#, fuzzy +#~ msgid "Restore job is waiting for Client resource." +#~ msgstr "%s Job %s est en attente de la connexion du Client.\n" + +#, fuzzy +#~ msgid "Restore job is waiting for maximum jobs." +#~ msgstr "En attente du Storage" + +#, fuzzy +#~ msgid "Restore job is waiting for start time." +#~ msgstr "attend son heure de démarrage" + +#, fuzzy +#~ msgid "Restore job is waiting for higher priority jobs to finish." +#~ msgstr "attend qu'un job plus prioritaire se termine" + +#, fuzzy +#~ msgid "Restore done successfully.\n" +#~ msgstr "Restauration non effectuée.\n" + +#, fuzzy +#~ msgid "Restore done successfully." +#~ msgstr "Restauration non effectuée.\n" + +#, fuzzy +#~ msgid "Failed to find the selected client." +#~ msgstr "Impossible de se connecter au Client.\n" + +#, fuzzy +#~ msgid "Failed to find the selected fileset." +#~ msgstr "Impossible de se connecter au Client.\n" + +#, fuzzy +#~ msgid "Failed to find the selected storage." +#~ msgstr "Impossible de se connecter au Storage daemon.\n" + +#, fuzzy +#~ msgid "Restore cancelled." +#~ msgstr "Restauration annulée" + +#, fuzzy +#~ msgid "No results to list." +#~ msgstr "Liste vide.\n" + +#, fuzzy +#~ msgid "No backup found for this client." 
+#~ msgstr "Pas de job trouvé pour : %s.\n" + +#, fuzzy +#~ msgid "ERROR" +#~ msgstr "%s : ERREUR : " + +#, fuzzy +#~ msgid "Query failed" +#~ msgstr "Erreur sur la requête : %s\n" + +#, fuzzy +#~ msgid "JobName:" +#~ msgstr "Job échoué.\n" + +#, fuzzy +#~ msgid "Bootstrap:" +#~ msgstr "Bootstrap" + +#, fuzzy +#~ msgid "Where:" +#~ msgstr "Destination" + +#, fuzzy +#~ msgid "Replace:" +#~ msgstr "Ecrasement :\n" + +#, fuzzy +#~ msgid "ifnewer" +#~ msgstr "Type" + +#, fuzzy +#~ msgid "FileSet:" +#~ msgstr "FileSet" + +#, fuzzy +#~ msgid "Client:" +#~ msgstr "Client" + +#, fuzzy +#~ msgid "Storage:" +#~ msgstr "Stockage" + +#, fuzzy +#~ msgid "When:" +#~ msgstr "Quand" + +#, fuzzy +#~ msgid "Priority:" +#~ msgstr "Priorité" + +#, fuzzy +#~ msgid "Restoring..." +#~ msgstr "string" + +#~ msgid "Type your command below:" +#~ msgstr "Saisissez votre commande ci-dessous : " + +#, fuzzy +#~ msgid "Unknown command." +#~ msgstr "Erreur inconnue." + +#, fuzzy +#~ msgid "Possible completions: " +#~ msgstr "Les valeurs possibles sont :\n" + +#, fuzzy +#~ msgid "Connect" +#~ msgstr "Connexion...\n" + +#, fuzzy +#~ msgid "Connect to the director" +#~ msgstr "Impossible de se connecter au Director\n" + +#, fuzzy +#~ msgid "Disconnect" +#~ msgstr "Connexion...\n" + +#, fuzzy +#~ msgid "Disconnect of the director" +#~ msgstr "Impossible de se connecter au Director\n" + +#, fuzzy +#~ msgid "Change of configuration file" +#~ msgstr "La création de la signature a échouée" + +#, fuzzy +#~ msgid "Change your default configuration file" +#~ msgstr "Impossible de lire le certificat à partir du fichier" + +#, fuzzy +#~ msgid "Edit your configuration file" +#~ msgstr "La création de la signature a échouée" + +#, fuzzy +#~ msgid "&File" +#~ msgstr "FileSet" + +#, fuzzy +#~ msgid "First run" +#~ msgstr "est en cours" + +#, fuzzy +#~ msgid "Please choose a configuration file to use" +#~ msgstr "Merci de corriger le fichier de configuration : %s\n" + +#, fuzzy +#~ msgid "Configuration file read successfully" +#~ msgstr "La création de la signature a échouée" + +#, fuzzy +#~ msgid "Using this configuration file: %s\n" +#~ msgstr "Merci de corriger le fichier de configuration : %s\n" + +#, fuzzy +#~ msgid "Connecting to the director..." +#~ msgstr "Connexion au Director %s:%d\n" + +#, fuzzy +#~ msgid "Please choose your default configuration file" +#~ msgstr "Merci de corriger le fichier de configuration : %s\n" + +#, fuzzy +#~ msgid "Use this configuration file as default?" +#~ msgstr "Merci de corriger le fichier de configuration : %s\n" + +#, fuzzy +#~ msgid "Configuration file" +#~ msgstr "La création de la signature a échouée" + +#, fuzzy +#~ msgid "Console thread terminated." +#~ msgstr "est terminé" + +#, fuzzy +#~ msgid "Connection to the director lost. Quit program?" +#~ msgstr "Connexion au Director %s:%d\n" + +#, fuzzy +#~ msgid "Connection lost" +#~ msgstr "Connexion...\n" + +#, fuzzy +#~ msgid "Connected to the director." +#~ msgstr "Impossible de se connecter au Director\n" + +#, fuzzy +#~ msgid "Reconnect" +#~ msgstr "Connexion...\n" + +#, fuzzy +#~ msgid "Reconnect to the director" +#~ msgstr "Impossible de se connecter au Director\n" + +#, fuzzy +#~ msgid "Disconnected of the director." 
+#~ msgstr "Impossible de se connecter au Director\n" + +#, fuzzy +#~ msgid "Config file editor" +#~ msgstr "La création de la signature a échouée" + +#, fuzzy +#~ msgid "# Bacula bwx-console Configuration File\n" +#~ msgstr "Merci de corriger le fichier de configuration : %s\n" + +#, fuzzy +#~ msgid "Unable to write to %s\n" +#~ msgstr "Impossible d'écrire le marqueur EOF. ERR=%s\n" + +#, fuzzy +#~ msgid "Error while saving" +#~ msgstr "Entrez le nombre de départ : " + +#, fuzzy +#~ msgid "Bacula bwx-console" +#~ msgstr "Message de Bacula" + +#, fuzzy +#~ msgid "" +#~ "No Director resource defined in config file.\n" +#~ "Without that I don't how to speak to the Director :-(\n" +#~ msgstr "" +#~ "Pas de director défini pour %s\n" +#~ "Sans cette définition, il n'est pas possible de se connecter à celui-ci.\n" + +#, fuzzy +#~ msgid "Error while initializing windows sockets...\n" +#~ msgstr "Erreur pendant l'initialisation du contexte SSL" + +#, fuzzy +#~ msgid "Error while initializing library." +#~ msgstr "Erreur pendant l'initialisation du contexte SSL" + +#, fuzzy +#~ msgid "Cryptographic library initialization failed.\n" +#~ msgstr "Initialisation de la connexion TLS échouée.\n" + +#, fuzzy +#~ msgid "Please correct configuration file.\n" +#~ msgstr "Merci de corriger le fichier de configuration : %s\n" + +#, fuzzy +#~ msgid "Error : Library not initialized\n" +#~ msgstr "Impossible d'initialiser %s\n" + +#, fuzzy +#~ msgid "Error : No configuration file loaded\n" +#~ msgstr "La création de la signature a échouée" + +#~ msgid "Connecting...\n" +#~ msgstr "Connexion...\n" + +#~ msgid "Failed to connect to the director\n" +#~ msgstr "Impossible de se connecter au Director\n" + +#, fuzzy +#~ msgid "Connected\n" +#~ msgstr "Connexion...\n" + +#, fuzzy +#~ msgid "Connection terminated\n" +#~ msgstr "Sélection terminée.\n" + +#~ msgid "%6d %-6s %-20s %s\n" +#~ msgstr "%6d %-6s %-20s %s\n" + +#, fuzzy +#~ msgid "Unable to write to %s. ERR=%s\n" +#~ msgstr "Impossible de se connecter à %s sur %s:%d. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to get the Full job for %s\n" +#~ msgstr "Impossible d'écrire le marqueur EOF. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to write to %s to save full job name. ERR=%s\n" +#~ msgstr "Impossible de se connecter à %s sur %s:%d. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to %s exitcode=%d\n" +#~ msgstr "3910 Impossible d'ouvrir le device %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to save last controlfile into file %s. ERR=%s\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to create %s " +#~ msgstr "Impossible d'écrire le marqueur EOF. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to save last SCN into file %s. ERR=%s\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot find last control file ret=%d\n" +#~ msgstr "Impossible de trouver la ressource Console \"%s\"\n" + +#, fuzzy +#~ msgid "Can't open /etc/oratab. ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s pour lecture. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to list schema for %s. exitcode=%d\n" +#~ msgstr "Impossible d'écrire le marqueur EOF. ERR=%s\n" + +#, fuzzy +#~ msgid "Can't open %s. 
ERR=%s\n" +#~ msgstr "Impossible d'ouvrir le fichier %s : ERR=%s\n" + +#, fuzzy +#~ msgid "Error occured while selecting instance.\n" +#~ msgstr "Entrez le nombre de départ : " + +#, fuzzy +#~ msgid "Job Level not supported.\n" +#~ msgstr "Job %s non trouvé.\n" + +#, fuzzy +#~ msgid "Calling RMAN for %s\n" +#~ msgstr "Exécution : %s %s %s\n" + +#, fuzzy +#~ msgid "Unable to open %s to save RMAN output. ERR=%s\n" +#~ msgstr "Impossible de se connecter à %s sur %s:%d. ERR=%s\n" + +#, fuzzy +#~ msgid "Error occured while dumping tempfiles ERR=%s\n" +#~ msgstr "Entrez le nombre de départ : " + +#, fuzzy +#~ msgid "Error occured while dumping logfiles ERR=%s\n" +#~ msgstr "Une erreur est survenue à la ligne %d de \"%s\"\n" + +#, fuzzy +#~ msgid "" +#~ "Unable to copy schema \"%s\" to \"%s\" ret=%d\n" +#~ "%s\n" +#~ msgstr "Impossible de se connecter à %s sur %s:%d. ERR=%s\n" + +#, fuzzy +#~ msgid "Disable a job" +#~ msgstr "est bloqué" + +#, fuzzy +#~ msgid "Failed to initialize database backend\n" +#~ msgstr "Impossible d'initialiser le contexte TLS pour la Console \"%s\".\n" + +#, fuzzy +#~ msgid "disabled" +#~ msgstr "est bloqué" + +#~ msgid " Drive %d status unknown.\n" +#~ msgstr " Le statut du lecteur %d est inconnu.\n" + +#, fuzzy +#~ msgid "Append data error.\n" +#~ msgstr "Erreur non fatale" + +#~ msgid "Network buffer size %d not multiple of tape block size.\n" +#~ msgstr "" +#~ "La taille du buffer réseau %d n'est pas un multiple de la taille de bloc " +#~ "du lecteur.\n" + +#~ msgid "" +#~ "Generate VSS snapshot of drive \"%c:\\\" failed. VSS support is disabled " +#~ "on this drive.\n" +#~ msgstr "" +#~ "Erreur lors de la création du snapshot VSS du lecteur \"%c:\\\\\". Le VSS " +#~ "est désactivé sur ce lecteur.\n" + +#~ msgid "Daemon started %s, %d Job%s run since started.\n" +#~ msgstr "Démon démarré le %s, %d job%s lancés depuis.\n" + +#, fuzzy +#~ msgid "" +#~ "Director and Storage daemon passwords or names not the same.\n" +#~ "Please see http://www.bacula.org/en/rel-manual/Bacula_Freque_Asked_Questi." +#~ "html#SECTION003760000000000000000 for help.\n" +#~ msgstr "" +#~ "Problème d'authentification entre le storage et le director.\n" +#~ "Le plus souvent, les mots de pass ne correspondent pas.\n" +#~ "Vous trouverez de l'aide sur\n" +#~ "http://www.bacula.org/rel-manual/faq.html#AuthorizationErrors\n" + +#, fuzzy +#~ msgid "" +#~ "Director and File daemon passwords or names not the same.\n" +#~ "Please see http://www.bacula.org/en/rel-manual/Bacula_Freque_Asked_Questi." +#~ "html#SECTION003760000000000000000 for help.\n" +#~ msgstr "" +#~ "Problème d'authentification entre le client et le director.\n" +#~ "Le plus souvent, les mots de pass ne correspondent pas.\n" +#~ "Vous trouverez de l'aide sur\n" +#~ "http://www.bacula.org/rel-manual/faq.html#AuthorizationErrors\n" + +#, fuzzy +#~ msgid "A dbi driver for DBI must be supplied.\n" +#~ msgstr "Un nom d'utilisateur MySQL doit être fourni.\n" + +#, fuzzy +#~ msgid "" +#~ "Cancel: %s\n" +#~ "\n" +#~ "%s" +#~ msgstr "" +#~ "Annule : %s\n" +#~ "\n" +#~ "%s" + +#~ msgid "Confirm cancel?" +#~ msgstr "Confirmez l'annulation : " + +#~ msgid "Max sched run time exceeded. Job canceled.\n" +#~ msgstr "" +#~ "Temps d'exécution maximum depuis la planification atteind. Abandon du " +#~ "job.\n" + +#, fuzzy +#~ msgid "" +#~ "Incorrect authorization key from File daemon at %s rejected.\n" +#~ "Please see http://www.bacula.org/en/rel-manual/Bacula_Freque_Asked_Questi." 
+#~ "html#SECTION003760000000000000000 for help.\n" +#~ msgstr "" +#~ "Problème d'authentification avec le director.\n" +#~ "Le plus souvent, les mots de pass ne correspondent pas.\n" +#~ "Vous trouverez de l'aide sur\n" +#~ "http://www.bacula.org/rel-manual/faq.html#AuthorizationErrors\n" + +#~ msgid "" +#~ "====\n" +#~ "\n" +#~ msgstr "" +#~ "====\n" +#~ "\n" + +#, fuzzy +#~ msgid "Unable to stat DVD part 1 file %s: ERR=%s\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s. ERR=%s\n" + +#, fuzzy +#~ msgid "There is no valid DVD in device %s.\n" +#~ msgstr "Le volume \"%s\" n'est pas dans le device %s.\n" + +#~ msgid "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" +#~ msgstr "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" + +#~ msgid "WARNING!!!! The Internal Database is NOT OPERATIONAL!\n" +#~ msgstr "ATTENTION !!!! La base interne n'est pas OPERATIONNELLE !\n" + +#~ msgid "You should use SQLite, PostgreSQL, or MySQL\n" +#~ msgstr "Vous devez utiliser SQLite, PostgreSQL, ou MySQL\n" + +#, fuzzy +#~ msgid "Unable to open Catalog DB control file %s: ERR=%s\n" +#~ msgstr "Impossible de créer le fichier bootstrap %s. ERR=%s\n" + +#, fuzzy +#~ msgid "Could not connect to storage daemon" +#~ msgstr "Impossible de se connecter au Storage daemon.\n" + +#, fuzzy +#~ msgid "Job %s marked to be canceled.\n" +#~ msgstr "Le job va être marqué annulé.\n" + +#, fuzzy +#~ msgid "" +#~ "cancel [jobid= job= ujobid=] -- \n" +#~ " cancel a job" +#~ msgstr "cancel [ | ] -- annulation d'un job" + +#, fuzzy +#~ msgid "delete [volume= pool= job jobid=]" +#~ msgstr "delete [pool= | media volume=]" + +#~ msgid "disable -- disable a job" +#~ msgstr "disable -- désactive un job" + +#~ msgid "enable -- enable a job" +#~ msgstr "enable -- active un job" + +#, fuzzy +#~ msgid "" +#~ "list [pools | jobs | jobtotals | media | \n" +#~ " files | copies ]; from catalog" +#~ msgstr "" +#~ "list [pools | jobs | jobtotals | media | files " +#~ "] -- depuis le catalogue" + +#~ msgid "messages" +#~ msgstr "messages" + +#, fuzzy +#~ msgid "use -- catalog xxx" +#~ msgstr "utilise le catalogue xxx" + +#, fuzzy +#~ msgid "No FileSet record defined for job %s\n" +#~ msgstr "Pas de volume trouvé en base pour l'objet %d.\n" + +#, fuzzy +#~ msgid "No Pool resource defined for job %s\n" +#~ msgstr "La ressource Pool \"%s\" est introuvable !\n" + +#, fuzzy +#~ msgid "Folder" +#~ msgstr "FileSet" + +#, fuzzy +#~ msgid "1 file (%s)" +#~ msgstr "Nouveau Fichier : %s\n" + +#, fuzzy +#~ msgid "1 file selected (%s)" +#~ msgstr "" +#~ "\n" +#~ "1 fichier sélectionne pour la restauration.\n" +#~ "\n" + +#, fuzzy +#~ msgid "%d files selected (%s)" +#~ msgstr "" +#~ "\n" +#~ "1 fichier sélectionne pour la restauration.\n" +#~ "\n" + +#, fuzzy +#~ msgid "Command not found." +#~ msgstr "Commande annulée.\n" + +#, fuzzy +#~ msgid "Do you want to create a new archive with these files?" +#~ msgstr "" +#~ "\n" +#~ "Voulez vous restaurer tous les fichiers ? 
(oui|non) : " + +#, fuzzy +#~ msgid "Create _Archive" +#~ msgstr "Crée" + +#, fuzzy +#~ msgid "Name" +#~ msgstr "Job échoué.\n" + +#, fuzzy +#~ msgid "Current Location:" +#~ msgstr "Le slot courant est : %d\n" + +#, fuzzy +#~ msgid "View selected file" +#~ msgstr "" +#~ "\n" +#~ "%u fichiers sélectionnés pour la restauration.\n" + +#, fuzzy +#~ msgid "Create _Folder" +#~ msgstr "Crée" + +#, fuzzy +#~ msgid "Couldn't find pixmap file: %s" +#~ msgstr "Impossible d'ouvrir le fichier de données %s.\n" + +#, fuzzy +#~ msgid "Connect to Director" +#~ msgstr "Impossible de se connecter au Director\n" + +#, fuzzy +#~ msgid "Run" +#~ msgstr "En cours" + +#, fuzzy +#~ msgid "Label" +#~ msgstr "Type" + +#, fuzzy +#~ msgid " Status: " +#~ msgstr "Statut :\n" + +#, fuzzy +#~ msgid " " +#~ msgstr " (" + +#, fuzzy +#~ msgid "Bacula Console\n" +#~ msgstr "Message de Bacula" + +#, fuzzy +#~ msgid "Copyright (c) 2000 - 2004, Kern Sibbald and John Walker" +#~ msgstr "" +#~ "Copyright (C) 2000-2016 Kern Sibbald\n" +#~ "Version : " + +#, fuzzy +#~ msgid "Job:" +#~ msgstr "Job" + +#, fuzzy +#~ msgid " " +#~ msgstr " (" + +#, fuzzy +#~ msgid "Level:" +#~ msgstr "Type" + +#, fuzzy +#~ msgid "Pool:" +#~ msgstr "Pool" + +#, fuzzy +#~ msgid "Messages:" +#~ msgstr "messages" + +#, fuzzy +#~ msgid "Where: " +#~ msgstr "Destination" + +#, fuzzy +#~ msgid "Current dir:" +#~ msgstr "Le slot courant est : %d\n" + +#, fuzzy +#~ msgid "Files Selected: " +#~ msgstr "" +#~ "\n" +#~ "%u fichiers sélectionnés pour la restauration.\n" + +#, fuzzy +#~ msgid "Restore Files Dialog" +#~ msgstr "restauration de fichier" + +#, fuzzy +#~ msgid "Restore Files" +#~ msgstr "restauration de fichier" + +#, fuzzy +#~ msgid "Before:" +#~ msgstr "Restaurer" + +#, fuzzy +#~ msgid "Select Files" +#~ msgstr "Saisissez le type" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Version: %s (%s) %s %s %s\n" +#~ "\n" +#~ "Usage: bgnome-console [-s] [-c config_file] [-d debug_level] " +#~ "[config_file]\n" +#~ " -c set configuration file to file\n" +#~ " -dnn set debug level to nn\n" +#~ " -s no signals\n" +#~ " -t test - read configuration and exit\n" +#~ " -? print this message.\n" +#~ "\n" +#~ msgstr "" +#~ ") %s %s %s\n" +#~ "\n" +#~ "Usage : bconsole [-s] [-c config_file] [-d niveau_debug]\n" +#~ " -c set configuration file to file\n" +#~ " -dnn set debug level to nn\n" +#~ " -s no signals\n" +#~ " -t test - lecture de la configuration et sortie\n" +#~ "\n" + +#, fuzzy +#~ msgid "Pthread cond init error = %s\n" +#~ msgstr "Erreur pendant l'écriture des attributs dans le spool. ERR=%s\n" + +#, fuzzy +#~ msgid " Not Connected" +#~ msgstr "Connexion...\n" + +#, fuzzy +#~ msgid " Connecting to Director %s:%d" +#~ msgstr "Connexion au Director %s:%d\n" + +#, fuzzy +#~ msgid " Connected" +#~ msgstr "Connexion...\n" + +#, fuzzy +#~ msgid "File" +#~ msgstr "FileSet" + +#, fuzzy +#~ msgid "Director authorization problem.\n" +#~ msgstr "Pas d'enregistrement trouvé en base pour : %s\n" + +#, fuzzy +#~ msgid "" +#~ "Please see http://www.bacula.org/en/rel-manual/Bacula_Freque_Asked_Questi." 
+#~ "html#SECTION003760000000000000000 for help.\n" +#~ msgstr "" +#~ "Problème d'authentification entre le client et le director.\n" +#~ "Le plus souvent, les mots de pass ne correspondent pas.\n" +#~ "Vous trouverez de l'aide sur\n" +#~ "http://www.bacula.org/rel-manual/faq.html#AuthorizationErrors\n" + +#, fuzzy +#~ msgid "%s: Bad response to Hello command: ERR=%s\n" +#~ msgstr "Mauvaise réponse à la commande Hello : ERR=%s\n" + +#, fuzzy +#~ msgid "Can't restore ACLs of %s\n" +#~ msgstr "Démarrage du Job de restauration %s\n" + +#~ msgid "Cannot select %s in batch mode.\n" +#~ msgstr "Impossible de choisir %s en mode batch.\n" + +#~ msgid "run " +#~ msgstr "run -- lance un job" + +#~ msgid "status [storage | client]=" +#~ msgstr "" +#~ "status [storage | client]= -- affiche le statut d'un composant" + +#, fuzzy +#~ msgid "Cannot find previous JobIds.\n" +#~ msgstr "Impossible de trouver la ressource Job \"%s\"\n" + +#~ msgid "Start Migration JobId %s, Job=%s\n" +#~ msgstr "Début de la Migration JobId %s, Job=%s\n" + +#~ msgid "No Volumes found to migrate.\n" +#~ msgstr "Aucun volume trouvé pour la migration.\n" + +#~ msgid "" +#~ "\n" +#~ "%d Jobs, %s files inserted into the tree and marked for extraction.\n" +#~ msgstr "" +#~ "\n" +#~ "%d Jobs, %s fichiers analysés et sélectionnés pour la restauration.\n" + +#~ msgid "" +#~ "\n" +#~ "%d Jobs, %s files inserted into the tree.\n" +#~ msgstr "" +#~ "\n" +#~ "%d Jobs, %s fichiers analysés.\n" + +#~ msgid " (" +#~ msgstr " (" + +#, fuzzy +#~ msgid "" +#~ "Run Migration job\n" +#~ "JobName: %s\n" +#~ "Bootstrap: %s\n" +#~ "Client: %s\n" +#~ "FileSet: %s\n" +#~ "Pool: %s (From %s)\n" +#~ "Read Storage: %s (From %s)\n" +#~ "Write Storage: %s (From %s)\n" +#~ "JobId: %s\n" +#~ "When: %s\n" +#~ "Catalog: %s\n" +#~ "Priority: %d\n" +#~ msgstr "" +#~ "Lancement de la restauration\n" +#~ "JobName : %s\n" +#~ "Bootstrap : %s\n" +#~ "Destination : %s\n" +#~ "Ecrasement : %s\n" +#~ "Client : %s\n" +#~ "Storage : %s\n" +#~ "JobId : %s\n" +#~ "Quand : %s\n" +#~ "Catalogue : %s\n" +#~ "Priorité : %d\n" + +#, fuzzy +#~ msgid "Error updating DB Media file. ERR=%s\n" +#~ msgstr "Impossible de mettre à jour les informations du Volume : ERR=%s" + +#~ msgid "===Write error===\n" +#~ msgstr "===Erreur d'écriture===\n" + +#~ msgid "Ready to append to end of Volume \"%s\" at file address=%u.\n" +#~ msgstr "" +#~ "Prêt à ajouter des données à la fin du volume \"%s\" file adress=%u.\n" + +#, fuzzy +#~ msgid "" +#~ "Wanted to append to Volume \"%s\", but device %s is busy writing on \"%s" +#~ "\" .\n" +#~ msgstr "" +#~ "Voulait le Volume \"%s\", mais le Device %s est occupé à écrire sur \"%s" +#~ "\".\n" + +#~ msgid "" +#~ "Cannot recycle volume \"%s\" on device %s because it is in use by another " +#~ "job.\n" +#~ msgstr "" +#~ "Impossible de recycler le volume \"%s\" sur le device %s car il est " +#~ "utilisé par un autre job.\n" + +#, fuzzy +#~ msgid "" +#~ ") %s %s %s\n" +#~ "\n" +#~ "Usage: bconsole [-s] [-c config_file] [-d debug_level]\n" +#~ " -c set configuration file to file\n" +#~ " -dnn set debug level to nn\n" +#~ " -n no conio\n" +#~ " -s no signals\n" +#~ " -t test - read configuration and exit\n" +#~ " -? 
print this message.\n" +#~ "\n" +#~ msgstr "" +#~ ") %s %s %s\n" +#~ "\n" +#~ "Usage : bconsole [-s] [-c config_file] [-d niveau_debug]\n" +#~ " -c set configuration file to file\n" +#~ " -dnn set debug level to nn\n" +#~ " -s no signals\n" +#~ " -t test - lecture de la configuration et sortie\n" +#~ "\n" + +#~ msgid "%s: %s Warning: " +#~ msgstr "%s : %s Attention : " + +#~ msgid "Cannot bind port %d: ERR=%s: retrying ...\n" +#~ msgstr "Impossible de s'attacher au port %d : ERR=%s : Réessaie...\n" + +#~ msgid "Server socket" +#~ msgstr "Socket serveur" + +#~ msgid "client" +#~ msgstr "client" + +#, fuzzy +#~ msgid " could not be installed" +#~ msgstr "Bacula Storage : Dernier Job en erreur" + +#, fuzzy +#~ msgid " has been removed" +#~ msgstr "a été annulé" + +#, fuzzy +#~ msgid " could not be removed" +#~ msgstr "Impossible d'ouvrir le device %s\n" + +#~ msgid "get_char: called after EOF\n" +#~ msgstr "get_char : appelé après EOF\n" + +#~ msgid "Unable to update Volume record: ERR=%s" +#~ msgstr "Impossible de mettre à jour les informations du Volume : ERR=%s" + +#~ msgid "Do you want to continue? (yes|no): " +#~ msgstr "Voulez vous continuer ? (oui/non) : " + +#, fuzzy +#~ msgid "Invalid MediaId found.\n" +#~ msgstr "Période invalide.\n" + +#~ msgid "Device %s is mounted with Volume=\"%s\" Pool=\"%s\"\n" +#~ msgstr "Le Device %s est monté avec le Volume=\"%s\" Pool=\"%s\"\n" + +#, fuzzy +#~ msgid "Forbidden \"where\" specified.\n" +#~ msgstr "La destination (Where) est déjà spécifiée.\n" + +#~ msgid "Please mount Volume \"%s\" on Storage Device %s for Job %s\n" +#~ msgstr "" +#~ "Merci de monter le Volume \"%s\" sur le Storage Device \"%s\" pour le Job " +#~ "%s\n" + +#~ msgid "%s Version: %s (%s)\n" +#~ msgstr "%s Version : %s (%s)\n" + +#~ msgid "There are no Jobs associated with Volume \"%s\". Prune not needed.\n" +#~ msgstr "" +#~ "Il n'y a pas de job associé avec le volume \"%s\". Pas besoin de purger " +#~ "le catalogue (prune).\n" + +#~ msgid "Pruned %d %s on Volume \"%s\" from catalog.\n" +#~ msgstr "Purge du catalogue (prune) de %d %s sur le volume \"%s\".\n" + +#~ msgid "%d Files for client \"%s\" purged from %s catalog.\n" +#~ msgstr "%d fichiers du client \"%s\" purgé du catalogue %s.\n" + +#~ msgid "" +#~ "Run Restore job\n" +#~ "JobName: %s\n" +#~ "Bootstrap: %s\n" +#~ "Where: %s\n" +#~ "Replace: %s\n" +#~ "FileSet: %s\n" +#~ "Client: %s\n" +#~ "Storage: %s\n" +#~ "When: %s\n" +#~ "Catalog: %s\n" +#~ "Priority: %d\n" +#~ msgstr "" +#~ "Lancement de la restauration\n" +#~ "JobName : %s\n" +#~ "Bootstrap : %s\n" +#~ "Destination : %s\n" +#~ "Ecrasement : %s\n" +#~ "FileSet : %s\n" +#~ "Client : %s\n" +#~ "Storage : %s\n" +#~ "Quand : %s\n" +#~ "Catalogue : %s\n" +#~ "Priorité : %d\n" + +#~ msgid "Item 1 selected automatically.\n" +#~ msgstr "Sélection automatique de l'objet 1.\n" + +#~ msgid "OpenSSL error occured" +#~ msgstr "Une erreur OpenSSL s'est produite" + +#, fuzzy +#~ msgid "Job started. JobId=" +#~ msgstr "Job démarré. JobId=%s\n" + +#~ msgid "%s: is an illegal command.\n" +#~ msgstr "%s : est une commande invalide.\n" + +#, fuzzy +#~ msgid "\"%s\" is an illegal command\n" +#~ msgstr "%s est une commande invalide\n" + +#, fuzzy +#~ msgid "" +#~ ")\n" +#~ "\n" +#~ "Usage: dird [-f -s] [-c config_file] [-d debug_level] [config_file]\n" +#~ " -c set configuration file to file\n" +#~ " -dnn set debug level to nn\n" +#~ " -f run in foreground (for debugging)\n" +#~ " -r run now\n" +#~ " -s no signals\n" +#~ " -t test - read configuration and exit\n" +#~ " -? 
print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "Copyright (C) 2000-2016 Kern Sibbald\n" +#~ "Version : %s (%s)\n" +#~ "\n" +#~ "Usage : dird [-f -s] [-c config_file] [-d debug_level] [config_file]\n" +#~ " -c utilise fich comme fichier de configuration\n" +#~ " -dnn positionne le niveau de debug à nn\n" +#~ " -f reste en avant-plan (pour debugger)\n" +#~ " -g groupid\n" +#~ " -r lance maintenant\n" +#~ " -s pas de signaux\n" +#~ " -t test - lit seulement le fichier de configuration\n" +#~ " -u userid\n" +#~ " -v affiche les messages utilisateurs\n" +#~ " -? affiche ce message.\n" +#~ "\n" + +#, fuzzy +#~ msgid "Copyright (C) 2000-2016 Kern SibbaldWritten by Nicolas Boichat\n" +#~ msgstr "" +#~ "Copyright (C) 2000-2016 Kern Sibbald\n" +#~ "Version : " + +#~ msgid "Using default Catalog name=%s DB=%s\n" +#~ msgstr "Utilisation du Catalogue par défaut name=%s DB=%s\n" + +#, fuzzy +#~ msgid "3912 Failed to truncate previous DVD volume.\n" +#~ msgstr "3912 Impossible de labéliser le Volume : ERR=%s\n" + +#~ msgid "Write EOF failed.\n" +#~ msgstr "Impossible d'écrire le EOF.\n" + +#~ msgid "Success" +#~ msgstr "Succès" + +#~ msgid "Invalid regular expression" +#~ msgstr "Expression régulière invalide" + +#~ msgid "Invalid back reference" +#~ msgstr "Référence arrière invalide" + +#~ msgid "Regular expression too big" +#~ msgstr "Expression régulière trop grande" + +#~ msgid "Could not get %d bytes of shared memory: %s\n" +#~ msgstr "Impossible de récupérer %d octets de mémoire partagée : %s\n" + +#~ msgid "catalog" +#~ msgstr "catalog" + +#~ msgid "fd" +#~ msgstr "fd" + +#~ msgid "readlabel %s Slot=%d drive=%d\n" +#~ msgstr "readlabel %s Slot=%d drive=%d\n" + +#~ msgid "autochanger slots %s\n" +#~ msgstr "autochanger slots %s\n" + +#~ msgid "autochanger drives %s\n" +#~ msgstr "autochanger drives %s\n" + +#~ msgid "No MediaType found for your JobIds.\n" +#~ msgstr "Pas de MediaType trouvé pour vos JobIds\n" + +#~ msgid "off" +#~ msgstr "off" + +#~ msgid "jobid" +#~ msgstr "jobid" + +#~ msgid "job" +#~ msgstr "job" + +#~ msgid "restart" +#~ msgstr "restart" + +#~ msgid "" +#~ "\n" +#~ "%s Version: %s (%s) %s %s %s\n" +#~ msgstr "" +#~ "\n" +#~ "%s Version : %s (%s) %s %s %s\n" diff --git a/po/insert-header.sin b/po/insert-header.sin new file mode 100644 index 00000000..b26de01f --- /dev/null +++ b/po/insert-header.sin @@ -0,0 +1,23 @@ +# Sed script that inserts the file called HEADER before the header entry. +# +# At each occurrence of a line starting with "msgid ", we execute the following +# commands. At the first occurrence, insert the file. At the following +# occurrences, do nothing. The distinction between the first and the following +# occurrences is achieved by looking at the hold space. +/^msgid /{ +x +# Test if the hold space is empty. +s/m/m/ +ta +# Yes it was empty. First occurrence. Read the file. +r HEADER +# Output the file's contents by reading the next line. But don't lose the +# current line while doing this. +g +N +bb +:a +# The hold space was nonempty. Following occurrences. Do nothing. +x +:b +} diff --git a/po/it.po b/po/it.po new file mode 100644 index 00000000..a49ad035 --- /dev/null +++ b/po/it.po @@ -0,0 +1,15533 @@ +# Italian translations for Bacula package +# Traduzioni italiane per il pacchetto Bacula.. +# Copyright 2000-2015, Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# , 2005. 
+# License: BSD 2-Clause; see file LICENSE-FOSS +msgid "" +msgstr "" +"Project-Id-Version: Bacula 1.38\n" +"Report-Msgid-Bugs-To: bacula-devel@lists.sourceforge.net\n" +"POT-Creation-Date: 2007-08-10 23:23+0200\n" +"PO-Revision-Date: 2005-08-08 17:50+0200\n" +"Last-Translator: \n" +"Language-Team: Italian \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=ISO-8859-1\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +#: src/cats/bdb.c:161 +msgid "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" +msgstr "" + +#: src/cats/bdb.c:162 +msgid "WARNING!!!! The Internal Database is NOT OPERATIONAL!\n" +msgstr "" + +#: src/cats/bdb.c:163 +msgid "You should use SQLite, PostgreSQL, or MySQL\n" +msgstr "" + +#: src/cats/bdb.c:185 src/cats/mysql.c:152 src/cats/postgresql.c:161 +#: src/cats/sqlite.c:160 +#, c-format +msgid "Unable to initialize DB lock. ERR=%s\n" +msgstr "" + +#: src/cats/bdb.c:195 +#, c-format +msgid "Unable to open Catalog DB control file %s: ERR=%s\n" +msgstr "" + +#: src/cats/bdb.c:250 +#, c-format +msgid "Error reading catalog DB control file. ERR=%s\n" +msgstr "" + +#: src/cats/bdb.c:253 +#, c-format +msgid "" +"Error, catalog DB control file wrong version. Wanted %d, got %d\n" +"Please reinitialize the working directory.\n" +msgstr "" + +#: src/cats/bdb_update.c:88 src/cats/bdb_update.c:119 +#, c-format +msgid "Error updating DB Job file. ERR=%s\n" +msgstr "" + +#: src/cats/bdb_update.c:158 src/cats/bdb_update.c:194 +#, c-format +msgid "Error updating DB Media file. ERR=%s\n" +msgstr "" + +#: src/cats/mysql.c:82 +msgid "A user name for MySQL must be supplied.\n" +msgstr "" + +#: src/cats/mysql.c:190 +#, c-format +msgid "" +"Unable to connect to MySQL server.\n" +"Database=%s User=%s\n" +"MySQL connect failed either server not running or your authorization is " +"incorrect.\n" +msgstr "" + +#: src/cats/mysql.c:367 src/cats/postgresql.c:304 src/cats/sqlite.c:351 +#, c-format +msgid "Query failed: %s: ERR=%s\n" +msgstr "" + +#: src/cats/postgresql.c:85 +msgid "A user name for PostgreSQL must be supplied.\n" +msgstr "" + +#: src/cats/postgresql.c:148 +msgid "" +"PostgreSQL configuration problem. PostgreSQL library is not thread safe. 
" +"Connot continue.\n" +msgstr "" + +#: src/cats/postgresql.c:198 +#, c-format +msgid "" +"Unable to connect to PostgreSQL server.\n" +"Database=%s User=%s\n" +"It is probably not running or your password is incorrect.\n" +msgstr "" + +#: src/cats/postgresql.c:590 +#, c-format +msgid "error fetching currval: %s\n" +msgstr "" + +#: src/cats/postgresql.c:682 src/cats/postgresql.c:729 +#, c-format +msgid "error ending batch mode: %s\n" +msgstr "" + +#: src/cats/sql.c:139 +#, c-format +msgid "" +"query %s failed:\n" +"%s\n" +msgstr "" + +#: src/cats/sql.c:161 +#, c-format +msgid "" +"insert %s failed:\n" +"%s\n" +msgstr "" + +#: src/cats/sql.c:175 +#, c-format +msgid "Insertion problem: affected_rows=%s\n" +msgstr "" + +#: src/cats/sql.c:195 +#, c-format +msgid "" +"update %s failed:\n" +"%s\n" +msgstr "" + +#: src/cats/sql.c:205 +#, c-format +msgid "Update failed: affected_rows=%s for %s\n" +msgstr "" + +#: src/cats/sql.c:226 +#, c-format +msgid "" +"delete %s failed:\n" +"%s\n" +msgstr "" + +#: src/cats/sql.c:252 src/cats/sql.c:259 src/cats/sql_create.c:1018 +#: src/cats/sql_get.c:185 src/cats/sql_get.c:236 src/cats/sql_get.c:604 +#: src/cats/sql_get.c:680 src/cats/sql_get.c:951 +#, c-format +msgid "error fetching row: %s\n" +msgstr "" + +#: src/cats/sql.c:369 src/dird/catreq.c:404 src/dird/catreq.c:478 +#: src/dird/fd_cmds.c:638 src/dird/fd_cmds.c:696 +#, c-format +msgid "Attribute create error. %s" +msgstr "" + +#: src/cats/sql.c:450 +#, c-format +msgid "Path length is zero. File=%s\n" +msgstr "" + +#: src/cats/sql.c:494 +msgid "No results to list.\n" +msgstr "" + +#: src/cats/sql_create.c:95 +#, c-format +msgid "Create DB Job record %s failed. ERR=%s\n" +msgstr "" + +#: src/cats/sql_create.c:146 +#, c-format +msgid "Create JobMedia record %s failed: ERR=%s\n" +msgstr "" + +#: src/cats/sql_create.c:155 +#, c-format +msgid "Update Media record %s failed: ERR=%s\n" +msgstr "" + +#: src/cats/sql_create.c:185 +#, c-format +msgid "pool record %s already exists\n" +msgstr "" + +#: src/cats/sql_create.c:212 +#, c-format +msgid "Create db Pool record %s failed: ERR=%s\n" +msgstr "" + +#: src/cats/sql_create.c:243 +#, c-format +msgid "Device record %s already exists\n" +msgstr "" + +#: src/cats/sql_create.c:259 +#, c-format +msgid "Create db Device record %s failed: ERR=%s\n" +msgstr "" + +#: src/cats/sql_create.c:292 +#, c-format +msgid "More than one Storage record!: %d\n" +msgstr "" + +#: src/cats/sql_create.c:297 +#, c-format +msgid "error fetching Storage row: %s\n" +msgstr "" + +#: src/cats/sql_create.c:317 +#, c-format +msgid "Create DB Storage record %s failed. ERR=%s\n" +msgstr "" + +#: src/cats/sql_create.c:349 +#, c-format +msgid "mediatype record %s already exists\n" +msgstr "" + +#: src/cats/sql_create.c:365 +#, c-format +msgid "Create db mediatype record %s failed: ERR=%s\n" +msgstr "" + +#: src/cats/sql_create.c:400 +#, c-format +msgid "Volume \"%s\" already exists.\n" +msgstr "" + +#: src/cats/sql_create.c:445 +#, c-format +msgid "Create DB Media record %s failed. ERR=%s\n" +msgstr "" + +#: src/cats/sql_create.c:493 +#, c-format +msgid "More than one Client!: %d\n" +msgstr "" + +#: src/cats/sql_create.c:498 +#, c-format +msgid "error fetching Client row: %s\n" +msgstr "" + +#: src/cats/sql_create.c:525 +#, c-format +msgid "Create DB Client record %s failed. ERR=%s\n" +msgstr "" + +#: src/cats/sql_create.c:568 +#, c-format +msgid "Create DB Counters record %s failed. 
ERR=%s\n" +msgstr "" + +#: src/cats/sql_create.c:601 +#, c-format +msgid "More than one FileSet!: %d\n" +msgstr "" + +#: src/cats/sql_create.c:606 +#, c-format +msgid "error fetching FileSet row: ERR=%s\n" +msgstr "" + +#: src/cats/sql_create.c:636 +#, c-format +msgid "Create DB FileSet record %s failed. ERR=%s\n" +msgstr "" + +#: src/cats/sql_create.c:850 src/dird/job.c:131 src/dird/ua_output.c:628 +#, c-format +msgid "Could not open database \"%s\".\n" +msgstr "" + +#: src/cats/sql_create.c:873 src/cats/sql_create.c:914 +#, c-format +msgid "Attempt to put non-attributes into catalog. Stream=%d\n" +msgstr "" + +#: src/cats/sql_create.c:978 +#, c-format +msgid "Create db File record %s failed. ERR=%s" +msgstr "" + +#: src/cats/sql_create.c:1011 src/cats/sql_get.c:229 +#, c-format +msgid "More than one Path!: %s for path: %s\n" +msgstr "" + +#: src/cats/sql_create.c:1042 +#, c-format +msgid "Create db Path record %s failed. ERR=%s\n" +msgstr "" + +#: src/cats/sql_create.c:1075 +#, c-format +msgid "More than one Filename! %s for file: %s\n" +msgstr "" + +#: src/cats/sql_create.c:1081 +#, c-format +msgid "Error fetching row for file=%s: ERR=%s\n" +msgstr "" + +#: src/cats/sql_create.c:1097 +#, c-format +msgid "Create db Filename record %s failed. ERR=%s\n" +msgstr "" + +#: src/cats/sql_delete.c:80 +#, c-format +msgid "No pool record %s exists\n" +msgstr "" + +#: src/cats/sql_delete.c:85 +#, c-format +msgid "Expecting one pool record, got %d\n" +msgstr "" + +#: src/cats/sql_delete.c:91 +#, c-format +msgid "Error fetching row %s\n" +msgstr "" + +#: src/cats/sql_find.c:98 src/cats/sql_find.c:127 +#, c-format +msgid "" +"Query error for start time request: ERR=%s\n" +"CMD=%s\n" +msgstr "" + +#: src/cats/sql_find.c:104 +msgid "No prior Full backup Job record found.\n" +msgstr "" + +#: src/cats/sql_find.c:116 +#, c-format +msgid "Unknown level=%d\n" +msgstr "" + +#: src/cats/sql_find.c:133 +#, c-format +msgid "" +"No Job record found: ERR=%s\n" +"CMD=%s\n" +msgstr "" + +#: src/cats/sql_find.c:232 +#, c-format +msgid "Unknown Job level=%d\n" +msgstr "" + +#: src/cats/sql_find.c:242 +#, c-format +msgid "No Job found for: %s.\n" +msgstr "" + +#: src/cats/sql_find.c:253 +#, c-format +msgid "No Job found for: %s\n" +msgstr "" + +#: src/cats/sql_find.c:332 +#, c-format +msgid "Request for Volume item %d greater than max %d or less than 1\n" +msgstr "" + +#: src/cats/sql_find.c:347 +#, c-format +msgid "No Volume record found for item %d.\n" +msgstr "" + +#: src/cats/sql_get.c:135 +#, c-format +msgid "get_file_record want 1 got rows=%d\n" +msgstr "" + +#: src/cats/sql_get.c:140 +#, c-format +msgid "Error fetching row: %s\n" +msgstr "" + +#: src/cats/sql_get.c:148 +#, c-format +msgid "File record for PathId=%s FilenameId=%s not found.\n" +msgstr "" + +#: src/cats/sql_get.c:154 +msgid "File record not found in Catalog.\n" +msgstr "" + +#: src/cats/sql_get.c:179 +#, c-format +msgid "More than one Filename!: %s for file: %s\n" +msgstr "" + +#: src/cats/sql_get.c:189 +#, c-format +msgid "Get DB Filename record %s found bad record: %d\n" +msgstr "" + +#: src/cats/sql_get.c:195 +#, c-format +msgid "Filename record: %s not found.\n" +msgstr "" + +#: src/cats/sql_get.c:199 +#, c-format +msgid "Filename record: %s not found in Catalog.\n" +msgstr "" + +#: src/cats/sql_get.c:240 +#, c-format +msgid "Get DB path record %s found bad record: %s\n" +msgstr "" + +#: src/cats/sql_get.c:253 +#, c-format +msgid "Path record: %s not found.\n" +msgstr "" + +#: src/cats/sql_get.c:257 +#, c-format +msgid "Path record: %s not 
found in Catalog.\n" +msgstr "" + +#: src/cats/sql_get.c:294 +#, c-format +msgid "No Job found for JobId %s\n" +msgstr "" + +#: src/cats/sql_get.c:363 src/cats/sql_get.c:419 +#, c-format +msgid "No volumes found for JobId=%d\n" +msgstr "" + +#: src/cats/sql_get.c:369 src/cats/sql_get.c:430 +#, c-format +msgid "Error fetching row %d: ERR=%s\n" +msgstr "" + +#: src/cats/sql_get.c:383 +#, c-format +msgid "No Volume for JobId %d found in Catalog.\n" +msgstr "" + +#: src/cats/sql_get.c:520 +#, c-format +msgid "Pool id select failed: ERR=%s\n" +msgstr "" + +#: src/cats/sql_get.c:557 +#, c-format +msgid "Client id select failed: ERR=%s\n" +msgstr "" + +#: src/cats/sql_get.c:599 +#, c-format +msgid "More than one Pool!: %s\n" +msgstr "" + +#: src/cats/sql_get.c:641 +msgid "Pool record not found in Catalog.\n" +msgstr "" + +#: src/cats/sql_get.c:675 +#, c-format +msgid "More than one Client!: %s\n" +msgstr "" + +#: src/cats/sql_get.c:692 src/cats/sql_get.c:696 +msgid "Client record not found in Catalog.\n" +msgstr "" + +#: src/cats/sql_get.c:721 +#, c-format +msgid "More than one Counter!: %d\n" +msgstr "" + +#: src/cats/sql_get.c:726 +#, c-format +msgid "error fetching Counter row: %s\n" +msgstr "" + +#: src/cats/sql_get.c:746 +#, c-format +msgid "Counter record: %s not found in Catalog.\n" +msgstr "" + +#: src/cats/sql_get.c:782 +#, c-format +msgid "Error got %s FileSets but expected only one!\n" +msgstr "" + +#: src/cats/sql_get.c:787 +#, c-format +msgid "FileSet record \"%s\" not found.\n" +msgstr "" + +#: src/cats/sql_get.c:797 +msgid "FileSet record not found in Catalog.\n" +msgstr "" + +#: src/cats/sql_get.c:855 +#, c-format +msgid "Media id select failed: ERR=%s\n" +msgstr "" + +#: src/cats/sql_get.c:893 +#, c-format +msgid "query dbids failed: ERR=%s\n" +msgstr "" + +#: src/cats/sql_get.c:946 +#, c-format +msgid "More than one Volume!: %s\n" +msgstr "" + +#: src/cats/sql_get.c:1001 +#, c-format +msgid "Media record MediaId=%s not found.\n" +msgstr "" + +#: src/cats/sql_get.c:1004 +#, c-format +msgid "Media record for Volume \"%s\" not found.\n" +msgstr "" + +#: src/cats/sql_get.c:1011 +#, c-format +msgid "Media record for MediaId=%u not found in Catalog.\n" +msgstr "" + +#: src/cats/sql_get.c:1014 +#, c-format +msgid "Media record for Vol=%s not found in Catalog.\n" +msgstr "" + +#: src/cats/sql_list.c:62 +#, c-format +msgid "Query failed: %s\n" +msgstr "" + +#: src/cats/sqlite.c:174 +#, c-format +msgid "Database %s does not exist, please create it.\n" +msgstr "" + +#: src/cats/sqlite.c:204 +#, c-format +msgid "Unable to open Database=%s. 
ERR=%s\n" +msgstr "" + +#: src/cats/sqlite.c:205 src/lib/bnet_server.c:391 +msgid "unknown" +msgstr "" + +#: src/dird/admin.c:63 +#, c-format +msgid "Start Admin JobId %d, Job=%s\n" +msgstr "" + +#: src/dird/admin.c:89 src/dird/backup.c:355 src/dird/migrate.c:1077 +#, c-format +msgid "Error getting Job record for Job report: ERR=%s" +msgstr "" + +#: src/dird/admin.c:97 +msgid "Admin OK" +msgstr "" + +#: src/dird/admin.c:101 +msgid "*** Admin Error ***" +msgstr "" + +#: src/dird/admin.c:105 +msgid "Admin Canceled" +msgstr "" + +#: src/dird/admin.c:109 src/dird/backup.c:405 src/dird/restore.c:279 +#, c-format +msgid "Inappropriate term code: %c\n" +msgstr "" + +#: src/dird/admin.c:115 +msgid "Bacula " +msgstr "" + +#: src/dird/admin.c:115 src/console/console.c:114 +#, c-format +msgid " (" +msgstr "" + +#: src/dird/admin.c:115 +#, c-format +msgid "" +"): %s\n" +" JobId: %d\n" +" Job: %s\n" +" Start time: %s\n" +" End time: %s\n" +" Termination: %s\n" +"\n" +msgstr "" + +#: src/dird/authenticate.c:80 src/dird/authenticate.c:81 +#: src/tray-monitor/authenticate.c:132 +#, c-format +msgid "Error sending Hello to Storage daemon. ERR=%s\n" +msgstr "" + +#: src/dird/authenticate.c:106 +msgid "Director and Storage daemon passwords or names not the same.\n" +msgstr "" + +#: src/dird/authenticate.c:108 +#, c-format +msgid "" +"Director unable to authenticate with Storage daemon at \"%s:%d\". Possible " +"causes:\n" +"Passwords or names not the same or\n" +"Maximum Concurrent Jobs exceeded on the SD or\n" +"SD networking messed up (restart daemon).\n" +"Please see http://www.bacula.org/rel-manual/faq.html#AuthorizationErrors for " +"help.\n" +msgstr "" + +#: src/dird/authenticate.c:120 src/console/authenticate.c:114 +#: src/filed/authenticate.c:251 src/stored/authenticate.c:131 +#: src/stored/authenticate.c:232 src/wx-console/authenticate.c:127 +msgid "" +"Authorization problem: Remote server did not advertise required TLS " +"support.\n" +msgstr "" + +#: src/dird/authenticate.c:127 src/console/authenticate.c:121 +#: src/filed/authenticate.c:147 src/filed/authenticate.c:259 +#: src/stored/authenticate.c:139 src/stored/authenticate.c:240 +#: src/wx-console/authenticate.c:133 +msgid "Authorization problem: Remote server requires TLS.\n" +msgstr "" + +#: src/dird/authenticate.c:136 +#, c-format +msgid "TLS negotiation failed with SD at \"%s:%d\"\n" +msgstr "" + +#: src/dird/authenticate.c:145 +#, c-format +msgid "bdird set configuration file to file\n" +" -dnn set debug level to nn\n" +" -f run in foreground (for debugging)\n" +" -g groupid\n" +" -r run now\n" +" -s no signals\n" +" -t test - read configuration and exit\n" +" -u userid\n" +" -v verbose user messages\n" +" -? print this message.\n" +"\n" +msgstr "" + +#: src/dird/dird.c:224 src/console/console.c:426 src/filed/filed.c:182 +#: src/gnome2-console/console.c:286 src/stored/stored.c:210 +msgid "Cryptography library initialization failed.\n" +msgstr "" + +#: src/dird/dird.c:228 src/dird/dird.c:234 src/dird/dird.c:474 +#: src/dird/dird.c:477 src/console/console.c:430 src/filed/filed.c:187 +#: src/gnome2-console/console.c:290 src/stored/stored.c:214 +#, c-format +msgid "Please correct configuration file: %s\n" +msgstr "" + +#: src/dird/dird.c:460 +msgid "Too many open reload requests. Request ignored.\n" +msgstr "" + +#: src/dird/dird.c:475 +msgid "Out of reload table entries. 
Giving up.\n" +msgstr "" + +#: src/dird/dird.c:478 +msgid "Resetting previous configuration.\n" +msgstr "" + +#: src/dird/dird.c:541 +#, c-format +msgid "" +"No Director resource defined in %s\n" +"Without that I don't know who I am :-(\n" +msgstr "" + +#: src/dird/dird.c:549 src/filed/filed.c:281 +#, c-format +msgid "No Messages resource defined in %s\n" +msgstr "" + +#: src/dird/dird.c:554 +#, c-format +msgid "Only one Director resource permitted in %s\n" +msgstr "" + +#: src/dird/dird.c:563 src/dird/dird.c:764 src/dird/dird.c:816 +#: src/dird/dird.c:920 src/console/console.c:657 src/console/console.c:686 +#: src/filed/filed.c:288 src/filed/filed.c:446 +#: src/gnome2-console/console.c:153 src/gnome2-console/console.c:182 +#: src/stored/stored.c:332 src/wx-console/console_thread.cpp:114 +msgid "TLS required but not configured in Bacula.\n" +msgstr "" + +#: src/dird/dird.c:569 src/filed/filed.c:455 src/stored/stored.c:384 +#, c-format +msgid "\"TLS Certificate\" file not defined for Director \"%s\" in %s.\n" +msgstr "" + +#: src/dird/dird.c:575 src/filed/filed.c:461 src/stored/stored.c:390 +#, c-format +msgid "\"TLS Key\" file not defined for Director \"%s\" in %s.\n" +msgstr "" + +#: src/dird/dird.c:581 src/filed/filed.c:467 src/stored/stored.c:396 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Director \"%s\" in %s. At least one CA certificate store is required when " +"using \"TLS Verify Peer\".\n" +msgstr "" + +#: src/dird/dird.c:600 src/filed/filed.c:486 src/stored/stored.c:415 +#, c-format +msgid "Failed to initialize TLS context for Director \"%s\" in %s.\n" +msgstr "" + +#: src/dird/dird.c:608 +#, c-format +msgid "No Job records defined in %s\n" +msgstr "" + +#: src/dird/dird.c:666 src/dird/dird.c:679 +#, c-format +msgid "Hey something is wrong. p=0x%lu\n" +msgstr "" + +#: src/dird/dird.c:738 +#, c-format +msgid "\"%s\" directive in Job \"%s\" resource is required, but not found.\n" +msgstr "" + +#: src/dird/dird.c:745 +msgid "Too many items in Job resource\n" +msgstr "" + +#: src/dird/dird.c:749 +#, c-format +msgid "No storage specified in Job \"%s\" nor in Pool.\n" +msgstr "" + +#: src/dird/dird.c:771 +#, c-format +msgid "\"TLS Certificate\" file not defined for Console \"%s\" in %s.\n" +msgstr "" + +#: src/dird/dird.c:777 +#, c-format +msgid "\"TLS Key\" file not defined for Console \"%s\" in %s.\n" +msgstr "" + +#: src/dird/dird.c:783 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Console \"%s\" in %s. 
At least one CA certificate store is required when " +"using \"TLS Verify Peer\".\n" +msgstr "" + +#: src/dird/dird.c:800 src/dird/dird.c:840 src/filed/filed.c:312 +#, c-format +msgid "Failed to initialize TLS context for File daemon \"%s\" in %s.\n" +msgstr "" + +#: src/dird/dird.c:823 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"File daemon \"%s\" in %s.\n" +msgstr "" + +#: src/dird/dird.c:872 src/dird/dird.c:874 +#, c-format +msgid "Could not open Catalog \"%s\", database \"%s\".\n" +msgstr "" + +#: src/dird/dird.c:877 +#, c-format +msgid "%s" +msgstr "" + +#: src/dird/dird.c:926 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Storage \"%s\" in %s.\n" +msgstr "" + +#: src/dird/dird.c:942 src/stored/stored.c:370 +#, c-format +msgid "Failed to initialize TLS context for Storage \"%s\" in %s.\n" +msgstr "" + +#: src/dird/dird_conf.c:508 src/tray-monitor/tray_conf.c:168 +#, c-format +msgid "No %s resource defined\n" +msgstr "" + +#: src/dird/dird_conf.c:517 +#, c-format +msgid "Director: name=%s MaxJobs=%d FDtimeout=%s SDtimeout=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:522 +#, c-format +msgid " query_file=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:525 src/dird/dird_conf.c:545 src/dird/dird_conf.c:559 +#: src/dird/dird_conf.c:607 src/dird/dird_conf.c:611 src/dird/dird_conf.c:615 +#: src/dird/dird_conf.c:633 src/dird/dird_conf.c:650 src/dird/dird_conf.c:654 +#: src/dird/dird_conf.c:658 src/dird/dird_conf.c:662 src/dird/dird_conf.c:666 +#: src/dird/dird_conf.c:679 src/dird/dird_conf.c:880 +msgid " --> " +msgstr "" + +#: src/dird/dird_conf.c:530 +#, c-format +msgid "Console: name=%s SSL=%d\n" +msgstr "" + +#: src/dird/dird_conf.c:535 +#, c-format +msgid "Counter: name=%s min=%d max=%d cur=%d wrapcntr=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:540 +#, c-format +msgid "Counter: name=%s min=%d max=%d\n" +msgstr "" + +#: src/dird/dird_conf.c:551 +#, c-format +msgid "Client: name=%s address=%s FDport=%d MaxJobs=%u\n" +msgstr "" + +#: src/dird/dird_conf.c:554 +#, c-format +msgid " JobRetention=%s FileRetention=%s AutoPrune=%d\n" +msgstr "" + +#: src/dird/dird_conf.c:566 +#, c-format +msgid "" +"Device: name=%s ok=%d num_writers=%d max_writers=%d\n" +" reserved=%d open=%d append=%d read=%d labeled=%d offline=%d autochgr=%" +"d\n" +" poolid=%s volname=%s MediaType=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:576 +#, c-format +msgid "" +"Storage: name=%s address=%s SDport=%d MaxJobs=%u\n" +" DeviceName=%s MediaType=%s StorageId=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:585 +#, c-format +msgid "" +"Catalog: name=%s address=%s DBport=%d db_name=%s\n" +" db_user=%s MutliDBConn=%d\n" +msgstr "" + +#: src/dird/dird_conf.c:593 +#, c-format +msgid "%s: name=%s JobType=%d level=%s Priority=%d Enabled=%d\n" +msgstr "" + +#: src/dird/dird_conf.c:594 src/dird/ua_cmds.c:478 src/dird/ua_prune.c:365 +#: src/dird/ua_run.c:266 src/dird/ua_select.c:263 src/dird/ua_select.c:286 +msgid "Job" +msgstr "" + +#: src/dird/dird_conf.c:594 +msgid "JobDefs" +msgstr "" + +#: src/dird/dird_conf.c:598 +#, c-format +msgid "" +" MaxJobs=%u Resched=%d Times=%d Interval=%s Spool=%d WritePartAfterJob=%" +"d\n" +msgstr "" + +#: src/dird/dird_conf.c:604 +#, c-format +msgid " SelectionType=%d\n" +msgstr "" + +#: src/dird/dird_conf.c:619 +#, c-format +msgid " --> Where=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:622 +#, c-format +msgid " --> RegexWhere=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:625 +#, c-format +msgid 
" --> Bootstrap=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:628 +#, c-format +msgid " --> WriteBootstrap=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:640 src/lib/runscript.c:261 +msgid " --> RunScript\n" +msgstr "" + +#: src/dird/dird_conf.c:641 src/lib/runscript.c:262 +#, c-format +msgid " --> Command=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:642 src/lib/runscript.c:263 +#, c-format +msgid " --> Target=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:643 src/lib/runscript.c:264 +#, c-format +msgid " --> RunOnSuccess=%u\n" +msgstr "" + +#: src/dird/dird_conf.c:644 src/lib/runscript.c:265 +#, c-format +msgid " --> RunOnFailure=%u\n" +msgstr "" + +#: src/dird/dird_conf.c:645 src/lib/runscript.c:266 +#, c-format +msgid " --> FailJobOnError=%u\n" +msgstr "" + +#: src/dird/dird_conf.c:646 src/lib/runscript.c:267 +#, c-format +msgid " --> RunWhen=%u\n" +msgstr "" + +#: src/dird/dird_conf.c:672 +#, c-format +msgid " --> Run=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:676 +#, c-format +msgid " --> SelectionPattern=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:686 +#, c-format +msgid "FileSet: name=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:763 src/dird/dird_conf.c:842 +#, c-format +msgid "Schedule: name=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:768 +#, c-format +msgid " --> Run Level=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:769 +msgid " hour=" +msgstr "" + +#: src/dird/dird_conf.c:778 +msgid " mday=" +msgstr "" + +#: src/dird/dird_conf.c:787 +msgid " month=" +msgstr "" + +#: src/dird/dird_conf.c:796 +msgid " wday=" +msgstr "" + +#: src/dird/dird_conf.c:805 +msgid " wom=" +msgstr "" + +#: src/dird/dird_conf.c:814 +msgid " woy=" +msgstr "" + +#: src/dird/dird_conf.c:823 +#, c-format +msgid " mins=%d\n" +msgstr "" + +#: src/dird/dird_conf.c:825 src/dird/dird_conf.c:829 src/dird/dird_conf.c:833 +msgid " --> " +msgstr "" + +#: src/dird/dird_conf.c:846 +#, c-format +msgid "Pool: name=%s PoolType=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:848 +#, c-format +msgid " use_cat=%d use_once=%d cat_files=%d\n" +msgstr "" + +#: src/dird/dird_conf.c:851 +#, c-format +msgid " max_vols=%d auto_prune=%d VolRetention=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:854 +#, c-format +msgid " VolUse=%s recycle=%d LabelFormat=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:858 +#, c-format +msgid " CleaningPrefix=%s LabelType=%d\n" +msgstr "" + +#: src/dird/dird_conf.c:860 +#, c-format +msgid " RecyleOldest=%d PurgeOldest=%d\n" +msgstr "" + +#: src/dird/dird_conf.c:863 +#, c-format +msgid " MaxVolJobs=%d MaxVolFiles=%d MaxVolBytes=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:867 +#, c-format +msgid " MigTime=%s MigHiBytes=%s MigLoBytes=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:872 +#, c-format +msgid " NextPool=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:875 +#, c-format +msgid " RecyclePool=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:886 +#, c-format +msgid "Messages: name=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:888 +#, c-format +msgid " mailcmd=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:890 +#, c-format +msgid " opcmd=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:893 src/tray-monitor/tray_conf.c:199 +#, c-format +msgid "Unknown resource type %d in dump_resource.\n" +msgstr "" + +#: src/dird/dird_conf.c:1199 src/tray-monitor/tray_conf.c:257 +#, c-format +msgid "Unknown resource type %d in free_resource.\n" +msgstr "" + +#: src/dird/dird_conf.c:1231 src/dird/dird_conf.c:1246 +#: src/dird/dird_conf.c:1842 src/console/console_conf.c:257 +#: src/filed/filed_conf.c:347 src/gnome2-console/console_conf.c:258 +#, c-format +msgid 
"%s item is required in %s resource, but not found.\n" +msgstr "" + +#: src/dird/dird_conf.c:1237 src/lib/parse_conf.c:234 +#: src/tray-monitor/tray_conf.c:294 +#, c-format +msgid "Too many items in %s resource\n" +msgstr "" + +#: src/dird/dird_conf.c:1277 +#, c-format +msgid "Cannot find Pool resource %s\n" +msgstr "" + +#: src/dird/dird_conf.c:1286 +#, c-format +msgid "Cannot find Console resource %s\n" +msgstr "" + +#: src/dird/dird_conf.c:1292 src/filed/filed_conf.c:367 +#: src/stored/stored_conf.c:584 +#, c-format +msgid "Cannot find Director resource %s\n" +msgstr "" + +#: src/dird/dird_conf.c:1299 src/stored/stored_conf.c:590 +#, c-format +msgid "Cannot find Storage resource %s\n" +msgstr "" + +#: src/dird/dird_conf.c:1308 +#, c-format +msgid "Cannot find Job resource %s\n" +msgstr "" + +#: src/dird/dird_conf.c:1357 +#, c-format +msgid "Cannot find Counter resource %s\n" +msgstr "" + +#: src/dird/dird_conf.c:1365 src/filed/filed_conf.c:373 +#, c-format +msgid "Cannot find Client resource %s\n" +msgstr "" + +#: src/dird/dird_conf.c:1378 +#, c-format +msgid "Cannot find Schedule resource %s\n" +msgstr "" + +#: src/dird/dird_conf.c:1383 src/dird/dird_conf.c:1443 +#: src/tray-monitor/tray_conf.c:314 src/tray-monitor/tray_conf.c:352 +#, c-format +msgid "Unknown resource type %d in save_resource.\n" +msgstr "" + +#: src/dird/dird_conf.c:1458 +#, c-format +msgid "Name item is required in %s resource, but not found.\n" +msgstr "" + +#: src/dird/dird_conf.c:1466 src/console/console_conf.c:320 +#: src/filed/filed_conf.c:432 src/gnome2-console/console_conf.c:327 +#: src/tray-monitor/tray_conf.c:372 src/wx-console/console_conf.c:328 +#, c-format +msgid "Attempt to define second %s resource named \"%s\" is not permitted.\n" +msgstr "" + +#: src/dird/dird_conf.c:1471 +#, c-format +msgid "Inserting %s res: %s index=%d pass=%d\n" +msgstr "" + +#: src/dird/dird_conf.c:1543 +#, c-format +msgid "Expected a Migration Job Type keyword, got: %s" +msgstr "" + +#: src/dird/dird_conf.c:1569 +#, c-format +msgid "Expected a Job Type keyword, got: %s" +msgstr "" + +#: src/dird/dird_conf.c:1593 +#, c-format +msgid "Expected a Job Level keyword, got: %s" +msgstr "" + +#: src/dird/dird_conf.c:1613 +#, c-format +msgid "Expected a Restore replacement option, got: %s" +msgstr "" + +#: src/dird/dird_conf.c:1661 src/dird/dird_conf.c:1771 +#: src/lib/parse_conf.c:723 src/lib/parse_conf.c:738 +#, c-format +msgid "Expect %s, got: %s" +msgstr "" + +#: src/dird/dird_conf.c:1683 src/lib/parse_conf.c:482 +#, c-format +msgid "Could not find config Resource %s referenced on line %d : %s\n" +msgstr "" + +#: src/dird/dird_conf.c:1811 src/dird/inc_conf.c:645 +#, c-format +msgid "Expecting open brace. 
Got %s" +msgstr "" + +#: src/dird/dird_conf.c:1819 src/dird/inc_conf.c:360 src/dird/inc_conf.c:660 +#, c-format +msgid "Expecting keyword, got: %s\n" +msgstr "" + +#: src/dird/dird_conf.c:1825 src/dird/inc_conf.c:368 src/dird/inc_conf.c:666 +#: src/lib/parse_conf.c:874 +#, c-format +msgid "expected an equals, got: %s" +msgstr "" + +#: src/dird/dird_conf.c:1836 src/dird/inc_conf.c:378 src/dird/inc_conf.c:675 +#, c-format +msgid "Keyword %s not permitted in this resource" +msgstr "" + +#: src/dird/expand.c:255 +#, c-format +msgid "Count not update counter %s: ERR=%s\n" +msgstr "" + +#: src/dird/expand.c:427 +#, c-format +msgid "Cannot create var context: ERR=%s\n" +msgstr "" + +#: src/dird/expand.c:432 +#, c-format +msgid "Cannot set var callback: ERR=%s\n" +msgstr "" + +#: src/dird/expand.c:438 +#, c-format +msgid "Cannot set var operate: ERR=%s\n" +msgstr "" + +#: src/dird/expand.c:444 src/dird/expand.c:459 +#, c-format +msgid "Cannot unescape string: ERR=%s\n" +msgstr "" + +#: src/dird/expand.c:452 +#, c-format +msgid "Cannot expand expression \"%s\": ERR=%s\n" +msgstr "" + +#: src/dird/expand.c:470 +#, c-format +msgid "Cannot destroy var context: ERR=%s\n" +msgstr "" + +#: src/dird/fd_cmds.c:94 src/tray-monitor/tray-monitor.c:914 +msgid "File daemon" +msgstr "" + +#: src/dird/fd_cmds.c:125 +#, c-format +msgid "File daemon \"%s\" rejected Job command: %s\n" +msgstr "" + +#: src/dird/fd_cmds.c:138 +#, c-format +msgid "Error updating Client record. ERR=%s\n" +msgstr "" + +#: src/dird/fd_cmds.c:143 +#, c-format +msgid "FD gave bad response to JobId command: %s\n" +msgstr "" + +#: src/dird/fd_cmds.c:165 src/dird/fd_cmds.c:201 +msgid ", since=" +msgstr "" + +#: src/dird/fd_cmds.c:185 +msgid "No prior or suitable Full backup found in catalog. Doing FULL backup.\n" +msgstr "" + +#: src/dird/fd_cmds.c:186 src/dird/fd_cmds.c:194 +#, c-format +msgid " (upgraded from %s)" +msgstr "" + +#: src/dird/fd_cmds.c:192 +#, c-format +msgid "Prior failed job found in catalog. Upgrading to %s.\n" +msgstr "" + +#: src/dird/fd_cmds.c:253 +#, c-format +msgid "Unimplemented backup level %d %c\n" +msgstr "" + +#: src/dird/fd_cmds.c:356 src/filed/job.c:640 +#, c-format +msgid "Cannot run program: %s. ERR=%s\n" +msgstr "" + +#: src/dird/fd_cmds.c:367 src/dird/fd_cmds.c:392 src/dird/fd_cmds.c:406 +msgid ">filed: write error on socket\n" +msgstr "" + +#: src/dird/fd_cmds.c:373 +#, c-format +msgid "Error running program: %s. ERR=%s\n" +msgstr "" + +#: src/dird/fd_cmds.c:382 +#, c-format +msgid "Cannot open included file: %s. 
ERR=%s\n" +msgstr "" + +#: src/dird/fd_cmds.c:477 src/filed/job.c:1831 +#, c-format +msgid "Could not open bootstrap file %s: ERR=%s\n" +msgstr "" + +#: src/dird/fd_cmds.c:583 +#, c-format +msgid "Client \"%s\" RunScript failed.\n" +msgstr "" + +#: src/dird/fd_cmds.c:619 +#, c-format +msgid "" +" | ] -- cancel a job" +msgstr "" + +#: src/dird/ua_cmds.c:108 +msgid "create DB Pool from resource" +msgstr "" + +#: src/dird/ua_cmds.c:109 +msgid "delete [pool= | media volume=]" +msgstr "" + +#: src/dird/ua_cmds.c:110 +msgid "disable -- disable a job" +msgstr "" + +#: src/dird/ua_cmds.c:111 +msgid "enable -- enable a job" +msgstr "" + +#: src/dird/ua_cmds.c:112 +msgid "performs FileSet estimate, listing gives full listing" +msgstr "" + +#: src/dird/ua_cmds.c:113 src/console/console.c:171 +msgid "exit = quit" +msgstr "" + +#: src/dird/ua_cmds.c:114 +msgid "gui [on|off] -- non-interactive gui mode" +msgstr "" + +#: src/dird/ua_cmds.c:115 src/stored/btape.c:2540 +msgid "print this command" +msgstr "" + +#: src/dird/ua_cmds.c:116 +msgid "" +"list [pools | jobs | jobtotals | media | files ]; " +"from catalog" +msgstr "" + +#: src/dird/ua_cmds.c:117 +msgid "label a tape" +msgstr "" + +#: src/dird/ua_cmds.c:118 +msgid "full or long list like list command" +msgstr "" + +#: src/dird/ua_cmds.c:119 +msgid "print current memory usage" +msgstr "" + +#: src/dird/ua_cmds.c:120 +msgid "messages" +msgstr "" + +#: src/dird/ua_cmds.c:121 +msgid "mount " +msgstr "" + +#: src/dird/ua_cmds.c:122 +msgid "prune expired records from catalog" +msgstr "" + +#: src/dird/ua_cmds.c:123 +msgid "purge records from catalog" +msgstr "" + +#: src/dird/ua_cmds.c:124 +msgid "python control commands" +msgstr "" + +#: src/dird/ua_cmds.c:125 src/console/console.c:164 +msgid "quit" +msgstr "" + +#: src/dird/ua_cmds.c:126 +msgid "query catalog" +msgstr "" + +#: src/dird/ua_cmds.c:127 +msgid "restore files" +msgstr "" + +#: src/dird/ua_cmds.c:128 +msgid "relabel a tape" +msgstr "" + +#: src/dird/ua_cmds.c:129 +msgid "release " +msgstr "" + +#: src/dird/ua_cmds.c:130 +msgid "reload conf file" +msgstr "" + +#: src/dird/ua_cmds.c:131 +msgid "run " +msgstr "" + +#: src/dird/ua_cmds.c:132 +msgid "status [storage | client]=" +msgstr "" + +#: src/dird/ua_cmds.c:133 +msgid "sets debug level" +msgstr "" + +#: src/dird/ua_cmds.c:134 +msgid "sets new client address -- if authorized" +msgstr "" + +#: src/dird/ua_cmds.c:135 +msgid "show (resource records) [jobs | pools | ... 
| all]" +msgstr "" + +#: src/dird/ua_cmds.c:136 +msgid "use SQL to query catalog" +msgstr "" + +#: src/dird/ua_cmds.c:137 src/console/console.c:167 +msgid "print current time" +msgstr "" + +#: src/dird/ua_cmds.c:138 +msgid "turn on/off trace to file" +msgstr "" + +#: src/dird/ua_cmds.c:139 +msgid "unmount " +msgstr "" + +#: src/dird/ua_cmds.c:140 +msgid "umount for old-time Unix guys" +msgstr "" + +#: src/dird/ua_cmds.c:141 +msgid "update Volume, Pool or slots" +msgstr "" + +#: src/dird/ua_cmds.c:142 +msgid "use catalog xxx" +msgstr "" + +#: src/dird/ua_cmds.c:143 +msgid "does variable expansion" +msgstr "" + +#: src/dird/ua_cmds.c:144 +msgid "print Director version" +msgstr "" + +#: src/dird/ua_cmds.c:145 +msgid "" +"wait until no jobs are running [ | | " +"]" +msgstr "" + +#: src/dird/ua_cmds.c:186 +#, c-format +msgid "%s: is an invalid command.\n" +msgstr "" + +#: src/dird/ua_cmds.c:227 +msgid "" +"You probably don't want to be using this command since it\n" +"creates database records without labeling the Volumes.\n" +"You probably want to use the \"label\" command.\n" +"\n" +msgstr "" + +#: src/dird/ua_cmds.c:246 +#, c-format +msgid "Pool already has maximum volumes=%d\n" +msgstr "" + +#: src/dird/ua_cmds.c:248 +msgid "Enter new maximum (zero for unlimited): " +msgstr "" + +#: src/dird/ua_cmds.c:269 +#, c-format +msgid "Enter number of Volumes to create. 0=>fixed name. Max=%d: " +msgstr "" + +#: src/dird/ua_cmds.c:275 +#, c-format +msgid "The number must be between 0 and %d\n" +msgstr "" + +#: src/dird/ua_cmds.c:282 +msgid "Enter Volume name: " +msgstr "" + +#: src/dird/ua_cmds.c:286 +msgid "Enter base volume name: " +msgstr "" + +#: src/dird/ua_cmds.c:295 src/dird/ua_label.c:645 +msgid "Volume name too long.\n" +msgstr "" + +#: src/dird/ua_cmds.c:299 src/dird/ua_label.c:651 src/lib/edit.c:459 +msgid "Volume name must be at least one character long.\n" +msgstr "" + +#: src/dird/ua_cmds.c:308 +msgid "Enter the starting number: " +msgstr "" + +#: src/dird/ua_cmds.c:313 +msgid "Start number must be greater than zero.\n" +msgstr "" + +#: src/dird/ua_cmds.c:324 +msgid "Enter slot (0 for none): " +msgstr "" + +#: src/dird/ua_cmds.c:328 +msgid "InChanger? yes/no: " +msgstr "" + +#: src/dird/ua_cmds.c:356 +#, c-format +msgid "%d Volumes created in pool %s\n" +msgstr "" + +#: src/dird/ua_cmds.c:372 src/dird/ua_cmds.c:1035 +msgid "Turn on or off? " +msgstr "" + +#: src/dird/ua_cmds.c:403 +#, c-format +msgid "JobId %s is not running. Use Job name to cancel inactive jobs.\n" +msgstr "" + +#: src/dird/ua_cmds.c:412 src/dird/ua_cmds.c:422 +#, c-format +msgid "Warning Job %s is not running. Continuing anyway ...\n" +msgstr "" + +#: src/dird/ua_cmds.c:432 src/dird/ua_cmds.c:708 src/dird/ua_cmds.c:754 +msgid "Unauthorized command from this console.\n" +msgstr "" + +#: src/dird/ua_cmds.c:457 src/filed/status.c:201 src/stored/status.c:447 +msgid "No Jobs running.\n" +msgstr "" + +#: src/dird/ua_cmds.c:459 +msgid "None of your jobs are running.\n" +msgstr "" + +#: src/dird/ua_cmds.c:464 +msgid "Select Job:\n" +msgstr "" + +#: src/dird/ua_cmds.c:473 +#, c-format +msgid "JobId=%s Job=%s" +msgstr "" + +#: src/dird/ua_cmds.c:478 +msgid "Choose Job to cancel" +msgstr "" + +#: src/dird/ua_cmds.c:483 +#, c-format +msgid "" +"Cancel: %s\n" +"\n" +"%s" +msgstr "" + +#: src/dird/ua_cmds.c:484 +msgid "Confirm cancel?" 
+msgstr "" + +#: src/dird/ua_cmds.c:490 +msgid "Confirm cancel (yes/no): " +msgstr "" + +#: src/dird/ua_cmds.c:498 src/dird/ua_cmds.c:749 +#, c-format +msgid "Job \"%s\" not found.\n" +msgstr "" + +#: src/dird/ua_cmds.c:595 +#, c-format +msgid "" +"Can't set %s RecyclePool to %s, %s is not in database.\n" +"Try to update it with 'update pool=%s'\n" +msgstr "" + +#: src/dird/ua_cmds.c:662 +#, c-format +msgid "" +"Error: Pool %s already exists.\n" +"Use update to change it.\n" +msgstr "" + +#: src/dird/ua_cmds.c:673 +#, c-format +msgid "Pool %s created.\n" +msgstr "" + +#: src/dird/ua_cmds.c:690 +msgid "Python interpreter restarted.\n" +msgstr "" + +#: src/dird/ua_cmds.c:692 src/dird/ua_cmds.c:1275 +msgid "Nothing done.\n" +msgstr "" + +#: src/dird/ua_cmds.c:715 src/dird/ua_run.c:1224 +#, c-format +msgid "Client \"%s\" not found.\n" +msgstr "" + +#: src/dird/ua_cmds.c:724 +#, c-format +msgid "Client \"%s\" address set to %s\n" +msgstr "" + +#: src/dird/ua_cmds.c:758 +#, c-format +msgid "Job \"%s\" %sabled\n" +msgstr "" + +#: src/dird/ua_cmds.c:785 src/dird/ua_dotcmds.c:177 src/dird/ua_status.c:311 +#, c-format +msgid "Connecting to Storage daemon %s at %s:%d\n" +msgstr "" + +#: src/dird/ua_cmds.c:791 src/dird/ua_dotcmds.c:183 src/dird/ua_status.c:322 +msgid "Connected to storage daemon\n" +msgstr "" + +#: src/dird/ua_cmds.c:811 src/dird/ua_cmds.c:1162 src/dird/ua_dotcmds.c:203 +#: src/dird/ua_status.c:349 +#, c-format +msgid "Connecting to Client %s at %s:%d\n" +msgstr "" + +#: src/dird/ua_cmds.c:814 src/dird/ua_cmds.c:1165 src/dird/ua_dotcmds.c:206 +msgid "Failed to connect to Client.\n" +msgstr "" + +#: src/dird/ua_cmds.c:930 +msgid "Enter new debug level: " +msgstr "" + +#: src/dird/ua_cmds.c:996 src/dird/ua_dotcmds.c:282 +msgid "Available daemons are: \n" +msgstr "" + +#: src/dird/ua_cmds.c:997 src/dird/ua_dotcmds.c:283 +msgid "Director" +msgstr "" + +#: src/dird/ua_cmds.c:998 src/dird/ua_dotcmds.c:284 src/dird/ua_run.c:265 +#: src/dird/ua_select.c:168 +msgid "Storage" +msgstr "" + +#: src/dird/ua_cmds.c:999 src/dird/ua_dotcmds.c:285 src/dird/ua_run.c:271 +#: src/dird/ua_select.c:311 src/dird/ua_select.c:420 +msgid "Client" +msgstr "" + +#: src/dird/ua_cmds.c:1000 +msgid "All" +msgstr "" + +#: src/dird/ua_cmds.c:1001 +msgid "Select daemon type to set debug level" +msgstr "" + +#: src/dird/ua_cmds.c:1091 src/dird/ua_cmds.c:1130 src/dird/ua_cmds.c:1798 +#, c-format +msgid "No authorization for Job \"%s\"\n" +msgstr "" + +#: src/dird/ua_cmds.c:1101 +#, c-format +msgid "No authorization for FileSet \"%s\"\n" +msgstr "" + +#: src/dird/ua_cmds.c:1113 src/dird/ua_run.c:217 +#, c-format +msgid "Level %s not valid.\n" +msgstr "" + +#: src/dird/ua_cmds.c:1126 +msgid "No job specified.\n" +msgstr "" + +#: src/dird/ua_cmds.c:1170 +msgid "Error sending include list.\n" +msgstr "" + +#: src/dird/ua_cmds.c:1175 +msgid "Error sending exclude list.\n" +msgstr "" + +#: src/dird/ua_cmds.c:1261 +msgid "" +"In general it is not a good idea to delete either a\n" +"Pool or a Volume since they may contain data.\n" +"\n" +msgstr "" + +#: src/dird/ua_cmds.c:1264 +msgid "Choose catalog item to delete" +msgstr "" + +#: src/dird/ua_cmds.c:1332 +msgid "Enter JobId to delete: " +msgstr "" + +#: src/dird/ua_cmds.c:1367 +#, c-format +msgid "Job %s and associated records deleted from the catalog.\n" +msgstr "" + +#: src/dird/ua_cmds.c:1381 +#, c-format +msgid "" +"\n" +"This command will delete volume %s\n" +"and all Jobs saved on that volume from the Catalog\n" +msgstr "" + +#: src/dird/ua_cmds.c:1385 +#, c-format 
+msgid "Are you sure you want to delete Volume \"%s\"? (yes/no): " +msgstr "" + +#: src/dird/ua_cmds.c:1409 +#, c-format +msgid "Are you sure you want to delete Pool \"%s\"? (yes/no): " +msgstr "" + +#: src/dird/ua_cmds.c:1523 +#, c-format +msgid "Using Catalog name=%s DB=%s\n" +msgstr "" + +#: src/dird/ua_cmds.c:1583 +msgid "ERR: Can't open db\n" +msgstr "" + +#: src/dird/ua_cmds.c:1619 +msgid "ERR: Job was not found\n" +msgstr "" + +#: src/dird/ua_cmds.c:1695 src/dird/ua_tree.c:664 src/stored/btape.c:2587 +#, c-format +msgid "" +" Command Description\n" +" ======= ===========\n" +msgstr "" + +#: src/dird/ua_cmds.c:1697 +#, c-format +msgid " %-10s %s\n" +msgstr "" + +#: src/dird/ua_cmds.c:1699 +msgid "" +"\n" +"When at a prompt, entering a period cancels the command.\n" +"\n" +msgstr "" + +#: src/dird/ua_cmds.c:1716 src/dird/ua_status.c:263 src/stored/status.c:79 +#, c-format +msgid "%s Version: %s (%s) %s %s %s\n" +msgstr "" + +#: src/dird/ua_cmds.c:1759 src/dird/ua_cmds.c:1786 src/dird/ua_cmds.c:1808 +#, c-format +msgid "No authorization for Catalog \"%s\"\n" +msgstr "" + +#: src/dird/ua_cmds.c:1776 +#, c-format +msgid "No authorization for Client \"%s\"\n" +msgstr "" + +#: src/dird/ua_cmds.c:1831 src/dird/ua_select.c:221 +msgid "Could not find a Catalog resource\n" +msgstr "" + +#: src/dird/ua_cmds.c:1844 +#, c-format +msgid "Could not open catalog database \"%s\".\n" +msgstr "" + +#: src/dird/ua_cmds.c:1854 +#, c-format +msgid "Using Catalog \"%s\"\n" +msgstr "" + +#: src/dird/ua_dotcmds.c:138 +msgid ": is an invalid command.\n" +msgstr "" + +#: src/dird/ua_dotcmds.c:238 src/dird/ua_dotcmds.c:288 +msgid "The Director will segment fault.\n" +msgstr "" + +#: src/dird/ua_dotcmds.c:286 +msgid "Select daemon type to make die" +msgstr "" + +#: src/dird/ua_dotcmds.c:453 +msgid "Access to specified Client or FileSet not allowed.\n" +msgstr "" + +#: src/dird/ua_dotcmds.c:458 src/dird/ua_dotcmds.c:494 +#: src/dird/ua_restore.c:882 src/dird/ua_restore.c:911 +#: src/dird/ua_restore.c:932 +#, c-format +msgid "Query failed: %s. ERR=%s\n" +msgstr "" + +#: src/dird/ua_dotcmds.c:489 +msgid "query keyword not found.\n" +msgstr "" + +#: src/dird/ua_input.c:95 +msgid "Enter slot" +msgstr "" + +#: src/dird/ua_input.c:99 src/dird/ua_input.c:105 +#, c-format +msgid "Expected a positive integer, got: %s\n" +msgstr "" + +#: src/dird/ua_input.c:123 src/dird/ua_run.c:446 src/dird/ua_update.c:245 +#: src/dird/ua_update.c:265 src/dird/ua_update.c:609 +#: src/stored/parse_bsr.c:779 src/tools/dbcheck.c:1202 +msgid "yes" +msgstr "" + +#: src/dird/ua_input.c:127 src/dird/ua_update.c:245 src/dird/ua_update.c:265 +#: src/dird/ua_update.c:609 src/stored/parse_bsr.c:779 +msgid "no" +msgstr "" + +#: src/dird/ua_input.c:162 +msgid "Invalid response. 
You must answer yes or no.\n" +msgstr "" + +#: src/dird/ua_input.c:185 +msgid "Invalid Enabled value, it must be yes, no, archived, 0, 1, or 2\n" +msgstr "" + +#: src/dird/ua_label.c:102 +msgid "Negative numbers not permitted\n" +msgstr "" + +#: src/dird/ua_label.c:108 +msgid "Range end is not integer.\n" +msgstr "" + +#: src/dird/ua_label.c:113 +msgid "Range start is not an integer.\n" +msgstr "" + +#: src/dird/ua_label.c:119 +msgid "Range end not bigger than start.\n" +msgstr "" + +#: src/dird/ua_label.c:125 +msgid "Input value is not an integer.\n" +msgstr "" + +#: src/dird/ua_label.c:131 +msgid "Values must be be greater than zero.\n" +msgstr "" + +#: src/dird/ua_label.c:135 +msgid "Slot too large.\n" +msgstr "" + +#: src/dird/ua_label.c:184 src/dird/ua_label.c:349 src/dird/ua_run.c:1199 +msgid "command line" +msgstr "" + +#: src/dird/ua_label.c:202 src/dird/ua_label.c:513 +msgid "No slots in changer to scan.\n" +msgstr "" + +#: src/dird/ua_label.c:214 src/dird/ua_label.c:524 +msgid "No Volumes found to label, or no barcodes.\n" +msgstr "" + +#: src/dird/ua_label.c:224 +#, c-format +msgid "Slot %d greater than max %d ignored.\n" +msgstr "" + +#: src/dird/ua_label.c:253 +#, c-format +msgid "No VolName for Slot=%d InChanger set to zero.\n" +msgstr "" + +#: src/dird/ua_label.c:271 +#, c-format +msgid "Catalog record for Volume \"%s\" updated to reference slot %d.\n" +msgstr "" + +#: src/dird/ua_label.c:275 +#, c-format +msgid "Catalog record for Volume \"%s\" is up to date.\n" +msgstr "" + +#: src/dird/ua_label.c:281 +#, c-format +msgid "Volume \"%s\" not found in catalog. Slot=%d InChanger set to zero.\n" +msgstr "" + +#: src/dird/ua_label.c:378 +#, c-format +msgid "" +"Volume \"%s\" has VolStatus %s. It must be Purged or Recycled before " +"relabeling.\n" +msgstr "" + +#: src/dird/ua_label.c:394 +msgid "Enter new Volume name: " +msgstr "" + +#: src/dird/ua_label.c:407 +#, c-format +msgid "Media record for new Volume \"%s\" already exists.\n" +msgstr "" + +#: src/dird/ua_label.c:425 +msgid "Enter slot (0 or Enter for none): " +msgstr "" + +#: src/dird/ua_label.c:453 +#, c-format +msgid "Delete of Volume \"%s\" failed. ERR=%s" +msgstr "" + +#: src/dird/ua_label.c:456 +#, c-format +msgid "Old volume \"%s\" deleted from catalog.\n" +msgstr "" + +#: src/dird/ua_label.c:467 +#, c-format +msgid "Requesting to mount %s ...\n" +msgstr "" + +#: src/dird/ua_label.c:489 +msgid "Do not forget to mount the drive!!!\n" +msgstr "" + +#: src/dird/ua_label.c:529 +msgid "" +"The following Volumes will be labeled:\n" +"Slot Volume\n" +"==============\n" +msgstr "" + +#: src/dird/ua_label.c:538 +msgid "Do you want to label these Volumes? 
(yes|no): " +msgstr "" + +#: src/dird/ua_label.c:559 +#, c-format +msgid "Media record for Slot %d Volume \"%s\" already exists.\n" +msgstr "" + +#: src/dird/ua_label.c:565 +#, c-format +msgid "Error setting InChanger: ERR=%s" +msgstr "" + +#: src/dird/ua_label.c:588 +#, c-format +msgid "Maximum pool Volumes=%d reached.\n" +msgstr "" + +#: src/dird/ua_label.c:595 +#, c-format +msgid "Catalog record for cleaning tape \"%s\" successfully created.\n" +msgstr "" + +#: src/dird/ua_label.c:602 +#, c-format +msgid "Catalog error on cleaning tape: %s" +msgstr "" + +#: src/dird/ua_label.c:638 +#, c-format +msgid "Illegal character \"%c\" in a volume name.\n" +msgstr "" + +#: src/dird/ua_label.c:685 +#, c-format +msgid "Sending relabel command from \"%s\" to \"%s\" ...\n" +msgstr "" + +#: src/dird/ua_label.c:692 +#, c-format +msgid "Sending label command for Volume \"%s\" Slot %d ...\n" +msgstr "" + +#: src/dird/ua_label.c:733 +#, c-format +msgid "Catalog record for Volume \"%s\", Slot %d successfully created.\n" +msgstr "" + +#: src/dird/ua_label.c:746 +#, c-format +msgid "Label command failed for Volume %s.\n" +msgstr "" + +#: src/dird/ua_label.c:756 +#, c-format +msgid "Connecting to Storage daemon %s at %s:%d ...\n" +msgstr "" + +#: src/dird/ua_label.c:784 +msgid "Could not open SD socket.\n" +msgstr "" + +#: src/dird/ua_label.c:856 src/dird/ua_label.c:866 +#, c-format +msgid "Invalid Slot number: %s\n" +msgstr "" + +#: src/dird/ua_label.c:875 +#, c-format +msgid "Invalid Volume name: %s\n" +msgstr "" + +#: src/dird/ua_label.c:954 +#, c-format +msgid "Device \"%s\" has %d slots.\n" +msgstr "" + +#: src/dird/ua_label.c:1003 +#, c-format +msgid "Pool \"%s\" resource not found for volume \"%s\"!\n" +msgstr "" + +#: src/dird/ua_output.c:73 src/dird/ua_output.c:97 +msgid "ON or OFF keyword missing.\n" +msgstr "" + +#: src/dird/ua_output.c:185 +msgid "Keywords for the show command are:\n" +msgstr "" + +#: src/dird/ua_output.c:191 +#, c-format +msgid "%s resource %s not found.\n" +msgstr "" + +#: src/dird/ua_output.c:194 +#, c-format +msgid "Resource %s not found\n" +msgstr "" + +#: src/dird/ua_output.c:262 +msgid "Hey! DB is NULL\n" +msgstr "" + +#: src/dird/ua_output.c:376 +#, c-format +msgid "Jobid %d used %d Volume(s): %s\n" +msgstr "" + +#: src/dird/ua_output.c:394 +msgid "No Pool specified.\n" +msgstr "" + +#: src/dird/ua_output.c:405 src/dird/ua_select.c:488 +#, c-format +msgid "Error obtaining pool ids. ERR=%s\n" +msgstr "" + +#: src/dird/ua_output.c:415 +#, c-format +msgid "Pool: %s\n" +msgstr "" + +#: src/dird/ua_output.c:431 src/dird/ua_status.c:481 +msgid "Ignoring invalid value for days. 
Max is 50.\n" +msgstr "" + +#: src/dird/ua_output.c:440 +#, c-format +msgid "Unknown list keyword: %s\n" +msgstr "" + +#: src/dird/ua_output.c:466 +#, c-format +msgid "%s is not a job name.\n" +msgstr "" + +#: src/dird/ua_output.c:477 +#, c-format +msgid "Could not Pool Job %s\n" +msgstr "" + +#: src/dird/ua_output.c:489 +#, c-format +msgid "Could not find next Volume for Job %s (Pool=%s, Level=%s).\n" +msgstr "" + +#: src/dird/ua_output.c:493 +#, c-format +msgid "" +"The next Volume to be used by Job \"%s\" (Pool=%s, Level=%s) will be %s\n" +msgstr "" + +#: src/dird/ua_output.c:503 +#, c-format +msgid "Could not find next Volume for Job %s.\n" +msgstr "" + +#: src/dird/ua_output.c:702 +msgid "You have no messages.\n" +msgstr "" + +#: src/dird/ua_prune.c:132 +msgid "Choose item to prune" +msgstr "" + +#: src/dird/ua_prune.c:156 +#, c-format +msgid "Cannot prune Volume \"%s\" because it is archived.\n" +msgstr "" + +#: src/dird/ua_prune.c:218 +msgid "No Files found to prune.\n" +msgstr "" + +#: src/dird/ua_prune.c:240 +#, c-format +msgid "Pruned Files from %s Jobs for client %s from catalog.\n" +msgstr "" + +#: src/dird/ua_prune.c:364 +#, c-format +msgid "Pruned %d %s for client %s from catalog.\n" +msgstr "" + +#: src/dird/ua_prune.c:365 +msgid "Jobs" +msgstr "" + +#: src/dird/ua_prune.c:367 +msgid "No Jobs found to prune.\n" +msgstr "" + +#: src/dird/ua_purge.c:90 +msgid "" +"\n" +"This command is can be DANGEROUS!!!\n" +"\n" +"It purges (deletes) all Files from a Job,\n" +"JobId, Client or Volume; or it purges (deletes)\n" +"all Jobs from a Client or Volume without regard\n" +"for retention periods. Normally you should use the\n" +"PRUNE command, which respects retention periods.\n" +msgstr "" + +#: src/dird/ua_purge.c:152 +msgid "Choose item to purge" +msgstr "" + +#: src/dird/ua_purge.c:199 +#, c-format +msgid "Begin purging files for Client \"%s\"\n" +msgstr "" + +#: src/dird/ua_purge.c:208 src/dird/ua_purge.c:258 +#, c-format +msgid "No Files found for client %s to purge from %s catalog.\n" +msgstr "" + +#: src/dird/ua_purge.c:211 +#, c-format +msgid "Files for %d Jobs for client \"%s\" purged from %s catalog.\n" +msgstr "" + +#: src/dird/ua_purge.c:249 +#, c-format +msgid "Begin purging jobs from Client \"%s\"\n" +msgstr "" + +#: src/dird/ua_purge.c:261 +#, c-format +msgid "%d Jobs for client %s purged from %s catalog.\n" +msgstr "" + +#: src/dird/ua_purge.c:407 +#, c-format +msgid "" +"\n" +"Volume \"%s\" has VolStatus \"%s\" and cannot be purged.\n" +"The VolStatus must be: Append, Full, Used, or Error to be purged.\n" +msgstr "" + +#: src/dird/ua_purge.c:440 +#, c-format +msgid "%d File%s on Volume \"%s\" purged from catalog.\n" +msgstr "" + +#: src/dird/ua_purge.c:481 +#, c-format +msgid "" +"There are no more Jobs associated with Volume \"%s\". 
Marking it purged.\n" +msgstr "" + +#: src/dird/ua_purge.c:522 +#, c-format +msgid "Unable move recycled Volume in full Pool \"%s\" MaxVols=%d\n" +msgstr "" + +#: src/dird/ua_purge.c:535 +#, c-format +msgid "All records pruned from Volume \"%s\"; marking it \"Purged\"\n" +msgstr "" + +#: src/dird/ua_purge.c:540 +#, c-format +msgid "Cannot purge Volume with VolStatus=%s\n" +msgstr "" + +#: src/dird/ua_query.c:72 src/findlib/create_file.c:283 +#: src/findlib/create_file.c:383 +#, c-format +msgid "Could not open %s: ERR=%s\n" +msgstr "" + +#: src/dird/ua_query.c:77 +msgid "Available queries:\n" +msgstr "" + +#: src/dird/ua_query.c:84 +msgid "Choose a query" +msgstr "" + +#: src/dird/ua_query.c:98 +msgid "Could not find query.\n" +msgstr "" + +#: src/dird/ua_query.c:116 +msgid "Too many prompts in query, max is 9.\n" +msgstr "" + +#: src/dird/ua_query.c:219 +#, c-format +msgid "Warning prompt %d missing.\n" +msgstr "" + +#: src/dird/ua_query.c:264 +msgid "" +"Entering SQL query mode.\n" +"Terminate each query with a semicolon.\n" +"Terminate query mode with a blank line.\n" +msgstr "" + +#: src/dird/ua_query.c:267 src/dird/ua_query.c:283 +msgid "Enter SQL query: " +msgstr "" + +#: src/dird/ua_query.c:285 +msgid "Add to SQL query: " +msgstr "" + +#: src/dird/ua_query.c:288 +msgid "End query mode.\n" +msgstr "" + +#: src/dird/ua_restore.c:133 +msgid "\"RegexWhere\" specification not authorized.\n" +msgstr "" + +#: src/dird/ua_restore.c:140 +msgid "\"where\" specification not authorized.\n" +msgstr "" + +#: src/dird/ua_restore.c:162 +msgid "" +"No Restore Job Resource found in bacula-dir.conf.\n" +"You must create at least one before running this command.\n" +msgstr "" + +#: src/dird/ua_restore.c:178 +msgid "Restore not done.\n" +msgstr "" + +#: src/dird/ua_restore.c:190 +msgid "Unable to construct a valid BSR. 
Cannot continue.\n" +msgstr "" + +#: src/dird/ua_restore.c:194 src/dird/ua_restore.c:209 +msgid "No files selected to be restored.\n" +msgstr "" + +#: src/dird/ua_restore.c:202 +msgid "" +"\n" +"1 file selected to be restored.\n" +"\n" +msgstr "" + +#: src/dird/ua_restore.c:205 +#, c-format +msgid "" +"\n" +"%s files selected to be restored.\n" +"\n" +msgstr "" + +#: src/dird/ua_restore.c:224 +msgid "No Client resource found!\n" +msgstr "" + +#: src/dird/ua_restore.c:329 +#, c-format +msgid "Missing value for keyword: %s\n" +msgstr "" + +#: src/dird/ua_restore.c:407 +msgid "List last 20 Jobs run" +msgstr "" + +#: src/dird/ua_restore.c:408 +msgid "List Jobs where a given File is saved" +msgstr "" + +#: src/dird/ua_restore.c:409 +msgid "Enter list of comma separated JobIds to select" +msgstr "" + +#: src/dird/ua_restore.c:410 +msgid "Enter SQL list command" +msgstr "" + +#: src/dird/ua_restore.c:411 +msgid "Select the most recent backup for a client" +msgstr "" + +#: src/dird/ua_restore.c:412 +msgid "Select backup for a client before a specified time" +msgstr "" + +#: src/dird/ua_restore.c:413 +msgid "Enter a list of files to restore" +msgstr "" + +#: src/dird/ua_restore.c:414 +msgid "Enter a list of files to restore before a specified time" +msgstr "" + +#: src/dird/ua_restore.c:415 +msgid "Find the JobIds of the most recent backup for a client" +msgstr "" + +#: src/dird/ua_restore.c:416 +msgid "Find the JobIds for a backup for a client before a specified time" +msgstr "" + +#: src/dird/ua_restore.c:417 +msgid "Enter a list of directories to restore for found JobIds" +msgstr "" + +#: src/dird/ua_restore.c:418 src/dird/ua_status.c:760 src/filed/status.c:256 +msgid "Cancel" +msgstr "" + +#: src/dird/ua_restore.c:459 +#, c-format +msgid "Unknown keyword: %s\n" +msgstr "" + +#: src/dird/ua_restore.c:488 src/dird/ua_update.c:833 +#, c-format +msgid "Improper date format: %s\n" +msgstr "" + +#: src/dird/ua_restore.c:523 src/dird/ua_select.c:620 +#, c-format +msgid "Error: Pool resource \"%s\" does not exist.\n" +msgstr "" + +#: src/dird/ua_restore.c:528 +#, c-format +msgid "Error: Pool resource \"%s\" access not allowed.\n" +msgstr "" + +#: src/dird/ua_restore.c:544 +msgid "" +"\n" +"First you select one or more JobIds that contain files\n" +"to be restored. You will be presented several methods\n" +"of specifying the JobIds. 
Then you will be allowed to\n" +"select which files from those JobIds are to be restored.\n" +"\n" +msgstr "" + +#: src/dird/ua_restore.c:556 +msgid "To select the JobIds, you have the following choices:\n" +msgstr "" + +#: src/dird/ua_restore.c:561 +msgid "Select item: " +msgstr "" + +#: src/dird/ua_restore.c:566 src/dird/ua_restore.c:601 +msgid "SQL query not authorized.\n" +msgstr "" + +#: src/dird/ua_restore.c:579 +msgid "Enter Filename (no path):" +msgstr "" + +#: src/dird/ua_restore.c:594 src/dird/ua_restore.c:702 +msgid "Enter JobId(s), comma separated, to restore: " +msgstr "" + +#: src/dird/ua_restore.c:604 +msgid "Enter SQL list command: " +msgstr "" + +#: src/dird/ua_restore.c:638 src/dird/ua_restore.c:661 +msgid "" +"Enter file names with paths, or < to enter a filename\n" +"containing a list of file names with paths, and terminate\n" +"them with a blank line.\n" +msgstr "" + +#: src/dird/ua_restore.c:642 src/dird/ua_restore.c:665 +msgid "Enter full filename: " +msgstr "" + +#: src/dird/ua_restore.c:700 +#, c-format +msgid "You have already selected the following JobIds: %s\n" +msgstr "" + +#: src/dird/ua_restore.c:717 +msgid "" +"Enter full directory names or start the name\n" +"with a < to indicate it is a filename containing a list\n" +"of directories and terminate them with a blank line.\n" +msgstr "" + +#: src/dird/ua_restore.c:721 +msgid "Enter directory name: " +msgstr "" + +#: src/dird/ua_restore.c:752 +msgid "Invalid JobId in list.\n" +msgstr "" + +#: src/dird/ua_restore.c:765 +#, c-format +msgid "Unable to get Job record for JobId=%s: ERR=%s\n" +msgstr "" + +#: src/dird/ua_restore.c:771 +#, c-format +msgid "Access to JobId=%s (Job \"%s\") not authorized. Not selected.\n" +msgstr "" + +#: src/dird/ua_restore.c:784 +msgid "No Jobs selected.\n" +msgstr "" + +#: src/dird/ua_restore.c:788 +#, c-format +msgid "You have selected the following JobIds: %s\n" +msgstr "" + +#: src/dird/ua_restore.c:790 +#, c-format +msgid "You have selected the following JobId: %s\n" +msgstr "" + +#: src/dird/ua_restore.c:800 +msgid "" +"The restored files will the most current backup\n" +"BEFORE the date you specify below.\n" +"\n" +msgstr "" + +#: src/dird/ua_restore.c:803 +msgid "Enter date as YYYY-MM-DD HH:MM:SS :" +msgstr "" + +#: src/dird/ua_restore.c:809 +msgid "Improper date format.\n" +msgstr "" + +#: src/dird/ua_restore.c:830 +#, c-format +msgid "Cannot open file %s: ERR=%s\n" +msgstr "" + +#: src/dird/ua_restore.c:838 src/dird/ua_restore.c:842 +#, c-format +msgid "Error occurred on line %d of file \"%s\"\n" +msgstr "" + +#: src/dird/ua_restore.c:886 src/dird/ua_restore.c:915 +#, c-format +msgid "No database record found for: %s\n" +msgstr "" + +#: src/dird/ua_restore.c:902 +msgid "No JobId specified cannot continue.\n" +msgstr "" + +#: src/dird/ua_restore.c:936 +#, c-format +msgid "No table found: %s\n" +msgstr "" + +#: src/dird/ua_restore.c:1033 +#, c-format +msgid "" +"\n" +"Building directory tree for JobId %s ... " +msgstr "" + +#: src/dird/ua_restore.c:1045 +msgid "" +"\n" +"There were no files inserted into the tree, so file selection\n" +"is not possible.Most likely your retention policy pruned the files\n" +msgstr "" + +#: src/dird/ua_restore.c:1047 +msgid "" +"\n" +"Do you want to restore all the files? 
(yes|no): " +msgstr "" + +#: src/dird/ua_restore.c:1063 +#, c-format +msgid "" +"\n" +"1 Job, %s files inserted into the tree and marked for extraction.\n" +msgstr "" + +#: src/dird/ua_restore.c:1067 +#, c-format +msgid "" +"\n" +"1 Job, %s files inserted into the tree.\n" +msgstr "" + +#: src/dird/ua_restore.c:1073 +#, c-format +msgid "" +"\n" +"%d Jobs, %s files inserted into the tree and marked for extraction.\n" +msgstr "" + +#: src/dird/ua_restore.c:1077 +#, c-format +msgid "" +"\n" +"%d Jobs, %s files inserted into the tree.\n" +msgstr "" + +#: src/dird/ua_restore.c:1150 +#, c-format +msgid "Error getting FileSet \"%s\": ERR=%s\n" +msgstr "" + +#: src/dird/ua_restore.c:1158 src/dird/ua_select.c:183 +msgid "The defined FileSet resources are:\n" +msgstr "" + +#: src/dird/ua_restore.c:1162 src/dird/ua_run.c:267 src/dird/ua_select.c:191 +msgid "FileSet" +msgstr "" + +#: src/dird/ua_restore.c:1162 src/dird/ua_select.c:191 +msgid "Select FileSet resource" +msgstr "" + +#: src/dird/ua_restore.c:1164 +#, c-format +msgid "No FileSet found for client \"%s\".\n" +msgstr "" + +#: src/dird/ua_restore.c:1170 +#, c-format +msgid "Error getting FileSet record: %s\n" +msgstr "" + +#: src/dird/ua_restore.c:1171 +msgid "" +"This probably means you modified the FileSet.\n" +"Continuing anyway.\n" +msgstr "" + +#: src/dird/ua_restore.c:1186 +#, c-format +msgid "Pool \"%s\" not found, using any pool.\n" +msgstr "" + +#: src/dird/ua_restore.c:1213 src/dird/ua_restore.c:1229 +#, c-format +msgid "No Full backup before %s found.\n" +msgstr "" + +#: src/dird/ua_restore.c:1252 +msgid "No jobs found.\n" +msgstr "" + +#: src/dird/ua_restore.c:1411 +#, c-format +msgid "Warning default storage overridden by \"%s\" on command line.\n" +msgstr "" + +#: src/dird/ua_restore.c:1427 +#, c-format +msgid "Storage \"%s\" not found, using Storage \"%s\" from MediaType \"%s\".\n" +msgstr "" + +#: src/dird/ua_restore.c:1435 +#, c-format +msgid "" +"\n" +"Unable to find Storage resource for\n" +"MediaType \"%s\", needed by the Jobs you selected.\n" +msgstr "" + +#: src/dird/ua_run.c:152 src/dird/ua_run.c:338 +msgid "Invalid time, using current time.\n" +msgstr "" + +#: src/dird/ua_run.c:174 +#, c-format +msgid "Invalid replace option: %s\n" +msgstr "" + +#: src/dird/ua_run.c:242 +msgid "OK to run? 
(yes/mod/no): " +msgstr "" + +#: src/dird/ua_run.c:260 src/dird/ua_select.c:63 +msgid "mod" +msgstr "" + +#: src/dird/ua_run.c:263 src/dird/ua_update.c:518 +msgid "Parameters to modify:\n" +msgstr "" + +#: src/dird/ua_run.c:264 +msgid "Level" +msgstr "" + +#: src/dird/ua_run.c:269 +msgid "Restore Client" +msgstr "" + +#: src/dird/ua_run.c:273 +msgid "When" +msgstr "" + +#: src/dird/ua_run.c:274 +msgid "Priority" +msgstr "" + +#: src/dird/ua_run.c:278 src/dird/ua_select.c:506 src/dird/ua_select.c:596 +#: src/dird/ua_update.c:529 +msgid "Pool" +msgstr "" + +#: src/dird/ua_run.c:280 +msgid "Verify Job" +msgstr "" + +#: src/dird/ua_run.c:283 +msgid "Bootstrap" +msgstr "" + +#: src/dird/ua_run.c:284 +msgid "Where" +msgstr "" + +#: src/dird/ua_run.c:285 +msgid "File Relocation" +msgstr "" + +#: src/dird/ua_run.c:286 +msgid "Replace" +msgstr "" + +#: src/dird/ua_run.c:287 +msgid "JobId" +msgstr "" + +#: src/dird/ua_run.c:289 src/dird/ua_run.c:487 src/dird/ua_update.c:535 +msgid "Select parameter to modify" +msgstr "" + +#: src/dird/ua_run.c:298 src/dird/ua_run.c:1205 +msgid "user selection" +msgstr "" + +#: src/dird/ua_run.c:330 +msgid "" +"Please enter desired start time as YYYY-MM-DD HH:MM:SS (return for now): " +msgstr "" + +#: src/dird/ua_run.c:345 +msgid "Enter new Priority: " +msgstr "" + +#: src/dird/ua_run.c:349 +msgid "Priority must be a positive integer.\n" +msgstr "" + +#: src/dird/ua_run.c:369 +msgid "Please enter the Bootstrap file name: " +msgstr "" + +#: src/dird/ua_run.c:380 +#, c-format +msgid "Warning cannot open %s: ERR=%s\n" +msgstr "" + +#: src/dird/ua_run.c:399 +msgid "Please enter path prefix for restore (/ for none): " +msgstr "" + +#: src/dird/ua_run.c:421 +msgid "Replace:\n" +msgstr "" + +#: src/dird/ua_run.c:425 +msgid "Select replace option" +msgstr "" + +#: src/dird/ua_run.c:435 +msgid "" +"You must set the bootstrap file to NULL to be able to specify a JobId.\n" +msgstr "" + +#: src/dird/ua_run.c:455 +msgid "Job failed.\n" +msgstr "" + +#: src/dird/ua_run.c:458 +#, c-format +msgid "Job queued. JobId=%s\n" +msgstr "" + +#: src/dird/ua_run.c:464 +msgid "Job not run.\n" +msgstr "" + +#: src/dird/ua_run.c:476 +#, c-format +msgid "strip_prefix=%s add_prefix=%s add_suffix=%s\n" +msgstr "" + +#: src/dird/ua_run.c:479 +msgid "This will replace your current Where value\n" +msgstr "" + +#: src/dird/ua_run.c:480 +msgid "Strip prefix" +msgstr "" + +#: src/dird/ua_run.c:481 +msgid "Add prefix" +msgstr "" + +#: src/dird/ua_run.c:482 +msgid "Add file suffix" +msgstr "" + +#: src/dird/ua_run.c:483 +msgid "Enter a regexp" +msgstr "" + +#: src/dird/ua_run.c:484 +msgid "Test filename manipulation" +msgstr "" + +#: src/dird/ua_run.c:485 +msgid "Use this ?" 
+msgstr "" + +#: src/dird/ua_run.c:490 +msgid "Please enter path prefix to strip: " +msgstr "" + +#: src/dird/ua_run.c:498 +msgid "Please enter path prefix to add (/ for none): " +msgstr "" + +#: src/dird/ua_run.c:509 +msgid "Please enter file suffix to add: " +msgstr "" + +#: src/dird/ua_run.c:516 +msgid "Please enter a valid regexp (!from!to!): " +msgstr "" + +#: src/dird/ua_run.c:529 +#, c-format +msgid "regexwhere=%s\n" +msgstr "" + +#: src/dird/ua_run.c:535 +#, c-format +msgid "strip_prefix=%s add_prefix=%s add_suffix=%s result=%s\n" +msgstr "" + +#: src/dird/ua_run.c:542 +msgid "Cannot use your regexp\n" +msgstr "" + +#: src/dird/ua_run.c:546 +msgid "Please enter filename to test: " +msgstr "" + +#: src/dird/ua_run.c:548 +#, c-format +msgid "%s -> %s\n" +msgstr "" + +#: src/dird/ua_run.c:592 +msgid "Cannot use your regexp.\n" +msgstr "" + +#: src/dird/ua_run.c:605 src/dird/ua_run.c:631 +msgid "Levels:\n" +msgstr "" + +#: src/dird/ua_run.c:606 src/filed/status.c:373 src/lib/util.c:329 +#: src/stored/status.c:560 +msgid "Base" +msgstr "" + +#: src/dird/ua_run.c:607 src/filed/status.c:375 src/lib/util.c:331 +#: src/stored/status.c:562 +msgid "Full" +msgstr "" + +#: src/dird/ua_run.c:608 src/filed/status.c:378 src/lib/util.c:334 +#: src/stored/status.c:565 +msgid "Incremental" +msgstr "" + +#: src/dird/ua_run.c:609 src/filed/status.c:381 src/lib/util.c:337 +#: src/stored/status.c:568 +msgid "Differential" +msgstr "" + +#: src/dird/ua_run.c:610 src/filed/status.c:384 src/lib/util.c:340 +#: src/stored/status.c:571 +msgid "Since" +msgstr "" + +#: src/dird/ua_run.c:611 src/dird/ua_run.c:637 +msgid "Select level" +msgstr "" + +#: src/dird/ua_run.c:632 +msgid "Initialize Catalog" +msgstr "" + +#: src/dird/ua_run.c:633 src/filed/status.c:387 src/lib/util.c:343 +#: src/stored/status.c:574 +msgid "Verify Catalog" +msgstr "" + +#: src/dird/ua_run.c:634 src/lib/util.c:349 +msgid "Verify Volume to Catalog" +msgstr "" + +#: src/dird/ua_run.c:635 src/lib/util.c:352 +msgid "Verify Disk to Catalog" +msgstr "" + +#: src/dird/ua_run.c:636 +msgid "Verify Volume Data (not yet implemented)" +msgstr "" + +#: src/dird/ua_run.c:657 +msgid "Level not appropriate for this Job. 
Cannot be changed.\n" +msgstr "" + +#: src/dird/ua_run.c:671 +#, c-format +msgid "" +"Run %s job\n" +"JobName: %s\n" +"FileSet: %s\n" +"Client: %s\n" +"Storage: %s\n" +"When: %s\n" +"Priority: %d\n" +msgstr "" + +#: src/dird/ua_run.c:678 src/lib/util.c:296 +msgid "Admin" +msgstr "" + +#: src/dird/ua_run.c:691 +#, c-format +msgid "" +"Run %s job\n" +"JobName: %s\n" +"Level: %s\n" +"Client: %s\n" +"FileSet: %s\n" +"Pool: %s (From %s)\n" +"Storage: %s (From %s)\n" +"When: %s\n" +"Priority: %d\n" +msgstr "" + +#: src/dird/ua_run.c:700 src/lib/util.c:287 +msgid "Backup" +msgstr "" + +#: src/dird/ua_run.c:723 +#, c-format +msgid "" +"Run %s job\n" +"JobName: %s\n" +"Level: %s\n" +"Client: %s\n" +"FileSet: %s\n" +"Pool: %s (From %s)\n" +"Storage: %s (From %s)\n" +"Verify Job: %s\n" +"Verify List: %s\n" +"When: %s\n" +"Priority: %d\n" +msgstr "" + +#: src/dird/ua_run.c:734 src/lib/util.c:290 +msgid "Verify" +msgstr "" + +#: src/dird/ua_run.c:752 +msgid "Please enter a JobId for restore: " +msgstr "" + +#: src/dird/ua_run.c:764 +#, c-format +msgid "" +"Run Restore job\n" +"JobName: f%s\n" +"Bootstrap: %s\n" +"RegexWhere: %s\n" +"Replace: %s\n" +"FileSet: %s\n" +"Backup Client: %s\n" +"Restore Client: %s\n" +"Storage: %s\n" +"When: %s\n" +"Catalog: %s\n" +"Priority: %d\n" +msgstr "" + +#: src/dird/ua_run.c:789 +#, c-format +msgid "" +"Run Restore job\n" +"JobName: %s\n" +"Bootstrap: %s\n" +"Where: %s\n" +"Replace: %s\n" +"FileSet: %s\n" +"Backup Client: %s\n" +"Restore Client: %s\n" +"Storage: %s\n" +"When: %s\n" +"Catalog: %s\n" +"Priority: %d\n" +msgstr "" + +#: src/dird/ua_run.c:816 +#, c-format +msgid "" +"Run Restore job\n" +"JobName: %s\n" +"Bootstrap: %s\n" +msgstr "" + +#: src/dird/ua_run.c:824 +#, c-format +msgid "RegexWhere: %s\n" +msgstr "" + +#: src/dird/ua_run.c:827 +#, c-format +msgid "Where: %s\n" +msgstr "" + +#: src/dird/ua_run.c:831 +#, c-format +msgid "" +"Replace: %s\n" +"Client: %s\n" +"Storage: %s\n" +"JobId: %s\n" +"When: %s\n" +"Catalog: %s\n" +"Priority: %d\n" +msgstr "" + +#: src/dird/ua_run.c:850 +#, c-format +msgid "" +"Run Migration job\n" +"JobName: %s\n" +"Bootstrap: %s\n" +"Client: %s\n" +"FileSet: %s\n" +"Pool: %s (From %s)\n" +"Read Storage: %s (From %s)\n" +"Write Storage: %s (From %s)\n" +"JobId: %s\n" +"When: %s\n" +"Catalog: %s\n" +"Priority: %d\n" +msgstr "" + +#: src/dird/ua_run.c:875 +#, c-format +msgid "Unknown Job Type=%d\n" +msgstr "" + +#: src/dird/ua_run.c:935 +#, c-format +msgid "Value missing for keyword %s\n" +msgstr "" + +#: src/dird/ua_run.c:942 +msgid "Job name specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:950 +msgid "JobId specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:959 src/dird/ua_run.c:1103 +msgid "Client specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:967 +msgid "FileSet specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:975 +msgid "Level specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:984 +msgid "Storage specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:992 +msgid "RegexWhere or Where specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:997 +msgid "No authorization for \"regexwhere\" specification.\n" +msgstr "" + +#: src/dird/ua_run.c:1004 +msgid "Where or RegexWhere specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:1009 +msgid "No authoriztion for \"where\" specification.\n" +msgstr "" + +#: src/dird/ua_run.c:1016 +msgid "Bootstrap specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:1024 +msgid "Replace specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:1032 +msgid "When specified 
twice.\n" +msgstr "" + +#: src/dird/ua_run.c:1040 +msgid "Priority specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:1045 +msgid "Priority must be positive nonzero setting it to 10.\n" +msgstr "" + +#: src/dird/ua_run.c:1055 +msgid "Verify Job specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:1087 +msgid "Migration Job specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:1095 +msgid "Pool specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:1111 +msgid "Restore Client specified twice.\n" +msgstr "" + +#: src/dird/ua_run.c:1135 +#, c-format +msgid "Invalid keyword: %s\n" +msgstr "" + +#: src/dird/ua_run.c:1146 +#, c-format +msgid "Catalog \"%s\" not found\n" +msgstr "" + +#: src/dird/ua_run.c:1150 +#, c-format +msgid "No authorization. Catalog \"%s\".\n" +msgstr "" + +#: src/dird/ua_run.c:1161 +#, c-format +msgid "Job \"%s\" not found\n" +msgstr "" + +#: src/dird/ua_run.c:1168 +msgid "A job name must be specified.\n" +msgstr "" + +#: src/dird/ua_run.c:1174 +#, c-format +msgid "No authorization. Job \"%s\".\n" +msgstr "" + +#: src/dird/ua_run.c:1182 +#, c-format +msgid "Pool \"%s\" not found.\n" +msgstr "" + +#: src/dird/ua_run.c:1192 +#, c-format +msgid "No authorization. Pool \"%s\".\n" +msgstr "" + +#: src/dird/ua_run.c:1202 +#, c-format +msgid "Storage \"%s\" not found.\n" +msgstr "" + +#: src/dird/ua_run.c:1214 +#, c-format +msgid "No authorization. Storage \"%s\".\n" +msgstr "" + +#: src/dird/ua_run.c:1234 src/dird/ua_run.c:1254 +#, c-format +msgid "No authorization. Client \"%s\".\n" +msgstr "" + +#: src/dird/ua_run.c:1244 +#, c-format +msgid "Restore Client \"%s\" not found.\n" +msgstr "" + +#: src/dird/ua_run.c:1264 +#, c-format +msgid "FileSet \"%s\" not found.\n" +msgstr "" + +#: src/dird/ua_run.c:1273 +#, c-format +msgid "No authorization. FileSet \"%s\".\n" +msgstr "" + +#: src/dird/ua_run.c:1281 +#, c-format +msgid "Verify Job \"%s\" not found.\n" +msgstr "" + +#: src/dird/ua_run.c:1291 +#, c-format +msgid "Migration Job \"%s\" not found.\n" +msgstr "" + +#: src/dird/ua_select.c:55 +#, c-format +msgid "The current %s retention period is: %s\n" +msgstr "" + +#: src/dird/ua_select.c:60 +msgid "Continue? 
(yes/mod/no): " +msgstr "" + +#: src/dird/ua_select.c:64 +msgid "Enter new retention period: " +msgstr "" + +#: src/dird/ua_select.c:68 +msgid "Invalid period.\n" +msgstr "" + +#: src/dird/ua_select.c:144 +msgid "You have the following choices:\n" +msgstr "" + +#: src/dird/ua_select.c:160 +msgid "The defined Storage resources are:\n" +msgstr "" + +#: src/dird/ua_select.c:168 +msgid "Select Storage resource" +msgstr "" + +#: src/dird/ua_select.c:224 +msgid "You must specify a \"use \" command before continuing.\n" +msgstr "" + +#: src/dird/ua_select.c:230 +msgid "The defined Catalog resources are:\n" +msgstr "" + +#: src/dird/ua_select.c:238 +msgid "Catalog" +msgstr "" + +#: src/dird/ua_select.c:238 +msgid "Select Catalog resource" +msgstr "" + +#: src/dird/ua_select.c:255 +msgid "The defined Job resources are:\n" +msgstr "" + +#: src/dird/ua_select.c:263 +msgid "Select Job resource" +msgstr "" + +#: src/dird/ua_select.c:278 +msgid "The defined Restore Job resources are:\n" +msgstr "" + +#: src/dird/ua_select.c:286 +msgid "Select Restore Job" +msgstr "" + +#: src/dird/ua_select.c:303 +msgid "The defined Client resources are:\n" +msgstr "" + +#: src/dird/ua_select.c:311 +msgid "Select Client (File daemon) resource" +msgstr "" + +#: src/dird/ua_select.c:338 +#, c-format +msgid "Error: Client resource %s does not exist.\n" +msgstr "" + +#: src/dird/ua_select.c:363 +#, c-format +msgid "Could not find Client %s: ERR=%s" +msgstr "" + +#: src/dird/ua_select.c:373 src/dird/ua_select.c:427 +#, c-format +msgid "Could not find Client \"%s\": ERR=%s" +msgstr "" + +#: src/dird/ua_select.c:402 +#, c-format +msgid "Error obtaining client ids. ERR=%s\n" +msgstr "" + +#: src/dird/ua_select.c:406 +msgid "No clients defined. You must run a job before using this command.\n" +msgstr "" + +#: src/dird/ua_select.c:410 +msgid "Defined Clients:\n" +msgstr "" + +#: src/dird/ua_select.c:420 +msgid "Select the Client" +msgstr "" + +#: src/dird/ua_select.c:453 src/dird/ua_select.c:477 src/dird/ua_select.c:513 +#, c-format +msgid "Could not find Pool \"%s\": ERR=%s" +msgstr "" + +#: src/dird/ua_select.c:492 +msgid "No pools defined. 
Use the \"create\" command to create one.\n" +msgstr "" + +#: src/dird/ua_select.c:496 +msgid "Defined Pools:\n" +msgstr "" + +#: src/dird/ua_select.c:506 +msgid "Select the Pool" +msgstr "" + +#: src/dird/ua_select.c:536 +#, c-format +msgid "No access to Pool \"%s\"\n" +msgstr "" + +#: src/dird/ua_select.c:562 +msgid "Enter MediaId or Volume name: " +msgstr "" + +#: src/dird/ua_select.c:588 +msgid "The defined Pool resources are:\n" +msgstr "" + +#: src/dird/ua_select.c:596 +msgid "Select Pool resource" +msgstr "" + +#: src/dird/ua_select.c:631 +msgid "Enter the JobId to select: " +msgstr "" + +#: src/dird/ua_select.c:669 +#, c-format +msgid "Could not find Job \"%s\": ERR=%s" +msgstr "" + +#: src/dird/ua_select.c:749 +#, c-format +msgid "Automatically selected %s: %s\n" +msgstr "" + +#: src/dird/ua_select.c:754 +#, c-format +msgid "Cannot select %s in batch mode.\n" +msgstr "" + +#: src/dird/ua_select.c:772 +#, c-format +msgid "Selection list for \"%s\" is empty!\n" +msgstr "" + +#: src/dird/ua_select.c:778 +#, c-format +msgid "Automatically selected: %s\n" +msgstr "" + +#: src/dird/ua_select.c:790 +msgid "Selection aborted, nothing done.\n" +msgstr "" + +#: src/dird/ua_select.c:795 +#, c-format +msgid "Please enter a number between 1 and %d\n" +msgstr "" + +#: src/dird/ua_select.c:844 +msgid "Storage name given twice.\n" +msgstr "" + +#: src/dird/ua_select.c:861 +#, c-format +msgid "Expecting jobid=nn command, got: %s\n" +msgstr "" + +#: src/dird/ua_select.c:865 +#, c-format +msgid "JobId %s is not running.\n" +msgstr "" + +#: src/dird/ua_select.c:875 +#, c-format +msgid "Expecting job=xxx, got: %s.\n" +msgstr "" + +#: src/dird/ua_select.c:879 src/dird/ua_select.c:891 +#, c-format +msgid "Job \"%s\" is not running.\n" +msgstr "" + +#: src/dird/ua_select.c:887 +#, c-format +msgid "Expecting ujobid=xxx, got: %s.\n" +msgstr "" + +#: src/dird/ua_select.c:907 +#, c-format +msgid "Storage resource \"%s\": not found\n" +msgstr "" + +#: src/dird/ua_select.c:939 +msgid "Enter autochanger drive[0]: " +msgstr "" + +#: src/dird/ua_select.c:960 +msgid "Enter autochanger slot: " +msgstr "" + +#: src/dird/ua_select.c:990 +msgid "Media Types defined in conf file:\n" +msgstr "" + +#: src/dird/ua_select.c:996 +msgid "Media Type" +msgstr "" + +#: src/dird/ua_select.c:996 +msgid "Select the Media Type" +msgstr "" + +#: src/dird/ua_server.c:72 +#, c-format +msgid "Cannot create UA thread: %s\n" +msgstr "" + +#: src/dird/ua_server.c:159 +msgid "You have messages.\n" +msgstr "" + +#: src/dird/ua_status.c:143 +msgid "Status available for:\n" +msgstr "" + +#: src/dird/ua_status.c:149 +msgid "Select daemon type for status" +msgstr "" + +#: src/dird/ua_status.c:267 +#, c-format +msgid "Daemon started %s, 1 Job run since started.\n" +msgstr "" + +#: src/dird/ua_status.c:270 +#, c-format +msgid "Daemon started %s, %d Jobs run since started.\n" +msgstr "" + +#: src/dird/ua_status.c:273 src/filed/status.c:129 src/stored/status.c:90 +#, c-format +msgid " Heap: heap=%s smbytes=%s max_bytes=%s bufs=%s max_bufs=%s\n" +msgstr "" + +#: src/dird/ua_status.c:299 src/dird/ua_status.c:526 src/dird/ua_status.c:702 +#: src/filed/status.c:204 src/filed/status.c:220 src/filed/status.c:282 +msgid "====\n" +msgstr "" + +#: src/dird/ua_status.c:314 +#, c-format +msgid "" +"\n" +"Failed to connect to Storage daemon %s.\n" +"====\n" +msgstr "" + +#: src/dird/ua_status.c:352 +#, c-format +msgid "" +"Failed to connect to Client %s.\n" +"====\n" +msgstr "" + +#: src/dird/ua_status.c:360 +msgid "Connected to file daemon\n" +msgstr "" + +#: 
src/dird/ua_status.c:375 +msgid "" +"\n" +"Scheduled Jobs:\n" +msgstr "" + +#: src/dird/ua_status.c:376 +msgid "" +"Level Type Pri Scheduled Name Volume\n" +msgstr "" + +#: src/dird/ua_status.c:377 +msgid "===================================================================================\n" +msgstr "" + +#: src/dird/ua_status.c:429 +#, c-format +msgid "%-14s %-8s %3d %-18s %-18s %s\n" +msgstr "" + +#: src/dird/ua_status.c:524 +msgid "No Scheduled Jobs.\n" +msgstr "" + +#: src/dird/ua_status.c:541 src/filed/status.c:144 src/stored/status.c:370 +msgid "" +"\n" +"Running Jobs:\n" +msgstr "" + +#: src/dird/ua_status.c:549 +#, c-format +msgid "Console connected at %s\n" +msgstr "" + +#: src/dird/ua_status.c:559 +msgid "" +"No Jobs running.\n" +"====\n" +msgstr "" + +#: src/dird/ua_status.c:564 +msgid " JobId Level Name Status\n" +msgstr "" + +#: src/dird/ua_status.c:565 src/filed/status.c:226 +msgid "======================================================================\n" +msgstr "" + +#: src/dird/ua_status.c:573 +msgid "is waiting execution" +msgstr "" + +#: src/dird/ua_status.c:576 +msgid "is running" +msgstr "" + +#: src/dird/ua_status.c:579 +msgid "is blocked" +msgstr "" + +#: src/dird/ua_status.c:582 +msgid "has terminated" +msgstr "" + +#: src/dird/ua_status.c:585 +msgid "has erred" +msgstr "" + +#: src/dird/ua_status.c:588 +msgid "has errors" +msgstr "" + +#: src/dird/ua_status.c:591 +msgid "has a fatal error" +msgstr "" + +#: src/dird/ua_status.c:594 +msgid "has verify differences" +msgstr "" + +#: src/dird/ua_status.c:597 +msgid "has been canceled" +msgstr "" + +#: src/dird/ua_status.c:602 +msgid "is waiting on Client" +msgstr "" + +#: src/dird/ua_status.c:604 +#, c-format +msgid "is waiting on Client %s" +msgstr "" + +#: src/dird/ua_status.c:612 src/dird/ua_status.c:614 +#, c-format +msgid "is waiting on Storage %s" +msgstr "" + +#: src/dird/ua_status.c:616 +msgid "is waiting on Storage" +msgstr "" + +#: src/dird/ua_status.c:622 +msgid "is waiting on max Storage jobs" +msgstr "" + +#: src/dird/ua_status.c:625 +msgid "is waiting on max Client jobs" +msgstr "" + +#: src/dird/ua_status.c:628 +msgid "is waiting on max Job jobs" +msgstr "" + +#: src/dird/ua_status.c:631 +msgid "is waiting on max total jobs" +msgstr "" + +#: src/dird/ua_status.c:634 +msgid "is waiting for its start time" +msgstr "" + +#: src/dird/ua_status.c:637 +msgid "is waiting for higher priority jobs to finish" +msgstr "" + +#: src/dird/ua_status.c:642 +#, c-format +msgid "is in unknown state %c" +msgstr "" + +#: src/dird/ua_status.c:656 +msgid "is waiting for a mount request" +msgstr "" + +#: src/dird/ua_status.c:663 +msgid "is waiting for an appendable Volume" +msgstr "" + +#: src/dird/ua_status.c:671 +msgid "is waiting for Client to connect to Storage daemon" +msgstr "" + +#: src/dird/ua_status.c:673 +#, c-format +msgid "is waiting for Client %s to connect to Storage %s" +msgstr "" + +#: src/dird/ua_status.c:690 +#, c-format +msgid "%6d %-6s %-20s %s\n" +msgstr "" + +#: src/dird/ua_status.c:712 +msgid "No Terminated Jobs.\n" +msgstr "" + +#: src/dird/ua_status.c:717 src/filed/status.c:216 src/stored/status.c:482 +msgid "" +"\n" +"Terminated Jobs:\n" +msgstr "" + +#: src/dird/ua_status.c:718 src/filed/status.c:224 src/stored/status.c:489 +msgid " JobId Level Files Bytes Status Finished Name \n" +msgstr "" + +#: src/dird/ua_status.c:719 +msgid "====================================================================\n" +msgstr "" + +#: src/dird/ua_status.c:750 src/filed/status.c:246 src/lib/util.c:179 +#: 
src/stored/status.c:511 +msgid "Created" +msgstr "" + +#: src/dird/ua_status.c:754 src/filed/status.c:250 src/lib/util.c:192 +#: src/lib/util.c:259 src/stored/status.c:515 +msgid "Error" +msgstr "" + +#: src/dird/ua_status.c:757 src/filed/status.c:253 src/stored/status.c:518 +msgid "Diffs" +msgstr "" + +#: src/dird/ua_status.c:763 src/filed/status.c:259 src/lib/util.c:188 +#: src/lib/util.c:255 src/stored/btape.c:1189 src/stored/status.c:524 +msgid "OK" +msgstr "" + +#: src/dird/ua_status.c:766 src/filed/status.c:262 src/stored/status.c:527 +msgid "Other" +msgstr "" + +#: src/dird/ua_status.c:769 src/filed/status.c:273 src/stored/status.c:538 +#, c-format +msgid "%6d %-6s %8s %10s %-7s %-8s %s\n" +msgstr "" + +#: src/dird/ua_status.c:777 src/stored/btape.c:195 +msgid "\n" +msgstr "" + +#: src/dird/ua_tree.c:72 +msgid "change current directory" +msgstr "" + +#: src/dird/ua_tree.c:73 +msgid "count marked files in and below the cd" +msgstr "" + +#: src/dird/ua_tree.c:74 src/dird/ua_tree.c:75 +msgid "long list current directory, wildcards allowed" +msgstr "" + +#: src/dird/ua_tree.c:76 +msgid "leave file selection mode" +msgstr "" + +#: src/dird/ua_tree.c:77 +msgid "estimate restore size" +msgstr "" + +#: src/dird/ua_tree.c:78 +msgid "same as done command" +msgstr "" + +#: src/dird/ua_tree.c:79 +msgid "find files, wildcards allowed" +msgstr "" + +#: src/dird/ua_tree.c:80 src/dird/ua_tree.c:90 +msgid "print help" +msgstr "" + +#: src/dird/ua_tree.c:81 +msgid "list current directory, wildcards allowed" +msgstr "" + +#: src/dird/ua_tree.c:82 +msgid "list the marked files in and below the cd" +msgstr "" + +#: src/dird/ua_tree.c:83 +msgid "mark dir/file to be restored recursively, wildcards allowed" +msgstr "" + +#: src/dird/ua_tree.c:84 +msgid "mark directory name to be restored (no files)" +msgstr "" + +#: src/dird/ua_tree.c:85 src/dird/ua_tree.c:86 +msgid "print current working directory" +msgstr "" + +#: src/dird/ua_tree.c:87 +msgid "unmark dir/file to be restored recursively in dir" +msgstr "" + +#: src/dird/ua_tree.c:88 +msgid "unmark directory name only no recursion" +msgstr "" + +#: src/dird/ua_tree.c:89 +msgid "quit and do not do restore" +msgstr "" + +#: src/dird/ua_tree.c:110 +msgid "" +"\n" +"You are now entering file selection mode where you add (mark) and\n" +"remove (unmark) files to be restored. No files are initially added, unless\n" +"you used the \"all\" keyword on the command line.\n" +"Enter \"done\" to leave this mode.\n" +"\n" +msgstr "" + +#: src/dird/ua_tree.c:120 src/dird/ua_tree.c:716 +#, c-format +msgid "cwd is: %s\n" +msgstr "" + +#: src/dird/ua_tree.c:129 src/dird/ua_tree.c:144 +msgid "Invalid command. Enter \"done\" to exit.\n" +msgstr "" + +#: src/dird/ua_tree.c:331 src/dird/ua_tree.c:343 src/dird/ua_tree.c:360 +msgid "No files marked.\n" +msgstr "" + +#: src/dird/ua_tree.c:345 +msgid "1 file marked.\n" +msgstr "" + +#: src/dird/ua_tree.c:347 +#, c-format +msgid "%s files marked.\n" +msgstr "" + +#: src/dird/ua_tree.c:375 +msgid "No directories marked.\n" +msgstr "" + +#: src/dird/ua_tree.c:377 +msgid "1 directory marked.\n" +msgstr "" + +#: src/dird/ua_tree.c:379 +#, c-format +msgid "%s directories marked.\n" +msgstr "" + +#: src/dird/ua_tree.c:400 +#, c-format +msgid "%s total files/dirs. 
%s marked to be restored.\n" +msgstr "" + +#: src/dird/ua_tree.c:411 +msgid "No file specification given.\n" +msgstr "" + +#: src/dird/ua_tree.c:562 +#, c-format +msgid "Node %s has no children.\n" +msgstr "" + +#: src/dird/ua_tree.c:653 +#, c-format +msgid "%d total files; %d marked to be restored; %s bytes.\n" +msgstr "" + +#: src/dird/ua_tree.c:687 +msgid "Too few or too many arguments. Try using double quotes.\n" +msgstr "" + +#: src/dird/ua_tree.c:699 +msgid "Invalid path given.\n" +msgstr "" + +#: src/dird/ua_tree.c:735 src/dird/ua_tree.c:747 +msgid "No files unmarked.\n" +msgstr "" + +#: src/dird/ua_tree.c:749 +msgid "1 file unmarked.\n" +msgstr "" + +#: src/dird/ua_tree.c:752 +#, c-format +msgid "%s files unmarked.\n" +msgstr "" + +#: src/dird/ua_tree.c:763 src/dird/ua_tree.c:780 +msgid "No directories unmarked.\n" +msgstr "" + +#: src/dird/ua_tree.c:782 +msgid "1 directory unmarked.\n" +msgstr "" + +#: src/dird/ua_tree.c:784 +#, c-format +msgid "%d directories unmarked.\n" +msgstr "" + +#: src/dird/ua_update.c:89 +msgid "Update choice:\n" +msgstr "" + +#: src/dird/ua_update.c:90 +msgid "Volume parameters" +msgstr "" + +#: src/dird/ua_update.c:91 +msgid "Pool from resource" +msgstr "" + +#: src/dird/ua_update.c:92 +msgid "Slots from autochanger" +msgstr "" + +#: src/dird/ua_update.c:93 +msgid "item" +msgstr "" + +#: src/dird/ua_update.c:93 +msgid "Choose catalog item to update" +msgstr "" + +#: src/dird/ua_update.c:133 +#, c-format +msgid "Invalid VolStatus specified: %s\n" +msgstr "" + +#: src/dird/ua_update.c:142 +#, c-format +msgid "New Volume status is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:152 +#, c-format +msgid "Invalid retention period specified: %s\n" +msgstr "" + +#: src/dird/ua_update.c:160 +#, c-format +msgid "New retention period is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:171 +#, c-format +msgid "Invalid use duration specified: %s\n" +msgstr "" + +#: src/dird/ua_update.c:179 +#, c-format +msgid "New use duration is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:193 +#, c-format +msgid "New max jobs is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:206 +#, c-format +msgid "New max files is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:217 +#, c-format +msgid "Invalid max. bytes specification: %s\n" +msgstr "" + +#: src/dird/ua_update.c:225 +#, c-format +msgid "New Max bytes is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:236 src/dird/ua_update.c:256 +msgid "Invalid value. 
It must be yes or no.\n" +msgstr "" + +#: src/dird/ua_update.c:244 +#, c-format +msgid "New Recycle flag is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:264 +#, c-format +msgid "New InChanger flag is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:282 +#, c-format +msgid "Invalid slot, it must be between 0 and MaxVols=%d\n" +msgstr "" + +#: src/dird/ua_update.c:291 src/dird/ua_update.c:637 +#, c-format +msgid "Error updating media record Slot: ERR=%s" +msgstr "" + +#: src/dird/ua_update.c:293 +#, c-format +msgid "New Slot is: %d\n" +msgstr "" + +#: src/dird/ua_update.c:318 +#, c-format +msgid "New Pool is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:352 +#, c-format +msgid "New RecyclePool is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:372 +#, c-format +msgid "Error updating Volume record: ERR=%s" +msgstr "" + +#: src/dird/ua_update.c:374 +#, c-format +msgid "Volume defaults updated from \"%s\" Pool record.\n" +msgstr "" + +#: src/dird/ua_update.c:398 +#, c-format +msgid "Error updating Volume records: ERR=%s" +msgstr "" + +#: src/dird/ua_update.c:400 +#, c-format +msgid "All Volume defaults updated from \"%s\" Pool record.\n" +msgstr "" + +#: src/dird/ua_update.c:412 +#, c-format +msgid "Error updating media record Enabled: ERR=%s" +msgstr "" + +#: src/dird/ua_update.c:414 +#, c-format +msgid "New Enabled is: %d\n" +msgstr "" + +#: src/dird/ua_update.c:519 +msgid "Volume Status" +msgstr "" + +#: src/dird/ua_update.c:520 +msgid "Volume Retention Period" +msgstr "" + +#: src/dird/ua_update.c:521 +msgid "Volume Use Duration" +msgstr "" + +#: src/dird/ua_update.c:522 +msgid "Maximum Volume Jobs" +msgstr "" + +#: src/dird/ua_update.c:523 +msgid "Maximum Volume Files" +msgstr "" + +#: src/dird/ua_update.c:524 +msgid "Maximum Volume Bytes" +msgstr "" + +#: src/dird/ua_update.c:525 +msgid "Recycle Flag" +msgstr "" + +#: src/dird/ua_update.c:526 +msgid "Slot" +msgstr "" + +#: src/dird/ua_update.c:527 +msgid "InChanger Flag" +msgstr "" + +#: src/dird/ua_update.c:528 +msgid "Volume Files" +msgstr "" + +#: src/dird/ua_update.c:530 +msgid "Volume from Pool" +msgstr "" + +#: src/dird/ua_update.c:531 +msgid "All Volumes from Pool" +msgstr "" + +#: src/dird/ua_update.c:532 +msgid "Enabled" +msgstr "" + +#: src/dird/ua_update.c:533 +msgid "RecyclePool" +msgstr "" + +#: src/dird/ua_update.c:534 +msgid "Done" +msgstr "" + +#: src/dird/ua_update.c:542 +#, c-format +msgid "Updating Volume \"%s\"\n" +msgstr "" + +#: src/dird/ua_update.c:547 +#, c-format +msgid "Current Volume status is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:548 +msgid "Possible Values are:\n" +msgstr "" + +#: src/dird/ua_update.c:559 +msgid "Choose new Volume Status" +msgstr "" + +#: src/dird/ua_update.c:565 +#, c-format +msgid "Current retention period is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:567 +msgid "Enter Volume Retention period: " +msgstr "" + +#: src/dird/ua_update.c:574 +#, c-format +msgid "Current use duration is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:576 +msgid "Enter Volume Use Duration: " +msgstr "" + +#: src/dird/ua_update.c:583 +#, c-format +msgid "Current max jobs is: %u\n" +msgstr "" + +#: src/dird/ua_update.c:584 +msgid "Enter new Maximum Jobs: " +msgstr "" + +#: src/dird/ua_update.c:591 +#, c-format +msgid "Current max files is: %u\n" +msgstr "" + +#: src/dird/ua_update.c:592 +msgid "Enter new Maximum Files: " +msgstr "" + +#: src/dird/ua_update.c:599 +#, c-format +msgid "Current value is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:600 +msgid "Enter new Maximum Bytes: " +msgstr "" + +#: 
src/dird/ua_update.c:608 +#, c-format +msgid "Current recycle flag is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:610 +msgid "Enter new Recycle status: " +msgstr "" + +#: src/dird/ua_update.c:617 +#, c-format +msgid "Current Slot is: %d\n" +msgstr "" + +#: src/dird/ua_update.c:618 +msgid "Enter new Slot: " +msgstr "" + +#: src/dird/ua_update.c:625 +#, c-format +msgid "Current InChanger flag is: %d\n" +msgstr "" + +#: src/dird/ua_update.c:626 +#, c-format +msgid "Set InChanger flag for Volume \"%s\": yes/no: " +msgstr "" + +#: src/dird/ua_update.c:639 +#, c-format +msgid "New InChanger flag is: %d\n" +msgstr "" + +#: src/dird/ua_update.c:646 +msgid "" +"Warning changing Volume Files can result\n" +"in loss of data on your Volume\n" +"\n" +msgstr "" + +#: src/dird/ua_update.c:648 +#, c-format +msgid "Current Volume Files is: %u\n" +msgstr "" + +#: src/dird/ua_update.c:649 +msgid "Enter new number of Files for Volume: " +msgstr "" + +#: src/dird/ua_update.c:654 +msgid "Normally, you should only increase Volume Files by one!\n" +msgstr "" + +#: src/dird/ua_update.c:655 +msgid "Increase Volume Files? (yes/no): " +msgstr "" + +#: src/dird/ua_update.c:665 +#, c-format +msgid "New Volume Files is: %u\n" +msgstr "" + +#: src/dird/ua_update.c:677 +#, c-format +msgid "Current Pool is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:678 +msgid "Enter new Pool name: " +msgstr "" + +#: src/dird/ua_update.c:695 +#, c-format +msgid "Current Enabled is: %d\n" +msgstr "" + +#: src/dird/ua_update.c:696 +msgid "Enter new Enabled: " +msgstr "" + +#: src/dird/ua_update.c:715 +#, c-format +msgid "Current RecyclePool is: %s\n" +msgstr "" + +#: src/dird/ua_update.c:717 +msgid "No current RecyclePool\n" +msgstr "" + +#: src/dird/ua_update.c:719 +msgid "Enter new RecyclePool name: " +msgstr "" + +#: src/dird/ua_update.c:726 +msgid "Selection terminated.\n" +msgstr "" + +#: src/dird/ua_update.c:760 +#, c-format +msgid "db_update_pool_record returned %d. ERR=%s\n" +msgstr "" + +#: src/dird/ua_update.c:767 +msgid "Pool DB record updated from resource.\n" +msgstr "" + +#: src/dird/ua_update.c:794 +msgid "Expect JobId keyword, not found.\n" +msgstr "" + +#: src/dird/ua_update.c:819 +msgid "Neither Client nor StartTime specified.\n" +msgstr "" + +#: src/dird/verify.c:120 +msgid "" +"Unable to find JobId of previous InitCatalog Job.\n" +"Please run a Verify with Level=InitCatalog before\n" +"running the current Job.\n" +msgstr "" + +#: src/dird/verify.c:125 +msgid "Unable to find JobId of previous Job for this client.\n" +msgstr "" + +#: src/dird/verify.c:141 +#, c-format +msgid "Could not get job record for previous Job. ERR=%s" +msgstr "" + +#: src/dird/verify.c:146 +#, c-format +msgid "Last Job %d did not terminate normally. JobStatus=%c\n" +msgstr "" + +#: src/dird/verify.c:150 +#, c-format +msgid "Verifying against JobId=%d Job=%s\n" +msgstr "" + +#: src/dird/verify.c:179 +#, c-format +msgid "Start Verify JobId=%s Level=%s Job=%s\n" +msgstr "" + +#: src/dird/verify.c:263 +msgid "Deprecated feature ... 
use bootstrap.\n" +msgstr "" + +#: src/dird/verify.c:276 +#, c-format +msgid "Unimplemented Verify level %d(%c)\n" +msgstr "" + +#: src/dird/verify.c:330 +#, c-format +msgid "Unimplemented verify level %d\n" +msgstr "" + +#: src/dird/verify.c:384 +msgid "Verify OK" +msgstr "" + +#: src/dird/verify.c:388 +msgid "*** Verify Error ***" +msgstr "" + +#: src/dird/verify.c:392 +msgid "Verify warnings" +msgstr "" + +#: src/dird/verify.c:395 +msgid "Verify Canceled" +msgstr "" + +#: src/dird/verify.c:398 +msgid "Verify Differences" +msgstr "" + +#: src/dird/verify.c:403 +#, c-format +msgid "Inappropriate term code: %d %c\n" +msgstr "" + +#: src/dird/verify.c:417 +#, c-format +msgid "" +"Bacula %s %s (%s): %s\n" +" Build OS: %s %s %s\n" +" JobId: %d\n" +" Job: %s\n" +" FileSet: %s\n" +" Verify Level: %s\n" +" Client: %s\n" +" Verify JobId: %d\n" +" Verify Job: %s\n" +" Start time: %s\n" +" End time: %s\n" +" Files Expected: %s\n" +" Files Examined: %s\n" +" Non-fatal FD errors: %d\n" +" FD termination status: %s\n" +" SD termination status: %s\n" +" Termination: %s\n" +"\n" +msgstr "" + +#: src/dird/verify.c:452 +#, c-format +msgid "" +"Bacula %s %s (%s): %s\n" +" Build: %s %s %s\n" +" JobId: %d\n" +" Job: %s\n" +" FileSet: %s\n" +" Verify Level: %s\n" +" Client: %s\n" +" Verify JobId: %d\n" +" Verify Job: %s\n" +" Start time: %s\n" +" End time: %s\n" +" Files Examined: %s\n" +" Non-fatal FD errors: %d\n" +" FD termination status: %s\n" +" Termination: %s\n" +"\n" +msgstr "" + +#: src/dird/verify.c:531 +#, c-format +msgid "" +"bird set configuration file to file\n" +" -dnn set debug level to nn\n" +" -n no conio\n" +" -s no signals\n" +" -t test - read configuration and exit\n" +" -? print this message.\n" +"\n" +msgstr "" + +#: src/console/console.c:162 +msgid "input from file" +msgstr "" + +#: src/console/console.c:163 +msgid "output to file" +msgstr "" + +#: src/console/console.c:165 +msgid "output to file and terminal" +msgstr "" + +#: src/console/console.c:166 +msgid "sleep specified time" +msgstr "" + +#: src/console/console.c:168 +msgid "print Console's version" +msgstr "" + +#: src/console/console.c:169 +msgid "echo command string" +msgstr "" + +#: src/console/console.c:170 +msgid "execute an external command" +msgstr "" + +#: src/console/console.c:172 +msgid "zed_keys = use zed keys instead of bash keys" +msgstr "" + +#: src/console/console.c:205 +msgid ": is an invalid command\n" +msgstr "" + +#: src/console/console.c:462 +msgid "Available Directors:\n" +msgstr "" + +#: src/console/console.c:466 +#, c-format +msgid "%2d: %s at %s:%d\n" +msgstr "" + +#: src/console/console.c:470 +msgid "Select Director by entering a number: " +msgstr "" + +#: src/console/console.c:475 +#, c-format +msgid "%s is not a number. 
You must enter a number between 1 and %d\n" +msgstr "" + +#: src/console/console.c:481 +#, c-format +msgid "You must enter a number between 1 and %d\n" +msgstr "" + +#: src/console/console.c:521 src/tray-monitor/tray-monitor.c:905 +#, c-format +msgid "Connecting to Director %s:%d\n" +msgstr "" + +#: src/console/console.c:538 src/gnome2-console/console.c:526 +#, c-format +msgid "Failed to initialize TLS context for Console \"%s\".\n" +msgstr "" + +#: src/console/console.c:558 src/gnome2-console/console.c:548 +#, c-format +msgid "Failed to initialize TLS context for Director \"%s\".\n" +msgstr "" + +#: src/console/console.c:588 +msgid "Enter a period to cancel a command.\n" +msgstr "" + +#: src/console/console.c:664 src/gnome2-console/console.c:160 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Director \"%s\" in %s. At least one CA certificate store is required.\n" +msgstr "" + +#: src/console/console.c:673 src/gnome2-console/console.c:169 +#, c-format +msgid "" +"No Director resource defined in %s\n" +"Without that I don't how to speak to the Director :-(\n" +msgstr "" + +#: src/console/console.c:693 src/gnome2-console/console.c:189 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Console \"%s\" in %s.\n" +msgstr "" + +#: src/console/console.c:849 +msgid "Too many arguments on input command.\n" +msgstr "" + +#: src/console/console.c:853 +msgid "First argument to input command must be a filename.\n" +msgstr "" + +#: src/console/console.c:858 +#, c-format +msgid "Cannot open file %s for input. ERR=%s\n" +msgstr "" + +#: src/console/console.c:888 +msgid "Too many arguments on output/tee command.\n" +msgstr "" + +#: src/console/console.c:905 +#, c-format +msgid "Cannot open file %s for output. ERR=%s\n" +msgstr "" + +#: src/console/console.c:924 +msgid "Too many arguments. 
Enclose command in double quotes.\n" +msgstr "" + +#: src/console/console.c:933 +#, c-format +msgid "Cannot popen(\"%s\", \"r\"): ERR=%s\n" +msgstr "" + +#: src/console/console.c:945 src/stored/autochanger.c:549 +#, c-format +msgid "Autochanger error: ERR=%s\n" +msgstr "" + +#: src/console/console_conf.c:138 +#, c-format +msgid "No record for %d %s\n" +msgstr "" + +#: src/console/console_conf.c:147 +#, c-format +msgid "Console: name=%s rcfile=%s histfile=%s\n" +msgstr "" + +#: src/console/console_conf.c:151 src/gnome2-console/console_conf.c:143 +#, c-format +msgid "Director: name=%s address=%s DIRport=%d\n" +msgstr "" + +#: src/console/console_conf.c:155 src/console/console_conf.c:231 +#: src/console/console_conf.c:276 src/console/console_conf.c:303 +#: src/filed/filed_conf.c:319 src/filed/filed_conf.c:384 +#: src/filed/filed_conf.c:414 src/gnome2-console/console_conf.c:154 +#: src/stored/stored_conf.c:616 src/stored/stored_conf.c:651 +#, c-format +msgid "Unknown resource type %d\n" +msgstr "" + +#: src/filed/acl.c:104 src/filed/acl.c:110 +msgid "ACL support not configured for your machine.\n" +msgstr "" + +#: src/filed/acl.c:186 +#, c-format +msgid "acl_to_text error on file \"%s\": ERR=%s\n" +msgstr "" + +#: src/filed/acl.c:215 +#, c-format +msgid "acl_delete_def_file error on file \"%s\": ERR=%s\n" +msgstr "" + +#: src/filed/acl.c:223 +#, c-format +msgid "acl_from_text error on file \"%s\": ERR=%s\n" +msgstr "" + +#: src/filed/acl.c:237 +#, c-format +msgid "ac_valid error on file \"%s\": ERR=%s\n" +msgstr "" + +#: src/filed/acl.c:252 +#, c-format +msgid "acl_set_file error on file \"%s\": ERR=%s\n" +msgstr "" + +#: src/filed/acl.c:286 +#, c-format +msgid "acltostr error on file \"%s\": ERR=%s\n" +msgstr "" + +#: src/filed/acl.c:303 src/filed/acl.c:311 +#, c-format +msgid "strtoacl error on file \"%s\": ERR=%s\n" +msgstr "" + +#: src/filed/acl.c:323 +#, c-format +msgid "setacl error on file \"%s\": ERR=%s\n" +msgstr "" + +#: src/filed/acl.c:359 +#, c-format +msgid "acltotext error on file \"%s\": ERR=%s\n" +msgstr "" + +#: src/filed/acl.c:376 +#, c-format +msgid "aclfromtext error on file \"%s\": ERR=%s\n" +msgstr "" + +#: src/filed/acl.c:388 +#, c-format +msgid "acl(SETACL) error on file \"%s\": ERR=%s\n" +msgstr "" + +#: src/filed/authenticate.c:60 +#, c-format +msgid "I only authenticate directors, not %d\n" +msgstr "" + +#: src/filed/authenticate.c:68 src/stored/authenticate.c:66 +#, c-format +msgid "Bad Hello command from Director at %s. 
Len=%d.\n" +msgstr "" + +#: src/filed/authenticate.c:80 src/stored/authenticate.c:77 +#, c-format +msgid "Bad Hello command from Director at %s: %s\n" +msgstr "" + +#: src/filed/authenticate.c:92 +#, c-format +msgid "Connection from unknown Director %s at %s rejected.\n" +msgstr "" + +#: src/filed/authenticate.c:132 +#, c-format +msgid "Incorrect password given by Director at %s.\n" +msgstr "" + +#: src/filed/authenticate.c:139 +msgid "" +"Authorization problem: Remote server did not advertize required TLS " +"support.\n" +msgstr "" + +#: src/filed/authenticate.c:193 src/stored/dircmd.c:199 +msgid "Unable to authenticate Director\n" +msgstr "" + +#: src/filed/authenticate.c:244 +msgid "" +"Authorization key rejected by Storage daemon.\n" +"Please see http://www.bacula.org/rel-manual/faq.html#AuthorizationErrors for " +"help.\n" +msgstr "" + +#: src/filed/backup.c:86 +msgid "Cannot set buffer size FD->SD.\n" +msgstr "" + +#: src/filed/backup.c:129 src/filed/backup.c:138 +msgid "An error occurred while encrypting the stream.\n" +msgstr "" + +#: src/filed/backup.c:255 +#, c-format +msgid " Recursion turned off. Will not descend from %s into %s\n" +msgstr "" + +#: src/filed/backup.c:262 +#, c-format +msgid " %s is a different filesystem. Will not descend from %s into %s\n" +msgstr "" + +#: src/filed/backup.c:268 +#, c-format +msgid " Disallowed filesystem. Will not descend from %s into %s\n" +msgstr "" + +#: src/filed/backup.c:273 +#, c-format +msgid " Disallowed drive type. Will not descend into %s\n" +msgstr "" + +#: src/filed/backup.c:292 src/filed/verify.c:123 +#, c-format +msgid " Could not access %s: ERR=%s\n" +msgstr "" + +#: src/filed/backup.c:299 src/filed/verify.c:130 +#, c-format +msgid " Could not follow link %s: ERR=%s\n" +msgstr "" + +#: src/filed/backup.c:306 src/filed/verify.c:137 +#, c-format +msgid " Could not stat %s: ERR=%s\n" +msgstr "" + +#: src/filed/backup.c:313 src/filed/verify.c:143 +#, c-format +msgid " Unchanged file skipped: %s\n" +msgstr "" + +#: src/filed/backup.c:316 +#, c-format +msgid " Archive file not saved: %s\n" +msgstr "" + +#: src/filed/backup.c:320 src/filed/verify.c:158 +#, c-format +msgid " Could not open directory %s: ERR=%s\n" +msgstr "" + +#: src/filed/backup.c:326 +#, c-format +msgid " Unknown file type %d; not saved: %s\n" +msgstr "" + +#: src/filed/backup.c:366 src/filed/verify.c:239 +#, c-format +msgid "%s digest initialization failed\n" +msgstr "" + +#: src/filed/backup.c:380 +#, c-format +msgid "%s signature digest initialization failed\n" +msgstr "" + +#: src/filed/backup.c:400 +#, c-format +msgid "Python reader program \"%s\" not found.\n" +msgstr "" + +#: src/filed/backup.c:462 src/filed/verify.c:300 +#, c-format +msgid " Cannot open %s: ERR=%s.\n" +msgstr "" + +#: src/filed/backup.c:499 src/filed/verify.c:314 +#, c-format +msgid " Cannot open resource fork for %s: ERR=%s.\n" +msgstr "" + +#: src/filed/backup.c:556 +msgid "Failed to allocate memory for crypto signature.\n" +msgstr "" + +#: src/filed/backup.c:561 src/filed/backup.c:567 src/filed/backup.c:582 +msgid "An error occurred while signing the stream.\n" +msgstr "" + +#: src/filed/backup.c:606 +msgid "An error occurred finalizing signing the stream.\n" +msgstr "" + +#: src/filed/backup.c:693 +#, c-format +msgid "Compression deflateParams error: %d\n" +msgstr "" + +#: src/filed/backup.c:705 +msgid "Encrypting sparse data not supported.\n" +msgstr "" + +#: src/filed/backup.c:712 +msgid "Failed to initialize encryption context.\n" +msgstr "" + +#: src/filed/backup.c:735 
src/filed/backup.c:891 src/filed/backup.c:926 +#: src/filed/backup.c:937 src/filed/backup.c:983 src/filed/backup.c:996 +#: src/filed/backup.c:1004 src/filed/backup.c:1050 src/filed/backup.c:1086 +#, c-format +msgid "Network send error to SD. ERR=%s\n" +msgstr "" + +#: src/filed/backup.c:814 +#, c-format +msgid "Compression deflate error: %d\n" +msgstr "" + +#: src/filed/backup.c:821 +#, c-format +msgid "Compression deflateReset error: %d\n" +msgstr "" + +#: src/filed/backup.c:864 src/filed/backup.c:880 +msgid "Encryption error\n" +msgstr "" + +#: src/filed/backup.c:904 +#, c-format +msgid "Read error on file %s. ERR=%s\n" +msgstr "" + +#: src/filed/backup.c:907 +msgid "Too many errors.\n" +msgstr "" + +#: src/filed/backup.c:917 +msgid "Encryption padding error\n" +msgstr "" + +#: src/filed/backup.c:974 +#, c-format +msgid "Error reading ACL of %s\n" +msgstr "" + +#: src/filed/backup.c:1029 +msgid "Invalid file flags, no supported data stream type.\n" +msgstr "" + +#: src/filed/filed.c:65 +#, c-format +msgid "" +"\n" +"Version: %s (%s)\n" +"\n" +"Usage: bacula-fd [-f -s] [-c config_file] [-d debug_level]\n" +" -c use as configuration file\n" +" -dnn set debug level to nn\n" +" -f run in foreground (for debugging)\n" +" -g groupid\n" +" -s no signals (for debugging)\n" +" -t test configuration file and exit\n" +" -u userid\n" +" -v verbose user messages\n" +" -? print this message.\n" +"\n" +msgstr "" + +#: src/filed/filed.c:268 +#, c-format +msgid "" +"No File daemon resource defined in %s\n" +"Without that I don't know who I am :-(\n" +msgstr "" + +#: src/filed/filed.c:273 +#, c-format +msgid "Only one Client resource permitted in %s\n" +msgstr "" + +#: src/filed/filed.c:296 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"File daemon in %s.\n" +msgstr "" + +#: src/filed/filed.c:320 +msgid "PKI encryption/signing enabled but not compiled into Bacula.\n" +msgstr "" + +#: src/filed/filed.c:331 +#, c-format +msgid "" +"\"PKI Key Pair\" must be defined for File daemon \"%s\" in %s if either " +"\"PKI Sign\" or \"PKI Encrypt\" are enabled.\n" +msgstr "" + +#: src/filed/filed.c:343 src/filed/filed.c:374 src/filed/filed.c:415 +msgid "Failed to allocate a new keypair object.\n" +msgstr "" + +#: src/filed/filed.c:347 +#, c-format +msgid "Failed to load public certificate for File daemon \"%s\" in %s.\n" +msgstr "" + +#: src/filed/filed.c:353 +#, c-format +msgid "Failed to load private key for File daemon \"%s\" in %s.\n" +msgstr "" + +#: src/filed/filed.c:383 +#, c-format +msgid "Failed to load private key from file %s for File daemon \"%s\" in %s.\n" +msgstr "" + +#: src/filed/filed.c:390 +#, c-format +msgid "" +"Failed to load trusted signer certificate from file %s for File daemon \"%s" +"\" in %s.\n" +msgstr "" + +#: src/filed/filed.c:421 +#, c-format +msgid "" +"Failed to load master key certificate from file %s for File daemon \"%s\" in " +"%s.\n" +msgstr "" + +#: src/filed/filed.c:437 +#, c-format +msgid "No Director resource defined in %s\n" +msgstr "" + +#: src/filed/job.c:373 +#, c-format +msgid "2901 Job %s not found.\n" +msgstr "" + +#: src/filed/job.c:382 +#, c-format +msgid "2001 Job %s marked to be canceled.\n" +msgstr "" + +#: src/filed/job.c:385 +msgid "2902 Error scanning cancel command.\n" +msgstr "" + +#: src/filed/job.c:404 +#, c-format +msgid "2991 Bad setdebug command: %s\n" +msgstr "" + +#: src/filed/job.c:420 +#, c-format +msgid "Bad estimate command: %s" +msgstr "" + +#: src/filed/job.c:421 +msgid "2992 Bad estimate 
command.\n" +msgstr "" + +#: src/filed/job.c:444 +#, c-format +msgid "Bad Job Command: %s" +msgstr "" + +#: src/filed/job.c:465 +#, c-format +msgid "Bad RunBeforeJob command: %s\n" +msgstr "" + +#: src/filed/job.c:466 src/filed/job.c:484 +msgid "2905 Bad RunBeforeJob command.\n" +msgstr "" + +#: src/filed/job.c:495 +msgid "2905 Bad RunBeforeNow command.\n" +msgstr "" + +#: src/filed/job.c:514 +#, c-format +msgid "Bad RunAfter command: %s\n" +msgstr "" + +#: src/filed/job.c:515 +msgid "2905 Bad RunAfterJob command.\n" +msgstr "" + +#: src/filed/job.c:549 +#, c-format +msgid "Bad RunScript command: %s\n" +msgstr "" + +#: src/filed/job.c:550 +msgid "2905 Bad RunScript command.\n" +msgstr "" + +#: src/filed/job.c:652 +#, c-format +msgid "Error running program: %s. stat=%d: ERR=%s\n" +msgstr "" + +#: src/filed/job.c:662 +#, c-format +msgid "Cannot open FileSet input file: %s. ERR=%s\n" +msgstr "" + +#: src/filed/job.c:758 +#, c-format +msgid "REGEX %s compile error. ERR=%s\n" +msgstr "" + +#: src/filed/job.c:820 +#, c-format +msgid "Invalid FileSet command: %s\n" +msgstr "" + +#: src/filed/job.c:1054 src/findlib/match.c:199 src/tools/testfind.c:635 +#, c-format +msgid "Unknown include/exclude option: %c\n" +msgstr "" + +#: src/filed/job.c:1123 src/stored/fd_cmds.c:355 +#, c-format +msgid "Could not create bootstrap file %s: ERR=%s\n" +msgstr "" + +#: src/filed/job.c:1232 +#, c-format +msgid "DIR and FD clocks differ by %d seconds, FD automatically adjusting.\n" +msgstr "" + +#: src/filed/job.c:1240 +#, c-format +msgid "Unknown backup level: %s\n" +msgstr "" + +#: src/filed/job.c:1252 +#, c-format +msgid "Bad level command: %s\n" +msgstr "" + +#: src/filed/job.c:1273 +#, c-format +msgid "Bad session command: %s" +msgstr "" + +#: src/filed/job.c:1294 +#, c-format +msgid "Bad storage command: %s" +msgstr "" + +#: src/filed/job.c:1303 +#, c-format +msgid "Failed to connect to Storage daemon: %s:%d\n" +msgstr "" + +#: src/filed/job.c:1315 +msgid "Failed to authenticate Storage daemon.\n" +msgstr "" + +#: src/filed/job.c:1353 +msgid "Cannot contact Storage daemon\n" +msgstr "" + +#: src/filed/job.c:1371 +#, c-format +msgid "Bad response to append open: %s\n" +msgstr "" + +#: src/filed/job.c:1376 +msgid "Bad response from stored to open command\n" +msgstr "" + +#: src/filed/job.c:1403 +#, c-format +msgid "Generate VSS snapshots. Driver=\"%s\", Drive(s)=\"%s\"\n" +msgstr "" + +#: src/filed/job.c:1405 +msgid "Generate VSS snapshots failed.\n" +msgstr "" + +#: src/filed/job.c:1412 +#, c-format +msgid "" +"Generate VSS snapshot of drive \"%c:\\\" failed. VSS support is disabled on " +"this drive.\n" +msgstr "" + +#: src/filed/job.c:1419 +#, c-format +msgid "VSS Writer (PrepareForBackup): %s\n" +msgstr "" + +#: src/filed/job.c:1424 +msgid "No drive letters found for generating VSS snapshots.\n" +msgstr "" + +#: src/filed/job.c:1428 +#, c-format +msgid "VSS was not initialized properly. VSS support is disabled. ERR=%s\n" +msgstr "" + +#: src/filed/job.c:1478 +msgid "Append Close with SD failed.\n" +msgstr "" + +#: src/filed/job.c:1482 +#, c-format +msgid "Bad status %d returned from Storage Daemon.\n" +msgstr "" + +#: src/filed/job.c:1500 +#, c-format +msgid "VSS Writer (BackupComplete): %s\n" +msgstr "" + +#: src/filed/job.c:1528 +#, c-format +msgid "2994 Bad verify command: %s\n" +msgstr "" + +#: src/filed/job.c:1543 src/filed/job.c:1582 +#, c-format +msgid "2994 Bad verify level: %s\n" +msgstr "" + +#: src/filed/job.c:1626 +#, c-format +msgid "Bad replace command. 
CMD=%s\n" +msgstr "" + +#: src/filed/job.c:1644 +#, c-format +msgid "Bad where regexp. where=%s\n" +msgstr "" + +#: src/filed/job.c:1718 +msgid "Improper calling sequence.\n" +msgstr "" + +#: src/filed/job.c:1738 +#, c-format +msgid "Bad response to SD read open: %s\n" +msgstr "" + +#: src/filed/job.c:1743 +msgid "Bad response from stored to read open command\n" +msgstr "" + +#: src/filed/job.c:1807 +#, c-format +msgid "Comm error with SD. bad response to %s. ERR=%s\n" +msgstr "" + +#: src/filed/job.c:1810 +#, c-format +msgid "Bad response to %s command. Wanted %s, got %s\n" +msgstr "" + +#: src/filed/pythonfd.c:157 src/stored/pythonsd.c:162 +#, c-format +msgid "Cannot delete attribute %s" +msgstr "" + +#: src/filed/pythonfd.c:175 src/filed/pythonfd.c:191 src/stored/pythonsd.c:195 +#, c-format +msgid "Cannot find attribute %s" +msgstr "" + +#: src/filed/restore.c:133 +#, c-format +msgid "Size of data or stream of %s not correct. Original %s, restored %s.\n" +msgstr "" + +#: src/filed/restore.c:255 src/filed/verify_vol.c:99 +#, c-format +msgid "Record header scan error: %s\n" +msgstr "" + +#: src/filed/restore.c:263 src/filed/verify_vol.c:108 +#, c-format +msgid "Data record error. ERR=%s\n" +msgstr "" + +#: src/filed/restore.c:267 src/filed/verify_vol.c:112 +#, c-format +msgid "Actual data size %d not same as header %d\n" +msgstr "" + +#: src/filed/restore.c:294 src/filed/restore.c:639 +msgid "Logic error: output file should be open\n" +msgstr "" + +#: src/filed/restore.c:315 src/filed/restore.c:652 +msgid "Logic error: output file should not be open\n" +msgstr "" + +#: src/filed/restore.c:326 src/filed/verify_vol.c:151 +#: src/stored/bextract.c:302 src/stored/bls.c:383 src/stored/bscan.c:663 +#, c-format +msgid "Record header file index %ld not equal record index %ld\n" +msgstr "" + +#: src/filed/restore.c:339 src/stored/bextract.c:311 +#, c-format +msgid "%s stream not supported on this Client.\n" +msgstr "" + +#: src/filed/restore.c:391 +msgid "Unexpected cryptographic session data stream.\n" +msgstr "" + +#: src/filed/restore.c:399 +msgid "" +"No private decryption keys have been defined to decrypt encrypted backup " +"data.\n" +msgstr "" + +#: src/filed/restore.c:410 +msgid "Could not create digest.\n" +msgstr "" + +#: src/filed/restore.c:424 +msgid "Missing private key required to decrypt encrypted backup data.\n" +msgstr "" + +#: src/filed/restore.c:427 +msgid "Decrypt of the session key failed.\n" +msgstr "" + +#: src/filed/restore.c:431 +#, c-format +msgid "An error occurred while decoding encrypted session data stream: %s\n" +msgstr "" + +#: src/filed/restore.c:480 src/filed/restore.c:526 +#, c-format +msgid "Missing encryption session data stream for %s\n" +msgstr "" + +#: src/filed/restore.c:488 src/filed/restore.c:533 +#, c-format +msgid "Failed to initialize decryption context for %s\n" +msgstr "" + +#: src/filed/restore.c:545 +#, c-format +msgid " Cannot open resource fork for %s.\n" +msgstr "" + +#: src/filed/restore.c:571 +#, c-format +msgid " Invalid length of Finder Info (got %d, not 32)\n" +msgstr "" + +#: src/filed/restore.c:575 +#, c-format +msgid " Could not set Finder Info on %s\n" +msgstr "" + +#: src/filed/restore.c:588 +#, c-format +msgid "Can't restore ACL of %s\n" +msgstr "" + +#: src/filed/restore.c:600 +#, c-format +msgid "Can't restore default ACL of %s\n" +msgstr "" + +#: src/filed/restore.c:610 +msgid "Unexpected cryptographic signature data stream.\n" +msgstr "" + +#: src/filed/restore.c:616 +#, c-format +msgid "Failed to decode message signature for 
%s\n" +msgstr "" + +#: src/filed/restore.c:655 src/stored/bextract.c:469 +#, c-format +msgid "Unknown stream=%d ignored. This shouldn't happen!\n" +msgstr "" + +#: src/filed/restore.c:732 +#, c-format +msgid "" +"%d non-supported data streams and %d non-supported attrib streams ignored.\n" +msgstr "" + +#: src/filed/restore.c:736 +#, c-format +msgid "%d non-supported resource fork streams ignored.\n" +msgstr "" + +#: src/filed/restore.c:739 +#, c-format +msgid "%d non-supported Finder Info streams ignored.\n" +msgstr "" + +#: src/filed/restore.c:742 +#, c-format +msgid "%d non-supported acl streams ignored.\n" +msgstr "" + +#: src/filed/restore.c:745 +#, c-format +msgid "%d non-supported crypto streams ignored.\n" +msgstr "" + +#: src/filed/restore.c:757 +msgid "None" +msgstr "" + +#: src/filed/restore.c:761 +msgid "Zlib errno" +msgstr "" + +#: src/filed/restore.c:763 +msgid "Zlib stream error" +msgstr "" + +#: src/filed/restore.c:765 +msgid "Zlib data error" +msgstr "" + +#: src/filed/restore.c:767 +msgid "Zlib memory error" +msgstr "" + +#: src/filed/restore.c:769 +msgid "Zlib buffer error" +msgstr "" + +#: src/filed/restore.c:771 +msgid "Zlib version error" +msgstr "" + +#: src/filed/restore.c:773 src/lib/util.c:591 src/lib/util.c:601 +#: src/lib/util.c:609 src/lib/util.c:616 src/lib/util.c:623 src/lib/util.c:637 +#: src/lib/util.c:647 src/lib/util.c:654 src/lib/util.c:665 +msgid "*none*" +msgstr "" + +#: src/filed/restore.c:809 +#, c-format +msgid "Missing cryptographic signature for %s\n" +msgstr "" + +#: src/filed/restore.c:838 src/filed/restore.c:862 +#, c-format +msgid "Signature validation failed for file %s: ERR=%s\n" +msgstr "" + +#: src/filed/restore.c:852 +#, c-format +msgid "Digest one file failed for file: %s\n" +msgstr "" + +#: src/filed/restore.c:883 +#, c-format +msgid "Signature validation failed for %s: %s\n" +msgstr "" + +#: src/filed/restore.c:909 src/stored/bextract.c:400 +#, c-format +msgid "Seek to %s error on %s: ERR=%s\n" +msgstr "" + +#: src/filed/restore.c:936 +#, c-format +msgid "Uncompression error on file %s. ERR=%s\n" +msgstr "" + +#: src/filed/restore.c:945 src/stored/bextract.c:434 +msgid "GZIP data stream found, but GZIP not configured!\n" +msgstr "" + +#: src/filed/restore.c:968 +#, c-format +msgid "Write error in Win32 Block Decomposition on %s: %s\n" +msgstr "" + +#: src/filed/restore.c:974 src/stored/bextract.c:372 src/stored/bextract.c:422 +#, c-format +msgid "Write error on %s: %s\n" +msgstr "" + +#: src/filed/restore.c:1023 +msgid "Decryption error\n" +msgstr "" + +#: src/filed/restore.c:1113 +#, c-format +msgid "Decryption error. buf_len=%d decrypt_len=%d on file %s\n" +msgstr "" + +#: src/filed/restore.c:1217 +msgid "Open File Manager paused\n" +msgstr "" + +#: src/filed/restore.c:1221 +msgid "FAILED to pause Open File Manager\n" +msgstr "" + +#: src/filed/restore.c:1229 +#, c-format +msgid "Running as '%s'. 
Privmask=%#08x\n" +msgstr "" + +#: src/filed/restore.c:1231 +msgid "Failed to retrieve current UserName\n" +msgstr "" + +#: src/filed/status.c:77 +#, c-format +msgid "%s Version: %s (%s) %s %s %s %s\n" +msgstr "" + +#: src/filed/status.c:81 src/stored/status.c:86 +#, c-format +msgid "Daemon started %s, %d Job%s run since started.\n" +msgstr "" + +#: src/filed/status.c:136 +#, c-format +msgid " Sizeof: boffset_t=%d size_t=%d debug=%d trace=%d\n" +msgstr "" + +#: src/filed/status.c:155 +#, c-format +msgid "Director connected at: %s\n" +msgstr "" + +#: src/filed/status.c:157 +#, c-format +msgid "JobId %d Job %s is running.\n" +msgstr "" + +#: src/filed/status.c:160 +#, c-format +msgid " %s%s Job started: %s\n" +msgstr "" + +#: src/filed/status.c:172 +#, c-format +msgid " Files=%s Bytes=%s Bytes/sec=%s Errors=%d\n" +msgstr "" + +#: src/filed/status.c:178 +#, c-format +msgid " Files Examined=%s\n" +msgstr "" + +#: src/filed/status.c:183 +#, c-format +msgid " Processing file: %s\n" +msgstr "" + +#: src/filed/status.c:194 +msgid " SDSocket closed.\n" +msgstr "" + +#: src/filed/status.c:328 src/filed/status.c:352 src/stored/status.c:636 +#: src/stored/status.c:659 +#, c-format +msgid "Bad .status command: %s\n" +msgstr "" + +#: src/filed/status.c:329 +msgid "2900 Bad .status command, missing argument.\n" +msgstr "" + +#: src/filed/status.c:353 +msgid "2900 Bad .status command, wrong argument.\n" +msgstr "" + +#: src/filed/status.c:390 src/stored/status.c:577 +msgid "Init Catalog" +msgstr "" + +#: src/filed/status.c:393 src/stored/status.c:580 +msgid "Volume to Catalog" +msgstr "" + +#: src/filed/status.c:396 src/stored/status.c:583 +msgid "Disk to Catalog" +msgstr "" + +#: src/filed/status.c:399 src/stored/status.c:586 +msgid "Data" +msgstr "" + +#: src/filed/status.c:405 src/lib/util.c:361 src/stored/status.c:592 +msgid "Unknown Job Level" +msgstr "" + +#: src/filed/status.c:421 +msgid "Bacula Client: Idle" +msgstr "" + +#: src/filed/status.c:432 +msgid "Bacula Client: Running" +msgstr "" + +#: src/filed/status.c:446 +msgid "Bacula Client: Last Job Canceled" +msgstr "" + +#: src/filed/status.c:450 +msgid "Bacula Client: Last Job Failed" +msgstr "" + +#: src/filed/status.c:454 +msgid "Bacula Client: Last Job had Warnings" +msgstr "" + +#: src/filed/verify.c:53 +#, c-format +msgid "Cannot malloc %d network read buffer\n" +msgstr "" + +#: src/filed/verify.c:146 +#, c-format +msgid " Archive file skipped: %s\n" +msgstr "" + +#: src/filed/verify.c:149 +#, c-format +msgid " Recursion turned off. Directory skipped: %s\n" +msgstr "" + +#: src/filed/verify.c:153 +#, c-format +msgid " File system change prohibited. Directory skipped: %s\n" +msgstr "" + +#: src/filed/verify.c:163 +#, c-format +msgid " Unknown file type %d: %s\n" +msgstr "" + +#: src/filed/verify.c:206 src/filed/verify_vol.c:209 +#, c-format +msgid "Network error in send to Director: ERR=%s\n" +msgstr "" + +#: src/filed/verify.c:352 +#, c-format +msgid "Error reading file %s: ERR=%s\n" +msgstr "" + +#: src/filed/verify_vol.c:65 +msgid "Storage command not issued before Verify.\n" +msgstr "" + +#: src/filed/verify_vol.c:145 +#, c-format +msgid "Error scanning record header: %s\n" +msgstr "" + +#: src/findlib/attribs.c:408 +#, c-format +msgid "File size of restored file %s not correct. 
Original %s, restored %s.\n" +msgstr "" + +#: src/findlib/attribs.c:434 src/findlib/attribs.c:441 +#, c-format +msgid "Unable to set file owner %s: ERR=%s\n" +msgstr "" + +#: src/findlib/attribs.c:447 +#, c-format +msgid "Unable to set file modes %s: ERR=%s\n" +msgstr "" + +#: src/findlib/attribs.c:457 +#, c-format +msgid "Unable to set file times %s: ERR=%s\n" +msgstr "" + +#: src/findlib/attribs.c:471 +#, c-format +msgid "Unable to set file flags %s: ERR=%s\n" +msgstr "" + +#: src/findlib/attribs.c:714 +#, c-format +msgid "Error in %s file %s: ERR=%s\n" +msgstr "" + +#: src/findlib/attribs.c:731 +#, c-format +msgid "Error in %s: ERR=%s\n" +msgstr "" + +#: src/findlib/bfile.c:82 +msgid "Unix attributes" +msgstr "" + +#: src/findlib/bfile.c:84 +msgid "File data" +msgstr "" + +#: src/findlib/bfile.c:86 +msgid "MD5 digest" +msgstr "" + +#: src/findlib/bfile.c:88 +msgid "GZIP data" +msgstr "" + +#: src/findlib/bfile.c:90 +msgid "Extended attributes" +msgstr "" + +#: src/findlib/bfile.c:92 +msgid "Sparse data" +msgstr "" + +#: src/findlib/bfile.c:94 +msgid "GZIP sparse data" +msgstr "" + +#: src/findlib/bfile.c:96 +msgid "Program names" +msgstr "" + +#: src/findlib/bfile.c:98 +msgid "Program data" +msgstr "" + +#: src/findlib/bfile.c:100 +msgid "SHA1 digest" +msgstr "" + +#: src/findlib/bfile.c:102 +msgid "Win32 data" +msgstr "" + +#: src/findlib/bfile.c:104 +msgid "Win32 GZIP data" +msgstr "" + +#: src/findlib/bfile.c:106 +msgid "MacOS Fork data" +msgstr "" + +#: src/findlib/bfile.c:108 +msgid "HFS+ attribs" +msgstr "" + +#: src/findlib/bfile.c:110 +msgid "Standard Unix ACL attribs" +msgstr "" + +#: src/findlib/bfile.c:112 +msgid "Default Unix ACL attribs" +msgstr "" + +#: src/findlib/bfile.c:114 +msgid "SHA256 digest" +msgstr "" + +#: src/findlib/bfile.c:116 +msgid "SHA512 digest" +msgstr "" + +#: src/findlib/bfile.c:118 +msgid "Signed digest" +msgstr "" + +#: src/findlib/bfile.c:120 +msgid "Encrypted File data" +msgstr "" + +#: src/findlib/bfile.c:122 +msgid "Encrypted Win32 data" +msgstr "" + +#: src/findlib/bfile.c:124 +msgid "Encrypted session data" +msgstr "" + +#: src/findlib/bfile.c:126 +msgid "Encrypted GZIP data" +msgstr "" + +#: src/findlib/bfile.c:128 +msgid "Encrypted Win32 GZIP data" +msgstr "" + +#: src/findlib/bfile.c:130 +msgid "Encrypted MacOS fork data" +msgstr "" + +#: src/findlib/create_file.c:123 +#, c-format +msgid "File skipped. Not newer: %s\n" +msgstr "" + +#: src/findlib/create_file.c:130 +#, c-format +msgid "File skipped. Not older: %s\n" +msgstr "" + +#: src/findlib/create_file.c:136 +#, c-format +msgid "File skipped. Already exists: %s\n" +msgstr "" + +#: src/findlib/create_file.c:162 +#, c-format +msgid "File %s already exists and could not be replaced. 
ERR=%s.\n" +msgstr "" + +#: src/findlib/create_file.c:214 src/findlib/create_file.c:277 +#: src/findlib/create_file.c:370 +#, c-format +msgid "bpkt already open fid=%d\n" +msgstr "" + +#: src/findlib/create_file.c:222 +#, c-format +msgid "Could not create %s: ERR=%s\n" +msgstr "" + +#: src/findlib/create_file.c:236 +#, c-format +msgid "Cannot make fifo %s: ERR=%s\n" +msgstr "" + +#: src/findlib/create_file.c:254 +#, c-format +msgid "Cannot make node %s: ERR=%s\n" +msgstr "" + +#: src/findlib/create_file.c:299 +#, c-format +msgid "Could not symlink %s -> %s: ERR=%s\n" +msgstr "" + +#: src/findlib/create_file.c:324 src/findlib/create_file.c:335 +#, c-format +msgid "Could not restore file flags for file %s: ERR=%s\n" +msgstr "" + +#: src/findlib/create_file.c:328 src/findlib/create_file.c:343 +#, c-format +msgid "Could not hard link %s -> %s: ERR=%s\n" +msgstr "" + +#: src/findlib/create_file.c:339 +#, c-format +msgid "Could not reset file flags for file %s: ERR=%s\n" +msgstr "" + +#: src/findlib/create_file.c:402 +#, c-format +msgid "Original file %s not saved: type=%d\n" +msgstr "" + +#: src/findlib/create_file.c:405 +#, c-format +msgid "Unknown file type %d; not restored: %s\n" +msgstr "" + +#: src/findlib/create_file.c:449 +#, c-format +msgid "Zero length filename: %s\n" +msgstr "" + +#: src/findlib/enable_priv.c:92 +msgid "AdjustTokenPrivileges set " +msgstr "" + +#: src/findlib/find_one.c:228 +#, c-format +msgid "Cannot stat file %s: ERR=%s\n" +msgstr "" + +#: src/findlib/find_one.c:234 +#, c-format +msgid "%s mtime changed during backup.\n" +msgstr "" + +#: src/findlib/find_one.c:240 +#, c-format +msgid "%s ctime changed during backup.\n" +msgstr "" + +#: src/findlib/find_one.c:246 src/findlib/find_one.c:253 +#, c-format +msgid "%s size changed during backup.\n" +msgstr "" + +#: src/findlib/find_one.c:310 +#, c-format +msgid "Top level directory \"%s\" has unlisted fstype \"%s\"\n" +msgstr "" + +#: src/findlib/find_one.c:325 +#, c-format +msgid "Top level directory \"%s\" has an unlisted drive type \"%s\"\n" +msgstr "" + +#: src/findlib/makepath.c:116 +#, c-format +msgid "Cannot create directory %s: ERR=%s\n" +msgstr "" + +#: src/findlib/makepath.c:120 src/findlib/makepath.c:398 +#, c-format +msgid "%s exists but is not a directory\n" +msgstr "" + +#: src/findlib/makepath.c:229 +#, c-format +msgid "%c: is not a valid drive\n" +msgstr "" + +#: src/findlib/makepath.c:296 src/findlib/makepath.c:357 +#: src/findlib/makepath.c:417 +#, c-format +msgid "Cannot change owner and/or group of %s: ERR=%s\n" +msgstr "" + +#: src/findlib/makepath.c:317 +#, c-format +msgid "Cannot chdir to directory, %s: ERR=%s\n" +msgstr "" + +#: src/findlib/makepath.c:372 src/findlib/makepath.c:388 +#: src/findlib/makepath.c:422 +#, c-format +msgid "Cannot change permissions of %s: ERR=%s\n" +msgstr "" + +#: src/findlib/save-cwd.c:48 +#, c-format +msgid "Cannot open current directory: %s\n" +msgstr "" + +#: src/findlib/save-cwd.c:62 +#, c-format +msgid "Current directory: %s\n" +msgstr "" + +#: src/findlib/save-cwd.c:84 +#, c-format +msgid "Cannot get current directory: %s\n" +msgstr "" + +#: src/findlib/save-cwd.c:105 +#, c-format +msgid "Cannot return to %s from %s: %s\n" +msgstr "" + +#: src/findlib/save-cwd.c:109 +#, c-format +msgid "Cannot return to saved working directory from %s: %s\n" +msgstr "" + +#: src/findlib/save-cwd.c:115 +#, c-format +msgid "Cannot return to %s: %s\n" +msgstr "" + +#: src/findlib/save-cwd.c:119 +#, c-format +msgid "Cannot return to saved working directory: %s\n" +msgstr "" + +#: 
src/lib/address_conf.c:63 +#, c-format +msgid "Only ipv4 and ipv6 are supported (%d)\n" +msgstr "" + +#: src/lib/address_conf.c:67 +#, c-format +msgid "Only ipv4 is supported (%d)\n" +msgstr "" + +#: src/lib/address_conf.c:176 +#, c-format +msgid "It was tried to assign a ipv6 address to a ipv4(%d)\n" +msgstr "" + +#: src/lib/address_conf.c:185 +#, c-format +msgid "It was tried to assign a ipv4 address to a ipv6(%d)\n" +msgstr "" + +#: src/lib/address_conf.c:264 +#, c-format +msgid "Can't add default address (%s)\n" +msgstr "" + +#: src/lib/address_conf.c:293 +msgid "the old style addresses cannot be mixed with new style" +msgstr "" + +#: src/lib/address_conf.c:314 +#, c-format +msgid "can't resolve service(%s)" +msgstr "" + +#: src/lib/address_conf.c:323 +#, c-format +msgid "can't resolve hostname(%s) %s" +msgstr "" + +#: src/lib/address_conf.c:413 src/lib/address_conf.c:444 +#, c-format +msgid "Expected a block begin { , got: %s" +msgstr "" + +#: src/lib/address_conf.c:418 +msgid "Empty addr block is not allowed" +msgstr "" + +#: src/lib/address_conf.c:422 +#, c-format +msgid "Expected a string, got: %s" +msgstr "" + +#: src/lib/address_conf.c:431 +#, c-format +msgid "Expected a string [ip|ipv4|ipv6], got: %s" +msgstr "" + +#: src/lib/address_conf.c:435 +#, c-format +msgid "Expected a string [ip|ipv4], got: %s" +msgstr "" + +#: src/lib/address_conf.c:440 src/lib/address_conf.c:470 +#, c-format +msgid "Expected a equal =, got: %s" +msgstr "" + +#: src/lib/address_conf.c:451 src/lib/address_conf.c:466 +#, c-format +msgid "Expected a identifier [addr|port], got: %s" +msgstr "" + +#: src/lib/address_conf.c:456 +msgid "Only one port per address block" +msgstr "" + +#: src/lib/address_conf.c:462 +msgid "Only one addr per address block" +msgstr "" + +#: src/lib/address_conf.c:478 +#, c-format +msgid "Expected a number or a string, got: %s" +msgstr "" + +#: src/lib/address_conf.c:484 src/lib/address_conf.c:517 +#, c-format +msgid "Expected an IP number or a hostname, got: %s" +msgstr "" + +#: src/lib/address_conf.c:490 +msgid "State machine missmatch" +msgstr "" + +#: src/lib/address_conf.c:496 src/lib/address_conf.c:508 +#, c-format +msgid "Expected a end of block }, got: %s" +msgstr "" + +#: src/lib/address_conf.c:502 +#, c-format +msgid "Can't add hostname(%s) and port(%s) to addrlist (%s)" +msgstr "" + +#: src/lib/address_conf.c:522 src/lib/address_conf.c:536 +#, c-format +msgid "can't add port (%s) to (%s)" +msgstr "" + +#: src/lib/address_conf.c:531 +#, c-format +msgid "Expected a port number or string, got: %s" +msgstr "" + +#: src/lib/attr.c:76 +#, c-format +msgid "Error scanning attributes: %s\n" +msgstr "" + +#: src/lib/berrno.c:62 +msgid "Child exited normally." +msgstr "" + +#: src/lib/berrno.c:69 +msgid "Unknown error during program execvp" +msgstr "" + +#: src/lib/berrno.c:72 +#, c-format +msgid "Child exited with code %d" +msgstr "" + +#: src/lib/berrno.c:80 +#, c-format +msgid "Child died from signal %d: %s" +msgstr "" + +#: src/lib/berrno.c:86 +msgid "Invalid errno. No error message possible." +msgstr "" + +#: src/lib/bget_msg.c:99 +msgid "Status OK\n" +msgstr "" + +#: src/lib/bget_msg.c:103 +#, c-format +msgid "bget_msg: unknown signal %d\n" +msgstr "" + +#: src/lib/bnet.c:118 +#, c-format +msgid "Attr spool write error. 
ERR=%s\n" +msgstr "" + +#: src/lib/bnet.c:244 src/lib/bnet.c:283 +msgid "TLS connection initialization failed.\n" +msgstr "" + +#: src/lib/bnet.c:252 +msgid "TLS Negotiation failed.\n" +msgstr "" + +#: src/lib/bnet.c:258 src/lib/bnet.c:298 +msgid "" +"TLS certificate verification failed. Peer certificate did not match a " +"required commonName\n" +msgstr "" + +#: src/lib/bnet.c:305 +#, c-format +msgid "" +"TLS host certificate verification failed. Host %s did not match presented " +"certificate\n" +msgstr "" + +#: src/lib/bnet.c:322 +msgid "TLS enabled but not configured.\n" +msgstr "" + +#: src/lib/bnet.c:328 +msgid "TLS enable but not configured.\n" +msgstr "" + +#: src/lib/bnet.c:386 +msgid "No problem." +msgstr "" + +#: src/lib/bnet.c:389 +msgid "Authoritative answer for host not found." +msgstr "" + +#: src/lib/bnet.c:392 +msgid "Non-authoritative for host not found, or ServerFail." +msgstr "" + +#: src/lib/bnet.c:395 +msgid "Non-recoverable errors, FORMERR, REFUSED, or NOTIMP." +msgstr "" + +#: src/lib/bnet.c:398 +msgid "Valid name, no data record of resquested type." +msgstr "" + +#: src/lib/bnet.c:401 +msgid "Unknown error." +msgstr "" + +#: src/lib/bnet.c:655 +#, c-format +msgid "Unknown sig %d" +msgstr "" + +#: src/lib/bnet_server.c:109 +#, c-format +msgid "Cannot open stream socket. ERR=%s. Current %s All %s\n" +msgstr "" + +#: src/lib/bnet_server.c:122 src/lib/bnet_server.c:275 +#, c-format +msgid "Cannot set SO_REUSEADDR on socket: %s\n" +msgstr "" + +#: src/lib/bnet_server.c:131 +#, c-format +msgid "Cannot bind port %d: ERR=%s: Retrying ...\n" +msgstr "" + +#: src/lib/bnet_server.c:136 +#, c-format +msgid "Cannot bind port %d: ERR=%s.\n" +msgstr "" + +#: src/lib/bnet_server.c:147 +#, c-format +msgid "Could not init client queue: ERR=%s\n" +msgstr "" + +#: src/lib/bnet_server.c:166 src/lib/bnet_server.c:338 +#, c-format +msgid "Error in select: %s\n" +msgstr "" + +#: src/lib/bnet_server.c:187 src/lib/bnet_server.c:357 +#, c-format +msgid "Connection from %s:%d refused by hosts.access\n" +msgstr "" + +#: src/lib/bnet_server.c:202 src/lib/bnet_server.c:370 +#, c-format +msgid "Cannot set SO_KEEPALIVE on socket: %s\n" +msgstr "" + +#: src/lib/bnet_server.c:213 +msgid "Could not create client BSOCK.\n" +msgstr "" + +#: src/lib/bnet_server.c:220 +#, c-format +msgid "Could not add job to client queue: ERR=%s\n" +msgstr "" + +#: src/lib/bnet_server.c:237 +#, c-format +msgid "Could not destroy client queue: ERR=%s\n" +msgstr "" + +#: src/lib/bnet_server.c:265 +#, c-format +msgid "Cannot open stream socket: %s\n" +msgstr "" + +#: src/lib/bnet_server.c:295 +#, c-format +msgid "Cannot bind port %d: ERR=%s: retrying ...\n" +msgstr "" + +#: src/lib/bnet_server.c:301 +msgid "Server socket" +msgstr "" + +#: src/lib/bnet_server.c:301 src/lib/bnet_server.c:398 +msgid "client" +msgstr "" + +#: src/lib/bnet_server.c:386 +#, c-format +msgid "Socket accept error for %s. ERR=%s\n" +msgstr "" + +#: src/lib/bpipe.c:362 src/lib/bpipe.c:452 +msgid "Program killed by Bacula watchdog (timeout)\n" +msgstr "" + +#: src/lib/bsys.c:208 src/lib/bsys.c:225 src/lib/bsys.c:249 src/lib/bsys.c:262 +#, c-format +msgid "Out of memory: ERR=%s\n" +msgstr "" + +#: src/lib/bsys.c:304 +msgid "Buffer overflow.\n" +msgstr "" + +#: src/lib/bsys.c:370 +msgid "Bad errno" +msgstr "" + +#: src/lib/bsys.c:387 +msgid "Possible mutex deadlock.\n" +msgstr "" + +#: src/lib/bsys.c:391 src/lib/bsys.c:424 +#, c-format +msgid "Mutex lock failure. 
ERR=%s\n" +msgstr "" + +#: src/lib/bsys.c:394 +msgid "Possible mutex deadlock resolved.\n" +msgstr "" + +#: src/lib/bsys.c:407 +#, c-format +msgid "Mutex unlock not locked. ERR=%s\n" +msgstr "" + +#: src/lib/bsys.c:412 src/lib/bsys.c:434 +#, c-format +msgid "Mutex unlock failure. ERR=%s\n" +msgstr "" + +#: src/lib/bsys.c:448 +#, c-format +msgid "Memset for %d bytes at %s:%d\n" +msgstr "" + +#: src/lib/bsys.c:478 +#, c-format +msgid "Cannot open pid file. %s ERR=%s\n" +msgstr "" + +#: src/lib/bsys.c:493 +#, c-format +msgid "" +"%s is already running. pid=%d\n" +"Check file %s\n" +msgstr "" + +#: src/lib/bsys.c:507 +#, c-format +msgid "Could not open pid file. %s ERR=%s\n" +msgstr "" + +#: src/lib/bsys.c:615 +#, c-format +msgid "Could not create state file. %s ERR=%s\n" +msgstr "" + +#: src/lib/bsys.c:634 +#, c-format +msgid "Write final hdr error: ERR=%s\n" +msgstr "" + +#: src/lib/bsys.c:670 +#, c-format +msgid "Could not find userid=%s: ERR=%s\n" +msgstr "" + +#: src/lib/bsys.c:676 +#, c-format +msgid "Could not find password entry. ERR=%s\n" +msgstr "" + +#: src/lib/bsys.c:689 +#, c-format +msgid "Could not find group=%s: ERR=%s\n" +msgstr "" + +#: src/lib/bsys.c:697 +#, c-format +msgid "Could not initgroups for group=%s, userid=%s: ERR=%s\n" +msgstr "" + +#: src/lib/bsys.c:700 +#, c-format +msgid "Could not initgroups for userid=%s: ERR=%s\n" +msgstr "" + +#: src/lib/bsys.c:707 +#, c-format +msgid "Could not set group=%s: ERR=%s\n" +msgstr "" + +#: src/lib/bsys.c:713 +#, c-format +msgid "Could not set specified userid: %s\n" +msgstr "" + +#: src/lib/btimers.c:254 +msgid "stop_btimer called with NULL btimer_id\n" +msgstr "" + +#: src/lib/cram-md5.c:109 src/lib/cram-md5.c:137 +msgid "1999 Authorization failed.\n" +msgstr "" + +#: src/lib/crypto.c:435 +msgid "Unable to open certificate file" +msgstr "" + +#: src/lib/crypto.c:442 +msgid "Unable to read certificate from file" +msgstr "" + +#: src/lib/crypto.c:448 +msgid "Unable to extract public key from certificate" +msgstr "" + +#: src/lib/crypto.c:455 +msgid "" +"Provided certificate does not include the required subjectKeyIdentifier " +"extension." 
+msgstr "" + +#: src/lib/crypto.c:462 +#, c-format +msgid "Unsupported key type provided: %d\n" +msgstr "" + +#: src/lib/crypto.c:499 src/lib/crypto.c:547 +msgid "Unable to open private key file" +msgstr "" + +#: src/lib/crypto.c:529 src/lib/crypto.c:563 +msgid "Unable to read private key from file" +msgstr "" + +#: src/lib/crypto.c:622 +#, c-format +msgid "Unsupported digest type: %d\n" +msgstr "" + +#: src/lib/crypto.c:636 +msgid "OpenSSL digest initialization failed" +msgstr "" + +#: src/lib/crypto.c:650 +msgid "OpenSSL digest update failed" +msgstr "" + +#: src/lib/crypto.c:668 +msgid "OpenSSL digest finalize failed" +msgstr "" + +#: src/lib/crypto.c:766 +msgid "OpenSSL digest_new failed" +msgstr "" + +#: src/lib/crypto.c:772 +msgid "OpenSSL sign get digest failed" +msgstr "" + +#: src/lib/crypto.c:811 src/lib/crypto.c:815 +msgid "OpenSSL digest Verify final failed" +msgstr "" + +#: src/lib/crypto.c:820 +msgid "No signers found for crypto verify.\n" +msgstr "" + +#: src/lib/crypto.c:881 +msgid "Signature creation failed" +msgstr "" + +#: src/lib/crypto.c:959 +msgid "Signature decoding failed" +msgstr "" + +#: src/lib/crypto.c:1036 +msgid "Unsupported cipher type specified\n" +msgstr "" + +#: src/lib/crypto.c:1185 +msgid "CryptoData decoding failed" +msgstr "" + +#: src/lib/crypto.c:1229 +msgid "Failure decrypting the session key" +msgstr "" + +#: src/lib/crypto.c:1280 +#, c-format +msgid "Unsupported contentEncryptionAlgorithm: %d\n" +msgstr "" + +#: src/lib/crypto.c:1290 src/lib/crypto.c:1296 +msgid "OpenSSL cipher context initialization failed" +msgstr "" + +#: src/lib/crypto.c:1303 +msgid "Encryption session provided an invalid symmetric key" +msgstr "" + +#: src/lib/crypto.c:1309 +msgid "Encryption session provided an invalid IV" +msgstr "" + +#: src/lib/crypto.c:1315 +msgid "OpenSSL cipher context key/IV initialization failed" +msgstr "" + +#: src/lib/crypto.c:1385 +#, c-format +msgid "Unable to init OpenSSL threading: ERR=%s\n" +msgstr "" + +#: src/lib/crypto.c:1398 +msgid "Failed to seed OpenSSL PRNG\n" +msgstr "" + +#: src/lib/crypto.c:1424 +msgid "Failed to save OpenSSL PRNG\n" +msgstr "" + +#: src/lib/crypto.c:1485 +#, c-format +msgid "Unsupported digest type=%d specified\n" +msgstr "" + +#: src/lib/crypto.c:1505 +#, c-format +msgid "SHA1Update() returned an error: %d\n" +msgstr "" + +#: src/lib/crypto.c:1648 +msgid "No error" +msgstr "" + +#: src/lib/crypto.c:1650 +msgid "Signer not found" +msgstr "" + +#: src/lib/crypto.c:1652 +msgid "Recipient not found" +msgstr "" + +#: src/lib/crypto.c:1654 +msgid "Unsupported digest algorithm" +msgstr "" + +#: src/lib/crypto.c:1656 +msgid "Unsupported encryption algorithm" +msgstr "" + +#: src/lib/crypto.c:1658 +msgid "Signature is invalid" +msgstr "" + +#: src/lib/crypto.c:1660 +msgid "Decryption error" +msgstr "" + +#: src/lib/crypto.c:1663 +msgid "Internal error" +msgstr "" + +#: src/lib/crypto.c:1665 +msgid "Unknown error" +msgstr "" + +#: src/lib/daemon.c:66 +#, c-format +msgid "Cannot fork to become daemon: %s\n" +msgstr "" + +#: src/lib/edit.c:446 +#, c-format +msgid "Illegal character \"%c\" in name.\n" +msgstr "" + +#: src/lib/edit.c:453 +msgid "Name too long.\n" +msgstr "" + +#: src/lib/jcr.c:297 +msgid "NULL jcr.\n" +msgstr "" + +#: src/lib/jcr.c:430 +#, c-format +msgid "JCR use_count=%d JobId=%d\n" +msgstr "" + +#: src/lib/jcr.c:762 +#, c-format +msgid "" +"Watchdog sending kill after %d secs to thread stalled reading Storage " +"daemon.\n" +msgstr "" + +#: src/lib/jcr.c:774 +#, c-format +msgid "" +"Watchdog sending kill 
after %d secs to thread stalled reading File daemon.\n" +msgstr "" + +#: src/lib/jcr.c:786 +#, c-format +msgid "" +"Watchdog sending kill after %d secs to thread stalled reading Director.\n" +msgstr "" + +#: src/lib/lex.c:93 src/wx-console/console_thread.cpp:208 +#, c-format +msgid "Problem probably begins at line %d.\n" +msgstr "" + +#: src/lib/lex.c:98 src/wx-console/console_thread.cpp:213 +#, c-format +msgid "" +"Config error: %s\n" +" : line %d, col %d of file %s\n" +"%s\n" +"%s" +msgstr "" + +#: src/lib/lex.c:102 +#, c-format +msgid "Config error: %s\n" +msgstr "" + +#: src/lib/lex.c:131 +msgid "Close of NULL file\n" +msgstr "" + +#: src/lib/lex.c:226 +msgid "get_char: called after EOF\n" +msgstr "" + +#: src/lib/lex.c:268 +#, c-format +msgid "Config token too long, file: %s, line %d, begins at line %d\n" +msgstr "" + +#: src/lib/lex.c:292 +msgid "none" +msgstr "" + +#: src/lib/lex.c:293 +msgid "comment" +msgstr "" + +#: src/lib/lex.c:294 +msgid "number" +msgstr "" + +#: src/lib/lex.c:295 +msgid "ip_addr" +msgstr "" + +#: src/lib/lex.c:296 +msgid "identifier" +msgstr "" + +#: src/lib/lex.c:297 +msgid "string" +msgstr "" + +#: src/lib/lex.c:298 +msgid "quoted_string" +msgstr "" + +#: src/lib/lex.c:299 +msgid "UTF-8 Byte Order Mark" +msgstr "" + +#: src/lib/lex.c:300 +msgid "UTF-16le Byte Order Mark" +msgstr "" + +#: src/lib/lex.c:338 src/lib/lex.c:344 +#, c-format +msgid "expected a positive integer number, got: %s" +msgstr "" + +#: src/lib/lex.c:454 +msgid "" +"This config file appears to be in an unsupported Unicode format (UTF-16be). " +"Please resave as UTF-8\n" +msgstr "" + +#: src/lib/lex.c:583 +#, c-format +msgid "Cannot open included config file %s: %s\n" +msgstr "" + +#: src/lib/lex.c:642 +#, c-format +msgid "expected an integer or a range, got %s: %s" +msgstr "" + +#: src/lib/lex.c:656 src/lib/lex.c:664 src/lib/lex.c:675 src/lib/lex.c:683 +#, c-format +msgid "expected an integer number, got %s: %s" +msgstr "" + +#: src/lib/lex.c:693 +#, c-format +msgid "expected a name, got %s: %s" +msgstr "" + +#: src/lib/lex.c:697 +#, c-format +msgid "name %s length %d too long, max is %d\n" +msgstr "" + +#: src/lib/lex.c:705 +#, c-format +msgid "expected a string, got %s: %s" +msgstr "" + +#: src/lib/mem_pool.c:108 +#, c-format +msgid "MemPool index %d larger than max %d\n" +msgstr "" + +#: src/lib/mem_pool.c:126 src/lib/mem_pool.c:146 src/lib/mem_pool.c:181 +#: src/lib/mem_pool.c:253 src/lib/mem_pool.c:273 src/lib/mem_pool.c:311 +#: src/lib/mem_pool.c:583 +#, c-format +msgid "Out of memory requesting %d bytes\n" +msgstr "" + +#: src/lib/message.c:268 src/lib/message.c:278 +#, c-format +msgid "Could not open console message file %s: ERR=%s\n" +msgstr "" + +#: src/lib/message.c:283 +#, c-format +msgid "Could not get con mutex: ERR=%s\n" +msgstr "" + +#: src/lib/message.c:387 +#, c-format +msgid "open mail pipe %s failed: ERR=%s\n" +msgstr "" + +#: src/lib/message.c:393 +msgid "Bacula Message" +msgstr "" + +#: src/lib/message.c:453 +msgid "open mail pipe failed.\n" +msgstr "" + +#: src/lib/message.c:465 +#, c-format +msgid "close error: ERR=%s\n" +msgstr "" + +#: src/lib/message.c:476 +#, c-format +msgid "Mail prog: %s" +msgstr "" + +#: src/lib/message.c:485 +#, c-format +msgid "" +"Mail program terminated in error.\n" +"CMD=%s\n" +"ERR=%s\n" +msgstr "" + +#: src/lib/message.c:584 src/lib/message.c:735 +#, c-format +msgid "fopen %s failed: ERR=%s\n" +msgstr "" + +#: src/lib/message.c:717 +#, c-format +msgid "" +"Operator mail program terminated in error.\n" +"CMD=%s\n" +"ERR=%s\n" +msgstr "" 
+ +#: src/lib/message.c:1015 +#, c-format +msgid "%s: ABORTING due to ERROR in %s:%d\n" +msgstr "" + +#: src/lib/message.c:1019 +#, c-format +msgid "%s: ERROR TERMINATION at %s:%d\n" +msgstr "" + +#: src/lib/message.c:1024 +#, c-format +msgid "%s: Fatal Error because: " +msgstr "" + +#: src/lib/message.c:1026 +#, c-format +msgid "%s: Fatal Error at %s:%d because:\n" +msgstr "" + +#: src/lib/message.c:1030 +#, c-format +msgid "%s: ERROR: " +msgstr "" + +#: src/lib/message.c:1032 +#, c-format +msgid "%s: ERROR in %s:%d " +msgstr "" + +#: src/lib/message.c:1035 +#, c-format +msgid "%s: Warning: " +msgstr "" + +#: src/lib/message.c:1038 +#, c-format +msgid "%s: Security violation: " +msgstr "" + +#: src/lib/message.c:1114 +#, c-format +msgid "%s ABORTING due to ERROR\n" +msgstr "" + +#: src/lib/message.c:1117 +#, c-format +msgid "%s ERROR TERMINATION\n" +msgstr "" + +#: src/lib/message.c:1120 +#, c-format +msgid "%s: %s Fatal error: " +msgstr "" + +#: src/lib/message.c:1126 +#, c-format +msgid "%s: %s Error: " +msgstr "" + +#: src/lib/message.c:1132 +#, c-format +msgid "%s: %s Warning: " +msgstr "" + +#: src/lib/message.c:1135 +#, c-format +msgid "%s: %s Security violation: " +msgstr "" + +#: src/lib/openssl.c:118 src/lib/openssl.c:179 src/stored/dev.c:218 +#: src/stored/dev.c:236 src/stored/dev.c:243 src/stored/stored_conf.c:611 +#, c-format +msgid "Unable to init mutex: ERR=%s\n" +msgstr "" + +#: src/lib/openssl.c:140 src/lib/openssl.c:213 +#, c-format +msgid "Unable to destroy mutex: ERR=%s\n" +msgstr "" + +#: src/lib/parse_conf.c:180 +msgid "***UNKNOWN***" +msgstr "" + +#: src/lib/parse_conf.c:200 +#, c-format +msgid "Unable to initialize resource lock. ERR=%s\n" +msgstr "" + +#: src/lib/parse_conf.c:287 src/lib/parse_conf.c:306 +#, c-format +msgid "expected an =, got: %s" +msgstr "" + +#: src/lib/parse_conf.c:314 +#, c-format +msgid "Unknown item code: %d\n" +msgstr "" + +#: src/lib/parse_conf.c:354 +#, c-format +msgid "message type: %s not found" +msgstr "" + +#: src/lib/parse_conf.c:391 +#, c-format +msgid "Attempt to redefine name \"%s\" to \"%s\"." +msgstr "" + +#: src/lib/parse_conf.c:486 +#, c-format +msgid "Attempt to redefine resource \"%s\" referenced on line %d : %s\n" +msgstr "" + +#: src/lib/parse_conf.c:521 +#, c-format +msgid "Too many %s directives. Max. is %d. line %d: %s\n" +msgstr "" + +#: src/lib/parse_conf.c:531 +#, c-format +msgid "Could not find config Resource \"%s\" referenced on line %d : %s\n" +msgstr "" + +#: src/lib/parse_conf.c:593 +#, c-format +msgid "Missing config Resource \"%s\" referenced on line %d : %s\n" +msgstr "" + +#: src/lib/parse_conf.c:657 +#, c-format +msgid "expected a size number, got: %s" +msgstr "" + +#: src/lib/parse_conf.c:662 +#, c-format +msgid "expected a size, got: %s" +msgstr "" + +#: src/lib/parse_conf.c:699 src/lib/parse_conf.c:704 +#, c-format +msgid "expected a time period, got: %s" +msgstr "" + +#: src/lib/parse_conf.c:763 +#, c-format +msgid "Expected a Tape Label keyword, got: %s" +msgstr "" + +#: src/lib/parse_conf.c:819 +#, c-format +msgid "Cannot open config file \"%s\": %s\n" +msgstr "" + +#: src/lib/parse_conf.c:835 +msgid "" +"Currently we cannot handle UTF-16 source files. 
Please convert the conf file " +"to UTF-8\n" +msgstr "" + +#: src/lib/parse_conf.c:839 +#, c-format +msgid "Expected a Resource name identifier, got: %s" +msgstr "" + +#: src/lib/parse_conf.c:852 +#, c-format +msgid "expected resource name, got: %s" +msgstr "" + +#: src/lib/parse_conf.c:863 +#, c-format +msgid "not in resource definition: %s" +msgstr "" + +#: src/lib/parse_conf.c:888 +#, c-format +msgid "" +"Keyword \"%s\" not permitted in this resource.\n" +"Perhaps you left the trailing brace off of the previous resource." +msgstr "" + +#: src/lib/parse_conf.c:899 +msgid "Name not specified for resource" +msgstr "" + +#: src/lib/parse_conf.c:908 +#, c-format +msgid "unexpected token %d %s in resource definition" +msgstr "" + +#: src/lib/parse_conf.c:914 +#, c-format +msgid "Unknown parser state %d\n" +msgstr "" + +#: src/lib/parse_conf.c:919 +msgid "End of conf file reached with unclosed resource." +msgstr "" + +#: src/lib/pythonlib.c:127 +msgid "Could not initialize Python\n" +msgstr "" + +#: src/lib/pythonlib.c:132 +#, c-format +msgid "Could not Run Python string %s\n" +msgstr "" + +#: src/lib/pythonlib.c:144 +msgid "Could not initialize Python Job type.\n" +msgstr "" + +#: src/lib/pythonlib.c:149 +#, c-format +msgid "Could not import Python script %s/%s. Python disabled.\n" +msgstr "" + +#: src/lib/pythonlib.c:252 +msgid "Could not create Python Job Object.\n" +msgstr "" + +#: src/lib/pythonlib.c:265 src/lib/pythonlib.c:289 +#, c-format +msgid "Python function \"%s\" not found.\n" +msgstr "" + +#: src/lib/pythonlib.c:304 +#, c-format +msgid "Unknown Python daemon event %s\n" +msgstr "" + +#: src/lib/pythonlib.c:329 +#, c-format +msgid "Unable to initialize the Python lock. ERR=%s\n" +msgstr "" + +#: src/lib/res.c:66 +#, c-format +msgid "rwl_writelock failure at %s:%d: ERR=%s\n" +msgstr "" + +#: src/lib/res.c:76 +#, c-format +msgid "rwl_writeunlock failure at %s:%d:. ERR=%s\n" +msgstr "" + +#: src/lib/runscript.c:212 +#, c-format +msgid "%s: run command \"%s\"\n" +msgstr "" + +#: src/lib/runscript.c:218 +#, c-format +msgid "Runscript: %s could not execute. ERR=%s\n" +msgstr "" + +#: src/lib/runscript.c:227 +#, c-format +msgid "%s: %s\n" +msgstr "" + +#: src/lib/runscript.c:232 +#, c-format +msgid "Runscript: %s returned non-zero status=%d. ERR=%s\n" +msgstr "" + +#: src/lib/rwlock.c:297 +msgid "rwl_writeunlock called too many times.\n" +msgstr "" + +#: src/lib/rwlock.c:301 +msgid "rwl_writeunlock by non-owner.\n" +msgstr "" + +#: src/lib/rwlock.c:367 +#, c-format +msgid "Write lock failed. ERR=%s\n" +msgstr "" + +#: src/lib/rwlock.c:375 +#, c-format +msgid "Write unlock failed. ERR=%s\n" +msgstr "" + +#: src/lib/rwlock.c:386 +#, c-format +msgid "Read lock failed. ERR=%s\n" +msgstr "" + +#: src/lib/rwlock.c:394 +#, c-format +msgid "Read unlock failed. ERR=%s\n" +msgstr "" + +#: src/lib/rwlock.c:403 +#, c-format +msgid "Thread %d found unchanged elements %d times\n" +msgstr "" + +#: src/lib/rwlock.c:436 +#, c-format +msgid "Init rwlock failed. ERR=%s\n" +msgstr "" + +#: src/lib/rwlock.c:452 +#, c-format +msgid "Create thread failed. ERR=%s\n" +msgstr "" + +#: src/lib/rwlock.c:464 +#, c-format +msgid "Join thread failed. 
ERR=%s\n" +msgstr "" + +#: src/lib/rwlock.c:467 +#, c-format +msgid "%02d: interval %d, writes %d, reads %d\n" +msgstr "" + +#: src/lib/rwlock.c:477 +#, c-format +msgid "data %02d: value %d, %d writes\n" +msgstr "" + +#: src/lib/rwlock.c:482 +#, c-format +msgid "Total: %d thread writes, %d data writes\n" +msgstr "" + +#: src/lib/rwlock.c:554 +msgid "Try write lock" +msgstr "" + +#: src/lib/rwlock.c:560 +msgid "Try read lock" +msgstr "" + +#: src/lib/rwlock.c:615 +msgid "Create thread" +msgstr "" + +#: src/lib/rwlock.c:625 +msgid "Join thread" +msgstr "" + +#: src/lib/rwlock.c:627 +#, c-format +msgid "%02d: interval %d, updates %d, r_collisions %d, w_collisions %d\n" +msgstr "" + +#: src/lib/rwlock.c:639 +#, c-format +msgid "data %02d: value %d, %d updates\n" +msgstr "" + +#: src/lib/signal.c:68 +msgid "Invalid signal number" +msgstr "" + +#: src/lib/signal.c:94 +#, c-format +msgid "Bacula interrupted by signal %d: %s\n" +msgstr "" + +#: src/lib/signal.c:107 +#, c-format +msgid "Kaboom! %s, %s got signal %d - %s. Attempting traceback.\n" +msgstr "" + +#: src/lib/signal.c:109 +#, c-format +msgid "Kaboom! exepath=%s\n" +msgstr "" + +#: src/lib/signal.c:143 +#, c-format +msgid "Fork error: ERR=%s\n" +msgstr "" + +#: src/lib/signal.c:150 +#, c-format +msgid "Calling: %s %s %s\n" +msgstr "" + +#: src/lib/signal.c:153 +#, c-format +msgid "execv: %s failed: ERR=%s\n" +msgstr "" + +#: src/lib/signal.c:168 +#, c-format +msgid "Traceback complete, attempting cleanup ...\n" +msgstr "" + +#: src/lib/signal.c:176 +#, c-format +msgid "It looks like the traceback worked ...\n" +msgstr "" + +#: src/lib/signal.c:205 +#, c-format +msgid "BA_NSIG too small (%d) should be (%d)\n" +msgstr "" + +#: src/lib/signal.c:211 +msgid "UNKNOWN SIGNAL" +msgstr "" + +#: src/lib/signal.c:212 +msgid "Hangup" +msgstr "" + +#: src/lib/signal.c:213 +msgid "Interrupt" +msgstr "" + +#: src/lib/signal.c:214 +msgid "Quit" +msgstr "" + +#: src/lib/signal.c:215 +msgid "Illegal instruction" +msgstr "" + +#: src/lib/signal.c:216 +msgid "Trace/Breakpoint trap" +msgstr "" + +#: src/lib/signal.c:217 +msgid "Abort" +msgstr "" + +#: src/lib/signal.c:219 +msgid "EMT instruction (Emulation Trap)" +msgstr "" + +#: src/lib/signal.c:222 +msgid "IOT trap" +msgstr "" + +#: src/lib/signal.c:224 +msgid "BUS error" +msgstr "" + +#: src/lib/signal.c:225 +msgid "Floating-point exception" +msgstr "" + +#: src/lib/signal.c:226 +msgid "Kill, unblockable" +msgstr "" + +#: src/lib/signal.c:227 +msgid "User-defined signal 1" +msgstr "" + +#: src/lib/signal.c:228 +msgid "Segmentation violation" +msgstr "" + +#: src/lib/signal.c:229 +msgid "User-defined signal 2" +msgstr "" + +#: src/lib/signal.c:230 +msgid "Broken pipe" +msgstr "" + +#: src/lib/signal.c:231 +msgid "Alarm clock" +msgstr "" + +#: src/lib/signal.c:232 +msgid "Termination" +msgstr "" + +#: src/lib/signal.c:234 +msgid "Stack fault" +msgstr "" + +#: src/lib/signal.c:236 +msgid "Child status has changed" +msgstr "" + +#: src/lib/signal.c:237 +msgid "Continue" +msgstr "" + +#: src/lib/signal.c:238 +msgid "Stop, unblockable" +msgstr "" + +#: src/lib/signal.c:239 +msgid "Keyboard stop" +msgstr "" + +#: src/lib/signal.c:240 +msgid "Background read from tty" +msgstr "" + +#: src/lib/signal.c:241 +msgid "Background write to tty" +msgstr "" + +#: src/lib/signal.c:242 +msgid "Urgent condition on socket" +msgstr "" + +#: src/lib/signal.c:243 +msgid "CPU limit exceeded" +msgstr "" + +#: src/lib/signal.c:244 +msgid "File size limit exceeded" +msgstr "" + +#: src/lib/signal.c:245 +msgid "Virtual alarm clock" 
+msgstr "" + +#: src/lib/signal.c:246 +msgid "Profiling alarm clock" +msgstr "" + +#: src/lib/signal.c:247 +msgid "Window size change" +msgstr "" + +#: src/lib/signal.c:248 +msgid "I/O now possible" +msgstr "" + +#: src/lib/signal.c:250 +msgid "Power failure restart" +msgstr "" + +#: src/lib/signal.c:253 +msgid "No runnable lwp" +msgstr "" + +#: src/lib/signal.c:256 +msgid "SIGLWP special signal used by thread library" +msgstr "" + +#: src/lib/signal.c:259 +msgid "Checkpoint Freeze" +msgstr "" + +#: src/lib/signal.c:262 +msgid "Checkpoint Thaw" +msgstr "" + +#: src/lib/signal.c:265 +msgid "Thread Cancellation" +msgstr "" + +#: src/lib/signal.c:268 +msgid "Resource Lost (e.g. record-lock lost)" +msgstr "" + +#: src/lib/smartall.c:146 src/lib/smartall.c:255 src/lib/smartall.c:270 +msgid "Out of memory\n" +msgstr "" + +#: src/lib/smartall.c:151 +msgid "Too much memory used." +msgstr "" + +#: src/lib/smartall.c:180 +#, c-format +msgid "Attempt to free NULL called from %s:%d\n" +msgstr "" + +#: src/lib/smartall.c:194 +#, c-format +msgid "double free from %s:%d\n" +msgstr "" + +#: src/lib/smartall.c:202 +#, c-format +msgid "qp->qnext->qprev != qp called from %s:%d\n" +msgstr "" + +#: src/lib/smartall.c:206 +#, c-format +msgid "qp->qprev->qnext != qp called from %s:%d\n" +msgstr "" + +#: src/lib/smartall.c:215 +#, c-format +msgid "Buffer overrun called from %s:%d\n" +msgstr "" + +#: src/lib/smartall.c:292 +#, c-format +msgid "sm_realloc size: %d\n" +msgstr "" + +#: src/lib/smartall.c:330 +#, c-format +msgid "sm_realloc %d at %x from %s:%d\n" +msgstr "" + +#: src/lib/smartall.c:392 +#, c-format +msgid "" +"\n" +"Orphaned buffers exist. Dump terminated following\n" +" discovery of bad links in chain of orphaned buffers.\n" +" Buffer address with bad links: %p\n" +msgstr "" + +#: src/lib/smartall.c:404 +#, c-format +msgid "%s buffer: %s %6u bytes buf=%p allocated at %s:%d\n" +msgstr "" + +#: src/lib/smartall.c:440 +#, c-format +msgid "Damaged buffer found. 
Called from %s:%d\n" +msgstr "" + +#: src/lib/smartall.c:470 +#, c-format +msgid "" +"\n" +"Damaged buffers found at %s:%d\n" +msgstr "" + +#: src/lib/smartall.c:473 +#, c-format +msgid " discovery of bad prev link.\n" +msgstr "" + +#: src/lib/smartall.c:476 +#, c-format +msgid " discovery of bad next link.\n" +msgstr "" + +#: src/lib/smartall.c:479 +#, c-format +msgid " discovery of data overrun.\n" +msgstr "" + +#: src/lib/smartall.c:482 +#, c-format +msgid " Buffer address: %p\n" +msgstr "" + +#: src/lib/smartall.c:489 +#, c-format +msgid "Damaged buffer: %6u bytes allocated at line %d of %s %s\n" +msgstr "" + +#: src/lib/tls.c:93 +#, c-format +msgid "" +"Error with certificate at depth: %d, issuer = %s, subject = %s, ERR=%d:%s\n" +msgstr "" + +#: src/lib/tls.c:130 +msgid "Error initializing SSL context" +msgstr "" + +#: src/lib/tls.c:151 +msgid "Error loading certificate verification stores" +msgstr "" + +#: src/lib/tls.c:156 +msgid "" +"Either a certificate file or a directory must be specified as a verification " +"store\n" +msgstr "" + +#: src/lib/tls.c:167 +msgid "Error loading certificate file" +msgstr "" + +#: src/lib/tls.c:175 +msgid "Error loading private key" +msgstr "" + +#: src/lib/tls.c:183 +msgid "Unable to open DH parameters file" +msgstr "" + +#: src/lib/tls.c:189 +msgid "Unable to load DH parameters from specified file" +msgstr "" + +#: src/lib/tls.c:193 +msgid "Failed to set TLS Diffie-Hellman parameters" +msgstr "" + +#: src/lib/tls.c:203 +msgid "Error setting cipher list, no valid ciphers available\n" +msgstr "" + +#: src/lib/tls.c:262 +msgid "Peer failed to present a TLS certificate\n" +msgstr "" + +#: src/lib/tls.c:305 +#, c-format +msgid "Peer %s failed to present a TLS certificate\n" +msgstr "" + +#: src/lib/tls.c:407 +msgid "Error creating file descriptor-based BIO" +msgstr "" + +#: src/lib/tls.c:418 +msgid "Error creating new SSL object" +msgstr "" + +#: src/lib/tls.c:481 src/lib/tls.c:504 +msgid "Connect failure" +msgstr "" + +#: src/lib/tls.c:576 src/lib/tls.c:580 +msgid "TLS shutdown failure." +msgstr "" + +#: src/lib/tls.c:639 +msgid "TLS read/write failure." 
+msgstr "" + +#: src/lib/util.c:182 +msgid "Running" +msgstr "" + +#: src/lib/util.c:185 +msgid "Blocked" +msgstr "" + +#: src/lib/util.c:195 +msgid "Non-fatal error" +msgstr "" + +#: src/lib/util.c:198 src/lib/util.c:265 +msgid "Canceled" +msgstr "" + +#: src/lib/util.c:201 +msgid "Verify differences" +msgstr "" + +#: src/lib/util.c:204 +msgid "Waiting on FD" +msgstr "" + +#: src/lib/util.c:207 +msgid "Wait on SD" +msgstr "" + +#: src/lib/util.c:210 +msgid "Wait for new Volume" +msgstr "" + +#: src/lib/util.c:213 +msgid "Waiting for mount" +msgstr "" + +#: src/lib/util.c:216 +msgid "Waiting for Storage resource" +msgstr "" + +#: src/lib/util.c:219 +msgid "Waiting for Job resource" +msgstr "" + +#: src/lib/util.c:222 +msgid "Waiting for Client resource" +msgstr "" + +#: src/lib/util.c:225 +msgid "Waiting on Max Jobs" +msgstr "" + +#: src/lib/util.c:228 +msgid "Waiting for Start Time" +msgstr "" + +#: src/lib/util.c:231 +msgid "Waiting on Priority" +msgstr "" + +#: src/lib/util.c:238 +#, c-format +msgid "Unknown Job termination status=%d" +msgstr "" + +#: src/lib/util.c:262 +msgid "Fatal Error" +msgstr "" + +#: src/lib/util.c:268 +msgid "Differences" +msgstr "" + +#: src/lib/util.c:271 +msgid "Unknown term code" +msgstr "" + +#: src/lib/util.c:299 +msgid "Migrate" +msgstr "" + +#: src/lib/util.c:302 +msgid "Copy" +msgstr "" + +#: src/lib/util.c:305 src/wx-console/wxbmainframe.cpp:276 +msgid "Console" +msgstr "" + +#: src/lib/util.c:308 +msgid "System or Console" +msgstr "" + +#: src/lib/util.c:311 +msgid "Scan" +msgstr "" + +#: src/lib/util.c:314 +msgid "Unknown Type" +msgstr "" + +#: src/lib/util.c:346 +msgid "Verify Init Catalog" +msgstr "" + +#: src/lib/util.c:355 +msgid "Verify Data" +msgstr "" + +#: src/lib/util.c:692 +msgid "Working directory not defined. Cannot continue.\n" +msgstr "" + +#: src/lib/util.c:695 +#, c-format +msgid "Working Directory: \"%s\" not found. Cannot continue.\n" +msgstr "" + +#: src/lib/util.c:699 +#, c-format +msgid "Working Directory: \"%s\" is not a directory. 
Cannot continue.\n" +msgstr "" + +#: src/lib/var.c:2669 +msgid "everything ok" +msgstr "" + +#: src/lib/var.c:2670 +msgid "incomplete named character" +msgstr "" + +#: src/lib/var.c:2671 +msgid "incomplete hexadecimal value" +msgstr "" + +#: src/lib/var.c:2672 +msgid "invalid hexadecimal value" +msgstr "" + +#: src/lib/var.c:2673 +msgid "octal value too large" +msgstr "" + +#: src/lib/var.c:2674 +msgid "invalid octal value" +msgstr "" + +#: src/lib/var.c:2675 +msgid "incomplete octal value" +msgstr "" + +#: src/lib/var.c:2676 +msgid "incomplete grouped hexadecimal value" +msgstr "" + +#: src/lib/var.c:2677 +msgid "incorrect character class specification" +msgstr "" + +#: src/lib/var.c:2678 +msgid "invalid expansion configuration" +msgstr "" + +#: src/lib/var.c:2679 +msgid "out of memory" +msgstr "" + +#: src/lib/var.c:2680 +msgid "incomplete variable specification" +msgstr "" + +#: src/lib/var.c:2681 +msgid "undefined variable" +msgstr "" + +#: src/lib/var.c:2682 +msgid "input is neither text nor variable" +msgstr "" + +#: src/lib/var.c:2683 +msgid "unknown command character in variable" +msgstr "" + +#: src/lib/var.c:2684 +msgid "malformatted search and replace operation" +msgstr "" + +#: src/lib/var.c:2685 +msgid "unknown flag in search and replace operation" +msgstr "" + +#: src/lib/var.c:2686 +msgid "invalid regex in search and replace operation" +msgstr "" + +#: src/lib/var.c:2687 +msgid "missing parameter in command" +msgstr "" + +#: src/lib/var.c:2688 +msgid "empty search string in search and replace operation" +msgstr "" + +#: src/lib/var.c:2689 +msgid "start offset missing in cut operation" +msgstr "" + +#: src/lib/var.c:2690 +msgid "offsets in cut operation delimited by unknown character" +msgstr "" + +#: src/lib/var.c:2691 +msgid "range out of bounds in cut operation" +msgstr "" + +#: src/lib/var.c:2692 +msgid "offset out of bounds in cut operation" +msgstr "" + +#: src/lib/var.c:2693 +msgid "logic error in cut operation" +msgstr "" + +#: src/lib/var.c:2694 +msgid "malformatted transpose operation" +msgstr "" + +#: src/lib/var.c:2695 +msgid "source and target class mismatch in transpose operation" +msgstr "" + +#: src/lib/var.c:2696 +msgid "empty character class in transpose operation" +msgstr "" + +#: src/lib/var.c:2697 +msgid "incorrect character class in transpose operation" +msgstr "" + +#: src/lib/var.c:2698 +msgid "malformatted padding operation" +msgstr "" + +#: src/lib/var.c:2699 +msgid "width parameter missing in padding operation" +msgstr "" + +#: src/lib/var.c:2700 +msgid "fill string missing in padding operation" +msgstr "" + +#: src/lib/var.c:2701 +msgid "unknown quoted pair in search and replace operation" +msgstr "" + +#: src/lib/var.c:2702 +msgid "sub-matching reference out of range" +msgstr "" + +#: src/lib/var.c:2703 +msgid "invalid argument" +msgstr "" + +#: src/lib/var.c:2704 +msgid "incomplete quoted pair" +msgstr "" + +#: src/lib/var.c:2705 +msgid "lookup function does not support variable arrays" +msgstr "" + +#: src/lib/var.c:2706 +msgid "index of array variable contains an invalid character" +msgstr "" + +#: src/lib/var.c:2707 +msgid "index of array variable is incomplete" +msgstr "" + +#: src/lib/var.c:2708 +msgid "bracket expression in array variable's index not closed" +msgstr "" + +#: src/lib/var.c:2709 +msgid "division by zero error in index specification" +msgstr "" + +#: src/lib/var.c:2710 +msgid "unterminated loop construct" +msgstr "" + +#: src/lib/var.c:2711 +msgid "invalid character in loop limits" +msgstr "" + +#: src/lib/var.c:2712 +msgid 
"malformed operation argument list" +msgstr "" + +#: src/lib/var.c:2713 +msgid "undefined operation" +msgstr "" + +#: src/lib/var.c:2714 +msgid "formatting failure" +msgstr "" + +#: src/lib/var.c:2723 +msgid "unknown error" +msgstr "" + +#: src/lib/watchdog.c:83 +#, c-format +msgid "Unable to initialize watchdog lock. ERR=%s\n" +msgstr "" + +#: src/lib/watchdog.c:180 +msgid "BUG! register_watchdog called before start_watchdog\n" +msgstr "" + +#: src/lib/watchdog.c:183 +#, c-format +msgid "BUG! Watchdog %p has NULL callback\n" +msgstr "" + +#: src/lib/watchdog.c:186 +#, c-format +msgid "BUG! Watchdog %p has zero interval\n" +msgstr "" + +#: src/lib/watchdog.c:206 +msgid "BUG! unregister_watchdog_unlocked called before start_watchdog\n" +msgstr "" + +#: src/lib/watchdog.c:325 +#, c-format +msgid "rwl_writelock failure. ERR=%s\n" +msgstr "" + +#: src/lib/watchdog.c:340 +#, c-format +msgid "rwl_writeunlock failure. ERR=%s\n" +msgstr "" + +#: src/stored/acquire.c:69 +#, c-format +msgid "Acquire read: num_writers=%d not zero. Job %d canceled.\n" +msgstr "" + +#: src/stored/acquire.c:78 +#, c-format +msgid "No volumes specified for reading. Job %s canceled.\n" +msgstr "" + +#: src/stored/acquire.c:87 +#, c-format +msgid "Logic error: no next volume to read. Numvol=%d Curvol=%d\n" +msgstr "" + +#: src/stored/acquire.c:113 +#, c-format +msgid "" +"Changing device. Want Media Type=\"%s\" have=\"%s\"\n" +" device=%s\n" +msgstr "" + +#: src/stored/acquire.c:151 +#, c-format +msgid "Media Type change. New device %s chosen.\n" +msgstr "" + +#: src/stored/acquire.c:162 +#, c-format +msgid "No suitable device found to read Volume \"%s\"\n" +msgstr "" + +#: src/stored/acquire.c:191 +#, c-format +msgid "Job %s canceled.\n" +msgstr "" + +#: src/stored/acquire.c:205 +#, c-format +msgid "Read open device %s Volume \"%s\" failed: ERR=%s\n" +msgstr "" + +#: src/stored/acquire.c:276 +#, c-format +msgid "Too many errors trying to mount device %s for reading.\n" +msgstr "" + +#: src/stored/acquire.c:285 +#, c-format +msgid "Ready to read from volume \"%s\" on device %s.\n" +msgstr "" + +#: src/stored/acquire.c:328 +#, c-format +msgid "Want to append, but device %s is busy reading.\n" +msgstr "" + +#: src/stored/acquire.c:358 +#, c-format +msgid "" +"Wanted to append to Volume \"%s\", but device %s is busy writing on \"%s" +"\" .\n" +msgstr "" + +#: src/stored/acquire.c:377 +#, c-format +msgid "" +"Cannot recycle volume \"%s\" on device %s because it is in use by another " +"job.\n" +msgstr "" + +#: src/stored/acquire.c:402 +#, c-format +msgid "" +"Invalid tape position on volume \"%s\" on device %s. Expected %d, got %d\n" +msgstr "" + +#: src/stored/acquire.c:422 +#, c-format +msgid "Could not ready device %s for append.\n" +msgstr "" + +#: src/stored/acquire.c:506 src/stored/block.c:367 src/stored/block.c:713 +#: src/stored/block.c:788 +#, c-format +msgid "Could not create JobMedia record for Volume=\"%s\" Job=%s\n" +msgstr "" + +#: src/stored/acquire.c:547 +#, c-format +msgid "Alert: %s" +msgstr "" + +#: src/stored/acquire.c:555 +#, c-format +msgid "3997 Bad alert command: %s: ERR=%s.\n" +msgstr "" + +#: src/stored/ansi_label.c:96 +#, c-format +msgid "Read error on device %s in ANSI label. ERR=%s\n" +msgstr "" + +#: src/stored/ansi_label.c:106 +msgid "Insane! 
End of tape while reading ANSI label.\n" +msgstr "" + +#: src/stored/ansi_label.c:130 +msgid "No VOL1 label while reading ANSI/IBM label.\n" +msgstr "" + +#: src/stored/ansi_label.c:150 +#, c-format +msgid "Wanted ANSI Volume \"%s\" got \"%s\"\n" +msgstr "" + +#: src/stored/ansi_label.c:161 +msgid "No HDR1 label while reading ANSI label.\n" +msgstr "" + +#: src/stored/ansi_label.c:167 +#, c-format +msgid "ANSI/IBM Volume \"%s\" does not belong to Bacula.\n" +msgstr "" + +#: src/stored/ansi_label.c:178 +msgid "No HDR2 label while reading ANSI/IBM label.\n" +msgstr "" + +#: src/stored/ansi_label.c:192 +msgid "Unknown or bad ANSI/IBM label record.\n" +msgstr "" + +#: src/stored/ansi_label.c:199 +msgid "Too many records while reading ANSI/IBM label.\n" +msgstr "" + +#: src/stored/ansi_label.c:298 +#, c-format +msgid "ANSI Volume label name \"%s\" longer than 6 chars.\n" +msgstr "" + +#: src/stored/ansi_label.c:315 +#, c-format +msgid "Could not write ANSI VOL1 label. ERR=%s\n" +msgstr "" + +#: src/stored/ansi_label.c:353 src/stored/ansi_label.c:382 +#, c-format +msgid "Could not write ANSI HDR1 label. ERR=%s\n" +msgstr "" + +#: src/stored/ansi_label.c:358 src/stored/ansi_label.c:389 +msgid "Could not write ANSI HDR1 label.\n" +msgstr "" + +#: src/stored/ansi_label.c:394 +#, c-format +msgid "Error writing EOF to tape. ERR=%s" +msgstr "" + +#: src/stored/ansi_label.c:399 +msgid "write_ansi_ibm_label called for non-ANSI/IBM type\n" +msgstr "" + +#: src/stored/append.c:64 +msgid "DCR is NULL!!!\n" +msgstr "" + +#: src/stored/append.c:69 +msgid "DEVICE is NULL!!!\n" +msgstr "" + +#: src/stored/append.c:81 +msgid "Unable to set network buffer size.\n" +msgstr "" + +#: src/stored/append.c:94 src/stored/append.c:103 src/stored/append.c:115 +#: src/stored/append.c:298 src/stored/append.c:309 src/stored/askdir.c:332 +#: src/stored/askdir.c:333 +msgid "NULL Volume name. This shouldn't happen!!!\n" +msgstr "" + +#: src/stored/append.c:109 src/stored/btape.c:1889 +#, c-format +msgid "Write session label failed. ERR=%s\n" +msgstr "" + +#: src/stored/append.c:121 +#, c-format +msgid "Network send error to FD. ERR=%s\n" +msgstr "" + +#: src/stored/append.c:158 +#, c-format +msgid "Error reading data header from FD. ERR=%s\n" +msgstr "" + +#: src/stored/append.c:180 +#, c-format +msgid "Malformed data header from FD: %s\n" +msgstr "" + +#: src/stored/append.c:190 +msgid "File index from FD not positive or sequential\n" +msgstr "" + +#: src/stored/append.c:244 src/stored/mac.c:248 +#, c-format +msgid "Error updating file attributes. ERR=%s\n" +msgstr "" + +#: src/stored/append.c:258 +#, c-format +msgid "Network error on data channel. ERR=%s\n" +msgstr "" + +#: src/stored/append.c:279 +#, c-format +msgid "" +"Job write elapsed time = %02d:%02d:%02d, Transfer rate = %s bytes/second\n" +msgstr "" + +#: src/stored/append.c:292 src/stored/btape.c:2013 +#, c-format +msgid "Error writing end session label. 
ERR=%s\n" +msgstr "" + +#: src/stored/append.c:303 src/stored/mac.c:124 src/stored/mac.c:220 +#: src/stored/spool.c:293 +#, c-format +msgid "Fatal append error on device %s: ERR=%s\n" +msgstr "" + +#: src/stored/append.c:305 src/stored/mac.c:126 +msgid "Set ok=FALSE after write_block_to_device.\n" +msgstr "" + +#: src/stored/askdir.c:178 +msgid "Network error on bnet_recv in req_vol_info.\n" +msgstr "" + +#: src/stored/askdir.c:195 +#, c-format +msgid "Error getting Volume info: %s" +msgstr "" + +#: src/stored/askdir.c:363 +#, c-format +msgid "Didn't get vol info vol=%s: ERR=%s" +msgstr "" + +#: src/stored/askdir.c:405 +#, c-format +msgid "Error creating JobMedia record: ERR=%s\n" +msgstr "" + +#: src/stored/askdir.c:412 +#, c-format +msgid "Error creating JobMedia record: %s\n" +msgstr "" + +#: src/stored/askdir.c:478 +#, c-format +msgid "Job %s canceled while waiting for mount on Storage Device \"%s\".\n" +msgstr "" + +#: src/stored/askdir.c:491 +#, c-format +msgid "" +"Job %s waiting. Cannot find any appendable volumes.\n" +"Please use the \"label\" command to create a new Volume for:\n" +" Storage: %s\n" +" Pool: %s\n" +" Media type: %s\n" +msgstr "" + +#: src/stored/askdir.c:515 src/stored/askdir.c:605 +#, c-format +msgid "Max time exceeded waiting to mount Storage Device %s for Job %s\n" +msgstr "" + +#: src/stored/askdir.c:525 +msgid "pthread error in mount_next_volume.\n" +msgstr "" + +#: src/stored/askdir.c:557 +msgid "Cannot request another volume: no volume name given.\n" +msgstr "" + +#: src/stored/askdir.c:563 +#, c-format +msgid "Job %s canceled while waiting for mount on Storage Device %s.\n" +msgstr "" + +#: src/stored/askdir.c:578 +#, c-format +msgid "" +"Please mount Volume \"%s\" or label a new one for:\n" +" Job: %s\n" +" Storage: %s\n" +" Pool: %s\n" +" Media type: %s\n" +msgstr "" + +#: src/stored/askdir.c:615 +msgid "pthread error in mount_volume\n" +msgstr "" + +#: src/stored/authenticate.c:60 +#, c-format +msgid "I only authenticate Directors, not %d\n" +msgstr "" + +#: src/stored/authenticate.c:90 +#, c-format +msgid "" +"Connection from unknown Director %s at %s rejected.\n" +"Please see http://www.bacula.org/rel-manual/faq.html#AuthorizationErrors for " +"help.\n" +msgstr "" + +#: src/stored/authenticate.c:123 +msgid "" +"Incorrect password given by Director.\n" +"Please see http://www.bacula.org/rel-manual/faq.html#AuthorizationErrors for " +"help.\n" +msgstr "" + +#: src/stored/authenticate.c:179 +#, c-format +msgid "Unable to authenticate Director at %s.\n" +msgstr "" + +#: src/stored/authenticate.c:223 src/stored/authenticate.c:257 +#, c-format +msgid "" +"Incorrect authorization key from File daemon at %s rejected.\n" +"Please see http://www.bacula.org/rel-manual/faq.html#AuthorizationErrors for " +"help.\n" +msgstr "" + +#: src/stored/autochanger.c:66 +#, c-format +msgid "No Changer Name given for device %s. Cannot continue.\n" +msgstr "" + +#: src/stored/autochanger.c:72 +#, c-format +msgid "No Changer Command given for device %s. Cannot continue.\n" +msgstr "" + +#: src/stored/autochanger.c:85 +#, c-format +msgid "" +"Media Type not the same for all devices in changer %s. Cannot continue.\n" +msgstr "" + +#: src/stored/autochanger.c:149 +#, c-format +msgid "" +"Invalid slot=%d defined in catalog for Volume \"%s\" on %s. Manual load may " +"be required.\n" +msgstr "" + +#: src/stored/autochanger.c:154 +#, c-format +msgid "No \"Changer Device\" for %s. 
Manual load of Volume may be required.\n" +msgstr "" + +#: src/stored/autochanger.c:158 +#, c-format +msgid "No \"Changer Command\" for %s. Manual load of Volume may be required.\n" +msgstr "" + +#: src/stored/autochanger.c:188 +#, c-format +msgid "3304 Issuing autochanger \"load slot %d, drive %d\" command.\n" +msgstr "" + +#: src/stored/autochanger.c:196 +#, c-format +msgid "3305 Autochanger \"load slot %d, drive %d\", status is OK.\n" +msgstr "" + +#: src/stored/autochanger.c:205 +#, c-format +msgid "" +"3992 Bad autochanger \"load slot %d, drive %d\": ERR=%s.\n" +"Results=%s\n" +msgstr "" + +#: src/stored/autochanger.c:251 +msgid "3992 Missing Changer command.\n" +msgstr "" + +#: src/stored/autochanger.c:265 +#, c-format +msgid "3301 Issuing autochanger \"loaded? drive %d\" command.\n" +msgstr "" + +#: src/stored/autochanger.c:275 +#, c-format +msgid "3302 Autochanger \"loaded? drive %d\", result is Slot %d.\n" +msgstr "" + +#: src/stored/autochanger.c:279 +#, c-format +msgid "3302 Autochanger \"loaded? drive %d\", result: nothing loaded.\n" +msgstr "" + +#: src/stored/autochanger.c:286 +#, c-format +msgid "" +"3991 Bad autochanger \"loaded? drive %d\" command: ERR=%s.\n" +"Results=%s\n" +msgstr "" + +#: src/stored/autochanger.c:345 src/stored/autochanger.c:436 +#, c-format +msgid "3307 Issuing autochanger \"unload slot %d, drive %d\" command.\n" +msgstr "" + +#: src/stored/autochanger.c:359 +#, c-format +msgid "" +"3995 Bad autochanger \"unload slot %d, drive %d\": ERR=%s\n" +"Results=%s\n" +msgstr "" + +#: src/stored/autochanger.c:423 +#, c-format +msgid "Volume \"%s\" is in use by device %s\n" +msgstr "" + +#: src/stored/autochanger.c:458 +#, c-format +msgid "3995 Bad autochanger \"unload slot %d, drive %d\": ERR=%s.\n" +msgstr "" + +#: src/stored/autochanger.c:498 +#, c-format +msgid "3993 Device %s not an autochanger device.\n" +msgstr "" + +#: src/stored/autochanger.c:519 +#, c-format +msgid "3306 Issuing autochanger \"%s\" command.\n" +msgstr "" + +#: src/stored/autochanger.c:522 +msgid "3996 Open bpipe failed.\n" +msgstr "" + +#: src/stored/bcopy.c:72 +#, c-format +msgid "" +"\n" +"Version: %s (%s)\n" +"\n" +"Usage: bcopy [-d debug_level] \n" +" -b bootstrap specify a bootstrap file\n" +" -c specify configuration file\n" +" -d set debug level to nn\n" +" -i specify input Volume names (separated by |)\n" +" -o specify output Volume names (separated by |)\n" +" -p proceed inspite of errors\n" +" -v verbose\n" +" -w specify working directory (default /tmp)\n" +" -? print this message\n" +"\n" +msgstr "" + +#: src/stored/bcopy.c:151 src/stored/bextract.c:187 src/stored/bscan.c:228 +msgid "Wrong number of arguments: \n" +msgstr "" + +#: src/stored/bcopy.c:191 src/stored/btape.c:364 src/stored/device.c:296 +#, c-format +msgid "dev open failed: %s\n" +msgstr "" + +#: src/stored/bcopy.c:204 +msgid "Write of last block failed.\n" +msgstr "" + +#: src/stored/bcopy.c:207 +#, c-format +msgid "%u Jobs copied. %u records copied.\n" +msgstr "" + +#: src/stored/bcopy.c:224 src/stored/bscan.c:402 +#, c-format +msgid "Record: SessId=%u SessTim=%u FileIndex=%d Stream=%d len=%u\n" +msgstr "" + +#: src/stored/bcopy.c:239 +msgid "Volume is prelabeled. This volume cannot be copied.\n" +msgstr "" + +#: src/stored/bcopy.c:242 +msgid "Volume label not copied.\n" +msgstr "" + +#: src/stored/bcopy.c:254 src/stored/bcopy.c:261 src/stored/bcopy.c:284 +#: src/stored/btape.c:2386 +#, c-format +msgid "Cannot fixup device error. 
%s\n" +msgstr "" + +#: src/stored/bcopy.c:266 +msgid "EOM label not copied.\n" +msgstr "" + +#: src/stored/bcopy.c:269 +msgid "EOT label not copied.\n" +msgstr "" + +#: src/stored/bcopy.c:305 src/stored/bextract.c:489 src/stored/bls.c:452 +#: src/stored/bscan.c:1283 src/stored/btape.c:2685 +#, c-format +msgid "Mount Volume \"%s\" on device %s and press return when ready: " +msgstr "" + +#: src/stored/bextract.c:78 +#, c-format +msgid "" +"\n" +"Version: %s (%s)\n" +"\n" +"Usage: bextract \n" +" -b specify a bootstrap file\n" +" -c specify a configuration file\n" +" -d set debug level to nn\n" +" -e exclude list\n" +" -i include list\n" +" -p proceed inspite of I/O errors\n" +" -v verbose\n" +" -V specify Volume names (separated by |)\n" +" -? print this message\n" +"\n" +msgstr "" + +#: src/stored/bextract.c:137 src/stored/bls.c:140 +#, c-format +msgid "Could not open exclude file: %s, ERR=%s\n" +msgstr "" + +#: src/stored/bextract.c:152 src/stored/bls.c:154 +#, c-format +msgid "Could not open include file: %s, ERR=%s\n" +msgstr "" + +#: src/stored/bextract.c:208 +#, c-format +msgid "%d Program Name and/or Program Data Stream records ignored.\n" +msgstr "" + +#: src/stored/bextract.c:212 +#, c-format +msgid "%d Win32 data or Win32 gzip data stream records. Ignored.\n" +msgstr "" + +#: src/stored/bextract.c:239 +#, c-format +msgid "Cannot stat %s. It must exist. ERR=%s\n" +msgstr "" + +#: src/stored/bextract.c:243 +#, c-format +msgid "%s must be a directory.\n" +msgstr "" + +#: src/stored/bextract.c:264 +#, c-format +msgid "%u files restored.\n" +msgstr "" + +#: src/stored/bextract.c:291 src/stored/bextract.c:464 +msgid "Logic error output file should be open but is not.\n" +msgstr "" + +#: src/stored/bextract.c:298 src/stored/bls.c:376 src/stored/bscan.c:659 +msgid "Cannot continue.\n" +msgstr "" + +#: src/stored/bextract.c:360 +#, c-format +msgid "Seek error on %s: %s\n" +msgstr "" + +#: src/stored/bextract.c:413 +#, c-format +msgid "Uncompression error. ERR=%d\n" +msgstr "" + +#: src/stored/bextract.c:421 +msgid "===Write error===\n" +msgstr "" + +#: src/stored/bextract.c:455 +msgid "Got Program Name or Data Stream. Ignored.\n" +msgstr "" + +#: src/stored/block.c:91 +#, c-format +msgid "" +"Dump block %s %x: size=%d BlkNum=%d\n" +" Hdrcksum=%x cksum=%x\n" +msgstr "" + +#: src/stored/block.c:104 +#, c-format +msgid " Rec: VId=%u VT=%u FI=%s Strm=%s len=%d p=%x\n" +msgstr "" + +#: src/stored/block.c:160 +#, c-format +msgid "%d block read errors not printed.\n" +msgstr "" + +#: src/stored/block.c:248 src/stored/block.c:264 src/stored/block.c:274 +#, c-format +msgid "" +"Volume data error at %u:%u! Wanted ID: \"%s\", got \"%s\". Buffer " +"discarded.\n" +msgstr "" + +#: src/stored/block.c:288 +#, c-format +msgid "" +"Volume data error at %u:%u! Block length %u is insane (too large), probably " +"due to a bad archive.\n" +msgstr "" + +#: src/stored/block.c:314 +#, c-format +msgid "" +"Volume data error at %u:%u!\n" +"Block checksum mismatch in block=%u len=%d: calc=%x blk=%x\n" +msgstr "" + +#: src/stored/block.c:425 +msgid "Cannot write block. Device at EOM.\n" +msgstr "" + +#: src/stored/block.c:430 +msgid "Attempt to write on read-only Volume.\n" +msgstr "" + +#: src/stored/block.c:482 +#, c-format +msgid "User defined maximum volume capacity %s exceeded on device %s.\n" +msgstr "" + +#: src/stored/block.c:497 +#, c-format +msgid "Unable to write EOF. 
ERR=%s\n" +msgstr "" + +#: src/stored/block.c:523 src/stored/block.c:548 +msgid "Write block header zeroed.\n" +msgstr "" + +#: src/stored/block.c:567 +#, c-format +msgid "Write error at %u:%u on device %s. ERR=%s.\n" +msgstr "" + +#: src/stored/block.c:574 +#, c-format +msgid "End of Volume \"%s\" at %u:%u on device %s. Write of %u bytes got %d.\n" +msgstr "" + +#: src/stored/block.c:650 src/stored/block.c:656 +#, c-format +msgid "Backspace file at EOT failed. ERR=%s\n" +msgstr "" + +#: src/stored/block.c:663 +#, c-format +msgid "Backspace record at EOT failed. ERR=%s\n" +msgstr "" + +#: src/stored/block.c:680 +#, c-format +msgid "Re-read last block at EOT failed. ERR=%s" +msgstr "" + +#: src/stored/block.c:690 +#, c-format +msgid "" +"Re-read of last block OK, but block numbers differ. Last block=%u Current " +"block=%u.\n" +msgstr "" + +#: src/stored/block.c:693 +msgid "Re-read of last block succeeded.\n" +msgstr "" + +#: src/stored/block.c:721 +#, c-format +msgid "" +"Error writing final EOF to tape. This Volume may not be readable.\n" +"%s" +msgstr "" + +#: src/stored/block.c:735 +#, c-format +msgid "" +"Error writing final part to DVD. This Volume may not be readable.\n" +"%s" +msgstr "" + +#: src/stored/block.c:837 +#, c-format +msgid "" +"Error while writing, current part number is less than the total number of " +"parts (%d/%d, device=%s)\n" +msgstr "" + +#: src/stored/block.c:845 +#, c-format +msgid "Unable to open device next part %s: ERR=%s\n" +msgstr "" + +#: src/stored/block.c:865 +#, c-format +msgid "" +"End of Volume \"%s\" at %u:%u on device %s (part_size=%s, free_space=%s, " +"free_space_errno=%d, errmsg=%s).\n" +msgstr "" + +#: src/stored/block.c:878 +#, c-format +msgid "" +"End of Volume \"%s\" at %u:%u on device %s (part_size=%s, free_space=%s, " +"free_space_errno=%d).\n" +msgstr "" + +#: src/stored/block.c:934 +#, c-format +msgid "Block buffer size looping problem on device %s\n" +msgstr "" + +#: src/stored/block.c:962 +#, c-format +msgid "Unable to open device part=%d %s: ERR=%s\n" +msgstr "" + +#: src/stored/block.c:988 +#, c-format +msgid "Read error on fd=%d at file:blk %u:%u on device %s. ERR=%s.\n" +msgstr "" + +#: src/stored/block.c:1001 +#, c-format +msgid "Read zero bytes at %u:%u on device %s.\n" +msgstr "" + +#: src/stored/block.c:1014 +#, c-format +msgid "" +"Volume data error at %u:%u! Very short block of %d bytes on device %s " +"discarded.\n" +msgstr "" + +#: src/stored/block.c:1039 +#, c-format +msgid "Block length %u is greater than buffer %u. Attempting recovery.\n" +msgstr "" + +#: src/stored/block.c:1058 +#, c-format +msgid "Setting block buffer size to %u bytes.\n" +msgstr "" + +#: src/stored/block.c:1073 +#, c-format +msgid "" +"Volume data error at %u:%u! Short block of %d bytes on device %s discarded.\n" +msgstr "" + +#: src/stored/bls.c:78 +#, c-format +msgid "" +"\n" +"Version: %s (%s)\n" +"\n" +"Usage: bls [options] \n" +" -b specify a bootstrap file\n" +" -c specify a config file\n" +" -d specify debug level\n" +" -e exclude list\n" +" -i include list\n" +" -j list jobs\n" +" -k list blocks\n" +" (no j or k option) list saved files\n" +" -L dump label\n" +" -p proceed inspite of errors\n" +" -v be verbose\n" +" -V specify Volume names (separated by |)\n" +" -? 
print this message\n" +"\n" +msgstr "" + +#: src/stored/bls.c:201 +msgid "No archive name specified\n" +msgstr "" + +#: src/stored/bls.c:236 +#, c-format +msgid "" +"\n" +"Warning, this Volume is a continuation of Volume %s\n" +msgstr "" + +#: src/stored/bls.c:279 +#, c-format +msgid "Got EOM at file %u on device %s, Volume \"%s\"\n" +msgstr "" + +#: src/stored/bls.c:290 +#, c-format +msgid "Mounted Volume \"%s\".\n" +msgstr "" + +#: src/stored/bls.c:292 +#, c-format +msgid "End of file %u on device %s, Volume \"%s\"\n" +msgstr "" + +#: src/stored/bls.c:316 +#, c-format +msgid "" +"File:blk=%u:%u blk_num=%u blen=%u First rec FI=%s SessId=%u SessTim=%u Strm=%" +"s rlen=%d\n" +msgstr "" + +#: src/stored/bls.c:325 +#, c-format +msgid "Block: %d size=%d\n" +msgstr "" + +#: src/stored/bls.c:392 +#, c-format +msgid "FileIndex=%d VolSessionId=%d VolSessionTime=%d Stream=%d DataLen=%d\n" +msgstr "" + +#: src/stored/bls.c:409 src/stored/read_record.c:388 +msgid "Fresh Volume Label" +msgstr "" + +#: src/stored/bls.c:412 src/stored/read_record.c:391 +msgid "Volume Label" +msgstr "" + +#: src/stored/bls.c:416 src/stored/label.c:1031 +msgid "Begin Job Session" +msgstr "" + +#: src/stored/bls.c:420 src/stored/label.c:1034 +msgid "End Job Session" +msgstr "" + +#: src/stored/bls.c:424 +msgid "End of Medium" +msgstr "" + +#: src/stored/bls.c:427 src/stored/label.c:1043 +msgid "Unknown" +msgstr "" + +#: src/stored/bls.c:433 src/stored/read_record.c:409 +#, c-format +msgid "%s Record: VolSessionId=%d VolSessionTime=%d JobId=%d DataLen=%d\n" +msgstr "" + +#: src/stored/bscan.c:115 +#, c-format +msgid "" +"\n" +"Version: %s (%s)\n" +"\n" +"Usage: bscan [ options ] \n" +" -b bootstrap specify a bootstrap file\n" +" -c specify configuration file\n" +" -d set debug level to nn\n" +" -m update media info in database\n" +" -n specify the database name (default bacula)\n" +" -u specify database user name (default bacula)\n" +" -P specify database host (default NULL)\n" +" -p proceed inspite of I/O errors\n" +" -r list records\n" +" -s synchronize or store in database\n" +" -S show scan progress periodically\n" +" -v verbose\n" +" -V specify Volume names (separated by |)\n" +" -w specify working directory (default from conf file)\n" +" -? print this message\n" +"\n" +msgstr "" + +#: src/stored/bscan.c:241 src/stored/stored.c:288 +#, c-format +msgid "No Storage resource defined in %s. Cannot continue.\n" +msgstr "" + +#: src/stored/bscan.c:249 src/stored/stored.c:319 +#, c-format +msgid "No Working Directory defined in %s. Cannot continue.\n" +msgstr "" + +#: src/stored/bscan.c:257 +#, c-format +msgid "Working Directory: %s not found. Cannot continue.\n" +msgstr "" + +#: src/stored/bscan.c:261 +#, c-format +msgid "Working Directory: %s is not a directory. Cannot continue.\n" +msgstr "" + +#: src/stored/bscan.c:275 src/stored/bscan.c:349 +#, c-format +msgid "First Volume Size = %sn" +msgstr "" + +#: src/stored/bscan.c:281 +msgid "Could not init Bacula database\n" +msgstr "" + +#: src/stored/bscan.c:288 +#, c-format +msgid "Using Database: %s, User: %s\n" +msgstr "" + +#: src/stored/bscan.c:323 +#, c-format +msgid "Create JobMedia for Job %s\n" +msgstr "" + +#: src/stored/bscan.c:331 +#, c-format +msgid "Could not create JobMedia record for Volume=%s Job=%s\n" +msgstr "" + +#: src/stored/bscan.c:394 +#, c-format +msgid "done: %d%%\n" +msgstr "" + +#: src/stored/bscan.c:418 +msgid "Volume is prelabeled. 
This tape cannot be scanned.\n" +msgstr "" + +#: src/stored/bscan.c:430 +#, c-format +msgid "Pool record for %s found in DB.\n" +msgstr "" + +#: src/stored/bscan.c:434 +#, c-format +msgid "VOL_LABEL: Pool record not found for Pool: %s\n" +msgstr "" + +#: src/stored/bscan.c:440 +#, c-format +msgid "VOL_LABEL: PoolType mismatch. DB=%s Vol=%s\n" +msgstr "" + +#: src/stored/bscan.c:444 +#, c-format +msgid "Pool type \"%s\" is OK.\n" +msgstr "" + +#: src/stored/bscan.c:454 +#, c-format +msgid "Media record for %s found in DB.\n" +msgstr "" + +#: src/stored/bscan.c:461 +#, c-format +msgid "VOL_LABEL: Media record not found for Volume: %s\n" +msgstr "" + +#: src/stored/bscan.c:468 +#, c-format +msgid "VOL_LABEL: MediaType mismatch. DB=%s Vol=%s\n" +msgstr "" + +#: src/stored/bscan.c:472 +#, c-format +msgid "Media type \"%s\" is OK.\n" +msgstr "" + +#: src/stored/bscan.c:481 +#, c-format +msgid "VOL_LABEL: OK for Volume: %s\n" +msgstr "" + +#: src/stored/bscan.c:488 +#, c-format +msgid "%d \"errors\" ignored before first Start of Session record.\n" +msgstr "" + +#: src/stored/bscan.c:499 +#, c-format +msgid "SOS_LABEL: Found Job record for JobId: %d\n" +msgstr "" + +#: src/stored/bscan.c:504 +#, c-format +msgid "SOS_LABEL: Job record not found for JobId: %d\n" +msgstr "" + +#: src/stored/bscan.c:544 +#, c-format +msgid "SOS_LABEL: VolSessId mismatch for JobId=%u. DB=%d Vol=%d\n" +msgstr "" + +#: src/stored/bscan.c:550 +#, c-format +msgid "SOS_LABEL: VolSessTime mismatch for JobId=%u. DB=%d Vol=%d\n" +msgstr "" + +#: src/stored/bscan.c:556 +#, c-format +msgid "SOS_LABEL: PoolId mismatch for JobId=%u. DB=%d Vol=%d\n" +msgstr "" + +#: src/stored/bscan.c:574 src/stored/bscan.c:1077 +#, c-format +msgid "Could not find SessId=%d SessTime=%d for EOS record.\n" +msgstr "" + +#: src/stored/bscan.c:618 +#, c-format +msgid "Could not update job record. ERR=%s\n" +msgstr "" + +#: src/stored/bscan.c:629 +#, c-format +msgid "End of all Volumes. VolFiles=%u VolBlocks=%u VolBytes=%s\n" +msgstr "" + +#: src/stored/bscan.c:641 +#, c-format +msgid "Could not find Job for SessId=%d SessTime=%d record.\n" +msgstr "" + +#: src/stored/bscan.c:677 +#, c-format +msgid "%s file records. At file:blk=%s:%s bytes=%s\n" +msgstr "" + +#: src/stored/bscan.c:731 +#, c-format +msgid "Got MD5 record: %s\n" +msgstr "" + +#: src/stored/bscan.c:739 +#, c-format +msgid "Got SHA1 record: %s\n" +msgstr "" + +#: src/stored/bscan.c:747 +#, c-format +msgid "Got SHA256 record: %s\n" +msgstr "" + +#: src/stored/bscan.c:755 +#, c-format +msgid "Got SHA512 record: %s\n" +msgstr "" + +#: src/stored/bscan.c:763 src/stored/bscan.c:770 +msgid "Got signed digest record\n" +msgstr "" + +#: src/stored/bscan.c:776 +#, c-format +msgid "Got Prog Names Stream: %s\n" +msgstr "" + +#: src/stored/bscan.c:782 +msgid "Got Prog Data Stream record.\n" +msgstr "" + +#: src/stored/bscan.c:792 +#, c-format +msgid "Unknown stream type!!! stream=%d len=%i\n" +msgstr "" + +#: src/stored/bscan.c:856 +#, c-format +msgid "Could not create File Attributes record. ERR=%s\n" +msgstr "" + +#: src/stored/bscan.c:862 +#, c-format +msgid "Created File record: %s\n" +msgstr "" + +#: src/stored/bscan.c:906 +#, c-format +msgid "Could not create media record. ERR=%s\n" +msgstr "" + +#: src/stored/bscan.c:910 src/stored/bscan.c:931 +#, c-format +msgid "Could not update media record. 
ERR=%s\n" +msgstr "" + +#: src/stored/bscan.c:914 +#, c-format +msgid "Created Media record for Volume: %s\n" +msgstr "" + +#: src/stored/bscan.c:935 +#, c-format +msgid "Updated Media record at end of Volume: %s\n" +msgstr "" + +#: src/stored/bscan.c:952 +#, c-format +msgid "Could not create pool record. ERR=%s\n" +msgstr "" + +#: src/stored/bscan.c:956 +#, c-format +msgid "Created Pool record for Pool: %s\n" +msgstr "" + +#: src/stored/bscan.c:976 +#, c-format +msgid "Created Client record for Client: %s\n" +msgstr "" + +#: src/stored/bscan.c:993 +#, c-format +msgid "Fileset \"%s\" already exists.\n" +msgstr "" + +#: src/stored/bscan.c:997 +#, c-format +msgid "Could not create FileSet record \"%s\". ERR=%s\n" +msgstr "" + +#: src/stored/bscan.c:1002 +#, c-format +msgid "Created FileSet record \"%s\"\n" +msgstr "" + +#: src/stored/bscan.c:1049 +#, c-format +msgid "Could not create JobId record. ERR=%s\n" +msgstr "" + +#: src/stored/bscan.c:1055 +#, c-format +msgid "Could not update job start record. ERR=%s\n" +msgstr "" + +#: src/stored/bscan.c:1058 +#, c-format +msgid "Created new JobId=%u record for original JobId=%u\n" +msgstr "" + +#: src/stored/bscan.c:1108 +#, c-format +msgid "Could not update JobId=%u record. ERR=%s\n" +msgstr "" + +#: src/stored/bscan.c:1113 +#, c-format +msgid "Updated Job termination record for JobId=%u Level=%s TermStat=%c\n" +msgstr "" + +#: src/stored/bscan.c:1135 +#, c-format +msgid "Job Termination code: %d" +msgstr "" + +#: src/stored/bscan.c:1140 +#, c-format +msgid "" +"%s\n" +"JobId: %d\n" +"Job: %s\n" +"FileSet: %s\n" +"Backup Level: %s\n" +"Client: %s\n" +"Start time: %s\n" +"End time: %s\n" +"Files Written: %s\n" +"Bytes Written: %s\n" +"Volume Session Id: %d\n" +"Volume Session Time: %d\n" +"Last Volume Bytes: %s\n" +"Termination: %s\n" +"\n" +msgstr "" + +#: src/stored/bscan.c:1197 +#, c-format +msgid "Could not create JobMedia record. ERR=%s\n" +msgstr "" + +#: src/stored/bscan.c:1201 +#, c-format +msgid "Created JobMedia record JobId %d, MediaId %d\n" +msgstr "" + +#: src/stored/bscan.c:1217 +#, c-format +msgid "Could not find SessId=%d SessTime=%d for MD5/SHA1 record.\n" +msgstr "" + +#: src/stored/bscan.c:1231 +#, c-format +msgid "Could not add MD5/SHA1 to File record. ERR=%s\n" +msgstr "" + +#: src/stored/bscan.c:1236 +msgid "Updated MD5/SHA1 record\n" +msgstr "" + +#: src/stored/btape.c:172 src/stored/stored.c:128 +#, c-format +msgid "Tape block size (%d) not multiple of system size (%d)\n" +msgstr "" + +#: src/stored/btape.c:176 src/stored/stored.c:132 +#, c-format +msgid "Tape block size (%d) is not a power of 2\n" +msgstr "" + +#: src/stored/btape.c:179 +#, c-format +msgid "" +"\n" +"\n" +"!!!! Warning large disk addressing disabled. boffset_t=%d should be 8 or " +"more !!!!!\n" +"\n" +"\n" +msgstr "" + +#: src/stored/btape.c:186 +#, c-format +msgid "32 bit printf/scanf problem. i=%d x32=%u y32=%u\n" +msgstr "" + +#: src/stored/btape.c:195 +msgid "64 bit printf/scanf problem. 
i=%d x64=%" +msgstr "" + +#: src/stored/btape.c:195 +msgid " y64=%" +msgstr "" + +#: src/stored/btape.c:200 +#, c-format +msgid "Tape block granularity is %d bytes.\n" +msgstr "" + +#: src/stored/btape.c:269 +msgid "No archive name specified.\n" +msgstr "" + +#: src/stored/btape.c:273 +msgid "Improper number of arguments specified.\n" +msgstr "" + +#: src/stored/btape.c:287 +msgid "btape does not work with DVD storage.\n" +msgstr "" + +#: src/stored/btape.c:292 +msgid "btape only works with tape storage.\n" +msgstr "" + +#: src/stored/btape.c:368 +#, c-format +msgid "open device %s: OK\n" +msgstr "" + +#: src/stored/btape.c:391 +msgid "Enter Volume Name: " +msgstr "" + +#: src/stored/btape.c:398 +#, c-format +msgid "Device open failed. ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:404 +#, c-format +msgid "Wrote Volume label for volume \"%s\".\n" +msgstr "" + +#: src/stored/btape.c:418 +msgid "Volume has no label.\n" +msgstr "" + +#: src/stored/btape.c:421 +msgid "Volume label read correctly.\n" +msgstr "" + +#: src/stored/btape.c:424 +#, c-format +msgid "I/O error on device: ERR=%s" +msgstr "" + +#: src/stored/btape.c:427 +msgid "Volume name error\n" +msgstr "" + +#: src/stored/btape.c:430 +#, c-format +msgid "Error creating label. ERR=%s" +msgstr "" + +#: src/stored/btape.c:433 +msgid "Volume version error.\n" +msgstr "" + +#: src/stored/btape.c:436 +msgid "Bad Volume label type.\n" +msgstr "" + +#: src/stored/btape.c:439 +msgid "Unknown error.\n" +msgstr "" + +#: src/stored/btape.c:457 +#, c-format +msgid "Bad status from load. ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:459 +#, c-format +msgid "Loaded %s\n" +msgstr "" + +#: src/stored/btape.c:468 src/stored/btape.c:807 src/stored/btape.c:849 +#: src/stored/btape.c:919 src/stored/btape.c:961 src/stored/btape.c:1229 +#, c-format +msgid "Bad status from rewind. ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:471 src/stored/btape.c:1237 +#, c-format +msgid "Rewound %s\n" +msgstr "" + +#: src/stored/btape.c:497 src/stored/btape.c:1241 +#, c-format +msgid "Bad status from weof. ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:501 +#, c-format +msgid "Wrote 1 EOF to %s\n" +msgstr "" + +#: src/stored/btape.c:504 +#, c-format +msgid "Wrote %d EOFs to %s\n" +msgstr "" + +#: src/stored/btape.c:522 +msgid "Moved to end of medium.\n" +msgstr "" + +#: src/stored/btape.c:549 +#, c-format +msgid "Bad status from bsf. ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:551 +#, c-format +msgid "Backspaced %d file%s.\n" +msgstr "" + +#: src/stored/btape.c:568 +#, c-format +msgid "Bad status from bsr. ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:570 +#, c-format +msgid "Backspaced %d record%s.\n" +msgstr "" + +#: src/stored/btape.c:580 src/stored/status.c:302 +#, c-format +msgid "Configured device capabilities:\n" +msgstr "" + +#: src/stored/btape.c:598 +#, c-format +msgid "Device status:\n" +msgstr "" + +#: src/stored/btape.c:612 src/stored/status.c:341 +#, c-format +msgid "Device parameters:\n" +msgstr "" + +#: src/stored/btape.c:617 +#, c-format +msgid "Status:\n" +msgstr "" + +#: src/stored/btape.c:632 +msgid "" +"Test writing larger and larger records.\n" +"This is a torture test for records.\n" +"I am going to write\n" +"larger and larger records. It will stop when the record size\n" +"plus the header exceeds the block size (by default about 64K)\n" +msgstr "" + +#: src/stored/btape.c:638 +msgid "Do you want to continue? 
(y/n): " +msgstr "" + +#: src/stored/btape.c:640 src/stored/btape.c:1857 +msgid "Command aborted.\n" +msgstr "" + +#: src/stored/btape.c:656 +#, c-format +msgid "Block %d i=%d\n" +msgstr "" + +#: src/stored/btape.c:682 +msgid "Skipping read backwards test because BSR turned off.\n" +msgstr "" + +#: src/stored/btape.c:686 +msgid "" +"\n" +"=== Write, backup, and re-read test ===\n" +"\n" +"I'm going to write three records and an EOF\n" +"then backup over the EOF and re-read the last record.\n" +"Bacula does this after writing the last block on the\n" +"tape to verify that the block was written correctly.\n" +"\n" +"This is not an *essential* feature ...\n" +"\n" +msgstr "" + +#: src/stored/btape.c:699 src/stored/btape.c:710 src/stored/btape.c:721 +#: src/stored/btape.c:819 src/stored/btape.c:835 src/stored/btape.c:931 +#: src/stored/btape.c:947 src/stored/btape.c:1566 src/stored/btape.c:2451 +msgid "Error writing record to block.\n" +msgstr "" + +#: src/stored/btape.c:703 src/stored/btape.c:714 src/stored/btape.c:725 +#: src/stored/btape.c:823 src/stored/btape.c:839 src/stored/btape.c:935 +#: src/stored/btape.c:951 src/stored/btape.c:1570 src/stored/btape.c:2455 +msgid "Error writing block to device.\n" +msgstr "" + +#: src/stored/btape.c:706 +#, c-format +msgid "Wrote first record of %d bytes.\n" +msgstr "" + +#: src/stored/btape.c:717 +#, c-format +msgid "Wrote second record of %d bytes.\n" +msgstr "" + +#: src/stored/btape.c:728 +#, c-format +msgid "Wrote third record of %d bytes.\n" +msgstr "" + +#: src/stored/btape.c:735 src/stored/btape.c:740 +#, c-format +msgid "Backspace file failed! ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:744 +msgid "Backspaced over EOF OK.\n" +msgstr "" + +#: src/stored/btape.c:746 +#, c-format +msgid "Backspace record failed! ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:749 +msgid "Backspace record OK.\n" +msgstr "" + +#: src/stored/btape.c:752 src/stored/btape.c:758 +#, c-format +msgid "Read block failed! ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:763 +msgid "Bad data in record. Test failed!\n" +msgstr "" + +#: src/stored/btape.c:767 +msgid "" +"\n" +"Block re-read correct. Test succeeded!\n" +msgstr "" + +#: src/stored/btape.c:768 +msgid "" +"=== End Write, backup, and re-read test ===\n" +"\n" +msgstr "" + +#: src/stored/btape.c:775 +msgid "" +"This is not terribly serious since Bacula only uses\n" +"this function to verify the last block written to the\n" +"tape. Bacula will skip the last block verification\n" +"if you add:\n" +"\n" +"Backward Space Record = No\n" +"\n" +"to your Storage daemon's Device resource definition.\n" +msgstr "" + +#: src/stored/btape.c:799 +msgid "" +"\n" +"=== Write, rewind, and re-read test ===\n" +"\n" +"I'm going to write 1000 records and an EOF\n" +"then write 1000 records and an EOF, then rewind,\n" +"and re-read the data to verify that it is correct.\n" +"\n" +"This is an *essential* feature ...\n" +"\n" +msgstr "" + +#: src/stored/btape.c:827 src/stored/btape.c:843 src/stored/btape.c:939 +#: src/stored/btape.c:955 +#, c-format +msgid "Wrote 1000 blocks of %d bytes.\n" +msgstr "" + +#: src/stored/btape.c:852 src/stored/btape.c:964 +msgid "Rewind OK.\n" +msgstr "" + +#: src/stored/btape.c:859 src/stored/btape.c:1013 +msgid "Got EOF on tape.\n" +msgstr "" + +#: src/stored/btape.c:864 +#, c-format +msgid "Read block %d failed! ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:870 +#, c-format +msgid "Read record failed. Block %d! 
ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:876 src/stored/btape.c:1043 +#, c-format +msgid "Bad data in record. Expected %d, got %d at byte %d. Test failed!\n" +msgstr "" + +#: src/stored/btape.c:883 +msgid "1000 blocks re-read correctly.\n" +msgstr "" + +#: src/stored/btape.c:886 src/stored/btape.c:1050 +msgid "" +"=== Test Succeeded. End Write, rewind, and re-read test ===\n" +"\n" +msgstr "" + +#: src/stored/btape.c:911 +msgid "" +"\n" +"=== Write, rewind, and position test ===\n" +"\n" +"I'm going to write 1000 records and an EOF\n" +"then write 1000 records and an EOF, then rewind,\n" +"and position to a few blocks and verify that it is correct.\n" +"\n" +"This is an *essential* feature ...\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1004 +#, c-format +msgid "Reposition to file:block %d:%d\n" +msgstr "" + +#: src/stored/btape.c:1006 +msgid "Reposition error.\n" +msgstr "" + +#: src/stored/btape.c:1019 +#, c-format +msgid "" +"Read block %d failed! file=%d blk=%d. ERR=%s\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1021 +msgid "" +"This may be because the tape drive block size is not\n" +" set to variable blocking as normally used by Bacula.\n" +" Please see the Tape Testing chapter in the manual and \n" +" look for using mt with defblksize and setoptions\n" +"If your tape drive block size is correct, then perhaps\n" +" your SCSI driver is *really* stupid and does not\n" +" correctly report the file:block after a FSF. In this\n" +" case try setting:\n" +" Fast Forward Space File = no\n" +" in your Device resource.\n" +msgstr "" + +#: src/stored/btape.c:1037 +#, c-format +msgid "Read record failed! ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:1048 +#, c-format +msgid "Block %d re-read correctly.\n" +msgstr "" + +#: src/stored/btape.c:1069 +msgid "" +"\n" +"\n" +"=== Append files test ===\n" +"\n" +"This test is essential to Bacula.\n" +"\n" +"I'm going to write one record in file 0,\n" +" two records in file 1,\n" +" and three records in file 2\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1093 +msgid "Now moving to end of medium.\n" +msgstr "" + +#: src/stored/btape.c:1095 src/stored/btape.c:1324 +#, c-format +msgid "We should be in file 3. I am at file %d. %s\n" +msgstr "" + +#: src/stored/btape.c:1096 src/stored/btape.c:1114 src/stored/btape.c:1313 +#: src/stored/btape.c:1325 src/stored/btape.c:1338 src/stored/btape.c:1355 +msgid "This is correct!" +msgstr "" + +#: src/stored/btape.c:1096 src/stored/btape.c:1114 src/stored/btape.c:1313 +#: src/stored/btape.c:1325 src/stored/btape.c:1338 src/stored/btape.c:1355 +msgid "This is NOT correct!!!!" +msgstr "" + +#: src/stored/btape.c:1102 +msgid "" +"\n" +"Now the important part, I am going to attempt to append to the tape.\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1109 +msgid "" +"Done appending, there should be no I/O errors\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1110 +msgid "Doing Bacula scan of blocks:\n" +msgstr "" + +#: src/stored/btape.c:1112 +msgid "End scanning the tape.\n" +msgstr "" + +#: src/stored/btape.c:1113 src/stored/btape.c:1337 +#, c-format +msgid "We should be in file 4. I am at file %d. 
%s\n" +msgstr "" + +#: src/stored/btape.c:1138 +msgid "" +"\n" +"Autochanger enabled, but no name or no command device specified.\n" +msgstr "" + +#: src/stored/btape.c:1142 +msgid "" +"\n" +"Ah, I see you have an autochanger configured.\n" +"To test the autochanger you must have a blank tape\n" +" that I can write on in Slot 1.\n" +msgstr "" + +#: src/stored/btape.c:1145 +msgid "" +"\n" +"Do you wish to continue with the Autochanger test? (y/n): " +msgstr "" + +#: src/stored/btape.c:1152 +msgid "" +"\n" +"\n" +"=== Autochanger test ===\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1161 +msgid "3301 Issuing autochanger \"loaded\" command.\n" +msgstr "" + +#: src/stored/btape.c:1170 +#, c-format +msgid "3991 Bad autochanger command: %s\n" +msgstr "" + +#: src/stored/btape.c:1171 +#, c-format +msgid "3991 result=\"%s\": ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:1175 +#, c-format +msgid "Slot %d loaded. I am going to unload it.\n" +msgstr "" + +#: src/stored/btape.c:1177 +msgid "Nothing loaded in the drive. OK.\n" +msgstr "" + +#: src/stored/btape.c:1184 +#, c-format +msgid "3302 Issuing autochanger \"unload %d %d\" command.\n" +msgstr "" + +#: src/stored/btape.c:1189 +#, c-format +msgid "unload status=%s %d\n" +msgstr "" + +#: src/stored/btape.c:1189 +msgid "Bad" +msgstr "" + +#: src/stored/btape.c:1192 +#, c-format +msgid "3992 Bad autochanger command: %s\n" +msgstr "" + +#: src/stored/btape.c:1193 +#, c-format +msgid "3992 result=\"%s\": ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:1203 +#, c-format +msgid "3303 Issuing autochanger \"load %d %d\" command.\n" +msgstr "" + +#: src/stored/btape.c:1211 +#, c-format +msgid "3303 Autochanger \"load %d %d\" status is OK.\n" +msgstr "" + +#: src/stored/btape.c:1215 +#, c-format +msgid "3993 Bad autochanger command: %s\n" +msgstr "" + +#: src/stored/btape.c:1216 +#, c-format +msgid "3993 result=\"%s\": ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:1231 +msgid "" +"\n" +"The test failed, probably because you need to put\n" +"a longer sleep time in the mtx-script in the load) case.\n" +"Adding a 30 second sleep and trying again ...\n" +msgstr "" + +#: src/stored/btape.c:1244 +#, c-format +msgid "Wrote EOF to %s\n" +msgstr "" + +#: src/stored/btape.c:1248 +#, c-format +msgid "" +"\n" +"The test worked this time. Please add:\n" +"\n" +" sleep %d\n" +"\n" +"to your mtx-changer script in the load) case.\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1253 +msgid "" +"\n" +"The test autochanger worked!!\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1264 +msgid "You must correct this error or the Autochanger will not work.\n" +msgstr "" + +#: src/stored/btape.c:1282 +msgid "" +"\n" +"\n" +"=== Forward space files test ===\n" +"\n" +"This test is essential to Bacula.\n" +"\n" +"I'm going to write five files then test forward spacing\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1307 +msgid "Now forward spacing 1 file.\n" +msgstr "" + +#: src/stored/btape.c:1309 src/stored/btape.c:1321 src/stored/btape.c:1334 +#: src/stored/btape.c:1352 src/stored/btape.c:1521 +#, c-format +msgid "Bad status from fsr. ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:1312 +#, c-format +msgid "We should be in file 1. I am at file %d. %s\n" +msgstr "" + +#: src/stored/btape.c:1319 +msgid "Now forward spacing 2 files.\n" +msgstr "" + +#: src/stored/btape.c:1332 +msgid "Now forward spacing 4 files.\n" +msgstr "" + +#: src/stored/btape.c:1344 +msgid "" +"The test worked this time. 
Please add:\n" +"\n" +" Fast Forward Space File = no\n" +"\n" +"to your Device resource for this drive.\n" +msgstr "" + +#: src/stored/btape.c:1350 +msgid "Now forward spacing 1 more file.\n" +msgstr "" + +#: src/stored/btape.c:1354 +#, c-format +msgid "We should be in file 5. I am at file %d. %s\n" +msgstr "" + +#: src/stored/btape.c:1359 +msgid "" +"\n" +"=== End Forward space files test ===\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1363 +msgid "" +"\n" +"The forward space file test failed.\n" +msgstr "" + +#: src/stored/btape.c:1365 +msgid "" +"You have Fast Forward Space File enabled.\n" +"I am turning it off then retrying the test.\n" +msgstr "" + +#: src/stored/btape.c:1371 +msgid "" +"You must correct this error or Bacula will not work.\n" +"Some systems, e.g. OpenBSD, require you to set\n" +" Use MTIOCGET= no\n" +"in your device resource. Use with caution.\n" +msgstr "" + +#: src/stored/btape.c:1403 +msgid "" +"\n" +"Append test failed. Attempting again.\n" +"Setting \"Hardware End of Medium = no\n" +" and \"Fast Forward Space File = no\n" +"and retrying append test.\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1411 +msgid "" +"\n" +"\n" +"It looks like the test worked this time, please add:\n" +"\n" +" Hardware End of Medium = No\n" +"\n" +" Fast Forward Space File = No\n" +"to your Device resource in the Storage conf file.\n" +msgstr "" + +#: src/stored/btape.c:1418 +msgid "" +"\n" +"\n" +"That appears *NOT* to have corrected the problem.\n" +msgstr "" + +#: src/stored/btape.c:1423 +msgid "" +"\n" +"\n" +"It looks like the append failed. Attempting again.\n" +"Setting \"BSF at EOM = yes\" and retrying append test.\n" +msgstr "" + +#: src/stored/btape.c:1428 +msgid "" +"\n" +"\n" +"It looks like the test worked this time, please add:\n" +"\n" +" Hardware End of Medium = No\n" +" Fast Forward Space File = No\n" +" BSF at EOM = yes\n" +"\n" +"to your Device resource in the Storage conf file.\n" +msgstr "" + +#: src/stored/btape.c:1439 +msgid "" +"\n" +"Append test failed.\n" +"\n" +"\n" +"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" +"Unable to correct the problem. You MUST fix this\n" +"problem before Bacula can use your tape drive correctly\n" +"\n" +"Perhaps running Bacula in fixed block mode will work.\n" +"Do so by setting:\n" +"\n" +"Minimum Block Size = nnn\n" +"Maximum Block Size = nnn\n" +"\n" +"in your Storage daemon's Device definition.\n" +"nnn must match your tape driver's block size, which\n" +"can be determined by reading your tape manufacturer's\n" +"information, and the information on your kernel driver.\n" +"Fixed block sizes, however, are not normally an ideal solution.\n" +"\n" +"Some systems, e.g. OpenBSD, require you to set\n" +" Use MTIOCGET= no\n" +"in your device resource. 
Use with caution.\n" +msgstr "" + +#: src/stored/btape.c:1460 +msgid "" +"\n" +"The above Bacula scan should have output identical to what follows.\n" +"Please double check it ...\n" +"=== Sample correct output ===\n" +"1 block of 64448 bytes in file 1\n" +"End of File mark.\n" +"2 blocks of 64448 bytes in file 2\n" +"End of File mark.\n" +"3 blocks of 64448 bytes in file 3\n" +"End of File mark.\n" +"1 block of 64448 bytes in file 4\n" +"End of File mark.\n" +"Total files=4, blocks=7, bytes = 451,136\n" +"=== End sample correct output ===\n" +"\n" +"If the above scan output is not identical to the\n" +"sample output, you MUST correct the problem\n" +"or Bacula will not be able to write multiple Jobs to \n" +"the tape.\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1499 +#, c-format +msgid "Bad status from fsf. ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:1503 +msgid "Forward spaced 1 file.\n" +msgstr "" + +#: src/stored/btape.c:1506 +#, c-format +msgid "Forward spaced %d files.\n" +msgstr "" + +#: src/stored/btape.c:1525 +msgid "Forward spaced 1 record.\n" +msgstr "" + +#: src/stored/btape.c:1528 +#, c-format +msgid "Forward spaced %d records.\n" +msgstr "" + +#: src/stored/btape.c:1573 +#, c-format +msgid "Wrote one record of %d bytes.\n" +msgstr "" + +#: src/stored/btape.c:1575 +msgid "Wrote block to device.\n" +msgstr "" + +#: src/stored/btape.c:1590 +msgid "Enter length to read: " +msgstr "" + +#: src/stored/btape.c:1595 +msgid "Bad length entered, using default of 1024 bytes.\n" +msgstr "" + +#: src/stored/btape.c:1604 +#, c-format +msgid "Read of %d bytes gives stat=%d. ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:1627 src/stored/btape.c:1676 +#, c-format +msgid "End of tape\n" +msgstr "" + +#: src/stored/btape.c:1632 +#, c-format +msgid "Starting scan at file %u\n" +msgstr "" + +#: src/stored/btape.c:1637 src/stored/dev.c:1314 +#, c-format +msgid "read error on %s. ERR=%s.\n" +msgstr "" + +#: src/stored/btape.c:1639 +#, c-format +msgid "Bad status from read %d. ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:1642 src/stored/btape.c:1656 src/stored/btape.c:1720 +#: src/stored/btape.c:1732 src/stored/btape.c:1745 src/stored/btape.c:1761 +#, c-format +msgid "1 block of %d bytes in file %d\n" +msgstr "" + +#: src/stored/btape.c:1645 src/stored/btape.c:1659 src/stored/btape.c:1723 +#: src/stored/btape.c:1735 src/stored/btape.c:1748 src/stored/btape.c:1764 +#, c-format +msgid "%d blocks of %d bytes in file %d\n" +msgstr "" + +#: src/stored/btape.c:1667 src/stored/btape.c:1739 +#, c-format +msgid "End of File mark.\n" +msgstr "" + +#: src/stored/btape.c:1688 src/stored/btape.c:1792 +#, c-format +msgid "Total files=%d, blocks=%d, bytes = %s\n" +msgstr "" + +#: src/stored/btape.c:1752 +#, c-format +msgid "Short block read.\n" +msgstr "" + +#: src/stored/btape.c:1755 +#, c-format +msgid "Error reading block. ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:1779 +#, c-format +msgid "" +"Blk_block: %u dev_blk=%u blen=%u First rec FI=%s SessId=%u SessTim=%u Strm=%" +"s rlen=%d\n" +msgstr "" + +#: src/stored/btape.c:1801 +#, c-format +msgid "Device status: %u. 
ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:1831 +msgid "" +"\n" +"This command simulates Bacula writing to a tape.\n" +"It requires either one or two blank tapes, which it\n" +"will label and write.\n" +"\n" +"If you have an autochanger configured, it will use\n" +"the tapes that are in slots 1 and 2, otherwise, you will\n" +"be prompted to insert the tapes when necessary.\n" +"\n" +"It will print a status approximately\n" +"every 322 MB, and write an EOF every 3.2 GB. If you have\n" +"selected the simple test option, after writing the first tape\n" +"it will rewind it and re-read the last block written.\n" +"\n" +"If you have selected the multiple tape test, when the first tape\n" +"fills, it will ask for a second, and after writing a few more \n" +"blocks, it will stop. Then it will begin re-reading the\n" +"two tapes.\n" +"\n" +"This may take a long time -- hours! ...\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1848 +msgid "" +"Do you want to run the simplified test (s) with one tape\n" +"or the complete multiple tape (m) test: (s/m) " +msgstr "" + +#: src/stored/btape.c:1851 +msgid "Simple test (single tape) selected.\n" +msgstr "" + +#: src/stored/btape.c:1854 +msgid "Multiple tape test selected.\n" +msgstr "" + +#: src/stored/btape.c:1893 +msgid "Wrote Start of Session label.\n" +msgstr "" + +#: src/stored/btape.c:1924 +#, c-format +msgid "%s Begin writing Bacula records to tape ...\n" +msgstr "" + +#: src/stored/btape.c:1926 +#, c-format +msgid "%s Begin writing Bacula records to first tape ...\n" +msgstr "" + +#: src/stored/btape.c:1967 +#, c-format +msgid "Wrote blk_block=%u, dev_blk_num=%u VolBytes=%s rate=%.1f KB/s\n" +msgstr "" + +#: src/stored/btape.c:1977 +#, c-format +msgid "%s Flush block, write EOF\n" +msgstr "" + +#: src/stored/btape.c:1988 +msgid "Not OK\n" +msgstr "" + +#: src/stored/btape.c:2018 +msgid "Set ok=false after write_block_to_device.\n" +msgstr "" + +#: src/stored/btape.c:2021 +msgid "Wrote End of Session label.\n" +msgstr "" + +#: src/stored/btape.c:2045 +#, c-format +msgid "Wrote state file last_block_num1=%d last_block_num2=%d\n" +msgstr "" + +#: src/stored/btape.c:2049 +#, c-format +msgid "Could not create state file: %s ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:2057 +#, c-format +msgid "" +"\n" +"\n" +"%s Done filling tape at %d:%d. Now beginning re-read of tape ...\n" +msgstr "" + +#: src/stored/btape.c:2061 +#, c-format +msgid "" +"\n" +"\n" +"%s Done filling tapes at %d:%d. Now beginning re-read of first tape ...\n" +msgstr "" + +#: src/stored/btape.c:2100 +msgid "" +"\n" +"The state file level has changed. You must redo\n" +"the fill command.\n" +msgstr "" + +#: src/stored/btape.c:2106 +#, c-format +msgid "" +"\n" +"Could not find the state file: %s ERR=%s\n" +"You must redo the fill command.\n" +msgstr "" + +#: src/stored/btape.c:2162 +msgid "Mount first tape. Press enter when ready: " +msgstr "" + +#: src/stored/btape.c:2177 +msgid "Rewinding.\n" +msgstr "" + +#: src/stored/btape.c:2182 +#, c-format +msgid "Reading the first 10000 records from %u:%u.\n" +msgstr "" + +#: src/stored/btape.c:2186 src/stored/btape.c:2251 +#, c-format +msgid "Reposition from %u:%u to %u:%u\n" +msgstr "" + +#: src/stored/btape.c:2189 src/stored/btape.c:2238 src/stored/btape.c:2254 +#, c-format +msgid "Reposition error. 
ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:2192 +#, c-format +msgid "Reading block %u.\n" +msgstr "" + +#: src/stored/btape.c:2194 src/stored/btape.c:2243 src/stored/btape.c:2259 +#, c-format +msgid "Error reading block: ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:2199 +msgid "" +"\n" +"The last block on the tape matches. Test succeeded.\n" +"\n" +msgstr "" + +#: src/stored/btape.c:2201 +msgid "" +"\n" +"The last block of the first tape matches.\n" +"\n" +msgstr "" + +#: src/stored/btape.c:2224 +msgid "Mount second tape. Press enter when ready: " +msgstr "" + +#: src/stored/btape.c:2236 +#, c-format +msgid "Reposition from %u:%u to 0:1\n" +msgstr "" + +#: src/stored/btape.c:2241 src/stored/btape.c:2257 +#, c-format +msgid "Reading block %d.\n" +msgstr "" + +#: src/stored/btape.c:2247 +msgid "" +"\n" +"The first block on the second tape matches.\n" +"\n" +msgstr "" + +#: src/stored/btape.c:2263 +msgid "" +"\n" +"The last block on the second tape matches. Test succeeded.\n" +"\n" +msgstr "" + +#: src/stored/btape.c:2278 +#, c-format +msgid "10000 records read now at %d:%d\n" +msgstr "" + +#: src/stored/btape.c:2301 src/stored/btape.c:2312 src/stored/btape.c:2360 +msgid "Last block written" +msgstr "" + +#: src/stored/btape.c:2303 src/stored/btape.c:2313 +msgid "Block read back" +msgstr "" + +#: src/stored/btape.c:2304 +#, c-format +msgid "" +"\n" +"\n" +"The blocks differ at byte %u\n" +msgstr "" + +#: src/stored/btape.c:2305 +msgid "" +"\n" +"\n" +"!!!! The last block written and the block\n" +"that was read back differ. The test FAILED !!!!\n" +"This must be corrected before you use Bacula\n" +"to write multi-tape Volumes.!!!!\n" +msgstr "" + +#: src/stored/btape.c:2344 +#, c-format +msgid "Last block at: %u:%u this_dev_block_num=%d\n" +msgstr "" + +#: src/stored/btape.c:2358 +#, c-format +msgid "Block not written: FileIndex=%u blk_block=%u Size=%u\n" +msgstr "" + +#: src/stored/btape.c:2362 +msgid "Block not written" +msgstr "" + +#: src/stored/btape.c:2377 +#, c-format +msgid "End of tape %d:%d. VolumeCapacity=%s. Write rate = %.1f KB/s\n" +msgstr "" + +#: src/stored/btape.c:2427 +msgid "Test writing blocks of 64512 bytes to tape.\n" +msgstr "" + +#: src/stored/btape.c:2429 +msgid "How many blocks do you want to write? (1000): " +msgstr "" + +#: src/stored/btape.c:2444 +#, c-format +msgid "Begin writing %d Bacula blocks to tape ...\n" +msgstr "" + +#: src/stored/btape.c:2496 +#, c-format +msgid "Begin writing raw blocks of %u bytes.\n" +msgstr "" + +#: src/stored/btape.c:2520 +#, c-format +msgid "Write failed at block %u. 
stat=%d ERR=%s\n" +msgstr "" + +#: src/stored/btape.c:2529 +msgid "test autochanger" +msgstr "" + +#: src/stored/btape.c:2530 +msgid "backspace file" +msgstr "" + +#: src/stored/btape.c:2531 +msgid "backspace record" +msgstr "" + +#: src/stored/btape.c:2532 +msgid "list device capabilities" +msgstr "" + +#: src/stored/btape.c:2533 +msgid "clear tape errors" +msgstr "" + +#: src/stored/btape.c:2534 +msgid "go to end of Bacula data for append" +msgstr "" + +#: src/stored/btape.c:2535 +msgid "go to the physical end of medium" +msgstr "" + +#: src/stored/btape.c:2536 +msgid "fill tape, write onto second volume" +msgstr "" + +#: src/stored/btape.c:2537 +msgid "read filled tape" +msgstr "" + +#: src/stored/btape.c:2538 +msgid "forward space a file" +msgstr "" + +#: src/stored/btape.c:2539 +msgid "forward space a record" +msgstr "" + +#: src/stored/btape.c:2541 +msgid "write a Bacula label to the tape" +msgstr "" + +#: src/stored/btape.c:2542 +msgid "load a tape" +msgstr "" + +#: src/stored/btape.c:2543 +msgid "quit btape" +msgstr "" + +#: src/stored/btape.c:2544 +msgid "use write() to fill tape" +msgstr "" + +#: src/stored/btape.c:2545 +msgid "read and print the Bacula tape label" +msgstr "" + +#: src/stored/btape.c:2546 +msgid "test record handling functions" +msgstr "" + +#: src/stored/btape.c:2547 +msgid "rewind the tape" +msgstr "" + +#: src/stored/btape.c:2548 +msgid "read() tape block by block to EOT and report" +msgstr "" + +#: src/stored/btape.c:2549 +msgid "Bacula read block by block to EOT and report" +msgstr "" + +#: src/stored/btape.c:2550 +msgid "print tape status" +msgstr "" + +#: src/stored/btape.c:2551 +msgid "General test Bacula tape functions" +msgstr "" + +#: src/stored/btape.c:2552 +msgid "write an EOF on the tape" +msgstr "" + +#: src/stored/btape.c:2553 +msgid "write a single Bacula block" +msgstr "" + +#: src/stored/btape.c:2554 +msgid "read a single record" +msgstr "" + +#: src/stored/btape.c:2555 +msgid "read a single Bacula block" +msgstr "" + +#: src/stored/btape.c:2556 +msgid "quick fill command" +msgstr "" + +#: src/stored/btape.c:2577 +#, c-format +msgid "\"%s\" is an invalid command\n" +msgstr "" + +#: src/stored/btape.c:2586 +#, c-format +msgid "Interactive commands:\n" +msgstr "" + +#: src/stored/btape.c:2597 +#, c-format +msgid "" +"\n" +"Version: %s (%s)\n" +"\n" +"Usage: btape \n" +" -b specify bootstrap file\n" +" -c set configuration file to file\n" +" -d set debug level to nn\n" +" -p proceed inspite of I/O errors\n" +" -s turn off signals\n" +" -v be verbose\n" +" -? print this message.\n" +"\n" +msgstr "" + +#: src/stored/btape.c:2682 +#, c-format +msgid "Mount second Volume on device %s and press return when ready: " +msgstr "" + +#: src/stored/btape.c:2709 +#, c-format +msgid "Mount blank Volume on device %s and press return when ready: " +msgstr "" + +#: src/stored/btape.c:2727 +#, c-format +msgid "End of Volume \"%s\" %d records.\n" +msgstr "" + +#: src/stored/btape.c:2740 +#, c-format +msgid "Read block=%u, VolBytes=%s rate=%.1f KB/s\n" +msgstr "" + +#: src/stored/btape.c:2752 src/stored/mount.c:627 +#, c-format +msgid "Cannot open Dev=%s, Vol=%s\n" +msgstr "" + +#: src/stored/butil.c:59 +msgid "Nohdr," +msgstr "" + +#: src/stored/butil.c:62 +msgid "partial," +msgstr "" + +#: src/stored/butil.c:65 +msgid "empty," +msgstr "" + +#: src/stored/butil.c:68 +msgid "Nomatch," +msgstr "" + +#: src/stored/butil.c:71 +msgid "cont," +msgstr "" + +#: src/stored/butil.c:147 +msgid "Volume name or names is too long. 
Please use a .bsr file.\n" +msgstr "" + +#: src/stored/butil.c:167 +#, c-format +msgid "Cannot find device \"%s\" in config file %s.\n" +msgstr "" + +#: src/stored/butil.c:174 +#, c-format +msgid "Cannot init device %s\n" +msgstr "" + +#: src/stored/butil.c:194 +#, c-format +msgid "Cannot open %s\n" +msgstr "" + +#: src/stored/butil.c:277 +#, c-format +msgid "Could not find device \"%s\" in config file %s.\n" +msgstr "" + +#: src/stored/butil.c:282 +#, c-format +msgid "Using device: \"%s\" for reading.\n" +msgstr "" + +#: src/stored/butil.c:285 +#, c-format +msgid "Using device: \"%s\" for writing.\n" +msgstr "" + +#: src/stored/butil.c:301 +msgid "Unexpected End of Data\n" +msgstr "" + +#: src/stored/butil.c:303 +msgid "Unexpected End of Tape\n" +msgstr "" + +#: src/stored/butil.c:305 +msgid "Unexpected End of File\n" +msgstr "" + +#: src/stored/butil.c:307 +msgid "Tape Door is Open\n" +msgstr "" + +#: src/stored/butil.c:309 +msgid "Unexpected Tape is Off-line\n" +msgstr "" + +#: src/stored/dev.c:119 +#, c-format +msgid "Unable to stat device %s: ERR=%s\n" +msgstr "" + +#: src/stored/dev.c:130 +#, c-format +msgid "" +"%s is an unknown device type. Must be tape or directory\n" +" or have RequiresMount=yes for DVD. st_mode=%x\n" +msgstr "" + +#: src/stored/dev.c:189 +#, c-format +msgid "Unable to stat mount point %s: ERR=%s\n" +msgstr "" + +#: src/stored/dev.c:195 +msgid "" +"Mount and unmount commands must be defined for a device which requires mount.\n" +msgstr "" + +#: src/stored/dev.c:198 +msgid "Write part command must be defined for a device which requires mount.\n" +msgstr "" + +#: src/stored/dev.c:203 +#, c-format +msgid "Block size %u on device %s is too large, using default %u\n" +msgstr "" + +#: src/stored/dev.c:208 +#, c-format +msgid "Max block size %u not multiple of device %s block size.\n" +msgstr "" + +#: src/stored/dev.c:224 src/stored/dev.c:230 +#, c-format +msgid "Unable to init cond variable: ERR=%s\n" +msgstr "" + +#: src/stored/dev.c:326 +msgid "Illegal mode given to open dev.\n" +msgstr "" + +#: src/stored/dev.c:421 src/stored/device.c:325 +#, c-format +msgid "Unable to open device %s: ERR=%s\n" +msgstr "" + +#: src/stored/dev.c:456 +#, c-format +msgid "Could not open file device %s. No Volume name given.\n" +msgstr "" + +#: src/stored/dev.c:479 src/stored/dev.c:647 +#, c-format +msgid "Could not open: %s, ERR=%s\n" +msgstr "" + +#: src/stored/dev.c:523 +#, c-format +msgid "Could not open DVD device %s. No Volume name given.\n" +msgstr "" + +#: src/stored/dev.c:572 +#, c-format +msgid "The DVD in device %s contains data, please blank it before writing.\n" +msgstr "" + +#: src/stored/dev.c:593 +#, c-format +msgid "Unable to stat DVD part 1 file %s: ERR=%s\n" +msgstr "" + +#: src/stored/dev.c:601 +#, c-format +msgid "DVD part 1 is not a regular file %s.\n" +msgstr "" + +#: src/stored/dev.c:621 +#, c-format +msgid "There is no valid DVD in device %s.\n" +msgstr "" + +#: src/stored/dev.c:627 +#, c-format +msgid "Could not mount DVD device %s.\n" +msgstr "" + +#: src/stored/dev.c:677 +#, c-format +msgid "Could not fstat: %s, ERR=%s\n" +msgstr "" + +#: src/stored/dev.c:711 +#, c-format +msgid "Bad call to rewind. Device %s not open\n" +msgstr "" + +#: src/stored/dev.c:750 +#, c-format +msgid "No tape loaded or drive offline on %s.\n" +msgstr "" + +#: src/stored/dev.c:760 +#, c-format +msgid "Rewind error on %s. ERR=%s.\n" +msgstr "" + +#: src/stored/dev.c:770 src/stored/dev.c:848 src/stored/dev.c:985 +#: src/stored/dev.c:1561 +#, c-format +msgid "lseek error on %s. 
ERR=%s.\n" +msgstr "" + +#: src/stored/dev.c:819 +#, c-format +msgid "Bad call to eod. Device %s not open\n" +msgstr "" + +#: src/stored/dev.c:884 +#, c-format +msgid "ioctl MTEOM error on %s. ERR=%s.\n" +msgstr "" + +#: src/stored/dev.c:893 src/stored/dev.c:1026 +#, c-format +msgid "ioctl MTIOCGET error on %s. ERR=%s.\n" +msgstr "" + +#: src/stored/dev.c:971 +msgid "Bad device call. Device not open\n" +msgstr "" + +#: src/stored/dev.c:984 +#, c-format +msgid "Seek error: ERR=%s\n" +msgstr "" + +#: src/stored/dev.c:1021 +msgid " Bacula status:" +msgstr "" + +#: src/stored/dev.c:1022 src/stored/dev.c:1105 src/stored/dev.c:1107 +#, c-format +msgid " file=%d block=%d\n" +msgstr "" + +#: src/stored/dev.c:1030 +msgid " Device status:" +msgstr "" + +#: src/stored/dev.c:1129 +msgid "Bad call to load_dev. Device not open\n" +msgstr "" + +#: src/stored/dev.c:1140 src/stored/dev.c:1153 +#, c-format +msgid "ioctl MTLOAD error on %s. ERR=%s.\n" +msgstr "" + +#: src/stored/dev.c:1184 +#, c-format +msgid "ioctl MTOFFL error on %s. ERR=%s.\n" +msgstr "" + +#: src/stored/dev.c:1225 +msgid "Bad call to fsf. Device not open\n" +msgstr "" + +#: src/stored/dev.c:1236 src/stored/dev.c:1363 +#, c-format +msgid "Device %s at End of Tape.\n" +msgstr "" + +#: src/stored/dev.c:1267 src/stored/dev.c:1343 +#, c-format +msgid "ioctl MTFSF error on %s. ERR=%s.\n" +msgstr "" + +#: src/stored/dev.c:1392 +msgid "Bad call to bsf. Device not open\n" +msgstr "" + +#: src/stored/dev.c:1398 +#, c-format +msgid "Device %s cannot BSF because it is not a tape.\n" +msgstr "" + +#: src/stored/dev.c:1415 +#, c-format +msgid "ioctl MTBSF error on %s. ERR=%s.\n" +msgstr "" + +#: src/stored/dev.c:1434 +msgid "Bad call to fsr. Device not open\n" +msgstr "" + +#: src/stored/dev.c:1444 +#, c-format +msgid "ioctl MTFSR not permitted on %s.\n" +msgstr "" + +#: src/stored/dev.c:1472 +#, c-format +msgid "ioctl MTFSR %d error on %s. ERR=%s.\n" +msgstr "" + +#: src/stored/dev.c:1490 +msgid "Bad call to bsr_dev. Device not open\n" +msgstr "" + +#: src/stored/dev.c:1500 +#, c-format +msgid "ioctl MTBSR not permitted on %s.\n" +msgstr "" + +#: src/stored/dev.c:1514 +#, c-format +msgid "ioctl MTBSR error on %s. ERR=%s.\n" +msgstr "" + +#: src/stored/dev.c:1550 +msgid "Bad call to reposition. Device not open\n" +msgstr "" + +#: src/stored/dev.c:1629 +msgid "Bad call to weof_dev. Device not open\n" +msgstr "" + +#: src/stored/dev.c:1639 +msgid "Attempt to WEOF on non-appendable Volume\n" +msgstr "" + +#: src/stored/dev.c:1657 +#, c-format +msgid "ioctl MTWEOF error on %s. ERR=%s.\n" +msgstr "" + +#: src/stored/dev.c:1756 +#, c-format +msgid "unknown func code %d" +msgstr "" + +#: src/stored/dev.c:1762 +#, c-format +msgid "I/O function \"%s\" not supported on this device.\n" +msgstr "" + +#: src/stored/dev.c:1917 +#, c-format +msgid "Unable to truncate device %s. ERR=%s\n" +msgstr "" + +#: src/stored/dev.c:2016 src/stored/dev.c:2021 +#, c-format +msgid "Device %s cannot be %smounted. ERR=%s\n" +msgstr "" + +#: src/stored/dev.c:2429 +#, c-format +msgid "Unable to set eotmodel on device %s: ERR=%s\n" +msgstr "" + +#: src/stored/device.c:120 +#, c-format +msgid "End of medium on Volume \"%s\" Bytes=%s Blocks=%s at %s.\n" +msgstr "" + +#: src/stored/device.c:136 +#, c-format +msgid "New volume \"%s\" mounted on device %s at %s.\n" +msgstr "" + +#: src/stored/device.c:148 +#, c-format +msgid "write_block_to_device Volume label failed. ERR=%s" +msgstr "" + +#: src/stored/device.c:183 +#, c-format +msgid "write_block_to_device overflow block failed. 
ERR=%s" +msgstr "" + +#: src/stored/device.c:327 +#, c-format +msgid "Unable to open archive %s: ERR=%s\n" +msgstr "" + +#: src/stored/dircmd.c:154 +msgid "Connection request failed.\n" +msgstr "" + +#: src/stored/dircmd.c:163 +#, c-format +msgid "Invalid connection. Len=%d\n" +msgstr "" + +#: src/stored/dircmd.c:274 +#, c-format +msgid "3991 Bad setdebug command: %s\n" +msgstr "" + +#: src/stored/dircmd.c:295 +#, c-format +msgid "3904 Job %s not found.\n" +msgstr "" + +#: src/stored/dircmd.c:321 +#, c-format +msgid "Job %s marked to be canceled.\n" +msgstr "" + +#: src/stored/dircmd.c:322 +#, c-format +msgid "3000 Job %s marked to be canceled.\n" +msgstr "" + +#: src/stored/dircmd.c:326 +msgid "3903 Error scanning cancel command.\n" +msgstr "" + +#: src/stored/dircmd.c:402 src/stored/dircmd.c:758 src/stored/dircmd.c:848 +#: src/stored/dircmd.c:920 src/stored/dircmd.c:983 src/stored/dircmd.c:1026 +#, c-format +msgid "3999 Device \"%s\" not found or could not be opened.\n" +msgstr "" + +#: src/stored/dircmd.c:407 +#, c-format +msgid "3903 Error scanning label command: %s\n" +msgstr "" + +#: src/stored/dircmd.c:457 +#, c-format +msgid "3910 Unable to open device %s: ERR=%s\n" +msgstr "" + +#: src/stored/dircmd.c:474 +#, c-format +msgid "3920 Cannot label Volume because it is already labeled: \"%s\"\n" +msgstr "" + +#: src/stored/dircmd.c:481 +msgid "3921 Wrong volume mounted.\n" +msgstr "" + +#: src/stored/dircmd.c:485 +msgid "3922 Cannot relabel an ANSI/IBM labeled Volume.\n" +msgstr "" + +#: src/stored/dircmd.c:493 +#, c-format +msgid "3912 Failed to label Volume: ERR=%s\n" +msgstr "" + +#: src/stored/dircmd.c:503 +#, c-format +msgid "3914 Failed to label Volume (no media): ERR=%s\n" +msgstr "" + +#: src/stored/dircmd.c:506 +#, c-format +msgid "3913 Cannot label Volume. 
Unknown status %d from read_volume_label()\n" +msgstr "" + +#: src/stored/dircmd.c:539 +#, c-format +msgid "3001 Mounted Volume: %s\n" +msgstr "" + +#: src/stored/dircmd.c:543 src/stored/dircmd.c:1062 +#, c-format +msgid "" +"3902 Cannot mount Volume on Storage Device %s because:\n" +"%s" +msgstr "" + +#: src/stored/dircmd.c:571 src/stored/reserve.c:1073 +#, c-format +msgid "" +"\n" +" Device \"%s\" requested by DIR could not be opened or does not exist.\n" +msgstr "" + +#: src/stored/dircmd.c:593 src/stored/reserve.c:1069 +#, c-format +msgid "" +"\n" +" Device \"%s\" in changer \"%s\" requested by DIR could not be opened or " +"does not exist.\n" +msgstr "" + +#: src/stored/dircmd.c:668 src/stored/dircmd.c:725 +#, c-format +msgid "3901 open device failed: ERR=%s\n" +msgstr "" + +#: src/stored/dircmd.c:688 src/stored/dircmd.c:716 +#, c-format +msgid "3001 Device %s is mounted with Volume \"%s\"\n" +msgstr "" + +#: src/stored/dircmd.c:691 src/stored/dircmd.c:719 src/stored/dircmd.c:734 +#, c-format +msgid "" +"3905 Device %s open but no Bacula volume is mounted.\n" +"If this is not a blank tape, try unmounting and remounting the Volume.\n" +msgstr "" + +#: src/stored/dircmd.c:701 +#, c-format +msgid "3001 Device %s is doing acquire.\n" +msgstr "" + +#: src/stored/dircmd.c:706 src/stored/dircmd.c:820 +#, c-format +msgid "3903 Device %s is being labeled.\n" +msgstr "" + +#: src/stored/dircmd.c:731 +#, c-format +msgid "3001 Device %s is already mounted with Volume \"%s\"\n" +msgstr "" + +#: src/stored/dircmd.c:740 +#, c-format +msgid "3002 Device %s is mounted.\n" +msgstr "" + +#: src/stored/dircmd.c:743 src/stored/dircmd.c:793 src/stored/dircmd.c:808 +#: src/stored/dircmd.c:839 +#, c-format +msgid "3907 %s" +msgstr "" + +#: src/stored/dircmd.c:746 +#, c-format +msgid "3906 File device %s is always mounted.\n" +msgstr "" + +#: src/stored/dircmd.c:752 +#, c-format +msgid "3905 Bizarre wait state %d\n" +msgstr "" + +#: src/stored/dircmd.c:762 +#, c-format +msgid "3909 Error scanning mount command: %s\n" +msgstr "" + +#: src/stored/dircmd.c:790 src/stored/dircmd.c:841 +#, c-format +msgid "3002 Device %s unmounted.\n" +msgstr "" + +#: src/stored/dircmd.c:797 +#, c-format +msgid "3901 Device %s is already unmounted.\n" +msgstr "" + +#: src/stored/dircmd.c:811 +#, c-format +msgid "3001 Device %s unmounted.\n" +msgstr "" + +#: src/stored/dircmd.c:816 +#, c-format +msgid "3902 Device %s is busy in acquire.\n" +msgstr "" + +#: src/stored/dircmd.c:853 +#, c-format +msgid "3907 Error scanning unmount command: %s\n" +msgstr "" + +#: src/stored/dircmd.c:884 +#, c-format +msgid "3921 Device %s already released.\n" +msgstr "" + +#: src/stored/dircmd.c:891 +#, c-format +msgid "3922 Device %s waiting for sysop.\n" +msgstr "" + +#: src/stored/dircmd.c:897 +#, c-format +msgid "3922 Device %s waiting for mount.\n" +msgstr "" + +#: src/stored/dircmd.c:901 +#, c-format +msgid "3923 Device %s is busy in acquire.\n" +msgstr "" + +#: src/stored/dircmd.c:905 +#, c-format +msgid "3914 Device %s is being labeled.\n" +msgstr "" + +#: src/stored/dircmd.c:914 +#, c-format +msgid "3022 Device %s released.\n" +msgstr "" + +#: src/stored/dircmd.c:925 +#, c-format +msgid "3927 Error scanning release command: %s\n" +msgstr "" + +#: src/stored/dircmd.c:970 +#, c-format +msgid "3995 Device %s is not an autochanger.\n" +msgstr "" + +#: src/stored/dircmd.c:987 +#, c-format +msgid "3908 Error scanning autocharger drives/list/slots command: %s\n" +msgstr "" + +#: src/stored/dircmd.c:1030 +#, c-format +msgid "3909 Error scanning 
readlabel command: %s\n" +msgstr "" + +#: src/stored/dircmd.c:1058 +#, c-format +msgid "3001 Volume=%s Slot=%d\n" +msgstr "" + +#: src/stored/dircmd.c:1090 +#, c-format +msgid "3931 Device %s is BLOCKED. user unmounted.\n" +msgstr "" + +#: src/stored/dircmd.c:1094 +#, c-format +msgid "" +"3932 Device %s is BLOCKED. user unmounted during wait for media/mount.\n" +msgstr "" + +#: src/stored/dircmd.c:1098 +#, c-format +msgid "3933 Device %s is BLOCKED waiting for media.\n" +msgstr "" + +#: src/stored/dircmd.c:1102 +#, c-format +msgid "3934 Device %s is being initialized.\n" +msgstr "" + +#: src/stored/dircmd.c:1106 +#, c-format +msgid "3935 Device %s is blocked labeling a Volume.\n" +msgstr "" + +#: src/stored/dircmd.c:1110 +#, c-format +msgid "3935 Device %s is blocked for unknown reason.\n" +msgstr "" + +#: src/stored/dircmd.c:1115 +#, c-format +msgid "3936 Device %s is busy reading.\n" +msgstr "" + +#: src/stored/dircmd.c:1118 +#, c-format +msgid "3937 Device %s is busy with %d writer(s).\n" +msgstr "" + +#: src/stored/dvd.c:112 +msgid "No FreeSpace command defined.\n" +msgstr "" + +#: src/stored/dvd.c:146 +#, c-format +msgid "Cannot run free space command. Results=%s ERR=%s\n" +msgstr "" + +#: src/stored/dvd.c:262 +#, c-format +msgid "Error writing part %d to the DVD: ERR=%s\n" +msgstr "" + +#: src/stored/dvd.c:264 +#, c-format +msgid "Error while writing current part to the DVD: %s" +msgstr "" + +#: src/stored/dvd.c:274 +#, c-format +msgid "Part %d (%lld bytes) written to DVD.\n" +msgstr "" + +#: src/stored/dvd.c:293 +#, c-format +msgid "Remaining free space %s on %s\n" +msgstr "" + +#: src/stored/dvd.c:359 +#, c-format +msgid "Next Volume part already exists on DVD. Cannot continue: %s\n" +msgstr "" + +#: src/stored/dvd.c:378 +#, c-format +msgid "open_next_part can't unlink existing part %s, ERR=%s\n" +msgstr "" + +#: src/stored/dvd.c:579 +#, c-format +msgid "" +"Error writing. Current part less than total number of parts (%d/%d, device=%" +"s)\n" +msgstr "" + +#: src/stored/dvd.c:586 +#, c-format +msgid "Unable to write last on %s: ERR=%s\n" +msgstr "" + +#: src/stored/fd_cmds.c:368 +msgid "Error parsing bootstrap file.\n" +msgstr "" + +#: src/stored/job.c:207 +#, c-format +msgid "Job name not found: %s\n" +msgstr "" + +#: src/stored/job.c:218 +#, c-format +msgid "Hey!!!! JobId %u Job %s already authenticated.\n" +msgstr "" + +#: src/stored/job.c:229 +msgid "Unable to authenticate File daemon\n" +msgstr "" + +#: src/stored/job.c:351 +msgid "In free_jcr(), but still attached to device!!!!\n" +msgstr "" + +#: src/stored/label.c:91 src/stored/label.c:132 src/stored/label.c:226 +#, c-format +msgid "Wrong Volume mounted on device %s: Wanted %s have %s\n" +msgstr "" + +#: src/stored/label.c:98 src/stored/label.c:135 src/stored/label.c:208 +#, c-format +msgid "Too many tries: %s" +msgstr "" + +#: src/stored/label.c:114 +#, c-format +msgid "Couldn't rewind device %s: ERR=%s\n" +msgstr "" + +#: src/stored/label.c:152 +#, c-format +msgid "" +"Requested Volume \"%s\" on %s is not a Bacula labeled Volume, because: ERR=%s" +msgstr "" + +#: src/stored/label.c:157 +msgid "Could not read Volume label from block.\n" +msgstr "" + +#: src/stored/label.c:160 +#, c-format +msgid "Could not unserialize Volume label: ERR=%s\n" +msgstr "" + +#: src/stored/label.c:165 +#, c-format +msgid "Volume Header Id bad: %s\n" +msgstr "" + +#: src/stored/label.c:193 +#, c-format +msgid "Volume on %s has wrong Bacula version. 
Wanted %d got %d\n" +msgstr "" + +#: src/stored/label.c:204 +#, c-format +msgid "Volume on %s has bad Bacula label type: %x\n" +msgstr "" + +#: src/stored/label.c:217 src/stored/label.c:402 +#, c-format +msgid "Could not reserve volume %s on %s\n" +msgstr "" + +#: src/stored/label.c:290 +#, c-format +msgid "Cannot write Volume label to block for device %s\n" +msgstr "" + +#: src/stored/label.c:449 +#, c-format +msgid "Rewind error on device %s: ERR=%s\n" +msgstr "" + +#: src/stored/label.c:455 +#, c-format +msgid "Truncate error on device %s: ERR=%s\n" +msgstr "" + +#: src/stored/label.c:461 +#, c-format +msgid "Failed to re-open DVD after truncate on device %s: ERR=%s\n" +msgstr "" + +#: src/stored/label.c:484 +#, c-format +msgid "Unable to write device %s: ERR=%s\n" +msgstr "" + +#: src/stored/label.c:512 +#, c-format +msgid "Recycled volume \"%s\" on device %s, all previous data lost.\n" +msgstr "" + +#: src/stored/label.c:515 +#, c-format +msgid "Wrote label to prelabeled Volume \"%s\" on device %s\n" +msgstr "" + +#: src/stored/label.c:719 +#, c-format +msgid "Bad session label = %d\n" +msgstr "" + +#: src/stored/label.c:737 src/stored/label.c:744 +#, c-format +msgid "Error writing Session label to %s: %s\n" +msgstr "" + +#: src/stored/label.c:779 +#, c-format +msgid "Expecting Volume Label, got FI=%s Stream=%s len=%d\n" +msgstr "" + +#: src/stored/label.c:906 +#, c-format +msgid "Unknown %d" +msgstr "" + +#: src/stored/label.c:910 +#, c-format +msgid "" +"\n" +"Volume Label:\n" +"Id : %sVerNo : %d\n" +"VolName : %s\n" +"PrevVolName : %s\n" +"VolFile : %d\n" +"LabelType : %s\n" +"LabelSize : %d\n" +"PoolName : %s\n" +"MediaType : %s\n" +"PoolType : %s\n" +"HostName : %s\n" +msgstr "" + +#: src/stored/label.c:932 +#, c-format +msgid "Date label written: %s\n" +msgstr "" + +#: src/stored/label.c:938 +#, c-format +msgid "Date label written: %04d-%02d-%02d at %02d:%02d\n" +msgstr "" + +#: src/stored/label.c:958 +#, c-format +msgid "" +"\n" +"%s Record:\n" +"JobId : %d\n" +"VerNum : %d\n" +"PoolName : %s\n" +"PoolType : %s\n" +"JobName : %s\n" +"ClientName : %s\n" +msgstr "" + +#: src/stored/label.c:971 +#, c-format +msgid "" +"Job (unique name) : %s\n" +"FileSet : %s\n" +"JobType : %c\n" +"JobLevel : %c\n" +msgstr "" + +#: src/stored/label.c:980 +#, c-format +msgid "" +"JobFiles : %s\n" +"JobBytes : %s\n" +"StartBlock : %s\n" +"EndBlock : %s\n" +"StartFile : %s\n" +"EndFile : %s\n" +"JobErrors : %s\n" +"JobStatus : %c\n" +msgstr "" + +#: src/stored/label.c:1001 +#, c-format +msgid "Date written : %s\n" +msgstr "" + +#: src/stored/label.c:1006 +#, c-format +msgid "Date written : %04d-%02d-%02d at %02d:%02d\n" +msgstr "" + +#: src/stored/label.c:1025 +msgid "Fresh Volume" +msgstr "" + +#: src/stored/label.c:1028 +msgid "Volume" +msgstr "" + +#: src/stored/label.c:1037 src/stored/read_record.c:402 +msgid "End of Media" +msgstr "" + +#: src/stored/label.c:1040 +msgid "End of Tape" +msgstr "" + +#: src/stored/label.c:1060 src/stored/label.c:1068 src/stored/label.c:1101 +#, c-format +msgid "%s Record: File:blk=%u:%u SessId=%d SessTime=%d JobId=%d DataLen=%d\n" +msgstr "" + +#: src/stored/label.c:1065 +msgid "End of physical tape.\n" +msgstr "" + +#: src/stored/label.c:1080 src/stored/label.c:1089 +#, c-format +msgid "%s Record: File:blk=%u:%u SessId=%d SessTime=%d JobId=%d\n" +msgstr "" + +#: src/stored/label.c:1082 +#, c-format +msgid " Job=%s Date=%s Level=%c Type=%c\n" +msgstr "" + +#: src/stored/label.c:1091 +#, c-format +msgid " Date=%s Level=%c Type=%c Files=%s Bytes=%s Errors=%d 
Status=%c\n" +msgstr "" + +#: src/stored/mac.c:79 +msgid "Read and write devices not properly initialized.\n" +msgstr "" + +#: src/stored/mac.c:87 +#, c-format +msgid "No Volume names found for %s.\n" +msgstr "" + +#: src/stored/mount.c:90 +#, c-format +msgid "Too many errors trying to mount device %s.\n" +msgstr "" + +#: src/stored/mount.c:96 +#, c-format +msgid "Job %d canceled.\n" +msgstr "" + +#: src/stored/mount.c:211 +#, c-format +msgid "Could not open device %s: ERR=%s\n" +msgstr "" + +#: src/stored/mount.c:254 src/stored/mount.c:533 +#, c-format +msgid "Volume \"%s\" not on device %s.\n" +msgstr "" + +#: src/stored/mount.c:292 +#, c-format +msgid "" +"Director wanted Volume \"%s\".\n" +" Current Volume \"%s\" not acceptable because:\n" +" %s" +msgstr "" + +#: src/stored/mount.c:375 +#, c-format +msgid "Volume \"%s\" previously written, moving to end of data.\n" +msgstr "" + +#: src/stored/mount.c:378 +#, c-format +msgid "Unable to position to end of data on device %s: ERR=%s\n" +msgstr "" + +#: src/stored/mount.c:386 +#, c-format +msgid "Ready to append to end of Volume \"%s\" part=%d size=%s\n" +msgstr "" + +#: src/stored/mount.c:390 +#, c-format +msgid "" +"Bacula cannot write on DVD Volume \"%s\" because: The sizes do not match! " +"Volume=%s Catalog=%s\n" +msgstr "" + +#: src/stored/mount.c:404 +#, c-format +msgid "Ready to append to end of Volume \"%s\" at file=%d.\n" +msgstr "" + +#: src/stored/mount.c:407 +#, c-format +msgid "" +"Bacula cannot write on tape Volume \"%s\" because:\n" +"The number of files mismatch! Volume=%u Catalog=%u\n" +msgstr "" + +#: src/stored/mount.c:418 +#, c-format +msgid "Ready to append to end of Volume \"%s\" size=%s\n" +msgstr "" + +#: src/stored/mount.c:422 +#, c-format +msgid "" +"Bacula cannot write on disk Volume \"%s\" because: The sizes do not match! " +"Volume=%s Catalog=%s\n" +msgstr "" + +#: src/stored/mount.c:452 +#, c-format +msgid "Ready to append to end of Volume \"%s\" at file address=%u.\n" +msgstr "" + +#: src/stored/mount.c:456 +#, c-format +msgid "" +"Bacula cannot write on Volume \"%s\" because:\n" +"The EOD file address is wrong: Volume file address=%u != Catalog Endblock=%u" +"(+1)\n" +"Perhaps You removed the DVD last part in spool directory.\n" +msgstr "" + +#: src/stored/mount.c:523 +#, c-format +msgid "Labeled new Volume \"%s\" on device %s.\n" +msgstr "" + +#: src/stored/mount.c:528 +#, c-format +msgid "Device %s not configured to autolabel Volumes.\n" +msgstr "" + +#: src/stored/mount.c:548 +#, c-format +msgid "Marking Volume \"%s\" in Error in Catalog.\n" +msgstr "" + +#: src/stored/mount.c:564 +#, c-format +msgid "" +"Autochanger Volume \"%s\" not found in slot %d.\n" +" Setting InChanger to zero in catalog.\n" +msgstr "" + +#: src/stored/mount.c:583 +msgid "Hey!!!!! 
WroteVol non-zero !!!!!\n" +msgstr "" + +#: src/stored/parse_bsr.c:118 src/stored/parse_bsr.c:122 +#, c-format +msgid "" +"Bootstrap file error: %s\n" +" : Line %d, col %d of file %s\n" +"%s\n" +msgstr "" + +#: src/stored/parse_bsr.c:144 +#, c-format +msgid "Cannot open bootstrap file %s: %s\n" +msgstr "" + +#: src/stored/parse_bsr.c:274 +#, c-format +msgid "MediaType %s in bsr at inappropriate place.\n" +msgstr "" + +#: src/stored/parse_bsr.c:295 +#, c-format +msgid "Device \"%s\" in bsr at inappropriate place.\n" +msgstr "" + +#: src/stored/parse_bsr.c:452 +msgid "JobType not yet implemented\n" +msgstr "" + +#: src/stored/parse_bsr.c:460 +msgid "JobLevel not yet implemented\n" +msgstr "" + +#: src/stored/parse_bsr.c:643 +#, c-format +msgid "Slot %d in bsr at inappropriate place.\n" +msgstr "" + +#: src/stored/parse_bsr.c:667 +#, c-format +msgid "VolFile : %u-%u\n" +msgstr "" + +#: src/stored/parse_bsr.c:675 +#, c-format +msgid "VolBlock : %u-%u\n" +msgstr "" + +#: src/stored/parse_bsr.c:685 +#, c-format +msgid "FileIndex : %u\n" +msgstr "" + +#: src/stored/parse_bsr.c:687 +#, c-format +msgid "FileIndex : %u-%u\n" +msgstr "" + +#: src/stored/parse_bsr.c:697 +#, c-format +msgid "JobId : %u\n" +msgstr "" + +#: src/stored/parse_bsr.c:699 +#, c-format +msgid "JobId : %u-%u\n" +msgstr "" + +#: src/stored/parse_bsr.c:709 +#, c-format +msgid "SessId : %u\n" +msgstr "" + +#: src/stored/parse_bsr.c:711 +#, c-format +msgid "SessId : %u-%u\n" +msgstr "" + +#: src/stored/parse_bsr.c:720 +#, c-format +msgid "VolumeName : %s\n" +msgstr "" + +#: src/stored/parse_bsr.c:721 +#, c-format +msgid " MediaType : %s\n" +msgstr "" + +#: src/stored/parse_bsr.c:722 +#, c-format +msgid " Device : %s\n" +msgstr "" + +#: src/stored/parse_bsr.c:723 +#, c-format +msgid " Slot : %d\n" +msgstr "" + +#: src/stored/parse_bsr.c:732 +#, c-format +msgid "Client : %s\n" +msgstr "" + +#: src/stored/parse_bsr.c:740 +#, c-format +msgid "Job : %s\n" +msgstr "" + +#: src/stored/parse_bsr.c:748 +#, c-format +msgid "SessTime : %u\n" +msgstr "" + +#: src/stored/parse_bsr.c:759 +msgid "BSR is NULL\n" +msgstr "" + +#: src/stored/parse_bsr.c:763 +#, c-format +msgid "Next : 0x%x\n" +msgstr "" + +#: src/stored/parse_bsr.c:764 +#, c-format +msgid "Root bsr : 0x%x\n" +msgstr "" + +#: src/stored/parse_bsr.c:775 +#, c-format +msgid "count : %u\n" +msgstr "" + +#: src/stored/parse_bsr.c:776 +#, c-format +msgid "found : %u\n" +msgstr "" + +#: src/stored/parse_bsr.c:779 +#, c-format +msgid "done : %s\n" +msgstr "" + +#: src/stored/parse_bsr.c:780 +#, c-format +msgid "positioning : %d\n" +msgstr "" + +#: src/stored/parse_bsr.c:781 +#, c-format +msgid "fast_reject : %d\n" +msgstr "" + +#: src/stored/pythonsd.c:210 +msgid "Error in ParseTuple\n" +msgstr "" + +#: src/stored/pythonsd.c:226 +msgid "Parse tuple error in job_write\n" +msgstr "" + +#: src/stored/pythonsd.c:263 +#, c-format +msgid "Error in Python method %s\n" +msgstr "" + +#: src/stored/read.c:68 +msgid "No Volume names found for restore.\n" +msgstr "" + +#: src/stored/read.c:122 +#, c-format +msgid ">filed: Error Hdr=%s\n" +msgstr "" + +#: src/stored/read.c:123 src/stored/read.c:138 +#, c-format +msgid "Error sending to File daemon. ERR=%s\n" +msgstr "" + +#: src/stored/read.c:137 +#, c-format +msgid "Error sending to FD. 
ERR=%s\n" +msgstr "" + +#: src/stored/read_record.c:83 +#, c-format +msgid "End of Volume at file %u on device %s, Volume \"%s\"\n" +msgstr "" + +#: src/stored/read_record.c:86 +msgid "End of all volumes.\n" +msgstr "" + +#: src/stored/read_record.c:129 +msgid "part" +msgstr "" + +#: src/stored/read_record.c:132 +msgid "file" +msgstr "" + +#: src/stored/read_record.c:135 +#, c-format +msgid "End of %s %u on device %s, Volume \"%s\"\n" +msgstr "" + +#: src/stored/read_record.c:149 +msgid "Did fsr in attemp to skip bad record.\n" +msgstr "" + +#: src/stored/read_record.c:342 +#, c-format +msgid "Reposition from (file:block) %u:%u to %u:%u\n" +msgstr "" + +#: src/stored/read_record.c:370 +#, c-format +msgid "Forward spacing Volume \"%s\" to file:block %u:%u.\n" +msgstr "" + +#: src/stored/read_record.c:395 +msgid "Begin Session" +msgstr "" + +#: src/stored/read_record.c:399 +msgid "End Session" +msgstr "" + +#: src/stored/read_record.c:405 +#, c-format +msgid "Unknown code %d\n" +msgstr "" + +#: src/stored/record.c:71 +#, c-format +msgid "unknown: %d" +msgstr "" + +#: src/stored/record.c:378 +msgid "Damaged buffer\n" +msgstr "" + +#: src/stored/record.c:549 +#, c-format +msgid "Sanity check failed. maxlen=%d datalen=%d. Block discarded.\n" +msgstr "" + +#: src/stored/reserve.c:93 +#, c-format +msgid "Unable to initialize reservation lock. ERR=%s\n" +msgstr "" + +#: src/stored/reserve.c:99 +#, c-format +msgid "Unable to initialize volume list lock. ERR=%s\n" +msgstr "" + +#: src/stored/reserve.c:429 +#, c-format +msgid "Hey! num_writers=%d!!!!\n" +msgstr "" + +#: src/stored/reserve.c:643 +msgid "3939 Could not get dcr\n" +msgstr "" + +#: src/stored/reserve.c:766 src/stored/reserve.c:777 +#, c-format +msgid "Failed command: %s\n" +msgstr "" + +#: src/stored/reserve.c:767 +#, c-format +msgid "" +"\n" +" Device \"%s\" with MediaType \"%s\" requested by DIR not found in SD " +"Device resources.\n" +msgstr "" + +#: src/stored/reserve.c:1085 +#, c-format +msgid "3926 Could not get dcr for device: %s\n" +msgstr "" + +#: src/stored/reserve.c:1180 +#, c-format +msgid "3601 JobId=%u device %s is BLOCKED due to user unmount.\n" +msgstr "" + +#: src/stored/reserve.c:1190 +#, c-format +msgid "3602 JobId=%u device %s is busy (already reading/writing).\n" +msgstr "" + +#: src/stored/reserve.c:1237 +#, c-format +msgid "3603 JobId=%u device %s is busy reading.\n" +msgstr "" + +#: src/stored/reserve.c:1246 +#, c-format +msgid "3604 JobId=%u device %s is BLOCKED due to user unmount.\n" +msgstr "" + +#: src/stored/reserve.c:1314 +#, c-format +msgid "3605 JobId=%u wants free drive but device %s is busy.\n" +msgstr "" + +#: src/stored/reserve.c:1322 +#, c-format +msgid "3606 JobId=%u prefers mounted drives, but drive %s has no Volume.\n" +msgstr "" + +#: src/stored/reserve.c:1345 +#, c-format +msgid "3607 JobId=%u wants Vol=\"%s\" drive has Vol=\"%s\" on drive %s.\n" +msgstr "" + +#: src/stored/reserve.c:1387 +#, c-format +msgid "" +"3608 JobId=%u wants Pool=\"%s\" but have Pool=\"%s\" nreserve=%d on drive %" +"s.\n" +msgstr "" + +#: src/stored/reserve.c:1431 +#, c-format +msgid "3609 JobId=%u wants Pool=\"%s\" but has Pool=\"%s\" on drive %s.\n" +msgstr "" + +#: src/stored/reserve.c:1439 +#, c-format +msgid "Logic error!!!! JobId=%u Should not get here.\n" +msgstr "" + +#: src/stored/reserve.c:1440 +#, c-format +msgid "3910 JobId=%u Logic error!!!! drive %s Should not get here.\n" +msgstr "" + +#: src/stored/reserve.c:1443 +msgid "Logic error!!!! 
Should not get here.\n" +msgstr "" + +#: src/stored/reserve.c:1446 +#, c-format +msgid "3911 JobId=%u failed reserve drive %s.\n" +msgstr "" + +#: src/stored/spool.c:84 +msgid "Spooling statistics:\n" +msgstr "" + +#: src/stored/spool.c:87 +#, c-format +msgid "" +"Data spooling: %u active jobs, %s bytes; %u total jobs, %s max bytes/job.\n" +msgstr "" + +#: src/stored/spool.c:95 +#, c-format +msgid "Attr spooling: %u active jobs, %s bytes; %u total jobs, %s max bytes.\n" +msgstr "" + +#: src/stored/spool.c:115 +msgid "Spooling data ...\n" +msgstr "" + +#: src/stored/spool.c:141 +#, c-format +msgid "Bad return from despool WroteVol=%d\n" +msgstr "" + +#: src/stored/spool.c:174 +#, c-format +msgid "Open data spool file %s failed: ERR=%s\n" +msgstr "" + +#: src/stored/spool.c:232 +#, c-format +msgid "Committing spooled data to Volume \"%s\". Despooling %s bytes ...\n" +msgstr "" + +#: src/stored/spool.c:236 +#, c-format +msgid "Writing spooled data to Volume. Despooling %s bytes ...\n" +msgstr "" + +#: src/stored/spool.c:306 +#, c-format +msgid "" +"Despooling elapsed time = %02d:%02d:%02d, Transfer rate = %s bytes/second\n" +msgstr "" + +#: src/stored/spool.c:315 src/stored/spool.c:504 src/stored/spool.c:546 +#, c-format +msgid "Ftruncate spool file failed: ERR=%s\n" +msgstr "" + +#: src/stored/spool.c:376 +#, c-format +msgid "Spool header read error. ERR=%s\n" +msgstr "" + +#: src/stored/spool.c:379 +#, c-format +msgid "Spool read error. Wanted %u bytes, got %d\n" +msgstr "" + +#: src/stored/spool.c:380 +#, c-format +msgid "Spool header read error. Wanted %u bytes, got %d\n" +msgstr "" + +#: src/stored/spool.c:386 src/stored/spool.c:387 +#, c-format +msgid "Spool block too big. Max %u bytes, got %u\n" +msgstr "" + +#: src/stored/spool.c:392 src/stored/spool.c:393 +#, c-format +msgid "Spool data read error. Wanted %u bytes, got %d\n" +msgstr "" + +#: src/stored/spool.c:450 +msgid "User specified spool size reached.\n" +msgstr "" + +#: src/stored/spool.c:452 +msgid "Bad return from despool in write_block.\n" +msgstr "" + +#: src/stored/spool.c:460 +msgid "Spooling data again ...\n" +msgstr "" + +#: src/stored/spool.c:491 +#, c-format +msgid "Error writing header to spool file. ERR=%s\n" +msgstr "" + +#: src/stored/spool.c:510 src/stored/spool.c:552 +msgid "Fatal despooling error." +msgstr "" + +#: src/stored/spool.c:517 +msgid "Retrying after header spooling error failed.\n" +msgstr "" + +#: src/stored/spool.c:531 +#, c-format +msgid "Error writing data to spool file. ERR=%s\n" +msgstr "" + +#: src/stored/spool.c:562 +msgid "Retrying after data spooling error failed.\n" +msgstr "" + +#: src/stored/spool.c:617 src/stored/spool.c:624 +#, c-format +msgid "Fseek on attributes file failed: ERR=%s\n" +msgstr "" + +#: src/stored/spool.c:634 +#, c-format +msgid "Sending spooled attrs to the Director. 
Despooling %s bytes ...\n" +msgstr "" + +#: src/stored/spool.c:661 +#, c-format +msgid "fopen attr spool file %s failed: ERR=%s\n" +msgstr "" + +#: src/stored/status.c:120 +msgid "" +"\n" +"Device status:\n" +msgstr "" + +#: src/stored/status.c:124 +#, c-format +msgid "Autochanger \"%s\" with devices:\n" +msgstr "" + +#: src/stored/status.c:142 +#, c-format +msgid "" +"Device %s is mounted with:\n" +" Volume: %s\n" +" Pool: %s\n" +" Media type: %s\n" +msgstr "" + +#: src/stored/status.c:152 +#, c-format +msgid "Device %s open but no Bacula volume is currently mounted.\n" +msgstr "" + +#: src/stored/status.c:163 +#, c-format +msgid " Total Bytes=%s Blocks=%s Bytes/block=%s\n" +msgstr "" + +#: src/stored/status.c:178 +#, c-format +msgid " Total Bytes Read=%s Blocks Read=%s Bytes/block=%s\n" +msgstr "" + +#: src/stored/status.c:184 +#, c-format +msgid " Positioned at File=%s Block=%s\n" +msgstr "" + +#: src/stored/status.c:191 +#, c-format +msgid "Device %s is not open.\n" +msgstr "" + +#: src/stored/status.c:195 +#, c-format +msgid "Device \"%s\" is not open or does not exist.\n" +msgstr "" + +#: src/stored/status.c:201 +msgid "In Use Volume status:\n" +msgstr "" + +#: src/stored/status.c:208 src/stored/status.c:210 +msgid "" +"====\n" +"\n" +msgstr "" + +#: src/stored/status.c:227 +msgid "" +"No DEVICE structure.\n" +"\n" +msgstr "" + +#: src/stored/status.c:234 +msgid " Device is BLOCKED. User unmounted.\n" +msgstr "" + +#: src/stored/status.c:238 +msgid " Device is BLOCKED. User unmounted during wait for media/mount.\n" +msgstr "" + +#: src/stored/status.c:250 +#, c-format +msgid "" +" Device is BLOCKED waiting for mount of volume \"%s\",\n" +" Pool: %s\n" +" Media type: %s\n" +msgstr "" + +#: src/stored/status.c:259 +#, c-format +msgid "" +" Device is BLOCKED waiting to create a volume for:\n" +" Pool: %s\n" +" Media type: %s\n" +msgstr "" + +#: src/stored/status.c:271 +msgid " Device is BLOCKED waiting for media.\n" +msgstr "" + +#: src/stored/status.c:277 +msgid " Device is being initialized.\n" +msgstr "" + +#: src/stored/status.c:281 +msgid " Device is blocked labeling a Volume.\n" +msgstr "" + +#: src/stored/status.c:290 +#, c-format +msgid " Slot %d is loaded in drive %d.\n" +msgstr "" + +#: src/stored/status.c:294 +#, c-format +msgid " Drive %d is not loaded.\n" +msgstr "" + +#: src/stored/status.c:297 +#, c-format +msgid " Drive %d status unknown.\n" +msgstr "" + +#: src/stored/status.c:320 +msgid "Device state:\n" +msgstr "" + +#: src/stored/status.c:338 +#, c-format +msgid "" +"num_writers=%d block=%d\n" +"\n" +msgstr "" + +#: src/stored/status.c:344 +#, c-format +msgid "Archive name: %s Device name: %s\n" +msgstr "" + +#: src/stored/status.c:348 +#, c-format +msgid "File=%u block=%u\n" +msgstr "" + +#: src/stored/status.c:351 +#, c-format +msgid "Min block=%u Max block=%u\n" +msgstr "" + +#: src/stored/status.c:375 +#, c-format +msgid "%s Job %s waiting for Client connection.\n" +msgstr "" + +#: src/stored/status.c:391 +#, c-format +msgid "" +"Reading: %s %s job %s JobId=%d Volume=\"%s\"\n" +" pool=\"%s\" device=%s\n" +msgstr "" + +#: src/stored/status.c:404 +#, c-format +msgid "" +"Writing: %s %s job %s JobId=%d Volume=\"%s\"\n" +" pool=\"%s\" device=%s\n" +msgstr "" + +#: src/stored/status.c:415 +#, c-format +msgid " spooling=%d despooling=%d despool_wait=%d\n" +msgstr "" + +#: src/stored/status.c:424 +#, c-format +msgid " Files=%s Bytes=%s Bytes/sec=%s\n" +msgstr "" + +#: src/stored/status.c:432 +#, c-format +msgid " FDReadSeqNo=%s in_msg=%u out_msg=%d fd=%d\n" +msgstr "" + 
+#: src/stored/status.c:438 +msgid " FDSocket closed\n" +msgstr "" + +#: src/stored/status.c:460 +msgid "" +"\n" +"Jobs waiting to reserve a drive:\n" +msgstr "" + +#: src/stored/status.c:491 +msgid "===================================================================\n" +msgstr "" + +#: src/stored/status.c:637 +msgid "3900 Bad .status command, missing argument.\n" +msgstr "" + +#: src/stored/status.c:660 +msgid "3900 Bad .status command, wrong argument.\n" +msgstr "" + +#: src/stored/status.c:674 +msgid "Bacula Storage: Idle" +msgstr "" + +#: src/stored/status.c:685 +msgid "Bacula Storage: Running" +msgstr "" + +#: src/stored/status.c:699 +msgid "Bacula Storage: Last Job Canceled" +msgstr "" + +#: src/stored/status.c:703 +msgid "Bacula Storage: Last Job Failed" +msgstr "" + +#: src/stored/status.c:707 +msgid "Bacula Storage: Last Job had Warnings" +msgstr "" + +#: src/stored/stored.c:82 +#, c-format +msgid "" +"\n" +"Version: %s (%s)\n" +"\n" +"Usage: stored [options] [-c config_file] [config_file]\n" +" -c use as configuration file\n" +" -dnn set debug level to nn\n" +" -f run in foreground (for debugging)\n" +" -g set groupid to group\n" +" -p proceed despite I/O errors\n" +" -s no signals (for debugging)\n" +" -t test - read config and exit\n" +" -u userid to \n" +" -v verbose user messages\n" +" -? print this message.\n" +"\n" +msgstr "" + +#: src/stored/stored.c:243 +msgid "Volume Session Time is ZERO!\n" +msgstr "" + +#: src/stored/stored.c:256 +#, c-format +msgid "Unable to create thread. ERR=%s\n" +msgstr "" + +#: src/stored/stored.c:294 +#, c-format +msgid "Only one Storage resource permitted in %s\n" +msgstr "" + +#: src/stored/stored.c:299 +#, c-format +msgid "No Director resource defined in %s. Cannot continue.\n" +msgstr "" + +#: src/stored/stored.c:304 +#, c-format +msgid "No Device resource defined in %s. Cannot continue.\n" +msgstr "" + +#: src/stored/stored.c:312 +#, c-format +msgid "No Messages resource defined in %s. Cannot continue.\n" +msgstr "" + +#: src/stored/stored.c:339 +#, c-format +msgid "\"TLS Certificate\" file not defined for Storage \"%s\" in %s.\n" +msgstr "" + +#: src/stored/stored.c:345 +#, c-format +msgid "\"TLS Key\" file not defined for Storage \"%s\" in %s.\n" +msgstr "" + +#: src/stored/stored.c:351 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Storage \"%s\" in %s. 
At least one CA certificate store is required when " +"using \"TLS Verify Peer\".\n" +msgstr "" + +#: src/stored/stored.c:482 +#, c-format +msgid "Could not initialize %s\n" +msgstr "" + +#: src/stored/stored.c:495 +#, c-format +msgid "Could not open device %s\n" +msgstr "" + +#: src/stored/stored.c:508 +#, c-format +msgid "Could not mount device %s\n" +msgstr "" + +#: src/stored/stored_conf.c:234 +#, c-format +msgid "Expected a Device Type keyword, got: %s" +msgstr "" + +#: src/stored/stored_conf.c:249 +#, c-format +msgid "Warning: no \"%s\" resource (%d) defined.\n" +msgstr "" + +#: src/stored/stored_conf.c:252 +#, c-format +msgid "dump_resource type=%d\n" +msgstr "" + +#: src/stored/stored_conf.c:368 +#, c-format +msgid "Warning: unknown resource type %d\n" +msgstr "" + +#: src/stored/stored_conf.c:557 +#, c-format +msgid "\"%s\" item is required in \"%s\" resource, but not found.\n" +msgstr "" + +#: src/stored/stored_conf.c:563 +#, c-format +msgid "Too many items in \"%s\" resource\n" +msgstr "" + +#: src/stored/stored_conf.c:597 +#, c-format +msgid "Cannot find AutoChanger resource %s\n" +msgstr "" + +#: src/stored/stored_conf.c:669 +#, c-format +msgid "" +"Attempt to define second \"%s\" resource named \"%s\" is not permitted.\n" +msgstr "" + +#: src/stored/wait.c:127 +#, c-format +msgid "pthread timedwait error. ERR=%s\n" +msgstr "" + +#: src/stored/wait.c:217 +#, c-format +msgid "JobId=%s, Job %s waiting to reserve a device.\n" +msgstr "" + +#: src/tools/bregex.c:147 src/tools/bwild.c:122 +#, c-format +msgid "Could not open data file: %s\n" +msgstr "" + +#: src/tools/bsmtp.c:117 +#, c-format +msgid "Fatal malformed reply from %s: %s\n" +msgstr "" + +#: src/tools/bsmtp.c:125 +#, c-format +msgid "Fatal fgets error: ERR=%s\n" +msgstr "" + +#: src/tools/bsmtp.c:156 +#, c-format +msgid "" +"\n" +"Usage: %s [-f from] [-h mailhost] [-s subject] [-c copy] [recipient ...]\n" +" -8 set charset utf-8\n" +" -c set the Cc: field\n" +" -dnn set debug level to nn\n" +" -f set the From: field\n" +" -h use mailhost:port as the SMTP server\n" +" -s set the Subject: field\n" +" -r set the Reply-To: field\n" +" -l set the maximum number of lines that should be sent " +"(default: unlimited)\n" +" -? 
print this message.\n" +"\n" +msgstr "" + +#: src/tools/bsmtp.c:287 +msgid "Fatal error: no recipient given.\n" +msgstr "" + +#: src/tools/bsmtp.c:316 +#, c-format +msgid "Fatal gethostname error: ERR=%s\n" +msgstr "" + +#: src/tools/bsmtp.c:320 +#, c-format +msgid "Fatal gethostbyname for myself failed \"%s\": ERR=%s\n" +msgstr "" + +#: src/tools/bsmtp.c:356 +#, c-format +msgid "Error unknown mail host \"%s\": ERR=%s\n" +msgstr "" + +#: src/tools/bsmtp.c:359 +msgid "Retrying connection using \"localhost\".\n" +msgstr "" + +#: src/tools/bsmtp.c:367 +#, c-format +msgid "Fatal error: Unknown address family for smtp host: %d\n" +msgstr "" + +#: src/tools/bsmtp.c:376 src/tools/bsmtp.c:381 +#, c-format +msgid "Fatal socket error: ERR=%s\n" +msgstr "" + +#: src/tools/bsmtp.c:386 +#, c-format +msgid "Fatal connect error to %s: ERR=%s\n" +msgstr "" + +#: src/tools/bsmtp.c:394 +#, c-format +msgid "Fatal _open_osfhandle error: ERR=%s\n" +msgstr "" + +#: src/tools/bsmtp.c:401 src/tools/bsmtp.c:405 src/tools/bsmtp.c:414 +#: src/tools/bsmtp.c:418 +#, c-format +msgid "Fatal fdopen error: ERR=%s\n" +msgstr "" + +#: src/tools/bsmtp.c:410 +#, c-format +msgid "Fatal dup error: ERR=%s\n" +msgstr "" + +#: src/tools/dbcheck.c:175 +msgid "" +"Warning skipping the additional parameters for working directory/dbname/user/" +"password/host.\n" +msgstr "" + +#: src/tools/dbcheck.c:191 +#, c-format +msgid "Error can not find the Catalog name[%s] in the given config file [%s]\n" +msgstr "" + +#: src/tools/dbcheck.c:193 +#, c-format +msgid "Error there is no Catalog section in the given config file [%s]\n" +msgstr "" + +#: src/tools/dbcheck.c:202 +msgid "Error no Director resource defined.\n" +msgstr "" + +#: src/tools/dbcheck.c:216 +msgid "Wrong number of arguments.\n" +msgstr "" + +#: src/tools/dbcheck.c:221 +msgid "Working directory not supplied.\n" +msgstr "" + +#: src/tools/dbcheck.c:285 +#, c-format +msgid "Hello, this is the database check/correct program.\n" +msgstr "" + +#: src/tools/dbcheck.c:287 +#, c-format +msgid "Modify database is on." +msgstr "" + +#: src/tools/dbcheck.c:289 +#, c-format +msgid "Modify database is off." 
+msgstr "" + +#: src/tools/dbcheck.c:291 src/tools/dbcheck.c:352 +#, c-format +msgid " Verbose is on.\n" +msgstr "" + +#: src/tools/dbcheck.c:293 src/tools/dbcheck.c:354 +#, c-format +msgid " Verbose is off.\n" +msgstr "" + +#: src/tools/dbcheck.c:295 +#, c-format +msgid "Please select the function you want to perform.\n" +msgstr "" + +#: src/tools/dbcheck.c:299 +#, c-format +msgid "" +"\n" +" 1) Toggle modify database flag\n" +" 2) Toggle verbose flag\n" +" 3) Repair bad Filename records\n" +" 4) Repair bad Path records\n" +" 5) Eliminate duplicate Filename records\n" +" 6) Eliminate duplicate Path records\n" +" 7) Eliminate orphaned Jobmedia records\n" +" 8) Eliminate orphaned File records\n" +" 9) Eliminate orphaned Path records\n" +" 10) Eliminate orphaned Filename records\n" +" 11) Eliminate orphaned FileSet records\n" +" 12) Eliminate orphaned Client records\n" +" 13) Eliminate orphaned Job records\n" +" 14) Eliminate all Admin records\n" +" 15) Eliminate all Restore records\n" +" 16) All (3-15)\n" +" 17) Quit\n" +msgstr "" + +#: src/tools/dbcheck.c:318 +#, c-format +msgid "" +"\n" +" 1) Toggle modify database flag\n" +" 2) Toggle verbose flag\n" +" 3) Check for bad Filename records\n" +" 4) Check for bad Path records\n" +" 5) Check for duplicate Filename records\n" +" 6) Check for duplicate Path records\n" +" 7) Check for orphaned Jobmedia records\n" +" 8) Check for orphaned File records\n" +" 9) Check for orphaned Path records\n" +" 10) Check for orphaned Filename records\n" +" 11) Check for orphaned FileSet records\n" +" 12) Check for orphaned Client records\n" +" 13) Check for orphaned Job records\n" +" 14) Check for all Admin records\n" +" 15) Check for all Restore records\n" +" 16) All (3-15)\n" +" 17) Quit\n" +msgstr "" + +#: src/tools/dbcheck.c:338 +msgid "Select function number: " +msgstr "" + +#: src/tools/dbcheck.c:345 +#, c-format +msgid "Database will be modified.\n" +msgstr "" + +#: src/tools/dbcheck.c:347 +#, c-format +msgid "Database will NOT be modified.\n" +msgstr "" + +#: src/tools/dbcheck.c:437 +#, c-format +msgid "JobId=%s Name=\"%s\" StartTime=%s\n" +msgstr "" + +#: src/tools/dbcheck.c:445 +#, c-format +msgid "Orphaned JobMediaId=%s JobId=%s Volume=\"%s\"\n" +msgstr "" + +#: src/tools/dbcheck.c:452 +#, c-format +msgid "Orphaned FileId=%s JobId=%s Volume=\"%s\"\n" +msgstr "" + +#: src/tools/dbcheck.c:459 +#, c-format +msgid "Orphaned FileSetId=%s FileSet=\"%s\" MD5=%s\n" +msgstr "" + +#: src/tools/dbcheck.c:466 +#, c-format +msgid "Orphaned ClientId=%s Name=\"%s\"\n" +msgstr "" + +#: src/tools/dbcheck.c:520 +#, c-format +msgid "Deleting: %s\n" +msgstr "" + +#: src/tools/dbcheck.c:594 +#, c-format +msgid "Checking for duplicate Filename entries.\n" +msgstr "" + +#: src/tools/dbcheck.c:603 +#, c-format +msgid "Found %d duplicate Filename records.\n" +msgstr "" + +#: src/tools/dbcheck.c:604 +msgid "Print the list? (yes/no): " +msgstr "" + +#: src/tools/dbcheck.c:623 src/tools/dbcheck.c:681 +#, c-format +msgid "Found %d for: %s\n" +msgstr "" + +#: src/tools/dbcheck.c:651 +#, c-format +msgid "Checking for duplicate Path entries.\n" +msgstr "" + +#: src/tools/dbcheck.c:661 +#, c-format +msgid "Found %d duplicate Path records.\n" +msgstr "" + +#: src/tools/dbcheck.c:662 src/tools/dbcheck.c:716 src/tools/dbcheck.c:760 +#: src/tools/dbcheck.c:803 src/tools/dbcheck.c:842 src/tools/dbcheck.c:880 +#: src/tools/dbcheck.c:921 src/tools/dbcheck.c:962 src/tools/dbcheck.c:1000 +#: src/tools/dbcheck.c:1033 src/tools/dbcheck.c:1070 src/tools/dbcheck.c:1134 +msgid "Print them? 
(yes/no): " +msgstr "" + +#: src/tools/dbcheck.c:709 +#, c-format +msgid "Checking for orphaned JobMedia entries.\n" +msgstr "" + +#: src/tools/dbcheck.c:715 +#, c-format +msgid "Found %d orphaned JobMedia records.\n" +msgstr "" + +#: src/tools/dbcheck.c:733 +#, c-format +msgid "Deleting %d orphaned JobMedia records.\n" +msgstr "" + +#: src/tools/dbcheck.c:750 +#, c-format +msgid "Checking for orphaned File entries. This may take some time!\n" +msgstr "" + +#: src/tools/dbcheck.c:759 +#, c-format +msgid "Found %d orphaned File records.\n" +msgstr "" + +#: src/tools/dbcheck.c:776 +#, c-format +msgid "Deleting %d orphaned File records.\n" +msgstr "" + +#: src/tools/dbcheck.c:793 +#, c-format +msgid "Checking for orphaned Path entries. This may take some time!\n" +msgstr "" + +#: src/tools/dbcheck.c:802 +#, c-format +msgid "Found %d orphaned Path records.\n" +msgstr "" + +#: src/tools/dbcheck.c:815 +#, c-format +msgid "Deleting %d orphaned Path records.\n" +msgstr "" + +#: src/tools/dbcheck.c:832 +#, c-format +msgid "Checking for orphaned Filename entries. This may take some time!\n" +msgstr "" + +#: src/tools/dbcheck.c:841 +#, c-format +msgid "Found %d orphaned Filename records.\n" +msgstr "" + +#: src/tools/dbcheck.c:854 +#, c-format +msgid "Deleting %d orphaned Filename records.\n" +msgstr "" + +#: src/tools/dbcheck.c:869 +#, c-format +msgid "Checking for orphaned FileSet entries. This takes some time!\n" +msgstr "" + +#: src/tools/dbcheck.c:879 +#, c-format +msgid "Found %d orphaned FileSet records.\n" +msgstr "" + +#: src/tools/dbcheck.c:894 +#, c-format +msgid "Deleting %d orphaned FileSet records.\n" +msgstr "" + +#: src/tools/dbcheck.c:903 +#, c-format +msgid "Checking for orphaned Client entries.\n" +msgstr "" + +#: src/tools/dbcheck.c:920 +#, c-format +msgid "Found %d orphaned Client records.\n" +msgstr "" + +#: src/tools/dbcheck.c:935 +#, c-format +msgid "Deleting %d orphaned Client records.\n" +msgstr "" + +#: src/tools/dbcheck.c:944 +#, c-format +msgid "Checking for orphaned Job entries.\n" +msgstr "" + +#: src/tools/dbcheck.c:961 +#, c-format +msgid "Found %d orphaned Job records.\n" +msgstr "" + +#: src/tools/dbcheck.c:976 +#, c-format +msgid "Deleting %d orphaned Job records.\n" +msgstr "" + +#: src/tools/dbcheck.c:978 +#, c-format +msgid "Deleting JobMedia records of orphaned Job records.\n" +msgstr "" + +#: src/tools/dbcheck.c:980 +#, c-format +msgid "Deleting Log records of orphaned Job records.\n" +msgstr "" + +#: src/tools/dbcheck.c:990 +#, c-format +msgid "Checking for Admin Job entries.\n" +msgstr "" + +#: src/tools/dbcheck.c:999 +#, c-format +msgid "Found %d Admin Job records.\n" +msgstr "" + +#: src/tools/dbcheck.c:1014 +#, c-format +msgid "Deleting %d Admin Job records.\n" +msgstr "" + +#: src/tools/dbcheck.c:1023 +#, c-format +msgid "Checking for Restore Job entries.\n" +msgstr "" + +#: src/tools/dbcheck.c:1032 +#, c-format +msgid "Found %d Restore Job records.\n" +msgstr "" + +#: src/tools/dbcheck.c:1047 +#, c-format +msgid "Deleting %d Restore Job records.\n" +msgstr "" + +#: src/tools/dbcheck.c:1060 +#, c-format +msgid "Checking for Filenames with a trailing slash\n" +msgstr "" + +#: src/tools/dbcheck.c:1069 +#, c-format +msgid "Found %d bad Filename records.\n" +msgstr "" + +#: src/tools/dbcheck.c:1087 src/tools/dbcheck.c:1150 +#, c-format +msgid "Reparing %d bad Filename records.\n" +msgstr "" + +#: src/tools/dbcheck.c:1124 +#, c-format +msgid "Checking for Paths without a trailing slash\n" +msgstr "" + +#: src/tools/dbcheck.c:1133 +#, c-format +msgid "Found %d 
bad Path records.\n" +msgstr "" + +#: src/tools/drivetype.c:47 +#, c-format +msgid "" +"\n" +"Usage: drivetype [-v] path ...\n" +"\n" +" Print the drive type a given file/directory is on.\n" +" The following options are supported:\n" +"\n" +" -v print both path and file system type.\n" +" -? print this message.\n" +"\n" +msgstr "" + +#: src/tools/drivetype.c:101 src/tools/fstype.c:101 +#, c-format +msgid "%s: unknown\n" +msgstr "" + +#: src/tools/fstype.c:47 +#, c-format +msgid "" +"\n" +"Usage: fstype [-v] path ...\n" +"\n" +" Print the file system type a given file/directory is on.\n" +" The following options are supported:\n" +"\n" +" -v print both path and file system type.\n" +" -? print this message.\n" +"\n" +msgstr "" + +#: src/tools/testfind.c:66 +#, c-format +msgid "" +"\n" +"Usage: testfind [-d debug_level] [-] [pattern1 ...]\n" +" -a print extended attributes (Win32 debug)\n" +" -dnn set debug level to nn\n" +" -c specify config file containing FileSet resources\n" +" -f specify which FileSet to use\n" +" -? print this message.\n" +"\n" +"Patterns are used for file inclusion -- normally directories.\n" +"Debug level >= 1 prints each file found.\n" +"Debug level >= 10 prints path/file for catalog.\n" +"Errors are always printed.\n" +"Files/paths truncated is the number of files/paths with len > 255.\n" +"Truncation is only in the catalog.\n" +"\n" +msgstr "" + +#: src/tools/testfind.c:225 +#, c-format +msgid "" +"\n" +"Total files : %d\n" +"Max file length: %d\n" +"Max path length: %d\n" +"Files truncated: %d\n" +"Paths truncated: %d\n" +"Hard links : %d\n" +msgstr "" + +#: src/tools/testfind.c:265 +#, c-format +msgid "Reg: %s\n" +msgstr "" + +#: src/tools/testfind.c:287 +msgid "\t[will not descend: recursion turned off]" +msgstr "" + +#: src/tools/testfind.c:289 +msgid "\t[will not descend: file system change not allowed]" +msgstr "" + +#: src/tools/testfind.c:291 +msgid "\t[will not descend: disallowed file system]" +msgstr "" + +#: src/tools/testfind.c:293 +msgid "\t[will not descend: disallowed drive type]" +msgstr "" + +#: src/tools/testfind.c:309 src/tools/testls.c:188 +#, c-format +msgid "Err: Could not access %s: %s\n" +msgstr "" + +#: src/tools/testfind.c:312 src/tools/testls.c:191 +#, c-format +msgid "Err: Could not follow ff->link %s: %s\n" +msgstr "" + +#: src/tools/testfind.c:315 src/tools/testls.c:194 +#, c-format +msgid "Err: Could not stat %s: %s\n" +msgstr "" + +#: src/tools/testfind.c:318 src/tools/testls.c:197 +#, c-format +msgid "Skip: File not saved. No change. %s\n" +msgstr "" + +#: src/tools/testfind.c:321 src/tools/testls.c:200 +#, c-format +msgid "Err: Attempt to backup archive. Not saved. %s\n" +msgstr "" + +#: src/tools/testfind.c:324 src/tools/testls.c:209 +#, c-format +msgid "Err: Could not open directory %s: %s\n" +msgstr "" + +#: src/tools/testfind.c:327 src/tools/testls.c:212 +#, c-format +msgid "Err: Unknown file ff->type %d: %s\n" +msgstr "" + +#: src/tools/testfind.c:377 +#, c-format +msgid "===== Filename truncated to 255 chars: %s\n" +msgstr "" + +#: src/tools/testfind.c:394 +#, c-format +msgid "========== Path name truncated to 255 chars: %s\n" +msgstr "" + +#: src/tools/testfind.c:403 +#, c-format +msgid "========== Path length is zero. 
File=%s\n" +msgstr "" + +#: src/tools/testfind.c:406 +#, c-format +msgid "Path: %s\n" +msgstr "" + +#: src/tools/testls.c:55 +#, c-format +msgid "" +"\n" +"Usage: testls [-d debug_level] [-] [pattern1 ...]\n" +" -a print extended attributes (Win32 debug)\n" +" -dnn set debug level to nn\n" +" -e specify file of exclude patterns\n" +" -i specify file of include patterns\n" +" - read pattern(s) from stdin\n" +" -? print this message.\n" +"\n" +"Patterns are file inclusion -- normally directories.\n" +"Debug level >= 1 prints each file found.\n" +"Debug level >= 10 prints path/file for catalog.\n" +"Errors always printed.\n" +"Files/paths truncated is number with len > 255.\n" +"Truncation is only in catalog.\n" +"\n" +msgstr "" + +#: src/tools/testls.c:140 +#, c-format +msgid "Could not open include file: %s\n" +msgstr "" + +#: src/tools/testls.c:153 +#, c-format +msgid "Could not open exclude file: %s\n" +msgstr "" + +#: src/tools/testls.c:203 +#, c-format +msgid "Recursion turned off. Directory not entered. %s\n" +msgstr "" + +#: src/tools/testls.c:206 +#, c-format +msgid "Skip: File system change prohibited. Directory not entered. %s\n" +msgstr "" + +#: src/tray-monitor/authenticate.c:88 +msgid "" +"Director authorization problem.\n" +"Most likely the passwords do not agree.\n" +"Please see http://www.bacula.org/rel-manual/faq.html#AuthorizationErrors for " +"help.\n" +msgstr "" + +#: src/tray-monitor/authenticate.c:138 +msgid "" +"Director and Storage daemon passwords or names not the same.\n" +"Please see http://www.bacula.org/rel-manual/faq.html#AuthorizationErrors for " +"help.\n" +msgstr "" + +#: src/tray-monitor/authenticate.c:145 +#, c-format +msgid "bdird set configuration file to file\n" +" -dnn set debug level to nn\n" +" -t test - read configuration and exit\n" +" -? print this message.\n" +"\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:262 +#, c-format +msgid "" +"Error: %d Monitor resources defined in %s. You must define one and only one " +"Monitor resource.\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:293 +#, c-format +msgid "" +"No Client, Storage or Director resource defined in %s\n" +"Without that I don't how to get status from the File, Storage or Director " +"Daemon :-(\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:315 +#, c-format +msgid "" +"Invalid refresh interval defined in %s\n" +"This value must be greater or equal to 1 second and less or equal to 10 " +"minutes (read value: %d).\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:330 +msgid "Open status window..." +msgstr "" + +#: src/tray-monitor/tray-monitor.c:336 +msgid "Exit" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:348 +msgid "Bacula tray monitor" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:373 +msgid " (DIR)" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:377 +msgid " (FD)" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:381 +msgid " (SD)" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:394 +msgid "Unknown status." 
+msgstr "" + +#: src/tray-monitor/tray-monitor.c:464 +msgid "Refresh interval in seconds: " +msgstr "" + +#: src/tray-monitor/tray-monitor.c:472 +msgid "Refresh now" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:476 +msgid "About" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:480 +msgid "Close" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:500 +#, c-format +msgid "Disconnecting from Director %s:%d\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:503 +#, c-format +msgid "Disconnecting from Client %s:%d\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:506 +#, c-format +msgid "Disconnecting from Storage %s:%d\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:543 src/tray-monitor/tray-monitor.c:554 +msgid "Bacula Tray Monitor" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:545 src/tray-monitor/tray-monitor.c:556 +msgid "Written by Nicolas Boichat\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:546 src/tray-monitor/tray-monitor.c:557 +msgid "Version" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:625 +#, c-format +msgid "Error, currentitem is not a Client or a Storage..\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:699 +#, c-format +msgid "" +"Current job: %s\n" +"Last job: %s" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:711 +#, c-format +msgid " (%d errors)" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:714 +#, c-format +msgid " (%d error)" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:752 +msgid "No current job." +msgstr "" + +#: src/tray-monitor/tray-monitor.c:755 +msgid "No last job." +msgstr "" + +#: src/tray-monitor/tray-monitor.c:763 +msgid "Job status: Created" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:768 +msgid "Job status: Running" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:772 +msgid "Job status: Blocked" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:777 +msgid "Job status: Terminated" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:782 +msgid "Job status: Terminated in error" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:788 +msgid "Job status: Error" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:792 +msgid "Job status: Fatal error" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:797 +msgid "Job status: Verify differences" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:802 +msgid "Job status: Canceled" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:807 +msgid "Job status: Waiting on File daemon" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:812 +msgid "Job status: Waiting on the Storage daemon" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:817 +msgid "Job status: Waiting for new media" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:822 +msgid "Job status: Waiting for Mount" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:827 +msgid "Job status: Waiting for storage resource" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:832 +msgid "Job status: Waiting for job resource" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:837 +msgid "Job status: Waiting for Client resource" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:842 +msgid "Job status: Waiting for maximum jobs" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:847 +msgid "Job status: Waiting for start time" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:852 +msgid "Job status: Waiting for higher priority jobs to finish" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:857 +#, c-format +msgid "Unknown job status %c." 
+msgstr "" + +#: src/tray-monitor/tray-monitor.c:858 +#, c-format +msgid "Job status: Unknown(%c)" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:865 +#, c-format +msgid "Bad scan : '%s' %d\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:906 +#, c-format +msgid "Connecting to Director %s:%d" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:912 +#, c-format +msgid "Connecting to Client %s:%d\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:913 +#, c-format +msgid "Connecting to Client %s:%d" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:919 +#, c-format +msgid "Connecting to Storage %s:%d\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:920 +#, c-format +msgid "Connecting to Storage %s:%d" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:925 src/tray-monitor/tray-monitor.c:963 +#, c-format +msgid "Error, currentitem is not a Client, a Storage or a Director..\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:931 +msgid "Cannot connect to daemon.\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:932 +msgid "Cannot connect to daemon." +msgstr "" + +#: src/tray-monitor/tray-monitor.c:944 +#, c-format +msgid "Authentication error : %s" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:951 +msgid "Opened connection with Director daemon.\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:952 +msgid "Opened connection with Director daemon." +msgstr "" + +#: src/tray-monitor/tray-monitor.c:955 +msgid "Opened connection with File daemon.\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:956 +msgid "Opened connection with File daemon." +msgstr "" + +#: src/tray-monitor/tray-monitor.c:959 +msgid "Opened connection with Storage daemon.\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:960 +msgid "Opened connection with Storage daemon." +msgstr "" + +#: src/tray-monitor/tray-monitor.c:997 +msgid "<< Error: BNET_PROMPT signal received. >>\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:1002 src/wx-console/console_thread.cpp:486 +msgid "<< Heartbeat signal received, answered. >>\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:1006 +#, c-format +msgid "<< Unexpected signal received : %s >>\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:1011 +msgid "\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:1015 +msgid "Error : BNET_HARDEOF or BNET_ERROR" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:1021 +msgid "\n" +msgstr "" + +#: src/tray-monitor/tray-monitor.c:1025 +msgid "Error : Connection closed." 
+msgstr "" + +#: src/tray-monitor/tray_conf.c:177 +#, c-format +msgid "Monitor: name=%s FDtimeout=%s SDtimeout=%s\n" +msgstr "" + +#: src/tray-monitor/tray_conf.c:183 +#, c-format +msgid "Director: name=%s address=%s FDport=%d\n" +msgstr "" + +#: src/tray-monitor/tray_conf.c:187 +#, c-format +msgid "Client: name=%s address=%s FDport=%d\n" +msgstr "" + +#: src/tray-monitor/tray_conf.c:191 +#, c-format +msgid "Storage: name=%s address=%s SDport=%d\n" +msgstr "" + +#: src/wx-console/authenticate.c:150 +msgid "Bad response to Hello command: ERR=" +msgstr "" + +#: src/baconfig.h:72 +#, c-format +msgid "Failed ASSERT: %s\n" +msgstr "" + +#: src/baconfig.h:79 +msgid "*None*" +msgstr "" + +#: src/win32/dird/winmain.cpp:193 src/win32/dird/winmain.cpp:200 +#: src/win32/filed/winmain.cpp:224 src/win32/filed/winmain.cpp:231 +#: src/win32/stored/baculasd/winmain.cpp:225 +#: src/win32/stored/baculasd/winmain.cpp:232 +msgid "Bacula Usage" +msgstr "" + +#: src/win32/dird/winmain.cpp:197 src/win32/filed/winmain.cpp:228 +#: src/win32/stored/baculasd/winmain.cpp:229 +msgid "Bad Command Line Options" +msgstr "" + +#: src/win32/dird/winservice.cpp:119 src/win32/filed/winservice.cpp:238 +#: src/win32/stored/baculasd/winservice.cpp:169 +msgid "StartServiceCtrlDispatcher failed." +msgstr "" + +#: src/win32/dird/winservice.cpp:135 src/win32/filed/winservice.cpp:256 +#: src/win32/stored/baculasd/winservice.cpp:185 +msgid "RegisterServiceCtlHandler failed" +msgstr "" + +#: src/win32/dird/winservice.cpp:136 src/win32/filed/winservice.cpp:257 +#: src/win32/stored/baculasd/winservice.cpp:186 +msgid "Contact Register Service Handler failure" +msgstr "" + +#: src/win32/dird/winservice.cpp:152 src/win32/filed/winservice.cpp:273 +#: src/win32/stored/baculasd/winservice.cpp:202 +msgid "ReportStatus STOPPED failed 1" +msgstr "" + +#: src/win32/dird/winservice.cpp:175 src/win32/filed/winservice.cpp:296 +#: src/win32/stored/baculasd/winservice.cpp:225 +msgid "Report Service failure" +msgstr "" + +#: src/win32/dird/winservice.cpp:211 +msgid "Unable to install Bacula Director service" +msgstr "" + +#: src/win32/dird/winservice.cpp:219 src/win32/filed/winservice.cpp:340 +#: src/win32/stored/baculasd/winservice.cpp:269 +msgid "Service command length too long" +msgstr "" + +#: src/win32/dird/winservice.cpp:220 src/win32/filed/winservice.cpp:341 +#: src/win32/stored/baculasd/winservice.cpp:270 +msgid "Service command length too long. Service not registered." +msgstr "" + +#: src/win32/dird/winservice.cpp:233 +msgid "" +"The Service Control Manager could not be contacted - the Bacula Director " +"service was not installed" +msgstr "" + +#: src/win32/dird/winservice.cpp:258 +msgid "The Bacula Director service could not be installed" +msgstr "" + +#: src/win32/dird/winservice.cpp:264 +msgid "Provides director services. Bacula -- the network backup solution." +msgstr "" + +#: src/win32/dird/winservice.cpp:272 +msgid "" +"The Bacula Director service was successfully installed.\n" +"The service may be started from the Control Panel and will\n" +"automatically be run the next time this machine is rebooted." 
+msgstr "" + +#: src/win32/dird/winservice.cpp:311 +msgid "The Bacula Director service could not be stopped" +msgstr "" + +#: src/win32/dird/winservice.cpp:318 +msgid "The Bacula Director service has been removed" +msgstr "" + +#: src/win32/dird/winservice.cpp:321 +msgid "The Bacula Director service could not be removed" +msgstr "" + +#: src/win32/dird/winservice.cpp:326 +msgid "The Bacula Director service could not be found" +msgstr "" + +#: src/win32/dird/winservice.cpp:331 +msgid "" +"The SCM could not be contacted - the Bacula Director service was not removed" +msgstr "" + +#: src/win32/dird/winservice.cpp:393 src/win32/filed/winservice.cpp:604 +#: src/win32/stored/baculasd/winservice.cpp:443 +msgid "SetServiceStatus failed" +msgstr "" + +#: src/win32/dird/winservice.cpp:421 src/win32/filed/winservice.cpp:632 +#: src/win32/stored/baculasd/winservice.cpp:471 +#, c-format +msgid "" +"\n" +"\n" +"%s error: %ld at %s:%d" +msgstr "" + +#: src/win32/dird/winservice.cpp:496 src/win32/filed/winservice.cpp:706 +#: src/win32/stored/baculasd/winservice.cpp:546 +#, c-format +msgid "Locked by: %s, duration: %ld seconds\n" +msgstr "" + +#: src/win32/dird/winservice.cpp:500 src/win32/filed/winservice.cpp:710 +#: src/win32/stored/baculasd/winservice.cpp:550 +#, c-format +msgid "No longer locked\n" +msgstr "" + +#: src/win32/dird/winservice.cpp:504 src/win32/filed/winservice.cpp:714 +#: src/win32/stored/baculasd/winservice.cpp:554 +msgid "Could not lock database" +msgstr "" + +#: src/win32/filed/winmain.cpp:326 src/win32/stored/baculasd/winmain.cpp:326 +msgid "Another instance of Bacula is already running" +msgstr "" + +#: src/win32/filed/winservice.cpp:111 src/win32/filed/winservice.cpp:125 +msgid "No existing instance of Bacula File service could be contacted" +msgstr "" + +#: src/win32/filed/winservice.cpp:196 +msgid "KERNEL32.DLL not found: Bacula service not started" +msgstr "" + +#: src/win32/filed/winservice.cpp:206 +msgid "Registry service not found: Bacula service not started" +msgstr "" + +#: src/win32/filed/winservice.cpp:208 +msgid "Registry service not found" +msgstr "" + +#: src/win32/filed/winservice.cpp:332 +msgid "Unable to install Bacula File service" +msgstr "" + +#: src/win32/filed/winservice.cpp:356 +msgid "Cannot write System Registry" +msgstr "" + +#: src/win32/filed/winservice.cpp:357 +msgid "" +"The System Registry could not be updated - the Bacula service was not " +"installed" +msgstr "" + +#: src/win32/filed/winservice.cpp:364 +msgid "Cannot add Bacula key to System Registry" +msgstr "" + +#: src/win32/filed/winservice.cpp:365 src/win32/filed/winservice.cpp:418 +msgid "The Bacula service could not be installed" +msgstr "" + +#: src/win32/filed/winservice.cpp:374 +msgid "" +"The Bacula File service was successfully installed.\n" +"The service may be started by double clicking on the\n" +"Bacula \"Start\" icon and will be automatically\n" +"be run the next time this machine is rebooted. " +msgstr "" + +#: src/win32/filed/winservice.cpp:393 +msgid "" +"The Service Control Manager could not be contacted - the Bacula service was " +"not installed" +msgstr "" + +#: src/win32/filed/winservice.cpp:424 +msgid "" +"Provides file backup and restore services. Bacula -- the network backup " +"solution." +msgstr "" + +#: src/win32/filed/winservice.cpp:432 +msgid "" +"The Bacula File service was successfully installed.\n" +"The service may be started from the Control Panel and will\n" +"automatically be run the next time this machine is rebooted." 
+msgstr "" + +#: src/win32/filed/winservice.cpp:442 +msgid "" +"Unknown Windows operating system.\n" +"Cannot install Bacula service.\n" +msgstr "" + +#: src/win32/filed/winservice.cpp:467 +msgid "" +"Could not find registry entry.\n" +"Service probably not registerd - the Bacula service was not removed" +msgstr "" + +#: src/win32/filed/winservice.cpp:472 +msgid "" +"Could not delete Registry key.\n" +"The Bacula service could not be removed" +msgstr "" + +#: src/win32/filed/winservice.cpp:482 +msgid "Bacula could not be contacted, probably not running" +msgstr "" + +#: src/win32/filed/winservice.cpp:489 +msgid "The Bacula service has been removed" +msgstr "" + +#: src/win32/filed/winservice.cpp:520 +msgid "The Bacula file service could not be stopped" +msgstr "" + +#: src/win32/filed/winservice.cpp:527 +msgid "The Bacula file service has been removed" +msgstr "" + +#: src/win32/filed/winservice.cpp:530 +msgid "The Bacula file service could not be removed" +msgstr "" + +#: src/win32/filed/winservice.cpp:535 +msgid "The Bacula file service could not be found" +msgstr "" + +#: src/win32/filed/winservice.cpp:540 +msgid "" +"The SCM could not be contacted - the Bacula file service was not removed" +msgstr "" + +#: src/win32/stored/baculasd/winservice.cpp:85 +#: src/win32/stored/baculasd/winservice.cpp:99 +msgid "No existing instance of Bacula storage service could be contacted" +msgstr "" + +#: src/win32/stored/baculasd/winservice.cpp:261 +msgid "Unable to install Bacula Storage service" +msgstr "" + +#: src/win32/stored/baculasd/winservice.cpp:283 +msgid "" +"The Service Control Manager could not be contacted - the Bacula Storage " +"service was not installed" +msgstr "" + +#: src/win32/stored/baculasd/winservice.cpp:308 +msgid "The Bacula Storage service could not be installed" +msgstr "" + +#: src/win32/stored/baculasd/winservice.cpp:314 +msgid "Provides storage services. Bacula -- the network backup solution." +msgstr "" + +#: src/win32/stored/baculasd/winservice.cpp:322 +msgid "" +"The Bacula Storage service was successfully installed.\n" +"The service may be started from the Control Panel and will\n" +"automatically be run the next time this machine is rebooted." 
+msgstr "" + +#: src/win32/stored/baculasd/winservice.cpp:361 +msgid "The Bacula Storage service could not be stopped" +msgstr "" + +#: src/win32/stored/baculasd/winservice.cpp:368 +msgid "The Bacula Storage service has been removed" +msgstr "" + +#: src/win32/stored/baculasd/winservice.cpp:371 +msgid "The Bacula Storage service could not be removed" +msgstr "" + +#: src/win32/stored/baculasd/winservice.cpp:376 +msgid "The Bacula Storage service could not be found" +msgstr "" + +#: src/win32/stored/baculasd/winservice.cpp:381 +msgid "" +"The SCM could not be contacted - the Bacula Storage service was not removed" +msgstr "" + +#: src/wx-console/console_thread.cpp:121 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Director \"%s\" in config file.\n" +"At least one CA certificate store is required.\n" +msgstr "" + +#: src/wx-console/console_thread.cpp:128 +msgid "" +"No Director resource defined in config file.\n" +"Without that I don't how to speak to the Director :-(\n" +msgstr "" + +#: src/wx-console/console_thread.cpp:147 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Console \"%s\" in config file.\n" +msgstr "" + +#: src/wx-console/console_thread.cpp:169 +msgid "Error while initializing windows sockets...\n" +msgstr "" + +#: src/wx-console/console_thread.cpp:185 +msgid "Error while cleaning up windows sockets...\n" +msgstr "" + +#: src/wx-console/console_thread.cpp:224 +msgid "Error while initializing library." +msgstr "" + +#: src/wx-console/console_thread.cpp:248 +msgid "Cryptographic library initialization failed.\n" +msgstr "" + +#: src/wx-console/console_thread.cpp:252 +msgid "Please correct configuration file.\n" +msgstr "" + +#: src/wx-console/console_thread.cpp:294 +msgid "Error : Library not initialized\n" +msgstr "" + +#: src/wx-console/console_thread.cpp:305 +msgid "Error : No configuration file loaded\n" +msgstr "" + +#: src/wx-console/console_thread.cpp:315 +msgid "Connecting...\n" +msgstr "" + +#: src/wx-console/console_thread.cpp:331 +msgid "Error : No director defined in config file.\n" +msgstr "" + +#: src/wx-console/console_thread.cpp:343 +msgid "Multiple directors found in your config file.\n" +msgstr "" + +#: src/wx-console/console_thread.cpp:352 +#, c-format +msgid "Please choose a director (1-%d): " +msgstr "" + +#: src/wx-console/console_thread.cpp:424 +msgid "Failed to connect to the director\n" +msgstr "" + +#: src/wx-console/console_thread.cpp:434 +msgid "Connected\n" +msgstr "" + +#: src/wx-console/console_thread.cpp:491 +msgid "<< Unexpected signal received : " +msgstr "" + +#: src/wx-console/console_thread.cpp:511 +msgid "Connection terminated\n" +msgstr "" + +#: src/wx-console/main.cpp:119 +msgid "Bacula bwx-console" +msgstr "" + +#: src/wx-console/main.cpp:124 src/wx-console/wxbmainframe.cpp:267 +#, c-format +msgid "Welcome to bacula bwx-console %s (%s)!\n" +msgstr "" + +#: src/wx-console/wxbconfigfileeditor.cpp:65 +msgid "Config file editor" +msgstr "" + +#: src/wx-console/wxbconfigfileeditor.cpp:75 +msgid "# Bacula bwx-console Configuration File\n" +msgstr "" + +#: src/wx-console/wxbconfigfileeditor.cpp:111 +msgid "Save and close" +msgstr "" + +#: src/wx-console/wxbconfigfileeditor.cpp:112 +msgid "Close without saving" +msgstr "" + +#: src/wx-console/wxbconfigfileeditor.cpp:139 +#, c-format +msgid "Unable to write to %s\n" +msgstr "" + +#: src/wx-console/wxbconfigfileeditor.cpp:140 +msgid "Error while saving" +msgstr "" + +#: 
src/wx-console/wxbconfigpanel.cpp:205 +msgid "Apply" +msgstr "" + +#: src/wx-console/wxbhistorytextctrl.cpp:82 +#: src/wx-console/wxbhistorytextctrl.cpp:153 +#: src/wx-console/wxbmainframe.cpp:291 +msgid "Type your command below:" +msgstr "" + +#: src/wx-console/wxbhistorytextctrl.cpp:117 +msgid "Unknown command." +msgstr "" + +#: src/wx-console/wxbhistorytextctrl.cpp:126 +msgid "Possible completions: " +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:246 +msgid "&About...\tF1" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:246 +msgid "Show about dialog" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:248 src/wx-console/wxbmainframe.cpp:620 +msgid "Connect to the director" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:249 +msgid "Disconnect" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:249 +msgid "Disconnect of the director" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:251 +msgid "Change of configuration file" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:251 +msgid "Change your default configuration file" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:252 +msgid "Edit your configuration file" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:254 +msgid "E&xit\tAlt-X" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:254 +msgid "Quit this program" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:258 +msgid "&File" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:259 +msgid "&Help" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:283 +msgid "" +"Warning : Unicode is disabled because you are using wxWidgets for GTK+ 1.2.\n" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:287 +msgid "" +"Warning : There is a problem with wxWidgets for GTK+ 2.0 without Unicode " +"support when handling non-ASCII filenames: Every non-ASCII character in such " +"filenames will be replaced by an interrogation mark.\n" +"If this behaviour disturbs you, please build bwx-console against a Unicode " +"version of wxWidgets for GTK+ 2.0.\n" +"---\n" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:298 +msgid "Send" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:375 src/wx-console/wxbmainframe.cpp:387 +msgid "Error while parsing command line arguments, using defaults.\n" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:376 src/wx-console/wxbmainframe.cpp:388 +msgid "Usage: bwx-console [-c configfile] [-w tmp]\n" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:417 +#, c-format +msgid "" +"It seems that it is the first time you run bwx-console.\n" +"This file (%s) has been choosen as default configuration file.\n" +"Do you want to edit it? (if you click No you will have to select another " +"file)" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:419 +msgid "First run" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:436 +#, c-format +msgid "" +"Unable to read %s\n" +"Error: %s\n" +"Do you want to choose another one? (Press no to edit this file)" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:438 +msgid "Unable to read configuration file" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:450 +msgid "Please choose a configuration file to use" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:463 +msgid "This configuration file has been successfully read, use it as default?" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:464 +msgid "Configuration file read successfully" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:474 +#, c-format +msgid "Using this configuration file: %s\n" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:479 +msgid "Connecting to the director..." 
+msgstr "" + +#: src/wx-console/wxbmainframe.cpp:494 +msgid "Failed to unregister a data parser !" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:502 +msgid "Quitting.\n" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:517 +msgid "" +"Welcome to Bacula bwx-console.\n" +"Written by Nicolas Boichat \n" +"Copyright (C), 2005-2007 Free Software Foundation Europe, e.V.\n" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:521 +msgid "About Bacula bwx-console" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:527 +msgid "Please choose your default configuration file" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:531 +msgid "Use this configuration file as default?" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:532 +msgid "Configuration file" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:603 +msgid "Console thread terminated." +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:611 +msgid "Connection to the director lost. Quit program?" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:612 +msgid "Connection lost" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:628 +msgid "Connected to the director." +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:651 +msgid "Reconnect" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:652 +msgid "Reconnect to the director" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:666 +msgid "Disconnected of the director." +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:685 src/wx-console/wxbrestorepanel.cpp:710 +msgid "Unexpected question has been received.\n" +msgstr "" + +#: src/wx-console/wxbmainframe.cpp:708 src/wx-console/wxbmainframe.cpp:725 +msgid "bwx-console: unexpected director's question." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:234 +#: src/wx-console/wxbrestorepanel.cpp:1920 +#: src/wx-console/wxbrestorepanel.cpp:1949 +msgid "Enter restore mode" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:237 +msgid "Cancel restore" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:263 +#: src/wx-console/wxbrestorepanel.cpp:317 +msgid "Add" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:265 +#: src/wx-console/wxbrestorepanel.cpp:319 +msgid "Remove" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:267 +#: src/wx-console/wxbrestorepanel.cpp:321 +msgid "Refresh" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:286 +msgid "M" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:290 +msgid "Filename" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:302 +msgid "Perm." 
+msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:335 +#: src/wx-console/wxbrestorepanel.cpp:348 +#: src/wx-console/wxbrestorepanel.cpp:503 +#: src/wx-console/wxbrestorepanel.cpp:513 +#: src/wx-console/wxbrestorepanel.cpp:516 +#: src/wx-console/wxbrestorepanel.cpp:1784 +#: src/wx-console/wxbrestorepanel.cpp:1870 +msgid "Job Name" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:337 +#: src/wx-console/wxbrestorepanel.cpp:353 +#: src/wx-console/wxbrestorepanel.cpp:455 +#: src/wx-console/wxbrestorepanel.cpp:456 +#: src/wx-console/wxbrestorepanel.cpp:466 +#: src/wx-console/wxbrestorepanel.cpp:467 +#: src/wx-console/wxbrestorepanel.cpp:1139 +#: src/wx-console/wxbrestorepanel.cpp:1212 +#: src/wx-console/wxbrestorepanel.cpp:1822 +#: src/wx-console/wxbrestorepanel.cpp:1824 +#: src/wx-console/wxbrestorepanel.cpp:1885 +msgid "Fileset" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:340 +#: src/wx-console/wxbrestorepanel.cpp:1206 +#: src/wx-console/wxbrestorepanel.cpp:1222 +#: src/wx-console/wxbrestorepanel.cpp:1224 +#: src/wx-console/wxbrestorepanel.cpp:1232 +#: src/wx-console/wxbrestorepanel.cpp:1234 +#: src/wx-console/wxbrestorepanel.cpp:1253 +#: src/wx-console/wxbrestorepanel.cpp:1260 +#: src/wx-console/wxbrestorepanel.cpp:1812 +#: src/wx-console/wxbrestorepanel.cpp:1823 +#: src/wx-console/wxbrestorepanel.cpp:1944 +msgid "Before" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:342 +msgid "Please configure parameters concerning files to restore :" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:351 +#: src/wx-console/wxbrestorepanel.cpp:1878 +msgid "always" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:351 +msgid "if newer" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:351 +msgid "if older" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:351 +#: src/wx-console/wxbrestorepanel.cpp:1881 +msgid "never" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:359 +msgid "Please configure parameters concerning files restoration :" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:427 +msgid "Getting parameters list." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:435 +msgid "Error : no clients returned by the director." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:459 +msgid "Error : no filesets returned by the director." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:483 +msgid "Error : no storage returned by the director." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:506 +#: src/wx-console/wxbrestorepanel.cpp:530 +msgid "Error : no jobs returned by the director." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:516 +msgid "RestoreFiles" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:551 +msgid "Please configure your restore parameters." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:562 +msgid "Building restore tree..." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:605 +msgid "Error while starting restore: " +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:695 +msgid "" +"Right click on a file or on a directory, or double-click on its mark to add " +"it to the restore list." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:732 +#: src/wx-console/wxbrestorepanel.cpp:754 +msgid "bwx-console: unexpected restore question." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:765 +msgid " files selected to be restored." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:770 +msgid " file selected to be restored." 
+msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:777 +#, c-format +msgid "Please configure your restore (%ld files selected to be restored)..." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:787 +msgid "Restore failed : no file selected.\n" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:788 +msgid "Restore failed : no file selected." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:798 +msgid "Restoring, please wait..." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:811 +msgid "Job queued. JobId=" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:813 +msgid "Restore queued, jobid=" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:817 +msgid "Job failed." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:818 +msgid "Restore failed, please look at messages.\n" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:819 +msgid "Restore failed, please look at messages in console." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:825 +#: src/wx-console/wxbrestorepanel.cpp:826 +msgid "Failed to retrieve jobid.\n" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:852 +msgid "" +"Restore is scheduled in more than two minutes, bwx-console will not wait for " +"its completion.\n" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:853 +msgid "" +"Restore is scheduled in more than two minutes, bwx-console will not wait for " +"its completion." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:879 +msgid "Restore job created, but not yet running." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:884 +#, c-format +msgid "Restore job running, please wait (%ld of %ld files restored)..." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:888 +msgid "Restore job terminated successfully." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:889 +msgid "Restore job terminated successfully.\n" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:894 +msgid "Restore job terminated in error, see messages in console." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:895 +msgid "Restore job terminated in error, see messages.\n" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:900 +msgid "Restore job reported a non-fatal error." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:904 +msgid "Restore job reported a fatal error." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:909 +msgid "Restore job cancelled by user." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:910 +msgid "Restore job cancelled by user.\n" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:915 +msgid "Restore job is waiting on File daemon." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:919 +msgid "Restore job is waiting for new media." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:923 +msgid "Restore job is waiting for storage resource." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:927 +msgid "Restore job is waiting for job resource." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:931 +msgid "Restore job is waiting for Client resource." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:935 +msgid "Restore job is waiting for maximum jobs." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:939 +msgid "Restore job is waiting for start time." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:943 +msgid "Restore job is waiting for higher priority jobs to finish." 
+msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:992 +msgid "" +"The restore job has not been started within one minute, bwx-console will not " +"wait for its completion anymore.\n" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:993 +msgid "" +"The restore job has not been started within one minute, bwx-console will not " +"wait for its completion anymore." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1003 +msgid "Restore done successfully.\n" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1004 +msgid "Restore done successfully." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1075 +msgid "Applying restore configuration changes..." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1126 +msgid "Failed to find the selected client." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1141 +msgid "Failed to find the selected fileset." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1156 +msgid "Failed to find the selected storage." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1173 +#: src/wx-console/wxbrestorepanel.cpp:1859 +msgid "Run Restore job" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1189 +msgid "Restore configuration changes were applied." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1198 +msgid "Restore cancelled.\n" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1199 +msgid "Restore cancelled." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1221 +msgid "No results to list." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1223 +msgid "No backup found for this client." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1230 +msgid "ERROR" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1231 +msgid "Query failed" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1233 +msgid "Cannot get previous backups list, see console." +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1869 +msgid "JobName:" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1873 +msgid "Where:" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1876 +msgid "Replace:" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1879 +msgid "ifnewer" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1880 +msgid "ifolder" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:1884 +msgid "FileSet:" +msgstr "" + +#: src/wx-console/wxbrestorepanel.cpp:2001 +msgid "Restoring..." +msgstr "" diff --git a/po/nl.po b/po/nl.po new file mode 100644 index 00000000..a972d556 --- /dev/null +++ b/po/nl.po @@ -0,0 +1,5618 @@ +# Dutch translation of Bacula +# Copyright (C) 2000-2016 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# W. van den Akker , 2012 +# +msgid "" +msgstr "" +"Project-Id-Version: Bacula 5.2.7\n" +"Report-Msgid-Bugs-To: bacula-devel@lists.sourceforge.net\n" +"POT-Creation-Date: 2018-08-11 21:43+0200\n" +"PO-Revision-Date: 2012-05-05 11:52+0100\n" +"Last-Translator: W. 
van den Akker \n" +"Language-Team: LANGUAGE \n" +"Language: nl\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: src/baconfig.h:62 src/baconfig.h:63 src/baconfig.h:68 src/baconfig.h:69 +#: src/baconfig.h:80 src/baconfig.h:81 +#, c-format +msgid "Failed ASSERT: %s\n" +msgstr "" + +#: src/baconfig.h:89 +msgid "*None*" +msgstr "*Geen*" + +#: src/lib/status.h:84 +msgid "" +"\n" +"Terminated Jobs:\n" +msgstr "" + +#: src/lib/status.h:91 +msgid " JobId Level Files Bytes Status Finished Name \n" +msgstr "" + +#: src/lib/status.h:93 +msgid "===================================================================\n" +msgstr "" + +#: src/lib/status.h:119 +msgid "Created" +msgstr "" + +#: src/lib/status.h:123 +msgid "Error" +msgstr "" + +#: src/lib/status.h:126 +msgid "Diffs" +msgstr "" + +#: src/lib/status.h:129 +msgid "Cancel" +msgstr "" + +#: src/lib/status.h:132 +msgid "OK" +msgstr "" + +#: src/lib/status.h:135 +msgid "OK -- with warnings" +msgstr "" + +#: src/lib/status.h:138 +msgid "Incomplete" +msgstr "" + +#: src/lib/status.h:141 +msgid "Other" +msgstr "" + +#: src/lib/status.h:153 +#, c-format +msgid "%6d\t%-6s\t%8s\t%10s\t%-7s\t%-8s\t%s\n" +msgstr "" + +#: src/lib/status.h:182 +#, c-format +msgid "%6d %-6s %8s %10s %-7s %-8s %s\n" +msgstr "" + +#: src/lib/status.h:214 src/lib/status.h:225 src/lib/status.h:239 +#: src/lib/status.h:243 src/lib/status.h:247 +msgid "Bacula " +msgstr "" + +#: src/qt-console/bat_conf.cpp:133 +#, c-format +msgid "No record for %d %s\n" +msgstr "" + +#: src/qt-console/bat_conf.cpp:142 +#, c-format +msgid "Director: name=%s address=%s DIRport=%d\n" +msgstr "" + +#: src/qt-console/bat_conf.cpp:146 +#, c-format +msgid "Console: name=%s\n" +msgstr "Console: naam=%s\n" + +#: src/qt-console/bat_conf.cpp:149 +#, c-format +msgid "ConsoleFont: name=%s font face=%s\n" +msgstr "" + +#: src/qt-console/bat_conf.cpp:153 src/qt-console/bat_conf.cpp:235 +#: src/qt-console/bat_conf.cpp:282 src/qt-console/bat_conf.cpp:312 +#, c-format +msgid "Unknown resource type %d\n" +msgstr "" + +#: src/qt-console/bat_conf.cpp:259 +#: src/qt-console/tray-monitor/tray_conf.cpp:311 +#, c-format +msgid "\"%s\" directive is required in \"%s\" resource, but not found.\n" +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:88 +#, c-format +msgid "Already connected\"%s\".\n" +msgstr "Al verbonden\"%s\".\n" + +#: src/qt-console/bcomm/dircomm.cpp:99 +#, c-format +msgid "Connecting to Director %s:%d" +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:101 +#, c-format +msgid "" +"Connecting to Director %s:%d\n" +"\n" +msgstr "" +"Verbinden met Director %s:%d\n" +"\n" + +#: src/qt-console/bcomm/dircomm.cpp:153 +#, c-format +msgid "Failed to initialize TLS context for Console \"%s\".\n" +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:176 +#, c-format +msgid "Failed to initialize TLS context for Director \"%s\".\n" +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:198 +#: src/qt-console/tray-monitor/task.cpp:233 +msgid "Director daemon" +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:236 +msgid "Initializing ..." +msgstr "Initialiseren ..." + +#: src/qt-console/bcomm/dircomm.cpp:252 src/qt-console/console/console.cpp:133 +msgid "Connected" +msgstr "Verbonden" + +#: src/qt-console/bcomm/dircomm.cpp:377 +msgid "Command completed ..." +msgstr "Opdracht gereed ..." + +#: src/qt-console/bcomm/dircomm.cpp:384 src/qt-console/console/console.cpp:370 +msgid "Processing command ..." +msgstr "Opdracht aan het uitvoeren ..." 
+ +#: src/qt-console/bcomm/dircomm.cpp:391 +msgid "At main prompt waiting for input ..." +msgstr "Wacht op invoer op opdrachtregel in hoofdscherm ..." + +#: src/qt-console/bcomm/dircomm.cpp:398 src/qt-console/bcomm/dircomm.cpp:408 +msgid "At prompt waiting for input ..." +msgstr "Wacht op invoer op opdrachtregel ..." + +#: src/qt-console/bcomm/dircomm.cpp:416 +msgid "Command failed." +msgstr "Opdracht mislukt." + +#: src/qt-console/bcomm/dircomm.cpp:488 +msgid "Director disconnected." +msgstr "Director niet verbonden." + +#: src/qt-console/bcomm/dircomm_auth.cpp:110 +#, c-format +msgid "Director authorization problem at \"%s:%d\"\n" +msgstr "" + +#: src/qt-console/bcomm/dircomm_auth.cpp:117 +#, c-format +msgid "" +"Authorization problem: Remote server at \"%s:%d\" did not advertise required " +"TLS support.\n" +msgstr "" + +#: src/qt-console/bcomm/dircomm_auth.cpp:125 +#, c-format +msgid "" +"Authorization problem with Director at \"%s:%d\": Remote server requires " +"TLS.\n" +msgstr "" + +#: src/qt-console/bcomm/dircomm_auth.cpp:136 +#, c-format +msgid "TLS negotiation failed with Director at \"%s:%d\"\n" +msgstr "" + +#: src/qt-console/bcomm/dircomm_auth.cpp:148 +#, c-format +msgid "" +"Bad response to Hello command: ERR=%s\n" +"The Director at \"%s:%d\" is probably not running.\n" +msgstr "" + +#: src/qt-console/bcomm/dircomm_auth.cpp:165 +#, c-format +msgid "Director at \"%s:%d\" rejected Hello command\n" +msgstr "" + +#: src/qt-console/bcomm/dircomm_auth.cpp:182 +#, c-format +msgid "" +"Authorization problem with Director at \"%s:%d\"\n" +"Most likely the passwords do not agree.\n" +"If you are using TLS, there may have been a certificate validation error " +"during the TLS handshake.\n" +"For help, please see " +msgstr "" + +#: src/qt-console/main.cpp:160 +msgid "Cryptography library initialization failed.\n" +msgstr "" + +#: src/qt-console/main.cpp:164 +#, c-format +msgid "Please correct configuration file: %s\n" +msgstr "" + +#: src/qt-console/main.cpp:188 +#, fuzzy, c-format +msgid "" +"\n" +"%sVersion: %s (%s) %s %s %s\n" +"\n" +"Usage: bat [-s] [-c config_file] [-d debug_level] [config_file]\n" +" -c set configuration file to file\n" +" -dnn set debug level to nn\n" +" -s no signals\n" +" -t test - read configuration and exit\n" +" -? print this message.\n" +"\n" +msgstr "" +"\n" +"Versie: %s (%s) %s %s %s\n" +"\n" +"Specificatie: bat [-s] [-c configuratiebestand] [-d debug_level] " +"[configuratiebestand]\n" +" -c gebruik configuratiebestand\n" +" -d instellen debug level op \n" +" -s geen signaleringen\n" +" -t test - lees configuratie en stop\n" +" -? geef deze melding.\n" +"\n" + +#: src/qt-console/main.cpp:221 src/qt-console/main.cpp:251 +msgid "TLS required but not configured in Bacula.\n" +msgstr "" + +#: src/qt-console/main.cpp:229 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Director \"%s\" in %s. 
At least one CA certificate store is required.\n" +msgstr "" + +#: src/qt-console/main.cpp:238 +#, c-format +msgid "" +"No Director resource defined in %s\n" +"Without that I don't how to speak to the Director :-(\n" +msgstr "" + +#: src/qt-console/main.cpp:259 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Console \"%s\" in %s.\n" +msgstr "" + +#: src/qt-console/tray-monitor/authenticate.cpp:86 +msgid "" +"Authorization problem.\n" +"Most likely the passwords do not agree.\n" +"For help, please see " +msgstr "" + +#: src/qt-console/tray-monitor/authenticate.cpp:94 +msgid "" +"Authorization problem: Remote server did not advertise required TLS " +"support.\n" +msgstr "" + +#: src/qt-console/tray-monitor/authenticate.cpp:110 +#, fuzzy +msgid "TLS negotiation failed\n" +msgstr "Initialiseren LZO mislukt\n" + +#: src/qt-console/tray-monitor/authenticate.cpp:117 +#, c-format +msgid "Bad response to Hello command: ERR=%s\n" +msgstr "" + +#: src/qt-console/tray-monitor/authenticate.cpp:134 +msgid "Daemon rejected Hello command\n" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:89 +msgid "The Name of the Monitor should be set" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:129 +msgid "The name of the Resource should be set" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:138 +#, c-format +msgid "The address of the Resource should be set for resource %s" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:147 +#, c-format +msgid "The Password of should be set for resource %s" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:171 +#, c-format +msgid "The TLS CA Certificate File should be a PEM file for resource %s" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:182 +#, c-format +msgid "The TLS CA Certificate Directory should be a directory for resource %s" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:193 +#, c-format +msgid "The TLS Certificate File should be a file for resource %s" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:204 +#, c-format +msgid "The TLS Key File should be a file for resource %s" +msgstr "" + +#: src/qt-console/tray-monitor/runjob.cpp:45 +msgid "This restricted console does not have access to Backup jobs" +msgstr "" + +#: src/qt-console/tray-monitor/runjob.cpp:123 +msgid "Nothing selected" +msgstr "" + +#: src/qt-console/tray-monitor/task.cpp:97 +msgid "Bandwidth can set only set on Client" +msgstr "" + +#: src/qt-console/tray-monitor/task.cpp:102 +msgid "Bandwidth parameter is invalid" +msgstr "" + +#: src/qt-console/tray-monitor/task.cpp:177 +#, fuzzy +msgid "Client daemon" +msgstr "Client" + +#: src/qt-console/tray-monitor/task.cpp:205 +#, fuzzy +msgid "Storage daemon" +msgstr "Opslag" + +#: src/qt-console/tray-monitor/tray-monitor.cpp:45 +#, fuzzy, c-format +msgid "" +"\n" +"%sVersion: %s (%s) %s %s %s\n" +"\n" +"Usage: tray-monitor [-c config_file] [-d debug_level]\n" +" -c set configuration file to file\n" +" -d set debug level to \n" +" -dt print timestamp in debug output\n" +" -t test - read configuration and exit\n" +" -W 0/1 force the detection of the systray\n" +" -? print this message.\n" +"\n" +msgstr "" +"\n" +"Versie: %s (%s) %s %s %s\n" +"\n" +"Specificatie: tray-monitor [-c configuratiebestand] [-d debug_level]\n" +" -c gebruik configuratiebestand\n" +" -d instellen debug level op \n" +" -dt weergeven tijd in debug output\n" +" -t test - lees configuratie en stop\n" +" -? 
geef deze melding.\n" +"\n" + +#: src/qt-console/tray-monitor/tray-monitor.cpp:118 +msgid "TLS PassPhrase" +msgstr "" + +#: src/qt-console/tray-monitor/tray-monitor.cpp:164 +#, c-format +msgid "" +"Error: %d Monitor resources defined in %s. You must define one Monitor " +"resource.\n" +msgstr "" + +#: src/qt-console/tray-monitor/tray-ui.h:105 +#, c-format +msgid "Failed to initialize TLS context for \"%s\".\n" +msgstr "" + +#: src/qt-console/tray-monitor/tray-ui.h:320 +#, fuzzy +msgid "Select a Director" +msgstr "Selecteer Job media:" + +#: src/qt-console/tray-monitor/tray_conf.cpp:172 +#, c-format +msgid "No %s resource defined\n" +msgstr "" + +#: src/qt-console/tray-monitor/tray_conf.cpp:181 +#, fuzzy, c-format +msgid "Monitor: name=%s\n" +msgstr "Console: naam=%s\n" + +#: src/qt-console/tray-monitor/tray_conf.cpp:184 +#, c-format +msgid "Director: name=%s address=%s port=%d\n" +msgstr "" + +#: src/qt-console/tray-monitor/tray_conf.cpp:188 +#, fuzzy, c-format +msgid "Client: name=%s address=%s port=%d\n" +msgstr "Opdracht mislukt." + +#: src/qt-console/tray-monitor/tray_conf.cpp:192 +#, c-format +msgid "Storage: name=%s address=%s port=%d\n" +msgstr "" + +#: src/qt-console/tray-monitor/tray_conf.cpp:196 +#, c-format +msgid "Unknown resource type %d in dump_resource.\n" +msgstr "" + +#: src/qt-console/tray-monitor/tray_conf.cpp:284 +#, c-format +msgid "Unknown resource type %d in free_resource.\n" +msgstr "" + +#: src/qt-console/tray-monitor/tray_conf.cpp:318 +#, c-format +msgid "Too many directives in \"%s\" resource\n" +msgstr "" + +#: src/qt-console/tray-monitor/tray_conf.cpp:338 +#: src/qt-console/tray-monitor/tray_conf.cpp:372 +#, c-format +msgid "Unknown resource type %d in save_resource.\n" +msgstr "" + +#: src/win32/compat/compat.cpp:2879 +msgid "" +"\n" +"\n" +"Bacula ERROR: " +msgstr "" +"\n" +"\n" +"Bacula FOUT: " + +#: src/win32/filed/vss.cpp:244 src/win32/filed/vss.cpp:259 +#, fuzzy, c-format +msgid "pthread key create failed: ERR=%s\n" +msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#: src/win32/filed/vss.cpp:267 +#, fuzzy, c-format +msgid "pthread_setspecific failed: ERR=%s\n" +msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#: src/win32/filed/vss_generic.cpp:725 +#, fuzzy, c-format +msgid "Unable to find volume %ls in the device list\n" +msgstr "Nieuw volume \"%s\" aangemaakt in catalogus.\n" + +#: src/win32/libwin32/main.cpp:227 +msgid "Bad Command Line Option" +msgstr "Onjuiste opdrachtregel optie" + +#: src/win32/libwin32/service.cpp:98 +msgid "RegisterServiceCtlHandler failed" +msgstr "" + +#: src/win32/libwin32/service.cpp:99 +msgid "Failure contacting the Service Handler" +msgstr "" + +#: src/win32/libwin32/service.cpp:110 +msgid "Service start report failed" +msgstr "" + +#: src/win32/libwin32/service.cpp:163 +msgid "StartServiceCtrlDispatcher failed." +msgstr "" + +#: src/win32/libwin32/service.cpp:170 +msgid "KERNEL32.DLL not found: Bacula service not started" +msgstr "KERNEL32.DLL niet gevonden. Bacula service niet gestart" + +#: src/win32/libwin32/service.cpp:180 +#, fuzzy +msgid "Registry service not found: Bacula service not started" +msgstr "KERNEL32.DLL niet gevonden. Bacula service niet gestart" + +#: src/win32/libwin32/service.cpp:182 +#, fuzzy +msgid "Registry service entry point not found" +msgstr "KERNEL32.DLL niet gevonden. Bacula service niet gestart" + +#: src/win32/libwin32/service.cpp:204 +#, fuzzy +msgid "Report Service failure" +msgstr "Opdracht mislukt." 
+ +#: src/win32/libwin32/service.cpp:235 +msgid "Unable to install the service" +msgstr "Kan de service niet installeren" + +#: src/win32/libwin32/service.cpp:243 +#, fuzzy +msgid "Service command length too long" +msgstr "Lengte service commando te lang. Service niet geregistreerd" + +#: src/win32/libwin32/service.cpp:244 +msgid "Service command length too long. Service not registered." +msgstr "Lengte service commando te lang. Service niet geregistreerd" + +#: src/win32/libwin32/service.cpp:257 +msgid "" +"The Service Control Manager could not be contacted - the service was not " +"installed" +msgstr "" + +#: src/win32/libwin32/service.cpp:280 src/win32/libwin32/service.cpp:309 +#: src/win32/libwin32/service.cpp:355 src/win32/libwin32/service.cpp:362 +#: src/win32/libwin32/service.cpp:366 +msgid "The Bacula service: " +msgstr "De Bacula service:" + +#: src/win32/libwin32/service.cpp:287 +msgid "" +"Provides file backup and restore services. Bacula -- the network backup " +"solution." +msgstr "" + +#: src/win32/libwin32/service.cpp:298 +#, fuzzy +msgid "Cannot write System Registry for " +msgstr "Kan Bacula sleutel niet aan Systeem Register toevoegen" + +#: src/win32/libwin32/service.cpp:299 +#, fuzzy +msgid "" +"The System Registry could not be updated - the Bacula service was not " +"installed" +msgstr "KERNEL32.DLL niet gevonden. Bacula service niet gestart" + +#: src/win32/libwin32/service.cpp:308 +msgid "Cannot add Bacula key to System Registry" +msgstr "Kan Bacula sleutel niet aan Systeem Register toevoegen" + +#: src/win32/libwin32/service.cpp:319 +msgid "The " +msgstr "De" + +#: src/win32/libwin32/service.cpp:373 +msgid "An existing Bacula service: " +msgstr "Een bestaande Bacula service:" + +#: src/win32/libwin32/service.cpp:381 +#, fuzzy +msgid "" +"The service Manager could not be contacted - the Bacula service was not " +"removed" +msgstr "KERNEL32.DLL niet gevonden. Bacula service niet gestart" + +#: src/win32/libwin32/service.cpp:394 +msgid "" +"Could not find registry entry.\n" +"Service probably not registerd - the Bacula service was not removed" +msgstr "" + +#: src/win32/libwin32/service.cpp:401 +#, fuzzy +msgid "Could not delete Registry key for " +msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#: src/win32/libwin32/service.cpp:411 +msgid "Bacula could not be contacted, probably not running" +msgstr "Bacula kan niet worden bereikt, waarschijnlijk niet opgestart" + +#: src/win32/libwin32/service.cpp:418 +msgid "The Bacula service has been removed" +msgstr "De Bacula service is verwijderd" + +#: src/win32/libwin32/service.cpp:459 +msgid "SetServiceStatus failed" +msgstr "" + +#: src/win32/libwin32/service.cpp:485 +#, fuzzy, c-format +msgid "" +"\n" +"\n" +"%s error: %ld at %s:%d" +msgstr "" +"\n" +"\n" +"%s fout: %ld op %s:%d" + +#: src/win32/libwin32/service.cpp:561 +#, c-format +msgid "Locked by: %s, duration: %ld seconds\n" +msgstr "" + +#: src/win32/libwin32/service.cpp:565 +#, c-format +msgid "No longer locked\n" +msgstr "" + +#: src/win32/libwin32/service.cpp:569 +#, fuzzy +msgid "Could not lock database" +msgstr "Kan Client niet vinden %s: ERR=%s" + +#~ msgid "" +#~ "Copyright (C) 2005 Christian Masopust\n" +#~ "Written by Christian Masopust (2005)\n" +#~ "\n" +#~ "Version: " +#~ msgstr "" +#~ "Copyright (C) 2005 Christian Masopust\n" +#~ "Gemaakt door Christian Masopust (2005)\n" +#~ "\n" +#~ "Versie: " + +#, fuzzy +#~ msgid "Query failed: %s: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. 
FOUT=%s\n" + +#, fuzzy +#~ msgid "Unable to initialize DB lock. ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Fetch failed: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "error ending batch mode: %s" +#~ msgstr "Geen Volumes gevonden om terug te zetten.\n" + +#, fuzzy +#~ msgid "" +#~ "insert %s failed:\n" +#~ "%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "" +#~ "delete %s failed:\n" +#~ "%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "No results to list.\n" +#~ msgstr "Geen resultaten gevonden om te tonen." + +#, fuzzy +#~ msgid "Could not init database batch connection\n" +#~ msgstr "Data parser kon niet geregistreerd worden!" + +#, fuzzy +#~ msgid "Could not open database \"%s\": ERR=%s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "Create DB Job record %s failed. ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Create JobMedia record %s failed: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Update Media record %s failed: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Create db Pool record %s failed: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Create db Device record %s failed: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Create DB Storage record %s failed. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Create db mediatype record %s failed: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Volume \"%s\" already exists.\n" +#~ msgstr "Job \"%s\" niet gevonden.\n" + +#, fuzzy +#~ msgid "Create DB Media record %s failed. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Create DB Client record %s failed. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Create db Path record %s failed. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Create DB Counters record %s failed. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "error fetching FileSet row: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Create DB FileSet record %s failed. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Create db File record %s failed. ERR=%s" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Error fetching row for file=%s: ERR=%s\n" +#~ msgstr "Geen Volumes gevonden om terug te zetten.\n" + +#, fuzzy +#~ msgid "Create db Filename record %s failed. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Create db Object record %s failed. ERR=%s" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Expecting one pool record, got %d\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Error fetching row %s\n" +#~ msgstr "Fout bij schrijven van bsr bestand.\n" + +#, fuzzy +#~ msgid "No Job found for: %s.\n" +#~ msgstr "Geen backup gevonden voor deze client." + +#, fuzzy +#~ msgid "No Job found for: %s\n" +#~ msgstr "Geen backup gevonden voor deze client." 
+ +#, fuzzy +#~ msgid "No Volume record found for item %d.\n" +#~ msgstr "Geen Volumes gevonden om terug te zetten.\n" + +#, fuzzy +#~ msgid "File record for PathId=%s FilenameId=%s not found.\n" +#~ msgstr "Job \"%s\" niet gevonden.\n" + +#, fuzzy +#~ msgid "File record not found in Catalog.\n" +#~ msgstr "Bestand niet in catalog: %s\n" + +#, fuzzy +#~ msgid "Filename record: %s not found.\n" +#~ msgstr "Job \"%s\" niet gevonden.\n" + +#, fuzzy +#~ msgid "Path record: %s not found.\n" +#~ msgstr "Job \"%s\" niet gevonden.\n" + +#, fuzzy +#~ msgid "No Job found for JobId %s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "No volumes found for JobId=%d\n" +#~ msgstr "Geen Volumes gevonden om terug te zetten.\n" + +#, fuzzy +#~ msgid "Error fetching row %d: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "No Volume for JobId %d found in Catalog.\n" +#~ msgstr "Geen Volumes gevonden om terug te zetten.\n" + +#, fuzzy +#~ msgid "Pool id select failed: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Client id select failed: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Pool record not found in Catalog.\n" +#~ msgstr "Kan Catalog medium niet vinden\n" + +#, fuzzy +#~ msgid "RestoreObject record \"%d\" not found.\n" +#~ msgstr "Job \"%s\" niet gevonden.\n" + +#, fuzzy +#~ msgid "RestoreObject record not found in Catalog.\n" +#~ msgstr "Job \"%s\" niet gevonden.\n" + +#, fuzzy +#~ msgid "More than one Client!: %s\n" +#~ msgstr "Geen Volumes gevonden om terug te zetten.\n" + +#, fuzzy +#~ msgid "Client record not found in Catalog.\n" +#~ msgstr "Fout: Client %s bestaat niet.\n" + +#, fuzzy +#~ msgid "FileSet record \"%s\" not found.\n" +#~ msgstr "Job \"%s\" niet gevonden.\n" + +#, fuzzy +#~ msgid "FileSet record not found in Catalog.\n" +#~ msgstr "Bestand niet in catalog: %s\n" + +#, fuzzy +#~ msgid "Media id select failed: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "query dbids failed: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Media record with MediaId=%s not found.\n" +#~ msgstr "Job \"%s\" niet gevonden.\n" + +#, fuzzy +#~ msgid "Media record for Volume name \"%s\" not found.\n" +#~ msgstr "Job \"%s\" niet gevonden.\n" + +#, fuzzy +#~ msgid "Media record for MediaId=%u not found in Catalog.\n" +#~ msgstr "Job \"%s\" niet gevonden.\n" + +#, fuzzy +#~ msgid "Media record for Volume Name \"%s\" not found in Catalog.\n" +#~ msgstr "Job \"%s\" niet gevonden.\n" + +#, fuzzy +#~ msgid "More than one Snapshot!: %s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Snapshot record with SnapshotId=%s not found.\n" +#~ msgstr "Job \"%s\" niet gevonden.\n" + +#, fuzzy +#~ msgid "Snapshot record for Snapshot name \"%s\" not found.\n" +#~ msgstr "Job \"%s\" niet gevonden.\n" + +#, fuzzy +#~ msgid "Query failed: %s\n" +#~ msgstr "Nieuw bestand: %s\n" + +#, fuzzy +#~ msgid "Database %s does not exist, please create it.\n" +#~ msgstr "" +#~ "Geprobeerd om \"%s\" aan te maken, maar deze bestaat al. Probeer " +#~ "opnieuw.\n" + +#, fuzzy +#~ msgid "Unable to open Database=%s. 
ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "send a file to the director" +#~ msgstr "Verbinding naar director opnieuw opzetten" + +#, fuzzy +#~ msgid "Command logic problem\n" +#~ msgstr "Onjuiste opdrachtregel optie" + +#, fuzzy +#~ msgid "%s is not a number. You must enter a number between 1 and %d\n" +#~ msgstr "Voer een nummer in tussen 1 en %d\n" + +#, fuzzy +#~ msgid "You must enter a number between 1 and %d\n" +#~ msgstr "Voer een nummer in tussen 1 en %d\n" + +#, fuzzy +#~ msgid "Connecting to Director %s:%d\n" +#~ msgstr "" +#~ "Verbinden met Director %s:%d\n" +#~ "\n" + +#, fuzzy +#~ msgid "Cannot open file %s for input. ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot open file %s for output. ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot popen(\"%s\", \"r\"): ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "@exec error: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Console: name=%s rcfile=%s histfile=%s\n" +#~ msgstr "Console: naam=%s\n" + +#, fuzzy +#~ msgid "Error getting Job record for Job report: ERR=%s" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#~ msgid "*** Admin Error ***" +#~ msgstr "*** Admin Fout ***" + +#~ msgid "Admin Canceled" +#~ msgstr "Admin geannuleerd" + +#, fuzzy +#~ msgid "Error sending Hello to Storage daemon. ERR=%s\n" +#~ msgstr "Geen Volumes gevonden om terug te zetten.\n" + +#, fuzzy +#~ msgid "TLS negotiation failed with SD at \"%s:%d\"\n" +#~ msgstr "Initialiseren LZO mislukt\n" + +#, fuzzy +#~ msgid "Error sending Hello to File daemon at \"%s:%d\". ERR=%s\n" +#~ msgstr "Geen Volumes gevonden om terug te zetten.\n" + +#, fuzzy +#~ msgid "TLS negotiation failed with FD at \"%s:%d\".\n" +#~ msgstr "Initialiseren LZO mislukt\n" + +#, fuzzy +#~ msgid "TLS negotiation failed.\n" +#~ msgstr "Initialiseren LZO mislukt\n" + +#, fuzzy +#~ msgid "Pool resource" +#~ msgstr "Selecteer Pool" + +#, fuzzy +#~ msgid "Network error with FD during %s: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Error getting Client record for Job report: ERR=%s" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Error getting Media record for Volume \"%s\": ERR=%s" +#~ msgstr "Job \"%s\" niet gevonden.\n" + +#, fuzzy +#~ msgid "Backup OK -- %s" +#~ msgstr "Backup OK -- met waarschuwingen" + +#, fuzzy +#~ msgid "with warnings" +#~ msgstr "Backup OK -- met waarschuwingen" + +#~ msgid "*** Backup Error ***" +#~ msgstr "*** Backup Fout ***" + +#~ msgid "Backup Canceled" +#~ msgstr "Backup geannuleerd" + +#, fuzzy +#~ msgid "" +#~ "Could not open WriteBootstrap file:\n" +#~ "%s: ERR=%s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: bdirjson [] [config_file]\n" +#~ " -r get resource type \n" +#~ " -n get resource \n" +#~ " -l get only directives matching dirs (use with -r)\n" +#~ " -D get only data\n" +#~ " -R do not apply JobDefs to Job\n" +#~ " -c set configuration file to file\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -t test - read configuration and exit\n" +#~ " -s output in show text format\n" +#~ " -v verbose user messages\n" +#~ " -? 
print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Versie: %s (%s) %s %s %s\n" +#~ "\n" +#~ "Specificatie: tray-monitor [-c configuratiebestand] [-d debug_level]\n" +#~ " -c gebruik configuratiebestand\n" +#~ " -d instellen debug level op \n" +#~ " -dt weergeven tijd in debug output\n" +#~ " -t test - lees configuratie en stop\n" +#~ " -? geef deze melding.\n" +#~ "\n" + +#, fuzzy +#~ msgid "Failed to initialize TLS context for File daemon \"%s\" in %s.\n" +#~ msgstr "Verbinden met de director mislukt\n" + +#, fuzzy +#~ msgid "No Job records defined in %s\n" +#~ msgstr "Fout : Geen director gevonden in uw configuratie.\n" + +#, fuzzy +#~ msgid "Too many items in Job resource\n" +#~ msgstr "De gedefinieerde job resources zijn:\n" + +#, fuzzy +#~ msgid "Unable to get Job record. ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to get Job Volume Parameters. ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to create bootstrap file %s. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "No files found to read. No bootstrap file written.\n" +#~ msgstr "Geen Volumes gevonden om terug te zetten.\n" + +#~ msgid "Error writing bsr file.\n" +#~ msgstr "Fout bij schrijven van bsr bestand.\n" + +#~ msgid "No Volumes found to restore.\n" +#~ msgstr "Geen Volumes gevonden om terug te zetten.\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Volumes marked with \"*\" are in the Autochanger.\n" +#~ msgstr "" +#~ "\n" +#~ "Volumes gemarkeerd met \"*\" staan online.\n" + +#, fuzzy +#~ msgid "not correct MediaType" +#~ msgstr "Selecteer media type" + +#, fuzzy +#~ msgid "1998 Volume \"%s\" catalog status is %s, %s.\n" +#~ msgstr "Job \"%s\" niet gevonden.\n" + +#, fuzzy +#~ msgid "1997 Volume \"%s\" not in catalog.\n" +#~ msgstr "Bestand niet in catalog: %s\n" + +#, fuzzy +#~ msgid "Unable to get Media record for Volume %s: ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Attribute create error: ERR=%s" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "attribute create error. ERR=%s" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "fread attr spool error. ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: bacula-dir [-f -s] [-c config_file] [-d debug_level] " +#~ "[config_file]\n" +#~ " -c set configuration file to file\n" +#~ " -d [,] set debug level to , debug tags to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -T set trace on\n" +#~ " -f run in foreground (for debugging)\n" +#~ " -g groupid\n" +#~ " -m print kaboom output (for debugging)\n" +#~ " -r run now\n" +#~ " -s no signals\n" +#~ " -t test - read configuration and exit\n" +#~ " -u userid\n" +#~ " -v verbose user messages\n" +#~ " -? print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Versie: %s (%s) %s %s %s\n" +#~ "\n" +#~ "Specificatie: tray-monitor [-c configuratiebestand] [-d debug_level]\n" +#~ " -c gebruik configuratiebestand\n" +#~ " -d instellen debug level op \n" +#~ " -dt weergeven tijd in debug output\n" +#~ " -t test - lees configuratie en stop\n" +#~ " -? 
geef deze melding.\n" +#~ "\n" + +#, fuzzy +#~ msgid "Resetting previous configuration.\n" +#~ msgstr "Dit configuratiebestand wordt gebruikt: %s\n" + +#, fuzzy +#~ msgid "Could not open Catalog \"%s\", database \"%s\".\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "Could not update storage record for %s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not compile regex pattern \"%s\" ERR=%s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "Console: name=%s SSL=%d\n" +#~ msgstr "Console: naam=%s\n" + +#~ msgid "Job" +#~ msgstr "Job" + +#, fuzzy +#~ msgid " --> PluginOptions=%s\n" +#~ msgstr "Uitvoeren herstel opdracht" + +#, fuzzy +#~ msgid "Schedule: name=%s\n" +#~ msgstr "Console: naam=%s\n" + +#, fuzzy +#~ msgid "Pool: name=%s PoolType=%s\n" +#~ msgstr "Console: naam=%s\n" + +#, fuzzy +#~ msgid " NextPool=%s\n" +#~ msgstr "Pool" + +#, fuzzy +#~ msgid "Messages: name=%s\n" +#~ msgstr "Console: naam=%s\n" + +#, fuzzy +#~ msgid " mailcmd=%s\n" +#~ msgstr " st_atime verschilt\n" + +#, fuzzy +#~ msgid "Cannot find Pool resource %s\n" +#~ msgstr "Kan Catalog medium niet vinden\n" + +#, fuzzy +#~ msgid "Cannot find Console resource %s\n" +#~ msgstr "Kan Catalog medium niet vinden\n" + +#, fuzzy +#~ msgid "Cannot find Director resource %s\n" +#~ msgstr "Kan Catalog medium niet vinden\n" + +#, fuzzy +#~ msgid "Cannot find Storage resource %s\n" +#~ msgstr "De gedefinieerde Opslag media zijn:\n" + +#, fuzzy +#~ msgid "Cannot find Job resource %s\n" +#~ msgstr "De gedefinieerde job resources zijn:\n" + +#, fuzzy +#~ msgid "Cannot find Counter resource %s\n" +#~ msgstr "Kan Catalog medium niet vinden\n" + +#, fuzzy +#~ msgid "Cannot find Client resource %s\n" +#~ msgstr "De gedefinieerde Clients zijn:\n" + +#, fuzzy +#~ msgid "Cannot find Schedule resource %s\n" +#~ msgstr "De gedefinieerde Clients zijn:\n" + +#, fuzzy +#~ msgid "Expecting keyword, got: %s\n" +#~ msgstr "Onjuiste periode.\n" + +#, fuzzy +#~ msgid "Count not update counter %s: ERR=%s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "Cannot create var context: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Cannot set var callback: ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot set var operate: ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot unescape string: ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot expand expression \"%s\": ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Client: " +#~ msgstr "Client" + +#, fuzzy +#~ msgid "Error updating Client record. ERR=%s\n" +#~ msgstr "Geen Volumes gevonden om terug te zetten.\n" + +#, fuzzy +#~ msgid "Cannot run program: %s. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid ">filed: write error on socket\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Error running program: %s. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Cannot open included file: %s. ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Client \"%s\" RunScript failed.\n" +#~ msgstr "Opdracht mislukt." 
+ +#, fuzzy +#~ msgid "RestoreObject failed.\n" +#~ msgstr "Uitvoeren herstel opdracht" + +#, fuzzy +#~ msgid "ComponentInfo failed.\n" +#~ msgstr "Opdracht mislukt." + +#, fuzzy +#~ msgid "\" command before continuing.\n" +#~ msgstr "" +#~ "U diente een \"use \" te specificeren alvorens te kunnen " +#~ "doorgaan.\n" + +#~ msgid "The defined Catalog resources are:\n" +#~ msgstr "De gedefinieerde Catalog media zijn:\n" + +#~ msgid "Select Catalog resource" +#~ msgstr "Selecteer Catalog media" + +#, fuzzy +#~ msgid "The disabled Job resources are:\n" +#~ msgstr "De gedefinieerde job resources zijn:\n" + +#, fuzzy +#~ msgid "The enabled Job resources are:\n" +#~ msgstr "De gedefinieerde job resources zijn:\n" + +#, fuzzy +#~ msgid "Select Job resource" +#~ msgstr "Selecteer Pool" + +#~ msgid "The defined Job resources are:\n" +#~ msgstr "De gedefinieerde job resources zijn:\n" + +#, fuzzy +#~ msgid "Error: Restore Job resource \"%s\" does not exist.\n" +#~ msgstr "Fout: Client %s bestaat niet.\n" + +#, fuzzy +#~ msgid "The defined Restore Job resources are:\n" +#~ msgstr "De gedefinieerde job resources zijn:\n" + +#~ msgid "Select Restore Job" +#~ msgstr "Selecteer Job om terug te zetten" + +#~ msgid "The defined Client resources are:\n" +#~ msgstr "De gedefinieerde Clients zijn:\n" + +#, fuzzy +#~ msgid "Select Client resource" +#~ msgstr "Selecteer FileSet bron" + +#~ msgid "Select Client (File daemon) resource" +#~ msgstr "Selecteer een Client (File daemon)" + +#~ msgid "Error: Client resource %s does not exist.\n" +#~ msgstr "Fout: Client %s bestaat niet.\n" + +#, fuzzy +#~ msgid "The defined Schedule resources are:\n" +#~ msgstr "De gedefinieerde Clients zijn:\n" + +#, fuzzy +#~ msgid "Select Schedule resource" +#~ msgstr "Selecteer Pool" + +#~ msgid "Could not find Client %s: ERR=%s" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#~ msgid "Could not find Client \"%s\": ERR=%s" +#~ msgstr "Kan Client niet vinden \"%s\": ERR=%s" + +#, fuzzy +#~ msgid "Error obtaining client ids. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. 
FOUT=%s\n" + +#~ msgid "Defined Clients:\n" +#~ msgstr "Gedefinieerde Clients:\n" + +#~ msgid "Select the Client" +#~ msgstr "Selecteer Client" + +#, fuzzy +#~ msgid "Could not find Pool \"%s\": ERR=%s" +#~ msgstr "Kan Job niet vinden \"%s\": ERR=%s" + +#~ msgid "Defined Pools:\n" +#~ msgstr "Gedefinieerde Pools:\n" + +#~ msgid "Select the Pool" +#~ msgstr "Selecteer Pool" + +#~ msgid "No access to Pool \"%s\"\n" +#~ msgstr "Geen toegang tot Pool \"%s\"\n" + +#, fuzzy +#~ msgid "Enter a Volume name or *MediaId: " +#~ msgstr "Voer *MediaId of Volume naam in:" + +#, fuzzy +#~ msgid "The defined Pool resources are:\n" +#~ msgstr "De gedefinieerde job resources zijn:\n" + +#~ msgid "Select Pool resource" +#~ msgstr "Selecteer Pool" + +#~ msgid "Enter the JobId to select: " +#~ msgstr "Voer de gekozen Job in:" + +#~ msgid "Could not find Job \"%s\": ERR=%s" +#~ msgstr "Kan Job niet vinden \"%s\": ERR=%s" + +#, fuzzy +#~ msgid "Selection aborted, nothing done.\n" +#~ msgstr "Selecteer Pool" + +#~ msgid "Please enter a number between 1 and %d\n" +#~ msgstr "Voer een nummer in tussen 1 en %d\n" + +#~ msgid "JobId %s is not running.\n" +#~ msgstr "JobID %s is niet in uitvoering.\n" + +#~ msgid "Job \"%s\" is not running.\n" +#~ msgstr "Job \"%s\" is niet in uitvoering.\n" + +#, fuzzy +#~ msgid "Storage resource \"%s\": not found\n" +#~ msgstr "Fout: Client %s bestaat niet.\n" + +#~ msgid "Enter autochanger drive[0]: " +#~ msgstr "Voer autochanger drive[0] in: " + +#~ msgid "Enter autochanger slot: " +#~ msgstr "Voer autochanger slot in:" + +#, fuzzy +#~ msgid "Media Types defined in conf file:\n" +#~ msgstr "Fout : Geen director gevonden in uw configuratie.\n" + +#~ msgid "Media Type" +#~ msgstr "Type media" + +#~ msgid "Select the Media Type" +#~ msgstr "Selecteer media type" + +#~ msgid "No Jobs running.\n" +#~ msgstr "Geen Jobs in uitvoering.\n" + +#~ msgid "None of your jobs are running.\n" +#~ msgstr "Geen van uw opdrachten zijn in uitvoering.\n" + +#, fuzzy +#~ msgid "Warning Job JobId=%d is not running.\n" +#~ msgstr "JobID %s is niet in uitvoering.\n" + +#, fuzzy +#~ msgid "Confirm %s of %d Job%s (yes/no): " +#~ msgstr "Bevestig annulering (yes/no)" + +#, fuzzy +#~ msgid "Warning Job %s is not running.\n" +#~ msgstr "JobID %s is niet in uitvoering.\n" + +#, fuzzy +#~ msgid "Select Job(s):\n" +#~ msgstr "Selecteer Job:\n" + +#, fuzzy +#~ msgid "Choose Job list to %s" +#~ msgstr "Voer de gekozen Job in:" + +#, fuzzy +#~ msgid "Invalid argument \"action\".\n" +#~ msgstr "Onjuiste periode.\n" + +#, fuzzy +#~ msgid "No Volumes found to perform the command.\n" +#~ msgstr "Geen Volumes gevonden om terug te zetten.\n" + +#, fuzzy +#~ msgid "Cannot create UA thread: %s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#~ msgid "You have messages.\n" +#~ msgstr "U heeft berichten.\n" + +#, fuzzy +#~ msgid "Connecting to Storage %s at %s:%d\n" +#~ msgstr "" +#~ "Verbinden met Director %s:%d\n" +#~ "\n" + +#, fuzzy +#~ msgid "Failed to connect to Storage.\n" +#~ msgstr "Verbinden met de director mislukt\n" + +#, fuzzy +#~ msgid "No authorization for Storage \"%s\"\n" +#~ msgstr "Geen toegang tot Pool \"%s\"\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Failed to connect to Storage daemon %s.\n" +#~ "====\n" +#~ msgstr "Kan de geselecteerde opslag niet vinden." + +#, fuzzy +#~ msgid "" +#~ "Failed to connect to Client %s.\n" +#~ "====\n" +#~ msgstr "Verbinden met de director mislukt\n" + +#, fuzzy +#~ msgid "Connected to file daemon\n" +#~ msgstr "Verbonden met de director." 
+ +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Scheduled Jobs:\n" +#~ msgstr "Selecteer Job:\n" + +#, fuzzy +#~ msgid "Console connected %sat %s\n" +#~ msgstr "Al verbonden\"%s\".\n" + +#, fuzzy +#~ msgid "is running" +#~ msgstr "Geen Jobs in uitvoering.\n" + +#, fuzzy +#~ msgid "has terminated" +#~ msgstr "Console taak beeindigd." + +#, fuzzy +#~ msgid "has a fatal error" +#~ msgstr "Zlib data fout" + +#, fuzzy +#~ msgid "has been canceled" +#~ msgstr "Backup geannuleerd" + +#, fuzzy +#~ msgid "is waiting on Client %s" +#~ msgstr "Job %s wacht %d seconde voor de start tijd.\n" + +#, fuzzy +#~ msgid "is waiting on Storage \"%s\"" +#~ msgstr "Job %s wacht %d seconde voor de start tijd.\n" + +#, fuzzy +#~ msgid "is waiting on max Client jobs" +#~ msgstr "Job %s wacht %d seconde voor de start tijd.\n" + +#, fuzzy +#~ msgid "is waiting for its start time (%s)" +#~ msgstr "Job %s wacht %d seconde voor de start tijd.\n" + +#, fuzzy +#~ msgid "is waiting for a Shared Storage device" +#~ msgstr "Job %s wacht %d seconde voor de start tijd.\n" + +#, fuzzy +#~ msgid "is waiting for a mount request" +#~ msgstr "Job %s wacht %d seconde voor de start tijd.\n" + +#, fuzzy +#~ msgid "is waiting for Client to connect to Storage daemon" +#~ msgstr "Kan de geselecteerde opslag niet vinden." + +#, fuzzy +#~ msgid "is waiting for Client %s to connect to Storage %s" +#~ msgstr "Job %s wacht %d seconde voor de start tijd.\n" + +#, fuzzy +#~ msgid "" +#~ "No Jobs running.\n" +#~ "====\n" +#~ msgstr "Geen Jobs in uitvoering.\n" + +#, fuzzy +#~ msgid "print current working directory" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "cwd is: %s\n" +#~ msgstr "Nieuw bestand: %s\n" + +#, fuzzy +#~ msgid "Node %s has no children.\n" +#~ msgstr "JobID %s is niet in uitvoering.\n" + +#, fuzzy +#~ msgid "Invalid path given.\n" +#~ msgstr "Onjuiste periode.\n" + +#, fuzzy +#~ msgid "Pool from resource" +#~ msgstr "Selecteer Pool" + +#, fuzzy +#~ msgid "Slots from autochanger" +#~ msgstr "Voer autochanger slot in:" + +#, fuzzy +#~ msgid "Choose catalog item to update" +#~ msgstr "Voer de gekozen Job in:" + +#, fuzzy +#~ msgid "New Volume status is: %s\n" +#~ msgstr "Nieuw bestand: %s\n" + +#, fuzzy +#~ msgid "Invalid cache retention period specified: %s\n" +#~ msgstr "De huidige %s retention periode: %s\n" + +#, fuzzy +#~ msgid "New Cache Retention period is: %s\n" +#~ msgstr "De huidige %s retention periode: %s\n" + +#, fuzzy +#~ msgid "New use duration is: %s\n" +#~ msgstr "Nieuw bestand: %s\n" + +#, fuzzy +#~ msgid "New max jobs is: %s\n" +#~ msgstr "Nieuw bestand: %s\n" + +#, fuzzy +#~ msgid "New max files is: %s\n" +#~ msgstr "Nieuw bestand: %s\n" + +#, fuzzy +#~ msgid "New Max bytes is: %s\n" +#~ msgstr "Nieuw bestand: %s\n" + +#, fuzzy +#~ msgid "New Recycle flag is: %s\n" +#~ msgstr "Nieuw bestand: %s\n" + +#, fuzzy +#~ msgid "New InChanger flag is: %s\n" +#~ msgstr "Nieuw bestand: %s\n" + +#, fuzzy +#~ msgid "Error updating media record Slot: ERR=%s" +#~ msgstr "Geen Volumes gevonden om terug te zetten.\n" + +#, fuzzy +#~ msgid "New Slot is: %d\n" +#~ msgstr "Nieuw bestand: %s\n" + +#, fuzzy +#~ msgid "New Pool is: %s\n" +#~ msgstr "Nieuw bestand: %s\n" + +#, fuzzy +#~ msgid "New RecyclePool is: %s\n" +#~ msgstr "Nieuw bestand: %s\n" + +#, fuzzy +#~ msgid "Error updating Volume record: ERR=%s" +#~ msgstr "Geen Volumes gevonden om terug te zetten.\n" + +#, fuzzy +#~ msgid "Error updating Volume records: ERR=%s" +#~ msgstr "Geen Volumes gevonden om terug te zetten.\n" + +#, fuzzy +#~ msgid "Error 
updating media record Enabled: ERR=%s" +#~ msgstr "Geen Volumes gevonden om terug te zetten.\n" + +#, fuzzy +#~ msgid "New Enabled is: %d\n" +#~ msgstr "Nieuw bestand: %s\n" + +#, fuzzy +#~ msgid "New ActionOnPurge is: %s\n" +#~ msgstr "Nieuw bestand: %s\n" + +#, fuzzy +#~ msgid "Volume Retention Period" +#~ msgstr "Geef nieuwe retention periode:" + +#, fuzzy +#~ msgid "Cache Retention" +#~ msgstr "Geef nieuwe retention periode:" + +#, fuzzy +#~ msgid "Current Volume status is: %s\n" +#~ msgstr "De huidige %s retention periode: %s\n" + +#, fuzzy +#~ msgid "Enter Volume Retention period: " +#~ msgstr "Geef nieuwe retention periode:" + +#, fuzzy +#~ msgid "Current use duration is: %s\n" +#~ msgstr "De huidige %s retention periode: %s\n" + +#, fuzzy +#~ msgid "Enter Volume Use Duration: " +#~ msgstr "Voer *MediaId of Volume naam in:" + +#, fuzzy +#~ msgid "Current max jobs is: %u\n" +#~ msgstr "De huidige %s retention periode: %s\n" + +#, fuzzy +#~ msgid "Current max files is: %u\n" +#~ msgstr "De huidige %s retention periode: %s\n" + +#, fuzzy +#~ msgid "Current value is: %s\n" +#~ msgstr "De huidige %s retention periode: %s\n" + +#, fuzzy +#~ msgid "Current recycle flag is: %s\n" +#~ msgstr "De huidige %s retention periode: %s\n" + +#, fuzzy +#~ msgid "Current Slot is: %d\n" +#~ msgstr "De huidige %s retention periode: %s\n" + +#, fuzzy +#~ msgid "Enter new Slot: " +#~ msgstr "Voer autochanger slot in:" + +#, fuzzy +#~ msgid "Current InChanger flag is: %d\n" +#~ msgstr "De huidige %s retention periode: %s\n" + +#, fuzzy +#~ msgid "Current Volume Files is: %u\n" +#~ msgstr "De huidige %s retention periode: %s\n" + +#, fuzzy +#~ msgid "New Volume Files is: %u\n" +#~ msgstr "Nieuw bestand: %s\n" + +#, fuzzy +#~ msgid "Current Pool is: %s\n" +#~ msgstr "De huidige %s retention periode: %s\n" + +#, fuzzy +#~ msgid "Enter new Pool name: " +#~ msgstr "Geef nieuwe retention periode:" + +#, fuzzy +#~ msgid "Current Enabled is: %d\n" +#~ msgstr "De huidige %s retention periode: %s\n" + +#, fuzzy +#~ msgid "Enter new Enabled: " +#~ msgstr "Geef nieuwe retention periode:" + +#, fuzzy +#~ msgid "Current RecyclePool is: %s\n" +#~ msgstr "De huidige %s retention periode: %s\n" + +#, fuzzy +#~ msgid "Current ActionOnPurge is: %s\n" +#~ msgstr "De huidige %s retention periode: %s\n" + +#, fuzzy +#~ msgid "Current Cache Retention period is: %s\n" +#~ msgstr "De huidige %s retention periode: %s\n" + +#, fuzzy +#~ msgid "Enter Cache Retention period: " +#~ msgstr "Geef nieuwe retention periode:" + +#, fuzzy +#~ msgid "Expect JobId keyword, not found.\n" +#~ msgstr "Job \"%s\" niet gevonden.\n" + +#, fuzzy +#~ msgid "Job not found.\n" +#~ msgstr "Job \"%s\" niet gevonden.\n" + +#, fuzzy +#~ msgid "No previous Jobs found.\n" +#~ msgstr "vorige Job" + +#, fuzzy +#~ msgid "Error getting Job record for previous Job: ERR=%s" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#~ msgid "Backup OK -- with warnings" +#~ msgstr "Backup OK -- met waarschuwingen" + +#, fuzzy +#~ msgid "Could not get job record for previous Job. ERR=%s" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not get fileset record from previous Job. 
ERR=%s" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not find FileSet resource \"%s\" from previous Job\n" +#~ msgstr "De gedefinieerde Fileset media zijn:\n" + +#, fuzzy +#~ msgid "Verify OK -- with warnings" +#~ msgstr "Backup OK -- met waarschuwingen" + +#~ msgid "Verify OK" +#~ msgstr "Controle OK" + +#~ msgid "*** Verify Error ***" +#~ msgstr "*** Fout bij controleren ***" + +#, fuzzy +#~ msgid "Verify Canceled" +#~ msgstr "Admin geannuleerd" + +#~ msgid "New file: %s\n" +#~ msgstr "Nieuw bestand: %s\n" + +#~ msgid "File not in catalog: %s\n" +#~ msgstr "Bestand niet in catalog: %s\n" + +#~ msgid " st_ino differ. Cat: %s File: %s\n" +#~ msgstr " st_ino verschil. Cat: %s Bestand: %s\n" + +#~ msgid " st_mode differ. Cat: %x File: %x\n" +#~ msgstr " st_mode verschil. Cat: %x Bestand: %x\n" + +#~ msgid " st_nlink differ. Cat: %d File: %d\n" +#~ msgstr " st_nlink verschil. Cat: %d Bestand: %d\n" + +#~ msgid " st_uid differ. Cat: %u File: %u\n" +#~ msgstr " st_uid verschil. Cat: %u Bestand: %u\n" + +#~ msgid " st_gid differ. Cat: %u File: %u\n" +#~ msgstr " st_gid verschil. Cat: %u Bestand: %u\n" + +#~ msgid " st_size differ. Cat: %s File: %s\n" +#~ msgstr " st_size verschil. Cat: %s Bestand: %s\n" + +#~ msgid " st_atime differs\n" +#~ msgstr " st_atime verschilt\n" + +#~ msgid " st_mtime differs\n" +#~ msgstr " st_mtime verschilt\n" + +#~ msgid " st_ctime differs\n" +#~ msgstr " st_ctime verschilt\n" + +#, fuzzy +#~ msgid " st_size decrease. Cat: %s File: %s\n" +#~ msgstr " st_size verschil. Cat: %s Bestand: %s\n" + +#, fuzzy +#~ msgid " %s differs. File=%s Cat=%s\n" +#~ msgstr " st_ino verschil. Cat: %s Bestand: %s\n" + +#~ msgid "File: %s\n" +#~ msgstr "Bestand: %s\n" + +#, fuzzy +#~ msgid "%s digest initialization failed\n" +#~ msgstr "Initialiseren LZO mislukt\n" + +#, fuzzy +#~ msgid " Could not access \"%s\": ERR=%s\n" +#~ msgstr "Kan Job niet vinden \"%s\": ERR=%s" + +#, fuzzy +#~ msgid " Could not follow link \"%s\": ERR=%s\n" +#~ msgstr "Kan Client niet vinden \"%s\": ERR=%s" + +#, fuzzy +#~ msgid " Could not stat \"%s\": ERR=%s\n" +#~ msgstr "Kan Job niet vinden \"%s\": ERR=%s" + +#, fuzzy +#~ msgid " Could not open directory \"%s\": ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid " Cannot open \"%s\": ERR=%s.\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Network send error to SD. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#~ msgid "Read error on file %s. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#~ msgid "Too many errors. JobErrors=%d.\n" +#~ msgstr "Te veel fouten. OpdrachtFouten=%d.\n" + +#, fuzzy +#~ msgid "Encryption padding error\n" +#~ msgstr "Fout in versleuteling \n" + +#~ msgid "Encryption error\n" +#~ msgstr "Fout in versleuteling \n" + +#, fuzzy +#~ msgid "Network send error to SD. Data=%s ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. 
FOUT=%s\n" + +#, fuzzy +#~ msgid "Compression deflateParams error: %d\n" +#~ msgstr "Compressie LZO fout: %d\n" + +#, fuzzy +#~ msgid " Cannot open resource fork for \"%s\": ERR=%s.\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Compression deflate error: %d\n" +#~ msgstr "Compressie LZO fout: %d\n" + +#, fuzzy +#~ msgid "Compression deflateReset error: %d\n" +#~ msgstr "Compressie LZO fout: %d\n" + +#~ msgid "Compression LZO error: %d\n" +#~ msgstr "Compressie LZO fout: %d\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: bfdjson [options] [config_file]\n" +#~ " -r get resource type \n" +#~ " -n get resource \n" +#~ " -l get only directives matching dirs (use with -r)\n" +#~ " -D get only data\n" +#~ " -c use as configuration file\n" +#~ " -d set debug level to \n" +#~ " -dt print a timestamp in debug output\n" +#~ " -t test configuration file and exit\n" +#~ " -v verbose user messages\n" +#~ " -? print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Versie: %s (%s) %s %s %s\n" +#~ "\n" +#~ "Specificatie: tray-monitor [-c configuratiebestand] [-d debug_level]\n" +#~ " -c gebruik configuratiebestand\n" +#~ " -d instellen debug level op \n" +#~ " -dt weergeven tijd in debug output\n" +#~ " -t test - lees configuratie en stop\n" +#~ " -? geef deze melding.\n" +#~ "\n" + +#, fuzzy +#~ msgid "Only one Client resource permitted in %s\n" +#~ msgstr "De gedefinieerde Clients zijn:\n" + +#, fuzzy +#~ msgid "Failed to initialize encryption context.\n" +#~ msgstr "Kan de geselecteerde client niet vinden." + +#, fuzzy +#~ msgid "Unsupported cipher on this system.\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Plugin save packet not found.\n" +#~ msgstr "Fout: Client %s bestaat niet.\n" + +#, fuzzy +#~ msgid "Plugin=%s not found.\n" +#~ msgstr "Job \"%s\" niet gevonden.\n" + +#, fuzzy +#~ msgid "Could not create %s: ERR=%s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "Error while creating command string %s.\n" +#~ msgstr "" +#~ "Fout bij verwerken van opdrachtregel variabelen, maak nu gebruik van " +#~ "standaard instelling.\n" + +#, fuzzy +#~ msgid "Error while executing \"%s\" %s. %s %s\n" +#~ msgstr "" +#~ "Fout bij verwerken van opdrachtregel variabelen, maak nu gebruik van " +#~ "standaard instelling.\n" + +#, fuzzy +#~ msgid "Unable to parse snapshot command output\n" +#~ msgstr "Kan de geselecteerde client niet vinden." + +#, fuzzy +#~ msgid "Unable to create snapshot record. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Unable to create snapshot record, got %s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to delete snapshot record. ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to delete snapshot record, got %s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to get snapshot record. ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to get snapshot record, got %s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to parse command output\n" +#~ msgstr "Kan de geselecteerde client niet vinden." + +#, fuzzy +#~ msgid " Delete Snapshot for %s\n" +#~ msgstr "Lees fout op bestand %s. 
FOUT=%s\n" + +#, fuzzy +#~ msgid " Unable to delete snapshot of %s ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid " Create Snapshot for %s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid " Unable to create snapshot of %s ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Version: %s (%s)\n" +#~ "\n" +#~ "Usage: bacula-fd [-f -s] [-c config_file] [-d debug_level]\n" +#~ " -c use as configuration file\n" +#~ " -d [,] set debug level to , debug tags to \n" +#~ " -dt print a timestamp in debug output\n" +#~ " -f run in foreground (for debugging)\n" +#~ " -g groupid\n" +#~ " -k keep readall capabilities\n" +#~ " -m print kaboom output (for debugging)\n" +#~ " -s no signals (for debugging)\n" +#~ " -t test configuration file and exit\n" +#~ " -T set trace on\n" +#~ " -u userid\n" +#~ " -v verbose user messages\n" +#~ " -? print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Versie: %s (%s) %s %s %s\n" +#~ "\n" +#~ "Specificatie: tray-monitor [-c configuratiebestand] [-d debug_level]\n" +#~ " -c gebruik configuratiebestand\n" +#~ " -d instellen debug level op \n" +#~ " -dt weergeven tijd in debug output\n" +#~ " -t test - lees configuratie en stop\n" +#~ " -? geef deze melding.\n" +#~ "\n" + +#, fuzzy +#~ msgid "Disable Command \"%s\" not found.\n" +#~ msgstr "Job \"%s\" niet gevonden.\n" + +#, fuzzy +#~ msgid "Connection from unknown Director %s at %s rejected.\n" +#~ msgstr "" +#~ "Verbinden met Director %s:%d\n" +#~ "\n" + +#, fuzzy +#~ msgid "SD \"%s\" tried to connect two times.\n" +#~ msgstr "Verbinden met de director mislukt\n" + +#, fuzzy +#~ msgid "Command: \"%s\" is disabled.\n" +#~ msgstr "Opdracht mislukt." + +#, fuzzy +#~ msgid "2901 Job %s not found.\n" +#~ msgstr "Job \"%s\" niet gevonden.\n" + +#, fuzzy +#~ msgid "Cannot open FileSet input file: %s. ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Invalid FileSet command: %s\n" +#~ msgstr "Onjuiste periode.\n" + +#, fuzzy +#~ msgid "Bad session command: %s" +#~ msgstr "Opdracht aan het uitvoeren ..." + +#, fuzzy +#~ msgid "Failed to connect to Storage daemon: %s:%d\n" +#~ msgstr "Verbinden met de director mislukt\n" + +#, fuzzy +#~ msgid "Failed connect from Storage daemon. SD bsock=NULL.\n" +#~ msgstr "Kan de geselecteerde opslag niet vinden." + +#, fuzzy +#~ msgid "Cannot contact Storage daemon\n" +#~ msgstr "Kan de geselecteerde opslag niet vinden." + +#, fuzzy +#~ msgid "Error setting Finder Info on \"%s\"\n" +#~ msgstr "Geen Volumes gevonden om terug te zetten.\n" + +#, fuzzy +#~ msgid "LZO init failed\n" +#~ msgstr "Initialiseren LZO mislukt\n" + +#, fuzzy +#~ msgid "Data record error. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Cannot open resource fork for %s.\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Failed to decode message signature for %s\n" +#~ msgstr "Kan de geselecteerde client niet vinden." + +#, fuzzy +#~ msgid "None" +#~ msgstr "*Geen*" + +#~ msgid "Zlib errno" +#~ msgstr "Zlib foutnr." 
+ +#~ msgid "Zlib stream error" +#~ msgstr "Zlib datastroom fout" + +#~ msgid "Zlib data error" +#~ msgstr "Zlib data fout" + +#~ msgid "Zlib memory error" +#~ msgstr "Zlib geheugen fout" + +#~ msgid "Zlib buffer error" +#~ msgstr "Zlib buffer fout" + +#~ msgid "Zlib version error" +#~ msgstr "Zlib versie fout" + +#, fuzzy +#~ msgid "*none*" +#~ msgstr "*Geen*" + +#, fuzzy +#~ msgid "Seek to %s error on %s: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "LZO uncompression error on file %s. ERR=%d\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Uncompression error on file %s. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "" +#~ "Write error at byte=%lld block=%d write_len=%d lerror=%d on %s: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Decryption error\n" +#~ msgstr "Fout in versleuteling \n" + +#, fuzzy +#~ msgid "Signature validation failed for file %s: ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Digest one file failed for file: %s\n" +#~ msgstr "Dit configuratiebestand wordt gebruikt: %s\n" + +#, fuzzy +#~ msgid "Director connected %sat: %s\n" +#~ msgstr "Director niet verbonden." + +#, fuzzy +#~ msgid "JobId %d Job %s is running.\n" +#~ msgstr "JobID %s is niet in uitvoering.\n" + +#, fuzzy +#~ msgid " Could not access %s: ERR=%s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid " Could not follow link %s: ERR=%s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid " Could not stat %s: ERR=%s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid " Could not open directory %s: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Network error in send to Director: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid " Cannot open %s: ERR=%s.\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid " Cannot open resource fork for %s: ERR=%s.\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Error reading file %s: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid " st_size differs on \"%s\". Vol: %s File: %s\n" +#~ msgstr " st_size verschil. Cat: %s Bestand: %s\n" + +#, fuzzy +#~ msgid " %s differs on \"%s\". File=%s Vol=%s\n" +#~ msgstr " st_ino verschil. Cat: %s Bestand: %s\n" + +#, fuzzy +#~ msgid "Unable to stat file \"%s\": ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Illegal empty xattr attribute name\n" +#~ msgstr "Onjuist karakter in Volume naam \"%s\"\n" + +#, fuzzy +#~ msgid "pioctl VIOCGETAL error on file \"%s\": ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "pioctl VIOCSETAL error on file \"%s\": ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "pathconf error on file \"%s\": ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Failed to convert %d into namespace on file \"%s\"\n" +#~ msgstr "Verbinden met de director mislukt\n" + +#, fuzzy +#~ msgid "acl_to_text error on file \"%s\": ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "acl_get_file error on file \"%s\": ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. 
FOUT=%s\n" + +#, fuzzy +#~ msgid "acl_delete_def_file error on file \"%s\": ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "acl_from_text error on file \"%s\": ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "acl_set_file error on file \"%s\": ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "extattr_list_link error on file \"%s\": ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "extattr_get_link error on file \"%s\": ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Failed to convert %s into namespace on file \"%s\"\n" +#~ msgstr "Verbinden met de director mislukt\n" + +#, fuzzy +#~ msgid "extattr_set_link error on file \"%s\": ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "acl_valid error on file \"%s\": ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "llistxattr error on file \"%s\": ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "lgetxattr error on file \"%s\": ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "setxattr error on file \"%s\": ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Unable to get status on xattr \"%s\" on file \"%s\": ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "acl_get error on file \"%s\": ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Unable to get xattr acl on file \"%s\": ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Unable to get acl on xattr %s on file \"%s\": ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Unable to get acl text on xattr %s on file \"%s\": ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to convert acl from text on file \"%s\"\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Unable to restore acl of xattr %s on file \"%s\": ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "acl_fromtext error on file \"%s\": ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "acl_set error on file \"%s\": ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Unable to open xattr on file \"%s\": ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to list the xattr on file \"%s\": ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Unable to close xattr list on file \"%s\": ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Unable to stat xattr \"%s\" on file \"%s\": ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Unable to open file \"%s\": ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to link xattr %s to %s on file \"%s\": ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Unable to open attribute \"%s\" at file \"%s\": ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Unable to restore data of xattr %s on file \"%s\": ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. 
FOUT=%s\n" + +#, fuzzy +#~ msgid "Unable to restore owner of xattr %s on file \"%s\": ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Unable to restore filetimes of xattr %s on file \"%s\": ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Unable to set file owner %s: ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to set file modes %s: ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to set file times %s: ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to set file flags %s: ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Restore Object" +#~ msgstr "Uitvoeren herstel opdracht" + +#, fuzzy +#~ msgid "Cannot make fifo %s: ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot make node %s: ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not symlink %s -> %s: ERR=%s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "Could not restore file flags for file %s: ERR=%s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "Could not hard link %s -> %s: ERR=%s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "Could not reset file flags for file %s: ERR=%s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "Plugin: \"%s\" not found.\n" +#~ msgstr "Job \"%s\" niet gevonden.\n" + +#, fuzzy +#~ msgid "Cannot stat file %s: ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot create directory %s: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Cannot change owner and/or group of %s: ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot change permissions of %s: ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot open current directory: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Cannot get current directory: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Cannot reset current directory: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Error scanning attributes: %s\n" +#~ msgstr "Geen Volumes gevonden om terug te zetten.\n" + +#, fuzzy +#~ msgid "TLS connection initialization failed.\n" +#~ msgstr "Initialiseren LZO mislukt\n" + +#, fuzzy +#~ msgid "TLS Negotiation failed.\n" +#~ msgstr "Initialiseren LZO mislukt\n" + +#, fuzzy +#~ msgid "Unknown error." +#~ msgstr "Onbekende opdracht." + +#, fuzzy +#~ msgid "Unknown sig %d" +#~ msgstr "Onbekende opdracht." 
+ +#, fuzzy +#~ msgid "Cannot bind port %d: ERR=%s.\n" +#~ msgstr "Kan Job niet vinden \"%s\": ERR=%s" + +#, fuzzy +#~ msgid "No addr/port found to listen on.\n" +#~ msgstr "Geen Volumes gevonden om terug te zetten.\n" + +#, fuzzy +#~ msgid "Could not init client queue: ERR=%s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "Could not add job to client queue: ERR=%s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "Could not destroy client queue: ERR=%s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "" +#~ "Could not connect to %s on %s:%d. ERR=%s\n" +#~ "Retrying ...\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "Unable to connect to %s on %s:%d. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Socket open error. proto=%d port=%d. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Could not init bsock read mutex. ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not init bsock write mutex. ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not init bsock attribute mutex. ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Write error sending %d bytes to %s:%s:%d: ERR=%s\n" +#~ msgstr "Geen Volumes gevonden om terug te zetten.\n" + +#, fuzzy +#~ msgid "Read error from %s:%s:%d: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "sockopt error: %s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "fcntl F_GETFL error. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "fcntl F_SETFL error. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Director authorization error at \"%s:%d\"\n" +#~ msgstr "Geen toegang tot Pool \"%s\"\n" + +#, fuzzy +#~ msgid "Out of memory: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Bad errno" +#~ msgstr "Zlib foutnr." + +#, fuzzy +#~ msgid "Memset for %d bytes at %s:%d\n" +#~ msgstr "" +#~ "\n" +#~ "\n" +#~ "%s fout: %ld op %s:%d" + +#, fuzzy +#~ msgid "Cannot open %s file. %s ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not open %s file. %s ERR=%s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "Cannot lock %s file. %s ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot not open %s file. %s ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not create state file. %s ERR=%s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "Write final hdr error: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "1999 Authorization failed.\n" +#~ msgstr "Initialiseren LZO mislukt\n" + +#, fuzzy +#~ msgid "Unable to open certificate file" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to read certificate from file" +#~ msgstr "Kan de geselecteerde client niet vinden." + +#, fuzzy +#~ msgid "Unable to open private key file" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Unable to read private key from file" +#~ msgstr "Lees fout op bestand %s. 
FOUT=%s\n" + +#, fuzzy +#~ msgid "No signers found for crypto verify.\n" +#~ msgstr "Geen Volumes gevonden om terug te zetten.\n" + +#, fuzzy +#~ msgid "Signature creation failed" +#~ msgstr "Configuratiebestand" + +#, fuzzy +#~ msgid "Signer not found" +#~ msgstr "Job \"%s\" niet gevonden.\n" + +#, fuzzy +#~ msgid "Recipient not found" +#~ msgstr "Job \"%s\" niet gevonden.\n" + +#, fuzzy +#~ msgid "Decryption error" +#~ msgstr "Fout in versleuteling \n" + +#, fuzzy +#~ msgid "Unknown error" +#~ msgstr "Onbekende opdracht." + +#, fuzzy +#~ msgid "Cannot fork to become daemon: ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Illegal character \"%c\" in name.\n" +#~ msgstr "Onjuist karakter in Volume naam \"%s\"\n" + +#, fuzzy +#~ msgid "Cannot open config file %s: %s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot open lex\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Verifying" +#~ msgstr "Controle OK" + +#, fuzzy +#~ msgid "Restoring" +#~ msgstr "Terugzetten..." + +#, fuzzy +#~ msgid "Unknown operation" +#~ msgstr "Onbekende opdracht." + +#, fuzzy +#~ msgid "verify" +#~ msgstr "Controle OK" + +#, fuzzy +#~ msgid "restored" +#~ msgstr "Terugzetten afbreken" + +#, fuzzy +#~ msgid "restore" +#~ msgstr "Terugzetten afbreken" + +#, fuzzy +#~ msgid "unknown action" +#~ msgstr "Onbekende opdracht." + +#, fuzzy +#~ msgid "pthread_once failed. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Could not init msg_queue mutex. ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Config error: %s\n" +#~ msgstr "Compressie LZO fout: %d\n" + +#, fuzzy +#~ msgid "string" +#~ msgstr "Terugzetten..." + +#, fuzzy +#~ msgid "Cannot open included config file %s: %s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Mutex lock failure. ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Mutex unlock failure. ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "pthread_create failed: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Could not open console message file %s: ERR=%s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "Could not get con mutex: ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "open mail pipe %s failed: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "close error: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "fopen %s failed: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Msg delivery error: fopen %s failed: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Unable to init mutex: ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to destroy mutex: ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to init OpenSSL threading: ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to initialize resource lock. 
ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot open config file \"%s\": %s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Failed to open Plugin directory %s: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "dlopen plugin %s failed: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Lookup of loadPlugin in plugin %s failed: ERR=%s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "Could not find userid=%s: ERR=%s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "Could not find password entry. ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not find group=%s: ERR=%s\n" +#~ msgstr "Kan Job niet vinden \"%s\": ERR=%s" + +#, fuzzy +#~ msgid "Could not initgroups for group=%s, userid=%s: ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not initgroups for userid=%s: ERR=%s\n" +#~ msgstr "Kan Job niet vinden \"%s\": ERR=%s" + +#, fuzzy +#~ msgid "Could not set group=%s: ERR=%s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "prctl failed: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "setreuid failed: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "cap_from_text failed: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "cap_set_proc failed: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Could not set specified userid: %s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "%s: %s\n" +#~ msgstr "Bestand: %s\n" + +#, fuzzy +#~ msgid "User cancel requested.\n" +#~ msgstr "Terugzetten geannuleerd.\n" + +#, fuzzy +#~ msgid "Selection items must be be greater than zero.\n" +#~ msgstr "Selecteer Pool" + +#, fuzzy +#~ msgid "Fork error: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "execv: %s failed: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Error loading certificate file" +#~ msgstr "Fout bij schrijven van bsr bestand.\n" + +#, fuzzy +#~ msgid "Unable to open DH parameters file" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Connect failure" +#~ msgstr "Verbonden" + +#, fuzzy +#~ msgid "Non-fatal error" +#~ msgstr "Zlib data fout" + +#, fuzzy +#~ msgid "Canceled" +#~ msgstr "Backup geannuleerd" + +#, fuzzy +#~ msgid "Waiting for Storage resource" +#~ msgstr "Selecteer opslag medium" + +#, fuzzy +#~ msgid "Waiting for Job resource" +#~ msgstr "Selecteer Pool" + +#, fuzzy +#~ msgid "Waiting for Client resource" +#~ msgstr "Selecteer FileSet bron" + +#, fuzzy +#~ msgid "Waiting for Start Time" +#~ msgstr "Job %s wacht %d seconde voor de start tijd.\n" + +#, fuzzy +#~ msgid "Completed successfully" +#~ msgstr "Terugzetten succesvol." + +#, fuzzy +#~ msgid "Completed with warnings" +#~ msgstr "Backup OK -- met waarschuwingen" + +#, fuzzy +#~ msgid "Fatal error" +#~ msgstr "Zlib data fout" + +#, fuzzy +#~ msgid "Canceled by user" +#~ msgstr "Terugzetten afbreken" + +#, fuzzy +#~ msgid "Waiting for Storage daemon" +#~ msgstr "Kan de geselecteerde opslag niet vinden." + +#, fuzzy +#~ msgid "Fatal Error" +#~ msgstr "Zlib data fout" + +#, fuzzy +#~ msgid "Unknown term code" +#~ msgstr "Onbekende opdracht." 
+ +#, fuzzy +#~ msgid "Verify" +#~ msgstr "Controle OK" + +#, fuzzy +#~ msgid "Restore" +#~ msgstr "Uitvoeren herstel opdracht" + +#, fuzzy +#~ msgid "Verify Data" +#~ msgstr "Controle OK" + +#, fuzzy +#~ msgid "Working directory not defined. Cannot continue.\n" +#~ msgstr "Fout : Geen director gevonden in uw configuratie.\n" + +#, fuzzy +#~ msgid "unknown error" +#~ msgstr "Fout in versleuteling \n" + +#, fuzzy +#~ msgid "Unable to initialize watchdog lock. ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "rwl_writelock failure. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "rwl_writeunlock failure. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Could not init work queue: ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not add work to queue: ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Error in workq_destroy: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Job %s canceled.\n" +#~ msgstr "Terugzetten geannuleerd.\n" + +#, fuzzy +#~ msgid "Read open %s device %s Volume \"%s\" failed: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Ready to read from volume \"%s\" on %s device %s.\n" +#~ msgstr "Nieuw volume \"%s\" aangemaakt in catalogus.\n" + +#, fuzzy +#~ msgid "Could not ready %s device %s for append.\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "Could not create JobMedia record for Volume=\"%s\" Job=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Read error on device %s in ANSI label. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Could not write ANSI HDR1 label. ERR=%s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "Error writing EOF to tape. ERR=%s" +#~ msgstr "Geen Volumes gevonden om terug te zetten.\n" + +#, fuzzy +#~ msgid "Unable to set network buffer size.\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Write session label failed. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Network send error to FD. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Error reading data header from FD. n=%d msglen=%d ERR=%s\n" +#~ msgstr "Geen Volumes gevonden om terug te zetten.\n" + +#, fuzzy +#~ msgid "Network error reading from FD. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Fatal append error on device %s: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Error writing end session label. ERR=%s\n" +#~ msgstr "Geen Volumes gevonden om terug te zetten.\n" + +#, fuzzy +#~ msgid "Error updating file attributes. ERR=%s\n" +#~ msgstr "Geen Volumes gevonden om terug te zetten.\n" + +#, fuzzy +#~ msgid "Error getting Volume info: %s" +#~ msgstr "Geen Volumes gevonden om terug te zetten.\n" + +#, fuzzy +#~ msgid "Error creating JobMedia records: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. 
FOUT=%s\n" + +#, fuzzy +#~ msgid "Error creating JobMedia records: %s\n" +#~ msgstr "" +#~ "Fout bij verwerken van opdrachtregel variabelen, maak nu gebruik van " +#~ "standaard instelling.\n" + +#, fuzzy +#~ msgid "TLS negotiation failed with DIR at \"%s:%d\"\n" +#~ msgstr "Initialiseren LZO mislukt\n" + +#, fuzzy +#~ msgid "Unable to authenticate Director at %s.\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "TLS negotiation failed with FD at \"%s:%d\"\n" +#~ msgstr "Initialiseren LZO mislukt\n" + +#, fuzzy +#~ msgid "Volume \"%s\" wanted on %s is in use by device %s\n" +#~ msgstr "Job \"%s\" niet gevonden.\n" + +#, fuzzy +#~ msgid "Autochanger error: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: bcopy [-d debug_level] \n" +#~ " -b bootstrap specify a bootstrap file\n" +#~ " -c specify a Storage configuration file\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -i specify input Volume names (separated by |)\n" +#~ " -o specify output Volume names (separated by |)\n" +#~ " -p proceed inspite of errors\n" +#~ " -v verbose\n" +#~ " -w specify working directory (default /tmp)\n" +#~ " -? print this message\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Versie: %s (%s) %s %s %s\n" +#~ "\n" +#~ "Specificatie: tray-monitor [-c configuratiebestand] [-d debug_level]\n" +#~ " -c gebruik configuratiebestand\n" +#~ " -d instellen debug level op \n" +#~ " -dt weergeven tijd in debug output\n" +#~ " -t test - lees configuratie en stop\n" +#~ " -? geef deze melding.\n" +#~ "\n" + +#, fuzzy +#~ msgid "dev open failed: %s\n" +#~ msgstr "Nieuw bestand: %s\n" + +#, fuzzy +#~ msgid "Unknown" +#~ msgstr "Onbekende opdracht." + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: bextract \n" +#~ " -b specify a bootstrap file\n" +#~ " -c specify a Storage configuration file\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -T send debug traces to trace file (stored in /tmp)\n" +#~ " -e exclude list\n" +#~ " -i include list\n" +#~ " -p proceed inspite of I/O errors\n" +#~ " -t read data from volume, do not write anything\n" +#~ " -v verbose\n" +#~ " -V specify Volume names (separated by |)\n" +#~ " -? print this message\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Versie: %s (%s) %s %s %s\n" +#~ "\n" +#~ "Specificatie: tray-monitor [-c configuratiebestand] [-d debug_level]\n" +#~ " -c gebruik configuratiebestand\n" +#~ " -d instellen debug level op \n" +#~ " -dt weergeven tijd in debug output\n" +#~ " -t test - lees configuratie en stop\n" +#~ " -? geef deze melding.\n" +#~ "\n" + +#, fuzzy +#~ msgid "Could not open exclude file: %s, ERR=%s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "Could not open include file: %s, ERR=%s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "Cannot stat %s. It must exist. ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Write error on %s: %s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Seek error Addr=%llu on %s: %s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Uncompression error. ERR=%d\n" +#~ msgstr "Compressie LZO fout: %d\n" + +#, fuzzy +#~ msgid "LZO uncompression error. 
ERR=%d\n" +#~ msgstr "Compressie LZO fout: %d\n" + +#, fuzzy +#~ msgid "Error writing JobMedia record to catalog.\n" +#~ msgstr "Fout bij schrijven van bsr bestand.\n" + +#, fuzzy +#~ msgid "Attempt to write on closed device=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Write error at %s on device %s Vol=%s. ERR=%s.\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Job failed or canceled.\n" +#~ msgstr "Terugzetten geannuleerd.\n" + +#, fuzzy +#~ msgid "The %sVolume=%s on device=%s appears to be unlabeled.%s\n" +#~ msgstr "Nieuw volume \"%s\" aangemaakt in catalogus.\n" + +#, fuzzy +#~ msgid "Read error on fd=%d at addr=%s on device %s. ERR=%s.\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Read zero %sbytes Vol=%s at %s on device %s.\n" +#~ msgstr "Nieuw volume \"%s\" aangemaakt in catalogus.\n" + +#, fuzzy +#~ msgid "Backspace file at EOT failed. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Backspace record at EOT failed. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Re-read last block at EOT failed. ERR=%s" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Error sending Volume info to Director.\n" +#~ msgstr "Geen Volumes gevonden om terug te zetten.\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: bls [options] \n" +#~ " -b specify a bootstrap file\n" +#~ " -c specify a Storage configuration file\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -e exclude list\n" +#~ " -i include list\n" +#~ " -j list jobs\n" +#~ " -k list blocks\n" +#~ " (no j or k option) list saved files\n" +#~ " -L dump label\n" +#~ " -p proceed inspite of errors\n" +#~ " -V specify Volume names (separated by |)\n" +#~ " -E Check records to detect errors\n" +#~ " -v be verbose\n" +#~ " -? print this message\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Versie: %s (%s) %s %s %s\n" +#~ "\n" +#~ "Specificatie: tray-monitor [-c configuratiebestand] [-d debug_level]\n" +#~ " -c gebruik configuratiebestand\n" +#~ " -d instellen debug level op \n" +#~ " -dt weergeven tijd in debug output\n" +#~ " -t test - lees configuratie en stop\n" +#~ " -? geef deze melding.\n" +#~ "\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Warning, this Volume is a continuation of Volume %s\n" +#~ msgstr "Dit configuratiebestand wordt gebruikt: %s\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: bscan [ options ] \n" +#~ " -b bootstrap specify a bootstrap file\n" +#~ " -c specify configuration file\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -m update media info in database\n" +#~ " -D specify the driver database name (default NULL)\n" +#~ " -n specify the database name (default bacula)\n" +#~ " -u specify database user name (default bacula)\n" +#~ " -P specify database password (default none)\n" +#~ " -h specify database host (default NULL)\n" +#~ " -t specify database port (default 0)\n" +#~ " -p proceed inspite of I/O errors\n" +#~ " -r list records\n" +#~ " -s synchronize or store in database\n" +#~ " -S show scan progress periodically\n" +#~ " -v verbose\n" +#~ " -V specify Volume names (separated by |)\n" +#~ " -w specify working directory (default from conf " +#~ "file)\n" +#~ " -? 
print this message\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Versie: %s (%s) %s %s %s\n" +#~ "\n" +#~ "Specificatie: tray-monitor [-c configuratiebestand] [-d debug_level]\n" +#~ " -c gebruik configuratiebestand\n" +#~ " -d instellen debug level op \n" +#~ " -dt weergeven tijd in debug output\n" +#~ " -t test - lees configuratie en stop\n" +#~ " -? geef deze melding.\n" +#~ "\n" + +#, fuzzy +#~ msgid "No Working Directory defined in %s. Cannot continue.\n" +#~ msgstr "Fout : Geen director gevonden in uw configuratie.\n" + +#, fuzzy +#~ msgid "Create JobMedia for Job %s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not create JobMedia record for Volume=%s Job=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Media record for %s found in DB.\n" +#~ msgstr "Job \"%s\" niet gevonden.\n" + +#, fuzzy +#~ msgid "Could not update job record. ERR=%s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "Could not create File Attributes record. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Created File record: %s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Could not create media record. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Could not update media record. ERR=%s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "Created Media record for Volume: %s\n" +#~ msgstr "Job \"%s\" niet gevonden.\n" + +#, fuzzy +#~ msgid "Could not create pool record. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Created Pool record for Pool: %s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not get Client record. ERR=%s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "Created Client record for Client: %s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Fileset \"%s\" already exists.\n" +#~ msgstr "" +#~ "Geprobeerd om \"%s\" aan te maken, maar deze bestaat al. Probeer " +#~ "opnieuw.\n" + +#, fuzzy +#~ msgid "Could not create FileSet record \"%s\". ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Could not create JobId record. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Could not update job start record. ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Created new JobId=%u record for original JobId=%u\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not update JobId=%u record. ERR=%s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "Could not create JobMedia record. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Could not add MD5/SHA1 to File record. ERR=%s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: bsdjson [options] [config_file]\n" +#~ " -r get resource type \n" +#~ " -n get resource \n" +#~ " -l get only directives matching dirs (use with -r)\n" +#~ " -D get only data\n" +#~ " -c use as configuration file\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -t test - read config and exit\n" +#~ " -v verbose user messages\n" +#~ " -? 
print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Versie: %s (%s) %s %s %s\n" +#~ "\n" +#~ "Specificatie: tray-monitor [-c configuratiebestand] [-d debug_level]\n" +#~ " -c gebruik configuratiebestand\n" +#~ " -d instellen debug level op \n" +#~ " -dt weergeven tijd in debug output\n" +#~ " -t test - lees configuratie en stop\n" +#~ " -? geef deze melding.\n" +#~ "\n" + +#, fuzzy +#~ msgid "Only one Storage resource permitted in %s\n" +#~ msgstr "De gedefinieerde Opslag media zijn:\n" + +#, fuzzy +#~ msgid "Enter Volume Name: " +#~ msgstr "Voer *MediaId of Volume naam in:" + +#, fuzzy +#~ msgid "Device open failed. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "I/O error on device: ERR=%s" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Volume type error: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Error creating label. ERR=%s" +#~ msgstr "Geen Volumes gevonden om terug te zetten.\n" + +#, fuzzy +#~ msgid "Volume version error.\n" +#~ msgstr "Zlib versie fout" + +#, fuzzy +#~ msgid "Unknown error.\n" +#~ msgstr "Onbekende opdracht." + +#, fuzzy +#~ msgid "Bad status from load. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Bad status from rewind. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Bad status from weof. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Bad status from bsf. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Do you want to continue? (y/n): " +#~ msgstr "Doorgaan? (ja, wijz, nee):" + +#, fuzzy +#~ msgid "Command aborted.\n" +#~ msgstr "Opdracht mislukt." + +#, fuzzy +#~ msgid "Error writing record to block.\n" +#~ msgstr "Fout bij schrijven van bsr bestand.\n" + +#, fuzzy +#~ msgid "Error writing block to device.\n" +#~ msgstr "Fout bij schrijven van bsr bestand.\n" + +#, fuzzy +#~ msgid "Backspace file failed! ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Backspace record failed! ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Read block failed! ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Write failed at block %u. stat=%d ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Error writing record to block.\n" +#~ msgstr "Fout bij schrijven van bsr bestand.\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Error writing block to device.\n" +#~ msgstr "Fout bij schrijven van bsr bestand.\n" + +#, fuzzy +#~ msgid "Read block %d failed! ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Read record failed. Block %d! ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Reposition error.\n" +#~ msgstr "Fout in versleuteling \n" + +#, fuzzy +#~ msgid "" +#~ "Read block %d failed! file=%d blk=%d. ERR=%s\n" +#~ "\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Read record failed! ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Bad status from fsr. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Bad status from fsf. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Enter length to read: " +#~ msgstr "Voer *MediaId of Volume naam in:" + +#, fuzzy +#~ msgid "Read of %d bytes gives stat=%d. 
ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "read error on %s. ERR=%s.\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Bad status from read %d. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Error reading block. ERR=%s\n" +#~ msgstr "Geen Volumes gevonden om terug te zetten.\n" + +#, fuzzy +#~ msgid "Job canceled.\n" +#~ msgstr "Terugzetten geannuleerd.\n" + +#, fuzzy +#~ msgid "Could not create state file: %s ERR=%s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "do_unfill failed.\n" +#~ msgstr "Job mislukt." + +#, fuzzy +#~ msgid "Reposition error. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Error reading block: ERR=%s\n" +#~ msgstr "Geen Volumes gevonden om terug te zetten.\n" + +#, fuzzy +#~ msgid "test autochanger" +#~ msgstr "Voer autochanger slot in:" + +#, fuzzy +#~ msgid "clear tape errors" +#~ msgstr "Zlib data fout" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: btape \n" +#~ " -b specify bootstrap file\n" +#~ " -c set configuration file to file\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -p proceed inspite of I/O errors\n" +#~ " -s turn off signals\n" +#~ " -w set working directory to dir\n" +#~ " -v be verbose\n" +#~ " -? print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Versie: %s (%s) %s %s %s\n" +#~ "\n" +#~ "Specificatie: tray-monitor [-c configuratiebestand] [-d debug_level]\n" +#~ " -c gebruik configuratiebestand\n" +#~ " -d instellen debug level op \n" +#~ " -dt weergeven tijd in debug output\n" +#~ " -t test - lees configuratie en stop\n" +#~ " -? geef deze melding.\n" +#~ "\n" + +#, fuzzy +#~ msgid "End of Volume \"%s\" %d records.\n" +#~ msgstr "Nieuw volume \"%s\" aangemaakt in catalogus.\n" + +#, fuzzy +#~ msgid "Cannot open Dev=%s, Vol=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot open %s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not find device \"%s\" in config file %s.\n" +#~ msgstr "Kan Client niet vinden \"%s\": ERR=%s" + +#, fuzzy +#~ msgid "Seek error: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "lseek error on %s. ERR=%s.\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Error closing device %s. ERR=%s.\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Cannot run free space command. Results=%s ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Unable to write EOF. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "New volume \"%s\" mounted on device %s at %s.\n" +#~ msgstr "Job \"%s\" niet gevonden.\n" + +#, fuzzy +#~ msgid "Connection request from %s failed.\n" +#~ msgstr "Verbinding verbroken\n" + +#, fuzzy +#~ msgid "[SF0102] Failed to connect to Client daemon: %s:%d\n" +#~ msgstr "Verbinden met de director mislukt\n" + +#, fuzzy +#~ msgid "[SF0104] Failed to connect to Storage daemon: %s:%d\n" +#~ msgstr "Verbinden met de director mislukt\n" + +#, fuzzy +#~ msgid "3904 Job %s not found.\n" +#~ msgstr "Job \"%s\" niet gevonden.\n" + +#, fuzzy +#~ msgid "3900 Truncate cache for volume \"%s\" failed. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. 
FOUT=%s\n" + +#, fuzzy +#~ msgid "3912 Error scanning upload command: ERR=%s\n" +#~ msgstr "Geen Volumes gevonden om terug te zetten.\n" + +#, fuzzy +#~ msgid "3929 Unable to open device \"%s\": ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "3912 Failed to label Volume %s: ERR=%s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "3913 Failed to open next part: ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "3901 Unable to open device \"%s\": ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "3002 Device \"%s\" enabled.\n" +#~ msgstr "Opdracht mislukt." + +#, fuzzy +#~ msgid "3002 Device \"%s\" disabled.\n" +#~ msgstr "Opdracht mislukt." + +#, fuzzy +#~ msgid "[SF0110] Could not create bootstrap file %s: ERR=%s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "[SF0111] Error parsing bootstrap file.\n" +#~ msgstr "Fout bij schrijven van bsr bestand.\n" + +#, fuzzy +#~ msgid "FD command not found: %s\n" +#~ msgstr "Job \"%s\" niet gevonden.\n" + +#, fuzzy +#~ msgid "Could not open(%s,%s,0640): ERR=%s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "Unable to truncate device %s. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Unable to stat device %s. ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not reopen: %s, ERR=%s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "" +#~ "Connection from unknown Director %s at %s rejected.\n" +#~ "Please see " +#~ msgstr "" +#~ "Verbinden met Director %s:%d\n" +#~ "\n" + +#, fuzzy +#~ msgid "Invalid Hello from %s. Len=%d\n" +#~ msgstr "Onjuiste periode.\n" + +#, fuzzy +#~ msgid "Recv request to Client failed. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. 
FOUT=%s\n" + +#, fuzzy +#~ msgid "[SE0001] Unable to stat device %s at %s: ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0003] Unable to stat mount point %s: ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0009] Unable to init mutex: ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0010] Unable to init cond variable: ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0011] Unable to init cond variable: ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0012] Unable to init spool mutex: ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0013] Unable to init acquire mutex: ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0014] Unable to init freespace mutex: ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0015] Unable to init read acquire mutex: ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0016] Unable to init volcat mutex: ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0017] Unable to init dcrs mutex: ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Couldn't rewind %s device %s: ERR=%s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "Could not unserialize Volume label: ERR=%s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "Volume on %s device %s has bad Bacula label type: %ld\n" +#~ msgstr "Nieuw volume \"%s\" aangemaakt in catalogus.\n" + +#, fuzzy +#~ msgid "Could not reserve volume %s on %s device %s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "Cannot write Volume label to block for %s device %s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "Open %s device %s Volume \"%s\" failed: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Rewind error on %s device %s: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Truncate error on %s device %s: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Failed to re-open device after truncate on %s device %s: ERR=%s" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to write %s device %s: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Recycled volume \"%s\" on %s device %s, all previous data lost.\n" +#~ msgstr "Nieuw volume \"%s\" aangemaakt in catalogus.\n" + +#, fuzzy +#~ msgid "Wrote label to prelabeled Volume \"%s\" on %s device %s\n" +#~ msgstr "Nieuw volume \"%s\" aangemaakt in catalogus.\n" + +#, fuzzy +#~ msgid "Unknown %d" +#~ msgstr "Onbekende opdracht." + +#, fuzzy +#~ msgid "pthread_cond_wait failure. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Too many errors trying to mount %s device %s.\n" +#~ msgstr "Job \"%s\" niet gevonden.\n" + +#, fuzzy +#~ msgid "Job %d canceled.\n" +#~ msgstr "Terugzetten geannuleerd.\n" + +#, fuzzy +#~ msgid "Open of %s device %s Volume \"%s\" failed: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. 
FOUT=%s\n" + +#, fuzzy +#~ msgid "Unable to position to end of data on %s device %s: ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Volume \"%s\" not loaded on %s device %s.\n" +#~ msgstr "Job \"%s\" niet gevonden.\n" + +#, fuzzy +#~ msgid "Cannot open %s Dev=%s, Vol=%s for reading.\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to set eotmodel on device %s: ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "ioctl MTIOCGET error on %s. ERR=%s.\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "I/O function \"%s\" not supported on this device.\n" +#~ msgstr "Job \"%s\" niet gevonden.\n" + +#, fuzzy +#~ msgid "Cannot open bootstrap file %s: %s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "FileIndex : %u\n" +#~ msgstr "Bestand: %s\n" + +#, fuzzy +#~ msgid " MediaType : %s\n" +#~ msgstr "Type media" + +#, fuzzy +#~ msgid "No Volume names found for restore.\n" +#~ msgstr "Geen Volumes gevonden om terug te zetten.\n" + +#, fuzzy +#~ msgid "Error sending header to Client. ERR=%s\n" +#~ msgstr "Geen Volumes gevonden om terug te zetten.\n" + +#, fuzzy +#~ msgid "Error sending to FD. ERR=%s\n" +#~ msgstr "Geen Volumes gevonden om terug te zetten.\n" + +#, fuzzy +#~ msgid "Error sending data to Client. ERR=%s\n" +#~ msgstr "Geen Volumes gevonden om terug te zetten.\n" + +#, fuzzy +#~ msgid "Error sending to File daemon. ERR=%s\n" +#~ msgstr "Geen Volumes gevonden om terug te zetten.\n" + +#, fuzzy +#~ msgid "End of Volume \"%s\" at addr=%s on device %s.\n" +#~ msgstr "Job \"%s\" niet gevonden.\n" + +#, fuzzy +#~ msgid "Unknown code %d\n" +#~ msgstr "Onbekende opdracht." + +#, fuzzy +#~ msgid "Unable to initialize reservation lock. ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Failed command: %s\n" +#~ msgstr "Bestand: %s\n" + +#, fuzzy +#~ msgid "3926 Could not get dcr for device: %s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "3611 JobId=%u Volume max jobs=%d exceeded on %s device %s.\n" +#~ msgstr "Job \"%s\" niet gevonden.\n" + +#, fuzzy +#~ msgid "" +#~ "3607 JobId=%u wants Vol=\"%s\" drive has Vol=\"%s\" on %s device %s.\n" +#~ msgstr "Nieuw volume \"%s\" aangemaakt in catalogus.\n" + +#, fuzzy +#~ msgid "Job cancelled.\n" +#~ msgstr "Terugzetten geannuleerd." + +#, fuzzy +#~ msgid "Open data spool file %s failed: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Ftruncate spool file failed: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Spool header read error. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Error writing block to spool file. ERR=%s\n" +#~ msgstr "Fout bij schrijven van bsr bestand.\n" + +#, fuzzy +#~ msgid "Fseek on attributes file failed: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Truncate on attributes file failed: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "fopen attr spool file %s failed: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. 
FOUT=%s\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Device \"%s\" is not open or does not exist.\n" +#~ msgstr "Fout: Client %s bestaat niet.\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: bacula-sd [options] [-c config_file] [config_file]\n" +#~ " -c use as configuration file\n" +#~ " -d [,] set debug level to , debug tags to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -T set trace on\n" +#~ " -f run in foreground (for debugging)\n" +#~ " -g set groupid to group\n" +#~ " -m print kaboom output (for debugging)\n" +#~ " -p proceed despite I/O errors\n" +#~ " -s no signals (for debugging)\n" +#~ " -t test - read config and exit\n" +#~ " -u userid to \n" +#~ " -v verbose user messages\n" +#~ " -? print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Versie: %s (%s) %s %s %s\n" +#~ "\n" +#~ "Specificatie: tray-monitor [-c configuratiebestand] [-d debug_level]\n" +#~ " -c gebruik configuratiebestand\n" +#~ " -d instellen debug level op \n" +#~ " -dt weergeven tijd in debug output\n" +#~ " -t test - lees configuratie en stop\n" +#~ " -? geef deze melding.\n" +#~ "\n" + +#, fuzzy +#~ msgid "Unable to create thread. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Could not initialize SD device \"%s\"\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "Unable to stat ControlDevice %s: ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not open device %s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "Could not mount device %s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "Cannot find AutoChanger resource %s\n" +#~ msgstr "Kan Catalog medium niet vinden\n" + +#, fuzzy +#~ msgid "Unable to init lock for Autochanger=%s: ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot find Device resource %s\n" +#~ msgstr "Kan Catalog medium niet vinden\n" + +#, fuzzy +#~ msgid "3997 Bad alert command: %s: ERR=%s.\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Unable to open device %s: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Rewind error on %s. ERR=%s.\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "ioctl MTEOM error on %s. ERR=%s.\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "ioctl MTLOAD error on %s. ERR=%s.\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "ioctl MTOFFL error on %s. ERR=%s.\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "ioctl MTFSF error on %s. ERR=%s.\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "ioctl MTBSF error on %s. ERR=%s.\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "ioctl MTFSR %d error on %s. ERR=%s.\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "ioctl MTBSR error on %s. ERR=%s.\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "ioctl MTWEOF error on %s. ERR=%s.\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "No Volume names found for %s.\n" +#~ msgstr "Geen Volumes gevonden om terug te zetten.\n" + +#, fuzzy +#~ msgid "Unable to initialize volume list lock. 
ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not reserve volume \"%s\", because job canceled.\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "" +#~ "Could not reserve volume \"%s\" for append, because it will be read.\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "pthread timedwait error. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Could not init Bacula database\n" +#~ msgstr "Data parser kon niet geregistreerd worden!" + +#, fuzzy +#~ msgid "Error opening datafile %s\n" +#~ msgstr "Geen Volumes gevonden om terug te zetten.\n" + +#, fuzzy +#~ msgid "Error while inserting file\n" +#~ msgstr "Fout bij schrijven van bsr bestand.\n" + +#, fuzzy +#~ msgid "Could not open data file: %s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "Fatal fgets error: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Usage: %s [-f from] [-h mailhost] [-s subject] [-c copy] [recipient ...]\n" +#~ " -4 forces bsmtp to use IPv4 addresses only.\n" +#~ " -6 forces bsmtp to use IPv6 addresses only.\n" +#~ " -8 set charset to UTF-8\n" +#~ " -a use any ip protocol for address resolution\n" +#~ " -c set the Cc: field\n" +#~ " -d set debug level to \n" +#~ " -dt print a timestamp in debug output\n" +#~ " -f set the From: field\n" +#~ " -h use mailhost:port as the SMTP server\n" +#~ " -s set the Subject: field\n" +#~ " -r set the Reply-To: field\n" +#~ " -l set the maximum number of lines to send (default: " +#~ "unlimited)\n" +#~ " -? print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Versie: %s (%s) %s %s %s\n" +#~ "\n" +#~ "Specificatie: tray-monitor [-c configuratiebestand] [-d debug_level]\n" +#~ " -c gebruik configuratiebestand\n" +#~ " -d instellen debug level op \n" +#~ " -dt weergeven tijd in debug output\n" +#~ " -t test - lees configuratie en stop\n" +#~ " -? geef deze melding.\n" +#~ "\n" + +#, fuzzy +#~ msgid "Fatal gethostname error: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Fatal getaddrinfo for myself failed \"%s\": ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Error unknown mail host \"%s\": ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Failed to connect to mailhost %s\n" +#~ msgstr "Verbinden met de director mislukt\n" + +#, fuzzy +#~ msgid "Fatal socket error: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Fatal connect error to %s: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Fatal _open_osfhandle error: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Fatal fdopen error: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Fatal dup error: ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "" +#~ "ERROR %s\n" +#~ "\n" +#~ msgstr "FOUT" + +#, fuzzy +#~ msgid "Unable to open -p argument for reading" +#~ msgstr "Kan de geselecteerde client niet vinden." 
+ +#, fuzzy +#~ msgid "Could not open, database \"%s\".\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "Error there is no Catalog section in the given config file [%s]\n" +#~ msgstr "Fout : Geen director gevonden in uw configuratie.\n" + +#, fuzzy +#~ msgid "Error no Director resource defined.\n" +#~ msgstr "Fout: Client %s bestaat niet.\n" + +#, fuzzy +#~ msgid "Deleting: %s\n" +#~ msgstr "Bestand: %s\n" + +#, fuzzy +#~ msgid "Print the list? (yes/no): " +#~ msgstr "Doorgaan? (ja, wijz, nee):" + +#, fuzzy +#~ msgid "Print them? (yes/no): " +#~ msgstr "Doorgaan? (ja, wijz, nee):" + +#, fuzzy +#~ msgid "Create temporary index? (yes/no): " +#~ msgstr "Doorgaan? (ja, wijz, nee):" + +#, fuzzy +#~ msgid "Deleting %d Restore Job records.\n" +#~ msgstr "Selecteer Job om terug te zetten" + +#, fuzzy +#~ msgid "Reg: %s\n" +#~ msgstr "Bestand: %s\n" + +#, fuzzy +#~ msgid "Err: Could not access %s: %s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "Err: Could not follow ff->link %s: %s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "Err: Could not stat %s: %s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "Err: Could not open directory %s: %s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Could not open include file: %s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "Could not open exclude file: %s\n" +#~ msgstr "Kan Client niet vinden %s: ERR=%s" + +#, fuzzy +#~ msgid "Can't delete working directory %s. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Unable to detect the MySQL data_directory on this system.\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Can't get server configuration.\n" +#~ msgstr "Wijziging van configuratiebestand" + +#, fuzzy +#~ msgid "Unable to get the BINLOG list.\n" +#~ msgstr "Kan de geselecteerde client niet vinden." + +#, fuzzy +#~ msgid "Unable to determine the last LSN for %s (Previous job is %s)\n" +#~ msgstr "Kan de geselecteerde client niet vinden." + +#, fuzzy +#~ msgid "Unable to get last LSN from the backup\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "The current LSN is %s\n" +#~ msgstr "De huidige %s retention periode: %s\n" + +#, fuzzy +#~ msgid "Unable to parse or to use plugin options, %s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Can't get cluster configuration.\n" +#~ msgstr "Wijziging van configuratiebestand" + +#, fuzzy +#~ msgid "Can't determine the last WAL file\n" +#~ msgstr "Kan de geselecteerde client niet vinden." + +#, fuzzy +#~ msgid "Unable to detect the PostgreSQL data_directory on this system.\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to find data_directory=%s on this system. ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to determine the first WAL file on this system.\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to analyse tablespaces %s on this system. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Unable to analyse data_directory %s on this system. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Initializing DDE." +#~ msgstr "Initialiseren ..." 
+ +#, fuzzy +#~ msgid "Unable to init adata mutex: ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "Got Aligned or File type Volume %s on Dedup device %s. Wanted File.\n" +#~ msgstr "Nieuw volume \"%s\" aangemaakt in catalogus.\n" + +#, fuzzy +#~ msgid "" +#~ "Got Aligned or Dedup type Volume %s on File device %s. Wanted File.\n" +#~ msgstr "Nieuw volume \"%s\" aangemaakt in catalogus.\n" + +#, fuzzy +#~ msgid "" +#~ "Got File or Dedup type Volume %s on Aligned device %s. Wanted Aligned.\n" +#~ msgstr "Nieuw volume \"%s\" aangemaakt in catalogus.\n" + +#, fuzzy +#~ msgid "Unable to parse user supplied restore configuration\n" +#~ msgstr "Kan configuratiebestand niet lezen" + +#, fuzzy +#~ msgid "Confirm %s (yes/no): " +#~ msgstr "Bevestig annulering (yes/no)" + +#~ msgid "Add" +#~ msgstr "Toevoegen" + +#~ msgid "Remove" +#~ msgstr "Verwijderen" + +#~ msgid "Refresh" +#~ msgstr "Verversen" + +#~ msgid "M" +#~ msgstr "M" + +#~ msgid "Size" +#~ msgstr "Omvang" + +#~ msgid "Date" +#~ msgstr "Datum" + +#~ msgid "Perm." +#~ msgstr "Perm." + +#~ msgid "User" +#~ msgstr "Gebruiker" + +#~ msgid "Group" +#~ msgstr "Groep" + +#~ msgid "Job Name" +#~ msgstr "Naam opdracht:" + +#~ msgid "Please configure parameters concerning files to restore :" +#~ msgstr "Configureer parameters voor het terugzetten van bestanden aub :" + +#~ msgid "always" +#~ msgstr "altijd" + +#~ msgid "if newer" +#~ msgstr "indien nieuwer" + +#~ msgid "if older" +#~ msgstr "indien ouder" + +#~ msgid "never" +#~ msgstr "nooit" + +#~ msgid "Failed to find the selected fileset." +#~ msgstr "Kan de geselecteerde fileset niet vinden." + +#~ msgid "Cannot get previous backups list, see console." +#~ msgstr "Kan vorige backuplijst niet ophalen, zie console." + +#~ msgid "JobName:" +#~ msgstr "JobNaam:" + +#~ msgid "Type your command below:" +#~ msgstr "Voer uw opdracht hieronder in:" + +#~ msgid "&About...\tF1" +#~ msgstr "&Over...\tF1" + +#~ msgid "Show about dialog" +#~ msgstr "Toon het Over scherm" + +#~ msgid "Connect" +#~ msgstr "Verbonden" + +#~ msgid "Connect to the director" +#~ msgstr "Verbinden met director" + +#~ msgid "Disconnect" +#~ msgstr "Verbroken" + +#~ msgid "Disconnect of the director" +#~ msgstr "Verbroken van de director" + +#~ msgid "Change of configuration file" +#~ msgstr "Wijziging van configuratiebestand" + +#~ msgid "Change your default configuration file" +#~ msgstr "Wijzig standaard configuratiebestand" + +#~ msgid "Edit your configuration file" +#~ msgstr "Wijzigen configuratiebestand" + +#~ msgid "E&xit\tAlt-X" +#~ msgstr "&Verlaat\tAlt-X" + +#~ msgid "Quit this program" +#~ msgstr "Verlaat dit programma" + +#~ msgid "&File" +#~ msgstr "&Bestand" + +#~ msgid "&Help" +#~ msgstr "&Help" + +#~ msgid "Welcome to bacula bwx-console %s (%s)!\n" +#~ msgstr "Welkom bij het bacula bwx-console %s (%s)!\n" + +#~ msgid "" +#~ "Warning : Unicode is disabled because you are using wxWidgets for GTK+ " +#~ "1.2.\n" +#~ msgstr "" +#~ "Waarschuwing : Unicode is niet actief omdat u wxWidgets for GTK+ 1.2 " +#~ "gebruikt.\n" + +#~ msgid "Send" +#~ msgstr "Versturen" + +#~ msgid "Usage: bwx-console [-c configfile] [-w tmp]\n" +#~ msgstr "Specificatie: bwx-console [-c configuratiebestand] [-w tmp]\n" + +#~ msgid "" +#~ "It seems that it is the first time you run bwx-console.\n" +#~ "This file (%s) has been choosen as default configuration file.\n" +#~ "Do you want to edit it? 
(if you click No you will have to select another " +#~ "file)" +#~ msgstr "" +#~ "Waarschijnlijk gebruikt u bwx-console voor de eerste keer.\n" +#~ "Dit bestand (%s) is ingesteld als uw standaard configuratiebestand.\n" +#~ "Wilt u dit wijzigen? (Als u Nee selecteert dan moet u een ander bestand " +#~ "selecteren)" + +#~ msgid "First run" +#~ msgstr "Eerste run" + +#~ msgid "" +#~ "Unable to read %s\n" +#~ "Error: %s\n" +#~ "Do you want to choose another one? (Press no to edit this file)" +#~ msgstr "" +#~ "Kan %s\n" +#~ " niet lezenFout: %s\n" +#~ "Wilt u een andere kiezen? (Toets nee om het bestand te wijzigen)" + +#~ msgid "Please choose a configuration file to use" +#~ msgstr "Kies een configuratiebestand om te gebruiken" + +#~ msgid "" +#~ "This configuration file has been successfully read, use it as default?" +#~ msgstr "Configuratiebestand met succes gelezen. Als standaard gebruiken?" + +#~ msgid "Configuration file read successfully" +#~ msgstr "Configuratiebestand met succes gelezen" + +#~ msgid "Connecting to the director..." +#~ msgstr "Verbinden met director ..." + +#~ msgid "Failed to unregister a data parser !" +#~ msgstr "Data parser kon niet geregistreerd worden!" + +#~ msgid "Quitting.\n" +#~ msgstr "Bezig met afsluiten.\n" + +#~ msgid "" +#~ "Welcome to Bacula bwx-console.\n" +#~ "Written by Nicolas Boichat \n" +#~ "Copyright (C), 2005-2007 Free Software Foundation Europe, e.V.\n" +#~ msgstr "" +#~ "Welkom bij Bacula bwx-console.\n" +#~ "Geschreven door Nicolas Boichat \n" +#~ "Copyright (C), 2005-2007 Free Software Foundation Europe, e.V.\n" + +#~ msgid "About Bacula bwx-console" +#~ msgstr "Over Bacula bwx-console" + +#~ msgid "Please choose your default configuration file" +#~ msgstr "Kies aub uw standaard configuratiebestand" + +#~ msgid "Use this configuration file as default?" +#~ msgstr "Dit configuratiebestand als standaard gebruiken?" + +#~ msgid "Connection to the director lost. Quit program?" +#~ msgstr "Verbinding met de director verbroken. Programma stoppen?" + +#~ msgid "Connection lost" +#~ msgstr "Verbinding verloren" + +#~ msgid "Reconnect" +#~ msgstr "Opnieuw verbinden" + +#~ msgid "Reconnect to the director" +#~ msgstr "Verbinding naar director opnieuw opzetten" + +#~ msgid "Disconnected of the director." +#~ msgstr "Verbinding verbroken met director" + +#~ msgid "bwx-console: unexpected director's question." +#~ msgstr "bwx-console: onverwachte director's opdracht" + +#~ msgid "Save and close" +#~ msgstr "Opslaan en afsluiten" + +#~ msgid "Apply" +#~ msgstr "Toepassen" + +#~ msgid "Please correct configuration file.\n" +#~ msgstr "Corrigeer configuratie bestand aub.\n" + +#~ msgid "Connecting...\n" +#~ msgstr "Verbinding maken ...\n" + +#~ msgid "Multiple directors found in your config file.\n" +#~ msgstr "Meerdere directors gevonden in uw configuratiebestand.\n" + +#~ msgid "Please choose a director (1-%d): " +#~ msgstr "Kiest u een director (1-%d): " + +#~ msgid "Connected\n" +#~ msgstr "Verbonden\n" + +#~ msgid "<< Unexpected signal received : " +#~ msgstr "<< Onverwacht signaal ontvangen : " + +#, fuzzy +#~ msgid "Unable to write to %s to save full job name. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Unable to save last controlfile into file %s. ERR=%s\n" +#~ msgstr "Lees fout op bestand %s. FOUT=%s\n" + +#, fuzzy +#~ msgid "Unable to list schema for %s. exitcode=%d\n" +#~ msgstr "Kan de service niet installeren" + +#, fuzzy +#~ msgid "Unable to open %s to save RMAN output. 
ERR=%s\n" +#~ msgstr "Kan job cond variabele niet initialiseren: ERR=%s\n" + +#~ msgid "" +#~ "Cancel: %s\n" +#~ "\n" +#~ "%s" +#~ msgstr "" +#~ "Anuleer: %s\n" +#~ "\n" +#~ "%s" + +#~ msgid "Confirm cancel?" +#~ msgstr "Bevestig annulering?" diff --git a/po/pl.po b/po/pl.po new file mode 100644 index 00000000..4d9c0365 --- /dev/null +++ b/po/pl.po @@ -0,0 +1,17501 @@ +# bacula translations +# Copyright (C) 2010 Inteos Sp. z o.o. +# Copyright (C) 2000-2016 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# Translated by Inteos Translation Team: pl@inteos.pl +# +msgid "" +msgstr "" +"Project-Id-Version: pl\n" +"Report-Msgid-Bugs-To: bacula-devel@lists.sourceforge.net\n" +"POT-Creation-Date: 2010-03-30 22:47+0200\n" +"PO-Revision-Date: 2010-02-21 09:58+0100\n" +"Last-Translator: Grzegorz Grabowski \n" +"Language-Team: Inteos PL \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: src/filed/backup.c:100 +msgid "Cannot set buffer size FD->SD.\n" +msgstr "Nie można ustawić wielkości bufora FD->SD.\n" + +#: src/filed/backup.c:167 +#, c-format +msgid "Encountered %ld acl errors while doing backup\n" +msgstr "Osiągnięto %ld błędów acl w trakcie wykonywania backupów\n" + +#: src/filed/backup.c:171 +#, c-format +msgid "Encountered %ld xattr errors while doing backup\n" +msgstr "Osiągnięto %ld błędów xattr w trakcie wykonywania backupów\n" + +#: src/filed/backup.c:231 src/filed/backup.c:240 +msgid "An error occurred while encrypting the stream.\n" +msgstr "Wystpi bd w trakcie szyfrowania strumienia.\n" + +#: src/filed/backup.c:342 +#, c-format +msgid " Recursion turned off. Will not descend from %s into %s\n" +msgstr " Rekursja jest wyłączona. Nie będę zagłębiał się z %s do %s\n" + +#: src/filed/backup.c:349 +#, c-format +msgid " %s is a different filesystem. Will not descend from %s into %s\n" +msgstr " %s jest na innym systemie plikw. Nie będę zagłębiał się z %s do %s\n" + +#: src/filed/backup.c:355 +#, c-format +msgid " Disallowed filesystem. Will not descend from %s into %s\n" +msgstr " Niedozwolony system plikw. Nie będę zagłębiał się z %s do %s\n" + +#: src/filed/backup.c:360 +#, c-format +msgid " Disallowed drive type. Will not descend into %s\n" +msgstr " Niedozwolony typ napdu. 
Nie będę zagłębiał się do %s\n" + +#: src/filed/backup.c:370 +#, c-format +msgid " Socket file skipped: %s\n" +msgstr " Pominity plik gniazda: %s\n" + +#: src/filed/backup.c:383 +#, c-format +msgid " Could not access \"%s\": ERR=%s\n" +msgstr " Nie można dostać się do: \"%s\": ERR=%s\n" + +#: src/filed/backup.c:390 +#, c-format +msgid " Could not follow link \"%s\": ERR=%s\n" +msgstr " Nie można podążać za dowiązaniem \"%s\": ERR=%s\n" + +#: src/filed/backup.c:397 +#, c-format +msgid " Could not stat \"%s\": ERR=%s\n" +msgstr " Nie można wykona stat na \"%s\": ERR=%s\n" + +#: src/filed/backup.c:404 src/filed/verify.c:148 +#, c-format +msgid " Unchanged file skipped: %s\n" +msgstr " Pominity nie zmieniony plik: %s\n" + +#: src/filed/backup.c:407 +#, c-format +msgid " Archive file not saved: %s\n" +msgstr " Nie zapisany plik archiwum: %s\n" + +#: src/filed/backup.c:411 +#, c-format +msgid " Could not open directory \"%s\": ERR=%s\n" +msgstr " Nie można otworzyć katalogu \"%s\": ERR=%s\n" + +#: src/filed/backup.c:417 +#, c-format +msgid " Unknown file type %d; not saved: %s\n" +msgstr " Nieznany typ pliku %d; nie zapisano: %s\n" + +#: src/filed/backup.c:457 src/filed/accurate.c:410 src/filed/verify.c:244 +#, c-format +msgid "%s digest initialization failed\n" +msgstr "Inicjalizacja skrtu %s nieudana\n" + +#: src/filed/backup.c:473 +#, c-format +msgid "%s signature digest initialization failed\n" +msgstr "Inicjalizacja skrtu podpisu %s nieudana\n" + +#: src/filed/backup.c:545 +#, c-format +msgid " Cannot open \"%s\": ERR=%s.\n" +msgstr " Nie można otworzyć \"%s\": ERR=%s.\n" + +#: src/filed/backup.c:582 +#, c-format +msgid " Cannot open resource fork for \"%s\": ERR=%s.\n" +msgstr " Nie można otworzyć rozwidlenia zasobu dla \"%s\": ERR=%s.\n" + +#: src/filed/backup.c:674 +msgid "Failed to allocate memory for crypto signature.\n" +msgstr "Nieudana alokacja pamici dla podpisu kryptograficznego.\n" + +#: src/filed/backup.c:679 src/filed/backup.c:685 src/filed/backup.c:700 +msgid "An error occurred while signing the stream.\n" +msgstr "Wystpi bd w trakcie podpisywania strumienia.\n" + +#: src/filed/backup.c:724 +msgid "An error occurred finalizing signing the stream.\n" +msgstr "Wystpi bd w trakcie koczenia podpisywania strumienia.\n" + +#: src/filed/backup.c:814 +#, c-format +msgid "Compression deflateParams error: %d\n" +msgstr "Błąd kompresji w defalteParams: %d\n" + +#: src/filed/backup.c:826 +msgid "Encrypting sparse data not supported.\n" +msgstr "Szyfrowanie rzadkich danych nie jest wspierane.\n" + +#: src/filed/backup.c:833 +msgid "Failed to initialize encryption context.\n" +msgstr "Błąd inicjalizacji kontekstu szyfrowania.\n" + +#: src/filed/backup.c:857 src/filed/backup.c:1017 src/filed/backup.c:1054 +#: src/filed/backup.c:1067 src/filed/backup.c:1127 src/filed/backup.c:1169 +#: src/filed/acl.c:91 src/filed/acl.c:106 src/filed/acl.c:114 +#: src/filed/xattr.c:90 src/filed/xattr.c:105 src/filed/xattr.c:113 +#: src/filed/fd_plugins.c:341 src/filed/fd_plugins.c:355 +#, c-format +msgid "Network send error to SD. ERR=%s\n" +msgstr "Błąd sieci w wysyaniu do SD. 
ERR=%s\n" + +#: src/filed/backup.c:939 +#, c-format +msgid "Compression deflate error: %d\n" +msgstr "Błąd kompresji w deflate: %d\n" + +#: src/filed/backup.c:946 +#, c-format +msgid "Compression deflateReset error: %d\n" +msgstr "Błąd kompresji w deflateReset: %d\n" + +#: src/filed/backup.c:989 src/filed/backup.c:1005 +msgid "Encryption error\n" +msgstr "Błąd szyfrowania\n" + +#: src/filed/backup.c:1031 +#, c-format +msgid "Read error on file %s. ERR=%s\n" +msgstr "Błąd odczytu na pliku %s. ERR=%s\n" + +#: src/filed/backup.c:1034 +#, c-format +msgid "Too many errors. JobErrors=%d.\n" +msgstr "Zbyt duo bdw. JobErrors=%d.\n" + +#: src/filed/backup.c:1044 +msgid "Encryption padding error\n" +msgstr "Błąd wypeniania szyfrowania\n" + +#: src/filed/backup.c:1105 +msgid "Invalid file flags, no supported data stream type.\n" +msgstr "Niepoprawne flagi pliku, nie wspierany typ strumienia danych.\n" + +#: src/filed/accurate.c:194 +#, c-format +msgid "Space saved with Base jobs: %lld MB\n" +msgstr "Ilo szczdzonej przestrzeni z Base jobs: %lld MB\n" + +#: src/filed/accurate.c:381 +#, c-format +msgid "Can't verify checksum for %s\n" +msgstr "Nie mogę zweryfikowa sum kontroln dla %s\n" + +#: src/filed/accurate.c:487 +msgid "2991 Bad accurate command\n" +msgstr "2991 Za komenda accurate\n" + +#: src/filed/authenticate.c:67 +#, c-format +msgid "I only authenticate directors, not %d\n" +msgstr "Autentykuj wycznie directory, nie %d\n" + +#: src/filed/authenticate.c:75 src/stored/authenticate.c:69 +#, c-format +msgid "Bad Hello command from Director at %s. Len=%d.\n" +msgstr "Za komenda Hello od Directora przy %s. Len=%d.\n" + +#: src/filed/authenticate.c:87 src/stored/authenticate.c:80 +#, c-format +msgid "Bad Hello command from Director at %s: %s\n" +msgstr "Za komenda Hello od Directora przy %s: %s\n" + +#: src/filed/authenticate.c:99 +#, c-format +msgid "Connection from unknown Director %s at %s rejected.\n" +msgstr "Połączenie od nieznanego Directora %s przy %s odrzucone.\n" + +#: src/filed/authenticate.c:143 +#, c-format +msgid "Incorrect password given by Director at %s.\n" +msgstr "Niepoprawne haso podane przez Director przy %s.\n" + +#: src/filed/authenticate.c:150 src/filed/authenticate.c:269 +#: src/stored/authenticate.c:138 src/stored/authenticate.c:249 +msgid "" +"Authorization problem: Remote server did not advertize required TLS " +"support.\n" +msgstr "" +"Problem autoryzacji: Zdalny serwer nie zaproponowa wymaganego wsparcia dla " +"TLS.\n" + +#: src/filed/authenticate.c:168 src/filed/authenticate.c:287 +#: src/dird/authenticate.c:403 +msgid "TLS negotiation failed.\n" +msgstr "Negocjacja TLS nieudana.\n" + +#: src/filed/authenticate.c:207 src/stored/dircmd.c:208 +msgid "Unable to authenticate Director\n" +msgstr "Nie mogę zautentykowa Directora\n" + +#: src/filed/authenticate.c:262 +msgid "" +"Authorization key rejected by Storage daemon.\n" +"Please see http://www.bacula.org/en/rel-manual/Bacula_Freque_Asked_Questi." +"html#SECTION003760000000000000000 for help.\n" +msgstr "" +"Autoryzacja klucza odrzucona przez Storage daemona.\n" +"Proszę zobacz http://www.bacula.org/en/rel-manual/Bacula_Freque_Asked_Questi." 
+"html#SECTION003760000000000000000 w celu uzyskania pomocy.\n" + +#: src/filed/verify_vol.c:65 +msgid "Storage command not issued before Verify.\n" +msgstr "Komenda Storage nie zostaa wysana przed komend Verify.\n" + +#: src/filed/verify_vol.c:99 src/filed/restore.c:267 +#, c-format +msgid "Record header scan error: %s\n" +msgstr "Błąd skanowania nagwka rekordu: %s\n" + +#: src/filed/verify_vol.c:108 src/filed/restore.c:277 +#, c-format +msgid "Data record error. ERR=%s\n" +msgstr "Błąd rekordu danych. ERR=%s\n" + +#: src/filed/verify_vol.c:112 src/filed/restore.c:281 +#, c-format +msgid "Actual data size %d not same as header %d\n" +msgstr "Aktualny rozmiar danych %d nie jest taki sam jak w nagwku %d\n" + +#: src/filed/verify_vol.c:145 +#, c-format +msgid "Error scanning record header: %s\n" +msgstr "Błąd skanowania nagwna rekordu: %s\n" + +#: src/filed/verify_vol.c:208 src/filed/verify.c:211 +#, c-format +msgid "Network error in send to Director: ERR=%s\n" +msgstr "Błąd sieci w wysyaniu do Directora: ERR=%s\n" + +#: src/filed/filed_conf.c:381 src/dird/dird_conf.c:1434 +#: src/stored/stored_conf.c:607 +#, c-format +msgid "Cannot find Director resource %s\n" +msgstr "Nie można znale zasobu Director %s\n" + +#: src/filed/filed_conf.c:387 src/dird/dird_conf.c:1508 +#, c-format +msgid "Cannot find Client resource %s\n" +msgstr "Nie można znale zasobu Client %s\n" + +#: src/filed/acl.c:374 +#, c-format +msgid "acl_to_text error on file \"%s\": ERR=%s\n" +msgstr "Błąd w acl_to_text na pliku \"%s\": ERR=%s\n" + +#: src/filed/acl.c:400 +#, c-format +msgid "acl_get_file error on file \"%s\": ERR=%s\n" +msgstr "Błąd w acl_get_file na pliku \"%s\": ERR=%s\n" + +#: src/filed/acl.c:439 +#, c-format +msgid "acl_delete_def_file error on file \"%s\": ERR=%s\n" +msgstr "Błąd w acl_delete_def_file na pliku \"%s\": ERR=%s\n" + +#: src/filed/acl.c:447 +#, c-format +msgid "acl_from_text error on file \"%s\": ERR=%s\n" +msgstr "Błąd w acl_from_text na pliku \"%s\": ERR=%s\n" + +#: src/filed/acl.c:460 +#, c-format +msgid "acl_valid error on file \"%s\": ERR=%s\n" +msgstr "Błąd w acl_valid na pliku \"%s\": ERR=%s\n" + +#: src/filed/acl.c:481 +#, c-format +msgid "acl_set_file error on file \"%s\": ERR=%s\n" +msgstr "Błąd w acl_set_file na pliku \"%s\": ERR=%s\n" + +#: src/filed/acl.c:754 +#, c-format +msgid "getacl error on file \"%s\": ERR=%s\n" +msgstr "Błąd w getacl na pliku \"%s\": ERR=%s\n" + +#: src/filed/acl.c:785 +#, c-format +msgid "acltostr error on file \"%s\": ERR=%s\n" +msgstr "Błąd w acltostr na pliku \"%s\": ERR=%s\n" + +#: src/filed/acl.c:802 src/filed/acl.c:809 +#, c-format +msgid "strtoacl error on file \"%s\": ERR=%s\n" +msgstr "Błąd w strtoacl na pliku \"%s\": ERR=%s\n" + +#: src/filed/acl.c:827 +#, c-format +msgid "setacl error on file \"%s\": ERR=%s\n" +msgstr "Błąd w setacl na pliku \"%s\": ERR=%s\n" + +#: src/filed/acl.c:912 src/filed/acl.c:1001 +#, c-format +msgid "pathconf error on file \"%s\": ERR=%s\n" +msgstr "Błąd w pathconf na pliku \"%s\": ERR=%s\n" + +#: src/filed/acl.c:930 +#, c-format +msgid "acl_get error on file \"%s\": ERR=%s\n" +msgstr "Błąd w acl_get na pliku \"%s\": ERR=%s\n" + +#: src/filed/acl.c:993 +#, c-format +msgid "" +"Trying to restore acl on file \"%s\" on filesystem without acl support\n" +msgstr "" +"Próba odtworzenia acl na pliku \"%s\" na systemie plikw bez wsparcia dla acl\n" + +#: src/filed/acl.c:1017 +#, c-format +msgid "" +"Trying to restore acl on file \"%s\" on filesystem without aclent acl " +"support\n" +msgstr "" +"Próba odtworzenia acl na pliku 
\"%s\" na systemie plikw bez wsparcia aclent " +"acl\n" + +#: src/filed/acl.c:1027 +#, c-format +msgid "" +"Trying to restore acl on file \"%s\" on filesystem without ace acl support\n" +msgstr "" +"Próba odtworzenia acl na pliku \"%s\" na systemie plikw bez wsparcia ace " +"acl\n" + +#: src/filed/acl.c:1042 +#, c-format +msgid "acl_fromtext error on file \"%s\": ERR=%s\n" +msgstr "Błąd w acl_fromtext na pliku \"%s\": ERR=%s\n" + +#: src/filed/acl.c:1055 src/filed/acl.c:1062 +#, c-format +msgid "wrong encoding of acl type in acl stream on file \"%s\"\n" +msgstr "bdne kodowanie typu acl w strumieniu acl na pliku \"%s\"\n" + +#: src/filed/acl.c:1086 +#, c-format +msgid "acl_set error on file \"%s\": ERR=%s\n" +msgstr "Błąd w acl_set na pliku \"%s\": ERR=%s\n" + +#: src/filed/acl.c:1165 +#, c-format +msgid "acltotext error on file \"%s\": ERR=%s\n" +msgstr "Błąd w acltotext na pliku \"%s\": ERR=%s\n" + +#: src/filed/acl.c:1183 +#, c-format +msgid "aclfromtext error on file \"%s\": ERR=%s\n" +msgstr "Błąd w aclfromtext na pliku \"%s\": ERR=%s\n" + +#: src/filed/acl.c:1200 +#, c-format +msgid "acl(SETACL) error on file \"%s\": ERR=%s\n" +msgstr "Błąd w acl(SETACL) na pliku \"%s\": ERR=%s\n" + +#: src/filed/acl.c:1275 +#, c-format +msgid "Can't restore ACLs of %s - incompatible acl stream encountered - %d\n" +msgstr "" +"Nie mogę odtworzy ACL na %s - napotkano niekompatybilny strumie acl - %d\n" + +#: src/filed/restore.c:109 +#, c-format +msgid "Size of data or stream of %s not correct. Original %s, restored %s.\n" +msgstr "" +"Rozmiar danych lub strumienia w %s nie jest poprawny. Oryginalny %s, " +"odzyskany %s.\n" + +#: src/filed/restore.c:129 +#, c-format +msgid "Invalid length of Finder Info (got %d, not 32)\n" +msgstr "Niepoprawna dugo pola Finder Info (otrzymano %d, a nie 32)\n" + +#: src/filed/restore.c:134 +#, c-format +msgid "Could not set Finder Info on %s\n" +msgstr "Nie można ustawi pola Finder Info na %s\n" + +#: src/filed/restore.c:331 +#, c-format +msgid "Record header file index %ld not equal record index %ld\n" +msgstr "" +"Indeks pliku w nagwku rekordu %ld nie jest rwny indeksowi rekordu %ld\n" + +#: src/filed/restore.c:345 src/stored/bextract.c:334 +#, c-format +msgid "%s stream not supported on this Client.\n" +msgstr "Strumie %s nie jest wspierany na tym Kliencie.\n" + +#: src/filed/restore.c:433 +msgid "Unexpected cryptographic session data stream.\n" +msgstr "Niespodziewany stumie danych sesji kryptograficznej.\n" + +#: src/filed/restore.c:443 +msgid "" +"No private decryption keys have been defined to decrypt encrypted backup " +"data.\n" +msgstr "" +"Nie zdefiniowano prywatnych kluczy szyfrowania aby odszyfrowa dane " +"backupowe.\n" + +#: src/filed/restore.c:454 +msgid "Could not create digest.\n" +msgstr "Nie można stworzyć skrt.\n" + +#: src/filed/restore.c:472 +msgid "Missing private key required to decrypt encrypted backup data.\n" +msgstr "" +"Brakujcy klucz prywatny wymagany do odszyfrowania zaszyfrowanych danych " +"backupowych.\n" + +#: src/filed/restore.c:475 +msgid "Decrypt of the session key failed.\n" +msgstr "Nieudane odszyfrowanie klucza sesji.\n" + +#: src/filed/restore.c:481 +#, c-format +msgid "An error occurred while decoding encrypted session data stream: %s\n" +msgstr "" +"Wystpi bd w trakcie dekodowania zaszyfrowanej sesji strumienia danych: %s\n" + +#: src/filed/restore.c:534 src/filed/restore.c:587 +#, c-format +msgid "Missing encryption session data stream for %s\n" +msgstr "Brakujca szyfrowana sesja strumienia danych dla %s\n" + +#: 
src/filed/restore.c:542 src/filed/restore.c:594 +#, c-format +msgid "Failed to initialize decryption context for %s\n" +msgstr "Nieudana inicjalizacja kontekstu odszyfrowywania dla %s\n" + +#: src/filed/restore.c:606 +#, c-format +msgid "Cannot open resource fork for %s.\n" +msgstr "Nie można otworzyć rozwidlenia zasobów.dla %s.\n" + +#: src/filed/restore.c:731 +msgid "Unexpected cryptographic signature data stream.\n" +msgstr "Niespodziewany strumie danych podpisu kryptograficznego.\n" + +#: src/filed/restore.c:739 +#, c-format +msgid "Failed to decode message signature for %s\n" +msgstr "Nieudane dekodowanie podpisu komunikatu dla %s\n" + +#: src/filed/restore.c:765 src/stored/bextract.c:493 +#, c-format +msgid "Unknown stream=%d ignored. This shouldn't happen!\n" +msgstr "Nieznany strumie=%d zignorowany. To nie powinno się sta!\n" + +#: src/filed/restore.c:794 +#, c-format +msgid "Encountered %ld acl errors while doing restore\n" +msgstr "Osiągnięto %ld błędów acl w trakcie wykonywania odtwarzania\n" + +#: src/filed/restore.c:798 +#, c-format +msgid "Encountered %ld xattr errors while doing restore\n" +msgstr "Osiągnięto %ld błędów xattr w trakcie wykonywania odtwarzania\n" + +#: src/filed/restore.c:802 +#, c-format +msgid "" +"%d non-supported data streams and %d non-supported attrib streams ignored.\n" +msgstr "" +"Zignorowano %d nie wspieranych strumieni danych i %d nie wspieranych " +"strumieni atrybutw.\n" + +#: src/filed/restore.c:806 +#, c-format +msgid "%d non-supported resource fork streams ignored.\n" +msgstr "Zignorowano %d nie wspieranych strumieni rozwidlenia zasobów.\n" + +#: src/filed/restore.c:809 +#, c-format +msgid "%d non-supported Finder Info streams ignored.\n" +msgstr "Zignorowano %d nie wspieranych strumieni Finder Info.\n" + +#: src/filed/restore.c:812 +#, c-format +msgid "%d non-supported acl streams ignored.\n" +msgstr "Zignorowano %d nie wspieranych strumieni acl.\n" + +#: src/filed/restore.c:815 +#, c-format +msgid "%d non-supported crypto streams ignored.\n" +msgstr "Zignorowano %d nie wspieranych strumieni kryptograficznych.\n" + +#: src/filed/restore.c:818 +#, c-format +msgid "%d non-supported xattr streams ignored.\n" +msgstr "Zignorowano %d nie wspieranych strumieni xattr.\n" + +#: src/filed/restore.c:886 src/lib/util.c:406 +msgid "None" +msgstr "Brak" + +#: src/filed/restore.c:890 +msgid "Zlib errno" +msgstr "Zlib errno" + +#: src/filed/restore.c:892 +msgid "Zlib stream error" +msgstr "Błąd strumienia Zlib" + +#: src/filed/restore.c:894 +msgid "Zlib data error" +msgstr "Błąd danych Zlib" + +#: src/filed/restore.c:896 +msgid "Zlib memory error" +msgstr "Błąd pamici Zlib" + +#: src/filed/restore.c:898 +msgid "Zlib buffer error" +msgstr "Błąd bufora Zlib" + +#: src/filed/restore.c:900 +msgid "Zlib version error" +msgstr "Błąd wersji Zlib" + +#: src/filed/restore.c:902 src/lib/util.c:747 src/lib/util.c:757 +#: src/lib/util.c:765 src/lib/util.c:772 src/lib/util.c:779 src/lib/util.c:793 +#: src/lib/util.c:803 src/lib/util.c:810 src/lib/util.c:821 +msgid "*none*" +msgstr "*brak*" + +#: src/filed/restore.c:940 +#, c-format +msgid "Missing cryptographic signature for %s\n" +msgstr "Brakucy podpis kryptograficzny dla %s\n" + +#: src/filed/restore.c:973 src/filed/restore.c:1002 +#, c-format +msgid "Signature validation failed for file %s: ERR=%s\n" +msgstr "Nieudana walidacja podpisu dla pliku %s: ERR=%s\n" + +#: src/filed/restore.c:990 +#, c-format +msgid "Digest one file failed for file: %s\n" +msgstr "Nieudany skrót jednego pliku dla pliku: %s\n" + +#: 
src/filed/restore.c:1029 +#, c-format +msgid "Signature validation failed for %s: %s\n" +msgstr "Nieudana walidacja sygnatury dla %s: %s\n" + +#: src/filed/restore.c:1057 src/stored/bextract.c:424 +msgid "Seek to %s error on %s: ERR=%s\n" +msgstr "Przesunięcie do %s błąd na %s: ERR=%s\n" + +#: src/filed/restore.c:1094 +#, c-format +msgid "Uncompression error on file %s. ERR=%s\n" +msgstr "Błąd dekompresji na pliku %s. ERR=%s\n" + +#: src/filed/restore.c:1103 src/stored/bextract.c:458 +msgid "GZIP data stream found, but GZIP not configured!\n" +msgstr "Napotkano strumie GZIP, lecz GZIP nie jest skonfigurowany!\n" + +#: src/filed/restore.c:1126 +#, c-format +msgid "Write error in Win32 Block Decomposition on %s: %s\n" +msgstr "Błąd zapisu w Dekompozycji Bloku Win32 na %s: %s\n" + +#: src/filed/restore.c:1132 src/stored/bextract.c:283 +#: src/stored/bextract.c:289 +#, c-format +msgid "Write error on %s: %s\n" +msgstr "Błąd zapisu na %s: %s\n" + +#: src/filed/restore.c:1185 +msgid "Decryption error\n" +msgstr "Błąd odszyfrowywania\n" + +#: src/filed/restore.c:1279 +msgid "Logic error: output file should be open\n" +msgstr "Błąd logiki: plik wyjściowy powinien być otwarty\n" + +#: src/filed/restore.c:1310 +msgid "Logic error: output file should not be open\n" +msgstr "Błąd logiki: plik wyjściowy nie powinien być otwarty\n" + +#: src/filed/restore.c:1343 +#, c-format +msgid "Decryption error. buf_len=%d decrypt_len=%d on file %s\n" +msgstr "Błąd odszyfrowywania. buf_len=%d decrypt_len=%d na pliku %s\n" + +#: src/filed/restore.c:1464 +msgid "Open File Manager paused\n" +msgstr "Open File Manager zatrzymany\n" + +#: src/filed/restore.c:1468 +msgid "FAILED to pause Open File Manager\n" +msgstr "NIEUDANE zatrzymanie Open File Manager\n" + +#: src/filed/restore.c:1476 +#, c-format +msgid "Running as '%s'. Privmask=%#08x\n" +msgstr "Uruchomiono jako '%s'. 
Privmask=%#08x\n" + +#: src/filed/restore.c:1478 +msgid "Failed to retrieve current UserName\n" +msgstr "Nieudane pobranie aktualnego UserName\n" + +#: src/filed/xattr.c:227 +#, c-format +msgid "Illegal xattr stream, no XATTR_MAGIC on file \"%s\"\n" +msgstr "Niedowolnony strumie xattr, brak XATTR_MAGIC na pliku \"%s\"\n" + +#: src/filed/xattr.c:348 src/filed/xattr.c:374 +#, c-format +msgid "llistxattr error on file \"%s\": ERR=%s\n" +msgstr "Błąd llistxattr na pliku \"%s\": ERR=%s\n" + +#: src/filed/xattr.c:452 src/filed/xattr.c:478 +#, c-format +msgid "lgetxattr error on file \"%s\": ERR=%s\n" +msgstr "Błąd lgetxattr na pliku \"%s\": ERR=%s\n" + +#: src/filed/xattr.c:499 src/filed/xattr.c:870 src/filed/xattr.c:1604 +#, c-format +msgid "Xattr stream on file \"%s\" exceeds maximum size of %d bytes\n" +msgstr "Strumie Xattr na pliku \"%s\" przekroczy maksymalny rozmiar %d bajtw\n" + +#: src/filed/xattr.c:523 src/filed/xattr.c:904 +#, c-format +msgid "Failed to serialize extended attributes on file \"%s\"\n" +msgstr "Nieudane serializacja rozszerzonych atrybutw na pliku \"%s\"\n" + +#: src/filed/xattr.c:571 +#, c-format +msgid "lsetxattr error on file \"%s\": ERR=%s\n" +msgstr "Błąd lsetxattr na pliku \"%s\": ERR=%s\n" + +#: src/filed/xattr.c:673 +#, c-format +msgid "Failed to convert %d into namespace on file \"%s\"\n" +msgstr "Nieudana konwersja %d w przestrzeni nazw na pliku \"%s\"\n" + +#: src/filed/xattr.c:706 src/filed/xattr.c:732 +#, c-format +msgid "extattr_list_link error on file \"%s\": ERR=%s\n" +msgstr "Błąd extattr_list_link na pliku \"%s\": ERR=%s\n" + +#: src/filed/xattr.c:823 src/filed/xattr.c:849 +#, c-format +msgid "extattr_get_link error on file \"%s\": ERR=%s\n" +msgstr "Błąd extattr_get_link na pliku \"%s\": ERR=%s\n" + +#: src/filed/xattr.c:962 +#, c-format +msgid "Failed to split %s into namespace and name part on file \"%s\"\n" +msgstr "Nieudany podzia %s w przestrzeni nazw i czci nazwy na pliku \"%s\"\n" + +#: src/filed/xattr.c:974 +#, c-format +msgid "Failed to convert %s into namespace on file \"%s\"\n" +msgstr "Nieudana konwersja %s w przestrzeni nazw na pliku \"%s\"\n" + +#: src/filed/xattr.c:992 +#, c-format +msgid "extattr_set_link error on file \"%s\": ERR=%s\n" +msgstr "Błąd extattr_set_link na pliku \"%s\": ERR=%s\n" + +#: src/filed/xattr.c:1272 src/filed/xattr.c:1322 +#, c-format +msgid "Unable to get acl on xattr %s on file \"%s\": ERR=%s\n" +msgstr "Nie mogę otrzyma acl na xattr %s na pliku \"%s\": ERR=%s\n" + +#: src/filed/xattr.c:1336 +#, c-format +msgid "Unable to get acl text on xattr %s on file \"%s\": ERR=%s\n" +msgstr "Nie mogę otrzyma acl text na xattr %s na pliku \"%s\": ERR=%s\n" + +#: src/filed/xattr.c:1405 +#, c-format +msgid "Unable to get status on xattr %s on file \"%s\": ERR=%s\n" +msgstr "Nie mogę otrzyma statusu na xattr %s na pliku \"%s\": ERR=%s\n" + +#: src/filed/xattr.c:1528 +#, c-format +msgid "Unable to open xattr %s on \"%s\": ERR=%s\n" +msgstr "Nie mogę otworzyć xattr %s na \"%s\": ERR=%s\n" + +#: src/filed/xattr.c:1547 +#, c-format +msgid "Unable to read symlin %s on \"%s\": ERR=%s\n" +msgstr "Nie mogę odczyta linku symbolicznego %s na \"%s\": ERR=%s\n" + +#: src/filed/xattr.c:1616 +#, c-format +msgid "Unable to read content of xattr %s on file \"%s\"\n" +msgstr "Nie mogę odczyta kontekst xattr %s na pliku \"%s\"\n" + +#: src/filed/xattr.c:1652 +#, c-format +msgid "Unable to chdir to xattr space of file \"%s\": ERR=%s\n" +msgstr "Nie mogę wykona chdir do przestrzeni xattr pliku \"%s\": ERR=%s\n" + +#: src/filed/xattr.c:1708 
src/filed/xattr.c:1935 +#, c-format +msgid "Unable to open file \"%s\": ERR=%s\n" +msgstr "Nie mogę otworzyć pliku \"%s\": ERR=%s\n" + +#: src/filed/xattr.c:1733 src/filed/xattr.c:1984 +#, c-format +msgid "Unable to open xattr space %s on file \"%s\": ERR=%s\n" +msgstr "Nie mogę otworzyć przestrzeni xattr %s na pliku \"%s\": ERR=%s\n" + +#: src/filed/xattr.c:1746 src/filed/xattr.c:1954 +#, c-format +msgid "Unable to chdir to xattr space on file \"%s\": ERR=%s\n" +msgstr "Nie mogę wykona chdir do przestrzeni xattr na pliku \"%s\": ERR=%s\n" + +#: src/filed/xattr.c:1764 +#, c-format +msgid "Unable to list the xattr space on file \"%s\": ERR=%s\n" +msgstr "Nie mogę wylistowa przestrze xattr na pliku \"%s\": ERR=%s\n" + +#: src/filed/xattr.c:1853 +#, c-format +msgid "Unable to convert acl from text on file \"%s\"\n" +msgstr "Nie mogę przekonwertowa acl z tekstu na plik \"%s\"\n" + +#: src/filed/xattr.c:1860 src/filed/xattr.c:1881 +#, c-format +msgid "Unable to restore acl of xattr %s on file \"%s\": ERR=%s\n" +msgstr "Nie mogę odtworzy acl xattr %s na pliku \"%s\": ERR=%s\n" + +#: src/filed/xattr.c:1946 +#, c-format +msgid "Unable to open xattr space on file \"%s\": ERR=%s\n" +msgstr "Nie mogę otworzyć przestrzeni xattr na pliku \"%s\": ERR=%s\n" + +#: src/filed/xattr.c:1970 src/filed/xattr.c:2109 +#, c-format +msgid "Unable to open xattr %s on file \"%s\": ERR=%s\n" +msgstr "Nie mogę otworzyć xattr %s na pliku \"%s\": ERR=%s\n" + +#: src/filed/xattr.c:1998 +#, c-format +msgid "Unable to chdir to xattr space %s on file \"%s\": ERR=%s\n" +msgstr "Nie mogę wykona chdir do przestrzeni xattr %s na pliku \"%s\": ERR=%s\n" + +#: src/filed/xattr.c:2034 +#, c-format +msgid "Unable to mkfifo xattr %s on file \"%s\": ERR=%s\n" +msgstr "Nie mogę wykona mkfifo xattr %s na pliku \"%s\": ERR=%s\n" + +#: src/filed/xattr.c:2048 +#, c-format +msgid "Unable to mknod xattr %s on file \"%s\": ERR=%s\n" +msgstr "Nie mogę wykona mknod xattr %s na pliku \"%s\": ERR=%s\n" + +#: src/filed/xattr.c:2063 +#, c-format +msgid "Unable to mkdir xattr %s on file \"%s\": ERR=%s\n" +msgstr "Nie mogę wykona mkdir xattr %s na pliku \"%s\": ERR=%s\n" + +#: src/filed/xattr.c:2080 +#, c-format +msgid "Unable to link xattr %s to %s on file \"%s\": ERR=%s\n" +msgstr "Nie mogę wykona link xattr %s do %s na pliku \"%s\": ERR=%s\n" + +#: src/filed/xattr.c:2129 +#, c-format +msgid "" +"Unable to restore data of xattr %s on file \"%s\": Not all data available in " +"xattr stream\n" +msgstr "" +"Nie mogę odzyska danych xattr %s na pliku \"%s\": Nie wszystkie dane s " +"dostpne w strumieniu xattr\n" + +#: src/filed/xattr.c:2139 +#, c-format +msgid "Unable to restore data of xattr %s on file \"%s\": ERR=%s\n" +msgstr "Nie mogę odzyska danych xattr %s na pliku \"%s\": ERR=%s\n" + +#: src/filed/xattr.c:2159 +#, c-format +msgid "Unable to symlink xattr %s to %s on file \"%s\": ERR=%s\n" +msgstr "Nie mogę wykona symlink xattr %s do %s na pliku \"%s\": ERR=%s\n" + +#: src/filed/xattr.c:2192 +#, c-format +msgid "Unable to restore owner of xattr %s on file \"%s\": ERR=%s\n" +msgstr "Nie mogę odzyska waciciela xattr %s na pliku \"%s\": ERR=%s\n" + +#: src/filed/xattr.c:2217 +#, c-format +msgid "Unable to restore filetimes of xattr %s on file \"%s\": ERR=%s\n" +msgstr "Nie mogę odzyska xattr filetime %s na pliku \"%s\": ERR=%s\n" + +#: src/filed/xattr.c:2232 +#, c-format +msgid "Illegal xattr stream, failed to parse xattr stream on file \"%s\"\n" +msgstr "" +"Niepoprawny strumie xattr, nieudane przetwarzanie strumienia na pliku \"%s" +"\"\n" + +#: 
src/filed/xattr.c:2289 +#, c-format +msgid "Failed to restore extensible attributes on file \"%s\"\n" +msgstr "Nieudane odzyskanie rozszerzalnych atrybutów na pliku \"%s\"\n" + +#: src/filed/xattr.c:2300 +#, c-format +msgid "Failed to restore extended attributes on file \"%s\"\n" +msgstr "Nieudane odzyskanie rozszerzonych atrybutów na pliku \"%s\"\n" + +#: src/filed/xattr.c:2358 +#, c-format +msgid "" +"Can't restore Extended Attributes of %s - incompatible xattr stream " +"encountered - %d\n" +msgstr "" +"Nie mogę odzyskać Rozszerzone Atrybuty %s - osiągnięto niekompatybilny strumień " +"xattr - %d\n" + +#: src/filed/job.c:420 +#, c-format +msgid "2901 Job %s not found.\n" +msgstr "2901 Zadanie %s nie znaleziono.\n" + +#: src/filed/job.c:430 +#, c-format +msgid "2001 Job %s marked to be canceled.\n" +msgstr "2001 Zadanie %s oznaczone do anulowania.\n" + +#: src/filed/job.c:433 +msgid "2902 Error scanning cancel command.\n" +msgstr "2902 Błąd skanowania komendy anuluj.\n" + +#: src/filed/job.c:452 +#, c-format +msgid "2991 Bad setdebug command: %s\n" +msgstr "2991 Błędna komenda setdebug: %s\n" + +#: src/filed/job.c:468 +#, c-format +msgid "Bad estimate command: %s" +msgstr "Błędna komenda estimate: %s" + +#: src/filed/job.c:469 +msgid "2992 Bad estimate command.\n" +msgstr "2992 Błędna komenda estimate.\n" + +#: src/filed/job.c:492 +#, c-format +msgid "Bad Job Command: %s" +msgstr "Błędna komenda Job: %s" + +#: src/filed/job.c:514 +#, c-format +msgid "Bad RunBeforeJob command: %s\n" +msgstr "Błędna komenda RunBeforeJob: %s\n" + +#: src/filed/job.c:515 src/filed/job.c:533 +msgid "2905 Bad RunBeforeJob command.\n" +msgstr "2905 Błędna komenda RunBeforeJob.\n" + +#: src/filed/job.c:544 +msgid "2905 Bad RunBeforeNow command.\n" +msgstr "2905 Błędna komenda RunBeforeNow.\n" + +#: src/filed/job.c:563 +#, c-format +msgid "Bad RunAfter command: %s\n" +msgstr "Błędna komenda RunAfter: %s\n" + +#: src/filed/job.c:564 +msgid "2905 Bad RunAfterJob command.\n" +msgstr "2905 Błędna komenda RunAfterJob.\n" + +#: src/filed/job.c:598 +#, c-format +msgid "Bad RunScript command: %s\n" +msgstr "Błędna komenda RunScript: %s\n" + +#: src/filed/job.c:599 +msgid "2905 Bad RunScript command.\n" +msgstr "2905 Błędna komenda RunScript.\n" + +#: src/filed/job.c:690 src/dird/fd_cmds.c:492 +#, c-format +msgid "Cannot run program: %s. ERR=%s\n" +msgstr "Nie można uruchomić programu: %s. ERR=%s\n" + +#: src/filed/job.c:706 +#, c-format +msgid "Error running program: %s. stat=%d: ERR=%s\n" +msgstr "Błąd uruchomienia programu: %s. stat=%d: ERR=%s\n" + +#: src/filed/job.c:716 +#, c-format +msgid "Cannot open FileSet input file: %s. ERR=%s\n" +msgstr "Nie można otworzyć pliku wejściowego FileSet: %s. ERR=%s\n" + +#: src/filed/job.c:738 +msgid "Plugin Directory not defined. Cannot use plugin: \"%\"\n" +msgstr "" +"Katalog dla Wtyczek nie jest zdefiniowany. Nie można użyć wtyczki: \"%\"\n" + +#: src/filed/job.c:844 +#, c-format +msgid "REGEX %s compile error. ERR=%s\n" +msgstr "Błąd kompilacji REGEX %s. 
ERR=%s\n" + +#: src/filed/job.c:910 +#, c-format +msgid "Invalid FileSet command: %s\n" +msgstr "Niepoprawna komenda FileSet: %s\n" + +#: src/filed/job.c:1178 src/findlib/match.c:200 src/tools/testfind.c:641 +#, c-format +msgid "Unknown include/exclude option: %c\n" +msgstr "Nieznana opcja include/exclude: %c\n" + +#: src/filed/job.c:1249 src/stored/dircmd.c:1025 +msgid "Could not create bootstrap file %s: ERR=%s\n" +msgstr "Nie można utworzyć pliku bootstrap %s: ERR=%s\n" + +#: src/filed/job.c:1363 +#, c-format +msgid "" +"DIR and FD clocks differ by %lld seconds, FD automatically compensating.\n" +msgstr "Zegary DIR i FD różnią się o %lld sekund, automatyczne kompensowanie FD.\n" + +#: src/filed/job.c:1372 +#, c-format +msgid "Unknown backup level: %s\n" +msgstr "Nieznany poziom backupu: %s\n" + +#: src/filed/job.c:1385 +#, c-format +msgid "Bad level command: %s\n" +msgstr "Zła komenda level: %s\n" + +#: src/filed/job.c:1407 +#, c-format +msgid "Bad session command: %s" +msgstr "Zła komenda session: %s" + +#: src/filed/job.c:1465 +#, c-format +msgid "Bad storage command: %s" +msgstr "Zła komenda storage: %s" + +#: src/filed/job.c:1479 src/tray-monitor/tray-monitor.c:952 +#: src/dird/msgchan.c:106 +msgid "Storage daemon" +msgstr "Demon Składowania" + +#: src/filed/job.c:1485 +#, c-format +msgid "Failed to connect to Storage daemon: %s:%d\n" +msgstr "Nieudane podłączenie do demona Składowania: %s:%d\n" + +#: src/filed/job.c:1497 +msgid "Failed to authenticate Storage daemon.\n" +msgstr "Nieudana autentykacja demona Składowania.\n" + +#: src/filed/job.c:1538 +msgid "ACL support not configured for your machine.\n" +msgstr "Nie skonfigurowane wsparcie ACL na twojej maszynie.\n" + +#: src/filed/job.c:1542 +msgid "XATTR support not configured for your machine.\n" +msgstr "Nie skonfigurowane wsparcie XATTR na twojej maszynie.\n" + +#: src/filed/job.c:1551 +msgid "Cannot contact Storage daemon\n" +msgstr "Nie można skontaktować się z demonem Składowania\n" + +#: src/filed/job.c:1570 +#, c-format +msgid "Bad response to append open: %s\n" +msgstr "Zła odpowiedź na append open: %s\n" + +#: src/filed/job.c:1575 +msgid "Bad response from stored to open command\n" +msgstr "Zła odpowiedź z demona składowania na komendę open\n" + +#: src/filed/job.c:1604 +#, c-format +msgid "Generate VSS snapshots. Driver=\"%s\", Drive(s)=\"%s\"\n" +msgstr "Generowanie migawki VSS. Sterownik=\"%s\", Dysk(i)=\"%s\"\n" + +#: src/filed/job.c:1606 +msgid "Generate VSS snapshots failed.\n" +msgstr "Nieudane generowanie migawki VSS.\n" + +#: src/filed/job.c:1613 +#, c-format +msgid "" +"Generate VSS snapshot of drive \"%c:\\\" failed. VSS support is disabled on " +"this drive.\n" +msgstr "" +"Nieudane generowanie migawki VSS dysku \"%c:\\\". Wsparcie dla VSS jest " +"wyłączone dla tego dysku.\n" + +#: src/filed/job.c:1620 +#, c-format +msgid "VSS Writer (PrepareForBackup): %s\n" +msgstr "VSS Writer (PrepareForBackup): %s\n" + +#: src/filed/job.c:1625 +msgid "No drive letters found for generating VSS snapshots.\n" +msgstr "Nie znaleziono liter dysków dla generowania migawek VSS.\n" + +#: src/filed/job.c:1629 +#, c-format +msgid "VSS was not initialized properly. VSS support is disabled. ERR=%s\n" +msgstr "" +"VSS nie zostało poprawnie zainicjowane. Wsparcie dla VSS jest wyłączone. 
ERR=%" +"s\n" + +#: src/filed/job.c:1680 +msgid "Append Close with SD failed.\n" +msgstr "Nieudane Append Close z SD.\n" + +#: src/filed/job.c:1684 +#, c-format +msgid "Bad status %d returned from Storage Daemon.\n" +msgstr "Zwrócono zły status %d z Demona Składowania.\n" + +#: src/filed/job.c:1702 +#, c-format +msgid "VSS Writer (BackupComplete): %s\n" +msgstr "VSS Writer (BackupComplete): %s\n" + +#: src/filed/job.c:1726 +#, c-format +msgid "2994 Bad verify command: %s\n" +msgstr "2994 Zła komenda verify: %s\n" + +#: src/filed/job.c:1741 src/filed/job.c:1782 +#, c-format +msgid "2994 Bad verify level: %s\n" +msgstr "2994 Zły poziom verify: %s\n" + +#: src/filed/job.c:1816 +#, c-format +msgid "Bad replace command. CMD=%s\n" +msgstr "Zła komenda replace. CMD=%s\n" + +#: src/filed/job.c:1834 +#, c-format +msgid "Bad where regexp. where=%s\n" +msgstr "Złe polecenie regexp where. where=%s\n" + +#: src/filed/job.c:1918 +msgid "Improper calling sequence.\n" +msgstr "Niepoprawna sekwencja wołania.\n" + +#: src/filed/job.c:1938 +#, c-format +msgid "Bad response to SD read open: %s\n" +msgstr "Zła odpowiedź na SD read open: %s\n" + +#: src/filed/job.c:1943 +msgid "Bad response from stored to read open command\n" +msgstr "Zła odpowiedź ze stored na komendę read open\n" + +#: src/filed/job.c:2010 +#, c-format +msgid "Comm error with SD. bad response to %s. ERR=%s\n" +msgstr "Błąd komunikacji z SD. zła odpowiedź na %s. ERR=%s\n" + +#: src/filed/job.c:2013 +#, c-format +msgid "Bad response to %s command. Wanted %s, got %s\n" +msgstr "Zła odpowiedź na komendę %s. Oczekiwano %s, otrzymano %s\n" + +#: src/filed/job.c:2034 src/dird/msgchan.c:461 src/dird/restore.c:137 +#, c-format +msgid "Could not open bootstrap file %s: ERR=%s\n" +msgstr "Nie można otworzyć pliku bootstrap %s: ERR=%s\n" + +#: src/filed/fd_plugins.c:280 +#, c-format +msgid "Command plugin \"%s\" returned bad startBackupFile packet.\n" +msgstr "Wtyczka komend \"%s\" zwróciła zły pakiet startBackupFile.\n" + +#: src/filed/fd_plugins.c:328 +msgid "Plugin save packet not found.\n" +msgstr "Nie znaleziono pakietu zapisu wtyczki.\n" + +#: src/filed/fd_plugins.c:416 +#, c-format +msgid "Malformed plugin command. Name not terminated by colon: %s\n" +msgstr "Zniekształcona komenda wtyczki. Nazwa nie zakończona dwukropkiem: %s\n" + +#: src/filed/fd_plugins.c:449 +#, c-format +msgid "Plugin=%s not found.\n" +msgstr "Wtyczka=%s nie znaleziona.\n" + +#: src/filed/fd_plugins.c:495 +#, c-format +msgid "Plugin createFile call failed. Stat=%d file=%s\n" +msgstr "Nieudane wywołanie createFile wtyczki. Stat=%d file=%s\n" + +#: src/filed/fd_plugins.c:500 +#, c-format +msgid "Plugin createFile call failed. Returned CF_ERROR file=%s\n" +msgstr "Nieudane wywołanie createFile wtyczki. Zwrócono CF_ERROR plik=%s\n" + +#: src/filed/fd_plugins.c:516 src/findlib/create_file.c:222 +#, c-format +msgid "Could not create %s: ERR=%s\n" +msgstr "Nie można stworzyć %s: ERR=%s\n" + +#: src/filed/fd_plugins.c:599 +#, c-format +msgid "Loaded plugin: %s\n" +msgstr "Załadowana wtyczka: %s\n" + +#: src/filed/fd_plugins.c:618 +#, c-format +msgid "Plugin magic wrong. Plugin=%s wanted=%s got=%s\n" +msgstr "Zły znak magiczny wtyczki. Wtyczka=%s oczekiwano=%s otrzymano=%s\n" + +#: src/filed/fd_plugins.c:626 +#, c-format +msgid "Plugin version incorrect. Plugin=%s wanted=%d got=%d\n" +msgstr "Niepoprawna wersja wtyczki. Wtyczka=%s oczekiwano=%d otrzymano=%d\n" + +#: src/filed/fd_plugins.c:634 +#, c-format +msgid "Plugin license incompatible. 
Plugin=%s license=%s\n" +msgstr "Niekompatybilna licencja wtyczki. Wtyczka=%s licencja=%s\n" + +#: src/filed/status.c:84 src/dird/ua_cmds.c:1934 +#, c-format +msgid "%s Version: %s (%s) %s %s %s %s\n" +msgstr "%s Wersja: %s (%s) %s %s %s %s\n" + +#: src/filed/status.c:88 src/stored/status.c:226 +#, c-format +msgid "Daemon started %s, %d Job%s run since started.\n" +msgstr "Demon wystartowany %s, %d Zadań %s uruchomionych od wystartowania.\n" + +#: src/filed/status.c:136 src/dird/ua_status.c:305 src/stored/status.c:230 +#, c-format +msgid " Heap: heap=%s smbytes=%s max_bytes=%s bufs=%s max_bufs=%s\n" +msgstr " Sterta: heap=%s smbytes=%s max_bytes=%s bufs=%s max_bufs=%s\n" + +#: src/filed/status.c:143 +#, c-format +msgid " Sizeof: boffset_t=%d size_t=%d debug=%d trace=%d\n" +msgstr " Wielkości: boffset_t=%d size_t=%d debug=%d trace=%d\n" + +#: src/filed/status.c:177 src/dird/ua_status.c:610 src/stored/status.c:397 +msgid "" +"\n" +"Running Jobs:\n" +msgstr "" +"\n" +"Uruchomione Zadania:\n" + +#: src/filed/status.c:189 +#, c-format +msgid "Director connected at: %s\n" +msgstr "Dyrektor podłączony o: %s\n" + +#: src/filed/status.c:191 +#, c-format +msgid "JobId %d Job %s is running.\n" +msgstr "JobId %d Zadanie %s jest uruchomione.\n" + +#: src/filed/status.c:194 +#, c-format +msgid " %s%s %s Job started: %s\n" +msgstr " %s%s %s Zadanie wystartowane: %s\n" + +#: src/filed/status.c:207 +#, c-format +msgid " Files=%s Bytes=%s Bytes/sec=%s Errors=%d\n" +msgstr " Plików=%s Bajtów=%s Bajtów/sek=%s Błędów=%d\n" + +#: src/filed/status.c:213 +#, c-format +msgid " Files Examined=%s\n" +msgstr " Zbadanych Plików=%s\n" + +#: src/filed/status.c:218 +#, c-format +msgid " Processing file: %s\n" +msgstr " Przetwarzany plik: %s\n" + +#: src/filed/status.c:229 +msgid " SDSocket closed.\n" +msgstr " SDSocket zamknięto.\n" + +#: src/filed/status.c:237 src/dird/ua_cmds.c:520 src/stored/status.c:474 +msgid "No Jobs running.\n" +msgstr "Żadne Zadanie nie uruchomione.\n" + +#: src/filed/status.c:240 src/filed/status.c:258 src/filed/status.c:332 +msgid "====\n" +msgstr "====\n" + +#: src/filed/status.c:253 src/dird/ua_status.c:824 src/stored/status.c:508 +msgid "" +"\n" +"Terminated Jobs:\n" +msgstr "" +"\n" +"Zakończonych Zadań:\n" + +#: src/filed/status.c:263 src/dird/ua_status.c:825 src/stored/status.c:515 +msgid " JobId Level Files Bytes Status Finished Name \n" +msgstr " JobId Poziom Pliki Bajty Status Zakończone Nazwa \n" + +#: src/filed/status.c:265 src/dird/ua_status.c:635 +msgid "======================================================================\n" +msgstr "======================================================================\n" + +#: src/filed/status.c:286 src/lib/util.c:180 src/dird/ua_status.c:858 +#: src/stored/status.c:537 +msgid "Created" +msgstr "Utworzone" + +#: src/filed/status.c:290 src/lib/util.c:193 src/lib/util.c:328 +#: src/lib/util.c:474 src/dird/ua_status.c:862 src/stored/status.c:541 +msgid "Error" +msgstr "Błąd" + +#: src/filed/status.c:293 src/dird/ua_status.c:865 src/stored/status.c:544 +msgid "Diffs" +msgstr "Różnice" + +#: src/filed/status.c:296 src/dird/ua_status.c:868 src/dird/ua_restore.c:430 +#: src/stored/status.c:547 src/wx-console/wxbconfigpanel.cpp:220 +msgid "Cancel" +msgstr "Anulowane" + +#: src/filed/status.c:299 src/lib/util.c:189 src/lib/util.c:321 +#: src/dird/ua_status.c:871 src/stored/btape.c:1514 src/stored/status.c:550 +#: src/wx-console/wxbconfigpanel.cpp:209 +msgid "OK" +msgstr "OK" + +#: src/filed/status.c:302 src/dird/ua_status.c:877 src/stored/status.c:556 +msgid 
"Other" +msgstr "Inne" + +#: src/filed/status.c:314 src/dird/ua_status.c:881 src/stored/status.c:568 +#, c-format +msgid "%6d\t%-6s\t%8s\t%10s\t%-7s\t%-8s\t%s\n" +msgstr "%6d\t%-6s\t%8s\t%10s\t%-7s\t%-8s\t%s\n" + +#: src/filed/status.c:322 src/dird/ua_status.c:889 src/stored/status.c:576 +#, c-format +msgid "%6d %-6s %8s %10s %-7s %-8s %s\n" +msgstr "%6d %-6s %8s %10s %-7s %-8s %s\n" + +#: src/filed/status.c:386 src/filed/status.c:419 src/stored/status.c:699 +#: src/stored/status.c:745 +#, c-format +msgid "Bad .status command: %s\n" +msgstr "Za komenda .status: %s\n" + +#: src/filed/status.c:387 +msgid "2900 Bad .status command, missing argument.\n" +msgstr "2900 Za komenda .status, brakujcy argument.\n" + +#: src/filed/status.c:420 +msgid "2900 Bad .status command, wrong argument.\n" +msgstr "2900 Za komenda .status, niepoprawny argument.\n" + +#: src/filed/status.c:440 src/lib/util.c:420 src/stored/status.c:599 +msgid "Base" +msgstr "Baza" + +#: src/filed/status.c:442 src/lib/util.c:422 src/lib/util.c:468 +#: src/dird/ua_run.c:692 src/stored/status.c:601 +msgid "Full" +msgstr "Peny" + +#: src/filed/status.c:445 src/lib/util.c:425 src/dird/ua_run.c:693 +#: src/stored/status.c:604 +msgid "Incremental" +msgstr "Przyrostowy" + +#: src/filed/status.c:448 src/lib/util.c:428 src/dird/ua_run.c:694 +#: src/stored/status.c:607 +msgid "Differential" +msgstr "Rnicowy" + +#: src/filed/status.c:451 src/lib/util.c:431 src/dird/ua_run.c:695 +#: src/stored/status.c:610 +msgid "Since" +msgstr "Od" + +#: src/filed/status.c:454 src/lib/util.c:434 src/dird/ua_run.c:722 +#: src/stored/status.c:613 +msgid "Verify Catalog" +msgstr "Weryfikacja Katalogu" + +#: src/filed/status.c:457 src/stored/status.c:616 +msgid "Init Catalog" +msgstr "Inicjalizacja Katalogu" + +#: src/filed/status.c:460 src/stored/status.c:619 +msgid "Volume to Catalog" +msgstr "Wolumen do Katalogu" + +#: src/filed/status.c:463 src/stored/status.c:622 +msgid "Disk to Catalog" +msgstr "Dysk do Katalogu" + +#: src/filed/status.c:466 src/stored/status.c:625 +msgid "Data" +msgstr "Dane" + +#: src/filed/status.c:472 src/lib/util.c:455 src/stored/status.c:631 +msgid "Unknown Job Level" +msgstr "Nieznany Poziom Zadania" + +#: src/filed/status.c:488 +msgid "Bacula Client: Idle" +msgstr "Klient Bacula: Bezczynny" + +#: src/filed/status.c:499 +msgid "Bacula Client: Running" +msgstr "Klient Bacula: Uruchomiony" + +#: src/filed/status.c:513 +msgid "Bacula Client: Last Job Canceled" +msgstr "Klient Bacula: Ostatnie Zadanie Anulowane" + +#: src/filed/status.c:517 +msgid "Bacula Client: Last Job Failed" +msgstr "Klient Bacula: Ostatnie Zadanie Nieudane" + +#: src/filed/status.c:521 +msgid "Bacula Client: Last Job had Warnings" +msgstr "Klient Bacula: Ostatnie Zadanie miao Ostrzeżenia" + +#: src/filed/verify.c:59 +#, c-format +msgid "Cannot malloc %d network read buffer\n" +msgstr "Nie można alokowa %d buforw odczytu sieci\n" + +#: src/filed/verify.c:128 +#, c-format +msgid " Could not access %s: ERR=%s\n" +msgstr " Brak dostpu %s: ERR=%s\n" + +#: src/filed/verify.c:135 +#, c-format +msgid " Could not follow link %s: ERR=%s\n" +msgstr " Nie można poda za linkiem %s: ERR=%s\n" + +#: src/filed/verify.c:142 +#, c-format +msgid " Could not stat %s: ERR=%s\n" +msgstr " Nie można wykona stat %s: ERR=%s\n" + +#: src/filed/verify.c:151 +#, c-format +msgid " Archive file skipped: %s\n" +msgstr " Ominito archiwalny plik: %s\n" + +#: src/filed/verify.c:154 +#, c-format +msgid " Recursion turned off. Directory skipped: %s\n" +msgstr " Rekursja wyczona. 
Ominito katalog: %s\n" + +#: src/filed/verify.c:158 +#, c-format +msgid " File system change prohibited. Directory skipped: %s\n" +msgstr " Zabroniona zmiana filesystemu. Ominito katalog: %s\n" + +#: src/filed/verify.c:163 +#, c-format +msgid " Could not open directory %s: ERR=%s\n" +msgstr " Nie można otworzyć katalogu %s: ERR=%s\n" + +#: src/filed/verify.c:168 +#, c-format +msgid " Unknown file type %d: %s\n" +msgstr " Nieznany rodziaj pliku %d: %s\n" + +#: src/filed/verify.c:306 +#, c-format +msgid " Cannot open %s: ERR=%s.\n" +msgstr " Nie można otworzyć %s: ERR=%s.\n" + +#: src/filed/verify.c:320 +#, c-format +msgid " Cannot open resource fork for %s: ERR=%s.\n" +msgstr " Nie można otworzyć rozwidlenia zasobu dla %s: ERR=%s.\n" + +#: src/filed/verify.c:381 +#, c-format +msgid "Error reading file %s: ERR=%s\n" +msgstr "Błąd odczytywania pliku %s: ERR=%s\n" + +#: src/filed/pythonfd.c:94 src/filed/pythonfd.c:148 src/filed/pythonfd.c:212 +#: src/dird/pythondir.c:122 src/stored/pythonsd.c:101 +#: src/stored/pythonsd.c:166 +msgid "Job pointer not found." +msgstr "Nie znaleziono wskanika zadania." + +#: src/filed/pythonfd.c:125 src/dird/pythondir.c:210 src/stored/pythonsd.c:143 +#, c-format +msgid "Attribute %s not found." +msgstr "Atrybut %s nie znaleziony." + +#: src/filed/pythonfd.c:142 src/stored/pythonsd.c:160 +#, c-format +msgid "Cannot delete attribute %s" +msgstr "Nie można skasowa atrybutu %s" + +#: src/filed/pythonfd.c:160 src/filed/pythonfd.c:176 src/stored/pythonsd.c:193 +#, c-format +msgid "Cannot find attribute %s" +msgstr "Nie można znale atrybutu %s" + +#: src/filed/pythonfd.c:167 src/dird/pythondir.c:253 src/dird/pythondir.c:259 +#: src/stored/pythonsd.c:183 +msgid "Read-only attribute" +msgstr "Atrybut tylko do odczytu" + +#: src/filed/filed.c:76 +msgid "" +"\n" +"Version: %s (%s)\n" +"\n" +"Usage: bacula-fd [-f -s] [-c config_file] [-d debug_level]\n" +" -c use as configuration file\n" +" -d set debug level to \n" +" -dt print a timestamp in debug output\n" +" -f run in foreground (for debugging)\n" +" -g groupid\n" +" -k keep readall capabilities\n" +" -m print kaboom output (for debugging)\n" +" -s no signals (for debugging)\n" +" -t test configuration file and exit\n" +" -u userid\n" +" -v verbose user messages\n" +" -? print this message.\n" +"\n" +msgstr "" +"\n" +"Wersja: %s (%s)\n" +"\n" +"Użycie: bacula-fd [-f -s] [-c config_file] [-d debug_level]\n" +" -c użyj jako pliku konfiguracyjnego\n" +" -d ustaw poziom debugingu na \n" +" -dt wyświetl znacznik czasu podczas wywietlania debugingu\n" +" -f uruchom na pierwszym planie (dla debugingu)\n" +" -g identyfikator grupy\n" +" -k utrzymaj właściwości readall\n" +" -m wyświetl informacje kaboom (dla debugingu)\n" +" -s brak sygnałów (dla debugingu)\n" +" -t przetestuj plik konfiguracji i zakończ\n" +" -u identyfikator użytkownika\n" +" -v gadatliwe komunikaty użytkownika\n" +" -? 
wyświetl ten komunikat.\n" +"\n" + +#: src/filed/filed.c:198 +msgid "-k option has no meaning without -u option.\n" +msgstr "opcja -k nie ma znaczenia bez opcji -u.\n" + +#: src/filed/filed.c:217 src/dird/dird.c:257 src/console/console.c:1104 +#: src/stored/stored.c:242 src/qt-console/main.cpp:156 +msgid "Cryptography library initialization failed.\n" +msgstr "Nieudana inicjalizacja biblioteki kryptograficznej.\n" + +#: src/filed/filed.c:222 src/dird/dird.c:261 src/dird/dird.c:289 +#: src/dird/dird.c:535 src/dird/dird.c:538 src/console/console.c:1108 +#: src/stored/stored.c:246 src/qt-console/main.cpp:160 +#, c-format +msgid "Please correct configuration file: %s\n" +msgstr "Proszę popraw plik konfiguracyjny: %s\n" + +#: src/filed/filed.c:335 +#, c-format +msgid "" +"No File daemon resource defined in %s\n" +"Without that I don't know who I am :-(\n" +msgstr "" +"Brak definicji zasobu demona Plików w %s\n" +"Bez tego nie wiem kim jestem :-(\n" + +#: src/filed/filed.c:340 +#, c-format +msgid "Only one Client resource permitted in %s\n" +msgstr "Dozwolony tylko jeden zasób Client w %s\n" + +#: src/filed/filed.c:348 src/dird/dird.c:611 +#, c-format +msgid "No Messages resource defined in %s\n" +msgstr "Brak definicji zasobu Messages w %s\n" + +#: src/filed/filed.c:355 src/filed/filed.c:514 src/dird/dird.c:625 +#: src/dird/dird.c:831 src/dird/dird.c:886 src/dird/dird.c:1044 +#: src/console/console.c:1291 src/console/console.c:1321 +#: src/stored/stored.c:381 src/wx-console/console_thread.cpp:118 +#: src/wx-console/console_thread.cpp:144 src/qt-console/main.cpp:212 +#: src/qt-console/main.cpp:242 +msgid "TLS required but not configured in Bacula.\n" +msgstr "Wymagane TLS lecz nie zostao skonfigurowane w Baculi.\n" + +#: src/filed/filed.c:364 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"File daemon in %s.\n" +msgstr "" +"Ani \"TLS CA Certificate\", ani \"TLS CA Certificate Dir\" nie zostay " +"zdefiniowane dla demona Plików w %s.\n" + +#: src/filed/filed.c:380 src/dird/dird.c:870 src/dird/dird.c:910 +#, c-format +msgid "Failed to initialize TLS context for File daemon \"%s\" in %s.\n" +msgstr "Nieudana inicjalizacja kontekstu TLS dla demona Plików \"%s\" in %s.\n" + +#: src/filed/filed.c:388 +msgid "PKI encryption/signing enabled but not compiled into Bacula.\n" +msgstr "" +"Szyfrowanie/podpisywanie PKI wczone ale nie jest wkompilowane w Bacula.\n" + +#: src/filed/filed.c:399 +#, c-format +msgid "" +"\"PKI Key Pair\" must be defined for File daemon \"%s\" in %s if either " +"\"PKI Sign\" or \"PKI Encrypt\" are enabled.\n" +msgstr "" +"\"PKI Key Pair\" musi by zdefiniowane dla demona Plików \"%s\" w %s jeli ani " +"\"PKI Sign\", ani \"PKI Encrypt\" nie zostay wczone.\n" + +#: src/filed/filed.c:411 src/filed/filed.c:442 src/filed/filed.c:483 +msgid "Failed to allocate a new keypair object.\n" +msgstr "Nieudana alokacja nowego obiektu pary kluczy.\n" + +#: src/filed/filed.c:415 +#, c-format +msgid "Failed to load public certificate for File daemon \"%s\" in %s.\n" +msgstr "" +"Nieudane zaadowanie publicznego certyfikatu dla demona Plików \"%s\" w %s.\n" + +#: src/filed/filed.c:421 +#, c-format +msgid "Failed to load private key for File daemon \"%s\" in %s.\n" +msgstr "Nieudane zaadowanie prywatnego klucza dla demona Plików \"%s\" w %s.\n" + +#: src/filed/filed.c:451 +#, c-format +msgid "Failed to load private key from file %s for File daemon \"%s\" in %s.\n" +msgstr "" +"Nieudane zaadowanie prywatnego klucza %s dla demona Plików \"%s\" w 
%s.\n" + +#: src/filed/filed.c:458 +#, c-format +msgid "" +"Failed to load trusted signer certificate from file %s for File daemon \"%s" +"\" in %s.\n" +msgstr "" +"Nieudane zaadowanie zaufanego certyfikatu osoby podpisujcej z pliku %s dla " +"demona Plików \"%s\" w %s.\n" + +#: src/filed/filed.c:489 +#, c-format +msgid "" +"Failed to load master key certificate from file %s for File daemon \"%s\" in " +"%s.\n" +msgstr "" +"Nieudane zaadowanie certyfikatu gwnego klucza z pliku %s dla demona Plików \"%" +"s\" w %s.\n" + +#: src/filed/filed.c:505 +#, c-format +msgid "No Director resource defined in %s\n" +msgstr "Brak definicji zasobu Dyrektora w %s\n" + +#: src/filed/filed.c:524 src/dird/dird.c:633 src/stored/stored.c:437 +#, c-format +msgid "\"TLS Certificate\" file not defined for Director \"%s\" in %s.\n" +msgstr "Nie zdefiniowany plik \"TLS Certificate\" dla Dyrektora \"%s\" w %s.\n" + +#: src/filed/filed.c:530 src/dird/dird.c:639 src/stored/stored.c:443 +#, c-format +msgid "\"TLS Key\" file not defined for Director \"%s\" in %s.\n" +msgstr "Nie zdefiniowany plik \"TLS Key\" dla Dyrektora \"%s\" w %s.\n" + +#: src/filed/filed.c:536 src/dird/dird.c:646 src/stored/stored.c:449 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Director \"%s\" in %s. At least one CA certificate store is required when " +"using \"TLS Verify Peer\".\n" +msgstr "" +"Ani \"TLS CA Certificate\", ani \"TLS CA Certificate Dir\" nie s " +"zdefiniowane dla Dyrektora \"%s\" w %s. Co najmniej jedno skadowanie " +"certyfikatu CA jest wymagane kiedy jest używane \"TLS Verify Peer\".\n" + +#: src/filed/filed.c:555 src/dird/dird.c:665 src/stored/stored.c:468 +#, c-format +msgid "Failed to initialize TLS context for Director \"%s\" in %s.\n" +msgstr "Nieudana inicjalizacja kontekstu TLS dla Dyrektora \"%s\" w %s.\n" + +#: src/tray-monitor/tray_conf.c:168 src/dird/dird_conf.c:558 +#, c-format +msgid "No %s resource defined\n" +msgstr "Nie zdefiniowano zasobu %s\n" + +#: src/tray-monitor/tray_conf.c:177 +#, c-format +msgid "Monitor: name=%s FDtimeout=%s SDtimeout=%s\n" +msgstr "Monitor: nazwa=%s FDtimeout=%s SDtimeout=%s\n" + +#: src/tray-monitor/tray_conf.c:183 +#, c-format +msgid "Director: name=%s address=%s FDport=%d\n" +msgstr "Dyrektor: nazwa=%s adres=%s FDport=%d\n" + +#: src/tray-monitor/tray_conf.c:187 +#, c-format +msgid "Client: name=%s address=%s FDport=%d\n" +msgstr "Klient: nazwa=%s adres=%s FDport=%d\n" + +#: src/tray-monitor/tray_conf.c:191 +#, c-format +msgid "Storage: name=%s address=%s SDport=%d\n" +msgstr "Storage: nazwa=%s adres=%s SDport=%d\n" + +#: src/tray-monitor/tray_conf.c:195 src/qt-console/bat_conf.cpp:157 +#, c-format +msgid "ConsoleFont: name=%s font face=%s\n" +msgstr "ConsoleFont: nazwa=%s krj czcionki=%s\n" + +#: src/tray-monitor/tray_conf.c:199 src/dird/dird_conf.c:1008 +#, c-format +msgid "Unknown resource type %d in dump_resource.\n" +msgstr "Nieznany typ zasobu %d w dump_resource.\n" + +#: src/tray-monitor/tray_conf.c:257 src/dird/dird_conf.c:1339 +#, c-format +msgid "Unknown resource type %d in free_resource.\n" +msgstr "Nieznany typ zasobu %d w free_resource.\n" + +#: src/tray-monitor/tray_conf.c:294 src/lib/parse_conf.c:224 +#: src/dird/dird_conf.c:1377 +#, c-format +msgid "Too many items in %s resource\n" +msgstr "Zbyt duo elementw w zasobie %s\n" + +#: src/tray-monitor/tray_conf.c:314 src/tray-monitor/tray_conf.c:352 +#: src/dird/dird_conf.c:1526 src/dird/dird_conf.c:1586 +#, c-format +msgid "Unknown resource type %d in 
save_resource.\n" +msgstr "Nieznany typ zasobu %d w save_resource.\n" + +#: src/tray-monitor/authenticate.c:88 +msgid "" +"Director authorization problem.\n" +"Most likely the passwords do not agree.\n" +"Please see http://www.bacula.org/en/rel-manual/Bacula_Freque_Asked_Questi." +"html#SECTION003760000000000000000 for help.\n" +msgstr "" +"Problem autoryzacji Dyrektora.\n" +"Najprawdopodobniej nie zgadzaj się hasa.\n" +"Proszę zobacz http://www.bacula.org/en/rel-manual/Bacula_Freque_Asked_Questi." +"html#SECTION003760000000000000000 dla uzyskania pomocy.\n" + +#: src/tray-monitor/authenticate.c:97 src/console/authenticate.c:155 +#, c-format +msgid "Bad response to Hello command: ERR=%s\n" +msgstr "Za odpowiedź na komend Hello: ERR=%s\n" + +#: src/tray-monitor/authenticate.c:132 src/dird/authenticate.c:83 +#: src/dird/authenticate.c:84 +#, c-format +msgid "Error sending Hello to Storage daemon. ERR=%s\n" +msgstr "Błąd w wysyaniu Hello do demona Przechowywania. ERR=%s\n" + +#: src/tray-monitor/authenticate.c:138 +msgid "" +"Director and Storage daemon passwords or names not the same.\n" +"Please see http://www.bacula.org/en/rel-manual/Bacula_Freque_Asked_Questi." +"html#SECTION003760000000000000000 for help.\n" +msgstr "" +"Hasła demonów Dyrektora i Przechowywania nie s takie same.\n" +"Proszę zobacz http://www.bacula.org/en/rel-manual/Bacula_Freque_Asked_Questi." +"html#SECTION003760000000000000000 dla uzyskania pomocy.\n" + +#: src/tray-monitor/authenticate.c:145 +msgid "bdird set configuration file to file\n" +" -d set debug level to \n" +" -dt print timestamp in debug output\n" +" -t test - read configuration and exit\n" +" -? print this message.\n" +"\n" +msgstr "" +"Napisany przez Nicolas Boichat (2004)\n" +"\n" +"Wersja: %s (%s) %s %s %s\n" +"\n" +"Użycie: tray-monitor [-c config_file] [-d debug_level]\n" +" -c ustaw plik konfiguracyjny na \n" +" -d ustaw poziom debugowania na \n" +" -dt wyświetla znaczniki czasowe w debugingu\n" +" -t test - odczytuje konfigurację i kończy działanie\n" +" -? wyświetla ten komunikat.\n" +"\n" + +#: src/tray-monitor/tray-monitor.c:270 +#, c-format +msgid "" +"Error: %d Monitor resources defined in %s. You must define one and only one " +"Monitor resource.\n" +msgstr "" +"Bd: zdefiniowano %d zasoby Monitor w %s. Musisz zdefiniowa wycznie jeden i " +"tylko jeden zasób Monitora.\n" + +#: src/tray-monitor/tray-monitor.c:301 +#, c-format +msgid "" +"No Client, Storage or Director resource defined in %s\n" +"Without that I don't how to get status from the File, Storage or Director " +"Daemon :-(\n" +msgstr "" +"Brak definicji zasobu Klienta, Przechowywania lub Dyrektora w %s\n" +"Bez tego nie wiem jak mam otrzyma status z demonów Plików, Przechowywania i " +"Dyrektora. :-(\n" + +#: src/tray-monitor/tray-monitor.c:323 +#, c-format +msgid "" +"Invalid refresh interval defined in %s\n" +"This value must be greater or equal to 1 second and less or equal to 10 " +"minutes (read value: %d).\n" +msgstr "" +"Niepoprawny interwa czasu zdefiniowany w %s\n" +"Warto ta musi by wiksza lub rwna 1 sekundzie i mniejsza lub rwna 10 minutom " +"(odczytana warto: %d).\n" + +#: src/tray-monitor/tray-monitor.c:330 +msgid "Bacula daemon status monitor" +msgstr "Monitor statusu demona Bacula" + +#: src/tray-monitor/tray-monitor.c:339 +msgid "Open status window..." +msgstr "Otwrz okno statusu..." 
+ +#: src/tray-monitor/tray-monitor.c:345 +msgid "Exit" +msgstr "Wyjcie" + +# Tray to tacka - nie do końca mi to pasuje: "Tackowy Monitor Baculi" +#: src/tray-monitor/tray-monitor.c:357 +msgid "Bacula tray monitor" +msgstr "Tackowy monitor Baculi" + +#: src/tray-monitor/tray-monitor.c:382 +msgid " (DIR)" +msgstr " (DIR)" + +#: src/tray-monitor/tray-monitor.c:386 +msgid " (FD)" +msgstr " (FD)" + +#: src/tray-monitor/tray-monitor.c:390 +msgid " (SD)" +msgstr " (SD)" + +#: src/tray-monitor/tray-monitor.c:403 +msgid "Unknown status." +msgstr "Nieznany status." + +#: src/tray-monitor/tray-monitor.c:477 +msgid "Refresh interval in seconds: " +msgstr "Interwa odwierzania w sekundach: " + +#: src/tray-monitor/tray-monitor.c:485 +msgid "Refresh now" +msgstr "Odwierz teraz" + +#: src/tray-monitor/tray-monitor.c:489 +msgid "About" +msgstr "O Programie" + +#: src/tray-monitor/tray-monitor.c:493 +msgid "Close" +msgstr "Zamknij" + +#: src/tray-monitor/tray-monitor.c:513 +#, c-format +msgid "Disconnecting from Director %s:%d\n" +msgstr "Rozłączanie od Dyrektora %s:%d\n" + +#: src/tray-monitor/tray-monitor.c:516 +#, c-format +msgid "Disconnecting from Client %s:%d\n" +msgstr "Rozłączanie od Klienta %s:%d\n" + +#: src/tray-monitor/tray-monitor.c:519 +#, c-format +msgid "Disconnecting from Storage %s:%d\n" +msgstr "Rozłączanie od Storage %s:%d\n" + +#: src/tray-monitor/tray-monitor.c:560 src/tray-monitor/tray-monitor.c:571 +msgid "Bacula Tray Monitor" +msgstr "Monitor programu Bacula" + +#: src/tray-monitor/tray-monitor.c:562 src/tray-monitor/tray-monitor.c:573 +msgid "Written by Nicolas Boichat\n" +msgstr "Napisany przez Nicolas Boichat\n" + +#: src/tray-monitor/tray-monitor.c:563 src/tray-monitor/tray-monitor.c:574 +msgid "Version" +msgstr "Wersja" + +#: src/tray-monitor/tray-monitor.c:656 +#, c-format +msgid "Error, currentitem is not a Client or a Storage..\n" +msgstr "Bd, currentitem nie jest ani Klientem ani Storage...\n" + +#: src/tray-monitor/tray-monitor.c:730 +#, c-format +msgid "" +"Current job: %s\n" +"Last job: %s" +msgstr "" +"Aktualne zadanie: %s\n" +"Ostatnie zadanie: %s" + +#: src/tray-monitor/tray-monitor.c:742 +#, c-format +msgid " (%d errors)" +msgstr " (%d bdw)" + +#: src/tray-monitor/tray-monitor.c:745 +#, c-format +msgid " (%d error)" +msgstr " (%d bd)" + +#: src/tray-monitor/tray-monitor.c:783 +msgid "No current job." +msgstr "Brak aktualnego zadania." + +#: src/tray-monitor/tray-monitor.c:786 +msgid "No last job." +msgstr "Brak ostatniego zadania." 
+ +#: src/tray-monitor/tray-monitor.c:794 +msgid "Job status: Created" +msgstr "Status zadania: Stworzone" + +#: src/tray-monitor/tray-monitor.c:799 +msgid "Job status: Running" +msgstr "Status zadania: Uruchomione" + +#: src/tray-monitor/tray-monitor.c:803 +msgid "Job status: Blocked" +msgstr "Status zadania: Zablokowane" + +#: src/tray-monitor/tray-monitor.c:808 +msgid "Job status: Terminated" +msgstr "Status zadania: Zakoczone" + +#: src/tray-monitor/tray-monitor.c:813 +msgid "Job status: Terminated in error" +msgstr "Status zadania: Zakoczone przez bd" + +#: src/tray-monitor/tray-monitor.c:819 +msgid "Job status: Error" +msgstr "Status zadania: Bd" + +#: src/tray-monitor/tray-monitor.c:823 +msgid "Job status: Fatal error" +msgstr "Status zadania: Błąd krytyczny" + +#: src/tray-monitor/tray-monitor.c:828 +msgid "Job status: Verify differences" +msgstr "Status zadania: Rnice przy weryfikacji" + +#: src/tray-monitor/tray-monitor.c:833 +msgid "Job status: Canceled" +msgstr "Status zadania: Anulowane" + +#: src/tray-monitor/tray-monitor.c:838 +msgid "Job status: Waiting on File daemon" +msgstr "Status zadania: Oczekiwanie na demon Plików" + +#: src/tray-monitor/tray-monitor.c:843 +msgid "Job status: Waiting on the Storage daemon" +msgstr "Status zadania: Oczekiwanie na demon Przechowywania" + +#: src/tray-monitor/tray-monitor.c:848 +msgid "Job status: Waiting for new media" +msgstr "Status zadania: Oczekiwanie na nowe media" + +#: src/tray-monitor/tray-monitor.c:853 +msgid "Job status: Waiting for Mount" +msgstr "Status zadania: Oczekiwanie na Zamontowanie" + +#: src/tray-monitor/tray-monitor.c:858 +msgid "Job status: Waiting for storage resource" +msgstr "Status zadania: Oczekiwanie na zasoby przechowywania" + +#: src/tray-monitor/tray-monitor.c:863 +msgid "Job status: Waiting for job resource" +msgstr "Status zadania: Oczekiwanie na zasoby zadania" + +#: src/tray-monitor/tray-monitor.c:868 +msgid "Job status: Waiting for Client resource" +msgstr "Status zadania: Oczekiwanie na zasoby Klienta" + +#: src/tray-monitor/tray-monitor.c:873 +msgid "Job status: Waiting for maximum jobs" +msgstr "Status zadania: Oczekiwanie ze wzgldu na maksymaln ilo zada" + +#: src/tray-monitor/tray-monitor.c:878 +msgid "Job status: Waiting for start time" +msgstr "Status zadania: Oczekiwanie na czas uruchomienia" + +#: src/tray-monitor/tray-monitor.c:883 +msgid "Job status: Waiting for higher priority jobs to finish" +msgstr "Status zadania: Oczekiwanie na zakoczenie zadań o wyszym priorytecie" + +#: src/tray-monitor/tray-monitor.c:888 +#, c-format +msgid "Unknown job status %c." +msgstr "Nieznany status zadania %c." 
+ +#: src/tray-monitor/tray-monitor.c:889 +#, c-format +msgid "Job status: Unknown(%c)" +msgstr "Status zadania: Nieznany(%c)" + +#: src/tray-monitor/tray-monitor.c:896 +#, c-format +msgid "Bad scan : '%s' %d\n" +msgstr "Ze skanowanie : '%s' %d\n" + +#: src/tray-monitor/tray-monitor.c:936 src/console/console.c:1138 +#, c-format +msgid "Connecting to Director %s:%d\n" +msgstr "Podłączenie do Dyrektora %s:%d\n" + +#: src/tray-monitor/tray-monitor.c:937 src/qt-console/bcomm/dircomm.cpp:102 +#, c-format +msgid "Connecting to Director %s:%d" +msgstr "Podłączenie do Dyrektora %s:%d" + +#: src/tray-monitor/tray-monitor.c:938 src/wx-console/console_thread.cpp:428 +#: src/qt-console/bcomm/dircomm.cpp:170 +msgid "Director daemon" +msgstr "Demon Dyrektora" + +#: src/tray-monitor/tray-monitor.c:943 +#, c-format +msgid "Connecting to Client %s:%d\n" +msgstr "Podłączenie do Klienta %s:%d\n" + +#: src/tray-monitor/tray-monitor.c:944 +#, c-format +msgid "Connecting to Client %s:%d" +msgstr "Podłączenie do Klienta %s:%d" + +#: src/tray-monitor/tray-monitor.c:945 +msgid "File daemon" +msgstr "Demon Plików" + +#: src/tray-monitor/tray-monitor.c:950 +#, c-format +msgid "Connecting to Storage %s:%d\n" +msgstr "Podłączenie do Storage %s:%d\n" + +#: src/tray-monitor/tray-monitor.c:951 +#, c-format +msgid "Connecting to Storage %s:%d" +msgstr "Podłączenie do Storage %s:%d" + +#: src/tray-monitor/tray-monitor.c:956 src/tray-monitor/tray-monitor.c:994 +#, c-format +msgid "Error, currentitem is not a Client, a Storage or a Director..\n" +msgstr "Błąd currentitem nie jest Klientem, Storage ani Dyrektorem..\n" + +#: src/tray-monitor/tray-monitor.c:962 +msgid "Cannot connect to daemon.\n" +msgstr "Nie można połączyć się do demona.\n" + +#: src/tray-monitor/tray-monitor.c:963 +msgid "Cannot connect to daemon." +msgstr "Nie można połączyć się do demona." + +#: src/tray-monitor/tray-monitor.c:975 +#, c-format +msgid "Authentication error : %s" +msgstr "Bad autentykacji : %s" + +#: src/tray-monitor/tray-monitor.c:982 +msgid "Opened connection with Director daemon.\n" +msgstr "Otwarte poczenie z demonem Dyrektora.\n" + +#: src/tray-monitor/tray-monitor.c:983 +msgid "Opened connection with Director daemon." +msgstr "Otwarte poczenie z demonem Dyrektora." + +#: src/tray-monitor/tray-monitor.c:986 +msgid "Opened connection with File daemon.\n" +msgstr "Otwarte poczenie z demonem plikw.\n" + +#: src/tray-monitor/tray-monitor.c:987 +msgid "Opened connection with File daemon." +msgstr "Otwarte poczenie z demonem plikw." + +#: src/tray-monitor/tray-monitor.c:990 +msgid "Opened connection with Storage daemon.\n" +msgstr "Otwarte poczenie z demonem Przechowywania.\n" + +#: src/tray-monitor/tray-monitor.c:991 +msgid "Opened connection with Storage daemon." +msgstr "Otwarte poczenie z demonem Przechowywania." + +#: src/tray-monitor/tray-monitor.c:1028 +msgid "<< Error: BNET_PROMPT signal received. >>\n" +msgstr "<< Błąd: otrzymano sygnał BNET_PROMPT. >>\n" + +#: src/tray-monitor/tray-monitor.c:1033 src/wx-console/console_thread.cpp:494 +msgid "<< Heartbeat signal received, answered. >>\n" +msgstr "<< Otrzymano sygnał Heartbeat, odpowiedziano. 
>>\n" + +#: src/tray-monitor/tray-monitor.c:1037 +msgid "<< Unexpected signal received : %s >>\n" +msgstr "<< Otrzymano nieoczekiwany sygnał : %s >>\n" + +#: src/tray-monitor/tray-monitor.c:1042 +msgid "\n" +msgstr "\n" + +#: src/tray-monitor/tray-monitor.c:1046 +msgid "Error : BNET_HARDEOF or BNET_ERROR" +msgstr "Błąd : BNET_HARDEOF lub BNET_ERROR" + +#: src/tray-monitor/tray-monitor.c:1052 +msgid "\n" +msgstr "\n" + +#: src/tray-monitor/tray-monitor.c:1056 +msgid "Error : Connection closed." +msgstr "Błąd : Połączenie zamknięte." + +#: src/cats/mysql.c:81 +msgid "A user name for MySQL must be supplied.\n" +msgstr "Nazwa użytkownika dla MySQL powinna by dostarczona.\n" + +#: src/cats/mysql.c:157 src/cats/postgresql.c:190 src/cats/sqlite.c:165 +#: src/cats/ingres.c:204 src/cats/dbi.c:219 +#, c-format +msgid "Unable to initialize DB lock. ERR=%s\n" +msgstr "Nie można zainicjalizowa blokady BD. ERR=%s\n" + +#: src/cats/mysql.c:195 +#, c-format +msgid "" +"Unable to connect to MySQL server.\n" +"Database=%s User=%s\n" +"MySQL connect failed either server not running or your authorization is " +"incorrect.\n" +msgstr "" +"Nie można połączyć się do serwera MySQL.\n" +"Baza danych=%s Użytkownik=%s\n" +"Nieudane MySQL connect, moliwe e nie dziaa serwer lub twoja autoryzacja jest " +"niepoprawna.\n" + +#: src/cats/mysql.c:286 +msgid "MySQL client library must be thread-safe when using BatchMode.\n" +msgstr "" +"Biblioteka kliencka MySQL powinna by typu thread-safe jesli używamy trybu " +"wsadowego (BatchMode).\n" + +#: src/cats/mysql.c:345 src/cats/postgresql.c:365 src/cats/sqlite.c:378 +#: src/cats/ingres.c:363 src/cats/dbi.c:465 +#, c-format +msgid "Query failed: %s: ERR=%s\n" +msgstr "Nieudane zapytanie: %s: ERR=%s\n" + +#: src/cats/sql_create.c:101 +#, c-format +msgid "Create DB Job record %s failed. ERR=%s\n" +msgstr "Nieudane stworzenie rekordu BD Zadania %s. ERR=%s\n" + +#: src/cats/sql_create.c:151 +#, c-format +msgid "Create JobMedia record %s failed: ERR=%s\n" +msgstr "Nieudane stworzenie rekordu JobMedia %s: ERR=%s\n" + +#: src/cats/sql_create.c:160 +msgid "Update Media record %s failed: ERR=%s\n" +msgstr "Nieudana aktualizacja rekordu Media %s: ERR=%s\n" + +#: src/cats/sql_create.c:188 +#, c-format +msgid "pool record %s already exists\n" +msgstr "rekord puli %s ju istnieje\n" + +#: src/cats/sql_create.c:219 +msgid "Create db Pool record %s failed: ERR=%s\n" +msgstr "Nieudane stworzenie rekordu BD Puli %s: ERR=%s\n" + +#: src/cats/sql_create.c:251 +msgid "Device record %s already exists\n" +msgstr "Rekord urządzenia %s już istnieje\n" + +#: src/cats/sql_create.c:267 +msgid "Create db Device record %s failed: ERR=%s\n" +msgstr "Nieudane stworzenie rekordu bd Urządzenia %s: ERR=%s\n" + +#: src/cats/sql_create.c:301 +msgid "More than one Storage record!: %d\n" +msgstr "Więcej niż jeden rekord Storage!: %d\n" + +#: src/cats/sql_create.c:306 +msgid "error fetching Storage row: %s\n" +msgstr "błąd pobierania wiersza Storage: %s\n" + +#: src/cats/sql_create.c:326 +msgid "Create DB Storage record %s failed. ERR=%s\n" +msgstr "Nieudane stworzenie rekordu BD Storage %s. 
ERR=%s\n" + +#: src/cats/sql_create.c:358 +msgid "mediatype record %s already exists\n" +msgstr "rekord mediatype %s już istnieje\n" + +#: src/cats/sql_create.c:374 +msgid "Create db mediatype record %s failed: ERR=%s\n" +msgstr "Nieudane stworzenie rekordu mediatype %s: ERR=%s\n" + +#: src/cats/sql_create.c:409 +msgid "Volume \"%s\" already exists.\n" +msgstr "Wolumen \"%s\" już istnieje.\n" + +#: src/cats/sql_create.c:454 +msgid "Create DB Media record %s failed. ERR=%s\n" +msgstr "Nieudane stworzenie rekordu BD Media %s: ERR=%s\n" + +#: src/cats/sql_create.c:501 +msgid "More than one Client!: %d\n" +msgstr "Więcej niż jeden Klient!: %d\n" + +#: src/cats/sql_create.c:506 +msgid "error fetching Client row: %s\n" +msgstr "błąd pobierania wiersza Klienta: %s\n" + +#: src/cats/sql_create.c:533 +msgid "Create DB Client record %s failed. ERR=%s\n" +msgstr "Nieudane stworzenie rekordu BD Klienta %s. ERR=%s\n" + +#: src/cats/sql_create.c:568 src/cats/sql_get.c:244 +#, c-format +msgid "More than one Path!: %s for path: %s\n" +msgstr "Więcej niż jedna Ścieżka: %s dla ścieżki: %s\n" + +#: src/cats/sql_create.c:575 src/cats/sql.c:358 src/cats/sql.c:365 +#: src/cats/postgresql.c:149 src/cats/sql_get.c:199 src/cats/sql_get.c:251 +#: src/cats/sql_get.c:626 src/cats/sql_get.c:705 src/cats/sql_get.c:1014 +msgid "error fetching row: %s\n" +msgstr "błąd pobierania wiersza: %s\n" + +#: src/cats/sql_create.c:599 +msgid "Create db Path record %s failed. ERR=%s\n" +msgstr "Nieudane stworzenie rekordu BD Ścieżki. ERR=%s\n" + +#: src/cats/sql_create.c:644 +msgid "Create DB Counters record %s failed. ERR=%s\n" +msgstr "Nieudane stworzenie rekordu BD Liczników %s. ERR=%s\n" + +#: src/cats/sql_create.c:677 +msgid "More than one FileSet!: %d\n" +msgstr "Więcej niż jeden FileSet!: %d\n" + +#: src/cats/sql_create.c:682 +msgid "error fetching FileSet row: ERR=%s\n" +msgstr "błąd pobierania wiersza FileSet: ERR=%s\n" + +#: src/cats/sql_create.c:712 +msgid "Create DB FileSet record %s failed. ERR=%s\n" +msgstr "Nieudane stworzenie rekordu BD FileSet %s. ERR=%s\n" + +#: src/cats/sql_create.c:1035 +msgid "Create db File record %s failed. ERR=%s" +msgstr "Nieudane stworzenie rekordu bd Pliku %s. ERR=%s" + +#: src/cats/sql_create.c:1061 +msgid "More than one Filename! %s for file: %s\n" +msgstr "Więcej niż jedna Nazwa pliku! %s dla pliku: %s\n" + +#: src/cats/sql_create.c:1067 +msgid "Error fetching row for file=%s: ERR=%s\n" +msgstr "Błąd pobierania wiersza dla pliku=%s: ERR=%s\n" + +#: src/cats/sql_create.c:1083 +msgid "Create db Filename record %s failed. ERR=%s\n" +msgstr "Nieudane stworzenie rekordu Filename %s. ERR=%s\n" + +#: src/cats/sql_create.c:1144 +#, c-format +msgid "Attempt to put non-attributes into catalog. Stream=%d\n" +msgstr "Próba umieszczenia nie-atrybutów w katalogu. 
Strumień=%d\n" + +#: src/cats/sql_create.c:1155 +msgid "Can't Copy/Migrate job using BaseJob" +msgstr "Nie można Skopiować/Zmigrować zadania używając BaseJob" + +#: src/cats/sql_create.c:1246 src/cats/sql_get.c:1101 +msgid "ERR=JobIds are empty\n" +msgstr "ERR=JobIds są puste\n" + +#: src/cats/sql_list.c:64 +msgid "Query failed: %s\n" +msgstr "Nieudane zapytanie: %s\n" + +#: src/cats/sql_list.c:276 +msgid "These JobIds have copies as follows:\n" +msgstr "Te JobIds posiadają następujące kopie:\n" + +#: src/cats/sql_list.c:278 +msgid "The catalog contains copies as follows:\n" +msgstr "Katalog posiada następujące kopie:\n" + +#: src/cats/sql.c:66 +msgid "Driver type not specified in Catalog resource.\n" +msgstr "Nie wskazano rodzaju sterownika w zasobach Katalogu.\n" + +#: src/cats/sql.c:69 +msgid "Invalid driver type, must be \"dbi:\"\n" +msgstr "Niepoprawny rodzaj sterownika, powinien być \"dbi:\"\n" + +#: src/cats/sql.c:83 +msgid "Unknown database type: %s\n" +msgstr "Nieznany rodzaj bazy danych: %s\n" + +#: src/cats/sql.c:204 +#, c-format +msgid "" +"On db_name=%s, %s max_connections=%d is lower than Director MaxConcurentJobs=" +"%d\n" +msgstr "" + +#: src/cats/sql.c:245 +msgid "" +"query %s failed:\n" +"%s\n" +msgstr "" +"nieudane zapytanie %s:\n" +"%s\n" + +#: src/cats/sql.c:267 +msgid "" +"insert %s failed:\n" +"%s\n" +msgstr "nieudane wstawienie %s:\n" +"%s\n" + +#: src/cats/sql.c:281 +#, c-format +msgid "Insertion problem: affected_rows=%s\n" +msgstr "Problem ze wstawieniem: problematycznych_wierszy=%s\n" + +#: src/cats/sql.c:301 +msgid "" +"update %s failed:\n" +"%s\n" +msgstr "nieudana aktualizacja: %s\n" +"%s\n" + +#: src/cats/sql.c:311 +#, c-format +msgid "Update failed: affected_rows=%s for %s\n" +msgstr "Nieudana aktualizacja: problematycznych_wierszy=%s dla %s\n" + +#: src/cats/sql.c:332 +msgid "" +"delete %s failed:\n" +"%s\n" +msgstr "nieudane skasowanie %s:\n" +"%s\n" + +#: src/cats/sql.c:527 src/dird/catreq.c:413 src/dird/fd_cmds.c:718 +#: src/dird/fd_cmds.c:776 +msgid "Attribute create error. %s" +msgstr "Błąd stworzenia atrybutu. %s" + +#: src/cats/sql.c:654 +msgid "Path length is zero. File=%s\n" +msgstr "Długość ścieżki wynosi zero. Plik=%s\n" + +#: src/cats/sql.c:718 +msgid "No results to list.\n" +msgstr "Brak rezultatów do wylistowania.\n" + +#: src/cats/sql.c:844 +msgid "Could not init database batch connection" +msgstr "Nie można zainicjować wsadowego połączenia do bazy danych" + +#: src/cats/sql.c:850 +msgid "Could not open database \"%s\": ERR=%s\n" +msgstr "Nie można otworzyć bazy danych \"%s\": ERR=%s\n" + +#: src/cats/postgresql.c:85 +msgid "A user name for PostgreSQL must be supplied.\n" +msgstr "Nazwa użytkownika dla PostgreSQL powinna być dostarczona.\n" + +#: src/cats/postgresql.c:160 +#, c-format +msgid "Encoding error for database \"%s\". Wanted SQL_ASCII, got %s\n" +msgstr "" + +#: src/cats/postgresql.c:227 +msgid "" +"Unable to connect to PostgreSQL server. Database=%s User=%s\n" +"Possible causes: SQL server not running; password incorrect; max_connections " +"exceeded.\n" +msgstr "" +"Nie można połączyć się do serwera PostgreSQL. 
Baza danych=%s Użytkownik=%s\n" +"Moliwe przyczyny: nie działa serwer SQL; niepoprawne hasło; przekroczono max_connections.\n" + +#: src/cats/postgresql.c:308 +msgid "Pg client library must be thread-safe when using BatchMode.\n" +msgstr "Biblioteka kliencka Pg powinna by typu thread-safe jesli używamy trybu " +"wsadowego (BatchMode).\n" + +#: src/cats/postgresql.c:346 +msgid "PQescapeStringConn returned non-zero.\n" +msgstr "" + +#: src/cats/postgresql.c:652 +msgid "error fetching currval: %s\n" +msgstr "błąd pobierania curval: %s\n" + +#: src/cats/postgresql.c:717 src/cats/dbi.c:863 +msgid "error starting batch mode: %s" +msgstr "błąd uruchomienia trybu wsadowego: %s" + +#: src/cats/postgresql.c:748 src/cats/postgresql.c:754 +msgid "error ending batch mode: %s" +msgstr "błąd zakończenia trybu wsadowego: %s" + +#: src/cats/postgresql.c:803 +#, c-format +msgid "error copying in batch mode: %s" +msgstr "błąd kopiowania w trybie wsadowym: %s" + +#: src/cats/sql_find.c:97 src/cats/sql_find.c:126 src/cats/sql_find.c:176 +#, c-format +msgid "" +"Query error for start time request: ERR=%s\n" +"CMD=%s\n" +msgstr "" + +#: src/cats/sql_find.c:103 src/cats/sql_find.c:182 +msgid "No prior Full backup Job record found.\n" +msgstr "Nie znaleziono wcześniejszego rekordu Zadania Pełnego Backup'u.\n" + +#: src/cats/sql_find.c:115 +msgid "Unknown level=%d\n" +msgstr "Nieznany poziom=%d\n" + +#: src/cats/sql_find.c:132 +msgid "" +"No Job record found: ERR=%s\n" +"CMD=%s\n" +msgstr "" +"Nie znaleziono rekordu Zadania: ERR=%s\n" +"CMD=%s\n" + +#: src/cats/sql_find.c:277 +msgid "Unknown Job level=%d\n" +msgstr "Nieznany poziom Zadania=%d\n" + +#: src/cats/sql_find.c:287 +msgid "No Job found for: %s.\n" +msgstr "Nie znaleziono Zadania dla: %s.\n" + +#: src/cats/sql_find.c:298 +msgid "No Job found for: %s\n" +msgstr "Nie znaleziono Zadania dla: %s\n" + +#: src/cats/sql_find.c:375 +#, c-format +msgid "Request for Volume item %d greater than max %d or less than 1\n" +msgstr "Rządanie dla elementu Wolumenu %d większe od %d lub mniejsze niż 1\n" + +#: src/cats/sql_find.c:390 +msgid "No Volume record found for item %d.\n" +msgstr "Nie znaleziono rekordu Wolumenu dla elementu %d.\n" + +#: src/cats/sql_get.c:146 +msgid "Error fetching row: %s\n" +msgstr "Błąd pobierania rekordu: %s\n" + +#: src/cats/sql_get.c:153 +#, c-format +msgid "get_file_record want 1 got rows=%d PathId=%s FilenameId=%s\n" +msgstr "" + +#: src/cats/sql_get.c:161 +#, c-format +msgid "File record for PathId=%s FilenameId=%s not found.\n" +msgstr "" + +#: src/cats/sql_get.c:167 +msgid "File record not found in Catalog.\n" +msgstr "Brak rekordu Pliku w Katalogu.\n" + +#: src/cats/sql_get.c:193 +msgid "More than one Filename!: %s for file: %s\n" +msgstr "Więcej niż jedna nazwa pliku!: %s dla pliku %s\n" + +#: src/cats/sql_get.c:203 +#, c-format +msgid "Get DB Filename record %s found bad record: %d\n" +msgstr "" + +#: src/cats/sql_get.c:209 +msgid "Filename record: %s not found.\n" +msgstr "Rekord nazwy pliku: %s nie znaleziony.\n" + +#: src/cats/sql_get.c:213 +msgid "Filename record: %s not found in Catalog.\n" +msgstr "Rekord nazwy pliku: %s nie znaleziony w Katalogu.\n" + +#: src/cats/sql_get.c:255 +msgid "Get DB path record %s found bad record: %s\n" +msgstr "" + +#: src/cats/sql_get.c:268 +msgid "Path record: %s not found.\n" +msgstr "Rekord ścierzki: %s nie znaleziony.\n" + +#: src/cats/sql_get.c:272 +msgid "Path record: %s not found in Catalog.\n" +msgstr "Rekord ścierzki: %s nie znaleziony w Katalogu.\n" + +#: src/cats/sql_get.c:309 +msgid "No 
Job found for JobId %s\n" +msgstr "Brak Zadań dla JobIds dla %s\n" + +#: src/cats/sql_get.c:380 src/cats/sql_get.c:436 +msgid "No volumes found for JobId=%d\n" +msgstr "Nie znaleziono wolumenów dla JobId=%d\n" + +#: src/cats/sql_get.c:386 src/cats/sql_get.c:447 +msgid "Error fetching row %d: ERR=%s\n" +msgstr "" + +#: src/cats/sql_get.c:400 +msgid "No Volume for JobId %d found in Catalog.\n" +msgstr "Nie znaleziono Wolumenu dla JobId %d w Katalogu.\n" + +#: src/cats/sql_get.c:540 +msgid "Pool id select failed: ERR=%s\n" +msgstr "" + +#: src/cats/sql_get.c:577 +msgid "Client id select failed: ERR=%s\n" +msgstr "" + +#: src/cats/sql_get.c:621 +#, c-format +msgid "More than one Pool!: %s\n" +msgstr "" + +#: src/cats/sql_get.c:665 +msgid "Pool record not found in Catalog.\n" +msgstr "Brak rekordu Puli w Katalogu.\n" + +#: src/cats/sql_get.c:700 +msgid "More than one Client!: %s\n" +msgstr "Więcej niż jeden Klient!: %s\n" + +#: src/cats/sql_get.c:717 src/cats/sql_get.c:721 +msgid "Client record not found in Catalog.\n" +msgstr "Nie znaleziono rekordu Klienta w Katalogu.\n" + +#: src/cats/sql_get.c:746 +#, c-format +msgid "More than one Counter!: %d\n" +msgstr "Więcej niż jeden Licznik!: %d\n" + +#: src/cats/sql_get.c:751 +msgid "error fetching Counter row: %s\n" +msgstr "błąd pobierania wiersza Licznika: %s\n" + +#: src/cats/sql_get.c:771 +msgid "Counter record: %s not found in Catalog.\n" +msgstr "Rekord licznika: %s nie znaleziono w Katalogu.\n" + +#: src/cats/sql_get.c:808 +#, c-format +msgid "Error got %s FileSets but expected only one!\n" +msgstr "" + +#: src/cats/sql_get.c:813 +msgid "FileSet record \"%s\" not found.\n" +msgstr "Rekord Fileset \"%s\" nie znaleziony.\n" + +#: src/cats/sql_get.c:823 +msgid "FileSet record not found in Catalog.\n" +msgstr "Rekord Fileset nie znaleziony w Katalogu.\n" + +#: src/cats/sql_get.c:919 +msgid "Media id select failed: ERR=%s\n" +msgstr "" + +#: src/cats/sql_get.c:957 +msgid "query dbids failed: ERR=%s\n" +msgstr "nieudane zapytanie dbids: ERR=%s\n" + +#: src/cats/sql_get.c:1009 +msgid "More than one Volume!: %s\n" +msgstr "Więcej niż jeden Wolumen!: %s\n" + +#: src/cats/sql_get.c:1065 +msgid "Media record MediaId=%s not found.\n" +msgstr "Brak rekordu Medium MediaId=%s\n" + +#: src/cats/sql_get.c:1068 +msgid "Media record for Volume \"%s\" not found.\n" +msgstr "Brak rekordu Medium dla Wolumenu \"%s\"\n" + +#: src/cats/sql_get.c:1075 +msgid "Media record for MediaId=%u not found in Catalog.\n" +msgstr "Brak rekordu Medium dla MediaId=%u w Katalogu.\n" + +#: src/cats/sql_get.c:1078 +msgid "Media record for Vol=%s not found in Catalog.\n" +msgstr "Brak rekordu Medium dla Vol=%s w Katalogu.\n" + +#: src/cats/sql_delete.c:80 +msgid "No pool record %s exists\n" +msgstr "Rekord puli %s nie istnieje\n" + +#: src/cats/sql_delete.c:85 +msgid "Expecting one pool record, got %d\n" +msgstr "Oczekiwano jeden rekord puli, otrzymano %d\n" + +#: src/cats/sql_delete.c:91 +msgid "Error fetching row %s\n" +msgstr "Błąd pobierania wiersza %s\n" + +#: src/cats/sqlite.c:179 +#, c-format +msgid "Database %s does not exist, please create it.\n" +msgstr "Baza danych %s nie istnieje, proszę utworzyć ją.\n" + +#: src/cats/sqlite.c:209 +msgid "Unable to open Database=%s. ERR=%s\n" +msgstr "Nie mogę otworzyć Bazy danych=%s. 
ERR=%s\n" + +#: src/cats/sqlite.c:210 +msgid "unknown" +msgstr "nieznany" + +#: src/cats/sqlite.c:279 +msgid "SQLite3 client library must be thread-safe when using BatchMode.\n" +msgstr "Biblioteka kliencka SQLite3 powinna by typu thread-safe jesli używamy trybu " +"wsadowego (BatchMode).\n" + +#: src/cats/ingres.c:85 +msgid "A user name for Ingres must be supplied.\n" +msgstr "Nazwa użytkownika dla Ingres powinna być dostarczona.\n" + +#: src/cats/ingres.c:224 +msgid "" +"Unable to connect to Ingres server.\n" +"Database=%s User=%s\n" +"It is probably not running or your password is incorrect.\n" +msgstr "" +"Nie można połączyć się do serwera Ingres.\n" +"Baza danych=%s Użytkownik=%s\n" +"Najprawdopodobniej nie działa lub twoje hasło jest niepoprawne.\n" + +#: src/cats/dbi.c:104 +msgid "A dbi driver for DBI must be supplied.\n" +msgstr "Sterownik dbi dla trybu DBI musi być podany.\n" + +#: src/cats/dbi.c:130 +msgid "A user name for DBI must be supplied.\n" +msgstr "Nazwa użytkownika dla DBI powinna być dostarczona.\n" + +#: src/cats/dbi.c:234 +#, c-format +msgid "" +"Unable to locate the DBD drivers to DBI interface in: \n" +"db_driverdir=%s. It is probaly not found any drivers\n" +msgstr "" + +#: src/cats/dbi.c:300 +msgid "" +"Unable to connect to DBI interface. Type=%s Database=%s User=%s\n" +"Possible causes: SQL server not running; password incorrect; max_connections " +"exceeded.\n" +msgstr "" +"Nie można połączyć się do interfejsu DBI. Typ=%s Baza danych=%s Użytkownik=%s\n" +"Możliwe przyczyny: Nie działa SQL serwer; hasło jest niepoprawne; przekroczono max_connections.\n" + +#: src/cats/dbi.c:1044 +msgid "error inserting batch mode: %s" +msgstr "" + +#: src/lib/edit.c:463 +msgid "Illegal character \"%c\" in name.\n" +msgstr "Nieprawidłowy znak \"%c\" w nazwie.\n" + +#: src/lib/edit.c:470 +msgid "Name too long.\n" +msgstr "Nazwa za długa.\n" + +#: src/lib/edit.c:476 src/dird/ua_label.c:654 src/dird/ua_cmds.c:360 +msgid "Volume name must be at least one character long.\n" +msgstr "Wolumen musi mieć przynajmniej jeden znak.\n" + +#: src/lib/pythonlib.c:118 +msgid "Could not initialize Python\n" +msgstr "Nie można zainicjować Pythona\n" + +#: src/lib/pythonlib.c:123 +msgid "Could not Run Python string %s\n" +msgstr "" + +#: src/lib/pythonlib.c:135 +msgid "Could not initialize Python Job type.\n" +msgstr "" + +#: src/lib/pythonlib.c:140 +#, c-format +msgid "Could not import Python script %s/%s. Python disabled.\n" +msgstr "" + +#: src/lib/pythonlib.c:242 +msgid "Could not create Python Job Object.\n" +msgstr "Nie można stworzyć Obiektu Pythona Zadanie.\n" + +#: src/lib/pythonlib.c:255 src/lib/pythonlib.c:279 +msgid "Python function \"%s\" not found.\n" +msgstr "Funkcja Pythona \"%s\" nie znaleziona.\n" + +#: src/lib/pythonlib.c:294 +msgid "Unknown Python daemon event %s\n" +msgstr "Nieznane zdarzenie demona Pythona %s\n" + +#: src/lib/pythonlib.c:319 +msgid "Unable to initialize the Python lock. ERR=%s\n" +msgstr "Nie można zainicjalizowa blokady Pythona. 
ERR=%s\n" + +#: src/lib/rwlock.c:304 +msgid "rwl_writeunlock called too many times.\n" +msgstr "rwl_writeunlock wywołany zbyt wiele razy.\n" + +#: src/lib/rwlock.c:309 +msgid "rwl_writeunlock by non-owner.\n" +msgstr "" + +#: src/lib/rwlock.c:434 +#, c-format +msgid "Thread %d found unchanged elements %d times\n" +msgstr "" + +#: src/lib/rwlock.c:504 +#, c-format +msgid "%02d: interval %d, writes %d, reads %d\n" +msgstr "" + +#: src/lib/rwlock.c:514 +#, c-format +msgid "data %02d: value %d, %d writes\n" +msgstr "" + +#: src/lib/rwlock.c:519 +#, c-format +msgid "Total: %d thread writes, %d data writes\n" +msgstr "" + +#: src/lib/rwlock.c:591 +msgid "Try write lock" +msgstr "" + +#: src/lib/rwlock.c:597 +msgid "Try read lock" +msgstr "" + +#: src/lib/rwlock.c:653 +msgid "Create thread" +msgstr "Stwórz wątek" + +#: src/lib/rwlock.c:663 +msgid "Join thread" +msgstr "Dołącz wątek" + +#: src/lib/rwlock.c:665 +#, c-format +msgid "%02d: interval %d, updates %d, r_collisions %d, w_collisions %d\n" +msgstr "" + +#: src/lib/rwlock.c:677 +#, c-format +msgid "data %02d: value %d, %d updates\n" +msgstr "" + +#: src/lib/lockmgr.c:65 +#, c-format +msgid "ASSERT failed at %s:%i: %s\n" +msgstr "" + +#: src/lib/lockmgr.c:70 +#, c-format +msgid "ASSERT failed at %s:%i: %s \n" +msgstr "" + +#: src/lib/lockmgr.c:100 +msgid "Mutex lock failure. ERR=%s\n" +msgstr "Błąd blokowania muteksa. ERR=%s\n" + +#: src/lib/lockmgr.c:110 +msgid "Mutex unlock failure. ERR=%s\n" +msgstr "Błąd odblokowania muteksa. ERR=%s\n" + +#: src/lib/lockmgr.c:275 src/lib/lockmgr.c:611 src/lib/lockmgr.c:639 +#: src/lib/jcr.c:319 +msgid "pthread key create failed: ERR=%s\n" +msgstr "nieudane utworzenie klucza pthread: ERR=%s\n" + +#: src/lib/lockmgr.c:623 +msgid "pthread_create failed: ERR=%s\n" +msgstr "nieudane pthread_create: ERR=%s\n" + +#: src/lib/berrno.c:64 +msgid "Child exited normally." +msgstr "" + +#: src/lib/berrno.c:71 +msgid "Unknown error during program execvp" +msgstr "" + +#: src/lib/berrno.c:74 +#, c-format +msgid "Child exited with code %d" +msgstr "" + +#: src/lib/berrno.c:82 +#, c-format +msgid "Child died from signal %d: %s" +msgstr "" + +#: src/lib/berrno.c:88 +msgid "Invalid errno. No error message possible." +msgstr "" + +#: src/lib/priv.c:68 +msgid "Could not find userid=%s: ERR=%s\n" +msgstr "Nie można znaleźć userid=%s: ERR=%s\n" + +#: src/lib/priv.c:74 +msgid "Could not find password entry. ERR=%s\n" +msgstr "Nie można znaleźć elementu hasła. 
ERR=%s\n" + +#: src/lib/priv.c:87 +msgid "Could not find group=%s: ERR=%s\n" +msgstr "Nie można znaleźć grupy=%s: ERR=%s\n" + +#: src/lib/priv.c:95 +msgid "Could not initgroups for group=%s, userid=%s: ERR=%s\n" +msgstr "Nie można zainicjować grup dla group=%s, userid=%s: ERR=%s\n" + +#: src/lib/priv.c:98 +msgid "Could not initgroups for userid=%s: ERR=%s\n" +msgstr "Nie można zainicjować grup dla userid=%s: ERR=%s\n" + +#: src/lib/priv.c:105 +msgid "Could not set group=%s: ERR=%s\n" +msgstr "Nie można ustawić group=%s: ERR=%s\n" + +#: src/lib/priv.c:115 +msgid "prctl failed: ERR=%s\n" +msgstr "nieudany prctl: ERR=%s\n" + +#: src/lib/priv.c:119 +msgid "setreuid failed: ERR=%s\n" +msgstr "nieudane setreuid: ERR=%s\n" + +#: src/lib/priv.c:123 +msgid "cap_from_text failed: ERR=%s\n" +msgstr "nieudane cap_from_text: ERR=%s\n" + +#: src/lib/priv.c:127 +msgid "cap_set_proc failed: ERR=%s\n" +msgstr "nieudane cap_set_proc: ERR=%s\n" + +#: src/lib/priv.c:131 +msgid "Keep readall caps not implemented this OS or missing libraries.\n" +msgstr "" + +#: src/lib/priv.c:135 +msgid "Could not set specified userid: %s\n" +msgstr "Nie można ustawić wskazanego userid: %s\n" + +#: src/lib/jcr.c:230 src/lib/util.c:356 src/dird/ua_run.c:790 +msgid "Backup" +msgstr "Backup" + +#: src/lib/jcr.c:232 +msgid "Verifying" +msgstr "Weryfikowanie" + +#: src/lib/jcr.c:234 +msgid "Restoring" +msgstr "Odtwarzanie" + +#: src/lib/jcr.c:236 +msgid "Archiving" +msgstr "Archiwizowanie" + +#: src/lib/jcr.c:238 +msgid "Copying" +msgstr "Kopiowanie" + +#: src/lib/jcr.c:240 +msgid "Migration" +msgstr "Migracja" + +#: src/lib/jcr.c:242 +msgid "Scanning" +msgstr "Skanowanie" + +#: src/lib/jcr.c:244 +msgid "Unknown operation" +msgstr "Nieznana operacja" + +#: src/lib/jcr.c:253 +msgid "backup" +msgstr "backup" + +#: src/lib/jcr.c:255 +msgid "verified" +msgstr "zweryfikowany" + +#: src/lib/jcr.c:255 +msgid "verify" +msgstr "zweryfikuj" + +#: src/lib/jcr.c:257 +msgid "restored" +msgstr "odtwarzony" + +#: src/lib/jcr.c:257 +msgid "restore" +msgstr "odtwarzanie" + +#: src/lib/jcr.c:259 +msgid "archived" +msgstr "zarchiwizowany" + +#: src/lib/jcr.c:259 +msgid "archive" +msgstr "archiwizacja" + +#: src/lib/jcr.c:261 +msgid "copied" +msgstr "skopiowany" + +#: src/lib/jcr.c:261 +msgid "copy" +msgstr "kopia" + +#: src/lib/jcr.c:263 +msgid "migrated" +msgstr "zmigrowany" + +#: src/lib/jcr.c:263 +msgid "migrate" +msgstr "migracja" + +#: src/lib/jcr.c:265 +msgid "scanned" +msgstr "zeskanowany" + +#: src/lib/jcr.c:265 +msgid "scan" +msgstr "skanuj" + +#: src/lib/jcr.c:267 +msgid "unknown action" +msgstr "nieznana akcja" + +#: src/lib/jcr.c:341 +msgid "pthread_once failed. ERR=%s\n" +msgstr "nieudane pthread_once. ERR=%s\n" + +#: src/lib/jcr.c:349 +msgid "Could not init msg_queue mutex. ERR=%s\n" +msgstr "Nie można zainicjować muteksa msg_queue. 
ERR=%s\n" + +#: src/lib/jcr.c:401 +msgid "NULL jcr.\n" +msgstr "NULL jcr.\n" + +#: src/lib/jcr.c:500 +#, c-format +msgid "JCR use_count=%d JobId=%d\n" +msgstr "JCR use_count=%d JobId=%d\n" + +#: src/lib/jcr.c:596 +msgid "pthread_setspecific failed: ERR=%s\n" +msgstr "nieudane pthread_setspecific: ERR=%s\n" + +#: src/lib/jcr.c:1016 +#, c-format +msgid "" +"Watchdog sending kill after %d secs to thread stalled reading Storage " +"daemon.\n" +msgstr "" +"Watchdog wysłał kill po %d sekundach do wstrzymanego wątku Demona Przechowywania podczas odczytu.\n" + +#: src/lib/jcr.c:1028 +#, c-format +msgid "" +"Watchdog sending kill after %d secs to thread stalled reading File daemon.\n" +msgstr "" + +#: src/lib/jcr.c:1040 +#, c-format +msgid "" +"Watchdog sending kill after %d secs to thread stalled reading Director.\n" +msgstr "" + +#: src/lib/signal.c:67 +msgid "Invalid signal number" +msgstr "Niepoprawny numer sygnału" + +#: src/lib/signal.c:151 src/lib/signal.c:153 +#, c-format +msgid "Bacula interrupted by signal %d: %s\n" +msgstr "" + +#: src/lib/signal.c:166 +#, c-format +msgid "Kaboom! %s, %s got signal %d - %s. Attempting traceback.\n" +msgstr "" + +#: src/lib/signal.c:168 +#, c-format +msgid "Kaboom! exepath=%s\n" +msgstr "Kaboom! exepath=%s\n" + +#: src/lib/signal.c:202 +msgid "Fork error: ERR=%s\n" +msgstr "Błąd funkcji Fork: ERR=%s\n" + +#: src/lib/signal.c:210 +#, c-format +msgid "Calling: %s %s %s %s\n" +msgstr "Wywoanie: %s %s %s %s\n" + +#: src/lib/signal.c:214 +msgid "execv: %s failed: ERR=%s\n" +msgstr "nieudane execv: %s: ERR=%s\n" + +#: src/lib/signal.c:235 +#, c-format +msgid "It looks like the traceback worked ...\n" +msgstr "" + +#: src/lib/signal.c:289 +#, c-format +msgid "BA_NSIG too small (%d) should be (%d)\n" +msgstr "" + +#: src/lib/signal.c:295 +msgid "UNKNOWN SIGNAL" +msgstr "NIEZNANY SYGNAŁ" + +#: src/lib/signal.c:296 +msgid "Hangup" +msgstr "Zawieszenie" + +#: src/lib/signal.c:297 +msgid "Interrupt" +msgstr "Przerwanie" + +#: src/lib/signal.c:298 +msgid "Quit" +msgstr "wyjdź" + +#: src/lib/signal.c:299 +msgid "Illegal instruction" +msgstr "Niepoprawna instrukcja" + +#: src/lib/signal.c:300 +msgid "Trace/Breakpoint trap" +msgstr "" + +#: src/lib/signal.c:301 +msgid "Abort" +msgstr "Porzuć" + +#: src/lib/signal.c:303 +msgid "EMT instruction (Emulation Trap)" +msgstr "" + +#: src/lib/signal.c:306 +msgid "IOT trap" +msgstr "" + +#: src/lib/signal.c:308 +msgid "BUS error" +msgstr "Błąd szyny (Bus error)" + +#: src/lib/signal.c:309 +msgid "Floating-point exception" +msgstr "" + +#: src/lib/signal.c:310 +msgid "Kill, unblockable" +msgstr "Kill, nieblokowane" + +#: src/lib/signal.c:311 +msgid "User-defined signal 1" +msgstr "" + +#: src/lib/signal.c:312 +msgid "Segmentation violation" +msgstr "Naruszenie segmentacji" + +#: src/lib/signal.c:313 +msgid "User-defined signal 2" +msgstr "" + +#: src/lib/signal.c:314 +msgid "Broken pipe" +msgstr "" + +#: src/lib/signal.c:315 +msgid "Alarm clock" +msgstr "" + +#: src/lib/signal.c:316 +msgid "Termination" +msgstr "Zakończenie" + +#: src/lib/signal.c:318 +msgid "Stack fault" +msgstr "" + +#: src/lib/signal.c:320 +msgid "Child status has changed" +msgstr "" + +#: src/lib/signal.c:321 +msgid "Continue" +msgstr "Kontynuacja" + +#: src/lib/signal.c:322 +msgid "Stop, unblockable" +msgstr "Stop, nieblokowane" + +#: src/lib/signal.c:323 +msgid "Keyboard stop" +msgstr "" + +#: src/lib/signal.c:324 +msgid "Background read from tty" +msgstr "" + +#: src/lib/signal.c:325 +msgid "Background write to tty" +msgstr "" + +#: src/lib/signal.c:326 
+msgid "Urgent condition on socket" +msgstr "" + +#: src/lib/signal.c:327 +msgid "CPU limit exceeded" +msgstr "" + +#: src/lib/signal.c:328 +msgid "File size limit exceeded" +msgstr "" + +#: src/lib/signal.c:329 +msgid "Virtual alarm clock" +msgstr "" + +#: src/lib/signal.c:330 +msgid "Profiling alarm clock" +msgstr "" + +#: src/lib/signal.c:331 +msgid "Window size change" +msgstr "" + +#: src/lib/signal.c:332 +msgid "I/O now possible" +msgstr "" + +#: src/lib/signal.c:334 +msgid "Power failure restart" +msgstr "" + +#: src/lib/signal.c:337 +msgid "No runnable lwp" +msgstr "" + +#: src/lib/signal.c:340 +msgid "SIGLWP special signal used by thread library" +msgstr "" + +#: src/lib/signal.c:343 +msgid "Checkpoint Freeze" +msgstr "" + +#: src/lib/signal.c:346 +msgid "Checkpoint Thaw" +msgstr "" + +#: src/lib/signal.c:349 +msgid "Thread Cancellation" +msgstr "" + +#: src/lib/signal.c:352 +msgid "Resource Lost (e.g. record-lock lost)" +msgstr "" + +#: src/lib/bnet_server.c:109 +#, c-format +msgid "Cannot open stream socket. ERR=%s. Current %s All %s\n" +msgstr "" + +#: src/lib/bnet_server.c:122 +#, c-format +msgid "Cannot set SO_REUSEADDR on socket: %s\n" +msgstr "" + +#: src/lib/bnet_server.c:131 +msgid "Cannot bind port %d: ERR=%s: Retrying ...\n" +msgstr "Nie można podłączyć się do portu %d: ERR=%s: Powtarzanie ...\n" + +#: src/lib/bnet_server.c:136 +msgid "Cannot bind port %d: ERR=%s.\n" +msgstr "Nie można podłączyć się do portu %d: ERR=%s.\n" + +#: src/lib/bnet_server.c:147 +msgid "Could not init client queue: ERR=%s\n" +msgstr "Nie można zainicjować kolejki klienta: ERR=%s\n" + +#: src/lib/bnet_server.c:166 +msgid "Error in select: %s\n" +msgstr "Błąd w funkcji select: %s\n" + +#: src/lib/bnet_server.c:187 +#, c-format +msgid "Connection from %s:%d refused by hosts.access\n" +msgstr "" + +#: src/lib/bnet_server.c:202 src/lib/bsock.c:249 src/lib/bsock.c:285 +#, c-format +msgid "Cannot set SO_KEEPALIVE on socket: %s\n" +msgstr "" + +#: src/lib/bnet_server.c:213 +msgid "Could not create client BSOCK.\n" +msgstr "Nie można stworzyć BSOCK klienta.\n" + +#: src/lib/bnet_server.c:220 +msgid "Could not add job to client queue: ERR=%s\n" +msgstr "Nie można dodać zadania do kolejki klienta: ERR=%s\n" + +#: src/lib/bnet_server.c:237 +msgid "Could not destroy client queue: ERR=%s\n" +msgstr "Nie można zniszczyć kolejki klienta: ERR=%s\n" + +#: src/lib/bpipe.c:361 src/lib/bpipe.c:444 +msgid "Program killed by Bacula (timeout)\n" +msgstr "Program zabity przez Bacula (brak czasu)\n" + +#: src/lib/bnet.c:116 +msgid "Attr spool write error. ERR=%s\n" +msgstr "Błąd zapisywania spool attr. ERR=%s\n" + +#: src/lib/bnet.c:242 src/lib/bnet.c:283 +msgid "TLS connection initialization failed.\n" +msgstr "Nieudana inicjalizacja połączenia TLS.\n" + +#: src/lib/bnet.c:250 +msgid "TLS Negotiation failed.\n" +msgstr "Nieudana negocjacja TLS.\n" + +#: src/lib/bnet.c:256 src/lib/bnet.c:298 +msgid "" +"TLS certificate verification failed. Peer certificate did not match a " +"required commonName\n" +msgstr "" + +#: src/lib/bnet.c:305 +#, c-format +msgid "" +"TLS host certificate verification failed. Host name \"%s\" did not match " +"presented certificate\n" +msgstr "" + +#: src/lib/bnet.c:322 +msgid "TLS enabled but not configured.\n" +msgstr "Włączone TLS lecz nie zostao skonfigurowane.\n" + +#: src/lib/bnet.c:328 +msgid "TLS enable but not configured.\n" +msgstr "Włączone TLS lecz nie zostao skonfigurowane.\n" + +#: src/lib/bnet.c:386 +msgid "No problem." +msgstr "Brak problemu." 
+ +#: src/lib/bnet.c:389 +msgid "Authoritative answer for host not found." +msgstr "Autorytatywna odpowiedź dla nie znaleziono hosta." + +#: src/lib/bnet.c:392 +msgid "Non-authoritative for host not found, or ServerFail." +msgstr "Nie autorytatywna odpowiedź dla nie znaleziono hosta lub Błąd Serwera." + +#: src/lib/bnet.c:395 +msgid "Non-recoverable errors, FORMERR, REFUSED, or NOTIMP." +msgstr "" + +#: src/lib/bnet.c:398 +msgid "Valid name, no data record of resquested type." +msgstr "" + +#: src/lib/bnet.c:401 +msgid "Unknown error." +msgstr "Nieznany błąd." + +#: src/lib/bnet.c:658 +msgid "Unknown sig %d" +msgstr "Nieznany sig %d." + +#: src/lib/res.c:66 +msgid "rwl_writelock failure at %s:%d: ERR=%s\n" +msgstr "nieudane rwl_writelock przy %s:%d: ERR=%s\n" + +#: src/lib/res.c:76 +msgid "rwl_writeunlock failure at %s:%d:. ERR=%s\n" +msgstr "nieudane rwl_writeunlock przy %s:%d:. ERR=%s\n" + +#: src/lib/btimers.c:265 +msgid "stop_btimer called with NULL btimer_id\n" +msgstr "wywołano stop_btimer z btimer_id równym NULL\n" + +#: src/lib/daemon.c:66 +msgid "Cannot fork to become daemon: ERR=%s\n" +msgstr "Nie można wykonać fork aby stać się demonem: ERR=%s\n" + +#: src/lib/watchdog.c:83 +msgid "Unable to initialize watchdog lock. ERR=%s\n" +msgstr "Nie można zainicjalizowa blokady watchdog. ERR=%s\n" + +#: src/lib/watchdog.c:180 +msgid "BUG! register_watchdog called before start_watchdog\n" +msgstr "BUG! register_watchdog wywołanie przed start_watchdog\n" + +#: src/lib/watchdog.c:183 +#, c-format +msgid "BUG! Watchdog %p has NULL callback\n" +msgstr "BUG! Watchdog %p posiada pusty callback\n" + +#: src/lib/watchdog.c:186 +#, c-format +msgid "BUG! Watchdog %p has zero interval\n" +msgstr "BUG! Watchdog %p posiada zerowy interwał\n" + +#: src/lib/watchdog.c:206 +msgid "BUG! unregister_watchdog_unlocked called before start_watchdog\n" +msgstr "BUG! unregister_watchdog_unlocked wywołane przed start_watchdog\n" + +#: src/lib/watchdog.c:326 +msgid "rwl_writelock failure. ERR=%s\n" +msgstr "nieudane rwl_writelock. ERR=%s\n" + +#: src/lib/watchdog.c:341 +msgid "rwl_writeunlock failure. ERR=%s\n" +msgstr "nieudane rwl_writeunlock. ERR=%s\n" + +#: src/lib/cram-md5.c:111 src/lib/cram-md5.c:139 +msgid "1999 Authorization failed.\n" +msgstr "1999 Nieudana autoryzacja.\n" + +#: src/lib/openssl.c:122 src/lib/openssl.c:181 src/stored/stored_conf.c:634 +#: src/stored/acquire.c:602 src/stored/dev.c:242 src/stored/dev.c:260 +#: src/stored/dev.c:266 src/stored/dev.c:277 +msgid "Unable to init mutex: ERR=%s\n" +msgstr "Nie mogę zainicjować muteksa: ERR=%s\n" + +#: src/lib/openssl.c:143 src/lib/openssl.c:214 +msgid "Unable to destroy mutex: ERR=%s\n" +msgstr "Nie mogę zniszczyć muteksa: ERR=%s\n" + +#: src/lib/smartall.c:148 src/lib/smartall.c:257 src/lib/smartall.c:272 +msgid "Out of memory\n" +msgstr "Brak pamięci\n" + +#: src/lib/smartall.c:153 +msgid "Too much memory used." +msgstr "Użyto zbyt dużo pamięci." 
+ +#: src/lib/smartall.c:182 +#, c-format +msgid "Attempt to free NULL called from %s:%d\n" +msgstr "Próba zwolnienia pustego wskaźnika, wywołanie z %s:%d\n" + +#: src/lib/smartall.c:196 +#, c-format +msgid "double free from %s:%d\n" +msgstr "podwójne zwolnienie wskaźnika z %s:%d\n" + +#: src/lib/smartall.c:204 +#, c-format +msgid "qp->qnext->qprev != qp called from %s:%d\n" +msgstr "qp->qnext->qprev != qp wywołanie z %s:%d\n" + +#: src/lib/smartall.c:208 +#, c-format +msgid "qp->qprev->qnext != qp called from %s:%d\n" +msgstr "" + +#: src/lib/smartall.c:217 +#, c-format +msgid "Buffer overrun called from %s:%d\n" +msgstr "" + +#: src/lib/smartall.c:294 +#, c-format +msgid "sm_realloc size: %d\n" +msgstr "" + +#: src/lib/smartall.c:332 +#, c-format +msgid "sm_realloc %d at %p from %s:%d\n" +msgstr "" + +#: src/lib/smartall.c:394 +#, c-format +msgid "" +"\n" +"Orphaned buffers exist. Dump terminated following\n" +" discovery of bad links in chain of orphaned buffers.\n" +" Buffer address with bad links: %p\n" +msgstr "" + +#: src/lib/smartall.c:406 +#, c-format +msgid "%s buffer: %s %6u bytes buf=%p allocated at %s:%d\n" +msgstr "" + +#: src/lib/smartall.c:442 +#, c-format +msgid "Damaged buffer found. Called from %s:%d\n" +msgstr "" + +#: src/lib/smartall.c:475 +msgid "" +"\n" +"Damaged buffers found at %s:%d\n" +msgstr "" +"\n" +"Znaleziono zepsuty bufor przy %s:%d\n" + +#: src/lib/smartall.c:478 +#, c-format +msgid " discovery of bad prev link.\n" +msgstr "" + +#: src/lib/smartall.c:481 +#, c-format +msgid " discovery of bad next link.\n" +msgstr "" + +#: src/lib/smartall.c:484 +#, c-format +msgid " discovery of data overrun.\n" +msgstr "" + +#: src/lib/smartall.c:487 +#, c-format +msgid " NULL pointer.\n" +msgstr "" + +#: src/lib/smartall.c:493 +#, c-format +msgid " Buffer address: %p\n" +msgstr "" + +#: src/lib/smartall.c:500 +#, c-format +msgid "Damaged buffer: %6u bytes allocated at line %d of %s %s\n" +msgstr "" + +#: src/lib/runscript.c:238 +#, c-format +msgid "%s: run %s \"%s\"\n" +msgstr "" + +#: src/lib/runscript.c:247 +#, c-format +msgid "Runscript: %s could not execute. ERR=%s\n" +msgstr "" + +#: src/lib/runscript.c:256 +msgid "%s: %s\n" +msgstr "%s: %s\n" + +#: src/lib/runscript.c:261 +#, c-format +msgid "Runscript: %s returned non-zero status=%d. 
ERR=%s\n" +msgstr "" + +#: src/lib/runscript.c:299 src/dird/dird_conf.c:719 +msgid " --> RunScript\n" +msgstr "" + +#: src/lib/runscript.c:300 src/dird/dird_conf.c:720 +msgid " --> Command=%s\n" +msgstr " --> Komenda=%s\n" + +#: src/lib/runscript.c:301 src/dird/dird_conf.c:721 +msgid " --> Target=%s\n" +msgstr " --> Cel=%s\n" + +#: src/lib/runscript.c:302 src/dird/dird_conf.c:722 +msgid " --> RunOnSuccess=%u\n" +msgstr " --> RunOnSuccess=%s\n" + +#: src/lib/runscript.c:303 src/dird/dird_conf.c:723 +msgid " --> RunOnFailure=%u\n" +msgstr " --> RunOnFailure=%s\n" + +#: src/lib/runscript.c:304 src/dird/dird_conf.c:724 +#, c-format +msgid " --> FailJobOnError=%u\n" +msgstr " --> FailJobOnError=%u\n" + +#: src/lib/runscript.c:305 src/dird/dird_conf.c:725 +msgid " --> RunWhen=%u\n" +msgstr " --> RunWhen=%s\n" + +#: src/lib/message.c:318 src/lib/message.c:328 +msgid "Could not open console message file %s: ERR=%s\n" +msgstr "Nie można otworzyć pliku komunikatów konsoli %s: ERR=%s\n" + +#: src/lib/message.c:333 +#, fuzzy, c-format +msgid "Could not get con mutex: ERR=%s\n" +msgstr "Nie można zainicjować kolejki zada: ERR=%s\n" + +#: src/lib/message.c:438 +msgid "Bacula Message" +msgstr "Komunikat Bacula" + +#: src/lib/message.c:442 +#, fuzzy, c-format +msgid "open mail pipe %s failed: ERR=%s\n" +msgstr "nieudane połączenie z programem mail: ERR=%s\n" + +#: src/lib/message.c:502 +#, fuzzy, c-format +msgid "open mail pipe failed.\n" +msgstr "nieudane połączenie z programem mail.\n" + +#: src/lib/message.c:514 +#, fuzzy, c-format +msgid "close error: ERR=%s\n" +msgstr "Przesunicie do %s na: %s: ERR=%s\n" + +#: src/lib/message.c:525 +#, c-format +msgid "Mail prog: %s" +msgstr "" + +#: src/lib/message.c:534 +#, c-format +msgid "" +"Mail program terminated in error.\n" +"CMD=%s\n" +"ERR=%s\n" +msgstr "" +"Program Mail zakończony z błędem.\n" +"CMD=%s\n" +"ERR=%s\n" + +#: src/lib/message.c:631 +msgid "fopen %s failed: ERR=%s\n" +msgstr "nieudane fopen %s: ERR=%s\n" + +#: src/lib/message.c:770 +#, c-format +msgid "" +"Msg delivery error: Operator mail program terminated in error.\n" +"CMD=%s\n" +"ERR=%s\n" +msgstr "" +"Błąd dostarczenia komunikatu: Program operatora mail zakończony z błędem.\n" +"CMD=%s\n" +"ERR=%s\n" + +#: src/lib/message.c:788 +#, c-format +msgid "Msg delivery error: fopen %s failed: ERR=%s\n" +msgstr "Błąd w dostarczeniu komunikatu: nieudane fopen %s: ERR=%s\n" + +#: src/lib/message.c:1091 +#, c-format +msgid "%s: ABORTING due to ERROR in %s:%d\n" +msgstr "" + +#: src/lib/message.c:1095 +#, c-format +msgid "%s: ERROR TERMINATION at %s:%d\n" +msgstr "" + +#: src/lib/message.c:1100 +#, c-format +msgid "%s: Fatal Error because: " +msgstr "" + +#: src/lib/message.c:1102 +#, c-format +msgid "%s: Fatal Error at %s:%d because:\n" +msgstr "" + +#: src/lib/message.c:1106 +msgid "%s: ERROR: " +msgstr "%s: BŁĄD: " + +#: src/lib/message.c:1108 +#, c-format +msgid "%s: ERROR in %s:%d " +msgstr "%s: BŁĄD w %s:%d " + +#: src/lib/message.c:1111 +#, c-format +msgid "%s: Warning: " +msgstr "%s: Ostrzeżenie: " + +#: src/lib/message.c:1114 +#, c-format +msgid "%s: Security violation: " +msgstr "%s: Naruszenie bezpieczeństwa: " + +#: src/lib/message.c:1189 +#, c-format +msgid "%s ABORTING due to ERROR\n" +msgstr "" + +#: src/lib/message.c:1192 +#, c-format +msgid "%s ERROR TERMINATION\n" +msgstr "" + +#: src/lib/message.c:1195 +msgid "%s JobId %u: Fatal error: " +msgstr "%s JobId %u: Błąd krytyczny: " + +#: src/lib/message.c:1201 +msgid "%s JobId %u: Error: " +msgstr "%s JobId %u: Błąd: " + +#: 
src/lib/message.c:1207 +msgid "%s JobId %u: Warning: " +msgstr "%s JobId %u: Ostrzeżenie: " + +#: src/lib/message.c:1213 +#, c-format +msgid "%s JobId %u: Security violation: " +msgstr "%s JobId %u: Naruszenie bezpieczeństwa: " + +#: src/lib/bsys.c:216 src/lib/bsys.c:233 src/lib/bsys.c:257 src/lib/bsys.c:270 +msgid "Out of memory: ERR=%s\n" +msgstr "Brak pamięci: ERR=%s\n" + +#: src/lib/bsys.c:312 +msgid "Buffer overflow.\n" +msgstr "Przepełnienie bufora.\n" + +#: src/lib/bsys.c:378 +msgid "Bad errno" +msgstr "Zły errno" + +#: src/lib/bsys.c:393 +#, c-format +msgid "Memset for %d bytes at %s:%d\n" +msgstr "Memset dla %d bajtów przy %s:%d\n" + +#: src/lib/bsys.c:423 +msgid "Cannot open pid file. %s ERR=%s\n" +msgstr "Nie można otworzyć pliku pid. %s ERR=%s\n" + +#: src/lib/bsys.c:438 +#, c-format +msgid "" +"%s is already running. pid=%d\n" +"Check file %s\n" +msgstr "" +"%s jest już uruchomiony. pid=%d\n" +"Sprawdź plik %s\n" + +#: src/lib/bsys.c:452 +msgid "Could not open pid file. %s ERR=%s\n" +msgstr "Nie można otworzyć pliku pid. %s ERR=%s\n" + +#: src/lib/bsys.c:560 +msgid "Could not create state file. %s ERR=%s\n" +msgstr "Nie można stworzyć pliku stanu. %s ERR=%s\n" + +#: src/lib/bsys.c:579 +#, fuzzy, c-format +msgid "Write final hdr error: ERR=%s\n" +msgstr "Błąd zapisu . ERR=%s\n" + +#: src/lib/lex.c:93 src/wx-console/console_thread.cpp:212 +#, c-format +msgid "Problem probably begins at line %d.\n" +msgstr "Problem najprawdopodobniej zaczyna się od linii %d.\n" + +#: src/lib/lex.c:98 src/wx-console/console_thread.cpp:217 +msgid "" +"Config error: %s\n" +" : line %d, col %d of file %s\n" +"%s\n" +"%s" +msgstr "" +"Błąd konfiguracji: %s\n" +" : Linia %d, kolumna %d w pliku %s\n" +"%s\n" +"%s" + +#: src/lib/lex.c:102 +#, fuzzy, c-format +msgid "Config error: %s\n" +msgstr "Błąd rekordu danych. ERR=%s\n" + +#: src/lib/lex.c:131 +msgid "Close of NULL file\n" +msgstr "" + +#: src/lib/lex.c:226 +msgid "" +"get_char: called after EOF. You may have a open double quote without the " +"closing double quote.\n" +msgstr "" + +#: src/lib/lex.c:269 +#, c-format +msgid "Config token too long, file: %s, line %d, begins at line %d\n" +msgstr "" + +#: src/lib/lex.c:293 +#, fuzzy +msgid "none" +msgstr "*brak*" + +#: src/lib/lex.c:294 +msgid "comment" +msgstr "" + +#: src/lib/lex.c:295 +msgid "number" +msgstr "" + +#: src/lib/lex.c:296 +msgid "ip_addr" +msgstr "" + +#: src/lib/lex.c:297 +msgid "identifier" +msgstr "" + +#: src/lib/lex.c:298 +#, fuzzy +msgid "string" +msgstr "Odtwarzanie..." + +#: src/lib/lex.c:299 +msgid "quoted_string" +msgstr "" + +#: src/lib/lex.c:300 +#, fuzzy +msgid "include" +msgstr "Od" + +#: src/lib/lex.c:301 +msgid "include_quoted_string" +msgstr "" + +#: src/lib/lex.c:302 +msgid "UTF-8 Byte Order Mark" +msgstr "" + +#: src/lib/lex.c:303 +msgid "UTF-16le Byte Order Mark" +msgstr "" + +#: src/lib/lex.c:341 src/lib/lex.c:347 src/lib/lex.c:358 src/lib/lex.c:364 +#, fuzzy, c-format +msgid "expected a positive integer number, got: %s" +msgstr "Oczekiwano dodatniej wartości całkowitej, wprowadzono: %s\n" + +#: src/lib/lex.c:474 +msgid "" +"This config file appears to be in an unsupported Unicode format (UTF-16be). " +"Please resave as UTF-8\n" +msgstr "" + +#: src/lib/lex.c:613 src/lib/lex.c:641 +#, fuzzy, c-format +msgid "Cannot open included config file %s: %s\n" +msgstr "Nie można otworzyć pliku wejciowego FileSet: %s. 
ERR=%s\n" + +#: src/lib/lex.c:700 src/lib/lex.c:757 +#, fuzzy, c-format +msgid "expected an integer or a range, got %s: %s" +msgstr "Oczekiwano ciągu znaków regex, otrzymano: %s\n" + +#: src/lib/lex.c:714 src/lib/lex.c:722 src/lib/lex.c:733 src/lib/lex.c:741 +#, fuzzy, c-format +msgid "expected an integer number, got %s: %s" +msgstr "Oczekiwano nazwy pliku, otrzymano: %s" + +#: src/lib/lex.c:771 +#, fuzzy, c-format +msgid "expected a name, got %s: %s" +msgstr "Oczekiwano nazwy pliku, otrzymano: %s" + +#: src/lib/lex.c:775 +#, c-format +msgid "name %s length %d too long, max is %d\n" +msgstr "" + +#: src/lib/lex.c:783 +#, fuzzy, c-format +msgid "expected a string, got %s: %s" +msgstr "Oczekiwano ciągu znaków regex, otrzymano: %s\n" + +#: src/lib/bget_msg.c:99 +#, fuzzy +msgid "Status OK\n" +msgstr "Status:\n" + +#: src/lib/bget_msg.c:103 +#, c-format +msgid "bget_msg: unknown signal %d\n" +msgstr "" + +#: src/lib/address_conf.c:63 +#, c-format +msgid "Only ipv4 and ipv6 are supported (%d)\n" +msgstr "" + +#: src/lib/address_conf.c:67 +#, c-format +msgid "Only ipv4 is supported (%d)\n" +msgstr "" + +#: src/lib/address_conf.c:176 +#, c-format +msgid "It was tried to assign a ipv6 address to a ipv4(%d)\n" +msgstr "" + +#: src/lib/address_conf.c:185 +#, c-format +msgid "It was tried to assign a ipv4 address to a ipv6(%d)\n" +msgstr "" + +#: src/lib/address_conf.c:264 +#, c-format +msgid "Can't add default address (%s)\n" +msgstr "" + +#: src/lib/address_conf.c:293 +msgid "the old style addresses cannot be mixed with new style" +msgstr "" + +#: src/lib/address_conf.c:314 +#, c-format +msgid "can't resolve service(%s)" +msgstr "" + +#: src/lib/address_conf.c:323 +#, c-format +msgid "can't resolve hostname(%s) %s" +msgstr "" + +#: src/lib/address_conf.c:413 src/lib/address_conf.c:444 +#, fuzzy, c-format +msgid "Expected a block begin { , got: %s" +msgstr "Oczekiwano nazwy pliku, otrzymano: %s" + +#: src/lib/address_conf.c:418 +msgid "Empty addr block is not allowed" +msgstr "" + +#: src/lib/address_conf.c:422 +#, fuzzy, c-format +msgid "Expected a string, got: %s" +msgstr "Oczekiwano ciągu znaków regex, otrzymano: %s\n" + +#: src/lib/address_conf.c:431 +#, fuzzy, c-format +msgid "Expected a string [ip|ipv4|ipv6], got: %s" +msgstr "Oczekiwano ciągu znaków regex, otrzymano: %s\n" + +#: src/lib/address_conf.c:435 +#, fuzzy, c-format +msgid "Expected a string [ip|ipv4], got: %s" +msgstr "Oczekiwano ciągu znaków regex, otrzymano: %s\n" + +#: src/lib/address_conf.c:440 src/lib/address_conf.c:470 +#, fuzzy, c-format +msgid "Expected a equal =, got: %s" +msgstr "Oczekiwano nazwy pliku, otrzymano: %s" + +#: src/lib/address_conf.c:451 src/lib/address_conf.c:466 +#, fuzzy, c-format +msgid "Expected a identifier [addr|port], got: %s" +msgstr "Oczekiwano nazwy pliku, otrzymano: %s" + +#: src/lib/address_conf.c:456 +msgid "Only one port per address block" +msgstr "" + +#: src/lib/address_conf.c:462 +msgid "Only one addr per address block" +msgstr "" + +#: src/lib/address_conf.c:478 +#, fuzzy, c-format +msgid "Expected a number or a string, got: %s" +msgstr "Oczekiwano ciągu znaków regex, otrzymano: %s\n" + +#: src/lib/address_conf.c:484 src/lib/address_conf.c:517 +#, fuzzy, c-format +msgid "Expected an IP number or a hostname, got: %s" +msgstr "Oczekiwano nazwy pliku, otrzymano: %s" + +#: src/lib/address_conf.c:490 +msgid "State machine missmatch" +msgstr "" + +#: src/lib/address_conf.c:496 src/lib/address_conf.c:508 +#, fuzzy, c-format +msgid "Expected a end of block }, got: %s" +msgstr "Oczekiwano nazwy 
pliku, otrzymano: %s" + +#: src/lib/address_conf.c:502 +#, c-format +msgid "Can't add hostname(%s) and port(%s) to addrlist (%s)" +msgstr "" + +#: src/lib/address_conf.c:522 src/lib/address_conf.c:536 +#, c-format +msgid "can't add port (%s) to (%s)" +msgstr "" + +#: src/lib/address_conf.c:531 +#, fuzzy, c-format +msgid "Expected a port number or string, got: %s" +msgstr "Oczekiwano ciągu znaków regex, otrzymano: %s\n" + +#: src/lib/var.c:2669 +msgid "everything ok" +msgstr "" + +#: src/lib/var.c:2670 +msgid "incomplete named character" +msgstr "" + +#: src/lib/var.c:2671 +msgid "incomplete hexadecimal value" +msgstr "" + +#: src/lib/var.c:2672 +msgid "invalid hexadecimal value" +msgstr "" + +#: src/lib/var.c:2673 +msgid "octal value too large" +msgstr "" + +#: src/lib/var.c:2674 +msgid "invalid octal value" +msgstr "" + +#: src/lib/var.c:2675 +msgid "incomplete octal value" +msgstr "" + +#: src/lib/var.c:2676 +msgid "incomplete grouped hexadecimal value" +msgstr "" + +#: src/lib/var.c:2677 +msgid "incorrect character class specification" +msgstr "" + +#: src/lib/var.c:2678 +#, fuzzy +msgid "invalid expansion configuration" +msgstr "Proszę popraw plik konfiguracyjny: %s\n" + +#: src/lib/var.c:2679 +msgid "out of memory" +msgstr "" + +#: src/lib/var.c:2680 +msgid "incomplete variable specification" +msgstr "" + +#: src/lib/var.c:2681 +msgid "undefined variable" +msgstr "" + +#: src/lib/var.c:2682 +msgid "input is neither text nor variable" +msgstr "" + +#: src/lib/var.c:2683 +msgid "unknown command character in variable" +msgstr "" + +#: src/lib/var.c:2684 +msgid "malformatted search and replace operation" +msgstr "" + +#: src/lib/var.c:2685 +msgid "unknown flag in search and replace operation" +msgstr "" + +#: src/lib/var.c:2686 +msgid "invalid regex in search and replace operation" +msgstr "" + +#: src/lib/var.c:2687 +msgid "missing parameter in command" +msgstr "" + +#: src/lib/var.c:2688 +msgid "empty search string in search and replace operation" +msgstr "" + +#: src/lib/var.c:2689 +msgid "start offset missing in cut operation" +msgstr "" + +#: src/lib/var.c:2690 +msgid "offsets in cut operation delimited by unknown character" +msgstr "" + +#: src/lib/var.c:2691 +msgid "range out of bounds in cut operation" +msgstr "" + +#: src/lib/var.c:2692 +msgid "offset out of bounds in cut operation" +msgstr "" + +#: src/lib/var.c:2693 +msgid "logic error in cut operation" +msgstr "" + +#: src/lib/var.c:2694 +msgid "malformatted transpose operation" +msgstr "" + +#: src/lib/var.c:2695 +msgid "source and target class mismatch in transpose operation" +msgstr "" + +#: src/lib/var.c:2696 +msgid "empty character class in transpose operation" +msgstr "" + +#: src/lib/var.c:2697 +msgid "incorrect character class in transpose operation" +msgstr "" + +#: src/lib/var.c:2698 +msgid "malformatted padding operation" +msgstr "" + +#: src/lib/var.c:2699 +msgid "width parameter missing in padding operation" +msgstr "" + +#: src/lib/var.c:2700 +msgid "fill string missing in padding operation" +msgstr "" + +#: src/lib/var.c:2701 +msgid "unknown quoted pair in search and replace operation" +msgstr "" + +#: src/lib/var.c:2702 +msgid "sub-matching reference out of range" +msgstr "" + +#: src/lib/var.c:2703 +msgid "invalid argument" +msgstr "" + +#: src/lib/var.c:2704 +msgid "incomplete quoted pair" +msgstr "" + +#: src/lib/var.c:2705 +msgid "lookup function does not support variable arrays" +msgstr "" + +#: src/lib/var.c:2706 +msgid "index of array variable contains an invalid character" +msgstr "" + +#: 
src/lib/var.c:2707 +msgid "index of array variable is incomplete" +msgstr "" + +#: src/lib/var.c:2708 +msgid "bracket expression in array variable's index not closed" +msgstr "" + +#: src/lib/var.c:2709 +msgid "division by zero error in index specification" +msgstr "" + +#: src/lib/var.c:2710 +msgid "unterminated loop construct" +msgstr "" + +#: src/lib/var.c:2711 +msgid "invalid character in loop limits" +msgstr "" + +#: src/lib/var.c:2712 +msgid "malformed operation argument list" +msgstr "" + +#: src/lib/var.c:2713 +msgid "undefined operation" +msgstr "" + +#: src/lib/var.c:2714 +#, fuzzy +msgid "formatting failure" +msgstr "Proszę popraw plik konfiguracyjny: %s\n" + +#: src/lib/var.c:2723 +#, fuzzy +msgid "unknown error" +msgstr "Błąd szyfrowania\n" + +#: src/lib/parse_conf.c:178 +msgid "***UNKNOWN***" +msgstr "" + +#: src/lib/parse_conf.c:277 src/lib/parse_conf.c:298 +#, fuzzy, c-format +msgid "expected an =, got: %s" +msgstr "Oczekiwano nazwy pliku, otrzymano: %s" + +#: src/lib/parse_conf.c:307 +#, fuzzy, c-format +msgid "Unknown item code: %d\n" +msgstr "Nieznany rodzaj zasobu %d\n" + +#: src/lib/parse_conf.c:347 +#, fuzzy, c-format +msgid "message type: %s not found" +msgstr "Fileset \"%s\" nie znaleziony.\n" + +#: src/lib/parse_conf.c:385 +#, fuzzy, c-format +msgid "Attempt to redefine name \"%s\" to \"%s\"." +msgstr "" +"Próba definicji kolejnego zasobu %s nazwanego \"%s\" nie jest dozwolone.\n" + +#: src/lib/parse_conf.c:477 src/dird/dird_conf.c:1842 +#, fuzzy, c-format +msgid "Could not find config Resource %s referenced on line %d : %s\n" +msgstr "Nie można stworzyć skrt.\n" + +#: src/lib/parse_conf.c:482 +#, fuzzy, c-format +msgid "Attempt to redefine resource \"%s\" referenced on line %d : %s\n" +msgstr "" +"Próba definicji kolejnego zasobu %s nazwanego \"%s\" nie jest dozwolone.\n" + +#: src/lib/parse_conf.c:518 +#, c-format +msgid "Too many %s directives. Max. is %d. line %d: %s\n" +msgstr "" + +#: src/lib/parse_conf.c:529 +#, fuzzy, c-format +msgid "Could not find config Resource \"%s\" referenced on line %d : %s\n" +msgstr "Nie można stworzyć skrt.\n" + +#: src/lib/parse_conf.c:592 +#, c-format +msgid "Missing config Resource \"%s\" referenced on line %d : %s\n" +msgstr "" + +#: src/lib/parse_conf.c:657 +#, fuzzy, c-format +msgid "expected a size number, got: %s" +msgstr "Oczekiwano nazwy pliku, otrzymano: %s" + +#: src/lib/parse_conf.c:667 +#, fuzzy, c-format +msgid "expected a size, got: %s" +msgstr "Oczekiwano nazwy pliku, otrzymano: %s" + +#: src/lib/parse_conf.c:716 src/lib/parse_conf.c:722 +#, fuzzy, c-format +msgid "expected a time period, got: %s" +msgstr "Niepoprawna komenda FileSet: %s\n" + +#: src/lib/parse_conf.c:741 src/lib/parse_conf.c:757 src/dird/dird_conf.c:1820 +#: src/dird/dird_conf.c:1936 +#, fuzzy, c-format +msgid "Expect %s, got: %s" +msgstr "Oczekiwano nazwy pliku, otrzymano: %s" + +#: src/lib/parse_conf.c:783 +#, fuzzy, c-format +msgid "Expected a Tape Label keyword, got: %s" +msgstr "Niepoprawna komenda FileSet: %s\n" + +#: src/lib/parse_conf.c:866 +#, fuzzy, c-format +msgid "Unable to initialize resource lock. ERR=%s\n" +msgstr "Nie można zainicjalizowa blokady BD. ERR=%s\n" + +#: src/lib/parse_conf.c:874 +#, fuzzy +msgid "Config filename too long.\n" +msgstr "Komentarz za długi.\n" + +#: src/lib/parse_conf.c:897 +#, fuzzy, c-format +msgid "Cannot open config file \"%s\": %s\n" +msgstr "Nie można otworzyć pliku %s: ERR=%s\n" + +#: src/lib/parse_conf.c:914 +msgid "" +"Currently we cannot handle UTF-16 source files. 
Please convert the conf file " +"to UTF-8\n" +msgstr "" + +#: src/lib/parse_conf.c:918 +#, fuzzy, c-format +msgid "Expected a Resource name identifier, got: %s" +msgstr "Oczekiwano nazwy pliku, otrzymano: %s" + +#: src/lib/parse_conf.c:934 +#, fuzzy, c-format +msgid "expected resource name, got: %s" +msgstr "Oczekiwano nazwy pliku, otrzymano: %s" + +#: src/lib/parse_conf.c:945 +#, fuzzy, c-format +msgid "not in resource definition: %s" +msgstr "Brak definicji zasobu Dyrektora w %s\n" + +#: src/lib/parse_conf.c:956 src/dird/dird_conf.c:1996 src/dird/inc_conf.c:400 +#: src/dird/inc_conf.c:747 +#, fuzzy, c-format +msgid "expected an equals, got: %s" +msgstr "Oczekiwano nazwy pliku, otrzymano: %s" + +#: src/lib/parse_conf.c:970 +#, c-format +msgid "" +"Keyword \"%s\" not permitted in this resource.\n" +"Perhaps you left the trailing brace off of the previous resource." +msgstr "" + +#: src/lib/parse_conf.c:981 +#, fuzzy +msgid "Name not specified for resource" +msgstr "Nie można znale zasobu Director %s\n" + +#: src/lib/parse_conf.c:991 +#, c-format +msgid "unexpected token %d %s in resource definition" +msgstr "" + +#: src/lib/parse_conf.c:997 +#, fuzzy, c-format +msgid "Unknown parser state %d\n" +msgstr "Nieznany rodzaj zasobu %d\n" + +#: src/lib/parse_conf.c:1002 +msgid "End of conf file reached with unclosed resource." +msgstr "" + +#: src/lib/mem_pool.c:107 +#, c-format +msgid "MemPool index %d larger than max %d\n" +msgstr "" + +#: src/lib/mem_pool.c:125 src/lib/mem_pool.c:145 src/lib/mem_pool.c:181 +#: src/lib/mem_pool.c:252 src/lib/mem_pool.c:272 src/lib/mem_pool.c:307 +#: src/lib/mem_pool.c:600 +#, c-format +msgid "Out of memory requesting %d bytes\n" +msgstr "" + +#: src/lib/mem_pool.c:162 +#, fuzzy +msgid "obuf is NULL\n" +msgstr "BSR is NULL\n" + +#: src/lib/bsock.c:128 +#, fuzzy, c-format +msgid "" +"Could not connect to %s on %s:%d. ERR=%s\n" +"Retrying ...\n" +msgstr "Nie można stworzyć %s: ERR=%s\n" + +#: src/lib/bsock.c:134 +#, fuzzy, c-format +msgid "Unable to connect to %s on %s:%d. ERR=%s\n" +msgstr "Nie mogę otworzyć xattr %s na \"%s\": ERR=%s\n" + +#: src/lib/bsock.c:207 +#, fuzzy, c-format +msgid "gethostbyname() for host \"%s\" failed: ERR=%s\n" +msgstr "Błąd w acltostr na pliku \"%s\": ERR=%s\n" + +#: src/lib/bsock.c:227 +#, fuzzy, c-format +msgid "Socket open error. proto=%d port=%d. ERR=%s\n" +msgstr "Przesunicie do %s na: %s: ERR=%s\n" + +#: src/lib/bsock.c:238 +#, fuzzy, c-format +msgid "Source address bind error. proto=%d. ERR=%s\n" +msgstr "Błąd odczytu na pliku %s. ERR=%s\n" + +#: src/lib/bsock.c:257 +#, c-format +msgid "Cannot set SO_KEEPIDLE on socket: %s\n" +msgstr "" + +#: src/lib/bsock.c:304 +#, fuzzy, c-format +msgid "Could not init bsock mutex. ERR=%s\n" +msgstr "Nie można zainicjować kolejki zada: ERR=%s\n" + +#: src/lib/bsock.c:372 +#, fuzzy, c-format +msgid "Write error sending %d bytes to %s:%s:%d: ERR=%s\n" +msgstr "Błąd w wysyaniu Hello do demona Plików. ERR=%s\n" + +#: src/lib/bsock.c:378 +#, c-format +msgid "Wrote %d bytes to %s:%s:%d, but only %d accepted.\n" +msgstr "" + +#: src/lib/bsock.c:467 src/lib/bsock.c:528 +#, c-format +msgid "Read expected %d got %d from %s:%s:%d\n" +msgstr "" + +#: src/lib/bsock.c:487 +#, c-format +msgid "Packet size too big from \"%s:%s:%d. Terminating connection.\n" +msgstr "" + +#: src/lib/bsock.c:517 +#, fuzzy, c-format +msgid "Read error from %s:%s:%d: ERR=%s\n" +msgstr "Błąd odczytu na pliku %s. 
ERR=%s\n" + +#: src/lib/bsock.c:587 src/dird/catreq.c:582 src/dird/catreq.c:600 +#, fuzzy, c-format +msgid "fread attr spool error. ERR=%s\n" +msgstr "Błąd rekordu danych. ERR=%s\n" + +#: src/lib/bsock.c:605 +msgid "fread attr spool I/O error.\n" +msgstr "" + +#: src/lib/bsock.c:665 +#, fuzzy +msgid "Could not malloc BSOCK data buffer\n" +msgstr "Nie można otworzyć bazy danych katalogu \"%s\".\n" + +#: src/lib/bsock.c:672 src/lib/bsock.c:696 +#, fuzzy, c-format +msgid "sockopt error: %s\n" +msgstr "Błąd rekordu danych. ERR=%s\n" + +#: src/lib/bsock.c:678 src/lib/bsock.c:702 +#, c-format +msgid "Warning network buffer = %d bytes not max size.\n" +msgstr "" + +#: src/lib/bsock.c:682 src/lib/bsock.c:706 +#, fuzzy, c-format +msgid "Network buffer size %d not multiple of tape block size.\n" +msgstr "" +"Rozmiar bloku taśmy (%d) nie jest wielokrotnością rozmiaru systemowego (%d)\n" + +#: src/lib/bsock.c:727 src/lib/bsock.c:761 +#, fuzzy, c-format +msgid "fcntl F_GETFL error. ERR=%s\n" +msgstr "Przesunicie do %s na: %s: ERR=%s\n" + +#: src/lib/bsock.c:733 src/lib/bsock.c:767 src/lib/bsock.c:792 +#, fuzzy, c-format +msgid "fcntl F_SETFL error. ERR=%s\n" +msgstr "Przesunicie do %s na: %s: ERR=%s\n" + +#: src/lib/bsock.c:965 src/qt-console/bcomm/dircomm_auth.cpp:112 +#, fuzzy, c-format +msgid "Director authorization problem at \"%s:%d\"\n" +msgstr "Brak autoryzacji dla Zadania \"%s\"\n" + +#: src/lib/bsock.c:972 src/qt-console/bcomm/dircomm_auth.cpp:119 +#, fuzzy, c-format +msgid "" +"Authorization problem: Remote server at \"%s:%d\" did not advertise required " +"TLS support.\n" +msgstr "" +"Problem autoryzacji: Zdalny serwer nie zadeklarował wymaganego wsparcia dla " +"TLS.\n" + +#: src/lib/bsock.c:980 src/qt-console/bcomm/dircomm_auth.cpp:127 +#, fuzzy, c-format +msgid "" +"Authorization problem with Director at \"%s:%d\": Remote server requires " +"TLS.\n" +msgstr "Problem autoryzacji: Zdalny serwer wymaga TLS.\n" + +#: src/lib/bsock.c:992 src/qt-console/bcomm/dircomm_auth.cpp:138 +#, fuzzy, c-format +msgid "TLS negotiation failed with Director at \"%s:%d\"\n" +msgstr "Negocjacje TLS nie powiody si\n" + +#: src/lib/bsock.c:1002 src/qt-console/bcomm/dircomm_auth.cpp:150 +#, fuzzy, c-format +msgid "" +"Bad response to Hello command: ERR=%s\n" +"The Director at \"%s:%d\" is probably not running.\n" +msgstr "Za odpowiedź na komend Hello: ERR=%s\n" + +#: src/lib/bsock.c:1011 src/qt-console/bcomm/dircomm_auth.cpp:159 +#, fuzzy, c-format +msgid "Director at \"%s:%d\" rejected Hello command\n" +msgstr "Director odrzuci komend Hello\n" + +#: src/lib/bsock.c:1021 src/qt-console/bcomm/dircomm_auth.cpp:171 +#, fuzzy, c-format +msgid "" +"Authorization problem with Director at \"%s:%d\"\n" +"Most likely the passwords do not agree.\n" +"If you are using TLS, there may have been a certificate validation error " +"during the TLS handshake.\n" +"Please see http://www.bacula.org/en/rel-manual/Bacula_Freque_Asked_Questi." +"html#SECTION003760000000000000000 for help.\n" +msgstr "" +"Problem autoryzacji Director'a.\n" +"Najprawdopodobniej się nie zgadzaj.\n" +"Jeżeli używasz TLS, może to być także błąd weryfikacji certyfikatu podczas " +"negocjacji TLS.\n" +"Zobacz http://www.bacula.org/en/rel-manual/Bacula_Freque_Asked_Questi." 
+"html#SECTION003760000000000000000 w celu uzyskania pomocy.\n" + +#: src/lib/tls.c:90 +#, c-format +msgid "" +"Error with certificate at depth: %d, issuer = %s, subject = %s, ERR=%d:%s\n" +msgstr "" + +#: src/lib/tls.c:127 +#, fuzzy +msgid "Error initializing SSL context" +msgstr "Błąd skanowania nagwna rekordu: %s\n" + +#: src/lib/tls.c:148 +msgid "Error loading certificate verification stores" +msgstr "" + +#: src/lib/tls.c:153 +msgid "" +"Either a certificate file or a directory must be specified as a verification " +"store\n" +msgstr "" + +#: src/lib/tls.c:164 +#, fuzzy +msgid "Error loading certificate file" +msgstr "Błąd skanowania nagwna rekordu: %s\n" + +#: src/lib/tls.c:172 +msgid "Error loading private key" +msgstr "" + +#: src/lib/tls.c:180 +#, fuzzy +msgid "Unable to open DH parameters file" +msgstr "Nie mogę otworzyć xattr %s na pliku \"%s\": ERR=%s\n" + +#: src/lib/tls.c:186 +msgid "Unable to load DH parameters from specified file" +msgstr "" + +#: src/lib/tls.c:190 +msgid "Failed to set TLS Diffie-Hellman parameters" +msgstr "" + +#: src/lib/tls.c:200 +msgid "Error setting cipher list, no valid ciphers available\n" +msgstr "" + +#: src/lib/tls.c:259 +msgid "Peer failed to present a TLS certificate\n" +msgstr "" + +#: src/lib/tls.c:304 +#, c-format +msgid "Peer %s failed to present a TLS certificate\n" +msgstr "" + +#: src/lib/tls.c:416 +#, fuzzy +msgid "Error creating file descriptor-based BIO" +msgstr "Błąd skanowania nagwna rekordu: %s\n" + +#: src/lib/tls.c:427 +msgid "Error creating new SSL object" +msgstr "" + +#: src/lib/tls.c:490 src/lib/tls.c:513 +#, fuzzy +msgid "Connect failure" +msgstr "Utworzone" + +#: src/lib/tls.c:592 src/lib/tls.c:596 +#, fuzzy +msgid "TLS shutdown failure." +msgstr "Negocjacja TLS nieudana.\n" + +#: src/lib/tls.c:663 +msgid "TLS read/write failure." +msgstr "" + +#: src/lib/crypto.c:442 +#, fuzzy +msgid "Unable to open certificate file" +msgstr "Proszę popraw plik konfiguracyjny: %s\n" + +#: src/lib/crypto.c:449 +#, fuzzy +msgid "Unable to read certificate from file" +msgstr "Proszę popraw plik konfiguracyjny: %s\n" + +#: src/lib/crypto.c:455 +msgid "Unable to extract public key from certificate" +msgstr "" + +#: src/lib/crypto.c:462 +msgid "" +"Provided certificate does not include the required subjectKeyIdentifier " +"extension." 
+msgstr "" + +#: src/lib/crypto.c:469 +#, c-format +msgid "Unsupported key type provided: %d\n" +msgstr "" + +#: src/lib/crypto.c:506 src/lib/crypto.c:554 +#, fuzzy +msgid "Unable to open private key file" +msgstr "Nie mogę otworzyć pliku \"%s\": ERR=%s\n" + +#: src/lib/crypto.c:536 src/lib/crypto.c:570 +#, fuzzy +msgid "Unable to read private key from file" +msgstr "Proszę popraw plik konfiguracyjny: %s\n" + +#: src/lib/crypto.c:629 +#, fuzzy, c-format +msgid "Unsupported digest type: %d\n" +msgstr "Niezaimplementowany poziom weryfikacji %d\n" + +#: src/lib/crypto.c:643 +#, fuzzy +msgid "OpenSSL digest initialization failed" +msgstr "Inicjalizacja skrtu %s nieudana\n" + +#: src/lib/crypto.c:657 +msgid "OpenSSL digest update failed" +msgstr "" + +#: src/lib/crypto.c:675 +#, fuzzy +msgid "OpenSSL digest finalize failed" +msgstr "Inicjalizacja skrtu %s nieudana\n" + +#: src/lib/crypto.c:773 +msgid "OpenSSL digest_new failed" +msgstr "" + +#: src/lib/crypto.c:779 +msgid "OpenSSL sign get digest failed" +msgstr "" + +#: src/lib/crypto.c:818 src/lib/crypto.c:822 +msgid "OpenSSL digest Verify final failed" +msgstr "" + +#: src/lib/crypto.c:827 +#, fuzzy +msgid "No signers found for crypto verify.\n" +msgstr "Błąd wersji Zlib" + +#: src/lib/crypto.c:888 +#, fuzzy +msgid "Signature creation failed" +msgstr "Nieudana walidacja sygnatury dla %s: %s\n" + +#: src/lib/crypto.c:966 +#, fuzzy +msgid "Signature decoding failed" +msgstr "Nieudana walidacja sygnatury dla %s: %s\n" + +#: src/lib/crypto.c:1043 +msgid "Unsupported cipher type specified\n" +msgstr "" + +#: src/lib/crypto.c:1192 +msgid "CryptoData decoding failed" +msgstr "" + +#: src/lib/crypto.c:1236 +#, fuzzy +msgid "Failure decrypting the session key" +msgstr "Nieudane odszyfrowanie klucza sesji.\n" + +#: src/lib/crypto.c:1287 +#, c-format +msgid "Unsupported contentEncryptionAlgorithm: %d\n" +msgstr "" + +#: src/lib/crypto.c:1297 src/lib/crypto.c:1303 +#, fuzzy +msgid "OpenSSL cipher context initialization failed" +msgstr "Inicjalizacja skrtu %s nieudana\n" + +#: src/lib/crypto.c:1310 +msgid "Encryption session provided an invalid symmetric key" +msgstr "" + +#: src/lib/crypto.c:1316 +#, fuzzy +msgid "Encryption session provided an invalid IV" +msgstr "zakodowane dane sesji" + +#: src/lib/crypto.c:1322 +#, fuzzy +msgid "OpenSSL cipher context key/IV initialization failed" +msgstr "Inicjalizacja skrtu %s nieudana\n" + +#: src/lib/crypto.c:1392 +#, fuzzy, c-format +msgid "Unable to init OpenSSL threading: ERR=%s\n" +msgstr "Nie mogę otworzyć pliku \"%s\": ERR=%s\n" + +#: src/lib/crypto.c:1405 +msgid "Failed to seed OpenSSL PRNG\n" +msgstr "" + +#: src/lib/crypto.c:1431 +msgid "Failed to save OpenSSL PRNG\n" +msgstr "" + +#: src/lib/crypto.c:1492 +#, c-format +msgid "Unsupported digest type=%d specified\n" +msgstr "" + +#: src/lib/crypto.c:1512 +#, c-format +msgid "SHA1Update() returned an error: %d\n" +msgstr "" + +#: src/lib/crypto.c:1655 +#, fuzzy +msgid "No error" +msgstr " (%d bd)" + +#: src/lib/crypto.c:1657 +#, fuzzy +msgid "Signer not found" +msgstr "Nie znaleziono wskanika zadania." + +#: src/lib/crypto.c:1659 +#, fuzzy +msgid "Recipient not found" +msgstr "Nie znaleziono wskanika zadania." 
+ +#: src/lib/crypto.c:1661 +msgid "Unsupported digest algorithm" +msgstr "" + +#: src/lib/crypto.c:1663 +#, fuzzy +msgid "Unsupported encryption algorithm" +msgstr "Zignorowano %d nie wspieranych strumieni kryptograficznych.\n" + +#: src/lib/crypto.c:1665 +msgid "Signature is invalid" +msgstr "" + +#: src/lib/crypto.c:1667 +#, fuzzy +msgid "Decryption error" +msgstr "Błąd odszyfrowywania\n" + +#: src/lib/crypto.c:1670 +msgid "Internal error" +msgstr "" + +#: src/lib/crypto.c:1672 +#, fuzzy +msgid "Unknown error" +msgstr "Błąd szyfrowania\n" + +#: src/lib/util.c:183 +#, fuzzy +msgid "Running" +msgstr "" +"\n" +"Uruchomione Zadania:\n" + +#: src/lib/util.c:186 +#, fuzzy +msgid "Blocked" +msgstr "Odczyt zapisanego bloku" + +#: src/lib/util.c:196 +#, fuzzy +msgid "Non-fatal error" +msgstr "Błąd danych Zlib" + +#: src/lib/util.c:199 src/lib/util.c:324 src/dird/ua_status.c:874 +#: src/stored/status.c:553 +#, fuzzy +msgid "OK -- with warnings" +msgstr "%s OK -- z ostrzeżeniami" + +#: src/lib/util.c:202 src/lib/util.c:334 +#, fuzzy +msgid "Canceled" +msgstr "Anulowane" + +#: src/lib/util.c:205 +#, fuzzy +msgid "Verify differences" +msgstr "Rnice przy weryfikacji" + +#: src/lib/util.c:208 +msgid "Waiting on FD" +msgstr "" + +#: src/lib/util.c:211 +msgid "Wait on SD" +msgstr "" + +#: src/lib/util.c:214 +msgid "Wait for new Volume" +msgstr "" + +#: src/lib/util.c:217 +#, fuzzy +msgid "Waiting for mount" +msgstr "Status zadania: Oczekiwanie na Zamontowanie" + +#: src/lib/util.c:220 +#, fuzzy +msgid "Waiting for Storage resource" +msgstr "Status zadania: Oczekiwanie na zasoby przechowywania" + +#: src/lib/util.c:223 +#, fuzzy +msgid "Waiting for Job resource" +msgstr "Status zadania: Oczekiwanie na zasoby zadania" + +#: src/lib/util.c:226 +#, fuzzy +msgid "Waiting for Client resource" +msgstr "Status zadania: Oczekiwanie na zasoby Klienta" + +#: src/lib/util.c:229 +msgid "Waiting on Max Jobs" +msgstr "" + +#: src/lib/util.c:232 +#, fuzzy +msgid "Waiting for Start Time" +msgstr "Status zadania: Oczekiwanie na czas startu" + +#: src/lib/util.c:235 +msgid "Waiting on Priority" +msgstr "" + +#: src/lib/util.c:238 src/dird/ua_status.c:714 src/dird/ua_status.c:765 +msgid "SD committing Data" +msgstr "" + +#: src/lib/util.c:241 src/dird/ua_status.c:717 src/dird/ua_status.c:768 +msgid "SD despooling Data" +msgstr "" + +#: src/lib/util.c:244 src/dird/ua_status.c:720 src/dird/ua_status.c:771 +#, fuzzy +msgid "SD despooling Attributes" +msgstr "Spooling statystyk:\n" + +#: src/lib/util.c:247 src/dird/ua_status.c:723 src/dird/ua_status.c:774 +#, fuzzy +msgid "Dir inserting Attributes" +msgstr "Atrybuty Unix" + +#: src/lib/util.c:254 +#, fuzzy, c-format +msgid "Unknown Job termination status=%d" +msgstr "Nieznany status zadania %c." + +#: src/lib/util.c:270 +#, fuzzy +msgid "Completed successfully" +msgstr "Odtwarzanie wykonane poprawnie." + +#: src/lib/util.c:273 +#, fuzzy +msgid "Completed with warnings" +msgstr "Przywracanie OK -- z ostrzeżeniami" + +#: src/lib/util.c:276 +#, fuzzy +msgid "Terminated with errors" +msgstr "Status zadania: Zakoczone przez bd" + +#: src/lib/util.c:279 +#, fuzzy +msgid "Fatal error" +msgstr "Błąd danych Zlib" + +#: src/lib/util.c:282 +#, fuzzy +msgid "Created, not yet running" +msgstr "Zadanie odtwarzania stworzone, lecz jeszcze nie uruchomione." 
+ +#: src/lib/util.c:285 +#, fuzzy +msgid "Canceled by user" +msgstr "Odtwarzanie Anulowane" + +#: src/lib/util.c:288 +#, fuzzy +msgid "Verify found differences" +msgstr "Rnice przy weryfikacji" + +#: src/lib/util.c:291 +#, fuzzy +msgid "Waiting for File daemon" +msgstr "Status zadania: Oczekiwanie na demon Plików" + +#: src/lib/util.c:294 +#, fuzzy +msgid "Waiting for Storage daemon" +msgstr "Status zadania: Oczekiwanie na demon Przechowywania" + +#: src/lib/util.c:297 +#, fuzzy +msgid "Waiting for higher priority jobs" +msgstr "Status zadania: Oczekiwanie na zakoczenie zada o wyszym priorytecie" + +#: src/lib/util.c:300 +#, fuzzy +msgid "Batch inserting file records" +msgstr "Niepoprawna komenda FileSet: %s\n" + +#: src/lib/util.c:331 +#, fuzzy +msgid "Fatal Error" +msgstr "Bd" + +#: src/lib/util.c:337 +#, fuzzy +msgid "Differences" +msgstr "Rnice przy weryfikacji" + +#: src/lib/util.c:340 +#, fuzzy +msgid "Unknown term code" +msgstr "Nieznany rodzaj zasobu %d\n" + +#: src/lib/util.c:359 +#, fuzzy +msgid "Migrated Job" +msgstr "" +"\n" +"Zakoczonych Zada:\n" + +#: src/lib/util.c:362 src/dird/ua_run.c:827 +#, fuzzy +msgid "Verify" +msgstr "Weryfikacja OK" + +#: src/lib/util.c:365 src/wx-console/wxbrestorepanel.cpp:404 +#: src/wx-console/wxbrestorepanel.cpp:2000 +#, fuzzy +msgid "Restore" +msgstr "Odtwarzanie Anulowane" + +#: src/lib/util.c:368 src/wx-console/wxbmainframe.cpp:276 +#, fuzzy +msgid "Console" +msgstr "Zamknij" + +#: src/lib/util.c:371 +msgid "System or Console" +msgstr "" + +#: src/lib/util.c:374 src/dird/ua_run.c:767 +msgid "Admin" +msgstr "" + +#: src/lib/util.c:377 src/lib/util.c:466 +msgid "Archive" +msgstr "" + +#: src/lib/util.c:380 +msgid "Job Copy" +msgstr "" + +#: src/lib/util.c:383 +msgid "Copy" +msgstr "" + +#: src/lib/util.c:386 +msgid "Migrate" +msgstr "" + +#: src/lib/util.c:389 +msgid "Scan" +msgstr "" + +#: src/lib/util.c:393 +#, fuzzy +msgid "Unknown Type" +msgstr "Nieznany status." + +#: src/lib/util.c:403 +msgid "Truncate" +msgstr "" + +#: src/lib/util.c:437 +#, fuzzy +msgid "Verify Init Catalog" +msgstr "Weryfikacja Katalogu" + +#: src/lib/util.c:440 src/dird/ua_run.c:723 +#, fuzzy +msgid "Verify Volume to Catalog" +msgstr "Wolumen do Katalogu" + +#: src/lib/util.c:443 src/dird/ua_run.c:724 +#, fuzzy +msgid "Verify Disk to Catalog" +msgstr "Dysk do Katalogu" + +#: src/lib/util.c:446 +#, fuzzy +msgid "Verify Data" +msgstr "Weryfikacja Katalogu" + +#: src/lib/util.c:449 +msgid "Virtual Full" +msgstr "" + +#: src/lib/util.c:465 +#, fuzzy +msgid "Append" +msgstr "Wysłano" + +#: src/lib/util.c:467 +msgid "Disabled" +msgstr "" + +#: src/lib/util.c:469 +#, fuzzy +msgid "Used" +msgstr "Użytkownik" + +#: src/lib/util.c:470 +msgid "Cleaning" +msgstr "" + +#: src/lib/util.c:471 +msgid "Purged" +msgstr "" + +#: src/lib/util.c:472 +msgid "Recycle" +msgstr "" + +#: src/lib/util.c:473 +msgid "Read-Only" +msgstr "" + +#: src/lib/util.c:485 +#, fuzzy +msgid "Invalid volume status" +msgstr "Status używanego Wolumenu:\n" + +#: src/lib/util.c:855 +#, fuzzy +msgid "Working directory not defined. Cannot continue.\n" +msgstr "" +"Katalog dla Wtyczek nie jest zdefiniowany. Nie można uy wtyczki: \"%\"\n" + +#: src/lib/util.c:858 +#, fuzzy, c-format +msgid "Working Directory: \"%s\" not found. Cannot continue.\n" +msgstr "" +"Katalog dla Wtyczek nie jest zdefiniowany. Nie można uy wtyczki: \"%\"\n" + +#: src/lib/util.c:862 +#, fuzzy, c-format +msgid "Working Directory: \"%s\" is not a directory. Cannot continue.\n" +msgstr "" +"Katalog dla Wtyczek nie jest zdefiniowany. 
Nie można uy wtyczki: \"%\"\n" + +#: src/lib/plugins.c:111 +#, fuzzy, c-format +msgid "Failed to open Plugin directory %s: ERR=%s\n" +msgstr " Nie można otworzyć katalogu %s: ERR=%s\n" + +#: src/lib/plugins.c:126 +#, c-format +msgid "Failed to find any plugins in %s\n" +msgstr "" + +#: src/lib/plugins.c:158 +#, fuzzy, c-format +msgid "Plugin load %s failed: ERR=%s\n" +msgstr "Nieudana aktualizacja rekordu Media %s: ERR=%s\n" + +#: src/lib/plugins.c:169 +#, fuzzy, c-format +msgid "Lookup of loadPlugin in plugin %s failed: ERR=%s\n" +msgstr "Nie mogę otworzyć xattr %s na pliku \"%s\": ERR=%s\n" + +#: src/lib/plugins.c:178 +#, fuzzy, c-format +msgid "Lookup of unloadPlugin in plugin %s failed: ERR=%s\n" +msgstr "Nie mogę otworzyć xattr %s na pliku \"%s\": ERR=%s\n" + +#: src/lib/attr.c:79 +#, fuzzy, c-format +msgid "Error scanning attributes: %s\n" +msgstr "Błąd odczytywania pliku %s: ERR=%s\n" + +#: src/dird/pythondir.c:154 +#, fuzzy +msgid "Pool record not found." +msgstr "Zasób Puli \"%s\" nie znaleziony.\n" + +#: src/dird/pythondir.c:285 +msgid "Priority must be 1-100" +msgstr "" + +#: src/dird/pythondir.c:290 +msgid "Job Level can be set only during JobInit" +msgstr "" + +#: src/dird/pythondir.c:304 +msgid "Bad JobLevel string" +msgstr "" + +#: src/dird/ua_label.c:105 +msgid "Negative numbers not permitted\n" +msgstr "" + +#: src/dird/ua_label.c:111 +msgid "Range end is not integer.\n" +msgstr "" + +#: src/dird/ua_label.c:116 +msgid "Range start is not an integer.\n" +msgstr "" + +#: src/dird/ua_label.c:122 +msgid "Range end not bigger than start.\n" +msgstr "" + +#: src/dird/ua_label.c:128 +msgid "Input value is not an integer.\n" +msgstr "" + +#: src/dird/ua_label.c:134 +msgid "Values must be be greater than zero.\n" +msgstr "" + +#: src/dird/ua_label.c:138 +#, fuzzy +msgid "Slot too large.\n" +msgstr "Komentarz za długi.\n" + +#: src/dird/ua_label.c:187 src/dird/ua_label.c:352 src/dird/ua_label.c:1215 +#: src/dird/ua_run.c:1348 +#, fuzzy +msgid "command line" +msgstr "Nieudana komenda." + +#: src/dird/ua_label.c:205 src/dird/ua_label.c:516 src/dird/ua_label.c:1222 +msgid "No slots in changer to scan.\n" +msgstr "" + +#: src/dird/ua_label.c:217 src/dird/ua_label.c:527 +#, fuzzy +msgid "No Volumes found to label, or no barcodes.\n" +msgstr "Błąd wersji Zlib" + +#: src/dird/ua_label.c:227 src/dird/ua_label.c:1243 +#, c-format +msgid "Slot %d greater than max %d ignored.\n" +msgstr "" + +#: src/dird/ua_label.c:256 +#, c-format +msgid "No VolName for Slot=%d InChanger set to zero.\n" +msgstr "" + +#: src/dird/ua_label.c:274 +#, c-format +msgid "Catalog record for Volume \"%s\" updated to reference slot %d.\n" +msgstr "" + +#: src/dird/ua_label.c:278 +#, fuzzy, c-format +msgid "Catalog record for Volume \"%s\" is up to date.\n" +msgstr "Nieudane stworzenie rekordu JobMedia %s: ERR=%s\n" + +#: src/dird/ua_label.c:284 +#, c-format +msgid "Volume \"%s\" not found in catalog. Slot=%d InChanger set to zero.\n" +msgstr "" + +#: src/dird/ua_label.c:381 +#, c-format +msgid "" +"Volume \"%s\" has VolStatus %s. 
It must be Purged or Recycled before " +"relabeling.\n" +msgstr "" + +#: src/dird/ua_label.c:397 +#, fuzzy +msgid "Enter new Volume name: " +msgstr "Wprowadź nazwę wolumenu: " + +#: src/dird/ua_label.c:410 +#, fuzzy, c-format +msgid "Media record for new Volume \"%s\" already exists.\n" +msgstr "rekord puli %s ju istnieje\n" + +#: src/dird/ua_label.c:428 +msgid "Enter slot (0 or Enter for none): " +msgstr "" + +#: src/dird/ua_label.c:456 +#, fuzzy, c-format +msgid "Delete of Volume \"%s\" failed. ERR=%s" +msgstr "Błąd w dostarczeniu komunikatu: nieudane fopen %s: ERR=%s\n" + +#: src/dird/ua_label.c:459 +#, fuzzy, c-format +msgid "Old volume \"%s\" deleted from catalog.\n" +msgstr "W katalogu utworzono nowy Wolumen \"%s\"\n" + +#: src/dird/ua_label.c:470 +#, c-format +msgid "Requesting to mount %s ...\n" +msgstr "" + +#: src/dird/ua_label.c:492 +msgid "Do not forget to mount the drive!!!\n" +msgstr "" + +#: src/dird/ua_label.c:532 +msgid "" +"The following Volumes will be labeled:\n" +"Slot Volume\n" +"==============\n" +msgstr "" + +#: src/dird/ua_label.c:541 +#, fuzzy +msgid "Do you want to label these Volumes? (yes|no): " +msgstr "" +"\n" +"Czy chcesz przywrócić wszystkie pliki? (yes|no): " + +#: src/dird/ua_label.c:562 +#, fuzzy, c-format +msgid "Media record for Slot %d Volume \"%s\" already exists.\n" +msgstr "rekord puli %s ju istnieje\n" + +#: src/dird/ua_label.c:568 +#, fuzzy, c-format +msgid "Error setting InChanger: ERR=%s" +msgstr "Błąd odczytywania pliku %s: ERR=%s\n" + +#: src/dird/ua_label.c:591 +#, c-format +msgid "Maximum pool Volumes=%d reached.\n" +msgstr "" + +#: src/dird/ua_label.c:598 +#, c-format +msgid "Catalog record for cleaning tape \"%s\" successfully created.\n" +msgstr "" + +#: src/dird/ua_label.c:605 +#, fuzzy, c-format +msgid "Catalog error on cleaning tape: %s" +msgstr "Błąd odczytywania pliku %s: ERR=%s\n" + +#: src/dird/ua_label.c:641 +#, fuzzy, c-format +msgid "Illegal character \"%c\" in a volume name.\n" +msgstr "Nieprawidłowy znak \"%c\" w komentarzu.\n" + +#: src/dird/ua_label.c:648 src/dird/ua_cmds.c:356 +#, fuzzy +msgid "Volume name too long.\n" +msgstr "Komentarz za długi.\n" + +#: src/dird/ua_label.c:688 +#, c-format +msgid "Sending relabel command from \"%s\" to \"%s\" ...\n" +msgstr "" + +#: src/dird/ua_label.c:695 +#, fuzzy, c-format +msgid "Sending label command for Volume \"%s\" Slot %d ...\n" +msgstr "Nieudane stworzenie rekordu JobMedia %s: ERR=%s\n" + +#: src/dird/ua_label.c:736 +#, c-format +msgid "Catalog record for Volume \"%s\", Slot %d successfully created.\n" +msgstr "" + +#: src/dird/ua_label.c:749 +#, fuzzy, c-format +msgid "Label command failed for Volume %s.\n" +msgstr "Nieudane stworzenie rekordu JobMedia %s: ERR=%s\n" + +#: src/dird/ua_label.c:759 src/dird/ua_purge.c:569 +#, fuzzy, c-format +msgid "Connecting to Storage daemon %s at %s:%d ...\n" +msgstr "Podłączenie do Storage %s:%d\n" + +#: src/dird/ua_label.c:762 src/dird/ua_purge.c:572 src/dird/ua_cmds.c:888 +#: src/dird/ua_cmds.c:1615 src/dird/job.c:443 src/dird/ua_dotcmds.c:379 +#, fuzzy +msgid "Failed to connect to Storage daemon.\n" +msgstr "Nieudane podłączenie do demona Skadowania: %s:%d\n" + +#: src/dird/ua_label.c:787 +#, fuzzy +msgid "Could not open SD socket.\n" +msgstr "Nie można stworzyć skrt.\n" + +#: src/dird/ua_label.c:859 src/dird/ua_label.c:869 +#, fuzzy, c-format +msgid "Invalid Slot number: %s\n" +msgstr "Niepoprawna komenda FileSet: %s\n" + +#: src/dird/ua_label.c:878 +#, fuzzy, c-format +msgid "Invalid Volume name: %s\n" +msgstr "VolumeName : %s\n" + +#: 
src/dird/ua_label.c:972 +#, fuzzy, c-format +msgid "Device \"%s\" has %d slots.\n" +msgstr " Nie zapisany plik archiwum: %s\n" + +#: src/dird/ua_label.c:1018 +#, fuzzy, c-format +msgid "Pool \"%s\" resource not found for volume \"%s\"!\n" +msgstr "Nie znaleziono zasobu Zadania dla \"%s\".\n" + +#: src/dird/ua_label.c:1234 +#, fuzzy +msgid "No Volumes found, or no barcodes.\n" +msgstr "Błąd wersji Zlib" + +#: src/dird/ua_label.c:1237 +msgid "" +" Slot | Volume Name | Status | Media Type | " +"Pool |\n" +msgstr "" + +#: src/dird/ua_label.c:1238 +msgid "" +"------+------------------+-----------+----------------------" +"+--------------------|\n" +msgstr "" + +#: src/dird/backup.c:94 src/dird/migrate.c:222 src/dird/migrate.c:223 +#: src/dird/vbackup.c:90 src/dird/job.c:189 src/dird/job.c:850 +#: src/dird/job.c:1150 src/dird/job.c:1196 src/dird/job.c:1210 +#, fuzzy +msgid "Pool resource" +msgstr "Zasób Klienta" + +#: src/dird/backup.c:97 +#, fuzzy +msgid "No Storage specification found in Job or Pool.\n" +msgstr "Brak definicji zasobu Messages w %s\n" + +#: src/dird/backup.c:258 +#, fuzzy, c-format +msgid "Using BaseJobId(s): %s\n" +msgstr "%s korzysta z JobId=%s Job=%s\n" + +#: src/dird/backup.c:267 +#, fuzzy +msgid "Cannot find previous jobids.\n" +msgstr "Nie można znale atrybutu %s" + +#: src/dird/backup.c:276 +msgid "Sending Accurate information.\n" +msgstr "" + +#: src/dird/backup.c:328 +#, fuzzy, c-format +msgid "Start Backup JobId %s, Job=%s\n" +msgstr "Uruchom Wirtualny Backup JobId %s, Job=%s\n" + +#: src/dird/backup.c:518 +#, fuzzy, c-format +msgid "Unexpected Client Job message: %s\n" +msgstr "Nieoczekiwany koniec taśmy\n" + +#: src/dird/backup.c:530 +#, fuzzy, c-format +msgid "Network error with FD during %s: ERR=%s\n" +msgstr "Błąd sieci w wysyaniu do SD. ERR=%s\n" + +#: src/dird/backup.c:554 +#, fuzzy +msgid "No Job status returned from FD.\n" +msgstr "Zwrcono zy status %d z Demona Skadowania.\n" + +#: src/dird/backup.c:609 src/dird/admin.c:92 src/dird/migrate.c:1204 +#: src/dird/vbackup.c:303 +#, fuzzy, c-format +msgid "Error getting Job record for Job report: ERR=%s" +msgstr "Nieudane stworzenie rekordu JobMedia %s: ERR=%s\n" + +#: src/dird/backup.c:616 src/dird/vbackup.c:310 +#, fuzzy, c-format +msgid "Error getting Client record for Job report: ERR=%s" +msgstr "Błąd aktualizacji rekordu Klienta. 
ERR=%s\n" + +#: src/dird/backup.c:622 src/dird/migrate.c:1234 src/dird/vbackup.c:316 +#, fuzzy, c-format +msgid "Error getting Media record for Volume \"%s\": ERR=%s" +msgstr "Nieudane stworzenie rekordu JobMedia %s: ERR=%s\n" + +#: src/dird/backup.c:632 src/dird/backup.c:638 src/dird/vbackup.c:326 +#: src/stored/bscan.c:1167 +#, fuzzy +msgid "Backup OK -- with warnings" +msgstr "%s OK -- z ostrzeżeniami" + +#: src/dird/backup.c:634 src/dird/vbackup.c:328 src/stored/bscan.c:1164 +msgid "Backup OK" +msgstr "" + +#: src/dird/backup.c:642 src/dird/vbackup.c:333 src/stored/bscan.c:1171 +#, fuzzy +msgid "*** Backup Error ***" +msgstr "*** Błąd Weryfikacji ***" + +#: src/dird/backup.c:652 src/dird/vbackup.c:343 src/stored/bscan.c:1174 +#, fuzzy +msgid "Backup Canceled" +msgstr "%s Anulowano" + +#: src/dird/backup.c:662 src/dird/admin.c:112 src/dird/vbackup.c:353 +#: src/dird/restore.c:547 +#, fuzzy, c-format +msgid "Inappropriate term code: %c\n" +msgstr "Nieprawidłowy kod: %d %c\n" + +#: src/dird/backup.c:708 +#, fuzzy, c-format +msgid "" +"%s %s %s (%s): %s\n" +" Build OS: %s %s %s\n" +" JobId: %d\n" +" Job: %s\n" +" Backup Level: %s%s\n" +" Client: \"%s\" %s\n" +" FileSet: \"%s\" %s\n" +" Pool: \"%s\" (From %s)\n" +" Catalog: \"%s\" (From %s)\n" +" Storage: \"%s\" (From %s)\n" +" Scheduled time: %s\n" +" Start time: %s\n" +" End time: %s\n" +" Elapsed time: %s\n" +" Priority: %d\n" +" FD Files Written: %s\n" +" SD Files Written: %s\n" +" FD Bytes Written: %s (%sB)\n" +" SD Bytes Written: %s (%sB)\n" +" Rate: %.1f KB/s\n" +" Software Compression: %s\n" +"%s VSS: %s\n" +" Encryption: %s\n" +" Accurate: %s\n" +" Volume name(s): %s\n" +" Volume Session Id: %d\n" +" Volume Session Time: %d\n" +" Last Volume Bytes: %s (%sB)\n" +" Non-fatal FD errors: %d\n" +" SD Errors: %d\n" +" FD termination status: %s\n" +" SD termination status: %s\n" +" Termination: %s\n" +"\n" +msgstr "" +"%s %s %s (%s): %s\n" +" Build OS: %s %s %s\n" +" JobId: %d\n" +" Job: %s\n" +" Backup Level: Virtual Full\n" +" Klient: \"%s\" %s\n" +" FileSet: \"%s\" %s\n" +" Pula: \"%s\" (From %s)\n" +" Katalog: \"%s\" (From %s)\n" +" Storage: \"%s\" (From %s)\n" +" Zaplanowany czas: %s\n" +" Czas rozpoczęcia: %s\n" +" Czas zakończenia: %s\n" +" Czas trwania: %s\n" +" Priorytet: %d\n" +" Zapisano Plików SD: %s\n" +" Zapisano Bajtów SD: %s (%sB)\n" +" Szybkość: %.1f KB/s\n" +" Nazwa Wolumenu: %s\n" +" Id Sesji Wolumenu: %d\n" +" Czas Sesji Wolumenu: %d\n" +" Ostatnie bajty Wolum.: %s (%sB)\n" +" Błędy SD: %d\n" +" Status zakończenia SD: %s\n" +" Zakończenie: %s\n" +"\n" + +#: src/dird/backup.c:766 src/dird/backup.c:767 src/dird/backup.c:768 +#: src/dird/ua_run.c:172 src/dird/ua_update.c:258 src/dird/ua_update.c:278 +#: src/dird/ua_update.c:699 src/dird/ua_input.c:123 src/tools/dbcheck.c:1292 +#: src/stored/parse_bsr.c:864 +msgid "yes" +msgstr "" + +#: src/dird/backup.c:766 src/dird/backup.c:767 src/dird/backup.c:768 +#: src/dird/ua_update.c:258 src/dird/ua_update.c:278 src/dird/ua_update.c:699 +#: src/dird/ua_input.c:127 src/stored/parse_bsr.c:864 +msgid "no" +msgstr "" + +#: src/dird/backup.c:810 +#, fuzzy, c-format +msgid "Could not get Job Volume Parameters to update Bootstrap file. ERR=%s\n" +msgstr "Nie można otworzyć pliku bootstrap %s: ERR=%s\n" + +#: src/dird/backup.c:846 +#, fuzzy, c-format +msgid "" +"Could not open WriteBootstrap file:\n" +"%s: ERR=%s\n" +msgstr "Nie można otworzyć pliku bootstrap %s: ERR=%s\n" + +#: src/dird/ua_run.c:146 +msgid "OK to run? 
(yes/mod/no): " +msgstr "" + +#: src/dird/ua_run.c:185 +#, fuzzy +msgid "Job failed.\n" +msgstr "Status zadania: Anulowane" + +#: src/dird/ua_run.c:188 +#, fuzzy, c-format +msgid "Job queued. JobId=%s\n" +msgstr "Zadanie zakolejkowane. JobId=" + +#: src/dird/ua_run.c:194 +#, fuzzy +msgid "Job not run.\n" +msgstr "Nie znaleziono zadania %s.\n" + +#: src/dird/ua_run.c:205 src/dird/ua_select.c:62 +msgid "mod" +msgstr "" + +#: src/dird/ua_run.c:208 src/dird/ua_update.c:605 +msgid "Parameters to modify:\n" +msgstr "" + +#: src/dird/ua_run.c:209 +#, fuzzy +msgid "Level" +msgstr "nigdy" + +#: src/dird/ua_run.c:210 src/dird/ua_select.c:167 src/dird/ua_cmds.c:1095 +#: src/dird/ua_dotcmds.c:478 src/wx-console/wxbrestorepanel.cpp:339 +#: src/wx-console/wxbrestorepanel.cpp:355 +#: src/wx-console/wxbrestorepanel.cpp:479 +#: src/wx-console/wxbrestorepanel.cpp:480 +#: src/wx-console/wxbrestorepanel.cpp:490 +#: src/wx-console/wxbrestorepanel.cpp:491 +#: src/wx-console/wxbrestorepanel.cpp:1173 +#: src/wx-console/wxbrestorepanel.cpp:1176 +#: src/wx-console/wxbrestorepanel.cpp:1845 +#: src/wx-console/wxbrestorepanel.cpp:1916 +#, fuzzy +msgid "Storage" +msgstr "Demon Skadowania" + +#: src/dird/ua_run.c:211 src/dird/ua_prune.c:441 src/dird/dird_conf.c:649 +#: src/dird/ua_select.c:266 src/dird/ua_select.c:289 src/dird/ua_select.c:312 +#: src/dird/ua_cmds.c:541 +msgid "Job" +msgstr "" + +#: src/dird/ua_run.c:212 src/dird/ua_select.c:190 src/dird/ua_restore.c:1247 +#, fuzzy +msgid "FileSet" +msgstr "Demon Plików" + +#: src/dird/ua_run.c:214 +#, fuzzy +msgid "Restore Client" +msgstr "Odtwarzanie Anulowane" + +#: src/dird/ua_run.c:216 src/dird/ua_select.c:337 src/dird/ua_select.c:446 +#: src/dird/ua_cmds.c:1096 src/dird/ua_dotcmds.c:479 +#: src/wx-console/wxbrestorepanel.cpp:336 +#: src/wx-console/wxbrestorepanel.cpp:354 +#: src/wx-console/wxbrestorepanel.cpp:431 +#: src/wx-console/wxbrestorepanel.cpp:432 +#: src/wx-console/wxbrestorepanel.cpp:442 +#: src/wx-console/wxbrestorepanel.cpp:443 +#: src/wx-console/wxbrestorepanel.cpp:700 +#: src/wx-console/wxbrestorepanel.cpp:1133 +#: src/wx-console/wxbrestorepanel.cpp:1136 +#: src/wx-console/wxbrestorepanel.cpp:1238 +#: src/wx-console/wxbrestorepanel.cpp:1838 +#: src/wx-console/wxbrestorepanel.cpp:1840 +#: src/wx-console/wxbrestorepanel.cpp:1914 +#: src/wx-console/wxbrestorepanel.cpp:1970 +#, fuzzy +msgid "Client" +msgstr "Klient:" + +#: src/dird/ua_run.c:218 src/wx-console/wxbrestorepanel.cpp:356 +#: src/wx-console/wxbrestorepanel.cpp:854 +#: src/wx-console/wxbrestorepanel.cpp:1918 +#, fuzzy +msgid "When" +msgstr "Kiedy:" + +#: src/dird/ua_run.c:219 src/wx-console/wxbrestorepanel.cpp:357 +#: src/wx-console/wxbrestorepanel.cpp:1128 +#: src/wx-console/wxbrestorepanel.cpp:1921 +#, fuzzy +msgid "Priority" +msgstr "Priorytet:" + +#: src/dird/ua_run.c:224 src/dird/ua_update.c:616 src/dird/ua_select.c:535 +#: src/dird/ua_select.c:633 src/wx-console/wxbrestorepanel.cpp:338 +#: src/wx-console/wxbrestorepanel.cpp:527 +#: src/wx-console/wxbrestorepanel.cpp:537 +#: src/wx-console/wxbrestorepanel.cpp:1834 +msgid "Pool" +msgstr "" + +#: src/dird/ua_run.c:226 +#, fuzzy +msgid "Verify Job" +msgstr "Weryfikacja OK" + +#: src/dird/ua_run.c:229 src/wx-console/wxbrestorepanel.cpp:349 +#: src/wx-console/wxbrestorepanel.cpp:1899 +#, fuzzy +msgid "Bootstrap" +msgstr "Bootstrap:" + +#: src/dird/ua_run.c:230 src/wx-console/wxbrestorepanel.cpp:350 +#: src/wx-console/wxbrestorepanel.cpp:1104 +#: src/wx-console/wxbrestorepanel.cpp:1901 +#, fuzzy +msgid "Where" +msgstr "Gdzie:" + +#: 
src/dird/ua_run.c:231 +#, fuzzy +msgid "File Relocation" +msgstr "Demon Plików" + +#: src/dird/ua_run.c:232 src/wx-console/wxbrestorepanel.cpp:352 +#: src/wx-console/wxbrestorepanel.cpp:1112 +#: src/wx-console/wxbrestorepanel.cpp:1905 +#: src/wx-console/wxbrestorepanel.cpp:1906 +#: src/wx-console/wxbrestorepanel.cpp:1907 +#: src/wx-console/wxbrestorepanel.cpp:1908 +#: src/wx-console/wxbrestorepanel.cpp:1909 +#, fuzzy +msgid "Replace" +msgstr "Zamiana:" + +#: src/dird/ua_run.c:233 +msgid "JobId" +msgstr "" + +#: src/dird/ua_run.c:236 +msgid "Plugin Options" +msgstr "" + +#: src/dird/ua_run.c:238 src/dird/ua_run.c:572 src/dird/ua_update.c:624 +#, fuzzy +msgid "Select parameter to modify" +msgstr "Wybierz rodzaj demona do zabicia" + +#: src/dird/ua_run.c:247 src/dird/ua_run.c:1354 +msgid "user selection" +msgstr "" + +#: src/dird/ua_run.c:279 +#, fuzzy +msgid "" +"Please enter desired start time as YYYY-MM-DD HH:MM:SS (return for now): " +msgstr "Wprowadź datę w formacie YYYY-MM-DD HH:MM:SS :" + +#: src/dird/ua_run.c:287 src/dird/ua_run.c:466 +msgid "Invalid time, using current time.\n" +msgstr "" + +#: src/dird/ua_run.c:294 +#, fuzzy +msgid "Enter new Priority: " +msgstr "Priorytet:" + +#: src/dird/ua_run.c:298 +msgid "Priority must be a positive integer.\n" +msgstr "" + +#: src/dird/ua_run.c:319 +#, fuzzy +msgid "Please enter the Bootstrap file name: " +msgstr "Nieudane stworzenie pliku bootstrap.\n" + +#: src/dird/ua_run.c:331 +#, fuzzy, c-format +msgid "Warning cannot open %s: ERR=%s\n" +msgstr " Nie można otworzyć %s: ERR=%s.\n" + +#: src/dird/ua_run.c:350 +msgid "Please enter path prefix for restore (/ for none): " +msgstr "" + +#: src/dird/ua_run.c:372 +#, fuzzy +msgid "Replace:\n" +msgstr "Zamiana:" + +#: src/dird/ua_run.c:376 +msgid "Select replace option" +msgstr "" + +#: src/dird/ua_run.c:387 +msgid "" +"You must set the bootstrap file to NULL to be able to specify a JobId.\n" +msgstr "" + +#: src/dird/ua_run.c:392 +msgid "Please Plugin Options string: " +msgstr "" + +#: src/dird/ua_run.c:430 src/dird/ua_run.c:439 +msgid "User input" +msgstr "" + +#: src/dird/ua_run.c:497 +#, fuzzy, c-format +msgid "Invalid replace option: %s\n" +msgstr "Niepoprawna komenda FileSet: %s\n" + +#: src/dird/ua_run.c:541 src/dird/ua_cmds.c:1238 +#, fuzzy, c-format +msgid "Level \"%s\" not valid.\n" +msgstr "Fileset \"%s\" nie znaleziony.\n" + +#: src/dird/ua_run.c:561 +#, c-format +msgid "strip_prefix=%s add_prefix=%s add_suffix=%s\n" +msgstr "" + +#: src/dird/ua_run.c:564 +msgid "This will replace your current Where value\n" +msgstr "" + +#: src/dird/ua_run.c:565 +msgid "Strip prefix" +msgstr "" + +#: src/dird/ua_run.c:566 +msgid "Add prefix" +msgstr "" + +#: src/dird/ua_run.c:567 +msgid "Add file suffix" +msgstr "" + +#: src/dird/ua_run.c:568 +msgid "Enter a regexp" +msgstr "" + +#: src/dird/ua_run.c:569 +msgid "Test filename manipulation" +msgstr "" + +#: src/dird/ua_run.c:570 +msgid "Use this ?" +msgstr "" + +#: src/dird/ua_run.c:575 +msgid "Please enter path prefix to strip: " +msgstr "" + +#: src/dird/ua_run.c:583 +msgid "Please enter path prefix to add (/ for none): " +msgstr "" + +#: src/dird/ua_run.c:594 +msgid "Please enter file suffix to add: " +msgstr "" + +#: src/dird/ua_run.c:601 +msgid "Please enter a valid regexp (!from!to!): " +msgstr "" + +#: src/dird/ua_run.c:614 +#, fuzzy, c-format +msgid "regexwhere=%s\n" +msgstr "Ze polecenie regexp where. 
where=%s\n" + +#: src/dird/ua_run.c:620 +#, c-format +msgid "strip_prefix=%s add_prefix=%s add_suffix=%s result=%s\n" +msgstr "" + +#: src/dird/ua_run.c:627 +msgid "Cannot use your regexp\n" +msgstr "" + +#: src/dird/ua_run.c:630 +msgid "Enter a period (.) to stop this test\n" +msgstr "" + +#: src/dird/ua_run.c:631 +#, fuzzy +msgid "Please enter filename to test: " +msgstr "Wprowadź nazwę pliku (bez ścieżki):" + +#: src/dird/ua_run.c:633 +#, c-format +msgid "%s -> %s\n" +msgstr "" + +#: src/dird/ua_run.c:677 +msgid "Cannot use your regexp.\n" +msgstr "" + +#: src/dird/ua_run.c:690 src/dird/ua_run.c:720 +msgid "Levels:\n" +msgstr "" + +#: src/dird/ua_run.c:696 +msgid "VirtualFull" +msgstr "" + +#: src/dird/ua_run.c:697 src/dird/ua_run.c:726 +#, fuzzy +msgid "Select level" +msgstr "Wybierz pozycję: " + +#: src/dird/ua_run.c:721 +#, fuzzy +msgid "Initialize Catalog" +msgstr "Inicjalizacja Katalogu" + +#: src/dird/ua_run.c:725 +#, fuzzy +msgid "Verify Volume Data (not yet implemented)" +msgstr "JobType jeszcze nie zaimplementowany\n" + +#: src/dird/ua_run.c:746 +msgid "Level not appropriate for this Job. Cannot be changed.\n" +msgstr "" + +#: src/dird/ua_run.c:760 +#, c-format +msgid "" +"Run %s job\n" +"JobName: %s\n" +"FileSet: %s\n" +"Client: %s\n" +"Storage: %s\n" +"When: %s\n" +"Priority: %d\n" +msgstr "" + +#: src/dird/ua_run.c:780 +#, c-format +msgid "" +"Run %s job\n" +"JobName: %s\n" +"Level: %s\n" +"Client: %s\n" +"FileSet: %s\n" +"Pool: %s (From %s)\n" +"Storage: %s (From %s)\n" +"When: %s\n" +"Priority: %d\n" +"%s%s%s" +msgstr "" + +#: src/dird/ua_run.c:816 +#, c-format +msgid "" +"Run %s job\n" +"JobName: %s\n" +"Level: %s\n" +"Client: %s\n" +"FileSet: %s\n" +"Pool: %s (From %s)\n" +"Storage: %s (From %s)\n" +"Verify Job: %s\n" +"Verify List: %s\n" +"When: %s\n" +"Priority: %d\n" +msgstr "" + +#: src/dird/ua_run.c:845 +#, fuzzy +msgid "Please enter a JobId for restore: " +msgstr "Proszę wskaż datę odtwarzania." 
+ +#: src/dird/ua_run.c:857 +#, c-format +msgid "" +"Run Restore job\n" +"JobName: %s\n" +"Bootstrap: %s\n" +"RegexWhere: %s\n" +"Replace: %s\n" +"FileSet: %s\n" +"Backup Client: %s\n" +"Restore Client: %s\n" +"Storage: %s\n" +"When: %s\n" +"Catalog: %s\n" +"Priority: %d\n" +"Plugin Options: %s\n" +msgstr "" + +#: src/dird/ua_run.c:884 +#, c-format +msgid "" +"Run Restore job\n" +"JobName: %s\n" +"Bootstrap: %s\n" +"Where: %s\n" +"Replace: %s\n" +"FileSet: %s\n" +"Backup Client: %s\n" +"Restore Client: %s\n" +"Storage: %s\n" +"When: %s\n" +"Catalog: %s\n" +"Priority: %d\n" +"Plugin Options: %s\n" +msgstr "" + +#: src/dird/ua_run.c:913 +#, c-format +msgid "" +"Run Restore job\n" +"JobName: %s\n" +"Bootstrap: %s\n" +msgstr "" + +#: src/dird/ua_run.c:921 +#, fuzzy, c-format +msgid "RegexWhere: %s\n" +msgstr "Reg: %s\n" + +#: src/dird/ua_run.c:924 +#, fuzzy, c-format +msgid "Where: %s\n" +msgstr "Data zapisana : %s\n" + +#: src/dird/ua_run.c:928 +#, fuzzy, c-format +msgid "" +"Replace: %s\n" +"Client: %s\n" +"Storage: %s\n" +"JobId: %s\n" +"When: %s\n" +"Catalog: %s\n" +"Priority: %d\n" +"Plugin Options: %s\n" +msgstr "" +"JobFiles : %s\n" +"JobBytes : %s\n" +"StartBlock : %s\n" +"EndBlock : %s\n" +"StartFile : %s\n" +"EndFile : %s\n" +"JobErrors : %s\n" +"JobStatus : %c\n" + +#: src/dird/ua_run.c:950 +#, fuzzy +msgid "Run Copy job\n" +msgstr "Uruchom job" + +#: src/dird/ua_run.c:952 +#, fuzzy +msgid "Run Migration job\n" +msgstr "Nie można rurchomi zadania migracji.\n" + +#: src/dird/ua_run.c:982 +#, fuzzy, c-format +msgid "Unknown Job Type=%d\n" +msgstr "Nieznany rodzaj zasobu %d\n" + +#: src/dird/ua_run.c:1046 +#, fuzzy, c-format +msgid "Value missing for keyword %s\n" +msgstr "Nieznane sowo kluczowe: %s\n" + +#: src/dird/ua_run.c:1053 +#, fuzzy +msgid "Job name specified twice.\n" +msgstr "Nie podano nazwy archiwum.\n" + +#: src/dird/ua_run.c:1061 +#, fuzzy +msgid "JobId specified twice.\n" +msgstr "Brak definicji zasobu Messages w %s\n" + +#: src/dird/ua_run.c:1070 src/dird/ua_run.c:1214 +#, fuzzy +msgid "Client specified twice.\n" +msgstr "określony czas uśpienia" + +#: src/dird/ua_run.c:1078 +#, fuzzy +msgid "FileSet specified twice.\n" +msgstr "określony czas uśpienia" + +#: src/dird/ua_run.c:1086 +#, fuzzy +msgid "Level specified twice.\n" +msgstr "określony czas uśpienia" + +#: src/dird/ua_run.c:1095 +#, fuzzy +msgid "Storage specified twice.\n" +msgstr "określony czas uśpienia" + +#: src/dird/ua_run.c:1103 +#, fuzzy +msgid "RegexWhere or Where specified twice.\n" +msgstr "Specyfikacja \"RegexWhere\" nie została autoryzowana.\n" + +#: src/dird/ua_run.c:1108 +#, fuzzy +msgid "No authorization for \"regexwhere\" specification.\n" +msgstr "Brak autoryzacji dla FileSet'u \"%s\"\n" + +#: src/dird/ua_run.c:1115 +#, fuzzy +msgid "Where or RegexWhere specified twice.\n" +msgstr "Specyfikacja \"RegexWhere\" nie została autoryzowana.\n" + +#: src/dird/ua_run.c:1120 +#, fuzzy +msgid "No authoriztion for \"where\" specification.\n" +msgstr "Brak autoryzacji dla FileSet'u \"%s\"\n" + +#: src/dird/ua_run.c:1127 +#, fuzzy +msgid "Bootstrap specified twice.\n" +msgstr "określony czas uśpienia" + +#: src/dird/ua_run.c:1135 +#, fuzzy +msgid "Replace specified twice.\n" +msgstr "określony czas uśpienia" + +#: src/dird/ua_run.c:1143 +#, fuzzy +msgid "When specified twice.\n" +msgstr "określony czas uśpienia" + +#: src/dird/ua_run.c:1151 +#, fuzzy +msgid "Priority specified twice.\n" +msgstr "określony czas uśpienia" + +#: src/dird/ua_run.c:1156 +msgid "Priority must be positive nonzero setting it to 
10.\n" +msgstr "" + +#: src/dird/ua_run.c:1166 +#, fuzzy +msgid "Verify Job specified twice.\n" +msgstr "Nie skazano zadania.\n" + +#: src/dird/ua_run.c:1198 +#, fuzzy +msgid "Migration Job specified twice.\n" +msgstr "Nie skazano zadania.\n" + +#: src/dird/ua_run.c:1206 +#, fuzzy +msgid "Pool specified twice.\n" +msgstr "określony czas uśpienia" + +#: src/dird/ua_run.c:1222 +#, fuzzy +msgid "Restore Client specified twice.\n" +msgstr "określony czas uśpienia" + +#: src/dird/ua_run.c:1229 +#, fuzzy +msgid "Plugin Options not yet implemented.\n" +msgstr "JobType jeszcze nie zaimplementowany\n" + +#: src/dird/ua_run.c:1232 +#, fuzzy +msgid "Plugin Options specified twice.\n" +msgstr "określony czas uśpienia" + +#: src/dird/ua_run.c:1237 +#, fuzzy +msgid "No authoriztion for \"PluginOptions\" specification.\n" +msgstr "Brak autoryzacji dla Klienta \"%s\"\n" + +#: src/dird/ua_run.c:1244 +#, fuzzy +msgid "Spool flag specified twice.\n" +msgstr "określony czas uśpienia" + +#: src/dird/ua_run.c:1251 +msgid "Invalid spooldata flag.\n" +msgstr "" + +#: src/dird/ua_run.c:1275 +#, fuzzy, c-format +msgid "Invalid keyword: %s\n" +msgstr "Nieznane sowo kluczowe: %s\n" + +#: src/dird/ua_run.c:1290 +#, fuzzy, c-format +msgid "Catalog \"%s\" not found\n" +msgstr "Wtyczka: \"%s\" nie znaleziona.\n" + +#: src/dird/ua_run.c:1294 +#, fuzzy, c-format +msgid "No authorization. Catalog \"%s\".\n" +msgstr "Brak autoryzacji dla Katalogu \"%s\"\n" + +#: src/dird/ua_run.c:1305 +#, fuzzy, c-format +msgid "Job \"%s\" not found\n" +msgstr "Nie znaleziono zadania %s.\n" + +#: src/dird/ua_run.c:1312 +#, fuzzy +msgid "A job name must be specified.\n" +msgstr "Nie skazano zadania.\n" + +#: src/dird/ua_run.c:1318 +#, fuzzy, c-format +msgid "No authorization. Job \"%s\".\n" +msgstr "Brak autoryzacji dla Zadania \"%s\"\n" + +#: src/dird/ua_run.c:1326 +#, fuzzy, c-format +msgid "Pool \"%s\" not found.\n" +msgstr "Wtyczka: \"%s\" nie znaleziona.\n" + +#: src/dird/ua_run.c:1336 +#, fuzzy, c-format +msgid "No authorization. Pool \"%s\".\n" +msgstr "Brak autoryzacji dla Zadania \"%s\"\n" + +#: src/dird/ua_run.c:1351 +#, fuzzy, c-format +msgid "Storage \"%s\" not found.\n" +msgstr "Fileset \"%s\" nie znaleziony.\n" + +#: src/dird/ua_run.c:1360 src/dird/job.c:1267 +#, fuzzy +msgid "No storage specified.\n" +msgstr "Nie skazano zadania.\n" + +#: src/dird/ua_run.c:1363 +#, fuzzy, c-format +msgid "No authorization. Storage \"%s\".\n" +msgstr "Brak autoryzacji dla Katalogu \"%s\"\n" + +#: src/dird/ua_run.c:1373 src/dird/ua_cmds.c:815 src/dird/ua_cmds.c:1183 +#, fuzzy, c-format +msgid "Client \"%s\" not found.\n" +msgstr "Fileset \"%s\" nie znaleziony.\n" + +#: src/dird/ua_run.c:1383 src/dird/ua_run.c:1403 +#, fuzzy, c-format +msgid "No authorization. Client \"%s\".\n" +msgstr "Brak autoryzacji dla Klienta \"%s\"\n" + +#: src/dird/ua_run.c:1393 +#, fuzzy, c-format +msgid "Restore Client \"%s\" not found.\n" +msgstr "Fileset \"%s\" nie znaleziony.\n" + +#: src/dird/ua_run.c:1413 +#, fuzzy, c-format +msgid "FileSet \"%s\" not found.\n" +msgstr "Fileset \"%s\" nie znaleziony.\n" + +#: src/dird/ua_run.c:1422 +#, fuzzy, c-format +msgid "No authorization. 
FileSet \"%s\".\n" +msgstr "Brak autoryzacji dla FileSet'u \"%s\"\n" + +#: src/dird/ua_run.c:1430 +#, fuzzy, c-format +msgid "Verify Job \"%s\" not found.\n" +msgstr "Nie znaleziono zadania %s.\n" + +#: src/dird/ua_run.c:1440 +#, fuzzy, c-format +msgid "Migration Job \"%s\" not found.\n" +msgstr "Wtyczka: \"%s\" nie znaleziona.\n" + +#: src/dird/authenticate.c:113 +msgid "Director and Storage daemon passwords or names not the same.\n" +msgstr "" + +#: src/dird/authenticate.c:115 +#, fuzzy, c-format +msgid "" +"Director unable to authenticate with Storage daemon at \"%s:%d\". Possible " +"causes:\n" +"Passwords or names not the same or\n" +"Maximum Concurrent Jobs exceeded on the SD or\n" +"SD networking messed up (restart daemon).\n" +"Please see http://www.bacula.org/en/rel-manual/Bacula_Freque_Asked_Questi." +"html#SECTION003760000000000000000 for help.\n" +msgstr "" +"Hasła demonów Dyrektora i Przechowywania nie są takie same.\n" +"Proszę zobacz http://www.bacula.org/en/rel-manual/Bacula_Freque_Asked_Questi." +"html#SECTION003760000000000000000 dla uzyskania pomocy.\n" + +#: src/dird/authenticate.c:143 +#, fuzzy, c-format +msgid "TLS negotiation failed with SD at \"%s:%d\"\n" +msgstr "Negocjacje TLS nie powiodły się\n" + +#: src/dird/authenticate.c:155 +#, fuzzy, c-format +msgid "bdird Cel=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:575 src/dird/dird_conf.c:595 src/dird/dird_conf.c:609 +#: src/dird/dird_conf.c:668 src/dird/dird_conf.c:672 src/dird/dird_conf.c:676 +#: src/dird/dird_conf.c:706 src/dird/dird_conf.c:729 src/dird/dird_conf.c:733 +#: src/dird/dird_conf.c:737 src/dird/dird_conf.c:741 src/dird/dird_conf.c:745 +#: src/dird/dird_conf.c:758 src/dird/dird_conf.c:985 src/dird/dird_conf.c:992 +msgid " --> " +msgstr "" + +#: src/dird/dird_conf.c:580 +#, fuzzy, c-format +msgid "Console: name=%s SSL=%d\n" +msgstr "ConsoleFont: nazwa=%s krój czcionki=%s\n" + +#: src/dird/dird_conf.c:585 +#, c-format +msgid "Counter: name=%s min=%d max=%d cur=%d wrapcntr=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:590 +#, fuzzy, c-format +msgid "Counter: name=%s min=%d max=%d\n" +msgstr "ConsoleFont: nazwa=%s krój czcionki=%s\n" + +#: src/dird/dird_conf.c:601 +#, fuzzy, c-format +msgid "Client: name=%s address=%s FDport=%d MaxJobs=%u\n" +msgstr "Klient: nazwa=%s adres=%s FDport=%d\n" + +#: src/dird/dird_conf.c:604 +#, c-format +msgid " JobRetention=%s FileRetention=%s AutoPrune=%d\n" +msgstr "" + +#: src/dird/dird_conf.c:617 +#, c-format +msgid "" +"Device: name=%s ok=%d num_writers=%d max_writers=%d\n" +" reserved=%d open=%d append=%d read=%d labeled=%d offline=%d autochgr=%" +"d\n" +" poolid=%s volname=%s MediaType=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:628 +#, c-format +msgid "" +"Storage: name=%s address=%s SDport=%d MaxJobs=%u\n" +" DeviceName=%s MediaType=%s StorageId=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:638 +#, c-format +msgid "" +"Catalog: name=%s address=%s DBport=%d db_name=%s\n" +" db_driver=%s db_user=%s MutliDBConn=%d\n" +msgstr "" + +#: src/dird/dird_conf.c:648 +#, c-format +msgid "%s: name=%s JobType=%d level=%s Priority=%d Enabled=%d\n" +msgstr "" + +#: src/dird/dird_conf.c:649 +msgid "JobDefs" +msgstr "" + +#: src/dird/dird_conf.c:653 +#, c-format +msgid "" +" MaxJobs=%u Resched=%d Times=%d Interval=%s Spool=%d WritePartAfterJob=%" +"d\n" +msgstr "" + +#: src/dird/dird_conf.c:659 +#, c-format +msgid " SpoolSize=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:662 +#, c-format +msgid " Accurate=%d\n" +msgstr "" + +#: src/dird/dird_conf.c:665 +#, fuzzy, c-format +msgid " SelectionType=%d\n" +msgstr 
"Nieznany Typ Selekcji %s.\n" + +#: src/dird/dird_conf.c:680 +#, fuzzy, c-format +msgid " --> Where=%s\n" +msgstr " --> Cel=%s\n" + +#: src/dird/dird_conf.c:683 +#, fuzzy, c-format +msgid " --> RegexWhere=%s\n" +msgstr " --> Cel=%s\n" + +#: src/dird/dird_conf.c:686 +#, fuzzy, c-format +msgid " --> Bootstrap=%s\n" +msgstr "Bootstrap:" + +#: src/dird/dird_conf.c:689 +#, fuzzy, c-format +msgid " --> WriteBootstrap=%s\n" +msgstr " --> Cel=%s\n" + +#: src/dird/dird_conf.c:692 +#, c-format +msgid " --> PluginOptions=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:695 +#, fuzzy, c-format +msgid " --> MaxRunTime=%u\n" +msgstr " --> Komenda=%s\n" + +#: src/dird/dird_conf.c:698 +#, c-format +msgid " --> MaxWaitTime=%u\n" +msgstr "" + +#: src/dird/dird_conf.c:701 +#, c-format +msgid " --> MaxStartDelay=%u\n" +msgstr "" + +#: src/dird/dird_conf.c:713 +#, fuzzy, c-format +msgid " --> Base %s\n" +msgstr " --> Cel=%s\n" + +#: src/dird/dird_conf.c:751 +#, fuzzy, c-format +msgid " --> Run=%s\n" +msgstr " --> Komenda=%s\n" + +#: src/dird/dird_conf.c:755 +#, c-format +msgid " --> SelectionPattern=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:766 +#, fuzzy, c-format +msgid "FileSet: name=%s\n" +msgstr "ConsoleFont: nazwa=%s krj czcionki=%s\n" + +#: src/dird/dird_conf.c:857 src/dird/dird_conf.c:936 +#, fuzzy, c-format +msgid "Schedule: name=%s\n" +msgstr "ConsoleFont: nazwa=%s krj czcionki=%s\n" + +#: src/dird/dird_conf.c:862 +#, fuzzy, c-format +msgid " --> Run Level=%s\n" +msgstr " --> Komenda=%s\n" + +#: src/dird/dird_conf.c:863 +msgid " hour=" +msgstr "" + +#: src/dird/dird_conf.c:872 +msgid " mday=" +msgstr "" + +#: src/dird/dird_conf.c:881 +msgid " month=" +msgstr "" + +#: src/dird/dird_conf.c:890 +msgid " wday=" +msgstr "" + +#: src/dird/dird_conf.c:899 +msgid " wom=" +msgstr "" + +#: src/dird/dird_conf.c:908 +msgid " woy=" +msgstr "" + +#: src/dird/dird_conf.c:917 +#, fuzzy, c-format +msgid " mins=%d\n" +msgstr " Zbadanych Plików=%s\n" + +#: src/dird/dird_conf.c:919 src/dird/dird_conf.c:923 src/dird/dird_conf.c:927 +msgid " --> " +msgstr "" + +#: src/dird/dird_conf.c:941 +#, fuzzy, c-format +msgid "Pool: name=%s PoolType=%s\n" +msgstr "ConsoleFont: nazwa=%s krj czcionki=%s\n" + +#: src/dird/dird_conf.c:943 +#, c-format +msgid " use_cat=%d use_once=%d cat_files=%d\n" +msgstr "" + +#: src/dird/dird_conf.c:946 +#, c-format +msgid " max_vols=%d auto_prune=%d VolRetention=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:949 +#, c-format +msgid " VolUse=%s recycle=%d LabelFormat=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:953 +#, c-format +msgid " CleaningPrefix=%s LabelType=%d\n" +msgstr "" + +#: src/dird/dird_conf.c:955 +#, c-format +msgid " RecyleOldest=%d PurgeOldest=%d ActionOnPurge=%d\n" +msgstr "" + +#: src/dird/dird_conf.c:959 +#, c-format +msgid " MaxVolJobs=%d MaxVolFiles=%d MaxVolBytes=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:963 +#, fuzzy, c-format +msgid " MigTime=%s MigHiBytes=%s MigLoBytes=%s\n" +msgstr " Plików=%s Bajtów=%s Bajtów/sek=%s Błędów=%d\n" + +#: src/dird/dird_conf.c:967 +#, c-format +msgid " JobRetention=%s FileRetention=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:971 +#, c-format +msgid " NextPool=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:974 +#, c-format +msgid " RecyclePool=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:977 +#, c-format +msgid " ScratchPool=%s\n" +msgstr "" + +#: src/dird/dird_conf.c:980 +#, fuzzy, c-format +msgid " Catalog=%s\n" +msgstr "Używam Katalogu \"%s\"\n" + +#: src/dird/dird_conf.c:1000 +#, fuzzy, c-format +msgid "Messages: name=%s\n" +msgstr "ConsoleFont: 
nazwa=%s krj czcionki=%s\n" + +#: src/dird/dird_conf.c:1002 +#, fuzzy, c-format +msgid " mailcmd=%s\n" +msgstr " Zbadanych Plików=%s\n" + +#: src/dird/dird_conf.c:1004 +#, fuzzy, c-format +msgid " opcmd=%s\n" +msgstr " --> Komenda=%s\n" + +#: src/dird/dird_conf.c:1417 +#, fuzzy, c-format +msgid "Cannot find Pool resource %s\n" +msgstr "Nie można znale zasobu Client %s\n" + +#: src/dird/dird_conf.c:1428 +#, fuzzy, c-format +msgid "Cannot find Console resource %s\n" +msgstr "Nie można znale zasobu Client %s\n" + +#: src/dird/dird_conf.c:1441 src/stored/stored_conf.c:613 +#, fuzzy, c-format +msgid "Cannot find Storage resource %s\n" +msgstr "Nie można znale zasobu Director %s\n" + +#: src/dird/dird_conf.c:1450 +#, fuzzy, c-format +msgid "Cannot find Job resource %s\n" +msgstr "Nie można znale zasobu Director %s\n" + +#: src/dird/dird_conf.c:1500 +#, fuzzy, c-format +msgid "Cannot find Counter resource %s\n" +msgstr "Nie można znale zasobu Client %s\n" + +#: src/dird/dird_conf.c:1521 +#, fuzzy, c-format +msgid "Cannot find Schedule resource %s\n" +msgstr "Nie można znale zasobu Client %s\n" + +#: src/dird/dird_conf.c:1601 +#, fuzzy, c-format +msgid "Name item is required in %s resource, but not found.\n" +msgstr "Element %s jest wymagany w zasobie %s, lecz nie został znaleziony.\n" + +#: src/dird/dird_conf.c:1614 +#, c-format +msgid "Inserting %s res: %s index=%d pass=%d\n" +msgstr "" + +#: src/dird/dird_conf.c:1627 +#, fuzzy, c-format +msgid "Expected one of: %s, got: %s" +msgstr "Oczekiwano ciągu znaków fstype, otrzymano: %s\n" + +#: src/dird/dird_conf.c:1700 +#, fuzzy, c-format +msgid "Expected a Migration Job Type keyword, got: %s" +msgstr "Niepoprawna komenda FileSet: %s\n" + +#: src/dird/dird_conf.c:1726 +#, fuzzy, c-format +msgid "Expected a Job Type keyword, got: %s" +msgstr "Niepoprawna komenda FileSet: %s\n" + +#: src/dird/dird_conf.c:1750 +#, fuzzy, c-format +msgid "Expected a Job Level keyword, got: %s" +msgstr "Niepoprawna komenda FileSet: %s\n" + +#: src/dird/dird_conf.c:1770 +#, fuzzy, c-format +msgid "Expected a Restore replacement option, got: %s" +msgstr "Oczekiwano ciągu znaków fstype, otrzymano: %s\n" + +#: src/dird/dird_conf.c:1976 src/dird/inc_conf.c:726 +#, fuzzy, c-format +msgid "Expecting open brace. 
Got %s" +msgstr "Oczekiwano nazwy pliku, otrzymano: %s" + +#: src/dird/dird_conf.c:1990 src/dird/inc_conf.c:392 src/dird/inc_conf.c:741 +#, fuzzy, c-format +msgid "Expecting keyword, got: %s\n" +msgstr "Niepoprawna komenda FileSet: %s\n" + +#: src/dird/dird_conf.c:2007 src/dird/inc_conf.c:410 src/dird/inc_conf.c:756 +#, c-format +msgid "Keyword %s not permitted in this resource" +msgstr "" + +#: src/dird/ua_tree.c:75 +msgid "add dir/file to be restored recursively, wildcards allowed" +msgstr "" + +#: src/dird/ua_tree.c:76 +#, fuzzy +msgid "change current directory" +msgstr "Nie można pobra aktualnego katalogu: ERR=%s\n" + +#: src/dird/ua_tree.c:77 +msgid "count marked files in and below the cd" +msgstr "" + +#: src/dird/ua_tree.c:78 +msgid "delete dir/file to be restored recursively in dir" +msgstr "" + +#: src/dird/ua_tree.c:79 src/dird/ua_tree.c:80 +msgid "long list current directory, wildcards allowed" +msgstr "" + +#: src/dird/ua_tree.c:81 +msgid "leave file selection mode" +msgstr "" + +#: src/dird/ua_tree.c:82 +#, fuzzy +msgid "estimate restore size" +msgstr "Anulowane" + +#: src/dird/ua_tree.c:83 +#, fuzzy +msgid "same as done command" +msgstr "Za komenda session: %s" + +#: src/dird/ua_tree.c:84 +msgid "find files, wildcards allowed" +msgstr "" + +#: src/dird/ua_tree.c:85 src/dird/ua_tree.c:98 src/dird/ua_tree.c:99 +msgid "print help" +msgstr "" + +#: src/dird/ua_tree.c:86 src/dird/ua_tree.c:87 +msgid "list current directory, wildcards allowed" +msgstr "" + +#: src/dird/ua_tree.c:88 +msgid "list subdir in current directory, wildcards allowed" +msgstr "" + +#: src/dird/ua_tree.c:89 +msgid "list the marked files in and below the cd" +msgstr "" + +#: src/dird/ua_tree.c:90 +msgid "list the marked files in" +msgstr "" + +#: src/dird/ua_tree.c:91 +msgid "mark dir/file to be restored recursively, wildcards allowed" +msgstr "" + +#: src/dird/ua_tree.c:92 +msgid "mark directory name to be restored (no files)" +msgstr "" + +#: src/dird/ua_tree.c:93 src/dird/ua_tree.c:94 +#, fuzzy +msgid "print current working directory" +msgstr "wyświetl aktualny czas" + +#: src/dird/ua_tree.c:95 +msgid "unmark dir/file to be restored recursively in dir" +msgstr "" + +#: src/dird/ua_tree.c:96 +msgid "unmark directory name only no recursion" +msgstr "" + +#: src/dird/ua_tree.c:97 +msgid "quit and do not do restore" +msgstr "" + +#: src/dird/ua_tree.c:119 +msgid "" +"\n" +"You are now entering file selection mode where you add (mark) and\n" +"remove (unmark) files to be restored. No files are initially added, unless\n" +"you used the \"all\" keyword on the command line.\n" +"Enter \"done\" to leave this mode.\n" +"\n" +msgstr "" + +#: src/dird/ua_tree.c:130 src/dird/ua_tree.c:790 +#, fuzzy, c-format +msgid "cwd is: %s\n" +msgstr " Przetworzonych plikw: %s\n" + +#: src/dird/ua_tree.c:139 src/dird/ua_tree.c:154 +#, c-format +msgid "Invalid command \"%s\". 
Enter \"done\" to exit.\n" +msgstr "" + +#: src/dird/ua_tree.c:343 src/dird/ua_tree.c:355 src/dird/ua_tree.c:372 +#, fuzzy +msgid "No files marked.\n" +msgstr "Znacznik konca pliku.\n" + +#: src/dird/ua_tree.c:357 +#, fuzzy +msgid "1 file marked.\n" +msgstr "Znacznik konca pliku.\n" + +#: src/dird/ua_tree.c:359 +#, fuzzy, c-format +msgid "%s files marked.\n" +msgstr "%u przywróconych plików.\n" + +#: src/dird/ua_tree.c:387 +msgid "No directories marked.\n" +msgstr "" + +#: src/dird/ua_tree.c:389 +#, fuzzy +msgid "1 directory marked.\n" +msgstr "Wprowadź nazwę katalogu: " + +#: src/dird/ua_tree.c:391 +#, c-format +msgid "%s directories marked.\n" +msgstr "" + +#: src/dird/ua_tree.c:412 +#, fuzzy, c-format +msgid "%s total files/dirs. %s marked to be restored.\n" +msgstr "" +"\n" +"Do przywrócenia wybrano %s plików.\n" +"\n" + +#: src/dird/ua_tree.c:423 +#, fuzzy +msgid "No file specification given.\n" +msgstr "Brak definicji zasobu Messages w %s\n" + +#: src/dird/ua_tree.c:634 +#, fuzzy, c-format +msgid "Node %s has no children.\n" +msgstr "Wolumen do Katalogu" + +#: src/dird/ua_tree.c:727 +#, fuzzy, c-format +msgid "%d total files; %d marked to be restored; %s bytes.\n" +msgstr "Nieznany typ pliku %d; nie odtworzono: %s\n" + +#: src/dird/ua_tree.c:738 src/stored/btape.c:2936 +#, fuzzy, c-format +msgid "" +" Command Description\n" +" ======= ===========\n" +msgstr "" +" Komenda Opis\n" +" ======= ===========\n" + +#: src/dird/ua_tree.c:761 +#, fuzzy +msgid "Too few or too many arguments. Try using double quotes.\n" +msgstr "Zbyt wiele argumentów. Otocz polecenie podwójnym cudzysłowem.\n" + +#: src/dird/ua_tree.c:773 +#, fuzzy +msgid "Invalid path given.\n" +msgstr "Nieprawidłowy JobId na liście.\n" + +#: src/dird/ua_tree.c:809 src/dird/ua_tree.c:821 +#, fuzzy +msgid "No files unmarked.\n" +msgstr "Znacznik konca pliku.\n" + +#: src/dird/ua_tree.c:823 +#, fuzzy +msgid "1 file unmarked.\n" +msgstr "Znacznik konca pliku.\n" + +#: src/dird/ua_tree.c:826 +#, fuzzy, c-format +msgid "%s files unmarked.\n" +msgstr "%u przywróconych plików.\n" + +#: src/dird/ua_tree.c:837 src/dird/ua_tree.c:854 +msgid "No directories unmarked.\n" +msgstr "" + +#: src/dird/ua_tree.c:856 +#, fuzzy +msgid "1 directory unmarked.\n" +msgstr "Wprowadź nazwę katalogu: " + +#: src/dird/ua_tree.c:858 +#, c-format +msgid "%d directories unmarked.\n" +msgstr "" + +#: src/dird/ua_select.c:54 +#, c-format +msgid "The current %s retention period is: %s\n" +msgstr "" + +#: src/dird/ua_select.c:59 +msgid "Continue? 
(yes/mod/no): " +msgstr "" + +#: src/dird/ua_select.c:63 +#, fuzzy +msgid "Enter new retention period: " +msgstr "Wprowadź długość do odczytu: " + +#: src/dird/ua_select.c:67 +#, fuzzy +msgid "Invalid period.\n" +msgstr "Nieprawidłowy JobId na liście.\n" + +#: src/dird/ua_select.c:143 +#, fuzzy +msgid "You have the following choices:\n" +msgstr "Możesz wybrać identyfikatory JobId z nastepującej listy:\n" + +#: src/dird/ua_select.c:159 +#, fuzzy +msgid "The defined Storage resources are:\n" +msgstr "Dozwolony tylko jeden zasób Client w %s\n" + +#: src/dird/ua_select.c:167 +#, fuzzy +msgid "Select Storage resource" +msgstr "Nie można otrzyma zasobu storage '%s'.\n" + +#: src/dird/ua_select.c:182 src/dird/ua_restore.c:1243 +#, fuzzy +msgid "The defined FileSet resources are:\n" +msgstr "Nie można znale zasobu Client %s\n" + +#: src/dird/ua_select.c:190 src/dird/ua_restore.c:1247 +#, fuzzy +msgid "Select FileSet resource" +msgstr "Zasób Klienta" + +#: src/dird/ua_select.c:220 src/dird/ua_cmds.c:2067 +#, fuzzy +msgid "Could not find a Catalog resource\n" +msgstr "Nie można znale zasobu Client %s\n" + +#: src/dird/ua_select.c:223 +msgid "You must specify a \"use \" command before continuing.\n" +msgstr "" + +#: src/dird/ua_select.c:229 +msgid "The defined Catalog resources are:\n" +msgstr "" + +#: src/dird/ua_select.c:237 +#, fuzzy +msgid "Catalog" +msgstr "Inicjalizacja Katalogu" + +#: src/dird/ua_select.c:237 +#, fuzzy +msgid "Select Catalog resource" +msgstr "Zasób Klienta" + +#: src/dird/ua_select.c:255 src/dird/ua_select.c:281 +#, fuzzy +msgid "The defined Job resources are:\n" +msgstr "Zbyt duo elementw w zasobie Zadania\n" + +#: src/dird/ua_select.c:266 src/dird/ua_select.c:289 +#, fuzzy +msgid "Select Job resource" +msgstr "Zasób Klienta" + +#: src/dird/ua_select.c:304 +#, fuzzy +msgid "The defined Restore Job resources are:\n" +msgstr "Usuwanie %d wpisów zadań Przywrócenia.\n" + +#: src/dird/ua_select.c:312 +#, fuzzy +msgid "Select Restore Job" +msgstr "Uruchom Zadanie Przywracania %s\n" + +#: src/dird/ua_select.c:329 +#, fuzzy +msgid "The defined Client resources are:\n" +msgstr "Nie można znale zasobu Client %s\n" + +#: src/dird/ua_select.c:337 +msgid "Select Client (File daemon) resource" +msgstr "" + +#: src/dird/ua_select.c:364 +#, fuzzy, c-format +msgid "Error: Client resource %s does not exist.\n" +msgstr "Zasób Puli \"%s\" nie znaleziony.\n" + +#: src/dird/ua_select.c:389 +#, fuzzy, c-format +msgid "Could not find Client %s: ERR=%s" +msgstr "Nie można stworzyć %s: ERR=%s\n" + +#: src/dird/ua_select.c:399 src/dird/ua_select.c:453 +#, fuzzy, c-format +msgid "Could not find Client \"%s\": ERR=%s" +msgstr " Nie można poda za dowiązaniem \"%s\": ERR=%s\n" + +#: src/dird/ua_select.c:428 +#, fuzzy, c-format +msgid "Error obtaining client ids. ERR=%s\n" +msgstr "Błąd aktualizacji rekordu Klienta. ERR=%s\n" + +#: src/dird/ua_select.c:432 +msgid "No clients defined. You must run a job before using this command.\n" +msgstr "" + +#: src/dird/ua_select.c:436 +msgid "Defined Clients:\n" +msgstr "" + +#: src/dird/ua_select.c:446 +#, fuzzy +msgid "Select the Client" +msgstr "Wybierz pozycję: " + +#: src/dird/ua_select.c:479 src/dird/ua_select.c:503 src/dird/ua_select.c:548 +#, fuzzy, c-format +msgid "Could not find Pool \"%s\": ERR=%s" +msgstr " Nie można poda za dowiązaniem \"%s\": ERR=%s\n" + +#: src/dird/ua_select.c:518 +msgid "No pools defined. 
Use the \"create\" command to create one.\n" +msgstr "" + +#: src/dird/ua_select.c:522 +msgid "Defined Pools:\n" +msgstr "Zdefiniowane Pule:\n" + +#: src/dird/ua_select.c:535 +msgid "Select the Pool" +msgstr "Wybierz Pulę" + +#: src/dird/ua_select.c:573 +#, c-format +msgid "No access to Pool \"%s\"\n" +msgstr "Brak dostępu do Puli \"%s\"\n" + +#: src/dird/ua_select.c:599 +msgid "Enter *MediaId or Volume name: " +msgstr "Wprowadź *MediaId lub nazwę Wolumenu: " + +#: src/dird/ua_select.c:625 +msgid "The defined Pool resources are:\n" +msgstr "" + +#: src/dird/ua_select.c:633 +msgid "Select Pool resource" +msgstr "Wybierz zasób Puli" + +#: src/dird/ua_select.c:657 src/dird/ua_restore.c:537 +#, fuzzy, c-format +msgid "Error: Pool resource \"%s\" does not exist.\n" +msgstr "Zasób Puli \"%s\" nie znaleziony.\n" + +#: src/dird/ua_select.c:668 +#, fuzzy +msgid "Enter the JobId to select: " +msgstr "Wprowadź JobId do usunięcia: " + +#: src/dird/ua_select.c:706 +#, fuzzy, c-format +msgid "Could not find Job \"%s\": ERR=%s" +msgstr "Nie można zainicjować kolejki zada: ERR=%s\n" + +#: src/dird/ua_select.c:786 +#, c-format +msgid "Automatically selected %s: %s\n" +msgstr "" + +#: src/dird/ua_select.c:797 +#, c-format +msgid "" +"Your request has multiple choices for \"%s\". Selection is not possible in " +"batch mode.\n" +msgstr "" + +#: src/dird/ua_select.c:815 +#, c-format +msgid "Selection list for \"%s\" is empty!\n" +msgstr "" + +#: src/dird/ua_select.c:821 +#, c-format +msgid "Automatically selected: %s\n" +msgstr "" + +#: src/dird/ua_select.c:833 +#, fuzzy +msgid "Selection aborted, nothing done.\n" +msgstr "Przywracanie niewykonane.\n" + +#: src/dird/ua_select.c:838 +#, fuzzy, c-format +msgid "Please enter a number between 1 and %d\n" +msgstr "Musisz wybrać liczbę między 1 a %d\n" + +#: src/dird/ua_select.c:887 +msgid "Storage name given twice.\n" +msgstr "" + +#: src/dird/ua_select.c:904 +#, fuzzy, c-format +msgid "Expecting jobid=nn command, got: %s\n" +msgstr "Oczekiwano ciągu znaków wild-card, otrzymano: %s\n" + +#: src/dird/ua_select.c:908 +#, fuzzy, c-format +msgid "JobId %s is not running.\n" +msgstr "JobId %d Zadanie %s jest uruchomione.\n" + +#: src/dird/ua_select.c:918 +#, fuzzy, c-format +msgid "Expecting job=xxx, got: %s.\n" +msgstr "Oczekiwano ciągu znaków regex, otrzymano: %s\n" + +#: src/dird/ua_select.c:922 src/dird/ua_select.c:934 +#, fuzzy, c-format +msgid "Job \"%s\" is not running.\n" +msgstr "JobId %d Zadanie %s jest uruchomione.\n" + +#: src/dird/ua_select.c:930 +#, c-format +msgid "Expecting ujobid=xxx, got: %s.\n" +msgstr "" + +#: src/dird/ua_select.c:950 +#, fuzzy, c-format +msgid "Storage resource \"%s\": not found\n" +msgstr "Zasób Puli \"%s\" nie znaleziony.\n" + +#: src/dird/ua_select.c:982 +#, fuzzy +msgid "Enter autochanger drive[0]: " +msgstr "przetestuj bibliotekę taśmową" + +#: src/dird/ua_select.c:1003 +#, fuzzy +msgid "Enter autochanger slot: " +msgstr "przetestuj bibliotekę taśmową" + +#: src/dird/ua_select.c:1033 +#, fuzzy +msgid "Media Types defined in conf file:\n" +msgstr "Brak definicji zasobu Dyrektora w %s\n" + +#: src/dird/ua_select.c:1039 +#, fuzzy +msgid "Media Type" +msgstr " MediaType : %s\n" + +#: src/dird/ua_select.c:1039 +msgid "Select the Media Type" +msgstr "" + +#: src/dird/ua_status.c:175 +msgid "Status available for:\n" +msgstr "" + +#: src/dird/ua_status.c:181 +#, fuzzy +msgid "Select daemon type for status" +msgstr "Wybierz rodzaj demona do zabicia" + +#: src/dird/ua_status.c:295 src/stored/status.c:219 +#, fuzzy, c-format +msgid "%s 
Version: %s (%s) %s %s %s\n" +msgstr "%s Wersja: %s (%s) %s %s %s %s\n" + +#: src/dird/ua_status.c:299 +#, fuzzy, c-format +msgid "Daemon started %s, 1 Job run since started.\n" +msgstr "Demon wystartowany %s, %d Zada %s uruchomionych od wystartowania.\n" + +#: src/dird/ua_status.c:302 +#, fuzzy, c-format +msgid "Daemon started %s, %d Jobs run since started.\n" +msgstr "Demon wystartowany %s, %d Zada %s uruchomionych od wystartowania.\n" + +#: src/dird/ua_status.c:357 src/dird/ua_cmds.c:882 src/dird/ua_cmds.c:1604 +#: src/dird/job.c:151 src/dird/job.c:157 src/dird/job.c:1184 +#: src/dird/job.c:1188 src/dird/ua_dotcmds.c:373 +#, fuzzy +msgid "unknown source" +msgstr "Nieznany rodzaj zasobu %d\n" + +#: src/dird/ua_status.c:360 src/dird/ua_cmds.c:885 src/dird/ua_dotcmds.c:376 +#, fuzzy, c-format +msgid "Connecting to Storage daemon %s at %s:%d\n" +msgstr "Podłączenie do Storage %s:%d\n" + +#: src/dird/ua_status.c:363 +#, fuzzy, c-format +msgid "" +"\n" +"Failed to connect to Storage daemon %s.\n" +"====\n" +msgstr "Nieudane podłączenie do demona Skadowania: %s:%d\n" + +#: src/dird/ua_status.c:371 src/dird/ua_cmds.c:891 src/dird/ua_dotcmds.c:382 +#, fuzzy +msgid "Connected to storage daemon\n" +msgstr "Nie można skontaktowa się z demonem Skadowania\n" + +#: src/dird/ua_status.c:402 src/dird/ua_cmds.c:911 src/dird/ua_cmds.c:1301 +#: src/dird/ua_dotcmds.c:402 +#, fuzzy, c-format +msgid "Connecting to Client %s at %s:%d\n" +msgstr "Podłączenie do Klienta %s:%d\n" + +#: src/dird/ua_status.c:405 +#, fuzzy, c-format +msgid "" +"Failed to connect to Client %s.\n" +"====\n" +msgstr "Nieudane podłączenie do demona Plików.\n" + +#: src/dird/ua_status.c:413 +#, fuzzy +msgid "Connected to file daemon\n" +msgstr "Nieudane podłączenie do demona Plików.\n" + +#: src/dird/ua_status.c:433 +#, fuzzy +msgid "" +"\n" +"Scheduled Jobs:\n" +msgstr "" +"\n" +"Zakoczonych Zada:\n" + +#: src/dird/ua_status.c:434 +msgid "" +"Level Type Pri Scheduled Name Volume\n" +msgstr "" + +#: src/dird/ua_status.c:435 +#, fuzzy +msgid "===================================================================================\n" +msgstr "======================================================================\n" + +#: src/dird/ua_status.c:492 +#, fuzzy, c-format +msgid "%-14s\t%-8s\t%3d\t%-18s\t%-18s\t%s\n" +msgstr "%6d\t%-6s\t%8s\t%10s\t%-7s\t%-8s\t%s\n" + +#: src/dird/ua_status.c:496 +#, fuzzy, c-format +msgid "%-14s %-8s %3d %-18s %-18s %s\n" +msgstr "%6d %-6s %8s %10s %-7s %-8s %s\n" + +#: src/dird/ua_status.c:550 +msgid "Ignoring invalid value for days. 
Max is 500.\n" +msgstr "" + +#: src/dird/ua_status.c:593 +msgid "No Scheduled Jobs.\n" +msgstr "" + +#: src/dird/ua_status.c:618 +#, fuzzy, c-format +msgid "Console connected at %s\n" +msgstr "Dyrektor podłączony o: %s\n" + +#: src/dird/ua_status.c:628 +#, fuzzy +msgid "" +"No Jobs running.\n" +"====\n" +msgstr "Żadne Zadanie nie uruchomione.\n" + +#: src/dird/ua_status.c:634 +#, fuzzy +msgid " JobId Level Name Status\n" +msgstr " JobId Poziom Pliki Bajty Status Zakończone Nazwa \n" + +#: src/dird/ua_status.c:644 +msgid "is waiting execution" +msgstr "" + +#: src/dird/ua_status.c:647 +#, fuzzy +msgid "is running" +msgstr "Żadne Zadanie nie uruchomione.\n" + +#: src/dird/ua_status.c:650 +#, fuzzy +msgid "is blocked" +msgstr "Nieudane odszyfrowanie klucza sesji.\n" + +#: src/dird/ua_status.c:653 +#, fuzzy +msgid "has terminated" +msgstr "Status zadania: Zakończone" + +#: src/dird/ua_status.c:656 +#, fuzzy +msgid "has terminated with warnings" +msgstr "Przywracanie OK -- z ostrzeżeniami" + +#: src/dird/ua_status.c:659 +#, fuzzy +msgid "has erred" +msgstr "były" + +#: src/dird/ua_status.c:662 +#, fuzzy +msgid "has errors" +msgstr " (%d błędów)" + +#: src/dird/ua_status.c:665 +#, fuzzy +msgid "has a fatal error" +msgstr "Status zadania: Błąd krytyczny" + +#: src/dird/ua_status.c:668 +#, fuzzy +msgid "has verify differences" +msgstr "Różnice przy weryfikacji" + +#: src/dird/ua_status.c:671 +#, fuzzy +msgid "has been canceled" +msgstr "Status zadania: Anulowane" + +#: src/dird/ua_status.c:676 +#, fuzzy +msgid "is waiting on Client" +msgstr "Status zadania: Oczekiwanie na zasoby Klienta" + +#: src/dird/ua_status.c:678 +#, fuzzy, c-format +msgid "is waiting on Client %s" +msgstr "Rozłączanie od Klienta %s:%d\n" + +#: src/dird/ua_status.c:686 src/dird/ua_status.c:688 +#, fuzzy, c-format +msgid "is waiting on Storage %s" +msgstr "Rozłączanie od Storage %s:%d\n" + +#: src/dird/ua_status.c:690 +#, fuzzy +msgid "is waiting on Storage" +msgstr "Status zadania: Oczekiwanie na demon Przechowywania" + +#: src/dird/ua_status.c:696 +#, fuzzy +msgid "is waiting on max Storage jobs" +msgstr "Status zadania: Oczekiwanie na demon Przechowywania" + +#: src/dird/ua_status.c:699 +#, fuzzy +msgid "is waiting on max Client jobs" +msgstr "Status zadania: Oczekiwanie ze względu na maksymalną ilość zadań" + +#: src/dird/ua_status.c:702 +#, fuzzy +msgid "is waiting on max Job jobs" +msgstr "Status zadania: Oczekiwanie ze względu na maksymalną ilość zadań" + +#: src/dird/ua_status.c:705 +#, fuzzy +msgid "is waiting on max total jobs" +msgstr "Status zadania: Oczekiwanie ze względu na maksymalną ilość zadań" + +#: src/dird/ua_status.c:708 +#, fuzzy +msgid "is waiting for its start time" +msgstr "Status zadania: Oczekiwanie na czas startu" + +#: src/dird/ua_status.c:711 +#, fuzzy +msgid "is waiting for higher priority jobs to finish" +msgstr "Status zadania: Oczekiwanie na zakończenie zadań o wyższym priorytecie" + +#: src/dird/ua_status.c:728 +#, fuzzy, c-format +msgid "is in unknown state %c" +msgstr "Nieznany status zadania %c."
+ +#: src/dird/ua_status.c:742 +#, fuzzy +msgid "is waiting for a mount request" +msgstr "Status zadania: Oczekiwanie na zasoby Klienta" + +#: src/dird/ua_status.c:749 +msgid "is waiting for an appendable Volume" +msgstr "" + +#: src/dird/ua_status.c:757 +#, fuzzy +msgid "is waiting for Client to connect to Storage daemon" +msgstr "Nieudane podłączenie do demona Skadowania: %s:%d\n" + +#: src/dird/ua_status.c:759 +#, fuzzy, c-format +msgid "is waiting for Client %s to connect to Storage %s" +msgstr "Status zadania: Oczekiwanie na zasoby Klienta" + +#: src/dird/ua_status.c:790 +#, fuzzy, c-format +msgid "%6d\t%-6s\t%-20s\t%s\t%s\n" +msgstr "%6d\t%-6s\t%8s\t%10s\t%-7s\t%-8s\t%s\n" + +#: src/dird/ua_status.c:794 +#, fuzzy, c-format +msgid "%6d %-6s %-20s %s\n" +msgstr "%6d %-6s %8s %10s %-7s %-8s %s\n" + +#: src/dird/ua_status.c:798 +#, fuzzy, c-format +msgid " %-30s\n" +msgstr "Job : %s\n" + +#: src/dird/ua_status.c:818 +#, fuzzy +msgid "No Terminated Jobs.\n" +msgstr "" +"\n" +"Zakoczonych Zada:\n" + +#: src/dird/ua_status.c:826 +#, fuzzy +msgid "====================================================================\n" +msgstr "======================================================================\n" + +#: src/dird/ua_status.c:898 +msgid "\n" +msgstr "" + +#: src/dird/ua_output.c:69 src/dird/ua_output.c:93 +msgid "ON or OFF keyword missing.\n" +msgstr "" + +#: src/dird/ua_output.c:113 +#, fuzzy +msgid "Disabled Jobs:\n" +msgstr "" +"\n" +"Zakoczonych Zada:\n" + +#: src/dird/ua_output.c:119 +msgid "No disabled Jobs.\n" +msgstr "" + +#: src/dird/ua_output.c:163 +msgid "disabled" +msgstr "" + +#: src/dird/ua_output.c:208 +msgid "Keywords for the show command are:\n" +msgstr "" + +#: src/dird/ua_output.c:214 +#, fuzzy, c-format +msgid "%s resource %s not found.\n" +msgstr "Zasób Puli \"%s\" nie znaleziony.\n" + +#: src/dird/ua_output.c:217 +#, fuzzy, c-format +msgid "Resource %s not found\n" +msgstr "Zasób Puli \"%s\" nie znaleziony.\n" + +#: src/dird/ua_output.c:288 +#, fuzzy +msgid "Hey! DB is NULL\n" +msgstr "BSR is NULL\n" + +#: src/dird/ua_output.c:444 +#, c-format +msgid "Jobid %d used %d Volume(s): %s\n" +msgstr "" + +#: src/dird/ua_output.c:462 +#, fuzzy +msgid "No Pool specified.\n" +msgstr "Nie skazano zadania.\n" + +#: src/dird/ua_output.c:483 +#, fuzzy, c-format +msgid "Pool: %s\n" +msgstr "Za komenda level: %s\n" + +#: src/dird/ua_output.c:499 +msgid "Ignoring invalid value for days. Max is 50.\n" +msgstr "" + +#: src/dird/ua_output.c:521 +#, fuzzy, c-format +msgid "Unknown list keyword: %s\n" +msgstr "Nieznane sowo kluczowe: %s\n" + +#: src/dird/ua_output.c:547 +#, fuzzy, c-format +msgid "%s is not a job name.\n" +msgstr "%c: nie jest poprawnym napędem.\n" + +#: src/dird/ua_output.c:561 +#, fuzzy, c-format +msgid "Could not find Pool for Job %s\n" +msgstr "Nie można ustawi pola Finder Info na %s\n" + +#: src/dird/ua_output.c:574 +#, fuzzy, c-format +msgid "Could not find next Volume for Job %s (Pool=%s, Level=%s).\n" +msgstr "Nie można otrzymać rekordu zadania dla JobId=%s do %s. 
ERR=%s" + +#: src/dird/ua_output.c:578 +#, c-format +msgid "" +"The next Volume to be used by Job \"%s\" (Pool=%s, Level=%s) will be %s\n" +msgstr "" + +#: src/dird/ua_output.c:589 +#, fuzzy, c-format +msgid "Could not find next Volume for Job %s.\n" +msgstr "Nie można ustawi pola Finder Info na %s\n" + +#: src/dird/ua_output.c:715 src/dird/job.c:133 +#, fuzzy, c-format +msgid "Could not open database \"%s\".\n" +msgstr "Nie można otworzyć bazy danych katalogu \"%s\".\n" + +#: src/dird/ua_output.c:728 +#, fuzzy, c-format +msgid "Pool %s not in database. %s" +msgstr "Pula \"%s\" nie istnieje w bazie danych. ERR=%s" + +#: src/dird/ua_output.c:736 +#, fuzzy, c-format +msgid "Pool %s created in database.\n" +msgstr "Pula \"%s\" nie istnieje w bazie danych. ERR=%s" + +#: src/dird/ua_output.c:789 +#, fuzzy +msgid "You have no messages.\n" +msgstr "Otrzymaes komunikaty.\n" + +#: src/dird/ua_output.c:869 +msgid "Message too long to display.\n" +msgstr "" + +#: src/dird/msgchan.c:204 +#, fuzzy, c-format +msgid "Storage daemon rejected Job command: %s\n" +msgstr "Demon Przechowywania odrzuci komend Hello\n" + +#: src/dird/msgchan.c:212 +#, fuzzy, c-format +msgid "fixed name. Max=%d: " +msgstr "" + +#: src/dird/ua_cmds.c:335 +#, fuzzy, c-format +msgid "The number must be between 0 and %d\n" +msgstr "Musisz wybrać liczbę między 1 a %d\n" + +#: src/dird/ua_cmds.c:343 +#, fuzzy +msgid "Enter Volume name: " +msgstr "Wprowadź nazwę wolumenu: " + +#: src/dird/ua_cmds.c:347 +#, fuzzy +msgid "Enter base volume name: " +msgstr "Wprowadź nazwę wolumenu: " + +#: src/dird/ua_cmds.c:371 +#, fuzzy +msgid "Enter the starting number: " +msgstr "Błąd skanowania nagwna rekordu: %s\n" + +#: src/dird/ua_cmds.c:376 +msgid "Start number must be greater than zero.\n" +msgstr "" + +#: src/dird/ua_cmds.c:387 +#, fuzzy +msgid "Enter slot (0 for none): " +msgstr "Anulowane" + +#: src/dird/ua_cmds.c:391 +msgid "InChanger? yes/no: " +msgstr "" + +#: src/dird/ua_cmds.c:419 +#, fuzzy, c-format +msgid "%d Volumes created in pool %s\n" +msgstr "Nie znaleziono zasobu Zadania dla \"%s\".\n" + +#: src/dird/ua_cmds.c:435 src/dird/ua_cmds.c:1132 +msgid "Turn on or off? " +msgstr "" + +#: src/dird/ua_cmds.c:466 +#, c-format +msgid "JobId %s is not running. Use Job name to cancel inactive jobs.\n" +msgstr "" + +#: src/dird/ua_cmds.c:475 src/dird/ua_cmds.c:485 +#, c-format +msgid "Warning Job %s is not running. Continuing anyway ...\n" +msgstr "" + +#: src/dird/ua_cmds.c:495 src/dird/ua_cmds.c:808 src/dird/ua_cmds.c:854 +msgid "Unauthorized command from this console.\n" +msgstr "" + +#: src/dird/ua_cmds.c:522 +#, fuzzy +msgid "None of your jobs are running.\n" +msgstr "adne Zadanie nie uruchomione.\n" + +#: src/dird/ua_cmds.c:527 +#, fuzzy +msgid "Select Job:\n" +msgstr "Wybierz pozycję: " + +#: src/dird/ua_cmds.c:536 +#, fuzzy, c-format +msgid "JobId=%s Job=%s" +msgstr "%s korzysta z JobId=%s Job=%s\n" + +#: src/dird/ua_cmds.c:541 +msgid "Choose Job to cancel" +msgstr "" + +#: src/dird/ua_cmds.c:546 +#, fuzzy, c-format +msgid "" +"Cancel: %s\n" +"\n" +"%s" +msgstr "Anulowane" + +#: src/dird/ua_cmds.c:547 +msgid "Confirm cancel?" 
+msgstr "" + +#: src/dird/ua_cmds.c:553 +msgid "Confirm cancel (yes/no): " +msgstr "" + +#: src/dird/ua_cmds.c:561 src/dird/ua_cmds.c:849 src/dird/ua_cmds.c:1200 +#, fuzzy, c-format +msgid "Job \"%s\" not found.\n" +msgstr "Nie znaleziono zadania %s.\n" + +#: src/dird/ua_cmds.c:659 +#, c-format +msgid "" +"Can't set %s RecyclePool to %s, %s is not in database.\n" +"Try to update it with 'update pool=%s'\n" +msgstr "" + +#: src/dird/ua_cmds.c:677 +#, c-format +msgid "" +"Can't set %s ScratchPool to %s, %s is not in database.\n" +"Try to update it with 'update pool=%s'\n" +msgstr "" + +#: src/dird/ua_cmds.c:746 +#, c-format +msgid "" +"Error: Pool %s already exists.\n" +"Use update to change it.\n" +msgstr "" + +#: src/dird/ua_cmds.c:757 +#, fuzzy, c-format +msgid "Pool %s created.\n" +msgstr "Status zadania: Anulowane" + +#: src/dird/ua_cmds.c:787 +msgid "Python interpreter restarted.\n" +msgstr "" + +#: src/dird/ua_cmds.c:790 src/dird/ua_cmds.c:1430 +msgid "Nothing done.\n" +msgstr "" + +#: src/dird/ua_cmds.c:824 +#, fuzzy, c-format +msgid "Client \"%s\" address set to %s\n" +msgstr "Klient: nazwa=%s adres=%s FDport=%d\n" + +#: src/dird/ua_cmds.c:858 +#, fuzzy, c-format +msgid "Job \"%s\" %sabled\n" +msgstr "Status zadania: Anulowane" + +#: src/dird/ua_cmds.c:914 src/dird/ua_cmds.c:1304 src/dird/ua_dotcmds.c:405 +#, fuzzy +msgid "Failed to connect to Client.\n" +msgstr "Nieudane podłączenie do demona Plików.\n" + +#: src/dird/ua_cmds.c:1027 +#, fuzzy +msgid "Enter new debug level: " +msgstr "Wprowadź JobId do usunięcia: " + +#: src/dird/ua_cmds.c:1093 src/dird/ua_dotcmds.c:476 +msgid "Available daemons are: \n" +msgstr "" + +#: src/dird/ua_cmds.c:1094 src/dird/ua_dotcmds.c:477 +#, fuzzy +msgid "Director" +msgstr "Demon Dyrektora" + +#: src/dird/ua_cmds.c:1097 +msgid "All" +msgstr "" + +#: src/dird/ua_cmds.c:1098 +msgid "Select daemon type to set debug level" +msgstr "Wybierz rodzaj demona aby ustawić poziom debugingu" + +#: src/dird/ua_cmds.c:1187 src/dird/ua_cmds.c:2010 +#, c-format +msgid "No authorization for Client \"%s\"\n" +msgstr "Brak autoryzacji dla Klienta \"%s\"\n" + +#: src/dird/ua_cmds.c:1192 +msgid "Client name missing.\n" +msgstr "Brak nazwy Klienta.\n" + +#: src/dird/ua_cmds.c:1204 src/dird/ua_cmds.c:1265 src/dird/ua_cmds.c:2032 +#, c-format +msgid "No authorization for Job \"%s\"\n" +msgstr "Brak autoryzacji dla Zadania \"%s\"\n" + +#: src/dird/ua_cmds.c:1209 +msgid "Job name missing.\n" +msgstr "Brak nazwy Zadania.\n" + +#: src/dird/ua_cmds.c:1218 +#, c-format +msgid "Fileset \"%s\" not found.\n" +msgstr "Fileset \"%s\" nie znaleziony.\n" + +#: src/dird/ua_cmds.c:1222 +#, c-format +msgid "No authorization for FileSet \"%s\"\n" +msgstr "Brak autoryzacji dla FileSet'u \"%s\"\n" + +#: src/dird/ua_cmds.c:1227 +msgid "Fileset name missing.\n" +msgstr "Brakująca nazwa fileset.\n" + +#: src/dird/ua_cmds.c:1242 +msgid "Level value missing.\n" +msgstr "Brak wartości Poziomu.\n" + +#: src/dird/ua_cmds.c:1248 +msgid "Invalid value for accurate. It must be yes or no.\n" +msgstr "Błędna wartość dla dokładności. 
Musisz wprowadzić 'yes' lub 'no'.\n" + +#: src/dird/ua_cmds.c:1261 +msgid "No job specified.\n" +msgstr "Nie skazano zadania.\n" + +#: src/dird/ua_cmds.c:1309 +msgid "Error sending include list.\n" +msgstr "Błąd wysyania listy wcze.\n" + +#: src/dird/ua_cmds.c:1314 +msgid "Error sending exclude list.\n" +msgstr "Błąd wysyania listy wycze\n" + +#: src/dird/ua_cmds.c:1416 +msgid "" +"In general it is not a good idea to delete either a\n" +"Pool or a Volume since they may contain data.\n" +"\n" +msgstr "" +"Z zasady, nie jest dobrym pomysłem usunięcie \n" +"Puli lub Wolumenu ponieważ mogą one zawierać dane.\n" +"\n" + +#: src/dird/ua_cmds.c:1419 +msgid "Choose catalog item to delete" +msgstr "Wybierz pozycję katalogu do usunięcia" + +#: src/dird/ua_cmds.c:1483 +msgid "Enter JobId to delete: " +msgstr "Wprowadź JobId do usunięcia: " + +#: src/dird/ua_cmds.c:1522 +#, c-format +msgid "Job %s and associated records deleted from the catalog.\n" +msgstr "Zadanie %s oraz powiązane rekordy usunięte z katalogu.\n" + +#: src/dird/ua_cmds.c:1536 +#, c-format +msgid "" +"\n" +"This command will delete volume %s\n" +"and all Jobs saved on that volume from the Catalog\n" +msgstr "" +"\n" +"Ta komenda usunie wolumen %s\n" +"oraz wszystkie Zadnia zapisane na tym wolumenie z Katalogu\n" + +#: src/dird/ua_cmds.c:1543 +#, c-format +msgid "Are you sure you want to delete Volume \"%s\"? (yes/no): " +msgstr "Czy jesteś pewien że chcesz usunąć Wolumen \"%s\"? (yes/no): " + +#: src/dird/ua_cmds.c:1568 +#, c-format +msgid "Are you sure you want to delete Pool \"%s\"? (yes/no): " +msgstr "Czy jesteś pewien że chcesz usunąć Pulę \"%s\"? (yes/no): " + +#: src/dird/ua_cmds.c:1682 +#, c-format +msgid "Using Catalog name=%s DB=%s\n" +msgstr "Używam Katalogu o nazwie=%s DB=%s\n" + +#: src/dird/ua_cmds.c:1750 +msgid "ERR: Can't open db\n" +msgstr "ERR: Nie można otworzyć BD\n" + +#: src/dird/ua_cmds.c:1797 +msgid "Wait on mount timed out\n" +msgstr "Przekroczono czas oczekiwania na zamontowanie\n" + +#: src/dird/ua_cmds.c:1807 +msgid "ERR: Job was not found\n" +msgstr "ERR: Zadanie nie znalezione\n" + +#: src/dird/ua_cmds.c:1883 +msgid "" +" Command Description\n" +" ======= ===========\n" +msgstr "" +" Komenda Opis\n" +" ======= ===========\n" + +#: src/dird/ua_cmds.c:1887 +#, c-format +msgid "" +" %-13s %s\n" +"\n" +"Arguments:\n" +"\t%s\n" +msgstr "" +" %-13s %s\n" +"\n" +"Argumenty:\n" +"\t%s\n" + +#: src/dird/ua_cmds.c:1892 +#, c-format +msgid " %-13s %s\n" +msgstr "" + +#: src/dird/ua_cmds.c:1896 +#, c-format +msgid "" +"\n" +"Can't find %s command.\n" +"\n" +msgstr "" +"\n" +"Niemożliwe odnalezienie komendy %s .\n" +"\n" + +#: src/dird/ua_cmds.c:1898 +msgid "" +"\n" +"When at a prompt, entering a period cancels the command.\n" +"\n" +msgstr "" + +#: src/dird/ua_cmds.c:1993 src/dird/ua_cmds.c:2020 src/dird/ua_cmds.c:2042 +#, c-format +msgid "No authorization for Catalog \"%s\"\n" +msgstr "Brak autoryzacji dla Katalogu \"%s\"\n" + +#: src/dird/ua_cmds.c:2087 +#, c-format +msgid "Could not open catalog database \"%s\".\n" +msgstr "Nie można otworzyć bazy danych katalogu \"%s\".\n" + +#: src/dird/ua_cmds.c:2097 +#, c-format +msgid "Using Catalog \"%s\"\n" +msgstr "Używam Katalogu \"%s\"\n" + +#: src/dird/migrate.c:114 src/dird/vbackup.c:77 +msgid "Could not get or create a Pool record.\n" +msgstr "Nie można pobra lub stworzyć rekordu Puli.\n" + +#: src/dird/migrate.c:131 src/dird/vbackup.c:185 +msgid "Could not get or create the FileSet record.\n" +msgstr "Nie można pobra lub stworzyć rekordu FileSet.\n" + +#: 
src/dird/migrate.c:149 src/dird/migrate.c:163 +#, c-format +msgid "No previous Job found to %s.\n" +msgstr "" + +#: src/dird/migrate.c:155 +msgid "Create bootstrap file failed.\n" +msgstr "Nieudane stworzenie pliku bootstrap.\n" + +#: src/dird/migrate.c:165 +#, c-format +msgid "Previous Job has no data to %s.\n" +msgstr "" + +#: src/dird/migrate.c:182 +#, c-format +msgid "Job resource not found for \"%s\".\n" +msgstr "Nie znaleziono zasobu zadania dla \"%s\".\n" + +#: src/dird/migrate.c:186 +#, c-format +msgid "Previous Job resource not found for \"%s\".\n" +msgstr "Nie można otworzyć rozwidlenia zasobów dla %s.\n" + +#: src/dird/migrate.c:203 +msgid "setup job failed.\n" +msgstr "zadanie konfiguracji zakończone niepowodzeniem.\n" + +#: src/dird/migrate.c:252 +#, c-format +msgid "Pool for JobId %s not in database. ERR=%s\n" +msgstr "Pula dla JobId %s nie istnieje w bazie danych. ERR=%s\n" + +#: src/dird/migrate.c:260 +#, c-format +msgid "Pool resource \"%s\" not found.\n" +msgstr "Zasób Puli \"%s\" nie znaleziony.\n" + +#: src/dird/migrate.c:279 src/dird/vbackup.c:117 +msgid "Job Pool's NextPool resource" +msgstr "Zasób dla zadania pluli NextPoolJob" + +#: src/dird/migrate.c:310 src/dird/migrate.c:883 +#, c-format +msgid "Could not get job record for JobId %s to %s. ERR=%s" +msgstr "Nie można otrzymać rekordu zadania dla JobId=%s do %s. ERR=%s" + +#: src/dird/migrate.c:321 +#, c-format +msgid "JobId %s already %s probably by another Job. %s stopped.\n" +msgstr "JobId %s już jest %s prawdopodobnie przez inny Job. %s zatrzymano.\n" + +#: src/dird/migrate.c:331 +#, c-format +msgid "Start %s JobId %s, Job=%s\n" +msgstr "Start %s JobId %s, Job=%s\n" + +#: src/dird/migrate.c:705 +#, c-format +msgid "No %s SQL selection pattern specified.\n" +msgstr "Nie określony wzorzec selekcji SQL %s. \n" + +#: src/dird/migrate.c:712 src/dird/migrate.c:731 src/dird/migrate.c:752 +#: src/dird/migrate.c:788 src/dird/migrate.c:815 src/dird/migrate.c:939 +#: src/dird/migrate.c:972 src/dird/migrate.c:1101 +#, c-format +msgid "SQL failed. ERR=%s\n" +msgstr "Nieudany SQL. ERR=%s\n" + +#: src/dird/migrate.c:735 src/dird/migrate.c:742 src/dird/migrate.c:756 +#: src/dird/migrate.c:819 +#, c-format +msgid "No Volumes found to %s.\n" +msgstr "" + +#: src/dird/migrate.c:770 src/dird/migrate.c:856 src/dird/migrate.c:872 +msgid "Invalid JobId found.\n" +msgstr "" + +#: src/dird/migrate.c:830 +#, c-format +msgid "Unknown %s Selection Type.\n" +msgstr "Nieznany Typ Selekcji %s.\n" + +#: src/dird/migrate.c:842 src/dird/migrate.c:859 src/dird/migrate.c:875 +#, c-format +msgid "No JobIds found to %s.\n" +msgstr "Brak JobIds dla %s.\n" + +#: src/dird/migrate.c:846 +#, c-format +msgid "The following %u JobId%s chosen to be %s: %s\n" +msgstr "Następujace %u JobId %s wybrane aby być %s: %s\n" + +#: src/dird/migrate.c:847 +msgid " was" +msgstr "był" + +#: src/dird/migrate.c:847 +msgid "s were" +msgstr "były" + +#: src/dird/migrate.c:889 +#, c-format +msgid "%s using JobId=%s Job=%s\n" +msgstr "%s korzysta z JobId=%s Job=%s\n" + +#: src/dird/migrate.c:922 +msgid "Could not start migration job.\n" +msgstr "Nie można rurchomi zadania migracji.\n" + +#: src/dird/migrate.c:924 +#, c-format +msgid "%s JobId %d started.\n" +msgstr "%s JobId %d wystartowane.\n" + +#: src/dird/migrate.c:943 +#, c-format +msgid "No %s found to %s.\n" +msgstr "" + +#: src/dird/migrate.c:947 +#, c-format +msgid "SQL error. Expected 1 MediaId got %d\n" +msgstr "Błąd SQL. 
Oczekiwano na 1 MediaId, wprowadzono %d\n" + +#: src/dird/migrate.c:976 src/dird/migrate.c:1106 +#, c-format +msgid "No %ss found to %s.\n" +msgstr "" + +#: src/dird/migrate.c:998 +msgid "Selection Type 'pooluncopiedjobs' only applies to Copy Jobs" +msgstr "" +"Typ selekcji 'pooluncopiedjobs' stosuje się wyłacznie dla Zadań Kopiowania" + +#: src/dird/migrate.c:1007 +#, c-format +msgid "SQL to get uncopied jobs failed. ERR=%s\n" +msgstr "" + +#: src/dird/migrate.c:1030 +#, c-format +msgid "No %s %s selection pattern specified.\n" +msgstr "Żaden wzorzec selekcji %s %s nie został określony.\n" + +#: src/dird/migrate.c:1041 +#, c-format +msgid "SQL to get %s failed. ERR=%s\n" +msgstr "Nieudany SQL podczas pobierania %s. ERR=%s\n" + +#: src/dird/migrate.c:1046 +#, c-format +msgid "Query of Pool \"%s\" returned no Jobs to %s.\n" +msgstr "" + +#: src/dird/migrate.c:1055 +#, c-format +msgid "Could not compile regex pattern \"%s\" ERR=%s\n" +msgstr "Nie można skompilowa wzorca regex \"%s\" ERR=%s\n" + +#: src/dird/migrate.c:1084 +#, c-format +msgid "Regex pattern matched no Jobs to %s.\n" +msgstr "" + +#: src/dird/migrate.c:1242 +#, c-format +msgid "%s OK -- with warnings" +msgstr "%s OK -- z ostrzeżeniami" + +#: src/dird/migrate.c:1244 +#, c-format +msgid "%s OK" +msgstr "%s OK" + +#: src/dird/migrate.c:1249 +#, c-format +msgid "*** %s Error ***" +msgstr "" + +#: src/dird/migrate.c:1259 +#, c-format +msgid "%s Canceled" +msgstr "%s Anulowano" + +#: src/dird/migrate.c:1268 +#, c-format +msgid "Inappropriate %s term code" +msgstr "Nieodpowiedni kod (term code) %s" + +#: src/dird/migrate.c:1278 +#, c-format +msgid "%s -- no files to %s" +msgstr "%s -- brak plików dla %s" + +#: src/dird/migrate.c:1293 +#, c-format +msgid "" +"%s %s %s (%s): %s\n" +" Build OS: %s %s %s\n" +" Prev Backup JobId: %s\n" +" Prev Backup Job: %s\n" +" New Backup JobId: %s\n" +" Current JobId: %s\n" +" Current Job: %s\n" +" Backup Level: %s%s\n" +" Client: %s\n" +" FileSet: \"%s\" %s\n" +" Read Pool: \"%s\" (From %s)\n" +" Read Storage: \"%s\" (From %s)\n" +" Write Pool: \"%s\" (From %s)\n" +" Write Storage: \"%s\" (From %s)\n" +" Catalog: \"%s\" (From %s)\n" +" Start time: %s\n" +" End time: %s\n" +" Elapsed time: %s\n" +" Priority: %d\n" +" SD Files Written: %s\n" +" SD Bytes Written: %s (%sB)\n" +" Rate: %.1f KB/s\n" +" Volume name(s): %s\n" +" Volume Session Id: %d\n" +" Volume Session Time: %d\n" +" Last Volume Bytes: %s (%sB)\n" +" SD Errors: %d\n" +" SD termination status: %s\n" +" Termination: %s\n" +"\n" +msgstr "" + +#: src/dird/migrate.c:1404 +#, c-format +msgid "No Next Pool specification found in Pool \"%s\".\n" +msgstr "" + +#: src/dird/migrate.c:1410 +#, c-format +msgid "No Storage specification found in Next Pool \"%s\".\n" +msgstr "" + +#: src/dird/migrate.c:1416 +msgid "Storage from Pool's NextPool resource" +msgstr "" + +#: src/dird/vbackup.c:146 +#, c-format +msgid "Start Virtual Backup JobId %s, Job=%s\n" +msgstr "Uruchom Wirtualny Backup JobId %s, Job=%s\n" + +#: src/dird/vbackup.c:150 +msgid "" +"This Job is not an Accurate backup so is not equivalent to a Full backup.\n" +msgstr "" + +#: src/dird/vbackup.c:157 +msgid "No previous Jobs found.\n" +msgstr "Nie znaleziono poprzednich zadań.\n" + +#: src/dird/vbackup.c:179 +#, c-format +msgid "Error getting Job record for previous Job: ERR=%s" +msgstr "" + +#: src/dird/vbackup.c:390 +#, c-format +msgid "" +"%s %s %s (%s): %s\n" +" Build OS: %s %s %s\n" +" JobId: %d\n" +" Job: %s\n" +" Backup Level: Virtual Full\n" +" Client: \"%s\" %s\n" +" FileSet: \"%s\" 
%s\n" +" Pool: \"%s\" (From %s)\n" +" Catalog: \"%s\" (From %s)\n" +" Storage: \"%s\" (From %s)\n" +" Scheduled time: %s\n" +" Start time: %s\n" +" End time: %s\n" +" Elapsed time: %s\n" +" Priority: %d\n" +" SD Files Written: %s\n" +" SD Bytes Written: %s (%sB)\n" +" Rate: %.1f KB/s\n" +" Volume name(s): %s\n" +" Volume Session Id: %d\n" +" Volume Session Time: %d\n" +" Last Volume Bytes: %s (%sB)\n" +" SD Errors: %d\n" +" SD termination status: %s\n" +" Termination: %s\n" +"\n" +msgstr "" +"%s %s %s (%s): %s\n" +" Build OS: %s %s %s\n" +" JobId: %d\n" +" Job: %s\n" +" Backup Level: Virtual Full\n" +" Klient: \"%s\" %s\n" +" FileSet: \"%s\" %s\n" +" Pula: \"%s\" (From %s)\n" +" Katalog: \"%s\" (From %s)\n" +" Storage: \"%s\" (From %s)\n" +" Zaplanowany czas: %s\n" +" Czas rozpoczęcia: %s\n" +" Czas zakończenia: %s\n" +" Czas trwania: %s\n" +" Priorytet: %d\n" +" Zapisano Plików SD: %s\n" +" Zapisano Bajtów SD: %s (%sB)\n" +" Szybkość: %.1f KB/s\n" +" Nazwa Wolumenu: %s\n" +" Id Sesji Wolumenu: %d\n" +" Czas Sesji Wolumenu: %d\n" +" Ostatnie bajty Wolum.: %s (%sB)\n" +" Błędy SD: %d\n" +" Status zakończenia SD: %s\n" +" Zakończenie: %s\n" +"\n" + +#: src/dird/autoprune.c:76 +msgid "" +"End auto prune.\n" +"\n" +msgstr "Zakończ automatyczne obcięcie (ang. auto prune).\n" + +#: src/dird/ua_restore.c:144 +msgid "\"RegexWhere\" specification not authorized.\n" +msgstr "Specyfikacja \"RegexWhere\" nie została autoryzowana.\n" + +#: src/dird/ua_restore.c:151 +msgid "\"where\" specification not authorized.\n" +msgstr "Specyfikacja \"Where\" nie została autoryzowana.\n" + +#: src/dird/ua_restore.c:173 +msgid "" +"No Restore Job Resource found in bacula-dir.conf.\n" +"You must create at least one before running this command.\n" +msgstr "" + +#: src/dird/ua_restore.c:190 +msgid "Restore not done.\n" +msgstr "Przywracanie niewykonane.\n" + +#: src/dird/ua_restore.c:201 +msgid "Unable to construct a valid BSR. Cannot continue.\n" +msgstr "Niemożliwa konstrukcja poprawnego/ważnego BSR. 
Nie mogę kontynuować.\n" + +#: src/dird/ua_restore.c:205 src/dird/ua_restore.c:217 +#, fuzzy +msgid "No files selected to be restored.\n" +msgstr "Nieznany typ pliku %d; nie odtworzono: %s\n" + +#: src/dird/ua_restore.c:211 +msgid "" +"\n" +"1 file selected to be restored.\n" +"\n" +msgstr "" +"\n" +"Do przywrócenia wybrano 1 plik.\n" +"\n" + +#: src/dird/ua_restore.c:213 +#, c-format +msgid "" +"\n" +"%s files selected to be restored.\n" +"\n" +msgstr "" +"\n" +"Do przywrócenia wybrano %s plików.\n" +"\n" + +#: src/dird/ua_restore.c:232 +msgid "No Client resource found!\n" +msgstr "Nie znaleziono zasobu Klienta!\n" + +#: src/dird/ua_restore.c:319 +msgid "The restore will use the following job(s) as Base\n" +msgstr "Przywracanie wykorzysta następujące Zadanie/Zadania jako Bazę\n" + +#: src/dird/ua_restore.c:340 +#, fuzzy, c-format +msgid "Missing value for keyword: %s\n" +msgstr "Nieznane sowo kluczowe: %s\n" + +#: src/dird/ua_restore.c:418 +msgid "List last 20 Jobs run" +msgstr "Lista uruchomionych ostatnich 20 Zadań" + +#: src/dird/ua_restore.c:419 +msgid "List Jobs where a given File is saved" +msgstr "Lista Zadań w których podany Plik został zapisany" + +#: src/dird/ua_restore.c:420 +msgid "Enter list of comma separated JobIds to select" +msgstr "W celu wyboru wprowadź listę JobId oddzielonych przecinkami" + +#: src/dird/ua_restore.c:421 +msgid "Enter SQL list command" +msgstr "Wprowadź listę poleceń SQL" + +#: src/dird/ua_restore.c:422 +msgid "Select the most recent backup for a client" +msgstr "Wybierz najbardziej aktualny backup dla klienta" + +#: src/dird/ua_restore.c:423 +msgid "Select backup for a client before a specified time" +msgstr "Wybierz backup dla klienta przed określoną datą" + +#: src/dird/ua_restore.c:424 +msgid "Enter a list of files to restore" +msgstr "Wprowad list plikw do odtwarzania" + +#: src/dird/ua_restore.c:425 +#, fuzzy +msgid "Enter a list of files to restore before a specified time" +msgstr "Wprowad list plikw do odtwarzania" + +#: src/dird/ua_restore.c:426 +msgid "Find the JobIds of the most recent backup for a client" +msgstr "Znajdź wszystkie JobId najbardziej aktualnego backup'u klienta" + +#: src/dird/ua_restore.c:427 +msgid "Find the JobIds for a backup for a client before a specified time" +msgstr "znajdź wszystkie JobId backup'u klienta sprzed określonej daty" + +#: src/dird/ua_restore.c:428 +#, fuzzy +msgid "Enter a list of directories to restore for found JobIds" +msgstr "Wprowad list plikw do odtwarzania" + +#: src/dird/ua_restore.c:429 +msgid "Select full restore to a specified Job date" +msgstr "Wybierz pełne przywrócenie dla określonej daty Zadania" + +#: src/dird/ua_restore.c:473 +#, c-format +msgid "Unknown keyword: %s\n" +msgstr "Nieznane sowo kluczowe: %s\n" + +#: src/dird/ua_restore.c:542 +#, fuzzy, c-format +msgid "Error: Pool resource \"%s\" access not allowed.\n" +msgstr "Zasób Puli \"%s\" nie znaleziony.\n" + +#: src/dird/ua_restore.c:558 +msgid "" +"\n" +"First you select one or more JobIds that contain files\n" +"to be restored. You will be presented several methods\n" +"of specifying the JobIds. 
Then you will be allowed to\n" +"select which files from those JobIds are to be restored.\n" +"\n" +msgstr "" + +#: src/dird/ua_restore.c:571 +msgid "To select the JobIds, you have the following choices:\n" +msgstr "Możesz wybrać identyfikatory JobId z nastepującej listy:\n" + +#: src/dird/ua_restore.c:576 +msgid "Select item: " +msgstr "Wybierz pozycję: " + +#: src/dird/ua_restore.c:581 src/dird/ua_restore.c:616 +msgid "SQL query not authorized.\n" +msgstr "Nieautoryzowane zapytanie SQL.\n" + +#: src/dird/ua_restore.c:594 +msgid "Enter Filename (no path):" +msgstr "Wprowadź nazwę pliku (bez ścieżki):" + +#: src/dird/ua_restore.c:609 src/dird/ua_restore.c:717 +msgid "Enter JobId(s), comma separated, to restore: " +msgstr "" +"Aby przywrócić, wprowadź wybrane identyfikatory JobId oddzielone " +"przecinkami: " + +#: src/dird/ua_restore.c:619 +#, fuzzy +msgid "Enter SQL list command: " +msgstr "Niepoprawna komenda FileSet: %s\n" + +#: src/dird/ua_restore.c:653 src/dird/ua_restore.c:676 +msgid "" +"Enter file names with paths, or < to enter a filename\n" +"containing a list of file names with paths, and terminate\n" +"them with a blank line.\n" +msgstr "" + +#: src/dird/ua_restore.c:657 src/dird/ua_restore.c:680 +msgid "Enter full filename: " +msgstr "Wprowadź pełną nazwę pliku: " + +#: src/dird/ua_restore.c:715 +#, c-format +msgid "You have already selected the following JobIds: %s\n" +msgstr "Następujące JobId zostały już wybrane: %s\n" + +#: src/dird/ua_restore.c:732 +msgid "" +"Enter full directory names or start the name\n" +"with a < to indicate it is a filename containing a list\n" +"of directories and terminate them with a blank line.\n" +msgstr "" + +#: src/dird/ua_restore.c:736 +msgid "Enter directory name: " +msgstr "Wprowadź nazwę katalogu: " + +#: src/dird/ua_restore.c:752 +#, fuzzy +msgid "Enter JobId to get the state to restore: " +msgstr "Wprowad list plikw do odtwarzania" + +#: src/dird/ua_restore.c:761 src/dird/ua_restore.c:804 +#, c-format +msgid "Unable to get Job record for JobId=%s: ERR=%s\n" +msgstr "Nie mogę otrzyma rekordu Zadania dla JobId=%s: ERR=%s\n" + +#: src/dird/ua_restore.c:765 +#, c-format +msgid "Selecting jobs to build the Full state at %s\n" +msgstr "" + +#: src/dird/ua_restore.c:791 +msgid "Invalid JobId in list.\n" +msgstr "Nieprawidłowy JobId na liście.\n" + +#: src/dird/ua_restore.c:810 +#, c-format +msgid "Access to JobId=%s (Job \"%s\") not authorized. Not selected.\n" +msgstr "Nieautoryzowany dostęp do JobId=%s (Zadanie \"%s\"). 
Nie wybrano.\n" + +#: src/dird/ua_restore.c:823 +#, fuzzy +msgid "No Jobs selected.\n" +msgstr "Status zadania: Anulowane" + +#: src/dird/ua_restore.c:828 +#, c-format +msgid "You have selected the following JobIds: %s\n" +msgstr "Wybrałeś następujące JobId: %s\n" + +#: src/dird/ua_restore.c:830 +#, c-format +msgid "You have selected the following JobId: %s\n" +msgstr "Wybrałeś następujące JobId: %s\n" + +#: src/dird/ua_restore.c:840 +msgid "" +"The restored files will the most current backup\n" +"BEFORE the date you specify below.\n" +"\n" +msgstr "" + +#: src/dird/ua_restore.c:843 +msgid "Enter date as YYYY-MM-DD HH:MM:SS :" +msgstr "Wprowadź datę w formacie YYYY-MM-DD HH:MM:SS :" + +#: src/dird/ua_restore.c:849 +msgid "Improper date format.\n" +msgstr "Niepoprawny format daty.\n" + +#: src/dird/ua_restore.c:870 +#, c-format +msgid "Cannot open file %s: ERR=%s\n" +msgstr "Nie można otworzyć pliku %s: ERR=%s\n" + +#: src/dird/ua_restore.c:878 src/dird/ua_restore.c:882 +#, fuzzy, c-format +msgid "Error occurred on line %d of file \"%s\"\n" +msgstr "Błąd odczytywania pliku %s: ERR=%s\n" + +#: src/dird/ua_restore.c:922 src/dird/ua_restore.c:950 +#: src/dird/ua_restore.c:971 src/dird/ua_dotcmds.c:682 +#: src/dird/ua_dotcmds.c:726 +#, c-format +msgid "Query failed: %s. ERR=%s\n" +msgstr "Nieudane zapytanie: %s. ERR=%s\n" + +#: src/dird/ua_restore.c:926 src/dird/ua_restore.c:954 +#, c-format +msgid "No database record found for: %s\n" +msgstr "Brak rekordu bazodanowego dla: %s\n" + +#: src/dird/ua_restore.c:942 +#, fuzzy +msgid "No JobId specified cannot continue.\n" +msgstr "Brak definicji zasobu Messages w %s\n" + +#: src/dird/ua_restore.c:975 +#, c-format +msgid "No table found: %s\n" +msgstr "Nie znaleziono tabeli: %s\n" + +#: src/dird/ua_restore.c:1033 +msgid "" +"\n" +"\n" +"For one or more of the JobIds selected, no files were found,\n" +"so file selection is not possible.\n" +"Most likely your retention policy pruned the files.\n" +msgstr "" +"\n" +"\n" +"Dla jednego lub więcej z wybranych JobId, nie odnaleziono plików,\n" +"dlatego wybór jest niemożliwy.\n" +"Najprawdopodobniej wybrano politykę retencji która obcięła pliki.\n" + +#: src/dird/ua_restore.c:1036 +msgid "" +"\n" +"Do you want to restore all the files? (yes|no): " +msgstr "" +"\n" +"Czy chcesz przywrócić wszystkie pliki? (yes|no): " + +#: src/dird/ua_restore.c:1039 +msgid "" +"\n" +"Regexp matching files to restore? (empty to abort): " +msgstr "" + +#: src/dird/ua_restore.c:1055 +#, c-format +msgid "Regex compile error: %s\n" +msgstr "Błąd kompilacji Regex: %s\n" + +#: src/dird/ua_restore.c:1101 +#, c-format +msgid "" +"\n" +"Building directory tree for JobId(s) %s ... " +msgstr "" +"\n" +"Tworzenie drzewa katalogów dla JobId: %s ... 
" + +#: src/dird/ua_restore.c:1160 +#, c-format +msgid "" +"\n" +"%s files inserted into the tree and marked for extraction.\n" +msgstr "" +"\n" +"%s pliki wstawione do drzewa i zaznaczone do ekstrakcji.\n" + +#: src/dird/ua_restore.c:1163 +#, c-format +msgid "" +"\n" +"%s files inserted into the tree.\n" +msgstr "" +"\n" +"%s plików wstawionych do drzewa.\n" + +#: src/dird/ua_restore.c:1235 +#, c-format +msgid "Error getting FileSet \"%s\": ERR=%s\n" +msgstr "Błąd pobierania FileSet \"%s\": ERR=%s\n" + +#: src/dird/ua_restore.c:1249 +#, fuzzy, c-format +msgid "No FileSet found for client \"%s\".\n" +msgstr "Nie znaleziono zasobu Zadania dla \"%s\".\n" + +#: src/dird/ua_restore.c:1255 +#, c-format +msgid "Error getting FileSet record: %s\n" +msgstr "Błąd pobierania rekordu FileSet: %s\n" + +#: src/dird/ua_restore.c:1256 +msgid "" +"This probably means you modified the FileSet.\n" +"Continuing anyway.\n" +msgstr "" +"To prawdopodobnie oznacza że zmodyfikowałeś FileSet.\n" +"Kontynuuję bez względu na to.\n" + +#: src/dird/ua_restore.c:1271 +#, fuzzy, c-format +msgid "Pool \"%s\" not found, using any pool.\n" +msgstr "Zasób Puli \"%s\" nie znaleziony.\n" + +#: src/dird/ua_restore.c:1298 src/dird/ua_restore.c:1314 +#, c-format +msgid "No Full backup before %s found.\n" +msgstr "Nie ma Pełnego Backup'u przed %s.\n" + +#: src/dird/ua_restore.c:1343 +msgid "No jobs found.\n" +msgstr "Nie znaleziono Zada.\n" + +#: src/dird/ua_restore.c:1464 +#, c-format +msgid "Warning default storage overridden by \"%s\" on command line.\n" +msgstr "" + +#: src/dird/ua_restore.c:1480 +#, c-format +msgid "Storage \"%s\" not found, using Storage \"%s\" from MediaType \"%s\".\n" +msgstr "" + +#: src/dird/ua_restore.c:1488 +#, c-format +msgid "" +"\n" +"Unable to find Storage resource for\n" +"MediaType \"%s\", needed by the Jobs you selected.\n" +msgstr "" + +#: src/dird/restore.c:185 src/dird/restore.c:272 +#, c-format +msgid "Could not get storage resource '%s'.\n" +msgstr "Nie można otrzyma zasobu storage '%s'.\n" + +#: src/dird/restore.c:459 +msgid "" +"Cannot restore without a bootstrap file.\n" +"You probably ran a restore job directly. 
All restore jobs must\n" +"be run using the restore command.\n" +msgstr "" + +#: src/dird/restore.c:467 +#, c-format +msgid "Start Restore Job %s\n" +msgstr "Uruchom Zadanie Przywracania %s\n" + +#: src/dird/restore.c:517 +msgid "Restore OK -- warning file count mismatch" +msgstr "Przywracanie OK -- ostrzeżenie o niedopasowaniu zliczenia plików" + +#: src/dird/restore.c:519 +#, fuzzy +msgid "Restore OK" +msgstr "Odtwarzanie Anulowane" + +#: src/dird/restore.c:523 +msgid "Restore OK -- with warnings" +msgstr "Przywracanie OK -- z ostrzeżeniami" + +#: src/dird/restore.c:527 +#, fuzzy +msgid "*** Restore Error ***" +msgstr "*** Błąd Weryfikacji ***" + +#: src/dird/restore.c:537 +msgid "Restore Canceled" +msgstr "Odtwarzanie Anulowane" + +#: src/dird/restore.c:564 +#, c-format +msgid "" +"%s %s %s (%s): %s\n" +" Build OS: %s %s %s\n" +" JobId: %d\n" +" Job: %s\n" +" Restore Client: %s\n" +" Start time: %s\n" +" End time: %s\n" +" Files Expected: %s\n" +" Files Restored: %s\n" +" Bytes Restored: %s\n" +" Rate: %.1f KB/s\n" +" FD Errors: %d\n" +" FD termination status: %s\n" +" SD termination status: %s\n" +" Termination: %s\n" +"\n" +msgstr "" + +#: src/dird/newvol.c:91 +#, c-format +msgid "Illegal character in Volume name \"%s\"\n" +msgstr "Nieprawidłowy znak w nazwie Wolumenu \"%s\"\n" + +#: src/dird/newvol.c:104 +#, c-format +msgid "Created new Volume \"%s\" in catalog.\n" +msgstr "W katalogu utworzono nowy Wolumen \"%s\"\n" + +#: src/dird/newvol.c:131 +#, fuzzy, c-format +msgid "SQL failed, but ignored. ERR=%s\n" +msgstr "Nieudany SQL. ERR=%s\n" + +#: src/dird/newvol.c:142 +#, c-format +msgid "Wanted to create Volume \"%s\", but it already exists. Trying again.\n" +msgstr "" +"Żadanie utworzenia Wolumenu \"%s\", który już istnieje. Ponowienie próby.\n" + +#: src/dird/newvol.c:151 +msgid "Too many failures. Giving up creating Volume name.\n" +msgstr "Zbyt wiele niepowodzeń. Rezygnacja z utworzenia nazwy Wolumenu.\n" + +#: src/dird/job.c:62 +#, c-format +msgid "Could not init job queue: ERR=%s\n" +msgstr "Nie można zainicjować kolejki zada: ERR=%s\n" + +#: src/dird/job.c:94 +#, c-format +msgid "Could not add job queue: ERR=%s\n" +msgstr "Nie można dodać zadania do kolejki: ERR=%s\n" + +#: src/dird/job.c:113 src/dird/jobq.c:221 src/stored/dircmd.c:198 +#: src/stored/stored.c:530 +#, c-format +msgid "Unable to init job cond variable: ERR=%s\n" +msgstr "Nie można zainicjować zmiennej warunkowej zadania: ERR=%s\n" + +#: src/dird/job.c:187 src/dird/job.c:428 src/dird/job.c:430 src/dird/job.c:485 +#: src/dird/job.c:487 src/dird/job.c:1153 src/dird/job.c:1194 +#: src/dird/job.c:1203 +msgid "Job resource" +msgstr "Zasób zadania" + +#: src/dird/job.c:234 src/dird/job.c:353 +msgid "Unimplemented job type: %d\n" +msgstr "Niezaimplementowany typ zadania: %d\n" + +#: src/dird/job.c:276 +msgid "Job canceled because max start delay time exceeded.\n" +msgstr "" +"Zadanie anulowane ze względu na przekroczenie dopuszczalnego czasu zwłoki " +"uruchomienia.\n" + +#: src/dird/job.c:281 +msgid "Job canceled because max sched run time exceeded.\n" +msgstr "" +"Zadanie anulowane ze względu na przekroczenie dopuszczalnego czasu " +"uruchomienia.\n" + +#: src/dird/job.c:395 +#, c-format +msgid "JobId %s, Job %s marked to be canceled.\n" +msgstr "JobId %s, Zadanie %s oznaczone do anulowania.\n" + +#: src/dird/job.c:405 +msgid "Failed to connect to File daemon.\n" +msgstr "Nieudane podłączenie do demona Plików.\n" + +#: src/dird/job.c:553 +msgid "Max wait time exceeded. 
Job canceled.\n" +msgstr "Przekroczony maksymalny czas oczekiwania. Zadanie anulowane.\n" + +#: src/dird/job.c:558 +msgid "Max run time exceeded. Job canceled.\n" +msgstr "Przekroczony maksymalny czas uruchomienia. Zadanie anulowane.\n" + +#: src/dird/job.c:563 +msgid "Max sched run time exceeded. Job canceled.\n" +msgstr "" +"Przekroczony maksymalny czas wykonania zaplanowanego zadania. Zadanie " +"anulowane.\n" + +#: src/dird/job.c:685 +#, c-format +msgid "Pool \"%s\" not in database. ERR=%s" +msgstr "Pula \"%s\" nie istnieje w bazie danych. ERR=%s" + +#: src/dird/job.c:689 +msgid "Created database record for Pool \"%s\".\n" +msgstr "Stworzony rekord bazodanowy dla Puli \"%s\".\n" + +#: src/dird/job.c:755 src/dird/job.c:788 +#, c-format +msgid "JobId %d already running. Duplicate job not allowed.\n" +msgstr "JobId %d już uruchomiony. Duplikat zadania nie dozwolony.\n" + +#: src/dird/job.c:780 +#, c-format +msgid "Cancelling duplicate JobId=%d.\n" +msgstr "Anulowanie duplikatu JobId=%d.\n" + +#: src/dird/job.c:807 src/dird/job.c:1140 +msgid "Run pool override" +msgstr "" + +#: src/dird/job.c:818 +msgid "Run FullPool override" +msgstr "" + +#: src/dird/job.c:820 +msgid "Job FullPool override" +msgstr "" + +#: src/dird/job.c:829 +msgid "Run IncPool override" +msgstr "" + +#: src/dird/job.c:831 +msgid "Job IncPool override" +msgstr "" + +#: src/dird/job.c:840 +msgid "Run DiffPool override" +msgstr "" + +#: src/dird/job.c:842 +msgid "Job DiffPool override" +msgstr "" + +#: src/dird/job.c:872 src/stored/bscan.c:1012 +#, c-format +msgid "Could not create Client record. ERR=%s\n" +msgstr "Nie można stworzyć rekordu Klienta. ERR=%s\n" + +#: src/dird/job.c:908 +msgid "FileSet MD5 digest not found.\n" +msgstr "Nie znaleziono skrótu FileSet MD5.\n" + +#: src/dird/job.c:913 +#, c-format +msgid "Could not create FileSet \"%s\" record. ERR=%s\n" +msgstr "Nie można stworzyć rekordu FileSet \"%s\". ERR=%s\n" + +#: src/dird/job.c:955 +#, c-format +msgid "Error updating job record. %s" +msgstr "Błąd uaktualniania rekordu zadania. 
%s" + +#: src/dird/job.c:1145 +msgid "Run storage override" +msgstr "" + +#: src/dird/job.c:1213 +msgid "Client resource" +msgstr "Zasób Klienta" + +#: src/dird/job.c:1436 +#, c-format +msgid "Could not start clone job: \"%s\".\n" +msgstr "Nie można uruchomi zadania klonowania: \"%s\".\n" + +#: src/dird/job.c:1439 +#, fuzzy, c-format +msgid "Clone JobId %d started.\n" +msgstr "%s JobId %d wystartowane.\n" + +#: src/dird/ua_dotcmds.c:159 +msgid ": is an invalid command.\n" +msgstr ": jest nieprawidłowym poleceniem.\n" + +#: src/dird/ua_dotcmds.c:480 +msgid "Select daemon type to make die" +msgstr "Wybierz rodzaj demona do zabicia" + +#: src/dird/ua_dotcmds.c:505 +msgid "The Director will generate a deadlock.\n" +msgstr "Director wygeneruje deadlock.\n" + +#: src/dird/ua_dotcmds.c:509 +msgid "The Director will segment fault.\n" +msgstr "" + +#: src/dird/ua_dotcmds.c:677 +msgid "Access to specified Client or FileSet not allowed.\n" +msgstr "Brak pozwolenia na dostęp do wybranego Klienta lub FileSet'u.\n" + +#: src/dird/ua_dotcmds.c:721 +msgid "query keyword not found.\n" +msgstr "nie znaleziono sowa kluczowego zapytania.\n" + +#: src/dird/ua_dotcmds.c:748 +#, fuzzy, c-format +msgid "List MediaType failed: ERR=%s\n" +msgstr "Nieudana aktualizacja rekordu Media %s: ERR=%s\n" + +#: src/dird/ua_dotcmds.c:762 +#, fuzzy, c-format +msgid "List Media failed: ERR=%s\n" +msgstr "Nieudana aktualizacja rekordu Media %s: ERR=%s\n" + +#: src/dird/ua_dotcmds.c:776 +#, fuzzy, c-format +msgid "List Location failed: ERR=%s\n" +msgstr "Błąd odczytu na pliku %s. ERR=%s\n" + +#: src/dird/getmsg.c:172 +#, c-format +msgid "bget_dirmsg: unknown bnet signal %d\n" +msgstr "" + +#: src/dird/getmsg.c:190 src/dird/getmsg.c:196 src/dird/getmsg.c:209 +#: src/dird/getmsg.c:243 src/dird/getmsg.c:265 src/dird/getmsg.c:291 +#, c-format +msgid "Malformed message: %s\n" +msgstr "" + +#: src/dird/getmsg.c:363 +#, c-format +msgid "Bad response to %s command: wanted %s, got %s\n" +msgstr "Za odpowiedź na komend %s. Oczekiwano %s, otrzymano %s\n" + +#: src/dird/getmsg.c:368 +#, fuzzy, c-format +msgid "Socket error on %s command: ERR=%s\n" +msgstr "Przesunicie do %s na: %s: ERR=%s\n" + +#: src/dird/jobq.c:75 +#, fuzzy, c-format +msgid "pthread_attr_init: ERR=%s\n" +msgstr "pthread_create: ERR=%s\n" + +#: src/dird/jobq.c:84 +#, fuzzy, c-format +msgid "pthread_mutex_init: ERR=%s\n" +msgstr "pthread_create: ERR=%s\n" + +#: src/dird/jobq.c:90 +#, fuzzy, c-format +msgid "pthread_cond_init: ERR=%s\n" +msgstr "pthread_create: ERR=%s\n" + +#: src/dird/jobq.c:132 +#, fuzzy, c-format +msgid "pthread_cond_broadcast: ERR=%s\n" +msgstr "pthread_create: ERR=%s\n" + +#: src/dird/jobq.c:140 +#, fuzzy, c-format +msgid "pthread_cond_wait: ERR=%s\n" +msgstr "pthread_create: ERR=%s\n" + +#: src/dird/jobq.c:181 +#, fuzzy, c-format +msgid "Job %s waiting %d seconds for scheduled start time.\n" +msgstr "Status zadania: Oczekiwanie na czas startu" + +#: src/dird/jobq.c:243 +#, fuzzy, c-format +msgid "pthread_thread_create: ERR=%s\n" +msgstr "pthread_create: ERR=%s\n" + +#: src/dird/jobq.c:353 +#, fuzzy, c-format +msgid "pthread_cond_signal: ERR=%s\n" +msgstr "pthread_create: ERR=%s\n" + +#: src/dird/jobq.c:364 +#, c-format +msgid "pthread_create: ERR=%s\n" +msgstr "pthread_create: ERR=%s\n" + +#: src/dird/jobq.c:636 +#, c-format +msgid "Rescheduled Job %s at %s to re-run in %d seconds (%s).\n" +msgstr "" +"Przeplanowano Zadania %s na %s. 
Ponowne uruchomienie za %d sekund (%s).\n" + +#: src/dird/jobq.c:675 src/dird/jobq.c:680 +msgid "previous Job" +msgstr "poprzednie Zadanie" + +#: src/dird/jobq.c:717 +#, c-format +msgid "" +"Job canceled. Attempt to read and write same device.\n" +" Read storage \"%s\" (From %s) -- Write storage \"%s\" (From %s)\n" +msgstr "" + +#: src/dird/expand.c:255 +#, c-format +msgid "Count not update counter %s: ERR=%s\n" +msgstr "Nie można zaktualizowa licznika %s: ERR=%s\n" + +#: src/dird/expand.c:427 +#, c-format +msgid "Cannot create var context: ERR=%s\n" +msgstr "Nie można stworzyć kontekstu zmiennej: ERR=%s\n" + +#: src/dird/expand.c:432 +#, c-format +msgid "Cannot set var callback: ERR=%s\n" +msgstr "Nie można ustawi wywoania zmiennej: ERR=%s\n" + +#: src/dird/expand.c:438 +#, c-format +msgid "Cannot set var operate: ERR=%s\n" +msgstr "Nie można ustawi operatora zmiennej: ERR=%s\n" + +#: src/dird/expand.c:444 src/dird/expand.c:459 +#, c-format +msgid "Cannot unescape string: ERR=%s\n" +msgstr "Nie można wyplta acucha: ERR=%s\n" + +#: src/dird/expand.c:452 +#, c-format +msgid "Cannot expand expression \"%s\": ERR=%s\n" +msgstr "Nie można rozwin wyraenia \"%s\": ERR=%s.\n" + +#: src/dird/expand.c:470 +#, c-format +msgid "Cannot destroy var context: ERR=%s\n" +msgstr "Nie można zniszczy kontekstu zmiennej: ERR=%s\n" + +#: src/dird/recycle.c:65 +#, c-format +msgid "Recycled volume \"%s\"\n" +msgstr "Zrecyklingowany wolumen \"%s\"\n" + +#: src/dird/inc_conf.c:311 +#, c-format +msgid "Expected a strip path positive integer, got:%s:" +msgstr "" + +#: src/dird/inc_conf.c:331 +#, c-format +msgid "Expected a FileSet option keyword, got:%s:" +msgstr "" + +#: src/dird/inc_conf.c:364 +msgid "Old style Include/Exclude not supported\n" +msgstr "" + +#: src/dird/inc_conf.c:463 +#, c-format +msgid "Regex compile error. ERR=%s\n" +msgstr "Błąd kompilacji regex. ERR=%s\n" + +#: src/dird/inc_conf.c:484 +#, c-format +msgid "Expected a regex string, got: %s\n" +msgstr "Oczekiwano ciągu znaków regex, otrzymano: %s\n" + +#: src/dird/inc_conf.c:560 +#, c-format +msgid "Expected a wild-card string, got: %s\n" +msgstr "Oczekiwano ciągu znaków wild-card, otrzymano: %s\n" + +#: src/dird/inc_conf.c:583 +#, c-format +msgid "Expected an fstype string, got: %s\n" +msgstr "Oczekiwano ciągu znaków fstype, otrzymano: %s\n" + +#: src/dird/inc_conf.c:595 +msgid "ExcludeDirContaining directive not permitted in Exclude.\n" +msgstr "" + +#: src/dird/inc_conf.c:622 +#, c-format +msgid "Expected an drivetype string, got: %s\n" +msgstr "Oczekiwano ciągu znaków drivetype, otrzymano: %s\n" + +#: src/dird/inc_conf.c:646 src/dird/inc_conf.c:689 +#, c-format +msgid "Backslash found. 
Use forward slashes or quote the string.: %s\n" +msgstr "" + +#: src/dird/inc_conf.c:661 src/dird/inc_conf.c:704 +#, c-format +msgid "Expected a filename, got: %s" +msgstr "Oczekiwano nazwy pliku, otrzymano: %s" + +#: src/dird/inc_conf.c:678 +msgid "Plugin directive not permitted in Exclude\n" +msgstr "" + +#: src/dird/inc_conf.c:721 +msgid "Options section not permitted in Exclude\n" +msgstr "" + +#: src/dird/inc_conf.c:781 +#, fuzzy, c-format +msgid "Expected a FileSet keyword, got: %s" +msgstr "Niepoprawna komenda FileSet: %s\n" + +#: src/dird/verify.c:86 src/dird/verify.c:310 +#, fuzzy, c-format +msgid "Unimplemented Verify level %d(%c)\n" +msgstr "Niezaimplementowany poziom weryfikacji %d\n" + +#: src/dird/verify.c:141 +msgid "" +"Unable to find JobId of previous InitCatalog Job.\n" +"Please run a Verify with Level=InitCatalog before\n" +"running the current Job.\n" +msgstr "" + +#: src/dird/verify.c:146 +msgid "Unable to find JobId of previous Job for this client.\n" +msgstr "" +"Niemożliwe znalezienie JobId wcześniejszego Zadania dla tego klienta.\n" + +#: src/dird/verify.c:162 +#, fuzzy, c-format +msgid "Could not get job record for previous Job. ERR=%s" +msgstr "Nieudane stworzenie rekordu JobMedia %s: ERR=%s\n" + +#: src/dird/verify.c:168 +#, c-format +msgid "Last Job %d did not terminate normally. JobStatus=%c\n" +msgstr "Ostatnie Zadanie %d nie zakończyło się poprawnie. JobStatus=%c\n" + +#: src/dird/verify.c:172 +#, c-format +msgid "Verifying against JobId=%d Job=%s\n" +msgstr "Weryfikacja w stosunku do JobId=%d Job=%s\n" + +#: src/dird/verify.c:211 +#, c-format +msgid "Start Verify JobId=%s Level=%s Job=%s\n" +msgstr "Uruchomienie Weryfikacji JobId=%s Level=%s Job=%s\n" + +#: src/dird/verify.c:297 +msgid "Deprecated feature ... use bootstrap.\n" +msgstr "" + +#: src/dird/verify.c:364 +#, c-format +msgid "Unimplemented verify level %d\n" +msgstr "Niezaimplementowany poziom weryfikacji %d\n" + +#: src/dird/verify.c:416 +msgid "Verify OK" +msgstr "Weryfikacja OK" + +#: src/dird/verify.c:420 +msgid "*** Verify Error ***" +msgstr "*** Błąd Weryfikacji ***" + +#: src/dird/verify.c:424 +msgid "Verify warnings" +msgstr "Ostrzerzenia przy Weryfikacji" + +#: src/dird/verify.c:427 +msgid "Verify Canceled" +msgstr "Weryfikacja Anulowana" + +#: src/dird/verify.c:430 +msgid "Verify Differences" +msgstr "Rnice przy weryfikacji" + +#: src/dird/verify.c:435 +#, c-format +msgid "Inappropriate term code: %d %c\n" +msgstr "Nieprawidłowy kod: %d %c\n" + +#: src/dird/verify.c:449 +#, c-format +msgid "" +"%s %s %s (%s): %s\n" +" Build OS: %s %s %s\n" +" JobId: %d\n" +" Job: %s\n" +" FileSet: %s\n" +" Verify Level: %s\n" +" Client: %s\n" +" Verify JobId: %d\n" +" Verify Job: %s\n" +" Start time: %s\n" +" End time: %s\n" +" Files Expected: %s\n" +" Files Examined: %s\n" +" Non-fatal FD errors: %d\n" +" FD termination status: %s\n" +" SD termination status: %s\n" +" Termination: %s\n" +"\n" +msgstr "" +"%s %s %s (%s): %s\n" +" System operacyjny: %s %s %s\n" +" JobId: %d\n" +" Job: %s\n" +" FileSet: %s\n" +" Poziom weryfikacji: %s\n" +" Klient: %s\n" +" Verify JobId: %d\n" +" Verify Job: %s\n" +" Czas rozpoczęcia: %s\n" +" Czas zakończenia: %s\n" +" Pliki oczekiwane: %s\n" +" Pliki sprawdzone: %s\n" +" Niekrytyczne błędy FD: %d\n" +" Status FD termination: %s\n" +" Status SD termination status: %s\n" +" Termination: %s\n" +"\n" + +#: src/dird/verify.c:484 +#, c-format +msgid "" +"%s %s %s (%s): %s\n" +" Build: %s %s %s\n" +" JobId: %d\n" +" Job: %s\n" +" FileSet: %s\n" +" Verify Level: %s\n" +" Client: 
%s\n" +" Verify JobId: %d\n" +" Verify Job: %s\n" +" Start time: %s\n" +" End time: %s\n" +" Files Examined: %s\n" +" Non-fatal FD errors: %d\n" +" FD termination status: %s\n" +" Termination: %s\n" +"\n" +msgstr "" +"%s %s %s (%s): %s\n" +" Build: %s %s %s\n" +" JobId: %d\n" +" Job: %s\n" +" FileSet: %s\n" +" Verify Level: %s\n" +" Klient: %s\n" +" Verify JobId: %d\n" +" Verify Job: %s\n" +" Czas rozpoczęcia: %s\n" +" Czas zakończenia: %s\n" +" Pliki sprawdzone: %s\n" +" Niekrytyczne błędy FD: %d\n" +" Status FD termination: %s\n" +" Termination: %s\n" +"\n" + +# fuzzy, c-format +#: src/dird/verify.c:562 +#, fuzzy, c-format +msgid "" +"bird set configuration file to file\n" +" -d set debug level to \n" +" -dt print timestamp in debug output\n" +" -f run in foreground (for debugging)\n" +" -g groupid\n" +" -m print kaboom output (for debugging)\n" +" -r run now\n" +" -s no signals\n" +" -t test - read configuration and exit\n" +" -u userid\n" +" -v verbose user messages\n" +" -? print this message.\n" +"\n" +msgstr "" +"\n" +"Wersja: %s (%s)\n" +"\n" +"Użycie: dird [-f -s] [-c config_file] [-d debug_level] [config_file]\n" +" -c uyj jako pliku konfiguracyjnego\n" +" -d ustaw poziom debugingu na \n" +" -dt wyświetl znacznik czasu podczas wywietlania debugingu\n" +" -f uruchom na pierwszym planie (dla debugingu)\n" +" -g identyfikator grupy\n" +" -m wyświetl informacje kaboom (dla debugingu)\n" +" -r uruchom teraz zadanie \n" +" -s brak sygnaw\n" +" -t test - odczytaj plik konfiguracji i zakocz\n" +" -u identyfikator uytkownika\n" +" -v gadatliwe komunikaty uytkownika\n" +" -? wyświetl ten komunikat.\n" +"\n" + +#: src/dird/dird.c:521 +msgid "Too many open reload requests. Request ignored.\n" +msgstr "Zbyt wiele żądań przeładowania. Żądanie zignorowane.\n" + +# fuzzy: src/dird/dird.c:536 +#: src/dird/dird.c:536 +msgid "Out of reload table entries. Giving up.\n" +msgstr "Przekroczono zakres wpisów tabeli przeładowania. Poddaję się.\n" + +#: src/dird/dird.c:539 +#, fuzzy +msgid "Resetting previous configuration.\n" +msgstr "Proszę popraw plik konfiguracyjny: %s\n" + +#: src/dird/dird.c:603 +#, c-format +msgid "" +"No Director resource defined in %s\n" +"Without that I don't know who I am :-(\n" +msgstr "" +"Brak definicji zasobu Dyrektora w %s\n" +"Bez tego nie wiem kim jestem :-(\n" + +#: src/dird/dird.c:616 +#, c-format +msgid "Only one Director resource permitted in %s\n" +msgstr "Dozwolony tylko jeden zasób Dyrektora w %s\n" + +#: src/dird/dird.c:673 +#, c-format +msgid "No Job records defined in %s\n" +msgstr "Brak definicji rekordw Zadania w %s\n" + +#: src/dird/dird.c:731 src/dird/dird.c:744 +#, c-format +msgid "Hey something is wrong. p=0x%lu\n" +msgstr "Hej, coś jest nie tak! 
p=0x%lu\n" + +#: src/dird/dird.c:805 +#, c-format +msgid "\"%s\" directive in Job \"%s\" resource is required, but not found.\n" +msgstr "" +"Dyrektywa \"%s\" w zasobie Zadania \"%s\" jest wymagana, lecz nie zostaa " +"znaleziona.\n" + +#: src/dird/dird.c:812 +msgid "Too many items in Job resource\n" +msgstr "Zbyt duo elementw w zasobie Zadania\n" + +#: src/dird/dird.c:816 +#, fuzzy, c-format +msgid "No storage specified in Job \"%s\" nor in Pool.\n" +msgstr "Brak definicji zasobu Messages w %s\n" + +#: src/dird/dird.c:840 +#, c-format +msgid "\"TLS Certificate\" file not defined for Console \"%s\" in %s.\n" +msgstr "Nie zdefiniowany plik \"TLS Certificate\" dla Konsoli \"%s\" w %s.\n" + +#: src/dird/dird.c:846 +#, c-format +msgid "\"TLS Key\" file not defined for Console \"%s\" in %s.\n" +msgstr "Nie zdefiniowany plik \"TLS Key\" dla Konsoli \"%s\" w %s.\n" + +#: src/dird/dird.c:853 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Console \"%s\" in %s. At least one CA certificate store is required when " +"using \"TLS Verify Peer\".\n" +msgstr "" +"Ani \"TLS CA Certificate\", ani \"TLS CA Certificate Dir\" nie są " +"zdefiniowane dla Konsoli \"%s\" w %s. Co najmniej jedno składowanie " +"certyfikatu CA jest wymagane kiedy jest używane \"TLS Verify Peer\".\n" + +#: src/dird/dird.c:893 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"File daemon \"%s\" in %s.\n" +msgstr "" +"Ani \"TLS CA Certificate\", ani \"TLS CA Certificate Dir\" nie zostay " +"zdefiniowane dla demona Plików \"%s\" w %s.\n" + +#: src/dird/dird.c:949 src/dird/dird.c:951 +msgid "Could not open Catalog \"%s\", database \"%s\".\n" +msgstr "Nie można otworzyć Katalogu \"%s\", baza danych \"%s\".\n" + +#: src/dird/dird.c:954 +#, c-format +msgid "%s" +msgstr "%s" + +#: src/dird/dird.c:1025 +#, c-format +msgid "Could not create storage record for %s\n" +msgstr "Nie można stworzyć rekordu storage dla %s\n" + +#: src/dird/dird.c:1033 +#, c-format +msgid "Could not update storage record for %s\n" +msgstr "Nie można uaktualni rekordu storage dla %s\n" + +#: src/dird/dird.c:1052 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Storage \"%s\" in %s.\n" +msgstr "" +"Ani \"TLS CA Certificate\", ani \"TLS CA Certificate Dir\" nie zostay " +"zdefiniowane dla demona Przechowywania \"%s\" w %s.\n" + +#: src/dird/dird.c:1068 src/stored/stored.c:421 +#, c-format +msgid "Failed to initialize TLS context for Storage \"%s\" in %s.\n" +msgstr "Nieudana inicjalizacja kontekstu TLS dla Storage \"%s\" w %s.\n" + +#: src/dird/ua_input.c:95 +msgid "Enter slot" +msgstr "Wprowadź slot" + +#: src/dird/ua_input.c:99 src/dird/ua_input.c:105 +#, c-format +msgid "Expected a positive integer, got: %s\n" +msgstr "Oczekiwano dodatniej wartości całkowitej, wprowadzono: %s\n" + +# fuzzy : src/dird/ua_input.c:162 +#: src/dird/ua_input.c:162 +msgid "Invalid response. You must answer yes or no.\n" +msgstr "Nieprawidłowa odpowiedź. 
Musisz odpowiedzieć 'yes' lub 'no'.\n" + +#: src/dird/ua_input.c:185 +msgid "Invalid Enabled value, it must be yes, no, archived, 0, 1, or 2\n" +msgstr "" + +#: src/dird/ua_input.c:212 +#, c-format +msgid "Illegal character \"%c\" in a comment.\n" +msgstr "Nieprawidłowy znak \"%c\" w komentarzu.\n" + +#: src/dird/ua_input.c:219 +msgid "Comment too long.\n" +msgstr "Komentarz za długi.\n" + +#: src/dird/ua_input.c:225 +msgid "Comment must be at least one character long.\n" +msgstr "Komentarz musi mieć przynajmniej jeden znak.\n" + +#: src/dird/fd_cmds.c:94 +msgid "Client: " +msgstr "Klient: " + +#: src/dird/fd_cmds.c:136 +#, c-format +msgid "File daemon \"%s\" rejected Job command: %s\n" +msgstr "Demona Plików \"%s\" odrzuci komend Job: %s\n" + +#: src/dird/fd_cmds.c:149 +#, c-format +msgid "Error updating Client record. ERR=%s\n" +msgstr "Błąd aktualizacji rekordu Klienta. ERR=%s\n" + +#: src/dird/fd_cmds.c:154 +#, fuzzy, c-format +msgid "FD gave bad response to JobId command: %s\n" +msgstr "filed: write error on socket\n" +msgstr ">filed: bd zapisu na soket\n" + +#: src/dird/fd_cmds.c:509 +#, c-format +msgid "Error running program: %s. ERR=%s\n" +msgstr "Błąd uruchomienia programu: %s. ERR=%s\n" + +#: src/dird/fd_cmds.c:518 +#, c-format +msgid "Cannot open included file: %s. ERR=%s\n" +msgstr "Nie można otworzyć pliku wejciowego FileSet: %s. ERR=%s\n" + +#: src/dird/fd_cmds.c:664 +#, c-format +msgid "Client \"%s\" RunScript failed.\n" +msgstr "Niepowodzenie RunScript'u Klienta \"%s\".\n" + +#: src/dird/fd_cmds.c:699 +#, fuzzy, c-format +msgid "" +" %s: ERR=%s\n" +msgstr "Nie można stworzyć linka symbolicznego %s -> %s: ERR=%s\n" + +#: src/findlib/create_file.c:325 src/findlib/create_file.c:338 +#, fuzzy, c-format +msgid "Could not restore file flags for file %s: ERR=%s\n" +msgstr "Nie można stworzyć pliku bootstrap %s: ERR=%s\n" + +#: src/findlib/create_file.c:329 src/findlib/create_file.c:346 +#, fuzzy, c-format +msgid "Could not hard link %s -> %s: ERR=%s\n" +msgstr "Nie można stworzyć linka twardego %s -> %s: ERR=%s\n" + +#: src/findlib/create_file.c:342 +#, fuzzy, c-format +msgid "Could not reset file flags for file %s: ERR=%s\n" +msgstr "Nie można stworzyć pliku bootstrap %s: ERR=%s\n" + +#: src/findlib/create_file.c:396 +#, fuzzy, c-format +msgid "Original file %s have been deleted: type=%d\n" +msgstr "Nie zapisany oryginalny plik: typ=%s\n" + +#: src/findlib/create_file.c:408 +#, fuzzy, c-format +msgid "Original file %s not saved: type=%d\n" +msgstr "Nie zapisany oryginalny plik: typ=%s\n" + +#: src/findlib/create_file.c:411 +#, c-format +msgid "Unknown file type %d; not restored: %s\n" +msgstr "Nieznany typ pliku %d; nie odtworzono: %s\n" + +#: src/findlib/create_file.c:455 +#, fuzzy, c-format +msgid "Zero length filename: %s\n" +msgstr " Przetworzonych plikw: %s\n" + +#: src/findlib/find.c:212 +#, c-format +msgid "Plugin: \"%s\" not found.\n" +msgstr "Wtyczka: \"%s\" nie znaleziona.\n" + +#: src/findlib/bfile.c:95 +msgid "Unix attributes" +msgstr "Atrybuty Unix" + +#: src/findlib/bfile.c:97 +msgid "File data" +msgstr "Dane pliku" + +#: src/findlib/bfile.c:99 +msgid "MD5 digest" +msgstr "Streszczenie MD5" + +#: src/findlib/bfile.c:101 +msgid "GZIP data" +msgstr "Dane GZIP" + +#: src/findlib/bfile.c:103 +#, fuzzy +msgid "Extended attributes" +msgstr "Atrybuty Unix" + +#: src/findlib/bfile.c:105 +#, fuzzy +msgid "Sparse data" +msgstr "Dane pliku" + +#: src/findlib/bfile.c:107 +msgid "GZIP sparse data" +msgstr "" + +#: src/findlib/bfile.c:109 +msgid "Program names" +msgstr "Nazwy 
programu" + +#: src/findlib/bfile.c:111 +msgid "Program data" +msgstr "Dane programu" + +#: src/findlib/bfile.c:113 +msgid "SHA1 digest" +msgstr "Streszczenie SHA1" + +#: src/findlib/bfile.c:115 +#, fuzzy +msgid "Win32 data" +msgstr "Dane pliku" + +#: src/findlib/bfile.c:117 +msgid "Win32 GZIP data" +msgstr "Dane Win32 GZIP" + +#: src/findlib/bfile.c:119 +msgid "MacOS Fork data" +msgstr "Dane MacOS Fork" + +#: src/findlib/bfile.c:121 +msgid "HFS+ attribs" +msgstr "Atrybuty HFS+" + +#: src/findlib/bfile.c:123 +#, fuzzy +msgid "Standard Unix ACL attribs" +msgstr "Atrybuty Unix" + +#: src/findlib/bfile.c:125 +#, fuzzy +msgid "Default Unix ACL attribs" +msgstr "Atrybuty Unix" + +#: src/findlib/bfile.c:127 +msgid "SHA256 digest" +msgstr "Streszczenie SHA256" + +#: src/findlib/bfile.c:129 +msgid "SHA512 digest" +msgstr "Streszczenie SHA512" + +#: src/findlib/bfile.c:131 +msgid "Signed digest" +msgstr "Podpisane streszczenie" + +#: src/findlib/bfile.c:133 +#, fuzzy +msgid "Encrypted File data" +msgstr "Dane pliku" + +#: src/findlib/bfile.c:135 +msgid "Encrypted Win32 data" +msgstr "zakodowane dane Win32" + +#: src/findlib/bfile.c:137 +msgid "Encrypted session data" +msgstr "zakodowane dane sesji" + +#: src/findlib/bfile.c:139 +msgid "Encrypted GZIP data" +msgstr "Zakodowane dane GZIP" + +#: src/findlib/bfile.c:141 +msgid "Encrypted Win32 GZIP data" +msgstr "Zakodowane dane Win32 GZIP" + +#: src/findlib/bfile.c:143 +msgid "Encrypted MacOS fork data" +msgstr "Zakodowane dane MacOS fork" + +#: src/findlib/bfile.c:145 +msgid "AIX Specific ACL attribs" +msgstr "Atrybuty specyficzne ACL dla AIX" + +#: src/findlib/bfile.c:147 +msgid "Darwin Specific ACL attribs" +msgstr "Atrybuty specyficzne ACL dla Darwin" + +#: src/findlib/bfile.c:149 +msgid "FreeBSD Specific Default ACL attribs" +msgstr "Domyślne atrybuty specyficzne ACL dla FreeBSD" + +#: src/findlib/bfile.c:151 +msgid "FreeBSD Specific Access ACL attribs" +msgstr "Atrybuty specyficzne dostępu ACL dla FreeBSD" + +#: src/findlib/bfile.c:153 +msgid "HPUX Specific ACL attribs" +msgstr "Atrybuty specyficzne ACL dla HPUX" + +#: src/findlib/bfile.c:155 +msgid "Irix Specific Default ACL attribs" +msgstr "Domyślne atrybuty specyficzne ACL dla Irix" + +#: src/findlib/bfile.c:157 +msgid "Irix Specific Access ACL attribs" +msgstr "Atrybuty specyficzne dostępu ACL dla Irix" + +#: src/findlib/bfile.c:159 +msgid "Linux Specific Default ACL attribs" +msgstr "Domyślne atrybuty specyficzne ACL dla Linux" + +#: src/findlib/bfile.c:161 +msgid "Linux Specific Access ACL attribs" +msgstr "Atrybuty specyficzne dostępu ACL dla Linux" + +#: src/findlib/bfile.c:163 +msgid "OSF1 Specific Default ACL attribs" +msgstr "Domyślne atrybuty specyficzne ACL dla OSF1" + +#: src/findlib/bfile.c:165 +msgid "OSF1 Specific Access ACL attribs" +msgstr "Atrybuty specyficzne dostępu ACL dla Irix" + +#: src/findlib/bfile.c:167 src/findlib/bfile.c:169 +msgid "Solaris Specific ACL attribs" +msgstr "Atrybuty specyficzne ACL dla Solaris" + +#: src/findlib/bfile.c:171 +msgid "OpenBSD Specific Extended attribs" +msgstr "Rozszerzone atrybuty specyficzne dla OpenBSD" + +#: src/findlib/bfile.c:173 +msgid "Solaris Specific Extensible attribs or System Extended attribs" +msgstr "" +"Rozszeżalne atrybuty specyficzne dla Solaris lub rozszerzone " +"atrybutysystemowe" + +#: src/findlib/bfile.c:175 +msgid "Solaris Specific Extended attribs" +msgstr "Rozszerzone atrybuty specyficzne dla Solaris" + +#: src/findlib/bfile.c:177 +msgid "Darwin Specific Extended attribs" +msgstr "Rozszerzone atrybuty 
specyficzne dla Darvin" + +#: src/findlib/bfile.c:179 +msgid "FreeBSD Specific Extended attribs" +msgstr "Rozszerzone atrybuty specyficzne dla FreeBSD" + +#: src/findlib/bfile.c:181 +msgid "Linux Specific Extended attribs" +msgstr "Rozszerzone atrybuty specyficzne dla Linux" + +#: src/findlib/bfile.c:183 +msgid "NetBSD Specific Extended attribs" +msgstr "Rozszerzone atrybuty specyficzne dla NetBSD" + +#: src/findlib/attribs.c:420 +#, fuzzy, c-format +msgid "File size of restored file %s not correct. Original %s, restored %s.\n" +msgstr "" +"Rozmiar odzyskanego pliku %s nie jest poprawny. Oryginalny %s, odzyskany %" +"s.\n" + +#: src/findlib/attribs.c:446 src/findlib/attribs.c:453 +#, fuzzy, c-format +msgid "Unable to set file owner %s: ERR=%s\n" +msgstr "Nie mogę otworzyć pliku \"%s\": ERR=%s\n" + +#: src/findlib/attribs.c:459 +#, fuzzy, c-format +msgid "Unable to set file modes %s: ERR=%s\n" +msgstr "Nie mogę otworzyć pliku \"%s\": ERR=%s\n" + +#: src/findlib/attribs.c:469 +#, fuzzy, c-format +msgid "Unable to set file times %s: ERR=%s\n" +msgstr "Nie mogę otworzyć pliku \"%s\": ERR=%s\n" + +#: src/findlib/attribs.c:483 +#, fuzzy, c-format +msgid "Unable to set file flags %s: ERR=%s\n" +msgstr "Nie mogę otworzyć pliku \"%s\": ERR=%s\n" + +#: src/findlib/attribs.c:736 +#, fuzzy, c-format +msgid "Error in %s file %s: ERR=%s\n" +msgstr "Błąd odczytywania pliku %s: ERR=%s\n" + +#: src/findlib/attribs.c:753 +#, fuzzy, c-format +msgid "Error in %s: ERR=%s\n" +msgstr "Błąd odczytywania pliku %s: ERR=%s\n" + +#: src/console/console.c:127 +#, fuzzy, c-format +msgid "" +"\n" +"Version: " +msgstr "Wersja" + +#: src/console/console.c:179 +msgid "input from file" +msgstr "wkład z pliku" + +#: src/console/console.c:180 +msgid "output to file" +msgstr "dane wyjściowe dla pliku" + +#: src/console/console.c:181 +msgid "quit" +msgstr "wyjdź" + +#: src/console/console.c:182 +msgid "output to file and terminal" +msgstr "dane wyjściowe dla pliku lub terminala" + +#: src/console/console.c:183 +msgid "sleep specified time" +msgstr "określony czas uśpienia" + +#: src/console/console.c:184 +msgid "print current time" +msgstr "wyświetl aktualny czas" + +#: src/console/console.c:185 +msgid "print Console's version" +msgstr "wyświetl wersję konsoli" + +#: src/console/console.c:186 +msgid "echo command string" +msgstr "" + +#: src/console/console.c:187 +msgid "execute an external command" +msgstr "wykonaj polecenie zewnętrzne" + +#: src/console/console.c:188 +msgid "exit = quit" +msgstr "wyjdź" + +#: src/console/console.c:189 +msgid "zed_keys = use zed keys instead of bash keys" +msgstr "zed_keys = użyj kluczy zed zamiast kluczy bash" + +#: src/console/console.c:190 +msgid "help listing" +msgstr "listing pomocy" + +#: src/console/console.c:192 +msgid "set command separator" +msgstr "ustaw separator poleceń" + +#: src/console/console.c:226 +#, fuzzy +msgid ": is an invalid command\n" +msgstr "Za komenda level: %s\n" + +#: src/console/console.c:674 +msgid "Illegal separator character.\n" +msgstr "Nieprawidłowy znak seperatora.\n" + +#: src/console/console.c:702 +msgid "Command logic problem\n" +msgstr "Problem logiki polecenia\n" + +#: src/console/console.c:920 +#, fuzzy, c-format +msgid "Can't find %s in Director list\n" +msgstr "Nie można znale zasobu Director %s\n" + +#: src/console/console.c:928 +msgid "Available Directors:\n" +msgstr "" + +#: src/console/console.c:932 +#, c-format +msgid "%2d: %s at %s:%d\n" +msgstr "%2d: %s w %s:%d\n" + +#: src/console/console.c:936 +msgid "Select Director by entering a number: " 
+msgstr "" + +#: src/console/console.c:943 +#, c-format +msgid "%s is not a number. You must enter a number between 1 and %d\n" +msgstr "%s nie jest liczbą. Musisz wybrać liczbę między 1 a %d\n" + +#: src/console/console.c:950 +#, c-format +msgid "You must enter a number between 1 and %d\n" +msgstr "Musisz wybrać liczbę między 1 a %d\n" + +#: src/console/console.c:1155 src/wx-console/console_thread.cpp:399 +#: src/qt-console/bcomm/dircomm.cpp:130 +#, fuzzy, c-format +msgid "Failed to initialize TLS context for Console \"%s\".\n" +msgstr "Nieudana inicjalizacja kontekstu TLS dla Dyrektora \"%s\" w %s.\n" + +#: src/console/console.c:1175 src/wx-console/console_thread.cpp:420 +#: src/qt-console/bcomm/dircomm.cpp:152 +#, fuzzy, c-format +msgid "Failed to initialize TLS context for Director \"%s\".\n" +msgstr "Nieudana inicjalizacja kontekstu TLS dla Dyrektora \"%s\" w %s.\n" + +#: src/console/console.c:1205 +#, fuzzy +msgid "Enter a period to cancel a command.\n" +msgstr "2902 Błąd skanowania komendy anuluj.\n" + +#: src/console/console.c:1299 src/qt-console/main.cpp:220 +#, fuzzy, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Director \"%s\" in %s. At least one CA certificate store is required.\n" +msgstr "" +"Ani \"TLS CA Certificate\", ani \"TLS CA Certificate Dir\" nie s " +"zdefiniowane dla Dyrektora \"%s\" w %s. Co najmniej jedno skadowanie " +"certyfikatu CA jest wymagane kiedy jest używane \"TLS Verify Peer\".\n" + +#: src/console/console.c:1308 src/qt-console/main.cpp:229 +#, fuzzy, c-format +msgid "" +"No Director resource defined in %s\n" +"Without that I don't how to speak to the Director :-(\n" +msgstr "" +"Brak definicji zasobu demona Plików w %s\n" +"Bez tego nie wiem kim jestem :-(\n" + +#: src/console/console.c:1328 src/qt-console/main.cpp:250 +#, fuzzy, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Console \"%s\" in %s.\n" +msgstr "" +"Ani \"TLS CA Certificate\", ani \"TLS CA Certificate Dir\" nie zostay " +"zdefiniowane dla demona Plików w %s.\n" + +#: src/console/console.c:1352 +msgid "Too many arguments on input command.\n" +msgstr "Zbyt wiele argumentów komendy wejściowej.\n" + +#: src/console/console.c:1356 +msgid "First argument to input command must be a filename.\n" +msgstr "Pierwszym argumentem komendy wejściowej musi być nazwa pliku.\n" + +#: src/console/console.c:1362 +#, fuzzy, c-format +msgid "Cannot open file %s for input. ERR=%s\n" +msgstr "Nie można otworzyć pliku wejciowego FileSet: %s. ERR=%s\n" + +#: src/console/console.c:1392 +msgid "Too many arguments on output/tee command.\n" +msgstr "" + +#: src/console/console.c:1409 +#, fuzzy, c-format +msgid "Cannot open file %s for output. ERR=%s\n" +msgstr "Nie można otworzyć pliku wejciowego FileSet: %s. ERR=%s\n" + +#: src/console/console.c:1428 +msgid "Too many arguments. Enclose command in double quotes.\n" +msgstr "Zbyt wiele argumentów. Otocz polecenie podwójnym cudzysłowem.\n" + +#: src/console/console.c:1437 +#, fuzzy, c-format +msgid "Cannot popen(\"%s\", \"r\"): ERR=%s\n" +msgstr " Nie można otworzyć \"%s\": ERR=%s.\n" + +#: src/console/console.c:1449 src/stored/autochanger.c:586 +#, fuzzy, c-format +msgid "Autochanger error: ERR=%s\n" +msgstr "Błąd rekordu danych. 
ERR=%s\n" + +#: src/tools/bregex.c:153 src/tools/bregtest.c:137 src/tools/bwild.c:122 +#: patches/testing/bregsed.c:131 +#, fuzzy, c-format +msgid "Could not open data file: %s\n" +msgstr "Nie można otworzyć pliku bootstrap %s: ERR=%s\n" + +#: src/tools/fstype.c:47 +#, c-format +msgid "" +"\n" +"Usage: fstype [-v] path ...\n" +"\n" +" Print the file system type a given file/directory is on.\n" +" The following options are supported:\n" +"\n" +" -v print both path and file system type.\n" +" -? print this message.\n" +"\n" +msgstr "" +"\n" +"Użycie: fstype [-v] path ...\n" +"\n" +" Wyświetl rodzaj systemu plików dla wybranego pliku/katalogu.\n" +" Wspierane są następujące opcje:\n" +"\n" +" -v wyświetl ścieżkę i rodzaj systemu plików.\n" +" -? wyświetl tą podpowiedź.\n" +"\n" + +#: src/tools/fstype.c:101 src/tools/drivetype.c:101 +#, fuzzy, c-format +msgid "%s: unknown\n" +msgstr "Nieznany status." + +#: src/tools/drivetype.c:47 +#, c-format +msgid "" +"\n" +"Usage: drivetype [-v] path ...\n" +"\n" +" Print the drive type a given file/directory is on.\n" +" The following options are supported:\n" +"\n" +" -v print both path and file system type.\n" +" -? print this message.\n" +"\n" +msgstr "" +"\n" +"Usage: drivetype [-v] path ...\n" +"\n" +" Wyświetl rodzaj napędu dla wybranego pliku/katalogu.\n" +" Wspierane są następujące opcje:\n" +"\n" +" -v wyświetl ścieżkę i rodzaj systemu plików.\n" +" -? wyświetl tą podpowiedź.\n" +"\n" + +#: src/tools/bbatch.c:78 +#, fuzzy, c-format +msgid "" +"\n" +"Version: %s (%s)\n" +"Example : bbatch -w /path/to/workdir -h localhost -f dat1 -f dat -f datx\n" +" will start 3 thread and load dat1, dat and datx in your catalog\n" +"See bbatch.c to generate datafile\n" +"\n" +"Usage: bbatch [ options ] -w working/dir -f datafile\n" +" -d set debug level to \n" +" -dt print timestamp in debug output\n" +" -n specify the database name (default bacula)\n" +" -u specify database user name (default bacula)\n" +" -P specify database host (default NULL)\n" +" -w specify working directory\n" +" -r call restore code with given jobids\n" +" -v verbose\n" +" -f specify data file\n" +" -? print this message\n" +"\n" +msgstr "" +"\n" +"Wersja: %s (%s)\n" +"Przykład : bbatch -w /path/to/workdir -h localhost -f dat1 -f dat -f datx\n" +" uruchomi 3 wątki i ładowanie dat1, dat and datx do twojego katalogu\n" +"Zobacz bbatch.c by wygenerować datafile\n" +"\n" +"Użycie: bbatch [ options ] -w working/dir -f datafile\n" +" -d «nn» ustaw poziom debugging'u na «nn»\n" +" -dt wyświetl czasy w danych wyjściowych debugging'u\n" +" -n «name» określ nazwę bazy danych (domyślnie bacula)\n" +" -u «user» określ nazwę użytkownika bazy danych (domyślnie " +"bacula)\n" +" -P «password określ hasło bazy danych (domyślnie brak)\n" +" -h «host» określ hasta bazy danych (domyślnie NULL)\n" +" -w «working» określ katalog roboczy\n" +" -r «jobids» wywołaj kod przywracania dla podanych jobid\n" +" -v szczegółowe informacje dla użytkownika\n" +" -f «file» określ plik danych\n" +" -? 
wyświetl tą podpowiedź.\n" +"\n" + +#: src/tools/bbatch.c:181 src/tools/bvfs_test.c:201 src/stored/bcopy.c:163 +#: src/stored/bextract.c:195 src/stored/bscan.c:247 +msgid "Wrong number of arguments: \n" +msgstr "Niepoprawna ilość argumentów: \n" + +#: src/tools/bbatch.c:192 src/tools/bbatch.c:240 src/tools/bvfs_test.c:215 +#: src/stored/bscan.c:301 +#, fuzzy +msgid "Could not init Bacula database\n" +msgstr "Nie można otworzyć bazy danych katalogu \"%s\".\n" + +#: src/tools/bbatch.c:202 +#, c-format +msgid "Computing file list for jobid=%s files=%lld secs=%d\n" +msgstr "" + +#: src/tools/bbatch.c:247 src/tools/bvfs_test.c:224 src/stored/bscan.c:308 +#, c-format +msgid "Using Database: %s, User: %s\n" +msgstr "Wykorzystywana baza danych: %s, Użytkownik: %s\n" + +#: src/tools/bbatch.c:305 +#, fuzzy, c-format +msgid "Error opening datafile %s\n" +msgstr "Błąd odczytywania pliku %s: ERR=%s\n" + +#: src/tools/bbatch.c:315 +#, fuzzy +msgid "Error while inserting file\n" +msgstr "Błąd skanowania nagwna rekordu: %s\n" + +#: src/tools/testfind.c:68 +#, fuzzy, c-format +msgid "" +"\n" +"Usage: testfind [-d debug_level] [-] [pattern1 ...]\n" +" -a print extended attributes (Win32 debug)\n" +" -d set debug level to \n" +" -dt print timestamp in debug output\n" +" -c specify config file containing FileSet resources\n" +" -f specify which FileSet to use\n" +" -? print this message.\n" +"\n" +"Patterns are used for file inclusion -- normally directories.\n" +"Debug level >= 1 prints each file found.\n" +"Debug level >= 10 prints path/file for catalog.\n" +"Errors are always printed.\n" +"Files/paths truncated is the number of files/paths with len > 255.\n" +"Truncation is only in the catalog.\n" +"\n" +msgstr "" +"\n" +"Użycie: testfind [-d debug_level] [-] [pattern1 ...]\n" +" -a wyświetl rozszerzone atrybuty (Win32 debug)\n" +" -d «nn» set debug level to «nn»\n" +" -dt print timestamp in debug output\n" +" -c specify config file containing FileSet resources\n" +" -f specify which FileSet to use\n" +" -? 
print this message.\n" +"\n" +"Patterns are used for file inclusion -- normally directories.\n" +"Debug level »= 1 prints each file found.\n" +"Debug level »= 10 prints path/file for catalog.\n" +"Errors are always printed.\n" +"Files/paths truncated is the number of files/paths with len » 255.\n" +"Truncation is only in the catalog.\n" +"\n" + +#: src/tools/testfind.c:233 +#, c-format +msgid "" +"\n" +"Total files : %d\n" +"Max file length: %d\n" +"Max path length: %d\n" +"Files truncated: %d\n" +"Paths truncated: %d\n" +"Hard links : %d\n" +msgstr "" + +#: src/tools/testfind.c:274 +#, c-format +msgid "Reg: %s\n" +msgstr "Reg: %s\n" + +#: src/tools/testfind.c:296 +msgid "\t[will not descend: recursion turned off]" +msgstr "" + +#: src/tools/testfind.c:298 +msgid "\t[will not descend: file system change not allowed]" +msgstr "" + +#: src/tools/testfind.c:300 +msgid "\t[will not descend: disallowed file system]" +msgstr "" + +#: src/tools/testfind.c:302 +msgid "\t[will not descend: disallowed drive type]" +msgstr "" + +#: src/tools/testfind.c:318 src/tools/testls.c:215 +#, fuzzy, c-format +msgid "Err: Could not access %s: %s\n" +msgstr " Brak dostpu %s: ERR=%s\n" + +#: src/tools/testfind.c:321 src/tools/testls.c:218 +#, fuzzy, c-format +msgid "Err: Could not follow ff->link %s: %s\n" +msgstr " Nie można poda za linkiem %s: ERR=%s\n" + +#: src/tools/testfind.c:324 src/tools/testls.c:221 +#, fuzzy, c-format +msgid "Err: Could not stat %s: %s\n" +msgstr " Nie można wykona stat %s: ERR=%s\n" + +#: src/tools/testfind.c:327 src/tools/testls.c:224 +#, c-format +msgid "Skip: File not saved. No change. %s\n" +msgstr "Pominiecie: Plik nie zapisany. Brak zmiany. %s\n" + +#: src/tools/testfind.c:330 src/tools/testls.c:227 +#, c-format +msgid "Err: Attempt to backup archive. Not saved. %s\n" +msgstr "Err: Próba backup'u archiwum. Nie zapisano. %s\n" + +#: src/tools/testfind.c:333 src/tools/testls.c:236 +#, fuzzy, c-format +msgid "Err: Could not open directory %s: %s\n" +msgstr " Nie można otworzyć katalogu %s: ERR=%s\n" + +#: src/tools/testfind.c:336 src/tools/testls.c:239 +#, fuzzy, c-format +msgid "Err: Unknown file ff->type %d: %s\n" +msgstr " Nieznany rodziaj pliku %d: %s\n" + +#: src/tools/testfind.c:386 +#, c-format +msgid "===== Filename truncated to 255 chars: %s\n" +msgstr "===== Nazwa pliku skrócona do 255 znaków: %s\n" + +#: src/tools/testfind.c:403 +#, c-format +msgid "========== Path name truncated to 255 chars: %s\n" +msgstr "========== Nazwa ścieżki skrócona do 255 znaków: %s\n" + +#: src/tools/testfind.c:412 +#, c-format +msgid "========== Path length is zero. File=%s\n" +msgstr "========== Zerowa długość ścieżki. 
Plik=%s\n" + +#: src/tools/testfind.c:415 +#, c-format +msgid "Path: %s\n" +msgstr "Ścieżka: %s\n" + +#: src/tools/dbcheck.c:207 +msgid "OK - DB backend seems to be thread-safe.\n" +msgstr "" + +#: src/tools/dbcheck.c:215 +msgid "" +"Warning skipping the additional parameters for working directory/dbname/user/" +"password/host.\n" +msgstr "" + +#: src/tools/dbcheck.c:232 +#, c-format +msgid "Error can not find the Catalog name[%s] in the given config file [%s]\n" +msgstr "" +"ERR: W podanym pliku konfiguracyjnym [%s] nie można odnaleźć Katalogu o " +"nazwie [%s]\n" + +#: src/tools/dbcheck.c:234 +#, c-format +msgid "Error there is no Catalog section in the given config file [%s]\n" +msgstr "ERR: w podanym pliku konfiguracyjnym [%s] brakuje sekcji Katalogu\n" + +#: src/tools/dbcheck.c:243 +#, fuzzy +msgid "Error no Director resource defined.\n" +msgstr "Brak definicji zasobu Dyrektora w %s\n" + +#: src/tools/dbcheck.c:268 +msgid "Wrong number of arguments.\n" +msgstr "Niepoprawna ilość argumentów.\n" + +#: src/tools/dbcheck.c:273 +#, fuzzy +msgid "Working directory not supplied.\n" +msgstr "Szyfrowanie rzadkich danych nie jest wspierane.\n" + +#: src/tools/dbcheck.c:307 +msgid "Database port must be a numeric value.\n" +msgstr "Port bazy danych musi mieć wartość numeryczną.\n" + +#: src/tools/dbcheck.c:310 +msgid "Database port must be a int value.\n" +msgstr "Port bazy danych musi mieć wartość całkowitą.\n" + +#: src/tools/dbcheck.c:352 +#, c-format +msgid "Hello, this is the database check/correct program.\n" +msgstr "Witam, to jest program sprawdzający poprawność bazy danych.\n" + +#: src/tools/dbcheck.c:354 +#, c-format +msgid "Modify database is on." +msgstr "Modyfikacja bazy danych w trakcie." + +#: src/tools/dbcheck.c:356 +#, c-format +msgid "Modify database is off." +msgstr "Modyfikacja bazy danych wyłączona." 
+ +#: src/tools/dbcheck.c:358 src/tools/dbcheck.c:419 +#, fuzzy, c-format +msgid " Verbose is on.\n" +msgstr "Wersja" + +#: src/tools/dbcheck.c:360 src/tools/dbcheck.c:421 +#, fuzzy, c-format +msgid " Verbose is off.\n" +msgstr "Wersja" + +#: src/tools/dbcheck.c:362 +#, c-format +msgid "Please select the function you want to perform.\n" +msgstr "Proszę wybrać funkcję którą chcesz wykonać.\n" + +#: src/tools/dbcheck.c:366 +#, c-format +msgid "" +"\n" +" 1) Toggle modify database flag\n" +" 2) Toggle verbose flag\n" +" 3) Repair bad Filename records\n" +" 4) Repair bad Path records\n" +" 5) Eliminate duplicate Filename records\n" +" 6) Eliminate duplicate Path records\n" +" 7) Eliminate orphaned Jobmedia records\n" +" 8) Eliminate orphaned File records\n" +" 9) Eliminate orphaned Path records\n" +" 10) Eliminate orphaned Filename records\n" +" 11) Eliminate orphaned FileSet records\n" +" 12) Eliminate orphaned Client records\n" +" 13) Eliminate orphaned Job records\n" +" 14) Eliminate all Admin records\n" +" 15) Eliminate all Restore records\n" +" 16) All (3-15)\n" +" 17) Quit\n" +msgstr "" + +#: src/tools/dbcheck.c:385 +#, c-format +msgid "" +"\n" +" 1) Toggle modify database flag\n" +" 2) Toggle verbose flag\n" +" 3) Check for bad Filename records\n" +" 4) Check for bad Path records\n" +" 5) Check for duplicate Filename records\n" +" 6) Check for duplicate Path records\n" +" 7) Check for orphaned Jobmedia records\n" +" 8) Check for orphaned File records\n" +" 9) Check for orphaned Path records\n" +" 10) Check for orphaned Filename records\n" +" 11) Check for orphaned FileSet records\n" +" 12) Check for orphaned Client records\n" +" 13) Check for orphaned Job records\n" +" 14) Check for all Admin records\n" +" 15) Check for all Restore records\n" +" 16) All (3-15)\n" +" 17) Quit\n" +msgstr "" + +#: src/tools/dbcheck.c:405 +msgid "Select function number: " +msgstr "Wybierz numer funkcji: " + +#: src/tools/dbcheck.c:412 +#, c-format +msgid "Database will be modified.\n" +msgstr "Baza danych zostanie zmodyfikowana.\n" + +#: src/tools/dbcheck.c:414 +#, c-format +msgid "Database will NOT be modified.\n" +msgstr "Baza danych NIE zostanie zmodyfikowana.\n" + +#: src/tools/dbcheck.c:504 +#, c-format +msgid "JobId=%s Name=\"%s\" StartTime=%s\n" +msgstr "JobId=%s Nazwa=\"%s\" StartTime=%s\n" + +#: src/tools/dbcheck.c:512 +#, c-format +msgid "Orphaned JobMediaId=%s JobId=%s Volume=\"%s\"\n" +msgstr "" + +#: src/tools/dbcheck.c:519 +#, c-format +msgid "Orphaned FileId=%s JobId=%s Volume=\"%s\"\n" +msgstr "" + +#: src/tools/dbcheck.c:526 +#, c-format +msgid "Orphaned FileSetId=%s FileSet=\"%s\" MD5=%s\n" +msgstr "" + +#: src/tools/dbcheck.c:533 +#, c-format +msgid "Orphaned ClientId=%s Name=\"%s\"\n" +msgstr "" + +#: src/tools/dbcheck.c:587 +#, c-format +msgid "Deleting: %s\n" +msgstr "Usuwanie: %s\n" + +#: src/tools/dbcheck.c:661 +#, c-format +msgid "Checking for duplicate Filename entries.\n" +msgstr "Sprawdzanie istnienia duplikatów nazw plików.\n" + +#: src/tools/dbcheck.c:670 +#, fuzzy, c-format +msgid "Found %d duplicate Filename records.\n" +msgstr "Nie można stworzyć %s: ERR=%s\n" + +#: src/tools/dbcheck.c:671 +msgid "Print the list? 
(yes/no): " +msgstr "" + +#: src/tools/dbcheck.c:690 src/tools/dbcheck.c:748 +#, c-format +msgid "Found %d for: %s\n" +msgstr "Znaleziono %d for: %s\n" + +#: src/tools/dbcheck.c:718 +#, c-format +msgid "Checking for duplicate Path entries.\n" +msgstr "Sprawdzanie istnienia duplikatów nazw ścieżek.\n" + +#: src/tools/dbcheck.c:728 +#, c-format +msgid "Found %d duplicate Path records.\n" +msgstr "Znaleziono %d wystąpień duplikatów ścieżek.\n" + +#: src/tools/dbcheck.c:729 src/tools/dbcheck.c:783 src/tools/dbcheck.c:827 +#: src/tools/dbcheck.c:879 src/tools/dbcheck.c:929 src/tools/dbcheck.c:970 +#: src/tools/dbcheck.c:1011 src/tools/dbcheck.c:1052 src/tools/dbcheck.c:1090 +#: src/tools/dbcheck.c:1123 src/tools/dbcheck.c:1160 src/tools/dbcheck.c:1224 +msgid "Print them? (yes/no): " +msgstr "" + +#: src/tools/dbcheck.c:776 +#, c-format +msgid "Checking for orphaned JobMedia entries.\n" +msgstr "" + +#: src/tools/dbcheck.c:782 +#, fuzzy, c-format +msgid "Found %d orphaned JobMedia records.\n" +msgstr "Nieudane stworzenie rekordu JobMedia %s: ERR=%s\n" + +#: src/tools/dbcheck.c:800 +#, fuzzy, c-format +msgid "Deleting %d orphaned JobMedia records.\n" +msgstr "Nieudane stworzenie rekordu JobMedia %s: ERR=%s\n" + +#: src/tools/dbcheck.c:817 +#, c-format +msgid "Checking for orphaned File entries. This may take some time!\n" +msgstr "" + +#: src/tools/dbcheck.c:826 +#, fuzzy, c-format +msgid "Found %d orphaned File records.\n" +msgstr "Niepoprawna komenda FileSet: %s\n" + +#: src/tools/dbcheck.c:843 +#, fuzzy, c-format +msgid "Deleting %d orphaned File records.\n" +msgstr "Niepoprawna komenda FileSet: %s\n" + +#: src/tools/dbcheck.c:859 src/tools/dbcheck.c:909 +msgid "Create temporary index? (yes/no): " +msgstr "Czy utworzyć tymczasowy index? (yes/no)" + +#: src/tools/dbcheck.c:869 +#, c-format +msgid "Checking for orphaned Path entries. This may take some time!\n" +msgstr "" + +#: src/tools/dbcheck.c:878 +#, c-format +msgid "Found %d orphaned Path records.\n" +msgstr "" + +#: src/tools/dbcheck.c:891 +#, c-format +msgid "Deleting %d orphaned Path records.\n" +msgstr "" + +#: src/tools/dbcheck.c:919 +#, c-format +msgid "Checking for orphaned Filename entries. This may take some time!\n" +msgstr "" + +#: src/tools/dbcheck.c:928 +#, fuzzy, c-format +msgid "Found %d orphaned Filename records.\n" +msgstr "Nie można pobra lub stworzyć rekordu FileSet.\n" + +#: src/tools/dbcheck.c:941 +#, c-format +msgid "Deleting %d orphaned Filename records.\n" +msgstr "" + +#: src/tools/dbcheck.c:959 +#, c-format +msgid "Checking for orphaned FileSet entries. This takes some time!\n" +msgstr "" + +#: src/tools/dbcheck.c:969 +#, fuzzy, c-format +msgid "Found %d orphaned FileSet records.\n" +msgstr "Niepoprawna komenda FileSet: %s\n" + +#: src/tools/dbcheck.c:984 +#, fuzzy, c-format +msgid "Deleting %d orphaned FileSet records.\n" +msgstr "Niepoprawna komenda FileSet: %s\n" + +#: src/tools/dbcheck.c:993 +#, c-format +msgid "Checking for orphaned Client entries.\n" +msgstr "" + +#: src/tools/dbcheck.c:1010 +#, fuzzy, c-format +msgid "Found %d orphaned Client records.\n" +msgstr "Nie można stworzyć rekordu Klienta. ERR=%s\n" + +#: src/tools/dbcheck.c:1025 +#, fuzzy, c-format +msgid "Deleting %d orphaned Client records.\n" +msgstr "Nie można stworzyć rekordu Klienta. 
ERR=%s\n" + +#: src/tools/dbcheck.c:1034 +#, c-format +msgid "Checking for orphaned Job entries.\n" +msgstr "" + +#: src/tools/dbcheck.c:1051 +#, fuzzy, c-format +msgid "Found %d orphaned Job records.\n" +msgstr "Nie można stworzyć %s: ERR=%s\n" + +#: src/tools/dbcheck.c:1066 +#, c-format +msgid "Deleting %d orphaned Job records.\n" +msgstr "" + +#: src/tools/dbcheck.c:1068 +#, c-format +msgid "Deleting JobMedia records of orphaned Job records.\n" +msgstr "" + +#: src/tools/dbcheck.c:1070 +#, c-format +msgid "Deleting Log records of orphaned Job records.\n" +msgstr "" + +#: src/tools/dbcheck.c:1080 +#, c-format +msgid "Checking for Admin Job entries.\n" +msgstr "Weryfikacja wystąpień wpisów Zadań Admin'a\n" + +#: src/tools/dbcheck.c:1089 +#, c-format +msgid "Found %d Admin Job records.\n" +msgstr "Znaleziono %d wpisów zadań Admin'a\n" + +#: src/tools/dbcheck.c:1104 +#, c-format +msgid "Deleting %d Admin Job records.\n" +msgstr "Usuwanie %d wpisów zadań Admina.\n" + +#: src/tools/dbcheck.c:1113 +#, c-format +msgid "Checking for Restore Job entries.\n" +msgstr "Weryfikacja wystąpień wpisów Zadań Przywrócenia.\n" + +#: src/tools/dbcheck.c:1122 +#, fuzzy, c-format +msgid "Found %d Restore Job records.\n" +msgstr "Nie można stworzyć %s: ERR=%s\n" + +#: src/tools/dbcheck.c:1137 +#, c-format +msgid "Deleting %d Restore Job records.\n" +msgstr "Usuwanie %d wpisów zadań Przywrócenia.\n" + +#: src/tools/dbcheck.c:1150 +#, c-format +msgid "Checking for Filenames with a trailing slash\n" +msgstr "Wyszukiwanie nazw plików z ukośnikiem zamykającym\n" + +#: src/tools/dbcheck.c:1159 +#, c-format +msgid "Found %d bad Filename records.\n" +msgstr "Znaleziono %d wystąpień błędnych nazw plików.\n" + +#: src/tools/dbcheck.c:1177 src/tools/dbcheck.c:1240 +#, c-format +msgid "Reparing %d bad Filename records.\n" +msgstr "Naprawa %d wystąpień błędnych nazw plików.\n" + +#: src/tools/dbcheck.c:1214 +#, c-format +msgid "Checking for Paths without a trailing slash\n" +msgstr "Wyszukiwanie ścieżek bez ukośnika zamykającego\n" + +#: src/tools/dbcheck.c:1223 +#, c-format +msgid "Found %d bad Path records.\n" +msgstr "Znaleziono %d wystąpień błędnych ścieżek.\n" + +#: src/tools/dbcheck.c:1377 +#, c-format +msgid "" +"Ok. Index over the %s column already exists and dbcheck will work faster.\n" +msgstr "" + +#: src/tools/dbcheck.c:1380 +#, c-format +msgid "" +"Note. Index over the %s column not found, that can greatly slow down " +"dbcheck.\n" +msgstr "" + +#: src/tools/dbcheck.c:1396 +#, c-format +msgid "Create temporary index... This may take some time!\n" +msgstr "Utwórz indeks tymczasowy... To może chwilę potrwać!\n" + +#: src/tools/dbcheck.c:1404 +#, c-format +msgid "Temporary index created.\n" +msgstr "Indeks tymczasowy utworzony.\n" + +#: src/tools/dbcheck.c:1419 +#, c-format +msgid "Drop temporary index.\n" +msgstr "Usuń indeks tymczasowy.\n" + +#: src/tools/dbcheck.c:1429 +#, c-format +msgid "Temporary index %s deleted.\n" +msgstr "Indeks tymczasowy %s usuniety.\n" + +#: src/tools/bvfs_test.c:55 +#, fuzzy, c-format +msgid "" +"\n" +"Version: %s (%s)\n" +" -d set debug level to \n" +" -dt print timestamp in debug output\n" +" -n specify the database name (default bacula)\n" +" -u specify database user name (default bacula)\n" +" -P specify database host (default NULL)\n" +" -w specify working directory\n" +" -j specify jobids\n" +" -p specify path\n" +" -f specify file\n" +" -l maximum tuple to fetch\n" +" -T truncate cache table before starting\n" +" -v verbose\n" +" -? 
print this message\n" +"\n" +msgstr "" +"\n" +"Wersja: %s (%s)\n" +"Przykład : bbatch -w /path/to/workdir -h localhost -f dat1 -f dat -f datx\n" +" uruchomi 3 wątki i ładowanie dat1, dat and datx do twojego katalogu\n" +"Zobacz bbatch.c by wygenerować datafile\n" +"\n" +"Użycie: bbatch [ options ] -w working/dir -f datafile\n" +" -d «nn» ustaw poziom debugging'u na «nn»\n" +" -dt wyświetl czasy w danych wyjściowych debugging'u\n" +" -n «name» określ nazwę bazy danych (domyślnie bacula)\n" +" -u «user» określ nazwę użytkownika bazy danych (domyślnie " +"bacula)\n" +" -P «password określ hasło bazy danych (domyślnie brak)\n" +" -h «host» określ hasta bazy danych (domyślnie NULL)\n" +" -w «working» określ katalog roboczy\n" +" -r «jobids» wywołaj kod przywracania dla podanych jobid\n" +" -v szczegółowe informacje dla użytkownika\n" +" -f «file» określ plik danych\n" +" -? wyświetl tą podpowiedź.\n" +"\n" + +#: src/tools/bsmtp.c:145 +#, c-format +msgid "Fatal malformed reply from %s: %s\n" +msgstr "" + +#: src/tools/bsmtp.c:153 +#, fuzzy, c-format +msgid "Fatal fgets error: ERR=%s\n" +msgstr "Błąd rekordu danych. ERR=%s\n" + +#: src/tools/bsmtp.c:186 +#, fuzzy, c-format +msgid "" +"\n" +"Usage: %s [-f from] [-h mailhost] [-s subject] [-c copy] [recipient ...]\n" +" -8 set charset to UTF-8\n" +" -c set the Cc: field\n" +" -d set debug level to \n" +" -dt print a timestamp in debug output\n" +" -f set the From: field\n" +" -h use mailhost:port as the SMTP server\n" +" -s set the Subject: field\n" +" -r set the Reply-To: field\n" +" -l set the maximum number of lines to send (default: " +"unlimited)\n" +" -? print this message.\n" +"\n" +msgstr "" +"\n" +"Wersja: %s (%s)\n" +"\n" +"Użycie: dird [-f -s] [-c config_file] [-d debug_level] [config_file]\n" +" -c uyj jako pliku konfiguracyjnego\n" +" -d ustaw poziom debugingu na \n" +" -dt wyświetl znacznik czasu podczas wywietlania debugingu\n" +" -f uruchom na pierwszym planie (dla debugingu)\n" +" -g identyfikator grupy\n" +" -m wyświetl informacje kaboom (dla debugingu)\n" +" -r uruchom teraz zadanie \n" +" -s brak sygnaw\n" +" -t test - odczytaj plik konfiguracji i zakocz\n" +" -u identyfikator uytkownika\n" +" -v gadatliwe komunikaty uytkownika\n" +" -? wyświetl ten komunikat.\n" +"\n" + +#: src/tools/bsmtp.c:343 +msgid "Fatal error: no recipient given.\n" +msgstr "" + +#: src/tools/bsmtp.c:372 +#, fuzzy, c-format +msgid "Fatal gethostname error: ERR=%s\n" +msgstr "Błąd rekordu danych. ERR=%s\n" + +#: src/tools/bsmtp.c:376 +#, fuzzy, c-format +msgid "Fatal gethostbyname for myself failed \"%s\": ERR=%s\n" +msgstr "Błąd w acltostr na pliku \"%s\": ERR=%s\n" + +#: src/tools/bsmtp.c:412 +#, fuzzy, c-format +msgid "Error unknown mail host \"%s\": ERR=%s\n" +msgstr "Błąd w getacl na pliku \"%s\": ERR=%s\n" + +#: src/tools/bsmtp.c:415 +msgid "Retrying connection using \"localhost\".\n" +msgstr "" + +#: src/tools/bsmtp.c:423 +#, c-format +msgid "Fatal error: Unknown address family for smtp host: %d\n" +msgstr "" + +#: src/tools/bsmtp.c:432 src/tools/bsmtp.c:437 +#, fuzzy, c-format +msgid "Fatal socket error: ERR=%s\n" +msgstr "Błąd rekordu danych. ERR=%s\n" + +#: src/tools/bsmtp.c:442 +#, fuzzy, c-format +msgid "Fatal connect error to %s: ERR=%s\n" +msgstr "Błąd w pathconf na pliku \"%s\": ERR=%s\n" + +#: src/tools/bsmtp.c:450 +#, fuzzy, c-format +msgid "Fatal _open_osfhandle error: ERR=%s\n" +msgstr "Błąd rekordu danych. 
ERR=%s\n" + +#: src/tools/bsmtp.c:457 src/tools/bsmtp.c:461 src/tools/bsmtp.c:470 +#: src/tools/bsmtp.c:474 +#, fuzzy, c-format +msgid "Fatal fdopen error: ERR=%s\n" +msgstr "Błąd rekordu danych. ERR=%s\n" + +#: src/tools/bsmtp.c:466 +#, fuzzy, c-format +msgid "Fatal dup error: ERR=%s\n" +msgstr "Błąd rekordu danych. ERR=%s\n" + +#: src/tools/testls.c:59 +#, fuzzy, c-format +msgid "" +"\n" +"Usage: testls [-d debug_level] [-] [pattern1 ...]\n" +" -a print extended attributes (Win32 debug)\n" +" -d set debug level to \n" +" -dt print timestamp in debug output\n" +" -e specify file of exclude patterns\n" +" -i specify file of include patterns\n" +" -q quiet, don't print filenames (debug)\n" +" - read pattern(s) from stdin\n" +" -? print this message.\n" +"\n" +"Patterns are file inclusion -- normally directories.\n" +"Debug level >= 1 prints each file found.\n" +"Debug level >= 10 prints path/file for catalog.\n" +"Errors always printed.\n" +"Files/paths truncated is number with len > 255.\n" +"Truncation is only in catalog.\n" +"\n" +msgstr "" +"\n" +"Użycie: testfind [-d debug_level] [-] [pattern1 ...]\n" +" -a wyświetl rozszerzone atrybuty (Win32 debug)\n" +" -d «nn» set debug level to «nn»\n" +" -dt print timestamp in debug output\n" +" -c specify config file containing FileSet resources\n" +" -f specify which FileSet to use\n" +" -? print this message.\n" +"\n" +"Patterns are used for file inclusion -- normally directories.\n" +"Debug level »= 1 prints each file found.\n" +"Debug level »= 10 prints path/file for catalog.\n" +"Errors are always printed.\n" +"Files/paths truncated is the number of files/paths with len » 255.\n" +"Truncation is only in the catalog.\n" +"\n" + +#: src/tools/testls.c:155 +#, fuzzy, c-format +msgid "Could not open include file: %s\n" +msgstr "Nie można ustawi pola Finder Info na %s\n" + +#: src/tools/testls.c:168 +#, fuzzy, c-format +msgid "Could not open exclude file: %s\n" +msgstr "Nie można otworzyć pliku bootstrap %s: ERR=%s\n" + +#: src/tools/testls.c:182 +#, c-format +msgid "Files seen = %d\n" +msgstr "" + +#: src/tools/testls.c:230 +#, fuzzy, c-format +msgid "Recursion turned off. Directory not entered. %s\n" +msgstr " Rekursja wyczona. Ominito katalog: %s\n" + +#: src/tools/testls.c:233 +#, fuzzy, c-format +msgid "Skip: File system change prohibited. Directory not entered. %s\n" +msgstr " Zabroniona zmiana filesystemu. Ominito katalog: %s\n" + +#: src/stored/bcopy.c:76 +#, fuzzy, c-format +msgid "" +"\n" +"Version: %s (%s)\n" +"\n" +"Usage: bcopy [-d debug_level] \n" +" -b bootstrap specify a bootstrap file\n" +" -c specify a Storage configuration file\n" +" -d set debug level to \n" +" -dt print timestamp in debug output\n" +" -i specify input Volume names (separated by |)\n" +" -o specify output Volume names (separated by |)\n" +" -p proceed inspite of errors\n" +" -v verbose\n" +" -w specify working directory (default /tmp)\n" +" -? print this message\n" +"\n" +msgstr "" +"\n" +"Wersja: %s (%s)\n" +"\n" +"Użycie: bacula-fd [-f -s] [-c config_file] [-d debug_level]\n" +" -c uyj jako pliku konfiguracyjnego\n" +" -d ustaw poziom debugingu na \n" +" -dt wyświetl znacznik czasowu podczas wywietlania debugingu\n" +" -f uruchom na pierwszym planie (dla debugingu)\n" +" -g identyfikator grupy\n" +" -k keep readall capabilities\n" +" -m wyświetl informacje kaboom (dla debugingu)\n" +" -s brak sygnałów (dla debugingu)\n" +" -t przetestuj plik konfiguracji i zakocz\n" +" -u identyfikator uytkownika\n" +" -v gadatliwe komunikaty uytkownika\n" +" -? 
wyświetl ten komunikat.\n" +"\n" + +#: src/stored/bcopy.c:204 src/stored/device.c:298 src/stored/btape.c:472 +#, fuzzy, c-format +msgid "dev open failed: %s\n" +msgstr " Przetworzonych plikw: %s\n" + +#: src/stored/bcopy.c:219 +#, fuzzy +msgid "Write of last block failed.\n" +msgstr "Nieudane odszyfrowanie klucza sesji.\n" + +#: src/stored/bcopy.c:223 +#, c-format +msgid "%u Jobs copied. %u records copied.\n" +msgstr "Skopiowano %u Jobów. Skopiowano %u rekordów.\n" + +#: src/stored/bcopy.c:240 src/stored/bscan.c:422 +#, c-format +msgid "Record: SessId=%u SessTim=%u FileIndex=%d Stream=%d len=%u\n" +msgstr "Rekord: SessId=%u SessTim=%u FileIndex=%d Strumień=%d długość=%u\n" + +#: src/stored/bcopy.c:256 +msgid "Volume is prelabeled. This volume cannot be copied.\n" +msgstr "" +"wolumen ma nadaną wcześniej etykietę. Ten wolumen nie moze być skopiowany.\n" + +#: src/stored/bcopy.c:259 +msgid "Volume label not copied.\n" +msgstr "Etykieta wolumenu nie została skopiowana.\n" + +#: src/stored/bcopy.c:265 +msgid "Copy skipped. Record does not match BSR filter.\n" +msgstr "Kopiowanie pominięte. Wpis nie odpowiada filtrowi BSR.\n" + +#: src/stored/bcopy.c:282 src/stored/bcopy.c:290 src/stored/bcopy.c:318 +#: src/stored/btape.c:2743 +#, fuzzy, c-format +msgid "Cannot fixup device error. %s\n" +msgstr "Nie można znale zasobu Client %s\n" + +#: src/stored/bcopy.c:296 +msgid "EOM label not copied.\n" +msgstr "Etykieta EOM nie została skopiowana.\n" + +#: src/stored/bcopy.c:299 +msgid "EOT label not copied.\n" +msgstr "Etykieta EOT nie została skopiowana.\n" + +#: src/stored/bcopy.c:332 src/stored/read_record.c:399 src/stored/bls.c:423 +#, fuzzy +msgid "Fresh Volume Label" +msgstr "Wolumen do Katalogu" + +#: src/stored/bcopy.c:335 src/stored/read_record.c:402 src/stored/bls.c:426 +#, fuzzy +msgid "Volume Label" +msgstr "Wolumen do Katalogu" + +#: src/stored/bcopy.c:339 src/stored/label.c:1031 src/stored/bls.c:430 +msgid "Begin Job Session" +msgstr "Rozpocznij Sesję Zadania" + +#: src/stored/bcopy.c:343 src/stored/label.c:1034 src/stored/bls.c:435 +msgid "End Job Session" +msgstr "Zakończ Sesję Zadania" + +#: src/stored/bcopy.c:348 src/stored/bls.c:439 +msgid "End of Medium" +msgstr "Koniec Medium" + +#: src/stored/bcopy.c:351 src/stored/label.c:1043 src/stored/bls.c:451 +#, fuzzy +msgid "Unknown" +msgstr "Nieznany status." + +#: src/stored/bcopy.c:357 src/stored/read_record.c:420 src/stored/bls.c:458 +#, c-format +msgid "%s Record: VolSessionId=%d VolSessionTime=%d JobId=%d DataLen=%d\n" +msgstr "%s Record: VolSessionId=%d VolSessionTime=%d JobId=%d DataLen=%d\n" + +#: src/stored/bcopy.c:375 src/stored/btape.c:3037 src/stored/bextract.c:513 +#: src/stored/bscan.c:1327 src/stored/bls.c:477 +#, c-format +msgid "Mount Volume \"%s\" on device %s and press return when ready: " +msgstr "" +"Zamontuj Wolumen \"%s\" w urządzeniu %s i kiedy gotowe naciśnij Enter: " + +#: src/stored/ansi_label.c:96 +#, fuzzy, c-format +msgid "Read error on device %s in ANSI label. ERR=%s\n" +msgstr "Błąd odczytu na pliku %s. ERR=%s\n" + +#: src/stored/ansi_label.c:106 +msgid "Insane! End of tape while reading ANSI label.\n" +msgstr "Szaleństwo! 
Koniec taśmy podczas odczytu etykiety ANSI.\n" + +#: src/stored/ansi_label.c:132 +msgid "No VOL1 label while reading ANSI/IBM label.\n" +msgstr "Brak etykiety VOL1 podczas odczytu etykiety ANSI/IBM.\n" + +#: src/stored/ansi_label.c:155 +#, fuzzy, c-format +msgid "Wanted ANSI Volume \"%s\" got \"%s\"\n" +msgstr "Nie mogę przyci wolumenu \"%s\"\n" + +#: src/stored/ansi_label.c:166 +msgid "No HDR1 label while reading ANSI label.\n" +msgstr "Brak etykiety HDR1 podczas odczytu etykiety ANSI.\n" + +#: src/stored/ansi_label.c:172 +#, c-format +msgid "ANSI/IBM Volume \"%s\" does not belong to Bacula.\n" +msgstr "Wolumen ANSI/IBM \"%s\" nie należy do Bacula.\n" + +#: src/stored/ansi_label.c:184 +msgid "No HDR2 label while reading ANSI/IBM label.\n" +msgstr "Brak etykiety HDR2 podczas odczytu etykiety ANSI/IBM.\n" + +#: src/stored/ansi_label.c:199 +msgid "Unknown or bad ANSI/IBM label record.\n" +msgstr "Nieznana etykieta ANSI/IBM lub jej błędny zapis.\n" + +#: src/stored/ansi_label.c:207 +msgid "Too many records in while reading ANSI/IBM label.\n" +msgstr "Zbyt wiele rekordów podczas odczytu etykiety ANSI/IBM.\n" + +#: src/stored/ansi_label.c:307 +#, c-format +msgid "ANSI Volume label name \"%s\" longer than 6 chars.\n" +msgstr "Etykieta Wolumenu ANSI \"%s\" dłuższa niż 6 znaków.\n" + +#: src/stored/ansi_label.c:333 +#, fuzzy, c-format +msgid "Could not write ANSI VOL1 label. ERR=%s\n" +msgstr "Nie można stworzyć %s: ERR=%s\n" + +#: src/stored/ansi_label.c:371 src/stored/ansi_label.c:400 +#, fuzzy, c-format +msgid "Could not write ANSI HDR1 label. ERR=%s\n" +msgstr "Nie można stworzyć %s: ERR=%s\n" + +#: src/stored/ansi_label.c:376 src/stored/ansi_label.c:407 +#, fuzzy +msgid "Could not write ANSI HDR1 label.\n" +msgstr "Nie można stworzyć %s: ERR=%s\n" + +#: src/stored/ansi_label.c:412 +#, fuzzy, c-format +msgid "Error writing EOF to tape. ERR=%s" +msgstr "Błąd w wysyaniu Hello do demona Przechowywania. ERR=%s\n" + +#: src/stored/ansi_label.c:417 +msgid "write_ansi_ibm_label called for non-ANSI/IBM type\n" +msgstr "write_ansi_ibm_label wywołuje typ non-ANSI/IBM\n" + +#: src/stored/label.c:95 +#, fuzzy, c-format +msgid "Couldn't rewind device %s: ERR=%s\n" +msgstr "Nie można stworzyć %s: ERR=%s\n" + +#: src/stored/label.c:112 src/stored/label.c:204 +#, c-format +msgid "Wrong Volume mounted on device %s: Wanted %s have %s\n" +msgstr "" +"Na urządzeniu %s zamontowano niepoprawny wolumen: żądano %s otrzymano %s\n" + +#: src/stored/label.c:115 src/stored/label.c:192 +#, fuzzy, c-format +msgid "Too many tries: %s" +msgstr "Zbyt duo elementw w zasobie %s\n" + +#: src/stored/label.c:132 +#, c-format +msgid "" +"Requested Volume \"%s\" on %s is not a Bacula labeled Volume, because: ERR=%s" +msgstr "" +"Wymagany Wolumen \"%s\" na %s nie jest Wolumenem z etykietą Bacula, " +"ponieważ: ERR=%s" + +#: src/stored/label.c:137 +#, fuzzy +msgid "Could not read Volume label from block.\n" +msgstr "Nie można stworzyć %s: ERR=%s\n" + +#: src/stored/label.c:140 +#, fuzzy, c-format +msgid "Could not unserialize Volume label: ERR=%s\n" +msgstr "Nie można stworzyć %s: ERR=%s\n" + +#: src/stored/label.c:145 +#, c-format +msgid "Volume Header Id bad: %s\n" +msgstr "Zły Header Id: %s\n" + +#: src/stored/label.c:177 +#, c-format +msgid "Volume on %s has wrong Bacula version. Wanted %d got %d\n" +msgstr "Wolumen na %s na złą wersję Bacula. 
Oczekiwano %d, otrzymano %d\n" + +#: src/stored/label.c:188 +#, c-format +msgid "Volume on %s has bad Bacula label type: %x\n" +msgstr "Wolumen na %s ma zły typ etykiety Bacula: %x\n" + +#: src/stored/label.c:238 src/stored/label.c:400 src/stored/mount.c:462 +#, fuzzy, c-format +msgid "Could not reserve volume %s on %s\n" +msgstr "Nie można ustawi pola Finder Info na %s\n" + +#: src/stored/label.c:278 +#, c-format +msgid "Cannot write Volume label to block for device %s\n" +msgstr "Nie można zapisać etykiety Wolumenu do bloku na urządzeniu %s\n" + +#: src/stored/label.c:330 src/stored/label.c:430 src/stored/mount.c:220 +#, fuzzy, c-format +msgid "Open device %s Volume \"%s\" failed: ERR=%s\n" +msgstr "Błąd w dostarczeniu komunikatu: nieudane fopen %s: ERR=%s\n" + +#: src/stored/label.c:455 +#, fuzzy, c-format +msgid "Rewind error on device %s: ERR=%s\n" +msgstr "Błąd odczytu na pliku %s. ERR=%s\n" + +#: src/stored/label.c:463 +#, fuzzy, c-format +msgid "Truncate error on device %s: ERR=%s\n" +msgstr "Błąd w acltotext na pliku \"%s\": ERR=%s\n" + +#: src/stored/label.c:469 +#, fuzzy, c-format +msgid "Failed to re-open DVD after truncate on device %s: ERR=%s\n" +msgstr "Nie mogę otworzyć przestrzeni xattr na pliku \"%s\": ERR=%s\n" + +#: src/stored/label.c:492 +#, fuzzy, c-format +msgid "Unable to write device %s: ERR=%s\n" +msgstr "Nie mogę otworzyć pliku \"%s\": ERR=%s\n" + +#: src/stored/label.c:522 +#, c-format +msgid "Recycled volume \"%s\" on device %s, all previous data lost.\n" +msgstr "" +"Usunięty Wolumen \"%s\" na urządzeniu %s, wszystkie wcześniejsze dane " +"utracone.\n" + +#: src/stored/label.c:525 +#, c-format +msgid "Wrote label to prelabeled Volume \"%s\" on device %s\n" +msgstr "" +"Zapisano etykietę na wolumenie z wcześniejszą etykietą \"%s\". Urządzenie %" +"s\n" + +#: src/stored/label.c:724 +#, c-format +msgid "Bad Volume session label = %d\n" +msgstr "Błędna etykieta sesji Wolumenu = %d\n" + +#: src/stored/label.c:779 +#, c-format +msgid "Expecting Volume Label, got FI=%s Stream=%s len=%d\n" +msgstr "Oczekiwano etykiety Wolumenu, otrzymano FI=%s Stream=%s len=%d\n" + +#: src/stored/label.c:906 +#, fuzzy, c-format +msgid "Unknown %d" +msgstr "Nieznany status." 
+ +#: src/stored/label.c:910 +#, c-format +msgid "" +"\n" +"Volume Label:\n" +"Id : %sVerNo : %d\n" +"VolName : %s\n" +"PrevVolName : %s\n" +"VolFile : %d\n" +"LabelType : %s\n" +"LabelSize : %d\n" +"PoolName : %s\n" +"MediaType : %s\n" +"PoolType : %s\n" +"HostName : %s\n" +msgstr "" +"\n" +"Etykieta Wolumenu:\n" +"Id : %sVerNo : %d\n" +"VolName : %s\n" +"PrevVolName : %s\n" +"VolFile : %d\n" +"LabelType : %s\n" +"LabelSize : %d\n" +"PoolName : %s\n" +"MediaType : %s\n" +"PoolType : %s\n" +"HostName : %s\n" + +#: src/stored/label.c:932 +#, c-format +msgid "Date label written: %s\n" +msgstr "Etykieta daty zapisana: %s\n" + +#: src/stored/label.c:938 +#, c-format +msgid "Date label written: %04d-%02d-%02d at %02d:%02d\n" +msgstr "Etykieta daty zapisana: %04d-%02d-%02d przy %02d:%02d\n" + +#: src/stored/label.c:958 +#, c-format +msgid "" +"\n" +"%s Record:\n" +"JobId : %d\n" +"VerNum : %d\n" +"PoolName : %s\n" +"PoolType : %s\n" +"JobName : %s\n" +"ClientName : %s\n" +msgstr "" +"\n" +"%s Record:\n" +"JobId : %d\n" +"VerNum : %d\n" +"PoolName : %s\n" +"PoolType : %s\n" +"JobName : %s\n" +"ClientName : %s\n" + +#: src/stored/label.c:971 +#, c-format +msgid "" +"Job (unique name) : %s\n" +"FileSet : %s\n" +"JobType : %c\n" +"JobLevel : %c\n" +msgstr "" +"Job (unique name) : %s\n" +"FileSet : %s\n" +"JobType : %c\n" +"JobLevel : %c\n" + +#: src/stored/label.c:980 +#, c-format +msgid "" +"JobFiles : %s\n" +"JobBytes : %s\n" +"StartBlock : %s\n" +"EndBlock : %s\n" +"StartFile : %s\n" +"EndFile : %s\n" +"JobErrors : %s\n" +"JobStatus : %c\n" +msgstr "" +"JobFiles : %s\n" +"JobBytes : %s\n" +"StartBlock : %s\n" +"EndBlock : %s\n" +"StartFile : %s\n" +"EndFile : %s\n" +"JobErrors : %s\n" +"JobStatus : %c\n" + +#: src/stored/label.c:1001 +#, c-format +msgid "Date written : %s\n" +msgstr "Data zapisana : %s\n" + +#: src/stored/label.c:1006 +#, c-format +msgid "Date written : %04d-%02d-%02d at %02d:%02d\n" +msgstr "Data zapisana : %04d-%02d-%02d at %02d:%02d\n" + +#: src/stored/label.c:1025 +msgid "Fresh Volume" +msgstr "Nowy Wolumen" + +#: src/stored/label.c:1028 +#, fuzzy +msgid "Volume" +msgstr "Wolumen do Katalogu" + +#: src/stored/label.c:1037 src/stored/read_record.c:413 +msgid "End of Media" +msgstr "Koniec Medium" + +#: src/stored/label.c:1040 +msgid "End of Tape" +msgstr "Koniec Taśmy" + +#: src/stored/label.c:1060 src/stored/label.c:1068 src/stored/label.c:1101 +#, c-format +msgid "%s Record: File:blk=%u:%u SessId=%d SessTime=%d JobId=%d DataLen=%d\n" +msgstr "Rekord %s : File:blk=%u:%u SessId=%d SessTime=%d JobId=%d DataLen=%d\n" + +#: src/stored/label.c:1065 +msgid "End of physical tape.\n" +msgstr "Fizyczny koniec taśmy.\n" + +#: src/stored/label.c:1080 src/stored/label.c:1089 +#, c-format +msgid "%s Record: File:blk=%u:%u SessId=%d SessTime=%d JobId=%d\n" +msgstr "Rekord %s: File:blk=%u:%u SessId=%d SessTime=%d JobId=%d\n" + +#: src/stored/label.c:1082 +#, c-format +msgid " Job=%s Date=%s Level=%c Type=%c\n" +msgstr " Zadanie=%s Data=%s Poziom=%c Typ=%c\n" + +#: src/stored/label.c:1091 +#, fuzzy, c-format +msgid " Date=%s Level=%c Type=%c Files=%s Bytes=%s Errors=%d Status=%c\n" +msgstr " Plików=%s Bajtów=%s Bajtów/sek=%s Błędów=%d\n" + +#: src/stored/authenticate.c:63 +#, fuzzy, c-format +msgid "I only authenticate Directors, not %d\n" +msgstr "Autentykuj wycznie directory, nie %d\n" + +#: src/stored/authenticate.c:93 +#, fuzzy, c-format +msgid "" +"Connection from unknown Director %s at %s rejected.\n" +"Please see http://www.bacula.org/en/rel-manual/Bacula_Freque_Asked_Questi." 
+"html#SECTION003760000000000000000 for help.\n" +msgstr "" +"Autoryzacja klucza odrzucona przez Storage daemona.\n" +"Proszę zobacz http://www.bacula.org/en/rel-manual/Bacula_Freque_Asked_Questi." +"html#SECTION003760000000000000000 w celu uzyskania pomocy.\n" + +#: src/stored/authenticate.c:130 +#, fuzzy +msgid "" +"Incorrect password given by Director.\n" +"Please see http://www.bacula.org/en/rel-manual/Bacula_Freque_Asked_Questi." +"html#SECTION003760000000000000000 for help.\n" +msgstr "" +"Autoryzacja klucza odrzucona przez Storage daemona.\n" +"Proszę zobacz http://www.bacula.org/en/rel-manual/Bacula_Freque_Asked_Questi." +"html#SECTION003760000000000000000 w celu uzyskania pomocy.\n" + +#: src/stored/authenticate.c:156 +#, fuzzy, c-format +msgid "TLS negotiation failed with DIR at \"%s:%d\"\n" +msgstr "Negocjacje TLS nie powiody si\n" + +#: src/stored/authenticate.c:192 +#, fuzzy, c-format +msgid "Unable to authenticate Director at %s.\n" +msgstr "Nie mogę zautentykowa Directora\n" + +#: src/stored/authenticate.c:240 src/stored/authenticate.c:280 +#, fuzzy, c-format +msgid "" +"Incorrect authorization key from File daemon at %s rejected.\n" +"Please see http://www.bacula.org/en/rel-manual/Bacula_Freque_Asked_Questi." +"html#SECTION003760000000000000000 for help.\n" +msgstr "" +"Autoryzacja klucza odrzucona przez Storage daemona.\n" +"Proszę zobacz http://www.bacula.org/en/rel-manual/Bacula_Freque_Asked_Questi." +"html#SECTION003760000000000000000 w celu uzyskania pomocy.\n" + +#: src/stored/authenticate.c:267 +#, fuzzy, c-format +msgid "TLS negotiation failed with FD at \"%s:%d\"\n" +msgstr "Negocjacje TLS nie powiody si\n" + +#: src/stored/parse_bsr.c:123 src/stored/parse_bsr.c:127 +#, c-format +msgid "" +"Bootstrap file error: %s\n" +" : Line %d, col %d of file %s\n" +"%s\n" +msgstr "" +"Błąd pliku inicjalizacyjnego: %s\n" +" : Linia %d, kolumna %d w pliku %s\n" +"%s\n" + +#: src/stored/parse_bsr.c:149 +#, fuzzy, c-format +msgid "Cannot open bootstrap file %s: %s\n" +msgstr "Nie można otworzyć pliku bootstrap %s: ERR=%s\n" + +#: src/stored/parse_bsr.c:280 +#, c-format +msgid "MediaType %s in bsr at inappropriate place.\n" +msgstr "MediaType %s w bsr jest w niewłaściwym miejscu.\n" + +#: src/stored/parse_bsr.c:312 +#, c-format +msgid "Device \"%s\" in bsr at inappropriate place.\n" +msgstr "Urządzenie \"%s\" w bsr jest w niewłaściwym miejscu.\n" + +#: src/stored/parse_bsr.c:485 +#, fuzzy, c-format +msgid "REGEX '%s' compile error. ERR=%s\n" +msgstr "Błąd kompilacji REGEX %s. 
ERR=%s\n" + +#: src/stored/parse_bsr.c:495 +msgid "JobType not yet implemented\n" +msgstr "JobType jeszcze nie zaimplementowany\n" + +#: src/stored/parse_bsr.c:503 +msgid "JobLevel not yet implemented\n" +msgstr "JobLevel jeszcze nie zaimplementowany\n" + +#: src/stored/parse_bsr.c:720 +#, c-format +msgid "Slot %d in bsr at inappropriate place.\n" +msgstr "Slot %d w bsr na nieprawidłowym miejscu.\n" + +#: src/stored/parse_bsr.c:744 +#, c-format +msgid "VolFile : %u-%u\n" +msgstr "VolFile : %u-%u\n" + +#: src/stored/parse_bsr.c:752 +#, c-format +msgid "VolBlock : %u-%u\n" +msgstr "VolBlock : %u-%u\n" + +#: src/stored/parse_bsr.c:760 +#, c-format +msgid "VolAddr : %llu-%llu\n" +msgstr "VolAddr : %llu-%llu\n" + +#: src/stored/parse_bsr.c:769 +#, c-format +msgid "FileIndex : %u\n" +msgstr "FileIndex : %u\n" + +#: src/stored/parse_bsr.c:771 +#, c-format +msgid "FileIndex : %u-%u\n" +msgstr "FileIndex : %u-%u\n" + +#: src/stored/parse_bsr.c:781 +#, c-format +msgid "JobId : %u\n" +msgstr "JobId : %u\n" + +#: src/stored/parse_bsr.c:783 +#, c-format +msgid "JobId : %u-%u\n" +msgstr "JobId : %u-%u\n" + +#: src/stored/parse_bsr.c:793 +#, c-format +msgid "SessId : %u\n" +msgstr "SessId : %u\n" + +#: src/stored/parse_bsr.c:795 +#, c-format +msgid "SessId : %u-%u\n" +msgstr "SessId : %u-%u\n" + +#: src/stored/parse_bsr.c:804 +#, c-format +msgid "VolumeName : %s\n" +msgstr "VolumeName : %s\n" + +#: src/stored/parse_bsr.c:805 +#, c-format +msgid " MediaType : %s\n" +msgstr " MediaType : %s\n" + +#: src/stored/parse_bsr.c:806 +#, c-format +msgid " Device : %s\n" +msgstr " Urządzenie : %s\n" + +#: src/stored/parse_bsr.c:807 +#, c-format +msgid " Slot : %d\n" +msgstr " Slot : %d\n" + +#: src/stored/parse_bsr.c:816 +#, c-format +msgid "Client : %s\n" +msgstr "Klient : %s\n" + +#: src/stored/parse_bsr.c:824 +#, c-format +msgid "Job : %s\n" +msgstr "Job : %s\n" + +#: src/stored/parse_bsr.c:832 +#, c-format +msgid "SessTime : %u\n" +msgstr "SessTime : %u\n" + +#: src/stored/parse_bsr.c:843 +msgid "BSR is NULL\n" +msgstr "BSR is NULL\n" + +#: src/stored/parse_bsr.c:847 +#, c-format +msgid "Next : 0x%x\n" +msgstr "Next : 0x%x\n" + +#: src/stored/parse_bsr.c:848 +#, c-format +msgid "Root bsr : 0x%x\n" +msgstr "Root bsr : 0x%x\n" + +#: src/stored/parse_bsr.c:860 +#, c-format +msgid "count : %u\n" +msgstr "zliczone : %u\n" + +#: src/stored/parse_bsr.c:861 +#, c-format +msgid "found : %u\n" +msgstr "znaleziono : %u\n" + +#: src/stored/parse_bsr.c:864 +#, c-format +msgid "done : %s\n" +msgstr "wykonane : %s\n" + +#: src/stored/parse_bsr.c:865 +#, c-format +msgid "positioning : %d\n" +msgstr "pozycjonowanie : %d\n" + +#: src/stored/parse_bsr.c:866 +#, c-format +msgid "fast_reject : %d\n" +msgstr "fast_reject : %d\n" + +#: src/stored/device.c:120 +#, fuzzy, c-format +msgid "End of medium on Volume \"%s\" Bytes=%s Blocks=%s at %s.\n" +msgstr " Plików=%s Bajtów=%s Bajtów/sek=%s Błędów=%d\n" + +#: src/stored/device.c:139 +#, c-format +msgid "New volume \"%s\" mounted on device %s at %s.\n" +msgstr "Nowy wolumen \"%s\" zamontowany na urządzeniu %s w %s.\n" + +#: src/stored/device.c:151 +#, fuzzy, c-format +msgid "write_block_to_device Volume label failed. ERR=%s" +msgstr "Nieudane odszyfrowanie klucza sesji.\n" + +#: src/stored/device.c:186 +#, c-format +msgid "write_block_to_device overflow block failed. ERR=%s" +msgstr "Niepowodzenie przepełnienia bloku write_block_to_device. ERR=%s" + +#: src/stored/device.c:191 +#, c-format +msgid "Catastrophic error. Cannot write overflow block to device %s. 
ERR=%s" +msgstr "" +"Katastrofalny błąd. Na urządzeniu %s nie można zapisać bloku przepełnienia. " +"ERR=%s" + +#: src/stored/device.c:327 src/stored/dev.c:489 +#, fuzzy, c-format +msgid "Unable to open device %s: ERR=%s\n" +msgstr "Nie mogę otworzyć pliku \"%s\": ERR=%s\n" + +#: src/stored/device.c:329 +#, fuzzy, c-format +msgid "Unable to open archive %s: ERR=%s\n" +msgstr "Nie mogę otworzyć pliku \"%s\": ERR=%s\n" + +#: src/stored/record.c:77 +#, fuzzy, c-format +msgid "unknown: %d" +msgstr "Nieznany status." + +#: src/stored/record.c:389 +msgid "Damaged buffer\n" +msgstr "Zniszczony bufor\n" + +#: src/stored/record.c:563 +#, c-format +msgid "Sanity check failed. maxlen=%d datalen=%d. Block discarded.\n" +msgstr "" +"Niepowodzenie testu zdrowego rozsądku. Wielkości: maxlen=%d a mamy datalen=%" +"d. Blok odrzucony.\n" + +#: src/stored/read.c:66 +#, fuzzy +msgid "No Volume names found for restore.\n" +msgstr "Błąd wersji Zlib" + +#: src/stored/read.c:120 +#, fuzzy, c-format +msgid ">filed: Error Hdr=%s\n" +msgstr "»wprowadzono: Błąd Hdr=%s\n" + +#: src/stored/read.c:121 src/stored/read.c:136 +#, fuzzy, c-format +msgid "Error sending to File daemon. ERR=%s\n" +msgstr "Błąd w wysyaniu Hello do demona Plików. ERR=%s\n" + +#: src/stored/read.c:135 +#, fuzzy, c-format +msgid "Error sending to FD. ERR=%s\n" +msgstr "Błąd w wysyaniu Hello do demona Plików. ERR=%s\n" + +#: src/stored/lock.c:236 +#, fuzzy, c-format +msgid "pthread_cond_wait failure. ERR=%s\n" +msgstr "pthread_create: ERR=%s\n" + +#: src/stored/lock.c:334 +#, fuzzy +msgid "unknown blocked code" +msgstr "Nieznany rodzaj zasobu %d\n" + +#: src/stored/btape.c:178 src/stored/stored.c:151 +#, c-format +msgid "Tape block size (%d) not multiple of system size (%d)\n" +msgstr "" +"Rozmiar bloku taśmy (%d) nie jest wielokrotnością rozmiaru systemowego (%d)\n" + +#: src/stored/btape.c:182 src/stored/stored.c:155 +#, c-format +msgid "Tape block size (%d) is not a power of 2\n" +msgstr "Rozmiar bloku taśmy (%d) nie jest potegą 2\n" + +#: src/stored/btape.c:185 +#, c-format +msgid "" +"\n" +"\n" +"!!!! Warning large disk addressing disabled. boffset_t=%d should be 8 or " +"more !!!!!\n" +"\n" +"\n" +msgstr "" +"\n" +"\n" +"!!!! Ostrzeżenie o wyłączeniu adresacji dużych dysków. boffset_t=%d a pownno " +"być 8 lub wiecej !!!!!\n" +"\n" +"\n" + +#: src/stored/btape.c:192 +#, c-format +msgid "32 bit printf/scanf problem. i=%d x32=%u y32=%u\n" +msgstr "Problem printf/scanf 32 bit. i=%d x32=%u y32=%u\n" + +#: src/stored/btape.c:201 +msgid "64 bit printf/scanf problem. i=%d x64=%" +msgstr "" + +#: src/stored/btape.c:206 +#, c-format +msgid "Tape block granularity is %d bytes.\n" +msgstr "Granulacja bloku taśmy to %d bajtów.\n" + +#: src/stored/btape.c:279 +msgid "No archive name specified.\n" +msgstr "Nie podano nazwy archiwum.\n" + +#: src/stored/btape.c:283 +msgid "Improper number of arguments specified.\n" +msgstr "Podano niepoprawną ilość argumentów.\n" + +#: src/stored/btape.c:297 +msgid "btape does not work with DVD storage.\n" +msgstr "btape nie działa z nośnikami DVD.\n" + +#: src/stored/btape.c:302 +msgid "btape only works with tape storage.\n" +msgstr "btape działa tylko z nośnikami w postaci taśm.\n" + +#: src/stored/btape.c:382 +#, c-format +msgid "Total Volume bytes=%sB. Total Write rate = %sB/s\n" +msgstr "Sumaryczny rozmiar: bajtów=%sB. Całkowita prędkość zapisu = %sB/s\n" + +#: src/stored/btape.c:408 +#, c-format +msgid "Volume bytes=%sB. Write rate = %sB/s\n" +msgstr "Wolumen: bajtów=%sB. 
Prędkość zapisu = %sB/s\n" + +#: src/stored/btape.c:476 +#, fuzzy, c-format +msgid "open device %s: OK\n" +msgstr "Nie można stworzyć skrt.\n" + +#: src/stored/btape.c:499 +msgid "Enter Volume Name: " +msgstr "Wprowadź nazwę wolumenu: " + +#: src/stored/btape.c:506 +#, fuzzy, c-format +msgid "Device open failed. ERR=%s\n" +msgstr "Błąd odczytu na pliku %s. ERR=%s\n" + +#: src/stored/btape.c:511 +#, c-format +msgid "Wrote Volume label for volume \"%s\".\n" +msgstr "" + +#: src/stored/btape.c:525 +#, fuzzy +msgid "Volume has no label.\n" +msgstr "Wolumen do Katalogu" + +#: src/stored/btape.c:528 +msgid "Volume label read correctly.\n" +msgstr "" + +#: src/stored/btape.c:531 +#, fuzzy, c-format +msgid "I/O error on device: ERR=%s" +msgstr "Błąd odczytu na pliku %s. ERR=%s\n" + +#: src/stored/btape.c:534 +#, fuzzy +msgid "Volume name error\n" +msgstr "Błąd wersji Zlib" + +#: src/stored/btape.c:537 +#, fuzzy, c-format +msgid "Error creating label. ERR=%s" +msgstr "Błąd odczytywania pliku %s: ERR=%s\n" + +#: src/stored/btape.c:540 +#, fuzzy +msgid "Volume version error.\n" +msgstr "Błąd wersji Zlib" + +#: src/stored/btape.c:543 +#, fuzzy +msgid "Bad Volume label type.\n" +msgstr "Wolumen do Katalogu" + +#: src/stored/btape.c:546 +#, fuzzy +msgid "Unknown error.\n" +msgstr "Błąd szyfrowania\n" + +#: src/stored/btape.c:564 +#, fuzzy, c-format +msgid "Bad status from load. ERR=%s\n" +msgstr "Za komenda .status: %s\n" + +#: src/stored/btape.c:566 +#, fuzzy, c-format +msgid "Loaded %s\n" +msgstr "Zaadowana wtyczka: %s\n" + +#: src/stored/btape.c:575 src/stored/btape.c:1130 src/stored/btape.c:1203 +#: src/stored/btape.c:1283 src/stored/btape.c:1554 +#, fuzzy, c-format +msgid "Bad status from rewind. ERR=%s\n" +msgstr "Za komenda .status: %s\n" + +#: src/stored/btape.c:578 src/stored/btape.c:1562 +#, fuzzy, c-format +msgid "Rewound %s\n" +msgstr "Nie znaleziono tabeli: %s\n" + +#: src/stored/btape.c:604 src/stored/btape.c:1566 +#, fuzzy, c-format +msgid "Bad status from weof. ERR=%s\n" +msgstr "Błąd rekordu danych. ERR=%s\n" + +#: src/stored/btape.c:608 +#, c-format +msgid "Wrote 1 EOF to %s\n" +msgstr "Zapisano 1 EOF do %s\n" + +#: src/stored/btape.c:611 +#, c-format +msgid "Wrote %d EOFs to %s\n" +msgstr "Zapisano %d EOF do %s\n" + +#: src/stored/btape.c:629 +msgid "Moved to end of medium.\n" +msgstr "Przeniesiono na koniec medium.\n" + +#: src/stored/btape.c:656 +#, fuzzy, c-format +msgid "Bad status from bsf. ERR=%s\n" +msgstr "Za komenda .status: %s\n" + +#: src/stored/btape.c:658 +#, fuzzy, c-format +msgid "Backspaced %d file%s.\n" +msgstr "Nieudana aktualizacja rekordu Media %s: ERR=%s\n" + +#: src/stored/btape.c:675 +#, fuzzy, c-format +msgid "Bad status from bsr. ERR=%s\n" +msgstr "Za komenda .status: %s\n" + +#: src/stored/btape.c:677 +#, fuzzy, c-format +msgid "Backspaced %d record%s.\n" +msgstr "Nieudana aktualizacja rekordu Media %s: ERR=%s\n" + +#: src/stored/btape.c:687 src/stored/status.c:332 +#, c-format +msgid "Configured device capabilities:\n" +msgstr "Skonfigurowano wydajność urządzenia:\n" + +#: src/stored/btape.c:705 +msgid "Device status:\n" +msgstr "Status urządzenia:\n" + +#: src/stored/btape.c:719 src/stored/status.c:372 +#, c-format +msgid "Device parameters:\n" +msgstr "Parametry urządzenia:\n" + +#: src/stored/btape.c:724 +#, c-format +msgid "Status:\n" +msgstr "Status:\n" + +#: src/stored/btape.c:739 +msgid "" +"Test writing larger and larger records.\n" +"This is a torture test for records.\n" +"I am going to write\n" +"larger and larger records. 
It will stop when the record size\n" +"plus the header exceeds the block size (by default about 64K)\n" +msgstr "" + +#: src/stored/btape.c:745 +msgid "Do you want to continue? (y/n): " +msgstr "Czy chcesz kontynuować? (y/n): " + +#: src/stored/btape.c:747 src/stored/btape.c:2192 +msgid "Command aborted.\n" +msgstr "" + +#: src/stored/btape.c:763 +#, c-format +msgid "Block %d i=%d\n" +msgstr "Blok %d i=%d\n" + +#: src/stored/btape.c:789 +msgid "Skipping read backwards test because BSR turned off.\n" +msgstr "" + +#: src/stored/btape.c:793 +msgid "" +"\n" +"=== Write, backup, and re-read test ===\n" +"\n" +"I'm going to write three records and an EOF\n" +"then backup over the EOF and re-read the last record.\n" +"Bacula does this after writing the last block on the\n" +"tape to verify that the block was written correctly.\n" +"\n" +"This is not an *essential* feature ...\n" +"\n" +msgstr "" + +#: src/stored/btape.c:806 src/stored/btape.c:817 src/stored/btape.c:828 +#: src/stored/btape.c:1140 src/stored/btape.c:1156 src/stored/btape.c:1898 +#: src/stored/btape.c:2810 +#, fuzzy +msgid "Error writing record to block.\n" +msgstr "Błąd odczytywania pliku %s: ERR=%s\n" + +#: src/stored/btape.c:810 src/stored/btape.c:821 src/stored/btape.c:832 +#: src/stored/btape.c:1144 src/stored/btape.c:1160 src/stored/btape.c:1902 +#: src/stored/btape.c:2814 +#, fuzzy +msgid "Error writing block to device.\n" +msgstr "Błąd w wysyaniu Hello do demona Przechowywania. ERR=%s\n" + +#: src/stored/btape.c:813 +#, fuzzy, c-format +msgid "Wrote first record of %d bytes.\n" +msgstr "Brak rekordu dla %d %s\n" + +#: src/stored/btape.c:824 +#, fuzzy, c-format +msgid "Wrote second record of %d bytes.\n" +msgstr "Brak rekordu dla %d %s\n" + +#: src/stored/btape.c:835 +#, fuzzy, c-format +msgid "Wrote third record of %d bytes.\n" +msgstr "Brak rekordu dla %d %s\n" + +#: src/stored/btape.c:842 src/stored/btape.c:847 +#, fuzzy, c-format +msgid "Backspace file failed! ERR=%s\n" +msgstr "Nieudana aktualizacja rekordu Media %s: ERR=%s\n" + +#: src/stored/btape.c:851 +msgid "Backspaced over EOF OK.\n" +msgstr "" + +#: src/stored/btape.c:853 +#, fuzzy, c-format +msgid "Backspace record failed! ERR=%s\n" +msgstr "Nieudana aktualizacja rekordu Media %s: ERR=%s\n" + +#: src/stored/btape.c:856 +#, fuzzy +msgid "Backspace record OK.\n" +msgstr "Nieudana aktualizacja rekordu Media %s: ERR=%s\n" + +#: src/stored/btape.c:859 src/stored/btape.c:865 +#, fuzzy, c-format +msgid "Read block failed! ERR=%s\n" +msgstr "Błąd odczytu na pliku %s. ERR=%s\n" + +#: src/stored/btape.c:870 +#, fuzzy +msgid "Bad data in record. Test failed!\n" +msgstr "Nieudana aktualizacja rekordu Media %s: ERR=%s\n" + +#: src/stored/btape.c:874 +msgid "" +"\n" +"Block re-read correct. Test succeeded!\n" +msgstr "" + +#: src/stored/btape.c:875 +msgid "" +"=== End Write, backup, and re-read test ===\n" +"\n" +msgstr "" + +#: src/stored/btape.c:882 +msgid "" +"This is not terribly serious since Bacula only uses\n" +"this function to verify the last block written to the\n" +"tape. Bacula will skip the last block verification\n" +"if you add:\n" +"\n" +"Backward Space Record = No\n" +"\n" +"to your Storage daemon's Device resource definition.\n" +msgstr "" + +#: src/stored/btape.c:904 +#, c-format +msgid "Begin writing %i files of %sB with raw blocks of %u bytes.\n" +msgstr "" + +#: src/stored/btape.c:925 src/stored/btape.c:2866 +#, fuzzy, c-format +msgid "Write failed at block %u. stat=%d ERR=%s\n" +msgstr "Błąd odczytu na pliku %s. 
ERR=%s\n" + +#: src/stored/btape.c:959 +#, c-format +msgid "Begin writing %i files of %sB with blocks of %u bytes.\n" +msgstr "" + +#: src/stored/btape.c:968 +#, fuzzy +msgid "" +"\n" +"Error writing record to block.\n" +msgstr "Błąd odczytywania pliku %s: ERR=%s\n" + +#: src/stored/btape.c:972 +#, fuzzy +msgid "" +"\n" +"Error writing block to device.\n" +msgstr "Błąd w wysyaniu Hello do demona Przechowywania. ERR=%s\n" + +#: src/stored/btape.c:1027 +msgid "The file_size is too big, stop this test with Ctrl-c.\n" +msgstr "" + +#: src/stored/btape.c:1055 +msgid "Test with zero data, should give the maximum throughput.\n" +msgstr "" + +#: src/stored/btape.c:1067 src/stored/btape.c:1093 +msgid "Test with random data, should give the minimum throughput.\n" +msgstr "" + +#: src/stored/btape.c:1082 +msgid "Test with zero data and bacula block structure.\n" +msgstr "" + +#: src/stored/btape.c:1116 +#, c-format +msgid "" +"\n" +"=== Write, rewind, and re-read test ===\n" +"\n" +"I'm going to write %d records and an EOF\n" +"then write %d records and an EOF, then rewind,\n" +"and re-read the data to verify that it is correct.\n" +"\n" +"This is an *essential* feature ...\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1148 src/stored/btape.c:1164 +#, fuzzy, c-format +msgid "Wrote %d blocks of %d bytes.\n" +msgstr "Brak rekordu dla %d %s\n" + +#: src/stored/btape.c:1206 src/stored/btape.c:1286 +msgid "Rewind OK.\n" +msgstr "" + +#: src/stored/btape.c:1219 src/stored/btape.c:1338 +msgid "Got EOF on tape.\n" +msgstr "" + +#: src/stored/btape.c:1224 +#, fuzzy, c-format +msgid "Read block %d failed! ERR=%s\n" +msgstr "Błąd odczytu na pliku %s. ERR=%s\n" + +#: src/stored/btape.c:1230 +#, fuzzy, c-format +msgid "Read record failed. Block %d! ERR=%s\n" +msgstr "Błąd odczytu na pliku %s. ERR=%s\n" + +#: src/stored/btape.c:1236 src/stored/btape.c:1368 +#, fuzzy, c-format +msgid "Bad data in record. Expected %d, got %d at byte %d. Test failed!\n" +msgstr "Nieudana aktualizacja rekordu Media %s: ERR=%s\n" + +#: src/stored/btape.c:1243 +#, c-format +msgid "%d blocks re-read correctly.\n" +msgstr "" + +#: src/stored/btape.c:1246 src/stored/btape.c:1375 +msgid "" +"=== Test Succeeded. End Write, rewind, and re-read test ===\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1274 +msgid "Block position test\n" +msgstr "Test pozycji bloku\n" + +#: src/stored/btape.c:1329 +#, c-format +msgid "Reposition to file:block %d:%d\n" +msgstr "Repozycja do plik:blok %d:%d\n" + +#: src/stored/btape.c:1331 +#, fuzzy +msgid "Reposition error.\n" +msgstr "Błąd odszyfrowywania\n" + +#: src/stored/btape.c:1344 +#, fuzzy, c-format +msgid "" +"Read block %d failed! file=%d blk=%d. ERR=%s\n" +"\n" +msgstr "Błąd odczytu na pliku %s. ERR=%s\n" + +#: src/stored/btape.c:1346 +msgid "" +"This may be because the tape drive block size is not\n" +" set to variable blocking as normally used by Bacula.\n" +" Please see the Tape Testing chapter in the manual and \n" +" look for using mt with defblksize and setoptions\n" +"If your tape drive block size is correct, then perhaps\n" +" your SCSI driver is *really* stupid and does not\n" +" correctly report the file:block after a FSF. In this\n" +" case try setting:\n" +" Fast Forward Space File = no\n" +" in your Device resource.\n" +msgstr "" + +#: src/stored/btape.c:1362 +#, fuzzy, c-format +msgid "Read record failed! 
ERR=%s\n" +msgstr "Nieudana aktualizacja rekordu Media %s: ERR=%s\n" + +#: src/stored/btape.c:1373 +#, c-format +msgid "Block %d re-read correctly.\n" +msgstr "Blok %d został ponownie przeczytany poprawnie.\n" + +#: src/stored/btape.c:1394 +msgid "" +"\n" +"\n" +"=== Append files test ===\n" +"\n" +"This test is essential to Bacula.\n" +"\n" +"I'm going to write one record in file 0,\n" +" two records in file 1,\n" +" and three records in file 2\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1418 +msgid "Now moving to end of medium.\n" +msgstr "" + +#: src/stored/btape.c:1420 src/stored/btape.c:1649 +#, c-format +msgid "We should be in file 3. I am at file %d. %s\n" +msgstr "" + +#: src/stored/btape.c:1421 src/stored/btape.c:1439 src/stored/btape.c:1638 +#: src/stored/btape.c:1650 src/stored/btape.c:1663 src/stored/btape.c:1680 +msgid "This is correct!" +msgstr "To jest prawidłowo!" + +#: src/stored/btape.c:1421 src/stored/btape.c:1439 src/stored/btape.c:1638 +#: src/stored/btape.c:1650 src/stored/btape.c:1663 src/stored/btape.c:1680 +msgid "This is NOT correct!!!!" +msgstr "To NIE jest prawidlowo!!!" + +#: src/stored/btape.c:1427 +msgid "" +"\n" +"Now the important part, I am going to attempt to append to the tape.\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1434 +msgid "" +"Done appending, there should be no I/O errors\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1435 +msgid "Doing Bacula scan of blocks:\n" +msgstr "" + +#: src/stored/btape.c:1437 +msgid "End scanning the tape.\n" +msgstr "Koniec skanowania taśmy.\n" + +#: src/stored/btape.c:1438 src/stored/btape.c:1662 +#, c-format +msgid "We should be in file 4. I am at file %d. %s\n" +msgstr "" + +#: src/stored/btape.c:1463 +msgid "" +"\n" +"Autochanger enabled, but no name or no command device specified.\n" +msgstr "" +"\n" +"Biblioteka taśmowa uaktywniona, ale nie określono nazwy lub urządzenia " +"sterującego.\n" + +#: src/stored/btape.c:1467 +msgid "" +"\n" +"Ah, I see you have an autochanger configured.\n" +"To test the autochanger you must have a blank tape\n" +" that I can write on in Slot 1.\n" +msgstr "" +"\n" +"O, widzę że biblioteka taśmowa jest skonfigurowana.\n" +"By przetestować bibliotekę taśmową musisz mieć czystą taśmę\n" +" w Slocie 1 na której mogę zapisywać.\n" + +#: src/stored/btape.c:1470 +msgid "" +"\n" +"Do you wish to continue with the Autochanger test? (y/n): " +msgstr "" +"\n" +"Czy chcesz kontynuować test biblioteki taśmowej? (y/n): " + +#: src/stored/btape.c:1477 +msgid "" +"\n" +"\n" +"=== Autochanger test ===\n" +"\n" +msgstr "" +"\n" +"\n" +"=== Test biblioteki taśmowej ===\n" +"\n" + +#: src/stored/btape.c:1486 +#, fuzzy +msgid "3301 Issuing autochanger \"loaded\" command.\n" +msgstr "Za komenda storage: %s" + +#: src/stored/btape.c:1495 +#, fuzzy, c-format +msgid "3991 Bad autochanger command: %s\n" +msgstr "Za komenda storage: %s" + +#: src/stored/btape.c:1496 +#, c-format +msgid "3991 result=\"%s\": ERR=%s\n" +msgstr "3991 wynik=\"%s\": ERR=%s\n" + +#: src/stored/btape.c:1500 +#, c-format +msgid "Slot %d loaded. I am going to unload it.\n" +msgstr "Slot %d załadowany. Będzie rozładowany.\n" + +#: src/stored/btape.c:1502 +msgid "Nothing loaded in the drive. OK.\n" +msgstr "Nic nie załadowano w napędzie. 
OK.\n" + +#: src/stored/btape.c:1509 +#, c-format +msgid "3302 Issuing autochanger \"unload %d %d\" command.\n" +msgstr "" + +#: src/stored/btape.c:1514 +#, c-format +msgid "unload status=%s %d\n" +msgstr "status wyąłdowania=%s %d\n" + +#: src/stored/btape.c:1514 +msgid "Bad" +msgstr "Błąd" + +#: src/stored/btape.c:1517 +#, fuzzy, c-format +msgid "3992 Bad autochanger command: %s\n" +msgstr "Za komenda storage: %s" + +#: src/stored/btape.c:1518 +#, c-format +msgid "3992 result=\"%s\": ERR=%s\n" +msgstr "3992 wynik=\"%s\": ERR=%s\n" + +#: src/stored/btape.c:1528 +#, fuzzy, c-format +msgid "3303 Issuing autochanger \"load %d %d\" command.\n" +msgstr "Za komenda storage: %s" + +#: src/stored/btape.c:1536 +#, c-format +msgid "3303 Autochanger \"load %d %d\" status is OK.\n" +msgstr "" + +#: src/stored/btape.c:1540 +#, fuzzy, c-format +msgid "3993 Bad autochanger command: %s\n" +msgstr "Za komenda storage: %s" + +#: src/stored/btape.c:1541 +#, c-format +msgid "3993 result=\"%s\": ERR=%s\n" +msgstr "3993 wynik=\"%s\": ERR=%s\n" + +#: src/stored/btape.c:1556 +msgid "" +"\n" +"The test failed, probably because you need to put\n" +"a longer sleep time in the mtx-script in the load) case.\n" +"Adding a 30 second sleep and trying again ...\n" +msgstr "" + +#: src/stored/btape.c:1569 +#, c-format +msgid "Wrote EOF to %s\n" +msgstr "Zapisano EOF do %s\n" + +#: src/stored/btape.c:1573 +#, c-format +msgid "" +"\n" +"The test worked this time. Please add:\n" +"\n" +" sleep %d\n" +"\n" +"to your mtx-changer script in the load) case.\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1578 +msgid "" +"\n" +"The test autochanger worked!!\n" +"\n" +msgstr "" +"\n" +"Test biblioteki taśmowej zakończony sukcesem!!\n" +"\n" + +#: src/stored/btape.c:1589 +msgid "You must correct this error or the Autochanger will not work.\n" +msgstr "Bez poprawy tego błędu biblioteka taśmowej nie będzie działać.\n" + +#: src/stored/btape.c:1607 +msgid "" +"\n" +"\n" +"=== Forward space files test ===\n" +"\n" +"This test is essential to Bacula.\n" +"\n" +"I'm going to write five files then test forward spacing\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1632 +msgid "Now forward spacing 1 file.\n" +msgstr "" + +#: src/stored/btape.c:1634 src/stored/btape.c:1646 src/stored/btape.c:1659 +#: src/stored/btape.c:1677 src/stored/btape.c:1853 +#, fuzzy, c-format +msgid "Bad status from fsr. ERR=%s\n" +msgstr "Za komenda .status: %s\n" + +#: src/stored/btape.c:1637 +#, c-format +msgid "We should be in file 1. I am at file %d. %s\n" +msgstr "" + +#: src/stored/btape.c:1644 +msgid "Now forward spacing 2 files.\n" +msgstr "" + +#: src/stored/btape.c:1657 +msgid "Now forward spacing 4 files.\n" +msgstr "" + +#: src/stored/btape.c:1669 +msgid "" +"The test worked this time. Please add:\n" +"\n" +" Fast Forward Space File = no\n" +"\n" +"to your Device resource for this drive.\n" +msgstr "" + +#: src/stored/btape.c:1675 +msgid "Now forward spacing 1 more file.\n" +msgstr "" + +#: src/stored/btape.c:1679 +#, c-format +msgid "We should be in file 5. I am at file %d. %s\n" +msgstr "" + +#: src/stored/btape.c:1684 +msgid "" +"\n" +"=== End Forward space files test ===\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1688 +msgid "" +"\n" +"The forward space file test failed.\n" +msgstr "" + +#: src/stored/btape.c:1690 +msgid "" +"You have Fast Forward Space File enabled.\n" +"I am turning it off then retrying the test.\n" +msgstr "" + +#: src/stored/btape.c:1696 +msgid "" +"You must correct this error or Bacula will not work.\n" +"Some systems, e.g. 
OpenBSD, require you to set\n" +" Use MTIOCGET= no\n" +"in your device resource. Use with caution.\n" +msgstr "" + +#: src/stored/btape.c:1730 +msgid "" +"\n" +"Append test failed. Attempting again.\n" +"Setting \"Hardware End of Medium = no\n" +" and \"Fast Forward Space File = no\n" +"and retrying append test.\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1738 +msgid "" +"\n" +"\n" +"It looks like the test worked this time, please add:\n" +"\n" +" Hardware End of Medium = No\n" +"\n" +" Fast Forward Space File = No\n" +"to your Device resource in the Storage conf file.\n" +msgstr "" + +#: src/stored/btape.c:1745 +msgid "" +"\n" +"\n" +"That appears *NOT* to have corrected the problem.\n" +msgstr "" + +#: src/stored/btape.c:1750 +msgid "" +"\n" +"\n" +"It looks like the append failed. Attempting again.\n" +"Setting \"BSF at EOM = yes\" and retrying append test.\n" +msgstr "" + +#: src/stored/btape.c:1755 +msgid "" +"\n" +"\n" +"It looks like the test worked this time, please add:\n" +"\n" +" Hardware End of Medium = No\n" +" Fast Forward Space File = No\n" +" BSF at EOM = yes\n" +"\n" +"to your Device resource in the Storage conf file.\n" +msgstr "" + +#: src/stored/btape.c:1766 +msgid "" +"\n" +"Append test failed.\n" +"\n" +"\n" +"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" +"Unable to correct the problem. You MUST fix this\n" +"problem before Bacula can use your tape drive correctly\n" +"\n" +"Perhaps running Bacula in fixed block mode will work.\n" +"Do so by setting:\n" +"\n" +"Minimum Block Size = nnn\n" +"Maximum Block Size = nnn\n" +"\n" +"in your Storage daemon's Device definition.\n" +"nnn must match your tape driver's block size, which\n" +"can be determined by reading your tape manufacturers\n" +"information, and the information on your kernel dirver.\n" +"Fixed block sizes, however, are not normally an ideal solution.\n" +"\n" +"Some systems, e.g. OpenBSD, require you to set\n" +" Use MTIOCGET= no\n" +"in your device resource. Use with caution.\n" +msgstr "" + +#: src/stored/btape.c:1788 +msgid "" +"\n" +"The above Bacula scan should have output identical to what follows.\n" +"Please double check it ...\n" +"=== Sample correct output ===\n" +"1 block of 64448 bytes in file 1\n" +"End of File mark.\n" +"2 blocks of 64448 bytes in file 2\n" +"End of File mark.\n" +"3 blocks of 64448 bytes in file 3\n" +"End of File mark.\n" +"1 block of 64448 bytes in file 4\n" +"End of File mark.\n" +"Total files=4, blocks=7, bytes = 451,136\n" +"=== End sample correct output ===\n" +"\n" +"If the above scan output is not identical to the\n" +"sample output, you MUST correct the problem\n" +"or Bacula will not be able to write multiple Jobs to \n" +"the tape.\n" +"\n" +msgstr "" + +#: src/stored/btape.c:1831 +#, fuzzy, c-format +msgid "Bad status from fsf. 
ERR=%s\n" +msgstr "Za komenda .status: %s\n" + +#: src/stored/btape.c:1835 +msgid "Forward spaced 1 file.\n" +msgstr "" + +#: src/stored/btape.c:1838 +#, c-format +msgid "Forward spaced %d files.\n" +msgstr "" + +#: src/stored/btape.c:1857 +msgid "Forward spaced 1 record.\n" +msgstr "" + +#: src/stored/btape.c:1860 +#, c-format +msgid "Forward spaced %d records.\n" +msgstr "" + +#: src/stored/btape.c:1905 +#, fuzzy, c-format +msgid "Wrote one record of %d bytes.\n" +msgstr "Brak rekordu dla %d %s\n" + +#: src/stored/btape.c:1907 +msgid "Wrote block to device.\n" +msgstr "Zapisano blok na urzadzenie.\n" + +#: src/stored/btape.c:1922 +msgid "Enter length to read: " +msgstr "Wprowadź długość do odczytu: " + +#: src/stored/btape.c:1927 +msgid "Bad length entered, using default of 1024 bytes.\n" +msgstr "Wprowadzono błędną dlugość, używam domyślnej długości 1024 bajtów.\n" + +#: src/stored/btape.c:1936 +#, fuzzy, c-format +msgid "Read of %d bytes gives stat=%d. ERR=%s\n" +msgstr "Błąd odczytu na pliku %s. ERR=%s\n" + +#: src/stored/btape.c:1959 src/stored/btape.c:2008 +#, c-format +msgid "End of tape\n" +msgstr "Koniec taśmy\n" + +#: src/stored/btape.c:1964 +#, c-format +msgid "Starting scan at file %u\n" +msgstr "Rozpoczynam skanowanie od pliku %u\n" + +#: src/stored/btape.c:1969 src/stored/dev.c:1387 +#, fuzzy, c-format +msgid "read error on %s. ERR=%s.\n" +msgstr "Błąd odczytu na pliku %s. ERR=%s\n" + +#: src/stored/btape.c:1971 +#, fuzzy, c-format +msgid "Bad status from read %d. ERR=%s\n" +msgstr "Za komenda .status: %s\n" + +#: src/stored/btape.c:1974 src/stored/btape.c:1988 src/stored/btape.c:2052 +#: src/stored/btape.c:2064 src/stored/btape.c:2077 src/stored/btape.c:2093 +#, c-format +msgid "1 block of %d bytes in file %d\n" +msgstr "1 blok %d bajtów w pliku %d\n" + +#: src/stored/btape.c:1977 src/stored/btape.c:1991 src/stored/btape.c:2055 +#: src/stored/btape.c:2067 src/stored/btape.c:2080 src/stored/btape.c:2096 +#, c-format +msgid "%d blocks of %d bytes in file %d\n" +msgstr "%d bloków %d bajtów w pliku %d\n" + +#: src/stored/btape.c:1999 src/stored/btape.c:2071 +#, c-format +msgid "End of File mark.\n" +msgstr "Znacznik konca pliku.\n" + +#: src/stored/btape.c:2020 src/stored/btape.c:2124 +#, fuzzy, c-format +msgid "Total files=%d, blocks=%d, bytes = %s\n" +msgstr " Plików=%s Bajtów=%s Bajtów/sek=%s Błędów=%d\n" + +#: src/stored/btape.c:2084 +#, c-format +msgid "Short block read.\n" +msgstr "Odczyt krótkiego bloku.\n" + +#: src/stored/btape.c:2087 +#, fuzzy, c-format +msgid "Error reading block. ERR=%s\n" +msgstr "Błąd odczytywania pliku %s: ERR=%s\n" + +#: src/stored/btape.c:2111 +#, c-format +msgid "" +"Block=%u file,blk=%u,%u blen=%u First rec FI=%s SessId=%u SessTim=%u Strm=%s " +"rlen=%d\n" +msgstr "" + +#: src/stored/btape.c:2133 +#, fuzzy, c-format +msgid "Device status: %u. ERR=%s\n" +msgstr "Błąd odczytu na pliku %s. ERR=%s\n" + +#: src/stored/btape.c:2165 +#, c-format +msgid "" +"\n" +"This command simulates Bacula writing to a tape.\n" +"It requires either one or two blank tapes, which it\n" +"will label and write.\n" +"\n" +"If you have an autochanger configured, it will use\n" +"the tapes that are in slots 1 and 2, otherwise, you will\n" +"be prompted to insert the tapes when necessary.\n" +"\n" +"It will print a status approximately\n" +"every 322 MB, and write an EOF every %s. 
If you have\n" +"selected the simple test option, after writing the first tape\n" +"it will rewind it and re-read the last block written.\n" +"\n" +"If you have selected the multiple tape test, when the first tape\n" +"fills, it will ask for a second, and after writing a few more \n" +"blocks, it will stop. Then it will begin re-reading the\n" +"two tapes.\n" +"\n" +"This may take a long time -- hours! ...\n" +"\n" +msgstr "" + +#: src/stored/btape.c:2183 +msgid "" +"Do you want to run the simplified test (s) with one tape\n" +"or the complete multiple tape (m) test: (s/m) " +msgstr "" + +#: src/stored/btape.c:2186 +msgid "Simple test (single tape) selected.\n" +msgstr "wybrano prosty test (pojedyńcza taśma).\n" + +#: src/stored/btape.c:2189 +msgid "Multiple tape test selected.\n" +msgstr "" + +#: src/stored/btape.c:2227 src/stored/append.c:106 +#, fuzzy, c-format +msgid "Write session label failed. ERR=%s\n" +msgstr "Nieudane odszyfrowanie klucza sesji.\n" + +#: src/stored/btape.c:2231 +#, fuzzy +msgid "Wrote Start of Session label.\n" +msgstr "Błąd odczytywania pliku %s: ERR=%s\n" + +#: src/stored/btape.c:2250 +#, c-format +msgid "%s Begin writing Bacula records to tape ...\n" +msgstr "" + +#: src/stored/btape.c:2252 +#, c-format +msgid "%s Begin writing Bacula records to first tape ...\n" +msgstr "" + +#: src/stored/btape.c:2277 +#, fuzzy +msgid "Flush block failed.\n" +msgstr "Nieudane odszyfrowanie klucza sesji.\n" + +#: src/stored/btape.c:2291 +#, c-format +msgid "Wrote block=%u, file,blk=%u,%u VolBytes=%s rate=%sB/s\n" +msgstr "" + +#: src/stored/btape.c:2302 +#, c-format +msgid "%s Flush block, write EOF\n" +msgstr "" + +#: src/stored/btape.c:2311 +msgid "Wrote 1000 blocks on second tape. Done.\n" +msgstr "" + +#: src/stored/btape.c:2316 +msgid "Not OK\n" +msgstr "Niepoprawnie\n" + +#: src/stored/btape.c:2340 +#, fuzzy +msgid "Job canceled.\n" +msgstr "Status zadania: Anulowane" + +#: src/stored/btape.c:2345 src/stored/append.c:285 +#, fuzzy, c-format +msgid "Error writing end session label. ERR=%s\n" +msgstr "Błąd odczytywania pliku %s: ERR=%s\n" + +#: src/stored/btape.c:2351 +msgid "Set ok=false after write_block_to_device.\n" +msgstr "" + +#: src/stored/btape.c:2355 +#, fuzzy +msgid "Wrote End of Session label.\n" +msgstr "Błąd odczytywania pliku %s: ERR=%s\n" + +#: src/stored/btape.c:2379 +#, c-format +msgid "Wrote state file last_block_num1=%d last_block_num2=%d\n" +msgstr "" + +#: src/stored/btape.c:2383 +#, fuzzy, c-format +msgid "Could not create state file: %s ERR=%s\n" +msgstr "Nie można stworzyć pliku bootstrap %s: ERR=%s\n" + +#: src/stored/btape.c:2394 +#, c-format +msgid "" +"\n" +"\n" +"%s Done filling tape at %d:%d. Now beginning re-read of tape ...\n" +msgstr "" + +#: src/stored/btape.c:2397 +#, c-format +msgid "" +"\n" +"\n" +"%s Done filling tapes at %d:%d. Now beginning re-read of first tape ...\n" +msgstr "" + +#: src/stored/btape.c:2403 +msgid "do_unfill failed.\n" +msgstr "niepowodzenie do_unfill.\n" + +#: src/stored/btape.c:2408 +#, fuzzy, c-format +msgid "%s: Error during test.\n" +msgstr "Błąd wysyania listy wcze.\n" + +#: src/stored/btape.c:2443 +msgid "" +"\n" +"The state file level has changed. You must redo\n" +"the fill command.\n" +msgstr "" + +#: src/stored/btape.c:2450 +#, fuzzy, c-format +msgid "" +"\n" +"Could not find the state file: %s ERR=%s\n" +"You must redo the fill command.\n" +msgstr "Nie można stworzyć pliku bootstrap %s: ERR=%s\n" + +#: src/stored/btape.c:2516 +msgid "Mount first tape. 
Press enter when ready: " +msgstr "Umieść pierwszą taśmę. Naciśnij 'Enter' kiedy gotowe: " + +#: src/stored/btape.c:2532 +msgid "Rewinding.\n" +msgstr "Przewijanie.\n" + +#: src/stored/btape.c:2537 +#, c-format +msgid "Reading the first 10000 records from %u:%u.\n" +msgstr "Odczytywanie pierwszych 10000 rekordów z %u:%u.\n" + +#: src/stored/btape.c:2541 src/stored/btape.c:2608 +#, c-format +msgid "Reposition from %u:%u to %u:%u\n" +msgstr "Repozycja z %u:%u do %u:%u\n" + +#: src/stored/btape.c:2544 src/stored/btape.c:2595 src/stored/btape.c:2611 +#, fuzzy, c-format +msgid "Reposition error. ERR=%s\n" +msgstr "Błąd rekordu danych. ERR=%s\n" + +#: src/stored/btape.c:2547 +#, fuzzy, c-format +msgid "Reading block %u.\n" +msgstr "Błąd odczytywania pliku %s: ERR=%s\n" + +#: src/stored/btape.c:2549 src/stored/btape.c:2600 src/stored/btape.c:2616 +#, fuzzy, c-format +msgid "Error reading block: ERR=%s\n" +msgstr "Błąd odczytywania pliku %s: ERR=%s\n" + +#: src/stored/btape.c:2554 +msgid "" +"\n" +"The last block on the tape matches. Test succeeded.\n" +"\n" +msgstr "" +"\n" +"Ostatni blok na taśmie zgodny. Test zakończony sukcesem.\n" +"\n" + +#: src/stored/btape.c:2556 +msgid "" +"\n" +"The last block of the first tape matches.\n" +"\n" +msgstr "" +"\n" +"Ostatni blok na pierwszej taśmie zgodny.\n" + +#: src/stored/btape.c:2580 +msgid "Mount second tape. Press enter when ready: " +msgstr "Zamontuj druga taśmę. Naciśnij 'Enter' kiedy gotowe: " + +#: src/stored/btape.c:2593 +#, c-format +msgid "Reposition from %u:%u to 0:1\n" +msgstr "Repozycja z %u:%u do 0:1\n" + +#: src/stored/btape.c:2598 src/stored/btape.c:2614 +#, fuzzy, c-format +msgid "Reading block %d.\n" +msgstr "Błąd odczytywania pliku %s: ERR=%s\n" + +#: src/stored/btape.c:2604 +msgid "" +"\n" +"The first block on the second tape matches.\n" +"\n" +msgstr "" +"\n" +"Pierwszy blok na drugiej taśmie zgodny.\n" +"\n" + +#: src/stored/btape.c:2620 +msgid "" +"\n" +"The last block on the second tape matches. Test succeeded.\n" +"\n" +msgstr "" +"\n" +"Ostatni blok na drugiej taśmie zgodny. Test zakończony sukcesem.\n" +"\n" + +#: src/stored/btape.c:2637 +#, c-format +msgid "10000 records read now at %d:%d\n" +msgstr "10000 rekordów odczytanych na %d:%d\n" + +#: src/stored/btape.c:2660 src/stored/btape.c:2671 src/stored/btape.c:2716 +msgid "Last block written" +msgstr "Ostatni blok zapisany" + +#: src/stored/btape.c:2662 src/stored/btape.c:2672 +msgid "Block read back" +msgstr "Odczyt zapisanego bloku" + +#: src/stored/btape.c:2663 +#, c-format +msgid "" +"\n" +"\n" +"The blocks differ at byte %u\n" +msgstr "" +"\n" +"\n" +"Blok różni się w bajcie %u\n" + +#: src/stored/btape.c:2664 +msgid "" +"\n" +"\n" +"!!!! The last block written and the block\n" +"that was read back differ. 
The test FAILED !!!!\n" +"This must be corrected before you use Bacula\n" +"to write multi-tape Volumes.!!!!\n" +msgstr "" +"\n" +"\n" +"Uwaga!!!!: Ostatni zapisany blok różni się od bloku odczytanego\n" +"Test zakończony niepowodzeniem !!!!\n" +"Konieczna poprawa przed użyciem Bacula'i\n" +"do zapisu na wielotasmowych Wolumenach.!!!!\n" + +#: src/stored/btape.c:2700 +#, c-format +msgid "Last block at: %u:%u this_dev_block_num=%d\n" +msgstr "Ostatni blok przy: %u:%u this_dev_block_num=%d\n" + +#: src/stored/btape.c:2714 +#, c-format +msgid "Block not written: FileIndex=%u blk_block=%u Size=%u\n" +msgstr "Blok nie został zapisany: FileIndex=%u blk_block=%u Rozmiar=%u\n" + +#: src/stored/btape.c:2718 +msgid "Block not written" +msgstr "Blok nie zostal zapisany" + +#: src/stored/btape.c:2733 +#, c-format +msgid "End of tape %d:%d. Volume Bytes=%s. Write rate = %sB/s\n" +msgstr "Koniec taśmy %d:%d. Rozmiar =%s bajtów. Prędkość zapisu = %sB/s\n" + +#: src/stored/btape.c:2784 +msgid "Test writing blocks of 64512 bytes to tape.\n" +msgstr "Test zapisu na taśmie bloków 64512 bajtów.\n" + +#: src/stored/btape.c:2786 +msgid "How many blocks do you want to write? (1000): " +msgstr "Ile bloków chcesz zapisać? (1000): " + +#: src/stored/btape.c:2803 +#, c-format +msgid "Begin writing %d Bacula blocks to tape ...\n" +msgstr "Rozpocznij zapis na taśmę %d bloków Bacula ...\n" + +#: src/stored/btape.c:2846 +#, c-format +msgid "Begin writing raw blocks of %u bytes.\n" +msgstr "Rozpocznij zapis surowych bloków po %u bajtów.\n" + +#: src/stored/btape.c:2877 +msgid "test autochanger" +msgstr "przetestuj bibliotekę taśmową" + +#: src/stored/btape.c:2878 +msgid "backspace file" +msgstr "cofnij się o plik" + +#: src/stored/btape.c:2879 +#, fuzzy +msgid "backspace record" +msgstr "Nieudana aktualizacja rekordu Media %s: ERR=%s\n" + +#: src/stored/btape.c:2880 +msgid "list device capabilities" +msgstr "wylistuj właściwości urządzenia" + +#: src/stored/btape.c:2881 +msgid "clear tape errors" +msgstr "wyczyść błędy taśmy" + +#: src/stored/btape.c:2882 +msgid "go to end of Bacula data for append" +msgstr "idź do końca danych Bacula'i w celu dołączenia (append)" + +#: src/stored/btape.c:2883 +msgid "go to the physical end of medium" +msgstr "idź do fizycznego końca medium" + +#: src/stored/btape.c:2884 +msgid "fill tape, write onto second volume" +msgstr "wypełnij taśmę, zapisuj na drugim wolumenie" + +#: src/stored/btape.c:2885 +msgid "read filled tape" +msgstr "odczytaj zapełnioną taśmę" + +#: src/stored/btape.c:2886 +msgid "forward space a file" +msgstr "" + +#: src/stored/btape.c:2887 +msgid "forward space a record" +msgstr "" + +#: src/stored/btape.c:2888 +msgid "print this command" +msgstr "wyświetl tą komendę" + +#: src/stored/btape.c:2889 +msgid "write a Bacula label to the tape" +msgstr "zapisz etykietę Bacula dla taśmy" + +#: src/stored/btape.c:2890 +msgid "load a tape" +msgstr "załaduj taśmę" + +#: src/stored/btape.c:2891 +msgid "quit btape" +msgstr "zamknij btape" + +#: src/stored/btape.c:2892 +msgid "use write() to fill tape" +msgstr "użyj write() by zapełnić taśmę" + +#: src/stored/btape.c:2893 +msgid "read and print the Bacula tape label" +msgstr "odczytaj o wyświetl etykietę taśmy Bacula" + +#: src/stored/btape.c:2894 +msgid "test record handling functions" +msgstr "przetestuj funkcje sterujace zapisu" + +#: src/stored/btape.c:2895 +msgid "rewind the tape" +msgstr "przewiń taśmę" + +#: src/stored/btape.c:2896 +msgid "read() tape block by block to EOT and report" +msgstr "read() - odczytaj taśmę blok po 
bloku do EOT i wygeneruj raport" + +#: src/stored/btape.c:2897 +msgid "Bacula read block by block to EOT and report" +msgstr "Bacula odczytała blok po bloku do EOT i wygenerowała raport" + +#: src/stored/btape.c:2898 +msgid "" +"[file_size=n(GB)|nb_file=3|skip_zero|skip_random|skip_raw|skip_block] report " +"drive speed" +msgstr "" +"[file_size=n(GB)|nb_file=3|skip_zero|skip_random|skip_raw|skip_block] raport " +"prędkości dysku" + +#: src/stored/btape.c:2899 +msgid "print tape status" +msgstr "wyświetl status taśmy" + +#: src/stored/btape.c:2900 +msgid "General test Bacula tape functions" +msgstr "Ogólny test Bacula funkcji taśmy" + +#: src/stored/btape.c:2901 +msgid "write an EOF on the tape" +msgstr "Zapis EOF na taśmie." + +#: src/stored/btape.c:2902 +msgid "write a single Bacula block" +msgstr "zapisz pojedyńczy blok Bacula'i" + +#: src/stored/btape.c:2903 +msgid "read a single record" +msgstr "odczytaj pojedynczy rekord" + +#: src/stored/btape.c:2904 +msgid "read a single Bacula block" +msgstr "przeczytaj pojedyńczy blok Bacula'i" + +#: src/stored/btape.c:2905 +msgid "quick fill command" +msgstr "polecenie szybkiego wypełnienia" + +#: src/stored/btape.c:2926 +#, c-format +msgid "\"%s\" is an invalid command\n" +msgstr "\"%s\" nie jest poprawnym poleceniem\n" + +#: src/stored/btape.c:2935 +#, fuzzy, c-format +msgid "Interactive commands:\n" +msgstr "Niepoprawna komenda FileSet: %s\n" + +#: src/stored/btape.c:2946 +#, fuzzy, c-format +msgid "" +"\n" +"Version: %s (%s)\n" +"\n" +"Usage: btape \n" +" -b specify bootstrap file\n" +" -c set configuration file to file\n" +" -d set debug level to \n" +" -dt print timestamp in debug output\n" +" -p proceed inspite of I/O errors\n" +" -s turn off signals\n" +" -v be verbose\n" +" -? print this message.\n" +"\n" +msgstr "" +"Napisany przez Nicolas Boichat (2004)\n" +"\n" +"Wersja: %s (%s) %s %s %s\n" +"\n" +"Użycie: tray-monitor [-c config_file] [-d debug_level]\n" +" -c ustaw plik konfiguracyjny na \n" +" -d usta poziom debugowania na \n" +" -dt wyświetla znaczniki czasowe w debugingu\n" +" -t test - odczytuje konfiguracj i kończy działanie\n" +" -? 
wyświetla ten komunikat.\n" +"\n" + +#: src/stored/btape.c:3034 +#, c-format +msgid "Mount second Volume on device %s and press return when ready: " +msgstr "Zamontuj drugi Wolumen w urządzeniu %s i kiedy gotowe naciśnij Enter: " + +#: src/stored/btape.c:3062 +#, c-format +msgid "Mount blank Volume on device %s and press return when ready: " +msgstr "Zamontuj pusty Wolumen w urządzeniu %s i kiedy gotowe naciśnij Enter: " + +#: src/stored/btape.c:3082 +#, fuzzy, c-format +msgid "End of Volume \"%s\" %d records.\n" +msgstr "Nie mogę przyci wolumenu \"%s\"\n" + +#: src/stored/btape.c:3096 +#, c-format +msgid "Read block=%u, VolBytes=%s rate=%sB/s\n" +msgstr "Read block =%u, VolBytes=%s rate=%sB/s\n" + +#: src/stored/btape.c:3109 src/stored/mount.c:843 +#, fuzzy, c-format +msgid "Cannot open Dev=%s, Vol=%s\n" +msgstr " Nie można otworzyć %s: ERR=%s.\n" + +#: src/stored/block.c:89 +#, c-format +msgid "" +"Dump block %s %x: size=%d BlkNum=%d\n" +" Hdrcksum=%x cksum=%x\n" +msgstr "" +"Blok dump'a %s %x: size=%d BlkNum=%d\n" +" Hdrcksum=%x cksum=%x\n" + +#: src/stored/block.c:102 +#, c-format +msgid " Rec: VId=%u VT=%u FI=%s Strm=%s len=%d p=%x\n" +msgstr " Rec: VId=%u VT=%u FI=%s Strm=%s len=%d p=%x\n" + +#: src/stored/block.c:158 +#, c-format +msgid "%d block read errors not printed.\n" +msgstr "%d błędów odczytu bloków które nie zostały wyświetlone.\n" + +#: src/stored/block.c:248 src/stored/block.c:264 src/stored/block.c:274 +#, c-format +msgid "" +"Volume data error at %u:%u! Wanted ID: \"%s\", got \"%s\". Buffer " +"discarded.\n" +msgstr "" +"Błąd wolumenu danych dla %u:%u! Żądane ID: \"%s\", otrzymano \"%s\". Bufor " +"odrzucony.\n" + +#: src/stored/block.c:289 +#, c-format +msgid "" +"Volume data error at %u:%u! Block length %u is insane (too large), probably " +"due to a bad archive.\n" +msgstr "" +"Błąd wielkości danych dla %u:%u! Wielkość bloku %u jest gigantyczna, " +"prawdopodobnie z powodu błędnego archiwum.\n" + +#: src/stored/block.c:315 +#, c-format +msgid "" +"Volume data error at %u:%u!\n" +"Block checksum mismatch in block=%u len=%d: calc=%x blk=%x\n" +msgstr "" +"Błąd danych wolumanu przy %u:%u!\n" +"Suma kontrolna bloku niezgodna dla block=%u len=%d: calc=%x blk=%x\n" + +#: src/stored/block.c:368 src/stored/block.c:732 src/stored/block.c:806 +#: src/stored/acquire.c:485 src/stored/spool.c:309 +#, fuzzy, c-format +msgid "Could not create JobMedia record for Volume=\"%s\" Job=%s\n" +msgstr "Nieudane stworzenie rekordu JobMedia %s: ERR=%s\n" + +#: src/stored/block.c:428 +msgid "Cannot write block. Device at EOM.\n" +msgstr "Nie można zapisać bloku. EOM urządzenia.\n" + +#: src/stored/block.c:433 +msgid "Attempt to write on read-only Volume.\n" +msgstr "Próba zapisu na wolumenie tylko do odczytu.\n" + +#: src/stored/block.c:485 +#, c-format +msgid "User defined maximum volume capacity %s exceeded on device %s.\n" +msgstr "" +"Użytkownik zdefiniował maksymalną pojemność wolumenu %s która przekracza " +"dostępną na urządzeniu %s.\n" + +#: src/stored/block.c:500 +#, fuzzy, c-format +msgid "Unable to write EOF. ERR=%s\n" +msgstr "Nie można zainicjalizowa blokady BD. ERR=%s\n" + +#: src/stored/block.c:526 src/stored/block.c:551 +#, fuzzy +msgid "Write block header zeroed.\n" +msgstr "Nieudane odszyfrowanie klucza sesji.\n" + +#: src/stored/block.c:570 +#, fuzzy, c-format +msgid "Write error at %u:%u on device %s. ERR=%s.\n" +msgstr "Błąd odczytu na pliku %s. ERR=%s\n" + +#: src/stored/block.c:577 +#, c-format +msgid "End of Volume \"%s\" at %u:%u on device %s. 
Write of %u bytes got %d.\n" +msgstr "" +"Koniec Wolumenu \"%s\" przy %u:%u na urządzeniu %s. Zapisano %u bajtów, do " +"zapisu %d.\n" + +#: src/stored/block.c:658 src/stored/block.c:664 +#, fuzzy, c-format +msgid "Backspace file at EOT failed. ERR=%s\n" +msgstr "Nieudana aktualizacja rekordu Media %s: ERR=%s\n" + +#: src/stored/block.c:671 +#, fuzzy, c-format +msgid "Backspace record at EOT failed. ERR=%s\n" +msgstr "Nieudana aktualizacja rekordu Media %s: ERR=%s\n" + +#: src/stored/block.c:688 +#, fuzzy, c-format +msgid "Re-read last block at EOT failed. ERR=%s" +msgstr "Błąd odczytu na pliku %s. ERR=%s\n" + +#: src/stored/block.c:698 +#, c-format +msgid "" +"Re-read of last block: block numbers differ by more than one.\n" +"Probable tape misconfiguration and data loss. Read block=%u Want block=%u.\n" +msgstr "" +"Ponowny odczyt ostatniego bloku: ilość bloków różni się o więcej niż jeden.\n" +"Pawdopodobna błędna konfiguracja taśmy i utrata danych. Odczytany blok=%u " +"Żądany blok=%u.\n" + +#: src/stored/block.c:703 +#, c-format +msgid "" +"Re-read of last block OK, but block numbers differ. Read block=%u Want block=" +"%u.\n" +msgstr "" +"Ponowny odczyt ostatniego bloku zakończony sukcesem. jednak różna ilość " +"bloków.Odczytany blok=%u Żądany blok=%u.\n" + +#: src/stored/block.c:707 +#, fuzzy +msgid "Re-read of last block succeeded.\n" +msgstr "Nieudane odszyfrowanie klucza sesji.\n" + +#: src/stored/block.c:739 +#, c-format +msgid "" +"Error writing final EOF to tape. This Volume may not be readable.\n" +"%s" +msgstr "" +"Błąd podczas końcowego zapisu EOF na taśmę. Ten Wolumen może nie nadawać się " +"do odczytu.\n" +"%s" + +#: src/stored/block.c:753 +#, c-format +msgid "" +"Error writing final part to DVD. This Volume may not be readable.\n" +"%s" +msgstr "" +"Błąd podczas zapisu ostatniej części na DVD. Ten Wolumen może nie nadawać " +"się do odczytu.\n" +"%s" + +#: src/stored/block.c:855 +#, c-format +msgid "" +"Error while writing, current part number is less than the total number of " +"parts (%d/%d, device=%s)\n" +msgstr "" +"Błąd podczas zapisu, bieżący numer części urzadzenia jest mniejszy " +"niżcałkowita ilość części (%d/%d, urządzenie=%s)\n" + +#: src/stored/block.c:863 +#, fuzzy, c-format +msgid "Unable to open device next part %s: ERR=%s\n" +msgstr "Nie mogę otworzyć pliku \"%s\": ERR=%s\n" + +#: src/stored/block.c:883 +#, c-format +msgid "" +"End of Volume \"%s\" at %u:%u on device %s (part_size=%s, free_space=%s, " +"free_space_errno=%d, errmsg=%s).\n" +msgstr "" +"Koniec Wolumenu \"%s\" dla %u:%u na urzadzeniu %s (part_size=%s, free_space=%" +"s, free_space_errno=%d, errmsg=%s).\n" + +#: src/stored/block.c:896 +#, c-format +msgid "" +"End of Volume \"%s\" at %u:%u on device %s (part_size=%s, free_space=%s, " +"free_space_errno=%d).\n" +msgstr "" + +#: src/stored/block.c:955 +#, c-format +msgid "Block buffer size looping problem on device %s\n" +msgstr "Problem zapętlenia dot. rozmiaru bloku bufora na urządzeniu %s\n" + +#: src/stored/block.c:983 +#, fuzzy, c-format +msgid "Unable to open device part=%d %s: ERR=%s\n" +msgstr "Nie mogę otworzyć pliku \"%s\": ERR=%s\n" + +#: src/stored/block.c:1009 +#, fuzzy, c-format +msgid "Read error on fd=%d at file:blk %u:%u on device %s. ERR=%s.\n" +msgstr "Błąd odczytu na pliku %s. ERR=%s\n" + +#: src/stored/block.c:1022 +#, fuzzy, c-format +msgid "Read zero bytes at %u:%u on device %s.\n" +msgstr "Błąd odczytu na pliku %s. ERR=%s\n" + +#: src/stored/block.c:1046 +#, c-format +msgid "" +"Volume data error at %u:%u! 
Very short block of %d bytes on device %s " +"discarded.\n" +msgstr "" + +#: src/stored/block.c:1071 +#, c-format +msgid "Block length %u is greater than buffer %u. Attempting recovery.\n" +msgstr "" + +#: src/stored/block.c:1090 +#, c-format +msgid "Setting block buffer size to %u bytes.\n" +msgstr "Ustawiam rozmiar bloku bufora na %u bajtów.\n" + +#: src/stored/block.c:1105 +#, c-format +msgid "" +"Volume data error at %u:%u! Short block of %d bytes on device %s discarded.\n" +msgstr "" + +#: src/stored/bextract.c:79 +#, fuzzy, c-format +msgid "" +"\n" +"Version: %s (%s)\n" +"\n" +"Usage: bextract \n" +" -b specify a bootstrap file\n" +" -c specify a Storage configuration file\n" +" -d set debug level to \n" +" -dt print timestamp in debug output\n" +" -e exclude list\n" +" -i include list\n" +" -p proceed inspite of I/O errors\n" +" -v verbose\n" +" -V specify Volume names (separated by |)\n" +" -? print this message\n" +"\n" +msgstr "" +"\n" +"Wersja: %s (%s)\n" +"\n" +"Użycie: bacula-fd [-f -s] [-c config_file] [-d debug_level]\n" +" -c uyj jako pliku konfiguracyjnego\n" +" -d ustaw poziom debugingu na \n" +" -dt wyświetl znacznik czasowu podczas wywietlania debugingu\n" +" -f uruchom na pierwszym planie (dla debugingu)\n" +" -g identyfikator grupy\n" +" -k keep readall capabilities\n" +" -m wyświetl informacje kaboom (dla debugingu)\n" +" -s brak sygnałów (dla debugingu)\n" +" -t przetestuj plik konfiguracji i zakocz\n" +" -u identyfikator uytkownika\n" +" -v gadatliwe komunikaty uytkownika\n" +" -? wyświetl ten komunikat.\n" +"\n" + +#: src/stored/bextract.c:145 src/stored/bls.c:149 +#, fuzzy, c-format +msgid "Could not open exclude file: %s, ERR=%s\n" +msgstr "Nie można otworzyć pliku bootstrap %s: ERR=%s\n" + +#: src/stored/bextract.c:160 src/stored/bls.c:164 +#, fuzzy, c-format +msgid "Could not open include file: %s, ERR=%s\n" +msgstr "Nie można otworzyć pliku bootstrap %s: ERR=%s\n" + +#: src/stored/bextract.c:217 +#, c-format +msgid "%d Program Name and/or Program Data Stream records ignored.\n" +msgstr "Nazwa Programu %d i/lub zapisy Strumieni Danych zignorowane.\n" + +#: src/stored/bextract.c:221 +#, c-format +msgid "%d Win32 data or Win32 gzip data stream records. Ignored.\n" +msgstr "" +"Strumienie zapisu %d danych Win32 lub danych Win32 gzip. Zignorowano.\n" + +#: src/stored/bextract.c:248 +#, fuzzy, c-format +msgid "Cannot stat %s. It must exist. ERR=%s\n" +msgstr "Nie można wykona stat na pliku %s: ERR=%s\n" + +#: src/stored/bextract.c:252 +#, c-format +msgid "%s must be a directory.\n" +msgstr "%s musi być katalogiem.\n" + +#: src/stored/bextract.c:273 +#, c-format +msgid "%u files restored.\n" +msgstr "%u przywróconych plików.\n" + +#: src/stored/bextract.c:320 src/stored/bextract.c:488 +#, fuzzy +msgid "Logic error output file should be open but is not.\n" +msgstr "Błąd logiki: plik wyjściowy powinien być otwarty\n" + +#: src/stored/bextract.c:327 src/stored/bscan.c:666 src/stored/bls.c:387 +#, fuzzy +msgid "Cannot continue.\n" +msgstr "Nie można połączyć się do demona.\n" + +#: src/stored/bextract.c:344 +#, fuzzy, c-format +msgid "%s was deleted.\n" +msgstr "Status zadania: Anulowane" + +#: src/stored/bextract.c:388 +#, fuzzy, c-format +msgid "Seek error on %s: %s\n" +msgstr "Błąd zapisu na %s: %s\n" + +#: src/stored/bextract.c:444 +#, fuzzy, c-format +msgid "Uncompression error. ERR=%d\n" +msgstr "Błąd dekompresji na pliku %s. ERR=%s\n" + +#: src/stored/bextract.c:479 +msgid "Got Program Name or Data Stream. 
Ignored.\n" +msgstr "Otrzymano Nazwę Programu lub Strumień Danych. Zignorowano.\n" + +#: src/stored/bscan.c:117 +#, fuzzy, c-format +msgid "" +"\n" +"Version: %s (%s)\n" +"\n" +"Usage: bscan [ options ] \n" +" -b bootstrap specify a bootstrap file\n" +" -c specify configuration file\n" +" -d set debug level to \n" +" -dt print timestamp in debug output\n" +" -m update media info in database\n" +" -D specify the driver database name (default NULL)\n" +" -n specify the database name (default bacula)\n" +" -u specify database user name (default bacula)\n" +" -P specify database password (default none)\n" +" -h specify database host (default NULL)\n" +" -t specify database port (default 0)\n" +" -p proceed inspite of I/O errors\n" +" -r list records\n" +" -s synchronize or store in database\n" +" -S show scan progress periodically\n" +" -v verbose\n" +" -V specify Volume names (separated by |)\n" +" -w specify working directory (default from conf file)\n" +" -? print this message\n" +"\n" +msgstr "" +"\n" +"Wersja: %s (%s)\n" +"\n" +"Użycie: bacula-fd [-f -s] [-c config_file] [-d debug_level]\n" +" -c uyj jako pliku konfiguracyjnego\n" +" -d ustaw poziom debugingu na \n" +" -dt wyświetl znacznik czasowu podczas wywietlania debugingu\n" +" -f uruchom na pierwszym planie (dla debugingu)\n" +" -g identyfikator grupy\n" +" -k keep readall capabilities\n" +" -m wyświetl informacje kaboom (dla debugingu)\n" +" -s brak sygnałów (dla debugingu)\n" +" -t przetestuj plik konfiguracji i zakocz\n" +" -u identyfikator uytkownika\n" +" -v gadatliwe komunikaty uytkownika\n" +" -? wyświetl ten komunikat.\n" +"\n" + +#: src/stored/bscan.c:261 src/stored/stored.c:337 +#, fuzzy, c-format +msgid "No Storage resource defined in %s. Cannot continue.\n" +msgstr "Brak definicji zasobu Messages w %s\n" + +#: src/stored/bscan.c:269 src/stored/stored.c:368 +#, fuzzy, c-format +msgid "No Working Directory defined in %s. Cannot continue.\n" +msgstr "" +"Katalog dla Wtyczek nie jest zdefiniowany. Nie można uy wtyczki: \"%\"\n" + +#: src/stored/bscan.c:277 +#, fuzzy, c-format +msgid "Working Directory: %s not found. Cannot continue.\n" +msgstr "" +"Katalog dla Wtyczek nie jest zdefiniowany. Nie można uy wtyczki: \"%\"\n" + +#: src/stored/bscan.c:281 +#, fuzzy, c-format +msgid "Working Directory: %s is not a directory. Cannot continue.\n" +msgstr "" +"Katalog dla Wtyczek nie jest zdefiniowany. Nie można uy wtyczki: \"%\"\n" + +#: src/stored/bscan.c:295 src/stored/bscan.c:369 +#, c-format +msgid "First Volume Size = %s\n" +msgstr "Rozmiar Pierwszego Wolumenu = %s\n" + +#: src/stored/bscan.c:342 +#, fuzzy, c-format +msgid "Create JobMedia for Job %s\n" +msgstr "Nieudane stworzenie rekordu JobMedia %s: ERR=%s\n" + +#: src/stored/bscan.c:351 +#, fuzzy, c-format +msgid "Could not create JobMedia record for Volume=%s Job=%s\n" +msgstr "Nieudane stworzenie rekordu JobMedia %s: ERR=%s\n" + +#: src/stored/bscan.c:414 +#, c-format +msgid "done: %d%%\n" +msgstr "gotowe: %d%%\n" + +#: src/stored/bscan.c:438 +msgid "Volume is prelabeled. This tape cannot be scanned.\n" +msgstr "" +"Wolumen ma już wcześniej nadaną rtykietę. Ta taśma nie może być skanowana.\n" + +#: src/stored/bscan.c:450 +#, fuzzy, c-format +msgid "Pool record for %s found in DB.\n" +msgstr "Brak rekordu dla %d %s\n" + +#: src/stored/bscan.c:454 +#, fuzzy, c-format +msgid "VOL_LABEL: Pool record not found for Pool: %s\n" +msgstr "Brak rekordu dla %d %s\n" + +#: src/stored/bscan.c:460 +#, c-format +msgid "VOL_LABEL: PoolType mismatch. 
DB=%s Vol=%s\n" +msgstr "VOL_LABEL: Błędnie wprowadzono PoolType. DB=%s Vol=%s\n" + +#: src/stored/bscan.c:464 +#, c-format +msgid "Pool type \"%s\" is OK.\n" +msgstr "Rodzaj puli \"%s\" OK.\n" + +#: src/stored/bscan.c:474 +#, fuzzy, c-format +msgid "Media record for %s found in DB.\n" +msgstr "Brak rekordu dla %d %s\n" + +#: src/stored/bscan.c:481 +#, fuzzy, c-format +msgid "VOL_LABEL: Media record not found for Volume: %s\n" +msgstr "Nieudana aktualizacja rekordu Media %s: ERR=%s\n" + +#: src/stored/bscan.c:488 +#, c-format +msgid "VOL_LABEL: MediaType mismatch. DB=%s Vol=%s\n" +msgstr "" + +#: src/stored/bscan.c:492 +#, c-format +msgid "Media type \"%s\" is OK.\n" +msgstr "" + +#: src/stored/bscan.c:502 +#, c-format +msgid "VOL_LABEL: OK for Volume: %s\n" +msgstr "VOL_LABEL: OK dla Wolumenu: %s\n" + +#: src/stored/bscan.c:509 +#, c-format +msgid "%d \"errors\" ignored before first Start of Session record.\n" +msgstr "" + +#: src/stored/bscan.c:520 +#, fuzzy, c-format +msgid "SOS_LABEL: Found Job record for JobId: %d\n" +msgstr "Nie mogę otrzyma rekordu Zadania dla JobId=%s: ERR=%s\n" + +#: src/stored/bscan.c:525 +#, fuzzy, c-format +msgid "SOS_LABEL: Job record not found for JobId: %d\n" +msgstr "Nie znaleziono zasobu Zadania dla \"%s\".\n" + +#: src/stored/bscan.c:551 +#, c-format +msgid "SOS_LABEL: VolSessId mismatch for JobId=%u. DB=%d Vol=%d\n" +msgstr "" + +#: src/stored/bscan.c:557 +#, c-format +msgid "SOS_LABEL: VolSessTime mismatch for JobId=%u. DB=%d Vol=%d\n" +msgstr "" + +#: src/stored/bscan.c:563 +#, c-format +msgid "SOS_LABEL: PoolId mismatch for JobId=%u. DB=%d Vol=%d\n" +msgstr "" + +#: src/stored/bscan.c:581 src/stored/bscan.c:1117 +#, c-format +msgid "Could not find SessId=%d SessTime=%d for EOS record.\n" +msgstr "" + +#: src/stored/bscan.c:625 +#, fuzzy, c-format +msgid "Could not update job record. ERR=%s\n" +msgstr "Nie można stworzyć %s: ERR=%s\n" + +#: src/stored/bscan.c:636 +#, c-format +msgid "End of all Volumes. VolFiles=%u VolBlocks=%u VolBytes=%s\n" +msgstr "" + +#: src/stored/bscan.c:648 +#, c-format +msgid "Could not find Job for SessId=%d SessTime=%d record.\n" +msgstr "" + +#: src/stored/bscan.c:679 +#, c-format +msgid "%s file records. At file:blk=%s:%s bytes=%s\n" +msgstr "" + +#: src/stored/bscan.c:733 +#, fuzzy, c-format +msgid "Got MD5 record: %s\n" +msgstr "Brak rekordu dla %d %s\n" + +#: src/stored/bscan.c:741 +#, fuzzy, c-format +msgid "Got SHA1 record: %s\n" +msgstr "Brak rekordu dla %d %s\n" + +#: src/stored/bscan.c:749 +#, fuzzy, c-format +msgid "Got SHA256 record: %s\n" +msgstr "Brak rekordu dla %d %s\n" + +#: src/stored/bscan.c:757 +#, fuzzy, c-format +msgid "Got SHA512 record: %s\n" +msgstr "Brak rekordu dla %d %s\n" + +#: src/stored/bscan.c:765 src/stored/bscan.c:772 +msgid "Got signed digest record\n" +msgstr "" + +#: src/stored/bscan.c:778 +#, c-format +msgid "Got Prog Names Stream: %s\n" +msgstr "" + +#: src/stored/bscan.c:784 +msgid "Got Prog Data Stream record.\n" +msgstr "" + +#: src/stored/bscan.c:818 +#, fuzzy, c-format +msgid "Unknown stream type!!! stream=%d len=%i\n" +msgstr "Nieznany rodzaj zasobu %d\n" + +#: src/stored/bscan.c:886 +#, fuzzy, c-format +msgid "Could not create File Attributes record. ERR=%s\n" +msgstr "Nie można stworzyć %s: ERR=%s\n" + +#: src/stored/bscan.c:892 +#, fuzzy, c-format +msgid "Created File record: %s\n" +msgstr "Niepoprawna komenda FileSet: %s\n" + +#: src/stored/bscan.c:937 +#, fuzzy, c-format +msgid "Could not create media record. 
ERR=%s\n" +msgstr "Nie można stworzyć %s: ERR=%s\n" + +#: src/stored/bscan.c:941 src/stored/bscan.c:962 +#, fuzzy, c-format +msgid "Could not update media record. ERR=%s\n" +msgstr " Nie można otworzyć katalogu %s: ERR=%s\n" + +#: src/stored/bscan.c:945 +#, fuzzy, c-format +msgid "Created Media record for Volume: %s\n" +msgstr "Nieudane stworzenie rekordu JobMedia %s: ERR=%s\n" + +#: src/stored/bscan.c:966 +#, fuzzy, c-format +msgid "Updated Media record at end of Volume: %s\n" +msgstr "Nieudana aktualizacja rekordu Media %s: ERR=%s\n" + +#: src/stored/bscan.c:983 +#, fuzzy, c-format +msgid "Could not create pool record. ERR=%s\n" +msgstr "Nie można stworzyć %s: ERR=%s\n" + +#: src/stored/bscan.c:987 +#, fuzzy, c-format +msgid "Created Pool record for Pool: %s\n" +msgstr "Brak rekordu dla %d %s\n" + +#: src/stored/bscan.c:1006 +#, fuzzy, c-format +msgid "Could not get Client record. ERR=%s\n" +msgstr "Nie można stworzyć %s: ERR=%s\n" + +#: src/stored/bscan.c:1016 +#, fuzzy, c-format +msgid "Created Client record for Client: %s\n" +msgstr "Nie można znale zasobu Client %s\n" + +#: src/stored/bscan.c:1033 +#, fuzzy, c-format +msgid "Fileset \"%s\" already exists.\n" +msgstr "rekord puli %s ju istnieje\n" + +#: src/stored/bscan.c:1037 +#, fuzzy, c-format +msgid "Could not create FileSet record \"%s\". ERR=%s\n" +msgstr "Nie można stworzyć %s: ERR=%s\n" + +#: src/stored/bscan.c:1042 +#, fuzzy, c-format +msgid "Created FileSet record \"%s\"\n" +msgstr "Niepoprawna komenda FileSet: %s\n" + +#: src/stored/bscan.c:1089 +#, fuzzy, c-format +msgid "Could not create JobId record. ERR=%s\n" +msgstr "Nie można stworzyć %s: ERR=%s\n" + +#: src/stored/bscan.c:1095 +#, fuzzy, c-format +msgid "Could not update job start record. ERR=%s\n" +msgstr "Nie można stworzyć pliku bootstrap %s: ERR=%s\n" + +#: src/stored/bscan.c:1098 +#, fuzzy, c-format +msgid "Created new JobId=%u record for original JobId=%u\n" +msgstr "Nieudane stworzenie rekordu JobMedia %s: ERR=%s\n" + +#: src/stored/bscan.c:1148 +#, fuzzy, c-format +msgid "Could not update JobId=%u record. ERR=%s\n" +msgstr "Nie można stworzyć %s: ERR=%s\n" + +#: src/stored/bscan.c:1153 +#, c-format +msgid "Updated Job termination record for JobId=%u Level=%s TermStat=%c\n" +msgstr "" + +#: src/stored/bscan.c:1178 +#, fuzzy, c-format +msgid "Job Termination code: %d" +msgstr "Status zadania: Zakoczone" + +#: src/stored/bscan.c:1183 +#, c-format +msgid "" +"%s\n" +"JobId: %d\n" +"Job: %s\n" +"FileSet: %s\n" +"Backup Level: %s\n" +"Client: %s\n" +"Start time: %s\n" +"End time: %s\n" +"Files Written: %s\n" +"Bytes Written: %s\n" +"Volume Session Id: %d\n" +"Volume Session Time: %d\n" +"Last Volume Bytes: %s\n" +"Termination: %s\n" +"\n" +msgstr "" + +#: src/stored/bscan.c:1241 +#, fuzzy, c-format +msgid "Could not create JobMedia record. ERR=%s\n" +msgstr "Nieudane stworzenie rekordu JobMedia %s: ERR=%s\n" + +#: src/stored/bscan.c:1245 +#, fuzzy, c-format +msgid "Created JobMedia record JobId %d, MediaId %d\n" +msgstr "Nieudane stworzenie rekordu JobMedia %s: ERR=%s\n" + +#: src/stored/bscan.c:1261 +#, c-format +msgid "Could not find SessId=%d SessTime=%d for MD5/SHA1 record.\n" +msgstr "" + +#: src/stored/bscan.c:1275 +#, fuzzy, c-format +msgid "Could not add MD5/SHA1 to File record. 
ERR=%s\n" +msgstr "Nie można stworzyć %s: ERR=%s\n" + +#: src/stored/bscan.c:1280 +#, fuzzy +msgid "Updated MD5/SHA1 record\n" +msgstr "Brak rekordu dla %d %s\n" + +#: src/stored/read_record.c:88 +#, c-format +msgid "End of Volume at file %u on device %s, Volume \"%s\"\n" +msgstr "Koniec Wolumenu na pliku %u urządzenia %s, Wolumen \"%s\"\n" + +#: src/stored/read_record.c:92 +msgid "End of all volumes.\n" +msgstr "Koniec wszystkich Wolumenów.\n" + +#: src/stored/read_record.c:136 +msgid "part" +msgstr "część" + +#: src/stored/read_record.c:139 +msgid "file" +msgstr "plik" + +#: src/stored/read_record.c:142 +#, fuzzy, c-format +msgid "End of %s %u on device %s, Volume \"%s\"\n" +msgstr "Nieudane stworzenie rekordu JobMedia %s: ERR=%s\n" + +#: src/stored/read_record.c:157 +msgid "Did fsr in attemp to skip bad record.\n" +msgstr "" + +#: src/stored/read_record.c:382 +#, c-format +msgid "Forward spacing Volume \"%s\" to file:block %u:%u.\n" +msgstr "" + +#: src/stored/read_record.c:406 +msgid "Begin Session" +msgstr "Rozpocznij Sesję" + +#: src/stored/read_record.c:410 +msgid "End Session" +msgstr "Zakończ Sesję" + +#: src/stored/read_record.c:416 +#, fuzzy, c-format +msgid "Unknown code %d\n" +msgstr "Nieznany rodzaj zasobu %d\n" + +#: src/stored/reserve.c:88 +#, fuzzy, c-format +msgid "Unable to initialize reservation lock. ERR=%s\n" +msgstr "Nie można zainicjalizowa blokady BD. ERR=%s\n" + +#: src/stored/reserve.c:158 +#, c-format +msgid "Hey! num_writers=%d!!!!\n" +msgstr "Hej! num_writers=%d!!!!\n" + +#: src/stored/reserve.c:256 +#, fuzzy +msgid "3939 Could not get dcr\n" +msgstr "Nie można stworzyć skrt.\n" + +#: src/stored/reserve.c:358 +#, fuzzy, c-format +msgid "Device reservation failed for JobId=%d: %s\n" +msgstr "Nieudana walidacja sygnatury dla %s: %s\n" + +#: src/stored/reserve.c:367 +#, fuzzy, c-format +msgid "Failed command: %s\n" +msgstr "Za komenda level: %s\n" + +#: src/stored/reserve.c:626 src/stored/dircmd.c:618 +#, c-format +msgid "" +"\n" +" Device \"%s\" in changer \"%s\" requested by DIR could not be opened or " +"does not exist.\n" +msgstr "" +"\n" +" Żądane przez DIR Urządzenie \"%s\" w zmieniarce \"%s\" nie może być " +"otworzone lub nie istnieje.\n" + +#: src/stored/reserve.c:630 src/stored/dircmd.c:596 +#, c-format +msgid "" +"\n" +" Device \"%s\" requested by DIR could not be opened or does not exist.\n" +msgstr "" +"\n" +" Żądane przez DIR Urządzenie \"%s\" nie może być otworzone lub nie " +"istnieje.\n" + +#: src/stored/reserve.c:646 +#, fuzzy, c-format +msgid "3926 Could not get dcr for device: %s\n" +msgstr "Nie można ustawi pola Finder Info na %s\n" + +#: src/stored/reserve.c:772 +#, c-format +msgid "3601 JobId=%u device %s is BLOCKED due to user unmount.\n" +msgstr "" +"3601 JobId=%u - urządzenie %s jest ZABLOKOWANE z powodu odmontowania przez " +"użytkownika.\n" + +#: src/stored/reserve.c:782 +#, c-format +msgid "3602 JobId=%u device %s is busy (already reading/writing).\n" +msgstr "3602 JobId=%u - urządzenie %s jest zajęte (trwa odczyt/zapis).\n" + +#: src/stored/reserve.c:829 +#, c-format +msgid "3603 JobId=%u device %s is busy reading.\n" +msgstr "3603 JobId=%u - urządzenei %s jest zajęte, trwa odczyt.\n" + +#: src/stored/reserve.c:838 +#, c-format +msgid "3604 JobId=%u device %s is BLOCKED due to user unmount.\n" +msgstr "" +"3604 JobId=%u - urządzenie %s jest ZABLOKOWANE z powodu odmontowania przez " +"użytkownika.\n" + +#: src/stored/reserve.c:875 +#, c-format +msgid "" +"3608 JobId=%u wants Pool=\"%s\" but have Pool=\"%s\" nreserve=%d on drive %" 
+"s.\n" +msgstr "" +"3608 JobId=%u żąda Pool=\"%s\" ale otrzymało Pool=\"%s\" nreserve=%d na " +"dysku %s.\n" + +#: src/stored/reserve.c:899 +#, c-format +msgid "3609 JobId=%u Max concurrent jobs exceeded on drive %s.\n" +msgstr "" +"3609 JobId=%u - Na dysku %s przekroczono maksymalną ilość równoczesnych " +"zadań.\n" + +#: src/stored/reserve.c:911 +#, c-format +msgid "3610 JobId=%u Volume max jobs exceeded on drive %s.\n" +msgstr "3610 JobId=%u - Na dysku %s przekroczono maksymalny wolumen zadań.\n" + +#: src/stored/reserve.c:964 +#, c-format +msgid "3605 JobId=%u wants free drive but device %s is busy.\n" +msgstr "3605 JobId=%u żąda wolnego dysku ale urządzenie %s jest zajęte.\n" + +#: src/stored/reserve.c:972 +#, c-format +msgid "3606 JobId=%u prefers mounted drives, but drive %s has no Volume.\n" +msgstr "" +"3606 JobId=%u preferuje zamontowane dyski, jednak dysk %s nie ma Wolumenu.\n" + +#: src/stored/reserve.c:994 +#, c-format +msgid "3607 JobId=%u wants Vol=\"%s\" drive has Vol=\"%s\" on drive %s.\n" +msgstr "3607 JobId=%u żąda Vol=\"%s\" napęd posiada Vol=\"%s\" w napędzie %s.\n" + +#: src/stored/reserve.c:1049 +msgid "Logic error!!!! JobId=%u Should not get here.\n" +msgstr "Błąd logiki!!!! JobId=%u Nie powinienem być tutaj.\n" + +#: src/stored/reserve.c:1050 +msgid "3910 JobId=%u Logic error!!!! drive %s Should not get here.\n" +msgstr "3910 JobId=%u Błąd logiki!!!! napęd %s Nie powinienem być tutaj.\n" + +#: src/stored/reserve.c:1053 +msgid "Logic error!!!! Should not get here.\n" +msgstr "Błąd logiki!!!! Nie powinienem być tutaj.\n" + +#: src/stored/reserve.c:1056 +#, c-format +msgid "3911 JobId=%u failed reserve drive %s.\n" +msgstr "3911 JobId=%u niepowodzenie przy rezerwacji dysku %s.\n" + +#: src/stored/autochanger.c:65 +#, fuzzy, c-format +msgid "No Changer Name given for device %s. Cannot continue.\n" +msgstr "Brak definicji zasobu Messages w %s\n" + +#: src/stored/autochanger.c:71 +#, fuzzy, c-format +msgid "No Changer Command given for device %s. Cannot continue.\n" +msgstr "Brak definicji zasobu Messages w %s\n" + +#: src/stored/autochanger.c:84 +#, fuzzy, c-format +msgid "" +"Media Type not the same for all devices in changer %s. Cannot continue.\n" +msgstr "Brak definicji zasobu Messages w %s\n" + +#: src/stored/autochanger.c:152 +#, c-format +msgid "" +"Invalid slot=%d defined in catalog for Volume \"%s\" on %s. Manual load may " +"be required.\n" +msgstr "" +"Zdefiniowany błędny slot=%d w katalogu dla Wolumenu \"%s\" na %s. Może być " +"konieczne załadowanie ręczne.\n" + +#: src/stored/autochanger.c:157 +#, c-format +msgid "No \"Changer Device\" for %s. Manual load of Volume may be required.\n" +msgstr "" +"Brak \"Changer Device\" dla %s. Może być konieczne ręczne załadowanie " +"Wolumenu.\n" + +#: src/stored/autochanger.c:161 +#, c-format +msgid "No \"Changer Command\" for %s. Manual load of Volume may be requird.\n" +msgstr "" +"Brak \"Changer Command\" dla %s. 
Może być konieczne ręczne załadowanie " +"Wolumenu.\n" + +#: src/stored/autochanger.c:191 +#, c-format +msgid "3304 Issuing autochanger \"load slot %d, drive %d\" command.\n" +msgstr "" + +#: src/stored/autochanger.c:199 +#, c-format +msgid "3305 Autochanger \"load slot %d, drive %d\", status is OK.\n" +msgstr "3305 Autochanger \"załadowany slot %d, napęd %d\", status is OK.\n" + +#: src/stored/autochanger.c:208 +#, c-format +msgid "" +"3992 Bad autochanger \"load slot %d, drive %d\": ERR=%s.\n" +"Results=%s\n" +msgstr "" +"3992 Zła biblioteka taśmowa \"załadowany slot %d, napęd %d\": ERR=%s.\n" +"Wyniki=%s\n" + +#: src/stored/autochanger.c:269 +#, c-format +msgid "3301 Issuing autochanger \"loaded? drive %d\" command.\n" +msgstr "3301 Wywołanie autochanger \"załadowany? napęd %d\" komenda.\n" + +#: src/stored/autochanger.c:278 +#, c-format +msgid "3302 Autochanger \"loaded? drive %d\", result is Slot %d.\n" +msgstr "" + +#: src/stored/autochanger.c:282 +#, c-format +msgid "3302 Autochanger \"loaded? drive %d\", result: nothing loaded.\n" +msgstr "" + +#: src/stored/autochanger.c:289 +#, fuzzy, c-format +msgid "" +"3991 Bad autochanger \"loaded? drive %d\" command: ERR=%s.\n" +"Results=%s\n" +msgstr "Za komenda storage: %s" + +#: src/stored/autochanger.c:354 src/stored/autochanger.c:472 +#, c-format +msgid "3307 Issuing autochanger \"unload slot %d, drive %d\" command.\n" +msgstr "" + +#: src/stored/autochanger.c:367 +#, c-format +msgid "" +"3995 Bad autochanger \"unload slot %d, drive %d\": ERR=%s\n" +"Results=%s\n" +msgstr "" + +#: src/stored/autochanger.c:429 +#, c-format +msgid "Volume \"%s\" wanted on %s is in use by device %s\n" +msgstr "" + +#: src/stored/autochanger.c:490 +#, fuzzy, c-format +msgid "3995 Bad autochanger \"unload slot %d, drive %d\": ERR=%s.\n" +msgstr "Za komenda storage: %s" + +#: src/stored/autochanger.c:535 +#, c-format +msgid "3993 Device %s not an autochanger device.\n" +msgstr "" + +#: src/stored/autochanger.c:556 +#, fuzzy, c-format +msgid "3306 Issuing autochanger \"%s\" command.\n" +msgstr "Za komenda storage: %s" + +#: src/stored/autochanger.c:559 +msgid "3996 Open bpipe failed.\n" +msgstr "" + +#: src/stored/dircmd.c:155 +#, fuzzy, c-format +msgid "Connection request from %s failed.\n" +msgstr "Podłączenie do Klienta %s:%d\n" + +#: src/stored/dircmd.c:165 +#, fuzzy, c-format +msgid "Invalid connection from %s. 
Len=%d\n" +msgstr "Rozłączanie od Klienta %s:%d\n" + +#: src/stored/dircmd.c:294 +#, fuzzy, c-format +msgid "3991 Bad setdebug command: %s\n" +msgstr "2991 Bdna komenda setdebug: %s\n" + +#: src/stored/dircmd.c:315 +#, fuzzy, c-format +msgid "3904 Job %s not found.\n" +msgstr "2901 Zadanie %s nie znaleziono.\n" + +#: src/stored/dircmd.c:343 +#, fuzzy, c-format +msgid "JobId=%d Job=\"%s\" marked to be canceled.\n" +msgstr "2001 Zadanie %s oznaczone do anulowania.\n" + +#: src/stored/dircmd.c:345 +#, fuzzy, c-format +msgid "3000 Job %s marked to be canceled.\n" +msgstr "2001 Zadanie %s oznaczone do anulowania.\n" + +#: src/stored/dircmd.c:349 +#, fuzzy +msgid "3903 Error scanning cancel command.\n" +msgstr "2902 Błąd skanowania komendy anuluj.\n" + +#: src/stored/dircmd.c:425 src/stored/dircmd.c:788 src/stored/dircmd.c:880 +#: src/stored/dircmd.c:991 src/stored/dircmd.c:1113 src/stored/dircmd.c:1156 +#, c-format +msgid "3999 Device \"%s\" not found or could not be opened.\n" +msgstr "" + +#: src/stored/dircmd.c:430 +#, fuzzy, c-format +msgid "3903 Error scanning label command: %s\n" +msgstr "2902 Błąd skanowania komendy anuluj.\n" + +#: src/stored/dircmd.c:480 +#, fuzzy, c-format +msgid "3910 Unable to open device %s: ERR=%s\n" +msgstr "Nie mogę otworzyć pliku \"%s\": ERR=%s\n" + +#: src/stored/dircmd.c:497 +#, c-format +msgid "3920 Cannot label Volume because it is already labeled: \"%s\"\n" +msgstr "" +"3920 Nie można nadac etykiety wolumenowi ponieważ etykieta jest nadana: \"%s" +"\"\n" + +#: src/stored/dircmd.c:504 +msgid "3921 Wrong volume mounted.\n" +msgstr "3921 Zamontowano niepoprawny Wolumen.\n" + +#: src/stored/dircmd.c:508 +msgid "3922 Cannot relabel an ANSI/IBM labeled Volume.\n" +msgstr "" + +#: src/stored/dircmd.c:516 +#, fuzzy, c-format +msgid "3912 Failed to label Volume: ERR=%s\n" +msgstr "Nie mogę otworzyć pliku \"%s\": ERR=%s\n" + +#: src/stored/dircmd.c:526 +#, fuzzy, c-format +msgid "3914 Failed to label Volume (no media): ERR=%s\n" +msgstr "Nie mogę otworzyć pliku \"%s\": ERR=%s\n" + +#: src/stored/dircmd.c:529 +#, c-format +msgid "3913 Cannot label Volume. Unknown status %d from read_volume_label()\n" +msgstr "" + +#: src/stored/dircmd.c:563 +#, fuzzy, c-format +msgid "3001 Mounted Volume: %s\n" +msgstr "Nie mogę przyci wolumenu \"%s\"\n" + +#: src/stored/dircmd.c:567 src/stored/dircmd.c:1192 +#, c-format +msgid "" +"3902 Cannot mount Volume on Storage Device %s because:\n" +"%s" +msgstr "" +"3902 Nie można zamontować Wolumenu na urządzeniu Storage %s ponieważ:\n" +"%s" + +#: src/stored/dircmd.c:680 +msgid "Specified slot ignored. " +msgstr "Zdefiniowany slot został zignorowany. 
" + +#: src/stored/dircmd.c:695 src/stored/dircmd.c:752 +#, fuzzy, c-format +msgid "3901 Unable to open device %s: ERR=%s\n" +msgstr "Nie mogę otworzyć pliku \"%s\": ERR=%s\n" + +#: src/stored/dircmd.c:715 src/stored/dircmd.c:743 +#, c-format +msgid "3001 Device %s is mounted with Volume \"%s\"\n" +msgstr "" + +#: src/stored/dircmd.c:718 src/stored/dircmd.c:746 src/stored/dircmd.c:761 +#, c-format +msgid "" +"3905 Device %s open but no Bacula volume is mounted.\n" +"If this is not a blank tape, try unmounting and remounting the Volume.\n" +msgstr "" + +#: src/stored/dircmd.c:728 +#, c-format +msgid "3001 Device %s is doing acquire.\n" +msgstr "" + +#: src/stored/dircmd.c:733 src/stored/dircmd.c:851 +#, c-format +msgid "3903 Device %s is being labeled.\n" +msgstr "" + +#: src/stored/dircmd.c:758 +#, c-format +msgid "3001 Device %s is already mounted with Volume \"%s\"\n" +msgstr "" + +#: src/stored/dircmd.c:767 +#, c-format +msgid "3002 Device %s is mounted.\n" +msgstr "3002 Urządzenie %s jest zamontowane.\n" + +#: src/stored/dircmd.c:770 src/stored/dircmd.c:823 src/stored/dircmd.c:839 +#: src/stored/dircmd.c:871 +#, c-format +msgid "3907 %s" +msgstr "3907 %s" + +#: src/stored/dircmd.c:773 +#, c-format +msgid "3906 File device %s is always mounted.\n" +msgstr "" + +#: src/stored/dircmd.c:782 +#, c-format +msgid "3905 Bizarre wait state %d\n" +msgstr "" + +#: src/stored/dircmd.c:792 +#, fuzzy, c-format +msgid "3909 Error scanning mount command: %s\n" +msgstr "2902 Błąd skanowania komendy anuluj.\n" + +#: src/stored/dircmd.c:820 src/stored/dircmd.c:873 +#, c-format +msgid "3002 Device %s unmounted.\n" +msgstr "" + +#: src/stored/dircmd.c:827 +#, c-format +msgid "3901 Device %s is already unmounted.\n" +msgstr "" + +#: src/stored/dircmd.c:842 +#, c-format +msgid "3001 Device %s unmounted.\n" +msgstr "" + +#: src/stored/dircmd.c:847 +#, c-format +msgid "3902 Device %s is busy in acquire.\n" +msgstr "" + +#: src/stored/dircmd.c:885 +#, fuzzy, c-format +msgid "3907 Error scanning unmount command: %s\n" +msgstr "2902 Błąd skanowania komendy anuluj.\n" + +#: src/stored/dircmd.c:913 +#, fuzzy +msgid "3916 Error scanning action_on_purge command\n" +msgstr "2902 Błąd skanowania komendy anuluj.\n" + +#: src/stored/dircmd.c:956 +#, c-format +msgid "3921 Device %s already released.\n" +msgstr "" + +#: src/stored/dircmd.c:963 +#, c-format +msgid "3922 Device %s waiting for sysop.\n" +msgstr "" + +#: src/stored/dircmd.c:969 +#, c-format +msgid "3922 Device %s waiting for mount.\n" +msgstr "" + +#: src/stored/dircmd.c:973 +#, c-format +msgid "3923 Device %s is busy in acquire.\n" +msgstr "" + +#: src/stored/dircmd.c:977 +#, c-format +msgid "3914 Device %s is being labeled.\n" +msgstr "" + +#: src/stored/dircmd.c:985 +#, c-format +msgid "3022 Device %s released.\n" +msgstr "" + +#: src/stored/dircmd.c:996 +#, fuzzy, c-format +msgid "3927 Error scanning release command: %s\n" +msgstr "2902 Błąd skanowania komendy anuluj.\n" + +#: src/stored/dircmd.c:1038 +#, fuzzy +msgid "Error parsing bootstrap file.\n" +msgstr "Błąd odczytywania pliku %s: ERR=%s\n" + +#: src/stored/dircmd.c:1100 +#, c-format +msgid "3995 Device %s is not an autochanger.\n" +msgstr "" + +#: src/stored/dircmd.c:1117 +#, fuzzy, c-format +msgid "3908 Error scanning autocharger drives/list/slots command: %s\n" +msgstr "2902 Błąd skanowania komendy anuluj.\n" + +#: src/stored/dircmd.c:1160 +#, fuzzy, c-format +msgid "3909 Error scanning readlabel command: %s\n" +msgstr "2902 Błąd skanowania komendy anuluj.\n" + +#: src/stored/dircmd.c:1188 +#, 
c-format +msgid "3001 Volume=%s Slot=%d\n" +msgstr "" + +#: src/stored/dircmd.c:1220 +#, c-format +msgid "3931 Device %s is BLOCKED. user unmounted.\n" +msgstr "" + +#: src/stored/dircmd.c:1224 +#, c-format +msgid "" +"3932 Device %s is BLOCKED. user unmounted during wait for media/mount.\n" +msgstr "" + +#: src/stored/dircmd.c:1228 +#, fuzzy, c-format +msgid "3933 Device %s is BLOCKED waiting for media.\n" +msgstr "Status zadania: Oczekiwanie na nowe media" + +#: src/stored/dircmd.c:1232 +#, c-format +msgid "3934 Device %s is being initialized.\n" +msgstr "" + +#: src/stored/dircmd.c:1236 +#, fuzzy, c-format +msgid "3935 Device %s is blocked labeling a Volume.\n" +msgstr "Wymagane TLS lecz nie zostao skonfigurowane w Baculi.\n" + +#: src/stored/dircmd.c:1240 +#, c-format +msgid "3935 Device %s is blocked for unknown reason.\n" +msgstr "" + +#: src/stored/dircmd.c:1245 +#, c-format +msgid "3936 Device %s is busy reading.\n" +msgstr "3936 Urządzenie %s jest zajęte odczytem.\n" + +#: src/stored/dircmd.c:1248 +#, c-format +msgid "3937 Device %s is busy with %d writer(s).\n" +msgstr "" + +#: src/stored/job.c:249 +#, c-format +msgid "FD connect failed: Job name not found: %s\n" +msgstr "" + +#: src/stored/job.c:259 +#, fuzzy, c-format +msgid "Hey!!!! JobId %u Job %s already authenticated.\n" +msgstr "JobId %s, Zadanie %s oznaczone do anulowania.\n" + +#: src/stored/job.c:276 +#, fuzzy +msgid "Unable to authenticate File daemon\n" +msgstr "Nie mogę zautentykowa Directora\n" + +#: src/stored/job.c:405 +msgid "In free_jcr(), but still attached to device!!!!\n" +msgstr "" + +#: src/stored/askdir.c:178 +msgid "Network error on bnet_recv in req_vol_info.\n" +msgstr "" + +#: src/stored/askdir.c:196 +#, fuzzy, c-format +msgid "Error getting Volume info: %s" +msgstr "Błąd odczytywania pliku %s: ERR=%s\n" + +#: src/stored/askdir.c:339 src/stored/askdir.c:340 src/stored/append.c:91 +#: src/stored/append.c:100 src/stored/append.c:112 src/stored/append.c:292 +#: src/stored/append.c:293 src/stored/append.c:308 src/stored/append.c:309 +#, fuzzy +msgid "NULL Volume name. This shouldn't happen!!!\n" +msgstr "Nieznany strumie=%d zignorowany. To nie powinno się sta!\n" + +#: src/stored/askdir.c:374 +#, fuzzy, c-format +msgid "Didn't get vol info vol=%s: ERR=%s" +msgstr "Nie można stworzyć fifo %s: ERR=%s\n" + +#: src/stored/askdir.c:429 +#, fuzzy, c-format +msgid "Error creating JobMedia record: ERR=%s\n" +msgstr "Nieudane stworzenie rekordu JobMedia %s: ERR=%s\n" + +#: src/stored/askdir.c:436 +#, fuzzy, c-format +msgid "Error creating JobMedia record: %s\n" +msgstr "Błąd skanowania nagwna rekordu: %s\n" + +#: src/stored/askdir.c:510 +#, c-format +msgid "Job %s canceled while waiting for mount on Storage Device \"%s\".\n" +msgstr "" + +#: src/stored/askdir.c:521 +#, c-format +msgid "" +"Job %s is waiting. 
Cannot find any appendable volumes.\n" +"Please use the \"label\" command to create a new Volume for:\n" +" Storage: %s\n" +" Pool: %s\n" +" Media type: %s\n" +msgstr "" + +#: src/stored/askdir.c:547 src/stored/askdir.c:649 +#, c-format +msgid "Max time exceeded waiting to mount Storage Device %s for Job %s\n" +msgstr "" + +#: src/stored/askdir.c:557 +msgid "pthread error in mount_next_volume.\n" +msgstr "błąd pthread w mount_next_volume.\n" + +#: src/stored/askdir.c:591 +msgid "Cannot request another volume: no volume name given.\n" +msgstr "" + +#: src/stored/askdir.c:597 +#, c-format +msgid "Job %s canceled while waiting for mount on Storage Device %s.\n" +msgstr "" + +#: src/stored/askdir.c:614 +#, c-format +msgid "" +"Please mount Volume \"%s\" or label a new one for:\n" +" Job: %s\n" +" Storage: %s\n" +" Pool: %s\n" +" Media type: %s\n" +msgstr "" + +#: src/stored/askdir.c:620 +#, c-format +msgid "" +"Please mount Volume \"%s\" for:\n" +" Job: %s\n" +" Storage: %s\n" +" Pool: %s\n" +" Media type: %s\n" +msgstr "" + +#: src/stored/askdir.c:659 +msgid "pthread error in mount_volume\n" +msgstr "błąd pthread w mount_volume.\n" + +#: src/stored/dvd.c:111 +#, fuzzy +msgid "No FreeSpace command defined.\n" +msgstr "Nie zdefiniowano zasobu %s\n" + +#: src/stored/dvd.c:145 +#, fuzzy, c-format +msgid "Cannot run free space command. Results=%s ERR=%s\n" +msgstr "Nie można uruchomi programu: %s. ERR=%s\n" + +#: src/stored/dvd.c:261 +#, fuzzy, c-format +msgid "Error writing part %d to the DVD: ERR=%s\n" +msgstr "Błąd uruchomienia programu: %s. stat=%d: ERR=%s\n" + +#: src/stored/dvd.c:263 +#, fuzzy, c-format +msgid "Error while writing current part to the DVD: %s" +msgstr "Błąd uruchomienia programu: %s. stat=%d: ERR=%s\n" + +#: src/stored/dvd.c:273 +#, c-format +msgid "Part %d (%lld bytes) written to DVD.\n" +msgstr "" + +#: src/stored/dvd.c:292 +#, c-format +msgid "Remaining free space %s on %s\n" +msgstr "" + +#: src/stored/dvd.c:358 +#, c-format +msgid "Next Volume part already exists on DVD. Cannot continue: %s\n" +msgstr "" + +#: src/stored/dvd.c:377 +#, c-format +msgid "open_next_part can't unlink existing part %s, ERR=%s\n" +msgstr "" + +#: src/stored/dvd.c:578 +#, c-format +msgid "" +"Error writing. 
Current part less than total number of parts (%d/%d, device=%" +"s)\n" +msgstr "" + +#: src/stored/dvd.c:585 +#, fuzzy, c-format +msgid "Unable to write last on %s: ERR=%s\n" +msgstr "Nie mogę odczyta linku symbolicznego %s na \"%s\": ERR=%s\n" + +#: src/stored/stored_conf.c:241 +#, c-format +msgid "Expected a Device Type keyword, got: %s" +msgstr "" + +#: src/stored/stored_conf.c:255 +#, c-format +msgid "" +"Maximum Block Size configured value %u is greater than allowed maximum: %u" +msgstr "" + +#: src/stored/stored_conf.c:269 +#, fuzzy, c-format +msgid "Warning: no \"%s\" resource (%d) defined.\n" +msgstr "Nie zdefiniowano zasobu %s\n" + +#: src/stored/stored_conf.c:272 +#, fuzzy, c-format +msgid "dump_resource type=%d\n" +msgstr "Nieznany rodzaj zasobu %d\n" + +#: src/stored/stored_conf.c:388 +#, fuzzy, c-format +msgid "Warning: unknown resource type %d\n" +msgstr "Nieznany rodzaj zasobu %d\n" + +#: src/stored/stored_conf.c:580 +#, fuzzy, c-format +msgid "\"%s\" item is required in \"%s\" resource, but not found.\n" +msgstr "Element %s jest wymagany w zasobie %s, lecz nie został znaleziony.\n" + +#: src/stored/stored_conf.c:586 +#, fuzzy, c-format +msgid "Too many items in \"%s\" resource\n" +msgstr "Zbyt duo elementw w zasobie %s\n" + +#: src/stored/stored_conf.c:620 +#, fuzzy, c-format +msgid "Cannot find AutoChanger resource %s\n" +msgstr "Nie można znale zasobu Director %s\n" + +#: src/stored/stored_conf.c:692 +#, fuzzy, c-format +msgid "" +"Attempt to define second \"%s\" resource named \"%s\" is not permitted.\n" +msgstr "" +"Próba definicji kolejnego zasobu %s nazwanego \"%s\" nie jest dozwolona.\n" + +#: src/stored/acquire.c:71 +#, c-format +msgid "Acquire read: num_writers=%d not zero. Job %d canceled.\n" +msgstr "" + +#: src/stored/acquire.c:80 +#, c-format +msgid "No volumes specified for reading. Job %s canceled.\n" +msgstr "" + +#: src/stored/acquire.c:89 +#, c-format +msgid "Logic error: no next volume to read. Numvol=%d Curvol=%d\n" +msgstr "" + +#: src/stored/acquire.c:115 +#, c-format +msgid "" +"Changing read device. Want Media Type=\"%s\" have=\"%s\"\n" +" device=%s\n" +msgstr "" + +#: src/stored/acquire.c:152 +#, c-format +msgid "Media Type change. 
New read device %s chosen.\n" +msgstr "" + +#: src/stored/acquire.c:165 +#, c-format +msgid "No suitable device found to read Volume \"%s\"\n" +msgstr "" + +#: src/stored/acquire.c:205 +#, fuzzy, c-format +msgid "Job %s canceled.\n" +msgstr "Status zadania: Anulowane" + +#: src/stored/acquire.c:223 +#, fuzzy, c-format +msgid "Read open device %s Volume \"%s\" failed: ERR=%s\n" +msgstr "Nieudane stworzenie rekordu JobMedia %s: ERR=%s\n" + +#: src/stored/acquire.c:313 +#, c-format +msgid "Too many errors trying to mount device %s for reading.\n" +msgstr "" + +#: src/stored/acquire.c:322 +#, fuzzy, c-format +msgid "Ready to read from volume \"%s\" on device %s.\n" +msgstr "Nie można ustawi pola Finder Info na %s\n" + +#: src/stored/acquire.c:370 +#, c-format +msgid "Want to append, but device %s is busy reading.\n" +msgstr "" + +#: src/stored/acquire.c:403 +#, fuzzy, c-format +msgid "Could not ready device %s for append.\n" +msgstr "Nie można stworzyć skrt.\n" + +#: src/stored/acquire.c:535 +#, c-format +msgid "Alert: %s" +msgstr "Alert: %s" + +#: src/stored/acquire.c:543 +#, fuzzy, c-format +msgid "3997 Bad alert command: %s: ERR=%s.\n" +msgstr "2994 Za komenda verify: %s\n" + +#: src/stored/butil.c:59 +msgid "Nohdr," +msgstr "" + +#: src/stored/butil.c:62 +msgid "partial," +msgstr "częściowy," + +#: src/stored/butil.c:65 +msgid "empty," +msgstr "pusty" + +#: src/stored/butil.c:68 +msgid "Nomatch," +msgstr "" + +#: src/stored/butil.c:71 +msgid "cont," +msgstr "" + +#: src/stored/butil.c:148 +msgid "Volume name or names is too long. Please use a .bsr file.\n" +msgstr "" + +#: src/stored/butil.c:168 +#, fuzzy, c-format +msgid "Cannot find device \"%s\" in config file %s.\n" +msgstr "Nie można znale zasobu Client %s\n" + +#: src/stored/butil.c:175 +#, fuzzy, c-format +msgid "Cannot init device %s\n" +msgstr "Nie można znale zasobu Client %s\n" + +#: src/stored/butil.c:195 +#, fuzzy, c-format +msgid "Cannot open %s\n" +msgstr " Nie można otworzyć %s: ERR=%s.\n" + +#: src/stored/butil.c:282 +#, fuzzy, c-format +msgid "Could not find device \"%s\" in config file %s.\n" +msgstr "Nie można stworzyć skrt.\n" + +#: src/stored/butil.c:287 +#, c-format +msgid "Using device: \"%s\" for reading.\n" +msgstr "" + +#: src/stored/butil.c:290 +#, c-format +msgid "Using device: \"%s\" for writing.\n" +msgstr "" + +#: src/stored/butil.c:306 +msgid "Unexpected End of Data\n" +msgstr "Nieoczekiwany koniec danych\n" + +#: src/stored/butil.c:308 +msgid "Unexpected End of Tape\n" +msgstr "Nieoczekiwany koniec taśmy\n" + +#: src/stored/butil.c:310 +msgid "Unexpected End of File\n" +msgstr "Nieoczekiwany koniec pliku\n" + +#: src/stored/butil.c:312 +msgid "Tape Door is Open\n" +msgstr "" + +#: src/stored/butil.c:314 +msgid "Unexpected Tape is Off-line\n" +msgstr "" + +#: src/stored/status.c:100 +msgid "Used Volume status:\n" +msgstr "Status używanego Wolumenu:\n" + +#: src/stored/status.c:108 src/stored/status.c:110 +#, fuzzy +msgid "" +"====\n" +"\n" +msgstr "====\n" + +#: src/stored/status.c:129 +msgid "" +"\n" +"Device status:\n" +msgstr "" +"\n" +"Status urządzenia:\n" + +#: src/stored/status.c:133 +#, c-format +msgid "Autochanger \"%s\" with devices:\n" +msgstr "" + +#: src/stored/status.c:151 +#, c-format +msgid "" +"Device %s is mounted with:\n" +" Volume: %s\n" +" Pool: %s\n" +" Media type: %s\n" +msgstr "" + +#: src/stored/status.c:161 +#, c-format +msgid "Device %s open but no Bacula volume is currently mounted.\n" +msgstr "" + +#: src/stored/status.c:172 +#, fuzzy, c-format +msgid " Total Bytes=%s Blocks=%s 
Bytes/block=%s\n" +msgstr " Plików=%s Bajtów=%s Bajtów/sek=%s Błędów=%d\n" + +#: src/stored/status.c:187 +#, fuzzy, c-format +msgid " Total Bytes Read=%s Blocks Read=%s Bytes/block=%s\n" +msgstr " Plików=%s Bajtów=%s Bajtów/sek=%s Błędów=%d\n" + +#: src/stored/status.c:193 +#, c-format +msgid " Positioned at File=%s Block=%s\n" +msgstr " Ustawiony na: Plik=%s Blok=%s\n" + +#: src/stored/status.c:200 +#, fuzzy, c-format +msgid "Device %s is not open.\n" +msgstr " Nie zapisany plik archiwum: %s\n" + +#: src/stored/status.c:204 +#, fuzzy, c-format +msgid "Device \"%s\" is not open or does not exist.\n" +msgstr "Wymagane TLS lecz nie zostao skonfigurowane w Baculi.\n" + +#: src/stored/status.c:249 +msgid "" +"No DEVICE structure.\n" +"\n" +msgstr "" +"Brak struktury URZĄDZENIA.\n" +"\n" + +#: src/stored/status.c:255 +msgid " Device is BLOCKED. User unmounted.\n" +msgstr "" + +#: src/stored/status.c:259 +msgid " Device is BLOCKED. User unmounted during wait for media/mount.\n" +msgstr "" + +#: src/stored/status.c:271 +#, c-format +msgid "" +" Device is BLOCKED waiting for mount of volume \"%s\",\n" +" Pool: %s\n" +" Media type: %s\n" +msgstr "" + +#: src/stored/status.c:280 +#, c-format +msgid "" +" Device is BLOCKED waiting to create a volume for:\n" +" Pool: %s\n" +" Media type: %s\n" +msgstr "" + +#: src/stored/status.c:292 +#, fuzzy +msgid " Device is BLOCKED waiting for media.\n" +msgstr "Status zadania: Oczekiwanie na nowe media" + +#: src/stored/status.c:298 +msgid " Device is being initialized.\n" +msgstr "" + +#: src/stored/status.c:302 +#, fuzzy +msgid " Device is blocked labeling a Volume.\n" +msgstr "Wymagane TLS lecz nie zostao skonfigurowane w Baculi.\n" + +#: src/stored/status.c:311 +#, fuzzy, c-format +msgid " Slot %d is loaded in drive %d.\n" +msgstr " Nie zapisany plik archiwum: %s\n" + +#: src/stored/status.c:315 +#, fuzzy, c-format +msgid " Drive %d is not loaded.\n" +msgstr " Nie zapisany plik archiwum: %s\n" + +#: src/stored/status.c:318 +#, fuzzy, c-format +msgid " Drive %d status unknown.\n" +msgstr " Nie zapisany plik archiwum: %s\n" + +#: src/stored/status.c:350 +msgid "Device state:\n" +msgstr "" + +#: src/stored/status.c:368 +#, c-format +msgid "" +"num_writers=%d reserved=%d block=%d\n" +"\n" +msgstr "" + +#: src/stored/status.c:375 +#, c-format +msgid "Archive name: %s Device name: %s\n" +msgstr "" + +#: src/stored/status.c:379 +#, c-format +msgid "File=%u block=%u\n" +msgstr "Plik=%u blok=%u\n" + +#: src/stored/status.c:382 +#, c-format +msgid "Min block=%u Max block=%u\n" +msgstr "Min blok=%u Max blok=%u\n" + +#: src/stored/status.c:402 +#, fuzzy, c-format +msgid "%s Job %s waiting for Client connection.\n" +msgstr "Status zadania: Oczekiwanie na zasoby Klienta" + +#: src/stored/status.c:418 +#, c-format +msgid "" +"Reading: %s %s job %s JobId=%d Volume=\"%s\"\n" +" pool=\"%s\" device=%s\n" +msgstr "" + +#: src/stored/status.c:431 +#, c-format +msgid "" +"Writing: %s %s job %s JobId=%d Volume=\"%s\"\n" +" pool=\"%s\" device=%s\n" +msgstr "" + +#: src/stored/status.c:442 +#, c-format +msgid " spooling=%d despooling=%d despool_wait=%d\n" +msgstr "" + +#: src/stored/status.c:451 +#, fuzzy, c-format +msgid " Files=%s Bytes=%s Bytes/sec=%s\n" +msgstr " Plików=%s Bajtów=%s Bajtów/sek=%s Błędów=%d\n" + +#: src/stored/status.c:459 +#, c-format +msgid " FDReadSeqNo=%s in_msg=%u out_msg=%d fd=%d\n" +msgstr " FDReadSeqNo=%s in_msg=%u out_msg=%d fd=%d\n" + +#: src/stored/status.c:465 +msgid " FDSocket closed\n" +msgstr " SDSocket zamknięto.\n" + +#: src/stored/status.c:486 
+msgid "" +"\n" +"Jobs waiting to reserve a drive:\n" +msgstr "" +"\n" +"Zadania oczekujące na rezerwację napędu:\n" + +#: src/stored/status.c:517 +msgid "===================================================================\n" +msgstr "===================================================================\n" + +#: src/stored/status.c:700 +msgid "3900 Bad .status command, missing argument.\n" +msgstr "3900 Zła komenda .status, brakujcy argument.\n" + +#: src/stored/status.c:746 +msgid "3900 Bad .status command, wrong argument.\n" +msgstr "3900 Za komenda .status, niepoprawny argument.\n" + +#: src/stored/status.c:761 +msgid "Bacula Storage: Idle" +msgstr "Bacula Storage: Bezczynny" + +#: src/stored/status.c:772 +msgid "Bacula Storage: Running" +msgstr "Bacula Storage: Uruchomiony" + +#: src/stored/status.c:786 +msgid "Bacula Storage: Last Job Canceled" +msgstr "Bacula Storage: Ostatnie Zadanie Anulowane" + +#: src/stored/status.c:790 +msgid "Bacula Storage: Last Job Failed" +msgstr "Bacula Storage: Ostatnie Zadanie Nieudane" + +#: src/stored/status.c:794 +msgid "Bacula Storage: Last Job had Warnings" +msgstr "Bacula Storage: Ostatnie Zadanie miało Ostrzeżenia" + +#: src/stored/wait.c:134 +#, fuzzy, c-format +msgid "pthread timedwait error. ERR=%s\n" +msgstr "Błąd rekordu danych. ERR=%s\n" + +#: src/stored/wait.c:233 +msgid "JobId=%s, Job %s waiting to reserve a device.\n" +msgstr "JobId=%s, Zadanie %s oczekuje na rezerwację urządzenia.\n" + +#: src/stored/spool.c:83 +msgid "Spooling statistics:\n" +msgstr "Statystyki spooling'u:\n" + +#: src/stored/spool.c:86 +#, c-format +msgid "" +"Data spooling: %u active jobs, %s bytes; %u total jobs, %s max bytes/job.\n" +msgstr "" + +#: src/stored/spool.c:94 +#, c-format +msgid "Attr spooling: %u active jobs, %s bytes; %u total jobs, %s max bytes.\n" +msgstr "" + +#: src/stored/spool.c:112 +msgid "Spooling data ...\n" +msgstr "" + +#: src/stored/spool.c:138 +#, c-format +msgid "Bad return from despool WroteVol=%d\n" +msgstr "" + +#: src/stored/spool.c:171 +#, fuzzy, c-format +msgid "Open data spool file %s failed: ERR=%s\n" +msgstr "Nieudana aktualizacja rekordu Media %s: ERR=%s\n" + +#: src/stored/spool.c:224 +msgid "Despooling zero bytes. Your disk is probably FULL!\n" +msgstr "" + +#: src/stored/spool.c:233 +#, c-format +msgid "Committing spooled data to Volume \"%s\". Despooling %s bytes ...\n" +msgstr "" + +#: src/stored/spool.c:238 +#, c-format +msgid "Writing spooled data to Volume. Despooling %s bytes ...\n" +msgstr "" + +#: src/stored/spool.c:300 src/stored/mac.c:128 src/stored/mac.c:248 +#: src/stored/append.c:300 +#, fuzzy, c-format +msgid "Fatal append error on device %s: ERR=%s\n" +msgstr "Błąd w getacl na pliku \"%s\": ERR=%s\n" + +#: src/stored/spool.c:326 +#, c-format +msgid "" +"Despooling elapsed time = %02d:%02d:%02d, Transfer rate = %s Bytes/second\n" +msgstr "" + +#: src/stored/spool.c:335 src/stored/spool.c:528 src/stored/spool.c:570 +#, fuzzy, c-format +msgid "Ftruncate spool file failed: ERR=%s\n" +msgstr "Nieudane stworzenie rekordu JobMedia %s: ERR=%s\n" + +#: src/stored/spool.c:394 +#, fuzzy, c-format +msgid "Spool header read error. ERR=%s\n" +msgstr "Błąd rekordu danych. ERR=%s\n" + +#: src/stored/spool.c:397 +#, c-format +msgid "Spool read error. Wanted %u bytes, got %d\n" +msgstr "" + +#: src/stored/spool.c:398 +#, fuzzy, c-format +msgid "Spool header read error. Wanted %u bytes, got %d\n" +msgstr "Błąd rekordu danych. ERR=%s\n" + +#: src/stored/spool.c:404 src/stored/spool.c:405 +#, c-format +msgid "Spool block too big. 
Max %u bytes, got %u\n" +msgstr "" + +#: src/stored/spool.c:410 src/stored/spool.c:411 +#, c-format +msgid "Spool data read error. Wanted %u bytes, got %d\n" +msgstr "" + +#: src/stored/spool.c:471 +msgid "User specified spool size reached.\n" +msgstr "" + +#: src/stored/spool.c:473 +msgid "Bad return from despool in write_block.\n" +msgstr "" + +#: src/stored/spool.c:481 +msgid "Spooling data again ...\n" +msgstr "" + +#: src/stored/spool.c:512 +#, fuzzy, c-format +msgid "Error writing header to spool file. ERR=%s\n" +msgstr "Błąd odczytywania pliku %s: ERR=%s\n" + +#: src/stored/spool.c:516 +#, c-format +msgid "" +"Error writing header to spool file. Disk probably full. Attempting recovery. " +"Wanted to write=%d got=%d\n" +msgstr "" + +#: src/stored/spool.c:534 src/stored/spool.c:576 +#, fuzzy +msgid "Fatal despooling error." +msgstr "Błąd rekordu danych. ERR=%s\n" + +#: src/stored/spool.c:541 +msgid "Retrying after header spooling error failed.\n" +msgstr "" + +#: src/stored/spool.c:555 +#, fuzzy, c-format +msgid "Error writing data to spool file. ERR=%s\n" +msgstr "Błąd odczytywania pliku %s: ERR=%s\n" + +#: src/stored/spool.c:586 +msgid "Retrying after data spooling error failed.\n" +msgstr "" + +#: src/stored/spool.c:655 +msgid "Network error on BlastAttributes.\n" +msgstr "Błąd sieci na BlastAttributes.\n" + +#: src/stored/spool.c:676 src/stored/spool.c:683 +#, fuzzy, c-format +msgid "Fseek on attributes file failed: ERR=%s\n" +msgstr "Nieudane stworzenie rekordu JobMedia %s: ERR=%s\n" + +#: src/stored/spool.c:695 +#, c-format +msgid "Sending spooled attrs to the Director. Despooling %s bytes ...\n" +msgstr "" + +#: src/stored/spool.c:721 +#, fuzzy, c-format +msgid "fopen attr spool file %s failed: ERR=%s\n" +msgstr "Nie mogę otworzyć xattr %s na pliku \"%s\": ERR=%s\n" + +#: src/stored/mac.c:82 +msgid "Read and write devices not properly initialized.\n" +msgstr "" + +#: src/stored/mac.c:88 +#, fuzzy, c-format +msgid "No Volume names found for %s.\n" +msgstr "Nie znaleziono zasobu Zadania dla \"%s\".\n" + +#: src/stored/mac.c:130 src/stored/append.c:302 +msgid "Set ok=FALSE after write_block_to_device.\n" +msgstr "" + +#: src/stored/mac.c:278 src/stored/append.c:226 +#, fuzzy, c-format +msgid "Error updating file attributes. ERR=%s\n" +msgstr "Błąd odczytywania pliku %s: ERR=%s\n" + +#: src/stored/pythonsd.c:208 +msgid "Error in ParseTuple\n" +msgstr "Bład w ParseTuple\n" + +#: src/stored/pythonsd.c:224 +msgid "Parse tuple error in job_write\n" +msgstr "" + +#: src/stored/pythonsd.c:261 +#, fuzzy, c-format +msgid "Error in Python method %s\n" +msgstr "Błąd uaktualniania rekordu zadania. 
%s" + +#: src/stored/mount.c:96 +#, fuzzy, c-format +msgid "Too many errors trying to mount device %s.\n" +msgstr "Nie można stworzyć skrt.\n" + +#: src/stored/mount.c:104 +#, fuzzy, c-format +msgid "Job %d canceled.\n" +msgstr "2001 Zadanie %s oznaczone do anulowania.\n" + +#: src/stored/mount.c:284 +#, c-format +msgid "Volume \"%s\" previously written, moving to end of data.\n" +msgstr "" + +#: src/stored/mount.c:290 +#, fuzzy, c-format +msgid "Unable to position to end of data on device %s: ERR=%s\n" +msgstr "Nie mogę odzyska waciciela xattr %s na pliku \"%s\": ERR=%s\n" + +#: src/stored/mount.c:410 src/stored/mount.c:707 +#, fuzzy, c-format +msgid "Volume \"%s\" not on device %s.\n" +msgstr "Nie można stworzyć skrt.\n" + +#: src/stored/mount.c:443 +#, c-format +msgid "" +"Director wanted Volume \"%s\".\n" +" Current Volume \"%s\" not acceptable because:\n" +" %s" +msgstr "" + +#: src/stored/mount.c:598 +#, fuzzy, c-format +msgid "Ready to append to end of Volume \"%s\" part=%d size=%s\n" +msgstr "Nieudane stworzenie rekordu JobMedia %s: ERR=%s\n" + +#: src/stored/mount.c:602 +#, c-format +msgid "" +"Bacula cannot write on DVD Volume \"%s\" because: The sizes do not match! " +"Volume=%s Catalog=%s\n" +msgstr "" + +#: src/stored/mount.c:616 +#, fuzzy, c-format +msgid "Ready to append to end of Volume \"%s\" at file=%d.\n" +msgstr "Nieudane stworzenie rekordu JobMedia %s: ERR=%s\n" + +#: src/stored/mount.c:619 +#, c-format +msgid "" +"Bacula cannot write on tape Volume \"%s\" because:\n" +"The number of files mismatch! Volume=%u Catalog=%u\n" +msgstr "" + +#: src/stored/mount.c:630 +#, fuzzy, c-format +msgid "Ready to append to end of Volume \"%s\" size=%s\n" +msgstr "Nieudane stworzenie rekordu JobMedia %s: ERR=%s\n" + +#: src/stored/mount.c:634 +#, c-format +msgid "" +"Bacula cannot write on disk Volume \"%s\" because: The sizes do not match! " +"Volume=%s Catalog=%s\n" +msgstr "" + +#: src/stored/mount.c:697 +#, fuzzy, c-format +msgid "Labeled new Volume \"%s\" on device %s.\n" +msgstr "Nie można stworzyć skrt.\n" + +#: src/stored/mount.c:702 +#, fuzzy, c-format +msgid "Device %s not configured to autolabel Volumes.\n" +msgstr "Wymagane TLS lecz nie zostao skonfigurowane w Baculi.\n" + +#: src/stored/mount.c:721 +#, c-format +msgid "Marking Volume \"%s\" in Error in Catalog.\n" +msgstr "" + +#: src/stored/mount.c:738 +#, c-format +msgid "" +"Autochanger Volume \"%s\" not found in slot %d.\n" +" Setting InChanger to zero in catalog.\n" +msgstr "" + +#: src/stored/mount.c:757 +msgid "Hey!!!!! WroteVol non-zero !!!!!\n" +msgstr "" + +#: src/stored/mount.c:806 +#, c-format +msgid "" +"Invalid tape position on volume \"%s\" on device %s. Expected %d, got %d\n" +msgstr "" + +#: src/stored/fd_cmds.c:166 +#, fuzzy, c-format +msgid "Command error with FD, hanging up. %s\n" +msgstr "Błąd komunikacji z SD. za odpowiedź na %s. ERR=%s\n" + +#: src/stored/fd_cmds.c:169 +#, fuzzy +msgid "Command error with FD, hanging up.\n" +msgstr "Błąd komunikacji z SD. za odpowiedź na %s. 
ERR=%s\n" + +#: src/stored/fd_cmds.c:180 +#, fuzzy, c-format +msgid "FD command not found: %s\n" +msgstr "Nie znaleziono tabeli: %s\n" + +#: src/stored/fd_cmds.c:206 +#, fuzzy +msgid "Append data error.\n" +msgstr "Błąd danych Zlib" + +#: src/stored/fd_cmds.c:211 +msgid "Attempt to append on non-open session.\n" +msgstr "" + +#: src/stored/fd_cmds.c:223 src/stored/fd_cmds.c:266 +msgid "Attempt to close non-open session.\n" +msgstr "" + +#: src/stored/fd_cmds.c:241 +msgid "Attempt to open already open session.\n" +msgstr "" + +#: src/stored/fd_cmds.c:295 +msgid "Attempt to read on non-open session.\n" +msgstr "" + +#: src/stored/fd_cmds.c:313 src/stored/fd_cmds.c:322 +msgid "Attempt to open read on non-open session.\n" +msgstr "" + +#: src/stored/dev.c:119 +#, fuzzy, c-format +msgid "Unable to stat device %s: ERR=%s\n" +msgstr "Nie mogę otworzyć pliku \"%s\": ERR=%s\n" + +#: src/stored/dev.c:137 +#, c-format +msgid "" +"%s is an unknown device type. Must be tape or directory\n" +" or have RequiresMount=yes for DVD. st_mode=%x\n" +msgstr "" + +#: src/stored/dev.c:198 +#, fuzzy, c-format +msgid "Unable to stat mount point %s: ERR=%s\n" +msgstr "Nie mogę odczyta linku symbolicznego %s na \"%s\": ERR=%s\n" + +#: src/stored/dev.c:203 +msgid "" +"Mount and unmount commands must defined for a device which requires mount.\n" +msgstr "" + +#: src/stored/dev.c:208 +msgid "Write part command must be defined for a device which requires mount.\n" +msgstr "" + +#: src/stored/dev.c:219 +#, fuzzy, c-format +msgid "Min block size > max on device %s\n" +msgstr "Problem zapętlenia dot. rozmiaru bloku bufora na urządzeniu %s " + +#: src/stored/dev.c:223 +#, c-format +msgid "Block size %u on device %s is too large, using default %u\n" +msgstr "" + +#: src/stored/dev.c:228 +#, c-format +msgid "Max block size %u not multiple of device %s block size.\n" +msgstr "" + +#: src/stored/dev.c:232 +#, c-format +msgid "Max Vol Size < 8 * Max Block Size on device %s\n" +msgstr "" + +#: src/stored/dev.c:248 src/stored/dev.c:254 +#, fuzzy, c-format +msgid "Unable to init cond variable: ERR=%s\n" +msgstr "Nie mogę otworzyć pliku \"%s\": ERR=%s\n" + +#: src/stored/dev.c:393 +msgid "Illegal mode given to open dev.\n" +msgstr "" + +#: src/stored/dev.c:524 +#, fuzzy, c-format +msgid "Could not open file device %s. No Volume name given.\n" +msgstr "Nie można stworzyć skrt.\n" + +#: src/stored/dev.c:547 src/stored/dev.c:715 +#, fuzzy, c-format +msgid "Could not open: %s, ERR=%s\n" +msgstr "Nie można stworzyć %s: ERR=%s\n" + +#: src/stored/dev.c:591 +#, fuzzy, c-format +msgid "Could not open DVD device %s. No Volume name given.\n" +msgstr "Nie można stworzyć skrt.\n" + +#: src/stored/dev.c:640 +#, c-format +msgid "The DVD in device %s contains data, please blank it before writing.\n" +msgstr "" + +#: src/stored/dev.c:661 +#, fuzzy, c-format +msgid "Unable to stat DVD part 1 file %s: ERR=%s\n" +msgstr "Nie mogę otrzyma statusu na xattr %s na pliku \"%s\": ERR=%s\n" + +#: src/stored/dev.c:669 +#, c-format +msgid "DVD part 1 is not a regular file %s.\n" +msgstr "" + +#: src/stored/dev.c:689 +#, c-format +msgid "There is no valid DVD in device %s.\n" +msgstr "" + +#: src/stored/dev.c:695 +#, fuzzy, c-format +msgid "Could not mount DVD device %s.\n" +msgstr "Nie można stworzyć skrt.\n" + +#: src/stored/dev.c:745 +#, fuzzy, c-format +msgid "Could not fstat: %s, ERR=%s\n" +msgstr " Nie można wykona stat %s: ERR=%s\n" + +#: src/stored/dev.c:779 +#, c-format +msgid "Bad call to rewind. 
Device %s not open\n" +msgstr "" + +#: src/stored/dev.c:818 +#, c-format +msgid "No tape loaded or drive offline on %s.\n" +msgstr "" + +#: src/stored/dev.c:828 +#, fuzzy, c-format +msgid "Rewind error on %s. ERR=%s.\n" +msgstr "Błąd odczytu na pliku %s. ERR=%s\n" + +#: src/stored/dev.c:838 src/stored/dev.c:916 src/stored/dev.c:1058 +#: src/stored/dev.c:1647 patches/testing/mtops.c:276 +#: patches/testing/mtops.c:300 patches/testing/mtops.c:320 +#: patches/testing/mtops.c:355 patches/testing/mtops.c:378 +#, fuzzy, c-format +msgid "lseek error on %s. ERR=%s.\n" +msgstr "Przesunicie do %s na: %s: ERR=%s\n" + +#: src/stored/dev.c:887 +#, c-format +msgid "Bad call to eod. Device %s not open\n" +msgstr "" + +#: src/stored/dev.c:954 +#, fuzzy, c-format +msgid "ioctl MTEOM error on %s. ERR=%s.\n" +msgstr "Przesunicie do %s na: %s: ERR=%s\n" + +#: src/stored/dev.c:964 src/stored/dev.c:1099 +#, fuzzy, c-format +msgid "ioctl MTIOCGET error on %s. ERR=%s.\n" +msgstr "Przesunicie do %s na: %s: ERR=%s\n" + +#: src/stored/dev.c:1044 +msgid "Bad device call. Device not open\n" +msgstr "" + +#: src/stored/dev.c:1057 +#, fuzzy, c-format +msgid "Seek error: ERR=%s\n" +msgstr "Przesunicie do %s na: %s: ERR=%s\n" + +#: src/stored/dev.c:1094 +#, fuzzy +msgid " Bacula status:" +msgstr "Monitor statusu demona Bacula" + +#: src/stored/dev.c:1095 src/stored/dev.c:1178 src/stored/dev.c:1180 +#, c-format +msgid " file=%d block=%d\n" +msgstr " plik=%d blok=%d\n" + +#: src/stored/dev.c:1103 +#, fuzzy +msgid " Device status:" +msgstr "Monitor statusu demona Bacula" + +#: src/stored/dev.c:1202 +msgid "Bad call to load_dev. Device not open\n" +msgstr "Błędne odwołanie do load_dev. Urządzenie nie otwarte\n" + +#: src/stored/dev.c:1213 src/stored/dev.c:1226 +#, fuzzy, c-format +msgid "ioctl MTLOAD error on %s. ERR=%s.\n" +msgstr "Przesunicie do %s na: %s: ERR=%s\n" + +#: src/stored/dev.c:1257 +#, fuzzy, c-format +msgid "ioctl MTOFFL error on %s. ERR=%s.\n" +msgstr "Przesunicie do %s na: %s: ERR=%s\n" + +#: src/stored/dev.c:1298 +msgid "Bad call to fsf. Device not open\n" +msgstr "Błędne odwołanie do fsf. Urządzenie nie otwarte\n" + +#: src/stored/dev.c:1309 src/stored/dev.c:1436 +#, c-format +msgid "Device %s at End of Tape.\n" +msgstr "" + +#: src/stored/dev.c:1340 src/stored/dev.c:1416 +#, fuzzy, c-format +msgid "ioctl MTFSF error on %s. ERR=%s.\n" +msgstr "Przesunicie do %s na: %s: ERR=%s\n" + +#: src/stored/dev.c:1465 +msgid "Bad call to bsf. Device not open\n" +msgstr "Błędne odwołanie do bsf. Urządzenie nie otwarte\n" + +#: src/stored/dev.c:1471 +#, c-format +msgid "Device %s cannot BSF because it is not a tape.\n" +msgstr "" + +#: src/stored/dev.c:1488 +#, fuzzy, c-format +msgid "ioctl MTBSF error on %s. ERR=%s.\n" +msgstr "Przesunicie do %s na: %s: ERR=%s\n" + +#: src/stored/dev.c:1507 +msgid "Bad call to fsr. Device not open\n" +msgstr "Błędne odwołanie do fsr. Urządzenie nie otwarte\n" + +#: src/stored/dev.c:1517 +#, fuzzy, c-format +msgid "ioctl MTFSR not permitted on %s.\n" +msgstr "Błąd odczytu na pliku %s. ERR=%s\n" + +#: src/stored/dev.c:1545 +#, fuzzy, c-format +msgid "ioctl MTFSR %d error on %s. ERR=%s.\n" +msgstr "Błąd odczytu na pliku %s. ERR=%s\n" + +#: src/stored/dev.c:1563 +msgid "Bad call to bsr_dev. Device not open\n" +msgstr "Błędne odwołanie do bsr_dev. Urządzenie nie otwarte\n" + +#: src/stored/dev.c:1573 +#, fuzzy, c-format +msgid "ioctl MTBSR not permitted on %s.\n" +msgstr "Błąd odczytu na pliku %s. ERR=%s\n" + +#: src/stored/dev.c:1587 +#, fuzzy, c-format +msgid "ioctl MTBSR error on %s. 
ERR=%s.\n" +msgstr "Błąd odczytu na pliku %s. ERR=%s\n" + +#: src/stored/dev.c:1636 +msgid "Bad call to reposition. Device not open\n" +msgstr "Błędne odwołanie dla repozycji. Urządzenie nie otwarte\n" + +#: src/stored/dev.c:1715 +msgid "Bad call to weof_dev. Device not open\n" +msgstr "Błędne odwołanie do weof_dev. Urządzenie nie otwarte\n" + +#: src/stored/dev.c:1725 +msgid "Attempt to WEOF on non-appendable Volume\n" +msgstr "" + +#: src/stored/dev.c:1743 +#, fuzzy, c-format +msgid "ioctl MTWEOF error on %s. ERR=%s.\n" +msgstr "Przesunicie do %s na: %s: ERR=%s\n" + +#: src/stored/dev.c:1842 +#, fuzzy, c-format +msgid "unknown func code %d" +msgstr "Nieznany rodzaj zasobu %d\n" + +#: src/stored/dev.c:1848 +#, fuzzy, c-format +msgid "I/O function \"%s\" not supported on this device.\n" +msgstr "Strumie %s nie jest wspierany na tym Kliencie.\n" + +#: src/stored/dev.c:2023 +#, fuzzy, c-format +msgid "Unable to truncate device %s. ERR=%s\n" +msgstr "Nie mogę przyci wolumenu \"%s\"\n" + +#: src/stored/dev.c:2040 +#, fuzzy, c-format +msgid "Unable to stat device %s. ERR=%s\n" +msgstr "Nie mogę otworzyć pliku \"%s\": ERR=%s\n" + +#: src/stored/dev.c:2054 +#, c-format +msgid "Device %s doesn't support ftruncate(). Recreating file %s.\n" +msgstr "Urządzenie %s nie obsługuje ftruncate(). Odtwarzanie pliku %s.\n" + +#: src/stored/dev.c:2066 +#, fuzzy, c-format +msgid "Could not reopen: %s, ERR=%s\n" +msgstr "Nie można stworzyć %s: ERR=%s\n" + +#: src/stored/dev.c:2189 src/stored/dev.c:2260 +#, fuzzy, c-format +msgid "Device %s cannot be %smounted. ERR=%s\n" +msgstr "Błąd odczytu na pliku %s. ERR=%s\n" + +#: src/stored/dev.c:2656 +#, fuzzy, c-format +msgid "Unable to set eotmodel on device %s: ERR=%s\n" +msgstr "Nie mogę otworzyć pliku \"%s\": ERR=%s\n" + +#: src/stored/vol_mgr.c:95 +#, fuzzy, c-format +msgid "Unable to initialize volume list lock. ERR=%s\n" +msgstr "Nie można zainicjalizowa blokady BD. ERR=%s\n" + +#: src/stored/vol_mgr.c:493 +#, c-format +msgid "Device switch. New device %s chosen.\n" +msgstr "" + +#: src/stored/bls.c:79 +#, fuzzy, c-format +msgid "" +"\n" +"Version: %s (%s)\n" +"\n" +"Usage: bls [options] \n" +" -b specify a bootstrap file\n" +" -c specify a Storage configuration file\n" +" -d set debug level to \n" +" -dt print timestamp in debug output\n" +" -e exclude list\n" +" -i include list\n" +" -j list jobs\n" +" -k list blocks\n" +" (no j or k option) list saved files\n" +" -L dump label\n" +" -p proceed inspite of errors\n" +" -v be verbose\n" +" -V specify Volume names (separated by |)\n" +" -? print this message\n" +"\n" +msgstr "" +"\n" +"Wersja: %s (%s)\n" +"\n" +"Użycie: bacula-fd [-f -s] [-c config_file] [-d debug_level]\n" +" -c uyj jako pliku konfiguracyjnego\n" +" -d ustaw poziom debugingu na \n" +" -dt wyświetl znacznik czasowu podczas wywietlania debugingu\n" +" -f uruchom na pierwszym planie (dla debugingu)\n" +" -g identyfikator grupy\n" +" -k keep readall capabilities\n" +" -m wyświetl informacje kaboom (dla debugingu)\n" +" -s brak sygnałów (dla debugingu)\n" +" -t przetestuj plik konfiguracji i zakocz\n" +" -u identyfikator uytkownika\n" +" -v gadatliwe komunikaty uytkownika\n" +" -? 
wyświetl ten komunikat.\n" +"\n" + +#: src/stored/bls.c:211 +msgid "No archive name specified\n" +msgstr "" + +#: src/stored/bls.c:247 +#, fuzzy, c-format +msgid "" +"\n" +"Warning, this Volume is a continuation of Volume %s\n" +msgstr "Proszę popraw plik konfiguracyjny: %s\n" + +#: src/stored/bls.c:290 +#, c-format +msgid "Got EOM at file %u on device %s, Volume \"%s\"\n" +msgstr "" + +#: src/stored/bls.c:301 +#, fuzzy, c-format +msgid "Mounted Volume \"%s\".\n" +msgstr "Nie mogę przyci wolumenu \"%s\"\n" + +#: src/stored/bls.c:303 +#, fuzzy, c-format +msgid "End of file %u on device %s, Volume \"%s\"\n" +msgstr "Nieudane stworzenie rekordu JobMedia %s: ERR=%s\n" + +#: src/stored/bls.c:327 +#, c-format +msgid "" +"File:blk=%u:%u blk_num=%u blen=%u First rec FI=%s SessId=%u SessTim=%u Strm=%" +"s rlen=%d\n" +msgstr "" + +#: src/stored/bls.c:336 +#, c-format +msgid "Block: %d size=%d\n" +msgstr "" + +#: src/stored/bls.c:389 +msgid "Attrib unpack error!\n" +msgstr "" + +#: src/stored/bls.c:400 +#, c-format +msgid "FileIndex=%d VolSessionId=%d VolSessionTime=%d Stream=%d DataLen=%d\n" +msgstr "" + +#: src/stored/bls.c:442 +msgid "End of Physical Medium" +msgstr "Koniec Fizycznego Medium" + +#: src/stored/bls.c:445 +msgid "Start of object" +msgstr "Początek obiektu" + +#: src/stored/bls.c:448 +msgid "End of object" +msgstr "Koniec obiektu" + +#: src/stored/append.c:63 +msgid "DCR is NULL!!!\n" +msgstr "DCR jest NULL!!!\n" + +#: src/stored/append.c:68 +msgid "DEVICE is NULL!!!\n" +msgstr "DEVICE jest NULL!!!\n" + +#: src/stored/append.c:78 +msgid "Unable to set network buffer size.\n" +msgstr "Nie mogę ustawić rozmiaru bufora sieciowego.\n" + +#: src/stored/append.c:118 +msgid "Network send error to FD. ERR=%s\n" +msgstr "Błąd sieci w wysyaniu do FD. ERR=%s\n" + +#: src/stored/append.c:155 +msgid "Error reading data header from FD. ERR=%s\n" +msgstr "Błąd odczytywania nagłówka danych z FD. ERR=%s\n" + +#: src/stored/append.c:162 +msgid "Malformed data header from FD: %s\n" +msgstr "Uszkodzony nagłówek danych z FD: %s\n" + +#: src/stored/append.c:171 +msgid "File index from FD not positive or sequential\n" +msgstr "Indeks Pliku z FD nie jest pozytywny ani sekwencyjny\n" + +#: src/stored/append.c:241 +#, fuzzy, c-format +msgid "Network error reading from FD. ERR=%s\n" +msgstr "Błąd sieci w wysyaniu do SD. ERR=%s\n" + +#: src/stored/append.c:270 +#, c-format +msgid "" +"Job write elapsed time = %02d:%02d:%02d, Transfer rate = %s Bytes/second\n" +msgstr "" + +#: src/stored/stored.c:100 +msgid "" +"\n" +"Version: %s (%s)\n" +"\n" +"Usage: stored [options] [-c config_file] [config_file]\n" +" -c use as configuration file\n" +" -d set debug level to \n" +" -dt print timestamp in debug output\n" +" -f run in foreground (for debugging)\n" +" -g set groupid to group\n" +" -m print kaboom output (for debugging)\n" +" -p proceed despite I/O errors\n" +" -s no signals (for debugging)\n" +" -t test - read config and exit\n" +" -u userid to \n" +" -v verbose user messages\n" +" -? 
print this message.\n" +"\n" +msgstr "" +"\n" +"Wersja: %s (%s)\n" +"\n" +"Użycie: bacula-fd [-f -s] [-c config_file] [-d debug_level]\n" +" -c uyj jako pliku konfiguracyjnego\n" +" -d ustaw poziom debugingu na \n" +" -dt wyświetl znacznik czasu podczas wywietlania debugingu\n" +" -f uruchom na pierwszym planie (dla debugingu)\n" +" -g identyfikator grupy\n" +" -k zachowaj właściwości readall\n" +" -m wyświetl informacje kaboom (dla debugingu)\n" +" -s brak sygnałów (dla debugingu)\n" +" -t przetestuj plik konfiguracji i zakocz\n" +" -u identyfikator uytkownika\n" +" -v gadatliwe komunikaty uytkownika\n" +" -? wyświetl ten komunikat.\n" +"\n" + +#: src/stored/stored.c:283 +msgid "Volume Session Time is ZERO!\n" +msgstr "" + +#: src/stored/stored.c:304 +msgid "Unable to create thread. ERR=%s\n" +msgstr "Nie mogę utworzyć wątku. ERR=%s\n" + +#: src/stored/stored.c:343 +#, fuzzy, c-format +msgid "Only one Storage resource permitted in %s\n" +msgstr "Dozwolony tylko jeden zasób Client w %s\n" + +#: src/stored/stored.c:348 +#, fuzzy, c-format +msgid "No Director resource defined in %s. Cannot continue.\n" +msgstr "Brak definicji zasobu Dyrektora w %s\n" + +#: src/stored/stored.c:353 +#, fuzzy, c-format +msgid "No Device resource defined in %s. Cannot continue.\n" +msgstr "Brak definicji zasobu Dyrektora w %s\n" + +#: src/stored/stored.c:361 +#, fuzzy, c-format +msgid "No Messages resource defined in %s. Cannot continue.\n" +msgstr "Brak definicji zasobu Messages w %s\n" + +#: src/stored/stored.c:390 +#, fuzzy, c-format +msgid "\"TLS Certificate\" file not defined for Storage \"%s\" in %s.\n" +msgstr "Nie zdefiniowany plik \"TLS Certificate\" dla Dyrektora \"%s\" w %s.\n" + +#: src/stored/stored.c:396 +#, fuzzy, c-format +msgid "\"TLS Key\" file not defined for Storage \"%s\" in %s.\n" +msgstr "Nie zdefiniowany plik \"TLS Key\" dla Dyrektora \"%s\" w %s.\n" + +#: src/stored/stored.c:402 +#, fuzzy, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Storage \"%s\" in %s. At least one CA certificate store is required when " +"using \"TLS Verify Peer\".\n" +msgstr "" +"Ani \"TLS CA Certificate\", ani \"TLS CA Certificate Dir\" nie s " +"zdefiniowane dla Dyrektora \"%s\" w %s. Co najmniej jedno skadowanie " +"certyfikatu CA jest wymagane kiedy jest używane \"TLS Verify Peer\".\n" + +#: src/stored/stored.c:538 +#, fuzzy, c-format +msgid "Could not initialize %s\n" +msgstr "Nie można stworzyć %s: ERR=%s\n" + +#: src/stored/stored.c:551 +#, fuzzy, c-format +msgid "Could not open device %s\n" +msgstr "Nie można stworzyć skrt.\n" + +#: src/stored/stored.c:565 +#, fuzzy, c-format +msgid "Could not mount device %s\n" +msgstr "Nie można stworzyć skrt.\n" + +#: examples/nagios/check_bacula/check_bacula.c:59 +#, c-format +msgid "" +"Copyright (C) 2005 Christian Masopust\n" +"Written by Christian Masopust (2005)\n" +"\n" +"Version: " +msgstr "" + +#: src/baconfig.h:70 src/baconfig.h:71 +#, fuzzy, c-format +msgid "Failed ASSERT: %s\n" +msgstr "Za komenda level: %s\n" + +#: src/win32/libwin32/service.cpp:109 +msgid "RegisterServiceCtlHandler failed" +msgstr "" + +#: src/win32/libwin32/service.cpp:110 +msgid "Failure contacting the Service Handler" +msgstr "" + +#: src/win32/libwin32/service.cpp:121 +msgid "Service start report failed" +msgstr "" + +#: src/win32/libwin32/service.cpp:174 +msgid "StartServiceCtrlDispatcher failed." 
+msgstr "" + +#: src/win32/libwin32/service.cpp:181 +msgid "KERNEL32.DLL not found: Bacula service not started" +msgstr "" + +#: src/win32/libwin32/service.cpp:191 +msgid "Registry service not found: Bacula service not started" +msgstr "" + +#: src/win32/libwin32/service.cpp:193 +msgid "Registry service entry point not found" +msgstr "" + +#: src/win32/libwin32/service.cpp:214 +msgid "Report Service failure" +msgstr "" + +#: src/win32/libwin32/service.cpp:245 +#, fuzzy +msgid "Unable to install the service" +msgstr "Nie mogę otworzyć pliku \"%s\": ERR=%s\n" + +#: src/win32/libwin32/service.cpp:253 +msgid "Service command length too long" +msgstr "" + +#: src/win32/libwin32/service.cpp:254 +msgid "Service command length too long. Service not registered." +msgstr "" + +#: src/win32/libwin32/service.cpp:267 +msgid "" +"The Service Control Manager could not be contacted - the service was not " +"installed" +msgstr "" + +#: src/win32/libwin32/service.cpp:290 src/win32/libwin32/service.cpp:319 +#: src/win32/libwin32/service.cpp:366 src/win32/libwin32/service.cpp:373 +#: src/win32/libwin32/service.cpp:377 +msgid "The Bacula service: " +msgstr "" + +#: src/win32/libwin32/service.cpp:297 +msgid "" +"Provides file backup and restore services. Bacula -- the network backup " +"solution." +msgstr "" + +#: src/win32/libwin32/service.cpp:308 +msgid "Cannot write System Registry for " +msgstr "" + +#: src/win32/libwin32/service.cpp:309 +msgid "" +"The System Registry could not be updated - the Bacula service was not " +"installed" +msgstr "" + +#: src/win32/libwin32/service.cpp:318 +msgid "Cannot add Bacula key to System Registry" +msgstr "" + +#: src/win32/libwin32/service.cpp:329 +msgid "The " +msgstr "" + +#: src/win32/libwin32/service.cpp:384 +msgid "A existing Bacula service: " +msgstr "" + +#: src/win32/libwin32/service.cpp:392 +msgid "" +"The service Manager could not be contacted - the Bacula service was not " +"removed" +msgstr "" + +#: src/win32/libwin32/service.cpp:404 +msgid "" +"Could not find registry entry.\n" +"Service probably not registerd - the Bacula service was not removed" +msgstr "" + +#: src/win32/libwin32/service.cpp:410 +#, fuzzy +msgid "Could not delete Registry key for " +msgstr "Nie można stworzyć rekordu storage dla %s\n" + +#: src/win32/libwin32/service.cpp:420 +msgid "Bacula could not be contacted, probably not running" +msgstr "" + +#: src/win32/libwin32/service.cpp:427 +msgid "The Bacula service has been removed" +msgstr "" + +#: src/win32/libwin32/service.cpp:468 +msgid "SetServiceStatus failed" +msgstr "" + +#: src/win32/libwin32/service.cpp:494 +#, c-format +msgid "" +"\n" +"\n" +"%s error: %ld at %s:%d" +msgstr "" +"\n" +"\n" +"%s error: %ld at %s:%d" + +#: src/win32/libwin32/service.cpp:570 +#, c-format +msgid "Locked by: %s, duration: %ld seconds\n" +msgstr "Zablokowane przez: %s, duration: %ld seconds\n" + +#: src/win32/libwin32/service.cpp:574 +#, c-format +msgid "No longer locked\n" +msgstr "Koniec zablokowania\n" + +#: src/win32/libwin32/service.cpp:578 +msgid "Could not lock database" +msgstr "Nie można zablokować bazy danych" + +#: src/win32/libwin32/main.cpp:241 +msgid "Bad Command Line Option" +msgstr "Niepoprawna opcja w lini poleceń" + +#: src/win32/compat/compat.cpp:2625 +msgid "" +"\n" +"\n" +"Bacula ERROR: " +msgstr "" +"\n" +"\n" +"BŁĄD Bacula: " + +#: src/qt-console/bat_conf.cpp:154 +msgid "Console: name=%s\n" +msgstr "Konsola: nazwa=%s\n" + +#: src/qt-console/console/console.cpp:138 src/qt-console/bcomm/dircomm.cpp:216 +msgid "Connected" +msgstr 
"Podłączony" + +#: src/qt-console/console/console.cpp:363 src/qt-console/bcomm/dircomm.cpp:332 +msgid "Processing command ..." +msgstr "Przetwarzanie komendy ..." + +#: src/qt-console/bcomm/dircomm.cpp:89 +msgid "Already connected\"%s\".\n" +msgstr "Już podłączony\"%s\".\n" + +#: src/qt-console/bcomm/dircomm.cpp:104 +msgid "" +"Connecting to Director %s:%d\n" +"\n" +msgstr "" +"Podłączenie do Dyrektora %s:%d\n" +"\n" + +#: src/qt-console/bcomm/dircomm.cpp:201 +msgid "Initializing ..." +msgstr "Inicjalizowanie ..." + +#: src/qt-console/bcomm/dircomm.cpp:325 +msgid "Command completed ..." +msgstr "Zakończona komenda ..." + +#: src/qt-console/bcomm/dircomm.cpp:339 +msgid "At main prompt waiting for input ..." +msgstr "Główne polecenie czeka na dane ..." + +#: src/qt-console/bcomm/dircomm.cpp:346 +msgid "At prompt waiting for input ..." +msgstr "Polecenie czeka na dane ..." + +#: src/qt-console/bcomm/dircomm.cpp:361 +msgid "Command failed." +msgstr "Nieudana komenda." + +#: src/qt-console/bcomm/dircomm.cpp:428 +msgid "Director disconnected." +msgstr "Dyrektor rozłączony." + +#: src/qt-console/main.cpp:179 +msgid "" +"\n" +"Version: %s (%s) %s %s %s\n" +"\n" +"Usage: bat [-s] [-c config_file] [-d debug_level] [config_file]\n" +" -c set configuration file to file\n" +" -dnn set debug level to nn\n" +" -s no signals\n" +" -t test - read configuration and exit\n" +" -? print this message.\n" +"\n" +msgstr "" +"\n" +"Wersja: %s (%s) %s %s %s\n" +"\n" +"Użycie: bat [-s] [-c config_file] [-d debug_level] [config_file]\n" +" -c ustaw plik konfiguracyjny na file\n" +" -dnn ustaw poziom debugowania na nn\n" +" -s brak sygnałów\n" +" -t test - odczytuje konfiguracj i kończy działanie\n" +" -? wyświetla ten komunikat.\n" +"\n" diff --git a/po/quot.sed b/po/quot.sed new file mode 100644 index 00000000..0122c463 --- /dev/null +++ b/po/quot.sed @@ -0,0 +1,6 @@ +s/"\([^"]*\)"/“\1”/g +s/`\([^`']*\)'/‘\1’/g +s/ '\([^`']*\)' / ‘\1’ /g +s/ '\([^`']*\)'$/ ‘\1’/g +s/^'\([^`']*\)' /‘\1’ /g +s/“”/""/g diff --git a/po/remove-potcdate.sin b/po/remove-potcdate.sin new file mode 100644 index 00000000..2436c49e --- /dev/null +++ b/po/remove-potcdate.sin @@ -0,0 +1,19 @@ +# Sed script that remove the POT-Creation-Date line in the header entry +# from a POT file. +# +# The distinction between the first and the following occurrences of the +# pattern is achieved by looking at the hold space. +/^"POT-Creation-Date: .*"$/{ +x +# Test if the hold space is empty. +s/P/P/ +ta +# Yes it was empty. First occurrence. Remove the line. +g +d +bb +:a +# The hold space was nonempty. Following occurrences. Do nothing. +x +:b +} diff --git a/po/sv.po b/po/sv.po new file mode 100644 index 00000000..1667a2c5 --- /dev/null +++ b/po/sv.po @@ -0,0 +1,609 @@ +# Swedish translations for PACKAGE package. 
+# Copyright (C) 2000-2016 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +msgid "" +msgstr "" +"Project-Id-Version: Bacula 2.1.x\n" +"Report-Msgid-Bugs-To: bacula-devel@lists.sourceforge.net\n" +"POT-Creation-Date: 2018-08-11 21:43+0200\n" +"PO-Revision-Date: 2007-06-22 19:18+0200\n" +"Last-Translator: Kern Sibbald \n" +"Language-Team: Swedish \n" +"Language: sv\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=ASCII\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +#: src/baconfig.h:62 src/baconfig.h:63 src/baconfig.h:68 src/baconfig.h:69 +#: src/baconfig.h:80 src/baconfig.h:81 +#, c-format +msgid "Failed ASSERT: %s\n" +msgstr "" + +#: src/baconfig.h:89 +msgid "*None*" +msgstr "" + +#: src/lib/status.h:84 +msgid "" +"\n" +"Terminated Jobs:\n" +msgstr "" + +#: src/lib/status.h:91 +msgid " JobId Level Files Bytes Status Finished Name \n" +msgstr "" + +#: src/lib/status.h:93 +msgid "===================================================================\n" +msgstr "" + +#: src/lib/status.h:119 +msgid "Created" +msgstr "" + +#: src/lib/status.h:123 +msgid "Error" +msgstr "" + +#: src/lib/status.h:126 +msgid "Diffs" +msgstr "" + +#: src/lib/status.h:129 +msgid "Cancel" +msgstr "" + +#: src/lib/status.h:132 +msgid "OK" +msgstr "" + +#: src/lib/status.h:135 +msgid "OK -- with warnings" +msgstr "" + +#: src/lib/status.h:138 +msgid "Incomplete" +msgstr "" + +#: src/lib/status.h:141 +msgid "Other" +msgstr "" + +#: src/lib/status.h:153 +#, c-format +msgid "%6d\t%-6s\t%8s\t%10s\t%-7s\t%-8s\t%s\n" +msgstr "" + +#: src/lib/status.h:182 +#, c-format +msgid "%6d %-6s %8s %10s %-7s %-8s %s\n" +msgstr "" + +#: src/lib/status.h:214 src/lib/status.h:225 src/lib/status.h:239 +#: src/lib/status.h:243 src/lib/status.h:247 +msgid "Bacula " +msgstr "" + +#: src/qt-console/bat_conf.cpp:133 +#, c-format +msgid "No record for %d %s\n" +msgstr "" + +#: src/qt-console/bat_conf.cpp:142 +#, c-format +msgid "Director: name=%s address=%s DIRport=%d\n" +msgstr "" + +#: src/qt-console/bat_conf.cpp:146 +#, c-format +msgid "Console: name=%s\n" +msgstr "" + +#: src/qt-console/bat_conf.cpp:149 +#, c-format +msgid "ConsoleFont: name=%s font face=%s\n" +msgstr "" + +#: src/qt-console/bat_conf.cpp:153 src/qt-console/bat_conf.cpp:235 +#: src/qt-console/bat_conf.cpp:282 src/qt-console/bat_conf.cpp:312 +#, c-format +msgid "Unknown resource type %d\n" +msgstr "" + +#: src/qt-console/bat_conf.cpp:259 +#: src/qt-console/tray-monitor/tray_conf.cpp:311 +#, c-format +msgid "\"%s\" directive is required in \"%s\" resource, but not found.\n" +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:88 +#, c-format +msgid "Already connected\"%s\".\n" +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:99 +#, c-format +msgid "Connecting to Director %s:%d" +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:101 +#, c-format +msgid "" +"Connecting to Director %s:%d\n" +"\n" +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:153 +#, c-format +msgid "Failed to initialize TLS context for Console \"%s\".\n" +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:176 +#, c-format +msgid "Failed to initialize TLS context for Director \"%s\".\n" +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:198 +#: src/qt-console/tray-monitor/task.cpp:233 +msgid "Director daemon" +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:236 +msgid "Initializing ..." 
+msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:252 src/qt-console/console/console.cpp:133 +msgid "Connected" +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:377 +msgid "Command completed ..." +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:384 src/qt-console/console/console.cpp:370 +msgid "Processing command ..." +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:391 +msgid "At main prompt waiting for input ..." +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:398 src/qt-console/bcomm/dircomm.cpp:408 +msgid "At prompt waiting for input ..." +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:416 +msgid "Command failed." +msgstr "" + +#: src/qt-console/bcomm/dircomm.cpp:488 +msgid "Director disconnected." +msgstr "" + +#: src/qt-console/bcomm/dircomm_auth.cpp:110 +#, c-format +msgid "Director authorization problem at \"%s:%d\"\n" +msgstr "" + +#: src/qt-console/bcomm/dircomm_auth.cpp:117 +#, c-format +msgid "" +"Authorization problem: Remote server at \"%s:%d\" did not advertise required " +"TLS support.\n" +msgstr "" + +#: src/qt-console/bcomm/dircomm_auth.cpp:125 +#, c-format +msgid "" +"Authorization problem with Director at \"%s:%d\": Remote server requires " +"TLS.\n" +msgstr "" + +#: src/qt-console/bcomm/dircomm_auth.cpp:136 +#, c-format +msgid "TLS negotiation failed with Director at \"%s:%d\"\n" +msgstr "" + +#: src/qt-console/bcomm/dircomm_auth.cpp:148 +#, c-format +msgid "" +"Bad response to Hello command: ERR=%s\n" +"The Director at \"%s:%d\" is probably not running.\n" +msgstr "" + +#: src/qt-console/bcomm/dircomm_auth.cpp:165 +#, c-format +msgid "Director at \"%s:%d\" rejected Hello command\n" +msgstr "" + +#: src/qt-console/bcomm/dircomm_auth.cpp:182 +#, c-format +msgid "" +"Authorization problem with Director at \"%s:%d\"\n" +"Most likely the passwords do not agree.\n" +"If you are using TLS, there may have been a certificate validation error " +"during the TLS handshake.\n" +"For help, please see " +msgstr "" + +#: src/qt-console/main.cpp:160 +msgid "Cryptography library initialization failed.\n" +msgstr "" + +#: src/qt-console/main.cpp:164 +#, c-format +msgid "Please correct configuration file: %s\n" +msgstr "" + +#: src/qt-console/main.cpp:188 +#, c-format +msgid "" +"\n" +"%sVersion: %s (%s) %s %s %s\n" +"\n" +"Usage: bat [-s] [-c config_file] [-d debug_level] [config_file]\n" +" -c set configuration file to file\n" +" -dnn set debug level to nn\n" +" -s no signals\n" +" -t test - read configuration and exit\n" +" -? print this message.\n" +"\n" +msgstr "" + +#: src/qt-console/main.cpp:221 src/qt-console/main.cpp:251 +msgid "TLS required but not configured in Bacula.\n" +msgstr "" + +#: src/qt-console/main.cpp:229 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Director \"%s\" in %s. 
At least one CA certificate store is required.\n" +msgstr "" + +#: src/qt-console/main.cpp:238 +#, c-format +msgid "" +"No Director resource defined in %s\n" +"Without that I don't how to speak to the Director :-(\n" +msgstr "" + +#: src/qt-console/main.cpp:259 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Console \"%s\" in %s.\n" +msgstr "" + +#: src/qt-console/tray-monitor/authenticate.cpp:86 +msgid "" +"Authorization problem.\n" +"Most likely the passwords do not agree.\n" +"For help, please see " +msgstr "" + +#: src/qt-console/tray-monitor/authenticate.cpp:94 +msgid "" +"Authorization problem: Remote server did not advertise required TLS " +"support.\n" +msgstr "" + +#: src/qt-console/tray-monitor/authenticate.cpp:110 +msgid "TLS negotiation failed\n" +msgstr "" + +#: src/qt-console/tray-monitor/authenticate.cpp:117 +#, c-format +msgid "Bad response to Hello command: ERR=%s\n" +msgstr "" + +#: src/qt-console/tray-monitor/authenticate.cpp:134 +msgid "Daemon rejected Hello command\n" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:89 +msgid "The Name of the Monitor should be set" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:129 +msgid "The name of the Resource should be set" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:138 +#, c-format +msgid "The address of the Resource should be set for resource %s" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:147 +#, c-format +msgid "The Password of should be set for resource %s" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:171 +#, c-format +msgid "The TLS CA Certificate File should be a PEM file for resource %s" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:182 +#, c-format +msgid "The TLS CA Certificate Directory should be a directory for resource %s" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:193 +#, c-format +msgid "The TLS Certificate File should be a file for resource %s" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:204 +#, c-format +msgid "The TLS Key File should be a file for resource %s" +msgstr "" + +#: src/qt-console/tray-monitor/runjob.cpp:45 +msgid "This restricted console does not have access to Backup jobs" +msgstr "" + +#: src/qt-console/tray-monitor/runjob.cpp:123 +msgid "Nothing selected" +msgstr "" + +#: src/qt-console/tray-monitor/task.cpp:97 +msgid "Bandwidth can set only set on Client" +msgstr "" + +#: src/qt-console/tray-monitor/task.cpp:102 +msgid "Bandwidth parameter is invalid" +msgstr "" + +#: src/qt-console/tray-monitor/task.cpp:177 +msgid "Client daemon" +msgstr "" + +#: src/qt-console/tray-monitor/task.cpp:205 +msgid "Storage daemon" +msgstr "" + +#: src/qt-console/tray-monitor/tray-monitor.cpp:45 +#, c-format +msgid "" +"\n" +"%sVersion: %s (%s) %s %s %s\n" +"\n" +"Usage: tray-monitor [-c config_file] [-d debug_level]\n" +" -c set configuration file to file\n" +" -d set debug level to \n" +" -dt print timestamp in debug output\n" +" -t test - read configuration and exit\n" +" -W 0/1 force the detection of the systray\n" +" -? print this message.\n" +"\n" +msgstr "" + +#: src/qt-console/tray-monitor/tray-monitor.cpp:118 +msgid "TLS PassPhrase" +msgstr "" + +#: src/qt-console/tray-monitor/tray-monitor.cpp:164 +#, c-format +msgid "" +"Error: %d Monitor resources defined in %s. 
You must define one Monitor " +"resource.\n" +msgstr "" + +#: src/qt-console/tray-monitor/tray-ui.h:105 +#, c-format +msgid "Failed to initialize TLS context for \"%s\".\n" +msgstr "" + +#: src/qt-console/tray-monitor/tray-ui.h:320 +msgid "Select a Director" +msgstr "" + +#: src/qt-console/tray-monitor/tray_conf.cpp:172 +#, c-format +msgid "No %s resource defined\n" +msgstr "" + +#: src/qt-console/tray-monitor/tray_conf.cpp:181 +#, c-format +msgid "Monitor: name=%s\n" +msgstr "" + +#: src/qt-console/tray-monitor/tray_conf.cpp:184 +#, c-format +msgid "Director: name=%s address=%s port=%d\n" +msgstr "" + +#: src/qt-console/tray-monitor/tray_conf.cpp:188 +#, c-format +msgid "Client: name=%s address=%s port=%d\n" +msgstr "" + +#: src/qt-console/tray-monitor/tray_conf.cpp:192 +#, c-format +msgid "Storage: name=%s address=%s port=%d\n" +msgstr "" + +#: src/qt-console/tray-monitor/tray_conf.cpp:196 +#, c-format +msgid "Unknown resource type %d in dump_resource.\n" +msgstr "" + +#: src/qt-console/tray-monitor/tray_conf.cpp:284 +#, c-format +msgid "Unknown resource type %d in free_resource.\n" +msgstr "" + +#: src/qt-console/tray-monitor/tray_conf.cpp:318 +#, c-format +msgid "Too many directives in \"%s\" resource\n" +msgstr "" + +#: src/qt-console/tray-monitor/tray_conf.cpp:338 +#: src/qt-console/tray-monitor/tray_conf.cpp:372 +#, c-format +msgid "Unknown resource type %d in save_resource.\n" +msgstr "" + +#: src/win32/compat/compat.cpp:2879 +msgid "" +"\n" +"\n" +"Bacula ERROR: " +msgstr "" + +#: src/win32/filed/vss.cpp:244 src/win32/filed/vss.cpp:259 +#, c-format +msgid "pthread key create failed: ERR=%s\n" +msgstr "" + +#: src/win32/filed/vss.cpp:267 +#, c-format +msgid "pthread_setspecific failed: ERR=%s\n" +msgstr "" + +#: src/win32/filed/vss_generic.cpp:725 +#, c-format +msgid "Unable to find volume %ls in the device list\n" +msgstr "" + +#: src/win32/libwin32/main.cpp:227 +msgid "Bad Command Line Option" +msgstr "" + +#: src/win32/libwin32/service.cpp:98 +msgid "RegisterServiceCtlHandler failed" +msgstr "" + +#: src/win32/libwin32/service.cpp:99 +msgid "Failure contacting the Service Handler" +msgstr "" + +#: src/win32/libwin32/service.cpp:110 +msgid "Service start report failed" +msgstr "" + +#: src/win32/libwin32/service.cpp:163 +msgid "StartServiceCtrlDispatcher failed." +msgstr "" + +#: src/win32/libwin32/service.cpp:170 +msgid "KERNEL32.DLL not found: Bacula service not started" +msgstr "" + +#: src/win32/libwin32/service.cpp:180 +msgid "Registry service not found: Bacula service not started" +msgstr "" + +#: src/win32/libwin32/service.cpp:182 +msgid "Registry service entry point not found" +msgstr "" + +#: src/win32/libwin32/service.cpp:204 +msgid "Report Service failure" +msgstr "" + +#: src/win32/libwin32/service.cpp:235 +msgid "Unable to install the service" +msgstr "" + +#: src/win32/libwin32/service.cpp:243 +msgid "Service command length too long" +msgstr "" + +#: src/win32/libwin32/service.cpp:244 +msgid "Service command length too long. Service not registered." +msgstr "" + +#: src/win32/libwin32/service.cpp:257 +msgid "" +"The Service Control Manager could not be contacted - the service was not " +"installed" +msgstr "" + +#: src/win32/libwin32/service.cpp:280 src/win32/libwin32/service.cpp:309 +#: src/win32/libwin32/service.cpp:355 src/win32/libwin32/service.cpp:362 +#: src/win32/libwin32/service.cpp:366 +msgid "The Bacula service: " +msgstr "" + +#: src/win32/libwin32/service.cpp:287 +msgid "" +"Provides file backup and restore services. 
Bacula -- the network backup " +"solution." +msgstr "" + +#: src/win32/libwin32/service.cpp:298 +msgid "Cannot write System Registry for " +msgstr "" + +#: src/win32/libwin32/service.cpp:299 +msgid "" +"The System Registry could not be updated - the Bacula service was not " +"installed" +msgstr "" + +#: src/win32/libwin32/service.cpp:308 +msgid "Cannot add Bacula key to System Registry" +msgstr "" + +#: src/win32/libwin32/service.cpp:319 +msgid "The " +msgstr "" + +#: src/win32/libwin32/service.cpp:373 +msgid "An existing Bacula service: " +msgstr "" + +#: src/win32/libwin32/service.cpp:381 +msgid "" +"The service Manager could not be contacted - the Bacula service was not " +"removed" +msgstr "" + +#: src/win32/libwin32/service.cpp:394 +msgid "" +"Could not find registry entry.\n" +"Service probably not registerd - the Bacula service was not removed" +msgstr "" + +#: src/win32/libwin32/service.cpp:401 +msgid "Could not delete Registry key for " +msgstr "" + +#: src/win32/libwin32/service.cpp:411 +msgid "Bacula could not be contacted, probably not running" +msgstr "" + +#: src/win32/libwin32/service.cpp:418 +msgid "The Bacula service has been removed" +msgstr "" + +#: src/win32/libwin32/service.cpp:459 +msgid "SetServiceStatus failed" +msgstr "" + +#: src/win32/libwin32/service.cpp:485 +#, c-format +msgid "" +"\n" +"\n" +"%s error: %ld at %s:%d" +msgstr "" + +#: src/win32/libwin32/service.cpp:561 +#, c-format +msgid "Locked by: %s, duration: %ld seconds\n" +msgstr "" + +#: src/win32/libwin32/service.cpp:565 +#, c-format +msgid "No longer locked\n" +msgstr "" + +#: src/win32/libwin32/service.cpp:569 +msgid "Could not lock database" +msgstr "" diff --git a/po/uk.po b/po/uk.po new file mode 100644 index 00000000..a0923410 --- /dev/null +++ b/po/uk.po @@ -0,0 +1,6436 @@ +# Ukrainian translations for bacula package +# Український переклад bacula. +# Copyright (C) 2000-2018 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# Vitaliy Kosharskiy , 2010. +# +msgid "" +msgstr "" +"Project-Id-Version: bacula 9.2.1\n" +"Report-Msgid-Bugs-To: bacula-devel@lists.sourceforge.net\n" +"POT-Creation-Date: 2018-08-11 21:43+0200\n" +"PO-Revision-Date: 2010-01-08 17:32+0300\n" +"Last-Translator: Vitaliy Kosharskiy \n" +"Language-Team: Ukrainian\n" +"Language: \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" +"%10<=4 && (n%100<10 || n%100>=20) ? 
1 : 2);\n" + +#: src/baconfig.h:62 src/baconfig.h:63 src/baconfig.h:68 src/baconfig.h:69 +#: src/baconfig.h:80 src/baconfig.h:81 +#, c-format +msgid "Failed ASSERT: %s\n" +msgstr "" + +#: src/baconfig.h:89 +msgid "*None*" +msgstr "" + +#: src/lib/status.h:84 +msgid "" +"\n" +"Terminated Jobs:\n" +msgstr "" + +#: src/lib/status.h:91 +msgid " JobId Level Files Bytes Status Finished Name \n" +msgstr "" + +#: src/lib/status.h:93 +msgid "===================================================================\n" +msgstr "" + +#: src/lib/status.h:119 +msgid "Created" +msgstr "" + +#: src/lib/status.h:123 +msgid "Error" +msgstr "" + +#: src/lib/status.h:126 +msgid "Diffs" +msgstr "" + +#: src/lib/status.h:129 +msgid "Cancel" +msgstr "" + +#: src/lib/status.h:132 +msgid "OK" +msgstr "" + +#: src/lib/status.h:135 +msgid "OK -- with warnings" +msgstr "" + +#: src/lib/status.h:138 +msgid "Incomplete" +msgstr "" + +#: src/lib/status.h:141 +msgid "Other" +msgstr "" + +#: src/lib/status.h:153 +#, c-format +msgid "%6d\t%-6s\t%8s\t%10s\t%-7s\t%-8s\t%s\n" +msgstr "" + +#: src/lib/status.h:182 +#, c-format +msgid "%6d %-6s %8s %10s %-7s %-8s %s\n" +msgstr "" + +#: src/lib/status.h:214 src/lib/status.h:225 src/lib/status.h:239 +#: src/lib/status.h:243 src/lib/status.h:247 +msgid "Bacula " +msgstr "" + +#: src/qt-console/bat_conf.cpp:133 +#, c-format +msgid "No record for %d %s\n" +msgstr "Відсутні записи для %d %s\n" + +#: src/qt-console/bat_conf.cpp:142 +#, c-format +msgid "Director: name=%s address=%s DIRport=%d\n" +msgstr "Керівник: назва=%s адреса=%s DIRport=%d\n" + +#: src/qt-console/bat_conf.cpp:146 +#, c-format +msgid "Console: name=%s\n" +msgstr "Консоль: назва=%s\n" + +#: src/qt-console/bat_conf.cpp:149 +#, fuzzy, c-format +msgid "ConsoleFont: name=%s font face=%s\n" +msgstr "Консоль: назва=%s rcfile=%s histfile=%s\n" + +#: src/qt-console/bat_conf.cpp:153 src/qt-console/bat_conf.cpp:235 +#: src/qt-console/bat_conf.cpp:282 src/qt-console/bat_conf.cpp:312 +#, c-format +msgid "Unknown resource type %d\n" +msgstr "Невідомий тип ресурсу %d\n" + +#: src/qt-console/bat_conf.cpp:259 +#: src/qt-console/tray-monitor/tray_conf.cpp:311 +#, fuzzy, c-format +msgid "\"%s\" directive is required in \"%s\" resource, but not found.\n" +msgstr "Не знайдено необхідний елемент %s для ресурсу %s.\n" + +#: src/qt-console/bcomm/dircomm.cpp:88 +#, c-format +msgid "Already connected\"%s\".\n" +msgstr "Вже приєднано\"%s\".\n" + +#: src/qt-console/bcomm/dircomm.cpp:99 +#, c-format +msgid "Connecting to Director %s:%d" +msgstr "Приєднуюсь до Керівника %s:%d" + +#: src/qt-console/bcomm/dircomm.cpp:101 +#, c-format +msgid "" +"Connecting to Director %s:%d\n" +"\n" +msgstr "" +"З'єднуюсь із Керівником %s:%d\n" +"\n" + +#: src/qt-console/bcomm/dircomm.cpp:153 +#, c-format +msgid "Failed to initialize TLS context for Console \"%s\".\n" +msgstr "Ініціалізація контексту TLS для Консолі невдала \"%s\".\n" + +#: src/qt-console/bcomm/dircomm.cpp:176 +#, c-format +msgid "Failed to initialize TLS context for Director \"%s\".\n" +msgstr "Ініціалізація контексту TLS для Керівника невдала \"%s\".\n" + +#: src/qt-console/bcomm/dircomm.cpp:198 +#: src/qt-console/tray-monitor/task.cpp:233 +msgid "Director daemon" +msgstr "Керівник" + +#: src/qt-console/bcomm/dircomm.cpp:236 +msgid "Initializing ..." +msgstr "Ініціалізація ..." + +#: src/qt-console/bcomm/dircomm.cpp:252 src/qt-console/console/console.cpp:133 +msgid "Connected" +msgstr "З'єднано" + +#: src/qt-console/bcomm/dircomm.cpp:377 +msgid "Command completed ..." +msgstr "Команду виконано ..." 
+ +#: src/qt-console/bcomm/dircomm.cpp:384 src/qt-console/console/console.cpp:370 +msgid "Processing command ..." +msgstr "Виконання команди ..." + +#: src/qt-console/bcomm/dircomm.cpp:391 +msgid "At main prompt waiting for input ..." +msgstr "Очікуться введення у основній рядку ..." + +#: src/qt-console/bcomm/dircomm.cpp:398 src/qt-console/bcomm/dircomm.cpp:408 +msgid "At prompt waiting for input ..." +msgstr "Очікуться введення у рядку ..." + +#: src/qt-console/bcomm/dircomm.cpp:416 +msgid "Command failed." +msgstr "Помилка команди" + +#: src/qt-console/bcomm/dircomm.cpp:488 +msgid "Director disconnected." +msgstr "Керівник від'єднано" + +#: src/qt-console/bcomm/dircomm_auth.cpp:110 +#, c-format +msgid "Director authorization problem at \"%s:%d\"\n" +msgstr "Проблеми авторизації Директора \"%s:%d\"\n" + +#: src/qt-console/bcomm/dircomm_auth.cpp:117 +#, c-format +msgid "" +"Authorization problem: Remote server at \"%s:%d\" did not advertise required " +"TLS support.\n" +msgstr "" +"Проблеми авторизації: Віддалений сервер \"%s:%d\" не повідомляє про " +"необхідність використання TLS.\n" + +#: src/qt-console/bcomm/dircomm_auth.cpp:125 +#, c-format +msgid "" +"Authorization problem with Director at \"%s:%d\": Remote server requires " +"TLS.\n" +msgstr "" +"Проблеми авторизації Керівника \"%s:%d\": Віддалений сепвер вимагає TLS.\n" + +#: src/qt-console/bcomm/dircomm_auth.cpp:136 +#, c-format +msgid "TLS negotiation failed with Director at \"%s:%d\"\n" +msgstr "Встановлення TLS із Керівником невдале \"%s:%d\"\n" + +#: src/qt-console/bcomm/dircomm_auth.cpp:148 +#, c-format +msgid "" +"Bad response to Hello command: ERR=%s\n" +"The Director at \"%s:%d\" is probably not running.\n" +msgstr "" +"Погана відповідь на команду Hello: ERR=%s\n" +"Керівник \"%s:%d\" ймовірно не запущений.\n" + +#: src/qt-console/bcomm/dircomm_auth.cpp:165 +#, c-format +msgid "Director at \"%s:%d\" rejected Hello command\n" +msgstr "Керівник \"%s:%d\" відкинув команду Hello\n" + +#: src/qt-console/bcomm/dircomm_auth.cpp:182 +#, fuzzy, c-format +msgid "" +"Authorization problem with Director at \"%s:%d\"\n" +"Most likely the passwords do not agree.\n" +"If you are using TLS, there may have been a certificate validation error " +"during the TLS handshake.\n" +"For help, please see " +msgstr "" +"Проблеми авторизації із Керівником \"%s:%d\"\n" +"Швидше за все, проблема у паролях.\n" +"Якщо Ви використовуєте TLS, можливо, невдала перевірка сертифікату під час " +"TLS handshake.\n" +"Для отримання допомоги, будь ласка, перегляньте http://www.bacula.org/en/rel-" +"manual/Bacula_Freque_Asked_Questi.html#SECTION003760000000000000000.\n" + +#: src/qt-console/main.cpp:160 +msgid "Cryptography library initialization failed.\n" +msgstr "Ініціалізація криптографії невдала.\n" + +#: src/qt-console/main.cpp:164 +#, c-format +msgid "Please correct configuration file: %s\n" +msgstr "Будьласка виправте файл конфігурації: %s\n" + +#: src/qt-console/main.cpp:188 +#, fuzzy, c-format +msgid "" +"\n" +"%sVersion: %s (%s) %s %s %s\n" +"\n" +"Usage: bat [-s] [-c config_file] [-d debug_level] [config_file]\n" +" -c set configuration file to file\n" +" -dnn set debug level to nn\n" +" -s no signals\n" +" -t test - read configuration and exit\n" +" -? 
print this message.\n" +"\n" +msgstr "" +"\n" +"Версія: %s (%s) %s %s %s\n" +"\n" +"Використання: bat [-s] [-c config_file] [-d debug_level] [config_file]\n" +" -c set configuration file to file\n" +" -dnn set debug level to nn\n" +" -s без сигналів\n" +" -t перевірка - прочитати конфігурацію і вийти\n" +" -? print this message.\n" +"\n" + +#: src/qt-console/main.cpp:221 src/qt-console/main.cpp:251 +msgid "TLS required but not configured in Bacula.\n" +msgstr "TLS необхідний, але не налаштовано у Bacula.\n" + +#: src/qt-console/main.cpp:229 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Director \"%s\" in %s. At least one CA certificate store is required.\n" +msgstr "" +"Жоден із параметрів \"TLS CA Certificate\" або \"TLS CA Certificate Dir\" не " +"задано для Керівника \"%s\" у %s. Необхідне щонайменше одне сховище для " +"сертифікату CA.\n" + +#: src/qt-console/main.cpp:238 +#, c-format +msgid "" +"No Director resource defined in %s\n" +"Without that I don't how to speak to the Director :-(\n" +msgstr "" +"У %s не задано ресурсу Керівника\n" +"Без цього я не знаю як спілкуватись із Керівником :-(\n" + +#: src/qt-console/main.cpp:259 +#, c-format +msgid "" +"Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined for " +"Console \"%s\" in %s.\n" +msgstr "" +"Жоден із параметрів \"TLS CA Certificate\" або \"TLS CA Certificate Dir\" не " +"задано для Консолі \"%s\" у %s.\n" + +#: src/qt-console/tray-monitor/authenticate.cpp:86 +#, fuzzy +msgid "" +"Authorization problem.\n" +"Most likely the passwords do not agree.\n" +"For help, please see " +msgstr "" +"Проблеми під час авторизації Керівником.\n" +"Швидше за все, невірні паролі.\n" +"Для отримання допомоги, будь ласка, перегляньте http://www.bacula.org/en/rel-" +"manual/Bacula_Freque_Asked_Questi.html#SECTION003760000000000000000.\n" + +#: src/qt-console/tray-monitor/authenticate.cpp:94 +msgid "" +"Authorization problem: Remote server did not advertise required TLS " +"support.\n" +msgstr "" +"Проблеми авторизації: Віддалений сервер не повідомив про необхідність " +"використання TLS.\n" + +#: src/qt-console/tray-monitor/authenticate.cpp:110 +msgid "TLS negotiation failed\n" +msgstr "Невдале встановлення з'єднання TLS\n" + +#: src/qt-console/tray-monitor/authenticate.cpp:117 +#, c-format +msgid "Bad response to Hello command: ERR=%s\n" +msgstr "Погана відповідь на команду Hello: ERR=%s\n" + +#: src/qt-console/tray-monitor/authenticate.cpp:134 +#, fuzzy +msgid "Daemon rejected Hello command\n" +msgstr "Збирач відхилив команду Hello \n" + +#: src/qt-console/tray-monitor/conf.cpp:89 +msgid "The Name of the Monitor should be set" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:129 +msgid "The name of the Resource should be set" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:138 +#, c-format +msgid "The address of the Resource should be set for resource %s" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:147 +#, c-format +msgid "The Password of should be set for resource %s" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:171 +#, c-format +msgid "The TLS CA Certificate File should be a PEM file for resource %s" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:182 +#, c-format +msgid "The TLS CA Certificate Directory should be a directory for resource %s" +msgstr "" + +#: src/qt-console/tray-monitor/conf.cpp:193 +#, c-format +msgid "The TLS Certificate File should be a file for resource %s" +msgstr "" + +#: 
src/qt-console/tray-monitor/conf.cpp:204 +#, c-format +msgid "The TLS Key File should be a file for resource %s" +msgstr "" + +#: src/qt-console/tray-monitor/runjob.cpp:45 +msgid "This restricted console does not have access to Backup jobs" +msgstr "" + +#: src/qt-console/tray-monitor/runjob.cpp:123 +#, fuzzy +msgid "Nothing selected" +msgstr "Статус задачі: Відмінена" + +#: src/qt-console/tray-monitor/task.cpp:97 +msgid "Bandwidth can set only set on Client" +msgstr "" + +#: src/qt-console/tray-monitor/task.cpp:102 +msgid "Bandwidth parameter is invalid" +msgstr "" + +#: src/qt-console/tray-monitor/task.cpp:177 +#, fuzzy +msgid "Client daemon" +msgstr "Збирач" + +#: src/qt-console/tray-monitor/task.cpp:205 +msgid "Storage daemon" +msgstr "Зберігач" + +#: src/qt-console/tray-monitor/tray-monitor.cpp:45 +#, fuzzy, c-format +msgid "" +"\n" +"%sVersion: %s (%s) %s %s %s\n" +"\n" +"Usage: tray-monitor [-c config_file] [-d debug_level]\n" +" -c set configuration file to file\n" +" -d set debug level to \n" +" -dt print timestamp in debug output\n" +" -t test - read configuration and exit\n" +" -W 0/1 force the detection of the systray\n" +" -? print this message.\n" +"\n" +msgstr "" +"Автор Nicolas Boichat (2004)\n" +"\n" +"Версія: %s (%s) %s %s %s\n" +"\n" +"Використання: tray-monitor [-c config_file] [-d debug_level]\n" +" -c задати конфігураційний файл \n" +" -d встановити рівень відлагоджування у \n" +" -dt виводити часову мітку у даних відлагоджування\n" +" -t перевірка - прогитати конфігурацію і завершити\n" +" -? показати це повідомлення.\n" +"\n" + +#: src/qt-console/tray-monitor/tray-monitor.cpp:118 +msgid "TLS PassPhrase" +msgstr "" + +#: src/qt-console/tray-monitor/tray-monitor.cpp:164 +#, fuzzy, c-format +msgid "" +"Error: %d Monitor resources defined in %s. You must define one Monitor " +"resource.\n" +msgstr "" +"Помилка: %d ресурсів Спостерігача визначено у %s. 
Ви повинні визначити одині " +"тільки один ресурс Спостерігача.\n" + +#: src/qt-console/tray-monitor/tray-ui.h:105 +#, fuzzy, c-format +msgid "Failed to initialize TLS context for \"%s\".\n" +msgstr "Ініціалізація контексту TLS для Консолі невдала \"%s\".\n" + +#: src/qt-console/tray-monitor/tray-ui.h:320 +#, fuzzy +msgid "Select a Director" +msgstr "Не знайдено відбиток MD5 Набору Файлів.\n" + +#: src/qt-console/tray-monitor/tray_conf.cpp:172 +#, c-format +msgid "No %s resource defined\n" +msgstr "Ресурс %s не визначено\n" + +#: src/qt-console/tray-monitor/tray_conf.cpp:181 +#, fuzzy, c-format +msgid "Monitor: name=%s\n" +msgstr "Консоль: назва=%s\n" + +#: src/qt-console/tray-monitor/tray_conf.cpp:184 +#, fuzzy, c-format +msgid "Director: name=%s address=%s port=%d\n" +msgstr "Керівник: назва=%s address=%s FDport=%d\n" + +#: src/qt-console/tray-monitor/tray_conf.cpp:188 +#, fuzzy, c-format +msgid "Client: name=%s address=%s port=%d\n" +msgstr "Керівник: назва=%s address=%s FDport=%d\n" + +#: src/qt-console/tray-monitor/tray_conf.cpp:192 +#, fuzzy, c-format +msgid "Storage: name=%s address=%s port=%d\n" +msgstr "Сховище: назва=%s address=%s SDport=%d\n" + +#: src/qt-console/tray-monitor/tray_conf.cpp:196 +#, c-format +msgid "Unknown resource type %d in dump_resource.\n" +msgstr "Невідомий тип ресурсу %d у dump_resource.\n" + +#: src/qt-console/tray-monitor/tray_conf.cpp:284 +#, c-format +msgid "Unknown resource type %d in free_resource.\n" +msgstr "Невідомий тип ресурсу %d у free_resource.\n" + +#: src/qt-console/tray-monitor/tray_conf.cpp:318 +#, fuzzy, c-format +msgid "Too many directives in \"%s\" resource\n" +msgstr "Забагато елементів у ресурсі %s\n" + +#: src/qt-console/tray-monitor/tray_conf.cpp:338 +#: src/qt-console/tray-monitor/tray_conf.cpp:372 +#, c-format +msgid "Unknown resource type %d in save_resource.\n" +msgstr "Невідомий тип ресурсу %d у save_resource.\n" + +#: src/win32/compat/compat.cpp:2879 +msgid "" +"\n" +"\n" +"Bacula ERROR: " +msgstr "" + +#: src/win32/filed/vss.cpp:244 src/win32/filed/vss.cpp:259 +#, fuzzy, c-format +msgid "pthread key create failed: ERR=%s\n" +msgstr "Помилка у %s файл %s: ERR=%s\n" + +#: src/win32/filed/vss.cpp:267 +#, fuzzy, c-format +msgid "pthread_setspecific failed: ERR=%s\n" +msgstr "Помилка у %s файл %s: ERR=%s\n" + +#: src/win32/filed/vss_generic.cpp:725 +#, fuzzy, c-format +msgid "Unable to find volume %ls in the device list\n" +msgstr "У картотеці створено новий Том \"%s\".\n" + +#: src/win32/libwin32/main.cpp:227 +msgid "Bad Command Line Option" +msgstr "Не зрозумілий параметр командного рядка" + +#: src/win32/libwin32/service.cpp:98 +msgid "RegisterServiceCtlHandler failed" +msgstr "" + +#: src/win32/libwin32/service.cpp:99 +msgid "Failure contacting the Service Handler" +msgstr "" + +#: src/win32/libwin32/service.cpp:110 +msgid "Service start report failed" +msgstr "" + +#: src/win32/libwin32/service.cpp:163 +msgid "StartServiceCtrlDispatcher failed." 
+msgstr "" + +#: src/win32/libwin32/service.cpp:170 +msgid "KERNEL32.DLL not found: Bacula service not started" +msgstr "" + +#: src/win32/libwin32/service.cpp:180 +msgid "Registry service not found: Bacula service not started" +msgstr "" + +#: src/win32/libwin32/service.cpp:182 +#, fuzzy +msgid "Registry service entry point not found" +msgstr "Не знайдено відбиток MD5 Набору Файлів.\n" + +#: src/win32/libwin32/service.cpp:204 +#, fuzzy +msgid "Report Service failure" +msgstr "Помилка команди" + +#: src/win32/libwin32/service.cpp:235 +#, fuzzy +msgid "Unable to install the service" +msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#: src/win32/libwin32/service.cpp:243 +msgid "Service command length too long" +msgstr "" + +#: src/win32/libwin32/service.cpp:244 +msgid "Service command length too long. Service not registered." +msgstr "" + +#: src/win32/libwin32/service.cpp:257 +msgid "" +"The Service Control Manager could not be contacted - the service was not " +"installed" +msgstr "" + +#: src/win32/libwin32/service.cpp:280 src/win32/libwin32/service.cpp:309 +#: src/win32/libwin32/service.cpp:355 src/win32/libwin32/service.cpp:362 +#: src/win32/libwin32/service.cpp:366 +msgid "The Bacula service: " +msgstr "" + +#: src/win32/libwin32/service.cpp:287 +msgid "" +"Provides file backup and restore services. Bacula -- the network backup " +"solution." +msgstr "" + +#: src/win32/libwin32/service.cpp:298 +#, fuzzy +msgid "Cannot write System Registry for " +msgstr "Не вдалось відновити прапорці для файлу %s: ERR=%s\n" + +#: src/win32/libwin32/service.cpp:299 +msgid "" +"The System Registry could not be updated - the Bacula service was not " +"installed" +msgstr "" + +#: src/win32/libwin32/service.cpp:308 +msgid "Cannot add Bacula key to System Registry" +msgstr "" + +#: src/win32/libwin32/service.cpp:319 +msgid "The " +msgstr "" + +#: src/win32/libwin32/service.cpp:373 +msgid "An existing Bacula service: " +msgstr "" + +#: src/win32/libwin32/service.cpp:381 +msgid "" +"The service Manager could not be contacted - the Bacula service was not " +"removed" +msgstr "" + +#: src/win32/libwin32/service.cpp:394 +msgid "" +"Could not find registry entry.\n" +"Service probably not registerd - the Bacula service was not removed" +msgstr "" + +#: src/win32/libwin32/service.cpp:401 +#, fuzzy +msgid "Could not delete Registry key for " +msgstr "Не вдалось відновити прапорці для файлу %s: ERR=%s\n" + +#: src/win32/libwin32/service.cpp:411 +msgid "Bacula could not be contacted, probably not running" +msgstr "" + +#: src/win32/libwin32/service.cpp:418 +msgid "The Bacula service has been removed" +msgstr "" + +#: src/win32/libwin32/service.cpp:459 +msgid "SetServiceStatus failed" +msgstr "" + +#: src/win32/libwin32/service.cpp:485 +#, c-format +msgid "" +"\n" +"\n" +"%s error: %ld at %s:%d" +msgstr "" + +#: src/win32/libwin32/service.cpp:561 +#, c-format +msgid "Locked by: %s, duration: %ld seconds\n" +msgstr "" + +#: src/win32/libwin32/service.cpp:565 +#, c-format +msgid "No longer locked\n" +msgstr "" + +#: src/win32/libwin32/service.cpp:569 +#, fuzzy +msgid "Could not lock database" +msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Query failed: %s: ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to initialize DB lock. ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Attribute create error. 
%s" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#, fuzzy +#~ msgid "error fetching row: %s\n" +#~ msgstr "Помилка у %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Fetch failed: ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "error ending batch mode: %s" +#~ msgstr "Помилка надсилання Hello до Збирача. ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "insert %s failed:\n" +#~ "%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "update %s failed:\n" +#~ "%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "delete %s failed:\n" +#~ "%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not init database batch connection\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not open database \"%s\": ERR=%s\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Create DB Job record %s failed. ERR=%s\n" +#~ msgstr "Не можливо створити теку %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Create JobMedia record %s failed: ERR=%s\n" +#~ msgstr "Не можливо створити теку %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Update Media record %s failed: ERR=%s\n" +#~ msgstr "Не можливо створити теку %s: ERR=%s\n" + +#, fuzzy +#~ msgid "pool record %s already exists\n" +#~ msgstr "Файл пропущено. Вже існує: %s\n" + +#, fuzzy +#~ msgid "Create db Pool record %s failed: ERR=%s\n" +#~ msgstr "Не можливо створити теку %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Device record %s already exists\n" +#~ msgstr "Файл пропущено. Вже існує: %s\n" + +#, fuzzy +#~ msgid "Create db Device record %s failed: ERR=%s\n" +#~ msgstr "Не можливо створити теку %s: ERR=%s\n" + +#, fuzzy +#~ msgid "More than one Storage record!: %d\n" +#~ msgstr "Приєднуюсь до Зберігача %s:%d\n" + +#, fuzzy +#~ msgid "error fetching Storage row: %s\n" +#~ msgstr "Приєднуюсь до Зберігача %s:%d\n" + +#, fuzzy +#~ msgid "Create DB Storage record %s failed. ERR=%s\n" +#~ msgstr "Не можливо створити теку %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Create db mediatype record %s failed: ERR=%s\n" +#~ msgstr "Не можливо створити теку %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Volume \"%s\" already exists.\n" +#~ msgstr "Файл пропущено. Вже існує: %s\n" + +#, fuzzy +#~ msgid "Create DB Media record %s failed. ERR=%s\n" +#~ msgstr "Не можливо створити теку %s: ERR=%s\n" + +#, fuzzy +#~ msgid "More than one Client!: %d\n" +#~ msgstr "Приєднуюсь до Клієнта %s:%d\n" + +#, fuzzy +#~ msgid "Create DB Client record %s failed. ERR=%s\n" +#~ msgstr "Не можливо створити теку %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Create db Path record %s failed. ERR=%s\n" +#~ msgstr "Не можливо створити теку %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Create DB Counters record %s failed. ERR=%s\n" +#~ msgstr "Не можливо створити теку %s: ERR=%s\n" + +#, fuzzy +#~ msgid "error fetching FileSet row: ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Create DB FileSet record %s failed. ERR=%s\n" +#~ msgstr "Не можливо створити теку %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Create db File record %s failed. ERR=%s" +#~ msgstr "Не можливо створити теку %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Error fetching row for file=%s: ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Create db Filename record %s failed. ERR=%s\n" +#~ msgstr "Не можливо створити теку %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Create db Object record %s failed. 
ERR=%s" +#~ msgstr "Не можливо створити теку %s: ERR=%s\n" + +#, fuzzy +#~ msgid "No pool record %s exists\n" +#~ msgstr "Відсутні записи для %d %s\n" + +#, fuzzy +#~ msgid "Expecting one pool record, got %d\n" +#~ msgstr "Не вдалось відновити прапорці для файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Error fetching row %s\n" +#~ msgstr "Помилка у %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unknown level=%d\n" +#~ msgstr "Невідомий тип ресурсу %d\n" + +#, fuzzy +#~ msgid "" +#~ "No Job record found: ERR=%s\n" +#~ "CMD=%s\n" +#~ msgstr "Відсутні записи для %d %s\n" + +#, fuzzy +#~ msgid "Unknown Job level=%d\n" +#~ msgstr "Невідомий тип ресурсу %d\n" + +#, fuzzy +#~ msgid "No Job found for: %s.\n" +#~ msgstr "Відсутні записи для %d %s\n" + +#, fuzzy +#~ msgid "No Job found for: %s\n" +#~ msgstr "Відсутні записи для %d %s\n" + +#, fuzzy +#~ msgid "No Volume record found for item %d.\n" +#~ msgstr "Відсутні записи для %d %s\n" + +#, fuzzy +#~ msgid "Error fetching row: %s\n" +#~ msgstr "Помилка у %s: ERR=%s\n" + +#, fuzzy +#~ msgid "File record for PathId=%s FilenameId=%s not found.\n" +#~ msgstr "Заборонені символи у назві Тому \"%s\"\n" + +#, fuzzy +#~ msgid "File record not found in Catalog.\n" +#~ msgstr "Не знайдено відбиток MD5 Набору Файлів.\n" + +#, fuzzy +#~ msgid "Filename record: %s not found.\n" +#~ msgstr "Не знайдено відбиток MD5 Набору Файлів.\n" + +#, fuzzy +#~ msgid "Path record: %s not found.\n" +#~ msgstr "Не знайдено відбиток MD5 Набору Файлів.\n" + +#, fuzzy +#~ msgid "No Job found for JobId %s\n" +#~ msgstr "Відсутні записи для %d %s\n" + +#, fuzzy +#~ msgid "Error fetching row %d: ERR=%s\n" +#~ msgstr "Помилка у %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Pool id select failed: ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Client id select failed: ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "More than one Client!: %s\n" +#~ msgstr "Приєднуюсь до Клієнта %s:%d\n" + +#, fuzzy +#~ msgid "Client record not found in Catalog.\n" +#~ msgstr "Не знайдено відбиток MD5 Набору Файлів.\n" + +#, fuzzy +#~ msgid "FileSet record \"%s\" not found.\n" +#~ msgstr "Не знайдено відбиток MD5 Набору Файлів.\n" + +#, fuzzy +#~ msgid "FileSet record not found in Catalog.\n" +#~ msgstr "Не знайдено відбиток MD5 Набору Файлів.\n" + +#, fuzzy +#~ msgid "Media id select failed: ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "query dbids failed: ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Media record with MediaId=%s not found.\n" +#~ msgstr "Заборонені символи у назві Тому \"%s\"\n" + +#, fuzzy +#~ msgid "Media record for Volume name \"%s\" not found.\n" +#~ msgstr "Заборонені символи у назві Тому \"%s\"\n" + +#, fuzzy +#~ msgid "Media record for MediaId=%u not found in Catalog.\n" +#~ msgstr "Заборонені символи у назві Тому \"%s\"\n" + +#, fuzzy +#~ msgid "Media record for Volume Name \"%s\" not found in Catalog.\n" +#~ msgstr "Заборонені символи у назві Тому \"%s\"\n" + +#, fuzzy +#~ msgid "More than one Snapshot!: %s\n" +#~ msgstr "Не можливо створити теку %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Snapshot record with SnapshotId=%s not found.\n" +#~ msgstr "Заборонені символи у назві Тому \"%s\"\n" + +#, fuzzy +#~ msgid "Snapshot record for Snapshot name \"%s\" not found.\n" +#~ msgstr "Заборонені символи у назві Тому \"%s\"\n" + +#, fuzzy +#~ msgid "Query failed: %s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Database %s does not exist, please create it.\n" +#~ 
msgstr "Спроба створити вже існуючий Том \"%s\". Спробуйте ще раз.\n" + +#, fuzzy +#~ msgid "Unable to open Database=%s. ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#~ msgid "Authorization problem: Remote server requires TLS.\n" +#~ msgstr "Проблеми авторизації: Віддалений сервер вимагає використання TLS.\n" + +#~ msgid "Director rejected Hello command\n" +#~ msgstr "Керівник не прийняв команду Hello\n" + +#, fuzzy +#~ msgid "" +#~ "Director authorization problem.\n" +#~ "Most likely the passwords do not agree.\n" +#~ "If you are using TLS, there may have been a certificate validation error " +#~ "during the TLS handshake.\n" +#~ "For help, please see " +#~ msgstr "" +#~ "Проблеми авторизації Керівника.\n" +#~ "Швидше за все, проблема у паролях.\n" +#~ "Якщо Ви використовуєте TLS, можливо, невдала перевірка сертифікату під " +#~ "час TLS handshake.\n" +#~ "Для отримання допомоги, будь ласка, перегляньте http://www.bacula.org/en/" +#~ "rel-manual/Bacula_Freque_Asked_Questi.html#SECTION003760000000000000000.\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: " +#~ msgstr "" +#~ "\n" +#~ "Вірсія: " + +#~ msgid "input from file" +#~ msgstr "введення із файлу" + +#~ msgid "output to file" +#~ msgstr "вивід до файлу" + +#~ msgid "output to file and terminal" +#~ msgstr "вивід до файлу та консолі" + +#, fuzzy +#~ msgid "output everything to file and terminal (tee all)" +#~ msgstr "вивід до файлу та консолі" + +#~ msgid "sleep specified time" +#~ msgstr "зазначений час сну" + +#~ msgid "print current time" +#~ msgstr "вивести поточний час" + +#~ msgid "print Console's version" +#~ msgstr "вивести версію Консолі" + +#~ msgid "echo command string" +#~ msgstr "луна командного рядку" + +#~ msgid "execute an external command" +#~ msgstr "виконати зовнішню команду" + +#~ msgid "zed_keys = use zed keys instead of bash keys" +#~ msgstr "zed_keys = використовувати zed keys замість bash keys" + +#~ msgid "help listing" +#~ msgstr "допомога" + +#~ msgid "set command separator" +#~ msgstr "задати роздільник команд" + +#~ msgid ": is an invalid command\n" +#~ msgstr ": неправильна команда\n" + +#~ msgid "Illegal separator character.\n" +#~ msgstr "Невірний символ роздільника.\n" + +#~ msgid "Command logic problem\n" +#~ msgstr "Проблеми логіки команди\n" + +#, fuzzy +#~ msgid "Can't find %s in Director list\n" +#~ msgstr ": неправильна команда\n" + +#~ msgid "Available Directors:\n" +#~ msgstr "Наявні Керівники:\n" + +#~ msgid "Select Director by entering a number: " +#~ msgstr "Оберіть Керівника, увівши номер: " + +#~ msgid "%s is not a number. You must enter a number between 1 and %d\n" +#~ msgstr "%s не є номером. Ви повинні увести номер із проміжку 1..%d\n" + +#~ msgid "You must enter a number between 1 and %d\n" +#~ msgstr "Ви повинні увести номер із проміжку 1..%d\n" + +#, fuzzy +#~ msgid "Can't find %s in Console list\n" +#~ msgstr ": неправильна команда\n" + +#~ msgid "Connecting to Director %s:%d\n" +#~ msgstr "Підключаюсь до Керівника %s:%d\n" + +#~ msgid "Enter a period to cancel a command.\n" +#~ msgstr "Уведіть цятку для переривання команди.\n" + +#~ msgid "Too many arguments on input command.\n" +#~ msgstr "Забагато параметрів команди.\n" + +#~ msgid "First argument to input command must be a filename.\n" +#~ msgstr "Перший параметр команди повинен бути назвою файлу.\n" + +#~ msgid "Cannot open file %s for input. ERR=%s\n" +#~ msgstr "Не вдається відкрити файл %s для введення. 
ERR=%s\n" + +#~ msgid "Too many arguments on output/tee command.\n" +#~ msgstr "Забагато параметрів на виході команди.\n" + +#~ msgid "Cannot open file %s for output. ERR=%s\n" +#~ msgstr "Не вдається відкрити файл %s для виведення. ERR=%s\n" + +#~ msgid "Too many arguments. Enclose command in double quotes.\n" +#~ msgstr "Забагато параметрів. Оточіть комінду подвійними лапками.\n" + +#, fuzzy +#~ msgid "Cannot popen(\"%s\", \"r\"): ERR=%s\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "@exec error: ERR=%s\n" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#~ msgid "Console: name=%s rcfile=%s histfile=%s\n" +#~ msgstr "Консоль: назва=%s rcfile=%s histfile=%s\n" + +#, fuzzy +#~ msgid "Error getting Job record for Job report: ERR=%s" +#~ msgstr "Не вдалось відновити прапорці для файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Admin Canceled" +#~ msgstr "Статус задачі: Відмінена" + +#~ msgid "Error sending Hello to Storage daemon. ERR=%s\n" +#~ msgstr "Не вдалося надіслати Hello до Зберігача. ERR=%s\n" + +#, fuzzy +#~ msgid "TLS negotiation failed with SD at \"%s:%d\"\n" +#~ msgstr "Встановлення TLS із Керівником невдале \"%s:%d\"\n" + +#, fuzzy +#~ msgid "bdird] [config_file]\n" +#~ " -r get resource type \n" +#~ " -n get resource \n" +#~ " -l get only directives matching dirs (use with -r)\n" +#~ " -D get only data\n" +#~ " -R do not apply JobDefs to Job\n" +#~ " -c set configuration file to file\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -t test - read configuration and exit\n" +#~ " -s output in show text format\n" +#~ " -v verbose user messages\n" +#~ " -? print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Версія: %s (%s) %s %s %s\n" +#~ "\n" +#~ "Використання: bat [-s] [-c config_file] [-d debug_level] [config_file]\n" +#~ " -c set configuration file to file\n" +#~ " -dnn set debug level to nn\n" +#~ " -s без сигналів\n" +#~ " -t перевірка - прочитати конфігурацію і вийти\n" +#~ " -? print this message.\n" +#~ "\n" + +#, fuzzy +#~ msgid "" +#~ "No Director resource defined in %s\n" +#~ "Without that I don't know who I am :-(\n" +#~ msgstr "" +#~ "У %s не задано ресурсу Керівника\n" +#~ "Без цього я не знаю як спілкуватись із Керівником :-(\n" + +#, fuzzy +#~ msgid "No Messages resource defined in %s\n" +#~ msgstr "Ресурс %s не визначено\n" + +#, fuzzy +#~ msgid "\"TLS Certificate\" file not defined for Director \"%s\" in %s.\n" +#~ msgstr "" +#~ "Жоден із параметрів \"TLS CA Certificate\" або \"TLS CA Certificate Dir\" " +#~ "не задано для Консолі \"%s\" у %s.\n" + +#, fuzzy +#~ msgid "" +#~ "Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined " +#~ "for Director \"%s\" in %s. At least one CA certificate store is required " +#~ "when using \"TLS Verify Peer\".\n" +#~ msgstr "" +#~ "Жоден із параметрів \"TLS CA Certificate\" або \"TLS CA Certificate Dir\" " +#~ "не задано для Керівника \"%s\" у %s. 
Необхідне щонайменше одне сховище " +#~ "для сертифікату CA.\n" + +#, fuzzy +#~ msgid "\"TLS Certificate\" file not defined for Console \"%s\" in %s.\n" +#~ msgstr "" +#~ "Жоден із параметрів \"TLS CA Certificate\" або \"TLS CA Certificate Dir\" " +#~ "не задано для Консолі \"%s\" у %s.\n" + +#, fuzzy +#~ msgid "\"TLS Key\" file not defined for Console \"%s\" in %s.\n" +#~ msgstr "" +#~ "Жоден із параметрів \"TLS CA Certificate\" або \"TLS CA Certificate Dir\" " +#~ "не задано для Консолі \"%s\" у %s.\n" + +#, fuzzy +#~ msgid "" +#~ "Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined " +#~ "for Console \"%s\" in %s. At least one CA certificate store is required " +#~ "when using \"TLS Verify Peer\".\n" +#~ msgstr "" +#~ "Жоден із параметрів \"TLS CA Certificate\" або \"TLS CA Certificate Dir\" " +#~ "не задано для Керівника \"%s\" у %s. Необхідне щонайменше одне сховище " +#~ "для сертифікату CA.\n" + +#, fuzzy +#~ msgid "Failed to initialize TLS context for File daemon \"%s\" in %s.\n" +#~ msgstr "Ініціалізація контексту TLS для Керівника невдала \"%s\".\n" + +#, fuzzy +#~ msgid "" +#~ "Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined " +#~ "for File daemon \"%s\" in %s.\n" +#~ msgstr "" +#~ "Жоден із параметрів \"TLS CA Certificate\" або \"TLS CA Certificate Dir\" " +#~ "не задано для Консолі \"%s\" у %s.\n" + +#, fuzzy +#~ msgid "No Job records defined in %s\n" +#~ msgstr "Ресурс %s не визначено\n" + +#, fuzzy +#~ msgid "" +#~ "\"%s\" directive in Job \"%s\" resource is required, but not found.\n" +#~ msgstr "Не знайдено необхідний елемент %s для ресурсу %s.\n" + +#, fuzzy +#~ msgid "Too many items in Job resource\n" +#~ msgstr "Забагато елементів у ресурсі %s\n" + +#, fuzzy +#~ msgid "Unable to get Job record. ERR=%s\n" +#~ msgstr "Не вдалось відновити прапорці для файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to get Job Volume Parameters. ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to create bootstrap file %s. ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Error writing bsr file.\n" +#~ msgstr "Помилка надсилання Hello до Збирача. ERR=%s\n" + +#, fuzzy +#~ msgid "No Volumes found to restore.\n" +#~ msgstr "Помилка надсилання Hello до Збирача. ERR=%s\n" + +#, fuzzy +#~ msgid "1990 Invalid Catalog Request: %s" +#~ msgstr "1994 Помилка оновлення Каталогу: %s" + +#, fuzzy +#~ msgid "Invalid Catalog request; DB not open: %s" +#~ msgstr "Помилка оновлення Каталогу: БД не відкрита: %s" + +#, fuzzy +#~ msgid "1998 Volume \"%s\" catalog status is %s, %s.\n" +#~ msgstr "У картотеці створено новий Том \"%s\".\n" + +#, fuzzy +#~ msgid "Unable to get Media record for Volume %s: ERR=%s\n" +#~ msgstr "Не вдалось відновити прапорці для файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Catalog error updating Media record. %s" +#~ msgstr "Помилка під час оновлення відбитку файлу. %s" + +#, fuzzy +#~ msgid "Catalog error creating JobMedia record. %s" +#~ msgstr "Помилка під час оновлення відбитку файлу. %s" + +#, fuzzy +#~ msgid "Invalid Catalog request: %s" +#~ msgstr "1994 Помилка оновлення Каталогу: %s" + +#, fuzzy +#~ msgid "Attribute create error: ERR=%s" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Restore object create error. %s" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "Catalog error updating file digest. 
Unsupported digest stream type: %d" +#~ msgstr "Помилка під час оновлення відбитку файлу. %s" + +#, fuzzy +#~ msgid "attribute create error. ERR=%s" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#~ msgid "Catalog error updating file digest. %s" +#~ msgstr "Помилка під час оновлення відбитку файлу. %s" + +#~ msgid "1994 Invalid Catalog Update: %s" +#~ msgstr "1994 Помилка оновлення Каталогу: %s" + +#~ msgid "Invalid Catalog Update; DB not open: %s" +#~ msgstr "Помилка оновлення Каталогу: БД не відкрита: %s" + +#, fuzzy +#~ msgid "fread attr spool error. ERR=%s\n" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: bacula-dir [-f -s] [-c config_file] [-d debug_level] " +#~ "[config_file]\n" +#~ " -c set configuration file to file\n" +#~ " -d [,] set debug level to , debug tags to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -T set trace on\n" +#~ " -f run in foreground (for debugging)\n" +#~ " -g groupid\n" +#~ " -m print kaboom output (for debugging)\n" +#~ " -r run now\n" +#~ " -s no signals\n" +#~ " -t test - read configuration and exit\n" +#~ " -u userid\n" +#~ " -v verbose user messages\n" +#~ " -? print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Версія: %s (%s) %s %s %s\n" +#~ "\n" +#~ "Використання: bat [-s] [-c config_file] [-d debug_level] [config_file]\n" +#~ " -c set configuration file to file\n" +#~ " -dnn set debug level to nn\n" +#~ " -s без сигналів\n" +#~ " -t перевірка - прочитати конфігурацію і вийти\n" +#~ " -? print this message.\n" +#~ "\n" + +#, fuzzy +#~ msgid "Failed to initialize TLS context for Director \"%s\" in %s.\n" +#~ msgstr "Ініціалізація контексту TLS для Керівника невдала \"%s\".\n" + +#, fuzzy +#~ msgid "Could not open Catalog \"%s\", database \"%s\".\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not create storage record for %s\n" +#~ msgstr "Не вдалось відновити прапорці для файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not update storage record for %s\n" +#~ msgstr "Не вдалось відновити прапорці для файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined " +#~ "for Storage \"%s\" in %s.\n" +#~ msgstr "" +#~ "Жоден із параметрів \"TLS CA Certificate\" або \"TLS CA Certificate Dir\" " +#~ "не задано для Консолі \"%s\" у %s.\n" + +#, fuzzy +#~ msgid "Failed to initialize TLS context for Storage \"%s\" in %s.\n" +#~ msgstr "Ініціалізація контексту TLS для Керівника невдала \"%s\".\n" + +#, fuzzy +#~ msgid "Could not compile regex pattern \"%s\" ERR=%s\n" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Director: name=%s MaxJobs=%d FDtimeout=%s SDtimeout=%s\n" +#~ msgstr "Споглядач: назва=%s FDtimeout=%s SDtimeout=%s\n" + +#, fuzzy +#~ msgid "Console: name=%s SSL=%d\n" +#~ msgstr "Консоль: назва=%s\n" + +#, fuzzy +#~ msgid "Counter: name=%s min=%d max=%d\n" +#~ msgstr "Консоль: назва=%s rcfile=%s histfile=%s\n" + +#, fuzzy +#~ msgid "" +#~ "Client: Name=%s Enabled=%d Address=%s FDport=%d MaxJobs=%u NumJobs=%u\n" +#~ msgstr "Клієнт: назва=%s address=%s FDport=%d\n" + +#, fuzzy +#~ msgid "Schedule: name=%s\n" +#~ msgstr "Консоль: назва=%s\n" + +#, fuzzy +#~ msgid "Pool: name=%s PoolType=%s\n" +#~ msgstr "Консоль: назва=%s rcfile=%s histfile=%s\n" + +#, fuzzy +#~ msgid "Messages: name=%s\n" +#~ msgstr "Консоль: назва=%s\n" + +#, fuzzy +#~ msgid "Cannot find Pool resource %s\n" +#~ msgstr "Не вдається відкрити файл %s для виведення. 
ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot find Console resource %s\n" +#~ msgstr "Не вдається відкрити файл %s для виведення. ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot find Director resource %s\n" +#~ msgstr ": неправильна команда\n" + +#, fuzzy +#~ msgid "Cannot find Storage resource %s\n" +#~ msgstr "Не вдалось відновити прапорці для файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot find Job resource %s\n" +#~ msgstr "Не вдається відкрити файл %s для виведення. ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot find Counter resource %s\n" +#~ msgstr "Не вдається відкрити файл %s для виведення. ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot find Client resource %s\n" +#~ msgstr "Не вдається відкрити файл %s для виведення. ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot find Schedule resource %s\n" +#~ msgstr "Не вдається відкрити файл %s для виведення. ERR=%s\n" + +#, fuzzy +#~ msgid "Could not find Storage Resource %s referenced on line %d : %s\n" +#~ msgstr "" +#~ "Спроба визначити другий %s ресурс із назвою \"%s\" не дозволяється.\n" + +#, fuzzy +#~ msgid "" +#~ "Attempt to redefine Storage resource \"%s\" referenced on line %d : %s\n" +#~ msgstr "" +#~ "Спроба визначити другий %s ресурс із назвою \"%s\" не дозволяється.\n" + +#, fuzzy +#~ msgid "Could not find config Resource %s referenced on line %d : %s\n" +#~ msgstr "" +#~ "Спроба визначити другий %s ресурс із назвою \"%s\" не дозволяється.\n" + +#~ msgid "Count not update counter %s: ERR=%s\n" +#~ msgstr "Лічильник %s не оновлено: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot create var context: ERR=%s\n" +#~ msgstr "Не можливо створити теку %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot set var callback: ERR=%s\n" +#~ msgstr "Не можливо перевстановити поточну теку: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot set var operate: ERR=%s\n" +#~ msgstr "Не можливо перевстановити поточну теку: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot unescape string: ERR=%s\n" +#~ msgstr "Не можливо створити чергу(fifo) %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot expand expression \"%s\": ERR=%s\n" +#~ msgstr "Не можливо змінити дозволи для %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot destroy var context: ERR=%s\n" +#~ msgstr "Не можливо перевстановити поточну теку: ERR=%s\n" + +#, fuzzy +#~ msgid "Client: " +#~ msgstr "Збирач" + +#, fuzzy +#~ msgid "File daemon \"%s\" rejected Job command: %s\n" +#~ msgstr "Зберігач відхилив команду Hello\n" + +#, fuzzy +#~ msgid "Error updating Client record. ERR=%s\n" +#~ msgstr "Помилка надсилання Hello до Збирача. ERR=%s\n" + +#, fuzzy +#~ msgid "FD gave bad response to JobId command: %s\n" +#~ msgstr "Погана відповідь на команду Hello: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot run program: %s. ERR=%s\n" +#~ msgstr "Не вдалось створити ноду(node) %s: ERR=%s\n" + +#, fuzzy +#~ msgid ">filed: write error on socket\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Error running program: %s. ERR=%s\n" +#~ msgstr "Помилка у %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot open included file: %s. 
ERR=%s\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Client \"%s\" RunScript failed.\n" +#~ msgstr "Помилка команди" + +#, fuzzy +#~ msgid "RestoreObject failed.\n" +#~ msgstr "Погана відповідь на команду Hello: ERR=%s\n" + +#, fuzzy +#~ msgid "ComponentInfo failed.\n" +#~ msgstr "Помилка команди" + +#, fuzzy +#~ msgid " get resource type \n" +#~ " -n get resource \n" +#~ " -l get only directives matching dirs (use with -r)\n" +#~ " -D get only data\n" +#~ " -c use as configuration file\n" +#~ " -d set debug level to \n" +#~ " -dt print a timestamp in debug output\n" +#~ " -t test configuration file and exit\n" +#~ " -v verbose user messages\n" +#~ " -? print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "Автор Nicolas Boichat (2004)\n" +#~ "\n" +#~ "Версія: %s (%s) %s %s %s\n" +#~ "\n" +#~ "Використання: tray-monitor [-c config_file] [-d debug_level]\n" +#~ " -c задати конфігураційний файл \n" +#~ " -d встановити рівень відлагоджування у \n" +#~ " -dt виводити часову мітку у даних відлагоджування\n" +#~ " -t перевірка - прогитати конфігурацію і завершити\n" +#~ " -? показати це повідомлення.\n" +#~ "\n" + +#, fuzzy +#~ msgid "" +#~ "No File daemon resource defined in %s\n" +#~ "Without that I don't know who I am :-(\n" +#~ msgstr "" +#~ "У %s не задано ресурсу Керівника\n" +#~ "Без цього я не знаю як спілкуватись із Керівником :-(\n" + +#, fuzzy +#~ msgid "Only one Client resource permitted in %s\n" +#~ msgstr "Не вдалось відновити прапорці для файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined " +#~ "for File daemon in %s.\n" +#~ msgstr "" +#~ "Жоден із параметрів \"TLS CA Certificate\" або \"TLS CA Certificate Dir\" " +#~ "не задано для Консолі \"%s\" у %s.\n" + +#, fuzzy +#~ msgid "No Director resource defined in %s\n" +#~ msgstr "Ресурс %s не визначено\n" + +#, fuzzy +#~ msgid "Failed to initialize encryption context.\n" +#~ msgstr "Ініціалізація контексту TLS для Консолі невдала \"%s\".\n" + +#, fuzzy +#~ msgid "%s signature digest initialization failed\n" +#~ msgstr "Невдала ініціалізація відбитку %s\n" + +#, fuzzy +#~ msgid "Unsupported cipher on this system.\n" +#~ msgstr "Цей тип відбитку не підтримується: %d\n" + +#, fuzzy +#~ msgid "An error occurred while encrypting the stream.\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "An error occurred while adding signer the stream.\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "An error occurred while signing the stream.\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "An error occurred finalizing signing the stream.\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Plugin save packet not found.\n" +#~ msgstr "Не знайдено відбиток MD5 Набору Файлів.\n" + +#, fuzzy +#~ msgid "Plugin=%s not found.\n" +#~ msgstr "Не знайдено відбиток MD5 Набору Файлів.\n" + +#~ msgid "Could not create %s: ERR=%s\n" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Error while creating command string %s.\n" +#~ msgstr "Помилка надсилання Hello до Збирача. ERR=%s\n" + +#, fuzzy +#~ msgid "Error while executing \"%s\" %s. %s %s\n" +#~ msgstr "Помилка надсилання Hello до Збирача. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to parse snapshot command output\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to create snapshot record. 
ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to create snapshot record, got %s\n" +#~ msgstr "Не вдалось відновити прапорці для файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to delete snapshot record. ERR=%s\n" +#~ msgstr "Не вдалось відновити прапорці для файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to delete snapshot record, got %s\n" +#~ msgstr "Не вдалось відновити прапорці для файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to get snapshot record. ERR=%s\n" +#~ msgstr "Не вдалось відновити прапорці для файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to get snapshot record, got %s\n" +#~ msgstr "Не вдалось відновити прапорці для файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to parse command output\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid " Delete Snapshot for %s\n" +#~ msgstr "Не можливо створити теку %s: ERR=%s\n" + +#, fuzzy +#~ msgid " Unable to delete snapshot of %s ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid " Create Snapshot for %s\n" +#~ msgstr "Не можливо створити теку %s: ERR=%s\n" + +#, fuzzy +#~ msgid " Unable to create snapshot of %s ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Version: %s (%s)\n" +#~ "\n" +#~ "Usage: bacula-fd [-f -s] [-c config_file] [-d debug_level]\n" +#~ " -c use as configuration file\n" +#~ " -d [,] set debug level to , debug tags to \n" +#~ " -dt print a timestamp in debug output\n" +#~ " -f run in foreground (for debugging)\n" +#~ " -g groupid\n" +#~ " -k keep readall capabilities\n" +#~ " -m print kaboom output (for debugging)\n" +#~ " -s no signals (for debugging)\n" +#~ " -t test configuration file and exit\n" +#~ " -T set trace on\n" +#~ " -u userid\n" +#~ " -v verbose user messages\n" +#~ " -? print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Версія: %s (%s) %s %s %s\n" +#~ "\n" +#~ "Використання: bat [-s] [-c config_file] [-d debug_level] [config_file]\n" +#~ " -c set configuration file to file\n" +#~ " -dnn set debug level to nn\n" +#~ " -s без сигналів\n" +#~ " -t перевірка - прочитати конфігурацію і вийти\n" +#~ " -? print this message.\n" +#~ "\n" + +#, fuzzy +#~ msgid "Disable Command \"%s\" not found.\n" +#~ msgstr "Заборонені символи у назві Тому \"%s\"\n" + +#, fuzzy +#~ msgid "Failed to initialize TLS context for Console \"%s\" in %s.\n" +#~ msgstr "Ініціалізація контексту TLS для Консолі невдала \"%s\".\n" + +#, fuzzy +#~ msgid "Cannot find any Console resource for remote access\n" +#~ msgstr "Не вдається відкрити файл %s для виведення. ERR=%s\n" + +#, fuzzy +#~ msgid "Bad Hello command from Director at %s. Len=%d.\n" +#~ msgstr "Від'єднуюсь від Керівника %s:%d\n" + +#, fuzzy +#~ msgid "Bad Hello command from Director at %s: %s\n" +#~ msgstr "Від'єднуюсь від Керівника %s:%d\n" + +#, fuzzy +#~ msgid "Connection from unknown Director %s at %s rejected.\n" +#~ msgstr "Від'єднуюсь від Керівника %s:%d\n" + +#, fuzzy +#~ msgid "SD connect failed: Bad Hello command\n" +#~ msgstr "Керівник не прийняв команду Hello\n" + +#, fuzzy +#~ msgid "SD connect failed: Job name not found: %s\n" +#~ msgstr "Керівник не прийняв команду Hello\n" + +#, fuzzy +#~ msgid "SD \"%s\" tried to connect two times.\n" +#~ msgstr "Ініціалізація контексту TLS для Консолі невдала \"%s\".\n" + +#, fuzzy +#~ msgid "Command: \"%s\" is disabled.\n" +#~ msgstr "Помилка команди" + +#, fuzzy +#~ msgid "Bad command from %s. 
Len=%d.\n" +#~ msgstr "Від'єднуюсь від Клієнта %s:%d\n" + +#, fuzzy +#~ msgid "2902 Error scanning cancel command.\n" +#~ msgstr "Уведіть цятку для переривання команди.\n" + +#, fuzzy +#~ msgid "2991 Bad setbandwidth command: %s\n" +#~ msgstr "Погана відповідь на команду Hello: ERR=%s\n" + +#, fuzzy +#~ msgid "2991 Bad setdebug command: %s\n" +#~ msgstr "Погана відповідь на команду Hello: ERR=%s\n" + +#, fuzzy +#~ msgid "Bad estimate command: %s" +#~ msgstr "Погана відповідь на команду Hello: ERR=%s\n" + +#, fuzzy +#~ msgid "Bad Job Command: %s" +#~ msgstr "Погана відповідь на команду Hello: ERR=%s\n" + +#, fuzzy +#~ msgid "Bad RunBeforeJob command: %s\n" +#~ msgstr "Погана відповідь на команду Hello: ERR=%s\n" + +#, fuzzy +#~ msgid "2905 Bad RunBeforeJob command.\n" +#~ msgstr "Погана відповідь на команду Hello: ERR=%s\n" + +#, fuzzy +#~ msgid "Bad RunAfter command: %s\n" +#~ msgstr "Погана відповідь на команду Hello: ERR=%s\n" + +#, fuzzy +#~ msgid "Bad RunScript command: %s\n" +#~ msgstr "Погана відповідь на команду Hello: ERR=%s\n" + +#, fuzzy +#~ msgid "2905 Bad RunScript command.\n" +#~ msgstr "Погана відповідь на команду Hello: ERR=%s\n" + +#, fuzzy +#~ msgid "Bad RestoreObject command: %s\n" +#~ msgstr "Погана відповідь на команду Hello: ERR=%s\n" + +#, fuzzy +#~ msgid "2909 Bad RestoreObject command.\n" +#~ msgstr "Погана відповідь на команду Hello: ERR=%s\n" + +#, fuzzy +#~ msgid "Error running program: %s. stat=%d: ERR=%s\n" +#~ msgstr "Помилка у %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot open FileSet input file: %s. ERR=%s\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "REGEX %s compile error. ERR=%s\n" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Invalid FileSet command: %s\n" +#~ msgstr "Погана відповідь на команду Hello: ERR=%s\n" + +#~ msgid "Unknown include/exclude option: %c\n" +#~ msgstr "Не зрозуміла опція параметру include/exclude: %c\n" + +#, fuzzy +#~ msgid "Unknown backup level: %s\n" +#~ msgstr "Невідомий тип ресурсу %d\n" + +#, fuzzy +#~ msgid "Bad level command: %s\n" +#~ msgstr "Погана відповідь на команду Hello: ERR=%s\n" + +#, fuzzy +#~ msgid "Bad session command: %s" +#~ msgstr "Погана відповідь на команду Hello: ERR=%s\n" + +#, fuzzy +#~ msgid "Bad storage command: %s" +#~ msgstr "Погана відповідь на команду Hello: ERR=%s\n" + +#, fuzzy +#~ msgid "Failed to connect to Storage daemon: %s:%d\n" +#~ msgstr "Ініціалізація контексту TLS для Консолі невдала \"%s\".\n" + +#, fuzzy +#~ msgid "Failed connect from Storage daemon. SD bsock=NULL.\n" +#~ msgstr "Відкриті з'єднання із Зберігачем.\n" + +#, fuzzy +#~ msgid "ACL support not configured for Client.\n" +#~ msgstr "TLS необхідний, але не налаштовано у Bacula.\n" + +#, fuzzy +#~ msgid "Cannot contact Storage daemon\n" +#~ msgstr "Не можу приєднатись до демону.\n" + +#, fuzzy +#~ msgid "Bad response to append open: %s\n" +#~ msgstr "Погана відповідь на команду Hello: ERR=%s\n" + +#, fuzzy +#~ msgid "Bad response from stored to open command\n" +#~ msgstr "Невірна відповідь від Збирача на команду Hello: ERR=%s\n" + +#, fuzzy +#~ msgid "2994 Bad verify command: %s\n" +#~ msgstr "Погана відповідь на команду Hello: ERR=%s\n" + +#, fuzzy +#~ msgid "Bad replace command. 
CMD=%s\n" +#~ msgstr "Погана відповідь на команду Hello: ERR=%s\n" + +#, fuzzy +#~ msgid "Bad response to SD read open: %s\n" +#~ msgstr "Погана відповідь на команду Hello: ERR=%s\n" + +#, fuzzy +#~ msgid "Bad response from stored to read open command\n" +#~ msgstr "Невірна відповідь від Збирача на команду Hello: ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "Bad response from SD to %s command. Wanted %s, got len=%ld msg=\"%s\"\n" +#~ msgstr "Невірна відповідь від Збирача на команду Hello: ERR=%s\n" + +#, fuzzy +#~ msgid "Bad response from SD to %s command. Wanted %s, got SIGNAL %s\n" +#~ msgstr "Невірна відповідь від Збирача на команду Hello: ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "Size of data or stream of %s not correct. Original %s, restored %s.\n" +#~ msgstr "" +#~ "Не правильний розмір відновленого файлу %s. Оригінальний %s, відновлений " +#~ "%s.\n" + +#, fuzzy +#~ msgid "Error setting Finder Info on \"%s\"\n" +#~ msgstr "Не вдалось перевстановити прапорці для файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "LZO init failed\n" +#~ msgstr "Невдале встановлення з'єднання TLS\n" + +#, fuzzy +#~ msgid "Data record error. ERR=%s\n" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#~ msgid "Could not create digest.\n" +#~ msgstr "Не можливо створити відбиток.\n" + +#, fuzzy +#~ msgid "Unsupported digest algorithm. Decrypt failed.\n" +#~ msgstr "Відбиток" + +#, fuzzy +#~ msgid "Unsupported encryption algorithm. Decrypt failed.\n" +#~ msgstr "Відбиток" + +#, fuzzy +#~ msgid "" +#~ "An error=%d occurred while decoding encrypted session data stream: ERR=" +#~ "%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Missing encryption session data stream for %s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Failed to initialize decryption context for %s\n" +#~ msgstr "Ініціалізація контексту TLS для Консолі невдала \"%s\".\n" + +#, fuzzy +#~ msgid "Cannot open resource fork for %s.\n" +#~ msgstr "Не вдається відкрити файл %s для виведення. ERR=%s\n" + +#, fuzzy +#~ msgid "Failed to decode message signature for %s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Zlib data error" +#~ msgstr " (%d помилка)" + +#, fuzzy +#~ msgid "Seek to %s error on %s: ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "LZO uncompression error on file %s. ERR=%d\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Uncompression error on file %s. 
ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "Write error at byte=%lld block=%d write_len=%d lerror=%d on %s: ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Signature validation failed for file %s: ERR=%s\n" +#~ msgstr "Не вдалось відновити прапорці для файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Digest one file failed for file: %s\n" +#~ msgstr "Не вдалось відновити прапорці для файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Director connected %sat: %s\n" +#~ msgstr "Керівник від'єднано" + +#, fuzzy +#~ msgid " Processing file: %s\n" +#~ msgstr "Пуста назва файлу: %s\n" + +#, fuzzy +#~ msgid "Bad .status command: %s\n" +#~ msgstr "Погана відповідь на команду Hello: ERR=%s\n" + +#, fuzzy +#~ msgid " Could not access %s: ERR=%s\n" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#, fuzzy +#~ msgid " Could not follow link %s: ERR=%s\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid " Could not stat %s: ERR=%s\n" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#, fuzzy +#~ msgid " Could not open directory %s: ERR=%s\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid " Unknown file type %d: %s\n" +#~ msgstr "Невідомий тип файлу %d: не відновлено: %s\n" + +#, fuzzy +#~ msgid "Network error in send to Director: ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid " Cannot open %s: ERR=%s.\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid " Cannot open resource fork for %s: ERR=%s.\n" +#~ msgstr "Не вдається відкрити файл %s для виведення. ERR=%s\n" + +#, fuzzy +#~ msgid "Error reading file %s: ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to stat file \"%s\": ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Failed to serialize extended attributes on file \"%s\"\n" +#~ msgstr "Ініціалізація контексту TLS для Консолі невдала \"%s\".\n" + +#, fuzzy +#~ msgid "Illegal empty xattr attribute name\n" +#~ msgstr "Заборонені символи у назві Тому \"%s\"\n" + +#, fuzzy +#~ msgid "pioctl VIOCGETAL error on file \"%s\": ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "pioctl VIOCSETAL error on file \"%s\": ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "pathconf error on file \"%s\": ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Failed to convert %d into namespace on file \"%s\"\n" +#~ msgstr "Ініціалізація контексту TLS для Консолі невдала \"%s\".\n" + +#, fuzzy +#~ msgid "acl_to_text error on file \"%s\": ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "acl_get_file error on file \"%s\": ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "acl_delete_def_file error on file \"%s\": ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "acl_from_text error on file \"%s\": ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "acl_set_file error on file \"%s\": ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "extattr_list_link error on file \"%s\": ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "extattr_get_link error on file \"%s\": ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Failed to split %s into namespace and name part on file \"%s\"\n" +#~ msgstr "Ініціалізація контексту TLS для Консолі невдала \"%s\".\n" + 
+#, fuzzy +#~ msgid "Failed to convert %s into namespace on file \"%s\"\n" +#~ msgstr "Ініціалізація контексту TLS для Консолі невдала \"%s\".\n" + +#, fuzzy +#~ msgid "extattr_set_link error on file \"%s\": ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "acl_valid error on file \"%s\": ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "llistxattr error on file \"%s\": ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "lgetxattr error on file \"%s\": ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "setxattr error on file \"%s\": ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to get status on xattr \"%s\" on file \"%s\": ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Failed to send extended attribute \"%s\" on file \"%s\"\n" +#~ msgstr "Ініціалізація контексту TLS для Консолі невдала \"%s\".\n" + +#, fuzzy +#~ msgid "Failed to restore extensible attributes on file \"%s\"\n" +#~ msgstr "Ініціалізація контексту TLS для Консолі невдала \"%s\".\n" + +#, fuzzy +#~ msgid "Failed to restore extended attributes on file \"%s\"\n" +#~ msgstr "Ініціалізація контексту TLS для Консолі невдала \"%s\".\n" + +#, fuzzy +#~ msgid "acl_get error on file \"%s\": ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to get xattr acl on file \"%s\": ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to get acl on xattr %s on file \"%s\": ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to get acl text on xattr %s on file \"%s\": ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to convert acl from text on file \"%s\"\n" +#~ msgstr "Ініціалізація контексту TLS для Консолі невдала \"%s\".\n" + +#, fuzzy +#~ msgid "Unable to restore acl of xattr %s on file \"%s\": ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "acl_fromtext error on file \"%s\": ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "wrong encoding of acl type in acl stream on file \"%s\"\n" +#~ msgstr "Невідомий тип файлу %d: не відновлено: %s\n" + +#, fuzzy +#~ msgid "acl_set error on file \"%s\": ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to open xattr on file \"%s\": ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to list the xattr on file \"%s\": ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to close xattr list on file \"%s\": ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to stat xattr \"%s\" on file \"%s\": ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to open file \"%s\": ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to link xattr %s to %s on file \"%s\": ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to open attribute \"%s\" at file \"%s\": ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to restore data of xattr %s on file \"%s\": ERR=%s\n" +#~ msgstr "Не можливо 
встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unsupported xattr type %s on file \"%s\"\n" +#~ msgstr "Заданий тип відбитку=%d не підтримується\n" + +#, fuzzy +#~ msgid "Unable to restore owner of xattr %s on file \"%s\": ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to restore filetimes of xattr %s on file \"%s\": ERR=%s\n" +#~ msgstr "Не вдалось відновити прапорці для файлу %s: ERR=%s\n" + +#~ msgid "Unable to set file owner %s: ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#~ msgid "Unable to set file modes %s: ERR=%s\n" +#~ msgstr "Не можливо встановити параметри файлу %s: ERR=%s\n" + +#~ msgid "Unable to set file times %s: ERR=%s\n" +#~ msgstr "Не можливо встановити час файлу %s: ERR=%s\n" + +#~ msgid "" +#~ "File size of restored file %s not correct. Original %s, restored %s.\n" +#~ msgstr "" +#~ "Не правильний розмір відновленого файлу %s. Оригінальний %s, відновлений " +#~ "%s.\n" + +#~ msgid "Unable to set file flags %s: ERR=%s\n" +#~ msgstr "Не можливо встановити прапорці файлу %s: ERR=%s\n" + +#~ msgid "Unix attributes" +#~ msgstr "Атрибули Unix" + +#~ msgid "File data" +#~ msgstr "Дата файлу" + +#~ msgid "MD5 digest" +#~ msgstr "Відбиток MD5" + +#~ msgid "GZIP data" +#~ msgstr "Дані, запаковані GZIP" + +#, fuzzy +#~ msgid "Compressed data" +#~ msgstr "Замало даних" + +#~ msgid "Extended attributes" +#~ msgstr "Додаткові атрибути" + +#~ msgid "Sparse data" +#~ msgstr "Замало даних" + +#~ msgid "GZIP sparse data" +#~ msgstr "Замало даних GZIP " + +#, fuzzy +#~ msgid "Compressed sparse data" +#~ msgstr "Замало даних GZIP " + +#~ msgid "Program names" +#~ msgstr "Назви програми" + +#~ msgid "Program data" +#~ msgstr "Програмні дані" + +#~ msgid "SHA1 digest" +#~ msgstr "Відбиток HA1" + +#~ msgid "Win32 data" +#~ msgstr "Дані Win32" + +#~ msgid "Win32 GZIP data" +#~ msgstr "Дані Win32 GZIP" + +#, fuzzy +#~ msgid "Win32 compressed data" +#~ msgstr "Дані Win32" + +#~ msgid "MacOS Fork data" +#~ msgstr "Дані MacOS Fork" + +#~ msgid "HFS+ attribs" +#~ msgstr "Атрибути HFS+" + +#~ msgid "Standard Unix ACL attribs" +#~ msgstr "Стандартні атрибути Unix ACL" + +#~ msgid "Default Unix ACL attribs" +#~ msgstr "Звичійні атрибути Unix ACL" + +#~ msgid "SHA256 digest" +#~ msgstr "Відбиток SHA256" + +#~ msgid "SHA512 digest" +#~ msgstr "Відбиток SHA512" + +#~ msgid "Signed digest" +#~ msgstr "Підписаний відбиток" + +#~ msgid "Encrypted File data" +#~ msgstr "Зашифровані файли" + +#~ msgid "Encrypted Win32 data" +#~ msgstr "Зашифровані дані Win32" + +#~ msgid "Encrypted session data" +#~ msgstr "Зашифровані дані сесій" + +#~ msgid "Encrypted GZIP data" +#~ msgstr "Зашифровані дані GZIP" + +#, fuzzy +#~ msgid "Encrypted compressed data" +#~ msgstr "Зашифровані дані сесій" + +#~ msgid "Encrypted Win32 GZIP data" +#~ msgstr "Зашифровані дані Win32 GZIP" + +#, fuzzy +#~ msgid "Encrypted Win32 Compressed data" +#~ msgstr "Зашифровані дані Win32" + +#~ msgid "Encrypted MacOS fork data" +#~ msgstr "Зашифровані дані MacOS Fork " + +#, fuzzy +#~ msgid "AIX ACL attribs" +#~ msgstr "Специфічні атрибути AIX ACL" + +#, fuzzy +#~ msgid "Darwin ACL attribs" +#~ msgstr "Специфічні атрибути Darwin ACL" + +#, fuzzy +#~ msgid "FreeBSD Default ACL attribs" +#~ msgstr "Специфічні атрибути FreeBSD Default ACL" + +#, fuzzy +#~ msgid "FreeBSD Access ACL attribs" +#~ msgstr "Специфічні атрибути FreeBSD Access ACL" + +#, fuzzy +#~ msgid "HPUX ACL attribs" +#~ msgstr "Специфічні атрибути HPUX ACL" + +#, fuzzy +#~ msgid 
"Irix Default ACL attribs" +#~ msgstr "Специфічні атрибути Irix Default ACL" + +#, fuzzy +#~ msgid "Irix Access ACL attribs" +#~ msgstr "Специфічні атрибути Irix Access ACL" + +#, fuzzy +#~ msgid "Linux Default ACL attribs" +#~ msgstr "Специфічні атрибути Linux Default ACL" + +#, fuzzy +#~ msgid "Linux Access ACL attribs" +#~ msgstr "Специфічні атрибути Linux Access ACL" + +#, fuzzy +#~ msgid "TRU64 Default ACL attribs" +#~ msgstr "Специфічні атрибути Irix Default ACL" + +#, fuzzy +#~ msgid "TRU64 Access ACL attribs" +#~ msgstr "Специфічні атрибути Irix Access ACL" + +#, fuzzy +#~ msgid "Solaris POSIX ACL attribs" +#~ msgstr "Специфічні атрибути Solaris ACL" + +#, fuzzy +#~ msgid "Solaris NFSv4/ZFS ACL attribs" +#~ msgstr "Специфічні атрибути Solaris ACL" + +#, fuzzy +#~ msgid "AFS ACL attribs" +#~ msgstr "Специфічні атрибути AIX ACL" + +#, fuzzy +#~ msgid "AIX POSIX ACL attribs" +#~ msgstr "Специфічні атрибути AIX ACL" + +#, fuzzy +#~ msgid "AIX NFSv4 ACL attribs" +#~ msgstr "Специфічні атрибути AIX ACL" + +#, fuzzy +#~ msgid "FreeBSD NFSv4/ZFS ACL attribs" +#~ msgstr "Специфічні атрибути FreeBSD Access ACL" + +#, fuzzy +#~ msgid "GNU Hurd Default ACL attribs" +#~ msgstr "Специфічні атрибути Irix Default ACL" + +#, fuzzy +#~ msgid "GNU Hurd Access ACL attribs" +#~ msgstr "Специфічні атрибути Irix Access ACL" + +#, fuzzy +#~ msgid "GNU Hurd Extended attribs" +#~ msgstr "Специфічні додаткові атрибути Linux" + +#, fuzzy +#~ msgid "IRIX Extended attribs" +#~ msgstr "Специфічні додаткові атрибути Linux" + +#, fuzzy +#~ msgid "TRU64 Extended attribs" +#~ msgstr "Специфічні додаткові атрибути Linux" + +#, fuzzy +#~ msgid "AIX Extended attribs" +#~ msgstr "Додаткові атрибути" + +#, fuzzy +#~ msgid "OpenBSD Extended attribs" +#~ msgstr "Специфічні додаткові атрибути NetBSD" + +#, fuzzy +#~ msgid "Solaris Extensible attribs or System Extended attribs" +#~ msgstr "Специфічні додаткові атрибути Solaris" + +#, fuzzy +#~ msgid "Solaris Extended attribs" +#~ msgstr "Специфічні додаткові атрибути Solaris" + +#, fuzzy +#~ msgid "Darwin Extended attribs" +#~ msgstr "Специфічні додаткові атрибути Darwin" + +#, fuzzy +#~ msgid "FreeBSD Extended attribs" +#~ msgstr "Специфічні додаткові атрибути FreeBS" + +#, fuzzy +#~ msgid "Linux Extended attribs" +#~ msgstr "Специфічні додаткові атрибути Linux" + +#, fuzzy +#~ msgid "NetBSD Extended attribs" +#~ msgstr "Специфічні додаткові атрибути NetBSD" + +#~ msgid "File skipped. Not newer: %s\n" +#~ msgstr "Файл пропущено. Не новий: %s\n" + +#~ msgid "File skipped. Not older: %s\n" +#~ msgstr "Файл пропущено. Не застарілий: %s\n" + +#~ msgid "File skipped. Already exists: %s\n" +#~ msgstr "Файл пропущено. Вже існує: %s\n" + +#~ msgid "File %s already exists and could not be replaced. ERR=%s.\n" +#~ msgstr "Файл %s існує та його не вдалось замінити. 
ERR=%s.\n" + +#~ msgid "Cannot make fifo %s: ERR=%s\n" +#~ msgstr "Не можливо створити чергу(fifo) %s: ERR=%s\n" + +#~ msgid "Cannot make node %s: ERR=%s\n" +#~ msgstr "Не вдалось створити ноду(node) %s: ERR=%s\n" + +#~ msgid "Could not symlink %s -> %s: ERR=%s\n" +#~ msgstr "Не вдалось створити лінку(symlink) %s -> %s: ERR=%s\n" + +#~ msgid "Could not restore file flags for file %s: ERR=%s\n" +#~ msgstr "Не вдалось відновити прапорці для файлу %s: ERR=%s\n" + +#~ msgid "Could not hard link %s -> %s: ERR=%s\n" +#~ msgstr "Не вдалось створити жорстку лінку(hardlink) %s -> %s: ERR=%s\n" + +#~ msgid "Could not reset file flags for file %s: ERR=%s\n" +#~ msgstr "Не вдалось перевстановити прапорці для файлу %s: ERR=%s\n" + +#~ msgid "Original file %s have been deleted: type=%d\n" +#~ msgstr "Оригінальний файл %s було видалено: тип=%d\n" + +#~ msgid "Original file %s not saved: type=%d\n" +#~ msgstr "Оригінальний файл %s не збережено: тип=%d\n" + +#~ msgid "Unknown file type %d; not restored: %s\n" +#~ msgstr "Невідомий тип файлу %d: не відновлено: %s\n" + +#~ msgid "Zero length filename: %s\n" +#~ msgstr "Пуста назва файлу: %s\n" + +#, fuzzy +#~ msgid "Plugin: \"%s\" not found.\n" +#~ msgstr "Не знайдено відбиток MD5 Набору Файлів.\n" + +#, fuzzy +#~ msgid "Cannot stat file %s: ERR=%s\n" +#~ msgstr "Не можливо створити чергу(fifo) %s: ERR=%s\n" + +#~ msgid "%s mtime changed during backup.\n" +#~ msgstr "%s mtime змінився під час резервування.\n" + +#~ msgid "%s ctime changed during backup.\n" +#~ msgstr "%s ctime змінився під час резервування.\n" + +#, fuzzy +#~ msgid "%s size of %lld changed during backup to %lld.n" +#~ msgstr "%s розмір змінився під час резервування.\n" + +#~ msgid "Top level directory \"%s\" has unlisted fstype \"%s\"\n" +#~ msgstr "Вища тека \"%s\" має незрозумілий тип файлової системи \"%s\"\n" + +#~ msgid "Top level directory \"%s\" has an unlisted drive type \"%s\"\n" +#~ msgstr "Вища тека \"%s\" має незрозумілий тип пристрою \"%s\"\n" + +#~ msgid "Cannot create directory %s: ERR=%s\n" +#~ msgstr "Не можливо створити теку %s: ERR=%s\n" + +#~ msgid "%s exists but is not a directory.\n" +#~ msgstr "%s існує, але не є текою.\n" + +#~ msgid "Cannot change owner and/or group of %s: ERR=%s\n" +#~ msgstr "Не можливо змінити власника та/або групу для %s: ERR=%s\n" + +#~ msgid "Cannot change permissions of %s: ERR=%s\n" +#~ msgstr "Не можливо змінити дозволи для %s: ERR=%s\n" + +#~ msgid "Too many subdirectories. Some permissions not reset.\n" +#~ msgstr "Забагато підтек. Деякі обмеження не перевстановлено.\n" + +#~ msgid "Cannot open current directory: ERR=%s\n" +#~ msgstr "Не можливо відкрити поточну теку: ERR=%s\n" + +#~ msgid "Cannot get current directory: ERR=%s\n" +#~ msgstr "Не можливо отримати поточну теку: ERR=%s\n" + +#~ msgid "Cannot reset current directory: ERR=%s\n" +#~ msgstr "Не можливо перевстановити поточну теку: ERR=%s\n" + +#, fuzzy +#~ msgid "Error scanning attributes: %s\n" +#~ msgstr "Помилка надсилання Hello до Збирача. ERR=%s\n" + +#, fuzzy +#~ msgid "TLS connection initialization failed.\n" +#~ msgstr "Невдала ініціалізація відбитку %s\n" + +#, fuzzy +#~ msgid "TLS Negotiation failed.\n" +#~ msgstr "Невдале встановлення з'єднання TLS\n" + +#, fuzzy +#~ msgid "TLS enabled but not configured.\n" +#~ msgstr "TLS необхідний, але не налаштовано у Bacula.\n" + +#, fuzzy +#~ msgid "TLS enable but not configured.\n" +#~ msgstr "TLS необхідний, але не налаштовано у Bacula.\n" + +#, fuzzy +#~ msgid "Unknown error." 
+#~ msgstr "Невідомий статус" + +#, fuzzy +#~ msgid "Unknown sig %d" +#~ msgstr "Невідомий статус" + +#, fuzzy +#~ msgid "Cannot bind port %d: ERR=%s.\n" +#~ msgstr "Не можливо створити чергу(fifo) %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not init client queue: ERR=%s\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Error in select: %s\n" +#~ msgstr "Помилка у %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not create client BSOCK.\n" +#~ msgstr "Не можливо створити відбиток.\n" + +#, fuzzy +#~ msgid "Could not add job to client queue: ERR=%s\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not destroy client queue: ERR=%s\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "Could not connect to %s on %s:%d. ERR=%s\n" +#~ "Retrying ...\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to connect to %s on %s:%d. ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "gethostbyname() for host \"%s\" failed: ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Socket open error. proto=%d port=%d. ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not init bsock read mutex. ERR=%s\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not init bsock write mutex. ERR=%s\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not init bsock attribute mutex. ERR=%s\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Write error sending %d bytes to %s:%s:%d: ERR=%s\n" +#~ msgstr "Помилка надсилання Hello до Збирача. ERR=%s\n" + +#, fuzzy +#~ msgid "Read error from %s:%s:%d: ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not malloc BSOCK data buffer\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "sockopt error: %s\n" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#, fuzzy +#~ msgid "fcntl F_GETFL error. ERR=%s\n" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#, fuzzy +#~ msgid "fcntl F_SETFL error. 
ERR=%s\n" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Director authorization error at \"%s:%d\"\n" +#~ msgstr "Проблеми авторизації Директора \"%s:%d\"\n" + +#, fuzzy +#~ msgid "" +#~ "Authorization error: Remote server at \"%s:%d\" did not advertise " +#~ "required TLS support.\n" +#~ msgstr "" +#~ "Проблеми авторизації: Віддалений сервер \"%s:%d\" не повідомляє про " +#~ "необхідність використання TLS.\n" + +#, fuzzy +#~ msgid "" +#~ "Authorization error with Director at \"%s:%d\": Remote server requires " +#~ "TLS.\n" +#~ msgstr "" +#~ "Проблеми авторизації Керівника \"%s:%d\": Віддалений сепвер вимагає TLS.\n" + +#, fuzzy +#~ msgid "" +#~ "Bad errmsg to Hello command: ERR=%s\n" +#~ "The Director at \"%s:%d\" may not be running.\n" +#~ msgstr "" +#~ "Погана відповідь на команду Hello: ERR=%s\n" +#~ "Керівник \"%s:%d\" ймовірно не запущений.\n" + +#, fuzzy +#~ msgid "" +#~ "Authorization error with Director at \"%s:%d\"\n" +#~ "Most likely the passwords do not agree.\n" +#~ "If you are using TLS, there may have been a certificate validation error " +#~ "during the TLS handshake.\n" +#~ "For help, please see: " +#~ msgstr "" +#~ "Проблеми авторизації із Керівником \"%s:%d\"\n" +#~ "Швидше за все, проблема у паролях.\n" +#~ "Якщо Ви використовуєте TLS, можливо, невдала перевірка сертифікату під " +#~ "час TLS handshake.\n" +#~ "Для отримання допомоги, будь ласка, перегляньте http://www.bacula.org/en/" +#~ "rel-manual/Bacula_Freque_Asked_Questi.html#SECTION003760000000000000000.\n" + +#, fuzzy +#~ msgid "safe_unlink could not compile regex pattern \"%s\" ERR=%s\n" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Out of memory: ERR=%s\n" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Bad errno" +#~ msgstr " (%d помилка)" + +#, fuzzy +#~ msgid "Cannot open %s file. %s ERR=%s\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not open %s file. %s ERR=%s\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot lock %s file. %s ERR=%s\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot not open %s file. %s ERR=%s\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not create state file. 
%s ERR=%s\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Write final hdr error: ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "1999 Authorization failed.\n" +#~ msgstr "Невдале встановлення з'єднання TLS\n" + +#, fuzzy +#~ msgid "Unable to open certificate file" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to read certificate from file" +#~ msgstr "Не можливо встановити час файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unsupported key type provided: %d\n" +#~ msgstr "Цей тип відбитку не підтримується: %d\n" + +#, fuzzy +#~ msgid "Unable to open private key file" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to read private key from file" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#~ msgid "Unsupported digest type: %d\n" +#~ msgstr "Цей тип відбитку не підтримується: %d\n" + +#~ msgid "OpenSSL digest initialization failed" +#~ msgstr "Відбиток" + +#~ msgid "OpenSSL digest update failed" +#~ msgstr "Відбиток" + +#~ msgid "OpenSSL digest finalize failed" +#~ msgstr "Відбиток" + +#, fuzzy +#~ msgid "OpenSSL digest_new failed" +#~ msgstr "Відбиток" + +#~ msgid "OpenSSL sign get digest failed" +#~ msgstr "Відбиток" + +#~ msgid "OpenSSL digest Verify final failed" +#~ msgstr "Відбиток" + +#, fuzzy +#~ msgid "Signature creation failed" +#~ msgstr "Невдале встановлення з'єднання TLS\n" + +#, fuzzy +#~ msgid "Unsupported cipher type specified\n" +#~ msgstr "Заданий тип відбитку=%d не підтримується\n" + +#, fuzzy +#~ msgid "Unsupported contentEncryptionAlgorithm: %d\n" +#~ msgstr "Відбиток" + +#, fuzzy +#~ msgid "OpenSSL cipher context initialization failed" +#~ msgstr "Відбиток" + +#, fuzzy +#~ msgid "Encryption session provided an invalid IV" +#~ msgstr "Зашифровані дані сесій" + +#, fuzzy +#~ msgid "OpenSSL cipher context key/IV initialization failed" +#~ msgstr "Відбиток" + +#~ msgid "Unsupported digest type=%d specified\n" +#~ msgstr "Заданий тип відбитку=%d не підтримується\n" + +#, fuzzy +#~ msgid "No error" +#~ msgstr " (%d помилка)" + +#, fuzzy +#~ msgid "Signer not found" +#~ msgstr "Не знайдено відбиток MD5 Набору Файлів.\n" + +#, fuzzy +#~ msgid "Recipient not found" +#~ msgstr "Не знайдено відбиток MD5 Набору Файлів.\n" + +#~ msgid "Unsupported digest algorithm" +#~ msgstr "Відбиток" + +#, fuzzy +#~ msgid "Unsupported encryption algorithm" +#~ msgstr "Відбиток" + +#, fuzzy +#~ msgid "Cannot fork to become daemon: ERR=%s\n" +#~ msgstr "Не можливо перевстановити поточну теку: ERR=%s\n" + +#, fuzzy +#~ msgid "Illegal character \"%c\" in name.\n" +#~ msgstr "Заборонені символи у назві Тому \"%s\"\n" + +#, fuzzy +#~ msgid "Cannot open config file %s: %s\n" +#~ msgstr "Не можливо створити чергу(fifo) %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot open lex\n" +#~ msgstr "Не можливо створити чергу(fifo) %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unknown operation" +#~ msgstr "Невідомий статус" + +#, fuzzy +#~ msgid "verified" +#~ msgstr "зазначений час сну" + +#, fuzzy +#~ msgid "unknown action" +#~ msgstr "Невідомий статус" + +#, fuzzy +#~ msgid "pthread_once failed. ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not init msg_queue mutex. 
ERR=%s\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Config error: %s\n" +#~ msgstr " (%d помилок)" + +#, fuzzy +#~ msgid "Cannot open included config file %s: %s\n" +#~ msgstr "Не можливо створити чергу(fifo) %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Mutex lock failure. ERR=%s\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Mutex unlock failure. ERR=%s\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "pthread_create failed: ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not open console message file %s: ERR=%s\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not get con mutex: ERR=%s\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "open mail pipe %s failed: ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "open mail pipe failed.\n" +#~ msgstr "Помилка команди" + +#, fuzzy +#~ msgid "close error: ERR=%s\n" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#, fuzzy +#~ msgid "fopen %s failed: ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Msg delivery error: fopen %s failed: ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "%s JobId %u: Fatal error: " +#~ msgstr "Статус задачі: Ватальна помилка" + +#, fuzzy +#~ msgid "%s JobId %u: Error: " +#~ msgstr "Статус задачі: Помилка" + +#, fuzzy +#~ msgid "Unable to init mutex: ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to destroy mutex: ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to init OpenSSL threading: ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "Attempt to define second \"%s\" resource named \"%s\" is not permitted.\n" +#~ msgstr "" +#~ "Спроба визначити другий %s ресурс із назвою \"%s\" не дозволяється.\n" + +#, fuzzy +#~ msgid "Unknown item code: %d\n" +#~ msgstr "Невідомий статус задачі %c. " + +#, fuzzy +#~ msgid "" +#~ "Attempt to redefine \"%s\" from \"%s\" to \"%s\" referenced on line %d : " +#~ "%s\n" +#~ msgstr "" +#~ "Спроба визначити другий %s ресурс із назвою \"%s\" не дозволяється.\n" + +#, fuzzy +#~ msgid "Could not find config Resource \"%s\" referenced on line %d : %s\n" +#~ msgstr "" +#~ "Спроба визначити другий %s ресурс із назвою \"%s\" не дозволяється.\n" + +#, fuzzy +#~ msgid "Attempt to redefine resource \"%s\" referenced on line %d : %s\n" +#~ msgstr "" +#~ "Спроба визначити другий %s ресурс із назвою \"%s\" не дозволяється.\n" + +#, fuzzy +#~ msgid "Too many %s directives. Max. is %d. line %d: %s\n" +#~ msgstr "Забагато елементів у ресурсі %s\n" + +#, fuzzy +#~ msgid "Missing config Resource \"%s\" referenced on line %d : %s\n" +#~ msgstr "" +#~ "Спроба визначити другий %s ресурс із назвою \"%s\" не дозволяється.\n" + +#, fuzzy +#~ msgid "Unable to initialize resource lock. ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot open config file \"%s\": %s\n" +#~ msgstr "Не можливо створити чергу(fifo) %s: ERR=%s\n" + +#, fuzzy +#~ msgid "not in resource definition: %s" +#~ msgstr "Ресурс %s не визначено\n" + +#, fuzzy +#~ msgid "Unknown parser state %d\n" +#~ msgstr "Невідомий статус задачі %c. 
" + +#, fuzzy +#~ msgid "Failed to open Plugin directory %s: ERR=%s\n" +#~ msgstr "Не можливо створити теку %s: ERR=%s\n" + +#, fuzzy +#~ msgid "dlopen plugin %s failed: ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Lookup of loadPlugin in plugin %s failed: ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Lookup of unloadPlugin in plugin %s failed: ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not find userid=%s: ERR=%s\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not find password entry. ERR=%s\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not find group=%s: ERR=%s\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not initgroups for group=%s, userid=%s: ERR=%s\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not initgroups for userid=%s: ERR=%s\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not set group=%s: ERR=%s\n" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#, fuzzy +#~ msgid "prctl failed: ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "setreuid failed: ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "cap_from_text failed: ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "cap_set_proc failed: ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not set specified userid: %s\n" +#~ msgstr "Не вдалось перевстановити прапорці для файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "rwl_writelock failure at %s:%d: ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "rwl_writeunlock failure at %s:%d:. ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Fork error: ERR=%s\n" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#, fuzzy +#~ msgid "execv: %s failed: ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "BUS error" +#~ msgstr " (%d помилка)" + +#, fuzzy +#~ msgid "Error loading certificate file" +#~ msgstr "Помилка надсилання Hello до Збирача. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to open DH parameters file" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Connect failure" +#~ msgstr "З'єднано" + +#, fuzzy +#~ msgid "Canceled" +#~ msgstr "Статус задачі: Відмінена" + +#, fuzzy +#~ msgid "Verify differences" +#~ msgstr "Статус задачі: Паревірка різниць" + +#, fuzzy +#~ msgid "Waiting for mount" +#~ msgstr "Статус задачі: Очікування монтування" + +#, fuzzy +#~ msgid "Waiting for Storage resource" +#~ msgstr "Статус задачі: Очікування ресурсу зберігання" + +#, fuzzy +#~ msgid "Waiting for Job resource" +#~ msgstr "Статус задачі: Очікування ресурсу задачі" + +#, fuzzy +#~ msgid "Waiting for Client resource" +#~ msgstr "Статус задачі: Очікування ресурсу Слієнту" + +#, fuzzy +#~ msgid "Waiting for Start Time" +#~ msgstr "Статус задачі: Очікування часу початку" + +#, fuzzy +#~ msgid "Unknown Job termination status=%d" +#~ msgstr "Невідомий статус задачі %c. 
" + +#, fuzzy +#~ msgid "Terminated with errors" +#~ msgstr "Статус задачі: Виконана із помилками" + +#, fuzzy +#~ msgid "Fatal error" +#~ msgstr "Статус задачі: Ватальна помилка" + +#, fuzzy +#~ msgid "Verify found differences" +#~ msgstr "Статус задачі: Паревірка різниць" + +#, fuzzy +#~ msgid "Waiting for File daemon" +#~ msgstr "Статус задачі: Очікування Збирача" + +#, fuzzy +#~ msgid "Waiting for Storage daemon" +#~ msgstr "Статус задачі: Очікування Зберігача" + +#, fuzzy +#~ msgid "Waiting for higher priority jobs" +#~ msgstr "Статус задачі: Очікування завершення пріоритетніших задач" + +#, fuzzy +#~ msgid "Unknown term code" +#~ msgstr "Невідомий статус" + +#, fuzzy +#~ msgid "Unknown Type" +#~ msgstr "Невідомий тип ресурсу %d\n" + +#, fuzzy +#~ msgid "Unable to initialize watchdog lock. ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "rwl_writelock failure. ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "rwl_writeunlock failure. ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not init work queue: ERR=%s\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not add work to queue: ERR=%s\n" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Error in workq_destroy: ERR=%s\n" +#~ msgstr "Помилка у %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Job %s canceled.\n" +#~ msgstr "Статус задачі: Відмінена" + +#, fuzzy +#~ msgid "Read open %s device %s Volume \"%s\" failed: ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Ready to read from volume \"%s\" on %s device %s.\n" +#~ msgstr "У картотеці створено новий Том \"%s\".\n" + +#, fuzzy +#~ msgid "Could not ready %s device %s for append.\n" +#~ msgstr "Не можливо створити відбиток.\n" + +#, fuzzy +#~ msgid "Could not create JobMedia record for Volume=\"%s\" Job=%s\n" +#~ msgstr "Не вдалось відновити прапорці для файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Read error on device %s in ANSI label. ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not write ANSI HDR1 label. ERR=%s\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Error writing EOF to tape. ERR=%s" +#~ msgstr "Помилка надсилання Hello до Збирача. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to set network buffer size.\n" +#~ msgstr "Не вдалось відновити прапорці для файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Write session label failed. ERR=%s\n" +#~ msgstr "Помилка надсилання Hello до Збирача. ERR=%s\n" + +#, fuzzy +#~ msgid "Network send error to FD. ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Error reading data header from FD. n=%d msglen=%d ERR=%s\n" +#~ msgstr "Помилка надсилання Hello до Збирача. ERR=%s\n" + +#, fuzzy +#~ msgid "Network error reading from FD. ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Fatal append error on device %s: ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Error writing end session label. ERR=%s\n" +#~ msgstr "Помилка надсилання Hello до Збирача. ERR=%s\n" + +#, fuzzy +#~ msgid "Error updating file attributes. ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Error getting Volume info: %s" +#~ msgstr "Помилка надсилання Hello до Збирача. 
ERR=%s\n" + +#, fuzzy +#~ msgid "Didn't get vol info vol=%s: ERR=%s" +#~ msgstr "Не можливо створити чергу(fifo) %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Error creating JobMedia records: ERR=%s\n" +#~ msgstr "Не можливо створити теку %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Error creating JobMedia records: %s\n" +#~ msgstr "Помилка надсилання Hello до Збирача. ERR=%s\n" + +#, fuzzy +#~ msgid "TLS negotiation failed with DIR at \"%s:%d\"\n" +#~ msgstr "Встановлення TLS із Керівником невдале \"%s:%d\"\n" + +#, fuzzy +#~ msgid "Unable to authenticate Director at %s.\n" +#~ msgstr "Не вдалось відновити прапорці для файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "Incorrect authorization key from File daemon at %s rejected.\n" +#~ "For help, please see: " +#~ msgstr "" +#~ "Проблеми під час авторизації Керівником.\n" +#~ "Швидше за все, невірні паролі.\n" +#~ "Для отримання допомоги, будь ласка, перегляньте http://www.bacula.org/en/" +#~ "rel-manual/Bacula_Freque_Asked_Questi.html#SECTION003760000000000000000.\n" + +#, fuzzy +#~ msgid "TLS negotiation failed with FD at \"%s:%d\"\n" +#~ msgstr "Встановлення TLS із Керівником невдале \"%s:%d\"\n" + +#, fuzzy +#~ msgid "Lock failure on autochanger. ERR=%s\n" +#~ msgstr "Проігноровано помилку SQL. ERR=%s\n" + +#, fuzzy +#~ msgid "Unlock failure on autochanger. ERR=%s\n" +#~ msgstr "Проігноровано помилку SQL. ERR=%s\n" + +#, fuzzy +#~ msgid "Volume \"%s\" wanted on %s is in use by device %s\n" +#~ msgstr "У картотеці створено новий Том \"%s\".\n" + +#, fuzzy +#~ msgid "Autochanger error: ERR=%s\n" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: bcopy [-d debug_level] \n" +#~ " -b bootstrap specify a bootstrap file\n" +#~ " -c specify a Storage configuration file\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -i specify input Volume names (separated by |)\n" +#~ " -o specify output Volume names (separated by |)\n" +#~ " -p proceed inspite of errors\n" +#~ " -v verbose\n" +#~ " -w specify working directory (default /tmp)\n" +#~ " -? print this message\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Версія: %s (%s) %s %s %s\n" +#~ "\n" +#~ "Використання: bat [-s] [-c config_file] [-d debug_level] [config_file]\n" +#~ " -c set configuration file to file\n" +#~ " -dnn set debug level to nn\n" +#~ " -s без сигналів\n" +#~ " -t перевірка - прочитати конфігурацію і вийти\n" +#~ " -? print this message.\n" +#~ "\n" + +#, fuzzy +#~ msgid "dev open failed: %s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot fixup device error. %s\n" +#~ msgstr "Не вдається відкрити файл %s для виведення. ERR=%s\n" + +#, fuzzy +#~ msgid "Unknown" +#~ msgstr "Невідомий статус" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: bextract \n" +#~ " -b specify a bootstrap file\n" +#~ " -c specify a Storage configuration file\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -T send debug traces to trace file (stored in /tmp)\n" +#~ " -e exclude list\n" +#~ " -i include list\n" +#~ " -p proceed inspite of I/O errors\n" +#~ " -t read data from volume, do not write anything\n" +#~ " -v verbose\n" +#~ " -V specify Volume names (separated by |)\n" +#~ " -? 
print this message\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Версія: %s (%s) %s %s %s\n" +#~ "\n" +#~ "Використання: bat [-s] [-c config_file] [-d debug_level] [config_file]\n" +#~ " -c set configuration file to file\n" +#~ " -dnn set debug level to nn\n" +#~ " -s без сигналів\n" +#~ " -t перевірка - прочитати конфігурацію і вийти\n" +#~ " -? print this message.\n" +#~ "\n" + +#, fuzzy +#~ msgid "Could not open exclude file: %s, ERR=%s\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not open include file: %s, ERR=%s\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot stat %s. It must exist. ERR=%s\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "%s must be a directory.\n" +#~ msgstr "%s існує, але не є текою.\n" + +#, fuzzy +#~ msgid "Found %s error%s\n" +#~ msgstr " (%d помилок)" + +#, fuzzy +#~ msgid "Write error on %s: %s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot continue.\n" +#~ msgstr "Не можливо створити чергу(fifo) %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Seek error Addr=%llu on %s: %s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Uncompression error. ERR=%d\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "LZO uncompression error. ERR=%d\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Attempt to write on closed device=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Write error at %s on device %s Vol=%s. ERR=%s.\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Job failed or canceled.\n" +#~ msgstr "Статус задачі: Відмінена" + +#, fuzzy +#~ msgid "The %sVolume=%s on device=%s appears to be unlabeled.%s\n" +#~ msgstr "У картотеці створено новий Том \"%s\".\n" + +#, fuzzy +#~ msgid "Read error on fd=%d at addr=%s on device %s. ERR=%s.\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Read zero %sbytes Vol=%s at %s on device %s.\n" +#~ msgstr "У картотеці створено новий Том \"%s\".\n" + +#, fuzzy +#~ msgid "Backspace file at EOT failed. ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Backspace record at EOT failed. ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Re-read last block at EOT failed. ERR=%s" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Error sending Volume info to Director.\n" +#~ msgstr "Помилка надсилання Hello до Збирача. ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: bls [options] \n" +#~ " -b specify a bootstrap file\n" +#~ " -c specify a Storage configuration file\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -e exclude list\n" +#~ " -i include list\n" +#~ " -j list jobs\n" +#~ " -k list blocks\n" +#~ " (no j or k option) list saved files\n" +#~ " -L dump label\n" +#~ " -p proceed inspite of errors\n" +#~ " -V specify Volume names (separated by |)\n" +#~ " -E Check records to detect errors\n" +#~ " -v be verbose\n" +#~ " -? print this message\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Версія: %s (%s) %s %s %s\n" +#~ "\n" +#~ "Використання: bat [-s] [-c config_file] [-d debug_level] [config_file]\n" +#~ " -c set configuration file to file\n" +#~ " -dnn set debug level to nn\n" +#~ " -s без сигналів\n" +#~ " -t перевірка - прочитати конфігурацію і вийти\n" +#~ " -? 
print this message.\n" +#~ "\n" + +#, fuzzy +#~ msgid "No archive name specified\n" +#~ msgstr "зазначений час сну" + +#, fuzzy +#~ msgid "Got EOM at file %u on device %s, Volume \"%s\"\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "End of file %u on device %s, Volume \"%s\"\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: bscan [ options ] \n" +#~ " -b bootstrap specify a bootstrap file\n" +#~ " -c specify configuration file\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -m update media info in database\n" +#~ " -D specify the driver database name (default NULL)\n" +#~ " -n specify the database name (default bacula)\n" +#~ " -u specify database user name (default bacula)\n" +#~ " -P specify database password (default none)\n" +#~ " -h specify database host (default NULL)\n" +#~ " -t specify database port (default 0)\n" +#~ " -p proceed inspite of I/O errors\n" +#~ " -r list records\n" +#~ " -s synchronize or store in database\n" +#~ " -S show scan progress periodically\n" +#~ " -v verbose\n" +#~ " -V specify Volume names (separated by |)\n" +#~ " -w specify working directory (default from conf " +#~ "file)\n" +#~ " -? print this message\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Версія: %s (%s) %s %s %s\n" +#~ "\n" +#~ "Використання: bat [-s] [-c config_file] [-d debug_level] [config_file]\n" +#~ " -c set configuration file to file\n" +#~ " -dnn set debug level to nn\n" +#~ " -s без сигналів\n" +#~ " -t перевірка - прочитати конфігурацію і вийти\n" +#~ " -? print this message.\n" +#~ "\n" + +#, fuzzy +#~ msgid "Create JobMedia for Job %s\n" +#~ msgstr "Не вдалось відновити прапорці для файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not create JobMedia record for Volume=%s Job=%s\n" +#~ msgstr "Не вдалось відновити прапорці для файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Pool record for %s found in DB.\n" +#~ msgstr "Відсутні записи для %d %s\n" + +#, fuzzy +#~ msgid "Media record for %s found in DB.\n" +#~ msgstr "Заборонені символи у назві Тому \"%s\"\n" + +#, fuzzy +#~ msgid "Could not update job record. ERR=%s\n" +#~ msgstr "Не вдалось відновити прапорці для файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Got MD5 record: %s\n" +#~ msgstr "Відсутні записи для %d %s\n" + +#, fuzzy +#~ msgid "Got SHA1 record: %s\n" +#~ msgstr "Відсутні записи для %d %s\n" + +#~ msgid "Got signed digest record\n" +#~ msgstr "Отримано підписаний відбиток запису\n" + +#, fuzzy +#~ msgid "Unknown stream type!!! stream=%d len=%i\n" +#~ msgstr "Невідомий тип ресурсу %d\n" + +#, fuzzy +#~ msgid "Could not create File Attributes record. ERR=%s\n" +#~ msgstr "Не вдалось відновити прапорці для файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Created File record: %s\n" +#~ msgstr "Не можливо створити теку %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not create media record. ERR=%s\n" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not update media record. ERR=%s\n" +#~ msgstr "Не вдалось відновити прапорці для файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Created Media record for Volume: %s\n" +#~ msgstr "Заборонені символи у назві Тому \"%s\"\n" + +#, fuzzy +#~ msgid "Could not create pool record. ERR=%s\n" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Created Pool record for Pool: %s\n" +#~ msgstr "Відсутні записи для %d %s\n" + +#, fuzzy +#~ msgid "Could not get Client record. 
ERR=%s\n" +#~ msgstr "Не можливо отримати поточну теку: ERR=%s\n" + +#, fuzzy +#~ msgid "Created Client record for Client: %s\n" +#~ msgstr "Не вдалось відновити прапорці для файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Fileset \"%s\" already exists.\n" +#~ msgstr "Файл пропущено. Вже існує: %s\n" + +#, fuzzy +#~ msgid "Could not create FileSet record \"%s\". ERR=%s\n" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not create JobId record. ERR=%s\n" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not update job start record. ERR=%s\n" +#~ msgstr "Не вдалось відновити прапорці для файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Created new JobId=%u record for original JobId=%u\n" +#~ msgstr "Не вдалось відновити прапорці для файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not update JobId=%u record. ERR=%s\n" +#~ msgstr "Не вдалось відновити прапорці для файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Job Termination code: %d" +#~ msgstr "Статус задачі: Виконана" + +#, fuzzy +#~ msgid "Could not create JobMedia record. ERR=%s\n" +#~ msgstr "Не вдалось відновити прапорці для файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not add MD5/SHA1 to File record. ERR=%s\n" +#~ msgstr "Не вдалось відновити прапорці для файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: bsdjson [options] [config_file]\n" +#~ " -r get resource type \n" +#~ " -n get resource \n" +#~ " -l get only directives matching dirs (use with -r)\n" +#~ " -D get only data\n" +#~ " -c use as configuration file\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -t test - read config and exit\n" +#~ " -v verbose user messages\n" +#~ " -? print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "Автор Nicolas Boichat (2004)\n" +#~ "\n" +#~ "Версія: %s (%s) %s %s %s\n" +#~ "\n" +#~ "Використання: tray-monitor [-c config_file] [-d debug_level]\n" +#~ " -c задати конфігураційний файл \n" +#~ " -d встановити рівень відлагоджування у \n" +#~ " -dt виводити часову мітку у даних відлагоджування\n" +#~ " -t перевірка - прогитати конфігурацію і завершити\n" +#~ " -? показати це повідомлення.\n" +#~ "\n" + +#, fuzzy +#~ msgid "No Storage resource defined in %s. Cannot continue.\n" +#~ msgstr "" +#~ "Помилка: %d ресурсів Спостерігача визначено у %s. Ви повинні визначити " +#~ "одині тільки один ресурс Спостерігача.\n" + +#, fuzzy +#~ msgid "Only one Storage resource permitted in %s\n" +#~ msgstr "Не вдалось відновити прапорці для файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "No Director resource defined in %s. Cannot continue.\n" +#~ msgstr "" +#~ "У %s не задано ресурсу Керівника\n" +#~ "Без цього я не знаю як спілкуватись із Керівником :-(\n" + +#, fuzzy +#~ msgid "\"TLS Certificate\" file not defined for Storage \"%s\" in %s.\n" +#~ msgstr "" +#~ "Жоден із параметрів \"TLS CA Certificate\" або \"TLS CA Certificate Dir\" " +#~ "не задано для Консолі \"%s\" у %s.\n" + +#, fuzzy +#~ msgid "" +#~ "Neither \"TLS CA Certificate\" or \"TLS CA Certificate Dir\" are defined " +#~ "for Storage \"%s\" in %s. At least one CA certificate store is required " +#~ "when using \"TLS Verify Peer\".\n" +#~ msgstr "" +#~ "Жоден із параметрів \"TLS CA Certificate\" або \"TLS CA Certificate Dir\" " +#~ "не задано для Керівника \"%s\" у %s. Необхідне щонайменше одне сховище " +#~ "для сертифікату CA.\n" + +#, fuzzy +#~ msgid "No archive name specified.\n" +#~ msgstr "зазначений час сну" + +#, fuzzy +#~ msgid "Device open failed. 
ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "I/O error on device: ERR=%s" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Volume type error: ERR=%s\n" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Volume name error\n" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Error creating label. ERR=%s" +#~ msgstr "Помилка надсилання Hello до Збирача. ERR=%s\n" + +#, fuzzy +#~ msgid "Volume version error.\n" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unknown error.\n" +#~ msgstr "Невідомий статус" + +#, fuzzy +#~ msgid "Bad status from load. ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Bad status from rewind. ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Bad status from weof. ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Bad status from bsf. ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Command aborted.\n" +#~ msgstr "Помилка команди" + +#, fuzzy +#~ msgid "Error writing record to block.\n" +#~ msgstr "Помилка надсилання Hello до Збирача. ERR=%s\n" + +#, fuzzy +#~ msgid "Error writing block to device.\n" +#~ msgstr "Помилка надсилання Hello до Збирача. ERR=%s\n" + +#, fuzzy +#~ msgid "Wrote first record of %d bytes.\n" +#~ msgstr "Відсутні записи для %d %s\n" + +#, fuzzy +#~ msgid "Wrote second record of %d bytes.\n" +#~ msgstr "Відсутні записи для %d %s\n" + +#, fuzzy +#~ msgid "Wrote third record of %d bytes.\n" +#~ msgstr "Відсутні записи для %d %s\n" + +#, fuzzy +#~ msgid "Backspace file failed! ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Backspace record failed! ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Read block failed! ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Write failed at block %u. stat=%d ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Error writing record to block.\n" +#~ msgstr "Помилка надсилання Hello до Збирача. ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Error writing block to device.\n" +#~ msgstr "Помилка надсилання Hello до Збирача. ERR=%s\n" + +#, fuzzy +#~ msgid "Read block %d failed! ERR=%s\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Read record failed. Block %d! ERR=%s\n" +#~ msgstr "Не можливо створити теку %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Read record failed! ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "3991 Bad autochanger command: %s\n" +#~ msgstr "Погана відповідь на команду Hello: ERR=%s\n" + +#, fuzzy +#~ msgid "3992 Bad autochanger command: %s\n" +#~ msgstr "Погана відповідь на команду Hello: ERR=%s\n" + +#, fuzzy +#~ msgid "3993 Bad autochanger command: %s\n" +#~ msgstr "Погана відповідь на команду Hello: ERR=%s\n" + +#, fuzzy +#~ msgid "Bad status from fsr. ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Bad status from fsf. ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Wrote one record of %d bytes.\n" +#~ msgstr "Відсутні записи для %d %s\n" + +#, fuzzy +#~ msgid "read error on %s. ERR=%s.\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Bad status from read %d. ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Error reading block. 
ERR=%s\n" +#~ msgstr "Помилка у %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Wrote Start of Session label.\n" +#~ msgstr "Помилка надсилання Hello до Збирача. ERR=%s\n" + +#, fuzzy +#~ msgid "Job canceled.\n" +#~ msgstr "Статус задачі: Відмінена" + +#, fuzzy +#~ msgid "Wrote End of Session label.\n" +#~ msgstr "Помилка надсилання Hello до Збирача. ERR=%s\n" + +#, fuzzy +#~ msgid "Could not create state file: %s ERR=%s\n" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#, fuzzy +#~ msgid "do_unfill failed.\n" +#~ msgstr "Помилка команди" + +#, fuzzy +#~ msgid "Reposition error. ERR=%s\n" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Error reading block: ERR=%s\n" +#~ msgstr "Помилка у %s: ERR=%s\n" + +#, fuzzy +#~ msgid "clear tape errors" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#, fuzzy +#~ msgid "forward space a record" +#~ msgstr "Невідомий тип ресурсу %d\n" + +#, fuzzy +#~ msgid "print this command" +#~ msgstr ": неправильна команда\n" + +#, fuzzy +#~ msgid "\"%s\" is an invalid command\n" +#~ msgstr ": неправильна команда\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: btape \n" +#~ " -b specify bootstrap file\n" +#~ " -c set configuration file to file\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -p proceed inspite of I/O errors\n" +#~ " -s turn off signals\n" +#~ " -w set working directory to dir\n" +#~ " -v be verbose\n" +#~ " -? print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "Автор Nicolas Boichat (2004)\n" +#~ "\n" +#~ "Версія: %s (%s) %s %s %s\n" +#~ "\n" +#~ "Використання: tray-monitor [-c config_file] [-d debug_level]\n" +#~ " -c задати конфігураційний файл \n" +#~ " -d встановити рівень відлагоджування у \n" +#~ " -dt виводити часову мітку у даних відлагоджування\n" +#~ " -t перевірка - прогитати конфігурацію і завершити\n" +#~ " -? показати це повідомлення.\n" +#~ "\n" + +#, fuzzy +#~ msgid "End of Volume \"%s\" %d records.\n" +#~ msgstr "У картотеці створено новий Том \"%s\".\n" + +#, fuzzy +#~ msgid "Cannot open Dev=%s, Vol=%s\n" +#~ msgstr "Не вдається відкрити файл %s для введення. ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot find device \"%s\" in config file %s.\n" +#~ msgstr "Не можливо створити чергу(fifo) %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot init device %s\n" +#~ msgstr "Не можливо створити чергу(fifo) %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot open %s\n" +#~ msgstr "Не можливо створити чергу(fifo) %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not find device \"%s\" in config file %s.\n" +#~ msgstr "Не можливо створити відбиток.\n" + +#, fuzzy +#~ msgid "Seek error: ERR=%s\n" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#, fuzzy +#~ msgid "lseek error on %s. ERR=%s.\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Error closing device %s. ERR=%s.\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "No FreeSpace command defined.\n" +#~ msgstr "Ресурс %s не визначено\n" + +#, fuzzy +#~ msgid "Cannot run free space command. Results=%s ERR=%s\n" +#~ msgstr "Не вдалось створити ноду(node) %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to write EOF. ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "New volume \"%s\" mounted on device %s at %s.\n" +#~ msgstr "У картотеці створено новий Том \"%s\".\n" + +#, fuzzy +#~ msgid "write_block_to_device Volume label failed. 
ERR=%s" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Connection request from %s failed.\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "[SF0101] Bad client command: %s" +#~ msgstr "Погана відповідь на команду Hello: ERR=%s\n" + +#, fuzzy +#~ msgid "[SF0102] Failed to connect to Client daemon: %s:%d\n" +#~ msgstr "Ініціалізація контексту TLS для Консолі невдала \"%s\".\n" + +#, fuzzy +#~ msgid "[SF0103] Bad storage command: %s" +#~ msgstr "Погана відповідь на команду Hello: ERR=%s\n" + +#, fuzzy +#~ msgid "[SF0104] Failed to connect to Storage daemon: %s:%d\n" +#~ msgstr "Ініціалізація контексту TLS для Консолі невдала \"%s\".\n" + +#, fuzzy +#~ msgid "3991 Bad setdebug command: %s\n" +#~ msgstr "Погана відповідь на команду Hello: ERR=%s\n" + +#, fuzzy +#~ msgid "3903 Error scanning cancel command.\n" +#~ msgstr "Уведіть цятку для переривання команди.\n" + +#, fuzzy +#~ msgid "3900 Truncate cache for volume \"%s\" failed. ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "3912 Error scanning upload command: ERR=%s\n" +#~ msgstr "Помилка надсилання Hello до Збирача. ERR=%s\n" + +#, fuzzy +#~ msgid "3999 Error with the upload: ERR=%s\n" +#~ msgstr "Помилка у %s: ERR=%s\n" + +#, fuzzy +#~ msgid "3929 Unable to open device \"%s\": ERR=%s\n" +#~ msgstr "Не можливо встановити час файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "3912 Failed to label Volume %s: ERR=%s\n" +#~ msgstr "Не можливо встановити час файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "3913 Failed to open next part: ERR=%s\n" +#~ msgstr "Не можливо встановити час файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "3917 Failed to label Volume: ERR=%s\n" +#~ msgstr "Не можливо встановити час файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "3918 Failed to label Volume (no media): ERR=%s\n" +#~ msgstr "Не можливо встановити час файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Specified slot ignored. " +#~ msgstr "Проігноровано помилку SQL. ERR=%s\n" + +#, fuzzy +#~ msgid "3901 Unable to open device \"%s\": ERR=%s\n" +#~ msgstr "Не можливо встановити час файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "3905 Unknown wait state %d\n" +#~ msgstr "Невідомий статус задачі %c. " + +#, fuzzy +#~ msgid "3002 Device \"%s\" enabled.\n" +#~ msgstr "Помилка команди" + +#, fuzzy +#~ msgid "3002 Device \"%s\" disabled.\n" +#~ msgstr "Помилка команди" + +#, fuzzy +#~ msgid "[SF0110] Could not create bootstrap file %s: ERR=%s\n" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#, fuzzy +#~ msgid "[SF0111] Error parsing bootstrap file.\n" +#~ msgstr "Помилка надсилання Hello до Збирача. ERR=%s\n" + +#, fuzzy +#~ msgid "FD command not found: %s\n" +#~ msgstr "Керівник не прийняв команду Hello\n" + +#, fuzzy +#~ msgid "Cannot open session, received bad parameters.\n" +#~ msgstr "Не вдається відкрити файл %s для виведення. ERR=%s\n" + +#, fuzzy +#~ msgid "Could not open file device %s. No Volume name given.\n" +#~ msgstr "Не можливо створити відбиток.\n" + +#, fuzzy +#~ msgid "Could not open(%s,%s,0640): ERR=%s\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to truncate device %s. ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to stat device %s. 
ERR=%s\n" +#~ msgstr "Не можливо встановити час файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not reopen: %s, ERR=%s\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Ready to append to end of Volume \"%s\" size=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "Connection from unknown Director %s at %s rejected.\n" +#~ "Please see " +#~ msgstr "Від'єднуюсь від Керівника %s:%d\n" + +#, fuzzy +#~ msgid "Invalid connection from %s. Len=%d\n" +#~ msgstr "Від'єднуюсь від Клієнта %s:%d\n" + +#, fuzzy +#~ msgid "Invalid Hello from %s. Len=%d\n" +#~ msgstr "Від'єднуюсь від Клієнта %s:%d\n" + +#, fuzzy +#~ msgid "Client connect failed: Job name not found: %s\n" +#~ msgstr "Керівник не прийняв команду Hello\n" + +#, fuzzy +#~ msgid "Recv request to Client failed. ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Bad Hello from Client: %s.\n" +#~ msgstr "Від'єднуюсь від Керівника %s:%d\n" + +#, fuzzy +#~ msgid "[SE0001] Unable to stat device %s at %s: ERR=%s\n" +#~ msgstr "Не можливо встановити час файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0003] Unable to stat mount point %s: ERR=%s\n" +#~ msgstr "Не можливо встановити час файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0009] Unable to init mutex: ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0010] Unable to init cond variable: ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0011] Unable to init cond variable: ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0012] Unable to init spool mutex: ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0013] Unable to init acquire mutex: ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0014] Unable to init freespace mutex: ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0015] Unable to init read acquire mutex: ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0016] Unable to init volcat mutex: ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "[SA0017] Unable to init dcrs mutex: ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "[SF0020] dlopen of SD driver=%s at %s failed: ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Couldn't rewind %s device %s: ERR=%s\n" +#~ msgstr "Не вдалось створити жорстку лінку(hardlink) %s -> %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not unserialize Volume label: ERR=%s\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Volume on %s device %s has bad Bacula label type: %ld\n" +#~ msgstr "У картотеці створено новий Том \"%s\".\n" + +#, fuzzy +#~ msgid "Could not reserve volume %s on %s device %s\n" +#~ msgstr "Не вдалось відновити прапорці для файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot write Volume label to block for %s device %s\n" +#~ msgstr "Не вдалось відновити прапорці для файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Open %s device %s Volume \"%s\" failed: ERR=%s" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Open %s device %s Volume \"%s\" failed: ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Rewind error on %s device %s: ERR=%s\n" +#~ msgstr "Помилка у %s 
файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Truncate error on %s device %s: ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Failed to re-open device after truncate on %s device %s: ERR=%s" +#~ msgstr "Не можливо встановити час файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to write %s device %s: ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Recycled volume \"%s\" on %s device %s, all previous data lost.\n" +#~ msgstr "У картотеці створено новий Том \"%s\".\n" + +#, fuzzy +#~ msgid "Wrote label to prelabeled Volume \"%s\" on %s device %s\n" +#~ msgstr "У картотеці створено новий Том \"%s\".\n" + +#, fuzzy +#~ msgid "Unknown %d" +#~ msgstr "Невідомий статус" + +#, fuzzy +#~ msgid "pthread_cond_wait failure. ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Too many errors trying to mount %s device %s.\n" +#~ msgstr "У картотеці створено новий Том \"%s\".\n" + +#, fuzzy +#~ msgid "Job %d canceled.\n" +#~ msgstr "Статус задачі: Відмінена" + +#, fuzzy +#~ msgid "Open of %s device %s Volume \"%s\" failed: ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to position to end of data on %s device %s: ERR=%s\n" +#~ msgstr "Не можливо встановити час файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Volume \"%s\" not loaded on %s device %s.\n" +#~ msgstr "У картотеці створено новий Том \"%s\".\n" + +#, fuzzy +#~ msgid "%s device %s not configured to autolabel Volumes.\n" +#~ msgstr "TLS необхідний, але не налаштовано у Bacula.\n" + +#, fuzzy +#~ msgid "Cannot open %s Dev=%s, Vol=%s for reading.\n" +#~ msgstr "Не вдається відкрити файл %s для введення. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to set eotmodel on device %s: ERR=%s\n" +#~ msgstr "Не можливо встановити час файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid " Bacula status:" +#~ msgstr "Монітор статусу демонів Bacula" + +#, fuzzy +#~ msgid "ioctl MTIOCGET error on %s. ERR=%s.\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "unknown func code %d" +#~ msgstr "Невідомий тип ресурсу %d\n" + +#, fuzzy +#~ msgid "I/O function \"%s\" not supported on this device.\n" +#~ msgstr "У картотеці створено новий Том \"%s\".\n" + +#, fuzzy +#~ msgid "Cannot open bootstrap file %s: %s\n" +#~ msgstr "Не можливо створити чергу(fifo) %s: ERR=%s\n" + +#, fuzzy +#~ msgid "REGEX '%s' compile error. ERR=%s\n" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#, fuzzy +#~ msgid "VolumeName : %s\n" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Error sending header to Client. ERR=%s\n" +#~ msgstr "Помилка надсилання Hello до Збирача. ERR=%s\n" + +#, fuzzy +#~ msgid "Error sending to FD. ERR=%s\n" +#~ msgstr "Помилка надсилання Hello до Збирача. ERR=%s\n" + +#, fuzzy +#~ msgid "Error sending data to Client. ERR=%s\n" +#~ msgstr "Помилка надсилання Hello до Збирача. ERR=%s\n" + +#, fuzzy +#~ msgid "Error sending to File daemon. ERR=%s\n" +#~ msgstr "Помилка надсилання Hello до Збирача. ERR=%s\n" + +#, fuzzy +#~ msgid "End of Volume \"%s\" at addr=%s on device %s.\n" +#~ msgstr "У картотеці створено новий Том \"%s\".\n" + +#, fuzzy +#~ msgid "End Session" +#~ msgstr "Зашифровані дані сесій" + +#, fuzzy +#~ msgid "Unknown code %d\n" +#~ msgstr "Невідомий тип ресурсу %d\n" + +#, fuzzy +#~ msgid "Unable to initialize reservation lock. 
ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "3939 Could not get dcr\n" +#~ msgstr "Не можливо створити відбиток.\n" + +#, fuzzy +#~ msgid "Failed command: %s\n" +#~ msgstr "Погана відповідь на команду Hello: ERR=%s\n" + +#, fuzzy +#~ msgid "3926 Could not get dcr for device: %s\n" +#~ msgstr "Не вдалось перевстановити прапорці для файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "3611 JobId=%u Volume max jobs=%d exceeded on %s device %s.\n" +#~ msgstr "У картотеці створено новий Том \"%s\".\n" + +#, fuzzy +#~ msgid "" +#~ "3607 JobId=%u wants Vol=\"%s\" drive has Vol=\"%s\" on %s device %s.\n" +#~ msgstr "У картотеці створено новий Том \"%s\".\n" + +#, fuzzy +#~ msgid "Job cancelled.\n" +#~ msgstr "Статус задачі: Відмінена" + +#, fuzzy +#~ msgid "Open data spool file %s failed: ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Ftruncate spool file failed: ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Spool header read error. ERR=%s\n" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Error writing block to spool file. ERR=%s\n" +#~ msgstr "Помилка надсилання Hello до Збирача. ERR=%s\n" + +#, fuzzy +#~ msgid "Fseek on attributes file failed: ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Truncate on attributes file failed: ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "fopen attr spool file %s failed: ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid " %s Alert: at %s Volume=\"%s\" alert=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid " Device is blocked labeling a Volume.\n" +#~ msgstr "TLS необхідний, але не налаштовано у Bacula.\n" + +#, fuzzy +#~ msgid "%s Job %s waiting for Client connection.\n" +#~ msgstr "Статус задачі: Очікування ресурсу Слієнту" + +#, fuzzy +#~ msgid "3900 Unknown arg in .status command: %s\n" +#~ msgstr "Невідомий статус задачі %c. " + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "%sVersion: %s (%s)\n" +#~ "\n" +#~ "Usage: bacula-sd [options] [-c config_file] [config_file]\n" +#~ " -c use as configuration file\n" +#~ " -d [,] set debug level to , debug tags to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -T set trace on\n" +#~ " -f run in foreground (for debugging)\n" +#~ " -g set groupid to group\n" +#~ " -m print kaboom output (for debugging)\n" +#~ " -p proceed despite I/O errors\n" +#~ " -s no signals (for debugging)\n" +#~ " -t test - read config and exit\n" +#~ " -u userid to \n" +#~ " -v verbose user messages\n" +#~ " -? print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Версія: %s (%s) %s %s %s\n" +#~ "\n" +#~ "Використання: bat [-s] [-c config_file] [-d debug_level] [config_file]\n" +#~ " -c set configuration file to file\n" +#~ " -dnn set debug level to nn\n" +#~ " -s без сигналів\n" +#~ " -t перевірка - прочитати конфігурацію і вийти\n" +#~ " -? print this message.\n" +#~ "\n" + +#, fuzzy +#~ msgid "Unable to create thread. 
ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not initialize SD device \"%s\"\n" +#~ msgstr "Ініціалізація контексту TLS для Консолі невдала \"%s\".\n" + +#, fuzzy +#~ msgid "Unable to stat ControlDevice %s: ERR=%s\n" +#~ msgstr "Не можливо встановити час файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not open device %s\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not mount device %s\n" +#~ msgstr "Не вдалось відновити прапорці для файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Warning: no \"%s\" resource (%d) defined.\n" +#~ msgstr "Ресурс %s не визначено\n" + +#, fuzzy +#~ msgid "dump_resource type=%d\n" +#~ msgstr "Невідомий тип ресурсу %d\n" + +#, fuzzy +#~ msgid "Warning: unknown resource type %d\n" +#~ msgstr "Невідомий тип ресурсу %d\n" + +#, fuzzy +#~ msgid "Cannot find AutoChanger resource %s\n" +#~ msgstr "Не вдалось відновити прапорці для файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to init lock for Autochanger=%s: ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot find Device resource %s\n" +#~ msgstr "Не вдається відкрити файл %s для виведення. ERR=%s\n" + +#, fuzzy +#~ msgid "Alert: Volume=\"%s\" alert=%d: ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "3997 Bad alert command: %s: ERR=%s.\n" +#~ msgstr "Не вдалось створити ноду(node) %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to open device %s: ERR=%s\n" +#~ msgstr "Не можливо встановити час файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Rewind error on %s. ERR=%s.\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Ready to append to end of Volume \"%s\" at file=%d.\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "ioctl MTEOM error on %s. ERR=%s.\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "ioctl MTLOAD error on %s. ERR=%s.\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "ioctl MTOFFL error on %s. ERR=%s.\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "ioctl MTFSF error on %s. ERR=%s.\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "ioctl MTBSF error on %s. ERR=%s.\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "ioctl MTFSR %d error on %s. ERR=%s.\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "ioctl MTBSR error on %s. ERR=%s.\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "ioctl MTWEOF error on %s. ERR=%s.\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to initialize volume list lock. ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not reserve volume \"%s\", because job canceled.\n" +#~ msgstr "Не вдалось відновити прапорці для файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "Could not reserve volume \"%s\" for append, because it will be read.\n" +#~ msgstr "Не вдалось відновити прапорці для файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "pthread timedwait error. ERR=%s\n" +#~ msgstr "Помилка ініціалізації Pthread: %s\n" + +#, fuzzy +#~ msgid "Could not init Bacula database\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Error opening datafile %s\n" +#~ msgstr "Помилка надсилання Hello до Збирача. ERR=%s\n" + +#, fuzzy +#~ msgid "Error while inserting file\n" +#~ msgstr "Помилка надсилання Hello до Збирача. 
ERR=%s\n" + +#, fuzzy +#~ msgid "Could not open data file: %s\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Fatal fgets error: ERR=%s\n" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "\n" +#~ "Usage: %s [-f from] [-h mailhost] [-s subject] [-c copy] [recipient ...]\n" +#~ " -4 forces bsmtp to use IPv4 addresses only.\n" +#~ " -6 forces bsmtp to use IPv6 addresses only.\n" +#~ " -8 set charset to UTF-8\n" +#~ " -a use any ip protocol for address resolution\n" +#~ " -c set the Cc: field\n" +#~ " -d set debug level to \n" +#~ " -dt print a timestamp in debug output\n" +#~ " -f set the From: field\n" +#~ " -h use mailhost:port as the SMTP server\n" +#~ " -s set the Subject: field\n" +#~ " -r set the Reply-To: field\n" +#~ " -l set the maximum number of lines to send (default: " +#~ "unlimited)\n" +#~ " -? print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "\n" +#~ "Версія: %s (%s) %s %s %s\n" +#~ "\n" +#~ "Використання: bat [-s] [-c config_file] [-d debug_level] [config_file]\n" +#~ " -c set configuration file to file\n" +#~ " -dnn set debug level to nn\n" +#~ " -s без сигналів\n" +#~ " -t перевірка - прочитати конфігурацію і вийти\n" +#~ " -? print this message.\n" +#~ "\n" + +#, fuzzy +#~ msgid "Fatal gethostname error: ERR=%s\n" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Fatal getaddrinfo for myself failed \"%s\": ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Fatal gethostbyname for myself failed \"%s\": ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Error unknown mail host \"%s\": ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Failed to connect to mailhost %s\n" +#~ msgstr "Ініціалізація контексту TLS для Консолі невдала \"%s\".\n" + +#, fuzzy +#~ msgid "Fatal socket error: ERR=%s\n" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Fatal connect error to %s: ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Fatal _open_osfhandle error: ERR=%s\n" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Fatal fdopen error: ERR=%s\n" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Fatal dup error: ERR=%s\n" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to open -p argument for reading" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Could not open, database \"%s\".\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Error no Director resource defined.\n" +#~ msgstr "Ресурс %s не визначено\n" + +#, fuzzy +#~ msgid "Select function number: " +#~ msgstr "Оберіть Керівника, увівши номер: " + +#, fuzzy +#~ msgid "Found %d for: %s\n" +#~ msgstr " (%d помилок)" + +#, fuzzy +#~ msgid "Found %d Restore Job records.\n" +#~ msgstr " (%d помилок)" + +#, fuzzy +#~ msgid "Err: Could not access %s: %s\n" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Err: Could not follow ff->link %s: %s\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Err: Could not stat %s: %s\n" +#~ msgstr "Не вдалось створити %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Err: Could not open directory %s: %s\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Err: Unknown file ff->type %d: %s\n" +#~ msgstr "Невідомий тип файлу %d: не відновлено: %s\n" + +#, fuzzy +#~ msgid "Could not open include file: %s\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + 
+#, fuzzy +#~ msgid "Could not open exclude file: %s\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#~ msgid "" +#~ "No Client, Storage or Director resource defined in %s\n" +#~ "Without that I don't how to get status from the File, Storage or Director " +#~ "Daemon :-(\n" +#~ msgstr "" +#~ "У %s не визначено жодного ресурсу Клієнту, Зберігача або Керівника\n" +#~ "Без цього я не знаю як отримати статус Зберігача, Збирача або Керівника\n" + +#~ msgid "" +#~ "Invalid refresh interval defined in %s\n" +#~ "This value must be greater or equal to 1 second and less or equal to 10 " +#~ "minutes (read value: %d).\n" +#~ msgstr "" +#~ "Задано поганий інтервал оновлення у %s\n" +#~ "Це значення повинно бути не меньше 1 секунди та не більше 10 хвилин " +#~ "(теперішнє значення: %d).\n" + +#~ msgid "Error, currentitem is not a Client or a Storage..\n" +#~ msgstr "Помилка. Поточний пункт не є Клієнтом або Зберігачем.\n" + +#~ msgid "Connecting to Client %s:%d" +#~ msgstr "Приєднуюсь до Клієнта %s:%d" + +#~ msgid "Connecting to Storage %s:%d" +#~ msgstr "Приєднуюсь до Зберігача %s:%d" + +#~ msgid "Error, currentitem is not a Client, a Storage or a Director..\n" +#~ msgstr "Помилка. Поточний пункт не є Клієнтом або Зберігачем.\n" + +#~ msgid "Cannot connect to daemon." +#~ msgstr "Не можу приєднатись до демону." + +#~ msgid "Opened connection with Director daemon." +#~ msgstr "Відкриті з'єднання із Керівником." + +#~ msgid "Opened connection with File daemon." +#~ msgstr "Відкриті з'єднання із Збирачем." + +#~ msgid "Opened connection with Storage daemon." +#~ msgstr "Відкриті з'єднання із Зберігачем." + +#~ msgid "Error : Connection closed." +#~ msgstr "Помилка : З'єднання завершено" + +#, fuzzy +#~ msgid "encode command string" +#~ msgstr "луна командного рядку" + +#, fuzzy +#~ msgid "Command line" +#~ msgstr "Не зрозумілий параметр командного рядка" + +#, fuzzy +#~ msgid "aclx_scanStr error on file \"%s\": ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "aclx_put error on file \"%s\": ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Generate VSS snapshots. Driver=\"%s\"\n" +#~ msgstr "Не можливо створити теку %s: ERR=%s\n" + +#, fuzzy +#~ msgid "WriteEncryptedFileRaw failure: ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "attr_list error on file \"%s\": ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "attr_set error on file \"%s\": ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "getproplist error on file \"%s\": ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "setproplist error on file \"%s\": ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Can't delete working directory %s. ERR=%s\n" +#~ msgstr "Не можливо створити теку %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unknown parameter or missing argument for %s.\n" +#~ msgstr "Невідомий статус" + +#, fuzzy +#~ msgid "Unknown parameter for %s. Expecting block or file\n" +#~ msgstr "Невідомий статус" + +#, fuzzy +#~ msgid "Unable to create temporary file %s. 
ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to detect the MySQL data_directory on this system.\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to get the BINLOG list.\n" +#~ msgstr "Не можливо встановити час файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to determine the last LSN for %s (Previous job is %s)\n" +#~ msgstr "Не можливо встановити час файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to get last LSN from the backup\n" +#~ msgstr "Не вдалось відновити прапорці для файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Restoring target database \"%s\"\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Creating target database \"%s\"\n" +#~ msgstr "Не вдалось відкрити%s: ERR=%s\n" + +#, fuzzy +#~ msgid "Can't determine the last WAL file\n" +#~ msgstr "Не можливо встановити час файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Can't determine WAL directory\n" +#~ msgstr "Не можливо створити теку %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to detect the PostgreSQL data_directory on this system.\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to find data_directory=%s on this system. ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to start the PITR backup on this system.\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to determine the first WAL file on this system.\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to analyse tablespaces %s on this system. ERR=%s\n" +#~ msgstr "Не можливо встановити час файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to analyse data_directory %s on this system. ERR=%s\n" +#~ msgstr "Не можливо встановити час файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Can't create the %s file for recovery. ERR=%s\n" +#~ msgstr "Не можливо створити теку %s: ERR=%s\n" + +#, fuzzy +#~ msgid "DDE commit failed. ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Initializing DDE." +#~ msgstr "Ініціалізація ..." + +#, fuzzy +#~ msgid "Cannot create DedupIndexDirectory: %s" +#~ msgstr "Не можливо створити теку %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot create recovery directory: %s" +#~ msgstr "Не можливо створити теку %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Cannot delete temporary recovery directory: %s" +#~ msgstr "Не можливо перевстановити поточну теку: ERR=%s\n" + +#, fuzzy +#~ msgid "" +#~ "Got Aligned or File type Volume %s on Dedup device %s. Wanted File.\n" +#~ msgstr "У картотеці створено новий Том \"%s\".\n" + +#, fuzzy +#~ msgid "" +#~ "Got Aligned or Dedup type Volume %s on File device %s. Wanted File.\n" +#~ msgstr "У картотеці створено новий Том \"%s\".\n" + +#, fuzzy +#~ msgid "" +#~ "Got File or Dedup type Volume %s on Aligned device %s. 
Wanted Aligned.\n" +#~ msgstr "У картотеці створено новий Том \"%s\".\n" + +#, fuzzy +#~ msgid "Unable to parse user supplied restore configuration\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "AIX Specific Extended attribs" +#~ msgstr "Специфічні додаткові атрибути Linux" + +#~ msgid "" +#~ "Written by Nicolas Boichat (2004)\n" +#~ "\n" +#~ "Version: %s (%s) %s %s %s\n" +#~ "\n" +#~ "Usage: tray-monitor [-c config_file] [-d debug_level]\n" +#~ " -c set configuration file to file\n" +#~ " -d set debug level to \n" +#~ " -dt print timestamp in debug output\n" +#~ " -t test - read configuration and exit\n" +#~ " -? print this message.\n" +#~ "\n" +#~ msgstr "" +#~ "Автор Nicolas Boichat (2004)\n" +#~ "\n" +#~ "Версія: %s (%s) %s %s %s\n" +#~ "\n" +#~ "Використання: tray-monitor [-c config_file] [-d debug_level]\n" +#~ " -c задати конфігураційний файл \n" +#~ " -d встановити рівень відлагоджування у \n" +#~ " -dt виводити часову мітку у даних відлагоджування\n" +#~ " -t перевірка - прогитати конфігурацію і завершити\n" +#~ " -? показати це повідомлення.\n" +#~ "\n" + +#~ msgid "Open status window..." +#~ msgstr "Відкрити вікно статусу" + +#~ msgid "Exit" +#~ msgstr "Закінчити" + +#~ msgid " (DIR)" +#~ msgstr "Керівник" + +#~ msgid " (FD)" +#~ msgstr "Збирач" + +#~ msgid " (SD)" +#~ msgstr "(Зберігач)" + +#~ msgid "Refresh interval in seconds: " +#~ msgstr "Інтервал оновлення у секундах" + +#~ msgid "Refresh now" +#~ msgstr "Оновити зараз" + +#~ msgid "About" +#~ msgstr "Про..." + +#~ msgid "Close" +#~ msgstr "Зачинити" + +#~ msgid "Disconnecting from Storage %s:%d\n" +#~ msgstr "Від'єднуюсь від Зберігача %s:%d\n" + +#~ msgid "Version" +#~ msgstr "Версія" + +#~ msgid "No last job." +#~ msgstr "Останні задачі відсутні" + +#~ msgid "Job status: Blocked" +#~ msgstr "Статус задачі: Заблокована" + +#~ msgid "Job status: Waiting for new media" +#~ msgstr "Статус задачі: Очікування нового носія" + +#~ msgid "Job status: Waiting for maximum jobs" +#~ msgstr "Статус задачі: Очікування кількості задач" + +#~ msgid "Job status: Unknown(%c)" +#~ msgstr "Статус задачі: Невідомий(%c)" + +#~ msgid "Opened connection with Director daemon.\n" +#~ msgstr "Відкриті з'єднання із Керівником.\n" + +#~ msgid "Opened connection with File daemon.\n" +#~ msgstr "Відкриті з'єднання із Збирачем.\n" + +#~ msgid "Bad response to Hello command: ERR=" +#~ msgstr "Погана відповідь на команду Hello: ERR=" + +#~ msgid "Welcome to bacula bwx-console %s (%s)!\n" +#~ msgstr "Ласкаво просимо до bacula bwx-console %s (%s)!\n" + +#~ msgid "Passphrase for Console \"%s\" TLS private key: " +#~ msgstr "Пароль для закритого ключа TLS Консолі \"%s\": " + +#~ msgid "Passphrase for Director \"%s\" TLS private key: " +#~ msgstr "Пароль для закритого ключа TLS Керівника \"%s\": " + +#, fuzzy +#~ msgid "%s blocks of %lld changed during backup to %lld.\n" +#~ msgstr "%s розмір змінився під час резервування.\n" + +#, fuzzy +#~ msgid "Unable to get the Full job for %s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to write to %s to save full job name. ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to save last controlfile into file %s. ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to save last SCN into file %s. ERR=%s\n" +#~ msgstr "Не можливо встановити власника файлу %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Can't open /etc/oratab. 
ERR=%s\n" +#~ msgstr "Не вдається відкрити файл %s для введення. ERR=%s\n" + +#, fuzzy +#~ msgid "Unable to open %s to save RMAN output. ERR=%s\n" +#~ msgstr "Не вдається відкрити файл %s для виведення. ERR=%s\n" + +#, fuzzy +#~ msgid "Error occured while dumping tempfiles ERR=%s\n" +#~ msgstr "Помилка у %s файл %s: ERR=%s\n" + +#, fuzzy +#~ msgid "Can't create pipe to discuss with Oracle. ERR=%s\n" +#~ msgstr "Не можливо створити теку %s: ERR=%s\n" + +#~ msgid "" +#~ "Authorization key rejected by Storage daemon.\n" +#~ "Please see http://www.bacula.org/en/rel-manual/Bacula_Freque_Asked_Questi." +#~ "html#SECTION003760000000000000000 for help.\n" +#~ msgstr "" +#~ "Для отримання допомоги, будь ласка, перегляньте http://www.bacula.org/en/" +#~ "rel-manual/Bacula_Freque_Asked_Questi.html#SECTION003760000000000000000.\n" + +#~ msgid "" +#~ "Director and Storage daemon passwords or names not the same.\n" +#~ "Please see http://www.bacula.org/en/rel-manual/Bacula_Freque_Asked_Questi." +#~ "html#SECTION003760000000000000000 for help.\n" +#~ msgstr "" +#~ "Керівник та Зберігач мають не однакові назви або паролі.\n" +#~ "Для отримання допомоги, будь ласка, перегляньте http://www.bacula.org/en/" +#~ "rel-manual/Bacula_Freque_Asked_Questi.html#SECTION003760000000000000000.\n" + +#~ msgid "" +#~ "Director and File daemon passwords or names not the same.\n" +#~ "Please see http://www.bacula.org/en/rel-manual/Bacula_Freque_Asked_Questi." +#~ "html#SECTION003760000000000000000 for help.\n" +#~ msgstr "" +#~ "Керівник та Збирач мають не однакові назви або паролі.\n" +#~ "Для отримання допомоги, будь ласка, перегляньте http://www.bacula.org/en/" +#~ "rel-manual/Bacula_Freque_Asked_Questi.html#SECTION003760000000000000000.\n" + +#~ msgid "OSF1 Specific Default ACL attribs" +#~ msgstr "Специфічні атрибути OSF1 Default ACL" + +#~ msgid "OSF1 Specific Access ACL attribs" +#~ msgstr "Специфічні атрибути OSF1 Access ACL" + +#~ msgid "" +#~ "Connection from unknown Director %s at %s rejected.\n" +#~ "Please see http://www.bacula.org/en/rel-manual/Bacula_Freque_Asked_Questi." +#~ "html#SECTION003760000000000000000 for help.\n" +#~ msgstr "" +#~ "Відмовлено у з'єднанні від невідомого Керівника %s %s.\n" +#~ "Для отримання допомоги, будь ласка, перегляньте http://www.bacula.org/en/" +#~ "rel-manual/Bacula_Freque_Asked_Questi.html#SECTION003760000000000000000.\n" + +#~ msgid "" +#~ "Incorrect password given by Director.\n" +#~ "Please see http://www.bacula.org/en/rel-manual/Bacula_Freque_Asked_Questi." +#~ "html#SECTION003760000000000000000 for help.\n" +#~ msgstr "" +#~ "Невірний пароль, наданий Керівником.\n" +#~ "Для отримання допомоги, будь ласка, перегляньте http://www.bacula.org/en/" +#~ "rel-manual/Bacula_Freque_Asked_Questi.html#SECTION003760000000000000000.\n" + +#~ msgid "" +#~ "Incorrect authorization key from File daemon at %s rejected.\n" +#~ "Please see http://www.bacula.org/en/rel-manual/Bacula_Freque_Asked_Questi." +#~ "html#SECTION003760000000000000000 for help.\n" +#~ msgstr "" +#~ "Авторизаційний ключ, отриманий від Збирача %s відкинуто.\n" +#~ "Для отримання допомоги, будь ласка, перегляньте http://www.bacula.org/en/" +#~ "rel-manual/Bacula_Freque_Asked_Questi.html#SECTION003760000000000000000.\n" + +#~ msgid "" +#~ "Please see http://www.bacula.org/en/rel-manual/Bacula_Freque_Asked_Questi." 
+#~ "html#SECTION003760000000000000000 for help.\n" +#~ msgstr "" +#~ "Для отримання допомоги, будь ласка, перегляньте http://www.bacula.org/en/" +#~ "rel-manual/Bacula_Freque_Asked_Questi.html#SECTION003760000000000000000.\n" diff --git a/release/README b/release/README new file mode 100644 index 00000000..969ae985 --- /dev/null +++ b/release/README @@ -0,0 +1,53 @@ +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# + +This directory contains the scripts that we use to create a new +set of release files. + +In general, you do the following: + +1. Copy the entire contents of this directory out of the + Bacula source tree (this may not be required, but is cleaner). + +2. Edit the config file, and update the following enviroment variables + to point to the correct directories on your system. They should + point to directories that contain git repos: + + bacula + docs + rescue + + Also ensure that you have set repo to the name of your repository. + Normally, it is origin, but some developers (Kern) use bee. + +3. Ensure that "branch" is properly set to the right version in + the config file. + +4. Note, the release version is obtained from bacula/src/version.h and + put into a number of files in the release by the release scripts. + +5. Run the makeall script. + + ./makeall + +6. There should be no errors. If + there are, fix them, and re-run the script or re-run the scripts + that makeall calls: + +7. All the output files should be put in this directory. + +8. Before releasing, detar the main Bacula source into some + directory and run all the regression tests using the files + in the bacula-xx.xx.xx.tar.gz file. This ensures that there + are no missing files. + +9. Also before releasing, run at least one backup using the + Win32 and/or Win64 file daemon. + +10 When you are sure, run: + + pushtags + + which will push each repo and push the tags. diff --git a/release/ReleaseProcedure.txt b/release/ReleaseProcedure.txt new file mode 100644 index 00000000..0aa407ee --- /dev/null +++ b/release/ReleaseProcedure.txt @@ -0,0 +1,75 @@ +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# + +These are the steps to complete for making a new release. + +1. Ensure that the previous release had a tag pushed. If + not create one at the prior release point and push it + to the repos. + git push tag + +2. Update the ChangeLog using release/git2changelog.pl Release-5.2.x..HEAD >1 + +3. Edit 1 into ChangeLog ensuring to mark the previous release + Version number prior to adding the new ChangeLog output. + +4. Edit the ReleaseNotes. Be sure not to change anything in the + prior version (typos are OK). This sometimes means duplicating + text, but it is far better to have a complete history. + Terminate the previous release with a line of all =====, + and ensure that the previous release version is properly + defined. Then add the new release section. Point out + the need to review prior releases if changing major versions. + +5. Update the version and date. + +6. Update the po files (cd po; make update-po). Correct any + problems and re-run until correct. + +7. Update the docs. Make sure they have the correct date, and + that the new docs are uploaded to bacula.org + +8. Make sure everything is pushed including the docs. + +9. 
Diff the prior version against the current one: + git diff Release-5.2.1..HEAD >diff (where 5.2.1 is the prior) + and check for debug messages that have level zero, new text + in non-debug messages that is not setup for translation. + +10. Run a full regression test (./nightly-all) on as many + platforms as possible. + +11. Check the CDash Bacula output pages to make sure there are + no overlooked problems. + +12. Cut the release (i.e. make the .tar.gz files) by copying + the release directory out of the build tree, ensuring that + your config file is properly set, and that your signing + key is properly setup, and running the ./makeall script. + +13. Ensure that the Windows builds were done properly. + +14. detar the main bacula source release + +15. Run a regression on the detared file (ensures that all files + are actually in the tar and that it is not corrupt). + +16. push the tags (once pushed they can be corrected but it is more + complicated than simply re-running the ./makeall script) + ./pushtags which does in all repos + + git push tag + +17. Upload the release files to Source Forge. + +18. Update the release version and date on the main bacula.org page + +19. Update the news item to announce the release. + +20. Send the release announcement to the users, devel, and announce + mailing lists. + +21. Update the link to the release version in the "Current Release" + in the menu (inc/header.php). diff --git a/release/bgit.py b/release/bgit.py new file mode 100755 index 00000000..66f823ad --- /dev/null +++ b/release/bgit.py @@ -0,0 +1,117 @@ +#!/usr/bin/env python2 +# this program compare two branche of GIT +# and show the differences +import sys +import os +import logging +import collections +import re +import argparse +import time + +try: + import git +except ImportError: + print >>sys.stderr, "you must install python-git aka GitPython" + sys.exit(1) + + +def add_console_logger(): + console=logging.StreamHandler() + console.setFormatter(logging.Formatter('%(levelname)-3.3s %(filename)s:%(lineno)d %(message)s', '%H:%M:%S')) + console.setLevel(logging.DEBUG) # must be INFO for prod + logging.getLogger().addHandler(console) + return console + +def add_file_logger(filename): + filelog=logging.FileHandler(filename) + # %(asctime)s '%Y-%m-%d %H:%M:%S' + filelog.setFormatter(logging.Formatter('%(asctime)s %(levelname)-3.3s %(filename)s:%(lineno)d %(message)s', '%H:%M:%S')) + filelog.setLevel(logging.DEBUG) + logging.getLogger().addHandler(filelog) + return filelog + + + +def run_cmp_branch(repo, args): + print args.branch1 + print args.branch2 +# for commit in repo.iter_commits(args.branch1, max_count=10): +# print commit.hexsha, commit.committed_date, commit.author.name, commit.message + +# print dir(repo) + commons=repo.merge_base(args.branch1, args.branch2) + if len(commons)!=1: + print "cannot find the unique common commit between", args.branch1, args.branch2 + + common=commons[0] + # make a list of all know commit in branch-2 + commits2=set() + for commit in repo.iter_commits(args.branch2): + if commit.hexsha==common.hexsha: + break + + subject=commit.message.split('\n', 1)[0] + commits2.add((commit.authored_date, commit.author.name, subject)) + #print commit.committed_date, commit.author.name, subject + + # list and compare with commits of branch-& + for commit in repo.iter_commits(args.branch1): + if commit.hexsha==common.hexsha: + break + subject=commit.message.split('\n', 1)[0] + date=time.strftime("%Y-%m-%d %H:%M", time.gmtime(commit.authored_date)) + if (commit.authored_date, 
commit.author.name, subject) in commits2: + print "=", date, commit.author.name, subject + else: + print "+", date, commit.author.name, subject + +mainparser=argparse.ArgumentParser(description='git utility for bacula') +subparsers=mainparser.add_subparsers(dest='command', metavar='', title='valid commands') + +git_parser=argparse.ArgumentParser(add_help=False) +git_parser.add_argument('--git_dir', metavar='GIT-DIR', type=str, default='.', help='the directory with the .git sub dir') + +parser=subparsers.add_parser('cmp_branch', parents=[git_parser, ], help='compare two branches, highligh commits missing in the second branch') + +parser.add_argument('branch1', metavar='BRANCH-1', help='the first branch') +parser.add_argument('branch2', metavar='BRANCH-2', help='the second branch') + +args=mainparser.parse_args() + + +logging.getLogger().setLevel(logging.DEBUG) + +add_console_logger() +print args.git_dir +print "logging into gitstat.log" +add_file_logger('gitstat.log') + +# search the git repo +repo=None +if args.git_dir: + if args.git_dir=='.': + path=os.getcwd() + while path and not os.path.isdir(os.path.join(path, '.git')): + path=os.path.dirname(path) + print path + + if path and os.path.isdir(os.path.join(path, '.git')): + try: + repo=git.Repo(path) + except git.exc.InvalidGitRepositoryError: + parser.error("git repository not found in %s" % (path,)) + else: + args.git_dir=path + else: + parser.error("not .git directory found above %s" % (os.getcwd(),)) + + else: + try: + repo=git.Repo(args.git_dir) + except git.exc.InvalidGitRepositoryError: + parser.error("git repository not found in %s" % (args.git_dir,)) + +if args.command=='cmp_branch': + run_cmp_branch(repo, args) + diff --git a/release/check_packages b/release/check_packages new file mode 100755 index 00000000..63c10b71 --- /dev/null +++ b/release/check_packages @@ -0,0 +1,25 @@ +#!/bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +. ./config + +cp package-list /tmp/$$ +echo "s%@ver@%${ver}%g" >/tmp/$$ +echo "s%@lsmdate@%${lsmdate}%g" >>/tmp/$$ +sed -f /tmp/$$ package-list >/tmp/a$$ + +echo "Checking that all packages in package-list are built" +err=0 +for i in `cat /tmp/a$$`; do + if test ! -e $i ; then + echo "Error: package $i not found" + err=1 + fi +done +if [ $err = 0 ] ; then + echo "All packages exist ..." +fi + +rm -f /tmp/$$ /tmp/a$$ diff --git a/release/clean b/release/clean new file mode 100755 index 00000000..deccc144 --- /dev/null +++ b/release/clean @@ -0,0 +1,7 @@ +#!/bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# Remove old baculas +rm -f bacula-* win*.log diff --git a/release/config b/release/config new file mode 100644 index 00000000..a12c603f --- /dev/null +++ b/release/config @@ -0,0 +1,53 @@ +# +# Configuration for release scripts +# +# Copyright (C) 2000-2017 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# Set to the Branch you are working on +# +branch=${branch:-Branch-7.9} +# +# set reltype to release or beta -- for upload and setting DEVELOPER +reltype=release + +repo=$reltype + +# Bacula git repos +bacula=${bacula:-${HOME}/bee/bacula} +docs=${docs:-${HOME}/bacula/docs} + +# Limit bw to upload on the website +max_bw=${max_bw:-80} +upload_opt="--bwlimit=$max_bw" + +# +# Set the following to your remote name. By default it is origin. +remote=bs + +export push=no +# +# Note, you will probably want to set updatepo=no if you +# run this script multiple times for a given release. 
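+# Note: branch, bacula, docs and max_bw above use ${var:-default}, so they can
+# be preset in the environment when invoking the release scripts, e.g.
+# (hypothetical values):
+#   branch=Branch-9.4 bacula=$HOME/git/bacula ./makeall
+# By contrast, reltype, remote, push and updatepo are assigned unconditionally
+# and must be changed in this file.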
+export updatepo=no + +cwd=`pwd` + +cd ${bacula}/bacula +if [ $? -ne 0 ]; then + echo "Directory: $1 does not exist" + exit 1 +fi +current=`git branch | awk '/*/ { print $2 }'` +git checkout ${branch} >/dev/null 2>&1 +git pull ${remote} ${branch} >/dev/null 2>&1 +if [ $? -ne 0 ]; then + echo "Checkout or Pull of branch ${branch} failed." + exit 1 +fi +ver=`sed -n -e 's/^#define VERSION.*"\(.*\)"$/\1/p' src/version.h` +lsmdate=`sed -n -e 's/^#define LSMDATE.*"\(.*\)"$/\1/p' src/version.h` + +git checkout $current >/dev/null 2>&1 + +cd $cwd diff --git a/release/git2changelog.pl b/release/git2changelog.pl new file mode 100755 index 00000000..f371deb0 --- /dev/null +++ b/release/git2changelog.pl @@ -0,0 +1,84 @@ +#!/usr/bin/perl -w +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +=head USAGE + + ./git2changelog.pl Release-3.0.1..Release-3.0.2 + + For bweb ReleaseNotes, use + FORBWEB=1 ./git2changelog.pl Release-3.0.1..Release-3.0.2 + +=cut + +use strict; +use POSIX q/strftime/; + +my $d=''; +my $cur; +my %elt; +my $last_txt=''; +my %bugs; +my $refs = shift || ''; +my $for_bweb = $ENV{FORBWEB}?1:0; +open(FP, "git log --no-merges --pretty=format:'%at: %s' $refs|") or die "Can't run git log $!"; +while (my $l = ) { + + # remove non useful messages + next if ($l =~ /(tweak|typo|cleanup|regress:|again|.gitignore|fix compilation|technotes)/ixs); + next if ($l =~ /update (version|technotes|kernstodo|projects|releasenotes|version|home|release|todo|notes|changelog|tpl|configure)/i); + + next if ($l =~ /bacula-web:/); + + if ($for_bweb) { + next if ($l !~ /bweb/ixs); + $l =~ s/bweb: *//ig; + } else { + next if ($l =~ /bweb:/ixs); + } + + # keep list of fixed bugs + if ($l =~ /#(\d+)/) { + $bugs{$1}=1; + } + + # remove old commit format + $l =~ s/^(\d+): (kes|ebl) /$1: /; + + if ($l =~ /(\d+): (.+)/) { + # use date as 01Jan70 + my $dnow = strftime('%d%b%y', localtime($1)); + my $cur = strftime('%Y%m%d', localtime($1)); + my $txt = $2; + + # avoid identical multiple commit message + next if ($last_txt eq $txt); + $last_txt = $txt; + + # We format the string on 79 caracters + $txt =~ s/\s\s+/ /g; + $txt =~ s/.{70,77} /$&\n /g; + + # if we are the same day, just add entry + if ($dnow ne $d) { + $d = $dnow; + if (!exists $elt{$cur}) { + push @{$elt{$cur}}, "\n\n$dnow"; + } + } + push @{$elt{$cur}}, " - $txt"; + + } else { + print STDERR "invalid format: $l\n"; + } +} + +close(FP); + +foreach my $d (sort {$b <=> $a} keys %elt) { + print join("\n", @{$elt{$d}}); +} + +print "\n\nBugs fixed/closed since last release:\n"; +print join(" ", sort keys %bugs), "\n"; diff --git a/release/makeall b/release/makeall new file mode 100755 index 00000000..73b5d7a8 --- /dev/null +++ b/release/makeall @@ -0,0 +1,17 @@ +#!/bin/sh +# +# Copyright (C) 2000-2017 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# Make full release +# + +# Source config +. 
./config + +echo "Creating release version: ${ver}" + +cd $cwd + +./makebacularel $bacula $remote $branch $ver +./makedocsrel $bacula $docs $remote $branch $ver diff --git a/release/makebacularel b/release/makebacularel new file mode 100755 index 00000000..0d8947a3 --- /dev/null +++ b/release/makebacularel @@ -0,0 +1,125 @@ +#!/bin/sh +# +# Copyright (C) 2000-2017 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# This script is used to make a Bacula release +# bacula +# regress +# gui +# It writes in the current directory, so we recommend that you +# do not run it in the Bacula source or Git directory, but +# in a special release directory. +# +# Commit all changes, update the language files, +# export the release, make a release tar. +# +# source config +. ./config + +tagbase=Release- +base_dir=bacula +base_pkg=bacula + +remote=$2 +branch=$3 +ver=$4 + +if [ $# != 4 ] ; then + echo "Need $0 " + echo "e.g. $0 release-source bee Branch-4.0 4.0.3" + exit 1 +fi +cd $1 +if [ $? -ne 0 ]; then + echo "Directory: $1 does not exist" + exit 1 +fi +src=`pwd` +current=`git branch | awk '/*/ { print $2 }'` +cd $base_dir +if [ $? -ne 0 ]; then + echo "Directory: $1 does not exist" + exit 1 +fi +git checkout ${branch} +git pull ${remote} ${branch} +if [ $? -ne 0 ]; then + echo "Checkout of branch ${branch} failed." + exit 1 +fi +fulltag=$tagbase$ver +echo " " +echo "Making $reltype for $base_pkg-$ver ..." +echo " " +#echo "OK? ctl-c to stop" +#read a +rm -rf Release-$ver +if [ $reltype != "beta" ] ; then + cd src + cp -fp version.h 1 + sed 's%^#define DEVELOPER 1%/* #define DEVELOPER 1 */%g' 1 >version.h + rm -f 1 + cd .. +fi +if [ a$updatepo != ano ]; then + ./configure --enable-client-only + cd po + make update-po + cd ${src} +fi + +git tag -d ${fulltag} +echo "Creating new tag -- $fulltag" +git tag $fulltag +if [ $? != 0 ] ; then + echo " " + echo "Non-zero return status from Git" + echo " " + exit 1 +fi +echo "Create Tag $fulltag done" +cd ${cwd} +rm -rf $base_pkg-$ver $base_pkg-$ver.tar.gz $base_pkg-$ver.tar.gz.sig +rm -rf $fulltag +cd ${src} +git archive --format=tar --prefix=$base_pkg-$ver/ $fulltag | gzip >${cwd}/$base_pkg-$ver.tar.gz + +if [ $? != 0 ] ; then + echo " " + echo "Non-zero return status from Git" + echo " " + exit 1 +fi +echo "Exported release into ${cwd}/$base_pkg-$ver.tar.gz" +cd ${cwd} +tar xfz $base_pkg-$ver.tar.gz +# First remove Enterprise Win32 plugin source +cd $base_pkg-$ver/bacula +cp -fp ReleaseNotes ChangeLog src/version.h ${cwd}/ +cp -fp LICENSE* ${cwd}/ + +cd ${cwd}/$base_pkg-$ver +# Move directories to release names including version +mv bacula $base_pkg-$ver +mv gui $base_pkg-gui-$ver +mv regress $base_pkg-regress-$ver +# Tar each component individually +tar cvfz ../$base_pkg-$ver.tar.gz $base_pkg-$ver +tar cvfz ../$base_pkg-gui-$ver.tar.gz $base_pkg-gui-$ver +tar cvfz ../$base_pkg-regress-$ver.tar.gz $base_pkg-regress-$ver +cd .. 
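+# Back in ${cwd} now: the extracted working tree is no longer needed, and each
+# of the three per-component tarballs created above is signed below.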
+rm -rf $base_pkg-$ver +./sign $base_pkg-$ver.tar.gz +./sign $base_pkg-gui-$ver.tar.gz +./sign $base_pkg-regress-$ver.tar.gz + +if [ a$push != ano ]; then + cd ${src} + git push ${remote} ${branch} + git push ${remote} tag ${fulltag} + echo "Pushed ${remote} and push tag ${fulltag}" +fi + +cd ${src} +git checkout ${current} diff --git a/release/makedocsonly b/release/makedocsonly new file mode 100755 index 00000000..f2fbf4fb --- /dev/null +++ b/release/makedocsonly @@ -0,0 +1,59 @@ +#!/bin/sh +# +# Copyright (C) 2000-2017 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# Make only the docs packages +# + +# Source config +. ./config + +check_rtn() +{ +if [ $? -ne 0 ] ; then + echo " " + echo "Error in $1 ..." + exit 1 +fi +} + +echo "Creating release version: ${ver}" + +cd $cwd + +./makedocsrel $bacula $branch $docs $remote $branch $ver | tee -a build.log +check_rtn makedocsrel + +./makemanualsrel $ver +check_rtn makemanualsrel + +# strip known error words from build.log +sed -i -e 's%strerror%%g' build.log +sed -i -e 's%dlerror%%g' build.log +sed -i -e 's%OnError%%g' build.log +sed -i -e 's%k7\-error%%g' build.log +sed -i -e 's%error\.tpl%%g' build.log +sed -i -e 's%error\.ico%%g' build.log +sed -i -e 's%errors\-test%%g' build.log +sed -i -e 's%fatal\-test%%g' build.log +sed -i -e 's%errors\.in%%g' build.log +sed -i -e 's%s_error%%g' build.log +grep -i error build.log >/dev/null +if [ $? -eq 0 ] ; then + echo " " + echo "Errors in build.log" + exit 1 +fi +grep -i warning build.log >/dev/null +if [ $? -eq 0 ] ; then + echo " " + echo "Warnings in build.log" + exit 1 +fi +grep -i fatal build.log >/dev/null +if [ $? -eq 0 ] ; then + echo " " + echo "Fatal errors in build.log" + exit 1 +fi diff --git a/release/makedocsrel b/release/makedocsrel new file mode 100755 index 00000000..67fb59a9 --- /dev/null +++ b/release/makedocsrel @@ -0,0 +1,124 @@ +#!/bin/sh +# +# Copyright (C) 2000-2017 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# This script is used to make a Bacula docs release +# docs +# It writes in the current directory, so we recommend that you +# do not run it in the Bacula source or Git directory, but +# in a special release directory. +# +# Commit all changes, export the release, make a release tar. +# +# source ./config +. ./config + +tagbase=Release- +base_dir=docs +base_pkg=bacula-$base_dir +remote=$3 +branch=$4 +ver=$5 +fulltag=$tagbase$ver + +if [ $# != 5 ] ; then + echo "Need $0 " + echo "e.g. $0 bacula-source docs-source bee Branch-4.0 4.0.3" + exit 1 +fi +cd $1 +if [ $? -ne 0 ]; then + echo "Directory: $1 does not exist" + exit 1 +fi +cd bacula +if [ $? -ne 0 ]; then + echo "Directory: $1 does not exist" + exit 1 +fi +bcurrent=`git branch | awk '/*/ { print $2 }'` +git checkout ${branch} +if [ $? -ne 0 ]; then + echo "Checkout of branch ${branch} in $1/bacula failed." + exit 1 +fi +git pull ${remote} ${branch} +bsrc=`pwd` +cd ${cwd} +cd $2 +if [ $? -ne 0 ]; then + echo "Directory: $2 does not exist" + exit 1 +fi +cd $base_dir +if [ $? -ne 0 ]; then + echo "Directory: $2 does not exist" + exit 1 +fi +current=`git branch | awk '/*/ { print $2 }'` +src=`pwd` +git checkout ${branch} +if [ $? -ne 0 ]; then + echo "Checkout of branch ${branch} in ${src} failed." + exit 1 +fi +git pull ${remote} ${branch} +echo " " +echo "Making $reltype for $base_pkg-$ver ..." +echo " " +#echo "OK? ctl-c to stop" +#read a + +git tag -d ${fulltag} 2>/dev/null 1>/dev/null +echo "Creating new tag -- $fulltag" +git tag $fulltag +if [ $? 
!= 0 ] ; then + echo " " + echo "Non-zero return status from Git" + echo " " + exit 1 +fi +echo "Create Tag $fulltag done" +cd ${cwd} +rm -rf $base_pkg-$ver $base_pkg-$ver.tar.gz $base_pkg-$ver.tar.gz.sig +rm -rf $fulltag +cd ${src} +git archive --format=tar --prefix=$base_pkg-$ver/ $fulltag | gzip >${cwd}/$base_pkg-$ver.tar.gz +if [ $? != 0 ] ; then + echo " " + echo "Non-zero return status from Git" + echo " " + exit 1 +fi +echo "Exported release into ${cwd}/$base_pkg-$ver.tar.gz" +cd ${cwd} +tar xfz $base_pkg-$ver.tar.gz +cd $base_pkg-$ver/ +mv docs $base_pkg-$ver +cd $base_pkg-$ver +./update_version +rm -rf home-page cvt.scr presentations send techlogs upload rss_web.php +make +make mini-clean +cd ${cwd} +echo "Tarring docs ..." +tar cf $base_pkg-$ver.tar $base_pkg-$ver +rm -rf $base_pkg-$ver $base_pkg-$ver.tar.* +echo "bzip2 docs ..." +bzip2 $base_pkg-$ver.tar +./sign $base_pkg-$ver.tar.bz2 +ls -l $base_pkg-$ver.tar.* + +if [ a$push != ano ]; then + cd ${src} + git push ${remote} ${branch} + git push ${remote} tag ${fulltag} + echo "Pushed ${remote} and push tag ${fulltag}" +fi + +cd ${src} +git checkout ${current} + +cd ${bsrc} +git checkout ${bcurrent} diff --git a/release/makemanualsrel b/release/makemanualsrel new file mode 100755 index 00000000..677faee6 --- /dev/null +++ b/release/makemanualsrel @@ -0,0 +1,64 @@ +#!/bin/sh +# +# Copyright (C) 2000-2017 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# This script is used to make a Bacula manuals release +# There are the html manuals and the pdf manuals. Both +# are made from the docs source release. +# +# It writes in the current directory, so we recommend that you +# do not run it in the Bacula source or Git directory, but +# in a special release directory. +# +# Commit all changes, export the release, make a release tar. +# +# source ./config +. ./config + +cwd=`pwd` +ver=$1 +tagbase=Release- +base_name=bacula +# from where we get out package files +base_pkg=${base_name}-docs +html_name=${base_name}-html-manuals-${ver} +pdf_name=${base_name}-pdf-manuals-${ver} + +if [ $# != 1 ] ; then + echo "Need $0 " + echo "e.g. $0 6.4.0" + exit 1 +fi + +rm -rf $base_pkg-$ver +echo "Detarring $base_pkg-$ver.tar.bz2" +tar xfj $base_pkg-$ver.tar.bz2 +cd $base_pkg-$ver/manuals/en +mkdir -p html pdf +for i in console developers main misc problems utility; do + cp -a $i/$i/ html/ + cp $i/$i.pdf pdf/ +done + +# create index for bweb +../../tools/create_index.pl main/main +cp ../../tools/index.html . 
+ +echo "Creating ${html_name}.tar.gz" +tar cfz ${html_name}.tar.gz html css js images index.html +echo "Creating ${pdf_name}.tar.gz" +tar cfz ${pdf_name}.tar.gz pdf + +mv ${html_name}.tar.gz $cwd/ +mv ${pdf_name}.tar.gz $cwd/ + +cd $cwd +# Remove detared docs directory +rm -rf $base_pkg-$ver ${html_name}.tar.gz.sig ${pdf_name}.tar.gz.sig +./sign ${html_name}.tar.gz +./sign ${pdf_name}.tar.gz +echo " " +echo "Done making html and pdf manuals" + +exit 0 diff --git a/release/package-list b/release/package-list new file mode 100644 index 00000000..cfce827d --- /dev/null +++ b/release/package-list @@ -0,0 +1,8 @@ +bacula-@ver@.tar.gz +bacula-@ver@.tar.gz.sig +bacula-docs-@ver@.tar.bz2 +bacula-docs-@ver@.tar.bz2.sig +bacula-gui-@ver@.tar.gz +bacula-gui-@ver@.tar.gz.sig +bacula-regress-@ver@.tar.gz +bacula-regress-@ver@.tar.gz.sig diff --git a/release/pushtags b/release/pushtags new file mode 100755 index 00000000..07fd5921 --- /dev/null +++ b/release/pushtags @@ -0,0 +1,32 @@ +#!/bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# Push tags +# + +. ./config + +push() +{ + for i in $bacula $docs ; do + cd $i + current=`git branch | awk '/*/ { print $2 }'` + git checkout ${branch} + git push ${1} ${branch} + git push ${1} tag ${fulltag} + echo "Pushed ${1} and git push ${1} tag ${fulltag} in $i" + git checkout ${current} + done +} + +echo "Updating repo and tags for release version: ${ver}" + +cd $cwd + +fulltag=Release-$ver + +# Push to both remotes +push ${remote} +push bacula diff --git a/release/sign b/release/sign new file mode 100755 index 00000000..035a35c3 --- /dev/null +++ b/release/sign @@ -0,0 +1,8 @@ +#!/bin/sh +# +# Copyright (C) 2000-2017 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +gpgkey=${gpgkey:-bacula} +gpg --digest-algo=SHA256 --detach-sign --armor --default-key $gpgkey -o $1.sig $1 +gpg --verify $1.sig diff --git a/release/upload b/release/upload new file mode 100755 index 00000000..ceb3213f --- /dev/null +++ b/release/upload @@ -0,0 +1,38 @@ +#!/bin/sh +# +# Copyright (C) 2000-2017 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# Upload to File Release System on serfullver +# +. 
./config + +ver="7.9.0" +RSYNC="rsync -av --no-g --progress $upload_opt" + +do_upload() +{ +where=$1 +echo "Creating ${where}:/home/src/$fullver" +ssh ${where} < Saving existing mtx-changer to mtx-changer.old"; \ + $(MV) -f ${DESTDIR}${scriptdir}/mtx-changer ${DESTDIR}${scriptdir}/mtx-changer.old; \ + fi + $(INSTALL_SCRIPT) mtx-changer $(DESTDIR)$(scriptdir)/mtx-changer + @if test -f ${DESTDIR}${scriptdir}/mtx-changer.conf; then \ + echo " ==> Installing mtx-changer.conf to mtx-changer.conf.new"; \ + $(INSTALL_DATA) mtx-changer.conf $(DESTDIR)$(scriptdir)/mtx-changer.conf.new; \ + else \ + $(INSTALL_DATA) mtx-changer.conf $(DESTDIR)$(scriptdir)/mtx-changer.conf; \ + fi + @if test -f ${DESTDIR}${scriptdir}/disk-changer; then \ + echo " ==> Saving existing disk-changer to disk-changer.old"; \ + $(MV) -f ${DESTDIR}${scriptdir}/disk-changer ${DESTDIR}${scriptdir}/disk-changer.old; \ + fi + $(INSTALL_SCRIPT) disk-changer $(DESTDIR)$(scriptdir)/disk-changer + $(INSTALL_DATA) btraceback.gdb $(DESTDIR)$(scriptdir)/btraceback.gdb + $(INSTALL_DATA) btraceback.dbx $(DESTDIR)$(scriptdir)/btraceback.dbx + $(INSTALL_DATA) btraceback.mdb $(DESTDIR)$(scriptdir)/btraceback.mdb + @if test -f ${DESTDIR}${scriptdir}/baculabackupreport; then \ + echo " ==> Saving existing baculabackupreport to baculabackupreport.old"; \ + $(MV) -f ${DESTDIR}${scriptdir}/baculabackupreport ${DESTDIR}${scriptdir}/baculabackupreport.old; \ + fi + $(INSTALL_SCRIPT) baculabackupreport $(DESTDIR)$(scriptdir)/baculabackupreport + $(INSTALL_SCRIPT) bacula-tray-monitor.desktop $(DESTDIR)$(scriptdir)/bacula-tray-monitor.desktop + chmod 0644 $(DESTDIR)$(scriptdir)/btraceback.gdb \ + $(DESTDIR)$(scriptdir)/btraceback.dbx \ + $(DESTDIR)$(scriptdir)/btraceback.mdb + $(INSTALL_SCRIPT) btraceback $(DESTDIR)$(sbindir)/btraceback + + +uninstall: + (cd $(DESTDIR)$(scriptdir); $(RMF) bconsole) + (cd $(DESTDIR)$(scriptdir); $(RMF) bacula) + (cd $(DESTDIR)$(scriptdir); $(RMF) bacula_config) + (cd $(DESTDIR)$(sbindir); $(RMF) bacula) + (cd $(DESTDIR)$(sbindir); $(RMF) tapealert) + (cd $(DESTDIR)$(scriptdir); $(RMF) baculabackupreport) + (cd $(DESTDIR)$(scriptdir); $(RMF) bacula-ctl-dir) + (cd $(DESTDIR)$(scriptdir); $(RMF) bacula-ctl-fd) + (cd $(DESTDIR)$(scriptdir); $(RMF) bacula-ctl-sd) + (cd $(DESTDIR)$(scriptdir); $(RMF) fd) + (cd $(DESTDIR)$(scriptdir); $(RMF) mtx-changer) + (cd $(DESTDIR)$(scriptdir); $(RMF) disk-changer) + (cd $(DESTDIR)$(scriptdir); $(RMF) dvd-handler) + (cd $(DESTDIR)$(scriptdir); $(RMF) btraceback.gdb) + (cd $(DESTDIR)$(scriptdir); $(RMF) btraceback.dbx) + (cd $(DESTDIR)$(scriptdir); $(RMF) btraceback.mdb) + (cd $(DESTDIR)$(scriptdir); $(RMF) breload) + (cd $(DESTDIR)$(sbindir); $(RMF) btraceback) + +Makefile: Makefile.in + cd $(topdir) \ + && CONFIG_FILES=$(thisdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status + chmod 755 bacula btraceback + chmod 755 bacula-ctl-dir bacula-ctl-fd bacula-ctl-sd bacula_config + chmod 755 disk-changer mtx-changer bconsole tapealert + +Makefiles: + $(SHELL) config.status + chmod 755 bacula btraceback + chmod 755 bacula-ctl-dir bacula-ctl-fd bacula-ctl-sd + chmod 755 mtx-changer bconsole tapealert + +clean: + @$(RMF) *~ 1 2 3 + +# clean for distribution +distclean: clean + @$(RMF) bacula fd btraceback + @$(RMF) bacula-ctl-dir bacula-ctl-fd bacula-ctl-sd bacula_config + @$(RMF) bconsole logrotate bacula.desktop + @$(RMF) mtx-changer dvd-handler + +# ------------------------------------------------------------------------ diff --git a/scripts/bacula-ctl-dir.in b/scripts/bacula-ctl-dir.in new file 
mode 100644 index 00000000..ab1eaaca --- /dev/null +++ b/scripts/bacula-ctl-dir.in @@ -0,0 +1,262 @@ +#! /bin/sh +# +# Bacula(R) - The Network Backup Solution +# +# Copyright (C) 2000-2016 Kern Sibbald +# +# The original author of Bacula is Kern Sibbald, with contributions +# from many others, a complete list can be found in the file AUTHORS. +# +# You may use this file and others of this release according to the +# license defined in the LICENSE file, which includes the Affero General +# Public License, v3.0 ("AGPLv3") and some additional permissions and +# terms pursuant to its AGPLv3 Section 7. +# +# This notice must be preserved when any source code is +# conveyed and/or propagated. +# +# Bacula(R) is a registered trademark of Kern Sibbald. +# +# bacula-ctl-dir This shell script takes care of starting and stopping +# the bacula Director daemon +# +# This is pretty much watered down version of the RedHat script +# that works on Solaris as well as Linux, but it won't work everywhere. +# +# description: The Leading Open Source Backup Solution. +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# + +PSCMD="@PSCMD@" +PS="ps" + +# +# On Solaris, you may need to use nawk, or alternatively, +# add the GNU binaries to your path, such as /usr/xpg4/bin +# +AWK=@AWK@ + +# All these are not *really* needed but it makes it +# easier to "steal" this code for the development +# environment where they are different. +# +BACDIRBIN=@sbindir@ +BACDIRCFG=@sysconfdir@ +PIDDIR=@piddir@ +SUBSYSDIR=@subsysdir@ + +DIR_PORT=@dir_port@ + +DIR_USER=@dir_user@ +DIR_GROUP=@dir_group@ +Bacula="@BACULA@" +PIDOF=@PIDOF@ + +# A function to stop a program. +killproc() { + RC=0 + # Test syntax. + if [ $# = 0 ]; then + echo "Usage: killproc {program} {port} [signal]" + return 1 + fi + + notset=0 + # check for third arg to be kill level + if [ "$3" != "" ] ; then + killlevel=$3 + else + notset=1 + killlevel="-9" + fi + + # Get base program name + base=`basename $1` + + # Find pid. + pid=`pidofproc $base $2` + + # Kill it. + if [ "$pid" != "" ] ; then + if [ "$notset" = "1" ] ; then + if ${PS} -p "$pid">/dev/null 2>&1; then + # TERM first, then KILL if not dead + kill -TERM $pid 2>/dev/null + sleep 1 + if ${PS} -p "$pid" >/dev/null 2>&1 ; then + sleep 1 + if ${PS} -p "$pid" >/dev/null 2>&1 ; then + sleep 3 + if ${PS} -p "$pid" >/dev/null 2>&1 ; then + kill -KILL $pid 2>/dev/null + fi + fi + fi + fi + ${PS} -p "$pid" >/dev/null 2>&1 + RC=$? + [ $RC -eq 0 ] && failure "$base shutdown" || success "$base shutdown" + # RC=$((! $RC)) + # use specified level only + else + if ${PS} -p "$pid" >/dev/null 2>&1; then + kill $killlevel $pid 2>/dev/null + RC=$? + [ $RC -eq 0 ] && success "$base $killlevel" || failure "$base $killlevel" + fi + fi + else + failure "$base shutdown" + fi + # Remove pid file if any. + if [ "$notset" = "1" ]; then + rm -f ${PIDDIR}/$base.$2.pid + fi + return $RC +} + +# A function to find the pid of a program. +pidofproc() { + pid="" + # Test syntax. 
+ if [ $# = 0 ] ; then + echo "Usage: pidofproc {program}" + return 1 + fi + + # Get base program name + base=`basename $1` + + # First try PID file + if [ -f ${PIDDIR}/$base.$2.pid ] ; then + pid=`head -n 1 ${PIDDIR}/$base.$2.pid` + if [ "$pid" != "" ] ; then + echo $pid + return 0 + fi + fi + + # Next try "pidof" + if [ -x ${PIDOF} ] ; then + pid=`${PIDOF} $1` + fi + if [ "$pid" != "" ] ; then + echo $pid + return 0 + fi + + # Finally try to extract it from ps + pid=`${PSCMD} | grep $1 | ${AWK} '{ print $1 }' | tr '\n' ' '` + echo $pid + return 0 +} + +status() { + pid="" + # Test syntax. + if [ $# = 0 ] ; then + echo "Usage: status {program} {port}" + return 1 + fi + + # Get base program name + base=`basename $1` + + # First try "pidof" + if [ -x ${PIDOF} ] ; then + pid=`${PIDOF} $1` + fi + if [ "$pid" != "" ] ; then + echo "$base (pid $pid) is running..." + return 0 + else + pid=`${PSCMD} | ${AWK} 'BEGIN { prog=ARGV[1]; ARGC=1 } + { if ((prog == $2) || (("(" prog ")") == $2) || + (("[" prog "]") == $2) || + ((prog ":") == $2)) { print $1 ; exit 0 } }' $1` + if [ "$pid" != "" ] ; then + echo "$base (pid $pid) is running..." + return 0 + fi + fi + + # Next try the PID files + if [ -f ${PIDDIR}/$base.$2.pid ] ; then + pid=`head -n 1 ${PIDDIR}/$base.$2.pid` + if [ "$pid" != "" ] ; then + echo "$base dead but pid file exists" + return 1 + fi + fi + # See if the subsys lock exists + if [ -f ${SUBSYSDIR}/$base ] ; then + echo "$base dead but subsys locked" + return 2 + fi + echo "$base is stopped" + return 3 +} + +success() { + return 0 +} + +failure() { + rc=$? + return $rc +} + +OS=`uname -s` + +# if /lib/tls exists, force Bacula to use the glibc pthreads instead +if [ -d "/lib/tls" -a $OS = "Linux" -a `uname -r | cut -c1-3` = "2.4" ] ; then + export LD_ASSUME_KERNEL=2.4.19 +fi + +case "$1" in + start) + [ -x ${BACDIRBIN}/bacula-dir ] && { + echo "Starting the $Bacula Director daemon" + OPTIONS='' + if [ "${DIR_USER}" != '' ]; then + OPTIONS="${OPTIONS} -u ${DIR_USER}" + fi + + if [ "${DIR_GROUP}" != '' ]; then + OPTIONS="${OPTIONS} -g ${DIR_GROUP}" + fi + + if [ "x${VALGRIND_DIR}" = "x1" ]; then + valgrind --leak-check=full ${BACDIRBIN}/bacula-dir $2 $3 ${OPTIONS} -v -c ${BACDIRCFG}/bacula-dir.conf + else + ${BACDIRBIN}/bacula-dir $2 $3 ${OPTIONS} -v -c ${BACDIRCFG}/bacula-dir.conf + fi + sleep 1 + } + ;; + + stop) + [ -x ${BACDIRBIN}/bacula-dir ] && { + echo "Stopping the $Bacula Director daemon" + killproc ${BACDIRBIN}/bacula-dir ${DIR_PORT} $2 + } + ;; + + restart) + $0 stop + sleep 5 + $0 start + ;; + + status) + [ -x ${BACDIRBIN}/bacula-dir ] && status ${BACDIRBIN}/bacula-dir ${DIR_PORT} + ;; + + *) + echo "Usage: $0 {start|stop|restart|status}" + exit 1 + ;; +esac +exit 0 diff --git a/scripts/bacula-ctl-fd.in b/scripts/bacula-ctl-fd.in new file mode 100644 index 00000000..0032f0ba --- /dev/null +++ b/scripts/bacula-ctl-fd.in @@ -0,0 +1,259 @@ +#! /bin/sh +# +# Bacula(R) - The Network Backup Solution +# +# Copyright (C) 2000-2016 Kern Sibbald +# +# The original author of Bacula is Kern Sibbald, with contributions +# from many others, a complete list can be found in the file AUTHORS. +# +# You may use this file and others of this release according to the +# license defined in the LICENSE file, which includes the Affero General +# Public License, v3.0 ("AGPLv3") and some additional permissions and +# terms pursuant to its AGPLv3 Section 7. +# +# This notice must be preserved when any source code is +# conveyed and/or propagated. +# +# Bacula(R) is a registered trademark of Kern Sibbald. 
+# +# bacula-ctl-fd This shell script takes care of starting and stopping +# the bacula File daemon. +# +# This is pretty much watered down version of the RedHat script +# that works on Solaris as well as Linux, but it won't work everywhere. +# +# description: The Leading Open Source Backup Solution. +# + +PSCMD="@PSCMD@" +PS="ps" + +# +# On Solaris, you may need to use nawk, or alternatively, +# add the GNU binaries to your path, such as /usr/xpg4/bin +# +AWK=@AWK@ + +# All these are not *really* needed but it makes it +# easier to "steal" this code for the development +# environment where they are different. +# +BACFDBIN=@sbindir@ +BACFDCFG=@sysconfdir@ +PIDDIR=@piddir@ +SUBSYSDIR=@subsysdir@ + +FD_PORT=@fd_port@ + +FD_USER=@fd_user@ +FD_GROUP=@fd_group@ +Bacula="@BACULA@" +PIDOF=@PIDOF@ + +# A function to stop a program. +killproc() { + RC=0 + # Test syntax. + if [ $# = 0 ]; then + echo "Usage: killproc {program} {port} [signal]" + return 1 + fi + + notset=0 + # check for third arg to be kill level + if [ "$3" != "" ] ; then + killlevel=$3 + else + notset=1 + killlevel="-9" + fi + + # Get base program name + base=`basename $1` + + # Find pid. + pid=`pidofproc $base $2` + + # Kill it. + if [ "$pid" != "" ] ; then + if [ "$notset" = "1" ] ; then + if ${PS} -p "$pid">/dev/null 2>&1; then + # TERM first, then KILL if not dead + kill -TERM $pid 2>/dev/null + sleep 1 + if ${PS} -p "$pid" >/dev/null 2>&1 ; then + sleep 1 + if ${PS} -p "$pid" >/dev/null 2>&1 ; then + sleep 3 + if ${PS} -p "$pid" >/dev/null 2>&1 ; then + kill -KILL $pid 2>/dev/null + fi + fi + fi + fi + ${PS} -p "$pid" >/dev/null 2>&1 + RC=$? + [ $RC -eq 0 ] && failure "$base shutdown" || success "$base shutdown" + # RC=$((! $RC)) + # use specified level only + else + if ${PS} -p "$pid" >/dev/null 2>&1; then + kill $killlevel $pid 2>/dev/null + RC=$? + [ $RC -eq 0 ] && success "$base $killlevel" || failure "$base $killlevel" + fi + fi + else + failure "$base shutdown" + fi + # Remove pid file if any. + if [ "$notset" = "1" ]; then + rm -f ${PIDDIR}/$base.$2.pid + fi + return $RC +} + +# A function to find the pid of a program. +pidofproc() { + pid="" + # Test syntax. + if [ $# = 0 ] ; then + echo "Usage: pidofproc {program}" + return 1 + fi + + # Get base program name + base=`basename $1` + + # First try PID file + if [ -f ${PIDDIR}/$base.$2.pid ] ; then + pid=`head -n 1 ${PIDDIR}/$base.$2.pid` + if [ "$pid" != "" ] ; then + echo $pid + return 0 + fi + fi + + # Next try "pidof" + if [ -x ${PIDOF} ] ; then + pid=`${PIDOF} $1` + fi + if [ "$pid" != "" ] ; then + echo $pid + return 0 + fi + + # Finally try to extract it from ps + pid=`${PSCMD} | grep $1 | ${AWK} '{ print $1 }' | tr '\n' ' '` + echo $pid + return 0 +} + +status() { + pid="" + # Test syntax. + if [ $# = 0 ] ; then + echo "Usage: status {program} {port}" + return 1 + fi + + # Get base program name + base=`basename $1` + + # First try "pidof" + if [ -x ${PIDOF} ] ; then + pid=`${PIDOF} $1` + fi + if [ "$pid" != "" ] ; then + echo "$base (pid $pid) is running..." + return 0 + else + pid=`${PSCMD} | ${AWK} 'BEGIN { prog=ARGV[1]; ARGC=1 } + { if ((prog == $2) || (("(" prog ")") == $2) || + (("[" prog "]") == $2) || + ((prog ":") == $2)) { print $1 ; exit 0 } }' $1` + if [ "$pid" != "" ] ; then + echo "$base (pid $pid) is running..." 
+ return 0 + fi + fi + + # Next try the PID files + if [ -f ${PIDDIR}/$base.$2.pid ] ; then + pid=`head -n 1 ${PIDDIR}/$base.$2.pid` + if [ "$pid" != "" ] ; then + echo "$base dead but pid file exists" + return 1 + fi + fi + # See if the subsys lock exists + if [ -f ${SUBSYSDIR}/$base ] ; then + echo "$base dead but subsys locked" + return 2 + fi + echo "$base is stopped" + return 3 +} + +success() { + return 0 +} + +failure() { + rc=$? + return $rc +} + +OS=`uname -s` + +# if /lib/tls exists, force Bacula to use the glibc pthreads instead +if [ -d "/lib/tls" -a $OS = "Linux" -a `uname -r | cut -c1-3` = "2.4" ] ; then + export LD_ASSUME_KERNEL=2.4.19 +fi + +case "$1" in + start) + [ -x ${BACFDBIN}/bacula-fd ] && { + echo "Starting the $Bacula File daemon" + OPTIONS='' + if [ "${FD_USER}" != '' ]; then + OPTIONS="${OPTIONS} -u ${FD_USER}" + fi + + if [ "${FD_GROUP}" != '' ]; then + OPTIONS="${OPTIONS} -g ${FD_GROUP}" + fi + + if [ "x${VALGRIND_FD}" = "x1" ]; then + valgrind --leak-check=full ${BACFDBIN}/bacula-fd $2 $3 ${OPTIONS} -v -c ${BACFDCFG}/bacula-fd.conf + else + ${BACFDBIN}/bacula-fd $2 $3 ${OPTIONS} -v -c ${BACFDCFG}/bacula-fd.conf + fi + } + ;; + + stop) + # Stop the FD first so that SD will fail jobs and update catalog + [ -x ${BACFDBIN}/bacula-fd ] && { + echo "Stopping the $Bacula File daemon" + killproc ${BACFDBIN}/bacula-fd ${FD_PORT} $2 + } + ;; + + restart) + $0 stop + sleep 5 + $0 start + ;; + + status) + [ -x ${BACFDBIN}/bacula-fd ] && status ${BACFDBIN}/bacula-fd ${FD_PORT} + ;; + + *) + echo "Usage: $0 {start|stop|restart|status}" + exit 1 + ;; +esac +exit 0 diff --git a/scripts/bacula-ctl-sd.in b/scripts/bacula-ctl-sd.in new file mode 100644 index 00000000..2c247967 --- /dev/null +++ b/scripts/bacula-ctl-sd.in @@ -0,0 +1,260 @@ +#! /bin/sh +# +# Bacula(R) - The Network Backup Solution +# +# Copyright (C) 2000-2018 Kern Sibbald +# +# The original author of Bacula is Kern Sibbald, with contributions +# from many others, a complete list can be found in the file AUTHORS. +# +# You may use this file and others of this release according to the +# license defined in the LICENSE file, which includes the Affero General +# Public License, v3.0 ("AGPLv3") and some additional permissions and +# terms pursuant to its AGPLv3 Section 7. +# +# This notice must be preserved when any source code is +# conveyed and/or propagated. +# +# Bacula(R) is a registered trademark of Kern Sibbald. +# +# bacula-ctl-sd This shell script takes care of starting and stopping +# the bacula Storage daemon +# +# This is pretty much watered down version of the RedHat script +# that works on Solaris as well as Linux, but it won't work everywhere. +# +# description: The Leading Open Source Backup Solution. +# + +PSCMD="@PSCMD@" +PS="ps" + +# +# On Solaris, you may need to use nawk, or alternatively, +# add the GNU binaries to your path, such as /usr/xpg4/bin +# +AWK=@AWK@ + +# All these are not *really* needed but it makes it +# easier to "steal" this code for the development +# environment where they are different. +# +BACSDBIN=@sbindir@ +BACSDCFG=@sysconfdir@ +PIDDIR=@piddir@ +SUBSYSDIR=@subsysdir@ + +SD_PORT=@sd_port@ + +SD_USER=@sd_user@ +SD_GROUP=@sd_group@ +Bacula="@BACULA@" +PIDOF=@PIDOF@ + +# A function to stop a program. +killproc() { + RC=0 + # Test syntax. 
+ if [ $# = 0 ]; then + echo "Usage: killproc {program} {port} [signal]" + return 1 + fi + + notset=0 + # check for third arg to be kill level + if [ "$3" != "" ] ; then + killlevel=$3 + else + notset=1 + killlevel="-9" + fi + + # Get base program name + base=`basename $1` + + # Find pid. + pid=`pidofproc $base $2` + + # Kill it. + if [ "$pid" != "" ] ; then + if [ "$notset" = "1" ] ; then + if ${PS} -p "$pid">/dev/null 2>&1; then + # TERM first, then KILL if not dead + kill -TERM $pid 2>/dev/null + sleep 1 + if ${PS} -p "$pid" >/dev/null 2>&1 ; then + sleep 1 + if ${PS} -p "$pid" >/dev/null 2>&1 ; then + sleep 3 + if ${PS} -p "$pid" >/dev/null 2>&1 ; then + kill -KILL $pid 2>/dev/null + fi + fi + fi + fi + ${PS} -p "$pid" >/dev/null 2>&1 + RC=$? + [ $RC -eq 0 ] && failure "$base shutdown" || success "$base shutdown" + # RC=$((! $RC)) + # use specified level only + else + if ${PS} -p "$pid" >/dev/null 2>&1; then + kill $killlevel $pid 2>/dev/null + RC=$? + [ $RC -eq 0 ] && success "$base $killlevel" || failure "$base $killlevel" + fi + fi + else + failure "$base shutdown" + fi + # Remove pid file if any. + if [ "$notset" = "1" ]; then + rm -f ${PIDDIR}/$base.$2.pid + fi + return $RC +} + +# A function to find the pid of a program. +pidofproc() { + pid="" + # Test syntax. + if [ $# = 0 ] ; then + echo "Usage: pidofproc {program}" + return 1 + fi + + # Get base program name + base=`basename $1` + + # First try PID file + if [ -f ${PIDDIR}/$base.$2.pid ] ; then + pid=`head -n 1 ${PIDDIR}/$base.$2.pid` + if [ "$pid" != "" ] ; then + echo $pid + return 0 + fi + fi + + # Next try "pidof" + if [ -x ${PIDOF} ] ; then + pid=`${PIDOF} $1` + fi + if [ "$pid" != "" ] ; then + echo $pid + return 0 + fi + + # Finally try to extract it from ps + pid=`${PSCMD} | grep $1 | ${AWK} '{ print $1 }' | tr '\n' ' '` + echo $pid + return 0 +} + +status() { + pid="" + # Test syntax. + if [ $# = 0 ] ; then + echo "Usage: status {program} {port}" + return 1 + fi + + # Get base program name + base=`basename $1` + + # First try "pidof" + if [ -x ${PIDOF} ] ; then + pid=`${PIDOF} $1` + fi + if [ "$pid" != "" ] ; then + echo "$base (pid $pid) is running..." + return 0 + else + pid=`${PSCMD} | ${AWK} 'BEGIN { prog=ARGV[1]; ARGC=1 } + { if ((prog == $2) || (("(" prog ")") == $2) || + (("[" prog "]") == $2) || + ((prog ":") == $2)) { print $1 ; exit 0 } }' $1` + if [ "$pid" != "" ] ; then + echo "$base (pid $pid) is running..." + return 0 + fi + fi + + # Next try the PID files + if [ -f ${PIDDIR}/$base.$2.pid ] ; then + pid=`head -n 1 ${PIDDIR}/$base.$2.pid` + if [ "$pid" != "" ] ; then + echo "$base dead but pid file exists" + return 1 + fi + fi + # See if the subsys lock exists + if [ -f ${SUBSYSDIR}/$base ] ; then + echo "$base dead but subsys locked" + return 2 + fi + echo "$base is stopped" + return 3 +} + +success() { + return 0 +} + +failure() { + rc=$? 
+ return $rc +} + +OS=`uname -s` + +# if /lib/tls exists, force Bacula to use the glibc pthreads instead +if [ -d "/lib/tls" -a $OS = "Linux" -a `uname -r | cut -c1-3` = "2.4" ] ; then + export LD_ASSUME_KERNEL=2.4.19 +fi + +case "$1" in + start) + [ -x ${BACSDBIN}/bacula-sd ] && { + echo "Starting the $Bacula Storage daemon" + OPTIONS='' + if [ "${SD_USER}" != '' ]; then + OPTIONS="${OPTIONS} -u ${SD_USER}" + fi + + if [ "${SD_GROUP}" != '' ]; then + OPTIONS="${OPTIONS} -g ${SD_GROUP}" + fi + + ulimit -l unlimited > /dev/null 2> /dev/null || true + if [ "x${VALGRIND_SD}" = "x1" ]; then + valgrind --leak-check=full ${BACSDBIN}/bacula-sd $2 $3 ${OPTIONS} -v -c ${BACSDCFG}/bacula-sd.conf + else + ${BACSDBIN}/bacula-sd $2 $3 ${OPTIONS} -v -c ${BACSDCFG}/bacula-sd.conf + fi + } + ;; + + + stop) + [ -x ${BACSDBIN}/bacula-sd ] && { + echo "Stopping the $Bacula Storage daemon" + killproc ${BACSDBIN}/bacula-sd ${SD_PORT} $2 + } + ;; + + restart) + $0 stop + sleep 5 + $0 start + ;; + + status) + [ -x ${BACSDBIN}/bacula-sd ] && status ${BACSDBIN}/bacula-sd ${SD_PORT} + ;; + + *) + echo "Usage: $0 {start|stop|restart|status}" + exit 1 + ;; +esac +exit 0 diff --git a/scripts/bacula-tray-monitor.desktop.in b/scripts/bacula-tray-monitor.desktop.in new file mode 100644 index 00000000..395f3029 --- /dev/null +++ b/scripts/bacula-tray-monitor.desktop.in @@ -0,0 +1,10 @@ +[Desktop Entry] +Name=Bacula Monitor +Comment=Notification Tray Monitor +Icon=/usr/share/pixmaps/bacula-tray-monitor.xpm +Exec=@sbindir@/bacula-tray-monitor -c @sysconfdir@/tray-monitor.conf +Terminal=false +Type=Application +Encoding=UTF-8 +X-Desktop-File-Install-Version=0.3 +Categories=System;Application;Utility;X-Red-Hat-Base; diff --git a/scripts/bacula.in b/scripts/bacula.in new file mode 100755 index 00000000..acefe0dc --- /dev/null +++ b/scripts/bacula.in @@ -0,0 +1,71 @@ +#! /bin/sh +# +# Bacula(R) - The Network Backup Solution +# +# Copyright (C) 2000-2018 Kern Sibbald +# +# The original author of Bacula is Kern Sibbald, with contributions +# from many others, a complete list can be found in the file AUTHORS. +# +# You may use this file and others of this release according to the +# license defined in the LICENSE file, which includes the Affero General +# Public License, v3.0 ("AGPLv3") and some additional permissions and +# terms pursuant to its AGPLv3 Section 7. +# +# This notice must be preserved when any source code is +# conveyed and/or propagated. +# +# Bacula(R) is a registered trademark of Kern Sibbald. +# +# bacula This shell script takes care of starting and stopping +# the bacula daemons. +# +# This is pretty much watered down version of the RedHat script +# that works on Solaris as well as Linux, but it won't work everywhere. +# +# description: The Leading Open Source Backup Solution. +# + +# All these are not *really* needed but it makes it +# easier to "steal" this code for the development +# environment where they are different. 
+# +SCRIPTDIR=@scriptdir@ +# +# Disable Glibc malloc checks, it doesn't help and it keeps from getting +# good dumps +MALLOC_CHECK_=0 +export MALLOC_CHECK_ + +case "$1" in + start) + [ -x ${SCRIPTDIR}/bacula-ctl-sd ] && ${SCRIPTDIR}/bacula-ctl-sd $1 $2 + [ -x ${SCRIPTDIR}/bacula-ctl-fd ] && ${SCRIPTDIR}/bacula-ctl-fd $1 $2 + [ -x ${SCRIPTDIR}/bacula-ctl-dir ] && ${SCRIPTDIR}/bacula-ctl-dir $1 $2 + ;; + + stop) + # Stop the FD first so that SD will fail jobs and update catalog + [ -x ${SCRIPTDIR}/bacula-ctl-fd ] && ${SCRIPTDIR}/bacula-ctl-fd $1 $2 + [ -x ${SCRIPTDIR}/bacula-ctl-sd ] && ${SCRIPTDIR}/bacula-ctl-sd $1 $2 + [ -x ${SCRIPTDIR}/bacula-ctl-dir ] && ${SCRIPTDIR}/bacula-ctl-dir $1 $2 + ;; + + restart) + $0 stop + sleep 2 + $0 start + ;; + + status) + [ -x ${SCRIPTDIR}/bacula-ctl-sd ] && ${SCRIPTDIR}/bacula-ctl-sd status + [ -x ${SCRIPTDIR}/bacula-ctl-fd ] && ${SCRIPTDIR}/bacula-ctl-fd status + [ -x ${SCRIPTDIR}/bacula-ctl-dir ] && ${SCRIPTDIR}/bacula-ctl-dir status + ;; + + *) + echo "Usage: $0 {start|stop|restart|status}" + exit 1 + ;; +esac +exit 0 diff --git a/scripts/bacula.png b/scripts/bacula.png new file mode 100644 index 00000000..bb6d67c7 Binary files /dev/null and b/scripts/bacula.png differ diff --git a/scripts/bacula.vim b/scripts/bacula.vim new file mode 100644 index 00000000..d48ee986 --- /dev/null +++ b/scripts/bacula.vim @@ -0,0 +1,176 @@ +" Vim syntax file +" Put this file to your $HOME/.vim/syntax/ and use :syntax on +" Language: Bacula +" Maintainer: Eric Bollengier +" URL: +" Latest Revision: 2007-02-11 + + +if version < 600 + syntax clear +elseif exists("b:current_syntax") + finish +endif + + +" comments +syn region BacComment display oneline start="#" end="$" keepend contains=BacTodo +syn region BacComment2 start="/\*" end="\*/" + +syn region BacInclude start=/^@/ end="$" + +syntax region xCond start=/\w+\s*{/ms=e+1 end=/}/me=s-1 +syntax keyword BacName Name +syn case ignore + +syn keyword LevelElt contained Full Incremental Differential + +" todo +syn keyword BacTodo contained TODO FIXME XXX NOTE +syn region BacString start=/"/ skip=/\\"/ end=/"/ + +" Specifique Client { +syn region BacClient display start=/Client {/ end="^}" contains=BacString,BacComment,BacC1,BacC2,BacC3,BacC4 +syn match BacC1 contained /File\s*Retention/ +syn match BacC2 contained /Maximum\s*Concurrent\s*Jobs/ +syn match BacC3 contained /Job\s*Retention/ +syn keyword BacC4 contained Name Password Address Catalog AutoPrune FDPort + +" FileSet { +syn region BacFileSet display start="FileSet {" end="^}" contains=BacString,BacComment,BacName,BacFSInc,BacFSExc,BacFS2 +syn region BacFSInc contained display start="Include {" end="}" contains=BacString,BacComment,BacFSOpt,BacFS1 +syn region BacFSExc contained display start="Exclude {" end="}" contains=BacString,BacComment,BacFSOpt,BacFS1 +syn region BacFSOpt contained display start="Options {" end="}" contains=BacString,BacComment,BacFSOpt1,BacFSOpt2 +syn keyword BacFSOpt1 contained verify signature onefs noatime RegexFile Exclude Wild WildDir WildFile CheckChanges aclsupport +syn match BacFSOpt2 contained /ignore case/ +syn keyword BacFS1 contained File +syn match BacFS2 contained /Enable VSS/ + +" Storage { +syn region BacSto display start="Storage {" end="}" contains=BacName,BacComment,BacString,BacSto1,BacSto2 +syn keyword BacSto1 contained Address SDPort Password Device Autochanger +syn match BacSto2 contained /Media\s*Type/ + +" Director { +syn region BacDir display start="Director {" end="}" 
contains=BacName,BacComment,BacString,BacDir,BacDir1,BacDir2 +syn keyword BacDir1 contained DIRport QueryFile WorkingDirectory PidDirectory Password Messages +syn match BacDir2 contained /Maximum\s*Concurrent\s*Jobs/ + +" Catalog { +syn region BacCat display start="Catalog {" end="}" contains=BacName,BacComment,BacString,BacCat1 +syn keyword BacCat1 contained dbname user password dbport + +" Job { +syn region BacJob display start="Job {" end="^}" contains=BacJ1,BacJ2,BacString,BacComment,Level,BacC2,BacJ3,BacRun +syn region BacJobDefs display start="JobDefs {" end="^}" contains=BacJ1,BacJ2,BacString,BacComment,Level,BacC2,BacJ3 +syn region Level display start="Level =" end="$" contains=LevelElt + +syn keyword BacJ1 contained Schedule Name Priority Client Pool JobDefs FileSet SpoolData Storage where +syn keyword BacJ2 contained RunBeforeJob RunAfterJob Type Messages ClientRunAfterJob +syn match BacJ3 contained /Write Bootstrap/ + + +" RunScript { +syn region BacRun contained display start="RunScript {" end="}" contains=BacR1,BacR2,BacR3,BacR4,BacRW,BacString,BacComment +syn match BacR1 contained /Runs\s*When/ +syn match BacR2 contained /Runs\s*On\s*Client/ +syn match BacR3 contained /Runs\s*On\s*Failure/ +syn keyword BacR4 contained Command +syn keyword BacRW contained After Before Always + +" Schedule { +syn region BacSched display start="Schedule {" end="^}" contains=BacSR,BacString,BacComment,BacName,BacRun +syn keyword BacS1 contained Pool FullPool on at +syn keyword BacS2 contained sun mon tue wed thu fri sat sunday monday tuesday wednesday thursday friday saturday +syn keyword BacS3 contained jan feb mar apr may jun jul aug sep oct nov dec +syn keyword BacS4 contained 1st 2nd 3rd 4th 5th first second third fourth fifth +syn region BacSR contained display start="Run = " end="$" contains=BacS1,BacS2,BacS3,BacS4,LevelElt + +syn keyword BacSpecial false true yes no + +" Pool +syn region BacPool display start="Pool {" end="^}" contains=BacP1,BacP2,BacP3,BacString,BacComment +syn match BacP1 contained /Pool\s*Type/ +syn match BacP2 contained /Volume\s*Retention/ +syn keyword BacP3 contained Name AutoPrune Recycle + +syn case match +if version >= 508 || !exists("did_screen_syn_inits") + if version < 508 + let did_screen_syn_inits = 1 + command -nargs=+ HiLink hi link + else + command -nargs=+ HiLink hi def link + endif + +" Define the default highlighting. 
+ +HiLink BacFileSet Function +HiLink BacFSInc Function +HiLink BacFSExc Function +HiLink BacFSOpt Function +HiLink BacFSOpt1 Keyword +HiLink BacFSOpt2 Keyword +HiLink BacFS1 Keyword +HiLink BacFS2 Keyword + +HiLink BacInclude Include +HiLink BacComment Comment +HiLink BacComment2 Comment +HiLink BacTodo Todo +HiLink LevelElt String +HiLink BacRun Function + +HiLink BacCat Function +HiLink BacCat1 Keyword + +HiLink BacSto Function +HiLink BacSto1 Keyword +HiLink BacSto2 Keyword + +HiLink BacDir Function +HiLink BacDir1 keyword +HiLink BacDir2 keyword + +HiLink BacJob Function +HiLink BacJobDefs Function +HiLink BacJ1 Keyword +HiLink BacJ2 Keyword +HiLink BacJ3 Keyword + +HiLink BacClient Function +HiLink BacC1 Keyword +HiLink BacC2 Keyword +HiLink BacC3 Keyword +HiLink BacC4 Keyword +HiLink Level Keyword + +HiLink BacSched Function +HiLink BacS1 Keyword +HiLink BacS2 String +HiLink BacS3 String +HiLink BacS4 String + +HiLink BacR1 Keyword +HiLink BacR2 Keyword +HiLink BacR3 Keyword +HiLink BacR4 Keyword +HiLink BacRW String + +HiLink BacPool Function +HiLink BacP1 Keyword +HiLink BacP2 Keyword +HiLink BacP3 Keyword + +HiLink BacName Keyword +HiLink BacString String +HiLink BacNumber Number +HiLink BacCommand BacCommands +HiLink BacCommands Keyword +HiLink BacSpecial Boolean +HiLink BacKey Function +HiLink Equal Comment +delcommand HiLink + +endif + diff --git a/scripts/baculabackupreport.in b/scripts/baculabackupreport.in new file mode 100755 index 00000000..da962c44 --- /dev/null +++ b/scripts/baculabackupreport.in @@ -0,0 +1,858 @@ +#!/bin/sh +# +# baculabackupreport.sh +# +# ------------------------------------------------------------------------------ +# +# waa - 20130428 - Initial release. +# Generate basic Bacula backup report. +# +# waa - 20170501 - Change Log moved to bottom of script. +# +# ------------------------------------------------------------------------------ +# +# Copyright (c) 2013-2017, William A. Arlofski waa-at-revpol-dot-com +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +# ------------------------------------------------------------------------------ + + +# System variables +# ---------------- +server="@hostname@" +admin="@job_email@" +bcbin="/opt/bacula/bin/bconsole" +sendmail="/usr/sbin/sendmail" +bcconfig="/opt/bacula/etc/bconsole.conf" + +# Database variables +# ------------------ +dbtype="pgsql" # Supported options are pgsql, mysql, mariadb +db="bacula" +dbuser="bacula" +dbbin="/usr/bin/psql" +# dbpass="-pPassword" # Uncomment and set db password if one is used + +# Formatting variables +# -------------------- +html="yes" # Generate HTML emails instead of plain text emails? +boldstatus="yes" # Set tag on Status field (only if html="yes") +colorstatusbg="yes" # Colorize the Status cell's background? (only if html="yes") + +jobtableheadercolor="#b0b0b0" # Background color for the HTML table's header +jobtablejobcolor="#f4f4f4" # Background color for the job rows in the HTML table +runningjobcolor="#4d79ff" # Background color of the Status cell for "Running" jobs +goodjobcolor="#00f000" # Background color of the Status cell for "OK" jobs +warnjobcolor="#ffff00" # Background color of the Status cell for "OK" jobs (with warnings - well, actually with 'joberrors') +badjobcolor="#cc3300" # Background color of the Status cell for "bad" jobs +goodjobwitherrcolor="#cccc00" # Background color of the Status cell for "OK" jobs (with errors) - Not implemented due to request + +fontfamily="Verdana, Arial, Helvetica, sans-serif" # Set the font family to use for HTML emails +fontsize="16px" # Set the font size to use for email title and print summaries +fontsizejobinfo="12px" # Set the font size to use for job information inside of table +fontsizesumlog="10px" # Set the font size of bad logs and job summaries + +printsummary="yes" # Print a short summary after the job list table? (Total Jobs, Files & Bytes) +emailsummaries="no" # Email all job summaries. Be careful with this, it can generate very large emails +emailbadlogs="yes" # Email logs of bad jobs or jobs with JobErrors -ne 0. Be careful, this can generate very large emails. +addsubjecticon="yes" # Prepend the email Subject with UTF-8 icons (a 'checkmark', 'circle with slash', or a bold 'x') +nojobsicon="=?utf-8?Q?=E2=8A=98?=" # utf-8 subject icon when no jobs have been run +goodjobsicon="=?utf-8?Q?=E2=9C=94?=" # utf-8 subject icon when all jobs were "OK" +badjobsicon="=?utf-8?Q?=E2=9C=96?=" # utf-8 subject icon when there are jobs with errors etc +starbadjobids="yes" # Prepend an asterisk "*" to jobids of "bad" jobs +sortfield="EndTime" # Which catalog db field to sort on? Multiple,fields,work,here +sortorder="DESC" # Which direction to sort? +emailtitle="Jobs Run On ${server} in the Past ${1} Hours" # This is prepended at the top of the email, before the jobs table + + +# -------------------------------------------------- +# Nothing should need to be modified below this line +# -------------------------------------------------- + + +hist=${1} +if [ -z ${hist} ]; then + echo -e "\nUSE:\n$0 \n" + exit 1 +fi + +if [ ! -e ${bcconfig} ]; then + echo -e "\nThe bconsole configuration file does not seem to be '${bcconfig}'." + echo -e "Please check the setting for the variable 'bcconfig'.\n" + exit 1 +fi + +if [ ! -x ${bcbin} ]; then + echo -e "\nThe bconsole binary does not seem to be '${bcbin}', or it is not executable." + echo -e "Please check the setting for the variable 'bcbin'.\n" + exit 1 +fi + +if [ ! 
-x ${dbbin} ]; then + echo -e "\nThe database client binary does not seem to be '${dbbin}', or it is not executable." + echo -e "Please check the setting for the variable 'dbbin'.\n" + exit 1 +fi + +if [ ! -x ${sendmail} ]; then + echo -e "\nThe sendmail binary does not seem to be '${sendmail}', or it is not executable." + echo -e "Please check the setting for the variable 'sendmail'.\n" + exit 1 +fi + + +# Build query based on dbtype. Good thing we have "standards" Sigh... +# ------------------------------------------------------------------- +case ${dbtype} in + mysql ) + queryresult=$(echo "SELECT JobId, Name, StartTime, EndTime, Type, Level, JobStatus, JobFiles, JobBytes, \ + TIMEDIFF (EndTime,StartTime) as RunTime, JobErrors \ + FROM Job \ + WHERE (RealEndTime >= DATE_ADD(NOW(), INTERVAL -${hist} HOUR) OR JobStatus='R') \ + ORDER BY ${sortfield} ${sortorder};" \ + | ${dbbin} -u ${dbuser} ${dbpass} ${db} \ + | sed '/^JobId/d' ) + ;; + + pgsql ) + queryresult=$(echo "SELECT JobId, Name, StartTime, EndTime, Type, Level, JobStatus, JobFiles, JobBytes, \ + AGE(EndTime, StartTime) as RunTime, JobErrors \ + FROM Job \ + WHERE (RealEndTime >= CURRENT_TIMESTAMP(2) - cast('${hist} HOUR' as INTERVAL) OR JobStatus='R') \ + ORDER BY ${sortfield} ${sortorder};" \ + | ${dbbin} -U ${dbuser} ${dbpass} ${db} -0t \ + | sed -e 's/|//g' -e '/^$/d' ) + ;; + + mariadb ) + queryresult=$(echo "SELECT JobId, Name, StartTime, EndTime, Type, Level, JobStatus, JobFiles, JobBytes, \ + TIMEDIFF (EndTime,StartTime) as RunTime, JobErrors \ + FROM Job \ + WHERE (RealEndTime >= DATE_ADD(NOW(), INTERVAL -${hist} HOUR) OR JobStatus='R') \ + ORDER BY ${sortfield} ${sortorder};" \ + | ${dbbin} -u ${dbuser} -p${dbpass} ${db} -s -N ) + ;; + + * ) + echo "dbtype of '${dbtype}' is invalid. 
Please set dbtype variable to 'mysql', 'pgsql', or 'mariadb'" + exit 1 + ;; +esac + + +# If we have no jobs to report on, then +# we need to skip the entire awk script +# and some bash stuff and jump all the +# way to about line 673 +# ------------------------------------- +if [ -z "${queryresult}" ]; then + results="0" + else + results="1" + + +# Now for some fun with awk +# ------------------------- +IFS=" " +msg=$(echo ${queryresult} | \ +LC_ALL=en_US.UTF-8 \ +awk \ +-v html="${html}" \ +-v boldstatus="${boldstatus}" \ +-v colorstatusbg="${colorstatusbg}" \ +-v jobtableheadercolor="${jobtableheadercolor}" \ +-v jobtablejobcolor="${jobtablejobcolor}" \ +-v runningjobcolor="${runningjobcolor}" \ +-v goodjobcolor="${goodjobcolor}" \ +-v goodjobwitherrcolor="${goodjobwitherrcolor}" \ +-v warnjobcolor="${warnjobcolor}" \ +-v badjobcolor="${badjobcolor}" \ +-v printsummary="${printsummary}" \ +-v starbadjobids="${starbadjobids}" \ +'BEGIN { awkerr = 0 } +{star = " " } + + + # List of possible jobstatus codes + # -------------------------------- + # Enter SQL query: SELECT * FROM status; + # +-----------+---------------------------------+----------+ + # | jobstatus | jobstatuslong | severity | + # +-----------+---------------------------------+----------+ + # | C | Created, not yet running | 15 | + # | R | Running | 15 | + # | B | Blocked | 15 | + # | T | Completed successfully | 10 | + # | E | Terminated with errors | 25 | + # | e | Non-fatal error | 20 | + # | f | Fatal error | 100 | + # | D | Verify found differences | 15 | + # | A | Canceled by user | 90 | + # | F | Waiting for Client | 15 | + # | S | Waiting for Storage daemon | 15 | + # | m | Waiting for new media | | + # | M | Waiting for media mount | 15 | + # | s | Waiting for storage resource | 15 | + # | j | Waiting for job resource | 15 | + # | c | Waiting for client resource | 15 | + # | d | Waiting on maximum jobs | 15 | + # | t | Waiting on start time | 15 | + # | p | Waiting on higher priority jobs | 15 | + # | a | SD despooling attributes | 15 | + # | i | Doing batch insert file records | 15 | + # | I | Incomplete Job | 25 | + # +-----------+---------------------------------+----------+ + + + # Is this job still running? + # If a job is still running, then there will be no "Stop Time" + # fields, so $9 (jobstatus) will be shifted left two columns + # to $7, and we will need to test and then reassign these variables + # Note, this seems to be required for PostgreSQL, but MariaDB and + # MySQL return all zeros for the date and time for running jobs + # ----------------------------------------------------------------- + { if ($7 == "R" && $8 ~ /^[0-9]+/) + { + $13 = $10 + $11 = $9 + $10 = $8 + $9 = $7 + $8 = $6 + $7 = $5 + $5 = "--=Still Running=--" + $6 = "" + } + } + + + # Assign words to job status code characters + # ------------------------------------------ + # First, check to see if we need to generate an HTML email + { if (html == "yes") + { + # Set default opening and closing tags for status cell + # ---------------------------------------------------- + tdo = "" + tdc = "" + + # Check to see if the job is "OK" then assign + # the "goodjobcolor" to the cell background + # ------------------------------------------- + if ($9 ~ /[T]/ && $13 == 0) + { + if (colorstatusbg == "yes") + # Assign jobs that are OK or Running the goodjobcolor + # --------------------------------------------------- + { + tdo = "" + } + + # Should the status be bolded? 
+ # ---------------------------- + if (boldstatus == "yes") + { + tdo=tdo"" + tdc=""tdc + } + status["T"]=tdo"-OK-"tdc + + # If it is a good job, but with errors or warnings + # then we will assign the warnjobcolor + # ------------------------------------------------ + } else if ($9 == "T" && $13 != 0) + { + if (colorstatusbg == "yes") + # Assign OK jobs with errors the warnjobcolor + # ------------------------------------------- + { + tdo = "" + } + + # Should the status be bolded? + # ---------------------------- + if (boldstatus == "yes") + { + tdo=tdo"" + tdc=""tdc + } + # Since the "W" jobstatus never appears in the DB, we manually + # assign it here so it can be recognized later on in the script + # ------------------------------------------------------------- + $9 = "W" + status["W"]=tdo"OK/Warnings"tdc + + # If the job is still running we will + # assign it the runningjobcolor + # ----------------------------------- + } else if ($9 == "R") + { + if (colorstatusbg == "yes") + # Assign running jobs the runningjobcolor + # --------------------------------------- + { + tdo = "" + } + + # Should the status be bolded? + # ---------------------------- + if (boldstatus == "yes") + { + tdo=tdo"" + tdc=""tdc + } + status["R"]=tdo"Running"tdc + + # If it is a bad job, then + # we assign the badjobcolor + # ------------------------- + } else if ($9 ~ /[ABDef]/) + { + if (colorstatusbg == "yes") + # Assign bad jobs the badjobcolor + # ------------------------------- + { + tdo = "" + } + + # Should the status be bolded? + # ---------------------------- + if (boldstatus == "yes") + { + tdo=tdo"" + tdc=""tdc + } + status["A"]=tdo"Aborted"tdc + status["D"]=tdo"Verify Diffs"tdc + status["f"]=tdo"Failed"tdc + + # If it is a job with warnings or errors, assign the job the warnjobcolor + # I have never seen a "W" status in the db. Jobs that are "OK -- with warnings" + # still have a "T" jobstatus, but the joberrors field is incremented in the db + # ----------------------------------------------------------------------------- + } else if ($9 ~ /[EI]/) + { + if (colorstatusbg == "yes") + # Assign job the warnjobcolor + # --------------------------- + { + tdo = "" + } + + # Should the status be bolded? + # ---------------------------- + if (boldstatus == "yes") + { + tdo=tdo"" + tdc=""tdc + } + status["E"]=tdo"OK, w/Errors"tdc + status["I"]=tdo"Incomplete"tdc + } + } else + # $html is not "yes" so statuses will be normal text + # -------------------------------------------------- + { + status["A"]=" Aborted " + status["D"]=" Verify Diffs " + status["E"]=" OK, w/Errors " + status["f"]=" Failed " + status["I"]=" Incomplete " + status["R"]=" Running " + status["T"]=" -OK- " + # Since the "W" jobstatus never appears in the DB, we manually + # assign it here so it can be recognized later on in the script + # ------------------------------------------------------------- + if ($9 == "T" && $13 != 0) + { $9 = "W" + status["W"]=" OK/Warnings " + } + } + } + + + # These status characters seem to only + # be Director "in memory" statuses. 
They + # do not get entered into the DB ever so we + # cannot catch them with the db query we use + # I might have to query the DIR as well as + # the DB to be able to capture these + # ------------------------------------------ + { + status["C"]=" Created " + status["B"]=" Blocked " + status["F"]=" Wait FD " + status["S"]=" Wait SD " + status["m"]=" Wait New Media" + status["M"]=" Wait Mount " + status["s"]=" Wait Storage" + status["j"]=" Wait Job " + status["c"]=" Wait Client " + status["d"]=" Wait Max Jobs" + status["t"]="Wait Start Time" + status["p"]=" Wait Priority" + status["a"]=" Despool Attrs" + status["i"]=" Batch Insert " + status["L"]="Spool Last Data" + } + + + # Assign words to job type code characters + # ---------------------------------------- + { + jobtype["D"]="Admin" + jobtype["B"]="Backup" + jobtype["C"]="Copy" + jobtype["c"]="Control" + jobtype["R"]="Restore" + jobtype["V"]="Verify" + } + + + # Assign words to job level code characters + # ----------------------------------------- + { + level["F"]="Full" + level["I"]="Incr" + level["D"]="Diff" + level["f"]="VFul" + level["-"]="----" + } + + + # Assign words to Verify job level code characters + # ------------------------------------------------ + { + level["A"]="VVol" + level["C"]="VCat" + level["V"]="Init" + level["O"]="VV2C" + level["d"]="VD2C" + } + + + # Check to see if the job did not "T"erminate OK then increment $awkerr, + # and prepend the JobId with an asterisk for quick visual identification + # of problem jobs. + + # Need to choose between a positive or negative test of the job status code + # ------------------------------------------------------------------------- + # Negative check - testing for non existence of all "good" status codes + # $9 !~ /[TRCFSMmsjcdtpai]/ { awkerr++; $1 = "* "$1 } + # Positive check - testing the existence of all "bad" status codes + # good { if ($9 ~ /[ABDEIWef]/ || $13 != 0) { awkerr++; if (starbadjobids == "yes") { star = "*" } } } + { if ($9 ~ /[ABDEIef]/) { awkerr++; if (starbadjobids == "yes") { star = "*" } } } + + + # If the job is an Admin, Copy, Control, + # Restore, or Migration job it will have + # no real "Level", so we set it to "----" + # --------------------------------------- + { if ($7 ~ /[CcDRm]/) { $8 = "-" } } + + + # Print out each job, formatted with the following fields: + # JobId Name Status Errors Type Level Files Bytes StartTime EndTime RunTime + # ------------------------------------------------------------------------- + { if (html == "yes") + { printf(" \ + %s%s%s \ + %s \ + %s \ + %'"'"'d \ + %s \ + %s \ + %'"'"'d \ + %'"'"'9.2f GB \ + %s %s \ + %s %s \ + %s \ + \n", \ + jobtablejobcolor, star, $1, star, $2, status[$9], $13, jobtype[$7], level[$8], $10, $11/(1024*1024*1024), $3, $4, $5, $6, $12); + } else + { printf("%s %-7s %-14s %16s %'"'"'12d %8s %6s %'"'"'9d %'"'"'9.2f GB %11s %-9s %-10s %-9s %-9s\n", \ + star, $1, $2, status[$9], $13, jobtype[$7], level[$8], $10, $11/(1024*1024*1024), $3, $4, $5, $6, $12); + } + } + + + # Count the number of jobs + # ------------------------ + { totaljobs++ } + + + # Count the number of files and bytes from all jobs + # ------------------------------------------------- + { files += $10 } + { bytes += $11 } + + +# Finally, print out the summaries +# -------------------------------- +END { +if (printsummary == "yes") + { if (html == "yes") + { + printf("") + printf("
\ +
\ + \ + \ + \ + \ +
Total Jobs: %'"'"'15d
Total Files: %'"'"'15d
Total Bytes: %'"'"'15.2f GB
\ +
",\ + totaljobs, files, bytes/(1024*1024*1024)); + } else +printf("\ + =================================\n\ + Total Jobs : %'"'"'15d\n\ + Total Files : %'"'"'15d\n\ + Total Bytes : %'"'"'15.2f GB\n\ + =================================\n",\ +totaljobs, files, bytes/(1024*1024*1024)); +} exit awkerr } +') + + +# Any failed jobs, or jobs with errors? +# ------------------------------------- +numbadjobs=$? + + +# Do we email the job summaries? +# ------------------------------ +if [ ${emailsummaries} == "yes" ]; then + # Get all of the jobids from the query results, but + # skip any running jobs because they will not have + # a summary in the DB until the job has terminated + # ------------------------------------------------- + alljobids=$(echo "${queryresult}" \ + | awk '{ if ($7 != "R") printf("%s ", $1) }') + + + # If no jobids were returned, skip creating + # the header and looping through zero records + # ------------------------------------------- + if [ ! -z "${alljobids}" ]; then + # Generate the header + # ------------------- + msg="${msg}"$( + if [ ${html} == "yes" ]; then + echo "
====================================="
+                                else
+                                        echo -e "\n\n\n====================================="
+                fi
+                        echo "Job Summaries of All Terminated Jobs:"
+                        echo "====================================="
+                )
+
+
+                # Get the job logs from all jobs and just grep for the summary
+                # ------------------------------------------------------------
+                for jobid in ${alljobids}; do
+                        msg="${msg}"$(
+                                echo -e "\n--------------"
+                                echo "JobId: ${jobid}"
+                                echo "--------------"
+                                echo "llist joblog jobid=${jobid}" | ${bcbin} -c ${bcconfig} | grep -A31 "^  Build OS:"
+                                echo "======================================================================"
+                        )
+                done
+                if [ ${html} == "yes" ]; then
+                        msg=${msg}$(echo "
") + fi + fi +fi + + +# Do we email the bad job logs with the report? +# --------------------------------------------- +if [ ${emailbadlogs} == "yes" ]; then + # Get the badjobs, or the good jobs with + # JobErrors != 0 from the query results + # -------------------------------------- + badjobids=$(echo "${queryresult}" \ + | awk '{ if ($9 ~ /[ABDEIef]/ || ($9 == "T" && $13 != 0)) printf("%s ", $1) }') + + + # If no jobids were returned, skip creating + # the header and looping through zero records + # ------------------------------------------- + if [ ! -z "${badjobids}" ]; then + # Generate the header + # ------------------- + msg="${msg}"$( + if [ ${html} == "yes" ]; then + echo "
=========================================================="
+                                else
+                                        echo -e "\n\n\n=========================================================="
+                fi
+                        echo "Job logs of failed jobs, or good jobs with JobErrors != 0:"
+                        echo "=========================================================="
+                )
+
+
+                # Get the bad job's log from the Director via bconsole
+                # ----------------------------------------------------
+                for jobid in ${badjobids}; do
+                        msg="${msg}"$(
+                                echo -e "\n--------------"
+                                echo "JobId: ${jobid}"
+                                echo "--------------"
+                                echo "llist joblog jobid=${jobid}" | ${bcbin} -c ${bcconfig}
+                                echo "======================================================================"
+                        )
+                done
+                if [ ${html} == "yes" ]; then
+                        msg=${msg}$(echo "
") + fi + fi +fi + + +# Prepend the header to the $msg output +# ------------------------------------- +if [ ${html} == "yes" ]; then + msg=" + + + + +

${emailtitle}

+ + + + + + + + + + + + + + +${msg} +" + else + msg=" + ${emailtitle} + ------------------------------------------ + + JobId Job Name Status Errors Type Level Files Bytes Start Time End Time Run Time + ----- -------------- --------------- ---------- ------- ----- -------- ----------- ------------------- ------------------- -------- +${msg}" +fi + +fi # If there were zero results returned from the + # SQL the query, we skip the entire awk script, + # and a lot of other bash stuff that generates + # the email body and we end up here +# ------------------------------------------------- +if [ ${results} -eq 0 ]; then + status="No Jobs Have Been Run" + subjecticon="${nojobsicon}" + msg="Nothing to see here..." + else + # Totally unnecessary, but, well... OCD... :) + # -------------------------------------------- + if [ ${numbadjobs} -ne 0 ]; then + if [ ${numbadjobs} -eq 1 ]; then + job="Job" + else + job="Jobs" + fi + status="(${numbadjobs}) ${job} with Errors" + subjecticon="${badjobsicon}" + else + status="All Jobs OK" + subjecticon="${goodjobsicon}" + fi +fi + + +# More silliness +# -------------- +if [ ${hist} -eq 1 ]; then + hour="Hour" + else + hour="Hours" +fi + + +# Email the report +# ---------------- +( +echo "To: ${admin}" +echo "From: ${admin}" +if [ ${addsubjecticon} == "yes" ]; then + echo "Subject: ${subjecticon} ${server} - ${status} in the Past ${hist} ${hour}" + else + echo "Subject: ${server} - ${status} in the Past ${hist} ${hour}" +fi +if [ ${html} == "yes" ] && [ ${results} -ne 0 ]; then + echo "Content-Type: text/html" + echo "MIME-Version: 1.0" +fi +echo "" +echo "${msg}" +) | /usr/sbin/sendmail -t +# ------------- +# End of script +# ------------- + + +# ---------- +# Change Log +# ---------- +# ---------------------------- +# William A. Arlofski +# Reverse Polarity, LLC +# helpdesk@revpol.com +# http://www.revpol.com/bacula +# ---------------------------- +# +# +# 20130428 - Initial release +# Generate and email a basic Bacula backup report +# 1st command line parameter is expected to be a +# number of hours. No real error checking is done +# +# 20131224 - Removed "AND JobStatus='T'" to get all backup jobs +# whether running, or completed with errors etc. +# - Added Several fields "StartTime", "EndTime", +# "JobFiles" +# - Removed "JobType" because we are only selecting +# jobs of type "Backup" (AND Type='B') +# - Modified header lines and printf lines for better +# formatting +# +# 20140107 - Modified script to include more information and cleaned +# up the output formatting +# +# 20150704 - Added ability to work with MySQL or Postgresql +# +# 20150723 - Modified query, removed "Type='B'" clause to catch all jobs, +# including Copy jobs, Admin jobs etc. Modified header, and +# output string to match new query and include job's "Type" +# column. +# +# 20170225 - Rewrote awk script so that a status/summary could be set in +# the email report's subject. eg: +# Subject: "serverName - All Jobs OK in the past x hours" +# Subject: "serverName - x Jobs FAILED in the past y hours" +# +# 20170303 - Fixed output in cases where there are jobs running and there +# is no "Stop Time" for a job. 
+# +# 20170406 - Some major modifications: +# - Added feature to spell out words instead of using the +# single character codes for Job type, Job Status, and +# Job Level - Including the different levels for Verify +# jobs +# - If a job terminates with an error or warning, then the +# job's line in the output is prepended with an asterisk +# "*" for quick visual identification +# - Modified the outputs of the files and bytes fields to +# include commas when the number is > 999 +# - Added totals to the end of the report for Jobs, Files, +# and Bytes +# - Added $sortfield and $sortorder variables to allow output +# to be sorted as desired +# - Set the level of a job to "----" when the level is not +# applicable as in Restore jobs, Admin jobs etc. +# +# 20170408 - Some minor cleanup, and moving things around +# - Added $emailsummaries variable to append the job summaries +# to the end of the report. +# - Added $emailbadlogs variable to append full joblogs of jobs +# which have failed or jobs with errors to the end of the report +# for quick access to investigate failed jobs. +# +# 20170417 - Added some tests for binaries and the bconsole config file +# +# 20170429 - Thanks to Chris Couture for contacting me and submitting a +# working query for MariaDB. I have added 'mariadb' as a new +# dbtype option. +# - Thanks to Chris Couture for the ideas and some code examples +# to create an HTML email. +# - Added $html variable to enable HTML emails. +# - Added $boldstatus variable to make the Status bold +# in HTML emails. +# - Added $colorstatusbg variable to color the background of +# the Status cell in HTML emails. +# - Thanks to Chris Couture for the idea of adding RunTime +# to the email output. +# - Thanks to Chris Couture for the idea of using some unicode +# characters (a 'checkmark'or a bold 'x') in the Subject: +# to quickly see if everything ran OK. +# - Added $addsubjecticon variable to enable/disable the +# prepending of this icon to the Subject. +# - Added $printsumary variable to give the option to print the +# total Jobs, Files, and Bytes after the job listing table. +# - Added $starbadjobids variable to enable/disable prepending +# the bad jobids with an asterisk "*". +# - Modified the way the email is built at the end. Thanks to +# Chris Courture again for this nice idea. +# - Added $jobtableheadercolor, $jobtablejobcolor, $goodjobcolor, +# $goodjobwitherrcolor, $runningjobcolor, $warnjobcolor, and +# $badjobcolor variables to colorize HTML emails +# - Added $emailtitle variable for the title at the top +# - Added $fontfamily, $fontsize, $fontsizejobinfo, and $fontsizesumlog +# variables to allow styling of the HTML output (Thanks again Chris) +# - Added $nojobsicon, $goodjobsicon, and $badjobsicon variables to +# allow setting the prepended utf-8 subject icon character +# - Reformatted things so that if there are no jobs returned by the +# SQL query, the email message sent is nice and short +# - Modified the license to allow for inclusion into Bacula Community, +# and possibly the Enterprise Edition releases +# +# 20170430 - Modified the order of the fields to make more sense +# - Re-aligned the text email so that when an asterisk is pre-pended it +# does not shift the whole line +# +# 20170508 - Re-worked some of the logic so that good jobs (JobStatus="T") which +# have errors will have their status listed as "OK/Warnings", and it +# will not trigger as a "bad job" on the JobErrors, so it will not +# have an asterisk prepended to the JobId in the job listing. 
I think +# this fix is more of a temporary change in the hopes that a "W" +# status to represent "good jobs with warnings" is implemented in the +# db in the future. +# - Added an "Errors" column to the table to show "JobErrors" from the +# db. +# - Some minor variable name changes and other minor changes +# +# 20170511 - Minor adjustments to the alignment formatting of the text email +# - Minor 'case' changes to a couple levels (Init & VCat) +# +# ------------------------------------------------------------------------------ + + +# I like small tabs. Use :set list in vim to see tabbing etc +# vim: set tabstop=2:softtabstop=2:shiftwidth=2 # diff --git a/scripts/bat.console_apps.in b/scripts/bat.console_apps.in new file mode 100644 index 00000000..3f660a9a --- /dev/null +++ b/scripts/bat.console_apps.in @@ -0,0 +1,4 @@ +USER=root +PROGRAM=@sbindir@/bat +SESSION=true +FALLBACK=true diff --git a/scripts/bat.desktop.consolehelper.in b/scripts/bat.desktop.consolehelper.in new file mode 100644 index 00000000..53b9696f --- /dev/null +++ b/scripts/bat.desktop.consolehelper.in @@ -0,0 +1,11 @@ +[Desktop Entry] +Name=Bacula Administration Tool +Comment=Bacula Director Console +Icon=/usr/share/pixmaps/bat_icon.png +Exec=/usr/bin/bat -c @sysconfdir@/bat.conf +Terminal=false +Type=Application +Encoding=UTF-8 +StartupNotify=true +X-Desktop-File-Install-Version=0.3 +Categories=System;Application;Utility;X-Red-Hat-Base; diff --git a/scripts/bat.desktop.in b/scripts/bat.desktop.in new file mode 100644 index 00000000..a18e4188 --- /dev/null +++ b/scripts/bat.desktop.in @@ -0,0 +1,11 @@ +[Desktop Entry] +Name=Bacula Administration Tool +Comment=Bacula Director Console +Icon=/usr/share/pixmaps/bat_icon.png +Exec=@sbindir@/bat -c @sysconfdir@/bat.conf +Terminal=false +Type=Application +Encoding=UTF-8 +StartupNotify=true +X-Desktop-File-Install-Version=0.3 +Categories=System;Application;Utility;X-Red-Hat-Base; diff --git a/scripts/bat.desktop.xsu.in b/scripts/bat.desktop.xsu.in new file mode 100644 index 00000000..cced5a00 --- /dev/null +++ b/scripts/bat.desktop.xsu.in @@ -0,0 +1,11 @@ +[Desktop Entry] +Name=Bacula Administration Tool +Comment=Bacula Director Console +Icon=/usr/share/pixmaps/bat_icon.png +Exec=kdesu -t -c "@sbindir@/bat -c @sysconfdir@/bat.conf" -f @sysconfdir@/bat.kdesu +Terminal=false +Type=Application +Encoding=UTF-8 +StartupNotify=true +X-Desktop-File-Install-Version=0.3 +Categories=System;Application;Utility;X-Red-Hat-Base; diff --git a/scripts/bat.pamd b/scripts/bat.pamd new file mode 100644 index 00000000..15cb90fa --- /dev/null +++ b/scripts/bat.pamd @@ -0,0 +1,7 @@ +#%PAM-1.0 +auth sufficient pam_rootok.so +auth sufficient pam_timestamp.so +auth required pam_stack.so service=system-auth +session optional pam_xauth.so +session optional pam_timestamp.so +account required pam_permit.so diff --git a/scripts/bconsole.in b/scripts/bconsole.in new file mode 100755 index 00000000..f696ba1c --- /dev/null +++ b/scripts/bconsole.in @@ -0,0 +1,41 @@ +#!/bin/sh +# +# Bacula(R) - The Network Backup Solution +# +# Copyright (C) 2000-2016 Kern Sibbald +# +# The original author of Bacula is Kern Sibbald, with contributions +# from many others, a complete list can be found in the file AUTHORS. +# +# You may use this file and others of this release according to the +# license defined in the LICENSE file, which includes the Affero General +# Public License, v3.0 ("AGPLv3") and some additional permissions and +# terms pursuant to its AGPLv3 Section 7. 
+# +# This notice must be preserved when any source code is +# conveyed and/or propagated. +# +# Bacula(R) is a registered trademark of Kern Sibbald. +# +which dirname >/dev/null +# does dirname exit? +if [ $? = 0 ] ; then + cwd=`dirname $0` + if [ x$cwd = x. ]; then + cwd=`pwd` + fi + if [ x$cwd = x@sbindir@ ] ; then + echo "bconsole not properly installed." + exit 1 + fi +fi +if [ x@sbindir@ = x@sysconfdir@ ]; then + echo "bconsole not properly installed." + exit 1 +fi +if [ $# = 1 ] ; then + echo "doing bconsole $1.conf" + @sbindir@/bconsole -c $1.conf +else + @sbindir@/bconsole -c @sysconfdir@/bconsole.conf +fi diff --git a/scripts/breload.in b/scripts/breload.in new file mode 100644 index 00000000..a571f960 --- /dev/null +++ b/scripts/breload.in @@ -0,0 +1,63 @@ +#! /bin/sh +# +# Bacula(R) - The Network Backup Solution +# +# Copyright (C) 2000-2016 Kern Sibbald +# +# The original author of Bacula is Kern Sibbald, with contributions +# from many others, a complete list can be found in the file AUTHORS. +# +# You may use this file and others of this release according to the +# license defined in the LICENSE file, which includes the Affero General +# Public License, v3.0 ("AGPLv3") and some additional permissions and +# terms pursuant to its AGPLv3 Section 7. +# +# This notice must be preserved when any source code is +# conveyed and/or propagated. +# +# Bacula(R) is a registered trademark of Kern Sibbald. +# +# breload This shell script takes care of reloading the director after +# a backup of the configuration and a bacula-dir -t test +# +# + +BACDIRBIN=@sbindir@ +BACDIRCFG=@sysconfdir@ +BACWORKDIR=@working_dir@ +BACBKPDIR=$BACWORKDIR/bkp +Bacula="@BACULA@" +DIR_USER=@dir_user@ +RET=0 + +if [ -x ${BACDIRBIN}/bacula-dir -a -r ${BACDIRCFG}/bacula-dir.conf ]; then + echo "Testing the $Bacula Director daemon configuration" + + if [ $(whoami) != "$DIR_USER" ]; then + USER_OPT="-u $DIR_USER" + fi + + ${BACDIRBIN}/bacula-dir -t $USER_OPT -c ${BACDIRCFG}/bacula-dir.conf + + RET=$? + if [ $RET = 0 ]; then + if [ ! 
-d $BACBKPDIR ]; then + echo "Creating Backup configuration directory" + mkdir -p $BACBKPDIR + chmod 700 $BACBKPDIR + chown $DIR_USER $BACBKPDIR + fi + if [ -d $BACDIRCFG/conf.d ]; then + CONFD=$BACDIRCFG/conf.d + fi + if [ -d $BACBKPDIR ]; then + echo "Backup configuration" + tar cfz $BACBKPDIR/bacula-dir-conf.$(date +%s).tgz $BACDIRCFG/*conf $CONFD + fi + echo reload | ${BACDIRBIN}/bconsole >/dev/null + echo "Reloading configuration" + else + echo "Can't reload configuration, please correct errors first" + fi +fi +exit $RET diff --git a/scripts/btraceback.dbx b/scripts/btraceback.dbx new file mode 100644 index 00000000..30c0d551 --- /dev/null +++ b/scripts/btraceback.dbx @@ -0,0 +1,53 @@ +# btraceback.dbx +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# + +dbxenv language_mode c++ + +echo "fail_time ==> \c"; print -l (char *)fail_time +echo "exename ==> \c"; print -l (char *)exename +echo "exepath ==> \c"; print -l (char *)exepath +echo "assert_msg ==> \c"; print -l (char *)assert_msg +echo "db_engine_name ==> \c"; print -l (char *)db_engine_name +echo "version ==> \c"; print -l (char *)version +echo "host_os ==> \c"; print -l (char *)host_os +echo "distname ==> \c"; print -l (char *)distname +echo "distver ==> \c"; print -l (char *)distver +echo "dist_name ==> \c"; print -l (char *)dist_name + +echo "******** RUNNING THREADS/LWPS:" +echo +lwps + +echo +echo +echo "******** STACK TRACE OF CURRENT THREAD/LWP:" +echo +where + +echo +echo +echo "******** VARIABLES DUMP OF CURRENT THREAD/LWP:" +echo +dump + +for LWP in $(lwps | sh sed -e 's/.*@//' -e 's/ .*//'); do +( + if lwp l@$LWP; then + echo "******************************************" + echo + echo "******** STACK TRACE OF THREAD/LWP ${LWP}:" + echo + where + + echo + echo "******** VARIABLES DUMP OF THREAD/LWP ${LWP}:" + echo + dump + echo "******************************************" + fi +) +done +quit diff --git a/scripts/btraceback.gdb b/scripts/btraceback.gdb new file mode 100644 index 00000000..aaed915f --- /dev/null +++ b/scripts/btraceback.gdb @@ -0,0 +1,33 @@ +print fail_time +print my_name +print exename +print exepath +print assert_msg +print db_engine_name +print version +print host_os +print distname +print distver +print host_name +print dist_name +show env TestName +bt +thread apply all bt +f 0 +info locals +f 1 +info locals +f 2 +info locals +f 3 +info locals +f 4 +info locals +f 5 +info locals +f 6 +info locals +f 7 +info locals +detach +quit diff --git a/scripts/btraceback.in b/scripts/btraceback.in new file mode 100755 index 00000000..269cc7e4 --- /dev/null +++ b/scripts/btraceback.in @@ -0,0 +1,65 @@ +#!/bin/sh +# +# Bacula(R) - The Network Backup Solution +# +# Copyright (C) 2000-2016 Kern Sibbald +# Copyright (C) 2000-2014 Free Software Foundation Europe e.V. +# +# The original author of Bacula is Kern Sibbald, with contributions +# from many others, a complete list can be found in the file AUTHORS. +# +# You may use this file and others of this release according to the +# license defined in the LICENSE file, which includes the Affero General +# Public License, v3.0 ("AGPLv3") and some additional permissions and +# terms pursuant to its AGPLv3 Section 7. +# +# This notice must be preserved when any source code is +# conveyed and/or propagated. +# +# Bacula(R) is a registered trademark of Kern Sibbald. +# +# Script to do a stackdump of a Bacula daemon/program. 
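+#
+# Illustrative manual invocation (the PID is a hypothetical placeholder and
+# the paths reuse the same @...@ substitutions found elsewhere in this
+# script); the argument order is the one documented just below:
+#
+#   @scriptdir@/btraceback @sbindir@/bacula-fd 12345 @working_dir@
+#
+# which should leave a bacula.12345.traceback file in the working directory
+# and mail it via bsmtp to the configured dump address.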
+# +# We attempt to attach to running program +# +# Arguments to this script are +# $1 = path to executable +# $2 = main pid of running program to be traced back. +# $3 = working directory +# +PNAME=`basename $1` +WD="$3" +case `uname -s` in +SunOS) + # + # See what debuggers are available on this platform. + # We need to to some tricks to find out as a which on + # a non existing binary gives: + # + # no in + # + # So we use the return code which is 0 when it finds + # somethings and 1 if not. + # + which gdb > /dev/null 2>&1 && GDB=`which gdb` || GDB='' + which dbx > /dev/null 2>&1 && DBX=`which dbx` || DBX='' + which mdb > /dev/null 2>&1 && MDB=`which mdb` || MDB='' + gcore -o ${WD}/${PNAME} $2 + if [ ! -z "${DBX}" ]; then + ${DBX} $1 $2 < @scriptdir@/btraceback.dbx > ${WD}/bacula.$2.traceback 2>&1 + elif [ ! -z "${GDB}" ]; then + ${GDB} -quiet -batch -x @scriptdir@/btraceback.gdb $1 $2 > ${WD}/bacula.$2.traceback 2>&1 + elif [ ! -z "${MDB}" ]; then + ${MDB} -u -p $2 < @scriptdir@/btraceback.mdb > ${WD}/bacula.$2.traceback 2>&1 + fi + PNAME="${PNAME} on `hostname`" + cat ${WD}/bacula.$2.traceback \ + | @sbindir@/bsmtp -h @smtp_host@ -f @dump_email@ -s "Bacula DBX traceback of ${PNAME}" @dump_email@ + ;; +*) + gdb -quiet -batch -x @scriptdir@/btraceback.gdb $1 $2 >${WD}/bacula.$2.traceback 2>&1 + PNAME="${PNAME} on `hostname`" + cat ${WD}/bacula.$2.traceback \ + | @sbindir@/bsmtp -h @smtp_host@ -f @dump_email@ -s "Bacula GDB traceback of ${PNAME}" @dump_email@ + ;; +esac diff --git a/scripts/btraceback.mdb b/scripts/btraceback.mdb new file mode 100644 index 00000000..c598f7ee --- /dev/null +++ b/scripts/btraceback.mdb @@ -0,0 +1,28 @@ +# btraceback.mdb +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# + +$G +::echo "******** RUNNING LWPS/THREADS:" +::echo +::walk thread + +::echo +::echo +::echo "******** STACK TRACE OF CURRENT LWP:" +::echo +$C + +::echo +::echo +::echo "******** VARIABLES DUMP OF CURRENT LWP:" +::echo + +::echo "******** STACK TRACE OF LWPS:" +::walk thread | ::findstack + +::echo "******** VARIABLES DUMP OF LWPS:" + +::quit diff --git a/scripts/defaultconfig b/scripts/defaultconfig new file mode 100755 index 00000000..92d0e186 --- /dev/null +++ b/scripts/defaultconfig @@ -0,0 +1,26 @@ +#!/bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# This is a default configuration file for Bacula that +# sets reasonable defaults, and assumes that you do not +# have MySQL running. It will "install" Bacula into +# bin and etc in the current directory. +# + +CFLAGS="-g -Wall" \ + ./configure \ + --sbindir=$HOME/bacula/bin \ + --sysconfdir=$HOME/bacula/bin \ + --with-pid-dir=$HOME/bacula/bin \ + --with-subsys-dir=$HOME/bacula/bin \ + --enable-smartalloc \ + --with-mysql=$HOME/mysql \ + --with-working-dir=$HOME/bacula/bin/working \ + --with-dump-email=root@localhost \ + --with-job-email=root@localhost \ + --with-smtp-host=localhost \ + --with-baseport=9101 + +exit 0 diff --git a/scripts/devel_bacula.in b/scripts/devel_bacula.in new file mode 100755 index 00000000..53e36c15 --- /dev/null +++ b/scripts/devel_bacula.in @@ -0,0 +1,280 @@ +#! /bin/sh +# +# Copyright (C) 2000-2016 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# bacula This shell script takes care of starting and stopping +# the bacula daemons. +# +# It runs with different ports than the production version, +# and using the current development enviornment. 
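+#
+# Illustrative usage (the relative path is an assumption about running from
+# the top of the source tree): the script behaves like a small init script
+# on the non-production ports defined below, and setting VALGRIND wraps the
+# Director in valgrind (see the start case further down).
+#
+#   ./scripts/devel_bacula start
+#   ./scripts/devel_bacula status
+#   VALGRIND=1 ./scripts/devel_bacula start
+#   ./scripts/devel_bacula stop
+#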
+# +# This is pretty much watered down version of the RedHat script +# that works on Solaris as well as Linux, but it won't work everywhere. +# +# description: The Leading Open Source Backup Solution. +# + +PSCMD="@PSCMD@" +pwd=`pwd` + +BACDIRBIN=${pwd}/src/dird +BACDIRCFG=${pwd}/src/dird +BACFDBIN=${pwd}/src/filed +BACFDCFG=${pwd}/src/filed +BACSDBIN=${pwd}/src/stored +BACSDCFG=${pwd}/src/stored +PIDDIR=@piddir@ +SUBSYSDIR=@subsysdir@ + +# Use non-production ports +DIR_PORT=8101 +FD_PORT=8102 +SD_PORT=8103 + +DIR_USER=@dir_user@ +DIR_GROUP=@dir_group@ +FD_USER=@fd_user@ +FD_GROUP=@fd_group@ +SD_USER=@sd_user@ +SD_GROUP=@sd_group@ + +# A function to stop a program. +killproc() { + RC=0 + # Test syntax. + if [ $# = 0 ]; then + echo "Usage: killproc {program} [signal]" + return 1 + fi + + notset=0 + # check for third arg to be kill level + if [ "$3" != "" ] ; then + killlevel=$3 + else + notset=1 + killlevel="-9" + fi + + # Get base program name + base=`basename $1` + + # Find pid. + pid=`pidofproc $base $2` + + # Kill it. + if [ "$pid" != "" ] ; then + if [ "$notset" = "1" ] ; then + if ps -p $pid>/dev/null 2>&1; then + # TERM first, then KILL if not dead + kill -TERM $pid 2>/dev/null + sleep 1 + if ps -p $pid >/dev/null 2>&1 ; then + sleep 1 + if ps -p $pid >/dev/null 2>&1 ; then + sleep 3 + if ps -p $pid >/dev/null 2>&1 ; then + kill -KILL $pid 2>/dev/null + fi + fi + fi + fi + ps -p $pid >/dev/null 2>&1 + RC=$? + [ $RC -eq 0 ] && failure "$base shutdown" || success "$base shutdown" + # RC=$((! $RC)) + # use specified level only + else + if ps -p $pid >/dev/null 2>&1; then + kill $killlevel $pid 2>/dev/null + RC=$? + [ $RC -eq 0 ] && success "$base $killlevel" || failure "$base $killlevel" + fi + fi + else + failure "$base shutdown" + fi + # Remove pid file if any. + if [ "$notset" = "1" ]; then + rm -f ${PIDDIR}/$base.$2.pid + fi + return $RC +} + +# A function to find the pid of a program. +pidofproc() { + pid="" + # Test syntax. + if [ $# = 0 ] ; then + echo "Usage: pidofproc {program}" + return 1 + fi + + # Get base program name + base=`basename $1` + + # First try PID file + if [ -f ${PIDDIR}/$base.$2.pid ] ; then + pid=`head -1 ${PIDDIR}/$base.$2.pid` + if [ "$pid" != "" ] ; then + echo $pid + return 0 + fi + fi + + # Next try "pidof" + if [ -x /sbin/pidof ] ; then + pid=`/sbin/pidof $1` + fi + if [ "$pid" != "" ] ; then + echo $pid + return 0 + fi + + # Finally try to extract it from ps + ${PSCMD} | grep $1 | awk '{ print $1 }' | tr '\n' ' ' + return 0 +} + +status() { + # Test syntax. + if [ $# = 0 ] ; then + echo "Usage: status {program}" + return 1 + fi + + # Get base program name + base=`basename $1` + + # First try "pidof" + if [ -x /sbin/pidof ] ; then + pid=`/sbin/pidof $1` + fi + if [ "$pid" != "" ] ; then + echo "$base (pid $pid) is running..." + return 0 + else + pid=`${PSCMD} | awk 'BEGIN { prog=ARGV[1]; ARGC=1 } + { if ((prog == $2) || (("(" prog ")") == $2) || + (("[" prog "]") == $2) || + ((prog ":") == $2)) { print $1 ; exit 0 } }' $1` + if [ "$pid" != "" ] ; then + echo "$base (pid $pid) is running..." + return 0 + fi + fi + + # Next try the PID files + if [ -f ${PIDDIR}/$base.$2.pid ] ; then + pid=`head -1 ${PIDDIR}/$base.$2.pid` + if [ "$pid" != "" ] ; then + echo "$base dead but pid file exists" + return 1 + fi + fi + # See if the subsys lock exists + if [ -f ${SUBSYSDIR}/$base ] ; then + echo "$base dead but subsys locked" + return 2 + fi + echo "$base is stopped" + return 3 +} + +success() { + return 0 +} + +failure() { + rc=$? 
+ return $rc +} + +case "$1" in + start) + [ -x ${BACSDBIN}/bacula-sd ] && { + echo "Starting the Storage daemon" + OPTIONS='' + if [ "${SD_USER}" != '' ]; then + OPTIONS="${OPTIONS} -u ${SD_USER}" + fi + + if [ "${SD_GROUP}" != '' ]; then + OPTIONS="${OPTIONS} -g ${SD_GROUP}" + fi + + ${BACSDBIN}/bacula-sd $2 ${OPTIONS} -v -c ${BACSDCFG}/stored.conf + } + + [ -x ${BACFDBIN}/bacula-fd ] && { + echo "Starting the File daemon" + OPTIONS='' + if [ "${FD_USER}" != '' ]; then + OPTIONS="${OPTIONS} -u ${FD_USER}" + fi + + if [ "${FD_GROUP}" != '' ]; then + OPTIONS="${OPTIONS} -g ${FD_GROUP}" + fi + + ${BACFDBIN}/bacula-fd $2 ${OPTIONS} -v -c ${BACFDCFG}/filed.conf + } + + [ -x ${BACDIRBIN}/bacula-dir ] && { + sleep 2 + echo "Starting the Director daemon" + OPTIONS='' + if [ "${DIR_USER}" != '' ]; then + OPTIONS="${OPTIONS} -u ${DIR_USER}" + fi + + if [ "${DIR_GROUP}" != '' ]; then + OPTIONS="${OPTIONS} -g ${DIR_GROUP}" + fi + + if [ "${VALGRIND}" != '' ]; then + valgrind --leak-check=full ${BACDIRBIN}/bacula-dir $2 ${OPTIONS} -v -c ${BACDIRCFG}/dird.conf + else + ${BACDIRBIN}/bacula-dir $2 ${OPTIONS} -v -c ${BACDIRCFG}/dird.conf + fi + } + ;; + + stop) + # Stop the FD first so that SD will fail jobs and update catalog + [ -x ${BACFDBIN}/bacula-fd ] && { + echo "Stopping the File daemon" + killproc ${BACFDBIN}/bacula-fd ${FD_PORT} + } + + [ -x ${BACSDBIN}/bacula-sd ] && { + echo "Stopping the Storage daemon" + killproc ${BACSDBIN}/bacula-sd ${SD_PORT} + } + + [ -x ${BACDIRBIN}/bacula-dir ] && { + echo "Stopping the Director daemon" + killproc ${BACDIRBIN}/bacula-dir ${DIR_PORT} + } + echo + ;; + + restart) + $0 stop + sleep 5 + $0 start + ;; + + status) + [ -x ${BACSDBIN}/bacula-sd ] && status ${BACSDBIN}/bacula-sd ${SD_PORT} + [ -x ${BACFDBIN}/bacula-fd ] && status ${BACFDBIN}/bacula-fd ${FD_PORT} + [ -x ${BACDIRBIN}/bacula-dir ] && status ${BACDIRBIN}/bacula-dir ${DIR_PORT} + ;; + + *) + echo "Usage: $0 {start|stop|restart|status}" + exit 1 + ;; +esac +exit 0 diff --git a/scripts/disk-changer.in b/scripts/disk-changer.in new file mode 100644 index 00000000..cf5a9735 --- /dev/null +++ b/scripts/disk-changer.in @@ -0,0 +1,397 @@ +#!/bin/sh +# +# Bacula interface to virtual autoloader using disk storage +# +# Written by Kern Sibbald +# +# Bacula(R) - The Network Backup Solution +# +# Copyright (C) 2000-2016 Kern Sibbald +# +# The original author of Bacula is Kern Sibbald, with contributions +# from many others, a complete list can be found in the file AUTHORS. +# +# You may use this file and others of this release according to the +# license defined in the LICENSE file, which includes the Affero General +# Public License, v3.0 ("AGPLv3") and some additional permissions and +# terms pursuant to its AGPLv3 Section 7. +# +# This notice must be preserved when any source code is +# conveyed and/or propagated. +# +# Bacula(R) is a registered trademark of Kern Sibbald. +# If you set in your Device resource +# +# Changer Command = "path-to-this-script/disk-changer %c %o %S %a %d" +# you will have the following input to this script: +# +# So Bacula will always call with all the following arguments, even though +# in come cases, not all are used. Note, the Volume name is not always +# included. +# +# disk-changer "changer-device" "command" "slot" "archive-device" "drive-index" "volume" +# $1 $2 $3 $4 $5 $6 +# +# By default the autochanger has 10 Volumes and 1 Drive. 
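+#
+# For illustration (hypothetical values): the changer-device file passed as
+# $1 is sourced as shell further below, so those defaults can be overridden
+# by a /var/bacula/conf containing, for example:
+#
+#   maxslot=20
+#   maxdrive=0
+#
+# The recognized keys (maxslot, maxdrive) and the optional barcodes file are
+# described in the notes that follow.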
+# +# Note: For this script to work, you *must" specify +# Device Type = File +# in each of the Devices associated with your AutoChanger resource. +# +# changer-device is the name of a file that overrides the default +# volumes and drives. It may have: +# maxslot=n where n is one based (default 10) +# maxdrive=m where m is zero based (default 1 -- i.e. 2 drives) +# +# This code can also simulate barcodes. You simply put +# a list of the slots and barcodes in the "base" directory/barcodes. +# See below for the base directory definition. Example of a +# barcodes file: +# /var/bacula/barcodes +# 1:Vol001 +# 2:Vol002 +# ... +# +# archive-device is the name of the base directory where you want the +# Volumes stored appended with /drive0 for the first drive; /drive1 +# for the second drive, ... For example, you might use +# /var/bacula/drive0 Note: you must not have a trailing slash, and +# the string (e.g. /drive0) must be unique, and it must not match +# any other part of the directory name. These restrictions could be +# easily removed by any clever script jockey. +# +# Full example: disk-changer /var/bacula/conf load 1 /var/bacula/drive0 0 TestVol001 +# +# The Volumes will be created with names slot1, slot2, slot3, ... maxslot in the +# base directory. In the above example the base directory is /var/bacula. +# However, as with tapes, their Bacula Volume names will be stored inside the +# Volume label. In addition to the Volumes (e.g. /var/bacula/slot1, +# /var/bacula/slot3, ...) this script will create a /var/bacula/loadedn +# file to keep track of what Slot is loaded. You should not change this file. +# +# Modified 8 June 2010 to accept Volume names from the calling program as arg 6. +# In this case, rather than storing the data in slotn, it is stored in the +# Volume name. Note: for this to work, Volume names may not include spaces. +# + +wd=@working_dir@ + +# +# log whats done +# +# to turn on logging, uncomment the following line +#touch $wd/disk-changer.log +# +dbgfile="$wd/disk-changer.log" +debug() { + if test -f $dbgfile; then + echo "`date +\"%Y%m%d-%H:%M:%S\"` $*" >> $dbgfile + fi +} + + +# +# Create a temporary file +# +make_temp_file() { + TMPFILE=`mktemp -t mtx.XXXXXXXXXX` + if test x${TMPFILE} = x; then + TMPFILE="$wd/disk-changer.$$" + if test -f ${TMPFILE}; then + echo "Temp file security problem on: ${TMPFILE}" + exit 1 + fi + fi +} + +# check parameter count on commandline +# +check_parm_count() { + pCount=$1 + pCountNeed=$2 + if test $pCount -lt $pCountNeed; then + echo "usage: disk-changer ctl-device command [slot archive-device drive-index]" + echo " Insufficient number of arguments arguments given." + if test $pCount -lt 2; then + echo " Mimimum usage is first two arguments ..." + else + echo " Command expected $pCountNeed arguments" + fi + exit 1 + fi +} + +# +# Strip off the final name in order to get the Directory ($dir) +# that we are dealing with. +# +get_dir() { + bn=`basename $device` + dir=`echo "$device" | sed -e s%/$bn%%g` + if [ ! -d $dir ]; then + echo "ERROR: Autochanger directory \"$dir\" does not exist." + echo " You must create it." + exit 1 + fi +} + +# +# Get the Volume name from the call line, or directly from +# the volslotn information. 
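+# For illustration (hypothetical names, using the /var/bacula base directory
+# from the examples above): after a "load" of slot 3 with Volume "Vol003",
+# the base directory holds a small state file /var/bacula/volslot3 that
+# contains just "Vol003", and a later load of slot 3 with no explicit volume
+# argument reuses that name.
+#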
+# +get_vol() { + havevol=0 + debug "vol=$volume" + if test "x$volume" != x && test "x$volume" != "x*NONE*" ; then + debug "touching $dir/$volume" + touch $dir/$volume + echo "$volume" >$dir/volslot${slot} + havevol=1 + elif [ -f $dir/volslot${slot} ]; then + volume=`cat $dir/volslot${slot}` + havevol=1 + fi +} + + +# Setup arguments +ctl=$1 +cmd="$2" +slot=$3 +device=$4 +drive=$5 +volume=$6 + +# set defaults +maxdrive=1 +maxslot=10 + +# Pull in conf file +if [ -f $ctl ]; then + . $ctl +fi + + +# Check for special cases where only 2 arguments are needed, +# all others are a minimum of 5 +# +case $2 in + list|listall) + check_parm_count $# 2 + ;; + slots) + check_parm_count $# 2 + ;; + transfer) + check_parm_count $# 4 + if [ $slot -gt $maxslot ]; then + echo "Slot ($slot) out of range (1-$maxslot)" + debug "Error: Slot ($slot) out of range (1-$maxslot)" + exit 1 + fi + ;; + *) + check_parm_count $# 5 + if [ $drive -gt $maxdrive ]; then + echo "Drive ($drive) out of range (0-$maxdrive)" + debug "Error: Drive ($drive) out of range (0-$maxdrive)" + exit 1 + fi + if [ $slot -gt $maxslot ]; then + echo "Slot ($slot) out of range (1-$maxslot)" + debug "Error: Slot ($slot) out of range (1-$maxslot)" + exit 1 + fi + ;; +esac + + +debug "Parms: $ctl $cmd $slot $device $drive $volume $havevol" + +case $cmd in + unload) + debug "Doing disk -f $ctl unload $slot $device $drive $volume" + get_dir + if [ -f $dir/loaded${drive} ]; then + ld=`cat $dir/loaded${drive}` + else + echo "Storage Element $slot is Already Full" + debug "Unload error: $dir/loaded${drive} is already unloaded" + exit 1 + fi + if [ $slot -eq $ld ]; then + echo "0" >$dir/loaded${drive} + unlink $device 2>/dev/null >/dev/null + unlink ${device}.add 2>/dev/null >/dev/null + rm -f ${device} ${device}.add + else + echo "Storage Element $slot is Already Full" + debug "Unload error: $dir/loaded${drive} slot=$ld is already unloaded" + exit 1 + fi + ;; + + load) + debug "Doing disk $ctl load $slot $device $drive $volume" + get_dir + i=0 + # Check if slot already in a drive + while [ $i -le $maxdrive ]; do + if [ -f $dir/loaded${i} ]; then + ld=`cat $dir/loaded${i}` + else + ld=0 + fi + if [ $ld -eq $slot ]; then + echo "Drive ${i} Full (Storage element ${ld} loaded)" + debug "Load error: Cannot load Slot=${ld} in drive=$drive. Already in drive=${i}" + exit 1 + fi + i=`expr $i + 1` + done + # Check if we have a Volume name + get_vol + if [ $havevol -eq 0 ]; then + # check if slot exists + if [ ! -f $dir/slot${slot} ] ; then + echo "source Element Address $slot is Empty" + debug "Load error: source Element Address $slot is Empty" + exit 1 + fi + fi + if [ -f $dir/loaded${drive} ]; then + ld=`cat $dir/loaded${drive}` + else + ld=0 + fi + if [ $ld -ne 0 ]; then + echo "Drive ${drive} Full (Storage element ${ld} loaded)" + echo "Load error: Drive ${drive} Full (Storage element ${ld} loaded)" + exit 1 + fi + echo "0" >$dir/loaded${drive} + unlink $device 2>/dev/null >/dev/null + unlink ${device}.add 2>/dev/null >/dev/null + rm -f ${device} ${device}.add + if [ $havevol -ne 0 ]; then + ln -s $dir/$volume $device + ln -s $dir/${volume}.add ${device}.add + rtn=$? + else + ln -s $dir/slot${slot} $device + ln -s $dir/slot${slot}.add ${device}.add + rtn=$? 
+ fi + if [ $rtn -eq 0 ]; then + echo $slot >$dir/loaded${drive} + fi + exit $rtn + ;; + + list) + debug "Doing disk -f $ctl -- to list volumes" + get_dir + if [ -f $dir/barcodes ]; then + cat $dir/barcodes + else + i=1 + while [ $i -le $maxslot ]; do + slot=$i + volume= + get_vol + if [ $havevol -eq 0 ]; then + echo "$i:" + else + echo "$i:$volume" + fi + i=`expr $i + 1` + done + fi + exit 0 + ;; + + listall) + # ***FIXME*** must add new Volume stuff + make_temp_file + debug "Doing disk -f $ctl -- to list volumes" + get_dir + if [ ! -f $dir/barcodes ]; then + exit 0 + fi + + # we print drive content seen by autochanger + # and we also remove loaded media from the barcode list + i=0 + while [ $i -le $maxdrive ]; do + if [ -f $dir/loaded${i} ]; then + ld=`cat $dir/loaded${i}` + v=`awk -F: "/^$ld:/"' { print $2 }' $dir/barcodes` + echo "D:$i:F:$ld:$v" + echo "^$ld:" >> $TMPFILE + fi + i=`expr $i + 1` + done + + # Empty slots are not in barcodes file + # When we detect a gap, we print missing rows as empty + # At the end, we fill the gap between the last entry and maxslot + grep -v -f $TMPFILE $dir/barcodes | sort -n | \ + perl -ne 'BEGIN { $cur=1 } + if (/(\d+):(.+)?/) { + if ($cur == $1) { + print "S:$1:F:$2\n" + } else { + while ($cur < $1) { + print "S:$cur:E\n"; + $cur++; + } + } + $cur++; + } + END { while ($cur < '"$maxslot"') { print "S:$cur:E\n"; $cur++; } } ' + + rm -f $TMPFILE + exit 0 + ;; + transfer) + # ***FIXME*** must add new Volume stuff + get_dir + make_temp_file + slotdest=$device + if [ -f $dir/slot{$slotdest} ]; then + echo "destination Element Address $slot is Full" + exit 1 + fi + if [ ! -f $dir/slot${slot} ] ; then + echo "source Element Address $slot is Empty" + exit 1 + fi + + echo "Transfering $slot to $slotdest" + mv $dir/slot${slot} $dir/slot{$slotdest} + mv $dir/slot${slot}.add $dir/slot{$slotdest}.add + + if [ -f $dir/barcodes ]; then + sed "s/^$slot:/$slotdest:/" > $TMPFILE + sort -n $TMPFILE > $dir/barcodes + fi + exit 0 + ;; + loaded) + debug "Doing disk -f $ctl $drive -- to find what is loaded" + get_dir + if [ -f $dir/loaded${drive} ]; then + a=`cat $dir/loaded${drive}` + else + a="0" + fi + debug "Loaded: drive=$drive is $a" + echo $a + exit + ;; + + slots) + debug "Doing disk -f $ctl -- to get count of slots" + echo $maxslot + ;; +esac diff --git a/scripts/filetype.vim b/scripts/filetype.vim new file mode 100644 index 00000000..25eeacbf --- /dev/null +++ b/scripts/filetype.vim @@ -0,0 +1,7 @@ +" put this file to $HOME/.vim +if exists("have_load_filetypes") + finish +endif +augroup filetypedetect + au! 
BufRead,BufNewFile bacula-dir.conf setfiletype bacula +augroup END diff --git a/scripts/freespace b/scripts/freespace new file mode 100755 index 00000000..5a283912 --- /dev/null +++ b/scripts/freespace @@ -0,0 +1,34 @@ +#!/bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# Bacula interface to get device freespace +# +# If you set in your Device resource +# +# Free Space Command = "path-to-this-script/freespace %a" +# you will have the following input to this script: +# +# +# freespace "archive-device" +# $1 +# + +OS=`uname` +case ${OS} in + SunOS) + cmd="/bin/df -P" + ;; + FreeBSD) + cmd="/bin/df -P" + ;; + Linux) + cmd="/bin/df -P" + ;; + *) + cmd="/bin/df -P" + ;; +esac + +$cmd $1 | tail -1 | awk '{ print $4 }' diff --git a/scripts/isworm b/scripts/isworm new file mode 100755 index 00000000..ba64292b --- /dev/null +++ b/scripts/isworm @@ -0,0 +1,84 @@ +#!/bin/sh +# +# Copyright (C) 2000-2018 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# Bacula interface to get worm status of tape +# +# isworm %l (control device name) +# +# Typical output: +# sdparm --page=0x1D -f /dev/sg0 +# /dev/st0: HP Ultrium 5-SCSI I5AW [tape] +# Medium configuration (SSC) mode page: +# WORMM 1 [cha: n, def: 1, sav: 1] +# WMLR 1 [cha: n, def: 1, sav: 1] +# WMFR 2 [cha: n, def: 2, sav: 2] +# +# Where WORMM is worm mode +# WMLR is worm mode label restrictions +# 0 - No blocks can be overwritten +# 1 - Some types of format labels may not be overwritten +# 2 - All format labels can be overwritten +# WMFR is worm mode filemark restrictions +# 0-1 - Reserved +# 2 - Any number of filemarks immediately preceding EOD can be +# overwritten except file mark closest to BOP (beginning of +# partition). +# 3 - Any number of filemarks immediately preceding the EOD +# can be overwritten +# 4-FF - Reserved +# + +if [ x$1 = x ] ; then + echo "First argument missing. Must be device control name." + exit 1 +fi + +sdparm=`which sdparm` +if [ x${sdparm} = x ] ; then + echo "sdparm program not found, but is required." + exit 0 +fi + +# +# This should be the correct way to determine if the tape is WORM +# but it does not work for mhvtl. Comment out the next 5 lines +# and the code that follows will detect correctly on mhtvl. +# +worm=`$sdparm --page=0x1D -f $1 |grep " *WORMM"|cut -b12-16|sed "s:^ *::"` +if [ $? = 0 ] ; then + echo $worm + exit 0 +fi + +tapeinfo=`which tapeinfo` +if [ x${tapeinfo} = x ] ; then + echo "tapeinfo program not found, but is required." + exit 1 +fi + +# +# Unfortunately IBM and HP handle the Medium Type differently, +# so we detect the vendor and get the appropriate Worm flag. 
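+# For illustration only (hypothetical values; the exact tapeinfo layout may
+# differ by drive and firmware), the two lines this script greps for look
+# roughly like:
+#
+#   Vendor ID: 'IBM     '
+#   Medium Type: 0x38
+#
+# and the single hex digit picked out by cut below is what decides the
+# reported WORM status.
+#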
+# +vendor=`$tapeinfo -f $1|grep "^Vendor ID:"|cut -b13-15` +if [ x$vendor = xHP ] ; then + worm=`$tapeinfo -f $1|grep "^Medium Type: 0x"|cut -b16-16` + echo $worm + exit 0 +fi + +if [ x$vendor = xIBM ] ; then + worm=`$tapeinfo -f $1|grep "^Medium Type: 0x"|cut -b17-17` + if [ x$worm = xc ]; then + echo "1" + exit 0 + fi + if [ x$worm = xC ]; then + echo "1" + exit 0 + fi +fi +echo "0" +exit 0 diff --git a/scripts/logrotate.in b/scripts/logrotate.in new file mode 100644 index 00000000..825666fd --- /dev/null +++ b/scripts/logrotate.in @@ -0,0 +1,18 @@ +# +# Copyright (C) 2000-2016 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# +# If you are appending to a log file (default), to +# have your log file compressed, rotated, and after a time +# deleted, after possibly editing the values below, +# copy this file to: +# +# /etc/logrotate.d/bacula +# +@logdir@/bacula.log { + monthly + rotate 5 + notifempty + missingok +} diff --git a/scripts/logwatch/Makefile.in b/scripts/logwatch/Makefile.in new file mode 100644 index 00000000..ae94edbe --- /dev/null +++ b/scripts/logwatch/Makefile.in @@ -0,0 +1,32 @@ +# Makefile to install logwatch script +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# 08 Jan 2005 D. Scott Barninger +# + +ifeq ("$(shell test -d /etc/log.d)",0) + SYSCONFDIR=/etc/log.d + SCRIPTDIR=/etc/log.d +else + SYSCONFDIR=/etc/logwatch + SCRIPTDIR=/usr/share/logwatch +endif + +INSTALL=@INSTALL@ + +all: install + +install: + + $(INSTALL) -m 755 bacula $(DESTDIR)$(SCRIPTDIR)/scripts/services/bacula + $(INSTALL) -m 755 applybaculadate $(DESTDIR)$(SCRIPTDIR)/scripts/shared/applybaculadate + $(INSTALL) -m 644 logfile.bacula.conf $(DESTDIR)$(SYSCONFDIR)/conf/logfiles/bacula.conf + $(INSTALL) -m 644 services.bacula.conf $(DESTDIR)$(SYSCONFDIR)/conf/services/bacula.conf + +uninstall: + rm -f $(DESTDIR)$(SCRIPTDIR)/scripts/services/bacula + rm -f $(DESTDIR)$(SCRIPTDIR)/scripts/shared/applybaculadate + rm -f $(DESTDIR)$(SYSCONFDIR)/conf/logfiles/bacula.conf + rm -f $(DESTDIR)$(SYSCONFDIR)/conf/services/bacula.conf diff --git a/scripts/logwatch/README b/scripts/logwatch/README new file mode 100644 index 00000000..96f68b3b --- /dev/null +++ b/scripts/logwatch/README @@ -0,0 +1,43 @@ +Installation instructions for bacula logwatch script +04 Sep 2005 + +Installation into a standard logwatch configuration is fairly +straightforward. Run 'make install' as root from this directory. 
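+
+The Makefile in this directory tries to detect whether logwatch uses the
+older /etc/log.d layout or the newer /etc/logwatch and /usr/share/logwatch
+layout. On the newer layout, a quick post-install check is typically
+something like this (option names may vary with your logwatch version):
+
+logwatch --service bacula --range today --print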
+ +For manual install copy the files as indicated below: + +cp -p scripts/logwatch/bacula /etc/log.d/scripts/services/bacula +cp -p scripts/logwatch/applybacula /etc/log.d/scripts/shared/applybaculadate +cp -p scripts/logwatch/logfile.bacula.conf /etc/log.d/conf/logfiles/bacula.conf +cp -p scripts/logwatch/services.bacula.conf /etc/log.d/conf/services/bacula.conf +chmod 755 /etc/log.d/scripts/services/bacula +chmod 755 /etc/log.d/scripts/shared/applybaculadate +chmod 644 /etc/log.d/conf/logfiles/bacula.conf +chmod 644 /etc/log.d/conf/services/bacula.conf + +To test your installation run logwatch (as root): +/etc/log.d/logwatch + +The following is the kind of output you could expect to be added to the +standard logwatch listing: + + --------------------- bacula Begin ------------------------ + +Jobs Run: +2005-09-02 2012 backupNightlySave.2005-09-02_01.05.00 + BackupOK + +2005-09-02 2013 scottNightlySave.2005-09-02_01.05.01 + BackupOK + +2005-09-02 2014 marthaNightlySave.2005-09-02_01.05.02 + BackupOK + +2005-09-02 2015 lyndaNightlySave.2005-09-02_01.05.03 + BackupOK + +2005-09-02 2016 backupBackupCatalog.2005-09-02_01.10.00 + BackupOK + + + ---------------------- bacula End ------------------------- diff --git a/scripts/logwatch/applybaculadate b/scripts/logwatch/applybaculadate new file mode 100755 index 00000000..ec7fe60e --- /dev/null +++ b/scripts/logwatch/applybaculadate @@ -0,0 +1,46 @@ +#!/usr/bin/perl + +######################################################################## +## Copyright (c) 2009 Sigma Consulting Services Limited +## v1.00 2009/06/21 16:54:23 Ian McMichael +## +## This program is free software: you can redistribute it and/or modify +## it under the terms of the GNU General Public License as published by +## the Free Software Foundation, either version 2 of the License, or +## any later version. +## +## This program is distributed in the hope that it will be useful, +## but WITHOUT ANY WARRANTY; without even the implied warranty of +## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +## GNU General Public License for more details. +## +## You should have received a copy of the GNU General Public License +## along with this program. If not, see . +######################################################################## + +use Logwatch ':dates'; + +my $Debug = $ENV{'LOGWATCH_DEBUG'} || 0; + +$SearchDate = TimeFilter('%d-%b %H:%M'); + +if ( $Debug > 5 ) { + print STDERR "DEBUG: Inside ApplyBaculaDate...\n"; + print STDERR "DEBUG: Looking For: " . $SearchDate . "\n"; +} + +my $OutputLine = 0; + +while (defined($ThisLine = )) { + if ($ThisLine =~ m/^$SearchDate /o) { + $OutputLine = 1; + } elsif ($ThisLine !~ m/^\s+/o) { + $OutputLine = 0; + } + + if ($OutputLine) { + print $ThisLine; + } +} + +# vi: shiftwidth=3 syntax=perl tabstop=3 et diff --git a/scripts/logwatch/bacula b/scripts/logwatch/bacula new file mode 100755 index 00000000..67eb321a --- /dev/null +++ b/scripts/logwatch/bacula @@ -0,0 +1,65 @@ +#!/usr/bin/perl -w +# +# logwatch filter script for bacula log files +# +# Mon Jan 03 2005 +# D. 
Scott Barninger and Karl Cunningham +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS + +use strict; +use POSIX qw(strftime); + +my (@JobName,$JobStatus,$ThisName,$ThisStatus,$ThisDate); +my ($Job,$JobId,$JobDate,$Debug,$DebugCounter,%data); + +# set debug level +$Debug = $ENV{'LOGWATCH_DEBUG'} || 0; + +if ( $Debug >= 5 ) { + print STDERR "\n\nDEBUG: Inside Bacula Filter \n\n"; + $DebugCounter = 1; +} + +while () { + chomp; + if ( $Debug >= 5 ) { + print STDERR "DEBUG($DebugCounter): $_\n"; + $DebugCounter++; + } + + # Test the line for a new entry, which is a jobid record + if (/^\s*JobId:\s+(\d+)/) { + $JobId = $1; + next; + } + + (/^\s*Job:\s*(.*)/) and $Job = $1; + (/^\s*Termination:\s*(.*)/) and $JobStatus = $1; + (/^\s*Job:.*(\d{4}-\d{2}-\d{2})/) and $JobDate = $1; + + if ($JobId and $Job and $JobStatus and $JobDate) { + $data{$JobId} = { + "Job" => $Job, + "JobStatus" => $JobStatus, + "JobDate" => $JobDate, + }; + $JobId = $Job = $JobStatus = $JobDate = ""; + } +} + +# if we have data print it out, otherwise do nothing +if (scalar(keys(%data))) { + print "\nJobs Run:\n"; + foreach my $Id (sort {$a<=>$b} (keys(%data))) { + $ThisName = $data{$Id}{Job}; + $ThisStatus = $data{$Id}{JobStatus}; + $ThisDate = $data{$Id}{JobDate}; + $ThisName =~ s/\s//g; + $ThisStatus =~ s/\s//g; + print "$ThisDate $Id $ThisName\n $ThisStatus\n\n"; + } +} + +exit(0); diff --git a/scripts/logwatch/logfile.bacula.conf.in b/scripts/logwatch/logfile.bacula.conf.in new file mode 100644 index 00000000..22b6e985 --- /dev/null +++ b/scripts/logwatch/logfile.bacula.conf.in @@ -0,0 +1,3 @@ +# What actual file? Defaults to LogPath if not absolute path.... +LogFile = @logdir@/bacula.log + diff --git a/scripts/logwatch/services.bacula.conf b/scripts/logwatch/services.bacula.conf new file mode 100644 index 00000000..b0b1d814 --- /dev/null +++ b/scripts/logwatch/services.bacula.conf @@ -0,0 +1,6 @@ +Title = "bacula" + +# Which logfile group... +LogFile = bacula + +*ApplyBaculaDate = diff --git a/scripts/magic.bacula b/scripts/magic.bacula new file mode 100644 index 00000000..f7465651 --- /dev/null +++ b/scripts/magic.bacula @@ -0,0 +1,11 @@ +### Old Bacula volume +12 string BB01 old Bacula volume data, +>20 bedate x session time %s + +### Current Bacula volume +12 string BB02 Bacula volume data, +>20 bedate x session time %s, +>24 belong -1 PRE_LABEL, +>24 belong -2 VOL_LABEL, +>>57 belong x version %d, +>>93 string x name '%s' diff --git a/scripts/magic.bacula.txt b/scripts/magic.bacula.txt new file mode 100644 index 00000000..636d6b88 --- /dev/null +++ b/scripts/magic.bacula.txt @@ -0,0 +1,17 @@ +The file magic.bacula can be added to your system files so that the +file command will recognize Bacula Volumes as such. + +If I understand this correctly, you add that to the database of +"magic" that the command file uses. + +See 'man 1 file' and 'man 5 magic'. On my system, there are +many ways to add to the magic. For testing, you'd add it to +/etc/magic, usually. + +This is an interesting contribution that might also be +relevant to the maintainers of magic, IMO. + +Explanation by: Arno + +magic.bacula submitted in bug #715 by "shattered". 
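+
+For a quick test without editing /etc/magic, the file command can also be
+pointed at this magic database directly; the volume path here is only an
+illustration:
+
+file -m scripts/magic.bacula /var/bacula/volumes/Vol0001
+
+A labeled Bacula volume should then be identified as "Bacula volume data"
+rather than plain "data".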
+ diff --git a/scripts/manual_prune.pl b/scripts/manual_prune.pl new file mode 100755 index 00000000..d01dbbfd --- /dev/null +++ b/scripts/manual_prune.pl @@ -0,0 +1,251 @@ +#!/usr/bin/perl -w +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +use strict; + +=head1 DESCRIPTION + + manual_prune.pl -- prune volumes + +=head2 USAGE + + manual_prune.pl [--bconsole=/path/to/bconsole] [--help] [--doprune] [--expired] [--fixerror] [--fileprune] + + This program when run will manually prune all Volumes that it finds + in your Bacula catalog. It will respect all the Retention periods. + + manual_prune must have access to bconsole. It will execute bconsole + from /opt/bacula/bin. If bconsole is in a different location, + you must specify the path to it with the --bconsole=... option. + + If you do not add --doprune, you will see what the script proposes + to do, but it will not prune. + + If you add --fixerror, it will change the status of any Volume + that is marked Error to Used so that it will be pruned. + + If you add --expired, it will attempt to prune only those + Volumes where the Volume Retention period has expired. + + If you use --fileprune, the script will prune files and pathvisibility + useful to avoid blocking Bacula during pruning. + + Adding --debug will print additional debug information. + + +=head1 LICENSE + + Copyright (C) 2008-2014 Bacula Systems SA + + Bacula(R) is a registered trademark of Kern Sibbald. + The licensor of Bacula Enterprise is Bacula Systems SA, + Rue Galilee 5, 1400 Yverdon-les-Bains, Switzerland. + + This file has been made available for your personal use in the + hopes that it will allow community users to make better use of + Bacula. + +=head1 VERSION + + 1.4 + +=cut + +use Getopt::Long qw/:config no_ignore_case/; +use Pod::Usage; +use File::Temp; + +my $help; +my $do_prune; +my $expired; +my $debug; +# set to your bconsole prog +my $bconsole = "/opt/bacula/bin/bconsole"; +my $do_fix; +my $do_file_prune; + +GetOptions('help' => \$help, + 'bconsole=s' => \$bconsole, + 'expired' => \$expired, + 'debug' => \$debug, + 'fixerror' => \$do_fix, + 'fileprune' => \$do_file_prune, + 'doprune' => \$do_prune) + || Pod::Usage::pod2usage(-exitval => 2, -verbose => 2) ; + +if ($help) { + Pod::Usage::pod2usage(-exitval => 2, -verbose => 2) ; +} + +if (! -x $bconsole) { + die "Can't exec $bconsole, please specify --bconsole option $!"; +} + +my @vol; +my @vol_purged; + +# This fix can work with File based device. Don't use it for Tape media +if ($do_fix) { + my ($fh, $file) = File::Temp::tempfile(); + print $fh "sql +SELECT VolumeName AS \"?vol?\" FROM Media WHERE VolStatus = 'Error'; + +"; + close($fh); + open(FP, "cat $file | $bconsole|") or die "Can't open $bconsole (ERR=$!), adjust your \$PATH"; + while (my $l = ) + { + if ($l =~ /^\s*\|\s*([\w\d:\. 
\-]+?)\s*\|/) { + if ($debug) { + print $l; + } + push @vol, $1; + } + } + close(FP); + unlink($file); + + if (scalar(@vol) > 0) { + print "Will try to fix volume in Error: ", join(",", @vol), "\n"; + open(FP, "|$bconsole") or die "Can't send commands to $bconsole"; + print FP map { "update volume=$_ volstatus=Used\n" } @vol; + close(FP); + @vol = (); + } +} + +if ($do_file_prune) { + my ($fh, $file) = File::Temp::tempfile(); + if ($do_prune) { + print $fh "sql +BEGIN; +CREATE TEMPORARY TABLE temp AS +SELECT DISTINCT JobId FROM Job JOIN JobMedia USING (JobId) JOIN +(SELECT Media.MediaId AS MediaId +FROM Media +WHERE VolStatus IN ('Full', 'Used') + AND ( (Media.LastWritten) + + interval '1 second' * (Media.VolRetention) + ) < NOW()) AS M USING (MediaId) + WHERE Job.JobFiles > 50000 AND Job.PurgedFiles=0; +SELECT JobId FROM temp; +DELETE FROM File WHERE JobId IN (SELECT JobId FROM temp); +DELETE FROM PathVisibility WHERE JobId IN (SELECT JobId FROM temp); +UPDATE Job SET PurgedFiles=1 WHERE JobId IN (SELECT JobId FROM temp); +DROP TABLE temp; +COMMIT; + +quit +"; + } else { + print $fh "sql +SELECT DISTINCT JobId FROM Job JOIN JobMedia USING (JobId) JOIN +(SELECT Media.MediaId AS MediaId +FROM Media +WHERE VolStatus IN ('Full', 'Used') + AND ( (Media.LastWritten) + + interval '1 second' * (Media.VolRetention) + ) < NOW()) AS M USING (MediaId) + WHERE Job.JobFiles > 50000 AND Job.PurgedFiles=0; + +quit +"; + } + close($fh); + open(FP, "cat $file | $bconsole|") or die "Can't open $bconsole (ERR=$!), adjust your \$PATH"; + while (my $l = ) + { + if ($debug || !$do_prune) { + print $l; + } + } + close(FP); + unlink($file); + exit 0; +} + +# TODO: Fix it for SQLite +# works only for postgresql and MySQL at the moment +# One of the two query will fail, but it's not a problem +if ($expired) { + my ($fh, $file) = File::Temp::tempfile(); + print $fh "sql +SELECT Media.VolumeName AS volumename, + Media.LastWritten AS lastwritten, + ( + (Media.LastWritten) + + interval '1 second' * (Media.VolRetention) + ) AS expire +FROM Media +WHERE VolStatus IN ('Full', 'Used') + AND ( (Media.LastWritten) + + interval '1 second' * (Media.VolRetention) + ) < NOW(); +SELECT Media.VolumeName AS volumename, + Media.LastWritten AS lastwritten, + ( + Media.LastWritten + Media.VolRetention + ) AS expire +FROM Media +WHERE VolStatus IN ('Full', 'Used') + AND ( Media.LastWritten + Media.VolRetention + ) < NOW(); + +quit +"; + close($fh); + open(FP, "cat $file | $bconsole|") or die "Can't open $bconsole (ERR=$!), adjust your \$PATH"; + while (my $l = ) + { + # | TestVolume001 | 2011-06-17 14:36:59 | 2011-06-17 14:37:00 + if ($l =~ /^\s*\|\s*([\w\d:\. \-]+?)\s*\|\s*\d/) { + if ($debug) { + print $l; + } + push @vol, $1; + } + } + close(FP); + unlink($file); + +} else { + + open(FP, "echo list volumes | $bconsole|") or die "Can't open $bconsole (ERR=$!), adjust your \$PATH"; + while (my $l = ) + { + # | 1 | TestVolume001 | Used + if ($l =~ /^\s*\|\s*[\d,]+\s*\|\s*([\w\d-]+)\s*\|\s*Used/) { + push @vol, $1; + } + if ($l =~ /^\s*\|\s*[\d,]+\s*\|\s*([\w\d-]+)\s*\|\s*Full/) { + push @vol, $1; + } + if ($l =~ /^\s*\|\s*[\d,]+\s*\|\s*([\w\d-]+)\s*\|\s*Purged/) { + push @vol_purged, $1; + } + } + close(FP); + + if ($? 
!= 0) { + system("echo list volumes | $bconsole"); + die "bconsole returns a non zero status, please check that you can execute it"; + + } +} + +if (!scalar(@vol)) { + print "No Volume(s) found to prune.\n"; + +} else { + if ($do_prune) { + print "Attempting to to prune ", join(",", @vol), "\n"; + open(FP, "|$bconsole") or die "Can't send commands to $bconsole"; + print FP map { "prune volume=$_ yes\n" } @vol; + close(FP); + } else { + print "Would have attempted to prune ", join(",", @vol), "\n"; + print "You can actually prune by specifying the --doprune option.\n" + } +} diff --git a/scripts/mtx-changer.conf b/scripts/mtx-changer.conf new file mode 100644 index 00000000..981efb9e --- /dev/null +++ b/scripts/mtx-changer.conf @@ -0,0 +1,89 @@ +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# +# This file is sourced by the mtx-changer script every time it runs. +# You can put your site customization here, and when you do an +# upgrade, the process should not modify this file. Thus you +# preserve your mtx-changer configuration. +# + +# We update the version when an incompatible change +# to mtx-changer or this conf file is made, such as +# adding a new required variable. +version=2 + +# Set to 1 if you want to do offline before unload +offline=0 + +# Set to amount of time in seconds to wait after an offline +offline_sleep=0 + +# Set to amount of time in seconds to wait after a load +load_sleep=0 + +# Set to 1 to do an inventory before a status. Not normally needed. +inventory=0 + +# If you have a VXA PacketLoader, it might display a different +# Storage Element line, so try setting the following to 1 +vxa_packetloader=0 + +# +# Debug logging +# + +# If you have multiple SD's, set this differently for each one +# so you know which message comes from which one. This can +# be any string, and will appear in each debug message just +# after the time stamp. +chgr_id=0 + +# Set to 1 if you want debug info written to a log +debug_log=0 + +# Set to debug level you want to see +# 0 is off +# 10 is important events (load, unload, loaded) +# 100 is everything +# Note debug_log must be set to 1 for anything to be generated +# +debug_level=10 + +# Debug levels by importance +# Normally you do not need to change this +dbglvl=100 +# More important messages +idbglvl=10 + +# +# mt status output +# SunOS No Additional Sense +# FreeBSD Current Driver State: at rest. +# Linux ONLINE +# Note Debian has a different mt than the standard Linux version. +# When no tape is in the drive it waits 2 minutes. +# When a tape is in the drive, it prints user unfriendly output. +# Note, with Ubuntu Gusty (8.04), there are two versions of mt, +# so we attempt to figure out which one. +# + +OS=`uname` +case ${OS} in + SunOS) + ready="No Additional Sense" + ;; + FreeBSD) + ready="Current Driver State: at rest." + ;; + Linux) + ready="ONLINE" + if test -f /etc/debian_version ; then + mt --version|grep "mt-st" >/dev/null 2>&1 + if test $? -eq 1 ; then + ready="drive status" + fi + fi + ;; +esac diff --git a/scripts/mtx-changer.in b/scripts/mtx-changer.in new file mode 100644 index 00000000..f1dc2d31 --- /dev/null +++ b/scripts/mtx-changer.in @@ -0,0 +1,353 @@ +#!/bin/sh +# +# Bacula(R) - The Network Backup Solution +# +# Copyright (C) 2000-2016 Kern Sibbald +# +# The original author of Bacula is Kern Sibbald, with contributions +# from many others, a complete list can be found in the file AUTHORS. 
+# +# You may use this file and others of this release according to the +# license defined in the LICENSE file, which includes the Affero General +# Public License, v3.0 ("AGPLv3") and some additional permissions and +# terms pursuant to its AGPLv3 Section 7. +# +# This notice must be preserved when any source code is +# conveyed and/or propagated. +# +# Bacula(R) is a registered trademark of Kern Sibbald. +# +# If you set in your Device resource +# +# Changer Command = "path-to-this-script/mtx-changer %c %o %S %a %d" +# you will have the following input to this script: +# +# So Bacula will always call with all the following arguments, even though +# in come cases, not all are used. +# +# mtx-changer "changer-device" "command" "slot" "archive-device" "drive-index" +# $1 $2 $3 $4 $5 +# +# for example: +# +# mtx-changer /dev/sg0 load 1 /dev/nst0 0 (on a Linux system) +# +# will request to load the first cartidge into drive 0, where +# the SCSI control channel is /dev/sg0, and the read/write device +# is /dev/nst0. +# +# The commands are: +# Command Function +# unload unload a given slot +# load load a given slot +# loaded which slot is loaded? +# list list Volume names (requires barcode reader) +# slots how many slots total? +# listall list all info +# transfer +# +# Slots are numbered from 1 ... +# Drives are numbered from 0 ... +# +# +# If you need to an offline, refer to the drive as $4 +# e.g. mt -f $4 offline +# +# Many changers need an offline after the unload. Also many +# changers need a sleep 60 after the mtx load. +# +# N.B. If you change the script, take care to return either +# the mtx exit code or a 0. If the script exits with a non-zero +# exit code, Bacula will assume the request failed. +# + +# myversion must be the same as version in mtx-changer.conf +myversion=2 + +# source our conf file +if test ! -f @scriptdir@/mtx-changer.conf ; then + echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" + echo "ERROR: @scriptdir@/mtx-changer.conf file not found!!!!" + echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" + exit 1 +fi +. @scriptdir@/mtx-changer.conf + +if test "${version}" != "${myversion}" ; then + echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" + echo "ERROR: @scriptdir@/mtx-changer.conf has wrong version. Wanted ${myversion}, got ${version} !!!" + echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" + exit 1 +fi + +MTX=@MTX@ + +if test ${debug_log} -ne 0 ; then + touch @working_dir@/mtx.log +fi +dbgfile="@working_dir@/mtx.log" +debug() { + if test -f $dbgfile -a ${debug_level} -ge $1; then + echo "`date +%m%d-%H:%M:%S.%N|cut -c1-16` ${chgr_id} $2" >> $dbgfile + fi +} + + +# +# Create a temporary file +# +make_temp_file() { + TMPFILE=`mktemp @working_dir@/mtx.XXXXXXXXXX` + if test x${TMPFILE} = x; then + TMPFILE="@working_dir@/mtx.$$" + if test -f ${TMPFILE}; then + echo "ERROR: Temp file security problem on: ${TMPFILE}" + exit 1 + fi + fi +} + +# +# Create a temporary file for stderr +# +# Note, this file is used because sometime mtx emits +# unexpected error messages followed by the output +# expected during success. +# So we separate STDOUT and STDERR in +# certain of the mtx commands. The contents of STDERR +# is then printed after the STDOUT produced by mtx +# thus we sometimes get better changer results. 
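+# For example (illustrative, not verbatim mtx output): when a transient
+# "Error Code=" line ends up in the error file, the load and unload cases
+# below sleep and retry the mtx command, up to five attempts in total,
+# before the captured stderr is finally echoed back to Bacula.
+#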
+# +make_err_file() { + ERRFILE=`mktemp @working_dir@/mtx.err.XXXXXXXXXX` + if test x${ERRFILE} = x; then + ERRFILE="@working_dir@/mtx.err.$$" + if test -f ${ERRFILE}; then + echo "ERROR: Temp file security problem on: ${ERRFILE}" + exit 1 + fi + fi +} + + +# +# The purpose of this function to wait a maximum +# time for the drive. It will +# return as soon as the drive is ready, or after +# waiting a maximum of 300 seconds. +# Note, this is very system dependent, so if you are +# not running on Linux, you will probably need to +# re-write it, or at least change the grep target. +# We've attempted to get the appropriate OS grep targets +# in the code at the top of this script. +# +wait_for_drive() { + i=0 + while [ $i -le 300 ]; do # Wait max 300 seconds + if mt -f $1 status 2>&1 | grep "${ready}" >/dev/null 2>&1; then + break + fi + debug $dbglvl "Device $1 - not ready, retrying..." + sleep 1 + i=`expr $i + 1` + done +} + +# check parameter count on commandline +# +check_parm_count() { + pCount=$1 + pCountNeed=$2 + if test $pCount -lt $pCountNeed; then + echo "ERROR: usage: mtx-changer ctl-device command [slot archive-device drive-index]" + echo " Insufficient number of arguments given." + if test $pCount -lt 2; then + echo " Mimimum usage is first two arguments ..." + else + echo " Command expected $pCountNeed arguments" + fi + exit 1 + fi +} + +# Check for special cases where only 2 arguments are needed, +# all others are a minimum of 5 +# +case $2 in + list|listall) + check_parm_count $# 2 + ;; + slots) + check_parm_count $# 2 + ;; + transfer) + check_parm_count $# 4 + ;; + *) + check_parm_count $# 5 + ;; +esac + + +# Setup arguments +ctl=$1 +cmd="$2" +slot=$3 +device=$4 +drive=$5 + +debug $dbglvl "Parms: $ctl $cmd $slot $device $drive" + +case $cmd in + unload) + + if test ${offline} -eq 1 ; then + mt -f $device offline + fi + if test ${offline_sleep} -ne 0 ; then + sleep ${offline_sleep} + fi + make_err_file + for i in 1 2 3 4 5 ; do + debug $idbglvl "Doing mtx -f $ctl unload slot=$slot drv=$drive" + ${MTX} -f $ctl unload $slot $drive 2>${ERRFILE} + rtn=$? + if test $rtn -eq 0 ; then + break + fi + grep "Error Code=" ${ERRFILE} 2>/dev/null 1>/dev/null + if test $? -ne 0 ; then + break + fi + sleep $i + done + cat ${ERRFILE} + rm -f ${ERRFILE} >/dev/null 2>&1 + if test $rtn -ne 0 ; then + debug $idbglvl "FAIL: mtx -f $ctl unload slot=$slot drv=$drive" + fi + exit $rtn + ;; + + load) + make_err_file + for i in 1 2 3 4 5 ; do + debug $idbglvl "Doing mtx -f $ctl load slot=$slot drv=$drive" + ${MTX} -f $ctl load $slot $drive 2>${ERRFILE} + rtn=$? + if test $rtn -eq 0 ; then + break + fi + grep "Error Code=" ${ERRFILE} 2>/dev/null 1>/dev/null + if test $? -ne 0 ; then + break + fi + sleep $i + done + if test ${load_sleep} -ne 0 ; then + sleep ${load_sleep} + fi + wait_for_drive $device + cat ${ERRFILE} + rm -f ${ERRFILE} >/dev/null 2>&1 + if test $rtn -ne 0 ; then + debug $idbglvl "FAIL: mtx -f $ctl load slot=$slot drv=$drive" + fi + exit $rtn + ;; + + list) + make_temp_file + if test ${inventory} -ne 0 ; then + ${MTX} -f $ctl inventory + fi + debug $dbglvl "Doing mtx -f $ctl list" + ${MTX} -f $ctl status >${TMPFILE} + rtn=$? 
+ if test ${vxa_packetloader} -ne 0 ; then + cat ${TMPFILE} | grep " *Storage Element [0-9]*:.*Full" | sed "s/ Storage Element //" | sed "s/Full :VolumeTag=//" + else + cat ${TMPFILE} | grep " Storage Element [0-9]*:.*Full" | awk "{print \$3 \$4}" | sed "s/Full *\(:VolumeTag=\)*//" + fi + cat ${TMPFILE} | grep "^Data Transfer Element [0-9]*:Full (Storage Element [0-9]" | awk '{printf "%s:%s\n",$7,$10}' + rm -f ${TMPFILE} >/dev/null 2>&1 + if test $rtn -ne 0 ; then + debug $idbglvl "FAIL: mtx -f $ctl list" + fi + exit $rtn + ;; + + listall) +# Drive content: D:Drive num:F:Slot loaded:Volume Name +# D:0:F:2:vol2 or D:Drive num:E +# D:1:F:42:vol42 +# D:3:E +# +# Slot content: +# S:1:F:vol1 S:Slot num:F:Volume Name +# S:2:E or S:Slot num:E +# S:3:F:vol4 +# +# Import/Export tray slots: +# I:10:F:vol10 I:Slot num:F:Volume Name +# I:11:E or I:Slot num:E +# I:12:F:vol40 + + make_temp_file + if test ${inventory} -ne 0 ; then + ${MTX} -f $ctl inventory + fi + debug $dbglvl "Doing mtx -f $ctl -- to list all" + ${MTX} -f $ctl status >${TMPFILE} + rtn=$? + # can be converted to awk+sed+cut, see below + perl -ne ' +/Data Transfer Element (\d+):Empty/ && print "D:$1:E\n"; +/Data Transfer Element (\d+):Full \(Storage Element (\d+) Loaded\)(:VolumeTag =\s*(.+))?/ && print "D:$1:F:$2:$4\n"; +/Storage Element (\d+):Empty/ && print "S:$1:E\n"; +/Storage Element (\d+):Full( :VolumeTag=(.+))?/ && print "S:$1:F:$3\n"; +/Storage Element (\d+) IMPORT.EXPORT:Empty/ && print "I:$1:E\n"; +/Storage Element (\d+) IMPORT.EXPORT:Full( :VolumeTag=(.+))?/ && print "I:$1:F:$3\n";' ${TMPFILE} + # If perl isn't installed, you can use by those commands +#cat ${TMPFILE} | grep "Data Transfer Element" | awk "{print \"D:\"\$4 \$7 \$9 \$10}" | sed "s/=/:/" | sed "s/Full/F:/" | sed "s/Empty/E/" +#cat ${TMPFILE} | grep -v "Data Transfer Element" | grep "Storage Element" | grep -v "IMPORT/EXPORT" | awk "{print \"S:\"\$3 \$4 \$5}" | sed "s/IMPORT\/EXPORT//" | sed "s/Full *:VolumeTag=/F:/" | sed "s/Empty/E/" +#cat ${TMPFILE} | grep -v "Data Transfer Element" | grep "Storage Element" | grep "IMPORT/EXPORT" | awk "{print \"I:\"\$3 \$4 \$5}" | sed "s/IMPORT\/EXPORT//" | sed "s/Full *:VolumeTag=/F:/" | sed "s/Empty/E/" + + rm -f ${TMPFILE} >/dev/null 2>&1 + exit $rtn + ;; + + transfer) + slotdest=$device + debug $dbglvl "Doing transfer from $slot to $slotdest" + ${MTX} -f $ctl transfer $slot $slotdest + rtn=$? + if test $rtn -ne 0 ; then + debug $idbglvl "FAIL: mtx -f $ctl transfer from=$slot to=$slotdest" + fi + exit $rtn + ;; + + loaded) + make_temp_file + debug $idbglvl "Doing mtx -f $ctl $drive -- to find what is loaded" + ${MTX} -f $ctl status >${TMPFILE} + rtn=$? + cat ${TMPFILE} | grep "^Data Transfer Element $drive:Full" | awk "{print \$7}" + cat ${TMPFILE} | grep "^Data Transfer Element $drive:Empty" | awk "{print 0}" + rm -f ${TMPFILE} >/dev/null 2>&1 + if test $rtn -ne 0 ; then + debug $idbglvl "FAIL: mtx -f $ctl loaded drv=$drive" + fi + exit $rtn + ;; + + slots) + debug $dbglvl "Doing mtx -f $ctl -- to get count of slots" + ${MTX} -f $ctl status | grep " *Storage Changer" | awk "{print \$5}" + rtn=$? 
+ if test $rtn -ne 0 ; then + debug $idbglvl "FAIL: mtx -f $ctl slots" + fi + ;; +esac diff --git a/scripts/tapealert b/scripts/tapealert new file mode 100755 index 00000000..c8c68c0a --- /dev/null +++ b/scripts/tapealert @@ -0,0 +1,68 @@ +#!/bin/sh +# +# Copyright (C) 2000-2016 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# Bacula interface to tapeinfo to get tape alerts +# +# tapealert %l (control device name) +# +# Note: you must have in your SD Device resource: +# Alert Command = /full-path/tapealert %l +# Control Device = /dev/sg0n (where this is the scsi control +# device for the device you are using). +# + +# Note: to test +# 1. uncomment out the DEBUG=1 line below +# 2. Possibly remove or add TapeAlert[nn]: that you want to test. +# Note, the message following the : is not used. +# 3. Run Bacula +# +#DEBUG=1 + +tapeinfo=`which tapeinfo` + +if [ x${tapeinfo} = x ] ; then + echo "tapeinfo program not found, but is required." + exit 1 +fi +if [ x$1 = x ] ; then + echo "First argument missing. Must be device control name." + exit 1 +fi + + +if [ x$DEBUG = x ] ; then +$tapeinfo -f $1 |grep "^TapeAlert" - |cut -b1-13 +exit $? + +else + +# For testing only +cat < + #include + #ifndef _ + #define _(s) gettext((s)) + #endif /* _ */ + #ifndef N_ + #define N_(s) (s) + #endif /* N_ */ +#else /* !ENABLE_NLS */ + #undef _ + #undef N_ + #undef textdomain + #undef bindtextdomain + #undef setlocale + + #ifndef _ + #define _(s) (s) + #endif + #ifndef N_ + #define N_(s) (s) + #endif + #ifndef textdomain + #define textdomain(d) + #endif + #ifndef bindtextdomain + #define bindtextdomain(p, d) + #endif + #ifndef setlocale + #define setlocale(p, d) + #endif +#endif /* ENABLE_NLS */ + + +/* Use the following for strings not to be translated */ +#define NT_(s) (s) + +/* This should go away! ****FIXME***** */ +#define MAXSTRING 500 + +/* Maximum length to edit time/date */ +#define MAX_TIME_LENGTH 50 + +/* Maximum Name length including EOS */ +#define MAX_NAME_LENGTH 128 + +/* Maximum escaped Name lenght including EOS 2*MAX_NAME_LENGTH+1 */ +#define MAX_ESCAPE_NAME_LENGTH 257 + +/* Maximume number of user entered command args */ +#define MAX_CMD_ARGS 30 + +/* All tape operations MUST be a multiple of this */ +#define TAPE_BSIZE 1024 + +#ifdef DEV_BSIZE +#define B_DEV_BSIZE DEV_BSIZE +#endif + +#if !defined(B_DEV_BSIZE) & defined(BSIZE) +#define B_DEV_BSIZE BSIZE +#endif + +#ifndef B_DEV_BSIZE +#define B_DEV_BSIZE 512 +#endif + +/** + * Set to time limit for other end to respond to + * authentication. Normally 10 minutes is *way* + * more than enough. The idea is to keep the Director + * from hanging because there is a dead connection on + * the other end. + */ +#define AUTH_TIMEOUT 60 * 10 + +/* + * Default network buffer size + */ +#define DEFAULT_NETWORK_BUFFER_SIZE (64 * 1024) + +/** + * Tape label types -- stored in catalog + */ +#define B_BACULA_LABEL 0 +#define B_ANSI_LABEL 1 +#define B_IBM_LABEL 2 + +/* + * Device types + * If you update this table, be sure to add an + * entry in prt_dev_types[] in stored/dev.c + * This number is stored in the Catalog as VolType or VolParts, do not change. 
+ */ +enum { + B_FILE_DEV = 1, + B_TAPE_DEV = 2, + B_DVD_DEV = 3, + B_FIFO_DEV = 4, + B_VTAPE_DEV= 5, /* change to B_TAPE_DEV after init */ + B_FTP_DEV = 6, + B_VTL_DEV = 7, /* Virtual tape library device */ + B_ADATA_DEV = 8, /* Aligned data Data file */ + B_ALIGNED_DEV = 9, /* Aligned data Meta file */ + B_NULL_DEV = 11, /* /dev/null for testing */ + B_VALIGNED_DEV = 12, /* Virtual for Aligned device (not stored) */ + B_VDEDUP_DEV = 13, /* Virtual for Dedup device (not stored) */ + B_CLOUD_DEV = 14 /* New Cloud device type (available in 8.8) */ +}; + +/** + * Actions on purge (bit mask) + */ +#define ON_PURGE_TRUNCATE 1 + +/* Size of File Address stored in STREAM_SPARSE_DATA. Do NOT change! */ +#define OFFSET_FADDR_SIZE (sizeof(uint64_t)) + +/* Size of crypto length stored at head of crypto buffer. Do NOT change! */ +#define CRYPTO_LEN_SIZE ((int)sizeof(uint32_t)) + + +/* Plugin Features */ +#define PLUGIN_FEATURE_RESTORELISTFILES "RestoreListFiles" + +/** + * This is for dumb compilers/libraries like Solaris. Linux GCC + * does it correctly, so it might be worthwhile + * to remove the isascii(c) with ifdefs on such + * "smart" systems. + */ +#define B_ISSPACE(c) (isascii((int)(c)) && isspace((int)(c))) +#define B_ISALPHA(c) (isascii((int)(c)) && isalpha((int)(c))) +#define B_ISUPPER(c) (isascii((int)(c)) && isupper((int)(c))) +#define B_ISDIGIT(c) (isascii((int)(c)) && isdigit((int)(c))) +#define B_ISXDIGIT(c) (isascii((int)(c)) && isxdigit((int)(c))) + +/** For multiplying by 10 with shift and addition */ +#define B_TIMES10(d) ((d<<3)+(d<<1)) + + +typedef void (HANDLER)(); +typedef int (INTHANDLER)(); + +#ifdef SETPGRP_VOID +# define SETPGRP_ARGS(x, y) /* No arguments */ +#else +# define SETPGRP_ARGS(x, y) (x, y) +#endif + +#ifndef S_ISLNK +#define S_ISLNK(m) (((m) & S_IFM) == S_IFLNK) +#endif + +/** Added by KES to deal with Win32 systems */ +#ifndef S_ISWIN32 +#define S_ISWIN32 020000 +#endif + +#ifndef INADDR_NONE +#define INADDR_NONE ((unsigned long) -1) +#endif + +#ifdef TIME_WITH_SYS_TIME +# include +# include +#else +# ifdef HAVE_SYS_TIME_H +# include +# else +# include +# endif +#endif + +#ifndef O_BINARY +#define O_BINARY 0 +#endif + +#ifndef O_NOFOLLOW +#define O_NOFOLLOW 0 +#endif + +#ifndef MODE_RW +#define MODE_RW 0666 +#endif + +#if defined(HAVE_WIN32) +typedef int64_t boffset_t; +#define caddr_t char * +#else +typedef off_t boffset_t; +#endif + +/* These probably should be subroutines */ +#define Pw(x) \ + do { int errstat; if ((errstat=rwl_writelock(&(x)))) \ + e_msg(__FILE__, __LINE__, M_ABORT, 0, "Write lock lock failure. ERR=%s\n",\ + strerror(errstat)); \ + } while(0) + +#define Vw(x) \ + do { int errstat; if ((errstat=rwl_writeunlock(&(x)))) \ + e_msg(__FILE__, __LINE__, M_ABORT, 0, "Write lock unlock failure. 
ERR=%s\n",\ + strerror(errstat)); \ + } while(0) + +#define LockRes() b_LockRes(__FILE__, __LINE__) +#define UnlockRes() b_UnlockRes(__FILE__, __LINE__) + +#ifdef DEBUG_MEMSET +#define memset(a, v, n) b_memset(__FILE__, __LINE__, a, v, n) +void b_memset(const char *file, int line, void *mem, int val, size_t num); +#endif + +/* we look for simple debug level + * then finally we check if tags are set on debug_level and lvl + */ + +/* + lvl | debug_level | tags | result + -------+----------------+-------+------- + 0 | 0 | | OK + T1|0 | 0 | | NOK + T1|0 | 0 | T1 | OK + 10 | 0 | | NOK + 10 | 10 | | OK + T1|10 | 10 | | NOK + T1|10 | 10 | T1 | OK + T1|10 | 10 | T2 | NOK + */ + +/* The basic test is working because tags are on high bits */ +#if 1 +#define chk_dbglvl(lvl) ((lvl) <= debug_level || \ + (((lvl) & debug_level_tags) && (((lvl) & ~DT_ALL) <= debug_level))) +#else +/* Alain's macro for debug */ +#define chk_dbglvl(lvl) (((lvl) & debug_level_tags) || (((lvl) & ~DT_ALL) <= debug_level)) +#endif +/** + * The digit following Dmsg and Emsg indicates the number of substitutions in + * the message string. We need to do this kludge because non-GNU compilers + * do not handle varargs #defines. + */ +/** Debug Messages that are printed */ +#ifdef DEBUG +#define Dmsg0(lvl, msg) if (chk_dbglvl(lvl)) d_msg(__FILE__, __LINE__, lvl, msg) +#define Dmsg1(lvl, msg, a1) if (chk_dbglvl(lvl)) d_msg(__FILE__, __LINE__, lvl, msg, a1) +#define Dmsg2(lvl, msg, a1, a2) if (chk_dbglvl(lvl)) d_msg(__FILE__, __LINE__, lvl, msg, a1, a2) +#define Dmsg3(lvl, msg, a1, a2, a3) if (chk_dbglvl(lvl)) d_msg(__FILE__, __LINE__, lvl, msg, a1, a2, a3) +#define Dmsg4(lvl, msg, arg1, arg2, arg3, arg4) if (chk_dbglvl(lvl)) d_msg(__FILE__, __LINE__, lvl, msg, arg1, arg2, arg3, arg4) +#define Dmsg5(lvl, msg, a1, a2, a3, a4, a5) if (chk_dbglvl(lvl)) d_msg(__FILE__, __LINE__, lvl, msg, a1, a2, a3, a4, a5) +#define Dmsg6(lvl, msg, a1, a2, a3, a4, a5, a6) if (chk_dbglvl(lvl)) d_msg(__FILE__, __LINE__, lvl, msg, a1, a2, a3, a4, a5, a6) +#define Dmsg7(lvl, msg, a1, a2, a3, a4, a5, a6, a7) if (chk_dbglvl(lvl)) d_msg(__FILE__, __LINE__, lvl, msg, a1, a2, a3, a4, a5, a6, a7) +#define Dmsg8(lvl, msg, a1, a2, a3, a4, a5, a6, a7, a8) if (chk_dbglvl(lvl)) d_msg(__FILE__, __LINE__, lvl, msg, a1, a2, a3, a4, a5, a6, a7, a8) +#define Dmsg9(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9) if (chk_dbglvl(lvl)) d_msg(__FILE__,__LINE__,lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9) +#define Dmsg10(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) if (chk_dbglvl(lvl)) d_msg(__FILE__,__LINE__,lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) +#define Dmsg11(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) if (chk_dbglvl(lvl)) d_msg(__FILE__,__LINE__,lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) +#define Dmsg12(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12) if (chk_dbglvl(lvl)) d_msg(__FILE__,__LINE__,lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12) +#define Dmsg13(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13) if (chk_dbglvl(lvl)) d_msg(__FILE__,__LINE__,lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13) +#else +#define Dmsg0(lvl, msg) +#define Dmsg1(lvl, msg, a1) +#define Dmsg2(lvl, msg, a1, a2) +#define Dmsg3(lvl, msg, a1, a2, a3) +#define Dmsg4(lvl, msg, arg1, arg2, arg3, arg4) +#define Dmsg5(lvl, msg, a1, a2, a3, a4, a5) +#define Dmsg6(lvl, msg, a1, a2, a3, a4, a5, a6) +#define Dmsg7(lvl, msg, a1, a2, a3, a4, a5, a6, a7) +#define Dmsg8(lvl, msg, a1, a2, a3, a4, a5, a6, a7, a8) +#define Dmsg11(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) +#define 
Dmsg12(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12) +#define Dmsg13(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13) +#endif /* DEBUG */ + +#ifdef TRACE_FILE +#define Tmsg0(lvl, msg) t_msg(__FILE__, __LINE__, lvl, msg) +#define Tmsg1(lvl, msg, a1) t_msg(__FILE__, __LINE__, lvl, msg, a1) +#define Tmsg2(lvl, msg, a1, a2) t_msg(__FILE__, __LINE__, lvl, msg, a1, a2) +#define Tmsg3(lvl, msg, a1, a2, a3) t_msg(__FILE__, __LINE__, lvl, msg, a1, a2, a3) +#define Tmsg4(lvl, msg, arg1, arg2, arg3, arg4) t_msg(__FILE__, __LINE__, lvl, msg, arg1, arg2, arg3, arg4) +#define Tmsg5(lvl, msg, a1, a2, a3, a4, a5) t_msg(__FILE__, __LINE__, lvl, msg, a1, a2, a3, a4, a5) +#define Tmsg6(lvl, msg, a1, a2, a3, a4, a5, a6) t_msg(__FILE__, __LINE__, lvl, msg, a1, a2, a3, a4, a5, a6) +#define Tmsg7(lvl, msg, a1, a2, a3, a4, a5, a6, a7) t_msg(__FILE__, __LINE__, lvl, msg, a1, a2, a3, a4, a5, a6, a7) +#define Tmsg8(lvl, msg, a1, a2, a3, a4, a5, a6, a7, a8) t_msg(__FILE__, __LINE__, lvl, msg, a1, a2, a3, a4, a5, a6, a7, a8) +#define Tmsg9(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9) t_msg(__FILE__,__LINE__,lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9) +#define Tmsg10(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) t_msg(__FILE__,__LINE__,lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) +#define Tmsg11(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) t_msg(__FILE__,__LINE__,lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) +#define Tmsg12(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12) t_msg(__FILE__,__LINE__,lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12) +#define Tmsg13(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13) t_msg(__FILE__,__LINE__,lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13) +#else +#define Tmsg0(lvl, msg) +#define Tmsg1(lvl, msg, a1) +#define Tmsg2(lvl, msg, a1, a2) +#define Tmsg3(lvl, msg, a1, a2, a3) +#define Tmsg4(lvl, msg, arg1, arg2, arg3, arg4) +#define Tmsg5(lvl, msg, a1, a2, a3, a4, a5) +#define Tmsg6(lvl, msg, a1, a2, a3, a4, a5, a6) +#define Tmsg7(lvl, msg, a1, a2, a3, a4, a5, a6, a7) +#define Tmsg8(lvl, msg, a1, a2, a3, a4, a5, a6, a7, a8) +#define Tmsg11(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) +#define Tmsg12(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12) +#define Tmsg13(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13) +#endif /* TRACE_FILE */ + + + +/** Messages that are printed (uses d_msg) */ +#define Pmsg0(lvl, msg) p_msg(__FILE__, __LINE__, lvl, msg) +#define Pmsg1(lvl, msg, a1) p_msg(__FILE__, __LINE__, lvl, msg, a1) +#define Pmsg2(lvl, msg, a1, a2) p_msg(__FILE__, __LINE__, lvl, msg, a1, a2) +#define Pmsg3(lvl, msg, a1, a2, a3) p_msg(__FILE__, __LINE__, lvl, msg, a1, a2, a3) +#define Pmsg4(lvl, msg, arg1, arg2, arg3, arg4) p_msg(__FILE__, __LINE__, lvl, msg, arg1, arg2, arg3, arg4) +#define Pmsg5(lvl, msg, a1, a2, a3, a4, a5) p_msg(__FILE__, __LINE__, lvl, msg, a1, a2, a3, a4, a5) +#define Pmsg6(lvl, msg, a1, a2, a3, a4, a5, a6) p_msg(__FILE__, __LINE__, lvl, msg, a1, a2, a3, a4, a5, a6) +#define Pmsg7(lvl, msg, a1, a2, a3, a4, a5, a6, a7) p_msg(__FILE__, __LINE__, lvl, msg, a1, a2, a3, a4, a5, a6, a7) +#define Pmsg8(lvl, msg, a1, a2, a3, a4, a5, a6, a7, a8) p_msg(__FILE__, __LINE__, lvl, msg, a1, a2, a3, a4, a5, a6, a7, a8) +#define Pmsg9(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9) p_msg(__FILE__,__LINE__,lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9) +#define Pmsg10(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) p_msg(__FILE__,__LINE__,lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) +#define Pmsg11(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) p_msg(__FILE__,__LINE__,lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) +#define 
Pmsg12(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12) p_msg(__FILE__,__LINE__,lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12) +#define Pmsg13(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13) p_msg(__FILE__,__LINE__,lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13) +#define Pmsg14(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14) p_msg(__FILE__,__LINE__,lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14) + + +/** Daemon Error Messages that are delivered according to the message resource */ +#define Emsg0(typ, lvl, msg) e_msg(__FILE__, __LINE__, typ, lvl, msg) +#define Emsg1(typ, lvl, msg, a1) e_msg(__FILE__, __LINE__, typ, lvl, msg, a1) +#define Emsg2(typ, lvl, msg, a1, a2) e_msg(__FILE__, __LINE__, typ, lvl, msg, a1, a2) +#define Emsg3(typ, lvl, msg, a1, a2, a3) e_msg(__FILE__, __LINE__, typ, lvl, msg, a1, a2, a3) +#define Emsg4(typ, lvl, msg, a1, a2, a3, a4) e_msg(__FILE__, __LINE__, typ, lvl, msg, a1, a2, a3, a4) +#define Emsg5(typ, lvl, msg, a1, a2, a3, a4, a5) e_msg(__FILE__, __LINE__, typ, lvl, msg, a1, a2, a3, a4, a5) +#define Emsg6(typ, lvl, msg, a1, a2, a3, a4, a5, a6) e_msg(__FILE__, __LINE__, typ, lvl, msg, a1, a2, a3, a4, a5, a6) + +/** Job Error Messages that are delivered according to the message resource */ +#define Jmsg0(jcr, typ, lvl, msg) j_msg(__FILE__, __LINE__, jcr, typ, lvl, msg) +#define Jmsg1(jcr, typ, lvl, msg, a1) j_msg(__FILE__, __LINE__, jcr, typ, lvl, msg, a1) +#define Jmsg2(jcr, typ, lvl, msg, a1, a2) j_msg(__FILE__, __LINE__, jcr, typ, lvl, msg, a1, a2) +#define Jmsg3(jcr, typ, lvl, msg, a1, a2, a3) j_msg(__FILE__, __LINE__, jcr, typ, lvl, msg, a1, a2, a3) +#define Jmsg4(jcr, typ, lvl, msg, a1, a2, a3, a4) j_msg(__FILE__, __LINE__, jcr, typ, lvl, msg, a1, a2, a3, a4) +#define Jmsg5(jcr, typ, lvl, msg, a1, a2, a3, a4, a5) j_msg(__FILE__, __LINE__, jcr, typ, lvl, msg, a1, a2, a3, a4, a5) +#define Jmsg6(jcr, typ, lvl, msg, a1, a2, a3, a4, a5, a6) j_msg(__FILE__, __LINE__, jcr, typ, lvl, msg, a1, a2, a3, a4, a5, a6) +#define Jmsg7(jcr, typ, lvl, msg, a1, a2, a3, a4, a5, a6, a7) j_msg(__FILE__, __LINE__, jcr, typ, lvl, msg, a1, a2, a3, a4, a5, a6, a7) +#define Jmsg8(jcr, typ, lvl, msg, a1, a2, a3, a4, a5, a6, a7, a8) j_msg(__FILE__, __LINE__, jcr, typ, lvl, msg, a1, a2, a3, a4, a5, a6, a7, a8) + +/** Queued Job Error Messages that are delivered according to the message resource */ +#define Qmsg0(jcr, typ, mtime, msg) q_msg(__FILE__, __LINE__, jcr, typ, mtime, msg) +#define Qmsg1(jcr, typ, mtime, msg, a1) q_msg(__FILE__, __LINE__, jcr, typ, mtime, msg, a1) +#define Qmsg2(jcr, typ, mtime, msg, a1, a2) q_msg(__FILE__, __LINE__, jcr, typ, mtime, msg, a1, a2) +#define Qmsg3(jcr, typ, mtime, msg, a1, a2, a3) q_msg(__FILE__, __LINE__, jcr, typ, mtime, msg, a1, a2, a3) +#define Qmsg4(jcr, typ, mtime, msg, a1, a2, a3, a4) q_msg(__FILE__, __LINE__, jcr, typ, mtime, msg, a1, a2, a3, a4) +#define Qmsg5(jcr, typ, mtime, msg, a1, a2, a3, a4, a5) q_msg(__FILE__, __LINE__, jcr, typ, mtime, msg, a1, a2, a3, a4, a5) +#define Qmsg6(jcr, typ, mtime, msg, a1, a2, a3, a4, a5, a6) q_msg(__FILE__, __LINE__, jcr, typ, mtime, msg, a1, a2, a3, a4, a5, a6) + + +/** Memory Messages that are edited into a Pool Memory buffer */ +#define Mmsg0(buf, msg) m_msg(__FILE__, __LINE__, buf, msg) +#define Mmsg1(buf, msg, a1) m_msg(__FILE__, __LINE__, buf, msg, a1) +#define Mmsg2(buf, msg, a1, a2) m_msg(__FILE__, __LINE__, buf, msg, a1, a2) +#define Mmsg3(buf, msg, a1, a2, a3) m_msg(__FILE__, __LINE__, buf, msg, a1, a2, a3) +#define Mmsg4(buf, msg, a1, a2, a3, a4) m_msg(__FILE__, __LINE__, 
buf, msg, a1, a2, a3, a4) +#define Mmsg5(buf, msg, a1, a2, a3, a4, a5) m_msg(__FILE__, __LINE__, buf, msg, a1, a2, a3, a4, a5) +#define Mmsg6(buf, msg, a1, a2, a3, a4, a5, a6) m_msg(__FILE__, __LINE__, buf, msg, a1, a2, a3, a4, a5, a6) +#define Mmsg7(buf, msg, a1, a2, a3, a4, a5, a6, a7) m_msg(__FILE__, __LINE__, buf, msg, a1, a2, a3, a4, a5, a6) +#define Mmsg8(buf,msg,a1,a2,a3,a4,a5,a6,a7,a8) m_msg(__FILE__,__LINE__,buf,msg,a1,a2,a3,a4,a5,a6) +#define Mmsg11(buf,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) m_msg(__FILE__,__LINE__,buf,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) +#define Mmsg15(buf,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14,a15) m_msg(__FILE__,__LINE__,buf,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14,a15) + +class POOL_MEM; +/* Edit message into Pool Memory buffer -- no __FILE__ and __LINE__ */ +int Mmsg(POOLMEM **msgbuf, const char *fmt,...); +int Mmsg(POOLMEM *&msgbuf, const char *fmt,...); +int Mmsg(POOL_MEM &msgbuf, const char *fmt,...); + +#define MmsgD0(level, msgbuf, fmt) \ + { Mmsg(msgbuf, fmt); Dmsg1(level, "%s", msgbuf); } +#define MmsgD1(level, msgbuf, fmt, a1) \ + { Mmsg(msgbuf, fmt, a1); Dmsg1(level, "%s", msgbuf); } +#define MmsgD2(level, msgbuf, fmt, a1, a2) \ + { Mmsg(msgbuf, fmt, a1, a2); Dmsg1(level, "%s", msgbuf); } +#define MmsgD3(level, msgbuf, fmt, a1, a2, a3) \ + { Mmsg(msgbuf, fmt, a1, a2, a3); Dmsg1(level, "%s", msgbuf); } +#define MmsgD4(level, msgbuf, fmt, a1, a2, a3, a4) \ + { Mmsg(msgbuf, fmt, a1, a2, a3, a4); Dmsg1(level, "%s", msgbuf); } +#define MmsgD5(level, msgbuf, fmt, a1, a2, a3, a4, a5) \ + { Mmsg(msgbuf, fmt, a1, a2, a3, a4, a5); Dmsg1(level, "%s", msgbuf); } +#define MmsgD6(level, msgbuf, fmt, a1, a2, a3, a4, a5, a6) \ + { Mmsg(msgbuf, fmt, a1, a2, a3, a4, a5, a6); Dmsg1(level, "%s", msgbuf); } + +#define MmsgT0(level, msgbuf, fmt) \ + { Mmsg(msgbuf, fmt); Tmsg1(level, "%s", msgbuf); } +#define MmsgT1(level, msgbuf, fmt, a1) \ + { Mmsg(msgbuf, fmt, a1); Tmsg1(level, "%s", msgbuf); } +#define MmsgT2(level, msgbuf, fmt, a1, a2) \ + { Mmsg(msgbuf, fmt, a1, a2); Tmsg1(level, "%s", msgbuf); } +#define MmsgT3(level, msgbuf, fmt, a1, a2, a3) \ + { Mmsg(msgbuf, fmt, a1, a2, a3); Tmsg1(level, "%s", msgbuf); } +#define MmsgT4(level, msgbuf, fmt, a1, a2, a3, a4) \ + { Mmsg(msgbuf, fmt, a1, a2, a3, a4); Tmsg1(level, "%s", msgbuf); } +#define MmsgT5(level, msgbuf, fmt, a1, a2, a3, a4, a5) \ + { Mmsg(msgbuf, fmt, a1, a2, a3, a4, a5); Tmsg1(level, "%s", msgbuf); } +#define MmsgT6(level, msgbuf, fmt, a1, a2, a3, a4, a5, a6) \ + { Mmsg(msgbuf, fmt, a1, a2, a3, a4, a5, a6); Tmsg1(level, "%s", msgbuf); } + +class JCR; +void d_msg(const char *file, int line, int64_t level, const char *fmt,...); +void p_msg(const char *file, int line, int level, const char *fmt,...); +void e_msg(const char *file, int line, int type, int level, const char *fmt,...); +void j_msg(const char *file, int line, JCR *jcr, int type, utime_t mtime, const char *fmt,...); +void q_msg(const char *file, int line, JCR *jcr, int type, utime_t mtime, const char *fmt,...); +int m_msg(const char *file, int line, POOLMEM **msgbuf, const char *fmt,...); +int m_msg(const char *file, int line, POOLMEM *&pool_buf, const char *fmt, ...); +void t_msg(const char *file, int line, int64_t level, const char *fmt,...); + + +/** Use our strdup with smartalloc */ +#ifndef HAVE_WXCONSOLE +#undef strdup +#define strdup(buf) bad_call_on_strdup_use_bstrdup(buf) +#else +/* Groan, WxWidgets has its own way of doing NLS so cleanup */ +#ifndef ENABLE_NLS +#undef _ +#undef setlocale +#undef textdomain +#undef 
bindtextdomain +#endif +#endif + +/** Use our fgets which handles interrupts */ +#undef fgets +#define fgets(x,y,z) bfgets((x), (y), (z)) + +/** Use our sscanf, which is safer and works with known sizes */ +#define sscanf bsscanf + +/* Use our fopen, which uses the CLOEXEC flag */ +#define fopen bfopen + +#ifdef DEBUG +#define bstrdup(str) strcpy((char *)b_malloc(__FILE__,__LINE__,strlen((str))+1),(str)) +#else +#define bstrdup(str) strcpy((char *)bmalloc(strlen((str))+1),(str)) +#endif + +#ifdef DEBUG +#define bmalloc(size) b_malloc(__FILE__, __LINE__, (size)) +#endif + +/** Macro to simplify free/reset pointers */ +#define bfree_and_null(a) do{if(a){free(a); (a)=NULL;}} while(0) + +/** + * Replace codes needed in both file routines and non-file routines + * Job replace codes -- in "replace" + */ +#define REPLACE_ALWAYS 'a' +#define REPLACE_IFNEWER 'w' +#define REPLACE_NEVER 'n' +#define REPLACE_IFOLDER 'o' + +/** This probably should be done on a machine by machine basis, but it works */ +/** This is critical for the smartalloc routines to properly align memory */ +#define ALIGN_SIZE (sizeof(double)) +#define BALIGN(x) (((x) + ALIGN_SIZE - 1) & ~(ALIGN_SIZE -1)) + + +/* ============================================================= + * OS Dependent defines + * ============================================================= + */ +#if defined (__digital__) && defined (__unix__) +/* Tru64 - has 64 bit fseeko and ftello */ +#define fseeko fseek +#define ftello ftell +#endif /* digital stuff */ + +#ifndef HAVE_FSEEKO +/* This OS does not handle 64 bit fseeks and ftells */ +#define fseeko fseek +#define ftello ftell +#endif + + +#ifdef HAVE_WIN32 +/* + * Windows + */ +#define PathSeparator '\\' +#define PathSeparatorUp "..\\" +#define PathSeparatorCur ".\\" + +inline bool IsPathSeparator(int ch) { return ch == '/' || ch == '\\'; } +inline char *first_path_separator(char *path) { return strpbrk(path, "/\\"); } +inline const char *first_path_separator(const char *path) { return strpbrk(path, "/\\"); } + +extern void pause_msg(const char *file, const char *func, int line, const char *msg); +#define pause(msg) if (debug_level) pause_msg(__FILE__, __func__, __LINE__, (msg)) + +#else /* Unix/Linux */ +#define PathSeparator '/' +#define PathSeparatorUp "../" +#define PathSeparatorCur "./" + +/* Define Winsock functions if we aren't on Windows */ + +#define WSA_Init() 0 /* 0 = success */ +#define WSACleanup() 0 /* 0 = success */ + +inline bool IsPathSeparator(int ch) { return ch == '/'; } +inline char *first_path_separator(char *path) { return strchr(path, '/'); } +inline const char *first_path_separator(const char *path) { return strchr(path, '/'); } +#define pause(msg) +#endif /* HAVE_WIN32 */ + +#ifdef HAVE_DARWIN_OS +/* Apparently someone forgot to wrap getdomainname as a C function */ +#ifdef __cplusplus +extern "C" { +#endif +int getdomainname(char *name, int namelen); +#ifdef __cplusplus +} +#endif +#endif /* HAVE_DARWIN_OS */ + + +/* **** Unix Systems **** */ +#ifdef HAVE_SUN_OS +/* + * On Solaris 2.5/2.6/7 and 8, threads are not timesliced by default, + * so we need to explictly increase the conncurrency level. 
+ */ +#ifdef USE_THR_SETCONCURRENCY +#include +#define set_thread_concurrency(x) thr_setconcurrency(x) +extern int thr_setconcurrency(int); +#define SunOS 1 +#else +#define set_thread_concurrency(x) +#define thr_setconcurrency(x) +#endif + +#else +#define set_thread_concurrency(x) +#endif /* HAVE_SUN_OS */ + + +#ifdef HAVE_OSF1_OS +#ifdef __cplusplus +extern "C" { +#endif +int fchdir(int filedes); +long gethostid(void); +int getdomainname(char *name, int namelen); +#ifdef __cplusplus +} +#endif +#endif /* HAVE_OSF1_OS */ + + +#ifdef HAVE_HPUX_OS +# undef h_errno +extern int h_errno; +/** the {get,set}domainname() functions exist in HPUX's libc. + * the configure script detects that correctly. + * the problem is no system headers declares the prototypes for these functions + * this is done below + */ +#ifdef __cplusplus +extern "C" { +#endif +int getdomainname(char *name, int namlen); +int setdomainname(char *name, int namlen); +#ifdef __cplusplus +} +#endif +#endif /* HAVE_HPUX_OS */ + + +/** Disabled because it breaks internationalisation... +#undef HAVE_SETLOCALE +#ifdef HAVE_SETLOCALE +#include +#else +#define setlocale(x, y) ("ANSI_X3.4-1968") +#endif +#ifdef HAVE_NL_LANGINFO +#include +#else +#define nl_langinfo(x) ("ANSI_X3.4-1968") +#endif +*/ + +/** Determine endianes */ +static inline bool bigendian() { return htonl(1) == 1L; } + +#ifndef __GNUC__ +#define __PRETTY_FUNCTION__ __func__ +#endif +#ifdef HAVE_SUN_OS +#undef ENTER_LEAVE +#endif +#ifdef ENTER_LEAVE +#define Enter(lvl) Dmsg1(lvl, "Enter: %s\n", __PRETTY_FUNCTION__) +#define Leave(lvl) Dmsg1(lvl, "Leave: %s\n", __PRETTY_FUNCTION__) +#else +#define Enter(lvl) +#define Leave(lvl) +#endif + +#ifdef __GNUC__x +# define CHECK_FORMAT(fun, f, a) __attribute__ ((format (fun, f, a))) +#else +# define CHECK_FORMAT(fun, f, a) +#endif + +#endif /* _BACONFIG_H */ diff --git a/src/bacula.h b/src/bacula.h new file mode 100644 index 00000000..1fee6214 --- /dev/null +++ b/src/bacula.h @@ -0,0 +1,205 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2019 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * bacula.h -- main header file to include in all Bacula source + * + */ + +#ifndef _BACULA_H +#define _BACULA_H 1 + +/* Disable FORTIFY_SOURCE, because bacula uses is own memory + * manager + */ +#ifdef _FORTIFY_SOURCE +#undef _FORTIFY_SOURCE +#endif + +#ifdef __cplusplus +/* Workaround for SGI IRIX 6.5 */ +#define _LANGUAGE_C_PLUS_PLUS 1 +#endif + +#if defined(HAVE_WIN32) +#if defined(HAVE_MINGW) +#include "winhdrs.h" +#else +#error "Only MINGW is supported" +#include "winconfig.h" +#endif +#else +#include "config.h" +#endif +#define __CONFIG_H + + +#define _REENTRANT 1 +#define _THREAD_SAFE 1 +#define _POSIX_PTHREAD_SEMANTICS 1 + + +/* System includes */ +#if defined(HAVE_STDINT_H) +#ifndef __sgi +#include +#endif +#elif defined(HAVE_INTTYPES_H) +#include +#endif +#if defined(HAVE_STDARG_H) +#include +#endif +#include +#if defined(HAVE_STDLIB_H) +#include +#endif +#if HAVE_UNISTD_H +# ifdef HAVE_HPUX_OS +# undef _INCLUDE_POSIX1C_SOURCE +# endif +#include +#endif +#if HAVE_ALLOCA_H +#include +#endif +#if defined(_MSC_VER) +#include +#include +#include +#endif +#include +#include + +/* O_NOATIME is defined at fcntl.h when supported */ +#ifndef O_NOATIME +#define O_NOATIME 0 +#endif + +#if defined(_MSC_VER) +extern "C" { +#include "getopt.h" +} +#endif + +#ifdef xxxxx +#ifdef HAVE_GETOPT_LONG +#include +#else +#include "lib/getopt.h" +#endif +#endif + +#include +#include +#include +#include +#ifndef _SPLINT_ +#include +#endif +#if HAVE_LIMITS_H +#include +#endif +#include +#include +#include +#include +#include +#ifdef HAVE_SYS_BITYPES_H +#include +#endif +#include +#ifdef HAVE_SYS_SOCKET_H +#include +#endif +#if defined(HAVE_WIN32) & !defined(HAVE_MINGW) +#include +#endif +#if !defined(HAVE_WIN32) & !defined(HAVE_MINGW) +#include +#endif +#include +#if HAVE_SYS_WAIT_H +#include +#endif +#include +#include +#include + +#ifdef HAVE_OPENSSL +/* fight OpenSSL namespace pollution */ +#define STORE OSSL_STORE +#include +#include +#include +#include +#include +#include +#undef STORE +#endif + +/* Local Bacula includes. Be sure to put all the system + * includes before these. + */ +#if defined(HAVE_WIN32) +//#include +#include "compat.h" +#endif + +#include "version.h" +#include "bc_types.h" +#include "streams.h" +#include "filetypes.h" +#include "baconfig.h" +#include "lib/lib.h" + +/* manually enable feature that you want to test in DEVELOPER mode*/ +#ifdef DEVELOPER +#endif + +#ifdef DDE_EXTRA_CHECKS +const bool have_dde_extra_check = true; +#else +const bool have_dde_extra_check = false; +#endif +/* + * For wx-console compiles, we undo some Bacula defines. + * This prevents conflicts between wx-Widgets and Bacula. + * In wx-console files that malloc or free() Bacula structures + * config/resources and interface to the Bacula libraries, + * you must use bmalloc() and bfree(). + */ +#ifdef HAVE_WXCONSOLE +#undef New +#undef _ +#undef free +#undef malloc +#endif + +#if defined(HAVE_WIN32) +#include "winapi.h" +#include "winhost.h" +#else +#include "host.h" +#endif + +#ifndef HAVE_ZLIB_H +#undef HAVE_LIBZ /* no good without headers */ +#endif + +#endif diff --git a/src/bc_types.h b/src/bc_types.h new file mode 100644 index 00000000..40d99750 --- /dev/null +++ b/src/bc_types.h @@ -0,0 +1,252 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2019 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + Define integer types for Bacula -- Kern Sibbald + + Integer types. These types should be be used in all + contexts in which the length of an integer stored on + removable media must be known regardless of the + architecture of the platform. + + Bacula types are: + + int8_t, int16_t, int32_t, int64_t + uint8_t, uint16_t, uint32_t, uint64_t + + Also, we define types such as file address lengths. + */ + + +#ifndef __bc_types_INCLUDED +#define __bc_types_INCLUDED + +/* + * These are the sizes of the current definitions of database + * Ids. In general, FileId_t can be set to uint64_t and it + * *should* work. Users have reported back that it does work + * for PostgreSQL. For the other types, all places in Bacula + * have been converted, but no one has actually tested it. + * In principle, the only field that really should need to be + * 64 bits is the FileId_t + */ +typedef uint64_t FileId_t; +typedef uint32_t DBId_t; /* general DB id type */ +typedef uint32_t JobId_t; + + +typedef char POOLMEM; + + +/* Types */ + +/* If sys/types.h does not supply intXX_t, supply them ourselves */ +/* (or die trying) */ + +#ifndef HAVE_U_INT +typedef unsigned int u_int; +#endif + +#ifndef HAVE_INTXX_T +# if (SIZEOF_CHAR == 1) +typedef signed char int8_t; +# else +# error "8 bit int type not found." +# endif +# if (SIZEOF_SHORT_INT == 2) +typedef short int int16_t; +# else +# error "16 bit int type not found." +# endif +# if (SIZEOF_INT == 4) +typedef int int32_t; +# else +# error "32 bit int type not found." +# endif +#endif + +/* If sys/types.h does not supply u_intXX_t, supply them ourselves */ +#ifndef HAVE_U_INTXX_T +# ifdef HAVE_UINTXX_T +typedef uint8_t u_int8_t; +typedef uint16_t u_int16_t; +typedef uint32_t u_int32_t; +# define HAVE_U_INTXX_T 1 +# else +# if (SIZEOF_CHAR == 1) +typedef unsigned char u_int8_t; +# else +# error "8 bit int type not found. Required!" +# endif +# if (SIZEOF_SHORT_INT == 2) +typedef unsigned short int u_int16_t; +# else +# error "16 bit int type not found. Required!" +# endif +# if (SIZEOF_INT == 4) +typedef unsigned int u_int32_t; +# else +# error "32 bit int type not found. Required!" +# endif +# endif +#endif + +/* 64-bit types */ +#ifndef HAVE_INT64_T +# if (SIZEOF_LONG_LONG_INT == 8) +typedef long long int int64_t; +# define HAVE_INT64_T 1 +# else +# if (SIZEOF_LONG_INT == 8) +typedef long int int64_t; +# define HAVE_INT64_T 1 +# endif +# endif +#endif + +#ifndef HAVE_INTMAX_T +# ifdef HAVE_INT64_T +typedef int64_t intmax_t; +# else +# error "64 bit type not found. Required!" +# endif +#endif + +#ifndef HAVE_U_INT64_T +# if (SIZEOF_LONG_LONG_INT == 8) +typedef unsigned long long int u_int64_t; +# define HAVE_U_INT64_T 1 +# else +# if (SIZEOF_LONG_INT == 8) +typedef unsigned long int u_int64_t; +# define HAVE_U_INT64_T 1 +# else +# error "64 bit type not found. Required!" +# endif +# endif +#endif + +#ifndef HAVE_U_INTMAX_T +# ifdef HAVE_U_INT64_T +typedef u_int64_t u_intmax_t; +# else +# error "64 bit type not found. Required!" 
+# endif +#endif + +#ifndef HAVE_INTPTR_T +#define HAVE_INTPTR_T 1 +# if (SIZEOF_INT_P == 4) +typedef int32_t intptr_t; +# else +# if (SIZEOF_INT_P == 8) +typedef int64_t intptr_t; +# else +# error "Can't find sizeof pointer. Required!" +# endif +# endif +#endif + +#ifndef HAVE_UINTPTR_T +#define HAVE_UINTPTR_T 1 +# if (SIZEOF_INT_P == 4) +typedef uint32_t uintptr_t; +# else +# if (SIZEOF_INT_P == 8) +typedef uint64_t uintptr_t; +# else +# error "Can't find sizeof pointer. Required!" +# endif +# endif +#endif + +/* Limits for the above types. */ +#undef INT8_MIN +#undef INT8_MAX +#undef UINT8_MAX +#undef INT16_MIN +#undef INT16_MAX +#undef UINT16_MAX +#undef INT32_MIN +#undef INT32_MAX +#undef UINT32_MAX + +#define INT8_MIN (-127-1) +#define INT8_MAX (127) +#define UINT8_MAX (255u) +#define INT16_MIN (-32767-1) +#define INT16_MAX (32767) +#define UINT16_MAX (65535u) +#define INT32_MIN (-2147483647-1) +#define INT32_MAX (2147483647) +#define UINT32_MAX (4294967295u) + +typedef double float64_t; +typedef float float32_t; + + +/* Define the uint versions actually used in Bacula */ +#ifndef uint8_t +#define uint8_t u_int8_t +#define uint16_t u_int16_t +#define uint32_t u_int32_t +#define uint64_t u_int64_t +#define uintmax_t u_intmax_t +#endif + +/* Bacula time -- Unix time with microseconds */ +#define btime_t int64_t +/* Unix time (time_t) widened to 64 bits */ +#define utime_t int64_t + +#ifndef HAVE_SOCKLEN_T +#define socklen_t int +#endif + +#ifndef HAVE_WIN32 +#ifndef SOCKET_ERROR +#define SOCKET_ERROR (-1) +#endif +#endif + +#ifdef HAVE_OLD_SOCKOPT +#define sockopt_val_t char * +#else +#define sockopt_val_t void * +#endif + +/* + * Status codes returned by create_file() + * Used in findlib, filed, and plugins + */ +enum { + CF_SKIP = 1, /* skip file (not newer or something) */ + CF_ERROR, /* error creating file */ + CF_EXTRACT, /* file created, data to extract */ + CF_CREATED, /* file created, no data to extract */ + CF_CORE /* let bacula core handle the file creation */ +}; + +#ifndef MAX +#define MAX(a, b) ((a) > (b) ? (a) : (b)) +#endif +#ifndef MIN +#define MIN(a, b) ((a) < (b) ? (a) : (b)) +#endif + +#endif /* __bc_types_INCLUDED */ diff --git a/src/bsd b/src/bsd new file mode 100644 index 00000000..f276c886 --- /dev/null +++ b/src/bsd @@ -0,0 +1,4 @@ +# +# Copyright (C) 2000-2019 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# diff --git a/src/c b/src/c new file mode 100644 index 00000000..5c98a0d1 --- /dev/null +++ b/src/c @@ -0,0 +1,18 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2019 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ diff --git a/src/c.scr b/src/c.scr new file mode 100644 index 00000000..206188e7 --- /dev/null +++ b/src/c.scr @@ -0,0 +1,8 @@ +l Copyright (C) +-1 +mark +l */ +mark +db +inc /home/kern/bacula/k/src/c +e diff --git a/src/cats/Makefile.in b/src/cats/Makefile.in new file mode 100644 index 00000000..22d10b85 --- /dev/null +++ b/src/cats/Makefile.in @@ -0,0 +1,323 @@ +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +@MCOMMON@ + +srcdir = . +.PATH: . + +# one up +basedir = .. +# top dir +topdir = ../.. +# this dir relative to top dir +thisdir = src/cats + +#CPPFLAGS += -DCATS_PRIVATE_DBI @DBI_DBD_DRIVERDIR@ +CPPFLAGS += -DCATS_PRIVATE_DBI + +DEBUG=@DEBUG@ +MKDIR=$(topdir)/autoconf/mkinstalldirs + +first_rule: all +dummy: + +# +# include files installed when using libtool +# +INCLUDE_FILES = cats.h protos.h sql_cmds.h + +MYSQL_INCLUDE = @MYSQL_INCLUDE@ +MYSQL_LIBS = @MYSQL_LIBS@ +MYSQL_SRCS = mysql.c +MYSQL_OBJS = $(MYSQL_SRCS:.c=.o) +MYSQL_LOBJS = $(MYSQL_SRCS:.c=.lo) + +POSTGRESQL_INCLUDE = @POSTGRESQL_INCLUDE@ +POSTGRESQL_LIBS = @POSTGRESQL_LIBS@ +POSTGRESQL_SRCS = postgresql.c +POSTGRESQL_OBJS = $(POSTGRESQL_SRCS:.c=.o) +POSTGRESQL_LOBJS = $(POSTGRESQL_SRCS:.c=.lo) + +SQLITE_INCLUDE = @SQLITE_INCLUDE@ +SQLITE_LIBS = @SQLITE_LIBS@ +SQLITE_SRCS = sqlite.c +SQLITE_OBJS = $(SQLITE_SRCS:.c=.o) +SQLITE_LOBJS = $(SQLITE_SRCS:.c=.lo) + +#DBI_INCLUDE = @DBI_INCLUDE@ +#DBI_LIBS = @DBI_LIBS@ +#DBI_SRCS = dbi.c +#DBI_OBJS = $(DBI_SRCS:.c=.o) +#DBI_LOBJS = $(DBI_SRCS:.c=.lo) + +DB_LIBS=@DB_LIBS@ + +CATS_SRCS = mysql.c postgresql.c sqlite.c +LIBBACSQL_SRCS = bvfs.c cats.c sql.c sql_cmds.c sql_create.c sql_delete.c \ + sql_find.c sql_get.c sql_list.c sql_update.c +LIBBACSQL_OBJS = $(LIBBACSQL_SRCS:.c=.o) +LIBBACCATS_OBJS = $(CATS_SRCS:.c=.o) +LIBBACSQL_LOBJS = $(LIBBACSQL_SRCS:.c=.lo) + +LIBBACSQL_LT_RELEASE = @LIBBACSQL_LT_RELEASE@ +LIBBACCATS_LT_RELEASE = @LIBBACCATS_LT_RELEASE@ + +.SUFFIXES: .c .o .lo +.PHONY: +.DONTCARE: + +# inference rules +.c.o: + @echo "Compiling $<" + $(NO_ECHO)$(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) $< + +.c.lo: + @echo "Compiling $<" + $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) $< + +$(MYSQL_LOBJS): + @echo "Compiling $(@:.lo=.c)" + $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(MYSQL_INCLUDE) $(DINCLUDE) $(CFLAGS) $(@:.lo=.c) + +$(POSTGRESQL_LOBJS): + @echo "Compiling $(@:.lo=.c)" + $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(POSTGRESQL_INCLUDE) $(DINCLUDE) $(CFLAGS) $(@:.lo=.c) + +$(SQLITE_LOBJS): + @echo "Compiling $(@:.lo=.c)" + $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(SQLITE_INCLUDE) $(DINCLUDE) $(CFLAGS) $(@:.lo=.c) + +#$(DBI_LOBJS): +# @echo "Compiling $(@:.lo=.c)" +# $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DBI_INCLUDE) $(DINCLUDE) $(CFLAGS) $(@:.lo=.c) + +$(MYSQL_OBJS): + @echo "Compiling $(@:.o=.c)" + $(NO_ECHO)$(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(MYSQL_INCLUDE) $(DINCLUDE) $(CFLAGS) $(@:.o=.c) + +$(POSTGRESQL_OBJS): + @echo "Compiling $(@:.o=.c)" + $(NO_ECHO)$(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(POSTGRESQL_INCLUDE) $(DINCLUDE) $(CFLAGS) $(@:.o=.c) + 
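# [Editor's note: illustrative sketch only, not part of the imported Makefile.]
# The @...@ placeholders in this file (MYSQL_INCLUDE, POSTGRESQL_INCLUDE,
# SQLITE_INCLUDE, DB_BACKENDS, SHARED_CATALOG_TARGETS, ...) are filled in by
# ./configure. In a PostgreSQL-only build, for example, @DB_BACKENDS@ would
# expand to just "postgresql" and @POSTGRESQL_INCLUDE@ to something like
# -I/usr/include/postgresql (path is an assumption), so only that backend's
# objects and the libbaccats-postgresql.la target defined further below are
# built and installed by the loops over @DB_BACKENDS@.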
+$(SQLITE_OBJS): + @echo "Compiling $(@:.o=.c)" + $(NO_ECHO)$(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(SQLITE_INCLUDE) $(DINCLUDE) $(CFLAGS) $(@:.o=.c) + +#$(DBI_OBJS): +# @echo "Compiling $(@:.o=.c)" +# $(NO_ECHO)$(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DBI_INCLUDE) $(DINCLUDE) $(CFLAGS) $(@:.o=.c) +#------------------------------------------------------------------------- +all: Makefile libbacsql$(DEFAULT_ARCHIVE_TYPE) libbaccats$(DEFAULT_ARCHIVE_TYPE) @SHARED_CATALOG_TARGETS@ + @echo "==== Make of sqllibs is good ====" + @echo " " + +libbacsql.a: $(LIBBACSQL_OBJS) + @echo "Making $@ ..." + $(AR) rc $@ $(LIBBACSQL_OBJS) + $(RANLIB) $@ + +libbaccats.a: $(LIBBACCATS_OBJS) + @echo "Making $@ ..." + $(AR) rc $@ $(LIBBACCATS_OBJS) + $(RANLIB) $@ + +libbacsql.la: Makefile $(LIBBACSQL_LOBJS) + @echo "Making $@ ..." + $(LIBTOOL_LINK) $(CXX) $(DEFS) $(DEBUG) $(LDFLAGS) -o $@ $(LIBBACSQL_LOBJS) -export-dynamic -rpath $(libdir) -release $(LIBBACSQL_LT_RELEASE) $(DB_LIBS) + +libbaccats.la: Makefile cats_null.lo + @echo "Making $@ ..." + $(LIBTOOL_LINK) $(CXX) $(DEFS) $(DEBUG) $(LDFLAGS) -o $@ cats_null.lo -export-dynamic -rpath $(libdir) -release $(LIBBACCATS_LT_RELEASE) + +libbaccats-mysql.la: Makefile $(MYSQL_LOBJS) + @echo "Making $@ ..." + $(LIBTOOL_LINK) $(CXX) $(DEFS) $(DEBUG) $(LDFLAGS) -o $@ $(MYSQL_LOBJS) -export-dynamic -rpath $(libdir) -release $(LIBBACCATS_LT_RELEASE) \ + -soname libbaccats-$(LIBBACCATS_LT_RELEASE).so $(MYSQL_LIBS) + +libbaccats-postgresql.la: Makefile $(POSTGRESQL_LOBJS) + @echo "Making $@ ..." + $(LIBTOOL_LINK) $(CXX) $(DEFS) $(DEBUG) $(LDFLAGS) -o $@ $(POSTGRESQL_LOBJS) -export-dynamic -rpath $(libdir) -release $(LIBBACCATS_LT_RELEASE) \ + -soname libbaccats-$(LIBBACCATS_LT_RELEASE).so $(POSTGRESQL_LIBS) + +libbaccats-sqlite3.la: Makefile $(SQLITE_LOBJS) + @echo "Making $@ ..." + $(LIBTOOL_LINK) $(CXX) $(DEFS) $(DEBUG) $(LDFLAGS) -o $@ $(SQLITE_LOBJS) -export-dynamic -rpath $(libdir) -release $(LIBBACCATS_LT_RELEASE) \ + -soname libbaccats-$(LIBBACCATS_LT_RELEASE).so $(SQLITE_LIBS) +#libbaccats-dbi.la: Makefile $(DBI_LOBJS) +# @echo "Making $@ ..." +# $(LIBTOOL_LINK) $(CXX) $(DEFS) $(DEBUG) $(LDFLAGS) -o $@ $(DBI_LOBJS) -export-dynamic -rpath $(libdir) -release $(LIBBACCATS_LT_RELEASE) \ +# -soname libbaccats-$(LIBBACCATS_LT_RELEASE).so $(DBI_LIBS) + +Makefile: $(srcdir)/Makefile.in $(topdir)/config.status + cd $(topdir) \ + && CONFIG_FILES=$(thisdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status + +libtool-clean: + @find . 
-name '*.lo' -print | xargs $(LIBTOOL_CLEAN) $(RMF) + @$(RMF) *.la + @$(RMF) -r .libs _libs + +clean: libtool-clean + @$(RMF) core a.out *.o *.bak *~ *.intpro *.extpro 1 2 3 + +realclean: clean + $(RMF) tags + + $(RMF) create_bacula_database update_bacula_tables make_bacula_tables + $(RMF) grant_bacula_privileges drop_bacula_tables drop_bacula_database + + $(RMF) create_mysql_database update_mysql_tables make_mysql_tables + $(RMF) grant_mysql_privileges drop_mysql_tables drop_mysql_database + + $(RMF) create_postgresql_database update_postgresql_tables make_postgresql_tables + $(RMF) grant_postgresql_privileges drop_postgresql_tables drop_postgresql_database + + $(RMF) create_sqlite_database update_sqlite_tables make_sqlite_tables + $(RMF) grant_sqlite_privileges drop_sqlite_tables drop_sqlite_database + + $(RMF) create_sqlite3_database update_sqlite3_tables make_sqlite3_tables + $(RMF) grant_sqlite3_privileges drop_sqlite3_tables drop_sqlite3_database + + $(RMF) mysql sqlite postgresql + $(RMF) make_catalog_backup make_catalog_backup.pl delete_catalog_backup + +distclean: realclean + if test $(srcdir) = .; then $(MAKE) realclean; fi + (cd $(srcdir); $(RMF) Makefile) + +devclean: realclean + if test $(srcdir) = .; then $(MAKE) realclean; fi + (cd $(srcdir); $(RMF) Makefile) + +install-includes: + $(MKDIR) $(DESTDIR)/$(includedir)/bacula/sql + for I in $(INCLUDE_FILES); do \ + $(INSTALL_DATA) $$I $(DESTDIR)$(includedir)/bacula/sql/`basename $$I`; \ + done + +uninstall-includes: + for I in $(INCLUDE_FILES); do \ + $(RMF) $(DESTDIR)$(includedir)/bacula/sql/`basename $$I`; \ + done + +libtool-install: all + $(MKDIR) $(DESTDIR)$(libdir) + $(RMF) $(DESTDIR)$(libdir)/libbacsql-*.so $(DESTDIR)$(libdir)/libbacsql.la + $(RMF) $(DESTDIR)$(libdir)/libbaccats-*.so $(DESTDIR)$(libdir)/libbaccats.la + $(LIBTOOL_INSTALL_FINISH) $(INSTALL_LIB) libbacsql.la $(DESTDIR)$(libdir) + $(LIBTOOL_INSTALL_FINISH) $(INSTALL_LIB) libbaccats.la $(DESTDIR)$(libdir) + for db_type in @DB_BACKENDS@; do \ + $(LIBTOOL_INSTALL_FINISH) $(INSTALL_LIB) libbaccats-$${db_type}.la $(DESTDIR)$(libdir); \ + done + ./install-default-backend @DEFAULT_DB_TYPE@ $(LIBBACCATS_LT_RELEASE) $(DESTDIR)$(libdir) + +libtool-uninstall: + $(LIBTOOL_UNINSTALL) $(RMF) $(DESTDIR)$(libdir)/libbacsql.la + $(LIBTOOL_UNINSTALL) $(RMF) $(DESTDIR)$(libdir)/libbaccats.la + for db_type in @DB_BACKENDS@; do \ + $(LIBTOOL_UNINSTALL) $(RMF) $(DESTDIR)$(libdir)/libbacsql-$${db_type}.la; \ + done + +install: @LIBTOOL_INSTALL_TARGET@ @INCLUDE_INSTALL_TARGET@ + for db_type in @DB_BACKENDS@; do \ + if [ -f create_$${db_type}_database ]; then \ + $(INSTALL_SCRIPT) create_$${db_type}_database $(DESTDIR)$(scriptdir)/create_$${db_type}_database; \ + $(INSTALL_SCRIPT) update_$${db_type}_tables $(DESTDIR)$(scriptdir)/update_$${db_type}_tables; \ + $(INSTALL_SCRIPT) make_$${db_type}_tables $(DESTDIR)$(scriptdir)/make_$${db_type}_tables; \ + $(INSTALL_SCRIPT) grant_$${db_type}_privileges $(DESTDIR)$(scriptdir)/grant_$${db_type}_privileges; \ + $(INSTALL_SCRIPT) drop_$${db_type}_tables $(DESTDIR)$(scriptdir)/drop_$${db_type}_tables; \ + $(INSTALL_SCRIPT) drop_$${db_type}_database $(DESTDIR)$(scriptdir)/drop_$${db_type}_database; \ + fi; \ + done + + $(INSTALL_SCRIPT) create_bacula_database $(DESTDIR)$(scriptdir)/create_bacula_database + $(INSTALL_SCRIPT) update_bacula_tables $(DESTDIR)$(scriptdir)/update_bacula_tables + $(INSTALL_SCRIPT) make_bacula_tables $(DESTDIR)$(scriptdir)/make_bacula_tables + $(INSTALL_SCRIPT) grant_bacula_privileges 
$(DESTDIR)$(scriptdir)/grant_bacula_privileges + $(INSTALL_SCRIPT) drop_bacula_tables $(DESTDIR)$(scriptdir)/drop_bacula_tables + $(INSTALL_SCRIPT) drop_bacula_database $(DESTDIR)$(scriptdir)/drop_bacula_database + + @filename=make_catalog_backup.pl; \ + if test -f $(DESTDIR)$(scriptdir)/$$filename; then \ + destname=$$filename.new; \ + echo " ==> Found existing $$filename, installing new file as $$destname"; \ + else \ + destname=$$filename; \ + fi; \ + echo "$(INSTALL_SCRIPT) $$filename $(DESTDIR)$(scriptdir)/$$destname"; \ + $(INSTALL_SCRIPT) $$filename $(DESTDIR)$(scriptdir)/$$destname + @filename=make_catalog_backup; \ + if test -f $(DESTDIR)$(scriptdir)/$$filename; then \ + destname=$$filename.new; \ + echo " ==> Found existing $$filename, installing new file as $$destname"; \ + else \ + destname=$$filename; \ + fi; \ + echo "$(INSTALL_SCRIPT) $$filename $(DESTDIR)$(scriptdir)/$$destname"; \ + $(INSTALL_SCRIPT) $$filename $(DESTDIR)$(scriptdir)/$$destname + @filename=delete_catalog_backup; \ + if test -f $(DESTDIR)$(scriptdir)/$$filename; then \ + destname=$$filename.new; \ + echo " ==> Found existing $$filename, installing new file as $$destname"; \ + else \ + destname=$$filename; \ + fi; \ + echo "$(INSTALL_SCRIPT) $$filename $(DESTDIR)$(scriptdir)/$$destname"; \ + $(INSTALL_SCRIPT) $$filename $(DESTDIR)$(scriptdir)/$$destname + +uninstall: @LIBTOOL_UNINSTALL_TARGET@ @INCLUDE_UNINSTALL_TARGET@ + @for db_type in @DB_BACKENDS@; do \ + (cd $(DESTDIR)$(scriptdir); $(RMF) create_$${db_type}_database); \ + (cd $(DESTDIR)$(scriptdir); $(RMF) update_$${db_type}_tables); \ + (cd $(DESTDIR)$(scriptdir); $(RMF) make_$${db_type}_tables); \ + (cd $(DESTDIR)$(scriptdir); $(RMF) grant_$${db_type}_privileges); \ + (cd $(DESTDIR)$(scriptdir); $(RMF) drop_$${db_type}_tables); \ + (cd $(DESTDIR)$(scriptdir); $(RMF) drop_$${db_type}_database); \ + done + + (cd $(DESTDIR)$(scriptdir); $(RMF) create_bacula_database) + (cd $(DESTDIR)$(scriptdir); $(RMF) update_bacula_tables) + (cd $(DESTDIR)$(scriptdir); $(RMF) make_bacula_tables) + (cd $(DESTDIR)$(scriptdir); $(RMF) grant_bacula_privileges) + (cd $(DESTDIR)$(scriptdir); $(RMF) drop_bacula_tables) + (cd $(DESTDIR)$(scriptdir); $(RMF) drop_bacula_database) + + (cd $(DESTDIR)$(scriptdir); $(RMF) make_catalog_backup) + (cd $(DESTDIR)$(scriptdir); $(RMF) make_catalog_backup.pl) + (cd $(DESTDIR)$(scriptdir); $(RMF) delete_catalog_backup) + +# Semi-automatic generation of dependencies: +# Use gcc -M because X11 `makedepend' doesn't work on all systems +# and it also includes system headers. +# `semi'-automatic since dependencies are generated at distribution time. 
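# [Editor's note: illustrative usage sketch, not part of the imported Makefile.]
# Assuming a source tree already prepared by ./configure, the dependency list
# kept at the end of this Makefile can be refreshed and the catalog libraries
# rebuilt with:
#   cd src/cats
#   make depend    # rewrites everything after the "DO NOT DELETE" marker
#   make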
+ +depend: + @$(MV) Makefile Makefile.bak + @$(SED) "/^# DO NOT DELETE:/,$$ d" Makefile.bak > Makefile + @$(ECHO) "# DO NOT DELETE: nice dependency list follows" >> Makefile + @for src in $(LIBBACSQL_SRCS); do \ + $(CXX) -S -M -MT `basename $$src .c`$(DEFAULT_OBJECT_TYPE) $(CPPFLAGS) $(XINC) $(DEFS) -I$(srcdir) -I$(basedir) $$src >> Makefile; \ + done + @for src in $(MYSQL_SRCS); do \ + $(CXX) -S -M -MT `basename $$src .c`$(DEFAULT_OBJECT_TYPE) $(CPPFLAGS) $(XINC) $(DEFS) -I$(srcdir) -I$(basedir) $(MYSQL_INCLUDE) $$src >> Makefile; \ + done + @for src in $(POSTGRESQL_SRCS); do \ + $(CXX) -S -M -MT `basename $$src .c`$(DEFAULT_OBJECT_TYPE) $(CPPFLAGS) $(XINC) $(DEFS) -I$(srcdir) -I$(basedir) $(POSTGRESQL_INCLUDE) $$src >> Makefile; \ + done + @for src in $(SQLITE_SRCS); do \ + $(CXX) -S -M -MT `basename $$src .c`$(DEFAULT_OBJECT_TYPE) $(CPPFLAGS) $(XINC) $(DEFS) -I$(srcdir) -I$(basedir) $(SQLITE_INCLUDE) $$src >> Makefile; \ + done +# @for src in $(DBI_SRCS); do \ +# $(CXX) -S -M -MT `basename $$src .c`$(DEFAULT_OBJECT_TYPE) $(CPPFLAGS) $(XINC) $(DEFS) -I$(srcdir) -I$(basedir) $(DBI_INCLUDE) $$src >> Makefile; \ +# done + @if test -f Makefile ; then \ + $(RMF) Makefile.bak; \ + else \ + $(MV) Makefile.bak Makefile; \ + echo " ===== Something went wrong in make depend ====="; \ + fi + +# ----------------------------------------------------------------------- +# DO NOT DELETE: nice dependency list follows diff --git a/src/cats/bdb.h b/src/cats/bdb.h new file mode 100644 index 00000000..50b55402 --- /dev/null +++ b/src/cats/bdb.h @@ -0,0 +1,319 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Catalog DB Interface class + * + * Written by Kern E. Sibbald + */ + +#ifndef __BDB_H_ +#define __BDB_H_ 1 + +/* + * These enums can be used to build queries that respects + * Bacula Restricted Consoles. 
+ */ +typedef enum +{ + DB_ACL_JOB = 1, + DB_ACL_CLIENT, + DB_ACL_STORAGE, + DB_ACL_POOL, + DB_ACL_FILESET, + DB_ACL_RCLIENT, + DB_ACL_BCLIENT, + DB_ACL_PATH, + DB_ACL_LOG, + DB_ACL_LAST /* Keep last */ +} DB_ACL_t; + +/* + * Bits for the opts argument of db_get_job_list() + * If neither DBL_ALL_FILES nor DBL_DELETED, return non-deleted files + */ +#define DBL_NONE 0 /* no options */ +#define DBL_USE_DELTA (1<<0) /* Use delta indexes */ +#define DBL_ALL_FILES (1<<1) /* Return all files including deleted ones */ +#define DBL_DELETED (1<<2) /* Return only deleted files */ +#define DBL_USE_MD5 (1<<3) /* Include md5 */ + +/* Turn the num to a bit field */ +#define DB_ACL_BIT(x) (1<inc_use_count(); + db = mdb; /* need to inc ref count */ + jobids = get_pool_memory(PM_NAME); + prev_dir = get_pool_memory(PM_NAME); + pattern = get_pool_memory(PM_NAME); + filename = get_pool_memory(PM_NAME); + tmp = get_pool_memory(PM_NAME); + escaped_list = get_pool_memory(PM_NAME); + *filename = *jobids = *prev_dir = *pattern = 0; + pwd_id = offset = 0; + see_copies = see_all_versions = false; + compute_delta = true; + limit = 1000; + attr = new_attr(jcr); + list_entries = result_handler; + user_data = this; + username = NULL; + job_acl = client_acl = pool_acl = fileset_acl = NULL; + last_dir_acl = NULL; + dir_acl = NULL; + use_acl = false; + dir_filenameid = 0; /* special FilenameId where Name='' */ +} + +Bvfs::~Bvfs() { + free_pool_memory(jobids); + free_pool_memory(pattern); + free_pool_memory(prev_dir); + free_pool_memory(filename); + free_pool_memory(tmp); + free_pool_memory(escaped_list); + if (username) { + free(username); + } + free_attr(attr); + jcr->dec_use_count(); + if (dir_acl) { + delete dir_acl; + } +} + +char *Bvfs::escape_list(alist *lst) +{ + char *elt; + int len; + + /* List is empty, reject everything */ + if (!lst || lst->size() == 0) { + Mmsg(escaped_list, "''"); + return escaped_list; + } + + *tmp = 0; + *escaped_list = 0; + + foreach_alist(elt, lst) { + if (elt && *elt) { + len = strlen(elt); + /* Escape + ' ' */ + tmp = check_pool_memory_size(tmp, 2 * len + 2 + 2); + + tmp[0] = '\''; + db->bdb_escape_string(jcr, tmp + 1 , elt, len); + pm_strcat(tmp, "'"); + + if (*escaped_list) { + pm_strcat(escaped_list, ","); + } + + pm_strcat(escaped_list, tmp); + } + } + return escaped_list; +} + +/* Returns the number of jobids in the result */ +int Bvfs::filter_jobid() +{ + POOL_MEM query; + POOL_MEM sub_where; + POOL_MEM sub_join; + + /* No ACL, no username, no check */ + if (!job_acl && !fileset_acl && !client_acl && !pool_acl && !username) { + Dmsg0(dbglevel_sql, "No ACL\n"); + /* Just count the number of items in the list */ + int nb = (*jobids != 0) ? 
1 : 0; + for (char *p=jobids; *p ; p++) { + if (*p == ',') { + nb++; + } + } + return nb; + } + + if (job_acl) { + Mmsg(sub_where, " AND Job.Name IN (%s) ", escape_list(job_acl)); + } + + if (fileset_acl) { + Mmsg(query, " AND FileSet.FileSet IN (%s) ", escape_list(fileset_acl)); + pm_strcat(sub_where, query.c_str()); + pm_strcat(sub_join, " JOIN FileSet USING (FileSetId) "); + } + + if (client_acl) { + Mmsg(query, " AND Client.Name IN (%s) ", escape_list(client_acl)); + pm_strcat(sub_where, query.c_str()); + } + + if (pool_acl) { + Mmsg(query, " AND Pool.Name IN (%s) ", escape_list(pool_acl)); + pm_strcat(sub_where, query.c_str()); + pm_strcat(sub_join, " JOIN Pool USING (PoolId) "); + } + + if (username) { + /* Query used by Bweb to filter clients, activated when using + * set_username() + */ + Mmsg(query, + "SELECT DISTINCT JobId FROM Job JOIN Client USING (ClientId) %s " + "JOIN (SELECT ClientId FROM client_group_member " + "JOIN client_group USING (client_group_id) " + "JOIN bweb_client_group_acl USING (client_group_id) " + "JOIN bweb_user USING (userid) " + "WHERE bweb_user.username = '%s' " + ") AS filter USING (ClientId) " + " WHERE JobId IN (%s) %s", + sub_join.c_str(), username, jobids, sub_where.c_str()); + + } else { + Mmsg(query, + "SELECT DISTINCT JobId FROM Job JOIN Client USING (ClientId) %s " + " WHERE JobId IN (%s) %s", + sub_join.c_str(), jobids, sub_where.c_str()); + } + + db_list_ctx ctx; + Dmsg1(dbglevel_sql, "q=%s\n", query.c_str()); + db->bdb_sql_query(query.c_str(), db_list_handler, &ctx); + pm_strcpy(jobids, ctx.list); + return ctx.count; +} + +/* Return the number of jobids after the filter */ +int Bvfs::set_jobid(JobId_t id) +{ + Mmsg(jobids, "%lld", (uint64_t)id); + return filter_jobid(); +} + +/* Return the number of jobids after the filter */ +int Bvfs::set_jobids(char *ids) +{ + pm_strcpy(jobids, ids); + return filter_jobid(); +} + +/* + * TODO: Find a way to let the user choose how he wants to display + * files and directories + */ + + +/* + * Working Object to store PathId already seen (avoid + * database queries), equivalent to %cache_ppathid in perl + */ + +#define NITEMS 50000 +class pathid_cache { +private: + hlink *nodes; + int nb_node; + int max_node; + + alist *table_node; + + htable *cache_ppathid; + +public: + pathid_cache() { + hlink link; + cache_ppathid = (htable *)malloc(sizeof(htable)); + cache_ppathid->init(&link, &link, NITEMS); + max_node = NITEMS; + nodes = (hlink *) malloc(max_node * sizeof (hlink)); + nb_node = 0; + table_node = New(alist(5, owned_by_alist)); + table_node->append(nodes); + }; + + hlink *get_hlink() { + if (++nb_node >= max_node) { + nb_node = 0; + nodes = (hlink *)malloc(max_node * sizeof(hlink)); + table_node->append(nodes); + } + return nodes + nb_node; + }; + + bool lookup(char *pathid) { + bool ret = cache_ppathid->lookup(pathid) != NULL; + return ret; + }; + + void insert(char *pathid) { + hlink *h = get_hlink(); + cache_ppathid->insert(pathid, h); + }; + + ~pathid_cache() { + cache_ppathid->destroy(); + free(cache_ppathid); + delete table_node; + }; +private: + pathid_cache(const pathid_cache &); /* prohibit pass by value */ + pathid_cache &operator= (const pathid_cache &);/* prohibit class assignment*/ +} ; + +/* Return the parent_dir with the trailing / (update the given string) + * TODO: see in the rest of bacula if we don't have already this function + * dir=/tmp/toto/ + * dir=/tmp/ + * dir=/ + * dir= + */ +char *bvfs_parent_dir(char *path) +{ + char *p = path; + int len = strlen(path) - 1; + + /* windows 
directory / */
+   if (len == 2 && B_ISALPHA(path[0])
+       && path[1] == ':'
+       && path[2] == '/')
+   {
+      len = 0;
+      path[0] = '\0';
+   }
+
+   if (len >= 0 && path[len] == '/') {  /* if directory, skip last / */
+      path[len] = '\0';
+   }
+
+   if (len > 0) {
+      p += len;
+      while (p > path && !IsPathSeparator(*p)) {
+         p--;
+      }
+      p[1] = '\0';
+   }
+   return path;
+}
+
+/* Return the basename of the path with the trailing /
+ * TODO: see in the rest of bacula if we don't have
+ * this function already
+ */
+char *bvfs_basename_dir(char *path)
+{
+   char *p = path;
+   int len = strlen(path) - 1;
+
+   if (path[len] == '/') {      /* if directory, skip last / */
+      len -= 1;
+   }
+
+   if (len > 0) {
+      p += len;
+      while (p > path && !IsPathSeparator(*p)) {
+         p--;
+      }
+      if (*p == '/') {
+         p++;                   /* skip first / */
+      }
+   }
+   return p;
+}
+
+static void build_path_hierarchy(JCR *jcr, BDB *mdb,
+                                 pathid_cache &ppathid_cache,
+                                 char *org_pathid, char *path)
+{
+   Dmsg1(dbglevel, "build_path_hierarchy(%s)\n", path);
+   char pathid[50];
+   ATTR_DBR parent;
+   char *bkp = mdb->path;
+   strncpy(pathid, org_pathid, sizeof(pathid));
+
+   /* Does the ppathid exist for this path? We use a memory cache... In order to
+    * avoid the full loop, we consider that if a dir is already in the
+    * PathHierarchy table, then there is no need to calculate all the
+    * hierarchy
+    */
+   while (path && *path)
+   {
+      if (!ppathid_cache.lookup(pathid))
+      {
+         Mmsg(mdb->cmd,
+              "SELECT PPathId FROM PathHierarchy WHERE PathId = %s",
+              pathid);
+
+         if (!mdb->QueryDB(jcr, mdb->cmd)) {
+            goto bail_out;            /* Query failed, just leave */
+         }
+
+         /* Do we have a result ? */
+         if (mdb->sql_num_rows() > 0) {
+            ppathid_cache.insert(pathid);
+            /* This dir was in the db ...
+             * It means we can leave, the tree has already been built for
+             * this dir
+             */
+            goto bail_out;
+         } else {
+            /* search or create parent PathId in Path table */
+            mdb->path = bvfs_parent_dir(path);
+            mdb->pnl = strlen(mdb->path);
+            if (!mdb->bdb_create_path_record(jcr, &parent)) {
+               goto bail_out;
+            }
+            ppathid_cache.insert(pathid);
+
+            Mmsg(mdb->cmd,
+                 "INSERT INTO PathHierarchy (PathId, PPathId) "
+                 "VALUES (%s,%lld)",
+                 pathid, (uint64_t) parent.PathId);
+
+            if (!mdb->InsertDB(jcr, mdb->cmd)) {
+               goto bail_out;         /* Can't insert the record, just leave */
+            }
+
+            edit_uint64(parent.PathId, pathid);
+            path = mdb->path;         /* already done */
+         }
+      } else {
+         /* It's already in the cache. We can leave, no time to waste here,
+          * all the parent dirs have already been done
+          */
+         goto bail_out;
+      }
+   }
+
+bail_out:
+   mdb->path = bkp;
+   mdb->fnl = 0;
+}
+
+/*
+ * Internal function to update path_hierarchy cache with a shared pathid cache
+ * return Error 0
+ *        OK    1
+ */
+static int update_path_hierarchy_cache(JCR *jcr,
+                                       BDB *mdb,
+                                       pathid_cache &ppathid_cache,
+                                       JobId_t JobId)
+{
+   Dmsg0(dbglevel, "update_path_hierarchy_cache()\n");
+   uint32_t ret=0;
+   uint32_t num;
+   char jobid[50];
+   edit_uint64(JobId, jobid);
+
+   mdb->bdb_lock();
+
+   /* We don't really want to harm users with spurious messages,
+    * everything is handled by transaction
+    */
+   mdb->set_use_fatal_jmsg(false);
+
+   mdb->bdb_start_transaction(jcr);
+
+   Mmsg(mdb->cmd, "SELECT 1 FROM Job WHERE JobId = %s AND HasCache=1", jobid);
+
+   if (!mdb->QueryDB(jcr, mdb->cmd) || mdb->sql_num_rows() > 0) {
+      Dmsg1(dbglevel, "already computed %d\n", (uint32_t)JobId );
+      ret = 1;
+      goto bail_out;
+   }
+
+   /* Inserting path records for JobId */
+   Mmsg(mdb->cmd, "INSERT INTO PathVisibility (PathId, JobId) "
+                  "SELECT DISTINCT PathId, JobId "
+                  "FROM (SELECT PathId, JobId FROM File WHERE JobId = %s AND FileIndex > 0 "
+                  "UNION "
+                  "SELECT PathId, BaseFiles.JobId "
+                  "FROM BaseFiles JOIN File AS F USING (FileId) "
+                  "WHERE BaseFiles.JobId = %s) AS B",
+        jobid, jobid);
+
+   if (!mdb->QueryDB(jcr, mdb->cmd)) {
+      Dmsg1(dbglevel, "Can't fill PathVisibility %d\n", (uint32_t)JobId );
+      goto bail_out;
+   }
+
+   /* Now we have to do the directory recursion stuff to determine missing
+    * visibility. We try to avoid recursion, to be as fast as possible. We
+    * also only work on directories that are not already hierarchised...
+    */
+   Mmsg(mdb->cmd,
+        "SELECT PathVisibility.PathId, Path "
+        "FROM PathVisibility "
+        "JOIN Path ON( PathVisibility.PathId = Path.PathId) "
+        "LEFT JOIN PathHierarchy "
+        "ON (PathVisibility.PathId = PathHierarchy.PathId) "
+        "WHERE PathVisibility.JobId = %s "
+        "AND PathHierarchy.PathId IS NULL "
+        "ORDER BY Path", jobid);
+   Dmsg1(dbglevel_sql, "q=%s\n", mdb->cmd);
+
+   if (!mdb->QueryDB(jcr, mdb->cmd)) {
+      Dmsg1(dbglevel, "Can't get new Path %d\n", (uint32_t)JobId );
+      goto bail_out;
+   }
+
+   /* TODO: I need to reuse the DB connection without emptying the result.
+    * So, now I'm copying the result in memory to be able to query the
+    * catalog descriptor again.
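+    * The copy below therefore runs in two phases: each (PathId, Path) row
+    * returned by the query is first duplicated into a local array, then the
+    * array is walked and build_path_hierarchy() is called for each entry,
+    * so the single catalog connection can be reused for the per-path queries.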
+ */ + num = mdb->sql_num_rows(); + if (num > 0) { + char **result = (char **)malloc (num * 2 * sizeof(char *)); + + SQL_ROW row; + int i=0; + while((row = mdb->sql_fetch_row())) { + result[i++] = bstrdup(row[0]); + result[i++] = bstrdup(row[1]); + } + + i=0; + while (num > 0) { + build_path_hierarchy(jcr, mdb, ppathid_cache, result[i], result[i+1]); + free(result[i++]); + free(result[i++]); + num--; + } + free(result); + } + + if (mdb->bdb_get_type_index() == SQL_TYPE_SQLITE3) { + Mmsg(mdb->cmd, + "INSERT INTO PathVisibility (PathId, JobId) " + "SELECT DISTINCT h.PPathId AS PathId, %s " + "FROM PathHierarchy AS h " + "WHERE h.PathId IN (SELECT PathId FROM PathVisibility WHERE JobId=%s) " + "AND h.PPathId NOT IN (SELECT PathId FROM PathVisibility WHERE JobId=%s)", + jobid, jobid, jobid ); + + } else if (mdb->bdb_get_type_index() == SQL_TYPE_MYSQL) { + Mmsg(mdb->cmd, + "INSERT INTO PathVisibility (PathId, JobId) " + "SELECT a.PathId,%s " + "FROM ( " + "SELECT DISTINCT h.PPathId AS PathId " + "FROM PathHierarchy AS h " + "JOIN PathVisibility AS p ON (h.PathId=p.PathId) " + "WHERE p.JobId=%s) AS a " + "LEFT JOIN PathVisibility AS b ON (b.JobId=%s and a.PathId = b.PathId) " + "WHERE b.PathId IS NULL", jobid, jobid, jobid); + + } else { + Mmsg(mdb->cmd, + "INSERT INTO PathVisibility (PathId, JobId) " + "SELECT a.PathId,%s " + "FROM ( " + "SELECT DISTINCT h.PPathId AS PathId " + "FROM PathHierarchy AS h " + "JOIN PathVisibility AS p ON (h.PathId=p.PathId) " + "WHERE p.JobId=%s) AS a LEFT JOIN " + "(SELECT PathId " + "FROM PathVisibility " + "WHERE JobId=%s) AS b ON (a.PathId = b.PathId) " + "WHERE b.PathId IS NULL", jobid, jobid, jobid); + } + + do { + ret = mdb->QueryDB(jcr, mdb->cmd); + } while (ret && mdb->sql_affected_rows() > 0); + + Mmsg(mdb->cmd, "UPDATE Job SET HasCache=1 WHERE JobId=%s", jobid); + ret = mdb->UpdateDB(jcr, mdb->cmd, false); + +bail_out: + mdb->bdb_end_transaction(jcr); + + if (!ret) { + Mmsg(mdb->cmd, "SELECT HasCache FROM Job WHERE JobId=%s", jobid); + mdb->bdb_sql_query(mdb->cmd, db_int_handler, &ret); + } + + /* Enable back the FATAL message if something is wrong */ + mdb->set_use_fatal_jmsg(true); + + mdb->bdb_unlock(); + return ret; +} + +/* Compute the cache for the bfileview compoment */ +void Bvfs::fv_update_cache() +{ + int64_t pathid; + int64_t size=0, count=0; + + Dmsg0(dbglevel, "fv_update_cache()\n"); + + if (!*jobids) { + return; /* Nothing to build */ + } + + db->bdb_lock(); + /* We don't really want to harm users with spurious messages, + * everything is handled by transaction + */ + db->set_use_fatal_jmsg(false); + + db->bdb_start_transaction(jcr); + + pathid = get_root(); + + fv_compute_size_and_count(pathid, &size, &count); + + db->bdb_end_transaction(jcr); + + /* Enable back the FATAL message if something is wrong */ + db->set_use_fatal_jmsg(true); + + db->bdb_unlock(); +} + +/* + * Find an store the filename descriptor for empty directories Filename.Name='' + */ +DBId_t Bvfs::get_dir_filenameid() +{ + uint32_t id; + if (dir_filenameid) { + return dir_filenameid; + } + Mmsg(db->cmd, "SELECT FilenameId FROM Filename WHERE Name = ''"); + db_sql_query(db, db->cmd, db_int_handler, &id); + dir_filenameid = id; + return dir_filenameid; +} + +/* Not yet working */ +void Bvfs::fv_get_big_files(int64_t pathid, int64_t min_size, int32_t limit) +{ + Mmsg(db->cmd, + "SELECT FilenameId AS filenameid, Name AS name, size " + "FROM ( " + "SELECT FilenameId, base64_decode_lstat(8,LStat) AS size " + "FROM File " + "WHERE PathId = %lld " + "AND JobId = %s " + ") AS S 
INNER JOIN Filename USING (FilenameId) " + "WHERE S.size > %lld " + "ORDER BY S.size DESC " + "LIMIT %d ", pathid, jobids, min_size, limit); +} + + +/* Get the current path size and files count */ +void Bvfs::fv_get_current_size_and_count(int64_t pathid, int64_t *size, int64_t *count) +{ + SQL_ROW row; + + *size = *count = 0; + + Mmsg(db->cmd, + "SELECT Size AS size, Files AS files " + " FROM PathVisibility " + " WHERE PathId = %lld " + " AND JobId = %s ", pathid, jobids); + + if (!db->QueryDB(jcr, db->cmd)) { + return; + } + + if ((row = db->sql_fetch_row())) { + *size = str_to_int64(row[0]); + *count = str_to_int64(row[1]); + } +} + +/* Compute for the current path the size and files count */ +void Bvfs::fv_get_size_and_count(int64_t pathid, int64_t *size, int64_t *count) +{ + SQL_ROW row; + + *size = *count = 0; + + Mmsg(db->cmd, + "SELECT sum(base64_decode_lstat(8,LStat)) AS size, count(1) AS files " + " FROM File " + " WHERE PathId = %lld " + " AND JobId = %s ", pathid, jobids); + + if (!db->QueryDB(jcr, db->cmd)) { + return; + } + + if ((row = db->sql_fetch_row())) { + *size = str_to_int64(row[0]); + *count = str_to_int64(row[1]); + } +} + +void Bvfs::fv_compute_size_and_count(int64_t pathid, int64_t *size, int64_t *count) +{ + Dmsg1(dbglevel, "fv_compute_size_and_count(%lld)\n", pathid); + + fv_get_current_size_and_count(pathid, size, count); + if (*size > 0) { + return; + } + + /* Update stats for the current directory */ + fv_get_size_and_count(pathid, size, count); + + /* Update stats for all sub directories */ + Mmsg(db->cmd, + " SELECT PathId " + " FROM PathVisibility " + " INNER JOIN PathHierarchy USING (PathId) " + " WHERE PPathId = %lld " + " AND JobId = %s ", pathid, jobids); + + db->QueryDB(jcr, db->cmd); + int num = db->sql_num_rows(); + + if (num > 0) { + int64_t *result = (int64_t *)malloc (num * sizeof(int64_t)); + SQL_ROW row; + int i=0; + + while((row = db->sql_fetch_row())) { + result[i++] = str_to_int64(row[0]); /* PathId */ + } + + i=0; + while (num > 0) { + int64_t c=0, s=0; + fv_compute_size_and_count(result[i], &s, &c); + *size += s; + *count += c; + + i++; + num--; + } + free(result); + } + + fv_update_size_and_count(pathid, *size, *count); +} + +void Bvfs::fv_update_size_and_count(int64_t pathid, int64_t size, int64_t count) +{ + Mmsg(db->cmd, + "UPDATE PathVisibility SET Files = %lld, Size = %lld " + " WHERE JobId = %s " + " AND PathId = %lld ", count, size, jobids, pathid); + + db->UpdateDB(jcr, db->cmd, false); +} + +void bvfs_update_cache(JCR *jcr, BDB *mdb) +{ + uint32_t nb=0; + db_list_ctx jobids_list; + + mdb->bdb_lock(); + +#ifdef xxx + /* TODO: Remove this code when updating make_bacula_table script */ + Mmsg(mdb->cmd, "SELECT 1 FROM Job WHERE HasCache<>2 LIMIT 1"); + if (!mdb->QueryDB(jcr, mdb->cmd)) { + Dmsg0(dbglevel, "Creating cache table\n"); + Mmsg(mdb->cmd, "ALTER TABLE Job ADD HasCache int DEFAULT 0"); + mdb->QueryDB(jcr, mdb->cmd); + + Mmsg(mdb->cmd, + "CREATE TABLE PathHierarchy ( " + "PathId integer NOT NULL, " + "PPathId integer NOT NULL, " + "CONSTRAINT pathhierarchy_pkey " + "PRIMARY KEY (PathId))"); + mdb->QueryDB(jcr, mdb->cmd); + + Mmsg(mdb->cmd, + "CREATE INDEX pathhierarchy_ppathid " + "ON PathHierarchy (PPathId)"); + mdb->QueryDB(jcr, mdb->cmd); + + Mmsg(mdb->cmd, + "CREATE TABLE PathVisibility (" + "PathId integer NOT NULL, " + "JobId integer NOT NULL, " + "Size int8 DEFAULT 0, " + "Files int4 DEFAULT 0, " + "CONSTRAINT pathvisibility_pkey " + "PRIMARY KEY (JobId, PathId))"); + mdb->QueryDB(jcr, mdb->cmd); + + Mmsg(mdb->cmd, + 
"CREATE INDEX pathvisibility_jobid " + "ON PathVisibility (JobId)"); + mdb->QueryDB(jcr, mdb->cmd); + + } +#endif + + Mmsg(mdb->cmd, + "SELECT JobId from Job " + "WHERE HasCache = 0 " + "AND Type IN ('B') AND JobStatus IN ('T', 'f', 'A') " + "ORDER BY JobId"); + + mdb->bdb_sql_query(mdb->cmd, db_list_handler, &jobids_list); + + bvfs_update_path_hierarchy_cache(jcr, mdb, jobids_list.list); + + mdb->bdb_start_transaction(jcr); + Dmsg0(dbglevel, "Cleaning pathvisibility\n"); + Mmsg(mdb->cmd, + "DELETE FROM PathVisibility " + "WHERE NOT EXISTS " + "(SELECT 1 FROM Job WHERE JobId=PathVisibility.JobId)"); + nb = mdb->DeleteDB(jcr, mdb->cmd); + Dmsg1(dbglevel, "Affected row(s) = %d\n", nb); + + mdb->bdb_end_transaction(jcr); + mdb->bdb_unlock(); +} + +/* + * Update the bvfs cache for given jobids (1,2,3,4) + */ +int +bvfs_update_path_hierarchy_cache(JCR *jcr, BDB *mdb, char *jobids) +{ + pathid_cache ppathid_cache; + JobId_t JobId; + char *p; + int ret=1; + + for (p=jobids; ; ) { + int stat = get_next_jobid_from_list(&p, &JobId); + if (stat < 0) { + ret = 0; + break; + } + if (stat == 0) { + break; + } + Dmsg1(dbglevel, "Updating cache for %lld\n", (uint64_t)JobId); + if (!update_path_hierarchy_cache(jcr, mdb, ppathid_cache, JobId)) { + ret = 0; + } + } + return ret; +} + +/* + * Update the bvfs fileview for given jobids + */ +void +bvfs_update_fv_cache(JCR *jcr, BDB *mdb, char *jobids) +{ + char *p; + JobId_t JobId; + Bvfs bvfs(jcr, mdb); + + for (p=jobids; ; ) { + int stat = get_next_jobid_from_list(&p, &JobId); + if (stat < 0) { + return; + } + if (stat == 0) { + break; + } + + Dmsg1(dbglevel, "Trying to create cache for %lld\n", (int64_t)JobId); + + bvfs.set_jobid(JobId); + bvfs.fv_update_cache(); + } +} + +/* + * Update the bvfs cache for current jobids + */ +void Bvfs::update_cache() +{ + bvfs_update_path_hierarchy_cache(jcr, db, jobids); +} + + +bool Bvfs::ch_dir(DBId_t pathid) +{ + reset_offset(); + + pwd_id = pathid; + return pwd_id != 0; +} + + +/* Change the current directory, returns true if the path exists */ +bool Bvfs::ch_dir(const char *path) +{ + db->bdb_lock(); + pm_strcpy(db->path, path); + db->pnl = strlen(db->path); + ch_dir(db->bdb_get_path_record(jcr)); + db->bdb_unlock(); + return pwd_id != 0; +} + +/* + * Get all file versions for a specified list of clients + * TODO: Handle basejobs using different client + */ +void Bvfs::get_all_file_versions(DBId_t pathid, FileId_t fnid, alist *clients) +{ + char ed1[50], ed2[50], *eclients; + POOL_MEM q, query; + + if (see_copies) { + Mmsg(q, " AND Job.Type IN ('C', 'B') "); + } else { + Mmsg(q, " AND Job.Type = 'B' "); + } + + eclients = escape_list(clients); + + Dmsg3(dbglevel, "get_all_file_versions(%lld, %lld, %s)\n", (uint64_t)pathid, + (uint64_t)fnid, eclients); + + Mmsg(query,// 1 2 3 4 +"SELECT 'V', File.PathId, File.FilenameId, 0, File.JobId, " +// 5 6 7 + "File.LStat, File.FileId, File.Md5, " +// 8 9 + "Media.VolumeName, Media.InChanger " +"FROM File, Job, Client, JobMedia, Media " +"WHERE File.FilenameId = %s " + "AND File.PathId=%s " + "AND File.JobId = Job.JobId " + "AND Job.JobId = JobMedia.JobId " + "AND File.FileIndex >= JobMedia.FirstIndex " + "AND File.FileIndex <= JobMedia.LastIndex " + "AND JobMedia.MediaId = Media.MediaId " + "AND Job.ClientId = Client.ClientId " + "AND Client.Name IN (%s) " + "%s ORDER BY FileId LIMIT %d OFFSET %d" + ,edit_uint64(fnid, ed1), edit_uint64(pathid, ed2), eclients, q.c_str(), + limit, offset); + Dmsg1(dbglevel_sql, "q=%s\n", query.c_str()); + db->bdb_sql_query(query.c_str(), 
list_entries, user_data); +} + +/* + * Get all file versions for a specified client + * TODO: Handle basejobs using different client + */ +bool Bvfs::get_delta(FileId_t fileid) +{ + Dmsg1(dbglevel, "get_delta(%lld)\n", (uint64_t)fileid); + char ed1[50]; + int32_t num; + SQL_ROW row; + POOL_MEM q; + POOL_MEM query; + char *fn = NULL; + bool ret = false; + db->bdb_lock(); + + /* Check if some FileId have DeltaSeq > 0 + * Foreach of them we need to get the accurate_job list, and compute + * what are dependencies + */ + Mmsg(query, + "SELECT F.JobId, FN.Name, F.PathId, F.DeltaSeq " + "FROM File AS F, Filename AS FN WHERE FileId = %lld " + "AND FN.FilenameId = F.FilenameId AND DeltaSeq > 0", fileid); + + if (!db->QueryDB(jcr, query.c_str())) { + Dmsg1(dbglevel_sql, "Can't execute query=%s\n", query.c_str()); + goto bail_out; + } + + /* TODO: Use an other DB connection can avoid to copy the result of the + * previous query into a temporary buffer + */ + num = db->sql_num_rows(); + Dmsg2(dbglevel, "Found %d Delta parts q=%s\n", + num, query.c_str()); + + if (num > 0 && (row = db->sql_fetch_row())) { + JOB_DBR jr, jr2; + db_list_ctx lst; + memset(&jr, 0, sizeof(jr)); + memset(&jr2, 0, sizeof(jr2)); + + fn = bstrdup(row[1]); /* Filename */ + int64_t jid = str_to_int64(row[0]); /* JobId */ + int64_t pid = str_to_int64(row[2]); /* PathId */ + + /* Need to limit the query to StartTime, Client/Fileset */ + jr2.JobId = jid; + if (!db->bdb_get_job_record(jcr, &jr2)) { + Dmsg1(0, "Unable to get job record for jobid %d\n", jid); + goto bail_out; + } + + jr.JobId = jid; + jr.ClientId = jr2.ClientId; + jr.FileSetId = jr2.FileSetId; + jr.JobLevel = L_INCREMENTAL; + jr.StartTime = jr2.StartTime; + + /* Get accurate jobid list */ + if (!db->bdb_get_accurate_jobids(jcr, &jr, &lst)) { + Dmsg1(0, "Unable to get Accurate list for jobid %d\n", jid); + goto bail_out; + } + + /* Escape filename */ + db->fnl = strlen(fn); + db->esc_name = check_pool_memory_size(db->esc_name, 2*db->fnl+2); + db->bdb_escape_string(jcr, db->esc_name, fn, db->fnl); + + edit_int64(pid, ed1); /* pathid */ + + int id=db->bdb_get_type_index(); + Mmsg(query, bvfs_select_delta_version_with_basejob_and_delta[id], + lst.list, db->esc_name, ed1, + lst.list, db->esc_name, ed1, + lst.list, lst.list); + + Mmsg(db->cmd, + // 0 1 2 3 4 5 6 7 + "SELECT 'd', PathId, 0, JobId, LStat, FileId, DeltaSeq, JobTDate" + " FROM (%s) AS F1 " + "ORDER BY DeltaSeq ASC", + query.c_str()); + + Dmsg1(dbglevel_sql, "q=%s\n", db->cmd); + + if (!db->bdb_sql_query(db->cmd, list_entries, user_data)) { + Dmsg1(dbglevel_sql, "Can't exec q=%s\n", db->cmd); + goto bail_out; + } + } + ret = true; +bail_out: + if (fn) { + free(fn); + } + db->bdb_unlock(); + return ret; +} + +/* + * Get all volumes for a specific file + */ +void Bvfs::get_volumes(FileId_t fileid) +{ + Dmsg1(dbglevel, "get_volumes(%lld)\n", (uint64_t)fileid); + + char ed1[50]; + POOL_MEM query; + + Mmsg(query, +// 7 8 +"SELECT DISTINCT 'L',0,0,0,0,0,0, Media.VolumeName, Media.InChanger " +"FROM File JOIN JobMedia USING (JobId) JOIN Media USING (MediaId) " +"WHERE File.FileId = %s " + "AND File.FileIndex >= JobMedia.FirstIndex " + "AND File.FileIndex <= JobMedia.LastIndex " + " LIMIT %d OFFSET %d" + ,edit_uint64(fileid, ed1), limit, offset); + Dmsg1(dbglevel_sql, "q=%s\n", query.c_str()); + db->bdb_sql_query(query.c_str(), list_entries, user_data); +} + +DBId_t Bvfs::get_root() +{ + int p; + *db->path = 0; + db->bdb_lock(); + p = db->bdb_get_path_record(jcr); + db->bdb_unlock(); + return p; +} + +static int 
path_handler(void *ctx, int fields, char **row) +{ + Bvfs *fs = (Bvfs *) ctx; + return fs->_handle_path(ctx, fields, row); +} + +int Bvfs::_handle_path(void *ctx, int fields, char **row) +{ + if (bvfs_is_dir(row)) { + /* can have the same path 2 times */ + if (strcmp(row[BVFS_PathId], prev_dir)) { + pm_strcpy(prev_dir, row[BVFS_PathId]); + if (strcmp(NPRTB(row[BVFS_FileIndex]), "0") == 0 && + strcmp(NPRTB(row[BVFS_FileId]), "0") != 0) + { + /* The directory was probably deleted */ + return 0; + } + return list_entries(user_data, fields, row); + } + } + return 0; +} + +/* + * Retrieve . and .. information + */ +void Bvfs::ls_special_dirs() +{ + Dmsg1(dbglevel, "ls_special_dirs(%lld)\n", (uint64_t)pwd_id); + char ed1[50], ed2[50]; + if (*jobids == 0) { + return; + } + if (!dir_filenameid) { + get_dir_filenameid(); + } + + /* Will fetch directories */ + *prev_dir = 0; + + POOL_MEM query; + Mmsg(query, +"(SELECT PathHierarchy.PPathId AS PathId, '..' AS Path " + "FROM PathHierarchy JOIN PathVisibility USING (PathId) " + "WHERE PathHierarchy.PathId = %s " + "AND PathVisibility.JobId IN (%s) " +"UNION " + "SELECT %s AS PathId, '.' AS Path)", + edit_uint64(pwd_id, ed1), jobids, ed1); + + POOL_MEM query2; + Mmsg(query2,// 1 2 3 4 5 6 +"SELECT 'D', tmp.PathId, 0, tmp.Path, JobId, LStat, FileId, FileIndex " + "FROM %s AS tmp LEFT JOIN ( " // get attributes if any + "SELECT File1.PathId AS PathId, File1.JobId AS JobId, " + "File1.LStat AS LStat, File1.FileId AS FileId, " + "File1.FileIndex AS FileIndex, " + "Job1.JobTDate AS JobTDate " + "FROM File AS File1 JOIN Job AS Job1 USING (JobId)" + "WHERE File1.FilenameId = %s " + "AND File1.JobId IN (%s)) AS listfile1 " + "ON (tmp.PathId = listfile1.PathId) " + "ORDER BY tmp.Path, JobTDate DESC ", + query.c_str(), edit_uint64(dir_filenameid, ed2), jobids); + + Dmsg1(dbglevel_sql, "q=%s\n", query2.c_str()); + db->bdb_sql_query(query2.c_str(), path_handler, this); +} + +/* Returns true if we have dirs to read */ +bool Bvfs::ls_dirs() +{ + Dmsg1(dbglevel, "ls_dirs(%lld)\n", (uint64_t)pwd_id); + char ed1[50], ed2[50]; + if (*jobids == 0) { + return false; + } + + POOL_MEM query; + POOL_MEM filter; + if (*pattern) { + Mmsg(filter, " AND Path2.Path %s '%s' ", + match_query[db->bdb_get_type_index()], pattern); + + } + + if (!dir_filenameid) { + get_dir_filenameid(); + } + + /* the sql query displays same directory multiple time, take the first one */ + *prev_dir = 0; + + /* Let's retrieve the list of the visible dirs in this dir ... + * First, I need the empty filenameid to locate efficiently + * the dirs in the file table + * my $dir_filenameid = $self->get_dir_filenameid(); + */ + /* Then we get all the dir entries from File ... 
*/ + Mmsg(query, +// 0 1 2 3 4 5 6 +"SELECT 'D', PathId, 0, Path, JobId, LStat, FileId, FileIndex FROM ( " + "SELECT Path1.PathId AS PathId, Path1.Path AS Path, " + "lower(Path1.Path) AS lpath, " + "listfile1.JobId AS JobId, listfile1.LStat AS LStat, " + "listfile1.FileId AS FileId, " + "listfile1.JobTDate AS JobTDate, " + "listfile1.FileIndex AS FileIndex " + "FROM ( " + "SELECT DISTINCT PathHierarchy1.PathId AS PathId " + "FROM PathHierarchy AS PathHierarchy1 " + "JOIN Path AS Path2 " + "ON (PathHierarchy1.PathId = Path2.PathId) " + "JOIN PathVisibility AS PathVisibility1 " + "ON (PathHierarchy1.PathId = PathVisibility1.PathId) " + "WHERE PathHierarchy1.PPathId = %s " + "AND PathVisibility1.JobId IN (%s) " + "%s " + ") AS listpath1 " + "JOIN Path AS Path1 ON (listpath1.PathId = Path1.PathId) " + + "LEFT JOIN ( " /* get attributes if any */ + "SELECT File1.PathId AS PathId, File1.JobId AS JobId, " + "File1.LStat AS LStat, File1.FileId AS FileId, " + "File1.FileIndex, Job1.JobTDate AS JobTDate " + "FROM File AS File1 JOIN Job AS Job1 USING (JobId) " + "WHERE File1.FilenameId = %s " + "AND File1.JobId IN (%s)) AS listfile1 " + "ON (listpath1.PathId = listfile1.PathId) " + ") AS A ORDER BY Path,JobTDate DESC LIMIT %d OFFSET %d", + edit_uint64(pwd_id, ed1), + jobids, + filter.c_str(), + edit_uint64(dir_filenameid, ed2), + jobids, + limit, offset); + + Dmsg1(dbglevel_sql, "q=%s\n", query.c_str()); + + db->bdb_lock(); + db->bdb_sql_query(query.c_str(), path_handler, this); + nb_record = db->sql_num_rows(); + db->bdb_unlock(); + + return nb_record == limit; +} + +void build_ls_files_query(BDB *db, POOL_MEM &query, + const char *JobId, const char *PathId, + const char *filter, int64_t limit, int64_t offset) +{ + if (db->bdb_get_type_index() == SQL_TYPE_POSTGRESQL) { + Mmsg(query, sql_bvfs_list_files[db->bdb_get_type_index()], + JobId, PathId, JobId, PathId, + filter, limit, offset); + } else { + Mmsg(query, sql_bvfs_list_files[db->bdb_get_type_index()], + JobId, PathId, JobId, PathId, + limit, offset, filter, JobId, JobId); + } +} + +/* Returns true if we have files to read */ +bool Bvfs::ls_files() +{ + POOL_MEM query; + POOL_MEM filter; + char pathid[50]; + + Dmsg1(dbglevel, "ls_files(%lld)\n", (uint64_t)pwd_id); + if (*jobids == 0) { + return false; + } + + if (!pwd_id) { + ch_dir(get_root()); + } + + edit_uint64(pwd_id, pathid); + if (*pattern) { + Mmsg(filter, " AND Filename.Name %s '%s' ", + match_query[db_get_type_index(db)], pattern); + + } else if (*filename) { + Mmsg(filter, " AND Filename.Name = '%s' ", filename); + } + + build_ls_files_query(db, query, + jobids, pathid, filter.c_str(), + limit, offset); + + Dmsg1(dbglevel_sql, "q=%s\n", query.c_str()); + + db->bdb_lock(); + db->bdb_sql_query(query.c_str(), list_entries, user_data); + nb_record = db->sql_num_rows(); + db->bdb_unlock(); + + return nb_record == limit; +} + + +/* + * Return next Id from comma separated list + * + * Returns: + * 1 if next Id returned + * 0 if no more Ids are in list + * -1 there is an error + * TODO: merge with get_next_jobid_from_list() and get_next_dbid_from_list() + */ +static int get_next_id_from_list(char **p, int64_t *Id) +{ + const int maxlen = 30; + char id[maxlen+1]; + char *q = *p; + + id[0] = 0; + for (int i=0; ibdb_sql_query("BEGIN", NULL, NULL); + db->bdb_sql_query("UPDATE Job SET HasCache=0", NULL, NULL); + db->bdb_sql_query("TRUNCATE PathHierarchy", NULL, NULL); + db->bdb_sql_query("TRUNCATE PathVisibility", NULL, NULL); + db->bdb_sql_query("COMMIT", NULL, NULL); +} + +bool 
Bvfs::drop_restore_list(char *output_table) +{ + POOL_MEM query; + if (check_temp(output_table)) { + Mmsg(query, "DROP TABLE %s", output_table); + db->bdb_sql_query(query.c_str(), NULL, NULL); + return true; + } + return false; +} + +bool Bvfs::compute_restore_list(char *fileid, char *dirid, char *hardlink, + char *output_table) +{ + POOL_MEM query; + POOL_MEM tmp, tmp2; + int64_t id, jobid, prev_jobid; + int num; + bool init=false; + bool ret=false; + /* check args */ + if ((*fileid && !is_a_number_list(fileid)) || + (*dirid && !is_a_number_list(dirid)) || + (*hardlink && !is_a_number_list(hardlink))|| + (!*hardlink && !*fileid && !*dirid && !*hardlink)) + { + Dmsg0(dbglevel, "ERROR: One or more of FileId, DirId or HardLink is not given or not a number.\n"); + return false; + } + if (!check_temp(output_table)) { + Dmsg0(dbglevel, "ERROR: Wrong format for table name (in path field).\n"); + return false; + } + + db->bdb_lock(); + + /* Cleanup old tables first */ + Mmsg(query, "DROP TABLE btemp%s", output_table); + db->bdb_sql_query(query.c_str()); + + Mmsg(query, "DROP TABLE %s", output_table); + db->bdb_sql_query(query.c_str()); + + Mmsg(query, "CREATE TABLE btemp%s AS ", output_table); + + if (*fileid) { /* Select files with their direct id */ + init=true; + Mmsg(tmp,"SELECT Job.JobId, JobTDate, FileIndex, FilenameId, " + "PathId, FileId " + "FROM File,Job WHERE Job.JobId=File.Jobid " + "AND FileId IN (%s)", + fileid); + pm_strcat(query, tmp.c_str()); + } + + /* Add a directory content */ + while (get_next_id_from_list(&dirid, &id) == 1) { + Mmsg(tmp, "SELECT Path FROM Path WHERE PathId=%lld", id); + + if (!db->bdb_sql_query(tmp.c_str(), get_path_handler, (void *)&tmp2)) { + Dmsg3(dbglevel, "ERROR: Path not found %lld q=%s s=%s\n", + id, tmp.c_str(), tmp2.c_str()); + /* print error */ + goto bail_out; + } + if (!strcmp(tmp2.c_str(), "")) { /* path not found */ + Dmsg3(dbglevel, "ERROR: Path not found %lld q=%s s=%s\n", + id, tmp.c_str(), tmp2.c_str()); + break; + } + /* escape % and _ for LIKE search */ + tmp.check_size((strlen(tmp2.c_str())+1) * 2); + char *p = tmp.c_str(); + for (char *s = tmp2.c_str(); *s ; s++) { + if (*s == '%' || *s == '_' || *s == '\\') { + *p = '\\'; + p++; + } + *p = *s; + p++; + } + *p = '\0'; + tmp.strcat("%"); + + size_t len = strlen(tmp.c_str()); + tmp2.check_size((len+1) * 2); + db->bdb_escape_string(jcr, tmp2.c_str(), tmp.c_str(), len); + + if (init) { + query.strcat(" UNION "); + } + + Mmsg(tmp, "SELECT Job.JobId, JobTDate, File.FileIndex, File.FilenameId, " + "File.PathId, FileId " + "FROM Path JOIN File USING (PathId) JOIN Job USING (JobId) " + "WHERE Path.Path LIKE '%s' ESCAPE '%s' AND File.JobId IN (%s) ", + tmp2.c_str(), escape_char_value[db->bdb_get_type_index()], jobids); + query.strcat(tmp.c_str()); + init = true; + + query.strcat(" UNION "); + + /* A directory can have files from a BaseJob */ + Mmsg(tmp, "SELECT File.JobId, JobTDate, BaseFiles.FileIndex, " + "File.FilenameId, File.PathId, BaseFiles.FileId " + "FROM BaseFiles " + "JOIN File USING (FileId) " + "JOIN Job ON (BaseFiles.JobId = Job.JobId) " + "JOIN Path USING (PathId) " + "WHERE Path.Path LIKE '%s' AND BaseFiles.JobId IN (%s) ", + tmp2.c_str(), jobids); + query.strcat(tmp.c_str()); + } + + /* expect jobid,fileindex */ + prev_jobid=0; + while (get_next_id_from_list(&hardlink, &jobid) == 1) { + if (get_next_id_from_list(&hardlink, &id) != 1) { + Dmsg0(dbglevel, "ERROR: hardlink should be two by two\n"); + goto bail_out; + } + if (jobid != prev_jobid) { /* new job */ + if (prev_jobid == 
0) { /* first jobid */ + if (init) { + query.strcat(" UNION "); + } + } else { /* end last job, start new one */ + tmp.strcat(") UNION "); + query.strcat(tmp.c_str()); + } + Mmsg(tmp, "SELECT Job.JobId, JobTDate, FileIndex, FilenameId, " + "PathId, FileId " + "FROM File JOIN Job USING (JobId) WHERE JobId = %lld " + "AND FileIndex IN (%lld", jobid, id); + prev_jobid = jobid; + + } else { /* same job, add new findex */ + Mmsg(tmp2, ", %lld", id); + tmp.strcat(tmp2.c_str()); + } + } + + if (prev_jobid != 0) { /* end last job */ + tmp.strcat(") "); + query.strcat(tmp.c_str()); + init = true; + } + + Dmsg1(dbglevel_sql, "query=%s\n", query.c_str()); + + if (!db->bdb_sql_query(query.c_str(), NULL, NULL)) { + Dmsg1(dbglevel, "ERROR executing query=%s\n", query.c_str()); + goto bail_out; + } + + Mmsg(query, sql_bvfs_select[db->bdb_get_type_index()], + output_table, output_table, output_table); + + /* TODO: handle jobid filter */ + Dmsg1(dbglevel_sql, "query=%s\n", query.c_str()); + if (!db->bdb_sql_query(query.c_str(), NULL, NULL)) { + Dmsg1(dbglevel, "ERROR executing query=%s\n", query.c_str()); + goto bail_out; + } + + /* MySQL needs the index */ + if (db->bdb_get_type_index() == SQL_TYPE_MYSQL) { + Mmsg(query, "CREATE INDEX idx_%s ON %s (JobId)", + output_table, output_table); + Dmsg1(dbglevel_sql, "query=%s\n", query.c_str()); + if (!db->bdb_sql_query(query.c_str(), NULL, NULL)) { + Dmsg1(dbglevel, "ERROR executing query=%s\n", query.c_str()); + goto bail_out; + } + } + + /* Check if some FileId have DeltaSeq > 0 + * Foreach of them we need to get the accurate_job list, and compute + * what are dependencies + */ + Mmsg(query, + "SELECT F.FileId, F.JobId, F.FilenameId, F.PathId, F.DeltaSeq " + "FROM File AS F JOIN Job USING (JobId) JOIN %s USING (FileId) " + "WHERE DeltaSeq > 0", output_table); + + if (!db->QueryDB(jcr, query.c_str())) { + Dmsg1(dbglevel_sql, "Can't execute query=%s\n", query.c_str()); + } + + /* TODO: Use an other DB connection can avoid to copy the result of the + * previous query into a temporary buffer + */ + num = db->sql_num_rows(); + Dmsg2(dbglevel, "Found %d Delta parts in restore selection q=%s\n", num, query.c_str()); + + if (num > 0) { + int64_t *result = (int64_t *)malloc (num * 4 * sizeof(int64_t)); + SQL_ROW row; + int i=0; + + while((row = db->sql_fetch_row())) { + result[i++] = str_to_int64(row[0]); /* FileId */ + result[i++] = str_to_int64(row[1]); /* JobId */ + result[i++] = str_to_int64(row[2]); /* FilenameId */ + result[i++] = str_to_int64(row[3]); /* PathId */ + } + + i=0; + while (num > 0) { + insert_missing_delta(output_table, result + i); + i += 4; + num--; + } + free(result); + } + + ret = true; + +bail_out: + Mmsg(query, "DROP TABLE btemp%s", output_table); + db->bdb_sql_query(query.c_str(), NULL, NULL); + db->bdb_unlock(); + return ret; +} + +void Bvfs::insert_missing_delta(char *output_table, int64_t *res) +{ + char ed1[50]; + db_list_ctx lst; + POOL_MEM query; + JOB_DBR jr, jr2; + memset(&jr, 0, sizeof(jr)); + memset(&jr2, 0, sizeof(jr2)); + + /* Need to limit the query to StartTime, Client/Fileset */ + jr2.JobId = res[1]; + db->bdb_get_job_record(jcr, &jr2); + + jr.JobId = res[1]; + jr.ClientId = jr2.ClientId; + jr.FileSetId = jr2.FileSetId; + jr.JobLevel = L_INCREMENTAL; + jr.StartTime = jr2.StartTime; + + /* Get accurate jobid list */ + db->bdb_get_accurate_jobids(jcr, &jr, &lst); + + Dmsg2(dbglevel_sql, "JobId list for %lld is %s\n", res[0], lst.list); + + /* The list contains already the last DeltaSeq element, so + * we don't need to select it 
in the next query + */ + for (int l = strlen(lst.list); l > 0; l--) { + if (lst.list[l] == ',') { + lst.list[l] = '\0'; + break; + } + } + + Dmsg1(dbglevel_sql, "JobId list after strip is %s\n", lst.list); + + /* Escape filename */ + db->fnl = strlen((char *)res[2]); + db->esc_name = check_pool_memory_size(db->esc_name, 2*db->fnl+2); + db->bdb_escape_string(jcr, db->esc_name, (char *)res[2], db->fnl); + + edit_int64(res[3], ed1); /* pathid */ + + int id=db->bdb_get_type_index(); + Mmsg(query, bvfs_select_delta_version_with_basejob_and_delta[id], + lst.list, db->esc_name, ed1, + lst.list, db->esc_name, ed1, + lst.list, lst.list); + + Mmsg(db->cmd, "INSERT INTO %s " + "SELECT JobId, FileIndex, FileId FROM (%s) AS F1", + output_table, query.c_str()); + + if (!db->bdb_sql_query(db->cmd, NULL, NULL)) { + Dmsg1(dbglevel_sql, "Can't exec q=%s\n", db->cmd); + } +} + +#endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL */ diff --git a/src/cats/bvfs.h b/src/cats/bvfs.h new file mode 100644 index 00000000..db424649 --- /dev/null +++ b/src/cats/bvfs.h @@ -0,0 +1,307 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +#ifndef __BVFS_H_ +#define __BVFS_H_ 1 + + +/* + * This object can be use to browse the catalog + * + * Bvfs fs; + * fs.set_jobid(10); + * fs.update_cache(); + * fs.ch_dir("/"); + * fs.ls_dirs(); + * fs.ls_files(); + */ + +/* Helper for result handler */ +typedef enum { + BVFS_FILE_RECORD = 'F', + BVFS_DIR_RECORD = 'D', + BVFS_FILE_VERSION = 'V', + BVFS_VOLUME_LIST = 'L', + BVFS_DELTA_RECORD = 'd' +} bvfs_handler_type; + +typedef enum { + BVFS_Type = 0, /* Could be D, F, V, L */ + BVFS_PathId = 1, + BVFS_FilenameId = 2, + + BVFS_Name = 3, /* Can be empty for File version */ + BVFS_JobId = 4, + + BVFS_LStat = 5, /* Can be empty for missing directories */ + BVFS_FileId = 6, /* Can be empty for missing directories */ + + /* Only if Path record */ + BVFS_FileIndex = 7, + + /* Only if File Version record */ + BVFS_Md5 = 7, + BVFS_VolName = 8, + BVFS_VolInchanger = 9, + + /* Only if Delta record */ + BVFS_DeltaSeq = 6, + BVFS_JobTDate = 7 +} bvfs_row_index; + +class Bvfs { + +public: + Bvfs(JCR *j, BDB *mdb); + virtual ~Bvfs(); + + void set_compute_delta(bool val) { + compute_delta = val; + }; + + /* Return the number of jobids after the filter */ + int set_jobid(JobId_t id); + int set_jobids(char *ids); + + char *get_jobids() { + return jobids; + } + + void set_limit(uint32_t max) { + limit = max; + } + + void set_offset(uint32_t nb) { + offset = nb; + } + + void set_pattern(char *p) { + uint32_t len = strlen(p); + pattern = check_pool_memory_size(pattern, len*2+1); + db->bdb_escape_string(jcr, pattern, p, len); + } + + void set_filename(char *p) { + uint32_t len = strlen(p); + filename = check_pool_memory_size(filename, len*2+1); + db->bdb_escape_string(jcr, filename, p, len); + } + + /* Get the root point */ + DBId_t get_root(); + + /* It's much better to access Path though their PathId, it + * avoids mistakes 
with string encoding + */ + bool ch_dir(DBId_t pathid); + + /* + * Returns true if the directory exists + */ + bool ch_dir(const char *path); + + bool ls_files(); /* Returns true if we have more files to read */ + bool ls_dirs(); /* Returns true if we have more dir to read */ + void ls_special_dirs(); /* get . and .. */ + void get_all_file_versions(DBId_t pathid, FileId_t fnid, char *client) { + alist clients(1, not_owned_by_alist); + clients.append(client); + get_all_file_versions(pathid, fnid, &clients); + }; + void get_all_file_versions(DBId_t pathid, FileId_t fnid, alist *clients); + + void update_cache(); + + /* bfileview */ + void fv_update_cache(); + + void set_see_all_versions(bool val) { + see_all_versions = val; + } + + void set_see_copies(bool val) { + see_copies = val; + } + + DBId_t get_dir_filenameid(); + + int filter_jobid(); /* Call after set_username, returns the number of jobids */ + + void set_username(char *user) { + if (user) { + username = bstrdup(user); + } + }; + + char *escape_list(alist *list); + + bool copy_acl(alist *list) { + if (!list || + (list->size() > 0 && + (strcasecmp((char *)list->get(0), "*all*") == 0))) + { + return false; + } + return true; + }; + + /* Keep a pointer to various ACLs */ + void set_job_acl(alist *lst) { + job_acl = copy_acl(lst)?lst:NULL; + use_acl = true; + }; + void set_fileset_acl(alist *lst) { + fileset_acl = copy_acl(lst)?lst:NULL; + use_acl = true; + }; + void set_client_acl(alist *lst) { + client_acl = copy_acl(lst)?lst:NULL; + use_acl = true; + }; + void set_pool_acl(alist *lst) { + pool_acl = copy_acl(lst)?lst:NULL; + use_acl = true; + }; + void set_handler(DB_RESULT_HANDLER *h, void *ctx) { + list_entries = h; + user_data = ctx; + }; + + DBId_t get_pwd() { + return pwd_id; + }; + + ATTR *get_attr() { + return attr; + } + + JCR *get_jcr() { + return jcr; + } + + void reset_offset() { + offset=0; + } + + void next_offset() { + offset+=limit; + } + + /* Clear all cache */ + void clear_cache(); + + /* Compute restore list */ + bool compute_restore_list(char *fileid, char *dirid, char *hardlink, + char *output_table); + + /* Drop previous restore list */ + bool drop_restore_list(char *output_table); + + /* for internal use */ + int _handle_path(void *, int, char **); + + /* Handle Delta parts if any */ + void insert_missing_delta(char *output_table, int64_t *res); + + /* Get a list of volumes */ + void get_volumes(FileId_t fileid); + + /* Get Delta parts of a file */ + bool get_delta(FileId_t fileid); + + /* Check if the parent directories are accessible */ + bool check_path_access(DBId_t pathid); + + /* Check if the full path is authorized by the current set of ACLs */ + bool check_full_path_access(int nb, sellist *sel, db_list_ctx *toexcl); + + alist *dir_acl; + + int check_dirs; /* When it's 1, we check the against directory_acl */ + bool can_access(struct stat *st); + bool can_access_dir(const char *path); + +private: + Bvfs(const Bvfs &); /* prohibit pass by value */ + Bvfs & operator = (const Bvfs &); /* prohibit class assignment */ + + JCR *jcr; + BDB *db; + POOLMEM *jobids; + char *username; /* Used with Bweb */ + + POOLMEM *prev_dir; /* ls_dirs query returns all versions, take the 1st one */ + POOLMEM *pattern; + POOLMEM *filename; + + POOLMEM *tmp; + POOLMEM *escaped_list; + + /* Pointer to Console ACL */ + alist *job_acl; + alist *client_acl; + alist *fileset_acl; + alist *pool_acl; + char *last_dir_acl; + + ATTR *attr; /* Can be use by handler to call decode_stat() */ + + uint32_t limit; + uint32_t offset; + uint32_t 
nb_record; /* number of records of the last query */ + DBId_t pwd_id; /* Current pathid */ + DBId_t dir_filenameid; /* special FilenameId where Name='' */ + + bool see_all_versions; + bool see_copies; + bool compute_delta; + + db_list_ctx fileid_to_delete; /* used also by check_path_access */ + bool need_to_check_permissions(); + bool use_acl; + + /* bfileview */ + void fv_get_big_files(int64_t pathid, int64_t min_size, int32_t limit); + void fv_update_size_and_count(int64_t pathid, int64_t size, int64_t count); + void fv_compute_size_and_count(int64_t pathid, int64_t *size, int64_t *count); + void fv_get_current_size_and_count(int64_t pathid, int64_t *size, int64_t *count); + void fv_get_size_and_count(int64_t pathid, int64_t *size, int64_t *count); + + DB_RESULT_HANDLER *list_entries; + void *user_data; +}; + +#define bvfs_is_dir(row) ((row)[BVFS_Type][0] == BVFS_DIR_RECORD) +#define bvfs_is_file(row) ((row)[BVFS_Type][0] == BVFS_FILE_RECORD) +#define bvfs_is_version(row) ((row)[BVFS_Type][0] == BVFS_FILE_VERSION) +#define bvfs_is_volume_list(row) ((row)[BVFS_Type][0] == BVFS_VOLUME_LIST) +#define bvfs_is_delta_list(row) ((row)[BVFS_Type][0] == BVFS_DELTA_RECORD) + +void bvfs_update_fv_cache(JCR *jcr, BDB *mdb, char *jobids); +int bvfs_update_path_hierarchy_cache(JCR *jcr, BDB *mdb, char *jobids); +void bvfs_update_cache(JCR *jcr, BDB *mdb); +char *bvfs_parent_dir(char *path); + +/* Return the basename of the with the trailing / (update the given string) + * TODO: see in the rest of bacula if we don't have + * this function already + */ +char *bvfs_basename_dir(char *path); + +#endif /* __BVFS_H_ */ diff --git a/src/cats/cats.c b/src/cats/cats.c new file mode 100644 index 00000000..d7469fe4 --- /dev/null +++ b/src/cats/cats.c @@ -0,0 +1,152 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Generic catalog class methods. + * + * Note: at one point, this file was assembled from parts of other files + * by a programmer, and other than "wrapping" in a class, which is a trivial + * change for a C++ programmer, nothing substantial was done, yet all the + * code was recommitted under this programmer's name. Consequently, we + * undo those changes here. 
+ */
+
+#include "bacula.h"
+
+#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL
+
+#include "cats.h"
+
+bool BDB::bdb_match_database(const char *db_driver, const char *db_name,
+                             const char *db_address, int db_port)
+{
+   BDB *mdb = this;
+   bool match;
+
+   if (db_driver) {
+      match = strcasecmp(mdb->m_db_driver, db_driver) == 0 &&
+              bstrcmp(mdb->m_db_name, db_name) &&
+              bstrcmp(mdb->m_db_address, db_address) &&
+              mdb->m_db_port == db_port &&
+              mdb->m_dedicated == false;
+   } else {
+      match = bstrcmp(mdb->m_db_name, db_name) &&
+              bstrcmp(mdb->m_db_address, db_address) &&
+              mdb->m_db_port == db_port &&
+              mdb->m_dedicated == false;
+   }
+   return match;
+}
+
+BDB *BDB::bdb_clone_database_connection(JCR *jcr, bool mult_db_connections)
+{
+   BDB *mdb = this;
+   /*
+    * See if it's a simple clone, e.g. with mult_db_connections set to false;
+    * then we just return the calling class pointer.
+    */
+   if (!mult_db_connections) {
+      mdb->m_ref_count++;
+      return mdb;
+   }
+
+   /*
+    * A bit more to do here: just open a new session to the database.
+    */
+   return db_init_database(jcr, mdb->m_db_driver, mdb->m_db_name,
+              mdb->m_db_user, mdb->m_db_password, mdb->m_db_address,
+              mdb->m_db_port, mdb->m_db_socket,
+              mdb->m_db_ssl_mode, mdb->m_db_ssl_key,
+              mdb->m_db_ssl_cert, mdb->m_db_ssl_ca,
+              mdb->m_db_ssl_capath, mdb->m_db_ssl_cipher,
+              true, mdb->m_disabled_batch_insert);
+}
+
+const char *BDB::bdb_get_engine_name(void)
+{
+   BDB *mdb = this;
+   switch (mdb->m_db_driver_type) {
+   case SQL_DRIVER_TYPE_MYSQL:
+      return "MySQL";
+   case SQL_DRIVER_TYPE_POSTGRESQL:
+      return "PostgreSQL";
+   case SQL_DRIVER_TYPE_SQLITE3:
+      return "SQLite3";
+   default:
+      return "Unknown";
+   }
+}
+
+/*
+ * Lock the database. This can be called multiple times by the same
+ * thread without blocking, but must be unlocked the number of
+ * times it was locked using db_unlock().
+ */
+void BDB::bdb_lock(const char *file, int line)
+{
+   int errstat;
+   BDB *mdb = this;
+
+   if ((errstat = rwl_writelock_p(&mdb->m_lock, file, line)) != 0) {
+      berrno be;
+      e_msg(file, line, M_FATAL, 0, "rwl_writelock failure. stat=%d: ERR=%s\n",
+            errstat, be.bstrerror(errstat));
+   }
+}
+
+/*
+ * Unlock the database. This can be called multiple times by the
+ * same thread up to the number of times that thread called
+ * db_lock().
+ */
+void BDB::bdb_unlock(const char *file, int line)
+{
+   int errstat;
+   BDB *mdb = this;
+
+   if ((errstat = rwl_writeunlock(&mdb->m_lock)) != 0) {
+      berrno be;
+      e_msg(file, line, M_FATAL, 0, "rwl_writeunlock failure. stat=%d: ERR=%s\n",
+            errstat, be.bstrerror(errstat));
+   }
+}
+
+bool BDB::bdb_sql_query(const char *query, int flags)
+{
+   bool retval;
+   BDB *mdb = this;
+
+   bdb_lock();
+   retval = sql_query(query, flags);
+   if (!retval) {
+      Mmsg(mdb->errmsg, _("Query failed: %s: ERR=%s\n"), query, sql_strerror());
+   }
+   bdb_unlock();
+   return retval;
+}
+
+void BDB::print_lock_info(FILE *fp)
+{
+   BDB *mdb = this;
+   if (mdb->m_lock.valid == RWLOCK_VALID) {
+      fprintf(fp, "\tRWLOCK=%p w_active=%i w_wait=%i\n",
+              &mdb->m_lock, mdb->m_lock.w_active, mdb->m_lock.w_wait);
+   }
+}
+
+#endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL */
diff --git a/src/cats/cats.h b/src/cats/cats.h
new file mode 100644
index 00000000..9801082f
--- /dev/null
+++ b/src/cats/cats.h
@@ -0,0 +1,629 @@
+/*
+   Bacula(R) - The Network Backup Solution
+
+   Copyright (C) 2000-2017 Kern Sibbald
+
+   The original author of Bacula is Kern Sibbald, with contributions
+   from many others, a complete list can be found in the file AUTHORS.
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Catalog DB header file + * + * Written by Kern E. Sibbald + * + * Anyone who accesses the database will need to include + * this file. + */ + +#ifndef __CATS_H_ +#define __CATS_H_ 1 + +/* + Here is how database versions work. + + While I am working on a new release with database changes, the + update scripts are in the src/cats directory under the names + update_xxx_tables.in. Most of the time, I make database updates + in one go and immediately update the version, but not always. If + there are going to be several updates as is the case with version + 1.37, then I will often forgo changing the version until the last + update otherwise I will end up with too many versions and a lot + of confusion. + + When I am pretty sure there will be no more updates, I will + change the version from 8 to 9 (in the present case), and when I + am 100% sure there will be no more changes, the update script + will be copied to the updatedb directory with the correct name + (in the present case 8 to 9). + */ + +/* Current database version number for all drivers */ +#define BDB_VERSION 16 + +typedef void (DB_LIST_HANDLER)(void *, const char *); +typedef int (DB_RESULT_HANDLER)(void *, int, char **); + +/* What kind of database we have */ +typedef enum { + SQL_TYPE_MYSQL = 0, + SQL_TYPE_POSTGRESQL = 1, + SQL_TYPE_SQLITE3 = 2, + SQL_TYPE_UNKNOWN = 99 +} SQL_DBTYPE; + +/* What kind of driver we have */ +typedef enum { + SQL_DRIVER_TYPE_MYSQL = 0, + SQL_DRIVER_TYPE_POSTGRESQL = 1, + SQL_DRIVER_TYPE_SQLITE3 = 2 +} SQL_DRIVER; + + +/* ============================================================== + * + * What follows are definitions that are used "globally" for all + * the different SQL engines and both inside and external to the + * cats directory. + */ + +#define faddr_t long + +/* + * Generic definition of a sql_row. + */ +typedef char **SQL_ROW; + +/* + * Generic definition of a a sql_field. + */ +typedef struct sql_field { + char *name; /* name of column */ + int max_length; /* max length */ + uint32_t type; /* type */ + uint32_t flags; /* flags */ +} SQL_FIELD; + + +/* + * Structure used when calling db_get_query_ids() + * allows the subroutine to return a list of ids. + */ +class dbid_list : public SMARTALLOC { +public: + DBId_t *DBId; /* array of DBIds */ + char *PurgedFiles; /* Array of PurgedFile flags */ + int num_ids; /* num of ids actually stored */ + int max_ids; /* size of id array */ + int num_seen; /* number of ids processed */ + int tot_ids; /* total to process */ + + dbid_list(); /* in sql.c */ + ~dbid_list(); /* in sql.c */ +}; + +/* Job information passed to create job record and update + * job record at end of job. Note, although this record + * contains all the fields found in the Job database record, + * it also contains fields found in the JobMedia record. 
+ */ +/* Job record */ +struct JOB_DBR { + JobId_t JobId; + char Job[MAX_NAME_LENGTH]; /* Job unique name */ + char Name[MAX_NAME_LENGTH]; /* Job base name */ + char PriorJob[MAX_NAME_LENGTH]; /* PriorJob name if any */ + int JobType; /* actually char(1) */ + int JobLevel; /* actually char(1) */ + int JobStatus; /* actually char(1) */ + DBId_t ClientId; /* Id of client */ + DBId_t PoolId; /* Id of pool */ + DBId_t FileSetId; /* Id of FileSet */ + DBId_t PriorJobId; /* Id of migrated (prior) job */ + time_t SchedTime; /* Time job scheduled */ + time_t StartTime; /* Job start time */ + time_t EndTime; /* Job termination time of orig job */ + time_t RealEndTime; /* Job termination time of this job */ + utime_t JobTDate; /* Backup time/date in seconds */ + uint32_t VolSessionId; + uint32_t VolSessionTime; + uint32_t JobFiles; + uint32_t JobErrors; + uint32_t JobMissingFiles; + uint64_t JobBytes; + uint64_t ReadBytes; + int PurgedFiles; + int HasBase; + + /* Note, FirstIndex, LastIndex, Start/End File and Block + * are only used in the JobMedia record. + */ + uint32_t FirstIndex; /* First index this Volume */ + uint32_t LastIndex; /* Last index this Volume */ + uint32_t StartFile; + uint32_t EndFile; + uint32_t StartBlock; + uint32_t EndBlock; + + char cSchedTime[MAX_TIME_LENGTH]; + char cStartTime[MAX_TIME_LENGTH]; + char cEndTime[MAX_TIME_LENGTH]; + char cRealEndTime[MAX_TIME_LENGTH]; + /* Extra stuff not in DB */ + int order; /* 0 ASC, 1 DESC */ + int limit; /* limit records to display */ + faddr_t rec_addr; + int32_t FileIndex; /* added during Verify */ + + int CorrNbJob; /* used by dbd_get_job_statistics() */ + int CorrJobBytes; /* used by dbd_get_job_statistics() */ + int CorrJobFiles; /* used by dbd_get_job_statistics() */ +}; + +/* Job Media information used to create the media records + * for each Volume used for the job. + */ +/* JobMedia record */ +struct JOBMEDIA_DBR { + DBId_t JobMediaId; /* record id */ + JobId_t JobId; /* JobId */ + DBId_t MediaId; /* MediaId */ + uint32_t FirstIndex; /* First index this Volume */ + uint32_t LastIndex; /* Last index this Volume */ + uint32_t StartFile; /* File for start of data */ + uint32_t EndFile; /* End file on Volume */ + uint32_t StartBlock; /* start block on tape */ + uint32_t EndBlock; /* last block */ +}; + + +/* Volume Parameter structure */ +struct VOL_PARAMS { + char VolumeName[MAX_NAME_LENGTH]; /* Volume name */ + char MediaType[MAX_NAME_LENGTH]; /* Media Type */ + char Storage[MAX_NAME_LENGTH]; /* Storage name */ + uint32_t VolIndex; /* Volume seqence no. */ + uint32_t FirstIndex; /* First index this Volume */ + uint32_t LastIndex; /* Last index this Volume */ + int32_t Slot; /* Slot */ + uint64_t StartAddr; /* Start address */ + uint64_t EndAddr; /* End address */ + int32_t InChanger; /* InChanger flag */ +}; + + +/* Attributes record -- NOT same as in database because + * in general, this "record" creates multiple database + * records (e.g. pathname, filename, fileattributes). 
+ */ +struct ATTR_DBR { + char *fname; /* full path & filename */ + char *link; /* link if any */ + char *attr; /* attributes statp */ + int32_t FileIndex; + uint32_t Stream; + uint32_t FileType; + uint32_t DeltaSeq; + JobId_t JobId; + DBId_t ClientId; + DBId_t PathId; + DBId_t FilenameId; + FileId_t FileId; + char *Digest; + int DigestType; +}; + +struct ROBJECT_DBR { + char *object_name; + char *object; + char *plugin_name; + char *JobIds; + uint32_t object_len; + uint32_t object_full_len; + uint32_t object_index; + int32_t object_compression; + int32_t FileIndex; + uint32_t Stream; + uint32_t FileType; + JobId_t JobId; + DBId_t RestoreObjectId; +}; + + +/* File record -- same format as database */ +struct FILE_DBR { + FileId_t FileId; + int32_t FileIndex; + int32_t FileIndex2; + JobId_t JobId; + DBId_t FilenameId; + DBId_t PathId; + JobId_t MarkId; + uint32_t DeltaSeq; + char LStat[256]; + char Digest[BASE64_SIZE(CRYPTO_DIGEST_MAX_SIZE)]; + int DigestType; /* NO_SIG/MD5_SIG/SHA1_SIG */ +}; + +/* Pool record -- same format as database */ +class POOL_DBR { +public: + /* + * Do not turn on constructor until all bmemset on POOL_DBR removed + * + * POOL_DBR() { bmemset(this, 0, sizeof(POOL_DBR)); }; + * ~POOL_DBR() { }; + */ + DBId_t PoolId; + char Name[MAX_NAME_LENGTH]; /* Pool name */ + uint32_t NumVols; /* total number of volumes */ + uint32_t MaxVols; /* max allowed volumes */ + int32_t LabelType; /* Bacula/ANSI/IBM */ + int32_t UseOnce; /* set to use once only */ + int32_t UseCatalog; /* set to use catalog */ + int32_t AcceptAnyVolume; /* set to accept any volume sequence */ + int32_t AutoPrune; /* set to prune automatically */ + int32_t Recycle; /* default Vol recycle flag */ + uint32_t ActionOnPurge; /* action on purge, e.g. truncate the disk volume */ + utime_t VolRetention; /* retention period in seconds */ + utime_t CacheRetention; /* cache retention period in seconds */ + utime_t VolUseDuration; /* time in secs volume can be used */ + uint32_t MaxVolJobs; /* Max Jobs on Volume */ + uint32_t MaxVolFiles; /* Max files on Volume */ + uint64_t MaxVolBytes; /* Max bytes on Volume */ + DBId_t RecyclePoolId; /* RecyclePool destination when media is purged */ + DBId_t ScratchPoolId; /* ScratchPool source when media is needed */ + char PoolType[MAX_NAME_LENGTH]; + char LabelFormat[MAX_NAME_LENGTH]; + /* Extra stuff not in DB */ + faddr_t rec_addr; +}; + +class DEVICE_DBR { +public: + DBId_t DeviceId; + char Name[MAX_NAME_LENGTH]; /* Device name */ + DBId_t MediaTypeId; /* MediaType */ + DBId_t StorageId; /* Storage id if autochanger */ + uint32_t DevMounts; /* Number of times mounted */ + uint32_t DevErrors; /* Number of read/write errors */ + uint64_t DevReadBytes; /* Number of bytes read */ + uint64_t DevWriteBytes; /* Number of bytew written */ + uint64_t DevReadTime; /* time spent reading volume */ + uint64_t DevWriteTime; /* time spent writing volume */ + uint64_t DevReadTimeSincCleaning; /* read time since cleaning */ + uint64_t DevWriteTimeSincCleaning; /* write time since cleaning */ + time_t CleaningDate; /* time last cleaned */ + utime_t CleaningPeriod; /* time between cleanings */ +}; + +class STORAGE_DBR { +public: + DBId_t StorageId; + char Name[MAX_NAME_LENGTH]; /* Device name */ + int AutoChanger; /* Set if autochanger */ + + /* Not in database */ + bool created; /* set if created by db_create ... 
*/ +}; + +class MEDIATYPE_DBR { +public: + DBId_t MediaTypeId; + char MediaType[MAX_NAME_LENGTH]; /* MediaType string */ + int ReadOnly; /* Set if read-only */ +}; + +/* Media record -- same as the database */ +class MEDIA_DBR { +public: + MEDIA_DBR() { memset(this, 0, sizeof(MEDIA_DBR)); }; + ~MEDIA_DBR() { }; + void clear() { memset(this, 0, sizeof(MEDIA_DBR)); }; + void copy(MEDIA_DBR *omr) { memcpy(this, omr, sizeof(MEDIA_DBR)); sid_group = NULL; }; + + DBId_t MediaId; /* Unique volume id */ + char VolumeName[MAX_NAME_LENGTH]; /* Volume name */ + char MediaType[MAX_NAME_LENGTH]; /* Media type */ + DBId_t PoolId; /* Pool id */ + time_t FirstWritten; /* Time Volume first written this usage */ + time_t LastWritten; /* Time Volume last written */ + time_t LabelDate; /* Date/Time Volume labeled */ + time_t InitialWrite; /* Date/Time Volume first written */ + int32_t LabelType; /* Label (Bacula/ANSI/IBM) */ + uint32_t VolJobs; /* number of jobs on this medium */ + uint32_t VolFiles; /* Number of files */ + uint32_t VolBlocks; /* Number of blocks */ + uint32_t VolParts; /* Number of cache parts */ + uint32_t VolCloudParts; /* Number of cloud parts */ + uint32_t VolMounts; /* Number of times mounted */ + uint32_t VolErrors; /* Number of read/write errors */ + uint64_t VolWrites; /* Number of writes */ + uint64_t VolReads; /* Number of reads */ + uint64_t VolBytes; /* Number of bytes written */ + uint64_t VolABytes; /* Size of aligned volume */ + uint64_t VolHoleBytes; /* The size of Holes */ + uint32_t VolHoles; /* Number of holes */ + uint32_t VolType; /* Device type of where Volume labeled */ + uint64_t MaxVolBytes; /* Max bytes to write to Volume */ + uint64_t VolCapacityBytes; /* capacity estimate */ + uint64_t LastPartBytes; /* Bytes in last part */ + uint64_t VolReadTime; /* time spent reading volume */ + uint64_t VolWriteTime; /* time spent writing volume */ + utime_t VolRetention; /* Volume retention in seconds */ + utime_t CacheRetention; /* Cache retention period in second */ + utime_t VolUseDuration; /* time in secs volume can be used */ + uint32_t ActionOnPurge; /* action on purge, e.g. truncate the disk volume */ + uint32_t MaxVolJobs; /* Max Jobs on Volume */ + uint32_t MaxVolFiles; /* Max files on Volume */ + int32_t Recycle; /* recycle yes/no */ + int32_t Slot; /* slot in changer */ + int32_t Enabled; /* 0=disabled, 1=enabled, 2=archived */ + int32_t InChanger; /* Volume currently in changer */ + DBId_t StorageId; /* Storage record Id */ + uint32_t EndFile; /* Last file on volume */ + uint32_t EndBlock; /* Last block on volume */ + uint32_t RecycleCount; /* Number of times recycled */ + char VolStatus[20]; /* Volume status */ + DBId_t DeviceId; /* Device where Vol last written */ + DBId_t LocationId; /* Where Volume is -- user defined */ + DBId_t ScratchPoolId; /* Where to move if scratch */ + DBId_t RecyclePoolId; /* Where to move when recycled */ + /* Extra stuff not in DB */ + faddr_t rec_addr; /* found record address */ + /* Since the database returns times as strings, this is how we pass + * them back. 
+ */ + char cFirstWritten[MAX_TIME_LENGTH]; /* FirstWritten returned from DB */ + char cLastWritten[MAX_TIME_LENGTH]; /* LastWritten returned from DB */ + char cLabelDate[MAX_TIME_LENGTH]; /* LabelData returned from DB */ + char cInitialWrite[MAX_TIME_LENGTH]; /* InitialWrite returned from DB */ + char *exclude_list; /* Optionnal exclude list for db_find_next_volume() */ + char *sid_group; /* Storageid group string */ + char sid[30]; /* edited StorageId */ + bool set_first_written; + bool set_label_date; +}; + +/* Client record -- same as the database */ +struct CLIENT_DBR { + DBId_t ClientId; /* Unique Client id */ + int AutoPrune; + utime_t FileRetention; + utime_t JobRetention; + char Name[MAX_NAME_LENGTH]; /* Client name */ + char Uname[256]; /* Uname for client */ +}; + +/* Counter record as in database */ +struct COUNTER_DBR { + char Counter[MAX_NAME_LENGTH]; + int32_t MinValue; + int32_t MaxValue; + int32_t CurrentValue; + char WrapCounter[MAX_NAME_LENGTH]; +}; + + +/* FileSet record -- same as the database */ +struct FILESET_DBR { + DBId_t FileSetId; /* Unique FileSet id */ + char FileSet[MAX_NAME_LENGTH]; /* FileSet name */ + char MD5[50]; /* MD5 signature of include/exclude */ + time_t CreateTime; /* date created */ + /* + * This is where we return CreateTime + */ + char cCreateTime[MAX_TIME_LENGTH]; /* CreateTime as returned from DB */ + /* Not in DB but returned by db_create_fileset() */ + bool created; /* set when record newly created */ +}; + +class SNAPSHOT_DBR { +public: + SNAPSHOT_DBR() { + memset(this, 0, sizeof(SNAPSHOT_DBR)); + }; + ~SNAPSHOT_DBR() { + reset(); + }; + void debug(int level) { + Dmsg8(DT_SNAPSHOT|level, + "Snapshot %s:\n" + " Volume: %s\n" + " Device: %s\n" + " Id: %d\n" + " FileSet: %s\n" + " CreateDate: %s\n" + " Client: %s\n" + " Type: %s\n", + Name, NPRT(Volume), NPRT(Device), SnapshotId, + FileSet, CreateDate, Client, Type); + }; + char *as_arg(POOLMEM **out) { + bash_spaces(Name); + bash_spaces(Type); + + if (Volume) { + bash_spaces(Volume); + } + if (Device) { + bash_spaces(Device); + } + + Mmsg(out, "name=%s volume=%s device=%s tdate=%d type=%s", + Name, NPRTB(Volume), NPRTB(Device), CreateTDate, Type); + + unbash_spaces(Name); + unbash_spaces(Type); + if (Volume) { + unbash_spaces(Volume); + } + if (Device) { + unbash_spaces(Device); + } + return *out; + }; + void reset() { + if (need_to_free) { + if (Volume) { + free(Volume); + } + if (Device) { + free(Device); + } + if (errmsg) { + free(errmsg); + } + errmsg = Volume = Device = NULL; + } + need_to_free = false; + }; + bool need_to_free; /* Need to free the internal memory */ + /* Used when searching snapshots */ + char created_after[MAX_TIME_LENGTH]; + char created_before[MAX_TIME_LENGTH]; + bool expired; /* Look for CreateTDate > (NOW - Retention) */ + bool sorted_client; /* Results sorted by Client, SnapshotId */ + int status; /* Status of the snapshot */ + + DBId_t SnapshotId; /* Unique Snapshot ID */ + DBId_t JobId; /* Related JobId */ + DBId_t FileSetId; /* FileSetId if any */ + DBId_t ClientId; /* From which client this snapshot comes */ + char Name[MAX_NAME_LENGTH]; /* Snapshot Name */ + char FileSet[MAX_NAME_LENGTH];/* FileSet name if any */ + char Client[MAX_NAME_LENGTH]; /* Client name */ + char Type[MAX_NAME_LENGTH]; /* zfs, btrfs, lvm, netapp, */ + char Comment[MAX_NAME_LENGTH];/* Comment */ + char CreateDate[MAX_TIME_LENGTH]; /* Create date as string */ + time_t CreateTDate; /* Create TDate (in sec, since epoch) */ + char *Volume; /* Volume taken in snapshot */ + char *Device; /* 
Device, Pool, Directory, ... */ + char *errmsg; /* Error associated with a snapshot */ + utime_t Retention; /* Number of second before pruning the snapshot */ + uint64_t Size; /* Snapshot Size */ +}; + +/* Call back context for getting a 32/64 bit value from the database */ +class db_int64_ctx { +public: + int64_t value; /* value returned */ + int count; /* number of values seen */ + + db_int64_ctx() : value(0), count(0) {}; + ~db_int64_ctx() {}; +private: + db_int64_ctx(const db_int64_ctx&); /* prohibit pass by value */ + db_int64_ctx &operator=(const db_int64_ctx&); /* prohibit class assignment */ +}; + +/* Call back context for getting a list of comma separated strings from the + * database + */ +class db_list_ctx { +public: + POOLMEM *list; /* list */ + int count; /* number of values seen */ + + db_list_ctx() { list = get_pool_memory(PM_FNAME); reset(); } + ~db_list_ctx() { free_pool_memory(list); list = NULL; } + void reset() { *list = 0; count = 0;} + void add(const db_list_ctx &str) { + if (str.count > 0) { + if (*list) { + pm_strcat(list, ","); + } + pm_strcat(list, str.list); + count += str.count; + } + } + void add(const char *str) { + if (count > 0) { + pm_strcat(list, ","); + } + pm_strcat(list, str); + count++; + } +private: + db_list_ctx(const db_list_ctx&); /* prohibit pass by value */ + db_list_ctx &operator=(const db_list_ctx&); /* prohibit class assignment */ +}; + +/* sql_query flags */ +#define QF_STORE_RESULT 0x01 + +/* sql_list.c */ +enum e_list_type { + HORZ_LIST, /* list */ + VERT_LIST, /* llist */ + ARG_LIST, /* key1=v1 key2=v2 key3=v3 */ + FAILED_JOBS, + INCOMPLETE_JOBS +}; + +#include "bdb.h" +#include "protos.h" +#include "jcr.h" +#include "sql_cmds.h" + + +/* Object used in db_list_xxx function */ +class LIST_CTX { +public: + char line[256]; /* Used to print last dash line */ + int32_t num_rows; + + e_list_type type; /* Vertical/Horizontal */ + DB_LIST_HANDLER *send; /* send data back */ + bool once; /* Used to print header one time */ + void *ctx; /* send() user argument */ + BDB *mdb; + JCR *jcr; + + void empty() { + once = false; + line[0] = '\0'; + } + + void send_dashes() { + if (*line) { + send(ctx, line); + } + } + + LIST_CTX(JCR *j, BDB *m, DB_LIST_HANDLER *h, void *c, e_list_type t) { + line[0] = '\0'; + once = false; + num_rows = 0; + type = t; + send = h; + ctx = c; + jcr = j; + mdb = m; + } +}; + +/* Functions exported by sql.c for use within the cats directory. */ +int list_result(void *vctx, int cols, char **row); +int list_result(JCR *jcr, BDB *mdb, DB_LIST_HANDLER *send, void *ctx, e_list_type type); +int get_sql_record_max(JCR *jcr, BDB *mdb); +void list_dashes(BDB *mdb, DB_LIST_HANDLER *send, void *ctx); + +void print_dashes(BDB *mdb); +void print_result(BDB *mdb); +int QueryDB(const char *file, int line, JCR *jcr, BDB *db, char *select_cmd); +int InsertDB(const char *file, int line, JCR *jcr, BDB *db, char *select_cmd); +int DeleteDB(const char *file, int line, JCR *jcr, BDB *db, char *delete_cmd); +void split_path_and_file(JCR *jcr, BDB *mdb, const char *fname); + +#endif /* __CATS_H_ */ diff --git a/src/cats/cats_null.c b/src/cats/cats_null.c new file mode 100644 index 00000000..ac68388d --- /dev/null +++ b/src/cats/cats_null.c @@ -0,0 +1,37 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Null Bacula backend function replaced with the correct one + * at install time. + */ + +#include "bacula.h" +#include "cats.h" + +BDB *db_init_database(JCR *jcr, const char *db_driver, const char *db_name, + const char *db_user, const char *db_password, const char *db_address, + int db_port, const char *db_socket, + const char *db_ssl_mode, const char *db_ssl_key, + const char *db_ssl_cert, const char *db_ssl_ca, + const char *db_ssl_capath, const char *db_ssl_cipher, + bool mult_db_connections, bool disable_batch_insert) +{ + Jmsg(jcr, M_FATAL, 0, _("Please replace this null libbaccats library with a proper one.\n")); + return NULL; +} diff --git a/src/cats/create_bacula_database.in b/src/cats/create_bacula_database.in new file mode 100644 index 00000000..08139779 --- /dev/null +++ b/src/cats/create_bacula_database.in @@ -0,0 +1,50 @@ +#!/bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# This routine creates the Bacula database +# using PostgreSQL, Ingres, MySQL, or SQLite. +# + +# can be used to change the current user with su +pre_command="sh -c" + +default_db_type=@DEFAULT_DB_TYPE@ + +# +# See if the first argument is a valid backend name. +# If so the user overrides the default database backend. +# +if [ $# -gt 0 ]; then + case $1 in + sqlite3) + db_type=$1 + shift + ;; + mysql) + db_type=$1 + shift + ;; + postgresql) + db_type=$1 + shift + ;; + *) + ;; + esac +fi + +# +# If no new db_type is gives use the default db_type. +# +if [ -z "${db_type}" ]; then + db_type="${default_db_type}" +fi + +if [ $db_type = postgresql -a "$UID" = 0 ]; then + pre_command="su - postgres -c" +fi + +echo "Creating ${db_type} database" +$pre_command "@scriptdir@/create_${db_type}_database $*" diff --git a/src/cats/create_mysql_database.in b/src/cats/create_mysql_database.in new file mode 100644 index 00000000..63199f80 --- /dev/null +++ b/src/cats/create_mysql_database.in @@ -0,0 +1,20 @@ +#!/bin/sh +# +# Copyright (C) 2000-2017 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# shell script to create Bacula database(s) +# + +bindir=@MYSQL_BINDIR@ +db_name=@db_name@ + +if $bindir/mysql $* -f </dev/null; then + echo "Database encoding OK" +else + echo " " + echo "Database encoding bad. Do not use this database" + echo " " + exit 1 +fi diff --git a/src/cats/create_sqlite3_database.in b/src/cats/create_sqlite3_database.in new file mode 100644 index 00000000..28a9b55d --- /dev/null +++ b/src/cats/create_sqlite3_database.in @@ -0,0 +1,15 @@ +#!/bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# shell script to create Bacula SQLite tables + +bindir=@SQLITE_BINDIR@ +PATH=$bindir:$PATH +cd @working_dir@ +db_name=@db_name@ + +sqlite3 $* ${db_name}.db </dev/null 1>/dev/null < " + exit 1 +fi + +default_backend=$1 +library_version=$2 +install_dir=$3 + +# +# Find out what the shared lib extension is for this platform. 
+# +eval `${LIBTOOL} --config | grep shrext_cmds` +eval SHLIB_EXT=$shrext_cmds + +if [ -z "${SHLIB_EXT}" ]; then + echo "Failed to determine default shared library extension" + exit 1 +fi + +if [ -f ${install_dir}/libbaccats-${default_backend}-${library_version}${SHLIB_EXT} ]; then + # + # Create a default catalog library pointing to one of the shared libs. + # + rm -f ${install_dir}/libbaccats-${library_version}${SHLIB_EXT} + + # + # Create a relative symlink to the default backend + # As all backends are in the same directory anyhow this should + # always work. + # + ln -s libbaccats-${default_backend}${SHLIB_EXT} \ + ${install_dir}/libbaccats-${library_version}${SHLIB_EXT} +fi + +exit 0 diff --git a/src/cats/make_bacula_tables.in b/src/cats/make_bacula_tables.in new file mode 100755 index 00000000..285b6cd3 --- /dev/null +++ b/src/cats/make_bacula_tables.in @@ -0,0 +1,50 @@ +#!/bin/sh +# +# Copyright (C) 2000-2017 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# This routine makes the appropriately configured +# Bacula tables for PostgreSQL, Ingres, MySQL, or SQLite. +# + +# can be used to change the current user with su +pre_command="sh -c" + +default_db_type=@DEFAULT_DB_TYPE@ + +# +# See if the first argument is a valid backend name. +# If so the user overrides the default database backend. +# +if [ $# -gt 0 ]; then + case $1 in + sqlite3) + db_type=$1 + shift + ;; + mysql) + db_type=$1 + shift + ;; + postgresql) + db_type=$1 + shift + ;; + *) + ;; + esac +fi + +# +# If no new db_type is gives use the default db_type. +# +if [ -z "${db_type}" ]; then + db_type="${default_db_type}" +fi + +if [ $db_type = postgresql -a "$UID" = 0 ]; then + pre_command="su - postgres -c" +fi + +echo "Making ${db_type} tables" +$pre_command "@scriptdir@/make_${db_type}_tables $*" diff --git a/src/cats/make_catalog_backup.in b/src/cats/make_catalog_backup.in new file mode 100755 index 00000000..97a97f61 --- /dev/null +++ b/src/cats/make_catalog_backup.in @@ -0,0 +1,111 @@ +#!/bin/sh +# +# Copyright (C) 2000-2017 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# This script dumps your Bacula catalog in ASCII format +# It works for MySQL, SQLite, and PostgreSQL +# +# $1 is the name of the database to be backed up and the name +# of the output file (default = bacula). +# $2 is the user name with which to access the database +# (default = bacula). +# $3 is the password with which to access the database or "" if no password +# (default ""). WARNING!!! Passing the password via the command line is +# insecure and should not be used since any user can display the command +# line arguments and the environment using ps. Please consult your +# MySQL or PostgreSQL manual for secure methods of specifying the +# password. +# $4 is the host on which the database is located +# (default "") +# $5 is the type of database +# +# + +default_db_type=@DEFAULT_DB_TYPE@ +user=${2:-@db_user@} + +# +# See if the fifth argument is a valid backend name. +# If so the user overrides the default database backend. +# +if [ $# -ge 5 ]; then + case $5 in + sqlite3) + db_type=$5 + ;; + mysql) + db_type=$5 + ;; + postgresql) + db_type=$5 + ;; + ingres) + db_type=$5 + ;; + *) + ;; + esac +fi + +# +# If no new db_type is gives use the default db_type. 
+# +if [ -z "${db_type}" ]; then + db_type="${default_db_type}" +fi + +cd @working_dir@ +rm -f $1.sql + +case ${db_type} in + sqlite3) + BINDIR=@SQLITE_BINDIR@ + echo ".dump" | ${BINDIR}/sqlite3 $1.db >$1.sql + ;; + mysql) + BINDIR=@MYSQL_BINDIR@ + if test $# -gt 2; then + MYSQLPASSWORD=" --password=$3" + else + MYSQLPASSWORD="" + fi + if test $# -gt 3; then + MYSQLHOST=" --host=$4" + else + MYSQLHOST="" + fi + ${BINDIR}/mysqldump -u ${user}${MYSQLPASSWORD}${MYSQLHOST} -f --opt $1 >$1.sql + ;; + postgresql) + BINDIR=@POSTGRESQL_BINDIR@ + if test $# -gt 2; then + PGPASSWORD=$3 + export PGPASSWORD + fi + if test $# -gt 3; then + PGHOST=" --host=$4" + else + PGHOST="" + fi + # you could also add --compress for compression. See man pg_dump + exec ${BINDIR}/pg_dump -c $PGHOST -U $user $1 >$1.sql + ;; +esac +# +# To read back a MySQL database use: +# cd @working_dir@ +# rm -f ${BINDIR}/../var/bacula/* +# mysql '$wd/$args{db_name}.sql'"); + print "Error while executing sqlite dump $!\n"; + return 1; +} + +# TODO: use just ENV and drop the pg_service.conf file +sub setup_env_pgsql +{ + my %args = @_; + my $username = getpwuid $ENV{'UID'}; + umask(0077); + + if ($args{db_address}) { + $ENV{PGHOST}=$args{db_address}; + } + if ($args{db_socket}) { + $ENV{PGHOST}=$args{db_socket}; + } + if ($args{db_port}) { + $ENV{PGPORT}=$args{db_port}; + } + if ($args{db_user}) { + $ENV{PGUSER}=$args{db_user}; + } + if ($args{db_password}) { + $ENV{PGPASSWORD}=$args{db_password}; + } + $ENV{PGDATABASE}=$args{db_name}; + system("echo '\\q' | HOME='$wd' psql") == 0 or die "$username doesn't have access to the catalog database\n"; +} + +sub dump_pgsql +{ + my %args = @_; + setup_env_pgsql(%args); + exec("HOME='$wd' pg_dump -c > '$wd/$args{db_name}.sql'"); + print "Error while executing postgres dump $!\n"; + return 1; # in case of error +} + +sub analyse_pgsql +{ + my %args = @_; + setup_env_pgsql(%args); + my @output =`LANG=C HOME='$wd' vacuumdb -z 2>&1`; + my $exitcode = $? >> 8; + print grep { !/^WARNING:\s+skipping\s\"(pg_|sql_)/ } @output; + if ($exitcode != 0) { + print "Error while executing postgres analyse. 
Exitcode=$exitcode\n"; + } + return $exitcode; +} + +sub setup_env_mysql +{ + my %args = @_; + umask(0077); + unlink("$wd/.my.cnf"); + open(MY, ">$wd/.my.cnf") + or die "Can't open $wd/.my.cnf for writing $@"; + + $args{db_address} = $args{db_address} || "localhost"; + my $addr = "host=$args{db_address}"; + if ($args{db_socket}) { # unix socket is fastest than net socket + $addr = "socket=\"$args{db_socket}\""; + } + my $mode = $args{mode} || 'client'; + print MY "[$mode] +$addr +user=\"$args{db_user}\" +password=\"$args{db_password}\" +"; + if ($args{db_port}) { + print MY "port=$args{db_port}\n"; + } + close(MY); +} + +sub dump_mysql +{ + my %args = @_; + + setup_env_mysql(%args); + exec("HOME='$wd' mysqldump -f --opt $args{db_name} > '$wd/$args{db_name}.sql'"); + print "Error while executing mysql dump $!\n"; + return 1; +} + +sub analyse_mysql +{ + my %args = @_; + + $args{mode} = 'mysqlcheck'; + setup_env_mysql(%args); + + exec("HOME='$wd' mysqlcheck -a $args{db_name}"); + print "Error while executing mysql analyse $!\n"; + return 1; +} + +sub handle_catalog +{ + my ($mode, %args) = @_; + if ($args{db_type} eq 'SQLite3') { + $ENV{PATH}="@SQLITE_BINDIR@:$ENV{PATH}"; + if ($mode eq 'dump') { + dump_sqlite3(%args); + } + } elsif ($args{db_type} eq 'PostgreSQL') { + $ENV{PATH}="@POSTGRESQL_BINDIR@:$ENV{PATH}"; + if ($mode eq 'dump') { + dump_pgsql(%args); + } else { + analyse_pgsql(%args); + } + } elsif ($args{db_type} eq 'MySQL') { + $ENV{PATH}="@MYSQL_BINDIR@:$ENV{PATH}"; + if ($mode eq 'dump') { + dump_mysql(%args); + } else { + analyse_mysql(%args); + } + } else { + die "This database type isn't supported"; + } +} + +open(FP, "$dir_conf -C '$cat'|") or die "Can't get catalog information $@"; +# catalog=MyCatalog +# db_type=SQLite +# db_name=regress +# db_driver= +# db_user=regress +# db_password= +# db_address= +# db_port=0 +# db_socket= +my %cfg; + +while(my $l = ) +{ + if ($l =~ /catalog=(.+)/) { + if (exists $cfg{catalog} and $cfg{catalog} eq $cat) { + exit handle_catalog($mode, %cfg); + } + %cfg = (); # reset + } + + if ($l =~ /(\w+)=(.+)/) { + $cfg{$1}=$2; + } +} + +if (exists $cfg{catalog} and $cfg{catalog} eq $cat) { + exit handle_catalog($mode, %cfg); +} + +print "Can't find your catalog ($cat) in director configuration\n"; +exit 1; diff --git a/src/cats/make_mysql_tables.in b/src/cats/make_mysql_tables.in new file mode 100644 index 00000000..f41a6345 --- /dev/null +++ b/src/cats/make_mysql_tables.in @@ -0,0 +1,487 @@ +#!/bin/sh +# +# shell script to create Bacula MySQL tables +# +# Copyright (C) 2000-2017 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# Important note: +# You won't get any support for performance issue if you changed the +# default # schema. +# +# Useful commands: +# mysql -u root +# show databases; +# show tables from ; +# show columns from
from ; +# +# use mysql; +# select user from user; +# +bindir=@MYSQL_BINDIR@ +PATH="$bindir:$PATH" +db_name=${db_name:-@db_name@} + +if mysql $* -f < +#define __BDB_MYSQL_H_ 1 +#include "bdb_mysql.h" + +/* ----------------------------------------------------------------------- + * + * MySQL dependent defines and subroutines + * + * ----------------------------------------------------------------------- + */ + +/* List of open databases */ +static dlist *db_list = NULL; + +static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; + +BDB_MYSQL::BDB_MYSQL(): BDB() +{ + BDB_MYSQL *mdb = this; + + if (db_list == NULL) { + db_list = New(dlist(this, &this->m_link)); + } + mdb->m_db_driver_type = SQL_DRIVER_TYPE_MYSQL; + mdb->m_db_type = SQL_TYPE_MYSQL; + mdb->m_db_driver = bstrdup("MySQL"); + mdb->errmsg = get_pool_memory(PM_EMSG); /* get error message buffer */ + mdb->errmsg[0] = 0; + mdb->cmd = get_pool_memory(PM_EMSG); /* get command buffer */ + mdb->cached_path = get_pool_memory(PM_FNAME); + mdb->cached_path_id = 0; + mdb->m_ref_count = 1; + mdb->fname = get_pool_memory(PM_FNAME); + mdb->path = get_pool_memory(PM_FNAME); + mdb->esc_name = get_pool_memory(PM_FNAME); + mdb->esc_path = get_pool_memory(PM_FNAME); + mdb->esc_obj = get_pool_memory(PM_FNAME); + mdb->m_use_fatal_jmsg = true; + + /* Initialize the private members. */ + mdb->m_db_handle = NULL; + mdb->m_result = NULL; + + db_list->append(this); +} + +BDB_MYSQL::~BDB_MYSQL() +{ +} + +/* + * Initialize database data structure. In principal this should + * never have errors, or it is really fatal. + */ +BDB *db_init_database(JCR *jcr, const char *db_driver, const char *db_name, const char *db_user, + const char *db_password, const char *db_address, int db_port, const char *db_socket, + const char *db_ssl_mode, const char *db_ssl_key, + const char *db_ssl_cert, const char *db_ssl_ca, + const char *db_ssl_capath, const char *db_ssl_cipher, + bool mult_db_connections, bool disable_batch_insert) +{ + BDB_MYSQL *mdb = NULL; + + if (!db_user) { + Jmsg(jcr, M_FATAL, 0, _("A user name for MySQL must be supplied.\n")); + return NULL; + } + P(mutex); /* lock DB queue */ + + /* + * Look to see if DB already open + */ + if (db_list && !mult_db_connections) { + foreach_dlist(mdb, db_list) { + if (mdb->bdb_match_database(db_driver, db_name, db_address, db_port)) { + Dmsg1(100, "DB REopen %s\n", db_name); + mdb->increment_refcount(); + goto get_out; + } + } + } + Dmsg0(100, "db_init_database first time\n"); + mdb = New(BDB_MYSQL()); + if (!mdb) goto get_out; + + /* + * Initialize the parent class members. 
+ */ + mdb->m_db_name = bstrdup(db_name); + mdb->m_db_user = bstrdup(db_user); + if (db_password) { + mdb->m_db_password = bstrdup(db_password); + } + if (db_address) { + mdb->m_db_address = bstrdup(db_address); + } + if (db_socket) { + mdb->m_db_socket = bstrdup(db_socket); + } + if (db_ssl_mode) { + mdb->m_db_ssl_mode = bstrdup(db_ssl_mode); + } else { + mdb->m_db_ssl_mode = bstrdup("preferred"); + } + if (db_ssl_key) { + mdb->m_db_ssl_key = bstrdup(db_ssl_key); + } + if (db_ssl_cert) { + mdb->m_db_ssl_cert = bstrdup(db_ssl_cert); + } + if (db_ssl_ca) { + mdb->m_db_ssl_ca = bstrdup(db_ssl_ca); + } + if (db_ssl_capath) { + mdb->m_db_ssl_capath = bstrdup(db_ssl_capath); + } + if (db_ssl_cipher) { + mdb->m_db_ssl_cipher = bstrdup(db_ssl_cipher); + } + mdb->m_db_port = db_port; + + if (disable_batch_insert) { + mdb->m_disabled_batch_insert = true; + mdb->m_have_batch_insert = false; + } else { + mdb->m_disabled_batch_insert = false; +#ifdef USE_BATCH_FILE_INSERT +#ifdef HAVE_MYSQL_THREAD_SAFE + mdb->m_have_batch_insert = mysql_thread_safe(); +#else + mdb->m_have_batch_insert = false; +#endif /* HAVE_MYSQL_THREAD_SAFE */ +#else + mdb->m_have_batch_insert = false; +#endif /* USE_BATCH_FILE_INSERT */ + } + + mdb->m_allow_transactions = mult_db_connections; + + /* At this time, when mult_db_connections == true, this is for + * specific console command such as bvfs or batch mode, and we don't + * want to share a batch mode or bvfs. In the future, we can change + * the creation function to add this parameter. + */ + mdb->m_dedicated = mult_db_connections; + +get_out: + V(mutex); + return mdb; +} + + +/* + * Now actually open the database. This can generate errors, + * which are returned in the errmsg + * + * DO NOT close the database or delete mdb here !!!! + */ +bool BDB_MYSQL::bdb_open_database(JCR *jcr) +{ + BDB_MYSQL *mdb = this; + bool retval = false; + int errstat; + my_bool reconnect = 1; + + P(mutex); + if (mdb->m_connected) { + retval = true; + goto get_out; + } + + if ((errstat=rwl_init(&mdb->m_lock)) != 0) { + berrno be; + Mmsg1(&mdb->errmsg, _("Unable to initialize DB lock. ERR=%s\n"), + be.bstrerror(errstat)); + goto get_out; + } + + /* + * Connect to the database + */ +#ifdef xHAVE_EMBEDDED_MYSQL +// mysql_server_init(0, NULL, NULL); +#endif + mysql_init(&mdb->m_instance); + + Dmsg0(50, "mysql_init done\n"); + + /* + * Sets the appropriate certificate options for + * establishing secure connection using SSL to the database. + */ + if (mdb->m_db_ssl_key) { + mysql_ssl_set(&(mdb->m_instance), + mdb->m_db_ssl_key, + mdb->m_db_ssl_cert, + mdb->m_db_ssl_ca, + mdb->m_db_ssl_capath, + mdb->m_db_ssl_cipher); + } + + /* + * If connection fails, try at 5 sec intervals for 30 seconds. + */ + for (int retry=0; retry < 6; retry++) { + mdb->m_db_handle = mysql_real_connect( + &(mdb->m_instance), /* db */ + mdb->m_db_address, /* default = localhost */ + mdb->m_db_user, /* login name */ + mdb->m_db_password, /* password */ + mdb->m_db_name, /* database name */ + mdb->m_db_port, /* default port */ + mdb->m_db_socket, /* default = socket */ + CLIENT_FOUND_ROWS); /* flags */ + + /* + * If no connect, try once more in case it is a timing problem + */ + if (mdb->m_db_handle != NULL) { + break; + } + bmicrosleep(5,0); + } + + mysql_options(&mdb->m_instance, MYSQL_OPT_RECONNECT, &reconnect); /* so connection does not timeout */ + Dmsg0(50, "mysql_real_connect done\n"); + Dmsg3(50, "db_user=%s db_name=%s db_password=%s\n", mdb->m_db_user, mdb->m_db_name, + (mdb->m_db_password == NULL) ? 
"(NULL)" : mdb->m_db_password); + + if (mdb->m_db_handle == NULL) { + Mmsg2(&mdb->errmsg, _("Unable to connect to MySQL server.\n" +"Database=%s User=%s\n" +"MySQL connect failed either server not running or your authorization is incorrect.\n"), + mdb->m_db_name, mdb->m_db_user); +#if MYSQL_VERSION_ID >= 40101 + Dmsg3(50, "Error %u (%s): %s\n", + mysql_errno(&(mdb->m_instance)), mysql_sqlstate(&(mdb->m_instance)), + mysql_error(&(mdb->m_instance))); +#else + Dmsg2(50, "Error %u: %s\n", + mysql_errno(&(mdb->m_instance)), mysql_error(&(mdb->m_instance))); +#endif + goto get_out; + } + + /* get the current cipher used for SSL connection */ + if (mdb->m_db_ssl_key) { + const char *cipher; + if (mdb->m_db_ssl_cipher) { + free(mdb->m_db_ssl_cipher); + } + cipher = (const char *)mysql_get_ssl_cipher(&(mdb->m_instance)); + if (cipher) { + mdb->m_db_ssl_cipher = bstrdup(cipher); + } + Dmsg1(50, "db_ssl_ciper=%s\n", (mdb->m_db_ssl_cipher == NULL) ? "(NULL)" : mdb->m_db_ssl_cipher); + } + + mdb->m_connected = true; + if (!bdb_check_version(jcr)) { + goto get_out; + } + + Dmsg3(100, "opendb ref=%d connected=%d db=%p\n", mdb->m_ref_count, mdb->m_connected, mdb->m_db_handle); + + /* + * Set connection timeout to 8 days specialy for batch mode + */ + sql_query("SET wait_timeout=691200"); + sql_query("SET interactive_timeout=691200"); + + retval = true; + +get_out: + V(mutex); + return retval; +} + +void BDB_MYSQL::bdb_close_database(JCR *jcr) +{ + BDB_MYSQL *mdb = this; + + if (mdb->m_connected) { + bdb_end_transaction(jcr); + } + P(mutex); + mdb->m_ref_count--; + Dmsg3(100, "closedb ref=%d connected=%d db=%p\n", mdb->m_ref_count, mdb->m_connected, mdb->m_db_handle); + if (mdb->m_ref_count == 0) { + if (mdb->m_connected) { + sql_free_result(); + } + db_list->remove(mdb); + if (mdb->m_connected) { + Dmsg1(100, "close db=%p\n", mdb->m_db_handle); + mysql_close(&mdb->m_instance); + } + if (is_rwl_valid(&mdb->m_lock)) { + rwl_destroy(&mdb->m_lock); + } + free_pool_memory(mdb->errmsg); + free_pool_memory(mdb->cmd); + free_pool_memory(mdb->cached_path); + free_pool_memory(mdb->fname); + free_pool_memory(mdb->path); + free_pool_memory(mdb->esc_name); + free_pool_memory(mdb->esc_path); + free_pool_memory(mdb->esc_obj); + if (mdb->m_db_driver) { + free(mdb->m_db_driver); + } + if (mdb->m_db_name) { + free(mdb->m_db_name); + } + if (mdb->m_db_user) { + free(mdb->m_db_user); + } + if (mdb->m_db_password) { + free(mdb->m_db_password); + } + if (mdb->m_db_address) { + free(mdb->m_db_address); + } + if (mdb->m_db_socket) { + free(mdb->m_db_socket); + } + if (mdb->m_db_ssl_mode) { + free(mdb->m_db_ssl_mode); + } + if (mdb->m_db_ssl_key) { + free(mdb->m_db_ssl_key); + } + if (mdb->m_db_ssl_cert) { + free(mdb->m_db_ssl_cert); + } + if (mdb->m_db_ssl_ca) { + free(mdb->m_db_ssl_ca); + } + if (mdb->m_db_ssl_capath) { + free(mdb->m_db_ssl_capath); + } + if (mdb->m_db_ssl_cipher) { + free(mdb->m_db_ssl_cipher); + } + delete mdb; + if (db_list->size() == 0) { + delete db_list; + db_list = NULL; + } + } + V(mutex); +} + +/* + * This call is needed because the message channel thread + * opens a database on behalf of a jcr that was created in + * a different thread. MySQL then allocates thread specific + * data, which is NOT freed when the original jcr thread + * closes the database. Thus the msgchan must call here + * to cleanup any thread specific data that it created. 
+ */ +void BDB_MYSQL::bdb_thread_cleanup(void) +{ +#ifndef HAVE_WIN32 + mysql_thread_end(); /* Cleanup thread specific data */ +#endif +} + +/* + * Escape strings so MySQL is happy + * + * len is the length of the old string. Your new + * string must be long enough (max 2*old+1) to hold + * the escaped output. + */ +void BDB_MYSQL::bdb_escape_string(JCR *jcr, char *snew, char *old, int len) +{ + BDB_MYSQL *mdb = this; + mysql_real_escape_string(mdb->m_db_handle, snew, old, len); +} + +/* + * Escape binary object so that MySQL is happy + * Memory is stored in BDB struct, no need to free it + */ +char *BDB_MYSQL::bdb_escape_object(JCR *jcr, char *old, int len) +{ + BDB_MYSQL *mdb = this; + mdb->esc_obj = check_pool_memory_size(mdb->esc_obj, len*2+1); + mysql_real_escape_string(mdb->m_db_handle, mdb->esc_obj, old, len); + return mdb->esc_obj; +} + +/* + * Unescape binary object so that MySQL is happy + */ +void BDB_MYSQL::bdb_unescape_object(JCR *jcr, char *from, int32_t expected_len, + POOLMEM **dest, int32_t *dest_len) +{ + if (!from) { + *dest[0] = 0; + *dest_len = 0; + return; + } + *dest = check_pool_memory_size(*dest, expected_len+1); + *dest_len = expected_len; + memcpy(*dest, from, expected_len); + (*dest)[expected_len]=0; +} + +void BDB_MYSQL::bdb_start_transaction(JCR *jcr) +{ + if (!jcr->attr) { + jcr->attr = get_pool_memory(PM_FNAME); + } + if (!jcr->ar) { + jcr->ar = (ATTR_DBR *)malloc(sizeof(ATTR_DBR)); + memset(jcr->ar, 0, sizeof(ATTR_DBR)); + } +} + +void BDB_MYSQL::bdb_end_transaction(JCR *jcr) +{ + if (jcr && jcr->cached_attribute) { + Dmsg0(400, "Flush last cached attribute.\n"); + if (!bdb_create_attributes_record(jcr, jcr->ar)) { + Jmsg1(jcr, M_FATAL, 0, _("Attribute create error. %s"), jcr->db->bdb_strerror()); + } + jcr->cached_attribute = false; + } +} + +/* + * Submit a general SQL command (cmd), and for each row returned, + * the result_handler is called with the ctx. + */ +bool BDB_MYSQL::bdb_sql_query(const char *query, DB_RESULT_HANDLER *result_handler, void *ctx) +{ + int ret; + SQL_ROW row; + bool send = true; + bool retval = false; + BDB_MYSQL *mdb = this; + + Dmsg1(500, "db_sql_query starts with %s\n", query); + + bdb_lock(); + errmsg[0] = 0; + ret = mysql_query(m_db_handle, query); + if (ret != 0) { + Mmsg(mdb->errmsg, _("Query failed: %s: ERR=%s\n"), query, sql_strerror()); + Dmsg0(500, "db_sql_query failed\n"); + goto get_out; + } + + Dmsg0(500, "db_sql_query succeeded. checking handler\n"); + + if (result_handler) { + if ((mdb->m_result = mysql_use_result(mdb->m_db_handle)) != NULL) { + mdb->m_num_fields = mysql_num_fields(mdb->m_result); + + /* + * We *must* fetch all rows + */ + while ((row = mysql_fetch_row(m_result))) { + if (send) { + /* the result handler returns 1 when it has + * seen all the data it wants. However, we + * loop to the end of the data. + */ + if (result_handler(ctx, mdb->m_num_fields, row)) { + send = false; + } + } + } + sql_free_result(); + } + } + + Dmsg0(500, "db_sql_query finished\n"); + retval = true; + +get_out: + bdb_unlock(); + return retval; +} + +bool BDB_MYSQL::sql_query(const char *query, int flags) +{ + int ret; + bool retval = true; + BDB_MYSQL *mdb = this; + + Dmsg1(500, "sql_query starts with '%s'\n", query); + /* + * We are starting a new query. reset everything. 
+ */ + mdb->m_num_rows = -1; + mdb->m_row_number = -1; + mdb->m_field_number = -1; + + if (mdb->m_result) { + mysql_free_result(mdb->m_result); + mdb->m_result = NULL; + } + + ret = mysql_query(mdb->m_db_handle, query); + if (ret == 0) { + Dmsg0(500, "we have a result\n"); + if (flags & QF_STORE_RESULT) { + mdb->m_result = mysql_store_result(mdb->m_db_handle); + if (mdb->m_result != NULL) { + mdb->m_num_fields = mysql_num_fields(mdb->m_result); + Dmsg1(500, "we have %d fields\n", mdb->m_num_fields); + mdb->m_num_rows = mysql_num_rows(mdb->m_result); + Dmsg1(500, "we have %d rows\n", mdb->m_num_rows); + } else { + mdb->m_num_fields = 0; + mdb->m_num_rows = mysql_affected_rows(mdb->m_db_handle); + Dmsg1(500, "we have %d rows\n", mdb->m_num_rows); + } + } else { + mdb->m_num_fields = 0; + mdb->m_num_rows = mysql_affected_rows(mdb->m_db_handle); + Dmsg1(500, "we have %d rows\n", mdb->m_num_rows); + } + } else { + Dmsg0(500, "we failed\n"); + mdb->m_status = 1; /* failed */ + retval = false; + } + return retval; +} + +void BDB_MYSQL::sql_free_result(void) +{ + BDB_MYSQL *mdb = this; + bdb_lock(); + if (mdb->m_result) { + mysql_free_result(mdb->m_result); + mdb->m_result = NULL; + } + if (mdb->m_fields) { + free(mdb->m_fields); + mdb->m_fields = NULL; + } + mdb->m_num_rows = mdb->m_num_fields = 0; + bdb_unlock(); +} + +SQL_ROW BDB_MYSQL::sql_fetch_row(void) +{ + BDB_MYSQL *mdb = this; + if (!mdb->m_result) { + return NULL; + } else { + return mysql_fetch_row(mdb->m_result); + } +} + +const char *BDB_MYSQL::sql_strerror(void) +{ + BDB_MYSQL *mdb = this; + return mysql_error(mdb->m_db_handle); +} + +void BDB_MYSQL::sql_data_seek(int row) +{ + BDB_MYSQL *mdb = this; + return mysql_data_seek(mdb->m_result, row); +} + +int BDB_MYSQL::sql_affected_rows(void) +{ + BDB_MYSQL *mdb = this; + return mysql_affected_rows(mdb->m_db_handle); +} + +uint64_t BDB_MYSQL::sql_insert_autokey_record(const char *query, const char *table_name) +{ + BDB_MYSQL *mdb = this; + /* + * First execute the insert query and then retrieve the currval. 
+ */ + if (mysql_query(mdb->m_db_handle, query) != 0) { + return 0; + } + + mdb->m_num_rows = mysql_affected_rows(mdb->m_db_handle); + if (mdb->m_num_rows != 1) { + return 0; + } + + mdb->changes++; + + return mysql_insert_id(mdb->m_db_handle); +} + +SQL_FIELD *BDB_MYSQL::sql_fetch_field(void) +{ + int i; + MYSQL_FIELD *field; + BDB_MYSQL *mdb = this; + + if (!mdb->m_fields || mdb->m_fields_size < mdb->m_num_fields) { + if (mdb->m_fields) { + free(mdb->m_fields); + mdb->m_fields = NULL; + } + Dmsg1(500, "allocating space for %d fields\n", mdb->m_num_fields); + mdb->m_fields = (SQL_FIELD *)malloc(sizeof(SQL_FIELD) * mdb->m_num_fields); + mdb->m_fields_size = mdb->m_num_fields; + + for (i = 0; i < mdb->m_num_fields; i++) { + Dmsg1(500, "filling field %d\n", i); + if ((field = mysql_fetch_field(mdb->m_result)) != NULL) { + mdb->m_fields[i].name = field->name; + mdb->m_fields[i].max_length = field->max_length; + mdb->m_fields[i].type = field->type; + mdb->m_fields[i].flags = field->flags; + + Dmsg4(500, "sql_fetch_field finds field '%s' has length='%d' type='%d' and IsNull=%d\n", + mdb->m_fields[i].name, mdb->m_fields[i].max_length, mdb->m_fields[i].type, mdb->m_fields[i].flags); + } + } + } + + /* + * Increment field number for the next time around + */ + return &mdb->m_fields[mdb->m_field_number++]; +} + +bool BDB_MYSQL::sql_field_is_not_null(int field_type) +{ + return IS_NOT_NULL(field_type); +} + +bool BDB_MYSQL::sql_field_is_numeric(int field_type) +{ + return IS_NUM(field_type); +} + +/* + * Returns true if OK + * false if failed + */ +bool BDB_MYSQL::sql_batch_start(JCR *jcr) +{ + BDB_MYSQL *mdb = this; + bool retval; + + bdb_lock(); + retval = sql_query("CREATE TEMPORARY TABLE batch (" + "FileIndex integer," + "JobId integer," + "Path blob," + "Name blob," + "LStat tinyblob," + "MD5 tinyblob," + "DeltaSeq integer)"); + bdb_unlock(); + + /* + * Keep track of the number of changes in batch mode. + */ + mdb->changes = 0; + + return retval; +} + +/* set error to something to abort operation */ +/* + * Returns true if OK + * false if failed + */ +bool BDB_MYSQL::sql_batch_end(JCR *jcr, const char *error) +{ + BDB_MYSQL *mdb = this; + + mdb->m_status = 0; + + /* + * Flush any pending inserts. + */ + if (mdb->changes) { + return sql_query(mdb->cmd); + } + + return true; +} + +/* + * Returns true if OK + * false if failed + */ +bool BDB_MYSQL::sql_batch_insert(JCR *jcr, ATTR_DBR *ar) +{ + BDB_MYSQL *mdb = this; + const char *digest; + char ed1[50]; + + mdb->esc_name = check_pool_memory_size(mdb->esc_name, mdb->fnl*2+1); + bdb_escape_string(jcr, mdb->esc_name, mdb->fname, mdb->fnl); + + mdb->esc_path = check_pool_memory_size(mdb->esc_path, mdb->pnl*2+1); + bdb_escape_string(jcr, mdb->esc_path, mdb->path, mdb->pnl); + + if (ar->Digest == NULL || ar->Digest[0] == 0) { + digest = "0"; + } else { + digest = ar->Digest; + } + + /* + * Try to batch up multiple inserts using multi-row inserts. + */ + if (mdb->changes == 0) { + Mmsg(cmd, "INSERT INTO batch VALUES " + "(%d,%s,'%s','%s','%s','%s',%u)", + ar->FileIndex, edit_int64(ar->JobId,ed1), mdb->esc_path, + mdb->esc_name, ar->attr, digest, ar->DeltaSeq); + mdb->changes++; + } else { + /* + * We use the esc_obj for temporary storage otherwise + * we keep on copying data. 
+ */ + Mmsg(mdb->esc_obj, ",(%d,%s,'%s','%s','%s','%s',%u)", + ar->FileIndex, edit_int64(ar->JobId,ed1), mdb->esc_path, + mdb->esc_name, ar->attr, digest, ar->DeltaSeq); + pm_strcat(mdb->cmd, mdb->esc_obj); + mdb->changes++; + } + + /* + * See if we need to flush the query buffer filled + * with multi-row inserts. + */ + if ((mdb->changes % MYSQL_CHANGES_PER_BATCH_INSERT) == 0) { + if (!sql_query(mdb->cmd)) { + mdb->changes = 0; + return false; + } else { + mdb->changes = 0; + } + } + return true; +} + + +#endif /* HAVE_MYSQL */ diff --git a/src/cats/mysql.in b/src/cats/mysql.in new file mode 100644 index 00000000..b4e0d70d --- /dev/null +++ b/src/cats/mysql.in @@ -0,0 +1,11 @@ +#!/bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# shell script to create Bacula MySQL tables +# +bindir=@MYSQL_BINDIR@ +db_name=@db_name@ + +$bindir/mysql $* ${db_name} diff --git a/src/cats/postgresql.c b/src/cats/postgresql.c new file mode 100644 index 00000000..a611ffa3 --- /dev/null +++ b/src/cats/postgresql.c @@ -0,0 +1,1184 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula Catalog Database routines specific to PostgreSQL + * These are PostgreSQL specific routines + * + * Dan Langille, December 2003 + * based upon work done by Kern Sibbald, March 2000 + * + * Note: at one point, this file was changed to class based by a certain + * programmer, and other than "wrapping" in a class, which is a trivial + * change for a C++ programmer, nothing substantial was done, yet all the + * code was recommitted under this programmer's name. Consequently, we + * undo those changes here. Unfortunately, it is too difficult to put + * back the original author's name (Dan Langille) on the parts he wrote. 
+ */ + +#include "bacula.h" + +#ifdef HAVE_POSTGRESQL + +#include "cats.h" + +/* Note in this file, we want these for Postgresql not Bacula */ +#undef PACKAGE_BUGREPORT +#undef PACKAGE_NAME +#undef PACKAGE_STRING +#undef PACKAGE_TARNAME +#undef PACKAGE_VERSION + +#include "libpq-fe.h" +#include "postgres_ext.h" /* needed for NAMEDATALEN */ +#include "pg_config_manual.h" /* get NAMEDATALEN on version 8.3 or later */ +#include "pg_config.h" /* for PG_VERSION_NUM */ +#define __BDB_POSTGRESQL_H_ 1 +#include "bdb_postgresql.h" + +#define dbglvl_dbg DT_SQL|100 +#define dbglvl_info DT_SQL|50 +#define dbglvl_err DT_SQL|10 + +/* ----------------------------------------------------------------------- + * + * PostgreSQL dependent defines and subroutines + * + * ----------------------------------------------------------------------- + */ + +/* List of open databases */ +static dlist *db_list = NULL; + +static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; + +BDB_POSTGRESQL::BDB_POSTGRESQL(): BDB() +{ + BDB_POSTGRESQL *mdb = this; + + if (db_list == NULL) { + db_list = New(dlist(mdb, &mdb->m_link)); + } + mdb->m_db_driver_type = SQL_DRIVER_TYPE_POSTGRESQL; + mdb->m_db_type = SQL_TYPE_POSTGRESQL; + mdb->m_db_driver = bstrdup("PostgreSQL"); + + mdb->errmsg = get_pool_memory(PM_EMSG); /* get error message buffer */ + mdb->errmsg[0] = 0; + mdb->cmd = get_pool_memory(PM_EMSG); /* get command buffer */ + mdb->cached_path = get_pool_memory(PM_FNAME); + mdb->cached_path_id = 0; + mdb->m_ref_count = 1; + mdb->fname = get_pool_memory(PM_FNAME); + mdb->path = get_pool_memory(PM_FNAME); + mdb->esc_name = get_pool_memory(PM_FNAME); + mdb->esc_path = get_pool_memory(PM_FNAME); + mdb->esc_obj = get_pool_memory(PM_FNAME); + mdb->m_use_fatal_jmsg = true; + + /* Initialize the private members. */ + mdb->m_db_handle = NULL; + mdb->m_result = NULL; + mdb->m_buf = get_pool_memory(PM_FNAME); + + db_list->append(this); +} + +BDB_POSTGRESQL::~BDB_POSTGRESQL() +{ +} + +/* + * Initialize database data structure. In principal this should + * never have errors, or it is really fatal. + */ +BDB *db_init_database(JCR *jcr, const char *db_driver, const char *db_name, const char *db_user, + const char *db_password, const char *db_address, int db_port, const char *db_socket, + const char *db_ssl_mode, const char *db_ssl_key, const char *db_ssl_cert, + const char *db_ssl_ca, const char *db_ssl_capath, const char *db_ssl_cipher, + bool mult_db_connections, bool disable_batch_insert) +{ + BDB_POSTGRESQL *mdb = NULL; + + if (!db_user) { + Jmsg(jcr, M_FATAL, 0, _("A user name for PostgreSQL must be supplied.\n")); + return NULL; + } + P(mutex); /* lock DB queue */ + if (db_list && !mult_db_connections) { + /* + * Look to see if DB already open + */ + foreach_dlist(mdb, db_list) { + if (mdb->bdb_match_database(db_driver, db_name, db_address, db_port)) { + Dmsg1(dbglvl_info, "DB REopen %s\n", db_name); + mdb->increment_refcount(); + goto get_out; + } + } + } + Dmsg0(dbglvl_info, "db_init_database first time\n"); + /* Create the global Bacula db context */ + mdb = New(BDB_POSTGRESQL()); + if (!mdb) goto get_out; + + /* Initialize the parent class members. 
*/ + mdb->m_db_name = bstrdup(db_name); + mdb->m_db_user = bstrdup(db_user); + if (db_password) { + mdb->m_db_password = bstrdup(db_password); + } + if (db_address) { + mdb->m_db_address = bstrdup(db_address); + } + if (db_socket) { + mdb->m_db_socket = bstrdup(db_socket); + } + if (db_ssl_mode) { + mdb->m_db_ssl_mode = bstrdup(db_ssl_mode); + } else { + mdb->m_db_ssl_mode = bstrdup("prefer"); + } + if (db_ssl_key) { + mdb->m_db_ssl_key = bstrdup(db_ssl_key); + } + if (db_ssl_cert) { + mdb->m_db_ssl_cert = bstrdup(db_ssl_cert); + } + if (db_ssl_ca) { + mdb->m_db_ssl_ca = bstrdup(db_ssl_ca); + } + mdb->m_db_port = db_port; + + if (disable_batch_insert) { + mdb->m_disabled_batch_insert = true; + mdb->m_have_batch_insert = false; + } else { + mdb->m_disabled_batch_insert = false; +#ifdef USE_BATCH_FILE_INSERT +#if defined(HAVE_POSTGRESQL_BATCH_FILE_INSERT) || defined(HAVE_PQISTHREADSAFE) +#ifdef HAVE_PQISTHREADSAFE + mdb->m_have_batch_insert = PQisthreadsafe(); +#else + mdb->m_have_batch_insert = true; +#endif /* HAVE_PQISTHREADSAFE */ +#else + mdb->m_have_batch_insert = true; +#endif /* HAVE_POSTGRESQL_BATCH_FILE_INSERT || HAVE_PQISTHREADSAFE */ +#else + mdb->m_have_batch_insert = false; +#endif /* USE_BATCH_FILE_INSERT */ + } + mdb->m_allow_transactions = mult_db_connections; + + /* At this time, when mult_db_connections == true, this is for + * specific console command such as bvfs or batch mode, and we don't + * want to share a batch mode or bvfs. In the future, we can change + * the creation function to add this parameter. + */ + mdb->m_dedicated = mult_db_connections; + +get_out: + V(mutex); + return mdb; +} + + +/* Check that the database corresponds to the encoding we want */ +static bool pgsql_check_database_encoding(JCR *jcr, BDB_POSTGRESQL *mdb) +{ + SQL_ROW row; + int ret = false; + + if (!mdb->sql_query("SELECT getdatabaseencoding()", QF_STORE_RESULT)) { + Jmsg(jcr, M_ERROR, 0, "%s", mdb->errmsg); + return false; + } + + if ((row = mdb->sql_fetch_row()) == NULL) { + Mmsg1(mdb->errmsg, _("error fetching row: %s\n"), mdb->sql_strerror()); + Jmsg(jcr, M_ERROR, 0, "Can't check database encoding %s", mdb->errmsg); + } else { + ret = bstrcmp(row[0], "SQL_ASCII"); + + if (ret) { + /* If we are in SQL_ASCII, we can force the client_encoding to SQL_ASCII too */ + mdb->sql_query("SET client_encoding TO 'SQL_ASCII'"); + + } else { + /* Something is wrong with database encoding */ + Mmsg(mdb->errmsg, + _("Encoding error for database \"%s\". Wanted SQL_ASCII, got %s\n"), + mdb->get_db_name(), row[0]); + Jmsg(jcr, M_WARNING, 0, "%s", mdb->errmsg); + Dmsg1(dbglvl_err, "%s", mdb->errmsg); + } + } + return ret; +} + +/* + * Now actually open the database. This can generate errors, + * which are returned in the errmsg + * + * DO NOT close the database or delete mdb here !!!! + */ +bool BDB_POSTGRESQL::bdb_open_database(JCR *jcr) +{ + bool retval = false; + int errstat; + char buf[10], *port; + BDB_POSTGRESQL *mdb = this; + + P(mutex); + if (mdb->m_connected) { + retval = true; + goto get_out; + } + + if ((errstat=rwl_init(&mdb->m_lock)) != 0) { + berrno be; + Mmsg1(&mdb->errmsg, _("Unable to initialize DB lock. ERR=%s\n"), + be.bstrerror(errstat)); + goto get_out; + } + + if (mdb->m_db_port) { + bsnprintf(buf, sizeof(buf), "%d", mdb->m_db_port); + port = buf; + } else { + port = NULL; + } + + /* Tells libpq that the SSL library has already been initialized */ + PQinitSSL(0); + + /* If connection fails, try at 5 sec intervals for 30 seconds. 
*/ + for (int retry=0; retry < 6; retry++) { + /* connect to the database */ + +#if PG_VERSION_NUM < 90000 + + /* Old "depreciated" connection call */ + mdb->m_db_handle = PQsetdbLogin( + mdb->m_db_address, /* default = localhost */ + port, /* default port */ + NULL, /* pg options */ + NULL, /* tty, ignored */ + mdb->m_db_name, /* database name */ + mdb->m_db_user, /* login name */ + mdb->m_db_password); /* password */ +#else + /* Code for Postgresql 9.0 and greater */ + const char *keywords[10] = {"host", "port", + "dbname", "user", + "password", "sslmode", + "sslkey", "sslcert", + "sslrootcert", NULL }; + const char *values[10] = {mdb->m_db_address, /* default localhost */ + port, /* default port */ + mdb->m_db_name, + mdb->m_db_user, + mdb->m_db_password, + mdb->m_db_ssl_mode, + mdb->m_db_ssl_key, + mdb->m_db_ssl_cert, + mdb->m_db_ssl_ca, + NULL }; + mdb->m_db_handle = PQconnectdbParams(keywords, values, 0); +#endif + + /* If no connect, try once more in case it is a timing problem */ + if (PQstatus(mdb->m_db_handle) == CONNECTION_OK) { + break; + } + bmicrosleep(5, 0); + } + + Dmsg0(dbglvl_info, "pg_real_connect done\n"); + Dmsg3(dbglvl_info, "db_user=%s db_name=%s db_password=%s\n", mdb->m_db_user, mdb->m_db_name, + mdb->m_db_password==NULL?"(NULL)":mdb->m_db_password); + +#ifdef HAVE_OPENSSL + #define USE_OPENSSL 1 + SSL *ssl; + if (PQgetssl(mdb->m_db_handle) != NULL) { + Dmsg0(dbglvl_info, "SSL in use\n"); + ssl = (SSL *)PQgetssl(mdb->m_db_handle); + Dmsg2(dbglvl_info, "Version:%s Cipher:%s\n", SSL_get_version(ssl), SSL_get_cipher(ssl)); + } else { + Dmsg0(dbglvl_info, "SSL not in use\n"); + } +#endif + + if (PQstatus(mdb->m_db_handle) != CONNECTION_OK) { + Mmsg2(&mdb->errmsg, _("Unable to connect to PostgreSQL server. Database=%s User=%s\n" + "Possible causes: SQL server not running; password incorrect; max_connections exceeded.\n"), + mdb->m_db_name, mdb->m_db_user); + goto get_out; + } + + mdb->m_connected = true; + if (!bdb_check_version(jcr)) { + goto get_out; + } + + sql_query("SET datestyle TO 'ISO, YMD'"); + sql_query("SET cursor_tuple_fraction=1"); + + /* + * Tell PostgreSQL we are using standard conforming strings and avoid warnings such as: + * WARNING: nonstandard use of \\ in a string literal + */ + sql_query("SET standard_conforming_strings=on"); + + /* Check that encoding is SQL_ASCII */ + pgsql_check_database_encoding(jcr, mdb); + + retval = true; + +get_out: + V(mutex); + return retval; +} + +void BDB_POSTGRESQL::bdb_close_database(JCR *jcr) +{ + BDB_POSTGRESQL *mdb = this; + + if (mdb->m_connected) { + bdb_end_transaction(jcr); + } + P(mutex); + mdb->m_ref_count--; + if (mdb->m_ref_count == 0) { + if (mdb->m_connected) { + sql_free_result(); + } + db_list->remove(mdb); + if (mdb->m_connected && mdb->m_db_handle) { + PQfinish(mdb->m_db_handle); + } + if (is_rwl_valid(&mdb->m_lock)) { + rwl_destroy(&mdb->m_lock); + } + free_pool_memory(mdb->errmsg); + free_pool_memory(mdb->cmd); + free_pool_memory(mdb->cached_path); + free_pool_memory(mdb->fname); + free_pool_memory(mdb->path); + free_pool_memory(mdb->esc_name); + free_pool_memory(mdb->esc_path); + free_pool_memory(mdb->esc_obj); + free_pool_memory(mdb->m_buf); + if (mdb->m_db_driver) { + free(mdb->m_db_driver); + } + if (mdb->m_db_name) { + free(mdb->m_db_name); + } + if (mdb->m_db_user) { + free(mdb->m_db_user); + } + if (mdb->m_db_password) { + free(mdb->m_db_password); + } + if (mdb->m_db_address) { + free(mdb->m_db_address); + } + if (mdb->m_db_socket) { + free(mdb->m_db_socket); + } + if (mdb->m_db_ssl_mode) { 
+ free(mdb->m_db_ssl_mode); + } + if (mdb->m_db_ssl_key) { + free(mdb->m_db_ssl_key); + } + if (mdb->m_db_ssl_cert) { + free(mdb->m_db_ssl_cert); + } + if (mdb->m_db_ssl_ca) { + free(mdb->m_db_ssl_ca); + } + delete mdb; + if (db_list->size() == 0) { + delete db_list; + db_list = NULL; + } + } + V(mutex); +} + +void BDB_POSTGRESQL::bdb_thread_cleanup(void) +{ +} + +/* + * Escape strings so PostgreSQL is happy + * + * len is the length of the old string. Your new + * string must be long enough (max 2*old+1) to hold + * the escaped output. + */ +void BDB_POSTGRESQL::bdb_escape_string(JCR *jcr, char *snew, char *old, int len) +{ + BDB_POSTGRESQL *mdb = this; + int failed; + + PQescapeStringConn(mdb->m_db_handle, snew, old, len, &failed); + if (failed) { + Jmsg(jcr, M_FATAL, 0, _("PQescapeStringConn returned non-zero.\n")); + /* failed on encoding, probably invalid multibyte encoding in the source string + see PQescapeStringConn documentation for details. */ + Dmsg0(dbglvl_err, "PQescapeStringConn failed\n"); + } +} + +/* + * Escape binary so that PostgreSQL is happy + * + */ +char *BDB_POSTGRESQL::bdb_escape_object(JCR *jcr, char *old, int len) +{ + size_t new_len; + unsigned char *obj; + BDB_POSTGRESQL *mdb = this; + + mdb->esc_obj[0] = 0; + obj = PQescapeByteaConn(mdb->m_db_handle, (unsigned const char *)old, len, &new_len); + if (!obj) { + Jmsg(jcr, M_FATAL, 0, _("PQescapeByteaConn returned NULL.\n")); + } else { + mdb->esc_obj = check_pool_memory_size(mdb->esc_obj, new_len+1); + memcpy(mdb->esc_obj, obj, new_len); + mdb->esc_obj[new_len] = 0; + PQfreemem(obj); + } + return (char *)mdb->esc_obj; +} + +/* + * Unescape binary object so that PostgreSQL is happy + * + */ +void BDB_POSTGRESQL::bdb_unescape_object(JCR *jcr, char *from, int32_t expected_len, + POOLMEM **dest, int32_t *dest_len) +{ + size_t new_len; + unsigned char *obj; + + if (!from) { + *dest[0] = 0; + *dest_len = 0; + return; + } + + obj = PQunescapeBytea((unsigned const char *)from, &new_len); + + if (!obj) { + Jmsg(jcr, M_FATAL, 0, _("PQunescapeByteaConn returned NULL.\n")); + } + + *dest_len = new_len; + *dest = check_pool_memory_size(*dest, new_len+1); + memcpy(*dest, obj, new_len); + (*dest)[new_len]=0; + + PQfreemem(obj); + + Dmsg1(dbglvl_info, "obj size: %d\n", *dest_len); +} + +/* + * Start a transaction. This groups inserts and makes things more efficient. + * Usually started when inserting file attributes. + */ +void BDB_POSTGRESQL::bdb_start_transaction(JCR *jcr) +{ + BDB_POSTGRESQL *mdb = this; + + if (!jcr->attr) { + jcr->attr = get_pool_memory(PM_FNAME); + } + if (!jcr->ar) { + jcr->ar = (ATTR_DBR *)malloc(sizeof(ATTR_DBR)); + memset(jcr->ar, 0, sizeof(ATTR_DBR)); + } + + /* + * This is turned off because transactions break if + * multiple simultaneous jobs are run. + */ + if (!mdb->m_allow_transactions) { + return; + } + + bdb_lock(); + /* Allow only 25,000 changes per transaction */ + if (mdb->m_transaction && changes > 25000) { + bdb_end_transaction(jcr); + } + if (!mdb->m_transaction) { + sql_query("BEGIN"); /* begin transaction */ + Dmsg0(dbglvl_info, "Start PosgreSQL transaction\n"); + mdb->m_transaction = true; + } + bdb_unlock(); +} + +void BDB_POSTGRESQL::bdb_end_transaction(JCR *jcr) +{ + BDB_POSTGRESQL *mdb = this; + + if (jcr && jcr->cached_attribute) { + Dmsg0(dbglvl_info, "Flush last cached attribute.\n"); + if (!bdb_create_attributes_record(jcr, jcr->ar)) { + Jmsg1(jcr, M_FATAL, 0, _("Attribute create error. 
%s"), jcr->db->bdb_strerror()); + } + jcr->cached_attribute = false; + } + + if (!mdb->m_allow_transactions) { + return; + } + + bdb_lock(); + if (mdb->m_transaction) { + sql_query("COMMIT"); /* end transaction */ + mdb->m_transaction = false; + Dmsg1(dbglvl_info, "End PostgreSQL transaction changes=%d\n", changes); + } + changes = 0; + bdb_unlock(); +} + + +/* + * Submit a general SQL command, and for each row returned, + * the result_handler is called with the ctx. + */ +bool BDB_POSTGRESQL::bdb_big_sql_query(const char *query, + DB_RESULT_HANDLER *result_handler, + void *ctx) +{ + BDB_POSTGRESQL *mdb = this; + SQL_ROW row; + bool retval = false; + bool in_transaction = mdb->m_transaction; + + Dmsg1(dbglvl_info, "db_sql_query starts with '%s'\n", query); + + mdb->errmsg[0] = 0; + /* This code handles only SELECT queries */ + if (strncasecmp(query, "SELECT", 6) != 0) { + return bdb_sql_query(query, result_handler, ctx); + } + + if (!result_handler) { /* no need of big_query without handler */ + return false; + } + + bdb_lock(); + + if (!in_transaction) { /* CURSOR needs transaction */ + sql_query("BEGIN"); + } + + Mmsg(m_buf, "DECLARE _bac_cursor CURSOR FOR %s", query); + + if (!sql_query(mdb->m_buf)) { + Mmsg(mdb->errmsg, _("Query failed: %s: ERR=%s\n"), mdb->m_buf, sql_strerror()); + Dmsg1(dbglvl_err, "%s\n", mdb->errmsg); + goto get_out; + } + + do { + if (!sql_query("FETCH 100 FROM _bac_cursor")) { + Mmsg(mdb->errmsg, _("Fetch failed: ERR=%s\n"), sql_strerror()); + Dmsg1(dbglvl_err, "%s\n", mdb->errmsg); + goto get_out; + } + while ((row = sql_fetch_row()) != NULL) { + Dmsg1(dbglvl_info, "Fetching %d rows\n", mdb->m_num_rows); + if (result_handler(ctx, mdb->m_num_fields, row)) + break; + } + PQclear(mdb->m_result); + m_result = NULL; + + } while (m_num_rows > 0); /* TODO: Can probably test against 100 */ + + sql_query("CLOSE _bac_cursor"); + + Dmsg0(dbglvl_info, "db_big_sql_query finished\n"); + sql_free_result(); + retval = true; + +get_out: + if (!in_transaction) { + sql_query("COMMIT"); /* end transaction */ + } + + bdb_unlock(); + return retval; +} + +/* + * Submit a general SQL command, and for each row returned, + * the result_handler is called with the ctx. + */ +bool BDB_POSTGRESQL::bdb_sql_query(const char *query, DB_RESULT_HANDLER *result_handler, void *ctx) +{ + SQL_ROW row; + bool retval = true; + BDB_POSTGRESQL *mdb = this; + + Dmsg1(dbglvl_info, "db_sql_query starts with '%s'\n", query); + + bdb_lock(); + mdb->errmsg[0] = 0; + if (!sql_query(query, QF_STORE_RESULT)) { + Mmsg(mdb->errmsg, _("Query failed: %s: ERR=%s\n"), query, sql_strerror()); + Dmsg0(dbglvl_err, "db_sql_query failed\n"); + retval = false; + goto get_out; + } + + Dmsg0(dbglvl_info, "db_sql_query succeeded. checking handler\n"); + + if (result_handler) { + Dmsg0(dbglvl_dbg, "db_sql_query invoking handler\n"); + while ((row = sql_fetch_row())) { + Dmsg0(dbglvl_dbg, "db_sql_query sql_fetch_row worked\n"); + if (result_handler(ctx, mdb->m_num_fields, row)) + break; + } + sql_free_result(); + } + + Dmsg0(dbglvl_info, "db_sql_query finished\n"); + +get_out: + bdb_unlock(); + return retval; +} + +/* + * If this routine returns false (failure), Bacula expects + * that no result has been stored. + * This is where QueryDB calls to with Postgresql. 
+ * + * Returns: true on success + * false on failure + * + */ +bool BDB_POSTGRESQL::sql_query(const char *query, int flags) +{ + int i; + bool retval = false; + BDB_POSTGRESQL *mdb = this; + + Dmsg1(dbglvl_info, "sql_query starts with '%s'\n", query); + + /* We are starting a new query. reset everything. */ + mdb->m_num_rows = -1; + mdb->m_row_number = -1; + mdb->m_field_number = -1; + + if (mdb->m_result) { + PQclear(mdb->m_result); /* hmm, someone forgot to free?? */ + mdb->m_result = NULL; + } + + for (i = 0; i < 10; i++) { + mdb->m_result = PQexec(mdb->m_db_handle, query); + if (mdb->m_result) { + break; + } + bmicrosleep(5, 0); + } + if (!mdb->m_result) { + Dmsg1(dbglvl_err, "Query failed: %s\n", query); + goto get_out; + } + + mdb->m_status = PQresultStatus(mdb->m_result); + if (mdb->m_status == PGRES_TUPLES_OK || mdb->m_status == PGRES_COMMAND_OK) { + Dmsg0(dbglvl_dbg, "we have a result\n"); + + /* How many fields in the set? */ + mdb->m_num_fields = (int)PQnfields(mdb->m_result); + Dmsg1(dbglvl_dbg, "we have %d fields\n", mdb->m_num_fields); + + mdb->m_num_rows = PQntuples(mdb->m_result); + Dmsg1(dbglvl_dbg, "we have %d rows\n", mdb->m_num_rows); + + mdb->m_row_number = 0; /* we can start to fetch something */ + mdb->m_status = 0; /* succeed */ + retval = true; + } else { + Dmsg1(dbglvl_err, "Result status failed: %s\n", query); + goto get_out; + } + + Dmsg0(dbglvl_info, "sql_query finishing\n"); + goto ok_out; + +get_out: + Dmsg0(dbglvl_err, "we failed\n"); + PQclear(mdb->m_result); + mdb->m_result = NULL; + mdb->m_status = 1; /* failed */ + +ok_out: + return retval; +} + +void BDB_POSTGRESQL::sql_free_result(void) +{ + BDB_POSTGRESQL *mdb = this; + + bdb_lock(); + if (mdb->m_result) { + PQclear(mdb->m_result); + mdb->m_result = NULL; + } + if (mdb->m_rows) { + free(mdb->m_rows); + mdb->m_rows = NULL; + } + if (mdb->m_fields) { + free(mdb->m_fields); + mdb->m_fields = NULL; + } + mdb->m_num_rows = mdb->m_num_fields = 0; + bdb_unlock(); +} + +SQL_ROW BDB_POSTGRESQL::sql_fetch_row(void) +{ + SQL_ROW row = NULL; /* by default, return NULL */ + BDB_POSTGRESQL *mdb = this; + + Dmsg0(dbglvl_info, "sql_fetch_row start\n"); + + if (mdb->m_num_fields == 0) { /* No field, no row */ + Dmsg0(dbglvl_err, "sql_fetch_row finishes returning NULL, no fields\n"); + return NULL; + } + + if (!mdb->m_rows || mdb->m_rows_size < mdb->m_num_fields) { + if (mdb->m_rows) { + Dmsg0(dbglvl_dbg, "sql_fetch_row freeing space\n"); + free(mdb->m_rows); + } + Dmsg1(dbglvl_dbg, "we need space for %d bytes\n", sizeof(char *) * mdb->m_num_fields); + mdb->m_rows = (SQL_ROW)malloc(sizeof(char *) * mdb->m_num_fields); + mdb->m_rows_size = mdb->m_num_fields; + + /* Now reset the row_number now that we have the space allocated */ + mdb->m_row_number = 0; + } + + /* If still within the result set */ + if (mdb->m_row_number >= 0 && mdb->m_row_number < mdb->m_num_rows) { + Dmsg2(dbglvl_dbg, "sql_fetch_row row number '%d' is acceptable (0..%d)\n", mdb->m_row_number, m_num_rows); + + /* Get each value from this row */ + for (int j = 0; j < mdb->m_num_fields; j++) { + mdb->m_rows[j] = PQgetvalue(mdb->m_result, mdb->m_row_number, j); + Dmsg2(dbglvl_dbg, "sql_fetch_row field '%d' has value '%s'\n", j, mdb->m_rows[j]); + } + mdb->m_row_number++; /* Increment the row number for the next call */ + row = mdb->m_rows; + } else { + Dmsg2(dbglvl_dbg, "sql_fetch_row row number '%d' is NOT acceptable (0..%d)\n", mdb->m_row_number, m_num_rows); + } + + Dmsg1(dbglvl_info, "sql_fetch_row finishes returning %p\n", row); + + return row; +} + 
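+/*
+ * Illustrative sketch, not taken from the upstream sources: it shows how a
+ * DB_RESULT_HANDLER is typically paired with bdb_sql_query() and the
+ * db_int64_ctx helper declared in cats.h above.  The guard macro, the
+ * function names and the query text are hypothetical; the block is
+ * compiled out unless BDB_USAGE_EXAMPLE is defined.
+ */
+#ifdef BDB_USAGE_EXAMPLE
+static int example_int_handler(void *ctx, int num_fields, char **row)
+{
+   db_int64_ctx *lctx = (db_int64_ctx *)ctx;
+   if (num_fields > 0 && row[0]) {
+      lctx->value = str_to_int64(row[0]);  /* keep the first column of the row */
+   }
+   lctx->count++;                          /* number of rows seen */
+   return 0;                               /* 0 = keep sending rows, non-zero = stop */
+}
+
+static void example_count_jobs(BDB *db)
+{
+   db_int64_ctx ctx;                       /* value and count start at zero */
+   /* bdb_sql_query() calls example_int_handler once per returned row */
+   db->bdb_sql_query("SELECT COUNT(*) FROM Job", example_int_handler, &ctx);
+   Dmsg1(dbglvl_info, "Job count=%lld\n", (long long)ctx.value);
+}
+#endif /* BDB_USAGE_EXAMPLE */
+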
+const char *BDB_POSTGRESQL::sql_strerror(void) +{ + BDB_POSTGRESQL *mdb = this; + return PQerrorMessage(mdb->m_db_handle); +} + +void BDB_POSTGRESQL::sql_data_seek(int row) +{ + BDB_POSTGRESQL *mdb = this; + /* Set the row number to be returned on the next call to sql_fetch_row */ + mdb->m_row_number = row; +} + +int BDB_POSTGRESQL::sql_affected_rows(void) +{ + BDB_POSTGRESQL *mdb = this; + return (unsigned)str_to_int32(PQcmdTuples(mdb->m_result)); +} + +uint64_t BDB_POSTGRESQL::sql_insert_autokey_record(const char *query, const char *table_name) +{ + uint64_t id = 0; + char sequence[NAMEDATALEN-1]; + char getkeyval_query[NAMEDATALEN+50]; + PGresult *p_result; + BDB_POSTGRESQL *mdb = this; + + /* First execute the insert query and then retrieve the currval. */ + if (!sql_query(query)) { + return 0; + } + + mdb->m_num_rows = sql_affected_rows(); + if (mdb->m_num_rows != 1) { + return 0; + } + mdb->changes++; + /* + * Obtain the current value of the sequence that + * provides the serial value for primary key of the table. + * + * currval is local to our session. It is not affected by + * other transactions. + * + * Determine the name of the sequence. + * PostgreSQL automatically creates a sequence using + *
<table>_<column>_seq. + * At the time of writing, all tables used this format + * for their primary key: <table>
id + * Except for basefiles which has a primary key on baseid. + * Therefore, we need to special case that one table. + * + * everything else can use the PostgreSQL formula. + */ + if (strcasecmp(table_name, "basefiles") == 0) { + bstrncpy(sequence, "basefiles_baseid", sizeof(sequence)); + } else { + bstrncpy(sequence, table_name, sizeof(sequence)); + bstrncat(sequence, "_", sizeof(sequence)); + bstrncat(sequence, table_name, sizeof(sequence)); + bstrncat(sequence, "id", sizeof(sequence)); + } + + bstrncat(sequence, "_seq", sizeof(sequence)); + bsnprintf(getkeyval_query, sizeof(getkeyval_query), "SELECT currval('%s')", sequence); + + Dmsg1(dbglvl_info, "sql_insert_autokey_record executing query '%s'\n", getkeyval_query); + for (int i = 0; i < 10; i++) { + p_result = PQexec(mdb->m_db_handle, getkeyval_query); + if (p_result) { + break; + } + bmicrosleep(5, 0); + } + if (!p_result) { + Dmsg1(dbglvl_err, "Query failed: %s\n", getkeyval_query); + goto get_out; + } + + Dmsg0(dbglvl_dbg, "exec done"); + + if (PQresultStatus(p_result) == PGRES_TUPLES_OK) { + Dmsg0(dbglvl_dbg, "getting value"); + id = str_to_uint64(PQgetvalue(p_result, 0, 0)); + Dmsg2(dbglvl_dbg, "got value '%s' which became %d\n", PQgetvalue(p_result, 0, 0), id); + } else { + Dmsg1(dbglvl_err, "Result status failed: %s\n", getkeyval_query); + Mmsg1(&mdb->errmsg, _("error fetching currval: %s\n"), PQerrorMessage(mdb->m_db_handle)); + } + +get_out: + PQclear(p_result); + return id; +} + +SQL_FIELD *BDB_POSTGRESQL::sql_fetch_field(void) +{ + int max_len; + int this_len; + BDB_POSTGRESQL *mdb = this; + + Dmsg0(dbglvl_dbg, "sql_fetch_field starts\n"); + + if (!mdb->m_fields || mdb->m_fields_size < mdb->m_num_fields) { + if (mdb->m_fields) { + free(mdb->m_fields); + mdb->m_fields = NULL; + } + Dmsg1(dbglvl_dbg, "allocating space for %d fields\n", mdb->m_num_fields); + mdb->m_fields = (SQL_FIELD *)malloc(sizeof(SQL_FIELD) * mdb->m_num_fields); + mdb->m_fields_size = mdb->m_num_fields; + + for (int i = 0; i < mdb->m_num_fields; i++) { + Dmsg1(dbglvl_dbg, "filling field %d\n", i); + mdb->m_fields[i].name = PQfname(mdb->m_result, i); + mdb->m_fields[i].type = PQftype(mdb->m_result, i); + mdb->m_fields[i].flags = 0; + + /* For a given column, find the max length. */ + max_len = 0; + for (int j = 0; j < mdb->m_num_rows; j++) { + if (PQgetisnull(mdb->m_result, j, i)) { + this_len = 4; /* "NULL" */ + } else { + this_len = cstrlen(PQgetvalue(mdb->m_result, j, i)); + } + + if (max_len < this_len) { + max_len = this_len; + } + } + mdb->m_fields[i].max_length = max_len; + + Dmsg4(dbglvl_dbg, "sql_fetch_field finds field '%s' has length='%d' type='%d' and IsNull=%d\n", + mdb->m_fields[i].name, mdb->m_fields[i].max_length, mdb->m_fields[i].type, mdb->m_fields[i].flags); + } + } + + /* Increment field number for the next time around */ + return &mdb->m_fields[mdb->m_field_number++]; +} + +bool BDB_POSTGRESQL::sql_field_is_not_null(int field_type) +{ + if (field_type == 1) { + return true; + } + return false; +} + +bool BDB_POSTGRESQL::sql_field_is_numeric(int field_type) +{ + /* + * TEMP: the following is taken from select OID, typname from pg_type; + */ + switch (field_type) { + case 20: + case 21: + case 23: + case 700: + case 701: + return true; + default: + return false; + } +} + +/* + * Escape strings so PostgreSQL is happy on COPY + * + * len is the length of the old string. Your new + * string must be long enough (max 2*old+1) to hold + * the escaped output. 
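+ * For example, the three bytes  'a', TAB, 'b'  are written out as the four
+ * characters  a \ t b , so the tab reaches the COPY stream as a two-character
+ * escape sequence rather than as a literal field separator.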
+ */ +static char *pgsql_copy_escape(char *dest, char *src, size_t len) +{ + /* we have to escape \t, \n, \r, \ */ + char c = '\0' ; + + while (len > 0 && *src) { + switch (*src) { + case '\n': + c = 'n'; + break; + case '\\': + c = '\\'; + break; + case '\t': + c = 't'; + break; + case '\r': + c = 'r'; + break; + default: + c = '\0' ; + } + + if (c) { + *dest = '\\'; + dest++; + *dest = c; + } else { + *dest = *src; + } + + len--; + src++; + dest++; + } + + *dest = '\0'; + return dest; +} + +bool BDB_POSTGRESQL::sql_batch_start(JCR *jcr) +{ + BDB_POSTGRESQL *mdb = this; + const char *query = "COPY batch FROM STDIN"; + + Dmsg0(dbglvl_info, "sql_batch_start started\n"); + + if (!sql_query("CREATE TEMPORARY TABLE batch (" + "FileIndex int," + "JobId int," + "Path varchar," + "Name varchar," + "LStat varchar," + "Md5 varchar," + "DeltaSeq smallint)")) { + Dmsg0(dbglvl_err, "sql_batch_start failed\n"); + return false; + } + + /* We are starting a new query. reset everything. */ + mdb->m_num_rows = -1; + mdb->m_row_number = -1; + mdb->m_field_number = -1; + + sql_free_result(); + + for (int i=0; i < 10; i++) { + mdb->m_result = PQexec(mdb->m_db_handle, query); + if (mdb->m_result) { + break; + } + bmicrosleep(5, 0); + } + if (!mdb->m_result) { + Dmsg1(dbglvl_err, "Query failed: %s\n", query); + goto get_out; + } + + mdb->m_status = PQresultStatus(mdb->m_result); + if (mdb->m_status == PGRES_COPY_IN) { + /* How many fields in the set? */ + mdb->m_num_fields = (int) PQnfields(mdb->m_result); + mdb->m_num_rows = 0; + mdb->m_status = 1; + } else { + Dmsg1(dbglvl_err, "Result status failed: %s\n", query); + goto get_out; + } + + Dmsg0(dbglvl_info, "sql_batch_start finishing\n"); + + return true; + +get_out: + Mmsg1(&mdb->errmsg, _("error starting batch mode: %s"), PQerrorMessage(mdb->m_db_handle)); + mdb->m_status = 0; + PQclear(mdb->m_result); + mdb->m_result = NULL; + return false; +} + +/* + * Set error to something to abort the operation + */ +bool BDB_POSTGRESQL::sql_batch_end(JCR *jcr, const char *error) +{ + int res; + int count=30; + PGresult *p_result; + BDB_POSTGRESQL *mdb = this; + + Dmsg0(dbglvl_info, "sql_batch_end started\n"); + + do { + res = PQputCopyEnd(mdb->m_db_handle, error); + } while (res == 0 && --count > 0); + + if (res == 1) { + Dmsg0(dbglvl_dbg, "ok\n"); + mdb->m_status = 0; + } + + if (res <= 0) { + mdb->m_status = 1; + Mmsg1(&mdb->errmsg, _("error ending batch mode: %s"), PQerrorMessage(mdb->m_db_handle)); + Dmsg1(dbglvl_err, "failure %s\n", errmsg); + } + + /* Check command status and return to normal libpq state */ + p_result = PQgetResult(mdb->m_db_handle); + if (PQresultStatus(p_result) != PGRES_COMMAND_OK) { + Mmsg1(&mdb->errmsg, _("error ending batch mode: %s"), PQerrorMessage(mdb->m_db_handle)); + mdb->m_status = 1; + } + + /* Get some statistics to compute the best plan */ + sql_query("ANALYZE batch"); + + PQclear(p_result); + + Dmsg0(dbglvl_info, "sql_batch_end finishing\n"); + return true; +} + +bool BDB_POSTGRESQL::sql_batch_insert(JCR *jcr, ATTR_DBR *ar) +{ + int res; + int count=30; + size_t len; + const char *digest; + char ed1[50]; + BDB_POSTGRESQL *mdb = this; + + mdb->esc_name = check_pool_memory_size(mdb->esc_name, fnl*2+1); + pgsql_copy_escape(mdb->esc_name, fname, fnl); + + mdb->esc_path = check_pool_memory_size(mdb->esc_path, pnl*2+1); + pgsql_copy_escape(mdb->esc_path, path, pnl); + + if (ar->Digest == NULL || ar->Digest[0] == 0) { + digest = "0"; + } else { + digest = ar->Digest; + } + + len = Mmsg(mdb->cmd, "%d\t%s\t%s\t%s\t%s\t%s\t%u\n", + 
ar->FileIndex, edit_int64(ar->JobId, ed1), mdb->esc_path, + mdb->esc_name, ar->attr, digest, ar->DeltaSeq); + + do { + res = PQputCopyData(mdb->m_db_handle, mdb->cmd, len); + } while (res == 0 && --count > 0); + + if (res == 1) { + Dmsg0(dbglvl_dbg, "ok\n"); + mdb->changes++; + mdb->m_status = 1; + } + + if (res <= 0) { + mdb->m_status = 0; + Mmsg1(&mdb->errmsg, _("error copying in batch mode: %s"), PQerrorMessage(mdb->m_db_handle)); + Dmsg1(dbglvl_err, "failure %s\n", mdb->errmsg); + } + + Dmsg0(dbglvl_info, "sql_batch_insert finishing\n"); + + return true; +} + + +#endif /* HAVE_POSTGRESQL */ diff --git a/src/cats/postgresql.in b/src/cats/postgresql.in new file mode 100644 index 00000000..ccd273d2 --- /dev/null +++ b/src/cats/postgresql.in @@ -0,0 +1,11 @@ +#!/bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# shell script to create Bacula PostgreSQL tables +# +bindir=@POSTGRESQL_BINDIR@ +db_name=@db_name@ + +$bindir/psql $* ${db_name} diff --git a/src/cats/protos.h b/src/cats/protos.h new file mode 100644 index 00000000..dd7844cc --- /dev/null +++ b/src/cats/protos.h @@ -0,0 +1,299 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * Database routines that are exported by the cats library for + * use elsewhere in Bacula (mainly the Director). + * + * Note: the interface that is used by the core Bacula code outside + * of the cats directory has names that are: + * db_xxx(x, db, y, ...) + * usually with a database pointer such as db as an argument. + * This simplifies the vast bulk of the code and makes it easier to read. + * These are translated into class calls on the db pointer by a #define + * in this file. + * + * The actual class code is named bdb_xxx(x, y, ...) and is called with + * the class pointer such as db->bdb_xxx(x, y, ...) The code in this + * cats directory can use the db_xxx() calls or the db->bdb_xxx() calls. + * In the Bacula core code we prefer using only the db_xxx() calls. 
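+ * For example, a Director call written as
+ *    db_get_job_record(jcr, jcr->db, &jr)
+ * expands, through the defines below, to
+ *    jcr->db->bdb_get_job_record(jcr, &jr)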
+ * + * Written by Kern Sibbald, MM + */ + +#ifndef __SQL_PROTOS_H +#define __SQL_PROTOS_H + +#include "cats.h" + +BDB *db_init_database(JCR *jcr, const char *db_driver, const char *db_name, + const char *db_user, const char *db_password, + const char *db_address, int db_port, + const char *db_socket, + const char *db_ssl_mode, const char *db_ssl_key, + const char *db_ssl_cert, const char *db_ssl_ca, + const char *db_ssl_capath, const char *db_ssl_cipher, + bool mult_db_connections, bool disable_batch_insert); + +/* Database prototypes and defines */ + +/* Misc */ +#define db_lock(mdb) \ + mdb->bdb_lock(__FILE__, __LINE__) +#define db_unlock(mdb) \ + mdb->bdb_unlock() + + +/* Virtual methods */ +#define db_escape_string(jcr, mdb, snew, old, len) \ + mdb->bdb_escape_string(jcr, snew, old, len) +#define db_escape_object(jcr, mdb, old, len) \ + mdb->bdb_escape_object(jcr, old, len) +#define db_unescape_object(jcr, mdb, from, expected_len, dest, len) \ + mdb->bdb_unescape_object(jcr, from, expected_len, dest, len) +#define db_open_database(jcr, mdb) \ + mdb->bdb_open_database(jcr) +#define db_close_database(jcr, mdb) \ + mdb->bdb_close_database(jcr) +#define db_close_database(jcr, mdb) \ + mdb->bdb_close_database(jcr) +#define db_start_transaction(jcr, mdb) \ + mdb->bdb_start_transaction(jcr) +#define db_end_transaction(jcr, mdb) \ + if (mdb) mdb->bdb_end_transaction(jcr) +#define db_sql_query(mdb, query, result_handler, ctx) \ + mdb->bdb_sql_query(query, result_handler, ctx) +#define db_thread_cleanup(mdb) \ + if (mdb) mdb->bdb_thread_cleanup() + +/* sql.c */ +int db_int64_handler(void *ctx, int num_fields, char **row); +int db_strtime_handler(void *ctx, int num_fields, char **row); +int db_list_handler(void *ctx, int num_fields, char **row); +int db_string_list_handler(void *ctx, int num_fields, char **row); +int db_int_handler(void *ctx, int num_fields, char **row); +void bdb_debug_print(JCR *jcr, FILE *fp); +void db_free_restoreobject_record(JCR *jcr, ROBJECT_DBR *rr); + +#define db_open_batch_connexion(jcr, mdb) \ + mdb->bdb_open_batch_connexion(jcr) +#define db_strerror(mdb) \ + mdb->bdb_strerror() +#define db_debug_print(jcr, fp) \ + bdb_debug_print(jcr, fp) +#define db_check_max_connections(jcr, mdb, maxc) \ + mdb->bdb_check_max_connections(jcr, maxc) + +/* sql_create.c */ +bool bdb_write_batch_file_records(JCR *jcr); +void bdb_disable_batch_insert(bool disable); + +/* sql_get.c */ +void bdb_free_restoreobject_record(JCR *jcr, ROBJECT_DBR *rr); + + +/* sql_create.c */ +#define db_create_path_record(jcr, mdb, ar) \ + mdb->bdb_create_path_record(jcr, ar) +#define db_create_file_attributes_record(jcr, mdb, ar) \ + mdb->bdb_create_file_attributes_record(jcr, ar) +#define db_create_job_record(jcr, mdb, jr) \ + mdb->bdb_create_job_record(jcr, jr) +#define db_create_media_record(jcr, mdb, media_dbr) \ + mdb->bdb_create_media_record(jcr, media_dbr) +#define db_create_client_record(jcr, mdb, cr) \ + mdb->bdb_create_client_record(jcr, cr) +#define db_create_fileset_record(jcr, mdb, fsr) \ + mdb->bdb_create_fileset_record(jcr, fsr) +#define db_create_pool_record(jcr, mdb, pool_dbr) \ + mdb->bdb_create_pool_record(jcr, pool_dbr) +#define db_create_jobmedia_record(jcr, mdb, jr) \ + mdb->bdb_create_jobmedia_record(jcr, jr) +#define db_create_counter_record(jcr, mdb, cr) \ + mdb->bdb_create_counter_record(jcr, cr) +#define db_create_device_record(jcr, mdb, dr) \ + mdb->bdb_create_device_record(jcr, dr) +#define db_create_storage_record(jcr, mdb, sr) \ + mdb->bdb_create_storage_record(jcr, sr) 
+#define db_create_mediatype_record(jcr, mdb, mr) \ + mdb->bdb_create_mediatype_record(jcr, mr) +#define db_write_batch_file_records(jcr) \ + bdb_write_batch_file_records(jcr) +#define db_create_attributes_record(jcr, mdb, ar) \ + mdb->bdb_create_attributes_record(jcr, ar) +#define db_create_restore_object_record(jcr, mdb, ar) \ + mdb->bdb_create_restore_object_record(jcr, ar) +#define db_create_base_file_attributes_record(jcr, mdb, ar) \ + mdb->bdb_create_base_file_attributes_record(jcr, ar) +#define db_commit_base_file_attributes_record(jcr, mdb) \ + mdb->bdb_commit_base_file_attributes_record(jcr) +#define db_create_base_file_list(jcr, mdb, jobids) \ + mdb->bdb_create_base_file_list(jcr, jobids) +#define db_disable_batch_insert(disable) \ + bdb_disable_batch_insert(disable) +#define db_create_snapshot_record(jcr, mdb, sr) \ + mdb->bdb_create_snapshot_record(jcr, sr) +#define db_get_job_statistics(jcr, mdb, jr) \ + mdb->bdb_get_job_statistics(jcr, jr) + +/* sql_delete.c */ +#define db_delete_pool_record(jcr, mdb, pool_dbr) \ + mdb->bdb_delete_pool_record(jcr, pool_dbr) +#define db_delete_media_record(jcr, mdb, mr) \ + mdb->bdb_delete_media_record(jcr, mr) +#define db_purge_media_record(jcr, mdb, mr) \ + mdb->bdb_purge_media_record(jcr, mr) +#define db_delete_snapshot_record(jcr, mdb, sr) \ + mdb->bdb_delete_snapshot_record(jcr, sr) +#define db_delete_client_record(jcr, mdb, cr) \ + mdb->bdb_delete_client_record(jcr, cr) + + +/* sql_find.c */ +#define db_find_last_job_end_time(jcr, mdb, jr, etime, job) \ + mdb->bdb_find_last_job_end_time(jcr, jr, etime, job) +#define db_find_last_job_start_time(jcr, mdb, jr, stime, job, JobLevel) \ + mdb->bdb_find_last_job_start_time(jcr, jr, stime, job, JobLevel) +#define db_find_job_start_time(jcr, mdb, jr, stime, job) \ + mdb->bdb_find_job_start_time(jcr, jr, stime, job) +#define db_find_last_jobid(jcr, mdb, Name, jr) \ + mdb->bdb_find_last_jobid(jcr, Name, jr) +#define db_find_next_volume(jcr, mdb, index, InChanger, mr) \ + mdb->bdb_find_next_volume(jcr, index, InChanger, mr) +#define db_find_failed_job_since(jcr, mdb, jr, stime, JobLevel) \ + mdb->bdb_find_failed_job_since(jcr, jr, stime, JobLevel) + +/* sql_get.c */ +#define db_get_volume_jobids(jcr, mdb, mr, lst) \ + mdb->bdb_get_volume_jobids(jcr, mr, lst) +#define db_get_client_jobids(jcr, mdb, cr, lst) \ + mdb->bdb_get_client_jobids(jcr, cr, lst) +#define db_get_base_file_list(jcr, mdb, use_md5, result_handler, ctx) \ + mdb->bdb_get_base_file_list(jcr, use_md5, result_handler, ctx) +#define db_get_path_record(jcr, mdb) \ + mdb->bdb_get_path_record(jcr) +#define db_get_pool_record(jcr, mdb, pdbr) \ + mdb->bdb_get_pool_record(jcr, pdbr) +#define db_get_pool_numvols(jcr, mdb, pdbr) \ + mdb->bdb_get_pool_numvols(jcr, pdbr) +#define db_get_client_record(jcr, mdb, cr) \ + mdb->bdb_get_client_record(jcr, cr) +#define db_get_job_record(jcr, mdb, jr) \ + mdb->bdb_get_job_record(jcr, jr) +#define db_get_job_volume_names(jcr, mdb, JobId, VolumeNames) \ + mdb->bdb_get_job_volume_names(jcr, JobId, VolumeNames) +#define db_get_file_attributes_record(jcr, mdb, fname, jr, fdbr) \ + mdb->bdb_get_file_attributes_record(jcr, fname, jr, fdbr) +#define db_get_fileset_record(jcr, mdb, fsr) \ + mdb->bdb_get_fileset_record(jcr, fsr) +#define db_get_media_record(jcr, mdb, mr) \ + mdb->bdb_get_media_record(jcr, mr) +#define db_get_num_media_records(jcr, mdb) \ + mdb->bdb_get_num_media_records(jcr) +#define db_get_num_pool_records(jcr, mdb) \ + mdb->bdb_get_num_pool_records(jcr) +#define db_get_pool_ids(jcr, mdb, num_ids, 
ids) \ + mdb->bdb_get_pool_ids(jcr, num_ids, ids) +#define db_get_client_ids(jcr, mdb, num_ids, ids) \ + mdb->bdb_get_client_ids(jcr, num_ids, ids) +#define db_get_media_ids(jcr, mdb, mr, num_ids, ids) \ + mdb->bdb_get_media_ids(jcr, mr, num_ids, ids) +#define db_get_job_volume_parameters(jcr, mdb, JobId, VolParams) \ + mdb->bdb_get_job_volume_parameters(jcr, JobId, VolParams) +#define db_get_counter_record(jcr, mdb, cr) \ + mdb->bdb_get_counter_record(jcr, cr) +#define db_get_query_dbids(jcr, mdb, query, ids) \ + mdb->bdb_get_query_dbids(jcr, query, ids) +#define db_get_file_list(jcr, mdb, jobids, opts, result_handler, ctx) \ + mdb->bdb_get_file_list(jcr, jobids, opts, result_handler, ctx) +#define db_get_base_jobid(jcr, mdb, jr, jobid) \ + mdb->bdb_get_base_jobid(jcr, jr, jobid) +#define db_get_accurate_jobids(jcr, mdb, jr, jobids) \ + mdb->bdb_get_accurate_jobids(jcr, jr, jobids) +#define db_get_used_base_jobids(jcr, mdb, jobids, result) \ + mdb->bdb_get_used_base_jobids(jcr, jobids, result) +#define db_get_restoreobject_record(jcr, mdb, rr) \ + mdb->bdb_get_restoreobject_record(jcr, rr) +#define db_get_type_index(mdb) \ + mdb->bdb_get_type_index() +#define db_get_engine_name(mdb) \ + mdb->bdb_get_engine_name() +#define db_get_snapshot_record(jcr, mdb, sr) \ + mdb->bdb_get_snapshot_record(jcr, sr) + +/* sql_list.c */ +#define db_list_pool_records(jcr, mdb, pr, sendit, ctx, type) \ + mdb->bdb_list_pool_records(jcr, pr, sendit, ctx, type) +#define db_list_job_records(jcr, mdb, jr, sendit, ctx, type) \ + mdb->bdb_list_job_records(jcr, jr, sendit, ctx, type) +#define db_list_job_totals(jcr, mdb, jr, sendit, ctx) \ + mdb->bdb_list_job_totals(jcr, jr, sendit, ctx) +#define db_list_files_for_job(jcr, mdb, jobid, deleted, sendit, ctx) \ + mdb->bdb_list_files_for_job(jcr, jobid, deleted, sendit, ctx) +#define db_list_media_records(jcr, mdb, mdbr, sendit, ctx, type) \ + mdb->bdb_list_media_records(jcr, mdbr, sendit, ctx, type) +#define db_list_jobmedia_records(jcr, mdb, JobId, sendit, ctx, type) \ + mdb->bdb_list_jobmedia_records(jcr, JobId, sendit, ctx, type) +#define db_list_joblog_records(jcr, mdb, JobId, sendit, ctx, type) \ + mdb->bdb_list_joblog_records(jcr, JobId, sendit, ctx, type) +#define db_list_sql_query(jcr, mdb, query, sendit, ctx, verbose, type) \ + mdb->bdb_list_sql_query(jcr, query, sendit, ctx, verbose, type) +#define db_list_client_records(jcr, mdb, sendit, ctx, type) \ + mdb->bdb_list_client_records(jcr, sendit, ctx, type) +#define db_list_copies_records(jcr, mdb, limit, jobids, sendit, ctx, type) \ + mdb->bdb_list_copies_records(jcr, limit, jobids, sendit, ctx, type) +#define db_list_base_files_for_job(jcr, mdb, jobid, sendit, ctx) \ + mdb->bdb_list_base_files_for_job(jcr, jobid, sendit, ctx) +#define db_list_restore_objects(jcr, mdb, rr, sendit, ctx, type) \ + mdb->bdb_list_restore_objects(jcr, rr, sendit, ctx, type) +#define db_list_snapshot_records(jcr, mdb, snapdbr, sendit, ua, llist) \ + mdb->bdb_list_snapshot_records(jcr, snapdbr, sendit, ua, llist) + + + +/* sql_update.c */ +#define db_update_job_start_record(jcr, mdb, jr) \ + mdb->bdb_update_job_start_record(jcr, jr) +#define db_update_job_end_record(jcr, mdb, jr) \ + mdb->bdb_update_job_end_record(jcr, jr) +#define db_update_client_record(jcr, mdb, cr) \ + mdb->bdb_update_client_record(jcr, cr) +#define db_update_pool_record(jcr, mdb, pr) \ + mdb->bdb_update_pool_record(jcr, pr) +#define db_update_storage_record(jcr, mdb, sr) \ + mdb->bdb_update_storage_record(jcr, sr) +#define db_update_media_record(jcr, mdb, mr) \ 
+ mdb->bdb_update_media_record(jcr, mr) +#define db_update_media_defaults(jcr, mdb, mr) \ + mdb->bdb_update_media_defaults(jcr, mr) +#define db_update_counter_record(jcr, mdb, cr) \ + mdb->bdb_update_counter_record(jcr, cr) +#define db_add_digest_to_file_record(jcr, mdb, FileId, digest, type) \ + mdb->bdb_add_digest_to_file_record(jcr, FileId, digest, type) +#define db_mark_file_record(jcr, mdb, FileId, JobId) \ + mdb->bdb_mark_file_record(jcr, FileId, JobId) +#define db_make_inchanger_unique(jcr, mdb, mr) \ + mdb->bdb_make_inchanger_unique(jcr, mr) +#define db_update_stats(jcr, mdb, age) \ + mdb->bdb_update_stats(jcr, age) +#define db_update_snapshot_record(jcr, mdb, sr) \ + mdb->bdb_update_snapshot_record(jcr, sr) + + +#endif /* __SQL_PROTOS_H */ diff --git a/src/cats/sql.c b/src/cats/sql.c new file mode 100644 index 00000000..3b9598d9 --- /dev/null +++ b/src/cats/sql.c @@ -0,0 +1,987 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula Catalog Database interface routines + * + * Almost generic set of SQL database interface routines + * (with a little more work) + * SQL engine specific routines are in mysql.c, postgresql.c, + * sqlite.c, ... + * + * Written by Kern Sibbald, March 2000 + * + * Note: at one point, this file was changed to class based by a certain + * programmer, and other than "wrapping" in a class, which is a trivial + * change for a C++ programmer, nothing substantial was done, yet all the + * code was recommitted under this programmer's name. Consequently, we + * undo those changes here. + */ + +#include "bacula.h" + +#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL + +#include "cats.h" + +/* Forward referenced subroutines */ +void print_dashes(BDB *mdb); +void print_result(BDB *mdb); + +dbid_list::dbid_list() +{ + memset(this, 0, sizeof(dbid_list)); + max_ids = 1000; + DBId = (DBId_t *)malloc(max_ids * sizeof(DBId_t)); + num_ids = num_seen = tot_ids = 0; + PurgedFiles = NULL; +} + +dbid_list::~dbid_list() +{ + free(DBId); +} + +/* + * Called here to retrieve an string list from the database + */ +int db_string_list_handler(void *ctx, int num_fields, char **row) +{ + alist **val = (alist **)ctx; + + if (row[0]) { + (*val)->append(bstrdup(row[0])); + } + + return 0; +} + +/* + * Called here to retrieve an integer from the database + */ +int db_int_handler(void *ctx, int num_fields, char **row) +{ + uint32_t *val = (uint32_t *)ctx; + + Dmsg1(800, "int_handler starts with row pointing at %x\n", row); + + if (row[0]) { + Dmsg1(800, "int_handler finds '%s'\n", row[0]); + *val = str_to_int64(row[0]); + } else { + Dmsg0(800, "int_handler finds zero\n"); + *val = 0; + } + Dmsg0(800, "int_handler finishes\n"); + return 0; +} + +/* + * Called here to retrieve a 32/64 bit integer from the database. + * The returned integer will be extended to 64 bit. 
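+ * Typical use (illustrative):
+ *    db_int64_ctx lctx;
+ *    db_sql_query(mdb, "SELECT MAX(JobId) FROM Job", db_int64_handler, &lctx);
+ * after which lctx.value holds the integer and lctx.count the number of
+ * non-NULL values seen.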
+ */ +int db_int64_handler(void *ctx, int num_fields, char **row) +{ + db_int64_ctx *lctx = (db_int64_ctx *)ctx; + + if (row[0]) { + lctx->value = str_to_int64(row[0]); + lctx->count++; + } + return 0; +} + +/* + * Called here to retrieve a btime from the database. + * The returned integer will be extended to 64 bit. + */ +int db_strtime_handler(void *ctx, int num_fields, char **row) +{ + db_int64_ctx *lctx = (db_int64_ctx *)ctx; + + if (row[0]) { + lctx->value = str_to_utime(row[0]); + lctx->count++; + } + return 0; +} + +/* + * Use to build a comma separated list of values from a query. "10,20,30" + */ +int db_list_handler(void *ctx, int num_fields, char **row) +{ + db_list_ctx *lctx = (db_list_ctx *)ctx; + if (num_fields == 1 && row[0]) { + lctx->add(row[0]); + } + return 0; +} + +/* + * specific context passed from bdb_check_max_connections to + * db_max_connections_handler. + */ +struct max_connections_context { + BDB *db; + uint32_t nr_connections; +}; + +/* + * Called here to retrieve max_connections from db + */ +static int db_max_connections_handler(void *ctx, int num_fields, char **row) +{ + struct max_connections_context *context; + uint32_t index; + + context = (struct max_connections_context *)ctx; + switch (context->db->bdb_get_type_index()) { + case SQL_TYPE_MYSQL: + index = 1; + default: + index = 0; + } + + if (row[index]) { + context->nr_connections = str_to_int64(row[index]); + } else { + Dmsg0(800, "int_handler finds zero\n"); + context->nr_connections = 0; + } + return 0; +} + +BDB::BDB() +{ + init_acl(); + acl_join = get_pool_memory(PM_MESSAGE); + acl_where = get_pool_memory(PM_MESSAGE); +} + +BDB::~BDB() +{ + free_acl(); + free_pool_memory(acl_join); + free_pool_memory(acl_where); +} + +/* Get the WHERE section of a query that permits to respect + * the console ACLs. 
+ * + * get_acls(DB_ACL_BIT(DB_ACL_JOB) | DB_ACL_BIT(DB_ACL_CLIENT), true) + * -> WHERE Job.Name IN ('a', 'b', 'c') AND Client.Name IN ('d', 'e') + * + * get_acls(DB_ACL_BIT(DB_ACL_JOB) | DB_ACL_BIT(DB_ACL_CLIENT), false) + * -> AND Job.Name IN ('a', 'b', 'c') AND Client.Name IN ('d', 'e') + */ +char *BDB::get_acls(int tables, bool where /* use WHERE or AND */) +{ + POOL_MEM tmp; + pm_strcpy(acl_where, ""); + + for (int i=0 ; i < DB_ACL_LAST; i++) { + if (tables & DB_ACL_BIT(i)) { + pm_strcat(acl_where, get_acl((DB_ACL_t)i, where)); + where = acl_where[0] == 0 && where; + } + } + return acl_where; +} + +/* Create the JOIN string that will help to filter queries results */ +char *BDB::get_acl_join_filter(int tables) +{ + POOL_MEM tmp; + pm_strcpy(acl_join, ""); + + if (tables & DB_ACL_BIT(DB_ACL_JOB)) { + Mmsg(tmp, " JOIN Job USING (JobId) "); + pm_strcat(acl_join, tmp); + } + if (tables & (DB_ACL_BIT(DB_ACL_CLIENT) | DB_ACL_BIT(DB_ACL_RCLIENT) | DB_ACL_BIT(DB_ACL_BCLIENT))) { + Mmsg(tmp, " JOIN Client USING (ClientId) "); + pm_strcat(acl_join, tmp); + } + if (tables & DB_ACL_BIT(DB_ACL_POOL)) { + Mmsg(tmp, " JOIN Pool USING (PoolId) "); + pm_strcat(acl_join, tmp); + } + if (tables & DB_ACL_BIT(DB_ACL_PATH)) { + Mmsg(tmp, " JOIN Path USING (PathId) "); + pm_strcat(acl_join, tmp); + } + if (tables & DB_ACL_BIT(DB_ACL_LOG)) { + Mmsg(tmp, " JOIN Log USING (JobId) "); + pm_strcat(acl_join, tmp); + } + if (tables & DB_ACL_BIT(DB_ACL_FILESET)) { + Mmsg(tmp, " LEFT JOIN FileSet USING (FileSetId) "); + pm_strcat(acl_join, tmp); + } + return acl_join; +} + +/* Intialize the ACL list */ +void BDB::init_acl() +{ + for(int i=0; i < DB_ACL_LAST; i++) { + acls[i] = NULL; + } +} + +/* Free ACL list */ +void BDB::free_acl() +{ + for(int i=0; i < DB_ACL_LAST; i++) { + free_and_null_pool_memory(acls[i]); + } +} + +/* Get ACL for a given type */ +const char *BDB::get_acl(DB_ACL_t type, bool where /* display WHERE or AND */) +{ + if (!acls[type]) { + return ""; + } + strcpy(acls[type], where?" 
WHERE ":" AND "); + acls[type][7] = ' ' ; /* replace \0 by ' ' */ + return acls[type]; +} + +/* Keep UAContext ACLs in our structure for further SQL queries */ +void BDB::set_acl(JCR *jcr, DB_ACL_t type, alist *list, alist *list2) +{ + /* If the list is present, but we authorize everything */ + if (list && list->size() == 1 && strcasecmp((char*)list->get(0), "*all*") == 0) { + return; + } + + /* If the list is present, but we authorize everything */ + if (list2 && list2->size() == 1 && strcasecmp((char*)list2->get(0), "*all*") == 0) { + return; + } + + POOLMEM *tmp = get_pool_memory(PM_FNAME); + POOLMEM *where = get_pool_memory(PM_FNAME); + + *where = 0; + *tmp = 0; + + /* For clients, we can have up to 2 lists */ + escape_acl_list(jcr, &tmp, list); + escape_acl_list(jcr, &tmp, list2); + + switch(type) { + case DB_ACL_JOB: + Mmsg(where, " AND Job.Name IN (%s) ", tmp); + break; + case DB_ACL_CLIENT: + Mmsg(where, " AND Client.Name IN (%s) ", tmp); + break; + case DB_ACL_BCLIENT: + Mmsg(where, " AND Client.Name IN (%s) ", tmp); + break; + case DB_ACL_RCLIENT: + Mmsg(where, " AND Client.Name IN (%s) ", tmp); + break; + case DB_ACL_FILESET: + Mmsg(where, " AND (FileSetId = 0 OR FileSet.FileSet IN (%s)) ", tmp); + break; + case DB_ACL_POOL: + Mmsg(where, " AND (PoolId = 0 OR Pool.Name IN (%s)) ", tmp); + break; + default: + break; + } + acls[type] = where; + free_pool_memory(tmp); +} + +/* Convert a ACL list to a SQL IN() list */ +char *BDB::escape_acl_list(JCR *jcr, POOLMEM **escaped_list, alist *lst) +{ + char *elt; + int len; + POOL_MEM tmp; + + if (!lst) { + return *escaped_list; /* TODO: check how we handle the empty list */ + + /* List is empty, reject everything */ + } else if (lst->size() == 0) { + Mmsg(escaped_list, "''"); + return *escaped_list; + } + + foreach_alist(elt, lst) { + if (elt && *elt) { + len = strlen(elt); + /* Escape + ' ' */ + tmp.check_size(2 * len + 2 + 2); + + pm_strcpy(tmp, "'"); + bdb_lock(); + bdb_escape_string(jcr, tmp.c_str() + 1 , elt, len); + bdb_unlock(); + pm_strcat(tmp, "'"); + + if (*escaped_list[0]) { + pm_strcat(escaped_list, ","); + } + + pm_strcat(escaped_list, tmp.c_str()); + } + } + return *escaped_list; +} + +/* + * Check catalog max_connections setting + */ +bool BDB::bdb_check_max_connections(JCR *jcr, uint32_t max_concurrent_jobs) +{ + struct max_connections_context context; + + /* Without Batch insert, no need to verify max_connections */ + if (!batch_insert_available()) + return true; + + context.db = this; + context.nr_connections = 0; + + /* Check max_connections setting */ + if (!bdb_sql_query(sql_get_max_connections[bdb_get_type_index()], + db_max_connections_handler, &context)) { + Jmsg(jcr, M_ERROR, 0, "Can't verify max_connections settings %s", errmsg); + return false; + } + if (context.nr_connections && max_concurrent_jobs && max_concurrent_jobs > context.nr_connections) { + Mmsg(errmsg, + _("Potential performance problem:\n" + "max_connections=%d set for %s database \"%s\" should be larger than Director's " + "MaxConcurrentJobs=%d\n"), + context.nr_connections, bdb_get_engine_name(), get_db_name(), max_concurrent_jobs); + Jmsg(jcr, M_WARNING, 0, "%s", errmsg); + return false; + } + + return true; +} + +/* NOTE!!! 
The following routines expect that the + * calling subroutine sets and clears the mutex + */ + +/* Check that the tables correspond to the version we want */ +bool BDB::bdb_check_version(JCR *jcr) +{ + uint32_t bacula_db_version = 0; + const char *query = "SELECT VersionId FROM Version"; + + bacula_db_version = 0; + if (!bdb_sql_query(query, db_int_handler, (void *)&bacula_db_version)) { + Jmsg(jcr, M_FATAL, 0, "%s", errmsg); + return false; + } + if (bacula_db_version != BDB_VERSION) { + Mmsg(errmsg, "Version error for database \"%s\". Wanted %d, got %d\n", + get_db_name(), BDB_VERSION, bacula_db_version); + Jmsg(jcr, M_FATAL, 0, "%s", errmsg); + return false; + } + return true; +} + +/* + * Utility routine for queries. The database MUST be locked before calling here. + * Returns: 0 on failure + * 1 on success + */ +bool BDB::QueryDB(JCR *jcr, char *cmd, const char *file, int line) +{ + sql_free_result(); + if (!sql_query(cmd, QF_STORE_RESULT)) { + m_msg(file, line, &errmsg, _("query %s failed:\n%s\n"), cmd, sql_strerror()); + if (use_fatal_jmsg()) { + j_msg(file, line, jcr, M_FATAL, 0, "%s", errmsg); + } + if (verbose) { + j_msg(file, line, jcr, M_INFO, 0, "%s\n", cmd); + } + return false; + } + + return true; +} + +/* + * Utility routine to do inserts + * Returns: 0 on failure + * 1 on success + */ +bool BDB::InsertDB(JCR *jcr, char *cmd, const char *file, int line) +{ + if (!sql_query(cmd)) { + m_msg(file, line, &errmsg, _("insert %s failed:\n%s\n"), cmd, sql_strerror()); + if (use_fatal_jmsg()) { + j_msg(file, line, jcr, M_FATAL, 0, "%s", errmsg); + } + if (verbose) { + j_msg(file, line, jcr, M_INFO, 0, "%s\n", cmd); + } + return false; + } + int num_rows = sql_affected_rows(); + if (num_rows != 1) { + char ed1[30]; + m_msg(file, line, &errmsg, _("Insertion problem: affected_rows=%s\n"), + edit_uint64(num_rows, ed1)); + if (verbose) { + j_msg(file, line, jcr, M_INFO, 0, "%s\n", cmd); + } + return false; + } + changes++; + return true; +} + +/* Utility routine for updates. + * Returns: false on failure + * true on success + * + * Some UPDATE queries must update record(s), other queries might not update + * anything. + */ +bool BDB::UpdateDB(JCR *jcr, char *cmd, bool can_be_empty, + const char *file, int line) +{ + if (!sql_query(cmd)) { + m_msg(file, line, &errmsg, _("update %s failed:\n%s\n"), cmd, sql_strerror()); + j_msg(file, line, jcr, M_ERROR, 0, "%s", errmsg); + if (verbose) { + j_msg(file, line, jcr, M_INFO, 0, "%s\n", cmd); + } + return false; + } + int num_rows = sql_affected_rows(); + if ((num_rows == 0 && !can_be_empty) || num_rows < 0) { + char ed1[30]; + m_msg(file, line, &errmsg, _("Update failed: affected_rows=%s for %s\n"), + edit_uint64(num_rows, ed1), cmd); + if (verbose) { +// j_msg(file, line, jcr, M_INFO, 0, "%s\n", cmd); + } + return false; + } + changes++; + return true; +} + +/* Utility routine for deletes + * + * Returns: -1 on error + * n number of rows affected + */ +int BDB::DeleteDB(JCR *jcr, char *cmd, const char *file, int line) +{ + + if (!sql_query(cmd)) { + m_msg(file, line, &errmsg, _("delete %s failed:\n%s\n"), cmd, sql_strerror()); + j_msg(file, line, jcr, M_ERROR, 0, "%s", errmsg); + if (verbose) { + j_msg(file, line, jcr, M_INFO, 0, "%s\n", cmd); + } + return -1; + } + changes++; + return sql_affected_rows(); +} + + +/* + * Get record max. 
Query is already in mdb->cmd + * No locking done + * + * Returns: -1 on failure + * count on success + */ +int get_sql_record_max(JCR *jcr, BDB *mdb) +{ + SQL_ROW row; + int stat = 0; + + if (mdb->QueryDB(jcr, mdb->cmd)) { + if ((row = mdb->sql_fetch_row()) == NULL) { + Mmsg1(&mdb->errmsg, _("error fetching row: %s\n"), mdb->sql_strerror()); + stat = -1; + } else { + stat = str_to_int64(row[0]); + } + mdb->sql_free_result(); + } else { + Mmsg1(&mdb->errmsg, _("error fetching row: %s\n"), mdb->sql_strerror()); + stat = -1; + } + return stat; +} + +/* + * Given a full filename, split it into its path + * and filename parts. They are returned in pool memory + * in the mdb structure. + */ +void split_path_and_file(JCR *jcr, BDB *mdb, const char *afname) +{ + const char *p, *f; + + /* Find path without the filename. + * I.e. everything after the last / is a "filename". + * OK, maybe it is a directory name, but we treat it like + * a filename. If we don't find a / then the whole name + * must be a path name (e.g. c:). + */ + for (p=f=afname; *p; p++) { + if (IsPathSeparator(*p)) { + f = p; /* set pos of last slash */ + } + } + if (IsPathSeparator(*f)) { /* did we find a slash? */ + f++; /* yes, point to filename */ + } else { /* no, whole thing must be path name */ + f = p; + } + + /* If filename doesn't exist (i.e. root directory), we + * simply create a blank name consisting of a single + * space. This makes handling zero length filenames + * easier. + */ + mdb->fnl = p - f; + if (mdb->fnl > 0) { + mdb->fname = check_pool_memory_size(mdb->fname, mdb->fnl+1); + memcpy(mdb->fname, f, mdb->fnl); /* copy filename */ + mdb->fname[mdb->fnl] = 0; + } else { + mdb->fname[0] = 0; + mdb->fnl = 0; + } + + mdb->pnl = f - afname; + if (mdb->pnl > 0) { + mdb->path = check_pool_memory_size(mdb->path, mdb->pnl+1); + memcpy(mdb->path, afname, mdb->pnl); + mdb->path[mdb->pnl] = 0; + } else { + Mmsg1(&mdb->errmsg, _("Path length is zero. 
File=%s\n"), afname); + Jmsg(jcr, M_FATAL, 0, "%s", mdb->errmsg); + mdb->path[0] = 0; + mdb->pnl = 0; + } + + Dmsg3(500, "split fname=%s: path=%s file=%s\n", afname, mdb->path, mdb->fname); +} + +/* + * Set maximum field length to something reasonable + */ +static int max_length(int max_length) +{ + int max_len = max_length; + /* Sanity check */ + if (max_len < 0) { + max_len = 2; + } else if (max_len > 100) { + max_len = 100; + } + return max_len; +} + +/* + * List dashes as part of header for listing SQL results in a table + */ +void +list_dashes(BDB *mdb, DB_LIST_HANDLER *send, void *ctx) +{ + SQL_FIELD *field; + int i, j; + int len; + + mdb->sql_field_seek(0); + send(ctx, "+"); + for (i = 0; i < mdb->sql_num_fields(); i++) { + field = mdb->sql_fetch_field(); + if (!field) { + break; + } + len = max_length(field->max_length + 2); + for (j = 0; j < len; j++) { + send(ctx, "-"); + } + send(ctx, "+"); + } + send(ctx, "\n"); +} + +/* Small handler to print the last line of a list xxx command */ +static void last_line_handler(void *vctx, const char *str) +{ + LIST_CTX *ctx = (LIST_CTX *)vctx; + bstrncat(ctx->line, str, sizeof(ctx->line)); +} + +int list_result(void *vctx, int nb_col, char **row) +{ + SQL_FIELD *field; + int i, col_len, max_len = 0; + char buf[2000], ewc[30]; + + LIST_CTX *pctx = (LIST_CTX *)vctx; + DB_LIST_HANDLER *send = pctx->send; + e_list_type type = pctx->type; + BDB *mdb = pctx->mdb; + void *ctx = pctx->ctx; + JCR *jcr = pctx->jcr; + + if (!pctx->once) { + pctx->once = true; + + Dmsg1(800, "list_result starts looking at %d fields\n", mdb->sql_num_fields()); + /* determine column display widths */ + mdb->sql_field_seek(0); + for (i = 0; i < mdb->sql_num_fields(); i++) { + Dmsg1(800, "list_result processing field %d\n", i); + field = mdb->sql_fetch_field(); + if (!field) { + break; + } + col_len = cstrlen(field->name); + if (type == VERT_LIST) { + if (col_len > max_len) { + max_len = col_len; + } + } else { + if (mdb->sql_field_is_numeric(field->type) && (int)field->max_length > 0) { /* fixup for commas */ + field->max_length += (field->max_length - 1) / 3; + } + if (col_len < (int)field->max_length) { + col_len = field->max_length; + } + if (col_len < 4 && !mdb->sql_field_is_not_null(field->flags)) { + col_len = 4; /* 4 = length of the word "NULL" */ + } + field->max_length = col_len; /* reset column info */ + } + } + + pctx->num_rows++; + + Dmsg0(800, "list_result finished first loop\n"); + if (type == VERT_LIST) { + goto vertical_list; + } + if (type == ARG_LIST) { + goto arg_list; + } + + Dmsg1(800, "list_result starts second loop looking at %d fields\n", + mdb->sql_num_fields()); + + /* Keep the result to display the same line at the end of the table */ + list_dashes(mdb, last_line_handler, pctx); + send(ctx, pctx->line); + + send(ctx, "|"); + mdb->sql_field_seek(0); + for (i = 0; i < mdb->sql_num_fields(); i++) { + Dmsg1(800, "list_result looking at field %d\n", i); + field = mdb->sql_fetch_field(); + if (!field) { + break; + } + max_len = max_length(field->max_length); + bsnprintf(buf, sizeof(buf), " %-*s |", max_len, field->name); + send(ctx, buf); + } + send(ctx, "\n"); + list_dashes(mdb, send, ctx); + } + Dmsg1(800, "list_result starts third loop looking at %d fields\n", + mdb->sql_num_fields()); + mdb->sql_field_seek(0); + send(ctx, "|"); + for (i = 0; i < mdb->sql_num_fields(); i++) { + field = mdb->sql_fetch_field(); + if (!field) { + break; + } + max_len = max_length(field->max_length); + if (row[i] == NULL) { + bsnprintf(buf, sizeof(buf), " %-*s |", 
max_len, "NULL"); + } else if (mdb->sql_field_is_numeric(field->type) && !jcr->gui && is_an_integer(row[i])) { + bsnprintf(buf, sizeof(buf), " %*s |", max_len, + add_commas(row[i], ewc)); + } else { + bsnprintf(buf, sizeof(buf), " %-*s |", max_len, row[i]); + } + send(ctx, buf); + } + send(ctx, "\n"); + return 0; + +vertical_list: + + Dmsg1(800, "list_result starts vertical list at %d fields\n", mdb->sql_num_fields()); + mdb->sql_field_seek(0); + for (i = 0; i < mdb->sql_num_fields(); i++) { + field = mdb->sql_fetch_field(); + if (!field) { + break; + } + if (row[i] == NULL) { + bsnprintf(buf, sizeof(buf), " %*s: %s\n", max_len, field->name, "NULL"); + } else if (mdb->sql_field_is_numeric(field->type) && !jcr->gui && is_an_integer(row[i])) { + bsnprintf(buf, sizeof(buf), " %*s: %s\n", max_len, field->name, + add_commas(row[i], ewc)); + } else { + bsnprintf(buf, sizeof(buf), " %*s: %s\n", max_len, field->name, row[i]); + } + send(ctx, buf); + } + send(ctx, "\n"); + return 0; + +arg_list: + Dmsg1(800, "list_result starts simple list at %d fields\n", mdb->sql_num_fields()); + mdb->sql_field_seek(0); + for (i = 0; i < mdb->sql_num_fields(); i++) { + field = mdb->sql_fetch_field(); + if (!field) { + break; + } + if (row[i] == NULL) { + bsnprintf(buf, sizeof(buf), "%s%s=", (i>0?" ":""), field->name); + } else { + bash_spaces(row[i]); + bsnprintf(buf, sizeof(buf), "%s%s=%s ", (i>0?" ":""), field->name, row[i]); + } + send(ctx, buf); + } + send(ctx, "\n"); + return 0; + +} + +/* + * If full_list is set, we list vertically, otherwise, we + * list on one line horizontally. + * Return number of rows + */ +int +list_result(JCR *jcr, BDB *mdb, DB_LIST_HANDLER *send, void *ctx, e_list_type type) +{ + SQL_FIELD *field; + SQL_ROW row; + int i, col_len, max_len = 0; + char buf[2000], ewc[30]; + + Dmsg0(800, "list_result starts\n"); + if (mdb->sql_num_rows() == 0) { + send(ctx, _("No results to list.\n")); + return mdb->sql_num_rows(); + } + + Dmsg1(800, "list_result starts looking at %d fields\n", mdb->sql_num_fields()); + /* determine column display widths */ + mdb->sql_field_seek(0); + for (i = 0; i < mdb->sql_num_fields(); i++) { + Dmsg1(800, "list_result processing field %d\n", i); + field = mdb->sql_fetch_field(); + if (!field) { + break; + } + col_len = cstrlen(field->name); + if (type == VERT_LIST) { + if (col_len > max_len) { + max_len = col_len; + } + } else { + if (mdb->sql_field_is_numeric(field->type) && (int)field->max_length > 0) { /* fixup for commas */ + field->max_length += (field->max_length - 1) / 3; + } + if (col_len < (int)field->max_length) { + col_len = field->max_length; + } + if (col_len < 4 && !mdb->sql_field_is_not_null(field->flags)) { + col_len = 4; /* 4 = length of the word "NULL" */ + } + field->max_length = col_len; /* reset column info */ + } + } + + Dmsg0(800, "list_result finished first loop\n"); + if (type == VERT_LIST) { + goto vertical_list; + } + if (type == ARG_LIST) { + goto arg_list; + } + + Dmsg1(800, "list_result starts second loop looking at %d fields\n", mdb->sql_num_fields()); + list_dashes(mdb, send, ctx); + send(ctx, "|"); + mdb->sql_field_seek(0); + for (i = 0; i < mdb->sql_num_fields(); i++) { + Dmsg1(800, "list_result looking at field %d\n", i); + field = mdb->sql_fetch_field(); + if (!field) { + break; + } + max_len = max_length(field->max_length); + bsnprintf(buf, sizeof(buf), " %-*s |", max_len, field->name); + send(ctx, buf); + } + send(ctx, "\n"); + list_dashes(mdb, send, ctx); + + Dmsg1(800, "list_result starts third loop looking at %d fields\n", 
mdb->sql_num_fields()); + while ((row = mdb->sql_fetch_row()) != NULL) { + mdb->sql_field_seek(0); + send(ctx, "|"); + for (i = 0; i < mdb->sql_num_fields(); i++) { + field = mdb->sql_fetch_field(); + if (!field) { + break; + } + max_len = max_length(field->max_length); + if (row[i] == NULL) { + bsnprintf(buf, sizeof(buf), " %-*s |", max_len, "NULL"); + } else if (mdb->sql_field_is_numeric(field->type) && !jcr->gui && is_an_integer(row[i])) { + bsnprintf(buf, sizeof(buf), " %*s |", max_len, + add_commas(row[i], ewc)); + } else { + strip_trailing_junk(row[i]); + bsnprintf(buf, sizeof(buf), " %-*s |", max_len, row[i]); + } + send(ctx, buf); + } + send(ctx, "\n"); + } + list_dashes(mdb, send, ctx); + return mdb->sql_num_rows(); + +vertical_list: + + Dmsg1(800, "list_result starts vertical list at %d fields\n", mdb->sql_num_fields()); + while ((row = mdb->sql_fetch_row()) != NULL) { + mdb->sql_field_seek(0); + for (i = 0; i < mdb->sql_num_fields(); i++) { + field = mdb->sql_fetch_field(); + if (!field) { + break; + } + if (row[i] == NULL) { + bsnprintf(buf, sizeof(buf), " %*s: %s\n", max_len, field->name, "NULL"); + } else if (mdb->sql_field_is_numeric(field->type) && !jcr->gui && is_an_integer(row[i])) { + bsnprintf(buf, sizeof(buf), " %*s: %s\n", max_len, field->name, + add_commas(row[i], ewc)); + } else { + strip_trailing_junk(row[i]); + bsnprintf(buf, sizeof(buf), " %*s: %s\n", max_len, field->name, row[i]); + } + send(ctx, buf); + } + send(ctx, "\n"); + } + +arg_list: + + Dmsg1(800, "list_result starts arg list at %d fields\n", mdb->sql_num_fields()); + while ((row = mdb->sql_fetch_row()) != NULL) { + mdb->sql_field_seek(0); + for (i = 0; i < mdb->sql_num_fields(); i++) { + field = mdb->sql_fetch_field(); + if (!field) { + break; + } + if (row[i] == NULL) { + bsnprintf(buf, sizeof(buf), "%s%s=", (i>0?" ":""), field->name); + } else { + bash_spaces(row[i]); + bsnprintf(buf, sizeof(buf), "%s%s=%s", (i>0?" ":""), field->name, row[i]); + } + send(ctx, buf); + } + send(ctx, "\n"); + } + return mdb->sql_num_rows(); +} + +/* + * Open a new connexion to mdb catalog. This function is used + * by batch and accurate mode. + */ +bool BDB::bdb_open_batch_connexion(JCR *jcr) +{ + bool multi_db; + + multi_db = batch_insert_available(); + + if (!jcr->db_batch) { + jcr->db_batch = bdb_clone_database_connection(jcr, multi_db); + if (!jcr->db_batch) { + Mmsg0(&errmsg, _("Could not init database batch connection\n")); + Jmsg(jcr, M_FATAL, 0, "%s", errmsg); + return false; + } + + if (!jcr->db_batch->bdb_open_database(jcr)) { + Mmsg2(&errmsg, _("Could not open database \"%s\": ERR=%s\n"), + jcr->db_batch->get_db_name(), jcr->db_batch->bdb_strerror()); + Jmsg(jcr, M_FATAL, 0, "%s", errmsg); + return false; + } + } + return true; +} + +/* + * !!! WARNING !!! Use this function only when bacula is stopped. + * ie, after a fatal signal and before exiting the program + * Print information about a BDB object. + */ +void bdb_debug_print(JCR *jcr, FILE *fp) +{ + BDB *mdb = jcr->db; + + if (!mdb) { + return; + } + + fprintf(fp, "BDB=%p db_name=%s db_user=%s connected=%s\n", + mdb, NPRTB(mdb->get_db_name()), NPRTB(mdb->get_db_user()), mdb->is_connected() ? 
"true" : "false"); + fprintf(fp, "\tcmd=\"%s\" changes=%i\n", NPRTB(mdb->cmd), mdb->changes); + mdb->print_lock_info(fp); +} + +bool BDB::bdb_check_settings(JCR *jcr, int64_t *starttime, int val, int64_t val2) +{ + return true; +} + +#endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL */ diff --git a/src/cats/sql_cmds.c b/src/cats/sql_cmds.c new file mode 100644 index 00000000..4515a6d2 --- /dev/null +++ b/src/cats/sql_cmds.c @@ -0,0 +1,1053 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * This file contains all the SQL commands that are either issued by + * the Director or which are database backend specific. + * + * Written by Kern Sibbald, July MMII + */ +/* + * Note, PostgreSQL imposes some constraints on using DISTINCT and GROUP BY + * for example, the following is illegal in PostgreSQL: + * SELECT DISTINCT JobId FROM temp ORDER BY StartTime ASC; + * because all the ORDER BY expressions must appear in the SELECT list! + */ + +#include "bacula.h" + +const char *get_restore_objects = + "SELECT JobId,ObjectLength,ObjectFullLength,ObjectIndex," + "ObjectType,ObjectCompression,FileIndex,ObjectName," + "RestoreObject,PluginName " + "FROM RestoreObject " + "WHERE JobId IN (%s) " + "AND ObjectType = %d " + "ORDER BY ObjectIndex ASC"; + +const char *cleanup_created_job = + "UPDATE Job SET JobStatus='f', StartTime=SchedTime, EndTime=SchedTime " + "WHERE JobStatus = 'C'"; +const char *cleanup_running_job = + "UPDATE Job SET JobStatus='f', EndTime=StartTime WHERE JobStatus = 'R'"; + +/* For sql_update.c db_update_stats */ +const char *fill_jobhisto = + "INSERT INTO JobHisto (JobId, Job, Name, Type, Level," + "ClientId, JobStatus," + "SchedTime, StartTime, EndTime, RealEndTime, JobTDate," + "VolSessionId, VolSessionTime, JobFiles, JobBytes, ReadBytes," + "JobErrors, JobMissingFiles, PoolId, FileSetId, PriorJobId," + "PurgedFiles, HasBase, Reviewed, Comment)" + "SELECT JobId, Job, Name, Type, Level, ClientId, JobStatus," + "SchedTime, StartTime, EndTime, RealEndTime, JobTDate," + "VolSessionId, VolSessionTime, JobFiles, JobBytes, ReadBytes," + "JobErrors, JobMissingFiles, PoolId, FileSetId, PriorJobId," + "PurgedFiles, HasBase, Reviewed, Comment " + "FROM Job " + "WHERE JobStatus IN ('T','W','f','A','E')" + "AND NOT EXISTS " + "(SELECT JobHisto.JobId " + "FROM JobHisto WHERE JobHisto.Jobid=Job.JobId)" + "AND JobTDate < %s "; + +/* For ua_update.c */ +const char *list_pool = "SELECT * FROM Pool WHERE PoolId=%s"; + +/* For ua_dotcmds.c */ +const char *client_backups = + "SELECT DISTINCT Job.JobId,Client.Name as Client,Level,StartTime," + "JobFiles,JobBytes,VolumeName,MediaType,FileSet,Media.Enabled as Enabled" + " FROM Client,Job,JobMedia,Media,FileSet" + " WHERE Client.Name='%s'" + " AND FileSet='%s'" + " AND Client.ClientId=Job.ClientId " + " AND JobStatus IN ('T','W') AND Type='B' " + " AND JobMedia.JobId=Job.JobId AND JobMedia.MediaId=Media.MediaId " + " AND Job.FileSetId=FileSet.FileSetId" + " ORDER BY 
Job.StartTime"; + +/* ====== ua_prune.c */ + +const char *sel_JobMedia = + "SELECT DISTINCT JobMedia.JobId FROM JobMedia,Job" + " WHERE MediaId=%s AND Job.JobId=JobMedia.JobId " + " AND Job.JobTDate<%s AND Job.JobStatus NOT IN ('R', 'C') "; + +/* Delete temp tables and indexes */ +const char *drop_deltabs[] = { + "DROP TABLE DelCandidates", + NULL}; + +const char *create_delindex = "CREATE INDEX DelInx1 ON DelCandidates (JobId)"; + +/* ======= ua_restore.c */ +const char *uar_count_files = + "SELECT JobFiles FROM Job WHERE JobId=%s"; + +/* List last 20 Jobs */ +const char *uar_list_jobs = + "SELECT JobId,Client.Name as Client,StartTime,Level as " + "JobLevel,JobFiles,JobBytes " + "FROM Client,Job WHERE Client.ClientId=Job.ClientId AND JobStatus IN ('T','W') " + "AND Type='B' ORDER BY StartTime DESC LIMIT 20"; + +const char *uar_print_jobs = + "SELECT DISTINCT JobId,Level,JobFiles,JobBytes,StartTime,VolumeName" + " FROM Job JOIN JobMedia USING (JobId) JOIN Media USING (MediaId) " + " WHERE JobId IN (%s) " + " ORDER BY StartTime ASC"; + +/* + * Find all files for a particular JobId and insert them into + * the tree during a restore. + */ +const char *uar_sel_files = + "SELECT Path.Path,Filename.Name,FileIndex,JobId,LStat " + "FROM File,Filename,Path " + "WHERE File.JobId IN (%s) AND Filename.FilenameId=File.FilenameId " + "AND Path.PathId=File.PathId"; + +const char *uar_del_temp = "DROP TABLE temp"; +const char *uar_del_temp1 = "DROP TABLE temp1"; + +const char *uar_last_full = + "INSERT INTO temp1 SELECT Job.JobId,JobTdate " + "FROM Client,Job,JobMedia,Media,FileSet WHERE Client.ClientId=%s " + "AND Job.ClientId=%s " + "AND Job.StartTime < '%s' " + "AND Level='F' AND JobStatus IN ('T','W') AND Type='B' " + "AND JobMedia.JobId=Job.JobId " + "AND Media.Enabled=1 " + "AND JobMedia.MediaId=Media.MediaId " + "AND Job.FileSetId=FileSet.FileSetId " + "AND FileSet.FileSet='%s' " + "%s" + "ORDER BY Job.JobTDate DESC LIMIT 1"; + +const char *uar_full = + "INSERT INTO temp SELECT Job.JobId,Job.JobTDate," + "Job.ClientId,Job.Level,Job.JobFiles,Job.JobBytes," + "StartTime,VolumeName,JobMedia.StartFile,VolSessionId,VolSessionTime " + "FROM temp1,Job,JobMedia,Media WHERE temp1.JobId=Job.JobId " + "AND Level='F' AND JobStatus IN ('T','W') AND Type='B' " + "AND Media.Enabled=1 " + "AND JobMedia.JobId=Job.JobId " + "AND JobMedia.MediaId=Media.MediaId"; + +const char *uar_dif = + "INSERT INTO temp SELECT Job.JobId,Job.JobTDate,Job.ClientId," + "Job.Level,Job.JobFiles,Job.JobBytes," + "Job.StartTime,Media.VolumeName,JobMedia.StartFile," + "Job.VolSessionId,Job.VolSessionTime " + "FROM Job,JobMedia,Media,FileSet " + "WHERE Job.JobTDate>%s AND Job.StartTime<'%s' " + "AND Job.ClientId=%s " + "AND JobMedia.JobId=Job.JobId " + "AND Media.Enabled=1 " + "AND JobMedia.MediaId=Media.MediaId " + "AND Job.Level='D' AND JobStatus IN ('T','W') AND Type='B' " + "AND Job.FileSetId=FileSet.FileSetId " + "AND FileSet.FileSet='%s' " + "%s" + "ORDER BY Job.JobTDate DESC LIMIT 1"; + +const char *uar_inc = + "INSERT INTO temp SELECT Job.JobId,Job.JobTDate,Job.ClientId," + "Job.Level,Job.JobFiles,Job.JobBytes," + "Job.StartTime,Media.VolumeName,JobMedia.StartFile," + "Job.VolSessionId,Job.VolSessionTime " + "FROM Job,JobMedia,Media,FileSet " + "WHERE Job.JobTDate>%s AND Job.StartTime<'%s' " + "AND Job.ClientId=%s " + "AND Media.Enabled=1 " + "AND JobMedia.JobId=Job.JobId " + "AND JobMedia.MediaId=Media.MediaId " + "AND Job.Level='I' AND JobStatus IN ('T','W') AND Type='B' " + "AND Job.FileSetId=FileSet.FileSetId " + "AND 
FileSet.FileSet='%s' " + "%s"; + +const char *uar_list_temp = + "SELECT DISTINCT JobId,Level,JobFiles,JobBytes,StartTime,VolumeName" + " FROM temp" + " ORDER BY StartTime ASC"; + + +const char *uar_sel_jobid_temp = + "SELECT DISTINCT JobId,StartTime FROM temp ORDER BY StartTime ASC"; + +const char *uar_sel_all_temp1 = "SELECT * FROM temp1"; + +const char *uar_sel_all_temp = "SELECT * FROM temp"; + + + +/* Select FileSet names for this Client */ +const char *uar_sel_fileset = + "SELECT DISTINCT FileSet.FileSet FROM Job," + "Client,FileSet WHERE Job.FileSetId=FileSet.FileSetId " + "AND Job.ClientId=%s AND Client.ClientId=%s " + "ORDER BY FileSet.FileSet"; + +/* Select all different FileSet for this client + * This query doesn't guarantee that the Id is the latest + * version of the FileSet. Can be used with other queries that + * use Ids to select the FileSet name. (like in accurate) + */ +const char *uar_sel_filesetid = + "SELECT MAX(FileSet.FileSetId) " + "FROM FileSet JOIN Job USING (FileSetId) " + "WHERE Job.ClientId=%s " + "GROUP BY FileSet"; + +/* + * Find JobId, FileIndex for a given path/file and date + * for use when inserting individual files into the tree. + */ +const char *uar_jobid_fileindex = + "SELECT Job.JobId,File.FileIndex FROM Job,File,Path,Filename,Client " + "WHERE Job.JobId=File.JobId " + "AND Job.StartTime<='%s' " + "AND Path.Path='%s' " + "AND Filename.Name='%s' " + "AND Client.Name='%s' " + "AND Job.ClientId=Client.ClientId " + "AND Path.PathId=File.PathId " + "AND Filename.FilenameId=File.FilenameId " + "AND JobStatus IN ('T','W') AND Type='B' " + "ORDER BY Job.StartTime DESC LIMIT 1"; + +const char *uar_jobids_fileindex = + "SELECT Job.JobId,File.FileIndex FROM Job,File,Path,Filename,Client " + "WHERE Job.JobId IN (%s) " + "AND Job.JobId=File.JobId " + "AND Job.StartTime<='%s' " + "AND Path.Path='%s' " + "AND Filename.Name='%s' " + "AND Client.Name='%s' " + "AND Job.ClientId=Client.ClientId " + "AND Path.PathId=File.PathId " + "AND Filename.FilenameId=File.FilenameId " + "ORDER BY Job.StartTime DESC LIMIT 1"; + +/* Query to get list of files from table -- presuably built by an external program */ +const char *uar_jobid_fileindex_from_table = + "SELECT JobId, FileIndex FROM %s ORDER BY JobId, FileIndex ASC"; + +/* Get the list of the last recent version per Delta with a given + * jobid list. This is a tricky part because with SQL the result of: + * + * SELECT MAX(A), B, C, D FROM... GROUP BY (B,C) + * + * doesn't give the good result (for D). + * + * With PostgreSQL, we can use DISTINCT ON(), but with Mysql or Sqlite, + * we need an extra join using JobTDate. 
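+ * Concretely, the PostgreSQL variant below uses
+ *    SELECT DISTINCT ON (FilenameId, PathId) ... ORDER BY FilenameId, PathId, JobTDate DESC
+ * which keeps only the first (newest) row per (FilenameId, PathId), while the
+ * MySQL/SQLite variant selects MAX(JobTDate) per (PathId, FilenameId) in a
+ * subquery and joins back on JobTDate to recover the matching File/Job row.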
+ */ +static const char *select_recent_version_with_basejob_default = +"SELECT FileId, Job.JobId AS JobId, FileIndex, File.PathId AS PathId, " + "File.FilenameId AS FilenameId, LStat, MD5, DeltaSeq, " + "Job.JobTDate AS JobTDate " +"FROM Job, File, ( " + "SELECT MAX(JobTDate) AS JobTDate, PathId, FilenameId " + "FROM ( " + "SELECT JobTDate, PathId, FilenameId " /* Get all normal files */ + "FROM File JOIN Job USING (JobId) " /* from selected backup */ + "WHERE File.JobId IN (%s) " + "UNION ALL " + "SELECT JobTDate, PathId, FilenameId " /* Get all files from */ + "FROM BaseFiles " /* BaseJob */ + "JOIN File USING (FileId) " + "JOIN Job ON (BaseJobId = Job.JobId) " + "WHERE BaseFiles.JobId IN (%s) " /* Use Max(JobTDate) to find */ + ") AS tmp " + "GROUP BY PathId, FilenameId " /* the latest file version */ + ") AS T1 " +"WHERE (Job.JobId IN ( " /* Security, we force JobId to be valid */ + "SELECT DISTINCT BaseJobId FROM BaseFiles WHERE JobId IN (%s)) " + "OR Job.JobId IN (%s)) " + "AND T1.JobTDate = Job.JobTDate " /* Join on JobTDate to get the orginal */ + "AND Job.JobId = File.JobId " /* Job/File record */ + "AND T1.PathId = File.PathId " + "AND T1.FilenameId = File.FilenameId"; + +const char *select_recent_version_with_basejob[] = +{ + /* MySQL */ + select_recent_version_with_basejob_default, + + /* Postgresql */ /* The DISTINCT ON () permits to avoid extra join */ + "SELECT DISTINCT ON (FilenameId, PathId) JobTDate, JobId, FileId, " + "FileIndex, PathId, FilenameId, LStat, MD5, DeltaSeq " + "FROM " + "(SELECT FileId, JobId, PathId, FilenameId, FileIndex, LStat, MD5, DeltaSeq " + "FROM File WHERE JobId IN (%s) " + "UNION ALL " + "SELECT File.FileId, File.JobId, PathId, FilenameId, " + "File.FileIndex, LStat, MD5, DeltaSeq " + "FROM BaseFiles JOIN File USING (FileId) " + "WHERE BaseFiles.JobId IN (%s) " + ") AS T JOIN Job USING (JobId) " + "ORDER BY FilenameId, PathId, JobTDate DESC ", + + /* SQLite */ + select_recent_version_with_basejob_default +}; + +/* We do the same thing than the previous query, but we include + * all delta parts. If the file has been deleted, we can have irrelevant + * parts. + * + * The code that uses results should control the delta sequence with + * the following rules: + * First Delta = 0 + * Delta = Previous Delta + 1 + * + * If we detect a gap, we can discard further pieces + * If a file starts at 1 instead of 0, the file has been deleted, and further + * pieces are useless. 
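 *
 * A minimal sketch of that control (names invented for this note), with
 * prev_seq initialised to -1 and drop_rest to false before the first piece
 * of a file:
 *
 *    if (drop_rest ||
 *        (prev_seq <  0 && DeltaSeq != 0) ||             * deleted file *
 *        (prev_seq >= 0 && DeltaSeq != prev_seq + 1)) {   * gap detected *
 *       drop_rest = true;            * ignore this and all further pieces *
 *    } else {
 *       prev_seq = DeltaSeq;         * keep this piece *
 *    }
 *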
+ * This control should be reset for each new file + */ +static const char *select_recent_version_with_basejob_and_delta_default = +"SELECT FileId, Job.JobId AS JobId, FileIndex, File.PathId AS PathId, " + "File.FilenameId AS FilenameId, LStat, MD5, File.DeltaSeq AS DeltaSeq, " + "Job.JobTDate AS JobTDate " +"FROM Job, File, ( " + "SELECT MAX(JobTDate) AS JobTDate, PathId, FilenameId, DeltaSeq " + "FROM ( " + "SELECT JobTDate, PathId, FilenameId, DeltaSeq " /*Get all normal files*/ + "FROM File JOIN Job USING (JobId) " /* from selected backup */ + "WHERE File.JobId IN (%s) " + "UNION ALL " + "SELECT JobTDate, PathId, FilenameId, DeltaSeq " /*Get all files from */ + "FROM BaseFiles " /* BaseJob */ + "JOIN File USING (FileId) " + "JOIN Job ON (BaseJobId = Job.JobId) " + "WHERE BaseFiles.JobId IN (%s) " /* Use Max(JobTDate) to find */ + ") AS tmp " + "GROUP BY PathId, FilenameId, DeltaSeq " /* the latest file version */ + ") AS T1 " +"WHERE (Job.JobId IN ( " /* Security, we force JobId to be valid */ + "SELECT DISTINCT BaseJobId FROM BaseFiles WHERE JobId IN (%s)) " + "OR Job.JobId IN (%s)) " + "AND T1.JobTDate = Job.JobTDate " /* Join on JobTDate to get the orginal */ + "AND Job.JobId = File.JobId " /* Job/File record */ + "AND T1.PathId = File.PathId " + "AND T1.FilenameId = File.FilenameId"; + +const char *select_recent_version_with_basejob_and_delta[] = { + /* MySQL */ + select_recent_version_with_basejob_and_delta_default, + + /* Postgresql */ /* The DISTINCT ON () permits to avoid extra join */ + "SELECT DISTINCT ON (FilenameId, PathId, DeltaSeq) JobTDate, JobId, FileId, " + "FileIndex, PathId, FilenameId, LStat, MD5, DeltaSeq " + "FROM " + "(SELECT FileId, JobId, PathId, FilenameId, FileIndex, LStat, MD5,DeltaSeq " + "FROM File WHERE JobId IN (%s) " + "UNION ALL " + "SELECT File.FileId, File.JobId, PathId, FilenameId, " + "File.FileIndex, LStat, MD5, DeltaSeq " + "FROM BaseFiles JOIN File USING (FileId) " + "WHERE BaseFiles.JobId IN (%s) " + ") AS T JOIN Job USING (JobId) " + "ORDER BY FilenameId, PathId, DeltaSeq, JobTDate DESC ", + + /* SQLite */ + select_recent_version_with_basejob_and_delta_default +}; + +/* Get the list of the last recent version with a given BaseJob jobid list + * We don't handle Delta with BaseJobs, they have only Full files + */ +static const char *select_recent_version_default = + "SELECT j1.JobId AS JobId, f1.FileId AS FileId, f1.FileIndex AS FileIndex, " + "f1.PathId AS PathId, f1.FilenameId AS FilenameId, " + "f1.LStat AS LStat, f1.MD5 AS MD5, j1.JobTDate " + "FROM ( " /* Choose the last version for each Path/Filename */ + "SELECT max(JobTDate) AS JobTDate, PathId, FilenameId " + "FROM File JOIN Job USING (JobId) " + "WHERE File.JobId IN (%s) " + "GROUP BY PathId, FilenameId " + ") AS t1, Job AS j1, File AS f1 " + "WHERE t1.JobTDate = j1.JobTDate " + "AND j1.JobId IN (%s) " + "AND t1.FilenameId = f1.FilenameId " + "AND t1.PathId = f1.PathId " + "AND j1.JobId = f1.JobId"; + +const char *select_recent_version[] = +{ + /* MySQL */ + select_recent_version_default, + + /* Postgresql */ + "SELECT DISTINCT ON (FilenameId, PathId) JobTDate, JobId, FileId, " + "FileIndex, PathId, FilenameId, LStat, MD5 " + "FROM File JOIN Job USING (JobId) " + "WHERE JobId IN (%s) " + "ORDER BY FilenameId, PathId, JobTDate DESC ", + + /* SQLite */ + select_recent_version_default +}; + +/* We don't create this table as TEMPORARY because MySQL + MyISAM 5.0 and 5.1 are unable to run further queries in this mode + */ +static const char *create_temp_accurate_jobids_default = + "CREATE 
TABLE btemp3%s AS " + "SELECT JobId, StartTime, EndTime, JobTDate, PurgedFiles " + "FROM Job JOIN FileSet USING (FileSetId) " + "WHERE ClientId = %s " + "AND Level='F' AND JobStatus IN ('T','W') AND Type='B' " + "AND StartTime<'%s' " + "AND FileSet.FileSet=(SELECT FileSet FROM FileSet WHERE FileSetId = %s) " + " %s " /* Any filter */ + "ORDER BY Job.JobTDate DESC LIMIT 1"; + +const char *create_temp_accurate_jobids[] = { + /* Mysql */ + create_temp_accurate_jobids_default, + + /* Postgresql */ + create_temp_accurate_jobids_default, + + /* SQLite3 */ + create_temp_accurate_jobids_default +}; + +const char *create_temp_basefile[] = { + /* Mysql */ + "CREATE TEMPORARY TABLE basefile%lld (" + "Path BLOB NOT NULL," + "Name BLOB NOT NULL," + "INDEX (Path(255), Name(255)))", + + /* Postgresql */ + "CREATE TEMPORARY TABLE basefile%lld (" + "Path TEXT," + "Name TEXT)", + + /* SQLite3 */ + "CREATE TEMPORARY TABLE basefile%lld (" + "Path TEXT," + "Name TEXT)" +}; + +const char *create_temp_new_basefile[] = { + /* Mysql */ + "CREATE TEMPORARY TABLE new_basefile%lld AS " + "SELECT Path.Path AS Path, Filename.Name AS Name, Temp.FileIndex AS FileIndex," + "Temp.JobId AS JobId, Temp.LStat AS LStat, Temp.FileId AS FileId, " + "Temp.MD5 AS MD5 " + "FROM ( %s ) AS Temp " + "JOIN Filename ON (Filename.FilenameId = Temp.FilenameId) " + "JOIN Path ON (Path.PathId = Temp.PathId) " + "WHERE Temp.FileIndex > 0", + + /* Postgresql */ + "CREATE TEMPORARY TABLE new_basefile%lld AS " + "SELECT Path.Path AS Path, Filename.Name AS Name, Temp.FileIndex AS FileIndex," + "Temp.JobId AS JobId, Temp.LStat AS LStat, Temp.FileId AS FileId, " + "Temp.MD5 AS MD5 " + "FROM ( %s ) AS Temp " + "JOIN Filename ON (Filename.FilenameId = Temp.FilenameId) " + "JOIN Path ON (Path.PathId = Temp.PathId) " + "WHERE Temp.FileIndex > 0", + + /* SQLite3 */ + "CREATE TEMPORARY TABLE new_basefile%lld AS " + "SELECT Path.Path AS Path, Filename.Name AS Name, Temp.FileIndex AS FileIndex," + "Temp.JobId AS JobId, Temp.LStat AS LStat, Temp.FileId AS FileId, " + "Temp.MD5 AS MD5 " + "FROM ( %s ) AS Temp " + "JOIN Filename ON (Filename.FilenameId = Temp.FilenameId) " + "JOIN Path ON (Path.PathId = Temp.PathId) " + "WHERE Temp.FileIndex > 0" +}; + +/* ====== ua_prune.c */ + +/* List of SQL commands to create temp table and indicies */ +const char *create_deltabs[] = +{ + /* MySQL */ + "CREATE TEMPORARY TABLE DelCandidates (" + "JobId INTEGER UNSIGNED NOT NULL, " + "PurgedFiles TINYINT, " + "FileSetId INTEGER UNSIGNED, " + "JobFiles INTEGER UNSIGNED, " + "JobStatus BINARY(1))", + + /* PostgreSQL */ + "CREATE TEMPORARY TABLE DelCandidates ( " + "JobId INTEGER NOT NULL, " + "PurgedFiles SMALLINT, " + "FileSetId INTEGER, " + "JobFiles INTEGER, " + "JobStatus char(1))", + + /* SQLite */ + "CREATE TEMPORARY TABLE DelCandidates (" + "JobId INTEGER UNSIGNED NOT NULL, " + "PurgedFiles TINYINT, " + "FileSetId INTEGER UNSIGNED, " + "JobFiles INTEGER UNSIGNED, " + "JobStatus CHAR)" +}; + +/* ======= ua_purge.c ====== */ +/* Select the first available Copy Job that must be upgraded + * to a Backup job when the original backup job is expired. 
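 *
 * A hedged usage sketch (jobids stands for the comma-separated list of the
 * expiring backup JobIds; JT_JOB_COPY is the job-type constant referred to
 * in the comment below; query is a POOL_MEM; assumed to run inside a BDB
 * method as elsewhere in this file):
 *
 *    Mmsg(query, uap_upgrade_copies_oldest_job[bdb_get_type_index()],
 *         JT_JOB_COPY, jobids, jobids);   * fills %c, then the two %s *
 *    bdb_sql_query(query.c_str(), NULL, NULL);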
+ */ +static const char *uap_upgrade_copies_oldest_job_default = +"CREATE TEMPORARY TABLE cpy_tmp AS " + "SELECT MIN(JobId) AS JobId FROM Job " /* Choose the oldest job */ + "WHERE Type='%c' " /* JT_JOB_COPY */ + "AND ( PriorJobId IN (%s) " /* JobId selection */ + "OR " + " PriorJobId IN ( " + "SELECT PriorJobId " + "FROM Job " + "WHERE JobId IN (%s) " /* JobId selection */ + " AND Type='B' " + ") " + ") " + "GROUP BY PriorJobId "; /* one result per copy */ + +const char *uap_upgrade_copies_oldest_job[] = +{ + /* MySQL */ + uap_upgrade_copies_oldest_job_default, + /* PostgreSQL */ + uap_upgrade_copies_oldest_job_default, + /* SQLite */ + uap_upgrade_copies_oldest_job_default +}; + +/* ======= ua_restore.c ====== */ + +/* List Jobs where a particular file is saved */ +const char *uar_file[] = +{ + /* MySQL */ + "SELECT Job.JobId as JobId," + "CONCAT(Path.Path,Filename.Name) as Name, " + "StartTime,Type as JobType,JobStatus,JobFiles,JobBytes " + "FROM Client,Job,File,Filename,Path WHERE Client.Name='%s' " + "AND Client.ClientId=Job.ClientId " + "AND Job.JobId=File.JobId AND File.FileIndex > 0 " + "AND Path.PathId=File.PathId AND Filename.FilenameId=File.FilenameId " + "AND Filename.Name='%s' ORDER BY StartTime DESC LIMIT 20", + + /* Postgresql */ + "SELECT Job.JobId as JobId," + "Path.Path||Filename.Name as Name, " + "StartTime,Type as JobType,JobStatus,JobFiles,JobBytes " + "FROM Client,Job,File,Filename,Path WHERE Client.Name='%s' " + "AND Client.ClientId=Job.ClientId " + "AND Job.JobId=File.JobId AND File.FileIndex > 0 " + "AND Path.PathId=File.PathId AND Filename.FilenameId=File.FilenameId " + "AND Filename.Name='%s' ORDER BY StartTime DESC LIMIT 20", + + /* SQLite3 */ + "SELECT Job.JobId as JobId," + "Path.Path||Filename.Name as Name, " + "StartTime,Type as JobType,JobStatus,JobFiles,JobBytes " + "FROM Client,Job,File,Filename,Path WHERE Client.Name='%s' " + "AND Client.ClientId=Job.ClientId " + "AND Job.JobId=File.JobId AND File.FileIndex > 0 " + "AND Path.PathId=File.PathId AND Filename.FilenameId=File.FilenameId " + "AND Filename.Name='%s' ORDER BY StartTime DESC LIMIT 20" +}; + +const char *uar_create_temp[] = { + /* Mysql */ + "CREATE TEMPORARY TABLE temp (" + "JobId INTEGER UNSIGNED NOT NULL," + "JobTDate BIGINT UNSIGNED," + "ClientId INTEGER UNSIGNED," + "Level CHAR," + "JobFiles INTEGER UNSIGNED," + "JobBytes BIGINT UNSIGNED," + "StartTime TEXT," + "VolumeName TEXT," + "StartFile INTEGER UNSIGNED," + "VolSessionId INTEGER UNSIGNED," + "VolSessionTime INTEGER UNSIGNED)", + + /* PostgreSQL */ + "CREATE TEMPORARY TABLE temp (" + "JobId INTEGER NOT NULL," + "JobTDate BIGINT," + "ClientId INTEGER," + "Level CHAR," + "JobFiles INTEGER," + "JobBytes BIGINT," + "StartTime TEXT," + "VolumeName TEXT," + "StartFile INTEGER," + "VolSessionId INTEGER," + "VolSessionTime INTEGER)", + + /* SQLite */ + "CREATE TEMPORARY TABLE temp (" + "JobId INTEGER UNSIGNED NOT NULL," + "JobTDate BIGINT UNSIGNED," + "ClientId INTEGER UNSIGNED," + "Level CHAR," + "JobFiles INTEGER UNSIGNED," + "JobBytes BIGINT UNSIGNED," + "StartTime TEXT," + "VolumeName TEXT," + "StartFile INTEGER UNSIGNED," + "VolSessionId INTEGER UNSIGNED," + "VolSessionTime INTEGER UNSIGNED)" +}; + +const char *uar_create_temp1[] = +{ + /* MySQL */ + "CREATE TEMPORARY TABLE temp1 (" + "JobId INTEGER UNSIGNED NOT NULL," + "JobTDate BIGINT UNSIGNED)", + /* PostgreSQL */ + "CREATE TEMPORARY TABLE temp1 (" + "JobId INTEGER NOT NULL," + "JobTDate BIGINT)", + /* SQLite */ + "CREATE TEMPORARY TABLE temp1 (" + "JobId INTEGER UNSIGNED NOT NULL," + 
"JobTDate BIGINT UNSIGNED)" +}; + +/* Query to get all files in a directory no recursing + * Note, for PostgreSQL since it respects the "Single Value + * rule", the results of the SELECT will be unoptimized. + * I.e. the same file will be restored multiple times, once + * for each time it was backed up. + */ + +const char *uar_jobid_fileindex_from_dir[] = { + /* Mysql */ + "SELECT Job.JobId,File.FileIndex FROM Job,File,Path,Filename,Client " + "WHERE Job.JobId IN (%s) " + "AND Job.JobId=File.JobId " + "AND Path.Path='%s' " + "AND Client.Name='%s' " + "AND Job.ClientId=Client.ClientId " + "AND Path.PathId=File.Pathid " + "AND Filename.FilenameId=File.FilenameId " + "GROUP BY File.FileIndex ", + + /* Postgresql */ + "SELECT Job.JobId,File.FileIndex FROM Job,File,Path,Filename,Client " + "WHERE Job.JobId IN (%s) " + "AND Job.JobId=File.JobId " + "AND Path.Path='%s' " + "AND Client.Name='%s' " + "AND Job.ClientId=Client.ClientId " + "AND Path.PathId=File.Pathid " + "AND Filename.FilenameId=File.FilenameId", + + /* SQLite3 */ + "SELECT Job.JobId,File.FileIndex FROM Job,File,Path,Filename,Client " + "WHERE Job.JobId IN (%s) " + "AND Job.JobId=File.JobId " + "AND Path.Path='%s' " + "AND Client.Name='%s' " + "AND Job.ClientId=Client.ClientId " + "AND Path.PathId=File.Pathid " + "AND Filename.FilenameId=File.FilenameId " + "GROUP BY File.FileIndex " +}; + +const char *sql_media_order_most_recently_written[] = { + /* Mysql */ + "ORDER BY LastWritten IS NULL,LastWritten DESC,MediaId", + + /* Postgresql */ + "ORDER BY LastWritten IS NULL,LastWritten DESC,MediaId", + + /* SQLite3 */ + "ORDER BY LastWritten IS NULL,LastWritten DESC,MediaId" +}; + +const char *sql_get_max_connections[] = { + /* Mysql */ + "SHOW VARIABLES LIKE 'max_connections'", + /* PostgreSQL */ + "SHOW max_connections", + /* SQLite */ + "SELECT 0" +}; + +/* + * The Group By can return strange numbers when having multiple + * version of a file in the same dataset. 
+ */ +const char *default_sql_bvfs_select = +"CREATE TABLE %s AS " +"SELECT File.JobId, File.FileIndex, File.FileId " +"FROM Job, File, ( " + "SELECT MAX(JobTDate) AS JobTDate, PathId, FilenameId " + "FROM btemp%s GROUP BY PathId, FilenameId " + ") AS T1 JOIN Filename USING (FilenameId) " +"WHERE T1.JobTDate = Job.JobTDate " + "AND Job.JobId = File.JobId " + "AND T1.PathId = File.PathId " + "AND T1.FilenameId = File.FilenameId " + "AND File.FileIndex > 0 " + "AND Job.JobId IN (SELECT DISTINCT JobId FROM btemp%s) "; + +const char *sql_bvfs_select[] = +{ + /* MySQL */ + default_sql_bvfs_select, + /* PostgreSQL */ + "CREATE TABLE %s AS ( " + "SELECT JobId, FileIndex, FileId " + "FROM ( " + "SELECT DISTINCT ON (PathId, FilenameId) " + "JobId, FileIndex, FileId " + "FROM btemp%s " + "ORDER BY PathId, FilenameId, JobTDate DESC " + ") AS T " + "WHERE FileIndex > 0)", + /* SQLite */ + default_sql_bvfs_select +}; + +static const char *sql_bvfs_list_files_default = +"SELECT 'F', T1.PathId, T1.FilenameId, Filename.Name, " + "File.JobId, File.LStat, File.FileId " +"FROM Job, File, ( " + "SELECT MAX(JobTDate) AS JobTDate, PathId, FilenameId " + "FROM ( " + "SELECT JobTDate, PathId, FilenameId " + "FROM File JOIN Job USING (JobId) " + "WHERE File.JobId IN (%s) AND PathId = %s " + "UNION ALL " + "SELECT JobTDate, PathId, FilenameId " + "FROM BaseFiles " + "JOIN File USING (FileId) " + "JOIN Job ON (BaseJobId = Job.JobId) " + "WHERE BaseFiles.JobId IN (%s) AND PathId = %s " + ") AS tmp GROUP BY PathId, FilenameId " + "LIMIT %lld OFFSET %lld" + ") AS T1 JOIN Filename USING (FilenameId) " +"WHERE T1.JobTDate = Job.JobTDate " + "AND Job.JobId = File.JobId " + "AND T1.PathId = File.PathId " + "AND T1.FilenameId = File.FilenameId " + "AND Filename.Name != '' " + "AND File.FileIndex > 0 " + " %s " /* AND Name LIKE '' */ + "AND (Job.JobId IN ( " + "SELECT DISTINCT BaseJobId FROM BaseFiles WHERE JobId IN (%s)) " + "OR Job.JobId IN (%s)) "; + +const char *sql_bvfs_list_files[] = { + /* MySQL */ + sql_bvfs_list_files_default, + + /* JobId PathId JobId PathId WHERE? Filename? 
Limit Offset*/ + /* Postgresql */ + "SELECT Type, PathId, FilenameId, Name, JobId, LStat, FileId " + "FROM (" + "SELECT DISTINCT ON (FilenameId) 'F' as Type, PathId, T.FilenameId, " + "Filename.Name, JobId, LStat, FileId, FileIndex " + "FROM " + "(SELECT FileId, JobId, PathId, FilenameId, FileIndex, LStat, MD5 " + "FROM File WHERE JobId IN (%s) AND PathId = %s " + "UNION ALL " + "SELECT File.FileId, File.JobId, PathId, FilenameId, " + "File.FileIndex, LStat, MD5 " + "FROM BaseFiles JOIN File USING (FileId) " + "WHERE BaseFiles.JobId IN (%s) AND File.PathId = %s " + ") AS T JOIN Job USING (JobId) JOIN Filename USING (FilenameId) " + " WHERE Filename.Name != '' " + " %s " /* AND Name LIKE '' */ + "ORDER BY FilenameId, StartTime DESC " + ") AS A WHERE A.FileIndex > 0 " + "LIMIT %lld OFFSET %lld ", + + /* SQLite */ + sql_bvfs_list_files_default, + + /* SQLite */ + sql_bvfs_list_files_default +}; + +/* Basically the same thing than select_recent_version_with_basejob_and_delta_default, + * but we specify a single file with FilenameId/PathId + * + * Input: + * 1 JobId to look at + * 2 FilenameId + * 3 PathId + * 4 JobId to look at + * 5 FilenameId + * 6 PathId + * 7 Jobid + * 8 JobId + */ +const char *bvfs_select_delta_version_with_basejob_and_delta_default = +"SELECT FileId, Job.JobId AS JobId, FileIndex, File.PathId AS PathId, " + "File.FilenameId AS FilenameId, LStat, MD5, File.DeltaSeq AS DeltaSeq, " + "Job.JobTDate AS JobTDate " +"FROM Job, File, ( " + "SELECT MAX(JobTDate) AS JobTDate, PathId, FilenameId, DeltaSeq " + "FROM ( " + "SELECT JobTDate, PathId, FilenameId, DeltaSeq " /*Get all normal files*/ + "FROM File JOIN Job USING (JobId) " /* from selected backup */ + "WHERE File.JobId IN (%s) AND FilenameId = %s AND PathId = %s " + "UNION ALL " + "SELECT JobTDate, PathId, FilenameId, DeltaSeq " /*Get all files from */ + "FROM BaseFiles " /* BaseJob */ + "JOIN File USING (FileId) " + "JOIN Job ON (BaseJobId = Job.JobId) " + "WHERE BaseFiles.JobId IN (%s) " /* Use Max(JobTDate) to find */ + " AND FilenameId = %s AND PathId = %s " + ") AS tmp " + "GROUP BY PathId, FilenameId, DeltaSeq " /* the latest file version */ + ") AS T1 " +"WHERE (Job.JobId IN ( " /* Security, we force JobId to be valid */ + "SELECT DISTINCT BaseJobId FROM BaseFiles WHERE JobId IN (%s)) " + "OR Job.JobId IN (%s)) " + "AND T1.JobTDate = Job.JobTDate " /* Join on JobTDate to get the orginal */ + "AND Job.JobId = File.JobId " /* Job/File record */ + "AND T1.PathId = File.PathId " + "AND T1.FilenameId = File.FilenameId"; + + +const char *bvfs_select_delta_version_with_basejob_and_delta[] = +{ + /* MySQL */ + bvfs_select_delta_version_with_basejob_and_delta_default, + + /* Postgresql */ /* The DISTINCT ON () permits to avoid extra join */ + "SELECT DISTINCT ON (FilenameId, PathId, DeltaSeq) JobTDate, JobId, FileId, " + "FileIndex, PathId, FilenameId, LStat, MD5, DeltaSeq " + "FROM " + "(SELECT FileId, JobId, PathId, FilenameId, FileIndex, LStat, MD5,DeltaSeq " + "FROM File WHERE JobId IN (%s) AND FilenameId = %s AND PathId = %s " + "UNION ALL " + "SELECT File.FileId, File.JobId, PathId, FilenameId, " + "File.FileIndex, LStat, MD5, DeltaSeq " + "FROM BaseFiles JOIN File USING (FileId) " + "WHERE BaseFiles.JobId IN (%s) AND FilenameId = %s AND PathId = %s " + ") AS T JOIN Job USING (JobId) " + "ORDER BY FilenameId, PathId, DeltaSeq, JobTDate DESC ", + + /* SQLite */ + bvfs_select_delta_version_with_basejob_and_delta_default +}; + + +const char *batch_lock_path_query[] = { + /* Mysql */ + "LOCK TABLES Path write, batch write, 
Path as p write", + + /* Postgresql */ + "BEGIN; LOCK TABLE Path IN SHARE ROW EXCLUSIVE MODE", + + /* SQLite3 */ + "BEGIN" +}; + +const char *batch_lock_filename_query[] = { + /* Mysql */ + "LOCK TABLES Filename write, batch write, Filename as f write", + + /* Postgresql */ + "BEGIN; LOCK TABLE Filename IN SHARE ROW EXCLUSIVE MODE", + + /* SQLite3 */ + "BEGIN" +}; + +const char *batch_unlock_tables_query[] = { + /* Mysql */ + "UNLOCK TABLES", + + /* Postgresql */ + "COMMIT", + + /* SQLite3 */ + "COMMIT" +}; + +const char *batch_fill_path_query[] = { + /* Mysql */ + "INSERT INTO Path (Path) " + "SELECT a.Path FROM " + "(SELECT DISTINCT Path FROM batch) AS a WHERE NOT EXISTS " + "(SELECT Path FROM Path AS p WHERE p.Path = a.Path)", + + /* PostgreSQL */ + "INSERT INTO Path (Path)" + "SELECT a.Path FROM " + "(SELECT DISTINCT Path FROM batch) AS a " + "WHERE NOT EXISTS (SELECT Path FROM Path WHERE Path = a.Path) ", + + /* SQLite */ + "INSERT INTO Path (Path)" + "SELECT DISTINCT Path FROM batch " + "EXCEPT SELECT Path FROM Path" +}; + +const char *batch_fill_filename_query[] = { + /* Mysql */ + "INSERT INTO Filename (Name) " + "SELECT a.Name FROM " + "(SELECT DISTINCT Name FROM batch) AS a WHERE NOT EXISTS " + "(SELECT Name FROM Filename AS f WHERE f.Name = a.Name)", + + /* Postgresql */ + "INSERT INTO Filename (Name) " + "SELECT a.Name FROM " + "(SELECT DISTINCT Name FROM batch) as a " + "WHERE NOT EXISTS " + "(SELECT Name FROM Filename WHERE Name = a.Name)", + + /* SQLite3 */ + "INSERT INTO Filename (Name) " + "SELECT DISTINCT Name FROM batch " + "EXCEPT SELECT Name FROM Filename" +}; + +const char *match_query[] = { + /* Mysql */ + "REGEXP", + /* PostgreSQL */ + "~", + /* SQLite */ + "LIKE" /* MATCH doesn't seems to work anymore... */ +}; + +static const char *insert_counter_values_default = + "INSERT INTO Counters (Counter, MinValue, " + "MaxValue, CurrentValue, WrapCounter) " + "VALUES ('%s','%d','%d','%d','%s')"; + +const char *insert_counter_values[] = { + /* MySQL */ + "INSERT INTO Counters (Counter, Counters.MinValue, " + "Counters.MaxValue, CurrentValue, WrapCounter) " + "VALUES ('%s','%d','%d','%d','%s')", + + /* PostgreSQL */ + insert_counter_values_default, + + /* SQLite */ + insert_counter_values_default +}; + +static const char *select_counter_values_default = + "SELECT MinValue, MaxValue, CurrentValue, WrapCounter" + " FROM Counters WHERE Counter='%s'"; + +const char *select_counter_values[] = +{ + /* MySQL */ + "SELECT Counters.MinValue, Counters.MaxValue, CurrentValue, WrapCounter" + " FROM Counters WHERE Counter='%s'", + + /* PostgreSQL */ + select_counter_values_default, + + /* SQLite */ + select_counter_values_default +}; + +static const char *update_counter_values_default = + "UPDATE Counters SET MinValue=%d, MaxValue=%d, CurrentValue=%d," + "WrapCounter='%s' WHERE Counter='%s'"; + +const char *update_counter_values[] = +{ + /* MySQL */ + "UPDATE Counters SET Counters.MinValue=%d, Counters.MaxValue=%d," + "CurrentValue=%d, WrapCounter='%s' WHERE Counter='%s'", + /* PostgreSQL */ + update_counter_values_default, + /* SQLite */ + update_counter_values_default +}; + +static const char *expired_volumes_defaults = +"SELECT Media.VolumeName AS volumename," + "Media.LastWritten AS lastwritten" +" FROM Media" +" WHERE VolStatus IN ('Full', 'Used')" + " AND ( Media.LastWritten + Media.VolRetention ) < NOW()" + " %s "; + +const char *prune_cache[] = { + /* MySQL */ + " (Media.LastWritten + Media.CacheRetention) < NOW() ", + /* PostgreSQL */ + " (Media.LastWritten + (interval '1 
second' * Media.CacheRetention)) < NOW() ", + /* SQLite */ + " ( strftime('%s', Media.LastWritten) + Media.CacheRetention < strftime('%s', datetime('now', 'localtime'))) " +}; + +const char *expired_volumes[] = { + /* MySQL */ + expired_volumes_defaults, + /* PostgreSQL */ + "SELECT Media.VolumeName, Media.LastWritten " + " FROM Media " + " WHERE VolStatus IN ('Full', 'Used') " + " AND ( Media.LastWritten + (interval '1 second' * Media.VolRetention ) < NOW()) " + " %s ", + /* SQLite */ + expired_volumes_defaults +}; + +const char *expires_in[] = { + /* MySQL */ + "(GREATEST(0, CAST(UNIX_TIMESTAMP(LastWritten) + Media.VolRetention AS SIGNED) - UNIX_TIMESTAMP(NOW())))", + /* PostgreSQL */ + "GREATEST(0, (extract('epoch' from LastWritten + Media.VolRetention * interval '1second' - NOW())::bigint))", + /* SQLite */ + "MAX(0, (strftime('%s', LastWritten) + Media.VolRetention - strftime('%s', datetime('now', 'localtime'))))" +}; + +const char *strip_restore[] = { + /* MySQL */ + "DELETE FROM %s WHERE FileId IN (SELECT * FROM (SELECT FileId FROM %s as B JOIN File USING (FileId) WHERE PathId IN (%s)) AS C)", + /* PostgreSQL */ + "DELETE FROM %s WHERE FileId IN (SELECT FileId FROM %s JOIN File USING (FileId) WHERE PathId IN (%s))", + /* SQLite */ + "DELETE FROM %s WHERE FileId IN (SELECT FileId FROM %s JOIN File USING (FileId) WHERE PathId IN (%s))" +}; + +static const char *escape_char_value_default = "\\"; + +const char *escape_char_value[] = { + /* MySQL */ + "\\\\", + /* PostgreSQL */ + escape_char_value_default, + /* SQLite */ + escape_char_value_default +}; \ No newline at end of file diff --git a/src/cats/sql_cmds.h b/src/cats/sql_cmds.h new file mode 100644 index 00000000..2b02139a --- /dev/null +++ b/src/cats/sql_cmds.h @@ -0,0 +1,92 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +/* + * sql_cmds.c contains all the SQL commands that are either issued by the + * Director or which are database backend specific. 
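 *
 * The backend-specific commands are the arrays below; their entries are, in
 * order, the MySQL, PostgreSQL and SQLite variants (matching the layout in
 * sql_cmds.c), and callers pick the variant of the connected catalog through
 * its type index. A hedged sketch (db is an open catalog, query a POOL_MEM;
 * a row handler would replace the two NULLs to consume the result):
 *
 *    const char *fmt = expired_volumes[db->bdb_get_type_index()];
 *    Mmsg(query, fmt, "");     * the trailing %s is an optional extra filter *
 *    db->bdb_sql_query(query.c_str(), NULL, NULL);
 *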
+ * + * This file defines the external definitions necessary to + * build on Windows + * + * Kern Sibbald, July MMII + */ + +extern const char CATS_IMP_EXP *batch_fill_filename_query[]; +extern const char CATS_IMP_EXP *batch_fill_path_query[]; +extern const char CATS_IMP_EXP *batch_lock_filename_query[]; +extern const char CATS_IMP_EXP *batch_lock_path_query[]; +extern const char CATS_IMP_EXP *batch_unlock_tables_query[]; +extern const char CATS_IMP_EXP *bvfs_select_delta_version_with_basejob_and_delta[]; +extern const char CATS_IMP_EXP *cleanup_created_job; +extern const char CATS_IMP_EXP *cleanup_running_job; +extern const char CATS_IMP_EXP *client_backups; +extern const char CATS_IMP_EXP *cnt_File; +extern const char CATS_IMP_EXP *create_delindex; +extern const char CATS_IMP_EXP *create_deltabs[]; +extern const char CATS_IMP_EXP *create_temp_accurate_jobids[]; +extern const char CATS_IMP_EXP *create_temp_basefile[]; +extern const char CATS_IMP_EXP *create_temp_new_basefile[]; +extern const char CATS_IMP_EXP *del_MAC; +extern const char CATS_IMP_EXP *drop_deltabs[]; +extern const char CATS_IMP_EXP *expired_volumes[]; +extern const char CATS_IMP_EXP *fill_jobhisto; +extern const char CATS_IMP_EXP *get_restore_objects; +extern const char CATS_IMP_EXP *insert_counter_values[]; +extern const char CATS_IMP_EXP *list_pool; +extern const char CATS_IMP_EXP *match_query[]; +extern const char CATS_IMP_EXP *select_counter_values[]; +extern const char CATS_IMP_EXP *select_recent_version[]; +extern const char CATS_IMP_EXP *select_recent_version_with_basejob[]; +extern const char CATS_IMP_EXP *select_recent_version_with_basejob_and_delta[]; +extern const char CATS_IMP_EXP *sel_JobMedia; +extern const char CATS_IMP_EXP *sql_bvfs_list_files[]; +extern const char CATS_IMP_EXP *sql_bvfs_select[]; +extern const char CATS_IMP_EXP *sql_get_max_connections[]; +extern const char CATS_IMP_EXP *sql_media_order_most_recently_written[]; +extern const char CATS_IMP_EXP *uap_upgrade_copies_oldest_job[]; +extern const char CATS_IMP_EXP *uar_count_files; +extern const char CATS_IMP_EXP *uar_count_files; +extern const char CATS_IMP_EXP *uar_create_temp[]; +extern const char CATS_IMP_EXP *uar_create_temp1[]; +extern const char CATS_IMP_EXP *uar_del_temp; +extern const char CATS_IMP_EXP *uar_del_temp1; +extern const char CATS_IMP_EXP *uar_dif; +extern const char CATS_IMP_EXP *uar_file[]; +extern const char CATS_IMP_EXP *uar_full; +extern const char CATS_IMP_EXP *uar_inc; +extern const char CATS_IMP_EXP *uar_jobid_fileindex; +extern const char CATS_IMP_EXP *uar_jobid_fileindex_from_dir[]; +extern const char CATS_IMP_EXP *uar_jobid_fileindex_from_table; +extern const char CATS_IMP_EXP *uar_jobids_fileindex; +extern const char CATS_IMP_EXP *uar_last_full; +extern const char CATS_IMP_EXP *uar_list_jobs; +extern const char CATS_IMP_EXP *uar_list_temp; +extern const char CATS_IMP_EXP *uar_mediatype; +extern const char CATS_IMP_EXP *uar_print_jobs; +extern const char CATS_IMP_EXP *uar_sel_all_temp; +extern const char CATS_IMP_EXP *uar_sel_all_temp1; +extern const char CATS_IMP_EXP *uar_sel_files; +extern const char CATS_IMP_EXP *uar_sel_fileset; +extern const char CATS_IMP_EXP *uar_sel_filesetid; +extern const char CATS_IMP_EXP *uar_sel_jobid_temp; +extern const char CATS_IMP_EXP *update_counter_values[]; +extern const char CATS_IMP_EXP *expires_in[]; +extern const char CATS_IMP_EXP *prune_cache[]; +extern const char CATS_IMP_EXP *strip_restore[]; +extern const char CATS_IMP_EXP *escape_char_value[]; diff --git 
a/src/cats/sql_create.c b/src/cats/sql_create.c new file mode 100644 index 00000000..ff40241f --- /dev/null +++ b/src/cats/sql_create.c @@ -0,0 +1,1288 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula Catalog Database Create record interface routines + * + * Written by Kern Sibbald, March 2000 + */ + +#include "bacula.h" + +static const int dbglevel = 160; + +#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL + +#include "cats.h" + +/* ----------------------------------------------------------------------- + * + * Generic Routines (or almost generic) + * + * ----------------------------------------------------------------------- + */ + +/** Create a new record for the Job + * Returns: false on failure + * true on success + */ +bool BDB::bdb_create_job_record(JCR *jcr, JOB_DBR *jr) +{ + POOL_MEM buf; + char dt[MAX_TIME_LENGTH]; + time_t stime; + struct tm tm; + bool ok; + int len; + utime_t JobTDate; + char ed1[30],ed2[30]; + char esc_job[MAX_ESCAPE_NAME_LENGTH]; + char esc_name[MAX_ESCAPE_NAME_LENGTH]; + + bdb_lock(); + + stime = jr->SchedTime; + ASSERT(stime != 0); + + (void)localtime_r(&stime, &tm); + strftime(dt, sizeof(dt), "%Y-%m-%d %H:%M:%S", &tm); + JobTDate = (utime_t)stime; + + len = strlen(jcr->comment); /* TODO: use jr instead of jcr to get comment */ + buf.check_size(len*2+1); + bdb_escape_string(jcr, buf.c_str(), jcr->comment, len); + + bdb_escape_string(jcr, esc_job, jr->Job, strlen(jr->Job)); + bdb_escape_string(jcr, esc_name, jr->Name, strlen(jr->Name)); + + /* Must create it */ + Mmsg(cmd, +"INSERT INTO Job (Job,Name,Type,Level,JobStatus,SchedTime,JobTDate," + "ClientId,Comment) " +"VALUES ('%s','%s','%c','%c','%c','%s',%s,%s,'%s')", + esc_job, esc_name, (char)(jr->JobType), (char)(jr->JobLevel), + (char)(jr->JobStatus), dt, edit_uint64(JobTDate, ed1), + edit_int64(jr->ClientId, ed2), buf.c_str()); + + if ((jr->JobId = sql_insert_autokey_record(cmd, NT_("Job"))) == 0) { + Mmsg2(&errmsg, _("Create DB Job record %s failed. 
ERR=%s\n"), + cmd, sql_strerror()); + ok = false; + } else { + ok = true; + } + bdb_unlock(); + return ok; +} + + +/** Create a JobMedia record for medium used this job + * Returns: false on failure + * true on success + */ +bool BDB::bdb_create_jobmedia_record(JCR *jcr, JOBMEDIA_DBR *jm) +{ + bool ok = true; + int count; + char ed1[50], ed2[50]; + + bdb_lock(); + + /* Now get count for VolIndex */ + Mmsg(cmd, "SELECT count(*) from JobMedia WHERE JobId=%s", + edit_int64(jm->JobId, ed1)); + count = get_sql_record_max(jcr, this); + if (count < 0) { + count = 0; + } + count++; + + Mmsg(cmd, + "INSERT INTO JobMedia (JobId,MediaId,FirstIndex,LastIndex," + "StartFile,EndFile,StartBlock,EndBlock,VolIndex) " + "VALUES (%s,%s,%u,%u,%u,%u,%u,%u,%u)", + edit_int64(jm->JobId, ed1), + edit_int64(jm->MediaId, ed2), + jm->FirstIndex, jm->LastIndex, + jm->StartFile, jm->EndFile, jm->StartBlock, jm->EndBlock,count); + + Dmsg0(300, cmd); + if (!InsertDB(jcr, cmd)) { + Mmsg2(&errmsg, _("Create JobMedia record %s failed: ERR=%s\n"), cmd, + sql_strerror()); + ok = false; + } else { + /* Worked, now update the Media record with the EndFile and EndBlock */ + Mmsg(cmd, + "UPDATE Media SET EndFile=%lu, EndBlock=%lu WHERE MediaId=%lu", + jm->EndFile, jm->EndBlock, jm->MediaId); + if (!UpdateDB(jcr, cmd, false)) { + Mmsg2(&errmsg, _("Update Media record %s failed: ERR=%s\n"), cmd, + sql_strerror()); + ok = false; + } + } + bdb_unlock(); + Dmsg0(300, "Return from JobMedia\n"); + return ok; +} + +/** Create Unique Pool record + * Returns: false on failure + * true on success + */ +bool BDB::bdb_create_pool_record(JCR *jcr, POOL_DBR *pr) +{ + bool stat; + char ed1[30], ed2[30], ed3[50], ed4[50], ed5[50], ed6[50]; + char esc_name[MAX_ESCAPE_NAME_LENGTH]; + char esc_lf[MAX_ESCAPE_NAME_LENGTH]; + + Dmsg0(200, "In create pool\n"); + bdb_lock(); + bdb_escape_string(jcr, esc_name, pr->Name, strlen(pr->Name)); + bdb_escape_string(jcr, esc_lf, pr->LabelFormat, strlen(pr->LabelFormat)); + Mmsg(cmd, "SELECT PoolId,Name FROM Pool WHERE Name='%s'", esc_name); + Dmsg1(200, "selectpool: %s\n", cmd); + + if (QueryDB(jcr, cmd)) { + if (sql_num_rows() > 0) { + Mmsg1(&errmsg, _("pool record %s already exists\n"), pr->Name); + sql_free_result(); + bdb_unlock(); + Dmsg1(200, "%s", errmsg); /* pool already exists */ + return false; + } + sql_free_result(); + } + + /* Must create it */ + Mmsg(cmd, +"INSERT INTO Pool (Name,NumVols,MaxVols,UseOnce,UseCatalog," +"AcceptAnyVolume,AutoPrune,Recycle,VolRetention,VolUseDuration," +"MaxVolJobs,MaxVolFiles,MaxVolBytes,PoolType,LabelType,LabelFormat," +"RecyclePoolId,ScratchPoolId,ActionOnPurge,CacheRetention) " +"VALUES ('%s',%u,%u,%d,%d,%d,%d,%d,%s,%s,%u,%u,%s,'%s',%d,'%s',%s,%s,%d,%s)", + esc_name, + pr->NumVols, pr->MaxVols, + pr->UseOnce, pr->UseCatalog, + pr->AcceptAnyVolume, + pr->AutoPrune, pr->Recycle, + edit_uint64(pr->VolRetention, ed1), + edit_uint64(pr->VolUseDuration, ed2), + pr->MaxVolJobs, pr->MaxVolFiles, + edit_uint64(pr->MaxVolBytes, ed3), + pr->PoolType, pr->LabelType, esc_lf, + edit_int64(pr->RecyclePoolId,ed4), + edit_int64(pr->ScratchPoolId,ed5), + pr->ActionOnPurge, + edit_uint64(pr->CacheRetention, ed6) + ); + Dmsg1(200, "Create Pool: %s\n", cmd); + if ((pr->PoolId = sql_insert_autokey_record(cmd, NT_("Pool"))) == 0) { + Mmsg2(&errmsg, _("Create db Pool record %s failed: ERR=%s\n"), + cmd, sql_strerror()); + stat = false; + } else { + stat = true; + } + bdb_unlock(); + return stat; +} + +/** + * Create Unique Device record + * Returns: false on failure + * true on success + 
*/ +bool BDB::bdb_create_device_record(JCR *jcr, DEVICE_DBR *dr) +{ + bool ok; + char ed1[30], ed2[30]; + char esc[MAX_ESCAPE_NAME_LENGTH]; + + Dmsg0(200, "In create Device\n"); + bdb_lock(); + bdb_escape_string(jcr, esc, dr->Name, strlen(dr->Name)); + Mmsg(cmd, "SELECT DeviceId,Name FROM Device WHERE Name='%s'", esc); + Dmsg1(200, "selectdevice: %s\n", cmd); + + if (QueryDB(jcr, cmd)) { + if (sql_num_rows() > 0) { + Mmsg1(&errmsg, _("Device record %s already exists\n"), dr->Name); + sql_free_result(); + bdb_unlock(); + return false; + } + sql_free_result(); + } + + /* Must create it */ + Mmsg(cmd, +"INSERT INTO Device (Name,MediaTypeId,StorageId) VALUES ('%s',%s,%s)", + esc, + edit_uint64(dr->MediaTypeId, ed1), + edit_int64(dr->StorageId, ed2)); + Dmsg1(200, "Create Device: %s\n", cmd); + if ((dr->DeviceId = sql_insert_autokey_record(cmd, NT_("Device"))) == 0) { + Mmsg2(&errmsg, _("Create db Device record %s failed: ERR=%s\n"), + cmd, sql_strerror()); + ok = false; + } else { + ok = true; + } + bdb_unlock(); + return ok; +} + + + +/** + * Create a Unique record for Storage -- no duplicates + * Returns: false on failure + * true on success with id in sr->StorageId + */ +bool BDB::bdb_create_storage_record(JCR *jcr, STORAGE_DBR *sr) +{ + SQL_ROW row; + bool ok; + char esc[MAX_ESCAPE_NAME_LENGTH]; + + bdb_lock(); + bdb_escape_string(jcr, esc, sr->Name, strlen(sr->Name)); + Mmsg(cmd, "SELECT StorageId,AutoChanger FROM Storage WHERE Name='%s'",esc); + + sr->StorageId = 0; + sr->created = false; + /* Check if it already exists */ + if (QueryDB(jcr, cmd)) { + /* If more than one, report error, but return first row */ + if (sql_num_rows() > 1) { + Mmsg1(&errmsg, _("More than one Storage record!: %d\n"), sql_num_rows()); + Jmsg(jcr, M_ERROR, 0, "%s", errmsg); + } + if (sql_num_rows() >= 1) { + if ((row = sql_fetch_row()) == NULL) { + Mmsg1(&errmsg, _("error fetching Storage row: %s\n"), sql_strerror()); + Jmsg(jcr, M_ERROR, 0, "%s", errmsg); + sql_free_result(); + bdb_unlock(); + return false; + } + sr->StorageId = str_to_int64(row[0]); + sr->AutoChanger = atoi(row[1]); /* bool */ + sql_free_result(); + bdb_unlock(); + return true; + } + sql_free_result(); + } + + /* Must create it */ + Mmsg(cmd, "INSERT INTO Storage (Name,AutoChanger)" + " VALUES ('%s',%d)", esc, sr->AutoChanger); + + if ((sr->StorageId = sql_insert_autokey_record(cmd, NT_("Storage"))) == 0) { + Mmsg2(&errmsg, _("Create DB Storage record %s failed. 
ERR=%s\n"), + cmd, sql_strerror()); + Jmsg(jcr, M_ERROR, 0, "%s", errmsg); + ok = false; + } else { + sr->created = true; + ok = true; + } + bdb_unlock(); + return ok; +} + + +/** + * Create Unique MediaType record + * Returns: false on failure + * true on success + */ +bool BDB::bdb_create_mediatype_record(JCR *jcr, MEDIATYPE_DBR *mr) +{ + bool stat; + char esc[MAX_ESCAPE_NAME_LENGTH]; + + Dmsg0(200, "In create mediatype\n"); + bdb_lock(); + bdb_escape_string(jcr, esc, mr->MediaType, strlen(mr->MediaType)); + Mmsg(cmd, "SELECT MediaTypeId,MediaType FROM MediaType WHERE MediaType='%s'", esc); + Dmsg1(200, "selectmediatype: %s\n", cmd); + + if (QueryDB(jcr, cmd)) { + if (sql_num_rows() > 0) { + Mmsg1(&errmsg, _("mediatype record %s already exists\n"), mr->MediaType); + sql_free_result(); + bdb_unlock(); + return false; + } + sql_free_result(); + } + + /* Must create it */ + Mmsg(cmd, +"INSERT INTO MediaType (MediaType,ReadOnly) " +"VALUES ('%s',%d)", + mr->MediaType, + mr->ReadOnly); + Dmsg1(200, "Create mediatype: %s\n", cmd); + if ((mr->MediaTypeId = sql_insert_autokey_record(cmd, NT_("MediaType"))) == 0) { + Mmsg2(&errmsg, _("Create db mediatype record %s failed: ERR=%s\n"), + cmd, sql_strerror()); + stat = false; + } else { + stat = true; + } + bdb_unlock(); + return stat; +} + + +/** + * Create Media record. VolumeName and non-zero Slot must be unique + * + * Returns: 0 on failure + * 1 on success + */ +int BDB::bdb_create_media_record(JCR *jcr, MEDIA_DBR *mr) +{ + int stat; + char ed1[50], ed2[50], ed3[50], ed4[50], ed5[50], ed6[50], ed7[50], ed8[50]; + char ed9[50], ed10[50], ed11[50], ed12[50], ed13[50], ed14[50]; + struct tm tm; + char esc_name[MAX_ESCAPE_NAME_LENGTH]; + char esc_mtype[MAX_ESCAPE_NAME_LENGTH]; + char esc_status[MAX_ESCAPE_NAME_LENGTH]; + + + bdb_lock(); + bdb_escape_string(jcr, esc_name, mr->VolumeName, strlen(mr->VolumeName)); + bdb_escape_string(jcr, esc_mtype, mr->MediaType, strlen(mr->MediaType)); + bdb_escape_string(jcr, esc_status, mr->VolStatus, strlen(mr->VolStatus)); + + Mmsg(cmd, "SELECT MediaId FROM Media WHERE VolumeName='%s'", esc_name); + Dmsg1(500, "selectpool: %s\n", cmd); + + if (QueryDB(jcr, cmd)) { + if (sql_num_rows() > 0) { + Mmsg1(&errmsg, _("Volume \"%s\" already exists.\n"), mr->VolumeName); + sql_free_result(); + bdb_unlock(); + return 0; + } + sql_free_result(); + } + + /* Must create it */ + Mmsg(cmd, +"INSERT INTO Media (VolumeName,MediaType,MediaTypeId,PoolId,MaxVolBytes," +"VolCapacityBytes,Recycle,VolRetention,VolUseDuration,MaxVolJobs,MaxVolFiles," +"VolStatus,Slot,VolBytes,InChanger,VolReadTime,VolWriteTime,VolType," +"VolParts,VolCloudParts,LastPartBytes," +"EndFile,EndBlock,LabelType,StorageId,DeviceId,LocationId," +"ScratchPoolId,RecyclePoolId,Enabled,ActionOnPurge,CacheRetention)" +"VALUES ('%s','%s',0,%u,%s,%s,%d,%s,%s,%u,%u,'%s',%d,%s,%d,%s,%s,%d," + "%d,%d,'%s',%d,%d,%d,%s,%s,%s,%s,%s,%d,%d,%s)", + esc_name, + esc_mtype, mr->PoolId, + edit_uint64(mr->MaxVolBytes,ed1), + edit_uint64(mr->VolCapacityBytes, ed2), + mr->Recycle, + edit_uint64(mr->VolRetention, ed3), + edit_uint64(mr->VolUseDuration, ed4), + mr->MaxVolJobs, + mr->MaxVolFiles, + esc_status, + mr->Slot, + edit_uint64(mr->VolBytes, ed5), + mr->InChanger, + edit_int64(mr->VolReadTime, ed6), + edit_int64(mr->VolWriteTime, ed7), + mr->VolType, + mr->VolParts, + mr->VolCloudParts, + edit_uint64(mr->LastPartBytes, ed8), + mr->EndFile, + mr->EndBlock, + mr->LabelType, + edit_int64(mr->StorageId, ed9), + edit_int64(mr->DeviceId, ed10), + edit_int64(mr->LocationId, ed11), + 
edit_int64(mr->ScratchPoolId, ed12), + edit_int64(mr->RecyclePoolId, ed13), + mr->Enabled, + mr->ActionOnPurge, + edit_uint64(mr->CacheRetention, ed14) + ); + + Dmsg1(500, "Create Volume: %s\n", cmd); + if ((mr->MediaId = sql_insert_autokey_record(cmd, NT_("Media"))) == 0) { + Mmsg2(&errmsg, _("Create DB Media record %s failed. ERR=%s\n"), + cmd, sql_strerror()); + stat = 0; + } else { + stat = 1; + if (mr->set_label_date) { + char dt[MAX_TIME_LENGTH]; + if (mr->LabelDate == 0) { + mr->LabelDate = time(NULL); + } + (void)localtime_r(&mr->LabelDate, &tm); + strftime(dt, sizeof(dt), "%Y-%m-%d %H:%M:%S", &tm); + Mmsg(cmd, "UPDATE Media SET LabelDate='%s' " + "WHERE MediaId=%lu", dt, mr->MediaId); + stat = UpdateDB(jcr, cmd, false); + } + /* + * Make sure that if InChanger is non-zero any other identical slot + * has InChanger zero. + */ + db_make_inchanger_unique(jcr, this, mr); + } + + bdb_unlock(); + return stat; +} + +/** + * Create a Unique record for the client -- no duplicates + * Returns: 0 on failure + * 1 on success with id in cr->ClientId + */ +int BDB::bdb_create_client_record(JCR *jcr, CLIENT_DBR *cr) +{ + SQL_ROW row; + int stat; + char ed1[50], ed2[50]; + char esc_name[MAX_ESCAPE_NAME_LENGTH]; + char esc_uname[MAX_ESCAPE_NAME_LENGTH]; + + bdb_lock(); + bdb_escape_string(jcr, esc_name, cr->Name, strlen(cr->Name)); + bdb_escape_string(jcr, esc_uname, cr->Uname, strlen(cr->Uname)); + Mmsg(cmd, "SELECT ClientId,Uname,AutoPrune," + "FileRetention,JobRetention FROM Client WHERE Name='%s'",esc_name); + + cr->ClientId = 0; + if (QueryDB(jcr, cmd)) { + /* If more than one, report error, but return first row */ + if (sql_num_rows() > 1) { + Mmsg1(&errmsg, _("More than one Client!: %d\n"), sql_num_rows()); + Jmsg(jcr, M_ERROR, 0, "%s", errmsg); + } + if (sql_num_rows() >= 1) { + if ((row = sql_fetch_row()) == NULL) { + Mmsg1(&errmsg, _("error fetching Client row: %s\n"), sql_strerror()); + Jmsg(jcr, M_ERROR, 0, "%s", errmsg); + sql_free_result(); + bdb_unlock(); + return 0; + } + cr->ClientId = str_to_int64(row[0]); + if (row[1]) { + bstrncpy(cr->Uname, row[1], sizeof(cr->Uname)); + } else { + cr->Uname[0] = 0; /* no name */ + } + cr->AutoPrune = str_to_int64(row[2]); + cr->FileRetention = str_to_int64(row[3]); + cr->JobRetention = str_to_int64(row[4]); + sql_free_result(); + bdb_unlock(); + return 1; + } + sql_free_result(); + } + + /* Must create it */ + Mmsg(cmd, "INSERT INTO Client (Name,Uname,AutoPrune," +"FileRetention,JobRetention) VALUES " +"('%s','%s',%d,%s,%s)", esc_name, esc_uname, cr->AutoPrune, + edit_uint64(cr->FileRetention, ed1), + edit_uint64(cr->JobRetention, ed2)); + + if ((cr->ClientId = sql_insert_autokey_record(cmd, NT_("Client"))) == 0) { + Mmsg2(&errmsg, _("Create DB Client record %s failed. 
ERR=%s\n"), + cmd, sql_strerror()); + Jmsg(jcr, M_ERROR, 0, "%s", errmsg); + stat = 0; + } else { + stat = 1; + } + bdb_unlock(); + return stat; +} + + +/** Create a Unique record for the Path -- no duplicates */ +int BDB::bdb_create_path_record(JCR *jcr, ATTR_DBR *ar) +{ + SQL_ROW row; + int stat; + + errmsg[0] = 0; + esc_name = check_pool_memory_size(esc_name, 2*pnl+2); + bdb_escape_string(jcr, esc_name, path, pnl); + + if (cached_path_id != 0 && cached_path_len == pnl && + strcmp(cached_path, path) == 0) { + ar->PathId = cached_path_id; + return 1; + } + + Mmsg(cmd, "SELECT PathId FROM Path WHERE Path='%s'", esc_name); + + if (QueryDB(jcr, cmd)) { + if (sql_num_rows() > 1) { + char ed1[30]; + Mmsg2(&errmsg, _("More than one Path!: %s for path: %s\n"), + edit_uint64(sql_num_rows(), ed1), path); + Jmsg(jcr, M_WARNING, 0, "%s", errmsg); + } + /* Even if there are multiple paths, take the first one */ + if (sql_num_rows() >= 1) { + if ((row = sql_fetch_row()) == NULL) { + Mmsg1(&errmsg, _("error fetching row: %s\n"), sql_strerror()); + Jmsg(jcr, M_ERROR, 0, "%s", errmsg); + sql_free_result(); + ar->PathId = 0; + ASSERT2(ar->PathId, + "Your Path table is broken. " + "Please, use dbcheck to correct it."); + return 0; + } + ar->PathId = str_to_int64(row[0]); + sql_free_result(); + /* Cache path */ + if (ar->PathId != cached_path_id) { + cached_path_id = ar->PathId; + cached_path_len = pnl; + pm_strcpy(cached_path, path); + } + ASSERT(ar->PathId); + return 1; + } + sql_free_result(); + } + + Mmsg(cmd, "INSERT INTO Path (Path) VALUES ('%s')", esc_name); + + if ((ar->PathId = sql_insert_autokey_record(cmd, NT_("Path"))) == 0) { + Mmsg2(&errmsg, _("Create db Path record %s failed. ERR=%s\n"), + cmd, sql_strerror()); + Jmsg(jcr, M_FATAL, 0, "%s", errmsg); + ar->PathId = 0; + stat = 0; + } else { + stat = 1; + } + + /* Cache path */ + if (stat && ar->PathId != cached_path_id) { + cached_path_id = ar->PathId; + cached_path_len = pnl; + pm_strcpy(cached_path, path); + } + return stat; +} + +/** + * Create a Unique record for the counter -- no duplicates + * Returns: 0 on failure + * 1 on success with counter filled in + */ +int BDB::bdb_create_counter_record(JCR *jcr, COUNTER_DBR *cr) +{ + char esc[MAX_ESCAPE_NAME_LENGTH]; + COUNTER_DBR mcr; + int stat; + + bdb_lock(); + memset(&mcr, 0, sizeof(mcr)); + bstrncpy(mcr.Counter, cr->Counter, sizeof(mcr.Counter)); + if (bdb_get_counter_record(jcr, &mcr)) { + memcpy(cr, &mcr, sizeof(COUNTER_DBR)); + bdb_unlock(); + return 1; + } + bdb_escape_string(jcr, esc, cr->Counter, strlen(cr->Counter)); + + /* Must create it */ + Mmsg(cmd, insert_counter_values[bdb_get_type_index()], + esc, cr->MinValue, cr->MaxValue, cr->CurrentValue, + cr->WrapCounter); + + if (!InsertDB(jcr, cmd)) { + Mmsg2(&errmsg, _("Create DB Counters record %s failed. ERR=%s\n"), + cmd, sql_strerror()); + Jmsg(jcr, M_ERROR, 0, "%s", errmsg); + stat = 0; + } else { + stat = 1; + } + bdb_unlock(); + return stat; +} + +/** + * Create a FileSet record. This record is unique in the + * name and the MD5 signature of the include/exclude sets. 
+ * Returns: 0 on failure + * 1 on success with FileSetId in record + */ +bool BDB::bdb_create_fileset_record(JCR *jcr, FILESET_DBR *fsr) +{ + SQL_ROW row; + bool stat; + struct tm tm; + char esc_fs[MAX_ESCAPE_NAME_LENGTH]; + char esc_md5[MAX_ESCAPE_NAME_LENGTH]; + + /* TODO: Escape FileSet and MD5 */ + bdb_lock(); + fsr->created = false; + bdb_escape_string(jcr, esc_fs, fsr->FileSet, strlen(fsr->FileSet)); + bdb_escape_string(jcr, esc_md5, fsr->MD5, strlen(fsr->MD5)); + Mmsg(cmd, "SELECT FileSetId,CreateTime FROM FileSet WHERE " + "FileSet='%s' AND MD5='%s'", esc_fs, esc_md5); + + fsr->FileSetId = 0; + if (QueryDB(jcr, cmd)) { + if (sql_num_rows() > 1) { + Mmsg1(&errmsg, _("More than one FileSet!: %d\n"), sql_num_rows()); + Jmsg(jcr, M_ERROR, 0, "%s", errmsg); + } + if (sql_num_rows() >= 1) { + if ((row = sql_fetch_row()) == NULL) { + Mmsg1(&errmsg, _("error fetching FileSet row: ERR=%s\n"), sql_strerror()); + Jmsg(jcr, M_ERROR, 0, "%s", errmsg); + sql_free_result(); + bdb_unlock(); + return false; + } + fsr->FileSetId = str_to_int64(row[0]); + if (row[1] == NULL) { + fsr->cCreateTime[0] = 0; + } else { + bstrncpy(fsr->cCreateTime, row[1], sizeof(fsr->cCreateTime)); + } + sql_free_result(); + bdb_unlock(); + return true; + } + sql_free_result(); + } + + if (fsr->CreateTime == 0 && fsr->cCreateTime[0] == 0) { + fsr->CreateTime = time(NULL); + } + (void)localtime_r(&fsr->CreateTime, &tm); + strftime(fsr->cCreateTime, sizeof(fsr->cCreateTime), "%Y-%m-%d %H:%M:%S", &tm); + + /* Must create it */ + Mmsg(cmd, "INSERT INTO FileSet (FileSet,MD5,CreateTime) " +"VALUES ('%s','%s','%s')", esc_fs, esc_md5, fsr->cCreateTime); + + if ((fsr->FileSetId = sql_insert_autokey_record(cmd, NT_("FileSet"))) == 0) { + Mmsg2(&errmsg, _("Create DB FileSet record %s failed. ERR=%s\n"), + cmd, sql_strerror()); + Jmsg(jcr, M_ERROR, 0, "%s", errmsg); + stat = false; + } else { + fsr->created = true; + stat = true; + } + + bdb_unlock(); + return stat; +} + + +/** + * struct stat + * { + * dev_t st_dev; * device * + * ino_t st_ino; * inode * + * mode_t st_mode; * protection * + * nlink_t st_nlink; * number of hard links * + * uid_t st_uid; * user ID of owner * + * gid_t st_gid; * group ID of owner * + * dev_t st_rdev; * device type (if inode device) * + * off_t st_size; * total size, in bytes * + * unsigned long st_blksize; * blocksize for filesystem I/O * + * unsigned long st_blocks; * number of blocks allocated * + * time_t st_atime; * time of last access * + * time_t st_mtime; * time of last modification * + * time_t st_ctime; * time of last inode change * + * }; + */ + +/* For maintenance, we can put batch mode in hold */ +static bool batch_mode_enabled = true; + +void bdb_disable_batch_insert(bool enabled) +{ + batch_mode_enabled = enabled; +} + +/* + * All sql_batch_xx functions are used to do bulk batch + * insert in File/Filename/Path tables. + * + * To sum up : + * - bulk load a temp table + * - insert missing filenames into filename with a single query (lock filenames + * - table before that to avoid possible duplicate inserts with concurrent update) + * - insert missing paths into path with another single query + * - then insert the join between the temp, filename and path tables into file. + */ + +/* + * Returns true if OK + * false if failed + */ +bool bdb_write_batch_file_records(JCR *jcr) +{ + bool retval = false; + int JobStatus = jcr->JobStatus; + + if (!jcr->batch_started) { /* no files to backup ? 
*/ + Dmsg0(50,"db_write_batch_file_records: no files\n"); + return true; + } + + if (job_canceled(jcr)) { + goto bail_out; + } + + jcr->JobStatus = JS_AttrInserting; + + /* Check if batch mode is on hold */ + while (!batch_mode_enabled) { + Dmsg0(50, "batch mode is on hold\n"); + bmicrosleep(10, 0); + + if (job_canceled(jcr)) { + goto bail_out; + } + } + + Dmsg1(50,"db_write_batch_file_records changes=%u\n",jcr->db_batch->changes); + + if (!jcr->db_batch->sql_batch_end(jcr, NULL)) { + Jmsg1(jcr, M_FATAL, 0, "Batch end %s\n", jcr->db_batch->errmsg); + goto bail_out; + } + if (job_canceled(jcr)) { + goto bail_out; + } + + /* We have to lock tables */ + if (!jcr->db_batch->bdb_sql_query(batch_lock_path_query[jcr->db_batch->bdb_get_type_index()], NULL, NULL)) { + Jmsg1(jcr, M_FATAL, 0, "Lock Path table %s\n", jcr->db_batch->errmsg); + goto bail_out; + } + + if (!jcr->db_batch->bdb_sql_query(batch_fill_path_query[jcr->db_batch->bdb_get_type_index()], NULL, NULL)) { + Jmsg1(jcr, M_FATAL, 0, "Fill Path table %s\n",jcr->db_batch->errmsg); + jcr->db_batch->bdb_sql_query(batch_unlock_tables_query[jcr->db_batch->bdb_get_type_index()], NULL, NULL); + goto bail_out; + } + + if (!jcr->db_batch->bdb_sql_query(batch_unlock_tables_query[jcr->db_batch->bdb_get_type_index()], NULL, NULL)) { + Jmsg1(jcr, M_FATAL, 0, "Unlock Path table %s\n", jcr->db_batch->errmsg); + goto bail_out; + } + + /* We have to lock tables */ + if (!db_sql_query(jcr->db_batch, batch_lock_filename_query[db_get_type_index(jcr->db_batch)], NULL, NULL)) { + Jmsg1(jcr, M_FATAL, 0, "Lock Filename table %s\n", jcr->db_batch->errmsg); + goto bail_out; + } + + if (!db_sql_query(jcr->db_batch, batch_fill_filename_query[db_get_type_index(jcr->db_batch)], NULL, NULL)) { + Jmsg1(jcr,M_FATAL,0,"Fill Filename table %s\n",jcr->db_batch->errmsg); + db_sql_query(jcr->db_batch, batch_unlock_tables_query[db_get_type_index(jcr->db_batch)], NULL, NULL); + goto bail_out; + } + + if (!db_sql_query(jcr->db_batch, batch_unlock_tables_query[db_get_type_index(jcr->db_batch)], NULL, NULL)) { + Jmsg1(jcr, M_FATAL, 0, "Unlock Filename table %s\n", jcr->db_batch->errmsg); + goto bail_out; + } + + if (!db_sql_query(jcr->db_batch, +"INSERT INTO File (FileIndex, JobId, PathId, FilenameId, LStat, MD5, DeltaSeq) " + "SELECT batch.FileIndex, batch.JobId, Path.PathId, " + "Filename.FilenameId,batch.LStat, batch.MD5, batch.DeltaSeq " + "FROM batch " + "JOIN Path ON (batch.Path = Path.Path) " + "JOIN Filename ON (batch.Name = Filename.Name)", + NULL, NULL)) + { + Jmsg1(jcr, M_FATAL, 0, "Fill File table %s\n", jcr->db_batch->errmsg); + goto bail_out; + } + + jcr->JobStatus = JobStatus; /* reset entry status */ + retval = true; + +bail_out: + jcr->db_batch->bdb_sql_query("DROP TABLE batch", NULL,NULL); + jcr->batch_started = false; + + return retval; +} + +/* + * Create File record in BDB + * + * In order to reduce database size, we store the File attributes, + * the FileName, and the Path separately. In principle, there + * is a single FileName record and a single Path record, no matter + * how many times it occurs. This is this subroutine, we separate + * the file and the path and fill temporary tables with this three records. + * + * Note: all routines that call this expect to be able to call + * db_strerror(mdb) to get the error message, so the error message + * MUST be edited into errmsg before returning an error status. 
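 *
 * Putting the batch pieces together, the per-file and end-of-job calls made
 * during a backup look roughly like this (the loop and next_file() are
 * invented for illustration; the real control flow lives in the Director's
 * backup code, not in this file):
 *
 *    while (next_file(jcr, &ar)) {               * hypothetical iterator *
 *       if (!db->bdb_create_attributes_record(jcr, &ar)) {
 *          break;                                * errmsg already edited *
 *       }
 *    }
 *    if (!bdb_write_batch_file_records(jcr)) {   * flush batch into catalog *
 *       ... fail the job; errors were already reported via Jmsg ...
 *    }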
+ */ +bool BDB::bdb_create_batch_file_attributes_record(JCR *jcr, ATTR_DBR *ar) +{ + ASSERT(ar->FileType != FT_BASE); + Dmsg2(dbglevel, "FileIndex=%d Fname=%s\n", ar->FileIndex, ar->fname); + Dmsg0(dbglevel, "put_file_into_catalog\n"); + + if (jcr->batch_started && jcr->db_batch->changes > 500000) { + bdb_write_batch_file_records(jcr); + jcr->db_batch->changes = 0; + } + + /* Open the dedicated connexion */ + if (!jcr->batch_started) { + if (!bdb_open_batch_connexion(jcr)) { + return false; /* error already printed */ + } + if (!jcr->db_batch->sql_batch_start(jcr)) { + Mmsg1(&errmsg, + "Can't start batch mode: ERR=%s", jcr->db_batch->bdb_strerror()); + Jmsg(jcr, M_FATAL, 0, "%s", errmsg); + return false; + } + jcr->batch_started = true; + } + + split_path_and_file(jcr, jcr->db_batch, ar->fname); + + return jcr->db_batch->sql_batch_insert(jcr, ar); +} + +/** + * Create File record in BDB + * + * In order to reduce database size, we store the File attributes, + * the FileName, and the Path separately. In principle, there + * is a single FileName record and a single Path record, no matter + * how many times it occurs. This is this subroutine, we separate + * the file and the path and create three database records. + */ +bool BDB::bdb_create_file_attributes_record(JCR *jcr, ATTR_DBR *ar) +{ + bdb_lock(); + Dmsg2(dbglevel, "FileIndex=%d Fname=%s\n", ar->FileIndex, ar->fname); + Dmsg0(dbglevel, "put_file_into_catalog\n"); + + split_path_and_file(jcr, this, ar->fname); + + if (!bdb_create_filename_record(jcr, ar)) { + goto bail_out; + } + Dmsg1(dbglevel, "bdb_create_filename_record: %s\n", esc_name); + + + if (!bdb_create_path_record(jcr, ar)) { + goto bail_out; + } + Dmsg1(dbglevel, "bdb_create_path_record: %s\n", esc_name); + + /* Now create master File record */ + if (!bdb_create_file_record(jcr, ar)) { + goto bail_out; + } + Dmsg0(dbglevel, "db_create_file_record OK\n"); + + Dmsg3(dbglevel, "CreateAttributes Path=%s File=%s FilenameId=%d\n", path, fname, ar->FilenameId); + bdb_unlock(); + return true; + +bail_out: + bdb_unlock(); + return false; +} +/** + * This is the master File entry containing the attributes. + * The filename and path records have already been created. + */ +int BDB::bdb_create_file_record(JCR *jcr, ATTR_DBR *ar) +{ + int stat; + static const char *no_digest = "0"; + const char *digest; + + ASSERT(ar->JobId); + ASSERT(ar->PathId); + ASSERT(ar->FilenameId); + + if (ar->Digest == NULL || ar->Digest[0] == 0) { + digest = no_digest; + } else { + digest = ar->Digest; + } + + /* Must create it */ + Mmsg(cmd, + "INSERT INTO File (FileIndex,JobId,PathId,FilenameId," + "LStat,MD5,DeltaSeq) VALUES (%d,%u,%u,%u,'%s','%s',%u)", + ar->FileIndex, ar->JobId, ar->PathId, ar->FilenameId, + ar->attr, digest, ar->DeltaSeq); + + if ((ar->FileId = sql_insert_autokey_record(cmd, NT_("File"))) == 0) { + Mmsg2(&errmsg, _("Create db File record %s failed. ERR=%s"), + cmd, sql_strerror()); + Jmsg(jcr, M_FATAL, 0, "%s", errmsg); + stat = 0; + } else { + stat = 1; + } + return stat; +} + +/** Create a Unique record for the filename -- no duplicates */ +int BDB::bdb_create_filename_record(JCR *jcr, ATTR_DBR *ar) +{ + SQL_ROW row; + + errmsg[0] = 0; + esc_name = check_pool_memory_size(esc_name, 2*fnl+2); + bdb_escape_string(jcr, esc_name, fname, fnl); + + Mmsg(cmd, "SELECT FilenameId FROM Filename WHERE Name='%s'", esc_name); + + if (QueryDB(jcr, cmd)) { + if (sql_num_rows() > 1) { + char ed1[30]; + Mmsg2(&errmsg, _("More than one Filename! 
%s for file: %s\n"), + edit_uint64(sql_num_rows(), ed1), fname); + Jmsg(jcr, M_WARNING, 0, "%s", errmsg); + } + if (sql_num_rows() >= 1) { + if ((row = sql_fetch_row()) == NULL) { + Mmsg2(&errmsg, _("Error fetching row for file=%s: ERR=%s\n"), + fname, sql_strerror()); + Jmsg(jcr, M_ERROR, 0, "%s", errmsg); + ar->FilenameId = 0; + } else { + ar->FilenameId = str_to_int64(row[0]); + } + sql_free_result(); + return ar->FilenameId > 0; + } + sql_free_result(); + } + + Mmsg(cmd, "INSERT INTO Filename (Name) VALUES ('%s')", esc_name); + + ar->FilenameId = sql_insert_autokey_record(cmd, NT_("Filename")); + if (ar->FilenameId == 0) { + Mmsg2(&errmsg, _("Create db Filename record %s failed. ERR=%s\n"), + cmd, sql_strerror()); + Jmsg(jcr, M_FATAL, 0, "%s", errmsg); + } + return ar->FilenameId > 0; +} + +/* + * Create file attributes record, or base file attributes record + */ +bool BDB::bdb_create_attributes_record(JCR *jcr, ATTR_DBR *ar) +{ + bool ret; + + Dmsg2(dbglevel, "FileIndex=%d Fname=%s\n", ar->FileIndex, ar->fname); + errmsg[0] = 0; + /* + * Make sure we have an acceptable attributes record. + */ + if (!(ar->Stream == STREAM_UNIX_ATTRIBUTES || + ar->Stream == STREAM_UNIX_ATTRIBUTES_EX)) { + Mmsg1(&errmsg, _("Attempt to put non-attributes into catalog. Stream=%d\n"), + ar->Stream); + Jmsg(jcr, M_FATAL, 0, "%s", errmsg); + return false; + } + + if (ar->FileType != FT_BASE) { + if (batch_insert_available()) { + ret = bdb_create_batch_file_attributes_record(jcr, ar); + /* Error message already printed */ + } else { + ret = bdb_create_file_attributes_record(jcr, ar); + } + } else if (jcr->HasBase) { + ret = bdb_create_base_file_attributes_record(jcr, ar); + } else { + Mmsg0(&errmsg, _("Cannot Copy/Migrate job using BaseJob.\n")); + Jmsg(jcr, M_FATAL, 0, "%s", errmsg); + ret = true; /* in copy/migration what do we do ? 
*/ + } + + return ret; +} + +/** + * Create Base File record in BDB + * + */ +bool BDB::bdb_create_base_file_attributes_record(JCR *jcr, ATTR_DBR *ar) +{ + bool ret; + + Dmsg1(dbglevel, "create_base_file Fname=%s\n", ar->fname); + Dmsg0(dbglevel, "put_base_file_into_catalog\n"); + + bdb_lock(); + split_path_and_file(jcr, this, ar->fname); + + esc_name = check_pool_memory_size(esc_name, fnl*2+1); + bdb_escape_string(jcr, esc_name, fname, fnl); + + esc_path = check_pool_memory_size(esc_path, pnl*2+1); + bdb_escape_string(jcr, esc_path, path, pnl); + + Mmsg(cmd, "INSERT INTO basefile%lld (Path, Name) VALUES ('%s','%s')", + (uint64_t)jcr->JobId, esc_path, esc_name); + + ret = InsertDB(jcr, cmd); + bdb_unlock(); + + return ret; +} + +/** + * Cleanup the base file temporary tables + */ +static void db_cleanup_base_file(JCR *jcr, BDB *mdb) +{ + POOL_MEM buf(PM_MESSAGE); + Mmsg(buf, "DROP TABLE new_basefile%lld", (uint64_t) jcr->JobId); + mdb->bdb_sql_query(buf.c_str(), NULL, NULL); + + Mmsg(buf, "DROP TABLE basefile%lld", (uint64_t) jcr->JobId); + mdb->bdb_sql_query(buf.c_str(), NULL, NULL); +} + +/** + * Put all base file seen in the backup to the BaseFile table + * and cleanup temporary tables + */ +bool BDB::bdb_commit_base_file_attributes_record(JCR *jcr) +{ + bool ret; + char ed1[50]; + + bdb_lock(); + + Mmsg(cmd, + "INSERT INTO BaseFiles (BaseJobId, JobId, FileId, FileIndex) " + "SELECT B.JobId AS BaseJobId, %s AS JobId, " + "B.FileId, B.FileIndex " + "FROM basefile%s AS A, new_basefile%s AS B " + "WHERE A.Path = B.Path " + "AND A.Name = B.Name " + "ORDER BY B.FileId", + edit_uint64(jcr->JobId, ed1), ed1, ed1); + ret = bdb_sql_query(cmd, NULL, NULL); + /* + * Display error now, because the subsequent cleanup destroys the + * error message from the above query. 
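The base-file commit above hinges on the per-job temporary tables basefile&lt;JobId&gt; and new_basefile&lt;JobId&gt;. The sketch below shows, assuming a hypothetical run_sql() helper in place of bdb_sql_query(), how the table names are derived from the JobId and how the final join fills BaseFiles before the temporary tables are dropped.

/*
 * Minimal sketch (not the Bacula API): the per-job temporary tables are
 * named by appending the JobId, and the commit step joins them on Path and
 * Name to fill BaseFiles before both tables are dropped.
 */
#include <cstdio>
#include <cstdint>

static bool run_sql(const char *q) { std::printf("SQL> %s\n", q); return true; }

static bool commit_base_files(uint64_t jobid)
{
   char sql[1024];
   unsigned long long id = (unsigned long long)jobid;

   std::snprintf(sql, sizeof(sql),
      "INSERT INTO BaseFiles (BaseJobId, JobId, FileId, FileIndex) "
      "SELECT B.JobId AS BaseJobId, %llu AS JobId, B.FileId, B.FileIndex "
      "FROM basefile%llu AS A, new_basefile%llu AS B "
      "WHERE A.Path = B.Path AND A.Name = B.Name "
      "ORDER BY B.FileId", id, id, id);
   bool ok = run_sql(sql);

   /* the temporary tables are dropped whether or not the insert worked */
   std::snprintf(sql, sizeof(sql), "DROP TABLE new_basefile%llu", id);
   run_sql(sql);
   std::snprintf(sql, sizeof(sql), "DROP TABLE basefile%llu", id);
   run_sql(sql);
   return ok;
}

int main() { return commit_base_files(1234) ? 0 : 1; }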
+ */ + if (!ret) { + Jmsg1(jcr, M_FATAL, 0, "%s", jcr->db->bdb_strerror()); + } + jcr->nb_base_files_used = sql_affected_rows(); + db_cleanup_base_file(jcr, this); + + bdb_unlock(); + return ret; +} + +/** + * Find the last "accurate" backup state with Base jobs + * 1) Get all files with jobid in list (F subquery) + * 2) Take only the last version of each file (Temp subquery) => accurate list is ok + * 3) Put the result in a temporary table for the end of job + * + */ +bool BDB::bdb_create_base_file_list(JCR *jcr, char *jobids) +{ + POOL_MEM buf; + bool ret = false; + + bdb_lock(); + + if (!*jobids) { + Mmsg(errmsg, _("ERR=JobIds are empty\n")); + goto bail_out; + } + + Mmsg(cmd, create_temp_basefile[bdb_get_type_index()], (uint64_t) jcr->JobId); + if (!bdb_sql_query(cmd, NULL, NULL)) { + goto bail_out; + } + Mmsg(buf, select_recent_version[bdb_get_type_index()], jobids, jobids); + Mmsg(cmd, create_temp_new_basefile[bdb_get_type_index()], (uint64_t)jcr->JobId, buf.c_str()); + + ret = bdb_sql_query(cmd, NULL, NULL); +bail_out: + bdb_unlock(); + return ret; +} + +/** + * Create Restore Object record in BDB + * + */ +bool BDB::bdb_create_restore_object_record(JCR *jcr, ROBJECT_DBR *ro) +{ + bool stat; + int plug_name_len; + POOLMEM *esc_plug_name = get_pool_memory(PM_MESSAGE); + + bdb_lock(); + + Dmsg1(dbglevel, "Oname=%s\n", ro->object_name); + Dmsg0(dbglevel, "put_object_into_catalog\n"); + + fnl = strlen(ro->object_name); + esc_name = check_pool_memory_size(esc_name, fnl*2+1); + bdb_escape_string(jcr, esc_name, ro->object_name, fnl); + + bdb_escape_object(jcr, ro->object, ro->object_len); + + plug_name_len = strlen(ro->plugin_name); + esc_plug_name = check_pool_memory_size(esc_plug_name, plug_name_len*2+1); + bdb_escape_string(jcr, esc_plug_name, ro->plugin_name, plug_name_len); + + Mmsg(cmd, + "INSERT INTO RestoreObject (ObjectName,PluginName,RestoreObject," + "ObjectLength,ObjectFullLength,ObjectIndex,ObjectType," + "ObjectCompression,FileIndex,JobId) " + "VALUES ('%s','%s','%s',%d,%d,%d,%d,%d,%d,%u)", + esc_name, esc_plug_name, esc_obj, + ro->object_len, ro->object_full_len, ro->object_index, + ro->FileType, ro->object_compression, ro->FileIndex, ro->JobId); + + ro->RestoreObjectId = sql_insert_autokey_record(cmd, NT_("RestoreObject")); + if (ro->RestoreObjectId == 0) { + Mmsg2(&errmsg, _("Create db Object record %s failed. 
ERR=%s"), + cmd, sql_strerror()); + Jmsg(jcr, M_FATAL, 0, "%s", errmsg); + stat = false; + } else { + stat = true; + } + bdb_unlock(); + free_pool_memory(esc_plug_name); + return stat; +} + +bool BDB::bdb_create_snapshot_record(JCR *jcr, SNAPSHOT_DBR *snap) +{ + bool status = false; + char esc_name[MAX_ESCAPE_NAME_LENGTH]; + POOLMEM *esc_vol = get_pool_memory(PM_MESSAGE); + POOLMEM *esc_dev = get_pool_memory(PM_MESSAGE); + POOLMEM *esc_type = get_pool_memory(PM_MESSAGE); + POOLMEM *esc_client = get_pool_memory(PM_MESSAGE); + POOLMEM *esc_fs = get_pool_memory(PM_MESSAGE); + char esc_comment[MAX_ESCAPE_NAME_LENGTH]; + char dt[MAX_TIME_LENGTH], ed1[50], ed2[50]; + time_t stime; + struct tm tm; + + bdb_lock(); + + esc_vol = check_pool_memory_size(esc_vol, strlen(snap->Volume) * 2 + 1); + bdb_escape_string(jcr, esc_vol, snap->Volume, strlen(snap->Volume)); + + esc_dev = check_pool_memory_size(esc_dev, strlen(snap->Device) * 2 + 1); + bdb_escape_string(jcr, esc_dev, snap->Device, strlen(snap->Device)); + + esc_type = check_pool_memory_size(esc_type, strlen(snap->Type) * 2 + 1); + bdb_escape_string(jcr, esc_type, snap->Type, strlen(snap->Type)); + + bdb_escape_string(jcr, esc_comment, snap->Comment, strlen(snap->Comment)); + + if (*snap->Client) { + bdb_escape_string(jcr, esc_name, snap->Client, strlen(snap->Client)); + Mmsg(esc_client, "(SELECT ClientId FROM Client WHERE Name='%s')", esc_name); + + } else { + Mmsg(esc_client, "%d", snap->ClientId); + } + + if (*snap->FileSet) { + bdb_escape_string(jcr, esc_name, snap->FileSet, strlen(snap->FileSet)); + Mmsg(esc_fs, "(SELECT FileSetId FROM FileSet WHERE FileSet='%s' ORDER BY CreateTime DESC LIMIT 1)", esc_name); + + } else { + Mmsg(esc_fs, "%d", snap->FileSetId); + } + + bdb_escape_string(jcr, esc_name, snap->Name, strlen(snap->Name)); + + stime = snap->CreateTDate; + (void)localtime_r(&stime, &tm); + strftime(dt, sizeof(dt), "%Y-%m-%d %H:%M:%S", &tm); + + Mmsg(cmd, "INSERT INTO Snapshot " + "(Name, JobId, CreateTDate, CreateDate, ClientId, FileSetId, Volume, Device, Type, Retention, Comment) " + "VALUES ('%s', %s, %d, '%s', %s, %s, '%s', '%s', '%s', %s, '%s')", + esc_name, edit_uint64(snap->JobId, ed2), stime, dt, esc_client, esc_fs, esc_vol, + esc_dev, esc_type, edit_int64(snap->Retention, ed1), esc_comment); + + if (bdb_sql_query(cmd, NULL, NULL)) { + snap->SnapshotId = sql_insert_autokey_record(cmd, NT_("Snapshot")); + status = true; + } + + bdb_unlock(); + + free_pool_memory(esc_vol); + free_pool_memory(esc_dev); + free_pool_memory(esc_type); + free_pool_memory(esc_client); + free_pool_memory(esc_fs); + + return status; +} + +#endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL */ diff --git a/src/cats/sql_delete.c b/src/cats/sql_delete.c new file mode 100644 index 00000000..c93690fe --- /dev/null +++ b/src/cats/sql_delete.c @@ -0,0 +1,254 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Bacula Catalog Database Delete record interface routines + * + * Written by Kern Sibbald, December 2000-2014 + */ + +#include "bacula.h" + +#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL +#include "cats.h" + +/* ----------------------------------------------------------------------- + * + * Generic Routines (or almost generic) + * + * ----------------------------------------------------------------------- + */ + +/* + * Delete Pool record, must also delete all associated + * Media records. + * + * Returns: 0 on error + * 1 on success + * PoolId = number of Pools deleted (should be 1) + * NumVols = number of Media records deleted + */ +int BDB::bdb_delete_pool_record(JCR *jcr, POOL_DBR *pr) +{ + SQL_ROW row; + char esc[MAX_ESCAPE_NAME_LENGTH]; + + bdb_lock(); + bdb_escape_string(jcr, esc, pr->Name, strlen(pr->Name)); + Mmsg(cmd, "SELECT PoolId FROM Pool WHERE Name='%s'", esc); + Dmsg1(10, "selectpool: %s\n", cmd); + + pr->PoolId = pr->NumVols = 0; + + if (QueryDB(jcr, cmd)) { + int nrows = sql_num_rows(); + if (nrows == 0) { + Mmsg(errmsg, _("No pool record %s exists\n"), pr->Name); + sql_free_result(); + bdb_unlock(); + return 0; + } else if (nrows != 1) { + Mmsg(errmsg, _("Expecting one pool record, got %d\n"), nrows); + sql_free_result(); + bdb_unlock(); + return 0; + } + if ((row = sql_fetch_row()) == NULL) { + Mmsg1(&errmsg, _("Error fetching row %s\n"), sql_strerror()); + bdb_unlock(); + return 0; + } + pr->PoolId = str_to_int64(row[0]); + sql_free_result(); + } + + /* Delete Media owned by this pool */ + Mmsg(cmd, +"DELETE FROM Media WHERE Media.PoolId = %d", pr->PoolId); + + pr->NumVols = DeleteDB(jcr, cmd); + Dmsg1(200, "Deleted %d Media records\n", pr->NumVols); + + /* Delete Pool */ + Mmsg(cmd, +"DELETE FROM Pool WHERE Pool.PoolId = %d", pr->PoolId); + pr->PoolId = DeleteDB(jcr, cmd); + Dmsg1(200, "Deleted %d Pool records\n", pr->PoolId); + + bdb_unlock(); + return 1; +} + +#define MAX_DEL_LIST_LEN 1000000 + +struct s_del_ctx { + JobId_t *JobId; + int num_ids; /* ids stored */ + int max_ids; /* size of array */ + int num_del; /* number deleted */ + int tot_ids; /* total to process */ +}; + +/* + * Called here to make in memory list of JobIds to be + * deleted. The in memory list will then be transversed + * to issue the SQL DELETE commands. Note, the list + * is allowed to get to MAX_DEL_LIST_LEN to limit the + * maximum malloc'ed memory. + */ +static int delete_handler(void *ctx, int num_fields, char **row) +{ + struct s_del_ctx *del = (struct s_del_ctx *)ctx; + + if (del->num_ids == MAX_DEL_LIST_LEN) { + return 1; + } + if (del->num_ids == del->max_ids) { + del->max_ids = (del->max_ids * 3) / 2; + del->JobId = (JobId_t *)brealloc(del->JobId, sizeof(JobId_t) * + del->max_ids); + } + del->JobId[del->num_ids++] = (JobId_t)str_to_int64(row[0]); + return 0; +} + + +/* + * This routine will purge (delete) all records + * associated with a particular Volume. It will + * not delete the media record itself. + * TODO: This function is broken and it doesn't purge + * File, BaseFiles, Log, ... + * We call it from relabel and delete volume=, both ensure + * that the volume is properly purged. 
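A minimal standalone sketch of the accumulation pattern used by delete_handler() above: each JobId returned by the query is appended to a malloc'ed array that grows by 3/2 and stops at a hard cap. The callback signature here is simplified and is not the Bacula handler API.

/*
 * Sketch of the delete_handler() growth pattern (simplified signature).
 */
#include <cstdio>
#include <cstdlib>
#include <cstdint>

enum { LIST_CAP = 1000000 };                    /* mirrors MAX_DEL_LIST_LEN */

struct del_ctx {
   uint64_t *ids;
   int num_ids;                                 /* ids stored */
   int max_ids;                                 /* size of the array */
};

static int collect_id(del_ctx *ctx, uint64_t jobid)
{
   if (ctx->num_ids == LIST_CAP) {
      return 1;                                 /* list full: stop fetching */
   }
   if (ctx->num_ids == ctx->max_ids) {
      ctx->max_ids = (ctx->max_ids * 3) / 2;    /* grow by 50% */
      uint64_t *p = (uint64_t *)realloc(ctx->ids, sizeof(uint64_t) * ctx->max_ids);
      if (p == NULL) {
         return 1;                              /* the real code uses brealloc(), which handles failure itself */
      }
      ctx->ids = p;
   }
   ctx->ids[ctx->num_ids++] = jobid;
   return 0;
}

int main()
{
   del_ctx ctx = { (uint64_t *)malloc(sizeof(uint64_t) * 100), 0, 100 };
   for (uint64_t j = 1; j <= 500; j++) {
      collect_id(&ctx, j);
   }
   std::printf("collected %d JobIds\n", ctx.num_ids);
   free(ctx.ids);
   return 0;
}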
+ */ +static int do_media_purge(BDB *mdb, MEDIA_DBR *mr) +{ + POOLMEM *query = get_pool_memory(PM_MESSAGE); + struct s_del_ctx del; + char ed1[50]; + int i; + + del.num_ids = 0; + del.tot_ids = 0; + del.num_del = 0; + del.max_ids = 0; + Mmsg(mdb->cmd, "SELECT JobId from JobMedia WHERE MediaId=%lu", mr->MediaId); + del.max_ids = mr->VolJobs; + if (del.max_ids < 100) { + del.max_ids = 100; + } else if (del.max_ids > MAX_DEL_LIST_LEN) { + del.max_ids = MAX_DEL_LIST_LEN; + } + del.JobId = (JobId_t *)malloc(sizeof(JobId_t) * del.max_ids); + mdb->bdb_sql_query(mdb->cmd, delete_handler, (void *)&del); + + for (i=0; i < del.num_ids; i++) { + Dmsg1(400, "Delete JobId=%d\n", del.JobId[i]); + Mmsg(query, "DELETE FROM Job WHERE JobId=%s", edit_int64(del.JobId[i], ed1)); + mdb->bdb_sql_query(query, NULL, (void *)NULL); + Mmsg(query, "DELETE FROM File WHERE JobId=%s", edit_int64(del.JobId[i], ed1)); + mdb->bdb_sql_query(query, NULL, (void *)NULL); + Mmsg(query, "DELETE FROM JobMedia WHERE JobId=%s", edit_int64(del.JobId[i], ed1)); + mdb->bdb_sql_query(query, NULL, (void *)NULL); + } + free(del.JobId); + free_pool_memory(query); + return 1; +} + +/* Delete Media record and all records that + * are associated with it. + */ +int BDB::bdb_delete_media_record(JCR *jcr, MEDIA_DBR *mr) +{ + bdb_lock(); + if (mr->MediaId == 0 && !bdb_get_media_record(jcr, mr)) { + bdb_unlock(); + return 0; + } + /* Do purge if not already purged */ + if (strcmp(mr->VolStatus, "Purged") != 0) { + /* Delete associated records */ + do_media_purge(this, mr); + } + + Mmsg(cmd, "DELETE FROM Media WHERE MediaId=%lu", mr->MediaId); + bdb_sql_query(cmd, NULL, (void *)NULL); + bdb_unlock(); + return 1; +} + +/* + * Purge all records associated with a + * media record. This does not delete the + * media record itself. But the media status + * is changed to "Purged". + */ +int BDB::bdb_purge_media_record(JCR *jcr, MEDIA_DBR *mr) +{ + bdb_lock(); + if (mr->MediaId == 0 && !bdb_get_media_record(jcr, mr)) { + bdb_unlock(); + return 0; + } + /* Delete associated records */ + do_media_purge(this, mr); /* Note, always purge */ + + /* Mark Volume as purged */ + strcpy(mr->VolStatus, "Purged"); + if (!bdb_update_media_record(jcr, mr)) { + bdb_unlock(); + return 0; + } + + bdb_unlock(); + return 1; +} + +/* Delete Snapshot record */ +int BDB::bdb_delete_snapshot_record(JCR *jcr, SNAPSHOT_DBR *sr) +{ + bdb_lock(); + if (sr->SnapshotId == 0 && !bdb_get_snapshot_record(jcr, sr)) { + bdb_unlock(); + return 0; + } + + Mmsg(cmd, "DELETE FROM Snapshot WHERE SnapshotId=%d", sr->SnapshotId); + bdb_sql_query(cmd, NULL, (void *)NULL); + bdb_unlock(); + return 1; +} + +/* Delete Client record */ +int BDB::bdb_delete_client_record(JCR *jcr, CLIENT_DBR *cr) +{ + bdb_lock(); + if (cr->ClientId == 0 && !bdb_get_client_record(jcr, cr)) { + bdb_unlock(); + return 0; + } + + Mmsg(cmd, "DELETE FROM Client WHERE ClientId=%d", cr->ClientId); + bdb_sql_query(cmd, NULL, (void *)NULL); + bdb_unlock(); + return 1; +} + +#endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL */ diff --git a/src/cats/sql_find.c b/src/cats/sql_find.c new file mode 100644 index 00000000..a94e2d22 --- /dev/null +++ b/src/cats/sql_find.c @@ -0,0 +1,540 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
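To summarize the purge performed by do_media_purge() above, the sketch below replays the three DELETE statements issued per collected JobId; run_sql() is a hypothetical stand-in for bdb_sql_query().

/*
 * Minimal sketch (not the Bacula API) of the purge loop: for every JobId
 * collected from JobMedia, the matching Job, File and JobMedia rows go away.
 */
#include <cstdio>
#include <cstdint>

static bool run_sql(const char *q) { std::printf("SQL> %s\n", q); return true; }

static void purge_jobs(const uint64_t *jobids, int count)
{
   char sql[128];

   for (int i = 0; i < count; i++) {
      unsigned long long id = (unsigned long long)jobids[i];
      std::snprintf(sql, sizeof(sql), "DELETE FROM Job WHERE JobId=%llu", id);
      run_sql(sql);
      std::snprintf(sql, sizeof(sql), "DELETE FROM File WHERE JobId=%llu", id);
      run_sql(sql);
      std::snprintf(sql, sizeof(sql), "DELETE FROM JobMedia WHERE JobId=%llu", id);
      run_sql(sql);
   }
}

int main()
{
   const uint64_t ids[] = { 101, 102, 103 };
   purge_jobs(ids, 3);
   return 0;
}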
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula Catalog Database Find record interface routines + * + * Note, generally, these routines are more complicated + * that a simple search by name or id. Such simple + * request are in get.c + * + * Written by Kern Sibbald, December 2000 + */ + +#include "bacula.h" + +#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL + +#include "cats.h" + +/* ----------------------------------------------------------------------- + * + * Generic Routines (or almost generic) + * + * ----------------------------------------------------------------------- + */ + +/* + * Find the most recent successful real end time for a job given. + * + * RealEndTime is returned in etime + * Job name is returned in job (MAX_NAME_LENGTH) + * + * Returns: false on failure + * true on success, jr is unchanged, but etime and job are set + */ +bool BDB::bdb_find_last_job_end_time(JCR *jcr, JOB_DBR *jr, POOLMEM **etime, + char *job) +{ + SQL_ROW row; + char ed1[50], ed2[50]; + char esc_name[MAX_ESCAPE_NAME_LENGTH]; + + bdb_lock(); + bdb_escape_string(jcr, esc_name, jr->Name, strlen(jr->Name)); + pm_strcpy(etime, "0000-00-00 00:00:00"); /* default */ + job[0] = 0; + + Mmsg(cmd, + "SELECT RealEndTime, Job FROM Job WHERE JobStatus IN ('T','W') AND Type='%c' AND " + "Level IN ('%c','%c','%c') AND Name='%s' AND ClientId=%s AND FileSetId=%s " + "ORDER BY RealEndTime DESC LIMIT 1", jr->JobType, + L_FULL, L_DIFFERENTIAL, L_INCREMENTAL, esc_name, + edit_int64(jr->ClientId, ed1), edit_int64(jr->FileSetId, ed2)); + + if (!QueryDB(jcr, cmd)) { + Mmsg2(&errmsg, _("Query error for end time request: ERR=%s\nCMD=%s\n"), + sql_strerror(), cmd); + goto bail_out; + } + if ((row = sql_fetch_row()) == NULL) { + sql_free_result(); + Mmsg(errmsg, _("No prior backup Job record found.\n")); + goto bail_out; + } + Dmsg1(100, "Got end time: %s\n", row[0]); + pm_strcpy(etime, row[0]); + bstrncpy(job, row[1], MAX_NAME_LENGTH); + + sql_free_result(); + bdb_unlock(); + return true; + +bail_out: + bdb_unlock(); + return false; +} + + +/* + * Find job start time if JobId specified, otherwise + * find last Job start time Incremental and Differential saves. 
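The "last successful job" lookups in this file share the query shape used by bdb_find_last_job_end_time() above: filter on JobStatus IN ('T','W'), Type, Level, Name, ClientId and FileSetId, then take the newest row. A minimal sketch of building that statement; the literal level characters 'F', 'D', 'I' are stand-ins for the L_FULL, L_DIFFERENTIAL and L_INCREMENTAL symbols.

/*
 * Minimal sketch of the shared "newest successful job" query shape.
 */
#include <cstdio>
#include <cstddef>

static int build_last_end_time_query(char *sql, size_t len, char job_type,
                                     const char *esc_name,
                                     long long client_id, long long fileset_id)
{
   return std::snprintf(sql, len,
      "SELECT RealEndTime, Job FROM Job "
      "WHERE JobStatus IN ('T','W') AND Type='%c' "
      "AND Level IN ('F','D','I') AND Name='%s' "
      "AND ClientId=%lld AND FileSetId=%lld "
      "ORDER BY RealEndTime DESC LIMIT 1",
      job_type, esc_name, client_id, fileset_id);
}

int main()
{
   char sql[512];
   build_last_end_time_query(sql, sizeof(sql), 'B', "NightlySave", 1, 2);
   std::printf("%s\n", sql);
   return 0;
}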
+ * + * StartTime is returned in stime + * Job name is returned in job (MAX_NAME_LENGTH) + * + * Returns: false on failure + * true on success, jr is unchanged, but stime and job are set + */ +bool BDB::bdb_find_job_start_time(JCR *jcr, JOB_DBR *jr, POOLMEM **stime, char *job) +{ + SQL_ROW row; + char ed1[50], ed2[50]; + char esc_name[MAX_ESCAPE_NAME_LENGTH]; + + bdb_lock(); + bdb_escape_string(jcr, esc_name, jr->Name, strlen(jr->Name)); + pm_strcpy(stime, "0000-00-00 00:00:00"); /* default */ + job[0] = 0; + + /* If no Id given, we must find corresponding job */ + if (jr->JobId == 0) { + /* Differential is since last Full backup */ + Mmsg(cmd, +"SELECT StartTime, Job FROM Job WHERE JobStatus IN ('T','W') AND Type='%c' AND " +"Level='%c' AND Name='%s' AND ClientId=%s AND FileSetId=%s " +"ORDER BY StartTime DESC LIMIT 1", + jr->JobType, L_FULL, esc_name, + edit_int64(jr->ClientId, ed1), edit_int64(jr->FileSetId, ed2)); + + if (jr->JobLevel == L_DIFFERENTIAL) { + /* SQL cmd for Differential backup already edited above */ + + /* Incremental is since last Full, Incremental, or Differential */ + } else if (jr->JobLevel == L_INCREMENTAL) { + /* + * For an Incremental job, we must first ensure + * that a Full backup was done (cmd edited above) + * then we do a second look to find the most recent + * backup + */ + if (!QueryDB(jcr, cmd)) { + Mmsg2(&errmsg, _("Query error for start time request: ERR=%s\nCMD=%s\n"), + sql_strerror(), cmd); + goto bail_out; + } + if ((row = sql_fetch_row()) == NULL) { + sql_free_result(); + Mmsg(errmsg, _("No prior Full backup Job record found.\n")); + goto bail_out; + } + sql_free_result(); + /* Now edit SQL command for Incremental Job */ + Mmsg(cmd, +"SELECT StartTime, Job FROM Job WHERE JobStatus IN ('T','W') AND Type='%c' AND " +"Level IN ('%c','%c','%c') AND Name='%s' AND ClientId=%s " +"AND FileSetId=%s ORDER BY StartTime DESC LIMIT 1", + jr->JobType, L_INCREMENTAL, L_DIFFERENTIAL, L_FULL, esc_name, + edit_int64(jr->ClientId, ed1), edit_int64(jr->FileSetId, ed2)); + } else { + Mmsg1(errmsg, _("Unknown level=%d\n"), jr->JobLevel); + goto bail_out; + } + } else { + Dmsg1(100, "Submitting: %s\n", cmd); + Mmsg(cmd, "SELECT StartTime, Job FROM Job WHERE Job.JobId=%s", + edit_int64(jr->JobId, ed1)); + } + + if (!QueryDB(jcr, cmd)) { + pm_strcpy(stime, ""); /* set EOS */ + Mmsg2(&errmsg, _("Query error for start time request: ERR=%s\nCMD=%s\n"), + sql_strerror(), cmd); + goto bail_out; + } + + if ((row = sql_fetch_row()) == NULL) { + Mmsg2(&errmsg, _("No Job record found: ERR=%s\nCMD=%s\n"), + sql_strerror(), cmd); + sql_free_result(); + goto bail_out; + } + Dmsg2(100, "Got start time: %s, job: %s\n", row[0], row[1]); + pm_strcpy(stime, row[0]); + bstrncpy(job, row[1], MAX_NAME_LENGTH); + + sql_free_result(); + + bdb_unlock(); + return true; + +bail_out: + bdb_unlock(); + return false; +} + + +/* + * Find the last job start time for the specified JobLevel + * + * StartTime is returned in stime + * Job name is returned in job (MAX_NAME_LENGTH) + * + * Returns: false on failure + * true on success, jr is unchanged, but stime and job are set + */ +bool BDB::bdb_find_last_job_start_time(JCR *jcr, JOB_DBR *jr, + POOLMEM **stime, char *job, int JobLevel) +{ + SQL_ROW row; + char ed1[50], ed2[50]; + char esc_name[MAX_ESCAPE_NAME_LENGTH]; + + bdb_lock(); + bdb_escape_string(jcr, esc_name, jr->Name, strlen(jr->Name)); + pm_strcpy(stime, "0000-00-00 00:00:00"); /* default */ + job[0] = 0; + + Mmsg(cmd, +"SELECT StartTime, Job FROM Job WHERE JobStatus IN ('T','W') AND Type='%c' 
AND " +"Level='%c' AND Name='%s' AND ClientId=%s AND FileSetId=%s " +"ORDER BY StartTime DESC LIMIT 1", + jr->JobType, JobLevel, esc_name, + edit_int64(jr->ClientId, ed1), edit_int64(jr->FileSetId, ed2)); + if (!QueryDB(jcr, cmd)) { + Mmsg2(&errmsg, _("Query error for start time request: ERR=%s\nCMD=%s\n"), + sql_strerror(), cmd); + goto bail_out; + } + if ((row = sql_fetch_row()) == NULL) { + sql_free_result(); + Mmsg(errmsg, _("No prior Full backup Job record found.\n")); + goto bail_out; + } + Dmsg1(100, "Got start time: %s\n", row[0]); + pm_strcpy(stime, row[0]); + bstrncpy(job, row[1], MAX_NAME_LENGTH); + + sql_free_result(); + bdb_unlock(); + return true; + +bail_out: + bdb_unlock(); + return false; +} + +/* + * Find last failed job since given start-time + * it must be either Full or Diff. + * + * Returns: false on failure + * true on success, jr is unchanged and stime unchanged + * level returned in JobLevel + */ +bool BDB::bdb_find_failed_job_since(JCR *jcr, JOB_DBR *jr, POOLMEM *stime, int &JobLevel) +{ + SQL_ROW row; + char ed1[50], ed2[50]; + char esc_name[MAX_ESCAPE_NAME_LENGTH]; + + bdb_lock(); + bdb_escape_string(jcr, esc_name, jr->Name, strlen(jr->Name)); + + /* Differential is since last Full backup */ + Mmsg(cmd, + "SELECT Level FROM Job WHERE JobStatus IN ('%c','%c', '%c', '%c') AND " + "Type='%c' AND Level IN ('%c','%c') AND Name='%s' AND ClientId=%s " + "AND FileSetId=%s AND StartTime>'%s' " + "ORDER BY StartTime DESC LIMIT 1", + JS_Canceled, JS_ErrorTerminated, JS_Error, JS_FatalError, + jr->JobType, L_FULL, L_DIFFERENTIAL, esc_name, + edit_int64(jr->ClientId, ed1), edit_int64(jr->FileSetId, ed2), + stime); + if (!QueryDB(jcr, cmd)) { + bdb_unlock(); + return false; + } + + if ((row = sql_fetch_row()) == NULL) { + sql_free_result(); + bdb_unlock(); + return false; + } + JobLevel = (int)*row[0]; + sql_free_result(); + + bdb_unlock(); + return true; +} + + +/* + * Find JobId of last job that ran. E.g. for + * VERIFY_CATALOG we want the JobId of the last INIT. + * For VERIFY_VOLUME_TO_CATALOG, we want the JobId of the last Job. 
+ * + * Returns: true on success + * false on failure + */ +bool BDB::bdb_find_last_jobid(JCR *jcr, const char *Name, JOB_DBR *jr) +{ + SQL_ROW row; + char ed1[50]; + char esc_name[MAX_ESCAPE_NAME_LENGTH]; + + bdb_lock(); + /* Find last full */ + Dmsg2(100, "JobLevel=%d JobType=%d\n", jr->JobLevel, jr->JobType); + if (jr->JobLevel == L_VERIFY_CATALOG) { + bdb_escape_string(jcr, esc_name, jr->Name, strlen(jr->Name)); + Mmsg(cmd, +"SELECT JobId FROM Job WHERE Type='V' AND Level='%c' AND " +" JobStatus IN ('T','W') AND Name='%s' AND " +"ClientId=%s ORDER BY StartTime DESC LIMIT 1", + L_VERIFY_INIT, esc_name, + edit_int64(jr->ClientId, ed1)); + } else if (jr->JobLevel == L_VERIFY_VOLUME_TO_CATALOG || + jr->JobLevel == L_VERIFY_DISK_TO_CATALOG || + jr->JobLevel == L_VERIFY_DATA || + jr->JobType == JT_BACKUP) { + if (Name) { + bdb_escape_string(jcr, esc_name, (char*)Name, + MIN(strlen(Name), sizeof(esc_name))); + Mmsg(cmd, +"SELECT JobId FROM Job WHERE Type='B' AND JobStatus IN ('T','W') AND " +"Name='%s' ORDER BY StartTime DESC LIMIT 1", esc_name); + } else { + Mmsg(cmd, +"SELECT JobId FROM Job WHERE Type='B' AND JobStatus IN ('T','W') AND " +"ClientId=%s ORDER BY StartTime DESC LIMIT 1", + edit_int64(jr->ClientId, ed1)); + } + } else { + Mmsg1(&errmsg, _("Unknown Job level=%d\n"), jr->JobLevel); + bdb_unlock(); + return false; + } + Dmsg1(100, "Query: %s\n", cmd); + if (!QueryDB(jcr, cmd)) { + bdb_unlock(); + return false; + } + if ((row = sql_fetch_row()) == NULL) { + Mmsg1(&errmsg, _("No Job found for: %s.\n"), cmd); + sql_free_result(); + bdb_unlock(); + return false; + } + + jr->JobId = str_to_int64(row[0]); + sql_free_result(); + + Dmsg1(100, "db_get_last_jobid: got JobId=%d\n", jr->JobId); + if (jr->JobId <= 0) { + Mmsg1(&errmsg, _("No Job found for: %s\n"), cmd); + bdb_unlock(); + return false; + } + + bdb_unlock(); + return true; +} + +/* + * Find Available Media (Volume) for Pool + * + * Find a Volume for a given PoolId, MediaType, and Status. 
+ * + * Returns: 0 on failure + * numrows on success + */ +int BDB::bdb_find_next_volume(JCR *jcr, int item, bool InChanger, MEDIA_DBR *mr) +{ + SQL_ROW row = NULL; + int numrows; + const char *order; + char esc_type[MAX_ESCAPE_NAME_LENGTH]; + char esc_status[MAX_ESCAPE_NAME_LENGTH]; + char ed1[50]; + + bdb_lock(); + bdb_escape_string(jcr, esc_type, mr->MediaType, strlen(mr->MediaType)); + bdb_escape_string(jcr, esc_status, mr->VolStatus, strlen(mr->VolStatus)); + + if (item == -1) { /* find oldest volume */ + /* Find oldest volume */ + Mmsg(cmd, "SELECT MediaId,VolumeName,VolJobs,VolFiles,VolBlocks," + "VolBytes,VolMounts,VolErrors,VolWrites,MaxVolBytes,VolCapacityBytes," + "MediaType,VolStatus,PoolId,VolRetention,VolUseDuration,MaxVolJobs," + "MaxVolFiles,Recycle,Slot,FirstWritten,LastWritten,InChanger," + "EndFile,EndBlock,VolType,VolParts,VolCloudParts,LastPartBytes," + "LabelType,LabelDate,StorageId," + "Enabled,LocationId,RecycleCount,InitialWrite," + "ScratchPoolId,RecyclePoolId,VolReadTime,VolWriteTime,ActionOnPurge,CacheRetention " + "FROM Media WHERE PoolId=%s AND MediaType='%s' " + " AND (VolStatus IN ('Full', 'Append', 'Used') OR (VolStatus IN ('Recycle', 'Purged', 'Used') AND Recycle=1)) " + " AND Enabled=1 " + "ORDER BY LastWritten LIMIT 1", + edit_int64(mr->PoolId, ed1), esc_type); + item = 1; + } else { + POOL_MEM changer(PM_FNAME); + POOL_MEM voltype(PM_FNAME); + POOL_MEM exclude(PM_FNAME); + /* Find next available volume */ + /* ***FIXME*** + * replace switch with + * if (StorageId == 0) + * break; + * else use mr->sid_group; but it must be set!!! + */ + if (InChanger) { + ASSERT(mr->sid_group); + if (mr->sid_group) { + Mmsg(changer, " AND InChanger=1 AND StorageId IN (%s) ", + mr->sid_group); + } else { + Mmsg(changer, " AND InChanger=1 AND StorageId=%s ", + edit_int64(mr->StorageId, ed1)); + } + } + /* Volumes will be automatically excluded from the query, we just take the + * first one of the list + */ + if (mr->exclude_list && *mr->exclude_list) { + item = 1; + Mmsg(exclude, " AND MediaId NOT IN (%s) ", mr->exclude_list); + } + if (strcmp(mr->VolStatus, "Recycle") == 0 || + strcmp(mr->VolStatus, "Purged") == 0) { + order = "AND Recycle=1 ORDER BY LastWritten ASC,MediaId"; /* take oldest that can be recycled */ + } else { + order = sql_media_order_most_recently_written[bdb_get_type_index()]; /* take most recently written */ + } + if (mr->VolType == 0) { + Mmsg(voltype, ""); + } else { + Mmsg(voltype, "AND VolType IN (0,%d)", mr->VolType); + } + Mmsg(cmd, "SELECT MediaId,VolumeName,VolJobs,VolFiles,VolBlocks," + "VolBytes,VolMounts,VolErrors,VolWrites,MaxVolBytes,VolCapacityBytes," + "MediaType,VolStatus,PoolId,VolRetention,VolUseDuration,MaxVolJobs," + "MaxVolFiles,Recycle,Slot,FirstWritten,LastWritten,InChanger," + "EndFile,EndBlock,VolType,VolParts,VolCloudParts,LastPartBytes," + "LabelType,LabelDate,StorageId," + "Enabled,LocationId,RecycleCount,InitialWrite," + "ScratchPoolId,RecyclePoolId,VolReadTime,VolWriteTime,ActionOnPurge,CacheRetention " + "FROM Media WHERE PoolId=%s AND MediaType='%s' AND Enabled=1 " + "AND VolStatus='%s' " + "%s " + "%s " + "%s " + "%s LIMIT %d", + edit_int64(mr->PoolId, ed1), esc_type, + esc_status, + voltype.c_str(), + changer.c_str(), exclude.c_str(), order, item); + } + Dmsg1(100, "fnextvol=%s\n", cmd); + if (!QueryDB(jcr, cmd)) { + bdb_unlock(); + return 0; + } + + numrows = sql_num_rows(); + if (item > numrows || item < 1) { + Dmsg2(050, "item=%d got=%d\n", item, numrows); + Mmsg2(&errmsg, _("Request for Volume item %d greater than 
max %d or less than 1\n"), + item, numrows); + bdb_unlock(); + return 0; + } + + /* Note, we previously seeked to the row using: + * sql_data_seek(item-1); + * but this failed on PostgreSQL, so now we loop + * over all the records. This should not be too horrible since + * the maximum Volumes we look at in any case is 20. + */ + while (item-- > 0) { + if ((row = sql_fetch_row()) == NULL) { + Dmsg1(050, "Fail fetch item=%d\n", item+1); + Mmsg1(&errmsg, _("No Volume record found for item %d.\n"), item); + sql_free_result(); + bdb_unlock(); + return 0; + } + } + + /* Return fields in Media Record */ + mr->MediaId = str_to_int64(row[0]); + bstrncpy(mr->VolumeName, row[1]!=NULL?row[1]:"", sizeof(mr->VolumeName)); + mr->VolJobs = str_to_int64(row[2]); + mr->VolFiles = str_to_int64(row[3]); + mr->VolBlocks = str_to_int64(row[4]); + mr->VolBytes = str_to_uint64(row[5]); + mr->VolMounts = str_to_int64(row[6]); + mr->VolErrors = str_to_int64(row[7]); + mr->VolWrites = str_to_int64(row[8]); + mr->MaxVolBytes = str_to_uint64(row[9]); + mr->VolCapacityBytes = str_to_uint64(row[10]); + bstrncpy(mr->MediaType, row[11]!=NULL?row[11]:"", sizeof(mr->MediaType)); + bstrncpy(mr->VolStatus, row[12]!=NULL?row[12]:"", sizeof(mr->VolStatus)); + mr->PoolId = str_to_int64(row[13]); + mr->VolRetention = str_to_uint64(row[14]); + mr->VolUseDuration = str_to_uint64(row[15]); + mr->MaxVolJobs = str_to_int64(row[16]); + mr->MaxVolFiles = str_to_int64(row[17]); + mr->Recycle = str_to_int64(row[18]); + mr->Slot = str_to_int64(row[19]); + bstrncpy(mr->cFirstWritten, row[20]!=NULL?row[20]:"", sizeof(mr->cFirstWritten)); + mr->FirstWritten = (time_t)str_to_utime(mr->cFirstWritten); + bstrncpy(mr->cLastWritten, row[21]!=NULL?row[21]:"", sizeof(mr->cLastWritten)); + mr->LastWritten = (time_t)str_to_utime(mr->cLastWritten); + mr->InChanger = str_to_uint64(row[22]); + mr->EndFile = str_to_uint64(row[23]); + mr->EndBlock = str_to_uint64(row[24]); + mr->VolType = str_to_int64(row[25]); + mr->VolParts = str_to_int64(row[26]); + mr->VolCloudParts = str_to_int64(row[27]); + mr->LastPartBytes = str_to_int64(row[28]); + mr->LabelType = str_to_int64(row[29]); + bstrncpy(mr->cLabelDate, row[30]!=NULL?row[30]:"", sizeof(mr->cLabelDate)); + mr->LabelDate = (time_t)str_to_utime(mr->cLabelDate); + mr->StorageId = str_to_int64(row[31]); + mr->Enabled = str_to_int64(row[32]); + mr->LocationId = str_to_int64(row[33]); + mr->RecycleCount = str_to_int64(row[34]); + bstrncpy(mr->cInitialWrite, row[35]!=NULL?row[35]:"", sizeof(mr->cInitialWrite)); + mr->InitialWrite = (time_t)str_to_utime(mr->cInitialWrite); + mr->ScratchPoolId = str_to_int64(row[36]); + mr->RecyclePoolId = str_to_int64(row[37]); + mr->VolReadTime = str_to_int64(row[38]); + mr->VolWriteTime = str_to_int64(row[39]); + mr->ActionOnPurge = str_to_int64(row[40]); + mr->CacheRetention = str_to_int64(row[41]); + + sql_free_result(); + + bdb_unlock(); + Dmsg1(050, "Rtn numrows=%d\n", numrows); + return numrows; +} + +#endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL */ diff --git a/src/cats/sql_get.c b/src/cats/sql_get.c new file mode 100644 index 00000000..4ffe2ec3 --- /dev/null +++ b/src/cats/sql_get.c @@ -0,0 +1,1757 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
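As the comment above notes, sql_data_seek(item-1) failed on PostgreSQL, so the wanted row is reached by fetching and discarding. A minimal sketch of that loop, with fetch_row() as a hypothetical stand-in for sql_fetch_row() that serves three fake rows:

/*
 * Minimal sketch of the portable row-skipping loop used in place of
 * sql_data_seek().
 */
#include <cstdio>

static const char *fetch_row()
{
   static const char *rows[] = { "Vol001", "Vol002", "Vol003" };
   static int next = 0;
   return next < 3 ? rows[next++] : NULL;
}

static const char *fetch_item(int item)       /* item is 1-based */
{
   const char *row = NULL;
   while (item-- > 0) {
      if ((row = fetch_row()) == NULL) {
         return NULL;                          /* fewer rows than expected */
      }
   }
   return row;                                 /* the item'th row */
}

int main()
{
   const char *row = fetch_item(2);
   std::printf("item 2 -> %s\n", row ? row : "(none)");
   return 0;
}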
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/** + * Bacula Catalog Database Get record interface routines + * Note, these routines generally get a record by id or + * by name. If more logic is involved, the routine + * should be in find.c + * + * Written by Kern Sibbald, March 2000 + */ + +#include "bacula.h" + +#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL + +#include "cats.h" + +/* ----------------------------------------------------------------------- + * + * Generic Routines (or almost generic) + * + * ----------------------------------------------------------------------- + */ + +/* + * Given a full filename (with path), look up the File record + * (with attributes) in the database. + * + * Returns: false on failure + * true on success with the File record in FILE_DBR + */ +bool BDB::bdb_get_file_attributes_record(JCR *jcr, char *afname, JOB_DBR *jr, FILE_DBR *fdbr) +{ + bool ok; + + Dmsg1(500, "db_get_file_att_record fname=%s \n", afname); + + bdb_lock(); + + split_path_and_file(jcr, this, afname); + + fdbr->FilenameId = bdb_get_filename_record(jcr); + + fdbr->PathId = bdb_get_path_record(jcr); + + ok = bdb_get_file_record(jcr, jr, fdbr); + + bdb_unlock(); + + return ok; +} + + +/* + * Get a File record + * + * DO NOT use Jmsg in this routine. + * + * Note in this routine, we do not use Jmsg because it may be + * called to get attributes of a non-existent file, which is + * "normal" if a new file is found during Verify. + * + * The following is a bit of a kludge: because we always backup a + * directory entry, we can end up with two copies of the directory + * in the backup. One is when we encounter the directory and find + * we cannot recurse into it, and the other is when we find an + * explicit mention of the directory. This can also happen if the + * use includes the directory twice. In this case, Verify + * VolumeToCatalog fails because we have two copies in the catalog, and + * only the first one is marked (twice). So, when calling from Verify, + * VolumeToCatalog jr is not NULL, and we know jr->FileIndex is the fileindex + * of the version of the directory/file we actually want and do + * a more explicit SQL search. 
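The lookup above relies on split_path_and_file() to break the full filename into its Path and Filename parts before the two id lookups. A minimal sketch of the idea follows; the real routine also handles the trailing-slash "no filename" case and keeps the pieces in pool memory.

/*
 * Minimal sketch: everything up to and including the last '/' is the Path,
 * the remainder is the Filename.
 */
#include <cstdio>
#include <cstring>
#include <string>

static void split_path_and_file(const char *fname, std::string &path, std::string &file)
{
   const char *p = std::strrchr(fname, '/');
   if (p) {
      path.assign(fname, p - fname + 1);   /* include the trailing '/' */
      file.assign(p + 1);
   } else {
      path.clear();
      file.assign(fname);
   }
}

int main()
{
   std::string path, file;
   split_path_and_file("/etc/bacula/bacula-dir.conf", path, file);
   std::printf("Path='%s' Name='%s'\n", path.c_str(), file.c_str());
   return 0;
}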
+ * + * Returns: false on failure + * true on success + * + */ +bool BDB::bdb_get_file_record(JCR *jcr, JOB_DBR *jr, FILE_DBR *fdbr) +{ + SQL_ROW row; + bool ok = false; + char ed1[50], ed2[50], ed3[50]; + + switch (jcr->getJobLevel()) { + case L_VERIFY_VOLUME_TO_CATALOG: + Mmsg(cmd, +"SELECT FileId, LStat, MD5 FROM File WHERE File.JobId=%s AND File.PathId=%s AND " +"File.FilenameId=%s AND File.FileIndex=%d", + edit_int64(fdbr->JobId, ed1), + edit_int64(fdbr->PathId, ed2), + edit_int64(fdbr->FilenameId,ed3), + jr->FileIndex); + break; + case L_VERIFY_DISK_TO_CATALOG: + Mmsg(cmd, +"SELECT FileId, LStat, MD5 FROM File,Job WHERE " +"File.JobId=Job.JobId AND File.PathId=%s AND " +"File.FilenameId=%s AND Job.Type='B' AND Job.JobStatus IN ('T','W') AND " +"ClientId=%s ORDER BY StartTime DESC LIMIT 1", + edit_int64(fdbr->PathId, ed1), + edit_int64(fdbr->FilenameId, ed2), + edit_int64(jr->ClientId,ed3)); + break; + default: + Mmsg(cmd, +"SELECT FileId, LStat, MD5 FROM File WHERE File.JobId=%s AND File.PathId=%s AND " +"File.FilenameId=%s", + edit_int64(fdbr->JobId, ed1), + edit_int64(fdbr->PathId, ed2), + edit_int64(fdbr->FilenameId,ed3)); + break; + } + + Dmsg3(450, "Get_file_record JobId=%u FilenameId=%u PathId=%u\n", + fdbr->JobId, fdbr->FilenameId, fdbr->PathId); + + Dmsg1(100, "Query=%s\n", cmd); + + if (QueryDB(jcr, cmd)) { + Dmsg1(100, "get_file_record sql_num_rows()=%d\n", sql_num_rows()); + if (sql_num_rows() >= 1) { + if ((row = sql_fetch_row()) == NULL) { + Mmsg1(errmsg, _("Error fetching row: %s\n"), sql_strerror()); + } else { + fdbr->FileId = (FileId_t)str_to_int64(row[0]); + bstrncpy(fdbr->LStat, row[1], sizeof(fdbr->LStat)); + bstrncpy(fdbr->Digest, row[2], sizeof(fdbr->Digest)); + ok = true; + if (sql_num_rows() > 1) { + Mmsg3(errmsg, _("get_file_record want 1 got rows=%d PathId=%s FilenameId=%s\n"), + sql_num_rows(), + edit_int64(fdbr->PathId, ed1), + edit_int64(fdbr->FilenameId, ed2)); + Dmsg1(000, "=== Problem! 
%s", errmsg); + } + } + } else { + Mmsg2(errmsg, _("File record for PathId=%s FilenameId=%s not found.\n"), + edit_int64(fdbr->PathId, ed1), + edit_int64(fdbr->FilenameId, ed2)); + } + sql_free_result(); + } else { + Mmsg(errmsg, _("File record not found in Catalog.\n")); + } + return ok; +} + +/* + * Get Filename record + * Returns: 0 on failure + * FilenameId on success + * + * DO NOT use Jmsg in this routine (see notes for get_file_record) + */ +int BDB::bdb_get_filename_record(JCR *jcr) +{ + SQL_ROW row; + int FilenameId = 0; + + esc_name = check_pool_memory_size(esc_name, 2*fnl+2); + bdb_escape_string(jcr, esc_name, fname, fnl); + + Mmsg(cmd, "SELECT FilenameId FROM Filename WHERE Name='%s'", esc_name); + if (QueryDB(jcr, cmd)) { + char ed1[30]; + if (sql_num_rows() > 1) { + Mmsg2(errmsg, _("More than one Filename!: %s for file: %s\n"), + edit_uint64(sql_num_rows(), ed1), fname); + Jmsg(jcr, M_WARNING, 0, "%s", errmsg); + } + if (sql_num_rows() >= 1) { + if ((row = sql_fetch_row()) == NULL) { + Mmsg1(errmsg, _("error fetching row: %s\n"), sql_strerror()); + } else { + FilenameId = str_to_int64(row[0]); + if (FilenameId <= 0) { + Mmsg2(errmsg, _("Get DB Filename record %s found bad record: %d\n"), + cmd, FilenameId); + FilenameId = 0; + } + } + } else { + Mmsg1(errmsg, _("Filename record: %s not found.\n"), fname); + } + sql_free_result(); + } else { + Mmsg(errmsg, _("Filename record: %s not found in Catalog.\n"), fname); + } + return FilenameId; +} + +/** + * Get path record + * Returns: 0 on failure + * PathId on success + * + * DO NOT use Jmsg in this routine (see notes for get_file_record) + */ +int BDB::bdb_get_path_record(JCR *jcr) +{ + SQL_ROW row; + uint32_t PathId = 0; + + esc_name = check_pool_memory_size(esc_name, 2*pnl+2); + bdb_escape_string(jcr, esc_name, path, pnl); + + if (cached_path_id != 0 && cached_path_len == pnl && + strcmp(cached_path, path) == 0) { + return cached_path_id; + } + + Mmsg(cmd, "SELECT PathId FROM Path WHERE Path='%s'", esc_name); + + if (QueryDB(jcr, cmd)) { + char ed1[30]; + if (sql_num_rows() > 1) { + Mmsg2(errmsg, _("More than one Path!: %s for path: %s\n"), + edit_uint64(sql_num_rows(), ed1), path); + Jmsg(jcr, M_WARNING, 0, "%s", errmsg); + } + /* Even if there are multiple paths, take the first one */ + if (sql_num_rows() >= 1) { + if ((row = sql_fetch_row()) == NULL) { + Mmsg1(errmsg, _("error fetching row: %s\n"), sql_strerror()); + } else { + PathId = str_to_int64(row[0]); + if (PathId <= 0) { + Mmsg2(errmsg, _("Get DB path record %s found bad record: %s\n"), + cmd, edit_int64(PathId, ed1)); + PathId = 0; + } else { + /* Cache path */ + if (PathId != cached_path_id) { + cached_path_id = PathId; + cached_path_len = pnl; + pm_strcpy(cached_path, path); + } + } + } + } else { + Mmsg1(errmsg, _("Path record: %s not found.\n"), path); + } + sql_free_result(); + } else { + Mmsg(errmsg, _("Path record: %s not found in Catalog.\n"), path); + } + return PathId; +} + + +/** + * Get Job record for given JobId or Job name + * Returns: false on failure + * true on success + */ +bool BDB::bdb_get_job_record(JCR *jcr, JOB_DBR *jr) +{ + SQL_ROW row; + char ed1[50]; + char esc[MAX_ESCAPE_NAME_LENGTH]; + + bdb_lock(); + if (jr->JobId == 0) { + bdb_escape_string(jcr, esc, jr->Job, strlen(jr->Job)); + Mmsg(cmd, "SELECT VolSessionId,VolSessionTime," +"PoolId,StartTime,EndTime,JobFiles,JobBytes,JobTDate,Job,JobStatus," +"Type,Level,ClientId,Name,PriorJobId,RealEndTime,JobId,FileSetId," +"SchedTime,RealEndTime,ReadBytes,HasBase,PurgedFiles " +"FROM Job WHERE 
Job='%s'", esc); + } else { + Mmsg(cmd, "SELECT VolSessionId,VolSessionTime," +"PoolId,StartTime,EndTime,JobFiles,JobBytes,JobTDate,Job,JobStatus," +"Type,Level,ClientId,Name,PriorJobId,RealEndTime,JobId,FileSetId," +"SchedTime,RealEndTime,ReadBytes,HasBase,PurgedFiles " +"FROM Job WHERE JobId=%s", + edit_int64(jr->JobId, ed1)); + } + + if (!QueryDB(jcr, cmd)) { + bdb_unlock(); + return false; /* failed */ + } + if ((row = sql_fetch_row()) == NULL) { + Mmsg1(errmsg, _("No Job found for JobId %s\n"), edit_int64(jr->JobId, ed1)); + sql_free_result(); + bdb_unlock(); + return false; /* failed */ + } + + jr->VolSessionId = str_to_uint64(row[0]); + jr->VolSessionTime = str_to_uint64(row[1]); + jr->PoolId = str_to_int64(row[2]); + bstrncpy(jr->cStartTime, row[3]!=NULL?row[3]:"", sizeof(jr->cStartTime)); + bstrncpy(jr->cEndTime, row[4]!=NULL?row[4]:"", sizeof(jr->cEndTime)); + jr->JobFiles = str_to_int64(row[5]); + jr->JobBytes = str_to_int64(row[6]); + jr->JobTDate = str_to_int64(row[7]); + bstrncpy(jr->Job, row[8]!=NULL?row[8]:"", sizeof(jr->Job)); + jr->JobStatus = row[9]!=NULL?(int)*row[9]:JS_FatalError; + jr->JobType = row[10]!=NULL?(int)*row[10]:JT_BACKUP; + jr->JobLevel = row[11]!=NULL?(int)*row[11]:L_NONE; + jr->ClientId = str_to_uint64(row[12]!=NULL?row[12]:(char *)""); + bstrncpy(jr->Name, row[13]!=NULL?row[13]:"", sizeof(jr->Name)); + jr->PriorJobId = str_to_uint64(row[14]!=NULL?row[14]:(char *)""); + bstrncpy(jr->cRealEndTime, row[15]!=NULL?row[15]:"", sizeof(jr->cRealEndTime)); + if (jr->JobId == 0) { + jr->JobId = str_to_int64(row[16]); + } + jr->FileSetId = str_to_int64(row[17]); + bstrncpy(jr->cSchedTime, row[18]!=NULL?row[18]:"", sizeof(jr->cSchedTime)); + bstrncpy(jr->cRealEndTime, row[19]!=NULL?row[19]:"", sizeof(jr->cRealEndTime)); + jr->ReadBytes = str_to_int64(row[20]); + jr->StartTime = str_to_utime(jr->cStartTime); + jr->SchedTime = str_to_utime(jr->cSchedTime); + jr->EndTime = str_to_utime(jr->cEndTime); + jr->RealEndTime = str_to_utime(jr->cRealEndTime); + jr->HasBase = str_to_int64(row[21]); + jr->PurgedFiles = str_to_int64(row[22]); + sql_free_result(); + + bdb_unlock(); + return true; +} + +/** + * Find VolumeNames for a given JobId + * Returns: 0 on error or no Volumes found + * number of volumes on success + * Volumes are concatenated in VolumeNames + * separated by a vertical bar (|) in the order + * that they were written. 
+ * + * Returns: number of volumes on success + */ +int BDB::bdb_get_job_volume_names(JCR *jcr, JobId_t JobId, POOLMEM **VolumeNames) +{ + SQL_ROW row; + char ed1[50]; + int stat = 0; + int i; + + bdb_lock(); + /* Get one entry per VolumeName, but "sort" by VolIndex */ + Mmsg(cmd, + "SELECT VolumeName,MAX(VolIndex) FROM JobMedia,Media WHERE " + "JobMedia.JobId=%s AND JobMedia.MediaId=Media.MediaId " + "GROUP BY VolumeName " + "ORDER BY 2 ASC", edit_int64(JobId,ed1)); + + Dmsg1(130, "VolNam=%s\n", cmd); + *VolumeNames[0] = 0; + if (QueryDB(jcr, cmd)) { + Dmsg1(130, "Num rows=%d\n", sql_num_rows()); + if (sql_num_rows() <= 0) { + Mmsg1(errmsg, _("No volumes found for JobId=%d\n"), JobId); + stat = 0; + } else { + stat = sql_num_rows(); + for (i=0; i < stat; i++) { + if ((row = sql_fetch_row()) == NULL) { + Mmsg2(errmsg, _("Error fetching row %d: ERR=%s\n"), i, sql_strerror()); + Jmsg(jcr, M_ERROR, 0, "%s", errmsg); + stat = 0; + break; + } else { + if (*VolumeNames[0] != 0) { + pm_strcat(VolumeNames, "|"); + } + pm_strcat(VolumeNames, row[0]); + } + } + } + sql_free_result(); + } else { + Mmsg(errmsg, _("No Volume for JobId %d found in Catalog.\n"), JobId); + } + bdb_unlock(); + return stat; +} + +/** + * Find Volume parameters for a give JobId + * Returns: 0 on error or no Volumes found + * number of volumes on success + * List of Volumes and start/end file/blocks (malloced structure!) + * + * Returns: number of volumes on success + */ +int BDB::bdb_get_job_volume_parameters(JCR *jcr, JobId_t JobId, VOL_PARAMS **VolParams) +{ + SQL_ROW row; + char ed1[50]; + int stat = 0; + int i; + VOL_PARAMS *Vols = NULL; + + bdb_lock(); + Mmsg(cmd, +"SELECT VolumeName,MediaType,FirstIndex,LastIndex,StartFile," +"JobMedia.EndFile,StartBlock,JobMedia.EndBlock," +"Slot,StorageId,InChanger" +" FROM JobMedia,Media WHERE JobMedia.JobId=%s" +" AND JobMedia.MediaId=Media.MediaId ORDER BY VolIndex,JobMediaId", + edit_int64(JobId, ed1)); + + Dmsg1(130, "VolNam=%s\n", cmd); + if (QueryDB(jcr, cmd)) { + Dmsg1(200, "Num rows=%d\n", sql_num_rows()); + if (sql_num_rows() <= 0) { + Mmsg1(errmsg, _("No volumes found for JobId=%d\n"), JobId); + stat = 0; + } else { + stat = sql_num_rows(); + DBId_t *SId = NULL; + if (stat > 0) { + *VolParams = Vols = (VOL_PARAMS *)malloc(stat * sizeof(VOL_PARAMS)); + SId = (DBId_t *)malloc(stat * sizeof(DBId_t)); + } + for (i=0; i < stat; i++) { + if ((row = sql_fetch_row()) == NULL) { + Mmsg2(errmsg, _("Error fetching row %d: ERR=%s\n"), i, sql_strerror()); + Jmsg(jcr, M_ERROR, 0, "%s", errmsg); + stat = 0; + break; + } else { + DBId_t StorageId; + uint32_t StartBlock, EndBlock, StartFile, EndFile; + bstrncpy(Vols[i].VolumeName, row[0], MAX_NAME_LENGTH); + bstrncpy(Vols[i].MediaType, row[1], MAX_NAME_LENGTH); + Vols[i].FirstIndex = str_to_uint64(row[2]); + Vols[i].LastIndex = str_to_uint64(row[3]); + StartFile = str_to_uint64(row[4]); + EndFile = str_to_uint64(row[5]); + StartBlock = str_to_uint64(row[6]); + EndBlock = str_to_uint64(row[7]); + Vols[i].StartAddr = (((uint64_t)StartFile)<<32) | StartBlock; + Vols[i].EndAddr = (((uint64_t)EndFile)<<32) | EndBlock; + Vols[i].Slot = str_to_uint64(row[8]); + StorageId = str_to_uint64(row[9]); + Vols[i].InChanger = str_to_uint64(row[10]); + Vols[i].Storage[0] = 0; + SId[i] = StorageId; + } + } + for (i=0; i < stat; i++) { + if (SId[i] != 0) { + Mmsg(cmd, "SELECT Name from Storage WHERE StorageId=%s", + edit_int64(SId[i], ed1)); + if (QueryDB(jcr, cmd)) { + if ((row = sql_fetch_row()) && row[0]) { + bstrncpy(Vols[i].Storage, row[0], 
MAX_NAME_LENGTH); + } + } + } + } + if (SId) { + free(SId); + } + } + sql_free_result(); + } + bdb_unlock(); + return stat; +} + + + +/** + * Get the number of pool records + * + * Returns: -1 on failure + * number on success + */ +int BDB::bdb_get_num_pool_records(JCR *jcr) +{ + int stat = 0; + + bdb_lock(); + Mmsg(cmd, "SELECT count(*) from Pool"); + stat = get_sql_record_max(jcr, this); + bdb_unlock(); + return stat; +} + +/** + * This function returns a list of all the Pool record ids. + * The caller must free ids if non-NULL. + * + * Returns 0: on failure + * 1: on success + */ +int BDB::bdb_get_pool_ids(JCR *jcr, int *num_ids, uint32_t *ids[]) +{ + SQL_ROW row; + int stat = 0; + int i = 0; + uint32_t *id; + + bdb_lock(); + *ids = NULL; + Mmsg(cmd, "SELECT PoolId FROM Pool ORDER By Name"); + if (QueryDB(jcr, cmd)) { + *num_ids = sql_num_rows(); + if (*num_ids > 0) { + id = (uint32_t *)malloc(*num_ids * sizeof(uint32_t)); + while ((row = sql_fetch_row()) != NULL) { + id[i++] = str_to_uint64(row[0]); + } + *ids = id; + } + sql_free_result(); + stat = 1; + } else { + Mmsg(errmsg, _("Pool id select failed: ERR=%s\n"), sql_strerror()); + Jmsg(jcr, M_ERROR, 0, "%s", errmsg); + stat = 0; + } + bdb_unlock(); + return stat; +} + +/** + * This function returns a list of all the Client record ids. + * The caller must free ids if non-NULL. + * + * Returns 0: on failure + * 1: on success + */ +int BDB::bdb_get_client_ids(JCR *jcr, int *num_ids, uint32_t *ids[]) +{ + SQL_ROW row; + int stat = 0; + int i = 0; + uint32_t *id; + + bdb_lock(); + *ids = NULL; + Mmsg(cmd, "SELECT ClientId FROM Client ORDER BY Name ASC"); + if (QueryDB(jcr, cmd)) { + *num_ids = sql_num_rows(); + if (*num_ids > 0) { + id = (uint32_t *)malloc(*num_ids * sizeof(uint32_t)); + while ((row = sql_fetch_row()) != NULL) { + id[i++] = str_to_uint64(row[0]); + } + *ids = id; + } + sql_free_result(); + stat = 1; + } else { + Mmsg(errmsg, _("Client id select failed: ERR=%s\n"), sql_strerror()); + Jmsg(jcr, M_ERROR, 0, "%s", errmsg); + stat = 0; + } + bdb_unlock(); + return stat; +} + +/** + * Get Pool Id, Scratch Pool Id, Recycle Pool Id + * Returns: false on failure + * true on success + */ +bool BDB::bdb_get_pool_record(JCR *jcr, POOL_DBR *pdbr) +{ + SQL_ROW row; + bool ok = false; + char ed1[50]; + char esc[MAX_ESCAPE_NAME_LENGTH]; + + bdb_lock(); + if (pdbr->PoolId != 0) { /* find by id */ + Mmsg(cmd, +"SELECT PoolId,Name,NumVols,MaxVols,UseOnce,UseCatalog,AcceptAnyVolume," +"AutoPrune,Recycle,VolRetention,VolUseDuration,MaxVolJobs,MaxVolFiles," +"MaxVolBytes,PoolType,LabelType,LabelFormat,RecyclePoolId,ScratchPoolId," +"ActionOnPurge,CacheRetention FROM Pool WHERE Pool.PoolId=%s", + edit_int64(pdbr->PoolId, ed1)); + } else { /* find by name */ + bdb_escape_string(jcr, esc, pdbr->Name, strlen(pdbr->Name)); + Mmsg(cmd, +"SELECT PoolId,Name,NumVols,MaxVols,UseOnce,UseCatalog,AcceptAnyVolume," +"AutoPrune,Recycle,VolRetention,VolUseDuration,MaxVolJobs,MaxVolFiles," +"MaxVolBytes,PoolType,LabelType,LabelFormat,RecyclePoolId,ScratchPoolId," +"ActionOnPurge,CacheRetention FROM Pool WHERE Pool.Name='%s'", esc); + } + if (QueryDB(jcr, cmd)) { + if (sql_num_rows() > 1) { + char ed1[30]; + Mmsg1(errmsg, _("More than one Pool! 
Num=%s\n"), + edit_uint64(sql_num_rows(), ed1)); + Jmsg(jcr, M_ERROR, 0, "%s", errmsg); + } else if (sql_num_rows() == 1) { + if ((row = sql_fetch_row()) == NULL) { + Mmsg1(errmsg, _("error fetching row: %s\n"), sql_strerror()); + Jmsg(jcr, M_ERROR, 0, "%s", errmsg); + } else { + pdbr->PoolId = str_to_int64(row[0]); + bstrncpy(pdbr->Name, row[1]!=NULL?row[1]:"", sizeof(pdbr->Name)); + pdbr->NumVols = str_to_int64(row[2]); + pdbr->MaxVols = str_to_int64(row[3]); + pdbr->UseOnce = str_to_int64(row[4]); + pdbr->UseCatalog = str_to_int64(row[5]); + pdbr->AcceptAnyVolume = str_to_int64(row[6]); + pdbr->AutoPrune = str_to_int64(row[7]); + pdbr->Recycle = str_to_int64(row[8]); + pdbr->VolRetention = str_to_int64(row[9]); + pdbr->VolUseDuration = str_to_int64(row[10]); + pdbr->MaxVolJobs = str_to_int64(row[11]); + pdbr->MaxVolFiles = str_to_int64(row[12]); + pdbr->MaxVolBytes = str_to_uint64(row[13]); + bstrncpy(pdbr->PoolType, row[14]!=NULL?row[14]:"", sizeof(pdbr->PoolType)); + pdbr->LabelType = str_to_int64(row[15]); + bstrncpy(pdbr->LabelFormat, row[16]!=NULL?row[16]:"", sizeof(pdbr->LabelFormat)); + pdbr->RecyclePoolId = str_to_int64(row[17]); + pdbr->ScratchPoolId = str_to_int64(row[18]); + pdbr->ActionOnPurge = str_to_int32(row[19]); + pdbr->CacheRetention = str_to_int64(row[20]); + ok = true; + } + } + sql_free_result(); + } + bdb_unlock(); + return ok; +} +/** + * Get Pool numvols + * If the PoolId is non-zero, we get its record, + * otherwise, we search on the PoolName and we compute the number of volumes + * + * Returns: false on failure + * true on success + */ +bool BDB::bdb_get_pool_numvols(JCR *jcr, POOL_DBR *pdbr) +{ + bool ok; + char ed1[50]; + + ok = db_get_pool_record(jcr, this, pdbr); + + bdb_lock(); + if (ok) { + uint32_t NumVols; + Mmsg(cmd, "SELECT count(*) from Media WHERE PoolId=%s", + edit_int64(pdbr->PoolId, ed1)); + NumVols = get_sql_record_max(jcr, this); + Dmsg2(400, "Actual NumVols=%d Pool NumVols=%d\n", NumVols, pdbr->NumVols); + if (NumVols != pdbr->NumVols) { + pdbr->NumVols = NumVols; + db_update_pool_record(jcr, this, pdbr); + } + } else { + Mmsg(errmsg, _("Pool record not found in Catalog.\n")); + } + bdb_unlock(); + return ok; +} + +/* + * Free restoreobject record (some fields are allocated through malloc) + */ +void db_free_restoreobject_record(JCR *jcr, ROBJECT_DBR *rr) +{ + if (rr->object) { + free(rr->object); + } + if (rr->object_name) { + free(rr->object_name); + } + if (rr->plugin_name) { + free(rr->plugin_name); + } + rr->object = rr->plugin_name = rr->object_name = NULL; +} + +/* + * Get RestoreObject Record + * If the RestoreObjectId is non-zero, we get its record + * + * You must call db_free_restoreobject_record() after db_get_restoreobject_record() + * + * Returns: false on failure + * true on success + */ +bool BDB::bdb_get_restoreobject_record(JCR *jcr, ROBJECT_DBR *rr) +{ + SQL_ROW row; + int stat = false; + char ed1[50]; + int32_t len; + + bdb_lock(); + Mmsg(cmd, + "SELECT ObjectName, PluginName, ObjectType, JobId, ObjectCompression, " + "RestoreObject, ObjectLength, ObjectFullLength, FileIndex " + "FROM RestoreObject " + "WHERE RestoreObjectId=%s", + edit_int64(rr->RestoreObjectId, ed1)); + + /* Using the JobId permits to check the Job name against ACLs and + * make sure that the current user is authorized to see the Restore object + */ + if (rr->JobId) { + pm_strcat(cmd, " AND JobId="); + pm_strcat(cmd, edit_int64(rr->JobId, ed1)); + + } else if (rr->JobIds && is_a_number_list(rr->JobIds)) { + pm_strcat(cmd, " AND JobId IN ("); + 
pm_strcat(cmd, rr->JobIds); + pm_strcat(cmd, ")"); + } + + if (QueryDB(jcr, cmd)) { + if (sql_num_rows() > 1) { + char ed1[30]; + Mmsg1(errmsg, _("Error got %s RestoreObjects but expected only one!\n"), + edit_uint64(sql_num_rows(), ed1)); + sql_data_seek(sql_num_rows()-1); + } + if ((row = sql_fetch_row()) == NULL) { + Mmsg1(errmsg, _("RestoreObject record \"%d\" not found.\n"), rr->RestoreObjectId); + } else { + db_free_restoreobject_record(jcr, rr); + rr->object_name = bstrdup(row[0]); + rr->plugin_name = bstrdup(row[1]); + rr->FileType = str_to_uint64(row[2]); + rr->JobId = str_to_uint64(row[3]); + rr->object_compression = str_to_int64(row[4]); + rr->object_len = str_to_uint64(row[6]); + rr->object_full_len = str_to_uint64(row[7]); + rr->object_index = str_to_uint64(row[8]); + + bdb_unescape_object(jcr, + row[5], /* Object */ + rr->object_len, /* Object length */ + &cmd, &len); + + if (rr->object_compression > 0) { + int out_len = rr->object_full_len + 100; /* full length */ + char *obj = (char *)malloc(out_len); + Zinflate(cmd, rr->object_len, obj, out_len); /* out_len is updated */ + if (out_len != (int)rr->object_full_len) { + Dmsg3(10, "Decompression failed. Len wanted=%d got=%d. Object=%s\n", + rr->object_full_len, out_len, rr->plugin_name); + + Mmsg(errmsg, _("Decompression failed. Len wanted=%d got=%d. Object=%s\n"), + rr->object_full_len, out_len, rr->plugin_name); + } + obj[out_len] = 0; + rr->object = obj; + rr->object_len = out_len; + + } else { + rr->object = (char *)malloc(sizeof(char)*(len+1)); + memcpy(rr->object, cmd, len); + rr->object[len] = 0; + rr->object_len = len; + } + + stat = true; + } + sql_free_result(); + } else { + Mmsg(errmsg, _("RestoreObject record not found in Catalog.\n")); + } + bdb_unlock(); + return stat; +} + +/** + * Get Client Record + * If the ClientId is non-zero, we get its record, + * otherwise, we search on the Client Name + * + * Returns: 0 on failure + * 1 on success + */ +int BDB::bdb_get_client_record(JCR *jcr, CLIENT_DBR *cdbr) +{ + SQL_ROW row; + int stat = 0; + char ed1[50]; + char esc[MAX_ESCAPE_NAME_LENGTH]; + + bdb_lock(); + if (cdbr->ClientId != 0) { /* find by id */ + Mmsg(cmd, +"SELECT ClientId,Name,Uname,AutoPrune,FileRetention,JobRetention " +"FROM Client WHERE Client.ClientId=%s", + edit_int64(cdbr->ClientId, ed1)); + } else { /* find by name */ + bdb_escape_string(jcr, esc, cdbr->Name, strlen(cdbr->Name)); + Mmsg(cmd, +"SELECT ClientId,Name,Uname,AutoPrune,FileRetention,JobRetention " +"FROM Client WHERE Client.Name='%s'", esc); + } + + if (QueryDB(jcr, cmd)) { + if (sql_num_rows() > 1) { + Mmsg1(errmsg, _("More than one Client!: %s\n"), + edit_uint64(sql_num_rows(), ed1)); + Jmsg(jcr, M_ERROR, 0, "%s", errmsg); + } else if (sql_num_rows() == 1) { + if ((row = sql_fetch_row()) == NULL) { + Mmsg1(errmsg, _("error fetching row: %s\n"), sql_strerror()); + Jmsg(jcr, M_ERROR, 0, "%s", errmsg); + } else { + cdbr->ClientId = str_to_int64(row[0]); + bstrncpy(cdbr->Name, row[1]!=NULL?row[1]:"", sizeof(cdbr->Name)); + bstrncpy(cdbr->Uname, row[2]!=NULL?row[2]:"", sizeof(cdbr->Uname)); + cdbr->AutoPrune = str_to_int64(row[3]); + cdbr->FileRetention = str_to_int64(row[4]); + cdbr->JobRetention = str_to_int64(row[5]); + stat = 1; + } + } else { + Mmsg(errmsg, _("Client record not found in Catalog.\n")); + } + sql_free_result(); + } else { + Mmsg(errmsg, _("Client record not found in Catalog.\n")); + } + bdb_unlock(); + return stat; +} + +/** + * Get Counter Record + * + * Returns: 0 on failure + * 1 on success + */ +bool 
BDB::bdb_get_counter_record(JCR *jcr, COUNTER_DBR *cr) +{ + SQL_ROW row; + char esc[MAX_ESCAPE_NAME_LENGTH]; + + bdb_lock(); + bdb_escape_string(jcr, esc, cr->Counter, strlen(cr->Counter)); + + Mmsg(cmd, select_counter_values[bdb_get_type_index()], esc); + if (QueryDB(jcr, cmd)) { + + /* If more than one, report error, but return first row */ + if (sql_num_rows() > 1) { + Mmsg1(errmsg, _("More than one Counter!: %d\n"), sql_num_rows()); + Jmsg(jcr, M_ERROR, 0, "%s", errmsg); + } + if (sql_num_rows() >= 1) { + if ((row = sql_fetch_row()) == NULL) { + Mmsg1(errmsg, _("error fetching Counter row: %s\n"), sql_strerror()); + Jmsg(jcr, M_ERROR, 0, "%s", errmsg); + sql_free_result(); + bdb_unlock(); + return false; + } + cr->MinValue = str_to_int64(row[0]); + cr->MaxValue = str_to_int64(row[1]); + cr->CurrentValue = str_to_int64(row[2]); + if (row[3]) { + bstrncpy(cr->WrapCounter, row[3], sizeof(cr->WrapCounter)); + } else { + cr->WrapCounter[0] = 0; + } + sql_free_result(); + bdb_unlock(); + return true; + } + sql_free_result(); + } else { + Mmsg(errmsg, _("Counter record: %s not found in Catalog.\n"), cr->Counter); + } + bdb_unlock(); + return false; +} + + +/** + * Get FileSet Record + * If the FileSetId is non-zero, we get its record, + * otherwise, we search on the name + * + * Returns: 0 on failure + * id on success + */ +int BDB::bdb_get_fileset_record(JCR *jcr, FILESET_DBR *fsr) +{ + SQL_ROW row; + int stat = 0; + char ed1[50]; + char esc[MAX_ESCAPE_NAME_LENGTH]; + + bdb_lock(); + if (fsr->FileSetId != 0) { /* find by id */ + Mmsg(cmd, + "SELECT FileSetId,FileSet,MD5,CreateTime FROM FileSet " + "WHERE FileSetId=%s", + edit_int64(fsr->FileSetId, ed1)); + } else { /* find by name */ + bdb_escape_string(jcr, esc, fsr->FileSet, strlen(fsr->FileSet)); + Mmsg(cmd, + "SELECT FileSetId,FileSet,MD5,CreateTime FROM FileSet " + "WHERE FileSet='%s' ORDER BY CreateTime DESC LIMIT 1", esc); + } + + if (QueryDB(jcr, cmd)) { + if (sql_num_rows() > 1) { + char ed1[30]; + Mmsg1(errmsg, _("Error got %s FileSets but expected only one!\n"), + edit_uint64(sql_num_rows(), ed1)); + sql_data_seek(sql_num_rows()-1); + } + if ((row = sql_fetch_row()) == NULL) { + Mmsg1(errmsg, _("FileSet record \"%s\" not found.\n"), fsr->FileSet); + } else { + fsr->FileSetId = str_to_int64(row[0]); + bstrncpy(fsr->FileSet, row[1]!=NULL?row[1]:"", sizeof(fsr->FileSet)); + bstrncpy(fsr->MD5, row[2]!=NULL?row[2]:"", sizeof(fsr->MD5)); + bstrncpy(fsr->cCreateTime, row[3]!=NULL?row[3]:"", sizeof(fsr->cCreateTime)); + stat = fsr->FileSetId; + } + sql_free_result(); + } else { + Mmsg(errmsg, _("FileSet record not found in Catalog.\n")); + } + bdb_unlock(); + return stat; +} + + +/** + * Get the number of Media records + * + * Returns: -1 on failure + * number on success + */ +int BDB::bdb_get_num_media_records(JCR *jcr) +{ + int stat = 0; + + bdb_lock(); + Mmsg(cmd, "SELECT count(*) from Media"); + stat = get_sql_record_max(jcr, this); + bdb_unlock(); + return stat; +} + +/** + * This function returns a list of all the Media record ids for + * the current Pool, the correct Media Type, Recyle, Enabled, StorageId, VolBytes + * VolumeName if specified + * The caller must free ids if non-NULL. 
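+ *
+ * A minimal caller-side sketch (db, jcr and mr are illustrative names,
+ * not taken from any particular caller); note the free() of the
+ * returned array:
+ *
+ *   int num_ids = 0;
+ *   uint32_t *ids = NULL;
+ *   if (db->bdb_get_media_ids(jcr, &mr, &num_ids, &ids)) {
+ *      for (int i = 0; i < num_ids; i++) {
+ *         Dmsg1(100, "MediaId=%lu\n", (unsigned long)ids[i]);
+ *      }
+ *      if (ids) {
+ *         free(ids);
+ *      }
+ *   }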
+ * + * Returns false: on failure + * true: on success + */ +bool BDB::bdb_get_media_ids(JCR *jcr, MEDIA_DBR *mr, int *num_ids, uint32_t *ids[]) +{ + SQL_ROW row; + int i = 0; + uint32_t *id; + char ed1[50]; + bool ok = false; + char buf[MAX_NAME_LENGTH*3]; /* Can contain MAX_NAME_LENGTH*2+1 + AND ....='' */ + char esc[MAX_NAME_LENGTH*2+1]; + + bdb_lock(); + *ids = NULL; + + Mmsg(cmd, "SELECT DISTINCT MediaId FROM Media WHERE Enabled=%d ", + mr->Enabled); + + if (mr->Recycle >= 0) { + bsnprintf(buf, sizeof(buf), "AND Recycle=%d ", mr->Recycle); + pm_strcat(cmd, buf); + } + + if (*mr->MediaType) { + bdb_escape_string(jcr, esc, mr->MediaType, strlen(mr->MediaType)); + bsnprintf(buf, sizeof(buf), "AND MediaType='%s' ", esc); + pm_strcat(cmd, buf); + } + + if (mr->sid_group) { + bsnprintf(buf, sizeof(buf), "AND StorageId IN (%s) ", mr->sid_group); + pm_strcat(cmd, buf); + } else if (mr->StorageId) { + bsnprintf(buf, sizeof(buf), "AND StorageId=%s ", edit_uint64(mr->StorageId, ed1)); + pm_strcat(cmd, buf); + } + + if (mr->PoolId) { + bsnprintf(buf, sizeof(buf), "AND PoolId=%s ", edit_uint64(mr->PoolId, ed1)); + pm_strcat(cmd, buf); + } + + if (mr->VolBytes) { + bsnprintf(buf, sizeof(buf), "AND VolBytes > %s ", edit_uint64(mr->VolBytes, ed1)); + pm_strcat(cmd, buf); + } + + if (*mr->VolumeName) { + bdb_escape_string(jcr, esc, mr->VolumeName, strlen(mr->VolumeName)); + bsnprintf(buf, sizeof(buf), "AND VolumeName = '%s' ", esc); + pm_strcat(cmd, buf); + } + + if (*mr->VolStatus) { + bdb_escape_string(jcr, esc, mr->VolStatus, strlen(mr->VolStatus)); + bsnprintf(buf, sizeof(buf), "AND VolStatus = '%s' ", esc); + pm_strcat(cmd, buf); + } + + /* Filter the volumes with the CacheRetention */ + if (mr->CacheRetention) { + bsnprintf(buf, sizeof(buf), "AND %s ", prune_cache[bdb_get_type_index()]); + pm_strcat(cmd, buf); + } + + Dmsg1(100, "q=%s\n", cmd); + + if (QueryDB(jcr, cmd)) { + *num_ids = sql_num_rows(); + if (*num_ids > 0) { + id = (uint32_t *)malloc(*num_ids * sizeof(uint32_t)); + while ((row = sql_fetch_row()) != NULL) { + id[i++] = str_to_uint64(row[0]); + } + *ids = id; + } + sql_free_result(); + ok = true; + } else { + Mmsg(errmsg, _("Media id select failed: ERR=%s\n"), sql_strerror()); + Jmsg(jcr, M_ERROR, 0, "%s", errmsg); + ok = false; + } + bdb_unlock(); + return ok; +} + +/** + * This function returns a list of all the DBIds that are returned + * for the query. 
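+ *
+ * Typical use (illustrative): build the SELECT in a POOL_MEM, call
+ * bdb_get_query_dbids(), then read ids.num_ids entries from ids.DBId.
+ * The dbid_list keeps its DBId buffer and only reallocates it when the
+ * new result is larger than max_ids, as implemented below.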
+ * + * Returns false: on failure + * true: on success + */ +bool BDB::bdb_get_query_dbids(JCR *jcr, POOL_MEM &query, dbid_list &ids) +{ + SQL_ROW row; + int i = 0; + bool ok = false; + + bdb_lock(); + ids.num_ids = 0; + if (QueryDB(jcr, query.c_str())) { + ids.num_ids = sql_num_rows(); + if (ids.num_ids > 0) { + if (ids.max_ids < ids.num_ids) { + free(ids.DBId); + ids.DBId = (DBId_t *)malloc(ids.num_ids * sizeof(DBId_t)); + } + while ((row = sql_fetch_row()) != NULL) { + ids.DBId[i++] = str_to_uint64(row[0]); + } + } + sql_free_result(); + ok = true; + } else { + Mmsg(errmsg, _("query dbids failed: ERR=%s\n"), sql_strerror()); + Jmsg(jcr, M_ERROR, 0, "%s", errmsg); + ok = false; + } + bdb_unlock(); + return ok; +} + +/** + * Get Media Record + * + * Returns: false: on failure + * true: on success + */ +bool BDB::bdb_get_media_record(JCR *jcr, MEDIA_DBR *mr) +{ + SQL_ROW row; + char ed1[50]; + bool ok = false; + char esc[MAX_ESCAPE_NAME_LENGTH]; + + bdb_lock(); + if (mr->MediaId == 0 && mr->VolumeName[0] == 0) { + Mmsg(cmd, "SELECT count(*) from Media"); + mr->MediaId = get_sql_record_max(jcr, this); + bdb_unlock(); + return true; + } + if (mr->MediaId != 0) { /* find by id */ + Mmsg(cmd, "SELECT MediaId,VolumeName,VolJobs,VolFiles," + "VolBlocks,VolBytes,VolABytes,VolHoleBytes,VolHoles,VolMounts," + "VolErrors,VolWrites,MaxVolBytes,VolCapacityBytes," + "MediaType,VolStatus,PoolId,VolRetention,VolUseDuration,MaxVolJobs," + "MaxVolFiles,Recycle,Slot,FirstWritten,LastWritten,InChanger," + "EndFile,EndBlock,VolType,VolParts,VolCloudParts,LastPartBytes," + "LabelType,LabelDate,StorageId," + "Enabled,LocationId,RecycleCount,InitialWrite," + "ScratchPoolId,RecyclePoolId,VolReadTime,VolWriteTime,ActionOnPurge,CacheRetention " + "FROM Media WHERE MediaId=%s", + edit_int64(mr->MediaId, ed1)); + } else { /* find by name */ + bdb_escape_string(jcr, esc, mr->VolumeName, strlen(mr->VolumeName)); + Mmsg(cmd, "SELECT MediaId,VolumeName,VolJobs,VolFiles," + "VolBlocks,VolBytes,VolABytes,VolHoleBytes,VolHoles,VolMounts," + "VolErrors,VolWrites,MaxVolBytes,VolCapacityBytes," + "MediaType,VolStatus,PoolId,VolRetention,VolUseDuration,MaxVolJobs," + "MaxVolFiles,Recycle,Slot,FirstWritten,LastWritten,InChanger," + "EndFile,EndBlock,VolType,VolParts,VolCloudParts,LastPartBytes," + "LabelType,LabelDate,StorageId," + "Enabled,LocationId,RecycleCount,InitialWrite," + "ScratchPoolId,RecyclePoolId,VolReadTime,VolWriteTime,ActionOnPurge,CacheRetention " + "FROM Media WHERE VolumeName='%s'", esc); + } + + if (QueryDB(jcr, cmd)) { + char ed1[50]; + if (sql_num_rows() > 1) { + Mmsg1(errmsg, _("More than one Volume!: %s\n"), + edit_uint64(sql_num_rows(), ed1)); + Jmsg(jcr, M_ERROR, 0, "%s", errmsg); + } else if (sql_num_rows() == 1) { + if ((row = sql_fetch_row()) == NULL) { + Mmsg1(errmsg, _("error fetching row: %s\n"), sql_strerror()); + Jmsg(jcr, M_ERROR, 0, "%s", errmsg); + } else { + mr->MediaId = str_to_int64(row[0]); + bstrncpy(mr->VolumeName, row[1]!=NULL?row[1]:"", sizeof(mr->VolumeName)); + mr->VolJobs = str_to_int64(row[2]); + mr->VolFiles = str_to_int64(row[3]); + mr->VolBlocks = str_to_int64(row[4]); + mr->VolBytes = str_to_uint64(row[5]); + mr->VolABytes = str_to_uint64(row[6]); + mr->VolHoleBytes = str_to_uint64(row[7]); + mr->VolHoles = str_to_int64(row[8]); + mr->VolMounts = str_to_int64(row[9]); + mr->VolErrors = str_to_int64(row[10]); + mr->VolWrites = str_to_int64(row[11]); + mr->MaxVolBytes = str_to_uint64(row[12]); + mr->VolCapacityBytes = str_to_uint64(row[13]); + bstrncpy(mr->MediaType, 
row[14]!=NULL?row[14]:"", sizeof(mr->MediaType)); + bstrncpy(mr->VolStatus, row[15]!=NULL?row[15]:"", sizeof(mr->VolStatus)); + mr->PoolId = str_to_int64(row[16]); + mr->VolRetention = str_to_uint64(row[17]); + mr->VolUseDuration = str_to_uint64(row[18]); + mr->MaxVolJobs = str_to_int64(row[19]); + mr->MaxVolFiles = str_to_int64(row[20]); + mr->Recycle = str_to_int64(row[21]); + mr->Slot = str_to_int64(row[22]); + bstrncpy(mr->cFirstWritten, row[23]!=NULL?row[23]:"", sizeof(mr->cFirstWritten)); + mr->FirstWritten = (time_t)str_to_utime(mr->cFirstWritten); + bstrncpy(mr->cLastWritten, row[24]!=NULL?row[24]:"", sizeof(mr->cLastWritten)); + mr->LastWritten = (time_t)str_to_utime(mr->cLastWritten); + mr->InChanger = str_to_uint64(row[25]); + mr->EndFile = str_to_uint64(row[26]); + mr->EndBlock = str_to_uint64(row[27]); + mr->VolType = str_to_int64(row[28]); + mr->VolParts = str_to_int64(row[29]); + mr->VolCloudParts = str_to_int64(row[30]); + mr->LastPartBytes = str_to_uint64(row[31]); + mr->LabelType = str_to_int64(row[32]); + bstrncpy(mr->cLabelDate, row[33]!=NULL?row[33]:"", sizeof(mr->cLabelDate)); + mr->LabelDate = (time_t)str_to_utime(mr->cLabelDate); + mr->StorageId = str_to_int64(row[34]); + mr->Enabled = str_to_int64(row[35]); + mr->LocationId = str_to_int64(row[36]); + mr->RecycleCount = str_to_int64(row[37]); + bstrncpy(mr->cInitialWrite, row[38]!=NULL?row[38]:"", sizeof(mr->cInitialWrite)); + mr->InitialWrite = (time_t)str_to_utime(mr->cInitialWrite); + mr->ScratchPoolId = str_to_int64(row[39]); + mr->RecyclePoolId = str_to_int64(row[40]); + mr->VolReadTime = str_to_int64(row[41]); + mr->VolWriteTime = str_to_int64(row[42]); + mr->ActionOnPurge = str_to_int32(row[43]); + mr->CacheRetention = str_to_int64(row[44]); + + ok = true; + } + } else { + if (mr->MediaId != 0) { + Mmsg1(errmsg, _("Media record with MediaId=%s not found.\n"), + edit_int64(mr->MediaId, ed1)); + } else { + Mmsg1(errmsg, _("Media record for Volume name \"%s\" not found.\n"), + mr->VolumeName); + } + } + sql_free_result(); + } else { + if (mr->MediaId != 0) { + Mmsg(errmsg, _("Media record for MediaId=%u not found in Catalog.\n"), + mr->MediaId); + } else { + Mmsg(errmsg, _("Media record for Volume Name \"%s\" not found in Catalog.\n"), + mr->VolumeName); + } } + bdb_unlock(); + return ok; +} + +/* Remove all MD5 from a query (can save lot of memory with many files) */ +static void strip_md5(char *q) +{ + char *p = q; + while ((p = strstr(p, ", MD5"))) { + memset(p, ' ', 5 * sizeof(char)); + } +} + +/** + * Find the last "accurate" backup state (that can take deleted files in + * account) + * 1) Get all files with jobid in list (F subquery) + * Get all files in BaseFiles with jobid in list + * 2) Take only the last version of each file (Temp subquery) => accurate list + * is ok + * 3) Join the result to file table to get fileindex, jobid and lstat information + * + * TODO: See if we can do the SORT only if needed (as an argument) + */ +bool BDB::bdb_get_file_list(JCR *jcr, char *jobids, int opts, + DB_RESULT_HANDLER *result_handler, void *ctx) +{ + const char *type; + + if (opts & DBL_ALL_FILES) { + type = ""; + } else { + /* Only non-deleted files */ + type = "WHERE FileIndex > 0"; + } + if (opts & DBL_DELETED) { + type = "WHERE FileIndex <= 0"; + } + if (!*jobids) { + bdb_lock(); + Mmsg(errmsg, _("ERR=JobIds are empty\n")); + bdb_unlock(); + return false; + } + POOL_MEM buf(PM_MESSAGE); + POOL_MEM buf2(PM_MESSAGE); + if (opts & DBL_USE_DELTA) { + Mmsg(buf2, 
select_recent_version_with_basejob_and_delta[bdb_get_type_index()], + jobids, jobids, jobids, jobids); + + } else { + Mmsg(buf2, select_recent_version_with_basejob[bdb_get_type_index()], + jobids, jobids, jobids, jobids); + } + + /* bsr code is optimized for JobId sorted, with Delta, we need to get + * them ordered by date. JobTDate and JobId can be mixed if using Copy + * or Migration + */ + Mmsg(buf, +"SELECT Path.Path, Filename.Name, T1.FileIndex, T1.JobId, LStat, DeltaSeq, MD5 " + "FROM ( %s ) AS T1 " + "JOIN Filename ON (Filename.FilenameId = T1.FilenameId) " + "JOIN Path ON (Path.PathId = T1.PathId) %s " +"ORDER BY T1.JobTDate, FileIndex ASC",/* Return sorted by JobTDate */ + /* FileIndex for restore code */ + buf2.c_str(), type); + + if (!(opts & DBL_USE_MD5)) { + strip_md5(buf.c_str()); + } + + Dmsg1(100, "q=%s\n", buf.c_str()); + + return bdb_big_sql_query(buf.c_str(), result_handler, ctx); +} + +/** + * This procedure gets the base jobid list used by jobids, + */ +bool BDB::bdb_get_used_base_jobids(JCR *jcr, + POOLMEM *jobids, db_list_ctx *result) +{ + POOL_MEM buf; + + Mmsg(buf, + "SELECT DISTINCT BaseJobId " + " FROM Job JOIN BaseFiles USING (JobId) " + " WHERE Job.HasBase = 1 " + " AND Job.JobId IN (%s) ", jobids); + return bdb_sql_query(buf.c_str(), db_list_handler, result); +} + +/* Mutex used to have global counter on btemp table */ +static pthread_mutex_t btemp_mutex = PTHREAD_MUTEX_INITIALIZER; +static uint32_t btemp_cur = 1; + +/** + * The decision do change an incr/diff was done before + * Full : do nothing + * Differential : get the last full id + * Incremental : get the last full + last diff + last incr(s) ids + * + * If you specify jr->StartTime, it will be used to limit the search + * in the time. (usually now) + * + * TODO: look and merge from ua_restore.c + */ +bool BDB::bdb_get_accurate_jobids(JCR *jcr, + JOB_DBR *jr, db_list_ctx *jobids) +{ + bool ret=false; + char clientid[50], jobid[50], filesetid[50]; + char date[MAX_TIME_LENGTH]; + char esc[MAX_ESCAPE_NAME_LENGTH]; + POOL_MEM query(PM_MESSAGE), name(PM_FNAME); + + /* Take the current time as upper limit if nothing else specified */ + utime_t StartTime = (jr->StartTime)?jr->StartTime:time(NULL); + + bstrutime(date, sizeof(date), StartTime + 1); + jobids->reset(); + + /* If we are comming from bconsole, we must ensure that we + * have a unique name. 
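+ *
+ * Concretely, the code below picks the suffix of the btemp3 work table:
+ * a running job with JobId=1234 uses btemp31234, while a bconsole
+ * session (JobId=0) uses btemp301, btemp302, ... from the btemp_cur
+ * counter. The table is dropped again at bail_out.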
+ */ + if (jcr->JobId == 0) { + P(btemp_mutex); + bsnprintf(jobid, sizeof(jobid), "0%u", btemp_cur++); + V(btemp_mutex); + } else { + edit_uint64(jcr->JobId, jobid); + } + + if (jr->Name[0] != 0) { + bdb_escape_string(jcr, esc, jr->Name, strlen(jr->Name)); + Mmsg(name, " AND Name = '%s' ", esc); + } + + /* First, find the last good Full backup for this job/client/fileset */ + Mmsg(query, create_temp_accurate_jobids[bdb_get_type_index()], + jobid, + edit_uint64(jr->ClientId, clientid), + date, + edit_uint64(jr->FileSetId, filesetid), + name.c_str() + ); + + if (!bdb_sql_query(query.c_str(), NULL, NULL)) { + goto bail_out; + } + + if (jr->JobLevel == L_INCREMENTAL || jr->JobLevel == L_VIRTUAL_FULL) { + /* Now, find the last differential backup after the last full */ + Mmsg(query, +"INSERT INTO btemp3%s (JobId, StartTime, EndTime, JobTDate, PurgedFiles) " + "SELECT JobId, StartTime, EndTime, JobTDate, PurgedFiles " + "FROM Job JOIN FileSet USING (FileSetId) " + "WHERE ClientId = %s " + "AND Level='D' AND JobStatus IN ('T','W') AND Type='B' " + "AND StartTime > (SELECT EndTime FROM btemp3%s ORDER BY EndTime DESC LIMIT 1) " + "AND StartTime < '%s' " + "AND FileSet.FileSet= (SELECT FileSet FROM FileSet WHERE FileSetId = %s) " + " %s " /* Optional name */ + "ORDER BY Job.JobTDate DESC LIMIT 1 ", + jobid, + clientid, + jobid, + date, + filesetid, + name.c_str() + ); + + if (!bdb_sql_query(query.c_str(), NULL, NULL)) { + goto bail_out; + } + + /* We just have to take all incremental after the last Full/Diff */ + Mmsg(query, +"INSERT INTO btemp3%s (JobId, StartTime, EndTime, JobTDate, PurgedFiles) " + "SELECT JobId, StartTime, EndTime, JobTDate, PurgedFiles " + "FROM Job JOIN FileSet USING (FileSetId) " + "WHERE ClientId = %s " + "AND Level='I' AND JobStatus IN ('T','W') AND Type='B' " + "AND StartTime > (SELECT EndTime FROM btemp3%s ORDER BY EndTime DESC LIMIT 1) " + "AND StartTime < '%s' " + "AND FileSet.FileSet= (SELECT FileSet FROM FileSet WHERE FileSetId = %s) " + " %s " + "ORDER BY Job.JobTDate DESC ", + jobid, + clientid, + jobid, + date, + filesetid, + name.c_str() + ); + if (!bdb_sql_query(query.c_str(), NULL, NULL)) { + goto bail_out; + } + } + + /* build a jobid list ie: 1,2,3,4 */ + Mmsg(query, "SELECT JobId FROM btemp3%s ORDER by JobTDate", jobid); + bdb_sql_query(query.c_str(), db_list_handler, jobids); + Dmsg1(1, "db_get_accurate_jobids=%s\n", jobids->list); + ret = true; + +bail_out: + Mmsg(query, "DROP TABLE btemp3%s", jobid); + bdb_sql_query(query.c_str(), NULL, NULL); + + return ret; +} + +bool BDB::bdb_get_base_file_list(JCR *jcr, bool use_md5, + DB_RESULT_HANDLER *result_handler, void *ctx) +{ + POOL_MEM buf(PM_MESSAGE); + + Mmsg(buf, + "SELECT Path, Name, FileIndex, JobId, LStat, 0 As DeltaSeq, MD5 " + "FROM new_basefile%lld ORDER BY JobId, FileIndex ASC", + (uint64_t) jcr->JobId); + + if (!use_md5) { + strip_md5(buf.c_str()); + } + return bdb_sql_query(buf.c_str(), result_handler, ctx); +} + +bool BDB::bdb_get_base_jobid(JCR *jcr, JOB_DBR *jr, JobId_t *jobid) +{ + POOL_MEM query(PM_FNAME); + utime_t StartTime; + db_int64_ctx lctx; + char date[MAX_TIME_LENGTH]; + char esc[MAX_ESCAPE_NAME_LENGTH]; + bool ret = false; + + *jobid = 0; + lctx.count = 0; + lctx.value = 0; + + StartTime = (jr->StartTime)?jr->StartTime:time(NULL); + bstrutime(date, sizeof(date), StartTime + 1); + bdb_escape_string(jcr, esc, jr->Name, strlen(jr->Name)); + + /* we can take also client name, fileset, etc... 
*/ + + Mmsg(query, + "SELECT JobId, Job, StartTime, EndTime, JobTDate, PurgedFiles " + "FROM Job " +// "JOIN FileSet USING (FileSetId) JOIN Client USING (ClientId) " + "WHERE Job.Name = '%s' " + "AND Level='B' AND JobStatus IN ('T','W') AND Type='B' " +// "AND FileSet.FileSet= '%s' " +// "AND Client.Name = '%s' " + "AND StartTime<'%s' " + "ORDER BY Job.JobTDate DESC LIMIT 1", + esc, +// edit_uint64(jr->ClientId, clientid), +// edit_uint64(jr->FileSetId, filesetid)); + date); + + Dmsg1(10, "db_get_base_jobid q=%s\n", query.c_str()); + if (!bdb_sql_query(query.c_str(), db_int64_handler, &lctx)) { + goto bail_out; + } + *jobid = (JobId_t) lctx.value; + + Dmsg1(10, "db_get_base_jobid=%lld\n", *jobid); + ret = true; + +bail_out: + return ret; +} + +/* Get JobIds associated with a volume */ +bool BDB::bdb_get_volume_jobids(JCR *jcr, + MEDIA_DBR *mr, db_list_ctx *lst) +{ + char ed1[50]; + bool ret = false; + + bdb_lock(); + Mmsg(cmd, "SELECT DISTINCT JobId FROM JobMedia WHERE MediaId=%s", + edit_int64(mr->MediaId, ed1)); + ret = bdb_sql_query(cmd, db_list_handler, lst); + bdb_unlock(); + return ret; +} + +/* Get JobIds associated with a client */ +bool BDB::bdb_get_client_jobids(JCR *jcr, + CLIENT_DBR *cr, db_list_ctx *lst) +{ + char ed1[50]; + bool ret = false; + + bdb_lock(); + Mmsg(cmd, "SELECT JobId FROM Job WHERE ClientId=%s", + edit_int64(cr->ClientId, ed1)); + ret = bdb_sql_query(cmd, db_list_handler, lst); + bdb_unlock(); + return ret; +} + +/** + * Get Snapshot Record + * + * Returns: false: on failure + * true: on success + */ +bool BDB::bdb_get_snapshot_record(JCR *jcr, SNAPSHOT_DBR *sr) +{ + SQL_ROW row; + char ed1[50]; + bool ok = false; + char esc[MAX_ESCAPE_NAME_LENGTH]; + POOL_MEM filter1, filter2; + + if (sr->SnapshotId == 0 && (sr->Name[0] == 0 || sr->Device[0] == 0)) { + Dmsg0(10, "No SnapshotId or Name/Device provided\n"); + return false; + } + + bdb_lock(); + + if (sr->SnapshotId != 0) { /* find by id */ + Mmsg(filter1, "Snapshot.SnapshotId=%d", sr->SnapshotId); + + } else if (*sr->Name && *sr->Device) { /* find by name */ + bdb_escape_string(jcr, esc, sr->Name, strlen(sr->Name)); + Mmsg(filter1, "Snapshot.Name='%s'", esc); + bdb_escape_string(jcr, esc, sr->Device, strlen(sr->Device)); + Mmsg(filter2, "AND Snapshot.Device='%s'", esc); + + } else { + Dmsg0(10, "No SnapshotId or Name and Device\n"); + return false; + } + + Mmsg(cmd, "SELECT SnapshotId, Snapshot.Name, JobId, Snapshot.FileSetId, " + "FileSet.FileSet, CreateTDate, CreateDate, " + "Client.Name AS Client, Snapshot.ClientId, Volume, Device, Type, Retention, " + "Comment FROM Snapshot JOIN Client USING (ClientId) LEFT JOIN FileSet USING (FileSetId) WHERE %s %s", + filter1.c_str(), filter2.c_str()); + + if (QueryDB(jcr, cmd)) { + char ed1[50]; + if (sql_num_rows() > 1) { + Mmsg1(errmsg, _("More than one Snapshot!: %s\n"), + edit_uint64(sql_num_rows(), ed1)); + Jmsg(jcr, M_ERROR, 0, "%s", errmsg); + } else if (sql_num_rows() == 1) { + if ((row = sql_fetch_row()) == NULL) { + Mmsg1(errmsg, _("error fetching row: %s\n"), sql_strerror()); + Jmsg(jcr, M_ERROR, 0, "%s", errmsg); + } else { + /* return values */ + sr->reset(); + sr->need_to_free = true; + sr->SnapshotId = str_to_int64(row[0]); + bstrncpy(sr->Name, row[1], sizeof(sr->Name)); + sr->JobId = str_to_int64(row[2]); + sr->FileSetId = str_to_int64(row[3]); + bstrncpy(sr->FileSet, row[4], sizeof(sr->FileSet)); + sr->CreateTDate = str_to_uint64(row[5]); + bstrncpy(sr->CreateDate, row[6], sizeof(sr->CreateDate)); + bstrncpy(sr->Client, row[7], sizeof(sr->Client)); + 
sr->ClientId = str_to_int64(row[8]); + sr->Volume = bstrdup(row[9]); + sr->Device = bstrdup(row[10]); + bstrncpy(sr->Type, row[11], sizeof(sr->Type)); + sr->Retention = str_to_int64(row[12]); + bstrncpy(sr->Comment, NPRTB(row[13]), sizeof(sr->Comment)); + ok = true; + } + } else { + if (sr->SnapshotId != 0) { + Mmsg1(errmsg, _("Snapshot record with SnapshotId=%s not found.\n"), + edit_int64(sr->SnapshotId, ed1)); + } else { + Mmsg1(errmsg, _("Snapshot record for Snapshot name \"%s\" not found.\n"), + sr->Name); + } + } + sql_free_result(); + } else { + if (sr->SnapshotId != 0) { + Mmsg1(errmsg, _("Snapshot record with SnapshotId=%s not found.\n"), + edit_int64(sr->SnapshotId, ed1)); + } else { + Mmsg1(errmsg, _("Snapshot record for Snapshot name \"%s\" not found.\n"), + sr->Name); + } + } + bdb_unlock(); + return ok; +} + +/* Job, Level */ +static void build_estimate_query(BDB *db, POOL_MEM &query, const char *mode, + char *job_esc, char level) +{ + POOL_MEM filter, tmp; + char ed1[50]; + + + if (level == 0) { + level = 'F'; + } + /* MySQL doesn't have statistic functions */ + if (db->bdb_get_type_index() == SQL_TYPE_POSTGRESQL) { + /* postgresql have functions that permit to handle lineal regression + * in y=ax + b + * REGR_SLOPE(Y,X) = get x + * REGR_INTERCEPT(Y,X) = get b + * and we need y when x=now() + * CORR gives the correlation + * (TODO: display progress bar only if CORR > 0.8) + */ + btime_t now = time(NULL); + Mmsg(query, + "SELECT temp.jobname AS jobname, " + "COALESCE(CORR(value,JobTDate),0) AS corr, " + "(%s*REGR_SLOPE(value,JobTDate) " + " + REGR_INTERCEPT(value,JobTDate)) AS value, " + "AVG(value) AS avg_value, " + " COUNT(1) AS nb ", edit_int64(now, ed1)); + } else { + Mmsg(query, + "SELECT jobname AS jobname, " + "0.1 AS corr, AVG(value) AS value, AVG(value) AS avg_value, " + "COUNT(1) AS nb "); + } + + /* if it's a differential, we need to compare since the last full + * + * F D D D F D D D F I I I I D I I I + * | # # # # | # # + * | # # # # # # | # # + * | # # # # # # # # | # # # # # # # # # + * +----------------- +------------------- + */ + if (level == L_DIFFERENTIAL) { + Mmsg(filter, + " AND Job.StartTime > ( " + " SELECT StartTime " + " FROM Job " + " WHERE Job.Name = '%s' " + " AND Job.Level = 'F' " + " AND Job.JobStatus IN ('T', 'W') " + " ORDER BY Job.StartTime DESC LIMIT 1) ", + job_esc); + } + Mmsg(tmp, + " FROM ( " + " SELECT Job.Name AS jobname, " + " %s AS value, " + " JobTDate AS jobtdate " + " FROM Job INNER JOIN Client USING (ClientId) " + " WHERE Job.Name = '%s' " + " AND Job.Level = '%c' " + " AND Job.JobStatus IN ('T', 'W') " + "%s " + "ORDER BY StartTime DESC " + "LIMIT 4" + ") AS temp GROUP BY temp.jobname", + mode, job_esc, level, filter.c_str() + ); + pm_strcat(query, tmp.c_str()); +} + +bool BDB::bdb_get_job_statistics(JCR *jcr, JOB_DBR *jr) +{ + SQL_ROW row; + POOL_MEM queryB, queryF, query; + char job_esc[MAX_ESCAPE_NAME_LENGTH]; + bool ok = false; + + bdb_lock(); + bdb_escape_string(jcr, job_esc, jr->Name, strlen(jr->Name)); + build_estimate_query(this, queryB, "JobBytes", job_esc, jr->JobLevel); + build_estimate_query(this, queryF, "JobFiles", job_esc, jr->JobLevel); + Mmsg(query, + "SELECT bytes.corr * 100 AS corr_jobbytes, " /* 0 */ + "bytes.value AS jobbytes, " /* 1 */ + "bytes.avg_value AS avg_jobbytes, " /* 2 */ + "bytes.nb AS nb_jobbytes, " /* 3 */ + "files.corr * 100 AS corr_jobfiles, " /* 4 */ + "files.value AS jobfiles, " /* 5 */ + "files.avg_value AS avg_jobfiles, " /* 6 */ + "files.nb AS nb_jobfiles " /* 7 */ + "FROM (%s) AS bytes 
LEFT JOIN (%s) AS files USING (jobname)", + queryB.c_str(), queryF.c_str()); + Dmsg1(100, "query=%s\n", query.c_str()); + + if (QueryDB(jcr, query.c_str())) { + char ed1[50]; + if (sql_num_rows() > 1) { + Mmsg1(errmsg, _("More than one Result!: %s\n"), + edit_uint64(sql_num_rows(), ed1)); + goto bail_out; + } + ok = true; + + if ((row = sql_fetch_row()) == NULL) { + Mmsg1(errmsg, _("error fetching row: %s\n"), sql_strerror()); + } else { + jr->CorrJobBytes = str_to_int64(row[0]); + jr->JobBytes = str_to_int64(row[1]); + + /* lineal expression with only one job doesn't return a correct value */ + if (str_to_int64(row[3]) == 1) { + jr->JobBytes = str_to_int64(row[2]); /* Take the AVG value */ + } + jr->CorrNbJob = str_to_int64(row[3]); /* Number of jobs used in this sample */ + jr->CorrJobFiles = str_to_int64(row[4]); + jr->JobFiles = str_to_int64(row[5]); + + if (str_to_int64(row[7]) == 1) { + jr->JobFiles = str_to_int64(row[6]); /* Take the AVG value */ + } + } + sql_free_result(); + } +bail_out: + bdb_unlock(); + return ok; +} + +#endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL */ diff --git a/src/cats/sql_list.c b/src/cats/sql_list.c new file mode 100644 index 00000000..15cfe094 --- /dev/null +++ b/src/cats/sql_list.c @@ -0,0 +1,761 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Bacula Catalog Database List records interface routines + * + * Written by Kern Sibbald, March 2000 + * + */ + +#include "bacula.h" + +#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL + +#include "cats.h" + +/* ----------------------------------------------------------------------- + * + * Generic Routines (or almost generic) + * + * ----------------------------------------------------------------------- + */ + +#define append_filter(buf, sql) \ + do { \ + if (*buf) { \ + pm_strcat(buf, " AND ");\ + } else { \ + pm_strcpy(buf, " WHERE ");\ + } \ + pm_strcat(buf, sql); \ + } while (0) + +/* + * Submit general SQL query + */ +int BDB::bdb_list_sql_query(JCR *jcr, const char *query, DB_LIST_HANDLER *sendit, + void *ctx, int verbose, e_list_type type) +{ + bdb_lock(); + if (!sql_query(query, QF_STORE_RESULT)) { + Mmsg(errmsg, _("Query failed: %s\n"), sql_strerror()); + if (verbose) { + sendit(ctx, errmsg); + } + bdb_unlock(); + return 0; + } + + list_result(jcr,this, sendit, ctx, type); + sql_free_result(); + bdb_unlock(); + return 1; +} + +void BDB::bdb_list_pool_records(JCR *jcr, POOL_DBR *pdbr, + DB_LIST_HANDLER *sendit, void *ctx, e_list_type type) +{ + char esc[MAX_ESCAPE_NAME_LENGTH]; + + bdb_lock(); + bdb_escape_string(jcr, esc, pdbr->Name, strlen(pdbr->Name)); + + if (type == VERT_LIST) { + if (pdbr->Name[0] != 0) { + Mmsg(cmd, "SELECT PoolId,Name,NumVols,MaxVols,UseOnce,UseCatalog," + "AcceptAnyVolume,VolRetention,VolUseDuration,MaxVolJobs,MaxVolBytes," + "AutoPrune,Recycle,PoolType,LabelFormat,Enabled,ScratchPoolId," + "RecyclePoolId,LabelType,ActionOnPurge,CacheRetention " + " FROM Pool WHERE Name='%s'", esc); + } else { + Mmsg(cmd, "SELECT PoolId,Name,NumVols,MaxVols,UseOnce,UseCatalog," + "AcceptAnyVolume,VolRetention,VolUseDuration,MaxVolJobs,MaxVolBytes," + "AutoPrune,Recycle,PoolType,LabelFormat,Enabled,ScratchPoolId," + "RecyclePoolId,LabelType,ActionOnPurge,CacheRetention " + " FROM Pool ORDER BY PoolId"); + } + } else { + if (pdbr->Name[0] != 0) { + Mmsg(cmd, "SELECT PoolId,Name,NumVols,MaxVols,MaxVolBytes,VolRetention,Enabled,PoolType,LabelFormat " + "FROM Pool WHERE Name='%s'", esc); + } else { + Mmsg(cmd, "SELECT PoolId,Name,NumVols,MaxVols,MaxVolBytes,VolRetention,Enabled,PoolType,LabelFormat " + "FROM Pool ORDER BY PoolId"); + } + } + + if (!QueryDB(jcr, cmd)) { + bdb_unlock(); + return; + } + + list_result(jcr, this, sendit, ctx, type); + + sql_free_result(); + bdb_unlock(); +} + +void BDB::bdb_list_client_records(JCR *jcr, DB_LIST_HANDLER *sendit, void *ctx, e_list_type type) +{ + bdb_lock(); + if (type == VERT_LIST) { + Mmsg(cmd, "SELECT ClientId,Name,Uname,AutoPrune,FileRetention," + "JobRetention " + "FROM Client ORDER BY ClientId"); + } else { + Mmsg(cmd, "SELECT ClientId,Name,FileRetention,JobRetention " + "FROM Client ORDER BY ClientId"); + } + + if (!QueryDB(jcr, cmd)) { + bdb_unlock(); + return; + } + + list_result(jcr, this, sendit, ctx, type); + + sql_free_result(); + bdb_unlock(); +} + +/* + * List restore objects + * + * JobId | JobIds: List RestoreObjects for specific Job(s) + * It is possible to specify the ObjectType using FileType field. 
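+ *
+ * For example (illustrative only, assuming the usual FT_* object type
+ * codes): JobIds must be a pure number list, and FileType is an
+ * optional ObjectType filter:
+ *
+ *   ROBJECT_DBR rr;
+ *   memset(&rr, 0, sizeof(rr));
+ *   rr.JobIds = (char *)"1,2";
+ *   rr.FileType = FT_PLUGIN_CONFIG;
+ *   db->bdb_list_restore_objects(jcr, &rr, sendit, ctx, HORZ_LIST);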
+ */ +void BDB::bdb_list_restore_objects(JCR *jcr, ROBJECT_DBR *rr, DB_LIST_HANDLER *sendit, void *ctx, e_list_type type) +{ + POOL_MEM filter; + char ed1[50]; + char *jobid; + + if (rr->JobIds && is_a_number_list(rr->JobIds)) { + jobid = rr->JobIds; + + } else if (rr->JobId) { + jobid = edit_int64(rr->JobId, ed1); + + } else { + return; + } + + if (rr->FileType > 0) { + Mmsg(filter, "AND ObjectType = %d ", rr->FileType); + } + + bdb_lock(); + if (type == VERT_LIST) { + Mmsg(cmd, "SELECT JobId, RestoreObjectId, ObjectName, " + "PluginName, ObjectType " + "FROM RestoreObject JOIN Job USING (JobId) WHERE JobId IN (%s) %s " + "ORDER BY JobTDate ASC, RestoreObjectId", + jobid, filter.c_str()); + } else { + Mmsg(cmd, "SELECT JobId, RestoreObjectId, ObjectName, " + "PluginName, ObjectType, ObjectLength " + "FROM RestoreObject JOIN Job USING (JobId) WHERE JobId IN (%s) %s " + "ORDER BY JobTDate ASC, RestoreObjectId", + jobid, filter.c_str()); + } + + if (!QueryDB(jcr, cmd)) { + bdb_unlock(); + return; + } + + list_result(jcr, this, sendit, ctx, type); + + sql_free_result(); + bdb_unlock(); +} + +/* + * If VolumeName is non-zero, list the record for that Volume + * otherwise, list the Volumes in the Pool specified by PoolId + */ +void BDB::bdb_list_media_records(JCR *jcr, MEDIA_DBR *mdbr, + DB_LIST_HANDLER *sendit, void *ctx, e_list_type type) +{ + char ed1[50]; + char esc[MAX_ESCAPE_NAME_LENGTH]; + const char *expiresin = expires_in[bdb_get_type_index()]; + + bdb_lock(); + bdb_escape_string(jcr, esc, mdbr->VolumeName, strlen(mdbr->VolumeName)); + const char *join = ""; + const char *where = ""; + + if (type == VERT_LIST) { + if (mdbr->VolumeName[0] != 0) { + Mmsg(cmd, "SELECT MediaId,VolumeName,Slot,PoolId," + "MediaType,MediaTypeId,FirstWritten,LastWritten,LabelDate,VolJobs," + "VolFiles,VolBlocks,VolParts,VolCloudParts,Media.CacheRetention,VolMounts,VolBytes," + "VolABytes,VolAPadding," + "VolHoleBytes,VolHoles,LastPartBytes,VolErrors,VolWrites," + "VolCapacityBytes,VolStatus,Media.Enabled,Media.Recycle,Media.VolRetention," + "Media.VolUseDuration,Media.MaxVolJobs,Media.MaxVolFiles,Media.MaxVolBytes,InChanger," + "EndFile,EndBlock,VolType,Media.LabelType,StorageId,DeviceId," + "MediaAddressing,VolReadTime,VolWriteTime," + "LocationId,RecycleCount,InitialWrite,Media.ScratchPoolId,Media.RecyclePoolId, " + "Media.ActionOnPurge,%s AS ExpiresIn, Comment" + " FROM Media %s WHERE Media.VolumeName='%s' %s", + expiresin, + join, + esc, + where + ); + } else { + Mmsg(cmd, "SELECT MediaId,VolumeName,Slot,PoolId," + "MediaType,MediaTypeId,FirstWritten,LastWritten,LabelDate,VolJobs," + "VolFiles,VolBlocks,VolParts,VolCloudParts,Media.CacheRetention,VolMounts,VolBytes," + "VolABytes,VolAPadding," + "VolHoleBytes,VolHoles,LastPartBytes,VolErrors,VolWrites," + "VolCapacityBytes,VolStatus,Media.Enabled,Media.Recycle,Media.VolRetention," + "Media.VolUseDuration,Media.MaxVolJobs,Media.MaxVolFiles,Media.MaxVolBytes,InChanger," + "EndFile,EndBlock,VolType,Media.LabelType,StorageId,DeviceId," + "MediaAddressing,VolReadTime,VolWriteTime," + "LocationId,RecycleCount,InitialWrite,Media.ScratchPoolId,Media.RecyclePoolId, " + "Media.ActionOnPurge,%s AS ExpiresIn, Comment" + " FROM Media %s WHERE Media.PoolId=%s %s ORDER BY MediaId", + expiresin, + join, + edit_int64(mdbr->PoolId, ed1), + where + ); + } + } else { + if (mdbr->VolumeName[0] != 0) { + Mmsg(cmd, "SELECT MediaId,VolumeName,VolStatus,Media.Enabled," + "VolBytes,VolFiles,Media.VolRetention,Media.Recycle,Slot,InChanger,MediaType,VolType," + "VolParts,%s AS 
ExpiresIn " + "FROM Media %s WHERE Media.VolumeName='%s' %s", + expiresin, + join, + esc, + where + ); + } else { + Mmsg(cmd, "SELECT MediaId,VolumeName,VolStatus,Media.Enabled," + "VolBytes,VolFiles,Media.VolRetention,Media.Recycle,Slot,InChanger,MediaType,VolType," + "VolParts,LastWritten,%s AS ExpiresIn " + "FROM Media %s WHERE Media.PoolId=%s %s ORDER BY MediaId", + expiresin, + join, + edit_int64(mdbr->PoolId, ed1), + where + ); + } + } + Dmsg1(DT_SQL|50, "q=%s\n", cmd); + if (!QueryDB(jcr, cmd)) { + bdb_unlock(); + return; + } + + list_result(jcr, this, sendit, ctx, type); + + sql_free_result(); + bdb_unlock(); +} + +void BDB::bdb_list_jobmedia_records(JCR *jcr, uint32_t JobId, + DB_LIST_HANDLER *sendit, void *ctx, e_list_type type) +{ + char ed1[50]; + + bdb_lock(); + const char *join = ""; + const char *where = ""; + + if (type == VERT_LIST) { + if (JobId > 0) { /* do by JobId */ + Mmsg(cmd, "SELECT JobMediaId,JobId,Media.MediaId,Media.VolumeName," + "FirstIndex,LastIndex,StartFile,JobMedia.EndFile,StartBlock," + "JobMedia.EndBlock " + "FROM JobMedia JOIN Media USING (MediaId) %s " + "WHERE JobMedia.JobId=%s %s", + join, + edit_int64(JobId, ed1), + where); + } else { + Mmsg(cmd, "SELECT JobMediaId,JobId,Media.MediaId,Media.VolumeName," + "FirstIndex,LastIndex,StartFile,JobMedia.EndFile,StartBlock," + "JobMedia.EndBlock " + "FROM JobMedia JOIN Media USING (MediaId) %s %s", + join, + where); + } + + } else { + if (JobId > 0) { /* do by JobId */ + Mmsg(cmd, "SELECT JobId,Media.VolumeName,FirstIndex,LastIndex " + "FROM JobMedia JOIN Media USING (MediaId) %s WHERE " + "JobMedia.JobId=%s %s", + join, + edit_int64(JobId, ed1), + where); + } else { + Mmsg(cmd, "SELECT JobId,Media.VolumeName,FirstIndex,LastIndex " + "FROM JobMedia JOIN Media USING (MediaId) %s %s", + join, + where); + } + } + Dmsg1(DT_SQL|50, "q=%s\n", cmd); + + if (!QueryDB(jcr, cmd)) { + bdb_unlock(); + return; + } + + list_result(jcr, this, sendit, ctx, type); + + sql_free_result(); + bdb_unlock(); +} + + +void BDB::bdb_list_copies_records(JCR *jcr, uint32_t limit, char *JobIds, + DB_LIST_HANDLER *sendit, void *ctx, e_list_type type) +{ + POOL_MEM str_limit(PM_MESSAGE); + POOL_MEM str_jobids(PM_MESSAGE); + + if (limit > 0) { + Mmsg(str_limit, " LIMIT %d", limit); + } + + if (JobIds && JobIds[0]) { + Mmsg(str_jobids, " AND (Job.PriorJobId IN (%s) OR Job.JobId IN (%s)) ", + JobIds, JobIds); + } + + bdb_lock(); + Mmsg(cmd, + "SELECT DISTINCT Job.PriorJobId AS JobId, Job.Job, " + "Job.JobId AS CopyJobId, Media.MediaType " + "FROM Job " + "JOIN JobMedia USING (JobId) " + "JOIN Media USING (MediaId) " + "WHERE Job.Type = '%c' %s ORDER BY Job.PriorJobId DESC %s", + (char) JT_JOB_COPY, str_jobids.c_str(), str_limit.c_str()); + + if (!QueryDB(jcr, cmd)) { + goto bail_out; + } + + if (sql_num_rows()) { + if (JobIds && JobIds[0]) { + sendit(ctx, _("These JobIds have copies as follows:\n")); + } else { + sendit(ctx, _("The catalog contains copies as follows:\n")); + } + + list_result(jcr, this, sendit, ctx, type); + } + + sql_free_result(); + +bail_out: + bdb_unlock(); +} + +void BDB::bdb_list_joblog_records(JCR *jcr, uint32_t JobId, + DB_LIST_HANDLER *sendit, void *ctx, e_list_type type) +{ + char ed1[50]; + + if (JobId <= 0) { + return; + } + bdb_lock(); + if (type == VERT_LIST) { + Mmsg(cmd, "SELECT Time,LogText FROM Log " + "WHERE Log.JobId=%s ORDER BY LogId ASC", edit_int64(JobId, ed1)); + } else { + Mmsg(cmd, "SELECT LogText FROM Log " + "WHERE Log.JobId=%s ORDER BY LogId ASC", edit_int64(JobId, ed1)); + } + if (!QueryDB(jcr, 
cmd)) { + goto bail_out; + } + + list_result(jcr, this, sendit, ctx, type); + + sql_free_result(); + +bail_out: + bdb_unlock(); +} + + +/* + * List Job record(s) that match JOB_DBR + * + * Currently, we return all jobs or if jr->JobId is set, + * only the job with the specified id. + */ +alist *BDB::bdb_list_job_records(JCR *jcr, JOB_DBR *jr, DB_LIST_HANDLER *sendit, + void *ctx, e_list_type type) +{ + char ed1[50]; + char limit[50]; + char esc[MAX_ESCAPE_NAME_LENGTH]; + alist *list = NULL; + POOLMEM *where = get_pool_memory(PM_MESSAGE); + POOLMEM *tmp = get_pool_memory(PM_MESSAGE); + const char *order = "ASC"; + *where = 0; + + bdb_lock(); + if (jr->order == 1) { + order = "DESC"; + } + if (jr->limit > 0) { + snprintf(limit, sizeof(limit), " LIMIT %d", jr->limit); + } else { + limit[0] = 0; + } + if (jr->Name[0]) { + bdb_escape_string(jcr, esc, jr->Name, strlen(jr->Name)); + Mmsg(tmp, " Job.Name='%s' ", esc); + append_filter(where, tmp); + + } else if (jr->JobId != 0) { + Mmsg(tmp, " Job.JobId=%s ", edit_int64(jr->JobId, ed1)); + append_filter(where, tmp); + + } else if (jr->Job[0] != 0) { + bdb_escape_string(jcr, esc, jr->Job, strlen(jr->Job)); + Mmsg(tmp, " Job.Job='%s' ", esc); + append_filter(where, tmp); + } + + if (type == INCOMPLETE_JOBS && jr->JobStatus == JS_FatalError) { + Mmsg(tmp, " Job.JobStatus IN ('E', 'f') "); + append_filter(where, tmp); + + } else if (jr->JobStatus) { + Mmsg(tmp, " Job.JobStatus='%c' ", jr->JobStatus); + append_filter(where, tmp); + } + + if (jr->JobType) { + Mmsg(tmp, " Job.Type='%c' ", jr->JobType); + append_filter(where, tmp); + } + + if (jr->JobErrors > 0) { + Mmsg(tmp, " Job.JobErrors > 0 "); + append_filter(where, tmp); + } + + if (jr->ClientId > 0) { + Mmsg(tmp, " Job.ClientId=%s ", edit_int64(jr->ClientId, ed1)); + append_filter(where, tmp); + } + + switch (type) { + case VERT_LIST: + Mmsg(cmd, + "SELECT JobId,Job,Job.Name,PurgedFiles,Type,Level," + "Job.ClientId,Client.Name as ClientName,JobStatus,SchedTime," + "StartTime,EndTime,RealEndTime,JobTDate," + "VolSessionId,VolSessionTime,JobFiles,JobBytes,ReadBytes,JobErrors," + "JobMissingFiles,Job.PoolId,Pool.Name as PoolName,PriorJobId," + "Job.FileSetId,FileSet.FileSet,Job.HasBase,Job.HasCache,Job.Comment " + "FROM Job JOIN Client USING (ClientId) LEFT JOIN Pool USING (PoolId) " + "LEFT JOIN FileSet USING (FileSetId) %s " + "ORDER BY StartTime %s %s", where, order, limit); + break; + case HORZ_LIST: + Mmsg(cmd, + "SELECT JobId,Name,StartTime,Type,Level,JobFiles,JobBytes,JobStatus " + "FROM Job %s ORDER BY StartTime %s,JobId %s %s", where, order, order, limit); + break; + case INCOMPLETE_JOBS: + Mmsg(cmd, + "SELECT JobId,Name,StartTime,Type,Level,JobFiles,JobBytes,JobStatus " + "FROM Job %s ORDER BY StartTime %s,JobId %s %s", + where, order, order, limit); + break; + default: + break; + } + + free_pool_memory(tmp); + free_pool_memory(where); + + Dmsg1(100, "SQL: %s\n", cmd); + if (!QueryDB(jcr, cmd)) { + bdb_unlock(); + return NULL; + } + if (type == INCOMPLETE_JOBS) { + SQL_ROW row; + list = New(alist(10)); + sql_data_seek(0); + for (int i=0; (row=sql_fetch_row()) != NULL; i++) { + list->append(bstrdup(row[0])); + } + } + sql_data_seek(0); + list_result(jcr, this, sendit, ctx, type); + sql_free_result(); + bdb_unlock(); + return list; +} + +/* + * List Job totals + * + */ +void BDB::bdb_list_job_totals(JCR *jcr, JOB_DBR *jr, DB_LIST_HANDLER *sendit, void *ctx) +{ + bdb_lock(); + + /* List by Job */ + Mmsg(cmd, "SELECT count(*) AS Jobs,sum(JobFiles) " + "AS Files,sum(JobBytes) AS Bytes,Name AS Job 
FROM Job GROUP BY Name"); + + if (!QueryDB(jcr, cmd)) { + bdb_unlock(); + return; + } + + list_result(jcr, this, sendit, ctx, HORZ_LIST); + + sql_free_result(); + + /* Do Grand Total */ + Mmsg(cmd, "SELECT count(*) AS Jobs,sum(JobFiles) " + "AS Files,sum(JobBytes) As Bytes FROM Job"); + + if (!QueryDB(jcr, cmd)) { + bdb_unlock(); + return; + } + + list_result(jcr, this, sendit, ctx, HORZ_LIST); + + sql_free_result(); + bdb_unlock(); +} + +/* List all file records from a job + * "deleted" values are described just below + */ +void BDB::bdb_list_files_for_job(JCR *jcr, JobId_t jobid, int deleted, DB_LIST_HANDLER *sendit, void *ctx) +{ + char ed1[50]; + const char *opt; + LIST_CTX lctx(jcr, this, sendit, ctx, HORZ_LIST); + + switch (deleted) { + case 0: /* Show only actual files */ + opt = " AND FileIndex > 0 "; + break; + case 1: /* Show only deleted files */ + opt = " AND FileIndex <= 0 "; + break; + default: /* Show everything */ + opt = ""; + break; + } + + bdb_lock(); + + /* + * MySQL is different with no || operator + */ + if (bdb_get_type_index() == SQL_TYPE_MYSQL) { + Mmsg(cmd, "SELECT CONCAT(Path.Path,Filename.Name) AS Filename " + "FROM (SELECT PathId, FilenameId FROM File WHERE JobId=%s %s " + "UNION ALL " + "SELECT PathId, FilenameId " + "FROM BaseFiles JOIN File " + "ON (BaseFiles.FileId = File.FileId) " + "WHERE BaseFiles.JobId = %s" + ") AS F, Filename,Path " + "WHERE Filename.FilenameId=F.FilenameId " + "AND Path.PathId=F.PathId", + edit_int64(jobid, ed1), opt, ed1); + } else { + Mmsg(cmd, "SELECT Path.Path||Filename.Name AS Filename " + "FROM (SELECT PathId, FilenameId FROM File WHERE JobId=%s %s " + "UNION ALL " + "SELECT PathId, FilenameId " + "FROM BaseFiles JOIN File " + "ON (BaseFiles.FileId = File.FileId) " + "WHERE BaseFiles.JobId = %s" + ") AS F, Filename,Path " + "WHERE Filename.FilenameId=F.FilenameId " + "AND Path.PathId=F.PathId", + edit_int64(jobid, ed1), opt, ed1); + } + Dmsg1(100, "q=%s\n", cmd); + if (!bdb_big_sql_query(cmd, list_result, &lctx)) { + bdb_unlock(); + return; + } + + lctx.send_dashes(); + + sql_free_result(); + bdb_unlock(); +} + +void BDB::bdb_list_base_files_for_job(JCR *jcr, JobId_t jobid, DB_LIST_HANDLER *sendit, void *ctx) +{ + char ed1[50]; + LIST_CTX lctx(jcr, this, sendit, ctx, HORZ_LIST); + + bdb_lock(); + + /* + * Stupid MySQL is NON-STANDARD ! 
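+ * (it has no "||" string concatenation operator), so the same SELECT is
+ * written twice below:
+ *
+ *   MySQL:               CONCAT(Path.Path, Filename.Name) AS Filename
+ *   SQLite/PostgreSQL:   Path.Path || Filename.Name AS Filename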
+ */ + if (bdb_get_type_index() == SQL_TYPE_MYSQL) { + Mmsg(cmd, "SELECT CONCAT(Path.Path,Filename.Name) AS Filename " + "FROM BaseFiles, File, Filename, Path " + "WHERE BaseFiles.JobId=%s AND BaseFiles.BaseJobId = File.JobId " + "AND BaseFiles.FileId = File.FileId " + "AND Filename.FilenameId=File.FilenameId " + "AND Path.PathId=File.PathId", + edit_int64(jobid, ed1)); + } else { + Mmsg(cmd, "SELECT Path.Path||Filename.Name AS Filename " + "FROM BaseFiles, File, Filename, Path " + "WHERE BaseFiles.JobId=%s AND BaseFiles.BaseJobId = File.JobId " + "AND BaseFiles.FileId = File.FileId " + "AND Filename.FilenameId=File.FilenameId " + "AND Path.PathId=File.PathId", + edit_int64(jobid, ed1)); + } + + if (!bdb_big_sql_query(cmd, list_result, &lctx)) { + bdb_unlock(); + return; + } + + lctx.send_dashes(); + + sql_free_result(); + bdb_unlock(); +} + +void BDB::bdb_list_snapshot_records(JCR *jcr, SNAPSHOT_DBR *sdbr, + DB_LIST_HANDLER *sendit, void *ctx, e_list_type type) +{ + POOLMEM *filter = get_pool_memory(PM_MESSAGE); + POOLMEM *tmp = get_pool_memory(PM_MESSAGE); + POOLMEM *esc = get_pool_memory(PM_MESSAGE); + char ed1[50]; + + bdb_lock(); + *filter = 0; + + if (sdbr->Name[0]) { + bdb_escape_string(jcr, esc, sdbr->Name, strlen(sdbr->Name)); + Mmsg(tmp, "Name='%s'", esc); + append_filter(filter, tmp); + } + if (sdbr->SnapshotId > 0) { + Mmsg(tmp, "Snapshot.SnapshotId=%d", sdbr->SnapshotId); + append_filter(filter, tmp); + } + if (sdbr->ClientId > 0) { + Mmsg(tmp, "Snapshot.ClientId=%d", sdbr->ClientId); + append_filter(filter, tmp); + } + if (sdbr->JobId > 0) { + Mmsg(tmp, "Snapshot.JobId=%d", sdbr->JobId); + append_filter(filter, tmp); + } + if (*sdbr->Client) { + bdb_escape_string(jcr, esc, sdbr->Client, strlen(sdbr->Client)); + Mmsg(tmp, "Client.Name='%s'", esc); + append_filter(filter, tmp); + } + if (sdbr->Device && *(sdbr->Device)) { + esc = check_pool_memory_size(esc, strlen(sdbr->Device) * 2 + 1); + bdb_escape_string(jcr, esc, sdbr->Device, strlen(sdbr->Device)); + Mmsg(tmp, "Device='%s'", esc); + append_filter(filter, tmp); + } + if (*sdbr->Type) { + bdb_escape_string(jcr, esc, sdbr->Type, strlen(sdbr->Type)); + Mmsg(tmp, "Type='%s'", esc); + append_filter(filter, tmp); + } + if (*sdbr->created_before) { + bdb_escape_string(jcr, esc, sdbr->created_before, strlen(sdbr->created_before)); + Mmsg(tmp, "CreateDate <= '%s'", esc); + append_filter(filter, tmp); + } + if (*sdbr->created_after) { + bdb_escape_string(jcr, esc, sdbr->created_after, strlen(sdbr->created_after)); + Mmsg(tmp, "CreateDate >= '%s'", esc); + append_filter(filter, tmp); + } + if (sdbr->expired) { + Mmsg(tmp, "CreateTDate < (%s - Retention)", edit_int64(time(NULL), ed1)); + append_filter(filter, tmp); + } + if (*sdbr->CreateDate) { + bdb_escape_string(jcr, esc, sdbr->CreateDate, strlen(sdbr->CreateDate)); + Mmsg(tmp, "CreateDate = '%s'", esc); + append_filter(filter, tmp); + } + + if (sdbr->sorted_client) { + pm_strcat(filter, " ORDER BY Client.Name, SnapshotId DESC"); + + } else { + pm_strcat(filter, " ORDER BY SnapshotId DESC"); + } + + if (type == VERT_LIST || type == ARG_LIST) { + Mmsg(cmd, "SELECT SnapshotId, Snapshot.Name, CreateDate, Client.Name AS Client, " + "FileSet.FileSet AS FileSet, JobId, Volume, Device, Type, Retention, Comment " + "FROM Snapshot JOIN Client USING (ClientId) LEFT JOIN FileSet USING (FileSetId) %s", filter); + + } else if (type == HORZ_LIST) { + Mmsg(cmd, "SELECT SnapshotId, Snapshot.Name, CreateDate, Client.Name AS Client, " + "Device, Type " + "FROM Snapshot JOIN Client USING (ClientId) 
%s", filter); + } + + if (!QueryDB(jcr, cmd)) { + goto bail_out; + } + + list_result(jcr, this, sendit, ctx, type); + +bail_out: + sql_free_result(); + bdb_unlock(); + + free_pool_memory(filter); + free_pool_memory(esc); + free_pool_memory(tmp); +} + +#endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL */ diff --git a/src/cats/sql_update.c b/src/cats/sql_update.c new file mode 100644 index 00000000..b01ed7ad --- /dev/null +++ b/src/cats/sql_update.c @@ -0,0 +1,529 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula Catalog Database Update record interface routines + * + * Written by Kern Sibbald, March 2000 + */ + +#include "bacula.h" + +#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL + +#include "cats.h" + +#define dbglevel1 100 +#define dbglevel2 400 + +/* ----------------------------------------------------------------------- + * + * Generic Routines (or almost generic) + * + * ----------------------------------------------------------------------- + */ + +/* ----------------------------------------------------------------------- + * + * Generic Routines (or almost generic) + * + * ----------------------------------------------------------------------- + */ +/* Update the attributes record by adding the file digest */ +int BDB::bdb_add_digest_to_file_record(JCR *jcr, FileId_t FileId, char *digest, + int type) +{ + int ret; + char ed1[50]; + int len = strlen(digest); + + bdb_lock(); + esc_name = check_pool_memory_size(esc_name, len*2+1); + bdb_escape_string(jcr, esc_name, digest, len); + Mmsg(cmd, "UPDATE File SET MD5='%s' WHERE FileId=%s", esc_name, + edit_int64(FileId, ed1)); + ret = UpdateDB(jcr, cmd, false); + bdb_unlock(); + return ret; +} + +/* Mark the file record as being visited during database + * verify compare. 
Stuff JobId into the MarkId field + */ +int BDB::bdb_mark_file_record(JCR *jcr, FileId_t FileId, JobId_t JobId) +{ + int stat; + char ed1[50], ed2[50]; + + bdb_lock(); + Mmsg(cmd, "UPDATE File SET MarkId=%s WHERE FileId=%s", + edit_int64(JobId, ed1), edit_int64(FileId, ed2)); + stat = UpdateDB(jcr, cmd, false); + bdb_unlock(); + return stat; +} + +/* + * Update the Job record at start of Job + * + * Returns: false on failure + * true on success + */ +bool BDB::bdb_update_job_start_record(JCR *jcr, JOB_DBR *jr) +{ + char dt[MAX_TIME_LENGTH]; + time_t stime; + struct tm tm; + btime_t JobTDate; + int stat; + char ed1[50], ed2[50], ed3[50], ed4[50], ed5[50]; + + stime = jr->StartTime; + (void)localtime_r(&stime, &tm); + strftime(dt, sizeof(dt), "%Y-%m-%d %H:%M:%S", &tm); + JobTDate = (btime_t)stime; + + bdb_lock(); + Mmsg(cmd, "UPDATE Job SET JobStatus='%c',Level='%c',StartTime='%s'," +"ClientId=%s,JobTDate=%s,PoolId=%s,FileSetId=%s WHERE JobId=%s", + (char)(jcr->JobStatus), + (char)(jr->JobLevel), dt, + edit_int64(jr->ClientId, ed1), + edit_uint64(JobTDate, ed2), + edit_int64(jr->PoolId, ed3), + edit_int64(jr->FileSetId, ed4), + edit_int64(jr->JobId, ed5)); + + stat = UpdateDB(jcr, cmd, false); + changes = 0; + bdb_unlock(); + return stat; +} + +/* + * Update Long term statistics with all jobs that were run before + * age seconds + */ +int BDB::bdb_update_stats(JCR *jcr, utime_t age) +{ + char ed1[30]; + int rows; + + utime_t now = (utime_t)time(NULL); + edit_uint64(now - age, ed1); + + bdb_lock(); + + Mmsg(cmd, fill_jobhisto, ed1); + QueryDB(jcr, cmd); /* TODO: get a message ? */ + rows = sql_affected_rows(); + + bdb_unlock(); + + return rows; +} + +/* + * Update the Job record at end of Job + * + * Returns: 0 on failure + * 1 on success + */ +int BDB::bdb_update_job_end_record(JCR *jcr, JOB_DBR *jr) +{ + char dt[MAX_TIME_LENGTH]; + char rdt[MAX_TIME_LENGTH]; + time_t ttime; + struct tm tm; + int stat; + char ed1[30], ed2[30], ed3[50], ed4[50]; + btime_t JobTDate; + char PriorJobId[50]; + + if (jr->PriorJobId) { + bstrncpy(PriorJobId, edit_int64(jr->PriorJobId, ed1), sizeof(PriorJobId)); + } else { + bstrncpy(PriorJobId, "0", sizeof(PriorJobId)); + } + + ttime = jr->EndTime; + (void)localtime_r(&ttime, &tm); + strftime(dt, sizeof(dt), "%Y-%m-%d %H:%M:%S", &tm); + + if (jr->RealEndTime == 0 || jr->RealEndTime < jr->EndTime) { + jr->RealEndTime = jr->EndTime; + } + ttime = jr->RealEndTime; + (void)localtime_r(&ttime, &tm); + strftime(rdt, sizeof(rdt), "%Y-%m-%d %H:%M:%S", &tm); + + JobTDate = ttime; + + bdb_lock(); + Mmsg(cmd, + "UPDATE Job SET JobStatus='%c',EndTime='%s'," +"ClientId=%u,JobBytes=%s,ReadBytes=%s,JobFiles=%u,JobErrors=%u,VolSessionId=%u," +"VolSessionTime=%u,PoolId=%u,FileSetId=%u,JobTDate=%s," +"RealEndTime='%s',PriorJobId=%s,HasBase=%u,PurgedFiles=%u WHERE JobId=%s", + (char)(jr->JobStatus), dt, jr->ClientId, edit_uint64(jr->JobBytes, ed1), + edit_uint64(jr->ReadBytes, ed4), + jr->JobFiles, jr->JobErrors, jr->VolSessionId, jr->VolSessionTime, + jr->PoolId, jr->FileSetId, edit_uint64(JobTDate, ed2), + rdt, PriorJobId, jr->HasBase, jr->PurgedFiles, + edit_int64(jr->JobId, ed3)); + + stat = UpdateDB(jcr, cmd, false); + + bdb_unlock(); + return stat; +} + +/* + * Update Client record + * Returns: 0 on failure + * 1 on success + */ +int BDB::bdb_update_client_record(JCR *jcr, CLIENT_DBR *cr) +{ + int stat; + char ed1[50], ed2[50]; + char esc_name[MAX_ESCAPE_NAME_LENGTH]; + char esc_uname[MAX_ESCAPE_NAME_LENGTH]; + CLIENT_DBR tcr; + + bdb_lock(); + memcpy(&tcr, cr, sizeof(tcr)); + 
if (!bdb_create_client_record(jcr, &tcr)) { + bdb_unlock(); + return 0; + } + + bdb_escape_string(jcr, esc_name, cr->Name, strlen(cr->Name)); + bdb_escape_string(jcr, esc_uname, cr->Uname, strlen(cr->Uname)); + Mmsg(cmd, +"UPDATE Client SET AutoPrune=%d,FileRetention=%s,JobRetention=%s," +"Uname='%s' WHERE Name='%s'", + cr->AutoPrune, + edit_uint64(cr->FileRetention, ed1), + edit_uint64(cr->JobRetention, ed2), + esc_uname, esc_name); + + stat = UpdateDB(jcr, cmd, false); + bdb_unlock(); + return stat; +} + + +/* + * Update Counters record + * Returns: 0 on failure + * 1 on success + */ +int BDB::bdb_update_counter_record(JCR *jcr, COUNTER_DBR *cr) +{ + char esc[MAX_ESCAPE_NAME_LENGTH]; + + bdb_lock(); + bdb_escape_string(jcr, esc, cr->Counter, strlen(cr->Counter)); + Mmsg(cmd, update_counter_values[bdb_get_type_index()], + cr->MinValue, cr->MaxValue, cr->CurrentValue, + cr->WrapCounter, esc); + + int stat = UpdateDB(jcr, cmd, false); + bdb_unlock(); + return stat; +} + + +int BDB::bdb_update_pool_record(JCR *jcr, POOL_DBR *pr) +{ + int stat; + char ed1[50], ed2[50], ed3[50], ed4[50], ed5[50], ed6[50], ed7[50]; + char esc[MAX_ESCAPE_NAME_LENGTH]; + + bdb_lock(); + bdb_escape_string(jcr, esc, pr->LabelFormat, strlen(pr->LabelFormat)); + + Mmsg(cmd, "SELECT count(*) from Media WHERE PoolId=%s", + edit_int64(pr->PoolId, ed4)); + pr->NumVols = get_sql_record_max(jcr, this); + Dmsg1(dbglevel2, "NumVols=%d\n", pr->NumVols); + + Mmsg(cmd, +"UPDATE Pool SET NumVols=%u,MaxVols=%u,UseOnce=%d,UseCatalog=%d," +"AcceptAnyVolume=%d,VolRetention='%s',VolUseDuration='%s'," +"MaxVolJobs=%u,MaxVolFiles=%u,MaxVolBytes=%s,Recycle=%d," +"AutoPrune=%d,LabelType=%d,LabelFormat='%s',RecyclePoolId=%s," +"ScratchPoolId=%s,ActionOnPurge=%d,CacheRetention='%s' WHERE PoolId=%s", + pr->NumVols, pr->MaxVols, pr->UseOnce, pr->UseCatalog, + pr->AcceptAnyVolume, edit_uint64(pr->VolRetention, ed1), + edit_uint64(pr->VolUseDuration, ed2), + pr->MaxVolJobs, pr->MaxVolFiles, + edit_uint64(pr->MaxVolBytes, ed3), + pr->Recycle, pr->AutoPrune, pr->LabelType, + esc, edit_int64(pr->RecyclePoolId,ed5), + edit_int64(pr->ScratchPoolId,ed6), + pr->ActionOnPurge, + edit_uint64(pr->CacheRetention, ed7), + ed4); + stat = UpdateDB(jcr, cmd, false); + bdb_unlock(); + return stat; +} + +bool BDB::bdb_update_storage_record(JCR *jcr, STORAGE_DBR *sr) +{ + int stat; + char ed1[50]; + + bdb_lock(); + Mmsg(cmd, "UPDATE Storage SET AutoChanger=%d WHERE StorageId=%s", + sr->AutoChanger, edit_int64(sr->StorageId, ed1)); + + stat = UpdateDB(jcr, cmd, false); + bdb_unlock(); + return stat; +} + + +/* + * Update the Media Record at end of Session + * + * Returns: 0 on failure + * numrows on success + */ +int BDB::bdb_update_media_record(JCR *jcr, MEDIA_DBR *mr) +{ + char dt[MAX_TIME_LENGTH]; + time_t ttime; + struct tm tm; + int stat; + char ed1[50], ed2[50], ed3[50], ed4[50]; + char ed5[50], ed6[50], ed7[50], ed8[50]; + char ed9[50], ed10[50], ed11[50], ed12[50]; + char ed13[50], ed14[50], ed15[50], ed16[50]; + char esc_name[MAX_ESCAPE_NAME_LENGTH]; + char esc_status[MAX_ESCAPE_NAME_LENGTH]; + + Dmsg1(dbglevel1, "update_media: FirstWritten=%d\n", mr->FirstWritten); + bdb_lock(); + bdb_escape_string(jcr, esc_name, mr->VolumeName, strlen(mr->VolumeName)); + bdb_escape_string(jcr, esc_status, mr->VolStatus, strlen(mr->VolStatus)); + + if (mr->set_first_written) { + Dmsg1(dbglevel2, "Set FirstWritten Vol=%s\n", mr->VolumeName); + ttime = mr->FirstWritten; + (void)localtime_r(&ttime, &tm); + strftime(dt, sizeof(dt), "%Y-%m-%d %H:%M:%S", &tm); + Mmsg(cmd, 
"UPDATE Media SET FirstWritten='%s'" + " WHERE VolumeName='%s'", dt, esc_name); + stat = UpdateDB(jcr, cmd, false); + Dmsg1(dbglevel2, "Firstwritten=%d\n", mr->FirstWritten); + } + + /* Label just done? */ + if (mr->set_label_date) { + ttime = mr->LabelDate; + if (ttime == 0) { + ttime = time(NULL); + } + (void)localtime_r(&ttime, &tm); + strftime(dt, sizeof(dt), "%Y-%m-%d %H:%M:%S", &tm); + Mmsg(cmd, "UPDATE Media SET LabelDate='%s' " + "WHERE VolumeName='%s'", dt, esc_name); + UpdateDB(jcr, cmd, false); + } + + if (mr->LastWritten != 0) { + ttime = mr->LastWritten; + (void)localtime_r(&ttime, &tm); + strftime(dt, sizeof(dt), "%Y-%m-%d %H:%M:%S", &tm); + Mmsg(cmd, "UPDATE Media Set LastWritten='%s' " + "WHERE VolumeName='%s'", dt, esc_name); + UpdateDB(jcr, cmd, false); + } + + /* sanity checks for #1066 */ + if (mr->VolReadTime < 0) { + mr->VolReadTime = 0; + } + if (mr->VolWriteTime < 0) { + mr->VolWriteTime = 0; + } + + Mmsg(cmd, "UPDATE Media SET VolJobs=%u," + "VolFiles=%u,VolBlocks=%u,VolBytes=%s,VolABytes=%s," + "VolHoleBytes=%s,VolHoles=%u,VolMounts=%u,VolErrors=%u," + "VolWrites=%s,MaxVolBytes=%s,VolStatus='%s'," + "Slot=%d,InChanger=%d,VolReadTime=%s,VolWriteTime=%s,VolType=%d," + "VolParts=%d,VolCloudParts=%d,LastPartBytes=%s," + "LabelType=%d,StorageId=%s,PoolId=%s,VolRetention=%s,VolUseDuration=%s," + "MaxVolJobs=%d,MaxVolFiles=%d,Enabled=%d,LocationId=%s," + "ScratchPoolId=%s,RecyclePoolId=%s,RecycleCount=%d,Recycle=%d," + "ActionOnPurge=%d,CacheRetention=%s,EndBlock=%u" + " WHERE VolumeName='%s'", + mr->VolJobs, mr->VolFiles, mr->VolBlocks, + edit_uint64(mr->VolBytes, ed1), + edit_uint64(mr->VolABytes, ed2), + edit_uint64(mr->VolHoleBytes, ed3), + mr->VolHoles, mr->VolMounts, mr->VolErrors, + edit_uint64(mr->VolWrites, ed4), + edit_uint64(mr->MaxVolBytes, ed5), + esc_status, mr->Slot, mr->InChanger, + edit_int64(mr->VolReadTime, ed6), + edit_int64(mr->VolWriteTime, ed7), + mr->VolType, + mr->VolParts, + mr->VolCloudParts, + edit_uint64(mr->LastPartBytes, ed8), + mr->LabelType, + edit_int64(mr->StorageId, ed9), + edit_int64(mr->PoolId, ed10), + edit_uint64(mr->VolRetention, ed11), + edit_uint64(mr->VolUseDuration, ed12), + mr->MaxVolJobs, mr->MaxVolFiles, + mr->Enabled, edit_uint64(mr->LocationId, ed13), + edit_uint64(mr->ScratchPoolId, ed14), + edit_uint64(mr->RecyclePoolId, ed15), + mr->RecycleCount,mr->Recycle, mr->ActionOnPurge, + edit_uint64(mr->CacheRetention, ed16), + mr->EndBlock, + esc_name); + + Dmsg1(dbglevel1, "%s\n", cmd); + + stat = UpdateDB(jcr, cmd, false); + + /* Make sure InChanger is 0 for any record having the same Slot */ + db_make_inchanger_unique(jcr, this, mr); + + bdb_unlock(); + return stat; +} + +/* + * Update the Media Record Default values from Pool + * + * Returns: 0 on failure + * numrows on success + */ +int BDB::bdb_update_media_defaults(JCR *jcr, MEDIA_DBR *mr) +{ + int stat; + char ed1[50], ed2[50], ed3[50], ed4[50], ed5[50], ed6[50]; + char esc[MAX_ESCAPE_NAME_LENGTH]; + bool can_be_empty; + + bdb_lock(); + if (mr->VolumeName[0]) { + bdb_escape_string(jcr, esc, mr->VolumeName, strlen(mr->VolumeName)); + Mmsg(cmd, "UPDATE Media SET " + "ActionOnPurge=%d, Recycle=%d,VolRetention=%s,VolUseDuration=%s," + "MaxVolJobs=%u,MaxVolFiles=%u,MaxVolBytes=%s,RecyclePoolId=%s,CacheRetention=%s" + " WHERE VolumeName='%s'", + mr->ActionOnPurge, mr->Recycle,edit_uint64(mr->VolRetention, ed1), + edit_uint64(mr->VolUseDuration, ed2), + mr->MaxVolJobs, mr->MaxVolFiles, + edit_uint64(mr->MaxVolBytes, ed3), + edit_uint64(mr->RecyclePoolId, ed4), + 
edit_uint64(mr->CacheRetention, ed5), + esc); + can_be_empty = false; + + } else { + Mmsg(cmd, "UPDATE Media SET " + "ActionOnPurge=%d, Recycle=%d,VolRetention=%s,VolUseDuration=%s," + "MaxVolJobs=%u,MaxVolFiles=%u,MaxVolBytes=%s,RecyclePoolId=%s,CacheRetention=%s" + " WHERE PoolId=%s", + mr->ActionOnPurge, mr->Recycle,edit_uint64(mr->VolRetention, ed1), + edit_uint64(mr->VolUseDuration, ed2), + mr->MaxVolJobs, mr->MaxVolFiles, + edit_uint64(mr->MaxVolBytes, ed3), + edit_int64(mr->RecyclePoolId, ed4), + edit_uint64(mr->CacheRetention, ed5), + edit_int64(mr->PoolId, ed6)); + can_be_empty = true; + } + + Dmsg1(dbglevel1, "%s\n", cmd); + + stat = UpdateDB(jcr, cmd, can_be_empty); + + bdb_unlock(); + return stat; +} + + +/* + * If we have a non-zero InChanger, ensure that no other Media + * record has InChanger set on the same Slot. + * + * This routine assumes the database is already locked. + */ +void BDB::bdb_make_inchanger_unique(JCR *jcr, MEDIA_DBR *mr) +{ + char ed1[50]; + char esc[MAX_ESCAPE_NAME_LENGTH]; + + if (mr->InChanger != 0 && mr->Slot != 0 && mr->StorageId != 0) { + if (!mr->sid_group) { + mr->sid_group = edit_int64(mr->StorageId, mr->sid); + } + if (mr->MediaId != 0) { + Mmsg(cmd, "UPDATE Media SET InChanger=0, Slot=0 WHERE " + "Slot=%d AND StorageId IN (%s) AND MediaId!=%s", + mr->Slot, + mr->sid_group, edit_int64(mr->MediaId, ed1)); + + } else if (*mr->VolumeName) { + bdb_escape_string(jcr, esc,mr->VolumeName,strlen(mr->VolumeName)); + Mmsg(cmd, "UPDATE Media SET InChanger=0, Slot=0 WHERE " + "Slot=%d AND StorageId IN (%s) AND VolumeName!='%s'", + mr->Slot, mr->sid_group, esc); + + } else { /* used by ua_label to reset all volume with this slot */ + Mmsg(cmd, "UPDATE Media SET InChanger=0, Slot=0 WHERE " + "Slot=%d AND StorageId IN (%s)", + mr->Slot, mr->sid_group, mr->VolumeName); + } + Dmsg1(dbglevel1, "%s\n", cmd); + UpdateDB(jcr, cmd, true); + } +} + +/* Update only Retention */ +bool BDB::bdb_update_snapshot_record(JCR *jcr, SNAPSHOT_DBR *sr) +{ + int stat, len; + char ed1[50], ed2[50]; + + len = strlen(sr->Comment); + bdb_lock(); + + esc_name = check_pool_memory_size(esc_name, len*2+1); + bdb_escape_string(jcr, esc_name, sr->Comment, len); + + Mmsg(cmd, "UPDATE Snapshot SET Retention=%s, Comment='%s' WHERE SnapshotId=%s", + edit_int64(sr->Retention, ed2), sr->Comment, edit_int64(sr->SnapshotId, ed1)); + + stat = UpdateDB(jcr, cmd, false); + bdb_unlock(); + return stat; +} + +#endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL */ diff --git a/src/cats/sqlite.c b/src/cats/sqlite.c new file mode 100644 index 00000000..3ed39be5 --- /dev/null +++ b/src/cats/sqlite.c @@ -0,0 +1,739 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
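The catalog update routines above all follow the same idiom: 64-bit values are rendered into small caller-supplied char buffers (ed1, ed2, ...) by edit_uint64()/edit_int64(), and those strings plus the escaped names are spliced into a single UPDATE statement with Mmsg(). A reduced, self-contained sketch of that buffer idiom follows; edit_u64() and build_update() are invented names for illustration, not Bacula's actual helpers.

   #include <stdio.h>
   #include <inttypes.h>

   /* Render a 64-bit value into a caller-supplied buffer and return the
    * buffer, so several conversions can appear in one printf-style call
    * (the same trick the ed1..edN buffers serve above). */
   static char *edit_u64(uint64_t val, char *buf, size_t len)
   {
      snprintf(buf, len, "%" PRIu64, val);
      return buf;
   }

   static void build_update(char *cmd, size_t cmdlen,
                            uint64_t retention, const char *esc_name)
   {
      char ed1[50];
      snprintf(cmd, cmdlen,
               "UPDATE Client SET FileRetention=%s WHERE Name='%s'",
               edit_u64(retention, ed1, sizeof(ed1)), esc_name);
   }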
+*/ +/* + * Bacula Catalog Database routines specific to SQLite + * + * Written by Kern Sibbald, January 2002 + * + * Note: at one point, this file was changed to class based by a certain + * programmer, and other than "wrapping" in a class, which is a trivial + * change for a C++ programmer, nothing substantial was done, yet all the + * code was recommitted under this programmer's name. Consequently, we + * undo those changes here. + */ + +#include "bacula.h" + +#if HAVE_SQLITE3 + +#include "cats.h" +#include +#define __BDB_SQLITE_H_ 1 +#include "bdb_sqlite.h" + +/* ----------------------------------------------------------------------- + * + * SQLite dependent defines and subroutines + * + * ----------------------------------------------------------------------- + */ + +/* List of open databases */ +static dlist *db_list = NULL; + +static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; + +/* + * When using mult_db_connections + * sqlite can be BUSY. We just need sleep a little in this case. + */ +static int my_sqlite_busy_handler(void *arg, int calls) +{ + bmicrosleep(0, 500); + return 1; +} + +BDB_SQLITE::BDB_SQLITE() +{ + BDB_SQLITE *mdb = this; + + if (db_list == NULL) { + db_list = New(dlist(mdb, &mdb->m_link)); + } + mdb->m_db_driver_type = SQL_DRIVER_TYPE_SQLITE3; + mdb->m_db_type = SQL_TYPE_SQLITE3; + mdb->m_db_driver = bstrdup("SQLite3"); + + mdb->errmsg = get_pool_memory(PM_EMSG); /* get error message buffer */ + mdb->errmsg[0] = 0; + mdb->cmd = get_pool_memory(PM_EMSG); /* get command buffer */ + mdb->cached_path = get_pool_memory(PM_FNAME); + mdb->cached_path_id = 0; + mdb->m_ref_count = 1; + mdb->fname = get_pool_memory(PM_FNAME); + mdb->path = get_pool_memory(PM_FNAME); + mdb->esc_name = get_pool_memory(PM_FNAME); + mdb->esc_path = get_pool_memory(PM_FNAME); + mdb->esc_obj = get_pool_memory(PM_FNAME); + mdb->m_use_fatal_jmsg = true; + + /* Initialize the private members. */ + mdb->m_db_handle = NULL; + mdb->m_result = NULL; + mdb->m_sqlite_errmsg = NULL; + + db_list->append(this); +} + +BDB_SQLITE::~BDB_SQLITE() +{ +} + +/* + * Initialize database data structure. In principal this should + * never have errors, or it is really fatal. 
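The busy handler above sleeps for 500 microseconds via bmicrosleep(0, 500) and returns non-zero so SQLite retries the locked operation, which is how concurrent connections (mult_db_connections) avoid immediate SQLITE_BUSY failures. A standalone sketch of the same pattern, using plain usleep() and invented function names rather than Bacula's wrappers:

   #include <sqlite3.h>
   #include <unistd.h>
   #include <stdio.h>

   /* Retry indefinitely, pausing 500 microseconds between attempts.
    * A non-zero return tells SQLite to retry the locked operation. */
   static int retry_when_busy(void *arg, int ncalls)
   {
      (void)arg;
      (void)ncalls;
      usleep(500);
      return 1;
   }

   /* Illustrative only: open a database and install the handler. */
   int open_with_busy_handler(const char *path, sqlite3 **db)
   {
      if (sqlite3_open(path, db) != SQLITE_OK) {
         fprintf(stderr, "open failed: %s\n", sqlite3_errmsg(*db));
         return -1;
      }
      sqlite3_busy_handler(*db, retry_when_busy, NULL);
      return 0;
   }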
+ */ +BDB *db_init_database(JCR *jcr, const char *db_driver, const char *db_name, const char *db_user, + const char *db_password, const char *db_address, int db_port, const char *db_socket, + const char *db_ssl_mode, const char *db_ssl_key, + const char *db_ssl_cert, const char *db_ssl_ca, + const char *db_ssl_capath, const char *db_ssl_cipher, + bool mult_db_connections, bool disable_batch_insert) +{ + BDB_SQLITE *mdb = NULL; + + P(mutex); /* lock DB queue */ + /* + * Look to see if DB already open + */ + if (db_list && !mult_db_connections) { + foreach_dlist(mdb, db_list) { + if (mdb->bdb_match_database(db_driver, db_name, db_address, db_port)) { + Dmsg1(300, "DB REopen %s\n", db_name); + mdb->increment_refcount(); + goto bail_out; + } + } + } + Dmsg0(300, "db_init_database first time\n"); + mdb = New(BDB_SQLITE()); + + mdb->m_db_name = bstrdup(db_name); + if (disable_batch_insert) { + mdb->m_disabled_batch_insert = true; + mdb->m_have_batch_insert = false; + } else { + mdb->m_disabled_batch_insert = false; +#ifdef USE_BATCH_FILE_INSERT +#ifdef HAVE_SQLITE3_THREADSAFE + mdb->m_have_batch_insert = sqlite3_threadsafe(); +#else + mdb->m_have_batch_insert = false; +#endif /* HAVE_SQLITE3_THREADSAFE */ +#else + mdb->m_have_batch_insert = false; +#endif /* USE_BATCH_FILE_INSERT */ + } + mdb->m_allow_transactions = mult_db_connections; + + /* At this time, when mult_db_connections == true, this is for + * specific console command such as bvfs or batch mode, and we don't + * want to share a batch mode or bvfs. In the future, we can change + * the creation function to add this parameter. + */ + mdb->m_dedicated = mult_db_connections; + +bail_out: + V(mutex); + return mdb; +} + + +/* + * Now actually open the database. This can generate errors, + * which are returned in the errmsg + * + * DO NOT close the database or delete mdb here !!!! + */ +bool BDB_SQLITE::bdb_open_database(JCR *jcr) +{ + bool retval = false; + char *db_file; + int len; + struct stat statbuf; + int ret; + int errstat; + int retry = 0; + BDB_SQLITE *mdb = this; + + P(mutex); + if (mdb->m_connected) { + retval = true; + goto bail_out; + } + + if ((errstat=rwl_init(&mdb->m_lock)) != 0) { + berrno be; + Mmsg1(&mdb->errmsg, _("Unable to initialize DB lock. ERR=%s\n"), + be.bstrerror(errstat)); + goto bail_out; + } + + /* + * Open the database + */ + len = strlen(working_directory) + strlen(mdb->m_db_name) + 5; + db_file = (char *)malloc(len); + strcpy(db_file, working_directory); + strcat(db_file, "/"); + strcat(db_file, m_db_name); + strcat(db_file, ".db"); + if (stat(db_file, &statbuf) != 0) { + Mmsg1(&mdb->errmsg, _("Database %s does not exist, please create it.\n"), + db_file); + free(db_file); + goto bail_out; + } + + for (mdb->m_db_handle = NULL; !mdb->m_db_handle && retry++ < 10; ) { + ret = sqlite3_open(db_file, &mdb->m_db_handle); + if (ret != SQLITE_OK) { + mdb->m_sqlite_errmsg = (char *)sqlite3_errmsg(mdb->m_db_handle); + sqlite3_close(mdb->m_db_handle); + mdb->m_db_handle = NULL; + } else { + mdb->m_sqlite_errmsg = NULL; + } + + Dmsg0(300, "sqlite_open\n"); + if (!mdb->m_db_handle) { + bmicrosleep(1, 0); + } + } + if (mdb->m_db_handle == NULL) { + Mmsg2(&mdb->errmsg, _("Unable to open Database=%s. ERR=%s\n"), + db_file, mdb->m_sqlite_errmsg ? 
mdb->m_sqlite_errmsg : _("unknown")); + free(db_file); + goto bail_out; + } + mdb->m_connected = true; + free(db_file); + + /* + * Set busy handler to wait when we use mult_db_connections = true + */ + sqlite3_busy_handler(mdb->m_db_handle, my_sqlite_busy_handler, NULL); + +#if defined(SQLITE3_INIT_QUERY) + sql_query(SQLITE3_INIT_QUERY); +#endif + + if (!bdb_check_version(jcr)) { + goto bail_out; + } + + retval = true; + +bail_out: + V(mutex); + return retval; +} + +void BDB_SQLITE::bdb_close_database(JCR *jcr) +{ + BDB_SQLITE *mdb = this; + + if (mdb->m_connected) { + bdb_end_transaction(jcr); + } + P(mutex); + mdb->m_ref_count--; + if (mdb->m_ref_count == 0) { + if (mdb->m_connected) { + sql_free_result(); + } + db_list->remove(mdb); + if (mdb->m_connected && mdb->m_db_handle) { + sqlite3_close(mdb->m_db_handle); + } + if (is_rwl_valid(&mdb->m_lock)) { + rwl_destroy(&mdb->m_lock); + } + free_pool_memory(mdb->errmsg); + free_pool_memory(mdb->cmd); + free_pool_memory(mdb->cached_path); + free_pool_memory(mdb->fname); + free_pool_memory(mdb->path); + free_pool_memory(mdb->esc_name); + free_pool_memory(mdb->esc_path); + free_pool_memory(mdb->esc_obj); + if (mdb->m_db_driver) { + free(mdb->m_db_driver); + } + if (mdb->m_db_name) { + free(mdb->m_db_name); + } + delete this; + if (db_list->size() == 0) { + delete db_list; + db_list = NULL; + } + } + V(mutex); +} + +void BDB_SQLITE::bdb_thread_cleanup(void) +{ + sqlite3_thread_cleanup(); +} + +/* + * Escape strings so SQLite is happy + * + * len is the length of the old string. Your new + * string must be long enough (max 2*old+1) to hold + * the escaped output. + */ +void BDB_SQLITE::bdb_escape_string(JCR *jcr, char *snew, char *sold, int len) +{ + char *n, *o; + + n = snew; + o = sold; + while (len--) { + switch (*o) { + case '\'': + *n++ = '\''; + *n++ = '\''; + o++; + break; + case 0: + *n++ = '\\'; + *n++ = 0; + o++; + break; + default: + *n++ = *o++; + break; + } + } + *n = 0; +} + +/* + * Escape binary object so that SQLite is happy + * Memory is stored in BDB struct, no need to free it + * + * TODO: this should be implemented (escape \0) + */ +char *BDB_SQLITE::bdb_escape_object(JCR *jcr, char *old, int len) +{ + int l; + int max = len*2; /* TODO: too big, should be *4/3 */ + + esc_obj = check_pool_memory_size(esc_obj, max); + l = bin_to_base64(esc_obj, max, old, len, true); + esc_obj[l] = 0; + ASSERT(l < max); /* TODO: add check for l */ + + return esc_obj; +} + +/* + * Unescape binary object so that SQLIte is happy + * + * TODO: need to be implemented (escape \0) + */ + +void BDB_SQLITE::bdb_unescape_object(JCR *jcr, char *from, int32_t expected_len, + POOLMEM **dest, int32_t *dest_len) +{ + if (!from) { + *dest[0] = 0; + *dest_len = 0; + return; + } + *dest = check_pool_memory_size(*dest, expected_len+1); + base64_to_bin(*dest, expected_len+1, from, strlen(from)); + *dest_len = expected_len; + (*dest)[expected_len] = 0; +} + +/* + * Start a transaction. This groups inserts and makes things + * more efficient. Usually started when inserting file attributes. 
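bdb_escape_string() above relies on SQLite's quoting rule: a single quote inside a string literal is escaped by doubling it, so the escaped output can grow to at most 2*len+1 bytes, exactly the bound stated in its comment. The sketch below shows only that quote-doubling part (the original also special-cases an embedded NUL); sqlite_escape() is an invented name and does not use Bacula's pool memory.

   #include <stddef.h>

   /* Escape 'src' (len bytes) for use inside single quotes in SQLite SQL.
    * 'dst' must hold at least 2*len + 1 bytes; returns the output length. */
   static size_t sqlite_escape(char *dst, const char *src, size_t len)
   {
      size_t n = 0;
      for (size_t i = 0; i < len; i++) {
         if (src[i] == '\'') {
            dst[n++] = '\'';            /* double the quote */
         }
         dst[n++] = src[i];
      }
      dst[n] = '\0';
      return n;
   }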
+ */ +void BDB_SQLITE::bdb_start_transaction(JCR *jcr) +{ + BDB_SQLITE *mdb = this; + + if (!jcr->attr) { + jcr->attr = get_pool_memory(PM_FNAME); + } + if (!jcr->ar) { + jcr->ar = (ATTR_DBR *)malloc(sizeof(ATTR_DBR)); + memset(jcr->ar, 0, sizeof(ATTR_DBR)); + } + + if (!mdb->m_allow_transactions) { + return; + } + + bdb_lock(); + /* + * Allow only 10,000 changes per transaction + */ + if (mdb->m_transaction && mdb->changes > 10000) { + bdb_end_transaction(jcr); + } + if (!mdb->m_transaction) { + sql_query("BEGIN"); /* begin transaction */ + Dmsg0(400, "Start SQLite transaction\n"); + mdb->m_transaction = true; + } + bdb_unlock(); +} + +void BDB_SQLITE::bdb_end_transaction(JCR *jcr) +{ + BDB_SQLITE *mdb = this; + + if (jcr && jcr->cached_attribute) { + Dmsg0(400, "Flush last cached attribute.\n"); + if (!bdb_create_attributes_record(jcr, jcr->ar)) { + Jmsg1(jcr, M_FATAL, 0, _("Attribute create error. %s"), jcr->db->bdb_strerror()); + } + jcr->cached_attribute = false; + } + + if (!mdb->m_allow_transactions) { + return; + } + + bdb_lock(); + if (mdb->m_transaction) { + sql_query("COMMIT"); /* end transaction */ + mdb->m_transaction = false; + Dmsg1(400, "End SQLite transaction changes=%d\n", changes); + } + mdb->changes = 0; + bdb_unlock(); +} + +struct rh_data { + BDB_SQLITE *mdb; + DB_RESULT_HANDLER *result_handler; + void *ctx; + bool initialized; +}; + +/* + * Convert SQLite's callback into Bacula DB callback + */ +static int sqlite_result_handler(void *arh_data, int num_fields, char **rows, char **col_names) +{ + struct rh_data *rh_data = (struct rh_data *)arh_data; + + /* The db_sql_query doesn't have access to m_results, so if we wan't to get + * fields information, we need to use col_names + */ + if (!rh_data->initialized) { + rh_data->mdb->set_column_names(col_names, num_fields); + rh_data->initialized = true; + } + if (rh_data->result_handler) { + (*(rh_data->result_handler))(rh_data->ctx, num_fields, rows); + } + + return 0; +} + +/* + * Submit a general SQL command (cmd), and for each row returned, + * the result_handler is called with the ctx. 
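bdb_sql_query(), which follows, bridges SQLite's sqlite3_exec() row callback to Bacula's DB_RESULT_HANDLER by packing the handler and its context into a small struct passed through the void* argument, as sqlite_result_handler() above does with rh_data. A minimal version of that bridging pattern, with invented names (ROW_HANDLER, run_query) standing in for the Bacula types:

   #include <sqlite3.h>
   #include <stdio.h>

   typedef void (ROW_HANDLER)(void *ctx, int nfields, char **row);

   struct bridge {
      ROW_HANDLER *handler;
      void *ctx;
   };

   /* sqlite3_exec() calls this once per result row; forward to our handler. */
   static int exec_cb(void *arg, int nfields, char **row, char **col_names)
   {
      struct bridge *b = (struct bridge *)arg;
      (void)col_names;
      if (b->handler) {
         b->handler(b->ctx, nfields, row);
      }
      return 0;                        /* non-zero would abort the query */
   }

   int run_query(sqlite3 *db, const char *sql, ROW_HANDLER *handler, void *ctx)
   {
      struct bridge b = { handler, ctx };
      char *errmsg = NULL;
      int rc = sqlite3_exec(db, sql, exec_cb, &b, &errmsg);
      if (rc != SQLITE_OK) {
         fprintf(stderr, "query failed: %s\n", errmsg ? errmsg : "unknown");
         sqlite3_free(errmsg);
      }
      return rc == SQLITE_OK;
   }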
+ */ +bool BDB_SQLITE::bdb_sql_query(const char *query, DB_RESULT_HANDLER *result_handler, void *ctx) +{ + BDB_SQLITE *mdb = this; + bool retval = false; + int stat; + struct rh_data rh_data; + + Dmsg1(500, "db_sql_query starts with '%s'\n", query); + + bdb_lock(); + mdb->errmsg[0] = 0; + if (mdb->m_sqlite_errmsg) { + sqlite3_free(mdb->m_sqlite_errmsg); + mdb->m_sqlite_errmsg = NULL; + } + sql_free_result(); + + rh_data.ctx = ctx; + rh_data.mdb = this; + rh_data.initialized = false; + rh_data.result_handler = result_handler; + + stat = sqlite3_exec(m_db_handle, query, sqlite_result_handler, + (void *)&rh_data, &m_sqlite_errmsg); + + if (stat != SQLITE_OK) { + Mmsg(mdb->errmsg, _("Query failed: %s: ERR=%s\n"), query, sql_strerror()); + Dmsg0(500, "db_sql_query finished\n"); + goto bail_out; + } + Dmsg0(500, "db_sql_query finished\n"); + sql_free_result(); + retval = true; + +bail_out: + bdb_unlock(); + return retval; +} + +/* + * Submit a sqlite query and retrieve all the data + */ +bool BDB_SQLITE::sql_query(const char *query, int flags) +{ + int stat; + bool retval = false; + BDB_SQLITE *mdb = this; + + Dmsg1(500, "sql_query starts with '%s'\n", query); + + sql_free_result(); + if (mdb->m_sqlite_errmsg) { + sqlite3_free(mdb->m_sqlite_errmsg); + mdb->m_sqlite_errmsg = NULL; + } + + stat = sqlite3_get_table(m_db_handle, (char *)query, &m_result, + &m_num_rows, &m_num_fields, &m_sqlite_errmsg); + + mdb->m_row_number = 0; /* no row fetched */ + if (stat != 0) { /* something went wrong */ + mdb->m_num_rows = mdb->m_num_fields = 0; + Dmsg0(500, "sql_query finished\n"); + } else { + Dmsg0(500, "sql_query finished\n"); + retval = true; + } + return retval; +} + +void BDB_SQLITE::sql_free_result(void) +{ + BDB_SQLITE *mdb = this; + + bdb_lock(); + if (mdb->m_fields) { + free(mdb->m_fields); + mdb->m_fields = NULL; + } + if (mdb->m_result) { + sqlite3_free_table(mdb->m_result); + mdb->m_result = NULL; + } + mdb->m_col_names = NULL; + mdb->m_num_rows = mdb->m_num_fields = 0; + bdb_unlock(); +} + +/* + * Fetch one row at a time + */ +SQL_ROW BDB_SQLITE::sql_fetch_row(void) +{ + BDB_SQLITE *mdb = this; + if (!mdb->m_result || (mdb->m_row_number >= mdb->m_num_rows)) { + return NULL; + } + mdb->m_row_number++; + return &mdb->m_result[mdb->m_num_fields * mdb->m_row_number]; +} + +const char *BDB_SQLITE::sql_strerror(void) +{ + BDB_SQLITE *mdb = this; + return mdb->m_sqlite_errmsg ? mdb->m_sqlite_errmsg : "unknown"; +} + +void BDB_SQLITE::sql_data_seek(int row) +{ + BDB_SQLITE *mdb = this; + /* Set the row number to be returned on the next call to sql_fetch_row */ + mdb->m_row_number = row; +} + +int BDB_SQLITE::sql_affected_rows(void) +{ + BDB_SQLITE *mdb = this; + return sqlite3_changes(mdb->m_db_handle); +} + +uint64_t BDB_SQLITE::sql_insert_autokey_record(const char *query, const char *table_name) +{ + BDB_SQLITE *mdb = this; + /* First execute the insert query and then retrieve the currval. */ + if (!sql_query(query)) { + return 0; + } + + mdb->m_num_rows = sql_affected_rows(); + if (mdb->m_num_rows != 1) { + return 0; + } + + mdb->changes++; + + return sqlite3_last_insert_rowid(mdb->m_db_handle); +} + +SQL_FIELD *BDB_SQLITE::sql_fetch_field(void) +{ + BDB_SQLITE *mdb = this; + int i, j, len; + + /* We are in the middle of a db_sql_query and we want to get fields info */ + if (mdb->m_col_names != NULL) { + if (mdb->m_num_fields > mdb->m_field_number) { + mdb->m_sql_field.name = mdb->m_col_names[mdb->m_field_number]; + /* We don't have the maximum field length, so we can use 80 as + * estimation. 
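sql_query() and sql_fetch_row() above use sqlite3_get_table(), whose result is a single flat char** array: the first num_fields entries are the column names and the data of row N starts at index num_fields * N. That layout is why sql_fetch_row() increments m_row_number before indexing, so the first fetch skips the header row. A compact, self-contained illustration of walking that layout (dump_table is an invented helper):

   #include <sqlite3.h>
   #include <stdio.h>

   /* Print every row of a query using the flat sqlite3_get_table() layout:
    *   result[0 .. ncols-1]                 -> column names
    *   result[ncols*r .. ncols*r+ncols-1]   -> data of row r (r starts at 1) */
   int dump_table(sqlite3 *db, const char *sql)
   {
      char **result = NULL, *errmsg = NULL;
      int nrows = 0, ncols = 0;

      if (sqlite3_get_table(db, sql, &result, &nrows, &ncols, &errmsg) != SQLITE_OK) {
         fprintf(stderr, "get_table failed: %s\n", errmsg ? errmsg : "unknown");
         sqlite3_free(errmsg);
         return -1;
      }
      for (int r = 1; r <= nrows; r++) {       /* row 0 holds the column names */
         for (int c = 0; c < ncols; c++) {
            char *val = result[r * ncols + c];
            printf("%s=%s ", result[c], val ? val : "NULL");
         }
         printf("\n");
      }
      sqlite3_free_table(result);
      return nrows;
   }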
+ */ + len = MAX(cstrlen(mdb->m_sql_field.name), 80/mdb->m_num_fields); + mdb->m_sql_field.max_length = len; + + mdb->m_field_number++; + mdb->m_sql_field.type = 0; /* not numeric */ + mdb->m_sql_field.flags = 1; /* not null */ + return &mdb->m_sql_field; + } else { /* too much fetch_field() */ + return NULL; + } + } + + /* We are after a sql_query() that stores the result in m_results */ + if (!mdb->m_fields || mdb->m_fields_size < mdb->m_num_fields) { + if (mdb->m_fields) { + free(mdb->m_fields); + mdb->m_fields = NULL; + } + Dmsg1(500, "allocating space for %d fields\n", m_num_fields); + mdb->m_fields = (SQL_FIELD *)malloc(sizeof(SQL_FIELD) * mdb->m_num_fields); + mdb->m_fields_size = mdb->m_num_fields; + + for (i = 0; i < mdb->m_num_fields; i++) { + Dmsg1(500, "filling field %d\n", i); + mdb->m_fields[i].name = mdb->m_result[i]; + mdb->m_fields[i].max_length = cstrlen(mdb->m_fields[i].name); + for (j = 1; j <= mdb->m_num_rows; j++) { + if (mdb->m_result[i + mdb->m_num_fields * j]) { + len = (uint32_t)cstrlen(mdb->m_result[i + mdb->m_num_fields * j]); + } else { + len = 0; + } + if (len > mdb->m_fields[i].max_length) { + mdb->m_fields[i].max_length = len; + } + } + mdb->m_fields[i].type = 0; + mdb->m_fields[i].flags = 1; /* not null */ + + Dmsg4(500, "sql_fetch_field finds field '%s' has length='%d' type='%d' and IsNull=%d\n", + mdb->m_fields[i].name, mdb->m_fields[i].max_length, mdb->m_fields[i].type, mdb->m_fields[i].flags); + } + } + + /* Increment field number for the next time around */ + return &mdb->m_fields[mdb->m_field_number++]; +} + +bool BDB_SQLITE::sql_field_is_not_null(int field_type) +{ + if (field_type == 1) { + return true; + } + return false; +} + +bool BDB_SQLITE::sql_field_is_numeric(int field_type) +{ + if (field_type == 1) { + return true; + } + return false; +} + +/* + * Returns true if OK + * false if failed + */ +bool BDB_SQLITE::sql_batch_start(JCR *jcr) +{ + bool ret; + + bdb_lock(); + ret = sql_query("CREATE TEMPORARY TABLE batch (" + "FileIndex integer," + "JobId integer," + "Path blob," + "Name blob," + "LStat tinyblob," + "MD5 tinyblob," + "DeltaSeq integer)"); + bdb_unlock(); + + return ret; +} + +/* Set error to something to abort operation */ +/* + * Returns true if OK + * false if failed + */ +bool BDB_SQLITE::sql_batch_end(JCR *jcr, const char *error) +{ + m_status = 0; + return true; +} + +/* + * Returns true if OK + * false if failed + */ +bool BDB_SQLITE::sql_batch_insert(JCR *jcr, ATTR_DBR *ar) +{ + BDB_SQLITE *mdb = this; + const char *digest; + char ed1[50]; + + mdb->esc_name = check_pool_memory_size(mdb->esc_name, mdb->fnl*2+1); + bdb_escape_string(jcr, mdb->esc_name, mdb->fname, mdb->fnl); + + mdb->esc_path = check_pool_memory_size(mdb->esc_path, mdb->pnl*2+1); + bdb_escape_string(jcr, mdb->esc_path, mdb->path, mdb->pnl); + + if (ar->Digest == NULL || ar->Digest[0] == 0) { + digest = "0"; + } else { + digest = ar->Digest; + } + + Mmsg(mdb->cmd, "INSERT INTO batch VALUES " + "(%d,%s,'%s','%s','%s','%s',%u)", + ar->FileIndex, edit_int64(ar->JobId,ed1), mdb->esc_path, + mdb->esc_name, ar->attr, digest, ar->DeltaSeq); + + return sql_query(mdb->cmd); +} + + +#endif /* HAVE_SQLITE3 */ diff --git a/src/cats/sqlite.in b/src/cats/sqlite.in new file mode 100644 index 00000000..61c23aed --- /dev/null +++ b/src/cats/sqlite.in @@ -0,0 +1,10 @@ +#!/bin/sh +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# shell script to invoke SQLite on Bacula database + +bindir=@SQLITE_BINDIR@ +db_name=@db_name@ 
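The batch-insert path above stages attribute rows in a TEMPORARY table named batch, one INSERT per file, so the expensive work of folding them into the permanent catalog tables can happen once at the end of the job rather than per row (that merge step is not part of this excerpt). The general shape of that staging pattern, reduced to plain SQLite calls with invented table and column names (staging, Target, Name):

   #include <sqlite3.h>

   /* Stage many rows in a TEMPORARY table, then fold them into the real
    * table with a single INSERT ... SELECT.  Error handling is minimal
    * because this is only a sketch of the staging flow. */
   int staged_insert(sqlite3 *db, const char * const *names, int count)
   {
      if (sqlite3_exec(db, "CREATE TEMPORARY TABLE staging (Name TEXT)",
                       NULL, NULL, NULL) != SQLITE_OK) {
         return 0;
      }
      sqlite3_exec(db, "BEGIN", NULL, NULL, NULL);
      for (int i = 0; i < count; i++) {
         /* %Q quotes and escapes the string for us */
         char *stmt = sqlite3_mprintf("INSERT INTO staging VALUES (%Q)", names[i]);
         sqlite3_exec(db, stmt, NULL, NULL, NULL);
         sqlite3_free(stmt);
      }
      sqlite3_exec(db, "COMMIT", NULL, NULL, NULL);
      return sqlite3_exec(db,
               "INSERT INTO Target (Name) SELECT Name FROM staging",
               NULL, NULL, NULL) == SQLITE_OK;
   }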
+$bindir/sqlite @working_dir@/${db_name}.db diff --git a/src/cats/update_bacula_tables.in b/src/cats/update_bacula_tables.in new file mode 100755 index 00000000..cafeaa17 --- /dev/null +++ b/src/cats/update_bacula_tables.in @@ -0,0 +1,50 @@ +#!/bin/sh +# +# Copyright (C) 2000-2017 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# This routine alters the appropriately configured +# Bacula tables for PostgreSQL, Ingres, MySQL, or SQLite. +# + +# can be used to change the current user with su +pre_command="sh -c" + +default_db_type=@DEFAULT_DB_TYPE@ + +# +# See if the first argument is a valid backend name. +# If so the user overrides the default database backend. +# +if [ $# -gt 0 ]; then + case $1 in + sqlite3) + db_type=$1 + shift + ;; + mysql) + db_type=$1 + shift + ;; + postgresql) + db_type=$1 + shift + ;; + *) + ;; + esac +fi + +# +# If no new db_type is gives use the default db_type. +# +if [ -z "${db_type}" ]; then + db_type="${default_db_type}" +fi + +if [ $db_type = postgresql -a "$UID" = 0 ]; then + pre_command="su - postgres -c" +fi + +echo "Altering ${db_type} tables" +$pre_command "@scriptdir@/update_${db_type}_tables $*" diff --git a/src/cats/update_mysql_tables.in b/src/cats/update_mysql_tables.in new file mode 100644 index 00000000..2dbc3434 --- /dev/null +++ b/src/cats/update_mysql_tables.in @@ -0,0 +1,258 @@ +#!/bin/sh +# +# Copyright (C) 2000-2017 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# Shell script to update MySQL tables from Bacula Community version +# 5.0.x, 5.2.x, 7.0.x, 7.2.x, 7.4.x +# +echo " " +echo "This script will update a Bacula MySQL database from version 12-16 to 16" +echo " " +echo "Depending on the current version of your catalog," +echo "you may have to run this script multiple times." +echo " " + +bindir=@MYSQL_BINDIR@ +PATH="$bindir:$PATH" +db_name=@db_name@ + +ARGS=$* + +getVersion() +{ + mysql $ARGS -D ${db_name} -e "select VersionId from Version LIMIT 1\G" >/tmp/$$ + DBVERSION=`sed -n -e 's/^VersionId: \(.*\)$/\1/p' /tmp/$$` +} + +getVersion + +if [ "x$DBVERSION" = x ]; then + echo + echo "Unable to detect database version, you can specify connection information" + echo "on the command line." + echo "Error. Cannot upgrade this database." + exit 1 +fi + +if [ "$DBVERSION" -lt 12 -o "$DBVERSION" -gt 16 ] ; then + echo " " + echo "The existing database is version $DBVERSION !!" + echo "This script can only update an existing version 12-16 to version 16." + echo "Error. Cannot upgrade this database." 
+ echo " " + exit 1 +fi + +# For all versions, we need to create the Index on Media(StorageId) +# It may fail, but it's not a big problem +# mysql $* -f </dev/null 2> /dev/null +# CREATE INDEX media_storageid_idx ON Media (StorageId); +# END-OF-DATA + +if [ "$DBVERSION" -eq 12 ] ; then + if mysql $* -f < Found existing $$srcconf, installing new conf file as $$destconf"; \ + else \ + destconf=$$srcconf; \ + if test -f ${DESTDIR}${sysconfdir}/console.conf; then \ + echo "Existing console.conf moved to bconsole.conf"; \ + @$(MV) ${DESTDIR}${sysconfdir}/console.conf ${DESTDIR}${sysconfdir}/bconsole.conf; \ + destconf=$$srcconf.new; \ + fi; \ + fi; \ + echo "${INSTALL_CONFIG} $$srcconf ${DESTDIR}${sysconfdir}/$$destconf"; \ + ${INSTALL_CONFIG} $$srcconf ${DESTDIR}${sysconfdir}/$$destconf + if test -f static-bconsole; then \ + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) static-bconsole $(DESTDIR)$(sbindir)/static-bconsole; \ + fi + +uninstall: + (cd $(DESTDIR)$(sbindir); $(RMF) bconsole static-bconsole bbconsjson) + (cd $(DESTDIR)$(sysconfdir); $(RMF) console.conf bconsole.conf bconsole.conf.new) + + +# Semi-automatic generation of dependencies: +# Use gcc -MM because X11 `makedepend' doesn't work on all systems +# and it also includes system headers. +# `semi'-automatic since dependencies are generated at distribution time. + +depend: + @$(MV) Makefile Makefile.bak + @$(SED) "/^# DO NOT DELETE:/,$$ d" Makefile.bak > Makefile + @$(ECHO) "# DO NOT DELETE: nice dependency list follows" >> Makefile + @$(CXX) -S -M $(CPPFLAGS) $(CONS_INC) -I$(srcdir) -I$(basedir) *.c >> Makefile + @if test -f Makefile ; then \ + $(RMF) Makefile.bak; \ + else \ + $(MV) Makefile.bak Makefile; \ + echo " ===== Something went wrong in make depend ====="; \ + fi + +# ----------------------------------------------------------------------- +# DO NOT DELETE: nice dependency list follows diff --git a/src/console/authenticate.c b/src/console/authenticate.c new file mode 100644 index 00000000..54052201 --- /dev/null +++ b/src/console/authenticate.c @@ -0,0 +1,188 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * Bacula UA authentication. Provides authentication with + * the Director. + * + * Kern Sibbald, June MMI + * + * This routine runs as a thread and must be thread reentrant. 
+ * + * Basic tasks done here: + * + */ + +#include "bacula.h" +#include "console_conf.h" + +/* + * Version at end of Hello Enterprise: + * prior to 06Aug13 no version + * 1 06Aug13 - added comm line compression + * Community + * prior to 06Aug13 no version + * 100 14Feb17 - added comm line compression + */ +#define UA_VERSION 100 + +void senditf(const char *fmt, ...); +void sendit(const char *buf); + +/* Commands sent to Director */ +static char hello[] = "Hello %s calling %d\n"; + +/* Response from Director */ +static char oldOKhello[] = "1000 OK:"; +static char newOKhello[] = "1000 OK: %d"; +static char FDOKhello[] = "2000 OK Hello %d"; + +/* Forward referenced functions */ + +/* + * Authenticate Director + */ +int authenticate_director(BSOCK *dir, DIRRES *director, CONRES *cons) +{ + int tls_local_need = BNET_TLS_NONE; + int tls_remote_need = BNET_TLS_NONE; + bool tls_authenticate; + int compatible = true; + int dir_version = 0; + int fd_version = 0; + char bashed_name[MAX_NAME_LENGTH]; + char *password; + TLS_CONTEXT *tls_ctx = NULL; + + /* + * Send my name to the Director then do authentication + */ + if (cons) { + bstrncpy(bashed_name, cons->hdr.name, sizeof(bashed_name)); + bash_spaces(bashed_name); + password = cons->password; + /* TLS Requirement */ + if (cons->tls_enable) { + if (cons->tls_require) { + tls_local_need = BNET_TLS_REQUIRED; + } else { + tls_local_need = BNET_TLS_OK; + } + } + if (cons->tls_authenticate) { + tls_local_need = BNET_TLS_REQUIRED; + } + tls_authenticate = cons->tls_authenticate; + tls_ctx = cons->tls_ctx; + } else { + bstrncpy(bashed_name, "*UserAgent*", sizeof(bashed_name)); + password = director->password; + /* TLS Requirement */ + if (director->tls_enable) { + if (director->tls_require) { + tls_local_need = BNET_TLS_REQUIRED; + } else { + tls_local_need = BNET_TLS_OK; + } + } + + if (director->tls_authenticate) { + tls_local_need = BNET_TLS_REQUIRED; + } + tls_authenticate = director->tls_authenticate; + tls_ctx = director->tls_ctx; + } + + + /* Timeout Hello after 15 secs */ + btimer_t *tid = start_bsock_timer(dir, 15); + dir->fsend(hello, bashed_name, UA_VERSION); + + if (!cram_md5_respond(dir, password, &tls_remote_need, &compatible) || + !cram_md5_challenge(dir, password, tls_local_need, compatible)) { + goto bail_out; + } + + /* Verify that the remote host is willing to meet our TLS requirements */ + if (tls_remote_need < tls_local_need && tls_local_need != BNET_TLS_OK && tls_remote_need != BNET_TLS_OK) { + sendit(_("Authorization problem:" + " Remote server did not advertise required TLS support.\n")); + goto bail_out; + } + + /* Verify that we are willing to meet the remote host's requirements */ + if (tls_remote_need > tls_local_need && tls_local_need != BNET_TLS_OK && tls_remote_need != BNET_TLS_OK) { + sendit(_("Authorization problem:" + " Remote server requires TLS.\n")); + goto bail_out; + } + + /* Is TLS Enabled? */ + if (tls_local_need >= BNET_TLS_OK && tls_remote_need >= BNET_TLS_OK) { + /* Engage TLS! Full Speed Ahead! */ + if (!bnet_tls_client(tls_ctx, dir, NULL)) { + sendit(_("TLS negotiation failed\n")); + goto bail_out; + } + if (tls_authenticate) { /* Authenticate only? 
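The two "Authorization problem" checks above compare the advertised TLS levels: assuming the usual ordering NONE < OK < REQUIRED implied by those comparisons, the handshake fails only when one end requires TLS and the other offers none, and TLS is actually started only when both ends are at least at OK. A small sketch of that rule, using locally defined illustrative constants rather than Bacula's BNET_TLS_* headers:

   enum { TLS_NONE = 0, TLS_OK = 1, TLS_REQUIRED = 2 };  /* illustrative values */

   /* Returns 1 when the local and remote TLS requirements are compatible. */
   static int tls_needs_compatible(int local_need, int remote_need)
   {
      if (remote_need < local_need && local_need != TLS_OK && remote_need != TLS_OK) {
         return 0;                  /* we require TLS, remote offers none */
      }
      if (remote_need > local_need && local_need != TLS_OK && remote_need != TLS_OK) {
         return 0;                  /* remote requires TLS, we offer none */
      }
      return 1;
   }

   /* TLS is negotiated only when both sides are at least at TLS_OK. */
   static int tls_should_start(int local_need, int remote_need)
   {
      return local_need >= TLS_OK && remote_need >= TLS_OK;
   }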
*/ + dir->free_tls(); /* yes, shutdown tls */ + } + } + + /* + * It's possible that the TLS connection will + * be dropped here if an invalid client certificate was presented + */ + Dmsg1(6, ">dird: %s", dir->msg); + if (dir->recv() <= 0) { + senditf(_("Bad response to Hello command: ERR=%s\n"), + dir->bstrerror()); + goto bail_out; + } + + Dmsg1(10, "msg); + if (strncmp(dir->msg, oldOKhello, sizeof(oldOKhello)-1) == 0) { + /* If Dir version exists, get it */ + sscanf(dir->msg, newOKhello, &dir_version); + sendit(dir->msg); + /* Check for hello from FD */ + } else if (sscanf(dir->msg, FDOKhello, &fd_version) == 1) { + sendit(dir->msg); + } else { + sendit(_("Director rejected Hello command\n")); + goto bail_out; + } + /* Turn on compression for newer Directors */ + if (dir_version >= 103 && (!cons || cons->comm_compression)) { + dir->set_compress(); + } else { + dir->clear_compress(); + } + /* ***FIXME*** should turn on compression for FD if possible */ + stop_bsock_timer(tid); + return 1; + +bail_out: + stop_bsock_timer(tid); + sendit( _("Director authorization problem.\n" + "Most likely the passwords do not agree.\n" + "If you are using TLS, there may have been a certificate validation error during the TLS handshake.\n" + "For help, please see " MANUAL_AUTH_URL "\n")); + return 0; +} diff --git a/src/console/bbconsjson.c b/src/console/bbconsjson.c new file mode 100644 index 00000000..d4c088d1 --- /dev/null +++ b/src/console/bbconsjson.c @@ -0,0 +1,598 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * Bacula Console .conf to Json program. + * + * Kern Sibbald, September MMXII + * + */ + +#include "bacula.h" +#include "lib/breg.h" +#include "console_conf.h" +#include "jcr.h" + +/* Imported variables */ +#if defined(_MSC_VER) +extern "C" { // work around visual compiler mangling variables + extern URES res_all; +} +#else +extern URES res_all; +#endif +extern s_kw msg_types[]; +extern RES_TABLE resources[]; + +/* Exported functions */ +void senditf(const char *fmt, ...); +void sendit(const char *buf); + +/* Imported functions */ +extern bool parse_cons_config(CONFIG *config, const char *configfile, int exit_code); + +typedef struct +{ + /* default { { "Director": { "Name": aa, ...} }, { "Job": {..} */ + bool do_list; /* [ {}, {}, ..] or { "aa": {}, "bb": {}, ...} */ + bool do_one; /* { "Name": "aa", "Description": "test, ... 
} */ + bool do_only_data; /* [ {}, {}, {}, ] */ + char *resource_type; + char *resource_name; + regex_t directive_reg; +} display_filter; + +/* Forward referenced functions */ +static void terminate_console(int sig); +static int check_resources(); +//static void ressendit(void *ua, const char *fmt, ...); +//static void dump_resource_types(); +//static void dump_directives(); +static void dump_json(display_filter *filter); + +/* Static variables */ +static char *configfile = NULL; +static FILE *output = stdout; +static bool teeout = false; /* output to output and stdout */ +static int numdir; +static POOLMEM *args; +static CONFIG *config; + + +#define CONFIG_FILE "bconsole.conf" /* default configuration file */ + +static void usage() +{ + fprintf(stderr, _( +PROG_COPYRIGHT +"\n%sVersion: " VERSION " (" BDATE ") %s %s %s\n\n" +"Usage: bconsjson [options] [config_file]\n" +" -r get resource type \n" +" -n get resource \n" +" -l get only directives matching dirs (use with -r)\n" +" -D get only data\n" +" -c set configuration file to file\n" +" -d set debug level to \n" +" -dt print timestamp in debug output\n" +" -t test - read configuration and exit\n" +" -v verbose\n" +" -? print this message.\n" +"\n"), 2012, BDEMO, HOST_OS, DISTNAME, DISTVER); + + exit(1); +} + +/********************************************************************* + * + * Bacula console conf to Json + * + */ +int main(int argc, char *argv[]) +{ + int ch; + bool test_config = false; + display_filter filter; + memset(&filter, 0, sizeof(filter)); + int rtn = 0; + + setlocale(LC_ALL, ""); + bindtextdomain("bacula", LOCALEDIR); + textdomain("bacula"); + + if (init_crypto() != 0) { + Emsg0(M_ERROR_TERM, 0, _("Cryptography library initialization failed.\n")); + } + + init_stack_dump(); + lmgr_init_thread(); + my_name_is(argc, argv, "bconsole"); + init_msg(NULL, NULL); + working_directory = "/tmp"; + args = get_pool_memory(PM_FNAME); + + while ((ch = getopt(argc, argv, "n:vDabc:d:jl:r:t?")) != -1) { + switch (ch) { + case 'D': + filter.do_only_data = true; + break; + case 'a': +// list_all = true; + break; + + case 'c': /* configuration file */ + if (configfile != NULL) { + free(configfile); + } + configfile = bstrdup(optarg); + break; + + break; + + case 'd': + if (*optarg == 't') { + dbg_timestamp = true; + } else { + debug_level = atoi(optarg); + if (debug_level <= 0) { + debug_level = 1; + } + } + + case 'l': + filter.do_list = true; + if (regcomp(&filter.directive_reg, optarg, REG_EXTENDED) != 0) { + Jmsg((JCR *)NULL, M_ERROR_TERM, 0, + _("Please use valid -l argument: %s\n"), optarg); + } + break; + + case 'r': + filter.resource_type = optarg; + break; + + case 'n': + filter.resource_name = optarg; + break; + + case 't': + test_config = true; + break; + + case 'v': /* verbose */ + verbose++; + break; + + case '?': + default: + usage(); + exit(1); + } + } + argc -= optind; + argv += optind; + + OSDependentInit(); + + if (argc) { + usage(); + exit(1); + } + + if (filter.do_list && !filter.resource_type) { + usage(); + } + + if (filter.resource_type && filter.resource_name) { + filter.do_one = true; + } + + if (configfile == NULL || configfile[0] == 0) { + configfile = bstrdup(CONFIG_FILE); + } + + if (test_config && verbose > 0) { + char buf[1024]; + find_config_file(configfile, buf, sizeof(buf)); + printf("config_file=%s\n", buf); + } + + config = New(CONFIG()); + config->encode_password(false); + parse_cons_config(config, configfile, M_ERROR_TERM); + + if (!check_resources()) { + Emsg1(M_ERROR_TERM, 0, _("Please correct 
configuration file: %s\n"), configfile); + } + + if (test_config) { + terminate_console(0); + exit(0); + } + + dump_json(&filter); + + terminate_console(0); + return rtn; +} + +/* Cleanup and then exit */ +static void terminate_console(int sig) +{ + static bool already_here = false; + + if (already_here) { /* avoid recursive temination problems */ + exit(1); + } + already_here = true; + stop_watchdog(); + delete config; + config = NULL; + free(res_head); + res_head = NULL; + free_pool_memory(args); + lmgr_cleanup_main(); + + if (sig != 0) { + exit(1); + } + return; +} + + +/* + * Dump out all resources in json format. + * Note!!!! This routine must be in this file rather + * than in src/lib/parser_conf.c otherwise the pointers + * will be all messed up. + */ +static void dump_json(display_filter *filter) +{ + int resinx, item; + int directives; + bool first_res; + bool first_directive; + RES_ITEM *items; + RES *res; + HPKT hpkt; + regmatch_t pmatch[32]; + + init_hpkt(hpkt); + + /* List resources and directives */ + if (filter->do_only_data) { + printf("["); + + /* { "aa": { "Name": "aa",.. }, "bb": { "Name": "bb", ... } + * or print a single item + */ + } else if (filter->do_one || filter->do_list) { + printf("{"); + + } else { + /* [ { "Client": { "Name": "aa",.. } }, { "Director": { "Name": "bb", ... } } ]*/ + printf("["); + } + + first_res = true; + /* Loop over all resource types */ + for (resinx=0; resources[resinx].name; resinx++) { + /* Skip this resource type */ + if (filter->resource_type && + strcasecmp(filter->resource_type, resources[resinx].name) != 0) { + continue; + } + + directives = 0; + /* Loop over all resources of this type */ + foreach_rblist(res, res_head[resinx]->res_list) { + hpkt.res = res; + items = resources[resinx].items; + if (!items) { + break; + } + + /* Copy the resource into res_all */ + memcpy(&res_all, res, sizeof(res_all)); + + if (filter->resource_name) { + bool skip=true; + /* The Name should be at the first place, so this is not a real loop */ + for (item=0; items[item].name; item++) { + if (strcasecmp(items[item].name, "Name") == 0) { + if (strcasecmp(*(items[item].value), filter->resource_name) == 0) { + skip = false; + } + break; + } + } + if (skip) { /* The name doesn't match, so skip it */ + continue; + } + } + + if (first_res) { + printf("\n"); + + } else { + printf(",\n"); + } + + if (filter->do_only_data) { + printf(" {"); + + } else if (filter->do_one) { + /* Nothing to print */ + + /* When sending the list, the form is: + * { aa: { Name: aa, Description: aadesc...}, bb: { Name: bb + */ + } else if (filter->do_list) { + /* Search and display Name, should be the first item */ + for (item=0; items[item].name; item++) { + if (strcmp(items[item].name, "Name") == 0) { + printf("%s: {\n", quote_string(hpkt.edbuf, *items[item].value)); + break; + } + } + } else { + /* Begin new resource */ + printf("{\n \"%s\": {", resources[resinx].name); + } + + first_res = false; + first_directive = true; + directives = 0; + + for (item=0; items[item].name; item++) { + /* Check user argument -l */ + if (filter->do_list && + regexec(&filter->directive_reg, + items[item].name, 32, pmatch, 0) != 0) + { + continue; + } + + hpkt.ritem = &items[item]; + if (bit_is_set(item, res_all.hdr.item_present)) { + if (!first_directive) printf(","); + if (display_global_item(hpkt)) { + /* Fall-through wanted */ + } else { + printf("\n \"%s\": null", items[item].name); + } + directives++; + first_directive = false; + } + if (items[item].flags & ITEM_LAST) { + display_last(hpkt); 
/* If last bit set always call to cleanup */ + } + } + /* { "aa": { "Name": "aa",.. }, "bb": { "Name": "bb", ... } */ + if (filter->do_only_data || filter->do_list) { + printf("\n }"); /* Finish the Resource with a single } */ + + } else { + if (filter->do_one) { + /* don't print anything */ + } else if (directives) { + printf("\n }\n}"); /* end of resource */ + } else { + printf("}\n}"); + } + } + } /* End loop over all resources of this type */ + } /* End loop all resource types */ + + if (filter->do_only_data) { + printf("\n]\n"); + + } else if (filter->do_one || filter->do_list) { + printf("\n}\n"); + + } else { + printf("\n]\n"); + } + term_hpkt(hpkt); +} + + +/* + * Make a quick check to see that we have all the + * resources needed. + */ +static int check_resources() +{ + bool OK = true; + DIRRES *director; + bool tls_needed; + + LockRes(); + + numdir = 0; + foreach_res(director, R_DIRECTOR) { + + numdir++; + /* tls_require implies tls_enable */ + if (director->tls_require) { + if (have_tls) { + director->tls_enable = true; + } else { + Jmsg(NULL, M_FATAL, 0, _("TLS required but not configured in Bacula.\n")); + OK = false; + continue; + } + } + + tls_needed = director->tls_enable || director->tls_authenticate; + + if ((!director->tls_ca_certfile && !director->tls_ca_certdir) && tls_needed) { + Emsg2(M_FATAL, 0, _("Neither \"TLS CA Certificate\"" + " or \"TLS CA Certificate Dir\" are defined for Director \"%s\" in %s." + " At least one CA certificate store is required.\n"), + director->hdr.name, configfile); + OK = false; + } + } + + if (numdir == 0) { + Emsg1(M_FATAL, 0, _("No Director resource defined in %s\n" + "Without that I don't how to speak to the Director :-(\n"), configfile); + OK = false; + } + + CONRES *cons; + /* Loop over Consoles */ + foreach_res(cons, R_CONSOLE) { + /* tls_require implies tls_enable */ + if (cons->tls_require) { + if (have_tls) { + cons->tls_enable = true; + } else { + Jmsg(NULL, M_FATAL, 0, _("TLS required but not configured in Bacula.\n")); + OK = false; + continue; + } + } + tls_needed = cons->tls_enable || cons->tls_authenticate; + if ((!cons->tls_ca_certfile && !cons->tls_ca_certdir) && tls_needed) { + Emsg2(M_FATAL, 0, _("Neither \"TLS CA Certificate\"" + " or \"TLS CA Certificate Dir\" are defined for Console \"%s\" in %s.\n"), + cons->hdr.name, configfile); + OK = false; + } + } + + UnlockRes(); + + return OK; +} + +#ifdef needed +static void ressendit(void *sock, const char *fmt, ...) 
+{ + char buf[3000]; + va_list arg_ptr; + + va_start(arg_ptr, fmt); + bvsnprintf(buf, sizeof(buf), (char *)fmt, arg_ptr); + va_end(arg_ptr); + sendit(buf); +} + +static void dump_resource_types() +{ + int i; + bool first; + + /* List resources and their code */ + printf("[\n"); + first = true; + for (i=0; resources[i].name; i++) { + if (!first) { + printf(",\n"); + } + printf(" \"%s\": %d", resources[i].name, resources[i].rcode); + first = false; + } + printf("\n]\n"); +} + +static void dump_directives() +{ + int i, j; + bool first_res; + bool first_directive; + RES_ITEM *items; + + /* List resources and directives */ + printf("[\n"); + first_res = true; + for (i=0; resources[i].name; i++) { + if (!first_res) { + printf(",\n"); + } + printf("{\n \"%s\": {\n", resources[i].name); + first_res = false; + first_directive = true; + items = resources[i].items; + for (j=0; items[j].name; j++) { + if (!first_directive) { + printf(",\n"); + } + printf(" \"%s\": null", items[j].name); + first_directive = false; + } + printf("\n }"); /* end of resource */ + } + printf("\n]\n"); +} +#endif + +/* + * Send a line to the output file and or the terminal + */ +void senditf(const char *fmt,...) +{ + char buf[3000]; + va_list arg_ptr; + + va_start(arg_ptr, fmt); + bvsnprintf(buf, sizeof(buf), (char *)fmt, arg_ptr); + va_end(arg_ptr); + sendit(buf); +} + +void sendit(const char *buf) +{ +#ifdef CONIO_FIX + char obuf[3000]; + if (output == stdout || teeout) { + const char *p, *q; + /* + * Here, we convert every \n into \r\n because the + * terminal is in raw mode when we are using + * conio. + */ + for (p=q=buf; (p=strchr(q, '\n')); ) { + int len = p - q; + if (len > 0) { + memcpy(obuf, q, len); + } + memcpy(obuf+len, "\r\n", 3); + q = ++p; /* point after \n */ + fputs(obuf, output); + } + if (*q) { + fputs(q, output); + } + fflush(output); + } + if (output != stdout) { + fputs(buf, output); + } +#else + + fputs(buf, output); + fflush(output); + if (teeout) { + fputs(buf, stdout); + fflush(stdout); + } +#endif +} diff --git a/src/console/bconsole.conf.in b/src/console/bconsole.conf.in new file mode 100644 index 00000000..84d23a28 --- /dev/null +++ b/src/console/bconsole.conf.in @@ -0,0 +1,13 @@ +# +# Bacula User Agent (or Console) Configuration File +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# + +Director { + Name = @basename@-dir + DIRport = @dir_port@ + address = @hostname@ + Password = "@dir_password@" +} diff --git a/src/console/conio.c b/src/console/conio.c new file mode 100755 index 00000000..a463207a --- /dev/null +++ b/src/console/conio.c @@ -0,0 +1,1146 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + Generalized console input/output handler + A maintanable replacement for readline() + + Updated for Bacula, Kern Sibbald, December MMIII + + This code is in part derived from code that I wrote in + 1981, so some of it is a bit old and could use a cleanup. 
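The CONIO_FIX branch of sendit() above rewrites every "\n" as "\r\n" because conio puts the terminal into raw mode, where a bare line feed no longer implies a carriage return. A self-contained sketch of that rewrite, writing to an arbitrary FILE* (write_crlf is an invented name, and it skips the fixed-size staging buffer the original uses):

   #include <stdio.h>
   #include <string.h>

   /* Write 'buf' to 'out', expanding each '\n' to "\r\n" as required by a
    * terminal in raw mode. */
   static void write_crlf(FILE *out, const char *buf)
   {
      const char *p, *q;

      for (q = buf; (p = strchr(q, '\n')) != NULL; q = p + 1) {
         fwrite(q, 1, (size_t)(p - q), out);   /* text before the newline */
         fputs("\r\n", out);
      }
      if (*q) {
         fputs(q, out);                        /* trailing text without newline */
      }
      fflush(out);
   }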
+ +*/ + +/* + * UTF-8 + * If the top bit of a UTF-8 string is 0 (8 bits), then it + * is a normal ASCII character. + * If the top two bits are 11 (i.e. (c & 0xC0) == 0xC0 then + * it is the start of a series of chars (up to 5) + * Each subsequent character starts with 10 (i.e. (c & 0xC0) == 0x80) + */ + + +#ifdef TEST_PROGRAM +#include +#include +#include +#include +#include +#include +#define HAVE_CONIO 1 +#else + +/* We are in Bacula */ +#include "bacula.h" + +#endif + +#ifdef HAVE_CONIO + +#include +#include + +#ifdef HAVE_SUN_OS +#ifndef _TERM_H +extern "C" int tgetent(void *, const char *); +extern "C" int tgetnum(const char *); +extern "C" char *tgetstr (const char*, char**); +# Note: the following on older (Solaris 10) systems +# may need to be moved to after the #endif +extern "C" char *tgoto (const char *, int, int); +#endif +#elif defined(__sgi) +extern "C" int tgetent(char *, char *); +extern "C" int tgetnum(char id[2]); +extern "C" char *tgetstr(char id[2], char **); +extern "C" char *tgoto(char *, int, int); +#elif defined (__digital__) && defined (__unix__) +extern "C" int tgetent(void *, const char *); +extern "C" int tgetnum(const char *); +extern "C" char *tgetstr (const char*, char**); +extern "C" char *tgoto (const char *, int, int); +#endif +#include "func.h" + + +/* From termios library */ +#if defined(HAVE_HPUX_OS) || defined(HAVE_AIX_OS) +static char *BC; +static char *UP; +#else +extern char *BC; +extern char *UP; +#endif + +static void add_smap(char *str, int func); + + +/* Global variables */ + +static const char *t_up = "\n"; /* scroll up character */ +static const char *t_honk = "\007"; /* sound beep */ +static char *t_il; /* insert line */ +static char *t_dl; /* delete line */ +static char *t_cs; /* clear screen */ +static char *t_cl; /* clear line */ +static int t_width = 79; /* terminal width */ +static int t_height = 24; /* terminal height */ +static int linsdel_ok = 0; /* set if term has line insert & delete fncs */ + +static char *t_cm; /* cursor positioning */ +static char *t_ti; /* init sequence */ +static char *t_te; /* end sequence */ +static char *t_do; /* down one line */ +static char *t_sf; /* scroll screen one line up */ + +/* Keypad and Function Keys */ +static char *kl; /* left key */ +static char *kr; /* right */ +static char *ku; /* up */ +static char *kd; /* down */ +static char *kh; /* home */ +static char *kb; /* backspace */ +static char *kD; /* delete key */ +static char *kI; /* insert */ +static char *kN; /* next page */ +static char *kP; /* previous page */ +static char *kH; /* home */ +static char *kE; /* end */ + +#ifndef EOS +#define EOS '\0' /* end of string terminator */ +#endif + +#define TRUE 1 +#define FALSE 0 +/* + * Stab entry. Input chars (str), the length, and the desired + * func code. + */ +typedef struct s_stab { + struct s_stab *next; + char *str; + int len; + int func; +} stab_t; + +#define MAX_STAB 30 + +static stab_t **stab = NULL; /* array of stabs by length */ +static int num_stab; /* size of stab array */ + +static bool old_term_params_set = false; +static struct termios old_term_params; + +/* Maintain lines in a doubly linked circular pool of lines. 
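The UTF-8 note above is what the editing loop later in this file relies on: the lead byte tells how many continuation bytes (each of the form 10xxxxxx) follow. A compact sketch of that classification, mirroring the (c & 0xC0) == 0xC0 style tests used in input_line(); it keeps the historical 5-continuation-byte forms even though modern UTF-8 stops at four bytes total.

   /* Number of continuation bytes that follow a UTF-8 lead byte 'c'
    * (0 for plain ASCII or a stray continuation byte). */
   static int utf8_trailing_bytes(unsigned char c)
   {
      if ((c & 0xC0) != 0xC0) return 0;   /* ASCII or continuation byte */
      if ((c & 0xFC) == 0xFC) return 5;
      if ((c & 0xF8) == 0xF8) return 4;
      if ((c & 0xF0) == 0xF0) return 3;
      if ((c & 0xE0) == 0xE0) return 2;
      return 1;
   }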
Each line is + preceded by a header defined by the lstr structure */ + + +struct lstr { /* line pool structure */ + struct lstr *prevl; /* link to previous line */ + struct lstr *nextl; /* link to next line */ + long len; /* length of line+header */ + char used; /* set if line valid */ + char line; /* line is actually varying length */ +}; + +#ifdef unix +#define POOLEN 128000 /* bytes in line pool */ +#else +#define POOLEN 500 /* bytes in line pool */ +#endif +char pool[POOLEN]; /* line pool */ +#define PHDRL ((int)sizeof(struct lstr)) /* length of line header */ + +static struct lstr *lptr; /* current line pointer */ +static struct lstr *slptr; /* store line pointer */ +static int cl, cp; +static char *getnext(), *getprev(); +static int first = 1; +static int mode_insert = 1; +static int mode_wspace = 1; /* words separated by spaces */ + + +static short char_map[600]= { + 0, F_SOL, /* ^a Line start */ + F_PRVWRD, /* ^b Previous word */ F_BREAK, /* ^C break */ + F_DELCHR, /* ^D Delete character */ F_EOL, /* ^e End of line */ + F_CSRRGT, /* ^f Right */ F_TABBAK, /* ^G Back tab */ + F_CSRLFT, /* ^H Left */ F_TAB, /* ^I Tab */ + F_CSRDWN, /* ^J Down */ F_DELEOL, /* ^K kill to eol */ + F_CLRSCRN,/* ^L clear screen */ F_RETURN, /* ^M Carriage return */ + F_RETURN, /* ^N enter line */ F_CONCAT, /* ^O Concatenate lines */ + F_CSRUP, /* ^P cursor up */ F_TINS, /* ^Q Insert character mode */ + F_PAGUP, /* ^R Page up */ F_CENTER, /* ^S Center text */ + F_PAGDWN, /* ^T Page down */ F_DELSOL, /* ^U delete to start of line */ + F_DELWRD, /* ^V Delete word */ F_PRVWRD, /* ^W Previous word */ + F_NXTMCH, /* ^X Next match */ F_DELEOL, /* ^Y Delete to end of line */ + F_BACKGND,/* ^Z Background */ 0x1B, /* ^[=ESC escape */ + F_TENTRY, /* ^\ Entry mode */ F_PASTECB,/* ^]=paste clipboard */ + F_HOME, /* ^^ Home */ F_ERSLIN, /* ^_ Erase line */ + + ' ','!','"','#','$','%','&','\047', + '(',')','*','+','\054','-','.','/', + '0','1','2','3','4','5','6','7', + '8','9',':',';','<','=','>','?', + '@','A','B','C','D','E','F','G', + 'H','I','J','K','L','M','N','O', + 'P','Q','R','S','T','U','V','W', + 'X','Y','Z','[','\\',']','^','_', + '\140','a','b','c','d','e','f','g', + 'h','i','j','k','l','m','n','o', + 'p','q','r','s','t','u','v','w', + 'x','y','z','{','|','}','\176',F_ERSCHR /* erase character */ + + }; + + +/* Local variables */ + +#define CR '\r' /* carriage return */ + + +/* Function Prototypes */ + +static unsigned int input_char(void); +static unsigned int t_gnc(void); +static void insert_space(char *curline, int line_len); +static void insert_hole(char *curline, int line_len); +static void forward(char *str, int str_len); +static void backup(char *curline); +static void delchr(int cnt, char *curline, int line_len); +static int iswordc(char c); +static int next_word(char *ldb_buf); +static int prev_word(char *ldb_buf); +static void prtcur(char *str); +static void poolinit(void); +static char * getnext(void); +static char * getprev(void); +static void putline(char *newl, int newlen); +static void t_honk_horn(void); +static void t_insert_line(void); +static void t_delete_line(void); +static void t_clrline(int pos, int width); +void t_sendl(const char *msg, int len); +void t_send(const char *msg); +void t_char(char c); +static void asclrs(); +static void ascurs(int y, int x); + +static void rawmode(FILE *input); +static void normode(void); +static unsigned t_getch(); +static void asclrl(int pos, int width); +static void asinsl(); +static void asdell(); + +int input_line(char *string, int length); +void 
con_term(); +void trapctlc(); +int usrbrk(); +void clrbrk(); + +void con_init(FILE *input) +{ + atexit(con_term); + rawmode(input); + trapctlc(); +} + +/* + * Zed control keys + */ +void con_set_zed_keys(void) +{ + char_map[1] = F_NXTWRD; /* ^A Next Word */ + char_map[2] = F_SPLIT; /* ^B Split line */ + char_map[3] = F_EOI; /* ^C Quit */ + char_map[4] = F_DELCHR; /* ^D Delete character */ + char_map[5] = F_EOF; /* ^E End of file */ + char_map[6] = F_INSCHR; /* ^F Insert character */ + char_map[7] = F_TABBAK; /* ^G Back tab */ + char_map[8] = F_CSRLFT; /* ^H Left */ + char_map[9] = F_TAB; /* ^I Tab */ + char_map[10] = F_CSRDWN; /* ^J Down */ + char_map[11] = F_CSRUP; /* ^K Up */ + char_map[12] = F_CSRRGT; /* ^L Right */ + char_map[13] = F_RETURN; /* ^M Carriage return */ + char_map[14] = F_EOL; /* ^N End of line */ + char_map[15] = F_CONCAT; /* ^O Concatenate lines */ + char_map[16] = F_MARK; /* ^P Set marker */ + char_map[17] = F_TINS; /* ^Q Insert character mode */ + char_map[18] = F_PAGUP; /* ^R Page up */ + char_map[19] = F_CENTER; /* ^S Center text */ + char_map[20] = F_PAGDWN; /* ^T Page down */ + char_map[21] = F_SOL; /* ^U Line start */ + char_map[22] = F_DELWRD; /* ^V Delete word */ + char_map[23] = F_PRVWRD; /* ^W Previous word */ + char_map[24] = F_NXTMCH; /* ^X Next match */ + char_map[25] = F_DELEOL; /* ^Y Delete to end of line */ + char_map[26] = F_DELLIN; /* ^Z Delete line */ + /* 27 = ESC */ + char_map[28] = F_TENTRY; /* ^\ Entry mode */ + char_map[29] = F_PASTECB;/* ^]=paste clipboard */ + char_map[30] = F_HOME; /* ^^ Home */ + char_map[31] = F_ERSLIN; /* ^_ Erase line */ + +} + +void con_term() +{ + normode(); +} + +#ifdef TEST_PROGRAM +/* + * Guarantee that the string is properly terminated */ +char *bstrncpy(char *dest, const char *src, int maxlen) +{ + strncpy(dest, src, maxlen-1); + dest[maxlen-1] = 0; + return dest; +} +#endif + + +/* + * New style string mapping to function code + */ +static unsigned do_smap(unsigned c) +{ + char str[MAX_STAB]; + int len = 0; + stab_t *tstab; + int i, found; + unsigned cm; + + len = 1; + str[0] = c; + str[1] = 0; + + cm = char_map[c]; + if (cm == 0) { + return c; + } else { + c = cm; + } + for ( ;; ) { + found = 0; + for (i=len-1; inext) { + if (strncmp(str, tstab->str, len) == 0) { + if (len == tstab->len) { + return tstab->func; + } + found = 1; + break; /* found possibility continue searching */ + } + } + } + if (!found) { + return len==1?c:0; + } + /* found partial match, so get next character and retry */ + str[len++] = t_gnc(); + str[len] = 0; + } +} + +#ifdef DEBUG_x +static void dump_stab() +{ + int i, j, c; + stab_t *tstab; + char buf[100]; + + for (i=0; inext) { + for (j=0; jlen; j++) { + c = tstab->str[j]; + if (c < 0x20 || c > 0x7F) { + sprintf(buf, " 0x%x ", c); + t_send(buf); + } else { + buf[0] = c; + buf[1] = 0; + t_sendl(buf, 1); + } + } + sprintf(buf, " func=%d len=%d\n\r", tstab->func, tstab->len); + t_send(buf); + } + } +} +#endif + +/* + * New routine. Add string to string->func mapping table. + */ +static void add_smap(char *str, int func) +{ + stab_t *tstab; + int len; + + if (!str) { + return; + } + len = strlen(str); + if (len == 0) { +/* errmsg("String for func %d is zero length\n", func); */ + return; + } + tstab = (stab_t *)malloc(sizeof(stab_t)); + memset(tstab, 0, sizeof(stab_t)); + tstab->len = len; + tstab->str = (char *)malloc(tstab->len + 1); + bstrncpy(tstab->str, str, tstab->len + 1); + tstab->func = func; + if (tstab->len > num_stab) { + printf("stab string too long %d. 
Max is %d\n", tstab->len, num_stab); + exit(1); + } + tstab->next = stab[tstab->len-1]; + stab[tstab->len-1] = tstab; +/* printf("Add_smap tstab=%x len=%d func=%d tstab->next=%x\n\r", tstab, len, + func, tstab->next); */ + +} + + +/* Get the next character from the terminal - performs table lookup on + the character to do the desired translation */ +static unsigned int +input_char() +{ + unsigned c; + + if ((c=t_gnc()) <= 599) { /* IBM generates codes up to 260 */ + c = do_smap(c); + } else if (c > 1000) { /* stuffed function */ + c -= 1000; /* convert back to function code */ + } + if (c <= 0) { + t_honk_horn(); + } + /* if we got a screen size escape sequence, read height, width */ + if (c == F_SCRSIZ) { + t_gnc(); /* - 0x20 = y */ + t_gnc(); /* - 0x20 = x */ + c = input_char(); + } + return c; +} + + +/* Get a complete input line */ + +int +input_line(char *string, int length) +{ + char curline[2000]; /* edit buffer */ + int noline; + unsigned c; + int more; + int i; + + if (first) { + poolinit(); /* build line pool */ + first = 0; + } + noline = 1; /* no line fetched yet */ + for (cl=cp=0; cl cp) + cl = cp; + break; + case F_NXTWRD: + i = next_word(curline); + while (i--) { + forward(curline, sizeof(curline)); + } + break; + case F_PRVWRD: + i = prev_word(curline); + while (i--) { + backup(curline); + } + break; + case F_DELWRD: + delchr(next_word(curline), curline, sizeof(curline)); /* delete word */ + break; + case F_NXTMCH: /* Ctl-X */ + if (cl==0) { + *string = EOS; /* terminate string */ + return(c); /* give it to him */ + } + /* Note fall through */ + case F_DELLIN: + case F_ERSLIN: + while (cp > 0) { + backup(curline); /* backup to beginning of line */ + } + t_clrline(0, t_width); /* erase line */ + cp = 0; + cl = 0; /* reset cursor counter */ + t_char(' '); + t_char(0x8); + break; + case F_SOL: + while (cp > 0) { + backup(curline); + } + break; + case F_EOL: + while (cp < cl) { + forward(curline, sizeof(curline)); + } + while (cp > cl) { + backup(curline); + } + break; + case F_TINS: /* toggle insert mode */ + mode_insert = !mode_insert; /* flip bit */ + break; + default: + if (c > 255) { /* function key hit */ + if (cl==0) { /* if first character then */ + *string = EOS; /* terminate string */ + return c; /* return it */ + } + t_honk_horn(); /* complain */ + } else { + if ((c & 0xC0) == 0xC0) { + if ((c & 0xFC) == 0xFC) { + more = 5; + } else if ((c & 0xF8) == 0xF8) { + more = 4; + } else if ((c & 0xF0) == 0xF0) { + more = 3; + } else if ((c & 0xE0) == 0xE0) { + more = 2; + } else { + more = 1; + } + } else { + more = 0; + } + if (mode_insert) { + insert_space(curline, sizeof(curline)); + } + curline[cp++] = c; /* store character in line being built */ + t_char(c); /* echo character to terminal */ + while (more--) { + c= input_char(); + insert_hole(curline, sizeof(curline)); + curline[cp++] = c; /* store character in line being built */ + t_char(c); /* echo character to terminal */ + } + if (cp > cl) { + cl = cp; /* keep current length */ + curline[cp] = 0; + } + } + break; + } /* end switch */ + } +/* If we fall through here rather than goto done, the line is too long + simply return what we have now. */ +done: + curline[cl++] = EOS; /* terminate */ + bstrncpy(string,curline,length); /* return line to caller */ + /* Save non-blank lines. 
Note, put line zaps curline */ + if (curline[0] != EOS) { + putline(curline,cl); /* save line for posterity */ + } + return 0; /* give it to him/her */ +} + +/* Insert a space at the current cursor position */ +static void +insert_space(char *curline, int curline_len) +{ + int i; + + if (cp >= cl || cl+1 > curline_len) { + return; + } + /* Note! source and destination overlap */ + memmove(&curline[cp+1],&curline[cp],i=cl-cp); + cl++; + curline[cp] = ' '; + i = 0; + while (cl > cp) { + forward(curline, curline_len); + i++; + } + while (i--) { + backup(curline); + } +} + + +static void +insert_hole(char *curline, int curline_len) +{ + int i; + + if (cp > cl || cl+1 > curline_len) { + return; + } + /* Note! source and destination overlap */ + memmove(&curline[cp+1], &curline[cp], i=cl-cp); + cl++; + curline[cl] = 0; +} + + +/* Move cursor forward keeping characters under it */ +static void +forward(char *str, int str_len) +{ + if (cp > str_len) { + return; + } + if (cp >= cl) { + t_char(' '); + str[cp+1] = ' '; + str[cp+2] = 0; + } else { + t_char(str[cp]); + if ((str[cp] & 0xC0) == 0xC0) { + cp++; + while ((str[cp] & 0xC0) == 0x80) { + t_char(str[cp]); + cp++; + } + cp--; + } + } + cp++; +} + +/* How many characters under the cursor */ +static int +char_count(int cptr, char *str) +{ + int cnt = 1; + if (cptr > cl) { + return 0; + } + if ((str[cptr] & 0xC0) == 0xC0) { + cptr++; + while ((str[cptr] & 0xC0) == 0x80) { + cnt++; + cptr++; + } + } + return cnt; +} + +/* Backup cursor keeping characters under it */ +static void +backup(char *str) +{ + if (cp == 0) { + return; + } + while ((str[cp] & 0xC0) == 0x80) { + cp--; + } + t_char('\010'); + cp--; +} + +/* Delete the character under the cursor */ +static void +delchr(int del, char *curline, int line_len) +{ + int i, cnt; + + if (cp > cl || del == 0) { + return; + } + while (del-- && cl > 0) { + cnt = char_count(cp, curline); + if ((i=cl-cp-cnt) > 0) { + memcpy(&curline[cp], &curline[cp+cnt], i); + } + cl -= cnt; + curline[cl] = EOS; + t_clrline(0, t_width); + i = 0; + while (cl > cp) { + forward(curline, line_len); + i++; + } + while (i--) { + backup(curline); + } + } +} + +/* Determine if character is part of a word */ +static int +iswordc(char c) +{ + if (mode_wspace) + return !isspace(c); + if (c >= '0' && c <= '9') + return true; + if (c == '$' || c == '%') + return true; + return isalpha(c); +} + +/* Return number of characters to get to next word */ +static int +next_word(char *ldb_buf) +{ + int ncp; + + if (cp > cl) + return 0; + ncp = cp; + for ( ; ncp cl) /* if past eol start at eol */ + ncp=cl+1; + else + ncp = cp; + /* backup to end of previous word - i.e. skip special chars */ + for (i=ncp-1; i && !iswordc(*(ldb_buf+i)); i--) ; + if (i == 0) { /* at beginning of line? */ + return cp; /* backup to beginning */ + } + /* now move back through word to beginning of word */ + for ( ; i && iswordc(*(ldb_buf+i)); i--) ; + ncp = i+1; /* position to first char of word */ + if (i==0 && iswordc(*ldb_buf)) /* check for beginning of line */ + ncp = 0; + return cp-ncp; /* return count */ +} + +/* Display new current line */ +static void +prtcur(char *str) +{ + while (cp > 0) { + backup(str); + } + t_clrline(0,t_width); + cp = cl = strlen(str); + t_sendl(str, cl); +} + + +/* Initialize line pool. Split pool into two pieces. 
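
The pool routines that follow (poolinit, getnext, getprev, putline) carve a fixed byte array into variable-length records on a circular doubly linked list; putline() coalesces neighbouring records until the new line fits, then splits the unused tail off on an 8-byte boundary. A reduced sketch of that record layout and the split test, with invented names rather than the lstr/POOLEN/PHDRL ones used here:

   #include <stdio.h>
   #include <stdint.h>

   /* Simplified record header for a pool of variable-length lines. */
   struct rec {
      struct rec *next, *prev;          /* circular doubly linked list     */
      int used;                         /* 1 = holds a line, 0 = free      */
      int len;                          /* total record size, header+text  */
   };

   #define HDR ((int)sizeof(struct rec))

   /* Round a pointer up to an 8-byte boundary before placing a new header. */
   static struct rec *align8(char *p)
   {
      uintptr_t u = (uintptr_t)p;
      return (struct rec *)((u + 7) & ~(uintptr_t)7);
   }

   int main(void)
   {
      char pool[256];
      struct rec *r = (struct rec *)pool;
      int newlen = 40;                   /* bytes needed for a new line */

      r->next = r->prev = r;
      r->used = 0;
      r->len = sizeof(pool);
      /* Split only when the leftover can still hold a header plus slack. */
      if (r->len > newlen + 2 * HDR + 7) {
         struct rec *tail = align8(pool + newlen + HDR);
         printf("split: %d bytes kept, tail starts at offset %ld\n",
                newlen + HDR, (long)((char *)tail - pool));
      }
      return 0;
   }
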
*/ +static void +poolinit() +{ + slptr = lptr = (struct lstr *)pool; + lptr->nextl = lptr; + lptr->prevl = lptr; + lptr->used = 1; + lptr->line = 0; + lptr->len = POOLEN; +} + + +/* Return pointer to next line in the pool and advance current line pointer */ +static char * +getnext() +{ + do { /* find next used line */ + lptr = lptr->nextl; + } while (!lptr->used); + return (char *)&lptr->line; +} + +/* Return pointer to previous line in the pool */ +static char * +getprev() +{ + do { /* find previous used line */ + lptr = lptr->prevl; + } while (!lptr->used); + return (char *)&lptr->line; +} + +static void +putline(char *newl, int newlen) +{ + struct lstr *nptr; /* points to next line */ + char *p; + + lptr = slptr; /* get ptr to last line stored */ + lptr = lptr->nextl; /* advance pointer */ + if ((char *)lptr-pool+newlen+PHDRL > POOLEN) { /* not enough room */ + lptr->used = 0; /* delete line */ + lptr = (struct lstr *)pool; /* start at beginning of buffer */ + } + while (lptr->len < newlen+PHDRL) { /* concatenate buffers */ + nptr = lptr->nextl; /* point to next line */ + lptr->nextl = nptr->nextl; /* unlink it from list */ + nptr->nextl->prevl = lptr; + lptr->len += nptr->len; + } + if (lptr->len > newlen + 2 * PHDRL + 7) { /* split buffer */ + nptr = (struct lstr *)((char *)lptr + newlen + PHDRL); + /* Appropriate byte alignment - for Intel 2 byte, but on + Sparc we need 8 byte alignment, so we always do 8 */ + if (((long unsigned)nptr & 7) != 0) { /* test eight byte alignment */ + p = (char *)nptr; + nptr = (struct lstr *)((((long unsigned) p) & ~7) + 8); + } + nptr->len = lptr->len - ((char *)nptr - (char *)lptr); + lptr->len -= nptr->len; + nptr->nextl = lptr->nextl; /* link in new buffer */ + lptr->nextl->prevl = nptr; + lptr->nextl = nptr; + nptr->prevl = lptr; + nptr->used = 0; + } + memcpy(&lptr->line,newl,newlen); + lptr->used = 1; /* mark line used */ + slptr = lptr; /* save as stored line */ +} + +#ifdef DEBUGOUT +static void +dump(struct lstr *ptr, char *msg) +{ + printf("%s buf=%x nextl=%x prevl=%x len=%d used=%d\n", + msg,ptr,ptr->nextl,ptr->prevl,ptr->len,ptr->used); + if (ptr->used) + printf("line=%s\n",&ptr->line); +} +#endif /* DEBUGOUT */ + + +/* Honk horn on terminal */ +static void +t_honk_horn() +{ + t_send(t_honk); +} + +/* Insert line on terminal */ +static void +t_insert_line() +{ + asinsl(); +} + +/* Delete line from terminal */ +static void +t_delete_line() +{ + asdell(); +} + +/* clear line from pos to width */ +static void +t_clrline(int pos, int width) +{ + asclrl(pos, width); /* clear to end of line */ +} + +/* Helper function to add string preceded by + * ESC to smap table */ +static void add_esc_smap(const char *str, int func) +{ + char buf[1000]; + buf[0] = 0x1B; /* esc */ + bstrncpy(buf+1, str, sizeof(buf)-1); + add_smap(buf, func); +} + +/* Set raw mode on terminal file. Basically, get the terminal into a + mode in which all characters can be read as they are entered. CBREAK + mode is not sufficient. 
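
rawmode() below switches the controlling terminal out of canonical mode so each keystroke is delivered immediately, and additionally loads termcap capabilities and key bindings, which this aside omits. A minimal, self-contained version of just the termios part looks roughly like this:

   #include <stdio.h>
   #include <stdlib.h>
   #include <termios.h>
   #include <unistd.h>

   static struct termios saved;         /* original settings, restored on exit */

   static void restore_tty(void)
   {
      tcsetattr(STDIN_FILENO, TCSANOW, &saved);
   }

   static void raw_tty(void)
   {
      struct termios t;

      if (!isatty(STDIN_FILENO) || tcgetattr(STDIN_FILENO, &saved) != 0) {
         return;                        /* not a terminal, nothing to do */
      }
      atexit(restore_tty);
      t = saved;
      t.c_lflag &= ~(ICANON | ECHO);    /* no line buffering, no local echo  */
      t.c_iflag &= ~(IXON | ICRNL);     /* no ^S/^Q flow control, no CR->NL  */
      t.c_cc[VMIN]  = 1;                /* read() returns after one byte     */
      t.c_cc[VTIME] = 0;                /* ...with no inter-byte timeout     */
      tcsetattr(STDIN_FILENO, TCSANOW, &t);
   }

   int main(void)
   {
      unsigned char c;
      raw_tty();
      printf("press a key: ");
      fflush(stdout);
      if (read(STDIN_FILENO, &c, 1) == 1) {
         printf("\r\ngot 0x%02x\r\n", c);
      }
      return 0;
   }
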
+ */ +static void rawmode(FILE *input) +{ + struct termios t; + static char term_buf[2048]; + static char *term_buffer = term_buf; + char *termtype = (char *)getenv("TERM"); + + /* Make sure we are dealing with a terminal */ + if (!isatty(fileno(input))) { + return; + } + if (tcgetattr(0, &old_term_params) != 0) { + printf("conio: Cannot tcgetattr()\n"); + exit(1); + } + old_term_params_set = true; + t = old_term_params; + t.c_cc[VMIN] = 1; /* satisfy read after 1 char */ + t.c_cc[VTIME] = 0; + t.c_iflag &= ~(BRKINT | IGNPAR | PARMRK | INPCK | + ISTRIP | ICRNL | IXON | IXOFF | INLCR | IGNCR); + t.c_iflag |= IGNBRK; + t.c_oflag |= ONLCR; + t.c_lflag &= ~(ECHO | ECHOE | ECHOK | ECHONL | ICANON | + NOFLSH | TOSTOP); + tcflush(0, TCIFLUSH); + if (tcsetattr(0, TCSANOW, &t) == -1) { + printf("Cannot tcsetattr()\n"); + } + + /* Defaults, the main program can override these */ + signal(SIGQUIT, SIG_IGN); + signal(SIGHUP, SIG_IGN); + trapctlc(); + signal(SIGWINCH, SIG_IGN); + + if (!termtype) { + printf("Cannot get terminal type.\n"); + normode(); + exit(1); + } + if (tgetent(term_buffer, termtype) < 0) { + printf("Cannot get terminal termcap entry.\n"); + normode(); + exit(1); + } + t_width = t_height = -1; + /* Note (char *)casting is due to really stupid compiler warnings */ + t_width = tgetnum((char *)"co") - 1; + t_height = tgetnum((char *)"li"); + BC = NULL; + UP = NULL; + t_cm = (char *)tgetstr((char *)"cm", &term_buffer); + t_cs = (char *)tgetstr((char *)"cl", &term_buffer); /* clear screen */ + t_cl = (char *)tgetstr((char *)"ce", &term_buffer); /* clear line */ + t_dl = (char *)tgetstr((char *)"dl", &term_buffer); /* delete line */ + t_il = (char *)tgetstr((char *)"al", &term_buffer); /* insert line */ + t_honk = (char *)tgetstr((char *)"bl", &term_buffer); /* beep */ + t_ti = (char *)tgetstr((char *)"ti", &term_buffer); + t_te = (char *)tgetstr((char *)"te", &term_buffer); + t_up = (char *)tgetstr((char *)"up", &term_buffer); + t_do = (char *)tgetstr((char *)"do", &term_buffer); + t_sf = (char *)tgetstr((char *)"sf", &term_buffer); + + num_stab = MAX_STAB; /* get default stab size */ + stab = (stab_t **)malloc(sizeof(stab_t *) * num_stab); + memset(stab, 0, sizeof(stab_t *) * num_stab); + + /* Key bindings */ + kl = (char *)tgetstr((char *)"kl", &term_buffer); + kr = (char *)tgetstr((char *)"kr", &term_buffer); + ku = (char *)tgetstr((char *)"ku", &term_buffer); + kd = (char *)tgetstr((char *)"kd", &term_buffer); + kh = (char *)tgetstr((char *)"kh", &term_buffer); + kb = (char *)tgetstr((char *)"kb", &term_buffer); + kD = (char *)tgetstr((char *)"kD", &term_buffer); + kI = (char *)tgetstr((char *)"kI", &term_buffer); + kN = (char *)tgetstr((char *)"kN", &term_buffer); + kP = (char *)tgetstr((char *)"kP", &term_buffer); + kH = (char *)tgetstr((char *)"kH", &term_buffer); + kE = (char *)tgetstr((char *)"kE", &term_buffer); + + add_smap(kl, F_CSRLFT); + add_smap(kr, F_CSRRGT); + add_smap(ku, F_CSRUP); + add_smap(kd, F_CSRDWN); + add_smap(kI, F_TINS); + add_smap(kN, F_PAGDWN); + add_smap(kP, F_PAGUP); + add_smap(kH, F_HOME); + add_smap(kE, F_EOF); + + + add_esc_smap("[A", F_CSRUP); + add_esc_smap("[B", F_CSRDWN); + add_esc_smap("[C", F_CSRRGT); + add_esc_smap("[D", F_CSRLFT); + add_esc_smap("[1~", F_HOME); + add_esc_smap("[2~", F_TINS); + add_esc_smap("[3~", F_DELCHR); + add_esc_smap("[4~", F_EOF); + add_esc_smap("f", F_NXTWRD); + add_esc_smap("b", F_PRVWRD); +} + + +/* Restore tty mode */ +static void normode() +{ + if (old_term_params_set) { + tcsetattr(0, TCSANOW, &old_term_params); + 
old_term_params_set = false; + } +} + +/* Get next character from terminal/script file/unget buffer */ +static unsigned +t_gnc() +{ + return t_getch(); +} + + +/* Get next character from OS */ +static unsigned t_getch(void) +{ + unsigned char c; + + if (read(0, &c, 1) != 1) { + c = 0; + } + return (unsigned)c; +} + +/* Send message to terminal - primitive routine */ +void +t_sendl(const char *msg, int len) +{ + write(1, msg, len); +} + +void +t_send(const char *msg) +{ + if (msg == NULL) { + return; + } + t_sendl(msg, strlen(msg)); /* faster than one char at time */ +} + +/* Send single character to terminal - primitive routine - */ +void +t_char(char c) +{ + (void)write(1, &c, 1); +} + +/* ASCLRL() -- Clear to end of line from current position */ +static void asclrl(int pos, int width) +{ + int i; + + if (t_cl) { + t_send(t_cl); /* use clear to eol function */ + return; + } + if (pos==1 && linsdel_ok) { + t_delete_line(); /* delete line */ + t_insert_line(); /* reinsert it */ + return; + } + for (i=1; i<=width-pos+1; i++) + t_char(' '); /* last resort, blank it out */ + for (i=1; i<=width-pos+1; i++) /* backspace to original position */ + t_char(0x8); + return; + +} + + +/* ASCURS -- Set cursor position */ +static void ascurs(int y, int x) +{ + t_send((char *)tgoto(t_cm, x, y)); +} + + +/* ASCLRS -- Clear whole screen */ +static void asclrs() +{ + ascurs(0,0); + t_send(t_cs); +} + + + +/* ASINSL -- insert new line after cursor */ +static void asinsl() +{ + t_clrline(0, t_width); + t_send(t_il); /* insert before */ +} + +/* ASDELL -- Delete line at cursor */ +static void asdell() +{ + t_send(t_dl); +} + +#endif diff --git a/src/console/conio.h b/src/console/conio.h new file mode 100644 index 00000000..7646ffac --- /dev/null +++ b/src/console/conio.h @@ -0,0 +1,30 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +#ifndef __CONIO_H +#define __CONIO_H +extern int input_line(char *line, int len); +extern void con_init(FILE *input); + +extern void con_term(); +extern void con_set_zed_keys(); +extern void t_sendl(char *buf, int len); +extern void t_send(char *buf); +extern void t_char(char c); +#endif diff --git a/src/console/console.c b/src/console/console.c new file mode 100644 index 00000000..02cb61e1 --- /dev/null +++ b/src/console/console.c @@ -0,0 +1,1770 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * + * Bacula Console interface to the Director + * + * Kern Sibbald, September MM + * + */ + +#include "bacula.h" +#include "console_conf.h" +#include "jcr.h" + + +#if defined(HAVE_CONIO) +#include "conio.h" +//#define CONIO_FIX 1 +#else /* defined(HAVE_READLINE) || "DUMB" */ +#define con_init(x) +#define con_term() +#define con_set_zed_keys(); +#endif + +void trapctlc(); +void clrbrk(); +int usrbrk(); +static int brkflg = 0; /* set on user break */ + +#if defined(HAVE_WIN32) +#define isatty(fd) (fd==0) +#endif + +/* Exported variables */ + +//extern int rl_catch_signals; + +/* Imported functions */ +int authenticate_director(BSOCK *dir, DIRRES *director, CONRES *cons); + +/* Forward referenced functions */ +static void terminate_console(int sig); +static int check_resources(); +int get_cmd(FILE *input, const char *prompt, BSOCK *sock, int sec); +static int do_outputcmd(FILE *input, BSOCK *UA_sock); +void senditf(const char *fmt, ...); +void sendit(const char *buf); + +extern "C" void got_sigstop(int sig); +extern "C" void got_sigcontinue(int sig); +extern "C" void got_sigtout(int sig); +extern "C" void got_sigtin(int sig); + + +/* Static variables */ +static char *configfile = NULL; +static BSOCK *UA_sock = NULL; +static DIRRES *dir = NULL; +static CONRES *cons = NULL; +static FILE *output = stdout; +static bool teeout = false; /* output to output and stdout */ +static bool teein = false; /* input to output and stdout */ +static bool stop = false; +static bool no_conio = false; +static int timeout = 0; +static int argc; +static int numdir; +static POOLMEM *args; +static char *argk[MAX_CMD_ARGS]; +static char *argv[MAX_CMD_ARGS]; +static CONFIG *config; + + +/* Command prototypes */ +static int versioncmd(FILE *input, BSOCK *UA_sock); +static int inputcmd(FILE *input, BSOCK *UA_sock); +static int outputcmd(FILE *input, BSOCK *UA_sock); +static int teecmd(FILE *input, BSOCK *UA_sock); +static int teeallcmd(FILE *input, BSOCK *UA_sock); +static int quitcmd(FILE *input, BSOCK *UA_sock); +static int helpcmd(FILE *input, BSOCK *UA_sock); +static int echocmd(FILE *input, BSOCK *UA_sock); +static int timecmd(FILE *input, BSOCK *UA_sock); +static int sleepcmd(FILE *input, BSOCK *UA_sock); +static int execcmd(FILE *input, BSOCK *UA_sock); +static int putfilecmd(FILE *input, BSOCK *UA_sock); + +#ifdef HAVE_READLINE +static int eolcmd(FILE *input, BSOCK *UA_sock); + +# ifndef HAVE_REGEX_H +# include "lib/bregex.h" +# else +# include +# endif + +#endif + + +#define CONFIG_FILE "bconsole.conf" /* default configuration file */ + +static void usage() +{ + fprintf(stderr, _( +PROG_COPYRIGHT +"\n%sVersion: " VERSION " (" BDATE ") %s %s %s\n\n" +"Usage: bconsole [-s] [-c config_file] [-d debug_level]\n" +" -D select a Director\n" +" -l list Directors defined\n" +" -L list Consoles defined\n" +" -C select a console\n" +" -c set configuration file to file\n" +" -d set debug level to \n" +" -dt print timestamp in debug output\n" +" -n no conio\n" +" -s no signals\n" +" -u set command execution timeout to seconds\n" +" -t test - read configuration and exit\n" +" -? 
print this message.\n" +"\n"), 2000, BDEMO, HOST_OS, DISTNAME, DISTVER); +} + + +extern "C" +void got_sigstop(int sig) +{ + stop = true; +} + +extern "C" +void got_sigcontinue(int sig) +{ + stop = false; +} + +extern "C" +void got_sigtout(int sig) +{ +// printf("Got tout\n"); +} + +extern "C" +void got_sigtin(int sig) +{ +// printf("Got tin\n"); +} + + +static int zed_keyscmd(FILE *input, BSOCK *UA_sock) +{ + con_set_zed_keys(); + return 1; +} + +/* + * These are the @command + */ +struct cmdstruct { const char *key; int (*func)(FILE *input, BSOCK *UA_sock); const char *help; }; +static struct cmdstruct commands[] = { + { N_("input"), inputcmd, _("input from file")}, + { N_("output"), outputcmd, _("output to file")}, + { N_("quit"), quitcmd, _("quit")}, + { N_("tee"), teecmd, _("output to file and terminal")}, + { N_("tall"), teeallcmd, _("output everything to file and terminal (tee all)")}, + { N_("sleep"), sleepcmd, _("sleep specified time")}, + { N_("time"), timecmd, _("print current time")}, + { N_("version"), versioncmd, _("print Console's version")}, + { N_("echo"), echocmd, _("echo command string")}, + { N_("exec"), execcmd, _("execute an external command")}, + { N_("exit"), quitcmd, _("exit = quit")}, + { N_("putfile"), putfilecmd, _("send a file to the director")}, + { N_("zed_keys"), zed_keyscmd, _("zed_keys = use zed keys instead of bash keys")}, + { N_("help"), helpcmd, _("help listing")}, +#ifdef HAVE_READLINE + { N_("separator"), eolcmd, _("set command separator")}, +#endif + }; +#define comsize ((int)(sizeof(commands)/sizeof(struct cmdstruct))) + +static int do_a_command(FILE *input, BSOCK *UA_sock) +{ + unsigned int i; + int stat; + int found; + int len; + char *cmd; + + found = 0; + stat = 1; + + Dmsg1(120, "Command: %s\n", UA_sock->msg); + if (argc == 0) { + return 1; + } + + cmd = argk[0]+1; + if (*cmd == '#') { /* comment */ + return 1; + } + len = strlen(cmd); + for (i=0; imsg, _(": is an invalid command\n")); + UA_sock->msglen = strlen(UA_sock->msg); + sendit(UA_sock->msg); + } + return stat; +} + +/* When getting .api command, we can ignore some signals, so we set + * api_mode=true + */ +static bool api_mode=false; + +static bool ignore_signal(int stat, BSOCK *s) +{ + /* Not in API mode */ + if (!api_mode) { + return false; + } + + /* not a signal */ + if (stat != -1) { + return false; + } + + /* List signal that should not stop the read loop */ + Dmsg1(100, "Got signal %s\n", bnet_sig_to_ascii(s->msglen)); + switch(s->msglen) { + case BNET_CMD_BEGIN: + case BNET_CMD_FAILED: /* might want to print **ERROR** */ + case BNET_CMD_OK: /* might want to print **OK** */ + case BNET_MSGS_PENDING: + return true; + default: + break; + } + + /* The signal should break the read loop */ + return false; +} + +static void read_and_process_input(FILE *input, BSOCK *UA_sock) +{ + const char *prompt = "*"; + bool at_prompt = false; + int tty_input = isatty(fileno(input)); + int stat; + btimer_t *tid=NULL; + + for ( ;; ) { + if (at_prompt) { /* don't prompt multiple times */ + prompt = ""; + } else { + prompt = "*"; + at_prompt = true; + } + if (tty_input) { + stat = get_cmd(input, prompt, UA_sock, 30); + if (usrbrk() >= 1) { + clrbrk(); + } + if (usrbrk()) { + break; + } + } else { + /* Reading input from a file */ + if (usrbrk()) { + break; + } + if (bfgets(UA_sock->msg, input) == NULL) { + stat = -1; + } else { + sendit(UA_sock->msg); /* echo to terminal */ + strip_trailing_junk(UA_sock->msg); + UA_sock->msglen = strlen(UA_sock->msg); + stat = 1; + } + } + if (stat < 0) { + break; /* 
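
do_a_command() above appears to dispatch the word following '@' by a case-insensitive prefix match against the commands[] table. A trimmed-down sketch of that kind of table-driven dispatch, standalone and with made-up handlers rather than the console's:

   #include <stdio.h>
   #include <string.h>
   #include <strings.h>                 /* strncasecmp */

   typedef int (*cmd_fn)(const char *arg);

   static int do_quit(const char *arg) { (void)arg; printf("bye\n");  return 0; }
   static int do_time(const char *arg) { (void)arg; printf("tick\n"); return 1; }

   /* Table of @commands: keyword, handler, one-line help text. */
   static const struct { const char *key; cmd_fn fn; const char *help; } cmds[] = {
      { "quit", do_quit, "quit" },
      { "time", do_time, "print current time" },
   };

   /* Dispatch on a case-insensitive prefix match; -1 means unknown command. */
   static int dispatch(const char *word)
   {
      size_t len = strlen(word);
      for (size_t i = 0; i < sizeof(cmds)/sizeof(cmds[0]); i++) {
         if (strncasecmp(word, cmds[i].key, len) == 0) {
            return cmds[i].fn(NULL);
         }
      }
      return -1;
   }

   int main(void)
   {
      printf("ti -> %d\n", dispatch("ti"));   /* prefix match hits "time" */
      printf("xx -> %d\n", dispatch("xx"));   /* unknown: -1              */
      return 0;
   }
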
error or interrupt */ + } else if (stat == 0) { /* timeout */ + if (strcmp(prompt, "*") == 0) { + tid = start_bsock_timer(UA_sock, timeout); + UA_sock->fsend(".messages"); + stop_bsock_timer(tid); + } else { + continue; + } + } else { + at_prompt = false; + /* @ => internal command for us */ + if (UA_sock->msg[0] == '@') { + parse_args(UA_sock->msg, &args, &argc, argk, argv, MAX_CMD_ARGS); + if (!do_a_command(input, UA_sock)) { + break; + } + continue; + } + tid = start_bsock_timer(UA_sock, timeout); + if (!UA_sock->send()) { /* send command */ + stop_bsock_timer(tid); + break; /* error */ + } + stop_bsock_timer(tid); + } + if (strncasecmp(UA_sock->msg, ".api", 4) == 0) { + api_mode = true; + } + if (strcasecmp(UA_sock->msg, ".quit") == 0 || strcasecmp(UA_sock->msg, ".exit") == 0) { + break; + } + tid = start_bsock_timer(UA_sock, timeout); + while (1) { + stat = UA_sock->recv(); + if (ignore_signal(stat, UA_sock)) { + continue; + } + + if (stat < 0) { + break; + } + + if (at_prompt) { + if (!stop) { + sendit("\n"); + } + at_prompt = false; + } + /* Suppress output if running in background or user hit ctl-c */ + if (!stop && !usrbrk()) { + sendit(UA_sock->msg); + } + } + stop_bsock_timer(tid); + if (usrbrk() > 1) { + break; + } else { + clrbrk(); + } + if (!stop) { + fflush(stdout); + } + if (UA_sock->is_stop()) { + break; /* error or term */ + } else if (stat == BNET_SIGNAL) { + if (UA_sock->msglen == BNET_SUB_PROMPT) { + at_prompt = true; + } + Dmsg1(100, "Got poll %s\n", bnet_sig_to_ascii(UA_sock->msglen)); + } + } +} + +/* + * Call-back for reading a passphrase for an encrypted PEM file + * This function uses getpass(), + * which uses a static buffer and is NOT thread-safe. + */ +static int tls_pem_callback(char *buf, int size, const void *userdata) +{ +#ifdef HAVE_TLS + const char *prompt = (const char *)userdata; +# if defined(HAVE_WIN32) + sendit(prompt); + if (win32_cgets(buf, size) == NULL) { + buf[0] = 0; + return 0; + } else { + return strlen(buf); + } +# else + char *passwd; + + passwd = getpass(prompt); + bstrncpy(buf, passwd, size); + return strlen(buf); +# endif +#else + buf[0] = 0; + return 0; +#endif +} + +#ifdef HAVE_READLINE +#define READLINE_LIBRARY 1 +#include "readline.h" +#include "history.h" + +/* Get the first keyword of the line */ +static char * +get_first_keyword() +{ + char *ret=NULL; + int len; + char *first_space = strchr(rl_line_buffer, ' '); + if (first_space) { + len = first_space - rl_line_buffer; + ret = (char *) malloc((len + 1) * sizeof(char)); + memcpy(ret, rl_line_buffer, len); + ret[len]=0; + } + return ret; +} + +/* + * Return the command before the current point. 
+ * Set nb to the number of command to skip + */ +static char * +get_previous_keyword(int current_point, int nb) +{ + int i, end=-1, start, inquotes=0; + char *s=NULL; + + while (nb-- >= 0) { + /* first we look for a space before the current word */ + for (i = current_point; i >= 0; i--) { + if (rl_line_buffer[i] == ' ' || rl_line_buffer[i] == '=') { + break; + } + } + + /* find the end of the command */ + for (; i >= 0; i--) { + if (rl_line_buffer[i] != ' ') { + end = i; + break; + } + } + + /* no end of string */ + if (end == -1) { + return NULL; + } + + /* look for the start of the command */ + for (start = end; start > 0; start--) { + if (rl_line_buffer[start] == '"') { + inquotes = !inquotes; + } + if ((rl_line_buffer[start - 1] == ' ') && inquotes == 0) { + break; + } + current_point = start; + } + } + + s = (char *)malloc(end - start + 2); + memcpy(s, rl_line_buffer + start, end - start + 1); + s[end - start + 1] = 0; + + // printf("=======> %i:%i <%s>\n", start, end, s); + + return s; +} + +/* Simple structure that will contain the completion list */ +struct ItemList { + alist list; +}; + +static ItemList *items = NULL; +void init_items() +{ + if (!items) { + items = (ItemList*) malloc(sizeof(ItemList)); + memset(items, 0, sizeof(ItemList)); + + } else { + items->list.destroy(); + } + + items->list.init(); +} + +/* Match a regexp and add the result to the items list + * This function is recursive + */ +static void match_kw(regex_t *preg, const char *what, int len, POOLMEM **buf) +{ + int rc, size; + int nmatch=20; + regmatch_t pmatch[nmatch]; + + if (len <= 0) { + return; + } + rc = regexec(preg, what, nmatch, pmatch, 0); + if (rc == 0) { +#if 0 + Pmsg1(0, "\n\n%s\n0123456789012345678901234567890123456789\n 10 20 30\n", what); + Pmsg2(0, "%i-%i\n", pmatch[0].rm_so, pmatch[0].rm_eo); + Pmsg2(0, "%i-%i\n", pmatch[1].rm_so, pmatch[1].rm_eo); + Pmsg2(0, "%i-%i\n", pmatch[2].rm_so, pmatch[2].rm_eo); + Pmsg2(0, "%i-%i\n", pmatch[3].rm_so, pmatch[3].rm_eo); +#endif + size = pmatch[1].rm_eo - pmatch[1].rm_so; + *buf = check_pool_memory_size(*buf, size + 1); + memcpy(*buf, what+pmatch[1].rm_so, size); + (*buf)[size] = 0; + + items->list.append(bstrdup(*buf)); + /* We search for the next keyword in the line */ + match_kw(preg, what + pmatch[1].rm_eo, len - pmatch[1].rm_eo, buf); + } +} + +/* fill the items list with the output of the help command */ +void get_arguments(const char *what) +{ + regex_t preg; + POOLMEM *buf; + int rc; + init_items(); + + rc = regcomp(&preg, "(([a-z]+=)|([a-z]+)( |$))", REG_EXTENDED); + if (rc != 0) { + return; + } + + buf = get_pool_memory(PM_MESSAGE); + UA_sock->fsend(".help item=%s", what); + while (UA_sock->recv() > 0) { + strip_trailing_junk(UA_sock->msg); + match_kw(&preg, UA_sock->msg, UA_sock->msglen, &buf); + } + free_pool_memory(buf); + regfree(&preg); +} + +/* retreive a simple list (.pool, .client) and store it into items */ +void get_items(const char *what) +{ + init_items(); + + UA_sock->fsend("%s", what); + while (UA_sock->recv() > 0) { + strip_trailing_junk(UA_sock->msg); + items->list.append(bstrdup(UA_sock->msg)); + } +} + +typedef enum +{ + ITEM_ARG, /* item with simple list like .jobs */ + ITEM_HELP /* use help item=xxx and detect all arguments */ +} cpl_item_t; + +/* Generator function for command completion. STATE lets us know whether + * to start from scratch; without any state (i.e. STATE == 0), then we + * start at the top of the list. 
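
item_generator() below follows the standard GNU Readline generator contract: it is called repeatedly with an increasing state, returns one malloc'd match per call, and NULL when the candidates are exhausted. A minimal completer built the same way (the word list and function names are invented for the example, and the hook differs slightly from the console's own setup):

   #include <stdio.h>
   #include <stdlib.h>
   #include <string.h>
   #include <readline/readline.h>

   static const char *words[] = { "backup", "restore", "status", NULL };

   /* Generator: with state==0 start over, then hand back one match per call. */
   static char *word_generator(const char *text, int state)
   {
      static int idx, len;
      const char *name;

      if (!state) {
         idx = 0;
         len = strlen(text);
      }
      while ((name = words[idx++])) {
         if (strncmp(name, text, len) == 0) {
            return strdup(name);        /* readline frees this */
         }
      }
      return NULL;
   }

   static char **complete(const char *text, int start, int end)
   {
      (void)start; (void)end;
      rl_attempted_completion_over = 1; /* no fallback to filename completion */
      return rl_completion_matches(text, word_generator);
   }

   int main(void)
   {
      char *line;
      rl_attempted_completion_function = complete;
      line = readline("demo> ");
      if (line) {
         printf("you typed: %s\n", line);
         free(line);
      }
      return 0;
   }
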
+ */ +static char *item_generator(const char *text, int state, + const char *item, cpl_item_t type) +{ + static int list_index, len; + char *name; + + /* If this is a new word to complete, initialize now. This includes + * saving the length of TEXT for efficiency, and initializing the index + * variable to 0. + */ + if (!state) + { + list_index = 0; + len = strlen(text); + switch(type) { + case ITEM_ARG: + get_items(item); + break; + case ITEM_HELP: + get_arguments(item); + break; + } + } + + /* Return the next name which partially matches from the command list. */ + while (items && list_index < items->list.size()) + { + name = (char *)items->list[list_index]; + list_index++; + + if (strncmp(name, text, len) == 0) { + char *ret = (char *) actuallymalloc(strlen(name)+1); + strcpy(ret, name); + return ret; + } + } + + /* If no names matched, then return NULL. */ + return ((char *)NULL); +} + +/* gobal variables for the type and the item to search + * the readline API doesn' permit to pass user data. + */ +static const char *cpl_item; +static cpl_item_t cpl_type; + +static char *cpl_generator(const char *text, int state) +{ + return item_generator(text, state, cpl_item, cpl_type); +} + +/* this function is used to not use the default filename completion */ +static char *dummy_completion_function(const char *text, int state) +{ + return NULL; +} + +struct cpl_keywords_t { + const char *key; + const char *cmd; +}; + +static struct cpl_keywords_t cpl_keywords[] = { + {"pool=", ".pool" }, + {"fileset=", ".fileset" }, + {"client=", ".client" }, + {"job=", ".jobs" }, + {"restore_job=",".jobs type=R" }, + {"level=", ".level" }, + {"storage=", ".storage" }, + {"schedule=", ".schedule" }, + {"volume=", ".media" }, + {"oldvolume=", ".media" }, + {"volstatus=", ".volstatus" }, + {"ls", ".ls" }, + {"cd", ".lsdir" }, + {"mark", ".ls" }, + {"m", ".ls" }, + {"unmark", ".lsmark" }, + {"catalog=", ".catalogs" }, + {"actiononpurge=", ".actiononpurge" }, + {"tags=", ".tags" }, + {"recylepool=", ".pool" }, + {"allfrompool=",".pool" } +}; +#define key_size ((int)(sizeof(cpl_keywords)/sizeof(struct cpl_keywords_t))) + +/* Attempt to complete on the contents of TEXT. START and END bound the + * region of rl_line_buffer that contains the word to complete. TEXT is + * the word to complete. We can use the entire contents of rl_line_buffer + * in case we want to do some simple parsing. Return the array of matches, + * or NULL if there aren't any. + */ +static char **readline_completion(const char *text, int start, int end) +{ + bool found=false; + char **matches; + char *s, *cmd; + matches = (char **)NULL; + + /* If this word is at the start of the line, then it is a command + * to complete. Otherwise it is the name of a file in the current + * directory. 
+ */ + s = get_previous_keyword(start, 0); + cmd = get_first_keyword(); + if (s) { + for (int i=0; i < key_size; i++) { + if (!strcasecmp(s, cpl_keywords[i].key)) { + cpl_item = cpl_keywords[i].cmd; + cpl_type = ITEM_ARG; + matches = rl_completion_matches(text, cpl_generator); + found=true; + break; + } + } + + if (!found) { /* we try to get help with the first command */ + cpl_item = cmd; + cpl_type = ITEM_HELP; + /* we don't want to append " " at the end */ + rl_completion_suppress_append=true; + matches = rl_completion_matches(text, cpl_generator); + } + free(s); + } else { /* nothing on the line, display all commands */ + cpl_item = ".help all"; + cpl_type = ITEM_ARG; + matches = rl_completion_matches(text, cpl_generator); + } + if (cmd) { + free(cmd); + } + return (matches); +} + +static char eol = '\0'; +static int eolcmd(FILE *input, BSOCK *UA_sock) +{ + if ((argc > 1) && (strchr("!$%&'()*+,-/:;<>?[]^`{|}~", argk[1][0]) != NULL)) { + eol = argk[1][0]; + } else if (argc == 1) { + eol = '\0'; + } else { + sendit(_("Illegal separator character.\n")); + } + return 1; +} + +/* + * Return 1 if OK + * 0 if no input + * -1 error (must stop) + */ +int +get_cmd(FILE *input, const char *prompt, BSOCK *sock, int sec) +{ + static char *line = NULL; + static char *next = NULL; + static int do_history = 0; + char *command; + + if (line == NULL) { + do_history = 0; + rl_catch_signals = 0; /* do it ourselves */ + /* Here, readline does ***real*** malloc + * so, be we have to use the real free + */ + line = readline((char *)prompt); /* cast needed for old readlines */ + if (!line) { + return -1; /* error return and exit */ + } + strip_trailing_junk(line); + command = line; + } else if (next) { + command = next + 1; + } else { + sendit(_("Command logic problem\n")); + sock->msglen = 0; + sock->msg[0] = 0; + return 0; /* No input */ + } + + /* + * Split "line" into multiple commands separated by the eol character. + * Each part is pointed to by "next" until finally it becomes null. + */ + if (eol == '\0') { + next = NULL; + } else { + next = strchr(command, eol); + if (next) { + *next = '\0'; + } + } + if (command != line && isatty(fileno(input))) { + senditf("%s%s\n", prompt, command); + + } else { + /* Send the intput to the output file if needed */ + if (teein && output != stdout) { + fputs(prompt, output); + fputs(command, output); + fputs("\n", output); + } + } + + sock->msglen = pm_strcpy(&sock->msg, command); + if (sock->msglen) { + do_history++; + } + + if (!next) { + if (do_history) { + add_history(line); + } + actuallyfree(line); /* allocated by readline() malloc */ + line = NULL; + } + return 1; /* OK */ +} + +#else /* no readline, do it ourselves */ + +#ifdef HAVE_CONIO +static bool bisatty(int fd) +{ + if (no_conio) { + return false; + } + return isatty(fd); +} +#endif + +/* + * Returns: 1 if data available + * 0 if timeout + * -1 if error + */ +static int +wait_for_data(int fd, int sec) +{ +#if defined(HAVE_WIN32) + return 1; +#else + for ( ;; ) { + switch(fd_wait_data(fd, WAIT_READ, sec, 0)) { + case 0: /* timeout */ + return 0; + case -1: + if (errno == EINTR || errno == EAGAIN) { + continue; + } + return -1; /* error return */ + default: + return 1; + } + } +#endif +} + +/* + * Get next input command from terminal. 
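
wait_for_data() above delegates to Bacula's fd_wait_data() with WAIT_READ; the same behaviour (distinguish timeout, error and readable data, retrying on EINTR) can be sketched with plain POSIX select(), roughly:

   #include <stdio.h>
   #include <errno.h>
   #include <sys/select.h>
   #include <unistd.h>

   /* Wait until fd is readable.
    * Returns 1 if data is available, 0 on timeout, -1 on error. */
   static int wait_readable(int fd, int sec)
   {
      for (;;) {
         fd_set rfds;
         struct timeval tv;

         FD_ZERO(&rfds);
         FD_SET(fd, &rfds);
         tv.tv_sec = sec;
         tv.tv_usec = 0;

         switch (select(fd + 1, &rfds, NULL, NULL, &tv)) {
         case 0:
            return 0;                   /* timed out          */
         case -1:
            if (errno == EINTR || errno == EAGAIN) {
               continue;                /* interrupted: retry */
            }
            return -1;                  /* real error         */
         default:
            return 1;                   /* fd is readable     */
         }
      }
   }

   int main(void)
   {
      printf("type something within 5 seconds...\n");
      printf("result: %d\n", wait_readable(STDIN_FILENO, 5));
      return 0;
   }
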
+ * + * Returns: 1 if got input + * 0 if timeout + * -1 if EOF or error + */ +int +get_cmd(FILE *input, const char *prompt, BSOCK *sock, int sec) +{ + int len; + if (!stop) { + if (output == stdout || teeout) { + sendit(prompt); + } + } +again: + switch (wait_for_data(fileno(input), sec)) { + case 0: + return 0; /* timeout */ + case -1: + return -1; /* error */ + default: + len = sizeof_pool_memory(sock->msg) - 1; + if (stop) { + sleep(1); + goto again; + } +#ifdef HAVE_CONIO + if (bisatty(fileno(input))) { + input_line(sock->msg, len); + break; + } +#endif +#ifdef HAVE_WIN32 /* use special console for input on win32 */ + if (input == stdin) { + if (win32_cgets(sock->msg, len) == NULL) { + return -1; + } + } + else +#endif + if (bfgets(sock->msg, input) == NULL) { + return -1; + + } + break; + } + if (usrbrk()) { + clrbrk(); + } + strip_trailing_junk(sock->msg); + sock->msglen = strlen(sock->msg); + + /* Send input to log file if needed */ + if (teein && output != stdout) { + fputs(sock->msg, output); + fputs("\n", output); + } + + return 1; +} + +#endif /* ! HAVE_READLINE */ + +/* Routine to return true if user types break */ +int usrbrk() +{ + return brkflg; +} + +/* Clear break flag */ +void clrbrk() +{ + brkflg = 0; +} + +/* Interrupt caught here */ +static void sigintcatcher(int sig) +{ + brkflg++; + if (brkflg > 3) { + terminate_console(sig); + } + signal(SIGINT, sigintcatcher); +} + +/* Trap Ctl-C */ +void trapctlc() +{ + signal(SIGINT, sigintcatcher); +} + +static int console_update_history(const char *histfile) +{ + int ret=0; + +#ifdef HAVE_READLINE +/* + * first, try to truncate the history file, and if it + * fails, the file is probably not present, and we + * can use write_history to create it + */ + + if (history_truncate_file(histfile, 100) == 0) { + ret = append_history(history_length, histfile); + } else { + ret = write_history(histfile); + } +#endif + + return ret; +} + +static int console_init_history(const char *histfile) +{ + int ret=0; + +#ifdef HAVE_READLINE + using_history(); + ret = read_history(histfile); + /* Tell the completer that we want a complete . 
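
console_init_history() and console_update_history() here keep ~/.bconsole_history capped at 100 entries with the GNU History library. Stripped of the surrounding plumbing, the pattern is roughly the following (file name and limit are placeholders for the example):

   #include <stdio.h>
   #include <stdlib.h>
   #include <readline/readline.h>
   #include <readline/history.h>

   #define HISTFILE "./demo_history"
   #define HISTMAX  100

   int main(void)
   {
      char *line;

      using_history();
      read_history(HISTFILE);           /* may fail harmlessly on first run */
      stifle_history(HISTMAX);          /* keep at most HISTMAX entries     */

      line = readline("history demo> ");
      if (line && *line) {
         add_history(line);
      }
      free(line);

      /* Truncate the file first; if that fails the file probably does not
       * exist yet, so create it with write_history() instead. */
      if (history_truncate_file(HISTFILE, HISTMAX) == 0) {
         append_history(1, HISTFILE);   /* append just the new entry */
      } else {
         write_history(HISTFILE);
      }
      return 0;
   }
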
*/ + rl_completion_entry_function = dummy_completion_function; + rl_attempted_completion_function = readline_completion; + rl_filename_completion_desired = 0; + stifle_history(100); +#endif + + return ret; +} + +static bool select_director(const char *director, const char *console, + DIRRES **ret_dir, CONRES **ret_cons) +{ + int numcon=0, numdir=0; + int i=0, item=0; + BSOCK *UA_sock; + DIRRES *dir = NULL; + CONRES *cons = NULL; + + *ret_cons = NULL; + *ret_dir = NULL; + + LockRes(); + numdir = 0; + foreach_res(dir, R_DIRECTOR) { + numdir++; + } + numcon = 0; + foreach_res(cons, R_CONSOLE) { + numcon++; + } + UnlockRes(); + + if (numdir == 1) { /* No choose */ + dir = (DIRRES *)GetNextRes(R_DIRECTOR, NULL); + } + + if (director) { /* Command line choice overwrite the no choose option */ + LockRes(); + foreach_res(dir, R_DIRECTOR) { + if (bstrcasecmp(dir->hdr.name, director)) { + break; + } + } + UnlockRes(); + if (!dir) { /* Can't find Director used as argument */ + senditf(_("Can't find %s in Director list\n"), director); + return 0; + } + } + + if (dir == NULL) { /* prompt for director */ + UA_sock = new_bsock(); +try_again: + sendit(_("Available Directors:\n")); + LockRes(); + numdir = 0; + foreach_res(dir, R_DIRECTOR) { + senditf( _("%2d: %s at %s:%d\n"), 1+numdir++, dir->hdr.name, + dir->address, dir->DIRport); + } + UnlockRes(); + if (get_cmd(stdin, _("Select Director by entering a number: "), + UA_sock, 600) < 0) + { + (void)WSACleanup(); /* Cleanup Windows sockets */ + return 0; + } + if (!is_a_number(UA_sock->msg)) { + senditf(_("%s is not a number. You must enter a number between " + "1 and %d\n"), + UA_sock->msg, numdir); + goto try_again; + } + item = atoi(UA_sock->msg); + if (item < 0 || item > numdir) { + senditf(_("You must enter a number between 1 and %d\n"), numdir); + goto try_again; + } + free_bsock(UA_sock); + LockRes(); + for (i=0; ihdr.name, console) == 0) { + break; + } + } else if (cons->director && strcasecmp(cons->director, dir->hdr.name) == 0) { + break; + } + if (i == (numcon - 1)) { + cons = NULL; + } + } + + if (cons == NULL && console != NULL) { + UnlockRes(); + senditf(_("Can't find %s in Console list\n"), console); + return 0; + } + + /* Look for the first non-linked console */ + if (cons == NULL) { + for (i=0; idirector == NULL) { + break; + } + if (i == (numcon - 1)) { + cons = NULL; + } + } + } + + /* If no console, take first one */ + if (!cons) { + cons = (CONRES *)GetNextRes(R_CONSOLE, (RES *)NULL); + } + UnlockRes(); + + *ret_dir = dir; + *ret_cons = cons; + + return 1; +} + +/********************************************************************* + * + * Main Bacula Console -- User Interface Program + * + */ +int main(int argc, char *argv[]) +{ + int ch; + char *director = NULL; + char *console = NULL; + bool list_directors=false, list_consoles=false; + bool no_signals = false; + bool test_config = false; + JCR jcr; + utime_t heart_beat; + + setlocale(LC_ALL, ""); + bindtextdomain("bacula", LOCALEDIR); + textdomain("bacula"); + + init_stack_dump(); + lmgr_init_thread(); + my_name_is(argc, argv, "bconsole"); + init_msg(NULL, NULL); + working_directory = "/tmp"; + args = get_pool_memory(PM_FNAME); + + while ((ch = getopt(argc, argv, "D:lc:d:nstu:?C:L")) != -1) { + switch (ch) { + case 'D': /* Director */ + if (director) { + free(director); + } + director = bstrdup(optarg); + break; + + case 'C': /* Console */ + if (console) { + free(console); + } + console = bstrdup(optarg); + break; + + case 'L': /* Console */ + list_consoles = true; + test_config = 
true; + break; + + case 'l': + list_directors = true; + test_config = true; + break; + + case 'c': /* configuration file */ + if (configfile != NULL) { + free(configfile); + } + configfile = bstrdup(optarg); + break; + + case 'd': + if (*optarg == 't') { + dbg_timestamp = true; + } else { + debug_level = atoi(optarg); + if (debug_level <= 0) { + debug_level = 1; + } + } + break; + + case 'n': /* no conio */ + no_conio = true; + break; + + case 's': /* turn off signals */ + no_signals = true; + break; + + case 't': + test_config = true; + break; + + case 'u': + timeout = atoi(optarg); + break; + + case '?': + default: + usage(); + exit(1); + } + } + argc -= optind; + argv += optind; + + if (!no_signals) { + init_signals(terminate_console); + } + + +#if !defined(HAVE_WIN32) + /* Override Bacula default signals */ + signal(SIGQUIT, SIG_IGN); + signal(SIGTSTP, got_sigstop); + signal(SIGCONT, got_sigcontinue); + signal(SIGTTIN, got_sigtin); + signal(SIGTTOU, got_sigtout); + trapctlc(); +#endif + + OSDependentInit(); + + if (argc) { + usage(); + exit(1); + } + + if (configfile == NULL) { + configfile = bstrdup(CONFIG_FILE); + } + + config = New(CONFIG()); + parse_cons_config(config, configfile, M_ERROR_TERM); + + if (init_crypto() != 0) { + Emsg0(M_ERROR_TERM, 0, _("Cryptography library initialization failed.\n")); + } + + if (!check_resources()) { + Emsg1(M_ERROR_TERM, 0, _("Please correct configuration file: %s\n"), configfile); + } + + if (!no_conio) { + con_init(stdin); + } + + if (list_directors) { + LockRes(); + foreach_res(dir, R_DIRECTOR) { + senditf("%s\n", dir->hdr.name); + } + UnlockRes(); + } + + if (list_consoles) { + LockRes(); + foreach_res(cons, R_CONSOLE) { + senditf("%s\n", cons->hdr.name); + } + UnlockRes(); + } + + if (test_config) { + terminate_console(0); + exit(0); + } + + memset(&jcr, 0, sizeof(jcr)); + + (void)WSA_Init(); /* Initialize Windows sockets */ + + start_watchdog(); /* Start socket watchdog */ + + if (!select_director(director, console, &dir, &cons)) { + terminate_console(0); + return 1; + } + + senditf(_("Connecting to Director %s:%d\n"), dir->address,dir->DIRport); + + char buf[1024]; + /* Initialize Console TLS context */ + if (cons && (cons->tls_enable || cons->tls_require)) { + /* Generate passphrase prompt */ + bsnprintf(buf, sizeof(buf), "Passphrase for Console \"%s\" TLS private key: ", cons->hdr.name); + + /* Initialize TLS context: + * Args: CA certfile, CA certdir, Certfile, Keyfile, + * Keyfile PEM Callback, Keyfile CB Userdata, DHfile, Verify Peer + */ + cons->tls_ctx = new_tls_context(cons->tls_ca_certfile, + cons->tls_ca_certdir, cons->tls_certfile, + cons->tls_keyfile, tls_pem_callback, &buf, NULL, true); + + if (!cons->tls_ctx) { + senditf(_("Failed to initialize TLS context for Console \"%s\".\n"), + dir->hdr.name); + terminate_console(0); + return 1; + } + } + + /* Initialize Director TLS context */ + if (dir->tls_enable || dir->tls_require) { + /* Generate passphrase prompt */ + bsnprintf(buf, sizeof(buf), "Passphrase for Director \"%s\" TLS private key: ", dir->hdr.name); + + /* Initialize TLS context: + * Args: CA certfile, CA certdir, Certfile, Keyfile, + * Keyfile PEM Callback, Keyfile CB Userdata, DHfile, Verify Peer */ + dir->tls_ctx = new_tls_context(dir->tls_ca_certfile, + dir->tls_ca_certdir, dir->tls_certfile, + dir->tls_keyfile, tls_pem_callback, &buf, NULL, true); + + if (!dir->tls_ctx) { + senditf(_("Failed to initialize TLS context for Director \"%s\".\n"), + dir->hdr.name); + terminate_console(0); + return 1; + } + } + + if 
(dir->heartbeat_interval) { + heart_beat = dir->heartbeat_interval; + } else if (cons) { + heart_beat = cons->heartbeat_interval; + } else { + heart_beat = 0; + } + if (!UA_sock) { + UA_sock = new_bsock(); + } + if (!UA_sock->connect(NULL, 5, 15, heart_beat, "Director daemon", dir->address, + NULL, dir->DIRport, 0)) { + UA_sock->destroy(); + UA_sock = NULL; + terminate_console(0); + return 1; + } + jcr.dir_bsock = UA_sock; + + /* If cons==NULL, default console will be used */ + if (!authenticate_director(UA_sock, dir, cons)) { + terminate_console(0); + return 1; + } + + Dmsg0(40, "Opened connection with Director daemon\n"); + + sendit(_("Enter a period to cancel a command.\n")); + + /* Read/Update history file if HOME exists */ + POOL_MEM history_file; + + /* Run commands in ~/.bconsolerc if any */ + char *env = getenv("HOME"); + if (env) { + FILE *fd; + pm_strcpy(&UA_sock->msg, env); + pm_strcat(&UA_sock->msg, "/.bconsolerc"); + fd = bfopen(UA_sock->msg, "rb"); + if (fd) { + read_and_process_input(fd, UA_sock); + fclose(fd); + } + + pm_strcpy(history_file, env); + pm_strcat(history_file, "/.bconsole_history"); + console_init_history(history_file.c_str()); + } + + read_and_process_input(stdin, UA_sock); + + if (UA_sock) { + UA_sock->signal(BNET_TERMINATE); /* send EOF */ + UA_sock->close(); + } + + if (env) { + console_update_history(history_file.c_str()); + } + + terminate_console(0); + return 0; +} + +/* Cleanup and then exit */ +static void terminate_console(int sig) +{ + + static bool already_here = false; + + if (already_here) { /* avoid recursive temination problems */ + exit(1); + } + already_here = true; + stop_watchdog(); + delete(config); + config = NULL; + cleanup_crypto(); + free(res_head); + res_head = NULL; + free_pool_memory(args); +#if defined(HAVE_CONIO) + if (!no_conio) { + con_term(); + } +#elif defined(HAVE_READLINE) + rl_cleanup_after_signal(); +#else /* !HAVE_CONIO && !HAVE_READLINE */ +#endif + (void)WSACleanup(); /* Cleanup Windows sockets */ + lmgr_cleanup_main(); + + if (sig != 0) { + exit(1); + } + return; +} + +/* + * Make a quick check to see that we have all the + * resources needed. + */ +static int check_resources() +{ + bool OK = true; + DIRRES *director; + bool tls_needed; + + LockRes(); + + numdir = 0; + foreach_res(director, R_DIRECTOR) { + + numdir++; + /* tls_require implies tls_enable */ + if (director->tls_require) { + if (have_tls) { + director->tls_enable = true; + } else { + Jmsg(NULL, M_FATAL, 0, _("TLS required but not configured in Bacula.\n")); + OK = false; + continue; + } + } + tls_needed = director->tls_enable || director->tls_authenticate; + + if ((!director->tls_ca_certfile && !director->tls_ca_certdir) && tls_needed) { + Emsg2(M_FATAL, 0, _("Neither \"TLS CA Certificate\"" + " or \"TLS CA Certificate Dir\" are defined for Director \"%s\" in %s." 
+ " At least one CA certificate store is required.\n"), + director->hdr.name, configfile); + OK = false; + } + } + + if (numdir == 0) { + Emsg1(M_FATAL, 0, _("No Director resource defined in %s\n" + "Without that I don't how to speak to the Director :-(\n"), configfile); + OK = false; + } + + CONRES *cons; + /* Loop over Consoles */ + foreach_res(cons, R_CONSOLE) { + /* tls_require implies tls_enable */ + if (cons->tls_require) { + if (have_tls) { + cons->tls_enable = true; + } else { + Jmsg(NULL, M_FATAL, 0, _("TLS required but not configured in Bacula.\n")); + OK = false; + continue; + } + } + tls_needed = cons->tls_enable || cons->tls_authenticate; + if ((!cons->tls_ca_certfile && !cons->tls_ca_certdir) && tls_needed) { + Emsg2(M_FATAL, 0, _("Neither \"TLS CA Certificate\"" + " or \"TLS CA Certificate Dir\" are defined for Console \"%s\" in %s.\n"), + cons->hdr.name, configfile); + OK = false; + } + } + + UnlockRes(); + + return OK; +} + +/* @version */ +static int versioncmd(FILE *input, BSOCK *UA_sock) +{ + senditf("Version: " VERSION " (" BDATE ") %s %s %s\n", + HOST_OS, DISTNAME, DISTVER); + return 1; +} + +/* @input */ +static int inputcmd(FILE *input, BSOCK *UA_sock) +{ + FILE *fd; + + if (argc > 2) { + sendit(_("Too many arguments on input command.\n")); + return 1; + } + if (argc == 1) { + sendit(_("First argument to input command must be a filename.\n")); + return 1; + } + fd = bfopen(argk[1], "rb"); + if (!fd) { + berrno be; + senditf(_("Cannot open file %s for input. ERR=%s\n"), + argk[1], be.bstrerror()); + return 1; + } + read_and_process_input(fd, UA_sock); + fclose(fd); + return 1; +} + +/* @tall */ +/* Send input/output to both terminal and specified file */ +static int teeallcmd(FILE *input, BSOCK *UA_sock) +{ + teeout = true; + teein = true; + return do_outputcmd(input, UA_sock); +} + +/* @tee */ +/* Send output to both terminal and specified file */ +static int teecmd(FILE *input, BSOCK *UA_sock) +{ + teeout = true; + teein = false; + return do_outputcmd(input, UA_sock); +} + +/* @output */ +/* Send output to specified "file" */ +static int outputcmd(FILE *input, BSOCK *UA_sock) +{ + teeout = false; + teein = false; + return do_outputcmd(input, UA_sock); +} + + +static int do_outputcmd(FILE *input, BSOCK *UA_sock) +{ + FILE *fd; + const char *mode = "a+b"; + + if (argc > 3) { + sendit(_("Too many arguments on output/tee command.\n")); + return 1; + } + if (argc == 1) { + if (output != stdout) { + fclose(output); + output = stdout; + teeout = false; + teein = false; + } + return 1; + } + if (argc == 3) { + mode = argk[2]; + } + fd = bfopen(argk[1], mode); + if (!fd) { + berrno be; + senditf(_("Cannot open file %s for output. ERR=%s\n"), + argk[1], be.bstrerror(errno)); + return 1; + } + output = fd; + return 1; +} + +/* + * @exec "some-command" [wait-seconds] +*/ +static int execcmd(FILE *input, BSOCK *UA_sock) +{ + BPIPE *bpipe; + char line[5000]; + int stat; + int wait = 0; + char *cmd; + + if (argc > 3) { + sendit(_("Too many arguments. 
Enclose command in double quotes.\n")); + return 1; + } + + /* old syntax */ + if (argc == 3) { + wait = atoi(argk[2]); + } + cmd = argk[1]; + + /* handle cmd=XXXX and wait=XXXX */ + for (int i=1; irfd)) { + senditf("%s", line); + } + stat = close_bpipe(bpipe); + if (stat != 0) { + berrno be; + be.set_errno(stat); + senditf(_("@exec error: ERR=%s\n"), be.bstrerror()); + } + return 1; +} + +/* @echo xxx yyy */ +static int echocmd(FILE *input, BSOCK *UA_sock) +{ + for (int i=1; i < argc; i++) { + senditf("%s ", argk[i]); + } + sendit("\n"); + return 1; +} + +/* @quit */ +static int quitcmd(FILE *input, BSOCK *UA_sock) +{ + return 0; +} + +/* @help */ +static int helpcmd(FILE *input, BSOCK *UA_sock) +{ + int i; + for (i=0; i 1) { + sleep(atoi(argk[1])); + } + return 1; +} + +/* @putfile key /path/to/file + * + * The Key parameter is needed to use the file on the director side. + */ +static int putfilecmd(FILE *input, BSOCK *UA_sock) +{ + int i = 0; + const char *key = "putfile"; + const char *fname; + FILE *fp; + + if (argc != 3) { + sendit("Usage: @putfile key file\n"); + return 1; + } + + key = argk[1]; + fname = argk[2]; + + if (!key || !fname) { + senditf("Syntax error in @putfile command\n"); + return 1; + } + + fp = bfopen(fname, "r"); + if (!fp) { + berrno be; + senditf("Unable to open %s. ERR=%s\n", fname, be.bstrerror(errno)); + return 1; + } + + UA_sock->fsend(".putfile key=\"%s\"", key); + + /* Just read the file and send it to the director */ + while (!feof(fp)) { + i = fread(UA_sock->msg, 1, sizeof_pool_memory(UA_sock->msg) - 1, fp); + if (i > 0) { + UA_sock->msg[i] = 0; + UA_sock->msglen = i; + UA_sock->send(); + } + } + + UA_sock->signal(BNET_EOD); + fclose(fp); + + /* Get the file name associated */ + while (UA_sock->recv() > 0) { + senditf("%s", UA_sock->msg); + } + return 1; +} + +/* @time */ +static int timecmd(FILE *input, BSOCK *UA_sock) +{ + char sdt[50]; + time_t ttime = time(NULL); + struct tm tm; + (void)localtime_r(&ttime, &tm); + strftime(sdt, sizeof(sdt), "%d-%b-%Y %H:%M:%S", &tm); + sendit("\n"); + return 1; +} + +/* + * Send a line to the output file and or the terminal + */ +void senditf(const char *fmt,...) +{ + char buf[3000]; + va_list arg_ptr; + + va_start(arg_ptr, fmt); + bvsnprintf(buf, sizeof(buf), (char *)fmt, arg_ptr); + va_end(arg_ptr); + sendit(buf); +} + +void sendit(const char *buf) +{ +#ifdef CONIO_FIX + char obuf[3000]; + if (output == stdout || teeout) { + const char *p, *q; + /* + * Here, we convert every \n into \r\n because the + * terminal is in raw mode when we are using + * conio. + */ + for (p=q=buf; (p=strchr(q, '\n')); ) { + int len = p - q; + if (len > 0) { + memcpy(obuf, q, len); + } + memcpy(obuf+len, "\r\n", 3); + q = ++p; /* point after \n */ + fputs(obuf, output); + } + if (*q) { + fputs(q, output); + } + fflush(output); + } + if (output != stdout) { + fputs(buf, output); + } +#else + + fputs(buf, output); + fflush(output); + if (teeout) { + fputs(buf, stdout); + fflush(stdout); + } +#endif +} diff --git a/src/console/console_conf.c b/src/console/console_conf.c new file mode 100644 index 00000000..2f647af6 --- /dev/null +++ b/src/console/console_conf.c @@ -0,0 +1,323 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Main configuration file parser for Bacula User Agent + * some parts may be split into separate files such as + * the schedule configuration (sch_config.c). + * + * Note, the configuration file parser consists of three parts + * + * 1. The generic lexical scanner in lib/lex.c and lib/lex.h + * + * 2. The generic config scanner in lib/parse_config.c and + * lib/parse_config.h. + * These files contain the parser code, some utility + * routines, and the common store routines (name, int, + * string). + * + * 3. The daemon specific file, which contains the Resource + * definitions as well as any specific store routines + * for the resource records. + * + * Kern Sibbald, January MM, September MM + */ + +#include "bacula.h" +#include "console_conf.h" + +/* Define the first and last resource ID record + * types. Note, these should be unique for each + * daemon though not a requirement. + */ +int32_t r_first = R_FIRST; +int32_t r_last = R_LAST; +RES_HEAD **res_head; + +/* Forward referenced subroutines */ + + +/* We build the current resource here as we are + * scanning the resource configuration definition, + * then move it to allocated memory when the resource + * scan is complete. + */ +#if defined(_MSC_VER) +extern "C" { // work around visual compiler mangling variables + URES res_all; +} +#else +URES res_all; +#endif +int32_t res_all_size = sizeof(res_all); + +/* Definition of records permitted within each + * resource with the routine to process the record + * information. 
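
The RES_ITEM tables below drive the generic parser in lib/parse_config.c: each row names a directive, the store routine that parses its value, and the field it fills in the resource being built. The same idea in miniature, with types and handlers invented for the example rather than taken from the Bacula headers:

   #include <stdio.h>
   #include <stdlib.h>
   #include <string.h>
   #include <strings.h>
   #include <stddef.h>

   /* Resource being filled in while a "Director {}" block is scanned. */
   struct dir_res {
      char     *name;
      char     *address;
      unsigned  port;
   };

   typedef void (*store_fn)(struct dir_res *res, size_t off, const char *value);

   static void store_str(struct dir_res *res, size_t off, const char *value)
   {
      *(char **)((char *)res + off) = strdup(value);
   }

   static void store_uint(struct dir_res *res, size_t off, const char *value)
   {
      *(unsigned *)((char *)res + off) = (unsigned)strtoul(value, NULL, 10);
   }

   /* One row per directive: keyword, store routine, offset of the target field. */
   static const struct { const char *key; store_fn store; size_t off; } items[] = {
      { "Name",    store_str,  offsetof(struct dir_res, name)    },
      { "Address", store_str,  offsetof(struct dir_res, address) },
      { "DirPort", store_uint, offsetof(struct dir_res, port)    },
   };

   int main(void)
   {
      struct dir_res res = { NULL, NULL, 0 };
      const char *input[][2] = { { "Name", "backup-dir" },
                                 { "Address", "127.0.0.1" },
                                 { "DirPort", "9101" } };

      for (size_t i = 0; i < 3; i++) {
         for (size_t j = 0; j < sizeof(items)/sizeof(items[0]); j++) {
            if (strcasecmp(input[i][0], items[j].key) == 0) {
               items[j].store(&res, items[j].off, input[i][1]);
            }
         }
      }
      printf("Director %s at %s:%u\n", res.name, res.address, res.port);
      free(res.name);
      free(res.address);
      return 0;
   }
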
+ */ + +/* Console "globals" */ +static RES_ITEM cons_items[] = { + {"Name", store_name, ITEM(res_cons.hdr.name), 0, ITEM_REQUIRED, 0}, + {"Description", store_str, ITEM(res_cons.hdr.desc), 0, 0, 0}, + {"RCFile", store_dir, ITEM(res_cons.rc_file), 0, 0, 0}, + {"HistoryFile", store_dir, ITEM(res_cons.hist_file), 0, 0, 0}, + {"Password", store_password, ITEM(res_cons.password), 0, ITEM_REQUIRED, 0}, + {"TlsAuthenticate",store_bool, ITEM(res_cons.tls_authenticate), 0, 0, 0}, + {"TlsEnable", store_bool, ITEM(res_cons.tls_enable), 0, 0, 0}, + {"TlsRequire", store_bool, ITEM(res_cons.tls_require), 0, 0, 0}, + {"TlsCaCertificateFile", store_dir, ITEM(res_cons.tls_ca_certfile), 0, 0, 0}, + {"TlsCaCertificateDir", store_dir, ITEM(res_cons.tls_ca_certdir), 0, 0, 0}, + {"TlsCertificate", store_dir, ITEM(res_cons.tls_certfile), 0, 0, 0}, + {"TlsKey", store_dir, ITEM(res_cons.tls_keyfile), 0, 0, 0}, + {"Director", store_str, ITEM(res_cons.director), 0, 0, 0}, + {"HeartbeatInterval", store_time, ITEM(res_cons.heartbeat_interval), 0, ITEM_DEFAULT, 5 * 60}, + {"CommCompression", store_bool, ITEM(res_cons.comm_compression), 0, ITEM_DEFAULT, true}, + {NULL, NULL, {0}, 0, 0, 0} +}; + + +/* Director's that we can contact */ +static RES_ITEM dir_items[] = { + {"Name", store_name, ITEM(res_dir.hdr.name), 0, ITEM_REQUIRED, 0}, + {"Description", store_str, ITEM(res_dir.hdr.desc), 0, 0, 0}, + {"DirPort", store_pint32, ITEM(res_dir.DIRport), 0, ITEM_DEFAULT, 9101}, + {"Address", store_str, ITEM(res_dir.address), 0, 0, 0}, + {"Password", store_password, ITEM(res_dir.password), 0, ITEM_REQUIRED, 0}, + {"TlsAuthenticate",store_bool, ITEM(res_dir.tls_enable), 0, 0, 0}, + {"TlsEnable", store_bool, ITEM(res_dir.tls_enable), 0, 0, 0}, + {"TlsRequire", store_bool, ITEM(res_dir.tls_require), 0, 0, 0}, + {"TlsCaCertificateFile", store_dir, ITEM(res_dir.tls_ca_certfile), 0, 0, 0}, + {"TlsCaCertificateDir", store_dir, ITEM(res_dir.tls_ca_certdir), 0, 0, 0}, + {"TlsCertificate", store_dir, ITEM(res_dir.tls_certfile), 0, 0, 0}, + {"TlsKey", store_dir, ITEM(res_dir.tls_keyfile), 0, 0, 0}, + {"HeartbeatInterval", store_time, ITEM(res_dir.heartbeat_interval), 0, ITEM_DEFAULT, 5 * 60}, + {NULL, NULL, {0}, 0, 0, 0} +}; + +/* + * This is the master resource definition. + * It must have one item for each of the resources. + * + * name item rcode + */ +RES_TABLE resources[] = { + {"Console", cons_items, R_CONSOLE}, + {"Director", dir_items, R_DIRECTOR}, + {NULL, NULL, 0} +}; + +/* Dump contents of resource */ +void dump_resource(int type, RES *rres, void sendit(void *sock, const char *fmt, ...), void *sock) +{ + URES *res = (URES *)rres; + bool recurse = true; + + if (res == NULL) { + printf(_("No record for %d %s\n"), type, res_to_str(type)); + return; + } + if (type < 0) { /* no recursion */ + type = - type; + recurse = false; + } + switch (type) { + case R_CONSOLE: + printf(_("Console: name=%s rcfile=%s histfile=%s\n"), rres->name, + res->res_cons.rc_file, res->res_cons.hist_file); + break; + case R_DIRECTOR: + printf(_("Director: name=%s address=%s DIRport=%d\n"), rres->name, + res->res_dir.address, res->res_dir.DIRport); + break; + default: + printf(_("Unknown resource type %d\n"), type); + } + + rres = GetNextRes(type, rres); + if (recurse && rres) { + dump_resource(type, rres, sendit, sock); + } +} + +/* + * Free memory of resource. + * NB, we don't need to worry about freeing any references + * to other resources as they will be freed when that + * resource chain is traversed. 
Mainly we worry about freeing + * allocated strings (names). + */ +void free_resource(RES *rres, int type) +{ + URES *res = (URES *)rres; + + if (res == NULL) { + return; + } + + /* common stuff -- free the resource name */ + if (res->res_dir.hdr.name) { + free(res->res_dir.hdr.name); + } + if (res->res_dir.hdr.desc) { + free(res->res_dir.hdr.desc); + } + + switch (type) { + case R_CONSOLE: + if (res->res_cons.rc_file) { + free(res->res_cons.rc_file); + } + if (res->res_cons.hist_file) { + free(res->res_cons.hist_file); + } + if (res->res_cons.tls_ctx) { + free_tls_context(res->res_cons.tls_ctx); + } + if (res->res_cons.tls_ca_certfile) { + free(res->res_cons.tls_ca_certfile); + } + if (res->res_cons.tls_ca_certdir) { + free(res->res_cons.tls_ca_certdir); + } + if (res->res_cons.tls_certfile) { + free(res->res_cons.tls_certfile); + } + if (res->res_cons.tls_keyfile) { + free(res->res_cons.tls_keyfile); + } + if (res->res_cons.director) { + free(res->res_cons.director); + } + if (res->res_cons.password) { + free(res->res_cons.password); + } + break; + case R_DIRECTOR: + if (res->res_dir.address) { + free(res->res_dir.address); + } + if (res->res_dir.tls_ctx) { + free_tls_context(res->res_dir.tls_ctx); + } + if (res->res_dir.tls_ca_certfile) { + free(res->res_dir.tls_ca_certfile); + } + if (res->res_dir.tls_ca_certdir) { + free(res->res_dir.tls_ca_certdir); + } + if (res->res_dir.tls_certfile) { + free(res->res_dir.tls_certfile); + } + if (res->res_dir.tls_keyfile) { + free(res->res_dir.tls_keyfile); + } + if (res->res_dir.password) { + free(res->res_dir.password); + } + break; + default: + printf(_("Unknown resource type %d\n"), type); + } + /* Common stuff again -- free the resource, recurse to next one */ + free(res); +} + +/* Save the new resource by chaining it into the head list for + * the resource. If this is pass 2, we update any resource + * pointers (currently only in the Job resource). + */ +bool save_resource(CONFIG *config, int type, RES_ITEM *items, int pass) +{ + int rindex = type - r_first; + int i, size; + int error = 0; + + /* + * Ensure that all required items are present + */ + for (i=0; items[i].name; i++) { + if (items[i].flags & ITEM_REQUIRED) { + if (!bit_is_set(i, res_all.res_dir.hdr.item_present)) { + Mmsg(config->m_errmsg, _("\"%s\" directive is required in \"%s\" resource, but not found.\n"), + items[i].name, resources[rindex].name); + return false; + } + } + } + + /* During pass 2, we looked up pointers to all the resources + * referrenced in the current resource, , now we + * must copy their address from the static record to the allocated + * record. + */ + if (pass == 2) { + switch (type) { + /* Resources not containing a resource */ + case R_CONSOLE: + case R_DIRECTOR: + break; + + default: + Emsg1(M_ERROR, 0, _("Unknown resource type %d\n"), type); + error = 1; + break; + } + /* Note, the resoure name was already saved during pass 1, + * so here, we can just release it. 
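
The pass-2 step described here only matters for resources that refer to other resources by name; the console's own resources hold no such references, but in the general case the lookup amounts to something like this small sketch (structure names are invented, not the Bacula ones):

   #include <stdio.h>
   #include <string.h>

   struct director { const char *name; };

   struct console {
      const char *name;
      const char *director_name;        /* stored as text during pass 1    */
      const struct director *director;  /* resolved to a pointer in pass 2 */
   };

   int main(void)
   {
      /* Pass 1: resources are stored with names only; cross-references
       * cannot be resolved yet because the target may not be parsed. */
      struct director dirs[] = { { "main-dir" }, { "backup-dir" } };
      struct console  cons   = { "admin", "backup-dir", NULL };

      /* Pass 2: look the referenced resource up by name and keep a pointer. */
      for (size_t i = 0; i < sizeof(dirs)/sizeof(dirs[0]); i++) {
         if (strcmp(cons.director_name, dirs[i].name) == 0) {
            cons.director = &dirs[i];
            break;
         }
      }
      printf("console %s -> director %s\n",
             cons.name, cons.director ? cons.director->name : "(unresolved)");
      return 0;
   }
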
+ */ + if (res_all.res_dir.hdr.name) { + free(res_all.res_dir.hdr.name); + res_all.res_dir.hdr.name = NULL; + } + if (res_all.res_dir.hdr.desc) { + free(res_all.res_dir.hdr.desc); + res_all.res_dir.hdr.desc = NULL; + } + return true; + } + + /* The following code is only executed during pass 1 */ + switch (type) { + case R_CONSOLE: + size = sizeof(CONRES); + break; + case R_DIRECTOR: + size = sizeof(DIRRES); + break; + default: + printf(_("Unknown resource type %d\n"), type); + error = 1; + size = 1; + break; + } + /* Common */ + if (!error) { + if (!config->insert_res(rindex, size)) { + return false; + } + } + return true; +} + +bool parse_cons_config(CONFIG *config, const char *configfile, int exit_code) +{ + config->init(configfile, NULL, exit_code, (void *)&res_all, res_all_size, + r_first, r_last, resources, &res_head); + return config->parse_config(); +} diff --git a/src/console/console_conf.h b/src/console/console_conf.h new file mode 100644 index 00000000..f6109331 --- /dev/null +++ b/src/console/console_conf.h @@ -0,0 +1,99 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula User Agent specific configuration and defines + * + * Kern Sibbald, Sep MM + * + */ + +/* + * Resource codes -- they must be sequential for indexing + */ + +bool parse_cons_config(CONFIG *config, const char *configfile, int exit_code); + +enum { + R_CONSOLE = 1001, + R_DIRECTOR, + R_FIRST = R_CONSOLE, + R_LAST = R_DIRECTOR /* Keep this updated */ +}; + +/* + * Some resource attributes + */ +enum { + R_NAME = 1020, + R_ADDRESS, + R_PASSWORD, + R_TYPE, + R_BACKUP +}; + + +/* Definition of the contents of each Resource */ + +/* Console "globals" */ +struct CONRES { + RES hdr; + char *rc_file; /* startup file */ + char *hist_file; /* command history file */ + char *password; /* UA server password */ + bool comm_compression; /* Enable comm line compression */ + bool tls_authenticate; /* Authenticate with TLS */ + bool tls_enable; /* Enable TLS on all connections */ + bool tls_require; /* Require TLS on all connections */ + char *tls_ca_certfile; /* TLS CA Certificate File */ + char *tls_ca_certdir; /* TLS CA Certificate Directory */ + char *tls_certfile; /* TLS Client Certificate File */ + char *tls_keyfile; /* TLS Client Key File */ + char *director; /* bind to director */ + utime_t heartbeat_interval; /* Interval to send heartbeats to Dir */ + + TLS_CONTEXT *tls_ctx; /* Shared TLS Context */ +}; + +/* Director */ +struct DIRRES { + RES hdr; + uint32_t DIRport; /* UA server port */ + char *address; /* UA server address */ + char *password; /* UA server password */ + bool tls_authenticate; /* Authenticate with TLS */ + bool tls_enable; /* Enable TLS */ + bool tls_require; /* Require TLS */ + char *tls_ca_certfile; /* TLS CA Certificate File */ + char *tls_ca_certdir; /* TLS CA Certificate Directory */ + char *tls_certfile; /* TLS Client Certificate File */ + char *tls_keyfile; /* TLS Client Key File */ + utime_t 
heartbeat_interval; /* Interval to send heartbeats to Dir */ + + TLS_CONTEXT *tls_ctx; /* Shared TLS Context */ +}; + + +/* Define the Union of all the above + * resource structure definitions. + */ +union URES { + DIRRES res_dir; + CONRES res_cons; + RES hdr; +}; diff --git a/src/console/func.h b/src/console/func.h new file mode 100755 index 00000000..e37a55a1 --- /dev/null +++ b/src/console/func.h @@ -0,0 +1,103 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* Definitions of internal function codes */ + +/* Functions that work on current line */ +#define F_CSRRGT 301 /* cursor right */ +#define F_CSRLFT 302 /* cursor left */ +#define F_ERSCHR 303 /* erase character */ +#define F_INSCHR 304 /* insert character */ +#define F_DELCHR 305 /* delete next character character */ +#define F_SOL 306 /* go to start of line */ +#define F_EOL 307 /* go to end of line */ +#define F_DELEOL 308 /* delete to end of line */ +#define F_NXTWRD 309 /* go to next word */ +#define F_PRVWRD 310 /* go to previous word */ +#define F_DELWRD 311 /* delete word */ +#define F_ERSLIN 312 /* erase line */ +#define F_TAB 313 /* tab */ +#define F_TABBAK 314 /* tab backwards */ +#define F_DELSOL 315 /* delete from start of line to cursor */ +#define F_LEFT40 316 /* move cursor left 40 cols */ +#define F_RIGHT40 317 /* move cursor right 40 cols */ +#define F_CASE 318 /* change case of next char advance csr */ +#define F_CENTERL 319 /* center line */ + +/* Functions that move the cursor line or work on groups of lines */ +#define F_CSRDWN 401 /* cursor down */ +#define F_CSRUP 402 /* cursor up */ +#define F_HOME 403 /* home cursor */ +#define F_EOF 404 /* go to end of file */ +#define F_PAGDWN 405 /* page down */ +#define F_PAGUP 406 /* page up */ +#define F_CENTER 407 /* center cursor on screen */ +#define F_SPLIT 408 /* split line at cursor */ +#define F_DELLIN 409 /* delete line */ +#define F_CONCAT 410 /* concatenate next line to current */ +#define F_RETURN 411 /* carriage return */ +#define F_NXTMCH 412 /* next match */ +#define F_DWN5 413 /* cursor down 5 lines */ +#define F_UP5 414 /* cursor up 5 lines */ +#define F_PUSH 415 /* push current location */ +#define F_POP 416 /* pop previous location */ +#define F_PAREN 417 /* find matching paren */ +#define F_POPVIEW 418 /* pop to saved view */ +#define F_OOPS 419 /* restore last oops buffer */ +#define F_PARENB 420 /* find matching paren backwards */ +#define F_BOTSCR 421 /* cursor to bottom of screen */ +#define F_TOPSCR 422 /* cursor to top of screen */ +#define F_TOPMARK 423 /* cursor to top marker line */ +#define F_BOTMARK 424 /* cursor to bottom marker line */ +#define F_CPYMARK 425 /* copy marked lines */ +#define F_MOVMARK 426 /* move marked lines */ +#define F_DELMARK 427 /* delete marked lines */ +#define F_SHFTLEFT 428 /* shift marked text left one char */ +#define F_SHFTRIGHT 429 /* shift marked text right one char */ + +/* Miscellaneous */ +#define F_ESCAPE 501 /* escape 
character */ +#define F_ESC 501 /* escape character */ +#define F_EOI 502 /* end of input */ +#define F_TENTRY 503 /* toggle entry mode */ +#define F_TINS 504 /* toggle insert mode */ +#define F_MARK 505 /* set marker on lines */ +#define F_CRESC 506 /* carriage return, escape */ +#define F_MACDEF 507 /* begin "macro" definition */ +#define F_MACEND 508 /* end "macro" definition */ +#define F_ZAPESC 509 /* clear screen, escape */ +#define F_CLRMARK 510 /* clear marked text */ +#define F_MARKBLK 511 /* mark blocks */ +#define F_MARKCHR 512 /* mark characters */ +#define F_HOLD 513 /* hold line */ +#define F_DUP 514 /* duplicate line */ +#define F_CHANGE 515 /* apply last change command */ +#define F_RCHANGE 516 /* reverse last change command */ +#define F_NXTFILE 517 /* next file */ +#define F_INCLUDE 518 /* include */ +#define F_FORMAT 519 /* format paragraph */ +#define F_HELP 520 /* help */ +#define F_JUSTIFY 521 /* justify paragraph */ +#define F_SAVE 522 /* save file -- not implemented */ +#define F_MOUSEI 523 /* mouse input coming -- not completed */ +#define F_SCRSIZ 524 /* Screen size coming */ +#define F_PASTECB 525 /* Paste clipboard */ +#define F_CLRSCRN 526 /* Clear the screen */ +#define F_CRNEXT 527 /* Send line, get next line */ +#define F_BREAK 528 /* Break */ +#define F_BACKGND 529 /* go into background */ diff --git a/src/count-lines b/src/count-lines new file mode 100755 index 00000000..a680905b --- /dev/null +++ b/src/count-lines @@ -0,0 +1,9 @@ +#!/bin/sh +rm -f 1 +touch 1 +find . -type d >2 +for i in `cat 2` ; do + ls -1 $i/*.c $i/*.cpp $i/*.h $i/*.in 2>/dev/null >>1 +done +cat 1 | $HOME/bin/lines +# rm -f 1 2 diff --git a/src/dird/Makefile.in b/src/dird/Makefile.in new file mode 100644 index 00000000..f7478962 --- /dev/null +++ b/src/dird/Makefile.in @@ -0,0 +1,162 @@ +# +# Bacula Director Makefile +# +# Copyright (C) 2000-2017 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +@MCOMMON@ + +srcdir = . +VPATH = . +.PATH: . 
+ +dir_group=@dir_group@ + +# one up +basedir = @BUILD_DIR@/src +# top dir +topdir = @BUILD_DIR@ +# this dir relative to top dir +thisdir = src/dird + +DEBUG=@DEBUG@ + +GETTEXT_LIBS = @LIBINTL@ +CAP_LIBS = @CAP_LIBS@ +ZLIBS=@ZLIBS@ +DB_LIBS=@DB_LIBS@ + + +first_rule: all +dummy: + +# +SVRSRCS = dird.c admin.c authenticate.c \ + autoprune.c backup.c bsr.c \ + catreq.c dir_plugins.c dird_conf.c expand.c \ + fd_cmds.c getmsg.c inc_conf.c job.c \ + jobq.c mac.c mac_sql.c \ + mountreq.c msgchan.c next_vol.c newvol.c \ + recycle.c restore.c run_conf.c \ + scheduler.c \ + ua_acl.c ua_cmds.c ua_dotcmds.c \ + ua_query.c \ + ua_input.c ua_label.c ua_output.c ua_prune.c \ + ua_purge.c ua_restore.c ua_run.c \ + ua_select.c ua_server.c snapshot.c \ + ua_status.c ua_tree.c ua_update.c vbackup.c verify.c +SVROBJS = $(SVRSRCS:.c=.o) + +JSONOBJS = bdirjson.o dird_conf.o run_conf.o inc_conf.o ua_acl.o + +# these are the objects that are changed by the .configure process +EXTRAOBJS = @OBJLIST@ + +.SUFFIXES: .c .o +.PHONY: +.DONTCARE: + +# inference rules +.c.o: + @echo "Compiling $<" + $(NO_ECHO)$(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) $< +#------------------------------------------------------------------------- +all: Makefile bacula-dir @STATIC_DIR@ bdirjson + @echo "==== Make of dird is good ====" + @echo " " + +bacula-dir: Makefile $(SVROBJS) ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) ../lib/libbaccfg$(DEFAULT_ARCHIVE_TYPE) \ + ../cats/libbacsql$(DEFAULT_ARCHIVE_TYPE) \ + ../cats/libbaccats$(DEFAULT_ARCHIVE_TYPE) ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE) + @echo "Linking $@ ..." + $(LIBTOOL_LINK) $(CXX) $(WLDFLAGS) $(LDFLAGS) -L../lib -L../cats -L../findlib -o $@ $(SVROBJS) $(ZLIBS) \ + -lbacfind -lbacsql -lbaccats -lbaccfg -lbac -lm $(DLIB) $(DB_LIBS) $(LIBS) \ + $(WRAPLIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) $(CAP_LIBS) + +static-bacula-dir: Makefile $(SVROBJS) ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) ../lib/libbaccfg$(DEFAULT_ARCHIVE_TYPE) \ + ../cats/libbacsql$(DEFAULT_ARCHIVE_TYPE) \ + ../cats/libbaccats$(DEFAULT_ARCHIVE_TYPE) ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE) + $(LIBTOOL_LINK) $(CXX) $(WLDFLAGS) $(LDFLAGS) -static -L../lib -L../cats -L../findlib -o $@ $(SVROBJS) $(ZLIBS) \ + -lbacfind -lbacsql -lbaccats -lbaccfg -lbac -lm $(DLIB) $(DB_LIBS) $(LIBS) \ + $(WRAPLIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) $(CAP_LIBS) + strip $@ + +bdirjson: Makefile $(JSONOBJS) ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) ../lib/libbaccfg$(DEFAULT_ARCHIVE_TYPE) + @echo "Linking $@ ..." 
+ $(LIBTOOL_LINK) $(CXX) $(WLDFLAGS) $(LDFLAGS) -L../lib -o $@ $(JSONOBJS) \ + -lbaccfg -lbac -lm $(DLIB) $(DB_LIBS) $(LIBS) \ + $(WRAPLIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) $(CAP_LIBS) + +Makefile: $(srcdir)/Makefile.in $(topdir)/config.status + cd $(topdir) \ + && CONFIG_FILES=$(thisdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status + +libtool-clean: + @$(RMF) -r .libs _libs + +clean: libtool-clean + @$(RMF) dird bacula-dir core core.* a.out *.o *.bak *~ *.intpro *.extpro 1 2 3 + @$(RMF) static-bacula-dir bdirjson + +realclean: clean + @$(RMF) tags bacula-dir.conf + +distclean: realclean + if test $(srcdir) = .; then $(MAKE) realclean; fi + (cd $(srcdir); $(RMF) Makefile) + +devclean: realclean + if test $(srcdir) = .; then $(MAKE) realclean; fi + (cd $(srcdir); $(RMF) Makefile) + +install: all + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) bacula-dir $(DESTDIR)$(sbindir)/bacula-dir + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) bdirjson $(DESTDIR)$(sbindir)/bdirjson + @srcconf=bacula-dir.conf; \ + if test -f ${DESTDIR}${sysconfdir}/$$srcconf; then \ + destconf=$$srcconf.new; \ + echo " ==> Found existing $$srcconf, installing new conf file as $$destconf"; \ + else \ + destconf=$$srcconf; \ + fi; \ + echo "${INSTALL_CONFIG} $$srcconf ${DESTDIR}${sysconfdir}/$$destconf"; \ + ${INSTALL_CONFIG} $$srcconf ${DESTDIR}${sysconfdir}/$$destconf + @if test "x${dir_group}" != "x" -a "x${DESTDIR}" = "x" ; then \ + chgrp -f ${dir_group} ${DESTDIR}${sysconfdir}/$$destconf ; \ + fi + @if test -f ${DESTDIR}${scriptdir}/query.sql; then \ + echo " ==> Saving existing query.sql to query.sql.old"; \ + $(MV) -f ${DESTDIR}${scriptdir}/query.sql ${DESTDIR}${scriptdir}/query.sql.old; \ + fi + ${INSTALL_DATA} query.sql ${DESTDIR}${scriptdir}/query.sql + @if test -f static-bacula-dir; then \ + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) static-bacula-dir $(DESTDIR)$(sbindir)/static-bacula-dir; \ + fi + + +uninstall: + (cd $(DESTDIR)$(sbindir); $(RMF) bacula-dir bdirjson) + (cd $(DESTDIR)$(sysconfdir); $(RMF) bacula-dir.conf bacula-dir.conf.new) + (cd $(DESTDIR)$(scriptdir); $(RMF) query.sql) + + + +# Semi-automatic generation of dependencies: +# Use gcc -MM because X11 `makedepend' doesn't work on all systems +# and it also includes system headers. +# `semi'-automatic since dependencies are generated at distribution time. + +depend: + @$(MV) Makefile Makefile.bak + @$(SED) "/^# DO NOT DELETE:/,$$ d" Makefile.bak > Makefile + @$(ECHO) "# DO NOT DELETE: nice dependency list follows" >> Makefile + @$(CXX) -S -M $(CPPFLAGS) $(XINC) -I$(srcdir) -I$(basedir) *.c >> Makefile + @if test -f Makefile ; then \ + $(RMF) Makefile.bak; \ + else \ + $(MV) Makefile.bak Makefile; \ + echo " ===== Something went wrong in make depend ====="; \ + fi + +# ----------------------------------------------------------------------- +# DO NOT DELETE: nice dependency list follows diff --git a/src/dird/README-config b/src/dird/README-config new file mode 100644 index 00000000..b3ae34fb --- /dev/null +++ b/src/dird/README-config @@ -0,0 +1,134 @@ + +To add a new resource -- as an example, the BAZ resource, + which will have foo record which is an integer, + a storage record, which is the name of a storage + resource, and a special time field. + + 1. Define the Resource type (R_xxx) in dir_config.h + Be sure to update the R_LAST define. + #define R_BAZ 1011 + update R_LAST to be R_BAZ + + 2. Add a new definition of the resource in dir_config.h + The first three are mandatory (will soon be changed + to a header structure). 
+ struct s_res_baz { + char * name; + int rcode; + struct s_res_baz *next; + + int foo; + struct res_store *storage; + int time; + }; + + 3. In dir_config.c add the new resource to the table + of resources (resources[]) + + {"baz", baz_items, R_BAZ, NULL}, + + 4. Create a baz_items, which defines the records that + can appear within the BAZ resource: + + static struct res_items baz_items[] = { + name, store sub, where to store, extra info + {"name", store_name, ITEM(res_baz.name), 0}, /* mandatory */ + {"foo", store_int, ITEM(res_baz.foo), 0}, + {"storage", store_res, ITEM(res_baz.storage), 0}, + {"time", store_time, ITEM(res_baz.time), 0}, + }; + + 5. Update the dump_resource() subroutine to handle printing + your resource. + + 6. Update the free_resource() subroutine to handle releasing + any allocated memory. + + 7. Check for any special initialization in init_resource(). + Normally, everything is just zeroed. + + 8. Update the new_resource() subroutine to handle the two + passes of the configurator to be able to create your + resource. Pass 2 is used only for finding a reference + to a resource and stuffing its address. In the above example, + you will need to include the storage resource. See the + example for the Job Resource. + + Add an entry so that the correct size of your resource is + allocated for pass one. + + 9. Write any new store routines that you may need. In this case, + we used the store_int and store_res, which are already defined, + but we need to add the new special routine store_time(). + Note, the store subroutine gets control when the parser has + identified the record. Everything after the record name + must be scanned in the store routine. + + +To add a new resource record: + + 1. Add the new record definition to the resource structure definition. + See step 2 above. In this case, however, we only add a new field + to the existing structure. + + 2. Add the new record to the existing res_items structure. See + step 4 above. In this case, however, we only add a new record + definition to the existing structure. + + 3. Update the dump_resource() routine to dump the new record. + + 4. Update the free_resource() routine if you allocated any memory. + + 5. Update init_resource() if you have any special requirements (not + normally the case). + + 6. Update the new_resource() routine if necessary (not normally the + case). + + 7. Write any new store routine that you may have created to store + your record. + Note, the store subroutine gets control when the parser has + identified the record. Everything after the record name + must be scanned in the store routine. See the examples of + store routines that exist. + +Note, the core parsing code is in lib/parse_config.c and lib/parse_config.h. +lib/parse_config.c provides the following store routines: + + store_name stores a resource name + store_str stores a string + store_res stores a resource + store_int stores an integer + +and the following utilities: + + scan_to_eol causes the lexical scanner to scan to the end of the line + scan_error prints an error message + GetResWithName returns the resource of a specified type and name + GetNextRes returns the next resource of a specified type + parse_config parses the configuration file + free_config_resources frees all the resources allocated. + +Note: when your store routine gets control, the parser will have already +scanned the record name (i.e. "baz =") before calling your routine.
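  As a minimal, self-contained sketch of the table-driven dispatch described
  above (this is not Bacula's parser API -- LEX, ITEM() and the two-pass logic
  are omitted, and names such as toy_item, store_int32 and store_directive are
  invented for illustration only), a store routine is just a function the table
  points at, called once the keyword has been identified, with everything after
  the "=" handed to it as the value:

      #include <stddef.h>
      #include <stdio.h>
      #include <stdlib.h>
      #include <string.h>
      #include <strings.h>

      struct baz_res {                  /* stand-in for a resource record */
         char *name;
         int   foo;
      };

      /* One table entry: directive keyword, store routine, destination offset */
      struct toy_item {
         const char *name;
         void (*store)(struct baz_res *res, size_t off, const char *value);
         size_t offset;
      };

      /* Store routines convert the text that follows the keyword */
      static void store_int32(struct baz_res *res, size_t off, const char *value)
      {
         *(int *)((char *)res + off) = atoi(value);
      }

      static void store_string(struct baz_res *res, size_t off, const char *value)
      {
         *(char **)((char *)res + off) = strdup(value);
      }

      static struct toy_item baz_items[] = {
         {"name", store_string, offsetof(struct baz_res, name)},
         {"foo",  store_int32,  offsetof(struct baz_res, foo)},
         {NULL, NULL, 0}
      };

      /* Dispatcher: the parser has already isolated keyword and value */
      static void store_directive(struct baz_res *res, const char *keyword,
                                  const char *value)
      {
         for (int i = 0; baz_items[i].name; i++) {
            if (strcasecmp(keyword, baz_items[i].name) == 0) {
               baz_items[i].store(res, baz_items[i].offset, value);
               return;
            }
         }
         fprintf(stderr, "Unknown directive: %s\n", keyword);
      }

      int main(void)
      {
         struct baz_res baz;
         memset(&baz, 0, sizeof(baz));
         store_directive(&baz, "Name", "MyBaz");
         store_directive(&baz, "Foo", "300");
         printf("name=%s foo=%d\n", baz.name, baz.foo);
         free(baz.name);
         return 0;
      }

  The real store routines differ mainly in that they are not handed a pre-split
  value: they read it themselves through the lexical scanner (see the
  get_token() sketch below), which is why everything after the record name must
  be consumed inside the store routine.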
+The lexical scanner is by default in a mode where spaces will be +compressed out of unquoted strings. Consequently if you want to scan + + baz = full backup every sunday + +and you do not want to get "full backup every sunday" as a single token +"fullbackupeverysunday", you must set the no identifier option in the +lexical scanner options field: + + int options = lc->options; + + lc->options |= LOPT_NO_IDENT; /* don't eat spaces */ + + get_token(lc) ... + ... + + lc->options = options; + return; + + + diff --git a/src/dird/admin.c b/src/dird/admin.c new file mode 100644 index 00000000..9c597d70 --- /dev/null +++ b/src/dird/admin.c @@ -0,0 +1,125 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2015 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * Bacula Director -- admin.c -- responsible for doing admin jobs + * + * Kern Sibbald, May MMIII + * + * Basic tasks done here: + * Display the job report. + * + */ + +#include "bacula.h" +#include "dird.h" +#include "ua.h" + + +bool do_admin_init(JCR *jcr) +{ + free_rstorage(jcr); + if (!allow_duplicate_job(jcr)) { + return false; + } + return true; +} + +/* + * Returns: false on failure + * true on success + */ +bool do_admin(JCR *jcr) +{ + + jcr->jr.JobId = jcr->JobId; + + jcr->fname = (char *)get_pool_memory(PM_FNAME); + + /* Print Job Start message */ + Jmsg(jcr, M_INFO, 0, _("Start Admin JobId %d, Job=%s\n"), + jcr->JobId, jcr->Job); + + jcr->setJobStatus(JS_Running); + admin_cleanup(jcr, JS_Terminated); + return true; +} + + +/* + * Release resources allocated during backup. 
+ */ +void admin_cleanup(JCR *jcr, int TermCode) +{ + char sdt[50], edt[50], schedt[50]; + char term_code[100]; + const char *term_msg; + int msg_type; + MEDIA_DBR mr; + + Dmsg0(100, "Enter admin_cleanup()\n"); + + update_job_end(jcr, TermCode); + + if (!db_get_job_record(jcr, jcr->db, &jcr->jr)) { + Jmsg(jcr, M_WARNING, 0, _("Error getting Job record for Job report: ERR=%s"), + db_strerror(jcr->db)); + jcr->setJobStatus(JS_ErrorTerminated); + } + + msg_type = M_INFO; /* by default INFO message */ + switch (jcr->JobStatus) { + case JS_Terminated: + term_msg = _("Admin OK"); + break; + case JS_FatalError: + case JS_ErrorTerminated: + term_msg = _("*** Admin Error ***"); + msg_type = M_ERROR; /* Generate error message */ + break; + case JS_Canceled: + term_msg = _("Admin Canceled"); + break; + default: + term_msg = term_code; + sprintf(term_code, _("Inappropriate term code: %c\n"), jcr->JobStatus); + break; + } + bstrftimes(schedt, sizeof(schedt), jcr->jr.SchedTime); + bstrftimes(sdt, sizeof(sdt), jcr->jr.StartTime); + bstrftimes(edt, sizeof(edt), jcr->jr.EndTime); + + + Jmsg(jcr, msg_type, 0, _("Bacula " VERSION " (" LSMDATE "): %s\n" +" JobId: %d\n" +" Job: %s\n" +" Scheduled time: %s\n" +" Start time: %s\n" +" End time: %s\n" +" Termination: %s\n\n"), + edt, + jcr->jr.JobId, + jcr->jr.Job, + schedt, + sdt, + edt, + term_msg); + + Dmsg0(100, "Leave admin_cleanup()\n"); +} diff --git a/src/dird/authenticate.c b/src/dird/authenticate.c new file mode 100644 index 00000000..89a6c76e --- /dev/null +++ b/src/dird/authenticate.c @@ -0,0 +1,459 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * Bacula Director -- authenticate.c -- handles authorization of + * Storage and File daemons. + * + * Written by: Kern Sibbald, May MMI + * + * This routine runs as a thread and must be thread reentrant. 
+ * + */ + +#include "bacula.h" +#include "dird.h" + +static const int dbglvl = 50; + +extern DIRRES *director; + +/* Version at end of Hello + * prior to 06Aug13 no version + * 102 04Jun15 - added jobmedia change + * 103 14Feb17 - added comm line compression + */ +#define DIR_VERSION 103 + + +/* Command sent to SD */ +static char hello[] = "Hello %sDirector %s calling %d\n"; + +/* Responses from Storage and File daemons */ +static char OKhello[] = "3000 OK Hello"; +static char SDOKnewHello[] = "3000 OK Hello %d"; +static char FDOKhello[] = "2000 OK Hello"; +static char FDOKnewHello[] = "2000 OK Hello %d"; + +/* Sent to User Agent */ +static char Dir_sorry[] = "1999 You are not authorized.\n"; + +/* Forward referenced functions */ + +/* + * Authenticate Storage daemon connection + */ +bool authenticate_storage_daemon(JCR *jcr, STORE *store) +{ + BSOCK *sd = jcr->store_bsock; + char dirname[MAX_NAME_LENGTH]; + int tls_local_need = BNET_TLS_NONE; + int tls_remote_need = BNET_TLS_NONE; + int compatible = true; + bool auth_success = false; + + if (!sd) { + Dmsg0(dbglvl, "Invalid bsock\n"); + return false; + } + + /* + * Send my name to the Storage daemon then do authentication + */ + bstrncpy(dirname, director->hdr.name, sizeof(dirname)); + bash_spaces(dirname); + /* Timeout Hello after 1 min */ + btimer_t *tid = start_bsock_timer(sd, AUTH_TIMEOUT); + /* Sent Hello SD: Bacula Director calling */ + if (!sd->fsend(hello, "SD: Bacula ", dirname, DIR_VERSION)) { + stop_bsock_timer(tid); + Dmsg1(dbglvl, _("Error sending Hello to Storage daemon. ERR=%s\n"), sd->bstrerror()); + Jmsg(jcr, M_FATAL, 0, _("Error sending Hello to Storage daemon. ERR=%s\n"), sd->bstrerror()); + return 0; + } + + /* TLS Requirement */ + if (store->tls_enable) { + if (store->tls_require) { + tls_local_need = BNET_TLS_REQUIRED; + } else { + tls_local_need = BNET_TLS_OK; + } + } + + if (store->tls_authenticate) { + tls_local_need = BNET_TLS_REQUIRED; + } + + auth_success = cram_md5_respond(sd, store->password, &tls_remote_need, &compatible); + if (auth_success) { + auth_success = cram_md5_challenge(sd, store->password, tls_local_need, compatible); + if (!auth_success) { + Dmsg1(dbglvl, "cram_challenge failed for %s\n", sd->who()); + } + } else { + Dmsg1(dbglvl, "cram_respond failed for %s\n", sd->who()); + } + + if (!auth_success) { + stop_bsock_timer(tid); + Dmsg0(dbglvl, _("Director and Storage daemon passwords or names not the same.\n")); + Jmsg2(jcr, M_FATAL, 0, + _("Director unable to authenticate with Storage daemon at \"%s:%d\". Possible causes:\n" + "Passwords or names not the same or\n" + "Maximum Concurrent Jobs exceeded on the SD or\n" + "SD networking messed up (restart daemon).\n" + "For help, please see: " MANUAL_AUTH_URL "\n"), + sd->host(), sd->port()); + return 0; + } + + /* Verify that the remote host is willing to meet our TLS requirements */ + if (tls_remote_need < tls_local_need && tls_local_need != BNET_TLS_OK && tls_remote_need != BNET_TLS_OK) { + stop_bsock_timer(tid); + Jmsg(jcr, M_FATAL, 0, _("Authorization problem: Remote server did not advertise required TLS support.\n")); + return 0; + } + + /* Verify that we are willing to meet the remote host's requirements */ + if (tls_remote_need > tls_local_need && tls_local_need != BNET_TLS_OK && tls_remote_need != BNET_TLS_OK) { + stop_bsock_timer(tid); + Jmsg(jcr, M_FATAL, 0, _("Authorization problem: Remote server requires TLS.\n")); + return 0; + } + + /* Is TLS Enabled? 
*/ + if (tls_local_need >= BNET_TLS_OK && tls_remote_need >= BNET_TLS_OK) { + /* Engage TLS! Full Speed Ahead! */ + if (!bnet_tls_client(store->tls_ctx, sd, NULL)) { + stop_bsock_timer(tid); + Jmsg(jcr, M_FATAL, 0, _("TLS negotiation failed with SD at \"%s:%d\"\n"), + sd->host(), sd->port()); + return 0; + } + if (store->tls_authenticate) { /* authentication only? */ + sd->free_tls(); /* yes, stop tls */ + } + } + + Dmsg1(116, ">stored: %s", sd->msg); + if (sd->recv() <= 0) { + stop_bsock_timer(tid); + Jmsg3(jcr, M_FATAL, 0, _("bdird<stored: \"%s:%s\" bad response to Hello command: ERR=%s\n"), + sd->who(), sd->host(), sd->bstrerror()); + return 0; + } + Dmsg1(110, "<stored: %s", sd->msg); + stop_bsock_timer(tid); + jcr->SDVersion = 0; + if (sscanf(sd->msg, SDOKnewHello, &jcr->SDVersion) != 1 && + strncmp(sd->msg, OKhello, sizeof(OKhello)) != 0) { + Dmsg0(dbglvl, _("Storage daemon rejected Hello command\n")); + Jmsg2(jcr, M_FATAL, 0, _("Storage daemon at \"%s:%d\" rejected Hello command\n"), + sd->host(), sd->port()); + return 0; + } + /* For newer SD turn on comm line compression */ + if (jcr->SDVersion >= 1 && director->comm_compression) { + sd->set_compress(); + } else { + sd->clear_compress(); + Dmsg0(050, "*** No Dir compression to SD\n"); + } + if (jcr->SDVersion < 2) { + Jmsg2(jcr, M_FATAL, 0, _("Older Storage daemon at \"%s:%d\" incompatible with this Director.\n"), + sd->host(), sd->port()); + return 0; + } + return 1; +} + +/* + * Authenticate File daemon connection + */ +int authenticate_file_daemon(JCR *jcr) +{ + BSOCK *fd = jcr->file_bsock; + CLIENT *client = jcr->client; + char dirname[MAX_NAME_LENGTH]; + int tls_local_need = BNET_TLS_NONE; + int tls_remote_need = BNET_TLS_NONE; + int compatible = true; + bool auth_success = false; + + /* + * Send my name to the File daemon then do authentication + */ + bstrncpy(dirname, director->name(), sizeof(dirname)); + bash_spaces(dirname); + /* Timeout Hello after 1 min */ + btimer_t *tid = start_bsock_timer(fd, AUTH_TIMEOUT); + if (!fd->fsend(hello, "", dirname, DIR_VERSION)) { + stop_bsock_timer(tid); + Jmsg(jcr, M_FATAL, 0, _("Error sending Hello to File daemon at \"%s:%d\". ERR=%s\n"), + fd->host(), fd->port(), fd->bstrerror()); + Dmsg3(50, _("Error sending Hello to File daemon at \"%s:%d\". ERR=%s\n"), + fd->host(), fd->port(), fd->bstrerror()); + return 0; + } + Dmsg1(dbglvl, "Sent: %s", fd->msg); + + /* TLS Requirement */ + if (client->tls_enable) { + if (client->tls_require) { + tls_local_need = BNET_TLS_REQUIRED; + } else { + tls_local_need = BNET_TLS_OK; + } + } + + if (client->tls_authenticate) { + tls_local_need = BNET_TLS_REQUIRED; + } + + auth_success = cram_md5_respond(fd, client->password, &tls_remote_need, &compatible); + if (auth_success) { + auth_success = cram_md5_challenge(fd, client->password, tls_local_need, compatible); + if (!auth_success) { + Dmsg1(dbglvl, "cram_auth failed for %s\n", fd->who()); + } + } else { + Dmsg1(dbglvl, "cram_get_auth failed for %s\n", fd->who()); + } + if (!auth_success) { + stop_bsock_timer(tid); + Dmsg0(dbglvl, _("Director and File daemon passwords or names not the same.\n")); + Jmsg(jcr, M_FATAL, 0, + _("Unable to authenticate with File daemon at \"%s:%d\". 
Possible causes:\n" + "Passwords or names not the same or\n" + "Maximum Concurrent Jobs exceeded on the FD or\n" + "FD networking messed up (restart daemon).\n" + "For help, please see: " MANUAL_AUTH_URL "\n"), + fd->host(), fd->port()); + return 0; + } + + /* Verify that the remote host is willing to meet our TLS requirements */ + if (tls_remote_need < tls_local_need && tls_local_need != BNET_TLS_OK && tls_remote_need != BNET_TLS_OK) { + stop_bsock_timer(tid); + Jmsg(jcr, M_FATAL, 0, _("Authorization problem: FD \"%s:%s\" did not advertise required TLS support.\n"), + fd->who(), fd->host()); + return 0; + } + + /* Verify that we are willing to meet the remote host's requirements */ + if (tls_remote_need > tls_local_need && tls_local_need != BNET_TLS_OK && tls_remote_need != BNET_TLS_OK) { + stop_bsock_timer(tid); + Jmsg(jcr, M_FATAL, 0, _("Authorization problem: FD at \"%s:%d\" requires TLS.\n"), + fd->host(), fd->port()); + return 0; + } + + /* Is TLS Enabled? */ + if (tls_local_need >= BNET_TLS_OK && tls_remote_need >= BNET_TLS_OK) { + /* Engage TLS! Full Speed Ahead! */ + if (!bnet_tls_client(client->tls_ctx, fd, client->tls_allowed_cns)) { + stop_bsock_timer(tid); + Jmsg(jcr, M_FATAL, 0, _("TLS negotiation failed with FD at \"%s:%d\".\n"), + fd->host(), fd->port()); + return 0; + } + if (client->tls_authenticate) { /* tls authentication only? */ + fd->free_tls(); /* yes, shutdown tls */ + } + } + + Dmsg1(116, ">filed: %s", fd->msg); + if (fd->recv() <= 0) { + stop_bsock_timer(tid); + Dmsg1(dbglvl, _("Bad response from File daemon to Hello command: ERR=%s\n"), + fd->bstrerror()); + Jmsg(jcr, M_FATAL, 0, _("Bad response from File daemon at \"%s:%d\" to Hello command: ERR=%s\n"), + fd->host(), fd->port(), fd->bstrerror()); + return 0; + } + Dmsg1(110, "<filed: %s", fd->msg); + stop_bsock_timer(tid); + jcr->FDVersion = 0; + if (strncmp(fd->msg, FDOKhello, sizeof(FDOKhello)) != 0 && + sscanf(fd->msg, FDOKnewHello, &jcr->FDVersion) != 1) { + Dmsg0(dbglvl, _("File daemon rejected Hello command\n")); + Jmsg(jcr, M_FATAL, 0, _("File daemon at \"%s:%d\" rejected Hello command\n"), + fd->host(), fd->port()); + return 0; + } + /* For newer FD turn on comm line compression */ + if (jcr->FDVersion >= 214 && director->comm_compression) { + fd->set_compress(); + } else { + fd->clear_compress(); + Dmsg0(050, "*** No Dir compression to FD\n"); + } + return 1; +} + +/********************************************************************* + * + */ +int authenticate_user_agent(UAContext *uac) +{ + char name[MAX_NAME_LENGTH]; + int tls_local_need = BNET_TLS_NONE; + int tls_remote_need = BNET_TLS_NONE; + bool tls_authenticate; + int compatible = true; + CONRES *cons = NULL; + BSOCK *ua = uac->UA_sock; + bool auth_success = false; + TLS_CONTEXT *tls_ctx = NULL; + alist *verify_list = NULL; + int ua_version = 0; + + if (ua->msglen < 16 || ua->msglen >= MAX_NAME_LENGTH + 15) { + Qmsg3(NULL, M_SECURITY, 0, _("UA Hello from %s:%s is invalid. Len=%d\n"), ua->who(), + ua->host(), ua->msglen); + sleep(5); + return 0; + } + + if (sscanf(ua->msg, "Hello %127s calling %d", name, &ua_version) != 2 && + sscanf(ua->msg, "Hello %127s calling", name) != 1) { + ua->msg[100] = 0; /* terminate string */ + Qmsg3(NULL, M_SECURITY, 0, _("UA Hello from %s:%s is invalid. 
Got: %s\n"), ua->who(), + ua->host(), ua->msg); + sleep(5); + return 0; + } + + /* Turn on compression for newer consoles */ + if (ua_version >= 1 && director->comm_compression) { + ua->set_compress(); + } else { + Dmsg0(050, "*** No Dir compression to UA\n"); + } + + name[sizeof(name)-1] = 0; /* terminate name */ + if (strcmp(name, "*UserAgent*") == 0) { /* default console */ + /* TLS Requirement */ + if (director->tls_enable) { + if (director->tls_require) { + tls_local_need = BNET_TLS_REQUIRED; + } else { + tls_local_need = BNET_TLS_OK; + } + } + + tls_authenticate = director->tls_authenticate; + + if (tls_authenticate) { + tls_local_need = BNET_TLS_REQUIRED; + } + + if (director->tls_verify_peer) { + verify_list = director->tls_allowed_cns; + } + + auth_success = cram_md5_challenge(ua, director->password, tls_local_need, + compatible) && + cram_md5_respond(ua, director->password, &tls_remote_need, &compatible); + } else { + unbash_spaces(name); + cons = (CONRES *)GetResWithName(R_CONSOLE, name); + if (cons) { + /* TLS Requirement */ + if (cons->tls_enable) { + if (cons->tls_require) { + tls_local_need = BNET_TLS_REQUIRED; + } else { + tls_local_need = BNET_TLS_OK; + } + } + + tls_authenticate = cons->tls_authenticate; + + if (tls_authenticate) { + tls_local_need = BNET_TLS_REQUIRED; + } + + if (cons->tls_verify_peer) { + verify_list = cons->tls_allowed_cns; + } + + auth_success = cram_md5_challenge(ua, cons->password, tls_local_need, + compatible) && + cram_md5_respond(ua, cons->password, &tls_remote_need, &compatible); + + if (auth_success) { + uac->cons = cons; /* save console resource pointer */ + } + } else { + auth_success = false; + goto auth_done; + } + } + + + /* Verify that the remote peer is willing to meet our TLS requirements */ + if (tls_remote_need < tls_local_need && tls_local_need != BNET_TLS_OK && tls_remote_need != BNET_TLS_OK) { + Jmsg0(NULL, M_SECURITY, 0, _("Authorization problem:" + " Remote client did not advertise required TLS support.\n")); + auth_success = false; + goto auth_done; + } + + /* Verify that we are willing to meet the peer's requirements */ + if (tls_remote_need > tls_local_need && tls_local_need != BNET_TLS_OK && tls_remote_need != BNET_TLS_OK) { + Jmsg0(NULL, M_SECURITY, 0, _("Authorization problem:" + " Remote client requires TLS.\n")); + auth_success = false; + goto auth_done; + } + + if (tls_local_need >= BNET_TLS_OK && tls_remote_need >= BNET_TLS_OK) { + if (cons) { + tls_ctx = cons->tls_ctx; + } else { + tls_ctx = director->tls_ctx; + } + + /* Engage TLS! Full Speed Ahead! */ + if (!bnet_tls_server(tls_ctx, ua, verify_list)) { + Jmsg0(NULL, M_SECURITY, 0, _("TLS negotiation failed.\n")); + auth_success = false; + goto auth_done; + } + if (tls_authenticate) { /* authentication only? 
*/ + ua->free_tls(); /* stop tls */ + } + } + + +/* Authorization Completed */ +auth_done: + if (!auth_success) { + ua->fsend("%s", _(Dir_sorry)); + Jmsg4(NULL, M_SECURITY, 0, _("Unable to authenticate console \"%s\" at %s:%s:%d.\n"), + name, ua->who(), ua->host(), ua->port()); + sleep(5); + return 0; + } + ua->fsend(_("1000 OK: %d %s %sVersion: %s (%s)\n"), + DIR_VERSION, my_name, "", VERSION, BDATE); + return 1; +} diff --git a/src/dird/autoprune.c b/src/dird/autoprune.c new file mode 100644 index 00000000..b522da2c --- /dev/null +++ b/src/dird/autoprune.c @@ -0,0 +1,227 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula Director -- Automatic Pruning + * Applies retention periods + * + * Kern Sibbald, May MMII + */ + +#include "bacula.h" +#include "dird.h" +#include "ua.h" + +/* Forward referenced functions */ + + +/* + * Auto Prune Jobs and Files. This is called at the end of every + * Job. We do not prune volumes here. + */ +void do_autoprune(JCR *jcr) +{ + UAContext *ua; + CLIENT *client; + POOL *pool; + bool pruned; + + if (!jcr->client) { /* temp -- remove me */ + return; + } + + ua = new_ua_context(jcr); + client = jcr->client; + pool = jcr->pool; + + if (jcr->job->PruneJobs || jcr->client->AutoPrune) { + prune_jobs(ua, client, pool, jcr->getJobType()); + pruned = true; + } else { + pruned = false; + } + + if (jcr->job->PruneFiles || jcr->client->AutoPrune) { + prune_files(ua, client, pool); + pruned = true; + } + if (pruned) { + Jmsg(jcr, M_INFO, 0, _("End auto prune.\n\n")); + } + free_ua_context(ua); + return; +} + +/* + * Prune at least one Volume in current Pool. This is called from + * catreq.c => next_vol.c when the Storage daemon is asking for another + * volume and no appendable volumes are available. 
+ * + */ +void prune_volumes(JCR *jcr, bool InChanger, MEDIA_DBR *mr, + STORE *store) +{ + int count; + int i; + dbid_list ids; + struct del_ctx prune_list; + POOL_MEM query(PM_MESSAGE), changer(PM_MESSAGE); + UAContext *ua; + char ed1[50], ed2[100], ed3[50]; + + POOL_DBR spr; + + Dmsg1(100, "Prune volumes PoolId=%d\n", jcr->jr.PoolId); + if (!jcr->job->PruneVolumes && !jcr->pool->AutoPrune) { + Dmsg0(100, "AutoPrune not set in Pool.\n"); + return; + } + + bmemset(&prune_list, 0, sizeof(prune_list)); + prune_list.max_ids = 10000; + prune_list.JobId = (JobId_t *)malloc(sizeof(JobId_t) * prune_list.max_ids); + + ua = new_ua_context(jcr); + db_lock(jcr->db); + + /* Edit PoolId */ + edit_int64(mr->PoolId, ed1); + /* + * Get Pool record for Scratch Pool + */ + bmemset(&spr, 0, sizeof(spr)); + bstrncpy(spr.Name, "Scratch", sizeof(spr.Name)); + if (db_get_pool_record(jcr, jcr->db, &spr)) { + edit_int64(spr.PoolId, ed2); + bstrncat(ed2, ",", sizeof(ed2)); + } else { + ed2[0] = 0; + } + + if (mr->ScratchPoolId) { + edit_int64(mr->ScratchPoolId, ed3); + bstrncat(ed2, ed3, sizeof(ed2)); + bstrncat(ed2, ",", sizeof(ed2)); + } + + Dmsg1(100, "Scratch pool(s)=%s\n", ed2); + /* + * ed2 ends up with scratch poolid and current poolid or + * just current poolid if there is no scratch pool + */ + bstrncat(ed2, ed1, sizeof(ed2)); + + /* + * Get the List of all media ids in the current Pool or whose + * RecyclePoolId is the current pool or the scratch pool + */ + const char *select = "SELECT DISTINCT MediaId,LastWritten FROM Media WHERE " + "(PoolId=%s OR RecyclePoolId IN (%s)) AND MediaType='%s' %s" + "ORDER BY LastWritten ASC,MediaId"; + + set_storageid_in_mr(store, mr); + if (InChanger) { + Mmsg(changer, "AND InChanger=1 AND StorageId IN (%s) ", mr->sid_group); + } + + Mmsg(query, select, ed1, ed2, mr->MediaType, changer.c_str()); + + Dmsg1(100, "query=%s\n", query.c_str()); + if (!db_get_query_dbids(ua->jcr, ua->db, query, ids)) { + Jmsg(jcr, M_ERROR, 0, "%s", db_strerror(jcr->db)); + goto bail_out; + } + + Dmsg1(100, "Volume prune num_ids=%d\n", ids.num_ids); + + /* Visit each Volume and Prune it until we find one that is purged */ + for (i=0; i<ids.num_ids; i++) { + MEDIA_DBR lmr; + lmr.MediaId = ids.DBId[i]; + if (!db_get_media_record(jcr, jcr->db, &lmr)) { + Jmsg(jcr, M_ERROR, 0, "%s", db_strerror(jcr->db)); + continue; + } + Dmsg1(100, "Examine vol=%s\n", lmr.VolumeName); + /* Don't prune archived volumes */ + if (lmr.Enabled == 2) { + Dmsg1(100, "Vol=%s disabled\n", lmr.VolumeName); + continue; + } + /* Prune only Volumes with status "Full", or "Used" */ + if (strcmp(lmr.VolStatus, "Full") == 0 || + strcmp(lmr.VolStatus, "Used") == 0) { + Dmsg2(100, "Add prune list MediaId=%lu Volume %s\n", lmr.MediaId, lmr.VolumeName); + count = get_prune_list_for_volume(ua, &lmr, &prune_list); + Dmsg1(100, "Num pruned = %d\n", count); + if (count != 0) { + purge_job_list_from_catalog(ua, prune_list); + prune_list.num_ids = 0; /* reset count */ + } + if (!is_volume_purged(ua, &lmr)) { + Dmsg1(100, "Vol=%s not pruned\n", lmr.VolumeName); + continue; + } + Dmsg1(100, "Vol=%s is purged\n", lmr.VolumeName); + + /* + * Since we are also pruning the Scratch pool, continue + * until and check if this volume is available (InChanger + StorageId) + * If not, just skip this volume and try the next one + */ + if (InChanger) { + /* ***FIXME*** should be any StorageId in sid_group */ + if (!lmr.InChanger || (lmr.StorageId != mr->StorageId)) { + Dmsg1(100, "Vol=%s not inchanger\n", lmr.VolumeName); + continue; /* skip this volume, ie not loadable */ + } + } + + if (!lmr.Recycle) { + Dmsg1(100, "Vol=%s not recyclable\n",
lmr.VolumeName); + continue; + } + + if (has_volume_expired(jcr, &lmr)) { + Dmsg1(100, "Vol=%s has expired\n", lmr.VolumeName); + continue; /* Volume not usable */ + } + + /* + * If purged and not moved to another Pool, + * then we stop pruning and take this volume. + */ + if (lmr.PoolId == mr->PoolId) { + Dmsg2(100, "Got Vol=%s MediaId=%lu purged.\n", lmr.VolumeName, lmr.MediaId); + mr->copy(&lmr); + set_storageid_in_mr(store, mr); + break; /* got a volume */ + } + } + } + +bail_out: + Dmsg0(100, "Leave prune volumes\n"); + db_unlock(jcr->db); + free_ua_context(ua); + if (prune_list.JobId) { + free(prune_list.JobId); + } + return; +} diff --git a/src/dird/backup.c b/src/dird/backup.c new file mode 100644 index 00000000..f94bf8f9 --- /dev/null +++ b/src/dird/backup.c @@ -0,0 +1,1081 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula Director -- backup.c -- responsible for doing backup jobs + * + * Kern Sibbald, March MM + * + * Basic tasks done here: + * Open DB and create records for this job. + * Open Message Channel with Storage daemon to tell him a job will be starting. + * Open connection with File daemon and pass him commands + * to do the backup. + * When the File daemon finishes the job, update the DB. + */ + +#include "bacula.h" +#include "dird.h" +#include "ua.h" + +/* Commands sent to File daemon */ +static char backupcmd[] = "backup FileIndex=%ld\n"; +static char storaddr[] = "storage address=%s port=%d ssl=%d\n"; + +/* Responses received from File daemon */ +static char OKbackup[] = "2000 OK backup\n"; +static char OKstore[] = "2000 OK storage\n"; +/* After 17 Aug 2013 */ +static char newEndJob[] = "2800 End Job TermCode=%d JobFiles=%u " + "ReadBytes=%llu JobBytes=%llu Errors=%u " + "VSS=%d Encrypt=%d " + "CommBytes=%lld CompressCommBytes=%lld\n"; +/* Pre 17 Aug 2013 */ +static char EndJob[] = "2800 End Job TermCode=%d JobFiles=%u " + "ReadBytes=%llu JobBytes=%llu Errors=%u " + "VSS=%d Encrypt=%d\n"; +/* Pre 1.39.29 (04Dec06) EndJob */ +static char OldEndJob[] = "2800 End Job TermCode=%d JobFiles=%u " + "ReadBytes=%llu JobBytes=%llu Errors=%u\n"; + +/* Commands sent to Storage daemon */ +static char clientaddr[] = "client address=%s port=%d ssl=%d\n"; + +/* Commands received from Storage daemon */ +static char OKclient[] = "3000 OK client command\n"; + +/* + * Called here before the job is run to do the job + * specific setup. 
+ */ +bool do_backup_init(JCR *jcr) +{ + /* Make local copy */ + jcr->RescheduleIncompleteJobs = jcr->job->RescheduleIncompleteJobs; + + if (jcr->is_JobLevel(L_VIRTUAL_FULL)) { + return do_vbackup_init(jcr); + } + free_rstorage(jcr); /* we don't read so release */ + + if (!get_or_create_fileset_record(jcr)) { + return false; + } + + /* + * Get definitive Job level and since time + */ + get_level_since_time(jcr, jcr->since, sizeof(jcr->since)); + + apply_pool_overrides(jcr); + + if (!allow_duplicate_job(jcr)) { + return false; + } + + jcr->jr.PoolId = get_or_create_pool_record(jcr, jcr->pool->name()); + if (jcr->jr.PoolId == 0) { + return false; + } + + /* If pool storage specified, use it instead of job storage */ + copy_wstorage(jcr, jcr->pool->storage, _("Pool resource")); + + if (!jcr->wstorage) { + Jmsg(jcr, M_FATAL, 0, _("No Storage specification found in Job or Pool.\n")); + return false; + } + + create_clones(jcr); /* run any clone jobs */ + + return true; +} + +/* Take all base jobs from job resource and find the + * last L_BASE jobid. + */ +static bool get_base_jobids(JCR *jcr, db_list_ctx *jobids) +{ + JOB_DBR jr; + JOB *job; + JobId_t id; + char str_jobid[50]; + + if (!jcr->job->base) { + return false; /* no base job, stop accurate */ + } + + memset(&jr, 0, sizeof(JOB_DBR)); + jr.StartTime = jcr->jr.StartTime; + + foreach_alist(job, jcr->job->base) { + bstrncpy(jr.Name, job->name(), sizeof(jr.Name)); + db_get_base_jobid(jcr, jcr->db, &jr, &id); + + if (id) { + if (jobids->count) { + pm_strcat(jobids->list, ","); + } + pm_strcat(jobids->list, edit_uint64(id, str_jobid)); + jobids->count++; + } + } + + return jobids->count > 0; +} + +/* + * Foreach files in currrent list, send "/path/fname\0LStat\0MD5\0Delta" to FD + * row[0]=Path, row[1]=Filename, row[2]=FileIndex + * row[3]=JobId row[4]=LStat row[5]=DeltaSeq row[6]=MD5 + */ +static int accurate_list_handler(void *ctx, int num_fields, char **row) +{ + JCR *jcr = (JCR *)ctx; + + if (job_canceled(jcr)) { + return 1; + } + + if (row[2][0] == '0') { /* discard when file_index == 0 */ + return 0; + } + + /* sending with checksum */ + if (jcr->use_accurate_chksum + && num_fields == 7 + && row[6][0] /* skip checksum = '0' */ + && row[6][1]) + { + jcr->file_bsock->fsend("%s%s%c%s%c%s%c%s", + row[0], row[1], 0, row[4], 0, row[6], 0, row[5]); + } else { + jcr->file_bsock->fsend("%s%s%c%s%c%c%s", + row[0], row[1], 0, row[4], 0, 0, row[5]); + } + return 0; +} + +/* In this procedure, we check if the current fileset is using checksum + * FileSet-> Include-> Options-> Accurate/Verify/BaseJob=checksum + * This procedure uses jcr->HasBase, so it must be call after the initialization + */ +static bool is_checksum_needed_by_fileset(JCR *jcr) +{ + FILESET *f; + INCEXE *inc; + FOPTS *fopts; + bool in_block=false; + bool have_basejob_option=false; + if (!jcr->job || !jcr->job->fileset) { + return false; + } + + f = jcr->job->fileset; + + for (int i=0; i < f->num_includes; i++) { /* Parse all Include {} */ + inc = f->include_items[i]; + + for (int j=0; j < inc->num_opts; j++) { /* Parse all Options {} */ + fopts = inc->opts_list[j]; + + for (char *k=fopts->opts; *k ; k++) { /* Try to find one request */ + switch (*k) { + case 'V': /* verify */ + in_block = (jcr->getJobType() == JT_VERIFY); /* not used now */ + break; + case 'J': /* Basejob keyword */ + have_basejob_option = in_block = jcr->HasBase; + break; + case 'C': /* Accurate keyword */ + in_block = !jcr->is_JobLevel(L_FULL); + break; + case ':': /* End of keyword */ + in_block = false; + break; 
+ case '5': /* MD5 */ + case '1': /* SHA1 */ + if (in_block) { + Dmsg0(50, "Checksum will be sent to FD\n"); + return true; + } + break; + default: + break; + } + } + } + } + + /* By default for BaseJobs, we send the checksum */ + if (!have_basejob_option && jcr->HasBase) { + return true; + } + + Dmsg0(50, "Checksum will be sent to FD\n"); + return false; +} + +/* + * Send current file list to FD + * DIR -> FD : accurate files=xxxx + * DIR -> FD : /path/to/file\0Lstat\0MD5\0Delta + * DIR -> FD : /path/to/dir/\0Lstat\0MD5\0Delta + * ... + * DIR -> FD : EOD + */ +bool send_accurate_current_files(JCR *jcr) +{ + POOL_MEM buf; + db_list_ctx jobids; + db_list_ctx nb; + char ed1[50]; + + /* In base level, no previous job is used and no restart incomplete jobs */ + if (jcr->is_canceled() || jcr->is_JobLevel(L_BASE)) { + return true; + } + if (!jcr->accurate && !jcr->rerunning) { + return true; + } + + if (jcr->is_JobLevel(L_FULL)) { + /* On Full mode, if no previous base job, no accurate things */ + if (get_base_jobids(jcr, &jobids)) { + jcr->HasBase = true; + Jmsg(jcr, M_INFO, 0, _("Using BaseJobId(s): %s\n"), jobids.list); + } else if (!jcr->rerunning) { + return true; + } + + } else if (jcr->is_JobLevel(L_VERIFY_DATA)) { + char ed1[50]; + jobids.add(edit_uint64(jcr->previous_jr.JobId, ed1)); + + } else { + /* For Incr/Diff level, we search for older jobs */ + db_get_accurate_jobids(jcr, jcr->db, &jcr->jr, &jobids); + + /* We are in Incr/Diff, but no Full to build the accurate list... */ + if (jobids.count == 0) { + Jmsg(jcr, M_FATAL, 0, _("Cannot find previous jobids.\n")); + return false; /* fail */ + } + } + + /* For incomplete Jobs, we add our own id */ + if (jcr->rerunning) { + edit_int64(jcr->JobId, ed1); + jobids.add(ed1); + } + + /* Don't send and store the checksum if fileset doesn't require it */ + jcr->use_accurate_chksum = is_checksum_needed_by_fileset(jcr); + + if (jcr->JobId) { /* display the message only for real jobs */ + Jmsg(jcr, M_INFO, 0, _("Sending Accurate information to the FD.\n")); + } + + /* to be able to allocate the right size for htable */ + Mmsg(buf, "SELECT sum(JobFiles) FROM Job WHERE JobId IN (%s)", jobids.list); + db_sql_query(jcr->db, buf.c_str(), db_list_handler, &nb); + Dmsg2(200, "jobids=%s nb=%s\n", jobids.list, nb.list); + jcr->file_bsock->fsend("accurate files=%s\n", nb.list); + + if (!db_open_batch_connexion(jcr, jcr->db)) { + Jmsg0(jcr, M_FATAL, 0, "Can't get batch sql connexion"); + return false; /* Fail */ + } + + if (jcr->HasBase) { + jcr->nb_base_files = str_to_int64(nb.list); + if (!db_create_base_file_list(jcr, jcr->db, jobids.list)) { + Jmsg1(jcr, M_FATAL, 0, "%s", db_strerror(jcr->db)); + return false; + } + if (!db_get_base_file_list(jcr, jcr->db, jcr->use_accurate_chksum, + accurate_list_handler, (void *)jcr)) { + Jmsg1(jcr, M_FATAL, 0, "%s", db_strerror(jcr->db)); + return false; + } + + } else { + int opts = jcr->use_accurate_chksum ? DBL_USE_MD5 : DBL_NONE; + if (!db_get_file_list(jcr, jcr->db_batch, + jobids.list, opts, + accurate_list_handler, (void *)jcr)) { + Jmsg1(jcr, M_FATAL, 0, "%s", db_strerror(jcr->db_batch)); + return false; + } + } + + /* TODO: close the batch connection ? 
(can be used very soon) */ + jcr->file_bsock->signal(BNET_EOD); + return true; +} + +bool send_store_addr_to_fd(JCR *jcr, STORE *store, + char *store_address, uint32_t store_port) +{ + int tls_need = BNET_TLS_NONE; + + /* TLS Requirement */ + if (store->tls_enable) { + if (store->tls_require) { + tls_need = BNET_TLS_REQUIRED; + } else { + tls_need = BNET_TLS_OK; + } + } + + /* + * Send Storage address to the FD + */ + jcr->file_bsock->fsend(storaddr, store_address, store_port, tls_need); + if (!response(jcr, jcr->file_bsock, OKstore, "Storage", DISPLAY_ERROR)) { + return false; + } + return true; +} + +bool send_client_addr_to_sd(JCR *jcr) +{ + int tls_need = BNET_TLS_NONE; + BSOCK *sd = jcr->store_bsock; + POOL_MEM buf; + + /* TLS Requirement for the client */ + if (jcr->client->tls_enable) { + if (jcr->client->tls_require) { + tls_need = BNET_TLS_REQUIRED; + } else { + tls_need = BNET_TLS_OK; + } + } + /* + * Send Client address to the SD + */ + sd->fsend(clientaddr, jcr->client->address(buf.addr()), jcr->client->FDport, tls_need); + if (!response(jcr, sd, OKclient, "Client", DISPLAY_ERROR)) { + return false; + } + return true; +} + +/* + * Allow to specify the address used by the Client to + * connect to the storage daemon in the Client resource + * or in the Storage resource. + */ +char *get_storage_address(CLIENT *client, STORE *store) +{ + char *store_address; + + if (client && client->fd_storage_address) { + Dmsg0(10, "Using Client resource FD Storage Address to contact the Storage\n"); + store_address = client->fd_storage_address; + + } else if (store->fd_storage_address) { + Dmsg0(10, "Using Storage resource FD Storage Address to contact the Storage\n"); + store_address = store->fd_storage_address; + + } else { + Dmsg0(10, "Using default Storage address\n"); + store_address = store->address; + } + return store_address; +} + +bool run_storage_and_start_message_thread(JCR *jcr, BSOCK *sd) +{ + /* + * Start the job prior to starting the message thread below + * to avoid two threads from using the BSOCK structure at + * the same time. + */ + if (!sd->fsend("run")) { + return false; + } + + /* + * Now start a Storage daemon message thread. Note, + * this thread is used to provide the catalog services + * for the backup job, including inserting the attributes + * into the catalog. 
See catalog_update() in catreq.c + */ + if (!start_storage_daemon_message_thread(jcr)) { + return false; + } + Dmsg0(150, "Storage daemon connection OK\n"); + return true; +} + +/* + * Do a backup of the specified FileSet + * + * Returns: false on failure + * true on success + */ +bool do_backup(JCR *jcr) +{ + int stat; + BSOCK *fd, *sd; + STORE *store; + char *store_address; + uint32_t store_port; + char ed1[100]; + db_int64_ctx job; + POOL_MEM buf; + + if (jcr->is_JobLevel(L_VIRTUAL_FULL)) { + return do_vbackup(jcr); + } + + /* Print Job Start message */ + if (jcr->rerunning) { + Jmsg(jcr, M_INFO, 0, _("Restart Incomplete Backup JobId %s, Job=%s\n"), + edit_uint64(jcr->JobId, ed1), jcr->Job); + } else { + Jmsg(jcr, M_INFO, 0, _("Start Backup JobId %s, Job=%s\n"), + edit_uint64(jcr->JobId, ed1), jcr->Job); + } + + jcr->setJobStatus(JS_Running); + Dmsg2(100, "JobId=%d JobLevel=%c\n", jcr->jr.JobId, jcr->jr.JobLevel); + if (!db_update_job_start_record(jcr, jcr->db, &jcr->jr)) { + Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(jcr->db)); + return false; + } + + /* For incomplete Jobs, we add our own id */ + if (jcr->rerunning) { + edit_int64(jcr->JobId, ed1); + Mmsg(buf, "SELECT max(FileIndex) FROM File WHERE JobId=%s", ed1); + if (db_sql_query(jcr->db, buf.c_str(), db_int64_handler, &job)) { + Jmsg(jcr, M_INFO, 0, _("Found %ld files from prior incomplete Job.\n"), + (int32_t)job.value); + } else { + Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(jcr->db)); + return false; + } + jcr->JobFiles = job.value; + Dmsg1(100, "==== FI=%ld\n", jcr->JobFiles); + Mmsg(buf, "SELECT VolSessionId FROM Job WHERE JobId=%s", ed1); + if (!db_sql_query(jcr->db, buf.c_str(), db_int64_handler, &job)) { + Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(jcr->db)); + return false; + } + jcr->VolSessionId = job.value; + Mmsg(buf, "SELECT VolSessionTime FROM Job WHERE JobId=%s", ed1); + if (!db_sql_query(jcr->db, buf.c_str(), db_int64_handler, &job)) { + Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(jcr->db)); + return false; + } + jcr->VolSessionTime = job.value; + Dmsg4(100, "JobId=%s JobFiles=%ld VolSessionId=%ld VolSessionTime=%ld\n", ed1, + jcr->JobFiles, jcr->VolSessionId, jcr->VolSessionTime); + } + + /* + * Open a message channel connection with the Storage + * daemon. This is to let him know that our client + * will be contacting him for a backup session. + * + */ + Dmsg0(110, "Open connection with storage daemon\n"); + jcr->setJobStatus(JS_WaitSD); + /* + * Start conversation with Storage daemon + */ + if (!connect_to_storage_daemon(jcr, 10, SDConnectTimeout, 1)) { + return false; + } + /* + * Now start a job with the Storage daemon + */ + if (!start_storage_daemon_job(jcr, NULL, jcr->wstorage)) { + return false; + } + sd = jcr->store_bsock; + if (jcr->client) { + jcr->sd_calls_client = jcr->client->sd_calls_client; + } + /* + * Note startup sequence of SD/FD is different depending on + * whether the SD listens (normal) or the SD calls the FD. 
+ */ + if (!jcr->sd_calls_client) { + if (!run_storage_and_start_message_thread(jcr, sd)) { + goto bail_out; + } + } + jcr->setJobStatus(JS_WaitFD); + if (!connect_to_file_daemon(jcr, 10, FDConnectTimeout, 1)) { + goto bail_out; + } + + jcr->setJobStatus(JS_Running); + fd = jcr->file_bsock; + + if (!send_level_command(jcr)) { + goto bail_out; + } + + if (!send_include_list(jcr)) { + goto bail_out; + } + + if (!send_exclude_list(jcr)) { + goto bail_out; + } + + /* TODO: See priority with bandwidth parameter */ + if (jcr->job->max_bandwidth > 0) { + jcr->max_bandwidth = jcr->job->max_bandwidth; + } else if (jcr->client->max_bandwidth > 0) { + jcr->max_bandwidth = jcr->client->max_bandwidth; + } + + if (jcr->max_bandwidth > 0) { + send_bwlimit(jcr, jcr->Job); /* Old clients don't have this command */ + } + + send_snapshot_retention(jcr, jcr->snapshot_retention); + + store = jcr->wstore; + + if (jcr->sd_calls_client) { + if (jcr->FDVersion < 10) { + Jmsg(jcr, M_FATAL, 0, _("The File daemon does not support SDCallsClient.\n")); + goto bail_out; + } + if (!send_client_addr_to_sd(jcr)) { + goto bail_out; + } + + if (!run_storage_and_start_message_thread(jcr, sd)) { + goto bail_out; + } + + store_address = jcr->wstore->address; /* dummy */ + store_port = 0; /* flag that SD calls FD */ + } else { + /* + * send Storage daemon address to the File daemon + */ + if (store->SDDport == 0) { + store->SDDport = store->SDport; + } + + store_address = get_storage_address(jcr->client, store); + store_port = store->SDDport; + } + + if (!send_store_addr_to_fd(jcr, store, store_address, store_port)) { + goto bail_out; + } + + /* Declare the job started to start the MaxRunTime check */ + jcr->setJobStarted(); + + /* Send and run the RunBefore */ + if (!send_runscripts_commands(jcr)) { + goto bail_out; + } + + /* + * We re-update the job start record so that the start + * time is set after the run before job. This avoids + * that any files created by the run before job will + * be saved twice. They will be backed up in the current + * job, but not in the next one unless they are changed. + * Without this, they will be backed up in this job and + * in the next job run because in that case, their date + * is after the start of this run. + */ + jcr->start_time = time(NULL); + jcr->jr.StartTime = jcr->start_time; + if (!db_update_job_start_record(jcr, jcr->db, &jcr->jr)) { + Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(jcr->db)); + } + + /* + * If backup is in accurate mode, we send the list of + * all files to FD. + */ + if (!send_accurate_current_files(jcr)) { + goto bail_out; /* error */ + } + + /* Send backup command */ + fd->fsend(backupcmd, jcr->JobFiles); + Dmsg1(100, ">filed: %s", fd->msg); + if (!response(jcr, fd, OKbackup, "backup", DISPLAY_ERROR)) { + goto bail_out; + } + + /* Pickup Job termination data */ + stat = wait_for_job_termination(jcr); + db_write_batch_file_records(jcr); /* used by bulk batch file insert */ + + if (jcr->HasBase) { + db_commit_base_file_attributes_record(jcr, jcr->db); + /* Any error already printed */ + } + + if (!jcr->is_canceled() && stat == JS_Terminated) { + backup_cleanup(jcr, stat); + return true; + } + return false; + +/* Come here only after starting SD thread */ +bail_out: + jcr->setJobStatus(JS_ErrorTerminated); + Dmsg1(400, "wait for sd. use=%d\n", jcr->use_count()); + /* Cancel SD */ + wait_for_job_termination(jcr, FDConnectTimeout); + Dmsg1(400, "after wait for sd. 
use=%d\n", jcr->use_count()); + return false; +} + + +/* + * Here we wait for the File daemon to signal termination, + * then we wait for the Storage daemon. When both + * are done, we return the job status. + * Also used by restore.c + */ +int wait_for_job_termination(JCR *jcr, int timeout) +{ + int32_t n = 0; + BSOCK *fd = jcr->file_bsock; + bool fd_ok = false; + uint32_t JobFiles, JobErrors; + uint32_t JobWarnings = 0; + uint64_t ReadBytes = 0; + uint64_t JobBytes = 0; + uint64_t CommBytes = 0; + uint64_t CommCompressedBytes = 0; + int VSS = 0; /* or Snapshot on Unix */ + int Encrypt = 0; + btimer_t *tid=NULL; + + if (fd) { + if (timeout) { + tid = start_bsock_timer(fd, timeout); /* TODO: New timeout directive??? */ + } + /* Wait for Client to terminate */ + while ((n = bget_dirmsg(fd)) >= 0) { + if (!fd_ok && + (sscanf(fd->msg, newEndJob, &jcr->FDJobStatus, &JobFiles, + &ReadBytes, &JobBytes, &JobErrors, &VSS, &Encrypt, + &CommBytes, &CommCompressedBytes) == 9 || + sscanf(fd->msg, EndJob, &jcr->FDJobStatus, &JobFiles, + &ReadBytes, &JobBytes, &JobErrors, &VSS, &Encrypt) == 7 || + sscanf(fd->msg, OldEndJob, &jcr->FDJobStatus, &JobFiles, + &ReadBytes, &JobBytes, &JobErrors) == 5)) { + fd_ok = true; + jcr->setJobStatus(jcr->FDJobStatus); + Dmsg1(100, "FDStatus=%c\n", (char)jcr->JobStatus); + } else { + Jmsg(jcr, M_WARNING, 0, _("Unexpected Client Job message: %s\n"), + fd->msg); + } + if (job_canceled(jcr)) { + break; + } + } + if (tid) { + stop_bsock_timer(tid); + } + + if (fd->is_error() && jcr->getJobStatus() != JS_Canceled) { + int i = 0; + Jmsg(jcr, M_FATAL, 0, _("Network error with FD during %s: ERR=%s\n"), + job_type_to_str(jcr->getJobType()), fd->bstrerror()); + while (i++ < 20 && jcr->job->RescheduleIncompleteJobs && jcr->is_canceled()) { + bmicrosleep(3, 0); + } + } + fd->signal(BNET_TERMINATE); /* tell Client we are terminating */ + } + + /* + * Force cancel in SD if failing, but not for Incomplete jobs + * so that we let the SD despool. + */ + Dmsg5(100, "cancel=%d fd_ok=%d FDJS=%d JS=%d SDJS=%d\n", jcr->is_canceled(), fd_ok, jcr->FDJobStatus, + jcr->JobStatus, jcr->SDJobStatus); + if (jcr->is_canceled() || (!jcr->job->RescheduleIncompleteJobs && !fd_ok)) { + Dmsg4(100, "fd_ok=%d FDJS=%d JS=%d SDJS=%d\n", fd_ok, jcr->FDJobStatus, + jcr->JobStatus, jcr->SDJobStatus); + cancel_storage_daemon_job(jcr); + } + + /* Note, the SD stores in jcr->JobFiles/ReadBytes/JobBytes/JobErrors */ + wait_for_storage_daemon_termination(jcr); + + /* Return values from FD */ + if (fd_ok) { + jcr->JobFiles = JobFiles; + jcr->JobErrors += JobErrors; /* Keep total errors */ + jcr->ReadBytes = ReadBytes; + jcr->JobBytes = JobBytes; + jcr->JobWarnings = JobWarnings; + jcr->CommBytes = CommBytes; + jcr->CommCompressedBytes = CommCompressedBytes; + jcr->Snapshot = VSS; + jcr->Encrypt = Encrypt; + } else if (jcr->getJobStatus() != JS_Canceled) { + Jmsg(jcr, M_FATAL, 0, _("No Job status returned from FD.\n")); + } + + /* Return the first error status we find Dir, FD, or SD */ + if (!fd_ok || fd->is_error()) { /* if fd not set, that use !fd_ok */ + if (jcr->getJobStatus() == JS_Canceled) { + jcr->FDJobStatus = JS_Canceled; + } else { + jcr->FDJobStatus = JS_ErrorTerminated; + } + } + if (jcr->JobStatus != JS_Terminated) { + return jcr->JobStatus; + } + if (jcr->FDJobStatus != JS_Terminated) { + return jcr->FDJobStatus; + } + return jcr->SDJobStatus; +} + +/* + * Release resources allocated during backup. 
+ */ +void backup_cleanup(JCR *jcr, int TermCode) +{ + char sdt[50], edt[50], schedt[50], edl[50]; + char ec1[30], ec2[30], ec3[30], ec4[30], ec5[30]; + char ec6[30], ec7[30], ec8[30], ec9[30], ec10[30], elapsed[50]; + char data_compress[200], comm_compress[200]; + char fd_term_msg[100], sd_term_msg[100]; + POOL_MEM term_msg; + int msg_type = M_INFO; + MEDIA_DBR mr; + CLIENT_DBR cr; + double kbps, compression, ratio; + utime_t RunTime; + POOL_MEM base_info; + POOL_MEM vol_info; + + remove_dummy_jobmedia_records(jcr); + + if (jcr->is_JobLevel(L_VIRTUAL_FULL)) { + vbackup_cleanup(jcr, TermCode); + return; + } + + Dmsg2(100, "Enter backup_cleanup %d %c\n", TermCode, TermCode); + memset(&cr, 0, sizeof(cr)); + +#ifdef xxxx + /* The current implementation of the JS_Warning status is not + * completed. SQL part looks to be ok, but the code is using + * JS_Terminated almost everywhere instead of (JS_Terminated || JS_Warning) + * as we do with is_canceled() + */ + if (jcr->getJobStatus() == JS_Terminated && + (jcr->JobErrors || jcr->SDErrors || jcr->JobWarnings)) { + TermCode = JS_Warnings; + } +#endif + + update_job_end(jcr, TermCode); + + if (!db_get_job_record(jcr, jcr->db, &jcr->jr)) { + Jmsg(jcr, M_WARNING, 0, _("Error getting Job record for Job report: ERR=%s"), + db_strerror(jcr->db)); + jcr->setJobStatus(JS_ErrorTerminated); + } + + bstrncpy(cr.Name, jcr->client->name(), sizeof(cr.Name)); + if (!db_get_client_record(jcr, jcr->db, &cr)) { + Jmsg(jcr, M_WARNING, 0, _("Error getting Client record for Job report: ERR=%s"), + db_strerror(jcr->db)); + } + + bstrncpy(mr.VolumeName, jcr->VolumeName, sizeof(mr.VolumeName)); + if (!db_get_media_record(jcr, jcr->db, &mr)) { + Jmsg(jcr, M_WARNING, 0, _("Error getting Media record for Volume \"%s\": ERR=%s"), + mr.VolumeName, db_strerror(jcr->db)); + jcr->setJobStatus(JS_ErrorTerminated); + } + + update_bootstrap_file(jcr); + + switch (jcr->JobStatus) { + case JS_Terminated: + if (jcr->JobErrors || jcr->SDErrors) { + Mmsg(term_msg, _("Backup OK -- %s"), jcr->StatusErrMsg[0] ? jcr->StatusErrMsg : _("with warnings")); + + } else { + Mmsg(term_msg, _("Backup OK")); + } + break; + case JS_Incomplete: + Mmsg(term_msg, _("Backup failed -- incomplete")); + break; + case JS_Warnings: + Mmsg(term_msg, _("Backup OK -- %s"), jcr->StatusErrMsg[0] ? jcr->StatusErrMsg : _("with warnings")); + break; + case JS_FatalError: + case JS_ErrorTerminated: + Mmsg(term_msg, _("*** Backup Error ***")); + msg_type = M_ERROR; /* Generate error message */ + terminate_sd_msg_chan_thread(jcr); + break; + case JS_Canceled: + Mmsg(term_msg, _("Backup Canceled")); + terminate_sd_msg_chan_thread(jcr); + break; + default: + Mmsg(term_msg, _("Inappropriate term code: %c\n"), jcr->JobStatus); + break; + } + bstrftimes(schedt, sizeof(schedt), jcr->jr.SchedTime); + bstrftimes(sdt, sizeof(sdt), jcr->jr.StartTime); + bstrftimes(edt, sizeof(edt), jcr->jr.EndTime); + RunTime = jcr->jr.EndTime - jcr->jr.StartTime; + if (RunTime <= 0) { + RunTime = 1; + } + kbps = ((double)jcr->jr.JobBytes) / (1000.0 * (double)RunTime); + if (!db_get_job_volume_names(jcr, jcr->db, jcr->jr.JobId, &jcr->VolumeName)) { + /* + * Note, if the job has erred, most likely it did not write any + * tape, so suppress this "error" message since in that case + * it is normal. Or look at it the other way, only for a + * normal exit should we complain about this error. 
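 *
 * (Illustrative aside on the "Software Compression" figure computed a
 * little further below: with, say, ReadBytes = 10,000,000 and
 * SDJobBytes = 4,000,000 the report would show "60.0% 2.5:1", i.e.
 * 100 - 100*4/10 = 60 percent saved at a 10/4 = 2.5:1 reduction.)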
+ */ + if (jcr->JobStatus == JS_Terminated && jcr->jr.JobBytes) { + Jmsg(jcr, M_ERROR, 0, "%s", db_strerror(jcr->db)); + } + jcr->VolumeName[0] = 0; /* none */ + } + + if (jcr->ReadBytes == 0) { + bstrncpy(data_compress, "None", sizeof(data_compress)); + } else { + compression = (double)100 - 100.0 * ((double)jcr->SDJobBytes / (double)jcr->ReadBytes); + if (compression < 0.5) { + bstrncpy(data_compress, "None", sizeof(data_compress)); + } else { + if (jcr->SDJobBytes > 0) { + ratio = (double)jcr->ReadBytes / (double)jcr->SDJobBytes; + } else { + ratio = 1.0; + } + bsnprintf(data_compress, sizeof(data_compress), "%.1f%% %.1f:1", + compression, ratio); + } + } + if (jcr->CommBytes == 0 || jcr->CommCompressedBytes == 0) { + bstrncpy(comm_compress, "None", sizeof(comm_compress)); + } else { + compression = (double)100 - 100.0 * ((double)jcr->CommCompressedBytes / (double)jcr->CommBytes); + if (compression < 0.5) { + bstrncpy(comm_compress, "None", sizeof(comm_compress)); + } else { + ratio = (double)jcr->CommBytes / (double)jcr->CommCompressedBytes; + bsnprintf(comm_compress, sizeof(comm_compress), "%.1f%% %.1f:1", + compression, ratio); + } + Dmsg2(200, "=== CommCompressed=%lld CommBytes=%lld\n", + jcr->CommCompressedBytes, jcr->CommBytes); + } + jobstatus_to_ascii(jcr->FDJobStatus, fd_term_msg, sizeof(fd_term_msg)); + jobstatus_to_ascii(jcr->SDJobStatus, sd_term_msg, sizeof(sd_term_msg)); + + if (jcr->HasBase) { + Mmsg(base_info, _(" Base files/Used files: %lld/%lld (%.2f%%)\n"), + jcr->nb_base_files, + jcr->nb_base_files_used, + jcr->nb_base_files_used*100.0/jcr->nb_base_files); + } + /* Edit string for last volume size */ + if (mr.VolABytes != 0) { + Mmsg(vol_info, _("meta: %s (%sB) aligned: %s (%sB)"), + edit_uint64_with_commas(mr.VolBytes, ec7), + edit_uint64_with_suffix(mr.VolBytes, ec8), + edit_uint64_with_commas(mr.VolABytes, ec9), + edit_uint64_with_suffix(mr.VolABytes, ec10)); + } else { + Mmsg(vol_info, _("%s (%sB)"), + edit_uint64_with_commas(mr.VolBytes, ec7), + edit_uint64_with_suffix(mr.VolBytes, ec8)); + } + +// bmicrosleep(15, 0); /* for debugging SIGHUP */ + + Jmsg(jcr, msg_type, 0, _("%s %s %s (%s):\n" +" Build OS: %s %s %s\n" +" JobId: %d\n" +" Job: %s\n" +" Backup Level: %s%s\n" +" Client: \"%s\" %s\n" +" FileSet: \"%s\" %s\n" +" Pool: \"%s\" (From %s)\n" +" Catalog: \"%s\" (From %s)\n" +" Storage: \"%s\" (From %s)\n" +" Scheduled time: %s\n" +" Start time: %s\n" +" End time: %s\n" +" Elapsed time: %s\n" +" Priority: %d\n" +" FD Files Written: %s\n" +" SD Files Written: %s\n" +" FD Bytes Written: %s (%sB)\n" +" SD Bytes Written: %s (%sB)\n" +" Rate: %.1f KB/s\n" +" Software Compression: %s\n" +" Comm Line Compression: %s\n" +"%s" /* Basefile info */ +" Snapshot/VSS: %s\n" +" Encryption: %s\n" +" Accurate: %s\n" +" Volume name(s): %s\n" +" Volume Session Id: %d\n" +" Volume Session Time: %d\n" +" Last Volume Bytes: %s\n" +" Non-fatal FD errors: %d\n" +" SD Errors: %d\n" +" FD termination status: %s\n" +" SD termination status: %s\n" +" Termination: %s\n\n"), + BACULA, my_name, VERSION, LSMDATE, + HOST_OS, DISTNAME, DISTVER, + jcr->jr.JobId, + jcr->jr.Job, + level_to_str(edl, sizeof(edl), jcr->getJobLevel()), jcr->since, + jcr->client->name(), cr.Uname, + jcr->fileset->name(), jcr->FSCreateTime, + jcr->pool->name(), jcr->pool_source, + jcr->catalog->name(), jcr->catalog_source, + jcr->wstore->name(), jcr->wstore_source, + schedt, + sdt, + edt, + edit_utime(RunTime, elapsed, sizeof(elapsed)), + jcr->JobPriority, + edit_uint64_with_commas(jcr->jr.JobFiles, ec1), + 
edit_uint64_with_commas(jcr->SDJobFiles, ec2), + edit_uint64_with_commas(jcr->jr.JobBytes, ec3), + edit_uint64_with_suffix(jcr->jr.JobBytes, ec4), + edit_uint64_with_commas(jcr->SDJobBytes, ec5), + edit_uint64_with_suffix(jcr->SDJobBytes, ec6), + kbps, + data_compress, + comm_compress, + base_info.c_str(), + jcr->Snapshot?_("yes"):_("no"), + jcr->Encrypt?_("yes"):_("no"), + jcr->accurate?_("yes"):_("no"), + jcr->VolumeName, + jcr->VolSessionId, + jcr->VolSessionTime, + vol_info.c_str(), + jcr->JobErrors, + jcr->SDErrors, + fd_term_msg, + sd_term_msg, + term_msg.c_str()); + + Dmsg0(100, "Leave backup_cleanup()\n"); +} + +void update_bootstrap_file(JCR *jcr) +{ + /* Now update the bootstrap file if any */ + if (jcr->JobStatus == JS_Terminated && jcr->jr.JobBytes && + jcr->job->WriteBootstrap) { + FILE *fd; + BPIPE *bpipe = NULL; + int got_pipe = 0; + char edl[50]; + POOLMEM *fname = get_pool_memory(PM_FNAME); + fname = edit_job_codes(jcr, fname, jcr->job->WriteBootstrap, "", + job_code_callback_director); + + VOL_PARAMS *VolParams = NULL; + int VolCount; + char edt[50], ed1[50], ed2[50]; + + if (*fname == '|') { + got_pipe = 1; + bpipe = open_bpipe(fname+1, 0, "w"); /* skip first char "|" */ + fd = bpipe ? bpipe->wfd : NULL; + } else { + /* ***FIXME*** handle BASE */ + fd = bfopen(fname, jcr->is_JobLevel(L_FULL)?"w+b":"a+b"); + } + if (fd) { + VolCount = db_get_job_volume_parameters(jcr, jcr->db, jcr->JobId, + &VolParams); + if (VolCount == 0) { + Jmsg(jcr, M_ERROR, 0, _("Could not get Job Volume Parameters to " + "update Bootstrap file. ERR=%s\n"), db_strerror(jcr->db)); + if (jcr->SDJobFiles != 0) { + jcr->setJobStatus(JS_ErrorTerminated); + } + + } + /* Start output with when and who wrote it */ + bstrftimes(edt, sizeof(edt), time(NULL)); + fprintf(fd, "# %s - %s - %s%s\n", edt, jcr->jr.Job, + level_to_str(edl, sizeof(edl), jcr->getJobLevel()), jcr->since); + for (int i=0; i < VolCount; i++) { + /* Write the record */ + fprintf(fd, "Volume=\"%s\"\n", VolParams[i].VolumeName); + fprintf(fd, "MediaType=\"%s\"\n", VolParams[i].MediaType); + if (VolParams[i].Slot > 0) { + fprintf(fd, "Slot=%d\n", VolParams[i].Slot); + } + fprintf(fd, "VolSessionId=%u\n", jcr->VolSessionId); + fprintf(fd, "VolSessionTime=%u\n", jcr->VolSessionTime); + fprintf(fd, "VolAddr=%s-%s\n", + edit_uint64(VolParams[i].StartAddr, ed1), + edit_uint64(VolParams[i].EndAddr, ed2)); + fprintf(fd, "FileIndex=%d-%d\n", VolParams[i].FirstIndex, + VolParams[i].LastIndex); + } + if (VolParams) { + free(VolParams); + } + if (got_pipe) { + close_bpipe(bpipe); + } else { + fclose(fd); + } + } else { + berrno be; + Jmsg(jcr, M_ERROR, 0, _("Could not open WriteBootstrap file:\n" + "%s: ERR=%s\n"), fname, be.bstrerror()); + jcr->setJobStatus(JS_ErrorTerminated); + } + free_pool_memory(fname); + } +} diff --git a/src/dird/bacula-dir.conf.in b/src/dird/bacula-dir.conf.in new file mode 100644 index 00000000..a9b7f04a --- /dev/null +++ b/src/dird/bacula-dir.conf.in @@ -0,0 +1,323 @@ +# +# Default Bacula Director Configuration file +# +# The only thing that MUST be changed is to add one or more +# file or directory names in the Include directive of the +# FileSet resource. +# +# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ @DISTVER@ +# +# You might also want to change the default email address +# from root to your address. See the "mail" and "operator" +# directives in the Messages resource. 
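#
# For example (illustrative address only -- substitute your own mailbox),
# receiving job reports and mount requests at a real account means later
# setting, in the Standard Messages resource below:
#
#   mail = backup-admin@example.com = all, !skipped
#   operator = backup-admin@example.com = mount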
+# +# Copyright (C) 2000-2017 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# + +Director { # define myself + Name = @basename@-dir + DIRport = @dir_port@ # where we listen for UA connections + QueryFile = "@scriptdir@/query.sql" + WorkingDirectory = "@working_dir@" + PidDirectory = "@piddir@" + Maximum Concurrent Jobs = 20 + Password = "@dir_password@" # Console password + Messages = Daemon +} + +JobDefs { + Name = "DefaultJob" + Type = Backup + Level = Incremental + Client = @basename@-fd + FileSet = "Full Set" + Schedule = "WeeklyCycle" + Storage = File1 + Messages = Standard + Pool = File + SpoolAttributes = yes + Priority = 10 + Write Bootstrap = "@working_dir@/%c.bsr" +} + + +# +# Define the main nightly save backup job +# By default, this job will back up to disk in @archivedir@ +Job { + Name = "BackupClient1" + JobDefs = "DefaultJob" +} + +#Job { +# Name = "BackupClient2" +# Client = @basename@2-fd +# JobDefs = "DefaultJob" +#} + +#Job { +# Name = "BackupClient1-to-Tape" +# JobDefs = "DefaultJob" +# Storage = LTO-4 +# Spool Data = yes # Avoid shoe-shine +# Pool = Default +#} + +#} + +# Backup the catalog database (after the nightly save) +Job { + Name = "BackupCatalog" + JobDefs = "DefaultJob" + Level = Full + FileSet="Catalog" + Schedule = "WeeklyCycleAfterBackup" + # This creates an ASCII copy of the catalog + # Arguments to make_catalog_backup.pl are: + # make_catalog_backup.pl + RunBeforeJob = "@scriptdir@/make_catalog_backup.pl MyCatalog" + # This deletes the copy of the catalog + RunAfterJob = "@scriptdir@/delete_catalog_backup" + Write Bootstrap = "@working_dir@/%n.bsr" + Priority = 11 # run after main backup +} + +# +# Standard Restore template, to be changed by Console program +# Only one such job is needed for all Jobs/Clients/Storage ... +# +Job { + Name = "RestoreFiles" + Type = Restore + Client=@basename@-fd + Storage = File1 +# The FileSet and Pool directives are not used by Restore Jobs +# but must not be removed + FileSet="Full Set" + Pool = File + Messages = Standard + Where = @archivedir@/bacula-restores +} + + +# List of files to be backed up +FileSet { + Name = "Full Set" + Include { + Options { + signature = MD5 + } +# +# Put your list of files here, preceded by 'File =', one per line +# or include an external list with: +# +# File = \" -s \"Bacula: %t %e of %c %l\" %r" + operatorcommand = "@sbindir@/bsmtp -h @smtp_host@ -f \"\(Bacula\) \<%r\>\" -s \"Bacula: Intervention needed for %j\" %r" + mail = @job_email@ = all, !skipped + operator = @job_email@ = mount + console = all, !skipped, !saved +# +# WARNING! the following will create a file that you must cycle from +# time to time as it will grow indefinitely. However, it will +# also keep all your messages if they scroll off the console. +# + append = "@logdir@/bacula.log" = all, !skipped + catalog = all +} + + +# +# Message delivery for daemon messages (no job). 
+Messages { + Name = Daemon + mailcommand = "@sbindir@/bsmtp -h @smtp_host@ -f \"\(Bacula\) \<%r\>\" -s \"Bacula daemon message\" %r" + mail = @job_email@ = all, !skipped + console = all, !skipped, !saved + append = "@logdir@/bacula.log" = all, !skipped +} + +# Default pool definition +Pool { + Name = Default + Pool Type = Backup + Recycle = yes # Bacula can automatically recycle Volumes + AutoPrune = yes # Prune expired volumes + Volume Retention = 365 days # one year + Maximum Volume Bytes = 50G # Limit Volume size to something reasonable + Maximum Volumes = 100 # Limit number of Volumes in Pool +} + +# File Pool definition +Pool { + Name = File + Pool Type = Backup + Recycle = yes # Bacula can automatically recycle Volumes + AutoPrune = yes # Prune expired volumes + Volume Retention = 365 days # one year + Maximum Volume Bytes = 50G # Limit Volume size to something reasonable + Maximum Volumes = 100 # Limit number of Volumes in Pool + Label Format = "Vol-" # Auto label +} + + +# Scratch pool definition +Pool { + Name = Scratch + Pool Type = Backup +} + +# +# Restricted console used by tray-monitor to get the status of the director +# +Console { + Name = @basename@-mon + Password = "@mon_dir_password@" + CommandACL = status, .status +} diff --git a/src/dird/bdirjson.c b/src/dird/bdirjson.c new file mode 100644 index 00000000..517cb241 --- /dev/null +++ b/src/dird/bdirjson.c @@ -0,0 +1,1488 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * + * Bacula Director conf to Json + * + * Kern Sibbald, September MMXII + * + */ + +#include "bacula.h" +#include "dird.h" + +/* Exported subroutines */ +extern bool parse_dir_config(CONFIG *config, const char *configfile, int exit_code); + +static CONFIG *config; + +/* Globals Exported */ +DIRRES *director; /* Director resource */ +int FDConnectTimeout; +int SDConnectTimeout; +char *configfile = NULL; +void *start_heap; + +/* Globals Imported */ +extern RES_ITEM job_items[]; +extern s_jt jobtypes[]; +extern s_jl joblevels[]; +extern s_jt migtypes[]; +extern s_kw ReplaceOptions[]; +extern RES_ITEM2 newinc_items[]; +extern RES_ITEM options_items[]; +extern s_fs_opt FS_options[]; +extern s_kw RunFields[]; +extern s_kw tapelabels[]; +extern s_kw msg_types[]; +extern RES_TABLE resources[]; + +#if defined(_MSC_VER) +extern "C" { // work around visual compiler mangling variables + extern URES res_all; +} +#else +extern URES res_all; +#endif + + +#define CONFIG_FILE "bacula-dir.conf" /* default configuration file */ + +static void usage() +{ + fprintf(stderr, _( +PROG_COPYRIGHT +"\n%sVersion: %s (%s)\n\n" +"Usage: bdirjson [] [config_file]\n" +" -r get resource type \n" +" -n get resource \n" +" -l get only directives matching dirs (use with -r)\n" +" -D get only data\n" +" -R do not apply JobDefs to Job\n" +" -c set configuration file to file\n" +" -d set debug level to \n" +" -dt print timestamp in debug output\n" +" -t test - read configuration and exit\n" +" -s output in show text format\n" +" -v verbose user messages\n" +" -? print this message.\n" +"\n"), 2012, "", VERSION, BDATE); + + exit(1); +} + +typedef struct +{ + /* default { { "Director": { "Name": aa, ...} }, { "Job": {..} */ + bool do_list; /* [ {}, {}, ..] or { "aa": {}, "bb": {}, ...} */ + bool do_one; /* { "Name": "aa", "Description": "test, ... 
} */ + bool do_only_data; /* [ {}, {}, {}, ] */ + char *resource_type; + char *resource_name; + regex_t directive_reg; +} display_filter; + +/* Forward referenced subroutines */ +void terminate_dird(int sig); +static bool check_resources(bool config_test); +static void sendit(void *ua, const char *fmt, ...); +static void dump_json(display_filter *filter); + +/********************************************************************* + * + * Bacula Director conf to Json + * + */ +int main (int argc, char *argv[]) +{ + int ch; + bool test_config = false; + bool apply_jobdefs = true; + bool do_show_format = false; + display_filter filter; + memset(&filter, 0, sizeof(filter)); + + setlocale(LC_ALL, ""); + bindtextdomain("bacula", LOCALEDIR); + textdomain("bacula"); + + if (init_crypto() != 0) { + Emsg0(M_ERROR_TERM, 0, _("Cryptography library initialization failed.\n")); + } + + my_name_is(argc, argv, "bacula-dir"); + init_msg(NULL, NULL); + + while ((ch = getopt(argc, argv, "RCDc:d:stv?l:r:n:")) != -1) { + switch (ch) { + case 'R': + apply_jobdefs = false; + break; + + case 'D': + filter.do_only_data = true; + break; + + case 'l': + /* Might use something like -l '^(Name|Description)$' */ + filter.do_list = true; + if (regcomp(&filter.directive_reg, optarg, REG_EXTENDED) != 0) { + Jmsg((JCR *)NULL, M_ERROR_TERM, 0, + _("Please use valid -l argument: %s\n"), optarg); + } + break; + + case 'r': + filter.resource_type = optarg; + break; + + case 'n': + filter.resource_name = optarg; + break; + + case 'c': /* specify config file */ + if (configfile != NULL) { + free(configfile); + } + configfile = bstrdup(optarg); + break; + + case 'd': /* set debug level */ + if (*optarg == 't') { + dbg_timestamp = true; + } else { + debug_level = atoi(optarg); + if (debug_level <= 0) { + debug_level = 1; + } + } + Dmsg1(10, "Debug level = %d\n", debug_level); + break; + + case 's': /* Show text format */ + do_show_format = true; + break; + + case 't': /* test config */ + test_config = true; + break; + + case 'v': /* verbose */ + verbose++; + break; + + case '?': + default: + usage(); + + } + } + argc -= optind; + argv += optind; + + + if (argc) { + if (configfile != NULL) { + free(configfile); + } + configfile = bstrdup(*argv); + argc--; + argv++; + } + if (argc) { + usage(); + } + + if (filter.do_list && !filter.resource_type) { + usage(); + } + + if (filter.resource_type && filter.resource_name) { + filter.do_one = true; + } + + if (configfile == NULL || configfile[0] == 0) { + configfile = bstrdup(CONFIG_FILE); + } + + if (test_config && verbose > 0) { + char buf[1024]; + find_config_file(configfile, buf, sizeof(buf)); + sendit(NULL, "config_file=%s\n", buf); + } + + config = New(CONFIG()); + config->encode_password(false); + parse_dir_config(config, configfile, M_ERROR_TERM); + + /* TODO: If we run check_resources, jobdefs will be copied to Job, and the job resource + * will no longer be the real job... 
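 *
 * As an illustration (the path here is a hypothetical example), the raw
 * Job exactly as written in the configuration can be dumped with
 *
 *   bdirjson -R -r Job -n BackupClient1 /opt/bacula/etc/bacula-dir.conf
 *
 * whereas omitting -R lets check_resources() fold the JobDefs defaults
 * into the Job before it is printed.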
+ */ + if (!check_resources(apply_jobdefs)) { + Jmsg((JCR *)NULL, M_ERROR_TERM, 0, _("Please correct configuration file: %s\n"), configfile); + } + + if (test_config) { + terminate_dird(0); + } + + my_name_is(0, NULL, director->name()); /* set user defined name */ + + if (do_show_format) { + /* Do show output in text */ + for (int i=r_first; i<=r_last; i++) { + dump_each_resource(i, sendit, NULL); + } + } else { + dump_json(&filter); + } + + if (filter.do_list) { + regfree(&filter.directive_reg); + } + + terminate_dird(0); + + return 0; +} + +/* Cleanup and then exit */ +void terminate_dird(int sig) +{ + static bool already_here = false; + + if (already_here) { /* avoid recursive temination problems */ + bmicrosleep(2, 0); /* yield */ + exit(1); + } + already_here = true; + debug_level = 0; /* turn off debug */ + if (configfile != NULL) { + free(configfile); + } + if (debug_level > 5) { + print_memory_pool_stats(); + } + if (config) { + delete config; + config = NULL; + } + term_msg(); + free(res_head); + res_head = NULL; + close_memory_pool(); /* release free memory in pool */ + //sm_dump(false); + exit(sig); +} + + +static void display_jobtype(HPKT &hpkt) +{ + int i; + for (i=0; jobtypes[i].type_name; i++) { + if (*(int32_t *)(hpkt.ritem->value) == jobtypes[i].job_type) { + sendit(NULL, "\n \"%s\": %s", hpkt.ritem->name, + quote_string(hpkt.edbuf, jobtypes[i].type_name)); + return; + } + } +} + +static void display_label(HPKT &hpkt) +{ + int i; + for (i=0; tapelabels[i].name; i++) { + if (*(int32_t *)(hpkt.ritem->value) == tapelabels[i].token) { + sendit(NULL, "\n \"%s\": %s", hpkt.ritem->name, + quote_string(hpkt.edbuf, tapelabels[i].name)); + return; + } + } +} + +static void display_joblevel(HPKT &hpkt) +{ + int i; + for (i=0; joblevels[i].level_name; i++) { + if (*(int32_t *)(hpkt.ritem->value) == joblevels[i].level) { + sendit(NULL, "\n \"%s\": %s", hpkt.ritem->name, + quote_string(hpkt.edbuf, joblevels[i].level_name)); + return; + } + } +} + +static void display_replace(HPKT &hpkt) +{ + int i; + for (i=0; ReplaceOptions[i].name; i++) { + if (*(int32_t *)(hpkt.ritem->value) == ReplaceOptions[i].token) { + sendit(NULL, "\n \"%s\": %s", hpkt.ritem->name, + quote_string(hpkt.edbuf, ReplaceOptions[i].name)); + return; + } + } +} + +static void display_migtype(HPKT &hpkt) +{ + int i; + for (i=0; migtypes[i].type_name; i++) { + if (*(int32_t *)(hpkt.ritem->value) == migtypes[i].job_type) { + sendit(NULL, "\n \"%s\": %s", hpkt.ritem->name, + quote_string(hpkt.edbuf, migtypes[i].type_name)); + return; + } + } +} + +static void display_actiononpurge(HPKT &hpkt) +{ + sendit(NULL, "\n \"%s\":", hpkt.ritem->name); + if (*(uint32_t *)(hpkt.ritem->value) | ON_PURGE_TRUNCATE) { + sendit(NULL, "\"Truncate\""); + } else { + sendit(NULL, "null"); + } +} + +static void display_acl(HPKT &hpkt) +{ + sendit(NULL, "\n \"%s\":", hpkt.ritem->name); + hpkt.list = ((alist **)hpkt.ritem->value)[hpkt.ritem->code]; + display_alist(hpkt); +} + + +static void display_options(HPKT &hpkt, INCEXE *ie) +{ + char *elt; + bool first_opt = true; + bool first_dir; + int i, j, k; + alist *list; + + sendit(NULL, " \"Options\": [ \n {\n"); + for (i=0; inum_opts; i++) { + FOPTS *fo = ie->opts_list[i]; + if (!first_opt) { + sendit(NULL, ",\n {\n"); + } + first_dir = true; + for (j=0; options_items[j].name; j++) { + if (options_items[j].handler == store_regex) { + switch (options_items[j].code) { + case 1: /* RegexDir */ + list = &fo->regexdir; + break; + case 2: /* RegexFile */ + list = &fo->regexfile; + break; + default: + 
list = &fo->regex; + break; + } + if (list->size() > 0) { + if (!first_dir) { + sendit(NULL, ",\n"); + } + sendit(NULL, " \"%s\":", options_items[j].name); + hpkt.list = list; + display_alist(hpkt); + first_dir = false; + first_opt = false; + } + } else if (options_items[j].handler == store_wild) { + switch (options_items[j].code) { + case 1: /* WildDir */ + list = &fo->wilddir; + break; + case 2: /* WildFile */ + /* + * Note: There used to be an enhanced wild card feature, + * which was not documented so it is removed, and + * apparently the wildfile patterns are stored in the + * wildbase list, so we dump it here. + * The old enhanced wild implementation appears to be poorly + * done, because either there should be two clearly named + * lists, or one with everything. + */ + /* We copy one list to the other, else we may print two + * times the WildFile list. I don't know how, but sometime + * the two lists contain elements. + */ + list = &fo->wildfile; + foreach_alist(elt, list) { + fo->wildbase.append(bstrdup(elt)); + } + list = &fo->wildbase; + break; + default: + list = &fo->wild; + break; + } + if (list->size() > 0) { + if (!first_dir) { + sendit(NULL, ",\n"); + } + sendit(NULL, " \"%s\":", options_items[j].name); + hpkt.list = list; + display_alist(hpkt); + first_dir = false; + first_opt = false; + } + } else if (options_items[j].handler == store_base) { + list = &fo->base; + if (list->size() > 0) { + if (!first_dir) { + sendit(NULL, ",\n"); + } + sendit(NULL, " \"%s\":", options_items[j].name); + hpkt.list = list; + display_alist(hpkt); + first_dir = false; + first_opt = false; + } + } else if (options_items[j].handler == store_opts) { + bool found = false; + if (bit_is_set(options_items[j].flags, ie->opt_present)) { + for (k=0; FS_options[k].name; k++) { + if (FS_options[k].keyword == (int)options_items[j].flags) { + char lopts[100]; + strip_long_opts(lopts, fo->opts); + if (strstr(lopts, FS_options[k].option)) { + if (!first_dir) { + sendit(NULL, ",\n"); + } + sendit(NULL, " \"%s\": %s", options_items[j].name, + quote_string(hpkt.edbuf, FS_options[k].name)); + found = true; + break; + } + } + } + if (found) { + first_dir = false; + first_opt = false; + } + } + } else if (options_items[j].handler == store_lopts) { + bool found = false; + if (bit_is_set(options_items[j].flags, ie->opt_present)) { + char *pos; + /* Search long_options for code (V, J, C, P) */ + if ((pos=strchr(fo->opts, options_items[j].code))) { + char lopts[100]; + char *end, bkp; + pos++; /* point to beginning of options */ + bstrncpy(lopts, pos, sizeof(lopts)); + /* Now terminate at first : */ + end = strchr(pos, ':'); + if (end) { + bkp = *end; /* save the original char */ + *end = 0; /* terminate this string */ + } + if (!first_dir) { + sendit(NULL, ",\n"); + } + sendit(NULL, " \"%s\": %s", options_items[j].name, + quote_string(hpkt.edbuf, pos)); + found = true; + if (end) { /* Still have other options to parse */ + *end = bkp; + } + } + if (found) { + first_dir = false; + first_opt = false; + } + } + } else if (options_items[j].handler == store_plugin) { + if (fo->plugin) { + if (!first_dir) { + sendit(NULL, ",\n"); + } + sendit(NULL, " \"%s\": %s", options_items[j].name, + quote_string(hpkt.edbuf, fo->plugin)); + first_dir = false; + first_opt = false; + } + } else if (options_items[j].handler == store_fstype) { + list = &fo->fstype; + if (list->size() > 0) { + if (!first_dir) { + sendit(NULL, ",\n"); + } + sendit(NULL, " \"%s\":", options_items[j].name); + hpkt.list = list; + display_alist(hpkt); + first_dir 
= false; + first_opt = false; + } + } else if (options_items[j].handler == store_drivetype) { + list = &fo->drivetype; + if (list->size() > 0) { + if (!first_dir) { + sendit(NULL, ",\n"); + } + sendit(NULL, " \"%s\":", options_items[j].name); + hpkt.list = list; + display_alist(hpkt); + first_dir = false; + first_opt = false; + } + } + } + sendit(NULL, "\n }"); + } + sendit(NULL, "\n ]"); +} + +/* + * Include or Exclude in a FileSet + * TODO: Not working with multiple Include{} + * O M + * N + * I /tmp/regress/build + * N + * O Z1 + * N + * I /tmp + * N + */ +static void display_include_exclude(HPKT &hpkt) +{ + bool first_dir; + int i, j; + FILESET *fs = (FILESET *)hpkt.res; + + if (hpkt.ritem->code == 0) { /* Include */ + INCEXE *ie; + sendit(NULL, "\n \"%s\": [{\n", hpkt.ritem->name); + for (j=0; jnum_includes; j++) { + if (j > 0) { + sendit(NULL, ",\n {\n"); + } + first_dir = true; + ie = fs->include_items[j]; + for (i=0; newinc_items[i].name; i++) { + if (strcasecmp(newinc_items[i].name, "File") == 0) { + if (!first_dir) { + sendit(NULL, ",\n"); + } + sendit(NULL, " \"%s\":", newinc_items[i].name); + first_dir = false; + hpkt.list = &ie->name_list; + display_alist(hpkt); + } if (strcasecmp(newinc_items[i].name, "Plugin") == 0 && + ie->plugin_list.size() > 0) { + if (!first_dir) { + sendit(NULL, ",\n"); + } + sendit(NULL, " \"%s\":", newinc_items[i].name); + first_dir = false; + hpkt.list = &ie->plugin_list; + display_alist(hpkt); + } if (strcasecmp(newinc_items[i].name, "Options") == 0 && + ie->num_opts > 0) { + if (!first_dir) { + sendit(NULL, ",\n"); + } + display_options(hpkt, ie); + } if (strcasecmp(newinc_items[i].name, "ExcludeDirContaining") == 0 && + ie->ignoredir) { + if (!first_dir) { + sendit(NULL, ",\n"); + } + sendit(NULL, " \"%s\": %s ", newinc_items[i].name, + quote_string(hpkt.edbuf, ie->ignoredir)); + first_dir = false; + } + } + sendit(NULL, "\n }"); + } + sendit(NULL, "]"); + } else { + /* Exclude */ + sendit(NULL, "\n \"%s\": {\n", hpkt.ritem->name); + first_dir = true; + for (int i=0; newinc_items[i].name; i++) { + INCEXE *ie; + if (strcasecmp(newinc_items[i].name, "File") == 0) { + if (!first_dir) { + sendit(NULL, ",\n"); + } + sendit(NULL, " \"%s\": ", newinc_items[i].name); + first_dir = false; + ie = fs->exclude_items[0]; + hpkt.list = &ie->name_list; + display_alist(hpkt); + } + } + sendit(NULL, "\n }"); + } +} + +static bool display_runscript(HPKT &hpkt) +{ + RUNSCRIPT *script; + RUNSCRIPT *def = new_runscript(); + alist **runscripts = (alist **)(hpkt.ritem->value) ; + bool first=true; + + if (!*runscripts || (*runscripts)->size() == 0) { + return false; + } + + sendit(NULL, "\n \"Runscript\": [\n"); + + foreach_alist(script, *runscripts) { + if (first) { + sendit(NULL, " {\n"); + } else { + sendit(NULL, ",\n {\n"); + } + if (script->when == SCRIPT_Any) { + sendit(NULL, " \"RunsWhen\": \"Any\",\n"); + + } else if (script->when == SCRIPT_After) { + sendit(NULL, " \"RunsWhen\": \"After\",\n"); + + } else if (script->when == SCRIPT_Before) { + sendit(NULL, " \"RunsWhen\": \"Before\",\n"); + + } else if (script->when == SCRIPT_AfterVSS) { + sendit(NULL, " \"RunsWhen\": \"AfterVSS\",\n"); + } + + if (script->fail_on_error != def->fail_on_error) { + sendit(NULL, " \"FailJobOnError\": %s,\n", script->fail_on_error?"true":"false"); + } + + if (script->on_success != def->on_success) { + sendit(NULL, " \"RunsOnSuccess\": %s,\n", script->on_success?"true":"false"); + } + + if (script->on_failure != def->on_failure) { + sendit(NULL, " \"RunsOnFailure\": %s,\n", 
script->on_failure?"true":"false"); + } + + if (script->is_local()) { + sendit(NULL, " \"RunsOnClient\": false,\n"); + } + + if (script->command) { + sendit(NULL, " \"%s\": %s\n", + (script->cmd_type == SHELL_CMD)?"Command":"Console", + quote_string(hpkt.edbuf, script->command)); + } + sendit(NULL, " }"); + first = false; + } + + sendit(NULL, "\n ]\n"); + free_runscript(def); + return true; +} + +static void display_run(HPKT &hpkt) +{ + int i, j; + RUN **prun = (RUN **)hpkt.ritem->value; + RUN *run = *prun; + bool first = true; + bool first_run = true; + RES *res; + + sendit(NULL, "\n \"%s\": [\n", hpkt.ritem->name); + for ( ; run; run=run->next) { + if (!first_run) sendit(NULL, ",\n"); + first_run = false; + first = true; + sendit(NULL, " {\n"); + /* First do override fields */ + for (i=0; RunFields[i].name; i++) { + switch (RunFields[i].token) { + case 'f': /* FullPool */ + if (run->full_pool) { + res = (RES *)run->full_pool; + if (!first) sendit(NULL, ",\n"); + sendit(NULL, " \"%s\": %s", RunFields[i].name, + quote_string(hpkt.edbuf, res->name)); + first = false; + } + break; + case 'i': /* IncrementalPool */ + if (run->inc_pool) { + res = (RES *)run->inc_pool; + if (!first) sendit(NULL, ",\n"); + sendit(NULL, " \"%s\": %s", RunFields[i].name, + quote_string(hpkt.edbuf, res->name)); + first = false; + } + break; + case 'd': /* Differential Pool */ + if (run->diff_pool) { + res = (RES *)run->diff_pool; + if (!first) sendit(NULL, ",\n"); + sendit(NULL, " \"%s\": %s", RunFields[i].name, + quote_string(hpkt.edbuf, res->name)); + first = false; + } + break; + case 'N': /* Next Pool */ + if (run->next_pool) { + res = (RES *)run->next_pool; + if (!first) sendit(NULL, ",\n"); + sendit(NULL, " \"%s\": %s", RunFields[i].name, + quote_string(hpkt.edbuf, res->name)); + first = false; + } + break; + case 'L': /* Level */ + /* TODO: It's not always set, only when having Level= in the line */ + //if (run->level_set) { + for (j=0; joblevels[j].level_name; j++) { + if ((int)run->level == joblevels[j].level) { + if (!first) sendit(NULL, ",\n"); + sendit(NULL, " \"%s\": \"%s\"", RunFields[i].name, + joblevels[j].level_name); + first = false; + break; + } + } + //} + break; + case 'P': /* Pool */ + if (run->pool) { + res = (RES *)run->pool; + if (!first) sendit(NULL, ",\n"); + sendit(NULL, " \"%s\": %s", RunFields[i].name, + quote_string(hpkt.edbuf, res->name)); + first = false; + } + break; + case 'S': /* Storage */ + if (run->storage) { + res = (RES *)run->storage; + if (!first) sendit(NULL, ",\n"); + sendit(NULL, " \"%s\": %s", RunFields[i].name, + quote_string(hpkt.edbuf, res->name)); + first = false; + } + break; + case 'M': /* Messages */ + if (run->msgs) { + res = (RES *)run->msgs; + if (!first) sendit(NULL, ",\n"); + sendit(NULL, " \"%s\": %s", RunFields[i].name, + quote_string(hpkt.edbuf, res->name)); + first = false; + } + break; + case 'p': /* priority */ + if (run->priority_set) { + if (!first) sendit(NULL, ",\n"); + sendit(NULL, " \"%s\": %d", RunFields[i].name, + run->Priority); + first = false; + } + break; + case 's': /* Spool Data */ + if (run->spool_data_set) { + if (!first) sendit(NULL, ",\n"); + sendit(NULL, " \"%s\": %s", RunFields[i].name, + run->spool_data?"true":"false"); + first = false; + } + break; + case 'W': /* Write Part After Job */ + if (run->write_part_after_job_set) { + if (!first) sendit(NULL, ",\n"); + sendit(NULL, " \"%s\": %s", RunFields[i].name, + run->write_part_after_job?"true":"false"); + first = false; + } + break; + case 'm': /* MaxRunScheduledTime */ + if 
(run->MaxRunSchedTime_set) { + if (!first) sendit(NULL, ",\n"); + sendit(NULL, " \"%s\": %lld", RunFields[i].name, + run->MaxRunSchedTime); + first = false; + } + break; + case 'a': /* Accurate */ + if (run->accurate_set) { + if (!first) sendit(NULL, ",\n"); + sendit(NULL, " \"%s\": %s", RunFields[i].name, + run->accurate?"true":"false"); + first = false; + } + break; + default: + break; + } + } /* End all RunFields (overrides) */ + /* Now handle timing */ + if (byte_is_set(run->hour, sizeof(run->hour))) { + if (!first) sendit(NULL, ",\n"); + sendit(NULL, " \"Hour\":"); + display_bit_array(run->hour, 24); + sendit(NULL, ",\n \"Minute\": %d", run->minute); + first = false; + } + /* bit 32 is used to store the keyword LastDay, so we look up to 0-31 */ + if (byte_is_set(run->mday, sizeof(run->mday))) { + if (!first) sendit(NULL, ",\n"); + sendit(NULL, " \"Day\":"); + display_bit_array(run->mday, 31); + first = false; + } + if (run->last_day_set) { + if (!first) sendit(NULL, ",\n"); + sendit(NULL, " \"LastDay\": 1"); + first = false; + } + if (byte_is_set(run->month, sizeof(run->month))) { + if (!first) sendit(NULL, ",\n"); + sendit(NULL, " \"Month\":"); + display_bit_array(run->month, 12); + first = false; + } + if (byte_is_set(run->wday, sizeof(run->wday))) { + if (!first) sendit(NULL, ",\n"); + sendit(NULL, " \"DayOfWeek\":"); + display_bit_array(run->wday, 7); + first = false; + } + if (byte_is_set(run->wom, sizeof(run->wom))) { + if (!first) sendit(NULL, ",\n"); + sendit(NULL, " \"WeekOfMonth\":"); + display_bit_array(run->wom, 6); + first = false; + } + if (byte_is_set(run->woy, sizeof(run->woy))) { + if (!first) sendit(NULL, ",\n"); + sendit(NULL, " \"WeekOfYear\":"); + display_bit_array(run->woy, 54); + first = false; + } + sendit(NULL, "\n }"); + + } /* End this Run directive */ + sendit(NULL, "\n ]"); +} + +/* + * Dump out all resources in json format. + * Note!!!! This routine must be in this file rather + * than in src/lib/parser_conf.c otherwise the pointers + * will be all messed up. + */ +static void dump_json(display_filter *filter) +{ + int resinx, item, first_directive, name_pos=0; + bool first_res; + RES_ITEM *items; + RES *res; + HPKT hpkt; + regmatch_t pmatch[32]; + + init_hpkt(hpkt); + + /* List resources and directives */ + if (filter->do_only_data) { + /* Skip the Name */ + sendit(NULL, "["); + + /* + * { "aa": { "Name": "aa",.. }, "bb": { "Name": "bb", ... } + * or print a single item + */ + } else if (filter->do_one || filter->do_list) { + sendit(NULL, "{"); + + } else { + /* [ { "Client": { "Name": "aa",.. } }, { "Director": { "Name": "bb", ... } } ]*/ + sendit(NULL, "["); + } + + first_res = true; + /* Main loop over all resources */ + for (resinx=0; resources[resinx].items; resinx++) { + + /* Skip this resource type? 
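 * (With -r Client, for example, every resource type other than Client
 *  is skipped at this point and its resources are never visited.)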
*/ + if (filter->resource_type && + strcasecmp(filter->resource_type, resources[resinx].name) != 0) { + continue; + } + + /* Loop over each resource of this type */ + foreach_rblist(res, res_head[resinx]->res_list) { + hpkt.res = res; + items = resources[resinx].items; + if (!items) { + continue; + } + + /* Copy the resource into res_all */ + memcpy(&res_all, res, sizeof(res_all)); + + /* If needed, skip this resource type */ + if (filter->resource_name) { + bool skip=true; + /* The Name should be at the first place, so this is not a real loop */ + for (item=0; items[item].name; item++) { + if (strcasecmp(items[item].name, "Name") == 0) { + if (strcmp(*(items[item].value), filter->resource_name) == 0) { + skip = false; + } + break; + } + } + if (skip) { /* The name doesn't match, so skip it */ + continue; + } + } + + if (first_res) { + sendit(NULL, "\n"); + } else { + sendit(NULL, ",\n"); + } + + /* Find where the Name is defined, should always be 0 */ + for (item=0; items[item].name; item++) { + if (strcmp(items[item].name, "Name") == 0) { + name_pos = item; + break; + } + } + + if (filter->do_only_data) { + sendit(NULL, " {"); + + } else if (filter->do_one) { + /* Nothing to print */ + + /* When sending the list, the form is: + * { aa: { Name: aa, Description: aadesc...}, bb: { Name: bb + */ + } else if (filter->do_list) { + /* Search and display Name, should be the first item */ + for (item=0; items[item].name; item++) { + if (strcmp(items[item].name, "Name") == 0) { + sendit(NULL, "%s: {\n", quote_string(hpkt.edbuf2, *items[item].value)); + break; + } + } + } else { + /* Begin new resource */ + sendit(NULL, "{\n \"%s\": {", resources[resinx].name); + } + + first_res = false; + first_directive = 0; + + /* + * Here we walk through a resource displaying all the + * directives and sub-resources in the resource. 
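 *
 * In the default (unfiltered) mode the fragment produced for one resource
 * comes out roughly as follows (illustrative only; directive set abridged,
 * values hypothetical):
 *
 *   { "Client": {
 *       "Name": "backup1-fd",
 *       "Address": "backup1.example.com",
 *       "Catalog": "MyCatalog",
 *       "Password": "xxx"
 *     }
 *   }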
+ */ + for (item=0; items[item].name; item++) { + /* Check user argument -l */ + if (filter->do_list && + regexec(&filter->directive_reg, + items[item].name, 32, pmatch, 0) != 0) + { + continue; + } + + hpkt.ritem = &items[item]; + + if (bit_is_set(item, res_all.hdr.item_present)) { + + /* Skip Directive in lowercase, but check if the next + * one is pointing to the same location (for example User and dbuser) + */ + if (!B_ISUPPER(*(items[item].name))) { + int i=item+1; + while(!B_ISUPPER(*(items[i].name)) && items[i].value == items[item].value) { + i++; + } + if (items[i].value == items[item].value) { + set_bit(i, res_all.hdr.item_present); + } + continue; + } + + if (first_directive++ > 0) sendit(NULL, ","); + if (display_global_item(hpkt)) { + /* Fall-through wanted */ + } else if (items[item].handler == store_jobtype) { + display_jobtype(hpkt); + } else if (items[item].handler == store_label) { + display_label(hpkt); + } else if (items[item].handler == store_level) { + display_joblevel(hpkt); + } else if (items[item].handler == store_replace) { + display_replace(hpkt); + } else if (items[item].handler == store_migtype) { + display_migtype(hpkt); + } else if (items[item].handler == store_actiononpurge) { + display_actiononpurge(hpkt); + /* FileSet Include/Exclude directive */ + } else if (items[item].handler == store_inc) { + display_include_exclude(hpkt); + } else if (items[item].handler == store_ac_res) { + display_res(hpkt); + /* A different alist for each item.code */ + } else if (items[item].handler == store_acl) { + display_acl(hpkt); + } else if (items[item].handler == store_device) { + display_alist_res(hpkt); + } else if (items[item].handler == store_run) { + display_run(hpkt); + } else if (items[item].handler == store_runscript) { + if (!display_runscript(hpkt)) { + first_directive = 0; /* Do not print a comma after this empty runscript */ + } + } else { + sendit(NULL, "\n \"%s\": null", items[item].name); + } + } else { /* end if is present */ + /* For some directive, the bitmap is not set (like addresses) */ + /* Special trick for the Autochanger directive, it can be yes/no/storage */ + if (strcmp(resources[resinx].name, "Storage") == 0) { + if (strcasecmp(items[item].name, "Autochanger") == 0 + && items[item].handler == store_bool /* yes or no */ + && *(bool *)(items[item].value) == true) + { + if (first_directive++ > 0) sendit(NULL, ","); + if (*(items[item-1].value) == NULL) { + sendit(NULL, "\n \"Autochanger\": %s", quote_string(hpkt.edbuf2, *items[name_pos].value)); + } else { + STORE *r = (STORE *)*(items[item-1].value); + sendit(NULL, "\n \"Autochanger\": %s", quote_string(hpkt.edbuf2, r->name())); + } + } + } + + if (strcmp(resources[resinx].name, "Director") == 0) { + if (strcmp(items[item].name, "DirPort") == 0) { + if (get_first_port_host_order(director->DIRaddrs) != items[item].default_value) { + if (first_directive++ > 0) sendit(NULL, ","); + sendit(NULL, "\n \"DirPort\": %d", + get_first_port_host_order(director->DIRaddrs)); + } + + } else if (strcmp(items[item].name, "DirAddress") == 0) { + char buf[500]; + get_first_address(director->DIRaddrs, buf, sizeof(buf)); + if (strcmp(buf, "0.0.0.0") != 0) { + if (first_directive++ > 0) sendit(NULL, ","); + sendit(NULL, "\n \"DirAddress\": \"%s\"", buf); + } + + } else if (strcmp(items[item].name, "DirSourceAddress") == 0 && director->DIRsrc_addr) { + char buf[500]; + get_first_address(director->DIRsrc_addr, buf, sizeof(buf)); + if (strcmp(buf, "0.0.0.0") != 0) { + if (first_directive++ > 0) sendit(NULL, ","); + 
sendit(NULL, "\n \"DirSourceAddress\": \"%s\"", buf); + } + } + } + } + if (items[item].flags & ITEM_LAST) { + display_last(hpkt); /* If last bit set always call to cleanup */ + } + } /* loop over directive names */ + + /* { "aa": { "Name": "aa",.. }, "bb": { "Name": "bb", ... } */ + if (filter->do_only_data || filter->do_list) { + sendit(NULL, "\n }"); /* Finish the Resource with a single } */ + + } else { + if (filter->do_one) { + /* don't print anything */ + + } else if (first_directive > 0) { + sendit(NULL, "\n }\n}"); /* end of resource */ + + } else { + sendit(NULL, "}\n}"); + } + } + } /* End loop over all resources of this type */ + } /* End loop all resource types */ + + if (filter->do_only_data) { + sendit(NULL, "\n]\n"); + + /* In list context, we are dealing with a hash */ + } else if (filter->do_one || filter->do_list) { + sendit(NULL, "\n}\n"); + + } else { + sendit(NULL, "\n]\n"); + } + term_hpkt(hpkt); +} + +/* + * Make a quick check to see that we have all the + * resources needed. + * + * **** FIXME **** this routine could be a lot more + * intelligent and comprehensive. + */ +static bool check_resources(bool apply_jobdefs) +{ + bool OK = true; + JOB *job; + bool need_tls; + + LockRes(); + + job = (JOB *)GetNextRes(R_JOB, NULL); + director = (DIRRES *)GetNextRes(R_DIRECTOR, NULL); + if (!director) { + Jmsg(NULL, M_FATAL, 0, _("No Director resource defined in %s\n" +"Without that I don't know who I am :-(\n"), configfile); + OK = false; + } else { + set_working_directory(director->working_directory); + if (!director->messages) { /* If message resource not specified */ + director->messages = (MSGS *)GetNextRes(R_MSGS, NULL); + if (!director->messages) { + Jmsg(NULL, M_FATAL, 0, _("No Messages resource defined in %s\n"), configfile); + OK = false; + } + } + if (GetNextRes(R_DIRECTOR, (RES *)director) != NULL) { + Jmsg(NULL, M_FATAL, 0, _("Only one Director resource permitted in %s\n"), + configfile); + OK = false; + } + /* tls_require implies tls_enable */ + if (director->tls_require) { + if (have_tls) { + director->tls_enable = true; + } else { + Jmsg(NULL, M_FATAL, 0, _("TLS required but not configured in Bacula.\n")); + OK = false; + } + } + + need_tls = director->tls_enable || director->tls_authenticate; + + if (!director->tls_certfile && need_tls) { + Jmsg(NULL, M_FATAL, 0, _("\"TLS Certificate\" file not defined for Director \"%s\" in %s.\n"), + director->name(), configfile); + OK = false; + } + + if (!director->tls_keyfile && need_tls) { + Jmsg(NULL, M_FATAL, 0, _("\"TLS Key\" file not defined for Director \"%s\" in %s.\n"), + director->name(), configfile); + OK = false; + } + + if ((!director->tls_ca_certfile && !director->tls_ca_certdir) && + need_tls && director->tls_verify_peer) { + Jmsg(NULL, M_FATAL, 0, _("Neither \"TLS CA Certificate\" or \"TLS CA" + " Certificate Dir\" are defined for Director \"%s\" in %s." 
+ " At least one CA certificate store is required" + " when using \"TLS Verify Peer\".\n"), + director->name(), configfile); + OK = false; + } + } + + /* Loop over Consoles */ + CONRES *cons; + foreach_res(cons, R_CONSOLE) { + /* tls_require implies tls_enable */ + if (cons->tls_require) { + if (have_tls) { + cons->tls_enable = true; + } else { + Jmsg(NULL, M_FATAL, 0, _("TLS required but not configured in Bacula.\n")); + OK = false; + continue; + } + } + + need_tls = cons->tls_enable || cons->tls_authenticate; + + if (!cons->tls_certfile && need_tls) { + Jmsg(NULL, M_FATAL, 0, _("\"TLS Certificate\" file not defined for Console \"%s\" in %s.\n"), + cons->name(), configfile); + OK = false; + } + + if (!cons->tls_keyfile && need_tls) { + Jmsg(NULL, M_FATAL, 0, _("\"TLS Key\" file not defined for Console \"%s\" in %s.\n"), + cons->name(), configfile); + OK = false; + } + + if ((!cons->tls_ca_certfile && !cons->tls_ca_certdir) + && need_tls && cons->tls_verify_peer) { + Jmsg(NULL, M_FATAL, 0, _("Neither \"TLS CA Certificate\" or \"TLS CA" + " Certificate Dir\" are defined for Console \"%s\" in %s." + " At least one CA certificate store is required" + " when using \"TLS Verify Peer\".\n"), + cons->name(), configfile); + OK = false; + } + /* If everything is well, attempt to initialize our per-resource TLS context */ + if (OK && (need_tls || cons->tls_require)) { + /* Initialize TLS context: + * Args: CA certfile, CA certdir, Certfile, Keyfile, + * Keyfile PEM Callback, Keyfile CB Userdata, DHfile, Verify Peer */ + cons->tls_ctx = new_tls_context(cons->tls_ca_certfile, + cons->tls_ca_certdir, cons->tls_certfile, + cons->tls_keyfile, NULL, NULL, cons->tls_dhfile, cons->tls_verify_peer); + + if (!cons->tls_ctx) { + Jmsg(NULL, M_FATAL, 0, _("Failed to initialize TLS context for Console \"%s\" in %s.\n"), + cons->name(), configfile); + OK = false; + } + } + + } + + /* Loop over Clients */ + CLIENT *client; + foreach_res(client, R_CLIENT) { + /* tls_require implies tls_enable */ + if (client->tls_require) { + if (have_tls) { + client->tls_enable = true; + } else { + Jmsg(NULL, M_FATAL, 0, _("TLS required but not configured in Bacula.\n")); + OK = false; + continue; + } + } + need_tls = client->tls_enable || client->tls_authenticate; + if ((!client->tls_ca_certfile && !client->tls_ca_certdir) && need_tls) { + Jmsg(NULL, M_FATAL, 0, _("Neither \"TLS CA Certificate\"" + " or \"TLS CA Certificate Dir\" are defined for File daemon \"%s\" in %s.\n"), + client->name(), configfile); + OK = false; + } + + } + + if (!job) { + Jmsg(NULL, M_FATAL, 0, _("No Job records defined in %s\n"), configfile); + OK = false; + } + + /* TODO: We can't really update all job, we need to show only the real configuration + * and not Job+JobDefs + */ + if (!apply_jobdefs) { + UnlockRes(); + return OK; + } + + foreach_res(job, R_JOB) { + int i; + + if (job->jobdefs) { + JOB *jobdefs = job->jobdefs; + /* Handle RunScripts alists specifically */ + if (jobdefs->RunScripts) { + RUNSCRIPT *rs, *elt; + + if (!job->RunScripts) { + job->RunScripts = New(alist(10, not_owned_by_alist)); + } + + foreach_alist(rs, jobdefs->RunScripts) { + elt = copy_runscript(rs); + job->RunScripts->append(elt); /* we have to free it */ + } + } + + /* Transfer default items from JobDefs Resource */ + for (i=0; job_items[i].name; i++) { + char **def_svalue, **svalue; /* string value */ + uint32_t *def_ivalue, *ivalue; /* integer value */ + bool *def_bvalue, *bvalue; /* bool value */ + int64_t *def_lvalue, *lvalue; /* 64 bit values */ + uint32_t offset; + 
alist **def_avalue, **avalue; /* alist value */ + + Dmsg4(1400, "Job \"%s\", field \"%s\" bit=%d def=%d\n", + job->name(), job_items[i].name, + bit_is_set(i, job->hdr.item_present), + bit_is_set(i, job->jobdefs->hdr.item_present)); + + if (!bit_is_set(i, job->hdr.item_present) && + bit_is_set(i, job->jobdefs->hdr.item_present)) { + Dmsg2(400, "Job \"%s\", field \"%s\": getting default.\n", + job->name(), job_items[i].name); + offset = (char *)(job_items[i].value) - (char *)&res_all; + /* + * Handle strings and directory strings + */ + if (job_items[i].handler == store_str || + job_items[i].handler == store_dir) { + def_svalue = (char **)((char *)(job->jobdefs) + offset); + Dmsg5(400, "Job \"%s\", field \"%s\" def_svalue=%s item %d offset=%u\n", + job->name(), job_items[i].name, *def_svalue, i, offset); + svalue = (char **)((char *)job + offset); + if (*svalue) { + Pmsg1(000, _("Hey something is wrong. p=0x%lu\n"), *svalue); + } + *svalue = bstrdup(*def_svalue); + set_bit(i, job->hdr.item_present); + /* + * Handle resources + */ + } else if (job_items[i].handler == store_res) { + def_svalue = (char **)((char *)(job->jobdefs) + offset); + Dmsg4(400, "Job \"%s\", field \"%s\" item %d offset=%u\n", + job->name(), job_items[i].name, i, offset); + svalue = (char **)((char *)job + offset); + if (*svalue) { + Pmsg1(000, _("Hey something is wrong. p=0x%lu\n"), *svalue); + } + *svalue = *def_svalue; + set_bit(i, job->hdr.item_present); + /* + * Handle alist resources + */ + } else if (job_items[i].handler == store_alist_str) { + char *elt; + + def_avalue = (alist **)((char *)(job->jobdefs) + offset); + avalue = (alist **)((char *)job + offset); + + *avalue = New(alist(10, owned_by_alist)); + + foreach_alist(elt, (*def_avalue)) { + (*avalue)->append(bstrdup(elt)); + } + set_bit(i, job->hdr.item_present); + + } else if (job_items[i].handler == store_alist_res) { + void *elt; + + def_avalue = (alist **)((char *)(job->jobdefs) + offset); + avalue = (alist **)((char *)job + offset); + + *avalue = New(alist(10, not_owned_by_alist)); + + foreach_alist(elt, (*def_avalue)) { + (*avalue)->append(elt); + } + set_bit(i, job->hdr.item_present); + /* + * Handle integer fields + * Note, our store_bit does not handle bitmaped fields + */ + } else if (job_items[i].handler == store_bit || + job_items[i].handler == store_pint32 || + job_items[i].handler == store_jobtype || + job_items[i].handler == store_level || + job_items[i].handler == store_int32 || + job_items[i].handler == store_size32 || + job_items[i].handler == store_migtype || + job_items[i].handler == store_replace) { + def_ivalue = (uint32_t *)((char *)(job->jobdefs) + offset); + Dmsg5(400, "Job \"%s\", field \"%s\" def_ivalue=%d item %d offset=%u\n", + job->name(), job_items[i].name, *def_ivalue, i, offset); + ivalue = (uint32_t *)((char *)job + offset); + *ivalue = *def_ivalue; + set_bit(i, job->hdr.item_present); + /* + * Handle 64 bit integer fields + */ + } else if (job_items[i].handler == store_time || + job_items[i].handler == store_size64 || + job_items[i].handler == store_int64) { + def_lvalue = (int64_t *)((char *)(job->jobdefs) + offset); + Dmsg5(400, "Job \"%s\", field \"%s\" def_lvalue=%" lld " item %d offset=%u\n", + job->name(), job_items[i].name, *def_lvalue, i, offset); + lvalue = (int64_t *)((char *)job + offset); + *lvalue = *def_lvalue; + set_bit(i, job->hdr.item_present); + /* + * Handle bool fields + */ + } else if (job_items[i].handler == store_bool) { + def_bvalue = (bool *)((char *)(job->jobdefs) + offset); + Dmsg5(400, "Job 
\"%s\", field \"%s\" def_bvalue=%d item %d offset=%u\n", + job->name(), job_items[i].name, *def_bvalue, i, offset); + bvalue = (bool *)((char *)job + offset); + *bvalue = *def_bvalue; + set_bit(i, job->hdr.item_present); + + } else { + Dmsg1(10, "Handler missing for job_items[%d]\n", i); + ASSERTD(0, "JobDefs -> Job handler missing\n"); + } + } + } + } + /* + * Ensure that all required items are present + */ + for (i=0; job_items[i].name; i++) { + if (job_items[i].flags & ITEM_REQUIRED) { + if (!bit_is_set(i, job->hdr.item_present)) { + Jmsg(NULL, M_ERROR_TERM, 0, _("\"%s\" directive in Job \"%s\" resource is required, but not found.\n"), + job_items[i].name, job->name()); + OK = false; + } + } + /* If this triggers, take a look at lib/parse_conf.h */ + if (i >= MAX_RES_ITEMS) { + Emsg0(M_ERROR_TERM, 0, _("Too many items in Job resource\n")); + } + } + if (!job->storage && !job->pool->storage) { + Jmsg(NULL, M_FATAL, 0, _("No storage specified in Job \"%s\" nor in Pool.\n"), + job->name()); + OK = false; + } + } /* End loop over Job res */ + + UnlockRes(); + return OK; +} + +static void sendit(void *sock, const char *fmt, ...) +{ + char buf[3000]; + va_list arg_ptr; + + va_start(arg_ptr, fmt); + bvsnprintf(buf, sizeof(buf), (char *)fmt, arg_ptr); + va_end(arg_ptr); + fputs(buf, stdout); + fflush(stdout); +} diff --git a/src/dird/bsr.c b/src/dird/bsr.c new file mode 100644 index 00000000..aeb136aa --- /dev/null +++ b/src/dird/bsr.c @@ -0,0 +1,761 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * Bacula Director -- Bootstrap Record routines. + * + * BSR (bootstrap record) handling routines split from + * ua_restore.c July MMIII + * + * Kern Sibbald, July MMII + * + */ + +#include "bacula.h" +#include "dird.h" + +/* Forward referenced functions */ +static uint32_t write_bsr(UAContext *ua, RESTORE_CTX &rx, FILE *fd); + +/* + * Create new FileIndex entry for BSR + */ +RBSR_FINDEX *new_findex() +{ + RBSR_FINDEX *fi = (RBSR_FINDEX *)bmalloc(sizeof(RBSR_FINDEX)); + memset(fi, 0, sizeof(RBSR_FINDEX)); + return fi; +} + +/* + * Get storage device name from Storage resource + */ +static bool get_storage_device(char *device, char *storage) +{ + STORE *store; + if (storage[0] == 0) { + return false; + } + store = (STORE *)GetResWithName(R_STORAGE, storage); + if (!store) { + return false; + } + DEVICE *dev = (DEVICE *)(store->device->first()); + if (!dev) { + return false; + } + bstrncpy(device, dev->hdr.name, MAX_NAME_LENGTH); + return true; +} + +/* + * Our data structures were not designed completely + * correctly, so the file indexes cover the full + * range regardless of volume. The FirstIndex and LastIndex + * passed in here are for the current volume, so when + * writing out the fi, constrain them to those values. + * + * We are called here once for each JobMedia record + * for each Volume. 
+ */ +static uint32_t write_findex(rblist *fi_list, + int32_t FirstIndex, int32_t LastIndex, FILE *fd) +{ + RBSR_FINDEX *fi; + uint32_t count = 0; + + fi = (RBSR_FINDEX *) fi_list->first(); + while (fi) { + int32_t findex, findex2; + + /* fi points to the first item of the list, or the next item that is not + * contigous to the previous group + */ + findex = fi->findex; + findex2 = fi->findex2; + + /* Sometime (with the restore command for example), the fi_list can + * contain false gaps (1-10, 11-11, 12-20 instead of 1-20). The for loop + * is here to merge blocks and reduce the bsr output. The next while(fi) + * iteration will use the next_fi that points to the last merged element. + */ + RBSR_FINDEX *next_fi; + for (next_fi = (RBSR_FINDEX*) fi_list->next(fi); + next_fi && next_fi->findex == (findex2+1); + next_fi = (RBSR_FINDEX *) fi_list->next(next_fi)) + { + findex2 = next_fi->findex2; + } + + /* next_fi points after the current block (or to the end of the list), so + * the next while() iteration will use the next value + */ + fi = next_fi; + + /* We look if the current FI block match the volume information */ + if ((findex >= FirstIndex && findex <= LastIndex) || + (findex2 >= FirstIndex && findex2 <= LastIndex) || + (findex < FirstIndex && findex2 > LastIndex)) { + + findex = findex < FirstIndex ? FirstIndex : findex; + findex2 = findex2 > LastIndex ? LastIndex : findex2; + + if (findex == findex2) { + fprintf(fd, "FileIndex=%d\n", findex); + count++; + } else { + fprintf(fd, "FileIndex=%d-%d\n", findex, findex2); + count += findex2 - findex + 1; + } + } + } + + return count; +} + +/* + * Find out if Volume defined with FirstIndex and LastIndex + * falls within the range of selected files in the bsr. + */ +static bool is_volume_selected(rblist *fi_list, + int32_t FirstIndex, int32_t LastIndex) +{ + RBSR_FINDEX *fi; + foreach_rblist(fi, fi_list) { + if ((fi->findex >= FirstIndex && fi->findex <= LastIndex) || + (fi->findex2 >= FirstIndex && fi->findex2 <= LastIndex) || + (fi->findex < FirstIndex && fi->findex2 > LastIndex)) { + return true; + } + } + return false; +} + + +/* Create a new bootstrap record */ +RBSR *new_bsr() +{ + RBSR_FINDEX *fi=NULL; + RBSR *bsr = (RBSR *)bmalloc(sizeof(RBSR)); + memset(bsr, 0, sizeof(RBSR)); + bsr->fi_list = New(rblist(fi, &fi->link)); + return bsr; +} + +/* Free the entire BSR */ +void free_bsr(rblist *bsr_list) +{ + RBSR *bsr; + foreach_rblist(bsr, bsr_list) { + delete bsr->fi_list; + if (bsr->VolParams) { + free(bsr->VolParams); + } + if (bsr->fileregex) { + free(bsr->fileregex); + } + if (bsr->m_fi) { + free(bsr->m_fi); + } + } + delete bsr_list; +} + +/* + * Complete the BSR by filling in the VolumeName and + * VolSessionId and VolSessionTime using the JobId + */ +bool complete_bsr(UAContext *ua, rblist *bsr_list) +{ + RBSR *bsr; + foreach_rblist(bsr, bsr_list) { + JOB_DBR jr; + memset(&jr, 0, sizeof(jr)); + jr.JobId = bsr->JobId; + if (!db_get_job_record(ua->jcr, ua->db, &jr)) { + ua->error_msg(_("Unable to get Job record. ERR=%s\n"), db_strerror(ua->db)); + return false; + } + bsr->VolSessionId = jr.VolSessionId; + bsr->VolSessionTime = jr.VolSessionTime; + if (jr.JobFiles == 0) { /* zero files is OK, not an error, but */ + bsr->VolCount = 0; /* there are no volumes */ + continue; + } + if ((bsr->VolCount=db_get_job_volume_parameters(ua->jcr, ua->db, bsr->JobId, + &(bsr->VolParams))) == 0) { + ua->error_msg(_("Unable to get Job Volume Parameters. 
ERR=%s\n"), db_strerror(ua->db)); + if (bsr->VolParams) { + free(bsr->VolParams); + bsr->VolParams = NULL; + } + return false; + } + } + return true; +} + +static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; +static uint32_t uniq = 0; + +static void make_unique_restore_filename(UAContext *ua, POOL_MEM &fname) +{ + JCR *jcr = ua->jcr; + int i = find_arg_with_value(ua, "bootstrap"); + if (i >= 0) { + Mmsg(fname, "%s", ua->argv[i]); + jcr->unlink_bsr = false; + } else { + P(mutex); + uniq++; + V(mutex); + Mmsg(fname, "%s/%s.restore.%u.bsr", working_directory, my_name, uniq); + jcr->unlink_bsr = true; + } + if (jcr->RestoreBootstrap) { + free(jcr->RestoreBootstrap); + } + jcr->RestoreBootstrap = bstrdup(fname.c_str()); +} + +/* + * Write the bootstrap records to file + */ +uint32_t write_bsr_file(UAContext *ua, RESTORE_CTX &rx) +{ + FILE *fd; + POOL_MEM fname(PM_MESSAGE); + uint32_t count = 0;; + bool err; + + make_unique_restore_filename(ua, fname); + fd = bfopen(fname.c_str(), "w+b"); + if (!fd) { + berrno be; + ua->error_msg(_("Unable to create bootstrap file %s. ERR=%s\n"), + fname.c_str(), be.bstrerror()); + goto bail_out; + } + /* Write them to file */ + count = write_bsr(ua, rx, fd); + err = ferror(fd); + fclose(fd); + if (count == 0) { + ua->info_msg(_("No files found to read. No bootstrap file written.\n")); + goto bail_out; + } + if (err) { + ua->error_msg(_("Error writing bsr file.\n")); + count = 0; + goto bail_out; + } + + if (chk_dbglvl(10)) { + print_bsr(ua, rx); + } + +bail_out: + return count; +} + +static void display_vol_info(UAContext *ua, RESTORE_CTX &rx, JobId_t JobId) +{ + POOL_MEM volmsg(PM_MESSAGE); + char Device[MAX_NAME_LENGTH]; + char online; + RBSR *bsr; + + foreach_rblist(bsr, rx.bsr_list) { + if (JobId && JobId != bsr->JobId) { + continue; + } + + for (int i=0; i < bsr->VolCount; i++) { + if (bsr->VolParams[i].VolumeName[0]) { + if (!get_storage_device(Device, bsr->VolParams[i].Storage)) { + Device[0] = 0; + } + if (bsr->VolParams[i].InChanger && bsr->VolParams[i].Slot) { + online = '*'; + } else { + online = ' '; + } + Mmsg(volmsg, "%c%-25s %-25s %-25s", + online, bsr->VolParams[i].VolumeName, + bsr->VolParams[i].Storage, Device); + add_prompt(ua, volmsg.c_str()); + } + } + } +} + +void display_bsr_info(UAContext *ua, RESTORE_CTX &rx) +{ + char *p; + JobId_t JobId; + + /* Tell the user what he will need to mount */ + ua->send_msg("\n"); + ua->send_msg(_("The Job will require the following (*=>InChanger):\n" + " Volume(s) Storage(s) SD Device(s)\n" + "===========================================================================\n")); + /* Create Unique list of Volumes using prompt list */ + start_prompt(ua, ""); + if (*rx.JobIds == 0) { + /* Print Volumes in any order */ + display_vol_info(ua, rx, 0); + } else { + /* Ensure that the volumes are printed in JobId order */ + for (p=rx.JobIds; get_next_jobid_from_list(&p, &JobId) > 0; ) { + display_vol_info(ua, rx, JobId); + } + } + for (int i=0; i < ua->num_prompts; i++) { + ua->send_msg(" %s\n", ua->prompt[i]); + free(ua->prompt[i]); + if (ua->unique[i]) free(ua->unique[i]); + } + if (ua->num_prompts == 0) { + ua->send_msg(_("No Volumes found to restore.\n")); + } else { + ua->send_msg(_("\nVolumes marked with \"*\" are in the Autochanger.\n")); + } + ua->num_prompts = 0; + ua->send_msg("\n"); + + return; +} + +/* + * Write bsr data for a single bsr record + */ +static uint32_t write_bsr_item(RBSR *bsr, UAContext *ua, + RESTORE_CTX &rx, FILE *fd, bool &first, uint32_t &LastIndex) +{ + char ed1[50], ed2[50]; + 
uint32_t count = 0; + uint32_t total_count = 0; + char device[MAX_NAME_LENGTH]; + + /* + * For a given volume, loop over all the JobMedia records. + * VolCount is the number of JobMedia records. + */ + for (int i=0; i < bsr->VolCount; i++) { + if (!is_volume_selected(bsr->fi_list, bsr->VolParams[i].FirstIndex, + bsr->VolParams[i].LastIndex)) { + bsr->VolParams[i].VolumeName[0] = 0; /* zap VolumeName */ + continue; + } + if (!rx.store) { + find_storage_resource(ua, rx, bsr->VolParams[i].Storage, + bsr->VolParams[i].MediaType); + } + fprintf(fd, "Storage=\"%s\"\n", bsr->VolParams[i].Storage); + fprintf(fd, "Volume=\"%s\"\n", bsr->VolParams[i].VolumeName); + fprintf(fd, "MediaType=\"%s\"\n", bsr->VolParams[i].MediaType); + if (bsr->fileregex) { + fprintf(fd, "FileRegex=%s\n", bsr->fileregex); + } + if (get_storage_device(device, bsr->VolParams[i].Storage)) { + fprintf(fd, "Device=\"%s\"\n", device); + } + if (bsr->VolParams[i].Slot > 0) { + fprintf(fd, "Slot=%d\n", bsr->VolParams[i].Slot); + } + fprintf(fd, "VolSessionId=%u\n", bsr->VolSessionId); + fprintf(fd, "VolSessionTime=%u\n", bsr->VolSessionTime); + fprintf(fd, "VolAddr=%s-%s\n", edit_uint64(bsr->VolParams[i].StartAddr, ed1), + edit_uint64(bsr->VolParams[i].EndAddr, ed2)); + Dmsg2(100, "bsr VolParam FI=%u LI=%u\n", + bsr->VolParams[i].FirstIndex, bsr->VolParams[i].LastIndex); + + count = write_findex(bsr->fi_list, bsr->VolParams[i].FirstIndex, + bsr->VolParams[i].LastIndex, fd); + if (count) { + fprintf(fd, "Count=%u\n", count); + } + total_count += count; + /* If the same file is present on two tapes or in two files + * on a tape, it is a continuation, and should not be treated + * twice in the totals. + */ + if (!first && LastIndex == bsr->VolParams[i].FirstIndex) { + total_count--; + } + first = false; + LastIndex = bsr->VolParams[i].LastIndex; + } + return total_count; +} + + +/* + * Here we actually write out the details of the bsr file. + * Note, there is one bsr for each JobId, but the bsr may + * have multiple volumes, which have been entered in the + * order they were written. + * The bsrs must be written out in the order the JobIds + * are found in the jobid list. + */ +static uint32_t write_bsr(UAContext *ua, RESTORE_CTX &rx, FILE *fd) +{ + bool first = true; + uint32_t LastIndex = 0; + uint32_t total_count = 0; + char *p; + JobId_t JobId; + RBSR *bsr; + if (*rx.JobIds == 0) { + foreach_rblist(bsr, rx.bsr_list) { + total_count += write_bsr_item(bsr, ua, rx, fd, first, LastIndex); + } + return total_count; + } + for (p=rx.JobIds; get_next_jobid_from_list(&p, &JobId) > 0; ) { + foreach_rblist(bsr, rx.bsr_list) { + if (JobId == bsr->JobId) { + total_count += write_bsr_item(bsr, ua, rx, fd, first, LastIndex); + } + } + } + return total_count; +} + +void print_bsr(UAContext *ua, RESTORE_CTX &rx) +{ + write_bsr(ua, rx, stdout); +} + +static int search_rbsr(void *elt1, void *elt2) +{ + RBSR *bsr1 = (RBSR *)elt1; + RBSR *bsr = (RBSR *)elt2; + + /* We might replace by a simple JobId - JobId */ + if (bsr->JobId == bsr1->JobId) { + return 0; + + } else if (bsr->JobId < bsr1->JobId) { + return 1; + } + + return -1; +} + +static int search_fi(void *elt1, void *elt2) +{ + RBSR_FINDEX *f1 = (RBSR_FINDEX *) elt1; + RBSR_FINDEX *f2 = (RBSR_FINDEX *) elt2; + + if (f1->findex == (f2->findex - 1)) { + return 0; + + } else if (f1->findex2 == (f2->findex2 + 1)) { + return 0; + + } else if (f1->findex >= f2->findex && f1->findex2 <= f2->findex2) { + return 0; + } + + return (f1->findex > f2->findex) ? 
1 : -1; +} + +rblist *create_bsr_list(uint32_t JobId, int findex, int findex2) +{ + RBSR *bsr = NULL; + RBSR_FINDEX *fi = NULL; + rblist *bsr_list = New(rblist(bsr, &bsr->link)); + + bsr = new_bsr(); + bsr->JobId = JobId; + + bsr_list->insert(bsr, search_rbsr); + + fi = new_findex(); + fi->findex = findex; + fi->findex2 = findex2; + + bsr->fi_list->insert(fi, search_fi); + + return bsr_list; +} + +/* + * Add a FileIndex to the list of BootStrap records. + * Here we are only dealing with JobId's and the FileIndexes + * associated with those JobIds. + * We expect that JobId, FileIndex are sorted ascending. + * + * When doing restore from tree, FileIndex are not sorted, so it can + * create gaps. + */ +void add_findex(rblist *bsr_list, uint32_t JobId, int32_t findex) +{ + RBSR *bsr, bsr2; + RBSR_FINDEX *fi, *nfi; + + if (findex == 0) { + return; /* probably a dummy directory */ + } + if (findex < 0) findex = -findex; + + bsr2.JobId = JobId; + /* Walk down list of bsrs until we find the JobId */ + bsr = (RBSR *)bsr_list->search(&bsr2, search_rbsr); + + /* The list is empty, or the JobId is not already in, + * Must add new JobId + */ + if (!bsr) { + bsr = new_bsr(); + bsr->JobId = JobId; + bsr_list->insert(bsr, search_rbsr); + } + + if (bsr->m_fi) { + fi = bsr->m_fi; + + } else { + fi = bsr->m_fi = new_findex(); + } + + fi->findex = findex; + fi->findex2 = findex; + + Dmsg1(1000, "Trying to insert %ld\n", findex); + /* try to insert our fi */ + nfi = (RBSR_FINDEX*) bsr->fi_list->insert((void *)fi, search_fi); + + /* We found an existing one, extend it */ + if (nfi != fi) { + if (findex == (nfi->findex2 + 1)) { + Dmsg2(1000, "Extend %ld-%ld\n", nfi->findex, findex); + nfi->findex2 = findex; + + } else if (findex == (nfi->findex - 1)) { + Dmsg2(1000, "Extend %ld-%ld\n", findex, nfi->findex2); + nfi->findex = findex; + + } else { + Dmsg2(1000, "Found the same values? %ld-%ld\n", nfi->findex, nfi->findex2); + } + + } else { + Dmsg2(1000, "Inserted %ld-%ld\n", fi->findex, fi->findex2); + bsr->m_fi = NULL; /* comsumed */ + } +} + +/* + * Add all possible FileIndexes to the list of BootStrap records. + * Here we are only dealing with JobId's and the FileIndexes + * associated with those JobIds. 
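 *
 * As a sketch (the JobId and FileIndexes are made-up values), add_findex()
 * above coalesces consecutive FileIndexes into [findex, findex2] ranges:
 *
 *    add_findex(bsr_list, 101, 1);   /* new range 1-1 for JobId 101 */
 *    add_findex(bsr_list, 101, 2);   /* extended to 1-2 */
 *    add_findex(bsr_list, 101, 3);   /* extended to 1-3 */
 *    add_findex(bsr_list, 101, 7);   /* not contiguous: separate range 7-7 */
 *
 * add_findex_all() below instead inserts the single range 1-INT32_MAX,
 * i.e. every file of the Job, optionally tagging the bsr with a fileregex.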
+ */ +void add_findex_all(rblist *bsr_list, uint32_t JobId, const char *fileregex) +{ + RBSR *bsr, bsr2; + RBSR_FINDEX *fi; + + bsr2.JobId = JobId; + /* Walk down list of bsrs until we find the JobId */ + bsr = (RBSR *)bsr_list->search(&bsr2, search_rbsr); + + if (!bsr) { /* Must add new JobId */ + fi = new_findex(); + fi->findex = 1; + fi->findex2 = INT32_MAX; + + bsr = new_bsr(); + bsr->JobId = JobId; + bsr->fi_list->insert(fi, search_fi); + bsr_list->insert(bsr, search_rbsr); + + if (fileregex) { + /* If we use regexp to restore, set it for each jobid */ + bsr->fileregex = bstrdup(fileregex); + } + return; + } + + /* + * At this point, bsr points to bsr containing this JobId, + */ + fi = new_findex(); + fi->findex = 1; + fi->findex2 = INT32_MAX; + bsr->fi_list->insert(fi, search_fi); + return; +} + +#ifdef needed +/* Foreach files in currrent list, send "/path/fname\0LStat\0MD5\0Delta" to FD + * row[0]=Path, row[1]=Filename, row[2]=FileIndex + * row[3]=JobId row[4]=LStat row[5]=DeltaSeq row[6]=MD5 + */ +static int sendit(void *arg, int num_fields, char **row) +{ + JCR *jcr = (JCR *)arg; + + if (job_canceled(jcr)) { + return 1; + } + + if (row[2][0] == '0') { /* discard when file_index == 0 */ + return 0; + } + + /* sending with checksum */ + if (num_fields == 7 + && row[6][0] /* skip checksum = '0' */ + && row[6][1]) + { + jcr->file_bsock->fsend("%s%s%c%s%c%s%c%s", + row[0], row[1], 0, row[4], 0, row[6], 0, row[5]); + } else { + jcr->file_bsock->fsend("%s%s%c%s%c%c%s", + row[0], row[1], 0, row[4], 0, 0, row[5]); + } + return 0; +} +#endif + +/* We list all files for a given FI structure */ +static void scan_findex(JCR *jcr, RBSR *bsr, + int32_t FirstIndex, int32_t LastIndex, + int32_t &lastFileIndex, uint32_t &lastJobId) +{ + RBSR_FINDEX *fi; + FILE_DBR fdbr; + memset(&fdbr, 0, sizeof(fdbr)); + + fi = (RBSR_FINDEX *) bsr->fi_list->first(); + while (fi) { + int32_t findex, findex2; + + /* fi points to the first item of the list, or the next item that is not + * contigous to the previous group + */ + findex = fi->findex; + findex2 = fi->findex2; + + /* Sometime (with the restore command for example), the fi_list can + * contain false gaps (1-10, 11-11, 12-20 instead of 1-20). The for loop + * is here to merge blocks and reduce the bsr output. The next while(fi) + * iteration will use the next_fi that points to the last merged element. + */ + RBSR_FINDEX *next_fi; + for (next_fi = (RBSR_FINDEX*) bsr->fi_list->next(fi); + next_fi && next_fi->findex == (findex2+1); + next_fi = (RBSR_FINDEX *) bsr->fi_list->next(next_fi)) + { + findex2 = next_fi->findex2; + } + + /* next_fi points after the current block (or to the end of the list), so + * the next while() iteration will use the next value + */ + fi = next_fi; + + /* We look if the current FI block match the volume information */ + if ((findex >= FirstIndex && findex <= LastIndex) || + (findex2 >= FirstIndex && findex2 <= LastIndex) || + (findex < FirstIndex && findex2 > LastIndex)) { + + findex = findex < FirstIndex ? FirstIndex : findex; + findex2 = findex2 > LastIndex ? 
LastIndex : findex2; + + bool dolist=false; + /* Display only new files */ + if (findex != lastFileIndex || bsr->JobId != lastJobId) { + /* Not the same file, or not the same job */ + fdbr.FileIndex = findex; + //dolist = true; + + } else if (findex2 != lastFileIndex) { + /* We are in the same job, and the first index was already generated */ + fdbr.FileIndex = findex + 1; + //dolist = true; + } + + /* Keep the current values for the next loop */ + lastJobId = bsr->JobId; + lastFileIndex = findex2; + + /* Generate if needed the list of files */ + if (dolist) { + fdbr.FileIndex2 = findex2; + fdbr.JobId = bsr->JobId; + /* New code not working */ + //db_list_files(jcr, jcr->db, &fdbr, sendit, jcr); + } + } + } +} + +/* + * Scan bsr data for a single bsr record + */ +static void scan_bsr_item(JCR *jcr, RBSR *bsr) +{ + int32_t lastFileIndex=0; + uint32_t lastJobId=0; + /* + * For a given volume, loop over all the JobMedia records. + * VolCount is the number of JobMedia records. + */ + for (int i=0; i < bsr->VolCount; i++) { + if (!is_volume_selected(bsr->fi_list, + bsr->VolParams[i].FirstIndex, + bsr->VolParams[i].LastIndex)) + { + continue; + } + + scan_findex(jcr, bsr, + bsr->VolParams[i].FirstIndex, + bsr->VolParams[i].LastIndex, + lastFileIndex, lastJobId); + } +} + +/* + * We need to find all files from the BSR. All files are listed, this is used + * to send the list of the files to be restored to a plugin for example. + */ +void scan_bsr(JCR *jcr) +{ + char *p; + JobId_t JobId; + RBSR *bsr; + if (!jcr->JobIds || *jcr->JobIds == 0) { + foreach_rblist(bsr, jcr->bsr_list) { + scan_bsr_item(jcr, bsr); + } + return; + } + for (p=jcr->JobIds; get_next_jobid_from_list(&p, &JobId) > 0; ) { + foreach_rblist(bsr, jcr->bsr_list) { + if (JobId == bsr->JobId) { + scan_bsr_item(jcr, bsr); + } + } + } + return; +} diff --git a/src/dird/bsr.h b/src/dird/bsr.h new file mode 100644 index 00000000..0e4ae358 --- /dev/null +++ b/src/dird/bsr.h @@ -0,0 +1,57 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bootstrap Record header file + * + * BSR (bootstrap record) handling routines split from + * ua_restore.c July MMIII + * + * Kern Sibbald, July MMII + */ + + + +/* FileIndex entry in restore bootstrap record */ +struct RBSR_FINDEX { + rblink link; + int32_t findex; + int32_t findex2; +}; + +/* + * Restore bootstrap record -- not the real one, but useful here + * The restore bsr is a chain of BSR records (linked by next). + * Each BSR represents a single JobId, and within it, it + * contains a linked list of file indexes for that JobId. + * The complete_bsr() routine, will then add all the volumes + * on which the Job is stored to the BSR. 
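 *
 * A minimal usage sketch (illustrative only: the JobIds and FileIndexes are
 * invented, and ua/rx stand for the UAContext and RESTORE_CTX that the
 * restore code in ua_restore.c already maintains):
 *
 *    rblist *bsr_list = create_bsr_list(101, 1, 1); /* JobId 101, file 1 */
 *    add_findex(bsr_list, 101, 2);                  /* range grows to 1-2 */
 *    add_findex(bsr_list, 102, 7);                  /* second RBSR for JobId 102 */
 *    if (complete_bsr(ua, bsr_list)) {              /* add Volume parameters */
 *       rx.bsr_list = bsr_list;
 *       write_bsr_file(ua, rx);                     /* emit the .bsr for the SD */
 *    }
 *    free_bsr(bsr_list);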
+ */ +struct RBSR { + rblink link; + JobId_t JobId; /* JobId this bsr */ + uint32_t VolSessionId; + uint32_t VolSessionTime; + int VolCount; /* Volume parameter count */ + VOL_PARAMS *VolParams; /* Volume, start/end file/blocks */ + rblist *fi_list; /* File indexes this JobId */ + char *fileregex; /* Only restore files matching regex */ + + /* If we extend an existing fi, keep the memory for the next insert */ + RBSR_FINDEX *m_fi; +}; diff --git a/src/dird/catreq.c b/src/dird/catreq.c new file mode 100644 index 00000000..482de903 --- /dev/null +++ b/src/dird/catreq.c @@ -0,0 +1,815 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula Director -- catreq.c -- handles the message channel + * catalog request from the Storage daemon. + * + * Kern Sibbald, March MMI + * + * This routine runs as a thread and must be thread reentrant. + * + * Basic tasks done here: + * Handle Catalog services. + */ + +#include "bacula.h" +#include "dird.h" +#include "findlib/find.h" + +/* + * Handle catalog request + * For now, we simply return next Volume to be used + */ + +/* Requests from the Storage daemon */ +static char Find_media[] = "CatReq JobId=%ld FindMedia=%d pool_name=%127s media_type=%127s vol_type=%d\n"; +static char Get_Vol_Info[] = "CatReq JobId=%ld GetVolInfo VolName=%127s write=%d\n"; + +static char Update_media[] = "CatReq JobId=%ld UpdateMedia VolName=%s" + " VolJobs=%u VolFiles=%u VolBlocks=%u VolBytes=%lld VolABytes=%lld" + " VolHoleBytes=%lld VolHoles=%u VolMounts=%u" + " VolErrors=%u VolWrites=%lld MaxVolBytes=%lld EndTime=%lld VolStatus=%10s" + " Slot=%d relabel=%d InChanger=%d VolReadTime=%lld VolWriteTime=%lld" + " VolFirstWritten=%lld VolType=%u VolParts=%d VolCloudParts=%d" + " LastPartBytes=%lld Enabled=%d Recycle=%d\n"; + +static char Create_jobmedia[] = "CatReq JobId=%ld CreateJobMedia\n"; + +/* Responses sent to Storage daemon */ +static char OK_media[] = "1000 OK VolName=%s VolJobs=%u VolFiles=%u" + " VolBlocks=%u VolBytes=%s VolABytes=%s VolHoleBytes=%s VolHoles=%u" + " VolMounts=%u VolErrors=%u VolWrites=%s" + " MaxVolBytes=%s VolCapacityBytes=%s VolStatus=%s Slot=%d" + " MaxVolJobs=%u MaxVolFiles=%u InChanger=%d VolReadTime=%s" + " VolWriteTime=%s EndFile=%u EndBlock=%u VolType=%u LabelType=%d" + " MediaId=%s ScratchPoolId=%s VolParts=%d VolCloudParts=%d" + " LastPartBytes=%lld Enabled=%d Recycle=%d\n"; + +static char OK_create[] = "1000 OK CreateJobMedia\n"; + + +void remove_dummy_jobmedia_records(JCR *jcr) +{ + if (jcr->dummy_jobmedia) { + char ec1[30]; + POOL_MEM buf; + Mmsg(buf, "DELETE FROM JobMedia WHERE JobId=%s AND FirstIndex=0 AND LastIndex=0", + edit_int64(jcr->JobId, ec1)); + Dmsg1(150, "Delete dummy: %s\n", buf.c_str()); + db_sql_query(jcr->db, buf.c_str(), NULL, NULL); + jcr->dummy_jobmedia = false; + } +} + +static int send_volume_info_to_storage_daemon(JCR *jcr, BSOCK *sd, MEDIA_DBR *mr) +{ + int stat; + char ed1[50], ed2[50], ed3[50], ed4[50], ed5[50], ed6[50], ed7[50], 
ed8[50], + ed9[50], ed10[50]; + + jcr->MediaId = mr->MediaId; + pm_strcpy(jcr->VolumeName, mr->VolumeName); + bash_spaces(mr->VolumeName); + stat = sd->fsend(OK_media, mr->VolumeName, mr->VolJobs, + mr->VolFiles, mr->VolBlocks, edit_uint64(mr->VolBytes, ed1), + edit_uint64(mr->VolABytes, ed2), + edit_uint64(mr->VolHoleBytes, ed3), + mr->VolHoles, mr->VolMounts, mr->VolErrors, + edit_uint64(mr->VolWrites, ed4), + edit_uint64(mr->MaxVolBytes, ed5), + edit_uint64(mr->VolCapacityBytes, ed6), + mr->VolStatus, mr->Slot, mr->MaxVolJobs, mr->MaxVolFiles, + mr->InChanger, + edit_int64(mr->VolReadTime, ed7), + edit_int64(mr->VolWriteTime, ed8), + mr->EndFile, mr->EndBlock, + mr->VolType, + mr->LabelType, + edit_uint64(mr->MediaId, ed9), + edit_uint64(mr->ScratchPoolId, ed10), + mr->VolParts, + mr->VolCloudParts, + mr->LastPartBytes, + mr->Enabled, + mr->Recycle); + unbash_spaces(mr->VolumeName); + Dmsg2(100, "Vol Info for %s: %s", jcr->Job, sd->msg); + return stat; +} + +/* TODO: See if we want to let the FD do all kind + * of catalog request/update + */ +void catalog_request(JCR *jcr, BSOCK *bs) +{ + MEDIA_DBR mr, sdmr; + JOBMEDIA_DBR jm; + char pool_name[MAX_NAME_LENGTH]; + int index, ok, label, writing; + POOLMEM *omsg; + uint64_t MediaId; + utime_t VolFirstWritten; + utime_t VolLastWritten; + int n; + int Enabled, Recycle; + JobId_t JobId = 0; + + bmemset(&sdmr, 0, sizeof(sdmr)); + bmemset(&jm, 0, sizeof(jm)); + Dsm_check(100); + + /* + * Request to find next appendable Volume for this Job + */ + Dmsg1(200, "catreq %s", bs->msg); + if (!jcr->db) { + omsg = get_memory(bs->msglen+1); + pm_strcpy(omsg, bs->msg); + bs->fsend(_("1990 Invalid Catalog Request: %s"), omsg); + Jmsg1(jcr, M_FATAL, 0, _("Invalid Catalog request; DB not open: %s"), omsg); + free_memory(omsg); + return; + } + /* + * Find next appendable medium for SD + */ + n = sscanf(bs->msg, Find_media, &JobId, &index, &pool_name, &mr.MediaType, &mr.VolType); + if (n == 5) { + POOL_MEM errmsg; + POOL_DBR pr; + bmemset(&pr, 0, sizeof(pr)); + bstrncpy(pr.Name, pool_name, sizeof(pr.Name)); + unbash_spaces(pr.Name); + ok = db_get_pool_record(jcr, jcr->db, &pr); + if (ok) { + mr.PoolId = pr.PoolId; + set_storageid_in_mr(jcr->wstore, &mr); + mr.ScratchPoolId = pr.ScratchPoolId; + ok = find_next_volume_for_append(jcr, &mr, index, fnv_create_vol, fnv_prune, errmsg); + Dmsg3(050, "find_media ok=%d idx=%d vol=%s\n", ok, index, mr.VolumeName); + } else { + /* Report problem finding pool */ + Jmsg1(jcr, M_WARNING, 0, _("Pool \"%s\" not found for SD find media request.\n"), + pr.Name); + } + /* + * Send Find Media response to Storage daemon + */ + if (ok) { + send_volume_info_to_storage_daemon(jcr, bs, &mr); + } else { + bs->fsend(_("1901 No Media. %s\n"), errmsg.c_str()); + Dmsg1(500, "1901 No Media. %s\n", errmsg.c_str()); + } + goto ok_out; + } + Dmsg1(1000, "Tried find_media. 
fields wanted=4, got=%d\n", n); + + /* + * Request to find specific Volume information + */ + n = sscanf(bs->msg, Get_Vol_Info, &JobId, &mr.VolumeName, &writing); + if (n == 3) { + Dmsg1(100, "CatReq GetVolInfo Vol=%s\n", mr.VolumeName); + /* + * Find the Volume + */ + unbash_spaces(mr.VolumeName); + if (db_get_media_record(jcr, jcr->db, &mr)) { + const char *reason = NULL; /* detailed reason for rejection */ + /* + * If we are reading, accept any volume (reason == NULL) + * If we are writing, check if the Volume is valid + * for this job, and do a recycle if necessary + */ + if (writing) { + /* + * SD wants to write this Volume, so make + * sure it is suitable for this job, i.e. + * Pool matches, and it is either Append or Recycle + * and Media Type matches and Pool allows any volume. + */ + if (mr.PoolId != jcr->jr.PoolId) { + reason = _("not in Pool"); + } else if (strcmp(mr.MediaType, jcr->wstore->media_type) != 0) { + reason = _("not correct MediaType"); + } else { + /* + * Now try recycling if necessary + * reason set non-NULL if we cannot use it + */ + check_if_volume_valid_or_recyclable(jcr, &mr, &reason); + } + } + if (!reason && mr.Enabled != 1) { + reason = _("is not Enabled"); + } + if (reason == NULL) { + /* + * Send Find Media response to Storage daemon + */ + send_volume_info_to_storage_daemon(jcr, bs, &mr); + } else { + /* Not suitable volume */ + bs->fsend(_("1998 Volume \"%s\" catalog status is %s, %s.\n"), mr.VolumeName, + mr.VolStatus, reason); + } + + } else { + bs->fsend(_("1997 Volume \"%s\" not in catalog.\n"), mr.VolumeName); + Dmsg1(100, "1997 Volume \"%s\" not in catalog.\n", mr.VolumeName); + } + goto ok_out; + } + Dmsg1(1000, "Tried get_vol_info. fields wanted=3, got=%d\n", n); + + + /* + * Request to update Media record. Comes typically at the end + * of a Storage daemon Job Session, when labeling/relabeling a + * Volume, or when an EOF mark is written. 
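 *
 * The Update_media string defined at the top of this file documents the
 * wire format and is also the sscanf() pattern used just below. A condensed
 * sketch with only a few of the fields (values are hypothetical; the real
 * scan expects all 27 fields, and spaces in the Volume name arrive encoded,
 * hence the unbash_spaces() calls):
 *
 *    char msg[] = "CatReq JobId=42 UpdateMedia VolName=Vol-0001"
 *                 " VolJobs=3 VolFiles=7\n";
 *    long JobId; unsigned VolJobs, VolFiles; char VolName[128];
 *    int n = sscanf(msg, "CatReq JobId=%ld UpdateMedia VolName=%127s"
 *                   " VolJobs=%u VolFiles=%u",
 *                   &JobId, VolName, &VolJobs, &VolFiles);   /* n == 4 */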
+ */ + n = sscanf(bs->msg, Update_media, &JobId, &sdmr.VolumeName, + &sdmr.VolJobs, &sdmr.VolFiles, &sdmr.VolBlocks, &sdmr.VolBytes, + &sdmr.VolABytes, &sdmr.VolHoleBytes, &sdmr.VolHoles, + &sdmr.VolMounts, &sdmr.VolErrors, &sdmr.VolWrites, &sdmr.MaxVolBytes, + &VolLastWritten, &sdmr.VolStatus, &sdmr.Slot, &label, &sdmr.InChanger, + &sdmr.VolReadTime, &sdmr.VolWriteTime, &VolFirstWritten, + &sdmr.VolType, &sdmr.VolParts, &sdmr.VolCloudParts, + &sdmr.LastPartBytes, &Enabled, &Recycle); + if (n == 27) { + db_lock(jcr->db); + Dmsg3(400, "Update media %s oldStat=%s newStat=%s\n", sdmr.VolumeName, + mr.VolStatus, sdmr.VolStatus); + bstrncpy(mr.VolumeName, sdmr.VolumeName, sizeof(mr.VolumeName)); /* copy Volume name */ + unbash_spaces(mr.VolumeName); + if (!db_get_media_record(jcr, jcr->db, &mr)) { + Jmsg(jcr, M_ERROR, 0, _("Unable to get Media record for Volume %s: ERR=%s\n"), + mr.VolumeName, db_strerror(jcr->db)); + bs->fsend(_("1991 Catalog Request for vol=%s failed: %s"), + mr.VolumeName, db_strerror(jcr->db)); + db_unlock(jcr->db); + return; + } + /* Set first written time if this is first job */ + if (mr.FirstWritten == 0) { + if (VolFirstWritten == 0) { + mr.FirstWritten = jcr->start_time; /* use Job start time as first write */ + } else { + mr.FirstWritten = VolFirstWritten; + } + mr.set_first_written = true; + } + /* If we just labeled the tape set time */ + if (label || mr.LabelDate == 0) { + mr.LabelDate = jcr->start_time; + mr.set_label_date = true; + if (mr.InitialWrite == 0) { + mr.InitialWrite = jcr->start_time; + } + Dmsg2(400, "label=%d labeldate=%d\n", label, mr.LabelDate); + } else { + /* + * Insanity check for VolFiles get set to a smaller value + */ + if (sdmr.VolFiles < mr.VolFiles) { + Jmsg(jcr, M_INFO, 0, _("Attempt to set Volume Files from %u to %u" + " for Volume \"%s\". Ignored.\n"), + mr.VolFiles, sdmr.VolFiles, mr.VolumeName); + sdmr.VolFiles = mr.VolFiles; /* keep orginal value */ + } + } + Dmsg2(400, "Update media: BefVolJobs=%u After=%u\n", mr.VolJobs, sdmr.VolJobs); + + /* + * Check if the volume has been written by the job, + * and update the LastWritten field if needed. + */ + if (mr.VolBlocks != sdmr.VolBlocks && VolLastWritten != 0) { + mr.LastWritten = VolLastWritten; + } + + /* + * Update to point to the last device used to write the Volume. + * However, do so only if we are writing the tape, i.e. + * the number of VolWrites has increased. 
+ */ + if (jcr->wstore && sdmr.VolWrites > mr.VolWrites) { + Dmsg2(050, "Update StorageId old=%d new=%d\n", + mr.StorageId, jcr->wstore->StorageId); + /* Update StorageId after write */ + set_storageid_in_mr(jcr->wstore, &mr); + } else { + /* Nothing written, reset same StorageId */ + set_storageid_in_mr(NULL, &mr); + } + + /* Copy updated values to original media record */ + mr.VolJobs = sdmr.VolJobs; + mr.VolFiles = sdmr.VolFiles; + mr.VolBlocks = sdmr.VolBlocks; + mr.VolBytes = sdmr.VolBytes; + mr.VolABytes = sdmr.VolABytes; + mr.VolHoleBytes = sdmr.VolHoleBytes; + mr.VolHoles = sdmr.VolHoles; + mr.VolMounts = sdmr.VolMounts; + mr.VolErrors = sdmr.VolErrors; + mr.VolWrites = sdmr.VolWrites; + mr.Slot = sdmr.Slot; + mr.InChanger = sdmr.InChanger; + mr.VolType = sdmr.VolType; + mr.VolParts = sdmr.VolParts; + mr.VolCloudParts = sdmr.VolCloudParts; + mr.LastPartBytes = sdmr.LastPartBytes; + mr.Enabled = Enabled; /* byte assignment */ + mr.Recycle = Recycle; /* byte assignment */ + bstrncpy(mr.VolStatus, sdmr.VolStatus, sizeof(mr.VolStatus)); + if (sdmr.VolReadTime >= 0) { + mr.VolReadTime = sdmr.VolReadTime; + } + if (sdmr.VolWriteTime >= 0) { + mr.VolWriteTime = sdmr.VolWriteTime; + } + + Dmsg2(400, "db_update_media_record. Stat=%s Vol=%s\n", mr.VolStatus, mr.VolumeName); + /* + * Update the database, then before sending the response to the + * SD, check if the Volume has expired. + */ + if (!db_update_media_record(jcr, jcr->db, &mr)) { + Jmsg(jcr, M_FATAL, 0, _("Catalog error updating Media record. %s"), + db_strerror(jcr->db)); + bs->fsend(_("1993 Update Media error\n")); + Pmsg0(000, "1993 Update Media error\n"); + } else { + (void)has_volume_expired(jcr, &mr); + send_volume_info_to_storage_daemon(jcr, bs, &mr); + } + db_unlock(jcr->db); + goto ok_out; + } + Dmsg1(1000, "Tried update_media. fields wanted=25, got=%d\n", n); + + /* + * Request to create a JobMedia record(s) + */ + if (sscanf(bs->msg, Create_jobmedia, &JobId) == 1) { + if (jcr->wjcr) { + jm.JobId = jcr->wjcr->JobId; + } else { + jm.JobId = jcr->JobId; + } + ok = true; + db_lock(jcr->db); + db_start_transaction(jcr, jcr->db); + while (bs->recv() >= 0) { + if (ok && sscanf(bs->msg, "%u %u %u %u %u %u %lld\n", + &jm.FirstIndex, &jm.LastIndex, &jm.StartFile, &jm.EndFile, + &jm.StartBlock, &jm.EndBlock, &MediaId) != 7) { + Jmsg(jcr, M_FATAL, 0, _("Error scanning create JobMedia request: %s\n"), + bs->msg); + ok = false; + continue; + } + if (ok) { + jm.MediaId = MediaId; + Dmsg6(400, "create_jobmedia JobId=%ld MediaId=%lu SF=%lu EF=%lu FI=%lu LI=%lu\n", + jm.JobId, jm.MediaId, jm.StartFile, jm.EndFile, jm.FirstIndex, jm.LastIndex); + ok = db_create_jobmedia_record(jcr, jcr->db, &jm); + if (!ok) { + Jmsg(jcr, M_FATAL, 0, _("Catalog error creating JobMedia record. %s"), + db_strerror(jcr->db)); + } + if (jm.FirstIndex == 0 && jm.LastIndex == 0) { + jcr->dummy_jobmedia = true; + } + } + } + db_end_transaction(jcr, jcr->db); + db_unlock(jcr->db); + if (!ok) { + bs->fsend(_("1992 Create JobMedia error\n")); + goto ok_out; + } + Dmsg0(400, "JobMedia record created\n"); + bs->fsend(OK_create); + goto ok_out; + } + + /* Handle snapshot catalog request */ + if (snapshot_catreq(jcr, bs)) { + goto ok_out; + } + + Dmsg1(1000, "Tried create_jobmedia. fields wanted=10, got=%d\n", n); + + /* Everything failed. Send error message. 
*/ + omsg = get_memory(bs->msglen+1); + pm_strcpy(omsg, bs->msg); + bs->fsend(_("1990 Invalid Catalog Request: %s"), omsg); + Jmsg1(jcr, M_FATAL, 0, _("Invalid Catalog request: %s"), omsg); + free_memory(omsg); + +ok_out: + Dmsg1(400, ">CatReq response: %s", bs->msg); + Dmsg1(400, "Leave catreq jcr 0x%x\n", jcr); + return; +} + +/* + * Note, we receive the whole attribute record, but we select out only the stat + * packet, VolSessionId, VolSessionTime, FileIndex, file type, and file name to + * store in the catalog. + */ +static void update_attribute(JCR *jcr, char *msg, int32_t msglen) +{ + unser_declare; + uint32_t VolSessionId, VolSessionTime; + int32_t Stream; + int32_t FileIndex; + char *p; + int len; + char *fname, *attr; + ATTR_DBR *ar = NULL; + uint32_t reclen; + + /* Start transaction allocates jcr->attr and jcr->ar if needed */ + db_start_transaction(jcr, jcr->db); /* start transaction if not already open */ + ar = jcr->ar; + + /* + * Start by scanning directly in the message buffer to get Stream + * there may be a cached attr so we cannot yet write into + * jcr->attr or jcr->ar + */ + p = msg; + skip_nonspaces(&p); /* UpdCat */ + skip_spaces(&p); + skip_nonspaces(&p); /* Job=nnn */ + skip_spaces(&p); + skip_nonspaces(&p); /* "FileAttributes" */ + p += 1; + /* The following "SD header" fields are serialized */ + unser_begin(p, 0); + unser_uint32(VolSessionId); /* VolSessionId */ + unser_uint32(VolSessionTime); /* VolSessionTime */ + unser_int32(FileIndex); /* FileIndex */ + unser_int32(Stream); /* Stream */ + unser_uint32(reclen); /* Record length */ + p += unser_length(p); /* Raw record follows */ + + /** + * At this point p points to the raw record, which varies according + * to what kind of a record (Stream) was sent. Note, the integer + * fields at the beginning of these "raw" records are in ASCII with + * spaces between them so one can use scanf or manual scanning to + * extract the fields. + * + * File Attributes + * File_index + * File type + * Filename (full path) + * Encoded attributes + * Link name (if type==FT_LNK or FT_LNKSAVED) + * Encoded extended-attributes (for Win32) + * Delta sequence number (32 bit int) + * + * Restore Object + * File_index + * File_type + * Object_index + * Object_len (possibly compressed) + * Object_full_len (not compressed) + * Object_compression + * Plugin_name + * Object_name + * Binary Object data + */ + + Dmsg1(400, "UpdCat msg=%s\n", msg); + Dmsg5(400, "UpdCat VolSessId=%d VolSessT=%d FI=%d Strm=%d reclen=%d\n", + VolSessionId, VolSessionTime, FileIndex, Stream, reclen); + + if (Stream == STREAM_UNIX_ATTRIBUTES || Stream == STREAM_UNIX_ATTRIBUTES_EX) { + if (jcr->cached_attribute) { + Dmsg2(400, "Cached attr. 
Stream=%d fname=%s\n", ar->Stream, ar->fname); + if (!db_create_attributes_record(jcr, jcr->db, ar)) { + Jmsg1(jcr, M_FATAL, 0, _("Attribute create error: ERR=%s"), db_strerror(jcr->db)); + } + jcr->cached_attribute = false; + } + /* Any cached attr is flushed so we can reuse jcr->attr and jcr->ar */ + jcr->attr = check_pool_memory_size(jcr->attr, msglen); + memcpy(jcr->attr, msg, msglen); + p = jcr->attr - msg + p; /* point p into jcr->attr */ + skip_nonspaces(&p); /* skip FileIndex */ + skip_spaces(&p); + ar->FileType = str_to_int32(p); + skip_nonspaces(&p); /* skip FileType */ + skip_spaces(&p); + fname = p; + len = strlen(fname); /* length before attributes */ + attr = &fname[len+1]; + ar->DeltaSeq = 0; + if (ar->FileType == FT_REG) { + p = attr + strlen(attr) + 1; /* point to link */ + p = p + strlen(p) + 1; /* point to extended attributes */ + p = p + strlen(p) + 1; /* point to delta sequence */ + /* + * Older FDs don't have a delta sequence, so check if it is there + */ + if (p - jcr->attr < msglen) { + ar->DeltaSeq = str_to_int32(p); /* delta_seq */ + } + } + + Dmsg2(400, "dirdattr = attr; + ar->fname = fname; + if (ar->FileType == FT_DELETED) { + FileIndex = -FileIndex; + ar->FileIndex = FileIndex; /* special value */ + } else { + ar->FileIndex = FileIndex; + } + ar->Stream = Stream; + ar->link = NULL; + if (jcr->wjcr) { + ar->JobId = jcr->wjcr->JobId; + Dmsg1(100, "=== set JobId=%d\n", ar->JobId); + } else { + ar->JobId = jcr->JobId; + } + ar->Digest = NULL; + ar->DigestType = CRYPTO_DIGEST_NONE; + jcr->cached_attribute = true; + + Dmsg2(400, "dirdwjcr) { + ro.JobId = jcr->wjcr->JobId; + Dmsg1(100, "=== set JobId=%ld\n", ar->JobId); + } else { + ro.JobId = jcr->JobId; + } + + Dmsg1(100, "Robj=%s\n", p); + + skip_nonspaces(&p); /* skip FileIndex */ + skip_spaces(&p); + ro.FileType = str_to_int32(p); /* FileType */ + skip_nonspaces(&p); + skip_spaces(&p); + ro.object_index = str_to_int32(p); /* Object Index */ + skip_nonspaces(&p); + skip_spaces(&p); + ro.object_len = str_to_int32(p); /* object length possibly compressed */ + skip_nonspaces(&p); + skip_spaces(&p); + ro.object_full_len = str_to_int32(p); /* uncompressed object length */ + skip_nonspaces(&p); + skip_spaces(&p); + ro.object_compression = str_to_int32(p); /* compression */ + skip_nonspaces(&p); + skip_spaces(&p); + + ro.plugin_name = p; /* point to plugin name */ + len = strlen(ro.plugin_name); + ro.object_name = &ro.plugin_name[len+1]; /* point to object name */ + len = strlen(ro.object_name); + ro.object = &ro.object_name[len+1]; /* point to object */ + ro.object[ro.object_len] = 0; /* add zero for those who attempt printing */ + Dmsg7(100, "oname=%s stream=%d FT=%d FI=%d JobId=%ld, obj_len=%d\nobj=\"%s\"\n", + ro.object_name, ro.Stream, ro.FileType, ro.FileIndex, ro.JobId, + ro.object_len, ro.object); + /* Send it */ + if (!db_create_restore_object_record(jcr, jcr->db, &ro)) { + Jmsg1(jcr, M_FATAL, 0, _("Restore object create error. 
%s"), db_strerror(jcr->db)); + } + + } else if (crypto_digest_stream_type(Stream) != CRYPTO_DIGEST_NONE) { + fname = p; + if (ar->FileIndex < 0) FileIndex = -FileIndex; + if (ar->FileIndex != FileIndex) { + Jmsg3(jcr, M_WARNING, 0, _("%s not same FileIndex=%d as attributes FI=%d\n"), + stream_to_ascii(Stream), FileIndex, ar->FileIndex); + } else { + /* Update digest in catalog */ + char digestbuf[BASE64_SIZE(CRYPTO_DIGEST_MAX_SIZE)]; + int len = 0; + int type = CRYPTO_DIGEST_NONE; + + switch(Stream) { + case STREAM_MD5_DIGEST: + len = CRYPTO_DIGEST_MD5_SIZE; + type = CRYPTO_DIGEST_MD5; + break; + case STREAM_SHA1_DIGEST: + len = CRYPTO_DIGEST_SHA1_SIZE; + type = CRYPTO_DIGEST_SHA1; + break; + case STREAM_SHA256_DIGEST: + len = CRYPTO_DIGEST_SHA256_SIZE; + type = CRYPTO_DIGEST_SHA256; + break; + case STREAM_SHA512_DIGEST: + len = CRYPTO_DIGEST_SHA512_SIZE; + type = CRYPTO_DIGEST_SHA512; + break; + default: + /* Never reached ... */ + Jmsg(jcr, M_ERROR, 0, _("Catalog error updating file digest. Unsupported digest stream type: %d"), + Stream); + } + + if (len != 0) { + bin_to_base64(digestbuf, sizeof(digestbuf), fname, len, true); + Dmsg3(400, "DigestLen=%d Digest=%s type=%d\n", strlen(digestbuf), + digestbuf, Stream); + } else { + digestbuf[0] = 0; + } + if (jcr->cached_attribute) { + ar->Digest = digestbuf; + ar->DigestType = type; + Dmsg2(400, "Cached attr with digest. Stream=%d fname=%s\n", + ar->Stream, ar->fname); + + /* Update BaseFile table */ + if (!db_create_attributes_record(jcr, jcr->db, ar)) { + Jmsg1(jcr, M_FATAL, 0, _("attribute create error. ERR=%s"), + db_strerror(jcr->db)); + } + jcr->cached_attribute = false; + } else if (ar->FileId != 0) { + if (!db_add_digest_to_file_record(jcr, jcr->db, ar->FileId, digestbuf, type)) { + Jmsg(jcr, M_ERROR, 0, _("Catalog error updating file digest. %s"), + db_strerror(jcr->db)); + } + } else { /* Something is wrong FileId == 0 */ + Jmsg(jcr, M_WARNING, 0, "Illegal FileId in update attribute: FileId=0 Stream=%d fname=%s\n", + ar->Stream, ar->fname); + } + } + } +} + +/* + * Update File Attributes in the catalog with data + * sent by the Storage daemon. + */ +void catalog_update(JCR *jcr, BSOCK *bs) +{ + if (!jcr->pool->catalog_files) { + return; /* user disabled cataloging */ + } + if (jcr->is_job_canceled()) { + goto bail_out; + } + if (!jcr->db) { + POOLMEM *omsg = get_memory(bs->msglen+1); + pm_strcpy(omsg, bs->msg); + bs->fsend(_("1994 Invalid Catalog Update: %s"), omsg); + Jmsg1(jcr, M_FATAL, 0, _("Invalid Catalog Update; DB not open: %s"), omsg); + free_memory(omsg); + goto bail_out; + } + update_attribute(jcr, bs->msg, bs->msglen); + +bail_out: + if (jcr->is_job_canceled()) { + jcr->cached_attribute = false; + cancel_storage_daemon_job(jcr); + } +} + +/* + * Update File Attributes in the catalog with data read from + * the storage daemon spool file. We receive the filename and + * we try to read it. 
+ */ +bool despool_attributes_from_file(JCR *jcr, const char *file) +{ + bool ret=false; + int32_t pktsiz; + ssize_t nbytes; + ssize_t size = 0; + int32_t msglen; /* message length */ + POOLMEM *msg = get_pool_memory(PM_MESSAGE); + FILE *spool_fd=NULL; + int32_t recnum = 0; + + Dmsg1(100, "Begin despool_attributes_from_file\n", file); + + if (jcr->is_job_canceled() || !jcr->pool->catalog_files || !jcr->db) { + goto bail_out; /* user disabled cataloging */ + } + + spool_fd = bfopen(file, "rb"); + //Dmsg1(000, "Open attr read file=%s\n", file); + if (!spool_fd) { + Dmsg0(100, "cancel despool_attributes_from_file\n"); + /* send an error message */ + goto bail_out; + } +#if defined(HAVE_POSIX_FADVISE) && defined(POSIX_FADV_WILLNEED) + posix_fadvise(fileno(spool_fd), 0, 0, POSIX_FADV_WILLNEED); +#endif + + /* + * We read the attributes file or stream from the SD. It should + * be in the following format: + * + * 1. 4 bytes representing the record length + * 2. An attribute string starting with: UpdCat Job=nnn FileAttributes ... + */ + for ( ;; ) { + nbytes = fread((char *)&pktsiz, 1, sizeof(int32_t), spool_fd); + if (nbytes == 0) { /* EOF */ + break; + } + if (nbytes != sizeof(int32_t)) { + Dmsg2(000, "Error: attr read status=%lld addr=%lld\n", nbytes, ftello(spool_fd)); + break; + } + size += sizeof(int32_t); + msglen = ntohl(pktsiz); + if (msglen > 10000000) { + Qmsg1(jcr, M_FATAL, 0, _("fread attr spool error. Wanted %ld bytes, maximum permitted 10000000 bytes\n"), msglen); + goto bail_out; + } + if (msglen > 0) { + if (msglen > (int32_t)sizeof_pool_memory(msg)) { + msg = realloc_pool_memory(msg, msglen + 1); + } + nbytes = fread(msg, 1, msglen, spool_fd); + recnum++; + if (nbytes > 0 && strncmp(msg, "UpdCat Job", 10) != 0) { + Dmsg3(000, "Error: recnum=%ld nbytes=%lld msg=%s\n", recnum, nbytes, msg); + } + if (nbytes != (ssize_t)msglen) { + berrno be; + boffset_t size; + size = ftello(spool_fd); + Dmsg4(000, "Error at size=%lld record %ld: got nbytes=%lld, want msglen=%ld\n", size, recnum, (int32_t)nbytes, msglen); + Qmsg3(jcr, M_FATAL, 0, _("fread attr spool error. Wanted %ld bytes but got %lld ERR=%s\n"), + msglen, nbytes, be.bstrerror()); + goto bail_out; + } + size += nbytes; + } + if (!jcr->is_job_canceled()) { + update_attribute(jcr, msg, msglen); + if (jcr->is_job_canceled() || (jcr->wjcr && jcr->wjcr->is_job_canceled())) { + goto bail_out; + } + } + } + if (ferror(spool_fd)) { + berrno be; + Qmsg1(jcr, M_FATAL, 0, _("fread attr spool error. ERR=%s\n"), + be.bstrerror()); + Dmsg1(050, "fread attr spool error. ERR=%s\n", be.bstrerror()); + goto bail_out; + } + ret = true; + +bail_out: + if (spool_fd) { + //Dmsg1(000, "Close attr read file=%s\n", file); + fclose(spool_fd); + } + + if (jcr->is_job_canceled()) { + jcr->cached_attribute = false; + cancel_storage_daemon_job(jcr); + } + + free_pool_memory(msg); + Dmsg1(100, "End despool_attributes_from_file ret=%i\n", ret); + return ret; +} diff --git a/src/dird/dir_plugins.c b/src/dird/dir_plugins.c new file mode 100644 index 00000000..59f28eab --- /dev/null +++ b/src/dird/dir_plugins.c @@ -0,0 +1,538 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Main program to test loading and running Bacula plugins. + * Destined to become Bacula pluginloader, ... + * + * Kern Sibbald, October 2007 + */ +#include "bacula.h" +#include "dird.h" +#include "dir_plugins.h" + +const int dbglvl = 50; +const char *plugin_type = "-dir.so"; + + +/* Forward referenced functions */ +static bRC baculaGetValue(bpContext *ctx, brDirVariable var, void *value); +static bRC baculaSetValue(bpContext *ctx, bwDirVariable var, void *value); +static bRC baculaRegisterEvents(bpContext *ctx, ...); +static bRC baculaJobMsg(bpContext *ctx, const char *file, int line, + int type, utime_t mtime, const char *fmt, ...); +static bRC baculaDebugMsg(bpContext *ctx, const char *file, int line, + int level, const char *fmt, ...); +static bool is_plugin_compatible(Plugin *plugin); + + +/* Bacula info */ +static bDirInfo binfo = { + sizeof(bDirFuncs), + DIR_PLUGIN_INTERFACE_VERSION +}; + +/* Bacula entry points */ +static bDirFuncs bfuncs = { + sizeof(bDirFuncs), + DIR_PLUGIN_INTERFACE_VERSION, + baculaRegisterEvents, + baculaGetValue, + baculaSetValue, + baculaJobMsg, + baculaDebugMsg +}; + +/* + * Bacula private context + */ +struct bacula_ctx { + JCR *jcr; /* jcr for plugin */ + bRC rc; /* last return code */ + bool disabled; /* set if plugin disabled */ +}; + +static bool is_plugin_disabled(bpContext *plugin_ctx) +{ + bacula_ctx *b_ctx; + if (!plugin_ctx) { + return true; + } + b_ctx = (bacula_ctx *)plugin_ctx->bContext; + return b_ctx->disabled; +} + +#ifdef needed +static bool is_plugin_disabled(JCR *jcr) +{ + return is_plugin_disabled(jcr->plugin_ctx); +} +#endif + +/* + * Create a plugin event + */ +int generate_plugin_event(JCR *jcr, bDirEventType eventType, void *value) +{ + bpContext *plugin_ctx; + bDirEvent event; + Plugin *plugin; + int i = 0; + bRC rc = bRC_OK; + + if (!b_plugin_list || !jcr || !jcr->plugin_ctx_list) { + return bRC_OK; /* Return if no plugins loaded */ + } + if (jcr->is_job_canceled()) { + return bRC_Cancel; + } + + bpContext *plugin_ctx_list = (bpContext *)jcr->plugin_ctx_list; + event.eventType = eventType; + + Dmsg2(dbglvl, "dir-plugin_ctx_list=%p JobId=%d\n", jcr->plugin_ctx_list, jcr->JobId); + + foreach_alist_index(i, plugin, b_plugin_list) { + plugin_ctx = &plugin_ctx_list[i]; + if (is_plugin_disabled(plugin_ctx)) { + continue; + } + rc = dirplug_func(plugin)->handlePluginEvent(plugin_ctx, &event, value); + if (rc != bRC_OK) { + break; + } + } + + return rc; +} + +/* + * Print to file the plugin info. 
+ */ +void dump_dir_plugin(Plugin *plugin, FILE *fp) +{ + if (!plugin) { + return ; + } + pDirInfo *info = (pDirInfo *) plugin->pinfo; + fprintf(fp, "\tversion=%d\n", info->version); + fprintf(fp, "\tdate=%s\n", NPRTB(info->plugin_date)); + fprintf(fp, "\tmagic=%s\n", NPRTB(info->plugin_magic)); + fprintf(fp, "\tauthor=%s\n", NPRTB(info->plugin_author)); + fprintf(fp, "\tlicence=%s\n", NPRTB(info->plugin_license)); + fprintf(fp, "\tversion=%s\n", NPRTB(info->plugin_version)); + fprintf(fp, "\tdescription=%s\n", NPRTB(info->plugin_description)); +} + +/** + * This entry point is called internally by Bacula to ensure + * that the plugin IO calls come into this code. + */ +void load_dir_plugins(const char *plugin_dir) +{ + Plugin *plugin; + int i; + + Dmsg0(dbglvl, "Load Director plugins\n"); + if (!plugin_dir) { + Dmsg0(dbglvl, "No Director plugin directory!\n"); + return; + } + b_plugin_list = New(alist(10, not_owned_by_alist)); + if (!load_plugins((void *)&binfo, (void *)&bfuncs, plugin_dir, plugin_type, + is_plugin_compatible)) { + /* Either none found, or some error */ + if (b_plugin_list->size() == 0) { + delete b_plugin_list; + b_plugin_list = NULL; + Dmsg0(dbglvl, "No plugins loaded\n"); + return; + } + } + /* + * Verify that the plugin is acceptable, and print information + * about it. + */ + foreach_alist_index(i, plugin, b_plugin_list) { + Jmsg(NULL, M_INFO, 0, _("Loaded plugin: %s\n"), plugin->file); + Dmsg1(dbglvl, "Loaded plugin: %s\n", plugin->file); + } + + Dmsg1(dbglvl, "num plugins=%d\n", b_plugin_list->size()); + dbg_plugin_add_hook(dump_dir_plugin); +} + +/** + * Check if a plugin is compatible. Called by the load_plugin function + * to allow us to verify the plugin. + */ +static bool is_plugin_compatible(Plugin *plugin) +{ + pDirInfo *info = (pDirInfo *)plugin->pinfo; + Dmsg0(50, "is_plugin_compatible called\n"); + if (chk_dbglvl(50)) { + dump_dir_plugin(plugin, stdin); + } + if (strcmp(info->plugin_magic, DIR_PLUGIN_MAGIC) != 0) { + Jmsg(NULL, M_ERROR, 0, _("Plugin magic wrong. Plugin=%s wanted=%s got=%s\n"), + plugin->file, DIR_PLUGIN_MAGIC, info->plugin_magic); + Dmsg3(50, "Plugin magic wrong. Plugin=%s wanted=%s got=%s\n", + plugin->file, DIR_PLUGIN_MAGIC, info->plugin_magic); + + return false; + } + if (info->version != DIR_PLUGIN_INTERFACE_VERSION) { + Jmsg(NULL, M_ERROR, 0, _("Plugin version incorrect. Plugin=%s wanted=%d got=%d\n"), + plugin->file, DIR_PLUGIN_INTERFACE_VERSION, info->version); + Dmsg3(50, "Plugin version incorrect. Plugin=%s wanted=%d got=%d\n", + plugin->file, DIR_PLUGIN_INTERFACE_VERSION, info->version); + return false; + } + if (strcmp(info->plugin_license, "Bacula AGPLv3") != 0 && + strcmp(info->plugin_license, "AGPLv3") != 0 && + strcmp(info->plugin_license, "Bacula") != 0) { + Jmsg(NULL, M_ERROR, 0, _("Plugin license incompatible. Plugin=%s license=%s\n"), + plugin->file, info->plugin_license); + Dmsg2(50, "Plugin license incompatible. Plugin=%s license=%s\n", + plugin->file, info->plugin_license); + return false; + } + if (info->size != sizeof(pDirInfo)) { + Jmsg(NULL, M_ERROR, 0, + _("Plugin size incorrect. 
Plugin=%s wanted=%d got=%d\n"), + plugin->file, sizeof(pDirInfo), info->size); + return false; + } + + return true; +} + + +/* + * Create a new instance of each plugin for this Job + */ +void new_plugins(JCR *jcr) +{ + Plugin *plugin; + int i = 0; + + Dmsg0(dbglvl, "=== enter new_plugins ===\n"); + if (!b_plugin_list) { + Dmsg0(dbglvl, "No Director plugin list!\n"); + return; + } + if (jcr->is_job_canceled()) { + return; + } + + int num = b_plugin_list->size(); + + Dmsg1(dbglvl, "dir-plugin-list size=%d\n", num); + if (num == 0) { + return; + } + + jcr->plugin_ctx_list = (bpContext *)malloc(sizeof(bpContext) * num); + + bpContext *plugin_ctx_list = jcr->plugin_ctx_list; + Dmsg2(dbglvl, "Instantiate dir-plugin_ctx_list=%p JobId=%d\n", jcr->plugin_ctx_list, jcr->JobId); + foreach_alist_index(i, plugin, b_plugin_list) { + /* Start a new instance of each plugin */ + bacula_ctx *b_ctx = (bacula_ctx *)malloc(sizeof(bacula_ctx)); + memset(b_ctx, 0, sizeof(bacula_ctx)); + b_ctx->jcr = jcr; + plugin_ctx_list[i].bContext = (void *)b_ctx; + plugin_ctx_list[i].pContext = NULL; + if (dirplug_func(plugin)->newPlugin(&plugin_ctx_list[i]) != bRC_OK) { + b_ctx->disabled = true; + } + } +} + +/* + * Free the plugin instances for this Job + */ +void free_plugins(JCR *jcr) +{ + Plugin *plugin; + int i = 0; + + if (!b_plugin_list || !jcr->plugin_ctx_list) { + return; + } + + bpContext *plugin_ctx_list = (bpContext *)jcr->plugin_ctx_list; + Dmsg2(dbglvl, "Free instance dir-plugin_ctx_list=%p JobId=%d\n", jcr->plugin_ctx_list, jcr->JobId); + foreach_alist_index(i, plugin, b_plugin_list) { + /* Free the plugin instance */ + dirplug_func(plugin)->freePlugin(&plugin_ctx_list[i]); + free(plugin_ctx_list[i].bContext); /* free Bacula private context */ + } + free(plugin_ctx_list); + jcr->plugin_ctx_list = NULL; +} + + +/* ============================================================== + * + * Callbacks from the plugin + * + * ============================================================== + */ +static bRC baculaGetValue(bpContext *ctx, brDirVariable var, void *value) +{ + JCR *jcr; + POOL_DBR pr; + bRC ret = bRC_OK; + + if (!ctx) { + return bRC_Error; + } + jcr = ((bacula_ctx *)ctx->bContext)->jcr; + if (!jcr) { + return bRC_Error; + } + if (!value) { + return bRC_Error; + } + switch (var) { + case bDirVarJobId: + *((int *)value) = jcr->JobId; + Dmsg1(dbglvl, "dir-plugin: return bDirVarJobId=%d\n", jcr->JobId); + break; + case bDirVarJobName: + *((char **)value) = jcr->Job; + Dmsg1(dbglvl, "Bacula: return Job name=%s\n", jcr->Job); + break; + case bDirVarJob: + *((char **)value) = jcr->job->hdr.name; + Dmsg1(dbglvl, "Bacula: return bDirVarJob=%s\n", jcr->job->hdr.name); + break; + case bDirVarLevel: + *((int *)value) = jcr->getJobLevel(); + Dmsg1(dbglvl, "Bacula: return bDirVarLevel=%c\n", jcr->getJobLevel()); + break; + case bDirVarType: + *((int *)value) = jcr->getJobType(); + Dmsg1(dbglvl, "Bacula: return bDirVarType=%c\n", jcr->getJobType()); + break; + case bDirVarClient: + *((char **)value) = jcr->client->hdr.name; + Dmsg1(dbglvl, "Bacula: return bDirVarClient=%s\n", jcr->client->hdr.name); + break; + case bDirVarNumVols: + bmemset(&pr, 0, sizeof(pr)); + bstrncpy(pr.Name, jcr->pool->hdr.name, sizeof(pr.Name)); + if (!db_get_pool_numvols(jcr, jcr->db, &pr)) { + ret=bRC_Error; + } + *((int *)value) = pr.NumVols; + Dmsg1(dbglvl, "Bacula: return bDirVarNumVols=%d\n", pr.NumVols); + break; + case bDirVarPool: + *((char **)value) = jcr->pool->hdr.name; + Dmsg1(dbglvl, "Bacula: return bDirVarPool=%s\n", 
jcr->pool->hdr.name); + break; + case bDirVarStorage: + if (jcr->wstore) { + *((char **)value) = jcr->wstore->hdr.name; + } else if (jcr->rstore) { + *((char **)value) = jcr->rstore->hdr.name; + } else { + *((char **)value) = NULL; + ret=bRC_Error; + } + Dmsg1(dbglvl, "Bacula: return bDirVarStorage=%s\n", NPRT(*((char **)value))); + break; + case bDirVarWriteStorage: + if (jcr->wstore) { + *((char **)value) = jcr->wstore->hdr.name; + } else { + *((char **)value) = NULL; + ret=bRC_Error; + } + Dmsg1(dbglvl, "Bacula: return bDirVarWriteStorage=%s\n", NPRT(*((char **)value))); + break; + case bDirVarReadStorage: + if (jcr->rstore) { + *((char **)value) = jcr->rstore->hdr.name; + } else { + *((char **)value) = NULL; + ret=bRC_Error; + } + Dmsg1(dbglvl, "Bacula: return bDirVarReadStorage=%s\n", NPRT(*((char **)value))); + break; + case bDirVarCatalog: + *((char **)value) = jcr->catalog->hdr.name; + Dmsg1(dbglvl, "Bacula: return bDirVarCatalog=%s\n", jcr->catalog->hdr.name); + break; + case bDirVarMediaType: + if (jcr->wstore) { + *((char **)value) = jcr->wstore->media_type; + } else if (jcr->rstore) { + *((char **)value) = jcr->rstore->media_type; + } else { + *((char **)value) = NULL; + ret=bRC_Error; + } + Dmsg1(dbglvl, "Bacula: return bDirVarMediaType=%s\n", NPRT(*((char **)value))); + break; + case bDirVarJobStatus: + *((int *)value) = jcr->JobStatus; + Dmsg1(dbglvl, "Bacula: return bDirVarJobStatus=%c\n", jcr->JobStatus); + break; + case bDirVarPriority: + *((int *)value) = jcr->JobPriority; + Dmsg1(dbglvl, "Bacula: return bDirVarPriority=%d\n", jcr->JobPriority); + break; + case bDirVarVolumeName: + *((char **)value) = jcr->VolumeName; + Dmsg1(dbglvl, "Bacula: return bDirVarVolumeName=%s\n", jcr->VolumeName); + break; + case bDirVarCatalogRes: + ret = bRC_Error; + break; + case bDirVarJobErrors: + *((int *)value) = jcr->JobErrors; + Dmsg1(dbglvl, "Bacula: return bDirVarErrors=%d\n", jcr->JobErrors); + break; + case bDirVarJobFiles: + *((int *)value) = jcr->JobFiles; + Dmsg1(dbglvl, "Bacula: return bDirVarFiles=%d\n", jcr->JobFiles); + break; + case bDirVarSDJobFiles: + *((int *)value) = jcr->SDJobFiles; + Dmsg1(dbglvl, "Bacula: return bDirVarSDFiles=%d\n", jcr->SDJobFiles); + break; + case bDirVarSDErrors: + *((int *)value) = jcr->SDErrors; + Dmsg1(dbglvl, "Bacula: return bDirVarSDErrors=%d\n", jcr->SDErrors); + break; + case bDirVarFDJobStatus: + *((int *)value) = jcr->FDJobStatus; + Dmsg1(dbglvl, "Bacula: return bDirVarFDJobStatus=%c\n", jcr->FDJobStatus); + break; + case bDirVarSDJobStatus: + *((int *)value) = jcr->SDJobStatus; + Dmsg1(dbglvl, "Bacula: return bDirVarSDJobStatus=%c\n", jcr->SDJobStatus); + break; + default: + break; + } + return ret; +} + +static bRC baculaSetValue(bpContext *ctx, bwDirVariable var, void *value) +{ + JCR *jcr; + if (!value || !ctx) { + return bRC_Error; + } +// Dmsg1(dbglvl, "bacula: baculaGetValue var=%d\n", var); + jcr = ((bacula_ctx *)ctx->bContext)->jcr; + if (!jcr) { + return bRC_Error; + } +// Dmsg1(dbglvl, "Bacula: jcr=%p\n", jcr); + /* Nothing implemented yet */ + Dmsg1(dbglvl, "dir-plugin: baculaSetValue var=%d\n", var); + return bRC_OK; +} + +static bRC baculaRegisterEvents(bpContext *ctx, ...) +{ + va_list args; + uint32_t event; + + va_start(args, ctx); + while ((event = va_arg(args, uint32_t))) { + Dmsg1(dbglvl, "dir-Plugin wants event=%u\n", event); + } + va_end(args); + return bRC_OK; +} + +static bRC baculaJobMsg(bpContext *ctx, const char *file, int line, + int type, utime_t mtime, const char *fmt, ...) 
+{ + va_list arg_ptr; + char buf[2000]; + JCR *jcr; + + if (ctx) { + jcr = ((bacula_ctx *)ctx->bContext)->jcr; + } else { + jcr = NULL; + } + + va_start(arg_ptr, fmt); + bvsnprintf(buf, sizeof(buf), fmt, arg_ptr); + va_end(arg_ptr); + Jmsg(jcr, type, mtime, "%s", buf); + return bRC_OK; +} + +static bRC baculaDebugMsg(bpContext *ctx, const char *file, int line, + int level, const char *fmt, ...) +{ + va_list arg_ptr; + char buf[2000]; + + va_start(arg_ptr, fmt); + bvsnprintf(buf, sizeof(buf), fmt, arg_ptr); + va_end(arg_ptr); + d_msg(file, line, level, "%s", buf); + return bRC_OK; +} + +#ifdef TEST_PROGRAM + +int main(int argc, char *argv[]) +{ + char plugin_dir[1000]; + JCR mjcr1, mjcr2; + JCR *jcr1 = &mjcr1; + JCR *jcr2 = &mjcr2; + + strcpy(my_name, "test-dir"); + + getcwd(plugin_dir, sizeof(plugin_dir)-1); + load_dir_plugins(plugin_dir); + + jcr1->JobId = 111; + new_plugins(jcr1); + + jcr2->JobId = 222; + new_plugins(jcr2); + + generate_plugin_event(jcr1, bDirEventJobStart, (void *)"Start Job 1"); + generate_plugin_event(jcr1, bDirEventJobEnd); + generate_plugin_event(jcr2, bDirEventJobStart, (void *)"Start Job 1"); + free_plugins(jcr1); + generate_plugin_event(jcr2, bDirEventJobEnd); + free_plugins(jcr2); + + unload_plugins(); + + Dmsg0(dbglvl, "dir-plugin: OK ...\n"); + close_memory_pool(); + sm_dump(false); + return 0; +} + +#endif /* TEST_PROGRAM */ diff --git a/src/dird/dir_plugins.h b/src/dird/dir_plugins.h new file mode 100644 index 00000000..45dab883 --- /dev/null +++ b/src/dird/dir_plugins.h @@ -0,0 +1,184 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Interface definition for Bacula Plugins + * + * Kern Sibbald, October 2007 + */ + +#ifndef __DIR_PLUGINS_H +#define __DIR_PLUGINS_H + +#ifndef _BACULA_H +#ifdef __cplusplus +/* Workaround for SGI IRIX 6.5 */ +#define _LANGUAGE_C_PLUS_PLUS 1 +#endif +#define _REENTRANT 1 +#define _THREAD_SAFE 1 +#define _POSIX_PTHREAD_SEMANTICS 1 +#define _FILE_OFFSET_BITS 64 +#define _LARGEFILE_SOURCE 1 +#define _LARGE_FILES 1 +#endif + +#include +#ifndef __CONFIG_H +#define __CONFIG_H +#include "config.h" +#endif +#include "bc_types.h" +#include "lib/plugins.h" + +#ifdef __cplusplus +extern "C" { +#endif + + + + +/**************************************************************************** + * * + * Bacula definitions * + * * + ****************************************************************************/ + +/* Bacula Variable Ids */ /* return value */ +typedef enum { + bDirVarJob = 1, // string + bDirVarLevel = 2, // int + bDirVarType = 3, // int + bDirVarJobId = 4, // int + bDirVarClient = 5, // string + bDirVarNumVols = 6, // int + bDirVarPool = 7, // string + bDirVarStorage = 8, // string + bDirVarWriteStorage = 9, // string + bDirVarReadStorage = 10, // string + bDirVarCatalog = 11, // string + bDirVarMediaType = 12, // string + bDirVarJobName = 13, // string + bDirVarJobStatus = 14, // int + bDirVarPriority = 15, // int + bDirVarVolumeName = 16, // string + bDirVarCatalogRes = 17, // NYI + bDirVarJobErrors = 18, // int + bDirVarJobFiles = 19, // int + bDirVarSDJobFiles = 20, // int + bDirVarSDErrors = 21, // int + bDirVarFDJobStatus = 22, // int + bDirVarSDJobStatus = 23 // int +} brDirVariable; + +typedef enum { + bwDirVarJobReport = 1, + bwDirVarVolumeName = 2, + bwDirVarPriority = 3, + bwDirVarJobLevel = 4 +} bwDirVariable; + + +typedef enum { + bDirEventJobStart = 1, + bDirEventJobEnd = 2, + bDirEventJobInit = 3, + bDirEventJobRun = 4, + bDirEventVolumePurged = 5, + bDirEventNewVolume = 6, + bDirEventNeedVolume = 7, + bDirEventVolumeFull = 8, + bDirEventRecyle = 9, + bDirEventGetScratch = 10 +} bDirEventType; + +typedef struct s_bDirEvent { + uint32_t eventType; +} bDirEvent; + +typedef struct s_dirbaculaInfo { + uint32_t size; + uint32_t version; +} bDirInfo; + +/* Bacula interface version and function pointers */ +typedef struct s_dirbaculaFuncs { + uint32_t size; + uint32_t version; + bRC (*registerBaculaEvents)(bpContext *ctx, ...); + bRC (*getBaculaValue)(bpContext *ctx, brDirVariable var, void *value); + bRC (*setBaculaValue)(bpContext *ctx, bwDirVariable var, void *value); + bRC (*JobMessage)(bpContext *ctx, const char *file, int line, + int type, utime_t mtime, const char *fmt, ...); + bRC (*DebugMessage)(bpContext *ctx, const char *file, int line, + int level, const char *fmt, ...); +} bDirFuncs; + +/* Bacula Core Routines -- not used within a plugin */ +#ifdef DIRECTOR_DAEMON +void load_dir_plugins(const char *plugin_dir); +void new_plugins(JCR *jcr); +void free_plugins(JCR *jcr); +int generate_plugin_event(JCR *jcr, bDirEventType event, void *value=NULL); +#endif + + +/**************************************************************************** + * * + * Plugin definitions * + * * + ****************************************************************************/ + +typedef enum { + pDirVarName = 1, + pDirVarDescription = 2 +} pDirVariable; + + +#define DIR_PLUGIN_MAGIC "*DirPluginData*" +#define DIR_PLUGIN_INTERFACE_VERSION 1 + +typedef struct s_dirpluginInfo { + uint32_t size; + uint32_t version; + const char *plugin_magic; + const char *plugin_license; + const char 
*plugin_author; + const char *plugin_date; + const char *plugin_version; + const char *plugin_description; +} pDirInfo; + +typedef struct s_dirpluginFuncs { + uint32_t size; + uint32_t version; + bRC (*newPlugin)(bpContext *ctx); + bRC (*freePlugin)(bpContext *ctx); + bRC (*getPluginValue)(bpContext *ctx, pDirVariable var, void *value); + bRC (*setPluginValue)(bpContext *ctx, pDirVariable var, void *value); + bRC (*handlePluginEvent)(bpContext *ctx, bDirEvent *event, void *value); +} pDirFuncs; + +#define dirplug_func(plugin) ((pDirFuncs *)(plugin->pfuncs)) +#define dirplug_info(plugin) ((pDirInfo *)(plugin->pinfo)) + +#ifdef __cplusplus +} +#endif + +#endif /* __FD_PLUGINS_H */ diff --git a/src/dird/dird.c b/src/dird/dird.c new file mode 100644 index 00000000..2b31c2ba --- /dev/null +++ b/src/dird/dird.c @@ -0,0 +1,1520 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula Director daemon -- this is the main program + * + * Kern Sibbald, March MM + */ + +#include "bacula.h" +#include "dird.h" +#ifndef HAVE_REGEX_H +#include "lib/bregex.h" +#else +#include +#endif +#ifdef HAVE_DIRENT_H +#include +#endif +int breaddir(DIR *dirp, POOLMEM *&d_name); + +/* Forward referenced subroutines */ +void terminate_dird(int sig); +static bool check_resources(); +static void cleanup_old_files(); +static void resize_reload(int nb); + +/* Exported subroutines */ +extern "C" void reload_config(int sig); +extern void invalidate_schedules(); +extern bool parse_dir_config(CONFIG *config, const char *configfile, int exit_code); + +/* Imported subroutines */ +JCR *wait_for_next_job(char *runjob); +void term_scheduler(); +void start_UA_server(dlist *addrs); +void stop_UA_server(); +void init_job_server(int max_workers); +void term_job_server(); +void store_jobtype(LEX *lc, RES_ITEM *item, int index, int pass); +void store_level(LEX *lc, RES_ITEM *item, int index, int pass); +void store_replace(LEX *lc, RES_ITEM *item, int index, int pass); +void store_migtype(LEX *lc, RES_ITEM *item, int index, int pass); +void init_device_resources(); + + +static char *runjob = NULL; +static bool foreground = false; +static bool make_pid_file = true; /* create pid file */ +static void init_reload(void); +static CONFIG *config; +static bool test_config = false; + +/* Globals Exported */ +DIRRES *director; /* Director resource */ +int FDConnectTimeout; +int SDConnectTimeout; +char *configfile = NULL; +void *start_heap; +utime_t last_reload_time = 0; + + +/* Globals Imported */ +extern dlist client_globals; +extern dlist store_globals; +extern dlist job_globals; +extern dlist sched_globals; +extern RES_ITEM job_items[]; +#if defined(_MSC_VER) +extern "C" { // work around visual compiler mangling variables + extern URES res_all; +} +#else +extern URES res_all; +#endif + +typedef enum { + CHECK_CONNECTION, /* Check catalog connection */ + UPDATE_CATALOG, /* Ensure that catalog is ok with conf */ + UPDATE_AND_FIX /* Ensure that catalog is ok, and fix 
old jobs */ +} cat_op; +static bool check_catalog(cat_op mode); + +#define CONFIG_FILE "bacula-dir.conf" /* default configuration file */ + +static bool dir_sql_query(JCR *jcr, const char *cmd) +{ + if (jcr && jcr->db && jcr->db->is_connected()) { + return db_sql_query(jcr->db, cmd, NULL, NULL); + } + return false; +} + +static bool dir_sql_escape(JCR *jcr, BDB *mdb, char *snew, char *sold, int len) +{ + if (jcr && jcr->db && jcr->db->is_connected()) { + db_escape_string(jcr, mdb, snew, sold, len); + return true; + } + return false; +} + +static void usage() +{ + fprintf(stderr, _( + PROG_COPYRIGHT + "\n%sVersion: %s (%s)\n\n" + "Usage: bacula-dir [-f -s] [-c config_file] [-d debug_level] [config_file]\n" + " -c set configuration file to file\n" + " -d [,] set debug level to , debug tags to \n" + " -dt print timestamp in debug output\n" + " -T set trace on\n" + " -f run in foreground (for debugging)\n" + " -g groupid\n" + " -m print kaboom output (for debugging)\n" + " -r run now\n" + " -P do not create pid file\n" + " -s no signals\n" + " -t test - read configuration and exit\n" + " -u userid\n" + " -v verbose user messages\n" + " -? print this message.\n" + "\n"), 2000, "", VERSION, BDATE); + + exit(1); +} + +/* + * !!! WARNING !!! Use this function only when bacula is stopped. + * ie, after a fatal signal and before exiting the program + * Print information about a JCR + */ +static void dir_debug_print(JCR *jcr, FILE *fp) +{ + fprintf(fp, "\twstore=%p rstore=%p wjcr=%p client=%p reschedule_count=%d SD_msg_chan_started=%d\n", + jcr->wstore, jcr->rstore, jcr->wjcr, jcr->client, jcr->reschedule_count, (int)jcr->SD_msg_chan_started); +} + +/********************************************************************* + * + * Main Bacula Director Server program + * + */ +#if defined(HAVE_WIN32) +/* For Win32 main() is in src/win32 code ... 
*/ +#define main BaculaMain +#endif + +/* DELETE ME when bugs in MA1512, MA1632 MA1639 are fixed */ +extern void (*MA1512_reload_job_end_cb)(JCR *,void *); +static void reload_job_end_cb(JCR *jcr, void *ctx); + +int main (int argc, char *argv[]) +{ + int ch; + JCR *jcr; + bool no_signals = false; + char *uid = NULL; + char *gid = NULL; + + /* DELETE ME when bugs in MA1512, MA1632 MA1639 are fixed */ + MA1512_reload_job_end_cb = reload_job_end_cb; + + start_heap = sbrk(0); + setlocale(LC_ALL, ""); + bindtextdomain("bacula", LOCALEDIR); + textdomain("bacula"); + + init_stack_dump(); + my_name_is(argc, argv, "bacula-dir"); + init_msg(NULL, NULL); /* initialize message handler */ + init_reload(); + daemon_start_time = time(NULL); + setup_daemon_message_queue(); + console_command = run_console_command; + + while ((ch = getopt(argc, argv, "c:d:fg:mPr:stu:v?T")) != -1) { + switch (ch) { + case 'c': /* specify config file */ + if (configfile != NULL) { + free(configfile); + } + configfile = bstrdup(optarg); + break; + + case 'd': /* set debug level */ + if (*optarg == 't') { + dbg_timestamp = true; + } else { + char *p; + /* We probably find a tag list -d 10,sql,bvfs */ + if ((p = strchr(optarg, ',')) != NULL) { + *p = 0; + } + debug_level = atoi(optarg); + if (debug_level <= 0) { + debug_level = 1; + } + if (p) { + debug_parse_tags(p+1, &debug_level_tags); + } + } + Dmsg1(10, "Debug level = %lld\n", debug_level); + break; + + case 'T': + set_trace(true); + break; + + case 'f': /* run in foreground */ + foreground = true; + break; + + case 'g': /* set group id */ + gid = optarg; + break; + + case 'm': /* print kaboom output */ + prt_kaboom = true; + break; + + case 'P': /* no pid file */ + make_pid_file = false; + break; + + case 'r': /* run job */ + if (runjob != NULL) { + free(runjob); + } + if (optarg) { + runjob = bstrdup(optarg); + } + break; + + case 's': /* turn off signals */ + no_signals = true; + break; + + case 't': /* test config */ + test_config = true; + break; + + case 'u': /* set uid */ + uid = optarg; + break; + + case 'v': /* verbose */ + verbose++; + break; + + case '?': + default: + usage(); + + } + } + argc -= optind; + argv += optind; + + if (argc) { + if (configfile != NULL) { + free(configfile); + } + configfile = bstrdup(*argv); + argc--; + argv++; + } + if (argc) { + usage(); + } + + if (!foreground && !test_config) { + daemon_start(); + init_stack_dump(); /* grab new pid */ + } + + if (!no_signals) { + init_signals(terminate_dird); + } + + if (configfile == NULL) { + configfile = bstrdup(CONFIG_FILE); + } + + config = New(CONFIG()); + parse_dir_config(config, configfile, M_ERROR_TERM); + + if (init_crypto() != 0) { + Jmsg((JCR *)NULL, M_ERROR_TERM, 0, _("Cryptography library initialization failed.\n")); + } + + if (!check_resources()) { + Jmsg((JCR *)NULL, M_ERROR_TERM, 0, _("Please correct configuration file: %s\n"), configfile); + } + + /* The configuration is correct */ + director = (DIRRES *)GetNextRes(R_DIRECTOR, NULL); + + if (!test_config) { + /* Create pid must come after we are a daemon -- so we have our final pid */ + if (make_pid_file) { + create_pid_file(director->pid_directory, "bacula-dir", + get_first_port_host_order(director->DIRaddrs)); + } + read_state_file(director->working_directory, "bacula-dir", + get_first_port_host_order(director->DIRaddrs)); + } + + set_jcr_in_tsd(INVALID_JCR); + set_thread_concurrency(director->MaxConcurrentJobs * 2 + + 4 /* UA */ + 5 /* sched+watchdog+jobsvr+misc */); + lmgr_init_thread(); /* initialize the lockmanager stack */ 
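+ /* Editor's descriptive comment (not part of the original source): the remaining startup steps load the Director plugins from the configured plugin directory, drop privileges to the requested user/group, check the catalog (connection check only under -t, otherwise update it and fix old job records), remove stale .mail/.tmp/restore .bsr work files, hook the SQL query/escape callbacks used by library code, install the SIGHUP reload handler on non-Windows builds, start the UA server, watchdog, JCR subsystem and job server, and then enter the scheduler loop that runs each JCR returned by wait_for_next_job(). */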
+ + load_dir_plugins(director->plugin_directory); + + drop(uid, gid, false); /* reduce privileges if requested */ + + /* If we are in testing mode, we don't try to fix the catalog */ + cat_op mode=(test_config)?CHECK_CONNECTION:UPDATE_AND_FIX; + + if (!check_catalog(mode)) { + Jmsg((JCR *)NULL, M_ERROR_TERM, 0, _("Please correct configuration file: %s\n"), configfile); + } + + if (test_config) { + terminate_dird(0); + } + + my_name_is(0, NULL, director->name()); /* set user defined name */ + + cleanup_old_files(); + + /* Plug database interface for library routines */ + p_sql_query = (sql_query_call)dir_sql_query; + p_sql_escape = (sql_escape_call)dir_sql_escape; + + FDConnectTimeout = (int)director->FDConnectTimeout; + SDConnectTimeout = (int)director->SDConnectTimeout; + + resize_reload(director->MaxReload); + +#if !defined(HAVE_WIN32) + signal(SIGHUP, reload_config); +#endif + + init_console_msg(working_directory); + + Dmsg0(200, "Start UA server\n"); + start_UA_server(director->DIRaddrs); + + start_watchdog(); /* start network watchdog thread */ + + init_jcr_subsystem(); /* start JCR watchdogs etc. */ + + init_job_server(director->MaxConcurrentJobs); + + dbg_jcr_add_hook(dir_debug_print); /* used to director variables */ + dbg_jcr_add_hook(bdb_debug_print); /* used to debug B_DB connexion after fatal signal */ + +// init_device_resources(); + + Dmsg0(200, "wait for next job\n"); + /* Main loop -- call scheduler to get next job to run */ + while ( (jcr = wait_for_next_job(runjob)) ) { + run_job(jcr); /* run job */ + free_jcr(jcr); /* release jcr */ + set_jcr_in_tsd(INVALID_JCR); + if (runjob) { /* command line, run a single job? */ + break; /* yes, terminate */ + } + } + + terminate_dird(0); + + return 0; +} + +struct RELOAD_TABLE { + int job_count; + RES_HEAD **res_head; +}; + +static int max_reloads = 32; +static RELOAD_TABLE *reload_table=NULL; + +static void resize_reload(int nb) +{ + if (nb <= max_reloads) { + return; + } + + reload_table = (RELOAD_TABLE*)realloc(reload_table, nb * sizeof(RELOAD_TABLE)); + for (int i=max_reloads; i < nb ; i++) { + reload_table[i].job_count = 0; + reload_table[i].res_head = NULL; + } + max_reloads = nb; +} + +static void init_reload(void) +{ + reload_table = (RELOAD_TABLE*)malloc(max_reloads * sizeof(RELOAD_TABLE)); + for (int i=0; i < max_reloads; i++) { + reload_table[i].job_count = 0; + reload_table[i].res_head = NULL; + } +} + +/* + * This subroutine frees a saved resource table. + * It was saved when a new table was created with "reload" + */ +static void free_saved_resources(int table) +{ + RES *next, *res; + int num = r_last - r_first + 1; + RES_HEAD **res_tab = reload_table[table].res_head; + + if (res_tab == NULL) { + Dmsg1(100, "res_tab for table %d already released.\n", table); + return; + } + Dmsg1(100, "Freeing resources for table %d\n", table); + for (int j=0; jfirst; + for ( ; next; ) { + res = next; + next = res->res_next; + free_resource(res, r_first + j); + } + free(res_tab[j]->res_list); + free(res_tab[j]); + res_tab[j] = NULL; + } + } + free(res_tab); + reload_table[table].job_count = 0; + reload_table[table].res_head = NULL; +} + +/* + * Called here at the end of every job that was + * hooked decrementing the active job_count. When + * it goes to zero, no one is using the associated + * resource table, so free it. 
+ */ +static void reload_job_end_cb(JCR *jcr, void *ctx) +{ + int reload_id = (int)((intptr_t)ctx); + Dmsg3(100, "reload job_end JobId=%d table=%d cnt=%d\n", jcr->JobId, + reload_id, reload_table[reload_id].job_count); + lock_jobs(); + LockRes(); + if (--reload_table[reload_id].job_count <= 0) { + free_saved_resources(reload_id); + } + UnlockRes(); + unlock_jobs(); +} + +static int find_free_reload_table_entry() +{ + int table = -1; + for (int i=0; i < max_reloads; i++) { + if (reload_table[i].res_head == NULL) { + table = i; + break; + } + } + return table; +} + +static pthread_mutex_t reload_mutex = PTHREAD_MUTEX_INITIALIZER; + +/* + * If we get here, we have received a SIGHUP, which means to + * reread our configuration file. + * + * The algorithm used is as follows: we count how many jobs are + * running and mark the running jobs to make a callback on + * exiting. The old config is saved with the reload table + * id in a reload table. The new config file is read. Now, as + * each job exits, it calls back to the reload_job_end_cb(), which + * decrements the count of open jobs for the given reload table. + * When the count goes to zero, we release those resources. + * This allows us to have pointers into the resource table (from + * jobs), and once they exit and all the pointers are released, we + * release the old table. Note, if no new jobs are running since the + * last reload, then the old resources will be immediately release. + * A console is considered a job because it may have pointers to + * resources, but a SYSTEM job is not since it *should* not have any + * permanent pointers to jobs. + */ +extern "C" +void reload_config(int sig) +{ + static bool already_here = false; +#if !defined(HAVE_WIN32) + sigset_t set; +#endif + JCR *jcr; + int njobs = 0; /* number of running jobs */ + int table, rtable; + bool ok=false; + int tries=0; + + /* Wait to do the reload */ + do { + P(reload_mutex); + if (already_here) { + V(reload_mutex); + if (tries++ > 10) { + Qmsg(NULL, M_INFO, 0, _("Already doing a reload request, " + "request ignored.\n")); + return; + } + Dmsg0(10, "Already doing a reload request, waiting a bit\n"); + bmicrosleep(1, 0); + } else { + already_here = true; + V(reload_mutex); + ok = true; + } + } while (!ok); + +#if !defined(HAVE_WIN32) + sigemptyset(&set); + sigaddset(&set, SIGHUP); + sigprocmask(SIG_BLOCK, &set, NULL); +#endif + + lock_jobs(); + LockRes(); + + table = find_free_reload_table_entry(); + if (table < 0) { + Qmsg(NULL, M_ERROR, 0, _("Too many (%d) open reload requests. " + "Request ignored.\n"), max_reloads); + goto bail_out; + } + + Dmsg1(100, "Reload_config njobs=%d\n", njobs); + /* Save current res_head */ + reload_table[table].res_head = res_head; + Dmsg1(100, "Saved old config in table %d\n", table); + + /* Create a new res_head and parse into it */ + ok = parse_dir_config(config, configfile, M_ERROR); + + Dmsg0(100, "Reloaded config file\n"); + if (!ok || !check_resources() || !check_catalog(UPDATE_CATALOG)) { + /* + * We got an error, save broken point, restore old one, + * then release everything from broken pointer. + */ + rtable = find_free_reload_table_entry(); /* save new, bad table */ + if (rtable < 0) { + Qmsg(NULL, M_ERROR, 0, _("Please correct configuration file: %s\n"), configfile); + Qmsg(NULL, M_ERROR_TERM, 0, _("Out of reload table entries. 
Giving up.\n")); + } else { + Qmsg(NULL, M_ERROR, 0, _("Please correct configuration file: %s\n"), configfile); + Qmsg(NULL, M_ERROR, 0, _("Resetting previous configuration.\n")); + } + /* Save broken res_head pointer */ + reload_table[rtable].res_head = res_head; + + /* Now restore old resource pointer */ + res_head = reload_table[table].res_head; + table = rtable; /* release new, bad, saved table below */ + } else { + invalidate_schedules(); + + /* We know that the configuration is correct and we will keep it, + * so we can update the global pointer to the director resource. + */ + director = (DIRRES *)GetNextRes(R_DIRECTOR, NULL); + + /* + * Hook all active jobs so that they release this table + */ + foreach_jcr(jcr) { + if (jcr->getJobType() != JT_SYSTEM) { + reload_table[table].job_count++; + job_end_push(jcr, reload_job_end_cb, (void *)((long int)table)); + njobs++; + } + } + endeach_jcr(jcr); + /* + * Now walk through globals tables and plug them into the + * new resources. + */ + CLIENT_GLOBALS *cg; + foreach_dlist(cg, &client_globals) { + CLIENT *client; + client = GetClientResWithName(cg->name); + if (!client) { + Qmsg(NULL, M_INFO, 0, _("Client=%s not found. Assuming it was removed!!!\n"), cg->name); + } else { + client->globals = cg; /* Set globals pointer */ + } + } + STORE_GLOBALS *sg; + foreach_dlist(sg, &store_globals) { + STORE *store; + store = GetStoreResWithName(sg->name); + if (!store) { + Qmsg(NULL, M_INFO, 0, _("Storage=%s not found. Assuming it was removed!!!\n"), sg->name); + } else { + store->globals = sg; /* set globals pointer */ + Dmsg2(200, "Reload found numConcurrent=%ld for Store %s\n", + sg->NumConcurrentJobs, sg->name); + } + } + JOB_GLOBALS *jg; + foreach_dlist(jg, &job_globals) { + JOB *job; + job = GetJobResWithName(jg->name); + if (!job) { + Qmsg(NULL, M_INFO, 0, _("Job=%s not found. Assuming it was removed!!!\n"), jg->name); + } else { + job->globals = jg; /* Set globals pointer */ + } + } + SCHED_GLOBALS *schg; + foreach_dlist(schg, &sched_globals) { + SCHED *sched; + sched = GetSchedResWithName(schg->name); + if (!sched) { + Qmsg(NULL, M_INFO, 0, _("Schedule=%s not found. 
Assuming it was removed!!!\n"), schg->name); + } else { + sched->globals = schg; /* Set globals pointer */ + } + } + } + + /* Reset other globals */ + set_working_directory(director->working_directory); + FDConnectTimeout = director->FDConnectTimeout; + SDConnectTimeout = director->SDConnectTimeout; + Dmsg0(10, "Director's configuration file reread.\n"); + + /* Now release saved resources, if no jobs using the resources */ + if (njobs == 0) { + free_saved_resources(table); + } + +bail_out: + UnlockRes(); + unlock_jobs(); +#if !defined(HAVE_WIN32) + sigprocmask(SIG_UNBLOCK, &set, NULL); + signal(SIGHUP, reload_config); +#endif + already_here = false; +} + +/* Cleanup and then exit */ +void terminate_dird(int sig) +{ + static bool already_here = false; + + if (already_here) { /* avoid recursive temination problems */ + bmicrosleep(2, 0); /* yield */ + exit(1); + } + already_here = true; + debug_level = 0; /* turn off debug */ + stop_watchdog(); + generate_daemon_event(NULL, "Exit"); + unload_plugins(); + if (!test_config) { + write_state_file(director->working_directory, "bacula-dir", get_first_port_host_order(director->DIRaddrs)); + if (make_pid_file) { + delete_pid_file(director->pid_directory, "bacula-dir", get_first_port_host_order(director->DIRaddrs)); + } + } + term_scheduler(); + term_job_server(); + if (runjob) { + free(runjob); + } + if (configfile != NULL) { + free(configfile); + } + if (chk_dbglvl(5)) { + print_memory_pool_stats(); + } + if (config) { + delete config; + config = NULL; + } + stop_UA_server(); + term_msg(); /* terminate message handler */ + cleanup_crypto(); + + free_daemon_message_queue(); + + if (reload_table) { + free(reload_table); + } + free(res_head); + res_head = NULL; + /* + * Now walk through resource globals tables and release them + */ + CLIENT_GLOBALS *cg; + foreach_dlist(cg, &client_globals) { + free(cg->name); + if (cg->SetIPaddress) { + free(cg->SetIPaddress); + } + } + client_globals.destroy(); + + STORE_GLOBALS *sg; + foreach_dlist(sg, &store_globals) { + free(sg->name); + } + store_globals.destroy(); + + JOB_GLOBALS *jg; + foreach_dlist(jg, &job_globals) { + free(jg->name); + } + job_globals.destroy(); + + close_memory_pool(); /* release free memory in pool */ + lmgr_cleanup_main(); + sm_dump(false); + exit(sig); +} + +/* + * Make a quick check to see that we have all the + * resources needed. + * + * **** FIXME **** this routine could be a lot more + * intelligent and comprehensive. 
+ */ +static bool check_resources() +{ + bool OK = true; + JOB *job; + bool need_tls; + DIRRES *newDirector; + + LockRes(); + + job = (JOB *)GetNextRes(R_JOB, NULL); + newDirector = (DIRRES *)GetNextRes(R_DIRECTOR, NULL); + if (!newDirector) { + Jmsg(NULL, M_FATAL, 0, _("No Director resource defined in %s\n" +"Without that I don't know who I am :-(\n"), configfile); + OK = false; + } else { + set_working_directory(newDirector->working_directory); + if (!newDirector->messages) { /* If message resource not specified */ + newDirector->messages = (MSGS *)GetNextRes(R_MSGS, NULL); + if (!newDirector->messages) { + Jmsg(NULL, M_FATAL, 0, _("No Messages resource defined in %s\n"), configfile); + OK = false; + } + } + if (GetNextRes(R_DIRECTOR, (RES *)newDirector) != NULL) { + Jmsg(NULL, M_FATAL, 0, _("Only one Director resource permitted in %s\n"), + configfile); + OK = false; + } + /* tls_require implies tls_enable */ + if (newDirector->tls_require) { + if (have_tls) { + newDirector->tls_enable = true; + } else { + Jmsg(NULL, M_FATAL, 0, _("TLS required but not configured in Bacula.\n")); + OK = false; + } + } + + need_tls = newDirector->tls_enable || newDirector->tls_authenticate; + + if (!newDirector->tls_certfile && need_tls) { + Jmsg(NULL, M_FATAL, 0, _("\"TLS Certificate\" file not defined for Director \"%s\" in %s.\n"), + newDirector->name(), configfile); + OK = false; + } + + if (!newDirector->tls_keyfile && need_tls) { + Jmsg(NULL, M_FATAL, 0, _("\"TLS Key\" file not defined for Director \"%s\" in %s.\n"), + newDirector->name(), configfile); + OK = false; + } + + if ((!newDirector->tls_ca_certfile && !newDirector->tls_ca_certdir) && + need_tls && newDirector->tls_verify_peer) { + Jmsg(NULL, M_FATAL, 0, _("Neither \"TLS CA Certificate\" or \"TLS CA" + " Certificate Dir\" are defined for Director \"%s\" in %s." 
+ " At least one CA certificate store is required" + " when using \"TLS Verify Peer\".\n"), + newDirector->name(), configfile); + OK = false; + } + + /* If everything is well, attempt to initialize our per-resource TLS context */ + if (OK && (need_tls || newDirector->tls_require)) { + /* Initialize TLS context: + * Args: CA certfile, CA certdir, Certfile, Keyfile, + * Keyfile PEM Callback, Keyfile CB Userdata, DHfile, Verify Peer */ + newDirector->tls_ctx = new_tls_context(newDirector->tls_ca_certfile, + newDirector->tls_ca_certdir, newDirector->tls_certfile, + newDirector->tls_keyfile, NULL, NULL, newDirector->tls_dhfile, + newDirector->tls_verify_peer); + + if (!newDirector->tls_ctx) { + Jmsg(NULL, M_FATAL, 0, _("Failed to initialize TLS context for Director \"%s\" in %s.\n"), + newDirector->name(), configfile); + OK = false; + } + } + } + + if (!job) { + Jmsg(NULL, M_FATAL, 0, _("No Job records defined in %s\n"), configfile); + OK = false; + } + foreach_res(job, R_JOB) { + int i; + + if (job->jobdefs) { + JOB *jobdefs = job->jobdefs; + /* Handle RunScripts alists specifically */ + if (jobdefs->RunScripts) { + RUNSCRIPT *rs, *elt; + + if (!job->RunScripts) { + job->RunScripts = New(alist(10, not_owned_by_alist)); + } + + foreach_alist(rs, jobdefs->RunScripts) { + elt = copy_runscript(rs); + job->RunScripts->append(elt); /* we have to free it */ + } + } + + /* Transfer default items from JobDefs Resource */ + for (i=0; job_items[i].name; i++) { + char **def_svalue, **svalue; /* string value */ + uint32_t *def_ivalue, *ivalue; /* integer value */ + bool *def_bvalue, *bvalue; /* bool value */ + int64_t *def_lvalue, *lvalue; /* 64 bit values */ + alist **def_avalue, **avalue; /* alist values */ + uint32_t offset; + + Dmsg4(1400, "Job \"%s\", field \"%s\" bit=%d def=%d\n", + job->name(), job_items[i].name, + bit_is_set(i, job->hdr.item_present), + bit_is_set(i, job->jobdefs->hdr.item_present)); + + if (!bit_is_set(i, job->hdr.item_present) && + bit_is_set(i, job->jobdefs->hdr.item_present)) { + Dmsg2(400, "Job \"%s\", field \"%s\": getting default.\n", + job->name(), job_items[i].name); + offset = (char *)(job_items[i].value) - (char *)&res_all; + /* + * Handle strings and directory strings + */ + if (job_items[i].handler == store_str || + job_items[i].handler == store_dir) { + def_svalue = (char **)((char *)(job->jobdefs) + offset); + Dmsg5(400, "Job \"%s\", field \"%s\" def_svalue=%s item %d offset=%u\n", + job->name(), job_items[i].name, *def_svalue, i, offset); + svalue = (char **)((char *)job + offset); + if (*svalue) { + Pmsg1(000, _("Hey something is wrong. p=0x%lu\n"), *svalue); + } + *svalue = bstrdup(*def_svalue); + set_bit(i, job->hdr.item_present); + /* + * Handle resources + */ + } else if (job_items[i].handler == store_res) { + def_svalue = (char **)((char *)(job->jobdefs) + offset); + Dmsg4(400, "Job \"%s\", field \"%s\" item %d offset=%u\n", + job->name(), job_items[i].name, i, offset); + svalue = (char **)((char *)job + offset); + if (*svalue) { + Pmsg1(000, _("Hey something is wrong. 
p=0x%lu\n"), *svalue); + } + *svalue = *def_svalue; + set_bit(i, job->hdr.item_present); + /* + * Handle alist resources + */ + } else if (job_items[i].handler == store_alist_res) { + void *elt; + + def_avalue = (alist **)((char *)(job->jobdefs) + offset); + avalue = (alist **)((char *)job + offset); + + *avalue = New(alist(10, not_owned_by_alist)); + + foreach_alist(elt, (*def_avalue)) { + (*avalue)->append(elt); + } + set_bit(i, job->hdr.item_present); + /* + * Handle integer fields + * Note, our store_bit does not handle bitmaped fields + */ + } else if (job_items[i].handler == store_bit || + job_items[i].handler == store_pint32 || + job_items[i].handler == store_jobtype || + job_items[i].handler == store_level || + job_items[i].handler == store_int32 || + job_items[i].handler == store_size32 || + job_items[i].handler == store_migtype || + job_items[i].handler == store_replace) { + def_ivalue = (uint32_t *)((char *)(job->jobdefs) + offset); + Dmsg5(400, "Job \"%s\", field \"%s\" def_ivalue=%d item %d offset=%u\n", + job->name(), job_items[i].name, *def_ivalue, i, offset); + ivalue = (uint32_t *)((char *)job + offset); + *ivalue = *def_ivalue; + set_bit(i, job->hdr.item_present); + /* + * Handle 64 bit integer fields + */ + } else if (job_items[i].handler == store_time || + job_items[i].handler == store_size64 || + job_items[i].handler == store_speed || + job_items[i].handler == store_int64) { + def_lvalue = (int64_t *)((char *)(job->jobdefs) + offset); + Dmsg5(400, "Job \"%s\", field \"%s\" def_lvalue=%" lld " item %d offset=%u\n", + job->name(), job_items[i].name, *def_lvalue, i, offset); + lvalue = (int64_t *)((char *)job + offset); + *lvalue = *def_lvalue; + set_bit(i, job->hdr.item_present); + /* + * Handle bool fields + */ + } else if (job_items[i].handler == store_bool) { + def_bvalue = (bool *)((char *)(job->jobdefs) + offset); + Dmsg5(400, "Job \"%s\", field \"%s\" def_bvalue=%d item %d offset=%u\n", + job->name(), job_items[i].name, *def_bvalue, i, offset); + bvalue = (bool *)((char *)job + offset); + *bvalue = *def_bvalue; + set_bit(i, job->hdr.item_present); + } + } + } + } + /* + * Ensure that all required items are present + */ + for (i=0; job_items[i].name; i++) { + if (job_items[i].flags & ITEM_REQUIRED) { + if (!bit_is_set(i, job->hdr.item_present)) { + Jmsg(NULL, M_ERROR_TERM, 0, _("\"%s\" directive in Job \"%s\" resource is required, but not found.\n"), + job_items[i].name, job->name()); + OK = false; + } + } + /* If this triggers, take a look at lib/parse_conf.h */ + if (i >= MAX_RES_ITEMS) { + Emsg0(M_ERROR_TERM, 0, _("Too many items in Job resource\n")); + } + } + if (!job->storage && !job->pool->storage) { + Jmsg(NULL, M_FATAL, 0, _("No storage specified in Job \"%s\" nor in Pool.\n"), + job->name()); + OK = false; + } + + /* Make sure the job doesn't use the Scratch Pool to start with */ + const char *name; + if (!check_pool(job->JobType, job->JobLevel, + job->pool, job->next_pool, &name)) { + Jmsg(NULL, M_FATAL, 0, + _("%s \"Scratch\" not valid in Job \"%s\".\n"), + name, job->name()); + OK = false; + } + } /* End loop over Job res */ + + + /* Loop over Consoles */ + CONRES *cons; + foreach_res(cons, R_CONSOLE) { + /* tls_require implies tls_enable */ + if (cons->tls_require) { + if (have_tls) { + cons->tls_enable = true; + } else { + Jmsg(NULL, M_FATAL, 0, _("TLS required but not configured in Bacula.\n")); + OK = false; + continue; + } + } + + need_tls = cons->tls_enable || cons->tls_authenticate; + + if (!cons->tls_certfile && need_tls) { + Jmsg(NULL, M_FATAL, 
0, _("\"TLS Certificate\" file not defined for Console \"%s\" in %s.\n"), + cons->name(), configfile); + OK = false; + } + + if (!cons->tls_keyfile && need_tls) { + Jmsg(NULL, M_FATAL, 0, _("\"TLS Key\" file not defined for Console \"%s\" in %s.\n"), + cons->name(), configfile); + OK = false; + } + + if ((!cons->tls_ca_certfile && !cons->tls_ca_certdir) + && need_tls && cons->tls_verify_peer) { + Jmsg(NULL, M_FATAL, 0, _("Neither \"TLS CA Certificate\" or \"TLS CA" + " Certificate Dir\" are defined for Console \"%s\" in %s." + " At least one CA certificate store is required" + " when using \"TLS Verify Peer\".\n"), + cons->name(), configfile); + OK = false; + } + /* If everything is well, attempt to initialize our per-resource TLS context */ + if (OK && (need_tls || cons->tls_require)) { + /* Initialize TLS context: + * Args: CA certfile, CA certdir, Certfile, Keyfile, + * Keyfile PEM Callback, Keyfile CB Userdata, DHfile, Verify Peer */ + cons->tls_ctx = new_tls_context(cons->tls_ca_certfile, + cons->tls_ca_certdir, cons->tls_certfile, + cons->tls_keyfile, NULL, NULL, cons->tls_dhfile, cons->tls_verify_peer); + + if (!cons->tls_ctx) { + Jmsg(NULL, M_FATAL, 0, _("Failed to initialize TLS context for File daemon \"%s\" in %s.\n"), + cons->name(), configfile); + OK = false; + } + } + + } + + /* Loop over Clients */ + CLIENT *client; + foreach_res(client, R_CLIENT) { + /* tls_require implies tls_enable */ + if (client->tls_require) { + if (have_tls) { + client->tls_enable = true; + } else { + Jmsg(NULL, M_FATAL, 0, _("TLS required but not configured in Bacula.\n")); + OK = false; + continue; + } + } + need_tls = client->tls_enable || client->tls_authenticate; + if ((!client->tls_ca_certfile && !client->tls_ca_certdir) && need_tls) { + Jmsg(NULL, M_FATAL, 0, _("Neither \"TLS CA Certificate\"" + " or \"TLS CA Certificate Dir\" are defined for File daemon \"%s\" in %s.\n"), + client->name(), configfile); + OK = false; + } + + /* If everything is well, attempt to initialize our per-resource TLS context */ + if (OK && (need_tls || client->tls_require)) { + /* Initialize TLS context: + * Args: CA certfile, CA certdir, Certfile, Keyfile, + * Keyfile PEM Callback, Keyfile CB Userdata, DHfile, Verify Peer */ + client->tls_ctx = new_tls_context(client->tls_ca_certfile, + client->tls_ca_certdir, client->tls_certfile, + client->tls_keyfile, NULL, NULL, NULL, + true); + + if (!client->tls_ctx) { + Jmsg(NULL, M_FATAL, 0, _("Failed to initialize TLS context for File daemon \"%s\" in %s.\n"), + client->name(), configfile); + OK = false; + } + } + } + + /* Loop over all pools, check PoolType */ + POOL *pool; + foreach_res(pool, R_POOL) { + if (!pool->pool_type) { + /* This case is checked by the parse engine, we should not */ + Jmsg(NULL, M_FATAL, 0, _("PoolType required in Pool resource \"%s\".\n"), pool->hdr.name); + OK = false; + continue; + } + if ((strcasecmp(pool->pool_type, NT_("backup")) != 0) && + (strcasecmp(pool->pool_type, NT_("copy")) != 0) && + (strcasecmp(pool->pool_type, NT_("cloned")) != 0) && + (strcasecmp(pool->pool_type, NT_("archive")) != 0) && + (strcasecmp(pool->pool_type, NT_("migration")) != 0) && + (strcasecmp(pool->pool_type, NT_("scratch")) != 0)) + { + Jmsg(NULL, M_FATAL, 0, _("Invalid PoolType \"%s\" in Pool resource \"%s\".\n"), pool->pool_type, pool->hdr.name); + OK = false; + } + + if (pool->NextPool && strcmp(pool->NextPool->name(), "Scratch") == 0) { + Jmsg(NULL, M_FATAL, 0, + _("NextPool \"Scratch\" not valid in Pool \"%s\".\n"), + pool->name()); + OK = false; + } + } + + 
UnlockRes(); + if (OK) { + close_msg(NULL); /* close temp message handler */ + init_msg(NULL, newDirector->messages); /* open daemon message handler */ + last_reload_time = time(NULL); + } + return OK; +} + +/* + * In this routine, + * - we can check the connection (mode=CHECK_CONNECTION) + * - we can synchronize the catalog with the configuration (mode=UPDATE_CATALOG) + * - we can synchronize, and fix old job records (mode=UPDATE_AND_FIX) + * - we hook up the Autochange children with the parent, and + * we hook the shared autochangers together. + */ +static bool check_catalog(cat_op mode) +{ + bool OK = true; + bool need_tls; + STORE *store, *ac_child; + + /* Loop over databases */ + CAT *catalog; + const char *BDB_db_driver = NULL; // global dbdriver from BDB class + int db_driver_len = 0; + + foreach_res(catalog, R_CATALOG) { + BDB *db; + /* + * Make sure we can open catalog, otherwise print a warning + * message because the server is probably not running. + */ + db = db_init_database(NULL, catalog->db_driver, catalog->db_name, + catalog->db_user, + catalog->db_password, catalog->db_address, + catalog->db_port, catalog->db_socket, + catalog->db_ssl_mode, catalog->db_ssl_key, + catalog->db_ssl_cert, catalog->db_ssl_ca, + catalog->db_ssl_capath, catalog->db_ssl_cipher, + catalog->mult_db_connections, + catalog->disable_batch_insert); + + /* To fill appropriate "dbdriver" field into "CAT" catalog resource class */ + + if (db) { + /* To fetch dbdriver from "BDB" Catalog DB Interface class (global) + * filled with database passed during bacula compilation + */ + BDB_db_driver = db_get_engine_name(db); + db_driver_len = strlen(BDB_db_driver); + + if (catalog->db_driver == NULL) { // dbdriver field not present in bacula director conf file + catalog->db_driver = (char *)malloc(db_driver_len + 1); + memset(catalog->db_driver, 0 , (db_driver_len + 1)); + } else { // dbdriver field present in bacula director conf file + if (strlen(catalog->db_driver) == 0) { // dbdriver field present but empty in bacula director conf file + /* warning message displayed on Console while running bacula command: + * "bacula-dir -tc", in case "dbdriver" field is empty in director + * configuration file while database argument is passed during compile time + */ + Pmsg1(000, _("Dbdriver field within director config file is empty " \ + "but Database argument \"%s\" is " \ + "passed during Bacula compilation. \n"), + BDB_db_driver); + /* warning message displayed in log file (bacula.log) while running + * bacula command: "bacula-dir -tc", in case "dbdriver" field is empty in director + * configuration file while database argument is passed during compile time + */ + Jmsg(NULL, M_WARNING, 0, _("Dbdriver field within director config file " \ + "is empty but Database argument \"%s\" is " \ + "passed during Bacula compilation. \n"), + BDB_db_driver); + + } else if (strcasecmp(catalog->db_driver, BDB_db_driver)) { // dbdriver field mismatch in bacula director conf file + /* warning message displayed on Console while running bacula command: + * "bacula-dir -tc", in case "catalog->db_driver" field doesn’t match + * with database argument during compile time + */ + Pmsg2(000, _("Dbdriver field within director config file \"%s\" " \ + "mismatched with the Database argument \"%s\" " \ + "passed during Bacula compilation. 
\n"), + catalog->db_driver, BDB_db_driver); + /* warning message displayed on log file (bacula.log) while running + * bacula command: "bacula-dir -tc", in case "catalog->db_driver" field + * doesn’t match with database argument during compile time + */ + Jmsg(NULL, M_WARNING, 0, _("Dbdriver field within director config file \"%s\" " \ + "mismatched with the Database argument \"%s\" " \ + "passed during Bacula compilation. \n"), + catalog->db_driver, BDB_db_driver); + } + catalog->db_driver = (char *)realloc(catalog->db_driver, (db_driver_len + 1)); + memset(catalog->db_driver, 0 , (db_driver_len + 1)); + } + if (catalog->db_driver) { + /* To copy dbdriver field into "CAT" catalog resource class (local) + * from dbdriver in "BDB" catalog DB Interface class (global) + */ + strncpy(catalog->db_driver, BDB_db_driver, db_driver_len); + } + } + + if (!db || !db_open_database(NULL, db)) { + Pmsg2(000, _("Could not open Catalog \"%s\", database \"%s\".\n"), + catalog->name(), catalog->db_name); + Jmsg(NULL, M_FATAL, 0, _("Could not open Catalog \"%s\", database \"%s\".\n"), + catalog->name(), catalog->db_name); + if (db) { + Jmsg(NULL, M_FATAL, 0, _("%s"), db_strerror(db)); + Pmsg1(000, "%s", db_strerror(db)); + db_close_database(NULL, db); + } + OK = false; + continue; + } + + /* Display a message if the db max_connections is too low */ + if (!db_check_max_connections(NULL, db, director->MaxConcurrentJobs)) { + Pmsg1(000, "Warning, settings problem for Catalog=%s\n", catalog->name()); + Pmsg1(000, "%s", db_strerror(db)); + } + + /* we are in testing mode, so don't touch anything in the catalog */ + if (mode == CHECK_CONNECTION) { + if (db) db_close_database(NULL, db); + continue; + } + + /* Loop over all pools, defining/updating them in each database */ + POOL *pool; + foreach_res(pool, R_POOL) { + /* + * If the Pool has a catalog resource create the pool only + * in that catalog. + */ + if (!pool->catalog || pool->catalog == catalog) { + create_pool(NULL, db, pool, POOL_OP_UPDATE); /* update request */ + } + } + + /* Once they are created, we can loop over them again, updating + * references (RecyclePool) + */ + foreach_res(pool, R_POOL) { + /* + * If the Pool has a catalog resource update the pool only + * in that catalog. + */ + if (!pool->catalog || pool->catalog == catalog) { + update_pool_references(NULL, db, pool); + } + } + + /* Ensure basic client record is in DB */ + CLIENT *client; + foreach_res(client, R_CLIENT) { + CLIENT_DBR cr; + /* Create clients only if they use the current catalog */ + if (client->catalog != catalog) { + Dmsg3(500, "Skip client=%s with cat=%s not catalog=%s\n", + client->name(), client->catalog->name(), catalog->name()); + continue; + } + Dmsg2(500, "create cat=%s for client=%s\n", + client->catalog->name(), client->name()); + memset(&cr, 0, sizeof(cr)); + bstrncpy(cr.Name, client->name(), sizeof(cr.Name)); + cr.AutoPrune = client->AutoPrune; + cr.FileRetention = client->FileRetention; + cr.JobRetention = client->JobRetention; + + db_create_client_record(NULL, db, &cr); + + /* If the record doesn't reflect the current settings + * we can adjust the catalog record. 
+ */ + if (cr.AutoPrune != client->AutoPrune || + cr.JobRetention != client->JobRetention || + cr.FileRetention != client->FileRetention) + { + cr.AutoPrune = client->AutoPrune; + cr.FileRetention = client->FileRetention; + cr.JobRetention = client->JobRetention; + db_update_client_record(NULL, db, &cr); + } + } + + /* Ensure basic storage record is in DB */ + foreach_res(store, R_STORAGE) { + STORAGE_DBR sr; + MEDIATYPE_DBR mtr; + memset(&sr, 0, sizeof(sr)); + memset(&mtr, 0, sizeof(mtr)); + if (store->media_type) { + bstrncpy(mtr.MediaType, store->media_type, sizeof(mtr.MediaType)); + mtr.ReadOnly = 0; + db_create_mediatype_record(NULL, db, &mtr); + } else { + mtr.MediaTypeId = 0; + } + bstrncpy(sr.Name, store->name(), sizeof(sr.Name)); + sr.AutoChanger = store->autochanger; + if (!db_create_storage_record(NULL, db, &sr)) { + Jmsg(NULL, M_FATAL, 0, _("Could not create storage record for %s\n"), + store->name()); + OK = false; + } + store->StorageId = sr.StorageId; /* set storage Id */ + if (!sr.created) { /* if not created, update it */ + sr.AutoChanger = store->autochanger; + if (!db_update_storage_record(NULL, db, &sr)) { + Jmsg(NULL, M_FATAL, 0, _("Could not update storage record for %s\n"), + store->name()); + OK = false; + } + } + + /* tls_require implies tls_enable */ + if (store->tls_require) { + if (have_tls) { + store->tls_enable = true; + } else { + Jmsg(NULL, M_FATAL, 0, _("TLS required but not configured in Bacula.\n")); + OK = false; + } + } + + need_tls = store->tls_enable || store->tls_authenticate; + + if ((!store->tls_ca_certfile && !store->tls_ca_certdir) && need_tls) { + Jmsg(NULL, M_FATAL, 0, _("Neither \"TLS CA Certificate\"" + " or \"TLS CA Certificate Dir\" are defined for Storage \"%s\" in %s.\n"), + store->name(), configfile); + OK = false; + } + + /* If everything is well, attempt to initialize our per-resource TLS context */ + if (OK && (need_tls || store->tls_require)) { + /* Initialize TLS context: + * Args: CA certfile, CA certdir, Certfile, Keyfile, + * Keyfile PEM Callback, Keyfile CB Userdata, DHfile, Verify Peer */ + store->tls_ctx = new_tls_context(store->tls_ca_certfile, + store->tls_ca_certdir, store->tls_certfile, + store->tls_keyfile, NULL, NULL, NULL, true); + + if (!store->tls_ctx) { + Jmsg(NULL, M_FATAL, 0, _("Failed to initialize TLS context for Storage \"%s\" in %s.\n"), + store->name(), configfile); + OK = false; + } + } + } + + /* Link up all the children for each changer */ + foreach_res(store, R_STORAGE) { + char sid[50]; + if (store->changer == store) { /* we are a real Autochanger */ + store->ac_group = get_pool_memory(PM_FNAME); + store->ac_group[0] = 0; + pm_strcat(store->ac_group, edit_int64(store->StorageId, sid)); + /* Now look for children who point to this storage */ + foreach_res(ac_child, R_STORAGE) { + if (ac_child != store && ac_child->changer == store) { + /* Found a child -- add StorageId */ + pm_strcat(store->ac_group, ","); + pm_strcat(store->ac_group, edit_int64(ac_child->StorageId, sid)); + } + } + } + } + + /* Link up all the shared storage devices */ + foreach_res(store, R_STORAGE) { + if (store->ac_group) { /* we are a real Autochanger */ + /* Now look for Shared Storage who point to this storage */ + foreach_res(ac_child, R_STORAGE) { + if (ac_child->shared_storage == store && ac_child->ac_group && + ac_child->shared_storage != ac_child) { + pm_strcat(store->ac_group, ","); + pm_strcat(store->ac_group, ac_child->ac_group); + } + } + } + } + + /* Loop over all counters, defining them in each database */ + /* Set 
default value in all counters */ + COUNTER *counter; + foreach_res(counter, R_COUNTER) { + /* Write to catalog? */ + if (!counter->created && counter->Catalog == catalog) { + COUNTER_DBR cr; + bstrncpy(cr.Counter, counter->name(), sizeof(cr.Counter)); + cr.MinValue = counter->MinValue; + cr.MaxValue = counter->MaxValue; + cr.CurrentValue = counter->MinValue; + if (counter->WrapCounter) { + bstrncpy(cr.WrapCounter, counter->WrapCounter->name(), sizeof(cr.WrapCounter)); + } else { + cr.WrapCounter[0] = 0; /* empty string */ + } + if (db_create_counter_record(NULL, db, &cr)) { + counter->CurrentValue = cr.CurrentValue; + counter->created = true; + Dmsg2(100, "Create counter %s val=%d\n", counter->name(), counter->CurrentValue); + } + } + if (!counter->created) { + counter->CurrentValue = counter->MinValue; /* default value */ + } + } + /* cleanup old job records */ + if (mode == UPDATE_AND_FIX) { + db_sql_query(db, cleanup_created_job, NULL, NULL); + db_sql_query(db, cleanup_running_job, NULL, NULL); + } + + /* Set SQL engine name in global for debugging */ + set_db_engine_name(db_get_engine_name(db)); + if (db) db_close_database(NULL, db); + } + return OK; +} + +static void cleanup_old_files() +{ + DIR* dp; + int rc, name_max; + int my_name_len = strlen(my_name); + int len = strlen(director->working_directory); + POOL_MEM dname(PM_FNAME); + POOLMEM *cleanup = get_pool_memory(PM_MESSAGE); + POOLMEM *basename = get_pool_memory(PM_MESSAGE); + regex_t preg1; + char prbuf[500]; + const int nmatch = 30; + regmatch_t pmatch[nmatch]; + + /* Exclude spaces and look for .mail, .tmp or .restore.xx.bsr files */ + const char *pat1 = "^[^ ]+\\.(restore\\.[^ ]+\\.bsr|mail|tmp)$"; + + /* Setup working directory prefix */ + pm_strcpy(basename, director->working_directory); + if (len > 0 && !IsPathSeparator(director->working_directory[len-1])) { + pm_strcat(basename, "/"); + } + + /* Compile regex expressions */ + rc = regcomp(&preg1, pat1, REG_EXTENDED); + if (rc != 0) { + regerror(rc, &preg1, prbuf, sizeof(prbuf)); + Pmsg2(000, _("Could not compile regex pattern \"%s\" ERR=%s\n"), + pat1, prbuf); + goto get_out2; + } + + name_max = pathconf(".", _PC_NAME_MAX); + if (name_max < 1024) { + name_max = 1024; + } + + if (!(dp = opendir(director->working_directory))) { + berrno be; + Pmsg2(000, "Failed to open working dir %s for cleanup: ERR=%s\n", + director->working_directory, be.bstrerror()); + goto get_out1; + } + + while (1) { + if (breaddir(dp, dname.addr()) != 0) { + break; + } + /* Exclude any name with ., .., not my_name or containing a space */ + if (strcmp(dname.c_str(), ".") == 0 || strcmp(dname.c_str(), "..") == 0 || + strncmp(dname.c_str(), my_name, my_name_len) != 0) { + Dmsg1(500, "Skipped: %s\n", dname.c_str()); + continue; + } + + /* Unlink files that match regexes */ + if (regexec(&preg1, dname.c_str(), nmatch, pmatch, 0) == 0) { + pm_strcpy(cleanup, basename); + pm_strcat(cleanup, dname); + Dmsg1(100, "Unlink: %s\n", cleanup); + unlink(cleanup); + } + } + + closedir(dp); +/* Be careful to free up the correct resources */ +get_out1: + regfree(&preg1); +get_out2: + free_pool_memory(cleanup); + free_pool_memory(basename); +} diff --git a/src/dird/dird.h b/src/dird/dird.h new file mode 100644 index 00000000..028ef049 --- /dev/null +++ b/src/dird/dird.h @@ -0,0 +1,83 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Includes specific to the Director + * + * Kern Sibbald, December MM + */ + +#include "lib/ini.h" +#include "lib/runscript.h" +#include "lib/breg.h" +#include "dird_conf.h" + +#define DIRECTOR_DAEMON 1 + +#include "dir_plugins.h" +#include "cats/cats.h" + +#include "jcr.h" +#include "bsr.h" +#include "ua.h" +#include "jobq.h" + +/* Globals that dird.c exports */ +extern DIRRES *director; /* Director resource */ +extern int FDConnectTimeout; +extern int SDConnectTimeout; + +/* Used in ua_prune.c and ua_purge.c */ + +struct s_count_ctx { + int count; +}; + +#define MAX_DEL_LIST_LEN 2000000 + +struct del_ctx { + JobId_t *JobId; /* array of JobIds */ + char *PurgedFiles; /* Array of PurgedFile flags */ + int num_ids; /* ids stored */ + int max_ids; /* size of array */ + int num_del; /* number deleted */ + int tot_ids; /* total to process */ +}; + +/* Flags for find_next_volume_for_append() */ +enum { + fnv_create_vol = true, + fnv_no_create_vol = false, + fnv_prune = true, + fnv_no_prune = false +}; + +typedef struct { + char *plugin_name; + POOLMEM *content; +} plugin_config_item; + +struct idpkt { + POOLMEM *list; + uint32_t count; +}; + +void free_plugin_config_item(plugin_config_item *lst); +void free_plugin_config_items(alist *lst); + +#include "protos.h" diff --git a/src/dird/dird_conf.c b/src/dird/dird_conf.c new file mode 100644 index 00000000..23fa07ac --- /dev/null +++ b/src/dird/dird_conf.c @@ -0,0 +1,2578 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Main configuration file parser for Bacula Directors, + * some parts may be split into separate files such as + * the schedule configuration (run_config.c). + * + * Note, the configuration file parser consists of three parts + * + * 1. The generic lexical scanner in lib/lex.c and lib/lex.h + * + * 2. The generic config scanner in lib/parse_config.c and + * lib/parse_config.h. + * These files contain the parser code, some utility + * routines, and the common store routines (name, int, + * string). + * + * 3. The daemon specific file, which contains the Resource + * definitions as well as any specific store routines + * for the resource records. + * + * Kern Sibbald, January MM + */ + + +#include "bacula.h" +#include "dird.h" + +/* Define the first and last resource ID record + * types. Note, these should be unique for each + * daemon though not a requirement. 
+ */ +int32_t r_first = R_FIRST; +int32_t r_last = R_LAST; +RES_HEAD **res_head; + +static pthread_mutex_t globals_mutex = PTHREAD_MUTEX_INITIALIZER; +dlist client_globals; +dlist job_globals; +dlist store_globals; +dlist sched_globals; + + +/* Imported subroutines */ +extern void store_run(LEX *lc, RES_ITEM *item, int index, int pass); +extern void store_finc(LEX *lc, RES_ITEM *item, int index, int pass); +extern void store_inc(LEX *lc, RES_ITEM *item, int index, int pass); + + +/* Forward referenced subroutines */ + +void store_jobtype(LEX *lc, RES_ITEM *item, int index, int pass); +void store_level(LEX *lc, RES_ITEM *item, int index, int pass); +void store_replace(LEX *lc, RES_ITEM *item, int index, int pass); +void store_acl(LEX *lc, RES_ITEM *item, int index, int pass); +void store_migtype(LEX *lc, RES_ITEM *item, int index, int pass); +void store_ac_res(LEX *lc, RES_ITEM *item, int index, int pass); +void store_device(LEX *lc, RES_ITEM *item, int index, int pass); +void store_actiononpurge(LEX *lc, RES_ITEM *item, int index, int pass); +static void store_runscript_when(LEX *lc, RES_ITEM *item, int index, int pass); +static void store_runscript_cmd(LEX *lc, RES_ITEM *item, int index, int pass); +static void store_short_runscript(LEX *lc, RES_ITEM *item, int index, int pass); + +/* We build the current resource here as we are + * scanning the resource configuration definition, + * then move it to allocated memory when the resource + * scan is complete. + */ +#if defined(_MSC_VER) +extern "C" { // work around visual compiler mangling variables + URES res_all; +} +#else +URES res_all; +#endif +int32_t res_all_size = sizeof(res_all); + +/* Implementation of certain classes */ + +void CLIENT::create_client_globals() +{ + globals = (CLIENT_GLOBALS *)malloc(sizeof(CLIENT_GLOBALS)); + memset(globals, 0, sizeof(CLIENT_GLOBALS)); + globals->name = bstrdup(name()); + globals->enabled = -1; /* Not set */ + client_globals.append(globals); +} + +int32_t CLIENT::getNumConcurrentJobs() +{ + if (!globals) { + return 0; + } + return globals->NumConcurrentJobs; +} + +void CLIENT::setNumConcurrentJobs(int32_t num) +{ + P(globals_mutex); + if (!globals) { + create_client_globals(); + } + globals->NumConcurrentJobs = num; + V(globals_mutex); + ASSERT(num >= 0); + Dmsg2(200, "Set NumConcurrentJobs=%ld for Client %s\n", + num, globals->name); +} + +char *CLIENT::address(POOLMEM *&buf) +{ + P(globals_mutex); + if (!globals || !globals->SetIPaddress) { + pm_strcpy(buf, client_address); + + } else { + pm_strcpy(buf, globals->SetIPaddress); + } + V(globals_mutex); + return buf; +} + +void CLIENT::setAddress(char *addr) +{ + P(globals_mutex); + if (!globals) { + create_client_globals(); + } + if (globals->SetIPaddress) { + free(globals->SetIPaddress); + } + globals->SetIPaddress = bstrdup(addr); + V(globals_mutex); +} + +bool CLIENT::is_enabled() +{ + if (!globals || globals->enabled < 0) { + return Enabled; + } + return globals->enabled; +} + +void CLIENT::setEnabled(bool val) +{ + P(globals_mutex); + if (!globals) { + create_client_globals(); + } + /* TODO: We probably need to set -1 (not set) when we are back to the default value */ + globals->enabled = val? 
1 : 0; + V(globals_mutex); + Dmsg2(200, "Set Enabled=%d for Client %s\n", + val, globals->name); +} + +void JOB::create_job_globals() +{ + globals = (JOB_GLOBALS *)malloc(sizeof(JOB_GLOBALS)); + memset(globals, 0, sizeof(JOB_GLOBALS)); + globals->name = bstrdup(name()); + globals->enabled = -1; /* Not set */ + job_globals.append(globals); +} + +int32_t JOB::getNumConcurrentJobs() +{ + if (!globals) { + return 0; + } + return globals->NumConcurrentJobs; +} + +void JOB::setNumConcurrentJobs(int32_t num) +{ + P(globals_mutex); + if (!globals) { + create_job_globals(); + } + globals->NumConcurrentJobs = num; + V(globals_mutex); + ASSERT(num >= 0); + Dmsg2(200, "Set NumConcurrentJobs=%ld for Job %s\n", + num, globals->name); +} + +bool JOB::is_enabled() +{ + if (!globals || globals->enabled < 0) { + return Enabled; + } + return globals->enabled; +} + +void JOB::setEnabled(bool val) +{ + P(globals_mutex); + if (!globals) { + create_job_globals(); + } + globals->enabled = val ? 1 : 0; + V(globals_mutex); + Dmsg2(200, "Set Enabled=%d for Job %s\n", + val, globals->name); +} + +void STORE::create_store_globals() +{ + globals = (STORE_GLOBALS *)malloc(sizeof(STORE_GLOBALS)); + memset(globals, 0, sizeof(STORE_GLOBALS)); + globals->name = bstrdup(name()); + globals->enabled = -1; /* Not set */ + store_globals.append(globals); +} + +int32_t STORE::getNumConcurrentReadJobs() +{ + if (!globals) { + return 0; + } + return globals->NumConcurrentReadJobs; +} + +void STORE::setNumConcurrentReadJobs(int32_t num) +{ + P(globals_mutex); + if (!globals) { + create_store_globals(); + } + globals->NumConcurrentReadJobs = num; + V(globals_mutex); + Dmsg2(200, "Set NumConcurrentReadJobs=%ld for Store %s\n", + num, globals->name); + ASSERT(num >= 0); +} + +int32_t STORE::getNumConcurrentJobs() +{ + if (!globals) { + return 0; + } + return globals->NumConcurrentJobs; +} + +void STORE::setNumConcurrentJobs(int32_t num) +{ + P(globals_mutex); + if (!globals) { + create_store_globals(); + } + globals->NumConcurrentJobs = num; + V(globals_mutex); + Dmsg2(200, "Set numconcurrentJobs=%ld for Store %s\n", + num, globals->name); + ASSERT(num >= 0); +} + +bool STORE::is_enabled() +{ + if (!globals || globals->enabled < 0) { + return Enabled; + } + return globals->enabled; +} + +void STORE::setEnabled(bool val) +{ + P(globals_mutex); + if (!globals) { + create_store_globals(); + } + globals->enabled = val ? 1 : 0; + V(globals_mutex); + Dmsg2(200, "Set Enabled=%d for Storage %s\n", + val, globals->name); +} + +void SCHED::create_sched_globals() +{ + globals = (SCHED_GLOBALS *)malloc(sizeof(CLIENT_GLOBALS)); + memset(globals, 0, sizeof(SCHED_GLOBALS)); + globals->name = bstrdup(name()); + globals->enabled = -1; /* Not set */ + sched_globals.append(globals); +} + +bool SCHED::is_enabled() +{ + if (!globals || globals->enabled < 0) { + return Enabled; + } + return globals->enabled; +} + +void SCHED::setEnabled(bool val) +{ + P(globals_mutex); + if (!globals) { + create_sched_globals(); + } + globals->enabled = val ? 1 : 0; + V(globals_mutex); + Dmsg2(200, "Set Enabled=%d for Schedule %s\n", + val, globals->name); +} + +/* + * Definition of records permitted within each + * resource with the routine to process the record + * information. NOTE! quoted names must be in lower case. 
+ */ +/* + * Director Resource + * + * name handler value code flags default_value + */ +static RES_ITEM dir_items[] = { + {"Name", store_name, ITEM(res_dir.hdr.name), 0, ITEM_REQUIRED, 0}, + {"Description", store_str, ITEM(res_dir.hdr.desc), 0, 0, 0}, + {"Messages", store_res, ITEM(res_dir.messages), R_MSGS, 0, 0}, + {"DirPort", store_addresses_port, ITEM(res_dir.DIRaddrs), 0, ITEM_DEFAULT, 9101}, + {"DirAddress", store_addresses_address, ITEM(res_dir.DIRaddrs), 0, ITEM_DEFAULT, 9101}, + {"DirAddresses",store_addresses, ITEM(res_dir.DIRaddrs), 0, ITEM_DEFAULT, 9101}, + {"DirSourceAddress",store_addresses_address, ITEM(res_dir.DIRsrc_addr), 0, ITEM_DEFAULT, 0}, + {"QueryFile", store_dir, ITEM(res_dir.query_file), 0, ITEM_REQUIRED, 0}, + {"WorkingDirectory", store_dir, ITEM(res_dir.working_directory), 0, ITEM_REQUIRED, 0}, + {"PluginDirectory", store_dir, ITEM(res_dir.plugin_directory), 0, 0, 0}, + {"ScriptsDirectory", store_dir, ITEM(res_dir.scripts_directory), 0, 0, 0}, + {"PidDirectory", store_dir, ITEM(res_dir.pid_directory), 0, ITEM_REQUIRED, 0}, + {"SubsysDirectory", store_dir, ITEM(res_dir.subsys_directory), 0, 0, 0}, + {"MaximumConcurrentJobs", store_pint32, ITEM(res_dir.MaxConcurrentJobs), 0, ITEM_DEFAULT, 20}, + {"MaximumReloadRequests", store_pint32, ITEM(res_dir.MaxReload), 0, ITEM_DEFAULT, 32}, + {"MaximumConsoleConnections", store_pint32, ITEM(res_dir.MaxConsoleConnect), 0, ITEM_DEFAULT, 20}, + {"Password", store_password, ITEM(res_dir.password), 0, ITEM_REQUIRED, 0}, + {"FdConnectTimeout", store_time,ITEM(res_dir.FDConnectTimeout), 0, ITEM_DEFAULT, 3 * 60}, + {"SdConnectTimeout", store_time,ITEM(res_dir.SDConnectTimeout), 0, ITEM_DEFAULT, 30 * 60}, + {"HeartbeatInterval", store_time, ITEM(res_dir.heartbeat_interval), 0, ITEM_DEFAULT, 5 * 60}, + {"TlsAuthenticate", store_bool, ITEM(res_dir.tls_authenticate), 0, 0, 0}, + {"TlsEnable", store_bool, ITEM(res_dir.tls_enable), 0, 0, 0}, + {"TlsRequire", store_bool, ITEM(res_dir.tls_require), 0, 0, 0}, + {"TlsVerifyPeer", store_bool, ITEM(res_dir.tls_verify_peer), 0, ITEM_DEFAULT, true}, + {"TlsCaCertificateFile", store_dir, ITEM(res_dir.tls_ca_certfile), 0, 0, 0}, + {"TlsCaCertificateDir", store_dir, ITEM(res_dir.tls_ca_certdir), 0, 0, 0}, + {"TlsCertificate", store_dir, ITEM(res_dir.tls_certfile), 0, 0, 0}, + {"TlsKey", store_dir, ITEM(res_dir.tls_keyfile), 0, 0, 0}, + {"TlsDhFile", store_dir, ITEM(res_dir.tls_dhfile), 0, 0, 0}, + {"TlsAllowedCn", store_alist_str, ITEM(res_dir.tls_allowed_cns), 0, 0, 0}, + {"StatisticsRetention", store_time, ITEM(res_dir.stats_retention), 0, ITEM_DEFAULT, 60*60*24*31*12*5}, + {"VerId", store_str, ITEM(res_dir.verid), 0, 0, 0}, + {"CommCompression", store_bool, ITEM(res_dir.comm_compression), 0, ITEM_DEFAULT, true}, + {NULL, NULL, {0}, 0, 0, 0} +}; + +/* + * Console Resource + * + * name handler value code flags default_value + */ +static RES_ITEM con_items[] = { + {"Name", store_name, ITEM(res_con.hdr.name), 0, ITEM_REQUIRED, 0}, + {"Description", store_str, ITEM(res_con.hdr.desc), 0, 0, 0}, + {"Password", store_password, ITEM(res_con.password), 0, ITEM_REQUIRED, 0}, + {"JobAcl", store_acl, ITEM(res_con.ACL_lists), Job_ACL, 0, 0}, + {"ClientAcl", store_acl, ITEM(res_con.ACL_lists), Client_ACL, 0, 0}, + {"StorageAcl", store_acl, ITEM(res_con.ACL_lists), Storage_ACL, 0, 0}, + {"ScheduleAcl", store_acl, ITEM(res_con.ACL_lists), Schedule_ACL, 0, 0}, + {"RunAcl", store_acl, ITEM(res_con.ACL_lists), Run_ACL, 0, 0}, + {"PoolAcl", store_acl, ITEM(res_con.ACL_lists), Pool_ACL, 0, 0}, + {"CommandAcl", 
store_acl, ITEM(res_con.ACL_lists), Command_ACL, 0, 0}, + {"FilesetAcl", store_acl, ITEM(res_con.ACL_lists), FileSet_ACL, 0, 0}, + {"CatalogAcl", store_acl, ITEM(res_con.ACL_lists), Catalog_ACL, 0, 0}, + {"WhereAcl", store_acl, ITEM(res_con.ACL_lists), Where_ACL, 0, 0}, + {"RestoreClientAcl", store_acl, ITEM(res_con.ACL_lists), RestoreClient_ACL, 0, 0}, + {"BackupClientAcl", store_acl, ITEM(res_con.ACL_lists), BackupClient_ACL, 0, 0}, + {"PluginOptionsAcl", store_acl, ITEM(res_con.ACL_lists), PluginOptions_ACL, 0, 0}, + {"DirectoryAcl", store_acl, ITEM(res_con.ACL_lists), Directory_ACL, 0, 0}, + {"TlsAuthenticate", store_bool, ITEM(res_con.tls_authenticate), 0, 0, 0}, + {"TlsEnable", store_bool, ITEM(res_con.tls_enable), 0, 0, 0}, + {"TlsRequire", store_bool, ITEM(res_con.tls_require), 0, 0, 0}, + {"TlsVerifyPeer", store_bool, ITEM(res_con.tls_verify_peer), 0, ITEM_DEFAULT, true}, + {"TlsCaCertificateFile", store_dir, ITEM(res_con.tls_ca_certfile), 0, 0, 0}, + {"TlsCaCertificateDir", store_dir, ITEM(res_con.tls_ca_certdir), 0, 0, 0}, + {"TlsCertificate", store_dir, ITEM(res_con.tls_certfile), 0, 0, 0}, + {"TlsKey", store_dir, ITEM(res_con.tls_keyfile), 0, 0, 0}, + {"TlsDhFile", store_dir, ITEM(res_con.tls_dhfile), 0, 0, 0}, + {"TlsAllowedCn", store_alist_str, ITEM(res_con.tls_allowed_cns), 0, 0, 0}, + {NULL, NULL, {0}, 0, 0, 0} +}; + + +/* + * Client or File daemon resource + * + * name handler value code flags default_value + */ + +static RES_ITEM cli_items[] = { + {"Name", store_name, ITEM(res_client.hdr.name), 0, ITEM_REQUIRED, 0}, + {"Description", store_str, ITEM(res_client.hdr.desc), 0, 0, 0}, + {"fdaddress", store_str, ITEM(res_client.client_address), 0, 0, 0}, + {"Address", store_str, ITEM(res_client.client_address), 0, ITEM_REQUIRED, 0}, + {"FdPort", store_pint32, ITEM(res_client.FDport), 0, ITEM_DEFAULT, 9102}, + {"fdpassword", store_password, ITEM(res_client.password), 0, 0, 0}, + {"Password", store_password, ITEM(res_client.password), 0, ITEM_REQUIRED, 0}, + {"FdStorageAddress", store_str, ITEM(res_client.fd_storage_address), 0, 0, 0}, + {"Catalog", store_res, ITEM(res_client.catalog), R_CATALOG, ITEM_REQUIRED, 0}, + {"FileRetention", store_time, ITEM(res_client.FileRetention), 0, ITEM_DEFAULT, 60*60*24*60}, + {"JobRetention", store_time, ITEM(res_client.JobRetention), 0, ITEM_DEFAULT, 60*60*24*180}, + {"HeartbeatInterval", store_time, ITEM(res_client.heartbeat_interval), 0, ITEM_DEFAULT, 5 * 60}, + {"AutoPrune", store_bool, ITEM(res_client.AutoPrune), 0, ITEM_DEFAULT, true}, + {"SDCallsClient", store_bool, ITEM(res_client.sd_calls_client), 0, ITEM_DEFAULT, false}, + {"SnapshotRetention", store_time, ITEM(res_client.SnapRetention), 0, ITEM_DEFAULT, 0}, + {"MaximumConcurrentJobs", store_pint32, ITEM(res_client.MaxConcurrentJobs), 0, ITEM_DEFAULT, 1}, + {"TlsAuthenticate", store_bool, ITEM(res_client.tls_authenticate), 0, 0, 0}, + {"TlsEnable", store_bool, ITEM(res_client.tls_enable), 0, 0, 0}, + {"TlsRequire", store_bool, ITEM(res_client.tls_require), 0, 0, 0}, + {"TlsCaCertificateFile", store_dir, ITEM(res_client.tls_ca_certfile), 0, 0, 0}, + {"TlsCaCertificateDir", store_dir, ITEM(res_client.tls_ca_certdir), 0, 0, 0}, + {"TlsCertificate", store_dir, ITEM(res_client.tls_certfile), 0, 0, 0}, + {"TlsKey", store_dir, ITEM(res_client.tls_keyfile), 0, 0, 0}, + {"TlsAllowedCn", store_alist_str, ITEM(res_client.tls_allowed_cns), 0, 0, 0}, + {"MaximumBandwidthPerJob", store_speed, ITEM(res_client.max_bandwidth), 0, 0, 0}, + {"Enabled", store_bool, ITEM(res_client.Enabled), 0, 
ITEM_DEFAULT, true}, + {NULL, NULL, {0}, 0, 0, 0} +}; + +/* Storage daemon resource + * + * name handler value code flags default_value + */ +static RES_ITEM store_items[] = { + {"Name", store_name, ITEM(res_store.hdr.name), 0, ITEM_REQUIRED, 0}, + {"Description", store_str, ITEM(res_store.hdr.desc), 0, 0, 0}, + {"SdPort", store_pint32, ITEM(res_store.SDport), 0, ITEM_DEFAULT, 9103}, + {"sdaddress", store_str, ITEM(res_store.address), 0, 0, 0}, + {"Address", store_str, ITEM(res_store.address), 0, ITEM_REQUIRED, 0}, + {"FdStorageAddress", store_str, ITEM(res_store.fd_storage_address), 0, 0, 0}, + {"sdpassword", store_password, ITEM(res_store.password), 0, 0, 0}, + {"Password", store_password, ITEM(res_store.password), 0, ITEM_REQUIRED, 0}, + {"Device", store_device, ITEM(res_store.device), R_DEVICE, ITEM_REQUIRED, 0}, + {"MediaType", store_strname, ITEM(res_store.media_type), 0, ITEM_REQUIRED, 0}, + /* _bool, + * Big kludge, these two autochanger definitions must be in + * this order and together. + */ + {"autochanger", store_ac_res, ITEM(res_store.changer), 0, ITEM_DEFAULT, 0}, + {"autochanger", store_bool, ITEM(res_store.autochanger), 0, ITEM_DEFAULT, false}, + {"SharedStorage", store_ac_res, ITEM(res_store.shared_storage), 1, ITEM_DEFAULT, 0}, + {"Enabled", store_bool, ITEM(res_store.Enabled), 0, ITEM_DEFAULT, true}, + {"AllowCompression", store_bool, ITEM(res_store.AllowCompress), 0, ITEM_DEFAULT, true}, + {"HeartbeatInterval", store_time, ITEM(res_store.heartbeat_interval), 0, ITEM_DEFAULT, 5 * 60}, + {"MaximumConcurrentJobs", store_pint32, ITEM(res_store.MaxConcurrentJobs), 0, ITEM_DEFAULT, 1}, + {"MaximumConcurrentReadjobs", store_pint32, ITEM(res_store.MaxConcurrentReadJobs), 0, ITEM_DEFAULT, 0}, + {"sddport", store_pint32, ITEM(res_store.SDDport), 0, 0, 0}, /* deprecated */ + {"TlsAuthenticate", store_bool, ITEM(res_store.tls_authenticate), 0, 0, 0}, + {"TlsEnable", store_bool, ITEM(res_store.tls_enable), 0, 0, 0}, + {"TlsRequire", store_bool, ITEM(res_store.tls_require), 0, 0, 0}, + {"TlsCaCertificateFile", store_dir, ITEM(res_store.tls_ca_certfile), 0, 0, 0}, + {"TlsCaCertificateDir", store_dir, ITEM(res_store.tls_ca_certdir), 0, 0, 0}, + {"TlsCertificate", store_dir, ITEM(res_store.tls_certfile), 0, 0, 0}, + {"TlsKey", store_dir, ITEM(res_store.tls_keyfile), 0, 0, 0}, + {NULL, NULL, {0}, 0, 0, 0} +}; + +/* + * Catalog Resource Directives + * + * name handler value code flags default_value + */ +static RES_ITEM cat_items[] = { + {"Name", store_name, ITEM(res_cat.hdr.name), 0, ITEM_REQUIRED, 0}, + {"Description", store_str, ITEM(res_cat.hdr.desc), 0, 0, 0}, + {"dbaddress", store_str, ITEM(res_cat.db_address), 0, 0, 0}, + {"Address", store_str, ITEM(res_cat.db_address), 0, 0, 0}, + {"DbPort", store_pint32, ITEM(res_cat.db_port), 0, 0, 0}, + /* keep this password as store_str for the moment */ + {"dbpassword", store_str, ITEM(res_cat.db_password), 0, 0, 0}, + {"Password", store_str, ITEM(res_cat.db_password), 0, 0, 0}, + {"dbuser", store_str, ITEM(res_cat.db_user), 0, 0, 0}, + {"User", store_str, ITEM(res_cat.db_user), 0, 0, 0}, + {"DbName", store_str, ITEM(res_cat.db_name), 0, ITEM_REQUIRED, 0}, + {"dbdriver", store_str, ITEM(res_cat.db_driver), 0, 0, 0}, + {"DbSocket", store_str, ITEM(res_cat.db_socket), 0, 0, 0}, + {"dbsslmode", store_str, ITEM(res_cat.db_ssl_mode), 0, 0, 0}, + {"dbsslkey", store_str, ITEM(res_cat.db_ssl_key), 0, 0, 0}, + {"dbsslcert", store_str, ITEM(res_cat.db_ssl_cert), 0, 0, 0}, + {"dbsslca", store_str, ITEM(res_cat.db_ssl_ca), 0, 0, 0}, + {"dbsslcapath", 
store_str, ITEM(res_cat.db_ssl_capath), 0, 0, 0}, + {"DbSocket", store_str, ITEM(res_cat.db_socket), 0, 0, 0}, + /* Turned off for the moment */ + {"MultipleConnections", store_bit, ITEM(res_cat.mult_db_connections), 0, 0, 0}, + {"DisableBatchInsert", store_bool, ITEM(res_cat.disable_batch_insert), 0, ITEM_DEFAULT, false}, + {NULL, NULL, {0}, 0, 0, 0} +}; + +/* + * Job Resource Directives + * + * name handler value code flags default_value + */ +RES_ITEM job_items[] = { + {"Name", store_name, ITEM(res_job.hdr.name), 0, ITEM_REQUIRED, 0}, + {"Description", store_str, ITEM(res_job.hdr.desc), 0, 0, 0}, + {"Type", store_jobtype, ITEM(res_job.JobType), 0, ITEM_REQUIRED, 0}, + {"Level", store_level, ITEM(res_job.JobLevel), 0, 0, 0}, + {"Messages", store_res, ITEM(res_job.messages), R_MSGS, ITEM_REQUIRED, 0}, + {"Storage", store_alist_res, ITEM(res_job.storage), R_STORAGE, 0, 0}, + {"Pool", store_res, ITEM(res_job.pool), R_POOL, ITEM_REQUIRED, 0}, + {"NextPool", store_res, ITEM(res_job.next_pool), R_POOL, 0, 0}, + {"FullBackupPool", store_res, ITEM(res_job.full_pool), R_POOL, 0, 0}, + {"VirtualFullBackupPool", store_res, ITEM(res_job.vfull_pool), R_POOL, 0, 0}, + {"IncrementalBackupPool", store_res, ITEM(res_job.inc_pool), R_POOL, 0, 0}, + {"DifferentialBackupPool", store_res, ITEM(res_job.diff_pool), R_POOL, 0, 0}, + {"Client", store_res, ITEM(res_job.client), R_CLIENT, ITEM_REQUIRED, 0}, + {"Fileset", store_res, ITEM(res_job.fileset), R_FILESET, ITEM_REQUIRED, 0}, + {"Schedule", store_res, ITEM(res_job.schedule), R_SCHEDULE, 0, 0}, + {"VerifyJob", store_res, ITEM(res_job.verify_job), R_JOB, 0, 0}, + {"JobToVerify", store_res, ITEM(res_job.verify_job), R_JOB, 0, 0}, + {"JobDefs", store_res, ITEM(res_job.jobdefs), R_JOBDEFS, 0, 0}, + {"Run", store_alist_str, ITEM(res_job.run_cmds), 0, 0, 0}, + /* Root of where to restore files */ + {"Where", store_dir, ITEM(res_job.RestoreWhere), 0, 0, 0}, + {"RegexWhere", store_str, ITEM(res_job.RegexWhere), 0, 0, 0}, + {"StripPrefix", store_str, ITEM(res_job.strip_prefix), 0, 0, 0}, + {"AddPrefix", store_str, ITEM(res_job.add_prefix), 0, 0, 0}, + {"AddSuffix", store_str, ITEM(res_job.add_suffix), 0, 0, 0}, + /* Where to find bootstrap during restore */ + {"Bootstrap",store_dir, ITEM(res_job.RestoreBootstrap), 0, 0, 0}, + {"RestoreClient", store_str, ITEM(res_job.RestoreClient), 0, 0, 0}, + /* Where to write bootstrap file during backup */ + {"WriteBootstrap",store_dir, ITEM(res_job.WriteBootstrap), 0, 0, 0}, + {"WriteVerifyList",store_dir,ITEM(res_job.WriteVerifyList), 0, 0, 0}, + {"Replace", store_replace, ITEM(res_job.replace), 0, ITEM_DEFAULT, REPLACE_ALWAYS}, + {"MaximumBandwidth", store_speed, ITEM(res_job.max_bandwidth), 0, 0, 0}, + {"MaxRunSchedTime", store_time, ITEM(res_job.MaxRunSchedTime), 0, 0, 0}, + {"MaxRunTime", store_time, ITEM(res_job.MaxRunTime), 0, 0, 0}, + /* xxxMaxWaitTime are deprecated */ + {"fullmaxwaittime", store_time, ITEM(res_job.FullMaxRunTime), 0, 0, 0}, + {"incrementalmaxwaittime", store_time, ITEM(res_job.IncMaxRunTime), 0, 0, 0}, + {"differentialmaxwaittime", store_time, ITEM(res_job.DiffMaxRunTime), 0, 0, 0}, + {"FullMaxRunTime", store_time, ITEM(res_job.FullMaxRunTime), 0, 0, 0}, + {"IncrementalMaxRunTime", store_time, ITEM(res_job.IncMaxRunTime), 0, 0, 0}, + {"DifferentialMaxRunTime", store_time, ITEM(res_job.DiffMaxRunTime), 0, 0, 0}, + {"MaxWaitTime", store_time, ITEM(res_job.MaxWaitTime), 0, 0, 0}, + {"MaxStartDelay",store_time, ITEM(res_job.MaxStartDelay), 0, 0, 0}, + {"MaxFullInterval", store_time, 
ITEM(res_job.MaxFullInterval), 0, 0, 0}, + {"MaxVirtualFullInterval", store_time, ITEM(res_job.MaxVirtualFullInterval), 0, 0, 0}, + {"MaxDiffInterval", store_time, ITEM(res_job.MaxDiffInterval), 0, 0, 0}, + {"PrefixLinks", store_bool, ITEM(res_job.PrefixLinks), 0, ITEM_DEFAULT, false}, + {"PruneJobs", store_bool, ITEM(res_job.PruneJobs), 0, ITEM_DEFAULT, false}, + {"PruneFiles", store_bool, ITEM(res_job.PruneFiles), 0, ITEM_DEFAULT, false}, + {"PruneVolumes",store_bool, ITEM(res_job.PruneVolumes), 0, ITEM_DEFAULT, false}, + {"PurgeMigrationJob", store_bool, ITEM(res_job.PurgeMigrateJob), 0, ITEM_DEFAULT, false}, + {"Enabled", store_bool, ITEM(res_job.Enabled), 0, ITEM_DEFAULT, true}, + {"SnapshotRetention", store_time, ITEM(res_job.SnapRetention), 0, ITEM_DEFAULT, 0}, + {"SpoolAttributes",store_bool, ITEM(res_job.SpoolAttributes), 0, ITEM_DEFAULT, true}, + {"SpoolData", store_bool, ITEM(res_job.spool_data), 0, ITEM_DEFAULT, false}, + {"SpoolSize", store_size64, ITEM(res_job.spool_size), 0, 0, 0}, + {"ReRunFailedLevels", store_bool, ITEM(res_job.rerun_failed_levels), 0, ITEM_DEFAULT, false}, + {"PreferMountedVolumes", store_bool, ITEM(res_job.PreferMountedVolumes), 0, ITEM_DEFAULT, true}, + /* + * JSON tools skip Directive in lowercase. They are deprecated or + * are synonym with an other one that follows. Like User and dbuser. + */ + {"runbeforejob", store_short_runscript, ITEM(res_job.RunScripts), 0, 0, 0}, + {"runafterjob", store_short_runscript, ITEM(res_job.RunScripts), 0, 0, 0}, + {"runafterfailedjob", store_short_runscript, ITEM(res_job.RunScripts), 0, 0, 0}, + {"clientrunbeforejob", store_short_runscript, ITEM(res_job.RunScripts), 0, 0, 0}, + {"clientrunafterjob", store_short_runscript, ITEM(res_job.RunScripts), 0, 0, 0}, + {"consolerunbeforejob", store_short_runscript, ITEM(res_job.RunScripts), 0, 0, 0}, + {"consolerunafterjob", store_short_runscript, ITEM(res_job.RunScripts), 0, 0, 0}, + {"Runscript", store_runscript, ITEM(res_job.RunScripts), 0, ITEM_NO_EQUALS, 0}, + {"MaximumConcurrentJobs", store_pint32, ITEM(res_job.MaxConcurrentJobs), 0, ITEM_DEFAULT, 1}, + {"MaximumSpawnedJobs", store_pint32, ITEM(res_job.MaxSpawnedJobs), 0, ITEM_DEFAULT, 600}, + {"RescheduleOnError", store_bool, ITEM(res_job.RescheduleOnError), 0, ITEM_DEFAULT, false}, + {"RescheduleIncompleteJobs", store_bool, ITEM(res_job.RescheduleIncompleteJobs), 0, ITEM_DEFAULT, true}, + {"RescheduleInterval", store_time, ITEM(res_job.RescheduleInterval), 0, ITEM_DEFAULT, 60 * 30}, + {"RescheduleTimes", store_pint32, ITEM(res_job.RescheduleTimes), 0, 0, 0}, + {"Priority", store_pint32, ITEM(res_job.Priority), 0, ITEM_DEFAULT, 10}, + {"BackupsToKeep", store_pint32, ITEM(res_job.BackupsToKeep), 0, ITEM_DEFAULT, 0}, + {"AllowMixedPriority", store_bool, ITEM(res_job.allow_mixed_priority), 0, ITEM_DEFAULT, false}, + {"WritePartAfterJob", store_bool, ITEM(res_job.write_part_after_job), 0, ITEM_DEFAULT, true}, + {"SelectionPattern", store_str, ITEM(res_job.selection_pattern), 0, 0, 0}, + {"SelectionType", store_migtype, ITEM(res_job.selection_type), 0, 0, 0}, + {"Accurate", store_bool, ITEM(res_job.accurate), 0,0,0}, + {"AllowDuplicateJobs", store_bool, ITEM(res_job.AllowDuplicateJobs), 0, ITEM_DEFAULT, true}, + {"allowhigherduplicates", store_bool, ITEM(res_job.AllowHigherDuplicates), 0, ITEM_DEFAULT, true}, + {"CancelLowerLevelDuplicates", store_bool, ITEM(res_job.CancelLowerLevelDuplicates), 0, ITEM_DEFAULT, false}, + {"CancelQueuedDuplicates", store_bool, ITEM(res_job.CancelQueuedDuplicates), 0, ITEM_DEFAULT, false}, + 
{"CancelRunningDuplicates", store_bool, ITEM(res_job.CancelRunningDuplicates), 0, ITEM_DEFAULT, false}, + {"DeleteConsolidatedJobs", store_bool, ITEM(res_job.DeleteConsolidatedJobs), 0, ITEM_DEFAULT, false}, + {"PluginOptions", store_str, ITEM(res_job.PluginOptions), 0, 0, 0}, + {"Base", store_alist_res, ITEM(res_job.base), R_JOB, 0, 0}, + {NULL, NULL, {0}, 0, 0, 0} +}; + +/* Fileset resource + * + * Name handler value code flags default_value + */ +static RES_ITEM fs_items[] = { + {"Name", store_name, ITEM(res_fs.hdr.name), 0, ITEM_REQUIRED, 0}, + {"Description", store_str, ITEM(res_fs.hdr.desc), 0, 0, 0}, + {"IgnoreFilesetChanges", store_bool, ITEM(res_fs.ignore_fs_changes), 0, ITEM_DEFAULT, false}, + {"EnableVss", store_bool, ITEM(res_fs.enable_vss), 0, ITEM_DEFAULT, true}, + {"EnableSnapshot",store_bool, ITEM(res_fs.enable_snapshot), 0, ITEM_DEFAULT, false}, + {"Include", store_inc, {0}, 0, ITEM_NO_EQUALS, 0}, + {"Exclude", store_inc, {0}, 1, ITEM_NO_EQUALS, 0}, + {NULL, NULL, {0}, 0, 0, 0} +}; + +/* Schedule -- see run_conf.c */ +/* Schedule + * + * name handler value code flags default_value + */ +static RES_ITEM sch_items[] = { + {"Name", store_name, ITEM(res_sch.hdr.name), 0, ITEM_REQUIRED, 0}, + {"Description", store_str, ITEM(res_sch.hdr.desc), 0, 0, 0}, + {"Run", store_run, ITEM(res_sch.run), 0, 0, 0}, + {"Enabled", store_bool, ITEM(res_sch.Enabled), 0, ITEM_DEFAULT, true}, + {NULL, NULL, {0}, 0, 0, 0} +}; + +/* Pool resource + * + * name handler value code flags default_value + */ +static RES_ITEM pool_items[] = { + {"Name", store_name, ITEM(res_pool.hdr.name), 0, ITEM_REQUIRED, 0}, + {"Description", store_str, ITEM(res_pool.hdr.desc), 0, 0, 0}, + {"PoolType", store_strname, ITEM(res_pool.pool_type), 0, ITEM_REQUIRED, 0}, + {"LabelFormat", store_strname, ITEM(res_pool.label_format), 0, 0, 0}, + {"LabelType", store_label, ITEM(res_pool.LabelType), 0, 0, 0}, + {"CleaningPrefix", store_strname, ITEM(res_pool.cleaning_prefix), 0, 0, 0}, + {"UseCatalog", store_bool, ITEM(res_pool.use_catalog), 0, ITEM_DEFAULT, true}, + {"UseVolumeOnce", store_bool, ITEM(res_pool.use_volume_once), 0, 0, 0}, + {"PurgeOldestVolume", store_bool, ITEM(res_pool.purge_oldest_volume), 0, 0, 0}, + {"ActionOnPurge", store_actiononpurge, ITEM(res_pool.action_on_purge), 0, 0, 0}, + {"RecycleOldestVolume", store_bool, ITEM(res_pool.recycle_oldest_volume), 0, 0, 0}, + {"RecycleCurrentVolume", store_bool, ITEM(res_pool.recycle_current_volume), 0, 0, 0}, + {"MaximumVolumes", store_pint32, ITEM(res_pool.max_volumes), 0, 0, 0}, + {"MaximumVolumeJobs", store_pint32, ITEM(res_pool.MaxVolJobs), 0, 0, 0}, + {"MaximumVolumeFiles", store_pint32, ITEM(res_pool.MaxVolFiles), 0, 0, 0}, + {"MaximumVolumeBytes", store_size64, ITEM(res_pool.MaxVolBytes), 0, 0, 0}, + {"CatalogFiles", store_bool, ITEM(res_pool.catalog_files), 0, ITEM_DEFAULT, true}, + {"CacheRetention", store_time, ITEM(res_pool.CacheRetention), 0, 0, 0}, + {"VolumeRetention", store_time, ITEM(res_pool.VolRetention), 0, ITEM_DEFAULT, 60*60*24*365}, + {"VolumeUseDuration", store_time, ITEM(res_pool.VolUseDuration), 0, 0, 0}, + {"MigrationTime", store_time, ITEM(res_pool.MigrationTime), 0, 0, 0}, + {"MigrationHighBytes", store_size64, ITEM(res_pool.MigrationHighBytes), 0, 0, 0}, + {"MigrationLowBytes", store_size64, ITEM(res_pool.MigrationLowBytes), 0, 0, 0}, + {"NextPool", store_res, ITEM(res_pool.NextPool), R_POOL, 0, 0}, + {"Storage", store_alist_res, ITEM(res_pool.storage), R_STORAGE, 0, 0}, + {"AutoPrune", store_bool, ITEM(res_pool.AutoPrune), 0, ITEM_DEFAULT, 
true}, + {"Recycle", store_bool, ITEM(res_pool.Recycle), 0, ITEM_DEFAULT, true}, + {"RecyclePool", store_res, ITEM(res_pool.RecyclePool), R_POOL, 0, 0}, + {"ScratchPool", store_res, ITEM(res_pool.ScratchPool), R_POOL, 0, 0}, + {"CopyPool", store_alist_res, ITEM(res_pool.CopyPool), R_POOL, 0, 0}, + {"Catalog", store_res, ITEM(res_pool.catalog), R_CATALOG, 0, 0}, + {"FileRetention", store_time, ITEM(res_pool.FileRetention), 0, 0, 0}, + {"JobRetention", store_time, ITEM(res_pool.JobRetention), 0, 0, 0}, + + {NULL, NULL, {0}, 0, 0, 0} +}; + +/* + * Counter Resource + * name handler value code flags default_value + */ +static RES_ITEM counter_items[] = { + {"Name", store_name, ITEM(res_counter.hdr.name), 0, ITEM_REQUIRED, 0}, + {"Description", store_str, ITEM(res_counter.hdr.desc), 0, 0, 0}, + {"Minimum", store_int32, ITEM(res_counter.MinValue), 0, ITEM_DEFAULT, 0}, + {"Maximum", store_pint32, ITEM(res_counter.MaxValue), 0, ITEM_DEFAULT, INT32_MAX}, + {"WrapCounter", store_res, ITEM(res_counter.WrapCounter), R_COUNTER, 0, 0}, + {"Catalog", store_res, ITEM(res_counter.Catalog), R_CATALOG, 0, 0}, + {NULL, NULL, {0}, 0, 0, 0} +}; + + +/* Message resource */ +extern RES_ITEM msgs_items[]; + +/* + * This is the master resource definition. + * It must have one item for each of the resources. + * + * NOTE!!! keep it in the same order as the R_codes + * or eliminate all resources[rindex].name + * + * name items rcode + */ +RES_TABLE resources[] = { + {"Director", dir_items, R_DIRECTOR}, + {"Client", cli_items, R_CLIENT}, + {"Job", job_items, R_JOB}, + {"Storage", store_items, R_STORAGE}, + {"Catalog", cat_items, R_CATALOG}, + {"Schedule", sch_items, R_SCHEDULE}, + {"Fileset", fs_items, R_FILESET}, + {"Pool", pool_items, R_POOL}, + {"Messages", msgs_items, R_MSGS}, + {"Counter", counter_items, R_COUNTER}, + {"Console", con_items, R_CONSOLE}, + {"JobDefs", job_items, R_JOBDEFS}, + {"Device", NULL, R_DEVICE}, /* info obtained from SD */ + {"Autochanger", store_items, R_AUTOCHANGER}, /* alias for R_STORAGE */ + {NULL, NULL, 0} +}; + + +/* Keywords (RHS) permitted in Job Level records + * + * level_name level job_type + */ +struct s_jl joblevels[] = { + {"Full", L_FULL, JT_BACKUP}, + {"Base", L_BASE, JT_BACKUP}, + {"Incremental", L_INCREMENTAL, JT_BACKUP}, + {"Differential", L_DIFFERENTIAL, JT_BACKUP}, + {"Since", L_SINCE, JT_BACKUP}, + {"VirtualFull", L_VIRTUAL_FULL, JT_BACKUP}, + {"Catalog", L_VERIFY_CATALOG, JT_VERIFY}, + {"InitCatalog", L_VERIFY_INIT, JT_VERIFY}, + {"VolumeToCatalog", L_VERIFY_VOLUME_TO_CATALOG, JT_VERIFY}, + {"DiskToCatalog", L_VERIFY_DISK_TO_CATALOG, JT_VERIFY}, + {"Data", L_VERIFY_DATA, JT_VERIFY}, + {"Full", L_FULL, JT_COPY}, + {"Incremental", L_INCREMENTAL, JT_COPY}, + {"Differential", L_DIFFERENTIAL, JT_COPY}, + {"Full", L_FULL, JT_MIGRATE}, + {"Incremental", L_INCREMENTAL, JT_MIGRATE}, + {"Differential", L_DIFFERENTIAL, JT_MIGRATE}, + {" ", L_NONE, JT_ADMIN}, + {" ", L_NONE, JT_RESTORE}, + {NULL, 0, 0} +}; + + +/* Keywords (RHS) permitted in Job type records + * + * type_name job_type + */ +s_jt jobtypes[] = { + {"Backup", JT_BACKUP}, + {"Admin", JT_ADMIN}, + {"Verify", JT_VERIFY}, + {"Restore", JT_RESTORE}, + {"Migrate", JT_MIGRATE}, + {"Copy", JT_COPY}, + {NULL, 0} +}; + + +/* Keywords (RHS) permitted in Selection type records + * + * type_name job_type + */ +s_jt migtypes[] = { + {"SmallestVolume", MT_SMALLEST_VOL}, + {"OldestVolume", MT_OLDEST_VOL}, + {"PoolOccupancy", MT_POOL_OCCUPANCY}, + {"PoolTime", MT_POOL_TIME}, + {"PoolUncopiedJobs", MT_POOL_UNCOPIED_JOBS}, + 
{"Client", MT_CLIENT}, + {"Volume", MT_VOLUME}, + {"Job", MT_JOB}, + {"SqlQuery", MT_SQLQUERY}, + {NULL, 0} +}; + + + +/* Options permitted in Restore replace= */ +s_kw ReplaceOptions[] = { + {"Always", REPLACE_ALWAYS}, + {"IfNewer", REPLACE_IFNEWER}, + {"IfOlder", REPLACE_IFOLDER}, + {"Never", REPLACE_NEVER}, + {NULL, 0} +}; + +char *CAT::display(POOLMEM *dst) { + Mmsg(dst,"catalog=%s\ndb_name=%s\ndb_driver=%s\ndb_user=%s\n" + "db_password=%s\ndb_address=%s\ndb_port=%i\n" + "db_socket=%s\n", + name(), NPRTB(db_name), + NPRTB(db_driver), NPRTB(db_user), NPRTB(db_password), + NPRTB(db_address), db_port, NPRTB(db_socket)); + return dst; +} + +char *level_to_str(char *buf, int len, int level) +{ + int i; + bsnprintf(buf, len, "%c (%d)", level, level); /* default if not found */ + for (i=0; joblevels[i].level_name; i++) { + if (level == (int)joblevels[i].level) { + bstrncpy(buf, joblevels[i].level_name, len); + break; + } + } + return buf; +} + +/* Dump contents of resource */ +void dump_resource(int type, RES *ares, void sendit(void *sock, const char *fmt, ...), void *sock) +{ + RES *next; + URES *res = (URES *)ares; + bool recurse = true; + char ed1[100], ed2[100], ed3[100], edl[50]; + DEVICE *dev; + UAContext *ua = (UAContext *)sock; + POOLMEM *buf; + + if (res == NULL) { + sendit(sock, _("No %s resource defined\n"), res_to_str(type)); + return; + } + if (type < 0) { /* no recursion */ + type = -type; + recurse = false; + } + switch (type) { + case R_DIRECTOR: + sendit(sock, _("Director: name=%s MaxJobs=%d FDtimeout=%s SDtimeout=%s\n"), + ares->name, res->res_dir.MaxConcurrentJobs, + edit_uint64(res->res_dir.FDConnectTimeout, ed1), + edit_uint64(res->res_dir.SDConnectTimeout, ed2)); + if (res->res_dir.query_file) { + sendit(sock, _(" query_file=%s\n"), res->res_dir.query_file); + } + if (res->res_dir.messages) { + sendit(sock, _(" --> ")); + dump_resource(-R_MSGS, (RES *)res->res_dir.messages, sendit, sock); + } + break; + case R_CONSOLE: + sendit(sock, _("Console: name=%s SSL=%d\n"), + res->res_con.hdr.name, res->res_con.tls_enable); + break; + case R_COUNTER: + if (res->res_counter.WrapCounter) { + sendit(sock, _("Counter: name=%s min=%d max=%d cur=%d wrapcntr=%s\n"), + res->res_counter.hdr.name, res->res_counter.MinValue, + res->res_counter.MaxValue, res->res_counter.CurrentValue, + res->res_counter.WrapCounter->hdr.name); + } else { + sendit(sock, _("Counter: name=%s min=%d max=%d\n"), + res->res_counter.hdr.name, res->res_counter.MinValue, + res->res_counter.MaxValue); + } + if (res->res_counter.Catalog) { + sendit(sock, _(" --> ")); + dump_resource(-R_CATALOG, (RES *)res->res_counter.Catalog, sendit, sock); + } + break; + + case R_CLIENT: + if (!acl_access_ok(ua, Client_ACL, res->res_client.name())) { + break; + } + buf = get_pool_memory(PM_FNAME); + sendit(sock, _("Client: Name=%s Enabled=%d Address=%s FDport=%d MaxJobs=%u NumJobs=%u\n"), + res->res_client.name(), res->res_client.is_enabled(), + res->res_client.address(buf), res->res_client.FDport, + res->res_client.MaxConcurrentJobs, res->res_client.getNumConcurrentJobs()); + free_pool_memory(buf); + sendit(sock, _(" JobRetention=%s FileRetention=%s AutoPrune=%d\n"), + edit_utime(res->res_client.JobRetention, ed1, sizeof(ed1)), + edit_utime(res->res_client.FileRetention, ed2, sizeof(ed2)), + res->res_client.AutoPrune); + if (res->res_client.fd_storage_address) { + sendit(sock, " FDStorageAddress=%s\n", res->res_client.fd_storage_address); + } + if (res->res_client.max_bandwidth) { + sendit(sock, _(" MaximumBandwidth=%lld\n"), + 
res->res_client.max_bandwidth); + } + if (res->res_client.catalog) { + sendit(sock, _(" --> ")); + dump_resource(-R_CATALOG, (RES *)res->res_client.catalog, sendit, sock); + } + break; + + case R_DEVICE: + dev = &res->res_dev; + char ed1[50]; + sendit(sock, _("Device: name=%s ok=%d num_writers=%d max_writers=%d\n" +" reserved=%d open=%d append=%d read=%d labeled=%d offline=%d autochgr=%d\n" +" poolid=%s volname=%s MediaType=%s\n"), + dev->hdr.name, dev->found, dev->num_writers, dev->max_writers, + dev->reserved, dev->open, dev->append, dev->read, dev->labeled, + dev->offline, dev->autochanger, + edit_uint64(dev->PoolId, ed1), + dev->VolumeName, dev->MediaType); + break; + + case R_AUTOCHANGER: + case R_STORAGE: + if (!acl_access_ok(ua, Storage_ACL, res->res_store.hdr.name)) { + break; + } + sendit(sock, _("%s: name=%s address=%s SDport=%d MaxJobs=%u NumJobs=%u\n" +" DeviceName=%s MediaType=%s StorageId=%s Autochanger=%d\n"), + res->res_store.changer == &res->res_store ? "Autochanger" : "Storage", + res->res_store.hdr.name, res->res_store.address, res->res_store.SDport, + res->res_store.MaxConcurrentJobs, + res->res_store.getNumConcurrentJobs(), + res->res_store.dev_name(), + res->res_store.media_type, + edit_int64(res->res_store.StorageId, ed1), + res->res_store.autochanger); + if (res->res_store.fd_storage_address) { + sendit(sock, " FDStorageAddress=%s\n", res->res_store.fd_storage_address); + } + if (res->res_store.ac_group) { + STORE *shstore = res->res_store.shared_storage; + sendit(sock, " AC group=%s ShareStore=%s\n", res->res_store.ac_group, + shstore?shstore->name():"*none*"); + } + if (res->res_store.changer && res->res_store.changer != &res->res_store) { + sendit(sock, _(" Parent --> ")); + dump_resource(-R_STORAGE, (RES *)res->res_store.changer, sendit, sock); + } + break; + + case R_CATALOG: + if (!acl_access_ok(ua, Catalog_ACL, res->res_cat.hdr.name)) { + break; + } + sendit(sock, _("Catalog: name=%s address=%s DBport=%d db_name=%s\n" +" db_driver=%s db_user=%s MutliDBConn=%d\n"), + res->res_cat.hdr.name, NPRT(res->res_cat.db_address), + res->res_cat.db_port, res->res_cat.db_name, + NPRT(res->res_cat.db_driver), NPRT(res->res_cat.db_user), + res->res_cat.mult_db_connections); + break; + + case R_JOB: + case R_JOBDEFS: + if (!acl_access_ok(ua, Job_ACL, res->res_job.hdr.name)) { + break; + } + sendit(sock, _("%s: name=%s JobType=%d level=%s Priority=%d Enabled=%d\n"), + type == R_JOB ? 
_("Job") : _("JobDefs"), + res->res_job.hdr.name, res->res_job.JobType, + level_to_str(edl, sizeof(edl), res->res_job.JobLevel), + res->res_job.Priority, + res->res_job.is_enabled()); + sendit(sock, _(" MaxJobs=%u NumJobs=%u Resched=%d Times=%d Interval=%s Spool=%d WritePartAfterJob=%d\n"), + res->res_job.MaxConcurrentJobs, + res->res_job.getNumConcurrentJobs(), + res->res_job.RescheduleOnError, res->res_job.RescheduleTimes, + edit_uint64_with_commas(res->res_job.RescheduleInterval, ed1), + res->res_job.spool_data, res->res_job.write_part_after_job); + if (res->res_job.spool_size) { + sendit(sock, _(" SpoolSize=%s\n"), edit_uint64(res->res_job.spool_size, ed1)); + } + if (res->res_job.JobType == JT_BACKUP) { + sendit(sock, _(" Accurate=%d\n"), res->res_job.accurate); + } + if (res->res_job.max_bandwidth) { + sendit(sock, _(" MaximumBandwidth=%lld\n"), + res->res_job.max_bandwidth); + } + if (res->res_job.JobType == JT_MIGRATE || res->res_job.JobType == JT_COPY) { + sendit(sock, _(" SelectionType=%d\n"), res->res_job.selection_type); + } + if (res->res_job.JobType == JT_RESTORE) { + sendit(sock, _(" PrefixLinks=%d\n"), res->res_job.PrefixLinks); + } + if (res->res_job.client) { + sendit(sock, _(" --> ")); + dump_resource(-R_CLIENT, (RES *)res->res_job.client, sendit, sock); + } + if (res->res_job.fileset) { + sendit(sock, _(" --> ")); + dump_resource(-R_FILESET, (RES *)res->res_job.fileset, sendit, sock); + } + if (res->res_job.schedule) { + sendit(sock, _(" --> ")); + dump_resource(-R_SCHEDULE, (RES *)res->res_job.schedule, sendit, sock); + } + if (res->res_job.RestoreClient) { + sendit(sock, _(" --> RestoreClient=%s\n"), NPRT(res->res_job.RestoreClient)); + } + if (res->res_job.RestoreWhere && !res->res_job.RegexWhere) { + sendit(sock, _(" --> Where=%s\n"), NPRT(res->res_job.RestoreWhere)); + } + if (res->res_job.RegexWhere) { + sendit(sock, _(" --> RegexWhere=%s\n"), NPRT(res->res_job.RegexWhere)); + } + if (res->res_job.RestoreBootstrap) { + sendit(sock, _(" --> Bootstrap=%s\n"), NPRT(res->res_job.RestoreBootstrap)); + } + if (res->res_job.WriteBootstrap) { + sendit(sock, _(" --> WriteBootstrap=%s\n"), NPRT(res->res_job.WriteBootstrap)); + } + if (res->res_job.PluginOptions) { + sendit(sock, _(" --> PluginOptions=%s\n"), NPRT(res->res_job.PluginOptions)); + } + if (res->res_job.MaxRunTime) { + sendit(sock, _(" --> MaxRunTime=%u\n"), res->res_job.MaxRunTime); + } + if (res->res_job.MaxWaitTime) { + sendit(sock, _(" --> MaxWaitTime=%u\n"), res->res_job.MaxWaitTime); + } + if (res->res_job.MaxStartDelay) { + sendit(sock, _(" --> MaxStartDelay=%u\n"), res->res_job.MaxStartDelay); + } + if (res->res_job.MaxRunSchedTime) { + sendit(sock, _(" --> MaxRunSchedTime=%u\n"), res->res_job.MaxRunSchedTime); + } + if (res->res_job.storage) { + STORE *store; + foreach_alist(store, res->res_job.storage) { + sendit(sock, _(" --> ")); + dump_resource(-R_STORAGE, (RES *)store, sendit, sock); + } + } + if (res->res_job.base) { + JOB *job; + foreach_alist(job, res->res_job.base) { + sendit(sock, _(" --> Base %s\n"), job->name()); + } + } + if (res->res_job.RunScripts) { + RUNSCRIPT *script; + foreach_alist(script, res->res_job.RunScripts) { + sendit(sock, _(" --> RunScript\n")); + sendit(sock, _(" --> Command=%s\n"), NPRT(script->command)); + sendit(sock, _(" --> Target=%s\n"), NPRT(script->target)); + sendit(sock, _(" --> RunOnSuccess=%u\n"), script->on_success); + sendit(sock, _(" --> RunOnFailure=%u\n"), script->on_failure); + sendit(sock, _(" --> FailJobOnError=%u\n"), script->fail_on_error); + 
sendit(sock, _(" --> RunWhen=%u\n"), script->when); + } + } + if (res->res_job.pool) { + sendit(sock, _(" --> ")); + dump_resource(-R_POOL, (RES *)res->res_job.pool, sendit, sock); + } + if (res->res_job.vfull_pool) { + sendit(sock, _(" --> VFullBackup")); + dump_resource(-R_POOL, (RES *)res->res_job.vfull_pool, sendit, sock); + } + if (res->res_job.full_pool) { + sendit(sock, _(" --> FullBackup")); + dump_resource(-R_POOL, (RES *)res->res_job.full_pool, sendit, sock); + } + if (res->res_job.inc_pool) { + sendit(sock, _(" --> IncrementalBackup")); + dump_resource(-R_POOL, (RES *)res->res_job.inc_pool, sendit, sock); + } + if (res->res_job.diff_pool) { + sendit(sock, _(" --> DifferentialBackup")); + dump_resource(-R_POOL, (RES *)res->res_job.diff_pool, sendit, sock); + } + if (res->res_job.next_pool) { + sendit(sock, _(" --> Next")); /* Pool will be added by dump_resource */ + dump_resource(-R_POOL, (RES *)res->res_job.next_pool, sendit, sock); + } + if (res->res_job.JobType == JT_VERIFY && res->res_job.verify_job) { + sendit(sock, _(" --> JobToVerify %s"), (RES *)res->res_job.verify_job->name()); + } + if (res->res_job.run_cmds) { + char *runcmd; + foreach_alist(runcmd, res->res_job.run_cmds) { + sendit(sock, _(" --> Run=%s\n"), runcmd); + } + } + if (res->res_job.selection_pattern) { + sendit(sock, _(" --> SelectionPattern=%s\n"), NPRT(res->res_job.selection_pattern)); + } + if (res->res_job.messages) { + sendit(sock, _(" --> ")); + dump_resource(-R_MSGS, (RES *)res->res_job.messages, sendit, sock); + } + break; + + case R_FILESET: + { + int i, j, k; + if (!acl_access_ok(ua, FileSet_ACL, res->res_fs.hdr.name)) { + break; + } + sendit(sock, _("FileSet: name=%s IgnoreFileSetChanges=%d\n"), res->res_fs.hdr.name, res->res_fs.ignore_fs_changes); + for (i=0; ires_fs.num_includes; i++) { + INCEXE *incexe = res->res_fs.include_items[i]; + for (j=0; jnum_opts; j++) { + FOPTS *fo = incexe->opts_list[j]; + sendit(sock, " O %s\n", fo->opts); + + bool enhanced_wild = false; + for (k=0; fo->opts[k]!='\0'; k++) { + if (fo->opts[k]=='W') { + enhanced_wild = true; + break; + } + } + + for (k=0; kregex.size(); k++) { + sendit(sock, " R %s\n", fo->regex.get(k)); + } + for (k=0; kregexdir.size(); k++) { + sendit(sock, " RD %s\n", fo->regexdir.get(k)); + } + for (k=0; kregexfile.size(); k++) { + sendit(sock, " RF %s\n", fo->regexfile.get(k)); + } + for (k=0; kwild.size(); k++) { + sendit(sock, " W %s\n", fo->wild.get(k)); + } + for (k=0; kwilddir.size(); k++) { + sendit(sock, " WD %s\n", fo->wilddir.get(k)); + } + for (k=0; kwildfile.size(); k++) { + sendit(sock, " WF %s\n", fo->wildfile.get(k)); + } + for (k=0; kwildbase.size(); k++) { + sendit(sock, " W%c %s\n", enhanced_wild ? 
'B' : 'F', fo->wildbase.get(k)); + } + for (k=0; kbase.size(); k++) { + sendit(sock, " B %s\n", fo->base.get(k)); + } + for (k=0; kfstype.size(); k++) { + sendit(sock, " X %s\n", fo->fstype.get(k)); + } + for (k=0; kdrivetype.size(); k++) { + sendit(sock, " XD %s\n", fo->drivetype.get(k)); + } + if (fo->plugin) { + sendit(sock, " G %s\n", fo->plugin); + } + if (fo->reader) { + sendit(sock, " D %s\n", fo->reader); + } + if (fo->writer) { + sendit(sock, " T %s\n", fo->writer); + } + sendit(sock, " N\n"); + } + if (incexe->ignoredir) { + sendit(sock, " Z %s\n", incexe->ignoredir); + } + for (j=0; jname_list.size(); j++) { + sendit(sock, " I %s\n", incexe->name_list.get(j)); + } + if (incexe->name_list.size()) { + sendit(sock, " N\n"); + } + for (j=0; jplugin_list.size(); j++) { + sendit(sock, " P %s\n", incexe->plugin_list.get(j)); + } + if (incexe->plugin_list.size()) { + sendit(sock, " N\n"); + } + } /* end for over includes */ + + for (i=0; ires_fs.num_excludes; i++) { + INCEXE *incexe = res->res_fs.exclude_items[i]; + for (j=0; jname_list.size(); j++) { + sendit(sock, " E %s\n", incexe->name_list.get(j)); + } + if (incexe->name_list.size()) { + sendit(sock, " N\n"); + } + } + break; + } /* end case R_FILESET */ + + case R_SCHEDULE: + if (!acl_access_ok(ua, Schedule_ACL, res->res_sch.hdr.name)) { + break; + } + + if (res->res_sch.run) { + int i; + RUN *run = res->res_sch.run; + char buf[1000], num[30]; + sendit(sock, _("Schedule: Name=%s Enabled=%d\n"), + res->res_sch.hdr.name, res->res_sch.is_enabled()); + if (!run) { + break; + } +next_run: + sendit(sock, _(" --> Run Level=%s\n"), + level_to_str(edl, sizeof(edl), run->level)); + if (run->MaxRunSchedTime) { + sendit(sock, _(" MaxRunSchedTime=%u\n"), run->MaxRunSchedTime); + } + if (run->Priority) { + sendit(sock, _(" Priority=%u\n"), run->Priority); + } + bstrncpy(buf, _(" hour="), sizeof(buf)); + for (i=0; i<24; i++) { + if (bit_is_set(i, run->hour)) { + bsnprintf(num, sizeof(num), "%d ", i); + bstrncat(buf, num, sizeof(buf)); + } + } + bstrncat(buf, "\n", sizeof(buf)); + sendit(sock, buf); + bstrncpy(buf, _(" mday="), sizeof(buf)); + for (i=0; i<32; i++) { + if (bit_is_set(i, run->mday)) { + bsnprintf(num, sizeof(num), "%d ", i); + bstrncat(buf, num, sizeof(buf)); + } + } + bstrncat(buf, "\n", sizeof(buf)); + sendit(sock, buf); + bstrncpy(buf, _(" month="), sizeof(buf)); + for (i=0; i<12; i++) { + if (bit_is_set(i, run->month)) { + bsnprintf(num, sizeof(num), "%d ", i); + bstrncat(buf, num, sizeof(buf)); + } + } + bstrncat(buf, "\n", sizeof(buf)); + sendit(sock, buf); + bstrncpy(buf, _(" wday="), sizeof(buf)); + for (i=0; i<7; i++) { + if (bit_is_set(i, run->wday)) { + bsnprintf(num, sizeof(num), "%d ", i); + bstrncat(buf, num, sizeof(buf)); + } + } + bstrncat(buf, "\n", sizeof(buf)); + sendit(sock, buf); + bstrncpy(buf, _(" wom="), sizeof(buf)); + for (i=0; i<6; i++) { + if (bit_is_set(i, run->wom)) { + bsnprintf(num, sizeof(num), "%d ", i); + bstrncat(buf, num, sizeof(buf)); + } + } + bstrncat(buf, "\n", sizeof(buf)); + sendit(sock, buf); + bstrncpy(buf, _(" woy="), sizeof(buf)); + for (i=0; i<54; i++) { + if (bit_is_set(i, run->woy)) { + bsnprintf(num, sizeof(num), "%d ", i); + bstrncat(buf, num, sizeof(buf)); + } + } + bstrncat(buf, "\n", sizeof(buf)); + sendit(sock, buf); + sendit(sock, _(" mins=%d\n"), run->minute); + if (run->pool) { + sendit(sock, _(" --> ")); + dump_resource(-R_POOL, (RES *)run->pool, sendit, sock); + } + if (run->next_pool) { + sendit(sock, _(" --> Next")); /* Pool will be added by dump_resource */ + 
dump_resource(-R_POOL, (RES *)run->next_pool, sendit, sock); + } + if (run->storage) { + sendit(sock, _(" --> ")); + dump_resource(-R_STORAGE, (RES *)run->storage, sendit, sock); + } + if (run->msgs) { + sendit(sock, _(" --> ")); + dump_resource(-R_MSGS, (RES *)run->msgs, sendit, sock); + } + /* If another Run record is chained in, go print it */ + if (run->next) { + run = run->next; + goto next_run; + } + } else { + sendit(sock, _("Schedule: name=%s\n"), res->res_sch.hdr.name); + } + break; + + case R_POOL: + if (!acl_access_ok(ua, Pool_ACL, res->res_pool.hdr.name)) { + break; + } + sendit(sock, _("Pool: name=%s PoolType=%s\n"), res->res_pool.hdr.name, + res->res_pool.pool_type); + sendit(sock, _(" use_cat=%d use_once=%d cat_files=%d\n"), + res->res_pool.use_catalog, res->res_pool.use_volume_once, + res->res_pool.catalog_files); + sendit(sock, _(" max_vols=%d auto_prune=%d VolRetention=%s\n"), + res->res_pool.max_volumes, res->res_pool.AutoPrune, + edit_utime(res->res_pool.VolRetention, ed1, sizeof(ed1))); + sendit(sock, _(" VolUse=%s recycle=%d LabelFormat=%s\n"), + edit_utime(res->res_pool.VolUseDuration, ed1, sizeof(ed1)), + res->res_pool.Recycle, + NPRT(res->res_pool.label_format)); + sendit(sock, _(" CleaningPrefix=%s LabelType=%d\n"), + NPRT(res->res_pool.cleaning_prefix), res->res_pool.LabelType); + sendit(sock, _(" RecyleOldest=%d PurgeOldest=%d ActionOnPurge=%d\n"), + res->res_pool.recycle_oldest_volume, + res->res_pool.purge_oldest_volume, + res->res_pool.action_on_purge); + sendit(sock, _(" MaxVolJobs=%d MaxVolFiles=%d MaxVolBytes=%s\n"), + res->res_pool.MaxVolJobs, + res->res_pool.MaxVolFiles, + edit_uint64(res->res_pool.MaxVolBytes, ed1)); + sendit(sock, _(" MigTime=%s MigHiBytes=%s MigLoBytes=%s\n"), + edit_utime(res->res_pool.MigrationTime, ed1, sizeof(ed1)), + edit_uint64(res->res_pool.MigrationHighBytes, ed2), + edit_uint64(res->res_pool.MigrationLowBytes, ed3)); + sendit(sock, _(" CacheRetention=%s\n"), + edit_utime(res->res_pool.CacheRetention, ed1, sizeof(ed1))); + sendit(sock, _(" JobRetention=%s FileRetention=%s\n"), + edit_utime(res->res_pool.JobRetention, ed1, sizeof(ed1)), + edit_utime(res->res_pool.FileRetention, ed2, sizeof(ed2))); + if (res->res_pool.NextPool) { + sendit(sock, _(" NextPool=%s\n"), res->res_pool.NextPool->name()); + } + if (res->res_pool.RecyclePool) { + sendit(sock, _(" RecyclePool=%s\n"), res->res_pool.RecyclePool->name()); + } + if (res->res_pool.ScratchPool) { + sendit(sock, _(" ScratchPool=%s\n"), res->res_pool.ScratchPool->name()); + } + if (res->res_pool.catalog) { + sendit(sock, _(" Catalog=%s\n"), res->res_pool.catalog->name()); + } + if (res->res_pool.storage) { + STORE *store; + foreach_alist(store, res->res_pool.storage) { + sendit(sock, _(" --> ")); + dump_resource(-R_STORAGE, (RES *)store, sendit, sock); + } + } + if (res->res_pool.CopyPool) { + POOL *copy; + foreach_alist(copy, res->res_pool.CopyPool) { + sendit(sock, _(" --> ")); + dump_resource(-R_POOL, (RES *)copy, sendit, sock); + } + } + + break; + + case R_MSGS: + sendit(sock, _("Messages: name=%s\n"), res->res_msgs.hdr.name); + if (res->res_msgs.mail_cmd) + sendit(sock, _(" mailcmd=%s\n"), res->res_msgs.mail_cmd); + if (res->res_msgs.operator_cmd) + sendit(sock, _(" opcmd=%s\n"), res->res_msgs.operator_cmd); + break; + + default: + sendit(sock, _("Unknown resource type %d in dump_resource.\n"), type); + break; + } + if (recurse) { + next = GetNextRes(0, (RES *)res); + if (next) { + dump_resource(type, next, sendit, sock); + } + } +} + +/* + * Free all the members of an 
INCEXE structure + */ +static void free_incexe(INCEXE *incexe) +{ + incexe->name_list.destroy(); + incexe->plugin_list.destroy(); + for (int i=0; i<incexe->num_opts; i++) { + FOPTS *fopt = incexe->opts_list[i]; + fopt->regex.destroy(); + fopt->regexdir.destroy(); + fopt->regexfile.destroy(); + fopt->wild.destroy(); + fopt->wilddir.destroy(); + fopt->wildfile.destroy(); + fopt->wildbase.destroy(); + fopt->base.destroy(); + fopt->fstype.destroy(); + fopt->drivetype.destroy(); + if (fopt->plugin) { + free(fopt->plugin); + } + if (fopt->reader) { + free(fopt->reader); + } + if (fopt->writer) { + free(fopt->writer); + } + free(fopt); + } + if (incexe->opts_list) { + free(incexe->opts_list); + } + if (incexe->ignoredir) { + free(incexe->ignoredir); + } + free(incexe); +} + + +/* + * Free memory of resource -- called when daemon terminates. + * NB, we don't need to worry about freeing any references + * to other resources as they will be freed when that + * resource chain is traversed. Mainly we worry about freeing + * allocated strings (names). + */ +void free_resource(RES *rres, int type) +{ + int num; + URES *res = (URES *)rres; + + if (res == NULL) { + return; + } + + Dmsg3(200, "type=%d res=%p name=%s\n", type, res, res->res_dir.hdr.name); + /* common stuff -- free the resource name and description */ + if (res->res_dir.hdr.name) { + free(res->res_dir.hdr.name); + } + if (res->res_dir.hdr.desc) { + free(res->res_dir.hdr.desc); + } + + switch (type) { + case R_DIRECTOR: + if (res->res_dir.working_directory) { + free(res->res_dir.working_directory); + } + if (res->res_dir.scripts_directory) { + free((char *)res->res_dir.scripts_directory); + } + if (res->res_dir.plugin_directory) { + free((char *)res->res_dir.plugin_directory); + } + if (res->res_dir.pid_directory) { + free(res->res_dir.pid_directory); + } + if (res->res_dir.subsys_directory) { + free(res->res_dir.subsys_directory); + } + if (res->res_dir.password) { + free(res->res_dir.password); + } + if (res->res_dir.query_file) { + free(res->res_dir.query_file); + } + if (res->res_dir.DIRaddrs) { + free_addresses(res->res_dir.DIRaddrs); + } + if (res->res_dir.DIRsrc_addr) { + free_addresses(res->res_dir.DIRsrc_addr); + } + if (res->res_dir.tls_ctx) { + free_tls_context(res->res_dir.tls_ctx); + } + if (res->res_dir.tls_ca_certfile) { + free(res->res_dir.tls_ca_certfile); + } + if (res->res_dir.tls_ca_certdir) { + free(res->res_dir.tls_ca_certdir); + } + if (res->res_dir.tls_certfile) { + free(res->res_dir.tls_certfile); + } + if (res->res_dir.tls_keyfile) { + free(res->res_dir.tls_keyfile); + } + if (res->res_dir.tls_dhfile) { + free(res->res_dir.tls_dhfile); + } + if (res->res_dir.tls_allowed_cns) { + delete res->res_dir.tls_allowed_cns; + } + if (res->res_dir.verid) { + free(res->res_dir.verid); + } + break; + case R_DEVICE: + case R_COUNTER: + break; + case R_CONSOLE: + if (res->res_con.password) { + free(res->res_con.password); + } + if (res->res_con.tls_ctx) { + free_tls_context(res->res_con.tls_ctx); + } + if (res->res_con.tls_ca_certfile) { + free(res->res_con.tls_ca_certfile); + } + if (res->res_con.tls_ca_certdir) { + free(res->res_con.tls_ca_certdir); + } + if (res->res_con.tls_certfile) { + free(res->res_con.tls_certfile); + } + if (res->res_con.tls_keyfile) { + free(res->res_con.tls_keyfile); + } + if (res->res_con.tls_dhfile) { + free(res->res_con.tls_dhfile); + } + if (res->res_con.tls_allowed_cns) { + delete res->res_con.tls_allowed_cns; + } + for (int i=0; i<Num_ACL; i++) { + if (res->res_con.ACL_lists[i]) { + delete res->res_con.ACL_lists[i]; +
res->res_con.ACL_lists[i] = NULL; + } + } + break; + case R_CLIENT: + if (res->res_client.client_address) { + free(res->res_client.client_address); + } + if (res->res_client.fd_storage_address) { + free(res->res_client.fd_storage_address); + } + if (res->res_client.password) { + free(res->res_client.password); + } + if (res->res_client.tls_ctx) { + free_tls_context(res->res_client.tls_ctx); + } + if (res->res_client.tls_ca_certfile) { + free(res->res_client.tls_ca_certfile); + } + if (res->res_client.tls_ca_certdir) { + free(res->res_client.tls_ca_certdir); + } + if (res->res_client.tls_certfile) { + free(res->res_client.tls_certfile); + } + if (res->res_client.tls_keyfile) { + free(res->res_client.tls_keyfile); + } + if (res->res_client.tls_allowed_cns) { + delete res->res_client.tls_allowed_cns; + } + break; + case R_AUTOCHANGER: + case R_STORAGE: + if (res->res_store.address) { + free(res->res_store.address); + } + if (res->res_store.fd_storage_address) { + free(res->res_store.fd_storage_address); + } + if (res->res_store.password) { + free(res->res_store.password); + } + if (res->res_store.media_type) { + free(res->res_store.media_type); + } + if (res->res_store.ac_group) { + free_pool_memory(res->res_store.ac_group); + } + if (res->res_store.device) { + delete res->res_store.device; + } + if (res->res_store.tls_ctx) { + free_tls_context(res->res_store.tls_ctx); + } + if (res->res_store.tls_ca_certfile) { + free(res->res_store.tls_ca_certfile); + } + if (res->res_store.tls_ca_certdir) { + free(res->res_store.tls_ca_certdir); + } + if (res->res_store.tls_certfile) { + free(res->res_store.tls_certfile); + } + if (res->res_store.tls_keyfile) { + free(res->res_store.tls_keyfile); + } + break; + case R_CATALOG: + if (res->res_cat.db_address) { + free(res->res_cat.db_address); + } + if (res->res_cat.db_socket) { + free(res->res_cat.db_socket); + } + if (res->res_cat.db_user) { + free(res->res_cat.db_user); + } + if (res->res_cat.db_name) { + free(res->res_cat.db_name); + } + if (res->res_cat.db_driver) { + free(res->res_cat.db_driver); + } + if (res->res_cat.db_password) { + free(res->res_cat.db_password); + } + if (res->res_cat.db_ssl_mode) { + free(res->res_cat.db_ssl_mode); + } + if (res->res_cat.db_ssl_key) { + free(res->res_cat.db_ssl_key); + } + if (res->res_cat.db_ssl_cert) { + free(res->res_cat.db_ssl_cert); + } + if (res->res_cat.db_ssl_ca) { + free(res->res_cat.db_ssl_ca); + } + if (res->res_cat.db_ssl_capath) { + free(res->res_cat.db_ssl_capath); + } + if (res->res_cat.db_ssl_cipher) { + free(res->res_cat.db_ssl_cipher); + } + break; + case R_FILESET: + if ((num=res->res_fs.num_includes)) { + while (--num >= 0) { + free_incexe(res->res_fs.include_items[num]); + } + free(res->res_fs.include_items); + } + res->res_fs.num_includes = 0; + if ((num=res->res_fs.num_excludes)) { + while (--num >= 0) { + free_incexe(res->res_fs.exclude_items[num]); + } + free(res->res_fs.exclude_items); + } + res->res_fs.num_excludes = 0; + break; + case R_POOL: + if (res->res_pool.pool_type) { + free(res->res_pool.pool_type); + } + if (res->res_pool.label_format) { + free(res->res_pool.label_format); + } + if (res->res_pool.cleaning_prefix) { + free(res->res_pool.cleaning_prefix); + } + if (res->res_pool.storage) { + delete res->res_pool.storage; + } + break; + case R_SCHEDULE: + if (res->res_sch.run) { + RUN *nrun, *next; + nrun = res->res_sch.run; + while (nrun) { + next = nrun->next; + free(nrun); + nrun = next; + } + } + break; + case R_JOB: + case R_JOBDEFS: + if (res->res_job.RestoreWhere) { + 
free(res->res_job.RestoreWhere); + } + if (res->res_job.RegexWhere) { + free(res->res_job.RegexWhere); + } + if (res->res_job.strip_prefix) { + free(res->res_job.strip_prefix); + } + if (res->res_job.add_prefix) { + free(res->res_job.add_prefix); + } + if (res->res_job.add_suffix) { + free(res->res_job.add_suffix); + } + if (res->res_job.RestoreBootstrap) { + free(res->res_job.RestoreBootstrap); + } + if (res->res_job.RestoreClient) { + free(res->res_job.RestoreClient); + } + if (res->res_job.WriteBootstrap) { + free(res->res_job.WriteBootstrap); + } + if (res->res_job.PluginOptions) { + free(res->res_job.PluginOptions); + } + if (res->res_job.selection_pattern) { + free(res->res_job.selection_pattern); + } + if (res->res_job.run_cmds) { + delete res->res_job.run_cmds; + } + if (res->res_job.storage) { + delete res->res_job.storage; + } + if (res->res_job.base) { + delete res->res_job.base; + } + if (res->res_job.RunScripts) { + free_runscripts(res->res_job.RunScripts); + delete res->res_job.RunScripts; + } + break; + case R_MSGS: + if (res->res_msgs.mail_cmd) { + free(res->res_msgs.mail_cmd); + } + if (res->res_msgs.operator_cmd) { + free(res->res_msgs.operator_cmd); + } + free_msgs_res((MSGS *)res); /* free message resource */ + res = NULL; + break; + default: + printf(_("Unknown resource type %d in free_resource.\n"), type); + } + /* Common stuff again -- free the resource, recurse to next one */ + if (res) { + free(res); + } +} + +/* + * Save the new resource by chaining it into the head list for + * the resource. If this is pass 2, we update any resource + * pointers because they may not have been defined until + * later in pass 1. + */ +bool save_resource(CONFIG *config, int type, RES_ITEM *items, int pass) +{ + URES *res; + int rindex = type - r_first; + int i, size = 0; + bool error = false; + + /* Check Job requirements after applying JobDefs */ + if (type != R_JOB && type != R_JOBDEFS) { + /* + * Ensure that all required items are present + */ + for (i=0; items[i].name; i++) { + if (items[i].flags & ITEM_REQUIRED) { + if (!bit_is_set(i, res_all.res_dir.hdr.item_present)) { + Mmsg(config->m_errmsg, _("\"%s\" directive is required in \"%s\" resource, but not found.\n"), + items[i].name, resources[rindex].name); + return false; + } + } + /* If this triggers, take a look at lib/parse_conf.h */ + if (i >= MAX_RES_ITEMS) { + Mmsg(config->m_errmsg, _("Too many directives in \"%s\" resource\n"), resources[rindex].name); + return false; + } + } + } else if (type == R_JOB) { + /* + * Ensure that the name item is present + */ + if (items[0].flags & ITEM_REQUIRED) { + if (!bit_is_set(0, res_all.res_dir.hdr.item_present)) { + Mmsg(config->m_errmsg, _("\"%s\" directive is required in \"%s\" resource, but not found.\n"), + items[0].name, resources[rindex].name); + return false; + } + } + } + + /* + * During pass 2 in each "store" routine, we looked up pointers + * to all the resources referrenced in the current resource, now we + * must copy their addresses from the static record to the allocated + * record. + */ + if (pass == 2) { + switch (type) { + /* Resources not containing a resource */ + case R_CATALOG: + case R_MSGS: + case R_FILESET: + case R_DEVICE: + break; + + /* + * Resources containing another resource or alist. First + * look up the resource which contains another resource. It + * was written during pass 1. Then stuff in the pointers to + * the resources it contains, which were inserted this pass. + * Finally, it will all be stored back. 
+ */ + case R_POOL: + /* Find resource saved in pass 1 */ + if ((res = (URES *)GetResWithName(R_POOL, res_all.res_con.hdr.name)) == NULL) { + Mmsg(config->m_errmsg, _("Cannot find Pool resource %s\n"), res_all.res_con.hdr.name); + return false; + } + /* Explicitly copy resource pointers from this pass (res_all) */ + res->res_pool.NextPool = res_all.res_pool.NextPool; + res->res_pool.RecyclePool = res_all.res_pool.RecyclePool; + res->res_pool.ScratchPool = res_all.res_pool.ScratchPool; + res->res_pool.storage = res_all.res_pool.storage; + res->res_pool.catalog = res_all.res_pool.catalog; + break; + case R_CONSOLE: + if ((res = (URES *)GetResWithName(R_CONSOLE, res_all.res_con.hdr.name)) == NULL) { + Mmsg(config->m_errmsg, _("Cannot find Console resource %s\n"), res_all.res_con.hdr.name); + return false; + } + res->res_con.tls_allowed_cns = res_all.res_con.tls_allowed_cns; + break; + case R_DIRECTOR: + if ((res = (URES *)GetResWithName(R_DIRECTOR, res_all.res_dir.hdr.name)) == NULL) { + Mmsg(config->m_errmsg, _("Cannot find Director resource %s\n"), res_all.res_dir.hdr.name); + return false; + } + res->res_dir.messages = res_all.res_dir.messages; + res->res_dir.tls_allowed_cns = res_all.res_dir.tls_allowed_cns; + break; + case R_AUTOCHANGER: /* alias for R_STORAGE */ + case R_STORAGE: + type = R_STORAGE; /* force Storage type */ + if ((res = (URES *)GetResWithName(type, res_all.res_store.hdr.name)) == NULL) { + Mmsg(config->m_errmsg, _("Cannot find Storage resource %s\n"), + res_all.res_dir.hdr.name); + return false; + } + /* we must explicitly copy the device alist pointer */ + res->res_store.device = res_all.res_store.device; + res->res_store.changer = res_all.res_store.changer; + res->res_store.shared_storage = res_all.res_store.shared_storage; + res->res_store.autochanger = res_all.res_store.autochanger; + /* The resource name is Autochanger instead of Storage + * so we force the Autochanger attributes + */ + if (strcasecmp(resources[rindex].name, "autochanger") == 0) { + /* The Autochanger resource might be already defined */ + res->res_store.changer = (res->res_store.changer == NULL)? 
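+ /* keep a changer already set via the Autochanger directive, otherwise this Autochanger resource acts as its own changer */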
&res->res_store : res->res_store.changer; + res->res_store.autochanger = true; + } + break; + case R_JOB: + case R_JOBDEFS: + if ((res = (URES *)GetResWithName(type, res_all.res_dir.hdr.name)) == NULL) { + Mmsg(config->m_errmsg, _("Cannot find Job resource %s\n"), + res_all.res_dir.hdr.name); + return false; + } + res->res_job.messages = res_all.res_job.messages; + res->res_job.schedule = res_all.res_job.schedule; + res->res_job.client = res_all.res_job.client; + res->res_job.fileset = res_all.res_job.fileset; + res->res_job.storage = res_all.res_job.storage; + res->res_job.base = res_all.res_job.base; + res->res_job.pool = res_all.res_job.pool; + res->res_job.next_pool = res_all.res_job.next_pool; + res->res_job.full_pool = res_all.res_job.full_pool; + res->res_job.vfull_pool = res_all.res_job.vfull_pool; + res->res_job.inc_pool = res_all.res_job.inc_pool; + res->res_job.diff_pool = res_all.res_job.diff_pool; + res->res_job.verify_job = res_all.res_job.verify_job; + res->res_job.jobdefs = res_all.res_job.jobdefs; + res->res_job.run_cmds = res_all.res_job.run_cmds; + res->res_job.RunScripts = res_all.res_job.RunScripts; + + /* TODO: JobDefs where/regexwhere doesn't work well (but this + * is not very useful) + * We have to set_bit(index, res_all.hdr.item_present); + * or something like that + */ + + /* we take RegexWhere before all other options */ + if (!res->res_job.RegexWhere + && + (res->res_job.strip_prefix || + res->res_job.add_suffix || + res->res_job.add_prefix)) + { + int len = bregexp_get_build_where_size(res->res_job.strip_prefix, + res->res_job.add_prefix, + res->res_job.add_suffix); + res->res_job.RegexWhere = (char *) bmalloc (len * sizeof(char)); + bregexp_build_where(res->res_job.RegexWhere, len, + res->res_job.strip_prefix, + res->res_job.add_prefix, + res->res_job.add_suffix); + /* TODO: test bregexp */ + } + + if (res->res_job.RegexWhere && res->res_job.RestoreWhere) { + free(res->res_job.RestoreWhere); + res->res_job.RestoreWhere = NULL; + } + + break; + case R_COUNTER: + if ((res = (URES *)GetResWithName(R_COUNTER, res_all.res_counter.hdr.name)) == NULL) { + Mmsg(config->m_errmsg, _("Cannot find Counter resource %s\n"), res_all.res_counter.hdr.name); + return false; + } + res->res_counter.Catalog = res_all.res_counter.Catalog; + res->res_counter.WrapCounter = res_all.res_counter.WrapCounter; + break; + + case R_CLIENT: + if ((res = (URES *)GetResWithName(R_CLIENT, res_all.res_client.name())) == NULL) { + Mmsg(config->m_errmsg, _("Cannot find Client resource %s\n"), res_all.res_client.name()); + return false; + } + res->res_client.catalog = res_all.res_client.catalog; + res->res_client.tls_allowed_cns = res_all.res_client.tls_allowed_cns; + break; + case R_SCHEDULE: + /* + * Schedule is a bit different in that it contains a RUN record + * chain which isn't a "named" resource. This chain was linked + * in by run_conf.c during pass 2, so here we jam the pointer + * into the Schedule resource. + */ + if ((res = (URES *)GetResWithName(R_SCHEDULE, res_all.res_client.name())) == NULL) { + Mmsg(config->m_errmsg, _("Cannot find Schedule resource %s\n"), res_all.res_client.name()); + return false; + } + res->res_sch.run = res_all.res_sch.run; + break; + default: + Emsg1(M_ERROR, 0, _("Unknown resource type %d in save_resource.\n"), type); + error = true; + break; + } + /* Note, the resource name was already saved during pass 1, + * so here, we can just release it. 
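+ * (The permanent record created in pass 1 already owns its own copy of
+ * the name and description; only the scratch copies left in res_all by
+ * re-parsing the resource in pass 2 are freed here.)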
+ */ + if (res_all.res_dir.hdr.name) { + free(res_all.res_dir.hdr.name); + res_all.res_dir.hdr.name = NULL; + } + if (res_all.res_dir.hdr.desc) { + free(res_all.res_dir.hdr.desc); + res_all.res_dir.hdr.desc = NULL; + } + return true; + } + + /* R_AUTOCHANGER is alias so turn it into an R_STORAGE */ + if (type == R_AUTOCHANGER) { + type = R_STORAGE; + rindex = type - r_first; + } + + /* + * The following code is only executed during pass 1 + */ + switch (type) { + case R_DIRECTOR: + size = sizeof(DIRRES); + break; + case R_CONSOLE: + size = sizeof(CONRES); + break; + case R_CLIENT: + size =sizeof(CLIENT); + break; + case R_STORAGE: + size = sizeof(STORE); + break; + case R_CATALOG: + size = sizeof(CAT); + break; + case R_JOB: + case R_JOBDEFS: + size = sizeof(JOB); + break; + case R_FILESET: + size = sizeof(FILESET); + break; + case R_SCHEDULE: + size = sizeof(SCHED); + break; + case R_POOL: + size = sizeof(POOL); + break; + case R_MSGS: + size = sizeof(MSGS); + break; + case R_COUNTER: + size = sizeof(COUNTER); + break; + case R_DEVICE: + error = true; + break; + default: + printf(_("Unknown resource type %d in save_resource.\n"), type); + error = true; + break; + } + /* Common */ + if (!error) { + if (!config->insert_res(rindex, size)) { + return false; + } + } + return true; +} + +void store_actiononpurge(LEX *lc, RES_ITEM *item, int index, int pass) +{ + uint32_t *destination = (uint32_t*)item->value; + lex_get_token(lc, T_NAME); + if (strcasecmp(lc->str, "truncate") == 0) { + *destination = (*destination) | ON_PURGE_TRUNCATE; + } else { + scan_err2(lc, _("Expected one of: %s, got: %s"), "Truncate", lc->str); + return; + } + scan_to_eol(lc); + set_bit(index, res_all.hdr.item_present); +} + +/* + * Store an autochanger resource. Used by Autochanger and + * SharedStorage direcives. + */ +void store_ac_res(LEX *lc, RES_ITEM *item, int index, int pass) +{ + RES *res; + RES_ITEM *next = item + 1; + + lex_get_token(lc, T_NAME); + Dmsg1(100, "Got name=%s\n", lc->str); + /* + * For backward compatibility, if yes/no, set the next item + */ + if (strcasecmp(item->name, "autochanger") == 0) { + if (strcasecmp(lc->str, "yes") == 0 || strcasecmp(lc->str, "true") == 0) { + *(bool *)(next->value) = true; + *(item->value) = NULL; + Dmsg2(100, "Item=%s got value=%s\n", item->name, lc->str); + scan_to_eol(lc); + return; + } else if (strcasecmp(lc->str, "no") == 0 || strcasecmp(lc->str, "false") == 0) { + *(bool *)(next->value) = false; + *(item->value) = NULL; + Dmsg2(100, "Item=%s got value=%s\n", item->name, lc->str); + scan_to_eol(lc); + return; + } + } + Dmsg2(100, "Item=%s got value=%s\n", item->name, lc->str); + + if (pass == 2) { + res = GetResWithName(R_STORAGE, lc->str); + if (res == NULL) { + scan_err3(lc, _("Could not find Storage Resource %s referenced on line %d : %s\n"), + lc->str, lc->line_no, lc->line); + return; + } + if (*(item->value)) { + scan_err3(lc, _("Attempt to redefine Storage resource \"%s\" referenced on line %d : %s\n"), + item->name, lc->line_no, lc->line); + return; + } + Dmsg2(100, "Store %s value=%p\n", lc->str, res); + *(item->value) = (char *)res; + if (strcasecmp(item->name, "autochanger") == 0) { + *(bool *)(next->value) = true; + } + } + scan_to_eol(lc); + set_bit(index, res_all.hdr.item_present); +} + + +/* + * Store Device. Note, the resource is created upon the + * first reference. The details of the resource are obtained + * later from the SD. 
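+ * In pass 1 the first reference allocates a bare DEVICE record keyed only
+ * by its name (duplicate references are simply discarded); in pass 2 the
+ * reference is stored like any other resource list via store_alist_res().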
+ */ +void store_device(LEX *lc, RES_ITEM *item, int index, int pass) +{ + int rindex = R_DEVICE - r_first; + int size = sizeof(DEVICE); + + if (pass == 1) { + URES *ures; + RES *res; + + lex_get_token(lc, T_NAME); + rblist *list = res_head[rindex]->res_list; + ures = (URES *)malloc(size); + memset(ures, 0, size); + ures->res_dev.hdr.name = bstrdup(lc->str); + res = (RES *)ures; + if (list->empty()) { + list->insert(res, res_compare); + res_head[rindex]->first = res; + res_head[rindex]->last = res; + } else { + RES *item, *prev; + prev = res_head[rindex]->last; + item = (RES *)list->insert(res, res_compare); + if (item == res) { + prev->res_next = res; + res_head[rindex]->last = res; + } else { + /* res not inserted */ + free(ures->res_dev.hdr.name); + free(ures); + } + } + scan_to_eol(lc); + set_bit(index, res_all.hdr.item_present); + } else { + store_alist_res(lc, item, index, pass); + } +} + +/* + * Store Migration/Copy type + * + */ +void store_migtype(LEX *lc, RES_ITEM *item, int index, int pass) +{ + int i; + + lex_get_token(lc, T_NAME); + /* Store the type both pass 1 and pass 2 */ + for (i=0; migtypes[i].type_name; i++) { + if (strcasecmp(lc->str, migtypes[i].type_name) == 0) { + *(uint32_t *)(item->value) = migtypes[i].job_type; + i = 0; + break; + } + } + if (i != 0) { + scan_err1(lc, _("Expected a Migration Job Type keyword, got: %s"), lc->str); + } + scan_to_eol(lc); + set_bit(index, res_all.hdr.item_present); +} + + + +/* + * Store JobType (backup, verify, restore) + * + */ +void store_jobtype(LEX *lc, RES_ITEM *item, int index, int pass) +{ + int i; + + lex_get_token(lc, T_NAME); + /* Store the type both pass 1 and pass 2 */ + for (i=0; jobtypes[i].type_name; i++) { + if (strcasecmp(lc->str, jobtypes[i].type_name) == 0) { + *(uint32_t *)(item->value) = jobtypes[i].job_type; + i = 0; + break; + } + } + if (i != 0) { + scan_err1(lc, _("Expected a Job Type keyword, got: %s"), lc->str); + } + scan_to_eol(lc); + set_bit(index, res_all.hdr.item_present); +} + +/* + * Store Job Level (Full, Incremental, ...) 
+ * + */ +void store_level(LEX *lc, RES_ITEM *item, int index, int pass) +{ + int i; + + lex_get_token(lc, T_NAME); + /* Store the level pass 2 so that type is defined */ + for (i=0; joblevels[i].level_name; i++) { + if (strcasecmp(lc->str, joblevels[i].level_name) == 0) { + *(uint32_t *)(item->value) = joblevels[i].level; + i = 0; + break; + } + } + if (i != 0) { + scan_err1(lc, _("Expected a Job Level keyword, got: %s"), lc->str); + } + scan_to_eol(lc); + set_bit(index, res_all.hdr.item_present); +} + + +void store_replace(LEX *lc, RES_ITEM *item, int index, int pass) +{ + int i; + lex_get_token(lc, T_NAME); + /* Scan Replacement options */ + for (i=0; ReplaceOptions[i].name; i++) { + if (strcasecmp(lc->str, ReplaceOptions[i].name) == 0) { + *(uint32_t *)(item->value) = ReplaceOptions[i].token; + i = 0; + break; + } + } + if (i != 0) { + scan_err1(lc, _("Expected a Restore replacement option, got: %s"), lc->str); + } + scan_to_eol(lc); + set_bit(index, res_all.hdr.item_present); +} + +/* + * Store ACL (access control list) + * + */ +void store_acl(LEX *lc, RES_ITEM *item, int index, int pass) +{ + int token; + + for (;;) { + lex_get_token(lc, T_STRING); + if (pass == 1) { + if (((alist **)item->value)[item->code] == NULL) { + ((alist **)item->value)[item->code] = New(alist(10, owned_by_alist)); + Dmsg1(900, "Defined new ACL alist at %d\n", item->code); + } + ((alist **)item->value)[item->code]->append(bstrdup(lc->str)); + Dmsg2(900, "Appended to %d %s\n", item->code, lc->str); + } + token = lex_get_token(lc, T_ALL); + if (token == T_COMMA) { + continue; /* get another ACL */ + } + break; + } + set_bit(index, res_all.hdr.item_present); +} + +/* We build RunScripts items here */ +static RUNSCRIPT res_runscript; + +/* Store a runscript->when in a bit field */ +static void store_runscript_when(LEX *lc, RES_ITEM *item, int index, int pass) +{ + lex_get_token(lc, T_NAME); + + if (strcasecmp(lc->str, "before") == 0) { + *(uint32_t *)(item->value) = SCRIPT_Before ; + } else if (strcasecmp(lc->str, "after") == 0) { + *(uint32_t *)(item->value) = SCRIPT_After; + } else if (strcasecmp(lc->str, "aftervss") == 0) { + *(uint32_t *)(item->value) = SCRIPT_AfterVSS; + } else if (strcasecmp(lc->str, "aftersnapshot") == 0) { + *(uint32_t *)(item->value) = SCRIPT_AfterVSS; + } else if (strcasecmp(lc->str, "always") == 0) { + *(uint32_t *)(item->value) = SCRIPT_Any; + } else { + scan_err2(lc, _("Expect %s, got: %s"), "Before, After, AfterVSS or Always", lc->str); + } + scan_to_eol(lc); +} + +/* Store a runscript->target + * + */ +static void store_runscript_target(LEX *lc, RES_ITEM *item, int index, int pass) +{ + lex_get_token(lc, T_STRING); + + if (pass == 2) { + if (strcmp(lc->str, "%c") == 0) { + ((RUNSCRIPT*) item->value)->set_target(lc->str); + } else if (strcasecmp(lc->str, "yes") == 0) { + ((RUNSCRIPT*) item->value)->set_target("%c"); + } else if (strcasecmp(lc->str, "no") == 0) { + ((RUNSCRIPT*) item->value)->set_target(""); + } else { + RES *res = GetResWithName(R_CLIENT, lc->str); + if (res == NULL) { + scan_err3(lc, _("Could not find config Resource %s referenced on line %d : %s\n"), + lc->str, lc->line_no, lc->line); + } + + ((RUNSCRIPT*) item->value)->set_target(lc->str); + } + } + scan_to_eol(lc); +} + +/* + * Store a runscript->command as a string and runscript->cmd_type as a pointer + */ +static void store_runscript_cmd(LEX *lc, RES_ITEM *item, int index, int pass) +{ + lex_get_token(lc, T_STRING); + + if (pass == 2) { + Dmsg2(1, "runscript cmd=%s type=%c\n", lc->str, item->code); + POOLMEM 
*c = get_pool_memory(PM_FNAME); + /* Each runscript command takes 2 entries in commands list */ + pm_strcpy(c, lc->str); + ((RUNSCRIPT*) item->value)->commands->prepend(c); /* command line */ + ((RUNSCRIPT*) item->value)->commands->prepend((void *)(intptr_t)item->code); /* command type */ + } + scan_to_eol(lc); +} + +static void store_short_runscript(LEX *lc, RES_ITEM *item, int index, int pass) +{ + lex_get_token(lc, T_STRING); + alist **runscripts = (alist **)(item->value) ; + + if (pass == 2) { + RUNSCRIPT *script = new_runscript(); + script->set_job_code_callback(job_code_callback_director); + + script->set_command(lc->str); + + /* TODO: remove all script->old_proto with bacula 1.42 */ + + if (strcasecmp(item->name, "runbeforejob") == 0) { + script->when = SCRIPT_Before; + script->fail_on_error = true; + script->set_target(""); + + } else if (strcasecmp(item->name, "runafterjob") == 0) { + script->when = SCRIPT_After; + script->on_success = true; + script->on_failure = false; + script->set_target(""); + + } else if (strcasecmp(item->name, "clientrunbeforejob") == 0) { + script->old_proto = true; + script->when = SCRIPT_Before; + script->set_target("%c"); + script->fail_on_error = true; + + } else if (strcasecmp(item->name, "clientrunafterjob") == 0) { + script->old_proto = true; + script->when = SCRIPT_After; + script->set_target("%c"); + script->on_success = true; + script->on_failure = false; + + } else if (strcasecmp(item->name, "consolerunbeforejob") == 0) { + script->when = SCRIPT_Before; + script->set_target(""); + script->fail_on_error = true; + script->set_command(NPRT(script->command), CONSOLE_CMD); + + } else if (strcasecmp(item->name, "consolerunafterjob") == 0) { + script->when = SCRIPT_After; + script->set_target(""); + script->on_success = true; + script->on_failure = false; + script->set_command(NPRT(script->command), CONSOLE_CMD); + + } else if (strcasecmp(item->name, "runafterfailedjob") == 0) { + script->when = SCRIPT_After; + script->on_failure = true; + script->on_success = false; + script->set_target(""); + } + + if (*runscripts == NULL) { + *runscripts = New(alist(10, not_owned_by_alist)); + } + + (*runscripts)->append(script); + script->debug(); + } + scan_to_eol(lc); + set_bit(index, res_all.hdr.item_present); +} + +/* Store a bool in a bit field without modifing res_all.hdr + * We can also add an option to store_bool to skip res_all.hdr + */ +void store_runscript_bool(LEX *lc, RES_ITEM *item, int index, int pass) +{ + lex_get_token(lc, T_NAME); + if (strcasecmp(lc->str, "yes") == 0 || strcasecmp(lc->str, "true") == 0) { + *(bool *)(item->value) = true; + } else if (strcasecmp(lc->str, "no") == 0 || strcasecmp(lc->str, "false") == 0) { + *(bool *)(item->value) = false; + } else { + scan_err2(lc, _("Expect %s, got: %s"), "YES, NO, TRUE, or FALSE", lc->str); /* YES and NO must not be translated */ + } + scan_to_eol(lc); +} + +/* + * new RunScript items + * name handler value code flags default_value + */ +static RES_ITEM runscript_items[] = { + {"command", store_runscript_cmd, {(char **)&res_runscript}, SHELL_CMD, 0, 0}, + {"console", store_runscript_cmd, {(char **)&res_runscript}, CONSOLE_CMD, 0, 0}, + {"target", store_runscript_target,{(char **)&res_runscript}, 0, 0, 0}, + {"runsonsuccess", store_runscript_bool, {(char **)&res_runscript.on_success},0, 0, 0}, + {"runsonfailure", store_runscript_bool, {(char **)&res_runscript.on_failure},0, 0, 0}, + {"failjobonerror",store_runscript_bool, {(char **)&res_runscript.fail_on_error},0, 0, 0}, + 
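/* "abortjobonerror" below is a synonym of "failjobonerror"; both store into fail_on_error */ +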
{"abortjobonerror",store_runscript_bool, {(char **)&res_runscript.fail_on_error},0, 0, 0}, + {"runswhen", store_runscript_when, {(char **)&res_runscript.when}, 0, 0, 0}, + {"runsonclient", store_runscript_target,{(char **)&res_runscript}, 0, 0, 0}, /* TODO */ + {NULL, NULL, {0}, 0, 0, 0} +}; + +/* + * Store RunScript info + * + * Note, when this routine is called, we are inside a Job + * resource. We treat the RunScript like a sort of + * mini-resource within the Job resource. + */ +void store_runscript(LEX *lc, RES_ITEM *item, int index, int pass) +{ + char *c; + int token, i, t; + alist **runscripts = (alist **)(item->value) ; + + Dmsg1(200, "store_runscript: begin store_runscript pass=%i\n", pass); + + token = lex_get_token(lc, T_SKIP_EOL); + + if (token != T_BOB) { + scan_err1(lc, _("Expecting open brace. Got %s"), lc->str); + } + /* setting on_success, on_failure, fail_on_error */ + res_runscript.reset_default(); + + if (pass == 2) { + res_runscript.commands = New(alist(10, not_owned_by_alist)); + } + + while ((token = lex_get_token(lc, T_SKIP_EOL)) != T_EOF) { + if (token == T_EOB) { + break; + } + if (token != T_IDENTIFIER) { + scan_err1(lc, _("Expecting keyword, got: %s\n"), lc->str); + } + for (i=0; runscript_items[i].name; i++) { + if (strcasecmp(runscript_items[i].name, lc->str) == 0) { + token = lex_get_token(lc, T_SKIP_EOL); + if (token != T_EQUALS) { + scan_err1(lc, _("expected an equals, got: %s"), lc->str); + } + + /* Call item handler */ + runscript_items[i].handler(lc, &runscript_items[i], i, pass); + i = -1; + break; + } + } + + if (i >=0) { + scan_err1(lc, _("Keyword %s not permitted in this resource"), lc->str); + } + } + + if (pass == 2) { + /* run on client by default */ + if (res_runscript.target == NULL) { + res_runscript.set_target("%c"); + } + if (*runscripts == NULL) { + *runscripts = New(alist(10, not_owned_by_alist)); + } + /* + * commands list contains 2 values per command + * - POOLMEM command string (ex: /bin/true) + * - int command type (ex: SHELL_CMD) + */ + res_runscript.set_job_code_callback(job_code_callback_director); + while ((c=(char*)res_runscript.commands->pop()) != NULL) { + t = (intptr_t)res_runscript.commands->pop(); + RUNSCRIPT *script = new_runscript(); + memcpy(script, &res_runscript, sizeof(RUNSCRIPT)); + script->command = c; + script->cmd_type = t; + /* target is taken from res_runscript, each runscript object have + * a copy + */ + script->target = NULL; + script->set_target(res_runscript.target); + + (*runscripts)->append(script); + script->debug(); + } + delete res_runscript.commands; + /* setting on_success, on_failure... 
cleanup target field */ + res_runscript.reset_default(true); + } + + scan_to_eol(lc); + set_bit(index, res_all.hdr.item_present); +} + +/* callback function for edit_job_codes */ +/* See ../lib/util.c, function edit_job_codes, for more remaining codes */ +extern "C" char *job_code_callback_director(JCR *jcr, const char* param, char *buf, int buflen) +{ + static char yes[] = "yes"; + static char no[] = "no"; + static char nothing[] = ""; + + if (jcr == NULL) { + return nothing; + } + ASSERTD(buflen < 255, "buflen must be long enough to hold an ip address"); + switch (param[0]) { + case 'f': + if (jcr->fileset) { + return jcr->fileset->name(); + } + break; + case 'h': + if (jcr->client) { + POOL_MEM tmp; + jcr->client->address(tmp.addr()); + bstrncpy(buf, tmp.c_str(), buflen); + return buf; + } + break; + case 'p': + if (jcr->pool) { + return jcr->pool->name(); + } + break; + case 'w': + if (jcr->wstore) { + return jcr->wstore->name(); + } + break; + case 'x': + return jcr->spool_data ? yes : no; + case 'D': + return my_name; + case 'C': + return jcr->cloned ? yes : no; + case 'I': + if (buflen >= 50) { + if (jcr->wjcr) { + edit_uint64(jcr->wjcr->JobId, buf); + return buf; + } else { + edit_uint64(0, buf); + return buf; + } + } + } + return nothing; +} + +bool parse_dir_config(CONFIG *config, const char *configfile, int exit_code) +{ + config->init(configfile, NULL, exit_code, (void *)&res_all, res_all_size, + r_first, r_last, resources, &res_head); + return config->parse_config(); +} diff --git a/src/dird/dird_conf.h b/src/dird/dird_conf.h new file mode 100644 index 00000000..15bfb374 --- /dev/null +++ b/src/dird/dird_conf.h @@ -0,0 +1,790 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Director specific configuration and defines + * + * Kern Sibbald, Feb MM + */ + +/* NOTE: #includes at the end of this file */ + +/* + * Resource codes -- they must be sequential for indexing + */ +enum { + R_DIRECTOR = 1001, + R_CLIENT, + R_JOB, + R_STORAGE, + R_CATALOG, + R_SCHEDULE, + R_FILESET, + R_POOL, + R_MSGS, + R_COUNTER, + R_CONSOLE, + R_JOBDEFS, + R_DEVICE, /* This is the real last device class */ + + R_AUTOCHANGER, /* Alias for R_STORAGE after R_LAST */ + R_FIRST = R_DIRECTOR, + R_LAST = R_DEVICE /* keep this updated */ +}; + + +/* + * Some resource attributes + */ +enum { + R_NAME = 1020, + R_ADDRESS, + R_PASSWORD, + R_TYPE, + R_BACKUP +}; + +/* Options for FileSet keywords */ +struct s_fs_opt { + const char *name; + int keyword; + const char *option; +}; + +/* Job Level keyword structure */ +struct s_jl { + const char *level_name; /* level keyword */ + int32_t level; /* level */ + int32_t job_type; /* JobType permitting this level */ +}; + +/* Job Type keyword structure */ +struct s_jt { + const char *type_name; + int32_t job_type; +}; + +/* Definition of the contents of each Resource */ +/* Needed for forward references */ +class SCHED; +class CLIENT; +class FILESET; +class POOL; +class RUN; +class DEVICE; +class RUNSCRIPT; + +/* + * Director Resource + * + */ +class DIRRES { +public: + RES hdr; + dlist *DIRaddrs; + dlist *DIRsrc_addr; /* address to source connections from */ + char *password; /* Password for UA access */ + char *query_file; /* SQL query file */ + char *working_directory; /* WorkingDirectory */ + const char *scripts_directory; /* ScriptsDirectory */ + const char *plugin_directory; /* Plugin Directory */ + char *pid_directory; /* PidDirectory */ + char *subsys_directory; /* SubsysDirectory */ + MSGS *messages; /* Daemon message handler */ + uint32_t MaxConcurrentJobs; /* Max concurrent jobs for whole director */ + uint32_t MaxSpawnedJobs; /* Max Jobs that can be started by Migration/Copy */ + uint32_t MaxConsoleConnect; /* Max concurrent console session */ + uint32_t MaxReload; /* Maximum reload requests */ + utime_t FDConnectTimeout; /* timeout for connect in seconds */ + utime_t SDConnectTimeout; /* timeout in seconds */ + utime_t heartbeat_interval; /* Interval to send heartbeats */ + char *tls_ca_certfile; /* TLS CA Certificate File */ + char *tls_ca_certdir; /* TLS CA Certificate Directory */ + char *tls_certfile; /* TLS Server Certificate File */ + char *tls_keyfile; /* TLS Server Key File */ + char *tls_dhfile; /* TLS Diffie-Hellman Parameters */ + alist *tls_allowed_cns; /* TLS Allowed Clients */ + TLS_CONTEXT *tls_ctx; /* Shared TLS Context */ + utime_t stats_retention; /* Stats retention period in seconds */ + bool comm_compression; /* Enable comm line compression */ + bool tls_authenticate; /* Authenticated with TLS */ + bool tls_enable; /* Enable TLS */ + bool tls_require; /* Require TLS */ + bool tls_verify_peer; /* TLS Verify Client Certificate */ + char *verid; /* Custom Id to print in version command */ + /* Methods */ + char *name() const; +}; + +inline char *DIRRES::name() const { return hdr.name; } + +/* + * Device Resource + * This resource is a bit different from the other resources + * because it is not defined in the Director + * by DEVICE { ... }, but rather by a "reference" such as + * DEVICE = xxx; Then when the Director connects to the + * SD, it requests the information about the device. 
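+ * For example (illustrative names only), a Director Storage resource
+ * such as
+ *
+ *   Storage {
+ *     Name = Autochanger-1
+ *     Address = sd.example.com
+ *     Device = LTO-Drive-1
+ *     Media Type = LTO-6
+ *     ...
+ *   }
+ *
+ * creates a DEVICE record named LTO-Drive-1; the status fields below are
+ * later filled in from the SD, not from the configuration file.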
+ */ +class DEVICE { +public: + RES hdr; + + bool found; /* found with SD */ + int32_t num_writers; /* number of writers */ + int32_t max_writers; /* = 1 for files */ + int32_t reserved; /* number of reserves */ + int32_t num_drives; /* for autochanger */ + bool autochanger; /* set if device is autochanger */ + bool open; /* drive open */ + bool append; /* in append mode */ + bool read; /* in read mode */ + bool labeled; /* Volume name valid */ + bool offline; /* not available */ + bool autoselect; /* can be selected via autochanger */ + uint32_t PoolId; + char ChangerName[MAX_NAME_LENGTH]; + char VolumeName[MAX_NAME_LENGTH]; + char MediaType[MAX_NAME_LENGTH]; + + /* Methods */ + char *name() const; +}; + +inline char *DEVICE::name() const { return hdr.name; } + +/* + * Console ACL positions + */ +enum { + Job_ACL = 0, + Client_ACL, + Storage_ACL, + Schedule_ACL, + Run_ACL, + Pool_ACL, + Command_ACL, + FileSet_ACL, + Catalog_ACL, + Where_ACL, + PluginOptions_ACL, + RestoreClient_ACL, + BackupClient_ACL, + Directory_ACL, /* List of directories that can be accessed in the restore tree */ + Num_ACL /* keep last */ +}; + +/* + * Console Resource + */ +class CONRES { +public: + RES hdr; + char *password; /* UA server password */ + alist *ACL_lists[Num_ACL]; /* pointers to ACLs */ + char *tls_ca_certfile; /* TLS CA Certificate File */ + char *tls_ca_certdir; /* TLS CA Certificate Directory */ + char *tls_certfile; /* TLS Server Certificate File */ + char *tls_keyfile; /* TLS Server Key File */ + char *tls_dhfile; /* TLS Diffie-Hellman Parameters */ + alist *tls_allowed_cns; /* TLS Allowed Clients */ + TLS_CONTEXT *tls_ctx; /* Shared TLS Context */ + bool tls_authenticate; /* Authenticated with TLS */ + bool tls_enable; /* Enable TLS */ + bool tls_require; /* Require TLS */ + bool tls_verify_peer; /* TLS Verify Client Certificate */ + + /* Methods */ + char *name() const; +}; + +inline char *CONRES::name() const { return hdr.name; } + + +/* + * Catalog Resource + * + */ +class CAT { +public: + RES hdr; + + uint32_t db_port; /* Port */ + char *db_address; /* host name for remote access */ + char *db_socket; /* Socket for local access */ + char *db_password; + char *db_user; + char *db_name; + char *db_driver; /* Select appropriate driver */ + char *db_ssl_mode; /* specifies the security state of the connection to the server */ + char *db_ssl_key; /* the path name to the key file */ + char *db_ssl_cert; /* the path name to the certificate file */ + char *db_ssl_ca; /* the path name to the certificate authority file */ + char *db_ssl_capath; /* the path name to a directory that contains trusted SSL CA certificates in PEM format */ + char *db_ssl_cipher; /* a list of permissible ciphers to use for SSL encryption */ + uint32_t mult_db_connections; /* set for multiple db connections */ + bool disable_batch_insert; /* set to disable batch inserts */ + + /* Methods */ + char *name() const; + char *display(POOLMEM *dst); /* Get catalog information */ +}; + +inline char *CAT::name() const { return hdr.name; } + +class CLIENT_GLOBALS { +public: + dlink link; /* double link */ + const char *name; /* resource name */ + int32_t NumConcurrentJobs; /* number of concurrent jobs running */ + char *SetIPaddress; /* address from SetIP command */ + int enabled; /* -1: not set, 0 disabled, 1 enabled */ +}; + +/* + * Client Resource + * + */ +class CLIENT { +public: + RES hdr; + CLIENT_GLOBALS *globals; /* global variables */ + uint32_t FDport; /* Where File daemon listens */ + utime_t FileRetention; /* file retention 
period in seconds */ + utime_t JobRetention; /* job retention period in seconds */ + utime_t SnapRetention; /* Snapshot retention period in seconds */ + utime_t heartbeat_interval; /* Interval to send heartbeats */ + char *client_address; /* Client address from .conf file */ + char *fd_storage_address; /* Storage address to use from FD side */ + char *password; + CAT *catalog; /* Catalog resource */ + int32_t MaxConcurrentJobs; /* Maximum concurrent jobs */ + char *tls_ca_certfile; /* TLS CA Certificate File */ + char *tls_ca_certdir; /* TLS CA Certificate Directory */ + char *tls_certfile; /* TLS Client Certificate File */ + char *tls_keyfile; /* TLS Client Key File */ + alist *tls_allowed_cns; /* TLS Allowed Clients */ + TLS_CONTEXT *tls_ctx; /* Shared TLS Context */ + bool tls_authenticate; /* Authenticated with TLS */ + bool tls_enable; /* Enable TLS */ + bool tls_require; /* Require TLS */ + bool Enabled; /* Set if client enabled */ + bool AutoPrune; /* Do automatic pruning? */ + bool sd_calls_client; /* SD calls the client */ + int64_t max_bandwidth; /* Limit speed on this client */ + + /* Methods */ + char *name() const; + void create_client_globals(); + int32_t getNumConcurrentJobs(); + void setNumConcurrentJobs(int32_t num); + char *address(POOLMEM *&buf); + void setAddress(char *addr); + bool is_enabled(); + void setEnabled(bool val); +}; + +inline char *CLIENT::name() const { return hdr.name; } + + +class STORE_GLOBALS { +public: + dlink link; /* double link */ + const char *name; /* resource name */ + int32_t NumConcurrentJobs; /* number of concurrent jobs running */ + int32_t NumConcurrentReadJobs; /* number of concurrent read jobs running */ + int enabled; /* -1: not set, 0: disabled, 1: enabled */ +}; + + +/* + * Store Resource + * + */ +class STORE { +public: + RES hdr; + STORE_GLOBALS *globals; /* global variables */ + uint32_t SDport; /* port where Directors connect */ + uint32_t SDDport; /* data port for File daemon */ + char *address; + char *fd_storage_address; /* Storage address to use from FD side */ + char *password; + char *media_type; + alist *device; /* Alternate devices for this Storage */ + int32_t MaxConcurrentJobs; /* Maximum concurrent jobs */ + int32_t MaxConcurrentReadJobs; /* Maximum concurrent jobs reading */ + char *tls_ca_certfile; /* TLS CA Certificate File */ + char *tls_ca_certdir; /* TLS CA Certificate Directory */ + char *tls_certfile; /* TLS Client Certificate File */ + char *tls_keyfile; /* TLS Client Key File */ + TLS_CONTEXT *tls_ctx; /* Shared TLS Context */ + bool tls_authenticate; /* Authenticated with TLS */ + bool tls_enable; /* Enable TLS */ + bool tls_require; /* Require TLS */ + bool Enabled; /* Set if device is enabled */ + bool AllowCompress; /* set if this Storage should allow jobs to enable compression */ + bool autochanger; /* set if we are part of an autochanger */ + POOLMEM *ac_group; /* Autochanger StorageId group */ + STORE *changer; /* points to autochanger */ + STORE *shared_storage; /* points to shared storage */ + int64_t StorageId; /* Set from Storage DB record */ + utime_t heartbeat_interval; /* Interval to send heartbeats */ + uint32_t drives; /* number of drives in autochanger */ + + /* Methods */ + char *dev_name() const; + char *name() const; + void create_store_globals(); + int32_t getNumConcurrentJobs(); + int32_t getNumConcurrentReadJobs(); + void setNumConcurrentJobs(int32_t num); + void setNumConcurrentReadJobs(int32_t num); + bool is_enabled(); + void setEnabled(bool val); +}; + +inline char *STORE::dev_name() 
const +{ + DEVICE *dev = (DEVICE *)device->first(); + return dev->name(); +} + +inline char *STORE::name() const { return hdr.name; } + +/* + * This is a sort of "unified" store that has both the + * storage pointer and the text of where the pointer was + * found. + */ +class USTORE { +public: + STORE *store; + POOLMEM *store_source; + + /* Methods */ + USTORE() { store = NULL; store_source = get_pool_memory(PM_MESSAGE); + *store_source = 0; }; + ~USTORE() { destroy(); } + void set_source(const char *where); + void destroy(); +}; + +inline void USTORE::destroy() +{ + if (store_source) { + free_pool_memory(store_source); + store_source = NULL; + } +} + + +inline void USTORE::set_source(const char *where) +{ + if (!store_source) { + store_source = get_pool_memory(PM_MESSAGE); + } + pm_strcpy(store_source, where); +} + +class JOB_GLOBALS { +public: + dlink link; /* double link */ + const char *name; /* resource name */ + int32_t NumConcurrentJobs; /* number of concurrent jobs running */ + int enabled; /* -1: disabled, 0: disabled, 1: Enabled */ +}; + +/* + * Job Resource + */ +class JOB { +public: + RES hdr; + JOB_GLOBALS *globals; /* global variables */ + uint32_t JobType; /* job type (backup, verify, restore */ + uint32_t JobLevel; /* default backup/verify level */ + uint32_t RestoreJobId; /* What -- JobId to restore */ + uint32_t replace; /* How (overwrite, ..) */ + uint32_t selection_type; + + int32_t Priority; /* Job priority */ + int32_t RescheduleTimes; /* Number of times to reschedule job */ + + char *RestoreWhere; /* Where on disk to restore -- directory */ + char *RegexWhere; /* RegexWhere option */ + char *strip_prefix; /* remove prefix from filename */ + char *add_prefix; /* add prefix to filename */ + char *add_suffix; /* add suffix to filename -- .old */ + char *RestoreBootstrap; /* Bootstrap file */ + char *RestoreClient; /* Who to restore */ + char *PluginOptions; /* Options to pass to plugin */ + union { + char *WriteBootstrap; /* Where to write bootstrap Job updates */ + char *WriteVerifyList; /* List of changed files */ + }; + utime_t MaxRunTime; /* max run time in seconds */ + utime_t MaxWaitTime; /* max blocking time in seconds */ + utime_t FullMaxRunTime; /* Max Full job run time */ + utime_t DiffMaxRunTime; /* Max Differential job run time */ + utime_t IncMaxRunTime; /* Max Incremental job run time */ + utime_t MaxStartDelay; /* max start delay in seconds */ + utime_t MaxRunSchedTime; /* max run time in seconds from Scheduled time*/ + utime_t RescheduleInterval; /* Reschedule interval */ + utime_t MaxFullInterval; /* Maximum time interval between Fulls */ + utime_t MaxVirtualFullInterval; /* Maximum time interval between Virtual Fulls */ + utime_t MaxDiffInterval; /* Maximum time interval between Diffs */ + utime_t DuplicateJobProximity; /* Permitted time between duplicicates */ + utime_t SnapRetention; /* Snapshot retention period in seconds */ + int64_t spool_size; /* Size of spool file for this job */ + int32_t MaxConcurrentJobs; /* Maximum concurrent jobs */ + uint32_t MaxSpawnedJobs; /* Max Jobs that can be started by Migration/Copy */ + uint32_t BackupsToKeep; /* Number of backups to keep in Virtual Full */ + bool allow_mixed_priority; /* Allow jobs with higher priority concurrently with this */ + + MSGS *messages; /* How and where to send messages */ + SCHED *schedule; /* When -- Automatic schedule */ + CLIENT *client; /* Who to backup */ + FILESET *fileset; /* What to backup -- Fileset */ + alist *storage; /* Where is device -- list of Storage to be used */ + 
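/* Pool overrides: when set, the level-specific pools below (full_pool, inc_pool, diff_pool, vfull_pool) are used in place of pool for jobs run at that level */ +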
POOL *pool; /* Where is media -- Media Pool */ + POOL *next_pool; /* Next Pool for Copy/Migrate/VirtualFull */ + POOL *full_pool; /* Pool for Full backups */ + POOL *vfull_pool; /* Pool for Virtual Full backups */ + POOL *inc_pool; /* Pool for Incremental backups */ + POOL *diff_pool; /* Pool for Differental backups */ + char *selection_pattern; + union { + JOB *verify_job; /* Job name to verify */ + }; + JOB *jobdefs; /* Job defaults */ + alist *run_cmds; /* Run commands */ + alist *RunScripts; /* Run {client} program {after|before} Job */ + + bool where_use_regexp; /* true if RestoreWhere is a BREGEXP */ + bool RescheduleOnError; /* Set to reschedule on error */ + bool RescheduleIncompleteJobs; /* Set to reschedule incomplete Jobs */ + bool PrefixLinks; /* prefix soft links with Where path */ + bool PruneJobs; /* Force pruning of Jobs */ + bool PruneFiles; /* Force pruning of Files */ + bool PruneVolumes; /* Force pruning of Volumes */ + bool SpoolAttributes; /* Set to spool attributes in SD */ + bool spool_data; /* Set to spool data in SD */ + bool rerun_failed_levels; /* Upgrade to rerun failed levels */ + bool PreferMountedVolumes; /* Prefer vols mounted rather than new one */ + bool write_part_after_job; /* Set to write part after job in SD */ + bool Enabled; /* Set if job enabled */ + bool accurate; /* Set if it is an accurate backup job */ + bool AllowDuplicateJobs; /* Allow duplicate jobs */ + bool AllowHigherDuplicates; /* Permit Higher Level */ + bool CancelLowerLevelDuplicates; /* Cancel lower level backup jobs */ + bool CancelQueuedDuplicates; /* Cancel queued jobs */ + bool CancelRunningDuplicates; /* Cancel Running jobs */ + bool PurgeMigrateJob; /* Purges source job on completion */ + bool DeleteConsolidatedJobs; /* Delete or not consolidated Virtual Full jobs */ + + alist *base; /* Base jobs */ + int64_t max_bandwidth; /* Speed limit on this job */ + + /* Methods */ + char *name() const; + void create_job_globals(); + int32_t getNumConcurrentJobs(); + void setNumConcurrentJobs(int32_t num); + bool is_enabled(); + void setEnabled(bool val); +}; + +inline char *JOB::name() const { return hdr.name; } + +/* Define FileSet Options keyword values */ +enum { + INC_KW_NONE, + INC_KW_COMPRESSION, + INC_KW_DIGEST, + INC_KW_ENCRYPTION, + INC_KW_VERIFY, + INC_KW_BASEJOB, + INC_KW_ACCURATE, + INC_KW_ONEFS, + INC_KW_RECURSE, + INC_KW_SPARSE, + INC_KW_HARDLINK, + INC_KW_REPLACE, /* restore options */ + INC_KW_READFIFO, /* Causes fifo data to be read */ + INC_KW_PORTABLE, + INC_KW_MTIMEONLY, + INC_KW_KEEPATIME, + INC_KW_EXCLUDE, + INC_KW_ACL, + INC_KW_IGNORECASE, + INC_KW_HFSPLUS, + INC_KW_NOATIME, + INC_KW_ENHANCEDWILD, + INC_KW_CHKCHANGES, + INC_KW_STRIPPATH, + INC_KW_HONOR_NODUMP, + INC_KW_XATTR, + INC_KW_DEDUP, + INC_KW_MAX /* Keep this last */ +}; + + +#undef MAX_FOPTS +#define MAX_FOPTS 50 + +/* File options structure */ +struct FOPTS { + char opts[MAX_FOPTS]; /* options string */ + alist regex; /* regex string(s) */ + alist regexdir; /* regex string(s) for directories */ + alist regexfile; /* regex string(s) for files */ + alist wild; /* wild card strings */ + alist wilddir; /* wild card strings for directories */ + alist wildfile; /* wild card strings for files */ + alist wildbase; /* wild card strings for files without '/' */ + alist base; /* list of base names */ + alist fstype; /* file system type limitation */ + alist drivetype; /* drive type limitation */ + char *reader; /* reader program */ + char *writer; /* writer program */ + char *plugin; /* plugin program */ +}; + + +/* 
This is either an include item or an exclude item */ +struct INCEXE { + char opt_present[INC_KW_MAX+1]; /* set if option is present in conf file */ + FOPTS *current_opts; /* points to current options structure */ + FOPTS **opts_list; /* options list */ + int32_t num_opts; /* number of options items */ + alist name_list; /* filename list -- holds char * */ + alist plugin_list; /* filename list for plugins */ + char *ignoredir; /* ignoredir string */ +}; + +/* + * FileSet Resource + * + */ +class FILESET { +public: + RES hdr; + + bool new_include; /* Set if new include used */ + INCEXE **include_items; /* array of incexe structures */ + int32_t num_includes; /* number in array */ + INCEXE **exclude_items; + int32_t num_excludes; + bool have_MD5; /* set if MD5 initialized */ + struct MD5Context md5c; /* MD5 of include/exclude */ + char MD5[30]; /* base 64 representation of MD5 */ + bool ignore_fs_changes; /* Don't force Full if FS changed */ + bool enable_vss; /* Enable Volume Shadow Copy */ + bool enable_snapshot; /* Enable Snapshot */ + + /* Methods */ + char *name() const; +}; + +inline char *FILESET::name() const { return hdr.name; } + +class SCHED_GLOBALS { +public: + dlink link; /* double link */ + const char *name; /* resource name */ + int enabled; /* -1: not set, 0: disabled, 1: Enabled */ +}; + +/* + * Schedule Resource + */ +class SCHED { +public: + RES hdr; + SCHED_GLOBALS *globals; + RUN *run; + bool Enabled; /* set if enabled */ + + /* Methods */ + char *name() const; + void create_sched_globals(); + bool is_enabled(); + void setEnabled(bool val); +}; + +inline char *SCHED::name() const { return hdr.name; } + +/* + * Counter Resource + */ +class COUNTER { +public: + RES hdr; + + int32_t MinValue; /* Minimum value */ + int32_t MaxValue; /* Maximum value */ + int32_t CurrentValue; /* Current value */ + COUNTER *WrapCounter; /* Wrap counter name */ + CAT *Catalog; /* Where to store */ + bool created; /* Created in DB */ + + /* Methods */ + char *name() const; +}; + +inline char *COUNTER::name() const { return hdr.name; } + +/* + * Pool Resource + * + */ +class POOL { +public: + RES hdr; + + char *pool_type; /* Pool type */ + char *label_format; /* Label format string */ + char *cleaning_prefix; /* Cleaning label prefix */ + int32_t LabelType; /* Bacula/ANSI/IBM label type */ + uint32_t max_volumes; /* max number of volumes */ + utime_t VolRetention; /* volume retention period in seconds */ + utime_t CacheRetention; /* cloud cache retention period in seconds */ + utime_t VolUseDuration; /* duration volume can be used */ + uint32_t MaxVolJobs; /* Maximum jobs on the Volume */ + uint32_t MaxVolFiles; /* Maximum files on the Volume */ + uint64_t MaxVolBytes; /* Maximum bytes on the Volume */ + uint64_t MaxPoolBytes; /* Maximum bytes on the pool to create new vol */ + utime_t MigrationTime; /* Time to migrate to next pool */ + uint64_t MigrationHighBytes; /* When migration starts */ + uint64_t MigrationLowBytes; /* When migration stops */ + POOL *NextPool; /* Next pool for migration */ + alist *storage; /* Where is device -- list of Storage to be used */ + bool use_catalog; /* maintain catalog for media */ + bool catalog_files; /* maintain file entries in catalog */ + bool use_volume_once; /* write on volume only once */ + bool purge_oldest_volume; /* purge oldest volume */ + bool recycle_oldest_volume; /* attempt to recycle oldest volume */ + bool recycle_current_volume; /* attempt recycle of current volume */ + bool AutoPrune; /* default for pool auto prune */ + bool Recycle; /* 
default for media recycle yes/no */ + uint32_t action_on_purge; /* action on purge, e.g. truncate the disk volume */ + POOL *RecyclePool; /* RecyclePool destination when media is purged */ + POOL *ScratchPool; /* ScratchPool source when requesting media */ + alist *CopyPool; /* List of copy pools */ + CAT *catalog; /* Catalog to be used */ + utime_t FileRetention; /* file retention period in seconds */ + utime_t JobRetention; /* job retention period in seconds */ + + /* Methods */ + char *name() const; +}; + +inline char *POOL::name() const { return hdr.name; } + + +/* Define the Union of all the above + * resource structure definitions. + */ +union URES { + DIRRES res_dir; + CONRES res_con; + CLIENT res_client; + STORE res_store; + CAT res_cat; + JOB res_job; + FILESET res_fs; + SCHED res_sch; + POOL res_pool; + MSGS res_msgs; + COUNTER res_counter; + DEVICE res_dev; + RES hdr; + RUNSCRIPT res_runscript; +}; + + + +/* Run structure contained in Schedule Resource */ +class RUN { +public: + RUN *next; /* points to next run record */ + uint32_t level; /* level override */ + int32_t Priority; /* priority override */ + uint32_t job_type; + utime_t MaxRunSchedTime; /* max run time in sec from Sched time */ + bool MaxRunSchedTime_set; /* MaxRunSchedTime given */ + bool spool_data; /* Data spooling override */ + bool spool_data_set; /* Data spooling override given */ + bool accurate; /* accurate */ + bool accurate_set; /* accurate given */ + bool write_part_after_job; /* Write part after job override */ + bool write_part_after_job_set; /* Write part after job override given */ + bool priority_set; /* priority override given */ + bool level_set; /* level override given */ + + POOL *pool; /* Pool override */ + POOL *next_pool; /* Next pool override */ + POOL *full_pool; /* Pool override */ + POOL *vfull_pool; /* Pool override */ + POOL *inc_pool; /* Pool override */ + POOL *diff_pool; /* Pool override */ + STORE *storage; /* Storage override */ + MSGS *msgs; /* Messages override */ + char *since; + uint32_t level_no; + uint32_t minute; /* minute to run job */ + time_t last_run; /* last time run */ + time_t next_run; /* next time to run */ + bool last_day_set; /* set if last_day is used */ + char hour[nbytes_for_bits(24)]; /* bit set for each hour */ + char mday[nbytes_for_bits(32)]; /* bit set for each day of month */ + char month[nbytes_for_bits(12)]; /* bit set for each month */ + char wday[nbytes_for_bits(7)]; /* bit set for each day of the week */ + char wom[nbytes_for_bits(6)]; /* week of month */ + char woy[nbytes_for_bits(54)]; /* week of year */ +}; + +#define GetPoolResWithName(x) ((POOL *)GetResWithName(R_POOL, (x))) +#define GetStoreResWithName(x) ((STORE *)GetResWithName(R_STORAGE, (x))) +#define GetSchedResWithName(x) ((SCHED *)GetResWithName(R_SCHEDULE, (x))) +#define GetClientResWithName(x) ((CLIENT *)GetResWithName(R_CLIENT, (x))) +#define GetJobResWithName(x) ((JOB *)GetResWithName(R_JOB, (x))) +#define GetFileSetResWithName(x) ((FILESET *)GetResWithName(R_FILESET, (x))) +#define GetCatalogResWithName(x) ((CAT *)GetResWithName(R_CATALOG, (x))) + +/* Imported subroutines */ +void store_jobtype(LEX *lc, RES_ITEM *item, int index, int pass); +void store_level(LEX *lc, RES_ITEM *item, int index, int pass); +void store_replace(LEX *lc, RES_ITEM *item, int index, int pass); +void store_migtype(LEX *lc, RES_ITEM *item, int index, int pass); +void store_acl(LEX *lc, RES_ITEM *item, int index, int pass); +void store_ac_res(LEX *lc, RES_ITEM *item, int index, int pass); +void 
store_device(LEX *lc, RES_ITEM *item, int index, int pass); +void store_actiononpurge(LEX *lc, RES_ITEM *item, int index, int pass); +void store_inc(LEX *lc, RES_ITEM *item, int index, int pass); +void store_regex(LEX *lc, RES_ITEM *item, int index, int pass); +void store_wild(LEX *lc, RES_ITEM *item, int index, int pass); +void store_fstype(LEX *lc, RES_ITEM *item, int index, int pass); +void store_drivetype(LEX *lc, RES_ITEM *item, int index, int pass); +void store_opts(LEX *lc, RES_ITEM *item, int index, int pass); +void store_lopts(LEX *lc, RES_ITEM *item, int index, int pass); +void store_base(LEX *lc, RES_ITEM *item, int index, int pass); +void store_plugin(LEX *lc, RES_ITEM *item, int index, int pass); +void store_run(LEX *lc, RES_ITEM *item, int index, int pass); +void store_runscript(LEX *lc, RES_ITEM *item, int index, int pass); diff --git a/src/dird/expand.c b/src/dird/expand.c new file mode 100644 index 00000000..b1e7707d --- /dev/null +++ b/src/dird/expand.c @@ -0,0 +1,465 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * Bacula Director -- expand.c -- does variable expansion + * in particular for the LabelFormat specification. + * + * Kern Sibbald, June MMIII + */ + +#include "bacula.h" +#include "dird.h" + + + +static int date_item(JCR *jcr, int code, + const char **val_ptr, int *val_len, int *val_size) +{ + struct tm tm; + time_t now = time(NULL); + (void)localtime_r(&now, &tm); + int val = 0; + char buf[10]; + + switch (code) { + case 1: /* year */ + val = tm.tm_year + 1900; + break; + case 2: /* month */ + val = tm.tm_mon + 1; + break; + case 3: /* day */ + val = tm.tm_mday; + break; + case 4: /* hour */ + val = tm.tm_hour; + break; + case 5: /* minute */ + val = tm.tm_min; + break; + case 6: /* second */ + val = tm.tm_sec; + break; + case 7: /* Week day */ + val = tm.tm_wday; + break; + } + bsnprintf(buf, sizeof(buf), "%d", val); + *val_ptr = bstrdup(buf); + *val_len = strlen(buf); + *val_size = *val_len + 1; + return 1; +} + +static int job_item(JCR *jcr, int code, + const char **val_ptr, int *val_len, int *val_size) +{ + const char *str = " "; + char buf[20]; + + switch (code) { + case 1: /* Job */ + str = jcr->job->name(); + break; + case 2: /* Director's name */ + str = my_name; + break; + case 3: /* level */ + str = job_level_to_str(jcr->getJobLevel()); + break; + case 4: /* type */ + str = job_type_to_str(jcr->getJobType()); + break; + case 5: /* JobId */ + bsnprintf(buf, sizeof(buf), "%d", jcr->JobId); + str = buf; + break; + case 6: /* Client */ + str = jcr->client->name(); + if (!str) { + str = " "; + } + break; + case 7: /* NumVols */ + bsnprintf(buf, sizeof(buf), "%d", jcr->NumVols); + str = buf; + break; + case 8: /* Pool */ + str = jcr->pool->name(); + break; + case 9: /* Storage */ + if (jcr->wstore) { + str = jcr->wstore->name(); + } else { + str = jcr->rstore->name(); + } + break; + case 10: /* Catalog */ + str = jcr->catalog->name(); + break; + case 11: /* MediaType */ 
+ if (jcr->wstore) { + str = jcr->wstore->media_type; + } else { + str = jcr->rstore->media_type; + } + break; + case 12: /* JobName */ + str = jcr->Job; + break; + } + *val_ptr = bstrdup(str); + *val_len = strlen(str); + *val_size = *val_len + 1; + return 1; +} + +struct s_built_in_vars {const char *var_name; int code; int (*func)(JCR *jcr, int code, + const char **val_ptr, int *val_len, int *val_size);}; + +/* + * Table of build in variables + */ +static struct s_built_in_vars built_in_vars[] = { + { NT_("Year"), 1, date_item}, + { NT_("Month"), 2, date_item}, + { NT_("Day"), 3, date_item}, + { NT_("Hour"), 4, date_item}, + { NT_("Minute"), 5, date_item}, + { NT_("Second"), 6, date_item}, + { NT_("WeekDay"), 7, date_item}, + + { NT_("Job"), 1, job_item}, + { NT_("Dir"), 2, job_item}, + { NT_("Level"), 3, job_item}, + { NT_("Type"), 4, job_item}, + { NT_("JobId"), 5, job_item}, + { NT_("Client"), 6, job_item}, + { NT_("NumVols"), 7, job_item}, + { NT_("Pool"), 8, job_item}, + { NT_("Storage"), 9, job_item}, + { NT_("Catalog"), 10, job_item}, + { NT_("MediaType"), 11, job_item}, + { NT_("JobName"), 12, job_item}, + + { NULL, 0, NULL} +}; + + +/* + * Search the table of built-in variables, and if found, + * call the appropriate subroutine to do the work. + */ +static var_rc_t lookup_built_in_var(var_t *ctx, void *my_ctx, + const char *var_ptr, int var_len, int var_index, + const char **val_ptr, int *val_len, int *val_size) +{ + JCR *jcr = (JCR *)my_ctx; + int stat; + + for (int i=0; _(built_in_vars[i].var_name); i++) { + if (strncmp(_(built_in_vars[i].var_name), var_ptr, var_len) == 0) { + stat = (*built_in_vars[i].func)(jcr, built_in_vars[i].code, + val_ptr, val_len, val_size); + if (stat) { + return VAR_OK; + } + break; + } + } + return VAR_ERR_UNDEFINED_VARIABLE; +} + + +/* + * Search counter variables + */ +static var_rc_t lookup_counter_var(var_t *ctx, void *my_ctx, + const char *var_ptr, int var_len, int var_inc, int var_index, + const char **val_ptr, int *val_len, int *val_size) +{ + char buf[MAXSTRING]; + var_rc_t stat = VAR_ERR_UNDEFINED_VARIABLE; + + if (var_len > (int)sizeof(buf) - 1) { + return VAR_ERR_OUT_OF_MEMORY; + } + memcpy(buf, var_ptr, var_len); + buf[var_len] = 0; + LockRes(); + for (COUNTER *counter=NULL; (counter = (COUNTER *)GetNextRes(R_COUNTER, (RES *)counter)); ) { + if (strcmp(counter->name(), buf) == 0) { + Dmsg2(100, "Counter=%s val=%d\n", buf, counter->CurrentValue); + /* -1 => return size of array */ + if (var_index == -1) { + bsnprintf(buf, sizeof(buf), "%d", counter->CurrentValue); + *val_len = bsnprintf(buf, sizeof(buf), "%d", strlen(buf)); + *val_ptr = buf; + *val_size = 0; /* don't try to free val_ptr */ + return VAR_OK; + } else { + bsnprintf(buf, sizeof(buf), "%d", counter->CurrentValue); + *val_ptr = bstrdup(buf); + *val_len = strlen(buf); + *val_size = *val_len + 1; + } + if (var_inc) { /* increment the variable? 
*/ + if (counter->CurrentValue == counter->MaxValue) { + counter->CurrentValue = counter->MinValue; + } else { + counter->CurrentValue++; + } + if (counter->Catalog) { /* update catalog if need be */ + COUNTER_DBR cr; + JCR *jcr = (JCR *)my_ctx; + memset(&cr, 0, sizeof(cr)); + bstrncpy(cr.Counter, counter->name(), sizeof(cr.Counter)); + cr.MinValue = counter->MinValue; + cr.MaxValue = counter->MaxValue; + cr.CurrentValue = counter->CurrentValue; + Dmsg1(100, "New value=%d\n", cr.CurrentValue); + if (counter->WrapCounter) { + bstrncpy(cr.WrapCounter, counter->WrapCounter->name(), sizeof(cr.WrapCounter)); + } else { + cr.WrapCounter[0] = 0; + } + if (!db_update_counter_record(jcr, jcr->db, &cr)) { + Jmsg(jcr, M_ERROR, 0, _("Count not update counter %s: ERR=%s\n"), + counter->name(), db_strerror(jcr->db)); + } + } + } + stat = VAR_OK; + break; + } + } + UnlockRes(); + return stat; +} + + +/* + * Called here from "core" expand code to look up a variable + */ +static var_rc_t lookup_var(var_t *ctx, void *my_ctx, + const char *var_ptr, int var_len, int var_inc, int var_index, + const char **val_ptr, int *val_len, int *val_size) +{ + char buf[MAXSTRING], *val, *p, *v; + var_rc_t stat; + int count; + + /* Note, if val_size > 0 and val_ptr!=NULL, the core code will free() it */ + if ((stat = lookup_built_in_var(ctx, my_ctx, var_ptr, var_len, var_index, + val_ptr, val_len, val_size)) == VAR_OK) { + return VAR_OK; + } + + if ((stat = lookup_counter_var(ctx, my_ctx, var_ptr, var_len, var_inc, var_index, + val_ptr, val_len, val_size)) == VAR_OK) { + return VAR_OK; + } + + /* Look in environment */ + if (var_len > (int)sizeof(buf) - 1) { + return VAR_ERR_OUT_OF_MEMORY; + } + memcpy(buf, var_ptr, var_len + 1); + buf[var_len] = 0; + Dmsg1(100, "Var=%s\n", buf); + + if ((val = getenv(buf)) == NULL) { + return VAR_ERR_UNDEFINED_VARIABLE; + } + /* He wants to index the "array" */ + count = 1; + /* Find the size of the "array" + * each element is separated by a | + */ + for (p = val; *p; p++) { + if (*p == '|') { + count++; + } + } + Dmsg3(100, "For %s, reqest index=%d have=%d\n", + buf, var_index, count); + + /* -1 => return size of array */ + if (var_index == -1) { + int len; + if (count == 1) { /* if not array */ + len = strlen(val); /* return length of string */ + } else { + len = count; /* else return # array items */ + } + *val_len = bsnprintf(buf, sizeof(buf), "%d", len); + *val_ptr = buf; + *val_size = 0; /* don't try to free val_ptr */ + return VAR_OK; + } + + + if (var_index < -1 || var_index > --count) { +// return VAR_ERR_SUBMATCH_OUT_OF_RANGE; + return VAR_ERR_UNDEFINED_VARIABLE; + } + /* Now find the particular item (var_index) he wants */ + count = 0; + for (p=val; *p; ) { + if (*p == '|') { + if (count < var_index) { + val = ++p; + count++; + continue; + } + break; + } + p++; + } + if (p-val > (int)sizeof(buf) - 1) { + return VAR_ERR_OUT_OF_MEMORY; + } + Dmsg2(100, "val=%s len=%d\n", val, p-val); + /* Make a copy of item, and pass it back */ + v = (char *)malloc(p-val+1); + memcpy(v, val, p-val); + v[p-val] = 0; + *val_ptr = v; + *val_len = p-val; + *val_size = p-val+1; + Dmsg1(100, "v=%s\n", v); + return VAR_OK; +} + +/* + * Called here to do a special operation on a variable + * op_ptr points to the special operation code (not EOS terminated) + * arg_ptr points to argument to special op code + * val_ptr points to the value string + * out_ptr points to string to be returned + */ +static var_rc_t operate_var(var_t *var, void *my_ctx, + const char *op_ptr, int op_len, + const char *arg_ptr, int 
arg_len, + const char *val_ptr, int val_len, + char **out_ptr, int *out_len, int *out_size) +{ + var_rc_t stat = VAR_ERR_UNDEFINED_OPERATION; + Dmsg0(100, "Enter operate_var\n"); + if (!val_ptr) { + *out_size = 0; + return stat; + } + if (op_len == 3 && strncmp(op_ptr, "inc", 3) == 0) { + char buf[MAXSTRING]; + if (val_len > (int)sizeof(buf) - 1) { + return VAR_ERR_OUT_OF_MEMORY; + } + memcpy(buf, arg_ptr, arg_len); + buf[arg_len] = 0; + Dmsg1(100, "Arg=%s\n", buf); + memcpy(buf, val_ptr, val_len); + buf[val_len] = 0; + Dmsg1(100, "Val=%s\n", buf); + LockRes(); + for (COUNTER *counter=NULL; (counter = (COUNTER *)GetNextRes(R_COUNTER, (RES *)counter)); ) { + if (strcmp(counter->name(), buf) == 0) { + Dmsg2(100, "counter=%s val=%s\n", counter->name(), buf); + break; + } + } + UnlockRes(); + return stat; + } + *out_size = 0; + return stat; +} + + +/* + * Expand an input line and return it. + * + * Returns: 0 on failure + * 1 on success and exp has expanded input + */ +int variable_expansion(JCR *jcr, char *inp, POOLMEM **exp) +{ + var_t *var_ctx; + var_rc_t stat; + char *outp; + int in_len, out_len; + int rtn_stat = 0; + + in_len = strlen(inp); + outp = NULL; + out_len = 0; + + /* create context */ + if ((stat = var_create(&var_ctx)) != VAR_OK) { + Jmsg(jcr, M_ERROR, 0, _("Cannot create var context: ERR=%s\n"), var_strerror(var_ctx, stat)); + goto bail_out; + } + /* define callback */ + if ((stat = var_config(var_ctx, VAR_CONFIG_CB_VALUE, lookup_var, (void *)jcr)) != VAR_OK) { + Jmsg(jcr, M_ERROR, 0, _("Cannot set var callback: ERR=%s\n"), var_strerror(var_ctx, stat)); + goto bail_out; + } + + /* define special operations */ + if ((stat = var_config(var_ctx, VAR_CONFIG_CB_OPERATION, operate_var, (void *)jcr)) != VAR_OK) { + Jmsg(jcr, M_ERROR, 0, _("Cannot set var operate: ERR=%s\n"), var_strerror(var_ctx, stat)); + goto bail_out; + } + + /* unescape in place */ + if ((stat = var_unescape(var_ctx, inp, in_len, inp, in_len+1, 0)) != VAR_OK) { + Jmsg(jcr, M_ERROR, 0, _("Cannot unescape string: ERR=%s\n"), var_strerror(var_ctx, stat)); + goto bail_out; + } + + in_len = strlen(inp); + + /* expand variables */ + if ((stat = var_expand(var_ctx, inp, in_len, &outp, &out_len, 0)) != VAR_OK) { + Jmsg(jcr, M_ERROR, 0, _("Cannot expand expression \"%s\": ERR=%s\n"), + inp, var_strerror(var_ctx, stat)); + goto bail_out; + } + + /* unescape once more in place */ + if ((stat = var_unescape(var_ctx, outp, out_len, outp, out_len+1, 1)) != VAR_OK) { + Jmsg(jcr, M_ERROR, 0, _("Cannot unescape string: ERR=%s\n"), var_strerror(var_ctx, stat)); + goto bail_out; + } + + pm_strcpy(exp, outp); + + rtn_stat = 1; + +bail_out: + /* destroy expansion context */ + if ((stat = var_destroy(var_ctx)) != VAR_OK) { + Jmsg(jcr, M_ERROR, 0, _("Cannot destroy var context: ERR=%s\n"), var_strerror(var_ctx, stat)); + } + if (outp) { + free(outp); + } + return rtn_stat; +} diff --git a/src/dird/fd_cmds.c b/src/dird/fd_cmds.c new file mode 100644 index 00000000..ce594a6c --- /dev/null +++ b/src/dird/fd_cmds.c @@ -0,0 +1,1051 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. 
+ + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * Bacula Director -- fd_cmds.c -- send commands to File daemon + * + * Kern Sibbald, October MM + * + * This routine is run as a separate thread. There may be more + * work to be done to make it totally reentrant!!!! + * + * Utility functions for sending info to File Daemon. + * These functions are used by both backup and verify. + * + */ + +#include "bacula.h" +#include "dird.h" +#include "findlib/find.h" + +const int dbglvl = 400; + +/* Commands sent to File daemon */ +static char filesetcmd[] = "fileset%s%s\n"; /* set full fileset */ +static char jobcmd[] = "JobId=%s Job=%s SDid=%u SDtime=%u Authorization=%s\n"; +/* Note, mtime_only is not used here -- implemented as file option */ +static char levelcmd[] = "level = %s%s%s mtime_only=%d %s%s\n"; +static char runscript[] = "Run OnSuccess=%u OnFailure=%u AbortOnError=%u When=%u Command=%s\n"; +static char runbeforenow[]= "RunBeforeNow\n"; +static char bandwidthcmd[] = "setbandwidth=%lld Job=%s\n"; +static char component_info[] = "component_info\n"; + +/* Responses received from File daemon */ +static char OKinc[] = "2000 OK include\n"; +static char OKjob[] = "2000 OK Job"; +static char OKlevel[] = "2000 OK level\n"; +static char OKRunScript[] = "2000 OK RunScript\n"; +static char OKRunBeforeNow[] = "2000 OK RunBeforeNow\n"; +static char OKRestoreObject[] = "2000 OK ObjectRestored\n"; +static char OKComponentInfo[] = "2000 OK ComponentInfo\n"; +static char OKBandwidth[] = "2000 OK Bandwidth\n"; + +/* Forward referenced functions */ +static bool send_list_item(JCR *jcr, const char *code, char *item, BSOCK *fd); + +/* External functions */ +extern DIRRES *director; +extern int FDConnectTimeout; + +#define INC_LIST 0 +#define EXC_LIST 1 + +/* + * Open connection with File daemon. + * Try connecting every retry_interval (default 10 sec), and + * give up after max_retry_time (default 30 mins). 
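+ * + * For illustration only (field values are placeholders, the format comes from + * the jobcmd string above): after authentication the Director sends + * "JobId=nnn Job=jobname SDid=nn SDtime=nn Authorization=key" and expects a + * reply beginning with "2000 OK Job".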
+ */ + +int connect_to_file_daemon(JCR *jcr, int retry_interval, int max_retry_time, + int verbose) +{ + BSOCK *fd = jcr->file_bsock; + char ed1[30]; + utime_t heart_beat; + + if (!jcr->client) { + Jmsg(jcr, M_FATAL, 0, _("File daemon not defined for current Job\n")); + Dmsg0(10, "No Client defined for the job.\n"); + return 0; + } + + if (jcr->client->heartbeat_interval) { + heart_beat = jcr->client->heartbeat_interval; + } else { + heart_beat = director->heartbeat_interval; + } + + if (!is_bsock_open(jcr->file_bsock)) { + char name[MAX_NAME_LENGTH + 100]; + POOL_MEM buf; + + if (!fd) { + fd = jcr->file_bsock = new_bsock(); + } + bstrncpy(name, _("Client: "), sizeof(name)); + bstrncat(name, jcr->client->name(), sizeof(name)); + + fd->set_source_address(director->DIRsrc_addr); + if (!fd->connect(jcr,retry_interval, + max_retry_time, + heart_beat, name, + jcr->client->address(buf.addr()), + NULL, + jcr->client->FDport, + verbose)) { + fd->close(); + jcr->setJobStatus(JS_ErrorTerminated); + return 0; + } + Dmsg0(10, "Opened connection with File daemon\n"); + } + fd->res = (RES *)jcr->client; /* save resource in BSOCK */ + jcr->setJobStatus(JS_Running); + + if (!authenticate_file_daemon(jcr)) { + jcr->setJobStatus(JS_ErrorTerminated); + Dmsg0(10, "Authentication error with FD.\n"); + return 0; + } + + /* + * Now send JobId and authorization key + */ + if (jcr->sd_auth_key == NULL) { + jcr->sd_auth_key = bstrdup("dummy"); + } + fd->fsend(jobcmd, edit_int64(jcr->JobId, ed1), jcr->Job, jcr->VolSessionId, + jcr->VolSessionTime, jcr->sd_auth_key); + if (!jcr->keep_sd_auth_key && strcmp(jcr->sd_auth_key, "dummy")) { + memset(jcr->sd_auth_key, 0, strlen(jcr->sd_auth_key)); + } + Dmsg1(100, ">filed: %s", fd->msg); + if (bget_dirmsg(fd) > 0) { + Dmsg1(110, "<filed: %s", fd->msg); + if (strncmp(fd->msg, OKjob, strlen(OKjob)) != 0) { + Jmsg(jcr, M_FATAL, 0, _("File daemon \"%s\" rejected Job command: %s\n"), + jcr->client->hdr.name, fd->msg); + jcr->setJobStatus(JS_ErrorTerminated); + return 0; + } else if (jcr->db) { + CLIENT_DBR cr; + memset(&cr, 0, sizeof(cr)); + bstrncpy(cr.Name, jcr->client->hdr.name, sizeof(cr.Name)); + cr.AutoPrune = jcr->client->AutoPrune; + cr.FileRetention = jcr->client->FileRetention; + cr.JobRetention = jcr->client->JobRetention; + bstrncpy(cr.Uname, fd->msg+strlen(OKjob)+1, sizeof(cr.Uname)); + if (!db_update_client_record(jcr, jcr->db, &cr)) { + Jmsg(jcr, M_WARNING, 0, _("Error updating Client record. ERR=%s\n"), + db_strerror(jcr->db)); + } + } + } else { + Jmsg(jcr, M_FATAL, 0, _("FD gave bad response to JobId command: %s\n"), + fd->bstrerror()); + jcr->setJobStatus(JS_ErrorTerminated); + return 0; + } + return 1; +} + +/* + * This subroutine edits the last job start time into a + * "since=date/time" buffer that is returned in the + * variable since. This is used for display purposes in + * the job report. The time in jcr->stime is later + * passed to tell the File daemon what to do.
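+ * + * For illustration (assumed time format): the result is typically + * ", since=2019-01-15 02:05:01" when a usable prior backup exists, or + * " (upgraded from Incremental)" when the level has to be upgraded.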
+ */ +void get_level_since_time(JCR *jcr, char *since, int since_len) +{ + int JobLevel; + bool have_full; + bool do_full = false; + bool do_vfull = false; + bool do_diff = false; + utime_t now; + utime_t last_full_time = 0; + utime_t last_diff_time; + char prev_job[MAX_NAME_LENGTH], edl[50]; + + since[0] = 0; + /* If job cloned and a since time already given, use it */ + if (jcr->cloned && jcr->stime && jcr->stime[0]) { + bstrncpy(since, _(", since="), since_len); + bstrncat(since, jcr->stime, since_len); + return; + } + /* Make sure stime buffer is allocated */ + if (!jcr->stime) { + jcr->stime = get_pool_memory(PM_MESSAGE); + } + jcr->PrevJob[0] = jcr->stime[0] = 0; + /* + * Lookup the last FULL backup job to get the time/date for a + * differential or incremental save. + */ + switch (jcr->getJobLevel()) { + case L_DIFFERENTIAL: + case L_INCREMENTAL: + POOLMEM *stime = get_pool_memory(PM_MESSAGE); + /* Look up start time of last Full job */ + now = (utime_t)time(NULL); + jcr->jr.JobId = 0; /* flag to return since time */ + /* + * This is probably redundant, but some of the code below + * uses jcr->stime, so don't remove unless you are sure. + */ + if (!db_find_job_start_time(jcr, jcr->db, &jcr->jr, &jcr->stime, jcr->PrevJob)) { + do_full = true; + } + have_full = db_find_last_job_start_time(jcr, jcr->db, &jcr->jr, + &stime, prev_job, L_FULL); + if (have_full) { + last_full_time = str_to_utime(stime); + } else { + do_full = true; /* No full, upgrade to one */ + } + Dmsg4(50, "have_full=%d do_full=%d now=%lld full_time=%lld\n", have_full, + do_full, now, last_full_time); + /* Make sure the last diff is recent enough */ + if (have_full && jcr->getJobLevel() == L_INCREMENTAL && jcr->job->MaxDiffInterval > 0) { + /* Lookup last diff job */ + if (db_find_last_job_start_time(jcr, jcr->db, &jcr->jr, + &stime, prev_job, L_DIFFERENTIAL)) { + last_diff_time = str_to_utime(stime); + /* If no Diff since Full, use Full time */ + if (last_diff_time < last_full_time) { + last_diff_time = last_full_time; + } + Dmsg2(50, "last_diff_time=%lld last_full_time=%lld\n", last_diff_time, + last_full_time); + } else { + /* No last differential, so use last full time */ + last_diff_time = last_full_time; + Dmsg1(50, "No last_diff_time setting to full_time=%lld\n", last_full_time); + } + do_diff = ((now - last_diff_time) >= jcr->job->MaxDiffInterval); + Dmsg2(50, "do_diff=%d diffInter=%lld\n", do_diff, jcr->job->MaxDiffInterval); + } + /* Note, do_full takes precedence over do_vfull and do_diff */ + if (have_full && jcr->job->MaxFullInterval > 0) { + do_full = ((now - last_full_time) >= jcr->job->MaxFullInterval); + } + else + if (have_full && jcr->job->MaxVirtualFullInterval > 0) { + do_vfull = ((now - last_full_time) >= jcr->job->MaxVirtualFullInterval); + } + + free_pool_memory(stime); + + if (do_full) { + /* No recent Full job found, so upgrade this one to Full */ + Jmsg(jcr, M_INFO, 0, "%s", db_strerror(jcr->db)); + Jmsg(jcr, M_INFO, 0, _("No prior or suitable Full backup found in catalog. Doing FULL backup.\n")); + bsnprintf(since, since_len, _(" (upgraded from %s)"), + level_to_str(edl, sizeof(edl), jcr->getJobLevel())); + jcr->setJobLevel(jcr->jr.JobLevel = L_FULL); + } else if (do_vfull) { + /* No recent Full job found, and MaxVirtualFull is set so upgrade this one to Virtual Full */ + Jmsg(jcr, M_INFO, 0, "%s", db_strerror(jcr->db)); + Jmsg(jcr, M_INFO, 0, _("No prior or suitable Full backup found in catalog. 
Doing Virtual FULL backup.\n")); + bsnprintf(since, since_len, _(" (upgraded from %s)"), + level_to_str(edl, sizeof(edl), jcr->getJobLevel())); + jcr->setJobLevel(jcr->jr.JobLevel = L_VIRTUAL_FULL); + } else if (do_diff) { + /* No recent diff job found, so upgrade this one to Diff */ + Jmsg(jcr, M_INFO, 0, _("No prior or suitable Differential backup found in catalog. Doing Differential backup.\n")); + bsnprintf(since, since_len, _(" (upgraded from %s)"), + level_to_str(edl, sizeof(edl), jcr->getJobLevel())); + jcr->setJobLevel(jcr->jr.JobLevel = L_DIFFERENTIAL); + } else { + if (jcr->job->rerun_failed_levels) { + + POOLMEM *etime = get_pool_memory(PM_MESSAGE); + + /* Get the end time of our most recent successfull backup for this job */ + /* This will be used to see if there have been any failures since then */ + if (db_find_last_job_end_time(jcr, jcr->db, &jcr->jr, &etime, prev_job)) { + + /* See if there are any failed Differential/Full backups since the completion */ + /* of our last successful backup for this job */ + if (db_find_failed_job_since(jcr, jcr->db, &jcr->jr, + etime, JobLevel)) { + /* If our job is an Incremental and we have a failed job then upgrade. */ + /* If our job is a Differential and the failed job is a Full then upgrade. */ + /* Otherwise there is no reason to upgrade. */ + if ((jcr->getJobLevel() == L_INCREMENTAL) || + ((jcr->getJobLevel() == L_DIFFERENTIAL) && (JobLevel == L_FULL))) { + Jmsg(jcr, M_INFO, 0, _("Prior failed job found in catalog. Upgrading to %s.\n"), + level_to_str(edl, sizeof(edl), JobLevel)); + bsnprintf(since, since_len, _(" (upgraded from %s)"), + level_to_str(edl, sizeof(edl), jcr->getJobLevel())); + jcr->setJobLevel(jcr->jr.JobLevel = JobLevel); + jcr->jr.JobId = jcr->JobId; + break; + } + } + } + free_pool_memory(etime); + } + bstrncpy(since, _(", since="), since_len); + bstrncat(since, jcr->stime, since_len); + } + jcr->jr.JobId = jcr->JobId; + break; + } + Dmsg3(100, "Level=%c last start time=%s job=%s\n", + jcr->getJobLevel(), jcr->stime, jcr->PrevJob); +} + +static void send_since_time(JCR *jcr) +{ + BSOCK *fd = jcr->file_bsock; + utime_t stime; + char ed1[50]; + + stime = str_to_utime(jcr->stime); + fd->fsend(levelcmd, "", NT_("since_utime "), edit_uint64(stime, ed1), 0, + NT_("prev_job="), jcr->PrevJob); + while (bget_dirmsg(fd) >= 0) { /* allow him to poll us to sync clocks */ + Jmsg(jcr, M_INFO, 0, "%s\n", fd->msg); + } +} + +bool send_bwlimit(JCR *jcr, const char *Job) +{ + BSOCK *fd = jcr->file_bsock; + if (jcr->FDVersion >= 4) { + fd->fsend(bandwidthcmd, jcr->max_bandwidth, Job); + if (!response(jcr, fd, OKBandwidth, "Bandwidth", DISPLAY_ERROR)) { + jcr->max_bandwidth = 0; /* can't set bandwidth limit */ + return false; + } + } + return true; +} + +/* + * Send level command to FD. + * Used for backup jobs and estimate command. + */ +bool send_level_command(JCR *jcr) +{ + BSOCK *fd = jcr->file_bsock; + const char *accurate = jcr->accurate?"accurate_":""; + const char *not_accurate = ""; + const char *rerunning = jcr->rerunning?" 
rerunning ":" "; + /* + * Send Level command to File daemon + */ + switch (jcr->getJobLevel()) { + case L_BASE: + fd->fsend(levelcmd, not_accurate, "base", rerunning, 0, "", ""); + break; + /* L_NONE is the console, sending something off to the FD */ + case L_NONE: + case L_FULL: + fd->fsend(levelcmd, not_accurate, "full", rerunning, 0, "", ""); + break; + case L_DIFFERENTIAL: + fd->fsend(levelcmd, accurate, "differential", rerunning, 0, "", ""); + send_since_time(jcr); + break; + case L_INCREMENTAL: + fd->fsend(levelcmd, accurate, "incremental", rerunning, 0, "", ""); + send_since_time(jcr); + break; + case L_SINCE: + default: + Jmsg2(jcr, M_FATAL, 0, _("Unimplemented backup level %d %c\n"), + jcr->getJobLevel(), jcr->getJobLevel()); + return 0; + } + Dmsg1(120, ">filed: %s", fd->msg); + if (!response(jcr, fd, OKlevel, "Level", DISPLAY_ERROR)) { + return false; + } + return true; +} + +/* + * Send either an Included or an Excluded list to FD + */ +static bool send_fileset(JCR *jcr) +{ + FILESET *fileset = jcr->fileset; + BSOCK *fd = jcr->file_bsock; + STORE *store = jcr->wstore; + int num; + bool include = true; + + for ( ;; ) { + if (include) { + num = fileset->num_includes; + } else { + num = fileset->num_excludes; + } + for (int i=0; i<num; i++) { + char *item; + INCEXE *ie; + int j, k; + + if (include) { + ie = fileset->include_items[i]; + fd->fsend("I\n"); + } else { + ie = fileset->exclude_items[i]; + fd->fsend("E\n"); + } + if (ie->ignoredir) { + fd->fsend("Z %s\n", ie->ignoredir); + } + for (j=0; j<ie->num_opts; j++) { + FOPTS *fo = ie->opts_list[j]; + bool enhanced_wild = false; + bool stripped_opts = false; + bool compress_disabled = false; + char newopts[MAX_FOPTS]; + + for (k=0; fo->opts[k]!='\0'; k++) { + if (fo->opts[k]=='W') { + enhanced_wild = true; + break; + } + } + + /* + * Strip out compression option Zn if disallowed + * for this Storage. + * Strip out dedup option dn if old FD + */ + bool strip_compress = store && !store->AllowCompress; + if (strip_compress || jcr->FDVersion >= 11) { + int j = 0; + for (k=0; fo->opts[k]!='\0'; k++) { + /* Z compress option is followed by the single-digit compress level or 'o' */ + if (strip_compress && fo->opts[k]=='Z') { + stripped_opts = true; + compress_disabled = true; + k++; /* skip level */ + } else if (jcr->FDVersion < 11 && fo->opts[k]=='d') { + stripped_opts = true; + k++; /* skip level */ + } else { + newopts[j] = fo->opts[k]; + j++; + } + } + newopts[j] = '\0'; + if (compress_disabled) { + Jmsg(jcr, M_INFO, 0, + _("FD compression disabled for this Job because AllowCompression=No in Storage resource.\n") ); + } + } + if (stripped_opts) { + /* Send the new trimmed option set without overwriting fo->opts */ + fd->fsend("O %s\n", newopts); + } else { + /* Send the original options */ + fd->fsend("O %s\n", fo->opts); + } + for (k=0; k<fo->regex.size(); k++) { + fd->fsend("R %s\n", fo->regex.get(k)); + } + for (k=0; k<fo->regexdir.size(); k++) { + fd->fsend("RD %s\n", fo->regexdir.get(k)); + } + for (k=0; k<fo->regexfile.size(); k++) { + fd->fsend("RF %s\n", fo->regexfile.get(k)); + } + for (k=0; k<fo->wild.size(); k++) { + fd->fsend("W %s\n", fo->wild.get(k)); + } + for (k=0; k<fo->wilddir.size(); k++) { + fd->fsend("WD %s\n", fo->wilddir.get(k)); + } + for (k=0; k<fo->wildfile.size(); k++) { + fd->fsend("WF %s\n", fo->wildfile.get(k)); + } + for (k=0; k<fo->wildbase.size(); k++) { + fd->fsend("W%c %s\n", enhanced_wild ?
'B' : 'F', fo->wildbase.get(k)); + } + for (k=0; k<fo->base.size(); k++) { + fd->fsend("B %s\n", fo->base.get(k)); + } + for (k=0; k<fo->fstype.size(); k++) { + fd->fsend("X %s\n", fo->fstype.get(k)); + } + for (k=0; k<fo->drivetype.size(); k++) { + fd->fsend("XD %s\n", fo->drivetype.get(k)); + } + if (fo->plugin) { + fd->fsend("G %s\n", fo->plugin); + } + if (fo->reader) { + fd->fsend("D %s\n", fo->reader); + } + if (fo->writer) { + fd->fsend("T %s\n", fo->writer); + } + fd->fsend("N\n"); + } + + for (j=0; j<ie->name_list.size(); j++) { + item = (char *)ie->name_list.get(j); + if (!send_list_item(jcr, "F ", item, fd)) { + goto bail_out; + } + } + fd->fsend("N\n"); + for (j=0; j<ie->plugin_list.size(); j++) { + item = (char *)ie->plugin_list.get(j); + if (!send_list_item(jcr, "P ", item, fd)) { + goto bail_out; + } + } + fd->fsend("N\n"); + } + if (!include) { /* If we just did excludes */ + break; /* all done */ + } + include = false; /* Now do excludes */ + } + + fd->signal(BNET_EOD); /* end of data */ + if (!response(jcr, fd, OKinc, "Include", DISPLAY_ERROR)) { + goto bail_out; + } + return true; + +bail_out: + jcr->setJobStatus(JS_ErrorTerminated); + return false; + +} + +static bool send_list_item(JCR *jcr, const char *code, char *item, BSOCK *fd) +{ + BPIPE *bpipe; + FILE *ffd; + char buf[2000]; + int optlen, stat; + char *p = item; + + switch (*p) { + case '|': + p++; /* skip over the | */ + fd->msg = edit_job_codes(jcr, fd->msg, p, ""); + bpipe = open_bpipe(fd->msg, 0, "r"); + if (!bpipe) { + berrno be; + Jmsg(jcr, M_FATAL, 0, _("Cannot run program: %s. ERR=%s\n"), + p, be.bstrerror()); + return false; + } + bstrncpy(buf, code, sizeof(buf)); + Dmsg1(500, "code=%s\n", buf); + optlen = strlen(buf); + while (fgets(buf+optlen, sizeof(buf)-optlen, bpipe->rfd)) { + fd->msglen = Mmsg(fd->msg, "%s", buf); + Dmsg2(500, "Inc/exc len=%d: %s", fd->msglen, fd->msg); + if (!fd->send()) { + close_bpipe(bpipe); + Jmsg(jcr, M_FATAL, 0, _(">filed: write error on socket\n")); + return false; + } + } + if ((stat=close_bpipe(bpipe)) != 0) { + berrno be; + Jmsg(jcr, M_FATAL, 0, _("Error running program: %s. ERR=%s\n"), + p, be.bstrerror(stat)); + return false; + } + break; + case '<': + p++; /* skip over < */ + if ((ffd = bfopen(p, "rb")) == NULL) { + berrno be; + Jmsg(jcr, M_FATAL, 0, _("Cannot open included file: %s. ERR=%s\n"), + p, be.bstrerror()); + return false; + } + bstrncpy(buf, code, sizeof(buf)); + Dmsg1(500, "code=%s\n", buf); + optlen = strlen(buf); + while (fgets(buf+optlen, sizeof(buf)-optlen, ffd)) { + fd->msglen = Mmsg(fd->msg, "%s", buf); + if (!fd->send()) { + fclose(ffd); + Jmsg(jcr, M_FATAL, 0, _(">filed: write error on socket\n")); + return false; + } + } + fclose(ffd); + break; + case '\\': + p++; /* skip over \ */ + /* Note, fall through wanted */ + default: + pm_strcpy(fd->msg, code); + fd->msglen = pm_strcat(fd->msg, p); + Dmsg1(500, "Inc/Exc name=%s\n", fd->msg); + if (!fd->send()) { + Jmsg(jcr, M_FATAL, 0, _(">filed: write error on socket\n")); + return false; + } + break; + } + return true; +} + + +/* + * Send include list to File daemon + */ +bool send_include_list(JCR *jcr) +{ + BSOCK *fd = jcr->file_bsock; + if (jcr->fileset->new_include) { + fd->fsend(filesetcmd, + jcr->fileset->enable_vss ? " vss=1" : "", + jcr->fileset->enable_snapshot ?
" snap=1" : ""); + return send_fileset(jcr); + } + return true; +} + +/* + * Send a include list with a plugin and listing= parameter + */ +bool send_ls_plugin_fileset(JCR *jcr, const char *plugin, const char *path) +{ + BSOCK *fd = jcr->file_bsock; + fd->fsend(filesetcmd, "" /* no vss */, "" /* no snapshot */); + + fd->fsend("I\n"); + fd->fsend("O h\n"); /* is it required? */ + fd->fsend("N\n"); + fd->fsend("P %s listing=%s\n", plugin, path); + fd->fsend("N\n"); + fd->signal(BNET_EOD); /* end of data */ + + if (!response(jcr, fd, OKinc, "Include", DISPLAY_ERROR)) { + return false; + } + return true; +} + +/* + * Send a include list with only one directory and recurse=no + */ +bool send_ls_fileset(JCR *jcr, const char *path) +{ + BSOCK *fd = jcr->file_bsock; + fd->fsend(filesetcmd, "" /* no vss */, "" /* no snapshot */); + + fd->fsend("I\n"); + fd->fsend("O h\n"); /* Limit recursion to one directory */ + fd->fsend("N\n"); + fd->fsend("F %s\n", path); + fd->fsend("N\n"); + fd->signal(BNET_EOD); /* end of data */ + + if (!response(jcr, fd, OKinc, "Include", DISPLAY_ERROR)) { + return false; + } + return true; +} + +/* + * Send exclude list to File daemon + * Under the new scheme, the Exclude list + * is part of the FileSet sent with the + * "include_list" above. + */ +bool send_exclude_list(JCR *jcr) +{ + return true; +} + +/* TODO: drop this with runscript.old_proto in bacula 1.42 */ +static char runbefore[] = "RunBeforeJob %s\n"; +static char runafter[] = "RunAfterJob %s\n"; +static char OKRunBefore[] = "2000 OK RunBefore\n"; +static char OKRunAfter[] = "2000 OK RunAfter\n"; + +int send_runscript_with_old_proto(JCR *jcr, int when, POOLMEM *msg) +{ + int ret; + Dmsg1(120, "bdird: sending old runcommand to fd '%s'\n",msg); + if (when & SCRIPT_Before) { + jcr->file_bsock->fsend(runbefore, msg); + ret = response(jcr, jcr->file_bsock, OKRunBefore, "ClientRunBeforeJob", DISPLAY_ERROR); + } else { + jcr->file_bsock->fsend(runafter, msg); + ret = response(jcr, jcr->file_bsock, OKRunAfter, "ClientRunAfterJob", DISPLAY_ERROR); + } + return ret; +} /* END OF TODO */ + +/* + * Send RunScripts to File daemon + * 1) We send all runscript to FD, they can be executed Before, After, or twice + * 2) Then, we send a "RunBeforeNow" command to the FD to tell him to do the + * first run_script() call. 
(ie ClientRunBeforeJob) + */ +int send_runscripts_commands(JCR *jcr) +{ + POOLMEM *msg = get_pool_memory(PM_FNAME); + BSOCK *fd = jcr->file_bsock; + RUNSCRIPT *cmd; + bool launch_before_cmd = false; + POOLMEM *ehost = get_pool_memory(PM_FNAME); + int result; + + Dmsg0(120, "bdird: sending runscripts to fd\n"); + if (!jcr->job->RunScripts) { + goto norunscript; + } + foreach_alist(cmd, jcr->job->RunScripts) { + if (cmd->can_run_at_level(jcr->getJobLevel()) && cmd->target) { + ehost = edit_job_codes(jcr, ehost, cmd->target, ""); + Dmsg2(200, "bdird: runscript %s -> %s\n", cmd->target, ehost); + + if (strcmp(ehost, jcr->client->name()) == 0) { + pm_strcpy(msg, cmd->command); + bash_spaces(msg); + + Dmsg1(120, "bdird: sending runscripts to fd '%s'\n", cmd->command); + + /* TODO: remove this with bacula 1.42 */ + if (cmd->old_proto) { + result = send_runscript_with_old_proto(jcr, cmd->when, msg); + + } else { + fd->fsend(runscript, cmd->on_success, + cmd->on_failure, + cmd->fail_on_error, + cmd->when, + msg); + + result = response(jcr, fd, OKRunScript, "RunScript", DISPLAY_ERROR); + launch_before_cmd = true; + } + + if (!result) { + goto bail_out; + } + } + /* TODO : we have to play with other client */ + /* + else { + send command to an other client + } + */ + } + } + + /* Tell the FD to execute the ClientRunBeforeJob */ + if (launch_before_cmd) { + fd->fsend(runbeforenow); + if (!response(jcr, fd, OKRunBeforeNow, "RunBeforeNow", DISPLAY_ERROR)) { + goto bail_out; + } + } +norunscript: + free_pool_memory(msg); + free_pool_memory(ehost); + return 1; + +bail_out: + Jmsg(jcr, M_FATAL, 0, _("Client \"%s\" RunScript failed.\n"), ehost); + free_pool_memory(msg); + free_pool_memory(ehost); + return 0; +} + +struct OBJ_CTX { + JCR *jcr; + int count; +}; + +static int restore_object_handler(void *ctx, int num_fields, char **row) +{ + OBJ_CTX *octx = (OBJ_CTX *)ctx; + JCR *jcr = octx->jcr; + BSOCK *fd; + + fd = jcr->file_bsock; + if (jcr->is_job_canceled()) { + return 1; + } + /* Old File Daemon doesn't handle restore objects */ + if (jcr->FDVersion < 3) { + Jmsg(jcr, M_WARNING, 0, _("Client \"%s\" may not be used to restore " + "this job. Please upgrade your client.\n"), + jcr->client->name()); + return 1; + } + + if (jcr->FDVersion < 5) { /* Old version without PluginName */ + fd->fsend("restoreobject JobId=%s %s,%s,%s,%s,%s,%s\n", + row[0], row[1], row[2], row[3], row[4], row[5], row[6]); + } else { + /* bash spaces from PluginName */ + bash_spaces(row[9]); + fd->fsend("restoreobject JobId=%s %s,%s,%s,%s,%s,%s,%s\n", + row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[9]); + } + Dmsg1(010, "Send obj hdr=%s", fd->msg); + + fd->msglen = pm_strcpy(fd->msg, row[7]); + fd->send(); /* send Object name */ + + Dmsg1(010, "Send obj: %s\n", fd->msg); + +// fd->msglen = str_to_uint64(row[1]); /* object length */ +// Dmsg1(000, "obj size: %lld\n", (uint64_t)fd->msglen); + + /* object */ + db_unescape_object(jcr, jcr->db, + row[8], /* Object */ + str_to_uint64(row[1]), /* Object length */ + &fd->msg, &fd->msglen); + fd->send(); /* send object */ + octx->count++; + + if (debug_level > 100) { + for (int i=0; i < fd->msglen; i++) + if (!fd->msg[i]) + fd->msg[i] = ' '; + Dmsg1(000, "Send obj: %s\n", fd->msg); + } + + return 0; +} + +/* + * Send the plugin Restore Objects, which allow the + * plugin to get information early in the restore + * process. The RestoreObjects were created during + * the backup by the plugin. 
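+ * + * Illustrative summary of the exchange coded below (values are placeholders): + * for each object the Director sends "restoreobject JobId=nnn ..." followed by + * the object name and the object data, then a final "restoreobject end", and + * expects "2000 OK ObjectRestored" from the FD.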
+ */ +bool send_restore_objects(JCR *jcr) +{ + char ed1[50]; + POOL_MEM query(PM_MESSAGE); + BSOCK *fd; + OBJ_CTX octx; + + if (!jcr->JobIds || !jcr->JobIds[0]) { + return true; + } + octx.jcr = jcr; + octx.count = 0; + + /* restore_object_handler is called for each file found */ + + /* send restore objects for all jobs involved */ + Mmsg(query, get_restore_objects, jcr->JobIds, FT_RESTORE_FIRST); + db_sql_query(jcr->db, query.c_str(), restore_object_handler, (void *)&octx); + + /* send config objects for the current restore job */ + Mmsg(query, get_restore_objects, + edit_uint64(jcr->JobId, ed1), FT_PLUGIN_CONFIG_FILLED); + db_sql_query(jcr->db, query.c_str(), restore_object_handler, (void *)&octx); + + /* + * Send to FD only if we have at least one restore object. + * This permits backward compatibility with older FDs. + */ + if (octx.count > 0) { + fd = jcr->file_bsock; + fd->fsend("restoreobject end\n"); + if (!response(jcr, fd, OKRestoreObject, "RestoreObject", DISPLAY_ERROR)) { + Jmsg(jcr, M_FATAL, 0, _("RestoreObject failed.\n")); + return false; + } + } + return true; +} + +/* + * Send the plugin a list of component info files. These + * were files that were created during the backup for + * the VSS plugin. The list is a list of those component + * files that have been chosen for restore. We + * send them before the Restore Objects. + */ +bool send_component_info(JCR *jcr) +{ + BSOCK *fd; + char buf[2000]; + bool ok = true; + + if (!jcr->component_fd) { + return true; /* nothing to send */ + } + /* Don't send if old version FD */ + if (jcr->FDVersion < 6) { + goto bail_out; + } + + rewind(jcr->component_fd); + fd = jcr->file_bsock; + fd->fsend(component_info); + while (fgets(buf, sizeof(buf), jcr->component_fd)) { + fd->fsend("%s", buf); + Dmsg1(050, "Send component_info to FD: %s\n", buf); + } + fd->signal(BNET_EOD); + if (!response(jcr, fd, OKComponentInfo, "ComponentInfo", DISPLAY_ERROR)) { + Jmsg(jcr, M_FATAL, 0, _("ComponentInfo failed.\n")); + ok = false; + } + +bail_out: + fclose(jcr->component_fd); + jcr->component_fd = NULL; + unlink(jcr->component_fname); + free_and_null_pool_memory(jcr->component_fname); + return ok; +} + +/* + * Read the attributes from the File daemon for + * a Verify job and store them in the catalog. 
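+ * + * For illustration (schematic, derived from the parsing below): each record + * from the FD starts with "FileIndex Stream Opts_Digest" followed by the file + * name and the encoded attributes; digest streams are matched to the previous + * attribute record by FileIndex before the record is written to the catalog.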
+ */ +int get_attributes_and_put_in_catalog(JCR *jcr) +{ + BSOCK *fd; + int n = 0; + ATTR_DBR *ar = NULL; + char digest[MAXSTRING]; + + fd = jcr->file_bsock; + jcr->jr.FirstIndex = 1; + jcr->FileIndex = 0; + /* Start transaction allocates jcr->attr and jcr->ar if needed */ + db_start_transaction(jcr, jcr->db); /* start transaction if not already open */ + ar = jcr->ar; + + Dmsg0(120, "bdird: waiting to receive file attributes\n"); + /* Pickup file attributes and digest */ + while (!fd->errors && (n = bget_dirmsg(fd)) > 0) { + int32_t file_index; + int stream, len; + char *p, *fn; + char Digest[MAXSTRING]; /* either Verify opts or MD5/SHA1 digest */ + + /* Stop here if canceled */ + if (jcr->is_job_canceled()) { + jcr->cached_attribute = false; + return 0; + } + + if ((len = sscanf(fd->msg, "%ld %d %s", &file_index, &stream, Digest)) != 3) { + Jmsg(jcr, M_FATAL, 0, _("<filed: bad attributes, expected 3 fields got %d\n" + "msglen=%d msg=%s\n"), len, fd->msglen, fd->msg); + jcr->setJobStatus(JS_ErrorTerminated); + jcr->cached_attribute = false; + return 0; + } + p = fd->msg; + /* The following three fields were sscanf'ed above so skip them */ + skip_nonspaces(&p); /* skip FileIndex */ + skip_spaces(&p); + skip_nonspaces(&p); /* skip Stream */ + skip_spaces(&p); + skip_nonspaces(&p); /* skip Opts_Digest */ + p++; /* skip space */ + Dmsg1(dbglvl, "Stream=%d\n", stream); + if (stream == STREAM_UNIX_ATTRIBUTES || stream == STREAM_UNIX_ATTRIBUTES_EX) { + if (jcr->cached_attribute) { + Dmsg3(dbglvl, "Cached attr. Stream=%d fname=%s\n", ar->Stream, ar->fname, + ar->attr); + if (!db_create_file_attributes_record(jcr, jcr->db, ar)) { + Jmsg1(jcr, M_FATAL, 0, _("Attribute create error. %s"), db_strerror(jcr->db)); + } + jcr->cached_attribute = false; + } + /* Any cached attr is flushed so we can reuse jcr->attr and jcr->ar */ + fn = jcr->fname = check_pool_memory_size(jcr->fname, fd->msglen); + while (*p != 0) { + *fn++ = *p++; /* copy filename */ + } + *fn = *p++; /* term filename and point p to attribs */ + pm_strcpy(jcr->attr, p); /* save attributes */ + jcr->JobFiles++; + jcr->FileIndex = file_index; + ar->attr = jcr->attr; + ar->fname = jcr->fname; + ar->FileIndex = file_index; + ar->Stream = stream; + ar->link = NULL; + ar->JobId = jcr->JobId; + ar->ClientId = jcr->ClientId; + ar->PathId = 0; + ar->FilenameId = 0; + ar->Digest = NULL; + ar->DigestType = CRYPTO_DIGEST_NONE; + ar->DeltaSeq = 0; + jcr->cached_attribute = true; + + Dmsg2(dbglvl, "dird<filed: stream=%d fname=%s\n", stream, jcr->fname); + Dmsg1(dbglvl, "dird<filed: attr=%s\n", jcr->attr); + jcr->FileId = ar->FileId; + /* + * First, get STREAM_UNIX_ATTRIBUTES and fill ATTR_DBR structure + * Next, we CAN have a CRYPTO_DIGEST, so we fill ATTR_DBR with it (or not) + * When we get a new STREAM_UNIX_ATTRIBUTES, we know that we can add the file to the catalog + * At the end, we have to add the last file + */ + } else if (crypto_digest_stream_type(stream) != CRYPTO_DIGEST_NONE) { + if (jcr->FileIndex != file_index) { + Jmsg3(jcr, M_ERROR, 0, _("%s index %d not same as attributes %d\n"), + stream_to_ascii(stream), file_index, jcr->FileIndex); + continue; + } + ar->Digest = digest; + ar->DigestType = crypto_digest_stream_type(stream); + db_escape_string(jcr, jcr->db, digest, Digest, strlen(Digest)); + Dmsg4(dbglvl, "stream=%d DigestLen=%d Digest=%s type=%d\n", stream, + strlen(digest), digest, ar->DigestType); + } + jcr->jr.JobFiles = jcr->JobFiles = file_index; + jcr->jr.LastIndex = file_index; + } + if (fd->is_error()) { + Jmsg1(jcr, M_FATAL, 0, _("<filed: Network error getting attributes. ERR=%s\n"), fd->bstrerror()); + jcr->cached_attribute = false; + return 0; + } + if (jcr->cached_attribute) { + Dmsg3(dbglvl, "Cached attr with digest. 
Stream=%d fname=%s attr=%s\n", ar->Stream, + ar->fname, ar->attr); + if (!db_create_file_attributes_record(jcr, jcr->db, ar)) { + Jmsg1(jcr, M_FATAL, 0, _("Attribute create error. %s"), db_strerror(jcr->db)); + } + jcr->cached_attribute = false; + } + jcr->setJobStatus(JS_Terminated); + return 1; +} diff --git a/src/dird/getmsg.c b/src/dird/getmsg.c new file mode 100644 index 00000000..1fc42215 --- /dev/null +++ b/src/dird/getmsg.c @@ -0,0 +1,414 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * Bacula Director -- routines to receive network data and + * handle network signals. These routines handle the connections + * to the Storage daemon and the File daemon. + * + * Kern Sibbald, August MM + * + * This routine runs as a thread and must be thread reentrant. + * + * Basic tasks done here: + * Handle network signals (signals). + * Signals always have return status 0 from bnet_recv() and + * a zero or negative message length. + * Pass appropriate messages back to the caller (responses). + * Responses always have a digit as the first character. + * Handle requests for message and catalog services (requests). + * Requests are any message that does not begin with a digit. + * In affect, they are commands. 
+ * + */ + +#include "bacula.h" +#include "dird.h" + +/* Forward referenced functions */ +static char *find_msg_start(char *msg); + +static char Job_status[] = "Status JobId=%ld JobStatus=%d\n"; +#ifdef needed +static char Device_update[] = "DevUpd JobId=%127s " + "device=%127s " + "append=%d read=%d num_writers=%d " + "open=%d labeled=%d offline=%d " + "reserved=%d max_writers=%d " + "autoselect=%d autochanger=%d " + "changer_name=%127s media_type=%127s volume_name=%127s " + "DevReadTime=%d DevWriteTime=%d DevReadBytes=%d " + "DevWriteBytes=%d\n"; +#endif + + +static char OK_msg[] = "1000 OK\n"; + + +static void set_jcr_sd_job_status(JCR *jcr, int SDJobStatus) +{ + bool set_waittime=false; + Dmsg2(800, "set_jcr_sd_job_status(%s, %c)\n", jcr->Job, SDJobStatus); + /* if wait state is new, we keep current time for watchdog MaxWaitTime */ + switch (SDJobStatus) { + case JS_WaitMedia: + case JS_WaitMount: + case JS_WaitMaxJobs: + set_waittime = true; + default: + break; + } + + if (job_waiting(jcr)) { + set_waittime = false; + } + + if (set_waittime) { + /* set it before JobStatus */ + Dmsg0(800, "Setting wait_time\n"); + jcr->wait_time = time(NULL); + } + jcr->SDJobStatus = SDJobStatus; + if (jcr->SDJobStatus == JS_Incomplete) { + jcr->setJobStatus(JS_Incomplete); + } + +} + +/* + * See if we are pointing to a message id + * Look for: [XYnnnn] + */ +static bool is_msgid(char *msg) +{ + if (!msg) return false; + char *end = strchr(msg, ']'); + if (!end) return false; + if ((end - msg) != 7) return false; + if (!B_ISUPPER(msg[1]) || !B_ISUPPER(msg[2])) return false; + for (int i=3; i<7; i++) { + if (!B_ISDIGIT(msg[i])) return false; + } + return true; +} + +/* + * Get a message + * Call appropriate processing routine + * If it is not a Jmsg or a ReqCat message, + * return it to the caller. + * + * This routine is called to get the next message from + * another daemon. If the message is in canonical message + * format and the type is known, it will be dispatched + * to the appropriate handler. If the message is + * in any other format, it will be returned. + * + * E.g. any message beginning with a digit will be passed + * through to the caller. + * All other messages are expected begin with some identifier + * -- for the moment only the first character is checked, but + * at a later time, the whole identifier (e.g. Jmsg, CatReq, ...) + * could be checked. + * This is followed by JobId=nnn + * info. The identifier is used to dispatch the message to the right + * place (Job message, catalog request, ...). The Job is used to lookup + * the JCR so that the action is performed on the correct jcr, and + * the rest of the message is up to the user. Note, DevUpd uses + * *System* for the Job name, and hence no JCR is obtained. This + * is a *rare* case where a jcr is not really needed. 
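+ * + * Illustrative examples of the traffic handled here (values are placeholders): + * a response such as "2000 OK level" is returned to the caller, a job message + * such as "Jmsg JobId=nnn type=nnn level=nnn some text" is dispatched with + * dispatch_message(), and a catalog request such as "CatReq JobId=nnn ..." is + * passed to catalog_request().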
+ * + */ +int bget_dirmsg(BSOCK *bs) +{ + int32_t n = BNET_TERMINATE; + char Job[MAX_NAME_LENGTH]; + JobId_t JobId = 0; + char MsgType[20]; + int type; + utime_t mtime; /* message time */ + JCR *jcr = bs->jcr(); + char *msg; + + for ( ; !bs->is_stop() && !bs->is_timed_out(); ) { + n = bs->recv(); + Dmsg4(200, "bget_dirmsg n=%d msglen=%ld is_stop=%d: %s\n", n, bs->msglen, bs->is_stop(), bs->msg); + + if (bs->is_stop() || bs->is_timed_out()) { + return n; /* error or terminate */ + } + if (n == BNET_SIGNAL) { /* handle signal */ + /* BNET_SIGNAL (-1) return from bnet_recv() => network signal */ + switch (bs->msglen) { + case BNET_EOD: /* end of data */ + return n; + case BNET_EOD_POLL: + bs->fsend(OK_msg);/* send response */ + return n; /* end of data */ + case BNET_TERMINATE: + bs->set_terminated(); + return n; + case BNET_POLL: + bs->fsend(OK_msg); /* send response */ + break; + case BNET_HEARTBEAT: +// encode_time(time(NULL), Job); +// Dmsg1(100, "%s got heartbeat.\n", Job); + break; + case BNET_HB_RESPONSE: + break; + case BNET_STATUS: + /* *****FIXME***** Implement more completely */ + bs->fsend("Status OK\n"); + bs->signal(BNET_EOD); + break; + case BNET_BTIME: /* send Bacula time */ + char ed1[50]; + bs->fsend("btime %s\n", edit_uint64(get_current_btime(),ed1)); + break; + default: + Jmsg1(jcr, M_WARNING, 0, _("bget_dirmsg: unknown bnet signal %d\n"), bs->msglen); + return n; + } + continue; + } + + /* Handle normal data */ + + if (n > 0 && B_ISDIGIT(bs->msg[0])) { /* response? */ + return n; /* yes, return it */ + } + + /* + * If we get here, it must be a request. Either + * a message to dispatch, or a catalog request. + * Try to fulfill it. + */ + if ((sscanf(bs->msg, "%020s JobId=%ld ", MsgType, &JobId) != 2) && + (sscanf(bs->msg, "%020s Job=%127s ", MsgType, Job) != 2) && + (sscanf(bs->msg, "%020s Job=x", MsgType) != 1)) { + if (jcr->JobId == 0 || is_msgid(strchr(bs->msg, '['))) { + return n; + } + Jmsg1(jcr, M_ERROR, 0, _("Malformed message: %s\n"), bs->msg); + continue; + } + + /* Skip past first two fields: "Jmsg JobId=nnn" */ + if (!(msg=find_msg_start(bs->msg))) { + if (jcr->JobId == 0) { + return n; + } + Jmsg1(jcr, M_ERROR, 0, _("Malformed message: %s\n"), bs->msg); + continue; + } + + /* + * Here we are expecting a message of the following format: + * Jmsg JobId=nnn type=nnn level=nnn Message-string + * Note, level should really be mtime, but that changes + * the protocol. 
+ */ + if (bs->msg[0] == 'J') { /* Job message */ + if ((sscanf(bs->msg, "Jmsg JobId=%ld type=%d level=%lld", + &JobId, &type, &mtime) != 3) && + (sscanf(bs->msg, "Jmsg Job=%127s type=%d level=%lld", + Job, &type, &mtime) != 3)) { + Jmsg1(jcr, M_ERROR, 0, _("Malformed message: %s\n"), bs->msg); + continue; + } + Dmsg1(900, "Got msg: %s\n", bs->msg); + skip_spaces(&msg); + skip_nonspaces(&msg); /* skip type=nnn */ + skip_spaces(&msg); + skip_nonspaces(&msg); /* skip level=nnn */ + if (*msg == ' ') { + msg++; /* skip leading space */ + } + Dmsg1(900, "Dispatch msg: %s", msg); + dispatch_message(jcr, type, mtime, msg); + continue; + } + /* + * Here we expact a CatReq message + * CatReq JobId=nn Catalog-Request-Message + */ + if (bs->msg[0] == 'C') { /* Catalog request */ + Dmsg2(900, "Catalog req jcr=%p: %s", jcr, bs->msg); + catalog_request(jcr, bs); + continue; + } + if (bs->msg[0] == 'U') { /* SD sending attributes */ + Dmsg2(900, "Catalog upd jcr=%p: %s", jcr, bs->msg); + catalog_update(jcr, bs); + continue; + } + if (bs->msg[0] == 'B') { /* SD sending file spool attributes */ + Dmsg2(100, "Blast attributes jcr=%p: %s", jcr, bs->msg); + char filename[256]; + if (sscanf(bs->msg, "BlastAttr JobId=%ld File=%255s", + &JobId, filename) != 2) { + Jmsg1(jcr, M_ERROR, 0, _("Malformed message: %s\n"), bs->msg); + continue; + } + unbash_spaces(filename); + if (despool_attributes_from_file(jcr, filename)) { + bs->fsend("1000 OK BlastAttr\n"); + } else { + bs->fsend("1990 ERROR BlastAttr\n"); + } + continue; + } + if (bs->msg[0] == 'M') { /* Mount request */ + Dmsg1(900, "Mount req: %s", bs->msg); + mount_request(jcr, bs, msg); + continue; + } + /* Get Progress: files, bytes, bytes/sec */ + if (bs->msg[0] == 'P') { /* Progress report */ + uint32_t files, bps; + uint64_t bytes; + if ((sscanf(bs->msg, "Progress JobId=%ld files=%ld bytes=%lld bps=%ld\n", + &JobId, &files, &bytes, &bps) == 4) || + (sscanf(bs->msg, "Progress JobId=x files=%ld bytes=%lld bps=%ld\n", + &files, &bytes, &bps) == 3) || + (sscanf(bs->msg, "Progress Job=x files=%ld bytes=%lld bps=%ld\n", + &files, &bytes, &bps) == 3)) { + Dmsg2(900, "JobId=%d %s", jcr->JobId, bs->msg); + /* Save progress data */ + jcr->JobFiles = files; + jcr->JobBytes = bytes; + jcr->LastRate = bps; + } else { + Jmsg1(jcr, M_ERROR, 0, _("Malformed message: %s\n"), bs->msg); + } + continue; + } + if (bs->msg[0] == 'S') { /* Status change */ + int JobStatus; + if (sscanf(bs->msg, Job_status, &JobId, &JobStatus) == 2) { + set_jcr_sd_job_status(jcr, JobStatus); /* current status */ + } else { + Jmsg1(jcr, M_ERROR, 0, _("Malformed message: %s\n"), bs->msg); + } + continue; + } +#ifdef needed + /* No JCR for Device Updates! 
*/ + if (bs->msg[0] == 'D') { /* Device update */ + DEVICE *dev; + POOL_MEM dev_name, changer_name, media_type, volume_name; + int dev_open, dev_append, dev_read, dev_labeled; + int dev_offline, dev_autochanger, dev_autoselect; + int dev_num_writers, dev_max_writers, dev_reserved; + uint64_t dev_read_time, dev_write_time, dev_write_bytes, dev_read_bytes; + uint64_t dev_PoolId; + Dmsg1(100, "<stored: %s", bs->msg); + if (sscanf(bs->msg, Device_update, + &Job, dev_name.c_str(), + &dev_append, &dev_read, + &dev_num_writers, &dev_open, + &dev_labeled, &dev_offline, &dev_reserved, + &dev_max_writers, &dev_autoselect, + &dev_autochanger, + changer_name.c_str(), media_type.c_str(), + volume_name.c_str(), + &dev_read_time, &dev_write_time, &dev_read_bytes, + &dev_write_bytes) != 19) { + Emsg1(M_ERROR, 0, _("Malformed message: %s\n"), bs->msg); + } else { + unbash_spaces(dev_name); + dev = (DEVICE *)GetResWithName(R_DEVICE, dev_name.c_str()); + if (!dev) { + continue; + } + unbash_spaces(changer_name); + unbash_spaces(media_type); + unbash_spaces(volume_name); + bstrncpy(dev->ChangerName, changer_name.c_str(), sizeof(dev->ChangerName)); + bstrncpy(dev->MediaType, media_type.c_str(), sizeof(dev->MediaType)); + bstrncpy(dev->VolumeName, volume_name.c_str(), sizeof(dev->VolumeName)); + /* Note, these are copied because they are boolean rather than + * integer. + */ + dev->open = dev_open; + dev->append = dev_append; + dev->read = dev_read; + dev->labeled = dev_labeled; + dev->offline = dev_offline; + dev->autoselect = dev_autoselect; + dev->autochanger = dev_autochanger > 0; + dev->num_drives = dev_autochanger; /* does double duty */ + dev->PoolId = dev_PoolId; + dev->num_writers = dev_num_writers; + dev->max_writers = dev_max_writers; + dev->reserved = dev_reserved; + dev->found = true; + dev->DevReadTime = dev_read_time; /* TODO : have to update database */ + dev->DevWriteTime = dev_write_time; + dev->DevReadBytes = dev_read_bytes; + dev->DevWriteBytes = dev_write_bytes; + } + continue; + } +#endif + return n; + } + return n; +} + +static char *find_msg_start(char *msg) +{ + char *p = msg; + + skip_nonspaces(&p); /* skip message type */ + skip_spaces(&p); + skip_nonspaces(&p); /* skip Job */ + skip_spaces(&p); /* after spaces come the message */ + return p; +} + +/* + * Get response from FD or SD to a command we + * sent. Check that the response agrees with what we expect. + * + * Returns: false on failure + * true on success + */ +bool response(JCR *jcr, BSOCK *bs, char *resp, const char *cmd, e_prtmsg prtmsg) +{ + int n; + + if (bs->is_error()) { + return false; + } + if ((n = bget_dirmsg(bs)) >= 0) { + if (strcmp(bs->msg, resp) == 0) { + return true; + } + if (prtmsg == DISPLAY_ERROR) { + Jmsg(jcr, M_FATAL, 0, _("Bad response to %s command: wanted %s, got %s\n"), + cmd, resp, bs->msg); + } + return false; + } + Jmsg(jcr, M_FATAL, 0, _("Socket error on %s command: ERR=%s\n"), + cmd, bs->bstrerror()); + return false; +} diff --git a/src/dird/inc_conf.c b/src/dird/inc_conf.c new file mode 100644 index 00000000..b8d42415 --- /dev/null +++ b/src/dird/inc_conf.c @@ -0,0 +1,798 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS.
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Configuration file parser for new and old Include and + * Exclude records + * + * Kern Sibbald, March MMIII + * + */ + +#include "bacula.h" +#include "dird.h" +#ifndef HAVE_REGEX_H +#include "lib/bregex.h" +#else +#include <regex.h> +#endif + +/* Forward referenced subroutines */ + +void store_inc(LEX *lc, RES_ITEM *item, int index, int pass); + +static void store_newinc(LEX *lc, RES_ITEM *item, int index, int pass); +void store_regex(LEX *lc, RES_ITEM *item, int index, int pass); +void store_wild(LEX *lc, RES_ITEM *item, int index, int pass); +void store_fstype(LEX *lc, RES_ITEM *item, int index, int pass); +void store_drivetype(LEX *lc, RES_ITEM *item, int index, int pass); +void store_opts(LEX *lc, RES_ITEM *item, int index, int pass); +void store_lopts(LEX *lc, RES_ITEM *item, int index, int pass); +void store_base(LEX *lc, RES_ITEM *item, int index, int pass); +void store_plugin(LEX *lc, RES_ITEM *item, int index, int pass); +static void setup_current_opts(void); + +/* Include and Exclude items */ +static void store_fname(LEX *lc, RES_ITEM2 *item, int index, int pass, bool exclude); +static void store_plugin_name(LEX *lc, RES_ITEM2 *item, int index, int pass, bool exclude); +static void store_options_res(LEX *lc, RES_ITEM2 *item, int index, int pass, bool exclude); +static void store_excludedir(LEX *lc, RES_ITEM2 *item, int index, int pass, bool exclude); + + +/* We build the current resource here as we are + * scanning the resource configuration definition, + * then move it to allocated memory when the resource + * scan is complete. + */ +#if defined(_MSC_VER) +extern "C" { // work around visual compiler mangling variables + extern URES res_all; +} +#else +extern URES res_all; +#endif +extern int32_t res_all_size; + +/* We build the current new Include and Exclude items here */ +static INCEXE res_incexe; + +/* + * new Include/Exclude items + * name handler value code flags default_value + */ +RES_ITEM2 newinc_items[] = { + {"File", store_fname, {0}, 0, 0, 0}, + {"Plugin", store_plugin_name, {0}, 0, 0, 0}, + {"ExcludeDirContaining", store_excludedir, {0}, 0, 0, 0}, + {"Options", store_options_res, {0}, 0, 0, 0}, + {NULL, NULL, {0}, 0, 0, 0} +}; + +/* + * Items that are valid in an Options resource + * + * name handler value code flags default_value + * + * Encryption in FS_option_kw table ??? + * ReadFifo not in FS_option_kw table ???
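+ * + * A hypothetical Options resource using directives from the table below + * (the values come from the FS_options table further down): + * Options { + * Signature = MD5 + * Compression = GZIP6 + * OneFs = no + * }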
+ * + */ +RES_ITEM options_items[] = { + {"Compression", store_opts, {0}, 0, INC_KW_COMPRESSION, 0}, + {"Signature", store_opts, {0}, 0, INC_KW_DIGEST, 0}, + {"OneFs", store_opts, {0}, 0, INC_KW_ONEFS, 0}, + {"Recurse", store_opts, {0}, 0, INC_KW_RECURSE, 0}, + {"Sparse", store_opts, {0}, 0, INC_KW_SPARSE, 0}, + {"HardLinks", store_opts, {0}, 0, INC_KW_HARDLINK, 0}, + {"Replace", store_opts, {0}, 0, INC_KW_REPLACE, 0}, + {"Portable", store_opts, {0}, 0, INC_KW_PORTABLE, 0}, + {"MtimeOnly", store_opts, {0}, 0, INC_KW_MTIMEONLY, 0}, + {"KeepAtime", store_opts, {0}, 0, INC_KW_KEEPATIME, 0}, + {"Exclude", store_opts, {0}, 0, INC_KW_EXCLUDE, 0}, + {"AclSupport", store_opts, {0}, 0, INC_KW_ACL, 0}, + {"IgnoreCase", store_opts, {0}, 0, INC_KW_IGNORECASE, 0}, + {"HfsPlusSupport", store_opts, {0}, 0, INC_KW_HFSPLUS, 0}, + {"NoAtime", store_opts, {0}, 0, INC_KW_NOATIME, 0}, + {"EnhancedWild", store_opts, {0}, 0, INC_KW_ENHANCEDWILD, 0}, + {"CheckFileChanges",store_opts, {0}, 0, INC_KW_CHKCHANGES, 1}, + {"HonorNoDumpFlag", store_opts, {0}, 0, INC_KW_HONOR_NODUMP, 0}, + {"XattrSupport", store_opts, {0}, 0, INC_KW_XATTR, 0}, + {"ReadFifo", store_opts, {0}, 0, INC_KW_READFIFO, 0}, + {"BaseJob", store_lopts, {0}, 'J', INC_KW_BASEJOB, 0}, + {"Accurate", store_lopts, {0}, 'C', INC_KW_ACCURATE, 0}, + {"Verify", store_lopts, {0}, 'V', INC_KW_VERIFY, 0}, + {"StripPath", store_lopts, {0}, 'P', INC_KW_STRIPPATH, 0}, + {"Regex", store_regex, {0}, 0, 0, 0}, + {"RegexDir", store_regex, {0}, 1, 0, 0}, + {"RegexFile", store_regex, {0}, 2, 0, 0}, + {"Base", store_base, {0}, 0, 0, 0}, + {"Wild", store_wild, {0}, 0, 0, 0}, + {"WildDir", store_wild, {0}, 1, 0, 0}, + {"WildFile", store_wild, {0}, 2, 0, 0}, + {"Plugin", store_plugin, {0}, 0, 0, 0}, + {"FsType", store_fstype, {0}, 0, 0, 0}, + {"DriveType", store_drivetype, {0}, 0, 0, 0}, + {NULL, NULL, {0}, 0, 0, 0} +}; + +/* + * This is the list of options that can be stored by store_opts + * Note, now that the old style Include/Exclude code is gone, + * the INC_KW code could be put into the "code" field of the + * options given above. + * + * name token + */ +s_kw FS_option_kw[] = { + {"Compression", INC_KW_COMPRESSION}, + {"Signature", INC_KW_DIGEST}, + {"Encryption", INC_KW_ENCRYPTION}, + {"Verify", INC_KW_VERIFY}, + {"BaseJob", INC_KW_BASEJOB}, + {"Accurate", INC_KW_ACCURATE}, + {"OneFs", INC_KW_ONEFS}, + {"Recurse", INC_KW_RECURSE}, + {"Sparse", INC_KW_SPARSE}, + {"HardLinks", INC_KW_HARDLINK}, + {"Replace", INC_KW_REPLACE}, + {"ReadFifo", INC_KW_READFIFO}, + {"Portable", INC_KW_PORTABLE}, + {"MtimeOnly", INC_KW_MTIMEONLY}, + {"KeepAtime", INC_KW_KEEPATIME}, + {"Exclude", INC_KW_EXCLUDE}, + {"AclSupport", INC_KW_ACL}, + {"IgnoreCase", INC_KW_IGNORECASE}, + {"HfsPlusSupport", INC_KW_HFSPLUS}, + {"NoAtime", INC_KW_NOATIME}, + {"EnhancedWild", INC_KW_ENHANCEDWILD}, + {"CheckFileChanges", INC_KW_CHKCHANGES}, + {"StripPath", INC_KW_STRIPPATH}, + {"HonorNoDumpFlag", INC_KW_HONOR_NODUMP}, + {"XattrSupport", INC_KW_XATTR}, + {NULL, 0} +}; + +/* + * Options permitted for each keyword and resulting value. + * The output goes into opts, which are then transmitted to + * the FD for application as options to the following list of + * included files. + * + * Note! all 0's in options must come after the value that + * is non-zero. + * + * NOTE!! 
The following long options (see scan_include_options()) + * V = Verify + * C = Accurate + * J = BaseJob + * P = StripPath + * + * name keyword option + */ +struct s_fs_opt FS_options[] = { + {"Md5", INC_KW_DIGEST, "M"}, + {"Sha1", INC_KW_DIGEST, "S"}, + {"Sha256", INC_KW_DIGEST, "S2"}, + {"Sha512", INC_KW_DIGEST, "S3"}, + {"Gzip", INC_KW_COMPRESSION, "Z6"}, + {"Gzip1", INC_KW_COMPRESSION, "Z1"}, + {"Gzip2", INC_KW_COMPRESSION, "Z2"}, + {"Gzip3", INC_KW_COMPRESSION, "Z3"}, + {"Gzip4", INC_KW_COMPRESSION, "Z4"}, + {"Gzip5", INC_KW_COMPRESSION, "Z5"}, + {"Gzip6", INC_KW_COMPRESSION, "Z6"}, + {"Gzip7", INC_KW_COMPRESSION, "Z7"}, + {"Gzip8", INC_KW_COMPRESSION, "Z8"}, + {"Gzip9", INC_KW_COMPRESSION, "Z9"}, + {"Lzo", INC_KW_COMPRESSION, "Zo"}, + {"blowfish", INC_KW_ENCRYPTION, "B"}, /* ***FIXME*** not implemented */ + {"3des", INC_KW_ENCRYPTION, "3"}, /* ***FIXME*** not implemented */ + {"No", INC_KW_ONEFS, "f"}, + {"Yes", INC_KW_ONEFS, "0"}, + {"No", INC_KW_RECURSE, "h"}, + {"Yes", INC_KW_RECURSE, "0"}, + {"Yes", INC_KW_SPARSE, "s"}, + {"No", INC_KW_SPARSE, "0"}, + {"No", INC_KW_HARDLINK, "H"}, + {"Yes", INC_KW_HARDLINK, "0"}, + {"Always", INC_KW_REPLACE, "a"}, + {"IfNewer", INC_KW_REPLACE, "w"}, + {"Never", INC_KW_REPLACE, "n"}, + {"Yes", INC_KW_READFIFO, "r"}, + {"No", INC_KW_READFIFO, "0"}, + {"Yes", INC_KW_PORTABLE, "p"}, + {"No", INC_KW_PORTABLE, "0"}, + {"Yes", INC_KW_MTIMEONLY, "m"}, + {"No", INC_KW_MTIMEONLY, "0"}, + {"Yes", INC_KW_KEEPATIME, "k"}, + {"No", INC_KW_KEEPATIME, "0"}, + {"Yes", INC_KW_EXCLUDE, "e"}, + {"No", INC_KW_EXCLUDE, "0"}, + {"Yes", INC_KW_ACL, "A"}, + {"No", INC_KW_ACL, "0"}, + {"Yes", INC_KW_IGNORECASE, "i"}, + {"No", INC_KW_IGNORECASE, "0"}, + {"Yes", INC_KW_HFSPLUS, "R"}, /* "R" for resource fork */ + {"No", INC_KW_HFSPLUS, "0"}, + {"Yes", INC_KW_NOATIME, "K"}, + {"No", INC_KW_NOATIME, "0"}, + {"Yes", INC_KW_ENHANCEDWILD, "K"}, + {"No", INC_KW_ENHANCEDWILD, "0"}, + {"Yes", INC_KW_CHKCHANGES, "c"}, + {"No", INC_KW_CHKCHANGES, "0"}, + {"Yes", INC_KW_HONOR_NODUMP, "N"}, + {"No", INC_KW_HONOR_NODUMP, "0"}, + {"Yes", INC_KW_XATTR, "X"}, + {"No", INC_KW_XATTR, "0"}, + {NULL, 0, 0} +}; + + + +/* + * Scan for right hand side of Include options (keyword=option) is + * converted into one or two characters. Verify=xxxx is Vxxxx: + * Whatever is found is concatenated to the opts string. + * This code is also used inside an Options resource. + * + * This function returns true for a long option (terminates with :) + * and false for a normal 1 or 2 character option. 
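+ * + * For illustration, using entries from the FS_options table above: + * "Signature=MD5" appends "M" to opts, "Compression=GZIP6" appends "Z6", and + * "Sparse=yes" appends "s", so the option letters accumulate in the order the + * keywords are parsed.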
+ */ +static void scan_include_options(LEX *lc, int keyword, char *opts, int optlen) +{ + int i; + char option[3]; + int lcopts = lc->options; + + option[0] = 0; /* default option = none */ + option[2] = 0; /* terminate options */ + lc->options |= LOPT_STRING; /* force string */ + lex_get_token(lc, T_STRING); /* expect at least one option */ + /* + * ***FIXME**** ensure these are in permitted set + */ + if (keyword == INC_KW_VERIFY) { /* special case */ + bstrncat(opts, "V", optlen); /* indicate Verify */ + bstrncat(opts, lc->str, optlen); + bstrncat(opts, ":", optlen); /* terminate it */ + Dmsg3(900, "Catopts=%s option=%s optlen=%d\n", opts, option,optlen); + } else if (keyword == INC_KW_ACCURATE) { /* special case */ + bstrncat(opts, "C", optlen); /* indicate Accurate */ + bstrncat(opts, lc->str, optlen); + bstrncat(opts, ":", optlen); /* terminate it */ + Dmsg3(900, "Catopts=%s option=%s optlen=%d\n", opts, option,optlen); + } else if (keyword == INC_KW_BASEJOB) { /* special case */ + bstrncat(opts, "J", optlen); /* indicate BaseJob */ + bstrncat(opts, lc->str, optlen); + bstrncat(opts, ":", optlen); /* terminate it */ + Dmsg3(900, "Catopts=%s option=%s optlen=%d\n", opts, option,optlen); + } else if (keyword == INC_KW_STRIPPATH) { /* another special case */ + if (!is_an_integer(lc->str)) { + scan_err1(lc, _("Expected a strip path positive integer, got:%s:"), lc->str); + } + bstrncat(opts, "P", optlen); /* indicate strip path */ + bstrncat(opts, lc->str, optlen); + bstrncat(opts, ":", optlen); /* terminate it */ + Dmsg3(900, "Catopts=%s option=%s optlen=%d\n", opts, option,optlen); + /* + * Standard keyword options for Include/Exclude + */ + } else { + for (i=0; FS_options[i].name; i++) { + if (FS_options[i].keyword == keyword && strcasecmp(lc->str, FS_options[i].name) == 0) { + /* NOTE! maximum 2 letters here or increase option[3] */ + option[0] = FS_options[i].option[0]; + option[1] = FS_options[i].option[1]; + i = 0; + break; + } + } + if (i != 0) { + scan_err1(lc, _("Expected a FileSet option keyword, got:%s:"), lc->str); + } else { /* add option */ + bstrncat(opts, option, optlen); + Dmsg3(900, "Catopts=%s option=%s optlen=%d\n", opts, option,optlen); + } + } + lc->options = lcopts; + + /* If option terminated by comma, eat it */ + if (lc->ch == ',') { + lex_get_token(lc, T_ALL); /* yes, eat comma */ + } +} + +/* + * + * Store FileSet Include/Exclude info + * new style includes are handled in store_newinc() + */ +void store_inc(LEX *lc, RES_ITEM *item, int index, int pass) +{ + int token; + + /* + * Decide if we are doing a new Include or an old include. The + * new Include is followed immediately by open brace, whereas the + * old include has options following the Include. + */ + token = lex_get_token(lc, T_SKIP_EOL); + if (token == T_BOB) { + store_newinc(lc, item, index, pass); + return; + } + scan_err0(lc, _("Old style Include/Exclude not supported\n")); +} + + +/* + * Store new style FileSet Include/Exclude info + * + * Note, when this routine is called, we are inside a FileSet + * resource. We treat the Include/Execlude like a sort of + * mini-resource within the FileSet resource. 
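+ * + * A hypothetical Include block of the kind parsed here (the path and the + * marker file name are just examples): + * Include { + * Options { Signature = MD5 } + * File = /etc + * ExcludeDirContaining = .nobackup + * }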
+ */ +static void store_newinc(LEX *lc, RES_ITEM *item, int index, int pass) +{ + int token, i; + INCEXE *incexe; + bool options; + + if (!res_all.res_fs.have_MD5) { + MD5Init(&res_all.res_fs.md5c); + res_all.res_fs.have_MD5 = true; + } + bmemset(&res_incexe, 0, sizeof(INCEXE)); + res_all.res_fs.new_include = true; + while ((token = lex_get_token(lc, T_SKIP_EOL)) != T_EOF) { + if (token == T_EOB) { + break; + } + if (token != T_IDENTIFIER) { + scan_err1(lc, _("Expecting keyword, got: %s\n"), lc->str); + } + for (i=0; newinc_items[i].name; i++) { + options = strcasecmp(lc->str, "options") == 0; + if (strcasecmp(newinc_items[i].name, lc->str) == 0) { + if (!options) { + token = lex_get_token(lc, T_SKIP_EOL); + if (token != T_EQUALS) { + scan_err1(lc, _("expected an equals, got: %s"), lc->str); + } + } + /* Call item handler */ + newinc_items[i].handler(lc, &newinc_items[i], i, pass, item->code); + i = -1; + break; + } + } + if (i >=0) { + scan_err1(lc, _("Keyword %s not permitted in this resource"), lc->str); + } + } + if (pass == 1) { + incexe = (INCEXE *)malloc(sizeof(INCEXE)); + memcpy(incexe, &res_incexe, sizeof(INCEXE)); + bmemset(&res_incexe, 0, sizeof(INCEXE)); + if (item->code == 0) { /* include */ + if (res_all.res_fs.num_includes == 0) { + res_all.res_fs.include_items = (INCEXE **)malloc(sizeof(INCEXE *)); + } else { + res_all.res_fs.include_items = (INCEXE **)realloc(res_all.res_fs.include_items, + sizeof(INCEXE *) * (res_all.res_fs.num_includes + 1)); + } + res_all.res_fs.include_items[res_all.res_fs.num_includes++] = incexe; + Dmsg1(900, "num_includes=%d\n", res_all.res_fs.num_includes); + } else { /* exclude */ + if (res_all.res_fs.num_excludes == 0) { + res_all.res_fs.exclude_items = (INCEXE **)malloc(sizeof(INCEXE *)); + } else { + res_all.res_fs.exclude_items = (INCEXE **)realloc(res_all.res_fs.exclude_items, + sizeof(INCEXE *) * (res_all.res_fs.num_excludes + 1)); + } + res_all.res_fs.exclude_items[res_all.res_fs.num_excludes++] = incexe; + Dmsg1(900, "num_excludes=%d\n", res_all.res_fs.num_excludes); + } + } + scan_to_eol(lc); + set_bit(index, res_all.hdr.item_present); +} + + +/* Store regex info */ +void store_regex(LEX *lc, RES_ITEM *item, int index, int pass) +{ + int token, rc; + regex_t preg; + char prbuf[500]; + const char *type; + int newsize; + + token = lex_get_token(lc, T_SKIP_EOL); + if (pass == 1) { + /* Pickup regex string + */ + switch (token) { + case T_IDENTIFIER: + case T_UNQUOTED_STRING: + case T_QUOTED_STRING: + rc = regcomp(&preg, lc->str, REG_EXTENDED); + if (rc != 0) { + regerror(rc, &preg, prbuf, sizeof(prbuf)); + regfree(&preg); + scan_err1(lc, _("Regex compile error. 
ERR=%s\n"), prbuf); + break; + } + regfree(&preg); + if (item->code == 1) { + type = "regexdir"; + res_incexe.current_opts->regexdir.append(bstrdup(lc->str)); + newsize = res_incexe.current_opts->regexdir.size(); + } else if (item->code == 2) { + type = "regexfile"; + res_incexe.current_opts->regexfile.append(bstrdup(lc->str)); + newsize = res_incexe.current_opts->regexfile.size(); + } else { + type = "regex"; + res_incexe.current_opts->regex.append(bstrdup(lc->str)); + newsize = res_incexe.current_opts->regex.size(); + } + Dmsg4(900, "set %s %p size=%d %s\n", + type, res_incexe.current_opts, newsize, lc->str); + break; + default: + scan_err1(lc, _("Expected a regex string, got: %s\n"), lc->str); + } + } + scan_to_eol(lc); +} + +/* Store Base info */ +void store_base(LEX *lc, RES_ITEM *item, int index, int pass) +{ + + lex_get_token(lc, T_NAME); + if (pass == 1) { + /* + * Pickup Base Job Name + */ + res_incexe.current_opts->base.append(bstrdup(lc->str)); + } + scan_to_eol(lc); +} + +/* Store reader info */ +void store_plugin(LEX *lc, RES_ITEM *item, int index, int pass) +{ + + lex_get_token(lc, T_NAME); + if (pass == 1) { + /* + * Pickup plugin command + */ + res_incexe.current_opts->plugin = bstrdup(lc->str); + } + scan_to_eol(lc); +} + + +/* Store Wild-card info */ +void store_wild(LEX *lc, RES_ITEM *item, int index, int pass) +{ + int token; + const char *type; + int newsize; + + token = lex_get_token(lc, T_SKIP_EOL); + if (pass == 1) { + /* + * Pickup Wild-card string + */ + switch (token) { + case T_IDENTIFIER: + case T_UNQUOTED_STRING: + case T_QUOTED_STRING: + if (item->code == 1) { + type = "wilddir"; + res_incexe.current_opts->wilddir.append(bstrdup(lc->str)); + newsize = res_incexe.current_opts->wilddir.size(); + } else if (item->code == 2) { + if (strpbrk(lc->str, "/\\") != NULL) { + type = "wildfile"; + res_incexe.current_opts->wildfile.append(bstrdup(lc->str)); + newsize = res_incexe.current_opts->wildfile.size(); + } else { + type = "wildbase"; + res_incexe.current_opts->wildbase.append(bstrdup(lc->str)); + newsize = res_incexe.current_opts->wildbase.size(); + } + } else { + type = "wild"; + res_incexe.current_opts->wild.append(bstrdup(lc->str)); + newsize = res_incexe.current_opts->wild.size(); + } + Dmsg4(9, "set %s %p size=%d %s\n", + type, res_incexe.current_opts, newsize, lc->str); + break; + default: + scan_err1(lc, _("Expected a wild-card string, got: %s\n"), lc->str); + } + } + scan_to_eol(lc); +} + +/* Store fstype info */ +void store_fstype(LEX *lc, RES_ITEM *item, int index, int pass) +{ + int token; + if (pass == 1) { + for (;;) { + token = lex_get_token(lc, T_STRING); /* scan next item */ + if (token == T_ERROR) { + break; + } + res_incexe.current_opts->fstype.append(bstrdup(lc->str)); + + Dmsg3(900, "set fstype %p size=%d %s\n", + res_incexe.current_opts, res_incexe.current_opts->fstype.size(),lc->str); + + if (lc->ch != ',') { /* if no other item follows */ + break; /* get out */ + } + lex_get_token(lc, T_ALL); /* eat comma */ + } + } + scan_to_eol(lc); +} + +/* Store exclude directory containing info */ +static void store_excludedir(LEX *lc, RES_ITEM2 *item, int index, int pass, bool exclude) +{ + + if (exclude) { + scan_err0(lc, _("ExcludeDirContaining directive not permitted in Exclude.\n")); + /* NOT REACHED */ + } + lex_get_token(lc, T_NAME); + if (pass == 1) { + res_incexe.ignoredir = bstrdup(lc->str); + } + scan_to_eol(lc); +} + +/* Store drivetype info */ +void store_drivetype(LEX *lc, RES_ITEM *item, int index, int pass) +{ + int token; + if (pass == 
1) { + for (;;) { + token = lex_get_token(lc, T_STRING); /* scan next item */ + if (token == T_ERROR) { + break; + } + res_incexe.current_opts->drivetype.append(bstrdup(lc->str)); + Dmsg3(900, "set drivetype %p size=%d %s\n", + res_incexe.current_opts, res_incexe.current_opts->drivetype.size(),lc->str); + if (lc->ch != ',') { /* if no other item follows */ + break; /* get out */ + } + lex_get_token(lc, T_ALL); /* eat comma */ + } + } + scan_to_eol(lc); +} + +/* + * Store Filename info. Note, for minor efficiency reasons, we + * always increase the name buffer by 10 items because we expect + * to add more entries. + */ +static void store_fname(LEX *lc, RES_ITEM2 *item, int index, int pass, bool exclude) +{ + int token; + INCEXE *incexe; + + token = lex_get_token(lc, T_SKIP_EOL); + if (pass == 1) { + /* Pickup Filename string + */ + switch (token) { + case T_IDENTIFIER: + case T_UNQUOTED_STRING: + if (strchr(lc->str, '\\')) { + scan_err1(lc, _("Backslash found. Use forward slashes or quote the string.: %s\n"), lc->str); + /* NOT REACHED */ + } + case T_QUOTED_STRING: + if (res_all.res_fs.have_MD5) { + MD5Update(&res_all.res_fs.md5c, (unsigned char *)lc->str, lc->str_len); + } + incexe = &res_incexe; + if (incexe->name_list.size() == 0) { + incexe->name_list.init(10, true); + } + incexe->name_list.append(bstrdup(lc->str)); + Dmsg1(900, "Add to name_list %s\n", lc->str); + break; + default: + scan_err1(lc, _("Expected a filename, got: %s"), lc->str); + } + } + scan_to_eol(lc); +} + +/* + * Store Filename info. Note, for minor efficiency reasons, we + * always increase the name buffer by 10 items because we expect + * to add more entries. + */ +static void store_plugin_name(LEX *lc, RES_ITEM2 *item, int index, int pass, bool exclude) +{ + int token; + INCEXE *incexe; + + if (exclude) { + scan_err0(lc, _("Plugin directive not permitted in Exclude\n")); + /* NOT REACHED */ + } + token = lex_get_token(lc, T_SKIP_EOL); + if (pass == 1) { + /* Pickup Filename string + */ + switch (token) { + case T_IDENTIFIER: + case T_UNQUOTED_STRING: + if (strchr(lc->str, '\\')) { + scan_err1(lc, _("Backslash found. Use forward slashes or quote the string.: %s\n"), lc->str); + /* NOT REACHED */ + } + case T_QUOTED_STRING: + if (res_all.res_fs.have_MD5) { + MD5Update(&res_all.res_fs.md5c, (unsigned char *)lc->str, lc->str_len); + } + incexe = &res_incexe; + if (incexe->plugin_list.size() == 0) { + incexe->plugin_list.init(10, true); + } + incexe->plugin_list.append(bstrdup(lc->str)); + Dmsg1(900, "Add to plugin_list %s\n", lc->str); + break; + default: + scan_err1(lc, _("Expected a filename, got: %s"), lc->str); + /* NOT REACHED */ + } + } + scan_to_eol(lc); +} + + + +/* + * Come here when Options seen in Include/Exclude + */ +static void store_options_res(LEX *lc, RES_ITEM2 *item, int index, int pass, bool exclude) +{ + int token, i; + + if (exclude) { + scan_err0(lc, _("Options section not permitted in Exclude\n")); + /* NOT REACHED */ + } + token = lex_get_token(lc, T_SKIP_EOL); + if (token != T_BOB) { + scan_err1(lc, _("Expecting open brace. 
Got %s"), lc->str); + } + + if (pass == 1) { + setup_current_opts(); + } + + while ((token = lex_get_token(lc, T_ALL)) != T_EOF) { + if (token == T_EOL) { + continue; + } + if (token == T_EOB) { + break; + } + if (token != T_IDENTIFIER) { + scan_err1(lc, _("Expecting keyword, got: %s\n"), lc->str); + } + for (i=0; options_items[i].name; i++) { + if (strcasecmp(options_items[i].name, lc->str) == 0) { + token = lex_get_token(lc, T_SKIP_EOL); + if (token != T_EQUALS) { + scan_err1(lc, _("expected an equals, got: %s"), lc->str); + } + /* Call item handler */ + options_items[i].handler(lc, &options_items[i], i, pass); + i = -1; + break; + } + } + if (i >=0) { + scan_err1(lc, _("Keyword %s not permitted in this resource"), lc->str); + } + } +} + +/* + * Different subroutine, but uses store_opts + */ +void store_lopts(LEX *lc, RES_ITEM *item, int index, int pass) +{ + store_opts(lc, item, index, pass); +} + +/* + * New style options come here + */ +void store_opts(LEX *lc, RES_ITEM *item, int index, int pass) +{ + int i; + int keyword; + char inc_opts[100]; + + inc_opts[0] = 0; + keyword = INC_KW_NONE; + /* Look up the keyword */ + for (i=0; FS_option_kw[i].name; i++) { + if (strcasecmp(item->name, FS_option_kw[i].name) == 0) { + keyword = FS_option_kw[i].token; + break; + } + } + if (keyword == INC_KW_NONE) { + scan_err1(lc, _("Expected a FileSet keyword, got: %s"), lc->str); + } + /* Now scan for the value */ + scan_include_options(lc, keyword, inc_opts, sizeof(inc_opts)); + if (pass == 1) { + bstrncat(res_incexe.current_opts->opts, inc_opts, MAX_FOPTS); + Dmsg2(900, "new pass=%d incexe opts=%s\n", pass, res_incexe.current_opts->opts); + } + scan_to_eol(lc); + set_bit(keyword, res_incexe.opt_present); +} + + + + +/* If current_opts not defined, create first entry */ +static void setup_current_opts(void) +{ + FOPTS *fo = (FOPTS *)malloc(sizeof(FOPTS)); + bmemset(fo, 0, sizeof(FOPTS)); + fo->regex.init(1, true); + fo->regexdir.init(1, true); + fo->regexfile.init(1, true); + fo->wild.init(1, true); + fo->wilddir.init(1, true); + fo->wildfile.init(1, true); + fo->wildbase.init(1, true); + fo->base.init(1, true); + fo->fstype.init(1, true); + fo->drivetype.init(1, true); + res_incexe.current_opts = fo; + if (res_incexe.num_opts == 0) { + res_incexe.opts_list = (FOPTS **)malloc(sizeof(FOPTS *)); + } else { + res_incexe.opts_list = (FOPTS **)realloc(res_incexe.opts_list, + sizeof(FOPTS *) * (res_incexe.num_opts + 1)); + } + res_incexe.opts_list[res_incexe.num_opts++] = fo; +} diff --git a/src/dird/job.c b/src/dird/job.c new file mode 100644 index 00000000..4278f4b0 --- /dev/null +++ b/src/dird/job.c @@ -0,0 +1,1921 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Bacula Director Job processing routines + * + * Kern Sibbald, October MM + */ + +#include "bacula.h" +#include "dird.h" + +/* Forward referenced subroutines */ +static void *job_thread(void *arg); +static void job_monitor_watchdog(watchdog_t *self); +static void job_monitor_destructor(watchdog_t *self); +static bool job_check_maxwaittime(JCR *jcr); +static bool job_check_maxruntime(JCR *jcr); +static bool job_check_maxrunschedtime(JCR *jcr); + +/* Imported subroutines */ +extern void term_scheduler(); +extern void term_ua_server(); + +/* Imported variables */ + +jobq_t job_queue; + +void init_job_server(int max_workers) +{ + int stat; + watchdog_t *wd; + + if ((stat = jobq_init(&job_queue, max_workers, job_thread)) != 0) { + berrno be; + Emsg1(M_ABORT, 0, _("Could not init job queue: ERR=%s\n"), be.bstrerror(stat)); + } + wd = new_watchdog(); + wd->callback = job_monitor_watchdog; + wd->destructor = job_monitor_destructor; + wd->one_shot = false; + wd->interval = 60; + wd->data = new_control_jcr("*JobMonitor*", JT_SYSTEM); + register_watchdog(wd); +} + +void term_job_server() +{ + jobq_destroy(&job_queue); /* ignore any errors */ +} + +/* + * Run a job -- typically called by the scheduler, but may also + * be called by the UA (Console program). + * + * Returns: 0 on failure + * JobId on success + * + */ +JobId_t run_job(JCR *jcr) +{ + int stat; + if (setup_job(jcr)) { + Dmsg0(200, "Add jrc to work queue\n"); + /* Queue the job to be run */ + if ((stat = jobq_add(&job_queue, jcr)) != 0) { + berrno be; + Jmsg(jcr, M_FATAL, 0, _("Could not add job queue: ERR=%s\n"), be.bstrerror(stat)); + return 0; + } + return jcr->JobId; + } + return 0; +} + +bool setup_job(JCR *jcr) +{ + int errstat; + + jcr->lock(); + Dsm_check(100); + init_msg(jcr, jcr->messages, job_code_callback_director); + + /* Initialize termination condition variable */ + if ((errstat = pthread_cond_init(&jcr->term_wait, NULL)) != 0) { + berrno be; + Jmsg1(jcr, M_FATAL, 0, _("Unable to init job cond variable: ERR=%s\n"), be.bstrerror(errstat)); + jcr->unlock(); + goto bail_out; + } + jcr->term_wait_inited = true; + + create_unique_job_name(jcr, jcr->job->name()); + jcr->setJobStatus(JS_Created); + jcr->unlock(); + + /* + * Open database + */ + Dmsg0(100, "Open database\n"); + jcr->db = db_init_database(jcr, jcr->catalog->db_driver, jcr->catalog->db_name, + jcr->catalog->db_user, jcr->catalog->db_password, + jcr->catalog->db_address, jcr->catalog->db_port, + jcr->catalog->db_socket, jcr->catalog->db_ssl_mode, + jcr->catalog->db_ssl_key, jcr->catalog->db_ssl_cert, + jcr->catalog->db_ssl_ca, jcr->catalog->db_ssl_capath, + jcr->catalog->db_ssl_cipher, + jcr->catalog->mult_db_connections, + jcr->catalog->disable_batch_insert); + if (!jcr->db || !db_open_database(jcr, jcr->db)) { + Jmsg(jcr, M_FATAL, 0, _("Could not open database \"%s\".\n"), + jcr->catalog->db_name); + if (jcr->db) { + Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(jcr->db)); + db_close_database(jcr, jcr->db); + jcr->db = NULL; + } + goto bail_out; + } + + Dmsg0(150, "DB opened\n"); + if (!jcr->fname) { + jcr->fname = get_pool_memory(PM_FNAME); + } + if (!jcr->pool_source) { + jcr->pool_source = get_pool_memory(PM_MESSAGE); + pm_strcpy(jcr->pool_source, _("unknown source")); + } + if (!jcr->next_pool_source) { + jcr->next_pool_source = get_pool_memory(PM_MESSAGE); + pm_strcpy(jcr->next_pool_source, _("unknown source")); + } + + if (jcr->JobReads()) { + if (!jcr->rpool_source) { + jcr->rpool_source = get_pool_memory(PM_MESSAGE); + pm_strcpy(jcr->rpool_source, 
_("unknown source")); + } + } + + /* + * Create Job record + */ + init_jcr_job_record(jcr); + if (!get_or_create_client_record(jcr)) { + goto bail_out; + } + + if (!db_create_job_record(jcr, jcr->db, &jcr->jr)) { + Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(jcr->db)); + goto bail_out; + } + jcr->JobId = jcr->jr.JobId; + Dmsg4(100, "Created job record JobId=%d Name=%s Type=%c Level=%c\n", + jcr->JobId, jcr->Job, jcr->jr.JobType, jcr->jr.JobLevel); + + generate_daemon_event(jcr, "JobStart"); + new_plugins(jcr); /* instantiate plugins for this jcr */ + generate_plugin_event(jcr, bDirEventJobStart); + + if (job_canceled(jcr)) { + goto bail_out; + } + + if (jcr->JobReads() && !jcr->rstorage) { + if (jcr->job->storage) { + copy_rwstorage(jcr, jcr->job->storage, _("Job resource")); + } else { + copy_rwstorage(jcr, jcr->job->pool->storage, _("Pool resource")); + } + } + if (!jcr->JobReads()) { + free_rstorage(jcr); + } + + /* + * Now, do pre-run stuff, like setting job level (Inc/diff, ...) + * this allows us to setup a proper job start record for restarting + * in case of later errors. + */ + switch (jcr->getJobType()) { + case JT_BACKUP: + if (!do_backup_init(jcr)) { + backup_cleanup(jcr, JS_ErrorTerminated); + goto bail_out; + } + break; + case JT_VERIFY: + if (!do_verify_init(jcr)) { + verify_cleanup(jcr, JS_ErrorTerminated); + goto bail_out; + } + break; + case JT_RESTORE: + if (!do_restore_init(jcr)) { + restore_cleanup(jcr, JS_ErrorTerminated); + goto bail_out; + } + break; + case JT_ADMIN: + if (!do_admin_init(jcr)) { + admin_cleanup(jcr, JS_ErrorTerminated); + goto bail_out; + } + break; + case JT_COPY: + case JT_MIGRATE: + if (!do_mac_init(jcr)) { + mac_cleanup(jcr, JS_ErrorTerminated, JS_ErrorTerminated); + goto bail_out; + } + break; + default: + Pmsg1(0, _("Unimplemented job type: %d\n"), jcr->getJobType()); + jcr->setJobStatus(JS_ErrorTerminated); + goto bail_out; + } + + generate_plugin_event(jcr, bDirEventJobInit); + Dsm_check(100); + return true; + +bail_out: + return false; +} + +/* + * Setup a job for a resume command + */ +static bool setup_resume_job(JCR *jcr, JOB_DBR *jr) +{ + int errstat; + jcr->lock(); + Dsm_check(100); + init_msg(jcr, jcr->messages); + + /* Initialize termination condition variable */ + if ((errstat = pthread_cond_init(&jcr->term_wait, NULL)) != 0) { + berrno be; + Jmsg1(jcr, M_FATAL, 0, _("Unable to init job cond variable: ERR=%s\n"), be.bstrerror(errstat)); + jcr->unlock(); + goto bail_out; + } + jcr->term_wait_inited = true; + + jcr->setJobStatus(JS_Created); + jcr->unlock(); + + /* + * Open database + */ + Dmsg0(100, "Open database\n"); + jcr->db = db_init_database(jcr, jcr->catalog->db_driver, jcr->catalog->db_name, + jcr->catalog->db_user, jcr->catalog->db_password, + jcr->catalog->db_address, jcr->catalog->db_port, + jcr->catalog->db_socket, jcr->catalog->db_ssl_mode, + jcr->catalog->db_ssl_key, jcr->catalog->db_ssl_cert, + jcr->catalog->db_ssl_ca, jcr->catalog->db_ssl_capath, + jcr->catalog->db_ssl_cipher, + jcr->catalog->mult_db_connections, + jcr->catalog->disable_batch_insert); + if (!jcr->db || !db_open_database(jcr, jcr->db)) { + Jmsg(jcr, M_FATAL, 0, _("Could not open database \"%s\".\n"), + jcr->catalog->db_name); + if (jcr->db) { + Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(jcr->db)); + db_close_database(jcr, jcr->db); + jcr->db = NULL; + } + goto bail_out; + } + Dmsg0(100, "DB opened\n"); + if (!jcr->fname) { + jcr->fname = get_pool_memory(PM_FNAME); + } + if (!jcr->pool_source) { + jcr->pool_source = get_pool_memory(PM_MESSAGE); + 
pm_strcpy(jcr->pool_source, _("unknown source")); + } + if (!jcr->next_pool_source) { + jcr->next_pool_source = get_pool_memory(PM_MESSAGE); + pm_strcpy(jcr->next_pool_source, _("unknown source")); + } + + + /* + * Setup Job record. Make sure original job is Incomplete. + */ + memcpy(&jcr->jr, jr, sizeof(JOB_DBR)); + jcr->sched_time = jcr->jr.SchedTime; + jcr->start_time = jcr->jr.StartTime; + jcr->jr.EndTime = 0; /* perhaps rescheduled, clear it */ + jcr->setJobType(jcr->jr.JobType); + jcr->setJobLevel(jcr->jr.JobLevel); + jcr->JobId = jcr->jr.JobId; + if (!get_or_create_client_record(jcr)) { + Dmsg0(100, "Could not create client record.\n"); + goto bail_out; + } + + Dmsg6(100, "Got job record JobId=%d Job=%s Name=%s Type=%c Level=%c Status=%c\n", + jcr->jr.JobId, jcr->jr.Job, jcr->jr.Name, jcr->jr.JobType, jcr->jr.JobLevel, + jcr->jr.JobStatus); + if (jcr->jr.JobStatus != JS_Incomplete) { + /* ***FIXME*** add error message */ + Dmsg1(100, "Job is not an Incomplete: status=%c\n", jcr->jr.JobStatus); + goto bail_out; + } + bstrncpy(jcr->Job, jcr->jr.Job, sizeof(jcr->Job)); + jcr->setJobType(jcr->jr.JobType); + jcr->setJobLevel(jcr->jr.JobLevel); + + generate_daemon_event(jcr, "JobStart"); + new_plugins(jcr); /* instantiate plugins for this jcr */ + generate_plugin_event(jcr, bDirEventJobStart); + + if (job_canceled(jcr)) { + Dmsg0(100, "Oops. Job canceled\n"); + goto bail_out; + } + + /* Re-run the old job */ + jcr->rerunning = true; + + /* + * Now, do pre-run stuff, like setting job level (Inc/diff, ...) + * this allows us to setup a proper job start record for restarting + * in case of later errors. + */ + switch (jcr->getJobType()) { + case JT_BACKUP: + if (!do_backup_init(jcr)) { + backup_cleanup(jcr, JS_ErrorTerminated); + goto bail_out; + } + break; + default: + Pmsg1(0, _("Unimplemented job type: %d\n"), jcr->getJobType()); + jcr->setJobStatus(JS_ErrorTerminated); + goto bail_out; + } + + generate_plugin_event(jcr, bDirEventJobInit); + Dsm_check(100); + return true; + +bail_out: + return false; +} + +JobId_t resume_job(JCR *jcr, JOB_DBR *jr) +{ + int stat; + if (setup_resume_job(jcr, jr)) { + Dmsg0(200, "Add jrc to work queue\n"); + /* Queue the job to be run */ + if ((stat = jobq_add(&job_queue, jcr)) != 0) { + berrno be; + Jmsg(jcr, M_FATAL, 0, _("Could not add job queue: ERR=%s\n"), be.bstrerror(stat)); + return 0; + } + return jcr->JobId; + } + return 0; +} + + + +void update_job_end(JCR *jcr, int TermCode) +{ + dequeue_messages(jcr); /* display any queued messages */ + jcr->setJobStatus(TermCode); + update_job_end_record(jcr); +} + +/* + * This is the engine called by jobq.c:jobq_add() when we were pulled + * from the work queue. 
+ * At this point, we are running in our own thread and all + * necessary resources are allocated -- see jobq.c + */ +static void *job_thread(void *arg) +{ + JCR *jcr = (JCR *)arg; + + pthread_detach(pthread_self()); + Dsm_check(100); + + Dmsg0(200, "=====Start Job=========\n"); + jcr->setJobStatus(JS_Running); /* this will be set only if no error */ + jcr->start_time = time(NULL); /* set the real start time */ + jcr->jr.StartTime = jcr->start_time; + + if (jcr->job->MaxStartDelay != 0 && jcr->job->MaxStartDelay < + (utime_t)(jcr->start_time - jcr->sched_time)) { + jcr->setJobStatus(JS_Canceled); + Jmsg(jcr, M_FATAL, 0, _("Job canceled because max start delay time exceeded.\n")); + } + + if (job_check_maxrunschedtime(jcr)) { + jcr->setJobStatus(JS_Canceled); + Jmsg(jcr, M_FATAL, 0, _("Job canceled because max run sched time exceeded.\n")); + } + + /* TODO : check if it is used somewhere */ + if (jcr->job->RunScripts == NULL) { + Dmsg0(200, "Warning, job->RunScripts is empty\n"); + jcr->job->RunScripts = New(alist(10, not_owned_by_alist)); + } + + if (!db_update_job_start_record(jcr, jcr->db, &jcr->jr)) { + Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(jcr->db)); + } + + /* Run any script BeforeJob on dird */ + run_scripts(jcr, jcr->job->RunScripts, "BeforeJob"); + + /* + * We re-update the job start record so that the start + * time is set after the run before job. This avoids + * that any files created by the run before job will + * be saved twice. They will be backed up in the current + * job, but not in the next one unless they are changed. + * Without this, they will be backed up in this job and + * in the next job run because in that case, their date + * is after the start of this run. + */ + jcr->start_time = time(NULL); + jcr->jr.StartTime = jcr->start_time; + if (!db_update_job_start_record(jcr, jcr->db, &jcr->jr)) { + Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(jcr->db)); + } + generate_plugin_event(jcr, bDirEventJobRun); + + switch (jcr->getJobType()) { + case JT_BACKUP: + if (!job_canceled(jcr) && do_backup(jcr)) { + do_autoprune(jcr); + } else { + backup_cleanup(jcr, JS_ErrorTerminated); + } + break; + case JT_VERIFY: + if (!job_canceled(jcr) && do_verify(jcr)) { + do_autoprune(jcr); + } else { + verify_cleanup(jcr, JS_ErrorTerminated); + } + break; + case JT_RESTORE: + if (!job_canceled(jcr) && do_restore(jcr)) { + do_autoprune(jcr); + } else { + restore_cleanup(jcr, JS_ErrorTerminated); + } + break; + case JT_ADMIN: + if (!job_canceled(jcr) && do_admin(jcr)) { + do_autoprune(jcr); + } else { + admin_cleanup(jcr, JS_ErrorTerminated); + } + break; + case JT_COPY: + case JT_MIGRATE: + if (!job_canceled(jcr) && do_mac(jcr)) { + do_autoprune(jcr); + } else { + mac_cleanup(jcr, JS_ErrorTerminated, JS_ErrorTerminated); + } + break; + default: + Pmsg1(0, _("Unimplemented job type: %d\n"), jcr->getJobType()); + break; + } + + run_scripts(jcr, jcr->job->RunScripts, "AfterJob"); + + /* Send off any queued messages */ + if (jcr->msg_queue && jcr->msg_queue->size() > 0) { + dequeue_messages(jcr); + } + + generate_daemon_event(jcr, "JobEnd"); + generate_plugin_event(jcr, bDirEventJobEnd); + Dmsg1(50, "======== End Job stat=%c ==========\n", jcr->JobStatus); + dequeue_daemon_messages(jcr); + Dsm_check(100); + return NULL; +} + +void sd_msg_thread_send_signal(JCR *jcr, int sig) +{ + jcr->lock(); + if ( !jcr->sd_msg_thread_done + && jcr->SD_msg_chan_started + && !pthread_equal(jcr->SD_msg_chan, pthread_self())) + { + Dmsg1(800, "Send kill to SD msg chan jid=%d\n", jcr->JobId); + 
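+ /* Interrupt the SD message channel thread (callers pass TIMEOUT_SIGNAL)
+  * so that any blocking I/O is broken off */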
pthread_kill(jcr->SD_msg_chan, sig); + } + jcr->unlock(); +} + +static bool cancel_file_daemon_job(UAContext *ua, const char *cmd, JCR *jcr) +{ + CLIENT *old_client; + + if (!jcr->client) { + Dmsg0(100, "No client to cancel\n"); + return false; + } + old_client = ua->jcr->client; + ua->jcr->client = jcr->client; + if (!connect_to_file_daemon(ua->jcr, 10, FDConnectTimeout, 1)) { + ua->error_msg(_("Failed to connect to File daemon.\n")); + ua->jcr->client = old_client; + return false; + } + Dmsg3(10, "Connected to file daemon %s for cancel ua.jcr=%p jcr=%p\n", + ua->jcr->client->name(), ua->jcr, jcr); + BSOCK *fd = ua->jcr->file_bsock; + fd->fsend("%s Job=%s\n", cmd, jcr->Job); + while (fd->recv() >= 0) { + ua->send_msg("%s", fd->msg); + } + fd->signal(BNET_TERMINATE); + free_bsock(ua->jcr->file_bsock); + ua->jcr->client = old_client; + return true; +} + +static bool cancel_sd_job(UAContext *ua, const char *cmd, JCR *jcr) +{ + if (jcr->store_bsock) { + if (jcr->rstorage) { + copy_wstorage(ua->jcr, jcr->rstorage, _("Job resource")); + } else { + copy_wstorage(ua->jcr, jcr->wstorage, _("Job resource")); + } + } else { + USTORE store; + if (jcr->rstorage) { + store.store = jcr->rstore; + } else { + store.store = jcr->wstore; + } + set_wstorage(ua->jcr, &store); + } + + if (!ua->jcr->wstore) { + ua->error_msg(_("Failed to select Storage daemon.\n")); + return false; + } + + if (!connect_to_storage_daemon(ua->jcr, 10, SDConnectTimeout, 1)) { + ua->error_msg(_("Failed to connect to Storage daemon.\n")); + return false; + } + + Dmsg3(10, "Connected to storage daemon %s for cancel ua.jcr=%p jcr=%p\n", + ua->jcr->wstore->name(), ua->jcr, jcr); + + BSOCK *sd = ua->jcr->store_bsock; + sd->fsend("%s Job=%s\n", cmd, jcr->Job); + while (sd->recv() >= 0) { + ua->send_msg("%s", sd->msg); + } + sd->signal(BNET_TERMINATE); + free_bsock(ua->jcr->store_bsock); + return true; +} + +/* The FD is not connected, so we try to complete JCR fields and send + * the cancel command. 
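+ *
+ * The job is identified by a "jobid=" or "ujobid=" console argument and
+ * looked up in the catalog; since the Storage cannot be deduced from the
+ * job record here, the user is asked to pick one via get_storage_resource().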
+ */ +int cancel_inactive_job(UAContext *ua) +{ + CLIENT_DBR cr; + JOB_DBR jr; + int i; + USTORE store; + CLIENT *client; + JCR *jcr = new_jcr(sizeof(JCR), dird_free_jcr); + + memset(&jr, 0, sizeof(jr)); + memset(&cr, 0, sizeof(cr)); + + if ((i = find_arg_with_value(ua, "jobid")) > 0) { + jr.JobId = str_to_int64(ua->argv[i]); + + } else if ((i = find_arg_with_value(ua, "ujobid")) > 0) { + bstrncpy(jr.Job, ua->argv[i], sizeof(jr.Job)); + + } else { + ua->error_msg(_("jobid/ujobid argument not found.\n")); + goto bail_out; + } + + if (!open_client_db(ua)) { + goto bail_out; + } + + if (!db_get_job_record(ua->jcr, ua->db, &jr)) { + ua->error_msg(_("Job %ld/%s not found in database.\n"), jr.JobId, jr.Job); + goto bail_out; + } + + if (!acl_access_ok(ua, Job_ACL, jr.Name)) { + ua->error_msg(_("Job %s is not accessible from this console\n"), jr.Name); + goto bail_out; + } + + cr.ClientId = jr.ClientId; + if (!cr.ClientId || !db_get_client_record(ua->jcr, ua->db, &cr)) { + ua->error_msg(_("Client %ld not found in database.\n"), jr.ClientId); + goto bail_out; + } + + if (acl_access_client_ok(ua, cr.Name, jr.JobType)) { + client = (CLIENT *)GetResWithName(R_CLIENT, cr.Name); + if (client) { + jcr->client = client; + } else { + Jmsg1(jcr, M_FATAL, 0, _("Client resource \"%s\" does not exist.\n"), cr.Name); + goto bail_out; + } + } else { + goto bail_out; + } + + jcr->JobId = jr.JobId; + bstrncpy(jcr->Job, jr.Job, sizeof(jcr->Job)); + + cancel_file_daemon_job(ua, "cancel", jcr); + + /* At this time, we can't really guess the storage name from + * the job record + */ + store.store = get_storage_resource(ua, false/*no default*/, true/*unique*/); + if (!store.store) { + goto bail_out; + } + + set_wstorage(jcr, &store); + cancel_sd_job(ua, "cancel", jcr); + +bail_out: + jcr->JobId = 0; + free_jcr(jcr); + return 1; +} + +/* + * Cancel a job -- typically called by the UA (Console program), but may also + * be called by the job watchdog. + * + * Returns: true if cancel appears to be successful + * false on failure. Message sent to ua->jcr. 
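+ *
+ * Within this file it is called from cancel_thread() (spawned by the
+ * job watchdog) and from allow_duplicate_job(), e.g.
+ *    cancel_job(ua, djcr, 60);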
+ */ +bool +cancel_job(UAContext *ua, JCR *jcr, int wait, bool cancel) +{ + char ed1[50]; + int32_t old_status = jcr->JobStatus; + int status; + const char *reason, *cmd; + + if (!cancel) { /* stop the job */ + if (!jcr->can_be_stopped()) { + ua->error_msg(_("Cannot stop JobId %s, Job %s is not a regular Backup Job\n"), + edit_uint64(jcr->JobId, ed1), jcr->Job); + return true; + } + } + + if (cancel) { + status = JS_Canceled; + reason = _("canceled"); + cmd = NT_("cancel"); + } else { + status = JS_Incomplete; + reason = _("stopped"); + cmd = NT_("stop"); + jcr->RescheduleIncompleteJobs = false; /* do not restart */ + } + + jcr->setJobStatus(status); + + switch (old_status) { + case JS_Created: + case JS_WaitJobRes: + case JS_WaitClientRes: + case JS_WaitStoreRes: + case JS_WaitPriority: + case JS_WaitMaxJobs: + case JS_WaitStartTime: + case JS_WaitDevice: + ua->info_msg(_("JobId %s, Job %s marked to be %s.\n"), + edit_uint64(jcr->JobId, ed1), jcr->Job, + reason); + jobq_remove(&job_queue, jcr); /* attempt to remove it from queue */ + break; + + default: + + /* Cancel File daemon */ + if (jcr->file_bsock) { + btimer_t *tid; + /* do not return now, we want to try to cancel the sd */ + tid = start_bsock_timer(jcr->file_bsock, 120); + cancel_file_daemon_job(ua, cmd, jcr); + stop_bsock_timer(tid); + } + + /* We test file_bsock because the previous operation can take + * several minutes + */ + if (jcr->file_bsock && cancel) { + jcr->file_bsock->set_terminated(); + jcr->my_thread_send_signal(TIMEOUT_SIGNAL); + } + + /* Cancel Storage daemon */ + if (jcr->store_bsock) { + btimer_t *tid; + /* do not return now, we want to try to cancel the sd socket */ + tid = start_bsock_timer(jcr->store_bsock, 120); + cancel_sd_job(ua, cmd, jcr); + stop_bsock_timer(tid); + } + + /* We test file_bsock because the previous operation can take + * several minutes + */ + if (jcr->store_bsock && cancel) { + jcr->store_bsock->set_timed_out(); + jcr->store_bsock->set_terminated(); + sd_msg_thread_send_signal(jcr, TIMEOUT_SIGNAL); + jcr->my_thread_send_signal(TIMEOUT_SIGNAL); + } + + /* Cancel Copy/Migration Storage daemon */ + if (jcr->wjcr) { + /* The wjcr is valid until we call free_jcr(jcr) */ + JCR *wjcr = jcr->wjcr; + + if (wjcr->store_bsock) { + btimer_t *tid; + /* do not return now, we want to try to cancel the sd socket */ + tid = start_bsock_timer(wjcr->store_bsock, 120); + cancel_sd_job(ua, cmd, wjcr); + stop_bsock_timer(tid); + } + /* We test store_bsock because the previous operation can take + * several minutes + */ + if (wjcr->store_bsock && cancel) { + wjcr->store_bsock->set_timed_out(); + wjcr->store_bsock->set_terminated(); + sd_msg_thread_send_signal(wjcr, TIMEOUT_SIGNAL); + wjcr->my_thread_send_signal(TIMEOUT_SIGNAL); + } + } + break; + } + + return true; +} + +void cancel_storage_daemon_job(JCR *jcr) +{ + if (jcr->sd_canceled) { + return; /* cancel only once */ + } + + UAContext *ua = new_ua_context(jcr); + JCR *control_jcr = new_control_jcr("*JobCancel*", JT_SYSTEM); + BSOCK *sd; + + ua->jcr = control_jcr; + if (jcr->store_bsock) { + if (!ua->jcr->wstorage) { + if (jcr->rstorage) { + copy_wstorage(ua->jcr, jcr->rstorage, _("Job resource")); + } else { + copy_wstorage(ua->jcr, jcr->wstorage, _("Job resource")); + } + } else { + USTORE store; + if (jcr->rstorage) { + store.store = jcr->rstore; + } else { + store.store = jcr->wstore; + } + set_wstorage(ua->jcr, &store); + } + + if (!connect_to_storage_daemon(ua->jcr, 10, SDConnectTimeout, 1)) { + goto bail_out; + } + Dmsg0(200, "Connected to storage 
daemon\n"); + sd = ua->jcr->store_bsock; + sd->fsend("cancel Job=%s\n", jcr->Job); + while (sd->recv() >= 0) { + } + sd->signal(BNET_TERMINATE); + free_bsock(ua->jcr->store_bsock); + jcr->sd_canceled = true; + jcr->store_bsock->set_timed_out(); + jcr->store_bsock->set_terminated(); + sd_msg_thread_send_signal(jcr, TIMEOUT_SIGNAL); + jcr->my_thread_send_signal(TIMEOUT_SIGNAL); + } +bail_out: + free_jcr(control_jcr); + free_ua_context(ua); +} + +static void job_monitor_destructor(watchdog_t *self) +{ + JCR *control_jcr = (JCR *)self->data; + + free_jcr(control_jcr); +} + +extern "C" void *cancel_thread(void *arg) +{ + JCR *jcr = (JCR *)arg; + UAContext *ua; + JCR *control_jcr; + + pthread_detach(pthread_self()); + ua = new_ua_context(jcr); + control_jcr = new_control_jcr("*CancelThread*", JT_SYSTEM); + ua->jcr = control_jcr; + + Dmsg3(400, "Cancelling JCR %p JobId=%d (%s)\n", jcr, jcr->JobId, jcr->Job); + cancel_job(ua, jcr, 120); + Dmsg2(400, "Have cancelled JCR %p JobId=%d\n", jcr, jcr->JobId); + + free_ua_context(ua); + free_jcr(control_jcr); + free_jcr(jcr); + return NULL; +} + +static void job_monitor_watchdog(watchdog_t *wd) +{ + JCR *jcr; + + Dsm_check(100); + Dmsg1(800, "job_monitor_watchdog %p called\n", wd); + + foreach_jcr(jcr) { + bool cancel = false; + + if (jcr->JobId == 0 || job_canceled(jcr) || jcr->no_maxtime) { + Dmsg2(800, "Skipping JCR=%p Job=%s\n", jcr, jcr->Job); + continue; + } + + /* check MaxWaitTime */ + if (job_check_maxwaittime(jcr)) { + jcr->setJobStatus(JS_Canceled); + Qmsg(jcr, M_FATAL, 0, _("Max wait time exceeded. Job canceled.\n")); + cancel = true; + /* check MaxRunTime */ + } else if (job_check_maxruntime(jcr)) { + jcr->setJobStatus(JS_Canceled); + Qmsg(jcr, M_FATAL, 0, _("Max run time exceeded. Job canceled.\n")); + cancel = true; + /* check MaxRunSchedTime */ + } else if (job_check_maxrunschedtime(jcr)) { + jcr->setJobStatus(JS_Canceled); + Qmsg(jcr, M_FATAL, 0, _("Max run sched time exceeded. Job canceled.\n")); + cancel = true; + } + + if (cancel) { + pthread_t thid; + int status; + jcr->inc_use_count(); + if ((status=pthread_create(&thid, NULL, cancel_thread, (void *)jcr)) != 0) { + berrno be; + Jmsg1(jcr, M_WARNING, 0, _("Cannot create cancel thread: ERR=%s\n"), be.bstrerror(status)); + free_jcr(jcr); + } + } + } + /* Keep reference counts correct */ + endeach_jcr(jcr); +} + +/* + * Check if the maxwaittime has expired and it is possible + * to cancel the job. + */ +static bool job_check_maxwaittime(JCR *jcr) +{ + bool cancel = false; + JOB *job = jcr->job; + utime_t current=0; + + if (!job_waiting(jcr)) { + return false; + } + + if (jcr->wait_time) { + current = watchdog_time - jcr->wait_time; + } + + Dmsg2(200, "check maxwaittime %u >= %u\n", + current + jcr->wait_time_sum, job->MaxWaitTime); + if (job->MaxWaitTime != 0 && + (current + jcr->wait_time_sum) >= job->MaxWaitTime) { + cancel = true; + } + + return cancel; +} + +/* + * Check if maxruntime has expired and if the job can be + * canceled. 
+ */ +static bool job_check_maxruntime(JCR *jcr) +{ + bool cancel = false; + JOB *job = jcr->job; + utime_t run_time; + + if (job_canceled(jcr) || !jcr->job_started) { + return false; + } + if (jcr->job->MaxRunTime == 0 && job->FullMaxRunTime == 0 && + job->IncMaxRunTime == 0 && job->DiffMaxRunTime == 0) { + return false; + } + run_time = watchdog_time - jcr->start_time; + Dmsg7(200, "check_maxruntime %llu-%u=%llu >= %llu|%llu|%llu|%llu\n", + watchdog_time, jcr->start_time, run_time, job->MaxRunTime, job->FullMaxRunTime, + job->IncMaxRunTime, job->DiffMaxRunTime); + + if (jcr->getJobLevel() == L_FULL && job->FullMaxRunTime != 0 && + run_time >= job->FullMaxRunTime) { + Dmsg0(200, "check_maxwaittime: FullMaxcancel\n"); + cancel = true; + } else if (jcr->getJobLevel() == L_DIFFERENTIAL && job->DiffMaxRunTime != 0 && + run_time >= job->DiffMaxRunTime) { + Dmsg0(200, "check_maxwaittime: DiffMaxcancel\n"); + cancel = true; + } else if (jcr->getJobLevel() == L_INCREMENTAL && job->IncMaxRunTime != 0 && + run_time >= job->IncMaxRunTime) { + Dmsg0(200, "check_maxwaittime: IncMaxcancel\n"); + cancel = true; + } else if (job->MaxRunTime > 0 && run_time >= job->MaxRunTime) { + Dmsg0(200, "check_maxwaittime: Maxcancel\n"); + cancel = true; + } + + return cancel; +} + +/* + * Check if MaxRunSchedTime has expired and if the job can be + * canceled. + */ +static bool job_check_maxrunschedtime(JCR *jcr) +{ + if (jcr->MaxRunSchedTime == 0 || job_canceled(jcr)) { + return false; + } + if ((watchdog_time - jcr->initial_sched_time) < jcr->MaxRunSchedTime) { + Dmsg3(200, "Job %p (%s) with MaxRunSchedTime %d not expired\n", + jcr, jcr->Job, jcr->MaxRunSchedTime); + return false; + } + + return true; +} + +/* + * Get or create a Pool record with the given name. + * Returns: 0 on error + * poolid if OK + */ +DBId_t get_or_create_pool_record(JCR *jcr, char *pool_name) +{ + POOL_DBR pr; + + memset(&pr, 0, sizeof(pr)); + bstrncpy(pr.Name, pool_name, sizeof(pr.Name)); + Dmsg1(110, "get_or_create_pool=%s\n", pool_name); + + while (!db_get_pool_record(jcr, jcr->db, &pr)) { /* get by Name */ + /* Try to create the pool */ + if (create_pool(jcr, jcr->db, jcr->pool, POOL_OP_CREATE) < 0) { + Jmsg(jcr, M_FATAL, 0, _("Cannot create pool \"%s\" in database. ERR=%s"), pr.Name, + db_strerror(jcr->db)); + return 0; + } else { + Jmsg(jcr, M_INFO, 0, _("Created database record for Pool \"%s\".\n"), pr.Name); + } + } + return pr.PoolId; +} + +/* + * Check for duplicate jobs. + * Returns: true if current job should continue + * false if current job should terminate + */ +bool allow_duplicate_job(JCR *jcr) +{ + JOB *job = jcr->job; + JCR *djcr; /* possible duplicate job */ + + /* Is AllowDuplicateJobs is set or is duplicate checking + * disabled for this job? */ + if (job->AllowDuplicateJobs || jcr->IgnoreDuplicateJobChecking) { + return true; + } + Dmsg0(800, "Enter allow_duplicate_job\n"); + /* + * After this point, we do not want to allow any duplicate + * job to run. 
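+ *
+ * For example, with CancelLowerLevelDuplicates=yes a newly started Full
+ * cancels an already running Incremental or Differential of the same Job,
+ * while a new Incremental is itself canceled if a Full is already running.
+ * DuplicateJobProximity further restricts the check to jobs started within
+ * the configured time window.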
+ */ + + foreach_jcr(djcr) { + if (jcr == djcr || djcr->is_internal_job() || !djcr->job) { + continue; /* do not cancel this job or consoles */ + } + /* Does Job has the IgnoreDuplicateJobChecking flag set, + * if so do not check it against other jobs */ + if (djcr->IgnoreDuplicateJobChecking) { + continue; + } + if ((strcmp(job->name(), djcr->job->name()) == 0) && + djcr->getJobType() == jcr->getJobType()) /* A duplicate is about the same name and the same type */ + { + bool cancel_dup = false; + bool cancel_me = false; + if (job->DuplicateJobProximity > 0) { + utime_t now = (utime_t)time(NULL); + if ((now - djcr->start_time) > job->DuplicateJobProximity) { + continue; /* not really a duplicate */ + } + } + if (job->CancelLowerLevelDuplicates && + djcr->getJobType() == 'B' && jcr->getJobType() == 'B') { + switch (jcr->getJobLevel()) { + case L_FULL: + case L_VIRTUAL_FULL: + if (djcr->getJobLevel() == L_DIFFERENTIAL || + djcr->getJobLevel() == L_INCREMENTAL) { + cancel_dup = true; + } + break; + case L_DIFFERENTIAL: + if (djcr->getJobLevel() == L_INCREMENTAL) { + cancel_dup = true; + } + if (djcr->getJobLevel() == L_FULL) { + cancel_me = true; + } + break; + case L_INCREMENTAL: + if (djcr->getJobLevel() == L_FULL || + djcr->getJobLevel() == L_DIFFERENTIAL) { + cancel_me = true; + } + } + /* + * cancel_dup will be done below + */ + if (cancel_me) { + /* Zap current job */ + jcr->setJobStatus(JS_Canceled); + Jmsg(jcr, M_FATAL, 0, _("JobId %d already running. Duplicate job not allowed.\n"), + djcr->JobId); + break; /* get out of foreach_jcr */ + } + } + /* Cancel one of the two jobs (me or dup) */ + /* If CancelQueuedDuplicates is set do so only if job is queued */ + if (job->CancelQueuedDuplicates) { + switch (djcr->JobStatus) { + case JS_Created: + case JS_WaitJobRes: + case JS_WaitClientRes: + case JS_WaitStoreRes: + case JS_WaitPriority: + case JS_WaitMaxJobs: + case JS_WaitStartTime: + case JS_WaitDevice: + cancel_dup = true; /* cancel queued duplicate */ + break; + default: + break; + } + } + if (cancel_dup || job->CancelRunningDuplicates) { + /* Zap the duplicated job djcr */ + UAContext *ua = new_ua_context(jcr); + Jmsg(jcr, M_INFO, 0, _("Cancelling duplicate JobId=%d.\n"), djcr->JobId); + cancel_job(ua, djcr, 60); + bmicrosleep(0, 500000); + djcr->setJobStatus(JS_Canceled); + cancel_job(ua, djcr, 60); + free_ua_context(ua); + Dmsg2(800, "Cancel dup %p JobId=%d\n", djcr, djcr->JobId); + } else { + /* Zap current job */ + jcr->setJobStatus(JS_Canceled); + Jmsg(jcr, M_FATAL, 0, _("JobId %d already running. Duplicate job not allowed.\n"), + djcr->JobId); + Dmsg2(800, "Cancel me %p JobId=%d\n", jcr, jcr->JobId); + } + Dmsg4(800, "curJobId=%d use_cnt=%d dupJobId=%d use_cnt=%d\n", + jcr->JobId, jcr->use_count(), djcr->JobId, djcr->use_count()); + break; /* did our work, get out of foreach loop */ + } + } + endeach_jcr(djcr); + + return true; +} + +/* + * Apply pool overrides to get the storage properly setup. 
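+ *
+ * The NextPool used for the write storage is selected in this order:
+ * command line override, Run NextPool override, the Job's NextPool
+ * resource, and finally the original Pool's NextPool.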
+ */ +bool apply_wstorage_overrides(JCR *jcr, POOL *opool) +{ + const char *source; + + Dmsg1(100, "Original pool=%s\n", opool->name()); + if (jcr->cmdline_next_pool_override) { + /* Can be Command line or User input */ + source = NPRT(jcr->next_pool_source); + } else if (jcr->run_next_pool_override) { + pm_strcpy(jcr->next_pool_source, _("Run NextPool override")); + pm_strcpy(jcr->pool_source, _("Run NextPool override")); + source = _("Run NextPool override"); + } else if (jcr->job->next_pool) { + /* Use Job Next Pool */ + jcr->next_pool = jcr->job->next_pool; + pm_strcpy(jcr->next_pool_source, _("Job's NextPool resource")); + pm_strcpy(jcr->pool_source, _("Job's NextPool resource")); + source = _("Job's NextPool resource"); + } else { + /* Default to original pool->NextPool */ + jcr->next_pool = opool->NextPool; + Dmsg1(100, "next_pool=%p\n", jcr->next_pool); + if (jcr->next_pool) { + Dmsg1(100, "Original pool next Pool = %s\n", NPRT(jcr->next_pool->name())); + } + pm_strcpy(jcr->next_pool_source, _("Job Pool's NextPool resource")); + pm_strcpy(jcr->pool_source, _("Job Pool's NextPool resource")); + source = _("Pool's NextPool resource"); + } + + /* + * If the original backup pool has a NextPool, make sure a + * record exists in the database. + */ + if (jcr->next_pool) { + jcr->jr.PoolId = get_or_create_pool_record(jcr, jcr->next_pool->name()); + if (jcr->jr.PoolId == 0) { + return false; + } + } + + if (!set_mac_wstorage(NULL, jcr, jcr->pool, jcr->next_pool, source)) { + return false; + } + + /* Set write pool and source. Not read pool is in rpool. */ + jcr->pool = jcr->next_pool; + pm_strcpy(jcr->pool_source, source); + + return true; +} + + +void apply_pool_overrides(JCR *jcr) +{ + bool pool_override = false; + + if (jcr->run_pool_override) { + pm_strcpy(jcr->pool_source, _("Run Pool override")); + } + /* + * Apply any level related Pool selections + */ + switch (jcr->getJobLevel()) { + case L_FULL: + if (jcr->full_pool) { + jcr->pool = jcr->full_pool; + pool_override = true; + if (jcr->run_full_pool_override) { + pm_strcpy(jcr->pool_source, _("Run FullPool override")); + } else { + pm_strcpy(jcr->pool_source, _("Job FullPool override")); + } + } + break; + case L_VIRTUAL_FULL: + if (jcr->vfull_pool) { + jcr->pool = jcr->vfull_pool; + pool_override = true; + if (jcr->run_vfull_pool_override) { + pm_strcpy(jcr->pool_source, _("Run VFullPool override")); + } else { + pm_strcpy(jcr->pool_source, _("Job VFullPool override")); + } + } + break; + case L_INCREMENTAL: + if (jcr->inc_pool) { + jcr->pool = jcr->inc_pool; + pool_override = true; + if (jcr->run_inc_pool_override) { + pm_strcpy(jcr->pool_source, _("Run IncPool override")); + } else { + pm_strcpy(jcr->pool_source, _("Job IncPool override")); + } + } + break; + case L_DIFFERENTIAL: + if (jcr->diff_pool) { + jcr->pool = jcr->diff_pool; + pool_override = true; + if (jcr->run_diff_pool_override) { + pm_strcpy(jcr->pool_source, _("Run DiffPool override")); + } else { + pm_strcpy(jcr->pool_source, _("Job DiffPool override")); + } + } + break; + } + /* Update catalog if pool overridden */ + if (pool_override && jcr->pool->catalog) { + jcr->catalog = jcr->pool->catalog; + pm_strcpy(jcr->catalog_source, _("Pool resource")); + } +} + + +/* + * Get or create a Client record for this Job + */ +bool get_or_create_client_record(JCR *jcr) +{ + CLIENT_DBR cr; + + if (!jcr->client) { + Jmsg(jcr, M_FATAL, 0, _("No Client specified.\n")); + return false; + } + memset(&cr, 0, sizeof(cr)); + bstrncpy(cr.Name, jcr->client->hdr.name, sizeof(cr.Name)); + 
cr.AutoPrune = jcr->client->AutoPrune; + cr.FileRetention = jcr->client->FileRetention; + cr.JobRetention = jcr->client->JobRetention; + if (!jcr->client_name) { + jcr->client_name = get_pool_memory(PM_NAME); + } + pm_strcpy(jcr->client_name, jcr->client->hdr.name); + if (!db_create_client_record(jcr, jcr->db, &cr)) { + Jmsg(jcr, M_FATAL, 0, _("Could not create Client record. ERR=%s\n"), + db_strerror(jcr->db)); + return false; + } + jcr->jr.ClientId = cr.ClientId; + if (cr.Uname[0]) { + if (!jcr->client_uname) { + jcr->client_uname = get_pool_memory(PM_NAME); + } + pm_strcpy(jcr->client_uname, cr.Uname); + } + Dmsg2(100, "Created Client %s record %d\n", jcr->client->hdr.name, + jcr->jr.ClientId); + return true; +} + +/* + * Get or Create FileSet record + */ +bool get_or_create_fileset_record(JCR *jcr) +{ + FILESET_DBR fsr; + + memset(&fsr, 0, sizeof(FILESET_DBR)); + bstrncpy(fsr.FileSet, jcr->fileset->hdr.name, sizeof(fsr.FileSet)); + if (jcr->fileset->have_MD5) { + struct MD5Context md5c; + unsigned char digest[MD5HashSize]; + memcpy(&md5c, &jcr->fileset->md5c, sizeof(md5c)); + MD5Final(digest, &md5c); + /* + * Keep the flag (last arg) set to false otherwise old FileSets will + * get new MD5 sums and the user will get Full backups on everything + */ + bin_to_base64(fsr.MD5, sizeof(fsr.MD5), (char *)digest, MD5HashSize, false); + bstrncpy(jcr->fileset->MD5, fsr.MD5, sizeof(jcr->fileset->MD5)); + } else { + Jmsg(jcr, M_WARNING, 0, _("FileSet MD5 digest not found.\n")); + } + if (!jcr->fileset->ignore_fs_changes || + !db_get_fileset_record(jcr, jcr->db, &fsr)) { + if (!db_create_fileset_record(jcr, jcr->db, &fsr)) { + Jmsg(jcr, M_ERROR, 0, _("Could not create FileSet \"%s\" record. ERR=%s\n"), + fsr.FileSet, db_strerror(jcr->db)); + return false; + } + } + jcr->jr.FileSetId = fsr.FileSetId; + bstrncpy(jcr->FSCreateTime, fsr.cCreateTime, sizeof(jcr->FSCreateTime)); + Dmsg2(119, "Created FileSet %s record %u\n", jcr->fileset->hdr.name, + jcr->jr.FileSetId); + return true; +} + +void init_jcr_job_record(JCR *jcr) +{ + jcr->jr.SchedTime = jcr->sched_time; + jcr->jr.StartTime = jcr->start_time; + jcr->jr.EndTime = 0; /* perhaps rescheduled, clear it */ + jcr->jr.JobType = jcr->getJobType(); + jcr->jr.JobLevel = jcr->getJobLevel(); + jcr->jr.JobStatus = jcr->JobStatus; + jcr->jr.JobId = jcr->JobId; + bstrncpy(jcr->jr.Name, jcr->job->name(), sizeof(jcr->jr.Name)); + bstrncpy(jcr->jr.Job, jcr->Job, sizeof(jcr->jr.Job)); +} + +/* + * Write status and such in DB + */ +void update_job_end_record(JCR *jcr) +{ + jcr->jr.EndTime = time(NULL); + jcr->end_time = jcr->jr.EndTime; + jcr->jr.JobId = jcr->JobId; + jcr->jr.JobStatus = jcr->JobStatus; + jcr->jr.JobFiles = jcr->JobFiles; + jcr->jr.JobBytes = jcr->JobBytes; + jcr->jr.ReadBytes = jcr->ReadBytes; + jcr->jr.VolSessionId = jcr->VolSessionId; + jcr->jr.VolSessionTime = jcr->VolSessionTime; + jcr->jr.JobErrors = jcr->JobErrors + jcr->SDErrors; + jcr->jr.HasBase = jcr->HasBase; + if (!db_update_job_end_record(jcr, jcr->db, &jcr->jr)) { + Jmsg(jcr, M_WARNING, 0, _("Error updating job record. %s"), + db_strerror(jcr->db)); + } +} + +/* + * Takes base_name and appends (unique) current + * date and time to form unique job name. + * + * Note, the seconds are actually a sequence number. This + * permits us to start a maximum fo 59 unique jobs a second, which + * should be sufficient. 
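+ *
+ * For a job named "NightlySave" the generated name looks like
+ * (illustrative):  NightlySave.2025-01-31_23.10.02_05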
+ * + * Returns: unique job name in jcr->Job + * date/time in jcr->start_time + */ +void create_unique_job_name(JCR *jcr, const char *base_name) +{ + /* Job start mutex */ + static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; + static time_t last_start_time = 0; + static int seq = 0; + time_t now = time(NULL); + struct tm tm; + char dt[MAX_TIME_LENGTH]; + char name[MAX_NAME_LENGTH]; + char *p; + int len; + int local_seq; + + /* Guarantee unique start time -- maximum one per second, and + * thus unique Job Name + */ + P(mutex); /* lock creation of jobs */ + seq++; + if (seq > 59) { /* wrap as if it is seconds */ + seq = 0; + while (now == last_start_time) { + bmicrosleep(0, 500000); + now = time(NULL); + } + } + last_start_time = now; + local_seq = seq; + V(mutex); /* allow creation of jobs */ + jcr->start_time = now; + /* Form Unique JobName */ + (void)localtime_r(&now, &tm); + /* Use only characters that are permitted in Windows filenames */ + strftime(dt, sizeof(dt), "%Y-%m-%d_%H.%M.%S", &tm); + len = strlen(dt) + 5; /* dt + .%02d EOS */ + bstrncpy(name, base_name, sizeof(name)); + name[sizeof(name)-len] = 0; /* truncate if too long */ + bsnprintf(jcr->Job, sizeof(jcr->Job), "%s.%s_%02d", name, dt, local_seq); /* add date & time */ + /* Convert spaces into underscores */ + for (p=jcr->Job; *p; p++) { + if (*p == ' ') { + *p = '_'; + } + } + Dmsg2(100, "JobId=%u created Job=%s\n", jcr->JobId, jcr->Job); +} + +/* Called directly from job rescheduling */ +void dird_free_jcr_pointers(JCR *jcr) +{ + /* Close but do not free bsock packets */ + if (jcr->file_bsock) { + Dmsg0(200, "Close File bsock\n"); + jcr->file_bsock->close(); + } + if (jcr->store_bsock) { + Dmsg0(200, "Close Store bsock\n"); + jcr->store_bsock->close(); + } + + bfree_and_null(jcr->sd_auth_key); + bfree_and_null(jcr->where); + bfree_and_null(jcr->RestoreBootstrap); + jcr->cached_attribute = false; + bfree_and_null(jcr->ar); + + free_and_null_pool_memory(jcr->JobIds); + free_and_null_pool_memory(jcr->client_uname); + free_and_null_pool_memory(jcr->attr); + free_and_null_pool_memory(jcr->fname); + free_and_null_pool_memory(jcr->media_type); +} + +/* + * Free the Job Control Record if no one is still using it. + * Called from main free_jcr() routine in src/lib/jcr.c so + * that we can do our Director specific cleanup of the jcr. 
+ */ +void dird_free_jcr(JCR *jcr) +{ + Dmsg0(200, "Start dird free_jcr\n"); + + dird_free_jcr_pointers(jcr); + if (jcr->bsr_list) { + free_bsr(jcr->bsr_list); + jcr->bsr_list = NULL; + } + if (jcr->wjcr) { + free_jcr(jcr->wjcr); + jcr->wjcr = NULL; + } + /* Free bsock packets */ + free_bsock(jcr->file_bsock); + free_bsock(jcr->store_bsock); + if (jcr->term_wait_inited) { + pthread_cond_destroy(&jcr->term_wait); + jcr->term_wait_inited = false; + } + if (jcr->db_batch) { + db_close_database(jcr, jcr->db_batch); + jcr->db_batch = NULL; + jcr->batch_started = false; + } + if (jcr->db) { + db_close_database(jcr, jcr->db); + jcr->db = NULL; + } + + free_and_null_pool_memory(jcr->stime); + free_and_null_pool_memory(jcr->fname); + free_and_null_pool_memory(jcr->pool_source); + free_and_null_pool_memory(jcr->next_pool_source); + free_and_null_pool_memory(jcr->catalog_source); + free_and_null_pool_memory(jcr->rpool_source); + free_and_null_pool_memory(jcr->wstore_source); + free_and_null_pool_memory(jcr->rstore_source); + free_and_null_pool_memory(jcr->next_vol_list); + free_and_null_pool_memory(jcr->component_fname); + + /* Delete lists setup to hold storage pointers */ + free_rwstorage(jcr); + + jcr->job_end_push.destroy(); + + if (jcr->JobId != 0) + write_state_file(director->working_directory, "bacula-dir", get_first_port_host_order(director->DIRaddrs)); + + if (jcr->plugin_config) { + free_plugin_config_items(jcr->plugin_config); + delete jcr->plugin_config; + jcr->plugin_config = NULL; + } + free_plugins(jcr); /* release instantiated plugins */ + + garbage_collect_memory_pool(); + + Dmsg0(200, "End dird free_jcr\n"); +} + +/* + * The Job storage definition must be either in the Job record + * or in the Pool record. The Pool record overrides the Job + * record. + */ +void get_job_storage(USTORE *store, JOB *job, RUN *run) +{ + if (run && run->pool && run->pool->storage) { + store->store = (STORE *)run->pool->storage->first(); + pm_strcpy(store->store_source, _("Run pool override")); + return; + } + if (run && run->storage) { + store->store = run->storage; + pm_strcpy(store->store_source, _("Run storage override")); + return; + } + if (job->pool->storage) { + store->store = (STORE *)job->pool->storage->first(); + pm_strcpy(store->store_source, _("Pool resource")); + } else { + store->store = (STORE *)job->storage->first(); + pm_strcpy(store->store_source, _("Job resource")); + } +} + +/* + * Set some defaults in the JCR necessary to + * run. These items are pulled from the job + * definition as defaults, but can be overridden + * later either by the Run record in the Schedule resource, + * or by the Console program. 
+ */ +void set_jcr_defaults(JCR *jcr, JOB *job) +{ + jcr->job = job; + jcr->setJobType(job->JobType); + jcr->JobStatus = JS_Created; + + switch (jcr->getJobType()) { + case JT_ADMIN: + jcr->setJobLevel(L_NONE); + break; + default: + jcr->setJobLevel(job->JobLevel); + break; + } + if (!jcr->next_vol_list) { + jcr->next_vol_list = get_pool_memory(PM_FNAME); + } + if (!jcr->fname) { + jcr->fname = get_pool_memory(PM_FNAME); + } + if (!jcr->pool_source) { + jcr->pool_source = get_pool_memory(PM_MESSAGE); + } + if (!jcr->next_pool_source) { + jcr->next_pool_source = get_pool_memory(PM_MESSAGE); + } + if (!jcr->catalog_source) { + jcr->catalog_source = get_pool_memory(PM_MESSAGE); + } + + jcr->JobPriority = job->Priority; + /* Copy storage definitions -- deleted in dir_free_jcr above */ + if (job->storage) { + copy_rwstorage(jcr, job->storage, _("Job resource")); + } else { + copy_rwstorage(jcr, job->pool->storage, _("Pool resource")); + } + /* check if we run a restore */ + if (jcr->getJobType() == JT_RESTORE && job->RestoreClient){ + jcr->client = GetClientResWithName(jcr->job->RestoreClient); + } else { + jcr->client = job->client; + } + ASSERT2(jcr->client, "jcr->client==NULL!!!"); + if (!jcr->client_name) { + jcr->client_name = get_pool_memory(PM_NAME); + } + pm_strcpy(jcr->client_name, jcr->client->name()); + jcr->pool = job->pool; + pm_strcpy(jcr->pool_source, _("Job resource")); + if (job->next_pool) { + /* Use Job's Next Pool */ + jcr->next_pool = job->next_pool; + pm_strcpy(jcr->next_pool_source, _("Job's NextPool resource")); + } else { + /* Default to original pool->NextPool */ + jcr->next_pool = job->pool->NextPool; + pm_strcpy(jcr->next_pool_source, _("Job Pool's NextPool resource")); + } + jcr->full_pool = job->full_pool; + jcr->vfull_pool = job->vfull_pool; + jcr->inc_pool = job->inc_pool; + jcr->diff_pool = job->diff_pool; + if (job->pool->catalog) { + jcr->catalog = job->pool->catalog; + pm_strcpy(jcr->catalog_source, _("Pool resource")); + } else { + jcr->catalog = job->client->catalog; + pm_strcpy(jcr->catalog_source, _("Client resource")); + } + jcr->fileset = job->fileset; + jcr->accurate = job->accurate; + jcr->messages = job->messages; + jcr->spool_data = job->spool_data; + jcr->spool_size = job->spool_size; + jcr->write_part_after_job = job->write_part_after_job; + jcr->MaxRunSchedTime = job->MaxRunSchedTime; + /* This can be overridden by Console program */ + bfree_and_null(jcr->RestoreBootstrap); + if (job->RestoreBootstrap) { + jcr->RestoreBootstrap = bstrdup(job->RestoreBootstrap); + } + /* This can be overridden by Console program */ + jcr->verify_job = job->verify_job; + /* If no default level given, set one */ + if (jcr->getJobLevel() == 0) { + switch (jcr->getJobType()) { + case JT_VERIFY: + jcr->setJobLevel(L_VERIFY_CATALOG); + break; + case JT_BACKUP: + jcr->setJobLevel(L_INCREMENTAL); + break; + case JT_RESTORE: + case JT_ADMIN: + jcr->setJobLevel(L_NONE); + break; + default: + jcr->setJobLevel(L_FULL); + break; + } + } +} + +/* + * Copy the storage definitions from an alist to the JCR + */ +void copy_rwstorage(JCR *jcr, alist *storage, const char *where) +{ + if (jcr->JobReads()) { + copy_rstorage(jcr, storage, where); + } + copy_wstorage(jcr, storage, where); +} + + +/* Set storage override. 
Releases any previous storage definition */ +void set_rwstorage(JCR *jcr, USTORE *store) +{ + if (!store) { + Jmsg(jcr, M_FATAL, 0, _("No storage specified.\n")); + return; + } + if (jcr->JobReads()) { + set_rstorage(jcr, store); + } + set_wstorage(jcr, store); +} + +void free_rwstorage(JCR *jcr) +{ + free_rstorage(jcr); + free_wstorage(jcr); +} + +/* + * Copy the storage definitions from an alist to the JCR + */ +void copy_rstorage(JCR *jcr, alist *storage, const char *where) +{ + if (storage) { + STORE *st; + if (jcr->rstorage) { + delete jcr->rstorage; + } + jcr->rstorage = New(alist(10, not_owned_by_alist)); + foreach_alist(st, storage) { + jcr->rstorage->append(st); + } + if (!jcr->rstore_source) { + jcr->rstore_source = get_pool_memory(PM_MESSAGE); + } + pm_strcpy(jcr->rstore_source, where); + if (jcr->rstorage) { + jcr->rstore = (STORE *)jcr->rstorage->first(); + } + } +} + + +/* Set storage override. Remove all previous storage */ +void set_rstorage(JCR *jcr, USTORE *store) +{ + STORE *storage; + + if (!store->store) { + return; + } + if (jcr->rstorage) { + free_rstorage(jcr); + } + if (!jcr->rstorage) { + jcr->rstorage = New(alist(10, not_owned_by_alist)); + } + jcr->rstore = store->store; + if (!jcr->rstore_source) { + jcr->rstore_source = get_pool_memory(PM_MESSAGE); + } + pm_strcpy(jcr->rstore_source, store->store_source); + foreach_alist(storage, jcr->rstorage) { + if (store->store == storage) { + return; + } + } + /* Store not in list, so add it */ + jcr->rstorage->prepend(store->store); +} + +void free_rstorage(JCR *jcr) +{ + if (jcr->rstorage) { + delete jcr->rstorage; + jcr->rstorage = NULL; + } + jcr->rstore = NULL; +} + +/* + * Copy the storage definitions from an alist to the JCR + */ +void copy_wstorage(JCR *jcr, alist *storage, const char *where) +{ + if (storage) { + STORE *st; + if (jcr->wstorage) { + delete jcr->wstorage; + } + jcr->wstorage = New(alist(10, not_owned_by_alist)); + foreach_alist(st, storage) { + Dmsg1(100, "wstorage=%s\n", st->name()); + jcr->wstorage->append(st); + } + if (!jcr->wstore_source) { + jcr->wstore_source = get_pool_memory(PM_MESSAGE); + } + pm_strcpy(jcr->wstore_source, where); + if (jcr->wstorage) { + jcr->wstore = (STORE *)jcr->wstorage->first(); + Dmsg2(100, "wstore=%s where=%s\n", jcr->wstore->name(), jcr->wstore_source); + } + } +} + + +/* Set storage override. 
Remove all previous storage */ +void set_wstorage(JCR *jcr, USTORE *store) +{ + STORE *storage; + + if (!store->store) { + return; + } + if (jcr->wstorage) { + free_wstorage(jcr); + } + if (!jcr->wstorage) { + jcr->wstorage = New(alist(10, not_owned_by_alist)); + } + jcr->wstore = store->store; + if (!jcr->wstore_source) { + jcr->wstore_source = get_pool_memory(PM_MESSAGE); + } + pm_strcpy(jcr->wstore_source, store->store_source); + Dmsg2(50, "wstore=%s where=%s\n", jcr->wstore->name(), jcr->wstore_source); + foreach_alist(storage, jcr->wstorage) { + if (store->store == storage) { + return; + } + } + /* Store not in list, so add it */ + jcr->wstorage->prepend(store->store); +} + +void free_wstorage(JCR *jcr) +{ + if (jcr->wstorage) { + delete jcr->wstorage; + jcr->wstorage = NULL; + } + jcr->wstore = NULL; +} + +void create_clones(JCR *jcr) +{ + /* + * Fire off any clone jobs (run directives) + */ + Dmsg2(900, "cloned=%d run_cmds=%p\n", jcr->cloned, jcr->job->run_cmds); + if (!jcr->cloned && jcr->job->run_cmds) { + char *runcmd; + JOB *job = jcr->job; + POOLMEM *cmd = get_pool_memory(PM_FNAME); + UAContext *ua = new_ua_context(jcr); + ua->batch = true; + foreach_alist(runcmd, job->run_cmds) { + cmd = edit_job_codes(jcr, cmd, runcmd, "", job_code_callback_director); + Mmsg(ua->cmd, "run %s cloned=yes", cmd); + Dmsg1(900, "=============== Clone cmd=%s\n", ua->cmd); + parse_ua_args(ua); /* parse command */ + int stat = run_cmd(ua, ua->cmd); + if (stat == 0) { + Jmsg(jcr, M_ERROR, 0, _("Could not start clone job: \"%s\".\n"), + ua->cmd); + } else { + Jmsg(jcr, M_INFO, 0, _("Clone JobId %d started.\n"), stat); + } + } + free_ua_context(ua); + free_pool_memory(cmd); + } +} + +/* + * Given: a JobId and FileIndex + * this subroutine writes a bsr file to restore that job. + * Returns: -1 on error + * number of files if OK + */ +int create_restore_bootstrap_file(JCR *jcr, JobId_t jobid, int findex1, int findex2) +{ + RESTORE_CTX rx; + UAContext *ua; + int files; + + memset(&rx, 0, sizeof(rx)); + rx.JobIds = (char *)""; + + rx.bsr_list = create_bsr_list(jobid, findex1, findex2); + + ua = new_ua_context(jcr); + if (!complete_bsr(ua, rx.bsr_list)) { + files = -1; + goto bail_out; + } + + jcr->ExpectedFiles = write_bsr_file(ua, rx); + if (jcr->ExpectedFiles == 0) { + files = 0; + goto bail_out; + } + free_ua_context(ua); + free_bsr(rx.bsr_list); + jcr->needs_sd = true; + return jcr->ExpectedFiles; + +bail_out: + free_ua_context(ua); + free_bsr(rx.bsr_list); + return files; +} + +/* + * Given: a JobId in jcr->previous_jr.JobId, + * this subroutine writes a bsr file to restore that job. 
+ * Returns: -1 on error + * number of files if OK + */ +int create_restore_bootstrap_file(JCR *jcr) +{ + return create_restore_bootstrap_file(jcr, jcr->previous_jr.JobId, 1, jcr->previous_jr.JobFiles); +} + +/* TODO: redirect command ouput to job log */ +bool run_console_command(JCR *jcr, const char *cmd) +{ + UAContext *ua; + bool ok; + JCR *ljcr = new_control_jcr("-RunScript-", JT_CONSOLE); + ua = new_ua_context(ljcr); + /* run from runscript and check if commands are authorized */ + ua->runscript = true; + Mmsg(ua->cmd, "%s", cmd); + Dmsg1(100, "Console command: %s\n", ua->cmd); + parse_ua_args(ua); + if (ua->argc > 0 && ua->argk[0][0] == '.') { + ok = do_a_dot_command(ua); + } else { + ok = do_a_command(ua); + } + close_db(ua); + free_ua_context(ua); + free_jcr(ljcr); + return ok; +} diff --git a/src/dird/jobq.c b/src/dird/jobq.c new file mode 100644 index 00000000..4ba28a5f --- /dev/null +++ b/src/dird/jobq.c @@ -0,0 +1,906 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula job queue routines. + * + * This code consists of three queues, the waiting_jobs + * queue, where jobs are initially queued, the ready_jobs + * queue, where jobs are placed when all the resources are + * allocated and they can immediately be run, and the + * running queue where jobs are placed when they are + * running. + * + * Kern Sibbald, July MMIII + * + * + * This code was adapted from the Bacula workq, which was + * adapted from "Programming with POSIX Threads", by + * David R. 
Butenhof + * + */ + +#include "bacula.h" +#include "dird.h" + +extern JCR *jobs; + +/* Forward referenced functions */ +extern "C" void *jobq_server(void *arg); +extern "C" void *sched_wait(void *arg); + +static int start_server(jobq_t *jq); +static bool acquire_resources(JCR *jcr); +static bool reschedule_job(JCR *jcr, jobq_t *jq, jobq_item_t *je); +static void dec_write_store(JCR *jcr); + +/* + * Initialize a job queue + * + * Returns: 0 on success + * errno on failure + */ +int jobq_init(jobq_t *jq, int threads, void *(*engine)(void *arg)) +{ + int stat; + jobq_item_t *item = NULL; + + if ((stat = pthread_attr_init(&jq->attr)) != 0) { + berrno be; + Jmsg1(NULL, M_ERROR, 0, _("pthread_attr_init: ERR=%s\n"), be.bstrerror(stat)); + return stat; + } + if ((stat = pthread_attr_setdetachstate(&jq->attr, PTHREAD_CREATE_DETACHED)) != 0) { + pthread_attr_destroy(&jq->attr); + return stat; + } + if ((stat = pthread_mutex_init(&jq->mutex, NULL)) != 0) { + berrno be; + Jmsg1(NULL, M_ERROR, 0, _("pthread_mutex_init: ERR=%s\n"), be.bstrerror(stat)); + pthread_attr_destroy(&jq->attr); + return stat; + } + if ((stat = pthread_cond_init(&jq->work, NULL)) != 0) { + berrno be; + Jmsg1(NULL, M_ERROR, 0, _("pthread_cond_init: ERR=%s\n"), be.bstrerror(stat)); + pthread_mutex_destroy(&jq->mutex); + pthread_attr_destroy(&jq->attr); + return stat; + } + jq->quit = false; + jq->max_workers = threads; /* max threads to create */ + jq->num_workers = 0; /* no threads yet */ + jq->idle_workers = 0; /* no idle threads */ + jq->engine = engine; /* routine to run */ + jq->valid = JOBQ_VALID; + /* Initialize the job queues */ + jq->waiting_jobs = New(dlist(item, &item->link)); + jq->running_jobs = New(dlist(item, &item->link)); + jq->ready_jobs = New(dlist(item, &item->link)); + return 0; +} + +/* + * Destroy the job queue + * + * Returns: 0 on success + * errno on failure + */ +int jobq_destroy(jobq_t *jq) +{ + int stat, stat1, stat2; + + if (jq->valid != JOBQ_VALID) { + return EINVAL; + } + P(jq->mutex); + jq->valid = 0; /* prevent any more operations */ + + /* + * If any threads are active, wake them + */ + if (jq->num_workers > 0) { + jq->quit = true; + if (jq->idle_workers) { + if ((stat = pthread_cond_broadcast(&jq->work)) != 0) { + berrno be; + Jmsg1(NULL, M_ERROR, 0, _("pthread_cond_broadcast: ERR=%s\n"), be.bstrerror(stat)); + V(jq->mutex); + return stat; + } + } + while (jq->num_workers > 0) { + if ((stat = pthread_cond_wait(&jq->work, &jq->mutex)) != 0) { + berrno be; + Jmsg1(NULL, M_ERROR, 0, _("pthread_cond_wait: ERR=%s\n"), be.bstrerror(stat)); + V(jq->mutex); + return stat; + } + } + } + V(jq->mutex); + stat = pthread_mutex_destroy(&jq->mutex); + stat1 = pthread_cond_destroy(&jq->work); + stat2 = pthread_attr_destroy(&jq->attr); + delete jq->waiting_jobs; + delete jq->running_jobs; + delete jq->ready_jobs; + return (stat != 0 ? stat : (stat1 != 0 ? stat1 : stat2)); +} + +struct wait_pkt { + JCR *jcr; + jobq_t *jq; +}; + +/* + * Wait until schedule time arrives before starting. Normally + * this routine is only used for jobs started from the console + * for which the user explicitly specified a start time. Otherwise + * most jobs are put into the job queue only when their + * scheduled time arrives. 
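+ *
+ * The wait below is performed in slices of at most 30 seconds so that a
+ * cancel request is noticed promptly; once the scheduled time is reached
+ * (or the job is canceled) the job is handed back to jobq_add().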
+ */ +extern "C" +void *sched_wait(void *arg) +{ + JCR *jcr = ((wait_pkt *)arg)->jcr; + jobq_t *jq = ((wait_pkt *)arg)->jq; + + set_jcr_in_tsd(INVALID_JCR); + Dmsg0(2300, "Enter sched_wait.\n"); + free(arg); + time_t wtime = jcr->sched_time - time(NULL); + jcr->setJobStatus(JS_WaitStartTime); + /* Wait until scheduled time arrives */ + if (wtime > 0) { + Jmsg(jcr, M_INFO, 0, _("Job %s waiting %d seconds for scheduled start time.\n"), + jcr->Job, wtime); + } + /* Check every 30 seconds if canceled */ + while (wtime > 0) { + Dmsg3(2300, "Waiting on sched time, jobid=%d secs=%d use=%d\n", + jcr->JobId, wtime, jcr->use_count()); + if (wtime > 30) { + wtime = 30; + } + bmicrosleep(wtime, 0); + if (job_canceled(jcr)) { + break; + } + wtime = jcr->sched_time - time(NULL); + } + Dmsg1(200, "resched use=%d\n", jcr->use_count()); + jobq_add(jq, jcr); + free_jcr(jcr); /* we are done with jcr */ + Dmsg0(2300, "Exit sched_wait\n"); + return NULL; +} + +/* Procedure to update the client->NumConcurrentJobs */ +static void update_client_numconcurrentjobs(JCR *jcr, int val) +{ + int num; + if (!jcr->client) { + return; + } + + switch (jcr->getJobType()) + { + case JT_MIGRATE: + case JT_COPY: + case JT_ADMIN: + break; + case JT_BACKUP: + /* Fall through wanted */ + default: + if (jcr->no_client_used() || jcr->wasVirtualFull) { + break; + } + num = jcr->client->getNumConcurrentJobs(); + jcr->client->setNumConcurrentJobs(num + val); + break; + } +} + +/* + * Add a job to the queue + * jq is a queue that was created with jobq_init + */ +int jobq_add(jobq_t *jq, JCR *jcr) +{ + int stat; + jobq_item_t *item, *li; + bool inserted = false; + time_t wtime = jcr->sched_time - time(NULL); + pthread_t id; + wait_pkt *sched_pkt; + + if (!jcr->term_wait_inited) { + /* Initialize termination condition variable */ + if ((stat = pthread_cond_init(&jcr->term_wait, NULL)) != 0) { + berrno be; + Jmsg1(jcr, M_FATAL, 0, _("Unable to init job cond variable: ERR=%s\n"), be.bstrerror(stat)); + return stat; + } + jcr->term_wait_inited = true; + } + + Dmsg3(2300, "jobq_add jobid=%d jcr=0x%x use_count=%d\n", jcr->JobId, jcr, jcr->use_count()); + if (jq->valid != JOBQ_VALID) { + Jmsg0(jcr, M_ERROR, 0, "Jobq_add queue not initialized.\n"); + return EINVAL; + } + + jcr->inc_use_count(); /* mark jcr in use by us */ + Dmsg3(2300, "jobq_add jobid=%d jcr=0x%x use_count=%d\n", jcr->JobId, jcr, jcr->use_count()); + if (!job_canceled(jcr) && wtime > 0) { + set_thread_concurrency(jq->max_workers + 2); + sched_pkt = (wait_pkt *)malloc(sizeof(wait_pkt)); + sched_pkt->jcr = jcr; + sched_pkt->jq = jq; + stat = pthread_create(&id, &jq->attr, sched_wait, (void *)sched_pkt); + if (stat != 0) { /* thread not created */ + berrno be; + Jmsg1(jcr, M_ERROR, 0, _("pthread_thread_create: ERR=%s\n"), be.bstrerror(stat)); + } + return stat; + } + + P(jq->mutex); + + if ((item = (jobq_item_t *)malloc(sizeof(jobq_item_t))) == NULL) { + free_jcr(jcr); /* release jcr */ + return ENOMEM; + } + item->jcr = jcr; + + /* While waiting in a queue this job is not attached to a thread */ + set_jcr_in_tsd(INVALID_JCR); + if (job_canceled(jcr)) { + /* Add job to ready queue so that it is canceled quickly */ + jq->ready_jobs->prepend(item); + Dmsg1(2300, "Prepended job=%d to ready queue\n", jcr->JobId); + } else { + /* Add this job to the wait queue in priority sorted order */ + foreach_dlist(li, jq->waiting_jobs) { + Dmsg2(2300, "waiting item jobid=%d priority=%d\n", + li->jcr->JobId, li->jcr->JobPriority); + if (li->jcr->JobPriority > jcr->JobPriority) { + 
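+            /* Lower JobPriority values run first, so keep the wait queue
+             * sorted by inserting ahead of the first entry that has a
+             * larger priority number. */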
jq->waiting_jobs->insert_before(item, li); + Dmsg2(2300, "insert_before jobid=%d before waiting job=%d\n", + li->jcr->JobId, jcr->JobId); + inserted = true; + break; + } + } + /* If not jobs in wait queue, append it */ + if (!inserted) { + jq->waiting_jobs->append(item); + Dmsg1(2300, "Appended item jobid=%d to waiting queue\n", jcr->JobId); + } + } + + /* Ensure that at least one server looks at the queue. */ + stat = start_server(jq); + + V(jq->mutex); + Dmsg0(2300, "Return jobq_add\n"); + return stat; +} + +/* + * Remove a job from the job queue. Used only by cancel_job(). + * jq is a queue that was created with jobq_init + * work_item is an element of work + * + * Note, it is "removed" from the job queue. + * If you want to cancel it, you need to provide some external means + * of doing so (e.g. pthread_kill()). + */ +int jobq_remove(jobq_t *jq, JCR *jcr) +{ + int stat; + bool found = false; + jobq_item_t *item; + + Dmsg2(2300, "jobq_remove jobid=%d jcr=0x%x\n", jcr->JobId, jcr); + if (jq->valid != JOBQ_VALID) { + return EINVAL; + } + + P(jq->mutex); + foreach_dlist(item, jq->waiting_jobs) { + if (jcr == item->jcr) { + found = true; + break; + } + } + if (!found) { + V(jq->mutex); + Dmsg2(2300, "jobq_remove jobid=%d jcr=0x%x not in wait queue\n", jcr->JobId, jcr); + return EINVAL; + } + + /* Move item to be the first on the list */ + jq->waiting_jobs->remove(item); + jq->ready_jobs->prepend(item); + Dmsg2(2300, "jobq_remove jobid=%d jcr=0x%x moved to ready queue\n", jcr->JobId, jcr); + + stat = start_server(jq); + + V(jq->mutex); + Dmsg0(2300, "Return jobq_remove\n"); + return stat; +} + + +/* + * Start the server thread if it isn't already running + */ +static int start_server(jobq_t *jq) +{ + int stat = 0; + pthread_t id; + + /* + * if any threads are idle, wake one. + * Actually we do a broadcast because on /lib/tls + * these signals seem to get lost from time to time. + */ + if (jq->idle_workers > 0) { + Dmsg0(2300, "Signal worker to wake up\n"); + if ((stat = pthread_cond_broadcast(&jq->work)) != 0) { + berrno be; + Jmsg1(NULL, M_ERROR, 0, _("pthread_cond_signal: ERR=%s\n"), be.bstrerror(stat)); + return stat; + } + } else if (jq->num_workers < jq->max_workers) { + Dmsg0(2300, "Create worker thread\n"); + /* No idle threads so create a new one */ + set_thread_concurrency(jq->max_workers + 1); + jq->num_workers++; + if ((stat = pthread_create(&id, &jq->attr, jobq_server, (void *)jq)) != 0) { + berrno be; + jq->num_workers--; + Jmsg1(NULL, M_ERROR, 0, _("pthread_create: ERR=%s\n"), be.bstrerror(stat)); + return stat; + } + } + return stat; +} + + +/* + * This is the worker thread that serves the job queue. + * When all the resources are acquired for the job, + * it will call the user's engine. 
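+ *
+ * In outline, each pass of the loop below runs whatever is on the ready
+ * queue, releases the resource counters the finished job held, then walks
+ * the wait queue promoting any job whose priority and resources allow it
+ * to run, and finally exits after about 4 idle seconds with no work.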
+ */ +extern "C" +void *jobq_server(void *arg) +{ + struct timespec timeout; + jobq_t *jq = (jobq_t *)arg; + jobq_item_t *je; /* job entry in queue */ + int stat; + bool timedout = false; + bool work = true; + + set_jcr_in_tsd(INVALID_JCR); + Dmsg0(2300, "Start jobq_server\n"); + P(jq->mutex); + + for (;;) { + struct timeval tv; + struct timezone tz; + + Dmsg0(2300, "Top of for loop\n"); + if (!work && !jq->quit) { + gettimeofday(&tv, &tz); + timeout.tv_nsec = 0; + timeout.tv_sec = tv.tv_sec + 4; + + while (!jq->quit) { + /* + * Wait 4 seconds, then if no more work, exit + */ + Dmsg0(2300, "pthread_cond_timedwait()\n"); + stat = pthread_cond_timedwait(&jq->work, &jq->mutex, &timeout); + if (stat == ETIMEDOUT) { + Dmsg0(2300, "timedwait timedout.\n"); + timedout = true; + break; + } else if (stat != 0) { + /* This shouldn't happen */ + Dmsg0(2300, "This shouldn't happen\n"); + jq->num_workers--; + V(jq->mutex); + return NULL; + } + break; + } + } + /* + * If anything is in the ready queue, run it + */ + Dmsg0(2300, "Checking ready queue.\n"); + while (!jq->ready_jobs->empty() && !jq->quit) { + JCR *jcr; + je = (jobq_item_t *)jq->ready_jobs->first(); + jcr = je->jcr; + jq->ready_jobs->remove(je); + if (!jq->ready_jobs->empty()) { + Dmsg0(2300, "ready queue not empty start server\n"); + if (start_server(jq) != 0) { + jq->num_workers--; + V(jq->mutex); + return NULL; + } + } + jq->running_jobs->append(je); + + /* Attach jcr to this thread while we run the job */ + jcr->my_thread_id = pthread_self(); + jcr->set_killable(true); + set_jcr_in_tsd(jcr); + Dmsg1(2300, "Took jobid=%d from ready and appended to run\n", jcr->JobId); + + /* Release job queue lock */ + V(jq->mutex); + + /* Call user's routine here */ + Dmsg3(2300, "Calling user engine for jobid=%d use=%d stat=%c\n", jcr->JobId, + jcr->use_count(), jcr->JobStatus); + jq->engine(je->jcr); + + /* Job finished detach from thread */ + remove_jcr_from_tsd(je->jcr); + je->jcr->set_killable(false); + + Dmsg2(2300, "Back from user engine jobid=%d use=%d.\n", jcr->JobId, + jcr->use_count()); + + /* Reacquire job queue lock */ + P(jq->mutex); + Dmsg0(200, "Done lock mutex after running job. Release locks.\n"); + jq->running_jobs->remove(je); + /* + * Release locks if acquired. Note, they will not have + * been acquired for jobs canceled before they were + * put into the ready queue. + */ + if (jcr->acquired_resource_locks) { + int num; + dec_read_store(jcr); + dec_write_store(jcr); + update_client_numconcurrentjobs(jcr, -1); + num = jcr->job->getNumConcurrentJobs() - 1; + jcr->job->setNumConcurrentJobs(num); + jcr->acquired_resource_locks = false; + } + + if (reschedule_job(jcr, jq, je)) { + continue; /* go look for more work */ + } + + /* Clean up and release old jcr */ + Dmsg2(2300, "====== Termination job=%d use_cnt=%d\n", jcr->JobId, jcr->use_count()); + jcr->SDJobStatus = 0; + V(jq->mutex); /* release internal lock */ + free_jcr(jcr); + free(je); /* release job entry */ + P(jq->mutex); /* reacquire job queue lock */ + } + /* + * If any job in the wait queue can be run, + * move it to the ready queue + */ + Dmsg0(2300, "Done check ready, now check wait queue.\n"); + if (!jq->waiting_jobs->empty() && !jq->quit) { + int Priority; + bool running_allow_mix = false; + je = (jobq_item_t *)jq->waiting_jobs->first(); + jobq_item_t *re = (jobq_item_t *)jq->running_jobs->first(); + if (re) { + Priority = re->jcr->JobPriority; + Dmsg2(2300, "JobId %d is running. 
Look for pri=%d\n", + re->jcr->JobId, Priority); + running_allow_mix = true; + for ( ; re; ) { + Dmsg2(2300, "JobId %d is also running with %s\n", + re->jcr->JobId, + re->jcr->job->allow_mixed_priority ? "mix" : "no mix"); + if (!re->jcr->job->allow_mixed_priority) { + running_allow_mix = false; + break; + } + re = (jobq_item_t *)jq->running_jobs->next(re); + } + Dmsg1(2300, "The running job(s) %s mixing priorities.\n", + running_allow_mix ? "allow" : "don't allow"); + } else { + Priority = je->jcr->JobPriority; + Dmsg1(2300, "No job running. Look for Job pri=%d\n", Priority); + } + /* + * Walk down the list of waiting jobs and attempt + * to acquire the resources it needs. + */ + for ( ; je; ) { + /* je is current job item on the queue, jn is the next one */ + JCR *jcr = je->jcr; + jobq_item_t *jn = (jobq_item_t *)jq->waiting_jobs->next(je); + + Dmsg4(2300, "Examining Job=%d JobPri=%d want Pri=%d (%s)\n", + jcr->JobId, jcr->JobPriority, Priority, + jcr->job->allow_mixed_priority ? "mix" : "no mix"); + + /* Take only jobs of correct Priority */ + if (!(jcr->JobPriority == Priority + || (jcr->JobPriority < Priority && + jcr->job->allow_mixed_priority && running_allow_mix))) { + jcr->setJobStatus(JS_WaitPriority); + break; + } + + if (!acquire_resources(jcr)) { + /* If resource conflict, job is canceled */ + if (!job_canceled(jcr)) { + je = jn; /* point to next waiting job */ + continue; + } + } + + /* + * Got all locks, now remove it from wait queue and append it + * to the ready queue. Note, we may also get here if the + * job was canceled. Once it is "run", it will quickly + * terminate. + */ + jq->waiting_jobs->remove(je); + jq->ready_jobs->append(je); + Dmsg1(2300, "moved JobId=%d from wait to ready queue\n", je->jcr->JobId); + je = jn; /* Point to next waiting job */ + } /* end for loop */ + + } /* end if */ + + Dmsg0(2300, "Done checking wait queue.\n"); + /* + * If no more ready work and we are asked to quit, then do it + */ + if (jq->ready_jobs->empty() && jq->quit) { + jq->num_workers--; + if (jq->num_workers == 0) { + Dmsg0(2300, "Wake up destroy routine\n"); + /* Wake up destroy routine if he is waiting */ + pthread_cond_broadcast(&jq->work); + } + break; + } + Dmsg0(2300, "Check for work request\n"); + /* + * If no more work requests, and we waited long enough, quit + */ + Dmsg2(2300, "timedout=%d read empty=%d\n", timedout, + jq->ready_jobs->empty()); + if (jq->ready_jobs->empty() && timedout) { + Dmsg0(2300, "break big loop\n"); + jq->num_workers--; + break; + } + + work = !jq->ready_jobs->empty() || !jq->waiting_jobs->empty(); + if (work) { + /* + * If a job is waiting on a Resource, don't consume all + * the CPU time looping looking for work, and even more + * important, release the lock so that a job that has + * terminated can give us the resource. + */ + V(jq->mutex); + bmicrosleep(2, 0); /* pause for 2 seconds */ + P(jq->mutex); + /* Recompute work as something may have changed in last 2 secs */ + work = !jq->ready_jobs->empty() || !jq->waiting_jobs->empty(); + } + Dmsg1(2300, "Loop again. 
work=%d\n", work); + } /* end of big for loop */ + + Dmsg0(200, "unlock mutex\n"); + V(jq->mutex); + Dmsg0(2300, "End jobq_server\n"); + return NULL; +} + +/* + * Returns true if cleanup done and we should look for more work + */ +static bool reschedule_job(JCR *jcr, jobq_t *jq, jobq_item_t *je) +{ + bool resched = false; + /* + * Reschedule the job if requested and possible + */ + /* Basic condition is that more reschedule times remain */ + if (jcr->job->RescheduleTimes == 0 || + jcr->reschedule_count < jcr->job->RescheduleTimes) { + + /* Check for incomplete jobs */ + if (jcr->is_incomplete()) { + resched = (jcr->RescheduleIncompleteJobs && jcr->is_JobType(JT_BACKUP) && + !(jcr->HasBase||jcr->is_JobLevel(L_BASE))); + } else { + /* Check for failed jobs */ + resched = (jcr->job->RescheduleOnError && + !jcr->is_JobStatus(JS_Terminated) && + !jcr->is_JobStatus(JS_Canceled) && + jcr->is_JobType(JT_BACKUP)); + } + } + if (resched) { + char dt[50], dt2[50]; + + /* + * Reschedule this job by cleaning it up, but + * reuse the same JobId if possible. + */ + jcr->rerunning = jcr->is_incomplete(); /* save incomplete status */ + time_t now = time(NULL); + jcr->reschedule_count++; + jcr->sched_time = now + jcr->job->RescheduleInterval; + bstrftime(dt, sizeof(dt), now); + bstrftime(dt2, sizeof(dt2), jcr->sched_time); + Dmsg4(2300, "Rescheduled Job %s to re-run in %d seconds.(now=%u,then=%u)\n", jcr->Job, + (int)jcr->job->RescheduleInterval, now, jcr->sched_time); + Jmsg(jcr, M_INFO, 0, _("Rescheduled Job %s at %s to re-run in %d seconds (%s).\n"), + jcr->Job, dt, (int)jcr->job->RescheduleInterval, dt2); + dird_free_jcr_pointers(jcr); /* partial cleanup old stuff */ + jcr->JobStatus = -1; + jcr->setJobStatus(JS_WaitStartTime); + jcr->SDJobStatus = 0; + jcr->JobErrors = 0; + if (!allow_duplicate_job(jcr)) { + return false; + } + /* Only jobs with no output or Incomplete jobs can run on same JCR */ + if (jcr->JobBytes == 0 || jcr->rerunning) { + Dmsg2(2300, "Requeue job=%d use=%d\n", jcr->JobId, jcr->use_count()); + V(jq->mutex); + /* + * Special test here since a Virtual Full gets marked + * as a Full, so we look at the resource record + */ + if (jcr->wasVirtualFull) { + jcr->setJobLevel(L_VIRTUAL_FULL); + } + /* + * When we are using the same jcr then make sure to reset + * RealEndTime back to zero. + */ + jcr->jr.RealEndTime = 0; + jobq_add(jq, jcr); /* queue the job to run again */ + P(jq->mutex); + free_jcr(jcr); /* release jcr */ + free(je); /* free the job entry */ + return true; /* we already cleaned up */ + } + /* + * Something was actually backed up, so we cannot reuse + * the old JobId or there will be database record + * conflicts. We now create a new job, copying the + * appropriate fields. + */ + JCR *njcr = new_jcr(sizeof(JCR), dird_free_jcr); + set_jcr_defaults(njcr, jcr->job); + /* + * Eliminate the new job_end_push, then copy the one from + * the old job, and set the old one to be empty. 
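+ * The entries are moved rather than shared, so each pushed item ends up
+ * attached to exactly one JCR.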
+ */ + void *v; + lock_jobs(); /* protect ourself from reload_config() */ + LockRes(); + foreach_alist(v, (&jcr->job_end_push)) { + njcr->job_end_push.append(v); + } + jcr->job_end_push.destroy(); + jcr->job_end_push.init(1, false); + UnlockRes(); + unlock_jobs(); + + njcr->reschedule_count = jcr->reschedule_count; + njcr->sched_time = jcr->sched_time; + njcr->initial_sched_time = jcr->initial_sched_time; + /* + * Special test here since a Virtual Full gets marked + * as a Full, so we look at the resource record + */ + if (jcr->wasVirtualFull) { + njcr->setJobLevel(L_VIRTUAL_FULL); + } else { + njcr->setJobLevel(jcr->getJobLevel()); + } + njcr->pool = jcr->pool; + njcr->run_pool_override = jcr->run_pool_override; + njcr->next_pool = jcr->next_pool; + njcr->run_next_pool_override = jcr->run_next_pool_override; + njcr->full_pool = jcr->full_pool; + njcr->vfull_pool = jcr->vfull_pool; + njcr->run_full_pool_override = jcr->run_full_pool_override; + njcr->run_vfull_pool_override = jcr->run_vfull_pool_override; + njcr->inc_pool = jcr->inc_pool; + njcr->run_inc_pool_override = jcr->run_inc_pool_override; + njcr->diff_pool = jcr->diff_pool; + njcr->JobStatus = -1; + njcr->setJobStatus(jcr->JobStatus); + if (jcr->rstore) { + copy_rstorage(njcr, jcr->rstorage, _("previous Job")); + } else { + free_rstorage(njcr); + } + if (jcr->wstore) { + copy_wstorage(njcr, jcr->wstorage, _("previous Job")); + } else { + free_wstorage(njcr); + } + njcr->messages = jcr->messages; + njcr->spool_data = jcr->spool_data; + njcr->write_part_after_job = jcr->write_part_after_job; + Dmsg0(2300, "Call to run new job\n"); + V(jq->mutex); + run_job(njcr); /* This creates a "new" job */ + free_jcr(njcr); /* release "new" jcr */ + P(jq->mutex); + Dmsg0(2300, "Back from running new job.\n"); + } + return false; +} + +/* + * See if we can acquire all the necessary resources for the job (JCR) + * + * Returns: true if successful + * false if resource failure + */ +static bool acquire_resources(JCR *jcr) +{ + bool skip_this_jcr = false; + + jcr->acquired_resource_locks = false; +/* + * Turning this code off is likely to cause some deadlocks, + * but we do not really have enough information here to + * know if this is really a deadlock (it may be a dual drive + * autochanger), and in principle, the SD reservation system + * should detect these deadlocks, so push the work off on it. + */ +#ifdef xxx + if (jcr->rstore && jcr->rstore == jcr->wstore) { /* possible deadlock */ + Jmsg(jcr, M_FATAL, 0, _("Job canceled. 
Attempt to read and write same device.\n" + " Read storage \"%s\" (From %s) -- Write storage \"%s\" (From %s)\n"), + jcr->rstore->name(), jcr->rstore_source, jcr->wstore->name(), jcr->wstore_source); + jcr->setJobStatus(JS_Canceled); + return false; + } +#endif + if (jcr->rstore) { + Dmsg1(200, "Rstore=%s\n", jcr->rstore->name()); + if (!inc_read_store(jcr)) { + Dmsg1(200, "Fail rncj=%d\n", jcr->rstore->getNumConcurrentJobs()); + jcr->setJobStatus(JS_WaitStoreRes); + return false; + } + } + + if (jcr->wstore) { + Dmsg1(200, "Wstore=%s\n", jcr->wstore->name()); + int num = jcr->wstore->getNumConcurrentJobs(); + if (num < jcr->wstore->MaxConcurrentJobs) { + Dmsg1(200, "Inc wncj=%d\n", num + 1); + jcr->wstore->setNumConcurrentJobs(num + 1); + } else if (jcr->rstore) { + dec_read_store(jcr); + skip_this_jcr = true; + } else { + Dmsg1(200, "Fail wncj=%d\n", num); + skip_this_jcr = true; + } + } + if (skip_this_jcr) { + jcr->setJobStatus(JS_WaitStoreRes); + return false; + } + + if (jcr->client) { + if (jcr->client->getNumConcurrentJobs() < jcr->client->MaxConcurrentJobs) { + update_client_numconcurrentjobs(jcr, 1); + } else { + /* Back out previous locks */ + dec_write_store(jcr); + dec_read_store(jcr); + jcr->setJobStatus(JS_WaitClientRes); + return false; + } + } + if (jcr->job->getNumConcurrentJobs() < jcr->job->MaxConcurrentJobs) { + int num; + num = jcr->job->getNumConcurrentJobs() + 1; + jcr->job->setNumConcurrentJobs(num); + } else { + /* Back out previous locks */ + dec_write_store(jcr); + dec_read_store(jcr); + update_client_numconcurrentjobs(jcr, -1); + jcr->setJobStatus(JS_WaitJobRes); + return false; + } + + jcr->acquired_resource_locks = true; + return true; +} + +static pthread_mutex_t rstore_mutex = PTHREAD_MUTEX_INITIALIZER; + +/* + * Note: inc_read_store() and dec_read_store() are + * called from select_rstore() in src/dird/restore.c + */ +bool inc_read_store(JCR *jcr) +{ + P(rstore_mutex); + int num = jcr->rstore->getNumConcurrentJobs(); + int numread = jcr->rstore->getNumConcurrentReadJobs(); + int maxread = jcr->rstore->MaxConcurrentReadJobs; + if (num < jcr->rstore->MaxConcurrentJobs && + (jcr->getJobType() == JT_RESTORE || + numread == 0 || + maxread == 0 || /* No limit set */ + numread < maxread)) /* Below the limit */ + { + num++; + numread++; + jcr->rstore->setNumConcurrentReadJobs(numread); + jcr->rstore->setNumConcurrentJobs(num); + Dmsg1(200, "Inc rncj=%d\n", num); + V(rstore_mutex); + return true; + } + V(rstore_mutex); + return false; +} + +void dec_read_store(JCR *jcr) +{ + if (jcr->rstore) { + P(rstore_mutex); + int numread = jcr->rstore->getNumConcurrentReadJobs() - 1; + int num = jcr->rstore->getNumConcurrentJobs() - 1; + jcr->rstore->setNumConcurrentReadJobs(numread); + jcr->rstore->setNumConcurrentJobs(num); + Dmsg1(200, "Dec rncj=%d\n", num); + V(rstore_mutex); + } +} + +static void dec_write_store(JCR *jcr) +{ + if (jcr->wstore) { + int num = jcr->wstore->getNumConcurrentJobs() - 1; + Dmsg1(200, "Dec wncj=%d\n", num); + jcr->wstore->setNumConcurrentJobs(num); + } +} diff --git a/src/dird/jobq.h b/src/dird/jobq.h new file mode 100644 index 00000000..ec328669 --- /dev/null +++ b/src/dird/jobq.h @@ -0,0 +1,69 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula job queue routines. + * + * Kern Sibbald, July MMIII + * + * This code adapted from Bacula work queue code, which was + * adapted from "Programming with POSIX Threads", by + * David R. Butenhof + */ + +#ifndef __JOBQ_H +#define __JOBQ_H 1 + +/* + * Structure to keep track of job queue request + */ +struct jobq_item_t { + dlink link; + JCR *jcr; +}; + +/* + * Structure describing a work queue + */ +struct jobq_t { + pthread_mutex_t mutex; /* queue access control */ + pthread_cond_t work; /* wait for work */ + pthread_attr_t attr; /* create detached threads */ + dlist *waiting_jobs; /* list of jobs waiting */ + dlist *running_jobs; /* jobs running */ + dlist *ready_jobs; /* jobs ready to run */ + int valid; /* queue initialized */ + bool quit; /* jobq should quit */ + int max_workers; /* max threads */ + int num_workers; /* current threads */ + int idle_workers; /* idle threads */ + void *(*engine)(void *arg); /* user engine */ +}; + +#define JOBQ_VALID 0xdec1993 + +extern int jobq_init( + jobq_t *wq, + int threads, /* maximum threads */ + void *(*engine)(void *) /* engine routine */ + ); +extern int jobq_destroy(jobq_t *wq); +extern int jobq_add(jobq_t *wq, JCR *jcr); +extern int jobq_remove(jobq_t *wq, JCR *jcr); + +#endif /* __JOBQ_H */ diff --git a/src/dird/mac.c b/src/dird/mac.c new file mode 100644 index 00000000..b54dcab6 --- /dev/null +++ b/src/dird/mac.c @@ -0,0 +1,943 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula Director -- mac.c -- responsible for doing + * migration and copy jobs. + * + * Also handles Copy jobs (March MMVIII) + * + * Written by Kern Sibbald, September MMIV + * + * Basic tasks done here: + * Open DB and create records for this job. + * Open Message Channel with Storage daemon to tell him a job will be starting. + * Open connection with Storage daemon and pass him commands + * to do the backup. + * When the Storage daemon finishes the job, update the DB. 
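+ *
+ * For Migration and Copy, two Storage daemon sessions are involved: the
+ * control job (jcr) drives the reading SD, while the attached write job
+ * (jcr->wjcr) drives the SD that writes the new copy.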
+ */ + +#include "bacula.h" +#include "dird.h" +#include "ua.h" + +static const int dbglevel = 10; +static char storaddr[] = "storage address=%s port=%d ssl=%d Job=%s Authentication=%s\n"; +static char OKstore[] = "2000 OK storage\n"; + +/* Imported subroutines */ +extern int getJob_to_migrate(JCR *jcr); +extern bool regex_find_jobids(JCR *jcr, idpkt *ids, const char *query1, + const char *query2, const char *type); +extern bool find_mediaid_then_jobids(JCR *jcr, idpkt *ids, const char *query1, + const char *type); +extern bool find_jobids_of_pool_uncopied_jobs(JCR *jcr, idpkt *ids); + +static bool set_mac_next_pool(JCR *jcr, POOL **pool); + +/* + * Called here before the job is run to do the job + * specific setup. Note, one of the important things to + * complete in this init code is to make the definitive + * choice of input and output storage devices. This is + * because immediately after the init, the job is queued + * in the jobq.c code, and it checks that all the resources + * (storage resources in particular) are available, so these + * must all be properly defined. + * + * previous_jr refers to the job DB record of the Job that is + * going to be migrated. + * prev_job refers to the job resource of the Job that is + * going to be migrated. + * jcr is the jcr for the current "migration" job. It is a + * control job that is put in the DB as a migration job, which + * means that this job migrated a previous job to a new job. + * No Volume or File data is associated with this control + * job. + * wjcr refers to the migrate/copy job that is writing and is run by + * the current jcr. It is a backup job that writes the + * data written for the previous_jr into the new pool. This + * job (wjcr) becomes the new backup job that replaces + * the original backup job. Note, this jcr is not really run. It + * is simply attached to the current jcr. It will show up in + * the Director's status output, but not in the SD or FD, both of + * which deal only with the current migration job (i.e. jcr). + */ +bool do_mac_init(JCR *jcr) +{ + POOL *pool = NULL; + JOB *job, *prev_job; + JCR *wjcr; /* jcr of writing job */ + int count; + + + apply_pool_overrides(jcr); + + if (!allow_duplicate_job(jcr)) { + return false; + } + + jcr->jr.PoolId = get_or_create_pool_record(jcr, jcr->pool->name()); + if (jcr->jr.PoolId == 0) { + Dmsg1(dbglevel, "JobId=%d no PoolId\n", (int)jcr->JobId); + Jmsg(jcr, M_FATAL, 0, _("Could not get or create a Pool record.\n")); + return false; + } + /* + * Note, at this point, pool is the pool for this job. We + * transfer it to rpool (read pool), and a bit later, + * pool will be changed to point to the write pool, + * which comes from pool->NextPool. 
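+ *
+ * In configuration terms the write pool comes from the Pool's
+ * "Next Pool" directive; a minimal sketch (illustrative names):
+ *
+ *   Pool {
+ *     Name = DiskPool
+ *     Pool Type = Backup
+ *     Storage = DiskAutochanger
+ *     Next Pool = TapePool        # destination for Migration/Copy
+ *   }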
+ */ + jcr->rpool = jcr->pool; /* save read pool */ + pm_strcpy(jcr->rpool_source, jcr->pool_source); + Dmsg2(dbglevel, "Read pool=%s (From %s)\n", jcr->rpool->name(), jcr->rpool_source); + + if (!get_or_create_fileset_record(jcr)) { + Dmsg1(dbglevel, "JobId=%d no FileSet\n", (int)jcr->JobId); + Jmsg(jcr, M_FATAL, 0, _("Could not get or create the FileSet record.\n")); + return false; + } + + /* If we find a job or jobs to migrate it is previous_jr.JobId */ + count = getJob_to_migrate(jcr); + if (count < 0) { + return false; + } + if (count == 0) { + set_mac_next_pool(jcr, &pool); + return true; /* no work */ + } + + Dmsg1(dbglevel, "Back from getJob_to_migrate JobId=%d\n", (int)jcr->JobId); + + if (jcr->previous_jr.JobId == 0) { + Dmsg1(dbglevel, "JobId=%d no previous JobId\n", (int)jcr->JobId); + Jmsg(jcr, M_INFO, 0, _("No previous Job found to %s.\n"), jcr->get_ActionName(0)); + set_mac_next_pool(jcr, &pool); + return true; /* no work */ + } + + if (create_restore_bootstrap_file(jcr) < 0) { + Jmsg(jcr, M_FATAL, 0, _("Create bootstrap file failed.\n")); + return false; + } + + if (jcr->previous_jr.JobId == 0 || jcr->ExpectedFiles == 0) { + jcr->setJobStatus(JS_Terminated); + Dmsg1(dbglevel, "JobId=%d expected files == 0\n", (int)jcr->JobId); + if (jcr->previous_jr.JobId == 0) { + Jmsg(jcr, M_INFO, 0, _("No previous Job found to %s.\n"), jcr->get_ActionName(0)); + } else { + Jmsg(jcr, M_INFO, 0, _("Previous Job has no data to %s.\n"), jcr->get_ActionName(0)); + } + set_mac_next_pool(jcr, &pool); + return true; /* no work */ + } + + + Dmsg5(dbglevel, "JobId=%d: Current: Name=%s JobId=%d Type=%c Level=%c\n", + (int)jcr->JobId, + jcr->jr.Name, (int)jcr->jr.JobId, + jcr->jr.JobType, jcr->jr.JobLevel); + + LockRes(); + job = (JOB *)GetResWithName(R_JOB, jcr->jr.Name); + prev_job = (JOB *)GetResWithName(R_JOB, jcr->previous_jr.Name); + UnlockRes(); + if (!job) { + Jmsg(jcr, M_FATAL, 0, _("Job resource not found for \"%s\".\n"), jcr->jr.Name); + return false; + } + if (!prev_job) { + Jmsg(jcr, M_FATAL, 0, _("Previous Job resource not found for \"%s\".\n"), + jcr->previous_jr.Name); + return false; + } + + + /* Create a write jcr */ + wjcr = jcr->wjcr = new_jcr(sizeof(JCR), dird_free_jcr); + memcpy(&wjcr->previous_jr, &jcr->previous_jr, sizeof(wjcr->previous_jr)); + + /* + * Turn the wjcr into a "real" job that takes on the aspects of + * the previous backup job "prev_job". + */ + set_jcr_defaults(wjcr, prev_job); + /* fix MA 987 cannot copy/migrate jobs with a Level=VF in the job resource + * If the prev_job level definition is VirtualFull, + * change it to Incremental, otherwise the writing SD would do a VF + */ + if (wjcr->getJobLevel() == L_VIRTUAL_FULL) { + wjcr->setJobLevel(L_INCREMENTAL); + } + + /* Don't check for duplicates on this jobs. We do it before setup_job(), + * because we check allow_duplicate_job() here. + */ + wjcr->IgnoreDuplicateJobChecking = true; + + if (!setup_job(wjcr)) { + Jmsg(jcr, M_FATAL, 0, _("setup job failed.\n")); + return false; + } + + /* Now reset the job record from the previous job */ + memcpy(&wjcr->jr, &jcr->previous_jr, sizeof(wjcr->jr)); + /* Update the jr to reflect the new values of PoolId and JobId. 
*/ + wjcr->jr.PoolId = jcr->jr.PoolId; + wjcr->jr.JobId = wjcr->JobId; + wjcr->sd_client = true; + //wjcr->setJobType(jcr->getJobType()); + wjcr->setJobLevel(jcr->getJobLevel()); + wjcr->spool_data = job->spool_data; /* turn on spooling if requested in job */ + wjcr->spool_size = jcr->spool_size; + jcr->spool_size = 0; + + /* Don't let WatchDog checks Max*Time value on this Job */ + wjcr->no_maxtime = true; + Dmsg4(dbglevel, "wjcr: Name=%s JobId=%d Type=%c Level=%c\n", + wjcr->jr.Name, (int)wjcr->jr.JobId, + wjcr->jr.JobType, wjcr->jr.JobLevel); + + if (set_mac_next_pool(jcr, &pool)) { + /* If pool storage specified, use it for restore */ + copy_rstorage(wjcr, pool->storage, _("Pool resource")); + copy_rstorage(jcr, pool->storage, _("Pool resource")); + + wjcr->pool = jcr->pool; + wjcr->next_pool = jcr->next_pool; + wjcr->jr.PoolId = jcr->jr.PoolId; + } + + return true; +} + +/* + * set_mac_next_pool() called by do_mac_init() + * at differents stages. + * The idea here is to make a common subroutine for the + * NextPool's search code and to permit do_mac_init() + * to return with NextPool set in jcr struct. + */ +static bool set_mac_next_pool(JCR *jcr, POOL **retpool) +{ + POOL_DBR pr; + POOL *pool; + char ed1[100]; + + /* + * Get the PoolId used with the original job. Then + * find the pool name from the database record. + */ + bmemset(&pr, 0, sizeof(pr)); + pr.PoolId = jcr->jr.PoolId; + if (!db_get_pool_record(jcr, jcr->db, &pr)) { + Jmsg(jcr, M_FATAL, 0, _("Pool for JobId %s not in database. ERR=%s\n"), + edit_int64(pr.PoolId, ed1), db_strerror(jcr->db)); + return false; + } + /* Get the pool resource corresponding to the original job */ + pool = (POOL *)GetResWithName(R_POOL, pr.Name); + *retpool = pool; + if (!pool) { + Jmsg(jcr, M_FATAL, 0, _("Pool resource \"%s\" not found.\n"), pr.Name); + return false; + } + + if (!apply_wstorage_overrides(jcr, pool)) { + return false; + } + + Dmsg2(dbglevel, "Write pool=%s read rpool=%s\n", jcr->pool->name(), jcr->rpool->name()); + + return true; +} + +/* + * Send storage address and authentication to deblock the other + * job. + */ +static bool send_store_addr_to_sd(JCR *jcr, char *Job, char *sd_auth_key, + STORE *store, char *store_address, uint32_t store_port) +{ + int tls_need = BNET_TLS_NONE; + + /* TLS Requirement */ + if (store->tls_enable) { + if (store->tls_require) { + tls_need = BNET_TLS_REQUIRED; + } else { + tls_need = BNET_TLS_OK; + } + } + + /* + * Send Storage address to the SD client + */ + Dmsg2(200, "=== Job=%s sd auth key=%s\n", Job, sd_auth_key); + jcr->store_bsock->fsend(storaddr, store_address, store_port, + tls_need, Job, sd_auth_key); + if (!response(jcr, jcr->store_bsock, OKstore, "Storage", DISPLAY_ERROR)) { + Dmsg4(050, "Response fail for: JobId=%d storeaddr=%s:%d Job=%s\n", + jcr->JobId, store_address, store_port, Job); + Jmsg3(jcr, M_FATAL, 0, "Response failure: storeddr=%s:%d Job=%s\n", + store_address, store_port, Job); + + return false; + } + return true; +} + +/* + * Do a Migration and Copy of a previous job + * + * Returns: false on failure + * true on success + */ +bool do_mac(JCR *jcr) +{ + char ed1[100]; + BSOCK *sd, *wsd; + JCR *wjcr = jcr->wjcr; /* newly migrated job */ + bool ok = false; + STORE *store; + char *store_address; + uint32_t store_port; + + /* + * If wjcr is NULL, there is nothing to do for this job, + * so set a normal status, cleanup and return OK. 
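+ * wjcr is left NULL by do_mac_init() when no eligible previous Job (or no
+ * data to copy/migrate) was found, so this is the normal "no work" path.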
+ */ + if (!wjcr) { + jcr->setJobStatus(JS_Terminated); + mac_cleanup(jcr, JS_Terminated, JS_Terminated); + return true; + } + + if (!db_get_job_record(jcr, jcr->db, &jcr->previous_jr)) { + Jmsg(jcr, M_FATAL, 0, _("Could not get job record for JobId %s to %s. ERR=%s"), + edit_int64(jcr->previous_jr.JobId, ed1), + jcr->get_ActionName(0), + db_strerror(jcr->db)); + jcr->setJobStatus(JS_Terminated); + mac_cleanup(jcr, JS_Terminated, JS_Terminated); + return true; + } + /* Make sure this job was not already migrated */ + if (jcr->previous_jr.JobType != JT_BACKUP && + jcr->previous_jr.JobType != JT_JOB_COPY) { + Jmsg(jcr, M_INFO, 0, _("JobId %s already %s probably by another Job. %s stopped.\n"), + edit_int64(jcr->previous_jr.JobId, ed1), + jcr->get_ActionName(1), + jcr->get_OperationName()); + jcr->setJobStatus(JS_Terminated); + mac_cleanup(jcr, JS_Terminated, JS_Terminated); + return true; + } + + /* Print Job Start message */ + Jmsg(jcr, M_INFO, 0, _("Start %s JobId %s, Job=%s\n"), + jcr->get_OperationName(), edit_uint64(jcr->JobId, ed1), jcr->Job); + + Dmsg3(200, "Start %s JobId %s, Job=%s\n", + jcr->get_OperationName(), edit_uint64(jcr->JobId, ed1), jcr->Job); + + + /* + * Now separate the read and write storages. jcr has no wstor... + * they all go into wjcr. + */ + free_rwstorage(wjcr); + wjcr->rstore = NULL; + wjcr->wstore = jcr->wstore; + jcr->wstore = NULL; + wjcr->wstorage = jcr->wstorage; + jcr->wstorage = NULL; + + /* TODO: See priority with bandwidth parameter */ + if (jcr->job->max_bandwidth > 0) { + jcr->max_bandwidth = jcr->job->max_bandwidth; + } else if (jcr->client && jcr->client->max_bandwidth > 0) { + jcr->max_bandwidth = jcr->client->max_bandwidth; + } + + if (jcr->max_bandwidth > 0) { + send_bwlimit(jcr, jcr->Job); /* Old clients don't have this command */ + } + + /* + * Open a message channel connection with the Storage + * daemon. This is to let him know that our client + * will be contacting him for a backup session. + * + */ + jcr->setJobStatus(JS_WaitSD); + wjcr->setJobStatus(JS_WaitSD); + + /* + * Start conversation with write Storage daemon + */ + Dmsg0(200, "Connect to write (wjcr) storage daemon.\n"); + if (!connect_to_storage_daemon(wjcr, 10, SDConnectTimeout, 1)) { + goto bail_out; + } + wsd = wjcr->store_bsock; + + /* + * Start conversation with read Storage daemon + */ + Dmsg1(200, "Connect to read (jcr) storage daemon. Jid=%d\n", jcr->JobId); + if (!connect_to_storage_daemon(jcr, 10, SDConnectTimeout, 1)) { + goto bail_out; + } + sd = jcr->store_bsock; + if (jcr->client) { + jcr->sd_calls_client = jcr->client->sd_calls_client; + } + + Dmsg2(dbglevel, "Read store=%s, write store=%s\n", + ((STORE *)jcr->rstorage->first())->name(), + ((STORE *)wjcr->wstorage->first())->name()); + + /* + * Now start a job with the read Storage daemon sending the bsr. + * This call returns the sd_auth_key + */ + Dmsg1(200, "Start job with read (jcr) storage daemon. Jid=%d\n", jcr->JobId); + if (!start_storage_daemon_job(jcr, jcr->rstorage, NULL, /*send_bsr*/true)) { + goto bail_out; + } + Dmsg0(150, "Read storage daemon connection OK\n"); + + if (jcr->sd_calls_client) { + wjcr->sd_calls_client = true; + wjcr->sd_client = false; + } else { + wjcr->sd_calls_client = true; + wjcr->sd_client = true; + } + + /* + * Now start a job with the write Storage daemon sending. + */ + Dmsg1(200, "Start Job with write (wjcr) storage daemon. 
Jid=%d\n", jcr->JobId); + if (!start_storage_daemon_job(wjcr, NULL, wjcr->wstorage, /*no_send_bsr*/false)) { + goto bail_out; + } + Dmsg0(150, "Write storage daemon connection OK\n"); + + + /* Declare the job started to start the MaxRunTime check */ + jcr->setJobStarted(); + + /* + * We re-update the job start record so that the start + * time is set after the run before job. This avoids + * that any files created by the run before job will + * be saved twice. They will be backed up in the current + * job, but not in the next one unless they are changed. + * Without this, they will be backed up in this job and + * in the next job run because in that case, their date + * is after the start of this run. + */ + jcr->start_time = time(NULL); + jcr->jr.StartTime = jcr->start_time; + jcr->jr.JobTDate = jcr->start_time; + jcr->setJobStatus(JS_Running); + + /* Update job start record for this mac control job */ + if (!db_update_job_start_record(jcr, jcr->db, &jcr->jr)) { + Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(jcr->db)); + goto bail_out; + } + + /* Declare the job started to start the MaxRunTime check */ + jcr->setJobStarted(); + + wjcr->start_time = time(NULL); + wjcr->jr.StartTime = wjcr->start_time; + wjcr->jr.JobTDate = wjcr->start_time; + wjcr->setJobStatus(JS_Running); + + + /* Update job start record for the real mac backup job */ + if (!db_update_job_start_record(wjcr, wjcr->db, &wjcr->jr)) { + Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(wjcr->db)); + goto bail_out; + } + + Dmsg4(dbglevel, "wjcr: Name=%s JobId=%d Type=%c Level=%c\n", + wjcr->jr.Name, (int)wjcr->jr.JobId, + wjcr->jr.JobType, wjcr->jr.JobLevel); + + + if (jcr->sd_calls_client) { + /* + * Reading SD must call the "client" i.e. the writing SD + */ + if (jcr->SDVersion < 3) { + Jmsg(jcr, M_FATAL, 0, _("The Storage daemon does not support SDCallsClient.\n")); + goto bail_out; + } + + /* Setup the storage address and port */ + store = wjcr->wstore; + if (store->SDDport == 0) { + store->SDDport = store->SDport; + } + store_address = store->address; /* note: store points to wstore */ + + Dmsg2(200, "Start write message thread jid=%d Job=%s\n", wjcr->JobId, wjcr->Job); + if (!run_storage_and_start_message_thread(wjcr, wsd)) { + goto bail_out; + } + + store_port = store->SDDport; + + /* + * Send writing SD address to the reading SD + */ + /* Send and wait for connection */ + /* ***FIXME*** this should probably be jcr->rstore, store_address, ... 
+ * to get TLS right */ + if (!send_store_addr_to_sd(jcr, wjcr->Job, wjcr->sd_auth_key, + store, store_address, store_port)) { + goto bail_out; + } + + /* Start read message thread */ + Dmsg2(200, "Start read message thread jid=%d Job=%s\n", jcr->JobId, jcr->Job); + if (!run_storage_and_start_message_thread(jcr, sd)) { + goto bail_out; + } + + } else { + /* + * Writing SD must simulate an FD and call the reading SD + * + * Send Storage daemon address to the writing SD + */ + store = jcr->rstore; + if (store->SDDport == 0) { + store->SDDport = store->SDport; + } + store_address = get_storage_address(jcr->client, store); + store_port = store->SDDport; + + /* Start read message thread */ + Dmsg2(200, "Start read message thread jid=%d Job=%s\n", jcr->JobId, jcr->Job); + if (!run_storage_and_start_message_thread(jcr, sd)) { + goto bail_out; + } + + /* Attempt connection for one hour */ + if (!send_store_addr_to_sd(wjcr, jcr->Job, jcr->sd_auth_key, + store, store_address, store_port)) { + goto bail_out; + } + /* Start write message thread */ + Dmsg2(200, "Start write message thread jid=%d Job=%s\n", wjcr->JobId, wjcr->Job); + if (!run_storage_and_start_message_thread(wjcr, wsd)) { + goto bail_out; + } + } + + jcr->setJobStatus(JS_Running); + wjcr->setJobStatus(JS_Running); + + /* Pickup Job termination data */ + /* Note, the SD stores in jcr->JobFiles/ReadBytes/JobBytes/JobErrors */ + wait_for_storage_daemon_termination(wjcr); + wjcr->setJobStatus(wjcr->SDJobStatus); + wait_for_storage_daemon_termination(jcr); + jcr->setJobStatus(jcr->SDJobStatus); + db_write_batch_file_records(wjcr); /* used by bulk batch file insert */ + + ok = jcr->is_JobStatus(JS_Terminated) && wjcr->is_JobStatus(JS_Terminated); + +bail_out: + /* Put back jcr write storages for proper cleanup */ + jcr->wstorage = wjcr->wstorage; + jcr->wstore = wjcr->wstore; + wjcr->wstore = NULL; + wjcr->wstorage = NULL; + wjcr->file_bsock = NULL; + + if (ok) { + mac_cleanup(jcr, jcr->JobStatus, wjcr->JobStatus); + } + return ok; +} + +/* + * Called from mac_sql.c for each migration/copy job to start + */ +void start_mac_job(JCR *jcr) +{ + UAContext *ua = new_ua_context(jcr); + char ed1[50]; + char args[MAX_NAME_LENGTH + 50]; + + ua->batch = true; + Mmsg(ua->cmd, "run job=\"%s\" jobid=%s ignoreduplicatecheck=yes pool=\"%s\"", + jcr->job->name(), edit_uint64(jcr->MigrateJobId, ed1), + jcr->pool->name()); + if (jcr->next_pool) { + bsnprintf(args, sizeof(args), " nextpool=\"%s\"", jcr->next_pool->name()); + pm_strcat(ua->cmd, args); + } + Dmsg2(dbglevel, "=============== %s cmd=%s\n", jcr->get_OperationName(), ua->cmd); + parse_ua_args(ua); /* parse command */ + JobId_t jobid = run_cmd(ua, ua->cmd); + if (jobid == 0) { + Jmsg(jcr, M_ERROR, 0, _("Could not start migration/copy job.\n")); + } else { + Jmsg(jcr, M_INFO, 0, _("%s JobId %d started.\n"), jcr->get_OperationName(), (int)jobid); + } + free_ua_context(ua); +} + +/* + * Release resources allocated during backup. 
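+ * TermCode is the termination status of the control job (jcr) and
+ * writeTermCode that of the writing job (jcr->wjcr); when a writing job
+ * exists, the catalog records of both jobs are finalized here.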
+ */ +/* ***FIXME*** implement writeTermCode */ +void mac_cleanup(JCR *jcr, int TermCode, int writeTermCode) +{ + char sdt[MAX_TIME_LENGTH], edt[MAX_TIME_LENGTH]; + char ec1[30], ec2[30], ec3[30], ec4[30], ec5[30], elapsed[50]; + char ec6[50], ec7[50], ec8[50], ec9[30], ec10[30], edl[50]; + char sd_term_msg[100]; + POOL_MEM term_code; + POOL_MEM term_msg; + int msg_type = M_INFO; + MEDIA_DBR mr; + double kbps; + utime_t RunTime; + bool goterrors=false; + JCR *wjcr = jcr->wjcr; + POOL_MEM query(PM_MESSAGE); + POOL_MEM vol_info; + + remove_dummy_jobmedia_records(jcr); + + Dmsg2(100, "Enter mac_cleanup %d %c\n", TermCode, TermCode); + update_job_end(jcr, TermCode); + + /* + * Check if we actually did something. + * wjcr is jcr of the newly migrated job. + */ + if (wjcr) { + char old_jobid[50], new_jobid[50]; + + edit_uint64(jcr->previous_jr.JobId, old_jobid); + edit_uint64(wjcr->jr.JobId, new_jobid); + + wjcr->JobFiles = jcr->JobFiles = wjcr->SDJobFiles; + wjcr->JobBytes = jcr->JobBytes = wjcr->SDJobBytes; + wjcr->jr.RealEndTime = 0; + wjcr->jr.PriorJobId = jcr->previous_jr.JobId; + if (jcr->previous_jr.PriorJob[0]) { + bstrncpy(wjcr->jr.PriorJob, jcr->previous_jr.PriorJob, sizeof(wjcr->jr.PriorJob)); + } else { + bstrncpy(wjcr->jr.PriorJob, jcr->previous_jr.Job, sizeof(wjcr->jr.PriorJob)); + } + wjcr->JobErrors += wjcr->SDErrors; + update_job_end(wjcr, TermCode); + + /* Update final items to set them to the previous job's values */ + Mmsg(query, "UPDATE Job SET StartTime='%s',EndTime='%s'," + "JobTDate=%s WHERE JobId=%s", + jcr->previous_jr.cStartTime, jcr->previous_jr.cEndTime, + edit_uint64(jcr->previous_jr.JobTDate, ec1), + new_jobid); + db_sql_query(wjcr->db, query.c_str(), NULL, NULL); + + goterrors = jcr->SDErrors > 0 || jcr->JobErrors > 0 || + jcr->SDJobStatus == JS_Canceled || + jcr->SDJobStatus == JS_ErrorTerminated || + jcr->SDJobStatus == JS_FatalError || + jcr->JobStatus == JS_FatalError; + + if (goterrors && jcr->getJobType() == JT_MIGRATE && jcr->JobStatus == JS_Terminated) { + Jmsg(jcr, M_WARNING, 0, _("Found errors during the migration process. 
" + "The original job %s will be kept in the catalog " + "and the Migration job will be marked in Error\n"), old_jobid); + } + + /* + * If we terminated a migration normally: + * - mark the previous job as migrated + * - move any Log records to the new JobId + * - Purge the File records from the previous job + */ + if (!goterrors && jcr->getJobType() == JT_MIGRATE && jcr->JobStatus == JS_Terminated) { + Mmsg(query, "UPDATE Job SET Type='%c' WHERE JobId=%s", + (char)JT_MIGRATED_JOB, old_jobid); + db_sql_query(wjcr->db, query.c_str(), NULL, NULL); + UAContext *ua = new_ua_context(jcr); + /* Move JobLog to new JobId */ + Mmsg(query, "UPDATE Log SET JobId=%s WHERE JobId=%s", + new_jobid, old_jobid); + db_sql_query(wjcr->db, query.c_str(), NULL, NULL); + + /* Move RestoreObjects */ + Mmsg(query, "UPDATE RestoreObject SET JobId=%s WHERE JobId=%s", + new_jobid, old_jobid); + db_sql_query(wjcr->db, query.c_str(), NULL, NULL); + + if (jcr->job->PurgeMigrateJob) { + /* Purge old Job record */ + purge_jobs_from_catalog(ua, old_jobid); + } else { + /* Purge all old file records, but leave Job record */ + purge_files_from_jobs(ua, old_jobid); + } + + free_ua_context(ua); + } + + /* + * If we terminated a Copy (rather than a Migration) normally: + * - copy any Log records to the new JobId + * - set type="Job Copy" for the new job + */ + if (goterrors || (jcr->getJobType() == JT_COPY && jcr->JobStatus == JS_Terminated)) { + /* Copy JobLog to new JobId */ + Mmsg(query, "INSERT INTO Log (JobId, Time, LogText ) " + "SELECT %s, Time, LogText FROM Log WHERE JobId=%s", + new_jobid, old_jobid); + db_sql_query(wjcr->db, query.c_str(), NULL, NULL); + + /* We are in a real copy job */ + Mmsg(query, "UPDATE Job SET Type='%c' WHERE JobId=%s", + (char)JT_JOB_COPY, new_jobid); + db_sql_query(wjcr->db, query.c_str(), NULL, NULL); + + /* Copy RestoreObjects */ + Mmsg(query, "INSERT INTO RestoreObject (ObjectName,PluginName,RestoreObject," + "ObjectLength,ObjectFullLength,ObjectIndex,ObjectType," + "ObjectCompression,FileIndex,JobId) " + "SELECT ObjectName,PluginName,RestoreObject," + "ObjectLength,ObjectFullLength,ObjectIndex,ObjectType," + "ObjectCompression,FileIndex,%s FROM RestoreObject WHERE JobId=%s", + new_jobid, old_jobid); + db_sql_query(wjcr->db, query.c_str(), NULL, NULL); + } + + if (!db_get_job_record(jcr, jcr->db, &jcr->jr)) { + Jmsg(jcr, M_WARNING, 0, _("Error getting Job record for Job report: ERR=%s"), + db_strerror(jcr->db)); + jcr->setJobStatus(JS_ErrorTerminated); + } + + update_bootstrap_file(wjcr); + + if (!db_get_job_volume_names(wjcr, wjcr->db, wjcr->jr.JobId, &wjcr->VolumeName)) { + /* + * Note, if the job has failed, most likely it did not write any + * tape, so suppress this "error" message since in that case + * it is normal. Or look at it the other way, only for a + * normal exit should we complain about this error. + */ + if (jcr->JobStatus == JS_Terminated && jcr->jr.JobBytes) { + Jmsg(jcr, M_ERROR, 0, "%s", db_strerror(wjcr->db)); + } + wjcr->VolumeName[0] = 0; /* none */ + } + + if (wjcr->VolumeName[0]) { + /* Find last volume name. 
Multiple vols are separated by | */ + char *p = strrchr(wjcr->VolumeName, '|'); + if (p) { + p++; /* skip | */ + } else { + p = wjcr->VolumeName; /* no |, take full name */ + } + bstrncpy(mr.VolumeName, p, sizeof(mr.VolumeName)); + if (!db_get_media_record(jcr, jcr->db, &mr)) { + Jmsg(jcr, M_WARNING, 0, _("Error getting Media record for Volume \"%s\": ERR=%s"), + mr.VolumeName, db_strerror(jcr->db)); + } + } + + /* We keep all information in the catalog because most of the work is + * done, and users might restore things from what we have + */ + if (goterrors) { + jcr->setJobStatus(JS_ErrorTerminated); + Mmsg(query, "UPDATE Job SET JobStatus='%c' WHERE JobId=%s", + JS_ErrorTerminated, new_jobid); + db_sql_query(wjcr->db, query.c_str(), NULL, NULL); + } + } + + switch (jcr->JobStatus) { + case JS_Terminated: + if (jcr->JobErrors || jcr->SDErrors) { + Mmsg(term_msg, _("%%s OK -- %s"), jcr->StatusErrMsg[0] ? jcr->StatusErrMsg : _("with warnings")); + } else { + Mmsg(term_msg, _("%%s OK")); + } + break; + case JS_FatalError: + case JS_ErrorTerminated: + Mmsg(term_msg, _("*** %%s Error ***")); + msg_type = M_ERROR; /* Generate error message */ + terminate_sd_msg_chan_thread(jcr); + terminate_sd_msg_chan_thread(wjcr); + break; + case JS_Canceled: + Mmsg(term_msg, _("%%s Canceled")); + terminate_sd_msg_chan_thread(jcr); + terminate_sd_msg_chan_thread(wjcr); + break; + default: + Mmsg(term_msg, _("Inappropriate %s term code")); + break; + } + + if (!wjcr) { /* We did nothing */ + goterrors = jcr->JobErrors > 0 || jcr->JobStatus == JS_FatalError; + if (!goterrors) { + if (jcr->getJobType() == JT_MIGRATE && jcr->previous_jr.JobId != 0) { + /* Mark previous job as migrated */ + Mmsg(query, "UPDATE Job SET Type='%c' WHERE JobId=%s", + (char)JT_MIGRATED_JOB, edit_uint64(jcr->previous_jr.JobId, ec1)); + db_sql_query(jcr->db, query.c_str(), NULL, NULL); + } + Mmsg(term_msg, _("%%s -- no files to %%s")); + } + } + + Mmsg(term_code, term_msg.c_str(), jcr->get_OperationName(), jcr->get_ActionName(0)); + bstrftimes(sdt, sizeof(sdt), jcr->jr.StartTime); + bstrftimes(edt, sizeof(edt), jcr->jr.EndTime); + RunTime = jcr->jr.EndTime - jcr->jr.StartTime; + if (RunTime <= 0) { + RunTime = 1; + } + kbps = (double)jcr->SDJobBytes / (1000.0 * (double)RunTime); + + jobstatus_to_ascii(jcr->SDJobStatus, sd_term_msg, sizeof(sd_term_msg)); + + /* Edit string for last volume size */ + if (mr.VolABytes != 0) { + Mmsg(vol_info, _("meta: %s (%sB) aligned: %s (%sB)"), + edit_uint64_with_commas(mr.VolBytes, ec4), + edit_uint64_with_suffix(mr.VolBytes, ec5), + edit_uint64_with_commas(mr.VolABytes, ec9), + edit_uint64_with_suffix(mr.VolBytes, ec10)); + } else { + Mmsg(vol_info, _("%s (%sB)"), + edit_uint64_with_commas(mr.VolBytes, ec4), + edit_uint64_with_suffix(mr.VolBytes, ec5)); + } + + Jmsg(jcr, msg_type, 0, _("%s %s %s (%s):\n" +" Build OS: %s %s %s\n" +" Prev Backup JobId: %s\n" +" Prev Backup Job: %s\n" +" New Backup JobId: %s\n" +" Current JobId: %s\n" +" Current Job: %s\n" +" Backup Level: %s%s\n" +" Client: %s\n" +" FileSet: \"%s\" %s\n" +" Read Pool: \"%s\" (From %s)\n" +" Read Storage: \"%s\" (From %s)\n" +" Write Pool: \"%s\" (From %s)\n" +" Write Storage: \"%s\" (From %s)\n" +" Catalog: \"%s\" (From %s)\n" +" Start time: %s\n" +" End time: %s\n" +" Elapsed time: %s\n" +" Priority: %d\n" +" SD Files Written: %s\n" +" SD Bytes Written: %s (%sB)\n" +" Rate: %.1f KB/s\n" +" Volume name(s): %s\n" +" Volume Session Id: %d\n" +" Volume Session Time: %d\n" +" Last Volume Bytes: %s\n" +" SD Errors: %d\n" +" SD termination 
status: %s\n" +" Termination: %s\n\n"), + BACULA, my_name, VERSION, LSMDATE, + HOST_OS, DISTNAME, DISTVER, + edit_uint64(jcr->previous_jr.JobId, ec6), + jcr->previous_jr.Job, + wjcr ? edit_uint64(wjcr->jr.JobId, ec7) : "0", + edit_uint64(jcr->jr.JobId, ec8), + jcr->jr.Job, + level_to_str(edl, sizeof(edl), jcr->getJobLevel()), jcr->since, + jcr->client->name(), + jcr->fileset->name(), jcr->FSCreateTime, + jcr->rpool->name(), jcr->rpool_source, + jcr->rstore?jcr->rstore->name():"*None*", + NPRT(jcr->rstore_source), + jcr->pool->name(), jcr->pool_source, + jcr->wstore?jcr->wstore->name():"*None*", + NPRT(jcr->wstore_source), + jcr->catalog->name(), jcr->catalog_source, + sdt, + edt, + edit_utime(RunTime, elapsed, sizeof(elapsed)), + jcr->JobPriority, + edit_uint64_with_commas(jcr->SDJobFiles, ec1), + edit_uint64_with_commas(jcr->SDJobBytes, ec2), + edit_uint64_with_suffix(jcr->SDJobBytes, ec3), + (float)kbps, + wjcr ? wjcr->VolumeName : "", + jcr->VolSessionId, + jcr->VolSessionTime, + vol_info.c_str(), + jcr->SDErrors, + sd_term_msg, + term_code.c_str()); + + Dmsg0(100, "Leave migrate_cleanup()\n"); +} + +bool set_mac_wstorage(UAContext *ua, JCR *jcr, POOL *pool, POOL *next_pool, + const char *source) +{ + if (!next_pool) { + if (ua) { + ua->error_msg(_("No Next Pool specification found in Pool \"%s\".\n"), + pool->hdr.name); + } else { + Jmsg(jcr, M_FATAL, 0, _("No Next Pool specification found in Pool \"%s\".\n"), + pool->hdr.name); + } + return false; + } + + if (!next_pool->storage || next_pool->storage->size() == 0) { + Jmsg(jcr, M_FATAL, 0, _("No Storage specification found in Next Pool \"%s\".\n"), + next_pool->name()); + return false; + } + + /* If pool storage specified, use it instead of job storage for backup */ + copy_wstorage(jcr, next_pool->storage, source); + + return true; +} diff --git a/src/dird/mac_sql.c b/src/dird/mac_sql.c new file mode 100644 index 00000000..40bbf917 --- /dev/null +++ b/src/dird/mac_sql.c @@ -0,0 +1,763 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2015 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * Bacula Director -- mac.c -- responsible for doing + * migration and copy jobs. + * + * Also handles Copy jobs (March MMVIII) + * + * Kern Sibbald, September MMIV + * + * Basic tasks done here: + * Open DB and create records for this job. + * Open Message Channel with Storage daemon to tell him a job will be starting. + * Open connection with Storage daemon and pass him commands + * to do the backup. + * When the Storage daemon finishes the job, update the DB. 
+ * + */ + +#include "bacula.h" +#include "dird.h" +#include "ua.h" +#ifndef HAVE_REGEX_H +#include "lib/bregex.h" +#else +#include +#endif + +struct uitem { + dlink link; + char *item; +}; + +/* Imported functions */ +extern void start_mac_job(JCR*); + +static const int dbglevel = 10; + +/* Forware referenced functions */ +static bool find_mediaid_then_jobids(JCR *jcr, idpkt *ids, const char *query1, + const char *type); +static bool regex_find_jobids(JCR *jcr, idpkt *ids, const char *query1, + const char *query2, const char *type); +static int get_next_dbid_from_list(char **p, DBId_t *DBId); +static int unique_dbid_handler(void *ctx, int num_fields, char **row); +static int unique_name_handler(void *ctx, int num_fields, char **row); +static bool find_jobids_from_mediaid_list(JCR *jcr, idpkt *ids, const char *type); +static bool find_jobids_of_pool_uncopied_jobs(JCR *jcr, idpkt *ids); + +/* Get Job names in Pool */ +static const char *sql_job = + "SELECT DISTINCT Job.Name from Job,Pool" + " WHERE Pool.Name='%s' AND Job.PoolId=Pool.PoolId"; + +/* Get JobIds from regex'ed Job names */ +static const char *sql_jobids_from_job = + "SELECT DISTINCT Job.JobId,Job.StartTime FROM Job,Pool" + " WHERE Job.Name='%s' AND Pool.Name='%s' AND Job.PoolId=Pool.PoolId" + " ORDER by Job.StartTime"; + +/* Get Client names in Pool */ +static const char *sql_client = + "SELECT DISTINCT Client.Name from Client,Pool,Job" + " WHERE Pool.Name='%s' AND Job.ClientId=Client.ClientId AND" + " Job.PoolId=Pool.PoolId"; + +/* Get JobIds from regex'ed Client names */ +static const char *sql_jobids_from_client = + "SELECT DISTINCT Job.JobId,Job.StartTime FROM Job,Pool,Client" + " WHERE Client.Name='%s' AND Pool.Name='%s' AND Job.PoolId=Pool.PoolId" + " AND Job.ClientId=Client.ClientId AND Job.Type IN ('B','C')" + " AND Job.JobStatus IN ('T','W')" + " ORDER by Job.StartTime"; + +/* Get Volume names in Pool */ +static const char *sql_vol = + "SELECT DISTINCT VolumeName FROM Media,Pool WHERE" + " VolStatus in ('Full','Used','Error') AND Media.Enabled=1 AND" + " Media.PoolId=Pool.PoolId AND Pool.Name='%s'"; + +/* Get JobIds from regex'ed Volume names */ +static const char *sql_jobids_from_vol = + "SELECT DISTINCT Job.JobId,Job.StartTime FROM Media,JobMedia,Job" + " WHERE Media.VolumeName='%s' AND Media.MediaId=JobMedia.MediaId" + " AND JobMedia.JobId=Job.JobId AND Job.Type IN ('B','C')" + " AND Job.JobStatus IN ('T','W') AND Media.Enabled=1" + " ORDER by Job.StartTime"; + +static const char *sql_smallest_vol = + "SELECT Media.MediaId FROM Media,Pool,JobMedia WHERE" + " Media.MediaId in (SELECT DISTINCT MediaId from JobMedia) AND" + " Media.VolStatus in ('Full','Used','Error') AND Media.Enabled=1 AND" + " Media.PoolId=Pool.PoolId AND Pool.Name='%s'" + " ORDER BY VolBytes ASC LIMIT 1"; + +static const char *sql_oldest_vol = + "SELECT Media.MediaId FROM Media,Pool,JobMedia WHERE" + " Media.MediaId in (SELECT DISTINCT MediaId from JobMedia) AND" + " Media.VolStatus in ('Full','Used','Error') AND Media.Enabled=1 AND" + " Media.PoolId=Pool.PoolId AND Pool.Name='%s'" + " ORDER BY LastWritten ASC LIMIT 1"; + +/* Get JobIds when we have selected MediaId */ +static const char *sql_jobids_from_mediaid = + "SELECT DISTINCT Job.JobId,Job.StartTime FROM JobMedia,Job" + " WHERE JobMedia.JobId=Job.JobId AND JobMedia.MediaId IN (%s)" + " AND Job.Type IN ('B','C') AND Job.JobStatus IN ('T','W')" + " ORDER by Job.StartTime"; + +/* Get the number of bytes in the pool */ +static const char *sql_pool_bytes = + "SELECT SUM(JobBytes) FROM Job WHERE 
JobId IN" + " (SELECT DISTINCT Job.JobId from Pool,Job,Media,JobMedia WHERE" + " Pool.Name='%s' AND Media.PoolId=Pool.PoolId AND" + " VolStatus in ('Full','Used','Error','Append') AND Media.Enabled=1 AND" + " Job.Type IN ('B','C') AND Job.JobStatus IN ('T','W') AND" + " JobMedia.JobId=Job.JobId AND Job.PoolId=Media.PoolId)"; + +/* Get the number of bytes in the Jobs */ +static const char *sql_job_bytes = + "SELECT SUM(JobBytes) FROM Job WHERE JobId IN (%s)"; + +/* Get Media Ids in Pool */ +static const char *sql_mediaids = + "SELECT MediaId FROM Media,Pool WHERE" + " VolStatus in ('Full','Used','Error') AND Media.Enabled=1 AND" + " Media.PoolId=Pool.PoolId AND Pool.Name='%s' ORDER BY LastWritten ASC"; + +/* Get JobIds in Pool longer than specified time */ +static const char *sql_pool_time = + "SELECT DISTINCT Job.JobId FROM Pool,Job,Media,JobMedia WHERE" + " Pool.Name='%s' AND Media.PoolId=Pool.PoolId AND" + " VolStatus IN ('Full','Used','Error') AND Media.Enabled=1 AND" + " Job.Type IN ('B','C') AND Job.JobStatus IN ('T','W') AND" + " JobMedia.JobId=Job.JobId AND Job.PoolId=Media.PoolId" + " AND Job.RealEndTime<='%s'"; + +/* Get JobIds from successfully completed backup jobs which have not been copied before */ +static const char *sql_jobids_of_pool_uncopied_jobs = + "SELECT DISTINCT Job.JobId,Job.StartTime FROM Job,Pool" + " WHERE Pool.Name = '%s' AND Pool.PoolId = Job.PoolId" + " AND Job.Type = 'B' AND Job.JobStatus IN ('T','W')" + " AND Job.jobBytes > 0" + " AND Job.JobId NOT IN" + " (SELECT PriorJobId FROM Job WHERE" + " Type IN ('B','C') AND Job.JobStatus IN ('T','W')" + " AND PriorJobId != 0)" + " ORDER by Job.StartTime"; + +/* + * + * This is the central piece of code that finds a job or jobs + * actually JobIds to migrate. It first looks to see if one + * has been "manually" specified in jcr->MigrateJobId, and if + * so, it returns that JobId to be run. Otherwise, it + * examines the Selection Type to see what kind of migration + * we are doing (Volume, Job, Client, ...) and applies any + * Selection Pattern if appropriate to obtain a list of JobIds. + * Finally, it will loop over all the JobIds found, except the last + * one starting a new job with MigrationJobId set to that JobId, and + * finally, it returns the last JobId to the caller. + * + * Returns: -1 on error + * 0 if no jobs to migrate + * 1 if OK and jcr->previous_jr filled in + */ +int getJob_to_migrate(JCR *jcr) +{ + char ed1[30], ed2[30]; + POOL_MEM query(PM_MESSAGE); + JobId_t JobId; + DBId_t DBId = 0; + int stat; + char *p; + idpkt ids, mid, jids; + db_int64_ctx ctx; + int64_t pool_bytes; + time_t ttime; + struct tm tm; + char dt[MAX_TIME_LENGTH]; + int count = 0; + int limit = jcr->job->MaxSpawnedJobs; /* limit is max jobs to start */ + + ids.list = get_pool_memory(PM_MESSAGE); + ids.list[0] = 0; + ids.count = 0; + mid.list = get_pool_memory(PM_MESSAGE); + mid.list[0] = 0; + mid.count = 0; + jids.list = get_pool_memory(PM_MESSAGE); + jids.list[0] = 0; + jids.count = 0; + + /* + * If MigrateJobId is set, then we migrate only that Job, + * otherwise, we go through the full selection of jobs to + * migrate. 
+ */ + if (jcr->MigrateJobId != 0) { + Dmsg1(dbglevel, "At Job start previous jobid=%u\n", jcr->MigrateJobId); + JobId = jcr->MigrateJobId; + } else { + switch (jcr->job->selection_type) { + case MT_JOB: + if (!regex_find_jobids(jcr, &ids, sql_job, sql_jobids_from_job, "Job")) { + goto bail_out; + } + break; + case MT_CLIENT: + if (!regex_find_jobids(jcr, &ids, sql_client, sql_jobids_from_client, "Client")) { + goto bail_out; + } + break; + case MT_VOLUME: + if (!regex_find_jobids(jcr, &ids, sql_vol, sql_jobids_from_vol, "Volume")) { + goto bail_out; + } + break; + case MT_SQLQUERY: + if (!jcr->job->selection_pattern) { + Jmsg(jcr, M_FATAL, 0, _("No %s SQL selection pattern specified.\n"), jcr->get_OperationName()); + goto bail_out; + } + Dmsg1(dbglevel, "SQL=%s\n", jcr->job->selection_pattern); + if (!db_sql_query(jcr->db, jcr->job->selection_pattern, + unique_dbid_handler, (void *)&ids)) { + Jmsg(jcr, M_FATAL, 0, + _("SQL failed. ERR=%s\n"), db_strerror(jcr->db)); + goto bail_out; + } + break; + case MT_SMALLEST_VOL: + if (!find_mediaid_then_jobids(jcr, &ids, sql_smallest_vol, "Smallest Volume")) { + goto bail_out; + } + break; + case MT_OLDEST_VOL: + if (!find_mediaid_then_jobids(jcr, &ids, sql_oldest_vol, "Oldest Volume")) { + goto bail_out; + } + break; + case MT_POOL_OCCUPANCY: + ctx.count = 0; + /* Find count of bytes in pool */ + Mmsg(query, sql_pool_bytes, jcr->rpool->name()); + if (!db_sql_query(jcr->db, query.c_str(), db_int64_handler, (void *)&ctx)) { + Jmsg(jcr, M_FATAL, 0, _("SQL failed. ERR=%s\n"), db_strerror(jcr->db)); + goto bail_out; + } + if (ctx.count == 0) { + Jmsg(jcr, M_INFO, 0, _("No Volumes found to %s.\n"), jcr->get_ActionName(0)); + goto ok_out; + } + pool_bytes = ctx.value; + Dmsg2(dbglevel, "highbytes=%lld pool=%lld\n", jcr->rpool->MigrationHighBytes, + pool_bytes); + if (pool_bytes < (int64_t)jcr->rpool->MigrationHighBytes) { + Jmsg(jcr, M_INFO, 0, _("No Volumes found to %s.\n"), jcr->get_ActionName(0)); + goto ok_out; + } + Dmsg0(dbglevel, "We should do Occupation migration.\n"); + + ids.count = 0; + /* Find a list of MediaIds that could be migrated */ + Mmsg(query, sql_mediaids, jcr->rpool->name()); + Dmsg1(dbglevel, "query=%s\n", query.c_str()); + if (!db_sql_query(jcr->db, query.c_str(), unique_dbid_handler, (void *)&ids)) { + Jmsg(jcr, M_FATAL, 0, _("SQL failed. ERR=%s\n"), db_strerror(jcr->db)); + goto bail_out; + } + if (ids.count == 0) { + Jmsg(jcr, M_INFO, 0, _("No Volumes found to %s.\n"), jcr->get_ActionName(0)); + goto ok_out; + } + Dmsg2(dbglevel, "Pool Occupancy ids=%d MediaIds=%s\n", ids.count, ids.list); + + if (!find_jobids_from_mediaid_list(jcr, &ids, "Volume")) { + goto bail_out; + } + /* ids == list of jobs */ + p = ids.list; + for (int i=0; i < (int)ids.count; i++) { + stat = get_next_dbid_from_list(&p, &DBId); + Dmsg2(dbglevel, "get_next_dbid stat=%d JobId=%u\n", stat, (uint32_t)DBId); + if (stat < 0) { + Jmsg(jcr, M_FATAL, 0, _("Invalid JobId found.\n")); + goto bail_out; + } else if (stat == 0) { + break; + } + + mid.count = 1; + Mmsg(mid.list, "%s", edit_int64(DBId, ed1)); + if (jids.count > 0) { + pm_strcat(jids.list, ","); + } + pm_strcat(jids.list, mid.list); + jids.count += mid.count; + + /* Find count of bytes from Jobs */ + Mmsg(query, sql_job_bytes, mid.list); + Dmsg1(dbglevel, "Jobbytes query: %s\n", query.c_str()); + if (!db_sql_query(jcr->db, query.c_str(), db_int64_handler, (void *)&ctx)) { + Jmsg(jcr, M_FATAL, 0, _("SQL failed. 
ERR=%s\n"), db_strerror(jcr->db)); + goto bail_out; + } + pool_bytes -= ctx.value; + Dmsg2(dbglevel, "Total %s Job bytes=%s\n", jcr->get_ActionName(0), edit_int64_with_commas(ctx.value, ed1)); + Dmsg2(dbglevel, "lowbytes=%s poolafter=%s\n", + edit_int64_with_commas(jcr->rpool->MigrationLowBytes, ed1), + edit_int64_with_commas(pool_bytes, ed2)); + if (pool_bytes <= (int64_t)jcr->rpool->MigrationLowBytes) { + Dmsg0(dbglevel, "We should be done.\n"); + break; + } + } + /* Transfer jids to ids, where the jobs list is expected */ + ids.count = jids.count; + pm_strcpy(ids.list, jids.list); + Dmsg2(dbglevel, "Pool Occupancy ids=%d JobIds=%s\n", ids.count, ids.list); + break; + case MT_POOL_TIME: + ttime = time(NULL) - (time_t)jcr->rpool->MigrationTime; + (void)localtime_r(&ttime, &tm); + strftime(dt, sizeof(dt), "%Y-%m-%d %H:%M:%S", &tm); + + ids.count = 0; + Mmsg(query, sql_pool_time, jcr->rpool->name(), dt); + Dmsg1(dbglevel, "query=%s\n", query.c_str()); + if (!db_sql_query(jcr->db, query.c_str(), unique_dbid_handler, (void *)&ids)) { + Jmsg(jcr, M_FATAL, 0, _("SQL failed. ERR=%s\n"), db_strerror(jcr->db)); + goto bail_out; + } + if (ids.count == 0) { + Jmsg(jcr, M_INFO, 0, _("No Volumes found to %s.\n"), jcr->get_ActionName(0)); + goto ok_out; + } + Dmsg2(dbglevel, "PoolTime ids=%d JobIds=%s\n", ids.count, ids.list); + break; + case MT_POOL_UNCOPIED_JOBS: + if (!find_jobids_of_pool_uncopied_jobs(jcr, &ids)) { + goto bail_out; + } + break; + default: + Jmsg(jcr, M_FATAL, 0, _("Unknown %s Selection Type.\n"), jcr->get_OperationName()); + goto bail_out; + } + + /* + * Loop over all jobids except the last one, sending + * them to start_mac_job(), which will start a job + * for each of them. For the last JobId, we handle it below. + */ + p = ids.list; + if (ids.count == 0) { + Jmsg(jcr, M_INFO, 0, _("No JobIds found to %s.\n"), jcr->get_ActionName(0)); + goto ok_out; + } + + Jmsg(jcr, M_INFO, 0, _("The following %u JobId%s chosen to be %s: %s\n"), + ids.count, (ids.count < 2) ? _(" was") : _("s were"), + jcr->get_ActionName(1), ids.list); + + Dmsg2(dbglevel, "Before loop count=%d ids=%s\n", ids.count, ids.list); + /* + * Note: to not over load the system, limit the number + * of new jobs started to Maximum Spawned Jobs + */ + for (int i=1; i < (int)ids.count; i++) { + JobId = 0; + stat = get_next_jobid_from_list(&p, &JobId); + Dmsg3(dbglevel, "getJobid_no=%d stat=%d JobId=%u\n", i, stat, JobId); + if (stat < 0) { + Jmsg(jcr, M_FATAL, 0, _("Invalid JobId found.\n")); + goto bail_out; + } else if (stat == 0) { + Jmsg(jcr, M_INFO, 0, _("No JobIds found to %s.\n"), jcr->get_ActionName(0)); + goto ok_out; + } + jcr->MigrateJobId = JobId; + /* Don't start any more when limit reaches zero */ + limit--; + if (limit > 0) { + start_mac_job(jcr); + Dmsg0(dbglevel, "Back from start_mac_job\n"); + } + } + + /* Now get the last JobId and handle it in the current job */ + JobId = 0; + stat = get_next_jobid_from_list(&p, &JobId); + Dmsg2(dbglevel, "Last get_next_jobid stat=%d JobId=%u\n", stat, (int)JobId); + if (stat < 0) { + Jmsg(jcr, M_FATAL, 0, _("Invalid JobId found.\n")); + goto bail_out; + } else if (stat == 0) { + Jmsg(jcr, M_INFO, 0, _("No JobIds found to %s.\n"), jcr->get_ActionName(0)); + goto ok_out; + } + } + + jcr->previous_jr.JobId = JobId; + Dmsg1(dbglevel, "Previous jobid=%d\n", (int)jcr->previous_jr.JobId); + + if (!db_get_job_record(jcr, jcr->db, &jcr->previous_jr)) { + Jmsg(jcr, M_FATAL, 0, _("Could not get job record for JobId %s to %s. 
ERR=%s"), + edit_int64(jcr->previous_jr.JobId, ed1), + jcr->get_ActionName(0), + db_strerror(jcr->db)); + goto bail_out; + } + + Jmsg(jcr, M_INFO, 0, _("%s using JobId=%s Job=%s\n"), + jcr->get_OperationName(), + edit_int64(jcr->previous_jr.JobId, ed1), jcr->previous_jr.Job); + Dmsg4(dbglevel, "%s JobId=%d using JobId=%s Job=%s\n", + jcr->get_OperationName(), + jcr->JobId, + edit_int64(jcr->previous_jr.JobId, ed1), jcr->previous_jr.Job); + count = 1; + +ok_out: + goto out; + +bail_out: + count = -1; + +out: + free_pool_memory(ids.list); + free_pool_memory(mid.list); + free_pool_memory(jids.list); + return count; +} + +/* + * This routine returns: + * false if an error occurred + * true otherwise + * ids.count number of jobids found (may be zero) + */ +static bool find_jobids_from_mediaid_list(JCR *jcr, idpkt *ids, const char *type) +{ + bool ok = false; + POOL_MEM query(PM_MESSAGE); + + Mmsg(query, sql_jobids_from_mediaid, ids->list); + ids->count = 0; + if (!db_sql_query(jcr->db, query.c_str(), unique_dbid_handler, (void *)ids)) { + Jmsg(jcr, M_FATAL, 0, _("SQL failed. ERR=%s\n"), db_strerror(jcr->db)); + goto bail_out; + } + if (ids->count == 0) { + Jmsg(jcr, M_INFO, 0, _("No %ss found to %s.\n"), type, jcr->get_ActionName(0)); + } + ok = true; + +bail_out: + return ok; +} + +/* + * This routine returns: + * false if an error occurred + * true otherwise + * ids.count number of jobids found (may be zero) + */ +static bool find_jobids_of_pool_uncopied_jobs(JCR *jcr, idpkt *ids) +{ + bool ok = false; + POOL_MEM query(PM_MESSAGE); + + /* Only a copy job is allowed */ + if (jcr->getJobType() != JT_COPY) { + Jmsg(jcr, M_FATAL, 0, + _("Selection Type 'pooluncopiedjobs' only applies to Copy Jobs")); + goto bail_out; + } + + Dmsg1(dbglevel, "copy selection pattern=%s\n", jcr->rpool->name()); + Mmsg(query, sql_jobids_of_pool_uncopied_jobs, jcr->rpool->name()); + Dmsg1(dbglevel, "get uncopied jobs query=%s\n", query.c_str()); + if (!db_sql_query(jcr->db, query.c_str(), unique_dbid_handler, (void *)ids)) { + Jmsg(jcr, M_FATAL, 0, + _("SQL to get uncopied jobs failed. ERR=%s\n"), db_strerror(jcr->db)); + goto bail_out; + } + ok = true; + +bail_out: + return ok; +} + +static bool regex_find_jobids(JCR *jcr, idpkt *ids, const char *query1, + const char *query2, const char *type) +{ + dlist *item_chain; + uitem *item = NULL; + uitem *last_item = NULL; + regex_t preg; + char prbuf[500]; + int rc; + bool ok = false; + POOL_MEM query(PM_MESSAGE); + + item_chain = New(dlist(item, &item->link)); + if (!jcr->job->selection_pattern) { + Jmsg(jcr, M_FATAL, 0, _("No %s %s selection pattern specified.\n"), + jcr->get_OperationName(), type); + goto bail_out; + } + Dmsg1(dbglevel, "regex-sel-pattern=%s\n", jcr->job->selection_pattern); + /* Basic query for names */ + Mmsg(query, query1, jcr->rpool->name()); + Dmsg1(dbglevel, "get name query1=%s\n", query.c_str()); + if (!db_sql_query(jcr->db, query.c_str(), unique_name_handler, + (void *)item_chain)) { + Jmsg(jcr, M_FATAL, 0, + _("SQL to get %s failed. 
ERR=%s\n"), type, db_strerror(jcr->db)); + goto bail_out; + } + Dmsg1(dbglevel, "query1 returned %d names\n", item_chain->size()); + if (item_chain->size() == 0) { + Jmsg(jcr, M_INFO, 0, _("Query of Pool \"%s\" returned no Jobs to %s.\n"), + jcr->rpool->name(), jcr->get_ActionName(0)); + ok = true; + goto bail_out; /* skip regex match */ + } else { + /* Compile regex expression */ + rc = regcomp(&preg, jcr->job->selection_pattern, REG_EXTENDED); + if (rc != 0) { + regerror(rc, &preg, prbuf, sizeof(prbuf)); + Jmsg(jcr, M_FATAL, 0, _("Could not compile regex pattern \"%s\" ERR=%s\n"), + jcr->job->selection_pattern, prbuf); + goto bail_out; + } + /* Now apply the regex to the names and remove any item not matched */ + foreach_dlist(item, item_chain) { + const int nmatch = 30; + regmatch_t pmatch[nmatch]; + if (last_item) { + Dmsg1(dbglevel, "Remove item %s\n", last_item->item); + free(last_item->item); + item_chain->remove(last_item); + } + Dmsg1(dbglevel, "get name Item=%s\n", item->item); + rc = regexec(&preg, item->item, nmatch, pmatch, 0); + if (rc == 0) { + last_item = NULL; /* keep this one */ + } else { + last_item = item; + } + } + if (last_item) { + Dmsg1(dbglevel, "Remove item %s\n", last_item->item); + free(last_item->item); + item_chain->remove(last_item); + } + regfree(&preg); + } + if (item_chain->size() == 0) { + Jmsg(jcr, M_INFO, 0, _("Regex pattern matched no Jobs to %s.\n"), jcr->get_ActionName(0)); + ok = true; + goto bail_out; /* skip regex match */ + } + + /* + * At this point, we have a list of items in item_chain + * that have been matched by the regex, so now we need + * to look up their jobids. + */ + ids->count = 0; + foreach_dlist(item, item_chain) { + Dmsg2(dbglevel, "Got %s: %s\n", type, item->item); + Mmsg(query, query2, item->item, jcr->rpool->name()); + Dmsg1(dbglevel, "get id from name query2=%s\n", query.c_str()); + if (!db_sql_query(jcr->db, query.c_str(), unique_dbid_handler, (void *)ids)) { + Jmsg(jcr, M_FATAL, 0, + _("SQL failed. ERR=%s\n"), db_strerror(jcr->db)); + goto bail_out; + } + } + if (ids->count == 0) { + Jmsg(jcr, M_INFO, 0, _("No %ss found to %s.\n"), type, jcr->get_ActionName(0)); + } + ok = true; + +bail_out: + Dmsg2(dbglevel, "Count=%d Jobids=%s\n", ids->count, ids->list); + foreach_dlist(item, item_chain) { + free(item->item); + } + delete item_chain; + return ok; +} + +static bool find_mediaid_then_jobids(JCR *jcr, idpkt *ids, const char *query1, + const char *type) +{ + bool ok = false; + POOL_MEM query(PM_MESSAGE); + + ids->count = 0; + /* Basic query for MediaId */ + Mmsg(query, query1, jcr->rpool->name()); + if (!db_sql_query(jcr->db, query.c_str(), unique_dbid_handler, (void *)ids)) { + Jmsg(jcr, M_FATAL, 0, _("SQL failed. ERR=%s\n"), db_strerror(jcr->db)); + goto bail_out; + } + if (ids->count == 0) { + Jmsg(jcr, M_INFO, 0, _("No %s found to %s.\n"), type, jcr->get_ActionName(0)); + ok = true; /* Not an error */ + goto bail_out; + } else if (ids->count != 1) { + Jmsg(jcr, M_FATAL, 0, _("SQL error. 
Expected 1 MediaId got %d\n"), ids->count); + goto bail_out; + } + Dmsg2(dbglevel, "%s MediaIds=%s\n", type, ids->list); + + ok = find_jobids_from_mediaid_list(jcr, ids, type); + +bail_out: + return ok; +} + +/* +* const char *sql_ujobid = +* "SELECT DISTINCT Job.Job from Client,Pool,Media,Job,JobMedia " +* " WHERE Media.PoolId=Pool.PoolId AND Pool.Name='%s' AND" +* " JobMedia.JobId=Job.JobId AND Job.PoolId=Media.PoolId"; +*/ + +/* Add an item to the list if it is unique */ +static void add_unique_id(idpkt *ids, char *item) +{ + const int maxlen = 30; + char id[maxlen+1]; + char *q = ids->list; + + /* Walk through current list to see if each item is the same as item */ + for ( ; *q; ) { + id[0] = 0; + for (int i=0; icount == 0) { + ids->list[0] = 0; + } else { + pm_strcat(ids->list, ","); + } + pm_strcat(ids->list, item); + ids->count++; +// Dmsg3(0, "add_uniq count=%d Ids=%p %s\n", ids->count, ids->list, ids->list); + return; +} + +/* + * Callback handler make list of DB Ids + */ +static int unique_dbid_handler(void *ctx, int num_fields, char **row) +{ + idpkt *ids = (idpkt *)ctx; + + /* Sanity check */ + if (!row || !row[0]) { + Dmsg0(dbglevel, "dbid_hdlr error empty row\n"); + return 1; /* stop calling us */ + } + + add_unique_id(ids, row[0]); + Dmsg3(dbglevel, "dbid_hdlr count=%d Ids=%p %s\n", ids->count, ids->list, ids->list); + return 0; +} + +static int item_compare(void *item1, void *item2) +{ + uitem *i1 = (uitem *)item1; + uitem *i2 = (uitem *)item2; + return strcmp(i1->item, i2->item); +} + +static int unique_name_handler(void *ctx, int num_fields, char **row) +{ + dlist *list = (dlist *)ctx; + + uitem *new_item = (uitem *)malloc(sizeof(uitem)); + uitem *item; + + memset(new_item, 0, sizeof(uitem)); + new_item->item = bstrdup(row[0]); + Dmsg1(dbglevel, "Unique_name_hdlr Item=%s\n", row[0]); + item = (uitem *)list->binary_insert((void *)new_item, item_compare); + if (item != new_item) { /* already in list */ + free(new_item->item); + free((char *)new_item); + return 0; + } + return 0; +} + +/* + * Return next DBId from comma separated list + * + * Returns: + * 1 if next DBId returned + * 0 if no more DBIds are in list + * -1 there is an error + */ +static int get_next_dbid_from_list(char **p, DBId_t *DBId) +{ + const int maxlen = 30; + char id[maxlen+1]; + char *q = *p; + + id[0] = 0; + for (int i=0; ijcr = jcr; + mreq->bs = bs; + P(mutex); + num_reqs++; + qinsert(&mountq, &mreq->bq); + V(mutex); + return; +} diff --git a/src/dird/msgchan.c b/src/dird/msgchan.c new file mode 100644 index 00000000..d091fcf8 --- /dev/null +++ b/src/dird/msgchan.c @@ -0,0 +1,622 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula Director -- msgchan.c -- handles the message channel + * to the Storage daemon and the File daemon. + * + * Written by Kern Sibbald, August MM + * + * This routine runs as a thread and must be thread reentrant. 
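The comma-separated id lists built by the selection code above are later consumed one entry at a time with the same 1/0/-1 convention used by get_next_dbid_from_list(). A compact approximation of that walk, using strtoull() rather than the original fixed-length digit copy; names here are illustrative, not Bacula's:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    /*
     * Return the next id from a comma-separated list.
     *   1  id returned, *p advanced past it
     *   0  end of list
     *  -1  malformed input
     */
    static int get_next_id(char **p, uint64_t *id)
    {
       char *q = *p;
       char *end;
       if (*q == '\0') {
          return 0;
       }
       *id = strtoull(q, &end, 10);
       if (end == q || (*end != ',' && *end != '\0')) {
          return -1;                     /* not a number or bad separator */
       }
       *p = (*end == ',') ? end + 1 : end; /* step over the comma */
       return 1;
    }

    int main(void)
    {
       char list[] = "101,102,103";
       char *p = list;
       uint64_t id;
       int stat;
       while ((stat = get_next_id(&p, &id)) == 1) {
          printf("id=%llu\n", (unsigned long long)id);
       }
       return stat < 0 ? 1 : 0;
    }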
+ * + * Basic tasks done here: + * Open a message channel with the Storage daemon + * to authenticate ourself and to pass the JobId. + * Create a thread to interact with the Storage daemon + * who returns a job status and requests Catalog services, etc. + */ + +#include "bacula.h" +#include "dird.h" + +static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; + +/* Commands sent to Storage daemon */ +static char jobcmd[] = "JobId=%s job=%s job_name=%s client_name=%s " + "type=%d level=%d FileSet=%s NoAttr=%d SpoolAttr=%d FileSetMD5=%s " + "SpoolData=%d WritePartAfterJob=%d PreferMountedVols=%d SpoolSize=%s " + "rerunning=%d VolSessionId=%d VolSessionTime=%d sd_client=%d " + "Authorization=%s\n"; +static char use_storage[] = "use storage=%s media_type=%s pool_name=%s " + "pool_type=%s append=%d copy=%d stripe=%d\n"; +static char use_device[] = "use device=%s\n"; +//static char query_device[] = _("query device=%s"); + +/* Response from Storage daemon */ +static char OKjob[] = "3000 OK Job SDid=%d SDtime=%d Authorization=%100s\n"; +static char OK_device[] = "3000 OK use device device=%s\n"; + +/* Storage Daemon requests */ +static char Job_start[] = "3010 Job %127s start\n"; +static char Job_end[] = + "3099 Job %127s end JobStatus=%d JobFiles=%d JobBytes=%lld JobErrors=%u ErrMsg=%256s\n"; + +/* Forward referenced functions */ +extern "C" void *msg_thread(void *arg); + +BSOCK *open_sd_bsock(UAContext *ua) +{ + STORE *store = ua->jcr->wstore; + + if (!is_bsock_open(ua->jcr->store_bsock)) { + ua->send_msg(_("Connecting to Storage daemon %s at %s:%d ...\n"), + store->name(), store->address, store->SDport); + if (!connect_to_storage_daemon(ua->jcr, 10, SDConnectTimeout, 1)) { + ua->error_msg(_("Failed to connect to Storage daemon.\n")); + return NULL; + } + } + return ua->jcr->store_bsock; +} + +void close_sd_bsock(UAContext *ua) +{ + if (ua->jcr->store_bsock) { + ua->jcr->store_bsock->signal(BNET_TERMINATE); + free_bsock(ua->jcr->store_bsock); + } +} + +/* + * Establish a message channel connection with the Storage daemon + * and perform authentication. + */ +bool connect_to_storage_daemon(JCR *jcr, int retry_interval, + int max_retry_time, int verbose) +{ + BSOCK *sd = jcr->store_bsock; + STORE *store; + utime_t heart_beat; + + if (is_bsock_open(sd)) { + return true; /* already connected */ + } + if (!sd) { + sd = new_bsock(); + } + + /* If there is a write storage use it */ + if (jcr->wstore) { + store = jcr->wstore; + } else { + store = jcr->rstore; + } + + if (store->heartbeat_interval) { + heart_beat = store->heartbeat_interval; + } else { + heart_beat = director->heartbeat_interval; + } + + /* + * Open message channel with the Storage daemon + */ + Dmsg2(100, "Connect to Storage daemon %s:%d\n", store->address, + store->SDport); + sd->set_source_address(director->DIRsrc_addr); + if (!sd->connect(jcr, retry_interval, max_retry_time, heart_beat, _("Storage daemon"), + store->address, NULL, store->SDport, verbose)) { + + if (!jcr->store_bsock) { /* The bsock was locally created, so we free it here */ + free_bsock(sd); + } + sd = NULL; + } + + if (sd == NULL) { + return false; + } + sd->res = (RES *)store; /* save pointer to other end */ + jcr->store_bsock = sd; + + if (!authenticate_storage_daemon(jcr, store)) { + sd->close(); + return false; + } + return true; +} + +/* + * Here we ask the SD to send us the info for a + * particular device resource. 
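The command and response strings above are plain scanf/printf formats exchanged over the message channel, so a reply such as OKjob is parsed with a single sscanf(). A self-contained round trip of that pattern; the SDid, SDtime and authorization values are invented for the example:

    #include <stdio.h>

    int main(void)
    {
       /* Same shape as the OKjob format above. */
       static const char OKjob[] = "3000 OK Job SDid=%d SDtime=%d Authorization=%100s\n";
       char msg[256], auth[101];
       int sid, stime;

       snprintf(msg, sizeof(msg), "3000 OK Job SDid=%d SDtime=%d Authorization=%s\n",
                7, 1549324215, "xxyyzz");   /* what the SD might send back */

       if (sscanf(msg, OKjob, &sid, &stime, auth) == 3) {
          printf("SDid=%d SDtime=%d auth=%s\n", sid, stime, auth);
       } else {
          printf("malformed reply: %s", msg);
       }
       return 0;
    }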
+ */ +#ifdef xxx +bool update_device_res(JCR *jcr, DEVICE *dev) +{ + POOL_MEM device_name; + BSOCK *sd; + if (!connect_to_storage_daemon(jcr, 5, 30, 0)) { + return false; + } + sd = jcr->store_bsock; + pm_strcpy(device_name, dev->name()); + bash_spaces(device_name); + sd->fsend(query_device, device_name.c_str()); + Dmsg1(100, ">stored: %s\n", sd->msg); + /* The data is returned through Device_update */ + if (bget_dirmsg(sd) <= 0) { + return false; + } + return true; +} +#endif + +static char OKbootstrap[] = "3000 OK bootstrap\n"; + +/* + * Start a job with the Storage daemon + */ +bool start_storage_daemon_job(JCR *jcr, alist *rstore, alist *wstore, bool send_bsr) +{ + bool ok = true; + STORE *storage; + BSOCK *sd; + char sd_auth_key[100]; + POOL_MEM store_name, device_name, pool_name, pool_type, media_type; + POOL_MEM job_name, client_name, fileset_name; + int copy = 0; + int stripe = 0; + char ed1[30], ed2[30]; + int sd_client; + + sd = jcr->store_bsock; + /* + * Now send JobId and permissions, and get back the authorization key. + */ + pm_strcpy(job_name, jcr->job->name()); + bash_spaces(job_name); + if (jcr->client) { + pm_strcpy(client_name, jcr->client->name()); + } else { + pm_strcpy(client_name, "**Dummy**"); + } + bash_spaces(client_name); + pm_strcpy(fileset_name, jcr->fileset->name()); + bash_spaces(fileset_name); + if (jcr->fileset->MD5[0] == 0) { + bstrncpy(jcr->fileset->MD5, "**Dummy**", sizeof(jcr->fileset->MD5)); + } + /* If rescheduling, cancel the previous incarnation of this job + * with the SD, which might be waiting on the FD connection. + * If we do not cancel it the SD will not accept a new connection + * for the same jobid. + */ + if (jcr->reschedule_count) { + sd->fsend("cancel Job=%s\n", jcr->Job); + while (sd->recv() >= 0) + { } + } + + sd_client = jcr->sd_client; + if (jcr->sd_auth_key) { + bstrncpy(sd_auth_key, jcr->sd_auth_key, sizeof(sd_auth_key)); + } else { + bstrncpy(sd_auth_key, "dummy", sizeof(sd_auth_key)); + } + + sd->fsend(jobcmd, edit_int64(jcr->JobId, ed1), jcr->Job, + job_name.c_str(), client_name.c_str(), + jcr->getJobType(), jcr->getJobLevel(), + fileset_name.c_str(), !jcr->pool->catalog_files, + jcr->job->SpoolAttributes, jcr->fileset->MD5, jcr->spool_data, + jcr->write_part_after_job, jcr->job->PreferMountedVolumes, + edit_int64(jcr->spool_size, ed2), jcr->rerunning, + jcr->VolSessionId, jcr->VolSessionTime, sd_client, + sd_auth_key); + + Dmsg1(100, ">stored: %s", sd->msg); + Dmsg2(100, "=== rstore=%p wstore=%p\n", rstore, wstore); + if (bget_dirmsg(sd) > 0) { + Dmsg1(100, "msg); + if (sscanf(sd->msg, OKjob, &jcr->VolSessionId, + &jcr->VolSessionTime, &sd_auth_key) != 3) { + Dmsg1(100, "BadJob=%s\n", sd->msg); + Jmsg(jcr, M_FATAL, 0, _("Storage daemon rejected Job command: %s\n"), sd->msg); + return false; + } else { + bfree_and_null(jcr->sd_auth_key); + jcr->sd_auth_key = bstrdup(sd_auth_key); + Dmsg1(150, "sd_auth_key=%s\n", jcr->sd_auth_key); + } + } else { + Jmsg(jcr, M_FATAL, 0, _("bstrerror()); + return false; + } + + if (send_bsr && (!send_bootstrap_file(jcr, sd) || + !response(jcr, sd, OKbootstrap, "Bootstrap", DISPLAY_ERROR))) { + return false; + } + + /* + * We have two loops here. The first comes from the + * Storage = associated with the Job, and we need + * to attach to each one. + * The inner loop loops over all the alternative devices + * associated with each Storage. It selects the first + * available one. 
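Because the jobcmd line above is space-delimited, every name that may contain blanks is passed through bash_spaces() before it is sent and reversed with unbash_spaces() on receipt. The sketch below assumes a reversible substitution of each blank with the 0x1 byte; that byte value is an assumption of the sketch, not taken from the code shown here:

    #include <stdio.h>

    /* Reversible encoding so names with blanks survive a space-delimited line. */
    static void bash_spaces_sketch(char *str)
    {
       for ( ; *str; str++) {
          if (*str == ' ') *str = 0x1;   /* assumed placeholder byte */
       }
    }

    static void unbash_spaces_sketch(char *str)
    {
       for ( ; *str; str++) {
          if (*str == 0x1) *str = ' ';
       }
    }

    int main(void)
    {
       char job_name[] = "Nightly Backup";   /* example name with a blank */
       bash_spaces_sketch(job_name);
       printf("on the wire: job_name=%s\n", job_name);
       unbash_spaces_sketch(job_name);
       printf("decoded:     job_name=%s\n", job_name);
       return 0;
    }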
+ * + */ + /* Do read side of storage daemon */ + if (ok && rstore) { + /* For the moment, only migrate, copy and vbackup have rpool */ + if (jcr->is_JobType(JT_MIGRATE) || jcr->is_JobType(JT_COPY) || + (jcr->is_JobType(JT_BACKUP) && jcr->is_JobLevel(L_VIRTUAL_FULL))) { + pm_strcpy(pool_type, jcr->rpool->pool_type); + pm_strcpy(pool_name, jcr->rpool->name()); + } else { + pm_strcpy(pool_type, jcr->pool->pool_type); + pm_strcpy(pool_name, jcr->pool->name()); + } + bash_spaces(pool_type); + bash_spaces(pool_name); + foreach_alist(storage, rstore) { + Dmsg1(100, "Rstore=%s\n", storage->name()); + pm_strcpy(store_name, storage->name()); + bash_spaces(store_name); + if (jcr->media_type) { + pm_strcpy(media_type, jcr->media_type); /* user override */ + } else { + pm_strcpy(media_type, storage->media_type); + } + bash_spaces(media_type); + sd->fsend(use_storage, store_name.c_str(), media_type.c_str(), + pool_name.c_str(), pool_type.c_str(), 0, copy, stripe); + Dmsg1(100, "rstore >stored: %s", sd->msg); + DEVICE *dev; + /* Loop over alternative storage Devices until one is OK */ + foreach_alist(dev, storage->device) { + pm_strcpy(device_name, dev->name()); + bash_spaces(device_name); + sd->fsend(use_device, device_name.c_str()); + Dmsg1(100, ">stored: %s", sd->msg); + } + sd->signal(BNET_EOD); /* end of Devices */ + } + sd->signal(BNET_EOD); /* end of Storages */ + if (bget_dirmsg(sd) > 0) { + Dmsg1(100, "msg); + /* ****FIXME**** save actual device name */ + ok = sscanf(sd->msg, OK_device, device_name.c_str()) == 1; + } else { + ok = false; + } + if (ok) { + Jmsg(jcr, M_INFO, 0, _("Using Device \"%s\" to read.\n"), device_name.c_str()); + } + } + + /* Do write side of storage daemon */ + if (ok && wstore) { + pm_strcpy(pool_type, jcr->pool->pool_type); + pm_strcpy(pool_name, jcr->pool->name()); + bash_spaces(pool_type); + bash_spaces(pool_name); + foreach_alist(storage, wstore) { + Dmsg1(100, "Wstore=%s\n", storage->name()); + pm_strcpy(store_name, storage->name()); + bash_spaces(store_name); + pm_strcpy(media_type, storage->media_type); + bash_spaces(media_type); + sd->fsend(use_storage, store_name.c_str(), media_type.c_str(), + pool_name.c_str(), pool_type.c_str(), 1, copy, stripe); + + Dmsg1(100, "wstore >stored: %s", sd->msg); + DEVICE *dev; + /* Loop over alternative storage Devices until one is OK */ + foreach_alist(dev, storage->device) { + pm_strcpy(device_name, dev->name()); + bash_spaces(device_name); + sd->fsend(use_device, device_name.c_str()); + Dmsg1(100, ">stored: %s", sd->msg); + } + sd->signal(BNET_EOD); /* end of Devices */ + } + sd->signal(BNET_EOD); /* end of Storages */ + if (bget_dirmsg(sd) > 0) { + Dmsg1(100, "msg); + /* ****FIXME**** save actual device name */ + ok = sscanf(sd->msg, OK_device, device_name.c_str()) == 1; + } else { + ok = false; + } + if (ok) { + Jmsg(jcr, M_INFO, 0, _("Using Device \"%s\" to write.\n"), device_name.c_str()); + } + } + if (!ok) { + POOL_MEM err_msg; + if (sd->msg[0]) { + pm_strcpy(err_msg, sd->msg); /* save message */ + Jmsg(jcr, M_FATAL, 0, _("\n" + " Storage daemon didn't accept Device \"%s\" because:\n %s"), + device_name.c_str(), err_msg.c_str()/* sd->msg */); + } else { + Jmsg(jcr, M_FATAL, 0, _("\n" + " Storage daemon didn't accept Device \"%s\" command.\n"), + device_name.c_str()); + } + } + return ok; +} + +/* + * Start a thread to handle Storage daemon messages and + * Catalog requests. 
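The negotiation above announces each candidate Storage, then each of its alternative Devices, and closes every level with an end-of-data signal before reading back the single "3000 OK use device" reply. Reduced to printf(), with invented resource names and "<EOD>" standing in for the BNET_EOD signal, the shape is:

    #include <stdio.h>

    int main(void)
    {
       const char *storages[] = { "File1", "Autochanger1" };
       const char *devices[][2] = { { "FileDev", NULL }, { "Drive-0", "Drive-1" } };

       for (int i = 0; i < 2; i++) {
          printf("use storage=%s media_type=File pool_name=Default "
                 "pool_type=Backup append=1 copy=0 stripe=0\n", storages[i]);
          for (int j = 0; j < 2 && devices[i][j]; j++) {
             printf("use device=%s\n", devices[i][j]);
          }
          printf("<EOD>\n");             /* end of Devices for this Storage */
       }
       printf("<EOD>\n");                /* end of Storages */
       return 0;
    }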
+ */ +bool start_storage_daemon_message_thread(JCR *jcr) +{ + int status; + pthread_t thid; + + jcr->inc_use_count(); /* mark in use by msg thread */ + jcr->sd_msg_thread_done = false; + jcr->SD_msg_chan_started = false; + Dmsg0(150, "Start SD msg_thread.\n"); + if ((status=pthread_create(&thid, NULL, msg_thread, (void *)jcr)) != 0) { + berrno be; + Jmsg1(jcr, M_ABORT, 0, _("Cannot create message thread: %s\n"), be.bstrerror(status)); + } + /* Wait for thread to start */ + while (jcr->SD_msg_chan_started == false) { + bmicrosleep(0, 50); + if (job_canceled(jcr) || jcr->sd_msg_thread_done) { + return false; + } + } + Dmsg1(150, "SD msg_thread started. use=%d\n", jcr->use_count()); + return true; +} + +extern "C" void msg_thread_cleanup(void *arg) +{ + JCR *jcr = (JCR *)arg; + db_end_transaction(jcr, jcr->db); /* terminate any open transaction */ + jcr->lock(); + jcr->sd_msg_thread_done = true; + jcr->SD_msg_chan_started = false; + jcr->unlock(); + pthread_cond_broadcast(&jcr->term_wait); /* wakeup any waiting threads */ + Dmsg2(100, "=== End msg_thread. JobId=%d usecnt=%d\n", jcr->JobId, jcr->use_count()); + db_thread_cleanup(jcr->db); /* remove thread specific data */ + free_jcr(jcr); /* release jcr */ +} + +/* + * Handle the message channel (i.e. requests from the + * Storage daemon). + * Note, we are running in a separate thread. + */ +extern "C" void *msg_thread(void *arg) +{ + JCR *jcr = (JCR *)arg; + BSOCK *sd; + int JobStatus; + int n; + char Job[MAX_NAME_LENGTH]; + char ErrMsg[256]; + uint32_t JobFiles, JobErrors; + uint64_t JobBytes; + ErrMsg[0] = 0; + + pthread_detach(pthread_self()); + set_jcr_in_tsd(jcr); + jcr->SD_msg_chan = pthread_self(); + jcr->SD_msg_chan_started = true; + pthread_cleanup_push(msg_thread_cleanup, arg); + sd = jcr->store_bsock; + + /* Read the Storage daemon's output. + */ + Dmsg0(100, "Start msg_thread loop\n"); + n = 0; + while (!job_canceled(jcr) && (n=bget_dirmsg(sd)) >= 0) { + Dmsg1(400, "msg); + if (sscanf(sd->msg, Job_start, Job) == 1) { + continue; + } + if (sscanf(sd->msg, Job_end, Job, &JobStatus, &JobFiles, + &JobBytes, &JobErrors, ErrMsg) == 6) { + jcr->SDJobStatus = JobStatus; /* termination status */ + jcr->SDJobFiles = JobFiles; + jcr->SDJobBytes = JobBytes; + jcr->SDErrors = JobErrors; + unbash_spaces(ErrMsg); /* Error message if any */ + pm_strcpy(jcr->StatusErrMsg, ErrMsg); + break; + } + Dmsg1(400, "end loop use=%d\n", jcr->use_count()); + } + if (n == BNET_HARDEOF && jcr->getJobStatus() != JS_Canceled) { + /* + * This probably should be M_FATAL, but I am not 100% sure + * that this return *always* corresponds to a dropped line. 
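msg_thread() above detaches itself, marks itself started, and relies on pthread_cleanup_push()/pop() so that msg_thread_cleanup() runs whether the thread returns normally or is cancelled. A minimal self-contained version of that lifecycle, with no Bacula types (compile with -pthread):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdbool.h>
    #include <unistd.h>

    static bool thread_done = false;
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  done_cond = PTHREAD_COND_INITIALIZER;

    static void cleanup(void *arg)
    {
       (void)arg;
       pthread_mutex_lock(&lock);
       thread_done = true;
       pthread_cond_broadcast(&done_cond); /* wake anyone waiting on us */
       pthread_mutex_unlock(&lock);
    }

    static void *worker(void *arg)
    {
       (void)arg;
       pthread_detach(pthread_self());
       pthread_cleanup_push(cleanup, NULL);
       sleep(1);                           /* stands in for the message loop */
       pthread_cleanup_pop(1);             /* pop and run the handler */
       return NULL;
    }

    int main(void)
    {
       pthread_t tid;
       if (pthread_create(&tid, NULL, worker, NULL) != 0) {
          return 1;
       }
       pthread_mutex_lock(&lock);
       while (!thread_done) {
          pthread_cond_wait(&done_cond, &lock);
       }
       pthread_mutex_unlock(&lock);
       printf("message thread finished\n");
       return 0;
    }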
+ */ + Qmsg(jcr, M_ERROR, 0, _("Director's connection to SD for this Job was lost.\n")); + } + if (jcr->getJobStatus() == JS_Canceled) { + jcr->SDJobStatus = JS_Canceled; + } else if (sd->is_error()) { + jcr->SDJobStatus = JS_ErrorTerminated; + } + pthread_cleanup_pop(1); /* remove and execute the handler */ + return NULL; +} + +void wait_for_storage_daemon_termination(JCR *jcr) +{ + int cancel_count = 0; + /* Now wait for Storage daemon to terminate our message thread */ + while (!jcr->sd_msg_thread_done) { + struct timeval tv; + struct timezone tz; + struct timespec timeout; + + gettimeofday(&tv, &tz); + timeout.tv_nsec = 0; + timeout.tv_sec = tv.tv_sec + 5; /* wait 5 seconds */ + Dmsg0(400, "I'm waiting for message thread termination.\n"); + P(mutex); + pthread_cond_timedwait(&jcr->term_wait, &mutex, &timeout); + V(mutex); + if (jcr->is_canceled()) { + if (jcr->SD_msg_chan_started) { + jcr->store_bsock->set_timed_out(); + jcr->store_bsock->set_terminated(); + sd_msg_thread_send_signal(jcr, TIMEOUT_SIGNAL); + } + cancel_count++; + } + /* Give SD 30 seconds to clean up after cancel */ + if (cancel_count == 6) { + break; + } + } + jcr->setJobStatus(JS_Terminated); +} + +void terminate_sd_msg_chan_thread(JCR *jcr) +{ + if (jcr && jcr->store_bsock) { + jcr->store_bsock->signal(BNET_TERMINATE); + jcr->lock(); + if ( !jcr->sd_msg_thread_done + && jcr->SD_msg_chan_started + && !pthread_equal(jcr->SD_msg_chan, pthread_self())) { + Dmsg1(800, "Send kill to SD msg chan jid=%d\n", jcr->JobId); + int cnt = 6; // 6*5sec + while (!jcr->sd_msg_thread_done && cnt>0) { + jcr->unlock(); + pthread_kill(jcr->SD_msg_chan, TIMEOUT_SIGNAL); + struct timeval tv; + struct timezone tz; + struct timespec timeout; + + gettimeofday(&tv, &tz); + timeout.tv_nsec = 0; + timeout.tv_sec = tv.tv_sec + 5; /* wait 5 seconds */ + Dmsg0(00, "I'm waiting for message thread termination.\n"); + P(mutex); + pthread_cond_timedwait(&jcr->term_wait, &mutex, &timeout); + V(mutex); + jcr->lock(); + cnt--; + } + } + jcr->unlock(); + } +} + +/* + * Send bootstrap file to Storage daemon. + * This is used for restore, verify VolumeToCatalog, migration, + * and copy Jobs. 
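Both waiting loops above build an absolute deadline from gettimeofday() and sleep on pthread_cond_timedwait() in 5-second slices so cancellation can be rechecked between waits. A stripped-down, runnable version of one such wait; nothing signals the condition here, so it simply times out:

    #include <pthread.h>
    #include <sys/time.h>
    #include <errno.h>
    #include <stdio.h>

    int main(void)
    {
       pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
       pthread_cond_t  cond  = PTHREAD_COND_INITIALIZER;
       struct timeval  tv;
       struct timespec timeout;

       gettimeofday(&tv, NULL);
       timeout.tv_sec  = tv.tv_sec + 5;    /* absolute deadline: now + 5 seconds */
       timeout.tv_nsec = tv.tv_usec * 1000;

       pthread_mutex_lock(&mutex);
       int rc = pthread_cond_timedwait(&cond, &mutex, &timeout);
       pthread_mutex_unlock(&mutex);

       if (rc == ETIMEDOUT) {
          printf("timed out as expected\n");
       } else {
          printf("woken up (rc=%d)\n", rc);
       }
       return 0;
    }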
+ */ +bool send_bootstrap_file(JCR *jcr, BSOCK *sd) +{ + FILE *bs; + char buf[1000]; + const char *bootstrap = "bootstrap\n"; + + Dmsg1(400, "send_bootstrap_file: %s\n", jcr->RestoreBootstrap); + if (!jcr->RestoreBootstrap) { + return true; + } + bs = bfopen(jcr->RestoreBootstrap, "rb"); + if (!bs) { + berrno be; + Jmsg(jcr, M_FATAL, 0, _("Could not open bootstrap file %s: ERR=%s\n"), + jcr->RestoreBootstrap, be.bstrerror()); + jcr->setJobStatus(JS_ErrorTerminated); + return false; + } + sd->fsend(bootstrap); + while (fgets(buf, sizeof(buf), bs)) { + sd->fsend("%s", buf); + } + sd->signal(BNET_EOD); + fclose(bs); + if (jcr->unlink_bsr) { + unlink(jcr->RestoreBootstrap); + jcr->unlink_bsr = false; + } + return true; +} + + +#ifdef needed +#define MAX_TRIES 30 +#define WAIT_TIME 2 +extern "C" void *device_thread(void *arg) +{ + int i; + JCR *jcr; + DEVICE *dev; + + + pthread_detach(pthread_self()); + jcr = new_control_jcr("*DeviceInit*", JT_SYSTEM); + for (i=0; i < MAX_TRIES; i++) { + if (!connect_to_storage_daemon(jcr, 10, 30, 1)) { + Dmsg0(900, "Failed connecting to SD.\n"); + continue; + } + LockRes(); + foreach_res(dev, R_DEVICE) { + if (!update_device_res(jcr, dev)) { + Dmsg1(900, "Error updating device=%s\n", dev->name()); + } else { + Dmsg1(900, "Updated Device=%s\n", dev->name()); + } + } + UnlockRes(); + free_bsock(jcr->store_bsock); + break; + + } + free_jcr(jcr); + return NULL; +} + +/* + * Start a thread to handle getting Device resource information + * from SD. This is called once at startup of the Director. + */ +void init_device_resources() +{ + int status; + pthread_t thid; + + Dmsg0(100, "Start Device thread.\n"); + if ((status=pthread_create(&thid, NULL, device_thread, NULL)) != 0) { + berrno be; + Jmsg1(NULL, M_ABORT, 0, _("Cannot create message thread: %s\n"), be.bstrerror(status)); + } +} +#endif diff --git a/src/dird/newvol.c b/src/dird/newvol.c new file mode 100644 index 00000000..4f7751b3 --- /dev/null +++ b/src/dird/newvol.c @@ -0,0 +1,171 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * Bacula Director -- newvol.c -- creates new Volumes in + * catalog Media table from the LabelFormat specification. + * + * Kern Sibbald, May MMI + * + * This routine runs as a thread and must be thread reentrant. + * + * Basic tasks done here: + * If possible create a new Media entry + * + */ + +#include "bacula.h" +#include "dird.h" + +/* Forward referenced functions */ +static bool create_simple_name(JCR *jcr, MEDIA_DBR *mr, POOL_DBR *pr); +static bool perform_full_name_substitution(JCR *jcr, MEDIA_DBR *mr, POOL_DBR *pr); + + +/* + * Automatic Volume name creation using the LabelFormat + * + * The media record must have the PoolId filled in when + * calling this routine. 
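When the LabelFormat contains no substitution characters, create_simple_name() below just appends a zero-padded counter, starting one past the highest existing MediaId, and skips any name that already exists. A standalone approximation; the "Vol-" format and starting id are example values only:

    #include <stdio.h>

    int main(void)
    {
       const char *label_format = "Vol-";
       long long max_media_id = 17;        /* would come from SELECT MAX(MediaId) */
       char volname[128];

       for (int i = (int)max_media_id + 1; i < (int)max_media_id + 100; i++) {
          snprintf(volname, sizeof(volname), "%s%04d", label_format, i);
          /* real code: skip this name if db_get_media_record() says it exists */
          break;                           /* first free name wins */
       }
       printf("new volume name: %s\n", volname);  /* Vol-0018 */
       return 0;
    }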
+ */ +bool newVolume(JCR *jcr, MEDIA_DBR *mr, STORE *store, POOL_MEM &errmsg) +{ + POOL_DBR pr; + + bmemset(&pr, 0, sizeof(pr)); + + /* See if we can create a new Volume */ + db_lock(jcr->db); + pr.PoolId = mr->PoolId; + + if (!db_get_pool_numvols(jcr, jcr->db, &pr)) { + goto bail_out; + } + + if (pr.MaxVols > 0 && pr.NumVols >= pr.MaxVols) { + Mmsg(errmsg, "Maximum Volumes exceeded for Pool %s", pr.Name); + Dmsg1(90, "Too many volumes for Pool %s\n", pr.Name); + goto bail_out; + } + + mr->clear(); + set_pool_dbr_defaults_in_media_dbr(mr, &pr); + jcr->VolumeName[0] = 0; + bstrncpy(mr->MediaType, jcr->wstore->media_type, sizeof(mr->MediaType)); + generate_plugin_event(jcr, bDirEventNewVolume); /* return void... */ + if (jcr->VolumeName[0] && is_volume_name_legal(NULL, jcr->VolumeName)) { + bstrncpy(mr->VolumeName, jcr->VolumeName, sizeof(mr->VolumeName)); + /* Check for special characters */ + } else if (pr.LabelFormat[0] && pr.LabelFormat[0] != '*') { + if (is_volume_name_legal(NULL, pr.LabelFormat)) { + /* No special characters, so apply simple algorithm */ + if (!create_simple_name(jcr, mr, &pr)) { + goto bail_out; + } + } else { /* try full substitution */ + /* Found special characters, so try substitution */ + if (!perform_full_name_substitution(jcr, mr, &pr)) { + goto bail_out; + } + if (!is_volume_name_legal(NULL, mr->VolumeName)) { + Mmsg(errmsg, _("Illegal character in Volume name")); + Jmsg(jcr, M_ERROR, 0, _("Illegal character in Volume name \"%s\"\n"), + mr->VolumeName); + goto bail_out; + } + } + } else { + goto bail_out; + } + pr.NumVols++; + mr->Enabled = 1; + set_storageid_in_mr(store, mr); + if (db_create_media_record(jcr, jcr->db, mr) && + db_update_pool_record(jcr, jcr->db, &pr)) { + Jmsg(jcr, M_INFO, 0, _("Created new Volume=\"%s\", Pool=\"%s\", MediaType=\"%s\" in catalog.\n"), + mr->VolumeName, pr.Name, mr->MediaType); + Dmsg1(90, "Created new Volume=%s\n", mr->VolumeName); + db_unlock(jcr->db); + return true; + } else { + Mmsg(errmsg, "%s", db_strerror(jcr->db)); + Jmsg(jcr, M_ERROR, 0, "%s", db_strerror(jcr->db)); + } + +bail_out: + db_unlock(jcr->db); + return false; +} + +static bool create_simple_name(JCR *jcr, MEDIA_DBR *mr, POOL_DBR *pr) +{ + char name[MAXSTRING]; + char num[20]; + db_int64_ctx ctx; + POOL_MEM query(PM_MESSAGE); + char ed1[50]; + + /* See if volume already exists */ + mr->VolumeName[0] = 0; + bstrncpy(name, pr->LabelFormat, sizeof(name)); + ctx.value = 0; + /* TODO: Remove Pool as it is not used in the query */ + Mmsg(query, "SELECT MAX(MediaId) FROM Media,Pool WHERE Pool.PoolId=%s", + edit_int64(pr->PoolId, ed1)); + if (!db_sql_query(jcr->db, query.c_str(), db_int64_handler, (void *)&ctx)) { + Jmsg(jcr, M_WARNING, 0, _("SQL failed, but ignored. ERR=%s\n"), db_strerror(jcr->db)); + ctx.value = pr->NumVols+1; + } + for (int i=(int)ctx.value+1; i<(int)ctx.value+100; i++) { + MEDIA_DBR tmr; + sprintf(num, "%04d", i); + bstrncpy(tmr.VolumeName, name, sizeof(tmr.VolumeName)); + bstrncat(tmr.VolumeName, num, sizeof(tmr.VolumeName)); + if (db_get_media_record(jcr, jcr->db, &tmr)) { + Jmsg(jcr, M_WARNING, 0, + _("Wanted to create Volume \"%s\", but it already exists. Trying again.\n"), + tmr.VolumeName); + continue; + } + bstrncpy(mr->VolumeName, name, sizeof(mr->VolumeName)); + bstrncat(mr->VolumeName, num, sizeof(mr->VolumeName)); + break; /* Got good name */ + } + if (mr->VolumeName[0] == 0) { + Jmsg(jcr, M_ERROR, 0, _("Too many failures. 
Giving up creating Volume name.\n")); + return false; + } + return true; +} + +/* + * Perform full substitution on Label + */ +static bool perform_full_name_substitution(JCR *jcr, MEDIA_DBR *mr, POOL_DBR *pr) +{ + bool ok = false; + POOLMEM *label = get_pool_memory(PM_FNAME); + jcr->NumVols = pr->NumVols; + if (variable_expansion(jcr, pr->LabelFormat, &label)) { + bstrncpy(mr->VolumeName, label, sizeof(mr->VolumeName)); + ok = true; + } + free_pool_memory(label); + return ok; +} diff --git a/src/dird/next_vol.c b/src/dird/next_vol.c new file mode 100644 index 00000000..c4aa7b28 --- /dev/null +++ b/src/dird/next_vol.c @@ -0,0 +1,515 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula Director -- next_vol -- handles finding the next + * volume for append. Split out of catreq.c August MMIII + * catalog request from the Storage daemon. + * + * Kern Sibbald, March MMI + */ + +#include "bacula.h" +#include "dird.h" + +static int const dbglvl = 50; /* debug level */ + +/* + * We setup the StorageId or StorageId group if it is + * an autochanger from the Storage and put it in + * the media record. + * store == NULL => use existing StorageId + */ +void set_storageid_in_mr(STORE *store, MEDIA_DBR *mr) +{ + if (store == NULL) { + /* Just use the plain (single) StorageId */ + mr->sid_group = edit_int64(mr->StorageId, mr->sid); + return; + } + + /* At this point we know store != NULL */ + mr->StorageId = store->StorageId; + /* Get to the parent of the autochanger (if any) */ + if (store->changer) { + store = store->changer; + mr->StorageId = store->StorageId; + } + /* Go to the master shared storage head (if any) */ + if (store->shared_storage && store->shared_storage->ac_group) { + store = store->shared_storage; + } + /* If it is an autochanger we should have an ac_group */ + if (store->autochanger && store->ac_group) { + /* Note we keep the StorageId of the local autochanger */ + mr->sid_group = store->ac_group; + } else { + /* Otherwise, we just use the plain (single) StorageId */ + mr->sid_group = edit_int64(mr->StorageId, mr->sid); + } +} + +static void add_volume_to_exclude_list(JCR *jcr, int index, MEDIA_DBR *mr) +{ + char ed1[50]; + if (index == 1) { + *jcr->next_vol_list = 0; + + } else if (*jcr->next_vol_list) { + pm_strcat(jcr->next_vol_list, ","); + } + pm_strcat(jcr->next_vol_list, edit_int64(mr->MediaId, ed1)); + + /* The list is valid only in find_next_volume_for_append() */ + mr->exclude_list = NULL; +} + +static void set_volume_to_exclude_list(JCR *jcr, int index, MEDIA_DBR *mr) +{ + if (index == 1) { + *jcr->next_vol_list = 0; + } + mr->exclude_list = jcr->next_vol_list; +} + +/* + * Items needed: + * mr.PoolId must be set + * mr.ScratchPoolId could be set (used if create==true) + * jcr->wstore + * jcr->db + * jcr->pool + * MEDIA_DBR mr with PoolId set + * create -- whether or not to create a new volume + */ +int find_next_volume_for_append(JCR *jcr, MEDIA_DBR *mr, int index, + bool create, bool 
prune, POOL_MEM &errmsg) +{ + int retry = 0; + bool ok; + bool InChanger; + STORE *store = jcr->wstore; + + bstrncpy(mr->MediaType, store->media_type, sizeof(mr->MediaType)); + Dmsg6(dbglvl, "find_next_vol_for_append: JobId=%u PoolId=%d, MediaType=%s index=%d create=%d prune=%d\n", + (uint32_t)jcr->JobId, (int)mr->PoolId, mr->MediaType, index, + create, prune); + /* + * If we are using an Autochanger, restrict Volume + * search to the Autochanger on the first pass + */ + InChanger = (store->autochanger)? true : false; + + /* Make sure we don't send two times the same volume in the same session */ + set_volume_to_exclude_list(jcr, index, mr); + + /* + * Find the Next Volume for Append + */ + db_lock(jcr->db); + for ( ;; ) { + pm_strcpy(errmsg, ""); + bstrncpy(mr->VolStatus, "Append", sizeof(mr->VolStatus)); /* want only appendable volumes */ + /* + * 1. Look for volume with "Append" status. + */ + set_storageid_in_mr(store, mr); /* put StorageId in new record */ + ok = db_find_next_volume(jcr, jcr->db, index, InChanger, mr); + + if (!ok) { + /* + * No volume found, apply algorithm + */ + Dmsg4(dbglvl, "after find_next_vol ok=%d index=%d InChanger=%d Vstat=%s\n", + ok, index, InChanger, mr->VolStatus); + /* + * 2. Try finding a recycled volume + */ + ok = find_recycled_volume(jcr, InChanger, mr, store); + set_storageid_in_mr(store, mr); /* put StorageId in new record */ + Dmsg2(dbglvl, "find_recycled_volume ok=%d FW=%d\n", ok, mr->FirstWritten); + if (!ok) { + /* + * 3. Try recycling any purged volume + */ + ok = recycle_oldest_purged_volume(jcr, InChanger, mr, store); + set_storageid_in_mr(store, mr); /* put StorageId in new record */ + if (!ok) { + /* + * 4. Try pruning Volumes + */ + if (prune) { + Dmsg0(dbglvl, "Call prune_volumes\n"); + prune_volumes(jcr, InChanger, mr, store); + } + ok = recycle_oldest_purged_volume(jcr, InChanger, mr, store); + set_storageid_in_mr(store, mr); /* put StorageId in new record */ + if (!ok && create) { + Dmsg4(dbglvl, "after prune volumes_vol ok=%d index=%d InChanger=%d Vstat=%s\n", + ok, index, InChanger, mr->VolStatus); + /* + * 5. Try pulling a volume from the Scratch pool + */ + ok = get_scratch_volume(jcr, InChanger, mr, store); + set_storageid_in_mr(store, mr); /* put StorageId in new record */ + Dmsg4(dbglvl, "after get scratch volume ok=%d index=%d InChanger=%d Vstat=%s\n", + ok, index, InChanger, mr->VolStatus); + } + /* + * If we are using an Autochanger and have not found + * a volume, retry looking for any volume. + */ + if (!ok && InChanger) { + InChanger = false; + continue; /* retry again accepting any volume */ + } + } + } + + + if (!ok && create) { + /* + * 6. Try "creating" a new Volume + */ + ok = newVolume(jcr, mr, store, errmsg); + } + /* + * Look at more drastic ways to find an Appendable Volume + */ + if (!ok && (jcr->pool->purge_oldest_volume || + jcr->pool->recycle_oldest_volume)) { + Dmsg2(dbglvl, "No next volume found. PurgeOldest=%d\n RecyleOldest=%d", + jcr->pool->purge_oldest_volume, jcr->pool->recycle_oldest_volume); + /* Find oldest volume to recycle */ + set_storageid_in_mr(store, mr); /* update storage id */ + ok = db_find_next_volume(jcr, jcr->db, -1, InChanger, mr); + set_storageid_in_mr(store, mr); /* update storageid */ + Dmsg1(dbglvl, "Find oldest=%d Volume\n", ok); + if (ok && prune) { + UAContext *ua; + Dmsg0(dbglvl, "Try purge Volume.\n"); + /* + * 7. Try to purging oldest volume only if not UA calling us. 
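The search above is essentially a chain of fallbacks tried in order (appendable, recycled, purged, prune-then-purged, scratch, newly created), with one extra pass that drops the autochanger restriction if the first pass found nothing. A deliberately simplified sketch of that shape, with stub strategies instead of Bacula's real lookup functions:

    #include <stdio.h>
    #include <stdbool.h>

    typedef bool (*vol_strategy)(bool in_changer);

    static bool find_append   (bool ic) { (void)ic; return false; } /* 1. Append volume   */
    static bool find_recycled (bool ic) { (void)ic; return false; } /* 2. Recycled volume */
    static bool recycle_purged(bool ic) { (void)ic; return false; } /* 3./4. prune+purged */
    static bool use_scratch   (bool ic) { (void)ic; return false; } /* 5. Scratch pool    */
    static bool create_new    (bool ic) { (void)ic; return true;  } /* 6. new Volume      */

    int main(void)
    {
       vol_strategy steps[] = { find_append, find_recycled, recycle_purged,
                                use_scratch, create_new };
       bool in_changer = true;             /* restrict first pass to the changer */
       bool ok = false;

       for (int pass = 0; pass < 2 && !ok; pass++) {
          for (unsigned i = 0; i < sizeof(steps)/sizeof(steps[0]) && !ok; i++) {
             ok = steps[i](in_changer);
          }
          in_changer = false;              /* second pass: accept any volume */
       }
       printf("found volume: %s\n", ok ? "yes" : "no");
       return 0;
    }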
+ */ + ua = new_ua_context(jcr); + if (jcr->pool->purge_oldest_volume && create) { + Jmsg(jcr, M_INFO, 0, _("Purging oldest volume \"%s\"\n"), mr->VolumeName); + ok = purge_jobs_from_volume(ua, mr); + /* + * 8. or try recycling the oldest volume + */ + } else if (jcr->pool->recycle_oldest_volume) { + Jmsg(jcr, M_INFO, 0, _("Pruning oldest volume \"%s\"\n"), mr->VolumeName); + ok = prune_volume(ua, mr); + } + free_ua_context(ua); + if (ok) { + ok = recycle_volume(jcr, mr); + Dmsg1(dbglvl, "Recycle after purge oldest=%d\n", ok); + } + } + } + } + Dmsg2(dbglvl, "VolJobs=%d FirstWritten=%d\n", mr->VolJobs, mr->FirstWritten); + if (ok) { + /* If we can use the volume, check if it is expired */ + if (has_volume_expired(jcr, mr)) { + if (retry++ < 200) { /* sanity check */ + continue; /* try again from the top */ + } else { + Jmsg(jcr, M_ERROR, 0, _( +"We seem to be looping trying to find the next volume. I give up.\n")); + ok = false; + } + } + } + break; + } /* end for loop */ + db_unlock(jcr->db); + Dmsg1(dbglvl, "return ok=%d find_next_vol\n", ok); + + /* We keep the record of all previous volumes requested */ + if (ok) { + add_volume_to_exclude_list(jcr, index, mr);; + } + return ok; +} + +/* + * Check if any time limits or use limits have expired + * if so, set the VolStatus appropriately. + */ +bool has_volume_expired(JCR *jcr, MEDIA_DBR *mr) +{ + bool expired = false; + char ed1[50]; + /* + * Check limits and expirations if "Append" and it has been used + * i.e. mr->VolJobs > 0 + * + */ + if (strcmp(mr->VolStatus, "Append") == 0 && mr->VolJobs > 0) { + /* First handle Max Volume Bytes */ + if ((mr->MaxVolBytes > 0 && mr->VolBytes >= mr->MaxVolBytes)) { + Jmsg(jcr, M_INFO, 0, _("Max Volume bytes=%s exceeded. " + "Marking Volume \"%s\" as Full.\n"), + edit_uint64_with_commas(mr->MaxVolBytes, ed1), mr->VolumeName); + bstrncpy(mr->VolStatus, "Full", sizeof(mr->VolStatus)); + expired = true; + + /* Now see if Volume should only be used once */ + } else if (mr->VolBytes > 0 && jcr->pool->use_volume_once) { + Jmsg(jcr, M_INFO, 0, _("Volume used once. " + "Marking Volume \"%s\" as Used.\n"), mr->VolumeName); + bstrncpy(mr->VolStatus, "Used", sizeof(mr->VolStatus)); + expired = true; + + /* Now see if Max Jobs written to volume */ + } else if (mr->MaxVolJobs > 0 && mr->MaxVolJobs <= mr->VolJobs) { + Jmsg(jcr, M_INFO, 0, _("Max Volume jobs=%s exceeded. " + "Marking Volume \"%s\" as Used.\n"), + edit_uint64_with_commas(mr->MaxVolJobs, ed1), mr->VolumeName); + Dmsg3(dbglvl, "MaxVolJobs=%d JobId=%d Vol=%s\n", mr->MaxVolJobs, + (uint32_t)jcr->JobId, mr->VolumeName); + bstrncpy(mr->VolStatus, "Used", sizeof(mr->VolStatus)); + expired = true; + + /* Now see if Max Files written to volume */ + } else if (mr->MaxVolFiles > 0 && mr->MaxVolFiles <= mr->VolFiles) { + Jmsg(jcr, M_INFO, 0, _("Max Volume files=%s exceeded. " + "Marking Volume \"%s\" as Used.\n"), + edit_uint64_with_commas(mr->MaxVolFiles, ed1), mr->VolumeName); + bstrncpy(mr->VolStatus, "Used", sizeof(mr->VolStatus)); + expired = true; + + /* Finally, check Use duration expiration */ + } else if (mr->VolUseDuration > 0) { + utime_t now = time(NULL); + /* See if Vol Use has expired */ + if (mr->VolUseDuration <= (now - mr->FirstWritten)) { + Jmsg(jcr, M_INFO, 0, _("Max configured use duration=%s sec. exceeded. 
" + "Marking Volume \"%s\" as Used.\n"), + edit_uint64_with_commas(mr->VolUseDuration, ed1), mr->VolumeName); + bstrncpy(mr->VolStatus, "Used", sizeof(mr->VolStatus)); + expired = true; + } + } + } + if (expired) { + /* Need to update media */ + Dmsg1(dbglvl, "Vol=%s has expired update media record\n", mr->VolumeName); + set_storageid_in_mr(NULL, mr); + if (!db_update_media_record(jcr, jcr->db, mr)) { + Jmsg(jcr, M_ERROR, 0, _("Catalog error updating volume \"%s\". ERR=%s"), + mr->VolumeName, db_strerror(jcr->db)); + } + } + Dmsg2(dbglvl, "Vol=%s expired=%d\n", mr->VolumeName, expired); + return expired; +} + +/* + * Try hard to recycle the current volume + * + * Returns: on failure - reason = NULL + * on success - reason - pointer to reason + */ +void check_if_volume_valid_or_recyclable(JCR *jcr, MEDIA_DBR *mr, const char **reason) +{ + int ok; + + *reason = NULL; + + /* Check if a duration or limit has expired */ + if (has_volume_expired(jcr, mr)) { + *reason = _("volume has expired"); + if (!mr->Recycle) { /* cannot recycle */ + return; + } + /* Keep going because we may be able to recycle volume */ + } + + /* + * Now see if we can use the volume as is + */ + if (strcmp(mr->VolStatus, "Append") == 0 || + strcmp(mr->VolStatus, "Recycle") == 0) { + *reason = NULL; + return; + } + + /* + * Check if the Volume is already marked for recycling + */ + if (strcmp(mr->VolStatus, "Purged") == 0) { + if (recycle_volume(jcr, mr)) { + Jmsg(jcr, M_INFO, 0, _("Recycled current volume \"%s\"\n"), mr->VolumeName); + *reason = NULL; + return; + } else { + /* In principle this shouldn't happen */ + *reason = _("and recycling of current volume failed"); + return; + } + } + + /* At this point, the volume is not valid for writing */ + *reason = _("but should be Append, Purged or Recycle"); + + /* + * What we're trying to do here is see if the current volume is + * "recyclable" - ie. if we prune all expired jobs off it, is + * it now possible to reuse it for the job that it is currently + * needed for? + */ + if (!mr->Recycle) { + *reason = _("volume has recycling disabled"); + return; + } + /* + * Check retention period from last written, but recycle to within + * a minute to try to catch close calls ... + */ + if ((mr->LastWritten + mr->VolRetention - 60) < (utime_t)time(NULL) + && jcr->pool->recycle_current_volume + && (strcmp(mr->VolStatus, "Full") == 0 || + strcmp(mr->VolStatus, "Used") == 0)) { + /* + * Attempt prune of current volume to see if we can + * recycle it for use. 
+ */ + UAContext *ua; + + ua = new_ua_context(jcr); + ok = prune_volume(ua, mr); + free_ua_context(ua); + + if (ok) { + /* If fully purged, recycle current volume */ + if (recycle_volume(jcr, mr)) { + Jmsg(jcr, M_INFO, 0, _("Recycled current volume \"%s\"\n"), mr->VolumeName); + *reason = NULL; + } else { + *reason = _("but should be Append, Purged or Recycle (recycling of the " + "current volume failed)"); + } + } else { + *reason = _("but should be Append, Purged or Recycle (cannot automatically " + "recycle current volume, as it still contains unpruned data " + "or the Volume Retention time has not expired.)"); + } + } +} + +static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; + +bool get_scratch_volume(JCR *jcr, bool InChanger, MEDIA_DBR *mr, + STORE *store) +{ + MEDIA_DBR smr; /* for searching scratch pool */ + POOL_DBR spr; + bool ok = false; + bool found = false; + + /* Only one thread at a time can pull from the scratch pool */ + P(mutex); + /* + * Get Pool record for Scratch Pool + * choose between ScratchPoolId and Scratch + * db_get_pool_numvols will first try ScratchPoolId, + * and then try the pool named Scratch + */ + bmemset(&spr, 0, sizeof(spr)); + bstrncpy(spr.Name, "Scratch", sizeof(spr.Name)); + spr.PoolId = mr->ScratchPoolId; + if (db_get_pool_record(jcr, jcr->db, &spr)) { + smr.PoolId = spr.PoolId; + bstrncpy(smr.VolStatus, "Append", sizeof(smr.VolStatus)); /* want only appendable volumes */ + bstrncpy(smr.MediaType, mr->MediaType, sizeof(smr.MediaType)); + + /* + * If we do not find a valid Scratch volume, try + * recycling any existing purged volumes, then + * try to take the oldest volume. + */ + set_storageid_in_mr(store, &smr); /* put StorageId in new record */ + if (db_find_next_volume(jcr, jcr->db, 1, InChanger, &smr)) { + found = true; + + } else if (find_recycled_volume(jcr, InChanger, &smr, store)) { + found = true; + + } else if (recycle_oldest_purged_volume(jcr, InChanger, &smr, store)) { + found = true; + } + + if (found) { + POOL_DBR pr; + POOL_MEM query(PM_MESSAGE); + + /* + * Get pool record where the Scratch Volume will go to ensure + * that we can add a Volume. + */ + bmemset(&pr, 0, sizeof(pr)); + bstrncpy(pr.Name, jcr->pool->name(), sizeof(pr.Name)); + + if (!db_get_pool_numvols(jcr, jcr->db, &pr)) { + Jmsg(jcr, M_WARNING, 0, _("Unable to get Pool record: ERR=%s"), + db_strerror(jcr->db)); + goto bail_out; + } + + /* Make sure there is room for another volume */ + if (pr.MaxVols > 0 && pr.NumVols >= pr.MaxVols) { + Jmsg(jcr, M_WARNING, 0, _("Unable add Scratch Volume, Pool \"%s\" full MaxVols=%d\n"), + jcr->pool->name(), pr.MaxVols); + goto bail_out; + } + + mr->copy(&smr); + set_storageid_in_mr(store, mr); + + /* Set default parameters from current pool */ + set_pool_dbr_defaults_in_media_dbr(mr, &pr); + + /* + * set_pool_dbr_defaults_in_media_dbr set VolStatus to Append, + * we could have Recycled media, also, we retain the old + * RecyclePoolId. + */ + bstrncpy(mr->VolStatus, smr.VolStatus, sizeof(smr.VolStatus)); + mr->RecyclePoolId = smr.RecyclePoolId; + + if (!db_update_media_record(jcr, jcr->db, mr)) { + Jmsg(jcr, M_WARNING, 0, _("Failed to move Scratch Volume. ERR=%s\n"), + db_strerror(jcr->db)); + goto bail_out; + } + + Jmsg(jcr, M_INFO, 0, _("Using Volume \"%s\" from '%s' %spool.\n"), + mr->VolumeName, spr.Name, + ((strcmp(spr.Name, "Scratch") == 0) ? 
"" : "Scratch ")); + + ok = true; + } + } +bail_out: + V(mutex); + return ok; +} diff --git a/src/dird/protos.h b/src/dird/protos.h new file mode 100644 index 00000000..41400461 --- /dev/null +++ b/src/dird/protos.h @@ -0,0 +1,361 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Director external function prototypes + */ + +/* admin.c */ +extern bool do_admin_init(JCR *jcr); +extern bool do_admin(JCR *jcr); +extern void admin_cleanup(JCR *jcr, int TermCode); + + +/* authenticate.c */ +extern bool authenticate_storage_daemon(JCR *jcr, STORE *store); +extern int authenticate_file_daemon(JCR *jcr); +extern int authenticate_user_agent(UAContext *ua); + +/* autoprune.c */ +extern void do_autoprune(JCR *jcr); +extern void prune_volumes(JCR *jcr, bool InChanger, MEDIA_DBR *mr, + STORE *store); + +/* autorecycle.c */ +extern bool recycle_oldest_purged_volume(JCR *jcr, bool InChanger, + MEDIA_DBR *mr, STORE *store); + +extern int recycle_volume(JCR *jcr, MEDIA_DBR *mr); +extern bool find_recycled_volume(JCR *jcr, bool InChanger, + MEDIA_DBR *mr, STORE *store); + +/* backup.c */ +extern int wait_for_job_termination(JCR *jcr, int timeout=0); +extern bool do_backup_init(JCR *jcr); +extern bool do_backup(JCR *jcr); +extern void backup_cleanup(JCR *jcr, int TermCode); +extern void update_bootstrap_file(JCR *jcr); +extern bool send_accurate_current_files(JCR *jcr); +extern char *get_storage_address(CLIENT *cli, STORE *store); +extern bool run_storage_and_start_message_thread(JCR *jcr, BSOCK *sd); +extern bool send_client_addr_to_sd(JCR *jcr); +extern bool send_store_addr_to_fd(JCR *jcr, STORE *store, + char *store_address, uint32_t store_port); + +/* vbackup.c */ +extern bool do_vbackup_init(JCR *jcr); +extern bool do_vbackup(JCR *jcr); +extern void vbackup_cleanup(JCR *jcr, int TermCode); + + +/* bsr.c */ +RBSR *new_bsr(); +rblist *create_bsr_list(uint32_t JobId, int findex, int findex2); +void free_bsr(rblist *bsr_list); +bool complete_bsr(UAContext *ua, rblist *bsr_list); +uint32_t write_bsr_file(UAContext *ua, RESTORE_CTX &rx); +void display_bsr_info(UAContext *ua, RESTORE_CTX &rx); +void add_findex(rblist *bsr_list, uint32_t JobId, int32_t findex); +void add_findex_all(rblist *bsr_list, uint32_t JobId, const char *fileregex); +RBSR_FINDEX *new_findex(); +void make_unique_restore_filename(UAContext *ua, POOLMEM **fname); +void print_bsr(UAContext *ua, RESTORE_CTX &rx); + + +/* catreq.c */ +extern void catalog_request(JCR *jcr, BSOCK *bs); +extern void catalog_update(JCR *jcr, BSOCK *bs); +extern bool despool_attributes_from_file(JCR *jcr, const char *file); +extern void remove_dummy_jobmedia_records(JCR *jcr); + +/* dird_conf.c */ +extern char *level_to_str(char *buf, int len, int level); +extern "C" char *job_code_callback_director(JCR *jcr, const char*, char *, int); + +/* expand.c */ +int variable_expansion(JCR *jcr, char *inp, POOLMEM **exp); + + +/* fd_cmds.c */ +extern int connect_to_file_daemon(JCR *jcr, int 
retry_interval, + int max_retry_time, int verbose); +extern bool send_ls_fileset(JCR *jcr, const char *path); +extern bool send_ls_plugin_fileset(JCR *jcr, const char *plugin, const char *path); +extern bool send_include_list(JCR *jcr); +extern bool send_exclude_list(JCR *jcr); +extern bool send_level_command(JCR *jcr); +extern bool send_bwlimit(JCR *jcr, const char *Job); +extern int get_attributes_and_put_in_catalog(JCR *jcr); +extern void get_attributes_and_compare_to_catalog(JCR *jcr, JobId_t JobId); +extern int put_file_into_catalog(JCR *jcr, long file_index, char *fname, + char *link, char *attr, int stream); +extern void get_level_since_time(JCR *jcr, char *since, int since_len); +extern int send_runscripts_commands(JCR *jcr); +extern bool send_restore_objects(JCR *jcr); +extern bool send_component_info(JCR *jcr); + +/* getmsg.c */ +enum e_prtmsg { + DISPLAY_ERROR, + NO_DISPLAY +}; +extern bool response(JCR *jcr, BSOCK *fd, char *resp, const char *cmd, e_prtmsg prtmsg); + +/* job.c */ +extern bool allow_duplicate_job(JCR *jcr); +extern void set_jcr_defaults(JCR *jcr, JOB *job); +extern void create_unique_job_name(JCR *jcr, const char *base_name); +extern void update_job_end_record(JCR *jcr); +extern bool get_or_create_client_record(JCR *jcr); +extern bool get_or_create_fileset_record(JCR *jcr); +extern DBId_t get_or_create_pool_record(JCR *jcr, char *pool_name); +extern void apply_pool_overrides(JCR *jcr); +extern bool apply_wstorage_overrides(JCR *jcr, POOL *original_pool); +extern JobId_t run_job(JCR *jcr); +extern JobId_t resume_job(JCR *jcr, JOB_DBR *jr); +extern bool cancel_job(UAContext *ua, JCR *jcr, int wait, bool cancel=true); +extern int cancel_inactive_job(UAContext *ua); +extern void get_job_storage(USTORE *store, JOB *job, RUN *run); +extern void init_jcr_job_record(JCR *jcr); +extern void update_job_end(JCR *jcr, int TermCode); +extern void copy_rwstorage(JCR *jcr, alist *storage, const char *where); +extern void set_rwstorage(JCR *jcr, USTORE *store); +extern void free_rwstorage(JCR *jcr); +extern void copy_wstorage(JCR *jcr, alist *storage, const char *where); +extern void set_wstorage(JCR *jcr, USTORE *store); +extern void free_wstorage(JCR *jcr); +extern void copy_rstorage(JCR *jcr, alist *storage, const char *where); +extern void set_rstorage(JCR *jcr, USTORE *store); +extern void free_rstorage(JCR *jcr); +extern bool setup_job(JCR *jcr); +extern void create_clones(JCR *jcr); +extern int create_restore_bootstrap_file(JCR *jcr); +extern int create_restore_bootstrap_file(JCR *jcr, JobId_t jobid, int findex1, int findex2); +extern void dird_free_jcr(JCR *jcr); +extern void dird_free_jcr_pointers(JCR *jcr); +extern void cancel_storage_daemon_job(JCR *jcr); +extern bool run_console_command(JCR *jcr, const char *cmd); +extern void sd_msg_thread_send_signal(JCR *jcr, int sig); +void terminate_sd_msg_chan_thread(JCR *jcr); + +/* jobq.c */ +extern bool inc_read_store(JCR *jcr); +extern void dec_read_store(JCR *jcr); + +/* mac.c */ +extern bool do_mac(JCR *jcr); +extern bool do_mac_init(JCR *jcr); +extern void mac_cleanup(JCR *jcr, int TermCode, int writeTermCode); +extern bool set_mac_wstorage(UAContext *ua, JCR *jcr, POOL *pool, + POOL *next_pool, const char *source); + + +/* mountreq.c */ +extern void mount_request(JCR *jcr, BSOCK *bs, char *buf); + +/* msgchan.c */ +extern BSOCK *open_sd_bsock(UAContext *ua); +extern void close_sd_bsock(UAContext *ua); +extern bool connect_to_storage_daemon(JCR *jcr, int retry_interval, + int max_retry_time, int verbose); +extern bool 
start_storage_daemon_job(JCR *jcr, alist *rstore, alist *wstore, + bool send_bsr=false); +extern bool start_storage_daemon_message_thread(JCR *jcr); +extern int bget_dirmsg(BSOCK *bs); +extern void wait_for_storage_daemon_termination(JCR *jcr); +extern bool send_bootstrap_file(JCR *jcr, BSOCK *sd); + +/* next_vol.c */ +int find_next_volume_for_append(JCR *jcr, MEDIA_DBR *mr, int index, + bool create, bool purge, POOL_MEM &errmsg); +void set_storageid_in_mr(STORE *store, MEDIA_DBR *mr); +bool has_volume_expired(JCR *jcr, MEDIA_DBR *mr); +void check_if_volume_valid_or_recyclable(JCR *jcr, MEDIA_DBR *mr, const char **reason); +bool get_scratch_volume(JCR *jcr, bool InChanger, MEDIA_DBR *mr, + STORE *store); + +/* newvol.c */ +bool newVolume(JCR *jcr, MEDIA_DBR *mr, STORE *store, POOL_MEM &errmsg); + +/* restore.c */ +extern bool do_restore(JCR *jcr); +extern bool do_restore_init(JCR *jcr); +extern void restore_cleanup(JCR *jcr, int TermCode); + + +/* ua_acl.c */ +bool acl_access_ok(UAContext *ua, int acl, const char *item); +bool acl_access_ok(UAContext *ua, int acl, const char *item, int len); +bool have_restricted_acl(UAContext *ua, int acl); +bool acl_access_client_ok(UAContext *ua, const char *name, int32_t jobtype); + +/* ua_cmds.c */ +bool get_uid_gid_from_acl(UAContext *ua, alist **uid, alist **gid, alist **dir); +bool do_a_command(UAContext *ua); +bool do_a_dot_command(UAContext *ua); +int qmessagescmd(UAContext *ua, const char *cmd); +bool open_new_client_db(UAContext *ua); +bool open_client_db(UAContext *ua); +bool open_db(UAContext *ua); +void close_db(UAContext *ua); +int cloud_volumes_cmd(UAContext *ua, const char *cmd, const char *mode); +enum e_pool_op { + POOL_OP_UPDATE, + POOL_OP_CREATE +}; +int create_pool(JCR *jcr, BDB *db, POOL *pool, e_pool_op op); +void set_pool_dbr_defaults_in_media_dbr(MEDIA_DBR *mr, POOL_DBR *pr); +bool set_pooldbr_references(JCR *jcr, BDB *db, POOL_DBR *pr, POOL *pool); +void set_pooldbr_from_poolres(POOL_DBR *pr, POOL *pool, e_pool_op op); +int update_pool_references(JCR *jcr, BDB *db, POOL *pool); + +/* ua_input.c */ +bool get_cmd(UAContext *ua, const char *prompt, bool subprompt=false); +bool get_selection_list(UAContext *ua, sellist &sl, const char *prompt, bool subprompt=false); +bool get_pint(UAContext *ua, const char *prompt); +bool get_yesno(UAContext *ua, const char *prompt); +bool is_yesno(char *val, int *ret); +int get_enabled(UAContext *ua, const char *val); +void parse_ua_args(UAContext *ua); +bool is_comment_legal(UAContext *ua, const char *name); + +/* ua_label.c */ +bool is_volume_name_legal(UAContext *ua, const char *name); +int get_num_drives_from_SD(UAContext *ua); +void update_slots(UAContext *ua); + +/* ua_update.c */ +void update_vol_pool(UAContext *ua, char *val, MEDIA_DBR *mr, POOL_DBR *opr); + +/* ua_output.c */ +void prtit(void *ctx, const char *msg); +bool complete_jcr_for_job(JCR *jcr, JOB *job, POOL *pool); +RUN *find_next_run(RUN *run, JOB *job, utime_t &runtime, int ndays); +bool acl_access_jobid_ok(UAContext *ua, const char *jobids); + +/* ua_restore.c */ +void find_storage_resource(UAContext *ua, RESTORE_CTX &rx, char *Storage, char *MediaType); +bool insert_table_into_findex_list(UAContext *ua, RESTORE_CTX *rx, char *table); +void new_rx(RESTORE_CTX *rx); +void free_rx(RESTORE_CTX *rx); + +/* ua_server.c */ +void bsendmsg(void *ua_ctx, const char *fmt, ...); +void berrormsg(void *ua_ctx, const char *fmt, ...); +void bwarningmsg(void *ua_ctx, const char *fmt, ...); +void binfomsg(void *ua_ctx, const char *fmt, ...); 
+UAContext *new_ua_context(JCR *jcr); +JCR *new_control_jcr(const char *base_name, int job_type); +void free_ua_context(UAContext *ua); + +/* ua_select.c */ +STORE *select_storage_resource(UAContext *ua, bool unique=false); +JOB *select_job_resource(UAContext *ua); +JOB *select_enable_disable_job_resource(UAContext *ua, bool enable); +JOB *select_restore_job_resource(UAContext *ua); +CLIENT *select_enable_disable_client_resource(UAContext *ua, bool enable); +CLIENT *select_client_resource(UAContext *ua, int32_t jobtype); +FILESET *select_fileset_resource(UAContext *ua); +SCHED *select_enable_disable_schedule_resource(UAContext *ua, bool enable); +int select_pool_and_media_dbr(UAContext *ua, POOL_DBR *pr, MEDIA_DBR *mr); +int select_media_dbr(UAContext *ua, MEDIA_DBR *mr); +bool select_pool_dbr(UAContext *ua, POOL_DBR *pr, const char *argk="pool"); +bool select_client_dbr(UAContext *ua, CLIENT_DBR *cr, int32_t jobtype); + +void start_prompt(UAContext *ua, const char *msg); +void add_prompt(UAContext *ua, const char *prompt, char *unique=NULL); +int do_prompt(UAContext *ua, const char *automsg, const char *msg, char *prompt, int max_prompt); +int do_alist_prompt(UAContext *ua, const char *automsg, const char *msg, + alist *selected); +CAT *get_catalog_resource(UAContext *ua); +STORE *get_storage_resource(UAContext *ua, bool use_default, bool unique=false); +int get_storage_drive(UAContext *ua, STORE *store); +int get_storage_slot(UAContext *ua, STORE *store); +int get_media_type(UAContext *ua, char *MediaType, int max_media); +bool get_pool_dbr(UAContext *ua, POOL_DBR *pr, const char *argk="pool"); +bool get_client_dbr(UAContext *ua, CLIENT_DBR *cr, int32_t jobtype); +POOL *get_pool_resource(UAContext *ua); +JOB *get_restore_job(UAContext *ua); +POOL *select_pool_resource(UAContext *ua); +int select_running_jobs(UAContext *ua, alist *jcrs, const char *reason); +CLIENT *get_client_resource(UAContext *ua, int32_t jobtype); +int get_job_dbr(UAContext *ua, JOB_DBR *jr); + +int find_arg_keyword(UAContext *ua, const char **list); +int find_arg(UAContext *ua, const char *keyword); +int find_arg_with_value(UAContext *ua, const char *keyword); +int do_keyword_prompt(UAContext *ua, const char *msg, const char **list); +int confirm_retention(UAContext *ua, utime_t *ret, const char *msg); +int confirm_retention_yesno(UAContext *ua, utime_t ret, const char *msg); +bool get_level_from_name(JCR *jcr, const char *level_name); +int get_level_code_from_name(const char *level_name); +int scan_storage_cmd(UAContext *ua, const char *cmd, + bool allfrompool, + int *drive, MEDIA_DBR *mr, POOL_DBR *pr, + const char **action, char *storage, int *nb, uint32_t **results); + + +/* ua_status.c */ +void list_dir_status_header(UAContext *ua); + +/* ua_tree.c */ +bool user_select_files_from_tree(TREE_CTX *tree); +int insert_tree_handler(void *ctx, int num_fields, char **row); +bool check_directory_acl(char **last_dir, alist *dir_acl, const char *path); + +/* ua_prune.c */ +int prune_files(UAContext *ua, CLIENT *client, POOL *pool); +int prune_jobs(UAContext *ua, CLIENT *client, POOL *pool, int JobType); +int prune_stats(UAContext *ua, utime_t retention); +bool prune_volume(UAContext *ua, MEDIA_DBR *mr); +int job_delete_handler(void *ctx, int num_fields, char **row); +int del_count_handler(void *ctx, int num_fields, char **row); +int file_delete_handler(void *ctx, int num_fields, char **row); +int get_prune_list_for_volume(UAContext *ua, MEDIA_DBR *mr, del_ctx *del); +int exclude_running_jobs_from_list(del_ctx *prune_list); 
+ +/* ua_purge.c */ +bool is_volume_purged(UAContext *ua, MEDIA_DBR *mr, bool force=false); +bool mark_media_purged(UAContext *ua, MEDIA_DBR *mr); +void purge_files_from_volume(UAContext *ua, MEDIA_DBR *mr); +bool purge_jobs_from_volume(UAContext *ua, MEDIA_DBR *mr, bool force=false); +void purge_files_from_jobs(UAContext *ua, char *jobs); +void purge_jobs_from_catalog(UAContext *ua, char *jobs); +void purge_job_list_from_catalog(UAContext *ua, del_ctx &del); +void purge_files_from_job_list(UAContext *ua, del_ctx &del); + + +/* ua_run.c */ +extern int run_cmd(UAContext *ua, const char *cmd); +extern int restart_cmd(UAContext *ua, const char *cmd); +extern bool check_pool(int32_t JobType, int32_t JobLevel, POOL *pool, POOL *nextpool, const char **name); + +/* verify.c */ +extern bool do_verify(JCR *jcr); +extern bool do_verify_init(JCR *jcr); +extern void verify_cleanup(JCR *jcr, int TermCode); + +/* snapshot.c */ +int select_snapshot_dbr(UAContext *ua, SNAPSHOT_DBR *sr); +void snapshot_list(UAContext *ua, int i, DB_LIST_HANDLER *sendit, e_list_type llist); +int snapshot_cmd(UAContext *ua, const char *cmd); +int snapshot_catreq(JCR *jcr, BSOCK *bs); +int delete_snapshot(UAContext *ua); +bool update_snapshot(UAContext *ua); +int prune_snapshot(UAContext *ua); +bool send_snapshot_retention(JCR *jcr, utime_t val); diff --git a/src/dird/query.sql b/src/dird/query.sql new file mode 100644 index 00000000..d04cb8c0 --- /dev/null +++ b/src/dird/query.sql @@ -0,0 +1,7 @@ +# +# See the file /examples/sample-query.sql +# for some sample queries. +# +# 1 +:The default file is empty, see sample-query.sql (in /opt/bacula/scripts or /examples) for samples +SELECT 'See sample-query.sql (in /opt/bacula/scripts or /examples) for samples' AS Info; diff --git a/src/dird/recycle.c b/src/dird/recycle.c new file mode 100644 index 00000000..39e45a99 --- /dev/null +++ b/src/dird/recycle.c @@ -0,0 +1,80 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Bacula Director -- Automatic Recycling of Volumes + * Recycles Volumes that have been purged + * + * Kern Sibbald, May MMII + */ + + +#include "bacula.h" +#include "dird.h" +#include "ua.h" + +/* Forward referenced functions */ + +bool find_recycled_volume(JCR *jcr, bool InChanger, MEDIA_DBR *mr, + STORE *store) +{ + bstrncpy(mr->VolStatus, "Recycle", sizeof(mr->VolStatus)); + set_storageid_in_mr(store, mr); + if (db_find_next_volume(jcr, jcr->db, 1, InChanger, mr)) { + jcr->MediaId = mr->MediaId; + Dmsg1(20, "Find_next_vol MediaId=%lu\n", jcr->MediaId); + pm_strcpy(jcr->VolumeName, mr->VolumeName); + set_storageid_in_mr(store, mr); + return true; + } + return false; +} + +/* + * Look for oldest Purged volume + */ +bool recycle_oldest_purged_volume(JCR *jcr, bool InChanger, + MEDIA_DBR *mr, STORE *store) +{ + bstrncpy(mr->VolStatus, "Purged", sizeof(mr->VolStatus)); + if (db_find_next_volume(jcr, jcr->db, 1, InChanger, mr)) { + set_storageid_in_mr(store, mr); + if (recycle_volume(jcr, mr)) { + Jmsg(jcr, M_INFO, 0, _("Recycled volume \"%s\"\n"), mr->VolumeName); + Dmsg1(100, "return 1 recycle_oldest_purged_volume Vol=%s\n", mr->VolumeName); + return true; + } + } + Dmsg0(100, "return 0 recycle_oldest_purged_volume end\n"); + return false; +} + +/* + * Recycle the specified volume + */ +int recycle_volume(JCR *jcr, MEDIA_DBR *mr) +{ + bstrncpy(mr->VolStatus, "Recycle", sizeof(mr->VolStatus)); + mr->VolJobs = mr->VolFiles = mr->VolBlocks = mr->VolErrors = 0; + mr->VolBytes = 1; + mr->FirstWritten = mr->LastWritten = 0; + mr->RecycleCount++; + mr->set_first_written = true; + set_storageid_in_mr(NULL, mr); + return db_update_media_record(jcr, jcr->db, mr); +} diff --git a/src/dird/restore.c b/src/dird/restore.c new file mode 100644 index 00000000..1ba17ea8 --- /dev/null +++ b/src/dird/restore.c @@ -0,0 +1,755 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2015 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/** + * Bacula Director -- restore.c -- responsible for restoring files + * + * Written by Kern Sibbald, November MM + * + * This routine is run as a separate thread. + * + * Current implementation is Catalog verification only (i.e. no + * verification versus tape). + * + * Basic tasks done here: + * Open DB + * Open Message Channel with Storage daemon to tell him a job will be starting. + * Open connection with File daemon and pass him commands + * to do the restore. + * Update the DB according to what files where restored???? 
+ * + */ + + +#include "bacula.h" +#include "dird.h" +#include "lib/ini.h" + +/* Imported variables */ +extern struct s_kw ReplaceOptions[]; + +/* Commands sent to File daemon */ +static char restorecmd[] = "restore %sreplace=%c prelinks=%d where=%s\n"; +static char restorecmdR[] = "restore %sreplace=%c prelinks=%d regexwhere=%s\n"; +static char storaddr[] = "storage address=%s port=%d ssl=%d Authorization=%s\n"; + +/* Responses received from File daemon */ +static char OKrestore[] = "2000 OK restore\n"; +static char OKstore[] = "2000 OK storage\n"; +static char OKstoreend[] = "2000 OK storage end\n"; + +/* Responses received from the Storage daemon */ +static char OKbootstrap[] = "3000 OK bootstrap\n"; + +static void get_restore_params(JCR *jcr, POOL_MEM &ret_where, char *ret_replace, char **ret_restorecmd) +{ + char replace, *where, *cmd; + char empty = '\0'; + + /* Build the restore command */ + + if (jcr->replace != 0) { + replace = jcr->replace; + } else if (jcr->job->replace != 0) { + replace = jcr->job->replace; + } else { + replace = REPLACE_ALWAYS; /* always replace */ + } + + if (jcr->RegexWhere) { + where = jcr->RegexWhere; /* override */ + cmd = restorecmdR; + } else if (jcr->job->RegexWhere) { + where = jcr->job->RegexWhere; /* no override take from job */ + cmd = restorecmdR; + + } else if (jcr->where) { + where = jcr->where; /* override */ + cmd = restorecmd; + } else if (jcr->job->RestoreWhere) { + where = jcr->job->RestoreWhere; /* no override take from job */ + cmd = restorecmd; + + } else { /* nothing was specified */ + where = ∅ /* use default */ + cmd = restorecmd; + } + + pm_strcpy(ret_where, where); /* Where can be a local variable */ + if (ret_replace) { + *ret_replace = replace; + } + if (ret_restorecmd) { + *ret_restorecmd = cmd; + } +} + +static void build_restore_command(JCR *jcr, POOL_MEM &ret) +{ + POOL_MEM where; + char replace; + char *cmd; + char files[100]; + + get_restore_params(jcr, where, &replace, &cmd); + + jcr->prefix_links = jcr->job->PrefixLinks; + + bash_spaces(where.c_str()); + if (jcr->FDVersion < 7) { + Mmsg(ret, cmd, "", replace, jcr->prefix_links, where.c_str()); + } else { + snprintf(files, sizeof(files), "files=%d ", jcr->ExpectedFiles); + Mmsg(ret, cmd, files, replace, jcr->prefix_links, where.c_str()); + } + unbash_spaces(where.c_str()); +} + +struct bootstrap_info +{ + FILE *bs; + UAContext *ua; + char storage[MAX_NAME_LENGTH+1]; +}; + +#define UA_CMD_SIZE 1000 + +/** + * Open the bootstrap file and find the first Storage= + * Returns ok if able to open + * It fills the storage name (should be the first line) + * and the file descriptor to the bootstrap file, + * it should be used for next operations, and need to be closed + * at the end. 
+ */ +static bool open_bootstrap_file(JCR *jcr, bootstrap_info &info) +{ + FILE *bs; + UAContext *ua; + info.bs = NULL; + info.ua = NULL; + + if (!jcr->RestoreBootstrap) { + return false; + } + strncpy(info.storage, jcr->rstore->name(), MAX_NAME_LENGTH); + + bs = bfopen(jcr->RestoreBootstrap, "rb"); + if (!bs) { + berrno be; + Jmsg(jcr, M_FATAL, 0, _("Could not open bootstrap file %s: ERR=%s\n"), + jcr->RestoreBootstrap, be.bstrerror()); + jcr->setJobStatus(JS_ErrorTerminated); + return false; + } + + ua = new_ua_context(jcr); + ua->cmd = check_pool_memory_size(ua->cmd, UA_CMD_SIZE+1); + while (!fgets(ua->cmd, UA_CMD_SIZE, bs)) { + parse_ua_args(ua); + if (ua->argc != 1) { + continue; + } + if (!strcasecmp(ua->argk[0], "Storage")) { + strncpy(info.storage, ua->argv[0], MAX_NAME_LENGTH); + break; + } + } + info.bs = bs; + info.ua = ua; + fseek(bs, 0, SEEK_SET); /* return to the top of the file */ + return true; +} + +/** + * This function compare the given storage name with the + * the current one. We compare the name and the address:port. + * Returns true if we use the same storage. + */ +static bool is_on_same_storage(JCR *jcr, char *new_one) +{ + STORE *new_store; + + /* with old FD, we send the whole bootstrap to the storage */ + if (jcr->FDVersion < 2) { + return true; + } + /* we are in init loop ? shoudn't fail here */ + if (!*new_one) { + return true; + } + /* same name */ + if (!strcmp(new_one, jcr->rstore->name())) { + return true; + } + new_store = (STORE *)GetResWithName(R_STORAGE, new_one); + if (!new_store) { + Jmsg(jcr, M_WARNING, 0, + _("Could not get storage resource '%s'.\n"), new_one); + /* If not storage found, use last one */ + return true; + } + /* if Port and Hostname/IP are same, we are talking to the same + * Storage Daemon + */ + if (jcr->rstore->SDport != new_store->SDport || + strcmp(jcr->rstore->address, new_store->address)) + { + return false; + } + return true; +} + +/** + * Check if the current line contains Storage="xxx", and compare the + * result to the current storage. We use UAContext to analyse the bsr + * string. + * + * Returns true if we need to change the storage, and it set the new + * Storage resource name in "storage" arg. + */ +static bool check_for_new_storage(JCR *jcr, bootstrap_info &info) +{ + UAContext *ua = info.ua; + parse_ua_args(ua); + if (ua->argc != 1) { + return false; + } + if (!strcasecmp(ua->argk[0], "Storage")) { + /* Continue if this is a volume from the same storage. */ + if (is_on_same_storage(jcr, ua->argv[0])) { + return false; + } + /* note the next storage name */ + strncpy(info.storage, ua->argv[0], MAX_NAME_LENGTH); + Dmsg1(5, "Change storage to %s\n", info.storage); + return true; + } + return false; +} + +/** + * Send bootstrap file to Storage daemon section by section. + */ +static bool send_bootstrap_file(JCR *jcr, BSOCK *sock, + bootstrap_info &info) +{ + boffset_t pos; + const char *bootstrap = "bootstrap\n"; + UAContext *ua = info.ua; + FILE *bs = info.bs; + + Dmsg1(400, "send_bootstrap_file: %s\n", jcr->RestoreBootstrap); + if (!jcr->RestoreBootstrap) { + return false; + } + sock->fsend(bootstrap); + pos = ftello(bs); + while(fgets(ua->cmd, UA_CMD_SIZE, bs)) { + if (check_for_new_storage(jcr, info)) { + /* Otherwise, we need to contact another storage daemon. + * Reset bs to the beginning of the current segment. 
+ */ + fseeko(bs, pos, SEEK_SET); + break; + } + sock->fsend("%s", ua->cmd); + pos = ftello(bs); + } + sock->signal(BNET_EOD); + return true; +} + +#define MAX_TRIES 6 * 360 /* 6 hours */ + +/** + * Change the read storage resource for the current job. + */ +static bool select_rstore(JCR *jcr, bootstrap_info &info) +{ + USTORE ustore; + int i; + + + if (!strcmp(jcr->rstore->name(), info.storage)) { + return true; /* same SD nothing to change */ + } + + if (!(ustore.store = (STORE *)GetResWithName(R_STORAGE,info.storage))) { + Jmsg(jcr, M_FATAL, 0, + _("Could not get storage resource '%s'.\n"), info.storage); + jcr->setJobStatus(JS_ErrorTerminated); + return false; + } + + /* + * This releases the store_bsock between calls to the SD. + * I think. + */ + free_bsock(jcr->store_bsock); + + /* + * release current read storage and get a new one + */ + dec_read_store(jcr); + free_rstorage(jcr); + set_rstorage(jcr, &ustore); + jcr->setJobStatus(JS_WaitSD); + /* + * Wait for up to 6 hours to increment read stoage counter + */ + for (i=0; i < MAX_TRIES; i++) { + /* try to get read storage counter incremented */ + if (inc_read_store(jcr)) { + jcr->setJobStatus(JS_Running); + return true; + } + bmicrosleep(10, 0); /* sleep 10 secs */ + if (job_canceled(jcr)) { + free_rstorage(jcr); + return false; + } + } + /* Failed to inc_read_store() */ + free_rstorage(jcr); + Jmsg(jcr, M_FATAL, 0, + _("Could not acquire read storage lock for \"%s\""), info.storage); + return false; +} + +/* + * Clean the bootstrap_info struct + */ +static void close_bootstrap_file(bootstrap_info &info) +{ + if (info.bs) { + fclose(info.bs); + } + if (info.ua) { + free_ua_context(info.ua); + } +} + +/** + * The bootstrap is stored in a file, so open the file, and loop + * through it processing each storage device in turn. If the + * storage is different from the prior one, we open a new connection + * to the new storage and do a restore for that part. + * This permits handling multiple storage daemons for a single + * restore. E.g. your Full is stored on tape, and Incrementals + * on disk. + */ +bool restore_bootstrap(JCR *jcr) +{ + int tls_need = BNET_TLS_NONE; + BSOCK *fd = NULL; + BSOCK *sd; + char *store_address; + uint32_t store_port; + bool first_time = true; + bootstrap_info info; + POOL_MEM restore_cmd(PM_MESSAGE); + bool ret = false; + + + /* Open the bootstrap file */ + if (!open_bootstrap_file(jcr, info)) { + goto bail_out; + } + /* Read the bootstrap file */ + while (!feof(info.bs)) { + + if (!select_rstore(jcr, info)) { + goto bail_out; + } + + /** + * Open a message channel connection with the Storage + * daemon. This is to let him know that our client + * will be contacting him for a backup session. 
+ * + */ + Dmsg0(10, "Open connection with storage daemon\n"); + jcr->setJobStatus(JS_WaitSD); + /* + * Start conversation with Storage daemon + */ + if (!connect_to_storage_daemon(jcr, 10, SDConnectTimeout, 1)) { + goto bail_out; + } + sd = jcr->store_bsock; + /* + * Now start a job with the Storage daemon + */ + if (!start_storage_daemon_job(jcr, jcr->rstorage, NULL)) { + goto bail_out; + } + + if (first_time) { + /* + * Start conversation with File daemon + */ + jcr->setJobStatus(JS_WaitFD); + jcr->keep_sd_auth_key = true; /* don't clear the sd_auth_key now */ + if (!connect_to_file_daemon(jcr, 10, FDConnectTimeout, 1)) { + goto bail_out; + } + fd = jcr->file_bsock; + build_restore_command(jcr, restore_cmd); + } + + jcr->setJobStatus(JS_Running); + + /* + * Send the bootstrap file -- what Volumes/files to restore + */ + if (!send_bootstrap_file(jcr, sd, info) || + !response(jcr, sd, OKbootstrap, "Bootstrap", DISPLAY_ERROR)) { + goto bail_out; + } + + if (jcr->sd_calls_client) { + /* + * SD must call "client" i.e. FD + */ + if (jcr->FDVersion < 10) { + Jmsg(jcr, M_FATAL, 0, _("The File daemon does not support SDCallsClient.\n")); + goto bail_out; + } + if (!send_client_addr_to_sd(jcr)) { + goto bail_out; + } + if (!run_storage_and_start_message_thread(jcr, sd)) { + goto bail_out; + } + + store_address = jcr->rstore->address; /* dummy */ + store_port = 0; /* flag that SD calls FD */ + + } else { + /* + * Default case where FD must call the SD + */ + if (!run_storage_and_start_message_thread(jcr, sd)) { + goto bail_out; + } + + /* + * send Storage daemon address to the File daemon, + * then wait for File daemon to make connection + * with Storage daemon. + */ + if (jcr->rstore->SDDport == 0) { + jcr->rstore->SDDport = jcr->rstore->SDport; + } + + store_address = get_storage_address(jcr->client, jcr->rstore); + store_port = jcr->rstore->SDDport; + } + + /* TLS Requirement */ + if (jcr->rstore->tls_enable) { + if (jcr->rstore->tls_require) { + tls_need = BNET_TLS_REQUIRED; + } else { + tls_need = BNET_TLS_OK; + } + } + + /* + * Send storage address to FD + * if port==0 FD must wait for SD to call it. 
+ */ + fd->fsend(storaddr, store_address, store_port, tls_need, jcr->sd_auth_key); + memset(jcr->sd_auth_key, 0, strlen(jcr->sd_auth_key)); + Dmsg1(6, "dird>filed: %s\n", fd->msg); + if (!response(jcr, fd, OKstore, "Storage", DISPLAY_ERROR)) { + goto bail_out; + } + + /* Declare the job started to start the MaxRunTime check */ + jcr->setJobStarted(); + + /* Only pass "global" commands to the FD once */ + if (first_time) { + first_time = false; + if (!send_runscripts_commands(jcr)) { + goto bail_out; + } + if (!send_component_info(jcr)) { + Pmsg0(000, "FAIL: Send component info\n"); + goto bail_out; + } + if (!send_restore_objects(jcr)) { + Pmsg0(000, "FAIL: Send restore objects\n"); + goto bail_out; + } + } + + fd->fsend("%s", restore_cmd.c_str()); + if (!response(jcr, fd, OKrestore, "Restore", DISPLAY_ERROR)) { + goto bail_out; + } + + if (jcr->FDVersion < 2) { /* Old FD */ + break; /* we do only one loop */ + } else { + if (!response(jcr, fd, OKstoreend, "Store end", DISPLAY_ERROR)) { + goto bail_out; + } + wait_for_storage_daemon_termination(jcr); + } + } /* the whole boostrap has been send */ + + if (fd && jcr->FDVersion >= 2) { + fd->fsend("endrestore"); + } + + ret = true; + +bail_out: + close_bootstrap_file(info); + return ret; +} + +/** + * Do a restore of the specified files + * + * Returns: 0 on failure + * 1 on success + */ +bool do_restore(JCR *jcr) +{ + JOB_DBR rjr; /* restore job record */ + int stat; + + free_wstorage(jcr); /* we don't write */ + + if (!allow_duplicate_job(jcr)) { + goto bail_out; + } + + memset(&rjr, 0, sizeof(rjr)); + jcr->jr.JobLevel = L_FULL; /* Full restore */ + if (!db_update_job_start_record(jcr, jcr->db, &jcr->jr)) { + Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(jcr->db)); + goto bail_out; + } + Dmsg0(20, "Updated job start record\n"); + + Dmsg1(20, "RestoreJobId=%d\n", jcr->job->RestoreJobId); + + if (!jcr->RestoreBootstrap) { + Jmsg(jcr, M_FATAL, 0, _("Cannot restore without a bootstrap file.\n" + "You probably ran a restore job directly. 
All restore jobs must\n" + "be run using the restore command.\n")); + goto bail_out; + } + + + /* Print Job Start message */ + Jmsg(jcr, M_INFO, 0, _("Start Restore Job %s\n"), jcr->Job); + + if (jcr->client) { + jcr->sd_calls_client = jcr->client->sd_calls_client; + } + + /* Read the bootstrap file and do the restore */ + if (!restore_bootstrap(jcr)) { + goto bail_out; + } + + /* Wait for Job Termination */ + stat = wait_for_job_termination(jcr); + restore_cleanup(jcr, stat); + return true; + +bail_out: + restore_cleanup(jcr, JS_ErrorTerminated); + return false; +} + +/* Create a Plugin Config RestoreObject, will be sent + * at restore time to the Plugin + */ +static void plugin_create_restoreobject(JCR *jcr, plugin_config_item *elt) +{ + ROBJECT_DBR ro; + memset(&ro, 0, sizeof(ro)); + ro.FileIndex = 1; + ro.JobId = jcr->JobId; + ro.FileType = FT_PLUGIN_CONFIG_FILLED; + ro.object_index = 1; + ro.object_full_len = ro.object_len = strlen(elt->content); + ro.object_compression = 0; + ro.plugin_name = elt->plugin_name; + ro.object_name = (char*)INI_RESTORE_OBJECT_NAME; + ro.object = elt->content; + db_create_restore_object_record(jcr, jcr->db, &ro); + Dmsg1(50, "Creating restore object for %s\n", elt->plugin_name); +} + +bool do_restore_init(JCR *jcr) +{ + /* Will add RestoreObject used for the Plugin configuration */ + if (jcr->plugin_config) { + + plugin_config_item *elt; + foreach_alist(elt, jcr->plugin_config) { + plugin_create_restoreobject(jcr, elt); + free_plugin_config_item(elt); + } + + delete jcr->plugin_config; + jcr->plugin_config = NULL; + } + free_wstorage(jcr); + return true; +} + +/** + * Release resources allocated during restore. + * + */ +void restore_cleanup(JCR *jcr, int TermCode) +{ + POOL_MEM where; + char creplace; + const char *replace; + char sdt[MAX_TIME_LENGTH], edt[MAX_TIME_LENGTH]; + char ec1[30], ec2[30], ec3[30], ec4[30], elapsed[50]; + char term_code[100], fd_term_msg[100], sd_term_msg[100]; + const char *term_msg; + int msg_type = M_INFO; + double kbps; + utime_t RunTime; + + Dmsg0(20, "In restore_cleanup\n"); + update_job_end(jcr, TermCode); + + if (jcr->component_fd) { + fclose(jcr->component_fd); + jcr->component_fd = NULL; + } + if (jcr->component_fname && *jcr->component_fname) { + unlink(jcr->component_fname); + } + free_and_null_pool_memory(jcr->component_fname); + + if (jcr->unlink_bsr && jcr->RestoreBootstrap) { + unlink(jcr->RestoreBootstrap); + jcr->unlink_bsr = false; + } + + if (job_canceled(jcr)) { + cancel_storage_daemon_job(jcr); + } + + switch (TermCode) { + case JS_Terminated: + if (jcr->ExpectedFiles > jcr->jr.JobFiles) { + term_msg = _("Restore OK -- warning file count mismatch"); + + } else if (jcr->JobErrors > 0 || jcr->SDErrors > 0) { + term_msg = _("Restore OK -- with errors"); + + } else { + term_msg = _("Restore OK"); + } + break; + case JS_Warnings: + term_msg = _("Restore OK -- with warnings"); + break; + case JS_FatalError: + case JS_ErrorTerminated: + term_msg = _("*** Restore Error ***"); + msg_type = M_ERROR; /* Generate error message */ + terminate_sd_msg_chan_thread(jcr); + break; + case JS_Canceled: + term_msg = _("Restore Canceled"); + terminate_sd_msg_chan_thread(jcr); + break; + default: + term_msg = term_code; + sprintf(term_code, _("Inappropriate term code: %c\n"), TermCode); + break; + } + bstrftimes(sdt, sizeof(sdt), jcr->jr.StartTime); + bstrftimes(edt, sizeof(edt), jcr->jr.EndTime); + + RunTime = jcr->jr.EndTime - jcr->jr.StartTime; + if (RunTime <= 0) { + RunTime = 1; + } + kbps = (double)jcr->jr.JobBytes / (1000.0 
* (double)RunTime); + if (kbps < 0.05) { + kbps = 0; + } + + get_restore_params(jcr, where, &creplace, NULL); + + replace = ReplaceOptions[0].name; /* default */ + for (int i=0; ReplaceOptions[i].name; i++) { + if (ReplaceOptions[i].token == (int)creplace) { + replace = ReplaceOptions[i].name; + } + } + + jobstatus_to_ascii(jcr->FDJobStatus, fd_term_msg, sizeof(fd_term_msg)); + jobstatus_to_ascii(jcr->SDJobStatus, sd_term_msg, sizeof(sd_term_msg)); + + Jmsg(jcr, msg_type, 0, _("%s %s %s (%s):\n" +" Build OS: %s %s %s\n" +" JobId: %d\n" +" Job: %s\n" +" Restore Client: %s\n" +" Where: %s\n" +" Replace: %s\n" +" Start time: %s\n" +" End time: %s\n" +" Elapsed time: %s\n" +" Files Expected: %s\n" +" Files Restored: %s\n" +" Bytes Restored: %s (%sB)\n" +" Rate: %.1f KB/s\n" +" FD Errors: %d\n" +" FD termination status: %s\n" +" SD termination status: %s\n" +" Termination: %s\n\n"), + BACULA, my_name, VERSION, LSMDATE, + HOST_OS, DISTNAME, DISTVER, + jcr->jr.JobId, + jcr->jr.Job, + jcr->client->name(), + where.c_str(), + replace, + sdt, + edt, + edit_utime(RunTime, elapsed, sizeof(elapsed)), + edit_uint64_with_commas((uint64_t)jcr->ExpectedFiles, ec1), + edit_uint64_with_commas((uint64_t)jcr->jr.JobFiles, ec2), + edit_uint64_with_commas(jcr->jr.JobBytes, ec3), edit_uint64_with_suffix(jcr->jr.JobBytes, ec4), + (float)kbps, + jcr->JobErrors, + fd_term_msg, + sd_term_msg, + term_msg); + + Dmsg0(20, "Leaving restore_cleanup\n"); +} diff --git a/src/dird/run_conf.c b/src/dird/run_conf.c new file mode 100644 index 00000000..c4a41578 --- /dev/null +++ b/src/dird/run_conf.c @@ -0,0 +1,688 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2015 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * + * Configuration parser for Director Run Configuration + * directives, which are part of the Schedule Resource + * + * Kern Sibbald, May MM + * + */ + +#include "bacula.h" +#include "dird.h" + +#if defined(_MSC_VER) +extern "C" { // work around visual compiler mangling variables + extern URES res_all; +} +#else +extern URES res_all; +#endif +extern s_jl joblevels[]; + +/* Forward referenced subroutines */ + +enum e_state { + s_none = 0, + s_range, + s_mday, + s_month, + s_time, + s_at, + s_wday, + s_daily, + s_weekly, + s_monthly, + s_hourly, + s_wom, /* 1st, 2nd, ...*/ + s_woy, /* week of year w00 - w53 */ + s_ldom /* last day of month */ +}; + +struct s_keyw { + const char *name; /* keyword */ + enum e_state state; /* parser state */ + int code; /* state value */ +}; + +/* Keywords understood by parser */ +static struct s_keyw keyw[] = { + {NT_("on"), s_none, 0}, + {NT_("at"), s_at, 0}, + {NT_("lastday"), s_ldom, 0}, + + {NT_("sun"), s_wday, 0}, + {NT_("mon"), s_wday, 1}, + {NT_("tue"), s_wday, 2}, + {NT_("wed"), s_wday, 3}, + {NT_("thu"), s_wday, 4}, + {NT_("fri"), s_wday, 5}, + {NT_("sat"), s_wday, 6}, + {NT_("jan"), s_month, 0}, + {NT_("feb"), s_month, 1}, + {NT_("mar"), s_month, 2}, + {NT_("apr"), s_month, 3}, + {NT_("may"), s_month, 4}, + {NT_("jun"), s_month, 5}, + {NT_("jul"), s_month, 6}, + {NT_("aug"), s_month, 7}, + {NT_("sep"), s_month, 8}, + {NT_("oct"), s_month, 9}, + {NT_("nov"), s_month, 10}, + {NT_("dec"), s_month, 11}, + + {NT_("sunday"), s_wday, 0}, + {NT_("monday"), s_wday, 1}, + {NT_("tuesday"), s_wday, 2}, + {NT_("wednesday"), s_wday, 3}, + {NT_("thursday"), s_wday, 4}, + {NT_("friday"), s_wday, 5}, + {NT_("saturday"), s_wday, 6}, + {NT_("january"), s_month, 0}, + {NT_("february"), s_month, 1}, + {NT_("march"), s_month, 2}, + {NT_("april"), s_month, 3}, + {NT_("june"), s_month, 5}, + {NT_("july"), s_month, 6}, + {NT_("august"), s_month, 7}, + {NT_("september"), s_month, 8}, + {NT_("october"), s_month, 9}, + {NT_("november"), s_month, 10}, + {NT_("december"), s_month, 11}, + + {NT_("daily"), s_daily, 0}, + {NT_("weekly"), s_weekly, 0}, + {NT_("monthly"), s_monthly, 0}, + {NT_("hourly"), s_hourly, 0}, + + {NT_("1st"), s_wom, 0}, + {NT_("2nd"), s_wom, 1}, + {NT_("3rd"), s_wom, 2}, + {NT_("4th"), s_wom, 3}, + {NT_("5th"), s_wom, 4}, + {NT_("6th"), s_wom, 5}, + + {NT_("first"), s_wom, 0}, + {NT_("second"), s_wom, 1}, + {NT_("third"), s_wom, 2}, + {NT_("fourth"), s_wom, 3}, + {NT_("fifth"), s_wom, 4}, + {NT_("sixth"), s_wom, 5}, + {NULL, s_none, 0} +}; + +static bool have_hour, have_mday, have_wday, have_month, have_wom; +static bool have_at, have_woy; +static RUN lrun; + +static void set_defaults() +{ + have_hour = have_mday = have_wday = have_month = have_wom = have_woy = false; + have_at = false; + set_bits(0, 23, lrun.hour); + set_bits(0, 30, lrun.mday); + set_bits(0, 6, lrun.wday); + set_bits(0, 11, lrun.month); + set_bits(0, 5, lrun.wom); + set_bits(0, 53, lrun.woy); +} + + +/* + * Keywords (RHS) permitted in Run records + * + * name token + */ +s_kw RunFields[] = { + {"Pool", 'P'}, + {"FullPool", 'f'}, + {"IncrementalPool", 'i'}, + {"DifferentialPool", 'd'}, + {"Level", 'L'}, + {"Storage", 'S'}, + {"Messages", 'M'}, + {"Priority", 'p'}, + {"SpoolData", 's'}, + {"writepartafterjob", 'W'}, + {"MaxRunSchedTime", 'm'}, + {"Accurate", 'a'}, + {"NextPool", 'N'}, + {NULL, 0} +}; + +/* + * Store Schedule Run information + * + * Parse Run statement: + * + * Run [on] 2 january at 23:45 + * + * Default Run time is daily at 0:0 + * + * There can be multiple run 
statements, they are simply chained + * together. + * + */ +void store_run(LEX *lc, RES_ITEM *item, int index, int pass) +{ + int i, j; + bool found; + utime_t utime; + int token, state, state2 = 0, code = 0, code2 = 0; + int options = lc->options; + RUN **run = (RUN **)(item->value); + char *p; + RES *res; + + + lc->options |= LOPT_NO_IDENT; /* want only "strings" */ + + /* clear local copy of run record */ + memset(&lrun, 0, sizeof(RUN)); + + /* scan for Job level "full", "incremental", ... */ + for (found=true; found; ) { + found = false; + token = lex_get_token(lc, T_NAME); + for (i=0; !found && RunFields[i].name; i++) { + if (strcasecmp(lc->str, RunFields[i].name) == 0) { + found = true; + if (lex_get_token(lc, T_ALL) != T_EQUALS) { + scan_err1(lc, _("Expected an equals, got: %s"), lc->str); + /* NOT REACHED */ + } + switch (RunFields[i].token) { + case 's': /* Data spooling */ + token = lex_get_token(lc, T_NAME); + if (strcasecmp(lc->str, "yes") == 0 || strcasecmp(lc->str, "true") == 0) { + lrun.spool_data = true; + lrun.spool_data_set = true; + } else if (strcasecmp(lc->str, "no") == 0 || strcasecmp(lc->str, "false") == 0) { + lrun.spool_data = false; + lrun.spool_data_set = true; + } else { + scan_err1(lc, _("Expect a YES or NO, got: %s"), lc->str); + } + break; + case 'W': /* Write part after job */ + token = lex_get_token(lc, T_NAME); + if (strcasecmp(lc->str, "yes") == 0 || strcasecmp(lc->str, "true") == 0) { + lrun.write_part_after_job = true; + lrun.write_part_after_job_set = true; + } else if (strcasecmp(lc->str, "no") == 0 || strcasecmp(lc->str, "false") == 0) { + lrun.write_part_after_job = false; + lrun.write_part_after_job_set = true; + } else { + scan_err1(lc, _("Expect a YES or NO, got: %s"), lc->str); + } + break; + case 'L': /* level */ + token = lex_get_token(lc, T_NAME); + for (j=0; joblevels[j].level_name; j++) { + if (strcasecmp(lc->str, joblevels[j].level_name) == 0) { + lrun.level = joblevels[j].level; + lrun.job_type = joblevels[j].job_type; + lrun.level_set = true; + j = 0; + break; + } + } + if (j != 0) { + scan_err1(lc, _("Job level field: %s not found in run record"), lc->str); + /* NOT REACHED */ + } + break; + case 'p': /* Priority */ + token = lex_get_token(lc, T_PINT32); + if (pass == 2) { + lrun.Priority = lc->pint32_val; + lrun.priority_set = true; + } + break; + case 'P': /* Pool */ + case 'N': /* NextPool */ + case 'f': /* FullPool */ + case 'v': /* VFullPool */ + case 'i': /* IncPool */ + case 'd': /* DifPool */ + token = lex_get_token(lc, T_NAME); + if (pass == 2) { + res = GetResWithName(R_POOL, lc->str); + if (res == NULL) { + scan_err1(lc, _("Could not find specified Pool Resource: %s"), + lc->str); + /* NOT REACHED */ + } + switch(RunFields[i].token) { + case 'P': + lrun.pool = (POOL *)res; + break; + case 'N': + lrun.next_pool = (POOL *)res; + break; + case 'f': + lrun.full_pool = (POOL *)res; + break; + case 'v': + lrun.vfull_pool = (POOL *)res; + break; + case 'i': + lrun.inc_pool = (POOL *)res; + break; + case 'd': + lrun.diff_pool = (POOL *)res; + break; + } + } + break; + case 'S': /* storage */ + token = lex_get_token(lc, T_NAME); + if (pass == 2) { + res = GetResWithName(R_STORAGE, lc->str); + if (res == NULL) { + scan_err1(lc, _("Could not find specified Storage Resource: %s"), + lc->str); + /* NOT REACHED */ + } + lrun.storage = (STORE *)res; + } + break; + case 'M': /* messages */ + token = lex_get_token(lc, T_NAME); + if (pass == 2) { + res = GetResWithName(R_MSGS, lc->str); + if (res == NULL) { + scan_err1(lc, _("Could not find 
specified Messages Resource: %s"), + lc->str); + /* NOT REACHED */ + } + lrun.msgs = (MSGS *)res; + } + break; + case 'm': /* max run sched time */ + token = lex_get_token(lc, T_QUOTED_STRING); + if (!duration_to_utime(lc->str, &utime)) { + scan_err1(lc, _("expected a time period, got: %s"), lc->str); + return; + } + lrun.MaxRunSchedTime = utime; + lrun.MaxRunSchedTime_set = true; + break; + case 'a': /* accurate */ + token = lex_get_token(lc, T_NAME); + if (strcasecmp(lc->str, "yes") == 0 || strcasecmp(lc->str, "true") == 0) { + lrun.accurate = true; + lrun.accurate_set = true; + } else if (strcasecmp(lc->str, "no") == 0 || strcasecmp(lc->str, "false") == 0) { + lrun.accurate = false; + lrun.accurate_set = true; + } else { + scan_err1(lc, _("Expect a YES or NO, got: %s"), lc->str); + } + break; + default: + scan_err1(lc, _("Expected a keyword name, got: %s"), lc->str); + /* NOT REACHED */ + break; + } /* end switch */ + } /* end if strcasecmp */ + } /* end for RunFields */ + + /* At this point, it is not a keyword. Check for old syle + * Job Levels without keyword. This form is depreciated!!! + */ + if (!found) { + for (j=0; joblevels[j].level_name; j++) { + if (strcasecmp(lc->str, joblevels[j].level_name) == 0) { + lrun.level = joblevels[j].level; + lrun.job_type = joblevels[j].job_type; + found = true; + break; + } + } + } + } /* end for found */ + + + /* + * Scan schedule times. + * Default is: daily at 0:0 + */ + state = s_none; + set_defaults(); + + for ( ; token != T_EOL; (token = lex_get_token(lc, T_ALL))) { + int len; + bool pm = false; + bool am = false; + switch (token) { + case T_NUMBER: + state = s_mday; + code = atoi(lc->str) - 1; + if (code < 0 || code > 30) { + scan_err0(lc, _("Day number out of range (1-31)")); + } + break; + case T_NAME: /* this handles drop through from keyword */ + case T_UNQUOTED_STRING: + if (strchr(lc->str, (int)'-')) { + state = s_range; + break; + } + if (strchr(lc->str, (int)':')) { + state = s_time; + break; + } + if (lc->str_len == 3 && (lc->str[0] == 'w' || lc->str[0] == 'W') && + is_an_integer(lc->str+1)) { + code = atoi(lc->str+1); + if (code < 0 || code > 53) { + scan_err0(lc, _("Week number out of range (0-53)")); + /* NOT REACHED */ + } + state = s_woy; /* week of year */ + break; + } + /* everything else must be a keyword */ + for (i=0; keyw[i].name; i++) { + if (strcasecmp(lc->str, keyw[i].name) == 0) { + state = keyw[i].state; + code = keyw[i].code; + i = 0; + break; + } + } + if (i != 0) { + scan_err1(lc, _("Job type field: %s in run record not found"), lc->str); + /* NOT REACHED */ + } + break; + case T_COMMA: + continue; + default: + scan_err2(lc, _("Unexpected token: %d:%s"), token, lc->str); + /* NOT REACHED */ + break; + } + switch (state) { + case s_none: + continue; + case s_mday: /* day of month */ + if (!have_mday) { + clear_bits(0, 30, lrun.mday); + have_mday = true; + } + set_bit(code, lrun.mday); + break; + case s_month: /* month of year */ + if (!have_month) { + clear_bits(0, 11, lrun.month); + have_month = true; + } + set_bit(code, lrun.month); + break; + case s_wday: /* week day */ + if (!have_wday) { + clear_bits(0, 6, lrun.wday); + have_wday = true; + } + set_bit(code, lrun.wday); + break; + case s_wom: /* Week of month 1st, ... 
*/ + if (!have_wom) { + clear_bits(0, 5, lrun.wom); + have_wom = true; + } + set_bit(code, lrun.wom); + break; + case s_woy: + if (!have_woy) { + clear_bits(0, 53, lrun.woy); + have_woy = true; + } + set_bit(code, lrun.woy); + break; + case s_time: /* time */ + if (!have_at) { + scan_err0(lc, _("Time must be preceded by keyword AT.")); + /* NOT REACHED */ + } + if (!have_hour) { + clear_bits(0, 23, lrun.hour); + } +// Dmsg1(000, "s_time=%s\n", lc->str); + p = strchr(lc->str, ':'); + if (!p) { + scan_err0(lc, _("Time logic error.\n")); + /* NOT REACHED */ + } + *p++ = 0; /* separate two halves */ + code = atoi(lc->str); /* pick up hour */ + code2 = atoi(p); /* pick up minutes */ + len = strlen(p); + if (len >= 2) { + p += 2; + } + if (strcasecmp(p, "pm") == 0) { + pm = true; + } else if (strcasecmp(p, "am") == 0) { + am = true; + } else if (len != 2) { + scan_err0(lc, _("Bad time specification.")); + /* NOT REACHED */ + } + /* + * Note, according to NIST, 12am and 12pm are ambiguous and + * can be defined to anything. However, 12:01am is the same + * as 00:01 and 12:01pm is the same as 12:01, so we define + * 12am as 00:00 and 12pm as 12:00. + */ + if (pm) { + /* Convert to 24 hour time */ + if (code != 12) { + code += 12; + } + /* am */ + } else if (am && code == 12) { + code -= 12; + } + if (code < 0 || code > 23 || code2 < 0 || code2 > 59) { + scan_err0(lc, _("Bad time specification.")); + /* NOT REACHED */ + } +// Dmsg2(000, "hour=%d min=%d\n", code, code2); + set_bit(code, lrun.hour); + lrun.minute = code2; + have_hour = true; + break; + case s_at: + have_at = true; + break; + case s_ldom: + if (!have_mday) { + clear_bits(0, 30, lrun.mday); + have_mday = true; + } + lrun.last_day_set = true; + set_bit(31, lrun.mday); /* day 32 => last day of month */ + break; + case s_range: + p = strchr(lc->str, '-'); + if (!p) { + scan_err0(lc, _("Range logic error.\n")); + } + *p++ = 0; /* separate two halves */ + + /* Check for day range */ + if (is_an_integer(lc->str) && is_an_integer(p)) { + code = atoi(lc->str) - 1; + code2 = atoi(p) - 1; + if (code < 0 || code > 30 || code2 < 0 || code2 > 30) { + scan_err0(lc, _("Bad day range specification.")); + } + if (!have_mday) { + clear_bits(0, 30, lrun.mday); + have_mday = true; + } + if (code < code2) { + set_bits(code, code2, lrun.mday); + } else { + set_bits(code, 30, lrun.mday); + set_bits(0, code2, lrun.mday); + } + break; + } + /* Check for week of year range */ + if (strlen(lc->str) == 3 && strlen(p) == 3 && + (lc->str[0] == 'w' || lc->str[0] == 'W') && + (p[0] == 'w' || p[0] == 'W') && + is_an_integer(lc->str+1) && is_an_integer(p+1)) { + code = atoi(lc->str+1); + code2 = atoi(p+1); + if (code < 0 || code > 53 || code2 < 0 || code2 > 53) { + scan_err0(lc, _("Week number out of range (0-53)")); + } + if (!have_woy) { + clear_bits(0, 53, lrun.woy); + have_woy = true; + } + if (code < code2) { + set_bits(code, code2, lrun.woy); + } else { + set_bits(code, 53, lrun.woy); + set_bits(0, code2, lrun.woy); + } + break; + } + /* lookup first half of keyword range (week days or months) */ + lcase(lc->str); + for (i=0; keyw[i].name; i++) { + if (strcasecmp(lc->str, keyw[i].name) == 0) { + state = keyw[i].state; + code = keyw[i].code; + i = 0; + break; + } + } + if (i != 0 || (state != s_month && state != s_wday && state != s_wom)) { + scan_err0(lc, _("Invalid month, week or position day range")); + /* NOT REACHED */ + } + + /* Lookup end of range */ + lcase(p); + for (i=0; keyw[i].name; i++) { + if (strcasecmp(p, keyw[i].name) == 0) { + state2 = 
keyw[i].state; + code2 = keyw[i].code; + i = 0; + break; + } + } + if (i != 0 || state != state2 || code == code2) { + scan_err0(lc, _("Invalid month, weekday or position range")); + /* NOT REACHED */ + } + if (state == s_wday) { + if (!have_wday) { + clear_bits(0, 6, lrun.wday); + have_wday = true; + } + if (code < code2) { + set_bits(code, code2, lrun.wday); + } else { + set_bits(code, 6, lrun.wday); + set_bits(0, code2, lrun.wday); + } + } else if (state == s_month) { + if (!have_month) { + clear_bits(0, 11, lrun.month); + have_month = true; + } + if (code < code2) { + set_bits(code, code2, lrun.month); + } else { + /* this is a bit odd, but we accept it anyway */ + set_bits(code, 11, lrun.month); + set_bits(0, code2, lrun.month); + } + } else { + /* Must be position */ + if (!have_wom) { + clear_bits(0, 5, lrun.wom); + have_wom = true; + } + if (code < code2) { + set_bits(code, code2, lrun.wom); + } else { + set_bits(code, 5, lrun.wom); + set_bits(0, code2, lrun.wom); + } + } + break; + case s_hourly: + have_hour = true; + set_bits(0, 23, lrun.hour); + break; + case s_weekly: + have_mday = have_wom = have_woy = true; + set_bits(0, 30, lrun.mday); + set_bits(0, 5, lrun.wom); + set_bits(0, 53, lrun.woy); + break; + case s_daily: + have_mday = true; + set_bits(0, 6, lrun.wday); + break; + case s_monthly: + have_month = true; + set_bits(0, 11, lrun.month); + break; + default: + scan_err0(lc, _("Unexpected run state\n")); + /* NOT REACHED */ + break; + } + } + + /* Allocate run record, copy new stuff into it, + * and append it to the list of run records + * in the schedule resource. + */ + if (pass == 2) { + RUN *tail; + + /* Create new run record */ + RUN *nrun = (RUN *)malloc(sizeof(RUN)); + memcpy(nrun, &lrun, sizeof(RUN)); + nrun ->next = NULL; + + if (!*run) { /* if empty list */ + *run = nrun; /* add new record */ + } else { + for (tail = *run; tail->next; tail=tail->next) + { } + tail->next = nrun; + } + } + + lc->options = options; /* restore scanner options */ + set_bit(index, res_all.res_sch.hdr.item_present); +} diff --git a/src/dird/scheduler.c b/src/dird/scheduler.c new file mode 100644 index 00000000..24ff916b --- /dev/null +++ b/src/dird/scheduler.c @@ -0,0 +1,455 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2015 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * Bacula scheduler + * It looks at what jobs are to be run and when + * and waits around until it is time to + * fire them up. + * + * Kern Sibbald, May MM, major revision December MMIII + * + */ + +#include "bacula.h" +#include "dird.h" + +#if 0 +#define SCHED_DEBUG +#define DBGLVL 0 +#else +#undef SCHED_DEBUG +#define DBGLVL DT_SCHEDULER|200 +#endif + +const int dbglvl = DBGLVL; + +/* Local variables */ +struct job_item { + RUN *run; + JOB *job; + time_t runtime; + int Priority; + dlink link; /* link for list */ +}; + +/* List of jobs to be run. 
They were scheduled in this hour or the next */ +static dlist *jobs_to_run; /* list of jobs to be run */ + +/* Time interval in secs to sleep if nothing to be run */ +static int const next_check_secs = 60; + +/* Forward referenced subroutines */ +static void find_runs(); +static void add_job(JOB *job, RUN *run, time_t now, time_t runtime); +static void dump_job(job_item *ji, const char *msg); + +/* Imported subroutines */ + +/* Imported variables */ + +/* + * called by reload_config to tell us that the schedules + * we may have based our next jobs to run queues have been + * invalidated. In fact the schedules may not have changed + * but the run object that we have recorded the last_run time + * on are new and no longer have a valid last_run time which + * causes us to double run schedules that get put into the list + * because run_nh = 1. + */ +static bool schedules_invalidated = false; +void invalidate_schedules(void) { + schedules_invalidated = true; +} + +/********************************************************************* + * + * Main Bacula Scheduler + * + */ +JCR *wait_for_next_job(char *one_shot_job_to_run) +{ + JCR *jcr; + JOB *job; + RUN *run; + time_t now, prev; + static bool first = true; + job_item *next_job = NULL; + + Dmsg0(dbglvl, "Enter wait_for_next_job\n"); + if (first) { + first = false; + /* Create scheduled jobs list */ + jobs_to_run = New(dlist(next_job, &next_job->link)); + if (one_shot_job_to_run) { /* one shot */ + job = (JOB *)GetResWithName(R_JOB, one_shot_job_to_run); + if (!job) { + Emsg1(M_ABORT, 0, _("Job %s not found\n"), one_shot_job_to_run); + } + Dmsg1(5, "Found one_shot_job_to_run %s\n", one_shot_job_to_run); + jcr = new_jcr(sizeof(JCR), dird_free_jcr); + set_jcr_defaults(jcr, job); + return jcr; + } + } + + /* Wait until we have something in the + * next hour or so. + */ +again: + while (jobs_to_run->empty()) { + find_runs(); + if (!jobs_to_run->empty()) { + break; + } + bmicrosleep(next_check_secs, 0); /* recheck once per minute */ + } + +#ifdef list_chain + job_item *je; + foreach_dlist(je, jobs_to_run) { + dump_job(je, _("Walk queue")); + } +#endif + /* + * Pull the first job to run (already sorted by runtime and + * Priority, then wait around until it is time to run it. + */ + next_job = (job_item *)jobs_to_run->first(); + jobs_to_run->remove(next_job); + + dump_job(next_job, _("Dequeued job")); + + if (!next_job) { /* we really should have something now */ + Emsg0(M_ABORT, 0, _("Scheduler logic error\n")); + } + + /* Now wait for the time to run the job */ + for (;;) { + time_t twait; + /** discard scheduled queue and rebuild with new schedule objects. **/ + lock_jobs(); + if (schedules_invalidated) { + dump_job(next_job, "Invalidated job"); + free(next_job); + while (!jobs_to_run->empty()) { + next_job = (job_item *)jobs_to_run->first(); + jobs_to_run->remove(next_job); + dump_job(next_job, "Invalidated job"); + free(next_job); + } + schedules_invalidated = false; + unlock_jobs(); + goto again; + } + unlock_jobs(); + prev = now = time(NULL); + twait = next_job->runtime - now; + if (twait <= 0) { /* time to run it */ + break; + } + /* Recheck at least once per minute */ + bmicrosleep((next_check_secs < twait)?next_check_secs:twait, 0); + /* Attempt to handle clock shift (but not daylight savings time changes) + * we allow a skew of 10 seconds before invalidating everything. 
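+ * For example (illustrative): with next_check_secs = 60 the loop expects to
+ * wake with "now" roughly inside [prev-10, prev+70]; waking outside that
+ * window sets schedules_invalidated and the run queue is rebuilt.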
+ */ + now = time(NULL); + if (now < prev-10 || now > (prev+next_check_secs+10)) { + schedules_invalidated = true; + } + } + jcr = new_jcr(sizeof(JCR), dird_free_jcr); + run = next_job->run; /* pick up needed values */ + job = next_job->job; + if (job->is_enabled() && (!job->client || job->client->is_enabled())) { + dump_job(next_job, _("Run job")); /* no client and job enabled */ + } + free(next_job); + if (!job->is_enabled() || (job->client && !job->client->is_enabled())) { + free_jcr(jcr); + goto again; /* ignore this job */ + } + run->last_run = now; /* mark as run now */ + + ASSERT(job); + set_jcr_defaults(jcr, job); + if (run->level) { + jcr->setJobLevel(run->level); /* override run level */ + } + if (run->pool) { + jcr->pool = run->pool; /* override pool */ + jcr->run_pool_override = true; + } + if (run->next_pool) { + jcr->next_pool = run->next_pool; /* override next pool */ + jcr->run_next_pool_override = true; + } + if (run->full_pool) { + jcr->full_pool = run->full_pool; /* override full pool */ + jcr->run_full_pool_override = true; + } + if (run->vfull_pool) { + jcr->vfull_pool = run->vfull_pool; /* override virtual full pool */ + jcr->run_vfull_pool_override = true; + } + if (run->inc_pool) { + jcr->inc_pool = run->inc_pool; /* override inc pool */ + jcr->run_inc_pool_override = true; + } + if (run->diff_pool) { + jcr->diff_pool = run->diff_pool; /* override dif pool */ + jcr->run_diff_pool_override = true; + } + if (run->storage) { + USTORE store; + store.store = run->storage; + pm_strcpy(store.store_source, _("run override")); + set_rwstorage(jcr, &store); /* override storage */ + } + if (run->msgs) { + jcr->messages = run->msgs; /* override messages */ + } + if (run->Priority) { + jcr->JobPriority = run->Priority; + } + if (run->spool_data_set) { + jcr->spool_data = run->spool_data; + } + if (run->accurate_set) { /* overwrite accurate mode */ + jcr->accurate = run->accurate; + } + if (run->write_part_after_job_set) { + jcr->write_part_after_job = run->write_part_after_job; + } + if (run->MaxRunSchedTime_set) { + jcr->MaxRunSchedTime = run->MaxRunSchedTime; + } + Dmsg0(dbglvl, "Leave wait_for_next_job()\n"); + return jcr; +} + + +/* + * Shutdown the scheduler + */ +void term_scheduler() +{ + if (jobs_to_run) { + delete jobs_to_run; + } +} + +/* + * Find all jobs to be run this hour and the next hour. + */ +static void find_runs() +{ + time_t now, next_hour, runtime; + RUN *run; + JOB *job; + SCHED *sched; + struct tm tm; + int hour, mday, wday, month, wom, woy, ldom; + /* Items corresponding to above at the next hour */ + int nh_hour, nh_mday, nh_wday, nh_month, nh_wom, nh_woy, nh_ldom; + + Dmsg0(dbglvl, "enter find_runs()\n"); + + /* compute values for time now */ + now = time(NULL); + (void)localtime_r(&now, &tm); + hour = tm.tm_hour; + mday = tm.tm_mday - 1; + wday = tm.tm_wday; + month = tm.tm_mon; + wom = mday / 7; + woy = tm_woy(now); /* get week of year */ + ldom = tm_ldom(month, tm.tm_year + 1900); + + Dmsg7(dbglvl, "now = %x: h=%d m=%d md=%d wd=%d wom=%d woy=%d\n", + now, hour, month, mday, wday, wom, woy); + + /* + * Compute values for next hour from now. + * We do this to be sure we don't miss a job while + * sleeping. 
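+ * For example (illustrative): a Run entry whose hour bit 14 is set and which
+ * is seen during the 13:xx scan matches the "next hour" values below, so it
+ * is queued with runtime + 3600 and still fires at 14:minute.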
+ */ + next_hour = now + 3600; + (void)localtime_r(&next_hour, &tm); + nh_hour = tm.tm_hour; + nh_mday = tm.tm_mday - 1; + nh_wday = tm.tm_wday; + nh_month = tm.tm_mon; + nh_wom = nh_mday / 7; + nh_woy = tm_woy(next_hour); /* get week of year */ + nh_ldom = tm_ldom(nh_month, tm.tm_year + 1900); + + Dmsg7(dbglvl, "nh = %x: h=%d m=%d md=%d wd=%d wom=%d woy=%d\n", + next_hour, nh_hour, nh_month, nh_mday, nh_wday, nh_wom, nh_woy); + + /* Loop through all jobs */ + LockRes(); + foreach_res(job, R_JOB) { + sched = job->schedule; + if (!sched || !job->is_enabled() || (sched && !sched->is_enabled()) || + (job->client && !job->client->is_enabled())) { + continue; /* no, skip this job */ + } + Dmsg1(dbglvl, "Got job: %s\n", job->hdr.name); + for (run=sched->run; run; run=run->next) { + bool run_now, run_nh; + /* + * Find runs scheduled between now and the next hour. + */ +#ifdef xxxx + Dmsg0(000, "\n"); + Dmsg7(000, "run h=%d m=%d md=%d wd=%d wom=%d woy=%d ldom=%d\n", + hour, month, mday, wday, wom, woy, ldom); + Dmsg7(000, "bitset bsh=%d bsm=%d bsmd=%d bswd=%d bswom=%d bswoy=%d bsldom=%d\n", + bit_is_set(hour, run->hour), + bit_is_set(month, run->month), + bit_is_set(mday, run->mday), + bit_is_set(wday, run->wday), + bit_is_set(wom, run->wom), + bit_is_set(woy, run->woy), + bit_is_set(31, run->mday)); + + + Dmsg7(000, "nh_run h=%d m=%d md=%d wd=%d wom=%d woy=%d ldom=%d\n", + nh_hour, nh_month, nh_mday, nh_wday, nh_wom, nh_woy, nh_ldom); + Dmsg7(000, "nh_bitset bsh=%d bsm=%d bsmd=%d bswd=%d bswom=%d bswoy=%d bsldom=%d\n", + bit_is_set(nh_hour, run->hour), + bit_is_set(nh_month, run->month), + bit_is_set(nh_mday, run->mday), + bit_is_set(nh_wday, run->wday), + bit_is_set(nh_wom, run->wom), + bit_is_set(nh_woy, run->woy), + bit_is_set(31, run->mday)); +#endif + + run_now = bit_is_set(hour, run->hour) && + ((bit_is_set(mday, run->mday) && + bit_is_set(wday, run->wday) && + bit_is_set(month, run->month) && + bit_is_set(wom, run->wom) && + bit_is_set(woy, run->woy)) || + (bit_is_set(month, run->month) && + bit_is_set(31, run->mday) && mday == ldom)); + + run_nh = bit_is_set(nh_hour, run->hour) && + ((bit_is_set(nh_mday, run->mday) && + bit_is_set(nh_wday, run->wday) && + bit_is_set(nh_month, run->month) && + bit_is_set(nh_wom, run->wom) && + bit_is_set(nh_woy, run->woy)) || + (bit_is_set(nh_month, run->month) && + bit_is_set(31, run->mday) && nh_mday == nh_ldom)); + + Dmsg3(dbglvl, "run@%p: run_now=%d run_nh=%d\n", run, run_now, run_nh); + + if (run_now || run_nh) { + /* find time (time_t) job is to be run */ + (void)localtime_r(&now, &tm); /* reset tm structure */ + tm.tm_min = run->minute; /* set run minute */ + tm.tm_sec = 0; /* zero secs */ + runtime = mktime(&tm); + if (run_now) { + add_job(job, run, now, runtime); + } + /* If job is to be run in the next hour schedule it */ + if (run_nh) { + add_job(job, run, now, runtime + 3600); + } + } + } + } + UnlockRes(); + Dmsg0(dbglvl, "Leave find_runs()\n"); +} + +static void add_job(JOB *job, RUN *run, time_t now, time_t runtime) +{ + job_item *ji; + bool inserted = false; + /* + * Don't run any job that ran less than a minute ago, but + * do run any job scheduled less than a minute ago. + */ + if (((runtime - run->last_run) < 61) || ((runtime+59) < now)) { +#ifdef SCHED_DEBUG + Dmsg4(000, "Drop: Job=\"%s\" run=%lld. last_run=%lld. 
now=%lld\n", job->hdr.name, + (utime_t)runtime, (utime_t)run->last_run, (utime_t)now); + fflush(stdout); +#endif + return; + } +#ifdef SCHED_DEBUG + Dmsg4(000, "Add: Job=\"%s\" run=%lld last_run=%lld now=%lld\n", job->hdr.name, + (utime_t)runtime, (utime_t)run->last_run, (utime_t)now); +#endif + /* accept to run this job */ + job_item *je = (job_item *)malloc(sizeof(job_item)); + je->run = run; + je->job = job; + je->runtime = runtime; + if (run->Priority) { + je->Priority = run->Priority; + } else { + je->Priority = job->Priority; + } + + /* Add this job to the wait queue in runtime, priority sorted order */ + foreach_dlist(ji, jobs_to_run) { + if (ji->runtime > je->runtime || + (ji->runtime == je->runtime && ji->Priority > je->Priority)) { + jobs_to_run->insert_before(je, ji); + dump_job(je, _("Inserted job")); + inserted = true; + break; + } + } + /* If place not found in queue, append it */ + if (!inserted) { + jobs_to_run->append(je); + dump_job(je, _("Appended job")); + } +#ifdef SCHED_DEBUG + foreach_dlist(ji, jobs_to_run) { + dump_job(ji, _("Run queue")); + } + Dmsg0(000, "End run queue\n"); +#endif +} + +static void dump_job(job_item *ji, const char *msg) +{ +#ifdef SCHED_DEBUG + char dt[MAX_TIME_LENGTH]; + int64_t save_debug = debug_level; + + if (!chk_dbglvl(dbglvl)) { + return; + } + bstrftime_nc(dt, sizeof(dt), ji->runtime); + Dmsg4(dbglvl, "%s: Job=%s priority=%d run %s\n", msg, ji->job->hdr.name, + ji->Priority, dt); + fflush(stdout); + debug_level = save_debug; +#endif +} diff --git a/src/dird/snapshot.c b/src/dird/snapshot.c new file mode 100644 index 00000000..5aa7a769 --- /dev/null +++ b/src/dird/snapshot.c @@ -0,0 +1,766 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ + +#include "bacula.h" +#include "dird.h" + +static char CreateSnap[] = "CatReq Job=%127s new_snapshot name=%127s volume=%s device=%s tdate=%d type=%127s retention=%50s"; +static char ListSnap[] = "CatReq Job=%127s list_snapshot name=%127s volume=%s device=%s tdate=%d type=%127s before=%50s after=%50s"; +static char DelSnap[] = "CatReq Job=%127s del_snapshot name=%127s device=%s"; +static char snapretentioncmd[] = "snapshot retention=%s\n"; + + +static void send_list(void *ctx, const char *msg) +{ + BSOCK *bs = (BSOCK *)ctx; + bs->fsend("%s", msg); +} + + +/* Scan command line for common snapshot arguments */ +static void snapshot_scan_cmdline(UAContext *ua, int start, SNAPSHOT_DBR *snapdbr) +{ + for (int j=start; jargc; j++) { + if (strcasecmp(ua->argk[j], NT_("device")) == 0 && ua->argv[j]) { + snapdbr->Device = bstrdup(ua->argv[j]); + snapdbr->need_to_free = true; + + } else if (strcasecmp(ua->argk[j], NT_("jobid")) == 0 && ua->argv[j]) { + snapdbr->JobId = str_to_int64(ua->argv[j]); + + } else if (strcasecmp(ua->argk[j], NT_("type")) == 0 && ua->argv[j]) { + bstrncpy(snapdbr->Type, ua->argv[j], sizeof(snapdbr->Type)); + + } else if (strcasecmp(ua->argk[j], NT_("client")) == 0 && ua->argv[j]) { + bstrncpy(snapdbr->Client, ua->argv[j], sizeof(snapdbr->Client)); + + } else if (strcasecmp(ua->argk[j], NT_("snapshotid")) == 0 && ua->argv[j]) { + snapdbr->SnapshotId = str_to_int64(ua->argv[j]); + + } else if (strcasecmp(ua->argk[j], NT_("snapshot")) == 0 && ua->argv[j]) { + bstrncpy(snapdbr->Name, ua->argv[j], sizeof(snapdbr->Name)); + + } else if (strcasecmp(ua->argk[j], NT_("volume")) == 0 && ua->argv[j]) { + snapdbr->Volume = bstrdup(ua->argv[j]); + snapdbr->need_to_free = true; + + } else if (strcasecmp(ua->argk[j], NT_("createdate")) == 0 && ua->argv[j]) { + bstrncpy(snapdbr->CreateDate, ua->argv[j], sizeof(snapdbr->CreateDate)); + snapdbr->CreateTDate = str_to_utime(ua->argv[j]); + + } else if (strcasecmp(ua->argk[j], NT_("createtdate")) == 0 && ua->argv[j]) { + snapdbr->CreateTDate = str_to_uint64(ua->argv[j]); + bstrutime(snapdbr->CreateDate, sizeof(snapdbr->CreateDate), snapdbr->CreateTDate); + + } else if (strcasecmp(ua->argk[j], NT_("name")) == 0 && ua->argv[j]) { + bstrncpy(snapdbr->Name, ua->argv[j], sizeof(snapdbr->Name)); + + } else if (strcasecmp(ua->argk[j], NT_("size")) == 0 && ua->argv[j]) { + snapdbr->Size = str_to_uint64(ua->argv[j]); + + } else if (strcasecmp(ua->argk[j], NT_("status")) == 0 && ua->argv[j]) { + snapdbr->status = str_to_uint64(ua->argv[j]); + + } else if (strcasecmp(ua->argk[j], NT_("error")) == 0 && ua->argv[j]) { + snapdbr->errmsg = bstrdup(ua->argv[j]); + unbash_spaces(snapdbr->errmsg); + snapdbr->need_to_free = true; + + } else { + continue; + } + } +} + +/* Get a snapshot record, and check that the current UA can access to the Client/FileSet */ +static int get_snapshot_record(UAContext *ua, SNAPSHOT_DBR *snapdbr) +{ + if (!open_client_db(ua)) { + Dmsg0(10, "Unable to open database\n"); + return 0; + } + if (!db_get_snapshot_record(ua->jcr, ua->db, snapdbr)) { + Dmsg0(10, "Unable to get snapshot record\n"); + return 0; + } + /* Need to check if the client is authorized */ + if (!acl_access_client_ok(ua, snapdbr->Client, JT_BACKUP_RESTORE)) { + Dmsg0(10, "Client access denied\n"); + return 0; + } + if (snapdbr->FileSetId && !acl_access_ok(ua, FileSet_ACL, snapdbr->FileSet)) { + Dmsg0(10, "Fileset access denied\n"); + return 0; + } + return 1; +} + +static int check_response(UAContext *ua, BSOCK *sd, const char *resp, const char *cmd) +{ + if 
(sd->errors) { + return 0; + } + if (bget_msg(sd) > 0) { + unbash_spaces(sd->msg); + if (strcmp(sd->msg, resp) == 0) { + return 1; + } + } + if (sd->is_error()) { + ua->error_msg(_("Comm error with SD. bad response to %s. ERR=%s\n"), + cmd, sd->bstrerror()); + } else { + ua->error_msg(_("Bad response from SD to %s command. Wanted %s, got %s len=%ld\n"), + cmd, resp, sd->msg, sd->msglen); + } + return 0; +} + +bool send_snapshot_retention(JCR *jcr, utime_t val) +{ + BSOCK *fd = jcr->file_bsock; + char ed1[50]; + if (val > 0 && jcr->FDVersion >= 13) { + fd->fsend(snapretentioncmd, edit_uint64(val, ed1)); + if (!response(jcr, fd, (char*)"2000 Snapshot retention\n", "set Snapshot Retention", DISPLAY_ERROR)) { + jcr->snapshot_retention = 0; /* can't set snapshot retention */ + return false; + } + } + return true; +} + +/* Called from delete_cmd() in ua_cmd.c */ +int delete_snapshot(UAContext *ua) +{ + POOL_MEM buf; + POOLMEM *out; + SNAPSHOT_DBR snapdbr; + CLIENT *client; + BSOCK *fd; + + if (!open_new_client_db(ua)) { + return 1; + } + + /* If the client or the fileset are not authorized, + * the function will fail. + */ + if (!select_snapshot_dbr(ua, &snapdbr)) { + ua->error_msg(_("Snapshot not found\n")); + snapdbr.debug(0); + return 0; + } + + client = (CLIENT *)GetResWithName(R_CLIENT, snapdbr.Client); + if (!client) { + ua->error_msg(_("Client resource not found\n")); + return 0; + } + + /* Connect to File daemon */ + ua->jcr->client = client; + + /* Try to connect for 15 seconds */ + ua->send_msg(_("Connecting to Client %s at %s:%d\n"), + client->name(), client->address(buf.addr()), client->FDport); + if (!connect_to_file_daemon(ua->jcr, 1, 15, 0)) { + ua->error_msg(_("Failed to connect to Client.\n")); + ua->jcr->client = NULL; + return 0; + } + + fd = ua->jcr->file_bsock; + out = get_pool_memory(PM_FNAME); + fd->fsend("snapshot del %s\n", snapdbr.as_arg(&out)); + free_pool_memory(out); + + /* If the snapshot is not found, still delete ours */ + if (check_response(ua, fd, "2000 Snapshot deleted ERR=\n", "Snapshot")) { + ua->send_msg(_("Snapshot \"%s\" deleted from client %s\n"), snapdbr.Name, + snapdbr.Client); + } + + ua->jcr->file_bsock->signal(BNET_TERMINATE); + free_bsock(ua->jcr->file_bsock); + ua->jcr->client = NULL; + + db_delete_snapshot_record(ua->jcr, ua->db, &snapdbr); + ua->send_msg(_("Snapshot \"%s\" deleted from catalog\n"), snapdbr.Name); + return 1; +} + +/* Called from menu, if snap_list is valid, the snapshot + * list will be stored in this list. 
(not_owned_by_alist) + */ +int list_snapshot(UAContext *ua, alist *snap_list) +{ + POOL_MEM tmp; + SNAPSHOT_DBR snap; + POOLMEM *buf; + CLIENT *client; + BSOCK *fd; + + client = select_client_resource(ua, JT_BACKUP_RESTORE); + if (!client) { + return 0; + } + + /* Connect to File daemon */ + ua->jcr->client = client; + + /* Try to connect for 15 seconds */ + ua->send_msg(_("Connecting to Client %s at %s:%d\n"), + client->name(), client->address(tmp.addr()), client->FDport); + + if (!connect_to_file_daemon(ua->jcr, 1, 15, 0)) { + ua->error_msg(_("Failed to connect to Client.\n")); + return 0; + } + + fd = ua->jcr->file_bsock; + + /* The command line can have filters */ + snapshot_scan_cmdline(ua, 0, &snap); + buf = get_pool_memory(PM_FNAME); + + fd->fsend("snapshot list %s\n", snap.as_arg(&buf)); + while (fd->recv() >= 0) { + if (snap_list) { + SNAPSHOT_DBR *snapr = new SNAPSHOT_DBR(); + parse_args(fd->msg, &ua->args, &ua->argc, ua->argk, ua->argv, MAX_CMD_ARGS); + snapshot_scan_cmdline(ua, 0, snapr); + bstrncpy(snapr->Client, client->name(), sizeof(snapr->Client)); + snap_list->append(snapr); + snapr->debug(0); + } else { + ua->send_msg("%s", fd->msg); + } + } + + /* Reset the UA arg list */ + parse_args(ua->cmd, &ua->args, &ua->argc, ua->argk, ua->argv, MAX_CMD_ARGS); + + ua->jcr->file_bsock->signal(BNET_TERMINATE); + free_bsock(ua->jcr->file_bsock); + ua->jcr->client = NULL; + free_pool_memory(buf); + return 1; +} + +static void storeit(void *ctx, const char *msg) +{ + char ed1[51]; + alist *lst = (alist *)ctx; + if (sscanf(msg, "snapshotid=%50s", ed1) == 1) { + lst->append((void *)(intptr_t) str_to_int64(ed1)); + } +} + +int prune_snapshot(UAContext *ua) +{ + /* First, we get the snapshot list that can be pruned */ + CLIENT *client = NULL; + BSOCK *fd = NULL; + POOLMEM *buf = NULL; + POOL_MEM tmp; + SNAPSHOT_DBR snapdbr; + alist *lst; + intptr_t id; + + snapshot_scan_cmdline(ua, 0, &snapdbr); + snapdbr.expired = true; + if (!open_client_db(ua)) { + Dmsg0(10, "Unable to open database\n"); + return 0; + } + + buf = get_pool_memory(PM_FNAME); + lst = New(alist(10, not_owned_by_alist)); + db_list_snapshot_records(ua->jcr, ua->db, &snapdbr, storeit, lst, ARG_LIST); + foreach_alist(id, lst) { + snapdbr.reset(); + snapdbr.SnapshotId = id; + if (get_snapshot_record(ua, &snapdbr)) { + + ua->send_msg(_("Snapshot \"%s\" on Client %s\n"), snapdbr.Name, snapdbr.Client); + if (!confirm_retention_yesno(ua, snapdbr.Retention, "Snapshot")) { + continue; + } + + if (client && strcmp(client->hdr.name, snapdbr.Client) != 0) { + ua->jcr->file_bsock->signal(BNET_TERMINATE); + free_bsock(ua->jcr->file_bsock); + ua->jcr->client = NULL; + client = NULL; + } + + if (!client) { + client = (CLIENT *)GetResWithName(R_CLIENT, snapdbr.Client); + if (!client) { + continue; + } + + /* Connect to File daemon */ + ua->jcr->client = client; + + /* Try to connect for 15 seconds */ + ua->send_msg(_("Connecting to Client %s at %s:%d\n"), + client->name(), client->address(tmp.addr()), client->FDport); + if (!connect_to_file_daemon(ua->jcr, 1, 15, 0)) { + ua->error_msg(_("Failed to connect to Client.\n")); + free_bsock(ua->jcr->file_bsock); + ua->jcr->client = NULL; + client = NULL; + continue; + } + + fd = ua->jcr->file_bsock; + } + + fd->fsend("snapshot del %s\n", snapdbr.as_arg(&buf)); + + fd->recv(); + if (strncmp(fd->msg, "2000", 4) == 0) { + ua->send_msg("Snapshot %s deleted\n", snapdbr.Volume); + db_delete_snapshot_record(ua->jcr, ua->db, &snapdbr); + } else { + unbash_spaces(fd->msg); + ua->send_msg("%s", fd->msg); + } 
+ } + } + + if (ua->jcr->file_bsock) { + ua->jcr->file_bsock->signal(BNET_TERMINATE); + free_bsock(ua->jcr->file_bsock); + ua->jcr->client = NULL; + } + + free_pool_memory(buf); + delete lst; + return 1; +} + + +/* Called from the FD, in catreq.c */ +int snapshot_catreq(JCR *jcr, BSOCK *bs) +{ + SNAPSHOT_DBR snapdbr; + char Job[MAX_NAME_LENGTH], ed1[50]; + POOLMEM *vol = get_memory(bs->msglen); + POOLMEM *dev = get_memory(bs->msglen); + POOLMEM *err = get_pool_memory(PM_MESSAGE); + int n, ret = 1, expired; + *vol = *dev = 0; + + Dmsg1(DT_SNAPSHOT|10, "Get snapshot catalog request %s\n", bs->msg); + + /* We need to create a snapshot record in the catalog */ + n = sscanf(bs->msg, CreateSnap, Job, snapdbr.Name, vol, dev, + &snapdbr.CreateTDate, snapdbr.Type, ed1); + if (n == 7) { + snapdbr.Volume = vol; + snapdbr.Device = dev; + snapdbr.JobId = jcr->JobId; + unbash_spaces(snapdbr.Name); + unbash_spaces(snapdbr.Volume); + unbash_spaces(snapdbr.Device); + snapdbr.Retention = str_to_uint64(ed1); + bstrftimes(snapdbr.CreateDate, sizeof(snapdbr.CreateDate), snapdbr.CreateTDate); + unbash_spaces(snapdbr.Type); + bstrncpy(snapdbr.Client, jcr->client->hdr.name, sizeof(snapdbr.Client)); + bstrncpy(snapdbr.FileSet, (jcr->fileset)?jcr->fileset->hdr.name:"", sizeof(snapdbr.FileSet)); + + Dmsg1(DT_SNAPSHOT|10, "Creating snapshot %s\n", snapdbr.Name); + snapdbr.debug(20); + + /* We lock the db before to keep the error message */ + db_lock(jcr->db); + ret = db_create_snapshot_record(jcr, jcr->db, &snapdbr); + pm_strcpy(err, jcr->db->errmsg); + db_unlock(jcr->db); + + if (ret) { + bs->fsend("1000 Snapshot created\n"); + + } else { + bs->fsend("1999 Snapshot not created ERR=%s\n", err); + } + goto bail_out; + } + + n = sscanf(bs->msg, ListSnap, Job, snapdbr.Name, vol, dev, &snapdbr.CreateTDate, snapdbr.Type, + snapdbr.created_before, snapdbr.created_after, &expired); + if (n == 8) { + snapdbr.Volume = vol; + snapdbr.Device = dev; + unbash_spaces(snapdbr.Name); + unbash_spaces(snapdbr.Volume); + unbash_spaces(snapdbr.Device); + bstrftimes(snapdbr.CreateDate, sizeof(snapdbr.CreateDate), snapdbr.CreateTDate); + unbash_spaces(snapdbr.Type); + unbash_spaces(snapdbr.created_before); + unbash_spaces(snapdbr.created_after); + bstrncpy(snapdbr.Client, jcr->client->hdr.name, sizeof(snapdbr.Client)); + snapdbr.expired = (expired != 0); + Dmsg0(DT_SNAPSHOT|10, "List snapshots\n"); + snapdbr.debug(20); + db_list_snapshot_records(jcr, jcr->db, &snapdbr, send_list, bs, ARG_LIST); + bs->signal(BNET_EOD); + goto bail_out; + } + + n = sscanf(bs->msg, DelSnap, Job, snapdbr.Name, dev); + if (n == 3) { + snapdbr.Device = dev; + unbash_spaces(snapdbr.Name); + unbash_spaces(snapdbr.Device); + bstrncpy(snapdbr.Client, jcr->client->hdr.name, sizeof(snapdbr.Client)); + Dmsg2(DT_SNAPSHOT|10, "Delete snapshot %s from %s\n", snapdbr.Name, snapdbr.Client); + snapdbr.debug(20); + + /* We lock the db before to keep the error message */ + db_lock(jcr->db); + ret = db_delete_snapshot_record(jcr, jcr->db, &snapdbr); + pm_strcpy(err, jcr->db->errmsg); + db_unlock(jcr->db); + + if (ret) { + bs->fsend("1000 Snapshot deleted\n"); + + } else { + bs->fsend("1999 Snapshot not deleted ERR=%s\n", err); + } + goto bail_out; + } + ret = 0; + +bail_out: + free_pool_memory(vol); + free_pool_memory(dev); + free_pool_memory(err); + return ret; +} + +/* List snapshots, allow to use some parameters from the command line */ +void snapshot_list(UAContext *ua, int i, DB_LIST_HANDLER *sendit, e_list_type llist) +{ + SNAPSHOT_DBR snapdbr; + 
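/* Editorial note: snapshot_scan_cmdline() below accepts the same optional
+ * filters used elsewhere in this file (client=, snapshotid=, snapshot=,
+ * volume=, device=, createdate=, ...) to narrow the listing. */ +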
snapshot_scan_cmdline(ua, i, &snapdbr); + if (open_new_client_db(ua)) { + db_list_snapshot_records(ua->jcr, ua->db, &snapdbr, sendit, ua, llist); + } +} + +static int list_client_snapshot(UAContext *ua, bool sync) +{ + SNAPSHOT_DBR *s, stemp; + alist *lst; +// char ed1[50]; + + if (sync) { + if (!open_new_client_db(ua)) { + return 1; + } + } + + lst = New(alist(10, not_owned_by_alist)); + if (list_snapshot(ua, lst)) { + foreach_alist(s, lst) { + ua->send_msg(_( + "Snapshot %s:\n" + " Volume: %s\n" + " Device: %s\n" + " CreateDate: %s\n" +// " Size: %sB\n", + " Type: %s\n" + " Status: %s\n" + " Error: %s\n"), + s->Name, NPRT(s->Volume), NPRT(s->Device), + s->CreateDate, +// edit_uint64_with_suffix(s->Size, ed1), + s->Type, s->status?_("OK"):_("Error"), s->errmsg); + if (sync && s->Device && *s->Name) { + stemp.reset(); + stemp.Device = s->Device; + bstrncpy(stemp.Name, s->Name, sizeof(stemp.Name)); + if (!db_get_snapshot_record(ua->jcr, ua->db, &stemp)) { + if (db_create_snapshot_record(ua->jcr, ua->db, s)) { + ua->send_msg(_("Snapshot added in Catalog\n")); + } + } + } + } + if (lst->size() == 0) { + ua->send_msg(_("No snapshot found\n")); + } + } + /* Cleanup the list */ + foreach_alist (s, lst) { + delete s; + } + delete lst; + return 1; +} + +/* Ask client to create/prune/delete a snapshot via the command line */ +int snapshot_cmd(UAContext *ua, const char *cmd) +{ + SNAPSHOT_DBR snapdbr; + for (int i=0; i<ua->argc; i++) { + if (strcasecmp(ua->argk[i], NT_("purge")) == 0) { + + } else if (strcasecmp(ua->argk[i], NT_("prune")) == 0) { + return prune_snapshot(ua); + + } else if (strcasecmp(ua->argk[i], NT_("listclient")) == 0) { + return list_client_snapshot(ua, false); + + } else if (strcasecmp(ua->argk[i], NT_("list")) == 0) { + snapshot_list(ua, 0, prtit, HORZ_LIST); + return 1; + + } else if (strcasecmp(ua->argk[i], NT_("create")) == 0) { + /* We need a job definition, or a client */ + + } else if (strcasecmp(ua->argk[i], NT_("delete")) == 0) { + return delete_snapshot(ua); + + } else if (strcasecmp(ua->argk[i], NT_("status")) == 0) { + + } else if (strcasecmp(ua->argk[i], NT_("sync")) == 0) { + return list_client_snapshot(ua, true); + + } else if (strcasecmp(ua->argk[i], NT_("update")) == 0) { + return update_snapshot(ua); + + } else { + continue; + } + } + + for ( ;; ) { + + start_prompt(ua, _("Snapshot choice: \n")); + add_prompt(ua, _("List snapshots in Catalog")); + add_prompt(ua, _("List snapshots on Client")); + add_prompt(ua, _("Prune snapshots")); + add_prompt(ua, _("Delete snapshot")); + add_prompt(ua, _("Update snapshot parameters")); + add_prompt(ua, _("Update catalog with Client snapshots")); + add_prompt(ua, _("Done")); + + switch(do_prompt(ua, "", _("Select action to perform on Snapshot Engine"), NULL, 0)) { + case 0: /* list catalog */ + snapshot_list(ua, 0, prtit, HORZ_LIST); + break; + case 1: /* list client */ + list_client_snapshot(ua, false); + break; + case 2: /* prune */ + prune_snapshot(ua); + break; + case 3: /* delete */ + delete_snapshot(ua); + break; + case 4: /* update snapshot */ + update_snapshot(ua); + break; + case 5: /* sync snapshot */ + list_client_snapshot(ua, true); + break; + case 6: /* done */ + default: + ua->info_msg(_("Selection terminated.\n")); + return 1; + } + } + return 1; +} + +/* Select a Snapshot record from the database, might be in ua_select.c */ +int select_snapshot_dbr(UAContext *ua, SNAPSHOT_DBR *sr) +{ + int ret = 0; + char *p; + POOLMEM *err = get_pool_memory(PM_FNAME); + *err=0; + + sr->reset(); + snapshot_scan_cmdline(ua, 0, sr);
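+ /* Editorial note: if the command line did not identify a snapshot (no
+ * snapshotid= and no name/client pair), the code below lists the selected
+ * client's snapshots and prompts the user for a SnapshotId. */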
+ + if (sr->SnapshotId == 0 && (sr->Name[0] == 0 || sr->Client[0] == 0)) { + CLIENT_DBR cr; + memset(&cr, 0, sizeof(cr)); + /* Get the pool from client= */ + if (!get_client_dbr(ua, &cr, JT_BACKUP_RESTORE)) { + goto bail_out; + } + sr->ClientId = cr.ClientId; + db_list_snapshot_records(ua->jcr, ua->db, sr, prtit, ua, HORZ_LIST); + if (!get_cmd(ua, _("Enter a SnapshotId: "))) { + goto bail_out; + } + p = ua->cmd; + if (*p == '*') { + p++; + } + if (is_a_number(p)) { + sr->SnapshotId = str_to_int64(p); + } else { + goto bail_out; + } + } + + if (!get_snapshot_record(ua, sr)) { + ua->error_msg(_("Unable to get Snapshot record.\n")); + goto bail_out; + } + + ret = 1; + +bail_out: + if (!ret && *err) { + ua->error_msg("%s", err); + } + free_pool_memory(err); + return ret; +} + +/* This part should be in ua_update.c */ +static void update_snapretention(UAContext *ua, char *val, SNAPSHOT_DBR *sr) +{ + char ed1[150]; + POOL_MEM tmp(PM_MESSAGE); + bool ret; + if (!duration_to_utime(val, &sr->Retention)) { + ua->error_msg(_("Invalid retention period specified: %s\n"), val); + return; + } + + db_lock(ua->db); + if (!(ret = db_update_snapshot_record(ua->jcr, ua->db, sr))) { + pm_strcpy(tmp, db_strerror(ua->db)); + } + db_unlock(ua->db); + + if (!ret) { + ua->error_msg("%s", tmp.c_str()); + + } else { + ua->info_msg(_("New retention period is: %s\n"), + edit_utime(sr->Retention, ed1, sizeof(ed1))); + } +} + +/* This part should be in ua_update.c */ +static void update_snapcomment(UAContext *ua, char *val, SNAPSHOT_DBR *sr) +{ + POOL_MEM tmp(PM_MESSAGE); + bool ret; + + bstrncpy(sr->Comment, val, sizeof(sr->Comment)); + + db_lock(ua->db); + if (!(ret = db_update_snapshot_record(ua->jcr, ua->db, sr))) { + pm_strcpy(tmp, db_strerror(ua->db)); + } + db_unlock(ua->db); + + if (!ret) { + ua->error_msg("%s", tmp.c_str()); + + } else { + ua->info_msg(_("New Comment is: %s\n"), sr->Comment); + } +} + +/* This part should be in ua_update.c */ +bool update_snapshot(UAContext *ua) +{ + SNAPSHOT_DBR sr; + POOL_MEM ret; + char ed1[130]; + bool done = false; + int i; + const char *kw[] = { + NT_("Retention"), /* 0 */ + NT_("Comment"), /* 1 */ + NULL }; + + for (i=0; kw[i]; i++) { + int j; + if ((j=find_arg_with_value(ua, kw[i])) > 0) { + /* If all from pool don't select a media record */ + if (!select_snapshot_dbr(ua, &sr)) { + return 0; + } + switch (i) { + case 0: + update_snapretention(ua, ua->argv[j], &sr); + break; + case 1: + update_snapcomment(ua, ua->argv[j], &sr); + break; + default: + break; + } + done = true; + } + } + + for ( ; !done; ) { + start_prompt(ua, _("Parameters to modify:\n")); + add_prompt(ua, _("Snapshot Retention Period")); /* 0 */ + add_prompt(ua, _("Snapshot Comment")); /* 1 */ + add_prompt(ua, _("Done")); /* 2 */ + i = do_prompt(ua, "", _("Select parameter to modify"), NULL, 0); + if (i == 2) { + return 0; + } + + if (!select_snapshot_dbr(ua, &sr)) { /* Get Snapshot record */ + return 0; + } + ua->info_msg(_("Updating Snapshot \"%s\" on \"%s\"\n"), sr.Name, sr.Client); + + switch (i) { + case 0: /* Snapshot retention */ + ua->info_msg(_("Current retention period is: %s\n"), + edit_utime(sr.Retention, ed1, sizeof(ed1))); + if (!get_cmd(ua, _("Enter Snapshot Retention period: "))) { + return 0; + } + update_snapretention(ua, ua->cmd, &sr); + break; + case 1: + ua->info_msg(_("Current comment is: %s\n"), NPRTB(sr.Comment)); + if (!get_cmd(ua, _("Enter Snapshot comment: "))) { + return 0; + } + update_snapcomment(ua, ua->cmd, &sr); + break; + default: /* Done or error */ + 
ua->info_msg(_("Selection terminated.\n")); + return 1; + } + } + return 1; +} diff --git a/src/dird/ua.h b/src/dird/ua.h new file mode 100644 index 00000000..6e93966c --- /dev/null +++ b/src/dird/ua.h @@ -0,0 +1,145 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Includes specific to the Director User Agent Server + * + * Kern Sibbald, August MMI + */ + +#ifndef __UA_H_ +#define __UA_H_ 1 + +class UAContext { +public: + BSOCK *UA_sock; + BSOCK *sd; + JCR *jcr; + BDB *db; /* Pointing to shared or private db */ + BDB *shared_db; /* Main Bacula DB access */ + BDB *private_db; /* Private DB access */ + CAT *catalog; + CONRES *cons; /* console resource */ + POOLMEM *cmd; /* return command/name buffer */ + POOLMEM *args; /* command line arguments */ + POOLMEM *errmsg; /* store error message */ + char *argk[MAX_CMD_ARGS]; /* argument keywords */ + char *argv[MAX_CMD_ARGS]; /* argument values */ + int argc; /* number of arguments */ + char **prompt; /* list of prompts */ + char **unique; /* extra unique field */ + int max_prompts; /* max size of list */ + int num_prompts; /* current number in list */ + char api_opts[MAX_NAME_LENGTH]; /* Api options */ + int api; /* For programs want an API */ + int cmd_index; /* Index in command table */ + bool force_mult_db_connections; /* overwrite cat.mult_db_connections */ + bool auto_display_messages; /* if set, display messages */ + bool user_notified_msg_pending; /* set when user notified */ + bool automount; /* if set, mount after label */ + bool quit; /* if set, quit */ + bool verbose; /* set for normal UA verbosity */ + bool batch; /* set for non-interactive mode */ + bool gui; /* set if talking to GUI program */ + bool runscript; /* set if we are in runscript */ + uint32_t pint32_val; /* positive integer */ + int32_t int32_val; /* positive/negative */ + int64_t int64_val; /* big int */ + + void *bvfs; /* used in some bvfs queries */ + + void signal(int sig) { UA_sock->signal(sig); }; + + /* The below are in ua_output.c */ + void send_msg(const char *fmt, ...); + void error_msg(const char *fmt, ...); + void warning_msg(const char *fmt, ...); + void info_msg(const char *fmt, ...); +}; + +/* Context for insert_tree_handler() */ +struct TREE_CTX { + TREE_ROOT *root; /* root */ + TREE_NODE *node; /* current node */ + TREE_NODE *avail_node; /* unused node last insert */ + int cnt; /* count for user feedback */ + bool all; /* if set mark all as default */ + bool hardlinks_in_mem; /* Set to optimize for speed */ + bool no_auto_parent; /* Set to not select parent directories */ + UAContext *ua; + uint32_t FileEstimate; /* estimate of number of files */ + uint32_t FileCount; /* current count of files */ + uint32_t LastCount; /* last count of files */ + uint32_t DeltaCount; /* trigger for printing */ + alist *uid_acl; /* UID allowed in the tree */ + alist *gid_acl; /* GID allowed in the tree */ + alist *dir_acl; /* Directories that can be displayed */ + char *last_dir_acl; /* Last 
directory from the DirectoryACL list */ +}; + +struct NAME_LIST { + char **name; /* list of names */ + int num_ids; /* ids stored */ + int max_ids; /* size of array */ + int num_del; /* number deleted */ + int tot_ids; /* total to process */ +}; + + +/* Main structure for obtaining JobIds or Files to be restored */ +struct RESTORE_CTX { + utime_t JobTDate; + uint32_t TotalFiles; + JobId_t JobId; + char ClientName[MAX_NAME_LENGTH]; /* backup client */ + char RestoreClientName[MAX_NAME_LENGTH]; /* restore client */ + char RestoreMediaType[MAX_NAME_LENGTH]; /* restore Media type when storage override */ + char last_jobid[20]; + POOLMEM *JobIds; /* User entered string of JobIds */ + POOLMEM *BaseJobIds; /* Base jobids */ + STORE *store; + JOB *restore_job; + POOL *pool; + int restore_jobs; + uint32_t selected_files; + char *comment; + char *where; + char *RegexWhere; + char *replace; + char *fileregex; + + char *when; + rblist *bsr_list; + POOLMEM *fname; /* filename only */ + POOLMEM *path; /* path only */ + POOLMEM *query; + int fnl; /* filename length */ + int pnl; /* path length */ + bool found; + bool all; /* mark all as default */ + bool hardlinks_in_mem; /* keep hard links in memory */ + bool fdcalled; /* True if we should reuse the FD socket */ + bool no_auto_parent; /* Select or not parent directories */ + NAME_LIST name_list; + POOLMEM *component_fname; + FILE *component_fd; +}; + +#define MAX_ID_LIST_LEN 2000000 + +#endif diff --git a/src/dird/ua_acl.c b/src/dird/ua_acl.c new file mode 100644 index 00000000..5396b1ad --- /dev/null +++ b/src/dird/ua_acl.c @@ -0,0 +1,119 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula Director -- User Agent Access Control List (ACL) handling + * + * Kern Sibbald, January MMIV + */ + +#include "bacula.h" +#include "dird.h" + +/* + * Check if access is permitted to item in acl + */ +bool acl_access_ok(UAContext *ua, int acl, const char *item) +{ + return acl_access_ok(ua, acl, item, strlen(item)); +} + +bool acl_access_client_ok(UAContext *ua, const char *name, int32_t jobtype) +{ + if (acl_access_ok(ua, Client_ACL, name)) { + return true; + } + if (jobtype == JT_BACKUP && acl_access_ok(ua, BackupClient_ACL, name)) { + return true; + } + if (jobtype == JT_RESTORE && acl_access_ok(ua, RestoreClient_ACL, name)) { + return true; + } + /* Some commands such as "status client" are for both Backup and Restore */ + if (jobtype == JT_BACKUP_RESTORE && + (acl_access_ok(ua, RestoreClient_ACL, name) || + acl_access_ok(ua, BackupClient_ACL, name))) + { + return true; + } + return false; +} + + + +/* This version expects the length of the item which we must check. 
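+ * For example (summarizing the checks below): a list containing only "*all*"
+ * grants access to every item, an empty (unconfigured) list denies everything
+ * except a zero-length item for the Where ACL, and otherwise the item must
+ * match one list entry case-insensitively.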
*/ +bool acl_access_ok(UAContext *ua, int acl, const char *item, int len) +{ + /* The resource name contains nasty characters */ + if (acl != Where_ACL && !is_name_valid(item, NULL)) { + Dmsg1(1400, "Access denied for item=%s\n", item); + return false; + } + + /* If no console resource => default console and all is permitted */ + if (!ua || !ua->cons) { + Dmsg0(1400, "Root cons access OK.\n"); + return true; /* No cons resource -> root console OK for everything */ + } + + alist *list = ua->cons->ACL_lists[acl]; + if (!list) { /* empty list */ + if (len == 0 && acl == Where_ACL) { + return true; /* Empty list for Where => empty where */ + } + return false; /* List empty, reject everything */ + } + + /* Special case *all* gives full access */ + if (list->size() == 1 && strcasecmp("*all*", (char *)list->get(0)) == 0) { + return true; + } + + /* Search list for item */ + for (int i=0; i<list->size(); i++) { + if (strcasecmp(item, (char *)list->get(i)) == 0) { + Dmsg3(1400, "ACL found %s in %d %s\n", item, acl, (char *)list->get(i)); + return true; + } + } + return false; +} + +/* + * Return true if we have a restriction on the ACL + * false if there is no ACL restriction + */ +bool have_restricted_acl(UAContext *ua, int acl) +{ + alist *list; + + /* If no console resource => default console and all is permitted */ + if (!ua || !ua->cons) { + return false; /* no restrictions */ + } + + list = ua->cons->ACL_lists[acl]; + if (!list) { + return false; + } + /* Special case *all* gives full access */ + if (list->size() == 1 && strcasecmp("*all*", (char *)list->get(0)) == 0) { + return false; + } + return list->size() > 0; +} diff --git a/src/dird/ua_cmds.c b/src/dird/ua_cmds.c new file mode 100644 index 00000000..10712262 --- /dev/null +++ b/src/dird/ua_cmds.c @@ -0,0 +1,2678 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald.
+*/ +/* + * Bacula Director -- User Agent Commands + * + * Kern Sibbald, September MM + */ + +#include "bacula.h" +#include "dird.h" + +/* Imported subroutines */ + +/* Imported variables */ +extern jobq_t job_queue; /* job queue */ + + +/* Imported functions */ +extern int autodisplay_cmd(UAContext *ua, const char *cmd); +extern int gui_cmd(UAContext *ua, const char *cmd); +extern int label_cmd(UAContext *ua, const char *cmd); +extern int list_cmd(UAContext *ua, const char *cmd); +extern int llist_cmd(UAContext *ua, const char *cmd); +extern int messagescmd(UAContext *ua, const char *cmd); +extern int prunecmd(UAContext *ua, const char *cmd); +extern int purge_cmd(UAContext *ua, const char *cmd); +extern int truncate_cmd(UAContext *ua, const char *cmd); /* in ua_purge.c */ +extern int query_cmd(UAContext *ua, const char *cmd); +extern int relabel_cmd(UAContext *ua, const char *cmd); +extern int restore_cmd(UAContext *ua, const char *cmd); +extern int retentioncmd(UAContext *ua, const char *cmd); +extern int show_cmd(UAContext *ua, const char *cmd); +extern int sqlquery_cmd(UAContext *ua, const char *cmd); +extern int status_cmd(UAContext *ua, const char *cmd); +extern int update_cmd(UAContext *ua, const char *cmd); + +/* Forward referenced functions */ +static int add_cmd(UAContext *ua, const char *cmd); +static int automount_cmd(UAContext *ua, const char *cmd); +static int cancel_cmd(UAContext *ua, const char *cmd); +static int create_cmd(UAContext *ua, const char *cmd); +static int delete_cmd(UAContext *ua, const char *cmd); +static int disable_cmd(UAContext *ua, const char *cmd); +static int enable_cmd(UAContext *ua, const char *cmd); +static int estimate_cmd(UAContext *ua, const char *cmd); +static int help_cmd(UAContext *ua, const char *cmd); +static int memory_cmd(UAContext *ua, const char *cmd); +static int mount_cmd(UAContext *ua, const char *cmd); +static int release_cmd(UAContext *ua, const char *cmd); +static int reload_cmd(UAContext *ua, const char *cmd); +static int setdebug_cmd(UAContext *ua, const char *cmd); +static int setbwlimit_cmd(UAContext *ua, const char *cmd); +static int setip_cmd(UAContext *ua, const char *cmd); +static int time_cmd(UAContext *ua, const char *cmd); +static int trace_cmd(UAContext *ua, const char *cmd); +static int unmount_cmd(UAContext *ua, const char *cmd); +static int use_cmd(UAContext *ua, const char *cmd); +static int cloud_cmd(UAContext *ua, const char *cmd); +static int var_cmd(UAContext *ua, const char *cmd); +static int version_cmd(UAContext *ua, const char *cmd); +static int wait_cmd(UAContext *ua, const char *cmd); + +static void do_job_delete(UAContext *ua, JobId_t JobId); +static int delete_volume(UAContext *ua); +static int delete_pool(UAContext *ua); +static void delete_job(UAContext *ua); +static int delete_client(UAContext *ua); +static void do_storage_cmd(UAContext *ua, const char *command); + +int qhelp_cmd(UAContext *ua, const char *cmd); +int quit_cmd(UAContext *ua, const char *cmd); + +/* not all in alphabetical order. New commands are added after existing commands with similar letters + to prevent breakage of existing user scripts. 
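+   For example (added for clarity): do_a_command() matches the word the user
+   typed against these keys with strncasecmp() over its length, and the first
+   entry that matches wins, so an abbreviation resolves to the earliest
+   command it prefixes.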
*/ +struct cmdstruct { + const char *key; /* command */ + int (*func)(UAContext *ua, const char *cmd); /* handler */ + const char *help; /* main purpose */ + const char *usage; /* all arguments to build usage */ + const bool use_in_rs; /* Can use it in Console RunScript */ +}; +static struct cmdstruct commands[] = { /* Can use it in Console RunScript*/ + { NT_("add"), add_cmd, _("Add media to a pool"), NT_("pool= storage= jobid="), false}, + { NT_("autodisplay"), autodisplay_cmd,_("Autodisplay console messages"), NT_("on | off"), false}, + { NT_("automount"), automount_cmd, _("Automount after label"), NT_("on | off"), false}, + { NT_("cancel"), cancel_cmd, _("Cancel a job"), NT_("jobid= | job= | ujobid= | inactive client= storage= | all"), false}, + { NT_("cloud"), cloud_cmd, _("Specific Cloud commands"), + NT_("[storage=] [volume=] [pool=] [allpools] [allfrompool] [mediatype=] [drive=] [slots="), false}, + { NT_("delete"), delete_cmd, _("Delete volume, pool, client or job"), NT_("volume= | pool= | jobid= | client= | snapshot"), true}, + { NT_("disable"), disable_cmd, _("Disable a job, attributes batch process"), NT_("job= | client= | schedule= | storage= | batch"), true}, + { NT_("enable"), enable_cmd, _("Enable a job, attributes batch process"), NT_("job= | client= | schedule= | storage= | batch"), true}, + { NT_("estimate"), estimate_cmd, _("Performs FileSet estimate, listing gives full listing"), + NT_("fileset= client= level= accurate= job= listing"), true}, + + { NT_("exit"), quit_cmd, _("Terminate Bconsole session"), NT_(""), false}, + { NT_("gui"), gui_cmd, _("Non-interactive gui mode"), NT_("on | off"), false}, + { NT_("help"), help_cmd, _("Print help on specific command"), + NT_("add autodisplay automount cancel create delete disable\n\tenable estimate exit gui label list llist" + "\n\tmessages memory mount prune purge quit query\n\trestore relabel release reload run status" + "\n\tsetbandwidth setdebug setip show sqlquery time trace unmount\n\tumount update use var version wait" + "\n\tsnapshot"), false}, + + { NT_("label"), label_cmd, _("Label a tape"), NT_("storage= volume= pool= slot= drive= barcodes"), false}, + { NT_("list"), list_cmd, _("List objects from catalog"), + NT_("jobs [client=] [jobid=] [ujobid=] [job=] [joberrors] [jobstatus=] [level=] [jobtype=] [limit=]|\n" + "\tjobtotals | pools | volume | media | files [type=] jobid= | copies jobid= |\n" + "\tjoblog jobid= | pluginrestoreconf jobid= restoreobjectid= | snapshot | \n" + "\tfileindex= | clients\n" + ), false}, + + { NT_("llist"), llist_cmd, _("Full or long list like list command"), + NT_("jobs [client=] [jobid=] [ujobid=] [job=] [joberrors] [jobstatus=] [level=] [jobtype=] [order=] [limit=]|\n" + "\tjobtotals | pools | volume | media | files jobid= | copies jobid= |\n" + "\tjoblog jobid= | pluginrestoreconf jobid= restoreobjectid= | snapshot |\n" + "\tjobid= fileindex= | clients\n"), false}, + + { NT_("messages"), messagescmd, _("Display pending messages"), NT_(""), false}, + { NT_("memory"), memory_cmd, _("Print current memory usage"), NT_(""), true}, + { NT_("mount"), mount_cmd, _("Mount storage"), + NT_("storage= slot= drive= [ device= ] [ jobid= | job= ]"), false}, + + { NT_("prune"), prunecmd, _("Prune expired records from catalog"), + NT_("files | jobs | pool= | snapshot [client=] | client= | [ expired ] volume= "), true}, + + { NT_("purge"), purge_cmd, _("Purge records from catalog"), NT_("files jobs volume= [mediatype= pool= allpools storage= drive=]"), true}, + { NT_("quit"), quit_cmd, _("Terminate 
Bconsole session"), NT_(""), false}, + { NT_("query"), query_cmd, _("Query catalog"), NT_("[]"), false}, + { NT_("restore"), restore_cmd, _("Restore files"), + NT_("where= client= storage= bootstrap= " + "restorejob= restoreclient= noautoparent" + "\n\tcomment= jobid= jobuser= jobgroup= copies done select all"), false}, + + { NT_("relabel"), relabel_cmd, _("Relabel a tape"), + NT_("storage= oldvolume=\n\tvolume= pool="), false}, + + { NT_("release"), release_cmd, _("Release storage"), NT_("storage= [ device= ] "), false}, + { NT_("reload"), reload_cmd, _("Reload conf file"), NT_(""), true}, + { NT_("run"), run_cmd, _("Run a job"), + NT_("job= client=\n\tfileset= level=\n\tstorage=" + " where=\n\twhen= pool=\n\t" + " nextpool= comment= accurate= spooldata= yes"), false}, + + { NT_("restart"), restart_cmd, _("Restart a job"), + NT_("incomplete job= client=\n\tfileset= level=\n\tstorage=" + "when=\n\tcomment= spooldata= jobid="), false}, + + { NT_("resume"), restart_cmd, _("Resume a job"), + NT_("incomplete job= client=\n\tfileset= level=\n\tstorage=" + "when=\n\tcomment= spooldata= jobid="), false}, + + { NT_("status"), status_cmd, _("Report status"), + NT_("all | network [bytes=] | dir= | director | client= |\n" + "\tstorage= slots |\n" + "\tschedule [job=] [client=] [schedule=] [days=] [limit=]\n" + "\t\t[time=]"), true}, + + { NT_("stop"), cancel_cmd, _("Stop a job"), NT_("jobid= job= ujobid= all"), false}, + { NT_("setdebug"), setdebug_cmd, _("Sets debug level"), + NT_("level= tags= trace=0/1 options=<0tTc> tags= | client= | dir | storage= | all"), true}, + + { NT_("setbandwidth"), setbwlimit_cmd, _("Sets bandwidth"), + NT_("limit= client= jobid= job= ujobid="), true}, + + { NT_("snapshot"), snapshot_cmd, _("Handle snapshots"), + NT_("[client= | job= | jobid=] [delete | list | listclient | prune | sync | update]"), true}, + + { NT_("setip"), setip_cmd, _("Sets new client address -- if authorized"), NT_(""), false}, + { NT_("show"), show_cmd, _("Show resource records"), + NT_("job= | pool= | fileset= | schedule= | client= | storage= | disabled | all"), true}, + + { NT_("sqlquery"), sqlquery_cmd, _("Use SQL to query catalog"), NT_(""), false}, + { NT_("time"), time_cmd, _("Print current time"), NT_(""), true}, + { NT_("trace"), trace_cmd, _("Turn on/off trace to file"), NT_("on | off"), true}, + { NT_("truncate"), truncate_cmd, _("Truncate one or more Volumes"), NT_("volume= [mediatype= pool= allpools storage= drive=]"), true}, + { NT_("unmount"), unmount_cmd, _("Unmount storage"), + NT_("storage= [ drive= ] | jobid= | job="), false}, + + { NT_("umount"), unmount_cmd, _("Umount - for old-time Unix guys, see unmount"), + NT_("storage= [ drive= ] [ device= ]| jobid= | job="), false}, + + { NT_("update"), update_cmd, _("Update volume, pool or stats"), + NT_("stats\n\tsnapshot\n\tpool=\n\tslots storage= scan" + "\n\tvolume= volstatus= volretention= cacheretention=" + "\n\t pool= recycle= slot=\n\t inchanger=" + "\n\t maxvolbytes= maxvolfiles= maxvoljobs=" + "\n\t enabled= recyclepool= actiononpurge=" + "\n\t allfrompool= fromallpools frompool"),true}, + { NT_("use"), use_cmd, _("Use catalog xxx"), NT_("catalog="), false}, + { NT_("var"), var_cmd, _("Does variable expansion"), NT_(""), false}, + { NT_("version"), version_cmd, _("Print Director version"), NT_(""), true}, + { NT_("wait"), wait_cmd, _("Wait until no jobs are running"), + NT_("jobname= | jobid= | ujobid="), false} +}; + +#define comsize ((int)(sizeof(commands)/sizeof(struct cmdstruct))) + +const char *get_command(int index) { + 
return commands[index].key; +} + +/* + * Execute a command from the UA + */ +bool do_a_command(UAContext *ua) +{ + int i; + int len; + bool ok = false; + bool found = false; + + Dmsg1(900, "Command: %s\n", ua->argk[0]); + if (ua->argc == 0) { + return false; + } + + if (ua->jcr->wstorage) { + while (ua->jcr->wstorage->size()) { + ua->jcr->wstorage->remove(0); + } + } + + len = strlen(ua->argk[0]); + for (i=0; iargk[0], commands[i].key, len) == 0) { + ua->cmd_index = i; + /* Check if command permitted, but "quit" is always OK */ + if (strcmp(ua->argk[0], NT_("quit")) != 0 && + !acl_access_ok(ua, Command_ACL, ua->argk[0], len)) { + break; + } + /* Check if this command is authorized in RunScript */ + if (ua->runscript && !commands[i].use_in_rs) { + ua->error_msg(_("Can't use %s command in a runscript"), ua->argk[0]); + break; + } + if (ua->api) ua->signal(BNET_CMD_BEGIN); + ok = (*commands[i].func)(ua, ua->cmd); /* go execute command */ + if (ua->api) ua->signal(ok?BNET_CMD_OK:BNET_CMD_FAILED); + found = ua->UA_sock && ua->UA_sock->is_stop() ? false : true; + break; + } + } + if (!found) { + ua->error_msg(_("%s: is an invalid command.\n"), ua->argk[0]); + ok = false; + } + return ok; +} + +/* + * This is a common routine used to stuff the Pool DB record defaults + * into the Media DB record just before creating a media (Volume) + * record. + */ +void set_pool_dbr_defaults_in_media_dbr(MEDIA_DBR *mr, POOL_DBR *pr) +{ + mr->PoolId = pr->PoolId; + bstrncpy(mr->VolStatus, NT_("Append"), sizeof(mr->VolStatus)); + mr->Recycle = pr->Recycle; + mr->VolRetention = pr->VolRetention; + mr->CacheRetention = pr->CacheRetention; + mr->VolUseDuration = pr->VolUseDuration; + mr->ActionOnPurge = pr->ActionOnPurge; + mr->RecyclePoolId = pr->RecyclePoolId; + mr->MaxVolJobs = pr->MaxVolJobs; + mr->MaxVolFiles = pr->MaxVolFiles; + mr->MaxVolBytes = pr->MaxVolBytes; + mr->LabelType = pr->LabelType; + mr->Enabled = 1; +} + + +/* + * Add Volumes to an existing Pool + */ +static int add_cmd(UAContext *ua, const char *cmd) +{ + POOL_DBR pr; + MEDIA_DBR mr; + int num, i, max, startnum; + char name[MAX_NAME_LENGTH]; + STORE *store; + int Slot = 0, InChanger = 0; + + ua->send_msg(_( +"You probably don't want to be using this command since it\n" +"creates database records without labeling the Volumes.\n" +"You probably want to use the \"label\" command.\n\n")); + + if (!open_client_db(ua)) { + return 1; + } + + memset(&pr, 0, sizeof(pr)); + + if (!get_pool_dbr(ua, &pr)) { + return 1; + } + + Dmsg4(120, "id=%d Num=%d Max=%d type=%s\n", pr.PoolId, pr.NumVols, + pr.MaxVols, pr.PoolType); + + while (pr.MaxVols > 0 && pr.NumVols >= pr.MaxVols) { + ua->warning_msg(_("Pool already has maximum volumes=%d\n"), pr.MaxVols); + if (!get_pint(ua, _("Enter new maximum (zero for unlimited): "))) { + return 1; + } + pr.MaxVols = ua->pint32_val; + } + + /* Get media type */ + if ((store = get_storage_resource(ua, false/*no default*/)) != NULL) { + bstrncpy(mr.MediaType, store->media_type, sizeof(mr.MediaType)); + } else if (!get_media_type(ua, mr.MediaType, sizeof(mr.MediaType))) { + return 1; + } + + if (pr.MaxVols == 0) { + max = 1000; + } else { + max = pr.MaxVols - pr.NumVols; + } + for (;;) { + char buf[100]; + bsnprintf(buf, sizeof(buf), _("Enter number of Volumes to create. 0=>fixed name. 
Max=%d: "), max); + if (!get_pint(ua, buf)) { + return 1; + } + num = ua->pint32_val; + if (num < 0 || num > max) { + ua->warning_msg(_("The number must be between 0 and %d\n"), max); + continue; + } + break; + } + + for (;;) { + if (num == 0) { + if (!get_cmd(ua, _("Enter Volume name: "))) { + return 1; + } + } else { + if (!get_cmd(ua, _("Enter base volume name: "))) { + return 1; + } + } + /* Don't allow | in Volume name because it is the volume separator character */ + if (!is_volume_name_legal(ua, ua->cmd)) { + continue; + } + if (strlen(ua->cmd) >= MAX_NAME_LENGTH-10) { + ua->warning_msg(_("Volume name too long.\n")); + continue; + } + if (strlen(ua->cmd) == 0) { + ua->warning_msg(_("Volume name must be at least one character long.\n")); + continue; + } + break; + } + + bstrncpy(name, ua->cmd, sizeof(name)); + if (num > 0) { + bstrncat(name, "%04d", sizeof(name)); + + for (;;) { + if (!get_pint(ua, _("Enter the starting number: "))) { + return 1; + } + startnum = ua->pint32_val; + if (startnum < 1) { + ua->warning_msg(_("Start number must be greater than zero.\n")); + continue; + } + break; + } + } else { + startnum = 1; + num = 1; + } + + if (store && store->autochanger) { + if (!get_pint(ua, _("Enter slot (0 for none): "))) { + return 1; + } + Slot = ua->pint32_val; + if (!get_yesno(ua, _("InChanger? yes/no: "))) { + return 1; + } + InChanger = ua->pint32_val; + } + + set_pool_dbr_defaults_in_media_dbr(&mr, &pr); + for (i=startnum; i < num+startnum; i++) { + bsnprintf(mr.VolumeName, sizeof(mr.VolumeName), name, i); + mr.Slot = Slot++; + mr.InChanger = InChanger; + mr.Enabled = 1; + set_storageid_in_mr(store, &mr); + Dmsg1(200, "Create Volume %s\n", mr.VolumeName); + if (!db_create_media_record(ua->jcr, ua->db, &mr)) { + ua->error_msg("%s", db_strerror(ua->db)); + return 1; + } +// if (i == startnum) { +// first_id = mr.PoolId; +// } + } + pr.NumVols += num; + Dmsg0(200, "Update pool record.\n"); + if (db_update_pool_record(ua->jcr, ua->db, &pr) != 1) { + ua->warning_msg("%s", db_strerror(ua->db)); + return 1; + } + ua->send_msg(_("%d Volumes created in pool %s\n"), num, pr.Name); + + return 1; +} + +/* + * Turn auto mount on/off + * + * automount on + * automount off + */ +int automount_cmd(UAContext *ua, const char *cmd) +{ + char *onoff; + + if (ua->argc != 2) { + if (!get_cmd(ua, _("Turn on or off? "))) { + return 1; + } + onoff = ua->cmd; + } else { + onoff = ua->argk[1]; + } + + ua->automount = (strcasecmp(onoff, NT_("off")) == 0) ? 0 : 1; + return 1; +} + +/* + * Cancel/Stop a job -- Stop marks it as Incomplete + * so that it can be restarted. + */ +static int cancel_cmd(UAContext *ua, const char *cmd) +{ + JCR *jcr; + bool ret = true; + int nb; + bool cancel = strcasecmp(commands[ua->cmd_index].key, "cancel") == 0; + alist *jcrs = New(alist(5, not_owned_by_alist)); + + /* If the user explicitely ask, we can send the cancel command to + * the FD. + */ + if (find_arg(ua, "inactive") > 0) { + ret = cancel_inactive_job(ua); + goto bail_out; + } + + nb = select_running_jobs(ua, jcrs, commands[ua->cmd_index].key); + + foreach_alist(jcr, jcrs) { + /* Execute the cancel command only if we don't have an error */ + if (nb != -1) { + ret &= cancel_job(ua, jcr, 60, cancel); + } + free_jcr(jcr); + } + +bail_out: + delete jcrs; + return ret; +} + +/* + * This is a common routine to create or update a + * Pool DB base record from a Pool Resource. 
We handle + * the setting of MaxVols and NumVols slightly differently + * depending on if we are creating the Pool or we are + * simply bringing it into agreement with the resource (updage). + * + * Caution : RecyclePoolId isn't setup in this function. + * You can use set_pooldbr_recyclepoolid(); + * + */ +void set_pooldbr_from_poolres(POOL_DBR *pr, POOL *pool, e_pool_op op) +{ + bstrncpy(pr->PoolType, pool->pool_type, sizeof(pr->PoolType)); + if (op == POOL_OP_CREATE) { + pr->MaxVols = pool->max_volumes; + pr->NumVols = 0; + } else { /* update pool */ + if (pr->MaxVols != pool->max_volumes) { + pr->MaxVols = pool->max_volumes; + } + if (pr->MaxVols != 0 && pr->MaxVols < pr->NumVols) { + pr->MaxVols = pr->NumVols; + } + } + pr->LabelType = pool->LabelType; + pr->UseOnce = pool->use_volume_once; + pr->UseCatalog = pool->use_catalog; + pr->Recycle = pool->Recycle; + pr->VolRetention = pool->VolRetention; + pr->CacheRetention = pool->CacheRetention; + pr->VolUseDuration = pool->VolUseDuration; + pr->MaxVolJobs = pool->MaxVolJobs; + pr->MaxVolFiles = pool->MaxVolFiles; + pr->MaxVolBytes = pool->MaxVolBytes; + pr->AutoPrune = pool->AutoPrune; + pr->ActionOnPurge = pool->action_on_purge; + pr->Recycle = pool->Recycle; + if (pool->label_format) { + bstrncpy(pr->LabelFormat, pool->label_format, sizeof(pr->LabelFormat)); + } else { + bstrncpy(pr->LabelFormat, "*", sizeof(pr->LabelFormat)); /* none */ + } +} + +/* set/update Pool.RecyclePoolId and Pool.ScratchPoolId in Catalog */ +int update_pool_references(JCR *jcr, BDB *db, POOL *pool) +{ + POOL_DBR pr; + + if (pool->ScratchPool == pool) { + Jmsg(NULL, M_WARNING, 0, + _("The ScratchPool directive for Pool \"%s\" is incorrect. Using default Scratch pool instead.\n"), + pool->name()); + pool->ScratchPool = NULL; + } + + if (!pool->RecyclePool && !pool->ScratchPool) { + return 1; + } + + memset(&pr, 0, sizeof(POOL_DBR)); + bstrncpy(pr.Name, pool->name(), sizeof(pr.Name)); + + /* Don't compute NumVols here */ + if (!db_get_pool_record(jcr, db, &pr)) { + return -1; /* not exists in database */ + } + + set_pooldbr_from_poolres(&pr, pool, POOL_OP_UPDATE); + + if (!set_pooldbr_references(jcr, db, &pr, pool)) { + return -1; /* error */ + } + + /* NumVols is updated here */ + if (!db_update_pool_record(jcr, db, &pr)) { + return -1; /* error */ + } + return 1; +} + +/* set POOL_DBR.RecyclePoolId and POOL_DBR.ScratchPoolId from Pool resource + * works with set_pooldbr_from_poolres + */ +bool set_pooldbr_references(JCR *jcr, BDB *db, POOL_DBR *pr, POOL *pool) +{ + POOL_DBR rpool; + bool ret = true; + + if (pool->RecyclePool) { + memset(&rpool, 0, sizeof(POOL_DBR)); + + bstrncpy(rpool.Name, pool->RecyclePool->name(), sizeof(rpool.Name)); + if (db_get_pool_record(jcr, db, &rpool)) { + pr->RecyclePoolId = rpool.PoolId; + } else { + Jmsg(jcr, M_WARNING, 0, + _("Can't set %s RecyclePool to %s, %s is not in database.\n" \ + "Try to update it with 'update pool=%s'\n"), + pool->name(), rpool.Name, rpool.Name,pool->name()); + + ret = false; + } + } else { /* no RecyclePool used, set it to 0 */ + pr->RecyclePoolId = 0; + } + + if (pool->ScratchPool) { + memset(&rpool, 0, sizeof(POOL_DBR)); + + bstrncpy(rpool.Name, pool->ScratchPool->name(), sizeof(rpool.Name)); + if (db_get_pool_record(jcr, db, &rpool)) { + pr->ScratchPoolId = rpool.PoolId; + } else { + Jmsg(jcr, M_WARNING, 0, + _("Can't set %s ScratchPool to %s, %s is not in database.\n" \ + "Try to update it with 'update pool=%s'\n"), + pool->name(), rpool.Name, rpool.Name,pool->name()); + ret = false; + } + } else { 
/* no ScratchPool used, set it to 0 */ + pr->ScratchPoolId = 0; + } + + return ret; +} + + +/* + * Create a pool record from a given Pool resource + * Also called from backup.c + * Returns: -1 on error + * 0 record already exists + * 1 record created + */ + +int create_pool(JCR *jcr, BDB *db, POOL *pool, e_pool_op op) +{ + POOL_DBR pr; + memset(&pr, 0, sizeof(POOL_DBR)); + bstrncpy(pr.Name, pool->name(), sizeof(pr.Name)); + + if (db_get_pool_record(jcr, db, &pr)) { + /* Pool Exists */ + if (op == POOL_OP_UPDATE) { /* update request */ + set_pooldbr_from_poolres(&pr, pool, op); + set_pooldbr_references(jcr, db, &pr, pool); + db_update_pool_record(jcr, db, &pr); + } + return 0; /* exists */ + } + + set_pooldbr_from_poolres(&pr, pool, op); + set_pooldbr_references(jcr, db, &pr, pool); + + if (!db_create_pool_record(jcr, db, &pr)) { + return -1; /* error */ + } + return 1; +} + + + +/* + * Create a Pool Record in the database. + * It is always created from the Resource record. + */ +static int create_cmd(UAContext *ua, const char *cmd) +{ + POOL *pool; + + if (!open_client_db(ua)) { + return 1; + } + + pool = get_pool_resource(ua); + if (!pool) { + return 1; + } + + switch (create_pool(ua->jcr, ua->db, pool, POOL_OP_CREATE)) { + case 0: + ua->error_msg(_("Error: Pool %s already exists.\n" + "Use update to change it.\n"), pool->name()); + break; + + case -1: + ua->error_msg("%s", db_strerror(ua->db)); + break; + + default: + break; + } + ua->send_msg(_("Pool %s created.\n"), pool->name()); + return 1; +} + + +extern DIRRES *director; +extern char *configfile; + +static int setbwlimit_client(UAContext *ua, CLIENT *client, char *Job, int64_t limit) +{ + POOL_MEM buf; + CLIENT *old_client; + char ed1[50]; + if (!client) { + return 1; + } + + /* Connect to File daemon */ + old_client = ua->jcr->client; + ua->jcr->client = client; + ua->jcr->max_bandwidth = limit; + + /* Try to connect for 15 seconds */ + ua->send_msg(_("Connecting to Client %s at %s:%d\n"), + client->name(), client->address(buf.addr()), client->FDport); + if (!connect_to_file_daemon(ua->jcr, 1, 15, 0)) { + ua->error_msg(_("Failed to connect to Client.\n")); + goto bail_out; + } + Dmsg0(120, "Connected to file daemon\n"); + + if (!send_bwlimit(ua->jcr, Job)) { + ua->error_msg(_("Failed to set bandwidth limit to Client.\n")); + + } else { + /* Note, we add 2000 OK that was sent by FD to us to message */ + ua->info_msg(_("2000 OK Limiting bandwidth to %sB/s %s\n"), + edit_uint64_with_suffix(limit, ed1), *Job?Job:_("on running and future jobs")); + } + + ua->jcr->file_bsock->signal(BNET_TERMINATE); + free_bsock(ua->jcr->file_bsock); + ua->jcr->max_bandwidth = 0; + +bail_out: + ua->jcr->client = old_client; + return 1; +} + +static int setbwlimit_cmd(UAContext *ua, const char *cmd) +{ + int action = -1; + CLIENT *client = NULL; + char Job[MAX_NAME_LENGTH]; + *Job=0; + uint64_t limit = 0; + JCR *jcr = NULL; + int i; + + const char *lst_all[] = { "job", "jobid", "jobname", "client", NULL }; + if (find_arg_keyword(ua, lst_all) < 0) { + start_prompt(ua, _("Set Bandwidth choice:\n")); + add_prompt(ua, _("Running Job")); /* 0 */ + add_prompt(ua, _("Running and future Jobs for a Client")); /* 1 */ + action = do_prompt(ua, "item", _("Choose where to limit the bandwidth"), + NULL, 0); + if (action < 0) { + return 1; + } + } + + i = find_arg_with_value(ua, "limit"); + if (i >= 0) { + if (!speed_to_uint64(ua->argv[i], strlen(ua->argv[i]), &limit)) { + ua->error_msg(_("Invalid value for limit parameter. 
Expecting speed.\n")); + return 1; + } + } else { + if (!get_cmd(ua, _("Enter new bandwidth limit: "))) { + return 1; + } + if (!speed_to_uint64(ua->cmd, strlen(ua->cmd), &limit)) { + ua->error_msg(_("Invalid value for limit parameter. Expecting speed.\n")); + return 1; + } + } + + const char *lst[] = { "job", "jobid", "jobname", NULL }; + if (action == 0 || find_arg_keyword(ua, lst) > 0) { + alist *jcrs = New(alist(10, not_owned_by_alist)); + select_running_jobs(ua, jcrs, "limit"); + foreach_alist(jcr, jcrs) { + jcr->max_bandwidth = limit; /* TODO: see for locking (Should be safe)*/ + bstrncpy(Job, jcr->Job, sizeof(Job)); + client = jcr->client; + setbwlimit_client(ua, client, Job, limit); + free_jcr(jcr); + } + + } else { + client = get_client_resource(ua, JT_BACKUP_RESTORE); + if (client) { + setbwlimit_client(ua, client, Job, limit); + } + } + return 1; +} + +/* + * Set a new address in a Client resource. We do this only + * if the Console name is the same as the Client name + * and the Console can access the client. + */ +static int setip_cmd(UAContext *ua, const char *cmd) +{ + CLIENT *client; + char addr[1024]; + if (!ua->cons || !acl_access_client_ok(ua, ua->cons->name(), JT_BACKUP_RESTORE)) { + ua->error_msg(_("Unauthorized command from this console.\n")); + return 1; + } + LockRes(); + client = GetClientResWithName(ua->cons->name()); + + if (!client) { + ua->error_msg(_("Client \"%s\" not found.\n"), ua->cons->name()); + goto get_out; + } + /* MA Bug 6 remove ifdef */ + sockaddr_to_ascii(&(ua->UA_sock->client_addr), + sizeof(ua->UA_sock->client_addr), addr, sizeof(addr)); + client->setAddress(addr); + ua->send_msg(_("Client \"%s\" address set to %s\n"), + client->name(), addr); +get_out: + UnlockRes(); + return 1; +} + +/* + * Does all sorts of enable/disable commands: batch, scheduler (not implemented) + * job, client, schedule, storage + */ +static void do_enable_disable_cmd(UAContext *ua, bool setting) +{ + JOB *job = NULL; + CLIENT *client = NULL; + SCHED *sched = NULL; + int i; + + if (find_arg(ua, NT_("batch")) > 0) { + ua->send_msg(_("Job Attributes Insertion %sabled\n"), setting?"en":"dis"); + db_disable_batch_insert(setting); + return; + } + + /* + * if (find_arg(ua, NT_("scheduler")) > 0) { + * ua->send_msg(_("Job Scheduler %sabled\n"), setting?"en":"dis"); + * return; + * } + */ + + i = find_arg(ua, NT_("job")); + if (i >= 0) { + if (ua->argv[i]) { + LockRes(); + job = GetJobResWithName(ua->argv[i]); + UnlockRes(); + } else { + job = select_enable_disable_job_resource(ua, setting); + if (!job) { + return; + } + } + } + if (job) { + if (!acl_access_ok(ua, Job_ACL, job->name())) { + ua->error_msg(_("Unauthorized command from this console.\n")); + return; + } + job->setEnabled(setting); + ua->send_msg(_("Job \"%s\" %sabled\n"), job->name(), setting?"en":"dis"); + } + + i = find_arg(ua, NT_("client")); + if (i >= 0) { + if (ua->argv[i]) { + LockRes(); + client = GetClientResWithName(ua->argv[i]); + UnlockRes(); + } else { + client = select_enable_disable_client_resource(ua, setting); + if (!client) { + return; + } + } + } + if (client) { + if (!acl_access_client_ok(ua, client->name(), JT_BACKUP_RESTORE)) { + ua->error_msg(_("Unauthorized command from this console.\n")); + return; + } + client->setEnabled(setting); + ua->send_msg(_("Client \"%s\" %sabled\n"), client->name(), setting?"en":"dis"); + } + + i = find_arg(ua, NT_("schedule")); + if (i >= 0) { + if (ua->argv[i]) { + LockRes(); + sched = (SCHED *)GetResWithName(R_SCHEDULE, ua->argv[i]); + UnlockRes(); + } else { + 
sched = select_enable_disable_schedule_resource(ua, setting); + if (!sched) { + return; + } + } + } + if (sched) { + if (!acl_access_ok(ua, Schedule_ACL, sched->name())) { + ua->error_msg(_("Unauthorized command from this console.\n")); + return; + } + sched->setEnabled(setting); + ua->send_msg(_("Schedule \"%s\" %sabled\n"), sched->name(), setting?"en":"dis"); + } + + i = find_arg(ua, NT_("storage")); + if (i >= 0) { + do_storage_cmd(ua, setting?"enable":"disable"); + } + + if (i < 0 && !sched && !client && !job) { + ua->error_msg(_("You must enter one of the following keywords: job, client, schedule, or storage.\n")); + } + + return; +} + +static int enable_cmd(UAContext *ua, const char *cmd) +{ + do_enable_disable_cmd(ua, true); + return 1; +} + +static int disable_cmd(UAContext *ua, const char *cmd) +{ + do_enable_disable_cmd(ua, false); + return 1; +} + +static void do_dir_setdebug(UAContext *ua, int64_t level, int trace_flag, char *options, int64_t tags) +{ + debug_level = level; + debug_level_tags = tags; + set_trace(trace_flag); + set_debug_flags(options); +} + +static void do_storage_setdebug(UAContext *ua, STORE *store, + int64_t level, int trace_flag, int hangup, int blowup, + char *options, char *tags) +{ + BSOCK *sd; + USTORE lstore; + + lstore.store = store; + pm_strcpy(lstore.store_source, _("unknown source")); + set_wstorage(ua->jcr, &lstore); + /* Try connecting for up to 15 seconds */ + ua->send_msg(_("Connecting to Storage daemon %s at %s:%d\n"), + store->name(), store->address, store->SDport); + if (!connect_to_storage_daemon(ua->jcr, 1, 15, 0)) { + ua->error_msg(_("Failed to connect to Storage daemon.\n")); + return; + } + Dmsg0(120, _("Connected to storage daemon\n")); + sd = ua->jcr->store_bsock; + sd->fsend("setdebug=%ld trace=%ld hangup=%ld blowup=%ld options=%s tags=%s\n", + (int32_t)level, trace_flag, hangup, blowup, options, NPRTB(tags)); + if (sd->recv() >= 0) { + ua->send_msg("%s", sd->msg); + } + sd->signal(BNET_TERMINATE); + free_bsock(ua->jcr->store_bsock); + return; +} + +/* + * For the client, we have the following values that can be set + * level = debug level + * trace = send debug output to a file + * options = various options for debug or specific FD behavior + * hangup = how many records to send to FD before hanging up + * obviously this is most useful for testing restarting + * failed jobs. + * blowup = how many records to send to FD before blowing up the FD. 
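+ *
+ * Example console invocations (the client name below is only
+ * illustrative, it is not taken from this source):
+ *
+ *    setdebug level=100 trace=1 client=acme-fd
+ *    setdebug level=50 trace=0 dir
+ *
+ * For clients reporting FDVersion <= 10 only the short
+ * "setdebug=... trace=... hangup=..." request is sent; newer clients
+ * receive the long form with blowup, options and tags, as built by
+ * the fsend() calls in do_client_setdebug() below.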
+ */ +static void do_client_setdebug(UAContext *ua, CLIENT *client, + int64_t level, int trace, int hangup, int blowup, + char *options, char *tags) +{ + POOL_MEM buf; + CLIENT *old_client; + BSOCK *fd; + + /* Connect to File daemon */ + + old_client = ua->jcr->client; + ua->jcr->client = client; + /* Try to connect for 15 seconds */ + ua->send_msg(_("Connecting to Client %s at %s:%d\n"), + client->name(), client->address(buf.addr()), client->FDport); + + if (!connect_to_file_daemon(ua->jcr, 1, 15, 0)) { + ua->error_msg(_("Failed to connect to Client.\n")); + ua->jcr->client = old_client; + return; + } + Dmsg0(120, "Connected to file daemon\n"); + + fd = ua->jcr->file_bsock; + if (ua->jcr->FDVersion <= 10) { + fd->fsend("setdebug=%ld trace=%d hangup=%d\n", + (int32_t)level, trace, hangup); + } else { + fd->fsend("setdebug=%ld trace=%d hangup=%d blowup=%d options=%s tags=%s\n", + (int32_t)level, trace, hangup, blowup, options, NPRTB(tags)); + } + if (fd->recv() >= 0) { + ua->send_msg("%s", fd->msg); + } + fd->signal(BNET_TERMINATE); + free_bsock(ua->jcr->file_bsock); + ua->jcr->client = old_client; + return; +} + + +static void do_all_setdebug(UAContext *ua, int64_t level, + int trace_flag, int hangup, int blowup, + char *options, char *tags) +{ + STORE *store, **unique_store; + CLIENT *client, **unique_client; + POOL_MEM buf1, buf2; + int i, j, found; + int64_t t=0; + + /* Director */ + debug_parse_tags(tags, &t); + do_dir_setdebug(ua, level, trace_flag, options, t); + + /* Count Storage items */ + LockRes(); + store = NULL; + i = 0; + foreach_res(store, R_STORAGE) { + i++; + } + unique_store = (STORE **) malloc(i * sizeof(STORE)); + /* Find Unique Storage address/port */ + store = (STORE *)GetNextRes(R_STORAGE, NULL); + i = 0; + unique_store[i++] = store; + while ((store = (STORE *)GetNextRes(R_STORAGE, (RES *)store))) { + found = 0; + for (j=0; jaddress, store->address) == 0 && + unique_store[j]->SDport == store->SDport) { + found = 1; + break; + } + } + if (!found) { + unique_store[i++] = store; + Dmsg2(140, "Stuffing: %s:%d\n", store->address, store->SDport); + } + } + UnlockRes(); + + /* Call each unique Storage daemon */ + for (j=0; jaddress(buf1.addr()), client->address(buf2.addr())) == 0 && + unique_client[j]->FDport == client->FDport) { + found = 1; + break; + } + } + if (!found) { + unique_client[i++] = client; + Dmsg2(140, "Stuffing: %s:%d\n", client->address(buf1.addr()), client->FDport); + } + } + UnlockRes(); + + /* Call each unique File daemon */ + for (j=0; j= 0) { + bstrncpy(options, ua->argv[i], sizeof(options) - 1); + } + level = -1; + i = find_arg_with_value(ua, "level"); + if (i >= 0) { + level = str_to_int64(ua->argv[i]); + } + if (level < 0) { + if (!get_pint(ua, _("Enter new debug level: "))) { + return 1; + } + level = ua->pint32_val; + } + + /* Better to send the tag string instead of tweaking the level + * in case where we extend the tag or change the representation + */ + i = find_arg_with_value(ua, "tags"); + if (i > 0) { + tags_str = ua->argv[i]; + if (!debug_parse_tags(tags_str, &tags)) { + ua->error_msg(_("Incorrect tags found on command line %s\n"), tags_str); + return 1; + } + } + + /* Look for trace flag. -1 => not change */ + i = find_arg_with_value(ua, "trace"); + if (i >= 0) { + trace_flag = atoi(ua->argv[i]); + if (trace_flag > 0) { + trace_flag = 1; + } + } + + /* Look for hangup (debug only) flag. -1 => not change */ + i = find_arg_with_value(ua, "hangup"); + if (i >= 0) { + hangup = atoi(ua->argv[i]); + } + + /* Look for blowup (debug only) flag. 
-1 => not change */ + i = find_arg_with_value(ua, "blowup"); + if (i >= 0) { + blowup = atoi(ua->argv[i]); + } + + /* General debug? */ + for (i=1; iargc; i++) { + if (strcasecmp(ua->argk[i], "all") == 0) { + do_all_setdebug(ua, level, trace_flag, hangup, blowup, options, tags_str); + return 1; + } + if (strcasecmp(ua->argk[i], "dir") == 0 || + strcasecmp(ua->argk[i], "director") == 0) { + do_dir_setdebug(ua, level, trace_flag, options, tags); + return 1; + } + if (strcasecmp(ua->argk[i], "client") == 0 || + strcasecmp(ua->argk[i], "fd") == 0) { + client = NULL; + if (ua->argv[i]) { + client = GetClientResWithName(ua->argv[i]); + if (client) { + do_client_setdebug(ua, client, level, trace_flag, + hangup, blowup, options, tags_str); + return 1; + } + } + client = select_client_resource(ua, JT_BACKUP_RESTORE); + if (client) { + do_client_setdebug(ua, client, level, trace_flag, + hangup, blowup, options, tags_str); + return 1; + } + } + + if (strcasecmp(ua->argk[i], NT_("store")) == 0 || + strcasecmp(ua->argk[i], NT_("storage")) == 0 || + strcasecmp(ua->argk[i], NT_("sd")) == 0) { + store = NULL; + if (ua->argv[i]) { + store = GetStoreResWithName(ua->argv[i]); + if (store) { + do_storage_setdebug(ua, store, level, trace_flag, + hangup, blowup, options, tags_str); + return 1; + } + } + store = get_storage_resource(ua, false/*no default*/, true/*unique*/); + if (store) { + do_storage_setdebug(ua, store, level, trace_flag, + hangup, blowup, options, tags_str); + return 1; + } + } + } + /* + * We didn't find an appropriate keyword above, so + * prompt the user. + */ + start_prompt(ua, _("Available daemons are: \n")); + add_prompt(ua, _("Director")); + add_prompt(ua, _("Storage")); + add_prompt(ua, _("Client")); + add_prompt(ua, _("All")); + switch(do_prompt(ua, "", _("Select daemon type to set debug level"), NULL, 0)) { + case 0: /* Director */ + do_dir_setdebug(ua, level, trace_flag, options, tags); + break; + case 1: + store = get_storage_resource(ua, false/*no default*/, true/*unique*/); + if (store) { + do_storage_setdebug(ua, store, level, trace_flag, hangup, blowup, + options, tags_str); + } + break; + case 2: + client = select_client_resource(ua, JT_BACKUP_RESTORE); + if (client) { + do_client_setdebug(ua, client, level, trace_flag, hangup, blowup, + options, tags_str); + } + break; + case 3: + do_all_setdebug(ua, level, trace_flag, hangup, blowup, options, tags_str); + break; + default: + break; + } + return 1; +} + +/* + * Turn debug tracing to file on/off + */ +static int trace_cmd(UAContext *ua, const char *cmd) +{ + char *onoff; + + if (ua->argc != 2) { + if (!get_cmd(ua, _("Turn on or off? "))) { + return 1; + } + onoff = ua->cmd; + } else { + onoff = ua->argk[1]; + } + + set_trace((strcasecmp(onoff, NT_("off")) == 0) ? 
false : true); + return 1; +} + +static int var_cmd(UAContext *ua, const char *cmd) +{ + POOLMEM *val = get_pool_memory(PM_FNAME); + char *var; + + if (!open_client_db(ua)) { + return 1; + } + for (var=ua->cmd; *var != ' '; ) { /* skip command */ + var++; + } + while (*var == ' ') { /* skip spaces */ + var++; + } + Dmsg1(100, "Var=%s:\n", var); + variable_expansion(ua->jcr, var, &val); + ua->send_msg("%s\n", val); + free_pool_memory(val); + return 1; +} + +static int estimate_cmd(UAContext *ua, const char *cmd) +{ + JOB *job = NULL; + CLIENT *client = NULL; + FILESET *fileset = NULL; + POOL_MEM buf; + int listing = 0; + char since[MAXSTRING]; + JCR *jcr = ua->jcr; + int accurate=-1; + + jcr->setJobType(JT_BACKUP); + jcr->start_time = time(NULL); + jcr->setJobLevel(L_FULL); + + for (int i=1; iargc; i++) { + if (strcasecmp(ua->argk[i], NT_("client")) == 0 || + strcasecmp(ua->argk[i], NT_("fd")) == 0) { + if (ua->argv[i]) { + client = GetClientResWithName(ua->argv[i]); + if (!client) { + ua->error_msg(_("Client \"%s\" not found.\n"), ua->argv[i]); + return 1; + } + if (!acl_access_client_ok(ua, client->name(), JT_BACKUP)) { + ua->error_msg(_("No authorization for Client \"%s\"\n"), client->name()); + return 1; + } + continue; + } else { + ua->error_msg(_("Client name missing.\n")); + return 1; + } + } + if (strcasecmp(ua->argk[i], NT_("job")) == 0) { + if (ua->argv[i]) { + job = GetJobResWithName(ua->argv[i]); + if (!job) { + ua->error_msg(_("Job \"%s\" not found.\n"), ua->argv[i]); + return 1; + } + if (!acl_access_ok(ua, Job_ACL, job->name())) { + ua->error_msg(_("No authorization for Job \"%s\"\n"), job->name()); + return 1; + } + continue; + } else { + ua->error_msg(_("Job name missing.\n")); + return 1; + } + + } + if (strcasecmp(ua->argk[i], NT_("fileset")) == 0) { + if (ua->argv[i]) { + fileset = GetFileSetResWithName(ua->argv[i]); + if (!fileset) { + ua->error_msg(_("Fileset \"%s\" not found.\n"), ua->argv[i]); + return 1; + } + if (!acl_access_ok(ua, FileSet_ACL, fileset->name())) { + ua->error_msg(_("No authorization for FileSet \"%s\"\n"), fileset->name()); + return 1; + } + continue; + } else { + ua->error_msg(_("Fileset name missing.\n")); + return 1; + } + } + if (strcasecmp(ua->argk[i], NT_("listing")) == 0) { + listing = 1; + continue; + } + if (strcasecmp(ua->argk[i], NT_("level")) == 0) { + if (ua->argv[i]) { + if (!get_level_from_name(ua->jcr, ua->argv[i])) { + ua->error_msg(_("Level \"%s\" not valid.\n"), ua->argv[i]); + return 1; + } + continue; + } else { + ua->error_msg(_("Level value missing.\n")); + return 1; + } + } + if (strcasecmp(ua->argk[i], NT_("accurate")) == 0) { + if (ua->argv[i]) { + if (!is_yesno(ua->argv[i], &accurate)) { + ua->error_msg(_("Invalid value for accurate. 
" + "It must be yes or no.\n")); + return 1; + } + continue; + } else { + ua->error_msg(_("Accurate value missing.\n")); + return 1; + } + } + } + if (!job && !(client && fileset)) { + if (!(job = select_job_resource(ua))) { + return 1; + } + } + if (!job) { + job = GetJobResWithName(ua->argk[1]); + if (!job) { + ua->error_msg(_("No job specified.\n")); + return 1; + } + if (!acl_access_ok(ua, Job_ACL, job->name())) { + ua->error_msg(_("No authorization for Job \"%s\"\n"), job->name()); + return 1; + } + } + jcr->job = job; + if (!client) { + client = job->client; + } + if (!fileset) { + fileset = job->fileset; + } + jcr->client = client; + jcr->fileset = fileset; + close_db(ua); + if (job->pool->catalog) { + ua->catalog = job->pool->catalog; + } else { + ua->catalog = client->catalog; + } + + if (!open_db(ua)) { + return 1; + } + + init_jcr_job_record(jcr); + + if (!get_or_create_client_record(jcr)) { + return 1; + } + if (!get_or_create_fileset_record(jcr)) { + return 1; + } + + get_level_since_time(ua->jcr, since, sizeof(since)); + + ua->send_msg(_("Connecting to Client %s at %s:%d\n"), + jcr->client->name(), jcr->client->address(buf.addr()), jcr->client->FDport); + if (!connect_to_file_daemon(jcr, 1, 15, 0)) { + ua->error_msg(_("Failed to connect to Client.\n")); + return 1; + } + + /* The level string change if accurate mode is enabled */ + if (accurate >= 0) { + jcr->accurate = accurate; + } else { + jcr->accurate = job->accurate; + } + + if (!send_level_command(jcr)) { + goto bail_out; + } + + if (!send_include_list(jcr)) { + ua->error_msg(_("Error sending include list.\n")); + goto bail_out; + } + + if (!send_exclude_list(jcr)) { + ua->error_msg(_("Error sending exclude list.\n")); + goto bail_out; + } + + /* + * If the job is in accurate mode, we send the list of + * all files to FD. + */ + Dmsg1(40, "estimate accurate=%d\n", jcr->accurate); + if (!send_accurate_current_files(jcr)) { + goto bail_out; + } + + jcr->file_bsock->fsend("estimate listing=%d\n", listing); + while (jcr->file_bsock->recv() >= 0) { + ua->send_msg("%s", jcr->file_bsock->msg); + } + +bail_out: + if (jcr->file_bsock) { + jcr->file_bsock->signal(BNET_TERMINATE); + free_bsock(ua->jcr->file_bsock); + } + return 1; +} + +/* + * print time + */ +static int time_cmd(UAContext *ua, const char *cmd) +{ + char sdt[50]; + time_t ttime = time(NULL); + struct tm tm; + (void)localtime_r(&ttime, &tm); + strftime(sdt, sizeof(sdt), "%a %d-%b-%Y %H:%M:%S", &tm); + ua->send_msg("%s\n", sdt); + return 1; +} + +/* + * reload the conf file + */ +extern "C" void reload_config(int sig); + +static int reload_cmd(UAContext *ua, const char *cmd) +{ + reload_config(1); + return 1; +} + +/* + * Delete Pool records (should purge Media with it). + * + * delete pool= + * delete volume pool= volume= + * delete jobid=xxx + */ +static int delete_cmd(UAContext *ua, const char *cmd) +{ + static const char *keywords[] = { + NT_("volume"), + NT_("pool"), + NT_("jobid"), + NT_("snapshot"), + NT_("client"), + NULL}; + + /* Deleting large jobs can take time! 
*/ + if (!open_new_client_db(ua)) { + return 1; + } + + switch (find_arg_keyword(ua, keywords)) { + case 0: + delete_volume(ua); + return 1; + case 1: + delete_pool(ua); + return 1; + case 2: + int i; + while ((i=find_arg(ua, "jobid")) > 0) { + delete_job(ua); + *ua->argk[i] = 0; /* zap keyword already visited */ + } + return 1; + case 3: + delete_snapshot(ua); + return 1; + case 4: + delete_client(ua); + return 1; + default: + break; + } + + ua->warning_msg(_( +"In general it is not a good idea to delete either a\n" +"Pool or a Volume since they may contain data.\n\n")); + + switch (do_keyword_prompt(ua, _("Choose catalog item to delete"), keywords)) { + case 0: + delete_volume(ua); + break; + case 1: + delete_pool(ua); + break; + case 2: + delete_job(ua); + return 1; + case 3: + delete_snapshot(ua); + return 1; + case 4: + delete_client(ua); + return 1; + default: + ua->warning_msg(_("Nothing done.\n")); + break; + } + return 1; +} + +/* + * delete_job has been modified to parse JobID lists like the + * following: + * delete JobID=3,4,6,7-11,14 + * + * Thanks to Phil Stracchino for the above addition. + */ +static void delete_job(UAContext *ua) +{ + int JobId; /* not JobId_t because it's unsigned and not compatible with sellist */ + char buf[256]; + sellist sl; + + int i = find_arg_with_value(ua, NT_("jobid")); + if (i >= 0) { + if (!sl.set_string(ua->argv[i], true)) { + ua->warning_msg("%s", sl.get_errmsg()); + return; + } + + if (sl.size() > 25 && (find_arg(ua, "yes") < 0)) { + bsnprintf(buf, sizeof(buf), + _("Are you sure you want to delete %d JobIds ? (yes/no): "), sl.size()); + if (!get_yesno(ua, buf) || ua->pint32_val==0) { + return; + } + } + + foreach_sellist(JobId, &sl) { + do_job_delete(ua, JobId); + } + + } else if (!get_pint(ua, _("Enter JobId to delete: "))) { + return; + + } else { + JobId = ua->int64_val; + do_job_delete(ua, JobId); + } +} + +/* + * do_job_delete now performs the actual delete operation atomically + */ +static void do_job_delete(UAContext *ua, JobId_t JobId) +{ + char ed1[50]; + + edit_int64(JobId, ed1); + purge_jobs_from_catalog(ua, ed1); + ua->send_msg(_("JobId=%s and associated records deleted from the catalog.\n"), ed1); +} + +/* + * Delete media records from database -- dangerous + */ +static int delete_volume(UAContext *ua) +{ + MEDIA_DBR mr; + char buf[1000]; + db_list_ctx lst; + + if (!select_media_dbr(ua, &mr)) { + return 1; + } + ua->warning_msg(_("\nThis command will delete volume %s\n" + "and all Jobs saved on that volume from the Catalog\n"), + mr.VolumeName); + + if (find_arg(ua, "yes") >= 0) { + ua->pint32_val = 1; /* Have "yes" on command line already" */ + } else { + bsnprintf(buf, sizeof(buf), _("Are you sure you want to delete Volume \"%s\"? (yes/no): "), + mr.VolumeName); + if (!get_yesno(ua, buf)) { + return 1; + } + } + if (!ua->pint32_val) { + return 1; + } + + /* If not purged, do it */ + if (strcmp(mr.VolStatus, "Purged") != 0) { + if (!db_get_volume_jobids(ua->jcr, ua->db, &mr, &lst)) { + ua->error_msg(_("Can't list jobs on this volume\n")); + return 1; + } + if (lst.count) { + purge_jobs_from_catalog(ua, lst.list); + } + } + + db_delete_media_record(ua->jcr, ua->db, &mr); + return 1; +} + +/* + * Delete a pool record from the database -- dangerous + * TODO: Check if the resource is still defined? 
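+ *
+ * Example invocation (the pool name is only illustrative):
+ *
+ *    delete pool=Scratch
+ *
+ * The Pool record is only removed from the catalog after the yes/no
+ * confirmation requested via get_yesno() below.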
+ */ +static int delete_pool(UAContext *ua) +{ + POOL_DBR pr; + char buf[200]; + + memset(&pr, 0, sizeof(pr)); + + if (!get_pool_dbr(ua, &pr)) { + return 1; + } + bsnprintf(buf, sizeof(buf), _("Are you sure you want to delete Pool \"%s\"? (yes/no): "), + pr.Name); + if (!get_yesno(ua, buf)) { + return 1; + } + if (ua->pint32_val) { + db_delete_pool_record(ua->jcr, ua->db, &pr); + } + return 1; +} + +/* + * Delete a client record from the database + */ +static int delete_client(UAContext *ua) +{ + CLIENT *client; + CLIENT_DBR cr; + char buf[200]; + db_list_ctx lst; + + memset(&cr, 0, sizeof(cr)); + + if (!get_client_dbr(ua, &cr, 0)) { + return 1; + } + + client = (CLIENT*) GetResWithName(R_CLIENT, cr.Name); + + if (client) { + ua->error_msg(_("Unable to delete Client \"%s\", the resource is still defined in the configuration.\n"), cr.Name); + return 1; + } + + if (!db_get_client_jobids(ua->jcr, ua->db, &cr, &lst)) { + ua->error_msg(_("Can't list jobs on this client\n")); + return 1; + } + + if (find_arg(ua, "yes") > 0) { + ua->pint32_val = 1; + + } else { + if (lst.count == 0) { + bsnprintf(buf, sizeof(buf), _("Are you sure you want to delete Client \"%s\? (yes/no): "), cr.Name); + } else { + bsnprintf(buf, sizeof(buf), _("Are you sure you want to delete Client \"%s\" and purge %d job(s)? (yes/no): "), cr.Name, lst.count); + } + if (!get_yesno(ua, buf)) { + return 1; + } + } + + if (ua->pint32_val) { + if (lst.count) { + ua->send_msg(_("Purging %d job(s).\n"), lst.count); + purge_jobs_from_catalog(ua, lst.list); + } + ua->send_msg(_("Deleting client \"%s\".\n"), cr.Name); + db_delete_client_record(ua->jcr, ua->db, &cr); + } + return 1; +} + +int memory_cmd(UAContext *ua, const char *cmd) +{ + garbage_collect_memory(); + list_dir_status_header(ua); + sm_dump(false, true); + return 1; +} + +static void do_storage_cmd(UAContext *ua, const char *command) +{ + USTORE store; + BSOCK *sd; + JCR *jcr = ua->jcr; + char dev_name[MAX_NAME_LENGTH]; + int drive, i; + int slot; + + if (!open_client_db(ua)) { + return; + } + Dmsg2(120, "%s: %s\n", command, ua->UA_sock->msg); + + store.store = get_storage_resource(ua, true/*arg is storage*/); + if (!store.store) { + return; + } + pm_strcpy(store.store_source, _("unknown source")); + set_wstorage(jcr, &store); + drive = get_storage_drive(ua, store.store); + /* For the disable/enable/unmount commands, the slot is not mandatory */ + if (strcasecmp(command, "disable") == 0 || + strcasecmp(command, "enable") == 0 || + strcasecmp(command, "unmount") == 0) { + slot = 0; + } else { + slot = get_storage_slot(ua, store.store); + } + /* Users may set a device name directly on the command line */ + if ((i = find_arg_with_value(ua, "device")) > 0) { + POOLMEM *errmsg = get_pool_memory(PM_NAME); + if (!is_name_valid(ua->argv[i], &errmsg)) { + ua->error_msg(_("Invalid device name. 
%s"), errmsg); + free_pool_memory(errmsg); + return; + } + free_pool_memory(errmsg); + bstrncpy(dev_name, ua->argv[i], sizeof(dev_name)); + + } else { /* We take the default device name */ + bstrncpy(dev_name, store.store->dev_name(), sizeof(dev_name)); + } + + Dmsg3(120, "Found storage, MediaType=%s DevName=%s drive=%d\n", + store.store->media_type, store.store->dev_name(), drive); + Dmsg4(120, "Cmd: %s %s drive=%d slot=%d\n", command, dev_name, drive, slot); + + if (!connect_to_storage_daemon(jcr, 10, SDConnectTimeout, 1)) { + ua->error_msg(_("Failed to connect to Storage daemon.\n")); + return; + } + sd = jcr->store_bsock; + bash_spaces(dev_name); + sd->fsend("%s %s drive=%d slot=%d\n", command, dev_name, drive, slot); + while (sd->recv() >= 0) { + ua->send_msg("%s", sd->msg); + } + sd->signal(BNET_TERMINATE); + free_bsock(ua->jcr->store_bsock); +} + +/* + * mount [storage=] [drive=nn] [slot=mm] + */ +static int mount_cmd(UAContext *ua, const char *cmd) +{ + do_storage_cmd(ua, "mount") ; /* mount */ + return 1; +} + + +/* + * unmount [storage=] [drive=nn] + */ +static int unmount_cmd(UAContext *ua, const char *cmd) +{ + do_storage_cmd(ua, "unmount"); /* unmount */ + return 1; +} + + +/* + * release [storage=] [drive=nn] + */ +static int release_cmd(UAContext *ua, const char *cmd) +{ + do_storage_cmd(ua, "release"); /* release */ + return 1; +} + +/* + * cloud functions, like to upload cached parts to cloud. + */ +int cloud_volumes_cmd(UAContext *ua, const char *cmd, const char *mode) +{ + int drive = -1; + int nb = 0; + uint32_t *results = NULL; + MEDIA_DBR mr; + POOL_DBR pr; + BSOCK *sd = NULL; + char storage[MAX_NAME_LENGTH]; + const char *action = mode; + memset(&pr, 0, sizeof(pr)); + + /* + * Look for all volumes that are enabled and + * have more the 200 bytes. 
+ */ + mr.Enabled = 1; + mr.Recycle = -1; /* All Recycle status */ + if (strcmp("prunecache", mode) == 0) { + mr.CacheRetention = 1; + action = "truncate cache"; + } + + if (!scan_storage_cmd(ua, cmd, false, /* fromallpool*/ + &drive, &mr, &pr, NULL, storage, + &nb, &results)) + { + goto bail_out; + } + + if ((sd=open_sd_bsock(ua)) == NULL) { + Dmsg0(100, "Can't open connection to sd\n"); + goto bail_out; + } + + /* + * Loop over the candidate Volumes and upload parts + */ + for (int i=0; i < nb; i++) { + bool ok=false; + mr.clear(); + mr.MediaId = results[i]; + if (!db_get_media_record(ua->jcr, ua->db, &mr)) { + goto bail_out; + } + + /* Protect us from spaces */ + bash_spaces(mr.VolumeName); + bash_spaces(mr.MediaType); + bash_spaces(pr.Name); + bash_spaces(storage); + + sd->fsend("%s Storage=%s Volume=%s PoolName=%s MediaType=%s " + "Slot=%d drive=%d CacheRetention=%lld\n", + action, storage, mr.VolumeName, pr.Name, mr.MediaType, + mr.Slot, drive, mr.CacheRetention); + + unbash_spaces(mr.VolumeName); + unbash_spaces(mr.MediaType); + unbash_spaces(pr.Name); + unbash_spaces(storage); + + /* Check for valid response */ + while (bget_dirmsg(sd) >= 0) { + if (strncmp(sd->msg, "3000 OK truncate cache", 22) == 0) { + ua->send_msg("%s", sd->msg); + ok = true; + + } else if (strncmp(sd->msg, "3000 OK", 7) == 0) { + ua->send_msg(_("The volume \"%s\" has been uploaded\n"), mr.VolumeName); + ok = true; + + + } else if (strncmp(sd->msg, "39", 2) == 0) { + ua->warning_msg("%s", sd->msg); + + } else { + ua->send_msg("%s", sd->msg); + } + } + if (!ok) { + ua->warning_msg(_("Unable to %s for volume \"%s\"\n"), action, mr.VolumeName); + } + } + +bail_out: + close_db(ua); + close_sd_bsock(ua); + ua->jcr->wstore = NULL; + if (results) { + free(results); + } + + return 1; +} + +/* List volumes in the cloud */ +/* TODO: Update the code for .api 2 and llist */ +static int cloud_list_cmd(UAContext *ua, const char *cmd) +{ + int drive = -1; + int64_t size, mtime; + STORE *store = NULL; + MEDIA_DBR mr; + POOL_DBR pr; + BSOCK *sd = NULL; + char storage[MAX_NAME_LENGTH]; + char ed1[50], ed2[50]; + bool first=true; + uint32_t maxpart=0, part; + uint64_t maxpart_size=0; + memset(&pr, 0, sizeof(pr)); + memset(&mr, 0, sizeof(mr)); + + /* Look at arguments */ + for (int i=1; iargc; i++) { + if (strcasecmp(ua->argk[i], NT_("volume")) == 0 + && is_name_valid(ua->argv[i], NULL)) { + bstrncpy(mr.VolumeName, ua->argv[i], sizeof(mr.VolumeName)); + + } else if (strcasecmp(ua->argk[i], NT_("drive")) == 0 && ua->argv[i]) { + drive = atoi(ua->argv[i]); + } + } + + if (!open_client_db(ua)) { + goto bail_out; + } + + /* Choose storage */ + ua->jcr->wstore = store = get_storage_resource(ua, false); + if (!store) { + goto bail_out; + } + bstrncpy(storage, store->dev_name(), sizeof(storage)); + bstrncpy(mr.MediaType, store->media_type, sizeof(mr.MediaType)); + + if ((sd=open_sd_bsock(ua)) == NULL) { + Dmsg0(100, "Can't open connection to SD\n"); + goto bail_out; + } + + /* Protect us from spaces */ + bash_spaces(mr.MediaType); + bash_spaces(storage); + bash_spaces(mr.VolumeName); + + sd->fsend("cloudlist Storage=%s Volume=%s MediaType=%s Slot=%d drive=%d\n", + storage, mr.VolumeName, mr.MediaType, mr.Slot, drive); + + if (mr.VolumeName[0]) { /* Want to list parts */ + const char *output_hformat="| %8d | %12sB | %20s |\n"; + uint64_t volsize=0; + /* Check for valid response */ + while (sd->recv() >= 0) { + if (sscanf(sd->msg, "part=%d size=%lld mtime=%lld", &part, &size, &mtime) != 3) { + if (sd->msg[0] == '3') { + ua->send_msg("%s", 
sd->msg); + } + continue; + } + /* Print information */ + if (first) { + ua->send_msg(_("+----------+---------------+----------------------+\n")); + ua->send_msg(_("| Part | Size | MTime |\n")); + ua->send_msg(_("+----------+---------------+----------------------+\n")); + first=false; + } + if (part > maxpart) { + maxpart = part; + maxpart_size = size; + } + volsize += size; + ua->send_msg(output_hformat, part, edit_uint64_with_suffix(size, ed1), bstrftimes(ed2, sizeof(ed2), mtime)); + } + if (!first) { + ua->send_msg(_("+----------+---------------+----------------------+\n")); + } + /* TODO: See if we fix the catalog record directly */ + if (db_get_media_record(ua->jcr, ua->db, &mr)) { + POOL_MEM errmsg, tmpmsg; + if (mr.LastPartBytes != maxpart_size) { + Mmsg(tmpmsg, "Error on volume \"%s\". Catalog LastPartBytes mismatch %lld != %lld\n", + mr.VolumeName, mr.LastPartBytes, maxpart_size); + pm_strcpy(errmsg, tmpmsg.c_str()); + } + if (mr.VolCloudParts != maxpart) { + Mmsg(tmpmsg, "Error on volume \"%s\". Catalog VolCloudParts mismatch %ld != %ld\n", + mr.VolumeName, mr.VolCloudParts, maxpart); + pm_strcpy(errmsg, tmpmsg.c_str()); + } + if (strlen(errmsg.c_str()) > 0) { + ua->error_msg("\n%s", errmsg.c_str()); + } + } + } else { /* TODO: Get the last part if possible? */ + const char *output_hformat="| %18s | %9s | %20s | %20s | %12sB |\n"; + + /* Check for valid response */ + while (sd->recv() >= 0) { + if (sscanf(sd->msg, "volume=%127s", mr.VolumeName) != 1) { + if (sd->msg[0] == '3') { + ua->send_msg("%s", sd->msg); + } + continue; + } + unbash_spaces(mr.VolumeName); + + mr.MediaId = 0; + + if (mr.VolumeName[0] && db_get_media_record(ua->jcr, ua->db, &mr)) { + memset(&pr, 0, sizeof(POOL_DBR)); + pr.PoolId = mr.PoolId; + if (!db_get_pool_record(ua->jcr, ua->db, &pr)) { + strcpy(pr.Name, "?"); + } + + if (first) { + ua->send_msg(_("+--------------------+-----------+----------------------+----------------------+---------------+\n")); + ua->send_msg(_("| Volume Name | Status | Media Type | Pool | VolBytes |\n")); + ua->send_msg(_("+--------------------+-----------+----------------------+----------------------+---------------+\n")); + first=false; + } + /* Print information */ + ua->send_msg(output_hformat, mr.VolumeName, mr.VolStatus, mr.MediaType, pr.Name, + edit_uint64_with_suffix(mr.VolBytes, ed1)); + } + } + if (!first) { + ua->send_msg(_("+--------------------+-----------+----------------------+----------------------+---------------+\n")); + } + } + +bail_out: + close_db(ua); + close_sd_bsock(ua); + ua->jcr->wstore = NULL; + return 1; +} + +/* Ask client to create/prune/delete a snapshot via the command line */ +static int cloud_cmd(UAContext *ua, const char *cmd) +{ + for (int i=0; iargc; i++) { + if (strcasecmp(ua->argk[i], NT_("upload")) == 0) { + return cloud_volumes_cmd(ua, cmd, "upload"); + + } else if (strcasecmp(ua->argk[i], NT_("list")) == 0) { + return cloud_list_cmd(ua, cmd); + + } else if (strcasecmp(ua->argk[i], NT_("truncate")) == 0) { + return cloud_volumes_cmd(ua, cmd, "truncate cache"); + + } else if (strcasecmp(ua->argk[i], NT_("status")) == 0) { + + } else if (strcasecmp(ua->argk[i], NT_("prune")) == 0) { + return cloud_volumes_cmd(ua, cmd, "prunecache"); + + } else { + continue; + } + } + + for ( ;; ) { + + start_prompt(ua, _("Cloud choice: \n")); + add_prompt(ua, _("List Cloud Volumes in the Cloud")); + add_prompt(ua, _("Upload a Volume to the Cloud")); + add_prompt(ua, _("Prune the Cloud Cache")); + add_prompt(ua, _("Truncate a Volume Cache")); + add_prompt(ua, 
_("Done")); + + switch(do_prompt(ua, "", _("Select action to perform on Cloud"), NULL, 0)) { + case 0: /* list cloud */ + cloud_list_cmd(ua, cmd); + break; + case 1: /* upload */ + cloud_volumes_cmd(ua, cmd, "upload"); + break; + case 2: /* Prune cache */ + cloud_volumes_cmd(ua, cmd, "prunecache"); + break; + case 3: /* Truncate cache */ + cloud_volumes_cmd(ua, cmd, "truncate cache"); + break; + default: + ua->info_msg(_("Selection terminated.\n")); + return 1; + } + } + return 1; +} + +/* + * Switch databases + * use catalog= + */ +static int use_cmd(UAContext *ua, const char *cmd) +{ + CAT *oldcatalog, *catalog; + + + close_db(ua); /* close any previously open db */ + oldcatalog = ua->catalog; + + if (!(catalog = get_catalog_resource(ua))) { + ua->catalog = oldcatalog; + } else { + ua->catalog = catalog; + } + if (open_db(ua)) { + ua->send_msg(_("Using Catalog name=%s DB=%s\n"), + ua->catalog->name(), ua->catalog->db_name); + } + return 1; +} + +int quit_cmd(UAContext *ua, const char *cmd) +{ + ua->quit = true; + return 1; +} + +/* Handler to get job status */ +static int status_handler(void *ctx, int num_fields, char **row) +{ + char *val = (char *)ctx; + + if (row[0]) { + *val = row[0][0]; + } else { + *val = '?'; /* Unknown by default */ + } + + return 0; +} + +/* + * Wait until no job is running + */ +int wait_cmd(UAContext *ua, const char *cmd) +{ + JCR *jcr; + int i; + time_t stop_time = 0; + + /* + * no args + * Wait until no job is running + */ + if (ua->argc == 1) { + bmicrosleep(0, 200000); /* let job actually start */ + for (bool running=true; running; ) { + running = false; + foreach_jcr(jcr) { + if (!jcr->is_internal_job()) { + running = true; + break; + } + } + endeach_jcr(jcr); + + if (running) { + bmicrosleep(1, 0); + } + } + return 1; + } + + i = find_arg_with_value(ua, NT_("timeout")); + if (i > 0 && ua->argv[i]) { + stop_time = time(NULL) + str_to_int64(ua->argv[i]); + } + + /* we have jobid, jobname or ujobid argument */ + + uint32_t jobid = 0 ; + + if (!open_client_db(ua)) { + ua->error_msg(_("ERR: Can't open db\n")) ; + return 1; + } + + for (int i=1; iargc; i++) { + if (strcasecmp(ua->argk[i], "jobid") == 0) { + if (!ua->argv[i]) { + break; + } + jobid = str_to_int64(ua->argv[i]); + break; + } else if (strcasecmp(ua->argk[i], "jobname") == 0 || + strcasecmp(ua->argk[i], "job") == 0) { + if (!ua->argv[i]) { + break; + } + jcr=get_jcr_by_partial_name(ua->argv[i]) ; + if (jcr) { + jobid = jcr->JobId ; + free_jcr(jcr); + } + break; + } else if (strcasecmp(ua->argk[i], "ujobid") == 0) { + if (!ua->argv[i]) { + break; + } + jcr=get_jcr_by_full_name(ua->argv[i]) ; + if (jcr) { + jobid = jcr->JobId ; + free_jcr(jcr); + } + break; + /* Wait for a mount request */ + } else if (strcasecmp(ua->argk[i], "mount") == 0) { + for (bool waiting=false; !waiting; ) { + foreach_jcr(jcr) { + if (!jcr->is_internal_job() && + (jcr->JobStatus == JS_WaitMedia || jcr->JobStatus == JS_WaitMount || + jcr->SDJobStatus == JS_WaitMedia || jcr->SDJobStatus == JS_WaitMount)) + { + waiting = true; + break; + } + } + endeach_jcr(jcr); + if (waiting) { + break; + } + if (stop_time && (time(NULL) >= stop_time)) { + ua->warning_msg(_("Wait on mount timed out\n")); + return 1; + } + bmicrosleep(1, 0); + } + return 1; + } + } + + if (jobid == 0) { + ua->error_msg(_("ERR: Job was not found\n")); + return 1 ; + } + + /* + * We wait the end of a specific job + */ + + bmicrosleep(0, 200000); /* let job actually start */ + for (bool running=true; running; ) { + running = false; + + jcr=get_jcr_by_id(jobid) ; + 
+ if (jcr) { + running = true ; + free_jcr(jcr); + } + + if (running) { + bmicrosleep(1, 0); + } + } + + /* + * We have to get JobStatus + */ + + int status ; + char jobstatus = '?'; /* Unknown by default */ + char buf[256] ; + + bsnprintf(buf, sizeof(buf), + "SELECT JobStatus FROM Job WHERE JobId='%i'", jobid); + + + db_sql_query(ua->db, buf, status_handler, (void *)&jobstatus); + + switch (jobstatus) { + case JS_Error: + status = 1 ; /* Warning */ + break; + + case JS_Incomplete: + case JS_FatalError: + case JS_ErrorTerminated: + case JS_Canceled: + status = 2 ; /* Critical */ + break; + + case JS_Warnings: + case JS_Terminated: + status = 0 ; /* Ok */ + break; + + default: + status = 3 ; /* Unknown */ + break; + } + + ua->send_msg("JobId=%i\n", jobid) ; + ua->send_msg("JobStatus=%s (%c)\n", + job_status_to_str(jobstatus, 0), + jobstatus) ; + + if (ua->gui || ua->api) { + ua->send_msg("ExitStatus=%i\n", status) ; + } + + return 1; +} + + +static int help_cmd(UAContext *ua, const char *cmd) +{ + int i; + ua->send_msg(_(" Command Description\n ======= ===========\n")); + for (i=0; iargc == 2) { + if (!strcasecmp(ua->argk[1], commands[i].key)) { + ua->send_msg(_(" %-13s %s\n\nArguments:\n\t%s\n"), commands[i].key, + commands[i].help, commands[i].usage); + break; + } + } else { + ua->send_msg(_(" %-13s %s\n"), commands[i].key, commands[i].help); + } + } + if (i == comsize && ua->argc == 2) { + ua->send_msg(_("\nCan't find %s command.\n\n"), ua->argk[1]); + } + ua->send_msg(_("\nWhen at a prompt, entering a period cancels the command.\n\n")); + return 1; +} + +int qhelp_cmd(UAContext *ua, const char *cmd) +{ + int i,j; + /* Want to display only commands */ + j = find_arg(ua, NT_("all")); + if (j >= 0) { + for (i=0; isend_msg("%s\n", commands[i].key); + } + return 1; + } + /* Want to display a specific help section */ + j = find_arg_with_value(ua, NT_("item")); + if (j >= 0 && ua->argk[j]) { + for (i=0; iargv[j])) { + ua->send_msg("%s\n", commands[i].usage); + break; + } + } + return 1; + } + /* Want to display everything */ + for (i=0; isend_msg("%s %s -- %s\n", commands[i].key, commands[i].help, commands[i].usage); + } + return 1; +} + +#if 1 +static int version_cmd(UAContext *ua, const char *cmd) +{ + ua->send_msg(_("%s Version: %s (%s) %s %s %s %s\n"), my_name, VERSION, BDATE, + HOST_OS, DISTNAME, DISTVER, NPRTB(director->verid)); + return 1; +} +#else +/* + * Test code -- turned on only for debug testing + */ +static int version_cmd(UAContext *ua, const char *cmd) +{ + dbid_list ids; + POOL_MEM query(PM_MESSAGE); + open_db(ua); + Mmsg(query, "select MediaId from Media,Pool where Pool.PoolId=Media.PoolId and Pool.Name='Full'"); + db_get_query_dbids(ua->jcr, ua->db, query, ids); + ua->send_msg("num_ids=%d max_ids=%d tot_ids=%d\n", ids.num_ids, ids.max_ids, ids.tot_ids); + for (int i=0; i < ids.num_ids; i++) { + ua->send_msg("id=%d\n", ids.DBId[i]); + } + close_db(ua); + return 1; +} +#endif + +/* + * This call uses open_client_db() and force a + * new dedicated connection to the catalog + */ +bool open_new_client_db(UAContext *ua) +{ + bool ret; + + /* Force a new dedicated connection */ + ua->force_mult_db_connections = true; + ret = open_client_db(ua); + ua->force_mult_db_connections = false; + + return ret; +} + +/* + * This call explicitly checks for a catalog=xxx and + * if given, opens that catalog. It also checks for + * client=xxx and if found, opens the catalog + * corresponding to that client. 
If we still don't + * have a catalog, look for a Job keyword and get the + * catalog from its client record. + */ +bool open_client_db(UAContext *ua) +{ + int i; + CAT *catalog; + CLIENT *client; + JOB *job; + + /* Try for catalog keyword */ + i = find_arg_with_value(ua, NT_("catalog")); + if (i >= 0) { + if (!acl_access_ok(ua, Catalog_ACL, ua->argv[i])) { + ua->error_msg(_("No authorization for Catalog \"%s\"\n"), ua->argv[i]); + return false; + } + catalog = GetCatalogResWithName(ua->argv[i]); + if (catalog) { + if (ua->catalog && ua->catalog != catalog) { + close_db(ua); + } + ua->catalog = catalog; + return open_db(ua); + } + } + + /* Try for client keyword */ + i = find_arg_with_value(ua, NT_("client")); + if (i >= 0) { + if (!acl_access_client_ok(ua, ua->argv[i], JT_BACKUP_RESTORE)) { + ua->error_msg(_("No authorization for Client \"%s\"\n"), ua->argv[i]); + return false; + } + client = GetClientResWithName(ua->argv[i]); + if (client) { + catalog = client->catalog; + if (ua->catalog && ua->catalog != catalog) { + close_db(ua); + } + if (!acl_access_ok(ua, Catalog_ACL, catalog->name())) { + ua->error_msg(_("No authorization for Catalog \"%s\"\n"), catalog->name()); + return false; + } + ua->catalog = catalog; + return open_db(ua); + } + } + + /* Try for Job keyword */ + i = find_arg_with_value(ua, NT_("job")); + if (i >= 0) { + if (!acl_access_ok(ua, Job_ACL, ua->argv[i])) { + ua->error_msg(_("No authorization for Job \"%s\"\n"), ua->argv[i]); + return false; + } + job = GetJobResWithName(ua->argv[i]); + if (job) { + catalog = job->client->catalog; + if (ua->catalog && ua->catalog != catalog) { + close_db(ua); + } + if (!acl_access_ok(ua, Catalog_ACL, catalog->name())) { + ua->error_msg(_("No authorization for Catalog \"%s\"\n"), catalog->name()); + return false; + } + ua->catalog = catalog; + return open_db(ua); + } + } + + return open_db(ua); +} + + +/* + * Open the catalog database. 
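+ *
+ * Connection handling, summarised from the code below:
+ *  - a restricted console (ua->cons set) always forces a private,
+ *    non-shared catalog connection and pushes its ACL lists into the
+ *    DB layer through set_acl();
+ *  - otherwise ua->force_mult_db_connections selects between the
+ *    private handle (ua->private_db) and the shared one (ua->shared_db).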
+ */ +bool open_db(UAContext *ua) +{ + bool mult_db_conn; + + /* With a restricted console, we can't share a SQL connection */ + if (ua->cons) { + ua->force_mult_db_connections = true; + } + + /* The force_mult_db_connections is telling us if we modify the + * private or the shared link + */ + if (ua->force_mult_db_connections) { + ua->db = ua->private_db; + + } else { + ua->db = ua->shared_db; + } + + if (ua->db) { + return true; + } + + if (!ua->catalog) { + ua->catalog = get_catalog_resource(ua); + if (!ua->catalog) { + ua->error_msg( _("Could not find a Catalog resource\n")); + return false; + } + } + + /* Some modules like bvfs need their own catalog connection */ + mult_db_conn = ua->catalog->mult_db_connections; + if (ua->force_mult_db_connections) { + mult_db_conn = true; + } + + ua->jcr->catalog = ua->catalog; + + Dmsg0(100, "UA Open database\n"); + ua->db = db_init_database(ua->jcr, ua->catalog->db_driver, + ua->catalog->db_name, + ua->catalog->db_user, + ua->catalog->db_password, ua->catalog->db_address, + ua->catalog->db_port, ua->catalog->db_socket, + ua->catalog->db_ssl_mode, ua->catalog->db_ssl_key, + ua->catalog->db_ssl_cert, ua->catalog->db_ssl_ca, + ua->catalog->db_ssl_capath, ua->catalog->db_ssl_cipher, + mult_db_conn, ua->catalog->disable_batch_insert); + if (!ua->db || !db_open_database(ua->jcr, ua->db)) { + ua->error_msg(_("Could not open catalog database \"%s\".\n"), + ua->catalog->db_name); + if (ua->db) { + ua->error_msg("%s", db_strerror(ua->db)); + } + close_db(ua); + return false; + } + ua->jcr->db = ua->db; + + /* Depending on the type of connection, we set the right variable */ + if (ua->force_mult_db_connections) { + ua->private_db = ua->db; + + } else { + ua->shared_db = ua->db; + } + /* With a restricted console, the DB backend should know restrictions about + * Pool, Job, etc... + */ + if (ua->cons) { + ua->db->set_acl(ua->jcr, DB_ACL_JOB, ua->cons->ACL_lists[Job_ACL]); + ua->db->set_acl(ua->jcr, DB_ACL_CLIENT, ua->cons->ACL_lists[Client_ACL]); + ua->db->set_acl(ua->jcr, DB_ACL_POOL, ua->cons->ACL_lists[Pool_ACL]); + ua->db->set_acl(ua->jcr, DB_ACL_FILESET, ua->cons->ACL_lists[FileSet_ACL]); + + /* For RestoreClient and BackupClient, we take also in account the Client list */ + ua->db->set_acl(ua->jcr, DB_ACL_RCLIENT, + ua->cons->ACL_lists[Client_ACL], + ua->cons->ACL_lists[RestoreClient_ACL]); + + ua->db->set_acl(ua->jcr, DB_ACL_BCLIENT, + ua->cons->ACL_lists[Client_ACL], + ua->cons->ACL_lists[BackupClient_ACL]); + } + if (!ua->api) { + ua->send_msg(_("Using Catalog \"%s\"\n"), ua->catalog->name()); + } + Dmsg1(150, "DB %s opened\n", ua->catalog->db_name); + return true; +} + +void close_db(UAContext *ua) +{ + if (ua->jcr) { + ua->jcr->db = NULL; + } + + if (ua->shared_db) { + db_close_database(ua->jcr, ua->shared_db); + ua->shared_db = NULL; + } + + if (ua->private_db) { + db_close_database(ua->jcr, ua->private_db); + ua->private_db = NULL; + } + + ua->db = NULL; +} diff --git a/src/dird/ua_dotcmds.c b/src/dird/ua_dotcmds.c new file mode 100644 index 00000000..ff7cfdf0 --- /dev/null +++ b/src/dird/ua_dotcmds.c @@ -0,0 +1,2179 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula Director -- User Agent Commands + * These are "dot" commands, i.e. commands preceded + * by a period. These commands are meant to be used + * by a program, so there is no prompting, and the + * returned results are (supposed to be) predictable. + * + * Kern Sibbald, April MMII + */ + +#include "bacula.h" +#include "dird.h" +#include "cats/bvfs.h" +#include "findlib/find.h" + +/* Imported variables */ +extern struct s_jl joblevels[]; +extern struct s_jt jobtypes[]; + +/* Imported functions */ +extern void do_messages(UAContext *ua, const char *cmd); +extern int quit_cmd(UAContext *ua, const char *cmd); +extern int qhelp_cmd(UAContext *ua, const char *cmd); +extern bool dot_status_cmd(UAContext *ua, const char *cmd); + + +/* Forward referenced functions */ +static bool admin_cmds(UAContext *ua, const char *cmd); +static bool jobscmd(UAContext *ua, const char *cmd); +static bool dotestimatecmd(UAContext *ua, const char *cmd); +static bool filesetscmd(UAContext *ua, const char *cmd); +static bool clientscmd(UAContext *ua, const char *cmd); +static bool msgscmd(UAContext *ua, const char *cmd); +static bool poolscmd(UAContext *ua, const char *cmd); +static bool schedulescmd(UAContext *ua, const char *cmd); +static bool storagecmd(UAContext *ua, const char *cmd); +static bool defaultscmd(UAContext *ua, const char *cmd); +static bool typescmd(UAContext *ua, const char *cmd); +static bool tagscmd(UAContext *ua, const char *cmd); +static bool backupscmd(UAContext *ua, const char *cmd); +static bool levelscmd(UAContext *ua, const char *cmd); +static bool getmsgscmd(UAContext *ua, const char *cmd); +static bool volstatuscmd(UAContext *ua, const char *cmd); +static bool mediatypescmd(UAContext *ua, const char *cmd); +static bool locationscmd(UAContext *ua, const char *cmd); +static bool mediacmd(UAContext *ua, const char *cmd); +static bool aopcmd(UAContext *ua, const char *cmd); +static bool catalogscmd(UAContext *ua, const char *cmd); + +static bool dot_ls_cmd(UAContext *ua, const char *cmd); +static bool dot_bvfs_lsdirs(UAContext *ua, const char *cmd); +static bool dot_bvfs_lsfiles(UAContext *ua, const char *cmd); +static bool dot_bvfs_update(UAContext *ua, const char *cmd); +static bool dot_bvfs_get_jobids(UAContext *ua, const char *cmd); +static bool dot_bvfs_versions(UAContext *ua, const char *cmd); +static bool dot_bvfs_restore(UAContext *ua, const char *cmd); +static bool dot_bvfs_cleanup(UAContext *ua, const char *cmd); +static bool dot_bvfs_clear_cache(UAContext *ua, const char *cmd); +static bool dot_bvfs_decode_lstat(UAContext *ua, const char *cmd); +static bool dot_bvfs_update_fv(UAContext *ua, const char *cmd); +static bool dot_bvfs_get_volumes(UAContext *ua, const char *cmd); +static bool dot_bvfs_get_jobs(UAContext *ua, const char *cmd); +static bool dot_bvfs_get_bootstrap(UAContext *ua, const char *cmd); +static bool dot_bvfs_get_delta(UAContext *ua, const char *cmd); +static void bvfs_get_filter(UAContext *ua, POOL_MEM &where, char *limit, int len); + +static bool putfile_cmd(UAContext *ua, const char *cmd); +static bool api_cmd(UAContext *ua, const char *cmd); +static bool 
sql_cmd(UAContext *ua, const char *cmd);
+static bool dot_quit_cmd(UAContext *ua, const char *cmd);
+static bool dot_help_cmd(UAContext *ua, const char *cmd);
+static int one_handler(void *ctx, int num_field, char **row);
+
+struct cmdstruct { const char *key; bool (*func)(UAContext *ua, const char *cmd); const char *help;const bool use_in_rs;};
+static struct cmdstruct commands[] = { /* help */ /* can be used in runscript */
+ { NT_(".api"), api_cmd, NULL, false},
+ { NT_(".backups"), backupscmd, NULL, false},
+ { NT_(".clients"), clientscmd, NULL, true},
+ { NT_(".catalogs"), catalogscmd, NULL, false},
+ { NT_(".defaults"), defaultscmd, NULL, false},
+ { NT_(".die"), admin_cmds, NULL, false},
+ { NT_(".dump"), admin_cmds, NULL, false},
+ { NT_(".exit"), admin_cmds, NULL, false},
+ { NT_(".filesets"), filesetscmd, NULL, false},
+ { NT_(".help"), dot_help_cmd, NULL, false},
+ { NT_(".jobs"), jobscmd, NULL, true},
+ { NT_(".estimate"), dotestimatecmd, NULL, false},
+ { NT_(".levels"), levelscmd, NULL, false},
+ { NT_(".messages"), getmsgscmd, NULL, false},
+ { NT_(".msgs"), msgscmd, NULL, false},
+ { NT_(".pools"), poolscmd, NULL, true},
+ { NT_(".quit"), dot_quit_cmd, NULL, false},
+ { NT_(".putfile"), putfile_cmd, NULL, false}, /* use @putfile */
+ { NT_(".schedule"), schedulescmd, NULL, false},
+ { NT_(".sql"), sql_cmd, NULL, false},
+ { NT_(".status"), dot_status_cmd, NULL, false},
+ { NT_(".storage"), storagecmd, NULL, true},
+ { NT_(".volstatus"), volstatuscmd, NULL, true},
+ { NT_(".media"), mediacmd, NULL, true},
+ { NT_(".mediatypes"), mediatypescmd, NULL, true},
+ { NT_(".locations"), locationscmd, NULL, true},
+ { NT_(".actiononpurge"),aopcmd, NULL, true},
+ { NT_(".bvfs_lsdirs"), dot_bvfs_lsdirs, NULL, true},
+ { NT_(".bvfs_lsfiles"),dot_bvfs_lsfiles, NULL, true},
+ { NT_(".bvfs_get_volumes"),dot_bvfs_get_volumes,NULL, true},
+ { NT_(".bvfs_update"), dot_bvfs_update, NULL, true},
+ { NT_(".bvfs_get_jobids"), dot_bvfs_get_jobids, NULL, true},
+ { NT_(".bvfs_get_jobs"), dot_bvfs_get_jobs, NULL, true},
+ { NT_(".bvfs_get_bootstrap"), dot_bvfs_get_bootstrap,NULL, true},
+ { NT_(".bvfs_versions"), dot_bvfs_versions, NULL, true},
+ { NT_(".bvfs_get_delta"), dot_bvfs_get_delta, NULL, true},
+ { NT_(".bvfs_restore"), dot_bvfs_restore, NULL, true},
+ { NT_(".bvfs_cleanup"), dot_bvfs_cleanup, NULL, true},
+ { NT_(".bvfs_decode_lstat"),dot_bvfs_decode_lstat,NULL, true},
+ { NT_(".bvfs_clear_cache"),dot_bvfs_clear_cache,NULL, false},
+ { NT_(".bvfs_update_fv"),dot_bvfs_update_fv, NULL, true},
+ { NT_(".ls"), dot_ls_cmd, NULL, false},
+ { NT_(".types"), typescmd, NULL, false},
+ { NT_(".tags"), tagscmd, NULL, false}
+};
+#define comsize ((int)(sizeof(commands)/sizeof(struct cmdstruct)))
+
+/*
+ * Execute a command from the UA
+ */
+bool do_a_dot_command(UAContext *ua)
+{
+ int i;
+ int len;
+ bool ok = false;
+ bool found = false;
+
+ Dmsg1(1400, "Dot command: %s\n", ua->UA_sock?ua->UA_sock->msg:"");
+ if (ua->argc == 0 || !ua->UA_sock) {
+ return false;
+ }
+
+ len = strlen(ua->argk[0]);
+ if (len == 1) {
+ if (ua->api) ua->signal(BNET_CMD_BEGIN);
+ if (ua->api) ua->signal(BNET_CMD_OK);
+ return true; /* no op */
+ }
+ for (i=0; i<comsize; i++) {
+ if (strncasecmp(ua->argk[0], _(commands[i].key), len) == 0) {
+ /* Check if this command is authorized in RunScript */
+ if (ua->runscript && !commands[i].use_in_rs) {
+ ua->error_msg(_("Can't use %s command in a runscript"), ua->argk[0]);
+ break;
+ }
+ bool gui = ua->gui;
+ /* Check if command permitted, but "quit" is always OK */
+ if (strcmp(ua->argk[0], NT_(".quit")) != 0 &&
+
strcmp(ua->argk[0], NT_(".api")) != 0 && + !acl_access_ok(ua, Command_ACL, ua->argk[0], len)) { + Dmsg1(100, "not allowed %s\n", ua->cmd); + break; + } + Dmsg1(100, "Cmd: %s\n", ua->cmd); + ua->gui = true; + if (ua->api) ua->signal(BNET_CMD_BEGIN); + ok = (*commands[i].func)(ua, ua->cmd); /* go execute command */ + if (ua->api) ua->signal(ok?BNET_CMD_OK:BNET_CMD_FAILED); + ua->gui = gui; + if (ua->UA_sock) { + found = ua->UA_sock->is_stop() ? false : true; + } + break; + } + } + if (!found) { + ua->error_msg("%s%s", ua->argk[0], _(": is an invalid command.\n")); + ok = false; + } + return ok; +} + +/* + * Send ls to Client + */ +static bool dot_ls_cmd(UAContext *ua, const char *cmd) +{ + POOL_MEM buf; + CLIENT *client = NULL; + char *path = NULL; + char *plugin = NULL; + JCR *jcr = ua->jcr; + int i; + + jcr->setJobLevel(L_FULL); + i = find_arg_with_value(ua, NT_("client")); + if (i > 0) { + client = GetClientResWithName(ua->argv[i]); + if (!client) { + ua->error_msg(_("Client \"%s\" not found.\n"), ua->argv[i]); + return false; + } + if (!acl_access_client_ok(ua, client->name(), JT_BACKUP)) { + ua->error_msg(_("No authorization for Client \"%s\"\n"), client->name()); + return false; + } + + } else { + ua->error_msg(_("Client name missing.\n")); + return false; + } + + i = find_arg_with_value(ua, NT_("path")); + if (i > 0) { + path = ua->argv[i]; + + } else { + ua->error_msg(_("path name missing.\n")); + return false; + } + + /* optional plugin=... parameter */ + i = find_arg_with_value(ua, NT_("plugin")); + if (i > 0) { + plugin = ua->argv[i]; + } + + jcr->client = client; + + jcr->setJobType(JT_BACKUP); + jcr->start_time = time(NULL); + init_jcr_job_record(jcr); // need job + + ua->send_msg(_("Connecting to Client %s at %s:%d\n"), + jcr->client->name(), jcr->client->address(buf.addr()), jcr->client->FDport); + + if (!connect_to_file_daemon(jcr, 1, 15, 0)) { + ua->error_msg(_("Failed to connect to Client.\n")); + return false; + } + + /* when .ls plugin prepare a special ls_plugin_fileset */ + if (plugin){ + if (!send_ls_plugin_fileset(jcr, plugin, path)) { + ua->error_msg(_("Failed to send plugin command to Client.\n")); + goto bail_out; + } + } else { + if (!send_ls_fileset(jcr, path)) { + ua->error_msg(_("Failed to send command to Client.\n")); + goto bail_out; + } + } + + jcr->file_bsock->fsend("estimate listing=%d\n", 1); + while (jcr->file_bsock->recv() >= 0) { + ua->send_msg("%s", jcr->file_bsock->msg); + } + +bail_out: + if (jcr->file_bsock) { + jcr->file_bsock->signal(BNET_TERMINATE); + free_bsock(ua->jcr->file_bsock); + } + return true; +} + +static void bvfs_set_acl(UAContext *ua, Bvfs *bvfs) +{ + if (!ua) { + return; + } + + /* If no console resource => default console and all is permitted */ + if (!ua->cons) { + return; + } + bvfs->set_job_acl(ua->cons->ACL_lists[Job_ACL]); + bvfs->set_client_acl(ua->cons->ACL_lists[Client_ACL]); + bvfs->set_fileset_acl(ua->cons->ACL_lists[FileSet_ACL]); + bvfs->set_pool_acl(ua->cons->ACL_lists[Pool_ACL]); +} + +static bool dot_bvfs_decode_lstat(UAContext *ua, const char *cmd) +{ + int32_t LinkFI; + struct stat sp; + POOL_MEM q; + char buf[32]; + int pos = find_arg_with_value(ua, "lstat"); + + if (pos > 0) { + for (char *p = ua->argv[pos] ; *p ; p++) { + if (! 
(B_ISALPHA(*p) || B_ISDIGIT(*p) || B_ISSPACE(*p) || *p == '/' || *p == '+' || *p == '-')) { + ua->error_msg("Can't accept %c in lstat\n", *p); + return true; + } + } + + decode_stat(ua->argv[pos], &sp, sizeof(sp), &LinkFI); + encode_mode(sp.st_mode, buf); + Mmsg(q, "st_nlink=%lld\nst_mode=%lld\nperm=%s\nst_uid=%lld\nst_gid=%lld\n" + "st_size=%lld\nst_blocks=%lld\nst_ino=%lld\nst_ctime=%lld\n" + "st_mtime=%lld\nst_atime=%lld\nst_dev=%lld\nLinkFI=%lld\n", + (int64_t) sp.st_nlink, + (int64_t) sp.st_mode, + buf, + (int64_t) sp.st_uid, + (int64_t) sp.st_gid, + (int64_t) sp.st_size, + (int64_t) sp.st_blocks, + (int64_t) sp.st_ino, + (int64_t) sp.st_ctime, + (int64_t) sp.st_mtime, + (int64_t) sp.st_atime, + (int64_t) sp.st_dev, + (int64_t) LinkFI + ); + + ua->send_msg("%s", q.c_str()); + } + return true; +} + +static bool dot_bvfs_update(UAContext *ua, const char *cmd) +{ + if (!open_new_client_db(ua)) { + return 1; + } + + int pos = find_arg_with_value(ua, "jobid"); + if (pos != -1 && is_a_number_list(ua->argv[pos])) { + if (!bvfs_update_path_hierarchy_cache(ua->jcr, ua->db, ua->argv[pos])) { + ua->error_msg("ERROR: BVFS reported a problem for %s\n", + ua->argv[pos]); + } + } else { + /* update cache for all jobids */ + bvfs_update_cache(ua->jcr, ua->db); + } + + return true; +} + +static bool dot_bvfs_update_fv(UAContext *ua, const char *cmd) +{ + int pos = find_arg_with_value(ua, "jobid"); + + if (pos == -1 || !is_a_number_list(ua->argv[pos])) { + ua->error_msg("Expecting to find jobid=1,2,3 argument\n"); + return 1; + } + + if (!open_new_client_db(ua)) { + return 1; + } + + bvfs_update_path_hierarchy_cache(ua->jcr, ua->db, ua->argv[pos]); + bvfs_update_fv_cache(ua->jcr, ua->db, ua->argv[pos]); + + ua->info_msg("OK\n"); + + return true; +} + +static bool dot_bvfs_clear_cache(UAContext *ua, const char *cmd) +{ + if (!open_client_db(ua)) { + return 1; + } + + int pos = find_arg(ua, "yes"); + if (pos != -1) { + Bvfs fs(ua->jcr, ua->db); + fs.clear_cache(); + ua->info_msg("OK\n"); + } else { + ua->error_msg("Can't find 'yes' argument\n"); + } + + return true; +} + +static int bvfs_result_handler(void *ctx, int fields, char **row) +{ + UAContext *ua = (UAContext *)ctx; + struct stat statp; + int32_t LinkFI; + char *fileid=row[BVFS_FileId]; + char *lstat=row[BVFS_LStat]; + char *jobid=row[BVFS_JobId]; + + char empty[] = "A A A A A A A A A A A A A A"; + char zero[] = "0"; + + /* We need to deal with non existant path */ + if (!fileid || !is_a_number(fileid)) { + lstat = empty; + jobid = zero; + fileid = zero; + } + + memset(&statp, 0, sizeof(struct stat)); + decode_stat(lstat, &statp, sizeof(statp), &LinkFI); + Dmsg1(100, "type=%s\n", row[0]); + if (bvfs_is_dir(row)) { + char *path = bvfs_basename_dir(row[BVFS_Name]); + ua->send_msg("%s\t0\t%s\t%s\t%s\t%s\n", row[BVFS_PathId], fileid, + jobid, lstat, path); + + } else if (bvfs_is_version(row)) { + ua->send_msg("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n", row[BVFS_PathId], + row[BVFS_FilenameId], fileid, jobid, + lstat, row[BVFS_Md5], row[BVFS_VolName], + row[BVFS_VolInchanger]); + + } else if (bvfs_is_file(row)) { + ua->send_msg("%s\t%s\t%s\t%s\t%s\t%s\n", row[BVFS_PathId], + row[BVFS_FilenameId], fileid, jobid, + lstat, row[BVFS_Name]); + + } else if (bvfs_is_volume_list(row)) { + ua->send_msg("%s\t%s\n", row[BVFS_VolName], + row[BVFS_VolInchanger]); + + } else if (bvfs_is_delta_list(row)) { + ua->send_msg("%s\t%s\t%s\t%s\t%s\t%s\t%s\n", row[BVFS_PathId], + row[BVFS_FilenameId], fileid, jobid, + lstat, row[BVFS_DeltaSeq], row[BVFS_JobTDate]); + } + + return 
0;
+}
+
+static void parse_list(char *items, alist *list)
+{
+ char *start;
+ for(char *p = start = items; *p ; p++) {
+ if (*p == ',') {
+ *p = 0;
+ if (p > start) {
+ list->append(bstrdup(start));
+ }
+ *p = ',';
+ start = p + 1;
+ }
+ }
+ if (*start) {
+ list->append(bstrdup(start));
+ }
+}
+
+static bool bvfs_parse_arg_version(UAContext *ua,
+ char **client,
+ alist *clients,
+ FileId_t *fnid,
+ bool *versions,
+ bool *copies)
+{
+ *fnid=0;
+ *client=NULL;
+ *versions=false;
+ *copies=false;
+
+ for (int i=1; i<ua->argc; i++) {
+ if (fnid && strcasecmp(ua->argk[i], NT_("fnid")) == 0) {
+ if (is_a_number(ua->argv[i])) {
+ *fnid = str_to_int64(ua->argv[i]);
+ }
+ }
+
+ if (strcasecmp(ua->argk[i], NT_("client")) == 0) {
+ *client = ua->argv[i];
+ if (clients) {
+ clients->append(bstrdup(*client));
+ }
+ }
+
+ if (clients != NULL && strcasecmp(ua->argk[i], NT_("clients")) == 0) {
+ /* Turn client1,client2,client3 to a alist of clients */
+ parse_list(ua->argv[i], clients);
+ }
+
+ if (copies && strcasecmp(ua->argk[i], NT_("copies")) == 0) {
+ *copies = true;
+ }
+
+ if (versions && strcasecmp(ua->argk[i], NT_("versions")) == 0) {
+ *versions = true;
+ }
+ }
+ return ((*client || (clients && clients->size() > 0)) && *fnid > 0);
+}
+
+static bool bvfs_parse_arg(UAContext *ua,
+ DBId_t *pathid, char **path, char **jobid,
+ char **username,
+ int *limit, int *offset)
+{
+ *pathid=0;
+ *limit=2000;
+ *offset=0;
+ *path=NULL;
+ *username=NULL;
+ if (jobid) {
+ *jobid=NULL;
+ }
+
+ for (int i=1; i<ua->argc; i++) {
+ if (!ua->argv[i]) {
+ continue;
+ }
+ if (strcasecmp(ua->argk[i], NT_("pathid")) == 0) {
+ if (is_a_number(ua->argv[i])) {
+ *pathid = str_to_int64(ua->argv[i]);
+ }
+ }
+
+ if (strcasecmp(ua->argk[i], NT_("path")) == 0) {
+ *path = ua->argv[i];
+ }
+
+ if (strcasecmp(ua->argk[i], NT_("username")) == 0) {
+ *username = ua->argv[i];
+ }
+
+ if (jobid && strcasecmp(ua->argk[i], NT_("jobid")) == 0) {
+ if (is_a_number_list(ua->argv[i])) {
+ *jobid = ua->argv[i];
+ }
+ }
+
+ if (strcasecmp(ua->argk[i], NT_("ujobid")) == 0) {
+ JOB_DBR jr;
+ memset(&jr, 0, sizeof(jr));
+ bstrncpy(jr.Job, ua->argv[i], sizeof(jr.Job));
+ if (!open_new_client_db(ua)) {
+ return false;
+ }
+ if (!db_get_job_record(ua->jcr, ua->db, &jr)) {
+ return false;
+ }
+ if (!acl_access_ok(ua, Job_ACL, jr.Name)) {
+ return false;
+ }
+ /* Store the jobid after the ua->cmd, a bit kluggy */
+ int len = strlen(ua->cmd);
+ ua->cmd = check_pool_memory_size(ua->cmd, len + 1 + 50);
+ *jobid = edit_uint64(jr.JobId, ua->cmd + len + 1);
+ }
+
+ if (strcasecmp(ua->argk[i], NT_("limit")) == 0) {
+ if (is_a_number(ua->argv[i])) {
+ *limit = str_to_int64(ua->argv[i]);
+ }
+ }
+
+ if (strcasecmp(ua->argk[i], NT_("offset")) == 0) {
+ if (is_a_number(ua->argv[i])) {
+ *offset = str_to_int64(ua->argv[i]);
+ }
+ }
+ }
+
+ if (jobid && *jobid == NULL) {
+ return false;
+ }
+
+ if (!(*pathid || *path)) {
+ return false;
+ }
+
+ return true;
+}
+
+/* .bvfs_cleanup path=b2XXXXX
+ */
+static bool dot_bvfs_cleanup(UAContext *ua, const char *cmd)
+{
+ int i;
+ if ((i = find_arg_with_value(ua, "path")) >= 0) {
+ if (!open_client_db(ua)) {
+ return 1;
+ }
+ Bvfs fs(ua->jcr, ua->db);
+ fs.drop_restore_list(ua->argv[i]);
+ }
+ return true;
+}
+
+/* .bvfs_restore path=b2XXXXX jobid=1,2 fileid=1,2 dirid=1,2 hardlink=1,2,3,4
+ */
+static bool dot_bvfs_restore(UAContext *ua, const char *cmd)
+{
+ DBId_t pathid=0;
+ int limit=2000, offset=0, i;
+ char *path=NULL, *jobid=NULL, *username=NULL;
+ char *empty = (char *)"";
+ char *fileid, *dirid, *hardlink;
+
+ fileid
= dirid = hardlink = empty; + + if (!bvfs_parse_arg(ua, &pathid, &path, &jobid, &username, + &limit, &offset) || !path) + { + ua->error_msg("Can't find jobid, pathid or path argument\n"); + return true; /* not enough param */ + } + + if (!open_new_client_db(ua)) { + return true; + } + + Bvfs fs(ua->jcr, ua->db); + bvfs_set_acl(ua, &fs); + fs.set_username(username); + fs.set_jobids(jobid); + + if ((i = find_arg_with_value(ua, "fileid")) >= 0) { + fileid = ua->argv[i]; + } + if ((i = find_arg_with_value(ua, "dirid")) >= 0) { + dirid = ua->argv[i]; + } + if ((i = find_arg_with_value(ua, "hardlink")) >= 0) { + hardlink = ua->argv[i]; + } + if ((i = find_arg(ua, "nodelta")) >= 0) { + fs.set_compute_delta(false); + } + if (fs.compute_restore_list(fileid, dirid, hardlink, path)) { + ua->send_msg("OK\n"); + } else { + ua->error_msg("Cannot create restore list.\n"); + } + + return true; +} + +/* Get a bootstrap for a given bvfs restore session + * .bvfs_get_bootstrap path=b21xxxxxx + * Volume=Vol1 + * Storage=Store1 + * VolAddress=10 + * VolSessionTime=xxx + * VolSessionId=yyyy + */ +static bool dot_bvfs_get_bootstrap(UAContext *ua, const char *cmd) +{ + RESTORE_CTX rx; /* restore context */ + POOLMEM *buf = get_pool_memory(PM_MESSAGE); + int pos; + + new_rx(&rx); + if (!open_new_client_db(ua)) { + ua->error_msg("ERROR: Unable to open database\n"); + goto bail_out; + } + pos = find_arg_with_value(ua, "path"); + if (pos < 0) { + ua->error_msg("ERROR: Unable to get path argument\n"); + goto bail_out; + } + + insert_table_into_findex_list(ua, &rx, ua->argv[pos]); + + if (rx.bsr_list->size() > 0) { + if (!complete_bsr(ua, rx.bsr_list)) { /* find Vol, SessId, SessTime from JobIds */ + ua->error_msg("ERROR: Unable to construct a valid BSR. Cannot continue.\n"); + goto bail_out; + } + if (!(rx.selected_files = write_bsr_file(ua, rx))) { + ua->error_msg("ERROR: No files selected to be restored.\n"); + goto bail_out; + } + FILE *fp = bfopen(ua->jcr->RestoreBootstrap, "r"); + if (!fp) { + ua->error_msg("ERROR: Unable to open bootstrap file\n"); + goto bail_out; + } + while (bfgets(buf, fp)) { + ua->send_msg("%s", buf); + } + fclose(fp); + } else { + ua->error_msg("ERROR: Unable to find files to restore\n"); + goto bail_out; + } + +bail_out: + if (ua->jcr->unlink_bsr) { + unlink(ua->jcr->RestoreBootstrap); + ua->jcr->unlink_bsr = false; + } + free_pool_memory(buf); + free_rx(&rx); + return true; +} + +/* + * .bvfs_get_volumes [path=/ filename=test jobid=1 | fileid=1] + * Vol001 + * Vol002 + * Vol003 + */ +static bool dot_bvfs_get_volumes(UAContext *ua, const char *cmd) +{ + DBId_t pathid=0; + FileId_t fileid=0; + char *path=NULL, *jobid=NULL, *username=NULL; + char *filename=NULL; + int limit=2000, offset=0; + int i; + + bvfs_parse_arg(ua, &pathid, &path, &jobid, &username, &limit, &offset); + + if ((i = find_arg_with_value(ua, "filename")) >= 0) { + if (!(jobid && (path || pathid))) { /* Need JobId and Path/PathId */ + ua->error_msg("Can't find jobid, pathid or path argument\n"); + return true; + } + + filename = ua->argv[i]; + + } else if ((i = find_arg_with_value(ua, "fileid")) >= 0) { + if (!is_a_number(ua->argv[i])) { + ua->error_msg("Expecting integer for FileId, got %s\n", ua->argv[i]); + return true; + } + fileid = str_to_int64(ua->argv[i]); + } + + if (!open_new_client_db(ua)) { + return 1; + } + + Bvfs fs(ua->jcr, ua->db); + bvfs_set_acl(ua, &fs); + fs.set_username(username); + fs.set_handler(bvfs_result_handler, ua); + fs.set_limit(limit); + ua->bvfs = &fs; + + if (filename) { + /* TODO */ + + } 
else { + fs.get_volumes(fileid); + } + ua->bvfs = NULL; + return true; +} + +/* + * .bvfs_lsfiles jobid=1,2,3,4 pathid=10 + * .bvfs_lsfiles jobid=1,2,3,4 path=/ + */ +static bool dot_bvfs_lsfiles(UAContext *ua, const char *cmd) +{ + DBId_t pathid=0; + int limit=2000, offset=0; + char *path=NULL, *jobid=NULL, *username=NULL; + char *pattern=NULL, *filename=NULL; + bool ok; + int i; + + if (!bvfs_parse_arg(ua, &pathid, &path, &jobid, &username, + &limit, &offset)) + { + ua->error_msg("Can't find jobid, pathid or path argument\n"); + return true; /* not enough param */ + } + if ((i = find_arg_with_value(ua, "pattern")) >= 0) { + pattern = ua->argv[i]; + } + if ((i = find_arg_with_value(ua, "filename")) >= 0) { + filename = ua->argv[i]; + } + + if (!open_new_client_db(ua)) { + return 1; + } + + Bvfs fs(ua->jcr, ua->db); + bvfs_set_acl(ua, &fs); + fs.set_username(username); + fs.set_jobids(jobid); + fs.set_handler(bvfs_result_handler, ua); + fs.set_limit(limit); + fs.set_offset(offset); + ua->bvfs = &fs; + if (pattern) { + fs.set_pattern(pattern); + } + if (filename) { + fs.set_filename(filename); + } + if (pathid) { + ok = fs.ch_dir(pathid); + } else { + ok = fs.ch_dir(path); + } + if (!ok) { + goto bail_out; + } + + fs.ls_files(); + +bail_out: + ua->bvfs = NULL; + return true; +} + +/* + * .bvfs_lsdirs jobid=1,2,3,4 pathid=10 + * .bvfs_lsdirs jobid=1,2,3,4 path=/ + * .bvfs_lsdirs jobid=1,2,3,4 path= + */ +static bool dot_bvfs_lsdirs(UAContext *ua, const char *cmd) +{ + DBId_t pathid=0; + int limit=2000, offset=0; + char *path=NULL, *jobid=NULL, *username=NULL; + char *pattern=NULL; + int dironly; + bool ok; + int i; + + if (!bvfs_parse_arg(ua, &pathid, &path, &jobid, &username, + &limit, &offset)) + { + ua->error_msg("Can't find jobid, pathid or path argument\n"); + return true; /* not enough param */ + } + + if ((i = find_arg_with_value(ua, "pattern")) >= 0) { + pattern = ua->argv[i]; + } + + dironly = find_arg(ua, "dironly"); + + if (!open_new_client_db(ua)) { + return 1; + } + + Bvfs fs(ua->jcr, ua->db); + bvfs_set_acl(ua, &fs); + fs.set_username(username); + fs.set_jobids(jobid); + fs.set_limit(limit); + fs.set_handler(bvfs_result_handler, ua); + fs.set_offset(offset); + ua->bvfs = &fs; + + if (pattern) { + fs.set_pattern(pattern); + } + + if (pathid) { + ok = fs.ch_dir(pathid); + } else { + ok = fs.ch_dir(path); + } + + if (!ok) { + goto bail_out; + } + + fs.ls_special_dirs(); + + if (dironly < 0) { + fs.ls_dirs(); + } +bail_out: + ua->bvfs = NULL; + return true; +} + +/* + * .bvfs_get_delta fileid=10 + * + */ +static bool dot_bvfs_get_delta(UAContext *ua, const char *cmd) +{ + bool ret; + FileId_t fileid=0; + int i; + + if ((i = find_arg_with_value(ua, "fileid")) >= 0) { + if (!is_a_number(ua->argv[i])) { + ua->error_msg("Expecting integer for FileId, got %s\n", ua->argv[i]); + return true; + } + fileid = str_to_int64(ua->argv[i]); + + } else { + ua->error_msg("Expecting FileId\n"); + return true; + } + + if (!open_new_client_db(ua)) { + return 1; + } + Bvfs fs(ua->jcr, ua->db); + bvfs_set_acl(ua, &fs); + fs.set_handler(bvfs_result_handler, ua); + ua->bvfs = &fs; + ret = fs.get_delta(fileid); + ua->bvfs = NULL; + return ret; +} + +/* + * .bvfs_versions fnid=10 pathid=10 client=xxx copies versions + * + */ +static bool dot_bvfs_versions(UAContext *ua, const char *cmd) +{ + DBId_t pathid=0; + FileId_t fnid=0; + int limit=2000, offset=0; + char *path=NULL, *client=NULL, *username=NULL; + bool copies=false, versions=false; + alist clients(10, owned_by_alist); + if (!bvfs_parse_arg(ua, 
&pathid, &path, NULL, &username, + &limit, &offset)) + { + ua->error_msg("Can't find pathid or path argument\n"); + return true; /* not enough param */ + } + + if (!bvfs_parse_arg_version(ua, &client, &clients, &fnid, &versions, &copies)) + { + ua->error_msg("Can't find client or fnid argument\n"); + return true; /* not enough param */ + } + + if (!open_new_client_db(ua)) { + return 1; + } + + Bvfs fs(ua->jcr, ua->db); + bvfs_set_acl(ua, &fs); + fs.set_limit(limit); + fs.set_see_all_versions(versions); + fs.set_see_copies(copies); + fs.set_handler(bvfs_result_handler, ua); + fs.set_offset(offset); + ua->bvfs = &fs; + + fs.get_all_file_versions(pathid, fnid, &clients); + + ua->bvfs = NULL; + return true; +} + +/* .bvfs_get_jobids jobid=1 + * -> returns needed jobids to restore + * .bvfs_get_jobids ujobid=xxx only + * -> returns the jobid of the job + * .bvfs_get_jobids jobid=1 jobname + * -> returns the jobname + * .bvfs_get_jobids client=xxx [ujobid=yyyy] [jobname=] [fileset=] [start=] [end=] + * -> returns all jobid for the client + * .bvfs_get_jobids client=xxx count + * -> returns the number of jobids for the client + * .bvfs_get_jobids jobid=1 all + * -> returns needed jobids to restore with all filesets a JobId=1 time + * .bvfs_get_jobids job=XXXXX + * -> returns needed jobids to restore with the jobname + * .bvfs_get_jobids ujobid=JobName + * -> returns needed jobids to restore + */ +static bool dot_bvfs_get_jobids(UAContext *ua, const char *cmd) +{ + JOB_DBR jr; + memset(&jr, 0, sizeof(JOB_DBR)); + + db_list_ctx jobids, tempids; + int pos; + char ed1[50]; + POOL_MEM query; + dbid_list ids; /* Store all FileSetIds for this client */ + + if (!open_new_client_db(ua)) { + return true; + } + + Bvfs fs(ua->jcr, ua->db); + bvfs_set_acl(ua, &fs); + + if ((pos = find_arg_with_value(ua, "username")) >= 0) { + fs.set_username(ua->argv[pos]); + } + + if ((pos = find_arg_with_value(ua, "ujobid")) >= 0) { + bstrncpy(jr.Job, ua->argv[pos], sizeof(jr.Job)); + } + + if ((pos = find_arg_with_value(ua, "jobid")) >= 0) { + jr.JobId = str_to_int64(ua->argv[pos]); + + /* Guess JobId from Job name, take the last successful jobid */ + } else if ((pos = find_arg_with_value(ua, "job")) >= 0) { + JOB *job; + bool ret; + int32_t JobId=0; + + bstrncpy(jr.Name, ua->argv[pos], MAX_NAME_LENGTH); + /* TODO: enhance this function to take client and/or fileset as argument*/ + + job = GetJobResWithName(jr.Name); + if (!job) { + ua->error_msg(_("Unable to get Job record for Job=%s\n"), jr.Name); + return true; + } + db_lock(ua->db); + Mmsg(ua->db->cmd, + "SELECT JobId " + "FROM Job JOIN FileSet USING (FileSetId) JOIN Client USING (ClientId) " + "WHERE Client.Name = '%s' AND FileSet.FileSet = '%s' " + "AND Job.Type = 'B' AND Job.JobStatus IN ('T', 'W') " + "ORDER By JobTDate DESC LIMIT 1", + job->client->name(), job->fileset->name()); + ret = db_sql_query(ua->db, ua->db->cmd, db_int_handler, &JobId); + db_unlock(ua->db); + + if (!ret) { + ua->error_msg(_("Unable to get last Job record for Job=%s\n"),jr.Name); + } + + jr.JobId = JobId; + + /* Get JobId from ujobid */ + } else if ((pos = find_arg_with_value(ua, "ujobid")) >= 0) { + bstrncpy(jr.Job, ua->argv[pos], MAX_NAME_LENGTH); + + /* Return all backup jobid for a client list */ + } else if ((pos = find_arg_with_value(ua, "client")) >= 0 || + (pos = find_arg_with_value(ua, "clients")) >= 0) { + POOL_MEM where; + char limit[50]; + bool ret; + int nbjobs; + alist clients(10, owned_by_alist); + + /* Turn client1,client2,client3 to a alist of clients */ + 
parse_list(ua->argv[pos], &clients); + + db_lock(ua->db); + bvfs_get_filter(ua, where, limit, sizeof(limit)); + Mmsg(ua->db->cmd, + "SELECT JobId " + "FROM Job JOIN Client USING (ClientId) " + "WHERE Client.Name IN (%s) " + "AND Job.Type = 'B' AND Job.JobStatus IN ('T', 'W') %s " + "ORDER By JobTDate ASC %s", + fs.escape_list(&clients), + where.c_str(), limit); + ret = db_sql_query(ua->db, ua->db->cmd, db_list_handler, &jobids); + db_unlock(ua->db); + + if (!ret) { + ua->error_msg(_("Unable to get last Job record for Client=%s\n"), + ua->argv[pos]); + } + + nbjobs = fs.set_jobids(jobids.list); + + /* Apply the ACL filter on JobIds */ + if (find_arg(ua, "count") >= 0) { + ua->send_msg("%d\n", nbjobs); + + } else { + ua->send_msg("%s\n", fs.get_jobids()); + } + return true; + } + + if (!db_get_job_record(ua->jcr, ua->db, &jr)) { + ua->error_msg(_("Unable to get Job record for JobId=%s: ERR=%s\n"), + ua->cmd, db_strerror(ua->db)); + return true; + } + + /* Display only the requested jobid or + * When in level base, we don't rely on any Full/Incr/Diff + */ + if (find_arg(ua, "only") > 0 || jr.JobLevel == L_BASE) { + /* Apply the ACL filter on JobIds */ + fs.set_jobid(jr.JobId); + ua->send_msg("%s\n", fs.get_jobids()); + return true; + } + + /* Display only the requested job name + */ + if (find_arg(ua, "jobname") > 0) { + /* Apply the ACL filter on JobIds */ + fs.set_jobid(jr.JobId); + if (str_to_int64(fs.get_jobids()) == (int64_t)jr.JobId) { + ua->send_msg("%s\n", jr.Job); + } + return true; + } + + /* If we have the "all" option, we do a search on all defined fileset + * for this client + */ + if (find_arg(ua, "all") > 0) { + edit_int64(jr.ClientId, ed1); + Mmsg(query, uar_sel_filesetid, ed1); + db_get_query_dbids(ua->jcr, ua->db, query, ids); + } else { + ids.num_ids = 1; + ids.DBId[0] = jr.FileSetId; + } + + jr.JobLevel = L_INCREMENTAL; /* Take Full+Diff+Incr */ + + /* Foreach different FileSet, we build a restore jobid list */ + for (int i=0; i < ids.num_ids; i++) { + jr.FileSetId = ids.DBId[i]; + if (!db_get_accurate_jobids(ua->jcr, ua->db, &jr, &tempids)) { + return true; + } + jobids.add(tempids); + } + + fs.set_jobids(jobids.list); + ua->send_msg("%s\n", fs.get_jobids()); + return true; +} + +static int jobs_handler(void *ctx, int num_field, char **row) +{ + UAContext *ua = (UAContext *)ctx; + ua->send_msg("%s %s %s %s\n", row[0], row[1], row[2], row[3]); + return 0; +} + +static char *get_argument(UAContext *ua, const char *arg, char *esc, bool convert) +{ + int pos; + if (((pos = find_arg_with_value(ua, arg)) < 0) || + (strlen(ua->argv[pos]) > MAX_NAME_LENGTH)) + { + return NULL; + } + db_escape_string(ua->jcr, ua->db, esc, + ua->argv[pos], strlen(ua->argv[pos])); + if (convert) { + for (int i=0; esc[i] ; i++) { + if (esc[i] == '*') { + esc[i] = '%'; + } + } + } + return esc; +} + +/* The DB should be locked */ +static void bvfs_get_filter(UAContext *ua, POOL_MEM &where, char *limit, int len) +{ + POOL_MEM tmp; + char esc_name[MAX_ESCAPE_NAME_LENGTH]; + + if (get_argument(ua, "jobname", esc_name, true) != NULL) { + Mmsg(where, "AND Job.Job LIKE '%s' ", esc_name); + } + + if (get_argument(ua, "fileset", esc_name, true) != NULL) { + Mmsg(tmp, "AND FileSet.FileSet LIKE '%s' ", esc_name); + pm_strcat(where, tmp.c_str()); + } + + if (get_argument(ua, "jobid", esc_name, false) != NULL) { + Mmsg(tmp, "AND Job.JobId = '%s' ", esc_name); + pm_strcat(where, tmp.c_str()); + } + + if (get_argument(ua, "ujobid", esc_name, false) != NULL) { + Mmsg(tmp, "AND Job.Job = '%s' ", esc_name); + 
pm_strcat(where, tmp.c_str()); + } + + if (get_argument(ua, "start", esc_name, false) != NULL) { + Mmsg(tmp, "AND Job.StartTime >= '%s' ", esc_name); + pm_strcat(where, tmp.c_str()); + } + + if (get_argument(ua, "end", esc_name, false) != NULL) { + Mmsg(tmp, "AND Job.EndTime <= '%s' ", esc_name); + pm_strcat(where, tmp.c_str()); + } + + *limit = 0; + if (get_argument(ua, "limit", esc_name, false) != NULL) { + if (is_a_number(esc_name)) { + bsnprintf(limit, len, "LIMIT %s ", esc_name); + } + } +} + +/* .bvfs_get_jobs client=xxx [ujobid=yyyy] [jobname=] [fileset=] [start=] [end=] + * 1 yyyyy 1 Backup1_xxx_xxx_xxxx_xxx + * 2 yyyyy 0 Backup1_xxx_xxx_xxxx_xxx + */ +static bool dot_bvfs_get_jobs(UAContext *ua, const char *cmd) +{ + int pos; + POOL_MEM where; + char esc_cli[MAX_ESCAPE_NAME_LENGTH]; + char limit[MAX_ESCAPE_NAME_LENGTH]; + if (!open_new_client_db(ua)) { + return true; + } + + if (((pos = find_arg_with_value(ua, "client")) < 0) || + (strlen(ua->argv[pos]) > MAX_NAME_LENGTH)) + { + return true; + } + + /* TODO: Do checks on Jobs, FileSet, etc... */ + if (!acl_access_client_ok(ua, ua->argv[pos], JT_BACKUP_RESTORE)) { + return true; + } + + db_lock(ua->db); + db_escape_string(ua->jcr, ua->db, esc_cli, + ua->argv[pos], strlen(ua->argv[pos])); + + bvfs_get_filter(ua, where, limit, sizeof(limit)); + + Mmsg(ua->db->cmd, + "SELECT JobId, JobTDate, HasCache, Job " + "FROM Job JOIN Client USING (ClientId) JOIN FileSet USING (FileSetId) " + "WHERE Client.Name = '%s' AND Job.Type = 'B' AND Job.JobStatus IN ('T', 'W') " + "%s " + "ORDER By JobTDate DESC %s", + esc_cli, where.c_str(), limit); + + db_sql_query(ua->db, ua->db->cmd, jobs_handler, ua); + db_unlock(ua->db); + return true; +} + +static bool dot_quit_cmd(UAContext *ua, const char *cmd) +{ + quit_cmd(ua, cmd); + return true; +} + +static bool dot_help_cmd(UAContext *ua, const char *cmd) +{ + qhelp_cmd(ua, cmd); + return true; +} + +static bool getmsgscmd(UAContext *ua, const char *cmd) +{ + if (console_msg_pending) { + do_messages(ua, cmd); + } + return 1; +} + +#ifdef DEVELOPER +static void do_storage_cmd(UAContext *ua, STORE *store, const char *cmd) +{ + BSOCK *sd; + JCR *jcr = ua->jcr; + USTORE lstore; + + lstore.store = store; + pm_strcpy(lstore.store_source, _("unknown source")); + set_wstorage(jcr, &lstore); + /* Try connecting for up to 15 seconds */ + ua->send_msg(_("Connecting to Storage daemon %s at %s:%d\n"), + store->name(), store->address, store->SDport); + if (!connect_to_storage_daemon(jcr, 1, 15, 0)) { + ua->error_msg(_("Failed to connect to Storage daemon.\n")); + return; + } + Dmsg0(120, _("Connected to storage daemon\n")); + sd = jcr->store_bsock; + sd->fsend("%s", cmd); + if (sd->recv() >= 0) { + ua->send_msg("%s", sd->msg); + } + sd->signal(BNET_TERMINATE); + free_bsock(ua->jcr->store_bsock); + return; +} + +static void do_client_cmd(UAContext *ua, CLIENT *client, const char *cmd) +{ + BSOCK *fd; + POOL_MEM buf; + /* Connect to File daemon */ + + ua->jcr->client = client; + /* Try to connect for 15 seconds */ + ua->send_msg(_("Connecting to Client %s at %s:%d\n"), + client->name(), client->address(buf.addr()), client->FDport); + if (!connect_to_file_daemon(ua->jcr, 1, 15, 0)) { + ua->error_msg(_("Failed to connect to Client.\n")); + return; + } + Dmsg0(120, "Connected to file daemon\n"); + fd = ua->jcr->file_bsock; + fd->fsend("%s", cmd); + if (fd->recv() >= 0) { + ua->send_msg("%s", fd->msg); + } + fd->signal(BNET_TERMINATE); + free_bsock(ua->jcr->file_bsock); + return; +} + +/* + * .die (seg fault) + * .dump 
(sm_dump)
+ * .exit (no arg => .quit)
+ */
+static bool admin_cmds(UAContext *ua, const char *cmd)
+{
+ pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+ STORE *store=NULL;
+ CLIENT *client=NULL;
+ bool dir=false;
+ bool do_deadlock=false;
+ const char *remote_cmd;
+ int i;
+ JCR *jcr = NULL;
+ int a;
+ if (strncmp(ua->argk[0], ".die", 4) == 0) {
+ if (find_arg(ua, "deadlock") > 0) {
+ do_deadlock = true;
+ remote_cmd = ".die deadlock";
+ } else {
+ remote_cmd = ".die";
+ }
+ } else if (strncmp(ua->argk[0], ".dump", 5) == 0) {
+ remote_cmd = "sm_dump";
+ } else if (strncmp(ua->argk[0], ".exit", 5) == 0) {
+ remote_cmd = "exit";
+ } else {
+ ua->error_msg(_("Unknown command: %s\n"), ua->argk[0]);
+ return true;
+ }
+ /* General debug? */
+ for (i=1; i<ua->argc; i++) {
+ if (strcasecmp(ua->argk[i], "dir") == 0 ||
+ strcasecmp(ua->argk[i], "director") == 0) {
+ dir = true;
+ }
+ if (strcasecmp(ua->argk[i], "client") == 0 ||
+ strcasecmp(ua->argk[i], "fd") == 0) {
+ client = NULL;
+ if (ua->argv[i]) {
+ client = (CLIENT *)GetResWithName(R_CLIENT, ua->argv[i]);
+ }
+ if (!client) {
+ client = select_client_resource(ua, JT_SYSTEM);
+ }
+ }
+
+ if (strcasecmp(ua->argk[i], NT_("store")) == 0 ||
+ strcasecmp(ua->argk[i], NT_("storage")) == 0 ||
+ strcasecmp(ua->argk[i], NT_("sd")) == 0) {
+ store = NULL;
+ if (ua->argv[i]) {
+ store = (STORE *)GetResWithName(R_STORAGE, ua->argv[i]);
+ }
+ if (!store) {
+ store = get_storage_resource(ua, false/*no default*/);
+ }
+ }
+ }
+
+ if (!dir && !store && !client) {
+ /*
+ * We didn't find an appropriate keyword above, so
+ * prompt the user.
+ */
+ start_prompt(ua, _("Available daemons are: \n"));
+ add_prompt(ua, _("Director"));
+ add_prompt(ua, _("Storage"));
+ add_prompt(ua, _("Client"));
+ switch(do_prompt(ua, "", _("Select daemon type to make die"), NULL, 0)) {
+ case 0: /* Director */
+ dir=true;
+ break;
+ case 1:
+ store = get_storage_resource(ua, false/*no default*/);
+ break;
+ case 2:
+ client = select_client_resource(ua, JT_BACKUP_RESTORE);
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (store) {
+ do_storage_cmd(ua, store, remote_cmd);
+ }
+
+ if (client) {
+ do_client_cmd(ua, client, remote_cmd);
+ }
+
+ if (dir) {
+ if (strncmp(remote_cmd, ".die", 4) == 0) {
+ if (do_deadlock) {
+ ua->send_msg(_("The Director will generate a deadlock.\n"));
+ P(mutex);
+ P(mutex);
+ }
+ ua->send_msg(_("The Director will segment fault.\n"));
+ a = jcr->JobId; /* ref NULL pointer */
+ jcr->JobId = 1000; /* another ref NULL pointer */
+ jcr->JobId = a;
+
+ } else if (strncmp(remote_cmd, ".dump", 5) == 0) {
+ sm_dump(false, true);
+ } else if (strncmp(remote_cmd, ".exit", 5) == 0) {
+ dot_quit_cmd(ua, cmd);
+ }
+ }
+
+ return true;
+}
+
+#else
+
+/*
+ * Dummy routine for non-development version
+ */
+static bool admin_cmds(UAContext *ua, const char *cmd)
+{
+ ua->error_msg(_("Unknown command: %s\n"), ua->argk[0]);
+ return true;
+}
+
+#endif
+
+/*
+ * Send a file to the director from bconsole @putfile command
+ * The .putfile can not be used directly.
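 *
 * [Editor's note, not part of the original source.] Rough flow as
 * implemented below: bconsole sends ".putfile key=xxx" (the key is
 * optional, alphanumeric, at most 16 characters) followed by the raw
 * file data on the UA socket; the Director writes every received packet
 * to a unique work file derived from the key and the console session
 * (make_unique_filename), and finally answers "OK" or "ERROR".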
+ */ +static bool putfile_cmd(UAContext *ua, const char *cmd) +{ + int pos, i, pnl, fnl; + bool ok = true; + POOLMEM *name = get_pool_memory(PM_FNAME); + POOLMEM *path = get_pool_memory(PM_FNAME); + POOLMEM *fname= get_pool_memory(PM_FNAME); + const char *key = "putfile"; + FILE *fp = NULL; + + if ((pos = find_arg_with_value(ua, "key")) > 0) { + /* Check the string if the string is valid */ + for (i=0; ua->argv[pos][i] && isalnum(ua->argv[pos][i]) && i < 16; i++); + + if (ua->argv[pos][i] == 0) { + key = ua->argv[pos]; + + } else { + ua->error_msg("Invalid key name for putfile command"); + ok = false; + goto bail_out; + } + } + + /* the (intptr_t)ua will allow one file per console session */ + make_unique_filename(&name, (intptr_t)ua, (char *)key); + + fp = bfopen(name, "w"); + if (!fp) { + berrno be; + ua->error_msg("Unable to open destination file. ERR=%s\n", + be.bstrerror(errno)); + ok = false; + goto bail_out; + } + + while (ua->UA_sock->recv() > 0) { + if (fwrite(ua->UA_sock->msg, ua->UA_sock->msglen, 1, fp) != 1) { + berrno be; + ua->error_msg("Unable to write to the destination file. ERR=%s\n", + be.bstrerror(errno)); + ok = false; + /* TODO: Check if we need to quit here (data will still be in the + * buffer...) */ + } + } + + split_path_and_filename(name, &path, &pnl, &fname, &fnl); + +bail_out: + if (ok) { + ua->send_msg("OK\n"); + + } else { + ua->send_msg("ERROR\n"); + } + + free_pool_memory(name); + free_pool_memory(path); + free_pool_memory(fname); + if (fp) { + fclose(fp); + } + return true; +} + +/* .estimate command */ +static bool dotestimatecmd(UAContext *ua, const char *cmd) +{ + JOB *jres; + JOB_DBR jr; + //FILESET_DBR fr; + //CLIENT_DBR cr; + char *job = NULL, level = 0, *fileset = NULL, *client = NULL; + memset(&jr, 0, sizeof(jr)); + + for (int i = 1 ; i < ua->argc ; i++) { + if (!ua->argv[i]) { + ua->error_msg(_("Invalid argument for %s\n"), ua->argk[i]); + return true; + + } else if (strcasecmp(ua->argk[i], "job") == 0) { + job = ua->argv[i]; + + } else if (strcasecmp(ua->argk[i], "level") == 0) { + level = toupper(ua->argv[i][0]); + + } else if (strcasecmp(ua->argk[i], "fileset") == 0) { + fileset = ua->argv[i]; + + } else if (strcasecmp(ua->argk[i], "client") == 0) { + client = ua->argv[i]; + } + } + if (!job) { + ua->error_msg(_("Invalid argument for job\n")); + return true; + } + if (!acl_access_ok(ua, Job_ACL, job) || + (fileset && !acl_access_ok(ua, FileSet_ACL, fileset)) || + (client && !acl_access_client_ok(ua, client, JT_BACKUP))) + { + ua->error_msg(_("Access to specified Job, FileSet or Client not allowed.\n")); + return true; + } + jres = (JOB *) GetResWithName(R_JOB, job); + if (!jres) { + ua->error_msg(_("Invalid argument for job\n")); + return true; + } + if (!open_client_db(ua)) { + ua->error_msg(_("Unable to open the catalog.\n")); + return true; + } + + bstrncpy(jr.Name, jres->hdr.name, sizeof(jr.Name)); + jr.JobLevel = level ? 
level : jres->JobLevel; + if (fileset) { + /* Get FileSetId */ + } + if (client) { + /* Get ClientId */ + } + db_lock(ua->db); + if (db_get_job_statistics(ua->jcr, ua->db, &jr)) { + db_unlock(ua->db); + OutputWriter o(ua->api_opts); + char *p = o.get_output(OT_START_OBJ, + OT_JOBLEVEL, "level", jr.JobLevel, + OT_INT, "nbjob", jr.CorrNbJob, + OT_INT, "corrbytes", jr.CorrJobBytes, + OT_SIZE, "jobbytes", jr.JobBytes, + OT_INT, "corrfiles", jr.CorrJobFiles, + OT_INT32, "jobfiles", jr.JobFiles, + OT_INT, "duration", (int)0, + OT_STRING, "job", jres->hdr.name, + OT_END_OBJ, + OT_END); + ua->send_msg("%s", p); + } else { + /* We unlock the DB after the errmsg copy */ + pm_strcpy(ua->jcr->errmsg, ua->db->errmsg); + db_unlock(ua->db); + ua->error_msg("Error with .estimate %s\n", ua->jcr->errmsg); + } + return true; +} + + +/* + * Can use an argument to filter on JobType + * .jobs [type=B] or [type=!B] + */ +static bool jobscmd(UAContext *ua, const char *cmd) +{ + JOB *job; + uint32_t type = 0; + bool exclude=false; + int pos; + if ((pos = find_arg_with_value(ua, "type")) >= 0) { + if (ua->argv[pos][0] == '!') { + exclude = true; + type = ua->argv[pos][1]; + } else { + type = ua->argv[pos][0]; + } + } + LockRes(); + foreach_res(job, R_JOB) { + if (type) { + if ((exclude && type == job->JobType) || (!exclude && type != job->JobType)) { + continue; + } + } + if (acl_access_ok(ua, Job_ACL, job->name())) { + ua->send_msg("%s\n", job->name()); + } + } + UnlockRes(); + return true; +} + +static bool filesetscmd(UAContext *ua, const char *cmd) +{ + FILESET *fs; + LockRes(); + foreach_res(fs, R_FILESET) { + if (acl_access_ok(ua, FileSet_ACL, fs->name())) { + ua->send_msg("%s\n", fs->name()); + } + } + UnlockRes(); + return true; +} + +static bool catalogscmd(UAContext *ua, const char *cmd) +{ + CAT *cat; + LockRes(); + foreach_res(cat, R_CATALOG) { + if (acl_access_ok(ua, Catalog_ACL, cat->name())) { + ua->send_msg("%s\n", cat->name()); + } + } + UnlockRes(); + return true; +} + +/* This is not a good idea to lock the entire resource list to send information + * on the network or query the DNS. So, we don't use the foreach_res() command + * with a global lock and we do a copy of the client list in a specific list to + * avoid any problem, I'm pretty sure we can use the res_head directly without + * a global lock, but it needs testing to avoid race conditions. + */ +class TmpClient +{ +public: + char *name; + char *address; + + TmpClient(char *n, char *a): + name(bstrdup(n)), address(bstrdup(a)) + { + }; + ~TmpClient() { + free(name); + free(address); + }; +}; + +static bool clientscmd(UAContext *ua, const char *cmd) +{ + int i; + CLIENT *client; + const char *ip=NULL; + bool found=false; + alist *clientlist = NULL; + TmpClient *elt; + POOL_MEM buf; + + if ((i = find_arg_with_value(ua, "address")) >= 0) { + ip = ua->argv[i]; + clientlist = New(alist(50, not_owned_by_alist)); + } + + /* This is not a good idea to lock the entire resource list + * to send information on the network or query the DNS. So, + * we don't use the foreach_res() command with a global lock here. 
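 *
 * [Editor's note, not part of the original source.] The copy below is kept
 * cheap on purpose: under LockRes() only the client name and address
 * strings are duplicated into TmpClient objects, and the lock is released
 * before bnet_host2ipaddrs() runs, so a slow DNS lookup cannot stall other
 * threads waiting for the resource lock.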
+ */ + LockRes(); + foreach_res(client, R_CLIENT) { + if (acl_access_client_ok(ua, client->name(), JT_BACKUP_RESTORE)) { + if (ip) { + elt = new TmpClient(client->name(), client->address(buf.addr())); + clientlist->append(elt); + + } else { + /* do not check for a specific ip, display everything */ + ua->send_msg("%s\n", client->name()); + } + } + } + UnlockRes(); + + if (!ip) { + return true; + } + + foreach_alist(elt, clientlist) { + /* We look for a client that matches the specific ip address */ + dlist *addr_list=NULL; + IPADDR *ipaddr; + char buf[128]; + const char *errstr; + + if (strcmp(elt->address, ip) == 0) { + found = true; + + } else if ((addr_list = bnet_host2ipaddrs(elt->address, 0, &errstr)) == NULL) { + Dmsg2(10, "bnet_host2ipaddrs() for host %s failed: ERR=%s\n", + elt->address, errstr); + + } else { + /* Try to find the ip address from the list, we might have + * other ways to compare ip addresses + */ + foreach_dlist(ipaddr, addr_list) { + if (strcmp(ip, ipaddr->get_address(buf, sizeof(buf))) == 0) { + found = true; + break; + } + } + free_addresses(addr_list); + } + + if (found) { + ua->send_msg("%s\n", elt->name); + break; + } + } + /* Cleanup the temp list */ + foreach_alist(elt, clientlist) { + delete elt; + } + delete clientlist; + return true; +} + +static bool msgscmd(UAContext *ua, const char *cmd) +{ + MSGS *msgs = NULL; + LockRes(); + foreach_res(msgs, R_MSGS) { + ua->send_msg("%s\n", msgs->name()); + } + UnlockRes(); + return true; +} + +static bool poolscmd(UAContext *ua, const char *cmd) +{ + POOL *pool; + LockRes(); + foreach_res(pool, R_POOL) { + if (acl_access_ok(ua, Pool_ACL, pool->name())) { + ua->send_msg("%s\n", pool->name()); + } + } + UnlockRes(); + return true; +} + +static bool schedulescmd(UAContext *ua, const char *cmd) +{ + SCHED *sched; + LockRes(); + foreach_res(sched, R_SCHEDULE) { + if (acl_access_ok(ua, Schedule_ACL, sched->name())) { + ua->send_msg("%s\n", sched->name()); + } + } + UnlockRes(); + return true; +} + +static bool storagecmd(UAContext *ua, const char *cmd) +{ + STORE *store; + POOL_MEM tmp; + bool unique=false; + alist *already_in = NULL; + + /* .storage unique */ + if (find_arg(ua, "unique") > 0) { + unique=true; + already_in = New(alist(10, owned_by_alist)); + } + + LockRes(); + foreach_res(store, R_STORAGE) { + if (acl_access_ok(ua, Storage_ACL, store->name())) { + char *elt; + bool display=true; + + if (unique) { + Mmsg(tmp, "%s:%d", store->address, store->SDport); + foreach_alist(elt, already_in) { /* TODO: See if we need a hash or an ordered list here */ + if (strcmp(tmp.c_str(), elt) == 0) { + display = false; + break; + } + } + if (display) { + already_in->append(bstrdup(tmp.c_str())); + } + } + if (display) { + ua->send_msg("%s\n", store->name()); + } + } + } + UnlockRes(); + if (already_in) { + delete already_in; + } + return true; +} + +static bool aopcmd(UAContext *ua, const char *cmd) +{ + ua->send_msg("None\n"); + ua->send_msg("Truncate\n"); + return true; +} + +static bool typescmd(UAContext *ua, const char *cmd) +{ + ua->send_msg("Backup\n"); + ua->send_msg("Restore\n"); + ua->send_msg("Admin\n"); + ua->send_msg("Verify\n"); + ua->send_msg("Migrate\n"); + ua->send_msg("Copy\n"); + return true; +} + +static bool tagscmd(UAContext *ua, const char *cmd) +{ + uint32_t i = 0; + for (const char *p = debug_get_tag(i++, NULL) ; p ; p = debug_get_tag(i++, NULL)) { + ua->send_msg("%s\n", p); + } + return true; +} + +/* + * If this command is called, it tells the director that we + * are a program that wants a sort of 
API, and hence, + * we will probably suppress certain output, include more + * error codes, and most of all send back a good number + * of new signals that indicate whether or not the command + * succeeded. + */ +static bool api_cmd(UAContext *ua, const char *cmd) +{ + int i; + if (ua->argc >= 2) { + ua->api = atoi(ua->argk[1]); + + /* Get output configuration options such as time format or separator */ + if ((i = find_arg_with_value(ua, "api_opts")) > 0) { + bstrncpy(ua->api_opts, ua->argv[i], sizeof(ua->api_opts)); + + } else { + *ua->api_opts = 0; + } + } else { + ua->api = 1; + } + return true; +} + +static int client_backups_handler(void *ctx, int num_field, char **row) +{ + UAContext *ua = (UAContext *)ctx; + ua->send_msg("| %s | %s | %s | %s | %s | %s | %s | %s |\n", + row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8]); + return 0; +} + +/* + * Return the backups for this client + * + * .backups client=xxx fileset=yyy + * + */ +static bool backupscmd(UAContext *ua, const char *cmd) +{ + if (!open_client_db(ua)) { + return true; + } + if (ua->argc != 3 || strcmp(ua->argk[1], "client") != 0 || + strcmp(ua->argk[2], "fileset") != 0) { + return true; + } + if (!acl_access_client_ok(ua, ua->argv[1], JT_BACKUP_RESTORE) || + !acl_access_ok(ua, FileSet_ACL, ua->argv[2])) { + ua->error_msg(_("Access to specified Client or FileSet not allowed.\n")); + return true; + } + Mmsg(ua->cmd, client_backups, ua->argv[1], ua->argv[2]); + if (!db_sql_query(ua->db, ua->cmd, client_backups_handler, (void *)ua)) { + ua->error_msg(_("Query failed: %s. ERR=%s\n"), ua->cmd, db_strerror(ua->db)); + return true; + } + return true; +} + +static int sql_handler(void *ctx, int num_field, char **row) +{ + UAContext *ua = (UAContext *)ctx; + POOL_MEM rows(PM_MESSAGE); + + /* Check for nonsense */ + if (num_field == 0 || row == NULL || row[0] == NULL) { + return 0; /* nothing returned */ + } + for (int i=0; num_field--; i++) { + if (i == 0) { + pm_strcpy(rows, NPRT(row[0])); + } else { + pm_strcat(rows, NPRT(row[i])); + } + pm_strcat(rows, "\t"); + } + if (!rows.c_str() || !*rows.c_str()) { + ua->send_msg("\t"); + } else { + ua->send_msg("%s", rows.c_str()); + } + return 0; +} + +static bool sql_cmd(UAContext *ua, const char *cmd) +{ + int index; + if (!open_new_client_db(ua)) { + return true; + } + index = find_arg_with_value(ua, "query"); + if (index < 0) { + ua->error_msg(_("query keyword not found.\n")); + return true; + } + if (!db_sql_query(ua->db, ua->argv[index], sql_handler, (void *)ua)) { + Dmsg1(100, "Query failed: ERR=%s\n", db_strerror(ua->db)); + ua->error_msg(_("Query failed: %s. 
ERR=%s\n"), ua->cmd, db_strerror(ua->db)); + return true; + } + return true; +} + +static int one_handler(void *ctx, int num_field, char **row) +{ + UAContext *ua = (UAContext *)ctx; + ua->send_msg("%s\n", row[0]); + return 0; +} + +static bool mediatypescmd(UAContext *ua, const char *cmd) +{ + if (!open_client_db(ua)) { + return true; + } + if (!db_sql_query(ua->db, + "SELECT DISTINCT MediaType FROM MediaType ORDER BY MediaType", + one_handler, (void *)ua)) + { + ua->error_msg(_("List MediaType failed: ERR=%s\n"), db_strerror(ua->db)); + } + return true; +} + +static bool mediacmd(UAContext *ua, const char *cmd) +{ + if (!open_client_db(ua)) { + return true; + } + if (!db_sql_query(ua->db, + "SELECT DISTINCT Media.VolumeName FROM Media ORDER BY VolumeName", + one_handler, (void *)ua)) + { + ua->error_msg(_("List Media failed: ERR=%s\n"), db_strerror(ua->db)); + } + return true; +} + +static bool locationscmd(UAContext *ua, const char *cmd) +{ + if (!open_client_db(ua)) { + return true; + } + if (!db_sql_query(ua->db, + "SELECT DISTINCT Location FROM Location ORDER BY Location", + one_handler, (void *)ua)) + { + ua->error_msg(_("List Location failed: ERR=%s\n"), db_strerror(ua->db)); + } + return true; +} + +static bool levelscmd(UAContext *ua, const char *cmd) +{ + int i; + /* Note some levels are blank, which means none is needed */ + if (ua->argc == 1) { + for (i=0; joblevels[i].level_name; i++) { + if (joblevels[i].level_name[0] != ' ') { + ua->send_msg("%s\n", joblevels[i].level_name); + } + } + } else if (ua->argc == 2) { + int jobtype = 0; + /* Assume that first argument is the Job Type */ + for (i=0; jobtypes[i].type_name; i++) { + if (strcasecmp(ua->argk[1], jobtypes[i].type_name) == 0) { + jobtype = jobtypes[i].job_type; + break; + } + } + for (i=0; joblevels[i].level_name; i++) { + if ((joblevels[i].job_type == jobtype) && (joblevels[i].level_name[0] != ' ')) { + ua->send_msg("%s\n", joblevels[i].level_name); + } + } + } + + return true; +} + +static bool volstatuscmd(UAContext *ua, const char *cmd) +{ + ua->send_msg("Append\n"); + ua->send_msg("Full\n"); + ua->send_msg("Used\n"); + ua->send_msg("Recycle\n"); + ua->send_msg("Purged\n"); + ua->send_msg("Cleaning\n"); + ua->send_msg("Error\n"); + return true; +} + +/* + * Return default values for a job + */ +static bool defaultscmd(UAContext *ua, const char *cmd) +{ + char ed1[50]; + if (ua->argc != 2 || !ua->argv[1]) { + return true; + } + + /* Send Job defaults */ + if (strcmp(ua->argk[1], "job") == 0) { + if (!acl_access_ok(ua, Job_ACL, ua->argv[1])) { + return true; + } + JOB *job = (JOB *)GetResWithName(R_JOB, ua->argv[1]); + if (job) { + USTORE store; + char edl[50]; + ua->send_msg("job=%s", job->name()); + ua->send_msg("pool=%s", job->pool->name()); + ua->send_msg("messages=%s", job->messages->name()); + ua->send_msg("client=%s", job->client?job->client->name():_("*None*")); + get_job_storage(&store, job, NULL); + ua->send_msg("storage=%s", store.store->name()); + ua->send_msg("where=%s", job->RestoreWhere?job->RestoreWhere:""); + ua->send_msg("level=%s", level_to_str(edl, sizeof(edl), job->JobLevel)); + ua->send_msg("type=%s", job_type_to_str(job->JobType)); + ua->send_msg("fileset=%s", job->fileset->name()); + ua->send_msg("enabled=%d", job->is_enabled()); + ua->send_msg("catalog=%s", job->client?job->client->catalog->name():_("*None*")); + ua->send_msg("priority=%d", job->Priority); + } + } + /* Send Pool defaults */ + else if (strcmp(ua->argk[1], "pool") == 0) { + if (!acl_access_ok(ua, Pool_ACL, ua->argv[1])) { + 
return true; + } + POOL *pool = (POOL *)GetResWithName(R_POOL, ua->argv[1]); + if (pool) { + ua->send_msg("pool=%s", pool->name()); + ua->send_msg("pool_type=%s", pool->pool_type); + ua->send_msg("label_format=%s", pool->label_format?pool->label_format:""); + ua->send_msg("use_volume_once=%d", pool->use_volume_once); + ua->send_msg("purge_oldest_volume=%d", pool->purge_oldest_volume); + ua->send_msg("recycle_oldest_volume=%d", pool->recycle_oldest_volume); + ua->send_msg("recycle_current_volume=%d", pool->recycle_current_volume); + ua->send_msg("max_volumes=%d", pool->max_volumes); + ua->send_msg("vol_retention=%s", edit_uint64(pool->VolRetention, ed1)); + ua->send_msg("vol_use_duration=%s", edit_uint64(pool->VolUseDuration, ed1)); + ua->send_msg("max_vol_jobs=%d", pool->MaxVolJobs); + ua->send_msg("max_vol_files=%d", pool->MaxVolFiles); + ua->send_msg("max_vol_bytes=%s", edit_uint64(pool->MaxVolBytes, ed1)); + ua->send_msg("auto_prune=%d", pool->AutoPrune); + ua->send_msg("recycle=%d", pool->Recycle); + ua->send_msg("file_retention=%s", edit_uint64(pool->FileRetention, ed1)); + ua->send_msg("job_retention=%s", edit_uint64(pool->JobRetention, ed1)); + } + } + /* Send Storage defaults */ + else if (strcmp(ua->argk[1], "storage") == 0) { + if (!acl_access_ok(ua, Storage_ACL, ua->argv[1])) { + return true; + } + STORE *storage = (STORE *)GetResWithName(R_STORAGE, ua->argv[1]); + DEVICE *device; + if (storage) { + ua->send_msg("storage=%s", storage->name()); + ua->send_msg("address=%s", storage->address); + ua->send_msg("enabled=%d", storage->is_enabled()); + ua->send_msg("media_type=%s", storage->media_type); + ua->send_msg("sdport=%d", storage->SDport); + device = (DEVICE *)storage->device->first(); + ua->send_msg("device=%s", device->name()); + if (storage->device && storage->device->size() > 1) { + while ((device = (DEVICE *)storage->device->next())) { + ua->send_msg(",%s", device->name()); + } + } + } + } + /* Send Client defaults */ + else if (strcmp(ua->argk[1], "client") == 0) { + if (!acl_access_client_ok(ua, ua->argv[1], JT_BACKUP_RESTORE)) { + return true; + } + CLIENT *client = (CLIENT *)GetResWithName(R_CLIENT, ua->argv[1]); + if (client) { + POOL_MEM buf; + ua->send_msg("client=%s", client->name()); + ua->send_msg("address=%s", client->address(buf.addr())); + ua->send_msg("fdport=%d", client->FDport); + ua->send_msg("file_retention=%s", edit_uint64(client->FileRetention, ed1)); + ua->send_msg("job_retention=%s", edit_uint64(client->JobRetention, ed1)); + ua->send_msg("autoprune=%d", client->AutoPrune); + ua->send_msg("catalog=%s", client->catalog->name()); + } + } + return true; +} diff --git a/src/dird/ua_input.c b/src/dird/ua_input.c new file mode 100644 index 00000000..a9197231 --- /dev/null +++ b/src/dird/ua_input.c @@ -0,0 +1,251 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2015 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * + * Bacula Director -- User Agent Input and scanning code + * + * Kern Sibbald, October MMI + * + */ + +#include "bacula.h" +#include "dird.h" + + +/* Imported variables */ + + +/* Exported functions */ + +/* + * If subprompt is set, we send a BNET_SUB_PROMPT signal otherwise + * send a BNET_TEXT_INPUT signal. + */ +bool get_cmd(UAContext *ua, const char *prompt, bool subprompt) +{ + BSOCK *sock = ua->UA_sock; + int stat; + + ua->cmd[0] = 0; + if (!sock || ua->batch) { /* No UA or batch mode */ + return false; + } + if (!subprompt && ua->api) { + sock->signal(BNET_TEXT_INPUT); + } + sock->fsend("%s", prompt); + if (!ua->api || subprompt) { + sock->signal(BNET_SUB_PROMPT); + } + for ( ;; ) { + stat = sock->recv(); + if (stat == BNET_SIGNAL) { + continue; /* ignore signals */ + } + if (sock->is_stop()) { + return false; /* error or terminate */ + } + pm_strcpy(ua->cmd, sock->msg); + strip_trailing_junk(ua->cmd); + if (strcmp(ua->cmd, ".messages") == 0) { + qmessagescmd(ua, ua->cmd); + } + /* Lone dot => break */ + if (ua->cmd[0] == '.' && ua->cmd[1] == 0) { + return false; + } + break; + } + return true; +} + +/* + * Get a selection list + * We get a command from the user, scan it, then + * return when OK + * Returns true if OK + * false if error + */ +bool get_selection_list(UAContext *ua, sellist &sl, + const char *prompt, bool subprompt) +{ + for ( ;; ) { + if (!get_cmd(ua, prompt, subprompt)) { + return false; + } + if (!sl.set_string(ua->cmd, true)) { + ua->send_msg("%s", sl.get_errmsg()); + continue; + } + return true; + } +} + +/* + * Get a positive integer + * Returns: false if failure + * true if success => value in ua->pint32_val + */ +bool get_pint(UAContext *ua, const char *prompt) +{ + double dval; + ua->pint32_val = 0; + ua->int64_val = 0; + for (;;) { + ua->cmd[0] = 0; + if (!get_cmd(ua, prompt)) { + return false; + } + /* Kludge for slots blank line => 0 */ + if (ua->cmd[0] == 0 && strncmp(prompt, _("Enter slot"), strlen(_("Enter slot"))) == 0) { + return true; + } + if (!is_a_number(ua->cmd)) { + ua->warning_msg(_("Expected a positive integer, got: %s\n"), ua->cmd); + continue; + } + errno = 0; + dval = strtod(ua->cmd, NULL); + if (errno != 0 || dval < 0) { + ua->warning_msg(_("Expected a positive integer, got: %s\n"), ua->cmd); + continue; + } + ua->pint32_val = (uint32_t)dval; + ua->int64_val = (int64_t)dval; + return true; + } +} + +/* + * Test a yes or no response + * Returns: false if failure + * true if success => ret == 1 for yes + * ret == 0 for no + */ +bool is_yesno(char *val, int *ret) +{ + *ret = 0; + if ((strcasecmp(val, _("yes")) == 0) || + (strcasecmp(val, NT_("yes")) == 0)) + { + *ret = 1; + } else if ((strcasecmp(val, _("no")) == 0) || + (strcasecmp(val, NT_("no")) == 0)) + { + *ret = 0; + } else { + return false; + } + + return true; +} + +/* + * Gets a yes or no response + * Returns: false if failure + * true if success => ua->pint32_val == 1 for yes + * ua->pint32_val == 0 for no + */ +bool get_yesno(UAContext *ua, const char *prompt) +{ + int len; + int ret; + ua->pint32_val = 0; + for (;;) { + if (ua->api) ua->UA_sock->signal(BNET_YESNO); + if (!get_cmd(ua, prompt)) { + return false; + } + len = strlen(ua->cmd); + if (len < 1 || len > 3) { + continue; + } + if (is_yesno(ua->cmd, &ret)) { + ua->pint32_val = ret; + return true; + } + ua->warning_msg(_("Invalid response. 
You must answer yes or no.\n")); + } +} + +/* + * Gets an Enabled value => 0, 1, 2, yes, no, archived + * Returns: 0, 1, 2 if OK + * -1 on error + */ +int get_enabled(UAContext *ua, const char *val) +{ + int Enabled = -1; + + if (strcasecmp(val, "yes") == 0 || strcasecmp(val, "true") == 0) { + Enabled = 1; + } else if (strcasecmp(val, "no") == 0 || strcasecmp(val, "false") == 0) { + Enabled = 0; + } else if (strcasecmp(val, "archived") == 0) { + Enabled = 2; + } else { + Enabled = atoi(val); + } + if (Enabled < 0 || Enabled > 2) { + ua->error_msg(_("Invalid Enabled value, it must be yes, no, archived, 0, 1, or 2\n")); + return -1; + } + return Enabled; +} + +void parse_ua_args(UAContext *ua) +{ + parse_args(ua->cmd, &ua->args, &ua->argc, ua->argk, ua->argv, MAX_CMD_ARGS); +} + +/* + * Check if the comment has legal characters + * If ua is non-NULL send the message + */ +bool is_comment_legal(UAContext *ua, const char *name) +{ + int len; + const char *p; + const char *forbid = "'<>&\\\""; + + /* Restrict the characters permitted in the comment */ + for (p=name; *p; p++) { + if (!strchr(forbid, (int)(*p))) { + continue; + } + if (ua) { + ua->error_msg(_("Illegal character \"%c\" in a comment.\n"), *p); + } + return 0; + } + len = strlen(name); + if (len >= MAX_NAME_LENGTH) { + if (ua) { + ua->error_msg(_("Comment too long.\n")); + } + return 0; + } + if (len == 0) { + if (ua) { + ua->error_msg(_("Comment must be at least one character long.\n")); + } + return 0; + } + return 1; +} diff --git a/src/dird/ua_label.c b/src/dird/ua_label.c new file mode 100644 index 00000000..9fec5ed1 --- /dev/null +++ b/src/dird/ua_label.c @@ -0,0 +1,1288 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
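   Editor's aside: the yes/no/archived mapping implemented by get_enabled()
   above can be exercised on its own.  The sketch below re-implements only the
   value mapping (enabled_value() is an invented name and the UAContext error
   message is left out), so it is an illustration, not the director's code.

      #include <assert.h>
      #include <stdlib.h>
      #include <strings.h>    // strcasecmp

      static int enabled_value(const char *val)
      {
         int e;
         if (strcasecmp(val, "yes") == 0 || strcasecmp(val, "true") == 0) {
            e = 1;
         } else if (strcasecmp(val, "no") == 0 || strcasecmp(val, "false") == 0) {
            e = 0;
         } else if (strcasecmp(val, "archived") == 0) {
            e = 2;
         } else {
            e = atoi(val);          // bare digits 0, 1, 2 are accepted too
         }
         return (e < 0 || e > 2) ? -1 : e;
      }

      int main(void)
      {
         assert(enabled_value("archived") == 2);
         assert(enabled_value("No") == 0);     // comparisons are case-insensitive
         assert(enabled_value("3") == -1);     // out of range is rejected
         return 0;
      }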
+*/ +/* + * Bacula Director -- Tape labeling commands + * + * Kern Sibbald, April MMIII + */ + +#include "bacula.h" +#include "dird.h" + +/* Slot list definition */ +typedef struct s_vol_list { + struct s_vol_list *next; + char *VolName; + int Slot; +} vol_list_t; + + +/* Forward referenced functions */ +static int do_label(UAContext *ua, const char *cmd, int relabel); +static void label_from_barcodes(UAContext *ua, int drive); +static bool send_label_request(UAContext *ua, MEDIA_DBR *mr, MEDIA_DBR *omr, + POOL_DBR *pr, int relabel, bool media_record_exits, int drive); +static vol_list_t *get_vol_list_from_SD(UAContext *ua, bool scan); +static void free_vol_list(vol_list_t *vol_list); +static bool is_cleaning_tape(UAContext *ua, MEDIA_DBR *mr, POOL_DBR *pr); +BSOCK *open_sd_bsock(UAContext *ua); +void close_sd_bsock(UAContext *ua); +static char *get_volume_name_from_SD(UAContext *ua, int Slot, int drive); +static int get_num_slots_from_SD(UAContext *ua); + + +/* + * Label a tape + * + * label storage=xxx volume=vvv + */ +int label_cmd(UAContext *ua, const char *cmd) +{ + return do_label(ua, cmd, 0); /* standard label */ +} + +int relabel_cmd(UAContext *ua, const char *cmd) +{ + return do_label(ua, cmd, 1); /* relabel tape */ +} + +static bool get_user_slot_list(UAContext *ua, char *slot_list, int num_slots) +{ + int i; + const char *msg; + + /* slots are numbered 1 to num_slots */ + for (int i=0; i <= num_slots; i++) { + slot_list[i] = 0; + } + i = find_arg_with_value(ua, "slots"); + if (i == -1) { /* not found */ + i = find_arg_with_value(ua, "slot"); + } + if (i > 0) { + /* scan slot list in ua->argv[i] */ + char *p, *e, *h; + int beg, end; + + strip_trailing_junk(ua->argv[i]); + for (p=ua->argv[i]; p && *p; p=e) { + /* Check for list */ + e = strchr(p, ','); + if (e) { + *e++ = 0; + } + /* Check for range */ + h = strchr(p, '-'); /* range? 
*/ + if (h == p) { + msg = _("Negative numbers not permitted\n"); + goto bail_out; + } + if (h) { + *h++ = 0; + if (!is_an_integer(h)) { + msg = _("Range end is not integer.\n"); + goto bail_out; + } + skip_spaces(&p); + if (!is_an_integer(p)) { + msg = _("Range start is not an integer.\n"); + goto bail_out; + } + beg = atoi(p); + end = atoi(h); + if (end < beg) { + msg = _("Range end not bigger than start.\n"); + goto bail_out; + } + } else { + skip_spaces(&p); + if (!is_an_integer(p)) { + msg = _("Input value is not an integer.\n"); + goto bail_out; + } + beg = end = atoi(p); + } + if (beg <= 0 || end <= 0) { + msg = _("Values must be be greater than zero.\n"); + goto bail_out; + } + if (end > num_slots) { + msg = _("Slot too large.\n"); + goto bail_out; + } + for (i=beg; i<=end; i++) { + slot_list[i] = 1; /* Turn on specified range */ + } + } + } else { + /* Turn everything on */ + for (i=1; i <= num_slots; i++) { + slot_list[i] = 1; + } + } + if (debug_level >= 100) { + Dmsg0(100, "Slots turned on:\n"); + for (i=1; i <= num_slots; i++) { + if (slot_list[i]) { + Dmsg1(100, "%d\n", i); + } + } + } + return true; + +bail_out: + Dmsg1(100, "Problem with user selection ERR=%s\n", msg); + return false; +} + +/* + * Update Slots corresponding to Volumes in autochanger + */ +void update_slots(UAContext *ua) +{ + USTORE store; + vol_list_t *vl, *vol_list = NULL; + MEDIA_DBR mr; + char *slot_list; + bool scan; + int max_slots; + int drive; + int Enabled = 1; + bool have_enabled; + int i; + + + if (!open_client_db(ua)) { + return; + } + store.store = get_storage_resource(ua, true/*arg is storage*/); + if (!store.store) { + return; + } + pm_strcpy(store.store_source, _("Command input")); + set_wstorage(ua->jcr, &store); + drive = get_storage_drive(ua, store.store); + + scan = find_arg(ua, NT_("scan")) >= 0; + if ((i=find_arg_with_value(ua, NT_("Enabled"))) >= 0) { + Enabled = get_enabled(ua, ua->argv[i]); + if (Enabled < 0) { + return; + } + have_enabled = true; + } else { + have_enabled = false; + } + + max_slots = get_num_slots_from_SD(ua); + Dmsg1(100, "max_slots=%d\n", max_slots); + if (max_slots <= 0) { + ua->warning_msg(_("No slots in changer to scan.\n")); + return; + } + slot_list = (char *)malloc(max_slots+1); + if (!get_user_slot_list(ua, slot_list, max_slots)) { + free(slot_list); + return; + } + + vol_list = get_vol_list_from_SD(ua, scan); + + if (!vol_list) { + ua->warning_msg(_("No Volumes found to label, or no barcodes.\n")); + goto bail_out; + } + + /* First zap out any InChanger with StorageId=0 */ + db_sql_query(ua->db, "UPDATE Media SET InChanger=0 WHERE StorageId=0", NULL, NULL); + + /* Walk through the list updating the media records */ + for (vl=vol_list; vl; vl=vl->next) { + if (vl->Slot > max_slots) { + ua->warning_msg(_("Slot %d greater than max %d ignored.\n"), + vl->Slot, max_slots); + continue; + } + /* Check if user wants us to look at this slot */ + if (!slot_list[vl->Slot]) { + Dmsg1(100, "Skipping slot=%d\n", vl->Slot); + continue; + } + /* If scanning, we read the label rather than the barcode */ + if (scan) { + if (vl->VolName) { + free(vl->VolName); + vl->VolName = NULL; + } + vl->VolName = get_volume_name_from_SD(ua, vl->Slot, drive); + Dmsg2(100, "Got Vol=%s from SD for Slot=%d\n", vl->VolName, vl->Slot); + } + slot_list[vl->Slot] = 0; /* clear Slot */ + mr.Slot = vl->Slot; + mr.InChanger = 1; + mr.MediaId = 0; /* Force using VolumeName */ + if (vl->VolName) { + bstrncpy(mr.VolumeName, vl->VolName, sizeof(mr.VolumeName)); + } else { + mr.VolumeName[0] = 0; + } 
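   /*
    * Editor's annotation (not in the original source): at this point mr only
    * carries what the changer reported for this slot.  MediaId is left at 0
    * so the catalog lookup below matches on VolumeName, and InChanger=1 marks
    * the volume as physically present in the autochanger.
    */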
+ set_storageid_in_mr(store.store, &mr); + Dmsg4(100, "Before make unique: Vol=%s slot=%d inchanger=%d sid=%d\n", + mr.VolumeName, mr.Slot, mr.InChanger, mr.StorageId); + db_lock(ua->db); + /* Set InChanger to zero for this Slot */ + db_make_inchanger_unique(ua->jcr, ua->db, &mr); + db_unlock(ua->db); + Dmsg4(100, "After make unique: Vol=%s slot=%d inchanger=%d sid=%d\n", + mr.VolumeName, mr.Slot, mr.InChanger, mr.StorageId); + if (!vl->VolName) { + Dmsg1(100, "No VolName for Slot=%d setting InChanger to zero.\n", vl->Slot); + ua->info_msg(_("No VolName for Slot=%d InChanger set to zero.\n"), vl->Slot); + continue; + } + db_lock(ua->db); + Dmsg4(100, "Before get MR: Vol=%s slot=%d inchanger=%d sid=%d\n", + mr.VolumeName, mr.Slot, mr.InChanger, mr.StorageId); + if (db_get_media_record(ua->jcr, ua->db, &mr)) { + Dmsg4(100, "After get MR: Vol=%s slot=%d inchanger=%d sid=%d\n", + mr.VolumeName, mr.Slot, mr.InChanger, mr.StorageId); + /* If Slot, Inchanger, and StorageId have changed, update the Media record */ + if (mr.Slot != vl->Slot || !mr.InChanger || mr.StorageId != store.store->StorageId) { + mr.Slot = vl->Slot; + mr.InChanger = 1; + if (have_enabled) { + mr.Enabled = Enabled; + } + set_storageid_in_mr(store.store, &mr); + if (!db_update_media_record(ua->jcr, ua->db, &mr)) { + ua->error_msg("%s", db_strerror(ua->db)); + } else { + ua->info_msg(_( + "Catalog record for Volume \"%s\" updated to reference slot %d.\n"), + mr.VolumeName, mr.Slot); + } + } else { + ua->info_msg(_("Catalog record for Volume \"%s\" is up to date.\n"), + mr.VolumeName); + } + db_unlock(ua->db); + continue; + } else { + ua->warning_msg(_("Volume \"%s\" not found in catalog. Slot=%d InChanger set to zero.\n"), + mr.VolumeName, vl->Slot); + } + db_unlock(ua->db); + } + mr.clear(); + mr.InChanger = 1; + set_storageid_in_mr(store.store, &mr); + db_lock(ua->db); + for (int i=1; i <= max_slots; i++) { + if (slot_list[i]) { + mr.Slot = i; + /* Set InChanger to zero for this Slot */ + db_make_inchanger_unique(ua->jcr, ua->db, &mr); + } + } + db_unlock(ua->db); + +bail_out: + if (vol_list) { + free_vol_list(vol_list); + } + free(slot_list); + close_sd_bsock(ua); + + return; +} + + +/* + * Common routine for both label and relabel + */ +static int do_label(UAContext *ua, const char *cmd, int relabel) +{ + USTORE store; + BSOCK *sd; + char dev_name[MAX_NAME_LENGTH]; + MEDIA_DBR mr, omr; + POOL_DBR pr; + bool print_reminder = true; + bool label_barcodes = false; + bool ok = false; + int i, j; + int drive; + bool media_record_exists = false; + static const char *barcode_keyword[] = { + "barcode", + "barcodes", + NULL}; + + + memset(&pr, 0, sizeof(pr)); + if (!open_client_db(ua)) { + return 1; + } + + /* Look for one of the barcode keywords */ + if (!relabel && (i=find_arg_keyword(ua, barcode_keyword)) >= 0) { + /* Now find the keyword in the list */ + if ((j = find_arg(ua, barcode_keyword[i])) > 0) { + *ua->argk[j] = 0; /* zap barcode keyword */ + } + label_barcodes = true; + } + + store.store = get_storage_resource(ua, true/*use default*/); + if (!store.store) { + return 1; + } + pm_strcpy(store.store_source, _("Command input")); + set_wstorage(ua->jcr, &store); + drive = get_storage_drive(ua, store.store); + + if (label_barcodes) { + label_from_barcodes(ua, drive); + return 1; + } + + /* If relabel get name of Volume to relabel */ + if (relabel) { + /* Check for oldvolume=name */ + i = find_arg_with_value(ua, "oldvolume"); + if (i >= 0) { + bstrncpy(omr.VolumeName, ua->argv[i], sizeof(omr.VolumeName)); + omr.MediaId = 0; + 
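   /*
    * Editor's annotation (not in the original source): omr.MediaId is zeroed
    * so the lookup that follows finds the old volume by its name; when the
    * record exists we jump straight to the Purged/Recycle check at checkVol.
    */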
if (db_get_media_record(ua->jcr, ua->db, &omr)) { + goto checkVol; + } + ua->error_msg("%s", db_strerror(ua->db)); + } + /* No keyword or Vol not found, ask user to select */ + if (!select_media_dbr(ua, &omr)) { + return 1; + } + + /* Require Volume to be Purged or Recycled */ +checkVol: + if (strcmp(omr.VolStatus, "Purged") != 0 && strcmp(omr.VolStatus, "Recycle") != 0) { + ua->error_msg(_("Volume \"%s\" has VolStatus %s. It must be Purged or Recycled before relabeling.\n"), + omr.VolumeName, omr.VolStatus); + return 1; + } + } + + /* Check for volume=NewVolume */ + i = find_arg_with_value(ua, "volume"); + if (i >= 0) { + pm_strcpy(ua->cmd, ua->argv[i]); + goto checkName; + } + + /* Get a new Volume name */ + for ( ;; ) { + media_record_exists = false; + if (!get_cmd(ua, _("Enter new Volume name: "))) { + return 1; + } +checkName: + if (!is_volume_name_legal(ua, ua->cmd)) { + continue; + } + + bstrncpy(mr.VolumeName, ua->cmd, sizeof(mr.VolumeName)); + mr.MediaId = 0; + /* If VolBytes are zero the Volume is not labeled */ + if (db_get_media_record(ua->jcr, ua->db, &mr)) { + if (mr.VolBytes != 0) { + ua->error_msg(_("Media record for new Volume \"%s\" already exists.\n"), + mr.VolumeName); + continue; + } + media_record_exists = true; + } + break; /* Got it */ + } + + /* If autochanger, request slot */ + i = find_arg_with_value(ua, "slot"); + if (i >= 0) { + mr.Slot = atoi(ua->argv[i]); + if (mr.Slot < 0) { + mr.Slot = 0; + } + mr.InChanger = mr.Slot > 0; /* if slot give assume in changer */ + } else if (store.store->autochanger) { + if (!get_pint(ua, _("Enter slot (0 or Enter for none): "))) { + return 1; + } + mr.Slot = ua->pint32_val; + if (mr.Slot < 0) { + mr.Slot = 0; + } + mr.InChanger = mr.Slot > 0; /* if slot give assume in changer */ + } + set_storageid_in_mr(store.store, &mr); + + bstrncpy(mr.MediaType, store.store->media_type, sizeof(mr.MediaType)); + + /* Must select Pool if not already done */ + if (pr.PoolId == 0) { + memset(&pr, 0, sizeof(pr)); + if (!select_pool_dbr(ua, &pr)) { + return 1; + } + } + + ok = send_label_request(ua, &mr, &omr, &pr, relabel, media_record_exists, drive); + + if (ok) { + sd = ua->jcr->store_bsock; + if (relabel) { + /* Delete the old media record */ + if (!db_delete_media_record(ua->jcr, ua->db, &omr)) { + ua->error_msg(_("Delete of Volume \"%s\" failed. ERR=%s"), + omr.VolumeName, db_strerror(ua->db)); + } else { + ua->info_msg(_("Old volume \"%s\" deleted from catalog.\n"), + omr.VolumeName); + /* Update the number of Volumes in the pool */ + pr.NumVols--; + if (!db_update_pool_record(ua->jcr, ua->db, &pr)) { + ua->error_msg("%s", db_strerror(ua->db)); + } + } + } + if (ua->automount) { + bstrncpy(dev_name, store.store->dev_name(), sizeof(dev_name)); + ua->info_msg(_("Requesting to mount %s ...\n"), dev_name); + bash_spaces(dev_name); + sd->fsend("mount %s drive=%d slot=%d", dev_name, drive, mr.Slot); + unbash_spaces(dev_name); + while (bget_dirmsg(sd) >= 0) { + ua->send_msg("%s", sd->msg); + /* Here we can get + * 3001 OK mount. Device=xxx or + * 3001 Mounted Volume vvvv + * 3002 Device "DVD-Writer" (/dev/hdc) is mounted. 
+ * 3906 is cannot mount non-tape + * So for those, no need to print a reminder + */ + if (strncmp(sd->msg, "3001 ", 5) == 0 || + strncmp(sd->msg, "3002 ", 5) == 0 || + strncmp(sd->msg, "3906 ", 5) == 0) { + print_reminder = false; + } + } + } + } + if (print_reminder) { + ua->info_msg(_("Do not forget to mount the drive!!!\n")); + } + close_sd_bsock(ua); + + return 1; +} + +/* + * Request SD to send us the slot:barcodes, then wiffle + * through them all labeling them. + */ +static void label_from_barcodes(UAContext *ua, int drive) +{ + STORE *store = ua->jcr->wstore; + POOL_DBR pr; + MEDIA_DBR mr, omr; + vol_list_t *vl, *vol_list = NULL; + bool media_record_exists; + char *slot_list; + int max_slots; + + + max_slots = get_num_slots_from_SD(ua); + if (max_slots <= 0) { + ua->warning_msg(_("No slots in changer to scan.\n")); + return; + } + slot_list = (char *)malloc(max_slots+1); + if (!get_user_slot_list(ua, slot_list, max_slots)) { + goto bail_out; + } + + vol_list = get_vol_list_from_SD(ua, false /*no scan*/); + + if (!vol_list) { + ua->warning_msg(_("No Volumes found to label, or no barcodes.\n")); + goto bail_out; + } + + /* Display list of Volumes and ask if he really wants to proceed */ + ua->send_msg(_("The following Volumes will be labeled:\n" + "Slot Volume\n" + "==============\n")); + for (vl=vol_list; vl; vl=vl->next) { + if (!vl->VolName || !slot_list[vl->Slot]) { + continue; + } + ua->send_msg("%4d %s\n", vl->Slot, vl->VolName); + } + if (!get_yesno(ua, _("Do you want to label these Volumes? (yes|no): ")) || + (ua->pint32_val == 0)) { + goto bail_out; + } + /* Select a pool */ + memset(&pr, 0, sizeof(pr)); + if (!select_pool_dbr(ua, &pr)) { + goto bail_out; + } + + /* Fire off the label requests */ + for (vl=vol_list; vl; vl=vl->next) { + if (!vl->VolName || !slot_list[vl->Slot]) { + continue; + } + mr.clear(); + bstrncpy(mr.VolumeName, vl->VolName, sizeof(mr.VolumeName)); + media_record_exists = false; + if (db_get_media_record(ua->jcr, ua->db, &mr)) { + if (mr.VolBytes != 0) { + ua->warning_msg(_("Media record for Slot %d Volume \"%s\" already exists.\n"), + vl->Slot, mr.VolumeName); + mr.Slot = vl->Slot; + mr.InChanger = mr.Slot > 0; /* if slot give assume in changer */ + set_storageid_in_mr(store, &mr); + if (!db_update_media_record(ua->jcr, ua->db, &mr)) { + ua->error_msg(_("Error setting InChanger: ERR=%s"), db_strerror(ua->db)); + } + continue; + } + media_record_exists = true; + } + mr.InChanger = mr.Slot > 0; /* if slot give assume in changer */ + set_storageid_in_mr(store, &mr); + /* + * Deal with creating cleaning tape here. 
Normal tapes created in + * send_label_request() below + */ + if (is_cleaning_tape(ua, &mr, &pr)) { + if (media_record_exists) { /* we update it */ + mr.VolBytes = 1; /* any bytes to indicate it exists */ + bstrncpy(mr.VolStatus, "Cleaning", sizeof(mr.VolStatus)); + mr.MediaType[0] = 0; + set_storageid_in_mr(store, &mr); + if (!db_update_media_record(ua->jcr, ua->db, &mr)) { + ua->error_msg("%s", db_strerror(ua->db)); + } + } else { /* create the media record */ + if (pr.MaxVols > 0 && pr.NumVols >= pr.MaxVols) { + ua->error_msg(_("Maximum pool Volumes=%d reached.\n"), pr.MaxVols); + goto bail_out; + } + set_pool_dbr_defaults_in_media_dbr(&mr, &pr); + bstrncpy(mr.VolStatus, "Cleaning", sizeof(mr.VolStatus)); + mr.MediaType[0] = 0; + set_storageid_in_mr(store, &mr); + if (db_create_media_record(ua->jcr, ua->db, &mr)) { + ua->send_msg(_("Catalog record for cleaning tape \"%s\" successfully created.\n"), + mr.VolumeName); + pr.NumVols++; /* this is a bit suspect */ + if (!db_update_pool_record(ua->jcr, ua->db, &pr)) { + ua->error_msg("%s", db_strerror(ua->db)); + } + } else { + ua->error_msg(_("Catalog error on cleaning tape: %s"), db_strerror(ua->db)); + } + } + continue; /* done, go handle next volume */ + } + + /* Not a cleaning tape */ + bstrncpy(mr.MediaType, store->media_type, sizeof(mr.MediaType)); + mr.Slot = vl->Slot; + + send_label_request(ua, &mr, &omr, &pr, 0, media_record_exists, drive); + } + + +bail_out: + free(slot_list); + free_vol_list(vol_list); + close_sd_bsock(ua); + + return; +} + +/* + * Check if the Volume name has legal characters + * If ua is non-NULL send the message + */ +bool is_volume_name_legal(UAContext *ua, const char *name) +{ + int len; + const char *p; + const char *accept = ":.-_"; + + /* Restrict the characters permitted in the Volume name */ + for (p=name; *p; p++) { + if (B_ISALPHA(*p) || B_ISDIGIT(*p) || strchr(accept, (int)(*p))) { + continue; + } + if (ua) { + ua->error_msg(_("Illegal character \"%c\" in a volume name.\n"), *p); + } + return 0; + } + len = strlen(name); + if (len >= MAX_NAME_LENGTH) { + if (ua) { + ua->error_msg(_("Volume name too long.\n")); + } + return 0; + } + if (len == 0) { + if (ua) { + ua->error_msg(_("Volume name must be at least one character long.\n")); + } + return 0; + } + return 1; +} + +/* + * NOTE! 
This routine opens the SD socket but leaves it open + */ +static bool send_label_request(UAContext *ua, MEDIA_DBR *mr, MEDIA_DBR *omr, + POOL_DBR *pr, int relabel, bool media_record_exists, + int drive) +{ + BSOCK *sd; + char dev_name[MAX_NAME_LENGTH]; + bool ok = false; + uint64_t VolBytes = 0; + uint64_t VolABytes = 0; + uint32_t VolType = 0; + + if (!(sd=open_sd_bsock(ua))) { + return false; + } + bstrncpy(dev_name, ua->jcr->wstore->dev_name(), sizeof(dev_name)); + bash_spaces(dev_name); + bash_spaces(mr->VolumeName); + bash_spaces(mr->MediaType); + bash_spaces(pr->Name); + if (relabel) { + bash_spaces(omr->VolumeName); + sd->fsend("relabel %s OldName=%s NewName=%s PoolName=%s " + "MediaType=%s Slot=%d drive=%d", + dev_name, omr->VolumeName, mr->VolumeName, pr->Name, + mr->MediaType, mr->Slot, drive); + ua->send_msg(_("Sending relabel command from \"%s\" to \"%s\" ...\n"), + omr->VolumeName, mr->VolumeName); + } else { + sd->fsend("label %s VolumeName=%s PoolName=%s MediaType=%s " + "Slot=%d drive=%d", + dev_name, mr->VolumeName, pr->Name, mr->MediaType, + mr->Slot, drive); + ua->send_msg(_("Sending label command for Volume \"%s\" Slot %d ...\n"), + mr->VolumeName, mr->Slot); + Dmsg6(100, "label %s VolumeName=%s PoolName=%s MediaType=%s Slot=%d drive=%d\n", + dev_name, mr->VolumeName, pr->Name, mr->MediaType, mr->Slot, drive); + } + + while (bget_dirmsg(sd) >= 0) { + ua->send_msg("%s", sd->msg); + if (sscanf(sd->msg, "3000 OK label. VolBytes=%llu VolABytes=%lld VolType=%d ", + &VolBytes, &VolABytes, &VolType) == 3) { + ok = true; + if (media_record_exists) { /* we update it */ + mr->VolBytes = VolBytes; + mr->VolABytes = VolABytes; + mr->VolType = VolType; + mr->InChanger = mr->Slot > 0; /* if slot give assume in changer */ + set_storageid_in_mr(ua->jcr->wstore, mr); + if (!db_update_media_record(ua->jcr, ua->db, mr)) { + ua->error_msg("%s", db_strerror(ua->db)); + ok = false; + } + } else { /* create the media record */ + set_pool_dbr_defaults_in_media_dbr(mr, pr); + mr->VolBytes = VolBytes; + mr->VolABytes = VolABytes; + mr->VolType = VolType; + mr->InChanger = mr->Slot > 0; /* if slot give assume in changer */ + mr->Enabled = 1; + set_storageid_in_mr(ua->jcr->wstore, mr); + if (db_create_media_record(ua->jcr, ua->db, mr)) { + ua->info_msg(_("Catalog record for Volume \"%s\", Slot %d successfully created.\n"), + mr->VolumeName, mr->Slot); + /* Update number of volumes in pool */ + pr->NumVols++; + if (!db_update_pool_record(ua->jcr, ua->db, pr)) { + ua->error_msg("%s", db_strerror(ua->db)); + } + } else { + ua->error_msg("%s", db_strerror(ua->db)); + ok = false; + } + } + } + } + if (!ok) { + ua->error_msg(_("Label command failed for Volume %s.\n"), mr->VolumeName); + } + return ok; +} + +static char *get_volume_name_from_SD(UAContext *ua, int Slot, int drive) +{ + STORE *store = ua->jcr->wstore; + BSOCK *sd; + char dev_name[MAX_NAME_LENGTH]; + char *VolName = NULL; + int rtn_slot; + + if (!(sd=open_sd_bsock(ua))) { + ua->error_msg(_("Could not open SD socket.\n")); + return NULL; + } + bstrncpy(dev_name, store->dev_name(), sizeof(dev_name)); + bash_spaces(dev_name); + /* Ask for autochanger list of volumes */ + sd->fsend(NT_("readlabel %s Slot=%d drive=%d\n"), dev_name, Slot, drive); + Dmsg1(100, "Sent: %s", sd->msg); + + /* Get Volume name in this Slot */ + while (sd->recv() >= 0) { + ua->send_msg("%s", sd->msg); + Dmsg1(100, "Got: %s", sd->msg); + if (strncmp(sd->msg, NT_("3001 Volume="), 12) == 0) { + VolName = (char *)malloc(sd->msglen); + if (sscanf(sd->msg, NT_("3001 Volume=%s 
Slot=%d"), VolName, &rtn_slot) == 2) { + break; + } + free(VolName); + VolName = NULL; + } + } + close_sd_bsock(ua); + Dmsg1(100, "get_vol_name=%s\n", NPRT(VolName)); + return VolName; +} + +/* + * We get the slot list from the Storage daemon. + * If scan is set, we return all slots found, + * otherwise, we return only slots with valid barcodes (Volume names) + */ +static vol_list_t *get_vol_list_from_SD(UAContext *ua, bool scan) +{ + STORE *store = ua->jcr->wstore; + char dev_name[MAX_NAME_LENGTH]; + BSOCK *sd; + vol_list_t *vl; + vol_list_t *vol_list = NULL; + + + if (!(sd=open_sd_bsock(ua))) { + return NULL; + } + + bstrncpy(dev_name, store->dev_name(), sizeof(dev_name)); + bash_spaces(dev_name); + /* Ask for autochanger list of volumes */ + sd->fsend(NT_("autochanger list %s \n"), dev_name); + + /* Read and organize list of Volumes */ + while (sd->recv() >= 0) { + char *p; + int Slot; + strip_trailing_junk(sd->msg); + + /* Check for returned SD messages */ + if (sd->msg[0] == '3' && B_ISDIGIT(sd->msg[1]) && + B_ISDIGIT(sd->msg[2]) && B_ISDIGIT(sd->msg[3]) && + sd->msg[4] == ' ') { + ua->send_msg("%s\n", sd->msg); /* pass them on to user */ + continue; + } + + /* Validate Slot: if scanning, otherwise Slot:Barcode */ + p = strchr(sd->msg, ':'); + if (scan && p) { + /* Scanning -- require only valid slot */ + Slot = atoi(sd->msg); + if (Slot <= 0) { + p--; + *p = ':'; + ua->error_msg(_("Invalid Slot number: %s\n"), sd->msg); + continue; + } + } else { + /* Not scanning */ + if (p && strlen(p) > 1) { + *p++ = 0; + if (!is_an_integer(sd->msg) || (Slot=atoi(sd->msg)) <= 0) { + p--; + *p = ':'; + ua->error_msg(_("Invalid Slot number: %s\n"), sd->msg); + continue; + } + } else { + continue; + } + if (!is_volume_name_legal(ua, p)) { + p--; + *p = ':'; + ua->error_msg(_("Invalid Volume name: %s. 
Volume skipped.\n"), sd->msg); + continue; + } + } + + /* Add Slot and VolumeName to list */ + vl = (vol_list_t *)malloc(sizeof(vol_list_t)); + vl->Slot = Slot; + if (p) { + if (*p == ':') { + p++; /* skip separator */ + } + vl->VolName = bstrdup(p); + } else { + vl->VolName = NULL; + } + Dmsg2(100, "Add slot=%d Vol=%s to SD list.\n", vl->Slot, NPRT(vl->VolName)); + if (!vol_list) { + vl->next = vol_list; + vol_list = vl; + } else { + vol_list_t *prev=vol_list; + /* Add new entry to the right place in the list */ + for (vol_list_t *tvl=vol_list; tvl; tvl=tvl->next) { + if (tvl->Slot > vl->Slot) { + /* no previous item, update vol_list directly */ + if (prev == vol_list) { + vl->next = vol_list; + vol_list = vl; + + } else { /* replace the previous pointer */ + prev->next = vl; + vl->next = tvl; + } + break; + } + /* we are at the end */ + if (!tvl->next) { + tvl->next = vl; + vl->next = NULL; + break; + } + prev = tvl; + } + } + } + close_sd_bsock(ua); + return vol_list; +} + +static void free_vol_list(vol_list_t *vol_list) +{ + vol_list_t *vl; + + /* Free list */ + for (vl=vol_list; vl; ) { + vol_list_t *ovl; + if (vl->VolName) { + free(vl->VolName); + } + ovl = vl; + vl = vl->next; + free(ovl); + } +} + +/* + * We get the number of slots in the changer from the SD + */ +static int get_num_slots_from_SD(UAContext *ua) +{ + STORE *store = ua->jcr->wstore; + char dev_name[MAX_NAME_LENGTH]; + BSOCK *sd; + int slots = 0; + + + if (!(sd=open_sd_bsock(ua))) { + return 0; + } + + bstrncpy(dev_name, store->dev_name(), sizeof(dev_name)); + bash_spaces(dev_name); + /* Ask for autochanger number of slots */ + sd->fsend(NT_("autochanger slots %s\n"), dev_name); + + while (sd->recv() >= 0) { + if (sscanf(sd->msg, "slots=%d\n", &slots) == 1) { + break; + } else { + ua->send_msg("%s", sd->msg); + } + } + close_sd_bsock(ua); + ua->send_msg(_("Device \"%s\" has %d slots.\n"), store->dev_name(), slots); + return slots; +} + +/* + * We get the number of drives in the changer from the SD + */ +int get_num_drives_from_SD(UAContext *ua) +{ + STORE *store = ua->jcr->wstore; + char dev_name[MAX_NAME_LENGTH]; + BSOCK *sd; + int drives = 0; + + + if (!(sd=open_sd_bsock(ua))) { + return 0; + } + + bstrncpy(dev_name, store->dev_name(), sizeof(dev_name)); + bash_spaces(dev_name); + /* Ask for autochanger number of slots */ + sd->fsend(NT_("autochanger drives %s\n"), dev_name); + + while (sd->recv() >= 0) { + if (sscanf(sd->msg, NT_("drives=%d\n"), &drives) == 1) { + break; + } else { + ua->send_msg("%s", sd->msg); + } + } + close_sd_bsock(ua); +// bsendmsg(ua, _("Device \"%s\" has %d drives.\n"), store->dev_name(), drives); + return drives; +} + +/* + * Check if this is a cleaning tape by comparing the Volume name + * with the Cleaning Prefix. If they match, this is a cleaning + * tape. 
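   To make the comparison concrete (the volume names and the "CLN" prefix in
   this sketch are invented, not taken from any configuration), the test
   reduces to a fixed-length prefix comparison:

      #include <string.h>
      #include <stdbool.h>

      // Stand-alone illustration of the same prefix test.
      static bool has_cleaning_prefix(const char *vol, const char *prefix)
      {
         return strncmp(vol, prefix, strlen(prefix)) == 0;
      }

      // has_cleaning_prefix("CLN0001", "CLN") -> true
      // has_cleaning_prefix("VOL0001", "CLN") -> false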
+ */ +static bool is_cleaning_tape(UAContext *ua, MEDIA_DBR *mr, POOL_DBR *pr) +{ + /* Find Pool resource */ + ua->jcr->pool = (POOL *)GetResWithName(R_POOL, pr->Name); + if (!ua->jcr->pool) { + ua->error_msg(_("Pool \"%s\" resource not found for volume \"%s\"!\n"), + pr->Name, mr->VolumeName); + return false; + } + if (ua->jcr->pool->cleaning_prefix == NULL) { + return false; /* if no cleaning prefix, this is not a cleaning tape */ + } + Dmsg4(100, "CLNprefix=%s: Vol=%s: len=%d strncmp=%d\n", + ua->jcr->pool->cleaning_prefix, mr->VolumeName, + strlen(ua->jcr->pool->cleaning_prefix), + strncmp(mr->VolumeName, ua->jcr->pool->cleaning_prefix, + (int)strlen(ua->jcr->pool->cleaning_prefix))); + return strncmp(mr->VolumeName, ua->jcr->pool->cleaning_prefix, + strlen(ua->jcr->pool->cleaning_prefix)) == 0; +} + +/* + * Send Volume info to caller in API format + */ +static void send_volume_info(UAContext *ua, char type, int Slot, char *vol_name) +{ + char ed1[50], ed2[50], ed3[50]; + POOL_DBR pr; + MEDIA_DBR mr; + /* Type|Slot|RealSlot|Volume|Bytes|Status|MediaType|Pool|LastW|Expire */ + const char *slot_api_full_format="%c|%i|%i|%s|%s|%s|%s|%s|%s|%s\n"; + const char *slot_api_empty_format="%c|%i||||||||\n"; + + if (is_volume_name_legal(NULL, vol_name)) { + bstrncpy(mr.VolumeName, vol_name, sizeof(mr.VolumeName)); + if (db_get_media_record(ua->jcr, ua->db, &mr)) { + memset(&pr, 0, sizeof(POOL_DBR)); + pr.PoolId = mr.PoolId; + if (!db_get_pool_record(ua->jcr, ua->db, &pr)) { + strcpy(pr.Name, "?"); + } + ua->send_msg(slot_api_full_format, type, + Slot, mr.Slot, mr.VolumeName, + edit_uint64(mr.VolBytes, ed1), + mr.VolStatus, mr.MediaType, pr.Name, + edit_uint64(mr.LastWritten, ed2), + edit_uint64(mr.LastWritten+mr.VolRetention, ed3)); + + } else { /* Media unknown */ + ua->send_msg(slot_api_full_format, + type, Slot, 0, mr.VolumeName, "?", "?", "?", "?", + "0", "0"); + + } + } else { + ua->send_msg(slot_api_empty_format, type, Slot); + } +} + +/* + * Input (output of mxt-changer listall): + * + * Drive content: D:Drive num:F:Slot loaded:Volume Name + * D:0:F:2:vol2 or D:Drive num:E + * D:1:F:42:vol42 + * D:3:E + * + * Slot content: + * S:1:F:vol1 S:Slot num:F:Volume Name + * S:2:E or S:Slot num:E + * S:3:F:vol4 + * + * Import/Export tray slots: + * I:10:F:vol10 I:Slot num:F:Volume Name + * I:11:E or I:Slot num:E + * I:12:F:vol40 + * + * If a drive is loaded, the slot *should* be empty + * + * Output: + * + * Drive list: D|Drive num|Slot loaded|Volume Name + * D|0|45|vol45 + * D|1|42|vol42 + * D|3|| + * + * Slot list: Type|Slot|RealSlot|Volume|Bytes|Status|MediaType|Pool|LastW|Expire + * + * S|1|1|vol1|31417344|Full|LTO1-ANSI|Inc|1250858902|1282394902 + * S|2|||||||| + * S|3|3|vol4|15869952|Append|LTO1-ANSI|Inc|1250858907|1282394907 + * + * TODO: need to merge with status_slots() + */ +void status_content(UAContext *ua, STORE *store) +{ + int Slot, Drive; + char type; + char dev_name[MAX_NAME_LENGTH]; + char vol_name[MAX_NAME_LENGTH]; + BSOCK *sd; + vol_list_t *vl=NULL, *vol_list = NULL; + + if (!(sd=open_sd_bsock(ua))) { + return; + } + + if (!open_client_db(ua)) { + return; + } + + bstrncpy(dev_name, store->dev_name(), sizeof(dev_name)); + bash_spaces(dev_name); + /* Ask for autochanger list of volumes */ + sd->fsend(NT_("autochanger listall %s \n"), dev_name); + + /* Read and organize list of Drive, Slots and I/O Slots */ + while (sd->recv() >= 0) { + strip_trailing_junk(sd->msg); + + /* Check for returned SD messages */ + if (sd->msg[0] == '3' && B_ISDIGIT(sd->msg[1]) && + B_ISDIGIT(sd->msg[2]) 
&& B_ISDIGIT(sd->msg[3]) && + sd->msg[4] == ' ') { + ua->send_msg("%s\n", sd->msg); /* pass them on to user */ + continue; + } + + Drive = Slot = -1; + *vol_name = 0; + + if (sscanf(sd->msg, "D:%d:F:%d:%127s", &Drive, &Slot, vol_name) == 3) { + ua->send_msg("D|%d|%d|%s\n", Drive, Slot, vol_name); + + /* we print information on the slot if we have a volume name */ + if (*vol_name) { + /* Add Slot and VolumeName to list */ + vl = (vol_list_t *)malloc(sizeof(vol_list_t)); + vl->Slot = Slot; + vl->VolName = bstrdup(vol_name); + vl->next = vol_list; + vol_list = vl; + } + + } else if (sscanf(sd->msg, "D:%d:E", &Drive) == 1) { + ua->send_msg("D|%d||\n", Drive); + + } else if (sscanf(sd->msg, "%c:%d:F:%127s", &type, &Slot, vol_name)== 3) { + send_volume_info(ua, type, Slot, vol_name); + + } else if (sscanf(sd->msg, "%c:%d:E", &type, &Slot) == 2) { + /* type can be S (slot) or I (Import/Export slot) */ + vol_list_t *prev=NULL; + for (vl = vol_list; vl; vl = vl->next) { + if (vl->Slot == Slot) { + bstrncpy(vol_name, vl->VolName, MAX_NAME_LENGTH); + + /* remove the node */ + if (prev) { + prev->next = vl->next; + } else { + vol_list = vl->next; + } + free(vl->VolName); + free(vl); + break; + } + prev = vl; + } + send_volume_info(ua, type, Slot, vol_name); + + } else { + Dmsg1(10, "Discarding msg=%s\n", sd->msg); + } + } + close_sd_bsock(ua); +} + +/* + * Print slots from AutoChanger + */ +void status_slots(UAContext *ua, STORE *store_r) +{ + USTORE store; + POOL_DBR pr; + vol_list_t *vl, *vol_list = NULL; + MEDIA_DBR mr; + char *slot_list; + int max_slots; + int i=1; + /* Slot | Volume | Status | MediaType | Pool */ + const char *slot_hformat="| %4i%c| %-20s | %-9s | %-15s | %-18s |\n"; + + if (ua->api) { + status_content(ua, store_r); + return; + } + + if (!open_client_db(ua)) { + return; + } + store.store = store_r; + + pm_strcpy(store.store_source, _("Command input")); + set_wstorage(ua->jcr, &store); + get_storage_drive(ua, store.store); + + max_slots = get_num_slots_from_SD(ua); + + if (max_slots <= 0) { + ua->warning_msg(_("No slots in changer to scan.\n")); + return; + } + slot_list = (char *)malloc(max_slots+1); + if (!get_user_slot_list(ua, slot_list, max_slots)) { + free(slot_list); + return; + } + + vol_list = get_vol_list_from_SD(ua, true /* want to see all slots */); + + if (!vol_list) { + ua->warning_msg(_("No Volumes found, or no barcodes.\n")); + goto bail_out; + } + ua->send_msg(_("+------+----------------------+-----------+-----------------+--------------------+\n")); + ua->send_msg(_("| Slot | Volume Name | Status | Media Type | Pool |\n")); + ua->send_msg(_("+------+----------------------+-----------+-----------------+--------------------+\n")); + + /* Walk through the list getting the media records */ + for (vl=vol_list; vl; vl=vl->next) { + if (vl->Slot > max_slots) { + ua->warning_msg(_("Slot %d greater than max %d ignored.\n"), + vl->Slot, max_slots); + continue; + } + /* Check if user wants us to look at this slot */ + if (!slot_list[vl->Slot]) { + Dmsg1(100, "Skipping slot=%d\n", vl->Slot); + continue; + } + + slot_list[vl->Slot] = 0; /* clear Slot */ + + if (!vl->VolName) { + Dmsg1(100, "No VolName for Slot=%d.\n", vl->Slot); + ua->send_msg(slot_hformat, + vl->Slot, '*', + "?", "?", "?", "?"); + continue; + } + + /* Hope that slots are ordered */ + for (; i < vl->Slot; i++) { + if (slot_list[i]) { + ua->send_msg(slot_hformat, + i, ' ', "", "", "", ""); + slot_list[i]=0; + } + } + 
ua->send_msg(_("+------+----------------------+-----------+-----------------+--------------------+\n")); + + memset(&mr, 0, sizeof(MEDIA_DBR)); + bstrncpy(mr.VolumeName, vl->VolName, sizeof(mr.VolumeName)); + + if (mr.VolumeName[0] && db_get_media_record(ua->jcr, ua->db, &mr)) { + memset(&pr, 0, sizeof(POOL_DBR)); + pr.PoolId = mr.PoolId; + if (!db_get_pool_record(ua->jcr, ua->db, &pr)) { + strcpy(pr.Name, "?"); + } + + /* Print information */ + ua->send_msg(slot_hformat, + vl->Slot, ((vl->Slot==mr.Slot)?' ':'*'), + mr.VolumeName, mr.VolStatus, mr.MediaType, pr.Name); + + } else { /* TODO: get information from catalog */ + ua->send_msg(slot_hformat, + vl->Slot, '*', + mr.VolumeName, "?", "?", "?"); + } + } + + /* Display the rest of the autochanger + */ + for (; i <= max_slots; i++) { + if (slot_list[i]) { + ua->send_msg(slot_hformat, + i, ' ', "", "", "", ""); + slot_list[i]=0; + } + } + +bail_out: + + free_vol_list(vol_list); + free(slot_list); + close_sd_bsock(ua); + + return; +} diff --git a/src/dird/ua_output.c b/src/dird/ua_output.c new file mode 100644 index 00000000..5afb543a --- /dev/null +++ b/src/dird/ua_output.c @@ -0,0 +1,1193 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula Director -- User Agent Output Commands + * I.e. messages, listing database, showing resources, ... 
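   Editor's aside on the slot listing a little further above: status_slots()
   builds its table with a single row format string.  Run on its own with
   invented values, the same format shows the shape of one row; this is an
   illustration only, not real director output.

      #include <stdio.h>

      int main(void)
      {
         // Same row layout as slot_hformat in status_slots(); values made up.
         const char *row = "| %4i%c| %-20s | %-9s | %-15s | %-18s |\n";
         printf(row, 3, ' ', "Vol0003", "Append", "LTO-4", "Default");
         return 0;
      }

      // Prints one fixed-width row that lines up under the table header
      // emitted by status_slots().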
+ * + * Kern Sibbald, September MM + */ + +#include "bacula.h" +#include "dird.h" + +/* Imported subroutines */ + +/* Imported variables */ + +/* Imported functions */ + +/* Forward referenced functions */ +static int do_list_cmd(UAContext *ua, const char *cmd, e_list_type llist); +static bool list_nextvol(UAContext *ua, int ndays); + +/* + * Turn auto display of console messages on/off + */ +int autodisplay_cmd(UAContext *ua, const char *cmd) +{ + static const char *kw[] = { + NT_("on"), + NT_("off"), + NULL}; + + switch (find_arg_keyword(ua, kw)) { + case 0: + ua->auto_display_messages = true; + break; + case 1: + ua->auto_display_messages = false; + break; + default: + ua->error_msg(_("ON or OFF keyword missing.\n")); + break; + } + return 1; +} + +/* + * Turn GUI mode on/off + */ +int gui_cmd(UAContext *ua, const char *cmd) +{ + static const char *kw[] = { + NT_("on"), + NT_("off"), + NULL}; + + switch (find_arg_keyword(ua, kw)) { + case 0: + ua->jcr->gui = ua->gui = true; + break; + case 1: + ua->jcr->gui = ua->gui = false; + break; + default: + ua->error_msg(_("ON or OFF keyword missing.\n")); + break; + } + return 1; +} + +/* + * Enter with Resources locked + */ +static void show_disabled_jobs(UAContext *ua) +{ + JOB *job; + bool first = true; + foreach_res(job, R_JOB) { + if (!acl_access_ok(ua, Job_ACL, job->name())) { + continue; + } + if (!job->is_enabled()) { + if (first) { + first = false; + ua->send_msg(_("Disabled Jobs:\n")); + } + ua->send_msg(" %s\n", job->name()); + } + } + if (first) { + ua->send_msg(_("No disabled Jobs.\n")); + } +} + +struct showstruct {const char *res_name; int type;}; +static struct showstruct reses[] = { + {NT_("directors"), R_DIRECTOR}, + {NT_("clients"), R_CLIENT}, + {NT_("counters"), R_COUNTER}, + {NT_("devices"), R_DEVICE}, + {NT_("jobs"), R_JOB}, + {NT_("storages"), R_STORAGE}, + {NT_("catalogs"), R_CATALOG}, + {NT_("schedules"), R_SCHEDULE}, + {NT_("filesets"), R_FILESET}, + {NT_("pools"), R_POOL}, + {NT_("messages"), R_MSGS}, +// {NT_("consoles"), R_CONSOLE}, +// {NT_("jobdefs"), R_JOBDEFS}, +// {NT_{"autochangers"), R_AUTOCHANGER}, + {NT_("all"), -1}, + {NT_("help"), -2}, + {NULL, 0} +}; + + +/* + * Displays Resources + * + * show all + * show e.g. show directors + * show = e.g. show director=HeadMan + * show disabled shows disabled jobs + * + */ +int show_cmd(UAContext *ua, const char *cmd) +{ + int i, j, type, len; + int recurse; + char *res_name; + RES_HEAD *reshead = NULL; + RES *res = NULL; + + Dmsg1(20, "show: %s\n", ua->UA_sock->msg); + + + LockRes(); + for (i=1; iargc; i++) { + if (strcasecmp(ua->argk[i], NT_("disabled")) == 0) { + show_disabled_jobs(ua); + goto bail_out; + } + + res = NULL; + reshead = NULL; + type = 0; + + res_name = ua->argk[i]; + if (!ua->argv[i]) { /* was a name given? 
*/ + /* No name, dump all resources of specified type */ + recurse = 1; + len = strlen(res_name); + for (j=0; reses[j].res_name; j++) { + if (strncasecmp(res_name, reses[j].res_name, len) == 0) { + type = reses[j].type; + if (type > 0) { + reshead = res_head[type-r_first]; + } else { + reshead = NULL; + } + break; + } + } + + } else { + /* Dump a single resource with specified name */ + recurse = 0; + len = strlen(res_name); + for (j=0; reses[j].res_name; j++) { + if (strncasecmp(res_name, reses[j].res_name, len) == 0) { + type = reses[j].type; + res = (RES *)GetResWithName(type, ua->argv[i]); + if (!res) { + type = -3; + } + break; + } + } + } + + switch (type) { + /* All resources */ + case -1: + for (j=r_first; j<=r_last; j++) { + /* Skip R_DEVICE since it is really not used or updated */ + if (j != R_DEVICE) { + dump_each_resource(j, bsendmsg, ua); + } + } + break; + /* Help */ + case -2: + ua->send_msg(_("Keywords for the show command are:\n")); + for (j=0; reses[j].res_name; j++) { + ua->error_msg("%s\n", reses[j].res_name); + } + goto bail_out; + /* Resource not found */ + case -3: + ua->error_msg(_("%s resource %s not found.\n"), res_name, ua->argv[i]); + goto bail_out; + /* Resource not found */ + case 0: + ua->error_msg(_("Resource %s not found\n"), res_name); + goto bail_out; + /* Dump a specific type */ + default: + if (res) { /* keyword and argument, ie: show job=name */ + dump_resource(recurse?type:-type, res, bsendmsg, ua); + + } else if (reshead) { /* keyword only, ie: show job */ + dump_each_resource(-type, bsendmsg, ua); + } + break; + } + } +bail_out: + UnlockRes(); + return 1; +} + +/* + * Check if the access is permitted for a list of jobids + * + * Not in ua_acl.c because it's using db access, and tools such + * as bdirjson are not linked with cats. 
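   Editor's aside on the show command above: keywords may be abbreviated
   because only strlen() of the word the user typed is compared, and the first
   entry of the reses[] table that matches wins.  A stand-alone sketch of that
   test (matches_keyword() is an invented name):

      #include <string.h>
      #include <strings.h>    // strncasecmp

      static int matches_keyword(const char *user_word, const char *keyword)
      {
         return strncasecmp(user_word, keyword, strlen(user_word)) == 0;
      }

      // matches_keyword("sto", "storages") -> 1, so "show sto" selects the
      // Storage resources; an ambiguous abbreviation simply takes the first
      // matching table entry.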
+ */ +bool acl_access_jobid_ok(UAContext *ua, const char *jobids) +{ + char *tmp=NULL, *p; + bool ret=false; + JOB_DBR jr; + uint32_t jid; + + if (!jobids) { + return false; + } + + if (!is_a_number_list(jobids)) { + return false; + } + + /* If no console resource => default console and all is permitted */ + if (!ua || !ua->cons) { + Dmsg0(1400, "Root cons access OK.\n"); + return true; /* No cons resource -> root console OK for everything */ + } + + alist *list = ua->cons->ACL_lists[Job_ACL]; + if (!list) { /* empty list */ + return false; /* List empty, reject everything */ + } + + /* Special case *all* gives full access */ + if (list->size() == 1 && strcasecmp("*all*", (char *)list->get(0)) == 0) { + return true; + } + + /* If we can't open the database, just say no */ + if (!open_new_client_db(ua)) { + return false; + } + + p = tmp = bstrdup(jobids); + + while (get_next_jobid_from_list(&p, &jid) > 0) { + memset(&jr, 0, sizeof(jr)); + jr.JobId = jid; + + if (db_get_job_record(ua->jcr, ua->db, &jr)) { + for (int i=0; isize(); i++) { + if (strcasecmp(jr.Name, (char *)list->get(i)) == 0) { + Dmsg3(1400, "ACL found %s in %d %s\n", jr.Name, + Job_ACL, (char *)list->get(i)); + ret = true; + goto bail_out; + } + } + } + } + +bail_out: + if (tmp) { + free(tmp); + } + return ret; +} + +/* + * List contents of database + * + * list jobs - lists all jobs run + * list jobid=nnn - list job data for jobid + * list ujobid=uname - list job data for unique jobid + * list job=name - list all jobs with "name" + * list jobname=name - same as above + * list jobmedia jobid= + * list jobmedia job=name + * list joblog jobid= + * list joblog job=name + * list files [type=] jobid= - list files saved for job nn + * list files [type=] job=name + * list pools - list pool records + * list jobtotals - list totals for all jobs + * list media - list media for given pool (deprecated) + * list volumes - list Volumes + * list clients - list clients + * list nextvol job=xx - list the next vol to be used by job + * list nextvolume job=xx - same as above. + * list copies jobid=x,y,z + * list pluginrestoreconf jobid=x,y,z [id=k] + * + * Note: keyword "long" is before the first command on the command + * line results in doing a llist (long listing). + */ + +/* Do long or full listing */ +int llist_cmd(UAContext *ua, const char *cmd) +{ + return do_list_cmd(ua, cmd, VERT_LIST); +} + +/* Do short or summary listing */ +int list_cmd(UAContext *ua, const char *cmd) +{ + if (find_arg(ua, "long") > 0) { + return do_list_cmd(ua, cmd, VERT_LIST); /* do a long list */ + } else { + return do_list_cmd(ua, cmd, HORZ_LIST); /* do a short list */ + } +} + +static int do_list_cmd(UAContext *ua, const char *cmd, e_list_type llist) +{ + POOLMEM *VolumeName; + int jobid=0, n; + int i, j; + JOB_DBR jr; + POOL_DBR pr; + MEDIA_DBR mr; + + if (!open_new_client_db(ua)) + return 1; + + memset(&jr, 0, sizeof(jr)); + memset(&pr, 0, sizeof(pr)); + + Dmsg1(20, "list: %s\n", cmd); + + if (!ua->db) { + ua->error_msg(_("Hey! 
DB is NULL\n")); + } + /* Apply any limit */ + for (j = 1; j < ua->argc ; j++) { + if (strcasecmp(ua->argk[j], NT_("joberrors")) == 0) { + jr.JobErrors = 1; + } else if (!ua->argv[j]) { + /* skip */ + } else if (strcasecmp(ua->argk[j], NT_("order")) == 0) { + if ((strcasecmp(ua->argv[j], NT_("desc")) == 0) || + strcasecmp(ua->argv[j], NT_("descending")) == 0) { + jr.order = 1; + } else if ((strcasecmp(ua->argv[j], NT_("asc")) == 0) || + strcasecmp(ua->argv[j], NT_("ascending")) == 0) { + jr.order = 0; + } else { + ua->error_msg(_("Unknown order type %s\n"), ua->argv[j]); + return 1; + } + } else if (strcasecmp(ua->argk[j], NT_("limit")) == 0) { + jr.limit = atoi(ua->argv[j]); + + } else if (strcasecmp(ua->argk[j], NT_("jobstatus")) == 0) { + if (B_ISALPHA(ua->argv[j][0])) { + jr.JobStatus = ua->argv[j][0]; /* TODO: Check if the code is correct */ + } + } else if (strcasecmp(ua->argk[j], NT_("jobtype")) == 0) { + if (B_ISALPHA(ua->argv[j][0])) { + jr.JobType = ua->argv[j][0]; /* TODO: Check if the code is correct */ + } + } else if (strcasecmp(ua->argk[j], NT_("level")) == 0) { + if (strlen(ua->argv[j]) > 1) { + jr.JobLevel = get_level_code_from_name(ua->argv[j]); + + } else if (B_ISALPHA(ua->argv[j][0])) { + jr.JobLevel = ua->argv[j][0]; /* TODO: Check if the code is correct */ + } + } else if (strcasecmp(ua->argk[j], NT_("level")) == 0) { + + + } else if (strcasecmp(ua->argk[j], NT_("client")) == 0) { + if (is_name_valid(ua->argv[j], NULL)) { + CLIENT_DBR cr; + memset(&cr, 0, sizeof(cr)); + /* Both Backup & Restore wants to list jobs for this client */ + if(get_client_dbr(ua, &cr, JT_BACKUP_RESTORE)) { + jr.ClientId = cr.ClientId; + } + } + } + } + + /* Scan arguments looking for things to do */ + for (i=1; iargc; i++) { + /* List JOBS */ + if (strcasecmp(ua->argk[i], NT_("jobs")) == 0) { + db_list_job_records(ua->jcr, ua->db, &jr, prtit, ua, llist); + + /* List JOBTOTALS */ + } else if (strcasecmp(ua->argk[i], NT_("jobtotals")) == 0) { + db_list_job_totals(ua->jcr, ua->db, &jr, prtit, ua); + + /* List JOBID=nn */ + } else if (strcasecmp(ua->argk[i], NT_("jobid")) == 0) { + if (ua->argv[i]) { + jobid = str_to_int64(ua->argv[i]); + if (jobid > 0) { + jr.JobId = jobid; + db_list_job_records(ua->jcr, ua->db, &jr, prtit, ua, llist); + } + } + + /* List JOB=xxx */ + } else if ((strcasecmp(ua->argk[i], NT_("job")) == 0 || + strcasecmp(ua->argk[i], NT_("jobname")) == 0) && ua->argv[i]) { + bstrncpy(jr.Name, ua->argv[i], MAX_NAME_LENGTH); + jr.JobId = 0; + db_list_job_records(ua->jcr, ua->db, &jr, prtit, ua, llist); + + /* List UJOBID=xxx */ + } else if (strcasecmp(ua->argk[i], NT_("ujobid")) == 0 && ua->argv[i]) { + bstrncpy(jr.Job, ua->argv[i], MAX_NAME_LENGTH); + jr.JobId = 0; + db_list_job_records(ua->jcr, ua->db, &jr, prtit, ua, llist); + + /* List Base files */ + } else if (strcasecmp(ua->argk[i], NT_("basefiles")) == 0) { + /* TODO: cleanup this block */ + for (j=i+1; jargc; j++) { + if (strcasecmp(ua->argk[j], NT_("ujobid")) == 0 && ua->argv[j]) { + bstrncpy(jr.Job, ua->argv[j], MAX_NAME_LENGTH); + jr.JobId = 0; + db_get_job_record(ua->jcr, ua->db, &jr); + jobid = jr.JobId; + } else if (strcasecmp(ua->argk[j], NT_("jobid")) == 0 && ua->argv[j]) { + jobid = str_to_int64(ua->argv[j]); + } else { + continue; + } + if (jobid > 0) { + db_list_base_files_for_job(ua->jcr, ua->db, jobid, prtit, ua); + } + } + + /* List FILES */ + } else if (strcasecmp(ua->argk[i], NT_("files")) == 0) { + int deleted = 0; /* see only backed up files */ + for (j=i+1; jargc; j++) { + if (strcasecmp(ua->argk[j], 
NT_("ujobid")) == 0 && ua->argv[j]) { + bstrncpy(jr.Job, ua->argv[j], MAX_NAME_LENGTH); + jr.JobId = 0; + db_get_job_record(ua->jcr, ua->db, &jr); + jobid = jr.JobId; + + } else if (strcasecmp(ua->argk[j], NT_("jobid")) == 0 && ua->argv[j]) { + jobid = str_to_int64(ua->argv[j]); + + } else if (strcasecmp(ua->argk[j], NT_("type")) == 0 && ua->argv[j]) { + if (strcasecmp(ua->argv[j], NT_("deleted")) == 0) { + deleted = 1; + } else if (strcasecmp(ua->argv[j], NT_("all")) == 0) { + deleted = -1; + } + continue; /* Type should be before the jobid... */ + } else { + continue; + } + if (jobid > 0) { + db_list_files_for_job(ua->jcr, ua->db, jobid, deleted, prtit, ua); + } + } + + /* List JOBMEDIA */ + } else if (strcasecmp(ua->argk[i], NT_("jobmedia")) == 0) { + bool done = false; + for (j=i+1; jargc; j++) { + if (strcasecmp(ua->argk[j], NT_("ujobid")) == 0 && ua->argv[j]) { + bstrncpy(jr.Job, ua->argv[j], MAX_NAME_LENGTH); + jr.JobId = 0; + db_get_job_record(ua->jcr, ua->db, &jr); + jobid = jr.JobId; + } else if (strcasecmp(ua->argk[j], NT_("jobid")) == 0 && ua->argv[j]) { + jobid = str_to_int64(ua->argv[j]); + } else { + continue; + } + db_list_jobmedia_records(ua->jcr, ua->db, jobid, prtit, ua, llist); + done = true; + } + if (!done) { + /* List for all jobs (jobid=0) */ + db_list_jobmedia_records(ua->jcr, ua->db, 0, prtit, ua, llist); + } + + /* List JOBLOG */ + } else if (strcasecmp(ua->argk[i], NT_("joblog")) == 0) { + bool done = false; + for (j=i+1; jargc; j++) { + if (strcasecmp(ua->argk[j], NT_("ujobid")) == 0 && ua->argv[j]) { + bstrncpy(jr.Job, ua->argv[j], MAX_NAME_LENGTH); + jr.JobId = 0; + db_get_job_record(ua->jcr, ua->db, &jr); + jobid = jr.JobId; + } else if (strcasecmp(ua->argk[j], NT_("jobid")) == 0 && ua->argv[j]) { + jobid = str_to_int64(ua->argv[j]); + } else { + continue; + } + db_list_joblog_records(ua->jcr, ua->db, jobid, prtit, ua, llist); + done = true; + } + if (!done) { + /* List for all jobs (jobid=0) */ + db_list_joblog_records(ua->jcr, ua->db, 0, prtit, ua, llist); + } + + + /* List POOLS */ + } else if (strcasecmp(ua->argk[i], NT_("pool")) == 0 || + strcasecmp(ua->argk[i], NT_("pools")) == 0) { + POOL_DBR pr; + memset(&pr, 0, sizeof(pr)); + if (ua->argv[i]) { + bstrncpy(pr.Name, ua->argv[i], sizeof(pr.Name)); + } + db_list_pool_records(ua->jcr, ua->db, &pr, prtit, ua, llist); + + } else if (strcasecmp(ua->argk[i], NT_("clients")) == 0) { + db_list_client_records(ua->jcr, ua->db, prtit, ua, llist); + + } else if (strcasecmp(ua->argk[i], NT_("pluginrestoreconf")) == 0) { + ROBJECT_DBR rr; + memset(&rr, 0, sizeof(rr)); + rr.FileType = FT_PLUGIN_CONFIG; + + for (j=i+1; jargc; j++) { + if (strcasecmp(ua->argk[j], NT_("ujobid")) == 0 && ua->argv[j]) { + bstrncpy(jr.Job, ua->argv[j], MAX_NAME_LENGTH); + jr.JobId = 0; + + } else if (strcasecmp(ua->argk[j], NT_("jobid")) == 0 && ua->argv[j]) { + + if (acl_access_jobid_ok(ua, ua->argv[j])) { + + if (is_a_number(ua->argv[j])) { + rr.JobId = str_to_uint64(ua->argv[j]); + + } else if (is_a_number_list(ua->argv[j])) { + /* In this case, loop directly to find if all jobids are + * accessible */ + rr.JobIds = ua->argv[j]; + } + + } else { + ua->error_msg(_("Invalid jobid argument\n")); + return 1; + } + + } else if (((strcasecmp(ua->argk[j], NT_("id")) == 0) || + (strcasecmp(ua->argk[j], NT_("restoreobjectid")) == 0)) + && ua->argv[j]) + { + rr.RestoreObjectId = str_to_uint64(ua->argv[j]); + + } else if (strcasecmp(ua->argk[j], NT_("objecttype")) == 0 && ua->argv[j]) { + if (strcasecmp(ua->argv[j], NT_("PLUGIN_CONFIG")) == 0) { + 
rr.FileType = FT_PLUGIN_CONFIG; + + } else if (strcasecmp(ua->argv[j], NT_("PLUGIN_CONFIG_FILLED")) == 0) { + rr.FileType = FT_PLUGIN_CONFIG_FILLED; + + } else if (strcasecmp(ua->argv[j], NT_("RESTORE_FIRST")) == 0) { + rr.FileType = FT_RESTORE_FIRST; + + } else if (strcasecmp(ua->argv[j], NT_("ALL")) == 0) { + rr.FileType = 0; + + } else { + ua->error_msg(_("Unknown ObjectType %s\n"), ua->argv[j]); + return 1; + } + + } else { + continue; + } + } + + if (!rr.JobId && !rr.JobIds) { + ua->error_msg(_("list pluginrestoreconf requires jobid argument\n")); + return 1; + } + + /* Display the content of the restore object */ + if (rr.RestoreObjectId > 0) { + /* Here, the JobId and the RestoreObjectId are set */ + if (db_get_restoreobject_record(ua->jcr, ua->db, &rr)) { + ua->send_msg("%s\n", NPRTB(rr.object)); + } else { + Dmsg0(200, "Object not found\n"); + } + + } else { + db_list_restore_objects(ua->jcr, ua->db, &rr, prtit, ua, llist); + } + + db_free_restoreobject_record(ua->jcr, &rr); + return 1; + + /* List MEDIA or VOLUMES */ + } else if (strcasecmp(ua->argk[i], NT_("media")) == 0 || + strcasecmp(ua->argk[i], NT_("volume")) == 0 || + strcasecmp(ua->argk[i], NT_("volumes")) == 0) { + bool done = false; + for (j=i+1; jargc; j++) { + if (strcasecmp(ua->argk[j], NT_("ujobid")) == 0 && ua->argv[j]) { + bstrncpy(jr.Job, ua->argv[j], MAX_NAME_LENGTH); + jr.JobId = 0; + db_get_job_record(ua->jcr, ua->db, &jr); + jobid = jr.JobId; + } else if (strcasecmp(ua->argk[j], NT_("jobid")) == 0 && ua->argv[j]) { + jobid = str_to_int64(ua->argv[j]); + } else { + continue; + } + VolumeName = get_pool_memory(PM_FNAME); + n = db_get_job_volume_names(ua->jcr, ua->db, jobid, &VolumeName); + ua->send_msg(_("Jobid %d used %d Volume(s): %s\n"), jobid, n, VolumeName); + free_pool_memory(VolumeName); + done = true; + } + + /* if no job or jobid keyword found, then we list all media */ + if (!done) { + int num_pools; + uint32_t *ids; + /* List a specific volume? */ + if (ua->argv[i] && *ua->argv[i]) { + bstrncpy(mr.VolumeName, ua->argv[i], sizeof(mr.VolumeName)); + db_list_media_records(ua->jcr, ua->db, &mr, prtit, ua, llist); + return 1; + } + /* Is a specific pool wanted? */ + for (i=1; iargc; i++) { + if (strcasecmp(ua->argk[i], NT_("pool")) == 0) { + if (!get_pool_dbr(ua, &pr)) { + ua->error_msg(_("No Pool specified.\n")); + return 1; + } + mr.PoolId = pr.PoolId; + db_list_media_records(ua->jcr, ua->db, &mr, prtit, ua, llist); + return 1; + } + } + + /* List Volumes in all pools */ + if (!db_get_pool_ids(ua->jcr, ua->db, &num_pools, &ids)) { + ua->error_msg(_("Error obtaining pool ids. ERR=%s\n"), + db_strerror(ua->db)); + return 1; + } + if (num_pools <= 0) { + return 1; + } + for (i=0; i < num_pools; i++) { + pr.PoolId = ids[i]; + if (db_get_pool_record(ua->jcr, ua->db, &pr)) { + ua->send_msg(_("Pool: %s\n"), pr.Name); + } + mr.PoolId = ids[i]; + db_list_media_records(ua->jcr, ua->db, &mr, prtit, ua, llist); + } + free(ids); + return 1; + } + /* List next volume */ + } else if (strcasecmp(ua->argk[i], NT_("nextvol")) == 0 || + strcasecmp(ua->argk[i], NT_("nextvolume")) == 0) { + n = 1; + j = find_arg_with_value(ua, NT_("days")); + if (j >= 0) { + n = atoi(ua->argv[j]); + if ((n < 0) || (n > 50)) { + ua->warning_msg(_("Ignoring invalid value for days. 
Max is 50.\n")); + n = 1; + } + } + list_nextvol(ua, n); + } else if (strcasecmp(ua->argk[i], NT_("copies")) == 0) { + char *jobids = NULL; + uint32_t limit=0; + for (j=i+1; j<ua->argc; j++) { + if (strcasecmp(ua->argk[j], NT_("jobid")) == 0 && ua->argv[j]) { + if (is_a_number_list(ua->argv[j])) { + jobids = ua->argv[j]; + } + } else if (strcasecmp(ua->argk[j], NT_("limit")) == 0 && ua->argv[j]) { + limit = atoi(ua->argv[j]); + } + } + db_list_copies_records(ua->jcr,ua->db,limit,jobids,prtit,ua,llist); + } else if (strcasecmp(ua->argk[i], NT_("limit")) == 0 + || strcasecmp(ua->argk[i], NT_("days")) == 0 + || strcasecmp(ua->argk[i], NT_("joberrors")) == 0 + || strcasecmp(ua->argk[i], NT_("order")) == 0 + || strcasecmp(ua->argk[i], NT_("jobstatus")) == 0 + || strcasecmp(ua->argk[i], NT_("client")) == 0 + || strcasecmp(ua->argk[i], NT_("type")) == 0 + || strcasecmp(ua->argk[i], NT_("level")) == 0 + || strcasecmp(ua->argk[i], NT_("jobtype")) == 0 + || strcasecmp(ua->argk[i], NT_("long")) == 0 + ) { + /* Ignore it */ + } else if (strcasecmp(ua->argk[i], NT_("snapshot")) == 0 || + strcasecmp(ua->argk[i], NT_("snapshots")) == 0) + { + snapshot_list(ua, i, prtit, llist); + return 1; + + } else { + ua->error_msg(_("Unknown list keyword: %s\n"), NPRT(ua->argk[i])); + } + } + return 1; +} + +static bool list_nextvol(UAContext *ua, int ndays) +{ + JOB *job; + JCR *jcr; + USTORE store; + RUN *run; + utime_t runtime; + bool found = false; + MEDIA_DBR mr; + POOL_DBR pr; + POOL_MEM errmsg; + char edl[50]; + + int i = find_arg_with_value(ua, "job"); + if (i <= 0) { + if ((job = select_job_resource(ua)) == NULL) { + return false; + } + } else { + job = (JOB *)GetResWithName(R_JOB, ua->argv[i]); + if (!job) { + Jmsg(ua->jcr, M_ERROR, 0, _("%s is not a job name.\n"), ua->argv[i]); + if ((job = select_job_resource(ua)) == NULL) { + return false; + } + } + } + + jcr = new_jcr(sizeof(JCR), dird_free_jcr); + for (run=NULL; (run = find_next_run(run, job, runtime, ndays)); ) { + if (!complete_jcr_for_job(jcr, job, run->pool)) { + found = false; + goto get_out; + } + if (!jcr->jr.PoolId) { + ua->error_msg(_("Could not find Pool for Job %s\n"), job->name()); + continue; + } + bmemset(&pr, 0, sizeof(pr)); + pr.PoolId = jcr->jr.PoolId; + if (!db_get_pool_record(jcr, jcr->db, &pr)) { + bstrncpy(pr.Name, "*UnknownPool*", sizeof(pr.Name)); + } + mr.PoolId = jcr->jr.PoolId; + get_job_storage(&store, job, run); + set_storageid_in_mr(store.store, &mr); + /* no need to set ScratchPoolId, since we use fnv_no_create_vol */ + if (!find_next_volume_for_append(jcr, &mr, 1, fnv_no_create_vol, fnv_prune, errmsg)) { + ua->error_msg(_("Could not find next Volume for Job %s (Pool=%s, Level=%s). %s\n"), + job->name(), pr.Name, level_to_str(edl, sizeof(edl), run->level), errmsg.c_str()); + } else { + ua->send_msg( + _("The next Volume to be used by Job \"%s\" (Pool=%s, Level=%s) will be %s\n"), + job->name(), pr.Name, level_to_str(edl, sizeof(edl), run->level), mr.VolumeName); + found = true; + } + } + +get_out: + if (jcr->db) db_close_database(jcr, jcr->db); + jcr->db = NULL; + free_jcr(jcr); + if (!found) { + ua->error_msg(_("Could not find next Volume for Job %s.\n"), + job->hdr.name); + return false; + } + return true; +} + + +/* + * For a given job, we examine all his run records + * to see if it is scheduled today or tomorrow. 
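+ * For each of the next "ndays" days, a run record is considered
+ * scheduled when its mday, wday, month, wom and woy bits all match
+ * that day, or when bit 31 of mday is set together with the month
+ * bit and the day is the last day of the month.  For a scheduled
+ * day, every set hour bit (with run->minute) gives a candidate run
+ * time, and the first one falling between now and now+ndays is
+ * returned.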
+ */ +RUN *find_next_run(RUN *run, JOB *job, utime_t &runtime, int ndays) +{ + time_t now, future, endtime; + SCHED *sched; + struct tm tm, runtm; + int mday, wday, month, wom, i; + int woy, ldom; + int day; + bool is_scheduled; + + sched = job->schedule; + if (!sched || !job->is_enabled() || (sched && !sched->is_enabled()) || + (job->client && !job->client->is_enabled())) { + return NULL; /* no nothing to report */ + } + + /* Break down the time into components */ + now = time(NULL); + endtime = now + (ndays * 60 * 60 * 24); + + if (run == NULL) { + run = sched->run; + } else { + run = run->next; + } + for ( ; run; run=run->next) { + /* + * Find runs in next 24 hours. Day 0 is today, so if + * ndays=1, look at today and tomorrow. + */ + for (day = 0; day <= ndays; day++) { + future = now + (day * 60 * 60 * 24); + + /* Break down the time into components */ + (void)localtime_r(&future, &tm); + mday = tm.tm_mday - 1; + wday = tm.tm_wday; + month = tm.tm_mon; + wom = mday / 7; + woy = tm_woy(future); + ldom = tm_ldom(month, tm.tm_year + 1900); + + is_scheduled = (bit_is_set(mday, run->mday) && + bit_is_set(wday, run->wday) && + bit_is_set(month, run->month) && + bit_is_set(wom, run->wom) && + bit_is_set(woy, run->woy)) || + (bit_is_set(month, run->month) && + bit_is_set(31, run->mday) && mday == ldom); + +#ifdef xxx + Pmsg2(000, "day=%d is_scheduled=%d\n", day, is_scheduled); + Pmsg1(000, "bit_set_mday=%d\n", bit_is_set(mday, run->mday)); + Pmsg1(000, "bit_set_wday=%d\n", bit_is_set(wday, run->wday)); + Pmsg1(000, "bit_set_month=%d\n", bit_is_set(month, run->month)); + Pmsg1(000, "bit_set_wom=%d\n", bit_is_set(wom, run->wom)); + Pmsg1(000, "bit_set_woy=%d\n", bit_is_set(woy, run->woy)); +#endif + + if (is_scheduled) { /* Jobs scheduled on that day */ +#ifdef xxx + char buf[300], num[10]; + bsnprintf(buf, sizeof(buf), "tm.hour=%d hour=", tm.tm_hour); + for (i=0; i<24; i++) { + if (bit_is_set(i, run->hour)) { + bsnprintf(num, sizeof(num), "%d ", i); + bstrncat(buf, num, sizeof(buf)); + } + } + bstrncat(buf, "\n", sizeof(buf)); + Pmsg1(000, "%s", buf); +#endif + /* find time (time_t) job is to be run */ + (void)localtime_r(&future, &runtm); + for (i= 0; i < 24; i++) { + if (bit_is_set(i, run->hour)) { + runtm.tm_hour = i; + runtm.tm_min = run->minute; + runtm.tm_sec = 0; + runtime = mktime(&runtm); + Dmsg2(200, "now=%d runtime=%lld\n", now, runtime); + if ((runtime > now) && (runtime < endtime)) { + Dmsg2(200, "Found it level=%d %c\n", run->level, run->level); + return run; /* found it, return run resource */ + } + } + } + } + } + } /* end for loop over runs */ + /* Nothing found */ + return NULL; +} + +/* + * Fill in the remaining fields of the jcr as if it + * is going to run the job. 
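+ * A catalog connection is opened from the Job's Catalog resource and
+ * the Pool record is fetched by name (created if it does not exist
+ * yet) so that jcr->jr.PoolId is valid for the nextvol search in
+ * list_nextvol().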
+ */ +bool complete_jcr_for_job(JCR *jcr, JOB *job, POOL *pool) +{ + POOL_DBR pr; + + bmemset(&pr, 0, sizeof(POOL_DBR)); + set_jcr_defaults(jcr, job); + if (pool) { + jcr->pool = pool; /* override */ + } + if (jcr->db) { + Dmsg0(100, "complete_jcr close db\n"); + db_close_database(jcr, jcr->db); + jcr->db = NULL; + } + + Dmsg0(100, "complete_jcr open db\n"); + jcr->db = db_init_database(jcr, jcr->catalog->db_driver, jcr->catalog->db_name, + jcr->catalog->db_user, + jcr->catalog->db_password, jcr->catalog->db_address, + jcr->catalog->db_port, jcr->catalog->db_socket, + jcr->catalog->db_ssl_mode, jcr->catalog->db_ssl_key, + jcr->catalog->db_ssl_cert, jcr->catalog->db_ssl_ca, + jcr->catalog->db_ssl_capath, jcr->catalog->db_ssl_cipher, + jcr->catalog->mult_db_connections, + jcr->catalog->disable_batch_insert); + if (!jcr->db || !db_open_database(jcr, jcr->db)) { + Jmsg(jcr, M_FATAL, 0, _("Could not open database \"%s\".\n"), + jcr->catalog->db_name); + if (jcr->db) { + Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(jcr->db)); + db_close_database(jcr, jcr->db); + jcr->db = NULL; + } + return false; + } + bstrncpy(pr.Name, jcr->pool->name(), sizeof(pr.Name)); + while (!db_get_pool_record(jcr, jcr->db, &pr)) { /* get by Name */ + /* Try to create the pool */ + if (create_pool(jcr, jcr->db, jcr->pool, POOL_OP_CREATE) < 0) { + Jmsg(jcr, M_FATAL, 0, _("Pool %s not in database. %s"), pr.Name, + db_strerror(jcr->db)); + if (jcr->db) { + db_close_database(jcr, jcr->db); + jcr->db = NULL; + } + return false; + } else { + Jmsg(jcr, M_INFO, 0, _("Pool %s created in database.\n"), pr.Name); + } + } + jcr->jr.PoolId = pr.PoolId; + return true; +} + + +static void con_lock_release(void *arg) +{ + Vw(con_lock); +} + +void do_messages(UAContext *ua, const char *cmd) +{ + char msg[2000]; + int mlen; + bool do_truncate = false; + + if (ua->jcr) { + dequeue_messages(ua->jcr); + } + Pw(con_lock); + pthread_cleanup_push(con_lock_release, (void *)NULL); + rewind(con_fd); + while (fgets(msg, sizeof(msg), con_fd)) { + mlen = strlen(msg); + ua->UA_sock->msg = check_pool_memory_size(ua->UA_sock->msg, mlen+1); + strcpy(ua->UA_sock->msg, msg); + ua->UA_sock->msglen = mlen; + ua->UA_sock->send(); + do_truncate = true; + } + if (do_truncate) { + (void)ftruncate(fileno(con_fd), 0L); + } + console_msg_pending = false; + ua->user_notified_msg_pending = false; + pthread_cleanup_pop(0); + Vw(con_lock); +} + + +int qmessagescmd(UAContext *ua, const char *cmd) +{ + if (console_msg_pending && ua->auto_display_messages) { + do_messages(ua, cmd); + } + return 1; +} + +int messagescmd(UAContext *ua, const char *cmd) +{ + if (console_msg_pending) { + do_messages(ua, cmd); + } else { + ua->UA_sock->fsend(_("You have no messages.\n")); + } + return 1; +} + +/* + * Callback routine for "printing" database file listing + */ +void prtit(void *ctx, const char *msg) +{ + UAContext *ua = (UAContext *)ctx; + + if (ua) ua->send_msg("%s", msg); +} + +/* + * Format message and send to other end. + + * If the UA_sock is NULL, it means that there is no user + * agent, so we are being called from Bacula core. In + * that case direct the messages to the Job. 
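+ * With va_copy() available, the buffer is grown and the format is
+ * retried until bvsnprintf() fits; otherwise the buffer is limited
+ * to about 5000 bytes and longer messages are replaced by a notice.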
+ */ +#ifdef HAVE_VA_COPY +void bmsg(UAContext *ua, const char *fmt, va_list arg_ptr) +{ + BSOCK *bs = ua->UA_sock; + int maxlen, len; + POOLMEM *msg = NULL; + va_list ap; + + if (bs) { + msg = bs->msg; + } + if (!msg) { + msg = get_pool_memory(PM_EMSG); + } + +again: + maxlen = sizeof_pool_memory(msg) - 1; + va_copy(ap, arg_ptr); + len = bvsnprintf(msg, maxlen, fmt, ap); + va_end(ap); + if (len < 0 || len >= maxlen) { + msg = realloc_pool_memory(msg, maxlen + maxlen/2); + goto again; + } + + if (bs) { + bs->msg = msg; + bs->msglen = len; + bs->send(); + } else { /* No UA, send to Job */ + Jmsg(ua->jcr, M_INFO, 0, "%s", msg); + free_pool_memory(msg); + } + +} + +#else /* no va_copy() -- brain damaged version of variable arguments */ + +void bmsg(UAContext *ua, const char *fmt, va_list arg_ptr) +{ + BSOCK *bs = ua->UA_sock; + int maxlen, len; + POOLMEM *msg = NULL; + + if (bs) { + msg = bs->msg; + } + if (!msg) { + msg = get_memory(5000); + } + + maxlen = sizeof_pool_memory(msg) - 1; + if (maxlen < 4999) { + msg = realloc_pool_memory(msg, 5000); + maxlen = 4999; + } + len = bvsnprintf(msg, maxlen, fmt, arg_ptr); + if (len < 0 || len >= maxlen) { + pm_strcpy(msg, _("Message too long to display.\n")); + len = strlen(msg); + } + + if (bs) { + bs->msg = msg; + bs->msglen = len; + bs->send(); + } else { /* No UA, send to Job */ + Jmsg(ua->jcr, M_INFO, 0, "%s", msg); + free_pool_memory(msg); + } + +} +#endif + +void bsendmsg(void *ctx, const char *fmt, ...) +{ + va_list arg_ptr; + va_start(arg_ptr, fmt); + bmsg((UAContext *)ctx, fmt, arg_ptr); + va_end(arg_ptr); +} + +/* + * The following UA methods are mainly intended for GUI + * programs + */ +/* + * This is a message that should be displayed on the user's + * console. + */ +void UAContext::send_msg(const char *fmt, ...) +{ + va_list arg_ptr; + va_start(arg_ptr, fmt); + bmsg(this, fmt, arg_ptr); + va_end(arg_ptr); +} + + +/* + * This is an error condition with a command. The gui should put + * up an error or critical dialog box. The command is aborted. + */ +void UAContext::error_msg(const char *fmt, ...) +{ + BSOCK *bs = UA_sock; + va_list arg_ptr; + + if (bs && api) bs->signal(BNET_ERROR_MSG); + va_start(arg_ptr, fmt); + bmsg(this, fmt, arg_ptr); + va_end(arg_ptr); +} + +/* + * This is a warning message, that should bring up a warning + * dialog box on the GUI. The command is not aborted, but something + * went wrong. + */ +void UAContext::warning_msg(const char *fmt, ...) +{ + BSOCK *bs = UA_sock; + va_list arg_ptr; + + if (bs && api) bs->signal(BNET_WARNING_MSG); + va_start(arg_ptr, fmt); + bmsg(this, fmt, arg_ptr); + va_end(arg_ptr); +} + +/* + * This is an information message that should probably be put + * into the status line of a GUI program. + */ +void UAContext::info_msg(const char *fmt, ...) +{ + BSOCK *bs = UA_sock; + va_list arg_ptr; + + if (bs && api) bs->signal(BNET_INFO_MSG); + va_start(arg_ptr, fmt); + bmsg(this, fmt, arg_ptr); + va_end(arg_ptr); +} diff --git a/src/dird/ua_prune.c b/src/dird/ua_prune.c new file mode 100644 index 00000000..063af642 --- /dev/null +++ b/src/dird/ua_prune.c @@ -0,0 +1,859 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula Director -- User Agent Database prune Command + * Applies retention periods + * + * Kern Sibbald, February MMII + */ + +#include "bacula.h" +#include "dird.h" + +/* Imported functions */ + +/* Forward referenced functions */ +static bool grow_del_list(struct del_ctx *del); +static bool prune_expired_volumes(UAContext*); +static bool prune_selected_volumes(UAContext *ua); + +/* + * Called here to count entries to be deleted + */ +int del_count_handler(void *ctx, int num_fields, char **row) +{ + struct s_count_ctx *cnt = (struct s_count_ctx *)ctx; + + if (row[0]) { + cnt->count = str_to_int64(row[0]); + } else { + cnt->count = 0; + } + return 0; +} + + +/* + * Called here to make in memory list of JobIds to be + * deleted and the associated PurgedFiles flag. + * The in memory list will then be transversed + * to issue the SQL DELETE commands. Note, the list + * is allowed to get to MAX_DEL_LIST_LEN to limit the + * maximum malloc'ed memory. + */ +int job_delete_handler(void *ctx, int num_fields, char **row) +{ + struct del_ctx *del = (struct del_ctx *)ctx; + + if (!grow_del_list(del)) { + return 1; + } + del->JobId[del->num_ids] = (JobId_t)str_to_int64(row[0]); + Dmsg2(60, "job_delete_handler row=%d val=%d\n", del->num_ids, del->JobId[del->num_ids]); + del->PurgedFiles[del->num_ids++] = (char)str_to_int64(row[1]); + return 0; +} + +int file_delete_handler(void *ctx, int num_fields, char **row) +{ + struct del_ctx *del = (struct del_ctx *)ctx; + + if (!grow_del_list(del)) { + return 1; + } + del->JobId[del->num_ids++] = (JobId_t)str_to_int64(row[0]); +// Dmsg2(150, "row=%d val=%d\n", del->num_ids-1, del->JobId[del->num_ids-1]); + return 0; +} + +/* + * Prune records from database + * + * prune files (from) client=xxx [pool=yyy] + * prune jobs (from) client=xxx [pool=yyy] + * prune volume=xxx + * prune stats + */ +int prunecmd(UAContext *ua, const char *cmd) +{ + DIRRES *dir; + CLIENT *client; + POOL *pool; + MEDIA_DBR mr; + utime_t retention; + int kw; + + static const char *keywords[] = { + NT_("Files"), + NT_("Jobs"), + NT_("Volume"), + NT_("Stats"), + NT_("Snapshots"), + NULL}; + + if (!open_new_client_db(ua)) { + return false; + } + + /* First search args */ + kw = find_arg_keyword(ua, keywords); + if (kw < 0 || kw > 4) { + /* no args, so ask user */ + kw = do_keyword_prompt(ua, _("Choose item to prune"), keywords); + } + + switch (kw) { + case 0: /* prune files */ + /* We restrict the client list to ClientAcl, maybe something to change later */ + if (!(client = get_client_resource(ua, JT_SYSTEM))) { + return false; + } + if (find_arg_with_value(ua, "pool") >= 0) { + pool = get_pool_resource(ua); + } else { + pool = NULL; + } + /* Pool File Retention takes precedence over client File Retention */ + if (pool && pool->FileRetention > 0) { + if (!confirm_retention(ua, &pool->FileRetention, "File")) { + return false; + } + } else if (!confirm_retention(ua, &client->FileRetention, "File")) { + return false; + } + prune_files(ua, client, pool); + return true; + + case 1: /* prune jobs */ + /* We restrict the client list to ClientAcl, maybe something to change later */ + if (!(client 
= get_client_resource(ua, JT_SYSTEM))) { + return false; + } + if (find_arg_with_value(ua, "pool") >= 0) { + pool = get_pool_resource(ua); + } else { + pool = NULL; + } + /* Pool Job Retention takes precedence over client Job Retention */ + if (pool && pool->JobRetention > 0) { + if (!confirm_retention(ua, &pool->JobRetention, "Job")) { + return false; + } + } else if (!confirm_retention(ua, &client->JobRetention, "Job")) { + return false; + } + /* ****FIXME**** allow user to select JobType */ + prune_jobs(ua, client, pool, JT_BACKUP); + return 1; + + case 2: /* prune volume */ + + /* Look for All expired volumes, mostly designed for runscript */ + if (find_arg(ua, "expired") >= 0) { + return prune_expired_volumes(ua); + } + prune_selected_volumes(ua); + return true; + case 3: /* prune stats */ + dir = (DIRRES *)GetNextRes(R_DIRECTOR, NULL); + if (!dir->stats_retention) { + return false; + } + retention = dir->stats_retention; + if (!confirm_retention(ua, &retention, "Statistics")) { + return false; + } + prune_stats(ua, retention); + return true; + case 4: /* prune snapshots */ + prune_snapshot(ua); + return true; + default: + break; + } + + return true; +} + +/* Prune Job stat records from the database. + * + */ +int prune_stats(UAContext *ua, utime_t retention) +{ + char ed1[50]; + POOL_MEM query(PM_MESSAGE); + utime_t now = (utime_t)time(NULL); + + db_lock(ua->db); + Mmsg(query, "DELETE FROM JobHisto WHERE JobTDate < %s", + edit_int64(now - retention, ed1)); + db_sql_query(ua->db, query.c_str(), NULL, NULL); + db_unlock(ua->db); + + ua->info_msg(_("Pruned Jobs from JobHisto catalog.\n")); + + return true; +} + +/* + * Use pool and client specified by user to select jobs to prune + * returns add_from string to add in FROM clause + * add_where string to add in WHERE clause + */ +bool prune_set_filter(UAContext *ua, CLIENT *client, POOL *pool, utime_t period, + POOL_MEM *add_from, POOL_MEM *add_where) +{ + utime_t now; + char ed1[50], ed2[MAX_ESCAPE_NAME_LENGTH]; + POOL_MEM tmp(PM_MESSAGE); + + now = (utime_t)time(NULL); + edit_int64(now - period, ed1); + Dmsg3(150, "now=%lld period=%lld JobTDate=%s\n", now, period, ed1); + Mmsg(tmp, " AND JobTDate < %s ", ed1); + pm_strcat(*add_where, tmp.c_str()); + + db_lock(ua->db); + if (client) { + db_escape_string(ua->jcr, ua->db, ed2, + client->name(), strlen(client->name())); + Mmsg(tmp, " AND Client.Name = '%s' ", ed2); + pm_strcat(*add_where, tmp.c_str()); + pm_strcat(*add_from, " JOIN Client USING (ClientId) "); + } + + if (pool) { + db_escape_string(ua->jcr, ua->db, ed2, + pool->name(), strlen(pool->name())); + Mmsg(tmp, " AND Pool.Name = '%s' ", ed2); + pm_strcat(*add_where, tmp.c_str()); + /* Use ON() instead of USING for some old SQLite */ + pm_strcat(*add_from, " JOIN Pool ON (Job.PoolId = Pool.PoolId) "); + } + Dmsg2(150, "f=%s w=%s\n", add_from->c_str(), add_where->c_str()); + db_unlock(ua->db); + return true; +} + +/* + * Prune File records from the database. For any Job which + * is older than the retention period, we unconditionally delete + * all File records for that Job. This is simple enough that no + * temporary tables are needed. We simply make an in memory list of + * the JobIds meeting the prune conditions, then delete all File records + * pointing to each of those JobIds. + * + * This routine assumes you want the pruning to be done. All checking + * must be done before calling this routine. + * + * Note: client or pool can possibly be NULL (not both). 
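+ * The cutoff is JobTDate < now - FileRetention, where a non-zero
+ * Pool FileRetention takes precedence over the Client value, and
+ * only Jobs still having PurgedFiles=0 are considered.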
+ */ +int prune_files(UAContext *ua, CLIENT *client, POOL *pool) +{ + struct del_ctx del; + struct s_count_ctx cnt; + POOL_MEM query(PM_MESSAGE); + POOL_MEM sql_where(PM_MESSAGE); + POOL_MEM sql_from(PM_MESSAGE); + utime_t period; + char ed1[50]; + + memset(&del, 0, sizeof(del)); + + if (pool && pool->FileRetention > 0) { + period = pool->FileRetention; + + } else if (client) { + period = client->FileRetention; + + } else { /* should specify at least pool or client */ + return false; + } + + db_lock(ua->db); + /* Specify JobTDate and Pool.Name= and/or Client.Name= in the query */ + if (!prune_set_filter(ua, client, pool, period, &sql_from, &sql_where)) { + goto bail_out; + } + +// edit_utime(now-period, ed1, sizeof(ed1)); +// Jmsg(ua->jcr, M_INFO, 0, _("Begin pruning Jobs older than %s secs.\n"), ed1); + Jmsg(ua->jcr, M_INFO, 0, _("Begin pruning Files.\n")); + /* Select Jobs -- for counting */ + Mmsg(query, + "SELECT COUNT(1) FROM Job %s WHERE PurgedFiles=0 %s", + sql_from.c_str(), sql_where.c_str()); + Dmsg1(100, "select sql=%s\n", query.c_str()); + cnt.count = 0; + if (!db_sql_query(ua->db, query.c_str(), del_count_handler, (void *)&cnt)) { + ua->error_msg("%s", db_strerror(ua->db)); + Dmsg0(100, "Count failed\n"); + goto bail_out; + } + + if (cnt.count == 0) { + if (ua->verbose) { + ua->warning_msg(_("No Files found to prune.\n")); + } + goto bail_out; + } + + if (cnt.count < MAX_DEL_LIST_LEN) { + del.max_ids = cnt.count + 1; + } else { + del.max_ids = MAX_DEL_LIST_LEN; + } + del.tot_ids = 0; + + del.JobId = (JobId_t *)malloc(sizeof(JobId_t) * del.max_ids); + + /* Now process same set but making a delete list */ + Mmsg(query, "SELECT JobId FROM Job %s WHERE PurgedFiles=0 %s", + sql_from.c_str(), sql_where.c_str()); + Dmsg1(100, "select sql=%s\n", query.c_str()); + db_sql_query(ua->db, query.c_str(), file_delete_handler, (void *)&del); + + purge_files_from_job_list(ua, del); + + edit_uint64_with_commas(del.num_del, ed1); + ua->info_msg(_("Pruned Files from %s Jobs for client %s from catalog.\n"), + ed1, client->name()); + +bail_out: + db_unlock(ua->db); + if (del.JobId) { + free(del.JobId); + } + return 1; +} + + +static void drop_temp_tables(UAContext *ua) +{ + int i; + for (i=0; drop_deltabs[i]; i++) { + db_sql_query(ua->db, drop_deltabs[i], NULL, (void *)NULL); + } +} + +static bool create_temp_tables(UAContext *ua) +{ + /* Create temp tables and indicies */ + if (!db_sql_query(ua->db, create_deltabs[ua->db->bdb_get_type_index()], NULL, (void *)NULL)) { + ua->error_msg("%s", db_strerror(ua->db)); + Dmsg0(100, "create DelTables table failed\n"); + return false; + } + if (!db_sql_query(ua->db, create_delindex, NULL, (void *)NULL)) { + ua->error_msg("%s", db_strerror(ua->db)); + Dmsg0(100, "create DelInx1 index failed\n"); + return false; + } + return true; +} + +static bool grow_del_list(struct del_ctx *del) +{ + if (del->num_ids == MAX_DEL_LIST_LEN) { + return false; + } + + if (del->num_ids == del->max_ids) { + del->max_ids = (del->max_ids * 3) / 2; + del->JobId = (JobId_t *)brealloc(del->JobId, sizeof(JobId_t) * + del->max_ids); + del->PurgedFiles = (char *)brealloc(del->PurgedFiles, del->max_ids); + } + return true; +} + +struct accurate_check_ctx { + DBId_t ClientId; /* Id of client */ + DBId_t FileSetId; /* Id of FileSet */ +}; + +/* row: Job.Name, FileSet, Client.Name, FileSetId, ClientId, Type */ +static int job_select_handler(void *ctx, int num_fields, char **row) +{ + alist *lst = (alist *)ctx; + struct accurate_check_ctx *res; + ASSERT(num_fields == 6); + + /* Quick fix for 
#5507, avoid locking res_head after db_lock() */ + +#ifdef bug5507 + /* If this job doesn't exist anymore in the configuration, delete it */ + if (GetResWithName(R_JOB, row[0]) == NULL) { + return 0; + } + + /* If this fileset doesn't exist anymore in the configuration, delete it */ + if (GetResWithName(R_FILESET, row[1]) == NULL) { + return 0; + } + + /* If this client doesn't exist anymore in the configuration, delete it */ + if (GetResWithName(R_CLIENT, row[2]) == NULL) { + return 0; + } +#endif + + /* Don't compute accurate things for Verify jobs */ + if (*row[5] == 'V') { + return 0; + } + + res = (struct accurate_check_ctx*) malloc(sizeof(struct accurate_check_ctx)); + res->FileSetId = str_to_int64(row[3]); + res->ClientId = str_to_int64(row[4]); + lst->append(res); + +// Dmsg2(150, "row=%d val=%d\n", del->num_ids-1, del->JobId[del->num_ids-1]); + return 0; +} + +/* + * Pruning Jobs is a bit more complicated than purging Files + * because we delete Job records only if there is a more current + * backup of the FileSet. Otherwise, we keep the Job record. + * In other words, we never delete the only Job record that + * contains a current backup of a FileSet. This prevents the + * Volume from being recycled and destroying a current backup. + * + * For Verify Jobs, we do not delete the last InitCatalog. + * + * For Restore Jobs there are no restrictions. + */ +int prune_jobs(UAContext *ua, CLIENT *client, POOL *pool, int JobType) +{ + POOL_MEM query(PM_MESSAGE); + POOL_MEM sql_where(PM_MESSAGE); + POOL_MEM sql_from(PM_MESSAGE); + utime_t period; + char ed1[50]; + alist *jobids_check=NULL; + struct accurate_check_ctx *elt; + db_list_ctx jobids, tempids; + JOB_DBR jr; + struct del_ctx del; + memset(&del, 0, sizeof(del)); + + if (pool && pool->JobRetention > 0) { + period = pool->JobRetention; + + } else if (client) { + period = client->JobRetention; + + } else { /* should specify at least pool or client */ + return false; + } + + db_lock(ua->db); + if (!prune_set_filter(ua, client, pool, period, &sql_from, &sql_where)) { + goto bail_out; + } + + /* Drop any previous temporary tables still there */ + drop_temp_tables(ua); + + /* Create temp tables and indicies */ + if (!create_temp_tables(ua)) { + goto bail_out; + } + + edit_utime(period, ed1, sizeof(ed1)); + Jmsg(ua->jcr, M_INFO, 0, _("Begin pruning Jobs older than %s.\n"), ed1); + + del.max_ids = 100; + del.JobId = (JobId_t *)malloc(sizeof(JobId_t) * del.max_ids); + del.PurgedFiles = (char *)malloc(del.max_ids); + + /* + * Select all files that are older than the JobRetention period + * and add them into the "DeletionCandidates" table. + */ + Mmsg(query, + "INSERT INTO DelCandidates " + "SELECT JobId,PurgedFiles,FileSetId,JobFiles,JobStatus " + "FROM Job %s " /* JOIN Pool/Client */ + "WHERE Type IN ('B', 'C', 'M', 'V', 'D', 'R', 'c', 'm', 'g') " + " %s ", /* Pool/Client + JobTDate */ + sql_from.c_str(), sql_where.c_str()); + + Dmsg1(100, "select sql=%s\n", query.c_str()); + if (!db_sql_query(ua->db, query.c_str(), NULL, (void *)NULL)) { + if (ua->verbose) { + ua->error_msg("%s", db_strerror(ua->db)); + } + goto bail_out; + } + + /* Now, for the selection, we discard some of them in order to be always + * able to restore files. 
(ie, last full, last diff, last incrs) + * Note: The DISTINCT could be more useful if we don't get FileSetId + */ + jobids_check = New(alist(10, owned_by_alist)); + Mmsg(query, +"SELECT DISTINCT Job.Name, FileSet, Client.Name, Job.FileSetId, " + "Job.ClientId, Job.Type " + "FROM DelCandidates " + "JOIN Job USING (JobId) " + "JOIN Client USING (ClientId) " + "JOIN FileSet ON (Job.FileSetId = FileSet.FileSetId) " + "WHERE Job.Type IN ('B') " /* Look only Backup jobs */ + "AND Job.JobStatus IN ('T', 'W') " /* Look only useful jobs */ + ); + + /* The job_select_handler will skip jobs or filesets that are no longer + * in the configuration file. Interesting ClientId/FileSetId will be + * added to jobids_check (currently disabled in 6.0.7b) + */ + if (!db_sql_query(ua->db, query.c_str(), job_select_handler, jobids_check)) { + ua->error_msg("%s", db_strerror(ua->db)); + } + + /* For this selection, we exclude current jobs used for restore or + * accurate. This will prevent to prune the last full backup used for + * current backup & restore + */ + memset(&jr, 0, sizeof(jr)); + /* To find useful jobs, we do like an incremental */ + jr.JobLevel = L_INCREMENTAL; + foreach_alist(elt, jobids_check) { + jr.ClientId = elt->ClientId; /* should be always the same */ + jr.FileSetId = elt->FileSetId; + db_get_accurate_jobids(ua->jcr, ua->db, &jr, &tempids); + jobids.add(tempids); + } + + /* Discard latest Verify level=InitCatalog job + * TODO: can have multiple fileset + */ + Mmsg(query, + "SELECT JobId, JobTDate " + "FROM Job %s " /* JOIN Client/Pool */ + "WHERE Type='V' AND Level='V' " + " %s " /* Pool, JobTDate, Client */ + "ORDER BY JobTDate DESC LIMIT 1", + sql_from.c_str(), sql_where.c_str()); + + if (!db_sql_query(ua->db, query.c_str(), db_list_handler, &jobids)) { + ua->error_msg("%s", db_strerror(ua->db)); + } + + /* If we found jobs to exclude from the DelCandidates list, we should + * also remove BaseJobs that can be linked with them + */ + if (jobids.count > 0) { + Dmsg1(60, "jobids to exclude before basejobs = %s\n", jobids.list); + /* We also need to exclude all basejobs used */ + db_get_used_base_jobids(ua->jcr, ua->db, jobids.list, &jobids); + + /* Removing useful jobs from the DelCandidates list */ + Mmsg(query, "DELETE FROM DelCandidates " + "WHERE JobId IN (%s) " /* JobId used in accurate */ + "AND JobFiles!=0", /* Discard when JobFiles=0 */ + jobids.list); + + if (!db_sql_query(ua->db, query.c_str(), NULL, NULL)) { + ua->error_msg("%s", db_strerror(ua->db)); + goto bail_out; /* Don't continue if the list isn't clean */ + } + Dmsg1(60, "jobids to exclude = %s\n", jobids.list); + } + + /* We use DISTINCT because we can have two times the same job */ + Mmsg(query, + "SELECT DISTINCT DelCandidates.JobId,DelCandidates.PurgedFiles " + "FROM DelCandidates"); + if (!db_sql_query(ua->db, query.c_str(), job_delete_handler, (void *)&del)) { + ua->error_msg("%s", db_strerror(ua->db)); + } + + purge_job_list_from_catalog(ua, del); + + if (del.num_del > 0) { + ua->info_msg(_("Pruned %d %s for client %s from catalog.\n"), del.num_del, + del.num_del==1?_("Job"):_("Jobs"), client->name()); + } else if (ua->verbose) { + ua->info_msg(_("No Jobs found to prune.\n")); + } + +bail_out: + drop_temp_tables(ua); + db_unlock(ua->db); + if (del.JobId) { + free(del.JobId); + } + if (del.PurgedFiles) { + free(del.PurgedFiles); + } + if (jobids_check) { + delete jobids_check; + } + return 1; +} + +static bool prune_selected_volumes(UAContext *ua) +{ + int nb=0; + uint32_t *results=NULL; + MEDIA_DBR mr; + POOL_DBR pr; 
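+ /* scan_storage_cmd() fills results[] with the candidate MediaIds; each Volume is then fetched, checked (not archived, status Full or Used) and pruned after confirm_retention() */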
+ JCR *jcr = ua->jcr; + POOL_MEM tmp; + + mr.Recycle=1; /* Look for volumes to prune and recycle */ + + if (!scan_storage_cmd(ua, ua->cmd, false, /* fromallpool*/ + NULL /* drive */, + &mr, &pr, + NULL /* action */, + NULL /* storage */, + &nb, &results)) + { + goto bail_out; + } + for (int i = 0; i < nb; i++) { + mr.clear(); + mr.MediaId = results[i]; + if (!db_get_media_record(jcr, jcr->db, &mr)) { + ua->error_msg(_("Unable to get Media record for MediaId %d.\n"), mr.MediaId); + continue; + } + if (mr.Enabled == 2 || strcmp(mr.VolStatus, "Archive") == 0) { + ua->error_msg(_("Cannot prune Volume \"%s\" because it is archived.\n"), + mr.VolumeName); + continue; + } + if (strcmp(mr.VolStatus, "Full") != 0 && + strcmp(mr.VolStatus, "Used") != 0 ) + { + ua->error_msg(_("Cannot prune Volume \"%s\" because the volume status is \"%s\" and should be Full or Used.\n"), mr.VolumeName, mr.VolStatus); + continue; + } + Mmsg(tmp, "Volume \"%s\"", mr.VolumeName); + if (!confirm_retention(ua, &mr.VolRetention, tmp.c_str())) { + goto bail_out; + } + prune_volume(ua, &mr); + } + +bail_out: + if (results) { + free(results); + } + return true; +} + +/* + * Prune a expired Volumes + */ +static bool prune_expired_volumes(UAContext *ua) +{ + bool ok=false; + POOL_MEM query(PM_MESSAGE); + POOL_MEM filter(PM_MESSAGE); + alist *lst=NULL; + int nb=0, i=0; + char *val; + MEDIA_DBR mr; + + db_lock(ua->db); + /* We can restrict to a specific pool */ + if ((i = find_arg_with_value(ua, "pool")) >= 0) { + POOL_DBR pdbr; + memset(&pdbr, 0, sizeof(pdbr)); + bstrncpy(pdbr.Name, ua->argv[i], sizeof(pdbr.Name)); + if (!db_get_pool_record(ua->jcr, ua->db, &pdbr)) { + ua->error_msg("%s", db_strerror(ua->db)); + goto bail_out; + } + Mmsg(query, " AND PoolId = %lld ", (int64_t) pdbr.PoolId); + pm_strcat(filter, query.c_str()); + } + + /* We can restrict by MediaType */ + if (((i = find_arg_with_value(ua, "mediatype")) >= 0) && + (strlen(ua->argv[i]) <= MAX_NAME_LENGTH)) + { + char ed1[MAX_ESCAPE_NAME_LENGTH]; + db_escape_string(ua->jcr, ua->db, ed1, + ua->argv[i], strlen(ua->argv[i])); + Mmsg(query, " AND MediaType = '%s' ", ed1); + pm_strcat(filter, query.c_str()); + } + + /* Use a limit */ + if ((i = find_arg_with_value(ua, "limit")) >= 0) { + if (is_an_integer(ua->argv[i])) { + Mmsg(query, " LIMIT %s ", ua->argv[i]); + pm_strcat(filter, query.c_str()); + } else { + ua->error_msg(_("Expecting limit argument as integer\n")); + goto bail_out; + } + } + + lst = New(alist(5, owned_by_alist)); + + Mmsg(query, expired_volumes[db_get_type_index(ua->db)], filter.c_str()); + db_sql_query(ua->db, query.c_str(), db_string_list_handler, &lst); + + foreach_alist(val, lst) { + nb++; + memset(&mr, 0, sizeof(mr)); + bstrncpy(mr.VolumeName, val, sizeof(mr.VolumeName)); + db_get_media_record(ua->jcr, ua->db, &mr); + Mmsg(query, _("Volume \"%s\""), val); + if (confirm_retention(ua, &mr.VolRetention, query.c_str())) { + prune_volume(ua, &mr); + } + } + ua->send_msg(_("%d expired volume%s found\n"), + nb, nb>1?"s":""); + ok = true; + +bail_out: + db_unlock(ua->db); + if (lst) { + delete lst; + } + return ok; +} + +/* + * Prune a given Volume + */ +bool prune_volume(UAContext *ua, MEDIA_DBR *mr) +{ + POOL_MEM query(PM_MESSAGE); + struct del_ctx del; + bool ok = false; + int count; + + if (mr->Enabled == 2) { + return false; /* Cannot prune archived volumes */ + } + + memset(&del, 0, sizeof(del)); + del.max_ids = 10000; + del.JobId = (JobId_t *)malloc(sizeof(JobId_t) * del.max_ids); + + db_lock(ua->db); + + /* Prune only Volumes with status "Full", 
or "Used" */ + if (strcmp(mr->VolStatus, "Full") == 0 || + strcmp(mr->VolStatus, "Used") == 0) { + Dmsg2(100, "get prune list MediaId=%lu Volume %s\n", mr->MediaId, mr->VolumeName); + count = get_prune_list_for_volume(ua, mr, &del); + Dmsg1(100, "Num pruned = %d\n", count); + if (count != 0) { + ua->info_msg(_("Found %d Job(s) associated with the Volume \"%s\" that will be pruned\n"), + count, mr->VolumeName); + purge_job_list_from_catalog(ua, del); + + } else { + ua->info_msg(_("Found no Job associated with the Volume \"%s\" to prune\n"), + mr->VolumeName); + } + ok = is_volume_purged(ua, mr); + } + + db_unlock(ua->db); + if (del.JobId) { + free(del.JobId); + } + return ok; +} + +/* + * Get prune list for a volume + */ +int get_prune_list_for_volume(UAContext *ua, MEDIA_DBR *mr, del_ctx *del) +{ + POOL_MEM query(PM_MESSAGE); + int count = 0; + utime_t now, period; + char ed1[50], ed2[50]; + + if (mr->Enabled == 2) { + return 0; /* cannot prune Archived volumes */ + } + + /* + * Now add to the list of JobIds for Jobs written to this Volume + */ + edit_int64(mr->MediaId, ed1); + period = mr->VolRetention; + now = (utime_t)time(NULL); + edit_int64(now-period, ed2); + Mmsg(query, sel_JobMedia, ed1, ed2); + Dmsg3(250, "Now=%d period=%d now-period=%s\n", (int)now, (int)period, + ed2); + + Dmsg1(100, "Query=%s\n", query.c_str()); + if (!db_sql_query(ua->db, query.c_str(), file_delete_handler, (void *)del)) { + if (ua->verbose) { + ua->error_msg("%s", db_strerror(ua->db)); + } + Dmsg0(100, "Count failed\n"); + goto bail_out; + } + count = exclude_running_jobs_from_list(del); + +bail_out: + return count; +} + +/* + * We have a list of jobs to prune or purge. If any of them is + * currently running, we set its JobId to zero which effectively + * excludes it. + * + * Returns the number of jobs that can be prunned or purged. + * + */ +int exclude_running_jobs_from_list(del_ctx *prune_list) +{ + int count = 0; + JCR *jcr; + bool skip; + int i; + + /* Do not prune any job currently running */ + for (i=0; i < prune_list->num_ids; i++) { + skip = false; + foreach_jcr(jcr) { + if (jcr->JobId == prune_list->JobId[i]) { + Dmsg2(100, "skip running job JobId[%d]=%d\n", i, (int)prune_list->JobId[i]); + prune_list->JobId[i] = 0; + skip = true; + break; + } + } + endeach_jcr(jcr); + if (skip) { + continue; /* don't increment count */ + } + Dmsg2(100, "accept JobId[%d]=%d\n", i, (int)prune_list->JobId[i]); + count++; + } + return count; +} diff --git a/src/dird/ua_purge.c b/src/dird/ua_purge.c new file mode 100644 index 00000000..dd6c9ef7 --- /dev/null +++ b/src/dird/ua_purge.c @@ -0,0 +1,806 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * + * Bacula Director -- User Agent Database Purge Command + * + * Purges Files from specific JobIds + * or + * Purges Jobs from Volumes + * + * Kern Sibbald, February MMII + * + */ + +#include "bacula.h" +#include "dird.h" + +/* Forward referenced functions */ +static int purge_files_from_client(UAContext *ua, CLIENT *client); +static int purge_jobs_from_client(UAContext *ua, CLIENT *client); +int truncate_cmd(UAContext *ua, const char *cmd); + +static const char *select_jobsfiles_from_client = + "SELECT JobId FROM Job " + "WHERE ClientId=%s " + "AND PurgedFiles=0"; + +static const char *select_jobs_from_client = + "SELECT JobId, PurgedFiles FROM Job " + "WHERE ClientId=%s"; + +/* + * Purge records from database + * + * Purge Files (from) [Job|JobId|Client|Volume] + * Purge Jobs (from) [Client|Volume] + * Purge Volumes + * + * N.B. Not all above is implemented yet. + */ +int purge_cmd(UAContext *ua, const char *cmd) +{ + int i; + CLIENT *client; + MEDIA_DBR mr; + JOB_DBR jr; + memset(&jr, 0, sizeof(jr)); + + static const char *keywords[] = { + NT_("files"), + NT_("jobs"), + NT_("volume"), + NULL}; + + static const char *files_keywords[] = { + NT_("Job"), + NT_("JobId"), + NT_("Client"), + NT_("Volume"), + NULL}; + + static const char *jobs_keywords[] = { + NT_("Client"), + NT_("Volume"), + NULL}; + + /* Special case for the "Action On Purge", this option is working only on + * Purged volume, so no jobs or files will be purged. + * We are skipping this message if "purge volume action=xxx" + */ + if (!(find_arg(ua, "volume") >= 0 && find_arg(ua, "action") >= 0)) { + ua->warning_msg(_( + "\nThis command can be DANGEROUS!!!\n\n" + "It purges (deletes) all Files from a Job,\n" + "JobId, Client or Volume; or it purges (deletes)\n" + "all Jobs from a Client or Volume without regard\n" + "to retention periods. 
Normally you should use the\n" + "PRUNE command, which respects retention periods.\n")); + } + + if (!open_new_client_db(ua)) { + return 1; + } + switch (find_arg_keyword(ua, keywords)) { + /* Files */ + case 0: + switch(find_arg_keyword(ua, files_keywords)) { + case 0: /* Job */ + case 1: /* JobId */ + if (get_job_dbr(ua, &jr)) { + char jobid[50]; + edit_int64(jr.JobId, jobid); + purge_files_from_jobs(ua, jobid); + } + return 1; + case 2: /* client */ + /* We restrict the client list to ClientAcl, maybe something to change later */ + client = get_client_resource(ua, JT_SYSTEM); + if (client) { + purge_files_from_client(ua, client); + } + return 1; + case 3: /* Volume */ + if (select_media_dbr(ua, &mr)) { + purge_files_from_volume(ua, &mr); + } + return 1; + } + /* Jobs */ + case 1: + switch(find_arg_keyword(ua, jobs_keywords)) { + case 0: /* client */ + /* We restrict the client list to ClientAcl, maybe something to change later */ + client = get_client_resource(ua, JT_SYSTEM); + if (client) { + purge_jobs_from_client(ua, client); + } + return 1; + case 1: /* Volume */ + if (select_media_dbr(ua, &mr)) { + purge_jobs_from_volume(ua, &mr, /*force*/true); + } + return 1; + } + /* Volume */ + case 2: + /* Perform ActionOnPurge (action=truncate) */ + if (find_arg(ua, "action") >= 0) { + return truncate_cmd(ua, ua->cmd); + } + + while ((i=find_arg(ua, NT_("volume"))) >= 0) { + if (select_media_dbr(ua, &mr)) { + purge_jobs_from_volume(ua, &mr, /*force*/true); + } + *ua->argk[i] = 0; /* zap keyword already seen */ + ua->send_msg("\n"); + } + return 1; + default: + break; + } + switch (do_keyword_prompt(ua, _("Choose item to purge"), keywords)) { + case 0: /* files */ + /* We restrict the client list to ClientAcl, maybe something to change later */ + client = get_client_resource(ua, JT_SYSTEM); + if (client) { + purge_files_from_client(ua, client); + } + break; + case 1: /* jobs */ + /* We restrict the client list to ClientAcl, maybe something to change later */ + client = get_client_resource(ua, JT_SYSTEM); + if (client) { + purge_jobs_from_client(ua, client); + } + break; + case 2: /* Volume */ + if (select_media_dbr(ua, &mr)) { + purge_jobs_from_volume(ua, &mr, /*force*/true); + } + break; + } + return 1; +} + +/* + * Purge File records from the database. For any Job which + * is older than the retention period, we unconditionally delete + * all File records for that Job. This is simple enough that no + * temporary tables are needed. We simply make an in memory list of + * the JobIds meeting the prune conditions, then delete all File records + * pointing to each of those JobIds. 
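+ * (In this client purge path the selection query applies no JobTDate
+ * cut: every Job of the Client with PurgedFiles=0 is listed.)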
+ */ +static int purge_files_from_client(UAContext *ua, CLIENT *client) +{ + struct del_ctx del; + POOL_MEM query(PM_MESSAGE); + CLIENT_DBR cr; + char ed1[50]; + + memset(&cr, 0, sizeof(cr)); + bstrncpy(cr.Name, client->name(), sizeof(cr.Name)); + if (!db_create_client_record(ua->jcr, ua->db, &cr)) { + return 0; + } + + memset(&del, 0, sizeof(del)); + del.max_ids = 1000; + del.JobId = (JobId_t *)malloc(sizeof(JobId_t) * del.max_ids); + + ua->info_msg(_("Begin purging files for Client \"%s\"\n"), cr.Name); + + Mmsg(query, select_jobsfiles_from_client, edit_int64(cr.ClientId, ed1)); + Dmsg1(050, "select sql=%s\n", query.c_str()); + db_sql_query(ua->db, query.c_str(), file_delete_handler, (void *)&del); + + purge_files_from_job_list(ua, del); + + if (del.num_del == 0) { + ua->warning_msg(_("No Files found for client %s to purge from %s catalog.\n"), + client->name(), client->catalog->name()); + } else { + ua->info_msg(_("Files for %d Jobs for client \"%s\" purged from %s catalog.\n"), del.num_del, + client->name(), client->catalog->name()); + } + + if (del.JobId) { + free(del.JobId); + } + return 1; +} + + + +/* + * Purge Job records from the database. For any Job which + * is older than the retention period, we unconditionally delete + * it and all File records for that Job. This is simple enough that no + * temporary tables are needed. We simply make an in memory list of + * the JobIds then delete the Job, Files, and JobMedia records in that list. + */ +static int purge_jobs_from_client(UAContext *ua, CLIENT *client) +{ + struct del_ctx del; + POOL_MEM query(PM_MESSAGE); + CLIENT_DBR cr; + char ed1[50]; + + memset(&cr, 0, sizeof(cr)); + + bstrncpy(cr.Name, client->name(), sizeof(cr.Name)); + if (!db_create_client_record(ua->jcr, ua->db, &cr)) { + return 0; + } + + memset(&del, 0, sizeof(del)); + del.max_ids = 1000; + del.JobId = (JobId_t *)malloc(sizeof(JobId_t) * del.max_ids); + del.PurgedFiles = (char *)malloc(del.max_ids); + + ua->info_msg(_("Begin purging jobs from Client \"%s\"\n"), cr.Name); + + Mmsg(query, select_jobs_from_client, edit_int64(cr.ClientId, ed1)); + Dmsg1(150, "select sql=%s\n", query.c_str()); + db_sql_query(ua->db, query.c_str(), job_delete_handler, (void *)&del); + + purge_job_list_from_catalog(ua, del); + + if (del.num_del == 0) { + ua->warning_msg(_("No Jobs found for client %s to purge from %s catalog.\n"), + client->name(), client->catalog->name()); + } else { + ua->info_msg(_("%d Jobs for client %s purged from %s catalog.\n"), del.num_del, + client->name(), client->catalog->name()); + } + + if (del.JobId) { + free(del.JobId); + } + if (del.PurgedFiles) { + free(del.PurgedFiles); + } + return 1; +} + + +/* + * Remove File records from a list of JobIds + */ +void purge_files_from_jobs(UAContext *ua, char *jobs) +{ + POOL_MEM query(PM_MESSAGE); + + Mmsg(query, "DELETE FROM File WHERE JobId IN (%s)", jobs); + db_sql_query(ua->db, query.c_str(), NULL, (void *)NULL); + Dmsg1(050, "Delete File sql=%s\n", query.c_str()); + + Mmsg(query, "DELETE FROM BaseFiles WHERE JobId IN (%s)", jobs); + db_sql_query(ua->db, query.c_str(), NULL, (void *)NULL); + Dmsg1(050, "Delete BaseFiles sql=%s\n", query.c_str()); + + Mmsg(query, "DELETE FROM PathVisibility WHERE JobId IN (%s)", jobs); + db_sql_query(ua->db, query.c_str(), NULL, (void *)NULL); + Dmsg1(050, "Delete PathVisibility sql=%s\n", query.c_str()); + + /* + * Now mark Job as having files purged. This is necessary to + * avoid having too many Jobs to process in future prunings. 
If + * we don't do this, the number of JobId's in our in memory list + * could grow very large. + */ + Mmsg(query, "UPDATE Job SET PurgedFiles=1 WHERE JobId IN (%s)", jobs); + db_sql_query(ua->db, query.c_str(), NULL, (void *)NULL); + Dmsg1(050, "Mark purged sql=%s\n", query.c_str()); +} + +/* + * Delete jobs (all records) from the catalog in groups of 1000 + * at a time. + */ +void purge_job_list_from_catalog(UAContext *ua, del_ctx &del) +{ + POOL_MEM jobids(PM_MESSAGE); + char ed1[50]; + + for (int i=0; del.num_ids; ) { + Dmsg1(150, "num_ids=%d\n", del.num_ids); + pm_strcat(jobids, ""); + for (int j=0; j<1000 && del.num_ids>0; j++) { + del.num_ids--; + if (del.JobId[i] == 0 || ua->jcr->JobId == del.JobId[i]) { + Dmsg2(150, "skip JobId[%d]=%d\n", i, (int)del.JobId[i]); + i++; + continue; + } + if (*jobids.c_str() != 0) { + pm_strcat(jobids, ","); + } + pm_strcat(jobids, edit_int64(del.JobId[i++], ed1)); + Dmsg1(150, "Add id=%s\n", ed1); + del.num_del++; + } + Dmsg1(150, "num_ids=%d\n", del.num_ids); + purge_jobs_from_catalog(ua, jobids.c_str()); + } +} + +/* + * Delete files from a list of jobs in groups of 1000 + * at a time. + */ +void purge_files_from_job_list(UAContext *ua, del_ctx &del) +{ + POOL_MEM jobids(PM_MESSAGE); + char ed1[50]; + /* + * OK, now we have the list of JobId's to be pruned, send them + * off to be deleted batched 1000 at a time. + */ + for (int i=0; del.num_ids; ) { + pm_strcat(jobids, ""); + for (int j=0; j<1000 && del.num_ids>0; j++) { + del.num_ids--; + if (del.JobId[i] == 0 || ua->jcr->JobId == del.JobId[i]) { + Dmsg2(150, "skip JobId[%d]=%d\n", i, (int)del.JobId[i]); + i++; + continue; + } + if (*jobids.c_str() != 0) { + pm_strcat(jobids, ","); + } + pm_strcat(jobids, edit_int64(del.JobId[i++], ed1)); + Dmsg1(150, "Add id=%s\n", ed1); + del.num_del++; + } + purge_files_from_jobs(ua, jobids.c_str()); + } +} + +/* + * Change the type of the next copy job to backup. + * We need to upgrade the next copy of a normal job, + * and also upgrade the next copy when the normal job + * already have been purged. 
+ * + * JobId: 1 PriorJobId: 0 (original) + * JobId: 2 PriorJobId: 1 (first copy) + * JobId: 3 PriorJobId: 1 (second copy) + * + * JobId: 2 PriorJobId: 1 (first copy, now regular backup) + * JobId: 3 PriorJobId: 1 (second copy) + * + * => Search through PriorJobId in jobid and + * PriorJobId in PriorJobId (jobid) + */ +void upgrade_copies(UAContext *ua, char *jobs) +{ + POOL_MEM query(PM_MESSAGE); + int dbtype = ua->db->bdb_get_type_index(); + + db_lock(ua->db); + + Mmsg(query, uap_upgrade_copies_oldest_job[dbtype], JT_JOB_COPY, jobs, jobs); + db_sql_query(ua->db, query.c_str(), NULL, (void *)NULL); + Dmsg1(050, "Upgrade copies Log sql=%s\n", query.c_str()); + + /* Now upgrade first copy to Backup */ + Mmsg(query, "UPDATE Job SET Type='B' " /* JT_JOB_COPY => JT_BACKUP */ + "WHERE JobId IN ( SELECT JobId FROM cpy_tmp )"); + + db_sql_query(ua->db, query.c_str(), NULL, (void *)NULL); + + Mmsg(query, "DROP TABLE cpy_tmp"); + db_sql_query(ua->db, query.c_str(), NULL, (void *)NULL); + + db_unlock(ua->db); +} + +/* + * Remove all records from catalog for a list of JobIds + */ +void purge_jobs_from_catalog(UAContext *ua, char *jobs) +{ + POOL_MEM query(PM_MESSAGE); + + /* Delete (or purge) records associated with the job */ + purge_files_from_jobs(ua, jobs); + + Mmsg(query, "DELETE FROM JobMedia WHERE JobId IN (%s)", jobs); + db_sql_query(ua->db, query.c_str(), NULL, (void *)NULL); + Dmsg1(050, "Delete JobMedia sql=%s\n", query.c_str()); + + Mmsg(query, "DELETE FROM Log WHERE JobId IN (%s)", jobs); + db_sql_query(ua->db, query.c_str(), NULL, (void *)NULL); + Dmsg1(050, "Delete Log sql=%s\n", query.c_str()); + + Mmsg(query, "DELETE FROM RestoreObject WHERE JobId IN (%s)", jobs); + db_sql_query(ua->db, query.c_str(), NULL, (void *)NULL); + Dmsg1(050, "Delete RestoreObject sql=%s\n", query.c_str()); + + /* The JobId of the Snapshot record is no longer usable + * TODO: Migth want to use a copy for the jobid? 
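+ * (the Snapshot rows themselves are kept; only their JobId
+ * back-reference is cleared by the UPDATE below)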
+ */ + Mmsg(query, "UPDATE Snapshot SET JobId=0 WHERE JobId IN (%s)", jobs); + db_sql_query(ua->db, query.c_str(), NULL, (void *)NULL); + + upgrade_copies(ua, jobs); + + /* Now remove the Job record itself */ + Mmsg(query, "DELETE FROM Job WHERE JobId IN (%s)", jobs); + db_sql_query(ua->db, query.c_str(), NULL, (void *)NULL); + + Dmsg1(050, "Delete Job sql=%s\n", query.c_str()); +} + +void purge_files_from_volume(UAContext *ua, MEDIA_DBR *mr ) +{} /* ***FIXME*** implement */ + +/* + * Returns: 1 if Volume purged + * 0 if Volume not purged + */ +bool purge_jobs_from_volume(UAContext *ua, MEDIA_DBR *mr, bool force) +{ + POOL_MEM query(PM_MESSAGE); + db_list_ctx lst_all, lst; + char *jobids=NULL; + int i; + bool purged = false; + bool stat; + + stat = strcmp(mr->VolStatus, "Append") == 0 || + strcmp(mr->VolStatus, "Full") == 0 || + strcmp(mr->VolStatus, "Used") == 0 || + strcmp(mr->VolStatus, "Error") == 0; + if (!stat) { + ua->error_msg(_("\nVolume \"%s\" has VolStatus \"%s\" and cannot be purged.\n" + "The VolStatus must be: Append, Full, Used, or Error to be purged.\n"), + mr->VolumeName, mr->VolStatus); + return 0; + } + + /* + * Check if he wants to purge a single jobid + */ + i = find_arg_with_value(ua, "jobid"); + if (i >= 0 && is_a_number_list(ua->argv[i])) { + jobids = ua->argv[i]; + + } else { + POOL_MEM query; + /* + * Purge ALL JobIds + */ + if (!db_get_volume_jobids(ua->jcr, ua->db, mr, &lst_all)) { + ua->error_msg("%s", db_strerror(ua->db)); + Dmsg0(050, "Count failed\n"); + goto bail_out; + } + + if (lst_all.count > 0) { + Mmsg(query, "SELECT JobId FROM Job WHERE JobId IN (%s) AND JobStatus NOT IN ('R', 'C')", + lst_all.list); + if (!db_sql_query(ua->db, query.c_str(), db_list_handler, &lst)) { + ua->error_msg("%s", db_strerror(ua->db)); + goto bail_out; + } + } + jobids = lst.list; + } + + if (*jobids) { + purge_jobs_from_catalog(ua, jobids); + ua->info_msg(_("%d Job%s on Volume \"%s\" purged from catalog.\n"), + lst.count, lst.count<=1?"":"s", mr->VolumeName); + } + purged = is_volume_purged(ua, mr, force); + +bail_out: + return purged; +} + +/* + * This routine will check the JobMedia records to see if the + * Volume has been purged. If so, it marks it as such and + * + * Returns: true if volume purged + * false if not + * + * Note, we normally will not purge a volume that has Firstor LastWritten + * zero, because it means the volume is most likely being written + * however, if the user manually purges using the purge command in + * the console, he has been warned, and we go ahead and purge + * the volume anyway, if possible). + */ +bool is_volume_purged(UAContext *ua, MEDIA_DBR *mr, bool force) +{ + POOL_MEM query(PM_MESSAGE); + struct s_count_ctx cnt; + bool purged = false; + char ed1[50]; + + if (!force && (mr->FirstWritten == 0 || mr->LastWritten == 0)) { + goto bail_out; /* not written cannot purge */ + } + + if (strcmp(mr->VolStatus, "Purged") == 0) { + Dmsg1(100, "Volume=%s already purged.\n", mr->VolumeName); + purged = true; + goto bail_out; + } + + /* If purged, mark it so */ + cnt.count = 0; + Mmsg(query, "SELECT 1 FROM JobMedia WHERE MediaId=%s LIMIT 1", + edit_int64(mr->MediaId, ed1)); + if (!db_sql_query(ua->db, query.c_str(), del_count_handler, (void *)&cnt)) { + ua->error_msg("%s", db_strerror(ua->db)); + Dmsg0(050, "Count failed\n"); + goto bail_out; + } + + if (cnt.count == 0) { + ua->warning_msg(_("There are no more Jobs associated with Volume \"%s\". 
Marking it purged.\n"), + mr->VolumeName); + Dmsg1(100, "There are no more Jobs associated with Volume \"%s\". Marking it purged.\n", + mr->VolumeName); + if (!(purged = mark_media_purged(ua, mr))) { + ua->error_msg("%s", db_strerror(ua->db)); + } + } +bail_out: + return purged; +} + +/* + * Called here to send the appropriate commands to the SD + * to do truncate on purge. + */ +static void truncate_volume(UAContext *ua, MEDIA_DBR *mr, + char *pool, char *storage, + int drive, BSOCK *sd) +{ + bool ok = false; + uint64_t VolBytes = 0; + uint64_t VolABytes = 0; + uint32_t VolType = 0; + + if (!mr->Recycle) { + return; + } + + /* Do it only if action on purge = truncate is set */ + if (!(mr->ActionOnPurge & ON_PURGE_TRUNCATE)) { + ua->error_msg(_("\nThe option \"Action On Purge = Truncate\" was not defined in the Pool resource.\n" + "Truncate not allowed on Volume \"%s\"\n"), mr->VolumeName); + return; + } + + /* + * Send the command to truncate the volume after purge. If this feature + * is disabled for the specific device, this will be a no-op. + */ + + /* Protect us from spaces */ + bash_spaces(mr->VolumeName); + bash_spaces(mr->MediaType); + bash_spaces(pool); + bash_spaces(storage); + + /* Do it by relabeling the Volume, which truncates it */ + sd->fsend("relabel %s OldName=%s NewName=%s PoolName=%s " + "MediaType=%s Slot=%d drive=%d\n", + storage, + mr->VolumeName, mr->VolumeName, + pool, mr->MediaType, mr->Slot, drive); + + unbash_spaces(mr->VolumeName); + unbash_spaces(mr->MediaType); + unbash_spaces(pool); + unbash_spaces(storage); + + /* Check for valid response. With cloud volumes, the upload of the part.1 can + * generate a dir_update_volume_info() message that is handled by bget_dirmsg() + */ + while (bget_dirmsg(sd) >= 0) { + ua->send_msg("%s", sd->msg); + if (sscanf(sd->msg, "3000 OK label. VolBytes=%llu VolABytes=%lld VolType=%d ", + &VolBytes, &VolABytes, &VolType) == 3) { + + ok = true; + /* Clean up a few things in the media record */ + mr->VolBytes = VolBytes; + mr->VolABytes = VolABytes; + mr->VolType = VolType; + mr->VolFiles = 0; + mr->VolParts = 1; + mr->VolCloudParts = 0; + mr->LastPartBytes = VolBytes; + mr->VolJobs = 0; + mr->VolBlocks = 1; + mr->VolHoleBytes = 0; + mr->VolHoles = 0; + mr->EndBlock = 1; + + set_storageid_in_mr(NULL, mr); + if (!db_update_media_record(ua->jcr, ua->db, mr)) { + ua->error_msg(_("Can't update volume size in the catalog for Volume \"%s\"\n"), + mr->VolumeName); + ok = false; + } + ua->send_msg(_("The volume \"%s\" has been truncated\n"), mr->VolumeName); + } + } + if (!ok) { + ua->warning_msg(_("Error truncating Volume \"%s\"\n"), mr->VolumeName); + } +} + +/* + * Implement Bacula bconsole command purge action + * purge action=truncate pool= volume= storage= mediatype= + * or + * truncate [cache] pool= volume= storage= mediatype= + * + * If the keyword "cache: is present, then we use the truncate + * command rather than relabel so that the driver can decide + * whether or not it wants to truncate. Note: only the + * Cloud driver permits truncating the cache. + * + * Note, later we might want to rename this action_on_purge_cmd() as + * was the original, but only if we add additional actions such as + * erase, ... For the moment, we only do a truncate. 
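+ * A typical use, with placeholder volume and resource names, would
+ * be:  truncate volume=Vol-0001 storage=File1 pool=Default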
+ * + */ +int truncate_cmd(UAContext *ua, const char *cmd) +{ + int drive = -1; + int nb = 0; + uint32_t *results = NULL; + const char *action = "truncate"; + MEDIA_DBR mr; + POOL_DBR pr; + BSOCK *sd; + char storage[MAX_NAME_LENGTH]; + + if (find_arg(ua, "cache") > 0) { + return cloud_volumes_cmd(ua, cmd, "truncate cache"); + } + + memset(&pr, 0, sizeof(pr)); + + /* + * Look for all Purged volumes that can be recycled, are enabled and + * have more than 1,000 bytes (i.e. actually have data). + */ + mr.Recycle = 1; + mr.Enabled = 1; + mr.VolBytes = 1000; + bstrncpy(mr.VolStatus, "Purged", sizeof(mr.VolStatus)); + /* Get list of volumes to truncate */ + if (!scan_storage_cmd(ua, cmd, true, /* allfrompool */ + &drive, &mr, &pr, &action, storage, &nb, &results)) { + goto bail_out; + } + + if ((sd=open_sd_bsock(ua)) == NULL) { + Dmsg0(100, "Can't open connection to sd\n"); + goto bail_out; + } + + /* + * Loop over the candidate Volumes and actually truncate them + */ + for (int i=0; i < nb; i++) { + mr.clear(); + mr.MediaId = results[i]; + if (db_get_media_record(ua->jcr, ua->db, &mr)) { + if (strcasecmp(mr.VolStatus, "Purged") != 0) { + ua->send_msg(_("Truncate Volume \"%s\" skipped. Status is \"%s\", but must be \"Purged\".\n"), + mr.VolumeName, mr.VolStatus); + continue; + } + if (drive < 0) { + STORE *store = (STORE*)GetResWithName(R_STORAGE, storage); + drive = get_storage_drive(ua, store); + } + + /* Must select Pool if not already done */ + if (pr.PoolId == 0) { + pr.PoolId = mr.PoolId; + if (!db_get_pool_record(ua->jcr, ua->db, &pr)) { + goto bail_out; /* free allocated memory */ + } + } + if (strcasecmp("truncate", action) == 0) { + truncate_volume(ua, &mr, pr.Name, storage, + drive, sd); + } + } else { + Dmsg1(0, "Can't find MediaId=%lu\n", mr.MediaId); + } + } + +bail_out: + close_db(ua); + close_sd_bsock(ua); + ua->jcr->wstore = NULL; + if (results) { + free(results); + } + + return 1; +} + +/* + * IF volume status is Append, Full, Used, or Error, mark it Purged + * Purged volumes can then be recycled (if enabled). 
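+ * When the Volume has a RecyclePool different from its current Pool,
+ * it is also moved to that RecyclePool here, unless the destination
+ * Pool is already at its MaxVols limit.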
+ */ +bool mark_media_purged(UAContext *ua, MEDIA_DBR *mr) +{ + JCR *jcr = ua->jcr; + if (strcmp(mr->VolStatus, "Append") == 0 || + strcmp(mr->VolStatus, "Full") == 0 || + strcmp(mr->VolStatus, "Used") == 0 || + strcmp(mr->VolStatus, "Error") == 0) { + bstrncpy(mr->VolStatus, "Purged", sizeof(mr->VolStatus)); + set_storageid_in_mr(NULL, mr); + if (!db_update_media_record(jcr, ua->db, mr)) { + return false; + } + pm_strcpy(jcr->VolumeName, mr->VolumeName); + generate_plugin_event(jcr, bDirEventVolumePurged); + /* + * If the RecyclePool is defined, move the volume there + */ + if (mr->RecyclePoolId && mr->RecyclePoolId != mr->PoolId) { + POOL_DBR oldpr, newpr; + memset(&oldpr, 0, sizeof(POOL_DBR)); + memset(&newpr, 0, sizeof(POOL_DBR)); + newpr.PoolId = mr->RecyclePoolId; + oldpr.PoolId = mr->PoolId; + if ( db_get_pool_numvols(jcr, ua->db, &oldpr) + && db_get_pool_numvols(jcr, ua->db, &newpr)) { + /* check if destination pool size is ok */ + if (newpr.MaxVols > 0 && newpr.NumVols >= newpr.MaxVols) { + ua->error_msg(_("Unable move recycled Volume in full " + "Pool \"%s\" MaxVols=%d\n"), + newpr.Name, newpr.MaxVols); + + } else { /* move media */ + update_vol_pool(ua, newpr.Name, mr, &oldpr); + } + } else { + ua->error_msg("%s", db_strerror(ua->db)); + } + } + + /* Send message to Job report, if it is a *real* job */ + if (jcr && jcr->JobId > 0) { + Jmsg(jcr, M_INFO, 0, _("All records pruned from Volume \"%s\"; marking it \"Purged\"\n"), + mr->VolumeName); + } + return true; + } else { + ua->error_msg(_("Cannot purge Volume with VolStatus=%s\n"), mr->VolStatus); + } + return strcmp(mr->VolStatus, "Purged") == 0; +} diff --git a/src/dird/ua_query.c b/src/dird/ua_query.c new file mode 100644 index 00000000..86fc4d1a --- /dev/null +++ b/src/dird/ua_query.c @@ -0,0 +1,299 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * Bacula Director -- User Agent Database Query Commands + * + * Kern Sibbald, December MMI + * + */ + +#include "bacula.h" +#include "dird.h" + +extern DIRRES *director; + +static POOLMEM *substitute_prompts(UAContext *ua, + POOLMEM *query, char **prompt, int nprompt); + +/* + * Read a file containing SQL queries and prompt + * the user to select which one. + * + * File format: + * # => comment + * :prompt for query + * *prompt for subst %1 + * *prompt for subst %2 + * ... 
* SQL statement possibly terminated by ;
+ * :next query prompt
+ */
+int query_cmd(UAContext *ua, const char *cmd)
+{
+   FILE *fd = NULL;
+   POOLMEM *query = get_pool_memory(PM_MESSAGE);
+   char line[1000];
+   int i, len;
+   int item = 0;
+   long val;
+   char *prompt[9];
+   int nprompt = 0;
+   char *query_file = director->query_file;
+   bool must_prompt = true;
+
+   if (!open_client_db(ua)) {
+      goto bail_out;
+   }
+   if ((fd=bfopen(query_file, "rb")) == NULL) {
+      berrno be;
+      ua->error_msg(_("Could not open %s: ERR=%s\n"), query_file,
+         be.bstrerror());
+      goto bail_out;
+   }
+
+   start_prompt(ua, _("Available queries:\n"));
+   while (fgets(line, sizeof(line), fd) != NULL) {
+      if (line[0] == ':') {
+         strip_trailing_junk(line);
+         add_prompt(ua, line+1);
+      }
+   }
+   if (ua->argc >= 2) {
+      errno = 0;
+      val = strtol(ua->argk[1], NULL, 10) - 1;
+      if (val < 0) {
+         errno = 1;
+      }
+      if (errno != 0) {
+         ua->error_msg(_("Invalid command line query item specified.\n"));
+         must_prompt = true;
+      } else {
+         item = val;
+         must_prompt = false;
+      }
+   }
+   if (must_prompt && (item=do_prompt(ua, "", _("Choose a query"), NULL, 0)) < 0) {
+      goto bail_out;
+   }
+   rewind(fd);
+   i = -1;
+   while (fgets(line, sizeof(line), fd) != NULL) {
+      if (line[0] == ':') {
+         i++;
+      }
+      if (i == item) {
+         break;
+      }
+   }
+   if (i != item) {
+      ua->error_msg(_("Could not find query.\n"));
+      goto bail_out;
+   }
+   query[0] = 0;
+   for (i=0; i<9; i++) {
+      prompt[i] = NULL;
+   }
+   while (fgets(line, sizeof(line), fd) != NULL) {
+      if (line[0] == '#') {
+         continue;
+      }
+      if (line[0] == ':') {
+         break;
+      }
+      strip_trailing_junk(line);
+      len = strlen(line);
+      if (line[0] == '*') {            /* prompt */
+         if (nprompt >= 9) {
+            ua->error_msg(_("Too many prompts in query, max is 9.\n"));
+         } else {
+            line[len++] = ' ';
+            line[len] = 0;
+            prompt[nprompt++] = bstrdup(line+1);
+            continue;
+         }
+      }
+      if (*query != 0) {
+         pm_strcat(query, " ");
+      }
+      pm_strcat(query, line);
+      if (line[len-1] != ';') {
+         continue;
+      }
+      line[len-1] = 0;                 /* zap ; */
+      if (query[0] != 0) {
+         query = substitute_prompts(ua, query, prompt, nprompt);
+         Dmsg1(100, "Query2=%s\n", query);
+         if (query[0] == '!') {
+            db_list_sql_query(ua->jcr, ua->db, query+1, prtit, ua, 0, VERT_LIST);
+         } else if (!db_list_sql_query(ua->jcr, ua->db, query, prtit, ua, 1, HORZ_LIST)) {
+            ua->send_msg("%s\n", query);
+         }
+         query[0] = 0;
+      }
+   } /* end while */
+
+   if (query[0] != 0) {
+      query = substitute_prompts(ua, query, prompt, nprompt);
+      Dmsg1(100, "Query2=%s\n", query);
+      if (query[0] == '!') {
+         db_list_sql_query(ua->jcr, ua->db, query+1, prtit, ua, 0, VERT_LIST);
+      } else if (!db_list_sql_query(ua->jcr, ua->db, query, prtit, ua, 1, HORZ_LIST)) {
+         ua->error_msg("%s\n", query);
+      }
+   }
+
+bail_out:
+   if (fd) {
+      fclose(fd);
+   }
+   free_pool_memory(query);
+   for (i=0; i<nprompt; i++) {
+      free(prompt[i]);
+   }
+   return 1;
+}
+
+/*
+ * Substitute the prompted values (%1 to %9) into the query
+ */
+static POOLMEM *substitute_prompts(UAContext *ua,
+                       POOLMEM *query, char **prompt, int nprompt)
+{
+   char *p, *q, *o;
+   POOLMEM *new_query;
+   int i, n, len, olen;
+   char *subst[9];
+
+   if (nprompt == 0) {
+      return query;
+   }
+   for (i=0; i<9; i++) {
+      subst[i] = NULL;
+   }
+   new_query = get_pool_memory(PM_FNAME);
+   o = new_query;
+   for (q=query; (p=strchr(q, '%')); ) {
+      if (p) {
+         olen = o - new_query;
+         new_query = check_pool_memory_size(new_query, olen + p - q + 10);
+         o = new_query + olen;
+         while (q < p) {               /* copy up to % */
+            *o++ = *q++;
+         }
+         p++;
+         switch (*p) {
+         case '1':
+         case '2':
+         case '3':
+         case '4':
+         case '5':
+         case '6':
+         case '7':
+         case '8':
+         case '9':
+            n = *p - '1';
+            if (prompt[n]) {
+               if (!get_cmd(ua, prompt[n])) {
+                  q += 2;
+                  break;
+               }
+               len = strlen(ua->cmd);
+               p = (char *)malloc(len * 2 + 1);
+               db_escape_string(ua->jcr, ua->db, p, ua->cmd, len);
+               subst[n] = p;
+               olen = o - new_query;
+               new_query = check_pool_memory_size(new_query, olen + strlen(p) + 10);
+               o = new_query + olen;
+               while (*p) {
+                  *o++ = *p++;
+               }
+            } else {
+               ua->error_msg(_("Warning prompt %d missing.\n"), n+1);
+            }
+            q += 2;
+            break;
+         case '%':
+            *o++ = '%';
+            q += 2;
+            break;
+         default:
+            *o++ = '%';
+            q++;
+            break;
+         }
+      }
+   }
+   olen = o - new_query;
+   new_query = check_pool_memory_size(new_query, olen + strlen(q) + 10);
+   o = new_query + olen;
+   while (*q) {
+      *o++ = *q++;
+   }
+   *o = 0;
+   for (i=0; i<9; i++) {
+      if (subst[i]) {
+         free(subst[i]);
+      }
+   }
+   free_pool_memory(query);
+   return new_query;
+}
+
+/*
+ * Get general SQL
query for Catalog + */ +int sqlquery_cmd(UAContext *ua, const char *cmd) +{ + POOL_MEM query(PM_MESSAGE); + int len; + const char *msg; + + if (!open_new_client_db(ua)) { + return 1; + } + *query.c_str() = 0; + + ua->send_msg(_("Entering SQL query mode.\n" +"Terminate each query with a semicolon.\n" +"Terminate query mode with a blank line.\n")); + msg = _("Enter SQL query: "); + while (get_cmd(ua, msg)) { + len = strlen(ua->cmd); + Dmsg2(400, "len=%d cmd=%s:\n", len, ua->cmd); + /* Break on empty or . */ + if (len == 0 || (len == 1 && ua->cmd[0] == '.')) { + break; + } + if (*query.c_str() != 0) { + pm_strcat(query, " "); + } + pm_strcat(query, ua->cmd); + if (ua->cmd[len-1] == ';') { + ua->cmd[len-1] = 0; /* zap ; */ + /* Submit query */ + db_list_sql_query(ua->jcr, ua->db, query.c_str(), prtit, ua, 1, HORZ_LIST); + *query.c_str() = 0; /* start new query */ + msg = _("Enter SQL query: "); + } else { + msg = _("Add to SQL query: "); + } + } + ua->send_msg(_("End query mode.\n")); + return 1; +} diff --git a/src/dird/ua_restore.c b/src/dird/ua_restore.c new file mode 100644 index 00000000..adfcace6 --- /dev/null +++ b/src/dird/ua_restore.c @@ -0,0 +1,1739 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula Director -- User Agent Database restore Command + * Creates a bootstrap file for restoring files and + * starts the restore job. + * + * Tree handling routines split into ua_tree.c July MMIII. 
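substitute_prompts() above walks the query text once, replacing %1..%9 with answers collected from the operator and treating %% as a literal percent sign. The following self-contained sketch performs the same substitution against a fixed answer table; the SQL escaping and pool-memory resizing of the real function are deliberately left out:

#include <stdio.h>
#include <string.h>

/* Expand %1..%9 placeholders in 'query' from 'answers'; "%%" becomes '%'. */
static void substitute_sketch(const char *query, char *out, size_t outlen,
                              const char *answers[9])
{
   size_t used = 0;

   for (const char *q = query; *q && used + 1 < outlen; ) {
      if (q[0] == '%' && q[1] >= '1' && q[1] <= '9' && answers[q[1] - '1']) {
         const char *a = answers[q[1] - '1'];
         size_t n = strlen(a);
         if (n > outlen - 1 - used) {
            n = outlen - 1 - used;          /* clip rather than overflow */
         }
         memcpy(out + used, a, n);
         used += n;
         q += 2;                            /* skip the "%N" pair */
      } else if (q[0] == '%' && q[1] == '%') {
         out[used++] = '%';                 /* literal percent sign */
         q += 2;
      } else {
         out[used++] = *q++;                /* ordinary character */
      }
   }
   out[used] = 0;
}

int main(void)
{
   const char *answers[9] = { "client1-fd", "2019-01-01 00:00:00" };
   char out[256];

   substitute_sketch("SELECT JobId FROM Job WHERE Name='%1' AND StartTime>'%2';",
                     out, sizeof(out), answers);
   printf("%s\n", out);
   return 0;
}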
+ * BSR (bootstrap record) handling routines split into + * bsr.c July MMIII + * + * Kern Sibbald, July MMII + */ + + +#include "bacula.h" +#include "dird.h" + +/* Imported functions */ +extern void print_bsr(UAContext *ua, RBSR *bsr); + + +/* Forward referenced functions */ +static int last_full_handler(void *ctx, int num_fields, char **row); +static int jobid_handler(void *ctx, int num_fields, char **row); +static int user_select_jobids_or_files(UAContext *ua, RESTORE_CTX *rx); +static int fileset_handler(void *ctx, int num_fields, char **row); +static void free_name_list(NAME_LIST *name_list); +static bool select_backups_before_date(UAContext *ua, RESTORE_CTX *rx, char *date); +static bool build_directory_tree(UAContext *ua, RESTORE_CTX *rx); +static void split_path_and_filename(UAContext *ua, RESTORE_CTX *rx, char *fname); +static int jobid_fileindex_handler(void *ctx, int num_fields, char **row); +static bool insert_file_into_findex_list(UAContext *ua, RESTORE_CTX *rx, char *file, + char *date); +static bool insert_dir_into_findex_list(UAContext *ua, RESTORE_CTX *rx, char *dir, + char *date); +static void insert_one_file_or_dir(UAContext *ua, RESTORE_CTX *rx, char *date, bool dir); +static int get_client_name(UAContext *ua, RESTORE_CTX *rx); +static int get_restore_client_name(UAContext *ua, RESTORE_CTX &rx, char * RestoreClient); +static bool get_date(UAContext *ua, char *date, int date_len); +static int restore_count_handler(void *ctx, int num_fields, char **row); +static void get_and_display_basejobs(UAContext *ua, RESTORE_CTX *rx); + +void new_rx(RESTORE_CTX *rx) +{ + RBSR *bsr = NULL; + memset(rx, 0, sizeof(*rx)); + rx->path = get_pool_memory(PM_FNAME); + rx->path[0] = 0; + + rx->fname = get_pool_memory(PM_FNAME); + rx->fname[0] = 0; + + rx->JobIds = get_pool_memory(PM_FNAME); + rx->JobIds[0] = 0; + + rx->component_fname = get_pool_memory(PM_FNAME); + rx->component_fname[0] = 0; + + rx->BaseJobIds = get_pool_memory(PM_FNAME); + rx->BaseJobIds[0] = 0; + + rx->query = get_pool_memory(PM_FNAME); + rx->query[0] = 0; + + rx->bsr_list = New(rblist(bsr, &bsr->link)); + rx->hardlinks_in_mem = true; +} + + +/* + * Restore files + * + */ +int restore_cmd(UAContext *ua, const char *cmd) +{ + RESTORE_CTX rx; /* restore context */ + POOL_MEM buf; + JOB *job; + int i; + JCR *jcr = ua->jcr; + char *escaped_bsr_name = NULL; + char *escaped_where_name = NULL; + char *strip_prefix, *add_prefix, *add_suffix, *regexp; + strip_prefix = add_prefix = add_suffix = regexp = NULL; + + new_rx(&rx); /* Initialize RESTORE_CTX */ + + if (!open_new_client_db(ua)) { + goto bail_out; + } + + for (i = 0; i < ua->argc ; i++) { + if (strcasecmp(ua->argk[i], "fdcalled") == 0) { + rx.fdcalled = true; + + } else if (strcasecmp(ua->argk[i], "noautoparent") == 0) { + rx.no_auto_parent = true; + } + if (!ua->argv[i]) { + continue; /* skip if no value given */ + } + if (strcasecmp(ua->argk[i], "comment") == 0) { + rx.comment = ua->argv[i]; + if (!is_comment_legal(ua, rx.comment)) { + goto bail_out; + } + + } else if (strcasecmp(ua->argk[i], "where") == 0) { + rx.where = ua->argv[i]; + + } else if (strcasecmp(ua->argk[i], "when") == 0) { + rx.when = ua->argv[i]; + + } else if (strcasecmp(ua->argk[i], "replace") == 0) { + rx.replace = ua->argv[i]; + + } else if (strcasecmp(ua->argk[i], "strip_prefix") == 0) { + strip_prefix = ua->argv[i]; + + } else if (strcasecmp(ua->argk[i], "add_prefix") == 0) { + add_prefix = ua->argv[i]; + + } else if (strcasecmp(ua->argk[i], "add_suffix") == 0) { + add_suffix = ua->argv[i]; + + } else 
if (strcasecmp(ua->argk[i], "regexwhere") == 0) { + rx.RegexWhere = ua->argv[i]; + + } else if (strcasecmp(ua->argk[i], "optimizespeed") == 0) { + if (strcasecmp(ua->argv[i], "0") || strcasecmp(ua->argv[i], "no") || + strcasecmp(ua->argv[i], "false")) { + rx.hardlinks_in_mem = false; + } + } + } + + if (strip_prefix || add_suffix || add_prefix) { + int len = bregexp_get_build_where_size(strip_prefix, add_prefix, add_suffix); + regexp = (char *)bmalloc(len * sizeof(char)); + + bregexp_build_where(regexp, len, strip_prefix, add_prefix, add_suffix); + rx.RegexWhere = regexp; + } + + /* TODO: add acl for regexwhere ? */ + + if (rx.RegexWhere) { + if (!acl_access_ok(ua, Where_ACL, rx.RegexWhere)) { + ua->error_msg(_("\"RegexWhere\" specification not authorized.\n")); + goto bail_out; + } + } + + if (rx.where) { + if (!acl_access_ok(ua, Where_ACL, rx.where)) { + ua->error_msg(_("\"where\" specification not authorized.\n")); + goto bail_out; + } + } + + /* Ensure there is at least one Restore Job */ + LockRes(); + foreach_res(job, R_JOB) { + if (job->JobType == JT_RESTORE) { + if (!rx.restore_job) { + rx.restore_job = job; + } + rx.restore_jobs++; + } + } + UnlockRes(); + if (!rx.restore_jobs) { + ua->error_msg(_( + "No Restore Job Resource found in bacula-dir.conf.\n" + "You must create at least one before running this command.\n")); + goto bail_out; + } + + /* + * Request user to select JobIds or files by various different methods + * last 20 jobs, where File saved, most recent backup, ... + * In the end, a list of files are pumped into + * add_findex() + */ + switch (user_select_jobids_or_files(ua, &rx)) { + case 0: /* error */ + goto bail_out; + case 1: /* selected by jobid */ + get_and_display_basejobs(ua, &rx); + if (!build_directory_tree(ua, &rx)) { + ua->send_msg(_("Restore not done.\n")); + goto bail_out; + } + break; + case 2: /* selected by filename, no tree needed */ + break; + } + + if (rx.bsr_list->size() > 0) { + char ed1[50]; + if (!complete_bsr(ua, rx.bsr_list)) { /* find Vol, SessId, SessTime from JobIds */ + ua->error_msg(_("Unable to construct a valid BSR. Cannot continue.\n")); + goto bail_out; + } + if (!(rx.selected_files = write_bsr_file(ua, rx))) { + ua->warning_msg(_("No files selected to be restored.\n")); + goto bail_out; + } + + ua->send_msg(_("Bootstrap records written to %s\n"), ua->jcr->RestoreBootstrap); + display_bsr_info(ua, rx); /* display vols needed, etc */ + + if (rx.selected_files==1) { + ua->info_msg(_("\n1 file selected to be restored.\n\n")); + } else { + ua->info_msg(_("\n%s files selected to be restored.\n\n"), + edit_uint64_with_commas(rx.selected_files, ed1)); + } + } else { + ua->warning_msg(_("No files selected to be restored.\n")); + goto bail_out; + } + + if (rx.restore_jobs == 1) { + job = rx.restore_job; + } else { + job = get_restore_job(ua); + } + if (!job) { + goto bail_out; + } + + get_client_name(ua, &rx); + if (!rx.ClientName[0]) { + ua->error_msg(_("No Client resource found!\n")); + goto bail_out; + } + get_restore_client_name(ua, rx, job->RestoreClient); + + escaped_bsr_name = escape_filename(jcr->RestoreBootstrap); + + Mmsg(ua->cmd, + "run job=\"%s\" client=\"%s\" restoreclient=\"%s\" storage=\"%s\"" + " bootstrap=\"%s\" files=%u catalog=\"%s\"", + job->name(), rx.ClientName, rx.RestoreClientName, + rx.store?rx.store->name():"", + escaped_bsr_name ? 
escaped_bsr_name : jcr->RestoreBootstrap, + rx.selected_files, ua->catalog->name()); + + /* Build run command */ + pm_strcpy(buf, ""); + if (rx.RestoreMediaType[0]) { + Mmsg(buf, " mediatype=\"%s\"", rx.RestoreMediaType); + pm_strcat(ua->cmd, buf); + pm_strcpy(buf, ""); + } + if (rx.RegexWhere) { + escaped_where_name = escape_filename(rx.RegexWhere); + Mmsg(buf, " regexwhere=\"%s\"", + escaped_where_name ? escaped_where_name : rx.RegexWhere); + + } else if (rx.where) { + escaped_where_name = escape_filename(rx.where); + Mmsg(buf," where=\"%s\"", + escaped_where_name ? escaped_where_name : rx.where); + } + pm_strcat(ua->cmd, buf); + + if (rx.replace) { + Mmsg(buf, " replace=%s", rx.replace); + pm_strcat(ua->cmd, buf); + } + + if (rx.fdcalled) { + pm_strcat(ua->cmd, " fdcalled=yes"); + } + + if (rx.when) { + Mmsg(buf, " when=\"%s\"", rx.when); + pm_strcat(ua->cmd, buf); + } + + if (rx.comment) { + Mmsg(buf, " comment=\"%s\"", rx.comment); + pm_strcat(ua->cmd, buf); + } + + if (escaped_bsr_name != NULL) { + bfree(escaped_bsr_name); + } + + if (escaped_where_name != NULL) { + bfree(escaped_where_name); + } + + if (regexp) { + bfree(regexp); + } + + if (find_arg(ua, NT_("yes")) > 0) { + pm_strcat(ua->cmd, " yes"); /* pass it on to the run command */ + } + Dmsg1(200, "Submitting: %s\n", ua->cmd); + /* + * Transfer jobids, component stuff to jcr to + * pass to run_cmd(). Note, these are fields and + * other things that are not passed on the command + * line. + */ + /* ***FIXME*** pass jobids on command line */ + if (jcr->JobIds) { + free_pool_memory(jcr->JobIds); + } + jcr->JobIds = rx.JobIds; + rx.JobIds = NULL; + jcr->component_fname = rx.component_fname; + rx.component_fname = NULL; + jcr->component_fd = rx.component_fd; + rx.component_fd = NULL; + parse_ua_args(ua); + run_cmd(ua, ua->cmd); + free_rx(&rx); + garbage_collect_memory(); /* release unused memory */ + return 1; + +bail_out: + if (escaped_bsr_name != NULL) { + bfree(escaped_bsr_name); + } + + if (escaped_where_name != NULL) { + bfree(escaped_where_name); + } + + if (regexp) { + bfree(regexp); + } + + /* Free the plugin config if needed, we don't want to re-use + * this part of the next try + */ + free_plugin_config_items(jcr->plugin_config); + jcr->plugin_config = NULL; + + free_rx(&rx); + garbage_collect_memory(); /* release unused memory */ + return 0; + +} + +/* + * Fill the rx->BaseJobIds and display the list + */ +static void get_and_display_basejobs(UAContext *ua, RESTORE_CTX *rx) +{ + db_list_ctx jobids; + + if (!db_get_used_base_jobids(ua->jcr, ua->db, rx->JobIds, &jobids)) { + ua->warning_msg("%s", db_strerror(ua->db)); + } + + if (jobids.count) { + POOL_MEM q; + Mmsg(q, uar_print_jobs, jobids.list); + ua->send_msg(_("The restore will use the following job(s) as Base\n")); + db_list_sql_query(ua->jcr, ua->db, q.c_str(), prtit, ua, 1, HORZ_LIST); + } + pm_strcpy(rx->BaseJobIds, jobids.list); +} + +void free_rx(RESTORE_CTX *rx) +{ + free_bsr(rx->bsr_list); + rx->bsr_list = NULL; + free_and_null_pool_memory(rx->JobIds); + free_and_null_pool_memory(rx->BaseJobIds); + free_and_null_pool_memory(rx->fname); + free_and_null_pool_memory(rx->path); + free_and_null_pool_memory(rx->query); + if (rx->fileregex) { + free(rx->fileregex); + rx->fileregex = NULL; + } + if (rx->component_fd) { + fclose(rx->component_fd); + rx->component_fd = NULL; + } + if (rx->component_fname) { + unlink(rx->component_fname); + } + free_and_null_pool_memory(rx->component_fname); + free_name_list(&rx->name_list); +} + +static bool has_value(UAContext *ua, 
int i) +{ + if (!ua->argv[i]) { + ua->error_msg(_("Missing value for keyword: %s\n"), ua->argk[i]); + return false; + } + return true; +} + +/* + * This gets the client name from which the backup was made + */ +static int get_client_name(UAContext *ua, RESTORE_CTX *rx) +{ + /* If no client name specified yet, get it now */ + if (!rx->ClientName[0]) { + CLIENT_DBR cr; + /* try command line argument */ + int i = find_arg_with_value(ua, NT_("client")); + if (i < 0) { + i = find_arg_with_value(ua, NT_("backupclient")); + } + if (i >= 0) { + if (!is_name_valid(ua->argv[i], &ua->errmsg)) { + ua->error_msg("%s argument: %s", ua->argk[i], ua->errmsg); + return 0; + } + bstrncpy(rx->ClientName, ua->argv[i], sizeof(rx->ClientName)); + return 1; + } + memset(&cr, 0, sizeof(cr)); + /* We want the name of the client where the backup was made */ + if (!get_client_dbr(ua, &cr, JT_BACKUP_RESTORE)) { + return 0; + } + bstrncpy(rx->ClientName, cr.Name, sizeof(rx->ClientName)); + } + return 1; +} + +/* + * This is where we pick up a client name to restore to. + */ +static int get_restore_client_name(UAContext *ua, RESTORE_CTX &rx, char * RestoreClient) +{ + /* Start with same name as backup client or set in RestoreClient */ + if (!RestoreClient){ + bstrncpy(rx.RestoreClientName, rx.ClientName, sizeof(rx.RestoreClientName)); + } else { + bstrncpy(rx.RestoreClientName, RestoreClient, sizeof(rx.RestoreClientName)); + } + + /* try command line argument */ + int i = find_arg_with_value(ua, NT_("restoreclient")); + if (i >= 0) { + if (!is_name_valid(ua->argv[i], &ua->errmsg)) { + ua->error_msg("%s argument: %s", ua->argk[i], ua->errmsg); + return 0; + } + bstrncpy(rx.RestoreClientName, ua->argv[i], sizeof(rx.RestoreClientName)); + return 1; + } + return 1; +} + + + +/* + * The first step in the restore process is for the user to + * select a list of JobIds from which he will subsequently + * select which files are to be restored. 
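get_client_name() and get_restore_client_name() above both start from a keyword=value lookup over the argument arrays that the console has already split into keywords and values. A standalone sketch of such a lookup, using plain arrays in place of the UAContext fields (the function name and sample data are illustrative, not Bacula's API):

#include <stdio.h>
#include <strings.h>                /* strcasecmp() */

/* Return the index of 'keyword' when it carries a value, otherwise -1 */
static int find_arg_with_value_sketch(int argc, const char *argk[],
                                      const char *argv[], const char *keyword)
{
   for (int i = 1; i < argc; i++) {
      if (strcasecmp(argk[i], keyword) == 0) {
         return argv[i] ? i : -1;   /* present but value-less counts as missing */
      }
   }
   return -1;
}

int main(void)
{
   /* argk/argv mimic a console line already split into keyword/value pairs */
   const char *argk[] = { "restore", "client",   "current", "yes" };
   const char *argv[] = { NULL,      "host1-fd", NULL,      NULL  };
   int i = find_arg_with_value_sketch(4, argk, argv, "client");

   if (i >= 0) {
      printf("backup client taken from the command line: %s\n", argv[i]);
   } else {
      printf("no client= given, would fall back to prompting\n");
   }
   return 0;
}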
+ *
+ * Returns: 2 if filename list made
+ *          1 if jobid list made
+ *          0 on error
+ */
+static int user_select_jobids_or_files(UAContext *ua, RESTORE_CTX *rx)
+{
+   char *p;
+   char date[MAX_TIME_LENGTH];
+   bool have_date = false;
+   /* Include current second if using current time */
+   utime_t now = time(NULL) + 1;
+   JobId_t JobId;
+   JOB_DBR jr = { (JobId_t)-1 };
+   bool done = false;
+   int i, j;
+   const char *list[] = {
+      _("List last 20 Jobs run"),
+      _("List Jobs where a given File is saved"),
+      _("Enter list of comma separated JobIds to select"),
+      _("Enter SQL list command"),
+      _("Select the most recent backup for a client"),
+      _("Select backup for a client before a specified time"),
+      _("Enter a list of files to restore"),
+      _("Enter a list of files to restore before a specified time"),
+      _("Find the JobIds of the most recent backup for a client"),
+      _("Find the JobIds for a backup for a client before a specified time"),
+      _("Enter a list of directories to restore for found JobIds"),
+      _("Select full restore to a specified Job date"),
+      _("Cancel"),
+      NULL };
+
+   const char *kw[] = {
+      /* These keywords are handled in a for loop */
+      "jobid",          /* 0 */
+      "current",        /* 1 */
+      "before",         /* 2 */
+      "file",           /* 3 */
+      "directory",      /* 4 */
+      "select",         /* 5 */
+      "pool",           /* 6 */
+      "all",            /* 7 */
+
+      /* The keywords below are handled by individual arg lookups */
+      "client",         /* 8 */
+      "storage",        /* 9 */
+      "fileset",        /* 10 */
+      "where",          /* 11 */
+      "yes",            /* 12 */
+      "bootstrap",      /* 13 */
+      "done",           /* 14 */
+      "strip_prefix",   /* 15 */
+      "add_prefix",     /* 16 */
+      "add_suffix",     /* 17 */
+      "regexwhere",     /* 18 */
+      "restoreclient",  /* 19 */
+      "copies",         /* 20 */
+      "comment",        /* 21 */
+      "restorejob",     /* 22 */
+      "replace",        /* 23 */
+      "xxxxxxxxx",      /* 24 */
+      "fdcalled",       /* 25 */
+      "when",           /* 26 */
+      "noautoparent",   /* 27 */
+      NULL
+   };
+
+   rx->JobIds[0] = 0;
+
+   for (i=1; i<ua->argc; i++) {        /* loop through arguments */
+      bool found_kw = false;
+      for (j=0; kw[j]; j++) {          /* loop through keywords */
+         if (strcasecmp(kw[j], ua->argk[i]) == 0) {
+            found_kw = true;
+            break;
+         }
+      }
+
+      if (!found_kw) {
+         ua->error_msg(_("Unknown keyword: %s\n"), ua->argk[i]);
+         return 0;
+      }
+      /* Found keyword in kw[] list, process it */
+      switch (j) {
+      case 0:                          /* jobid */
+         if (!has_value(ua, i)) {
+            return 0;
+         }
+         if (*rx->JobIds != 0) {
+            pm_strcat(rx->JobIds, ",");
+         }
+         pm_strcat(rx->JobIds, ua->argv[i]);
+         done = true;
+         break;
+      case 1:                          /* current */
+         /*
+          * Note, we add one second here just to include any job
+          * that may have finished within the current second,
+          * which happens a lot in scripting small jobs.
+ */ + bstrutime(date, sizeof(date), now); + have_date = true; + break; + case 2: /* before */ + if (have_date || !has_value(ua, i)) { + return 0; + } + if (str_to_utime(ua->argv[i]) == 0) { + ua->error_msg(_("Improper date format: %s\n"), ua->argv[i]); + return 0; + } + bstrncpy(date, ua->argv[i], sizeof(date)); + have_date = true; + break; + case 3: /* file */ + case 4: /* dir */ + if (!has_value(ua, i)) { + return 0; + } + if (!have_date) { + bstrutime(date, sizeof(date), now); + } + if (!get_client_name(ua, rx)) { + return 0; + } + pm_strcpy(ua->cmd, ua->argv[i]); + insert_one_file_or_dir(ua, rx, date, j==4); + return 2; + case 5: /* select */ + if (!have_date) { + bstrutime(date, sizeof(date), now); + } + if (!select_backups_before_date(ua, rx, date)) { + return 0; + } + done = true; + break; + case 6: /* pool specified */ + if (!has_value(ua, i)) { + return 0; + } + rx->pool = (POOL *)GetResWithName(R_POOL, ua->argv[i]); + if (!rx->pool) { + ua->error_msg(_("Error: Pool resource \"%s\" does not exist.\n"), ua->argv[i]); + return 0; + } + if (!acl_access_ok(ua, Pool_ACL, ua->argv[i])) { + rx->pool = NULL; + ua->error_msg(_("Error: Pool resource \"%s\" access not allowed.\n"), ua->argv[i]); + return 0; + } + break; + case 7: /* all specified */ + rx->all = true; + break; + /* + * All keywords 7 or greater are ignored or handled by a select prompt + */ + default: + break; + } + } + + if (!done) { + ua->send_msg(_("\nFirst you select one or more JobIds that contain files\n" + "to be restored. You will be presented several methods\n" + "of specifying the JobIds. Then you will be allowed to\n" + "select which files from those JobIds are to be restored.\n\n")); + } + + /* If choice not already made above, prompt */ + for ( ; !done; ) { + char *fname; + int len; + bool gui_save; + db_list_ctx jobids; + + start_prompt(ua, _("To select the JobIds, you have the following choices:\n")); + for (int i=0; list[i]; i++) { + add_prompt(ua, list[i]); + } + done = true; + switch (do_prompt(ua, "", _("Select item: "), NULL, 0)) { + case -1: /* error or cancel */ + return 0; + case 0: /* list last 20 Jobs run */ + if (!acl_access_ok(ua, Command_ACL, NT_("sqlquery"), 8)) { + ua->error_msg(_("SQL query not authorized.\n")); + return 0; + } + gui_save = ua->jcr->gui; + ua->jcr->gui = true; + db_list_sql_query(ua->jcr, ua->db, uar_list_jobs, prtit, ua, 1, HORZ_LIST); + ua->jcr->gui = gui_save; + done = false; + break; + case 1: /* list where a file is saved */ + if (!get_client_name(ua, rx)) { + return 0; + } + if (!get_cmd(ua, _("Enter Filename (no path):"))) { + return 0; + } + len = strlen(ua->cmd); + fname = (char *)malloc(len * 2 + 1); + db_escape_string(ua->jcr, ua->db, fname, ua->cmd, len); + Mmsg(rx->query, uar_file[db_get_type_index(ua->db)], rx->ClientName, fname); + free(fname); + gui_save = ua->jcr->gui; + ua->jcr->gui = true; + db_list_sql_query(ua->jcr, ua->db, rx->query, prtit, ua, 1, HORZ_LIST); + ua->jcr->gui = gui_save; + done = false; + break; + case 2: /* enter a list of JobIds */ + if (!get_cmd(ua, _("Enter JobId(s), comma separated, to restore: "))) { + return 0; + } + pm_strcpy(rx->JobIds, ua->cmd); + break; + case 3: /* Enter an SQL list command */ + if (!acl_access_ok(ua, Command_ACL, NT_("sqlquery"), 8)) { + ua->error_msg(_("SQL query not authorized.\n")); + return 0; + } + if (!get_cmd(ua, _("Enter SQL list command: "))) { + return 0; + } + gui_save = ua->jcr->gui; + ua->jcr->gui = true; + db_list_sql_query(ua->jcr, ua->db, ua->cmd, prtit, ua, 1, HORZ_LIST); + ua->jcr->gui = 
gui_save; + done = false; + break; + case 4: /* Select the most recent backups */ + if (!have_date) { + bstrutime(date, sizeof(date), now); + } + if (!select_backups_before_date(ua, rx, date)) { + return 0; + } + break; + case 5: /* select backup at specified time */ + if (!have_date) { + if (!get_date(ua, date, sizeof(date))) { + return 0; + } + } + if (!select_backups_before_date(ua, rx, date)) { + return 0; + } + break; + case 6: /* Enter files */ + if (!have_date) { + bstrutime(date, sizeof(date), now); + } + if (!get_client_name(ua, rx)) { + return 0; + } + ua->send_msg(_("Enter file names with paths, or < to enter a filename\n" + "containing a list of file names with paths, and terminate\n" + "them with a blank line.\n")); + for ( ;; ) { + if (!get_cmd(ua, _("Enter full filename: "))) { + return 0; + } + len = strlen(ua->cmd); + if (len == 0) { + break; + } + insert_one_file_or_dir(ua, rx, date, false); + } + return 2; + case 7: /* enter files backed up before specified time */ + if (!have_date) { + if (!get_date(ua, date, sizeof(date))) { + return 0; + } + } + if (!get_client_name(ua, rx)) { + return 0; + } + ua->send_msg(_("Enter file names with paths, or < to enter a filename\n" + "containing a list of file names with paths, and terminate\n" + "them with a blank line.\n")); + for ( ;; ) { + if (!get_cmd(ua, _("Enter full filename: "))) { + return 0; + } + len = strlen(ua->cmd); + if (len == 0) { + break; + } + insert_one_file_or_dir(ua, rx, date, false); + } + return 2; + + case 8: /* Find JobIds for current backup */ + if (!have_date) { + bstrutime(date, sizeof(date), now); + } + if (!select_backups_before_date(ua, rx, date)) { + return 0; + } + done = false; + break; + + case 9: /* Find JobIds for give date */ + if (!have_date) { + if (!get_date(ua, date, sizeof(date))) { + return 0; + } + } + if (!select_backups_before_date(ua, rx, date)) { + return 0; + } + done = false; + break; + + case 10: /* Enter directories */ + if (*rx->JobIds != 0) { + ua->send_msg(_("You have already selected the following JobIds: %s\n"), + rx->JobIds); + } else if (get_cmd(ua, _("Enter JobId(s), comma separated, to restore: "))) { + if (*rx->JobIds != 0 && *ua->cmd) { + pm_strcat(rx->JobIds, ","); + } + pm_strcat(rx->JobIds, ua->cmd); + } + if (*rx->JobIds == 0 || *rx->JobIds == '.') { + *rx->JobIds = 0; + return 0; /* nothing entered, return */ + } + if (!have_date) { + bstrutime(date, sizeof(date), now); + } + if (!get_client_name(ua, rx)) { + return 0; + } + ua->send_msg(_("Enter full directory names or start the name\n" + "with a < to indicate it is a filename containing a list\n" + "of directories and terminate them with a blank line.\n")); + for ( ;; ) { + if (!get_cmd(ua, _("Enter directory name: "))) { + return 0; + } + len = strlen(ua->cmd); + if (len == 0) { + break; + } + /* Add trailing slash to end of directory names */ + if (ua->cmd[0] != '<' && !IsPathSeparator(ua->cmd[len-1])) { + strcat(ua->cmd, "/"); + } + insert_one_file_or_dir(ua, rx, date, true); + } + return 2; + + case 11: /* Choose a jobid and select jobs */ + if (!get_cmd(ua, _("Enter JobId to get the state to restore: ")) || + !is_an_integer(ua->cmd)) + { + return 0; + } + + memset(&jr, 0, sizeof(JOB_DBR)); + jr.JobId = str_to_int64(ua->cmd); + if (!db_get_job_record(ua->jcr, ua->db, &jr)) { + ua->error_msg(_("Unable to get Job record for JobId=%s: ERR=%s\n"), + ua->cmd, db_strerror(ua->db)); + return 0; + } + ua->send_msg(_("Selecting jobs to build the Full state at %s\n"), + jr.cStartTime); + jr.JobLevel = L_INCREMENTAL; 
/* Take Full+Diff+Incr */ + if (!db_get_accurate_jobids(ua->jcr, ua->db, &jr, &jobids)) { + return 0; + } + pm_strcpy(rx->JobIds, jobids.list); + Dmsg1(30, "Item 12: jobids = %s\n", rx->JobIds); + break; + case 12: /* Cancel or quit */ + return 0; + } + } + + memset(&jr, 0, sizeof(JOB_DBR)); + POOLMEM *JobIds = get_pool_memory(PM_FNAME); + *JobIds = 0; + rx->TotalFiles = 0; + /* + * Find total number of files to be restored, and filter the JobId + * list to contain only ones permitted by the ACL conditions. + */ + for (p=rx->JobIds; ; ) { + char ed1[50]; + int stat = get_next_jobid_from_list(&p, &JobId); + if (stat < 0) { + ua->error_msg(_("Invalid JobId in list.\n")); + free_pool_memory(JobIds); + return 0; + } + if (stat == 0) { + break; + } + if (jr.JobId == JobId) { + continue; /* duplicate of last JobId */ + } + memset(&jr, 0, sizeof(JOB_DBR)); + jr.JobId = JobId; + if (!db_get_job_record(ua->jcr, ua->db, &jr)) { + ua->error_msg(_("Unable to get Job record for JobId=%s: ERR=%s\n"), + edit_int64(JobId, ed1), db_strerror(ua->db)); + free_pool_memory(JobIds); + return 0; + } + if (!acl_access_ok(ua, Job_ACL, jr.Name)) { + ua->error_msg(_("Access to JobId=%s (Job \"%s\") not authorized. Not selected.\n"), + edit_int64(JobId, ed1), jr.Name); + continue; + } + if (*JobIds != 0) { + pm_strcat(JobIds, ","); + } + pm_strcat(JobIds, edit_int64(JobId, ed1)); + rx->TotalFiles += jr.JobFiles; + } + pm_strcpy(rx->JobIds, JobIds); /* Set ACL filtered list */ + free_pool_memory(JobIds); + if (*rx->JobIds == 0) { + ua->warning_msg(_("No Jobs selected.\n")); + return 0; + } + + if (strchr(rx->JobIds,',')) { + ua->info_msg(_("You have selected the following JobIds: %s\n"), rx->JobIds); + } else { + ua->info_msg(_("You have selected the following JobId: %s\n"), rx->JobIds); + } + return 1; +} + +/* + * Get date from user + */ +static bool get_date(UAContext *ua, char *date, int date_len) +{ + ua->send_msg(_("The restored files will the most current backup\n" + "BEFORE the date you specify below.\n\n")); + for ( ;; ) { + if (!get_cmd(ua, _("Enter date as YYYY-MM-DD HH:MM:SS :"))) { + return false; + } + if (str_to_utime(ua->cmd) != 0) { + break; + } + ua->error_msg(_("Improper date format.\n")); + } + bstrncpy(date, ua->cmd, date_len); + return true; +} + +/* + * Insert a single file, or read a list of files from a file + */ +static void insert_one_file_or_dir(UAContext *ua, RESTORE_CTX *rx, char *date, bool dir) +{ + FILE *ffd; + char file[5000]; + char *p = ua->cmd; + int line = 0; + + switch (*p) { + case '<': + p++; + if ((ffd = bfopen(p, "rb")) == NULL) { + berrno be; + ua->error_msg(_("Cannot open file %s: ERR=%s\n"), + p, be.bstrerror()); + break; + } + while (fgets(file, sizeof(file), ffd)) { + line++; + if (dir) { + if (!insert_dir_into_findex_list(ua, rx, file, date)) { + ua->error_msg(_("Error occurred on line %d of file \"%s\"\n"), line, p); + } + } else { + if (!insert_file_into_findex_list(ua, rx, file, date)) { + ua->error_msg(_("Error occurred on line %d of file \"%s\"\n"), line, p); + } + } + } + fclose(ffd); + break; + case '?': + p++; + insert_table_into_findex_list(ua, rx, p); + break; + default: + if (dir) { + insert_dir_into_findex_list(ua, rx, ua->cmd, date); + } else { + insert_file_into_findex_list(ua, rx, ua->cmd, date); + } + break; + } +} + +/* + * For a given file (path+filename), split into path and file, then + * lookup the most recent backup in the catalog to get the JobId + * and FileIndex, then insert them into the findex list. 
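The filtering loop above walks the comma separated JobId string one identifier at a time and skips an entry that merely repeats the one just processed. A reduced sketch of that walk (ACL checks and catalog lookups omitted, strtoull() standing in for the real list parser):

#include <stdio.h>
#include <stdlib.h>

/* Pull the next JobId out of a comma separated list; returns 0 at the end */
static int next_jobid_sketch(const char **p, unsigned long long *id)
{
   char *end;

   if (**p == 0) {
      return 0;                    /* end of list */
   }
   *id = strtoull(*p, &end, 10);
   if (end == *p) {
      return 0;                    /* not a number: stop scanning */
   }
   *p = (*end == ',') ? end + 1 : end;
   return 1;
}

int main(void)
{
   const char *list = "101,101,102,205";
   unsigned long long id, last = 0;

   while (next_jobid_sketch(&list, &id)) {
      if (id == last) {
         continue;                 /* duplicate of the previous entry */
      }
      last = id;
      printf("would fetch the Job record for JobId=%llu\n", id);
   }
   return 0;
}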
+ */ +static bool insert_file_into_findex_list(UAContext *ua, RESTORE_CTX *rx, char *file, + char *date) +{ + strip_trailing_newline(file); + split_path_and_filename(ua, rx, file); + if (*rx->JobIds == 0) { + Mmsg(rx->query, uar_jobid_fileindex, date, rx->path, rx->fname, + rx->ClientName); + } else { + Mmsg(rx->query, uar_jobids_fileindex, rx->JobIds, date, + rx->path, rx->fname, rx->ClientName); + /* + * Note: we have just edited the JobIds into the query, so + * we need to clear JobIds, or they will be added + * back into JobIds with the query below, and then + * restored twice. Fixes bug #2212. + */ + rx->JobIds[0] = 0; + } + rx->found = false; + /* Find and insert jobid and File Index */ + if (!db_sql_query(ua->db, rx->query, jobid_fileindex_handler, (void *)rx)) { + ua->error_msg(_("Query failed: %s. ERR=%s\n"), + rx->query, db_strerror(ua->db)); + } + if (!rx->found) { + ua->error_msg(_("No database record found for: %s\n"), file); +// ua->error_msg("Query=%s\n", rx->query); + return true; + } + return true; +} + +/* + * For a given path lookup the most recent backup in the catalog + * to get the JobId and FileIndexes of all files in that directory. + */ +static bool insert_dir_into_findex_list(UAContext *ua, RESTORE_CTX *rx, char *dir, + char *date) +{ + strip_trailing_junk(dir); + if (*rx->JobIds == 0) { + ua->error_msg(_("No JobId specified cannot continue.\n")); + return false; + } else { + Mmsg(rx->query, uar_jobid_fileindex_from_dir[db_get_type_index(ua->db)], rx->JobIds, dir, rx->ClientName); + } + rx->found = false; + /* Find and insert jobid and File Index */ + if (!db_sql_query(ua->db, rx->query, jobid_fileindex_handler, (void *)rx)) { + ua->error_msg(_("Query failed: %s. ERR=%s\n"), + rx->query, db_strerror(ua->db)); + } + if (!rx->found) { + ua->error_msg(_("No database record found for: %s\n"), dir); + return true; + } + return true; +} + +/* + * Get the JobId and FileIndexes of all files in the specified table + */ +bool insert_table_into_findex_list(UAContext *ua, RESTORE_CTX *rx, char *table) +{ + strip_trailing_junk(table); + Mmsg(rx->query, uar_jobid_fileindex_from_table, table); + + rx->found = false; + /* Find and insert jobid and File Index */ + if (!db_sql_query(ua->db, rx->query, jobid_fileindex_handler, (void *)rx)) { + ua->error_msg(_("Query failed: %s. ERR=%s\n"), + rx->query, db_strerror(ua->db)); + } + if (!rx->found) { + ua->error_msg(_("No table found: %s\n"), table); + return true; + } + return true; +} + +static void split_path_and_filename(UAContext *ua, RESTORE_CTX *rx, char *name) +{ + char *p, *f; + + /* Find path without the filename. + * I.e. everything after the last / is a "filename". + * OK, maybe it is a directory name, but we treat it like + * a filename. If we don't find a / then the whole name + * must be a path name (e.g. c:). + */ + for (p=f=name; *p; p++) { + if (IsPathSeparator(*p)) { + f = p; /* set pos of last slash */ + } + } + if (IsPathSeparator(*f)) { /* did we find a slash? */ + f++; /* yes, point to filename */ + } else { /* no, whole thing must be path name */ + f = p; + } + + /* If filename doesn't exist (i.e. root directory), we + * simply create a blank name consisting of a single + * space. This makes handling zero length filenames + * easier. 
+ */ + rx->fnl = p - f; + if (rx->fnl > 0) { + rx->fname = check_pool_memory_size(rx->fname, 2*(rx->fnl)+1); + db_escape_string(ua->jcr, ua->db, rx->fname, f, rx->fnl); + } else { + rx->fname[0] = 0; + rx->fnl = 0; + } + + rx->pnl = f - name; + if (rx->pnl > 0) { + rx->path = check_pool_memory_size(rx->path, 2*(rx->pnl)+1); + db_escape_string(ua->jcr, ua->db, rx->path, name, rx->pnl); + } else { + rx->path[0] = 0; + rx->pnl = 0; + } + + Dmsg2(100, "split path=%s file=%s\n", rx->path, rx->fname); +} + +static bool can_restore_all_files(UAContext *ua) +{ + alist *lst; + if (ua->cons) { + lst = ua->cons->ACL_lists[Directory_ACL]; + /* ACL not defined, or the first entry is not *all* */ + /* TODO: See if we search for *all* in all the list */ + if (!lst || strcasecmp((char*)lst->get(0), "*all*") != 0) { + return false; + } + if (!lst || strcasecmp((char *)lst->get(0), "*all*") != 0) { + return false; + } + } + return true; +} + +static bool ask_for_fileregex(UAContext *ua, RESTORE_CTX *rx) +{ + bool can_restore=can_restore_all_files(ua); + + if (can_restore && find_arg(ua, NT_("all")) >= 0) { /* if user enters all on command line */ + return true; /* select everything */ + } + + ua->send_msg(_("\n\nFor one or more of the JobIds selected, no files were found,\n" + "so file selection is not possible.\n" + "Most likely your retention policy pruned the files.\n")); + + if (!can_restore) { + ua->error_msg(_("\nThe current Console has UserId or Directory restrictions. " + "The full restore is not allowed.\n")); + return false; + } + + if (get_yesno(ua, _("\nDo you want to restore all the files? (yes|no): "))) { + if (ua->pint32_val == 1) + return true; + while (get_cmd(ua, _("\nRegexp matching files to restore? (empty to abort): "))) { + if (ua->cmd[0] == '\0') { + break; + } else { + regex_t *fileregex_re = NULL; + int rc; + char errmsg[500] = ""; + + fileregex_re = (regex_t *)bmalloc(sizeof(regex_t)); + rc = regcomp(fileregex_re, ua->cmd, REG_EXTENDED|REG_NOSUB); + if (rc != 0) { + regerror(rc, fileregex_re, errmsg, sizeof(errmsg)); + } + regfree(fileregex_re); + free(fileregex_re); + if (*errmsg) { + ua->send_msg(_("Regex compile error: %s\n"), errmsg); + } else { + rx->fileregex = bstrdup(ua->cmd); + return true; + } + } + } + } + return false; +} + +/* Walk on the delta_list of a TREE_NODE item and insert all parts + * TODO: Optimize for bootstrap creation, remove recursion + * 6 -> 5 -> 4 -> 3 -> 2 -> 1 -> 0 + * should insert as + * 0, 1, 2, 3, 4, 5, 6 + */ +static void add_delta_list_findex(RESTORE_CTX *rx, struct delta_list *lst) +{ + if (lst == NULL) { + return; + } + if (lst->next) { + add_delta_list_findex(rx, lst->next); + } + add_findex(rx->bsr_list, lst->JobId, lst->FileIndex); +} + +/* + * This is a list of all the files (components) that the + * user has requested for restore. It is requested by + * the plugin (for now hard coded only for VSS). + * In the future, this will be requested by a RestoreObject + * and the plugin name will be sent to the FD. + */ +static bool write_component_file(UAContext *ua, RESTORE_CTX *rx, char *fname) +{ + int fd; + if (!rx->component_fd) { + Mmsg(rx->component_fname, "%s/%s.restore.sel.XXXXXX", working_directory, my_name); + fd = mkstemp(rx->component_fname); + if (fd < 0) { + berrno be; + ua->error_msg(_("Unable to create component file %s. ERR=%s\n"), + rx->component_fname, be.bstrerror()); + return false; + } + rx->component_fd = fdopen(fd, "w+"); + if (!rx->component_fd) { + berrno be; + ua->error_msg(_("Unable to fdopen component file %s. 
ERR=%s\n"), + rx->component_fname, be.bstrerror()); + return false; + } + } + fprintf(rx->component_fd, "%s\n", fname); + if (ferror(rx->component_fd)) { + ua->error_msg(_("Error writing component file.\n")); + fclose(rx->component_fd); + unlink(rx->component_fname); + rx->component_fd = NULL; + return false; + } + return true; +} + +static bool build_directory_tree(UAContext *ua, RESTORE_CTX *rx) +{ + TREE_CTX tree; + JobId_t JobId, last_JobId; + char *p; + bool OK = true; + char ed1[50]; + + memset(&tree, 0, sizeof(TREE_CTX)); + /* + * Build the directory tree containing JobIds user selected + */ + tree.root = new_tree(rx->TotalFiles); + tree.ua = ua; + tree.all = rx->all; + tree.hardlinks_in_mem = rx->hardlinks_in_mem; + tree.no_auto_parent = rx->no_auto_parent; + last_JobId = 0; + tree.last_dir_acl = NULL; + /* + * For display purposes, the same JobId, with different volumes may + * appear more than once, however, we only insert it once. + */ + p = rx->JobIds; + tree.FileEstimate = 0; + if (get_next_jobid_from_list(&p, &JobId) > 0) { + /* Use first JobId as estimate of the number of files to restore */ + Mmsg(rx->query, uar_count_files, edit_int64(JobId, ed1)); + if (!db_sql_query(ua->db, rx->query, restore_count_handler, (void *)rx)) { + ua->error_msg("%s\n", db_strerror(ua->db)); + } + if (rx->found) { + /* Add about 25% more than this job for over estimate */ + tree.FileEstimate = rx->JobId + (rx->JobId >> 2); + tree.DeltaCount = rx->JobId/50; /* print 50 ticks */ + } + } + + ua->info_msg(_("\nBuilding directory tree for JobId(s) %s ... "), + rx->JobIds); + +#define new_get_file_list +#ifdef new_get_file_list + if (!db_get_file_list(ua->jcr, ua->db, + rx->JobIds, DBL_USE_DELTA, + insert_tree_handler, (void *)&tree)) + { + ua->error_msg("%s", db_strerror(ua->db)); + } + if (*rx->BaseJobIds) { + pm_strcat(rx->JobIds, ","); + pm_strcat(rx->JobIds, rx->BaseJobIds); + } +#else + for (p=rx->JobIds; get_next_jobid_from_list(&p, &JobId) > 0; ) { + char ed1[50]; + + if (JobId == last_JobId) { + continue; /* eliminate duplicate JobIds */ + } + last_JobId = JobId; + /* + * Find files for this JobId and insert them in the tree + */ + Mmsg(rx->query, uar_sel_files, edit_int64(JobId, ed1)); + if (!db_sql_query(ua->db, rx->query, insert_tree_handler, (void *)&tree)) { + ua->error_msg("%s", db_strerror(ua->db)); + } + } +#endif + /* + * At this point, the tree is built, so we can garbage collect + * any memory released by the SQL engine that RedHat has + * not returned to the OS :-( + */ + garbage_collect_memory(); + + /* + * Look at the first JobId on the list (presumably the oldest) and + * if it is marked purged, don't do the manual selection because + * the Job was pruned, so the tree is incomplete. 
+ */ + if (tree.FileCount != 0) { + /* Find out if any Job is purged */ + Mmsg(rx->query, "SELECT SUM(PurgedFiles) FROM Job WHERE JobId IN (%s)", rx->JobIds); + if (!db_sql_query(ua->db, rx->query, restore_count_handler, (void *)rx)) { + ua->error_msg("%s\n", db_strerror(ua->db)); + } + /* rx->JobId is the PurgedFiles flag */ + if (rx->found && rx->JobId > 0) { + tree.FileCount = 0; /* set count to zero, no tree selection */ + } + } + if (tree.FileCount == 0) { + OK = ask_for_fileregex(ua, rx); + if (OK) { + last_JobId = 0; + for (p=rx->JobIds; get_next_jobid_from_list(&p, &JobId) > 0; ) { + if (JobId == last_JobId) { + continue; /* eliminate duplicate JobIds */ + } + add_findex_all(rx->bsr_list, JobId, rx->fileregex); + } + } + } else { + char ec1[50]; + if (tree.all) { + ua->info_msg(_("\n%s files inserted into the tree and marked for extraction.\n"), + edit_uint64_with_commas(tree.FileCount, ec1)); + } else { + ua->info_msg(_("\n%s files inserted into the tree.\n"), + edit_uint64_with_commas(tree.FileCount, ec1)); + } + + if (find_arg(ua, NT_("done")) < 0) { + /* Let the user interact in selecting which files to restore */ + OK = user_select_files_from_tree(&tree); + } + + /* + * Walk down through the tree finding all files marked to be + * extracted making a bootstrap file. + */ + if (OK) { + char cwd[2000]; + for (TREE_NODE *node=first_tree_node(tree.root); node; node=next_tree_node(node)) { + Dmsg2(400, "FI=%d node=0x%x\n", node->FileIndex, node); + if (node->extract || node->extract_dir) { + Dmsg3(400, "JobId=%lld type=%d FI=%d\n", (uint64_t)node->JobId, node->type, node->FileIndex); + /* TODO: optimize bsr insertion when jobid are non sorted */ + add_delta_list_findex(rx, node->delta_list); + add_findex(rx->bsr_list, node->JobId, node->FileIndex); + /* + * Special VSS plugin code to return selected + * components. For the moment, it is hard coded + * for the VSS plugin. + */ + if (fnmatch(":component_info_*", node->fname, 0) == 0) { + tree_getpath(node, cwd, sizeof(cwd)); + if (!write_component_file(ua, rx, cwd)) { + OK = false; + break; + } + } + if (node->extract && node->type != TN_NEWDIR) { + rx->selected_files++; /* count only saved files */ + } + } + } + } + } + if (tree.uid_acl) { + delete tree.uid_acl; + delete tree.gid_acl; + delete tree.dir_acl; + } + free_tree(tree.root); /* free the directory tree */ + return OK; +} + + +/* + * This routine is used to get the current backup or a backup + * before the specified date. 
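ask_for_fileregex() above compiles the operator's pattern with REG_EXTENDED|REG_NOSUB purely as a syntax check, frees the compiled form again, and only keeps the raw string once it is known to be valid. A standalone sketch of that validate-then-keep step:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <regex.h>

/* Return a copy of 'pattern' if it compiles, NULL (with a message) if not */
static char *validate_fileregex_sketch(const char *pattern)
{
   regex_t re;
   char errmsg[500];
   int rc = regcomp(&re, pattern, REG_EXTENDED | REG_NOSUB);

   if (rc != 0) {
      regerror(rc, &re, errmsg, sizeof(errmsg));
      fprintf(stderr, "Regex compile error: %s\n", errmsg);
      return NULL;
   }
   regfree(&re);                   /* only needed the compile as a syntax check */
   return strdup(pattern);
}

int main(void)
{
   char *ok  = validate_fileregex_sketch(".*\\.conf$");
   char *bad = validate_fileregex_sketch("*.conf");   /* invalid ERE: leading '*' */

   printf("first pattern:  %s\n", ok  ? "accepted" : "rejected");
   printf("second pattern: %s\n", bad ? "accepted" : "rejected");
   free(ok);
   free(bad);
   return 0;
}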
+ */ +static bool select_backups_before_date(UAContext *ua, RESTORE_CTX *rx, char *date) +{ + bool ok = false; + FILESET_DBR fsr; + CLIENT_DBR cr; + char fileset_name[MAX_NAME_LENGTH]; + char ed1[50], ed2[50]; + char pool_select[MAX_NAME_LENGTH]; + int i; + + /* Create temp tables */ + db_sql_query(ua->db, uar_del_temp, NULL, NULL); + db_sql_query(ua->db, uar_del_temp1, NULL, NULL); + if (!db_sql_query(ua->db, uar_create_temp[db_get_type_index(ua->db)], NULL, NULL)) { + ua->error_msg("%s\n", db_strerror(ua->db)); + } + if (!db_sql_query(ua->db, uar_create_temp1[db_get_type_index(ua->db)], NULL, NULL)) { + ua->error_msg("%s\n", db_strerror(ua->db)); + } + /* + * Select Client from the Catalog + */ + memset(&cr, 0, sizeof(cr)); + if (!get_client_dbr(ua, &cr, JT_BACKUP_RESTORE)) { + goto bail_out; + } + bstrncpy(rx->ClientName, cr.Name, sizeof(rx->ClientName)); + + /* + * Get FileSet + */ + memset(&fsr, 0, sizeof(fsr)); + i = find_arg_with_value(ua, "FileSet"); + + if (i >= 0 && is_name_valid(ua->argv[i], &ua->errmsg)) { + bstrncpy(fsr.FileSet, ua->argv[i], sizeof(fsr.FileSet)); + if (!db_get_fileset_record(ua->jcr, ua->db, &fsr)) { + ua->error_msg(_("Error getting FileSet \"%s\": ERR=%s\n"), fsr.FileSet, + db_strerror(ua->db)); + i = -1; + } + } else if (i >= 0) { /* name is invalid */ + ua->error_msg(_("FileSet argument: %s\n"), ua->errmsg); + } + + if (i < 0) { /* fileset not found */ + edit_int64(cr.ClientId, ed1); + Mmsg(rx->query, uar_sel_fileset, ed1, ed1); + start_prompt(ua, _("The defined FileSet resources are:\n")); + if (!db_sql_query(ua->db, rx->query, fileset_handler, (void *)ua)) { + ua->error_msg("%s\n", db_strerror(ua->db)); + } + if (do_prompt(ua, _("FileSet"), _("Select FileSet resource"), + fileset_name, sizeof(fileset_name)) < 0) { + ua->error_msg(_("No FileSet found for client \"%s\".\n"), cr.Name); + goto bail_out; + } + + bstrncpy(fsr.FileSet, fileset_name, sizeof(fsr.FileSet)); + if (!db_get_fileset_record(ua->jcr, ua->db, &fsr)) { + ua->warning_msg(_("Error getting FileSet record: %s\n"), db_strerror(ua->db)); + ua->send_msg(_("This probably means you modified the FileSet.\n" + "Continuing anyway.\n")); + } + } + + /* If Pool specified, add PoolId specification */ + pool_select[0] = 0; + if (rx->pool) { + POOL_DBR pr; + memset(&pr, 0, sizeof(pr)); + bstrncpy(pr.Name, rx->pool->name(), sizeof(pr.Name)); + if (db_get_pool_record(ua->jcr, ua->db, &pr)) { + bsnprintf(pool_select, sizeof(pool_select), "AND Media.PoolId=%s ", + edit_int64(pr.PoolId, ed1)); + } else { + ua->warning_msg(_("Pool \"%s\" not found, using any pool.\n"), pr.Name); + } + } + + /* Find JobId of last Full backup for this client, fileset */ + edit_int64(cr.ClientId, ed1); + Mmsg(rx->query, uar_last_full, ed1, ed1, date, fsr.FileSet, + pool_select); + if (!db_sql_query(ua->db, rx->query, NULL, NULL)) { + ua->error_msg("%s\n", db_strerror(ua->db)); + goto bail_out; + } + + /* Find all Volumes used by that JobId */ + if (!db_sql_query(ua->db, uar_full, NULL, NULL)) { + ua->error_msg("%s\n", db_strerror(ua->db)); + goto bail_out; + } + + /* Note, this is needed because I don't seem to get the callback + * from the call just above. 
+ */ + rx->JobTDate = 0; + if (!db_sql_query(ua->db, uar_sel_all_temp1, last_full_handler, (void *)rx)) { + ua->warning_msg("%s\n", db_strerror(ua->db)); + } + if (rx->JobTDate == 0) { + ua->error_msg(_("No Full backup before %s found.\n"), date); + goto bail_out; + } + + /* Now find most recent Differental Job after Full save, if any */ + Mmsg(rx->query, uar_dif, edit_uint64(rx->JobTDate, ed1), date, + edit_int64(cr.ClientId, ed2), fsr.FileSet, pool_select); + if (!db_sql_query(ua->db, rx->query, NULL, NULL)) { + ua->warning_msg("%s\n", db_strerror(ua->db)); + } + /* Now update JobTDate to look into Differental, if any */ + rx->JobTDate = 0; + if (!db_sql_query(ua->db, uar_sel_all_temp, last_full_handler, (void *)rx)) { + ua->warning_msg("%s\n", db_strerror(ua->db)); + } + if (rx->JobTDate == 0) { + ua->error_msg(_("No Full backup before %s found.\n"), date); + goto bail_out; + } + + /* Now find all Incremental Jobs after Full/dif save */ + Mmsg(rx->query, uar_inc, edit_uint64(rx->JobTDate, ed1), date, + edit_int64(cr.ClientId, ed2), fsr.FileSet, pool_select); + if (!db_sql_query(ua->db, rx->query, NULL, NULL)) { + ua->warning_msg("%s\n", db_strerror(ua->db)); + } + + /* Get the JobIds from that list */ + rx->last_jobid[0] = rx->JobIds[0] = 0; + + if (!db_sql_query(ua->db, uar_sel_jobid_temp, jobid_handler, (void *)rx)) { + ua->warning_msg("%s\n", db_strerror(ua->db)); + } + + if (rx->JobIds[0] != 0) { + if (find_arg(ua, NT_("copies")) > 0) { + /* Display a list of all copies */ + db_list_copies_records(ua->jcr, ua->db, 0, rx->JobIds, + prtit, ua, HORZ_LIST); + } + /* Display a list of Jobs selected for this restore */ + db_list_sql_query(ua->jcr, ua->db, uar_list_temp, prtit, ua, 1,HORZ_LIST); + ok = true; + + } else { + ua->warning_msg(_("No jobs found.\n")); + } + +bail_out: + db_sql_query(ua->db, uar_del_temp, NULL, NULL); + db_sql_query(ua->db, uar_del_temp1, NULL, NULL); + return ok; +} + +static int restore_count_handler(void *ctx, int num_fields, char **row) +{ + RESTORE_CTX *rx = (RESTORE_CTX *)ctx; + rx->JobId = str_to_int64(row[0]); + rx->found = true; + return 0; +} + +/* + * Callback handler to get JobId and FileIndex for files + * can insert more than one depending on the caller. 
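select_backups_before_date() above narrows the candidate jobs in three passes: the newest Full before the cutoff, then the newest Differential taken after that Full, then every Incremental newer than whichever of the two was found. The sketch below runs the same cascade over an in-memory array instead of the catalog temp tables (JobTDate reduced to a plain integer; the client, fileset and pool filters are left out):

#include <stdio.h>

struct job {
   unsigned id;
   char     level;                 /* 'F'ull, 'D'ifferential, 'I'ncremental */
   long     tdate;                 /* stand-in for JobTDate */
};

int main(void)
{
   struct job jobs[] = {
      { 101, 'F', 1000 }, { 102, 'I', 1100 }, { 103, 'D', 1200 },
      { 104, 'I', 1300 }, { 105, 'F', 2000 }, { 106, 'I', 2100 },
   };
   const int n = (int)(sizeof(jobs) / sizeof(jobs[0]));
   const long cutoff = 1500;       /* "before" time chosen by the operator */
   int full = -1, diff = -1;

   /* 1. newest Full backup taken before the cutoff */
   for (int i = 0; i < n; i++) {
      if (jobs[i].level == 'F' && jobs[i].tdate < cutoff &&
          (full < 0 || jobs[i].tdate > jobs[full].tdate)) {
         full = i;
      }
   }
   if (full < 0) {
      printf("No Full backup before the cutoff\n");
      return 1;
   }
   printf("use Full JobId=%u\n", jobs[full].id);

   /* 2. newest Differential between that Full and the cutoff */
   for (int i = 0; i < n; i++) {
      if (jobs[i].level == 'D' && jobs[i].tdate > jobs[full].tdate &&
          jobs[i].tdate < cutoff &&
          (diff < 0 || jobs[i].tdate > jobs[diff].tdate)) {
         diff = i;
      }
   }
   if (diff >= 0) {
      printf("use Differential JobId=%u\n", jobs[diff].id);
   }

   /* 3. every Incremental newer than the Full/Diff base, up to the cutoff */
   long base = (diff >= 0) ? jobs[diff].tdate : jobs[full].tdate;
   for (int i = 0; i < n; i++) {
      if (jobs[i].level == 'I' && jobs[i].tdate > base && jobs[i].tdate < cutoff) {
         printf("use Incremental JobId=%u\n", jobs[i].id);
      }
   }
   return 0;
}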
+ */ +static int jobid_fileindex_handler(void *ctx, int num_fields, char **row) +{ + RESTORE_CTX *rx = (RESTORE_CTX *)ctx; + JobId_t JobId = str_to_int64(row[0]); + + Dmsg3(200, "JobId=%s JobIds=%s FileIndex=%s\n", row[0], rx->JobIds, row[1]); + + /* New JobId, add it to JobIds + * The list is sorted by JobId, so we need a cache for the previous value + * + * It will permit to find restore objects to send during the restore + */ + if (rx->JobId != JobId) { + if (*rx->JobIds) { + pm_strcat(rx->JobIds, ","); + } + pm_strcat(rx->JobIds, row[0]); + rx->JobId = JobId; + } + + add_findex(rx->bsr_list, rx->JobId, str_to_int64(row[1])); + rx->found = true; + rx->selected_files++; + return 0; +} + +/* + * Callback handler make list of JobIds + */ +static int jobid_handler(void *ctx, int num_fields, char **row) +{ + RESTORE_CTX *rx = (RESTORE_CTX *)ctx; + + if (strcmp(rx->last_jobid, row[0]) == 0) { + return 0; /* duplicate id */ + } + bstrncpy(rx->last_jobid, row[0], sizeof(rx->last_jobid)); + if (rx->JobIds[0] != 0) { + pm_strcat(rx->JobIds, ","); + } + pm_strcat(rx->JobIds, row[0]); + return 0; +} + + +/* + * Callback handler to pickup last Full backup JobTDate + */ +static int last_full_handler(void *ctx, int num_fields, char **row) +{ + RESTORE_CTX *rx = (RESTORE_CTX *)ctx; + + rx->JobTDate = str_to_int64(row[1]); + return 0; +} + +/* + * Callback handler build FileSet name prompt list + */ +static int fileset_handler(void *ctx, int num_fields, char **row) +{ + /* row[0] = FileSet (name) */ + if (row[0]) { + add_prompt((UAContext *)ctx, row[0]); + } + return 0; +} + +/* + * Free names in the list + */ +static void free_name_list(NAME_LIST *name_list) +{ + for (int i=0; i < name_list->num_ids; i++) { + free(name_list->name[i]); + } + bfree_and_null(name_list->name); + name_list->max_ids = 0; + name_list->num_ids = 0; +} + +void find_storage_resource(UAContext *ua, RESTORE_CTX &rx, char *Storage, char *MediaType) +{ + STORE *store; + + if (rx.store) { + Dmsg1(200, "Already have store=%s\n", rx.store->name()); + return; + } + /* + * Try looking up Storage by name + */ + LockRes(); + foreach_res(store, R_STORAGE) { + if (strcmp(Storage, store->name()) == 0) { + if (acl_access_ok(ua, Storage_ACL, store->name())) { + rx.store = store; + } + break; + } + } + UnlockRes(); + + if (rx.store) { + /* Check if an explicit storage resource is given */ + store = NULL; + int i = find_arg_with_value(ua, "storage"); + if (i > 0) { + store = (STORE *)GetResWithName(R_STORAGE, ua->argv[i]); + if (store && !acl_access_ok(ua, Storage_ACL, store->name())) { + store = NULL; + } + } + if (store && (store != rx.store)) { + ua->info_msg(_("\nWarning Storage is overridden by \"%s\" on the command line.\n"), + store->name()); + rx.store = store; + bstrncpy(rx.RestoreMediaType, MediaType, sizeof(rx.RestoreMediaType)); + if (strcmp(MediaType, store->media_type) != 0) { + ua->info_msg(_("This may not work because of two different MediaTypes:\n" + " Storage MediaType=\"%s\"\n" + " Volume MediaType=\"%s\".\n\n"), + store->media_type, MediaType); + } + Dmsg2(200, "Set store=%s MediaType=%s\n", rx.store->name(), rx.RestoreMediaType); + } + return; + } + + /* If no storage resource, try to find one from MediaType */ + if (!rx.store) { + LockRes(); + foreach_res(store, R_STORAGE) { + if (strcmp(MediaType, store->media_type) == 0) { + if (acl_access_ok(ua, Storage_ACL, store->name())) { + rx.store = store; + Dmsg1(200, "Set store=%s\n", rx.store->name()); + if (Storage == NULL || Storage[0] == 0) { + ua->warning_msg(_("Using Storage 
\"%s\" from MediaType \"%s\".\n"), + store->name(), MediaType); + } else { + ua->warning_msg(_("Storage \"%s\" not found, using Storage \"%s\" from MediaType \"%s\".\n"), + Storage, store->name(), MediaType); + } + } + UnlockRes(); + return; + } + } + UnlockRes(); + ua->warning_msg(_("\nUnable to find Storage resource for\n" + "MediaType \"%s\", needed by the Jobs you selected.\n"), MediaType); + } + + /* Take command line arg, or ask user if none */ + rx.store = get_storage_resource(ua, false /* don't use default */); + if (rx.store) { + Dmsg1(200, "Set store=%s\n", rx.store->name()); + } + +} diff --git a/src/dird/ua_run.c b/src/dird/ua_run.c new file mode 100644 index 00000000..5e7954b9 --- /dev/null +++ b/src/dird/ua_run.c @@ -0,0 +1,2605 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula Director -- Run Command + * + * Kern Sibbald, December MMI + */ + +#include "bacula.h" +#include "dird.h" + +const char *get_command(int index); + +class run_ctx { +public: + char *job_name, *level_name, *jid, *store_name, *pool_name; + char *where, *fileset_name, *client_name, *bootstrap, *regexwhere; + char *restore_client_name, *comment, *media_type, *next_pool_name; + const char *replace; + char *when, *verify_job_name, *catalog_name; + char *previous_job_name; + char *since; + char *plugin_options; + const char *verify_list; + JOB *job; + JOB *verify_job; + JOB *previous_job; + JOB_DBR jr; + POOL_DBR pr; + USTORE *store; + CLIENT *client; + FILESET *fileset; + POOL *pool; + POOL *next_pool; + CAT *catalog; + JobId_t JobId; + alist *JobIds; + int Priority; + int files; + bool cloned; + bool mod; + bool restart; + bool done; + bool alljobid; + bool fdcalled; + int spool_data; + bool spool_data_set; + int accurate; + bool accurate_set; + int ignoreduplicatecheck; + bool ignoreduplicatecheck_set; + alist *plugin_config; /* List of all plugin_item */ + /* Methods */ + run_ctx() { memset(this, 0, sizeof(run_ctx)); + store = new USTORE; }; + ~run_ctx() { + delete store; + if (JobIds) { + delete JobIds; + } + }; +}; + +/* Forward referenced subroutines */ +static void select_job_level(UAContext *ua, JCR *jcr); +static bool display_job_parameters(UAContext *ua, JCR *jcr, JOB *job, + const char *verify_list, char *jid, const char *replace, + char *client_name); +static void select_where_regexp(UAContext *ua, JCR *jcr); +static bool scan_run_command_line_arguments(UAContext *ua, run_ctx &rc); +static bool set_run_context_in_jcr(UAContext *ua, JCR *jcr, run_ctx &rc); +static int modify_job_parameters(UAContext *ua, JCR *jcr, run_ctx &rc); +static JobId_t start_job(UAContext *ua, JCR *jcr, run_ctx &rc); + +/* Imported variables */ +extern struct s_kw ReplaceOptions[]; + +/* + * For Backup and Verify Jobs + * run [job=] level= + * + * For Restore Jobs + * run + * + * Returns: 0 on error + * JobId if OK + * + */ +int run_cmd(UAContext *ua, const char *cmd) +{ + JCR *jcr = NULL; + run_ctx rc; + int status; + + if 
(!open_client_db(ua)) { + goto bail_out; + } + + if (!scan_run_command_line_arguments(ua, rc)) { + goto bail_out; + } + + for ( ;; ) { + /* + * Create JCR to run job. NOTE!!! after this point, free_jcr() + * before returning. + */ + if (!jcr) { + jcr = new_jcr(sizeof(JCR), dird_free_jcr); + set_jcr_defaults(jcr, rc.job); + jcr->unlink_bsr = ua->jcr->unlink_bsr; /* copy unlink flag from caller */ + ua->jcr->unlink_bsr = false; + if (find_arg(ua, NT_("fdcalled")) > 0) { + rc.fdcalled = true; + } + } + /* Transfer JobIds to new restore Job */ + if (ua->jcr->JobIds) { + if (jcr->JobIds) { + free_pool_memory(jcr->JobIds); + } + jcr->JobIds = ua->jcr->JobIds; + ua->jcr->JobIds = NULL; + } + /* Transfer VSS component info */ + if (ua->jcr->component_fname) { + jcr->component_fname = ua->jcr->component_fname; + ua->jcr->component_fname = NULL; + jcr->component_fd = ua->jcr->component_fd; + ua->jcr->component_fd = NULL; + } + /* Transfer Plugin Restore Configuration */ + if (ua->jcr->plugin_config) { + jcr->plugin_config = ua->jcr->plugin_config; + ua->jcr->plugin_config = NULL; + } + + if (!set_run_context_in_jcr(ua, jcr, rc)) { + break; /* error get out of while loop */ + } + + /* Run without prompting? */ + if (ua->batch || find_arg(ua, NT_("yes")) > 0) { + return start_job(ua, jcr, rc); + } + + /* + * Prompt User to see if all run job parameters are correct, and + * allow him to modify them. + */ + if (!display_job_parameters(ua, jcr, rc.job, rc.verify_list, rc.jid, rc.replace, + rc.client_name ? rc.client_name : jcr->job->client->hdr.name)) { + break; /* error get out of while loop */ + } + + if (!get_cmd(ua, _("OK to run? (yes/mod/no): "))) { + break; /* error get out of while loop */ + } + + if (strncasecmp(ua->cmd, ".mod ", 5) == 0 || + (strncasecmp(ua->cmd, "mod ", 4) == 0 && strlen(ua->cmd) > 6)) { + parse_ua_args(ua); + rc.mod = true; + if (!scan_run_command_line_arguments(ua, rc)) { + break; /* error get out of while loop */ + } + continue; /* another round with while loop */ + } + + /* Allow the user to modify the settings */ + status = modify_job_parameters(ua, jcr, rc); + if (status == 0) { + continue; /* another round with while loop */ + } + if (status == -1) { /* error */ + break; /* error get out of while loop */ + } + + if (ua->cmd[0] == 0 || strncasecmp(ua->cmd, _("yes"), strlen(ua->cmd)) == 0) { + return start_job(ua, jcr, rc); + } + if (strncasecmp(ua->cmd, _("no"), strlen(ua->cmd)) == 0) { + break; /* get out of while loop */ + } + ua->send_msg(_("\nBad response: %s. You must answer yes, mod, or no.\n\n"), ua->cmd); + } + +bail_out: + ua->send_msg(_("Job not run.\n")); + if (ua->jcr->component_fd) { + fclose(ua->jcr->component_fd); + ua->jcr->component_fd = NULL; + } + if (ua->jcr->component_fname) { + unlink(ua->jcr->component_fname); + free_and_null_pool_memory(ua->jcr->component_fname); + } + if (jcr) { + if (jcr->component_fd) { + fclose(jcr->component_fd); + jcr->component_fd = NULL; + } + if (jcr->component_fname) { + unlink(jcr->component_fname); + free_and_null_pool_memory(jcr->component_fname); + } + free_jcr(jcr); + } + return 0; /* do not run */ +} + +static JobId_t start_job(UAContext *ua, JCR *jcr, run_ctx &rc) +{ + JobId_t JobId; + char ed1[50]; + + /* Do a final check for the client, the job can change in the previous menu */ + if (jcr->client && jcr->job) { + if (!acl_access_client_ok(ua, jcr->client->name(), jcr->job->JobType)) { + ua->error_msg(_("Job failed. 
Client \"%s\" not authorized on this console\n"), jcr->client->name()); + free_jcr(jcr); + return 0; + } + } + + /* Do a final check for the where/regexwhere, the job can change in the previous menu */ + if (jcr->getJobType() == JT_RESTORE) { + char *p = jcr->RegexWhere ? jcr->RegexWhere : jcr->job->RegexWhere; + if (p) { + if (!acl_access_ok(ua, Where_ACL, p)) { + ua->error_msg(_("\"RegexWhere\" specification not authorized.\n")); + free_jcr(jcr); + return 0; + } + + } else { + p = jcr->where ? jcr->where : jcr->job->RestoreWhere; + if (p) { + if (!acl_access_ok(ua, Where_ACL, p)) { + ua->error_msg(_("\"where\" specification not authorized.\n")); + free_jcr(jcr); + return 0; + } + } + } + } + + /* If we use the fdcalled feature, we keep use the UA socket + * as a FileDaemon socket. We do not use dup_bsock() because + * it doesn't work, when the UA will do a free_bsock() all + * socket childs will be closed as well. + */ + if (rc.fdcalled) { + jcr->file_bsock = ua->UA_sock; + jcr->file_bsock->set_jcr(jcr); + } + if (rc.jr.JobStatus == JS_Incomplete) { + Dmsg1(100, "Ressuming JobId=%d\n", rc.jr.JobId); + JobId = resume_job(jcr, &rc.jr); + } else { + Dmsg1(100, "Starting JobId=%d\n", rc.jr.JobId); + JobId = run_job(jcr); + } + Dmsg4(100, "JobId=%u NewJobId=%d pool=%s priority=%d\n", (int)jcr->JobId, + JobId, jcr->pool->name(), jcr->JobPriority); + free_jcr(jcr); /* release jcr */ + if (JobId == 0) { + ua->error_msg(_("Job %s failed.\n"), edit_int64(rc.jr.JobId, ed1)); + + } else { + ua->send_msg(_("Job queued. JobId=%s\n"), edit_int64(JobId, ed1)); + } + if (rc.fdcalled) { + ua->signal(BNET_FDCALLED); /* After this point, this is a new connection */ + ua->UA_sock = new_bsock(); + ua->quit = true; + } + return JobId; +} + +/* + * If no job_name defined in the run context, ask + * the user for it. + * Then put the job resource in the run context and + * check the access rights. + */ +static bool get_job(UAContext *ua, run_ctx &rc) +{ + if (rc.job_name) { + /* Find Job */ + rc.job = GetJobResWithName(rc.job_name); + if (!rc.job) { + if (*rc.job_name != 0) { + ua->send_msg(_("Job \"%s\" not found\n"), rc.job_name); + } + rc.job = select_job_resource(ua); + } else { + Dmsg1(100, "Found job=%s\n", rc.job_name); + } + } else if (!rc.job) { + ua->send_msg(_("A job name must be specified.\n")); + rc.job = select_job_resource(ua); + } + if (!rc.job) { + return false; + } else if (!acl_access_ok(ua, Job_ACL, rc.job->name())) { + ua->error_msg( _("No authorization. Job \"%s\".\n"), rc.job->name()); + return false; + } + return true; +} + +/* + * If no pool_name defined in the run context, ask + * the user for it. + * Then put the pool resource in the run context and + * check the access rights. + */ +static bool get_pool(UAContext *ua, run_ctx &rc) +{ + if (rc.pool_name) { + rc.pool = GetPoolResWithName(rc.pool_name); + if (!rc.pool) { + if (*rc.pool_name != 0) { + ua->warning_msg(_("Pool \"%s\" not found.\n"), rc.pool_name); + } + rc.pool = select_pool_resource(ua); + } + } else if (!rc.pool) { + rc.pool = rc.job->pool; /* use default */ + } + if (!rc.pool) { + return false; + } else if (!acl_access_ok(ua, Pool_ACL, rc.pool->name())) { + ua->error_msg(_("No authorization. 
Pool \"%s\".\n"), rc.pool->name()); + return false; + } + Dmsg1(100, "Using Pool=%s\n", rc.pool->name()); + return true; +} + +static bool get_next_pool(UAContext *ua, run_ctx &rc) +{ + if (rc.next_pool_name) { + Dmsg1(100, "Have next pool=%s\n", rc.next_pool_name); + rc.next_pool = GetPoolResWithName(rc.next_pool_name); + if (!rc.next_pool) { + if (*rc.next_pool_name != 0) { + ua->warning_msg(_("NextPool \"%s\" not found.\n"), rc.next_pool_name); + } + rc.next_pool = select_pool_resource(ua); + } + } + /* NextPool can come from Job resource NextPool or Pool resource NextPool */ + if (!rc.next_pool) { + if (rc.job->next_pool) { + rc.next_pool = rc.job->next_pool; + } else { + rc.next_pool = rc.pool->NextPool; /* use default */ + } + } + if (rc.next_pool && !acl_access_ok(ua, Pool_ACL, rc.next_pool->name())) { + ua->error_msg(_("No authorization. NextPool \"%s\".\n"), rc.next_pool->name()); + return false; + } + if (rc.next_pool) { + Dmsg1(100, "Using NextPool=%s\n", NPRT(rc.next_pool->name())); + } + return true; +} + + +/* + * Fill in client data according to what is setup + * in the run context, and make sure the user + * has authorized access to it. + */ +static bool get_client(UAContext *ua, run_ctx &rc) +{ + bool authorized=false; + if (rc.client_name) { + rc.client = GetClientResWithName(rc.client_name); + if (!rc.client) { + if (*rc.client_name != 0) { + ua->warning_msg(_("Client \"%s\" not found.\n"), rc.client_name); + } + rc.client = select_client_resource(ua, rc.job->JobType); + } + } else if (!rc.client) { + rc.client = rc.job->client; /* use default */ + } + + Dmsg1(800, "Using client=%s\n", rc.client->name()); + + if (rc.job->RestoreClient){ + /* Use restoreclient defined in config Job resource */ + rc.restore_client_name = rc.job->RestoreClient; + } + if (rc.restore_client_name) { + rc.client = GetClientResWithName(rc.restore_client_name); + if (!rc.client) { + if (*rc.restore_client_name != 0) { + ua->warning_msg(_("Restore Client \"%s\" not found.\n"), rc.restore_client_name); + } + rc.client = select_client_resource(ua, rc.job->JobType); + } + } else if (!rc.client) { + rc.client = rc.job->client; /* use default */ + } + + if (!rc.client) { + return false; + + } else if (acl_access_client_ok(ua, rc.client->name(), rc.job->JobType)) { + authorized = true; + } + if (!authorized) { + ua->error_msg(_("No authorization. Client \"%s\".\n"), + rc.client->name()); + return false; + } + Dmsg1(800, "Using restore client=%s\n", rc.client->name()); + return true; +} + + +/* + * Fill in fileset data according to what is setup + * in the run context, and make sure the user + * has authorized access to it. + */ +static bool get_fileset(UAContext *ua, run_ctx &rc) +{ + if (rc.fileset_name) { + rc.fileset = GetFileSetResWithName(rc.fileset_name); + if (!rc.fileset) { + ua->send_msg(_("FileSet \"%s\" not found.\n"), rc.fileset_name); + rc.fileset = select_fileset_resource(ua); + } + } else if (!rc.fileset) { + rc.fileset = rc.job->fileset; /* use default */ + } + if (!rc.fileset) { + return false; + } else if (!acl_access_ok(ua, FileSet_ACL, rc.fileset->name())) { + ua->send_msg(_("No authorization. FileSet \"%s\".\n"), + rc.fileset->name()); + return false; + } + return true; +} + +/* + * Fill in storage data according to what is setup + * in the run context, and make sure the user + * has authorized access to it. 
+ */ +static bool get_storage(UAContext *ua, run_ctx &rc) +{ + if (rc.store_name) { + rc.store->store = GetStoreResWithName(rc.store_name); + pm_strcpy(rc.store->store_source, _("Command input")); + if (!rc.store->store) { + if (*rc.store_name != 0) { + ua->warning_msg(_("Storage \"%s\" not found.\n"), rc.store_name); + } + rc.store->store = select_storage_resource(ua); + pm_strcpy(rc.store->store_source, _("user selection")); + } + } else if (!rc.store->store) { + get_job_storage(rc.store, rc.job, NULL); /* use default */ + } + if (!rc.store->store) { + ua->error_msg(_("No storage specified.\n")); + return false; + } else if (!acl_access_ok(ua, Storage_ACL, rc.store->store->name())) { + ua->error_msg(_("No authorization. Storage \"%s\".\n"), + rc.store->store->name()); + return false; + } + Dmsg1(800, "Using storage=%s\n", rc.store->store->name()); + return true; +} + +/* + * Get and pass back a list of Jobids in rc.jid + */ +static bool get_jobid_list(UAContext *ua, sellist &sl, run_ctx &rc) +{ + int i, JobId; + JOB_DBR jr; + char *pJobId; + bool found = false; + + memset(&jr, 0, sizeof(jr)); + rc.jid = NULL; + /* See if any JobId is specified */ + if ((i=find_arg(ua, "jobid")) >= 0) { + rc.jid = ua->argv[i]; + if (!rc.jid) { + ua->send_msg(_("No JobId specified.\n")); + return false; + } + if (!sl.set_string(ua->argv[i], true)) { + ua->send_msg("%s", sl.get_errmsg()); + return false; + } + return true; + } + + /* No JobId list give, so see if he specified a Job */ + if ((i=find_arg(ua, "job")) >= 0) { + rc.job_name = ua->argv[i]; + if (!get_job(ua, rc)) { + ua->send_msg(_("Invalid or no Job name specified.\n")); + return false; + } + } + + if ((i=find_arg_with_value(ua, "limit")) >= 0) { + jr.limit = str_to_int64(ua->argv[i]); + + } else { + jr.limit = 100; /* max 100 records */ + } + + if (rc.job_name) { + bstrncpy(jr.Name, rc.job_name, sizeof(jr.Name)); + } else { + jr.Name[0] = 0; + } + jr.JobStatus = rc.jr.JobStatus; + Dmsg2(100, "JobStatus=%d JobName=%s\n", jr.JobStatus, jr.Name); + /* rc.JobIds is alist of all records found and printed */ + rc.JobIds = db_list_job_records(ua->jcr, ua->db, &jr, prtit, ua, INCOMPLETE_JOBS); + if (!rc.JobIds || rc.JobIds->size()==0 || + !get_selection_list(ua, sl, _("Enter the JobId list to select: "), false)) { + return false; + } + Dmsg1(100, "list=%s\n", sl.get_list()); + /* + * Make sure each item entered is in the JobIds list + */ + while ( (JobId = sl.next()) > 0) { + foreach_alist(pJobId, rc.JobIds) { + if (JobId == str_to_int64(pJobId)) { + pJobId[0] = 0; + found = true; + break; + } + } + if (!found) { + ua->error_msg(_("JobId=%d entered is not in the list.\n"), JobId); + return false; + } + } + sl.begin(); /* reset to walk list again */ + rc.done = false; + return true; +} + +static bool get_jobid_from_list(UAContext *ua, sellist &sl, run_ctx &rc) +{ + int JobId; + if (rc.done) { + return false; + } + if ((JobId = sl.next()) < 0) { + Dmsg1(100, "sl.next()=%d\n", JobId); + rc.done = true; + return false; + } + rc.jr.JobId = rc.JobId = JobId; + Dmsg1(100, "Next JobId=%d\n", rc.JobId); + if (!db_get_job_record(ua->jcr, ua->db, &rc.jr)) { + ua->error_msg(_("Could not get job record for selected JobId=%d. 
ERR=%s"), + rc.JobId, db_strerror(ua->db)); + return false; + } + Dmsg3(100, "Job=%s JobId=%d JobStatus=%c\n", rc.jr.Name, rc.jr.JobId, + rc.jr.JobStatus); + rc.job_name = rc.jr.Name; + + if (!get_job(ua, rc)) { + return false; + } + rc.pr.PoolId = rc.jr.PoolId; + if (!db_get_pool_record(ua->jcr, ua->db, &rc.pr)) { + ua->error_msg(_("Could not get pool record for selected JobId=%d. ERR=%s"), + rc.JobId, db_strerror(ua->db)); + return false; + } + rc.pool_name = rc.pr.Name; + if (!get_pool(ua, rc)) { + return false; + } + get_job_storage(rc.store, rc.job, NULL); + rc.client_name = rc.job->client->hdr.name; + if (!get_client(ua, rc)) { + return false; + } + if (!get_fileset(ua, rc)) { + return false; + } + if (!get_storage(ua, rc)) { + return false; + } + return true; +} + +/* + * Restart Canceled, Failed, or Incomplete Jobs + * + * Returns: 0 on error + * JobId if OK + * + */ +int restart_cmd(UAContext *ua, const char *cmd) +{ + JCR *jcr = NULL; + run_ctx rc; + sellist sl; + int i, j; + bool got_kw = false; + struct s_js { + const char *status_name; + int32_t job_status; + }; + struct s_js kw[] = { + {"Incomplete", JS_Incomplete}, + {"Canceled", JS_Canceled}, + {"Failed", JS_FatalError}, + {"All", 0}, + {NULL, 0} + }; + + if (!open_client_db(ua)) { + return 0; + } + + rc.jr.JobStatus = 0; + for (i=1; iargc; i++) { + for (j=0; kw[j].status_name; j++) { + if (strcasecmp(ua->argk[i], kw[j].status_name) == 0) { + rc.jr.JobStatus = kw[j].job_status; + got_kw = true; + break; + } + } + } + if (!got_kw) { /* Must prompt user */ + start_prompt(ua, _("You have the following choices:\n")); + for (i=0; kw[i].status_name; i++) { + add_prompt(ua, kw[i].status_name); + } + i = do_prompt(ua, NULL, _("Select termination code: "), NULL, 0); + if (i < 0) { + return 0; + } + rc.jr.JobStatus = kw[i].job_status; + } + + /* type now has what job termination code we want to look at */ + Dmsg1(100, "Termination code=%c\n", rc.jr.JobStatus); + + /* Get a list of JobIds to restore */ + if (!get_jobid_list(ua, sl, rc)) { + if (rc.JobIds) { + rc.JobIds->destroy(); + } + return false; + } + Dmsg1(100, "list=%s\n", sl.get_list()); + + while (get_jobid_from_list(ua, sl, rc)) { + /* + * Create JCR to run job. NOTE!!! after this point, free_jcr() + * before returning. 
+ */ + if (!jcr) { + jcr = new_jcr(sizeof(JCR), dird_free_jcr); + set_jcr_defaults(jcr, rc.job); + jcr->unlink_bsr = ua->jcr->unlink_bsr; /* copy unlink flag from caller */ + ua->jcr->unlink_bsr = false; + } + + if (!set_run_context_in_jcr(ua, jcr, rc)) { + break; + } + start_job(ua, jcr, rc); + jcr = NULL; + } + + if (jcr) { + free_jcr(jcr); + } + if (rc.JobIds) { + rc.JobIds->destroy(); + } + return 0; /* do not run */ +} + + +/* + * Plugin restore option part + */ + +/* Free a plugin_config_item */ +void free_plugin_config_item(plugin_config_item *elt) +{ + free(elt->plugin_name); + free_pool_memory(elt->content); + free(elt); +} + +/* Free a list of plugins (do not free the list itself) */ +void free_plugin_config_items(alist *lst) +{ + plugin_config_item *elt; + + if (!lst) { + return; + } + + foreach_alist(elt, lst) { + free_plugin_config_item(elt); + } +} + +/* Structure used in the sql query to get configuration restore objects */ +struct plugin_config_handler_t +{ + UAContext *ua; /* UAContext for user input */ + POOLMEM *tmp; /* Used to store the config object */ + alist *plugins; /* Configuration plugin list */ + alist *content; /* Temp file used by each plugin */ +}; + +/* DB handler to get all configuration restore objects for a given + * set of jobids + */ +static int plugin_config_handler(void *ctx, int num_fields, char **row) +{ + struct plugin_config_handler_t *pch = (struct plugin_config_handler_t *)ctx; + UAContext *ua = pch->ua; + JCR *jcr = ua->jcr; + int32_t len; + + /* object */ + db_unescape_object(jcr, ua->db, + row[8], /* Object */ + str_to_uint64(row[1]), /* Object length */ + &pch->tmp, &len); + + /* Is compressed ? */ + if (str_to_int64(row[5]) > 0) { + int full_len = str_to_int64(row[2]); + int out_len = full_len + 100; /* full length */ + char *obj = (char *)malloc(out_len); + Zinflate(pch->tmp, len, obj, out_len); /* out_len is updated */ + if (out_len != full_len) { + ua->error_msg(_("Decompression failed. Len wanted=%d got=%d. 
Object=%s\n"), + full_len, out_len, row[9]); + } + obj[out_len] = 0; + pch->content->append(obj); + + } else { + pch->tmp[len]=0; + pch->content->append(bstrdup(pch->tmp)); + } + + pch->plugins->append(bstrdup(row[9])); + return 0; +} + +/* Save a Plugin Config object (ConfigFile) inside the JCR + * using a list of plugin_config_item + * + * We allow only one Plugin Config object per Plugin + */ +static void plugin_config_save_jcr(UAContext *ua, JCR *jcr, + char *pname, ConfigFile *ini) +{ + plugin_config_item *elt; + if (!jcr->plugin_config) { + jcr->plugin_config = New(alist(5, not_owned_by_alist)); + } + + /* Store only one Plugin Config object per plugin command */ + for (int i = 0; i < jcr->plugin_config->size() ; i++) { + elt = (plugin_config_item *) jcr->plugin_config->get(i); + if (strcmp(elt->plugin_name, pname) == 0) { + jcr->plugin_config->remove(i); + free_plugin_config_item(elt); + break; + } + } + + elt = (plugin_config_item *) malloc (sizeof(plugin_config_item)); + elt->plugin_name = bstrdup(pname); + elt->content = get_pool_memory(PM_FNAME); + ini->dump_results(&elt->content); + jcr->plugin_config->append(elt); +} + +/* TODO: Allow to have sub-menus Advanced.restore_mode can be + * in a Advanced panel (sub menu) + */ + +/* Take the ConfigIni struture and display user menu for a given plugin */ +static int plugin_display_options(UAContext *ua, JCR *jcr, ConfigFile *ini) +{ + int i, nb; + int jcr_pos = -1; + POOL_MEM prompt, tmp; + bool found; + INI_ITEM_HANDLER *h; + + /* TODO: See how to work in API mode + if (ua->api) { + ua->signal(BNET_RUN_CMD); + } + */ + + /* Take a look in the plugin_config list to see if we have something to + * initialize + */ + if (jcr->plugin_config) { + plugin_config_item *item=NULL; + + for (jcr_pos = 0; jcr_pos < jcr->plugin_config->size() ; jcr_pos++) { + item = (plugin_config_item *)jcr->plugin_config->get(jcr_pos); + + if (strcmp(item->plugin_name, ini->plugin_name) == 0) /* bpipe:xxx:yyyy */ + { + if (!ini->dump_string(item->content, strlen(item->content)) || + !ini->parse(ini->out_fname)) + { + ua->error_msg(_("Unable to use current plugin configuration, " + "discarding it.")); + } + /* When we are here, we can type yes (it will add it back), or no + * to not use this plugin configuration. So, don't keep it in the + * list. + */ + jcr->plugin_config->remove(jcr_pos); + free_plugin_config_item(item); + break; + } + } + } + +configure_again: + ua->send_msg(_("Plugin Restore Options\n")); + + for (nb=0; ini->items[nb].name; nb++) { + + if (ini->items[nb].found) { + /* When calling the handler, It will convert the value + * to a string representation in ini->edit + */ + ini->items[nb].handler(NULL, ini, &ini->items[nb]); + } else { + if (ini->items[nb].required) { + pm_strcpy(ini->edit, _("*None, but required*")); + + } else { + pm_strcpy(ini->edit, _("*None*")); + } + } + + Mmsg(tmp, "%s:", ini->items[nb].name); + + Mmsg(prompt, "%-20s %-20s ", + tmp.c_str(), ini->edit); + + if (ini->items[nb].default_value) { + Mmsg(tmp, "(%s)", ini->items[nb].default_value); + pm_strcat(prompt, tmp.c_str()); + } + + ua->send_msg("%s\n", prompt.c_str()); + } + + if (!get_cmd(ua, _("Use above plugin configuration? 
(yes/mod/no): "))) { + ini->clear_items(); + return 0; + } + + /* '', 'y', 'ye', and 'yes' are valid */ + if (strncasecmp(ua->cmd, _("yes"), strlen(ua->cmd)) == 0) { + return 1; + } + + if (strncasecmp(ua->cmd, _("no"), strlen(ua->cmd)) == 0) { + ini->clear_items(); + return 0; + } + + /* When using "mod", we display the list of parameters with their + * comments, and we let the user choose one entry to modify + */ + if (strncasecmp(ua->cmd, _("mod"), strlen(ua->cmd)) == 0) { + start_prompt(ua, _("You have the following choices:\n")); + + for (nb=0; ini->items[nb].name; nb++) { + + if (ini->items[nb].comment) { + Mmsg(tmp, " (%s)", ini->items[nb].comment); + } else { + pm_strcpy(tmp, ""); + } + + Mmsg(prompt, "%s%s ", + ini->items[nb].name, tmp.c_str()); + + add_prompt(ua, prompt.c_str()); + } + + i = do_prompt(ua, NULL, _("Select parameter to modify"), NULL, 0); + + if (i < 0) { + ini->clear_items(); + return 0; + } + + Mmsg(prompt, _("Please enter a value for %s: "), ini->items[i].name); + + /* Now use the handler to know how to ask the value to the user. + * For example, boolean will use get_yes_no(), pint32 will use get_pint() + */ + h = ini->items[i].handler; + if (h == ini_store_int32 || + h == ini_store_pint32) { + found = ini->items[i].found = get_pint(ua, prompt.c_str()); + if (found) { + ini->items[i].val.int32val = ua->pint32_val; + } + + } else if (h == ini_store_bool) { + found = ini->items[i].found = get_yesno(ua, prompt.c_str()); + if (found) { + ini->items[i].val.boolval = ua->pint32_val; + } + + } else if (h == ini_store_name) { + found = ini->items[i].found = get_cmd(ua, prompt.c_str()); + if (found) { + strncpy(ini->items[i].val.nameval, ua->cmd, MAX_NAME_LENGTH -1); + ini->items[i].val.nameval[MAX_NAME_LENGTH - 1] = 0; + } + + } else if (h == ini_store_str) { + found = ini->items[i].found = get_cmd(ua, prompt.c_str()); + if (found) { + ini->items[i].val.strval = bstrdup(ua->cmd); + } + + } else if (h == ini_store_int64 || + h == ini_store_pint64) { + found = ini->items[i].found = get_pint(ua, prompt.c_str()); + if (found) { + ini->items[i].val.int64val = ua->int64_val; + } + } + goto configure_again; + } + + return 1; /* never reached */ +} + +/* Display a menu with all plugins */ +static void plugin_config(UAContext *ua, JCR *jcr, run_ctx &rc) +{ + int i, nb; + char *elt, *tmp; + ConfigFile *ini = NULL; + POOLMEM *query=NULL; + struct plugin_config_handler_t pch; + + /* No jobids for this restore, probably wrong */ + if (!jcr->JobIds || !jcr->JobIds[0]) { + return; + } + + if (!open_client_db(ua)) { + return; + } + + pch.ua = ua; + query = get_pool_memory(PM_FNAME); + pch.tmp = get_pool_memory(PM_MESSAGE); + pch.plugins = New(alist(10, owned_by_alist)); + pch.content = New(alist(10, owned_by_alist)); + + /* Get all RestoreObject PLUGIN_CONFIG for the given Job */ + Mmsg(query, get_restore_objects, jcr->JobIds, FT_PLUGIN_CONFIG); + db_sql_query(ua->db, query, plugin_config_handler, &pch); + + if (!pch.plugins || pch.plugins->size() == 0) { + ua->info_msg(_("No plugin to configure\n")); + goto bail_out; + } + + /* TODO: Let see if we want to configure plugins that were given in command + * line. 
+ */ + + start_prompt(ua, _("Plugins to configure:\n")); + + nb=0; + foreach_alist(elt, pch.plugins) { + nb++; + pm_strcpy(query, elt); + add_prompt(ua, query); + } + + i = do_prompt(ua, "", _("Select plugin to configure"), NULL, 0); + + if (i < 0) { + goto bail_out; + } + + + elt = (char *)pch.plugins->get(i); + ini = new ConfigFile(); + /* Try to read the plugin configuration, if error, loop to configure + * something else, or bail_out + */ + tmp = (char *)pch.content->get(i); + if (!ini->dump_string(tmp, strlen(tmp)) || /* Send the string to a file */ + !ini->unserialize(ini->out_fname)) { /* Read the file to initialize the ConfigFile */ + + ua->error_msg(_("Can't configure %32s\n"), elt); + goto bail_out; + } + + ini->set_plugin_name(elt); + + if (plugin_display_options(ua, jcr, ini)) { + ini->dump_results(&query); + Dmsg1(50, "plugin: %s\n", query); + + /* Save the plugin somewhere in the JCR */ + plugin_config_save_jcr(ua, jcr, elt, ini); + } + +bail_out: + free_pool_memory(pch.tmp); + free_pool_memory(query); + if (ini) { + delete ini; + } + delete pch.plugins; + delete pch.content; +} + +int modify_job_parameters(UAContext *ua, JCR *jcr, run_ctx &rc) +{ + int i, opt; + + /* + * At user request modify parameters of job to be run. + */ + if (ua->cmd[0] != 0 && strncasecmp(ua->cmd, _("mod"), strlen(ua->cmd)) == 0){ + FILE *fd; + + start_prompt(ua, _("Parameters to modify:\n")); + add_prompt(ua, _("Level")); /* 0 */ + add_prompt(ua, _("Storage")); /* 1 */ + add_prompt(ua, _("Job")); /* 2 */ + add_prompt(ua, _("FileSet")); /* 3 */ + if (jcr->getJobType() == JT_RESTORE) { + add_prompt(ua, _("Restore Client")); /* 4 */ + } else { + add_prompt(ua, _("Client")); /* 4 */ + } + add_prompt(ua, _("When")); /* 5 */ + add_prompt(ua, _("Priority")); /* 6 */ + if (jcr->getJobType() == JT_BACKUP || + jcr->getJobType() == JT_COPY || + jcr->getJobType() == JT_MIGRATE || + jcr->getJobType() == JT_VERIFY) { + add_prompt(ua, _("Pool")); /* 7 */ + if ((jcr->getJobType() == JT_BACKUP && /* Virtual full */ + jcr->is_JobLevel(L_VIRTUAL_FULL)) || + jcr->getJobType() == JT_COPY || + jcr->getJobType() == JT_MIGRATE) { + add_prompt(ua, _("NextPool")); /* 8 */ + } else if (jcr->getJobType() == JT_VERIFY) { + add_prompt(ua, _("Verify Job")); /* 8 */ + } + } else if (jcr->getJobType() == JT_RESTORE) { + add_prompt(ua, _("Bootstrap")); /* 7 */ + add_prompt(ua, _("Where")); /* 8 */ + add_prompt(ua, _("File Relocation"));/* 9 */ + add_prompt(ua, _("Replace")); /* 10 */ + add_prompt(ua, _("JobId")); /* 11 */ + } + if (jcr->getJobType() == JT_BACKUP || jcr->getJobType() == JT_RESTORE) { + add_prompt(ua, _("Plugin Options")); /* 12 */ + } + switch (do_prompt(ua, "", _("Select parameter to modify"), NULL, 0)) { + case 0: + /* Level */ + select_job_level(ua, jcr); + goto try_again; + case 1: + /* Storage */ + rc.store->store = select_storage_resource(ua); + if (rc.store->store) { + pm_strcpy(rc.store->store_source, _("user selection")); + set_rwstorage(jcr, rc.store); + goto try_again; + } + break; + case 2: + /* Job */ + rc.job = select_job_resource(ua); + if (rc.job) { + jcr->job = rc.job; + set_jcr_defaults(jcr, rc.job); + goto try_again; + } + break; + case 3: + /* FileSet */ + rc.fileset = select_fileset_resource(ua); + if (rc.fileset) { + jcr->fileset = rc.fileset; + goto try_again; + } + break; + case 4: + /* Client */ + { + int32_t jt = rc.job ? 
rc.job->JobType : JT_SYSTEM; + rc.client = select_client_resource(ua, jt); + if (rc.client) { + jcr->client = rc.client; + goto try_again; + } + } + break; + case 5: + /* When */ + if (!get_cmd(ua, _("Please enter start time as a duration or YYYY-MM-DD HH:MM:SS or return for now: "))) { + break; + } + if (ua->cmd[0] == 0) { + jcr->sched_time = time(NULL); + } else { + utime_t duration; + jcr->sched_time = str_to_utime(ua->cmd); + if (jcr->sched_time == 0) { + if (duration_to_utime(ua->cmd, &duration)) { + jcr->sched_time = time(NULL) + duration; + } else { + ua->send_msg(_("Invalid time, using current time.\n")); + jcr->sched_time = time(NULL); + } + } + } + + goto try_again; + case 6: + /* Priority */ + if (!get_pint(ua, _("Enter new Priority: "))) { + break; + } + if (ua->pint32_val == 0) { + ua->send_msg(_("Priority must be a positive integer.\n")); + } else { + jcr->JobPriority = ua->pint32_val; + } + goto try_again; + case 7: + /* Pool or Bootstrap depending on JobType */ + if (jcr->getJobType() == JT_BACKUP || + jcr->getJobType() == JT_COPY || + jcr->getJobType() == JT_MIGRATE || + jcr->getJobType() == JT_VERIFY) { /* Pool */ + rc.pool = select_pool_resource(ua); + if (rc.pool) { + jcr->pool = rc.pool; + Dmsg1(100, "Set new pool=%s\n", jcr->pool->name()); + goto try_again; + } + break; + } + + /* Bootstrap */ + if (!get_cmd(ua, _("Please enter the Bootstrap file name: "))) { + break; + } + if (jcr->RestoreBootstrap) { + free(jcr->RestoreBootstrap); + jcr->RestoreBootstrap = NULL; + } + if (ua->cmd[0] != 0) { + jcr->RestoreBootstrap = bstrdup(ua->cmd); + fd = bfopen(jcr->RestoreBootstrap, "rb"); + if (!fd) { + berrno be; + ua->send_msg(_("Warning cannot open %s: ERR=%s\n"), + jcr->RestoreBootstrap, be.bstrerror()); + free(jcr->RestoreBootstrap); + jcr->RestoreBootstrap = NULL; + } else { + fclose(fd); + } + } + goto try_again; + case 8: + /* Specify Next Pool */ + if ((jcr->getJobType() == JT_BACKUP && /* Virtual full */ + jcr->is_JobLevel(L_VIRTUAL_FULL)) || + jcr->getJobType() == JT_COPY || + jcr->getJobType() == JT_MIGRATE) { + rc.next_pool = select_pool_resource(ua); + if (rc.next_pool) { + jcr->next_pool = rc.next_pool; + goto try_again; + } + } + /* Verify Job */ + if (jcr->getJobType() == JT_VERIFY) { + rc.verify_job = select_job_resource(ua); + if (rc.verify_job) { + jcr->verify_job = rc.verify_job; + } + goto try_again; + } + /* Where */ + if (!get_cmd(ua, _("Please enter the full path prefix for restore (/ for none): "))) { + break; + } + if (jcr->RegexWhere) { /* cannot use regexwhere and where */ + free(jcr->RegexWhere); + jcr->RegexWhere = NULL; + } + if (jcr->where) { + free(jcr->where); + jcr->where = NULL; + } + if (IsPathSeparator(ua->cmd[0]) && ua->cmd[1] == '\0') { + ua->cmd[0] = 0; + } + jcr->where = bstrdup(ua->cmd); + goto try_again; + case 9: + /* File relocation */ + select_where_regexp(ua, jcr); + goto try_again; + case 10: + /* Replace */ + start_prompt(ua, _("Replace:\n")); + for (i=0; ReplaceOptions[i].name; i++) { + add_prompt(ua, ReplaceOptions[i].name); + } + opt = do_prompt(ua, "", _("Select replace option"), NULL, 0); + if (opt >= 0) { + rc.replace = ReplaceOptions[opt].name; + jcr->replace = ReplaceOptions[opt].token; + } + goto try_again; + case 11: + /* JobId */ + rc.jid = NULL; /* force reprompt */ + jcr->RestoreJobId = 0; + if (jcr->RestoreBootstrap) { + ua->send_msg(_("You must set the bootstrap file to NULL to be able to specify a JobId.\n")); + } + goto try_again; + case 12: + + if (jcr->getJobType() == JT_RESTORE) { + plugin_config(ua, jcr, 
rc); + + } else { + //generate_plugin_event(jcr, bEventJobConfig, &rc); + + /* Plugin Options */ + if (!get_cmd(ua, _("Please Plugin Options string: "))) { + break; + } + if (jcr->plugin_options) { + free(jcr->plugin_options); + jcr->plugin_options = NULL; + } + jcr->plugin_options = bstrdup(ua->cmd); + } + goto try_again; + case -1: /* error or cancel */ + goto bail_out; + default: + goto try_again; + } + goto bail_out; + } + return 1; + +bail_out: + return -1; +try_again: + return 0; +} + + +/* Not a good idea to start a job with the Scratch pool. It creates all kind + * of recycling issues while the job is running. See Mantis #303 + */ +bool check_pool(int32_t JobType, int32_t JobLevel, POOL *pool, POOL *next_pool, + const char **name) +{ + if (JobType == JT_BACKUP) { + if (pool && strcmp(pool->name(), NT_("Scratch")) == 0) { + *name = NT_("Pool"); + return false; + } + } + /* The NextPool should also not be a Scratch pool */ + if (JobType == JT_MIGRATE || JobType == JT_COPY || + (JobType == JT_BACKUP && JobLevel == L_VIRTUAL_FULL)) { + if (next_pool && strcmp(next_pool->name(), NT_("Scratch")) == 0) { + *name = NT_("NextPool"); + return false; + } + } + return true; +} + +/* + * Put the run context that we have at this point into the JCR. + * That allows us to re-ask for the run context. + * This subroutine can be called multiple times, so it + * must keep any prior settings. + */ +static bool set_run_context_in_jcr(UAContext *ua, JCR *jcr, run_ctx &rc) +{ + int i; + + jcr->verify_job = rc.verify_job; + jcr->previous_job = rc.previous_job; + jcr->pool = rc.pool; + jcr->next_pool = rc.next_pool; + if (rc.next_pool) { + jcr->cmdline_next_pool_override = true; + } + if (rc.pool_name) { + pm_strcpy(jcr->pool_source, _("Command input")); + } else if (jcr->pool != jcr->job->pool) { + pm_strcpy(jcr->pool_source, _("User input")); + } + if (rc.next_pool_name) { + pm_strcpy(jcr->next_pool_source, _("Command input")); + } else if (jcr->next_pool == jcr->job->next_pool) { + pm_strcpy(jcr->next_pool_source, _("Job resource")); + } else if (jcr->next_pool != jcr->pool->NextPool) { + pm_strcpy(jcr->next_pool_source, _("User input")); + } + + set_rwstorage(jcr, rc.store); + jcr->client = rc.client; + if (jcr->client) { + pm_strcpy(jcr->client_name, rc.client->name()); + } else { + pm_strcpy(jcr->client_name, "**Dummy**"); + } + if (rc.media_type) { + if (!jcr->media_type) { + jcr->media_type = get_pool_memory(PM_NAME); + } + pm_strcpy(jcr->media_type, rc.media_type); + } + jcr->fileset = rc.fileset; + jcr->ExpectedFiles = rc.files; + if (rc.catalog) { + jcr->catalog = rc.catalog; + pm_strcpy(jcr->catalog_source, _("User input")); + } + + pm_strcpy(jcr->comment, rc.comment); + + if (rc.where) { + if (jcr->where) { + free(jcr->where); + } + jcr->where = bstrdup(rc.where); + rc.where = NULL; + } + + if (rc.regexwhere) { + if (jcr->RegexWhere) { + free(jcr->RegexWhere); + } + jcr->RegexWhere = bstrdup(rc.regexwhere); + rc.regexwhere = NULL; + } + + if (rc.when) { + utime_t duration; + jcr->sched_time = str_to_utime(rc.when); + if (jcr->sched_time == 0) { + if (duration_to_utime(rc.when, &duration)) { + jcr->sched_time = time(NULL) + duration; + } else { + ua->send_msg(_("Invalid time, using current time.\n")); + jcr->sched_time = time(NULL); + } + } + rc.when = NULL; + } + + if (rc.bootstrap) { + if (jcr->RestoreBootstrap) { + free(jcr->RestoreBootstrap); + } + jcr->RestoreBootstrap = bstrdup(rc.bootstrap); + rc.bootstrap = NULL; + } + + if (rc.plugin_options) { + if (jcr->plugin_options) { + 
free(jcr->plugin_options); + } + jcr->plugin_options = bstrdup(rc.plugin_options); + rc.plugin_options = NULL; + } + + if (rc.plugin_config) { + if (jcr->plugin_config) { + free_plugin_config_items(jcr->plugin_config); + delete jcr->plugin_config; + } + jcr->plugin_config = rc.plugin_config; + rc.plugin_config = NULL; + } + + if (rc.replace) { + jcr->replace = 0; + for (i=0; ReplaceOptions[i].name; i++) { + if (strcasecmp(rc.replace, ReplaceOptions[i].name) == 0) { + jcr->replace = ReplaceOptions[i].token; + } + } + if (!jcr->replace) { + ua->send_msg(_("Invalid replace option: %s\n"), rc.replace); + return false; + } + } else if (rc.job->replace) { + jcr->replace = rc.job->replace; + } else { + jcr->replace = REPLACE_ALWAYS; + } + rc.replace = NULL; + + /* Set Snapshot Retention (Job <- Client) */ + if (jcr->client) { + jcr->snapshot_retention = jcr->client->SnapRetention; + } + if (jcr->job && jcr->job->SnapRetention > 0) { + jcr->snapshot_retention = jcr->job->SnapRetention; + } + + if (rc.Priority) { + jcr->JobPriority = rc.Priority; + rc.Priority = 0; + } + + if (rc.since) { + if (!jcr->stime) { + jcr->stime = get_pool_memory(PM_MESSAGE); + } + pm_strcpy(jcr->stime, rc.since); + rc.since = NULL; + } + + if (rc.cloned) { + jcr->cloned = rc.cloned; + rc.cloned = false; + } + + /* If pool changed, update migration write storage */ + if (jcr->is_JobType(JT_MIGRATE) || jcr->is_JobType(JT_COPY) || + (jcr->is_JobType(JT_BACKUP) && jcr->is_JobLevel(L_VIRTUAL_FULL))) { + if (!set_mac_wstorage(ua, jcr, rc.pool, rc.next_pool, + jcr->next_pool_source)) { + return false; + } + } + rc.replace = ReplaceOptions[0].name; + for (i=0; ReplaceOptions[i].name; i++) { + if (ReplaceOptions[i].token == (int)jcr->replace) { + rc.replace = ReplaceOptions[i].name; + } + } + if (rc.level_name) { + if (!get_level_from_name(jcr, rc.level_name)) { + ua->send_msg(_("Level \"%s\" not valid.\n"), rc.level_name); + return false; + } + rc.level_name = NULL; + } + if (rc.jid) { + /* Note, this is also MigrateJobId and a VerifyJobId */ + jcr->RestoreJobId = str_to_int64(rc.jid); + + /* Copy also this parameter for VirtualFull in jcr->JobIds */ + if (!jcr->JobIds) { + jcr->JobIds = get_pool_memory(PM_FNAME); + } + pm_strcpy(jcr->JobIds, rc.jid); + jcr->use_all_JobIds = rc.alljobid; /* if we found the "alljobid=" kw */ + rc.alljobid = false; + rc.jid = 0; + } + + /* Some options are not available through the menu + * TODO: Add an advanced menu? 
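+ * (At present these command-line-only settings are spooldata, accurate and ignoreduplicatecheck; they are applied just below whenever their *_set markers are true.)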
+ */ + if (rc.spool_data_set) { + jcr->spool_data = rc.spool_data; + } + + if (rc.accurate_set) { + jcr->accurate = rc.accurate; + } + + /* Used by migration jobs that can have the same name, + * but can run at the same time + */ + if (rc.ignoreduplicatecheck_set) { + jcr->IgnoreDuplicateJobChecking = rc.ignoreduplicatecheck; + } + + /* Do not start a Backup job from the Scratch Pool */ + const char *name; + if (!check_pool(jcr->getJobType(), jcr->getJobLevel(), + rc.pool, rc.next_pool, &name)) { + ua->send_msg(_("%s \"Scratch\" not valid in Job \"%s\".\n"), + name, rc.job->name()); + return false; + } + + return true; +} + +static void select_where_regexp(UAContext *ua, JCR *jcr) +{ + alist *regs; + char *strip_prefix, *add_prefix, *add_suffix, *rwhere; + strip_prefix = add_suffix = rwhere = add_prefix = NULL; + +try_again_reg: + ua->send_msg(_("strip_prefix=%s add_prefix=%s add_suffix=%s\n"), + NPRT(strip_prefix), NPRT(add_prefix), NPRT(add_suffix)); + + start_prompt(ua, _("This will replace your current Where value\n")); + add_prompt(ua, _("Strip prefix")); /* 0 */ + add_prompt(ua, _("Add prefix")); /* 1 */ + add_prompt(ua, _("Add file suffix")); /* 2 */ + add_prompt(ua, _("Enter a regexp")); /* 3 */ + add_prompt(ua, _("Test filename manipulation")); /* 4 */ + add_prompt(ua, _("Use this ?")); /* 5 */ + + switch (do_prompt(ua, "", _("Select parameter to modify"), NULL, 0)) { + case 0: + /* Strip prefix */ + if (get_cmd(ua, _("Please enter the path prefix to strip: "))) { + if (strip_prefix) bfree(strip_prefix); + strip_prefix = bstrdup(ua->cmd); + } + + goto try_again_reg; + case 1: + /* Add prefix */ + if (get_cmd(ua, _("Please enter the path prefix to add (/ for none): "))) { + if (IsPathSeparator(ua->cmd[0]) && ua->cmd[1] == '\0') { + ua->cmd[0] = 0; + } + + if (add_prefix) bfree(add_prefix); + add_prefix = bstrdup(ua->cmd); + } + goto try_again_reg; + case 2: + /* Add suffix */ + if (get_cmd(ua, _("Please enter the file suffix to add: "))) { + if (add_suffix) bfree(add_suffix); + add_suffix = bstrdup(ua->cmd); + } + goto try_again_reg; + case 3: + /* Add rwhere */ + if (get_cmd(ua, _("Please enter a valid regexp (!from!to!): "))) { + if (rwhere) bfree(rwhere); + rwhere = bstrdup(ua->cmd); + } + + goto try_again_reg; + case 4: + /* Test regexp */ + char *result; + char *regexp; + + if (rwhere && rwhere[0] != '\0') { + regs = get_bregexps(rwhere); + ua->send_msg(_("regexwhere=%s\n"), NPRT(rwhere)); + } else { + int len = bregexp_get_build_where_size(strip_prefix, add_prefix, add_suffix); + regexp = (char *) bmalloc (len * sizeof(char)); + bregexp_build_where(regexp, len, strip_prefix, add_prefix, add_suffix); + regs = get_bregexps(regexp); + ua->send_msg(_("strip_prefix=%s add_prefix=%s add_suffix=%s result=%s\n"), + NPRT(strip_prefix), NPRT(add_prefix), NPRT(add_suffix), NPRT(regexp)); + + bfree(regexp); + } + + if (!regs) { + ua->send_msg(_("Cannot use your regexp\n")); + goto try_again_reg; + } + ua->send_msg(_("Enter a period (.) 
to stop this test\n")); + while (get_cmd(ua, _("Please enter filename to test: "))) { + apply_bregexps(ua->cmd, regs, &result); + ua->send_msg(_("%s -> %s\n"), ua->cmd, result); + } + free_bregexps(regs); + delete regs; + goto try_again_reg; + + case 5: + /* OK */ + break; + case -1: /* error or cancel */ + goto bail_out_reg; + default: + goto try_again_reg; + } + + /* replace the existing where */ + if (jcr->where) { + bfree(jcr->where); + jcr->where = NULL; + } + + /* replace the existing regexwhere */ + if (jcr->RegexWhere) { + bfree(jcr->RegexWhere); + jcr->RegexWhere = NULL; + } + + if (rwhere) { + jcr->RegexWhere = bstrdup(rwhere); + } else if (strip_prefix || add_prefix || add_suffix) { + int len = bregexp_get_build_where_size(strip_prefix, add_prefix, add_suffix); + jcr->RegexWhere = (char *) bmalloc(len*sizeof(char)); + bregexp_build_where(jcr->RegexWhere, len, strip_prefix, add_prefix, add_suffix); + } + + regs = get_bregexps(jcr->RegexWhere); + if (regs) { + free_bregexps(regs); + delete regs; + } else { + if (jcr->RegexWhere) { + bfree(jcr->RegexWhere); + jcr->RegexWhere = NULL; + } + ua->send_msg(_("Cannot use your regexp.\n")); + } + +bail_out_reg: + if (strip_prefix) bfree(strip_prefix); + if (add_prefix) bfree(add_prefix); + if (add_suffix) bfree(add_suffix); + if (rwhere) bfree(rwhere); +} + +static void select_job_level(UAContext *ua, JCR *jcr) +{ + if (jcr->getJobType() == JT_BACKUP) { + start_prompt(ua, _("Levels:\n")); +// add_prompt(ua, _("Base")); + add_prompt(ua, _("Full")); + add_prompt(ua, _("Incremental")); + add_prompt(ua, _("Differential")); + add_prompt(ua, _("Since")); + add_prompt(ua, _("VirtualFull")); + switch (do_prompt(ua, "", _("Select level"), NULL, 0)) { +// case 0: +// jcr->JobLevel = L_BASE; +// break; + case 0: + jcr->setJobLevel(L_FULL); + break; + case 1: + jcr->setJobLevel(L_INCREMENTAL); + break; + case 2: + jcr->setJobLevel(L_DIFFERENTIAL); + break; + case 3: + jcr->setJobLevel(L_SINCE); + break; + case 4: + jcr->setJobLevel(L_VIRTUAL_FULL); + break; + default: + break; + } + } else if (jcr->getJobType() == JT_VERIFY) { + start_prompt(ua, _("Levels:\n")); + add_prompt(ua, _("Initialize Catalog")); + add_prompt(ua, _("Verify Catalog")); + add_prompt(ua, _("Verify Volume to Catalog")); + add_prompt(ua, _("Verify Disk to Catalog")); + add_prompt(ua, _("Verify Volume Data")); + switch (do_prompt(ua, "", _("Select level"), NULL, 0)) { + case 0: + jcr->setJobLevel(L_VERIFY_INIT); + break; + case 1: + jcr->setJobLevel(L_VERIFY_CATALOG); + break; + case 2: + jcr->setJobLevel(L_VERIFY_VOLUME_TO_CATALOG); + break; + case 3: + jcr->setJobLevel(L_VERIFY_DISK_TO_CATALOG); + break; + case 4: + jcr->setJobLevel(L_VERIFY_DATA); + break; + default: + break; + } + } else { + ua->warning_msg(_("Level not appropriate for this Job. 
Cannot be changed.\n")); + } + return; +} + +static bool display_job_parameters(UAContext *ua, JCR *jcr, JOB *job, const char *verify_list, + char *jid, const char *replace, char *client_name) +{ + char ec1[30], edl[50]; + char dt[MAX_TIME_LENGTH]; + + Dmsg1(800, "JobType=%c\n", jcr->getJobType()); + switch (jcr->getJobType()) { + case JT_ADMIN: + if (ua->api) { + ua->signal(BNET_RUN_CMD); + ua->send_msg("Type: Admin\n" + "Title: Run Admin Job\n" + "JobName: %s\n" + "FileSet: %s\n" + "Client: %s\n" + "Storage: %s\n" + "When: %s\n" + "Priority: %d\n", + job->name(), + jcr->fileset->name(), + NPRT(jcr->client->name()), + jcr->wstore?jcr->wstore->name():"*None*", + bstrutime(dt, sizeof(dt), jcr->sched_time), + jcr->JobPriority); + } else { + ua->send_msg(_("Run Admin Job\n" + "JobName: %s\n" + "FileSet: %s\n" + "Client: %s\n" + "Storage: %s\n" + "When: %s\n" + "Priority: %d\n"), + job->name(), + jcr->fileset->name(), + NPRT(jcr->client->name()), + jcr->wstore?jcr->wstore->name():"*None*", + bstrutime(dt, sizeof(dt), jcr->sched_time), + jcr->JobPriority); + } + jcr->setJobLevel(L_FULL); + break; + case JT_BACKUP: + case JT_VERIFY: + char next_pool[MAX_NAME_LENGTH + 50]; + next_pool[0] = 0; + if (jcr->getJobType() == JT_BACKUP) { + if (ua->api) { + ua->signal(BNET_RUN_CMD); + if (jcr->is_JobLevel(L_VIRTUAL_FULL)) { + bsnprintf(next_pool, sizeof(next_pool), "NextPool: %s\n", + jcr->next_pool ? jcr->next_pool->name() : "*None*"); + } + ua->send_msg("Type: Backup\n" + "Title: Run Backup Job\n" + "JobName: %s\n" + "Level: %s\n" + "Client: %s\n" + "FileSet: %s\n" + "Pool: %s\n" + "%s" + "Storage: %s\n" + "When: %s\n" + "Priority: %d\n" + "%s%s%s", + job->name(), + level_to_str(edl, sizeof(edl), jcr->getJobLevel()), + jcr->client->name(), + jcr->fileset->name(), + NPRT(jcr->pool->name()), + next_pool, + jcr->wstore?jcr->wstore->name():"*None*", + bstrutime(dt, sizeof(dt), jcr->sched_time), + jcr->JobPriority, + jcr->plugin_options?"Plugin Options: ":"", + jcr->plugin_options?jcr->plugin_options:"", + jcr->plugin_options?"\n":""); + } else { + if (jcr->is_JobLevel(L_VIRTUAL_FULL)) { + bsnprintf(next_pool, sizeof(next_pool), + "NextPool: %s (From %s)\n", + jcr->next_pool ? jcr->next_pool->name() : "*None*", + jcr->next_pool_source); + } + ua->send_msg(_("Run Backup job\n" + "JobName: %s\n" + "Level: %s\n" + "Client: %s\n" + "FileSet: %s\n" + "Pool: %s (From %s)\n" + "%s" + "Storage: %s (From %s)\n" + "When: %s\n" + "Priority: %d\n" + "%s%s%s"), + job->name(), + level_to_str(edl, sizeof(edl), jcr->getJobLevel()), + jcr->client->name(), + jcr->fileset->name(), + NPRT(jcr->pool->name()), jcr->pool_source, + next_pool, + jcr->wstore?jcr->wstore->name():"*None*", jcr->wstore_source, + bstrutime(dt, sizeof(dt), jcr->sched_time), + jcr->JobPriority, + jcr->plugin_options?"Plugin Options: ":"", + jcr->plugin_options?jcr->plugin_options:"", + jcr->plugin_options?"\n":""); + } + } else { /* JT_VERIFY */ + JOB_DBR jr; + const char *Name; + if (jcr->verify_job) { + Name = jcr->verify_job->name(); + } else if (jcr->RestoreJobId) { /* Display job name if jobid requested */ + memset(&jr, 0, sizeof(jr)); + jr.JobId = jcr->RestoreJobId; + if (!db_get_job_record(jcr, ua->db, &jr)) { + ua->error_msg(_("Could not get job record for selected JobId. 
ERR=%s"), + db_strerror(ua->db)); + return false; + } + Name = jr.Job; + } else { + Name = ""; + } + if (!verify_list) { + verify_list = job->WriteVerifyList; + } + if (!verify_list) { + verify_list = ""; + } + if (ua->api) { + ua->signal(BNET_RUN_CMD); + ua->send_msg("Type: Verify\n" + "Title: Run Verify Job\n" + "JobName: %s\n" + "Level: %s\n" + "Client: %s\n" + "FileSet: %s\n" + "Pool: %s (From %s)\n" + "Storage: %s (From %s)\n" + "Verify Job: %s\n" + "Verify List: %s\n" + "When: %s\n" + "Priority: %d\n", + job->name(), + level_to_str(edl, sizeof(edl), jcr->getJobLevel()), + jcr->client->name(), + jcr->fileset->name(), + NPRT(jcr->pool->name()), jcr->pool_source, + jcr->rstore->name(), jcr->rstore_source, + Name, + verify_list, + bstrutime(dt, sizeof(dt), jcr->sched_time), + jcr->JobPriority); + } else { + ua->send_msg(_("Run Verify Job\n" + "JobName: %s\n" + "Level: %s\n" + "Client: %s\n" + "FileSet: %s\n" + "Pool: %s (From %s)\n" + "Storage: %s (From %s)\n" + "Verify Job: %s\n" + "Verify List: %s\n" + "When: %s\n" + "Priority: %d\n"), + job->name(), + level_to_str(edl, sizeof(edl), jcr->getJobLevel()), + jcr->client->name(), + jcr->fileset->name(), + NPRT(jcr->pool->name()), jcr->pool_source, + jcr->rstore->name(), jcr->rstore_source, + Name, + verify_list, + bstrutime(dt, sizeof(dt), jcr->sched_time), + jcr->JobPriority); + } + } + break; + case JT_RESTORE: + if (jcr->RestoreJobId == 0 && !jcr->RestoreBootstrap) { + if (jid) { + jcr->RestoreJobId = str_to_int64(jid); + } else { + if (!get_pint(ua, _("Please enter a JobId for restore: "))) { + return false; + } + jcr->RestoreJobId = ua->int64_val; + } + } + jcr->setJobLevel(L_FULL); /* default level */ + Dmsg1(800, "JobId to restore=%d\n", jcr->RestoreJobId); + if (jcr->RestoreJobId == 0) { + /* RegexWhere is take before RestoreWhere */ + if (jcr->RegexWhere || (job->RegexWhere && !jcr->where)) { + if (ua->api) { + ua->signal(BNET_RUN_CMD); + ua->send_msg("Type: Restore\n" + "Title: Run Restore Job\n" + "JobName: %s\n" + "Bootstrap: %s\n" + "RegexWhere: %s\n" + "Replace: %s\n" + "FileSet: %s\n" + "Backup Client: %s\n" + "Restore Client: %s\n" + "Storage: %s\n" + "When: %s\n" + "Catalog: %s\n" + "Priority: %d\n" + "Plugin Options: %s\n", + job->name(), + NPRT(jcr->RestoreBootstrap), + jcr->RegexWhere?jcr->RegexWhere:job->RegexWhere, + replace, + jcr->fileset->name(), + client_name, + jcr->client->name(), + jcr->rstore->name(), + bstrutime(dt, sizeof(dt), jcr->sched_time), + jcr->catalog->name(), + jcr->JobPriority, + (jcr->plugin_config && jcr->plugin_config->size() > 0) ? + _("User specified") : _("*None*")); + } else { + ua->send_msg(_("Run Restore job\n" + "JobName: %s\n" + "Bootstrap: %s\n" + "RegexWhere: %s\n" + "Replace: %s\n" + "FileSet: %s\n" + "Backup Client: %s\n" + "Restore Client: %s\n" + "Storage: %s\n" + "When: %s\n" + "Catalog: %s\n" + "Priority: %d\n" + "Plugin Options: %s\n"), + job->name(), + NPRT(jcr->RestoreBootstrap), + jcr->RegexWhere?jcr->RegexWhere:job->RegexWhere, + replace, + jcr->fileset->name(), + client_name, + jcr->client->name(), + jcr->rstore->name(), + bstrutime(dt, sizeof(dt), jcr->sched_time), + jcr->catalog->name(), + jcr->JobPriority, + (jcr->plugin_config && jcr->plugin_config->size() > 0) ? 
+ _("User specified") : _("*None*")); + } + } else { + if (ua->api) { + ua->signal(BNET_RUN_CMD); + ua->send_msg("Type: Restore\n" + "Title: Run Restore job\n" + "JobName: %s\n" + "Bootstrap: %s\n" + "Where: %s\n" + "Replace: %s\n" + "FileSet: %s\n" + "Backup Client: %s\n" + "Restore Client: %s\n" + "Storage: %s\n" + "When: %s\n" + "Catalog: %s\n" + "Priority: %d\n" + "Plugin Options: %s\n", + job->name(), + NPRT(jcr->RestoreBootstrap), + jcr->where?jcr->where:NPRT(job->RestoreWhere), + replace, + jcr->fileset->name(), + client_name, + jcr->client->name(), + jcr->rstore->name(), + bstrutime(dt, sizeof(dt), jcr->sched_time), + jcr->catalog->name(), + jcr->JobPriority, + (jcr->plugin_config && jcr->plugin_config->size() > 0) ? + _("User specified") : _("*None*")); + } else { + ua->send_msg(_("Run Restore job\n" + "JobName: %s\n" + "Bootstrap: %s\n" + "Where: %s\n" + "Replace: %s\n" + "FileSet: %s\n" + "Backup Client: %s\n" + "Restore Client: %s\n" + "Storage: %s\n" + "When: %s\n" + "Catalog: %s\n" + "Priority: %d\n" + "Plugin Options: %s\n"), + job->name(), + NPRT(jcr->RestoreBootstrap), + jcr->where?jcr->where:NPRT(job->RestoreWhere), + replace, + jcr->fileset->name(), + client_name, + jcr->client->name(), + jcr->rstore->name(), + bstrutime(dt, sizeof(dt), jcr->sched_time), + jcr->catalog->name(), + jcr->JobPriority, + (jcr->plugin_config && jcr->plugin_config->size() > 0) ? + _("User specified") : _("*None*")); + } + } + + } else { + /* ***FIXME*** This needs to be fixed for bat */ + if (ua->api) ua->signal(BNET_RUN_CMD); + ua->send_msg(_("Run Restore job\n" + "JobName: %s\n" + "Bootstrap: %s\n"), + job->name(), + NPRT(jcr->RestoreBootstrap)); + + /* RegexWhere is take before RestoreWhere */ + if (jcr->RegexWhere || (job->RegexWhere && !jcr->where)) { + ua->send_msg(_("RegexWhere: %s\n"), + jcr->RegexWhere?jcr->RegexWhere:job->RegexWhere); + } else { + ua->send_msg(_("Where: %s\n"), + jcr->where?jcr->where:NPRT(job->RestoreWhere)); + } + + ua->send_msg(_("Replace: %s\n" + "Client: %s\n" + "Storage: %s\n" + "JobId: %s\n" + "When: %s\n" + "Catalog: %s\n" + "Priority: %d\n" + "Plugin Options: %s\n"), + replace, + jcr->client->name(), + jcr->rstore->name(), + jcr->RestoreJobId==0?"*None*":edit_uint64(jcr->RestoreJobId, ec1), + bstrutime(dt, sizeof(dt), jcr->sched_time), + jcr->catalog->name(), + jcr->JobPriority, + (jcr->plugin_config && jcr->plugin_config->size() > 0) ? 
+ _("User specified") : _("*None*")); + } + break; + case JT_COPY: + case JT_MIGRATE: + const char *prt_type; + jcr->setJobLevel(L_FULL); /* default level */ + if (ua->api) { + ua->signal(BNET_RUN_CMD); + if (jcr->getJobType() == JT_COPY) { + prt_type = (char *)"Type: Copy\nTitle: Run Copy Job\n"; + } else { + prt_type = (char *)"Type: Migration\nTitle: Run Migration Job\n"; + } + ua->send_msg("%s" + "JobName: %s\n" + "Bootstrap: %s\n" + "Client: %s\n" + "FileSet: %s\n" + "Pool: %s\n" + "NextPool: %s\n" + "Read Storage: %s\n" + "Write Storage: %s\n" + "JobId: %s\n" + "When: %s\n" + "Catalog: %s\n" + "Priority: %d\n", + prt_type, + job->name(), + NPRT(jcr->RestoreBootstrap), + jcr->client->name(), + jcr->fileset->name(), + NPRT(jcr->pool->name()), + jcr->next_pool?jcr->next_pool->name():"*None*", + jcr->rstore->name(), + jcr->wstore?jcr->wstore->name():"*None*", + jcr->MigrateJobId==0?"*None*":edit_uint64(jcr->MigrateJobId, ec1), + bstrutime(dt, sizeof(dt), jcr->sched_time), + jcr->catalog->name(), + jcr->JobPriority); + } else { + if (jcr->getJobType() == JT_COPY) { + prt_type = _("Run Copy job\n"); + } else { + prt_type = _("Run Migration job\n"); + } + ua->send_msg("%s" + "JobName: %s\n" + "Bootstrap: %s\n" + "Client: %s\n" + "FileSet: %s\n" + "Pool: %s (From %s)\n" + "NextPool: %s (From %s)\n" + "Read Storage: %s (From %s)\n" + "Write Storage: %s (From %s)\n" + "JobId: %s\n" + "When: %s\n" + "Catalog: %s\n" + "Priority: %d\n", + prt_type, + job->name(), + NPRT(jcr->RestoreBootstrap), + jcr->client->name(), + jcr->fileset->name(), + NPRT(jcr->pool->name()), jcr->pool_source, + jcr->next_pool?jcr->next_pool->name():"*None*", + NPRT(jcr->next_pool_source), + jcr->rstore->name(), jcr->rstore_source, + jcr->wstore?jcr->wstore->name():"*None*", jcr->wstore_source, + jcr->MigrateJobId==0?"*None*":edit_uint64(jcr->MigrateJobId, ec1), + bstrutime(dt, sizeof(dt), jcr->sched_time), + jcr->catalog->name(), + jcr->JobPriority); + } + break; + default: + ua->error_msg(_("Unknown Job Type=%d\n"), jcr->getJobType()); + return false; + } + return true; +} + + +static bool scan_run_command_line_arguments(UAContext *ua, run_ctx &rc) +{ + bool kw_ok; + int i, j; + static const char *kw[] = { /* command line arguments */ + "alljobid", /* 0 Used in a switch() */ + "jobid", /* 1 */ + "client", /* 2 */ + "fd", + "fileset", /* 4 */ + "level", /* 5 */ + "storage", /* 6 */ + "sd", /* 7 */ + "regexwhere", /* 8 where string as a bregexp */ + "where", /* 9 */ + "bootstrap", /* 10 */ + "replace", /* 11 */ + "when", /* 12 */ + "priority", /* 13 */ + "yes", /* 14 -- if you change this change YES_POS too */ + "verifyjob", /* 15 */ + "files", /* 16 number of files to restore */ + "catalog", /* 17 override catalog */ + "since", /* 18 since */ + "cloned", /* 19 cloned */ + "verifylist", /* 20 verify output list */ + "migrationjob", /* 21 migration job name */ + "pool", /* 22 */ + "backupclient", /* 23 */ + "restoreclient", /* 24 */ + "pluginoptions", /* 25 */ + "spooldata", /* 26 */ + "comment", /* 27 */ + "ignoreduplicatecheck", /* 28 */ + "accurate", /* 29 */ + "job", /* 30 */ + "mediatype", /* 31 */ + "nextpool", /* 32 override next pool name */ + "fdcalled", /* 33 */ + + NULL}; + +#define YES_POS 14 + + rc.catalog_name = NULL; + rc.job_name = NULL; + rc.pool_name = NULL; + rc.next_pool_name = NULL; + rc.store_name = NULL; + rc.client_name = NULL; + rc.media_type = NULL; + rc.restore_client_name = NULL; + rc.fileset_name = NULL; + rc.verify_job_name = NULL; + rc.previous_job_name = NULL; + rc.accurate_set = false; + 
rc.spool_data_set = false; + rc.ignoreduplicatecheck = false; + rc.comment = NULL; + free_plugin_config_items(rc.plugin_config); + + for (i=1; i<ua->argc; i++) { + Dmsg2(800, "Doing arg %d = %s\n", i, ua->argk[i]); + kw_ok = false; + /* Keep looking until we find a good keyword */ + for (j=0; !kw_ok && kw[j]; j++) { + if (strcasecmp(ua->argk[i], kw[j]) == 0) { + /* Note, yes and run have no value, so do not fail */ + if (!ua->argv[i] && j != YES_POS /*yes*/) { + ua->send_msg(_("Value missing for keyword %s\n"), ua->argk[i]); + return false; + } + Dmsg2(800, "Got j=%d keyword=%s\n", j, NPRT(kw[j])); + switch (j) { + case 0: /* alljobid */ + rc.alljobid = true; + /* Fall through wanted */ + case 1: /* JobId */ + if (rc.jid && !rc.mod) { + ua->send_msg(_("JobId specified twice.\n")); + return false; + } + rc.jid = ua->argv[i]; + kw_ok = true; + break; + case 2: /* client */ + case 3: /* fd */ + if (rc.client_name) { + ua->send_msg(_("Client specified twice.\n")); + return false; + } + rc.client_name = ua->argv[i]; + kw_ok = true; + break; + case 4: /* fileset */ + if (rc.fileset_name) { + ua->send_msg(_("FileSet specified twice.\n")); + return false; + } + rc.fileset_name = ua->argv[i]; + kw_ok = true; + break; + case 5: /* level */ + if (rc.level_name) { + ua->send_msg(_("Level specified twice.\n")); + return false; + } + rc.level_name = ua->argv[i]; + kw_ok = true; + break; + case 6: /* storage */ + case 7: /* sd */ + if (rc.store_name) { + ua->send_msg(_("Storage specified twice.\n")); + return false; + } + rc.store_name = ua->argv[i]; + kw_ok = true; + break; + case 8: /* regexwhere */ + if ((rc.regexwhere || rc.where) && !rc.mod) { + ua->send_msg(_("RegexWhere or Where specified twice.\n")); + return false; + } + rc.regexwhere = ua->argv[i]; + if (!acl_access_ok(ua, Where_ACL, rc.regexwhere)) { + ua->send_msg(_("No authorization for \"regexwhere\" specification.\n")); + return false; + } + kw_ok = true; + break; + case 9: /* where */ + if ((rc.where || rc.regexwhere) && !rc.mod) { + ua->send_msg(_("Where or RegexWhere specified twice.\n")); + return false; + } + rc.where = ua->argv[i]; + if (!acl_access_ok(ua, Where_ACL, rc.where)) { + ua->send_msg(_("No authoriztion for \"where\" specification.\n")); + return false; + } + kw_ok = true; + break; + case 10: /* bootstrap */ + if (rc.bootstrap && !rc.mod) { + ua->send_msg(_("Bootstrap specified twice.\n")); + return false; + } + rc.bootstrap = ua->argv[i]; + kw_ok = true; + break; + case 11: /* replace */ + if (rc.replace && !rc.mod) { + ua->send_msg(_("Replace specified twice.\n")); + return false; + } + rc.replace = ua->argv[i]; + kw_ok = true; + break; + case 12: /* When */ + if (rc.when && !rc.mod) { + ua->send_msg(_("When specified twice.\n")); + return false; + } + rc.when = ua->argv[i]; + kw_ok = true; + break; + case 13: /* Priority */ + if (rc.Priority && !rc.mod) { + ua->send_msg(_("Priority specified twice.\n")); + return false; + } + rc.Priority = atoi(ua->argv[i]); + if (rc.Priority <= 0) { + ua->send_msg(_("Priority must be positive nonzero setting it to 10.\n")); + rc.Priority = 10; + } + kw_ok = true; + break; + case 14: /* yes */ + kw_ok = true; + break; + case 15: /* Verify Job */ + if (rc.verify_job_name) { + ua->send_msg(_("Verify Job specified twice.\n")); + return false; + } + rc.verify_job_name = ua->argv[i]; + kw_ok = true; + break; + case 16: /* files */ + rc.files = atoi(ua->argv[i]); + kw_ok = true; + break; + case 17: /* catalog */ + rc.catalog_name = ua->argv[i]; + kw_ok = true; + break; + case 18: /* since */ +
+               rc.since = ua->argv[i];
+               kw_ok = true;
+               break;
+            case 19: /* cloned */
+               rc.cloned = true;
+               kw_ok = true;
+               break;
+            case 20: /* write verify list output */
+               rc.verify_list = ua->argv[i];
+               kw_ok = true;
+               break;
+            case 21: /* Migration Job */
+               if (rc.previous_job_name) {
+                  ua->send_msg(_("Migration Job specified twice.\n"));
+                  return false;
+               }
+               rc.previous_job_name = ua->argv[i];
+               kw_ok = true;
+               break;
+            case 22: /* pool */
+               if (rc.pool_name) {
+                  ua->send_msg(_("Pool specified twice.\n"));
+                  return false;
+               }
+               rc.pool_name = ua->argv[i];
+               kw_ok = true;
+               break;
+            case 23: /* backupclient */
+               if (rc.client_name) {
+                  ua->send_msg(_("Client specified twice.\n"));
+                  return false;
+               }
+               rc.client_name = ua->argv[i];
+               kw_ok = true;
+               break;
+            case 24: /* restoreclient */
+               if (rc.restore_client_name && !rc.mod) {
+                  ua->send_msg(_("Restore Client specified twice.\n"));
+                  return false;
+               }
+               rc.restore_client_name = ua->argv[i];
+               kw_ok = true;
+               break;
+            case 25: /* pluginoptions */
+               ua->send_msg(_("Plugin Options not yet implemented.\n"));
+               return false;
+               if (rc.plugin_options) {
+                  ua->send_msg(_("Plugin Options specified twice.\n"));
+                  return false;
+               }
+               rc.plugin_options = ua->argv[i];
+               if (!acl_access_ok(ua, PluginOptions_ACL, rc.plugin_options)) {
+                  ua->send_msg(_("No authorization for \"PluginOptions\" specification.\n"));
+                  return false;
+               }
+               kw_ok = true;
+               break;
+            case 26: /* spooldata */
+               if (rc.spool_data_set) {
+                  ua->send_msg(_("Spool flag specified twice.\n"));
+                  return false;
+               }
+               if (is_yesno(ua->argv[i], &rc.spool_data)) {
+                  rc.spool_data_set = true;
+                  kw_ok = true;
+               } else {
+                  ua->send_msg(_("Invalid spooldata flag.\n"));
+               }
+               break;
+            case 27: /* comment */
+               rc.comment = ua->argv[i];
+               kw_ok = true;
+               break;
+            case 28: /* ignoreduplicatecheck */
+               if (rc.ignoreduplicatecheck_set) {
+                  ua->send_msg(_("IgnoreDuplicateCheck flag specified twice.\n"));
+                  return false;
+               }
+               if (is_yesno(ua->argv[i], &rc.ignoreduplicatecheck)) {
+                  rc.ignoreduplicatecheck_set = true;
+                  kw_ok = true;
+               } else {
+                  ua->send_msg(_("Invalid ignoreduplicatecheck flag.\n"));
+               }
+               break;
+            case 29: /* accurate */
+               if (rc.accurate_set) {
+                  ua->send_msg(_("Accurate flag specified twice.\n"));
+                  return false;
+               }
+               if (is_yesno(ua->argv[i], &rc.accurate)) {
+                  rc.accurate_set = true;
+                  kw_ok = true;
+               } else {
+                  ua->send_msg(_("Invalid accurate flag.\n"));
+               }
+               break;
+            case 30: /* job */
+               if (rc.job_name) {
+                  ua->send_msg(_("Job name specified twice.\n"));
+                  return false;
+               }
+               rc.job_name = ua->argv[i];
+               kw_ok = true;
+               break;
+            case 31: /* mediatype */
+               if (rc.media_type) {
+                  ua->send_msg(_("Media Type specified twice.\n"));
+                  return false;
+               }
+               rc.media_type = ua->argv[i];
+               kw_ok = true;
+               break;
+            case 32: /* Next Pool */
+               if (rc.next_pool_name) {
+                  ua->send_msg(_("NextPool specified twice.\n"));
+                  return false;
+               }
+               rc.next_pool_name = ua->argv[i];
+               kw_ok = true;
+               break;
+            case 33: /* fdcalled */
+               kw_ok = true;
+               break;
+            default:
+               break;
+            }
+         } /* end strcase compare */
+      } /* end keyword loop */
+
+      /*
+       * End of keyword for loop -- if not found, we got a bogus keyword
+       */
+      if (!kw_ok) {
+         Dmsg1(800, "%s not found\n", ua->argk[i]);
+         /*
+          * Special case for Job Name, it can be the first
+          * keyword that has no value.
+ */ + if (!rc.job_name && !ua->argv[i]) { + rc.job_name = ua->argk[i]; /* use keyword as job name */ + Dmsg1(800, "Set jobname=%s\n", rc.job_name); + } else { + ua->send_msg(_("Invalid keyword: %s\n"), ua->argk[i]); + return false; + } + } + } /* end argc loop */ + + Dmsg0(800, "Done scan.\n"); + if (rc.comment) { + if (!is_comment_legal(ua, rc.comment)) { + return false; + } + } + if (rc.catalog_name) { + rc.catalog = GetCatalogResWithName(rc.catalog_name); + if (rc.catalog == NULL) { + ua->error_msg(_("Catalog \"%s\" not found\n"), rc.catalog_name); + return false; + } + if (!acl_access_ok(ua, Catalog_ACL, rc.catalog->name())) { + ua->error_msg(_("No authorization. Catalog \"%s\".\n"), rc.catalog->name()); + return false; + } + } + Dmsg1(800, "Using catalog=%s\n", NPRT(rc.catalog_name)); + + if (!get_job(ua, rc)) { + return false; + } + + if (!get_pool(ua, rc)) { + return false; + } + + if (!get_next_pool(ua, rc)) { + return false; + } + + if (!get_storage(ua, rc)) { + return false; + } + + + if (!get_client(ua, rc)) { + return false; + } + + if (!get_fileset(ua, rc)) { + return false; + } + + if (rc.verify_job_name) { + rc.verify_job = GetJobResWithName(rc.verify_job_name); + if (!rc.verify_job) { + ua->send_msg(_("Verify Job \"%s\" not found.\n"), rc.verify_job_name); + rc.verify_job = select_job_resource(ua); + } + } else if (!rc.verify_job) { + rc.verify_job = rc.job->verify_job; + } + + if (rc.previous_job_name) { + rc.previous_job = GetJobResWithName(rc.previous_job_name); + if (!rc.previous_job) { + ua->send_msg(_("Migration Job \"%s\" not found.\n"), rc.previous_job_name); + rc.previous_job = select_job_resource(ua); + } + } else { + rc.previous_job = rc.job->verify_job; + } + return true; +} diff --git a/src/dird/ua_select.c b/src/dird/ua_select.c new file mode 100644 index 00000000..1122f12a --- /dev/null +++ b/src/dird/ua_select.c @@ -0,0 +1,1662 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * Bacula Director -- User Agent Prompt and Selection code + * + * Kern Sibbald, October MMI + * + */ + +#include "bacula.h" +#include "dird.h" + +/* Imported variables */ +extern struct s_jl joblevels[]; + +int confirm_retention_yesno(UAContext *ua, utime_t ret, const char *msg) +{ + char ed1[100]; + int val; + + /* Look for "yes" in command line */ + if (find_arg(ua, NT_("yes")) != -1) { + return 1; + } + + for ( ;; ) { + ua->info_msg(_("The current %s retention period is: %s\n"), + msg, edit_utime(ret, ed1, sizeof(ed1))); + if (!get_cmd(ua, _("Continue? 
(yes/no): "))) { + return 0; + } + if (is_yesno(ua->cmd, &val)) { + return val; /* is 1 for yes, 0 for no */ + } + } + return 1; +} + +/* + * Confirm a retention period + */ +int confirm_retention(UAContext *ua, utime_t *ret, const char *msg) +{ + char ed1[100]; + int val; + + /* Look for "yes" in command line */ + if (find_arg(ua, NT_("yes")) != -1) { + return 1; + } + + for ( ;; ) { + ua->info_msg(_("The current %s retention period is: %s\n"), + msg, edit_utime(*ret, ed1, sizeof(ed1))); + + if (!get_cmd(ua, _("Continue? (yes/mod/no): "))) { + return 0; + } + if (strcasecmp(ua->cmd, _("mod")) == 0) { + if (!get_cmd(ua, _("Enter new retention period: "))) { + return 0; + } + if (!duration_to_utime(ua->cmd, ret)) { + ua->error_msg(_("Invalid period.\n")); + continue; + } + continue; + } + if (is_yesno(ua->cmd, &val)) { + return val; /* is 1 for yes, 0 for no */ + } + } + return 1; +} + +/* + * Given a list of keywords, find the first one + * that is in the argument list. + * Returns: -1 if not found + * index into list (base 0) on success + */ +int find_arg_keyword(UAContext *ua, const char **list) +{ + for (int i=1; iargc; i++) { + for(int j=0; list[j]; j++) { + if (strcasecmp(list[j], ua->argk[i]) == 0) { + return j; + } + } + } + return -1; +} + +/* + * Given one keyword, find the first one that + * is in the argument list. + * Returns: argk index (always gt 0) + * -1 if not found + */ +int find_arg(UAContext *ua, const char *keyword) +{ + for (int i=1; iargc; i++) { + if (strcasecmp(keyword, ua->argk[i]) == 0) { + return i; + } + } + return -1; +} + +/* + * Given a single keyword, find it in the argument list, but + * it must have a value + * Returns: -1 if not found or no value + * list index (base 0) on success + */ +int find_arg_with_value(UAContext *ua, const char *keyword) +{ + for (int i=1; iargc; i++) { + if (strcasecmp(keyword, ua->argk[i]) == 0) { + if (ua->argv[i]) { + return i; + } else { + return -1; + } + } + } + return -1; +} + +/* + * Given a list of keywords, prompt the user + * to choose one. + * + * Returns: -1 on failure + * index into list (base 0) on success + */ +int do_keyword_prompt(UAContext *ua, const char *msg, const char **list) +{ + int i; + start_prompt(ua, _("You have the following choices:\n")); + for (i=0; list[i]; i++) { + add_prompt(ua, list[i]); + } + return do_prompt(ua, "", msg, NULL, 0); +} + + +/* + * Select a Storage resource from prompt list + * If unique is set storage resources that have the main address are + * combined into one (i.e. they are all part of the same) + * storage. Note, not all commands want this. + */ +STORE *select_storage_resource(UAContext *ua, bool unique) +{ + POOL_MEM tmp; + char name[MAX_NAME_LENGTH]; + STORE *store; + + /* Does user want a full selection? 
*/ + if (unique && find_arg(ua, NT_("select")) > 0) { + unique = false; + } + start_prompt(ua, _("The defined Storage resources are:\n")); + LockRes(); + foreach_res(store, R_STORAGE) { + if (store->is_enabled() && acl_access_ok(ua, Storage_ACL, store->name())) { + if (unique) { + Mmsg(tmp, "%s:%d", store->address, store->SDport); + add_prompt(ua, store->name(), tmp.c_str()); + } else { + add_prompt(ua, store->name()); + } + } + } + UnlockRes(); + if (do_prompt(ua, _("Storage"), _("Select Storage resource"), name, sizeof(name)) < 0) { + return NULL; + } + store = (STORE *)GetResWithName(R_STORAGE, name); + return store; +} + +/* + * Select a FileSet resource from prompt list + */ +FILESET *select_fileset_resource(UAContext *ua) +{ + char name[MAX_NAME_LENGTH]; + FILESET *fs; + + start_prompt(ua, _("The defined FileSet resources are:\n")); + LockRes(); + foreach_res(fs, R_FILESET) { + if (acl_access_ok(ua, FileSet_ACL, fs->name())) { + add_prompt(ua, fs->name()); + } + } + UnlockRes(); + if (do_prompt(ua, _("FileSet"), _("Select FileSet resource"), name, sizeof(name)) < 0) { + return NULL; + } + fs = (FILESET *)GetResWithName(R_FILESET, name); + return fs; +} + + +/* + * Get a catalog resource from prompt list + */ +CAT *get_catalog_resource(UAContext *ua) +{ + char name[MAX_NAME_LENGTH]; + CAT *catalog = NULL; + CLIENT *client = NULL; + int i; + + for (i=1; iargc; i++) { + if (strcasecmp(ua->argk[i], NT_("catalog")) == 0 && ua->argv[i]) { + if (acl_access_ok(ua, Catalog_ACL, ua->argv[i])) { + catalog = (CAT *)GetResWithName(R_CATALOG, ua->argv[i]); + break; + } + } + if (strcasecmp(ua->argk[i], NT_("client")) == 0 && ua->argv[i]) { + if (acl_access_client_ok(ua, ua->argv[i], JT_BACKUP_RESTORE)) { + client = (CLIENT *)GetResWithName(R_CLIENT, ua->argv[i]); + break; + } + } + } + if (!catalog && client) { /* Try to take the catalog from the client */ + catalog = client->catalog; + } + if (ua->gui && !catalog) { + LockRes(); + catalog = (CAT *)GetNextRes(R_CATALOG, NULL); + UnlockRes(); + if (!catalog) { + ua->error_msg(_("Could not find a Catalog resource\n")); + return NULL; + } else if (!acl_access_ok(ua, Catalog_ACL, catalog->name())) { + ua->error_msg(_("You must specify a \"use \" command before continuing.\n")); + return NULL; + } + return catalog; + } + if (!catalog) { + start_prompt(ua, _("The defined Catalog resources are:\n")); + LockRes(); + foreach_res(catalog, R_CATALOG) { + if (acl_access_ok(ua, Catalog_ACL, catalog->name())) { + add_prompt(ua, catalog->name()); + } + } + UnlockRes(); + if (do_prompt(ua, _("Catalog"), _("Select Catalog resource"), name, sizeof(name)) < 0) { + return NULL; + } + catalog = (CAT *)GetResWithName(R_CATALOG, name); + } + return catalog; +} + + +/* + * Select a job to enable or disable + */ +JOB *select_enable_disable_job_resource(UAContext *ua, bool enable) +{ + char name[MAX_NAME_LENGTH]; + JOB *job; + + LockRes(); + if (enable) { + start_prompt(ua, _("The disabled Job resources are:\n")); + } else { + start_prompt(ua, _("The enabled Job resources are:\n")); + } + foreach_res(job, R_JOB) { + if (!acl_access_ok(ua, Job_ACL, job->name())) { + continue; + } + if (job->is_enabled() == enable) { /* Already enabled/disabled? 
*/ + continue; /* yes, skip */ + } + add_prompt(ua, job->name()); + } + UnlockRes(); + if (do_prompt(ua, _("Job"), _("Select Job resource"), name, sizeof(name)) < 0) { + return NULL; + } + job = (JOB *)GetResWithName(R_JOB, name); + return job; +} + +/* + * Select a Job resource from prompt list + */ +JOB *select_job_resource(UAContext *ua) +{ + char name[MAX_NAME_LENGTH]; + JOB *job; + + start_prompt(ua, _("The defined Job resources are:\n")); + LockRes(); + foreach_res(job, R_JOB) { + if (job->is_enabled() && acl_access_ok(ua, Job_ACL, job->name())) { + add_prompt(ua, job->name()); + } + } + UnlockRes(); + if (do_prompt(ua, _("Job"), _("Select Job resource"), name, sizeof(name)) < 0) { + return NULL; + } + job = (JOB *)GetResWithName(R_JOB, name); + return job; +} + +/* + * Select a Restore Job resource from argument or prompt + */ +JOB *get_restore_job(UAContext *ua) +{ + JOB *job; + int i = find_arg_with_value(ua, "restorejob"); + if (i >= 0 && acl_access_ok(ua, Job_ACL, ua->argv[i])) { + job = (JOB *)GetResWithName(R_JOB, ua->argv[i]); + if (job && job->JobType == JT_RESTORE) { + return job; + } + ua->error_msg(_("Error: Restore Job resource \"%s\" does not exist.\n"), + ua->argv[i]); + } + return select_restore_job_resource(ua); +} + +/* + * Select a Restore Job resource from prompt list + */ +JOB *select_restore_job_resource(UAContext *ua) +{ + char name[MAX_NAME_LENGTH]; + JOB *job; + + start_prompt(ua, _("The defined Restore Job resources are:\n")); + LockRes(); + foreach_res(job, R_JOB) { + if (job->JobType == JT_RESTORE && job->is_enabled() && + acl_access_ok(ua, Job_ACL, job->name())) { + add_prompt(ua, job->name()); + } + } + UnlockRes(); + if (do_prompt(ua, _("Job"), _("Select Restore Job"), name, sizeof(name)) < 0) { + return NULL; + } + job = (JOB *)GetResWithName(R_JOB, name); + return job; +} + +/* + * Select a client to enable or disable + */ +CLIENT *select_enable_disable_client_resource(UAContext *ua, bool enable) +{ + char name[MAX_NAME_LENGTH]; + CLIENT *client; + + LockRes(); + start_prompt(ua, _("The defined Client resources are:\n")); + foreach_res(client, R_CLIENT) { + if (!acl_access_client_ok(ua, client->name(), JT_BACKUP_RESTORE)) { + continue; + } + if (client->is_enabled() == enable) { /* Already enabled/disabled? */ + continue; /* yes, skip */ + } + add_prompt(ua, client->name()); + } + UnlockRes(); + if (do_prompt(ua, _("Client"), _("Select Client resource"), name, sizeof(name)) < 0) { + return NULL; + } + client = (CLIENT *)GetResWithName(R_CLIENT, name); + return client; +} + + +/* + * Select a client resource from prompt list + */ +CLIENT *select_client_resource(UAContext *ua, int32_t jobtype) +{ + char name[MAX_NAME_LENGTH]; + CLIENT *client; + + start_prompt(ua, _("The defined Client resources are:\n")); + LockRes(); + foreach_res(client, R_CLIENT) { + if (client->is_enabled() && acl_access_client_ok(ua, client->name(), jobtype)) { + add_prompt(ua, client->name()); + } + } + UnlockRes(); + if (do_prompt(ua, _("Client"), _("Select Client (File daemon) resource"), name, sizeof(name)) < 0) { + return NULL; + } + client = (CLIENT *)GetResWithName(R_CLIENT, name); + return client; +} + +/* + * Get client resource, start by looking for + * client= + * if we don't find the keyword, we prompt the user. 
+ */ +CLIENT *get_client_resource(UAContext *ua, int32_t jobtype) +{ + CLIENT *client = NULL; + int i; + + for (i=1; iargc; i++) { + if ((strcasecmp(ua->argk[i], NT_("client")) == 0 || + strcasecmp(ua->argk[i], NT_("fd")) == 0) && ua->argv[i]) { + if (!acl_access_client_ok(ua, ua->argv[i], jobtype)) { + break; + } + client = (CLIENT *)GetResWithName(R_CLIENT, ua->argv[i]); + if (client) { + return client; + } + ua->error_msg(_("Error: Client resource %s does not exist.\n"), ua->argv[i]); + break; + } + } + return select_client_resource(ua, jobtype); +} + +/* + * Select a schedule to enable or disable + */ +SCHED *select_enable_disable_schedule_resource(UAContext *ua, bool enable) +{ + char name[MAX_NAME_LENGTH]; + SCHED *sched; + + LockRes(); + start_prompt(ua, _("The defined Schedule resources are:\n")); + foreach_res(sched, R_SCHEDULE) { + if (!acl_access_ok(ua, Schedule_ACL, sched->name())) { + continue; + } + if (sched->is_enabled() == enable) { /* Already enabled/disabled? */ + continue; /* yes, skip */ + } + add_prompt(ua, sched->name()); + } + UnlockRes(); + if (do_prompt(ua, _("Schedule"), _("Select Schedule resource"), name, sizeof(name)) < 0) { + return NULL; + } + sched = (SCHED *)GetResWithName(R_SCHEDULE, name); + return sched; +} + + +/* Scan what the user has entered looking for: + * + * client= + * + * if error or not found, put up a list of client DBRs + * to choose from. + * + * returns: 0 on error + * 1 on success and fills in CLIENT_DBR + */ +bool get_client_dbr(UAContext *ua, CLIENT_DBR *cr, int32_t jobtype) +{ + int i; + + if (cr->Name[0]) { /* If name already supplied */ + if (db_get_client_record(ua->jcr, ua->db, cr)) { + return 1; + } + ua->error_msg(_("Could not find Client %s: ERR=%s"), cr->Name, db_strerror(ua->db)); + } + for (i=1; iargc; i++) { + if ((strcasecmp(ua->argk[i], NT_("client")) == 0 || + strcasecmp(ua->argk[i], NT_("fd")) == 0) && ua->argv[i]) { + if (!acl_access_client_ok(ua, ua->argv[i], jobtype)) { + break; + } + bstrncpy(cr->Name, ua->argv[i], sizeof(cr->Name)); + if (!db_get_client_record(ua->jcr, ua->db, cr)) { + ua->error_msg(_("Could not find Client \"%s\": ERR=%s"), ua->argv[i], + db_strerror(ua->db)); + cr->ClientId = 0; + break; + } + return 1; + } + } + if (!select_client_dbr(ua, cr, jobtype)) { /* try once more by proposing a list */ + return 0; + } + return 1; +} + +/* + * Select a Client record from the catalog + * Returns 1 on success + * 0 on failure + */ +bool select_client_dbr(UAContext *ua, CLIENT_DBR *cr, int32_t jobtype) +{ + CLIENT_DBR ocr; + char name[MAX_NAME_LENGTH]; + int num_clients, i; + uint32_t *ids; + + + cr->ClientId = 0; + if (!db_get_client_ids(ua->jcr, ua->db, &num_clients, &ids)) { + ua->error_msg(_("Error obtaining client ids. ERR=%s\n"), db_strerror(ua->db)); + return 0; + } + if (num_clients <= 0) { + ua->error_msg(_("No clients defined. 
You must run a job before using this command.\n")); + return 0; + } + + start_prompt(ua, _("Defined Clients:\n")); + for (i=0; i < num_clients; i++) { + ocr.ClientId = ids[i]; + if (!db_get_client_record(ua->jcr, ua->db, &ocr) || + !acl_access_client_ok(ua, ocr.Name, jobtype)) { + continue; + } + add_prompt(ua, ocr.Name); + } + free(ids); + if (do_prompt(ua, _("Client"), _("Select the Client"), name, sizeof(name)) < 0) { + return 0; + } + memset(&ocr, 0, sizeof(ocr)); + bstrncpy(ocr.Name, name, sizeof(ocr.Name)); + + if (!db_get_client_record(ua->jcr, ua->db, &ocr)) { + ua->error_msg(_("Could not find Client \"%s\": ERR=%s"), name, db_strerror(ua->db)); + return 0; + } + memcpy(cr, &ocr, sizeof(ocr)); + return 1; +} + +/* Scan what the user has entered looking for: + * + * argk= + * + * where argk can be : pool, recyclepool, scratchpool, nextpool etc.. + * + * if error or not found, put up a list of pool DBRs + * to choose from. + * + * returns: false on error + * true on success and fills in POOL_DBR + */ +bool get_pool_dbr(UAContext *ua, POOL_DBR *pr, const char *argk) +{ + if (pr->Name[0]) { /* If name already supplied */ + if (db_get_pool_numvols(ua->jcr, ua->db, pr) && + acl_access_ok(ua, Pool_ACL, pr->Name)) { + return true; + } + ua->error_msg(_("Could not find Pool \"%s\": ERR=%s"), pr->Name, db_strerror(ua->db)); + } + if (!select_pool_dbr(ua, pr, argk)) { /* try once more */ + return false; + } + return true; +} + +/* + * Select a Pool record from catalog + * argk can be pool, recyclepool, scratchpool etc.. + */ +bool select_pool_dbr(UAContext *ua, POOL_DBR *pr, const char *argk) +{ + POOL_DBR opr; + char name[MAX_NAME_LENGTH]; + int num_pools, i; + uint32_t *ids = NULL; + + for (i=1; iargc; i++) { + if (strcasecmp(ua->argk[i], argk) == 0 && ua->argv[i] && + acl_access_ok(ua, Pool_ACL, ua->argv[i])) { + bstrncpy(pr->Name, ua->argv[i], sizeof(pr->Name)); + if (!db_get_pool_numvols(ua->jcr, ua->db, pr)) { + ua->error_msg(_("Could not find Pool \"%s\": ERR=%s"), ua->argv[i], + db_strerror(ua->db)); + pr->PoolId = 0; + break; + } + return true; + } + } + + pr->PoolId = 0; + if (!db_get_pool_ids(ua->jcr, ua->db, &num_pools, &ids)) { + ua->error_msg(_("Error obtaining pool ids. ERR=%s\n"), db_strerror(ua->db)); + return 0; + } + if (num_pools <= 0) { + ua->error_msg(_("No pools defined. Use the \"create\" command to create one.\n")); + if (ids) { + free(ids); + } + return false; + } + + start_prompt(ua, _("Defined Pools:\n")); + if (bstrcmp(argk, NT_("recyclepool"))) { + add_prompt(ua, _("*None*")); + } + for (i=0; i < num_pools; i++) { + opr.PoolId = ids[i]; + if (!db_get_pool_numvols(ua->jcr, ua->db, &opr) || + !acl_access_ok(ua, Pool_ACL, opr.Name)) { + continue; + } + add_prompt(ua, opr.Name); + } + free(ids); + if (do_prompt(ua, _("Pool"), _("Select the Pool"), name, sizeof(name)) < 0) { + return false; + } + + memset(&opr, 0, sizeof(opr)); + /* *None* is only returned when selecting a recyclepool, and in that case + * the calling code is only interested in opr.Name, so then we can leave + * pr as all zero. 
+ */ + if (!bstrcmp(name, _("*None*"))) { + bstrncpy(opr.Name, name, sizeof(opr.Name)); + + if (!db_get_pool_numvols(ua->jcr, ua->db, &opr)) { + ua->error_msg(_("Could not find Pool \"%s\": ERR=%s"), name, db_strerror(ua->db)); + return false; + } + } + + memcpy(pr, &opr, sizeof(opr)); + return true; +} + +/* + * Select a Pool and a Media (Volume) record from the database + */ +int select_pool_and_media_dbr(UAContext *ua, POOL_DBR *pr, MEDIA_DBR *mr) +{ + + if (!select_media_dbr(ua, mr)) { + return 0; + } + memset(pr, 0, sizeof(POOL_DBR)); + pr->PoolId = mr->PoolId; + if (!db_get_pool_record(ua->jcr, ua->db, pr)) { + ua->error_msg("%s", db_strerror(ua->db)); + return 0; + } + if (!acl_access_ok(ua, Pool_ACL, pr->Name)) { + ua->error_msg(_("No access to Pool \"%s\"\n"), pr->Name); + return 0; + } + return 1; +} + +/* Select a Media (Volume) record from the database */ +int select_media_dbr(UAContext *ua, MEDIA_DBR *mr) +{ + int i; + int ret = 0; + POOLMEM *err = get_pool_memory(PM_FNAME); + *err=0; + + mr->clear(); + i = find_arg_with_value(ua, "volume"); + if (i >= 0) { + if (is_name_valid(ua->argv[i], &err)) { + bstrncpy(mr->VolumeName, ua->argv[i], sizeof(mr->VolumeName)); + } else { + goto bail_out; + } + } + if (mr->VolumeName[0] == 0) { + POOL_DBR pr; + memset(&pr, 0, sizeof(pr)); + /* Get the pool from pool= */ + if (!get_pool_dbr(ua, &pr)) { + goto bail_out; + } + mr->PoolId = pr.PoolId; + db_list_media_records(ua->jcr, ua->db, mr, prtit, ua, HORZ_LIST); + if (!get_cmd(ua, _("Enter a Volume name or *MediaId: "))) { + goto bail_out; + } + if (ua->cmd[0] == '*' && is_a_number(ua->cmd+1)) { + mr->MediaId = str_to_int64(ua->cmd+1); + } else if (is_name_valid(ua->cmd, &err)) { + bstrncpy(mr->VolumeName, ua->cmd, sizeof(mr->VolumeName)); + } else { + goto bail_out; + } + } + + if (!db_get_media_record(ua->jcr, ua->db, mr)) { + pm_strcpy(err, db_strerror(ua->db)); + goto bail_out; + } + ret = 1; + +bail_out: + if (!ret && *err) { + ua->error_msg("%s", err); + } + free_pool_memory(err); + return ret; +} + + +/* + * Select a pool resource from prompt list + */ +POOL *select_pool_resource(UAContext *ua) +{ + char name[MAX_NAME_LENGTH]; + POOL *pool; + + start_prompt(ua, _("The defined Pool resources are:\n")); + LockRes(); + foreach_res(pool, R_POOL) { + if (acl_access_ok(ua, Pool_ACL, pool->name())) { + add_prompt(ua, pool->name()); + } + } + UnlockRes(); + if (do_prompt(ua, _("Pool"), _("Select Pool resource"), name, sizeof(name)) < 0) { + return NULL; + } + pool = (POOL *)GetResWithName(R_POOL, name); + return pool; +} + + +/* + * If you are thinking about using it, you + * probably want to use select_pool_dbr() + * or get_pool_dbr() above. 
+ */ +POOL *get_pool_resource(UAContext *ua) +{ + POOL *pool = NULL; + int i; + + i = find_arg_with_value(ua, "pool"); + if (i >= 0 && acl_access_ok(ua, Pool_ACL, ua->argv[i])) { + pool = (POOL *)GetResWithName(R_POOL, ua->argv[i]); + if (pool) { + return pool; + } + ua->error_msg(_("Error: Pool resource \"%s\" does not exist.\n"), ua->argv[i]); + } + return select_pool_resource(ua); +} + +/* + * List all jobs and ask user to select one + */ +static int select_job_dbr(UAContext *ua, JOB_DBR *jr) +{ + db_list_job_records(ua->jcr, ua->db, jr, prtit, ua, HORZ_LIST); + if (!get_pint(ua, _("Enter the JobId to select: "))) { + return 0; + } + jr->JobId = ua->int64_val; + if (!db_get_job_record(ua->jcr, ua->db, jr)) { + ua->error_msg("%s", db_strerror(ua->db)); + return 0; + } + return jr->JobId; + +} + + +/* Scan what the user has entered looking for: + * + * jobid=nn + * + * if error or not found, put up a list of Jobs + * to choose from. + * + * returns: 0 on error + * JobId on success and fills in JOB_DBR + */ +int get_job_dbr(UAContext *ua, JOB_DBR *jr) +{ + int i; + + for (i=1; iargc; i++) { + if (strcasecmp(ua->argk[i], NT_("ujobid")) == 0 && ua->argv[i]) { + jr->JobId = 0; + bstrncpy(jr->Job, ua->argv[i], sizeof(jr->Job)); + } else if (strcasecmp(ua->argk[i], NT_("jobid")) == 0 && ua->argv[i]) { + jr->JobId = str_to_int64(ua->argv[i]); + jr->Job[0] = 0; + } else { + continue; + } + if (!db_get_job_record(ua->jcr, ua->db, jr)) { + ua->error_msg(_("Could not find Job \"%s\": ERR=%s"), ua->argv[i], + db_strerror(ua->db)); + jr->JobId = 0; + break; + } + return jr->JobId; + } + + jr->JobId = 0; + jr->Job[0] = 0; + + for (i=1; iargc; i++) { + if ((strcasecmp(ua->argk[i], NT_("jobname")) == 0 || + strcasecmp(ua->argk[i], NT_("job")) == 0) && ua->argv[i]) { + jr->JobId = 0; + bstrncpy(jr->Name, ua->argv[i], sizeof(jr->Name)); + break; + } + } + if (!select_job_dbr(ua, jr)) { /* try once more */ + return 0; + } + return jr->JobId; +} + +/* + * Implement unique set of prompts + */ +void start_prompt(UAContext *ua, const char *msg) +{ + if (ua->max_prompts == 0) { + ua->max_prompts = 10; + ua->prompt = (char **)bmalloc(sizeof(char *) * ua->max_prompts); + ua->unique = (char **)bmalloc(sizeof(char *) * ua->max_prompts); + } + ua->num_prompts = 1; + ua->prompt[0] = bstrdup(msg); + ua->unique[0] = NULL; +} + +/* + * Add to prompts -- keeping them unique by name + */ +void add_prompt(UAContext *ua, const char *prompt, char *unique) +{ + int i; + if (ua->num_prompts == ua->max_prompts) { + ua->max_prompts *= 2; + ua->prompt = (char **)brealloc(ua->prompt, sizeof(char *) * + ua->max_prompts); + ua->unique = (char **)brealloc(ua->unique, sizeof(char *) * + ua->max_prompts); + } + for (i=1; i < ua->num_prompts; i++) { + if (strcmp(ua->prompt[i], prompt) == 0) { + return; + } else if (unique && strcmp(ua->unique[i], unique) == 0) { + return; + } + } + ua->prompt[ua->num_prompts] = bstrdup(prompt); + if (unique) { + ua->unique[ua->num_prompts++] = bstrdup(unique); + } else { + ua->unique[ua->num_prompts++] = NULL; + } +} + +/* + * Display prompts and get user's choice + * + * Returns: -1 on error + * index base 0 on success, and choice + * is copied to prompt if not NULL + * prompt is set to the chosen prompt item string + */ +int do_prompt(UAContext *ua, const char *automsg, const char *msg, + char *prompt, int max_prompt) +{ + int i, item; + char pmsg[MAXSTRING]; + BSOCK *user = ua->UA_sock; + + if (prompt) { + *prompt = 0; + } + if (ua->num_prompts == 2) { + item = 1; + if (prompt) { + bstrncpy(prompt, 
ua->prompt[1], max_prompt); + } + ua->send_msg(_("Automatically selected %s: %s\n"), NPRTB(automsg), ua->prompt[1]); + goto done; + } + /* If running non-interactive, bail out */ + if (ua->batch) { + /* First print the choices he wanted to make */ + ua->send_msg(ua->prompt[0]); + for (i=1; i < ua->num_prompts; i++) { + ua->send_msg("%6d: %s\n", i, ua->prompt[i]); + } + /* Now print error message */ + ua->send_msg(_("Your request has multiple choices for \"%s\". Selection is not possible in batch mode.\n"), automsg); + item = -1; + goto done; + } + if (ua->api) user->signal(BNET_START_SELECT); + ua->send_msg(ua->prompt[0]); + for (i=1; i < ua->num_prompts; i++) { + if (ua->api) { + ua->send_msg("%s", ua->prompt[i]); + } else { + ua->send_msg("%6d: %s\n", i, ua->prompt[i]); + } + } + if (ua->api) user->signal(BNET_END_SELECT); + + for ( ;; ) { + /* First item is the prompt string, not the items */ + if (ua->num_prompts == 1) { + ua->error_msg(_("Selection list for \"%s\" is empty!\n"), automsg); + item = -1; /* list is empty ! */ + break; + } + if (ua->num_prompts == 2) { + item = 1; + ua->send_msg(_("Automatically selected: %s\n"), ua->prompt[1]); + if (prompt) { + bstrncpy(prompt, ua->prompt[1], max_prompt); + } + break; + } else { + sprintf(pmsg, "%s (1-%d): ", msg, ua->num_prompts-1); + } + /* Either a . or an @ will get you out of the loop */ + if (ua->api) user->signal(BNET_SELECT_INPUT); + if (!get_pint(ua, pmsg)) { + item = -1; /* error */ + ua->info_msg(_("Selection aborted, nothing done.\n")); + break; + } + item = ua->pint32_val; + if (item < 1 || item >= ua->num_prompts) { + ua->warning_msg(_("Please enter a number between 1 and %d\n"), ua->num_prompts-1); + continue; + } + if (prompt) { + bstrncpy(prompt, ua->prompt[item], max_prompt); + } + break; + } + +done: + for (i=0; i < ua->num_prompts; i++) { + free(ua->prompt[i]); + if (ua->unique[i]) free(ua->unique[i]); + } + ua->num_prompts = 0; + return item>0 ? item-1 : item; +} + +/* + * Display prompts and get user's choice + * + * Returns: -1 on error + * number of items selected and the choices are + * copied to selected if not NULL + * selected is an alist of the prompts chosen + * Note! selected must already be initialized. + */ +int do_alist_prompt(UAContext *ua, const char *automsg, const char *msg, + alist *selected) +{ + int i, item; + char pmsg[MAXSTRING]; + BSOCK *user = ua->UA_sock; + sellist sl; + + /* First item is the prompt string, not the items */ + if (ua->num_prompts == 1) { + ua->error_msg(_("Selection list for \"%s\" is empty!\n"), automsg); + item = -1; /* list is empty ! */ + goto done; + } + if (ua->num_prompts == 2) { + item = 1; + selected->append(bstrdup(ua->prompt[1])); + ua->send_msg(_("Automatically selected %s: %s\n"), automsg, ua->prompt[1]); + goto done; + } + /* If running non-interactive, bail out */ + if (ua->batch) { + /* First print the choices he wanted to make */ + ua->send_msg(ua->prompt[0]); + for (i=1; i < ua->num_prompts; i++) { + ua->send_msg("%6d: %s\n", i, ua->prompt[i]); + } + /* Now print error message */ + ua->send_msg(_("Your request has multiple choices for \"%s\". 
Selection is not possible in batch mode.\n"), automsg); + item = -1; + goto done; + } + if (ua->api) user->signal(BNET_START_SELECT); + ua->send_msg(ua->prompt[0]); + for (i=1; i < ua->num_prompts; i++) { + if (ua->api) { + ua->send_msg("%s", ua->prompt[i]); + } else { + ua->send_msg("%6d: %s\n", i, ua->prompt[i]); + } + } + if (ua->api) user->signal(BNET_END_SELECT); + + sprintf(pmsg, "%s (1-%d): ", msg, ua->num_prompts-1); + + for ( ;; ) { + bool ok = true; + /* Either a . or an @ will get you out of the loop */ + if (ua->api) user->signal(BNET_SELECT_INPUT); + + if (!get_selection_list(ua, sl, pmsg, false)) { + item = -1; + break; + } + + if (sl.is_all()) { + for (i=1; i < ua->num_prompts; i++) { + selected->append(bstrdup(ua->prompt[i])); + } + } else { + while ( (item = sl.next()) > 0) { + if (item < 1 || item >= ua->num_prompts) { + ua->warning_msg(_("Please enter a number between 1 and %d\n"), ua->num_prompts-1); + ok = false; + break; + } + selected->append(bstrdup(ua->prompt[item])); + } + } + if (ok) { + item = selected->size(); + break; + } + } + +done: + for (i=0; i < ua->num_prompts; i++) { + free(ua->prompt[i]); + if (ua->unique[i]) free(ua->unique[i]); + } + ua->num_prompts = 0; + return item; +} + + +/* + * We scan what the user has entered looking for + * storage= + * job= + * jobid= + * ? (prompt him with storage list) + * (prompt him with storage list) + * + * If use_default is set, we assume that any keyword without a value + * is the name of the Storage resource wanted. + */ +STORE *get_storage_resource(UAContext *ua, bool use_default, bool unique) +{ + char store_name[MAX_NAME_LENGTH]; + STORE *store = NULL; + int jobid; + JCR *jcr; + int i; + char ed1[50]; + *store_name = 0; + + for (i=1; iargc; i++) { + if (use_default && !ua->argv[i]) { + /* Ignore slots, scan and barcode(s) keywords */ + if (strcasecmp("scan", ua->argk[i]) == 0 || + strcasecmp("barcode", ua->argk[i]) == 0 || + strcasecmp("barcodes", ua->argk[i]) == 0 || + strcasecmp("slots", ua->argk[i]) == 0) { + continue; + } + /* Default argument is storage (except in enable/disable command) */ + if (store_name[0]) { + ua->error_msg(_("Storage name given twice.\n")); + return NULL; + } + bstrncpy(store_name, ua->argk[i], sizeof(store_name)); + if (store_name[0] == '?') { + *store_name = 0; + break; + } + } else { + if (strcasecmp(ua->argk[i], NT_("storage")) == 0 || + strcasecmp(ua->argk[i], NT_("sd")) == 0) { + bstrncpy(store_name, NPRTB(ua->argv[i]), sizeof(store_name)); + + } else if (strcasecmp(ua->argk[i], NT_("jobid")) == 0) { + jobid = str_to_int64(ua->argv[i]); + if (jobid <= 0) { + ua->error_msg(_("Expecting jobid=nn command, got: %s\n"), ua->argk[i]); + return NULL; + } + if (!(jcr=get_jcr_by_id(jobid))) { + ua->error_msg(_("JobId %s is not running.\n"), edit_int64(jobid, ed1)); + return NULL; + } + if (jcr->wstore) { + bstrncpy(store_name, jcr->wstore->name(), sizeof(store_name)); + } + free_jcr(jcr); + + } else if (strcasecmp(ua->argk[i], NT_("job")) == 0 || + strcasecmp(ua->argk[i], NT_("jobname")) == 0) { + if (!ua->argv[i]) { + ua->error_msg(_("Expecting job=xxx, got: %s.\n"), ua->argk[i]); + return NULL; + } + if (!(jcr=get_jcr_by_partial_name(ua->argv[i]))) { + ua->error_msg(_("Job \"%s\" is not running.\n"), ua->argv[i]); + return NULL; + } + if (jcr->wstore) { + bstrncpy(store_name, jcr->wstore->name(), sizeof(store_name)); + } + free_jcr(jcr); + + } else if (strcasecmp(ua->argk[i], NT_("ujobid")) == 0) { + if (!ua->argv[i]) { + ua->error_msg(_("Expecting ujobid=xxx, got: %s.\n"), 
ua->argk[i]); + return NULL; + } + if ((jcr=get_jcr_by_full_name(ua->argv[i]))) { + if (jcr->wstore) { + bstrncpy(store_name, jcr->wstore->name(), sizeof(store_name)); + } + free_jcr(jcr); + } + } + if (store_name[0]) { + break; /* We can stop the loop if we have something */ + } + } + } + + if (store_name[0] != 0) { + store = (STORE *)GetResWithName(R_STORAGE, store_name); + if (!store && strcmp(store_name, "storage") != 0) { + /* Looks that the first keyword of the line was not a storage name, make + * sure that it's not "storage=" before we print the following message + */ + ua->error_msg(_("Storage resource \"%s\": not found\n"), store_name); + } + } + if (store && !acl_access_ok(ua, Storage_ACL, store->name())) { + store = NULL; + } + /* No keywords found, so present a selection list */ + if (!store) { + store = select_storage_resource(ua, unique); + } + return store; +} + +/* Get drive that we are working with for this storage */ +int get_storage_drive(UAContext *ua, STORE *store) +{ + int i, drive = -1; + /* Get drive for autochanger if possible */ + i = find_arg_with_value(ua, "drive"); + if (i >=0) { + drive = atoi(ua->argv[i]); + } else if (store && store->autochanger) { + /* If our structure is not set ask SD for # drives */ + if (store->drives == 0) { + store->drives = get_num_drives_from_SD(ua); + } + /* If only one drive, default = 0 */ + if (store->drives == 1) { + drive = 0; + } else { + /* Ask user to enter drive number */ + ua->cmd[0] = 0; + if (!get_cmd(ua, _("Enter autochanger drive[0]: "))) { + drive = -1; /* None */ + } else { + drive = atoi(ua->cmd); + } + } + } + return drive; +} + +/* Get slot that we are working with for this storage */ +int get_storage_slot(UAContext *ua, STORE *store) +{ + int i, slot = -1; + /* Get slot for autochanger if possible */ + i = find_arg_with_value(ua, "slot"); + if (i >=0) { + slot = atoi(ua->argv[i]); + } else if (store && store->autochanger) { + /* Ask user to enter slot number */ + ua->cmd[0] = 0; + if (!get_cmd(ua, _("Enter autochanger slot: "))) { + slot = -1; /* None */ + } else { + slot = atoi(ua->cmd); + } + } + return slot; +} + + + +/* + * Scan looking for mediatype= + * + * if not found or error, put up selection list + * + * Returns: 0 on error + * 1 on success, MediaType is set + */ +int get_media_type(UAContext *ua, char *MediaType, int max_media) +{ + STORE *store; + int i; + + i = find_arg_with_value(ua, "mediatype"); + if (i >= 0) { + bstrncpy(MediaType, ua->argv[i], max_media); + return 1; + } + + start_prompt(ua, _("Media Types defined in conf file:\n")); + LockRes(); + foreach_res(store, R_STORAGE) { + if (store->is_enabled()) { + add_prompt(ua, store->media_type); + } + } + UnlockRes(); + return (do_prompt(ua, _("Media Type"), _("Select the Media Type"), MediaType, max_media) < 0) ? 
0 : 1; +} + +int get_level_code_from_name(const char *level_name) +{ + int ret = 0; + if (!level_name) { + return ret; + } + for (int i=0; joblevels[i].level_name; i++) { + if (strcasecmp(level_name, joblevels[i].level_name) == 0) { + ret = joblevels[i].level; + break; + } + } + return ret; +} + +bool get_level_from_name(JCR *jcr, const char *level_name) +{ + int level = get_level_code_from_name(level_name); + if (level > 0) { + jcr->setJobLevel(level); + return true; + } + return false; +} + +static int count_running_jobs(UAContext *ua) +{ + int tjobs = 0; /* total # number jobs */ + int njobs = 0; + JCR *jcr; + /* Count Jobs running */ + foreach_jcr(jcr) { + if (jcr->is_internal_job()) { /* this is us */ + continue; + } + tjobs++; /* count of all jobs */ + if (!acl_access_ok(ua, Job_ACL, jcr->job->name())) { + continue; /* skip not authorized */ + } + njobs++; /* count of authorized jobs */ + } + endeach_jcr(jcr); + + if (njobs == 0) { /* no authorized */ + if (tjobs == 0) { + ua->send_msg(_("No Jobs running.\n")); + } else { + ua->send_msg(_("None of your jobs are running.\n")); + } + } + return njobs; +} + + +/* Get a list of running jobs + * "reason" is used in user messages + * can be: cancel, limit, ... + * Returns: -1 on error + * nb of JCR on success (should be free_jcr() after) + */ +int select_running_jobs(UAContext *ua, alist *jcrs, const char *reason) +{ + int i; + JCR *jcr = NULL; + int njobs = 0; + char JobName[MAX_NAME_LENGTH]; + char temp[256]; + alist *selected = NULL; + + for (i=1; iargc; i++) { + if (strcasecmp(ua->argk[i], NT_("jobid")) == 0) { + sellist sl; + int32_t JobId; + + if (!ua->argv[i]) { + ua->error_msg(_("No value given for \"jobid\".\n")); + goto bail_out; + } + if (!sl.set_string(ua->argv[i], true)) { + ua->send_msg("%s", sl.get_errmsg()); + goto bail_out; + } + foreach_sellist(JobId, &sl) { + jcr = get_jcr_by_id(JobId); + if (jcr && jcr->job && acl_access_ok(ua, Job_ACL, jcr->job->name())) { + jcrs->append(jcr); + } else if (jcr) { + ua->error_msg(_("Unauthorized command from this console " + "for JobId=%d.\n"), JobId); + free_jcr(jcr); + } else { + ua->warning_msg(_("Warning Job JobId=%d is not running.\n"), JobId); + } + } + if (jcrs->size() == 0) { + goto bail_out; /* If we did not find specified jobid, get out */ + } + break; + + /* TODO: might want to implement filters (client, status, etc...) 
*/ + } else if (strcasecmp(ua->argk[i], NT_("all")) == 0) { + foreach_jcr(jcr) { + if (jcr->is_internal_job()) { /* Do not cancel consoles */ + continue; + } + if (!acl_access_ok(ua, Job_ACL, jcr->job->name())) { + continue; /* skip not authorized */ + } + jcr->inc_use_count(); + jcrs->append(jcr); + } + endeach_jcr(jcr); + + /* If we have something and no "yes" on command line, get confirmation */ + if (jcrs->size() > 0 && find_arg(ua, NT_("yes")) < 0) { + char nbuf[1000]; + bsnprintf(nbuf, sizeof(nbuf), _("Confirm %s of %d Job%s (yes/no): "), + reason, jcrs->size(), jcrs->size()>1?"s":""); + if (!get_yesno(ua, nbuf) || ua->pint32_val == 0) { + goto bail_out; + } + } + if (jcrs->size() == 0) { + goto bail_out; /* If we did not find specified jobid, get out */ + } + break; + + } else if (strcasecmp(ua->argk[i], NT_("job")) == 0) { + if (!ua->argv[i]) { + ua->error_msg(_("No value given for \"job\".\n")); + goto bail_out; + } + jcr = get_jcr_by_partial_name(ua->argv[i]); + + if (jcr && jcr->job && acl_access_ok(ua, Job_ACL, jcr->job->name())) { + jcrs->append(jcr); + + } else if (jcr) { + if (jcr->job) { + ua->error_msg(_("Unauthorized command from this console " + "for job=%s.\n"), ua->argv[i]); + } + free_jcr(jcr); + + } else { + ua->warning_msg(_("Warning Job %s is not running.\n"), ua->argv[i]); + } + if (jcrs->size() == 0) { + goto bail_out; /* If we did not find specified jobid, get out */ + } + break; + + } else if (strcasecmp(ua->argk[i], NT_("ujobid")) == 0) { + if (!ua->argv[i]) { + ua->error_msg(_("No value given for \"ujobid\".\n")); + goto bail_out; + } + jcr = get_jcr_by_full_name(ua->argv[i]); + + if (jcr && jcr->job && acl_access_ok(ua, Job_ACL, jcr->job->name())) { + jcrs->append(jcr); + + } else if (jcr) { + if (jcr->job) { + ua->error_msg(_("Unauthorized command from this console " + "for ujobid=%s.\n"), ua->argv[i]); + } + free_jcr(jcr); + + } else { + ua->warning_msg(_("Warning Job %s is not running.\n"), ua->argv[i]); + } + if (jcrs->size() == 0) { + goto bail_out; /* If we did not find specified jobid, get out */ + } + break; + } + } + + if (jcrs->size() == 0) { + /* + * If we still do not have a jcr, + * throw up a list and ask the user to select one. 
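+       * Prompt entries are formatted as "JobId=nn Job=name" and the chosen entries are parsed back with sscanf() below.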
+ */ + char *item; + char buf[1000]; + njobs = count_running_jobs(ua); + if (njobs == 0) { + goto bail_out; + } + start_prompt(ua, _("Select Job(s):\n")); + foreach_jcr(jcr) { + char ed1[50]; + if (jcr->is_internal_job()) { /* this is us */ + continue; + } + bsnprintf(buf, sizeof(buf), _("JobId=%s Job=%s"), edit_int64(jcr->JobId, ed1), jcr->Job); + add_prompt(ua, buf); + } + endeach_jcr(jcr); + bsnprintf(temp, sizeof(temp), _("Choose Job list to %s"), _(reason)); + selected = New(alist(5, owned_by_alist)); + if (do_alist_prompt(ua, _("Job"), temp, selected) < 0) { + goto bail_out; + } + /* Possibly ask for confirmation */ + if (selected->size() > 0 && find_arg(ua, NT_("yes")) < 0) { + char nbuf[1000]; + foreach_alist(item, selected) { + ua->send_msg("%s\n", item); + } + bsnprintf(nbuf, sizeof(nbuf), _("Confirm %s of %d Job%s (yes/no): "), + reason, selected->size(), selected->size()>1?"s":""); + if (!get_yesno(ua, nbuf) || ua->pint32_val == 0) { + goto bail_out; + } + } + + foreach_alist(item, selected) { + if (sscanf(item, "JobId=%d Job=%127s", &njobs, JobName) != 2) { + ua->warning_msg(_("Job \"%s\" not found.\n"), item); + continue; + } + jcr = get_jcr_by_full_name(JobName); + if (jcr) { + jcrs->append(jcr); + } else { + ua->warning_msg(_("Job \"%s\" not found.\n"), JobName); + } + } + } +bail_out: + if (selected) delete selected; + return jcrs->size(); +} + +/* Small helper to scan storage daemon commands and search for volumes */ +int scan_storage_cmd(UAContext *ua, const char *cmd, + bool allfrompool, /* Choose to select a specific volume or not */ + int *drive, /* Drive number */ + MEDIA_DBR *mr, /* Media Record, can have options already filled */ + POOL_DBR *pr, /* Pool Record */ + const char **action, /* action= argument, can be NULL if not relevant */ + char *storage, /* Storage name, must be MAX_NAME_LENGTH long */ + int *nb, /* Number of media found */ + uint32_t **results) /* List of MediaId */ +{ + bool allpools=false, has_vol = false;; + STORE *store; + + *nb = 0; + *results = NULL; + + /* Optional parameters */ + if (drive) { + *drive = 0; + } + if (storage) { + *storage = 0; + } + + /* Look at arguments */ + for (int i=1; iargc; i++) { + if (strcasecmp(ua->argk[i], NT_("allpools")) == 0) { + allpools = true; + + } else if (strcasecmp(ua->argk[i], NT_("allfrompool")) == 0) { + allfrompool = true; + + } else if (strcasecmp(ua->argk[i], NT_("volume")) == 0 + && is_name_valid(ua->argv[i], NULL)) { + bstrncpy(mr->VolumeName, ua->argv[i], sizeof(mr->VolumeName)); + has_vol = true; + + } else if (strcasecmp(ua->argk[i], NT_("mediatype")) == 0 + && ua->argv[i]) { + bstrncpy(mr->MediaType, ua->argv[i], sizeof(mr->MediaType)); + + } else if (strcasecmp(ua->argk[i], NT_("drive")) == 0 && ua->argv[i]) { + if (drive) { + *drive = atoi(ua->argv[i]); + } else { + ua->warning_msg(_("Invalid argument \"drive\".\n")); + } + + } else if (strcasecmp(ua->argk[i], NT_("action")) == 0 + && is_name_valid(ua->argv[i], NULL)) { + + if (action) { + *action = ua->argv[i]; + } else { + ua->warning_msg(_("Invalid argument \"action\".\n")); + } + } + } + + if (storage) { + /* Choose storage */ + ua->jcr->wstore = store = get_storage_resource(ua, false); + if (!store) { + goto bail_out; + } + bstrncpy(storage, store->dev_name(), MAX_NAME_LENGTH); + set_storageid_in_mr(store, mr); + } + + if (!open_db(ua)) { + Dmsg0(100, "Can't open db\n"); + goto bail_out; + } + + /* + * Look for all volumes that are enabled + */ + mr->Enabled = 1; + + if (allfrompool && !has_vol) { /* We need a list of volumes */ + + /* 
We don't take all pools and we don't have a volume in argument, + * so we need to choose a pool + */ + if (!allpools) { + /* force pool selection */ + POOL *pool = get_pool_resource(ua); + + if (!pool) { + Dmsg0(100, "Can't get pool resource\n"); + goto bail_out; + } + bstrncpy(pr->Name, pool->name(), sizeof(pr->Name)); + if (!db_get_pool_record(ua->jcr, ua->db, pr)) { + Dmsg0(100, "Can't get pool record\n"); + goto bail_out; + } + mr->PoolId = pr->PoolId; + } + + if (!db_get_media_ids(ua->jcr, ua->db, mr, nb, results)) { + Dmsg0(100, "No results from db_get_media_ids\n"); + goto bail_out; + } + + } else { /* We want a single volume */ + MEDIA_DBR mr2; + if (!select_media_dbr(ua, &mr2)) { + goto bail_out; + } + mr->MediaId = mr2.MediaId; + mr->Recycle = mr2.Recycle; /* Should be the same to find a result */ + if (!db_get_media_ids(ua->jcr, ua->db, mr, nb, results)) { + Dmsg0(100, "No results from db_get_media_ids\n"); + goto bail_out; + } + *nb = 1; + *results = (uint32_t *) malloc(1 * sizeof(uint32_t)); + *results[0] = mr2.MediaId; + } + + if (*nb == 0) { + goto bail_out; + } + return 1; + +bail_out: + if (!*nb) { + ua->send_msg(_("No Volumes found to perform the command.\n")); + } + + close_db(ua); + ua->jcr->wstore = NULL; + if (*results) { + free(*results); + *results = NULL; + } + *nb = 0; + return 0; +} diff --git a/src/dird/ua_server.c b/src/dird/ua_server.c new file mode 100644 index 00000000..56cfecba --- /dev/null +++ b/src/dird/ua_server.c @@ -0,0 +1,231 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * Bacula Director -- User Agent Server + * + * Kern Sibbald, September MM + * + */ + +#include "bacula.h" +#include "dird.h" + +/* Imported variables */ + + +/* Forward referenced functions */ +extern "C" void *connect_thread(void *arg); +static void *handle_UA_client_request(void *arg); + + +/* Global variables */ +static bool started = false; +static pthread_t server_tid; +static bool server_tid_valid = false; +static workq_t ua_workq; + +struct s_addr_port { + char *addr; + char *port; +}; + +/* Called here by Director daemon to start UA (user agent) + * command thread. This routine creates the thread and then + * returns. 
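+ * The created thread detaches itself and accepts up to MaxConsoleConnect console connections via bnet_thread_server().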
+ */ +void start_UA_server(dlist *addrs) +{ + pthread_t thid; + int status; + static dlist *myaddrs = addrs; + + if ((status=pthread_create(&thid, NULL, connect_thread, (void *)myaddrs)) != 0) { + berrno be; + Emsg1(M_ABORT, 0, _("Cannot create UA thread: %s\n"), be.bstrerror(status)); + } + started = true; + return; +} + +void stop_UA_server() +{ + if (started && server_tid_valid) { + server_tid_valid = false; + bnet_stop_thread_server(server_tid); + } +} + +extern "C" +void *connect_thread(void *arg) +{ + pthread_detach(pthread_self()); + set_jcr_in_tsd(INVALID_JCR); + + server_tid = pthread_self(); + server_tid_valid = true; + + /* Permit MaxConsoleConnect console connections */ + bnet_thread_server((dlist*)arg, director->MaxConsoleConnect, &ua_workq, handle_UA_client_request); + return NULL; +} + +/* + * Create a Job Control Record for a control "job", + * filling in all the appropriate fields. + */ +JCR *new_control_jcr(const char *base_name, int job_type) +{ + JCR *jcr; + jcr = new_jcr(sizeof(JCR), dird_free_jcr); + /* + * The job and defaults are not really used, but + * we set them up to ensure that everything is correctly + * initialized. + */ + LockRes(); + jcr->job = (JOB *)GetNextRes(R_JOB, NULL); + set_jcr_defaults(jcr, jcr->job); + /* We use a resource, so we should count in the reload */ + jcr->setJobType(job_type); + UnlockRes(); + + jcr->sd_auth_key = bstrdup("dummy"); /* dummy Storage daemon key */ + create_unique_job_name(jcr, base_name); + jcr->sched_time = jcr->start_time; + jcr->setJobLevel(L_NONE); + jcr->setJobStatus(JS_Running); + jcr->JobId = 0; + return jcr; +} + +/* + * Handle Director User Agent commands + * + */ +static void *handle_UA_client_request(void *arg) +{ + int stat; + UAContext *ua; + JCR *jcr; + BSOCK *user = (BSOCK *)arg; + + pthread_detach(pthread_self()); + + jcr = new_control_jcr("-Console-", JT_CONSOLE); + + ua = new_ua_context(jcr); + ua->UA_sock = user; + set_jcr_in_tsd(INVALID_JCR); + + user->recv(); /* Get first message */ + if (!authenticate_user_agent(ua)) { + goto getout; + } + + while (!ua->quit) { + if (ua->api) user->signal(BNET_MAIN_PROMPT); + stat = user->recv(); + if (stat >= 0) { + pm_strcpy(ua->cmd, ua->UA_sock->msg); + parse_ua_args(ua); + if (ua->argc > 0 && ua->argk[0][0] == '.') { + do_a_dot_command(ua); + } else { + do_a_command(ua); + } + dequeue_messages(ua->jcr); + if (!ua->quit) { + if (console_msg_pending && acl_access_ok(ua, Command_ACL, "messages", 8)) { + if (ua->auto_display_messages) { + pm_strcpy(ua->cmd, "messages"); + qmessagescmd(ua, ua->cmd); + ua->user_notified_msg_pending = false; + } else if (!ua->gui && !ua->user_notified_msg_pending && console_msg_pending) { + if (ua->api) { + user->signal(BNET_MSGS_PENDING); + } else { + bsendmsg(ua, _("You have messages.\n")); + } + ua->user_notified_msg_pending = true; + } + } + if (!ua->api) user->signal(BNET_EOD); /* send end of command */ + } + } else if (user->is_stop()) { + ua->quit = true; + } else { /* signal */ + user->signal(BNET_POLL); + } + + /* At the end of each command, revert to the main shared SQL link */ + ua->db = ua->shared_db; + } + +getout: + close_db(ua); + free_ua_context(ua); + free_jcr(jcr); + + return NULL; +} + +/* + * Create a UAContext for a Job that is running so that + * it can the User Agent routines and + * to ensure that the Job gets the proper output. + * This is a sort of mini-kludge, and should be + * unified at some point. 
+ */ +UAContext *new_ua_context(JCR *jcr) +{ + UAContext *ua; + + ua = (UAContext *)malloc(sizeof(UAContext)); + memset(ua, 0, sizeof(UAContext)); + ua->jcr = jcr; + ua->shared_db = ua->db = jcr->db; + ua->cmd = get_pool_memory(PM_FNAME); + ua->args = get_pool_memory(PM_FNAME); + ua->errmsg = get_pool_memory(PM_FNAME); + ua->verbose = true; + ua->automount = true; + return ua; +} + +void free_ua_context(UAContext *ua) +{ + if (ua->cmd) { + free_pool_memory(ua->cmd); + } + if (ua->args) { + free_pool_memory(ua->args); + } + if (ua->errmsg) { + free_pool_memory(ua->errmsg); + } + if (ua->prompt) { + free(ua->prompt); + } + if (ua->unique) { + free(ua->unique); + } + free_bsock(ua->UA_sock); + free(ua); +} diff --git a/src/dird/ua_status.c b/src/dird/ua_status.c new file mode 100644 index 00000000..27c231a2 --- /dev/null +++ b/src/dird/ua_status.c @@ -0,0 +1,1537 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula Director -- User Agent Status Command + * + * Kern Sibbald, August MMI + */ + + +#include "bacula.h" +#include "dird.h" + +extern void *start_heap; +extern utime_t last_reload_time; + +static void list_scheduled_jobs(UAContext *ua); +static void llist_scheduled_jobs(UAContext *ua); +static void list_running_jobs(UAContext *ua); +static void list_terminated_jobs(UAContext *ua); +static void do_storage_status(UAContext *ua, STORE *store, char *cmd); +static void do_client_status(UAContext *ua, CLIENT *client, char *cmd); +static void do_director_status(UAContext *ua); +static void do_all_status(UAContext *ua); +void status_slots(UAContext *ua, STORE *store); +void status_content(UAContext *ua, STORE *store); + +static char OKqstatus[] = "1000 OK .status\n"; +static char DotStatusJob[] = "JobId=%s JobStatus=%c JobErrors=%d\n"; + +/* + * .status command + */ + +bool dot_status_cmd(UAContext *ua, const char *cmd) +{ + STORE *store; + CLIENT *client; + JCR* njcr = NULL; + s_last_job* job; + char ed1[50]; + + Dmsg2(20, "status=\"%s\" argc=%d\n", cmd, ua->argc); + + if (ua->argc < 3) { + ua->send_msg("1900 Bad .status command, missing arguments.\n"); + return false; + } + + if (strcasecmp(ua->argk[1], "dir") == 0) { + if (strcasecmp(ua->argk[2], "current") == 0) { + ua->send_msg(OKqstatus, ua->argk[2]); + foreach_jcr(njcr) { + if (!njcr->is_internal_job() && acl_access_ok(ua, Job_ACL, njcr->job->name())) { + ua->send_msg(DotStatusJob, edit_int64(njcr->JobId, ed1), + njcr->JobStatus, njcr->JobErrors); + } + } + endeach_jcr(njcr); + } else if (strcasecmp(ua->argk[2], "last") == 0) { + ua->send_msg(OKqstatus, ua->argk[2]); + if ((last_jobs) && (last_jobs->size() > 0)) { + job = (s_last_job*)last_jobs->last(); + if (acl_access_ok(ua, Job_ACL, job->Job)) { + ua->send_msg(DotStatusJob, edit_int64(job->JobId, ed1), + job->JobStatus, job->Errors); + } + } + } else if (strcasecmp(ua->argk[2], "header") == 0) { + list_dir_status_header(ua); + } else if (strcasecmp(ua->argk[2], "scheduled") == 0) { + 
list_scheduled_jobs(ua); + } else if (strcasecmp(ua->argk[2], "running") == 0) { + list_running_jobs(ua); + } else if (strcasecmp(ua->argk[2], "terminated") == 0) { + list_terminated_jobs(ua); + } else { + ua->send_msg("1900 Bad .status command, wrong argument.\n"); + return false; + } + } else if (strcasecmp(ua->argk[1], "client") == 0) { + client = get_client_resource(ua, JT_BACKUP_RESTORE); + if (client) { + Dmsg2(200, "Client=%s arg=%s\n", client->name(), NPRT(ua->argk[2])); + do_client_status(ua, client, ua->argk[2]); + } + } else if (strcasecmp(ua->argk[1], "storage") == 0) { + store = get_storage_resource(ua, false /*no default*/, true/*unique*/); + if (!store) { + ua->send_msg("1900 Bad .status command, wrong argument.\n"); + return false; + } + do_storage_status(ua, store, ua->argk[2]); + } else { + ua->send_msg("1900 Bad .status command, wrong argument.\n"); + return false; + } + + return true; +} + +/* Test the network between FD and SD */ +static int do_network_status(UAContext *ua) +{ + CLIENT *client = NULL; + USTORE store; + JCR *jcr = ua->jcr; + char *store_address, ed1[50]; + uint32_t store_port; + uint64_t nb = 50 * 1024 * 1024; + POOL_MEM buf; + + int i = find_arg_with_value(ua, "bytes"); + if (i > 0) { + if (!size_to_uint64(ua->argv[i], strlen(ua->argv[i]), &nb)) { + return 1; + } + } + + client = get_client_resource(ua, JT_BACKUP_RESTORE); + if (!client) { + return 1; + } + + store.store = get_storage_resource(ua, false, true); + if (!store.store) { + return 1; + } + + jcr->client = client; + set_wstorage(jcr, &store); + + if (!ua->api) { + ua->send_msg(_("Connecting to Storage %s at %s:%d\n"), + store.store->name(), store.store->address, store.store->SDport); + } + + if (!connect_to_storage_daemon(jcr, 10, SDConnectTimeout, 1)) { + ua->error_msg(_("Failed to connect to Storage.\n")); + goto bail_out; + } + + if (!start_storage_daemon_job(jcr, NULL, NULL)) { + goto bail_out; + } + + /* + * Note startup sequence of SD/FD is different depending on + * whether the SD listens (normal) or the SD calls the FD. + */ + if (!client->sd_calls_client) { + if (!run_storage_and_start_message_thread(jcr, jcr->store_bsock)) { + goto bail_out; + } + } /* Else it's done in init_storage_job() */ + + if (!ua->api) { + ua->send_msg(_("Connecting to Client %s at %s:%d\n"), + client->name(), client->address(buf.addr()), client->FDport); + } + + if (!connect_to_file_daemon(jcr, 1, 15, 0)) { + ua->error_msg(_("Failed to connect to Client.\n")); + goto bail_out; + } + + if (jcr->sd_calls_client) { + /* + * SD must call "client" i.e. FD + */ + if (jcr->FDVersion < 10) { + Jmsg(jcr, M_FATAL, 0, _("The File daemon does not support SDCallsClient.\n")); + goto bail_out; + } + if (!send_client_addr_to_sd(jcr)) { + goto bail_out; + } + if (!run_storage_and_start_message_thread(jcr, jcr->store_bsock)) { + goto bail_out; + } + + store_address = store.store->address; /* dummy */ + store_port = 0; /* flag that SD calls FD */ + + } else { + /* + * send Storage daemon address to the File daemon, + * then wait for File daemon to make connection + * with Storage daemon. 
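+       * SDDport (the data port) defaults to SDport when it is not explicitly set.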
+ */ + if (store.store->SDDport == 0) { + store.store->SDDport = store.store->SDport; + } + + store_address = get_storage_address(jcr->client, store.store); + store_port = store.store->SDDport; + } + + if (!send_store_addr_to_fd(jcr, store.store, store_address, store_port)) { + goto bail_out; + } + + if (!ua->api) { + ua->info_msg(_("Running network test between Client=%s and Storage=%s with %sB ...\n"), + client->name(), store.store->name(), edit_uint64_with_suffix(nb, ed1)); + } + + if (!jcr->file_bsock->fsend("testnetwork bytes=%lld\n", nb)) { + goto bail_out; + } + + while (jcr->file_bsock->recv() > 0) { + ua->info_msg(jcr->file_bsock->msg); + } + +bail_out: + if (jcr->file_bsock) { + jcr->file_bsock->signal(BNET_TERMINATE); + } + if (jcr->store_bsock) { + jcr->store_bsock->signal(BNET_TERMINATE); + } + wait_for_storage_daemon_termination(jcr); + + free_bsock(jcr->file_bsock); + free_bsock(jcr->store_bsock); + + jcr->client = NULL; + free_wstorage(jcr); + return 1; +} + +/* This is the *old* command handler, so we must return + * 1 or it closes the connection + */ +int qstatus_cmd(UAContext *ua, const char *cmd) +{ + dot_status_cmd(ua, cmd); + return 1; +} + +/* + * status command + */ +int status_cmd(UAContext *ua, const char *cmd) +{ + STORE *store; + CLIENT *client; + int item, i; + + Dmsg1(20, "status:%s:\n", cmd); + + for (i=1; i<ua->argc; i++) { + if (strcasecmp(ua->argk[i], NT_("network")) == 0) { + do_network_status(ua); + return 1; + } else if (strcasecmp(ua->argk[i], NT_("schedule")) == 0 || + strcasecmp(ua->argk[i], NT_("scheduled")) == 0) { + llist_scheduled_jobs(ua); + return 1; + } else if (strcasecmp(ua->argk[i], NT_("all")) == 0) { + do_all_status(ua); + return 1; + } else if (strcasecmp(ua->argk[i], NT_("dir")) == 0 || + strcasecmp(ua->argk[i], NT_("director")) == 0) { + do_director_status(ua); + return 1; + } else if (strcasecmp(ua->argk[i], NT_("client")) == 0) { + client = get_client_resource(ua, JT_BACKUP_RESTORE); + if (client) { + do_client_status(ua, client, NULL); + } + return 1; + } else { + store = get_storage_resource(ua, false/*no default*/, true/*unique*/); + if (store) { + if (find_arg(ua, NT_("slots")) > 0) { + status_slots(ua, store); + } else { + do_storage_status(ua, store, NULL); + } + } + return 1; + } + } + /* If no args, ask for status type */ + if (ua->argc == 1) { + char prmt[MAX_NAME_LENGTH]; + + start_prompt(ua, _("Status available for:\n")); + add_prompt(ua, NT_("Director")); + add_prompt(ua, NT_("Storage")); + add_prompt(ua, NT_("Client")); + add_prompt(ua, NT_("Scheduled")); + add_prompt(ua, NT_("Network")); + add_prompt(ua, NT_("All")); + Dmsg0(20, "do_prompt: select daemon\n"); + if ((item=do_prompt(ua, "", _("Select daemon type for status"), prmt, sizeof(prmt))) < 0) { + return 1; + } + Dmsg1(20, "item=%d\n", item); + switch (item) { + case 0: /* Director */ + do_director_status(ua); + break; + case 1: + store = select_storage_resource(ua, true/*unique*/); + if (store) { + do_storage_status(ua, store, NULL); + } + break; + case 2: + client = select_client_resource(ua, JT_BACKUP_RESTORE); + if (client) { + do_client_status(ua, client, NULL); + } + break; + case 3: + llist_scheduled_jobs(ua); + break; + case 4: + do_network_status(ua); + break; + case 5: + do_all_status(ua); + break; + default: + break; + } + } + return 1; +} + +static void do_all_status(UAContext *ua) +{ + STORE *store, **unique_store; + CLIENT *client, **unique_client; + POOL_MEM buf1, buf2; + int i, j; + bool found; + + do_director_status(ua); + + /* Count Storage items */ +
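/* The R_STORAGE list is walked twice: once to size the array, then again to keep a single entry per unique address:port pair */ +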
LockRes(); + i = 0; + foreach_res(store, R_STORAGE) { + i++; + } + unique_store = (STORE **) malloc(i * sizeof(STORE)); + /* Find Unique Storage address/port */ + i = 0; + foreach_res(store, R_STORAGE) { + found = false; + if (!acl_access_ok(ua, Storage_ACL, store->name())) { + continue; + } + for (j=0; jaddress, store->address) == 0 && + unique_store[j]->SDport == store->SDport) { + found = true; + break; + } + } + if (!found) { + unique_store[i++] = store; + Dmsg2(40, "Stuffing: %s:%d\n", store->address, store->SDport); + } + } + UnlockRes(); + + /* Call each unique Storage daemon */ + for (j=0; jname(), JT_BACKUP_RESTORE)) { + continue; + } + for (j=0; jaddress(buf1.addr()), client->address(buf2.addr())) == 0 && + unique_client[j]->FDport == client->FDport) { + found = true; + break; + } + } + if (!found) { + unique_client[i++] = client; + Dmsg2(40, "Stuffing: %s:%d\n", client->address(buf1.addr()), client->FDport); + } + } + UnlockRes(); + + /* Call each unique File daemon */ + for (j=0; japi_opts); + wt.start_group("header"); + wt.get_output( + OT_STRING, "name", my_name, + OT_STRING, "version", VERSION " (" BDATE ")", + OT_STRING, "uname", HOST_OS " " DISTNAME " " DISTVER, + OT_UTIME, "started", daemon_start_time, + OT_UTIME, "reloaded", last_reload_time, + OT_INT64, "pid", (int64_t)getpid(), + OT_INT, "jobs_run", num_jobs_run, + OT_INT, "jobs_running",job_count(), + OT_INT, "nclients", ((rblist *)res_head[R_CLIENT-r_first]->res_list)->size(), + OT_INT, "nstores", ((rblist *)res_head[R_STORAGE-r_first]->res_list)->size(), + OT_INT, "npools", ((rblist *)res_head[R_POOL-r_first]->res_list)->size(), + OT_INT, "ncats", ((rblist *)res_head[R_CATALOG-r_first]->res_list)->size(), + OT_INT, "nfset", ((rblist *)res_head[R_FILESET-r_first]->res_list)->size(), + OT_INT, "nscheds", ((rblist *)res_head[R_SCHEDULE-r_first]->res_list)->size(), + OT_PLUGINS,"plugins", b_plugin_list, + OT_END); + + ua->send_msg("%s", wt.end_group()); +} + +void list_dir_status_header(UAContext *ua) +{ + char dt[MAX_TIME_LENGTH], dt1[MAX_TIME_LENGTH]; + char b1[35], b2[35], b3[35], b4[35], b5[35]; + + if (ua->api > 1) { + api_list_dir_status_header(ua); + return; + } + + ua->send_msg(_("%s %sVersion: %s (%s) %s %s %s\n"), my_name, + "", VERSION, BDATE, HOST_OS, DISTNAME, DISTVER); + bstrftime_nc(dt, sizeof(dt), daemon_start_time); + bstrftimes(dt1, sizeof(dt1), last_reload_time); + ua->send_msg(_("Daemon started %s, conf reloaded %s\n"), dt, dt1); + ua->send_msg(_(" Jobs: run=%d, running=%d mode=%d,%d\n"), + num_jobs_run, job_count(), (int)DEVELOPER_MODE, 0); + ua->send_msg(_(" Heap: heap=%s smbytes=%s max_bytes=%s bufs=%s max_bufs=%s\n"), + edit_uint64_with_commas((char *)sbrk(0)-(char *)start_heap, b1), + edit_uint64_with_commas(sm_bytes, b2), + edit_uint64_with_commas(sm_max_bytes, b3), + edit_uint64_with_commas(sm_buffers, b4), + edit_uint64_with_commas(sm_max_buffers, b5)); + ua->send_msg(_(" Res: njobs=%d nclients=%d nstores=%d npools=%d ncats=%d" + " nfsets=%d nscheds=%d\n"), + ((rblist *)res_head[R_JOB-r_first]->res_list)->size(), + ((rblist *)res_head[R_CLIENT-r_first]->res_list)->size(), + ((rblist *)res_head[R_STORAGE-r_first]->res_list)->size(), + ((rblist *)res_head[R_POOL-r_first]->res_list)->size(), + ((rblist *)res_head[R_CATALOG-r_first]->res_list)->size(), + ((rblist *)res_head[R_FILESET-r_first]->res_list)->size(), + ((rblist *)res_head[R_SCHEDULE-r_first]->res_list)->size()); + + + /* TODO: use this function once for all daemons */ + if (b_plugin_list && b_plugin_list->size() > 0) { + int len; + 
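/* The plugin file names are concatenated into a single " Plugin:" line, wrapped roughly every 80 characters */ +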
Plugin *plugin; + POOL_MEM msg(PM_FNAME); + pm_strcpy(msg, " Plugin: "); + foreach_alist(plugin, b_plugin_list) { + len = pm_strcat(msg, plugin->file); + if (len > 80) { + pm_strcat(msg, "\n "); + } else { + pm_strcat(msg, " "); + } + } + ua->send_msg("%s\n", msg.c_str()); + } +} + +static void do_director_status(UAContext *ua) +{ + list_dir_status_header(ua); + + /* + * List scheduled Jobs + */ + list_scheduled_jobs(ua); + + /* + * List running jobs + */ + list_running_jobs(ua); + + /* + * List terminated jobs + */ + list_terminated_jobs(ua); + ua->send_msg("====\n"); +} + +static void do_storage_status(UAContext *ua, STORE *store, char *cmd) +{ + BSOCK *sd; + USTORE lstore; + + + if (!acl_access_ok(ua, Storage_ACL, store->name())) { + ua->error_msg(_("No authorization for Storage \"%s\"\n"), store->name()); + return; + } + /* + * The Storage daemon is problematic because it shows information + * related to multiple Job, so if there is a Client or Job + * ACL restriction, we forbid all access to the Storage. + */ + if (have_restricted_acl(ua, Client_ACL) || + have_restricted_acl(ua, Job_ACL)) { + ua->error_msg(_("Restricted Client or Job does not permit access to Storage daemons\n")); + return; + } + lstore.store = store; + pm_strcpy(lstore.store_source, _("unknown source")); + set_wstorage(ua->jcr, &lstore); + /* Try connecting for up to 15 seconds */ + if (!ua->api) ua->send_msg(_("Connecting to Storage daemon %s at %s:%d\n"), + store->name(), store->address, store->SDport); + if (!connect_to_storage_daemon(ua->jcr, 1, 15, 0)) { + ua->send_msg(_("\nFailed to connect to Storage daemon %s.\n====\n"), + store->name()); + free_bsock(ua->jcr->store_bsock); + return; + } + Dmsg0(20, "Connected to storage daemon\n"); + sd = ua->jcr->store_bsock; + if (cmd) { + POOL_MEM devname; + /* + * For .status storage=xxx shstore list + * send .status shstore list xxx-device + */ + if (strcasecmp(cmd, "shstore") == 0) { + if (!ua->argk[3]) { + ua->send_msg(_("Must have three arguments\n")); + return; + } + pm_strcpy(devname, store->dev_name()); + bash_spaces(devname.c_str()); + sd->fsend(".status %s %s %s api=%d api_opts=%s", + cmd, ua->argk[3], devname.c_str(), + ua->api, ua->api_opts); + } else { + int i = find_arg_with_value(ua, "device"); + if (i>0) { + Mmsg(devname, "device=%s", ua->argv[i]); + bash_spaces(devname.c_str()); + } + sd->fsend(".status %s api=%d api_opts=%s %s", + cmd, ua->api, ua->api_opts, devname.c_str()); + } + } else { + sd->fsend("status"); + } + while (sd->recv() >= 0) { + ua->send_msg("%s", sd->msg); + } + sd->signal(BNET_TERMINATE); + free_bsock(ua->jcr->store_bsock); + return; +} + +static void do_client_status(UAContext *ua, CLIENT *client, char *cmd) +{ + BSOCK *fd; + POOL_MEM buf; + + if (!acl_access_client_ok(ua, client->name(), JT_BACKUP_RESTORE)) { + ua->error_msg(_("No authorization for Client \"%s\"\n"), client->name()); + return; + } + /* Connect to File daemon */ + ua->jcr->client = client; + /* Release any old dummy key */ + if (ua->jcr->sd_auth_key) { + free(ua->jcr->sd_auth_key); + } + /* Create a new dummy SD auth key */ + ua->jcr->sd_auth_key = bstrdup("dummy"); + + /* Try to connect for 15 seconds */ + if (!ua->api) ua->send_msg(_("Connecting to Client %s at %s:%d\n"), + client->name(), client->address(buf.addr()), client->FDport); + if (!connect_to_file_daemon(ua->jcr, 1, 15, 0)) { + ua->send_msg(_("Failed to connect to Client %s.\n====\n"), + client->name()); + free_bsock(ua->jcr->file_bsock); + return; + } + Dmsg0(20, _("Connected to file daemon\n")); + fd = 
ua->jcr->file_bsock; + if (cmd) { + fd->fsend(".status %s api=%d api_opts=%s", cmd, ua->api, ua->api_opts); + } else { + fd->fsend("status"); + } + while (fd->recv() >= 0) { + ua->send_msg("%s", fd->msg); + } + fd->signal(BNET_TERMINATE); + free_bsock(ua->jcr->file_bsock); + + return; +} + +static void prt_runhdr(UAContext *ua) +{ + if (!ua->api) { + ua->send_msg(_("\nScheduled Jobs:\n")); + ua->send_msg(_("Level Type Pri Scheduled Job Name Volume\n")); + ua->send_msg(_("===================================================================================\n")); + } +} + +static void prt_lrunhdr(UAContext *ua) +{ + if (!ua->api) { + ua->send_msg(_("\nScheduled Jobs:\n")); + ua->send_msg(_("Level Type Pri Scheduled Job Name Schedule\n")); + ua->send_msg(_("=====================================================================================\n")); + } +} + + +/* Scheduling packet */ +struct sched_pkt { + dlink link; /* keep this as first item!!! */ + JOB *job; + int level; + int priority; + utime_t runtime; + POOL *pool; + STORE *store; +}; + +static void prt_runtime(UAContext *ua, sched_pkt *sp, OutputWriter *ow) +{ + char dt[MAX_TIME_LENGTH], edl[50]; + const char *level_ptr; + bool ok = false; + bool close_db = false; + JCR *jcr = ua->jcr; + MEDIA_DBR mr; + POOL_MEM errmsg; + int orig_jobtype; + + orig_jobtype = jcr->getJobType(); + if (sp->job->JobType == JT_BACKUP) { + jcr->db = NULL; + ok = complete_jcr_for_job(jcr, sp->job, sp->pool); + Dmsg1(250, "Using pool=%s\n", jcr->pool->name()); + if (jcr->db) { + close_db = true; /* new db opened, remember to close it */ + } + if (ok) { + mr.PoolId = jcr->jr.PoolId; + jcr->wstore = sp->store; + set_storageid_in_mr(jcr->wstore, &mr); + Dmsg0(250, "call find_next_volume_for_append\n"); + /* no need to set ScratchPoolId, since we use fnv_no_create_vol */ + ok = find_next_volume_for_append(jcr, &mr, 1, fnv_no_create_vol, fnv_no_prune, errmsg); + } + if (!ok) { + bstrncpy(mr.VolumeName, "*unknown*", sizeof(mr.VolumeName)); + } + } + bstrftime_nc(dt, sizeof(dt), sp->runtime); + switch (sp->job->JobType) { + case JT_ADMIN: + level_ptr = "Admin"; + break; + case JT_RESTORE: + level_ptr = "Restore"; + break; + default: + level_ptr = level_to_str(edl, sizeof(edl), sp->level); + break; + } + if (ua->api == 1) { + ua->send_msg(_("%-14s\t%-8s\t%3d\t%-18s\t%-18s\t%s\n"), + level_ptr, job_type_to_str(sp->job->JobType), sp->priority, dt, + sp->job->name(), mr.VolumeName); + + } else if (ua->api > 1) { + ua->send_msg("%s", + ow->get_output(OT_CLEAR, + OT_START_OBJ, + OT_STRING, "name", sp->job->name(), + OT_JOBLEVEL, "level", sp->level, + OT_JOBTYPE, "type", sp->job->JobType, + OT_INT, "priority",sp->priority, + OT_UTIME, "schedtime", sp->runtime, + OT_STRING, "volume", mr.VolumeName, + OT_STRING, "pool", jcr->pool?jcr->pool->name():"", + OT_STRING, "storage", jcr->wstore?jcr->wstore->name():"", + OT_END_OBJ, + OT_END)); + + + } else { + ua->send_msg(_("%-14s %-8s %3d %-18s %-18s %s\n"), + level_ptr, job_type_to_str(sp->job->JobType), sp->priority, dt, + sp->job->name(), mr.VolumeName); + } + if (close_db) { + db_close_database(jcr, jcr->db); + } + jcr->db = ua->db; /* restore ua db to jcr */ + jcr->setJobType(orig_jobtype); +} + +/* We store each job schedule into a rblist to display them ordered */ +typedef struct { + rblink lnk; + btime_t time; + int prio; + int level; + SCHED *sched; + JOB *job; +} schedule; + +static int compare(void *i1, void *i2) +{ + int ret; + schedule *item1 = (schedule *)i1; + schedule *item2 = (schedule *)i2; + + /* Order by time */ + 
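/* ties are broken by priority and then by job name; a full tie returns 1 so an identical entry is still inserted, after the existing one */ +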
if (item1->time > item2->time) { + return 1; + } else if (item1->time < item2->time) { + return -1; + } + + /* If same time, order by priority */ + if (item1->prio > item2->prio) { + return 1; + } else if (item1->prio < item2->prio) { + return -1; + } + + /* If same priority, order by name */ + ret = strcmp(item1->job->name(), item2->job->name()); + if (ret != 0) { + return ret; + } + + return 1; /* If same name, same time, same prio => insert after */ +} + +static bool is_included(const char *str, alist *list) +{ + char *v; + if (list->size() == 0) { /* The list is empty, we take everything */ + return true; + } + foreach_alist(v, list) { + if (strcmp(v, str) == 0) { + return true; + } + } + return false; +} + +/* + * Detailed listing of all scheduler jobs + */ +static void llist_scheduled_jobs(UAContext *ua) +{ + utime_t runtime=0; + RUN *run; + JOB *job; + int level, num_jobs = 0; + int priority; + bool limit_set = false; + char sched_name[MAX_NAME_LENGTH] = {0}, edl[50]; + char *n, *p; + SCHED *sched; + int days=10, limit=30; + time_t now = time(NULL); + time_t next; + rblist *list; + alist clients(10, not_owned_by_alist); + alist jobs(10, not_owned_by_alist); + schedule *item = NULL; + + Dmsg0(200, "enter list_sched_jobs()\n"); + + for (int i=0; i < ua->argc ; i++) { + if (strcmp(ua->argk[i], NT_("limit")) == 0) { + limit = atoi(ua->argv[i]); + if (((limit < 0) || (limit > 2000)) && !ua->api) { + ua->send_msg(_("Ignoring invalid value for limit. Max is 2000.\n")); + limit = 2000; + } + limit_set = true; + + } else if (strcmp(ua->argk[i], NT_("days")) == 0) { + days = atoi(ua->argv[i]); + if (((days < 0) || (days > 3000)) && !ua->api) { + ua->send_msg(_("Ignoring invalid value for days. Max is 3000.\n")); + days = 3000; + } + if (!limit_set) { + limit = 0; /* Disable limit if not set explicitely */ + } + + } else if (strcmp(ua->argk[i], NT_("time")) == 0) { + now = str_to_utime(ua->argv[i]); + if (now == 0) { + ua->send_msg(_("Ignoring invalid time.\n")); + now = time(NULL); + } + + } else if (strcmp(ua->argk[i], NT_("schedule")) == 0 && ua->argv[i]) { + bstrncpy(sched_name, ua->argv[i], sizeof(sched_name)); + + } else if (strcmp(ua->argk[i], NT_("job")) == 0) { + p = ua->argv[i]; + while ((n = next_name(&p)) != NULL) { + jobs.append(n); + } + + } else if (strcmp(ua->argk[i], NT_("client")) == 0) { + p = ua->argv[i]; + while ((n = next_name(&p)) != NULL) { + clients.append(n); + } + } + } + + list = New(rblist(item, &item->lnk)); + + /* Loop through all jobs */ + LockRes(); + foreach_res(job, R_JOB) { + if (!acl_access_ok(ua, Job_ACL, job->name())) { + continue; + } + sched = job->schedule; + if (!sched || !job->is_enabled() || (sched && !sched->is_enabled()) || + (job->client && !job->client->is_enabled())) { + continue; /* no, skip this job */ + } + if (sched_name[0] && strcmp(sched_name, sched->name()) != 0) { + continue; + } + if (!is_included(job->name(), &jobs)) { + continue; + } + if (!is_included(job->client->name(), &clients)) { + continue; + } + for (run=sched->run; run; run=run->next) { + next = now; + for (int i=0; imonth), + bit_is_set(mday, run->mday), + bit_is_set(wday, run->wday), + bit_is_set(wom, run->wom), + bit_is_set(woy, run->woy), + bit_is_set(31, run->mday)); +#endif + + ok = (bit_is_set(mday, run->mday) && + bit_is_set(wday, run->wday) && + bit_is_set(month, run->month) && + bit_is_set(wom, run->wom) && + bit_is_set(woy, run->woy)) || + (bit_is_set(month, run->month) && + bit_is_set(31, run->mday) && mday == ldom); + if (!ok) { + next += 24 * 60 * 60; /* Add 
one day */ + continue; + } + for (int j=0; j < 24; j++) { + if (bit_is_set(j, run->hour)) { + tm.tm_hour = j; + tm.tm_min = run->minute; + tm.tm_sec = 0; + runtime = mktime(&tm); + break; + } + } + + level = job->JobLevel; + if (run->level) { + level = run->level; + } + priority = job->Priority; + if (run->Priority) { + priority = run->Priority; + } + + item = (schedule *) malloc(sizeof(schedule)); + item->time = runtime; + item->prio = priority; + item->job = job; + item->sched = sched; + item->level = level; + list->insert(item, compare); + + next += 24 * 60 * 60; /* Add one day */ + num_jobs++; + if (limit > 0 && num_jobs >= limit) { + goto get_out; + } + } + } /* end loop over run pkts */ + } /* end for loop over resources */ +get_out: + UnlockRes(); + prt_lrunhdr(ua); + OutputWriter ow(ua->api_opts); + if (ua->api > 1) { + ua->send_msg("%s", ow.start_group("scheduled")); + } + foreach_rblist(item, list) { + char dt[MAX_TIME_LENGTH]; + const char *level_ptr; + bstrftime_dn(dt, sizeof(dt), item->time); + + switch (item->job->JobType) { + case JT_ADMIN: + level_ptr = "Admin"; + break; + case JT_RESTORE: + level_ptr = "Restore"; + break; + default: + level_ptr = level_to_str(edl, sizeof(edl), item->level); + break; + } + + if (ua->api > 1) { + bool use_client = + item->job->JobType == JT_BACKUP || + item->job->JobType == JT_RESTORE; + + ua->send_msg("%s", ow.get_output(OT_CLEAR, + OT_START_OBJ, + OT_JOBLEVEL ,"level", item->level, + OT_JOBTYPE, "type", item->job->JobType, + OT_STRING, "name", item->job->name(), + OT_STRING, "client", use_client?item->job->client->name() : "", + OT_STRING, "fileset", item->job->fileset->name(), + OT_UTIME, "schedtime", item->time, + OT_INT32, "priority", item->prio, + OT_STRING, "schedule", item->sched->name(), + OT_END_OBJ, + OT_END)); + + } else if (ua->api) { + ua->send_msg(_("%-14s\t%-8s\t%3d\t%-18s\t%-18s\t%s\n"), + level_ptr, job_type_to_str(item->job->JobType), + item->prio, dt, + item->job->name(), item->sched->name()); + } else { + ua->send_msg(_("%-14s %-8s %3d %-18s %-18s %s\n"), + level_ptr, job_type_to_str(item->job->JobType), item->prio, dt, + item->job->name(), item->sched->name()); + } + } + if (ua->api > 1) { + ua->send_msg("%s", ow.end_group()); + } + delete list; + + if (num_jobs == 0 && !ua->api) { + ua->send_msg(_("No Scheduled Jobs.\n")); + } + if (!ua->api) ua->send_msg("====\n"); + Dmsg0(200, "Leave ;list_sched_jobs_runs()\n"); +} + +/* + * Sort items by runtime, priority + */ +static int my_compare(void *item1, void *item2) +{ + sched_pkt *p1 = (sched_pkt *)item1; + sched_pkt *p2 = (sched_pkt *)item2; + if (p1->runtime < p2->runtime) { + return -1; + } else if (p1->runtime > p2->runtime) { + return 1; + } + if (p1->priority < p2->priority) { + return -1; + } else if (p1->priority > p2->priority) { + return 1; + } + return 0; +} + +/* + * Find all jobs to be run in roughly the + * next 24 hours. + */ +static void list_scheduled_jobs(UAContext *ua) +{ + OutputWriter ow(ua->api_opts); + utime_t runtime; + RUN *run; + JOB *job; + int level, num_jobs = 0; + int priority; + char sched_name[MAX_NAME_LENGTH]; + dlist sched; + sched_pkt *sp; + int days, i; + + Dmsg0(200, "enter list_sched_jobs()\n"); + + days = 1; + i = find_arg_with_value(ua, NT_("days")); + if (i >= 0) { + days = atoi(ua->argv[i]); + if (((days < 0) || (days > 500)) && !ua->api) { + ua->send_msg(_("Ignoring invalid value for days. 
Max is 500.\n")); + days = 1; + } + } + i = find_arg_with_value(ua, NT_("schedule")); + if (i >= 0) { + bstrncpy(sched_name, ua->argv[i], sizeof(sched_name)); + } else { + sched_name[0] = 0; + } + + /* Loop through all jobs */ + LockRes(); + foreach_res(job, R_JOB) { + if (!acl_access_ok(ua, Job_ACL, job->name()) || !job->is_enabled()) { + continue; + } + if (sched_name[0] && job->schedule && + strcasecmp(job->schedule->name(), sched_name) != 0) { + continue; + } + for (run=NULL; (run = find_next_run(run, job, runtime, days)); ) { + USTORE store; + level = job->JobLevel; + if (run->level) { + level = run->level; + } + priority = job->Priority; + if (run->Priority) { + priority = run->Priority; + } + sp = (sched_pkt *)malloc(sizeof(sched_pkt)); + sp->job = job; + sp->level = level; + sp->priority = priority; + sp->runtime = runtime; + sp->pool = run->pool; + get_job_storage(&store, job, run); + sp->store = store.store; + Dmsg3(250, "job=%s store=%s MediaType=%s\n", job->name(), sp->store->name(), sp->store->media_type); + sched.binary_insert_multiple(sp, my_compare); + num_jobs++; + } + } /* end for loop over resources */ + UnlockRes(); + prt_runhdr(ua); + foreach_dlist(sp, &sched) { + prt_runtime(ua, sp, &ow); + } + if (num_jobs == 0 && !ua->api) { + ua->send_msg(_("No Scheduled Jobs.\n")); + } + if (!ua->api) ua->send_msg("====\n"); + Dmsg0(200, "Leave list_sched_jobs_runs()\n"); +} + +static void list_running_jobs(UAContext *ua) +{ + JCR *jcr; + int njobs = 0; + int i; + int32_t status; + const char *msg, *msgdir; + char *emsg; /* edited message */ + char dt[MAX_TIME_LENGTH]; + char level[10]; + bool pool_mem = false; + OutputWriter ow(ua->api_opts); + JobId_t jid = 0; + + if ((i = find_arg_with_value(ua, "jobid")) >= 0) { + jid = str_to_int64(ua->argv[i]); + } + + Dmsg0(200, "enter list_run_jobs()\n"); + + if (!ua->api) { + ua->send_msg(_("\nRunning Jobs:\n")); + foreach_jcr(jcr) { + if (jcr->JobId == 0) { /* this is us */ + /* this is a console or other control job. We only show console + * jobs in the status output. 
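+ * A JobId of 0 marks an internal JCR (the connected console itself or
+ * another control connection) rather than a real backup or restore job.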
+ */ + if (jcr->getJobType() == JT_CONSOLE) { + bstrftime_nc(dt, sizeof(dt), jcr->start_time); + ua->send_msg(_("Console connected %sat %s\n"), + (ua->UA_sock && ua->UA_sock->tls)?_("using TLS "):"", + dt); + } + continue; + } + } + endeach_jcr(jcr); + } + + njobs = 0; /* count the number of job really displayed */ + foreach_jcr(jcr) { + if (jcr->JobId == 0 || !jcr->job || !acl_access_ok(ua, Job_ACL, jcr->job->name())) { + continue; + } + /* JobId keyword found in command line */ + if (jid > 0 && jcr->JobId != jid) { + continue; + } + + if (++njobs == 1) { + /* display the header for the first job */ + if (!ua->api) { + ua->send_msg(_(" JobId Type Level Files Bytes Name Status\n")); + ua->send_msg(_("======================================================================\n")); + + } else if (ua->api > 1) { + ua->send_msg(ow.start_group("running", false)); + } + } + status = jcr->JobStatus; + switch (status) { + case JS_Created: + msg = _("is waiting execution"); + break; + case JS_Running: + msg = _("is running"); + break; + case JS_Blocked: + msg = _("is blocked"); + break; + case JS_Terminated: + msg = _("has terminated"); + break; + case JS_Warnings: + msg = _("has terminated with warnings"); + break; + case JS_Incomplete: + msg = _("has terminated in incomplete state"); + break; + case JS_ErrorTerminated: + msg = _("has erred"); + break; + case JS_Error: + msg = _("has errors"); + break; + case JS_FatalError: + msg = _("has a fatal error"); + break; + case JS_Differences: + msg = _("has verify differences"); + break; + case JS_Canceled: + msg = _("has been canceled"); + break; + case JS_WaitFD: + emsg = (char *) get_pool_memory(PM_FNAME); + if (!jcr->client) { + Mmsg(emsg, _("is waiting on Client")); + } else { + Mmsg(emsg, _("is waiting on Client %s"), jcr->client->name()); + } + pool_mem = true; + msg = emsg; + break; + case JS_WaitSD: + emsg = (char *) get_pool_memory(PM_FNAME); + if (jcr->wstore) { + Mmsg(emsg, _("is waiting on Storage \"%s\""), jcr->wstore->name()); + } else if (jcr->rstore) { + Mmsg(emsg, _("is waiting on Storage \"%s\""), jcr->rstore->name()); + } else { + Mmsg(emsg, _("is waiting on Storage")); + } + pool_mem = true; + msg = emsg; + break; + case JS_WaitStoreRes: + msg = _("is waiting on max Storage jobs"); + break; + case JS_WaitClientRes: + msg = _("is waiting on max Client jobs"); + break; + case JS_WaitJobRes: + msg = _("is waiting on max Job jobs"); + break; + case JS_WaitMaxJobs: + msg = _("is waiting on max total jobs"); + break; + case JS_WaitStartTime: + emsg = (char *) get_pool_memory(PM_FNAME); + Mmsg(emsg, _("is waiting for its start time (%s)"), + bstrftime_ny(dt, sizeof(dt), jcr->sched_time)); + pool_mem = true; + msg = emsg; + break; + case JS_WaitPriority: + msg = _("is waiting for higher priority jobs to finish"); + break; + case JS_WaitDevice: + msg = _("is waiting for a Shared Storage device"); + break; + case JS_DataCommitting: + msg = _("SD committing Data"); + break; + case JS_DataDespooling: + msg = _("SD despooling Data"); + break; + case JS_AttrDespooling: + msg = _("SD despooling Attributes"); + break; + case JS_AttrInserting: + msg = _("Dir inserting Attributes"); + break; + + default: + emsg = (char *)get_pool_memory(PM_FNAME); + Mmsg(emsg, _("is in unknown state %c"), jcr->JobStatus); + pool_mem = true; + msg = emsg; + break; + } + msgdir = msg; /* Keep it to know if we update the status variable */ + /* + * Now report Storage daemon status code + */ + switch (jcr->SDJobStatus) { + case JS_WaitMount: + if (pool_mem) { + 
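/* drop the Director-level message built above; the SD mount-request status below replaces it */ +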
free_pool_memory(emsg); + pool_mem = false; + } + msg = _("is waiting for a mount request"); + break; + case JS_WaitMedia: + if (pool_mem) { + free_pool_memory(emsg); + pool_mem = false; + } + msg = _("is waiting for an appendable Volume"); + break; + case JS_WaitFD: + /* Special case when JobStatus=JS_WaitFD, we don't have a FD link yet + * we need to stay in WaitFD status See bee mantis #1414 */ + if (jcr->JobStatus != JS_WaitFD) { + if (!pool_mem) { + emsg = (char *)get_pool_memory(PM_FNAME); + pool_mem = true; + } + if (!jcr->client || !jcr->wstore) { + Mmsg(emsg, _("is waiting for Client to connect to Storage daemon")); + } else { + Mmsg(emsg, _("is waiting for Client %s to connect to Storage %s"), + jcr->client->name(), jcr->wstore->name()); + } + msg = emsg; + } + break; + case JS_DataCommitting: + msg = _("SD committing Data"); + break; + case JS_DataDespooling: + msg = _("SD despooling Data"); + break; + case JS_AttrDespooling: + msg = _("SD despooling Attributes"); + break; + case JS_AttrInserting: + msg = _("Dir inserting Attributes"); + break; + } + if (msg != msgdir) { + status = jcr->SDJobStatus; + } + switch (jcr->getJobType()) { + case JT_ADMIN: + bstrncpy(level, "Admin", sizeof(level)); + break; + case JT_RESTORE: + bstrncpy(level, "Restore", sizeof(level)); + break; + default: + level_to_str(level, sizeof(level), jcr->getJobLevel()); + level[7] = 0; + break; + } + + if (ua->api == 1) { + bash_spaces(jcr->comment); + ua->send_msg(_("%6d\t%-6s\t%-20s\t%s\t%s\n"), + jcr->JobId, level, jcr->Job, msg, jcr->comment); + unbash_spaces(jcr->comment); + + } else if (ua->api > 1) { + ua->send_msg("%s", ow.get_output(OT_CLEAR, + OT_START_OBJ, + OT_INT32, "jobid", jcr->JobId, + OT_JOBLEVEL,"level", jcr->getJobLevel(), + OT_JOBTYPE, "type", jcr->getJobType(), + OT_JOBSTATUS,"status", status, + OT_STRING, "status_desc",msg, + OT_STRING, "comment", jcr->comment, + OT_SIZE, "jobbytes", jcr->JobBytes, + OT_INT32, "jobfiles", jcr->JobFiles, + OT_STRING, "job", jcr->Job, + OT_STRING, "name", jcr->job->name(), + OT_STRING, "clientname",jcr->client?jcr->client->name():"", + OT_STRING, "fileset", jcr->fileset?jcr->fileset->name():"", + OT_STRING, "storage", jcr->wstore?jcr->wstore->name():"", + OT_STRING, "rstorage", jcr->rstore?jcr->rstore->name():"", + OT_UTIME, "schedtime", jcr->sched_time, + OT_UTIME, "starttime", jcr->start_time, + OT_INT32, "priority", jcr->JobPriority, + OT_INT32, "errors", jcr->JobErrors, + OT_END_OBJ, + OT_END)); + + } else { + char b1[50], b2[50], b3[50]; + level[4] = 0; + bstrncpy(b1, job_type_to_str(jcr->getJobType()), sizeof(b1)); + b1[4] = 0; + ua->send_msg(_("%6d %-4s %-3s %10s %10s %-17s %s\n"), + jcr->JobId, b1, level, + edit_uint64_with_commas(jcr->JobFiles, b2), + edit_uint64_with_suffix(jcr->JobBytes, b3), + jcr->job->name(), msg); + } + + if (pool_mem) { + free_pool_memory(emsg); + pool_mem = false; + } + } + endeach_jcr(jcr); + + if (njobs == 0) { + /* Note the following message is used in regress -- don't change */ + ua->send_msg(_("No Jobs running.\n====\n")); + Dmsg0(200, "leave list_run_jobs()\n"); + return; + } else { + /* display a closing header */ + if (!ua->api) { + ua->send_msg("====\n"); + } else if (ua->api > 1) { + ua->send_msg(ow.end_group(false)); + } + } + Dmsg0(200, "leave list_run_jobs()\n"); +} + +static void list_terminated_jobs(UAContext *ua) +{ + char dt[MAX_TIME_LENGTH], b1[30], b2[30]; + char level[10]; + OutputWriter ow(ua->api_opts); + + if (last_jobs->empty()) { + if (!ua->api) ua->send_msg(_("No Terminated Jobs.\n")); + return; 
+ } + lock_last_jobs_list(); + struct s_last_job *je; + if (!ua->api) { + ua->send_msg(_("\nTerminated Jobs:\n")); + ua->send_msg(_(" JobId Level Files Bytes Status Finished Name \n")); + ua->send_msg(_("====================================================================\n")); + } else if (ua->api > 1) { + ua->send_msg(ow.start_group("terminated")); + } + foreach_dlist(je, last_jobs) { + char JobName[MAX_NAME_LENGTH]; + const char *termstat; + + bstrncpy(JobName, je->Job, sizeof(JobName)); + /* There are three periods after the Job name */ + char *p; + for (int i=0; i<3; i++) { + if ((p=strrchr(JobName, '.')) != NULL) { + *p = 0; + } + } + + if (!acl_access_ok(ua, Job_ACL, JobName)) { + continue; + } + + bstrftime_nc(dt, sizeof(dt), je->end_time); + switch (je->JobType) { + case JT_ADMIN: + bstrncpy(level, "Admin", sizeof(level)); + break; + case JT_RESTORE: + bstrncpy(level, "Restore", sizeof(level)); + break; + default: + level_to_str(level, sizeof(level), je->JobLevel); + level[4] = 0; + break; + } + switch (je->JobStatus) { + case JS_Created: + termstat = _("Created"); + break; + case JS_FatalError: + case JS_ErrorTerminated: + termstat = _("Error"); + break; + case JS_Differences: + termstat = _("Diffs"); + break; + case JS_Canceled: + termstat = _("Cancel"); + break; + case JS_Terminated: + termstat = _("OK"); + break; + case JS_Warnings: + termstat = _("OK -- with warnings"); + break; + case JS_Incomplete: + termstat = _("Incomplete"); + break; + default: + termstat = _("Other"); + break; + } + if (ua->api == 1) { + ua->send_msg(_("%7d\t%-6s\t%8s\t%10s\t%-7s\t%-8s\t%s\n"), + je->JobId, + level, + edit_uint64_with_commas(je->JobFiles, b1), + edit_uint64_with_suffix(je->JobBytes, b2), + termstat, + dt, JobName); + } else if (ua->api > 1) { + ua->send_msg("%s", + ow.get_output(OT_CLEAR, + OT_START_OBJ, + OT_INT32, "jobid", je->JobId, + OT_JOBLEVEL,"level", je->JobLevel, + OT_JOBTYPE, "type", je->JobType, + OT_JOBSTATUS,"status", je->JobStatus, + OT_STRING, "status_desc",termstat, + OT_SIZE, "jobbytes", je->JobBytes, + OT_INT32, "jobfiles", je->JobFiles, + OT_STRING, "job", je->Job, + OT_UTIME, "starttime", je->start_time, + OT_UTIME, "endtime", je->end_time, + OT_INT32, "errors", je->Errors, + OT_END_OBJ, + OT_END)); + + } else { + ua->send_msg(_("%6d %-7s %8s %10s %-7s %-8s %s\n"), + je->JobId, + level, + edit_uint64_with_commas(je->JobFiles, b1), + edit_uint64_with_suffix(je->JobBytes, b2), + termstat, + dt, JobName); + } + } + if (!ua->api) { + ua->send_msg(_("\n")); + } else if (ua->api > 1) { + ua->send_msg(ow.end_group(false)); + } + unlock_last_jobs_list(); +} diff --git a/src/dird/ua_tree.c b/src/dird/ua_tree.c new file mode 100644 index 00000000..a03eb1b1 --- /dev/null +++ b/src/dird/ua_tree.c @@ -0,0 +1,935 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula Director -- User Agent Database File tree for Restore + * command. 
This file interacts with the user implementing the + * UA tree commands. + * + * Kern Sibbald, July MMII + */ + +#include "bacula.h" +#include "dird.h" +#ifdef HAVE_FNMATCH +#include <fnmatch.h> +#else +#include "lib/fnmatch.h" +#endif +#include "findlib/find.h" + + +/* Forward referenced commands */ + +static int markcmd(UAContext *ua, TREE_CTX *tree); +static int markdircmd(UAContext *ua, TREE_CTX *tree); +static int countcmd(UAContext *ua, TREE_CTX *tree); +static int findcmd(UAContext *ua, TREE_CTX *tree); +static int lscmd(UAContext *ua, TREE_CTX *tree); +static int lsmarkcmd(UAContext *ua, TREE_CTX *tree); +static int dircmd(UAContext *ua, TREE_CTX *tree); +static int dot_dircmd(UAContext *ua, TREE_CTX *tree); +static int estimatecmd(UAContext *ua, TREE_CTX *tree); +static int helpcmd(UAContext *ua, TREE_CTX *tree); +static int cdcmd(UAContext *ua, TREE_CTX *tree); +static int pwdcmd(UAContext *ua, TREE_CTX *tree); +static int dot_pwdcmd(UAContext *ua, TREE_CTX *tree); +static int unmarkcmd(UAContext *ua, TREE_CTX *tree); +static int unmarkdircmd(UAContext *ua, TREE_CTX *tree); +static int quitcmd(UAContext *ua, TREE_CTX *tree); +static int donecmd(UAContext *ua, TREE_CTX *tree); +static int dot_lsdircmd(UAContext *ua, TREE_CTX *tree); +static int dot_lscmd(UAContext *ua, TREE_CTX *tree); +static int dot_helpcmd(UAContext *ua, TREE_CTX *tree); +static int dot_lsmarkcmd(UAContext *ua, TREE_CTX *tree); + +struct cmdstruct { const char *key; int (*func)(UAContext *ua, TREE_CTX *tree); const char *help; }; +static struct cmdstruct commands[] = { + { NT_("add"), markcmd, _("add dir/file to be restored recursively, wildcards allowed")}, + { NT_("cd"), cdcmd, _("change current directory")}, + { NT_("count"), countcmd, _("count marked files in and below the cd")}, + { NT_("delete"), unmarkcmd, _("delete dir/file to be restored recursively in dir")}, + { NT_("dir"), dircmd, _("long list current directory, wildcards allowed")}, + { NT_(".dir"), dot_dircmd, _("long list current directory, wildcards allowed")}, + { NT_("done"), donecmd, _("leave file selection mode")}, + { NT_("estimate"), estimatecmd, _("estimate restore size")}, + { NT_("exit"), donecmd, _("same as done command")}, + { NT_("find"), findcmd, _("find files, wildcards allowed")}, + { NT_("help"), helpcmd, _("print help")}, + { NT_("ls"), lscmd, _("list current directory, wildcards allowed")}, + { NT_(".ls"), dot_lscmd, _("list current directory, wildcards allowed")}, + { NT_(".lsdir"), dot_lsdircmd, _("list subdir in current directory, wildcards allowed")}, + { NT_("lsmark"), lsmarkcmd, _("list the marked files in and below the cd")}, + { NT_(".lsmark"), dot_lsmarkcmd,_("list the marked files in")}, + { NT_("mark"), markcmd, _("mark dir/file to be restored recursively, wildcards allowed")}, + { NT_("markdir"), markdircmd, _("mark directory name to be restored (no files)")}, + { NT_("pwd"), pwdcmd, _("print current working directory")}, + { NT_(".pwd"), dot_pwdcmd, _("print current working directory")}, + { NT_("unmark"), unmarkcmd, _("unmark dir/file to be restored recursively in dir")}, + { NT_("unmarkdir"), unmarkdircmd, _("unmark directory name only no recursion")}, + { NT_("quit"), quitcmd, _("quit and do not do restore")}, + { NT_(".help"), dot_helpcmd, _("print help")}, + { NT_("?"), helpcmd, _("print help")}, + }; +#define comsize ((int)(sizeof(commands)/sizeof(struct cmdstruct))) + +/* + * Enter a prompt mode where the user can select/deselect + * files to be restored.
This is sort of like a mini-shell + * that allows "cd", "pwd", "add", "rm", ... + */ +bool user_select_files_from_tree(TREE_CTX *tree) +{ + char cwd[2000]; + bool stat; + /* Get a new context so we don't destroy restore command args */ + UAContext *ua = new_ua_context(tree->ua->jcr); + ua->UA_sock = tree->ua->UA_sock; /* patch in UA socket */ + ua->api = tree->ua->api; /* keep API flag too */ + BSOCK *user = ua->UA_sock; + + ua->send_msg(_( + "\nYou are now entering file selection mode where you add (mark) and\n" + "remove (unmark) files to be restored. No files are initially added, unless\n" + "you used the \"all\" keyword on the command line.\n" + "Enter \"done\" to leave this mode.\n\n")); + if (ua->api) user->signal(BNET_START_RTREE); + /* + * Enter interactive command handler allowing selection + * of individual files. + */ + tree->node = (TREE_NODE *)tree->root; + tree_getpath(tree->node, cwd, sizeof(cwd)); + ua->send_msg(_("cwd is: %s\n"), cwd); + for ( ;; ) { + int found, len, i; + if (!get_cmd(ua, "$ ", true)) { + break; + } + if (ua->api) user->signal(BNET_CMD_BEGIN); + parse_args_only(ua->cmd, &ua->args, &ua->argc, ua->argk, ua->argv, MAX_CMD_ARGS); + if (ua->argc == 0) { + ua->warning_msg(_("Invalid command \"%s\". Enter \"done\" to exit.\n"), ua->cmd); + if (ua->api) user->signal(BNET_CMD_FAILED); + continue; + } + + len = strlen(ua->argk[0]); + found = 0; + stat = false; + for (i=0; i<comsize; i++) + if (strncasecmp(ua->argk[0], commands[i].key, len) == 0) { + stat = (*commands[i].func)(ua, tree); /* go execute command */ + found = 1; + break; + } + if (!found) { + if (*ua->argk[0] == '.') { + /* Some unknown dot command -- probably .messages, ignore it */ + continue; + } + ua->warning_msg(_("Invalid command \"%s\". Enter \"done\" to exit.\n"), ua->cmd); + if (ua->api) user->signal(BNET_CMD_FAILED); + continue; + } + if (ua->api) user->signal(BNET_CMD_OK); + if (!stat) { + break; + } + } + if (ua->api) user->signal(BNET_END_RTREE); + ua->UA_sock = NULL; /* don't release restore socket */ + stat = !ua->quit; + ua->quit = false; + free_ua_context(ua); /* get rid of temp UA context */ + return stat; +} + +/* + * This callback routine is responsible for inserting the + * items it gets into the directory tree. For each JobId selected + * this routine is called once for each file. We do not allow + * duplicate filenames, but instead keep the info from the most + * recent file entered (i.e. the JobIds are assumed to be sorted) + * + * See uar_sel_files in sql_cmds.c for query that calls us.
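+ * The query returns one row per file of each selected JobId, laid out as: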
+ * row[0]=Path, row[1]=Filename, row[2]=FileIndex + * row[3]=JobId row[4]=LStat row[5]=DeltaSeq + */ +int insert_tree_handler(void *ctx, int num_fields, char **row) +{ + struct stat statp; + TREE_CTX *tree = (TREE_CTX *)ctx; + TREE_NODE *node; + int type; + bool hard_link, ok; + int FileIndex; + int32_t delta_seq; + JobId_t JobId; + HL_ENTRY *entry = NULL; + int32_t LinkFI; + + Dmsg4(150, "Path=%s%s FI=%s JobId=%s\n", row[0], row[1], + row[2], row[3]); + if (*row[1] == 0) { /* no filename => directory */ + if (!IsPathSeparator(*row[0])) { /* Must be Win32 directory */ + type = TN_DIR_NLS; + } else { + type = TN_DIR; + } + } else { + type = TN_FILE; + } + decode_stat(row[4], &statp, sizeof(statp), &LinkFI); + + hard_link = (LinkFI != 0); + node = insert_tree_node(row[0], row[1], type, tree->root, NULL); + JobId = str_to_int64(row[3]); + FileIndex = str_to_int64(row[2]); + delta_seq = str_to_int64(row[5]); + Dmsg6(150, "node=0x%p JobId=%s FileIndex=%s Delta=%s node.delta=%d LinkFI=%d\n", + node, row[3], row[2], row[5], node->delta_seq, LinkFI); + + /* TODO: check with hardlinks */ + if (delta_seq > 0) { + if (delta_seq == (node->delta_seq + 1)) { + tree_add_delta_part(tree->root, node, node->JobId, node->FileIndex); + + } else { + /* File looks to be deleted */ + if (node->delta_seq == -1) { /* just created */ + tree_remove_node(tree->root, node); + + } else { + tree->ua->warning_msg(_("Something is wrong with the Delta sequence of %s, " + "skipping new parts. Current sequence is %d\n"), + row[1], node->delta_seq); + + Dmsg3(0, "Something is wrong with Delta, skip it " + "fname=%s d1=%d d2=%d\n", row[1], node->delta_seq, delta_seq); + } + return 0; + } + } + /* + * - The first time we see a file (node->inserted==true), we accept it. + * - In the same JobId, we accept only the first copy of a + * hard linked file (the others are simply pointers). + * - In the same JobId, we accept the last copy of any other + * file -- in particular directories. + * + * All the code to set ok could be condensed to a single + * line, but it would be even harder to read. 
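+ * (Condensed, the rule below is: ok = node->inserted || JobId != node->JobId ||
+ * (hard_link ? FileIndex <= node->FileIndex : FileIndex >= node->FileIndex).)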
+ */ + ok = true; + if (!node->inserted && JobId == node->JobId) { + if ((hard_link && FileIndex > node->FileIndex) || + (!hard_link && FileIndex < node->FileIndex)) { + ok = false; + } + } + if (ok) { + node->hard_link = hard_link; + node->FileIndex = FileIndex; + node->JobId = JobId; + node->type = type; + node->soft_link = S_ISLNK(statp.st_mode) != 0; + node->delta_seq = delta_seq; + node->can_access = true; + if (tree->all) { + node->extract = true; /* extract all by default */ + if (type == TN_DIR || type == TN_DIR_NLS) { + node->extract_dir = true; /* if dir, extract it */ + } + } + /* insert file having hardlinks into hardlink hashtable */ + if (statp.st_nlink > 1 && type != TN_DIR && type != TN_DIR_NLS) { + if (!LinkFI) { + /* first occurrence - file hardlinked to */ + entry = (HL_ENTRY *)tree->root->hardlinks.hash_malloc(sizeof(HL_ENTRY)); + entry->key = (((uint64_t) JobId) << 32) + FileIndex; + entry->node = node; + tree->root->hardlinks.insert(entry->key, entry); + } else if (tree->hardlinks_in_mem) { + /* hardlink to known file index: lookup original file */ + uint64_t file_key = (((uint64_t) JobId) << 32) + LinkFI; + HL_ENTRY *first_hl = (HL_ENTRY *) tree->root->hardlinks.lookup(file_key); + if (first_hl && first_hl->node) { + /* then add hardlink entry to linked node*/ + entry = (HL_ENTRY *)tree->root->hardlinks.hash_malloc(sizeof(HL_ENTRY)); + entry->key = (((uint64_t) JobId) << 32) + FileIndex; + entry->node = first_hl->node; + tree->root->hardlinks.insert(entry->key, entry); + } + } + } + } + if (node->inserted) { + tree->FileCount++; + if (tree->DeltaCount > 0 && (tree->FileCount-tree->LastCount) > tree->DeltaCount) { + tree->ua->send_msg("+"); + tree->LastCount = tree->FileCount; + } + } + tree->cnt++; + return 0; +} + +/* + * Set extract to value passed. We recursively walk + * down the tree setting all children if the + * node is a directory. + */ +static int set_extract(UAContext *ua, TREE_NODE *node, TREE_CTX *tree, bool extract) +{ + TREE_NODE *n; + int count = 0; + + node->extract = extract; + if (node->type == TN_DIR || node->type == TN_DIR_NLS) { + node->extract_dir = extract; /* set/clear dir too */ + } + if (node->type != TN_NEWDIR) { + count++; + } + /* For a non-file (i.e. directory), we see all the children */ + if (node->type != TN_FILE || (node->soft_link && tree_node_has_child(node))) { + /* Recursive set children within directory */ + foreach_child(n, node) { + count += set_extract(ua, n, tree, extract); + } + /* + * Walk up tree marking any unextracted parent to be + * extracted. + */ + if (!tree->no_auto_parent && extract) { + while (node->parent && !node->parent->extract_dir) { + node = node->parent; + node->extract_dir = true; + } + } + } else if (extract) { + uint64_t key = 0; + if (tree->hardlinks_in_mem) { + if (node->hard_link) { + key = (((uint64_t) node->JobId) << 32) + node->FileIndex; /* every hardlink is in hashtable, and it points to linked file */ + } + } else { + /* Get the hard link if it exists */ + FILE_DBR fdbr; + struct stat statp; + char cwd[2000]; + /* + * Ordinary file, we get the full path, look up the + * attributes, decode them, and if we are hard linked to + * a file that was saved, we must load that file too. 
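+ * The hardlink hash key packs the JobId into the high 32 bits and the
+ * file index (FileIndex, or LinkFI for the link target) into the low
+ * 32 bits, i.e. key = (((uint64_t)JobId) << 32) + LinkFI;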
+ */ + tree_getpath(node, cwd, sizeof(cwd)); + fdbr.FileId = 0; + fdbr.JobId = node->JobId; + if (node->hard_link && db_get_file_attributes_record(ua->jcr, ua->db, cwd, NULL, &fdbr)) { + int32_t LinkFI; + decode_stat(fdbr.LStat, &statp, sizeof(statp), &LinkFI); /* decode stat pkt */ + key = (((uint64_t) node->JobId) << 32) + LinkFI; /* lookup by linked file's fileindex */ + } + } + /* If file hard linked and we have a key */ + if (node->hard_link && key != 0) { + /* + * If we point to a hard linked file, find that file in + * hardlinks hashmap, and mark it to be restored as well. + */ + HL_ENTRY *entry = (HL_ENTRY *)tree->root->hardlinks.lookup(key); + if (entry && entry->node) { + n = entry->node; + n->extract = true; + n->extract_dir = (n->type == TN_DIR || n->type == TN_DIR_NLS); + } + } + } + return count; +} + +static void strip_trailing_slash(char *arg) +{ + int len = strlen(arg); + if (len == 0) { + return; + } + len--; + if (arg[len] == '/') { /* strip any trailing slash */ + arg[len] = 0; + } +} + +/* + * Recursively mark the current directory to be restored as + * well as all directories and files below it. + */ +static int markcmd(UAContext *ua, TREE_CTX *tree) +{ + TREE_NODE *node; + int count = 0; + char ec1[50]; + + if (ua->argc < 2 || !tree_node_has_child(tree->node)) { + ua->send_msg(_("No files marked.\n")); + return 1; + } + for (int i=1; i < ua->argc; i++) { + strip_trailing_slash(ua->argk[i]); + foreach_child(node, tree->node) { + if (fnmatch(ua->argk[i], node->fname, 0) == 0) { + count += set_extract(ua, node, tree, true); + } + } + } + if (count == 0) { + ua->send_msg(_("No files marked.\n")); + } else if (count == 1) { + ua->send_msg(_("1 file marked.\n")); + } else { + ua->send_msg(_("%s files marked.\n"), + edit_uint64_with_commas(count, ec1)); + } + return 1; +} + +static int markdircmd(UAContext *ua, TREE_CTX *tree) +{ + TREE_NODE *node; + int count = 0; + char ec1[50]; + + if (ua->argc < 2 || !tree_node_has_child(tree->node)) { + ua->send_msg(_("No files marked.\n")); + return 1; + } + for (int i=1; i < ua->argc; i++) { + strip_trailing_slash(ua->argk[i]); + foreach_child(node, tree->node) { + if (fnmatch(ua->argk[i], node->fname, 0) == 0) { + if (node->type == TN_DIR || node->type == TN_DIR_NLS) { + node->extract_dir = true; + count++; + } + } + } + } + if (count == 0) { + ua->send_msg(_("No directories marked.\n")); + } else if (count == 1) { + ua->send_msg(_("1 directory marked.\n")); + } else { + ua->send_msg(_("%s directories marked.\n"), + edit_uint64_with_commas(count, ec1)); + } + return 1; +} + + +static int countcmd(UAContext *ua, TREE_CTX *tree) +{ + int total, num_extract; + char ec1[50], ec2[50]; + + total = num_extract = 0; + for (TREE_NODE *node=first_tree_node(tree->root); node; node=next_tree_node(node)) { + if (node->type != TN_NEWDIR) { + total++; + if (node->extract || node->extract_dir) { + num_extract++; + } + } + } + ua->send_msg(_("%s total files/dirs. 
%s marked to be restored.\n"), + edit_uint64_with_commas(total, ec1), + edit_uint64_with_commas(num_extract, ec2)); + return 1; +} + +static int findcmd(UAContext *ua, TREE_CTX *tree) +{ + char cwd[2000]; + + if (ua->argc == 1) { + ua->send_msg(_("No file specification given.\n")); + return 1; /* make it non-fatal */ + } + + for (int i=1; i < ua->argc; i++) { + for (TREE_NODE *node=first_tree_node(tree->root); node; node=next_tree_node(node)) { + if (fnmatch(ua->argk[i], node->fname, 0) == 0) { + const char *tag; + tree_getpath(node, cwd, sizeof(cwd)); + if (node->extract) { + tag = "*"; + } else if (node->extract_dir) { + tag = "+"; + } else { + tag = ""; + } + ua->send_msg("%s%s\n", tag, cwd); + } + } + } + return 1; +} + +static int dot_lsdircmd(UAContext *ua, TREE_CTX *tree) +{ + TREE_NODE *node; + + if (!tree_node_has_child(tree->node)) { + return 1; + } + + foreach_child(node, tree->node) { + if (ua->argc == 1 || fnmatch(ua->argk[1], node->fname, 0) == 0) { + if (tree_node_has_child(node)) { + ua->send_msg("%s/\n", node->fname); + } + } + } + + return 1; +} + +static int dot_helpcmd(UAContext *ua, TREE_CTX *tree) +{ + for (int i=0; isend_msg("%s\n", commands[i].key); + } + } + return 1; +} + +static int dot_lscmd(UAContext *ua, TREE_CTX *tree) +{ + TREE_NODE *node; + + if (!tree_node_has_child(tree->node)) { + return 1; + } + + foreach_child(node, tree->node) { + if (ua->argc == 1 || fnmatch(ua->argk[1], node->fname, 0) == 0) { + ua->send_msg("%s%s\n", node->fname, tree_node_has_child(node)?"/":""); + } + } + + return 1; +} + +static int lscmd(UAContext *ua, TREE_CTX *tree) +{ + TREE_NODE *node; + + if (!tree_node_has_child(tree->node)) { + return 1; + } + foreach_child(node, tree->node) { + if (ua->argc == 1 || fnmatch(ua->argk[1], node->fname, 0) == 0) { + const char *tag; + if (node->extract) { + tag = "*"; + } else if (node->extract_dir) { + tag = "+"; + } else { + tag = ""; + } + ua->send_msg("%s%s%s\n", tag, node->fname, tree_node_has_child(node)?"/":""); + } + } + return 1; +} + +/* + * Ls command that lists only the marked files + */ +static int dot_lsmarkcmd(UAContext *ua, TREE_CTX *tree) +{ + TREE_NODE *node; + if (!tree_node_has_child(tree->node)) { + return 1; + } + foreach_child(node, tree->node) { + if ((ua->argc == 1 || fnmatch(ua->argk[1], node->fname, 0) == 0) && + (node->extract || node->extract_dir)) { + ua->send_msg("%s%s\n", node->fname, tree_node_has_child(node)?"/":""); + } + } + return 1; +} + +/* + * This recursive ls command that lists only the marked files + */ +static void rlsmark(UAContext *ua, TREE_NODE *tnode, int level) +{ + TREE_NODE *node; + const int max_level = 100; + char indent[max_level*2+1]; + int i, j; + if (!tree_node_has_child(tnode)) { + return; + } + level = MIN(level, max_level); + j = 0; + for (i=0; iargc == 1 || fnmatch(ua->argk[1], node->fname, 0) == 0) && + (node->extract || node->extract_dir)) { + const char *tag; + if (node->extract) { + tag = "*"; + } else if (node->extract_dir) { + tag = "+"; + } else { + tag = ""; + } + ua->send_msg("%s%s%s%s\n", indent, tag, node->fname, tree_node_has_child(node)?"/":""); + if (tree_node_has_child(node)) { + rlsmark(ua, node, level+1); + } + } + } +} + +static int lsmarkcmd(UAContext *ua, TREE_CTX *tree) +{ + rlsmark(ua, tree->node, 0); + return 1; +} + +/* + * This is actually the long form used for "dir" + */ +static void ls_output(guid_list *guid, char *buf, const char *fname, const char *tag, + struct stat *statp, bool dot_cmd) +{ + char *p; + const char *f; + char ec1[30]; + char en1[30], 
en2[30]; + int n; + time_t time; + + p = encode_mode(statp->st_mode, buf); + if (dot_cmd) { + *p++ = ','; + n = sprintf(p, "%d,", (uint32_t)statp->st_nlink); + p += n; + n = sprintf(p, "%s,%s,", + guid->uid_to_name(statp->st_uid, en1, sizeof(en1)), + guid->gid_to_name(statp->st_gid, en2, sizeof(en2))); + p += n; + n = sprintf(p, "%s,", edit_int64(statp->st_size, ec1)); + p += n; + p = encode_time(statp->st_mtime, p); + *p++ = ','; + *p++ = *tag; + *p++ = ','; + } else { + n = sprintf(p, " %2d ", (uint32_t)statp->st_nlink); + p += n; + n = sprintf(p, "%-8.8s %-8.8s", + guid->uid_to_name(statp->st_uid, en1, sizeof(en1)), + guid->gid_to_name(statp->st_gid, en2, sizeof(en2))); + p += n; + n = sprintf(p, "%12.12s ", edit_int64(statp->st_size, ec1)); + p += n; + if (statp->st_ctime > statp->st_mtime) { + time = statp->st_ctime; + } else { + time = statp->st_mtime; + } + /* Display most recent time */ + p = encode_time(time, p); + *p++ = ' '; + *p++ = *tag; + } + for (f=fname; *f; ) { + *p++ = *f++; + } + *p = 0; +} + +/* + * Like ls command, but give more detail on each file + */ +static int do_dircmd(UAContext *ua, TREE_CTX *tree, bool dot_cmd) +{ + TREE_NODE *node; + FILE_DBR fdbr; + struct stat statp; + char buf[1100]; + char cwd[1100], *pcwd; + guid_list *guid; + + if (!tree_node_has_child(tree->node)) { + ua->send_msg(_("Node %s has no children.\n"), tree->node->fname); + return 1; + } + + guid = new_guid_list(); + foreach_child(node, tree->node) { + const char *tag; + if (ua->argc == 1 || fnmatch(ua->argk[1], node->fname, 0) == 0) { + if (node->extract) { + tag = "*"; + } else if (node->extract_dir) { + tag = "+"; + } else { + tag = " "; + } + tree_getpath(node, cwd, sizeof(cwd)); + fdbr.FileId = 0; + fdbr.JobId = node->JobId; + /* + * Strip / from soft links to directories. + * This is because soft links to files have a trailing slash + * when returned from tree_getpath, but db_get_file_attr... + * treats soft links as files, so they do not have a trailing + * slash like directory names. 
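+ * e.g. tree_getpath() may return "/etc/rc.link/" for a soft link while the
+ * catalog stores "/etc/rc.link", so the trailing slash is stripped below
+ * before the attribute lookup (the path here is only illustrative).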
+ */ + if (node->type == TN_FILE && tree_node_has_child(node)) { + bstrncpy(buf, cwd, sizeof(buf)); + pcwd = buf; + int len = strlen(buf); + if (len > 1) { + buf[len-1] = 0; /* strip trailing / */ + } + } else { + pcwd = cwd; + } + if (db_get_file_attributes_record(ua->jcr, ua->db, pcwd, NULL, &fdbr)) { + int32_t LinkFI; + decode_stat(fdbr.LStat, &statp, sizeof(statp), &LinkFI); /* decode stat pkt */ + } else { + /* Something went wrong getting attributes -- print name */ + memset(&statp, 0, sizeof(statp)); + } + ls_output(guid, buf, cwd, tag, &statp, dot_cmd); + ua->send_msg("%s\n", buf); + } + } + free_guid_list(guid); + return 1; +} + +int dot_dircmd(UAContext *ua, TREE_CTX *tree) +{ + return do_dircmd(ua, tree, true/*dot command*/); +} + +static int dircmd(UAContext *ua, TREE_CTX *tree) +{ + return do_dircmd(ua, tree, false/*not dot command*/); +} + + +static int estimatecmd(UAContext *ua, TREE_CTX *tree) +{ + int total, num_extract; + uint64_t total_bytes = 0; + FILE_DBR fdbr; + struct stat statp; + char cwd[1100]; + char ec1[50]; + + total = num_extract = 0; + for (TREE_NODE *node=first_tree_node(tree->root); node; node=next_tree_node(node)) { + if (node->type != TN_NEWDIR) { + total++; + /* If regular file, get size */ + if (node->extract && node->type == TN_FILE) { + num_extract++; + tree_getpath(node, cwd, sizeof(cwd)); + fdbr.FileId = 0; + fdbr.JobId = node->JobId; + if (db_get_file_attributes_record(ua->jcr, ua->db, cwd, NULL, &fdbr)) { + int32_t LinkFI; + decode_stat(fdbr.LStat, &statp, sizeof(statp), &LinkFI); /* decode stat pkt */ + if (S_ISREG(statp.st_mode) && statp.st_size > 0) { + total_bytes += statp.st_size; + } + } + /* Directory, count only */ + } else if (node->extract || node->extract_dir) { + num_extract++; + } + } + } + ua->send_msg(_("%d total files; %d marked to be restored; %s bytes.\n"), + total, num_extract, edit_uint64_with_commas(total_bytes, ec1)); + return 1; +} + + + +static int helpcmd(UAContext *ua, TREE_CTX *tree) +{ + unsigned int i; + + ua->send_msg(_(" Command Description\n ======= ===========\n")); + for (i=0; isend_msg(" %-10s %s\n", _(commands[i].key), _(commands[i].help)); + } + } + ua->send_msg("\n"); + return 1; +} + +/* + * Change directories. Note, if the user specifies x: and it fails, + * we assume it is a Win32 absolute cd rather than relative and + * try a second time with /x: ... Win32 kludge. + */ +static int cdcmd(UAContext *ua, TREE_CTX *tree) +{ + TREE_NODE *node; + char cwd[2000]; + + + if (ua->argc != 2) { + ua->error_msg(_("Too few or too many arguments. Try using double quotes.\n")); + return 1; + } + + node = tree_cwd(ua->argk[1], tree->root, tree->node); + if (!node) { + /* Try once more if Win32 drive -- make absolute */ + if (ua->argk[1][1] == ':') { /* win32 drive */ + bstrncpy(cwd, "/", sizeof(cwd)); + bstrncat(cwd, ua->argk[1], sizeof(cwd)); + node = tree_cwd(cwd, tree->root, tree->node); + } + if (!node) { + ua->warning_msg(_("Invalid path given.\n")); + } + } + if (node) { + if (node->can_access) { + tree->node = node; + } else { + ua->warning_msg(_("Invalid path given. 
Permission denied.\n")); + } + } + return pwdcmd(ua, tree); +} + +static int pwdcmd(UAContext *ua, TREE_CTX *tree) +{ + char cwd[2000]; + tree_getpath(tree->node, cwd, sizeof(cwd)); + if (ua->api) { + ua->send_msg("%s", cwd); + } else { + ua->send_msg(_("cwd is: %s\n"), cwd); + } + return 1; +} + +static int dot_pwdcmd(UAContext *ua, TREE_CTX *tree) +{ + char cwd[2000]; + tree_getpath(tree->node, cwd, sizeof(cwd)); + ua->send_msg("%s", cwd); + return 1; +} + +static int unmarkcmd(UAContext *ua, TREE_CTX *tree) +{ + TREE_NODE *node; + int count = 0; + + if (ua->argc < 2 || !tree_node_has_child(tree->node)) { + ua->send_msg(_("No files unmarked.\n")); + return 1; + } + for (int i=1; i < ua->argc; i++) { + strip_trailing_slash(ua->argk[i]); + foreach_child(node, tree->node) { + if (fnmatch(ua->argk[i], node->fname, 0) == 0) { + count += set_extract(ua, node, tree, false); + } + } + } + if (count == 0) { + ua->send_msg(_("No files unmarked.\n")); + } else if (count == 1) { + ua->send_msg(_("1 file unmarked.\n")); + } else { + char ed1[50]; + ua->send_msg(_("%s files unmarked.\n"), edit_uint64_with_commas(count, ed1)); + } + return 1; +} + +static int unmarkdircmd(UAContext *ua, TREE_CTX *tree) +{ + TREE_NODE *node; + int count = 0; + + if (ua->argc < 2 || !tree_node_has_child(tree->node)) { + ua->send_msg(_("No directories unmarked.\n")); + return 1; + } + + for (int i=1; i < ua->argc; i++) { + strip_trailing_slash(ua->argk[i]); + foreach_child(node, tree->node) { + if (fnmatch(ua->argk[i], node->fname, 0) == 0) { + if (node->type == TN_DIR || node->type == TN_DIR_NLS) { + node->extract_dir = false; + count++; + } + } + } + } + + if (count == 0) { + ua->send_msg(_("No directories unmarked.\n")); + } else if (count == 1) { + ua->send_msg(_("1 directory unmarked.\n")); + } else { + ua->send_msg(_("%d directories unmarked.\n"), count); + } + return 1; +} + + +static int donecmd(UAContext *ua, TREE_CTX *tree) +{ + return 0; +} + +static int quitcmd(UAContext *ua, TREE_CTX *tree) +{ + ua->quit = true; + return 0; +} diff --git a/src/dird/ua_update.c b/src/dird/ua_update.c new file mode 100644 index 00000000..92059520 --- /dev/null +++ b/src/dird/ua_update.c @@ -0,0 +1,1073 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * Bacula Director -- Update command processing + * Split from ua_cmds.c March 2005 + * + * Kern Sibbald, September MM + * + */ + +#include "bacula.h" +#include "dird.h" + +/* Forward referenced functions */ +static int update_volume(UAContext *ua); +static bool update_pool(UAContext *ua); +static bool update_job(UAContext *ua); +static bool update_stats(UAContext *ua); + +/* + * Update a Pool Record in the database. + * It is always updated from the Resource record. + * + * update pool= + * updates pool from Pool resource + * update media pool= volume= + * changes pool info for volume + * update slots [scan=...] + * updates autochanger slots + * update stats [days=...] 
+ * updates long term statistics + */ +int update_cmd(UAContext *ua, const char *cmd) +{ + static const char *kw[] = { + NT_("media"), /* 0 */ + NT_("volume"), /* 1 */ + NT_("pool"), /* 2 */ + NT_("slots"), /* 3 */ + NT_("slot"), /* 4 */ + NT_("jobid"), /* 5 */ + NT_("stats"), /* 6 */ + NT_("snap"), /* 7 */ + NT_("snapshot"),/* 8 */ + NULL}; + + if (!open_client_db(ua)) { + return 1; + } + + switch (find_arg_keyword(ua, kw)) { + case 0: + case 1: + update_volume(ua); + return 1; + case 2: + update_pool(ua); + return 1; + case 3: + case 4: + update_slots(ua); + return 1; + case 5: + update_job(ua); + return 1; + case 6: + update_stats(ua); + return 1; + case 7: + case 8: + update_snapshot(ua); + return 1; + default: + break; + } + + start_prompt(ua, _("Update choice:\n")); + add_prompt(ua, _("Volume parameters")); + add_prompt(ua, _("Pool from resource")); + add_prompt(ua, _("Slots from autochanger")); + add_prompt(ua, _("Long term statistics")); + add_prompt(ua, _("Snapshot parameters")); + switch (do_prompt(ua, _("item"), _("Choose catalog item to update"), NULL, 0)) { + case 0: + update_volume(ua); + break; + case 1: + update_pool(ua); + break; + case 2: + update_slots(ua); + break; + case 3: + update_stats(ua); + break; + case 4: + update_snapshot(ua); + break; + default: + break; + } + return 1; +} + +static void update_volstatus(UAContext *ua, const char *val, MEDIA_DBR *mr) +{ + POOL_MEM query(PM_MESSAGE); + const char *kw[] = { + NT_("Append"), + NT_("Archive"), + NT_("Disabled"), + NT_("Full"), + NT_("Used"), + NT_("Cleaning"), + NT_("Recycle"), + NT_("Read-Only"), + NT_("Error"), + NULL}; + bool found = false; + int i; + + for (i=0; kw[i]; i++) { + if (strcasecmp(val, kw[i]) == 0) { + found = true; + break; + } + } + if (!found) { + ua->error_msg(_("Invalid VolStatus specified: %s\n"), val); + } else { + char ed1[50]; + bstrncpy(mr->VolStatus, kw[i], sizeof(mr->VolStatus)); + Mmsg(query, "UPDATE Media SET VolStatus='%s' WHERE MediaId=%s", + mr->VolStatus, edit_int64(mr->MediaId,ed1)); + if (!db_sql_query(ua->db, query.c_str(), NULL, NULL)) { + ua->error_msg("%s", db_strerror(ua->db)); + } else { + ua->info_msg(_("New Volume status is: %s\n"), mr->VolStatus); + } + } +} + +static void update_volretention(UAContext *ua, char *val, MEDIA_DBR *mr) +{ + char ed1[150], ed2[50]; + POOL_MEM query(PM_MESSAGE); + if (!duration_to_utime(val, &mr->VolRetention)) { + ua->error_msg(_("Invalid retention period specified: %s\n"), val); + return; + } + Mmsg(query, "UPDATE Media SET VolRetention=%s WHERE MediaId=%s", + edit_uint64(mr->VolRetention, ed1), edit_int64(mr->MediaId,ed2)); + if (!db_sql_query(ua->db, query.c_str(), NULL, NULL)) { + ua->error_msg("%s", db_strerror(ua->db)); + } else { + ua->info_msg(_("New retention period is: %s\n"), + edit_utime(mr->VolRetention, ed1, sizeof(ed1))); + } +} + +static void update_vol_cacheretention(UAContext *ua, char *val, MEDIA_DBR *mr) +{ + char ed1[150], ed2[50]; + POOL_MEM query(PM_MESSAGE); + if (!duration_to_utime(val, &mr->CacheRetention)) { + ua->error_msg(_("Invalid cache retention period specified: %s\n"), val); + return; + } + Mmsg(query, "UPDATE Media SET CacheRetention=%s WHERE MediaId=%s", + edit_uint64(mr->CacheRetention, ed1), edit_int64(mr->MediaId,ed2)); + if (!db_sql_query(ua->db, query.c_str(), NULL, NULL)) { + ua->error_msg("%s", db_strerror(ua->db)); + } else { + ua->info_msg(_("New Cache Retention period is: %s\n"), + edit_utime(mr->CacheRetention, ed1, sizeof(ed1))); + } +} + +static void update_voluseduration(UAContext *ua, char 
*val, MEDIA_DBR *mr) +{ + char ed1[150], ed2[50]; + POOL_MEM query(PM_MESSAGE); + + if (!duration_to_utime(val, &mr->VolUseDuration)) { + ua->error_msg(_("Invalid use duration specified: %s\n"), val); + return; + } + Mmsg(query, "UPDATE Media SET VolUseDuration=%s WHERE MediaId=%s", + edit_uint64(mr->VolUseDuration, ed1), edit_int64(mr->MediaId,ed2)); + if (!db_sql_query(ua->db, query.c_str(), NULL, NULL)) { + ua->error_msg("%s", db_strerror(ua->db)); + } else { + ua->info_msg(_("New use duration is: %s\n"), + edit_utime(mr->VolUseDuration, ed1, sizeof(ed1))); + } +} + +static void update_volmaxjobs(UAContext *ua, char *val, MEDIA_DBR *mr) +{ + POOL_MEM query(PM_MESSAGE); + char ed1[50]; + Mmsg(query, "UPDATE Media SET MaxVolJobs=%s WHERE MediaId=%s", + val, edit_int64(mr->MediaId,ed1)); + if (!db_sql_query(ua->db, query.c_str(), NULL, NULL)) { + ua->error_msg("%s", db_strerror(ua->db)); + } else { + ua->info_msg(_("New max jobs is: %s\n"), val); + } +} + +static void update_volmaxfiles(UAContext *ua, char *val, MEDIA_DBR *mr) +{ + POOL_MEM query(PM_MESSAGE); + char ed1[50]; + Mmsg(query, "UPDATE Media SET MaxVolFiles=%s WHERE MediaId=%s", + val, edit_int64(mr->MediaId, ed1)); + if (!db_sql_query(ua->db, query.c_str(), NULL, NULL)) { + ua->error_msg("%s", db_strerror(ua->db)); + } else { + ua->info_msg(_("New max files is: %s\n"), val); + } +} + +static void update_volmaxbytes(UAContext *ua, char *val, MEDIA_DBR *mr) +{ + uint64_t maxbytes; + char ed1[50], ed2[50]; + POOL_MEM query(PM_MESSAGE); + + if (!size_to_uint64(val, strlen(val), &maxbytes)) { + ua->error_msg(_("Invalid max. bytes specification: %s\n"), val); + return; + } + Mmsg(query, "UPDATE Media SET MaxVolBytes=%s WHERE MediaId=%s", + edit_uint64(maxbytes, ed1), edit_int64(mr->MediaId, ed2)); + if (!db_sql_query(ua->db, query.c_str(), NULL, NULL)) { + ua->error_msg("%s", db_strerror(ua->db)); + } else { + ua->info_msg(_("New Max bytes is: %s\n"), edit_uint64(maxbytes, ed1)); + } +} + +static void update_volrecycle(UAContext *ua, char *val, MEDIA_DBR *mr) +{ + int recycle; + char ed1[50]; + + POOL_MEM query(PM_MESSAGE); + if (!is_yesno(val, &recycle)) { + ua->error_msg(_("Invalid value. It must be yes or no.\n")); + return; + } + Mmsg(query, "UPDATE Media SET Recycle=%d WHERE MediaId=%s", + recycle, edit_int64(mr->MediaId, ed1)); + if (!db_sql_query(ua->db, query.c_str(), NULL, NULL)) { + ua->error_msg("%s", db_strerror(ua->db)); + } else { + ua->info_msg(_("New Recycle flag is: %s\n"), + recycle==1?_("yes"):_("no")); + } +} + +static void update_volinchanger(UAContext *ua, char *val, MEDIA_DBR *mr) +{ + int InChanger; + char ed1[50]; + + POOL_MEM query(PM_MESSAGE); + if (!is_yesno(val, &InChanger)) { + ua->error_msg(_("Invalid value. It must be yes or no.\n")); + return; + } + Mmsg(query, "UPDATE Media SET InChanger=%d WHERE MediaId=%s", + InChanger, edit_int64(mr->MediaId, ed1)); + if (!db_sql_query(ua->db, query.c_str(), NULL, NULL)) { + ua->error_msg("%s", db_strerror(ua->db)); + } else { + ua->info_msg(_("New InChanger flag is: %s\n"), + InChanger==1?_("yes"):_("no")); + } +} + + +static void update_volslot(UAContext *ua, char *val, MEDIA_DBR *mr) +{ + POOL_DBR pr; + + memset(&pr, 0, sizeof(POOL_DBR)); + pr.PoolId = mr->PoolId; + if (!db_get_pool_numvols(ua->jcr, ua->db, &pr)) { + ua->error_msg("%s", db_strerror(ua->db)); + return; + } + mr->Slot = atoi(val); + if (mr->Slot < 0) { + ua->error_msg(_("Invalid slot, it must be greater than zero\n")); + } + /* + * Make sure to use db_update... 
rather than doing this directly, + * so that any Slot is handled correctly. + */ + set_storageid_in_mr(NULL, mr); + if (!db_update_media_record(ua->jcr, ua->db, mr)) { + ua->error_msg(_("Error updating media record Slot: ERR=%s"), db_strerror(ua->db)); + } else { + ua->info_msg(_("New Slot is: %d\n"), mr->Slot); + } +} + +/* Modify the Pool in which this Volume is located */ +void update_vol_pool(UAContext *ua, char *val, MEDIA_DBR *mr, POOL_DBR *opr) +{ + POOL_DBR pr; + POOL_MEM query(PM_MESSAGE); + char ed1[50], ed2[50]; + + memset(&pr, 0, sizeof(pr)); + bstrncpy(pr.Name, val, sizeof(pr.Name)); + if (!get_pool_dbr(ua, &pr)) { + return; + } + mr->PoolId = pr.PoolId; /* set new PoolId */ + /* + */ + db_lock(ua->db); + Mmsg(query, "UPDATE Media SET PoolId=%s WHERE MediaId=%s", + edit_int64(mr->PoolId, ed1), edit_int64(mr->MediaId, ed2)); + if (!db_sql_query(ua->db, query.c_str(), NULL, NULL)) { + ua->error_msg("%s", db_strerror(ua->db)); + } else { + ua->info_msg(_("New Pool is: %s\n"), pr.Name); + opr->NumVols--; + if (!db_update_pool_record(ua->jcr, ua->db, opr)) { + ua->error_msg("%s", db_strerror(ua->db)); + } + pr.NumVols++; + if (!db_update_pool_record(ua->jcr, ua->db, &pr)) { + ua->error_msg("%s", db_strerror(ua->db)); + } + } + db_unlock(ua->db); +} + +/* Modify the RecyclePool of a Volume */ +void update_vol_recyclepool(UAContext *ua, char *val, MEDIA_DBR *mr) +{ + POOL_DBR pr; + POOL_MEM query(PM_MESSAGE); + char ed1[50], ed2[50]; + const char *poolname; + + if(val && *val) { /* update volume recyclepool="Scratch" */ + /* If a pool name is given, look up the PoolId */ + memset(&pr, 0, sizeof(pr)); + bstrncpy(pr.Name, val, sizeof(pr.Name)); + if (!get_pool_dbr(ua, &pr, NT_("recyclepool"))) { + return; + } + /* pool = select_pool_resource(ua); */ + mr->RecyclePoolId = pr.PoolId; /* get the PoolId */ + poolname = pr.Name; + + } else { /* update volume recyclepool="" */ + /* If no pool name is given, set the PoolId to 0 (the default) */ + mr->RecyclePoolId = 0; + poolname = _("*None*"); + } + + db_lock(ua->db); + Mmsg(query, "UPDATE Media SET RecyclePoolId=%s WHERE MediaId=%s", + edit_int64(mr->RecyclePoolId, ed1), edit_int64(mr->MediaId, ed2)); + if (!db_sql_query(ua->db, query.c_str(), NULL, NULL)) { + ua->error_msg("%s", db_strerror(ua->db)); + } else { + ua->info_msg(_("New RecyclePool is: %s\n"), poolname); + } + db_unlock(ua->db); +} + +/* + * Refresh the Volume information from the Pool record + */ +static void update_vol_from_pool(UAContext *ua, MEDIA_DBR *mr) +{ + POOL_DBR pr; + + memset(&pr, 0, sizeof(pr)); + pr.PoolId = mr->PoolId; + if (!db_get_pool_numvols(ua->jcr, ua->db, &pr) || + !acl_access_ok(ua, Pool_ACL, pr.Name)) { + return; + } + set_pool_dbr_defaults_in_media_dbr(mr, &pr); + if (!db_update_media_defaults(ua->jcr, ua->db, mr)) { + ua->error_msg(_("Error updating Volume record: ERR=%s"), db_strerror(ua->db)); + } else { + ua->info_msg(_("Volume defaults updated from \"%s\" Pool record.\n"), + pr.Name); + } +} + +/* + * Refresh the Volume information from the Pool record + * for all Volumes + */ +static void update_all_vols_from_pool(UAContext *ua, const char *pool_name) +{ + POOL_DBR pr; + MEDIA_DBR mr; + + memset(&pr, 0, sizeof(pr)); + + bstrncpy(pr.Name, pool_name, sizeof(pr.Name)); + if (!get_pool_dbr(ua, &pr)) { + return; + } + set_pool_dbr_defaults_in_media_dbr(&mr, &pr); + mr.PoolId = pr.PoolId; + if (!db_update_media_defaults(ua->jcr, ua->db, &mr)) { + ua->error_msg(_("Error updating Volume records: ERR=%s"), db_strerror(ua->db)); + } else { + 
ua->info_msg(_("All Volume defaults updated from \"%s\" Pool record.\n"), + pr.Name); + } +} + +static void update_all_vols(UAContext *ua) +{ + int i, num_pools; + uint32_t *ids; + POOL_DBR pr; + MEDIA_DBR mr; + + memset(&pr, 0, sizeof(pr)); + + if (!db_get_pool_ids(ua->jcr, ua->db, &num_pools, &ids)) { + ua->error_msg(_("Error obtaining pool ids. ERR=%s\n"), db_strerror(ua->db)); + return; + } + + for (i=0; ijcr, ua->db, &pr)) { /* ***FIXME*** use acl? */ + ua->warning_msg(_("Updating all pools, but skipped PoolId=%d. ERR=%s\n"), db_strerror(ua->db)); + continue; + } + + set_pool_dbr_defaults_in_media_dbr(&mr, &pr); + mr.PoolId = pr.PoolId; + + if (!db_update_media_defaults(ua->jcr, ua->db, &mr)) { + ua->error_msg(_("Error updating Volume records: ERR=%s"), db_strerror(ua->db)); + } else { + ua->info_msg(_("All Volume defaults updated from \"%s\" Pool record.\n"), + pr.Name); + } + } + + free(ids); +} + +static void update_volenabled(UAContext *ua, char *val, MEDIA_DBR *mr) +{ + mr->Enabled = get_enabled(ua, val); + if (mr->Enabled < 0) { + return; + } + set_storageid_in_mr(NULL, mr); + if (!db_update_media_record(ua->jcr, ua->db, mr)) { + ua->error_msg(_("Error updating media record Enabled: ERR=%s"), + db_strerror(ua->db)); + } else { + ua->info_msg(_("New Enabled is: %d\n"), mr->Enabled); + } +} + +static void update_vol_actiononpurge(UAContext *ua, char *val, MEDIA_DBR *mr) +{ + POOL_MEM ret; + if (strcasecmp(val, "truncate") == 0) { + mr->ActionOnPurge = ON_PURGE_TRUNCATE; + } else { + mr->ActionOnPurge = 0; + } + + set_storageid_in_mr(NULL, mr); + if (!db_update_media_record(ua->jcr, ua->db, mr)) { + ua->error_msg(_("Error updating media record ActionOnPurge: ERR=%s"), + db_strerror(ua->db)); + } else { + ua->info_msg(_("New ActionOnPurge is: %s\n"), + action_on_purge_to_string(mr->ActionOnPurge, ret)); + } +} + +/* + * Update a media record -- allows you to change the + * Volume status. E.g. if you want Bacula to stop + * writing on the volume, set it to anything other + * than Append. + */ +static int update_volume(UAContext *ua) +{ + MEDIA_DBR mr; + POOL *pool; + POOL_DBR pr; + POOLMEM *query; + POOL_MEM ret; + char buf[1000]; + char ed1[130]; + bool done = false; + int i; + const char *kw[] = { + NT_("VolStatus"), /* 0 */ + NT_("VolRetention"), /* 1 */ + NT_("VolUse"), /* 2 */ + NT_("MaxVolJobs"), /* 3 */ + NT_("MaxVolFiles"), /* 4 */ + NT_("MaxVolBytes"), /* 5 */ + NT_("Recycle"), /* 6 */ + NT_("InChanger"), /* 7 */ + NT_("Slot"), /* 8 */ + NT_("Pool"), /* 9 */ + NT_("FromPool"), /* 10 !!! see below !!! */ + NT_("AllFromPool"), /* 11 !!! see below !!! */ + NT_("Enabled"), /* 12 */ + NT_("RecyclePool"), /* 13 */ + NT_("ActionOnPurge"), /* 14 */ + NT_("FromAllPools"), /* 15 !!! see bellow !!! 
*/ + NT_("CacheRetention"), /* 16 */ + NULL }; + +#define FromPool 10 /* keep this updated */ +#define AllFromPool 11 /* keep this updated with above */ +#define FromAllPools 15 /* keep this updated */ + + for (i=0; kw[i]; i++) { + int j; + POOL_DBR pr; + /* No argv with these parameters */ + if (i == FromPool || i == FromAllPools) { + j = find_arg(ua, kw[i]); + + } else { + j = find_arg_with_value(ua, kw[i]); + } + if (j > 0) { + /* If all from pool/from all pools don't select a media record */ + if (i != AllFromPool && i != FromAllPools && !select_media_dbr(ua, &mr)) { + return 0; + } + switch (i) { + case 0: + update_volstatus(ua, ua->argv[j], &mr); + break; + case 1: + update_volretention(ua, ua->argv[j], &mr); + break; + case 2: + update_voluseduration(ua, ua->argv[j], &mr); + break; + case 3: + update_volmaxjobs(ua, ua->argv[j], &mr); + break; + case 4: + update_volmaxfiles(ua, ua->argv[j], &mr); + break; + case 5: + update_volmaxbytes(ua, ua->argv[j], &mr); + break; + case 6: + update_volrecycle(ua, ua->argv[j], &mr); + break; + case 7: + update_volinchanger(ua, ua->argv[j], &mr); + break; + case 8: + update_volslot(ua, ua->argv[j], &mr); + break; + case 9: + memset(&pr, 0, sizeof(POOL_DBR)); + pr.PoolId = mr.PoolId; + if (!db_get_pool_numvols(ua->jcr, ua->db, &pr)) { + ua->error_msg("%s", db_strerror(ua->db)); + break; + } + update_vol_pool(ua, ua->argv[j], &mr, &pr); + break; + case 10: + update_vol_from_pool(ua, &mr); + return 1; + case 11: + update_all_vols_from_pool(ua, ua->argv[j]); + return 1; + case 12: + update_volenabled(ua, ua->argv[j], &mr); + break; + case 13: + update_vol_recyclepool(ua, ua->argv[j], &mr); + break; + case 14: + update_vol_actiononpurge(ua, ua->argv[j], &mr); + break; + case 15: + update_all_vols(ua); + break; + case 16: + update_vol_cacheretention(ua, ua->argv[j], &mr); + break; + } + done = true; + } + } + + for ( ; !done; ) { + start_prompt(ua, _("Parameters to modify:\n")); + add_prompt(ua, _("Volume Status")); /* 0 */ + add_prompt(ua, _("Volume Retention Period")); /* 1 */ + add_prompt(ua, _("Volume Use Duration")); /* 2 */ + add_prompt(ua, _("Maximum Volume Jobs")); /* 3 */ + add_prompt(ua, _("Maximum Volume Files")); /* 4 */ + add_prompt(ua, _("Maximum Volume Bytes")); /* 5 */ + add_prompt(ua, _("Recycle Flag")); /* 6 */ + add_prompt(ua, _("Slot")); /* 7 */ + add_prompt(ua, _("InChanger Flag")); /* 8 */ + add_prompt(ua, _("Volume Files")); /* 9 */ + add_prompt(ua, _("Pool")); /* 10 */ + add_prompt(ua, _("Volume from Pool")); /* 11 */ + add_prompt(ua, _("All Volumes from Pool")); /* 12 */ + add_prompt(ua, _("All Volumes from all Pools")); /* 13 */ + add_prompt(ua, _("Enabled")), /* 14 */ + add_prompt(ua, _("RecyclePool")), /* 15 */ + add_prompt(ua, _("Action On Purge")), /* 16 */ + add_prompt(ua, _("Cache Retention")), /* 17 */ + add_prompt(ua, _("Done")); /* 18 */ + i = do_prompt(ua, "", _("Select parameter to modify"), NULL, 0); + + /* For All Volumes, All Volumes from Pool, and Done, we don't need + * a Volume record */ + if ( i != 12 && i != 13 && i != 18) { + if (!select_media_dbr(ua, &mr)) { /* Get Volume record */ + return 0; + } + ua->info_msg(_("Updating Volume \"%s\"\n"), mr.VolumeName); + } + switch (i) { + case 0: /* Volume Status */ + /* Modify Volume Status */ + ua->info_msg(_("Current Volume status is: %s\n"), mr.VolStatus); + start_prompt(ua, _("Possible Values are:\n")); + add_prompt(ua, NT_("Append")); + add_prompt(ua, NT_("Archive")); + add_prompt(ua, NT_("Disabled")); + add_prompt(ua, NT_("Full")); + add_prompt(ua, 
NT_("Used")); + add_prompt(ua, NT_("Cleaning")); + if (strcmp(mr.VolStatus, NT_("Purged")) == 0) { + add_prompt(ua, NT_("Recycle")); + } + add_prompt(ua, NT_("Read-Only")); + if (do_prompt(ua, "", _("Choose new Volume Status"), ua->cmd, sizeof(mr.VolStatus)) < 0) { + return 1; + } + update_volstatus(ua, ua->cmd, &mr); + break; + case 1: /* Retention */ + ua->info_msg(_("Current retention period is: %s\n"), + edit_utime(mr.VolRetention, ed1, sizeof(ed1))); + if (!get_cmd(ua, _("Enter Volume Retention period: "))) { + return 0; + } + update_volretention(ua, ua->cmd, &mr); + break; + + case 2: /* Use Duration */ + ua->info_msg(_("Current use duration is: %s\n"), + edit_utime(mr.VolUseDuration, ed1, sizeof(ed1))); + if (!get_cmd(ua, _("Enter Volume Use Duration: "))) { + return 0; + } + update_voluseduration(ua, ua->cmd, &mr); + break; + + case 3: /* Max Jobs */ + ua->info_msg(_("Current max jobs is: %u\n"), mr.MaxVolJobs); + if (!get_pint(ua, _("Enter new Maximum Jobs: "))) { + return 0; + } + update_volmaxjobs(ua, ua->cmd, &mr); + break; + + case 4: /* Max Files */ + ua->info_msg(_("Current max files is: %u\n"), mr.MaxVolFiles); + if (!get_pint(ua, _("Enter new Maximum Files: "))) { + return 0; + } + update_volmaxfiles(ua, ua->cmd, &mr); + break; + + case 5: /* Max Bytes */ + ua->info_msg(_("Current value is: %s\n"), edit_uint64(mr.MaxVolBytes, ed1)); + if (!get_cmd(ua, _("Enter new Maximum Bytes: "))) { + return 0; + } + update_volmaxbytes(ua, ua->cmd, &mr); + break; + + + case 6: /* Recycle */ + ua->info_msg(_("Current recycle flag is: %s\n"), + mr.Recycle==1?_("yes"):_("no")); + if (!get_yesno(ua, _("Enter new Recycle status: "))) { + return 0; + } + update_volrecycle(ua, ua->cmd, &mr); + break; + + case 7: /* Slot */ + ua->info_msg(_("Current Slot is: %d\n"), mr.Slot); + if (!get_pint(ua, _("Enter new Slot: "))) { + return 0; + } + update_volslot(ua, ua->cmd, &mr); + break; + + case 8: /* InChanger */ + ua->info_msg(_("Current InChanger flag is: %d\n"), mr.InChanger); + bsnprintf(buf, sizeof(buf), _("Set InChanger flag for Volume \"%s\": yes/no: "), + mr.VolumeName); + if (!get_yesno(ua, buf)) { + return 0; + } + mr.InChanger = ua->pint32_val; + /* + * Make sure to use db_update... rather than doing this directly, + * so that any Slot is handled correctly. + */ + set_storageid_in_mr(NULL, &mr); + if (!db_update_media_record(ua->jcr, ua->db, &mr)) { + ua->error_msg(_("Error updating media record Slot: ERR=%s"), db_strerror(ua->db)); + } else { + ua->info_msg(_("New InChanger flag is: %d\n"), mr.InChanger); + } + break; + + + case 9: /* Volume Files */ + int32_t VolFiles; + ua->warning_msg(_("Warning changing Volume Files can result\n" + "in loss of data on your Volume\n\n")); + ua->info_msg(_("Current Volume Files is: %u\n"), mr.VolFiles); + if (!get_pint(ua, _("Enter new number of Files for Volume: "))) { + return 0; + } + VolFiles = ua->pint32_val; + if (VolFiles != (int)(mr.VolFiles + 1)) { + ua->warning_msg(_("Normally, you should only increase Volume Files by one!\n")); + if (!get_yesno(ua, _("Increase Volume Files? 
(yes/no): ")) || ua->pint32_val == 0) { + break; + } + } + query = get_pool_memory(PM_MESSAGE); + Mmsg(query, "UPDATE Media SET VolFiles=%u WHERE MediaId=%s", + VolFiles, edit_int64(mr.MediaId, ed1)); + if (!db_sql_query(ua->db, query, NULL, NULL)) { + ua->error_msg("%s", db_strerror(ua->db)); + } else { + ua->info_msg(_("New Volume Files is: %u\n"), VolFiles); + } + free_pool_memory(query); + break; + + case 10: /* Volume's Pool */ + memset(&pr, 0, sizeof(POOL_DBR)); + pr.PoolId = mr.PoolId; + if (!db_get_pool_numvols(ua->jcr, ua->db, &pr)) { + ua->error_msg("%s", db_strerror(ua->db)); + return 0; + } + ua->info_msg(_("Current Pool is: %s\n"), pr.Name); + if (!get_cmd(ua, _("Enter new Pool name: "))) { + return 0; + } + update_vol_pool(ua, ua->cmd, &mr, &pr); + return 1; + + case 11: + update_vol_from_pool(ua, &mr); + return 1; + case 12: + pool = select_pool_resource(ua); + if (pool) { + update_all_vols_from_pool(ua, pool->name()); + } + return 1; + + case 13: + update_all_vols(ua); + return 1; + + case 14: + ua->info_msg(_("Current Enabled is: %d\n"), mr.Enabled); + if (!get_cmd(ua, _("Enter new Enabled: "))) { + return 0; + } + update_volenabled(ua, ua->cmd, &mr); + break; + + case 15: + memset(&pr, 0, sizeof(POOL_DBR)); + pr.PoolId = mr.RecyclePoolId; + if (db_get_pool_numvols(ua->jcr, ua->db, &pr)) { + ua->info_msg(_("Current RecyclePool is: %s\n"), pr.Name); + } else { + ua->info_msg(_("No current RecyclePool\n")); + } + if (!select_pool_dbr(ua, &pr, NT_("recyclepool"))) { + return 0; + } + update_vol_recyclepool(ua, pr.Name, &mr); + return 1; + + case 16: + pm_strcpy(ret, ""); + ua->info_msg(_("Current ActionOnPurge is: %s\n"), + action_on_purge_to_string(mr.ActionOnPurge, ret)); + if (!get_cmd(ua, _("Enter new ActionOnPurge (one of: Truncate, None): "))) { + return 0; + } + + update_vol_actiononpurge(ua, ua->cmd, &mr); + break; + + case 17: + pm_strcpy(ret, ""); + ua->info_msg(_("Current Cache Retention period is: %s\n"), + edit_utime(mr.CacheRetention, ed1, sizeof(ed1))); + if (!get_cmd(ua, _("Enter Cache Retention period: "))) { + return 0; + } + update_vol_cacheretention(ua, ua->cmd, &mr); + break; + + default: /* Done or error */ + ua->info_msg(_("Selection terminated.\n")); + return 1; + } + } + return 1; +} + +/* + * Update long term statistics + */ +static bool update_stats(UAContext *ua) +{ + int i = find_arg_with_value(ua, NT_("days")); + utime_t since=0; + + if (i >= 0) { + since = atoi(ua->argv[i]) * 24*60*60; + } + + int nb = db_update_stats(ua->jcr, ua->db, since); + ua->info_msg(_("Updating %i job(s).\n"), nb); + + return true; +} + +/* + * Update pool record -- pull info from current POOL resource + */ +static bool update_pool(UAContext *ua) +{ + POOL_DBR pr; + int id; + POOL *pool; + POOLMEM *query; + char ed1[50]; + + pool = get_pool_resource(ua); + if (!pool) { + return false; + } + + memset(&pr, 0, sizeof(pr)); + bstrncpy(pr.Name, pool->name(), sizeof(pr.Name)); + if (!get_pool_dbr(ua, &pr)) { + return false; + } + + set_pooldbr_from_poolres(&pr, pool, POOL_OP_UPDATE); /* update */ + set_pooldbr_references(ua->jcr, ua->db, &pr, pool); + + id = db_update_pool_record(ua->jcr, ua->db, &pr); + if (id <= 0) { + ua->error_msg(_("db_update_pool_record returned %d. 
ERR=%s\n"), + id, db_strerror(ua->db)); + } + query = get_pool_memory(PM_MESSAGE); + Mmsg(query, list_pool, edit_int64(pr.PoolId, ed1)); + db_list_sql_query(ua->jcr, ua->db, query, prtit, ua, 1, HORZ_LIST); + free_pool_memory(query); + ua->info_msg(_("Pool DB record updated from resource.\n")); + return true; +} + +/* + * Update a Job record -- allows you to change the + * date fields in a Job record. This helps when + * providing migration from other vendors. + */ +static bool update_job(UAContext *ua) +{ + int i, priority=0; + char ed1[50], ed2[50], ed3[50], ed4[50]; + POOL_MEM cmd(PM_MESSAGE); + JOB_DBR jr; + CLIENT_DBR cr; + POOL_DBR pr; + JCR *jcr; + utime_t StartTime; + char *client_name = NULL; + char *start_time = NULL; + char *pool_name = NULL; + const char *kw[] = { + NT_("starttime"), /* 0 */ + NT_("client"), /* 1 */ + NT_("priority"), /* 2 */ + NT_("pool"), /* 3 */ + NULL }; + + Dmsg1(200, "cmd=%s\n", ua->cmd); + i = find_arg_with_value(ua, NT_("jobid")); + if (i < 0) { + ua->error_msg(_("Expect JobId keyword, not found.\n")); + return false; + } + memset(&jr, 0, sizeof(jr)); + memset(&cr, 0, sizeof(cr)); + jr.JobId = str_to_int64(ua->argv[i]); + if (jr.JobId == 0) { + ua->error_msg("Bad jobid\n"); + return false; + } + if (!db_get_job_record(ua->jcr, ua->db, &jr)) { + ua->error_msg("%s", db_strerror(ua->db)); + return false; + } + if (!acl_access_ok(ua, Job_ACL, jr.Name)) { + ua->error_msg(_("Update failed. Job not authorized on this console\n")); + return false; + } + for (i=0; kw[i]; i++) { + int j; + if ((j=find_arg_with_value(ua, kw[i])) >= 0) { + switch (i) { + case 0: /* start time */ + start_time = ua->argv[j]; + break; + case 1: /* Client name */ + client_name = ua->argv[j]; + break; + case 2: /* Priority */ + priority = str_to_int64(ua->argv[j]); + break; + case 3: /* Change Pool */ + pool_name = ua->argv[j]; + break; + } + } + } + if (!client_name && !start_time && !priority && !pool_name) { + ua->error_msg(_("Neither Client, StartTime, Pool or Priority specified.\n")); + return 0; + } + if (priority > 0) { + foreach_jcr(jcr) { + if (jcr->JobId == jr.JobId) { + int old = jcr->JobPriority; + jcr->JobPriority = priority; + free_jcr(jcr); + ua->send_msg(_("Priority updated for running job \"%s\" from %d to %d\n"), jr.Job, old, priority); + return true; + } + } + endeach_jcr(jcr); + ua->error_msg(_("Job not found.\n")); + return true; + } + if (client_name) { + if (!get_client_dbr(ua, &cr, JT_BACKUP_RESTORE)) { + return false; + } + jr.ClientId = cr.ClientId; + } + if (start_time) { + utime_t delta_start; + + StartTime = str_to_utime(start_time); + if (StartTime == 0) { + ua->error_msg(_("Improper date format: %s\n"), ua->argv[i]); + return false; + } + delta_start = StartTime - jr.StartTime; + Dmsg3(200, "ST=%lld jr.ST=%lld delta=%lld\n", StartTime, + (utime_t)jr.StartTime, delta_start); + jr.StartTime = (time_t)StartTime; + jr.SchedTime += (time_t)delta_start; + jr.EndTime += (time_t)delta_start; + jr.JobTDate += delta_start; + /* Convert to DB times */ + bstrutime(jr.cStartTime, sizeof(jr.cStartTime), jr.StartTime); + bstrutime(jr.cSchedTime, sizeof(jr.cSchedTime), jr.SchedTime); + bstrutime(jr.cEndTime, sizeof(jr.cEndTime), jr.EndTime); + } + if (pool_name) { + MEDIA_DBR mr; + dbid_list lst; + + memset(&pr, 0, sizeof(POOL_DBR)); + bstrncpy(pr.Name, pool_name, sizeof(pr.Name)); + /* Get the list of all volumes and update the pool */ + if (!db_get_pool_record(ua->jcr, ua->db, &pr)) { + ua->error_msg("Unable to get pool record %s", db_strerror(ua->db)); + return false; 
+ } + jr.PoolId = pr.PoolId; + Mmsg(cmd, "SELECT DISTINCT MediaId " + "FROM Media JOIN JobMedia USING (MediaId) " + "WHERE JobId = %s", edit_uint64(jr.JobId, ed1)); + if (!db_get_query_dbids(ua->jcr, ua->db, cmd, lst)) { + ua->error_msg("%s", db_strerror(ua->db)); + return false; + } + + for (int i=0; i < lst.num_ids; i++) { + mr.MediaId = lst.DBId[i]; + update_vol_pool(ua, pool_name, &mr, &pr); + } + } + Mmsg(cmd, "UPDATE Job SET ClientId=%s,StartTime='%s',SchedTime='%s'," + "EndTime='%s',JobTDate=%s, PoolId=%s WHERE JobId=%s", + edit_int64(jr.ClientId, ed1), + jr.cStartTime, + jr.cSchedTime, + jr.cEndTime, + edit_uint64(jr.JobTDate, ed2), + edit_uint64(jr.PoolId, ed3), + edit_uint64(jr.JobId, ed4)); + if (!db_sql_query(ua->db, cmd.c_str(), NULL, NULL)) { + ua->error_msg("%s", db_strerror(ua->db)); + return false; + } + return true; +} diff --git a/src/dird/vbackup.c b/src/dird/vbackup.c new file mode 100644 index 00000000..1b2a6431 --- /dev/null +++ b/src/dird/vbackup.c @@ -0,0 +1,600 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula Director -- vbackup.c -- responsible for doing virtual + * backup jobs or in other words, consolidation or synthetic + * backups. + * + * Kern Sibbald, July MMVIII + * + * Basic tasks done here: + * Open DB and create records for this job. + * Figure out what Jobs to copy. + * Open Message Channel with Storage daemon to tell him a job will be starting. + * Open connection with File daemon and pass him commands + * to do the backup. + * When the File daemon finishes the job, update the DB. + */ + +#include "bacula.h" +#include "dird.h" +#include "ua.h" + +static const int dbglevel = 10; + +static bool create_bootstrap_file(JCR *jcr, char *jobids); +void vbackup_cleanup(JCR *jcr, int TermCode); + +/* + * Called here before the job is run to do the job + * specific setup. + */ +bool do_vbackup_init(JCR *jcr) +{ + + if (!get_or_create_fileset_record(jcr)) { + Dmsg1(dbglevel, "JobId=%d no FileSet\n", (int)jcr->JobId); + return false; + } + + apply_pool_overrides(jcr); + + if (!allow_duplicate_job(jcr)) { + return false; + } + + jcr->jr.PoolId = get_or_create_pool_record(jcr, jcr->pool->name()); + if (jcr->jr.PoolId == 0) { + Dmsg1(dbglevel, "JobId=%d no PoolId\n", (int)jcr->JobId); + Jmsg(jcr, M_FATAL, 0, _("Could not get or create a Pool record.\n")); + return false; + } + /* + * Note, at this point, pool is the pool for this job. We + * transfer it to rpool (read pool), and a bit later, + * pool will be changed to point to the write pool, + * which comes from pool->NextPool. 
+ */ + jcr->rpool = jcr->pool; /* save read pool */ + pm_strcpy(jcr->rpool_source, jcr->pool_source); + + /* If pool storage specified, use it for virtual full */ + copy_rstorage(jcr, jcr->pool->storage, _("Pool resource")); + + Dmsg2(dbglevel, "Read pool=%s (From %s)\n", jcr->rpool->name(), jcr->rpool_source); + + jcr->start_time = time(NULL); + jcr->jr.StartTime = jcr->start_time; + jcr->jr.JobLevel = L_FULL; /* we want this to appear as a Full backup */ + if (!db_update_job_start_record(jcr, jcr->db, &jcr->jr)) { + Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(jcr->db)); + } + + if (!apply_wstorage_overrides(jcr, jcr->pool)) { + return false; + } + + Dmsg2(dbglevel, "Write pool=%s read rpool=%s\n", jcr->pool->name(), jcr->rpool->name()); + + return true; +} + +/* + * Do a virtual backup, which consolidates all previous backups into + * a sort of synthetic Full. + * + * Returns: false on failure + * true on success + */ +bool do_vbackup(JCR *jcr) +{ + char level_computed = L_FULL; + char ed1[100]; + BSOCK *sd; + char *p; + sellist sel; + db_list_ctx jobids; + UAContext *ua; + + Dmsg2(100, "rstorage=%p wstorage=%p\n", jcr->rstorage, jcr->wstorage); + Dmsg2(100, "Read store=%s, write store=%s\n", + ((STORE *)jcr->rstorage->first())->name(), + ((STORE *)jcr->wstorage->first())->name()); + + jcr->wasVirtualFull = true; /* remember where we came from */ + + /* Print Job Start message */ + Jmsg(jcr, M_INFO, 0, _("Start Virtual Backup JobId %s, Job=%s\n"), + edit_uint64(jcr->JobId, ed1), jcr->Job); + if (!jcr->accurate) { + Jmsg(jcr, M_WARNING, 0, +_("This Job is not an Accurate backup so is not equivalent to a Full backup.\n")); + } + + if (jcr->JobIds && *jcr->JobIds) { + JOB_DBR jr; + db_list_ctx status; + POOL_MEM query(PM_MESSAGE); + + memset(&jr, 0, sizeof(jr)); + + if (is_an_integer(jcr->JobIds)) { + /* Single JobId, so start the accurate code based on this id */ + + jr.JobId = str_to_int64(jcr->JobIds); + if (!db_get_job_record(jcr, jcr->db, &jr)) { + Jmsg(jcr, M_ERROR, 0, + _("Unable to get Job record for JobId=%s: ERR=%s\n"), + jcr->JobIds, db_strerror(jcr->db)); + return false; + } + Jmsg(jcr, M_INFO,0,_("Selecting jobs to build the Full state at %s\n"), + jr.cStartTime); + + jr.JobLevel = L_INCREMENTAL; /* Take Full+Diff+Incr */ + db_get_accurate_jobids(jcr, jcr->db, &jr, &jobids); + + } else if (sel.set_string(jcr->JobIds, true)) { + /* Found alljobid keyword */ + if (jcr->use_all_JobIds) { + jobids.count = sel.size(); + pm_strcpy(jobids.list, sel.get_expanded_list()); + + /* Need to apply some filter on the job name */ + } else { + Mmsg(query, + "SELECT JobId FROM Job " + "WHERE Job.Name = '%s' " + "AND Job.JobId IN (%s) " + "ORDER BY JobTDate ASC", + jcr->job->name(), + sel.get_expanded_list()); + + db_sql_query(jcr->db, query.c_str(), db_list_handler, &jobids); + } + + if (jobids.count == 0) { + Jmsg(jcr, M_FATAL, 0, _("No valid Jobs found from user selection.\n")); + return false; + } + + Jmsg(jcr, M_INFO, 0, _("Using user supplied JobIds=%s\n"), + jobids.list); + + /* Check status */ + Mmsg(query, + "SELECT Level FROM Job " + "WHERE Job.JobId IN (%s) " + "GROUP BY Level", + jobids.list); + + /* Will produce something like F,D,I or F,I */ + db_sql_query(jcr->db, query.c_str(), db_list_handler, &status); + + /* If no full found in the list, we build a "virtualdiff" or + * a "virtualinc". 
+ */ + if (strchr(status.list, L_FULL) == NULL) { + if (strchr(status.list, L_DIFFERENTIAL)) { + level_computed = L_DIFFERENTIAL; + Jmsg(jcr, M_INFO, 0, _("No previous Full found in list, " + "using Differential level\n")); + + } else { + level_computed = L_INCREMENTAL; + Jmsg(jcr, M_INFO, 0, _("No previous Full found in list, " + "using Incremental level\n")); + } + } + } + + } else { /* No argument provided */ + jcr->jr.JobLevel = L_VIRTUAL_FULL; + /* We restrict the search of the JobIds to the current job */ + bstrncpy(jcr->jr.Name, jcr->job->name(), sizeof(jcr->jr.Name)); + db_get_accurate_jobids(jcr, jcr->db, &jcr->jr, &jobids); + Dmsg1(10, "Accurate jobids=%s\n", jobids.list); + } + + if (jobids.count == 0) { + Jmsg(jcr, M_FATAL, 0, _("No previous Jobs found.\n")); + return false; + } + jobids.count -= jcr->job->BackupsToKeep; + if (jobids.count <= 0) { + Jmsg(jcr, M_WARNING, 0, _("Insufficient Backups to Keep.\n")); + return false; + } + if (jobids.count == 1) { + Jmsg(jcr, M_WARNING, 0, _("Only one Job found. Consolidation not needed.\n")); + return false; + } + + /* Remove number of JobIds we want to keep */ + for (int i=0; i < (int)jcr->job->BackupsToKeep; i++) { + p = strrchr(jobids.list, ','); /* find last jobid */ + if (p == NULL) { + break; + } else { + *p = 0; + } + } + + /* Full by default, or might be Incr/Diff when jobid= is used */ + jcr->jr.JobLevel = level_computed; + + Jmsg(jcr, M_INFO, 0, "Consolidating JobIds=%s\n", jobids.list); + + /* + * Now we find the last job that ran and store it's info in + * the previous_jr record. We will set our times to the + * values from that job so that anything changed after that + * time will be picked up on the next backup. + */ + p = strrchr(jobids.list, ','); /* find last jobid */ + if (p != NULL) { + p++; + } else { + p = jobids.list; + } + memset(&jcr->previous_jr, 0, sizeof(jcr->previous_jr)); + jcr->previous_jr.JobId = str_to_int64(p); + Dmsg1(10, "Previous JobId=%s\n", p); + if (!db_get_job_record(jcr, jcr->db, &jcr->previous_jr)) { + Jmsg(jcr, M_FATAL, 0, _("Error getting Job record for previous Job: ERR=%s"), + db_strerror(jcr->db)); + return false; + } + + if (!create_bootstrap_file(jcr, jobids.list)) { + Jmsg(jcr, M_FATAL, 0, _("Could not get or create the FileSet record.\n")); + return false; + } + + /* + * Open a message channel connection with the Storage + * daemon. This is to let him know that our client + * will be contacting him for a backup session. + * + */ + Dmsg0(110, "Open connection with storage daemon\n"); + jcr->setJobStatus(JS_WaitSD); + /* + * Start conversation with Storage daemon + */ + if (!connect_to_storage_daemon(jcr, 10, SDConnectTimeout, 1)) { + return false; + } + sd = jcr->store_bsock; + + /* + * Now start a job with the Storage daemon + */ + if (!start_storage_daemon_job(jcr, jcr->rstorage, jcr->wstorage, /*send_bsr*/true)) { + return false; + } + Dmsg0(100, "Storage daemon connection OK\n"); + + /* + * We re-update the job start record so that the start + * time is set after the run before job. This avoids + * that any files created by the run before job will + * be saved twice. They will be backed up in the current + * job, but not in the next one unless they are changed. + * Without this, they will be backed up in this job and + * in the next job run because in that case, their date + * is after the start of this run. 
+ */ + jcr->start_time = time(NULL); + jcr->jr.StartTime = jcr->start_time; + jcr->jr.JobTDate = jcr->start_time; + jcr->setJobStatus(JS_Running); + + /* Update job start record */ + if (!db_update_job_start_record(jcr, jcr->db, &jcr->jr)) { + Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(jcr->db)); + return false; + } + + /* Declare the job started to start the MaxRunTime check */ + jcr->setJobStarted(); + + /* + * Start the job prior to starting the message thread below + * to avoid two threads from using the BSOCK structure at + * the same time. + */ + if (!sd->fsend("run")) { + return false; + } + + /* + * Now start a Storage daemon message thread + */ + if (!start_storage_daemon_message_thread(jcr)) { + return false; + } + + jcr->setJobStatus(JS_Running); + + /* Pickup Job termination data */ + /* Note, the SD stores in jcr->JobFiles/ReadBytes/JobBytes/JobErrors */ + wait_for_storage_daemon_termination(jcr); + jcr->setJobStatus(jcr->SDJobStatus); + db_write_batch_file_records(jcr); /* used by bulk batch file insert */ + if (jcr->JobStatus != JS_Terminated) { + return false; + } + if (jcr->job->DeleteConsolidatedJobs) { + ua = new_ua_context(jcr); + purge_jobs_from_catalog(ua, jobids.list); + free_ua_context(ua); + Jmsg(jcr, M_INFO, 0, _("Deleted consolidated JobIds=%s\n"), jobids.list); + } + + vbackup_cleanup(jcr, jcr->JobStatus); + return true; +} + + +/* + * Release resources allocated during backup. + */ +void vbackup_cleanup(JCR *jcr, int TermCode) +{ + char sdt[50], edt[50], schedt[50]; + char ec1[30], ec3[30], ec4[30], compress[50]; + char ec7[30], ec8[30], elapsed[50]; + char term_code[100], sd_term_msg[100]; + const char *term_msg; + int msg_type = M_INFO; + MEDIA_DBR mr; + CLIENT_DBR cr; + double kbps, compression; + utime_t RunTime; + POOL_MEM query(PM_MESSAGE); + + Dmsg2(100, "Enter vbackup_cleanup %d %c\n", TermCode, TermCode); + memset(&cr, 0, sizeof(cr)); + + jcr->jr.JobLevel = L_FULL; /* we want this to appear as a Full backup */ + jcr->JobFiles = jcr->SDJobFiles; + jcr->JobBytes = jcr->SDJobBytes; + update_job_end(jcr, TermCode); + + /* Update final items to set them to the previous job's values */ + Mmsg(query, "UPDATE Job SET StartTime='%s',EndTime='%s'," + "JobTDate=%s WHERE JobId=%s", + jcr->previous_jr.cStartTime, jcr->previous_jr.cEndTime, + edit_uint64(jcr->previous_jr.JobTDate, ec1), + edit_uint64(jcr->JobId, ec3)); + db_sql_query(jcr->db, query.c_str(), NULL, NULL); + + /* Get the fully updated job record */ + if (!db_get_job_record(jcr, jcr->db, &jcr->jr)) { + Jmsg(jcr, M_WARNING, 0, _("Error getting Job record for Job report: ERR=%s"), + db_strerror(jcr->db)); + jcr->setJobStatus(JS_ErrorTerminated); + } + + bstrncpy(cr.Name, jcr->client->name(), sizeof(cr.Name)); + if (!db_get_client_record(jcr, jcr->db, &cr)) { + Jmsg(jcr, M_WARNING, 0, _("Error getting Client record for Job report: ERR=%s"), + db_strerror(jcr->db)); + } + + bstrncpy(mr.VolumeName, jcr->VolumeName, sizeof(mr.VolumeName)); + if (!db_get_media_record(jcr, jcr->db, &mr)) { + Jmsg(jcr, M_WARNING, 0, _("Error getting Media record for Volume \"%s\": ERR=%s"), + mr.VolumeName, db_strerror(jcr->db)); + jcr->setJobStatus(JS_ErrorTerminated); + } + + update_bootstrap_file(jcr); + + switch (jcr->JobStatus) { + case JS_Terminated: + if (jcr->JobErrors || jcr->SDErrors) { + term_msg = _("Backup OK -- with warnings"); + } else { + term_msg = _("Backup OK"); + } + break; + case JS_FatalError: + case JS_ErrorTerminated: + term_msg = _("*** Backup Error ***"); + msg_type = M_ERROR; /* Generate error message */ 
+ terminate_sd_msg_chan_thread(jcr); + break; + case JS_Canceled: + term_msg = _("Backup Canceled"); + terminate_sd_msg_chan_thread(jcr); + break; + default: + term_msg = term_code; + sprintf(term_code, _("Inappropriate term code: %c\n"), jcr->JobStatus); + break; + } + bstrftimes(schedt, sizeof(schedt), jcr->jr.SchedTime); + bstrftimes(sdt, sizeof(sdt), jcr->jr.StartTime); + bstrftimes(edt, sizeof(edt), jcr->jr.EndTime); + RunTime = jcr->jr.EndTime - jcr->jr.StartTime; + if (RunTime <= 0) { + RunTime = 1; + } + kbps = ((double)jcr->jr.JobBytes) / (1000.0 * (double)RunTime); + if (!db_get_job_volume_names(jcr, jcr->db, jcr->jr.JobId, &jcr->VolumeName)) { + /* + * Note, if the job has erred, most likely it did not write any + * tape, so suppress this "error" message since in that case + * it is normal. Or look at it the other way, only for a + * normal exit should we complain about this error. + */ + if (jcr->JobStatus == JS_Terminated && jcr->jr.JobBytes) { + Jmsg(jcr, M_ERROR, 0, "%s", db_strerror(jcr->db)); + } + jcr->VolumeName[0] = 0; /* none */ + } + + if (jcr->ReadBytes == 0) { + bstrncpy(compress, "None", sizeof(compress)); + } else { + compression = (double)100 - 100.0 * ((double)jcr->JobBytes / (double)jcr->ReadBytes); + if (compression < 0.5) { + bstrncpy(compress, "None", sizeof(compress)); + } else { + bsnprintf(compress, sizeof(compress), "%.1f %%", compression); + } + } + jobstatus_to_ascii(jcr->SDJobStatus, sd_term_msg, sizeof(sd_term_msg)); + + Jmsg(jcr, msg_type, 0, _("%s %s %s (%s):\n" +" Build OS: %s %s %s\n" +" JobId: %d\n" +" Job: %s\n" +" Backup Level: Virtual Full\n" +" Client: \"%s\" %s\n" +" FileSet: \"%s\" %s\n" +" Pool: \"%s\" (From %s)\n" +" Catalog: \"%s\" (From %s)\n" +" Storage: \"%s\" (From %s)\n" +" Scheduled time: %s\n" +" Start time: %s\n" +" End time: %s\n" +" Elapsed time: %s\n" +" Priority: %d\n" +" SD Files Written: %s\n" +" SD Bytes Written: %s (%sB)\n" +" Rate: %.1f KB/s\n" +" Volume name(s): %s\n" +" Volume Session Id: %d\n" +" Volume Session Time: %d\n" +" Last Volume Bytes: %s (%sB)\n" +" SD Errors: %d\n" +" SD termination status: %s\n" +" Termination: %s\n\n"), + BACULA, my_name, VERSION, LSMDATE, + HOST_OS, DISTNAME, DISTVER, + jcr->jr.JobId, + jcr->jr.Job, + jcr->client->name(), cr.Uname, + jcr->fileset->name(), jcr->FSCreateTime, + jcr->pool->name(), jcr->pool_source, + jcr->catalog->name(), jcr->catalog_source, + jcr->wstore->name(), jcr->wstore_source, + schedt, + sdt, + edt, + edit_utime(RunTime, elapsed, sizeof(elapsed)), + jcr->JobPriority, + edit_uint64_with_commas(jcr->jr.JobFiles, ec1), + edit_uint64_with_commas(jcr->jr.JobBytes, ec3), + edit_uint64_with_suffix(jcr->jr.JobBytes, ec4), + kbps, + jcr->VolumeName, + jcr->VolSessionId, + jcr->VolSessionTime, + edit_uint64_with_commas(mr.VolBytes, ec7), + edit_uint64_with_suffix(mr.VolBytes, ec8), + jcr->SDErrors, + sd_term_msg, + term_msg); + + Dmsg0(100, "Leave vbackup_cleanup()\n"); +} + +/* + * This callback routine is responsible for inserting the + * items it gets into the bootstrap structure. For each JobId selected + * this routine is called once for each file. We do not allow + * duplicate filenames, but instead keep the info from the most + * recent file entered (i.e. the JobIds are assumed to be sorted) + * + * See uar_sel_files in sql_cmds.c for query that calls us. 
+ * row[0]=Path, row[1]=Filename, row[2]=FileIndex + * row[3]=JobId row[4]=LStat + */ +int insert_bootstrap_handler(void *ctx, int num_fields, char **row) +{ + JobId_t JobId; + int FileIndex; + rblist *bsr_list = (rblist *)ctx; + + JobId = str_to_int64(row[3]); + FileIndex = str_to_int64(row[2]); + add_findex(bsr_list, JobId, FileIndex); + return 0; +} + + +static bool create_bootstrap_file(JCR *jcr, char *jobids) +{ + RESTORE_CTX rx; + UAContext *ua; + RBSR *bsr = NULL; + + memset(&rx, 0, sizeof(rx)); + rx.bsr_list = New(rblist(bsr, &bsr->link)); + ua = new_ua_context(jcr); + rx.JobIds = jobids; + +#define new_get_file_list +#ifdef new_get_file_list + if (!db_open_batch_connexion(jcr, jcr->db)) { + Jmsg0(jcr, M_FATAL, 0, "Can't get batch sql connexion"); + return false; + } + + if (!db_get_file_list(jcr, jcr->db_batch, jobids, DBL_USE_DELTA | DBL_ALL_FILES, + insert_bootstrap_handler, (void *)rx.bsr_list)) + { + Jmsg(jcr, M_ERROR, 0, "%s", db_strerror(jcr->db_batch)); + } +#else + char *p; + JobId_t JobId, last_JobId = 0; + rx.query = get_pool_memory(PM_MESSAGE); + for (p=rx.JobIds; get_next_jobid_from_list(&p, &JobId) > 0; ) { + char ed1[50]; + + if (JobId == last_JobId) { + continue; /* eliminate duplicate JobIds */ + } + last_JobId = JobId; + /* + * Find files for this JobId and insert them in the tree + */ + Mmsg(rx.query, uar_sel_files, edit_int64(JobId, ed1)); + Dmsg1(100, "uar_sel_files=%s\n", rx.query); + if (!db_sql_query(ua->db, rx.query, insert_bootstrap_handler, (void *)rx.bsr_list)) { + Jmsg(jcr, M_ERROR, 0, "%s", db_strerror(ua->db)); + } + free_pool_memory(rx.query); + rx.query = NULL; + } +#endif + + complete_bsr(ua, rx.bsr_list); + jcr->ExpectedFiles = write_bsr_file(ua, rx); + Jmsg(jcr, M_INFO, 0, _("Found %d files to consolidate into Virtual Full.\n"), + jcr->ExpectedFiles); + free_ua_context(ua); + free_bsr(rx.bsr_list); + return jcr->ExpectedFiles==0?false:true; +} diff --git a/src/dird/verify.c b/src/dird/verify.c new file mode 100644 index 00000000..c0486ee9 --- /dev/null +++ b/src/dird/verify.c @@ -0,0 +1,887 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula Director -- verify.c -- responsible for running file verification + * + * Kern Sibbald, October MM + * + * Basic tasks done here: + * Open DB + * Open connection with File daemon and pass him commands + * to do the verify. + * When the File daemon sends the attributes, compare them to + * what is in the DB. 
+ */ + + +#include "bacula.h" +#include "dird.h" +#include "findlib/find.h" + +/* Commands sent to File daemon */ +static char verifycmd[] = "verify level=%s\n"; + +/* Responses received from File daemon */ +static char OKverify[] = "2000 OK verify\n"; + +/* Commands received from Storage daemon */ +static char OKbootstrap[] = "3000 OK bootstrap\n"; + +/* Forward referenced functions */ +static void prt_fname(JCR *jcr); +static int missing_handler(void *ctx, int num_fields, char **row); + +/* + * Called here before the job is run to do the job + * specific setup. + */ +bool do_verify_init(JCR *jcr) +{ + if (!allow_duplicate_job(jcr)) { + return false; + } + switch (jcr->getJobLevel()) { + case L_VERIFY_INIT: + case L_VERIFY_CATALOG: + case L_VERIFY_DISK_TO_CATALOG: + free_rstorage(jcr); + free_wstorage(jcr); + break; + case L_VERIFY_DATA: + case L_VERIFY_VOLUME_TO_CATALOG: + free_wstorage(jcr); + break; + default: + Jmsg2(jcr, M_FATAL, 0, _("Unimplemented Verify level %d(%c)\n"), jcr->getJobLevel(), + jcr->getJobLevel()); + return false; + } + return true; +} + + +/* + * Do a verification of the specified files against the Catlaog + * + * Returns: false on failure + * true on success + */ +bool do_verify(JCR *jcr) +{ + const char *level; + BSOCK *fd, *sd; + int stat; + char ed1[100], edl[50]; + JOB_DBR jr; + JobId_t verify_jobid = 0; + char *store_address; + uint32_t store_port; + const char *Name; + + free_wstorage(jcr); /* we don't write */ + + memset(&jcr->previous_jr, 0, sizeof(jcr->previous_jr)); + + /* + * Find JobId of last job that ran. Note, we do this when + * the job actually starts running, not at schedule time, + * so that we find the last job that terminated before + * this job runs rather than before it is scheduled. This + * permits scheduling a Backup and Verify at the same time, + * but with the Verify at a lower priority. + * + * For VERIFY_CATALOG we want the JobId of the last INIT. + * For VERIFY_VOLUME_TO_CATALOG, we want the JobId of the + * last backup Job. + */ + if (jcr->getJobLevel() == L_VERIFY_CATALOG || + jcr->getJobLevel() == L_VERIFY_VOLUME_TO_CATALOG || + jcr->getJobLevel() == L_VERIFY_DISK_TO_CATALOG || + jcr->getJobLevel() == L_VERIFY_DATA) { + memcpy(&jr, &jcr->jr, sizeof(jr)); + if (jcr->verify_job && + (jcr->getJobLevel() == L_VERIFY_VOLUME_TO_CATALOG || + jcr->getJobLevel() == L_VERIFY_DISK_TO_CATALOG || + jcr->getJobLevel() == L_VERIFY_DATA)) { + Name = jcr->verify_job->name(); + } else { + Name = NULL; + } + Dmsg1(100, "find last jobid for: %s\n", NPRT(Name)); + + /* see if user supplied a jobid= as run argument or from menu */ + if (jcr->RestoreJobId) { + verify_jobid = jcr->RestoreJobId; + Dmsg1(100, "Supplied jobid=%d\n", verify_jobid); + + } else { + if (!db_find_last_jobid(jcr, jcr->db, Name, &jr)) { + if (jcr->getJobLevel() == L_VERIFY_CATALOG) { + Jmsg(jcr, M_FATAL, 0, _( + "Unable to find JobId of previous InitCatalog Job.\n" + "Please run a Verify with Level=InitCatalog before\n" + "running the current Job.\n")); + } else { + Jmsg(jcr, M_FATAL, 0, _( + "Unable to find JobId of previous Job for this client.\n")); + } + return false; + } + verify_jobid = jr.JobId; + } + Dmsg1(100, "Last full jobid=%d\n", verify_jobid); + } + /* + * Now get the job record for the previous backup that interests + * us. We use the verify_jobid that we found above. 
+ */ + if (jcr->getJobLevel() == L_VERIFY_CATALOG || + jcr->getJobLevel() == L_VERIFY_VOLUME_TO_CATALOG || + jcr->getJobLevel() == L_VERIFY_DISK_TO_CATALOG || + jcr->getJobLevel() == L_VERIFY_DATA) { + jcr->previous_jr.JobId = verify_jobid; + if (!db_get_job_record(jcr, jcr->db, &jcr->previous_jr)) { + Jmsg(jcr, M_FATAL, 0, _("Could not get job record for previous Job. ERR=%s"), + db_strerror(jcr->db)); + return false; + } + if (!(jcr->previous_jr.JobStatus == JS_Terminated || + jcr->previous_jr.JobStatus == JS_Warnings)) { + Jmsg(jcr, M_FATAL, 0, _("Last Job %d did not terminate normally. JobStatus=%c\n"), + verify_jobid, jcr->previous_jr.JobStatus); + return false; + } + Jmsg(jcr, M_INFO, 0, _("Verifying against JobId=%d Job=%s\n"), + jcr->previous_jr.JobId, jcr->previous_jr.Job); + } + + /* + * If we are verifying a Volume, we need the Storage + * daemon, so open a connection, otherwise, just + * create a dummy authorization key (passed to + * File daemon but not used). + */ + if (jcr->getJobLevel() == L_VERIFY_VOLUME_TO_CATALOG || jcr->getJobLevel() == L_VERIFY_DATA) { + int stat; + /* + * Note: negative status is an error, zero status, means + * no files were backed up, so skip calling SD and + * client. + */ + stat = create_restore_bootstrap_file(jcr); + if (stat < 0) { /* error */ + return false; + } else if (stat == 0) { /* No files, nothing to do */ + verify_cleanup(jcr, JS_Terminated); /* clean up */ + return true; /* get out */ + } + } else { + jcr->sd_auth_key = bstrdup("dummy"); /* dummy Storage daemon key */ + } + + /* Pass the original fileset to the client */ + if (jcr->getJobLevel() == L_VERIFY_DATA) { + FILESET_DBR fdbr; + memset(&fdbr, 0, sizeof(fdbr)); + fdbr.FileSetId = jcr->previous_jr.FileSetId; + if (!db_get_fileset_record(jcr, jcr->db, &fdbr)) { + Jmsg(jcr, M_FATAL, 0, + _("Could not get fileset record from previous Job. 
ERR=%s"), + db_strerror(jcr->db)); + return false; + } + + jcr->fileset = (FILESET *)GetResWithName(R_FILESET, fdbr.FileSet); + if (!jcr->fileset) { + if (jcr->verify_job) { + jcr->fileset = jcr->verify_job->fileset; + Jmsg(jcr, M_WARNING, 0, + _("Could not find FileSet resource \"%s\" from previous Job\n"), + fdbr.FileSet); + Jmsg(jcr, M_INFO, 0, + _("Using FileSet \"%\"\n"), jcr->fileset->name()); + + } else { + Jmsg(jcr, M_FATAL, 0, + _("Could not get FileSet resource for verify Job.")); + return false; + } + } + Dmsg1(50, "FileSet = %s\n", jcr->fileset->name()); + } + + /* Pass the current fileset to the client */ + if (jcr->getJobLevel() == L_VERIFY_DISK_TO_CATALOG && jcr->verify_job) { + jcr->fileset = jcr->verify_job->fileset; + } + Dmsg2(100, "ClientId=%u JobLevel=%c\n", + jcr->previous_jr.ClientId, jcr->getJobLevel()); + + if (!db_update_job_start_record(jcr, jcr->db, &jcr->jr)) { + Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(jcr->db)); + return false; + } + + /* Print Job Start message */ + Jmsg(jcr, M_INFO, 0, _("Start Verify JobId=%s Level=%s Job=%s\n"), + edit_uint64(jcr->JobId, ed1), level_to_str(edl, sizeof(edl), jcr->getJobLevel()), jcr->Job); + + if (jcr->getJobLevel() == L_VERIFY_VOLUME_TO_CATALOG || + jcr->getJobLevel() == L_VERIFY_DATA) + { + /* + * Start conversation with Storage daemon + */ + jcr->setJobStatus(JS_Blocked); + if (!connect_to_storage_daemon(jcr, 10, SDConnectTimeout, 1)) { + return false; + } + /* + * Now start a job with the Storage daemon + */ + if (!start_storage_daemon_job(jcr, jcr->rstorage, NULL)) { + return false; + } + sd = jcr->store_bsock; + jcr->sd_calls_client = jcr->client->sd_calls_client; + /* + * Send the bootstrap file -- what Volumes/files to restore + */ + if (!send_bootstrap_file(jcr, sd) || + !response(jcr, sd, OKbootstrap, "Bootstrap", DISPLAY_ERROR)) { + goto bail_out; + } + if (!jcr->sd_calls_client) { + if (!run_storage_and_start_message_thread(jcr, sd)) { + return false; + } + } + } + /* + * OK, now connect to the File daemon + * and ask him for the files. + */ + jcr->setJobStatus(JS_Blocked); + if (!connect_to_file_daemon(jcr, 10, FDConnectTimeout, 1)) { + goto bail_out; + } + + jcr->setJobStatus(JS_Running); + fd = jcr->file_bsock; + + + Dmsg0(30, ">filed: Send include list\n"); + if (!send_include_list(jcr)) { + goto bail_out; + } + + Dmsg0(30, ">filed: Send exclude list\n"); + if (!send_exclude_list(jcr)) { + goto bail_out; + } + + /* + * Send Level command to File daemon, as well + * as the Storage address if appropriate. 
+ */ + switch (jcr->getJobLevel()) { + case L_VERIFY_INIT: + level = "init"; + break; + case L_VERIFY_CATALOG: + level = "catalog"; + break; + case L_VERIFY_DATA: + send_accurate_current_files(jcr); + /* Fall-through wanted */ + case L_VERIFY_VOLUME_TO_CATALOG: + if (jcr->sd_calls_client) { + if (jcr->FDVersion < 10) { + Jmsg(jcr, M_FATAL, 0, _("The File daemon does not support SDCallsClient.\n")); + goto bail_out; + } + + if (!send_client_addr_to_sd(jcr)) { + goto bail_out; + } + + if (!run_storage_and_start_message_thread(jcr, jcr->store_bsock)) { + return false; + } + store_address = jcr->rstore->address; /* dummy */ + store_port = 0; /* flag that SD calls FD */ + } else { + /* + * send Storage daemon address to the File daemon + */ + if (jcr->rstore->SDDport == 0) { + jcr->rstore->SDDport = jcr->rstore->SDport; + } + + store_address = get_storage_address(jcr->client, jcr->rstore); + store_port = jcr->rstore->SDDport; + } + + if (!send_store_addr_to_fd(jcr, jcr->rstore, store_address, store_port)) { + goto bail_out; + } + + if (!jcr->RestoreBootstrap) { + Jmsg0(jcr, M_FATAL, 0, _("Deprecated feature ... use bootstrap.\n")); + goto bail_out; + } + if (jcr->getJobLevel() == L_VERIFY_VOLUME_TO_CATALOG) { + level = "volume"; + } else { + level = "data"; + } + break; + case L_VERIFY_DISK_TO_CATALOG: + level="disk_to_catalog"; + break; + default: + Jmsg2(jcr, M_FATAL, 0, _("Unimplemented Verify level %d(%c)\n"), + jcr->getJobLevel(), + jcr->getJobLevel()); + goto bail_out; + } + + if (!send_runscripts_commands(jcr)) { + goto bail_out; + } + + /* + * Send verify command/level to File daemon + */ + fd->fsend(verifycmd, level); + if (!response(jcr, fd, OKverify, "Verify", DISPLAY_ERROR)) { + goto bail_out; + } + + /* + * Now get data back from File daemon and + * compare it to the catalog or store it in the + * catalog depending on the run type. + */ + /* Compare to catalog */ + switch (jcr->getJobLevel()) { + case L_VERIFY_CATALOG: + Dmsg0(10, "Verify level=catalog\n"); + jcr->sd_msg_thread_done = true; /* no SD msg thread, so it is done */ + jcr->SDJobStatus = JS_Terminated; + get_attributes_and_compare_to_catalog(jcr, jcr->previous_jr.JobId); + break; + + case L_VERIFY_VOLUME_TO_CATALOG: + Dmsg0(10, "Verify level=volume\n"); + get_attributes_and_compare_to_catalog(jcr, jcr->previous_jr.JobId); + break; + + case L_VERIFY_DISK_TO_CATALOG: + Dmsg0(10, "Verify level=disk_to_catalog\n"); + jcr->sd_msg_thread_done = true; /* no SD msg thread, so it is done */ + jcr->SDJobStatus = JS_Terminated; + get_attributes_and_compare_to_catalog(jcr, jcr->previous_jr.JobId); + break; + + case L_VERIFY_INIT: + /* Build catalog */ + Dmsg0(10, "Verify level=init\n"); + jcr->sd_msg_thread_done = true; /* no SD msg thread, so it is done */ + jcr->SDJobStatus = JS_Terminated; + get_attributes_and_put_in_catalog(jcr); + db_end_transaction(jcr, jcr->db); /* terminate any open transaction */ + db_write_batch_file_records(jcr); + break; + + case L_VERIFY_DATA: + /* Nothing special to do */ + bget_dirmsg(fd); /* eat EOD */ + break; + default: + Jmsg1(jcr, M_FATAL, 0, _("Unimplemented verify level %d\n"), jcr->getJobLevel()); + goto bail_out; + } + + stat = wait_for_job_termination(jcr); + verify_cleanup(jcr, stat); + return true; + +bail_out: + return false; +} + + +/* + * Release resources allocated during backup. 
+ * + */ +void verify_cleanup(JCR *jcr, int TermCode) +{ + char sdt[50], edt[50], edl[50]; + char ec1[30], ec2[30], elapsed[50]; + char term_code[100], fd_term_msg[100], sd_term_msg[100]; + const char *term_msg; + int msg_type; + utime_t RunTime; + const char *Name; + +// Dmsg1(100, "Enter verify_cleanup() TermCod=%d\n", TermCode); + + Dmsg3(900, "JobLevel=%c Expected=%u JobFiles=%u\n", jcr->getJobLevel(), + jcr->ExpectedFiles, jcr->JobFiles); + if ((jcr->getJobLevel() == L_VERIFY_VOLUME_TO_CATALOG || jcr->getJobLevel() == L_VERIFY_DATA) && + jcr->ExpectedFiles != jcr->JobFiles) { + TermCode = JS_ErrorTerminated; + } + + update_job_end(jcr, TermCode); + + if (job_canceled(jcr)) { + cancel_storage_daemon_job(jcr); + } + + if (jcr->unlink_bsr && jcr->RestoreBootstrap) { + unlink(jcr->RestoreBootstrap); + jcr->unlink_bsr = false; + } + + msg_type = M_INFO; /* by default INFO message */ + switch (TermCode) { + case JS_Terminated: + if (jcr->JobErrors || jcr->SDErrors) { + term_msg = _("Verify OK -- with warnings"); + } else { + term_msg = _("Verify OK"); + } + break; + case JS_FatalError: + case JS_ErrorTerminated: + term_msg = _("*** Verify Error ***"); + msg_type = M_ERROR; /* Generate error message */ + break; + case JS_Error: + term_msg = _("Verify warnings"); + break; + case JS_Canceled: + term_msg = _("Verify Canceled"); + break; + case JS_Differences: + term_msg = _("Verify Differences"); + break; + default: + term_msg = term_code; + bsnprintf(term_code, sizeof(term_code), + _("Inappropriate term code: %d %c\n"), TermCode, TermCode); + break; + } + bstrftimes(sdt, sizeof(sdt), jcr->jr.StartTime); + bstrftimes(edt, sizeof(edt), jcr->jr.EndTime); + RunTime = jcr->jr.EndTime - jcr->jr.StartTime; + if (jcr->verify_job) { + Name = jcr->verify_job->hdr.name; + } else { + Name = ""; + } + + jobstatus_to_ascii(jcr->FDJobStatus, fd_term_msg, sizeof(fd_term_msg)); + if (jcr->getJobLevel() == L_VERIFY_VOLUME_TO_CATALOG || jcr->getJobLevel() == L_VERIFY_DATA) { + const char *accurate = "yes"; + if (jcr->is_JobLevel(L_VERIFY_DATA)) { + accurate = jcr->accurate ? 
"yes": "no"; + } + jobstatus_to_ascii(jcr->SDJobStatus, sd_term_msg, sizeof(sd_term_msg)); + Jmsg(jcr, msg_type, 0, _("%s %s %s (%s):\n" +" Build OS: %s %s %s\n" +" JobId: %d\n" +" Job: %s\n" +" FileSet: %s\n" +" Verify Level: %s\n" +" Client: %s\n" +" Verify JobId: %d\n" +" Verify Job: %s\n" +" Start time: %s\n" +" End time: %s\n" +" Elapsed time: %s\n" +" Accurate: %s\n" +" Files Expected: %s\n" +" Files Examined: %s\n" +" Non-fatal FD errors: %d\n" +" SD Errors: %d\n" +" FD termination status: %s\n" +" SD termination status: %s\n" +" Termination: %s\n\n"), + BACULA, my_name, VERSION, LSMDATE, + HOST_OS, DISTNAME, DISTVER, + jcr->jr.JobId, + jcr->jr.Job, + jcr->fileset->hdr.name, + level_to_str(edl, sizeof(edl), jcr->getJobLevel()), + jcr->client->hdr.name, + jcr->previous_jr.JobId, + Name, + sdt, + edt, + edit_utime(RunTime, elapsed, sizeof(elapsed)), + accurate, + edit_uint64_with_commas(jcr->ExpectedFiles, ec1), + edit_uint64_with_commas(jcr->JobFiles, ec2), + jcr->JobErrors, + jcr->SDErrors, + fd_term_msg, + sd_term_msg, + term_msg); + } else { + Jmsg(jcr, msg_type, 0, _("%s %s %s (%s):\n" +" Build OS: %s %s %s\n" +" JobId: %d\n" +" Job: %s\n" +" FileSet: %s\n" +" Verify Level: %s\n" +" Client: %s\n" +" Verify JobId: %d\n" +" Verify Job: %s\n" +" Start time: %s\n" +" End time: %s\n" +" Elapsed time: %s\n" +" Files Examined: %s\n" +" Non-fatal FD errors: %d\n" +" FD termination status: %s\n" +" Termination: %s\n\n"), + BACULA, my_name, VERSION, LSMDATE, + HOST_OS, DISTNAME, DISTVER, + jcr->jr.JobId, + jcr->jr.Job, + jcr->fileset->hdr.name, + level_to_str(edl, sizeof(edl), jcr->getJobLevel()), + jcr->client->name(), + jcr->previous_jr.JobId, + Name, + sdt, + edt, + edit_utime(RunTime, elapsed, sizeof(elapsed)), + edit_uint64_with_commas(jcr->JobFiles, ec1), + jcr->JobErrors, + fd_term_msg, + term_msg); + } + Dmsg0(100, "Leave verify_cleanup()\n"); +} + +/* + * This routine is called only during a Verify + */ +void get_attributes_and_compare_to_catalog(JCR *jcr, JobId_t JobId) +{ + BSOCK *fd; + int n, len; + FILE_DBR fdbr; + struct stat statf; /* file stat */ + struct stat statc; /* catalog stat */ + char buf[MAXSTRING]; + POOLMEM *fname = get_pool_memory(PM_MESSAGE); + int do_Digest = CRYPTO_DIGEST_NONE; + int32_t file_index = 0; + + memset(&fdbr, 0, sizeof(FILE_DBR)); + fd = jcr->file_bsock; + fdbr.JobId = JobId; + jcr->FileIndex = 0; + + Dmsg0(20, "bdird: waiting to receive file attributes\n"); + /* + * Get Attributes and Signature from File daemon + * We expect: + * FileIndex + * Stream + * Options or Digest (MD5/SHA1) + * Filename + * Attributes + * Link name ??? + */ + while ((n=bget_dirmsg(fd)) >= 0 && !job_canceled(jcr)) { + int32_t stream, full_stream; + char *attr, *p, *fn; + char Opts_Digest[MAXSTRING]; /* Verify Opts or MD5/SHA1 digest */ + + if (job_canceled(jcr)) { + free_pool_memory(fname); + return; + } + fname = check_pool_memory_size(fname, fd->msglen); + jcr->fname = check_pool_memory_size(jcr->fname, fd->msglen); + Dmsg1(200, "Atts+Digest=%s\n", fd->msg); + if ((len = sscanf(fd->msg, "%ld %d %100s", &file_index, &full_stream, + fname)) != 3) { + Jmsg3(jcr, M_FATAL, 0, _("birdmsglen, fd->msg); + free_pool_memory(fname); + return; + } + stream = full_stream & STREAMMASK_TYPE; + Dmsg4(30, "Got hdr: FilInx=%d FullStream=%d Stream=%d fname=%s.\n", file_index, full_stream, stream, fname); + + /* + * We read the Options or Signature into fname + * to prevent overrun, now copy it to proper location. 
+       */
+      bstrncpy(Opts_Digest, fname, sizeof(Opts_Digest));
+      p = fd->msg;
+      skip_nonspaces(&p);             /* skip FileIndex */
+      skip_spaces(&p);
+      skip_nonspaces(&p);             /* skip Stream */
+      skip_spaces(&p);
+      skip_nonspaces(&p);             /* skip Opts_Digest */
+      p++;                            /* skip space */
+      fn = fname;
+      while (*p != 0) {
+         *fn++ = *p++;                /* copy filename */
+      }
+      *fn = *p++;                     /* term filename and point to attribs */
+      attr = p;
+      /*
+       * Got attributes stream, decode it
+       */
+      if (stream == STREAM_UNIX_ATTRIBUTES || stream == STREAM_UNIX_ATTRIBUTES_EX) {
+         int32_t LinkFIf, LinkFIc;
+         Dmsg2(400, "file_index=%d attr=%s\n", file_index, attr);
+         jcr->JobFiles++;
+         jcr->FileIndex = file_index;    /* remember attribute file_index */
+         jcr->previous_jr.FileIndex = file_index;
+         decode_stat(attr, &statf, sizeof(statf), &LinkFIf);  /* decode file stat packet */
+         do_Digest = CRYPTO_DIGEST_NONE;
+         jcr->fn_printed = false;
+         pm_strcpy(jcr->fname, fname);   /* move filename into JCR */
+
+         Dmsg2(040, "dird<filed: stream=%d %s\n", stream, jcr->fname);
+         Dmsg1(020, "dird<filed: attributes=%s\n", attr);
+         if (jcr->FileIndex <= 0) {
+            continue;
+         }
+         if (!db_get_file_attributes_record(jcr, jcr->db, jcr->fname,
+              &jcr->previous_jr, &fdbr)) {
+            Jmsg(jcr, M_INFO, 0, _("New file: %s\n"), jcr->fname);
+            Dmsg1(020, _("File not in catalog: %s\n"), jcr->fname);
+            jcr->setJobStatus(JS_Differences);
+            continue;
+         } else {
+            /*
+             * mark file record as visited by stuffing the
+             * current JobId, which is unique, into the MarkId field.
+             */
+            db_mark_file_record(jcr, jcr->db, fdbr.FileId, jcr->JobId);
+         }
+
+         Dmsg3(400, "Found %s in catalog. inx=%d Opts=%s\n", jcr->fname,
+            file_index, Opts_Digest);
+         decode_stat(fdbr.LStat, &statc, sizeof(statc), &LinkFIc); /* decode catalog stat */
+         /*
+          * Loop over options supplied by user and verify the
+          * fields he requests.
+          */
+         for (p=Opts_Digest; *p; p++) {
+            char ed1[30], ed2[30];
+            switch (*p) {
+            case 'i':                   /* compare INODEs */
+               if (statc.st_ino != statf.st_ino) {
+                  prt_fname(jcr);
+                  Jmsg(jcr, M_INFO, 0, _(" st_ino differ. Cat: %s File: %s\n"),
+                     edit_uint64((uint64_t)statc.st_ino, ed1),
+                     edit_uint64((uint64_t)statf.st_ino, ed2));
+                  jcr->setJobStatus(JS_Differences);
+               }
+               break;
+            case 'p':                   /* permissions bits */
+               if (statc.st_mode != statf.st_mode) {
+                  prt_fname(jcr);
+                  Jmsg(jcr, M_INFO, 0, _(" st_mode differ. Cat: %x File: %x\n"),
+                     (uint32_t)statc.st_mode, (uint32_t)statf.st_mode);
+                  jcr->setJobStatus(JS_Differences);
+               }
+               break;
+            case 'n':                   /* number of links */
+               if (statc.st_nlink != statf.st_nlink) {
+                  prt_fname(jcr);
+                  Jmsg(jcr, M_INFO, 0, _(" st_nlink differ. Cat: %d File: %d\n"),
+                     (uint32_t)statc.st_nlink, (uint32_t)statf.st_nlink);
+                  jcr->setJobStatus(JS_Differences);
+               }
+               break;
+            case 'u':                   /* user id */
+               if (statc.st_uid != statf.st_uid) {
+                  prt_fname(jcr);
+                  Jmsg(jcr, M_INFO, 0, _(" st_uid differ. Cat: %u File: %u\n"),
+                     (uint32_t)statc.st_uid, (uint32_t)statf.st_uid);
+                  jcr->setJobStatus(JS_Differences);
+               }
+               break;
+            case 'g':                   /* group id */
+               if (statc.st_gid != statf.st_gid) {
+                  prt_fname(jcr);
+                  Jmsg(jcr, M_INFO, 0, _(" st_gid differ. Cat: %u File: %u\n"),
+                     (uint32_t)statc.st_gid, (uint32_t)statf.st_gid);
+                  jcr->setJobStatus(JS_Differences);
+               }
+               break;
+            case 's':                   /* size */
+               if (statc.st_size != statf.st_size) {
+                  prt_fname(jcr);
+                  Jmsg(jcr, M_INFO, 0, _(" st_size differ. Cat: %s File: %s\n"),
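
/*
 * Standalone illustration of the header parsing done just above (not taken
 * from the Bacula sources).  sscanf() stops a %s conversion at the first
 * blank, so only the fixed tokens (FileIndex, Stream, Options/Digest) are
 * taken from it; the file name, which may itself contain blanks, is then
 * recovered by skipping those three tokens by hand.  The attribute data
 * that follows the name in the real message is omitted here.
 */
#include <stdio.h>

static void skip_nonspaces(char **p) { while (**p && **p != ' ') (*p)++; }
static void skip_spaces(char **p)    { while (**p == ' ') (*p)++; }

int main()
{
   char msg[] = "7 3 pins5 /home/user/file with blanks";
   long file_index;
   int stream;
   char opts[101];

   if (sscanf(msg, "%ld %d %100s", &file_index, &stream, opts) != 3) {
      fprintf(stderr, "bad attributes line\n");
      return 1;
   }
   char *p = msg;
   skip_nonspaces(&p); skip_spaces(&p);      /* FileIndex    */
   skip_nonspaces(&p); skip_spaces(&p);      /* Stream       */
   skip_nonspaces(&p); skip_spaces(&p);      /* Opts/Digest  */
   printf("index=%ld stream=%d opts=%s fname=\"%s\"\n", file_index, stream, opts, p);
   return 0;
}
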
+                     edit_uint64((uint64_t)statc.st_size, ed1),
+                     edit_uint64((uint64_t)statf.st_size, ed2));
+                  jcr->setJobStatus(JS_Differences);
+               }
+               break;
+            case 'a':                   /* access time */
+               if (statc.st_atime != statf.st_atime) {
+                  prt_fname(jcr);
+                  Jmsg(jcr, M_INFO, 0, _(" st_atime differs\n"));
+                  jcr->setJobStatus(JS_Differences);
+               }
+               break;
+            case 'm':
+               if (statc.st_mtime != statf.st_mtime) {
+                  prt_fname(jcr);
+                  Jmsg(jcr, M_INFO, 0, _(" st_mtime differs\n"));
+                  jcr->setJobStatus(JS_Differences);
+               }
+               break;
+            case 'c':                   /* ctime */
+               if (statc.st_ctime != statf.st_ctime) {
+                  prt_fname(jcr);
+                  Jmsg(jcr, M_INFO, 0, _(" st_ctime differs\n"));
+                  jcr->setJobStatus(JS_Differences);
+               }
+               break;
+            case 'd':                   /* file size decrease */
+               if (statc.st_size > statf.st_size) {
+                  prt_fname(jcr);
+                  Jmsg(jcr, M_INFO, 0, _(" st_size decrease. Cat: %s File: %s\n"),
+                     edit_uint64((uint64_t)statc.st_size, ed1),
+                     edit_uint64((uint64_t)statf.st_size, ed2));
+                  jcr->setJobStatus(JS_Differences);
+               }
+               break;
+            case '5':                   /* compare MD5 */
+               Dmsg1(500, "set Do_MD5 for %s\n", jcr->fname);
+               do_Digest = CRYPTO_DIGEST_MD5;
+               break;
+            case '1':                   /* compare SHA1 */
+               do_Digest = CRYPTO_DIGEST_SHA1;
+               break;
+            case ':':
+            case 'V':
+            default:
+               break;
+            }
+         }
+      /*
+       * Got Digest Signature from Storage daemon
+       * It came across in the Opts_Digest field.
+       */
+      } else if (crypto_digest_stream_type(stream) != CRYPTO_DIGEST_NONE) {
+         Dmsg2(400, "stream=Digest inx=%d Digest=%s\n", file_index, Opts_Digest);
+         /*
+          * When ever we get a digest it MUST have been
+          * preceded by an attributes record, which sets attr_file_index
+          */
+         if (jcr->FileIndex != file_index) {
+            Jmsg2(jcr, M_FATAL, 0, _("MD5/SHA1 index %d not same as attributes %d\n"),
+               file_index, jcr->FileIndex);
+            free_pool_memory(fname);
+            return;
+         }
+         if (do_Digest != CRYPTO_DIGEST_NONE) {
+            db_escape_string(jcr, jcr->db, buf, Opts_Digest, strlen(Opts_Digest));
+            if (strcmp(buf, fdbr.Digest) != 0) {
+               prt_fname(jcr);
+               Jmsg(jcr, M_INFO, 0, _(" %s differs. File=%s Cat=%s\n"),
+                  stream_to_ascii(stream), buf, fdbr.Digest);
+               jcr->setJobStatus(JS_Differences);
+            }
+            do_Digest = CRYPTO_DIGEST_NONE;
+         }
+      }
+      jcr->JobFiles = file_index;
+   }
+   if (fd->is_error()) {
+      berrno be;
+      Jmsg2(jcr, M_FATAL, 0, _("bdird<filed: bad attributes from filed n=%d : %s\n"),
+         n, be.bstrerror());
+      free_pool_memory(fname);
+      return;
+   }
+
+   /* Now find all the files that are missing -- i.e. all files in
+    *  the database where the MarkId != current JobId
+    */
+   jcr->fn_printed = false;
+   bsnprintf(buf, sizeof(buf),
+      "SELECT Path.Path,Filename.Name FROM File,Path,Filename "
+      "WHERE File.JobId=%d AND File.FileIndex > 0 "
+      "AND File.MarkId!=%d AND File.PathId=Path.PathId "
+      "AND File.FilenameId=Filename.FilenameId",
+      JobId, jcr->JobId);
+   /* missing_handler is called for each file found */
+   db_sql_query(jcr->db, buf, missing_handler, (void *)jcr);
+   if (jcr->fn_printed) {
+      jcr->setJobStatus(JS_Differences);
+   }
+   free_pool_memory(fname);
+}
+
+/*
+ * We are called here for each record that matches the above
+ * SQL query -- that is for each file contained in the Catalog
+ * that was not marked earlier. This means that the file in
+ * question is a missing file (in the Catalog but not on Disk).
+ */
+static int missing_handler(void *ctx, int num_fields, char **row)
+{
+   JCR *jcr = (JCR *)ctx;
+
+   if (job_canceled(jcr)) {
+      return 1;
+   }
+   if (!jcr->fn_printed) {
+      Qmsg(jcr, M_WARNING, 0, _("The following files are in the Catalog but not on %s:\n"),
+         jcr->getJobLevel() == L_VERIFY_VOLUME_TO_CATALOG ?
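
/*
 * Standalone illustration of the missing-file pass above (not taken from
 * the Bacula sources).  Every File row matched during the loop had its
 * MarkId set to the current JobId; one query then returns the rows that
 * were never marked, and a handler with the same shape as missing_handler()
 * is invoked once per missing file.  The driver below feeds the handler a
 * fake row instead of running the query against a real catalog.
 */
#include <stdio.h>

static int report_missing(void *ctx, int num_fields, char **row)
{
   int *count = (int *)ctx;
   (*count)++;
   printf("   %s%s\n", row[0] ? row[0] : "", row[1] ? row[1] : "");
   return 0;
}

int main()
{
   char query[512];
   int verify_jobid = 321, current_jobid = 1234, missing = 0;

   snprintf(query, sizeof(query),
            "SELECT Path.Path,Filename.Name FROM File,Path,Filename "
            "WHERE File.JobId=%d AND File.FileIndex > 0 "
            "AND File.MarkId!=%d AND File.PathId=Path.PathId "
            "AND File.FilenameId=Filename.FilenameId",
            verify_jobid, current_jobid);
   printf("query: %s\n", query);

   char *fake_row[2] = { (char *)"/etc/", (char *)"motd" };
   report_missing(&missing, 2, fake_row);       /* one call per missing file */
   printf("missing files: %d\n", missing);
   return 0;
}
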
"the Volume(s)" : "disk"); + jcr->fn_printed = true; + } + Qmsg(jcr, M_INFO, 0, " %s%s\n", row[0]?row[0]:"", row[1]?row[1]:""); + return 0; +} + + +/* + * Print filename for verify + */ +static void prt_fname(JCR *jcr) +{ + if (!jcr->fn_printed) { + Jmsg(jcr, M_INFO, 0, _("File: %s\n"), jcr->fname); + jcr->fn_printed = true; + } +} diff --git a/src/filed/Makefile.in b/src/filed/Makefile.in new file mode 100644 index 00000000..6c8ace6b --- /dev/null +++ b/src/filed/Makefile.in @@ -0,0 +1,194 @@ +# +# Bacula Makefile for the File daemon +# +# Copyright (C) 2000-2016 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +@MCOMMON@ + +srcdir = . +VPATH = . +.PATH: . + +fd_group=@fd_group@ + +# one up +basedir = .. +# top dir +topdir = ../.. +# this dir relative to top dir +thisdir = src/filed + +DEBUG=@DEBUG@ + +GETTEXT_LIBS = @LIBINTL@ + +AFS_CFLAGS = @AFS_CFLAGS@ +AFS_LIBS = @AFS_LIBS@ +ACLOBJS = @ACLOBJS@ +XATTROBJS=@XATTROBJS@ + +first_rule: all +dummy: + +# +SVRSRCS = filed.c authenticate.c backup.c crypto.c \ + win_efs.c estimate.c \ + fd_plugins.c accurate.c \ + filed_conf.c heartbeat.c hello.c job.c fd_snapshot.c \ + restore.c status.c verify.c verify_vol.c \ + $(ACLOBJS) $(XATTROBJS) + +SVROBJS = $(SVRSRCS:.c=.o) + +JSONOBJS = bfdjson.o filed_conf.o + +# these are the objects that are changed by the .configure process +EXTRAOBJS = @OBJLIST@ + +CAP_LIBS = @CAP_LIBS@ +FDLIBS = @FDLIBS@ # extra libs for File daemon +ZLIBS = @ZLIBS@ +LZO_LIBS = @LZO_LIBS@ +LZO_INC= @LZO_INC@ + +# extra items for linking on Win32 +WIN32OBJS = win32/winmain.o win32/winlib.a win32/winres.res +win32 = $(WIN32OBJS) -luser32 -lgdi32 + +WIN32LIBS = $(@WIN32@) + +.SUFFIXES: .c .o +.PHONY: +.DONTCARE: + +# inference rules +.c.o: + @echo "Compiling $<" + $(NO_ECHO)$(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) $(LZO_INC) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) $< +#------------------------------------------------------------------------- +all: Makefile @WIN32@ bacula-fd @STATIC_FD@ bfdjson + @echo "==== Make of filed is good ====" + @echo " " + +bacl.o: bacl.c + @echo "Compiling $<" + $(NO_ECHO)$(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) $(LZO_INC) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) $(AFS_CFLAGS) $< + +win32/winlib.a: + @if test -f win32/Makefile -a "${GMAKE}" != "none"; then \ + (cd win32; $(GMAKE) DESTDIR=$(DESTDIR)); \ + fi + @rm -f bacula-fd.exe + +win32/winmain.o: + @if test -f win32/Makefile -a "${GMAKE}" != "none"; then \ + (cd win32; $(GMAKE) DESTDIR=$(DESTDIR)); \ + fi + @rm -f bacula-fd.exe + +win32/winres.res: + @if test -f win32/Makefile -a "${GMAKE}" != "none"; then \ + (cd win32; $(GMAKE) DESTDIR=$(DESTDIR)); \ + fi + @rm -f bacula-fd.exe + +# win32 libraries if needed +win32: $(WIN32OBJS) + @if test -f win32/Makefile -a "${GMAKE}" != "none"; then \ + (cd win32; $(GMAKE) DESTDIR=$(DESTDIR)); \ + fi + @rm -f bacula-fd.exe + +bacula-fd: Makefile $(SVROBJS) ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE) ../lib/libbaccfg$(DEFAULT_ARCHIVE_TYPE) ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) @WIN32@ + @echo "Linking $@ ..." + $(LIBTOOL_LINK) $(CXX) $(WLDFLAGS) $(LDFLAGS) -L../lib -L../findlib -o $@ $(SVROBJS) \ + $(WIN32LIBS) $(FDLIBS) $(ZLIBS) -lbacfind -lbaccfg -lbac -lm $(LIBS) \ + $(DLIB) $(WRAPLIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) $(CAP_LIBS) $(AFS_LIBS) $(LZO_LIBS) + +bfdjson: Makefile $(JSONOBJS) ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE) ../lib/libbaccfg$(DEFAULT_ARCHIVE_TYPE) ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) @WIN32@ + @echo "Linking $@ ..." 
+ $(LIBTOOL_LINK) $(CXX) $(WLDFLAGS) $(LDFLAGS) -L../lib -L../findlib -o $@ $(JSONOBJS) \ + $(WIN32LIBS) $(FDLIBS) $(ZLIBS) -lbacfind -lbaccfg -lbac -lm $(LIBS) \ + $(DLIB) $(WRAPLIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) $(CAP_LIBS) $(AFS_LIBS) $(LZO_LIBS) + +static-bacula-fd: Makefile $(SVROBJS) ../findlib/libbacfind.a ../lib/libbaccfg$(DEFAULT_ARCHIVE_TYPE) ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) @WIN32@ + $(LIBTOOL_LINK) $(CXX) $(WLDFLAGS) $(LDFLAGS) -static -L../lib -L../findlib -o $@ $(SVROBJS) \ + $(WIN32LIBS) $(FDLIBS) $(ZLIBS) -lbacfind -lbaccfg -lbac -lm $(LIBS) \ + $(DLIB) $(WRAPLIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) $(CAP_LIBS) $(AFS_LIBS) $(LZO_LIBS) + strip $@ + +Makefile: $(srcdir)/Makefile.in $(topdir)/config.status + cd $(topdir) \ + && CONFIG_FILES=$(thisdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status + +libtool-clean: + @$(RMF) -r .libs _libs + +clean: libtool-clean + @$(RMF) bacula-fd filed core core.* a.out *.o *.bak *~ *.intpro *.extpro 1 2 3 + @$(RMF) static-bacula-fd + @if test -f win32/Makefile -a "${GMAKE}" != "none"; then \ + (cd win32; $(GMAKE) clean); \ + fi + +realclean: clean + @$(RMF) tags bacula-fd.conf + +distclean: realclean + if test $(srcdir) = .; then $(MAKE) realclean; fi + (cd $(srcdir); $(RMF) Makefile) + @if test -f win32/Makefile -a "${GMAKE}" != "none"; then \ + (cd win32; $(GMAKE) distclean); \ + fi + +devclean: realclean + if test $(srcdir) = .; then $(MAKE) realclean; fi + (cd $(srcdir); $(RMF) Makefile) + @if test -f win32/Makefile -a "${GMAKE}" != "none"; then \ + (cd win32; $(GMAKE) devclean); \ + fi + +install: all + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) bacula-fd $(DESTDIR)$(sbindir)/bacula-fd + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) bfdjson $(DESTDIR)$(sbindir)/bfdjson + @srcconf=bacula-fd.conf; \ + if test -f ${DESTDIR}${sysconfdir}/$$srcconf; then \ + destconf=$$srcconf.new; \ + echo " ==> Found existing $$srcconf, installing new conf file as $$destconf"; \ + else \ + destconf=$$srcconf; \ + fi; \ + echo "${INSTALL_CONFIG} $$srcconf ${DESTDIR}${sysconfdir}/$$destconf"; \ + ${INSTALL_CONFIG} $$srcconf ${DESTDIR}${sysconfdir}/$$destconf + @if test "x${fd_group}" != "x" -a "x${DESTDIR}" = "x" ; then \ + chgrp -f ${fd_group} ${DESTDIR}${sysconfdir}/$$destconf ; \ + fi + @if test -f static-bacula-fd; then \ + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) static-bacula-fd $(DESTDIR)$(sbindir)/static-bacula-fd; \ + fi + +uninstall: + (cd $(DESTDIR)$(sbindir); $(RMF) bacula-fd bfdjson) + (cd $(DESTDIR)$(sysconfdir); $(RMF) bacula-fd.conf) + (cd $(DESTDIR)$(sysconfdir); $(RMF) bacula-fd.conf.new) + +# Semi-automatic generation of dependencies: +# Use gcc -MM because X11 `makedepend' doesn't work on all systems +# and it also includes system headers. +# `semi'-automatic since dependencies are generated at distribution time. 
+ +depend: + @$(MV) Makefile Makefile.bak + @$(SED) "/^# DO NOT DELETE:/,$$ d" Makefile.bak > Makefile + @$(ECHO) "# DO NOT DELETE: nice dependency list follows" >> Makefile + @$(CXX) -S -M $(CPPFLAGS) $(XINC) $(LZO_INC) -I$(srcdir) -I$(basedir) *.c >> Makefile + @if test -f Makefile ; then \ + $(RMF) Makefile.bak; \ + else \ + $(MV) Makefile.bak Makefile; \ + echo " ===== Something went wrong in make depend ====="; \ + fi + +# ----------------------------------------------------------------------- +# DO NOT DELETE: nice dependency list follows diff --git a/src/filed/accurate.c b/src/filed/accurate.c new file mode 100644 index 00000000..94911ef5 --- /dev/null +++ b/src/filed/accurate.c @@ -0,0 +1,649 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +#include "bacula.h" +#include "filed.h" +#include "backup.h" + +static int dbglvl=100; + +typedef struct PrivateCurFile { + hlink link; + char *fname; + char *lstat; + char *chksum; + int32_t delta_seq; + bool seen; +} CurFile; + +bool accurate_mark_file_as_seen(JCR *jcr, char *fname) +{ + if (!jcr->accurate || !jcr->file_list) { + return false; + } + /* TODO: just use elt->seen = 1 */ + CurFile *temp = (CurFile *)jcr->file_list->lookup(fname); + if (temp) { + temp->seen = 1; /* records are in memory */ + Dmsg1(dbglvl, "marked <%s> as seen\n", fname); + } else { + Dmsg1(dbglvl, "<%s> not found to be marked as seen\n", fname); + } + return true; +} + +static bool accurate_mark_file_as_seen(JCR *jcr, CurFile *elt) +{ + /* TODO: just use elt->seen = 1 */ + CurFile *temp = (CurFile *)jcr->file_list->lookup(elt->fname); + if (temp) { + temp->seen = 1; /* records are in memory */ + } + return true; +} + +static bool accurate_lookup(JCR *jcr, char *fname, CurFile *ret) +{ + bool found=false; + ret->seen = 0; + + CurFile *temp = (CurFile *)jcr->file_list->lookup(fname); + if (temp) { + memcpy(ret, temp, sizeof(CurFile)); + found=true; + Dmsg1(dbglvl, "lookup <%s> ok\n", fname); + } + + return found; +} + +static bool accurate_init(JCR *jcr, int nbfile) +{ + CurFile *elt = NULL; + jcr->file_list = (htable *)malloc(sizeof(htable)); + jcr->file_list->init(elt, &elt->link, nbfile); + return true; +} + +static bool accurate_send_base_file_list(JCR *jcr) +{ + CurFile *elt; + struct stat statc; + int32_t LinkFIc; + bctx_t bctx; + + memset(&bctx, 0, sizeof(bctx)); + bctx.jcr = jcr; + bctx.data_stream = STREAM_UNIX_ATTRIBUTES; + + if (!jcr->accurate || jcr->getJobLevel() != L_FULL) { + return true; + } + + if (jcr->file_list == NULL) { + return true; + } + + bctx.ff_pkt = init_find_files(); + bctx.ff_pkt->type = FT_BASE; + + foreach_htable(elt, jcr->file_list) { + if (elt->seen) { + Dmsg2(dbglvl, "base file fname=%s seen=%i\n", elt->fname, elt->seen); + /* TODO: skip the decode and use directly the lstat field */ + decode_stat(elt->lstat, &statc, sizeof(statc), &LinkFIc); /* decode catalog stat */ + bctx.ff_pkt->fname = elt->fname; + bctx.ff_pkt->statp = statc; + 
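
/*
 * Standalone illustration of how the CurFile entries above are stored (not
 * taken from the Bacula sources): accurate_add_file() further down packs
 * the fixed header and its three variable-length strings into a single
 * allocation, with the pointers aimed just past the header, so one free()
 * releases the whole record.  The same layout with plain malloc():
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct Entry {
   char   *fname;
   char   *lstat;
   char   *chksum;
   int     delta_seq;
   bool    seen;
};

static Entry *pack_entry(const char *fname, const char *lstat,
                         const char *chksum, int delta_seq)
{
   size_t len = strlen(fname) + strlen(lstat) + strlen(chksum) + 3;  /* three NULs */
   Entry *e = (Entry *)malloc(sizeof(Entry) + len);

   e->fname = (char *)e + sizeof(Entry);       /* strings live right after the header */
   strcpy(e->fname, fname);
   e->lstat = e->fname + strlen(e->fname) + 1;
   strcpy(e->lstat, lstat);
   e->chksum = e->lstat + strlen(e->lstat) + 1;
   strcpy(e->chksum, chksum);
   e->delta_seq = delta_seq;
   e->seen = false;
   return e;
}

int main()
{
   Entry *e = pack_entry("/etc/passwd", "sample-lstat", "sample-chksum", 0);
   printf("%s seen=%d chksum=%s\n", e->fname, (int)e->seen, e->chksum);
   free(e);
   return 0;
}
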
encode_and_send_attributes(bctx); +// free(elt->fname); + } + } + + term_find_files(bctx.ff_pkt); + return true; +} + + +/* This function is called at the end of backup + * We walk over all hash disk element, and we check + * for elt.seen. + */ +static bool accurate_send_deleted_list(JCR *jcr) +{ + CurFile *elt; + struct stat statc; + int32_t LinkFIc; + bctx_t bctx; + + memset(&bctx, 0, sizeof(bctx)); + bctx.jcr = jcr; + bctx.data_stream = STREAM_UNIX_ATTRIBUTES; + + if (!jcr->accurate) { + return true; + } + + if (jcr->file_list == NULL) { + return true; + } + + bctx.ff_pkt = init_find_files(); + bctx.ff_pkt->type = FT_DELETED; + + foreach_htable(elt, jcr->file_list) { + if (elt->seen || plugin_check_file(jcr, elt->fname)) { + continue; + } + Dmsg2(dbglvl, "deleted fname=%s seen=%i\n", elt->fname, elt->seen); + /* TODO: skip the decode and use directly the lstat field */ + decode_stat(elt->lstat, &statc, sizeof(statc), &LinkFIc); /* decode catalog stat */ + bctx.ff_pkt->fname = elt->fname; + bctx.ff_pkt->statp.st_mtime = statc.st_mtime; + bctx.ff_pkt->statp.st_ctime = statc.st_ctime; + encode_and_send_attributes(bctx); +// free(elt->fname); + } + + term_find_files(bctx.ff_pkt); + return true; +} + + +/* This function is called at the end of verify job + * We walk over all hash disk element, and we check + * for elt.seen. + */ +static bool accurate_check_deleted_list(JCR *jcr) +{ + bool ret=true; + CurFile *elt; + + if (!jcr->accurate) { + return true; + } + + if (jcr->file_list == NULL) { + return true; + } + + foreach_htable(elt, jcr->file_list) { + if (elt->seen) { + continue; + } + if (ret) { + Jmsg(jcr, M_INFO, 0, _("The following files were in the Catalog, but not in the Job data:\n"), elt->fname); + } + ret = false; + Jmsg(jcr, M_INFO, 0, _(" %s\n"), elt->fname); + } + return ret; +} + +void accurate_free(JCR *jcr) +{ + if (jcr->file_list) { + jcr->file_list->destroy(); + free(jcr->file_list); + jcr->file_list = NULL; + } +} + +/* Send the deleted or the base file list and cleanup */ +bool accurate_finish(JCR *jcr) +{ + bool ret = true; + + if (jcr->is_canceled() || jcr->is_incomplete()) { + accurate_free(jcr); + return ret; + } + if (jcr->accurate) { + if (jcr->is_JobLevel(L_FULL)) { + if (!jcr->rerunning) { + ret = accurate_send_base_file_list(jcr); + } + } else if (jcr->is_JobLevel(L_VERIFY_DATA)) { + ret = accurate_check_deleted_list(jcr); + + } else { + ret = accurate_send_deleted_list(jcr); + } + accurate_free(jcr); + if (jcr->is_JobLevel(L_FULL)) { + Jmsg(jcr, M_INFO, 0, _("Space saved with Base jobs: %lld MB\n"), + jcr->base_size/(1024*1024)); + } + } + return ret; +} + +static bool accurate_add_file(JCR *jcr, uint32_t len, + char *fname, char *lstat, char *chksum, + int32_t delta) +{ + bool ret = true; + CurFile *item; + + /* we store CurFile, fname and ctime/mtime in the same chunk + * we need one extra byte to handle an empty chksum + */ + item = (CurFile *)jcr->file_list->hash_malloc(sizeof(CurFile)+len+3); + item->seen = 0; + + /* TODO: see if we can optimize this part with memcpy instead of strcpy */ + item->fname = (char *)item+sizeof(CurFile); + strcpy(item->fname, fname); + + item->lstat = item->fname+strlen(item->fname)+1; + strcpy(item->lstat, lstat); + + item->chksum = item->lstat+strlen(item->lstat)+1; + strcpy(item->chksum, chksum); + + item->delta_seq = delta; + + jcr->file_list->insert(item->fname, item); + + Dmsg4(dbglvl, "add fname=<%s> lstat=%s delta_seq=%i chksum=%s\n", + fname, lstat, delta, chksum); + return ret; +} + +bool accurate_check_file(JCR *jcr, 
ATTR *attr, char *digest) +{ + struct stat statc; + int32_t LinkFIc; + bool stat = false; + char ed1[50], ed2[50]; + CurFile elt; + + if (!jcr->accurate) { + goto bail_out; + } + + if (!jcr->file_list) { + goto bail_out; /* Not initialized properly */ + } + + if (!accurate_lookup(jcr, attr->fname, &elt)) { + Dmsg1(dbglvl, "accurate %s (not found)\n", attr->fname); + stat = true; + goto bail_out; + } + decode_stat(elt.lstat, &statc, sizeof(statc), &LinkFIc); /* decode catalog stat */ + + /* + * Loop over options supplied by user and verify the + * fields he requests. + */ + if (statc.st_size != attr->statp.st_size) { + Dmsg3(50, "%s st_size differs. Cat: %s File: %s\n", + attr->fname, + edit_uint64((uint64_t)statc.st_size, ed1), + edit_uint64((uint64_t)attr->statp.st_size, ed2)); + Jmsg(jcr, M_INFO, 0, "Cat st_size differs: %s\n", attr->fname); + stat = true; + } + + if (*elt.chksum && digest && *digest) { + if (strcmp(digest, elt.chksum)) { + Dmsg3(50, "%s chksum differs. Cat: %s File: %s\n", + attr->fname, + elt.chksum, + digest); + Jmsg(jcr, M_INFO, 0, "Cat checksum differs: %s\n", attr->fname); + stat = true; + } + } + + accurate_mark_file_as_seen(jcr, &elt); + +bail_out: + return stat; + +} + +/* + * This function is called for each file seen in fileset. + * We check in file_list hash if fname have been backuped + * the last time. After we can compare Lstat field. + * Full Lstat usage have been removed on 6612 + * + * Returns: true if file has changed (must be backed up) + * false file not changed + */ +bool accurate_check_file(JCR *jcr, FF_PKT *ff_pkt) +{ + int digest_stream = STREAM_NONE; + DIGEST *digest = NULL; + + struct stat statc; + int32_t LinkFIc; + bool stat = false; + char *opts; + char *fname; + CurFile elt; + + ff_pkt->delta_seq = 0; + ff_pkt->accurate_found = false; + + if (!jcr->accurate && !jcr->rerunning) { + return true; + } + + if (!jcr->file_list) { + return true; /* Not initialized properly */ + } + + strip_path(ff_pkt); + + if (S_ISDIR(ff_pkt->statp.st_mode)) { + fname = ff_pkt->link; + } else { + fname = ff_pkt->fname; + } + + if (!accurate_lookup(jcr, fname, &elt)) { + Dmsg1(dbglvl, "accurate %s (not found)\n", fname); + stat = true; + unstrip_path(ff_pkt); + goto bail_out; + } + + unstrip_path(ff_pkt); /* Get full path back */ + ff_pkt->accurate_found = true; + ff_pkt->delta_seq = elt.delta_seq; + + decode_stat(elt.lstat, &statc, sizeof(statc), &LinkFIc); /* decode catalog stat */ + + if (!jcr->rerunning && (jcr->getJobLevel() == L_FULL)) { + opts = ff_pkt->BaseJobOpts; + } else { + opts = ff_pkt->AccurateOpts; + } + + /* + * Loop over options supplied by user and verify the + * fields he requests. + */ + for (char *p=opts; !stat && *p; p++) { + char ed1[30], ed2[30]; + switch (*p) { + case 'i': /* compare INODEs */ + if (statc.st_ino != ff_pkt->statp.st_ino) { + Dmsg3(dbglvl-1, "%s st_ino differ. Cat: %s File: %s\n", + fname, + edit_uint64((uint64_t)statc.st_ino, ed1), + edit_uint64((uint64_t)ff_pkt->statp.st_ino, ed2)); + stat = true; + } + break; + case 'p': /* permissions bits */ + /* TODO: If something change only in perm, user, group + * Backup only the attribute stream + */ + if (statc.st_mode != ff_pkt->statp.st_mode) { + Dmsg3(dbglvl-1, "%s st_mode differ. Cat: %x File: %x\n", + fname, + (uint32_t)statc.st_mode, (uint32_t)ff_pkt->statp.st_mode); + stat = true; + } + break; + case 'n': /* number of links */ + if (statc.st_nlink != ff_pkt->statp.st_nlink) { + Dmsg3(dbglvl-1, "%s st_nlink differ. 
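
/*
 * Standalone illustration of the option letters walked by this loop (not
 * taken from the Bacula sources): each letter of the Verify/Accurate
 * options string enables one comparison between the catalog record and the
 * on-disk stat data.  A reduced version over a few fields:
 */
#include <stdio.h>

struct MiniStat { long size; unsigned uid, gid, mode; long mtime; };

static bool differs(const char *opts, const MiniStat *cat, const MiniStat *fs)
{
   for (const char *p = opts; *p; p++) {
      switch (*p) {
      case 's': if (cat->size  != fs->size)  return true; break;
      case 'u': if (cat->uid   != fs->uid)   return true; break;
      case 'g': if (cat->gid   != fs->gid)   return true; break;
      case 'p': if (cat->mode  != fs->mode)  return true; break;
      case 'm': if (cat->mtime != fs->mtime) return true; break;
      case 'A': return true;                  /* always treat as changed */
      default:  break;                        /* unhandled letters are ignored */
      }
   }
   return false;
}

int main()
{
   MiniStat cat = { 1024, 0, 0, 0644, 1000 };
   MiniStat fs  = { 2048, 0, 0, 0644, 1000 };
   printf("\"ugpm\" -> %s\n", differs("ugpm", &cat, &fs) ? "changed" : "same");
   printf("\"sugp\" -> %s\n", differs("sugp", &cat, &fs) ? "changed" : "same");
   return 0;
}
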
Cat: %d File: %d\n", + fname, + (uint32_t)statc.st_nlink, (uint32_t)ff_pkt->statp.st_nlink); + stat = true; + } + break; + case 'u': /* user id */ + if (statc.st_uid != ff_pkt->statp.st_uid) { + Dmsg3(dbglvl-1, "%s st_uid differ. Cat: %u File: %u\n", + fname, + (uint32_t)statc.st_uid, (uint32_t)ff_pkt->statp.st_uid); + stat = true; + } + break; + case 'g': /* group id */ + if (statc.st_gid != ff_pkt->statp.st_gid) { + Dmsg3(dbglvl-1, "%s st_gid differ. Cat: %u File: %u\n", + fname, + (uint32_t)statc.st_gid, (uint32_t)ff_pkt->statp.st_gid); + stat = true; + } + break; + case 's': /* size */ + if (statc.st_size != ff_pkt->statp.st_size) { + Dmsg3(dbglvl-1, "%s st_size differ. Cat: %s File: %s\n", + fname, + edit_uint64((uint64_t)statc.st_size, ed1), + edit_uint64((uint64_t)ff_pkt->statp.st_size, ed2)); + stat = true; + } + break; + case 'a': /* access time */ + if (statc.st_atime != ff_pkt->statp.st_atime) { + Dmsg1(dbglvl-1, "%s st_atime differs\n", fname); + stat = true; + } + break; + case 'm': /* modification time */ + if (statc.st_mtime != ff_pkt->statp.st_mtime) { + Dmsg1(dbglvl-1, "%s st_mtime differs\n", fname); + stat = true; + } + break; + case 'M': /* Look mtime/ctime like normal incremental backup */ + if (ff_pkt->incremental && + (ff_pkt->statp.st_mtime > ff_pkt->save_time && + ((ff_pkt->flags & FO_MTIMEONLY) || + ff_pkt->statp.st_ctime > ff_pkt->save_time))) + { + Dmsg1(dbglvl-1, "%s mtime/ctime more recent than save_time\n", fname); + stat = true; + } + break; + case 'c': /* ctime */ + if (statc.st_ctime != ff_pkt->statp.st_ctime) { + Dmsg1(dbglvl-1, "%s st_ctime differs\n", fname); + stat = true; + } + break; + case 'd': /* file size decrease */ + if (statc.st_size > ff_pkt->statp.st_size) { + Dmsg3(dbglvl-1, "%s st_size decrease. Cat: %s File: %s\n", + fname, + edit_uint64((uint64_t)statc.st_size, ed1), + edit_uint64((uint64_t)ff_pkt->statp.st_size, ed2)); + stat = true; + } + break; + case 'A': /* Always backup a file */ + stat = true; + break; + /* TODO: cleanup and factorise this function with verify.c */ + case '5': /* compare MD5 */ + case '1': /* compare SHA1 */ + /* + * The remainder of the function is all about getting the checksum. + * First we initialise, then we read files, other streams and Finder Info. + */ + if (!stat && ff_pkt->type != FT_LNKSAVED && + (S_ISREG(ff_pkt->statp.st_mode) && + ff_pkt->flags & (FO_MD5|FO_SHA1|FO_SHA256|FO_SHA512))) + { + + if (!*elt.chksum && !jcr->rerunning) { + Jmsg(jcr, M_WARNING, 0, _("Cannot verify checksum for %s\n"), + ff_pkt->fname); + stat = true; + break; + } + + /* + * Create our digest context. If this fails, the digest will be set + * to NULL and not used. + */ + if (ff_pkt->flags & FO_MD5) { + digest = crypto_digest_new(jcr, CRYPTO_DIGEST_MD5); + digest_stream = STREAM_MD5_DIGEST; + + } else if (ff_pkt->flags & FO_SHA1) { + digest = crypto_digest_new(jcr, CRYPTO_DIGEST_SHA1); + digest_stream = STREAM_SHA1_DIGEST; + + } else if (ff_pkt->flags & FO_SHA256) { + digest = crypto_digest_new(jcr, CRYPTO_DIGEST_SHA256); + digest_stream = STREAM_SHA256_DIGEST; + + } else if (ff_pkt->flags & FO_SHA512) { + digest = crypto_digest_new(jcr, CRYPTO_DIGEST_SHA512); + digest_stream = STREAM_SHA512_DIGEST; + } + + /* Did digest initialization fail? 
*/ + if (digest_stream != STREAM_NONE && digest == NULL) { + Jmsg(jcr, M_WARNING, 0, _("%s digest initialization failed\n"), + stream_to_ascii(digest_stream)); + } + + /* compute MD5 or SHA1 hash */ + if (digest) { + char md[CRYPTO_DIGEST_MAX_SIZE]; + uint32_t size; + + size = sizeof(md); + + if (digest_file(jcr, ff_pkt, digest) != 0) { + jcr->JobErrors++; + + } else if (crypto_digest_finalize(digest, (uint8_t *)md, &size)) { + char *digest_buf; + const char *digest_name; + + digest_buf = (char *)malloc(BASE64_SIZE(size)); + digest_name = crypto_digest_name(digest); + + bin_to_base64(digest_buf, BASE64_SIZE(size), md, size, true); + + if (strcmp(digest_buf, elt.chksum)) { + Dmsg4(dbglvl,"%s %s chksum diff. Cat: %s File: %s\n", + fname, + digest_name, + elt.chksum, + digest_buf); + stat = true; + } + + free(digest_buf); + } + crypto_digest_free(digest); + } + } + + break; + case ':': + case 'J': + case 'C': + default: + break; + } + } + + /* In Incr/Diff accurate mode, we mark all files as seen + * When in Full+Base mode, we mark only if the file match exactly + */ + if (jcr->getJobLevel() == L_FULL) { + if (!stat) { + /* compute space saved with basefile */ + jcr->base_size += ff_pkt->statp.st_size; + accurate_mark_file_as_seen(jcr, &elt); + } + } else { + accurate_mark_file_as_seen(jcr, &elt); + } + +bail_out: + return stat; +} + +/* + * TODO: use big buffer from htable + */ +int accurate_cmd(JCR *jcr) +{ + BSOCK *dir = jcr->dir_bsock; + int lstat_pos, chksum_pos; + int32_t nb; + uint16_t delta_seq; + + if (job_canceled(jcr)) { + return true; + } + if (sscanf(dir->msg, "accurate files=%ld", &nb) != 1) { + dir->fsend(_("2991 Bad accurate command\n")); + return false; + } + + jcr->accurate = true; + + accurate_init(jcr, nb); + + /* + * buffer = sizeof(CurFile) + dirmsg + * dirmsg = fname + \0 + lstat + \0 + checksum + \0 + delta_seq + \0 + */ + /* get current files */ + while (dir->recv() >= 0) { + lstat_pos = strlen(dir->msg) + 1; + if (lstat_pos < dir->msglen) { + chksum_pos = lstat_pos + strlen(dir->msg + lstat_pos) + 1; + + if (chksum_pos >= dir->msglen) { + chksum_pos = lstat_pos - 1; /* tweak: no checksum, point to the last \0 */ + delta_seq = 0; + } else { + delta_seq = str_to_int32(dir->msg + + chksum_pos + + strlen(dir->msg + chksum_pos) + 1); + } + + accurate_add_file(jcr, dir->msglen, + dir->msg, /* Path */ + dir->msg + lstat_pos, /* LStat */ + dir->msg + chksum_pos, /* CheckSum */ + delta_seq); /* Delta Sequence */ + } + } + +#ifdef DEBUG + extern void *start_heap; + + char b1[50], b2[50], b3[50], b4[50], b5[50]; + Dmsg5(dbglvl," Heap: heap=%s smbytes=%s max_bytes=%s bufs=%s max_bufs=%s\n", + edit_uint64_with_commas((char *)sbrk(0)-(char *)start_heap, b1), + edit_uint64_with_commas(sm_bytes, b2), + edit_uint64_with_commas(sm_max_bytes, b3), + edit_uint64_with_commas(sm_buffers, b4), + edit_uint64_with_commas(sm_max_buffers, b5)); +#endif + + return true; +} diff --git a/src/filed/authenticate.c b/src/filed/authenticate.c new file mode 100644 index 00000000..81c8a0e1 --- /dev/null +++ b/src/filed/authenticate.c @@ -0,0 +1,268 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Authenticate Director who is attempting to connect. + * + * Kern Sibbald, October 2000 + * + */ + +#include "bacula.h" +#include "filed.h" + +extern CLIENT *me; /* my resource */ + +const int dbglvl = 50; + +/* Version at end of Hello + * prior to 10Mar08 no version + * 1 10Mar08 + * 2 13Mar09 - added the ability to restore from multiple storages + * 3 03Sep10 - added the restore object command for vss plugin 4.0 + * 4 25Nov10 - added bandwidth command 5.1 + * 5 24Nov11 - added new restore object command format (pluginname) 6.0 + * 6 15Feb12 - added Component selection information list + * 7 19Feb12 - added Expected files to restore + * 8 22Mar13 - added restore options + version for SD + * 9 06Aug13 - added comm line compression + * 10 01Jan14 - added SD Calls Client and api version to status command + */ +#define FD_VERSION 10 + +/* For compatibility with old Community SDs */ +static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; + +/* + * Authenticated the Director + */ +bool authenticate_director(JCR *jcr) +{ + DIRRES *director = jcr->director; + int tls_local_need = BNET_TLS_NONE; + int tls_remote_need = BNET_TLS_NONE; + int compatible = true; /* Want md5 compatible DIR */ + bool auth_success = false; + alist *verify_list = NULL; + btimer_t *tid = NULL; + BSOCK *dir = jcr->dir_bsock; + + if (have_tls) { + /* TLS Requirement */ + if (director->tls_enable) { + if (director->tls_require) { + tls_local_need = BNET_TLS_REQUIRED; + } else { + tls_local_need = BNET_TLS_OK; + } + } + + if (director->tls_authenticate) { + tls_local_need = BNET_TLS_REQUIRED; + } + + if (director->tls_verify_peer) { + verify_list = director->tls_allowed_cns; + } + } + + tid = start_bsock_timer(dir, AUTH_TIMEOUT); + /* Challenge the director */ + auth_success = cram_md5_challenge(dir, director->password, tls_local_need, compatible); + if (job_canceled(jcr)) { + auth_success = false; + goto auth_fatal; /* quick exit */ + } + if (auth_success) { + auth_success = cram_md5_respond(dir, director->password, &tls_remote_need, &compatible); + if (!auth_success) { + char addr[64]; + char *who = dir->get_peer(addr, sizeof(addr)) ? dir->who() : addr; + Dmsg1(dbglvl, "cram_get_auth respond failed for Director: %s\n", who); + } + } else { + char addr[64]; + char *who = dir->get_peer(addr, sizeof(addr)) ? 
dir->who() : addr; + Dmsg1(dbglvl, "cram_auth challenge failed for Director %s\n", who); + } + if (!auth_success) { + Emsg1(M_FATAL, 0, _("Incorrect password given by Director at %s.\n"), + dir->who()); + goto auth_fatal; + } + + /* Verify that the remote host is willing to meet our TLS requirements */ + if (tls_remote_need < tls_local_need && tls_local_need != BNET_TLS_OK && tls_remote_need != BNET_TLS_OK) { + Jmsg0(jcr, M_FATAL, 0, _("Authorization problem: Remote server did not" + " advertize required TLS support.\n")); + Dmsg2(dbglvl, "remote_need=%d local_need=%d\n", tls_remote_need, tls_local_need); + auth_success = false; + goto auth_fatal; + } + + /* Verify that we are willing to meet the remote host's requirements */ + if (tls_remote_need > tls_local_need && tls_local_need != BNET_TLS_OK && tls_remote_need != BNET_TLS_OK) { + Jmsg0(jcr, M_FATAL, 0, _("Authorization problem: Remote server requires TLS.\n")); + Dmsg2(dbglvl, "remote_need=%d local_need=%d\n", tls_remote_need, tls_local_need); + auth_success = false; + goto auth_fatal; + } + + if (tls_local_need >= BNET_TLS_OK && tls_remote_need >= BNET_TLS_OK) { + /* Engage TLS! Full Speed Ahead! */ + if (!bnet_tls_server(director->tls_ctx, dir, verify_list)) { + Jmsg0(jcr, M_FATAL, 0, _("TLS negotiation failed.\n")); + auth_success = false; + goto auth_fatal; + } + if (director->tls_authenticate) { /* authentication only? */ + dir->free_tls(); /* shutodown tls */ + } + } + auth_success = true; + +auth_fatal: + if (tid) { + stop_bsock_timer(tid); + tid = NULL; + } + if (auth_success) { + return send_hello_ok(dir); + } + send_sorry(dir); + /* Single thread all failures to avoid DOS */ + P(mutex); + bmicrosleep(6, 0); + V(mutex); + return false; +} + + +/* + * First prove our identity to the Storage daemon, then + * make him prove his identity. 
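
/*
 * Standalone illustration of the two TLS-requirement checks above (not
 * taken from the Bacula sources).  Each side advertises one of three
 * levels -- none, capable (OK) or required -- and the session fails only
 * when one side requires TLS while the other is not even capable of it;
 * if both are at least capable, TLS is started.
 */
#include <stdio.h>

enum { TLS_NONE = 0, TLS_CAPABLE = 1, TLS_REQUIRED = 2 };

/* returns -1 on failure, 0 for a clear-text session, 1 to start TLS */
static int negotiate_tls(int local_need, int remote_need)
{
   if (remote_need < local_need && local_need != TLS_CAPABLE && remote_need != TLS_CAPABLE)
      return -1;                 /* we require TLS, peer cannot provide it */
   if (remote_need > local_need && local_need != TLS_CAPABLE && remote_need != TLS_CAPABLE)
      return -1;                 /* peer requires TLS, we cannot provide it */
   if (local_need >= TLS_CAPABLE && remote_need >= TLS_CAPABLE)
      return 1;                  /* both capable: engage TLS */
   return 0;
}

int main()
{
   printf("%d\n", negotiate_tls(TLS_REQUIRED, TLS_NONE));     /* -1: fail       */
   printf("%d\n", negotiate_tls(TLS_CAPABLE, TLS_REQUIRED));  /*  1: start TLS  */
   printf("%d\n", negotiate_tls(TLS_NONE, TLS_NONE));         /*  0: clear text */
   return 0;
}
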
+ */ +bool authenticate_storagedaemon(JCR *jcr) +{ + BSOCK *sd = jcr->store_bsock; + int tls_local_need = BNET_TLS_NONE; + int tls_remote_need = BNET_TLS_NONE; + int compatible = true; + bool auth_success = false; + int sd_version = 0; + + btimer_t *tid = start_bsock_timer(sd, AUTH_TIMEOUT); + + /* TLS Requirement */ + if (have_tls && me->tls_enable) { + if (me->tls_require) { + tls_local_need = BNET_TLS_REQUIRED; + } else { + tls_local_need = BNET_TLS_OK; + } + } + + if (me->tls_authenticate) { + tls_local_need = BNET_TLS_REQUIRED; + } + + if (job_canceled(jcr)) { + auth_success = false; /* force quick exit */ + goto auth_fatal; + } + + /* Respond to SD challenge */ + Dmsg0(050, "==== respond to SD challenge\n"); + auth_success = cram_md5_respond(sd, jcr->sd_auth_key, &tls_remote_need, &compatible); + if (job_canceled(jcr)) { + auth_success = false; /* force quick exit */ + goto auth_fatal; + } + if (!auth_success) { + Dmsg1(dbglvl, "cram_respond failed for SD: %s\n", sd->who()); + } else { + /* Now challenge him */ + Dmsg0(050, "==== Challenge SD\n"); + auth_success = cram_md5_challenge(sd, jcr->sd_auth_key, tls_local_need, compatible); + if (!auth_success) { + Dmsg1(dbglvl, "cram_challenge failed for SD: %s\n", sd->who()); + } + } + + if (!auth_success) { + Jmsg(jcr, M_FATAL, 0, _("Authorization key rejected by Storage daemon.\n" + "For help, please see " MANUAL_AUTH_URL "\n")); + goto auth_fatal; + } else { + Dmsg0(050, "Authorization with SD is OK\n"); + } + + /* Verify that the remote host is willing to meet our TLS requirements */ + if (tls_remote_need < tls_local_need && tls_local_need != BNET_TLS_OK && tls_remote_need != BNET_TLS_OK) { + Jmsg(jcr, M_FATAL, 0, _("Authorization problem: Remote server did not" + " advertize required TLS support.\n")); + Dmsg2(dbglvl, "remote_need=%d local_need=%d\n", tls_remote_need, tls_local_need); + auth_success = false; + goto auth_fatal; + } + + /* Verify that we are willing to meet the remote host's requirements */ + if (tls_remote_need > tls_local_need && tls_local_need != BNET_TLS_OK && tls_remote_need != BNET_TLS_OK) { + Jmsg(jcr, M_FATAL, 0, _("Authorization problem: Remote server requires TLS.\n")); + Dmsg2(dbglvl, "remote_need=%d local_need=%d\n", tls_remote_need, tls_local_need); + auth_success = false; + goto auth_fatal; + } + + if (tls_local_need >= BNET_TLS_OK && tls_remote_need >= BNET_TLS_OK) { + /* Engage TLS! Full Speed Ahead! */ + if (!bnet_tls_client(me->tls_ctx, sd, NULL)) { + Jmsg(jcr, M_FATAL, 0, _("TLS negotiation failed.\n")); + auth_success = false; + goto auth_fatal; + } + if (me->tls_authenticate) { /* tls authentication only? 
*/ + sd->free_tls(); /* yes, shutdown tls */ + } + } + if (sd->recv() <= 0) { + auth_success = false; + goto auth_fatal; + } + sscanf(sd->msg, "3000 OK Hello %d", &sd_version); + if (sd_version >= 1 && me->comm_compression) { + sd->set_compress(); + } else { + sd->clear_compress(); + Dmsg0(050, "*** No FD compression with SD\n"); + } + + /* At this point, we have successfully connected */ + +auth_fatal: + /* Destroy session key */ + memset(jcr->sd_auth_key, 0, strlen(jcr->sd_auth_key)); + stop_bsock_timer(tid); + /* Single thread all failures to avoid DOS */ + if (!auth_success) { + P(mutex); + bmicrosleep(6, 0); + V(mutex); + } + return auth_success; +} diff --git a/src/filed/backup.c b/src/filed/backup.c new file mode 100644 index 00000000..7e001381 --- /dev/null +++ b/src/filed/backup.c @@ -0,0 +1,1456 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. + */ +/* + * Bacula File Daemon backup.c send file attributes and data + * to the Storage daemon. + * + * Kern Sibbald, March MM + * + */ + +#include "bacula.h" +#include "filed.h" +#include "backup.h" + +#ifdef HAVE_LZO +const bool have_lzo = true; +#else +const bool have_lzo = false; +#endif + +#ifdef HAVE_LIBZ +const bool have_libz = true; +#else +const bool have_libz = false; +#endif + +/* Forward referenced functions */ +int save_file(JCR *jcr, FF_PKT *ff_pkt, bool top_level); +static int send_data(bctx_t &bctx, int stream); +static void close_vss_backup_session(JCR *jcr); +#ifdef HAVE_DARWIN_OS +static bool send_resource_fork(bctx_t &bctx); +#endif +static bool setup_compression(bctx_t &bctx); +static bool do_lzo_compression(bctx_t &bctx); +static bool do_libz_compression(bctx_t &bctx); + +/** + * Find all the requested files and send them + * to the Storage daemon. + * + * Note, we normally carry on a one-way + * conversation from this point on with the SD, simply blasting + * data to him. To properly know what is going on, we + * also run a "heartbeat" monitor which reads the socket and + * reacts accordingly (at the moment it has nothing to do + * except echo the heartbeat to the Director). + * + */ +bool blast_data_to_storage_daemon(JCR *jcr, char *addr) +{ + BSOCK *sd; + bool ok = true; + // TODO landonf: Allow user to specify encryption algorithm + + sd = jcr->store_bsock; + + jcr->setJobStatus(JS_Running); + + Dmsg1(300, "bfiled: opened data connection %d to stored\n", sd->m_fd); + + LockRes(); + CLIENT *client = (CLIENT *)GetNextRes(R_CLIENT, NULL); + UnlockRes(); + uint32_t buf_size; + if (client) { + buf_size = client->max_network_buffer_size; + } else { + buf_size = 0; /* use default */ + } + if (!sd->set_buffer_size(buf_size, BNET_SETBUF_WRITE)) { + jcr->setJobStatus(JS_ErrorTerminated); + Jmsg(jcr, M_FATAL, 0, _("Cannot set buffer size FD->SD.\n")); + return false; + } + + jcr->buf_size = sd->msglen; + /** + * Adjust for compression so that output buffer is + * 12 bytes + 0.1% larger than input buffer plus 18 bytes. 
+ * This gives a bit extra plus room for the sparse addr if any. + * Note, we adjust the read size to be smaller so that the + * same output buffer can be used without growing it. + * + * For LZO1X compression the recommended value is : + * output_block_size = input_block_size + (input_block_size / 16) + 64 + 3 + sizeof(comp_stream_header) + * + * The zlib compression workset is initialized here to minimize + * the "per file" load. The jcr member is only set, if the init + * was successful. + * + * For the same reason, lzo compression is initialized here. + */ + if (have_lzo) { + jcr->compress_buf_size = MAX(jcr->buf_size + (jcr->buf_size / 16) + 67 + (int)sizeof(comp_stream_header), jcr->buf_size + ((jcr->buf_size+999) / 1000) + 30); + jcr->compress_buf = get_memory(jcr->compress_buf_size); + } else { + jcr->compress_buf_size = jcr->buf_size + ((jcr->buf_size+999) / 1000) + 30; + jcr->compress_buf = get_memory(jcr->compress_buf_size); + } + +#ifdef HAVE_LIBZ + z_stream *pZlibStream = (z_stream*)malloc(sizeof(z_stream)); + if (pZlibStream) { + pZlibStream->zalloc = Z_NULL; + pZlibStream->zfree = Z_NULL; + pZlibStream->opaque = Z_NULL; + pZlibStream->state = Z_NULL; + + if (deflateInit(pZlibStream, Z_DEFAULT_COMPRESSION) == Z_OK) { + jcr->pZLIB_compress_workset = pZlibStream; + } else { + free (pZlibStream); + } + } +#endif + +#ifdef HAVE_LZO + lzo_voidp pLzoMem = (lzo_voidp) malloc(LZO1X_1_MEM_COMPRESS); + if (pLzoMem) { + if (lzo_init() == LZO_E_OK) { + jcr->LZO_compress_workset = pLzoMem; + } else { + free (pLzoMem); + } + } +#endif + + if (!crypto_session_start(jcr)) { + return false; + } + + set_find_options(jcr->ff, jcr->incremental, jcr->mtime); + set_find_snapshot_function(jcr->ff, snapshot_convert_path); + + /** in accurate mode, we overload the find_one check function */ + if (jcr->accurate) { + set_find_changed_function((FF_PKT *)jcr->ff, accurate_check_file); + } + start_heartbeat_monitor(jcr); + +#ifdef HAVE_ACL + jcr->bacl = (BACL*)new_bacl(); +#endif +#ifdef HAVE_XATTR + jcr->bxattr = (BXATTR*)new_bxattr(); +#endif + + /* Subroutine save_file() is called for each file */ + if (!find_files(jcr, (FF_PKT *)jcr->ff, save_file, plugin_save)) { + ok = false; /* error */ + jcr->setJobStatus(JS_ErrorTerminated); + } +#ifdef HAVE_ACL + if (jcr->bacl && jcr->bacl->get_acl_nr_errors() > 0) { + Jmsg(jcr, M_WARNING, 0, _("Had %ld acl errors while doing backup\n"), + jcr->bacl->get_acl_nr_errors()); + } +#endif +#ifdef HAVE_XATTR + if (jcr->bxattr && jcr->bxattr->get_xattr_nr_errors() > 0) { + Jmsg(jcr, M_WARNING, 0, _("Had %ld xattr errors while doing backup\n"), + jcr->bxattr->get_xattr_nr_errors()); + } +#endif + + /* Delete or keep snapshots */ + close_snapshot_backup_session(jcr); + close_vss_backup_session(jcr); + + accurate_finish(jcr); /* send deleted or base file list to SD */ + + stop_heartbeat_monitor(jcr); + + sd->signal(BNET_EOD); /* end of sending data */ + +#ifdef HAVE_ACL + if (jcr->bacl) { + delete(jcr->bacl); + jcr->bacl = NULL; + } +#endif +#ifdef HAVE_XATTR + if (jcr->bxattr) { + delete(jcr->bxattr); + jcr->bxattr = NULL; + } +#endif + if (jcr->big_buf) { + bfree_and_null(jcr->big_buf); + } + if (jcr->compress_buf) { + free_and_null_pool_memory(jcr->compress_buf); + } + if (jcr->pZLIB_compress_workset) { + /* Free the zlib stream */ +#ifdef HAVE_LIBZ + deflateEnd((z_stream *)jcr->pZLIB_compress_workset); +#endif + bfree_and_null(jcr->pZLIB_compress_workset); + } + if (jcr->LZO_compress_workset) { + bfree_and_null(jcr->LZO_compress_workset); + } + + 
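
/*
 * Standalone illustration of the compression-buffer sizing computed above
 * (not taken from the Bacula sources).  The output buffer has to hold the
 * worst case of whichever compressor runs, so the code keeps the larger of
 * the zlib-style bound (~0.1% plus a small constant) and the LZO1X bound
 * (input + input/16 + constant).  comp_header_size stands in for
 * sizeof(comp_stream_header).
 */
#include <stdio.h>

static size_t compress_buf_size(size_t buf_size, bool have_lzo, size_t comp_header_size)
{
   size_t zlib_bound = buf_size + ((buf_size + 999) / 1000) + 30;
   if (!have_lzo) {
      return zlib_bound;
   }
   size_t lzo_bound = buf_size + (buf_size / 16) + 64 + 3 + comp_header_size;
   return lzo_bound > zlib_bound ? lzo_bound : zlib_bound;
}

int main()
{
   size_t in = 65536;                 /* a typical network buffer size */
   printf("zlib only: %zu bytes\n", compress_buf_size(in, false, 16));
   printf("with LZO : %zu bytes\n", compress_buf_size(in, true, 16));
   return 0;
}
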
crypto_session_end(jcr); + + + Dmsg1(100, "end blast_data ok=%d\n", ok); + return ok; +} + + +/** + * Called here by find() for each file included. + * This is a callback. The original is find_files() above. + * + * Send the file and its data to the Storage daemon. + * + * Returns: 1 if OK + * 0 if error + * -1 to ignore file/directory (not used here) + */ +int save_file(JCR *jcr, FF_PKT *ff_pkt, bool top_level) +{ + bool do_read = false; + bool plugin_started = false; + bool do_plugin_set = false; + int stat; + int rtnstat = 0; + bool has_file_data = false; + struct save_pkt sp; /* used by option plugin */ + BSOCK *sd = jcr->store_bsock; + bctx_t bctx; /* backup context */ + + memset(&bctx, 0, sizeof(bctx)); + bctx.sd = sd; + bctx.ff_pkt = ff_pkt; + bctx.jcr = jcr; + + + time_t now = time(NULL); + if (jcr->last_stat_time == 0) { + jcr->last_stat_time = now; + jcr->stat_interval = 30; /* Default 30 seconds */ + } else if (now >= jcr->last_stat_time + jcr->stat_interval) { + jcr->dir_bsock->fsend("Progress JobId=%ld files=%ld bytes=%lld bps=%ld\n", + jcr->JobId, jcr->JobFiles, jcr->JobBytes, jcr->LastRate); + jcr->last_stat_time = now; + } + + if (jcr->is_canceled() || jcr->is_incomplete()) { + Dmsg0(100, "Job canceled by user or marked incomplete.\n"); + return 0; + } + + jcr->num_files_examined++; /* bump total file count */ + + switch (ff_pkt->type) { + case FT_LNKSAVED: /* Hard linked, file already saved */ + Dmsg2(130, "FT_LNKSAVED hard link: %s => %s\n", ff_pkt->fname, ff_pkt->link); + break; + case FT_REGE: + Dmsg1(130, "FT_REGE saving: %s\n", ff_pkt->fname); + has_file_data = true; + break; + case FT_REG: + Dmsg1(130, "FT_REG saving: %s\n", ff_pkt->fname); + has_file_data = true; + break; + case FT_LNK: + Dmsg2(130, "FT_LNK saving: %s -> %s\n", ff_pkt->fname, ff_pkt->link); + break; + case FT_RESTORE_FIRST: + Dmsg1(100, "FT_RESTORE_FIRST saving: %s\n", ff_pkt->fname); + break; + case FT_PLUGIN_CONFIG: + Dmsg1(100, "FT_PLUGIN_CONFIG saving: %s\n", ff_pkt->fname); + break; + case FT_DIRBEGIN: + jcr->num_files_examined--; /* correct file count */ + return 1; /* not used */ + case FT_NORECURSE: + Jmsg(jcr, M_INFO, 1, _(" Recursion turned off. Will not descend from %s into %s\n"), + ff_pkt->top_fname, ff_pkt->fname); + ff_pkt->type = FT_DIREND; /* Backup only the directory entry */ + break; + case FT_NOFSCHG: + /* Suppress message for /dev filesystems */ + if (!is_in_fileset(ff_pkt)) { + Jmsg(jcr, M_INFO, 1, _(" %s is a different filesystem. Will not descend from %s into it.\n"), + ff_pkt->fname, ff_pkt->top_fname); + } + ff_pkt->type = FT_DIREND; /* Backup only the directory entry */ + break; + case FT_INVALIDFS: + Jmsg(jcr, M_INFO, 1, _(" Disallowed filesystem. Will not descend from %s into %s\n"), + ff_pkt->top_fname, ff_pkt->fname); + ff_pkt->type = FT_DIREND; /* Backup only the directory entry */ + break; + case FT_INVALIDDT: + Jmsg(jcr, M_INFO, 1, _(" Disallowed drive type. 
Will not descend into %s\n"), + ff_pkt->fname); + break; + case FT_REPARSE: + case FT_JUNCTION: + case FT_DIREND: + Dmsg1(130, "FT_DIREND: %s\n", ff_pkt->link); + break; + case FT_SPEC: + Dmsg1(130, "FT_SPEC saving: %s\n", ff_pkt->fname); + if (S_ISSOCK(ff_pkt->statp.st_mode)) { + Jmsg(jcr, M_SKIPPED, 1, _(" Socket file skipped: %s\n"), ff_pkt->fname); + return 1; + } + break; + case FT_RAW: + Dmsg1(130, "FT_RAW saving: %s\n", ff_pkt->fname); + has_file_data = true; + break; + case FT_FIFO: + Dmsg1(130, "FT_FIFO saving: %s\n", ff_pkt->fname); + break; + case FT_NOACCESS: { + berrno be; + Jmsg(jcr, M_NOTSAVED, 0, _(" Could not access \"%s\": ERR=%s\n"), ff_pkt->fname, + be.bstrerror(ff_pkt->ff_errno)); + jcr->JobErrors++; + return 1; + } + case FT_NOFOLLOW: { + berrno be; + Jmsg(jcr, M_NOTSAVED, 0, _(" Could not follow link \"%s\": ERR=%s\n"), + ff_pkt->fname, be.bstrerror(ff_pkt->ff_errno)); + jcr->JobErrors++; + return 1; + } + case FT_NOSTAT: { + berrno be; + Jmsg(jcr, M_NOTSAVED, 0, _(" Could not stat \"%s\": ERR=%s\n"), ff_pkt->fname, + be.bstrerror(ff_pkt->ff_errno)); + jcr->JobErrors++; + return 1; + } + case FT_DIRNOCHG: + case FT_NOCHG: + Jmsg(jcr, M_SKIPPED, 1, _(" Unchanged file skipped: %s\n"), ff_pkt->fname); + return 1; + case FT_ISARCH: + Jmsg(jcr, M_NOTSAVED, 0, _(" Archive file not saved: %s\n"), ff_pkt->fname); + return 1; + case FT_NOOPEN: { + berrno be; + Jmsg(jcr, M_NOTSAVED, 0, _(" Could not open directory \"%s\": ERR=%s\n"), + ff_pkt->fname, be.bstrerror(ff_pkt->ff_errno)); + jcr->JobErrors++; + return 1; + } + case FT_DELETED: + Dmsg1(130, "FT_DELETED: %s\n", ff_pkt->fname); + break; + default: + Jmsg(jcr, M_NOTSAVED, 0, _(" Unknown file type %d; not saved: %s\n"), + ff_pkt->type, ff_pkt->fname); + jcr->JobErrors++; + return 1; + } + + Dmsg1(130, "bfiled: sending %s to stored\n", ff_pkt->fname); + + /** Digests and encryption are only useful if there's file data */ + if (has_file_data && !crypto_setup_digests(bctx)) { + goto good_rtn; + } + + /** Initialize the file descriptor we use for data and other streams. 
*/ + binit(&ff_pkt->bfd); + if (ff_pkt->flags & FO_PORTABLE) { + set_portable_backup(&ff_pkt->bfd); /* disable Win32 BackupRead() */ + } + + if (ff_pkt->cmd_plugin) { + do_plugin_set = true; + + /* option and cmd plugin are not compatible together */ + } else if (ff_pkt->opt_plugin) { + + /* ask the option plugin what to do with this file */ + switch (plugin_option_handle_file(jcr, ff_pkt, &sp)) { + case bRC_OK: + Dmsg2(10, "Option plugin %s will be used to backup %s\n", + ff_pkt->plugin, ff_pkt->fname); + do_plugin_set = true; + break; + case bRC_Skip: + Dmsg2(10, "Option plugin %s decided to skip %s\n", + ff_pkt->plugin, ff_pkt->fname); + goto good_rtn; + default: + Dmsg2(10, "Option plugin %s decided to let bacula handle %s\n", + ff_pkt->plugin, ff_pkt->fname); + break; + } + } + + if (do_plugin_set) { + /* Tell bfile that it needs to call plugin */ + if (!set_cmd_plugin(&ff_pkt->bfd, jcr)) { + goto bail_out; + } + send_plugin_name(jcr, sd, true); /* signal start of plugin data */ + plugin_started = true; + } + + /** Send attributes -- must be done after binit() */ + if (!encode_and_send_attributes(bctx)) { + goto bail_out; + } + /** Meta data only for restore object */ + if (IS_FT_OBJECT(ff_pkt->type)) { + goto good_rtn; + } + /** Meta data only for deleted files */ + if (ff_pkt->type == FT_DELETED) { + goto good_rtn; + } + /** Set up the encryption context and send the session data to the SD */ + if (has_file_data && jcr->crypto.pki_encrypt) { + if (!crypto_session_send(jcr, sd)) { + goto bail_out; + } + } + + /** + * Open any file with data that we intend to save, then save it. + * + * Note, if is_win32_backup, we must open the Directory so that + * the BackupRead will save its permissions and ownership streams. + */ + if (ff_pkt->type != FT_LNKSAVED && S_ISREG(ff_pkt->statp.st_mode)) { +#ifdef HAVE_WIN32 + do_read = !is_portable_backup(&ff_pkt->bfd) || ff_pkt->statp.st_size > 0; +#else + do_read = ff_pkt->statp.st_size > 0; +#endif + } else if (ff_pkt->type == FT_RAW || ff_pkt->type == FT_FIFO || + ff_pkt->type == FT_REPARSE || ff_pkt->type == FT_JUNCTION || + (!is_portable_backup(&ff_pkt->bfd) && ff_pkt->type == FT_DIREND)) { + do_read = true; + } + + if (ff_pkt->cmd_plugin && !ff_pkt->no_read) { + do_read = true; + } + + Dmsg2(150, "type=%d do_read=%d\n", ff_pkt->type, do_read); + if (do_read) { + btimer_t *tid; + + if (ff_pkt->type == FT_FIFO) { + tid = start_thread_timer(jcr, pthread_self(), 60); + } else { + tid = NULL; + } + int noatime = ff_pkt->flags & FO_NOATIME ? O_NOATIME : 0; + ff_pkt->bfd.reparse_point = (ff_pkt->type == FT_REPARSE || + ff_pkt->type == FT_JUNCTION); + set_fattrs(&ff_pkt->bfd, &ff_pkt->statp); + if (bopen(&ff_pkt->bfd, ff_pkt->fname, O_RDONLY | O_BINARY | noatime, 0) < 0) { + ff_pkt->ff_errno = errno; + berrno be; + Jmsg(jcr, M_NOTSAVED, 0, _(" Cannot open \"%s\": ERR=%s.\n"), ff_pkt->fname, + be.bstrerror()); + jcr->JobErrors++; + if (tid) { + stop_thread_timer(tid); + tid = NULL; + } + goto good_rtn; + } + if (tid) { + stop_thread_timer(tid); + tid = NULL; + } + + stat = send_data(bctx, bctx.data_stream); + + if (ff_pkt->flags & FO_CHKCHANGES) { + has_file_changed(jcr, ff_pkt); + } + + bclose(&ff_pkt->bfd); + + if (!stat) { + goto bail_out; + } + } + +#ifdef HAVE_DARWIN_OS + if (!send_resource_fork(bctx)) { + goto bail_out; + } +#endif + + /* + * Save ACLs and Extended Attributes when requested and available + * for anything not being a symlink. 
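
/*
 * Standalone illustration of the FO_NOATIME handling used when the file is
 * opened above (not taken from the Bacula sources).  O_NOATIME keeps the
 * backup from updating the access time of every file it reads, but the
 * kernel only honours it for the file's owner or a privileged process, so
 * a cautious caller retries without the flag when it is refused.
 */
#include <stdio.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

static int open_for_backup(const char *path)
{
#ifdef O_NOATIME
   int fd = open(path, O_RDONLY | O_NOATIME);
   if (fd < 0 && errno == EPERM) {
      fd = open(path, O_RDONLY);       /* not the owner: drop O_NOATIME */
   }
   return fd;
#else
   return open(path, O_RDONLY);        /* flag not available on this OS */
#endif
}

int main()
{
   int fd = open_for_backup("/etc/hostname");
   if (fd >= 0) {
      printf("opened read-only, atime left alone where permitted\n");
      close(fd);
   }
   return 0;
}
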
+ */ +#ifdef HAVE_ACL + if (jcr->bacl && jcr->bacl->backup_acl(jcr, ff_pkt) != bRC_BACL_ok) { + goto bail_out; + } +#endif +#ifdef HAVE_XATTR + if (jcr->bxattr && jcr->bxattr->backup_xattr(jcr, ff_pkt) != bRC_BXATTR_ok) { + goto bail_out; + } +#endif + + if (!crypto_terminate_digests(bctx)) { + goto bail_out; + } + +good_rtn: + rtnstat = 1; + +bail_out: + if (jcr->is_incomplete() || jcr->is_canceled()) { + Dmsg0(100, "Job canceled by user or marked incomplete.\n"); + rtnstat = 0; + } + if (plugin_started) { + send_plugin_name(jcr, sd, false); /* signal end of plugin data */ + } + if (ff_pkt->opt_plugin) { + jcr->plugin_sp = NULL; /* sp is local to this function */ + jcr->plugin_ctx = NULL; + jcr->plugin = NULL; + jcr->opt_plugin = false; + } + crypto_free(bctx); + return rtnstat; +} + +/** + * Send data read from an already open file descriptor. + * + * We return 1 on success and 0 on errors. + * + * ***FIXME*** + * We use ff_pkt->statp.st_size when FO_SPARSE to know when to stop + * reading. + * Currently this is not a problem as the only other stream, resource forks, + * are not handled as sparse files. + */ +static int send_data(bctx_t &bctx, int stream) +{ + JCR *jcr = bctx.jcr; + BSOCK *sd = jcr->store_bsock; + +#ifdef FD_NO_SEND_TEST + return 1; +#endif + + bctx.rsize = jcr->buf_size; + bctx.fileAddr = 0; + bctx.cipher_ctx = NULL; + bctx.msgsave = sd->msg; + bctx.rbuf = sd->msg; /* read buffer */ + bctx.wbuf = sd->msg; /* write buffer */ + bctx.cipher_input = (uint8_t *)bctx.rbuf; /* encrypt uncompressed data */ + + Dmsg1(300, "Saving data, type=%d\n", bctx.ff_pkt->type); + + if (!setup_compression(bctx)) { + goto err; + } + + if (bctx.ff_pkt->flags & FO_ENCRYPT && !crypto_allocate_ctx(bctx)) { + return false; + } + + /** + * Send Data header to Storage daemon + * + */ + if (!sd->fsend("%ld %d %lld", jcr->JobFiles, stream, + (int64_t)bctx.ff_pkt->statp.st_size)) { + if (!jcr->is_job_canceled()) { + Jmsg1(jcr, M_FATAL, 0, _("Network send error to SD. ERR=%s\n"), + sd->bstrerror()); + } + goto err; + } + Dmsg1(300, ">stored: datahdr %s\n", sd->msg); + + /** + * Make space at beginning of buffer for fileAddr because this + * same buffer will be used for writing if compression is off. + */ + if ((bctx.ff_pkt->flags & FO_SPARSE) || (bctx.ff_pkt->flags & FO_OFFSETS)) { + bctx.rbuf += OFFSET_FADDR_SIZE; + bctx.rsize -= OFFSET_FADDR_SIZE; +#if defined(HAVE_FREEBSD_OS) || defined(__FreeBSD_kernel__) + /** + * To read FreeBSD partitions, the read size must be + * a multiple of 512. 
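In other words, the read size is rounded down to the nearest multiple of 512 bytes. A quick illustration (editorial note, not part of the patch):

   int32_t rsize = 65000;
   rsize = (rsize / 512) * 512;   /* integer division truncates: 65000 -> 64512 (126 * 512) */
                                  /* a value already on a 512 boundary, e.g. 65536, is unchanged */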
+ */ + bctx.rsize = (bctx.rsize/512) * 512; +#endif + } + + /** a RAW device read on win32 only works if the buffer is a multiple of 512 */ +#ifdef HAVE_WIN32 + if (S_ISBLK(bctx.ff_pkt->statp.st_mode)) { + bctx.rsize = (bctx.rsize/512) * 512; + } + Dmsg1(200, "Fattrs=0X%x\n", bctx.ff_pkt->bfd.fattrs); + if (bctx.ff_pkt->bfd.fattrs & FILE_ATTRIBUTE_ENCRYPTED) { + if (!p_ReadEncryptedFileRaw) { + Jmsg0(bctx.jcr, M_FATAL, 0, _("Windows Encrypted data not supported on this OS.\n")); + goto err; + } + /* This single call reads all EFS data delivers it to a callback */ + if (p_ReadEncryptedFileRaw((PFE_EXPORT_FUNC)read_efs_data_cb, &bctx, + bctx.ff_pkt->bfd.pvContext) != 0) { + goto err; + } + /* All read, so skip to finish sending */ + goto finish_sending; + } + /* Fall through to standard bread() loop */ +#endif + + /* + * Normal read the file data in a loop and send it to SD + */ + while ((sd->msglen=(uint32_t)bread(&bctx.ff_pkt->bfd, bctx.rbuf, bctx.rsize)) > 0) { + if (!process_and_send_data(bctx)) { + goto err; + } + } /* end while read file data */ + goto finish_sending; + +finish_sending: + if (sd->msglen < 0) { /* error */ + berrno be; + Jmsg(jcr, M_ERROR, 0, _("Read error on file %s. ERR=%s\n"), + bctx.ff_pkt->fname, be.bstrerror(bctx.ff_pkt->bfd.berrno)); + if (jcr->JobErrors++ > 1000) { /* insanity check */ + Jmsg(jcr, M_FATAL, 0, _("Too many errors. JobErrors=%d.\n"), jcr->JobErrors); + } + } else if (bctx.ff_pkt->flags & FO_ENCRYPT) { + /** + * For encryption, we must call finalize to push out any + * buffered data. + */ + if (!crypto_cipher_finalize(bctx.cipher_ctx, (uint8_t *)jcr->crypto.crypto_buf, + &bctx.encrypted_len)) { + /* Padding failed. Shouldn't happen. */ + Jmsg(jcr, M_FATAL, 0, _("Encryption padding error\n")); + goto err; + } + + /** Note, on SSL pre-0.9.7, there is always some output */ + if (bctx.encrypted_len > 0) { + sd->msglen = bctx.encrypted_len; /* set encrypted length */ + sd->msg = jcr->crypto.crypto_buf; /* set correct write buffer */ + if (!sd->send()) { + if (!jcr->is_job_canceled()) { + Jmsg1(jcr, M_FATAL, 0, _("Network send error to SD. ERR=%s\n"), + sd->bstrerror()); + } + goto err; + } + Dmsg1(130, "Send data to SD len=%d\n", sd->msglen); + jcr->JobBytes += sd->msglen; /* count bytes saved possibly compressed/encrypted */ + sd->msg = bctx.msgsave; /* restore bnet buffer */ + } + } + + + if (!sd->signal(BNET_EOD)) { /* indicate end of file data */ + if (!jcr->is_job_canceled()) { + Jmsg1(jcr, M_FATAL, 0, _("Network send error to SD. ERR=%s\n"), + sd->bstrerror()); + } + goto err; + } + + /** Free the cipher context */ + if (bctx.cipher_ctx) { + crypto_cipher_free(bctx.cipher_ctx); + } + return 1; + +err: + /** Free the cipher context */ + if (bctx.cipher_ctx) { + crypto_cipher_free(bctx.cipher_ctx); + } + + sd->msg = bctx.msgsave; /* restore bnet buffer */ + sd->msglen = 0; + return 0; +} + + +/* + * Apply processing (sparse, compression, encryption, and + * send to the SD. 
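It may help to see the overall shape of what send_data() above puts on the wire for one stream. The trace below is an editorial sketch only; the literal numbers are made up, and the real stream codes and sizes depend on the job.

/*
 * FD -> SD   "120 2 524288"        header: JobFiles, stream code, statp.st_size
 * FD -> SD   record 1              up to rsize bytes, possibly prefixed by an
 *                                  8-byte (OFFSET_FADDR_SIZE) file address when
 *                                  FO_SPARSE/FO_OFFSETS, then compressed and/or encrypted
 * FD -> SD   record 2 ...          all-zero blocks are skipped when FO_SPARSE
 * FD -> SD   final cipher output   only with FO_ENCRYPT, flushed by crypto_cipher_finalize()
 * FD -> SD   BNET_EOD              end-of-data signal for this stream
 *
 * With FO_ENCRYPT each record is framed as
 *   [uint32 plaintext record length][ciphertext of (optional address prefix + data)]
 * with the length encrypted together with the data, so the restore side can recover
 * the original record boundaries despite cipher-block buffering and padding.
 */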
+ */ +bool process_and_send_data(bctx_t &bctx) +{ + BSOCK *sd = bctx.sd; + JCR *jcr = bctx.jcr; + + /** Check for sparse blocks */ + if (bctx.ff_pkt->flags & FO_SPARSE) { + ser_declare; + bool allZeros = false; + if ((sd->msglen == bctx.rsize && + bctx.fileAddr+sd->msglen < (uint64_t)bctx.ff_pkt->statp.st_size) || + ((bctx.ff_pkt->type == FT_RAW || bctx.ff_pkt->type == FT_FIFO) && + (uint64_t)bctx.ff_pkt->statp.st_size == 0)) { + allZeros = is_buf_zero(bctx.rbuf, bctx.rsize); + } + if (!allZeros) { + /** Put file address as first data in buffer */ + ser_begin(bctx.wbuf, OFFSET_FADDR_SIZE); + ser_uint64(bctx.fileAddr); /* store fileAddr in begin of buffer */ + } + bctx.fileAddr += sd->msglen; /* update file address */ + /** Skip block of all zeros */ + if (allZeros) { + return true; /* skip block of zeros */ + } + } else if (bctx.ff_pkt->flags & FO_OFFSETS) { + ser_declare; + ser_begin(bctx.wbuf, OFFSET_FADDR_SIZE); + ser_uint64(bctx.ff_pkt->bfd.offset); /* store offset in begin of buffer */ + } + + jcr->ReadBytes += sd->msglen; /* count bytes read */ + + /** Uncompressed cipher input length */ + bctx.cipher_input_len = sd->msglen; + + /** Update checksum if requested */ + if (bctx.digest) { + crypto_digest_update(bctx.digest, (uint8_t *)bctx.rbuf, sd->msglen); + } + + /** Update signing digest if requested */ + if (bctx.signing_digest) { + crypto_digest_update(bctx.signing_digest, (uint8_t *)bctx.rbuf, sd->msglen); + } + + if (have_libz && !do_libz_compression(bctx)) { + goto err; + } + + if (have_lzo && !do_lzo_compression(bctx)) { + goto err; + } + + /** + * Note, here we prepend the current record length to the beginning + * of the encrypted data. This is because both sparse and compression + * restore handling want records returned to them with exactly the + * same number of bytes that were processed in the backup handling. + * That is, both are block filters rather than a stream. When doing + * compression, the compression routines may buffer data, so that for + * any one record compressed, when it is decompressed the same size + * will not be obtained. Of course, the buffered data eventually comes + * out in subsequent crypto_cipher_update() calls or at least + * when crypto_cipher_finalize() is called. Unfortunately, this + * "feature" of encryption enormously complicates the restore code. + */ + if (bctx.ff_pkt->flags & FO_ENCRYPT) { + uint32_t initial_len = 0; + ser_declare; + + if ((bctx.ff_pkt->flags & FO_SPARSE) || (bctx.ff_pkt->flags & FO_OFFSETS)) { + bctx.cipher_input_len += OFFSET_FADDR_SIZE; + } + + /** Encrypt the length of the input block */ + uint8_t packet_len[sizeof(uint32_t)]; + + ser_begin(packet_len, sizeof(uint32_t)); + ser_uint32(bctx.cipher_input_len); /* store data len in begin of buffer */ + Dmsg1(20, "Encrypt len=%d\n", bctx.cipher_input_len); + + if (!crypto_cipher_update(bctx.cipher_ctx, packet_len, sizeof(packet_len), + (uint8_t *)jcr->crypto.crypto_buf, &initial_len)) { + /** Encryption failed. Shouldn't happen. 
*/ + Jmsg(jcr, M_FATAL, 0, _("Encryption error\n")); + goto err; + } + + /** Encrypt the input block */ + if (crypto_cipher_update(bctx.cipher_ctx, bctx.cipher_input, bctx.cipher_input_len, + (uint8_t *)&jcr->crypto.crypto_buf[initial_len], &bctx.encrypted_len)) { + if ((initial_len + bctx.encrypted_len) == 0) { + /** No full block of data available, read more data */ + return true; + } + Dmsg2(400, "encrypted len=%d unencrypted len=%d\n", bctx.encrypted_len, + sd->msglen); + sd->msglen = initial_len + bctx.encrypted_len; /* set encrypted length */ + } else { + /** Encryption failed. Shouldn't happen. */ + Jmsg(jcr, M_FATAL, 0, _("Encryption error\n")); + goto err; + } + } + + /* Send the buffer to the Storage daemon */ + if ((bctx.ff_pkt->flags & FO_SPARSE) || (bctx.ff_pkt->flags & FO_OFFSETS)) { + sd->msglen += OFFSET_FADDR_SIZE; /* include fileAddr in size */ + } + sd->msg = bctx.wbuf; /* set correct write buffer */ + if (!sd->send()) { + if (!jcr->is_job_canceled()) { + Jmsg1(jcr, M_FATAL, 0, _("Network send error to SD. ERR=%s\n"), + sd->bstrerror()); + } + goto err; + } + Dmsg1(130, "Send data to SD len=%d\n", sd->msglen); + /* #endif */ + jcr->JobBytes += sd->msglen; /* count bytes saved possibly compressed/encrypted */ + sd->msg = bctx.msgsave; /* restore read buffer */ + return true; + +err: + return false; +} + +bool encode_and_send_attributes(bctx_t &bctx) +{ + BSOCK *sd = bctx.jcr->store_bsock; + JCR *jcr = bctx.jcr; + FF_PKT *ff_pkt = bctx.ff_pkt; + char attribs[MAXSTRING]; + char attribsExBuf[MAXSTRING]; + char *attribsEx = NULL; + int attr_stream; + int comp_len; + bool stat; + int hangup = get_hangup(); + int blowup = get_blowup(); +#ifdef FD_NO_SEND_TEST + return true; +#endif + + Dmsg1(300, "encode_and_send_attrs fname=%s\n", ff_pkt->fname); + /** Find what data stream we will use, then encode the attributes */ + if ((bctx.data_stream = select_data_stream(ff_pkt)) == STREAM_NONE) { + /* This should not happen */ + Jmsg0(jcr, M_FATAL, 0, _("Invalid file flags, no supported data stream type.\n")); + return false; + } + encode_stat(attribs, &ff_pkt->statp, sizeof(ff_pkt->statp), ff_pkt->LinkFI, bctx.data_stream); + + /** Now possibly extend the attributes */ + if (IS_FT_OBJECT(ff_pkt->type)) { + attr_stream = STREAM_RESTORE_OBJECT; + } else { + attribsEx = attribsExBuf; + attr_stream = encode_attribsEx(jcr, attribsEx, ff_pkt); + } + + Dmsg3(300, "File %s\nattribs=%s\nattribsEx=%s\n", ff_pkt->fname, attribs, attribsEx); + + jcr->lock(); + jcr->JobFiles++; /* increment number of files sent */ + ff_pkt->FileIndex = jcr->JobFiles; /* return FileIndex */ + pm_strcpy(jcr->last_fname, ff_pkt->fname); + jcr->unlock(); + + /* Display the information about the current file if requested */ + if (is_message_type_set(jcr, M_SAVED)) { + ATTR attr; + memcpy(&attr.statp, &ff_pkt->statp, sizeof(struct stat)); + attr.type = ff_pkt->type; + attr.ofname = (POOLMEM *)ff_pkt->fname; + attr.olname = (POOLMEM *)ff_pkt->link; + print_ls_output(jcr, &attr, M_SAVED); + } + + /* Debug code: check if we must hangup */ + if (hangup > 0 && (jcr->JobFiles > (uint32_t)hangup)) { + jcr->setJobStatus(JS_Incomplete); + Jmsg1(jcr, M_FATAL, 0, "Debug hangup requested after %d files.\n", hangup); + set_hangup(0); + return false; + } + + if (blowup > 0 && (jcr->JobFiles > (uint32_t)blowup)) { + Jmsg1(jcr, M_ABORT, 0, "Debug blowup requested after %d files.\n", blowup); + return false; + } + + /** + * Send Attributes header to Storage daemon + * + */ + if (!sd->fsend("%ld %d 0", jcr->JobFiles, attr_stream)) { + if 
(!jcr->is_canceled() && !jcr->is_incomplete()) { + Jmsg2(jcr, M_FATAL, 0, _("Network send error to SD. Data=%s ERR=%s\n"), + sd->msg, sd->bstrerror()); + } + return false; + } + Dmsg1(300, ">stored: attrhdr %s\n", sd->msg); + + /** + * Send file attributes to Storage daemon + * File_index + * File type + * Filename (full path) + * Encoded attributes + * Link name (if type==FT_LNK or FT_LNKSAVED) + * Encoded extended-attributes (for Win32) + * + * or send Restore Object to Storage daemon + * File_index + * File_type + * Object_index + * Object_len (possibly compressed) + * Object_full_len (not compressed) + * Object_compression + * Plugin_name + * Object_name + * Binary Object data + * + * For a directory, link is the same as fname, but with trailing + * slash. For a linked file, link is the link. + */ + if (!IS_FT_OBJECT(ff_pkt->type) && ff_pkt->type != FT_DELETED) { /* already stripped */ + strip_path(ff_pkt); + } + switch (ff_pkt->type) { + case FT_LNK: + case FT_LNKSAVED: + Dmsg3(300, "Link %d %s to %s\n", jcr->JobFiles, ff_pkt->fname, ff_pkt->link); + stat = sd->fsend("%ld %d %s%c%s%c%s%c%s%c%u%c", jcr->JobFiles, + ff_pkt->type, ff_pkt->fname, 0, attribs, 0, + ff_pkt->link, 0, attribsEx, 0, ff_pkt->delta_seq, 0); + break; + case FT_DIREND: + case FT_REPARSE: + case FT_JUNCTION: + /* Here link is the canonical filename (i.e. with trailing slash) */ + stat = sd->fsend("%ld %d %s%c%s%c%c%s%c%u%c", jcr->JobFiles, + ff_pkt->type, ff_pkt->link, 0, attribs, 0, 0, + attribsEx, 0, ff_pkt->delta_seq, 0); + break; + case FT_PLUGIN_CONFIG: + case FT_RESTORE_FIRST: + comp_len = ff_pkt->object_len; + ff_pkt->object_compression = 0; + if (ff_pkt->object_len > 1000) { + /* Big object, compress it */ + comp_len = ff_pkt->object_len + 1000; + POOLMEM *comp_obj = get_memory(comp_len); + /* *** FIXME *** check Zdeflate error */ + Zdeflate(ff_pkt->object, ff_pkt->object_len, comp_obj, comp_len); + if (comp_len < ff_pkt->object_len) { + ff_pkt->object = comp_obj; + ff_pkt->object_compression = 1; /* zlib level 9 compression */ + } else { + /* Uncompressed object smaller, use it */ + comp_len = ff_pkt->object_len; + } + Dmsg2(100, "Object compressed from %d to %d bytes\n", ff_pkt->object_len, comp_len); + } + sd->msglen = Mmsg(sd->msg, "%d %d %d %d %d %d %s%c%s%c", + jcr->JobFiles, ff_pkt->type, ff_pkt->object_index, + comp_len, ff_pkt->object_len, ff_pkt->object_compression, + ff_pkt->fname, 0, ff_pkt->object_name, 0); + sd->msg = check_pool_memory_size(sd->msg, sd->msglen + comp_len + 2); + memcpy(sd->msg + sd->msglen, ff_pkt->object, comp_len); + /* Note we send one extra byte so Dir can store zero after object */ + sd->msglen += comp_len + 1; + stat = sd->send(); + if (ff_pkt->object_compression) { + free_and_null_pool_memory(ff_pkt->object); + } + break; + case FT_REG: + stat = sd->fsend("%ld %d %s%c%s%c%c%s%c%d%c", jcr->JobFiles, + ff_pkt->type, ff_pkt->fname, 0, attribs, 0, 0, attribsEx, 0, + ff_pkt->delta_seq, 0); + break; + default: + stat = sd->fsend("%ld %d %s%c%s%c%c%s%c%u%c", jcr->JobFiles, + ff_pkt->type, ff_pkt->fname, 0, attribs, 0, 0, + attribsEx, 0, ff_pkt->delta_seq, 0); + break; + } + + if (!IS_FT_OBJECT(ff_pkt->type) && ff_pkt->type != FT_DELETED) { + unstrip_path(ff_pkt); + } + + Dmsg2(300, ">stored: attr len=%d: %s\n", sd->msglen, sd->msg); + if (!stat && !jcr->is_job_canceled()) { + Jmsg1(jcr, M_FATAL, 0, _("Network send error to SD. 
ERR=%s\n"), + sd->bstrerror()); + } + sd->signal(BNET_EOD); /* indicate end of attributes data */ + return stat; +} + +/* + * Setup bctx for doing compression + */ +static bool setup_compression(bctx_t &bctx) +{ + JCR *jcr = bctx.jcr; + +#if defined(HAVE_LIBZ) || defined(HAVE_LZO) + bctx.compress_len = 0; + bctx.max_compress_len = 0; + bctx.cbuf = NULL; + #ifdef HAVE_LIBZ + int zstat; + + if ((bctx.ff_pkt->flags & FO_COMPRESS) && bctx.ff_pkt->Compress_algo == COMPRESS_GZIP) { + if ((bctx.ff_pkt->flags & FO_SPARSE) || (bctx.ff_pkt->flags & FO_OFFSETS)) { + bctx.cbuf = (unsigned char *)jcr->compress_buf + OFFSET_FADDR_SIZE; + bctx.max_compress_len = jcr->compress_buf_size - OFFSET_FADDR_SIZE; + } else { + bctx.cbuf = (unsigned char *)jcr->compress_buf; + bctx.max_compress_len = jcr->compress_buf_size; /* set max length */ + } + bctx.wbuf = jcr->compress_buf; /* compressed output here */ + bctx.cipher_input = (uint8_t *)jcr->compress_buf; /* encrypt compressed data */ + + /** + * Only change zlib parameters if there is no pending operation. + * This should never happen as deflatereset is called after each + * deflate. + */ + + if (((z_stream*)jcr->pZLIB_compress_workset)->total_in == 0) { + /** set gzip compression level - must be done per file */ + if ((zstat=deflateParams((z_stream*)jcr->pZLIB_compress_workset, + bctx.ff_pkt->Compress_level, Z_DEFAULT_STRATEGY)) != Z_OK) { + Jmsg(jcr, M_FATAL, 0, _("Compression deflateParams error: %d\n"), zstat); + jcr->setJobStatus(JS_ErrorTerminated); + return false; + } + } + } + #endif + #ifdef HAVE_LZO + memset(&bctx.ch, 0, sizeof(comp_stream_header)); + bctx.cbuf2 = NULL; + + if ((bctx.ff_pkt->flags & FO_COMPRESS) && bctx.ff_pkt->Compress_algo == COMPRESS_LZO1X) { + if ((bctx.ff_pkt->flags & FO_SPARSE) || (bctx.ff_pkt->flags & FO_OFFSETS)) { + bctx.cbuf = (unsigned char *)jcr->compress_buf + OFFSET_FADDR_SIZE; + bctx.cbuf2 = (unsigned char *)jcr->compress_buf + OFFSET_FADDR_SIZE + sizeof(comp_stream_header); + bctx.max_compress_len = jcr->compress_buf_size - OFFSET_FADDR_SIZE; + } else { + bctx.cbuf = (unsigned char *)jcr->compress_buf; + bctx.cbuf2 = (unsigned char *)jcr->compress_buf + sizeof(comp_stream_header); + bctx.max_compress_len = jcr->compress_buf_size; /* set max length */ + } + bctx.ch.magic = COMPRESS_LZO1X; + bctx.ch.version = COMP_HEAD_VERSION; + bctx.wbuf = jcr->compress_buf; /* compressed output here */ + bctx.cipher_input = (uint8_t *)jcr->compress_buf; /* encrypt compressed data */ + } + #endif +#endif + return true; +} + +/* + * Send MacOS resource fork to SD + */ +#ifdef HAVE_DARWIN_OS +static bool send_resource_fork(bctx_t &bctx) +{ + FF_PKT *ff_pkt = bctx.ff_pkt; + JCR *jcr = bctx.jcr; + BSOCK *sd = bctx.sd; + int stat; + + /** Regular files can have resource forks and Finder Info */ + if (ff_pkt->type != FT_LNKSAVED && (S_ISREG(ff_pkt->statp.st_mode) && + ff_pkt->flags & FO_HFSPLUS)) { + if (ff_pkt->hfsinfo.rsrclength > 0) { + int flags; + int rsrc_stream; + if (bopen_rsrc(&ff_pkt->bfd, ff_pkt->fname, O_RDONLY | O_BINARY, 0) < 0) { + ff_pkt->ff_errno = errno; + berrno be; + Jmsg(jcr, M_NOTSAVED, -1, _(" Cannot open resource fork for \"%s\": ERR=%s.\n"), + ff_pkt->fname, be.bstrerror()); + jcr->JobErrors++; + if (is_bopen(&ff_pkt->bfd)) { + bclose(&ff_pkt->bfd); + } + return true; + } + flags = ff_pkt->flags; + ff_pkt->flags &= ~(FO_COMPRESS|FO_SPARSE|FO_OFFSETS); + if (flags & FO_ENCRYPT) { + rsrc_stream = STREAM_ENCRYPTED_MACOS_FORK_DATA; + } else { + rsrc_stream = STREAM_MACOS_FORK_DATA; + } + stat = send_data(bctx, 
rsrc_stream); + ff_pkt->flags = flags; + bclose(&ff_pkt->bfd); + if (!stat) { + return false; + } + } + + Dmsg1(300, "Saving Finder Info for \"%s\"\n", ff_pkt->fname); + sd->fsend("%ld %d 0", jcr->JobFiles, STREAM_HFSPLUS_ATTRIBUTES); + Dmsg1(300, "bfiled>stored:header %s\n", sd->msg); + pm_memcpy(sd->msg, ff_pkt->hfsinfo.fndrinfo, 32); + sd->msglen = 32; + if (bctx.digest) { + crypto_digest_update(bctx.digest, (uint8_t *)sd->msg, sd->msglen); + } + if (bctx.signing_digest) { + crypto_digest_update(bctx.signing_digest, (uint8_t *)sd->msg, sd->msglen); + } + sd->send(); + sd->signal(BNET_EOD); + } + return true; +} +#endif + +static bool do_libz_compression(bctx_t &bctx) +{ +#ifdef HAVE_LIBZ + JCR *jcr = bctx.jcr; + BSOCK *sd = bctx.sd; + int zstat; + + /** Do compression if turned on */ + if (bctx.ff_pkt->flags & FO_COMPRESS && bctx.ff_pkt->Compress_algo == COMPRESS_GZIP && jcr->pZLIB_compress_workset) { + Dmsg3(400, "cbuf=0x%x rbuf=0x%x len=%u\n", bctx.cbuf, bctx.rbuf, sd->msglen); + + ((z_stream*)jcr->pZLIB_compress_workset)->next_in = (unsigned char *)bctx.rbuf; + ((z_stream*)jcr->pZLIB_compress_workset)->avail_in = sd->msglen; + ((z_stream*)jcr->pZLIB_compress_workset)->next_out = bctx.cbuf; + ((z_stream*)jcr->pZLIB_compress_workset)->avail_out = bctx.max_compress_len; + + if ((zstat=deflate((z_stream*)jcr->pZLIB_compress_workset, Z_FINISH)) != Z_STREAM_END) { + Jmsg(jcr, M_FATAL, 0, _("Compression deflate error: %d\n"), zstat); + jcr->setJobStatus(JS_ErrorTerminated); + return false; + } + bctx.compress_len = ((z_stream*)jcr->pZLIB_compress_workset)->total_out; + /** reset zlib stream to be able to begin from scratch again */ + if ((zstat=deflateReset((z_stream*)jcr->pZLIB_compress_workset)) != Z_OK) { + Jmsg(jcr, M_FATAL, 0, _("Compression deflateReset error: %d\n"), zstat); + jcr->setJobStatus(JS_ErrorTerminated); + return false; + } + + Dmsg2(400, "GZIP compressed len=%d uncompressed len=%d\n", bctx.compress_len, + sd->msglen); + + sd->msglen = bctx.compress_len; /* set compressed length */ + bctx.cipher_input_len = bctx.compress_len; + } +#endif + return true; +} + +static bool do_lzo_compression(bctx_t &bctx) +{ +#ifdef HAVE_LZO + JCR *jcr = bctx.jcr; + BSOCK *sd = bctx.sd; + int lzores; + + /** Do compression if turned on */ + if (bctx.ff_pkt->flags & FO_COMPRESS && bctx.ff_pkt->Compress_algo == COMPRESS_LZO1X && jcr->LZO_compress_workset) { + lzo_uint len; /* TODO: See with the latest patch how to handle lzo_uint with 64bit */ + + ser_declare; + ser_begin(bctx.cbuf, sizeof(comp_stream_header)); + + Dmsg3(400, "cbuf=0x%x rbuf=0x%x len=%u\n", bctx.cbuf, bctx.rbuf, sd->msglen); + + lzores = lzo1x_1_compress((const unsigned char*)bctx.rbuf, sd->msglen, bctx.cbuf2, + &len, jcr->LZO_compress_workset); + bctx.compress_len = len; + if (lzores == LZO_E_OK && bctx.compress_len <= bctx.max_compress_len) { + /* complete header */ + ser_uint32(COMPRESS_LZO1X); + ser_uint32(bctx.compress_len); + ser_uint16(bctx.ch.level); + ser_uint16(bctx.ch.version); + } else { + /** this should NEVER happen */ + Jmsg(jcr, M_FATAL, 0, _("Compression LZO error: %d\n"), lzores); + jcr->setJobStatus(JS_ErrorTerminated); + return false; + } + + Dmsg2(400, "LZO compressed len=%d uncompressed len=%d\n", bctx.compress_len, + sd->msglen); + + bctx.compress_len += sizeof(comp_stream_header); /* add size of header */ + sd->msglen = bctx.compress_len; /* set compressed length */ + bctx.cipher_input_len = bctx.compress_len; + } +#endif + return true; +} + +/* + * Do in place strip of path + */ +static bool 
do_snap_strip(FF_PKT *ff) +{ + /* if the string starts with the snapshot path name, we can replace + * by the volume name. The volume_path is smaller than the snapshot_path + * snapshot_path = volume_path + /.snapshots/job-xxxx + */ + ASSERT(strlen(ff->snapshot_path) > strlen(ff->volume_path)); + int sp_first = strlen(ff->snapshot_path); /* point after snapshot_path in fname */ + if (strncmp(ff->fname, ff->snapshot_path, sp_first) == 0) { + int last = pm_strcpy(ff->snap_fname, ff->volume_path); + last = MAX(last - 1, 0); + + if (ff->snap_fname[last] == '/') { + if (ff->fname[sp_first] == '/') { /* compare with the first character of the string (sp_first not sp_first-1) */ + ff->snap_fname[last] = 0; + } + } else { + if (ff->fname[sp_first] != '/') { + pm_strcat(ff->snap_fname, "/"); + } + } + + pm_strcat(ff->snap_fname, ff->fname + sp_first); + ASSERT(strlen(ff->fname) > strlen(ff->snap_fname)); + strcpy(ff->fname, ff->snap_fname); + Dmsg2(DT_SNAPSHOT|20, "%s -> %s\n", ff->fname_save, ff->fname); + } + if (strncmp(ff->link, ff->snapshot_path, sp_first) == 0) { + int last = pm_strcpy(ff->snap_fname, ff->volume_path); + last = MAX(last - 1, 0); + + if (ff->snap_fname[last] == '/') { + if (ff->link[sp_first] == '/') { /* compare with the first character of the string (sp_first not sp_first-1) */ + ff->snap_fname[last] = 0; + } + } else { + if (ff->link[sp_first] != '/') { + pm_strcat(ff->snap_fname, "/"); + } + } + + pm_strcat(ff->snap_fname, ff->link + sp_first); + ASSERT(strlen(ff->link) > strlen(ff->snap_fname)); + strcpy(ff->link, ff->snap_fname); + Dmsg2(DT_SNAPSHOT|20, "%s -> %s\n", ff->link_save, ff->link); + } + + return true; +} + +/* + * Do in place strip of path + */ +static bool do_strip(int count, char *in) +{ + char *out = in; + int stripped; + int numsep = 0; + + /** Copy to first path separator -- Win32 might have c: ... */ + while (*in && !IsPathSeparator(*in)) { + out++; in++; + } + if (*in) { /* Not at the end of the string */ + out++; in++; + numsep++; /* one separator seen */ + } + for (stripped=0; stripped<count && *in; stripped++) { + while (*in && !IsPathSeparator(*in)) { + in++; /* skip chars */ + } + if (*in) { + in++; /* skip separator */ + numsep++; + } + } + /* Copy the remainder of the path down over the stripped prefix */ + while (*in) { + if (IsPathSeparator(*in)) { + numsep++; /* count separators copied */ + } + *out++ = *in++; + } + *out = 0; + Dmsg4(500, "stripped=%d count=%d numsep=%d sep>count=%d\n", + stripped, count, numsep, numsep>count); + return stripped==count && numsep>count; +} + +/** + * If requested strip leading components of the path so that we can + * save file as if it came from a subdirectory. This is most useful + * for dealing with snapshots, by removing the snapshot directory, or + * in handling vendor migrations where files have been restored with + * a vendor product into a subdirectory. + * + * When we are using snapshots, we might need to convert the path + * back to the original one using the strip_snap_path option.
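A worked example may make do_strip() above clearer; this is an editorial illustration with hypothetical paths, not part of the patch:

/*
 * With strip_path = 2:
 *   char p1[] = "/mnt/snap/etc/passwd";
 *   do_strip(2, p1);      =>  p1 becomes "/etc/passwd", returns true
 *
 *   char p2[] = "/etc/passwd";
 *   do_strip(2, p2);      =>  only two separators in total, so it returns false
 *                             and strip_path()/unstrip_path() restore the name
 */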
+ */ +void strip_path(FF_PKT *ff_pkt) +{ + if (!ff_pkt->strip_snap_path && + (!(ff_pkt->flags & FO_STRIPPATH) || ff_pkt->strip_path <= 0)) + { + Dmsg1(200, "No strip for %s\n", ff_pkt->fname); + return; + } + /* shared part between strip and snapshot */ + if (!ff_pkt->fname_save) { + ff_pkt->fname_save = get_pool_memory(PM_FNAME); + ff_pkt->link_save = get_pool_memory(PM_FNAME); + *ff_pkt->link_save = 0; + } + pm_strcpy(ff_pkt->fname_save, ff_pkt->fname); + if (ff_pkt->type != FT_LNK && ff_pkt->fname != ff_pkt->link) { + pm_strcpy(ff_pkt->link_save, ff_pkt->link); + Dmsg2(500, "strcpy link_save=%d link=%d\n", strlen(ff_pkt->link_save), + strlen(ff_pkt->link)); + Dsm_check(200); + } + + if (ff_pkt->strip_snap_path) { + if (!do_snap_strip(ff_pkt)) { + Dmsg1(0, "Something wrong with do_snap_strip(%s)\n", ff_pkt->fname); + unstrip_path(ff_pkt); + goto rtn; + } + } + + /* See if we want also to strip the path */ + if (!(ff_pkt->flags & FO_STRIPPATH) || ff_pkt->strip_path <= 0) { + goto rtn; + } + + /** + * Strip path. If it doesn't succeed put it back. If + * it does, and there is a different link string, + * attempt to strip the link. If it fails, back them + * both back. + * Do not strip symlinks. + * I.e. if either stripping fails don't strip anything. + */ + if (!do_strip(ff_pkt->strip_path, ff_pkt->fname)) { + unstrip_path(ff_pkt); + goto rtn; + } + /** Strip links but not symlinks */ + if (ff_pkt->type != FT_LNK && ff_pkt->fname != ff_pkt->link) { + if (!do_strip(ff_pkt->strip_path, ff_pkt->link)) { + unstrip_path(ff_pkt); + } + } + +rtn: + Dmsg3(10, "fname=%s stripped=%s link=%s\n", ff_pkt->fname_save, ff_pkt->fname, + ff_pkt->link); +} + +void unstrip_path(FF_PKT *ff_pkt) +{ + if (!ff_pkt->strip_snap_path && + (!(ff_pkt->flags & FO_STRIPPATH) || ff_pkt->strip_path <= 0)) + { + return; + } + + strcpy(ff_pkt->fname, ff_pkt->fname_save); + if (ff_pkt->type != FT_LNK && ff_pkt->fname != ff_pkt->link) { + Dmsg2(10, "strcpy link=%s link_save=%s\n", ff_pkt->link, + ff_pkt->link_save); + strcpy(ff_pkt->link, ff_pkt->link_save); + Dmsg2(10, "strcpy link=%d link_save=%d\n", strlen(ff_pkt->link), + strlen(ff_pkt->link_save)); + Dsm_check(200); + } +} + +static void close_vss_backup_session(JCR *jcr) +{ +#if defined(WIN32_VSS) + /* STOP VSS ON WIN32 */ + /* tell vss to close the backup session */ + if (jcr->Snapshot && jcr->pVSSClient) { + if (jcr->pVSSClient->CloseBackup()) { + /* inform user about writer states */ + for (int i=0; i<(int)jcr->pVSSClient->GetWriterCount(); i++) { + int msg_type = M_INFO; + if (jcr->pVSSClient->GetWriterState(i) < 1) { + msg_type = M_WARNING; + jcr->JobErrors++; + } + Jmsg(jcr, msg_type, 0, _("VSS Writer (BackupComplete): %s\n"), + jcr->pVSSClient->GetWriterInfo(i)); + } + } + /* Generate Job global writer metadata */ + WCHAR *metadata = jcr->pVSSClient->GetMetadata(); + if (metadata) { + FF_PKT *ff_pkt = jcr->ff; + ff_pkt->fname = (char *)"*all*"; /* for all plugins */ + ff_pkt->type = FT_RESTORE_FIRST; + ff_pkt->LinkFI = 0; + ff_pkt->object_name = (char *)"job_metadata.xml"; + ff_pkt->object = (char *)metadata; + ff_pkt->object_len = (wcslen(metadata) + 1) * sizeof(WCHAR); + ff_pkt->object_index = (int)time(NULL); + save_file(jcr, ff_pkt, true); + } + } +#endif +} diff --git a/src/filed/backup.h b/src/filed/backup.h new file mode 100644 index 00000000..17188491 --- /dev/null +++ b/src/filed/backup.h @@ -0,0 +1,86 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with 
contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +#ifndef __BACKUP_H +#define __BACKUP_H + +#include "ch.h" + +/* + * Define a backup context + */ +struct bctx_t { + /* Global variables */ + JCR *jcr; + FF_PKT *ff_pkt; /* find file packet */ + int data_stream; + BSOCK *sd; + uint64_t fileAddr; + char *rbuf, *wbuf; + int32_t rsize; + POOLMEM *msgsave; + + /* Crypto variables */ + DIGEST *digest; + DIGEST *signing_digest; + int digest_stream; + SIGNATURE *sig; + CIPHER_CONTEXT *cipher_ctx; + const uint8_t *cipher_input; + uint32_t cipher_input_len; + uint32_t cipher_block_size; + uint32_t encrypted_len; + + /* Compression variables */ + /* These are the same as used by libz, but I find it very + * uncomfortable to define variables like this rather than + * specifying a number of bits. Defining them here allows us + * to have code that compiles with and without libz and lzo. + * + * uLong == unsigned long int + * Bytef == unsigned char + */ + unsigned long int max_compress_len; + unsigned long int compress_len; + unsigned char *cbuf; + unsigned char *cbuf2; + +#ifdef HAVE_LZO + comp_stream_header ch; +#endif + +}; + +bool crypto_setup_digests(bctx_t &bctx); +bool crypto_terminate_digests(bctx_t &bctx); +bool crypto_session_start(JCR *jcr); +void crypto_session_end(JCR *jcr); +bool crypto_session_send(JCR *jcr, BSOCK *sd); +bool crypto_allocate_ctx(bctx_t &bctx); +void crypto_free(bctx_t &bctx); + +bool encode_and_send_attributes(bctx_t &bctx); + +bool process_and_send_data(bctx_t &bctx); + +#ifdef HAVE_WIN32 +DWORD WINAPI read_efs_data_cb(PBYTE pbData, PVOID pvCallbackContext, ULONG ulLength); +#endif + +#endif diff --git a/src/filed/bacl.c b/src/filed/bacl.c new file mode 100644 index 00000000..4eb61134 --- /dev/null +++ b/src/filed/bacl.c @@ -0,0 +1,777 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. + */ +/** + * Major refactoring of ACL code written by: + * + * Radosław Korzeniewski, MMXVI + * radoslaw@korzeniewski.net, radekk@inteos.pl + * Inteos Sp. z o.o. http://www.inteos.pl/ + * + * + * A specialized class to handle ACL in Bacula Enterprise. + * The runtime consist of two parts: + * 1. OS independent class: BACL + * 2. 
OS dependent subclass: BACL_* + * + * OS dependent subclasses are available for the following OS: + * - Darwin (OSX) + * - FreeBSD (POSIX and NFSv4/ZFS acls) + * - Linux + * - Solaris (POSIX and NFSv4/ZFS acls) + * + * OS dependent subclasses in progress: + * - AIX (pre-5.3 and post 5.3 acls, acl_get and aclx_get interface) + * - HPUX + * - IRIX + * - Tru64 + * + * The OS independent class supports AFS ACLs using the pioctl interface. + * + * ACLs are saved in the OS native text format provided by the acl(3) API and use + * different streams for the different platforms. + * This behavior preserves backward compatibility with the previous Bacula + * implementation, which we need to maintain. + * + * For an OS specific implementation of BACL you need to implement the following methods: + * + * [bacl] - indicates a bacl function/method to call + * [os] - indicates an OS specific function, which could be different on a specific OS + * (we use Linux API calls as an example) + * + * ::os_get_acl(JCR *jcr, BACL_type bacltype) + * + * 1. get binary form of the acl - acl_get_file[os] + * 2. check if acl is trivial if required - call acl_issimple[bacl] + * 3. translate binary form into text representation - acl_to_text[os] + * 4. save acl text into content - set_content[bacl] + * 5. if acl not supported on filesystem - call clear_flag(BACL_FLAG_NATIVE)[bacl] + * + * ::os_backup_acl (JCR *jcr, FF_PKT *ff_pkt) + * + * 1. call os_get_acl[bacl] for all supported ACL_TYPES + * 2. call send_acl_stream[bacl] for all supported ACL_STREAMS + * + * ::os_set_acl(JCR *jcr, BACL_type bacltype, char *content, uint32_t length) + * + * 1. prepare acl binary form from text representation stored in content - acl_from_text[os] + * 2. set acl on file - acl_set_file[os] + * 3. if acl not supported on filesystem, clear_flag(BACL_FLAG_NATIVE) + * + * ::os_restore_acl (JCR *jcr, int stream, char *content, uint32_t length) + * + * 1.
call os_set_acl for all supported ACL_TYPES + */ + +#include "bacula.h" +#include "filed.h" +#include "fd_plugins.h" + +/* check if ACL support is enabled */ +#if defined(HAVE_ACL) + +/* + * This is a constructor of the base BACL class which is OS independent + * + * - for initialization it uses ::init() + * + */ +BACL::BACL (){ + init(); +}; + +/* + * This is a destructor of the BACL class + */ +BACL::~BACL (){ + free_pool_memory(content); +}; + +/* + * Initialization routine + * - initializes all variables to required status + * - allocates required memory + */ +void BACL::init(){ +#if defined(HAVE_ACL) + acl_ena = TRUE; +#else + acl_ena = FALSE; +#endif + + /* generic variables */ + flags = BACL_FLAG_NONE; + current_dev = 0; + content = get_pool_memory(PM_BSOCK); /* it is better to have a 4k buffer */ + content_len = 0; + acl_nr_errors = 0; + acl_streams = NULL; + default_acl_streams = NULL; +}; + +/* + * Enables ACL handling in runtime, could be disabled with disable_acl + * when ACL is not configured then cannot change status + */ +void BACL::enable_acl(){ +#if defined(HAVE_ACL) + acl_ena = TRUE; +#endif +}; + +/* + * Disables ACL handling in runtime, could be enabled with enable_acl + * when ACL is configured + */ +void BACL::disable_acl(){ + acl_ena = FALSE; +}; + +/* + * Copies a text into a content variable and sets a content_len respectively + * + * in: + * text - a standard null terminated string + * out: + * pointer to content variable to use externally + */ +POOLMEM * BACL::set_content(char *text){ + content_len = pm_strcpy(&content, text); + if (content_len > 0){ + /* count the nul terminated char */ + content_len++; + } + // Dmsg2(400, "BACL::set_content: %p %i\n", text, content_len); + return content; +}; + +/* + * Copies a data with length of len into a content variable + * + * in: + * data - data pointer to copy into content buffer + * out: + * pointer to content variable to use externally + */ +POOLMEM * BACL::set_content(char *data, int len){ + content_len = pm_memcpy(&content, data, len); + return content; +}; + +/* + * Check if we changed the device, + * if so setup a flags + * + * in: + * jcr - Job Control Record + * out: + * bRC_BACL_ok - change of device checked and finish successful + * bRC_BACL_error - encountered error + * bRC_BACL_skip - cannot verify device - no file found + * bRC_BACL_inval - invalid input data + */ +bRC_BACL BACL::check_dev (JCR *jcr){ + + int lst; + struct stat st; + + /* sanity check of input variables */ + if (jcr == NULL || jcr->last_fname == NULL){ + return bRC_BACL_inval; + } + + lst = lstat(jcr->last_fname, &st); + switch (lst){ + case -1: { + berrno be; + switch (errno){ + case ENOENT: + return bRC_BACL_skip; + default: + Mmsg2(jcr->errmsg, _("Unable to stat file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + Dmsg2(100, "Unable to stat file \"%s\": ERR=%s\n", jcr->last_fname, be.bstrerror()); + return bRC_BACL_error; + } + break; + } + case 0: + break; + } + + check_dev(jcr, st.st_dev); + + return bRC_BACL_ok; +}; + +/* + * Check if we changed the device, if so setup a flags + * + * in: + * jcr - Job Control Record + * out: + * internal flags status set + */ +void BACL::check_dev (JCR *jcr, uint32_t dev){ + + /* sanity check of input variables */ + if (jcr == NULL || jcr->last_fname == NULL){ + return; + } + + if (current_dev != dev){ + flags = BACL_FLAG_NONE; +#if defined(HAVE_AFS_ACL) + /* handle special fs: AFS */ + if (fstype_equals(jcr->last_fname, "afs")){ + set_flag(BACL_FLAG_AFS); + } else { + 
set_flag(BACL_FLAG_NATIVE); + } +#else + set_flag(BACL_FLAG_NATIVE); +#endif + current_dev = dev; + } +}; + +/* + * It sends a stream located in this->content to Storage Daemon, so the main Bacula + * backup loop is free from this. It sends a header followed by data. + * + * in: + * jcr - Job Control Record + * stream - a stream number to save + * out: + * bRC_BACL_inval - when supplied variables are incorrect + * bRC_BACL_fatal - when we can't send data to the SD + * bRC_BACL_ok - send finish without errors + */ +bRC_BACL BACL::send_acl_stream(JCR *jcr, int stream){ + + BSOCK * sd; + POOLMEM * msgsave; +#ifdef FD_NO_SEND_TEST + return bRC_BACL_ok; +#endif + + /* sanity check of input variables */ + if (jcr == NULL || jcr->store_bsock == NULL){ + return bRC_BACL_inval; + } + if (content_len <= 0){ + return bRC_BACL_ok; + } + + sd = jcr->store_bsock; + /* send header */ + if (!sd->fsend("%ld %d 0", jcr->JobFiles, stream)){ + Jmsg1(jcr, M_FATAL, 0, _("Network send error to SD. ERR=%s\n"), sd->bstrerror()); + return bRC_BACL_fatal; + } + + /* send the buffer to the storage daemon */ + Dmsg1(400, "Backing up ACL: %i\n", content_len); +#if 0 + POOL_MEM tmp(PM_FNAME); + pm_memcpy(tmp, content, content_len); + Dmsg2(400, "Backing up ACL: (%i) <%s>\n", strlen(tmp.addr()), tmp.c_str()); +#endif + msgsave = sd->msg; + sd->msg = content; + sd->msglen = content_len; + if (!sd->send()){ + sd->msg = msgsave; + sd->msglen = 0; + Jmsg1(jcr, M_FATAL, 0, _("Network send error to SD. ERR=%s\n"), sd->bstrerror()); + return bRC_BACL_fatal; + } + + jcr->JobBytes += sd->msglen; + sd->msg = msgsave; + if (!sd->signal(BNET_EOD)){ + Jmsg1(jcr, M_FATAL, 0, _("Network send error to SD. ERR=%s\n"), sd->bstrerror()); + return bRC_BACL_fatal; + } + + Dmsg1(200, "ACL of file: %s successfully backed up!\n", jcr->last_fname); + return bRC_BACL_ok; +}; + +/* + * The main public backup method for ACL + * + * in: + * jcr - Job Control Record + * ff_pkt - file backup record + * out: + * bRC_BACL_fatal - when ACL backup is not compiled in Bacula + * bRC_BACL_ok - backup finish without problems + * bRC_BACL_error - when you can't backup acl data because some error + */ +bRC_BACL BACL::backup_acl (JCR *jcr, FF_PKT *ff_pkt) +{ +#if !defined(HAVE_ACL) && !defined(HAVE_AFS_ACL) + Jmsg(jcr, M_FATAL, 0, "ACL backup requested but not configured in Bacula.\n"); + return bRC_BACL_fatal; +#else + /* sanity check of input variables and verify if engine is enabled */ + if (acl_ena && jcr != NULL && ff_pkt != NULL){ + /* acl engine enabled, proceed */ + bRC_BACL rc; + + jcr->errmsg[0] = 0; + /* check if we have a plugin generated backup */ + if (ff_pkt->cmd_plugin){ + rc = backup_plugin_acl(jcr, ff_pkt); + } else { + /* Check for aclsupport flag and no acl request for link */ + if (!(ff_pkt->flags & FO_ACL && ff_pkt->type != FT_LNK)){ + return bRC_BACL_ok; + } + + check_dev(jcr, ff_pkt->statp.st_dev); + +#if defined(HAVE_AFS_ACL) + if (flags & BACL_FLAG_AFS){ + Dmsg0(400, "make AFS ACL call\n"); + rc = afs_backup_acl(jcr, ff_pkt); + goto bail_out; + } +#endif + +#if defined(HAVE_ACL) + if (flags & BACL_FLAG_NATIVE){ + Dmsg0(400, "make Native ACL call\n"); + rc = os_backup_acl(jcr, ff_pkt); + } else { + /* skip acl backup */ + return bRC_BACL_ok; + } +#endif + } +#if defined(HAVE_AFS_ACL) + bail_out: +#endif + if (rc == bRC_BACL_error){ + if (acl_nr_errors < ACL_MAX_ERROR_PRINT_PER_JOB){ + if (!jcr->errmsg[0]){ + Jmsg(jcr, M_WARNING, 0, "No OS ACL configured.\n"); + } else { + Jmsg(jcr, M_WARNING, 0, "%s", jcr->errmsg); + } + 
inc_acl_errors(); + } + return bRC_BACL_ok; + } + return rc; + } + return bRC_BACL_ok; +#endif +}; + +/* + * The main public restore method for ACL + * + * in: + * jcr - Job Control Record + * stream - a backup stream type number to restore_acl + * data - a pointer to the data stream to restore + * length - a data stream length + * out: + * bRC_BACL_fatal - when ACL restore is not compiled in Bacula + * bRC_BACL_ok - restore finish without problems + * bRC_BACL_error - when you can't restore a stream because some error + */ +bRC_BACL BACL::restore_acl (JCR *jcr, int stream, char *data, uint32_t length) +{ +#if !defined(HAVE_ACL) && !defined(HAVE_AFS_ACL) + Jmsg(jcr, M_FATAL, 0, "ACL restore requested but not configured in Bacula.\n"); + return bRC_BACL_fatal; +#else + /* sanity check of input variables and verify if engine is enabled */ + if (acl_ena && jcr != NULL && data != NULL){ + /* acl engine enabled, proceed */ + int a; + bRC_BACL rc; + + /* check_dev supported on real fs only */ + if (stream != STREAM_XACL_PLUGIN_ACL){ + rc = check_dev(jcr); + + switch (rc){ + case bRC_BACL_skip: + return bRC_BACL_ok; + case bRC_BACL_ok: + break; + default: + return rc; + } + } + + /* copy a data into a content buffer */ + set_content(data, length); + + switch (stream){ +#if defined(HAVE_AFS_ACL) + case STREAM_BACL_AFS_TEXT: + if (flags & BACL_FLAG_AFS){ + return afs_restore_acl(jcr, stream); + } else { + /* + * Increment error count but don't log an error again for the same filesystem. + */ + inc_acl_errors(); + return bRC_BACL_ok; + } +#endif +#if defined(HAVE_ACL) + case STREAM_UNIX_ACCESS_ACL: + case STREAM_UNIX_DEFAULT_ACL: + if (flags & BACL_FLAG_NATIVE){ + return os_restore_acl(jcr, stream, content, content_len); + } else { + inc_acl_errors(); + return bRC_BACL_ok; + } + break; + case STREAM_XACL_PLUGIN_ACL: + return restore_plugin_acl(jcr); + default: + if (flags & BACL_FLAG_NATIVE){ + Dmsg0(400, "make Native ACL call\n"); + for (a = 0; acl_streams[a] > 0; a++){ + if (acl_streams[a] == stream){ + return os_restore_acl(jcr, stream, content, content_len); + } + } + for (a = 0; default_acl_streams[a] > 0; a++){ + if (default_acl_streams[a] == stream){ + return os_restore_acl(jcr, stream, content, content_len); + } + } + } else { + inc_acl_errors(); + return bRC_BACL_ok; + } + break; +#else + default: + break; +#endif + } + /* cannot find a valid stream to support */ + Qmsg2(jcr, M_WARNING, 0, _("Can't restore ACLs of %s - incompatible acl stream encountered - %d\n"), jcr->last_fname, stream); + return bRC_BACL_error; + } + return bRC_BACL_ok; +#endif +}; + +/* + * Performs a generic ACL backup using OS specific methods for + * getting acl data from file + * + * in: + * jcr - Job Control Record + * ff_pkt - file to backup control package + * out: + * bRC_BACL_ok - backup of acl's was successful + * bRC_BACL_fatal - was an error during acl backup + */ +bRC_BACL BACL::generic_backup_acl (JCR *jcr, FF_PKT *ff_pkt) +{ + /* sanity check of input variables */ + if (jcr == NULL || ff_pkt == NULL){ + return bRC_BACL_inval; + } + + if (os_get_acl(jcr, BACL_TYPE_ACCESS) == bRC_BACL_fatal){ + /* XXX: check if os_get_acl return fatal and decide what to do when error is returned */ + return bRC_BACL_fatal; + } + + if (content_len > 0){ + if (send_acl_stream(jcr, acl_streams[0]) == bRC_BACL_fatal){ + return bRC_BACL_fatal; + } + } + + if (ff_pkt->type == FT_DIREND){ + if (os_get_acl(jcr, BACL_TYPE_DEFAULT) == bRC_BACL_fatal){ + return bRC_BACL_fatal; + } + if (content_len > 0){ + if (send_acl_stream(jcr, 
default_acl_streams[0]) == bRC_BACL_fatal){ + return bRC_BACL_fatal; + } + } + } + return bRC_BACL_ok; +}; + +/* + * Performs a generic ACL restore using OS specific methods for + * setting acl data on file. + * + * in: + * jcr - Job Control Record + * stream - a stream number to restore + * out: + * bRC_BACL_ok - restore of acl's was successful + * bRC_BACL_error - was an error during acl restore + * bRC_BACL_fatal - was a fatal error during acl restore or input data + * is invalid + */ +bRC_BACL BACL::generic_restore_acl (JCR *jcr, int stream){ + + unsigned int count; + + /* sanity check of input variables */ + if (jcr == NULL){ + return bRC_BACL_inval; + } + + switch (stream){ + case STREAM_UNIX_ACCESS_ACL: + return os_set_acl(jcr, BACL_TYPE_ACCESS, content, content_len); + case STREAM_UNIX_DEFAULT_ACL: + return os_set_acl(jcr, BACL_TYPE_DEFAULT, content, content_len); + default: + for (count = 0; acl_streams[count] > 0; count++){ + if (acl_streams[count] == stream){ + return os_set_acl(jcr, BACL_TYPE_ACCESS, content, content_len); + } + } + for (count = 0; default_acl_streams[count] > 0; count++){ + if (default_acl_streams[count] == stream){ + return os_set_acl(jcr, BACL_TYPE_DEFAULT, content, content_len); + } + } + break; + } + return bRC_BACL_error; +}; + +/* + * Perform a generic ACL backup using a plugin. It calls the plugin API to + * get required acl data from plugin. + * + * in: + * jcr - Job Control Record + * ff_pkt - file to backup control package + * out: + * bRC_BACL_ok - backup of acls was successful + * bRC_BACL_fatal - was an error during acl backup + */ +bRC_BACL BACL::backup_plugin_acl (JCR *jcr, FF_PKT *ff_pkt) +{ + int status; + char *data; + + /* sanity check of input variables */ + if (jcr == NULL || ff_pkt == NULL){ + return bRC_BACL_inval; + } + + while ((status = plugin_backup_acl(jcr, ff_pkt, &data)) > 0){ + /* data is a plugin buffer which contains data to backup + * and status is a length of the buffer when > 0 */ + set_content(data, status); + if (send_acl_stream(jcr, STREAM_XACL_PLUGIN_ACL) == bRC_BACL_fatal){ + return bRC_BACL_fatal; + } + } + if (status < 0){ + /* error */ + return bRC_BACL_error; + } + + return bRC_BACL_ok; +}; + +/* + * Perform a generic ACL restore using a plugin. It calls the plugin API to + * send acl data to plugin. + * + * in: + * jcr - Job Control Record + * stream - a stream number to restore + * out: + * bRC_BACL_ok - restore of acls was successful + * bRC_BACL_error - was an error during acls restore + * bRC_BACL_fatal - was a fatal error during acl restore or input data + * is invalid + */ +bRC_BACL BACL::restore_plugin_acl (JCR *jcr) +{ + /* sanity check of input variables */ + if (jcr == NULL){ + return bRC_BACL_inval; + } + + if (!plugin_restore_acl(jcr, content, content_len)){ + /* error */ + return bRC_BACL_error; + } + + return bRC_BACL_ok; +} + +/* + * Initialize variables acl_streams and default_acl_streams for a specified OS. 
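The acl_streams and default_acl_streams members initialized by set_acl_streams() below are plain zero-terminated arrays of stream codes; restore_acl() and generic_restore_acl() above scan them linearly. A minimal sketch of that lookup (illustration only, not code from the patch):

/* Returns true if 'stream' appears in a zero-terminated stream table */
static bool stream_in_table(const int *table, int stream)
{
   for (int i = 0; table[i] > 0; i++) {
      if (table[i] == stream) {
         return true;
      }
   }
   return false;
}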
+ * The routine should be called from the object instance constructor + * + * in: + * pacl - acl streams supported for specific OS + * pacl_def - default (directory) acl streams supported for specific OS + */ +void BACL::set_acl_streams (const int *pacl, const int *pacl_def){ + + acl_streams = pacl; + default_acl_streams = pacl_def; +}; + +#if defined(HAVE_AFS_ACL) +#if defined(HAVE_AFS_AFSINT_H) && defined(HAVE_AFS_VENUS_H) +#include <afs/afsint.h> +#include <afs/venus.h> +#else +#error "configure failed to detect availability of afs/afsint.h and/or afs/venus.h" +#endif + +/* + * External references to functions in the libsys library which are not in the current include files. + */ +extern "C" { +long pioctl(char *pathp, long opcode, struct ViceIoctl *blobp, int follow); +} + +/* + * Backup ACL data of AFS + * + * in: + * jcr - Job Control Record + * ff_pkt - file backup record + * out: + * bRC_BACL_inval - input variables are invalid (NULL) + * bRC_BACL_ok - backup finished without problems + * bRC_BACL_error - when you can't backup acl data because of some error + */ +bRC_BACL BACL::afs_backup_acl (JCR *jcr, FF_PKT *ff_pkt){ + + int rc; + struct ViceIoctl vip; + char data[BUFSIZ]; + + /* sanity check of input variables */ + if (jcr == NULL || ff_pkt == NULL){ + return bRC_BACL_inval; + } + + /* AFS ACLs can only be set on a directory, so no need to try other files */ + if (ff_pkt->type != FT_DIREND){ + return bRC_BACL_ok; + } + + vip.in = NULL; + vip.in_size = 0; + vip.out = data; + vip.out_size = BUFSIZE; + memset(data, 0, BUFSIZE); + + if ((rc = pioctl(jcr->last_fname, VIOCGETAL, &vip, 0)) < 0){ + berrno be; + + Mmsg2(jcr->errmsg, _("pioctl VIOCGETAL error on file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + Dmsg2(100, "pioctl VIOCGETAL error file=%s ERR=%s\n", jcr->last_fname, be.bstrerror()); + return bRC_BACL_error; + } + set_content(data); + return send_acl_stream(jcr, STREAM_BACL_AFS_TEXT); +}; + +/* + * Restore ACL data of AFS + * in: + * jcr - Job Control Record + * stream - a backup stream type number to restore_acl + * out: + * bRC_BACL_inval - input variables are invalid (NULL) + * bRC_BACL_ok - restore finished without problems + * bRC_BACL_error - when you can't restore acl data because of some error + */ +bRC_BACL BACL::afs_restore_acl (JCR *jcr, int stream){ + + int rc; + struct ViceIoctl vip; + + /* sanity check of input variables */ + if (jcr == NULL){ + return bRC_BACL_inval; + } + + vip.in = content; + vip.in_size = content_len; + vip.out = NULL; + vip.out_size = 0; + + if ((rc = pioctl(jcr->last_fname, VIOCSETAL, &vip, 0)) < 0){ + berrno be; + + Mmsg2(jcr->errmsg, _("pioctl VIOCSETAL error on file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + Dmsg2(100, "pioctl VIOCSETAL error file=%s ERR=%s\n", jcr->last_fname, be.bstrerror()); + + return bRC_BACL_error; + } + return bRC_BACL_ok; +}; +#endif /* HAVE_AFS_ACL */ + +#include "bacl_osx.h" +#include "bacl_linux.h" +#include "bacl_freebsd.h" +#include "bacl_solaris.h" +// #include "bacl_aix.h" + +/* + * Creating the correct instance of the BACL for a supported OS + */ +void *new_bacl() +{ +#if defined(HAVE_DARWIN_OS) + return new BACL_OSX(); +#elif defined(HAVE_LINUX_OS) + return new BACL_Linux(); +#elif defined(HAVE_FREEBSD_OS) + return new BACL_FreeBSD(); +#elif defined(HAVE_HURD_OS) + return new BACL_Hurd(); +#elif defined(HAVE_AIX_OS) + return new BACL_AIX(); +#elif defined(HAVE_IRIX_OS) + return new BACL_IRIX(); +#elif defined(HAVE_OSF1_OS) + return new BACL_OSF1(); +#elif defined(HAVE_SUN_OS) + return new BACL_Solaris();
+#else + return NULL; +#endif +}; + +#endif /* HAVE_ACL */ diff --git a/src/filed/bacl.h b/src/filed/bacl.h new file mode 100644 index 00000000..5ae448ac --- /dev/null +++ b/src/filed/bacl.h @@ -0,0 +1,195 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. + */ +/** + * Major refactoring of ACL code written by: + * + * Radosław Korzeniewski, MMXVI + * radoslaw@korzeniewski.net, radekk@inteos.pl + * Inteos Sp. z o.o. http://www.inteos.pl/ + * + */ + +#ifndef __BACL_H_ +#define __BACL_H_ + +/* check if ACL support is enabled */ +#if defined(HAVE_ACL) + +/* + * Return value status enumeration + * You have an error when value is less then zero. + * You have a positive status when value is not negative + * (greater or equal to zero). + */ +enum bRC_BACL { + bRC_BACL_inval = -3, // input data invalid + bRC_BACL_fatal = -2, // a fatal error + bRC_BACL_error = -1, // standard error + bRC_BACL_ok = 0, // success + bRC_BACL_skip = 1, // processing should skip current runtime + bRC_BACL_cont = 2 // processing should skip current element + // and continue with next one +}; + +/* + * We support the following types of ACLs + */ +typedef enum { + BACL_TYPE_NONE = 0, + BACL_TYPE_ACCESS = 1, + BACL_TYPE_DEFAULT = 2, + BACL_TYPE_DEFAULT_DIR = 3, + BACL_TYPE_EXTENDED = 4, + BACL_TYPE_NFS4 = 5, + BACL_TYPE_PLUGIN = 6 +} BACL_type; + +/* + * Flags which control what ACL engine to use for backup/restore + */ +#define BACL_FLAG_NONE 0 +#define BACL_FLAG_NATIVE 0x01 +#define BACL_FLAG_AFS 0x02 +#define BACL_FLAG_PLUGIN 0x04 + +/* + * Ensure we have none + */ +#ifndef ACL_TYPE_NONE +#define ACL_TYPE_NONE 0x0 +#endif + +/* + * Basic ACL class which is a foundation for any other OS specific implementation. + * + * This class cannot be used directly as it is an abstraction class with a lot + * of virtual methods laying around. As a basic class it has all public API + * available for backup and restore functionality. As a bonus it handles all + * ACL generic functions and OS independent API, i.e. for AFS ACL or Plugins ACL + * (future functionality). + */ +class BACL { +private: + bool acl_ena; + uint32_t flags; + uint32_t current_dev; + POOLMEM *content; + uint32_t content_len; + uint32_t acl_nr_errors; + const int *acl_streams; + const int *default_acl_streams; + const char **xattr_skiplist; + const char **xattr_acl_skiplist; + + void init(); + + /** + * Perform OS specific ACL backup. + * in: + * jcr - Job Control Record + * ff_pkt - file to backup information rector + * out: + * bRC_BACL_ok - backup performed without problems + * any other - some error occurred + */ + virtual bRC_BACL os_backup_acl (JCR *jcr, FF_PKT *ff_pkt){return bRC_BACL_fatal;}; + + /** + * Perform OS specific ACL restore. Runtime is called only when stream is supported by OS. 
+ * in: + * jcr - Job Control Record + * ff_pkt - file to backup information rector + * out: + * bRC_BACL_ok - backup performed without problems + * any other - some error occurred + */ + virtual bRC_BACL os_restore_acl (JCR *jcr, int stream, char *content, uint32_t length){return bRC_BACL_fatal;}; + + /** + * Low level OS specific runtime to get ACL data from file. The ACL data is set in internal content buffer. + * + * in: + * jcr - Job Control Record + * bacltype - the acl type to restore + * out: + * bRC_BACL_ok - + * bRC_BACL_error/fatal - an error or fatal error occurred + */ + virtual bRC_BACL os_get_acl (JCR *jcr, BACL_type bacltype){return bRC_BACL_fatal;}; + + /** + * Low level OS specific runtime to set ACL data on file. + * + * in: + * jcr - Job Control Record + * bacltype - the acl type to restore + * content - a buffer with data to restore + * length - a data restore length + * out: + * bRC_BACL_ok - + * bRC_BACL_error/fatal - an error or fatal error occurred + */ + virtual bRC_BACL os_set_acl (JCR *jcr, BACL_type bacltype, char *content, uint32_t length){return bRC_BACL_fatal;}; + + void inc_acl_errors(){ acl_nr_errors++;}; + bRC_BACL check_dev (JCR *jcr); + void check_dev (JCR *jcr, uint32_t dev); + +public: + BACL (); + virtual ~BACL(); + + /* enable/disable functionality */ + void enable_acl(); + void disable_acl(); + + /* + * public methods used outside the class or derivatives + */ + bRC_BACL backup_acl (JCR *jcr, FF_PKT *ff_pkt); + bRC_BACL restore_acl (JCR *jcr, int stream, char *content, uint32_t content_length); + + /* utility functions */ + inline uint32_t get_acl_nr_errors(){ return acl_nr_errors;}; + void set_acl_streams (const int *pacl, const int *pacl_def); + inline void clear_flag (uint32_t flag){ flags &= ~flag;}; + inline void set_flag (uint32_t flag){ flags |= flag;}; + POOLMEM * set_content (char *text); + POOLMEM * set_content(char *data, int len); + inline POOLMEM * get_content (void){ return content;}; + inline uint32_t get_content_size (void){ return sizeof_pool_memory(content);}; + inline uint32_t get_content_len (void){ return content_len;}; + + /* sending data to the storage */ + bRC_BACL send_acl_stream (JCR *jcr, int stream); + + /* generic functions */ + bRC_BACL generic_backup_acl (JCR *jcr, FF_PKT *ff_pkt); + bRC_BACL generic_restore_acl (JCR *jcr, int stream); + bRC_BACL afs_backup_acl (JCR *jcr, FF_PKT *ff_pkt); + bRC_BACL afs_restore_acl (JCR *jcr, int stream); + bRC_BACL backup_plugin_acl (JCR *jcr, FF_PKT *ff_pkt); + bRC_BACL restore_plugin_acl (JCR *jcr); +}; + +void *new_bacl(); + +#endif /* HAVE_ACL */ + +#endif /* __BACL_H_ */ diff --git a/src/filed/bacl_freebsd.c b/src/filed/bacl_freebsd.c new file mode 100644 index 00000000..247248ca --- /dev/null +++ b/src/filed/bacl_freebsd.c @@ -0,0 +1,533 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+ */ +/** + * Major refactoring of ACL code written by: + * + * Radosław Korzeniewski, MMXVI + * radoslaw@korzeniewski.net, radekk@inteos.pl + * Inteos Sp. z o.o. http://www.inteos.pl/ + * + */ + +#include "bacula.h" +#include "filed.h" +#include "bacl_freebsd.h" + +#if defined(HAVE_FREEBSD_OS) + +/* check if ACL support is enabled */ +#if defined(HAVE_ACL) + +/* + * Define the supported ACL streams for this OS + */ +static const int os_acl_streams[] = { + STREAM_XACL_FREEBSD_ACCESS, + STREAM_XACL_FREEBSD_NFS4, + 0 +}; + +static const int os_default_acl_streams[] = { + STREAM_XACL_FREEBSD_DEFAULT, + 0 +}; + +/* + * OS specific constructor + */ +BACL_FreeBSD::BACL_FreeBSD(){ + + set_acl_streams(os_acl_streams, os_default_acl_streams); +}; + +/* + * Translates Bacula internal acl representation into acl type + * + * in: + * bacltype - internal Bacula acl type (BACL_type) + * out: + * acl_type_t - os dependent acl type + * when failed - ACL_TYPE_NONE is returned + */ +acl_type_t BACL_FreeBSD::get_acltype(BACL_type bacltype){ + + acl_type_t acltype; + + switch (bacltype){ +#ifdef HAVE_ACL_TYPE_NFS4 + case BACL_TYPE_NFS4: + acltype = ACL_TYPE_NFS4; + break; +#endif + case BACL_TYPE_ACCESS: + acltype = ACL_TYPE_ACCESS; + break; + case BACL_TYPE_DEFAULT: + acltype = ACL_TYPE_DEFAULT; + break; + default: + /* + * sanity check for acl's not supported by OS + */ + acltype = (acl_type_t)ACL_TYPE_NONE; + break; + } + return acltype; +}; + +/* + * Counts a number of acl entries + * + * in: + * acl - acl object + * out: + * int - number of entries in acl object + * when no acl entry available or any error then return zero '0' + */ +int BACL_FreeBSD::acl_nrentries(acl_t acl){ + + int nr = 0; + acl_entry_t aclentry; + int rc; + + rc = acl_get_entry(acl, ACL_FIRST_ENTRY, &aclentry); + while (rc == 1){ + nr++; + rc = acl_get_entry(acl, ACL_NEXT_ENTRY, &aclentry); + } + + return nr; +}; + +/* + * Checks if acl is simple. + * + * acl is simple if it has only the following entries: + * "user::", + * "group::", + * "other::" + * + * in: + * acl - acl object + * out: + * true - when acl object is simple + * false - when acl object is not simple + */ +bool BACL_FreeBSD::acl_issimple(acl_t acl){ + + acl_entry_t aclentry; + acl_tag_t acltag; + int rc; + + rc = acl_get_entry(acl, ACL_FIRST_ENTRY, &aclentry); + while (rc == 1){ + if (acl_get_tag_type(aclentry, &acltag) < 0){ + return true; + } + /* + * Check for ACL_USER_OBJ, ACL_GROUP_OBJ or ACL_OTHER to find out. 
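+       * A "simple" acl carries nothing beyond the standard mode bits; in
+       * acl_to_text() form it would look like (example only):
+       *
+       *    user::rw-
+       *    group::r--
+       *    other::r--
+       *
+       * Any additional ACL_USER, ACL_GROUP or ACL_MASK entry makes the acl
+       * non-trivial and therefore worth backing up.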
+ */ + if (acltag != ACL_USER_OBJ && + acltag != ACL_GROUP_OBJ && + acltag != ACL_OTHER){ + return false; + } + rc = acl_get_entry(acl, ACL_NEXT_ENTRY, &aclentry); + } + return true; +}; + +/* + * Checks if ACL's are available for a specified file + * + * in: + * jcr - Job Control Record + * name - specifies the system variable to be queried + * out: + * bRC_BACL_ok - check successful, lets setup bacltype variable + * bRC_BACL_error - in case of error + * bRC_BACL_skip - you should skip all other routine + * bRC_BACL_cont - you need to continue your routine + */ +bRC_BACL BACL_FreeBSD::check_bacltype (JCR *jcr, int name){ + + int aclrc = 0; + + aclrc = pathconf(jcr->last_fname, name); + switch (aclrc){ + case -1: { + /* some error check why */ + berrno be; + if (errno == ENOENT){ + /* file does not exist skip it */ + return bRC_BACL_skip; + } else { + Mmsg2(jcr->errmsg, _("pathconf error on file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + Dmsg2(100, "pathconf error file=%s ERR=%s\n", jcr->last_fname, be.bstrerror()); + return bRC_BACL_error; + } + } + case 0: + /* continue the routine */ + return bRC_BACL_cont; + default: + break; + } + return bRC_BACL_ok; +}; + +/* + * Perform OS specific ACL backup + * + * in/out - check API at bacl.h + */ +bRC_BACL BACL_FreeBSD::os_backup_acl (JCR *jcr, FF_PKT *ff_pkt){ + + bRC_BACL rc; + BACL_type bacltype = BACL_TYPE_NONE; + +#if defined(_PC_ACL_NFS4) + /* + * Check if filesystem supports NFS4 acls. + */ + rc = check_bacltype(jcr, _PC_ACL_NFS4); + switch (rc){ + case bRC_BACL_ok: + bacltype = BACL_TYPE_NFS4; + break; + case bRC_BACL_skip: + return bRC_BACL_ok; + case bRC_BACL_cont: + break; + default: + /* errors */ + return rc; + } +#endif + if (bacltype == BACL_TYPE_NONE){ + /* + * Check if filesystem supports POSIX acls. + */ + rc = check_bacltype(jcr, _PC_ACL_EXTENDED); + switch (rc){ + case bRC_BACL_ok: + bacltype = BACL_TYPE_ACCESS; + break; + case bRC_BACL_skip: + return bRC_BACL_ok; + case bRC_BACL_cont: + break; + default: + /* errors */ + return rc; + } + } + + /* no ACL's available for file, so skip this filesystem */ + if (bacltype == BACL_TYPE_NONE){ + clear_flag(BACL_FLAG_NATIVE); + /* + * it is a bit of hardcore to clear a poolmemory with a NULL pointer, + * but it is working, hehe :) + * you may ask why it is working? it is simple, a pm_strcpy function is handling + * a null pointer with a substitiution of empty string. 
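+       * (in other words set_content(NULL) leaves the internal content buffer
+       * holding an empty string instead of freeing it)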
+ */ + set_content(NULL); + return bRC_BACL_ok; + } + + switch (bacltype){ + case BACL_TYPE_NFS4: + /* + * Read NFS4 ACLs + */ + if (os_get_acl(jcr, BACL_TYPE_NFS4) == bRC_BACL_fatal) + return bRC_BACL_fatal; + + if (get_content_len() > 0){ + if (send_acl_stream(jcr, STREAM_XACL_FREEBSD_NFS4) == bRC_BACL_fatal) + return bRC_BACL_fatal; + } + break; + case BACL_TYPE_ACCESS: + /* + * Read access ACLs + */ + if (os_get_acl(jcr, BACL_TYPE_ACCESS) == bRC_BACL_fatal) + return bRC_BACL_fatal; + + if (get_content_len() > 0){ + if (send_acl_stream(jcr, STREAM_XACL_FREEBSD_ACCESS) == bRC_BACL_fatal) + return bRC_BACL_fatal; + } + + /* + * Directories can have default ACLs too + */ + if (ff_pkt->type == FT_DIREND){ + if (os_get_acl(jcr, BACL_TYPE_DEFAULT) == bRC_BACL_fatal) + return bRC_BACL_fatal; + if (get_content_len() > 0){ + if (send_acl_stream(jcr, STREAM_XACL_FREEBSD_DEFAULT) == bRC_BACL_fatal) + return bRC_BACL_fatal; + } + } + break; + default: + break; + } + + return bRC_BACL_ok; +}; + +/* + * Perform OS specific ACL restore + * + * in/out - check API at bacl.h + */ +bRC_BACL BACL_FreeBSD::os_restore_acl (JCR *jcr, int stream, char *content, uint32_t length){ + + int aclrc = 0; + const char *acl_type_name; + + switch (stream){ + case STREAM_UNIX_ACCESS_ACL: + case STREAM_XACL_FREEBSD_ACCESS: + case STREAM_UNIX_DEFAULT_ACL: + case STREAM_XACL_FREEBSD_DEFAULT: + aclrc = pathconf(jcr->last_fname, _PC_ACL_EXTENDED); + acl_type_name = "POSIX"; + break; + case STREAM_XACL_FREEBSD_NFS4: +#if defined(_PC_ACL_NFS4) + aclrc = pathconf(jcr->last_fname, _PC_ACL_NFS4); +#endif + acl_type_name = "NFS4"; + break; + default: + acl_type_name = "unknown"; + break; + } + + switch (aclrc){ + case -1: { + berrno be; + + switch (errno){ + case ENOENT: + return bRC_BACL_ok; + default: + Mmsg2(jcr->errmsg, _("pathconf error on file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + Dmsg3(100, "pathconf error acl=%s file=%s ERR=%s\n", content, jcr->last_fname, be.bstrerror()); + return bRC_BACL_error; + } + } + case 0: + clear_flag(BACL_FLAG_NATIVE); + Mmsg2(jcr->errmsg, _("Trying to restore acl on file \"%s\" on filesystem without %s acl support\n"), jcr->last_fname, acl_type_name); + return bRC_BACL_error; + default: + break; + } + + switch (stream){ + case STREAM_UNIX_ACCESS_ACL: + case STREAM_XACL_FREEBSD_ACCESS: + return os_set_acl(jcr, BACL_TYPE_ACCESS, content, length); + case STREAM_UNIX_DEFAULT_ACL: + case STREAM_XACL_FREEBSD_DEFAULT: + return os_set_acl(jcr, BACL_TYPE_DEFAULT, content, length); + case STREAM_XACL_FREEBSD_NFS4: + return os_set_acl(jcr, BACL_TYPE_NFS4, content, length); + default: + break; + } + return bRC_BACL_error; +}; + +/* + * Low level OS specific runtime to get ACL data from file. 
+ * The ACL data is set in internal content buffer + * + * in/out - check API at bacl.h + */ +bRC_BACL BACL_FreeBSD::os_get_acl(JCR *jcr, BACL_type bacltype){ + + acl_t acl; + acl_type_t acltype; + char *acltext; + bRC_BACL rc = bRC_BACL_ok; + + acltype = get_acltype(bacltype); + acl = acl_get_file(jcr->last_fname, acltype); + + if (acl){ + Dmsg1(400, "OS_ACL read from file: %s\n",jcr->last_fname); + if (acl_nrentries(acl) == 0){ + goto bail_out; + } + + /* check for simple ACL which correspond to standard permissions only */ + if (bacltype == BACL_TYPE_ACCESS && acl_issimple(acl)){ + goto bail_out; + } + +#if defined(_PC_ACL_NFS4) + if (bacltype == BACL_TYPE_NFS4){ + int trivial; + if (acl_is_trivial_np(acl, &trivial) == 0){ + if (trivial == 1){ + goto bail_out; + } + } + } +#endif + + if ((acltext = acl_to_text(acl, NULL)) != NULL){ + set_content(acltext); + acl_free(acl); + acl_free(acltext); + return bRC_BACL_ok; + } + + berrno be; + + Mmsg2(jcr->errmsg, _("acl_to_text error on file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + Dmsg2(100, "acl_to_text error file=%s ERR=%s\n", jcr->last_fname, be.bstrerror()); + + rc = bRC_BACL_error; + + } else { + berrno be; + + switch (errno){ + case EOPNOTSUPP: + /* fs does not support acl, skip it */ + Dmsg0(400, "Wow, ACL is not supported on this filesystem\n"); + clear_flag(BACL_FLAG_NATIVE); + break; + case ENOENT: + break; + default: + /* Some real error */ + Mmsg2(jcr->errmsg, _("acl_get_file error on file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + Dmsg2(100, "acl_get_file error file=%s ERR=%s\n", jcr->last_fname, be.bstrerror()); + rc = bRC_BACL_error; + break; + } + } + +bail_out: + if (acl){ + acl_free(acl); + } + /* + * it is a bit of hardcore to clear a pool memory with a NULL pointer, + * but it is working, hehe :) + * you may ask why it is working? it is simple, a pm_strcpy function is handling + * a null pointer with a substitution of empty string. + */ + set_content(NULL); + return rc; +}; + +/* + * Low level OS specific runtime to set ACL data on file + * + * in/out - check API at bacl.h + */ +bRC_BACL BACL_FreeBSD::os_set_acl(JCR *jcr, BACL_type bacltype, char *content, uint32_t length){ + + acl_t acl; + acl_type_t acltype; + + acltype = get_acltype(bacltype); + if (acltype == ACL_TYPE_DEFAULT && length == 0){ + /* delete ACl from file when no acl data available for default acl's */ + if (acl_delete_def_file(jcr->last_fname) == 0){ + return bRC_BACL_ok; + } + + berrno be; + switch (errno){ + case ENOENT: + return bRC_BACL_ok; + case ENOTSUP: + /* + * If the filesystem reports it doesn't support acl's we clear the + * BACL_FLAG_NATIVE flag so we skip ACL restores on all other files + * on the same filesystem. The BACL_FLAG_NATIVE flag gets set again + * when we change from one filesystem to an other. 
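+          * (the reset itself is presumably done by the base class check_dev()
+          * helpers, which track the current device in BACL::current_dev;
+          * see bacl.h)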
+ */ + clear_flag(BACL_FLAG_NATIVE); + Mmsg(jcr->errmsg, _("acl_delete_def_file error on file \"%s\": filesystem doesn't support ACLs\n"), jcr->last_fname); + return bRC_BACL_error; + default: + Mmsg2(jcr->errmsg, _("acl_delete_def_file error on file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + return bRC_BACL_error; + } + } + + acl = acl_from_text(content); + if (acl == NULL){ + berrno be; + + Mmsg2(jcr->errmsg, _("acl_from_text error on file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + Dmsg3(100, "acl_from_text error acl=%s file=%s ERR=%s\n", content, jcr->last_fname, be.bstrerror()); + return bRC_BACL_error; + } + + /* + * Restore the ACLs, but don't complain about links which really should + * not have attributes, and the file it is linked to may not yet be restored. + * This is only true for the old acl streams as in the new implementation we + * don't save acls of symlinks (which cannot have acls anyhow) + */ + if (acl_set_file(jcr->last_fname, acltype, acl) != 0 && jcr->last_type != FT_LNK){ + berrno be; + switch (errno){ + case ENOENT: + acl_free(acl); + return bRC_BACL_ok; + case ENOTSUP: + /* + * If the filesystem reports it doesn't support ACLs we clear the + * BACL_FLAG_NATIVE flag so we skip ACL restores on all other files + * on the same filesystem. The BACL_FLAG_NATIVE flag gets set again + * when we change from one filesystem to an other. + */ + clear_flag(BACL_FLAG_NATIVE); + Mmsg(jcr->errmsg, _("acl_set_file error on file \"%s\": filesystem doesn't support ACLs\n"), jcr->last_fname); + Dmsg2(100, "acl_set_file error acl=%s file=%s filesystem doesn't support ACLs\n", content, jcr->last_fname); + acl_free(acl); + return bRC_BACL_error; + default: + Mmsg2(jcr->errmsg, _("acl_set_file error on file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + Dmsg3(100, "acl_set_file error acl=%s file=%s ERR=%s\n", content, jcr->last_fname, be.bstrerror()); + acl_free(acl); + return bRC_BACL_error; + } + } + acl_free(acl); + return bRC_BACL_ok; +}; + +#endif /* HAVE_ACL */ + +#endif /* HAVE_FREEBSD_OS */ diff --git a/src/filed/bacl_freebsd.h b/src/filed/bacl_freebsd.h new file mode 100644 index 00000000..1800dd15 --- /dev/null +++ b/src/filed/bacl_freebsd.h @@ -0,0 +1,70 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. + */ +/** + * Major refactoring of ACL code written by: + * + * Radosław Korzeniewski, MMXVI + * radoslaw@korzeniewski.net, radekk@inteos.pl + * Inteos Sp. z o.o. 
http://www.inteos.pl/ + * + */ + +#ifndef __BACL_FreeBSD_H_ +#define __BACL_FreeBSD_H_ + +#if defined(HAVE_FREEBSD_OS) +#include + +/* check if ACL support is enabled */ +#if defined(HAVE_ACL) + +#ifdef HAVE_SYS_ACL_H +#include +#else +#error "configure failed to detect availability of sys/acl.h" +#endif + +#ifdef HAVE_LIBUTIL_H +#include +#endif + +/* + * + * + */ +class BACL_FreeBSD : public BACL { +private: + bRC_BACL os_backup_acl (JCR *jcr, FF_PKT *ff_pkt); + bRC_BACL os_restore_acl (JCR *jcr, int stream, char *content, uint32_t length); + bRC_BACL os_get_acl(JCR *jcr, BACL_type bacltype); + bRC_BACL os_set_acl(JCR *jcr, BACL_type bacltype, char *content, uint32_t length); + /* requires acl.h available */ + bool acl_issimple(acl_t acl); + acl_type_t get_acltype(BACL_type bacltype); + int acl_nrentries(acl_t acl); + bRC_BACL check_bacltype (JCR *jcr, int name); +public: + BACL_FreeBSD (); +}; + +#endif /* HAVE_ACL */ + +#endif /* HAVE_FREEBSD_OS */ + +#endif /* __BACL_FreeBSD_H_ */ diff --git a/src/filed/bacl_linux.c b/src/filed/bacl_linux.c new file mode 100644 index 00000000..33bf27bc --- /dev/null +++ b/src/filed/bacl_linux.c @@ -0,0 +1,344 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. + */ +/** + * Major refactoring of ACL code written by: + * + * Radosław Korzeniewski, MMXVI + * radoslaw@korzeniewski.net, radekk@inteos.pl + * Inteos Sp. z o.o. 
http://www.inteos.pl/ + * + */ + +#include "bacula.h" +#include "filed.h" +#include "bacl_linux.h" + +#if defined(HAVE_LINUX_OS) + +/* check if ACL support is enabled */ +#if defined(HAVE_ACL) + +/* + * Define the supported ACL streams for this OS + */ +static const int os_acl_streams[] = { + STREAM_XACL_LINUX_ACCESS, + 0 +}; + +static const int os_default_acl_streams[] = { + STREAM_XACL_LINUX_DEFAULT, + 0 +}; + +/* + * OS specific constructor + */ +BACL_Linux::BACL_Linux(){ + set_acl_streams(os_acl_streams, os_default_acl_streams); +}; + +/* + * Translates Bacula internal acl representation into + * acl type + * + * in: + * bacltype - internal Bacula acl type (BACL_type) + * out: + * acl_type_t - os dependent acl type + * when failed - ACL_TYPE_NONE is returned + */ +acl_type_t BACL_Linux::get_acltype(BACL_type bacltype){ + + acl_type_t acltype; + + switch (bacltype){ + case BACL_TYPE_ACCESS: + acltype = ACL_TYPE_ACCESS; + break; + case BACL_TYPE_DEFAULT: + acltype = ACL_TYPE_DEFAULT; + break; + default: + /* + * sanity check for acl's not supported by OS + */ + acltype = (acl_type_t)ACL_TYPE_NONE; + break; + } + return acltype; +}; + +/* + * Counts a number of acl entries + * + * in: + * acl - acl object + * out: + * int - number of entries in acl object + * when no acl entry available or any error then return zero '0' + */ +int BACL_Linux::acl_nrentries(acl_t acl){ + + int nr = 0; + acl_entry_t aclentry; + int rc; + + rc = acl_get_entry(acl, ACL_FIRST_ENTRY, &aclentry); + while (rc == 1){ + nr++; + rc = acl_get_entry(acl, ACL_NEXT_ENTRY, &aclentry); + } + + return nr; +}; + +/* + * Checks if acl is simple. + * + * acl is simple if it has only the following entries: + * "user::", + * "group::", + * "other::" + * + * in: + * acl - acl object + * out: + * true - when acl object is simple + * false - when acl object is not simple + */ +bool BACL_Linux::acl_issimple(acl_t acl){ + + acl_entry_t aclentry; + acl_tag_t acltag; + int rc; + + rc = acl_get_entry(acl, ACL_FIRST_ENTRY, &aclentry); + while (rc == 1){ + if (acl_get_tag_type(aclentry, &acltag) < 0){ + return true; + } + /* + * Check for ACL_USER_OBJ, ACL_GROUP_OBJ or ACL_OTHER to find out. + */ + if (acltag != ACL_USER_OBJ && + acltag != ACL_GROUP_OBJ && + acltag != ACL_OTHER){ + return false; + } + rc = acl_get_entry(acl, ACL_NEXT_ENTRY, &aclentry); + } + return true; +}; + +/* + * Perform OS specific ACL backup + * + * in/out - check API at bacl.h + */ +bRC_BACL BACL_Linux::os_backup_acl (JCR *jcr, FF_PKT *ff_pkt){ + return generic_backup_acl(jcr, ff_pkt); +}; + +/* + * Perform OS specific ACL restore + * + * in/out - check API at bacl.h + */ +bRC_BACL BACL_Linux::os_restore_acl (JCR *jcr, int stream, char *content, uint32_t length){ + return generic_restore_acl(jcr, stream); +}; + +/* + * Low level OS specific runtime to get ACL data from file. The ACL data is set in internal content buffer. 
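+ * The flow is the same as in the FreeBSD variant: acl_get_file(), skip empty
+ * or "simple" acls, convert with acl_to_text() and store the text via
+ * set_content(); EOPNOTSUPP and ENOENT are tolerated, anything else is
+ * reported as bRC_BACL_error.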
+ * + * in/out - check API at bacl.h + */ +bRC_BACL BACL_Linux::os_get_acl(JCR *jcr, BACL_type bacltype){ + + acl_t acl; + acl_type_t acltype; + char *acltext; + bRC_BACL rc = bRC_BACL_ok; + + /* check input data */ + if (jcr == NULL){ + return bRC_BACL_inval; + } + + acltype = get_acltype(bacltype); + acl = acl_get_file(jcr->last_fname, acltype); + + if (acl){ + Dmsg1(400, "OS_ACL read from file: %s\n",jcr->last_fname); + if (acl_nrentries(acl) == 0){ + goto bail_out; + } + + /* check for simple ACL which correspond to standard permissions only */ + if (bacltype == BACL_TYPE_ACCESS && acl_issimple(acl)){ + goto bail_out; + } + + if ((acltext = acl_to_text(acl, NULL)) != NULL){ + set_content(acltext); + acl_free(acl); + acl_free(acltext); + return bRC_BACL_ok; + } + + berrno be; + + Mmsg2(jcr->errmsg, _("acl_to_text error on file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + Dmsg2(100, "acl_to_text error file=%s ERR=%s\n", jcr->last_fname, be.bstrerror()); + + rc = bRC_BACL_error; + } else { + berrno be; + + switch (errno){ + case EOPNOTSUPP: + /* fs does not support acl, skip it */ + Dmsg0(400, "Wow, ACL is not supported on this filesystem\n"); + clear_flag(BACL_FLAG_NATIVE); + break; + case ENOENT: + break; + default: + /* Some real error */ + Mmsg2(jcr->errmsg, _("acl_get_file error on file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + Dmsg2(100, "acl_get_file error file=%s ERR=%s\n", jcr->last_fname, be.bstrerror()); + rc = bRC_BACL_error; + break; + } + } + +bail_out: + if (acl){ + acl_free(acl); + } + /* + * it is a bit of hardcore to clear a poolmemory with a NULL pointer, + * but it is working, hehe :) + * you may ask why it is working? it is simple, a pm_strcpy function is handling + * a null pointer with a substitiution of empty string. + */ + set_content(NULL); + return rc; +}; + +/* + * Low level OS specific runtime to set ACL data on file + * + * in/out - check API at bacl.h + */ +bRC_BACL BACL_Linux::os_set_acl(JCR *jcr, BACL_type bacltype, char *content, uint32_t length){ + + acl_t acl; + acl_type_t acltype; + + /* check input data */ + if (jcr == NULL || content == NULL){ + return bRC_BACL_inval; + } + + acl = acl_from_text(content); + if (acl == NULL){ + berrno be; + + Mmsg2(jcr->errmsg, _("acl_from_text error on file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + Dmsg3(100, "acl_from_text error acl=%s file=%s ERR=%s\n", content, jcr->last_fname, be.bstrerror()); + return bRC_BACL_error; + } + + if (acl_valid(acl) != 0){ + berrno be; + + Mmsg2(jcr->errmsg, _("acl_valid error on file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + Dmsg3(100, "acl_valid error acl=%s file=%s ERR=%s\n", content, jcr->last_fname, be.bstrerror()); + acl_free(acl); + return bRC_BACL_error; + } + + /* handle different acl types for Linux */ + acltype = get_acltype(bacltype); + if (acltype == ACL_TYPE_DEFAULT && length == 0){ + /* delete ACl from file when no acl data available for default acl's */ + if (acl_delete_def_file(jcr->last_fname) == 0){ + return bRC_BACL_ok; + } + + berrno be; + switch (errno){ + case ENOENT: + return bRC_BACL_ok; + case ENOTSUP: + /* + * If the filesystem reports it doesn't support acl's we clear the + * BACL_FLAG_NATIVE flag so we skip ACL restores on all other files + * on the same filesystem. The BACL_FLAG_NATIVE flag gets set again + * when we change from one filesystem to an other. 
+ */ + clear_flag(BACL_FLAG_NATIVE); + Mmsg(jcr->errmsg, _("acl_delete_def_file error on file \"%s\": filesystem doesn't support ACLs\n"), jcr->last_fname); + return bRC_BACL_error; + default: + Mmsg2(jcr->errmsg, _("acl_delete_def_file error on file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + return bRC_BACL_error; + } + } + + /* + * Restore the ACLs, but don't complain about links which really should + * not have attributes, and the file it is linked to may not yet be restored. + * This is only true for the old acl streams as in the new implementation we + * don't save acls of symlinks (which cannot have acls anyhow) + */ + if (acl_set_file(jcr->last_fname, acltype, acl) != 0 && jcr->last_type != FT_LNK){ + berrno be; + switch (errno){ + case ENOENT: + acl_free(acl); + return bRC_BACL_ok; + case ENOTSUP: + /* + * If the filesystem reports it doesn't support ACLs we clear the + * BACL_FLAG_NATIVE flag so we skip ACL restores on all other files + * on the same filesystem. The BACL_FLAG_NATIVE flag gets set again + * when we change from one filesystem to an other. + */ + clear_flag(BACL_FLAG_NATIVE); + Mmsg(jcr->errmsg, _("acl_set_file error on file \"%s\": filesystem doesn't support ACLs\n"), jcr->last_fname); + Dmsg2(100, "acl_set_file error acl=%s file=%s filesystem doesn't support ACLs\n", content, jcr->last_fname); + acl_free(acl); + return bRC_BACL_error; + default: + Mmsg2(jcr->errmsg, _("acl_set_file error on file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + Dmsg3(100, "acl_set_file error acl=%s file=%s ERR=%s\n", content, jcr->last_fname, be.bstrerror()); + acl_free(acl); + return bRC_BACL_error; + } + } + acl_free(acl); + return bRC_BACL_ok; +}; + +#endif /* HAVE_ACL */ + +#endif /* HAVE_LINUX_OS */ diff --git a/src/filed/bacl_linux.h b/src/filed/bacl_linux.h new file mode 100644 index 00000000..4ed131d6 --- /dev/null +++ b/src/filed/bacl_linux.h @@ -0,0 +1,64 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. + */ +/** + * Major refactoring of ACL code written by: + * + * Radosław Korzeniewski, MMXVI + * radoslaw@korzeniewski.net, radekk@inteos.pl + * Inteos Sp. z o.o. 
http://www.inteos.pl/ + * + */ + +#ifndef __BACL_LINUX_H_ +#define __BACL_LINUX_H_ + +#if defined(HAVE_LINUX_OS) +#include + +/* check if ACL support is enabled */ +#if defined(HAVE_ACL) + +#ifdef HAVE_SYS_ACL_H +#include +#else +#error "configure failed to detect availability of sys/acl.h" +#endif + +/* + * + * + */ +class BACL_Linux : public BACL { +private: + bRC_BACL os_backup_acl (JCR *jcr, FF_PKT *ff_pkt); + bRC_BACL os_restore_acl (JCR *jcr, int stream, char *content, uint32_t length); + bRC_BACL os_get_acl(JCR *jcr, BACL_type bacltype); + bRC_BACL os_set_acl(JCR *jcr, BACL_type bacltype, char *content, uint32_t length); + acl_type_t get_acltype(BACL_type bacltype); + int acl_nrentries(acl_t acl); + bool acl_issimple(acl_t acl); +public: + BACL_Linux (); +}; + +#endif /* HAVE_ACL */ + +#endif /* HAVE_LINUX_OS */ + +#endif /* __BACL_LINUX_H_ */ diff --git a/src/filed/bacl_osx.c b/src/filed/bacl_osx.c new file mode 100644 index 00000000..82a3d749 --- /dev/null +++ b/src/filed/bacl_osx.c @@ -0,0 +1,294 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. + */ +/** + * Major refactoring of ACL code written by: + * + * Radosław Korzeniewski, MMXVI + * radoslaw@korzeniewski.net, radekk@inteos.pl + * Inteos Sp. z o.o. 
http://www.inteos.pl/ + * + */ + +#include "bacula.h" +#include "filed.h" +#include "bacl_osx.h" + +#if defined(HAVE_DARWIN_OS) + +/* check if ACL support is enabled */ +#if defined(HAVE_ACL) + +/* + * Define the supported ACL streams for this OS + */ +static const int os_acl_streams[] = { + STREAM_XACL_DARWIN_ACCESS, + 0 +}; + +static const int os_default_acl_streams[] = { + 0 +}; + +/* + * OS specific constructor + */ +BACL_OSX::BACL_OSX(){ + + set_acl_streams(os_acl_streams, os_default_acl_streams); +}; + +/* + * Translates Bacula internal acl representation into + * acl type + * + * in: + * bacltype - internal Bacula acl type (BACL_type) + * out: + * acl_type_t - os dependent acl type + * when failed - ACL_TYPE_NONE is returned + */ +acl_type_t BACL_OSX::get_acltype(BACL_type bacltype){ + + acl_type_t acltype; + + switch (bacltype){ + case BACL_TYPE_ACCESS: + acltype = ACL_TYPE_ACCESS; + break; + #ifdef HAVE_ACL_TYPE_EXTENDED + case BACL_TYPE_EXTENDED: + acltype = ACL_TYPE_EXTENDED; + break; + #endif + default: + /* + * sanity check for acl's not supported by OS + */ + acltype = (acl_type_t)ACL_TYPE_NONE; + break; + } + return acltype; +}; + +/* + * Counts a number of acl entries + * + * in: + * acl - acl object + * out: + * int - number of entries in acl object + * when no acl entry available or any error then return zero '0' + */ +int BACL_OSX::acl_nrentries(acl_t acl){ + + int nr = 0; + acl_entry_t entry; + int rc; + + rc = acl_get_entry(acl, ACL_FIRST_ENTRY, &entry); + while (rc == 0){ + nr++; + rc = acl_get_entry(acl, ACL_NEXT_ENTRY, &entry); + } + + return nr; +}; + +/* + * Perform OS specific ACL backup + * + * in/out - check API at bacl.h + */ +bRC_BACL BACL_OSX::os_backup_acl (JCR *jcr, FF_PKT *ff_pkt){ + + /* check input data */ + if (jcr == NULL || ff_pkt == NULL){ + return bRC_BACL_inval; + } + +#if defined(HAVE_ACL_TYPE_EXTENDED) + /* + * Use BACL_TYPE_EXTENDED only when available + */ + Dmsg0(400, "MacOSX Extended ACL computed\n"); + if (os_get_acl(jcr, BACL_TYPE_EXTENDED) == bRC_BACL_fatal){ + return bRC_BACL_fatal; + } +#else + Dmsg0(400, "MacOSX standard ACL computed\n"); + if (os_get_acl(jcr, BACL_TYPE_ACCESS) == bRC_BACL_fatal){ + return bRC_BACL_fatal; + } +#endif + + return send_acl_stream(jcr, STREAM_XACL_DARWIN_ACCESS); +}; + +/* + * Perform OS specific ACL restore + * + * in/out - check API at bacl.h + */ +bRC_BACL BACL_OSX::os_restore_acl (JCR *jcr, int stream, char *content, uint32_t length){ + +#if defined(HAVE_ACL_TYPE_EXTENDED) + return os_set_acl(jcr, BACL_TYPE_EXTENDED, content, length); +#else + return os_set_acl(jcr, BACL_TYPE_ACCESS, content, length); +#endif +}; + +/* + * Low level OS specific runtime to get ACL data from file. 
The ACL data is set in internal content buffer + * + * in/out - check API at bacl.h + */ +bRC_BACL BACL_OSX::os_get_acl(JCR *jcr, BACL_type bacltype){ + + acl_t acl; + acl_type_t acltype; + char *acltext; + bRC_BACL rc = bRC_BACL_ok; + + /* check input data */ + if (jcr == NULL){ + return bRC_BACL_inval; + } + + acltype = get_acltype(bacltype); + acl = acl_get_file(jcr->last_fname, acltype); + + if (acl){ + Dmsg1(400, "OS_ACL read from file: %s\n",jcr->last_fname); + if (acl_nrentries(acl) == 0){ + goto bail_out; + } + + if ((acltext = acl_to_text(acl, NULL)) != NULL){ + set_content(acltext); + acl_free(acl); + acl_free(acltext); + return bRC_BACL_ok; + } + + berrno be; + + Mmsg2(jcr->errmsg, _("acl_to_text error on file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + Dmsg2(100, "acl_to_text error file=%s ERR=%s\n", jcr->last_fname, be.bstrerror()); + + rc = bRC_BACL_error; + } else { + berrno be; + + switch (errno){ + case EOPNOTSUPP: + /* fs does not support acl, skip it */ + Dmsg0(400, "Wow, ACL is not supported on this filesystem\n"); + clear_flag(BACL_FLAG_NATIVE); + break; + case ENOENT: + break; + default: + /* Some real error */ + Mmsg2(jcr->errmsg, _("acl_get_file error on file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + Dmsg2(100, "acl_get_file error file=%s ERR=%s\n", jcr->last_fname, be.bstrerror()); + rc = bRC_BACL_error; + break; + } + } + +bail_out: + if (acl){ + acl_free(acl); + } + /* + * it is a bit of hardcore to clear a poolmemory with a NULL pointer, + * but it is working, hehe :) + * you may ask why it is working? it is simple, a pm_strcpy function is handling + * a null pointer with a substitiution of empty string. + */ + set_content(NULL); + return rc; +}; + +/* + * Low level OS specific runtime to set ACL data on file + * + * in/out - check API at bacl.h + */ +bRC_BACL BACL_OSX::os_set_acl(JCR *jcr, BACL_type bacltype, char *content, uint32_t length){ + + acl_t acl; + acl_type_t acltype; + + /* check input data */ + if (jcr == NULL || content == NULL){ + return bRC_BACL_inval; + } + + acl = acl_from_text(content); + if (acl == NULL){ + berrno be; + + Mmsg2(jcr->errmsg, _("acl_from_text error on file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + Dmsg3(100, "acl_from_text error acl=%s file=%s ERR=%s\n", content, jcr->last_fname, be.bstrerror()); + return bRC_BACL_error; + } + + acltype = get_acltype(bacltype); + + /* + * Restore the ACLs, but don't complain about links which really should + * not have attributes, and the file it is linked to may not yet be restored. + * This is only true for the old acl streams as in the new implementation we + * don't save acls of symlinks (which cannot have acls anyhow) + */ + if (acl_set_file(jcr->last_fname, acltype, acl) != 0 && jcr->last_type != FT_LNK){ + berrno be; + switch (errno){ + case ENOENT: + acl_free(acl); + return bRC_BACL_ok; + case EOPNOTSUPP: + /* + * If the filesystem reports it doesn't support ACLs we clear the + * BACL_FLAG_NATIVE flag so we skip ACL restores on all other files + * on the same filesystem. The BACL_FLAG_NATIVE flag gets set again + * when we change from one filesystem to an other. 
+ */ + clear_flag(BACL_FLAG_NATIVE); + Mmsg1(jcr->errmsg, _("acl_set_file error on file \"%s\": filesystem doesn't support ACLs\n"), jcr->last_fname); + Dmsg2(100, "acl_set_file error acl=%s file=%s filesystem doesn't support ACLs\n", content, jcr->last_fname); + acl_free(acl); + return bRC_BACL_error; + default: + Mmsg2(jcr->errmsg, _("acl_set_file error on file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + Dmsg3(100, "acl_set_file error acl=%s file=%s ERR=%s\n", content, jcr->last_fname, be.bstrerror()); + acl_free(acl); + return bRC_BACL_error; + } + } + acl_free(acl); + return bRC_BACL_ok; +}; + +#endif /* HAVE_ACL */ + +#endif /* HAVE_DARWIN_OS */ diff --git a/src/filed/bacl_osx.h b/src/filed/bacl_osx.h new file mode 100644 index 00000000..a148dd08 --- /dev/null +++ b/src/filed/bacl_osx.h @@ -0,0 +1,64 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. + */ +/** + * Major refactoring of ACL code written by: + * + * Radosław Korzeniewski, MMXVI + * radoslaw@korzeniewski.net, radekk@inteos.pl + * Inteos Sp. z o.o. http://www.inteos.pl/ + * + */ + +#ifndef __BACL_OSX_H_ +#define __BACL_OSX_H_ + +#if defined(HAVE_DARWIN_OS) +#include + +/* check if ACL support is enabled */ +#if defined(HAVE_ACL) + +#ifdef HAVE_SYS_ACL_H +#include +#else +#error "configure failed to detect availability of sys/acl.h" +#endif + +/* + * + * + */ +class BACL_OSX : public BACL { +private: + bRC_BACL os_backup_acl (JCR *jcr, FF_PKT *ff_pkt); + bRC_BACL os_restore_acl (JCR *jcr, int stream, char *content, uint32_t length); + bRC_BACL os_get_acl(JCR *jcr, BACL_type bacltype); + bRC_BACL os_set_acl(JCR *jcr, BACL_type bacltype, char *content, uint32_t length); + /* requires acl.h available */ + acl_type_t get_acltype(BACL_type bacltype); + int acl_nrentries(acl_t acl); +public: + BACL_OSX (); +}; + +#endif /* HAVE_ACL */ + +#endif /* HAVE_DARWIN_OS */ + +#endif /* __BACL_OSX_H_ */ diff --git a/src/filed/bacl_solaris.c b/src/filed/bacl_solaris.c new file mode 100644 index 00000000..d3a725c9 --- /dev/null +++ b/src/filed/bacl_solaris.c @@ -0,0 +1,324 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. + */ +/** + * Major refactoring of ACL code written by: + * + * Radosław Korzeniewski, MMXVI + * radoslaw@korzeniewski.net, radekk@inteos.pl + * Inteos Sp. z o.o. 
http://www.inteos.pl/ + * + */ + +#include "bacula.h" +#include "filed.h" +#include "bacl_solaris.h" + +#if defined(HAVE_SUN_OS) + +/* check if ACL support is enabled */ +#if defined(HAVE_ACL) + +/* + * Define the supported ACL streams for this OS + */ +static const int os_acl_streams[] = { + STREAM_XACL_SOLARIS_POSIX, + STREAM_XACL_SOLARIS_NFS4, + 0 +}; + +static const int os_default_acl_streams[] = { + 0 +}; + +/* + * OS specific constructor + */ +BACL_Solaris::BACL_Solaris(){ + + set_acl_streams(os_acl_streams, os_default_acl_streams); + cache = NULL; +}; + +/* + * OS specific destructor + */ +BACL_Solaris::~BACL_Solaris(){}; + +/* + * Checks if ACL's are available for a specified file + * + * in: + * jcr - Job Control Record + * name - specifies the system variable to be queried + * out: + * bRC_BACL_ok - check successful, lets setup bacltype variable + * bRC_BACL_error - in case of error + * bRC_BACL_skip - you should skip all other routine + */ +bRC_BACL BACL_Solaris::check_bacltype (JCR *jcr, int name){ + + int rc = 0; + + rc = pathconf(jcr->last_fname, name); + switch (rc){ + case -1: { + /* some error check why */ + berrno be; + if (errno == ENOENT){ + /* file does not exist skip it */ + return bRC_BACL_skip; + } else { + Mmsg2(jcr->errmsg, _("pathconf error on file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + Dmsg2(100, "pathconf error file=%s ERR=%s\n", jcr->last_fname, be.bstrerror()); + return bRC_BACL_error; + } + } + case 0: + /* No support for ACLs */ + clear_flag(BACL_FLAG_NATIVE); + set_content(NULL); + return bRC_BACL_skip; + default: + break; + } + return bRC_BACL_ok; +}; + +/* + * Perform OS specific ACL backup + * + * in/out - check API at bacl.h + */ +bRC_BACL BACL_Solaris::os_backup_acl (JCR *jcr, FF_PKT *ff_pkt){ + + bRC_BACL rc; + int stream; + + /* + * See if filesystem supports acls. 
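+    * On Solaris pathconf(_PC_ACL_ENABLED) returns a bitmask of the acl
+    * flavours the filesystem supports (_ACL_ACLENT_ENABLED for POSIX-draft
+    * aclent_t acls, _ACL_ACE_ENABLED for NFSv4 ace_t acls); zero means the
+    * filesystem has no acl support at all.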
+ */ + rc = check_bacltype(jcr, _PC_ACL_ENABLED); + switch (rc){ + case bRC_BACL_ok: + break; + case bRC_BACL_skip: + return bRC_BACL_ok; + default: + /* errors */ + return rc; + } + + rc = os_get_acl(jcr, &stream); + switch (rc){ + case bRC_BACL_ok: + if (get_content_len() > 0){ + if (send_acl_stream(jcr, stream) == bRC_BACL_fatal){ + return bRC_BACL_fatal; + } + } + break; + default: + return rc; + } + + return bRC_BACL_ok; +}; + +/* + * Perform OS specific ACL restore + * + * in/out - check API at bacl.h + */ +bRC_BACL BACL_Solaris::os_restore_acl (JCR *jcr, int stream, char *content, uint32_t length){ + + int aclrc = 0; + + switch (stream){ + case STREAM_UNIX_ACCESS_ACL: + case STREAM_XACL_SOLARIS_POSIX: + case STREAM_XACL_SOLARIS_NFS4: + aclrc = pathconf(jcr->last_fname, _PC_ACL_ENABLED); + break; + default: + return bRC_BACL_error; + } + + switch (aclrc){ + case -1: { + berrno be; + + switch (errno){ + case ENOENT: + return bRC_BACL_ok; + default: + Mmsg2(jcr->errmsg, _("pathconf error on file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + Dmsg3(100, "pathconf error acl=%s file=%s ERR=%s\n", content, jcr->last_fname, be.bstrerror()); + return bRC_BACL_error; + } + } + case 0: + clear_flag(BACL_FLAG_NATIVE); + Mmsg(jcr->errmsg, _("Trying to restore acl on file \"%s\" on filesystem without acl support\n"), jcr->last_fname); + return bRC_BACL_error; + default: + break; + } + + Dmsg2(400, "restore acl stream %i on file: %s\n", stream, jcr->last_fname); + switch (stream){ + case STREAM_XACL_SOLARIS_POSIX: + if ((aclrc & (_ACL_ACLENT_ENABLED | _ACL_ACE_ENABLED)) == 0){ + Mmsg(jcr->errmsg, _("Trying to restore POSIX acl on file \"%s\" on filesystem without aclent acl support\n"), jcr->last_fname); + return bRC_BACL_error; + } + break; + case STREAM_XACL_SOLARIS_NFS4: + if ((aclrc & _ACL_ACE_ENABLED) == 0){ + Mmsg(jcr->errmsg, _("Trying to restore NFSv4 acl on file \"%s\" on filesystem without ace acl support\n"), jcr->last_fname); + return bRC_BACL_error; + } + break; + default: + break; + } + + return os_set_acl(jcr, stream, content, length); +}; + +/* + * Low level OS specific runtime to get ACL data from file. The ACL data is set in internal content buffer + * + * in/out - check API at bacl.h + */ +bRC_BACL BACL_Solaris::os_get_acl(JCR *jcr, int *stream){ + + int flags; + acl_t *aclp; + char *acl_text; + bRC_BACL rc = bRC_BACL_ok; + + if (!stream){ + return bRC_BACL_fatal; + } + + if (acl_get(jcr->last_fname, ACL_NO_TRIVIAL, &aclp) != 0){ + /* we've got some error */ + berrno be; + switch (errno){ + case ENOENT: + /* file does not exist */ + return bRC_BACL_ok; + default: + Mmsg2(jcr->errmsg, _("acl_get error on file \"%s\": ERR=%s\n"), jcr->last_fname, acl_strerror(errno)); + Dmsg2(100, "acl_get error file=%s ERR=%s\n", jcr->last_fname, acl_strerror(errno)); + return bRC_BACL_error; + } + } + + if (!aclp){ + /* + * The ACLs simply reflect the (already known) standard permissions + * So we don't send an ACL stream to the SD. 
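+       * (acl_get() was called with ACL_NO_TRIVIAL above, so aclp == NULL here
+       * means the file only carries a trivial acl derived from its mode bits)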
+ */ + set_content(NULL); + return bRC_BACL_ok; + } + +#if defined(ACL_SID_FMT) + /* new format flag added in newer Solaris versions */ + flags = ACL_APPEND_ID | ACL_COMPACT_FMT | ACL_SID_FMT; +#else + flags = ACL_APPEND_ID | ACL_COMPACT_FMT; +#endif /* ACL_SID_FMT */ + + if ((acl_text = acl_totext(aclp, flags)) != NULL){ + set_content(acl_text); + switch (acl_type(aclp)){ + case ACLENT_T: + *stream = STREAM_XACL_SOLARIS_POSIX; + Dmsg1(500, "found acl SOLARIS_POSIX: %s\n", acl_text); + break; + case ACE_T: + *stream = STREAM_XACL_SOLARIS_NFS4; + Dmsg1(500, "found acl SOLARIS_NFS4: %s\n", acl_text); + break; + default: + rc = bRC_BACL_error; + break; + } + actuallyfree(acl_text); + acl_free(aclp); + } + return rc; +}; + +/* + * Low level OS specific runtime to set ACL data on file + * + * in/out - check API at bacl.h + */ +bRC_BACL BACL_Solaris::os_set_acl(JCR *jcr, int stream, char *content, uint32_t length){ + + int rc; + acl_t *aclp; + + if ((rc = acl_fromtext(content, &aclp)) != 0){ + Mmsg2(jcr->errmsg, _("acl_fromtext error on file \"%s\": ERR=%s\n"), jcr->last_fname, acl_strerror(rc)); + Dmsg3(100, "acl_fromtext error acl=%s file=%s ERR=%s\n", content, jcr->last_fname, acl_strerror(rc)); + return bRC_BACL_error; + } + + switch (stream){ + case STREAM_XACL_SOLARIS_POSIX: + if (acl_type(aclp) != ACLENT_T){ + Mmsg(jcr->errmsg, _("wrong encoding of acl type in acl stream on file \"%s\"\n"), jcr->last_fname); + return bRC_BACL_error; + } + break; + case STREAM_XACL_SOLARIS_NFS4: + if (acl_type(aclp) != ACE_T){ + Mmsg(jcr->errmsg, _("wrong encoding of acl type in acl stream on file \"%s\"\n"), jcr->last_fname); + return bRC_BACL_error; + } + break; + default: + break; + } + + if ((rc = acl_set(jcr->last_fname, aclp)) == -1 && jcr->last_type != FT_LNK){ + switch (errno){ + case ENOENT: + acl_free(aclp); + return bRC_BACL_ok; + default: + Mmsg2(jcr->errmsg, _("acl_set error on file \"%s\": ERR=%s\n"), jcr->last_fname, acl_strerror(rc)); + Dmsg3(100, "acl_set error acl=%s file=%s ERR=%s\n", content, jcr->last_fname, acl_strerror(rc)); + acl_free(aclp); + return bRC_BACL_error; + } + } + + acl_free(aclp); + return bRC_BACL_ok; +}; + +#endif /* HAVE_ACL */ + +#endif /* HAVE_SUN_OS */ diff --git a/src/filed/bacl_solaris.h b/src/filed/bacl_solaris.h new file mode 100644 index 00000000..d074f2aa --- /dev/null +++ b/src/filed/bacl_solaris.h @@ -0,0 +1,84 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. + */ +/** + * Major refactoring of ACL code written by: + * + * Radosław Korzeniewski, MMXVI + * radoslaw@korzeniewski.net, radekk@inteos.pl + * Inteos Sp. z o.o. http://www.inteos.pl/ + * + */ + +#ifndef __BACL_Solaris_H_ +#define __BACL_Solaris_H_ + +#if defined(HAVE_SUN_OS) + +/* check if ACL support is enabled */ +#if defined(HAVE_ACL) + +#ifdef HAVE_SYS_ACL_H +#include +#else +#error "configure failed to detect availability of sys/acl.h" +#endif + +/* + * As the header acl.h doesn't seem to define this one we need to. 
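+ * Declaring the prototypes manually keeps the acl_type() and acl_strerror()
+ * calls in bacl_solaris.c compiling cleanly under C++ even when sys/acl.h
+ * does not expose them.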
+ */ +extern "C" { +int acl_type(acl_t *); +char *acl_strerror(int); +}; + +/* + * + */ +#if defined(HAVE_EXTENDED_ACL) +#if !defined(_SYS_ACL_IMPL_H) +typedef enum acl_type { + ACLENT_T = 0, + ACE_T = 1 +} acl_type_t; +#endif +#endif + +/* + * + * + */ +class BACL_Solaris : public BACL { +private: + alist * cache; + bRC_BACL os_backup_acl (JCR *jcr, FF_PKT *ff_pkt); + bRC_BACL os_restore_acl (JCR *jcr, int stream, char *content, uint32_t length); + bRC_BACL os_get_acl(JCR *jcr, int *stream); + bRC_BACL os_set_acl(JCR *jcr, int stream, char *content, uint32_t length); + /* requires acl.h available */ + bRC_BACL check_bacltype (JCR *jcr, int name); +public: + BACL_Solaris (); + ~BACL_Solaris (); +}; + +#endif /* HAVE_ACL */ + +#endif /* HAVE_SUN_OS */ + +#endif /* __BACL_Solaris_H_ */ diff --git a/src/filed/bacula-fd.conf.in b/src/filed/bacula-fd.conf.in new file mode 100644 index 00000000..0770d9e6 --- /dev/null +++ b/src/filed/bacula-fd.conf.in @@ -0,0 +1,48 @@ +# +# Default Bacula File Daemon Configuration file +# +# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ @DISTVER@ +# +# There is not much to change here except perhaps the +# File daemon Name to +# +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# + +# +# List Directors who are permitted to contact this File daemon +# +Director { + Name = @basename@-dir + Password = "@fd_password@" +} + +# +# Restricted Director, used by tray-monitor to get the +# status of the file daemon +# +Director { + Name = @basename@-mon + Password = "@mon_fd_password@" + Monitor = yes +} + +# +# "Global" File daemon configuration specifications +# +FileDaemon { # this is me + Name = @basename@-fd + FDport = @fd_port@ # where we listen for the director + WorkingDirectory = @working_dir@ + Pid Directory = @piddir@ + Maximum Concurrent Jobs = 20 + Plugin Directory = @plugindir@ +} + +# Send all messages except skipped files back to Director +Messages { + Name = Standard + director = @basename@-dir = all, !skipped, !restored +} diff --git a/src/filed/bfdjson.c b/src/filed/bfdjson.c new file mode 100644 index 00000000..a3a87bec --- /dev/null +++ b/src/filed/bfdjson.c @@ -0,0 +1,642 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula File Daemon to Json + * + * Kern Sibbald, Sept MMXII + * + */ + +#include "bacula.h" +#include "filed.h" + +/* Imported Functions */ +extern bool parse_fd_config(CONFIG *config, const char *configfile, int exit_code); +void store_msgs(LEX *lc, RES_ITEM *item, int index, int pass); + +typedef struct +{ + /* default { { "Director": { "Name": aa, ...} }, { "Job": {..} */ + bool do_list; /* [ {}, {}, ..] or { "aa": {}, "bb": {}, ...} */ + bool do_one; /* { "Name": "aa", "Description": "test, ... 
} */ + bool do_only_data; /* [ {}, {}, {}, ] */ + char *resource_type; + char *resource_name; + regex_t directive_reg; +} display_filter; + +/* Forward referenced functions */ +static bool check_resources(); +static void sendit(void *sock, const char *fmt, ...); +static void dump_json(display_filter *filter); + +/* Exported variables */ +CLIENT *me; /* my resource */ +#if defined(_MSC_VER) +extern "C" { // work around visual compiler mangling variables + extern URES res_all; +} +#else +extern URES res_all; +#endif +extern s_kw msg_types[]; +extern s_ct ciphertypes[]; +extern s_ct digesttypes[]; +extern RES_TABLE resources[]; + +#define CONFIG_FILE "bacula-fd.conf" /* default config file */ + +char *configfile = NULL; +static CONFIG *config; + +static void usage() +{ + fprintf(stderr, _( +PROG_COPYRIGHT +"\n%sVersion: %s (%s)\n\n" +"Usage: bfdjson [options] [config_file]\n" +" -r get resource type \n" +" -n get resource \n" +" -l get only directives matching dirs (use with -r)\n" +" -D get only data\n" +" -c use as configuration file\n" +" -d set debug level to \n" +" -dt print a timestamp in debug output\n" +" -t test configuration file and exit\n" +" -v verbose user messages\n" +" -? print this message.\n" +"\n"), 2012, "", VERSION, BDATE); + + exit(1); +} + +static void display_cipher(HPKT &hpkt) +{ + int i; + for (i=0; ciphertypes[i].type_name; i++) { + if (*(int32_t *)(hpkt.ritem->value) == ciphertypes[i].type_value) { + sendit(NULL, "\n \"%s\": \"%s\"", hpkt.ritem->name, + ciphertypes[i].type_name); + return; + } + } +} + +static void display_digest(HPKT &hpkt) +{ + int i; + for (i=0; digesttypes[i].type_name; i++) { + if (*(int32_t *)(hpkt.ritem->value) == digesttypes[i].type_value) { + sendit(NULL, "\n \"%s\": \"%s\"", hpkt.ritem->name, + digesttypes[i].type_name); + return; + } + } +} + + +/********************************************************************* + * + * Bacula File daemon to Json + * + */ + +int main (int argc, char *argv[]) +{ + int ch; + bool test_config = false; + display_filter filter; + memset(&filter, 0, sizeof(filter)); + + setlocale(LC_ALL, ""); + bindtextdomain("bacula", LOCALEDIR); + textdomain("bacula"); + + if (init_crypto() != 0) { + Emsg0(M_ERROR_TERM, 0, _("Cryptography library initialization failed.\n")); + } + + my_name_is(argc, argv, "bacula-fd"); + init_msg(NULL, NULL); + + while ((ch = getopt(argc, argv, "Dr:n:c:d:tv?l:")) != -1) { + switch (ch) { + case 'D': + filter.do_only_data = true; + break; + case 'l': + /* Might use something like -l '^(Name|Description)$' */ + filter.do_list = true; + if (regcomp(&filter.directive_reg, optarg, REG_EXTENDED) != 0) { + Jmsg((JCR *)NULL, M_ERROR_TERM, 0, + _("Please use valid -l argument: %s\n"), optarg); + } + break; + + case 'r': + filter.resource_type = optarg; + break; + + case 'n': + filter.resource_name = optarg; + break; + + case 'c': /* configuration file */ + if (configfile != NULL) { + free(configfile); + } + configfile = bstrdup(optarg); + break; + + case 'd': /* debug level */ + if (*optarg == 't') { + dbg_timestamp = true; + } else { + debug_level = atoi(optarg); + if (debug_level <= 0) { + debug_level = 1; + } + } + break; + + case 't': + test_config = true; + break; + + case 'v': /* verbose */ + verbose++; + break; + + case '?': + default: + usage(); + + } + } + argc -= optind; + argv += optind; + + if (argc) { + if (configfile != NULL) { + free(configfile); + } + configfile = bstrdup(*argv); + argc--; + argv++; + } + if (argc) { + usage(); + } + + if (filter.do_list && !filter.resource_type) { 
+ usage(); + } + + if (filter.resource_type && filter.resource_name) { + filter.do_one = true; + } + + if (configfile == NULL || configfile[0] == 0) { + configfile = bstrdup(CONFIG_FILE); + } + + if (test_config && verbose > 0) { + char buf[1024]; + find_config_file(configfile, buf, sizeof(buf)); + sendit(NULL, "config_file=%s\n", buf); + } + + config = New(CONFIG()); + config->encode_password(false); + parse_fd_config(config, configfile, M_ERROR_TERM); + + if (!check_resources()) { + Emsg1(M_ERROR, 0, _("Please correct configuration file: %s\n"), configfile); + terminate_filed(1); + } + + if (test_config) { + terminate_filed(0); + } + + dump_json(&filter); + + if (filter.do_list) { + regfree(&filter.directive_reg); + } + + terminate_filed(0); + exit(0); /* should never get here */ +} + +void terminate_filed(int sig) +{ + static bool already_here = false; + + if (already_here) { + bmicrosleep(2, 0); /* yield */ + exit(1); /* prevent loops */ + } + already_here = true; + debug_level = 0; /* turn off debug */ + + if (configfile != NULL) { + free(configfile); + } + + if (debug_level > 0) { + print_memory_pool_stats(); + } + if (config) { + delete config; + config = NULL; + } + term_msg(); + free(res_head); + res_head = NULL; + close_memory_pool(); /* release free memory in pool */ + //sm_dump(false); /* dump orphaned buffers */ + exit(sig); +} + +/* + * Dump out all resources in json format. + * Note!!!! This routine must be in this file rather + * than in src/lib/parser_conf.c otherwise the pointers + * will be all messed up. + */ +static void dump_json(display_filter *filter) +{ + int resinx, item, directives, first_directive; + bool first_res; + RES_ITEM *items; + RES *res; + HPKT hpkt; + regmatch_t pmatch[32]; + + init_hpkt(hpkt); + + me = (CLIENT *)GetNextRes(R_CLIENT, NULL); + + if (filter->do_only_data) { + sendit(NULL, "["); + + /* List resources and directives */ + /* { "aa": { "Name": "aa",.. }, "bb": { "Name": "bb", ... } + * or print a single item + */ + } else if (filter->do_one || filter->do_list) { + sendit(NULL, "{"); + + } else { + /* [ { "Client": { "Name": "aa",.. } }, { "Director": { "Name": "bb", ... 
} } ]*/ + sendit(NULL, "["); + } + + first_res = true; + + /* Loop over all resource types */ + for (resinx=0; resources[resinx].name; resinx++) { + /* Skip Client alias */ + if (strcmp(resources[resinx].name, "Client") == 0) { + continue; + } + + /* Skip this resource type */ + if (filter->resource_type && + strcasecmp(filter->resource_type, resources[resinx].name) != 0) { + continue; + } + + directives = 0; + /* Loop over all resources of this type */ + foreach_rblist(res, res_head[resinx]->res_list) { + hpkt.res = res; + items = resources[resinx].items; + if (!items) { + break; + } + /* Copy the resource into res_all */ + memcpy(&res_all, res, sizeof(res_all)); + + if (filter->resource_name) { + bool skip=true; + /* The Name should be at the first place, so this is not a real loop */ + for (item=0; items[item].name; item++) { + if (strcasecmp(items[item].name, "Name") == 0) { + if (strcasecmp(*(items[item].value), filter->resource_name) == 0) { + skip = false; + } + break; + } + } + if (skip) { /* The name doesn't match, so skip it */ + continue; + } + } + + if (!first_res) { + printf(",\n"); + } + + first_directive = 0; + + if (filter->do_only_data) { + sendit(NULL, " {"); + + } else if (filter->do_one) { + /* Nothing to print */ + + /* When sending the list, the form is: + * { aa: { Name: aa, Description: aadesc...}, bb: { Name: bb + */ + } else if (filter->do_list) { + /* Search and display Name, should be the first item */ + for (item=0; items[item].name; item++) { + if (strcmp(items[item].name, "Name") == 0) { + sendit(NULL, "%s: {\n", quote_string(hpkt.edbuf2, *items[item].value)); + break; + } + } + } else { + /* Begin new resource */ + sendit(NULL, "{\n \"%s\": {", resources[resinx].name); + } + + /* dirtry trick for a deprecated directive */ + bool dedup_index_directory_set = false; + + /* Loop over all items in the resource */ + for (item=0; items[item].name; item++) { + /* Check user argument -l */ + if (filter->do_list && + regexec(&filter->directive_reg, + items[item].name, 32, pmatch, 0) != 0) + { + continue; + } + + /* Special tweak for a deprecated variable */ + if (strcmp(items[item].name, "DedupIndexDirectory") == 0) { + dedup_index_directory_set = bit_is_set(item, res_all.hdr.item_present); + continue; + } + if (strcmp(items[item].name, "EnableClientRehydration") == 0) { + if (dedup_index_directory_set && !bit_is_set(item, res_all.hdr.item_present)) { + set_bit(item, res_all.hdr.item_present); + *(bool *)(items[item].value) = true; + } + } + + hpkt.ritem = &items[item]; + if (bit_is_set(item, res_all.hdr.item_present)) { + if (first_directive++ > 0) printf(","); + if (display_global_item(hpkt)) { + /* Fall-through wanted */ + } else if (items[item].handler == store_cipher_type) { + display_cipher(hpkt); + } else if (items[item].handler == store_digest_type) { + display_digest(hpkt); + } else { + printf("\n \"%s\": null", items[item].name); + } + directives++; + } else { /* end if is present */ + /* For some directives, the bit_is_set() is not set (e.g. 
addresses) */ + if (me && strcmp(resources[resinx].name, "FileDaemon") == 0) { + if (strcmp(items[item].name, "FdPort") == 0) { + if (get_first_port_host_order(me->FDaddrs) != items[item].default_value) { + if (first_directive++ > 0) printf(","); + printf("\n \"FdPort\": %d", + get_first_port_host_order(me->FDaddrs)); + } + } else if (me && strcmp(items[item].name, "FdAddress") == 0) { + char buf[500]; + get_first_address(me->FDaddrs, buf, sizeof(buf)); + if (strcmp(buf, "0.0.0.0") != 0) { + if (first_directive++ > 0) printf(","); + printf("\n \"FdAddress\": \"%s\"", buf); + } + } else if (me && strcmp(items[item].name, "FdSourceAddress") == 0 + && me->FDsrc_addr) { + char buf[500]; + get_first_address(me->FDsrc_addr, buf, sizeof(buf)); + if (strcmp(buf, "0.0.0.0") != 0) { + if (first_directive++ > 0) printf(","); + printf("\n \"FdSourceAddress\": \"%s\"", buf); + } + } + } + } + if (items[item].flags & ITEM_LAST) { + display_last(hpkt); /* If last bit set always call to cleanup */ + } + } + + /* { "aa": { "Name": "aa",.. }, "bb": { "Name": "bb", ... } */ + if (filter->do_only_data || filter->do_list) { + sendit(NULL, "\n }"); /* Finish the Resource with a single } */ + + } else { + if (filter->do_one) { + /* don't print anything */ + } else if (first_directive > 0) { + sendit(NULL, "\n }\n}"); /* end of resource */ + } else { + sendit(NULL, "}\n}"); + } + } + first_res = false; + } /* End loop over all resources of this type */ + } /* End loop all resource types */ + + if (filter->do_only_data) { + sendit(NULL, "\n]\n"); + + /* In list context, we are dealing with a hash */ + } else if (filter->do_one || filter->do_list) { + sendit(NULL, "\n}\n"); + + } else { + sendit(NULL, "\n]\n"); + } + term_hpkt(hpkt); +} + + +/* +* Make a quick check to see that we have all the +* resources needed. 
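+* Concretely: exactly one FileDaemon (Client) resource, a Messages resource,
+* at least one Director resource, and usable TLS/PKI settings on the client
+* and on every Director and Console resource.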
+*/ +static bool check_resources() +{ + bool OK = true; + DIRRES *director; + CONSRES *cons; + bool need_tls; + + LockRes(); + + me = (CLIENT *)GetNextRes(R_CLIENT, NULL); + if (!me) { + Emsg1(M_FATAL, 0, _("No File daemon resource defined in %s\n" + "Without that I don't know who I am :-(\n"), configfile); + OK = false; + } else { + if (GetNextRes(R_CLIENT, (RES *) me) != NULL) { + Emsg1(M_FATAL, 0, _("Only one Client resource permitted in %s\n"), + configfile); + OK = false; + } + my_name_is(0, NULL, me->hdr.name); + if (!me->messages) { + me->messages = (MSGS *)GetNextRes(R_MSGS, NULL); + if (!me->messages) { + Emsg1(M_FATAL, 0, _("No Messages resource defined in %s\n"), configfile); + OK = false; + } + } + /* tls_require implies tls_enable */ + if (me->tls_require) { +#ifndef HAVE_TLS + Jmsg(NULL, M_FATAL, 0, _("TLS required but not configured in Bacula.\n")); + OK = false; +#else + me->tls_enable = true; +#endif + } + need_tls = me->tls_enable || me->tls_authenticate; + + if ((!me->tls_ca_certfile && !me->tls_ca_certdir) && need_tls) { + Emsg1(M_FATAL, 0, _("Neither \"TLS CA Certificate\"" + " or \"TLS CA Certificate Dir\" are defined for File daemon in %s.\n"), + configfile); + OK = false; + } + + /* pki_encrypt implies pki_sign */ + if (me->pki_encrypt) { + me->pki_sign = true; + } + + if ((me->pki_encrypt || me->pki_sign) && !me->pki_keypair_file) { + Emsg2(M_FATAL, 0, _("\"PKI Key Pair\" must be defined for File" + " daemon \"%s\" in %s if either \"PKI Sign\" or" + " \"PKI Encrypt\" are enabled.\n"), me->hdr.name, configfile); + OK = false; + } + } + + + /* Verify that a director record exists */ + LockRes(); + director = (DIRRES *)GetNextRes(R_DIRECTOR, NULL); + UnlockRes(); + if (!director) { + Emsg1(M_FATAL, 0, _("No Director resource defined in %s\n"), + configfile); + OK = false; + } + + foreach_res(director, R_DIRECTOR) { + /* tls_require implies tls_enable */ + if (director->tls_require) { +#ifndef HAVE_TLS + Jmsg(NULL, M_FATAL, 0, _("TLS required but not configured in Bacula.\n")); + OK = false; + continue; +#else + director->tls_enable = true; +#endif + } + need_tls = director->tls_enable || director->tls_authenticate; + + if (!director->tls_certfile && need_tls) { + Emsg2(M_FATAL, 0, _("\"TLS Certificate\" file not defined for Director \"%s\" in %s.\n"), + director->hdr.name, configfile); + OK = false; + } + + if (!director->tls_keyfile && need_tls) { + Emsg2(M_FATAL, 0, _("\"TLS Key\" file not defined for Director \"%s\" in %s.\n"), + director->hdr.name, configfile); + OK = false; + } + + if ((!director->tls_ca_certfile && !director->tls_ca_certdir) && need_tls && director->tls_verify_peer) { + Emsg2(M_FATAL, 0, _("Neither \"TLS CA Certificate\"" + " or \"TLS CA Certificate Dir\" are defined for Director \"%s\" in %s." 
+ " At least one CA certificate store is required" + " when using \"TLS Verify Peer\".\n"), + director->hdr.name, configfile); + OK = false; + } + } + + foreach_res(cons, R_CONSOLE) { + /* tls_require implies tls_enable */ + if (cons->tls_require) { +#ifndef HAVE_TLS + Jmsg(NULL, M_FATAL, 0, _("TLS required but not configured in Bacula.\n")); + OK = false; + continue; +#else + cons->tls_enable = true; +#endif + } + need_tls = cons->tls_enable || cons->tls_authenticate; + + if (!cons->tls_certfile && need_tls) { + Emsg2(M_FATAL, 0, _("\"TLS Certificate\" file not defined for Console \"%s\" in %s.\n"), + cons->hdr.name, configfile); + OK = false; + } + + if (!cons->tls_keyfile && need_tls) { + Emsg2(M_FATAL, 0, _("\"TLS Key\" file not defined for Console \"%s\" in %s.\n"), + cons->hdr.name, configfile); + OK = false; + } + + if ((!cons->tls_ca_certfile && !cons->tls_ca_certdir) && need_tls && cons->tls_verify_peer) { + Emsg2(M_FATAL, 0, _("Neither \"TLS CA Certificate\"" + " or \"TLS CA Certificate Dir\" are defined for Console \"%s\" in %s." + " At least one CA certificate store is required" + " when using \"TLS Verify Peer\".\n"), + cons->hdr.name, configfile); + OK = false; + } + } + + UnlockRes(); + + return OK; +} + +static void sendit(void *sock, const char *fmt, ...) +{ + char buf[3000]; + va_list arg_ptr; + + va_start(arg_ptr, fmt); + bvsnprintf(buf, sizeof(buf), (char *)fmt, arg_ptr); + va_end(arg_ptr); + fputs(buf, stdout); + fflush(stdout); +} diff --git a/src/filed/bxattr.c b/src/filed/bxattr.c new file mode 100644 index 00000000..4c1122f5 --- /dev/null +++ b/src/filed/bxattr.c @@ -0,0 +1,954 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. + */ +/** + * Major refactoring of XATTR code written by: + * + * Radosław Korzeniewski, MMXVI + * radoslaw@korzeniewski.net, radekk@inteos.pl + * Inteos Sp. z o.o. http://www.inteos.pl/ + * + * + * A specialized class to handle XATTR in Bacula Enterprise. + * The runtime consist of two parts: + * 1. OS independent class: BXATTR + * 2. OS dependent subclass: BXATTR_* + * + * OS dependent subclasses are available for the following OS: + * - Darwin (OSX) + * - FreeBSD + * - Linux + * - Solaris + * + * OS depended subclasses in progress: + * - AIX (pre-5.3 and post 5.3 acls, acl_get and aclx_get interface) + * - HPUX + * - IRIX + * - Tru64 + * + * XATTRs are saved in OS independent format (Bacula own) and uses different streams + * for all different platforms. In theory it is possible to restore XATTRs from + * particular OS on different OS platform. But this functionality is not available. + * The behavior above is backward compatibility with previous Bacula implementation + * we need to maintain. 
+ * + * During OS specific implementation of BXATTR you need to implement a following methods: + * + * [bxattr] - indicates bxattr function/method to call + * [os] - indicates OS specific function, which could be different on specific OS + * (we use a Linux api calls as an example) + * + * ::os_get_xattr_names (JCR *jcr, int namespace, POOLMEM ** pxlist, uint32_t * xlen) + * + * 1. get a size of the extended attributes list for the file - llistxattr[os] + * in most os'es it is required to have a sufficient space for attributes list + * and we wont allocate too much and too low space + * 2. allocate the buffer of required space + * 3. get an extended attributes list for file - llistxattr[os] + * 4. return allocated space buffer in pxlist and length of the buffer in xlen + * + * ::os_get_xattr_value (JCR *jcr, char * name, char ** pvalue, uint32_t * plen) + * + * 1. get a size of the extended attribute value for the file - lgetxattr[os] + * in most os'es it is required to have a sufficient space for attribute value + * and we wont allocate too much and too low space + * 2. allocate the buffer of required space + * 3. get an extended attribute value for file - lgetxattr[os] + * 4. return allocated space buffer in pvalue and length of the buffer in plen + * + * ::os_backup_xattr (JCR *jcr, FF_PKT *ff_pkt) + * + * 1. get a list of extended attributes (name and value) for a file; in most implementations + * it require to get a separate list of attributes names and separate values for every name, + * so it is: + * 1A. get a list of xattr attribute names available on file - os_get_xattr_names[bxattr] + * 1B. for every attribute name get a value - os_get_xattr_value[bxattr] + * You should skip some OS specific attributes like ACL attributes or NFS4; you can use + * check_xattr_skiplists[bxattr] for this + * 1C. build a list [type alist] of name/value pairs stored in BXATTR_xattr struct + * 2. if the xattr list is not empty then serialize the list using serialize_xattr_stream[bxattr] + * 3. call send_xattr_stream[bxattr] + * + * ::os_set_xattr (JCR *jcr, BXATTR_xattr *xattr) + * + * 1. set xattr on file using name/value in xattr - lsetxattr[os] + * 2. if xattr not supported on filesystem - call clear_flag(BXATTR_FLAG_NATIVE)[bxattr] + * + * ::os_restore_xattr (JCR *jcr, int stream, char *content, uint32_t length) + * + * 1. unserialize backup stream + * 2. 
for every extended attribute restored call os_set_xattr[bxattr] to set this attribute on file + */ + +#include "bacula.h" +#include "filed.h" +#include "fd_plugins.h" + +/* check if XATTR support is enabled */ +#if defined(HAVE_XATTR) + +/* + * This is a constructor of the base BXATTR class which is OS independent + * + * - for initialization it uses ::init() + * + */ +BXATTR::BXATTR (){ + init(); +}; + +/* + * This is a destructor of the BXATTR class + */ +BXATTR::~BXATTR (){ + free_pool_memory(content); +}; + +/* + * Initialization routine + * - initializes all variables to required status + * - allocates required memory + */ +void BXATTR::init(){ + +#if defined(HAVE_XATTR) + xattr_ena = TRUE; +#else + xattr_ena = FALSE; +#endif + + /* generic variables */ + flags = BXATTR_FLAG_NONE; + current_dev = 0; + content = get_pool_memory(PM_BSOCK); /* it is better to have a 4k buffer */ + content_len = 0; + xattr_nr_errors = 0; + xattr_streams = NULL; + xattr_skiplist = NULL; + xattr_acl_skiplist = NULL; +}; + +/* + * Enables XATTR handling in runtime, could be disabled with disable_xattr + * when XATTR is not configured then cannot change status + */ +void BXATTR::enable_xattr(){ +#ifdef HAVE_XATTR + xattr_ena = TRUE; +#endif +}; + +/* + * Disables XATTR handling in runtime, could be enabled with enable_xattr + * when XATTR is configured + */ +void BXATTR::disable_xattr(){ + xattr_ena = FALSE; +}; + +/* + * Copies a text into a content variable and sets a content_len respectively + * + * in: + * text - a standard null terminated string + * out: + * pointer to content variable to use externally + */ +POOLMEM * BXATTR::set_content(char *text){ + content_len = pm_strcpy(&content, text); + if (content_len > 0){ + /* count the nul terminated char */ + content_len++; + } + // Dmsg2(400, "BXATTR::set_content: %p %i\n", text, content_len); + return content; +}; + +/* + * Copies a data with length of len into a content variable + * + * in: + * data - data pointer to copy into content buffer + * out: + * pointer to content variable to use externally + */ +POOLMEM * BXATTR::set_content(char *data, int len){ + content_len = pm_memcpy(&content, data, len); + return content; +}; + +/* + * Check if we changed the device, + * if so setup a flags + * + * in: + * jcr - Job Control Record + * out: + * bRC_BXATTR_ok - change of device checked and finish successful + * bRC_BXATTR_error - encountered error + * bRC_BXATTR_skip - cannot verify device - no file found + * bRC_BXATTR_inval - invalid input data + */ +bRC_BXATTR BXATTR::check_dev (JCR *jcr){ + + int lst; + struct stat st; + + /* sanity check of input variables */ + if (jcr == NULL || jcr->last_fname == NULL){ + return bRC_BXATTR_inval; + } + + lst = lstat(jcr->last_fname, &st); + switch (lst){ + case -1: { + berrno be; + switch (errno){ + case ENOENT: + return bRC_BXATTR_skip; + default: + Mmsg2(jcr->errmsg, _("Unable to stat file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + Dmsg2(100, "Unable to stat file \"%s\": ERR=%s\n", jcr->last_fname, be.bstrerror()); + return bRC_BXATTR_error; + } + break; + } + case 0: + break; + } + + check_dev(jcr, st.st_dev); + + return bRC_BXATTR_ok; +}; + +/* + * Check if we changed the device, if so setup a flags + * + * in: + * jcr - Job Control Record + * out: + * internal flags status set + */ +void BXATTR::check_dev (JCR *jcr, uint32_t dev){ + + /* sanity check of input variables */ + if (jcr == NULL || jcr->last_fname == NULL){ + return; + } + + if (current_dev != dev){ + flags = BXATTR_FLAG_NONE; + 
set_flag(BXATTR_FLAG_NATIVE); + current_dev = dev; + } +}; + +/* + * It sends a stream located in this->content to Storage Daemon, so the main Bacula + * backup loop is free from this. It sends a header followed by data. + * + * in: + * jcr - Job Control Record + * stream - a stream number to save + * out: + * bRC_BXATTR_inval - when supplied variables are incorrect + * bRC_BXATTR_fatal - when we can't send data to the SD + * bRC_BXATTR_ok - send finish without errors + */ +bRC_BXATTR BXATTR::send_xattr_stream(JCR *jcr, int stream){ + + BSOCK * sd; + POOLMEM * msgsave; +#ifdef FD_NO_SEND_TEST + return bRC_BXATTR_ok; +#endif + + /* sanity check of input variables */ + if (jcr == NULL || jcr->store_bsock == NULL){ + return bRC_BXATTR_inval; + } + if (content_len <= 0){ + return bRC_BXATTR_ok; + } + + sd = jcr->store_bsock; + /* send header */ + if (!sd->fsend("%ld %d 0", jcr->JobFiles, stream)){ + Jmsg1(jcr, M_FATAL, 0, _("Network send error to SD. ERR=%s\n"), sd->bstrerror()); + return bRC_BXATTR_fatal; + } + + /* send the buffer to the storage daemon */ + Dmsg1(400, "Backing up XATTR: %i\n", content_len); +#if 0 + POOL_MEM tmp(PM_FNAME); + pm_memcpy(tmp, content, content_len); + Dmsg2(400, "Backing up XATTR: (%i) <%s>\n", strlen(tmp.addr()), tmp.c_str()); +#endif + msgsave = sd->msg; + sd->msg = content; + sd->msglen = content_len; + if (!sd->send()){ + sd->msg = msgsave; + sd->msglen = 0; + Jmsg1(jcr, M_FATAL, 0, _("Network send error to SD. ERR=%s\n"), sd->bstrerror()); + return bRC_BXATTR_fatal; + } + + jcr->JobBytes += sd->msglen; + sd->msg = msgsave; + if (!sd->signal(BNET_EOD)){ + Jmsg1(jcr, M_FATAL, 0, _("Network send error to SD. ERR=%s\n"), sd->bstrerror()); + return bRC_BXATTR_fatal; + } + Dmsg1(200, "XATTR of file: %s successfully backed up!\n", jcr->last_fname); + return bRC_BXATTR_ok; +}; + +/* + * The main public backup method for XATTR + * + * in: + * jcr - Job Control Record + * ff_pkt - file backup record + * out: + * bRC_BXATTR_fatal - when XATTR backup is not compiled in Bacula + * bRC_BXATTR_ok - backup finish without problems + * bRC_BXATTR_error - when you can't backup xattr data because some error + */ +bRC_BXATTR BXATTR::backup_xattr (JCR *jcr, FF_PKT *ff_pkt){ + +#if !defined(HAVE_XATTR) + Jmsg(jcr, M_FATAL, 0, "XATTR backup requested but not configured in Bacula.\n"); + return bRC_BXATTR_fatal; +#else + /* sanity check of input variables and verify if engine is enabled */ + if (xattr_ena && jcr != NULL && ff_pkt != NULL){ + /* xattr engine enabled, proceed */ + bRC_BXATTR rc; + + jcr->errmsg[0] = 0; + /* check if we have a plugin generated backup */ + if (ff_pkt->cmd_plugin){ + rc = backup_plugin_xattr(jcr, ff_pkt); + } else { + /* Check for xattrsupport flag */ + if (!(ff_pkt->flags & FO_XATTR && !ff_pkt->cmd_plugin)){ + return bRC_BXATTR_ok; + } + + check_dev(jcr, ff_pkt->statp.st_dev); + + if (flags & BXATTR_FLAG_NATIVE){ + Dmsg0(400, "make Native XATTR call\n"); + rc = os_backup_xattr(jcr, ff_pkt); + } else { + /* skip xattr backup */ + return bRC_BXATTR_ok; + } + + } + + if (rc == bRC_BXATTR_error){ + if (xattr_nr_errors < XATTR_MAX_ERROR_PRINT_PER_JOB){ + if (!jcr->errmsg[0]){ + Jmsg(jcr, M_WARNING, 0, "No OS XATTR configured.\n"); + } else { + Jmsg(jcr, M_WARNING, 0, "%s", jcr->errmsg); + } + inc_xattr_errors(); + } + return bRC_BXATTR_ok; + } + return rc; + } + return bRC_BXATTR_ok; +#endif +}; + +/* + * The main public restore method for XATTR + * + * in: + * jcr - Job Control Record + * stream - a backup stream type number to restore_acl + * data - a 
potinter to the data stream to restore + * length - a data stream length + * out: + * bRC_BXATTR_fatal - when XATTR restore is not compiled in Bacula + * bRC_BXATTR_ok - restore finish without problems + * bRC_BXATTR_error - when you can't restore a stream because some error + */ +bRC_BXATTR BXATTR::restore_xattr (JCR *jcr, int stream, char *data, uint32_t length){ + +#if !defined(HAVE_XATTR) + Jmsg(jcr, M_FATAL, 0, "XATTR retore requested but not configured in Bacula.\n"); + return bRC_BXATTR_fatal; +#else + /* sanity check of input variables and verify if engine is enabled */ + if (xattr_ena && jcr != NULL && data != NULL){ + /* xattr engine enabled, proceed */ + int a; + bRC_BXATTR rc; + + /* check_dev supported on real fs only */ + if (stream != STREAM_XACL_PLUGIN_XATTR){ + rc = check_dev(jcr); + + switch (rc){ + case bRC_BXATTR_skip: + return bRC_BXATTR_ok; + case bRC_BXATTR_ok: + break; + default: + return rc; + } + } + + /* copy a data into a content buffer */ + set_content(data, length); + + switch (stream){ + case STREAM_XACL_PLUGIN_XATTR: + return restore_plugin_xattr(jcr); + default: + if (flags & BXATTR_FLAG_NATIVE){ + for (a = 0; xattr_streams[a] > 0; a++){ + if (xattr_streams[a] == stream){ + Dmsg0(400, "make Native XATTR call\n"); + return os_restore_xattr(jcr, stream, content, content_len); + } + } + } else { + inc_xattr_errors(); + return bRC_BXATTR_ok; + } + } + /* cannot find a valid stream to support */ + Qmsg2(jcr, M_WARNING, 0, _("Can't restore Extended Attributes of %s - incompatible xattr stream encountered - %d\n"), jcr->last_fname, stream); + return bRC_BXATTR_error; + } + return bRC_BXATTR_ok; +#endif +}; + +/* + * Checks if supplied xattr attribute name is indicated on OS specific lists + * + * in: + * jcr - Job Control Record + * ff_pkt - file to backup control package + * name - a name of the attribute to check + * out: + * TRUE - the attribute name is found on OS specific skip lists and should be skipped during backup + * FALSE - the attribute should be saved on backup stream + */ +bool BXATTR::check_xattr_skiplists (JCR *jcr, FF_PKT *ff_pkt, char * name){ + + bool skip = FALSE; + int count; + + /* sanity check of input variables */ + if (jcr == NULL || ff_pkt == NULL || name == NULL){ + return false; + } + + /* + * On some OSes you also get the acls in the extented attribute list. + * So we check if we are already backing up acls and if we do we + * don't store the extended attribute with the same info. + */ + if (ff_pkt->flags & FO_ACL){ + for (count = 0; xattr_acl_skiplist[count] != NULL; count++){ + if (bstrcmp(name, xattr_acl_skiplist[count])){ + skip = true; + break; + } + } + } + /* on some OSes we want to skip certain xattrs which are in the xattr_skiplist array. 
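 *    For example, when FO_ACL is set the Linux ACL skip list defined further below
 *    contains "system.posix_acl_access" and "system.posix_acl_default", so those
 *    pseudo-xattrs are left to the ACL streams instead of being saved twice.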
*/ + if (!skip){ + for (count = 0; xattr_skiplist[count] != NULL; count++){ + if (bstrcmp(name, xattr_skiplist[count])){ + skip = true; + break; + } + } + } + + return skip; +}; + + +/* + * Performs generic XATTR backup using OS specific methods for + * getting xattr data from files - os_get_xattr_names and os_get_xattr_value + * + * in: + * jcr - Job Control Record + * ff_pkt - file to backup control package + * out: + * bRC_BXATTR_ok - xattr backup ok or no xattr to backup found + * bRC_BXATTR_error/fatal - an error or fatal error occurred + * bRC_BXATTR_inval - input variables was invalid + */ +bRC_BXATTR BXATTR::generic_backup_xattr (JCR *jcr, FF_PKT *ff_pkt){ + + bRC_BXATTR rc; + POOLMEM *xlist; + uint32_t xlen; + char *name; + uint32_t name_len; + POOLMEM *value; + uint32_t value_len; + bool skip; + alist *xattr_list = NULL; + int xattr_count = 0; + uint32_t len = 0; + BXATTR_xattr *xattr; + + /* sanity check of input variables */ + if (jcr == NULL || ff_pkt == NULL){ + return bRC_BXATTR_inval; + } + + /* xlist is allocated as POOLMEM by os_get_xattr_names */ + rc = os_get_xattr_names(jcr, &xlist, &xlen); + switch (rc){ + case bRC_BXATTR_ok: + /* it's ok, so go further */ + break; + case bRC_BXATTR_skip: + case bRC_BXATTR_cont: + /* no xattr available, so skip rest of it */ + return bRC_BXATTR_ok; + default: + return rc; + } + + /* follow the list of xattr names and get the values + * TODO: change a standard NULL-terminated list of names into alist of structures */ + for (name = xlist; (name - xlist) + 1 < xlen; name = strchr(name, '\0') + 1){ + + name_len = strlen(name); + skip = check_xattr_skiplists(jcr, ff_pkt, name); + if (skip || name_len == 0){ + Dmsg1(100, "Skipping xattr named \"%s\"\n", name); + continue; + } + + /* value is allocated as POOLMEM by os_get_xattr_value */ + rc = os_get_xattr_value(jcr, name, &value, &value_len); + switch (rc){ + case bRC_BXATTR_ok: + /* it's ok, so go further */ + break; + case bRC_BXATTR_skip: + /* no xattr available, so skip rest of it */ + free_pool_memory(xlist); + return bRC_BXATTR_ok; + default: + /* error / fatal */ + free_pool_memory(xlist); + return rc; + } + + /* + * we have a name of the extended attribute in the name variable + * and value of the extended attribute in the value variable + * so we need to build a list + */ + xattr = (BXATTR_xattr*)malloc(sizeof(BXATTR_xattr)); + xattr->name_len = name_len; + xattr->name = name; + xattr->value_len = value_len; + xattr->value = value; + /* magic name_len name value_len value */ + len += sizeof(uint32_t) + sizeof(uint32_t) + name_len + sizeof(uint32_t) + value_len; + + if (xattr_list == NULL){ + xattr_list = New(alist(10, not_owned_by_alist)); + } + xattr_list->append(xattr); + xattr_count++; + } + if (xattr_count > 0){ + /* serialize the stream */ + rc = serialize_xattr_stream(jcr, len, xattr_list); + if (rc != bRC_BXATTR_ok){ + Mmsg(jcr->errmsg, _("Failed to serialize extended attributes on file \"%s\"\n"), jcr->last_fname); + Dmsg1(100, "Failed to serialize extended attributes on file \"%s\"\n", jcr->last_fname); + goto bailout; + } else { + /* send data to SD */ + rc = send_xattr_stream(jcr, xattr_streams[0]); + } + } else { + rc = bRC_BXATTR_ok; + } + +bailout: + /* free allocated data */ + if (xattr_list != NULL){ + foreach_alist(xattr, xattr_list){ + if (xattr == NULL){ + break; + } + if (xattr->value){ + free_pool_memory(xattr->value); + } + free(xattr); + } + delete xattr_list; + } + if (xlist != NULL){ + free_pool_memory(xlist); + } + + return rc; +}; + +/* + * Performs a 
generic XATTR restore using OS specific methods for + * setting XATTR data on file. + * + * in: + * jcr - Job Control Record + * stream - a stream number to restore + * out: + * bRC_BXATTR_ok - restore of acl's was successful + * bRC_BXATTR_error - was an error during xattr restore + * bRC_BXATTR_fatal - was a fatal error during xattr restore + * bRC_BXATTR_inval - input variables was invalid + */ +bRC_BXATTR BXATTR::generic_restore_xattr (JCR *jcr, int stream){ + + bRC_BXATTR rc = bRC_BXATTR_ok; + alist *xattr_list; + BXATTR_xattr *xattr; + + /* sanity check of input variables */ + if (jcr == NULL){ + return bRC_BXATTR_inval; + } + + /* empty list */ + xattr_list = New(alist(10, not_owned_by_alist)); + + /* unserialize data */ + unserialize_xattr_stream(jcr, content, content_len, xattr_list); + + /* follow the list to set all attributes */ + foreach_alist(xattr, xattr_list){ + rc = os_set_xattr(jcr, xattr); + if (rc != bRC_BXATTR_ok){ + Dmsg2(100, "Failed to set extended attribute %s on file \"%s\"\n", xattr->name, jcr->last_fname); + goto bailout; + } + } + +bailout: + /* free allocated data */ + if (xattr_list != NULL){ + foreach_alist(xattr, xattr_list){ + if (xattr == NULL){ + break; + } + if (xattr->name){ + free(xattr->name); + } + if (xattr->value){ + free(xattr->value); + } + free(xattr); + } + delete xattr_list; + } + return rc; +}; + +/* + * Perform a generic XATTR backup using a plugin. It calls the plugin API to + * get required xattr data from plugin. + * + * in: + * jcr - Job Control Record + * ff_pkt - file to backup control package + * out: + * bRC_BXATTR_ok - backup of xattrs was successful + * bRC_BXATTR_fatal - was an error during xattr backup + */ +bRC_BXATTR BXATTR::backup_plugin_xattr (JCR *jcr, FF_PKT *ff_pkt) +{ + int status; + char *data; + + /* sanity check of input variables */ + if (jcr == NULL || ff_pkt == NULL){ + return bRC_BXATTR_inval; + } + + while ((status = plugin_backup_xattr(jcr, ff_pkt, &data)) > 0){ + /* data is a plugin buffer which contains data to backup + * and status is a length of the buffer when > 0 */ + set_content(data, status); + if (send_xattr_stream(jcr, STREAM_XACL_PLUGIN_XATTR) == bRC_BXATTR_fatal){ + return bRC_BXATTR_fatal; + } + } + if (status < 0){ + /* error */ + return bRC_BXATTR_error; + } + + return bRC_BXATTR_ok; +}; + +/* + * Perform a generic XATTR restore using a plugin. It calls the plugin API to + * send acl data to plugin. + * + * in: + * jcr - Job Control Record + * stream - a stream number to restore + * out: + * bRC_BXATTR_ok - restore of xattrs was successful + * bRC_BXATTR_error - was an error during xattrs restore + * bRC_BXATTR_fatal - was a fatal error during xattrs restore or input data + * is invalid + */ +bRC_BXATTR BXATTR::restore_plugin_xattr (JCR *jcr) +{ + /* sanity check of input variables */ + if (jcr == NULL){ + return bRC_BXATTR_inval; + } + + if (!plugin_restore_xattr(jcr, content, content_len)){ + /* error */ + return bRC_BXATTR_error; + } + + return bRC_BXATTR_ok; +} + +/* + * Initialize a variable xattr_streams for a specified OS. + * The rutine should be called from object instance constructor + * + * in: + * pxattr - xattr streams supported for specific OS + */ +void BXATTR::set_xattr_streams (const int *pxattr){ + + xattr_streams = pxattr; +}; + +/* + * Initialize variables xattr_skiplist and xattr_acl_skiplist for a specified OS. 
+ * The rutine should be called from object instance constructor + * + * in: + * pxattr - xattr skip list for specific OS + * pxattr_acl - xattr acl names skip list for specific OS + */ +void BXATTR::set_xattr_skiplists (const char **pxattr, const char **pxattr_acl){ + + xattr_skiplist = pxattr; + xattr_acl_skiplist = pxattr_acl; +}; + +/* + * Serialize the XATTR stream which will be saved into archive. Serialization elements cames from + * a list and for backward compatibility we produce the same stream as prievous Bacula versions. + * + * serialized stream consists of the following elements: + * magic - A magic string which makes it easy to detect any binary incompatabilites + * required for backward compatibility + * name_len - The length of the following xattr name + * name - The name of the extended attribute + * value_len - The length of the following xattr data + * value - The actual content of the extended attribute only if value_len is greater then zero + * + * in: + * jcr - Job Control Record + * len - expected serialize length + * list - a list of xattr elements to serialize + * out: + * bRC_BXATTR_ok - when serialization was perfect + * bRC_BXATTR_inval - when we have invalid variables + * bRC_BXATTR_error - illegal attribute name + */ +bRC_BXATTR BXATTR::serialize_xattr_stream(JCR *jcr, uint32_t len, alist *list){ + + ser_declare; + BXATTR_xattr *xattr; + + /* sanity check of input variables */ + if (jcr == NULL || list == NULL){ + return bRC_BXATTR_inval; + } + + /* we serialize data direct to content buffer, so check if data fits */ + content = check_pool_memory_size(content, len + 20); + ser_begin(content, len + 20); + + foreach_alist(xattr, list){ + if (xattr == NULL){ + break; + } + /* + * serialize data + * + * we have to start with the XATTR_MAGIC for backward compatibility (the magic is silly) + */ + ser_uint32(XATTR_MAGIC); + /* attribute name length and name itself */ + if (xattr->name_len > 0 && xattr->name){ + ser_uint32(xattr->name_len); + ser_bytes(xattr->name, xattr->name_len); + } else { + /* error - name cannot be empty */ + Mmsg0(jcr->errmsg, _("Illegal empty xattr attribute name\n")); + Dmsg0(100, "Illegal empty xattr attribute name\n"); + return bRC_BXATTR_error; + } + /* attibute value length and value itself */ + ser_uint32(xattr->value_len); + if (xattr->value_len > 0 && xattr->value){ + ser_bytes(xattr->value, xattr->value_len); + Dmsg3(100, "Backup xattr named %s, value %*.s\n", xattr->name, xattr->value_len, xattr->value); + } else { + Dmsg1(100, "Backup empty xattr named %s\n", xattr->name); + } + } + + ser_end(content, len + 20); + content_len = ser_length(content); + + return bRC_BXATTR_ok; +}; + +/* + * Unserialize XATTR stream on *content and produce a xattr *list which contain + * key => value pairs + * + * in: + * jcr - Job Control Record + * content - a stream content to unserialize + * length - a content length + * list - a pointer to the xattr list to populate + * out: + * bRC_BXATTR_ok - when unserialize was perfect + * bRC_BXATTR_inval - when we have invalid variables + * list - key/value pairs populated xattr list + */ +bRC_BXATTR BXATTR::unserialize_xattr_stream(JCR *jcr, char *content, uint32_t length, alist *list){ + + unser_declare; + uint32_t magic; + BXATTR_xattr *xattr; + + /* sanity check of input variables */ + if (jcr == NULL || content == NULL || list == NULL){ + return bRC_BXATTR_inval; + } + + unser_begin(content, length); + while (unser_length(content) < length){ + /* + * Sanity check of correct stream magic number + * 
Someone was too paranoid to implement this kind of verification in original Bacula code + * Unfortunate for backward compatibility we have to follow this insane implementation + * + * XXX: design a new xattr stream format + */ + unser_uint32(magic); + if (magic != XATTR_MAGIC){ + Mmsg(jcr->errmsg, _("Illegal xattr stream, no XATTR_MAGIC on file \"%s\"\n"), jcr->last_fname); + Dmsg1(100, "Illegal xattr stream, no XATTR_MAGIC on file \"%s\"\n", jcr->last_fname); + return bRC_BXATTR_error; + } + /* first attribute name length */ + xattr = (BXATTR_xattr *)malloc(sizeof(BXATTR_xattr)); + unser_uint32(xattr->name_len); + if (xattr->name_len == 0){ + /* attribute name cannot be empty */ + Mmsg(jcr->errmsg, _("Illegal xattr stream, xattr name length <= 0 on file \"%s\"\n"), jcr->last_fname); + Dmsg1(100, "Illegal xattr stream, xattr name length <= 0 on file \"%s\"\n", jcr->last_fname); + free(xattr); + return bRC_BXATTR_error; + } + /* followed by attribute name itself */ + xattr->name = (char *)malloc(xattr->name_len + 1); + unser_bytes(xattr->name, xattr->name_len); + xattr->name[xattr->name_len] = '\0'; + /* attribute value */ + unser_uint32(xattr->value_len); + if (xattr->value_len > 0){ + /* we have a value */ + xattr->value = (char *)malloc(xattr->value_len + 1); + unser_bytes(xattr->value, xattr->value_len); + xattr->value[xattr->value_len] = '\0'; + Dmsg3(100, "Restoring xattr named %s, value %.*s\n", xattr->name, xattr->value_len, xattr->value); + } else { + /* value is empty */ + xattr->value = NULL; + Dmsg1(100, "Restoring empty xattr named %s\n", xattr->name); + } + list->append(xattr); + } + unser_end(content, length); + + return bRC_BXATTR_ok; +}; + +#include "bxattr_osx.h" +#include "bxattr_linux.h" +#include "bxattr_freebsd.h" +#include "bxattr_solaris.h" +// #include "bxattr_aix.h" + +/* + * Creating the current instance of the BXATTR for a supported OS + */ +void *new_bxattr() +{ +#if defined(HAVE_DARWIN_OS) + return new BXATTR_OSX(); +#elif defined(HAVE_LINUX_OS) + return new BXATTR_Linux(); +#elif defined(HAVE_FREEBSD_OS) + return new BXATTR_FreeBSD(); +#elif defined(HAVE_HURD_OS) + return new BXATTR_Hurd(); +#elif defined(HAVE_AIX_OS) + return new BXATTR_AIX(); +#elif defined(HAVE_IRIX_OS) + return new BXATTR_IRIX(); +#elif defined(HAVE_OSF1_OS) + return new BXATTR_OSF1(); +#elif defined(HAVE_SUN_OS) + return new BXATTR_Solaris(); +#else + return NULL; +#endif +}; + +#endif /* HAVE_XATTR */ diff --git a/src/filed/bxattr.h b/src/filed/bxattr.h new file mode 100644 index 00000000..26e11d2b --- /dev/null +++ b/src/filed/bxattr.h @@ -0,0 +1,231 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. + */ +/** + * Major refactoring of XATTR code written by: + * + * Radosław Korzeniewski, MMXVI + * radoslaw@korzeniewski.net, radekk@inteos.pl + * Inteos Sp. z o.o. 
http://www.inteos.pl/ + * + */ + +#ifndef __BXATTR_H_ +#define __BXATTR_H_ + +/* check if XATTR support is enabled */ +#if defined(HAVE_XATTR) + +/* + * Magic used in the magic field of the xattr struct. + * This way we can see if we encounter a valid xattr struct. + * Used for backward compatibility only. + */ +#define XATTR_MAGIC 0x5C5884 + +/* + * Return value status enumeration + * You have an error when value is less then zero. + * You have a positive status when value is not negative + * (greater or equal to zero). + */ +enum bRC_BXATTR { + bRC_BXATTR_inval = -3, // input data invalid + bRC_BXATTR_fatal = -2, // a fatal error + bRC_BXATTR_error = -1, // standard error + bRC_BXATTR_ok = 0, // success + bRC_BXATTR_skip = 1, // processing should skip current runtime + bRC_BXATTR_cont = 2 // processing should skip current element + // and continue with next one +}; + +/* + * Flags which control what XATTR engine + * to use for backup/restore + */ +#define BXATTR_FLAG_NONE 0 +#define BXATTR_FLAG_NATIVE 0x01 +#define BXATTR_FLAG_AFS 0x02 +#define BXATTR_FLAG_PLUGIN 0x04 + +/* + * Extended attribute (xattr) list element. + * + * Every xattr consist of a Key=>Value pair where + * both could be a binary data. + */ +struct BXATTR_xattr { + uint32_t name_len; + char *name; + uint32_t value_len; + char *value; +}; + +/* + * Basic XATTR class which is a foundation for any other OS specific implementation. + * + * This class cannot be used directly as it is an abstraction class with a lot of virtual + * methods laying around. As a basic class it has all public API available for backup and + * restore functionality. As a bonus it handles all XATTR generic functions and OS + * independent API, i.e. for AFS XATTR or Plugins XATTR (future functionality). + */ +class BXATTR { +private: + bool xattr_ena; + uint32_t flags; + uint32_t current_dev; + POOLMEM *content; + uint32_t content_len; + uint32_t xattr_nr_errors; + const int *xattr_streams; + const char **xattr_skiplist; + const char **xattr_acl_skiplist; + + void init(); + + /** + * Perform OS specific XATTR backup. + * + * in: + * jcr - Job Control Record + * ff_pkt - file to backup control package + * out: + * bRC_BXATTR_ok - xattr backup ok or no xattr to backup found + * bRC_BXATTR_error/fatal - an error or fatal error occurred + */ + virtual bRC_BXATTR os_backup_xattr (JCR *jcr, FF_PKT *ff_pkt){return bRC_BXATTR_fatal;}; + + /** + * Perform OS specific XATTR restore. Runtime is called only when stream is supported by OS. + * + * in: + * jcr - Job Control Record + * stream - backup stream number + * content - a buffer with data to restore + * length - a data restore length + * out: + * bRC_BXATTR_ok - xattr backup ok or no xattr to backup found + * bRC_BXATTR_error/fatal - an error or fatal error occurred + */ + virtual bRC_BXATTR os_restore_xattr (JCR *jcr, int stream, char *content, uint32_t length){return bRC_BXATTR_fatal;}; + + /** + * Returns a list of xattr names in newly allocated pool memory and a length of the allocated buffer. + * It allocates a memory with poolmem subroutines every time a function is called, so it must be freed + * when not needed. The list of xattr names is returned as an unordered array of NULL terminated + * character strings (attribute names are separated by NULL characters), like this: + * user.name1\0system.name1\0user.name2\0 + * The format of the list is based on standard "llistxattr" function call. + * TODO: change the format of the list from an array of NULL terminated strings into an alist of structures. 
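 * A sketch of how a caller walks that packed buffer (this is the same loop used by
 * generic_backup_xattr() in bxattr.c; xlist/xlen are the values returned here):
 *
 *   POOLMEM *xlist; uint32_t xlen;
 *   if (os_get_xattr_names(jcr, &xlist, &xlen) == bRC_BXATTR_ok) {
 *      for (char *name = xlist; (name - xlist) + 1 < xlen; name = strchr(name, '\0') + 1) {
 *         // each "name" is one NUL terminated attribute name, e.g. "user.name1"
 *      }
 *      free_pool_memory(xlist);
 *   }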
+ * + * in: + * jcr - Job Control Record + * xlen - non NULL pointer to the uint32_t variable for storing a length of the xattr names list + * pxlist - non NULL pointer to the char* variable for allocating a memoty data for xattr names list + * out: + * bRC_BXATTR_ok - we've got a xattr data to backup + * bRC_BXATTR_skip - no xattr data available, no fatal error, skip rest of the runtime + * bRC_BXATTR_fatal - when required buffers are unallocated + * bRC_BXATTR_error - in case of any error + */ + virtual bRC_BXATTR os_get_xattr_names (JCR *jcr, POOLMEM ** pxlist, uint32_t * xlen){return bRC_BXATTR_fatal;}; + + /** + * Returns a value of the requested attribute name and a length of the allocated buffer. + * It allocates a memory with poolmem subroutines every time a function is called, so it must be freed + * when not needed. + * + * in: + * jcr - Job Control Record + * name - a name of the extended attribute + * pvalue - the pointer for the buffer with value - it is allocated by function and should be freed when no needed + * plen - the pointer for the length of the allocated buffer + * + * out: + * pxlist - the atributes list + * bRC_BXATTR_ok - we've got a xattr data which could be empty when xlen=0 + * bRC_BXATTR_skip - no xattr data available, no fatal error, skip rest of the runtime + * bRC_BXATTR_error - error getting an attribute + * bRC_BXATTR_fatal - required buffers are unallocated + */ + virtual bRC_BXATTR os_get_xattr_value (JCR *jcr, char * name, char ** pvalue, uint32_t * plen){return bRC_BXATTR_fatal;}; + + /** + * Low level OS specific runtime to set extended attribute on file + * + * in: + * jcr - Job Control Record + * xattr - the struct with attribute/value to set + * + * out: + * bRC_BXATTR_ok - setting the attribute was ok + * bRC_BXATTR_error - error during extattribute set + * bRC_BXATTR_fatal - required buffers are unallocated + */ + virtual bRC_BXATTR os_set_xattr (JCR *jcr, BXATTR_xattr *xattr){return bRC_BXATTR_fatal;}; + + void inc_xattr_errors(){ xattr_nr_errors++;}; + bRC_BXATTR check_dev (JCR *jcr); + void check_dev (JCR *jcr, uint32_t dev); + +public: + BXATTR (); + virtual ~BXATTR(); + + /* enable/disable functionality */ + void enable_xattr(); + void disable_xattr(); + + /* + * public methods used outside the class or derivatives + */ + bRC_BXATTR backup_xattr (JCR *jcr, FF_PKT *ff_pkt); + bRC_BXATTR restore_xattr (JCR *jcr, int stream, char *content, uint32_t content_length); + + /* utility functions */ + inline uint32_t get_xattr_nr_errors(){ return xattr_nr_errors;}; + void set_xattr_streams (const int *pxattr); + void set_xattr_skiplists (const char **pxattr, const char **pxattr_acl); + inline void clear_flag (uint32_t flag){ flags &= ~flag;}; + inline void set_flag (uint32_t flag){ flags |= flag;}; + POOLMEM * set_content (char *text); + POOLMEM * set_content(char *data, int len); + inline POOLMEM * get_content (void){ return content;}; + inline uint32_t get_content_size (void){ return sizeof_pool_memory(content);}; + inline uint32_t get_content_len (void){ return content_len;}; + bool check_xattr_skiplists (JCR *jcr, FF_PKT *ff_pkt, char * name); + + /* sending data to the storage */ + bRC_BXATTR send_xattr_stream (JCR *jcr, int stream); + + /* serialize / unserialize stream */ + bRC_BXATTR unserialize_xattr_stream(JCR *jcr, char *content, uint32_t length, alist *list); + bRC_BXATTR serialize_xattr_stream(JCR *jcr, uint32_t len, alist *list); + + /* generic functions */ + bRC_BXATTR generic_backup_xattr (JCR *jcr, FF_PKT *ff_pkt); + bRC_BXATTR 
generic_restore_xattr (JCR *jcr, int stream); + bRC_BXATTR backup_plugin_xattr (JCR *jcr, FF_PKT *ff_pkt); + bRC_BXATTR restore_plugin_xattr (JCR *jcr); +}; + +void *new_bxattr(); + +#endif /* HAVE_XATTR */ + +#endif /* __BXATTR_H_ */ diff --git a/src/filed/bxattr_freebsd.c b/src/filed/bxattr_freebsd.c new file mode 100644 index 00000000..ebb0c59d --- /dev/null +++ b/src/filed/bxattr_freebsd.c @@ -0,0 +1,465 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. + */ +/** + * Major refactoring of XATTR code written by: + * + * Radosław Korzeniewski, MMXVI + * radoslaw@korzeniewski.net, radekk@inteos.pl + * Inteos Sp. z o.o. http://www.inteos.pl/ + * + */ + +#include "bacula.h" +#include "filed.h" +#include "bxattr_freebsd.h" + +#if defined(HAVE_FREEBSD_OS) + +/* check if XATTR support is enabled */ +#if defined(HAVE_XATTR) + +/* + * Define the supported XATTR streams for this OS + */ +static const int os_xattr_streams[] = { + STREAM_XACL_FREEBSD_XATTR, + 0 +}; + +static const int os_xattr_namespaces[] = { + EXTATTR_NAMESPACE_USER, + EXTATTR_NAMESPACE_SYSTEM, + -1 +}; + +static const char *os_xattr_acl_skiplist[] = { + "system.posix1e.acl_access", + "system.posix1e.acl_default", + "system.nfs4.acl", + NULL +}; + +static const char *os_xattr_skiplist[] = { + NULL +}; + +/* + * OS specific constructor + */ +BXATTR_FreeBSD::BXATTR_FreeBSD() +{ + set_xattr_streams(os_xattr_streams); + set_xattr_skiplists(os_xattr_skiplist, os_xattr_acl_skiplist); +}; + +/* + * Perform OS specific extended attribute backup + * + * in/out - check API at bxattr.h + */ +bRC_BXATTR BXATTR_FreeBSD::os_backup_xattr (JCR *jcr, FF_PKT *ff_pkt){ + + bRC_BXATTR rc; + POOLMEM *xlist; + uint32_t xlen; + char *name; + uint32_t name_len; + POOLMEM *value; + uint32_t value_len; + POOLMEM *name_gen; + uint32_t name_gen_len; + char * namespace_str; + int namespace_len; + bool skip; + alist *xattr_list = NULL; + int xattr_count = 0; + uint32_t len = 0; + BXATTR_xattr *xattr; + int a; + + for (a = 0; os_xattr_namespaces[a] != -1; a++){ // loop through all available namespaces + /* xlist is allocated as POOLMEM by os_get_xattr_names_local */ + rc = os_get_xattr_names_local(jcr, os_xattr_namespaces[a], &xlist, &xlen); + switch (rc){ + case bRC_BXATTR_ok: + /* it's ok, so go further */ + break; + case bRC_BXATTR_skip: + case bRC_BXATTR_cont: + /* no xattr available, so skip rest of it */ + return bRC_BXATTR_ok; + default: + return rc; + } + + /* get a string representation of the namespace */ + if (extattr_namespace_to_string(os_xattr_namespaces[a], &namespace_str) != 0){ + Mmsg2(jcr->errmsg, _("Failed to convert %d into namespace on file \"%s\"\n"), os_xattr_namespaces[a], jcr->last_fname); + Dmsg2(100, "Failed to convert %d into namespace on file \"%s\"\n", os_xattr_namespaces[a], jcr->last_fname); + goto bail_out; + } + namespace_len = strlen(namespace_str); + + /* follow the list of xattr names and get the values */ + for (name = xlist; (name - xlist) + 1 < xlen; 
name = strchr(name, '\0') + 1){ + name_len = strlen(name); + name_gen = get_pool_memory(PM_FNAME); + name_gen = check_pool_memory_size(name_gen, name_len + namespace_len + 2); + bsnprintf(name_gen, name_len + namespace_len + 2, "%s.%s", namespace_str, name); + name_gen_len = strlen(name_gen); + + skip = check_xattr_skiplists(jcr, ff_pkt, name_gen); + if (skip || name_len == 0){ + Dmsg1(100, "Skipping xattr named %s\n", name_gen); + continue; + } + + /* value is allocated as POOLMEM by os_get_xattr_value_local */ + rc = os_get_xattr_value_local(jcr, os_xattr_namespaces[a], name, &value, &value_len); + switch (rc){ + case bRC_BXATTR_ok: + /* it's ok, so go further */ + break; + case bRC_BXATTR_skip: + /* no xattr available, so skip rest of it */ + rc = bRC_BXATTR_ok; + goto bail_out; + default: + /* error / fatal */ + goto bail_out; + } + + /* + * we have a name of the extended attribute in the name variable + * and value of the extended attribute in the value variable + * so we need to build a list + */ + xattr = (BXATTR_xattr*)malloc(sizeof(BXATTR_xattr)); + xattr->name_len = name_gen_len; + xattr->name = name_gen; + xattr->value_len = value_len; + xattr->value = value; + /* magic name_len name value_len value */ + len += sizeof(uint32_t) + sizeof(uint32_t) + name_gen_len + sizeof(uint32_t) + value_len; + + if (xattr_list == NULL){ + xattr_list = New(alist(10, not_owned_by_alist)); + } + xattr_list->append(xattr); + xattr_count++; + } + if (xattr_count > 0){ + /* serialize the stream */ + rc = serialize_xattr_stream(jcr, len, xattr_list); + if (rc != bRC_BXATTR_ok){ + Mmsg(jcr->errmsg, _("Failed to serialize extended attributes on file \"%s\"\n"), jcr->last_fname); + Dmsg1(100, "Failed to serialize extended attributes on file \"%s\"\n", jcr->last_fname); + goto bail_out; + } else { + /* send data to SD */ + rc = send_xattr_stream(jcr, STREAM_XACL_FREEBSD_XATTR); + } + } else { + rc = bRC_BXATTR_ok; + } + } +bail_out: + /* free allocated data */ + if (xattr_list != NULL){ + foreach_alist(xattr, xattr_list){ + if (xattr == NULL){ + break; + } + if (xattr->name){ + free_pool_memory(name_gen); + } + if (xattr->value){ + free(xattr->value); + } + free(xattr); + } + delete xattr_list; + } + if (xlist != NULL){ + free(xlist); + } + + return rc; +}; + +/* + * Perform OS specific XATTR restore. Runtime is called only when stream is supported by OS. + * + * in/out - check API at bxattr.h + */ +bRC_BXATTR BXATTR_FreeBSD::os_restore_xattr (JCR *jcr, int stream, char *content, uint32_t length){ + return generic_restore_xattr(jcr, stream); +}; + +/* + * Return a list of xattr names in newly allocated pool memory and a length of the allocated buffer. + * It allocates a memory with poolmem subroutines every time a function is called, so it must be freed + * when not needed. + * + * in/out - check API at bxattr.h + * + * As a FreeBSD uses a different attributes name schema/format then this method is a very different + * from a standard generic method because it uses a namespace (ns) value for os dependent optimization. 
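 * For illustration (example bytes only): extattr_list_link() hands back length
 * prefixed names with no terminators, e.g. {4,'n','a','m','e',3,'f','o','o'}, and
 * the conversion loop in the body below rewrites that into the generic NUL
 * separated form "name\0foo\0" expected by the common backup loop and skip lists.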
+ */ +bRC_BXATTR BXATTR_FreeBSD::os_get_xattr_names_local (JCR *jcr, int ns, POOLMEM ** pxlist, uint32_t * xlen){ + + int len; + POOLMEM * list; + int a; + int stra; + POOLMEM * genlist; + + /* check input data */ + if (jcr == NULL || xlen == NULL || pxlist == NULL){ + return bRC_BXATTR_inval; + } + /* get the length of the extended attributes */ + len = extattr_list_link(jcr->last_fname, ns, NULL, 0); + switch (len){ + case -1: { + berrno be; + + switch (errno){ + case ENOENT: + /* no file available, skip it */ + return bRC_BXATTR_skip; + case EOPNOTSUPP: + /* no xattr supported on filesystem, clear a flag and skip it */ + clear_flag(BXATTR_FLAG_NATIVE); + set_content(NULL); + return bRC_BXATTR_skip; + case EPERM: + if (ns == EXTATTR_NAMESPACE_SYSTEM){ + return bRC_BXATTR_cont; + } /* else show error */ + default: + Mmsg2(jcr->errmsg, _("extattr_list_link error on file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + Dmsg2(100, "extattr_list_link error file=%s ERR=%s\n", jcr->last_fname, be.bstrerror()); + return bRC_BXATTR_error; + } + break; + } + case 0: + /* xattr available but empty, skip it */ + return bRC_BXATTR_skip; + default: + break; + } + + /* + * allocate memory for the extented attribute list + * default size is a 4k for PM_BSOCK, which should be sufficient on almost all + * Linux system where xattrs a limited in size to single filesystem block ~4kB + * so we need to check required size + */ + list = get_pool_memory(PM_BSOCK); + list = check_pool_memory_size(list, len + 1); + memset(list, 0, len + 1); + + /* get the list of extended attributes names for a file */ + len = extattr_list_link(jcr->last_fname, ns, list, len); + switch (len){ + case -1: { + berrno be; + + switch (errno){ + case ENOENT: + /* no file available, skip it, first release allocated memory */ + free_pool_memory(list); + return bRC_BXATTR_skip; + case EPERM: + if (ns == EXTATTR_NAMESPACE_SYSTEM){ + return bRC_BXATTR_cont; + } /* else show error */ + default: + Mmsg2(jcr->errmsg, _("extattr_list_link error on file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + Dmsg2(100, "extattr_list_link error file=%s ERR=%s\n", jcr->last_fname, be.bstrerror()); + free_pool_memory(list); + return bRC_BXATTR_error; + } + break; + } + default: + break; + } + /* convert FreeBSD list type to the generic one */ + genlist = get_pool_memory(PM_BSOCK); + genlist = check_pool_memory_size(genlist, len + 1); + memset(genlist, 0, len + 1); + for (a = 0; a < len; a += list[a] + 1){ + stra = list[a]; + memcpy(genlist + a, list + a + 1, stra); + genlist[a + stra] = '\0'; + } + free_pool_memory(list); + /* setup return data */ + *pxlist = genlist; + *xlen = len; + return bRC_BXATTR_ok; +}; + +/* + * Return a value of the requested attribute name and a length of the allocated buffer. + * It allocates a memory with poolmem subroutines every time a function is called, so it must be freed + * when not needed. + * + * in/out - check API at bxattr.h + * + * As a FreeBSD uses a different attributes name schema/format then this method is a very different + * from a standard generic method because it uses a namespace (ns) value for os dependent optimization. 
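 * The body uses the usual two step sizing pattern (sketch; "path" stands for
 * jcr->last_fname):
 *
 *   len = extattr_get_link(path, ns, name, NULL, 0);      // 1. query the value size
 *   value = get_pool_memory(PM_MESSAGE);                  // 2. grow the pool buffer to fit
 *   value = check_pool_memory_size(value, len + 1);
 *   len = extattr_get_link(path, ns, name, value, len);   // 3. fetch the value itself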
+ */ +bRC_BXATTR BXATTR_FreeBSD::os_get_xattr_value_local (JCR *jcr, int ns, char * name, char ** pvalue, uint32_t * plen){ + + int len; + POOLMEM * value; + + /* check input data */ + if (jcr == NULL || name == NULL || plen == NULL || pvalue == NULL){ + return bRC_BXATTR_inval; + } + /* get the length of the value for extended attribute */ + len = extattr_get_link(jcr->last_fname, ns, name, NULL, 0); + switch (len){ + case -1: { + berrno be; + + switch (errno){ + case ENOENT: + /* no file available, skip it */ + return bRC_BXATTR_skip; + default: + /* XXX: what about ENOATTR error value? */ + Mmsg2(jcr->errmsg, _("extattr_get_link error on file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + Dmsg2(100, "extattr_get_link error file=%s ERR=%s\n", jcr->last_fname, be.bstrerror()); + return bRC_BXATTR_error; + } + break; + } + default: + break; + } + + if (len > 0){ + /* + * allocate memory for the extented attribute value + * default size is a 256B for PM_MESSAGE, so we need to check required size + */ + value = get_pool_memory(PM_MESSAGE); + value = check_pool_memory_size(value, len + 1); + memset(value, 0, len + 1); + /* value is not empty, get a data */ + len = extattr_get_link(jcr->last_fname, ns, name, value, len); + switch (len){ + case -1: { + berrno be; + + switch (errno){ + case ENOENT: + /* no file available, skip it, first release allocated memory */ + free_pool_memory(value); + return bRC_BXATTR_skip; + default: + Mmsg2(jcr->errmsg, _("extattr_get_link error on file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + Dmsg2(100, "extattr_get_link error file=%s ERR=%s\n", jcr->last_fname, be.bstrerror()); + free_pool_memory(value); + return bRC_BXATTR_error; + } + break; + } + default: + break; + } + /* ensure a value is nul terminated */ + value[len] = '\0'; + } else { + /* empty value */ + value = NULL; + len = 0; + } + /* setup return data */ + *pvalue = value; + *plen = len; + return bRC_BXATTR_ok; +}; + +/* + * Low level OS specific runtime to set extended attribute on file + * + * in/out - check API at bxattr.h + * + * xattr->name should be in '.' format which + * function handle without problem, otherwise it returns an error + * TODO: it is possible to handle a different attributes name format + * for OS portability where default namespace 'user' can be used + */ +bRC_BXATTR BXATTR_FreeBSD::os_set_xattr (JCR *jcr, BXATTR_xattr *xattr){ + + char * name; + char * nspace; + int ns; + int rc; + + /* check input data */ + if (jcr == NULL || xattr == NULL){ + return bRC_BXATTR_inval; + } + + /* search for attribute namespace which is distinguished from attribute name by a dot '.' 
character */ + if ((name = strchr(xattr->name, '.')) == (char *)NULL){ + Mmsg2(jcr->errmsg, _("Failed to split %s into namespace and name part on file \"%s\"\n"), xattr->name, jcr->last_fname); + Dmsg2(100, "Failed to split %s into namespace and name part on file \"%s\"\n", xattr->name, jcr->last_fname); + return bRC_BXATTR_error; + } + + /* split namespace and name of the attribute */ + nspace = xattr->name; + *name++ = '\0'; + + /* check if namespace is valid on this system */ + if (extattr_string_to_namespace(nspace, &ns) != 0){ + Mmsg2(jcr->errmsg, _("Failed to convert %s into namespace on file \"%s\"\n"), nspace, jcr->last_fname); + Dmsg2(100, "Failed to convert %s into namespace on file \"%s\"\n", nspace, jcr->last_fname); + return bRC_BXATTR_error; + } + + /* set extattr on file */ + rc = extattr_set_link(jcr->last_fname, ns, name, xattr->value, xattr->value_len); + if (rc < 0 || rc != (int)xattr->value_len){ + berrno be; + + switch (errno){ + case ENOENT: + break; + default: + Mmsg2(jcr->errmsg, _("extattr_set_link error on file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + Dmsg2(100, "extattr_set_link error file=%s ERR=%s\n", jcr->last_fname, be.bstrerror()); + return bRC_BXATTR_error; + } + } + return bRC_BXATTR_ok; +}; + +#endif /* HAVE_XATTR */ + +#endif /* HAVE_FREEBSD_OS */ diff --git a/src/filed/bxattr_freebsd.h b/src/filed/bxattr_freebsd.h new file mode 100644 index 00000000..6c56865a --- /dev/null +++ b/src/filed/bxattr_freebsd.h @@ -0,0 +1,85 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. + */ +/** + * Major refactoring of XATTR code written by: + * + * Radosław Korzeniewski, MMXVI + * radoslaw@korzeniewski.net, radekk@inteos.pl + * Inteos Sp. z o.o. http://www.inteos.pl/ + * + */ + +#ifndef __BXATTR_FreeBSD_H_ +#define __BXATTR_FreeBSD_H_ + +#if defined(HAVE_FREEBSD_OS) +#include + +/* check if XATTR support is enabled */ +#if defined(HAVE_XATTR) + +#if (!defined(HAVE_EXTATTR_GET_LINK) && !defined(HAVE_EXTATTR_GET_FILE)) || \ + (!defined(HAVE_EXTATTR_SET_LINK) && !defined(HAVE_EXTATTR_SET_FILE)) || \ + (!defined(HAVE_EXTATTR_LIST_LINK) && !defined(HAVE_EXTATTR_LIST_FILE)) || \ + !defined(HAVE_EXTATTR_NAMESPACE_TO_STRING) || \ + !defined(HAVE_EXTATTR_STRING_TO_NAMESPACE) +#error "Missing full support for the extattr functions." 
+#endif + +#ifdef HAVE_SYS_EXTATTR_H +#include +#include +#else +#error "Missing sys/extattr.h header file" +#endif + +#ifdef HAVE_LIBUTIL_H +#include +#endif + +#if !defined(HAVE_EXTATTR_GET_LINK) && defined(HAVE_EXTATTR_GET_FILE) +#define extattr_get_link extattr_get_file +#endif +#if !defined(HAVE_EXTATTR_SET_LINK) && defined(HAVE_EXTATTR_SET_FILE) +#define extattr_set_link extattr_set_file +#endif +#if !defined(HAVE_EXTATTR_LIST_LINK) && defined(HAVE_EXTATTR_LIST_FILE) +#define extattr_list_link extattr_list_file +#endif + +/* + * + * + */ +class BXATTR_FreeBSD : public BXATTR { +private: + bRC_BXATTR os_backup_xattr (JCR *jcr, FF_PKT *ff_pkt); + bRC_BXATTR os_restore_xattr (JCR *jcr, int stream, char *content, uint32_t length); + bRC_BXATTR os_get_xattr_names_local (JCR *jcr, const int ns, POOLMEM **list, uint32_t *length); + bRC_BXATTR os_get_xattr_value_local (JCR *jcr, const int ns, char * name, char ** pvalue, uint32_t * plen); + bRC_BXATTR os_set_xattr (JCR *jcr, BXATTR_xattr *xattr); +public: + BXATTR_FreeBSD (); +}; + +#endif /* HAVE_XATTR */ + +#endif /* HAVE_FREEBSD_OS */ + +#endif /* __BXATTR_FreeBSD_H_ */ diff --git a/src/filed/bxattr_linux.c b/src/filed/bxattr_linux.c new file mode 100644 index 00000000..6e896025 --- /dev/null +++ b/src/filed/bxattr_linux.c @@ -0,0 +1,290 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. + */ +/** + * Major refactoring of XATTR code written by: + * + * Radosław Korzeniewski, MMXVI + * radoslaw@korzeniewski.net, radekk@inteos.pl + * Inteos Sp. z o.o. http://www.inteos.pl/ + * + */ + +#include "bacula.h" +#include "filed.h" +#include "bxattr_linux.h" + +#if defined(HAVE_LINUX_OS) + +/* check if XATTR support is enabled */ +#if defined(HAVE_XATTR) + +/* + * Define the supported XATTR streams for this OS + */ +static const int os_xattr_streams[] = { + STREAM_XACL_LINUX_XATTR, + 0 +}; + +static const char *os_xattr_acl_skiplist[] = { + "system.posix_acl_access", + "system.posix_acl_default", + NULL +}; + +static const char *os_xattr_skiplist[] = { + NULL +}; + +/* + * OS specific constructor + */ +BXATTR_Linux::BXATTR_Linux(){ + set_xattr_streams(os_xattr_streams); + set_xattr_skiplists(os_xattr_skiplist, os_xattr_acl_skiplist); +}; + +/* + * Perform OS specific extended attribute backup + * + * in/out - check API at bxattr.h + */ +bRC_BXATTR BXATTR_Linux::os_backup_xattr (JCR *jcr, FF_PKT *ff_pkt){ + return generic_backup_xattr(jcr, ff_pkt); +}; + +/* + * Perform OS specific XATTR restore. Runtime is called only when stream is supported by OS. + * + * in/out - check API at bxattr.h + */ +bRC_BXATTR BXATTR_Linux::os_restore_xattr (JCR *jcr, int stream, char *content, uint32_t length){ + return generic_restore_xattr(jcr, stream); +}; + +/* + * Return a list of xattr names in newly allocated pool memory and a length of the allocated buffer. + * It allocates a memory with poolmem subroutines every time a function is called, so it must be freed + * when not needed. 
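 * The body below follows the usual two step sizing pattern (sketch; "path" stands
 * for jcr->last_fname):
 *
 *   len = llistxattr(path, NULL, 0);               // 1. ask the kernel for the list size
 *   list = get_pool_memory(PM_BSOCK);              // 2. grow the pool buffer to fit
 *   list = check_pool_memory_size(list, len + 1);
 *   len = llistxattr(path, list, len);             // 3. fetch the NUL separated names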
+ * + * in/out - check API at bxattr.h + */ +bRC_BXATTR BXATTR_Linux::os_get_xattr_names (JCR *jcr, POOLMEM ** pxlist, uint32_t * xlen){ + + int len; + POOLMEM * list; + + /* check input data */ + if (jcr == NULL || xlen == NULL || pxlist == NULL){ + return bRC_BXATTR_inval; + } + + /* get the length of the extended attributes */ + len = llistxattr(jcr->last_fname, NULL, 0); + switch (len){ + case -1: { + berrno be; + + switch (errno){ + case ENOENT: + /* no file available, skip it */ + return bRC_BXATTR_skip; + case EOPNOTSUPP: + /* no xattr supported on filesystem, clear a flag and skip it */ + clear_flag(BXATTR_FLAG_NATIVE); + set_content(NULL); + return bRC_BXATTR_skip; + default: + Mmsg2(jcr->errmsg, _("llistxattr error on file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + Dmsg2(100, "llistxattr error file=%s ERR=%s\n", jcr->last_fname, be.bstrerror()); + return bRC_BXATTR_error; + } + break; + } + case 0: + /* xattr available but empty, skip it */ + return bRC_BXATTR_skip; + default: + break; + } + + /* + * allocate memory for the extented attribute list + * default size is a 4k for PM_BSOCK, which should be sufficient on almost all + * Linux system where xattrs a limited in size to single filesystem block ~4kB + * so we need to check required size + */ + list = get_pool_memory(PM_BSOCK); + list = check_pool_memory_size(list, len + 1); + memset(list, 0, len + 1); + + /* get the list of extended attributes names for a file */ + len = llistxattr(jcr->last_fname, list, len); + switch (len){ + case -1: { + berrno be; + + switch (errno){ + case ENOENT: + /* no file available, skip it, first release allocated memory */ + free_pool_memory(list); + return bRC_BXATTR_skip; + default: + Mmsg2(jcr->errmsg, _("llistxattr error on file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + Dmsg2(100, "llistxattr error file=%s ERR=%s\n", jcr->last_fname, be.bstrerror()); + free_pool_memory(list); + return bRC_BXATTR_error; + } + break; + } + default: + break; + } + /* ensure a list is nul terminated */ + list[len] = '\0'; + /* setup return data */ + *pxlist = list; + *xlen = len; + return bRC_BXATTR_ok; +}; + +/* + * Return a value of the requested attribute name and a length of the allocated buffer. + * It allocates a memory with poolmem subroutines every time a function is called, so it must be freed + * when not needed. + * + * in/out - check API at bxattr.h + */ +bRC_BXATTR BXATTR_Linux::os_get_xattr_value (JCR *jcr, char * name, char ** pvalue, uint32_t * plen){ + + int len; + POOLMEM * value; + + /* check input data */ + if (jcr == NULL || name == NULL || plen == NULL || pvalue == NULL){ + return bRC_BXATTR_inval; + } + + /* get the length of the value for extended attribute */ + len = lgetxattr(jcr->last_fname, name, NULL, 0); + switch (len){ + case -1: { + berrno be; + + switch (errno){ + case ENOENT: + /* no file available, skip it */ + return bRC_BXATTR_skip; + default: + /* XXX: what about ENOATTR error value? 
*/ + Mmsg2(jcr->errmsg, _("lgetxattr error on file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + Dmsg2(100, "lgetxattr error file=%s ERR=%s\n", jcr->last_fname, be.bstrerror()); + return bRC_BXATTR_error; + } + break; + } + default: + break; + } + + if (len > 0){ + /* + * allocate memory for the extented attribute value + * default size is a 256B for PM_MESSAGE, so we need to check required size + */ + value = get_pool_memory(PM_MESSAGE); + value = check_pool_memory_size(value, len + 1); + memset(value, 0, len + 1); + /* value is not empty, get a data */ + len = lgetxattr(jcr->last_fname, name, value, len); + switch (len){ + case -1: { + berrno be; + + switch (errno){ + case ENOENT: + /* no file available, skip it, first release allocated memory */ + free_pool_memory(value); + return bRC_BXATTR_skip; + default: + Mmsg2(jcr->errmsg, _("lgetxattr error on file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + Dmsg2(100, "lgetxattr error file=%s ERR=%s\n", jcr->last_fname, be.bstrerror()); + free_pool_memory(value); + return bRC_BXATTR_error; + } + break; + } + default: + break; + } + /* ensure a value is nul terminated */ + value[len] = '\0'; + } else { + /* empty value */ + value = NULL; + len = 0; + } + /* setup return data */ + *pvalue = value; + *plen = len; + return bRC_BXATTR_ok; +}; + +/* + * Low level OS specific runtime to set extended attribute on file + * + * in/out - check API at bxattr.h + */ +bRC_BXATTR BXATTR_Linux::os_set_xattr (JCR *jcr, BXATTR_xattr *xattr){ + + /* check input data */ + if (jcr == NULL || xattr == NULL){ + return bRC_BXATTR_inval; + } + + /* set extattr on file */ + if (lsetxattr(jcr->last_fname, xattr->name, xattr->value, xattr->value_len, 0) != 0){ + berrno be; + + switch (errno){ + case ENOENT: + break; + case ENOTSUP: + /* + * If the filesystem reports it doesn't support XATTR we clear the + * BXATTR_FLAG_NATIVE flag so we skip XATTR restores on all other files + * on the same filesystem. The BXATTR_FLAG_NATIVE flag gets set again + * when we change from one filesystem to an other. + */ + clear_flag(BXATTR_FLAG_NATIVE); + Mmsg(jcr->errmsg, _("setxattr error on file \"%s\": filesystem doesn't support XATTR\n"), jcr->last_fname); + Dmsg3(100, "setxattr error name=%s value=%s file=%s filesystem doesn't support XATTR\n", xattr->name, xattr->value, jcr->last_fname); + break; + default: + Mmsg2(jcr->errmsg, _("setxattr error on file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + Dmsg2(100, "setxattr error file=%s ERR=%s\n", jcr->last_fname, be.bstrerror()); + return bRC_BXATTR_error; + } + } + return bRC_BXATTR_ok; +}; + +#endif /* HAVE_XATTR */ + +#endif /* HAVE_LINUX_OS */ diff --git a/src/filed/bxattr_linux.h b/src/filed/bxattr_linux.h new file mode 100644 index 00000000..da8b6b48 --- /dev/null +++ b/src/filed/bxattr_linux.h @@ -0,0 +1,68 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+ */ +/** + * Major refactoring of XATTR code written by: + * + * Radosław Korzeniewski, MMXVI + * radoslaw@korzeniewski.net, radekk@inteos.pl + * Inteos Sp. z o.o. http://www.inteos.pl/ + * + */ + +#ifndef __BXATTR_LINUX_H_ +#define __BXATTR_LINUX_H_ + +#if defined(HAVE_LINUX_OS) +#include + +/* check if XATTR support is enabled */ +#if defined(HAVE_XATTR) + +/* verify a support for XATTR on build os */ +#if !defined(HAVE_LLISTXATTR) || !defined(HAVE_LGETXATTR) || !defined(HAVE_LSETXATTR) +#error "Missing full support for the XATTR functions." +#endif + +#ifdef HAVE_SYS_XATTR_H +#include +#else +#error "Missing sys/xattr.h header file" +#endif + +/* + * + * + */ +class BXATTR_Linux : public BXATTR { +private: + bRC_BXATTR os_backup_xattr (JCR *jcr, FF_PKT *ff_pkt); + bRC_BXATTR os_restore_xattr (JCR *jcr, int stream, char *content, uint32_t length); + bRC_BXATTR os_get_xattr_names (JCR *jcr, POOLMEM **list, uint32_t *length); + bRC_BXATTR os_get_xattr_value (JCR *jcr, char * name, char ** pvalue, uint32_t * plen); + bRC_BXATTR os_set_xattr (JCR *jcr, FF_PKT *ff_pkt, char *list, uint32_t length); + bRC_BXATTR os_set_xattr (JCR *jcr, BXATTR_xattr *xattr); +public: + BXATTR_Linux (); +}; + +#endif /* HAVE_XATTR */ + +#endif /* HAVE_LINUX_OS */ + +#endif /* __BXATTR_LINUX_H_ */ diff --git a/src/filed/bxattr_osx.c b/src/filed/bxattr_osx.c new file mode 100644 index 00000000..8b1425eb --- /dev/null +++ b/src/filed/bxattr_osx.c @@ -0,0 +1,291 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. + */ +/** + * Major refactoring of XATTR code written by: + * + * Radosław Korzeniewski, MMXVI + * radoslaw@korzeniewski.net, radekk@inteos.pl + * Inteos Sp. z o.o. http://www.inteos.pl/ + * + */ + +#include "bacula.h" +#include "filed.h" +#include "bxattr_osx.h" + +#if defined(HAVE_DARWIN_OS) + +/* check if XATTR support is enabled */ +#if defined(HAVE_XATTR) + +/* + * Define the supported XATTR streams for this OS + */ +static const int os_xattr_streams[] = { + STREAM_XACL_DARWIN_XATTR, + 0 +}; + +static const char *os_xattr_skiplist[] = { + "com.apple.system.extendedsecurity", + "com.apple.ResourceFork", + NULL +}; + +static const char *os_xattr_acl_skiplist[] = { + "com.apple.system.Security", + NULL +}; + +/* + * OS specific constructor + */ +BXATTR_OSX::BXATTR_OSX() +{ + set_xattr_streams(os_xattr_streams); + set_xattr_skiplists(os_xattr_skiplist, os_xattr_acl_skiplist); +}; + +/* + * Perform OS specific extended attribute backup + * + * in/out - check API at bxattr.h + */ +bRC_BXATTR BXATTR_OSX::os_backup_xattr (JCR *jcr, FF_PKT *ff_pkt){ + return generic_backup_xattr(jcr, ff_pkt); +}; + +/* + * Perform OS specific XATTR restore. Runtime is called only when stream is supported by OS. 
+ * + * in/out - check API at bxattr.h + */ +bRC_BXATTR BXATTR_OSX::os_restore_xattr (JCR *jcr, int stream, char *content, uint32_t length){ + return generic_restore_xattr(jcr, stream); +}; + +/* + * Return a list of xattr names in newly allocated pool memory and a length of the allocated buffer. + * It allocates a memory with poolmem subroutines every time a function is called, so it must be freed + * when not needed. + * + * in/out - check API at bxattr.h + */ +bRC_BXATTR BXATTR_OSX::os_get_xattr_names (JCR *jcr, POOLMEM ** pxlist, uint32_t * xlen){ + + int len; + POOLMEM * list; + + /* check input data */ + if (jcr == NULL || xlen == NULL || pxlist == NULL){ + return bRC_BXATTR_inval; + } + /* get the length of the extended attributes */ + len = listxattr(jcr->last_fname, NULL, 0, XATTR_NOFOLLOW); + switch (len){ + case -1: { + berrno be; + + switch (errno){ + case ENOENT: + /* no file available, skip it */ + return bRC_BXATTR_skip; + case ENOTSUP: + /* no xattr supported on filesystem, clear a flag and skip it */ + clear_flag(BXATTR_FLAG_NATIVE); + set_content(NULL); + return bRC_BXATTR_skip; + default: + Mmsg2(jcr->errmsg, _("llistxattr error on file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + Dmsg2(100, "llistxattr error file=%s ERR=%s\n", jcr->last_fname, be.bstrerror()); + return bRC_BXATTR_error; + } + break; + } + case 0: + /* xattr available but empty, skip it */ + return bRC_BXATTR_skip; + default: + break; + } + + /* + * allocate memory for the extented attribute list + * default size is a 4k for PM_BSOCK, which should be sufficient on almost all + * Linux system where xattrs a limited in size to single filesystem block ~4kB + * so we need to check required size + */ + list = get_pool_memory(PM_BSOCK); + list = check_pool_memory_size(list, len + 1); + memset(list, 0, len + 1); + + /* get the list of extended attributes names for a file */ + len = listxattr(jcr->last_fname, list, len, XATTR_NOFOLLOW); + switch (len){ + case -1: { + berrno be; + + switch (errno){ + case ENOENT: + /* no file available, skip it, first release allocated memory */ + free_pool_memory(list); + return bRC_BXATTR_skip; + default: + Mmsg2(jcr->errmsg, _("llistxattr error on file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + Dmsg2(100, "llistxattr error file=%s ERR=%s\n", jcr->last_fname, be.bstrerror()); + free_pool_memory(list); + return bRC_BXATTR_error; + } + break; + } + default: + break; + } + /* ensure a list is nul terminated */ + list[len] = '\0'; + /* setup return data */ + *pxlist = list; + *xlen = len; + return bRC_BXATTR_ok; +}; + +/* + * Return a value of the requested attribute name and a length of the allocated buffer. + * It allocates a memory with poolmem subroutines every time a function is called, so it must be freed + * when not needed. + * + * in/out - check API at bxattr.h + */ +bRC_BXATTR BXATTR_OSX::os_get_xattr_value (JCR *jcr, char * name, char ** pvalue, uint32_t * plen){ + + int len; + POOLMEM * value; + + /* check input data */ + if (jcr == NULL || name == NULL || plen == NULL || pvalue == NULL){ + return bRC_BXATTR_inval; + } + + /* get the length of the value for extended attribute */ + len = getxattr(jcr->last_fname, name, NULL, 0, 0, XATTR_NOFOLLOW); + switch (len){ + case -1: { + berrno be; + + switch (errno){ + case ENOENT: + /* no file available, skip it */ + return bRC_BXATTR_skip; + default: + /* XXX: what about ENOATTR error value? 
*/ + Mmsg2(jcr->errmsg, _("lgetxattr error on file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + Dmsg2(100, "lgetxattr error file=%s ERR=%s\n", jcr->last_fname, be.bstrerror()); + return bRC_BXATTR_error; + } + break; + } + default: + break; + } + + if (len > 0){ + /* + * allocate memory for the extented attribute value + * default size is a 256B for PM_MESSAGE, so we need to check required size + */ + value = get_pool_memory(PM_MESSAGE); + value = check_pool_memory_size(value, len + 1); + memset(value, 0, len + 1); + /* value is not empty, get a data */ + len = getxattr(jcr->last_fname, name, value, len, 0, XATTR_NOFOLLOW); + switch (len){ + case -1: { + berrno be; + + switch (errno){ + case ENOENT: + /* no file available, skip it, first release allocated memory */ + free_pool_memory(value); + return bRC_BXATTR_skip; + default: + Mmsg2(jcr->errmsg, _("lgetxattr error on file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + Dmsg2(100, "lgetxattr error file=%s ERR=%s\n", jcr->last_fname, be.bstrerror()); + free_pool_memory(value); + return bRC_BXATTR_error; + } + break; + } + default: + break; + } + /* ensure a value is nul terminated */ + value[len] = '\0'; + } else { + /* empty value */ + value = NULL; + len = 0; + } + /* setup return data */ + *pvalue = value; + *plen = len; + return bRC_BXATTR_ok; +}; + +/* + * Low level OS specific runtime to set extended attribute on file + * + * in/out - check API at bxattr.h + */ +bRC_BXATTR BXATTR_OSX::os_set_xattr (JCR *jcr, BXATTR_xattr *xattr){ + + /* check input data */ + if (jcr == NULL || xattr == NULL){ + return bRC_BXATTR_inval; + } + + /* set extattr on file */ + if (setxattr(jcr->last_fname, xattr->name, xattr->value, xattr->value_len, 0, XATTR_NOFOLLOW) != 0){ + berrno be; + + switch (errno){ + case ENOENT: + break; + case ENOTSUP: + /* + * If the filesystem reports it doesn't support XATTR we clear the + * BXATTR_FLAG_NATIVE flag so we skip XATTR restores on all other files + * on the same filesystem. The BXATTR_FLAG_NATIVE flag gets set again + * when we change from one filesystem to an other. + */ + clear_flag(BXATTR_FLAG_NATIVE); + Mmsg1(jcr->errmsg, _("setxattr error on file \"%s\": filesystem doesn't support XATTR\n"), jcr->last_fname); + Dmsg3(100, "setxattr error name=%s value=%s file=%s filesystem doesn't support XATTR\n", xattr->name, xattr->value, jcr->last_fname); + break; + default: + Mmsg2(jcr->errmsg, _("setxattr error on file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + Dmsg2(100, "setxattr error file=%s ERR=%s\n", jcr->last_fname, be.bstrerror()); + return bRC_BXATTR_error; + } + } + return bRC_BXATTR_ok; +}; + +#endif /* HAVE_XATTR */ + +#endif /* HAVE_DARWIN_OS */ diff --git a/src/filed/bxattr_osx.h b/src/filed/bxattr_osx.h new file mode 100644 index 00000000..cc120773 --- /dev/null +++ b/src/filed/bxattr_osx.h @@ -0,0 +1,66 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+ */ +/** + * Major refactoring of XATTR code written by: + * + * Radosław Korzeniewski, MMXVI + * radoslaw@korzeniewski.net, radekk@inteos.pl + * Inteos Sp. z o.o. http://www.inteos.pl/ + * + */ + +#ifndef __BXATTR_OSX_H_ +#define __BXATTR_OSX_H_ + +#if defined(HAVE_DARWIN_OS) +#include + +/* check if XATTR support is enabled */ +#if defined(HAVE_XATTR) + +#if !defined(HAVE_LISTXATTR) || !defined(HAVE_GETXATTR) || !defined(HAVE_SETXATTR) +#error "Missing full support for the XATTR functions." +#endif + +#ifdef HAVE_SYS_XATTR_H +#include +#else +#error "Missing sys/xattr.h header file" +#endif + +/* + * + * + */ +class BXATTR_OSX : public BXATTR { +private: + bRC_BXATTR os_backup_xattr (JCR *jcr, FF_PKT *ff_pkt); + bRC_BXATTR os_restore_xattr (JCR *jcr, int stream, char *content, uint32_t length); + bRC_BXATTR os_get_xattr_names (JCR *jcr, POOLMEM **list, uint32_t *length); + bRC_BXATTR os_get_xattr_value (JCR *jcr, char * name, char ** pvalue, uint32_t * plen); + bRC_BXATTR os_set_xattr (JCR *jcr, BXATTR_xattr *xattr); +public: + BXATTR_OSX (); +}; + +#endif /* HAVE_XATTR */ + +#endif /* HAVE_DARWIN_OS */ + +#endif /* __BXATTR_OSX_H_ */ diff --git a/src/filed/bxattr_solaris.c b/src/filed/bxattr_solaris.c new file mode 100644 index 00000000..b152fa94 --- /dev/null +++ b/src/filed/bxattr_solaris.c @@ -0,0 +1,986 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. + */ +/** + * Major refactoring of XATTR code written by: + * + * Radosław Korzeniewski, MMXVI + * radoslaw@korzeniewski.net, radekk@inteos.pl + * Inteos Sp. z o.o. 
http://www.inteos.pl/ + * + */ + +#include "bacula.h" +#include "filed.h" +#include "bxattr_solaris.h" + +#if defined(HAVE_SUN_OS) + +/* check if XATTR support is enabled */ +#if defined(HAVE_XATTR) + +#ifdef HAVE_SYS_NVPAIR_H +#include +#endif + +#if !defined(HAVE_OPENAT) || \ + !defined(HAVE_ATTROPEN) || \ + !defined(HAVE_UNLINKAT) || \ + !defined(HAVE_FCHOWNAT) || \ + !defined(HAVE_FUTIMESAT) +#error "Unable to compile code because of missing openat, attropen, unlinkat, fchownat or futimesat functions" +#endif + +/* + * Define the supported XATTR streams for this OS + */ +static const int os_xattr_streams[] = { + STREAM_XACL_SOLARIS_XATTR, +#if defined(HAVE_SYS_NVPAIR_H) && defined(_PC_SATTR_ENABLED) + STREAM_XACL_SOLARIS_SYS_XATTR, +#endif /* defined(HAVE_SYS_NVPAIR_H) && defined(_PC_SATTR_ENABLED) */ + 0 +}; + +static const char *os_xattr_skiplist[] = { + "..", +#if defined(HAVE_SYS_NVPAIR_H) && defined(_PC_SATTR_ENABLED) + VIEW_READONLY, +#endif /* defined(HAVE_SYS_NVPAIR_H) && defined(_PC_SATTR_ENABLED) */ + NULL +}; + +static const char *os_xattr_acl_skiplist[] = { + NULL +}; + +/* Provide a suplement for Solaris 10 missing linkat function */ +#ifndef HAVE_LINKAT +int linkat(int fd1, const char *path1, int fd2, const char *path2, int flag) +{ + int rc; + + rc = fchdir(fd1); + if (rc != 0){ + return rc; + } + + rc = link(path1, path2); + return rc; +}; +#endif +/* + * OS Specific constructor + */ +BXATTR_Solaris::BXATTR_Solaris(){ + + set_xattr_streams(os_xattr_streams); + set_xattr_skiplists(os_xattr_skiplist, os_xattr_acl_skiplist); + cache = NULL; +}; + +/* + * OS Specific destructor + */ +BXATTR_Solaris::~BXATTR_Solaris(){ + + delete_xattr_cache(); +}; + +/* + * Perform OS specific extended attribute backup + * + * in/out - check API at bxattr.h + * + * The Solaris implementation of XATTR is very, very different then all other "generic" unix implementations, + * so the original author of the Bacula XATTR support for Solaris OS decided to totally change the XATTR Stream + * content, and we need to follow this design to support previous behavior. The stream consist of a number of + * "files" with STREAM_XACL_SOLARIS_XATTR or STREAM_XACL_SOLARIS_SYS_XATTR stream id. Every singe stream represents + * a single attribute. The content is a NULL-terminated array with a following data: + * \0\0\0 + * when an attribute file has a hardlinked other attributes then a content stream changes a bit into: + * \0\0\0 + * where: + * is an attribute name - a file name in Solaris + * is a standard file stat struct encoded by Bacula (the same encoding goes with a regular file) + * is a Solaris dependent acltotext data + * is the attribute file raw content + * is a name of the first hardlinked attribute file which a current attribute has to linked to + * + * The raw content of the attribute is copied into memory before send to the SD and for a very large attribute + * data can allocate a large amount of additional memory. In most cases it should not be a problem because most + * xattrs should has a few /hundred/ bytes in size. This is the same behavior as in previous implementation. 
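 *
 * Editorial illustration, not part of the imported source: the record
 * layout built by the bsnprintf() calls below, spelled out with explicit
 * field names:
 *
 *    regular attribute:    <attrname>\0<encoded stat>\0<acltext>\0<raw attribute data>
 *    hardlinked attribute: <attrname>\0<encoded stat>\0<name of link target attribute>\0
 *
 * The two cases are told apart at restore time by the inode value passed to
 * encode_stat(): zero for a regular attribute, the real st_ino for a
 * hardlinked one.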
+ */ +bRC_BXATTR BXATTR_Solaris::os_backup_xattr (JCR *jcr, FF_PKT *ff_pkt){ + + bRC_BXATTR rc; + POOLMEM *xlist = NULL; + uint32_t xlen; + char *name; + char *lnkname; + uint32_t name_len; + POOLMEM *value = NULL; + uint32_t value_len; + char * bxattracl; + POOLMEM *data = NULL; + bool skip; + struct stat st; + char attribs[MAXSTRING]; + int stream; + int attrfd; + int len; + mode_t stmode; + + /* sanity check of input variables */ + if (jcr == NULL || ff_pkt == NULL){ + return bRC_BXATTR_inval; + } + + /* check if extended/extensible attributes are present */ + if (pathconf(jcr->last_fname, _PC_XATTR_EXISTS) > 0){ + /* xlist is allocated as POOLMEM by os_get_xattr_names */ + rc = os_get_xattr_names(jcr, &xlist, &xlen); + switch (rc){ + case bRC_BXATTR_ok: + /* it's ok, so go further */ + break; + case bRC_BXATTR_skip: + case bRC_BXATTR_cont: + /* no xattr available, so skip rest of it */ + return bRC_BXATTR_ok; + default: + return rc; + } + + data = get_pool_memory(PM_BSOCK); + /* follow the list of xattr names and get the values */ + for (name = xlist; (unsigned int)(name - xlist) + 1 < xlen; name = strchr(name, '\0') + 1){ + name_len = strlen(name); + /* skip read-only or other unused attribute names */ + skip = check_xattr_skiplists(jcr, ff_pkt, name); + if (skip || name_len == 0){ + Dmsg1(200, "Skipping xattr named \"%s\"\n", name); + continue; + } + Dmsg1(200, "Processing xattr: %s\n", name); + /* set a correct stream */ + stream = STREAM_XACL_SOLARIS_XATTR; + +#if defined(HAVE_SYS_NVPAIR_H) && defined(_PC_SATTR_ENABLED) + /* check for system attributes name */ + if (bstrcmp(name, VIEW_READWRITE)){ + stream = STREAM_XACL_SOLARIS_SYS_XATTR; + } +#endif /* HAVE_SYS_NVPAIR_H && _PC_SATTR_ENABLED */ + + /* open an attribute descriptor, it will be used for backup */ + attrfd = attropen(jcr->last_fname, name, O_RDONLY); + + /* get the stat of the attribute */ + if (fstat(attrfd, &st) < 0){ + berrno be; + + switch (errno){ + case ENOENT: + rc = bRC_BXATTR_ok; + goto bailout; + default: + Mmsg3(jcr->errmsg, _("Unable to get status on xattr \"%s\" on file \"%s\": ERR=%s\n"), name, jcr->last_fname, be.bstrerror()); + Dmsg3(100, "fstat of xattr %s on \"%s\" failed: ERR=%s\n", name, jcr->last_fname, be.bstrerror()); + rc = bRC_BXATTR_error; + goto bailout; + } + } + + /* get xattr acl data, but only when it is not trivial acls */ + rc = os_get_xattr_acl(jcr, attrfd, &bxattracl, name); + if (rc != bRC_BXATTR_ok){ + goto bailout; + } + + /* + * Solaris support only S_IFREG and S_IFDIR as an attribute file type, no other types are supported + * the previous Solaris xattr implementation in Bacula had an unverified and untested code for other + * types of attribute files which was a nonsense and unnecessarily complicate the code. We decided + * to remove unsupported code. To check if the current Solaris version support for xattr was extended + * simply verify a man fsattr(5) for it. + */ + stmode = st.st_mode & S_IFMT; + Dmsg1(800, "st.st_mode: 0x%X\n", stmode); + switch (stmode){ + case S_IFDIR: + /* we have a struct stat of the attribute so encode it to the buffer + * we distinguish standard file by setting ino to zero in attribs */ + encode_stat(attribs, &st, sizeof(st), 0, stream); + /* prepare and save xattr info */ + len = bsnprintf(data, sizeof_pool_memory(data), "%s%c%s%c%s%c", name, 0, attribs, 0, (bxattracl) ? 
bxattracl : "", 0); + set_content(data, len); + break; + case S_IFREG: + /* check for hardlinked attributes which Solaris support */ + if (st.st_nlink > 1){ + /* search for already saved file of the same inode number */ + lnkname = find_xattr_cache(jcr, st.st_ino, name); + if (lnkname != NULL){ + /* found a previous saved file, link to it and render xattr data for hardlinked attribute */ + Dmsg0(400, "saving linked attr\n"); + /* we have a struct stat of the attribute so encode it to the buffer + * we distinguish hardlinked file by setting a real ino in attribs */ + encode_stat(attribs, &st, sizeof(st), st.st_ino, stream); + len = bsnprintf(data, sizeof_pool_memory(data), "%s%c%s%c%s%c", name, 0, attribs, 0, lnkname, 0); + set_content(data, len); + /* content is ready */ + break; + } + } + /* value is allocated as POOLMEM by os_get_xattr_value */ + rc = os_get_xattr_value(jcr, name, &value, &value_len); + switch (rc){ + case bRC_BXATTR_ok: + /* it's ok, so go further */ + break; + case bRC_BXATTR_skip: + /* no xattr available, so skip rest of it */ + rc = bRC_BXATTR_ok; + goto bailout; + default: + /* error / fatal */ + goto bailout; + } + /* we have a struct stat of the attribute so encode it to the buffer + * we distinguish standard file by setting ino to zero in attribs */ + encode_stat(attribs, &st, sizeof(st), 0, stream); + /* prepare and save xattr info */ + len = bsnprintf(data, sizeof_pool_memory(data), "%s%c%s%c%s%c", name, 0, attribs, 0, (bxattracl) ? bxattracl : "", 0); + /* append value data to the end of the xattr info */ + if (value_len > 0){ + check_pool_memory_size(data, len + value_len); + memcpy(data + len, value, value_len); + set_content(data, len + value_len); + free_pool_memory(value); + value = NULL; + } else { + set_content(data, len); + } + break; + default: + Dmsg3(100, "Unsupported extended attribute type: 0x%X for \"%s\" on file \"%s\"\n", stmode, name, jcr->last_fname); + Mmsg3(jcr->errmsg, _("Unsupported extended attribute type: 0x%X for \"%s\" on file \"%s\"\n"), stmode, name, jcr->last_fname); + rc = bRC_BXATTR_error; + goto bailout; + } + if (bxattracl){ + actuallyfree(bxattracl); + bxattracl = NULL; + } + /* send stream to the sd */ + rc = send_xattr_stream(jcr, stream); + if (rc != bRC_BXATTR_ok){ + Mmsg2(jcr->errmsg, _("Failed to send extended attribute \"%s\" on file \"%s\"\n"), name, jcr->last_fname); + Dmsg2(100, "Failed to send extended attribute \"%s\" on file \"%s\"\n", name, jcr->last_fname); + goto bailout; + } + } + +bailout: + /* free allocated data: xlist, value (if not freed), data, etc. */ + free_pool_memory(data); + if (value != NULL){ + free_pool_memory(value); + } + if (xlist != NULL){ + // Dmsg1(400, "free xlist: %p\n", xlist); + free_pool_memory(xlist); + } + /* this is a cache for a particular file, so no needed after backup of this file */ + delete_xattr_cache(); + + return rc; + } else { + Dmsg1(500, "xattr does not exist on: %s\n", jcr->last_fname); + } + return bRC_BXATTR_ok; +}; + +/* + * BXATTR_Solaris cache is a simple linked list cache of inode number and names used to handle + * xattr hard linked data. The function is searching for cached entry. When not found it append + * entry to the cache. 
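 *
 * Editorial example, not part of the imported source (the attribute names
 * and inode number are made up): if attributes "a1" and "a2" of the same
 * file share inode 1234, the first call find_xattr_cache(jcr, 1234, "a1")
 * returns NULL and caches the pair, while the later call
 * find_xattr_cache(jcr, 1234, "a2") returns "a1", so "a2" is backed up as a
 * link record pointing at "a1" instead of duplicating its data.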
+ * in: + * jcr - Job Control Record (well, it is not used here) + * ino - inode number to compare/search for + * name - the name of the current attribute + * out: + * NULL - when entry not found in cache and new entry was added + * - a name of the linked entry + */ +inline char * BXATTR_Solaris::find_xattr_cache(JCR *jcr, ino_t ino, char * name){ + + BXATTR_Solaris_Cache *entry; + + if (cache != NULL){ + foreach_alist(entry, cache){ + if (entry && entry->inode == ino){ + /* found in cache, return name */ + return entry->name; + } + } + } else { + cache = New (alist(10, not_owned_by_alist)); + } + /* not found, so add this one to the cache */ + entry = (BXATTR_Solaris_Cache*) malloc (sizeof(BXATTR_Solaris_Cache)); + entry->inode = ino; + entry->name = name; + cache->append(entry); + return NULL; +} + +/* + * The function deletes a cache + * in/out - void + */ +inline void BXATTR_Solaris::delete_xattr_cache(){ + + BXATTR_Solaris_Cache *entry; + + if (cache != NULL){ + foreach_alist(entry, cache){ + free(entry); + } + delete cache; + cache = NULL; + } +} + +/* + * Perform OS specific XATTR restore. Runtime is called only when stream is supported by OS. + * + * The way Solaris xattr support is designed in Bacula we will have a single attribute restore + * with every call to this function. So multiple attributes are restored with multiple calls. + * + * in/out - check API at bxattr.h + */ +bRC_BXATTR BXATTR_Solaris::os_restore_xattr (JCR *jcr, int stream, char *content, uint32_t length){ + + bRC_BXATTR rc = bRC_BXATTR_error; + bool extended = false; + + /* check input data */ + if (jcr == NULL || content == NULL){ + return bRC_BXATTR_inval; + } + + Dmsg2(400, "restore xattr stream %i on file: %s\n", stream, jcr->last_fname); + /* First make sure we can restore xattr on the filesystem */ + switch (stream){ +#if defined(HAVE_SYS_NVPAIR_H) && defined(_PC_SATTR_ENABLED) + case STREAM_XACL_SOLARIS_SYS_XATTR: + if (pathconf(jcr->last_fname, _PC_SATTR_ENABLED) <= 0){ + Mmsg(jcr->errmsg, _("Failed to restore extensible attributes on file \"%s\"\n"), jcr->last_fname); + Dmsg1(100, "Unable to restore extensible attributes on file \"%s\", filesystem doesn't support this\n", jcr->last_fname); + goto bail_out; + } + extended = true; + break; +#endif + case STREAM_XACL_SOLARIS_XATTR: + if (pathconf(jcr->last_fname, _PC_XATTR_ENABLED) <= 0){ + Mmsg(jcr->errmsg, _("Failed to restore extended attributes on file \"%s\"\n"), jcr->last_fname); + Dmsg1(100, "Unable to restore extended attributes on file \"%s\", filesystem doesn't support this\n", jcr->last_fname); + goto bail_out; + } + break; + default: + goto bail_out; + } + + rc = os_set_xattr(jcr, extended, content, length); + +bail_out: + return rc; +}; + +#ifdef HAVE_ACL +/* + * See if an acl is a trivial one (e.g. just the stat bits encoded as acl.) + * There is no need to store those acls as we already store the stat bits too. + */ +bool acl_is_trivial(int count, aclent_t *entries) +{ + int n; + aclent_t *ace; + + for (n = 0; n < count; n++) { + ace = &entries[n]; + + if (!(ace->a_type == USER_OBJ || + ace->a_type == GROUP_OBJ || + ace->a_type == OTHER_OBJ || + ace->a_type == CLASS_OBJ)) + return false; + } + return true; +} +#endif + +/* + * Low level OS specific runtime to get ACL on XATTR. 
The ACL data is set in supplied buffer + * + * in: + * jcr - Job Control Record + * fd - an opened file descriptor of the saved attribute + * buffer - a memory buffer pointer which we will return when render an acl text + * out: + * buffer - NULL when no ACL or any error else the memory with acl representation. + * The memory for the external text string is obtained using malloc(3C). + * The caller is responsible for freeing the memory upon completion. + * bRC_BXATTR_ok - backup acl for extended attribute finish without problems + * bRC_BXATTR_error - backup acl unsuccessful + * bRC_BXATTR_inval - input variables are invalid (null) + * + */ +bRC_BXATTR BXATTR_Solaris::os_get_xattr_acl(JCR *jcr, int fd, char **buffer, char *attrname) +{ +// a function is valid only when Bacula have a support for ACL +#ifdef HAVE_ACL + bRC_BXATTR rc = bRC_BXATTR_error; + + /* sanity check of input variables */ + if (jcr == NULL || buffer == NULL || fd < 0){ + return bRC_BXATTR_inval; + } + +#ifdef HAVE_EXTENDED_ACL + + int flags; + acl_t *aclp = NULL; + + /* check if an attribute has acl on it which we can save */ + if (fpathconf(fd, _PC_ACL_ENABLED) > 0){ + /* check for non trivial acl on the file */ + if (facl_get(fd, ACL_NO_TRIVIAL, &aclp) != 0){ + berrno be; + + switch (errno){ + case ENOENT: + rc = bRC_BXATTR_ok; + goto bail_out; + default: + Mmsg2(jcr->errmsg, _("Unable to get xattr acl on file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + Dmsg2(100, "facl_get/acl_get of xattr on \"%s\" failed: ERR=%s\n", jcr->last_fname, be.bstrerror()); + goto bail_out; + } + } + + if (aclp != NULL){ +#if defined(ACL_SID_FMT) + /* New format flag added in newer Solaris versions. */ + flags = ACL_APPEND_ID | ACL_COMPACT_FMT | ACL_SID_FMT; +#else + flags = ACL_APPEND_ID | ACL_COMPACT_FMT; +#endif /* ACL_SID_FMT */ + + *buffer = acl_totext(aclp, flags); + Dmsg1(500, "xattr extended acl found: %s\n", *buffer); + acl_free(aclp); + } else { + *buffer = NULL; + } + } else { + *buffer = NULL; + } + rc = bRC_BXATTR_ok; +bail_out: + +#else /* !HAVE_EXTENDED_ACL */ + + int n; + aclent_t *acls = NULL; + + /* See if this attribute has an ACL */ + n = facl(fd, GETACLCNT, 0, NULL); + + if (n >= MIN_ACL_ENTRIES){ + acls = (aclent_t *)malloc(n * sizeof(aclent_t)); + if (facl(fd, GETACL, n, acls) != n){ + berrno be; + + switch (errno){ + case ENOENT: + rc = bRC_BXATTR_ok; + goto bail_out; + default: + Mmsg3(jcr->errmsg, _("Unable to get acl on xattr %s on file \"%s\": ERR=%s\n"), attrname, jcr->last_fname, be.bstrerror()); + Dmsg3(100, "facl/acl of xattr %s on \"%s\" failed: ERR=%s\n", attrname, jcr->last_fname, be.bstrerror()); + rc = bRC_BXATTR_error; + goto bail_out; + } + } + + /* See if there is a non trivial acl on the file. */ + if (!acl_is_trivial(n, acls)){ + if ((*buffer = acltotext(acls, n)) == NULL){ + berrno be; + + Mmsg3(jcr->errmsg, _("Unable to get acl text on xattr %s on file \"%s\": ERR=%s\n"), attrname, jcr->last_fname, be.bstrerror()); + Dmsg3(100, "acltotext of xattr %s on \"%s\" failed: ERR=%s\n", attrname, jcr->last_fname, be.bstrerror()); + rc = bRC_BXATTR_error; + } + Dmsg1(500, "xattr acl found: %s\n", *buffer); + } else { + *buffer = NULL; + } +bail_out: + free(acls); + } else { + *buffer = NULL; + } + rc = bRC_BXATTR_ok; +#endif /* HAVE_EXTENDED_ACL */ + return rc; +#else /* HAVE_ACL */ + return bRC_BXATTR_ok; +#endif /* HAVE_ACL */ +} + +/* + * Low level OS specific runtime to set ACL on XATTR. 
The ACL data is set from supplied text + * + * in: + * jcr - Job Control Record + * fd - an opened file descriptor of the restored attribute + * buffer - a pointer to the memory buffer where we will render an acl text + * out: + * bRC_BXATTR_ok - backup acl for extended attribute finish without problems + * bRC_BXATTR_inval - input variables are invalid (null) + * + */ +bRC_BXATTR BXATTR_Solaris::os_set_xattr_acl(JCR *jcr, int fd, char *name, char *acltext) +{ +// a function is valid only when Bacula have a support for ACL +#ifdef HAVE_ACL + + bRC_BXATTR rc = bRC_BXATTR_ok; + + /* sanity check of input variables */ + if (jcr == NULL || name == NULL || acltext == NULL || fd < 0){ + return bRC_BXATTR_inval; + } + +#ifdef HAVE_EXTENDED_ACL + + int error; + acl_t *aclp = NULL; + + if ((error = acl_fromtext(acltext, &aclp)) != 0){ + Mmsg1(jcr->errmsg, _("Unable to convert acl from text on file \"%s\"\n"), jcr->last_fname); + return bRC_BXATTR_error; + } + + if (facl_set(fd, aclp) != 0){ + berrno be; + + Mmsg3(jcr->errmsg, _("Unable to restore acl of xattr %s on file \"%s\": ERR=%s\n"), name, jcr->last_fname, be.bstrerror()); + Dmsg3(100, "Unable to restore acl of xattr %s on file \"%s\": ERR=%s\n", name, jcr->last_fname, be.bstrerror()); + rc = bRC_BXATTR_error; + } + + if (aclp){ + acl_free(aclp); + } + +#else /* !HAVE_EXTENDED_ACL */ + + int n; + aclent_t *acls = NULL; + + acls = aclfromtext(acltext, &n); + if (acls){ + if (facl(fd, SETACL, n, acls) != 0){ + berrno be; + + Mmsg3(jcr->errmsg, _("Unable to restore acl of xattr %s on file \"%s\": ERR=%s\n"), name, jcr->last_fname, be.bstrerror()); + Dmsg3(100, "Unable to restore acl of xattr %s on file \"%s\": ERR=%s\n", name, jcr->last_fname, be.bstrerror()); + rc = bRC_BXATTR_error; + } + actuallyfree(acls); + } + +#endif /* HAVE_EXTENDED_ACL */ + + return rc; +#else /* HAVE_ACL */ + return bRC_BXATTR_ok; +#endif /* HAVE_ACL */ +}; + +/* + * Return a list of xattr names in newly allocated pool memory and a length of the allocated buffer. + * It allocates a memory with poolmem subroutines every time a function is called, so it must be freed + * when not needed. 
+ * + * in/out - check API at bxattr.h + */ +bRC_BXATTR BXATTR_Solaris::os_get_xattr_names (JCR *jcr, POOLMEM ** pxlist, uint32_t * xlen){ + + int xattrdfd; + DIR *dirp; + struct dirent *dp; + + int len; + int slen; + POOLMEM * list; + + /* check input data */ + if (jcr == NULL || xlen == NULL || pxlist == NULL){ + return bRC_BXATTR_inval; + } + + /* Open the xattr stream on file */ + Dmsg1(500, "os_get_xattr_names on file: %s\n", jcr->last_fname); + if ((xattrdfd = attropen(jcr->last_fname, ".", O_RDONLY)) < 0){ + berrno be; + + switch (errno){ + case ENOENT: + /* no file available, skip it */ + return bRC_BXATTR_skip; + case EINVAL: + /* no xattr supported on file skip it */ + return bRC_BXATTR_skip; + default: + Mmsg2(jcr->errmsg, _("Unable to open xattr on file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + Dmsg2(100, "Unable to open xattr on file \"%s\": ERR=%s\n", jcr->last_fname, be.bstrerror()); + return bRC_BXATTR_error; + } + } + + /* open an extended file directory to read all xattr names */ + if ((dirp = fdopendir(xattrdfd)) == (DIR *)NULL){ + berrno be; + + Mmsg2(jcr->errmsg, _("Unable to list the xattr on file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + Dmsg3(100, "Unable to fdopendir xattr on file \"%s\" using fd %d: ERR=%s\n", jcr->last_fname, xattrdfd, be.bstrerror()); + close(xattrdfd); + return bRC_BXATTR_error; + } + + /* + * allocate memory for the extented attribute list + * default size is a 4k for PM_BSOCK, which should be sufficient in most cases + */ + list = get_pool_memory(PM_BSOCK); + // Dmsg1(400, "allocated xlist: %p\n", list); + memset(list, 0, sizeof_pool_memory(list)); + bstrncpy(list, ".", sizeof_pool_memory(list)); + len = strlen(list) + 1; + + /* read all directory entries as a xattr names */ + while ((dp = readdir(dirp)) != NULL){ + + /* skip '.' as we added it above */ + if (bstrcmp(dp->d_name, ".")){ + continue; + } + + Dmsg1(500, "Found attribute: %s\n", dp->d_name); + /* compute a buffer length = string length and nul char */ + slen = strlen (dp->d_name) + 1; + list = check_pool_memory_size(list, len + slen); + // Dmsg3(400, "xlist: %p len: %i slen: %i\n", list, len, slen); + /* copy the name into a list */ + bstrncpy(list + len, dp->d_name, sizeof_pool_memory(list) - len); + len += slen; + } + if (closedir(dirp) < 0){ + berrno be; + + Mmsg2(jcr->errmsg, _("Unable to close xattr list on file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + Dmsg2(100, "Unable to close xattr list on file \"%s\": ERR=%s\n", jcr->last_fname, be.bstrerror()); + return bRC_BXATTR_error; + } + + *pxlist = list; + *xlen = len; + + return bRC_BXATTR_ok; +}; + +/* + * Return a value of the requested attribute name and a length of the allocated buffer. + * It allocates a memory with poolmem subroutines every time a function is called, so it must be freed + * when not needed. 
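 *
 * Editorial note, not part of the imported source: on success the returned
 * value may legitimately be empty, since a NULL pointer and zero length are
 * produced for directory attributes and zero-length attribute files below.
 * A hypothetical caller (attribute name is a placeholder) should therefore
 * check before freeing:
 *
 *    char *value = NULL;
 *    uint32_t vlen = 0;
 *    if (os_get_xattr_value(jcr, (char *)"myattr", &value, &vlen) == bRC_BXATTR_ok){
 *       if (value != NULL){
 *          free_pool_memory(value);
 *       }
 *    }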
+ * + * in/out - check API at bxattr.h + */ +bRC_BXATTR BXATTR_Solaris::os_get_xattr_value (JCR *jcr, char * name, char ** pvalue, uint32_t * plen){ + + int xattrfd; + int len; + POOLMEM * value; + struct stat st; + + /* check input data */ + if (jcr == NULL || name == NULL || plen == NULL || pvalue == NULL){ + return bRC_BXATTR_inval; + } + + /* Open the xattr on file */ + if ((xattrfd = attropen(jcr->last_fname, name, O_RDONLY)) < 0){ + berrno be; + + switch (errno){ + case ENOENT: + /* no file available, skip it */ + return bRC_BXATTR_skip; + case EINVAL: + /* no xattr supported on file skip it */ + return bRC_BXATTR_skip; + default: + Mmsg2(jcr->errmsg, _("Unable to open xattr on file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + Dmsg2(100, "Unable to open xattr on file \"%s\": ERR=%s\n", jcr->last_fname, be.bstrerror()); + return bRC_BXATTR_error; + } + } + + /* get some info about extended attribute */ + if (fstat(xattrfd, &st) < 0){ + berrno be; + + switch (errno){ + case ENOENT: + /* no file available, skip it */ + return bRC_BXATTR_skip; + default: + Mmsg3(jcr->errmsg, _("Unable to stat xattr \"%s\" on file \"%s\": ERR=%s\n"), name, jcr->last_fname, be.bstrerror()); + Dmsg3(100, "Unable to stat xattr \"%s\" on file \"%s\": ERR=%s\n", name, jcr->last_fname, be.bstrerror()); + return bRC_BXATTR_error; + } + } + + /* default empty value */ + value = NULL; + len = 0; + + /* only a file has a data/value we should care about */ + if ((st.st_mode & S_IFMT) != S_IFDIR){ + /* get size of the attribute data/value */ + len = lseek(xattrfd, 0, SEEK_END); + lseek(xattrfd, 0, SEEK_SET); + } + if (len > 0){ + /* + * allocate memory for the extented attribute value + * default size is a 256B for PM_MESSAGE, so we need to check required size + */ + value = get_pool_memory(PM_MESSAGE); + value = check_pool_memory_size(value, len); + memset(value, 0, len); + /* read teh data */ + read (xattrfd, value, len); + close(xattrfd); + } + + /* setup return data */ + *pvalue = value; + *plen = len; + return bRC_BXATTR_ok; +}; + +/* + * Low level OS specific runtime to set extended attribute on file + * + * in/out - check API at bxattr.h + */ +bRC_BXATTR BXATTR_Solaris::os_set_xattr (JCR *jcr, bool extended, char *content, uint32_t length){ + + char *bp; + char *name; + char *attribs; + char *acltext; + char *lntarget; + int attrfd = 0; + int attrdirfd = 0; + int cnt; + int len; + int inum; + struct stat st; + struct timeval times[2]; + bRC_BXATTR rc = bRC_BXATTR_ok; + mode_t stmode; + + /* check input data */ + if (jcr == NULL || content == NULL){ + return bRC_BXATTR_inval; + } + /* + * Parse content stream and extract valuable data. + * STD/EXT: \0\0\0 + * where .inum = 0 + * LNK: \0\0\0 + * where .inum != 0 + */ + bp = content; + if (bp[0] == '/'){ + bp = content + 1; /* original code saves attribute name with '/' */ + } + /* attribute name in name variable */ + name = bp; + len = strlen (bp); + bp += len + 1; + + /* attribute encoded stat in attribs variable */ + attribs = bp; + len = strlen (bp); + bp += len + 1; + /* decode attributes into st and inum which distinguish between STX/EXT and LNK xattrs */ + decode_stat(attribs, &st, sizeof(st), &inum); + + /* acltext and link target name goes here */ + acltext = lntarget = bp; + len = strlen (bp); + /* now 'bp' should have the xattr data */ + bp += len + 1; + + /* + * Open the xattr on which to restore the xattrs read-only. 
+ */ + if ((attrdirfd = attropen(jcr->last_fname, ".", O_RDONLY)) < 0){ + berrno be; + + Mmsg2(jcr->errmsg, _("Unable to open file \"%s\": ERR=%s\n"), jcr->last_fname, be.bstrerror()); + Dmsg2(100, "Unable to open file \"%s\": ERR=%s\n", jcr->last_fname, be.bstrerror()); + rc = bRC_BXATTR_error; + goto bail_out; + } + + stmode = st.st_mode & S_IFMT; + Dmsg1(800, "st.st_mode: 0x%X\n", stmode); + switch (stmode) { + case S_IFDIR: + /* if it is a current dir on file then we can restore acl data only */ + Dmsg1(400, "Processing dir xattr: %s\n", name); + if (bstrcmp(name, ".")){ + break; + } + break; + case S_IFREG: + if (inum != 0){ + /* it is a linked attribute, perform a link operation */ + Dmsg2(400, "Processing linked xattr: %s => %s\n", lntarget, name); + unlinkat(attrdirfd, name, 0); + if (linkat(attrdirfd, lntarget, attrdirfd, name, 0) < 0){ + berrno be; + + Mmsg4(jcr->errmsg, _("Unable to link xattr %s to %s on file \"%s\": ERR=%s\n"), name, lntarget, jcr->last_fname, be.bstrerror()); + Dmsg4(100, "Unable to link xattr %s to %s on file \"%s\": ERR=%s\n", name, lntarget, jcr->last_fname, be.bstrerror()); + rc = bRC_BXATTR_error; + goto bail_out; + } + goto bail_out; + } else { + Dmsg1(400, "Processing xattr: %s\n", name); + if (!extended){ + unlinkat(attrdirfd, name, 0); + } + if ((attrfd = openat(attrdirfd, name, O_CREAT | O_RDWR | O_TRUNC, st.st_mode)) < 0){ + berrno be; + + Mmsg3(jcr->errmsg, _("Unable to open attribute \"%s\" at file \"%s\": ERR=%s\n"), name, jcr->last_fname, be.bstrerror()); + Dmsg3(100, "Unable to open attribute \"%s\" at file \"%s\": ERR=%s\n", name, jcr->last_fname, be.bstrerror()); + rc = bRC_BXATTR_error; + goto bail_out; + } + /* restore any data if are available */ + if (st.st_size > 0){ + cnt = write (attrfd, bp, length - (bp - content) ); + if (cnt < 0){ + berrno be; + + Mmsg3(jcr->errmsg, _("Unable to restore data of xattr %s on file \"%s\": ERR=%s\n"), name, jcr->last_fname, be.bstrerror()); + Dmsg3(100, "Unable to restore data of xattr %s on file \"%s\": ERR=%s\n", name, jcr->last_fname, be.bstrerror()); + rc = bRC_BXATTR_error; + goto bail_out; + } + } + } + break; + default: + Dmsg3(100, "Unsupported extended attribute type: 0x%X for \"%s\" on file \"%s\"\n", stmode, name, jcr->last_fname); + Mmsg3(jcr->errmsg, _("Unsupported extended attribute type: 0x%X for \"%s\" on file \"%s\"\n"), stmode, name, jcr->last_fname); + goto bail_out; + } + + /* file data restored, so setup permissions and acl data */ + if (!extended){ + if (fchownat(attrdirfd, name, st.st_uid, st.st_gid, AT_SYMLINK_NOFOLLOW) < 0){ + berrno be; + + switch (errno){ + case EINVAL: + case ENOENT: + break; + default: + Mmsg3(jcr->errmsg, _("Unable to restore owner of xattr %s on file \"%s\": ERR=%s\n"), name, jcr->last_fname, be.bstrerror()); + Dmsg3(100, "Unable to restore owner of xattr %s on file \"%s\": ERR=%s\n", name, jcr->last_fname, be.bstrerror()); + rc = bRC_BXATTR_error; + } + goto bail_out; + } + } + +#ifdef HAVE_ACL + if (strlen(acltext)){ + rc = os_set_xattr_acl(jcr, attrfd, name, acltext); + if (rc != bRC_BXATTR_ok){ + goto bail_out; + } + } +#endif /* HAVE_ACL */ + + /* now restore a access and modification time - only for standard attribute */ + if (!extended){ + times[0].tv_sec = st.st_atime; + times[0].tv_usec = 0; + times[1].tv_sec = st.st_mtime; + times[1].tv_usec = 0; + + if (futimesat(attrdirfd, name, times) < 0){ + berrno be; + + Mmsg3(jcr->errmsg, _("Unable to restore filetimes of xattr %s on file \"%s\": ERR=%s\n"), name, jcr->last_fname, be.bstrerror()); + 
Dmsg3(100, "Unable to restore filetimes of xattr %s on file \"%s\": ERR=%s\n", name, jcr->last_fname, be.bstrerror()); + rc = bRC_BXATTR_error; + goto bail_out; + } + } + +bail_out: + if (attrfd != 0){ + close(attrfd); + } + if (attrdirfd != 0){ + close(attrdirfd); + } + return rc; +}; + +#endif /* HAVE_XATTR */ + +#endif /* HAVE_SUN_OS */ diff --git a/src/filed/bxattr_solaris.h b/src/filed/bxattr_solaris.h new file mode 100644 index 00000000..8ab9c60b --- /dev/null +++ b/src/filed/bxattr_solaris.h @@ -0,0 +1,162 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. + */ +/** + * Major refactoring of XATTR code written by: + * + * Radosław Korzeniewski, MMXVI + * radoslaw@korzeniewski.net, radekk@inteos.pl + * Inteos Sp. z o.o. http://www.inteos.pl/ + * + */ + +#ifndef __BXATTR_Solaris_H_ +#define __BXATTR_Solaris_H_ + +#if defined(HAVE_SUN_OS) + +/* check if XATTR support is enabled */ +#if defined(HAVE_XATTR) + +/* + * + */ +#if defined(HAVE_SYS_ATTR_H) +#include +#elif defined(HAVE_ATTR_H) +#include +#endif + +/* + * Required for XATTR/ACL backup + */ +#ifdef HAVE_SYS_ACL_H +#include +bool acl_is_trivial(int count, aclent_t *entries); +#endif + +/* + * Cache structure in alist + */ +struct BXATTR_Solaris_Cache { + ino_t inode; + char * name; +}; + +/* + * This is a Solaris specific XATTR implementation. + * + * Solaris extended attributes were introduced in Solaris 9 + * by PSARC 1999/209 + * + * Solaris extensible attributes were introduced in OpenSolaris + * by PSARC 2007/315 Solaris extensible attributes are also + * sometimes called extended system attributes. + * + * man fsattr(5) on Solaris gives a wealth of info. The most + * important bits are: + * + * Attributes are logically supported as files within the file + * system. The file system is therefore augmented with an + * orthogonal name space of file attributes. Any file (includ- + * ing attribute files) can have an arbitrarily deep attribute + * tree associated with it. Attribute values are accessed by + * file descriptors obtained through a special attribute inter- + * face. This logical view of "attributes as files" allows the + * leveraging of existing file system interface functionality + * to support the construction, deletion, and manipulation of + * attributes. + * + * The special files "." and ".." retain their accustomed + * semantics within the attribute hierarchy. The "." attribute + * file refers to the current directory and the ".." attribute + * file refers to the parent directory. The unnamed directory + * at the head of each attribute tree is considered the "child" + * of the file it is associated with and the ".." file refers + * to the associated file. For any non-directory file with + * attributes, the ".." entry in the unnamed directory refers + * to a file that is not a directory. + * + * Conceptually, the attribute model is fully general. 
Extended + * attributes can be any type of file (doors, links, direc- + * tories, and so forth) and can even have their own attributes + * (fully recursive). As a result, the attributes associated + * with a file could be an arbitrarily deep directory hierarchy + * where each attribute could have an equally complex attribute + * tree associated with it. Not all implementations are able + * to, or want to, support the full model. Implementation are + * therefore permitted to reject operations that are not sup- + * ported. For example, the implementation for the UFS file + * system allows only regular files as attributes (for example, + * no sub-directories) and rejects attempts to place attributes + * on attributes. + * + * The following list details the operations that are rejected + * in the current implementation: + * + * link Any attempt to create links between + * attribute and non-attribute space + * is rejected to prevent security- + * related or otherwise sensitive + * attributes from being exposed, and + * therefore manipulable, as regular + * files. + * + * rename Any attempt to rename between + * attribute and non-attribute space + * is rejected to prevent an already + * linked file from being renamed and + * thereby circumventing the link res- + * triction above. + * + * mkdir, symlink, mknod Any attempt to create a "non- + * regular" file in attribute space is + * rejected to reduce the functional- + * ity, and therefore exposure and + * risk, of the initial implementa- + * tion. + * + * The entire available name space has been allocated to "gen- + * eral use" to bring the implementation in line with the NFSv4 + * draft standard [NFSv4]. That standard defines "named attri- + * butes" (equivalent to Solaris Extended Attributes) with no + * naming restrictions. All Sun applications making use of + * opaque extended attributes will use the prefix "SUNW". + */ +class BXATTR_Solaris : public BXATTR { +private: + alist * cache; + bRC_BXATTR os_backup_xattr (JCR *jcr, FF_PKT *ff_pkt); + bRC_BXATTR os_restore_xattr (JCR *jcr, int stream, char *content, uint32_t length); + bRC_BXATTR os_get_xattr_names (JCR *jcr, POOLMEM **list, uint32_t *length); + bRC_BXATTR os_get_xattr_value (JCR *jcr, char * name, char ** pvalue, uint32_t * plen); + bRC_BXATTR os_set_xattr (JCR *jcr, bool extended, char *content, uint32_t length); + bRC_BXATTR os_get_xattr_acl(JCR *jcr, int fd, char **buffer, char* attrname); + bRC_BXATTR os_set_xattr_acl(JCR *jcr, int fd, char *name, char *acltext); + inline char * find_xattr_cache(JCR *jcr, ino_t ino, char * name); + inline void delete_xattr_cache(); +public: + BXATTR_Solaris (); + ~BXATTR_Solaris (); +}; + +#endif /* HAVE_XATTR */ + +#endif /* HAVE_SUN_OS */ + +#endif /* __BXATTR_Solaris_H_ */ diff --git a/src/filed/crypto.c b/src/filed/crypto.c new file mode 100644 index 00000000..1c2f6754 --- /dev/null +++ b/src/filed/crypto.c @@ -0,0 +1,321 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2015 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+ */ +/* + * Crypto subroutines used in backup.c + * + * Split from backup.c August 2014 + * + * Kern Sibbald, August MMXIV + * + */ + +#include "bacula.h" +#include "filed.h" +#include "ch.h" +#include "backup.h" + + +bool crypto_allocate_ctx(bctx_t &bctx) +{ + JCR *jcr = bctx.jcr; + + if ((bctx.ff_pkt->flags & FO_SPARSE) || (bctx.ff_pkt->flags & FO_OFFSETS)) { + Jmsg0(jcr, M_FATAL, 0, _("Encrypting sparse or offset data not supported.\n")); + return false; + } + /** Allocate the cipher context */ + if ((bctx.cipher_ctx = crypto_cipher_new(jcr->crypto.pki_session, true, + &bctx.cipher_block_size)) == NULL) { + /* Shouldn't happen! */ + Jmsg0(jcr, M_FATAL, 0, _("Failed to initialize encryption context.\n")); + return false; + } + + /** + * Grow the crypto buffer, if necessary. + * crypto_cipher_update() will buffer up to (cipher_block_size - 1). + * We grow crypto_buf to the maximum number of blocks that + * could be returned for the given read buffer size. + * (Using the larger of either rsize or max_compress_len) + */ + jcr->crypto.crypto_buf = check_pool_memory_size(jcr->crypto.crypto_buf, + (MAX(bctx.rsize + (int)sizeof(uint32_t), (int32_t)bctx.max_compress_len) + + bctx.cipher_block_size - 1) / bctx.cipher_block_size * bctx.cipher_block_size); + + bctx.wbuf = jcr->crypto.crypto_buf; /* Encrypted, possibly compressed output here. */ + return true; +} + + +bool crypto_setup_digests(bctx_t &bctx) +{ + JCR *jcr = bctx.jcr; + FF_PKT *ff_pkt = bctx.ff_pkt; + + crypto_digest_t signing_algorithm = (crypto_digest_t)me->pki_digest; + + /** + * Setup for digest handling. If this fails, the digest will be set to NULL + * and not used. Note, the digest (file hash) can be any one of the four + * algorithms below. + * + * The signing digest is a single algorithm depending on + * whether or not we have SHA2. + * ****FIXME**** the signing algoritm should really be + * determined a different way!!!!!! What happens if + * sha2 was available during backup but not restore? + */ + if (ff_pkt->flags & FO_MD5) { + bctx.digest = crypto_digest_new(jcr, CRYPTO_DIGEST_MD5); + bctx.digest_stream = STREAM_MD5_DIGEST; + + } else if (ff_pkt->flags & FO_SHA1) { + bctx.digest = crypto_digest_new(jcr, CRYPTO_DIGEST_SHA1); + bctx.digest_stream = STREAM_SHA1_DIGEST; + + } else if (ff_pkt->flags & FO_SHA256) { + bctx.digest = crypto_digest_new(jcr, CRYPTO_DIGEST_SHA256); + bctx.digest_stream = STREAM_SHA256_DIGEST; + + } else if (ff_pkt->flags & FO_SHA512) { + bctx.digest = crypto_digest_new(jcr, CRYPTO_DIGEST_SHA512); + bctx.digest_stream = STREAM_SHA512_DIGEST; + } + + /** Did digest initialization fail? */ + if (bctx.digest_stream != STREAM_NONE && bctx.digest == NULL) { + Jmsg(jcr, M_WARNING, 0, _("%s digest initialization failed\n"), + stream_to_ascii(bctx.digest_stream)); + } + + /** + * Set up signature digest handling. If this fails, the signature digest + * will be set to NULL and not used. + */ + /* TODO landonf: We should really only calculate the digest once, for + * both verification and signing. 
+ */ + if (jcr->crypto.pki_sign) { + bctx.signing_digest = crypto_digest_new(jcr, signing_algorithm); + + /** Full-stop if a failure occurred initializing the signature digest */ + if (bctx.signing_digest == NULL) { + Jmsg(jcr, M_NOTSAVED, 0, _("%s signature digest initialization failed\n"), + stream_to_ascii(signing_algorithm)); + jcr->JobErrors++; + return false; + } + } + + /** Enable encryption */ + if (jcr->crypto.pki_encrypt) { + ff_pkt->flags |= FO_ENCRYPT; + } + return true; +} + + +bool crypto_session_start(JCR *jcr) +{ + crypto_cipher_t cipher = (crypto_cipher_t) me->pki_cipher; + + /** + * Create encryption session data and a cached, DER-encoded session data + * structure. We use a single session key for each backup, so we'll encode + * the session data only once. + */ + if (jcr->crypto.pki_encrypt) { + uint32_t size = 0; + + /** Create per-job session encryption context */ + jcr->crypto.pki_session = crypto_session_new(cipher, jcr->crypto.pki_recipients); + if (!jcr->crypto.pki_session) { + Jmsg(jcr, M_FATAL, 0, _("Unsupported cipher on this system.\n")); + return false; + } + + /** Get the session data size */ + if (!crypto_session_encode(jcr->crypto.pki_session, (uint8_t *)0, &size)) { + Jmsg(jcr, M_FATAL, 0, _("An error occurred while encrypting the stream.\n")); + return false; + } + + /** Allocate buffer */ + jcr->crypto.pki_session_encoded = get_memory(size); + + /** Encode session data */ + if (!crypto_session_encode(jcr->crypto.pki_session, (uint8_t *)jcr->crypto.pki_session_encoded, &size)) { + Jmsg(jcr, M_FATAL, 0, _("An error occurred while encrypting the stream.\n")); + return false; + } + + /** ... and store the encoded size */ + jcr->crypto.pki_session_encoded_size = size; + + /** Allocate the encryption/decryption buffer */ + jcr->crypto.crypto_buf = get_memory(CRYPTO_CIPHER_MAX_BLOCK_SIZE); + } + return true; +} + +void crypto_session_end(JCR *jcr) +{ + if (jcr->crypto.crypto_buf) { + free_pool_memory(jcr->crypto.crypto_buf); + jcr->crypto.crypto_buf = NULL; + } + if (jcr->crypto.pki_session) { + crypto_session_free(jcr->crypto.pki_session); + } + if (jcr->crypto.pki_session_encoded) { + free_pool_memory(jcr->crypto.pki_session_encoded); + jcr->crypto.pki_session_encoded = NULL; + } +} + +bool crypto_session_send(JCR *jcr, BSOCK *sd) +{ + POOLMEM *msgsave; + + /** Send our header */ + Dmsg2(100, "Send hdr fi=%ld stream=%d\n", jcr->JobFiles, STREAM_ENCRYPTED_SESSION_DATA); + sd->fsend("%ld %d %lld", jcr->JobFiles, STREAM_ENCRYPTED_SESSION_DATA, + (int64_t)jcr->ff->statp.st_size); + msgsave = sd->msg; + sd->msg = jcr->crypto.pki_session_encoded; + sd->msglen = jcr->crypto.pki_session_encoded_size; + jcr->JobBytes += sd->msglen; + + Dmsg1(100, "Send data len=%d\n", sd->msglen); + sd->send(); + sd->msg = msgsave; + sd->signal(BNET_EOD); + return true; +} + +bool crypto_terminate_digests(bctx_t &bctx) +{ + JCR *jcr; + BSOCK *sd; + FF_PKT *ff_pkt; + + jcr = bctx.jcr; + sd = bctx.sd; + ff_pkt = bctx.ff_pkt; + + /** Terminate the signing digest and send it to the Storage daemon */ + if (bctx.signing_digest) { + uint32_t size = 0; + + if ((bctx.sig = crypto_sign_new(jcr)) == NULL) { + Jmsg(jcr, M_FATAL, 0, _("Failed to allocate memory for crypto signature.\n")); + return false; + } + + if (!crypto_sign_add_signer(bctx.sig, bctx.signing_digest, jcr->crypto.pki_keypair)) { + Jmsg(jcr, M_FATAL, 0, _("An error occurred while adding signer the stream.\n")); + return false; + } + + /** Get signature size */ + if (!crypto_sign_encode(bctx.sig, NULL, &size)) { + Jmsg(jcr, 
M_FATAL, 0, _("An error occurred while signing the stream.\n")); + return false; + } + + /** Grow the bsock buffer to fit our message if necessary */ + if (sizeof_pool_memory(sd->msg) < (int32_t)size) { + sd->msg = realloc_pool_memory(sd->msg, size); + } + + /** Send our header */ + sd->fsend("%ld %ld 0", jcr->JobFiles, STREAM_SIGNED_DIGEST); + Dmsg1(300, "bfiled>stored:header %s\n", sd->msg); + + /** Encode signature data */ + if (!crypto_sign_encode(bctx.sig, (uint8_t *)sd->msg, &size)) { + Jmsg(jcr, M_FATAL, 0, _("An error occurred while signing the stream.\n")); + return false; + } + + sd->msglen = size; + sd->send(); + sd->signal(BNET_EOD); /* end of checksum */ + } + + /** Terminate any digest and send it to Storage daemon */ + if (bctx.digest) { + uint32_t size; + + sd->fsend("%ld %d 0", jcr->JobFiles, bctx.digest_stream); + Dmsg1(300, "bfiled>stored:header %s\n", sd->msg); + + size = CRYPTO_DIGEST_MAX_SIZE; + + /** Grow the bsock buffer to fit our message if necessary */ + if (sizeof_pool_memory(sd->msg) < (int32_t)size) { + sd->msg = realloc_pool_memory(sd->msg, size); + } + + if (!crypto_digest_finalize(bctx.digest, (uint8_t *)sd->msg, &size)) { + Jmsg(jcr, M_FATAL, 0, _("An error occurred finalizing signing the stream.\n")); + return false; + } + + /* Keep the checksum if this file is a hardlink */ + if (ff_pkt->linked) { + ff_pkt_set_link_digest(ff_pkt, bctx.digest_stream, sd->msg, size); + } + + sd->msglen = size; + sd->send(); + sd->signal(BNET_EOD); /* end of checksum */ + } + + /* Check if original file has a digest, and send it */ + if (ff_pkt->type == FT_LNKSAVED && ff_pkt->digest) { + Dmsg2(300, "Link %s digest %d\n", ff_pkt->fname, ff_pkt->digest_len); + sd->fsend("%ld %d 0", jcr->JobFiles, ff_pkt->digest_stream); + + sd->msg = check_pool_memory_size(sd->msg, ff_pkt->digest_len); + memcpy(sd->msg, ff_pkt->digest, ff_pkt->digest_len); + sd->msglen = ff_pkt->digest_len; + sd->send(); + + sd->signal(BNET_EOD); /* end of hardlink record */ + } + + return true; +} + +void crypto_free(bctx_t &bctx) +{ + if (bctx.digest) { + crypto_digest_free(bctx.digest); + bctx.digest = NULL; + } + if (bctx.signing_digest) { + crypto_digest_free(bctx.signing_digest); + bctx.signing_digest = NULL; + } + if (bctx.sig) { + crypto_sign_free(bctx.sig); + bctx.sig = NULL; + } +} diff --git a/src/filed/estimate.c b/src/filed/estimate.c new file mode 100644 index 00000000..c6b1594c --- /dev/null +++ b/src/filed/estimate.c @@ -0,0 +1,117 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2015 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula File Daemon estimate.c + * Make and estimate of the number of files and size to be saved. + * + * Kern Sibbald, September MMI + * + * Version $Id$ + * + */ + +#include "bacula.h" +#include "filed.h" + +static int tally_file(JCR *jcr, FF_PKT *ff_pkt, bool); + +/* + * Find all the requested files and count them. 
+ */ +int make_estimate(JCR *jcr) +{ + int stat; + + jcr->setJobStatus(JS_Running); + + set_find_options((FF_PKT *)jcr->ff, jcr->incremental, jcr->mtime); + /* in accurate mode, we overwrite the find_one check function */ + if (jcr->accurate) { + set_find_changed_function((FF_PKT *)jcr->ff, accurate_check_file); + } + + stat = find_files(jcr, (FF_PKT *)jcr->ff, tally_file, plugin_estimate); + accurate_free(jcr); + return stat; +} + +/* + * Called here by find() for each file included. + * + */ +static int tally_file(JCR *jcr, FF_PKT *ff_pkt, bool top_level) +{ + ATTR attr; + + if (job_canceled(jcr)) { + return 0; + } + switch (ff_pkt->type) { + case FT_LNKSAVED: /* Hard linked, file already saved */ + case FT_REGE: + case FT_REG: + case FT_LNK: + case FT_NORECURSE: + case FT_NOFSCHG: + case FT_INVALIDFS: + case FT_INVALIDDT: + case FT_REPARSE: + case FT_JUNCTION: + case FT_DIREND: + case FT_SPEC: + case FT_RAW: + case FT_FIFO: + break; + case FT_DIRBEGIN: + case FT_NOACCESS: + case FT_NOFOLLOW: + case FT_NOSTAT: + case FT_DIRNOCHG: + case FT_NOCHG: + case FT_ISARCH: + case FT_NOOPEN: + default: + return 1; + } + + if (ff_pkt->type != FT_LNKSAVED && S_ISREG(ff_pkt->statp.st_mode)) { + if (ff_pkt->statp.st_size > 0) { + jcr->JobBytes += ff_pkt->statp.st_size; + } +#ifdef HAVE_DARWIN_OS + if (ff_pkt->flags & FO_HFSPLUS) { + if (ff_pkt->hfsinfo.rsrclength > 0) { + jcr->JobBytes += ff_pkt->hfsinfo.rsrclength; + } + jcr->JobBytes += 32; /* Finder info */ + } +#endif + } + jcr->num_files_examined++; + jcr->JobFiles++; /* increment number of files seen */ + if (jcr->listing) { + memcpy(&attr.statp, &ff_pkt->statp, sizeof(struct stat)); + attr.type = ff_pkt->type; + attr.ofname = (POOLMEM *)ff_pkt->fname; + attr.olname = (POOLMEM *)ff_pkt->link; + print_ls_output(jcr, &attr); + } + /* TODO: Add loop over jcr->file_list to get Accurate deleted files*/ + return 1; +} diff --git a/src/filed/fd_plugins.c b/src/filed/fd_plugins.c new file mode 100644 index 00000000..eeec495d --- /dev/null +++ b/src/filed/fd_plugins.c @@ -0,0 +1,2258 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Main program to test loading and running Bacula plugins. + * Destined to become Bacula pluginloader, ... 
+ * + * Kern Sibbald, October 2007 + */ +#include "bacula.h" +#include "filed.h" + +extern CLIENT *me; +extern DLL_IMP_EXP char *exepath; +extern DLL_IMP_EXP char *version; +extern DLL_IMP_EXP char *dist_name; + +const int dbglvl = 150; +#ifdef HAVE_WIN32 +const char *plugin_type = "-fd.dll"; +#else +const char *plugin_type = "-fd.so"; +#endif + +extern int save_file(JCR *jcr, FF_PKT *ff_pkt, bool top_level); +extern bool check_changes(JCR *jcr, FF_PKT *ff_pkt); + +/* Function pointers to be set here */ +extern DLL_IMP_EXP int (*plugin_bopen)(BFILE *bfd, const char *fname, uint64_t flags, mode_t mode); +extern DLL_IMP_EXP int (*plugin_bclose)(BFILE *bfd); +extern DLL_IMP_EXP ssize_t (*plugin_bread)(BFILE *bfd, void *buf, size_t count); +extern DLL_IMP_EXP ssize_t (*plugin_bwrite)(BFILE *bfd, void *buf, size_t count); +extern DLL_IMP_EXP boffset_t (*plugin_blseek)(BFILE *bfd, boffset_t offset, int whence); + + +/* Forward referenced functions */ +static bRC baculaGetValue(bpContext *ctx, bVariable var, void *value); +static bRC baculaSetValue(bpContext *ctx, bVariable var, void *value); +static bRC baculaRegisterEvents(bpContext *ctx, ...); +static bRC baculaJobMsg(bpContext *ctx, const char *file, int line, + int type, utime_t mtime, const char *fmt, ...); +static bRC baculaDebugMsg(bpContext *ctx, const char *file, int line, + int level, const char *fmt, ...); +static void *baculaMalloc(bpContext *ctx, const char *file, int line, + size_t size); +static void baculaFree(bpContext *ctx, const char *file, int line, void *mem); +static bRC baculaAddExclude(bpContext *ctx, const char *file); +static bRC baculaAddInclude(bpContext *ctx, const char *file); +static bRC baculaAddOptions(bpContext *ctx, const char *opts); +static bRC baculaAddRegex(bpContext *ctx, const char *item, int type); +static bRC baculaAddWild(bpContext *ctx, const char *item, int type); +static bRC baculaNewOptions(bpContext *ctx); +static bRC baculaNewInclude(bpContext *ctx); +static bRC baculaNewPreInclude(bpContext *ctx); +static bool is_plugin_compatible(Plugin *plugin); +static bool get_plugin_name(JCR *jcr, char *cmd, int *ret); +static bRC baculaCheckChanges(bpContext *ctx, struct save_pkt *sp); +static bRC baculaAcceptFile(bpContext *ctx, struct save_pkt *sp); + +/* + * These will be plugged into the global pointer structure for + * the findlib. 
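+ *
+ * Concretely, load_fd_plugins() below simply re-points the findlib hooks at
+ * these wrappers:
+ *
+ *    plugin_bopen  = my_plugin_bopen;
+ *    plugin_bread  = my_plugin_bread;
+ *    ...
+ *
+ * so that, while a command plugin is driving a file, I/O issued through the
+ * findlib is routed to the plugin's pluginIO() entry point instead of the
+ * normal system calls.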
+ */ +static int my_plugin_bopen(BFILE *bfd, const char *fname, uint64_t flags, mode_t mode); +static int my_plugin_bclose(BFILE *bfd); +static ssize_t my_plugin_bread(BFILE *bfd, void *buf, size_t count); +static ssize_t my_plugin_bwrite(BFILE *bfd, void *buf, size_t count); +static boffset_t my_plugin_blseek(BFILE *bfd, boffset_t offset, int whence); + + +/* Bacula info */ +static bInfo binfo = { + sizeof(bInfo), + FD_PLUGIN_INTERFACE_VERSION +}; + +/* Bacula entry points */ +static bFuncs bfuncs = { + sizeof(bFuncs), + FD_PLUGIN_INTERFACE_VERSION, + baculaRegisterEvents, + baculaGetValue, + baculaSetValue, + baculaJobMsg, + baculaDebugMsg, + baculaMalloc, + baculaFree, + baculaAddExclude, + baculaAddInclude, + baculaAddOptions, + baculaAddRegex, + baculaAddWild, + baculaNewOptions, + baculaNewInclude, + baculaNewPreInclude, + baculaCheckChanges, + baculaAcceptFile +}; + +/* + * Bacula private context + */ +struct bacula_ctx { + JCR *jcr; /* jcr for plugin */ + bRC rc; /* last return code */ + bool disabled; /* set if plugin disabled */ + findINCEXE *exclude; /* pointer to exclude files */ + findINCEXE *include; /* pointer to include/exclude files */ +}; + +/* + * Test if event is for this plugin + */ +static bool for_this_plugin(Plugin *plugin, char *name, int len) +{ + Dmsg4(dbglvl, "name=%s len=%d plugin=%s plen=%d\n", name, len, plugin->file, plugin->file_len); + if (!name) { /* if no plugin name, all plugins get it */ + return true; + } + /* Return global VSS job metadata to all plugins */ + if (strcmp("job", name) == 0) { /* old V4.0 name for VSS job metadata */ + return true; + } + if (strcmp("*all*", name) == 0) { /* new v6.0 name for VSS job metadata */ + return true; + } + /* Check if this is the correct plugin */ + if (len == plugin->file_len && strncmp(plugin->file, name, len) == 0) { + return true; + } + return false; +} + + +bool is_plugin_disabled(bpContext *plugin_ctx) +{ + bacula_ctx *b_ctx; + Dsm_check(999); + if (!plugin_ctx) { + return true; + } + b_ctx = (bacula_ctx *)plugin_ctx->bContext; + if (!b_ctx) { + return true; + } + return b_ctx->disabled; +} + +bool is_plugin_disabled(JCR *jcr) +{ + return is_plugin_disabled(jcr->plugin_ctx); +} + +/** + * Create a plugin event When receiving bEventCancelCommand, this function is + * called by an other thread. 
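+ *
+ * Typical usage is simply to fire an event and let this routine fan it out
+ * to every loaded plugin, as in the TEST_PROGRAM section at the end of this
+ * file:
+ *
+ *    generate_plugin_event(jcr, bEventJobStart, (void *)"Start Job 1");
+ *    ...
+ *    generate_plugin_event(jcr, bEventJobEnd);
+ *
+ * Events such as bEventPluginCommand carry the plugin command string in
+ * value, in which case only the plugin named by that string receives the
+ * event.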
+ */ +void generate_plugin_event(JCR *jcr, bEventType eventType, void *value) +{ + bpContext *plugin_ctx; + bEvent event; + Plugin *plugin; + char *name = NULL; + int i; + int len = 0; + bool call_if_canceled = false; + restore_object_pkt *rop; + + Dsm_check(999); + if (!b_plugin_list || !jcr || !jcr->plugin_ctx_list) { + return; /* Return if no plugins loaded */ + } + + /* + * Some events are sent to only a particular plugin or must be + * called even if the job is canceled + */ + switch(eventType) { + case bEventPluginCommand: + case bEventOptionPlugin: + name = (char *)value; + if (!get_plugin_name(jcr, name, &len)) { + return; + } + break; + case bEventRestoreObject: + /* After all RestoreObject, we have it one more time with value=NULL */ + if (value) { + /* Some RestoreObjects may not have a plugin name */ + rop = (restore_object_pkt *)value; + if (*rop->plugin_name) { + name = rop->plugin_name; + get_plugin_name(jcr, name, &len); + } + } + + break; + case bEventEndBackupJob: + case bEventEndVerifyJob: + call_if_canceled = true; /* plugin *must* see this call */ + break; + case bEventStartRestoreJob: + foreach_alist_index(i, plugin, b_plugin_list) { + plugin->restoreFileStarted = false; + plugin->createFileCalled = false; + } + break; + case bEventEndRestoreJob: + call_if_canceled = true; /* plugin *must* see this call */ + break; + default: + break; + } + + /* If call_if_canceled is set, we call the plugin anyway */ + if (!call_if_canceled && jcr->is_job_canceled()) { + return; + } + + bpContext *plugin_ctx_list = (bpContext *)jcr->plugin_ctx_list; + event.eventType = eventType; + + Dmsg2(dbglvl, "plugin_ctx=%p JobId=%d\n", jcr->plugin_ctx_list, jcr->JobId); + + /* + * Pass event to every plugin (except if name is set). If name + * is set, we pass it only to the plugin with that name. 
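+ * When a name is present the match uses the length computed by
+ * get_plugin_name(): for a command such as "systemstate:/@SYSTEMSTATE/" only
+ * the plugin whose file name matches the part before the first ':'
+ * ("systemstate", len = 11) is handed the event.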
+ */ + foreach_alist_index(i, plugin, b_plugin_list) { + if (!for_this_plugin(plugin, name, len)) { + Dmsg2(dbglvl, "Not for this plugin name=%s NULL=%d\n", + name, name==NULL?1:0); + continue; + } + /* + * Note, at this point do not change + * jcr->plugin or jcr->plugin_ctx + */ + Dsm_check(999); + plugin_ctx = &plugin_ctx_list[i]; + if (is_plugin_disabled(plugin_ctx)) { + Dmsg1(50, "Plugin %s disabled\n", plugin->file); + continue; + } + if (eventType == bEventEndRestoreJob) { + Dmsg0(50, "eventType==bEventEndRestoreJob\n"); + if (jcr->plugin && jcr->plugin->restoreFileStarted) { + plug_func(jcr->plugin)->endRestoreFile(jcr->plugin_ctx); + } + if (jcr->plugin) { + jcr->plugin->restoreFileStarted = false; + jcr->plugin->createFileCalled = false; + } + } + plug_func(plugin)->handlePluginEvent(plugin_ctx, &event, value); + } + return; +} + +/** + * Check if file was seen for accurate + */ +bool plugin_check_file(JCR *jcr, char *fname) +{ + Plugin *plugin; + int rc = bRC_OK; + int i; + + Dsm_check(999); + if (!b_plugin_list || !jcr || !jcr->plugin_ctx_list || jcr->is_job_canceled()) { + return false; /* Return if no plugins loaded */ + } + + bpContext *plugin_ctx_list = (bpContext *)jcr->plugin_ctx_list; + + Dmsg2(dbglvl, "plugin_ctx=%p JobId=%d\n", jcr->plugin_ctx_list, jcr->JobId); + + /* Pass event to every plugin */ + foreach_alist_index(i, plugin, b_plugin_list) { + jcr->plugin_ctx = &plugin_ctx_list[i]; + jcr->plugin = plugin; + if (is_plugin_disabled(jcr)) { + continue; + } + if (plug_func(plugin)->checkFile == NULL) { + continue; + } + rc = plug_func(plugin)->checkFile(jcr->plugin_ctx, fname); + if (rc == bRC_Seen) { + break; + } + } + + Dsm_check(999); + jcr->plugin = NULL; + jcr->plugin_ctx = NULL; + return rc == bRC_Seen; +} + +/* Get the first part of the the plugin command + * systemstate:/@SYSTEMSTATE/ + * => ret = 11 + * => can use for_this_plugin(plug, cmd, ret); + * + * The plugin command can contain only the plugin name + * Plugin = alldrives + * => ret = 9 + */ +static bool get_plugin_name(JCR *jcr, char *cmd, int *ret) +{ + char *p; + int len; + Dsm_check(999); + if (!cmd || (*cmd == '\0')) { + return false; + } + /* Handle plugin command here backup */ + Dmsg1(dbglvl, "plugin cmd=%s\n", cmd); + if ((p = strchr(cmd, ':')) == NULL) { + if (strchr(cmd, ' ') == NULL) { /* we have just the plugin name */ + len = strlen(cmd); + } else { + Jmsg1(jcr, M_ERROR, 0, "Malformed plugin command: %s\n", cmd); + return false; + } + } else { /* plugin:argument */ + len = p - cmd; + if (len <= 0) { + return false; + } + } + *ret = len; + Dsm_check(999); + return true; +} + + +static void update_ff_pkt(FF_PKT *ff_pkt, struct save_pkt *sp) +{ + Dsm_check(999); + ff_pkt->no_read = sp->no_read; + ff_pkt->delta_seq = sp->delta_seq; + + if (sp->flags & FO_DELTA) { + ff_pkt->flags |= FO_DELTA; + ff_pkt->delta_seq++; /* make new delta sequence number */ + } else { + ff_pkt->flags &= ~FO_DELTA; /* clean delta sequence number */ + ff_pkt->delta_seq = 0; + } + + if (sp->flags & FO_OFFSETS) { + ff_pkt->flags |= FO_OFFSETS; + } else { + ff_pkt->flags &= ~FO_OFFSETS; + } + /* Sparse code doesn't work with plugins + * that use FIFO or STDOUT/IN to communicate + */ + if (sp->flags & FO_SPARSE) { + ff_pkt->flags |= FO_SPARSE; + } else { + ff_pkt->flags &= ~FO_SPARSE; + } + if (sp->flags & FO_PORTABLE) { + ff_pkt->flags |= FO_PORTABLE; + } else { + ff_pkt->flags &= ~FO_PORTABLE; + } + ff_pkt->flags |= FO_PLUGIN; /* data from plugin */ + Dsm_check(999); +} + +/* Ask to a Option Plugin what to do with the 
current file */ +bRC plugin_option_handle_file(JCR *jcr, FF_PKT *ff_pkt, struct save_pkt *sp) +{ + Plugin *plugin; + bRC ret = bRC_Error; + bool found=false; + char *cmd = ff_pkt->plugin; + int len; + int i=0; + bEvent event; + event.eventType = bEventHandleBackupFile; + + Dsm_check(999); + bpContext *plugin_ctx_list = (bpContext *)jcr->plugin_ctx_list; + memset(sp, 0, sizeof(struct save_pkt)); + sp->pkt_size = sp->pkt_end = sizeof(struct save_pkt); + sp->portable = true; + sp->cmd = cmd; + sp->link = ff_pkt->link; + sp->cmd = ff_pkt->plugin; + sp->statp = ff_pkt->statp; + sp->fname = ff_pkt->fname; + sp->delta_seq = ff_pkt->delta_seq; + sp->accurate_found = ff_pkt->accurate_found; + + if (!b_plugin_list || !jcr->plugin_ctx_list || jcr->is_job_canceled()) { + Jmsg1(jcr, M_FATAL, 0, "Command plugin \"%s\" requested, but is not loaded.\n", cmd); + goto bail_out; /* Return if no plugins loaded */ + } + + if (!get_plugin_name(jcr, cmd, &len)) { + goto bail_out; + } + + /* Note, we stop the loop on the first plugin that matches the name */ + foreach_alist_index(i, plugin, b_plugin_list) { + Dmsg4(dbglvl, "plugin=%s plen=%d cmd=%s len=%d\n", plugin->file, plugin->file_len, cmd, len); + if (!for_this_plugin(plugin, cmd, len)) { + continue; + } + + found=true; + + Dsm_check(999); + if (is_plugin_disabled(&plugin_ctx_list[i])) { + goto bail_out; + } + + jcr->plugin_ctx = &plugin_ctx_list[i]; + jcr->plugin = plugin; + + ret = plug_func(plugin)->handlePluginEvent(&plugin_ctx_list[i], + &event, sp); + + /* TODO: would be better to set this in save_file() */ + if (ret == bRC_OK) { + jcr->opt_plugin = true; + jcr->plugin = plugin; + jcr->plugin_sp = sp; /* Unset sp in save_file */ + jcr->plugin_ctx = &plugin_ctx_list[i]; + + update_ff_pkt(ff_pkt, sp); + /* reset plugin in JCR if not used this time */ + } else { + jcr->plugin_ctx = NULL; + jcr->plugin = NULL; + } + + goto bail_out; + } /* end foreach loop */ +bail_out: + if (!found) { + Jmsg1(jcr, M_FATAL, 0, "Options plugin \"%s\" not found.\n", cmd); + } + Dsm_check(999); + return ret; +} + +/** + * Sequence of calls for a backup: + * 1. plugin_save() here is called with ff_pkt + * 2. we find the plugin requested on the command string + * 3. we generate a bEventBackupCommand event to the specified plugin + * and pass it the command string. + * 4. we make a startPluginBackup call to the plugin, which gives + * us the data we need in save_pkt + * 5. we call Bacula's save_file() subroutine to save the specified + * file. The plugin will be called at pluginIO() to supply the + * file data. + * + * Sequence of calls for restore: + * See subroutine plugin_name_stream() below. 
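+ *
+ * The command string used in step 2 is the value of a Plugin directive from
+ * the FileSet Include resource; a minimal example (configuration syntax
+ * shown for illustration only, it is not defined in this file) would be
+ *
+ *    Include {
+ *      Options { signature = MD5 }
+ *      Plugin = "systemstate:/@SYSTEMSTATE/"
+ *    }
+ *
+ * ff_pkt->top_fname then carries that whole string, and the part before the
+ * first ':' selects the plugin as described at get_plugin_name().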
+ */ +int plugin_save(JCR *jcr, FF_PKT *ff_pkt, bool top_level) +{ + Plugin *plugin; + int len; + int i; + char *cmd = ff_pkt->top_fname; + struct save_pkt sp; + bEvent event; + POOL_MEM fname(PM_FNAME); + POOL_MEM link(PM_FNAME); + + Dsm_check(999); + if (!b_plugin_list || !jcr->plugin_ctx_list || jcr->is_job_canceled()) { + Jmsg1(jcr, M_FATAL, 0, "Command plugin \"%s\" requested, but is not loaded.\n", cmd); + return 1; /* Return if no plugins loaded */ + } + + jcr->cmd_plugin = true; + bpContext *plugin_ctx_list = (bpContext *)jcr->plugin_ctx_list; + event.eventType = bEventBackupCommand; + + if (!get_plugin_name(jcr, cmd, &len)) { + goto bail_out; + } + + /* Note, we stop the loop on the first plugin that matches the name */ + foreach_alist_index(i, plugin, b_plugin_list) { + Dmsg4(dbglvl, "plugin=%s plen=%d cmd=%s len=%d\n", plugin->file, plugin->file_len, cmd, len); + if (!for_this_plugin(plugin, cmd, len)) { + continue; + } + /* + * We put the current plugin pointer, and the plugin context + * into the jcr, because during save_file(), the plugin + * will be called many times and these values are needed. + */ + Dsm_check(999); + jcr->plugin_ctx = &plugin_ctx_list[i]; + jcr->plugin = plugin; + if (is_plugin_disabled(jcr)) { + goto bail_out; + } + + Dmsg1(dbglvl, "Command plugin = %s\n", cmd); + /* Send the backup command to the right plugin*/ + if (plug_func(plugin)->handlePluginEvent(jcr->plugin_ctx, &event, cmd) != bRC_OK) { + goto bail_out; + } + /* Loop getting filenames to backup then saving them */ + while (!jcr->is_job_canceled()) { + memset(&sp, 0, sizeof(sp)); + sp.pkt_size = sizeof(sp); + sp.pkt_end = sizeof(sp); + sp.portable = true; + sp.no_read = false; + sp.flags = 0; + sp.cmd = cmd; + Dmsg3(dbglvl, "startBackup st_size=%p st_blocks=%p sp=%p\n", &sp.statp.st_size, &sp.statp.st_blocks, + &sp); + Dsm_check(999); + /* Get the file save parameters. I.e. the stat pkt ... */ + if (plug_func(plugin)->startBackupFile(jcr->plugin_ctx, &sp) != bRC_OK) { + goto bail_out; + } + if (sp.type == 0) { + Jmsg1(jcr, M_FATAL, 0, _("Command plugin \"%s\": no type in startBackupFile packet.\n"), + cmd); + goto bail_out; + } + jcr->plugin_sp = &sp; + ff_pkt = jcr->ff; + /* + * Copy fname and link because save_file() zaps them. This + * avoids zaping the plugin's strings. 
+ */ + ff_pkt->type = sp.type; + if (IS_FT_OBJECT(sp.type)) { + if (!sp.object_name) { + Jmsg1(jcr, M_FATAL, 0, _("Command plugin \"%s\": no object_name in startBackupFile packet.\n"), + cmd); + goto bail_out; + } + ff_pkt->fname = cmd; /* full plugin string */ + ff_pkt->object_name = sp.object_name; + ff_pkt->object_index = sp.index; /* restore object index */ + ff_pkt->object_compression = 0; /* no compression for now */ + ff_pkt->object = sp.object; + ff_pkt->object_len = sp.object_len; + } else { + Dsm_check(999); + if (!sp.fname) { + Jmsg1(jcr, M_FATAL, 0, _("Command plugin \"%s\": no fname in startBackupFile packet.\n"), + cmd); + goto bail_out; + } + pm_strcpy(fname, sp.fname); + pm_strcpy(link, sp.link); + + + ff_pkt->fname = fname.c_str(); + ff_pkt->link = link.c_str(); + ff_pkt->LinkFI = sp.LinkFI; + update_ff_pkt(ff_pkt, &sp); + } + + memcpy(&ff_pkt->statp, &sp.statp, sizeof(ff_pkt->statp)); + Dmsg2(dbglvl, "startBackup returned type=%d, fname=%s\n", sp.type, sp.fname); + if (sp.object) { + Dmsg2(dbglvl, "index=%d object=%s\n", sp.index, sp.object); + } + /* Call Bacula core code to backup the plugin's file */ + save_file(jcr, ff_pkt, true); + bRC rc = plug_func(plugin)->endBackupFile(jcr->plugin_ctx); + if (rc == bRC_More || rc == bRC_OK) { + accurate_mark_file_as_seen(jcr, fname.c_str()); + } + Dsm_check(999); + if (rc == bRC_More) { + continue; + } + goto bail_out; + } /* end while loop */ + goto bail_out; + } /* end loop over all plugins */ + Jmsg1(jcr, M_FATAL, 0, "Command plugin \"%s\" not found.\n", cmd); + +bail_out: + Dsm_check(999); + jcr->cmd_plugin = false; + jcr->plugin = NULL; + jcr->plugin_ctx = NULL; + return 1; +} + + +/** + * Sequence of calls for a estimate: + * 1. plugin_estimate() here is called with ff_pkt + * 2. we find the plugin requested on the command string + * 3. we generate a bEventEstimateCommand event to the specified plugin + * and pass it the command string. + * 4. we make a startPluginBackup call to the plugin, which gives + * us the data we need in save_pkt + * + */ +int plugin_estimate(JCR *jcr, FF_PKT *ff_pkt, bool top_level) +{ + Plugin *plugin; + int len; + int i; + char *cmd = ff_pkt->top_fname; + struct save_pkt sp; + bEvent event; + POOL_MEM fname(PM_FNAME); + POOL_MEM link(PM_FNAME); + ATTR attr; + + Dsm_check(999); + if (!b_plugin_list || !jcr->plugin_ctx_list) { + Jmsg1(jcr, M_FATAL, 0, "Command plugin \"%s\" requested, but is not loaded.\n", cmd); + return 1; /* Return if no plugins loaded */ + } + + jcr->cmd_plugin = true; + bpContext *plugin_ctx_list = (bpContext *)jcr->plugin_ctx_list; + event.eventType = bEventEstimateCommand; + + if (!get_plugin_name(jcr, cmd, &len)) { + goto bail_out; + } + + /* Note, we stop the loop on the first plugin that matches the name */ + foreach_alist_index(i, plugin, b_plugin_list) { + Dmsg4(dbglvl, "plugin=%s plen=%d cmd=%s len=%d\n", plugin->file, plugin->file_len, cmd, len); + if (!for_this_plugin(plugin, cmd, len)) { + continue; + } + /* + * We put the current plugin pointer, and the plugin context + * into the jcr, because during save_file(), the plugin + * will be called many times and these values are needed. 
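+ * (This comment is inherited from plugin_save(): in the estimate variant no
+ * save_file() call is made; the loop below only updates JobFiles, JobBytes
+ * and num_files_examined and, when the listing option is set, prints an
+ * ls-style line for each file the plugin returns.)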
+ */ + Dsm_check(999); + jcr->plugin_ctx = &plugin_ctx_list[i]; + jcr->plugin = plugin; + if (is_plugin_disabled(jcr)) { + goto bail_out; + } + + Dmsg1(dbglvl, "Command plugin = %s\n", cmd); + /* Send the backup command to the right plugin*/ + if (plug_func(plugin)->handlePluginEvent(jcr->plugin_ctx, &event, cmd) != bRC_OK) { + goto bail_out; + } + /* Loop getting filenames to backup then saving them */ + while (!jcr->is_job_canceled()) { + Dsm_check(999); + memset(&sp, 0, sizeof(sp)); + sp.pkt_size = sizeof(sp); + sp.pkt_end = sizeof(sp); + sp.portable = true; + sp.flags = 0; + sp.cmd = cmd; + Dmsg3(dbglvl, "startBackup st_size=%p st_blocks=%p sp=%p\n", &sp.statp.st_size, &sp.statp.st_blocks, + &sp); + /* Get the file save parameters. I.e. the stat pkt ... */ + if (plug_func(plugin)->startBackupFile(jcr->plugin_ctx, &sp) != bRC_OK) { + goto bail_out; + } + if (sp.type == 0) { + Jmsg1(jcr, M_FATAL, 0, _("Command plugin \"%s\": no type in startBackupFile packet.\n"), + cmd); + goto bail_out; + } + + if (!IS_FT_OBJECT(sp.type)) { + if (!sp.fname) { + Jmsg1(jcr, M_FATAL, 0, _("Command plugin \"%s\": no fname in startBackupFile packet.\n"), + cmd); + goto bail_out; + } + + /* Count only files backed up */ + switch (sp.type) { + case FT_REGE: + case FT_REG: + case FT_LNK: + case FT_DIREND: + case FT_SPEC: + case FT_RAW: + case FT_FIFO: + case FT_LNKSAVED: + jcr->JobFiles++; /* increment number of files backed up */ + break; + default: + break; + } + jcr->num_files_examined++; + + if (sp.type != FT_LNKSAVED && S_ISREG(sp.statp.st_mode)) { + if (sp.statp.st_size > 0) { + jcr->JobBytes += sp.statp.st_size; + } + } + + if (jcr->listing) { + memcpy(&attr.statp, &sp.statp, sizeof(struct stat)); + attr.type = sp.type; + attr.ofname = (POOLMEM *)sp.fname; + attr.olname = (POOLMEM *)sp.link; + print_ls_output(jcr, &attr); + } + } + + Dmsg2(dbglvl, "startBackup returned type=%d, fname=%s\n", sp.type, sp.fname); + if (sp.object) { + Dmsg2(dbglvl, "index=%d object=%s\n", sp.index, sp.object); + } + bRC rc = plug_func(plugin)->endBackupFile(jcr->plugin_ctx); + if (rc == bRC_More || rc == bRC_OK) { + accurate_mark_file_as_seen(jcr, sp.fname); + } + Dsm_check(999); + if (rc == bRC_More) { + continue; + } + goto bail_out; + } /* end while loop */ + goto bail_out; + } /* end loop over all plugins */ + Jmsg1(jcr, M_FATAL, 0, "Command plugin \"%s\" not found.\n", cmd); + +bail_out: + Dsm_check(999); + jcr->cmd_plugin = false; + jcr->plugin = NULL; + jcr->plugin_ctx = NULL; + return 1; +} + +/** + * Send plugin name start/end record to SD + */ +bool send_plugin_name(JCR *jcr, BSOCK *sd, bool start) +{ + int stat; + int index = jcr->JobFiles; + struct save_pkt *sp = (struct save_pkt *)jcr->plugin_sp; + int32_t stream = STREAM_PLUGIN_NAME; + + Dsm_check(999); + if (!sp) { + Jmsg0(jcr, M_FATAL, 0, _("Plugin save packet not found.\n")); + return false; + } + if (jcr->is_job_canceled()) { + return false; + } + + if (start) { + index++; /* JobFiles not incremented yet */ + } + Dmsg1(dbglvl, "send_plugin_name=%s\n", sp->cmd); + /* Send stream header */ + Dsm_check(999); + if (start) { + plugin_check_stream(jcr, stream); /* get stream modified by plugin */ + } + if (!sd->fsend("%ld %d 0", index, stream)) { + Jmsg1(jcr, M_FATAL, 0, _("Network send error to SD. 
ERR=%s\n"), + sd->bstrerror()); + return false; + } + Dmsg1(dbglvl, "send plugin name hdr: %s\n", sd->msg); + + Dsm_check(999); + if (start) { + /* Send data -- not much */ + stat = sd->fsend("%ld 1 %d %s%c", index, sp->portable, sp->cmd, 0); + } else { + /* Send end of data */ + stat = sd->fsend("%ld 0", jcr->JobFiles); + } + Dsm_check(999); + if (!stat) { + Jmsg1(jcr, M_FATAL, 0, _("Network send error to SD. ERR=%s\n"), + sd->bstrerror()); + return false; + } + Dmsg1(dbglvl, "send plugin start/end: %s\n", sd->msg); + sd->signal(BNET_EOD); /* indicate end of plugin name data */ + Dsm_check(999); + return true; +} + +/** + * Plugin name stream found during restore. The record passed in + * argument name was generated in send_plugin_name() above. + * + * Returns: true if start of stream + * false if end of steam + */ +bool plugin_name_stream(JCR *jcr, char *name) +{ + char *p = name; + char *cmd; + bool start; + Plugin *plugin; + int len; + int i; + bpContext *plugin_ctx_list = jcr->plugin_ctx_list; + + Dsm_check(999); + Dmsg1(dbglvl, "Read plugin stream string=%s\n", name); + skip_nonspaces(&p); /* skip over jcr->JobFiles */ + skip_spaces(&p); + start = *p == '1'; + if (start) { + /* Start of plugin data */ + skip_nonspaces(&p); /* skip start/end flag */ + skip_spaces(&p); +// portable = *p == '1'; + skip_nonspaces(&p); /* skip portable flag */ + skip_spaces(&p); + cmd = p; + } else { + /* + * End of plugin data, notify plugin, then clear flags + */ + Dmsg2(dbglvl, "End plugin data plugin=%p ctx=%p\n", jcr->plugin, jcr->plugin_ctx); + if (jcr->plugin && jcr->plugin->restoreFileStarted) { + plug_func(jcr->plugin)->endRestoreFile(jcr->plugin_ctx); + } + if (jcr->plugin) { + jcr->plugin->restoreFileStarted = false; + jcr->plugin->createFileCalled = false; + } + jcr->plugin_ctx = NULL; + jcr->plugin = NULL; + goto bail_out; + } + Dsm_check(999); + if (!plugin_ctx_list) { + goto bail_out; + } + + /* + * After this point, we are dealing with a restore start + */ + if (!get_plugin_name(jcr, cmd, &len)) { + goto bail_out; + } + + /* + * Search for correct plugin as specified on the command + */ + Dsm_check(999); + foreach_alist_index(i, plugin, b_plugin_list) { + bEvent event; + Dmsg3(dbglvl, "plugin=%s cmd=%s len=%d\n", plugin->file, cmd, len); + if (!for_this_plugin(plugin, cmd, len)) { + continue; + } + Dsm_check(999); + jcr->plugin_ctx = &plugin_ctx_list[i]; + jcr->plugin = plugin; + if (is_plugin_disabled(jcr)) { + Dmsg1(dbglvl, "Plugin %s disabled\n", cmd); + goto bail_out; + } + Dmsg1(dbglvl, "Restore Command plugin = %s\n", cmd); + event.eventType = bEventRestoreCommand; + if (plug_func(plugin)->handlePluginEvent(jcr->plugin_ctx, + &event, cmd) != bRC_OK) { + Dmsg1(dbglvl, "Handle event failed. Plugin=%s\n", cmd); + goto bail_out; + } + if (plugin->restoreFileStarted) { + Jmsg2(jcr, M_FATAL, 0, "Second call to startRestoreFile. plugin=%s cmd=%s\n", plugin->file, cmd); + plugin->restoreFileStarted = false; + goto bail_out; + } + if (plug_func(plugin)->startRestoreFile(jcr->plugin_ctx, cmd) == bRC_OK) { + plugin->restoreFileStarted = true; + goto ok_out; + } else { + Dmsg1(dbglvl, "startRestoreFile failed. plugin=%s\n", cmd); + } + goto bail_out; + } + Jmsg1(jcr, M_WARNING, 0, _("Plugin=%s not found.\n"), cmd); + goto bail_out; + +ok_out: + return start; + +bail_out: + Dsm_check(999); + jcr->plugin = NULL; + jcr->plugin_ctx = NULL; + return start; +} + +/** + * Tell the plugin to create the file. 
Return values are + * This is called only during Restore + * + * CF_ERROR -- error + * CF_SKIP -- skip processing this file + * CF_EXTRACT -- extract the file (i.e.call i/o routines) + * CF_CREATED -- created, but no content to extract (typically directories) + * + */ +int plugin_create_file(JCR *jcr, ATTR *attr, BFILE *bfd, int replace) +{ + bpContext *plugin_ctx = jcr->plugin_ctx; + Plugin *plugin = jcr->plugin; + struct restore_pkt rp; + int flags; + int rc; + + Dsm_check(999); + if (!plugin || !plugin_ctx || jcr->is_job_canceled()) { + return CF_ERROR; + } + + rp.pkt_size = sizeof(rp); + rp.pkt_end = sizeof(rp); + rp.delta_seq = attr->delta_seq; + rp.stream = attr->stream; + rp.data_stream = attr->data_stream; + rp.type = attr->type; + rp.file_index = attr->file_index; + rp.LinkFI = attr->LinkFI; + rp.uid = attr->uid; + rp.statp = attr->statp; /* structure assignment */ + rp.attrEx = attr->attrEx; + rp.ofname = attr->ofname; + rp.olname = attr->olname; + rp.where = jcr->where; + rp.RegexWhere = jcr->RegexWhere; + rp.replace = jcr->replace; + rp.create_status = CF_ERROR; + Dmsg4(dbglvl, "call plugin createFile stream=%d type=%d LinkFI=%d File=%s\n", + rp.stream, rp.type, rp.LinkFI, rp.ofname); + if (rp.attrEx) { + Dmsg1(dbglvl, "attrEx=\"%s\"\n", rp.attrEx); + } + Dsm_check(999); + if (!plugin->restoreFileStarted || plugin->createFileCalled) { + Jmsg2(jcr, M_FATAL, 0, "Unbalanced call to createFile=%d %d\n", + plugin->createFileCalled, plugin->restoreFileStarted); + plugin->createFileCalled = false; + return CF_ERROR; + } + rc = plug_func(plugin)->createFile(plugin_ctx, &rp); + if (rc != bRC_OK) { + Qmsg2(jcr, M_ERROR, 0, _("Plugin createFile call failed. Stat=%d file=%s\n"), + rc, attr->ofname); + return CF_ERROR; + } + if (rp.create_status == CF_ERROR) { + Qmsg1(jcr, M_ERROR, 0, _("Plugin createFile call failed. Returned CF_ERROR file=%s\n"), + attr->ofname); + return CF_ERROR; + } + + if (rp.create_status == CF_SKIP) { + return CF_SKIP; + } + + if (rp.create_status == CF_CORE) { + return CF_CORE; /* Let Bacula core handle the file creation */ + } + + /* Use the bfile for plugin */ + set_cmd_plugin(bfd, jcr); + + /* Created link or directory? */ + if (rp.create_status == CF_CREATED) { + return rp.create_status; /* yes, no need to bopen */ + } + + Dsm_check(999); + + flags = O_WRONLY | O_CREAT | O_TRUNC | O_BINARY; + Dmsg0(dbglvl, "call bopen\n"); + int stat = bopen(bfd, attr->ofname, flags, S_IRUSR | S_IWUSR); + Dmsg1(dbglvl, "bopen status=%d\n", stat); + if (stat < 0) { + berrno be; + be.set_errno(bfd->berrno); + Qmsg2(jcr, M_ERROR, 0, _("Could not create %s: ERR=%s\n"), + attr->ofname, be.bstrerror()); + Dmsg2(dbglvl,"Could not bopen file %s: ERR=%s\n", attr->ofname, be.bstrerror()); + return CF_ERROR; + } + + if (!is_bopen(bfd)) { + Dmsg0(000, "===== BFD is not open!!!!\n"); + } + Dsm_check(999); + return CF_EXTRACT; +} + +/** + * Reset the file attributes after all file I/O is done -- this allows + * the previous access time/dates to be set properly, and it also allows + * us to properly set directory permissions. + * Not currently Implemented. 
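+ * (Note: despite the remark above, the body below does forward the restore
+ * packet to the plugin's setFileAttributes() entry point and falls back to
+ * the core set_attributes() when the plugin answers CF_CORE, so the "Not
+ * currently Implemented" note appears to be out of date.)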
+ */ +bool plugin_set_attributes(JCR *jcr, ATTR *attr, BFILE *ofd) +{ + Plugin *plugin = (Plugin *)jcr->plugin; + struct restore_pkt rp; + + Dmsg0(dbglvl, "plugin_set_attributes\n"); + + if (!plugin || !jcr->plugin_ctx) { + return false; + } + + memset(&rp, 0, sizeof(rp)); + rp.pkt_size = sizeof(rp); + rp.pkt_end = sizeof(rp); + rp.stream = attr->stream; + rp.data_stream = attr->data_stream; + rp.type = attr->type; + rp.file_index = attr->file_index; + rp.LinkFI = attr->LinkFI; + rp.uid = attr->uid; + rp.statp = attr->statp; /* structure assignment */ + rp.attrEx = attr->attrEx; + rp.ofname = attr->ofname; + rp.olname = attr->olname; + rp.where = jcr->where; + rp.RegexWhere = jcr->RegexWhere; + rp.replace = jcr->replace; + rp.create_status = CF_ERROR; + + plug_func(plugin)->setFileAttributes(jcr->plugin_ctx, &rp); + + if (rp.create_status == CF_CORE) { + set_attributes(jcr, attr, ofd); + } else { + if (is_bopen(ofd)) { + bclose(ofd); + } + pm_strcpy(attr->ofname, "*none*"); + } + + Dsm_check(999); + return true; +} + +/* + * The Plugin ACL data backup. We are using a new Plugin callback: + * handleXACLdata() for that. The new callback get a pointer to + * struct xacl_pkt as a main argument which consist of the following + * data: + * xacl.func - could be the one of BACL_BACKUP, BACL_RESTORE, + * BXATTR_BACKUP, BXATTR_RESTORE + * xacl.count - the length of data at the content buffer + * xacl.content - the buffer itself + * The buffer (xacl.content) is supplied by Bacula during restore and has to + * be supplied by a Plugin during backup. + * The new callback should return bRC_OK on success and bRC_Error on + * any error. + * + * in: + * jcr - Job Control Record + * ff_pkt - file save packet + * data is a pointer to variable returned + * out: + * data - the pointer to data buffer returned from plugin + * 0 - Success, no more data to save + * > 0 - Success and the number of bytes returned in **data buffer + * -1 - Error, no acls data to backup + */ +int plugin_backup_acl(JCR *jcr, FF_PKT *ff_pkt, char **data) +{ + struct xacl_pkt xacl; + Plugin *plugin = (Plugin *)jcr->plugin; + bRC rc; + + Dmsg0(dbglvl, "plugin_backup_acl\n"); + + /* check of input variables */ + if (!plugin || !jcr->plugin_ctx || !data) { + return 0; + } + + /* The plugin is not handling ACL/XATTR */ + if (plug_func(jcr->plugin)->handleXACLdata == NULL) { + return 0; + } + + /* prepare the xacl packet */ + memset(&xacl, 0, sizeof(xacl)); + xacl.pkt_size = sizeof(xacl); + xacl.pkt_end = sizeof(xacl); + xacl.func = BACL_BACKUP; + + rc = plug_func(plugin)->handleXACLdata(jcr->plugin_ctx, &xacl); + + /* check out status */ + if (rc != bRC_OK){ + Dmsg0(dbglvl, "plugin->handleXACLdata returned error\n"); + return -1; + } + if (xacl.count > 0){ + /* we have something to save, so prepare return data */ + *data = xacl.content; + return xacl.count; + } + + return 0; +} + +/* + * Called here when Bacula got ACL stream to restore but not every stream but + * a specific one: STREAM_XACL_PLUGIN_ACL which means a plugin has to + * be called. 
+ * + * in: + * jcr - Job Control Record + * data - content to restore + * length - the length of the content to restore + * out: + * true - when successful + * false - on any Error + */ +bool plugin_restore_acl(JCR *jcr, char *data, uint32_t length) +{ + struct xacl_pkt xacl; + Plugin *plugin = (Plugin *)jcr->plugin; + bRC rc; + + Dmsg0(dbglvl, "plugin_restore_acl\n"); + + /* check of input variables */ + if (!plugin || !jcr->plugin_ctx || !data || length == 0) { + return true; + } + + /* The plugin is not handling ACL/XATTR */ + if (plug_func(jcr->plugin)->handleXACLdata == NULL) { + return 0; + } + + /* prepare the xacl packet */ + memset(&xacl, 0, sizeof(xacl)); + xacl.pkt_size = sizeof(xacl); + xacl.pkt_end = sizeof(xacl); + xacl.func = BACL_RESTORE; + xacl.content = data; + xacl.count = length; + + rc = plug_func(plugin)->handleXACLdata(jcr->plugin_ctx, &xacl); + + /* check out status */ + if (rc != bRC_OK){ + Dmsg0(dbglvl, "plugin->handleXACLdata returned error\n"); + return false; + } + + return true; +} + +/* + * The Plugin XATTR data backup. We are using a new Plugin callback: + * handleXACLdata() for that. Check plugin_backup_acl for new callback + * description. + * + * in: + * jcr - Job Control Record + * ff_pkt - file save packet + * data is a pointer to variable returned + * out: + * data - the pointer to data buffer returned from plugin + * 0 - Success, no more data to save + * >0 - Success and the number of bytes returned in **data buffer + * <0 - Error + */ +int plugin_backup_xattr(JCR *jcr, FF_PKT *ff_pkt, char **data) +{ + + struct xacl_pkt xacl; + Plugin *plugin = (Plugin *)jcr->plugin; + bRC rc; + + Dmsg0(dbglvl, "plugin_backup_xattr\n"); + + /* check of input variables */ + if (!plugin || !jcr->plugin_ctx || !data) { + return 0; + } + + /* The plugin is not handling ACL/XATTR */ + if (plug_func(jcr->plugin)->handleXACLdata == NULL) { + return 0; + } + + /* prepare the xacl packet */ + memset(&xacl, 0, sizeof(xacl)); + xacl.pkt_size = sizeof(xacl); + xacl.pkt_end = sizeof(xacl); + xacl.func = BXATTR_BACKUP; + + rc = plug_func(plugin)->handleXACLdata(jcr->plugin_ctx, &xacl); + + /* check out status */ + if (rc != bRC_OK){ + Dmsg0(dbglvl, "plugin->handleXACLdata returned error\n"); + return -1; + } + if (xacl.count > 0){ + /* we have something to save, so prepare return data */ + *data = xacl.content; + return xacl.count; + } + + return 0; +} + +/* + * Called here when Bacula got XATTR stream to restore but not every stream but + * a specific one: STREAM_XACL_PLUGIN_XATTR which means a plugin has to + * be called. 
+ * + * in: + * jcr - Job Control Record + * data - content to restore + * length - the length of the content to restore + * out: + * true - when successful + * false - on any Error + */ +bool plugin_restore_xattr(JCR *jcr, char *data, uint32_t length) +{ + struct xacl_pkt xacl; + Plugin *plugin = (Plugin *)jcr->plugin; + bRC rc; + + Dmsg0(dbglvl, "plugin_restore_xattr\n"); + + /* check of input variables */ + if (!plugin || !jcr->plugin_ctx || !data || length == 0) { + return true; + } + + /* The plugin is not handling ACL/XATTR */ + if (plug_func(jcr->plugin)->handleXACLdata == NULL) { + return 0; + } + + /* prepare the xacl packet */ + memset(&xacl, 0, sizeof(xacl)); + xacl.pkt_size = sizeof(xacl); + xacl.pkt_end = sizeof(xacl); + xacl.func = BXATTR_RESTORE; + xacl.content = data; + xacl.count = length; + + rc = plug_func(plugin)->handleXACLdata(jcr->plugin_ctx, &xacl); + + /* check out status */ + if (rc != bRC_OK){ + Dmsg0(dbglvl, "plugin->handleXACLdata returned error\n"); + return false; + } + + return true; +} + +/* + * Allow plugin to modify stream + */ +bool plugin_check_stream(JCR *jcr, int32_t &stream) +{ + struct stream_pkt sp; + Plugin *plugin = (Plugin *)jcr->plugin; + bRC rc; + + Dmsg0(dbglvl, "plugin_check_stream\n"); + + /* check of input variables */ + if (!plugin || !jcr->plugin_ctx || !plug_func(plugin)->checkStream) { + return true; + } + + /* prepare the stream packet */ + memset(&sp, 0, sizeof(sp)); + sp.pkt_size = sizeof(sp); + sp.stream = stream; + sp.pkt_end = sizeof(sp); + + rc = plug_func(plugin)->checkStream(jcr->plugin_ctx, &sp); + + /* check out status */ + if (rc != bRC_OK){ + Dmsg0(dbglvl, "plugin->checkStream returned error\n"); + return false; + } + stream = sp.stream; + + return true; +} + +/* + * Print to file the plugin info. + */ +void dump_fd_plugin(Plugin *plugin, FILE *fp) +{ + if (!plugin) { + return ; + } + pInfo *info = (pInfo *)plugin->pinfo; + fprintf(fp, "\tversion=%d\n", info->version); + fprintf(fp, "\tdate=%s\n", NPRTB(info->plugin_date)); + fprintf(fp, "\tmagic=%s\n", NPRTB(info->plugin_magic)); + fprintf(fp, "\tauthor=%s\n", NPRTB(info->plugin_author)); + fprintf(fp, "\tlicence=%s\n", NPRTB(info->plugin_license)); + fprintf(fp, "\tversion=%s\n", NPRTB(info->plugin_version)); + fprintf(fp, "\tdescription=%s\n", NPRTB(info->plugin_description)); +} + +/** + * This entry point is called internally by Bacula to ensure + * that the plugin IO calls come into this code. + */ +void load_fd_plugins(const char *plugin_dir) +{ + Plugin *plugin; + int i; + + if (!plugin_dir) { + Dmsg0(dbglvl, "plugin dir is NULL\n"); + return; + } + + b_plugin_list = New(alist(10, not_owned_by_alist)); + Dsm_check(999); + if (!load_plugins((void *)&binfo, (void *)&bfuncs, plugin_dir, plugin_type, + is_plugin_compatible)) { + /* Either none found, or some error */ + if (b_plugin_list->size() == 0) { + delete b_plugin_list; + b_plugin_list = NULL; + Dmsg0(dbglvl, "No plugins loaded\n"); + return; + } + } + + /* Plug entry points called from findlib */ + plugin_bopen = my_plugin_bopen; + plugin_bclose = my_plugin_bclose; + plugin_bread = my_plugin_bread; + plugin_bwrite = my_plugin_bwrite; + plugin_blseek = my_plugin_blseek; + Dsm_check(999); + + /* + * Verify that the plugin is acceptable, and print information + * about it. 
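+ * The acceptance test itself is is_plugin_compatible(), handed to
+ * load_plugins() above; the loop below only reports what ended up in
+ * b_plugin_list.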
+ */ + foreach_alist_index(i, plugin, b_plugin_list) { + Jmsg(NULL, M_INFO, 0, _("Loaded plugin: %s\n"), plugin->file); + Dmsg1(dbglvl, "Loaded plugin: %s\n", plugin->file); + } + + dbg_plugin_add_hook(dump_fd_plugin); + Dsm_check(999); +} + +/** + * Check if a plugin is compatible. Called by the load_plugin function + * to allow us to verify the plugin. + */ +static bool is_plugin_compatible(Plugin *plugin) +{ + pInfo *info = (pInfo *)plugin->pinfo; + Dmsg0(dbglvl, "is_plugin_compatible called\n"); + Dsm_check(999); + if (chk_dbglvl(50)) { + dump_fd_plugin(plugin, stdin); + } + if (strcmp(info->plugin_magic, FD_PLUGIN_MAGIC) != 0) { + Jmsg(NULL, M_ERROR, 0, _("Plugin magic wrong. Plugin=%s wanted=%s got=%s\n"), + plugin->file, FD_PLUGIN_MAGIC, info->plugin_magic); + Dmsg3(50, "Plugin magic wrong. Plugin=%s wanted=%s got=%s\n", + plugin->file, FD_PLUGIN_MAGIC, info->plugin_magic); + + return false; + } + if (info->version != FD_PLUGIN_INTERFACE_VERSION) { + Jmsg(NULL, M_ERROR, 0, _("Plugin version incorrect. Plugin=%s wanted=%d got=%d\n"), + plugin->file, FD_PLUGIN_INTERFACE_VERSION, info->version); + Dmsg3(50, "Plugin version incorrect. Plugin=%s wanted=%d got=%d\n", + plugin->file, FD_PLUGIN_INTERFACE_VERSION, info->version); + return false; + } + if (strcmp(info->plugin_license, "Bacula AGPLv3") != 0 && + strcmp(info->plugin_license, "AGPLv3") != 0 && + strcmp(info->plugin_license, "Bacula") != 0) { + Jmsg(NULL, M_ERROR, 0, _("Plugin license incompatible. Plugin=%s license=%s\n"), + plugin->file, info->plugin_license); + Dmsg2(50, "Plugin license incompatible. Plugin=%s license=%s\n", + plugin->file, info->plugin_license); + return false; + } + if (info->size != sizeof(pInfo)) { + Jmsg(NULL, M_ERROR, 0, + _("Plugin size incorrect. Plugin=%s wanted=%d got=%d\n"), + plugin->file, sizeof(pInfo), info->size); + return false; + } + + Dsm_check(999); + return true; +} + + +/** + * Create a new instance of each plugin for this Job + * Note, b_plugin_list can exist but jcr->plugin_ctx_list can + * be NULL if no plugins were loaded. 
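+ *
+ * The context array allocated below is parallel to b_plugin_list: entry i
+ * belongs to plugin i, which is why the event and I/O code above simply does
+ *
+ *    jcr->plugin_ctx = &plugin_ctx_list[i];
+ *
+ * once plugin i has been located.  Each entry receives a freshly zeroed
+ * bacula_ctx as its bContext, while pContext starts out NULL and is left to
+ * the plugin (its newPlugin() entry point is called right after).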
+ */ +void new_plugins(JCR *jcr) +{ + Plugin *plugin; + int i; + + Dsm_check(999); + if (!b_plugin_list) { + Dmsg0(dbglvl, "plugin list is NULL\n"); + return; + } + if (jcr->is_job_canceled()) { + return; + } + + int num = b_plugin_list->size(); + + if (num == 0) { + Dmsg0(dbglvl, "No plugins loaded\n"); + return; + } + + jcr->plugin_ctx_list = (bpContext *)malloc(sizeof(bpContext) * num); + + bpContext *plugin_ctx_list = (bpContext *)jcr->plugin_ctx_list; + Dmsg2(dbglvl, "Instantiate plugin_ctx=%p JobId=%d\n", plugin_ctx_list, jcr->JobId); + foreach_alist_index(i, plugin, b_plugin_list) { + Dsm_check(999); + /* Start a new instance of each plugin */ + bacula_ctx *b_ctx = (bacula_ctx *)malloc(sizeof(bacula_ctx)); + memset(b_ctx, 0, sizeof(bacula_ctx)); + b_ctx->jcr = jcr; + plugin_ctx_list[i].bContext = (void *)b_ctx; /* Bacula private context */ + plugin_ctx_list[i].pContext = NULL; + if (plug_func(plugin)->newPlugin(&plugin_ctx_list[i]) != bRC_OK) { + Dmsg1(000, "Plugin %s will be disabled\n", plugin->file); + b_ctx->disabled = true; + } + } + if (i > num) { + Jmsg2(jcr, M_ABORT, 0, "Num plugins=%d exceeds list size=%d\n", + i, num); + } + Dsm_check(999); +} + +/** + * Free the plugin instances for this Job + */ +void free_plugins(JCR *jcr) +{ + Plugin *plugin; + int i; + + if (!b_plugin_list || !jcr->plugin_ctx_list) { + return; /* no plugins, nothing to do */ + } + + Dsm_check(999); + bpContext *plugin_ctx_list = (bpContext *)jcr->plugin_ctx_list; + Dmsg2(dbglvl, "Free instance plugin_ctx=%p JobId=%d\n", plugin_ctx_list, jcr->JobId); + foreach_alist_index(i, plugin, b_plugin_list) { + /* Free the plugin instance */ + plug_func(plugin)->freePlugin(&plugin_ctx_list[i]); + free(plugin_ctx_list[i].bContext); /* free Bacula private context */ + Dsm_check(999); + } + Dsm_check(999); + free(plugin_ctx_list); + jcr->plugin_ctx_list = NULL; +} + +static int my_plugin_bopen(BFILE *bfd, const char *fname, uint64_t flags, mode_t mode) +{ + JCR *jcr = bfd->jcr; + Plugin *plugin = (Plugin *)jcr->plugin; + struct io_pkt io; + + Dmsg1(dbglvl, "plugin_bopen flags=%x\n", flags); + Dsm_check(999); + if (!plugin || !jcr->plugin_ctx) { + return 0; + } + io.pkt_size = sizeof(io); + io.pkt_end = sizeof(io); + io.func = IO_OPEN; + io.count = 0; + io.buf = NULL; + io.fname = fname; + io.flags = flags; + io.mode = mode; + io.win32 = false; + io.lerror = 0; + io.status = -1; + plug_func(plugin)->pluginIO(jcr->plugin_ctx, &io); + bfd->berrno = io.io_errno; + if (io.win32) { + errno = b_errno_win32; + } else { + errno = io.io_errno; + bfd->lerror = io.lerror; + } + Dmsg1(dbglvl, "Return from plugin open status=%d\n", io.status); + Dsm_check(999); + return io.status; +} + +static int my_plugin_bclose(BFILE *bfd) +{ + JCR *jcr = bfd->jcr; + Plugin *plugin = (Plugin *)jcr->plugin; + struct io_pkt io; + + Dsm_check(999); + Dmsg0(dbglvl, "===== plugin_bclose\n"); + if (!plugin || !jcr->plugin_ctx) { + return 0; + } + io.pkt_size = sizeof(io); + io.pkt_end = sizeof(io); + io.func = IO_CLOSE; + io.count = 0; + io.buf = NULL; + io.win32 = false; + io.lerror = 0; + io.status = -1; + plug_func(plugin)->pluginIO(jcr->plugin_ctx, &io); + bfd->berrno = io.io_errno; + if (io.win32) { + errno = b_errno_win32; + } else { + errno = io.io_errno; + bfd->lerror = io.lerror; + } + Dmsg1(dbglvl, "plugin_bclose stat=%d\n", io.status); + Dsm_check(999); + return io.status; +} + +static ssize_t my_plugin_bread(BFILE *bfd, void *buf, size_t count) +{ + JCR *jcr = bfd->jcr; + Plugin *plugin = (Plugin *)jcr->plugin; + struct io_pkt io; + + 
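/* Same pattern as my_plugin_bopen() and my_plugin_bclose() above: fill an io_pkt, hand it to the plugin's pluginIO() entry point, then copy status, offset and errno information back into the BFILE. */ +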
Dsm_check(999); + Dmsg0(dbglvl, "plugin_bread\n"); + if (!plugin || !jcr->plugin_ctx) { + return 0; + } + io.pkt_size = sizeof(io); + io.pkt_end = sizeof(io); + io.func = IO_READ; + io.count = count; + io.buf = (char *)buf; + io.win32 = false; + io.offset = 0; + io.lerror = 0; + io.status = -1; + plug_func(plugin)->pluginIO(jcr->plugin_ctx, &io); + bfd->offset = io.offset; + bfd->berrno = io.io_errno; + if (io.win32) { + errno = b_errno_win32; + } else { + errno = io.io_errno; + bfd->lerror = io.lerror; + } + Dsm_check(999); + return (ssize_t)io.status; +} + +static ssize_t my_plugin_bwrite(BFILE *bfd, void *buf, size_t count) +{ + JCR *jcr = bfd->jcr; + Plugin *plugin = (Plugin *)jcr->plugin; + struct io_pkt io; + + Dsm_check(999); + Dmsg0(dbglvl, "plugin_bwrite\n"); + if (!plugin || !jcr->plugin_ctx) { + Dmsg0(0, "No plugin context\n"); + return 0; + } + io.pkt_size = sizeof(io); + io.pkt_end = sizeof(io); + io.func = IO_WRITE; + io.count = count; + io.buf = (char *)buf; + io.win32 = false; + io.lerror = 0; + io.status = -1; + plug_func(plugin)->pluginIO(jcr->plugin_ctx, &io); + bfd->berrno = io.io_errno; + if (io.win32) { + errno = b_errno_win32; + } else { + errno = io.io_errno; + bfd->lerror = io.lerror; + } + Dsm_check(999); + return (ssize_t)io.status; +} + +static boffset_t my_plugin_blseek(BFILE *bfd, boffset_t offset, int whence) +{ + JCR *jcr = bfd->jcr; + Plugin *plugin = (Plugin *)jcr->plugin; + struct io_pkt io; + + Dsm_check(999); + Dmsg0(dbglvl, "plugin_bseek\n"); + if (!plugin || !jcr->plugin_ctx) { + return 0; + } + io.pkt_size = sizeof(io); + io.pkt_end = sizeof(io); + io.func = IO_SEEK; + io.offset = offset; + io.whence = whence; + io.win32 = false; + io.lerror = 0; + plug_func(plugin)->pluginIO(jcr->plugin_ctx, &io); + bfd->berrno = io.io_errno; + if (io.win32) { + errno = b_errno_win32; + } else { + errno = io.io_errno; + bfd->lerror = io.lerror; + } + Dsm_check(999); + return (boffset_t)io.offset; +} + +/* ============================================================== + * + * Callbacks from the plugin + * + * ============================================================== + */ +static bRC baculaGetValue(bpContext *ctx, bVariable var, void *value) +{ + JCR *jcr; + if (!value) { + return bRC_Error; + } + + Dsm_check(999); + switch (var) { /* General variables, no need of ctx */ + case bVarFDName: + *((char **)value) = my_name; + break; + case bVarWorkingDir: + *(void **)value = me->working_directory; + break; + case bVarExePath: + *(char **)value = exepath; + break; + case bVarVersion: + *(char **)value = version; + break; + case bVarDistName: + *(char **)value = dist_name; + break; + case bVarxxx: + break; + case bVarPrevJobName: + break; + case bVarPrefixLinks: + break; + default: + break; + } + + if (!ctx) { /* Other variables need context */ + return bRC_Error; + } + + jcr = ((bacula_ctx *)ctx->bContext)->jcr; + if (!jcr) { + return bRC_Error; + } + + switch (var) { + case bVarJobId: + *((int *)value) = jcr->JobId; + Dmsg1(dbglvl, "Bacula: return bVarJobId=%d\n", jcr->JobId); + break; + case bVarLevel: + *((int *)value) = jcr->getJobLevel(); + Dmsg1(dbglvl, "Bacula: return bVarJobLevel=%d\n", jcr->getJobLevel()); + break; + case bVarType: + *((int *)value) = jcr->getJobType(); + Dmsg1(dbglvl, "Bacula: return bVarJobType=%d\n", jcr->getJobType()); + break; + case bVarClient: + *((char **)value) = jcr->client_name; + Dmsg1(dbglvl, "Bacula: return Client_name=%s\n", jcr->client_name); + break; + case bVarJobName: + *((char **)value) = jcr->Job; + Dmsg1(dbglvl, "Bacula: 
return Job name=%s\n", jcr->Job); + break; + case bVarPrevJobName: + *((char **)value) = jcr->PrevJob; + Dmsg1(dbglvl, "Bacula: return Previous Job name=%s\n", jcr->PrevJob); + break; + case bVarJobStatus: + *((int *)value) = jcr->JobStatus; + Dmsg1(dbglvl, "Bacula: return bVarJobStatus=%d\n", jcr->JobStatus); + break; + case bVarSinceTime: + *((int *)value) = (int)jcr->mtime; + Dmsg1(dbglvl, "Bacula: return since=%d\n", (int)jcr->mtime); + break; + case bVarAccurate: + *((int *)value) = (int)jcr->accurate; + Dmsg1(dbglvl, "Bacula: return accurate=%d\n", (int)jcr->accurate); + break; + case bVarInteractiveSession: + *(int *)value = (int)jcr->interactive_session; + break; + case bVarFileIndex: + *(int *)value = (int)jcr->JobFiles; + break; + case bVarFileSeen: + break; /* a write only variable, ignore read request */ + case bVarVssObject: +#ifdef HAVE_WIN32 + if (jcr->pVSSClient) { + *(void **)value = jcr->pVSSClient->GetVssObject(); + break; + } +#endif + return bRC_Error; + case bVarVssDllHandle: +#ifdef HAVE_WIN32 + *(void **)value = vsslib; + break; +#endif + return bRC_Error; + case bVarWhere: + *(char **)value = jcr->where; + break; + case bVarRegexWhere: + *(char **)value = jcr->RegexWhere; + break; + case bVarPrefixLinks: + *(int *)value = (int)jcr->prefix_links; + break; + case bVarReplace: + *((int*)value) = jcr->replace; + Dmsg1(dbglvl, "Bacula: return replace=%c\n", jcr->replace); + break; + case bVarFDName: /* get warning with g++ if we missed one */ + case bVarWorkingDir: + case bVarExePath: + case bVarVersion: + case bVarDistName: + case bVarxxx: + break; + } + Dsm_check(999); + return bRC_OK; +} + +static bRC baculaSetValue(bpContext *ctx, bVariable var, void *value) +{ + JCR *jcr; + Dsm_check(999); + if (!value || !ctx) { + return bRC_Error; + } +// Dmsg1(dbglvl, "bacula: baculaGetValue var=%d\n", var); + jcr = ((bacula_ctx *)ctx->bContext)->jcr; + if (!jcr) { + return bRC_Error; + } +// Dmsg1(dbglvl, "Bacula: jcr=%p\n", jcr); + switch (var) { + case bVarFileSeen: + if (!accurate_mark_file_as_seen(jcr, (char *)value)) { + return bRC_Error; + } + break; + case bVarInteractiveSession: + jcr->interactive_session = (((intptr_t) value) == 1); + break; + default: + break; + } + Dsm_check(999); + return bRC_OK; +} + +static bRC baculaRegisterEvents(bpContext *ctx, ...) +{ + va_list args; + uint32_t event; + + Dsm_check(999); + if (!ctx) { + return bRC_Error; + } + + va_start(args, ctx); + while ((event = va_arg(args, uint32_t))) { + Dmsg1(dbglvl, "Plugin wants event=%u\n", event); + } + va_end(args); + Dsm_check(999); + return bRC_OK; +} + +static bRC baculaJobMsg(bpContext *ctx, const char *file, int line, + int type, utime_t mtime, const char *fmt, ...) +{ + va_list arg_ptr; + char buf[2000]; + JCR *jcr; + + Dsm_check(999); + if (ctx) { + jcr = ((bacula_ctx *)ctx->bContext)->jcr; + } else { + jcr = NULL; + } + + va_start(arg_ptr, fmt); + bvsnprintf(buf, sizeof(buf), fmt, arg_ptr); + va_end(arg_ptr); + Jmsg(jcr, type, mtime, "%s", buf); + Dsm_check(999); + return bRC_OK; +} + +static bRC baculaDebugMsg(bpContext *ctx, const char *file, int line, + int level, const char *fmt, ...) 
+{ + va_list arg_ptr; + char buf[2000]; + + Dsm_check(999); + va_start(arg_ptr, fmt); + bvsnprintf(buf, sizeof(buf), fmt, arg_ptr); + va_end(arg_ptr); + d_msg(file, line, level, "%s", buf); + Dsm_check(999); + return bRC_OK; +} + +static void *baculaMalloc(bpContext *ctx, const char *file, int line, + size_t size) +{ +#ifdef SMARTALLOC + return sm_malloc(file, line, size); +#else + return malloc(size); +#endif +} + +static void baculaFree(bpContext *ctx, const char *file, int line, void *mem) +{ +#ifdef SMARTALLOC + sm_free(file, line, mem); +#else + free(mem); +#endif +} + +static bool is_ctx_good(bpContext *ctx, JCR *&jcr, bacula_ctx *&bctx) +{ + Dsm_check(999); + if (!ctx) { + return false; + } + bctx = (bacula_ctx *)ctx->bContext; + if (!bctx) { + return false; + } + jcr = bctx->jcr; + if (!jcr) { + return false; + } + return true; +} + +/** + * Let the plugin define files/directories to be excluded + * from the main backup. + */ +static bRC baculaAddExclude(bpContext *ctx, const char *file) +{ + JCR *jcr; + findINCEXE *old; + bacula_ctx *bctx; + Dsm_check(999); + if (!is_ctx_good(ctx, jcr, bctx)) { + return bRC_Error; + } + if (!file) { + return bRC_Error; + } + + /* Save the include context */ + old = get_incexe(jcr); + + /* Not right time to add exlude */ + if (!old) { + return bRC_Error; + } + + if (!bctx->exclude) { + bctx->exclude = new_exclude(jcr); + } + + /* Set the Exclude context */ + set_incexe(jcr, bctx->exclude); + + add_file_to_fileset(jcr, file, true); + + /* Restore the current context */ + set_incexe(jcr, old); + + Dmsg1(100, "Add exclude file=%s\n", file); + Dsm_check(999); + return bRC_OK; +} + +/** + * Let the plugin define files/directories to be excluded + * from the main backup. + */ +static bRC baculaAddInclude(bpContext *ctx, const char *file) +{ + JCR *jcr; + findINCEXE *old; + bacula_ctx *bctx; + + Dsm_check(999); + if (!is_ctx_good(ctx, jcr, bctx)) { + return bRC_Error; + } + if (!file) { + return bRC_Error; + } + + /* Save the include context */ + old = get_incexe(jcr); + + /* Not right time to add include */ + if (!old) { + return bRC_Error; + } + if (!bctx->include) { + bctx->include = old; + } + + set_incexe(jcr, bctx->include); + add_file_to_fileset(jcr, file, true); + + /* Restore the current context */ + set_incexe(jcr, old); + + Dmsg1(100, "Add include file=%s\n", file); + Dsm_check(999); + return bRC_OK; +} + +static bRC baculaAddOptions(bpContext *ctx, const char *opts) +{ + JCR *jcr; + bacula_ctx *bctx; + Dsm_check(999); + if (!is_ctx_good(ctx, jcr, bctx)) { + return bRC_Error; + } + if (!opts) { + return bRC_Error; + } + add_options_to_fileset(jcr, opts); + Dsm_check(999); + Dmsg1(1000, "Add options=%s\n", opts); + return bRC_OK; +} + +static bRC baculaAddRegex(bpContext *ctx, const char *item, int type) +{ + JCR *jcr; + bacula_ctx *bctx; + Dsm_check(999); + if (!is_ctx_good(ctx, jcr, bctx)) { + return bRC_Error; + } + if (!item) { + return bRC_Error; + } + add_regex_to_fileset(jcr, item, type); + Dmsg1(100, "Add regex=%s\n", item); + Dsm_check(999); + return bRC_OK; +} + +static bRC baculaAddWild(bpContext *ctx, const char *item, int type) +{ + JCR *jcr; + bacula_ctx *bctx; + Dsm_check(999); + if (!is_ctx_good(ctx, jcr, bctx)) { + return bRC_Error; + } + if (!item) { + return bRC_Error; + } + add_wild_to_fileset(jcr, item, type); + Dmsg1(100, "Add wild=%s\n", item); + Dsm_check(999); + return bRC_OK; +} + +static bRC baculaNewOptions(bpContext *ctx) +{ + JCR *jcr; + bacula_ctx *bctx; + Dsm_check(999); + if (!is_ctx_good(ctx, jcr, bctx)) { + 
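+/* Illustrative plugin-side use of the fileset callbacks defined above (a
+ * sketch; bfuncs and the literal paths are assumptions, the signatures match
+ * the AddInclude/AddExclude/AddWild entries in fd_plugins.h):
+ *
+ *   bfuncs->AddInclude(ctx, "/etc");
+ *   bfuncs->AddExclude(ctx, "/var/cache");
+ *   bfuncs->AddWild(ctx, "*.tmp", type);
+ *
+ * AddInclude/AddExclude are only honoured while an include block is being
+ * built, i.e. while get_incexe(jcr) returns a context (see the checks above);
+ * the type argument of AddWild is passed straight to add_wild_to_fileset().
+ */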
return bRC_Error; + } + (void)new_options(jcr, NULL); + Dsm_check(999); + return bRC_OK; +} + +static bRC baculaNewInclude(bpContext *ctx) +{ + JCR *jcr; + bacula_ctx *bctx; + Dsm_check(999); + if (!is_ctx_good(ctx, jcr, bctx)) { + return bRC_Error; + } + (void)new_include(jcr); + Dsm_check(999); + return bRC_OK; +} + +static bRC baculaNewPreInclude(bpContext *ctx) +{ + JCR *jcr; + bacula_ctx *bctx; + Dsm_check(999); + if (!is_ctx_good(ctx, jcr, bctx)) { + return bRC_Error; + } + + bctx->include = new_preinclude(jcr); + new_options(jcr, bctx->include); + set_incexe(jcr, bctx->include); + + Dsm_check(999); + return bRC_OK; +} + +/* + * Check if a file have to be backuped using Accurate code + */ +static bRC baculaCheckChanges(bpContext *ctx, struct save_pkt *sp) +{ + JCR *jcr; + bacula_ctx *bctx; + FF_PKT *ff_pkt; + bRC ret = bRC_Error; + + Dsm_check(999); + if (!is_ctx_good(ctx, jcr, bctx)) { + goto bail_out; + } + if (!sp) { + goto bail_out; + } + + ff_pkt = jcr->ff; + /* + * Copy fname and link because save_file() zaps them. This + * avoids zaping the plugin's strings. + */ + ff_pkt->type = sp->type; + if (!sp->fname) { + Jmsg0(jcr, M_FATAL, 0, _("Command plugin: no fname in baculaCheckChanges packet.\n")); + goto bail_out; + } + + ff_pkt->fname = sp->fname; + ff_pkt->link = sp->link; + memcpy(&ff_pkt->statp, &sp->statp, sizeof(ff_pkt->statp)); + + if (check_changes(jcr, ff_pkt)) { + ret = bRC_OK; + } else { + ret = bRC_Seen; + } + + /* check_changes() can update delta sequence number, return it to the + * plugin + */ + sp->delta_seq = ff_pkt->delta_seq; + sp->accurate_found = ff_pkt->accurate_found; + +bail_out: + Dsm_check(999); + Dmsg1(100, "checkChanges=%i\n", ret); + return ret; +} + +/* + * Check if a file would be saved using current Include/Exclude code + */ +static bRC baculaAcceptFile(bpContext *ctx, struct save_pkt *sp) +{ + JCR *jcr; + FF_PKT *ff_pkt; + bacula_ctx *bctx; + + char *old; + struct stat oldstat; + bRC ret = bRC_Error; + + Dsm_check(999); + if (!is_ctx_good(ctx, jcr, bctx)) { + goto bail_out; + } + if (!sp) { + goto bail_out; + } + + ff_pkt = jcr->ff; + + /* Probably not needed, but keep a copy */ + old = ff_pkt->fname; + oldstat = ff_pkt->statp; + + ff_pkt->fname = sp->fname; + ff_pkt->statp = sp->statp; + + if (accept_file(ff_pkt)) { + ret = bRC_OK; + } else { + ret = bRC_Skip; + } + + ff_pkt->fname = old; + ff_pkt->statp = oldstat; + +bail_out: + return ret; +} + +#ifdef TEST_PROGRAM + +int (*plugin_bopen)(JCR *jcr, const char *fname, uint64_t flags, mode_t mode) = NULL; +int (*plugin_bclose)(JCR *jcr) = NULL; +ssize_t (*plugin_bread)(JCR *jcr, void *buf, size_t count) = NULL; +ssize_t (*plugin_bwrite)(JCR *jcr, void *buf, size_t count) = NULL; +boffset_t (*plugin_blseek)(JCR *jcr, boffset_t offset, int whence) = NULL; + +int save_file(JCR *jcr, FF_PKT *ff_pkt, bool top_level) +{ + return 0; +} + +bool set_cmd_plugin(BFILE *bfd, JCR *jcr) +{ + return true; +} + +int main(int argc, char *argv[]) +{ + char plugin_dir[1000]; + JCR mjcr1, mjcr2; + JCR *jcr1 = &mjcr1; + JCR *jcr2 = &mjcr2; + + strcpy(my_name, "test-fd"); + + getcwd(plugin_dir, sizeof(plugin_dir)-1); + load_fd_plugins(plugin_dir); + + jcr1->JobId = 111; + new_plugins(jcr1); + + jcr2->JobId = 222; + new_plugins(jcr2); + + generate_plugin_event(jcr1, bEventJobStart, (void *)"Start Job 1"); + generate_plugin_event(jcr1, bEventJobEnd); + generate_plugin_event(jcr2, bEventJobStart, (void *)"Start Job 2"); + free_plugins(jcr1); + generate_plugin_event(jcr2, bEventJobEnd); + free_plugins(jcr2); + + 
unload_plugins(); + + Dmsg0(dbglvl, "bacula: OK ...\n"); + close_memory_pool(); + sm_dump(false); /* unit test */ + return 0; +} + +#endif /* TEST_PROGRAM */ diff --git a/src/filed/fd_plugins.h b/src/filed/fd_plugins.h new file mode 100644 index 00000000..26051573 --- /dev/null +++ b/src/filed/fd_plugins.h @@ -0,0 +1,401 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Application Programming Interface (API) definition for Bacula Plugins + * + * Kern Sibbald, October 2007 + * + */ + +#ifndef __FD_PLUGINS_H +#define __FD_PLUGINS_H + +#ifndef _BACULA_H +#ifdef __cplusplus +/* Workaround for SGI IRIX 6.5 */ +#define _LANGUAGE_C_PLUS_PLUS 1 +#endif +#define _REENTRANT 1 +#define _THREAD_SAFE 1 +#define _POSIX_PTHREAD_SEMANTICS 1 +#define _FILE_OFFSET_BITS 64 +#define _LARGEFILE_SOURCE 1 +#define _LARGE_FILES 1 +#endif /* ! _BACULA_H */ + +#include + +#if defined(HAVE_WIN32) +#if defined(HAVE_MINGW) +#include "mingwconfig.h" +#else +#include "winconfig.h" +#endif +#else /* !HAVE_WIN32 */ +#ifndef __CONFIG_H +#include "config.h" +#define __CONFIG_H +#endif +#endif + +#include "../src/version.h" +#include "bc_types.h" +#include "lib/plugins.h" +#include +#ifdef HAVE_WIN32 +#include "../win32/filed/vss.h" +#endif + +/* + * This packet is used for the restore objects + * It is passed to the plugin when restoring + * the object. + */ +struct restore_object_pkt { + int32_t pkt_size; /* size of this packet */ + char *object_name; /* Object name */ + char *object; /* restore object data to save */ + char *plugin_name; /* Plugin name */ + int32_t object_type; /* FT_xx for this file */ + int32_t object_len; /* restore object length */ + int32_t object_full_len; /* restore object uncompressed length */ + int32_t object_index; /* restore object index */ + int32_t object_compression; /* set to compression type */ + int32_t stream; /* attribute stream id */ + uint32_t JobId; /* JobId object came from */ + int32_t pkt_end; /* end packet sentinel */ +}; + +/* + * Packet to allow plugin to set stream + */ +struct stream_pkt { + int32_t pkt_size; /* size of this packet */ + int32_t stream; /* Stream */ + int32_t pkt_end; /* end packet sentinel */ +}; + +/* + * This packet is used for file save info transfer. 
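+ *
+ * Sketch of how a command plugin's startBackupFile() might fill the most
+ * important members (the pseudo path and values are illustrative; the member
+ * names are the ones defined below):
+ *
+ *   sp->fname = "/@myplugin/some/object";
+ *   sp->type  = FT_REG;
+ *   sp->statp.st_mode  = 0700 | S_IFREG;
+ *   sp->statp.st_mtime = time(NULL);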
+*/ +struct save_pkt { + int32_t pkt_size; /* size of this packet */ + char *fname; /* Full path and filename */ + char *link; /* Link name if any */ + struct stat statp; /* System stat() packet for file */ + int32_t type; /* FT_xx for this file */ + uint64_t flags; /* Bacula internal flags */ + bool no_read; /* During the save, the file should not be saved */ + bool portable; /* set if data format is portable */ + bool accurate_found; /* Found in accurate list (valid after check_changes()) */ + char *cmd; /* command */ + uint32_t delta_seq; /* Delta sequence number */ + char *object_name; /* Object name to create */ + char *object; /* restore object data to save */ + int32_t object_len; /* restore object length */ + int32_t index; /* restore object index */ + int32_t LinkFI; /* LinkFI if LINKSAVED */ + int32_t pkt_end; /* end packet sentinel */ +}; + +/* + * This packet is used for file restore info transfer. +*/ +struct restore_pkt { + int32_t pkt_size; /* size of this packet */ + int32_t stream; /* attribute stream id */ + int32_t data_stream; /* id of data stream to follow */ + int32_t type; /* file type FT */ + int32_t file_index; /* file index */ + int32_t LinkFI; /* file index to data if hard link */ + uid_t uid; /* userid */ + struct stat statp; /* decoded stat packet */ + const char *attrEx; /* extended attributes if any */ + const char *ofname; /* output filename */ + const char *olname; /* output link name */ + const char *where; /* where */ + const char *RegexWhere; /* regex where */ + int replace; /* replace flag */ + int create_status; /* status from createFile() */ + uint32_t delta_seq; /* Delta sequence number */ + int32_t pkt_end; /* end packet sentinel */ +}; + +/* + * This packet is used for file restore info transfer. +*/ +struct restore_filelist_pkt { + int32_t pkt_size; /* size of this packet */ + int32_t file_index; /* file index */ + int32_t LinkFI; /* file index to data if hard link */ + struct stat statp; /* decoded stat packet */ + const char *attrEx; /* extended attributes if any */ + const char *ofname; /* output filename */ + uint32_t delta_seq; /* Delta sequence number */ + const char *chksum; /* Checksum if available */ + int32_t pkt_end; /* end packet sentinel */ +}; + +enum { + IO_OPEN = 1, + IO_READ = 2, + IO_WRITE = 3, + IO_CLOSE = 4, + IO_SEEK = 5 +}; + +struct io_pkt { + int32_t pkt_size; /* Size of this packet */ + int32_t func; /* Function code */ + int32_t count; /* read/write count */ + int32_t flags; /* Open flags */ + mode_t mode; /* permissions for created files */ + char *buf; /* read/write buffer */ + const char *fname; /* open filename */ + int32_t status; /* return status */ + int32_t io_errno; /* errno code */ + int32_t lerror; /* Win32 error code */ + int32_t whence; /* lseek argument */ + boffset_t offset; /* lseek argument or in bread current offset*/ + bool win32; /* Win32 GetLastError returned */ + int32_t pkt_end; /* end packet sentinel */ +}; + +enum { + BACL_BACKUP = 1, + BACL_RESTORE = 2, + BXATTR_BACKUP = 3, + BXATTR_RESTORE = 4 +}; + +struct xacl_pkt { + int32_t pkt_size; /* Size of this packet */ + int32_t func; /* Function code */ + int32_t count; /* read/write count */ + char *content; /* read/write buffer */ + int32_t pkt_end; /* end packet sentinel */ +}; + +/**************************************************************************** + * * + * Bacula definitions * + * * + ****************************************************************************/ + +/* Bacula Variable Ids */ +typedef enum { + bVarJobId = 1, + bVarFDName = 
2, + bVarLevel = 3, + bVarType = 4, + bVarClient = 5, + bVarJobName = 6, + bVarJobStatus = 7, + bVarSinceTime = 8, + bVarAccurate = 9, + bVarFileSeen = 10, + bVarVssObject = 11, + bVarVssDllHandle = 12, + bVarWorkingDir = 13, + bVarWhere = 14, + bVarRegexWhere = 15, + bVarExePath = 16, + bVarVersion = 17, + bVarDistName = 18, + bVarxxx = 19, + bVarPrevJobName = 20, + bVarPrefixLinks = 21, + bVarInteractiveSession = 22, + bVarFileIndex = 23, + bVarReplace = 24 +} bVariable; + +/* Events that are passed to plugin */ +typedef enum { + bEventJobStart = 1, + bEventJobEnd = 2, + bEventStartBackupJob = 3, + bEventEndBackupJob = 4, + bEventStartRestoreJob = 5, + bEventEndRestoreJob = 6, + bEventStartVerifyJob = 7, + bEventEndVerifyJob = 8, + bEventBackupCommand = 9, + bEventRestoreCommand = 10, + bEventEstimateCommand = 11, + bEventLevel = 12, + bEventSince = 13, + bEventCancelCommand = 14, /* Executed by another thread */ + bEventVssBackupAddComponents = 15, /* Just before bEventVssPrepareSnapshot */ + bEventVssRestoreLoadComponentMetadata = 16, + bEventVssRestoreSetComponentsSelected = 17, + bEventRestoreObject = 18, + bEventEndFileSet = 19, + bEventPluginCommand = 20, /* Sent during FileSet creation */ + bEventVssBeforeCloseRestore = 21, + /* Add drives to VSS snapshot + * argument: char[27] drivelist + * You need to add them without duplicates, + * see fd_common.h add_drive() copy_drives() to get help + */ + bEventVssPrepareSnapshot = 22, + bEventOptionPlugin = 23, + bEventHandleBackupFile = 24, /* Used with Options Plugin */ + bEventComponentInfo = 25, /* Plugin component */ + bEventFeatures = 26 /* Ask for file list, ... "xxx,yyy,zzz" */ +} bEventType; + + +typedef struct s_bEvent { + uint32_t eventType; +} bEvent; + +typedef struct s_baculaInfo { + uint32_t size; + uint32_t version; +} bInfo; + +/* Bacula Core Routines -- not used within a plugin */ +#ifdef FILE_DAEMON +struct BFILE; /* forward referenced */ +struct FF_PKT; +void load_fd_plugins(const char *plugin_dir); +void new_plugins(JCR *jcr); +void free_plugins(JCR *jcr); +void generate_plugin_event(JCR *jcr, bEventType event, void *value=NULL); +bool send_plugin_name(JCR *jcr, BSOCK *sd, bool start); +bool plugin_name_stream(JCR *jcr, char *name); +int plugin_create_file(JCR *jcr, ATTR *attr, BFILE *bfd, int replace); +bool plugin_set_attributes(JCR *jcr, ATTR *attr, BFILE *ofd); +int plugin_save(JCR *jcr, FF_PKT *ff_pkt, bool top_level); +int plugin_estimate(JCR *jcr, FF_PKT *ff_pkt, bool top_level); +bool plugin_check_file(JCR *jcr, char *fname); +bRC plugin_option_handle_file(JCR *jcr, FF_PKT *ff_pkt, struct save_pkt *sp); +int plugin_get_idx(JCR *jcr, char *plugin); +bool plugin_send_restorefilelist(JCR *jcr, int plugin_index, + char *path, char *lstat, char *checksum, + int delta_seq); + +typedef struct { + const char *plugin; + const char *features; +} bFeature; + +bool plugin_get_features(JCR *jcr, alist *list); +int plugin_backup_acl(JCR *jcr, FF_PKT *ff_pkt, char **data); +bool plugin_restore_acl(JCR *jcr, char *data, uint32_t length); +int plugin_backup_xattr(JCR *jcr, FF_PKT *ff_pkt, char **data); +bool plugin_restore_xattr(JCR *jcr, char *data, uint32_t length); +bool plugin_check_stream(JCR *jcr, int32_t &stream); +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Bacula interface version and function pointers -- + * i.e. 
callbacks from the plugin to Bacula + */ +typedef struct s_baculaFuncs { + uint32_t size; + uint32_t version; + bRC (*registerBaculaEvents)(bpContext *ctx, ...); + bRC (*getBaculaValue)(bpContext *ctx, bVariable var, void *value); + bRC (*setBaculaValue)(bpContext *ctx, bVariable var, void *value); + bRC (*JobMessage)(bpContext *ctx, const char *file, int line, + int type, utime_t mtime, const char *fmt, ...); + bRC (*DebugMessage)(bpContext *ctx, const char *file, int line, + int level, const char *fmt, ...); + void *(*baculaMalloc)(bpContext *ctx, const char *file, int line, + size_t size); + void (*baculaFree)(bpContext *ctx, const char *file, int line, void *mem); + bRC (*AddExclude)(bpContext *ctx, const char *file); + bRC (*AddInclude)(bpContext *ctx, const char *file); + bRC (*AddOptions)(bpContext *ctx, const char *opts); + bRC (*AddRegex)(bpContext *ctx, const char *item, int type); + bRC (*AddWild)(bpContext *ctx, const char *item, int type); + bRC (*NewOptions)(bpContext *ctx); + bRC (*NewInclude)(bpContext *ctx); + bRC (*NewPreInclude)(bpContext *ctx); + bRC (*checkChanges)(bpContext *ctx, struct save_pkt *sp); + bRC (*AcceptFile)(bpContext *ctx, struct save_pkt *sp); /* Need fname and statp */ +} bFuncs; + + + + +/**************************************************************************** + * * + * Plugin definitions * + * * + ****************************************************************************/ + +typedef enum { + pVarName = 1, + pVarDescription = 2 +} pVariable; + +#define FD_PLUGIN_MAGIC "*FDPluginData*" + +#define FD_PLUGIN_INTERFACE_VERSION ( 14 ) + +typedef struct s_pluginInfo { + uint32_t size; + uint32_t version; + const char *plugin_magic; + const char *plugin_license; + const char *plugin_author; + const char *plugin_date; + const char *plugin_version; + const char *plugin_description; +} pInfo; + +/* + * This is a set of function pointers that Bacula can call + * within the plugin. + */ +typedef struct s_pluginFuncs { + uint32_t size; + uint32_t version; + bRC (*newPlugin)(bpContext *ctx); + bRC (*freePlugin)(bpContext *ctx); + bRC (*getPluginValue)(bpContext *ctx, pVariable var, void *value); + bRC (*setPluginValue)(bpContext *ctx, pVariable var, void *value); + bRC (*handlePluginEvent)(bpContext *ctx, bEvent *event, void *value); + bRC (*startBackupFile)(bpContext *ctx, struct save_pkt *sp); + bRC (*endBackupFile)(bpContext *ctx); + bRC (*startRestoreFile)(bpContext *ctx, const char *cmd); + bRC (*endRestoreFile)(bpContext *ctx); + bRC (*pluginIO)(bpContext *ctx, struct io_pkt *io); + bRC (*createFile)(bpContext *ctx, struct restore_pkt *rp); + bRC (*setFileAttributes)(bpContext *ctx, struct restore_pkt *rp); + bRC (*checkFile)(bpContext *ctx, char *fname); + bRC (*handleXACLdata)(bpContext *ctx, struct xacl_pkt *xacl); + bRC (*checkStream)(bpContext *ctx, struct stream_pkt *sp); +} pFuncs; + +#define plug_func(plugin) ((pFuncs *)(plugin->pfuncs)) +#define plug_info(plugin) ((pInfo *)(plugin->pinfo)) + +#ifdef __cplusplus +} +#endif + +#endif /* __FD_PLUGINS_H */ diff --git a/src/filed/fd_snapshot.c b/src/filed/fd_snapshot.c new file mode 100644 index 00000000..f92f2c01 --- /dev/null +++ b/src/filed/fd_snapshot.c @@ -0,0 +1,1890 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. + + Written by Eric Bollengier, 2015 +*/ + +/* + Documentation about snapshot backend +---------------------------------------------------------------- + +The snapshot manager is using environment variables to communicate. + +Variables: + +SNAPSHOT_ACTION + Operation such as: + create, delete, list, mount, unmount, check, support, subvolume + +SNAPSHOT_VOLUME + Volume name + ex: /dev/vgroot/home_Test-2014-01-01_00_00 (lvm) + /home/.snapshots/Test-2014-01-01_00_00 (btrfs) + /.zfs/snapshot/Test-2014-01-01_00_00 (zfs) + + The volume name is generated by the create command + +SNAPSHOT_DEVICE + Device name + ex: /dev/vgroot/home (lvm) + /home (btrfs) + / (zfs) + + The device name can be found via getmntent() + +SNAPSHOT_NAME + Snapshot name, usually the Job name + +SNAPSHOT_FSTYPE + Device filesystem type, can be found via getmntent() + ex: btrfs, zfs, ext4 + +SNAPSHOT_TYPE + Snapshot backend type, generated by support command + ex: lvm, btrfs, zfs + +SNAPSHOT_MOUNTPOINT + Device mount point, found via getmntent() + +SNAPSHOT_SNAPMOUNTPOINT + Snapshot mount point is generated by the mount command + + + Protocol +---------------------------------------------------------------- + +OK: exit code 0 and status=1 in the output +ERR: exit code <> 0 and/or status=0 in the output + +status=1 keyword="value" keyword2="value2" + +status=0 error="Error message" + + + Workflow +---------------------------------------------------------------- + +1) List filesystems + get SNAPSHOT_DEVICE, SNAPSHOT_FSTYPE, SNAPSHOT_MOUNTPOINT + + volume="" name="" device="" createtime="" + +2) Test if a filesystem supports snapshot feature + SNAPSHOT_ACTION=support + SNAPSHOT_DEVICE=/home + SNAPSHOT_MOUNTPOINT=/home + SNAPSHOT_FSTYPE=btrfs + + => status=1 type=btrfs device=/home + => status=0 + +2a) Test if a filesystem contains subvolumes + SNAPSHOT_ACTION=subvolumes + SNAPSHOT_DEVICE=/home + SNAPSHOT_FSTYPE=btrfs + + => dev=10 mountpoint=/home/subvol fstype=btrfs + +3) Create a snapshot + SNAPSHOT_ACTION=create + SNAPSHOT_NAME=Test-2014-01-01_00_00 + SNAPSHOT_DEVICE=/home + SNAPSHOT_MOUNTPOINT=/home + SNAPSHOT_FSTYPE=btrfs + SNAPSHOT_TYPE=btrfs + + => status=1 volume="/home/.snapshots/Test-2014-01-01_00_00" createtdate=1418935776 type=btrfs + +4) Mount the snapshot + SNAPSHOT_ACTION=mount + SNAPSHOT_NAME=Test-2014-01-01_00_00 + SNAPSHOT_DEVICE=/home + SNAPSHOT_MOUNTPOINT=/home + SNAPSHOT_FSTYPE=btrfs + SNAPSHOT_TYPE=btrfs + + => status=1 volume="/home/.snapshots/Test-2014-01-01_00_00" createtdate=1418935776 type=btrfs + + +5) Unmount the snapshot + SNAPSHOT_ACTION=unmount + SNAPSHOT_SNAPMOUNTPOINT=/home/.snapshots/Test-2014-01-01_00_00 + + => status=1 + +6) Delete the snapshot + SNAPSHOT_ACTION=delete + SNAPSHOT_VOLUME=/home/.snapshot/Test-2014-01-01_00_00 + + => status=1 + + */ + +#include "bacula.h" +#include "filed.h" +#define USE_CMD_PARSER +#include "plugins/fd/fd_common.h" +#undef Jmsg +#include "fd_snapshot.h" +#define APPMANAGER_CMD "%eappmanager" +#define APP_DIR "/tmp/app.d" +#define SNAPSHOT_CMD "%ebsnapshot" + +/* Defined in messages.c */ +extern char *exepath; + +/* Catalog interface with the director */ +static char 
CreateSnap[] = "CatReq Job=%s new_snapshot name=%s volume=%s device=%s tdate=%d type=%s retention=%s"; +static char DelSnap[] = "CatReq Job=%s del_snapshot name=%s device=%s"; +static char GetSnap[] = "CatReq Job=%s get_snapshot name=%s volume=%s"; + +/* Command line interface with the director */ +static char LsCmd[] = "snapshot ls name=%127s volume=%s device=%s tdate=%d type=%127s path=%s"; +static char DelCmd[] = "snapshot del name=%127s volume=%s device=%s tdate=%d type=%127s"; +static char QueryCmd[] = "snapshot query name=%127s volume=%s device=%s tdate=%d type=%127s"; +static char PruneCmd[] = "snapshot prune volume=%s type=%127s"; +static char SyncCmd[] = "snapshot sync volume=%s type=%127%"; +static char ListCmd[] = "snapshot list"; +static char ConfCmd[] = "snapshot retention=%50s"; + +/* Small function to quickly tell us if we can do snapshot here */ +static bool is_snapshot_supported(JCR *jcr) +{ + bool ret; + struct stat sp; + POOLMEM *cmd = get_pool_memory(PM_FNAME); + const char *p; + const char *str; + char add[20]; + + /* We are not really interested by arguments, just + * the filename + */ + *cmd = 0; + for (p=me->snapshot_command; *p; p++) { + if (*p == '%') { + switch (*++p) { + case '%': + str = "%"; + break; + case 'e': + str = NPRTB(exepath); + break; + default: + add[0] = '%'; + add[1] = *p; + add[2] = 0; + str = add; + } + + } else if (*p == ' ') { + break; + + } else { + add[0] = *p; + add[1] = 0; + str = add; + } + pm_strcat(cmd, str); + } + + ret = stat(cmd, &sp) == 0; + free_pool_memory(cmd); + Dmsg1(10, "Snapshot = %d\n", ret); + return ret; +} + +/* Return the default snapshot handler, must be freed at the end */ +char *snapshot_get_command() +{ + return bstrdup(SNAPSHOT_CMD); +} + +/* Initialize the snapshot manager at the begining of the + * job and create snapshots + */ +bool open_snapshot_backup_session(JCR *jcr) +{ + if (!is_snapshot_supported(jcr)) { + Dmsg0(DT_SNAPSHOT, "Snapshot not supported\n"); + return false; + } + + jcr->snap_mgr = New(snapshot_manager(jcr)); + /* Get all volumes and subvolumes */ + if (!jcr->snap_mgr->scan_mtab()) { + berrno be; /* error probably in errno */ + Dmsg1(DT_SNAPSHOT, "Unable to scan mtab. ERR=%s\n", be.bstrerror()); + Jmsg(jcr, M_ERROR, 0, "Unable to scan mtab to determine devices to snapshot\n"); + return false; + } + /* Match the volume list with the fileset */ + if (!jcr->snap_mgr->scan_fileset()) { + Jmsg(jcr, M_ERROR,0, "Unable to scan fileset to determine devices to snapshot\n"); + return false; + } + /* Create fileset needed */ + if (!jcr->snap_mgr->create_snapshots()) { + /* Error message already displayed if needed */ + return false; + } + return true; /* We should have some snapshots */ +} + +/* Command that can be called from outside */ +bool list_all_snapshots(JCR *jcr, alist *lst) +{ + snapshot_manager snap_mgr(jcr); + + /* Get all volumes and subvolumes */ + if (!snap_mgr.scan_mtab()) { + return false; + } + /* list snapshots */ + if (snap_mgr.list_snapshots(lst)) { + return false; + } + return true; +} + +/* Cleanup the snapshot manager at the end of the job */ +void close_snapshot_backup_session(JCR *jcr) +{ + if (jcr->snap_mgr) { + jcr->snap_mgr->cleanup_snapshots(); + delete jcr->snap_mgr; + jcr->snap_mgr = NULL; + } +} + +class snapshot; + +/* Device that exists on the system */ +class fs_device: public SMARTALLOC +{ +public: + rblink link; + + uint32_t dev; /* dev no */ + char *mountpoint; /* where it's mounted */ + char *fstype; /* ntfs, ext3, ext4... 
*/ + char *device; /* /dev/mapper/xxx */ + + bool supportSnapshotTested; /* True if support() was called */ + bool isSuitableForSnapshot; /* Compatible with snapshots */ + bool inSnapshotSet; + bool inFileSet; + snapshot *snap; /* Associated snapshot */ + + dlist *include; /* Where the fs_device was found in the fileset */ + void *node; /* At which node */ + + + fs_device(): + dev(0), mountpoint(NULL), fstype(NULL), device(NULL), + supportSnapshotTested(false), isSuitableForSnapshot(false), snap(NULL) + { + }; + + fs_device(uint32_t adev, const char *adevice, const char *amountpoint, const char *aftype) { + dev = adev; + fstype = bstrdup(aftype); + device = bstrdup(adevice); + mountpoint = bstrdup(amountpoint); + supportSnapshotTested = false; + isSuitableForSnapshot = false; + inSnapshotSet = false; + inFileSet = false; + snap = NULL; + include = NULL; + node = NULL; + }; + + ~fs_device() { + destroy(); + }; + + /* Call support() and cache the result in supportSnapshotTested and isSuitableForSnapshot */ + bool can_do_snapshot(); + + void setInFileSet(dlist *inc, void *where) { + include = inc; /* where we need to include subvolumes */ + node = where; /* after which node */ + inFileSet = true; + inSnapshotSet = true; + }; + + void set_snap(snapshot *s) { + snap = s; + }; + + void destroy(); +}; + +/* The device list is stored in a rblist, using the + * dev no as key. The devno can be found in every stat() + * packet. + */ +static int compare_entries(void *item1, void *item2) +{ + fs_device *dev1 = (fs_device *) item1; + fs_device *dev2 = (fs_device *) item2; + if (dev1->dev > dev2->dev) { + return 1; + + } else if (dev1->dev < dev2->dev) { + return -1; + + } else { + return 0; + } +} + +static int search_entry(void *item1, void *item2) +{ + uint32_t dev1 = (intptr_t) item1; + fs_device* dev2 = (fs_device *) item2; + if (dev1 > dev2->dev) { + return 1; + + } else if (dev1 < dev2->dev) { + return -1; + + } else { + return 0; + } +} + +/* List of all fd_device that are on the system + * Some devices are excluded automatically from + * the list, such as proc, sysfs, etc... + */ +class mtab: public SMARTALLOC +{ +public: + rblist *entries; + int sCount; /* Snapshot count */ + int dCount; /* Device count */ + mtab() { + fs_device *elt = NULL; + entries = New(rblist(elt, &elt->link)); + dCount = sCount = 0; + }; + + ~mtab() { + fs_device *elt; + foreach_rblist(elt, entries) { + elt->destroy(); + } + delete entries; + }; + + /* Have we devices for snapshot in our list ? 
*/ + bool empty() { + return sCount == 0; + }; + + /* Get a fs_device corresponding to a file */ + fs_device *search(char *file); + + /* Get subvolumes for a specific device */ + bool get_subvolumes(uint32_t dev, alist *items, FF_PKT *ff) { + fs_device *elt, *elt2; + elt = (fs_device *)entries->search((void*)(intptr_t)dev, search_entry); + if (!elt) { + return false; + } + + foreach_rblist(elt2, entries) { + if (elt2->dev == elt->dev) { + continue; + } + if (strncmp(elt2->mountpoint, elt->mountpoint, strlen(elt->mountpoint)) == 0) { + /* the mount point is included in the volume */ + + if (file_is_excluded(ff, elt2->mountpoint)) { + Dmsg1(DT_SNAPSHOT|50, "Looks to be excluded %s\n", elt2->mountpoint); + + } else { + items->append(elt2); + } + } + } + return items->size() > 0; + }; + + bool add_in_snapshot_set(char *file, dlist *inc, void *node) { + + fs_device *elt = search(file); + if (!elt) { + Dmsg1(DT_SNAPSHOT, "%s will not be added to snapshot set\n", file); + return sCount == dCount; /* not found in our list, skip it */ + } + return add_in_snapshot_set(elt, inc, node); + }; + + bool add_in_snapshot_set(fs_device *elt, dlist *inc, void *node) { + Dmsg4(DT_SNAPSHOT|10, "%s in=%d can=%d tested=%d\n", elt->mountpoint, elt->inSnapshotSet, + elt->isSuitableForSnapshot, elt->supportSnapshotTested); + if (!elt->inSnapshotSet && elt->can_do_snapshot()) { + Dmsg1(DT_SNAPSHOT, "Marking %s for snapshot\n", elt->mountpoint); + elt->setInFileSet(inc, node); + sCount++; + } + /* It will help to count when all devices are in the snapshot set */ + Dmsg2(DT_SNAPSHOT|10, "sCount %d = dCount %d\n", sCount, dCount); + return sCount == dCount; + }; + + bool add_entry(fs_device *vol) { + fs_device *ret = (fs_device *) entries->insert(vol, compare_entries); + if (ret == vol && vol->snap) { + dCount++; /* We skip directly FS such as /proc, /sys or /dev */ + } + return ret == vol; + }; +}; + +/* Snapshot descriptor, used to communicate with the snapshot + * backend on the system. 
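+ *
+ * Typical lifecycle during a backup, roughly the sequence the
+ * snapshot_manager further below goes through (a sketch; the device,
+ * mount point and fstype values are illustrative):
+ *
+ *   snapshot *s = New(snapshot(jcr));
+ *   s->set_name(jcr->Job);
+ *   s->set_device("/dev/vgroot/home");
+ *   s->set_mountpoint("/home");
+ *   s->set_fstype("ext4");
+ *   s->create();                  (creates the snapshot volume)
+ *   s->create_catalog_entry();    (only when a retention is set)
+ *   s->mount();                   (fills SnapMountPoint, backed up instead of /home)
+ *   s->unmount();
+ *   s->del();                     (done right away only when Retention == 0)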
+ */ +class snapshot: public SMARTALLOC +{ +private: + JCR *jcr; + +public: + const char *action; /* current action */ + char Name[MAX_NAME_LENGTH]; /* Name of the snapshot */ + char Type[MAX_NAME_LENGTH]; /* lvm, btrfs, netapp */ + char FSType[MAX_NAME_LENGTH]; /* btrfs, zfs, ext3 */ + char CreateDate[MAX_TIME_LENGTH]; /* Creation date */ + time_t CreateTDate; /* Creation date in seconds */ + int64_t size; /* Size of the snapshot */ + int status; /* snapshot status */ + utime_t Retention; /* Snapshot retention, might come from Pool/FileSet */ + + POOLMEM *Volume; /* Path of the volume */ + POOLMEM *Device; /* Device path */ + POOLMEM *MountPoint; /* Device Mount point */ + POOLMEM *SnapMountPoint; /* Snapshot Mount point */ + POOLMEM *path; /* path used in ls query */ + POOLMEM *errmsg; /* Error message generated by commands */ + POOLMEM *SnapDirectory; /* Where snapshots are stored */ + + char **env; /* Variables used to call snapshot */ + bool mounted; /* True if mounted on SnapMountPoint */ + bool created; /* True if the snapshot is created */ + + snapshot(JCR *ajcr) { + jcr = ajcr; + env = NULL; + path = get_pool_memory(PM_FNAME); + errmsg = get_pool_memory(PM_MESSAGE); + Volume = get_pool_memory(PM_FNAME); + Device = get_pool_memory(PM_FNAME); + MountPoint = get_pool_memory(PM_FNAME); + SnapMountPoint = get_pool_memory(PM_FNAME); + SnapDirectory = get_pool_memory(PM_FNAME); + reset(); + }; + + ~snapshot() { + free_pool_memory(path); + free_pool_memory(errmsg); + free_pool_memory(Volume); + free_pool_memory(Device); + free_pool_memory(MountPoint); + free_pool_memory(SnapMountPoint); + free_pool_memory(SnapDirectory); + free_env(); + }; + + void reset() { + *SnapDirectory = *Type = *FSType = *SnapMountPoint = 0; + *MountPoint = *Volume = *Device = *path = *errmsg = 0; + action = NULL; + size = -1; + status = 0; + mounted = false; + created = false; + Retention = jcr->snapshot_retention; + }; + + /* Free the env[] structure */ + void free_env() { + if (env) { + for (int i=0; env[i] ; i++) { + free(env[i]); + } + free(env); + env = NULL; + } + }; + + void set_device(const char *d) { + pm_strcpy(Device, d); + }; + + void set_mountpoint(const char *d) { + pm_strcpy(MountPoint, d); + }; + + void set_name(const char *n) { + bstrncpy(Name, n, sizeof(Name)); + }; + + void set_fstype(const char *n) { + bstrncpy(FSType, n, sizeof(FSType)); + }; + + void set_action(const char *a) { + action = a; + }; + + /* Convert a real top path to a snapshot path + * and set proper variables inside ff_pkt + * to translate back all subfiles. 
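+ *
+ * Example, using the paths mentioned in the body below: with
+ * MountPoint=/tmp and SnapMountPoint=/tmp/.snapshot/Job.20140502.01.01.01,
+ * a fileset entry /tmp/home is walked as
+ *   /tmp/.snapshot/Job.20140502.01.01.01/home
+ * while volume_path, snapshot_path and strip_snap_path let the daemon
+ * record the names under their real location, /tmp/home.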
+ */ + bool convert_path(FF_PKT *ff) { + if (!*MountPoint || !*SnapMountPoint) { + Dmsg2(DT_SNAPSHOT, "MountPoint=%s SnapMountPoint=%s\n", + NPRT(MountPoint), NPRT(SnapMountPoint)); + return false; + } + if (!ff->snap_top_fname) { + ff->snap_top_fname = get_pool_memory(PM_FNAME); + } + ff->volume_path = MountPoint; /* /tmp */ + ff->snapshot_path = SnapMountPoint; /* /tmp/.snapshot/Job.20140502.01.01.01 */ + ff->top_fname_save = ff->top_fname; + + int mp_first = strlen(MountPoint); /* will point to after MountPoint in top_fname */ + int last = pm_strcpy(ff->snap_top_fname, SnapMountPoint); + last = MAX(last - 1, 0); + + /* We need to concat path and avoid double / and no / */ + if (ff->snap_top_fname[last] == '/') { + if (ff->top_fname[mp_first] == '/') { + ff->snap_top_fname[last] = 0; /* strip double / */ + } + } else { /* no / at all */ + if (ff->top_fname[mp_first] != '/') { + pm_strcat(ff->snap_top_fname, "/"); + } + } + + pm_strcat(ff->snap_top_fname, ff->top_fname + mp_first); + ff->top_fname = ff->snap_top_fname; + ff->strip_snap_path = true; + Dmsg1(DT_SNAPSHOT|50, "top_fname=%s\n", ff->top_fname); + return true; + }; + + /* Create a environment used in the child process */ + int edit_snapshot_env() { + int i = 0; + POOLMEM *tmp = get_pool_memory(PM_FNAME); + free_env(); + + /* Update "10" to add more variables */ + env = (char **) malloc(sizeof(char *) * 10); + + if (*Name) { + Mmsg(tmp, "SNAPSHOT_NAME=%s", Name); + env[i++] = bstrdup(tmp); + } + if (*Volume) { + Mmsg(tmp, "SNAPSHOT_VOLUME=%s", Volume); + env[i++] = bstrdup(tmp); + } + if (*Device) { + Mmsg(tmp, "SNAPSHOT_DEVICE=%s", Device); + env[i++] = bstrdup(tmp); + } + if (*Type) { + Mmsg(tmp, "SNAPSHOT_TYPE=%s", Type); + env[i++] = bstrdup(tmp); + } + if (*FSType) { + Mmsg(tmp, "SNAPSHOT_FSTYPE=%s", FSType); + env[i++] = bstrdup(tmp); + } + if (*MountPoint) { + Mmsg(tmp, "SNAPSHOT_MOUNTPOINT=%s", MountPoint); + env[i++] = bstrdup(tmp); + } + if (*SnapDirectory) { + Mmsg(tmp, "SNAPSHOT_SNAPDIRECTORY=%s", SnapDirectory); + env[i++] = bstrdup(tmp); + } + if (*SnapMountPoint) { + Mmsg(tmp, "SNAPSHOT_SNAPMOUNTPOINT=%s", SnapMountPoint); + env[i++] = bstrdup(tmp); + } + /* When adding new entries, do not forget to add more slots to env[] */ + + Mmsg(tmp, "SNAPSHOT_ACTION=%s", action); + env[i++] = bstrdup(tmp); + + env[i] = NULL; /* last record */ + + if (chk_dbglvl(DT_SNAPSHOT|100)) { + for (i = 0; env[i] ; i++) { + Dmsg1(0, "%s\n", env[i]); + } + } + + free_pool_memory(tmp); + return 1; + }; + + /* Edit the command line if needed */ + int edit_snapshot_codes(POOLMEM **omsg, const char *imsg) { + const char *p; + const char *str; + char add[20]; + + **omsg = 0; + for (p=imsg; *p; p++) { + if (*p == '%') { + switch (*++p) { + case '%': + str = "%"; + break; + case 'e': + str = NPRTB(exepath); + break; + case 'n': + str = Name; + break; + case 'v': + str = Volume; + break; + case 'd': + str = Device; + break; + case 'D': + str = SnapDirectory; + break; + case 'a': + str = NPRT(action); + break; + case 't': + str = Type; + break; + case 'f': + str = FSType; + break; + case 'p': + str = MountPoint; + break; + case 's': + str = SnapMountPoint; + break; + default: + add[0] = '%'; + add[1] = *p; + add[2] = 0; + str = add; + } + + } else { + add[0] = *p; + add[1] = 0; + str = add; + } + pm_strcat(omsg, str); + } + + if (chk_dbglvl(DT_SNAPSHOT|10)) { + POOL_MEM tmp; + Mmsg(tmp, " -d %d -o /tmp/bsnapshot.log ", debug_level); + pm_strcat(omsg, tmp.c_str()); + } + + Dmsg2(DT_SNAPSHOT|30, "edit_snapshot_codes: %s -> %s\n", imsg, *omsg); + 
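+   /* Example (a sketch): with the default command SNAPSHOT_CMD, "%ebsnapshot",
+    * %e expands to exepath so the edited command is "<exepath>bsnapshot".
+    * The remaining codes substitute members of this object (%n Name,
+    * %v Volume, %d Device, %t Type, %f FSType, %p MountPoint,
+    * %s SnapMountPoint, %D SnapDirectory, %a action); an unknown %x is
+    * copied through literally (default case above). */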
return 1; + }; + + /* Call the snapshot backend to know if we can snapshot the current FS */ + int support_snapshot(fs_device *vol) { + arg_parser cmd; + status = 0; + + reset(); + set_device(vol->device); + set_mountpoint(vol->mountpoint); + set_fstype(vol->fstype); + + if (!do_command("support", &cmd)) { + goto bail_out; + } + scan_arg(&cmd); + + bail_out: + Dmsg2(DT_SNAPSHOT|50, "%s snapshot support status=%d\n", vol->mountpoint, status); + return status; + }; + + /* Scan sub volumes for a particular volume */ + int scan_subvolumes(fs_device *vol, alist *lst) { + int ret = 0; + arg_parser cmd; + uint32_t dev=0; + char *mp=NULL, *fstype=NULL, *device=Device; + + reset(); + set_device(vol->device); + set_mountpoint(vol->mountpoint); + set_fstype(vol->fstype); + + if (!do_command("subvolumes", &cmd)) { + goto bail_out; + } + + for (int i = 0; i < cmd.argc ; i++) { + if (strcasecmp(cmd.argk[i], "Dev") == 0 && cmd.argv[i]) { + dev = str_to_int64(cmd.argv[i]); + + } else if (strcasecmp(cmd.argk[i], "MountPoint") == 0 && cmd.argv[i]) { + mp = cmd.argv[i]; + + } else if (strcasecmp(cmd.argk[i], "Device") == 0 && cmd.argv[i]) { + device = cmd.argv[i]; + + } else if (strcasecmp(cmd.argk[i], "FSType") == 0 && cmd.argv[i]) { + fstype = cmd.argv[i]; + if (mp && fstype && dev) { + fs_device *elt = New(fs_device(dev, device, mp, fstype)); + lst->append(elt); + /* reset variables */ + } + dev = 0; + mp = fstype = NULL; + device = Device; + } + } + ret = 1; + + bail_out: + return ret; + }; + + /* Prune current snapshots + * - List snapshots available on the director, keep a list locally + * - get mtab, list snapshots for all devices, or devices that are in the director list + */ + int prune(BSOCK *bs) { + return 1; + }; + + /* List local snapshots, list director snapshots, and synchronize the two */ + int sync(BSOCK *bs) { + return 1; + }; + + /* List files from a snapshot + * Need to set the Volume and the Path + */ + int ls(BSOCK *bs) { + return 1; + }; + + /* Scan common arguments */ + int scan_arg(arg_parser *cmd) { + for (int i = 0; i < cmd->argc ; i++) { + if (strcasecmp(cmd->argk[i], "Volume") == 0 && cmd->argv[i]) { + pm_strcpy(Volume, cmd->argv[i]); + + } else if (strcasecmp(cmd->argk[i], "CreateDate") == 0 && cmd->argv[i]) { + bstrncpy(CreateDate, cmd->argv[i], sizeof(CreateDate)); + CreateTDate = str_to_utime(CreateDate); + + } else if (strcasecmp(cmd->argk[i], "CreateTDate") == 0 && cmd->argv[i]) { + CreateTDate = str_to_int64(cmd->argv[i]); + bstrftimes(CreateDate, sizeof(CreateDate), CreateTDate); + + } else if (strcasecmp(cmd->argk[i], "Type") == 0 && cmd->argv[i]) { + bstrncpy(Type, cmd->argv[i], sizeof(Type)); + + } else if (strcasecmp(cmd->argk[i], "SnapMountPoint") == 0 && cmd->argv[i]) { + pm_strcpy(SnapMountPoint, cmd->argv[i]); + + } else if (strcasecmp(cmd->argk[i], "SnapDirectory") == 0 && cmd->argv[i]) { + pm_strcpy(SnapDirectory, cmd->argv[i]); + + } else if (strcasecmp(cmd->argk[i], "status") == 0 && cmd->argv[i]) { + status = str_to_int64(cmd->argv[i]); + + } else if (strcasecmp(cmd->argk[i], "Device") == 0 && cmd->argv[i]) { + pm_strcpy(Device, cmd->argv[i]); + } + } + return 1; + }; + + /* Create a snapshot with already given attributes + * Need to set Name and Device at the minimum + */ + int create() { + int ret = 0; + arg_parser cmd; + + if (!*Name || !*Device) { + goto bail_out; + } + + Dmsg2(DT_SNAPSHOT, "Create Snapshot of %s %s\n", Device, Name); + + /* TODO: see if we handle multiple snapshots per call */ + if (!do_command("create", &cmd)) { + goto bail_out; + } + + 
scan_arg(&cmd); + created = 1; + + ret = 1; + + bail_out: + return ret; + }; + + int mount() { + arg_parser cmd; + status = 0; + + if (!*Name || !*Volume || !*Device || !*Type) { + goto bail_out; + } + + Dmsg1(DT_SNAPSHOT, "Doing mount of %s\n", Volume); + + if (!do_command("mount", &cmd)) { + goto bail_out; + } + + *SnapMountPoint = 0; + + scan_arg(&cmd); + + mounted = (status > 0 && *SnapMountPoint); + + bail_out: + return status; + }; + + int unmount() { + arg_parser cmd; + status = 0; + + if (!*Name || !*SnapMountPoint || !*Type || !mounted) { + goto bail_out; + } + + Dmsg1(DT_SNAPSHOT, "Doing unmount of a %s\n", SnapMountPoint); + + if (!do_command("unmount", &cmd)) { + goto bail_out; + } + + scan_arg(&cmd); + + mounted = 0; + + bail_out: + return status; + }; + + /* Delete a snapshot with the given device name */ + int del() { + int ret = 0; + arg_parser cmd; + if (!*Name || !*Volume || !created) { + goto bail_out; + } + ret = do_command("delete", &cmd); + bail_out: + return ret; + }; + + /* TODO: Need to read stdout as well */ + int do_command(const char *act, arg_parser *cmd) { + int ret = 0, rcode; + POOLMEM *command = get_pool_memory(PM_FNAME); + POOLMEM *out = get_pool_memory(PM_FNAME); + + set_action(act); + edit_snapshot_codes(&command, me->snapshot_command); + edit_snapshot_env(); + + Dmsg1(DT_SNAPSHOT|20, "Execute %s command\n", act); + + if (*command == 0) { + Mmsg(errmsg, _("Error while creating command string %s.\n"), act); + Dmsg1(DT_SNAPSHOT, "%s", errmsg); + goto bail_out; + } + + /* If the exit code is 1, we can expect to have a clear error + * message in the output + */ + if ((rcode = run_program_full_output(command, 600, out, env)) != 0) { + if ((rcode & ~b_errno_exit) == 1) { /* exit 1, look the output */ + if (cmd->parse_cmd(out) == bRC_OK) { + int i = cmd->find_arg_with_value("status"); + if (i >= 0) { + /* If we have a status, take it */ + status = str_to_int64(cmd->argv[i]); + } else { + status = 0; + } + i = cmd->find_arg_with_value("error"); + if (i >= 0) { + pm_strcpy(errmsg, cmd->argv[i]); + Dmsg1(DT_SNAPSHOT|20, "%s\n", errmsg); + goto bail_out; + } + } + } + berrno be; + Mmsg(errmsg, _("Error while executing \"%s\" %s. 
%s %s\n"), + act, command, out, be.bstrerror()); + Dmsg1(DT_SNAPSHOT, "%s", errmsg); + goto bail_out; + } + + /* Need to decode the output of the script + * TODO: some commands will have multiple lines + */ + if (cmd->parse_cmd(out) != bRC_OK) { + Dmsg2(DT_SNAPSHOT, "snapshot command %s output error: [%s]\n", act, out); + Mmsg(errmsg, _("Unable to parse snapshot command output\n")); + goto bail_out; + } + Dmsg1(DT_SNAPSHOT|50, "ret = %s\n", out); + ret = 1; + bail_out: + free_pool_memory(command); + free_pool_memory(out); + return ret; + }; + + int list(alist *lst) { + Dmsg0(DT_SNAPSHOT, "Doing list of a snapshots of a given device\n"); + snapshot *snap; + arg_parser cmd; + int i, ret=0, status=1; + char *volume=NULL, *name=NULL, *device=NULL, *createdate=NULL, *error=NULL; + utime_t createtdate = 0; + + /* TODO: Here we need to loop over a list */ + if (!do_command("list", &cmd)) { + goto bail_out; + } + ret = 1; + + /* should get + * volume=xxx device=zzzz name=yyy createtdate=12121212 size=xx status=xx error=xx type=lvm + */ + for (i = 0; i < cmd.argc ; i++) { + if (strcasecmp(cmd.argk[i], "Volume") == 0) { + volume = cmd.argv[i]; + + } else if (strcasecmp(cmd.argk[i], "Name") == 0) { + name = cmd.argv[i]; + + } else if (strcasecmp(cmd.argk[i], "Device") == 0) { + device = cmd.argv[i]; + + } else if (strcasecmp(cmd.argk[i], "Error") == 0) { + error = cmd.argv[i]; + + } else if (strcasecmp(cmd.argk[i], "Status") == 0) { + status = str_to_int64(cmd.argv[i]); + + } else if (strcasecmp(cmd.argk[i], "Type") == 0) { + snap = New(snapshot(jcr)); + pm_strcpy(snap->Volume, volume); + pm_strcpy(snap->Device, NPRTB(device)); + bstrncpy(snap->Name, NPRTB(name), sizeof(snap->Name)); + bstrncpy(snap->Type, cmd.argv[i], sizeof(snap->Type)); + bstrncpy(snap->CreateDate, createdate, sizeof(snap->CreateDate)); + pm_strcpy(snap->errmsg, NPRTB(error)); + snap->status = status; + snap->CreateTDate = createtdate; + error = createdate = device = name = volume = NULL; + status = 1; + createtdate = 0; + lst->append(snap); + + } else if (strcasecmp(cmd.argk[i], "CreateTDate") == 0) { + createtdate = str_to_int64(cmd.argv[i]); + + } else if (strcasecmp(cmd.argk[i], "CreateDate") == 0) { + createdate = cmd.argv[i]; + createtdate = str_to_utime(cmd.argv[i]); + } + } + bail_out: + return ret; + }; + + /* Query information about snapshot */ + int query() { + Dmsg0(0, "Doing query of a snapshot\n"); + arg_parser cmd; + int i, ret=0; + + if (!*Volume) { + goto bail_out; + } + + if (!do_command("query", &cmd)) { + goto bail_out; + } + + if ((i = cmd.find_arg_with_value("size")) >= 0) { + size = str_to_int64(cmd.argv[i]); + } + + if ((i = cmd.find_arg_with_value("status")) >= 0) { + status = str_to_int64(cmd.argv[i]); + } + + ret = 1; + + bail_out: + return ret; + }; + + /* Quickly unbash all attributes after a sscanf */ + void unbash_spaces() { + ::unbash_spaces(Volume); + ::unbash_spaces(Device); + ::unbash_spaces(path); + ::unbash_spaces(Name); + ::unbash_spaces(CreateDate); + ::unbash_spaces(errmsg); + }; + + void bash_spaces() { + ::bash_spaces(Volume); + ::bash_spaces(Device); + ::bash_spaces(path); + ::bash_spaces(Name); + ::bash_spaces(CreateDate); + ::bash_spaces(errmsg); + }; + + /* Quicky make sure we have enough space to handle the request */ + void check_buffer_size(int len) { + Volume = check_pool_memory_size(Volume, len); + Device = check_pool_memory_size(Device, len); + path = check_pool_memory_size(path, len); + }; + + /* Create Catalog entry for the current snapshot */ + int create_catalog_entry() { + 
int ret = 0; + char ed1[50]; + bash_spaces(); + jcr->dir_bsock->fsend(CreateSnap, + jcr->Job, Name, Volume, + Device, CreateTDate, Type, edit_uint64(Retention, ed1)); + if (jcr->dir_bsock->recv() < 0) { + Mmsg(errmsg, _("Unable to create snapshot record. ERR=%s\n"), + jcr->dir_bsock->bstrerror()); + + } else if (strncmp(jcr->dir_bsock->msg, "1000", 4) != 0) { + Mmsg(errmsg, _("Unable to create snapshot record, got %s\n"), + jcr->dir_bsock->msg); + + } else { + ret = 1; /* OK */ + } + unbash_spaces(); + return ret; + }; + + /* Delete Catalog entry of the current snapshot */ + int delete_catalog_entry() { + int ret = 0; + bash_spaces(); + jcr->dir_bsock->fsend(DelSnap, jcr->Job, Name, Device); + + if (jcr->dir_bsock->recv() < 0) { + Mmsg(errmsg, _("Unable to delete snapshot record. ERR=%s\n"), + jcr->dir_bsock->bstrerror()); + + } else if (strncmp(jcr->dir_bsock->msg, "1000", 4) != 0) { + Mmsg(errmsg, _("Unable to delete snapshot record, got %s\n"), + jcr->dir_bsock->msg); + + } else { + ret = 1; /* OK */ + } + + unbash_spaces(); + return ret; + }; + + /* Get Catalog entry of the current snapshot */ + int get_catalog_entry() { + int ret = 0; + arg_parser cmd; + + if (!*Name || !*Volume) { + return ret; + } + + bash_spaces(); + jcr->dir_bsock->fsend(GetSnap, jcr->Job, Name, Volume); + + if (jcr->dir_bsock->recv() < 0) { + Mmsg(errmsg, _("Unable to get snapshot record. ERR=%s\n"), + jcr->dir_bsock->bstrerror()); + + } else if (strncmp(jcr->dir_bsock->msg, "1000", 4) != 0) { + Mmsg(errmsg, _("Unable to get snapshot record, got %s\n"), + jcr->dir_bsock->msg); + + } else { + if (cmd.parse_cmd(jcr->dir_bsock->msg) != bRC_OK) { + Mmsg(errmsg, _("Unable to parse command output\n")); + scan_arg(&cmd); /* Fill all parameters from director */ + + } else { + ret = 1; /* OK */ + } + } + + unbash_spaces(); + return ret; + }; + +}; + +/* Should be after snapshot declaration */ +void fs_device::destroy() { + if (fstype) { + free(fstype); + fstype = NULL; + } + if (mountpoint) { + free(mountpoint); + mountpoint = NULL; + } + if (device) { + free(device); + device = NULL; + } + if (snap) { + delete snap; + snap = NULL; + } +} + +bool fs_device::can_do_snapshot() { + if (snap && !supportSnapshotTested) { + if (snap->support_snapshot(this)) { + Dmsg2(DT_SNAPSHOT, "%s suitable for snapshot, type %s\n", + mountpoint, snap->Type); + isSuitableForSnapshot = true; + + } else { + Dmsg2(DT_SNAPSHOT, "%s is not suitable for snapshot, type %s\n", + mountpoint, snap->Type); + } + supportSnapshotTested = true; + } + return isSuitableForSnapshot; +} + +/* Should be after the snapshot declaration */ +fs_device *mtab::search(char *file) { + struct stat statp; + if (lstat(file, &statp) != 0) { + Dmsg1(DT_SNAPSHOT, "%s not found\n", file); + return NULL; /* not found */ + } + + fs_device *elt = (fs_device *)entries->search((void *)((intptr_t)(statp.st_dev)), + search_entry); + if (!elt) { + Dmsg2(DT_SNAPSHOT, "Device %d for file %s not found in our mount list\n", + statp.st_dev, file); + return NULL; /* not found in our list, skip it */ + } + + if (!elt->can_do_snapshot()) { + Dmsg2(DT_SNAPSHOT, "Device %d for file %s not snapshotable\n", + statp.st_dev, file); + return NULL; + } + Dmsg2(DT_SNAPSHOT, "Found device %d for file %s\n", elt->dev, file); + return elt; +} + +/* Application to quiesce/un-quiesce */ +struct app { + BPIPE *fd; /* Communication channel */ + char *name; /* Pointer to the script name */ + char cmd[1]; /* Command line */ +}; + +/* In the application manager, we want to run a set + * of scripts and see if 
applications are running or + * not on our partitions. + * + * We can specify application in the fileset, or just + * try all application that are installed. + * + */ +class app_manager: public SMARTALLOC +{ +private: + JCR *jcr; + char *appdir; /* Where to find application scripts */ + alist *applst; /* Application list (script list to call) */ + int apptimeout; /* Timeout when trying to quiesce application */ + mtab *mount_list; /* snapshot set */ + +public: + app_manager(JCR *ajcr, mtab *m, char *dir): + jcr(ajcr), + appdir(dir), + applst(New(alist(10, owned_by_alist))), + apptimeout(300), + mount_list(m) {}; + + ~app_manager() { + delete applst; + }; + + /* Put in a file the list of all devices that + * are in the snapshot set + */ + bool dump_snapshotset() { + return true; + }; + + /* Scan application */ + bool scan() { + POOLMEM *results; + bool ret=false; + char *end, *start; + struct stat sp; + struct app *elt = NULL; + + if (!appdir || !*appdir || stat(appdir, &sp) == -1) { + Dmsg0(DT_SNAPSHOT, "app not configured\n"); + return true; + } + + /* Create a file with the list of all devices that are suitable + * for snapshot + */ + dump_snapshotset(); + + results = get_pool_memory(PM_FNAME); + if (run_program_full_output((char*)APPMANAGER_CMD, apptimeout, results) != 0) { + berrno be; + Dmsg2(DT_SNAPSHOT, "app scan error results=%s ERR=%s\n", results, be.bstrerror()); + goto bail_out; + } + + ret = true; + start = results; + + /* Put each line of the output in our list */ + for (start = results; start && *start;) { + end = strchr(start, '\n'); + if (end) { + *end = 0; + elt = (struct app *) malloc(sizeof(struct app) + strlen(start) + 1); + + elt->fd = NULL; + strcpy(elt->cmd, start); + elt->name = (char *)last_path_separator(elt->cmd); + if (elt->name) { + elt->name++; + + } else { + elt->name = elt->cmd; + } + + applst->append(elt); + Dmsg2(10, "+ %s (%s)\n", elt->name, elt->cmd); + *end = '\n'; + end++; + } + start = end; + } + bail_out: + free_pool_memory(results); + return ret; + }; + + bool unquiesce() { + if (applst->size() == 0) { + return true; + } + + Jmsg(jcr, M_INFO, 0, _("Un-Quiescing applications\n")); + return true; + }; + + /* Quiesce applications */ + bool quiesce() { + bool ret = true; + + if (applst->size() == 0) { + return true; + } + + Jmsg(jcr, M_INFO, 0, _("Quiescing applications\n")); + + for (int i = 0 ; i < applst->size() ; i++) { + struct app *elt = (struct app *) applst->get(i); + elt->fd = open_bpipe(elt->cmd, 0, "rw"); + if (!elt->fd) { + /* Unable to execute the program */ + continue; + } + /* Send some commands here */ + } + return ret; + }; +}; + +snapshot_manager::snapshot_manager(JCR *ajcr): + jcr(ajcr), mount_list(New(mtab())) { +} + +snapshot_manager::~snapshot_manager() { + delete mount_list; +} + +bool snapshot_manager::cleanup_snapshots() +{ + fs_device *elt; + foreach_rblist(elt, mount_list->entries) { + if (elt->can_do_snapshot() && elt->inSnapshotSet) { + snapshot *s = elt->snap; + if (!s->created) { + continue; + } + if (s->unmount()) { + /* TODO: Display an error? 
Can check mounted status */ + } + /* When no retention is set, we delete the snapshot + * just after the backup + */ + if (s->Retention == 0) { + if (s->del()) { + Jmsg(jcr, M_INFO, 0, _(" Delete Snapshot for %s\n"), elt->mountpoint); + + } else { + Jmsg(jcr, M_ERROR, 0, _(" Unable to delete snapshot of %s ERR=%s\n"), + elt->mountpoint, s->errmsg); + } + } + } + } + return true; +} + +bool snapshot_manager::create_snapshots() +{ + /* No snapshot, no quiescing */ + if (mount_list->empty()) { + Dmsg0(DT_SNAPSHOT, "The mount list is empty, no snapshot to take\n"); + return false; + } + + /* First thing to do is to quiesce application */ + app_manager apps(jcr, mount_list, (char *)APP_DIR); + + /* TODO: Let see if we really need to abort + * the snapshot part if application + */ + if (!apps.scan()) { + return false; + } + + if (!apps.quiesce()) { + return false; + } + + fs_device *elt; + foreach_rblist(elt, mount_list->entries) { + if (elt->can_do_snapshot() && elt->inSnapshotSet) { + snapshot *s = elt->snap; + if (s->create()) { + Jmsg(jcr, M_INFO, 0, _(" Create Snapshot for %s\n"), elt->mountpoint); + + if (s->Retention > 0) {/* Create snapshot catalog entry if we need to keep them */ + s->create_catalog_entry(); + } + + } else if (s->status == 2) { /* Use Error message */ + elt->isSuitableForSnapshot = false; /* Disable snapshot for this device */ + Jmsg(jcr, M_ERROR, 0, _(" Unable to create snapshot of %s ERR=%s\n"), + elt->mountpoint, s->errmsg); + + } else { /* By default, an error in the creation should be fatal */ + Jmsg(jcr, M_FATAL, 0, _(" Unable to create snapshot of %s ERR=%s\n"), + elt->mountpoint, s->errmsg); + apps.unquiesce(); + return false; + } + + } else { + Dmsg3(DT_SNAPSHOT|20, "No Snapshot for %s suitable=%d inset=%d\n", + elt->mountpoint, elt->isSuitableForSnapshot, elt->inSnapshotSet); + } + } + + /* Snapshots are ok, we need to release applications */ + if (!apps.unquiesce()) { + return false; + } + + /* Save the include context */ + POOL_MEM t; + findINCEXE *old = get_incexe(jcr); + findINCEXE *exclude = NULL; + foreach_rblist(elt, mount_list->entries) { + if (elt->can_do_snapshot() && elt->inSnapshotSet) { + snapshot *s = elt->snap; + + if (!s->mount()) { + Jmsg(jcr, M_ERROR, 0, " Unable to mount snapshot %s ERR=%s\n", + elt->mountpoint, s->errmsg); + + } else if (*s->SnapDirectory) { + if (!exclude) { + exclude = new_exclude(jcr); + /* Set the Exclude context */ + set_incexe(jcr, exclude); + } + Mmsg(t, "%s/.snapshots", elt->snap->SnapMountPoint); + Dmsg1(DT_SNAPSHOT|10, "Excluding %s\n", t.c_str()); + add_file_to_fileset(jcr, t.c_str(), true); + } + } + } + + /* Restore the current context */ + if (exclude) { + set_incexe(jcr, old); + } + return true; +} + +/* TODO: We might want to use some filters here */ +bool snapshot_manager::list_snapshots(alist *lst) +{ + fs_device *elt; + + /* No device, no snapshot */ + if (mount_list->dCount == 0) { + Dmsg0(DT_SNAPSHOT, "mount list is empty, no snapshot\n"); + return false; + } + + foreach_rblist(elt, mount_list->entries) { + if (elt->can_do_snapshot()) { + elt->snap->list(lst); + } + } + return true; +} + +void snapshot_manager::add_mount_point(uint32_t dev, const char *device, + const char *mountpoint, const char *fstype) +{ + bool check=true; + alist list(10, not_owned_by_alist); + fs_device *vol = New(fs_device(dev, device, mountpoint, fstype)); + + /* These FS are not supposed to use snapshot */ + const char *specialmp[] = { + "/proc/", + "/sys/", + NULL + }; + /* These FS are not supposed to use snapshot */ + const 
char *specialfs[] = { + "autofs", + "binfmt_misc", + "cgroup", + "configfs", + "debugfs", + "dev", + "devpts", + "devtmpfs", + "ecryptfs", + "fuse.gvfsd-fuse", + "fusectl", + "fd", + "hugetlbfs", + "mqueue", + "proc", + "pstore", + "rpc_pipefs", + "securityfs", + "selinuxfs", + "sysfs", + "systemd-1", + "tmpfs", + NULL + }; + + /* We skip directly /proc, /sys */ + for (int i=0; specialmp[i] ; i++) { + if (strncasecmp(specialmp[i], mountpoint, strlen(specialmp[i])) == 0) { + check = false; + break; + } + } + + if (check) { + for (int i=0; specialfs[i] ; i++) { + if (strcasecmp(specialfs[i], fstype) == 0) { + check = false; + break; + } + } + } + + if (check) { + snapshot *snap = New(snapshot(jcr)); + snap->set_name(jcr->Job); + vol->snap = snap; + + if (snap->scan_subvolumes(vol, &list)) { + for (int i = list.size() - 1 ; i >= 0 ; i--) { + fs_device *d = (fs_device *)list.remove(i); + add_mount_point(d->dev, d->device, d->mountpoint, d->fstype); + delete d; + } + } + } + + Dmsg4(DT_SNAPSHOT|20, "Adding %s dev=%d snap?=%d to the mount list (%d)\n", + mountpoint, dev, check, mount_list->dCount); + + if (!mount_list->add_entry(vol)) { + Dmsg1(DT_SNAPSHOT, "%s Already exists in mount list\n", vol->mountpoint); + delete vol; + } +} + +/* In this handler, we need to fill a mtab structure */ +static void add_handler(void *user_ctx, + struct stat *st, + const char *fstype, + const char *mountpoint, + const char *mntopts, + const char *device) +{ + Dmsg5(DT_SNAPSHOT|50, "dev=%ld device=%s mountpoint=%s fstype=%s mntopts=%s\n", + st->st_dev, device, mountpoint, fstype, mntopts); + + /* TODO: If the fstype is btrfs or zfs, the fs might contains subvolumes, + * and these subvolumes may not be reported in the mntent list. In this + * case, we need to call proper btrfs/zfs commands to list them. 
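The add_mount_point() logic above screens a mount entry twice before a snapshot object is even created: a case-insensitive prefix match of the mount point against /proc/ and /sys/, then an exact case-insensitive match of the filesystem type against a NULL-terminated table of pseudo filesystems. A minimal standalone sketch of that filter, with illustrative names and table contents rather than Bacula's mtab/fs_device types:

   #include <cstdio>
   #include <cstring>      /* strlen */
   #include <strings.h>    /* strcasecmp, strncasecmp */

   /* Hypothetical helper: true when a mount entry is worth snapshotting. */
   static bool is_snapshot_candidate(const char *mountpoint, const char *fstype)
   {
      static const char *skip_prefix[] = { "/proc/", "/sys/", NULL };
      static const char *skip_fstype[] = { "proc", "sysfs", "devpts", "devtmpfs",
                                           "cgroup", "autofs", "tmpfs", NULL };

      for (int i = 0; skip_prefix[i]; i++) {
         if (strncasecmp(skip_prefix[i], mountpoint, strlen(skip_prefix[i])) == 0) {
            return false;                 /* lives below /proc or /sys */
         }
      }
      for (int i = 0; skip_fstype[i]; i++) {
         if (strcasecmp(skip_fstype[i], fstype) == 0) {
            return false;                 /* pseudo filesystem type */
         }
      }
      return true;
   }

   int main()
   {
      printf("/home ext4     -> %d\n", is_snapshot_candidate("/home", "ext4"));      /* 1 */
      printf("/proc/sys proc -> %d\n", is_snapshot_candidate("/proc/sys", "proc"));  /* 0 */
      return 0;
   }

Only entries that pass both checks go on to receive a snapshot object and a subvolume scan, as in the code above.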
+ * # btrfs subvolume list /mnt + * ID 256 gen 17 top level 5 path test + * ID 259 gen 18 top level 5 path test/titi + */ + snapshot_manager *snapmgr = (snapshot_manager *)user_ctx; + snapmgr->add_mount_point(st->st_dev, device, mountpoint, fstype); +} + +bool snapshot_manager::scan_mtab() +{ + return read_mtab(add_handler, this); +} + + +/* Scan the fileset to select partitions to snapshot */ +bool snapshot_manager::scan_fileset() +{ + if (!jcr->ff || !jcr->ff->fileset) { + Dmsg0(DT_SNAPSHOT, "No fileset associated with JCR\n"); + return false; + } + + findFILESET *fileset = jcr->ff->fileset; + dlistString *node; + int64_t flags = 0; + + for (int i=0; iinclude_list.size(); i++) { + + findFOPTS *fo; + findINCEXE *incexe = (findINCEXE *)fileset->include_list.get(i); + + /* look through all files */ + foreach_dlist(node, &incexe->name_list) { + char *fname = node->c_str(); + if (mount_list->add_in_snapshot_set(fname, &incexe->name_list, node)) { + /* When all volumes are selected, we can stop */ + Dmsg0(DT_SNAPSHOT, "All Volumes are marked, stopping the loop here\n"); + goto all_included; + } + } + + foreach_alist(fo, &incexe->opts_list) { + flags |= fo->flags; /* We are looking for FO_MULTIFS and recurse */ + } + } + +all_included: + /* If we allow recursion and multifs, we need to include sub volumes by hand + * in the backup list + */ + if (flags & FO_MULTIFS) { + fs_device *elt, *elt2; + + foreach_rblist(elt, mount_list->entries) { + if (!elt->inFileSet) { + continue; + } + alist *lst = New(alist(10, not_owned_by_alist)); + mount_list->get_subvolumes(elt->dev, lst, jcr->ff); + foreach_alist(elt2, lst) { + if (elt2->inFileSet) { + continue; + } + + /* TODO: See how to avoid having two entries for the same directory */ + /* Add the directory explicitely in the fileset */ + elt->include->insert_after(new_dlistString(elt2->mountpoint), elt->node); + + if (mount_list->add_in_snapshot_set(elt2, elt->include, elt->node)) { + /* When all volumes are selected, we can stop */ + Dmsg0(DT_SNAPSHOT, "All Volumes are marked, stopping the loop here\n"); + delete lst; + return true; + } + } + delete lst; + } + } + return true; +} + +int snapshot_cmd(JCR *jcr) +{ + BSOCK *dir = jcr->dir_bsock; + int n, ret = 1; + char ed1[50]; + snapshot snap(jcr); + snap.check_buffer_size(dir->msglen); + + n = sscanf(dir->msg, QueryCmd, snap.Name, snap.Volume, snap.Device, &snap.CreateTDate, snap.Type); + if (n == 5) { + snap.unbash_spaces(); + Dmsg0(DT_SNAPSHOT|10, "Doing query of a snapshot\n"); + n = snap.query(); + bash_spaces(snap.errmsg); + dir->fsend("%d Snapshot status=%d size=%lld ERR=%s\n", n?2000:2999, snap.status, snap.size, snap.errmsg); + goto bail_out; + } + + n = sscanf(dir->msg, LsCmd, snap.Name, snap.Volume, snap.Device, &snap.CreateTDate, snap.Type, snap.path); + if (n == 6) { + snap.unbash_spaces(); + Dmsg0(DT_SNAPSHOT|10, "Doing ls of a snapshot\n"); + n = snap.ls(dir); + dir->signal(BNET_EOD); + goto bail_out; + } + + n = sscanf(dir->msg, DelCmd, snap.Name, snap.Volume, snap.Device, &snap.CreateTDate, snap.Type); + if (n == 5) { + Dmsg0(DT_SNAPSHOT|10, "Doing del of a snapshot\n"); + snap.unbash_spaces(); + n = snap.del(); + bash_spaces(snap.errmsg); + dir->fsend("%d Snapshot deleted ERR=%s\n", n?2000:2999, snap.errmsg); + goto bail_out; + } + + n = sscanf(dir->msg, PruneCmd, snap.Volume, snap.Type); + if (n == 2) { + snap.unbash_spaces(); + n = snap.prune(dir); + bash_spaces(snap.errmsg); + dir->fsend("%d Snapshot pruned ERR=%s\n", n?2000:2999, snap.errmsg); + Dmsg0(DT_SNAPSHOT|10, "Doing pruning 
of snapshots\n"); + goto bail_out; + } + + n = sscanf(dir->msg, SyncCmd, snap.Volume, snap.Type); + if (n == 2) { + snap.unbash_spaces(); + n = snap.sync(dir); + bash_spaces(snap.errmsg); + dir->fsend("%d Snapshot synced ERR=%s\n", n?2000:2999, snap.errmsg); + Dmsg0(DT_SNAPSHOT|10, "Doing sync of snapshots\n"); + goto bail_out; + } + + /* TODO: Include a path name or a device name */ + if (strncmp(dir->msg, ListCmd, strlen(ListCmd)) == 0) { + snapshot *elt; + char ed1[50]; + alist *lst = New(alist(10, not_owned_by_alist)); + list_all_snapshots(jcr, lst); + foreach_alist(elt, lst) { + elt->bash_spaces(); + dir->fsend("volume=\"%s\" createtdate=\"%s\" name=\"%s\" device=\"%s\" status=%d error=\"%s\" type=\"%s\"\n", + elt->Volume, edit_uint64(elt->CreateTDate, ed1), + elt->Name, elt->Device, elt->status, elt->errmsg, elt->Type); + delete elt; + } + delete lst; + dir->signal(BNET_EOD); + goto bail_out; + } + + n = sscanf(dir->msg, ConfCmd, ed1); + if (n == 1) { + jcr->snapshot_retention = str_to_uint64(ed1); + dir->fsend("2000 Snapshot retention\n"); + goto bail_out; + } + + dir->fsend("2999 Snapshot command not found\n"); + dir->signal(BNET_EOD); + ret = 0; + +bail_out: + return ret; +} + +bool snapshot_convert_path(JCR *jcr, FF_PKT *ff, dlist *filelist, dlistString *node) +{ + Dmsg1(DT_SNAPSHOT, "snapshot_convert_path(%s)\n", ff->top_fname); + snapshot_manager *snapmgr = jcr->snap_mgr; + ff->strip_snap_path = false; + + if (!snapmgr) { + return true; + } + + fs_device *elt = snapmgr->mount_list->search(ff->top_fname); + if (!elt) { + return true; /* not found */ + } + + if (!ff->snap_fname) { + ff->snap_fname = get_pool_memory(PM_FNAME); + } + + /* Convert the filename to the original path */ + if (!elt->snap->convert_path(ff)) { + Dmsg2(DT_SNAPSHOT, "Device %d for file %s not snapshotable\n", + elt->dev, ff->top_fname); + return true; + } + return true; +} + +/* ListSnap[] = "CatReq Job=%s list_snapshot name=%s volume=%s device=%s tdate=%d type=%s before=%s after=%s expired=%d"; */ + +/* List Catalog entry of the current client */ +int snapshot_list_catalog(JCR *jcr, + const char *query, + alist *lst) +{ + int ret = 0, i; + arg_parser cmd; + POOL_MEM q, tmp; + if (cmd.parse_cmd(query) != bRC_OK) { + Dmsg1(DT_SNAPSHOT, "Unable to decode query %s\n", query); + return 0; + } + Mmsg(q, "CatReq Job=%s list_snapshot name=", jcr->Job); + if ((i = cmd.find_arg_with_value("name")) >= 0) { + bash_spaces(cmd.argv[i]); + pm_strcat(q, cmd.argv[i]); + } + + pm_strcat(q, " volume="); + if ((i = cmd.find_arg_with_value("volume")) >= 0) { + bash_spaces(cmd.argv[i]); + pm_strcat(q, cmd.argv[i]); + } + + pm_strcat(q, " device="); + if ((i = cmd.find_arg_with_value("device")) >= 0) { + bash_spaces(cmd.argv[i]); + pm_strcat(q, cmd.argv[i]); + } + + pm_strcat(q, " tdate="); + if ((i = cmd.find_arg_with_value("tdate")) >= 0) { + bash_spaces(cmd.argv[i]); + pm_strcat(q, cmd.argv[i]); + } + + pm_strcat(q, " type="); + if ((i = cmd.find_arg_with_value("type")) >= 0) { + bash_spaces(cmd.argv[i]); + pm_strcat(q, cmd.argv[i]); + } + + pm_strcat(q, " before="); + if ((i = cmd.find_arg_with_value("before")) >= 0) { + bash_spaces(cmd.argv[i]); + pm_strcat(q, cmd.argv[i]); + } + + pm_strcat(q, " after="); + if ((i = cmd.find_arg_with_value("after")) >= 0) { + bash_spaces(cmd.argv[i]); + pm_strcat(q, cmd.argv[i]); + } + + pm_strcat(q, " expired="); + if ((i = cmd.find_arg_with_value("expired")) >= 0) { + bash_spaces(cmd.argv[i]); + pm_strcat(q, cmd.argv[i]); + } + + jcr->dir_bsock->fsend("%s\n", q.c_str()); + + while 
(jcr->dir_bsock->recv() > 0) { + if (cmd.parse_cmd(jcr->dir_bsock->msg) != bRC_OK) { + Dmsg1(DT_SNAPSHOT, "Unable to decode director output %s\n", jcr->dir_bsock->msg); + + } else { + ret = 1; /* OK */ + snapshot *s = New(snapshot(jcr)); + s->scan_arg(&cmd); + lst->append(s); + } + } + return ret; +} diff --git a/src/filed/fd_snapshot.h b/src/filed/fd_snapshot.h new file mode 100644 index 00000000..2d61fc6e --- /dev/null +++ b/src/filed/fd_snapshot.h @@ -0,0 +1,67 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2015 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. + + Written by Eric Bollengier 2015 +*/ + +#ifndef FD_SNAPSHOT_H +#define FD_SNAPSHOT_H + +/* default snapshot handler */ +char *snapshot_get_command(); + +/* Internal objects */ +class mtab; /* device list */ +class fs_device; + +class snapshot_manager: public SMARTALLOC +{ +private: + JCR *jcr; +public: + mtab *mount_list; + + snapshot_manager(JCR *ajcr); + virtual ~snapshot_manager(); + + /* Quiesce application and take snapshot */ + bool create_snapshots(); + + /* Cleanup snapshots */ + bool cleanup_snapshots(); + + /* List snapshots */ + bool list_snapshots(alist *ret); + + /* Scan the fileset for devices and application */ + bool scan_fileset(); + + /* Scan the mtab */ + bool scan_mtab(); + + /* Add a mount point to the mtab list */ + void add_mount_point(uint32_t dev, const char *device, + const char *mountpoint, const char *fstype); +}; + +void close_snapshot_backup_session(JCR *jcr); +bool open_snapshot_backup_session(JCR *jcr); + +bool snapshot_convert_path(JCR *jcr, FF_PKT *ff, dlist *filelist, dlistString *node); + +#endif /* FD_SNAPSHOT_H */ diff --git a/src/filed/filed.c b/src/filed/filed.c new file mode 100644 index 00000000..d6036cf5 --- /dev/null +++ b/src/filed/filed.c @@ -0,0 +1,706 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
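snapshot_cmd() above dispatches on the Director's message by trying a series of fixed printf-style templates with sscanf(), and values that may contain blanks survive the %s conversions because spaces are escaped before transmission and restored after parsing (the bash_spaces()/unbash_spaces() calls). The sketch below shows only the general technique; the command template and the 0x1 escape byte are illustrative, not the actual wire format:

   #include <cstdio>
   #include <cstring>

   /* Illustrative escaping: turn blanks into 0x1 before a value is embedded
    * in a space-separated command, and turn them back after sscanf splits it. */
   static void escape_spaces(char *s)   { for ( ; *s; s++) if (*s == ' ') *s = 0x1; }
   static void unescape_spaces(char *s) { for ( ; *s; s++) if (*s == 0x1) *s = ' '; }

   int main()
   {
      const char *DelCmd = "snapshot del name=%127s volume=%127s";
      char wire[256], name[128], volume[128], vol[128];

      /* Sender side: escape the value, then build the command */
      snprintf(vol, sizeof(vol), "Vol 2019-02-05");
      escape_spaces(vol);
      snprintf(wire, sizeof(wire), "snapshot del name=%s volume=%s", "nightly", vol);

      /* Receiver side: split with sscanf, then restore the blanks */
      if (sscanf(wire, DelCmd, name, volume) == 2) {
         unescape_spaces(volume);
         printf("delete snapshot '%s' on volume '%s'\n", name, volume);
      }
      return 0;
   }

Each handler in snapshot_cmd() follows the same shape: match a template, unescape the captured fields, do the work, then escape errmsg before sending the status line back to the Director.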
+*/ +/* + * Bacula File Daemon + * + * Kern Sibbald, March MM + */ + +#include "bacula.h" +#include "filed.h" + +/* Imported Functions */ +extern void *handle_connection_request(void *dir_sock); +extern bool parse_fd_config(CONFIG *config, const char *configfile, int exit_code); + +/* Forward referenced functions */ +static bool check_resources(); + +/* Exported variables */ +CLIENT *me; /* my resource */ +bool no_signals = false; +void *start_heap; +extern struct s_cmds cmds[]; + +#ifndef CONFIG_FILE /* Might be overwritten */ + #define CONFIG_FILE "bacula-fd.conf" /* default config file */ + #define PROG_NAME "bacula-fd" +#endif + +char *configfile = NULL; +static bool test_config = false; +static bool foreground = false; +static bool make_pid_file = true; /* create pid file */ +static workq_t dir_workq; /* queue of work from Director */ +static pthread_t server_tid; +static CONFIG *config; + +static void usage() +{ + fprintf(stderr, _( + PROG_COPYRIGHT + "\nVersion: %s (%s)\n\n" + "Usage: bacula-fd [-f -s] [-c config_file] [-d debug_level]\n" + " -c use as configuration file\n" + " -d [,] set debug level to , debug tags to \n" + " -dt print a timestamp in debug output\n" + " -f run in foreground (for debugging)\n" + " -g groupid\n" + " -k keep readall capabilities\n" + " -m print kaboom output (for debugging)\n" + " -P do not create pid file\n" + " -s no signals (for debugging)\n" + " -t test configuration file and exit\n" + " -T set trace on\n" + " -u userid\n" + " -v verbose user messages\n" + " -? print this message.\n" + "\n"), 2000, VERSION, BDATE); + + exit(1); +} + + +/********************************************************************* + * + * Main Bacula Unix Client Program + * + */ + +#if defined(HAVE_WIN32) +#define main BaculaMain +#endif + +int main(int argc, char *argv[]) +{ + int ch; + bool keep_readall_caps = false; + char *uid = NULL; + char *gid = NULL; + + start_heap = sbrk(0); + setlocale(LC_ALL, ""); + bindtextdomain("bacula", LOCALEDIR); + textdomain("bacula"); + + init_stack_dump(); + my_name_is(argc, argv, PROG_NAME); + init_msg(NULL, NULL); + daemon_start_time = time(NULL); + setup_daemon_message_queue(); + + while ((ch = getopt(argc, argv, "c:d:fg:kmPstTu:v?D:")) != -1) { + switch (ch) { + case 'c': /* configuration file */ + if (configfile != NULL) { + free(configfile); + } + configfile = bstrdup(optarg); + break; + + case 'd': /* debug level */ + if (*optarg == 't') { + dbg_timestamp = true; + } else { + char *p; + /* We probably find a tag list -d 10,sql,bvfs */ + if ((p = strchr(optarg, ',')) != NULL) { + *p = 0; + } + debug_level = atoi(optarg); + if (debug_level <= 0) { + debug_level = 1; + } + if (p) { + debug_parse_tags(p+1, &debug_level_tags); + } + } + break; + + case 'f': /* run in foreground */ + foreground = true; + break; + + case 'g': /* set group */ + gid = optarg; + break; + + case 'k': + keep_readall_caps = true; + break; + + case 'm': /* print kaboom output */ + prt_kaboom = true; + break; + + case 'P': + make_pid_file = false; + break; + + case 's': + no_signals = true; + break; + + case 't': + test_config = true; + break; + + case 'T': + set_trace(true); + break; + + case 'u': /* set userid */ + uid = optarg; + break; + + case 'v': /* verbose */ + verbose++; + break; + + case '?': + default: + usage(); + + } + } + argc -= optind; + argv += optind; + + if (argc) { + if (configfile != NULL) + free(configfile); + configfile = bstrdup(*argv); + argc--; + argv++; + } + if (argc) { + usage(); + } + + if (!uid && keep_readall_caps) { + 
Emsg0(M_ERROR_TERM, 0, _("-k option has no meaning without -u option.\n")); + } + + server_tid = pthread_self(); + + if (configfile == NULL) { + configfile = bstrdup(CONFIG_FILE); + } + + if (!foreground && !test_config) { + daemon_start(); + init_stack_dump(); /* set new pid */ + } + + if (!no_signals) { + init_signals(terminate_filed); + } else { + /* This reduces the number of signals facilitating debugging */ + watchdog_sleep_time = 120; /* long timeout for debugging */ + } + + config = New(CONFIG()); + parse_fd_config(config, configfile, M_ERROR_TERM); + + if (init_crypto() != 0) { + Emsg0(M_ERROR, 0, _("Cryptography library initialization failed.\n")); + terminate_filed(1); + } + + if (!check_resources()) { + Emsg1(M_ERROR, 0, _("Please correct configuration file: %s\n"), configfile); + terminate_filed(1); + } + + set_working_directory(me->working_directory); + + if (test_config) { + terminate_filed(0); + } + + set_thread_concurrency(me->MaxConcurrentJobs + 10); + lmgr_init_thread(); /* initialize the lockmanager stack */ + + /* Maximum 1 daemon at a time */ + if (make_pid_file) { + create_pid_file(me->pid_directory, PROG_NAME, + get_first_port_host_order(me->FDaddrs)); + } + read_state_file(me->working_directory, PROG_NAME, + get_first_port_host_order(me->FDaddrs)); + + load_fd_plugins(me->plugin_directory); + + drop(uid, gid, keep_readall_caps); + +#ifdef BOMB + me += 1000000; +#endif + + /* Setup default value for the the snapshot handler */ + if (!me->snapshot_command) { + me->snapshot_command = snapshot_get_command(); + } + + if (!no_signals) { + start_watchdog(); /* start watchdog thread */ + init_jcr_subsystem(); /* start JCR watchdogs etc. */ + } + server_tid = pthread_self(); + + /* Become server, and handle requests */ + IPADDR *p; + foreach_dlist(p, me->FDaddrs) { + Dmsg1(10, "filed: listening on port %d\n", p->get_port_host_order()); + } + bnet_thread_server(me->FDaddrs, me->MaxConcurrentJobs, &dir_workq, + handle_connection_request); + + terminate_filed(0); + exit(0); /* should never get here */ +} + +void terminate_filed(int sig) +{ + static bool already_here = false; + + if (already_here) { + bmicrosleep(2, 0); /* yield */ + exit(1); /* prevent loops */ + } + already_here = true; + debug_level = 0; /* turn off debug */ + stop_watchdog(); + + bnet_stop_thread_server(server_tid); + generate_daemon_event(NULL, "Exit"); + unload_plugins(); + + free_daemon_message_queue(); + + if (!test_config) { + write_state_file(me->working_directory, + "bacula-fd", get_first_port_host_order(me->FDaddrs)); + if (make_pid_file) { + delete_pid_file(me->pid_directory, + "bacula-fd", get_first_port_host_order(me->FDaddrs)); + } + } + + if (configfile != NULL) { + free(configfile); + } + + if (debug_level > 0) { + print_memory_pool_stats(); + } + + if (config) { + delete config; + config = NULL; + } + term_msg(); + cleanup_crypto(); + free(res_head); + res_head = NULL; + close_memory_pool(); /* release free memory in pool */ + lmgr_cleanup_main(); + sm_dump(false); /* dump orphaned buffers */ + exit(sig); +} + +/* +* Make a quick check to see that we have all the +* resources needed. 
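The -d handling in main() above accepts either a plain level or a level followed by a comma-separated tag list, e.g. "-d 100,snapshot,sql": the argument is cut at the first comma, the left part becomes the numeric level (floored at 1), and the remainder goes to the tag parser. A self-contained sketch of that split; parse_tags() below only stands in for the real tag parser:

   #include <cstdio>
   #include <cstdlib>
   #include <cstring>

   /* Illustrative stand-in for the tag parser. */
   static void parse_tags(const char *tags) { printf("tags:  %s\n", tags); }

   static void parse_debug_option(char *arg)
   {
      char *p = strchr(arg, ',');
      if (p) {
         *p = '\0';                      /* terminate the numeric part */
      }
      int level = atoi(arg);
      if (level <= 0) {
         level = 1;                      /* same floor as the code above */
      }
      printf("level: %d\n", level);
      if (p) {
         parse_tags(p + 1);              /* "100,snapshot,sql" -> "snapshot,sql" */
      }
   }

   int main()
   {
      char arg[] = "100,snapshot,sql";
      parse_debug_option(arg);
      return 0;
   }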
+*/ +static bool check_resources() +{ + int i; + bool found; + char *cmd; + bool OK = true; + DIRRES *director; + bool need_tls; + + LockRes(); + + me = (CLIENT *)GetNextRes(R_CLIENT, NULL); + if (!me) { + Emsg1(M_FATAL, 0, _("No File daemon resource defined in %s\n" + "Without that I don't know who I am :-(\n"), configfile); + OK = false; + } else { + if (GetNextRes(R_CLIENT, (RES *) me) != NULL) { + Emsg1(M_FATAL, 0, _("Only one Client resource permitted in %s\n"), + configfile); + OK = false; + } + my_name_is(0, NULL, me->hdr.name); + if (!me->messages) { + me->messages = (MSGS *)GetNextRes(R_MSGS, NULL); + if (!me->messages) { + Emsg1(M_FATAL, 0, _("No Messages resource defined in %s\n"), configfile); + OK = false; + } + } + + /* Construct disabled command array */ + for (i=0; cmds[i].cmd; i++) { } /* Count commands */ + if (me->disable_cmds) { + me->disabled_cmds_array = (bool *)malloc(i); + memset(me->disabled_cmds_array, 0, i); + foreach_alist(cmd, me->disable_cmds) { + found = false; + for (i=0; cmds[i].cmd; i++) { + if (strncasecmp(cmds[i].cmd, cmd, strlen(cmd)) == 0) { + me->disabled_cmds_array[i] = true; + found = true; + break; + } + } + if (!found) { + Jmsg(NULL, M_FATAL, 0, _("Disable Command \"%s\" not found.\n"), + cmd); + OK = false; + } + } + } +#ifdef xxxDEBUG + for (i=0; cmds[i].cmd; i++) { } /* Count commands */ + while (i-- >= 0) { + if (me->disabled_cmds_array[i]) { + Dmsg1(050, "Command: %s disabled.\n", cmds[i].cmd); + } + } +#endif + + /* tls_require implies tls_enable */ + if (me->tls_require) { +#ifndef HAVE_TLS + Jmsg(NULL, M_FATAL, 0, _("TLS required but not configured in Bacula.\n")); + OK = false; +#else + me->tls_enable = true; +#endif + } + need_tls = me->tls_enable || me->tls_authenticate; + + if ((!me->tls_ca_certfile && !me->tls_ca_certdir) && need_tls) { + Emsg1(M_FATAL, 0, _("Neither \"TLS CA Certificate\"" + " or \"TLS CA Certificate Dir\" are defined for File daemon in %s.\n"), + configfile); + OK = false; + } + + /* If everything is well, attempt to initialize our per-resource TLS context */ + if (OK && (need_tls || me->tls_require)) { + /* Initialize TLS context: + * Args: CA certfile, CA certdir, Certfile, Keyfile, + * Keyfile PEM Callback, Keyfile CB Userdata, DHfile, Verify Peer */ + me->tls_ctx = new_tls_context(me->tls_ca_certfile, + me->tls_ca_certdir, me->tls_certfile, me->tls_keyfile, + NULL, NULL, NULL, true); + + if (!me->tls_ctx) { + Emsg2(M_FATAL, 0, _("Failed to initialize TLS context for File daemon \"%s\" in %s.\n"), + me->hdr.name, configfile); + OK = false; + } + } + + if (me->pki_encrypt || me->pki_sign) { +#ifndef HAVE_CRYPTO + Jmsg(NULL, M_FATAL, 0, _("PKI encryption/signing enabled but not compiled into Bacula.\n")); + OK = false; +#endif + } + + /* pki_encrypt implies pki_sign */ + if (me->pki_encrypt) { + me->pki_sign = true; + } + + if ((me->pki_encrypt || me->pki_sign) && !me->pki_keypair_file) { + Emsg2(M_FATAL, 0, _("\"PKI Key Pair\" must be defined for File" + " daemon \"%s\" in %s if either \"PKI Sign\" or" + " \"PKI Encrypt\" are enabled.\n"), me->hdr.name, configfile); + OK = false; + } + + /* If everything is well, attempt to initialize our public/private keys */ + if (OK && (me->pki_encrypt || me->pki_sign)) { + char *filepath; + /* Load our keypair */ + me->pki_keypair = crypto_keypair_new(); + if (!me->pki_keypair) { + Emsg0(M_FATAL, 0, _("Failed to allocate a new keypair object.\n")); + OK = false; + } else { + if (!crypto_keypair_load_cert(me->pki_keypair, me->pki_keypair_file)) { + Emsg2(M_FATAL, 0, _("Failed 
to load public certificate for File" + " daemon \"%s\" in %s.\n"), me->hdr.name, configfile); + OK = false; + } + + if (!crypto_keypair_load_key(me->pki_keypair, me->pki_keypair_file, NULL, NULL)) { + Emsg2(M_FATAL, 0, _("Failed to load private key for File" + " daemon \"%s\" in %s.\n"), me->hdr.name, configfile); + OK = false; + } + } + + /* + * Trusted Signers. We're always trusted. + */ + me->pki_signers = New(alist(10, not_owned_by_alist)); + if (me->pki_keypair) { + me->pki_signers->append(crypto_keypair_dup(me->pki_keypair)); + } + + /* If additional signing public keys have been specified, load them up */ + if (me->pki_signing_key_files) { + foreach_alist(filepath, me->pki_signing_key_files) { + X509_KEYPAIR *keypair; + + keypair = crypto_keypair_new(); + if (!keypair) { + Emsg0(M_FATAL, 0, _("Failed to allocate a new keypair object.\n")); + OK = false; + } else { + if (crypto_keypair_load_cert(keypair, filepath)) { + me->pki_signers->append(keypair); + + /* Attempt to load a private key, if available */ + if (crypto_keypair_has_key(filepath)) { + if (!crypto_keypair_load_key(keypair, filepath, NULL, NULL)) { + Emsg3(M_FATAL, 0, _("Failed to load private key from file %s for File" + " daemon \"%s\" in %s.\n"), filepath, me->hdr.name, configfile); + OK = false; + } + } + + } else { + Emsg3(M_FATAL, 0, _("Failed to load trusted signer certificate" + " from file %s for File daemon \"%s\" in %s.\n"), filepath, me->hdr.name, configfile); + OK = false; + } + } + } + } + + /* + * Crypto recipients. We're always included as a recipient. + * The symmetric session key will be encrypted for each of these readers. + */ + me->pki_recipients = New(alist(10, not_owned_by_alist)); + if (me->pki_keypair) { + me->pki_recipients->append(crypto_keypair_dup(me->pki_keypair)); + } + + /* Put a default cipher (not possible in the filed_conf.c structure */ + if (!me->pki_cipher) { + me->pki_cipher = CRYPTO_CIPHER_AES_128_CBC; + } + + /* Put a default digest (not possible in the filed_conf.c structure */ + if (!me->pki_digest) { + me->pki_digest = CRYPTO_DIGEST_DEFAULT; + } + + /* If additional keys have been specified, load them up */ + if (me->pki_master_key_files) { + foreach_alist(filepath, me->pki_master_key_files) { + X509_KEYPAIR *keypair; + + keypair = crypto_keypair_new(); + if (!keypair) { + Emsg0(M_FATAL, 0, _("Failed to allocate a new keypair object.\n")); + OK = false; + } else { + if (crypto_keypair_load_cert(keypair, filepath)) { + me->pki_recipients->append(keypair); + } else { + Emsg3(M_FATAL, 0, _("Failed to load master key certificate" + " from file %s for File daemon \"%s\" in %s.\n"), filepath, me->hdr.name, configfile); + OK = false; + } + } + } + } + } + } + + + /* Verify that a director record exists */ + LockRes(); + director = (DIRRES *)GetNextRes(R_DIRECTOR, NULL); + UnlockRes(); + if (!director) { + Emsg1(M_FATAL, 0, _("No Director resource defined in %s\n"), + configfile); + OK = false; + } + + foreach_res(director, R_DIRECTOR) { + + /* Construct disabled command array */ + for (i=0; cmds[i].cmd; i++) { } /* Count commands */ + if (director->disable_cmds) { + director->disabled_cmds_array = (bool *)malloc(i); + memset(director->disabled_cmds_array, 0, i); + foreach_alist(cmd, director->disable_cmds) { + found = false; + for (i=0; cmds[i].cmd; i++) { + if (strncasecmp(cmds[i].cmd, cmd, strlen(cmd)) == 0) { + director->disabled_cmds_array[i] = true; + found = true; + break; + } + } + if (!found) { + Jmsg(NULL, M_FATAL, 0, _("Disable Command \"%s\" not found.\n"), + cmd); + OK = 
false; + } + } + } + +#ifdef xxxDEBUG + for (i=0; cmds[i].cmd; i++) { } /* Count commands */ + while (i-- >= 0) { + if (director->disabled_cmds_array[i]) { + Dmsg1(050, "Command: %s disabled for Director.\n", cmds[i].cmd); + } + } +#endif + + /* tls_require implies tls_enable */ + if (director->tls_require) { +#ifndef HAVE_TLS + Jmsg(NULL, M_FATAL, 0, _("TLS required but not configured in Bacula.\n")); + OK = false; + continue; +#else + director->tls_enable = true; +#endif + } + need_tls = director->tls_enable || director->tls_authenticate; + + if (!director->tls_certfile && need_tls) { + Emsg2(M_FATAL, 0, _("\"TLS Certificate\" file not defined for Director \"%s\" in %s.\n"), + director->hdr.name, configfile); + OK = false; + } + + if (!director->tls_keyfile && need_tls) { + Emsg2(M_FATAL, 0, _("\"TLS Key\" file not defined for Director \"%s\" in %s.\n"), + director->hdr.name, configfile); + OK = false; + } + + if ((!director->tls_ca_certfile && !director->tls_ca_certdir) && need_tls && director->tls_verify_peer) { + Emsg2(M_FATAL, 0, _("Neither \"TLS CA Certificate\"" + " or \"TLS CA Certificate Dir\" are defined for Director \"%s\" in %s." + " At least one CA certificate store is required" + " when using \"TLS Verify Peer\".\n"), + director->hdr.name, configfile); + OK = false; + } + + /* If everything is well, attempt to initialize our per-resource TLS context */ + if (OK && (need_tls || director->tls_require)) { + /* Initialize TLS context: + * Args: CA certfile, CA certdir, Certfile, Keyfile, + * Keyfile PEM Callback, Keyfile CB Userdata, DHfile, Verify Peer */ + director->tls_ctx = new_tls_context(director->tls_ca_certfile, + director->tls_ca_certdir, director->tls_certfile, + director->tls_keyfile, NULL, NULL, director->tls_dhfile, + director->tls_verify_peer); + + if (!director->tls_ctx) { + Emsg2(M_FATAL, 0, _("Failed to initialize TLS context for Director \"%s\" in %s.\n"), + director->hdr.name, configfile); + OK = false; + } + } + } + + CONSRES *console; + foreach_res(console, R_CONSOLE) { + /* tls_require implies tls_enable */ + if (console->tls_require) { +#ifndef HAVE_TLS + Jmsg(NULL, M_FATAL, 0, _("TLS required but not configured in Bacula.\n")); + OK = false; + continue; +#else + console->tls_enable = true; +#endif + } + need_tls = console->tls_enable || console->tls_authenticate; + + if (!console->tls_certfile && need_tls) { + Emsg2(M_FATAL, 0, _("\"TLS Certificate\" file not defined for Console \"%s\" in %s.\n"), + console->hdr.name, configfile); + OK = false; + } + + if (!console->tls_keyfile && need_tls) { + Emsg2(M_FATAL, 0, _("\"TLS Key\" file not defined for Console \"%s\" in %s.\n"), + console->hdr.name, configfile); + OK = false; + } + + if ((!console->tls_ca_certfile && !console->tls_ca_certdir) && need_tls && console->tls_verify_peer) { + Emsg2(M_FATAL, 0, _("Neither \"TLS CA Certificate\"" + " or \"TLS CA Certificate Dir\" are defined for Console \"%s\" in %s." 
+ " At least one CA certificate store is required" + " when using \"TLS Verify Peer\".\n"), + console->hdr.name, configfile); + OK = false; + } + + /* If everything is well, attempt to initialize our per-resource TLS context */ + if (OK && (need_tls || console->tls_require)) { + /* Initialize TLS context: + * Args: CA certfile, CA certdir, Certfile, Keyfile, + * Keyfile PEM Callback, Keyfile CB Userdata, DHfile, Verify Peer */ + console->tls_ctx = new_tls_context(console->tls_ca_certfile, + console->tls_ca_certdir, console->tls_certfile, + console->tls_keyfile, NULL, NULL, console->tls_dhfile, + console->tls_verify_peer); + + if (!console->tls_ctx) { + Emsg2(M_FATAL, 0, _("Failed to initialize TLS context for Console \"%s\" in %s.\n"), + console->hdr.name, configfile); + OK = false; + } + } + + } + + UnlockRes(); + + if (OK) { + close_msg(NULL); /* close temp message handler */ + init_msg(NULL, me->messages); /* open user specified message handler */ + } + + return OK; +} diff --git a/src/filed/filed.h b/src/filed/filed.h new file mode 100644 index 00000000..eae83293 --- /dev/null +++ b/src/filed/filed.h @@ -0,0 +1,72 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula File Daemon specific configuration and defines + * + * Kern Sibbald, Jan MMI + */ + +//#define TEST_WORKER +#ifdef TEST_WORKER +#define ERROR_BUFFER_OVERFLOW 1 +#define ERROR_SUCCESS 0 +#endif + +/* acl errors to report per job. */ +#define ACL_MAX_ERROR_PRINT_PER_JOB 25 + +/* xattr errors to report per job. */ +#define XATTR_MAX_ERROR_PRINT_PER_JOB 25 + +#define FILE_DAEMON 1 +#include "lib/htable.h" +#include "filed_conf.h" +#include "fd_plugins.h" +#include "fd_snapshot.h" +#include "findlib/find.h" +#include "bacl.h" +#include "bxattr.h" +#include "jcr.h" +#include "protos.h" /* file daemon prototypes */ +#include "lib/runscript.h" +#include "lib/breg.h" +#ifdef HAVE_LIBZ +#include /* compression headers */ +#else +#define uLongf uint32_t +#endif +#ifdef HAVE_LZO +#include +#include +#endif + +extern CLIENT *me; /* "Global" Client resource */ +extern bool win32decomp; /* Use decomposition of BackupRead data */ +extern bool no_win32_write_errors; /* Ignore certain errors */ + +void terminate_filed(int sig); + +struct s_cmds { + const char *cmd; + int (*func)(JCR *); + int access; /* specify if monitors/restricted have access to this function */ +}; + +void allow_os_suspensions(); +void prevent_os_suspensions(); diff --git a/src/filed/filed_conf.c b/src/filed/filed_conf.c new file mode 100644 index 00000000..56fb2ed7 --- /dev/null +++ b/src/filed/filed_conf.c @@ -0,0 +1,595 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Main configuration file parser for Bacula File Daemon (Client) + * some parts may be split into separate files such as + * the schedule configuration (sch_config.c). + * + * Note, the configuration file parser consists of three parts + * + * 1. The generic lexical scanner in lib/lex.c and lib/lex.h + * + * 2. The generic config scanner in lib/parse_config.c and + * lib/parse_config.h. + * These files contain the parser code, some utility + * routines, and the common store routines (name, int, + * string). + * + * 3. The daemon specific file, which contains the Resource + * definitions as well as any specific store routines + * for the resource records. + * + * Kern Sibbald, September MM + */ + +#include "bacula.h" +#include "filed.h" + +/* Define the first and last resource ID record + * types. Note, these should be unique for each + * daemon though not a requirement. + */ +int32_t r_first = R_FIRST; +int32_t r_last = R_LAST; +RES_HEAD **res_head; + + +/* Forward referenced subroutines */ + + +/* We build the current resource here as we are + * scanning the resource configuration definition, + * then move it to allocated memory when the resource + * scan is complete. + */ +#if defined(_MSC_VER) +extern "C" { // work around visual compiler mangling variables + URES res_all; +} +#else +URES res_all; +#endif +int32_t res_all_size = sizeof(res_all); + +/* Definition of records permitted within each + * resource with the routine to process the record + * information. 
+ */ + +/* Client or File daemon "Global" resources */ +static RES_ITEM cli_items[] = { + {"Name", store_name, ITEM(res_client.hdr.name), 0, ITEM_REQUIRED, 0}, + {"Description", store_str, ITEM(res_client.hdr.desc), 0, 0, 0}, + {"FdPort", store_addresses_port, ITEM(res_client.FDaddrs), 0, ITEM_DEFAULT, 9102}, + {"FdAddress", store_addresses_address, ITEM(res_client.FDaddrs), 0, ITEM_DEFAULT, 9102}, + {"FdAddresses", store_addresses, ITEM(res_client.FDaddrs), 0, ITEM_DEFAULT, 9102}, + {"FdSourceAddress", store_addresses_address, ITEM(res_client.FDsrc_addr), 0, ITEM_DEFAULT, 0}, + + {"WorkingDirectory", store_dir, ITEM(res_client.working_directory), 0, ITEM_REQUIRED, 0}, + {"PidDirectory", store_dir, ITEM(res_client.pid_directory), 0, ITEM_REQUIRED, 0}, + {"SubsysDirectory", store_dir, ITEM(res_client.subsys_directory), 0, 0, 0}, + {"PluginDirectory", store_dir, ITEM(res_client.plugin_directory), 0, 0, 0}, + {"SnapshotCommand", store_str, ITEM(res_client.snapshot_command), 0, 0, 0}, + {"ScriptsDirectory", store_dir, ITEM(res_client.scripts_directory), 0, 0, 0}, + {"MaximumConcurrentJobs", store_pint32, ITEM(res_client.MaxConcurrentJobs), 0, ITEM_DEFAULT, 20}, + {"Messages", store_res, ITEM(res_client.messages), R_MSGS, 0, 0}, + {"SdConnectTimeout", store_time,ITEM(res_client.SDConnectTimeout), 0, ITEM_DEFAULT, 60 * 30}, + {"HeartbeatInterval", store_time, ITEM(res_client.heartbeat_interval), 0, ITEM_DEFAULT, 5 * 60}, + {"MaximumNetworkBufferSize", store_pint32, ITEM(res_client.max_network_buffer_size), 0, 0, 0}, +#ifdef DATA_ENCRYPTION + {"PkiSignatures", store_bool, ITEM(res_client.pki_sign), 0, ITEM_DEFAULT, 0}, + {"PkiEncryption", store_bool, ITEM(res_client.pki_encrypt), 0, ITEM_DEFAULT, 0}, + {"PkiKeyPair", store_dir, ITEM(res_client.pki_keypair_file), 0, 0, 0}, + {"PkiSigner", store_alist_str, ITEM(res_client.pki_signing_key_files), 0, 0, 0}, + {"PkiMasterKey", store_alist_str, ITEM(res_client.pki_master_key_files), 0, 0, 0}, + {"PkiCipher", store_cipher_type, ITEM(res_client.pki_cipher), 0, 0, 0}, + {"PkiDigest", store_digest_type, ITEM(res_client.pki_digest), 0, 0, 0}, +#endif + {"TlsAuthenticate", store_bool, ITEM(res_client.tls_authenticate), 0, 0, 0}, + {"TlsEnable", store_bool, ITEM(res_client.tls_enable), 0, 0, 0}, + {"TlsRequire", store_bool, ITEM(res_client.tls_require), 0, 0, 0}, + {"TlsCaCertificateFile", store_dir, ITEM(res_client.tls_ca_certfile), 0, 0, 0}, + {"TlsCaCertificateDir", store_dir, ITEM(res_client.tls_ca_certdir), 0, 0, 0}, + {"TlsCertificate", store_dir, ITEM(res_client.tls_certfile), 0, 0, 0}, + {"TlsKey", store_dir, ITEM(res_client.tls_keyfile), 0, 0, 0}, + {"VerId", store_str, ITEM(res_client.verid), 0, 0, 0}, + {"MaximumBandwidthPerJob",store_speed, ITEM(res_client.max_bandwidth_per_job), 0, 0, 0}, + {"CommCompression", store_bool, ITEM(res_client.comm_compression), 0, ITEM_DEFAULT, true}, + {"DisableCommand", store_alist_str, ITEM(res_client.disable_cmds), 0, 0, 0}, + {NULL, NULL, {0}, 0, 0, 0} +}; + +/* Directors that can use our services */ +static RES_ITEM dir_items[] = { + {"Name", store_name, ITEM(res_dir.hdr.name), 0, ITEM_REQUIRED, 0}, + {"Description", store_str, ITEM(res_dir.hdr.desc), 0, 0, 0}, + {"Password", store_password, ITEM(res_dir.password), 0, ITEM_REQUIRED, 0}, + {"Address", store_str, ITEM(res_dir.address), 0, 0, 0}, + {"Monitor", store_bool, ITEM(res_dir.monitor), 0, ITEM_DEFAULT, 0}, + {"Remote", store_bool, ITEM(res_dir.remote), 0, ITEM_DEFAULT, 0}, + {"TlsAuthenticate", store_bool, ITEM(res_dir.tls_authenticate), 0, 0, 0}, + 
{"TlsEnable", store_bool, ITEM(res_dir.tls_enable), 0, 0, 0}, + {"TlsRequire", store_bool, ITEM(res_dir.tls_require), 0, 0, 0}, + {"TlsVerifyPeer", store_bool, ITEM(res_dir.tls_verify_peer), 0, ITEM_DEFAULT, 1}, + {"TlsCaCertificateFile", store_dir, ITEM(res_dir.tls_ca_certfile), 0, 0, 0}, + {"TlsCaCertificateDir", store_dir, ITEM(res_dir.tls_ca_certdir), 0, 0, 0}, + {"TlsCertificate", store_dir, ITEM(res_dir.tls_certfile), 0, 0, 0}, + {"TlsKey", store_dir, ITEM(res_dir.tls_keyfile), 0, 0, 0}, + {"TlsDhFile", store_dir, ITEM(res_dir.tls_dhfile), 0, 0, 0}, + {"TlsAllowedCn", store_alist_str, ITEM(res_dir.tls_allowed_cns), 0, 0, 0}, + {"MaximumBandwidthPerJob", store_speed, ITEM(res_dir.max_bandwidth_per_job), 0, 0, 0}, + {"DisableCommand", store_alist_str, ITEM(res_dir.disable_cmds), 0, 0, 0}, + {"Console", store_res, ITEM(res_dir.console), R_CONSOLE, 0, 0}, + {NULL, NULL, {0}, 0, 0, 0} +}; + +/* Consoles that we can use to connect a Director */ +static RES_ITEM cons_items[] = { + {"Name", store_name, ITEM(res_cons.hdr.name), 0, ITEM_REQUIRED, 0}, + {"Description", store_str, ITEM(res_cons.hdr.desc), 0, 0, 0}, + {"Password", store_password, ITEM(res_cons.password), 0, ITEM_REQUIRED, 0}, + {"Address", store_str, ITEM(res_cons.address), 0, 0, 0}, + {"DirPort", store_pint32, ITEM(res_cons.DIRport), 0, ITEM_DEFAULT, 9101}, + {"TlsAuthenticate", store_bool, ITEM(res_cons.tls_authenticate), 0, 0, 0}, + {"TlsEnable", store_bool, ITEM(res_cons.tls_enable), 0, 0, 0}, + {"TlsRequire", store_bool, ITEM(res_cons.tls_require), 0, 0, 0}, + {"TlsVerifyPeer", store_bool, ITEM(res_cons.tls_verify_peer), 0, ITEM_DEFAULT, 1}, + {"TlsCaCertificateFile", store_dir, ITEM(res_cons.tls_ca_certfile), 0, 0, 0}, + {"TlsCaCertificateDir", store_dir, ITEM(res_cons.tls_ca_certdir), 0, 0, 0}, + {"TlsCertificate", store_dir, ITEM(res_cons.tls_certfile), 0, 0, 0}, + {"TlsKey", store_dir, ITEM(res_cons.tls_keyfile), 0, 0, 0}, + {"TlsDhFile", store_dir, ITEM(res_cons.tls_dhfile), 0, 0, 0}, + {"TlsAllowedCn", store_alist_str, ITEM(res_cons.tls_allowed_cns), 0, 0, 0}, + {NULL, NULL, {0}, 0, 0, 0} +}; + +/* Message resource */ +extern RES_ITEM msgs_items[]; + +/* + * This is the master resource definition. + * It must have one item for each of the resources. 
+ */ +RES_TABLE resources[] = { + {"Director", dir_items, R_DIRECTOR}, + {"FileDaemon", cli_items, R_CLIENT}, + {"Messages", msgs_items, R_MSGS}, + {"Console", cons_items, R_CONSOLE}, + {"Client", cli_items, R_CLIENT}, /* alias for filedaemon */ + {NULL, NULL, 0} +}; + +struct s_ct ciphertypes[] = { + {"aes128", CRYPTO_CIPHER_AES_128_CBC}, + {"aes192", CRYPTO_CIPHER_AES_192_CBC}, + {"aes256", CRYPTO_CIPHER_AES_256_CBC}, + {"blowfish", CRYPTO_CIPHER_BLOWFISH_CBC}, + {NULL, 0} +}; + +struct s_ct digesttypes[] = { + {"md5", CRYPTO_DIGEST_MD5}, + {"sha1", CRYPTO_DIGEST_SHA1}, + {"sha256", CRYPTO_DIGEST_SHA256}, +// {"sha512", CRYPTO_DIGEST_SHA512}, /* Not working yet */ + {NULL, 0} +}; + +/* + * Store cipher type + * + */ +void store_cipher_type(LEX *lc, RES_ITEM *item, int index, int pass) +{ + int i; + + lex_get_token(lc, T_NAME); + /* Store the type both pass 1 and pass 2 */ + for (i=0; ciphertypes[i].type_name; i++) { + if (strcasecmp(lc->str, ciphertypes[i].type_name) == 0) { + *(uint32_t *)(item->value) = ciphertypes[i].type_value; + i = 0; + break; + } + } + if (i != 0) { + scan_err1(lc, _("Expected a Cipher Type keyword, got: %s"), lc->str); + } + scan_to_eol(lc); + set_bit(index, res_all.hdr.item_present); +} + +/* + * Store digest type + * + */ +void store_digest_type(LEX *lc, RES_ITEM *item, int index, int pass) +{ + int i; + + lex_get_token(lc, T_NAME); + /* Store the type both pass 1 and pass 2 */ + for (i=0; digesttypes[i].type_name; i++) { + if (strcasecmp(lc->str, digesttypes[i].type_name) == 0) { + *(uint32_t *)(item->value) = digesttypes[i].type_value; + i = 0; + break; + } + } + if (i != 0) { + scan_err1(lc, _("Expected a Cipher Type keyword, got: %s"), lc->str); + } + scan_to_eol(lc); + set_bit(index, res_all.hdr.item_present); +} + +/* Dump contents of resource */ +void dump_resource(int type, RES *ares, void sendit(void *sock, const char *fmt, ...), void *sock) +{ + URES *res = (URES *)ares; + int recurse = 1; + + if (res == NULL) { + sendit(sock, "No record for %d %s\n", type, res_to_str(type)); + return; + } + if (type < 0) { /* no recursion */ + type = - type; + recurse = 0; + } + switch (type) { + case R_CONSOLE: + sendit(sock, "Console: name=%s password=%s\n", ares->name, + res->res_cons.password); + break; + case R_DIRECTOR: + sendit(sock, "Director: name=%s password=%s\n", ares->name, + res->res_dir.password); + break; + case R_CLIENT: + sendit(sock, "Client: name=%s FDport=%d\n", ares->name, + get_first_port_host_order(res->res_client.FDaddrs)); + break; + case R_MSGS: + sendit(sock, "Messages: name=%s\n", res->res_msgs.hdr.name); + if (res->res_msgs.mail_cmd) + sendit(sock, " mailcmd=%s\n", res->res_msgs.mail_cmd); + if (res->res_msgs.operator_cmd) + sendit(sock, " opcmd=%s\n", res->res_msgs.operator_cmd); + break; + default: + sendit(sock, "Unknown resource type %d\n", type); + } + ares = GetNextRes(type, ares); + if (recurse && ares) { + dump_resource(type, ares, sendit, sock); + } +} + + +/* + * Free memory of resource. + * NB, we don't need to worry about freeing any references + * to other resources as they will be freed when that + * resource chain is traversed. Mainly we worry about freeing + * allocated strings (names). 
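store_cipher_type() and store_digest_type() above share one small pattern: a NULL-terminated table of keyword/value pairs scanned with strcasecmp(), with the loop index doubling as the "found" flag. A standalone sketch of that lookup; the table contents and the -1 sentinel are illustrative:

   #include <cstdio>
   #include <strings.h>                  /* strcasecmp */

   struct keyword_map { const char *name; int value; };

   static const keyword_map ciphers[] = {
      { "aes128", 128 },
      { "aes192", 192 },
      { "aes256", 256 },
      { NULL,     0   }
   };

   /* Returns the mapped value, or -1 when the keyword is unknown. */
   static int lookup_keyword(const keyword_map *tab, const char *token)
   {
      for (int i = 0; tab[i].name; i++) {
         if (strcasecmp(tab[i].name, token) == 0) {
            return tab[i].value;
         }
      }
      return -1;
   }

   int main()
   {
      printf("AES256 -> %d\n", lookup_keyword(ciphers, "AES256"));   /* 256 */
      printf("des    -> %d\n", lookup_keyword(ciphers, "des"));      /* -1  */
      return 0;
   }

A very similar table scan (with strncasecmp) drives the DisableCommand handling in check_resources() above, where the matched index selects the slot to set in the disabled-commands array.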
+ */ +void free_resource(RES *sres, int type) +{ + URES *res = (URES *)sres; + + if (res == NULL) { + return; + } + + /* common stuff -- free the resource name */ + if (res->res_dir.hdr.name) { + free(res->res_dir.hdr.name); + } + if (res->res_dir.hdr.desc) { + free(res->res_dir.hdr.desc); + } + switch (type) { + case R_DIRECTOR: + if (res->res_dir.password) { + free(res->res_dir.password); + } + if (res->res_dir.address) { + free(res->res_dir.address); + } + if (res->res_dir.tls_ctx) { + free_tls_context(res->res_dir.tls_ctx); + } + if (res->res_dir.tls_ca_certfile) { + free(res->res_dir.tls_ca_certfile); + } + if (res->res_dir.tls_ca_certdir) { + free(res->res_dir.tls_ca_certdir); + } + if (res->res_dir.tls_certfile) { + free(res->res_dir.tls_certfile); + } + if (res->res_dir.tls_keyfile) { + free(res->res_dir.tls_keyfile); + } + if (res->res_dir.tls_dhfile) { + free(res->res_dir.tls_dhfile); + } + if (res->res_dir.tls_allowed_cns) { + delete res->res_dir.tls_allowed_cns; + } + if (res->res_dir.disable_cmds) { + delete res->res_dir.disable_cmds; + } + if (res->res_dir.disabled_cmds_array) { + free(res->res_dir.disabled_cmds_array); + } + break; + case R_CONSOLE: + if (res->res_cons.password) { + free(res->res_cons.password); + } + if (res->res_cons.address) { + free(res->res_cons.address); + } + if (res->res_cons.tls_ctx) { + free_tls_context(res->res_cons.tls_ctx); + } + if (res->res_cons.tls_ca_certfile) { + free(res->res_cons.tls_ca_certfile); + } + if (res->res_cons.tls_ca_certdir) { + free(res->res_cons.tls_ca_certdir); + } + if (res->res_cons.tls_certfile) { + free(res->res_cons.tls_certfile); + } + if (res->res_cons.tls_keyfile) { + free(res->res_cons.tls_keyfile); + } + if (res->res_cons.tls_dhfile) { + free(res->res_cons.tls_dhfile); + } + if (res->res_cons.tls_allowed_cns) { + delete res->res_cons.tls_allowed_cns; + } + break; + case R_CLIENT: + if (res->res_client.working_directory) { + free(res->res_client.working_directory); + } + if (res->res_client.pid_directory) { + free(res->res_client.pid_directory); + } + if (res->res_client.subsys_directory) { + free(res->res_client.subsys_directory); + } + if (res->res_client.scripts_directory) { + free(res->res_client.scripts_directory); + } + if (res->res_client.plugin_directory) { + free(res->res_client.plugin_directory); + } + if (res->res_client.FDaddrs) { + free_addresses(res->res_client.FDaddrs); + } + if (res->res_client.FDsrc_addr) { + free_addresses(res->res_client.FDsrc_addr); + } + if (res->res_client.snapshot_command) { + free(res->res_client.snapshot_command); + } + if (res->res_client.pki_keypair_file) { + free(res->res_client.pki_keypair_file); + } + if (res->res_client.pki_keypair) { + crypto_keypair_free(res->res_client.pki_keypair); + } + + if (res->res_client.pki_signing_key_files) { + delete res->res_client.pki_signing_key_files; + } + if (res->res_client.pki_signers) { + X509_KEYPAIR *keypair; + foreach_alist(keypair, res->res_client.pki_signers) { + crypto_keypair_free(keypair); + } + delete res->res_client.pki_signers; + } + + if (res->res_client.pki_master_key_files) { + delete res->res_client.pki_master_key_files; + } + + if (res->res_client.pki_recipients) { + X509_KEYPAIR *keypair; + foreach_alist(keypair, res->res_client.pki_recipients) { + crypto_keypair_free(keypair); + } + delete res->res_client.pki_recipients; + } + + if (res->res_client.tls_ctx) { + free_tls_context(res->res_client.tls_ctx); + } + if (res->res_client.tls_ca_certfile) { + free(res->res_client.tls_ca_certfile); + } + if 
(res->res_client.tls_ca_certdir) { + free(res->res_client.tls_ca_certdir); + } + if (res->res_client.tls_certfile) { + free(res->res_client.tls_certfile); + } + if (res->res_client.tls_keyfile) { + free(res->res_client.tls_keyfile); + } + if (res->res_client.disable_cmds) { + delete res->res_client.disable_cmds; + } + if (res->res_client.disabled_cmds_array) { + free(res->res_client.disabled_cmds_array); + } + if (res->res_client.verid) { + free(res->res_client.verid); + } + break; + case R_MSGS: + if (res->res_msgs.mail_cmd) { + free(res->res_msgs.mail_cmd); + } + if (res->res_msgs.operator_cmd) { + free(res->res_msgs.operator_cmd); + } + free_msgs_res((MSGS *)res); /* free message resource */ + res = NULL; + break; + default: + printf(_("Unknown resource type %d\n"), type); + } + /* Common stuff again -- free the resource, recurse to next one */ + if (res) { + free(res); + } +} + +/* Save the new resource by chaining it into the head list for + * the resource. If this is pass 2, we update any resource + * pointers (currently only in the Job resource). + */ +bool save_resource(CONFIG *config, int type, RES_ITEM *items, int pass) +{ + URES *res; + CONSRES *cons; + int rindex = type - r_first; + int i, size; + int error = 0; + + /* + * Ensure that all required items are present + */ + for (i=0; items[i].name; i++) { + if (items[i].flags & ITEM_REQUIRED) { + if (!bit_is_set(i, res_all.res_dir.hdr.item_present)) { + Mmsg(config->m_errmsg, _("\"%s\" directive is required in \"%s\" resource, but not found.\n"), + items[i].name, resources[rindex].name); + return false; + } + } + } + + /* During pass 2, we looked up pointers to all the resources + * referrenced in the current resource, , now we + * must copy their address from the static record to the allocated + * record. + */ + if (pass == 2) { + switch (type) { + /* Resources not containing a resource */ + case R_MSGS: + break; + + /* Resources containing another resource */ + case R_DIRECTOR: + if ((res = (URES *)GetResWithName(R_DIRECTOR, res_all.res_dir.hdr.name)) == NULL) { + Mmsg(config->m_errmsg, _("Cannot find Director resource %s\n"), res_all.res_dir.hdr.name); + return false; + } + res->res_dir.tls_allowed_cns = res_all.res_dir.tls_allowed_cns; + res->res_dir.disable_cmds = res_all.res_dir.disable_cmds; + res->res_dir.console = res_all.res_dir.console; + if (res_all.res_dir.remote && !res_all.res_dir.console) { + if ((cons = (CONSRES *)GetNextRes(R_CONSOLE, NULL)) == NULL) { + Mmsg(config->m_errmsg, _("Cannot find any Console resource for remote access\n")); + return false; + } + res->res_dir.console = cons; + } + break; + /* Resources containing another resource */ + case R_CONSOLE: + break; + case R_CLIENT: + if ((res = (URES *)GetResWithName(R_CLIENT, res_all.res_dir.hdr.name)) == NULL) { + Mmsg(config->m_errmsg, _("Cannot find Client resource %s\n"), res_all.res_dir.hdr.name); + return false; + } + res->res_client.pki_signing_key_files = res_all.res_client.pki_signing_key_files; + res->res_client.pki_master_key_files = res_all.res_client.pki_master_key_files; + + res->res_client.pki_signers = res_all.res_client.pki_signers; + res->res_client.pki_recipients = res_all.res_client.pki_recipients; + + res->res_client.messages = res_all.res_client.messages; + res->res_client.disable_cmds = res_all.res_client.disable_cmds; + break; + default: + Emsg1(M_ERROR, 0, _("Unknown resource type %d\n"), type); + error = 1; + break; + } + /* Note, the resoure name was already saved during pass 1, + * so here, we can just release it. 
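The ITEM_REQUIRED loop at the top of save_resource() above depends on a per-resource "item present" bitmap: each store_xxx handler marks the item it just parsed (the set_bit() call in store_cipher_type() above), and when the resource closes the parser verifies that every item flagged ITEM_REQUIRED was marked. A minimal sketch of that bookkeeping with a plain 64-bit mask; Bacula's set_bit()/bit_is_set() helpers are only mimicked here:

   #include <cstdio>
   #include <cstdint>

   enum { REQ = 1 };                                /* illustrative flag */

   struct item_def { const char *name; int flags; };

   static const item_def items[] = {                /* illustrative item table */
      { "Name",        REQ },
      { "Password",    REQ },
      { "Description", 0   },
   };

   int main()
   {
      uint64_t present = 0;

      /* pretend the parser saw "Name" (index 0) and "Description" (index 2) */
      present |= UINT64_C(1) << 0;
      present |= UINT64_C(1) << 2;

      for (unsigned i = 0; i < sizeof(items) / sizeof(items[0]); i++) {
         if ((items[i].flags & REQ) && !(present & (UINT64_C(1) << i))) {
            printf("\"%s\" directive is required but not found\n", items[i].name);
         }
      }
      return 0;
   }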
+ */ + if (res_all.res_dir.hdr.name) { + free(res_all.res_dir.hdr.name); + res_all.res_dir.hdr.name = NULL; + } + if (res_all.res_dir.hdr.desc) { + free(res_all.res_dir.hdr.desc); + res_all.res_dir.hdr.desc = NULL; + } + return true; + } + + /* The following code is only executed on pass 1 */ + switch (type) { + case R_DIRECTOR: + size = sizeof(DIRRES); + break; + case R_CONSOLE: + size = sizeof(CONSRES); + break; + case R_CLIENT: + size = sizeof(CLIENT); + break; + case R_MSGS: + size = sizeof(MSGS); + break; + default: + printf(_("Unknown resource type %d\n"), type); + error = 1; + size = 1; + break; + } + /* Common */ + if (!error) { + if (!config->insert_res(rindex, size)) { + return false; + } + } + return true; +} + +bool parse_fd_config(CONFIG *config, const char *configfile, int exit_code) +{ + config->init(configfile, NULL, exit_code, (void *)&res_all, res_all_size, + r_first, r_last, resources, &res_head); + return config->parse_config(); +} diff --git a/src/filed/filed_conf.h b/src/filed/filed_conf.h new file mode 100644 index 00000000..8f6f5903 --- /dev/null +++ b/src/filed/filed_conf.h @@ -0,0 +1,148 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula File Daemon specific configuration + * + * Kern Sibbald, Sep MM + */ + +/* + * Resource codes -- they must be sequential for indexing + */ +#define R_FIRST 1001 + +#define R_DIRECTOR 1001 +#define R_CLIENT 1002 +#define R_MSGS 1003 +#define R_CONSOLE 1004 + +#define R_LAST R_CONSOLE + +/* + * Some resource attributes + */ +#define R_NAME 1020 +#define R_ADDRESS 1021 +#define R_PASSWORD 1022 +#define R_TYPE 1023 + +/* Cipher/Digest keyword structure */ +struct s_ct { + const char *type_name; + int32_t type_value; +}; + +/* Definition of the contents of each Resource */ +struct CONSRES { + RES hdr; + char *password; /* Director password */ + char *address; /* Director address or zero */ + int heartbeat_interval; + int comm_compression; + int32_t DIRport; + bool tls_authenticate; /* Authenticate with TSL */ + bool tls_enable; /* Enable TLS */ + bool tls_require; /* Require TLS */ + bool tls_verify_peer; /* TLS Verify Client Certificate */ + char *tls_ca_certfile; /* TLS CA Certificate File */ + char *tls_ca_certdir; /* TLS CA Certificate Directory */ + char *tls_certfile; /* TLS Server Certificate File */ + char *tls_keyfile; /* TLS Server Key File */ + char *tls_dhfile; /* TLS Diffie-Hellman Parameters */ + alist *tls_allowed_cns; /* TLS Allowed Clients */ + TLS_CONTEXT *tls_ctx; /* Shared TLS Context */ +}; + +/* Definition of the contents of each Resource */ +struct DIRRES { + RES hdr; + char *password; /* Director password */ + char *address; /* Director address or zero */ + bool monitor; /* Have only access to status and .status functions */ + bool remote; /* Remote console, can run and control jobs */ + bool tls_authenticate; /* Authenticate with TSL */ + bool tls_enable; /* Enable TLS */ + bool tls_require; /* Require TLS */ + bool 
tls_verify_peer; /* TLS Verify Client Certificate */ + char *tls_ca_certfile; /* TLS CA Certificate File */ + char *tls_ca_certdir; /* TLS CA Certificate Directory */ + char *tls_certfile; /* TLS Server Certificate File */ + char *tls_keyfile; /* TLS Server Key File */ + char *tls_dhfile; /* TLS Diffie-Hellman Parameters */ + alist *tls_allowed_cns; /* TLS Allowed Clients */ + uint64_t max_bandwidth_per_job; /* Bandwidth limitation (per director) */ + TLS_CONTEXT *tls_ctx; /* Shared TLS Context */ + alist *disable_cmds; /* Commands to disable */ + bool *disabled_cmds_array; /* Disabled commands array */ + CONSRES *console; +}; + +struct CLIENT { + RES hdr; + dlist *FDaddrs; + dlist *FDsrc_addr; /* address to source connections from */ + char *working_directory; + char *pid_directory; + char *subsys_directory; + char *plugin_directory; /* Plugin directory */ + char *scripts_directory; + char *snapshot_command; + MSGS *messages; /* daemon message handler */ + uint32_t MaxConcurrentJobs; + utime_t SDConnectTimeout; /* timeout in seconds */ + utime_t heartbeat_interval; /* Interval to send heartbeats */ + uint32_t max_network_buffer_size; /* max network buf size */ + bool comm_compression; /* Enable comm line compression */ + bool pki_sign; /* Enable Data Integrity Verification via Digital Signatures */ + bool pki_encrypt; /* Enable Data Encryption */ + char *pki_keypair_file; /* PKI Key Pair File */ + alist *pki_signing_key_files; /* PKI Signing Key Files */ + alist *pki_master_key_files; /* PKI Master Key Files */ + uint32_t pki_cipher; /* PKI Cipher type */ + uint32_t pki_digest; /* PKI Digest type */ + bool tls_authenticate; /* Authenticate with TLS */ + bool tls_enable; /* Enable TLS */ + bool tls_require; /* Require TLS */ + char *tls_ca_certfile; /* TLS CA Certificate File */ + char *tls_ca_certdir; /* TLS CA Certificate Directory */ + char *tls_certfile; /* TLS Client Certificate File */ + char *tls_keyfile; /* TLS Client Key File */ + + X509_KEYPAIR *pki_keypair; /* Shared PKI Public/Private Keypair */ + alist *pki_signers; /* Shared PKI Trusted Signers */ + alist *pki_recipients; /* Shared PKI Recipients */ + TLS_CONTEXT *tls_ctx; /* Shared TLS Context */ + char *verid; /* Custom Id to print in version command */ + uint64_t max_bandwidth_per_job; /* Bandwidth limitation (global) */ + alist *disable_cmds; /* Commands to disable */ + bool *disabled_cmds_array; /* Disabled commands array */ +}; + + + +/* Define the Union of all the above + * resource structure definitions. + */ +union URES { + DIRRES res_dir; + CLIENT res_client; + MSGS res_msgs; + CONSRES res_cons; + RES hdr; +}; diff --git a/src/filed/heartbeat.c b/src/filed/heartbeat.c new file mode 100644 index 00000000..2a8eeafd --- /dev/null +++ b/src/filed/heartbeat.c @@ -0,0 +1,213 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
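filed_conf.h above ends with URES, a union of every resource structure; because DIRRES, CLIENT, CONSRES and MSGS all begin with the same RES header, generic parser code such as save_resource() can write into the shared res_all area and still reach the name and description fields without knowing which resource type it is currently building. A stripped-down sketch of that layout with invented types and fields:

   #include <cstdio>

   struct hdr_t { const char *name; };              /* common leading header */
   struct dir_t { hdr_t hdr; const char *address; };
   struct cli_t { hdr_t hdr; int max_jobs; };

   union ures_t {                                   /* one storage area for all */
      dir_t dir;
      cli_t cli;
      hdr_t hdr;
   };

   int main()
   {
      ures_t res;                                   /* like the static res_all */
      res.cli.hdr.name = "laptop-fd";
      res.cli.max_jobs = 20;

      /* Reading the header through another member only works because every
       * resource starts with the same header -- the convention URES relies on. */
      printf("building resource \"%s\"\n", res.dir.hdr.name);
      return 0;
   }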
+*/ +/* + * Bacula File Daemon heartbeat routines + * Listens for heartbeats coming from the SD + * If configured, sends heartbeats to Dir + * + * Kern Sibbald, May MMIII + * + */ + +#include "bacula.h" +#include "filed.h" + +#define WAIT_INTERVAL 5 + +extern "C" void *sd_heartbeat_thread(void *arg); +extern "C" void *dir_heartbeat_thread(void *arg); +extern bool no_signals; + +/* + * Listen on the SD socket for heartbeat signals. + * Send heartbeats to the Director every HB_TIME + * seconds. + */ +extern "C" void *sd_heartbeat_thread(void *arg) +{ + int32_t n; + JCR *jcr = (JCR *)arg; + BSOCK *sd, *dir; + time_t last_heartbeat = time(NULL); + time_t now; + + set_jcr_in_tsd(jcr); + pthread_detach(pthread_self()); + + /* Get our own local copy */ + sd = dup_bsock(jcr->store_bsock); + dir = dup_bsock(jcr->dir_bsock); + + jcr->hb_bsock = sd; + jcr->hb_started = true; + jcr->hb_dir_bsock = dir; + dir->suppress_error_messages(true); + sd->suppress_error_messages(true); + + /* Hang reading the socket to the SD, and every time we get + * a heartbeat or we get a wait timeout (5 seconds), we + * check to see if we need to send a heartbeat to the + * Director. + */ + while (!sd->is_stop()) { + n = sd->wait_data_intr(WAIT_INTERVAL); + if (n < 0 || sd->is_stop()) { + break; + } + if (me->heartbeat_interval) { + now = time(NULL); + if (now-last_heartbeat >= me->heartbeat_interval) { + dir->signal(BNET_HEARTBEAT); + if (dir->is_stop()) { + break; + } + last_heartbeat = now; + } + } + if (n == 1) { /* input waiting */ + sd->recv(); /* read it -- probably heartbeat from sd */ + if (sd->is_stop()) { + break; + } + if (sd->msglen <= 0) { + Dmsg1(100, "Got BNET_SIG %d from SD\n", sd->msglen); + } else { + Dmsg2(100, "Got %d bytes from SD. MSG=%s\n", sd->msglen, sd->msg); + } + } + Dmsg2(200, "wait_intr=%d stop=%d\n", n, sd->is_stop()); + } + sd->close(); + dir->close(); + jcr->hb_bsock = NULL; + jcr->hb_started = false; + jcr->hb_dir_bsock = NULL; + return NULL; +} + +/* Startup the heartbeat thread -- see above */ +void start_heartbeat_monitor(JCR *jcr) +{ + /* + * If no signals are set, do not start the heartbeat because + * it gives a constant stream of TIMEOUT_SIGNAL signals that + * make debugging impossible. + */ + if (!no_signals && (me->heartbeat_interval > 0)) { + jcr->hb_bsock = NULL; + jcr->hb_started = false; + jcr->hb_dir_bsock = NULL; + pthread_create(&jcr->heartbeat_id, NULL, sd_heartbeat_thread, (void *)jcr); + } +} + +/* Terminate the heartbeat thread. 
Used for both SD and DIR */ +void stop_heartbeat_monitor(JCR *jcr) +{ + int cnt = 0; + if (no_signals) { + return; + } + /* Wait max 10 secs for heartbeat thread to start */ + while (!jcr->hb_started && cnt++ < 200) { + bmicrosleep(0, 50000); /* wait for start */ + } + + if (jcr->hb_started) { + jcr->hb_bsock->set_timed_out(); /* set timed_out to terminate read */ + jcr->hb_bsock->set_terminated(); /* set to terminate read */ + } + if (jcr->hb_dir_bsock) { + jcr->hb_dir_bsock->set_timed_out(); /* set timed_out to terminate read */ + jcr->hb_dir_bsock->set_terminated(); /* set to terminate read */ + } + if (jcr->hb_started) { + Dmsg0(100, "Send kill to heartbeat id\n"); + pthread_kill(jcr->heartbeat_id, TIMEOUT_SIGNAL); /* make heartbeat thread go away */ + bmicrosleep(0, 50000); + } + cnt = 0; + /* Wait max 100 secs for heartbeat thread to stop */ + while (jcr->hb_started && cnt++ < 200) { + pthread_kill(jcr->heartbeat_id, TIMEOUT_SIGNAL); /* make heartbeat thread go away */ + bmicrosleep(0, 500000); + } +} + +/* + * Thread for sending heartbeats to the Director when there + * is no SD monitoring needed -- e.g. restore and verify Vol + * both do their own read() on the SD socket. + */ +extern "C" void *dir_heartbeat_thread(void *arg) +{ + JCR *jcr = (JCR *)arg; + BSOCK *dir; + time_t last_heartbeat = time(NULL); + + pthread_detach(pthread_self()); + + /* Get our own local copy */ + dir = dup_bsock(jcr->dir_bsock); + + jcr->hb_bsock = dir; + jcr->hb_started = true; + dir->suppress_error_messages(true); + + while (!dir->is_stop()) { + time_t now, next; + + now = time(NULL); + next = now - last_heartbeat; + if (next >= me->heartbeat_interval) { + dir->signal(BNET_HEARTBEAT); + if (dir->is_stop()) { + break; + } + last_heartbeat = now; + } + /* This should never happen, but it might ... */ + if (next <= 0) { + next = 1; + } + bmicrosleep(next, 0); + } + dir->close(); + jcr->hb_bsock = NULL; + jcr->hb_started = false; + return NULL; +} + +/* + * Same as above but we don't listen to the SD + */ +void start_dir_heartbeat(JCR *jcr) +{ + if (!no_signals && (me->heartbeat_interval > 0)) { + jcr->dir_bsock->set_locking(); + pthread_create(&jcr->heartbeat_id, NULL, dir_heartbeat_thread, (void *)jcr); + } +} + +void stop_dir_heartbeat(JCR *jcr) +{ + if (me->heartbeat_interval > 0) { + stop_heartbeat_monitor(jcr); + } +} diff --git a/src/filed/hello.c b/src/filed/hello.c new file mode 100644 index 00000000..ce379ff8 --- /dev/null +++ b/src/filed/hello.c @@ -0,0 +1,369 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Authenticate Director who is attempting to connect. 
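The heartbeat code above runs detached threads that wake at most every WAIT_INTERVAL seconds and send a keepalive to the Director once the configured interval has elapsed. A minimal standalone sketch of that polling pattern follows; send_heartbeat(), the shortened intervals and the stop_requested flag are hypothetical stand-ins for the BSOCK heartbeat signal, the Heartbeat Interval directive and the socket stop test used in the real daemon.

   #include <pthread.h>
   #include <unistd.h>
   #include <ctime>
   #include <cstdio>

   static volatile bool stop_requested = false;
   static const int heartbeat_interval = 3;   /* shortened for the demo */
   static const int wait_interval = 1;        /* the daemon uses WAIT_INTERVAL = 5 */

   /* Stand-in for dir->signal(BNET_HEARTBEAT). */
   static void send_heartbeat() { printf("heartbeat sent at %ld\n", (long)time(NULL)); }

   extern "C" void *heartbeat_thread(void *)
   {
      pthread_detach(pthread_self());
      time_t last = time(NULL);
      while (!stop_requested) {
         sleep(wait_interval);        /* the real thread waits on the SD socket instead */
         time_t now = time(NULL);
         if (heartbeat_interval && now - last >= heartbeat_interval) {
            send_heartbeat();
            last = now;
         }
      }
      return NULL;
   }

   int main()
   {
      pthread_t id;
      pthread_create(&id, NULL, heartbeat_thread, NULL);
      sleep(7);                        /* let roughly two heartbeats go out */
      stop_requested = true;
      sleep(wait_interval + 1);        /* give the thread time to notice */
      return 0;
   }

Build with -pthread.
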
+ * + * Kern Sibbald, October 2000 + */ + +#include "bacula.h" +#include "filed.h" + +extern CLIENT *me; /* my resource */ + +const int dbglvl = 50; + +/* FD_VERSION history + * None prior to 10Mar08 + * 1 10Mar08 + * 2 13Mar09 - added the ability to restore from multiple storages + * 3 03Sep10 - added the restore object command for vss plugin 4.0 + * 4 25Nov10 - added bandwidth command 5.1 + * 5 24Nov11 - added new restore object command format (pluginname) 6.0 + * 6 15Feb12 - added Component selection information list + * 7 19Feb12 - added Expected files to restore + * 8 22Mar13 - added restore options + version for SD + * 9 06Aug13 - skipped + * 10 01Jan14 - added SD Calls Client and api version to status command + * 11 O4May14 - skipped + * 12 22Jun14 - skipped + * 213 04Feb15 - added snapshot protocol with the DIR + * 214 20Mar17 - added comm line compression + */ + +#define FD_VERSION 214 /* FD version */ + +static char hello_sd[] = "Hello Bacula SD: Start Job %s %d\n"; +static char hello_dir[] = "2000 OK Hello %d\n"; +static char sorry_dir[] = "2999 Authentication failed.\n"; + +static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; + +/********************************************************************* + * + * Validate hello from the Director + * + * Returns: true if Hello is good. + * false if Hello is bad. + */ +bool validate_dir_hello(JCR *jcr) +{ + POOLMEM *dirname; + DIRRES *director = NULL; + int dir_version = 0; + BSOCK *dir = jcr->dir_bsock; + bool auth_success = false; + + if (dir->msglen < 25 || dir->msglen > 500) { + Dmsg2(dbglvl, "Bad Hello command from Director at %s. Len=%d.\n", + dir->who(), dir->msglen); + Jmsg2(jcr, M_FATAL, 0, _("Bad Hello command from Director at %s. Len=%d.\n"), + dir->who(), dir->msglen); + return false; + } + dirname = get_pool_memory(PM_MESSAGE); + dirname = check_pool_memory_size(dirname, dir->msglen); + + if (sscanf(dir->msg, "Hello Director %127s calling %d", dirname, &dir_version) != 2 && + sscanf(dir->msg, "Hello Director %127s calling", dirname) != 1 && + sscanf(dir->msg, "Hello %127s calling %d", dirname, &dir_version) != 2 ) { + char addr[64]; + char *who = dir->get_peer(addr, sizeof(addr)) ? dir->who() : addr; + dir->msg[100] = 0; + Dmsg2(dbglvl, "Bad Hello command from Director at %s: %s\n", + dir->who(), dir->msg); + Jmsg2(jcr, M_FATAL, 0, _("Bad Hello command from Director at %s: %s\n"), + who, dir->msg); + goto auth_fatal; + } + if (dir_version >= 1 && me->comm_compression) { + dir->set_compress(); + } else { + dir->clear_compress(); + Dmsg0(050, "*** No FD compression to DIR\n"); + } + unbash_spaces(dirname); + foreach_res(director, R_DIRECTOR) { + if (strcmp(director->hdr.name, dirname) == 0) + break; + } + if (!director) { + char addr[64]; + char *who = dir->get_peer(addr, sizeof(addr)) ? dir->who() : addr; + Jmsg2(jcr, M_FATAL, 0, _("Connection from unknown Director %s at %s rejected.\n"), + dirname, who); + goto auth_fatal; + } + auth_success = true; + +auth_fatal: + free_pool_memory(dirname); + jcr->director = director; + /* Single thread all failures to avoid DOS */ + if (!auth_success) { + P(mutex); + bmicrosleep(6, 0); + V(mutex); + } + return auth_success; +} + +/* + * Note, we handle the initial connection request here. + * We only get the jobname and the SD version, then we + * return, authentication will be done when the Director + * sends the storage command -- as is usually the case. + * This should be called only once by the SD. 
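validate_dir_hello() above accepts several Hello formats using width-limited sscanf() conversions (%127s) plus an optional protocol version that gates newer features such as comm-line compression. Here is a small sketch of the same scanning idea; parse_hello() is hypothetical and handles only the first of the accepted formats.

   #include <cstdio>

   /* Parse a hello line of the form "Hello Director <name> calling <version>".
    * The %127s width keeps the copied name inside the buffer, and a missing
    * version (an older peer) simply leaves *version at 0. */
   static bool parse_hello(const char *msg, char *name, size_t name_len, int *version)
   {
      char buf[128];
      *version = 0;
      if (sscanf(msg, "Hello Director %127s calling %d", buf, version) >= 1) {
         snprintf(name, name_len, "%s", buf);
         return true;
      }
      return false;
   }

   int main()
   {
      char name[128];
      int version;
      if (parse_hello("Hello Director backup-dir calling 214", name, sizeof(name), &version)) {
         printf("peer=%s version=%d compression=%s\n",
                name, version, version >= 1 ? "allowed" : "off");
      }
      return 0;
   }

A peer that sends no version is treated as version 0, i.e. too old for compression, which is how the version >= 1 checks above behave.
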
+ */ +void *handle_storage_connection(BSOCK *sd) +{ + char job_name[500]; + char tbuf[150]; + int sd_version = 0; + JCR *jcr; + + if (sscanf(sd->msg, "Hello FD: Bacula Storage calling Start Job %127s %d", + job_name, &sd_version) != 2) { + Jmsg(NULL, M_FATAL, 0, _("SD connect failed: Bad Hello command\n")); + return NULL; + } + Dmsg1(110, "Got a SD connection at %s\n", bstrftimes(tbuf, sizeof(tbuf), + (utime_t)time(NULL))); + Dmsg1(50, "%s", sd->msg); + + if (!(jcr=get_jcr_by_full_name(job_name))) { + Jmsg1(NULL, M_FATAL, 0, _("SD connect failed: Job name not found: %s\n"), job_name); + Dmsg1(3, "**** Job \"%s\" not found.\n", job_name); + sd->destroy(); + return NULL; + } + set_jcr_in_tsd(jcr); + Dmsg1(150, "Found Job %s\n", job_name); + + jcr->lock_auth(); + /* We already have a socket connected, just discard it */ + if (jcr->sd_calls_client_bsock) { + Qmsg1(jcr, M_WARNING, 0, _("SD \"%s\" tried to connect two times.\n"), sd->who()); + free_bsock(sd); + /* will exit just after the unlock() */ + + } else { + /* If we have a previous socket in store_bsock, we are in multi restore mode */ + jcr->sd_calls_client_bsock = sd; + sd->set_jcr(jcr); + } + jcr->unlock_auth(); + + if (!sd) { /* freed by free_bsock(), connection already done */ + free_jcr(jcr); + return NULL; + } + + /* Turn on compression for newer FDs */ + if (sd_version >= 1 && me->comm_compression) { + sd->set_compress(); /* set compression allowed */ + } else { + sd->clear_compress(); + Dmsg2(050, "******** No FD compression to SD. sd_ver=%d compres=%d\n", + sd_version, me->comm_compression); + } + + if (!jcr->max_bandwidth) { + if (jcr->director->max_bandwidth_per_job) { + jcr->max_bandwidth = jcr->director->max_bandwidth_per_job; + + } else if (me->max_bandwidth_per_job) { + jcr->max_bandwidth = me->max_bandwidth_per_job; + } + } + sd->set_bwlimit(jcr->max_bandwidth); + + Dmsg1(200, "sd_version=%ld\n", sd_version); + + pthread_cond_signal(&jcr->job_start_wait); /* wake waiting job */ + free_jcr(jcr); + return NULL; +} + + +/* + * Send Hello OK to DIR + */ +bool send_hello_ok(BSOCK *bs) +{ + return bs->fsend(hello_dir, FD_VERSION); +} + +bool send_sorry(BSOCK *bs) +{ + return bs->fsend(sorry_dir); +} + +/* + * Send Hello to SD + */ +bool send_hello_sd(JCR *jcr, char *Job) +{ + bool rtn; + BSOCK *sd = jcr->store_bsock; + + bash_spaces(Job); + rtn = sd->fsend(hello_sd, Job, FD_VERSION); + unbash_spaces(Job); + Dmsg1(100, "Send to SD: %s\n", sd->msg); + return rtn; +} + +/* ======================== */ + +bool send_fdcaps(JCR *jcr, BSOCK *sd) { return false; } +bool recv_sdcaps(JCR *jcr) { return false; } + +/* Commands sent to Director */ +static char hello[] = "Hello %s calling %d\n"; + +/* Response from Director */ +static char DirOKhello[] = "1000 OK: %d"; +#define UA_VERSION 1 + +BSOCK *connect_director(JCR *jcr, CONSRES *dir) +{ + int tls_local_need = BNET_TLS_NONE; + int tls_remote_need = BNET_TLS_NONE; + bool tls_authenticate; + int compatible = true; + int dir_version = 0; + char bashed_name[MAX_NAME_LENGTH]; + char *password; + TLS_CONTEXT *tls_ctx = NULL; + BSOCK *UA_sock = NULL; + int heart_beat; + + if (!dir) { + return 0; + } + + Dmsg2(0, "Connecting to Director %s:%d\n", dir->address, dir->DIRport); + + if (dir) { + heart_beat = dir->heartbeat_interval; + } else { + heart_beat = 0; + } + UA_sock = new_bsock(); + if (!UA_sock->connect(NULL, 5, 15, heart_beat, "Director daemon", dir->address, + NULL, dir->DIRport, 0)) { + free_bsock(UA_sock); + return NULL; + } + + /* + * Send my name to the Director then do 
authentication + */ + bstrncpy(bashed_name, dir->hdr.name, sizeof(bashed_name)); + bash_spaces(bashed_name); + password = dir->password; + /* TLS Requirement */ + if (dir->tls_enable) { + if (dir->tls_require) { + tls_local_need = BNET_TLS_REQUIRED; + } else { + tls_local_need = BNET_TLS_OK; + } + } + if (dir->tls_authenticate) { + tls_local_need = BNET_TLS_REQUIRED; + } + tls_authenticate = dir->tls_authenticate; + tls_ctx = dir->tls_ctx; + + /* Timeout Hello after 15 secs */ + btimer_t *tid = start_bsock_timer(UA_sock, 15); + UA_sock->fsend(hello, bashed_name, UA_VERSION); + + if (!cram_md5_respond(UA_sock, password, &tls_remote_need, &compatible) || + !cram_md5_challenge(UA_sock, password, tls_local_need, compatible)) { + goto bail_out; + } + + /* Verify that the remote host is willing to meet our TLS requirements */ + if (tls_remote_need < tls_local_need && tls_local_need != BNET_TLS_OK && tls_remote_need != BNET_TLS_OK) { + Mmsg(jcr->errmsg, _("Authorization problem:" + " Remote server did not advertise required TLS support.\n")); + goto bail_out; + } + + /* Verify that we are willing to meet the remote host's requirements */ + if (tls_remote_need > tls_local_need && tls_local_need != BNET_TLS_OK && tls_remote_need != BNET_TLS_OK) { + Mmsg(jcr->errmsg, _("Authorization problem:" + " Remote server requires TLS.\n")); + goto bail_out; + } + + /* Is TLS Enabled? */ + if (tls_local_need >= BNET_TLS_OK && tls_remote_need >= BNET_TLS_OK) { + /* Engage TLS! Full Speed Ahead! */ + if (!bnet_tls_client(tls_ctx, UA_sock, NULL)) { + Mmsg(jcr->errmsg, _("TLS negotiation failed\n")); + goto bail_out; + } + if (tls_authenticate) { /* Authenticate only? */ + UA_sock->free_tls(); /* yes, shutdown tls */ + } + } + + /* + * It's possible that the TLS connection will + * be dropped here if an invalid client certificate was presented + */ + Dmsg1(6, ">dird: %s", UA_sock->msg); + if (UA_sock->recv() <= 0) { + Mmsg(jcr->errmsg, _("Bad response to Hello command: ERR=%s\n"), + UA_sock->bstrerror()); + goto bail_out; + } + + Dmsg1(10, "msg); + if (strncmp(UA_sock->msg, DirOKhello, sizeof(DirOKhello)-3) == 0) { + sscanf(UA_sock->msg, DirOKhello, &dir_version); + Dmsg1(0, "%s\n", UA_sock->msg); + + } else { + Mmsg(jcr->errmsg, _("Director rejected Hello command\n")); + goto bail_out; + } + /* Turn on compression for newer Directors */ + if (dir_version >= 1 && (!dir || dir->comm_compression)) { + UA_sock->set_compress(); + } else { + UA_sock->clear_compress(); + } + stop_bsock_timer(tid); + return UA_sock; + +bail_out: + free_bsock(UA_sock); + stop_bsock_timer(tid); + Mmsg(jcr->errmsg, + ( _("Director authorization problem.\n" + "Most likely the passwords do not agree.\n" + "If you are using TLS, there may have been a certificate validation error during the TLS handshake.\n" + "For help, please see " MANUAL_AUTH_URL "\n"))); + return NULL; +} diff --git a/src/filed/job.c b/src/filed/job.c new file mode 100644 index 00000000..8de8e3d6 --- /dev/null +++ b/src/filed/job.c @@ -0,0 +1,3020 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. 
+ + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula File Daemon Job processing + * + * Written by Kern Sibbald, October MM + */ + +#include "bacula.h" +#include "filed.h" +#include "ch.h" +#ifdef WIN32_VSS +#include "vss.h" +static pthread_mutex_t vss_mutex = PTHREAD_MUTEX_INITIALIZER; +#endif + +/* Globals */ +bool win32decomp = false; +bool no_win32_write_errors = false; + +/* Static variables */ +static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; + +#ifdef HAVE_WIN32 +const bool have_win32 = true; +#else +const bool have_win32 = false; +#endif + +#ifdef HAVE_ACL +const bool have_acl = true; +#else +const bool have_acl = false; +#endif + +#if HAVE_XATTR +const bool have_xattr = true; +#else +const bool have_xattr = false; +#endif + +extern CLIENT *me; /* our client resource */ + +/* Imported functions */ +extern int status_cmd(JCR *jcr); +extern int qstatus_cmd(JCR *jcr); +extern int accurate_cmd(JCR *jcr); + +/* Forward referenced functions */ +static int backup_cmd(JCR *jcr); +static int component_cmd(JCR *jcr); +static int cancel_cmd(JCR *jcr); +static int setdebug_cmd(JCR *jcr); +static int setbandwidth_cmd(JCR *jcr); +static int estimate_cmd(JCR *jcr); +static int hello_cmd(JCR *jcr); +static int job_cmd(JCR *jcr); +static int fileset_cmd(JCR *jcr); +static int level_cmd(JCR *jcr); +static int verify_cmd(JCR *jcr); +static int restore_cmd(JCR *jcr); +static int end_restore_cmd(JCR *jcr); +static int storage_cmd(JCR *jcr); +static int session_cmd(JCR *jcr); +static int response(JCR *jcr, BSOCK *sd, char *resp, const char *cmd); +static void filed_free_jcr(JCR *jcr); +static int open_sd_read_session(JCR *jcr); +static int runscript_cmd(JCR *jcr); +static int runbefore_cmd(JCR *jcr); +static int runafter_cmd(JCR *jcr); +static int runbeforenow_cmd(JCR *jcr); +static int restore_object_cmd(JCR *jcr); +static int set_options(findFOPTS *fo, const char *opts); +static void set_storage_auth_key(JCR *jcr, char *key); +static int sm_dump_cmd(JCR *jcr); +static int proxy_cmd(JCR *jcr); +static int fd_testnetwork_cmd(JCR *jcr); +#ifdef DEVELOPER +static int exit_cmd(JCR *jcr); +#endif + +/* Exported functions */ + +#define ACCESS_MONITOR 1 +#define ACCESS_REMOTE 2 + +/* + * The following are the recognized commands from the Director. 
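The command table introduced above maps a command prefix to its handler plus the access bits a Monitor or remote Director needs, and dispatch is a linear strncmp() prefix match (see handle_director_request() further down). Below is a minimal sketch of that table-driven dispatch; cmd_entry, cmd_table and the two handlers are illustrative stand-ins for the real s_cmds entries.

   #include <cstdio>
   #include <cstring>

   #define ACCESS_MONITOR 1
   #define ACCESS_REMOTE  2

   /* Same shape as s_cmds: prefix, handler, required access bits. */
   struct cmd_entry {
      const char *cmd;
      int (*func)(const char *args);
      int access;
   };

   static int status_cmd(const char *args) { printf("status%s\n", args); return 1; }
   static int backup_cmd(const char *args) { printf("backup%s\n", args); return 1; }

   static cmd_entry cmd_table[] = {
      {"status", status_cmd, ACCESS_MONITOR | ACCESS_REMOTE},
      {"backup", backup_cmd, 0},
      {NULL, NULL, 0}                          /* list terminator */
   };

   /* Find the first entry whose name is a prefix of the incoming message. */
   static int dispatch(const char *msg)
   {
      for (int i = 0; cmd_table[i].cmd; i++) {
         if (strncmp(cmd_table[i].cmd, msg, strlen(cmd_table[i].cmd)) == 0) {
            return cmd_table[i].func(msg + strlen(cmd_table[i].cmd));
         }
      }
      printf("2999 Invalid command\n");
      return 0;
   }

   int main()
   {
      dispatch("status full");
      dispatch("backup level=incremental");
      dispatch("bogus");
      return 0;
   }

Because matching is by prefix, entry order matters when one command name is a prefix of another; the real table lists "RunBeforeNow" and "RunBeforeJob" before the shorter "Run" for exactly this reason.
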
+ */ +struct s_cmds cmds[] = { + {"backup", backup_cmd, 0}, + {"cancel", cancel_cmd, ACCESS_REMOTE}, + {"setdebug=", setdebug_cmd, 0}, + {"setbandwidth=",setbandwidth_cmd, ACCESS_REMOTE}, + {"snapshot", snapshot_cmd, 0}, + {"estimate", estimate_cmd, 0}, + {"Hello", hello_cmd, 1}, + {"fileset", fileset_cmd, 0}, + {"JobId=", job_cmd, 0}, + {"level = ", level_cmd, 0}, + {"restore ", restore_cmd, 0}, + {"endrestore", end_restore_cmd, 0}, + {"session", session_cmd, 0}, + {"status", status_cmd, ACCESS_MONITOR|ACCESS_REMOTE}, + {".status", qstatus_cmd, ACCESS_MONITOR|ACCESS_REMOTE}, + {"storage ", storage_cmd, 0}, + {"verify", verify_cmd, 0}, + {"component", component_cmd, 0}, + {"RunBeforeNow", runbeforenow_cmd, 0}, + {"RunBeforeJob", runbefore_cmd, 0}, + {"RunAfterJob", runafter_cmd, 0}, + {"Run", runscript_cmd, 0}, + {"accurate", accurate_cmd, 0}, + {"restoreobject", restore_object_cmd, 0}, + {"sm_dump", sm_dump_cmd, 0}, + {"stop", cancel_cmd, ACCESS_REMOTE}, + {"proxy", proxy_cmd, ACCESS_REMOTE}, + {"testnetwork", fd_testnetwork_cmd, 0}, +#ifdef DEVELOPER + {"exit", exit_cmd, 0}, +#endif + {NULL, NULL} /* list terminator */ +}; + +/* Commands received from director that need scanning */ +static char jobcmd[] = "JobId=%d Job=%127s SDid=%d SDtime=%d Authorization=%100s"; +static char storaddr[] = "storage address=%s port=%d ssl=%d Authorization=%100s"; +static char storaddr_v1[] = "storage address=%s port=%d ssl=%d"; +static char sessioncmd[] = "session %127s %ld %ld %ld %ld %ld %ld\n"; + +static char restorecmd1[] = "restore replace=%c prelinks=%d where=\n"; +static char restorefcmd1[] = "restore files=%d replace=%c prelinks=%d where=\n"; + +/* The following restore commands may have a big where=/regexwhere= parameter + * the bsscanf is limiting the default %s to 1000c. To allow more than 1000 bytes, + * we can specify %xxxxs where xxxx is the size expected in bytes. 
+ * + * So, the code will add %s\n to the end of the following restore commands + */ +static char restorecmd[] = "restore replace=%c prelinks=%d where="; +static char restorecmdR[] = "restore replace=%c prelinks=%d regexwhere="; +static char restorefcmd[] = "restore files=%d replace=%c prelinks=%d where="; +static char restorefcmdR[] = "restore files=%d replace=%c prelinks=%d regexwhere="; + +static char restoreobjcmd[] = "restoreobject JobId=%u %d,%d,%d,%d,%d,%d,%s"; +static char restoreobjcmd1[] = "restoreobject JobId=%u %d,%d,%d,%d,%d,%d\n"; +static char endrestoreobjectcmd[] = "restoreobject end\n"; +static char verifycmd[] = "verify level=%30s"; +static char estimatecmd[] = "estimate listing=%d"; +static char runbefore[] = "RunBeforeJob %s"; +static char runafter[] = "RunAfterJob %s"; +static char runscript[] = "Run OnSuccess=%d OnFailure=%d AbortOnError=%d When=%d Command=%s"; +static char setbandwidth[]= "setbandwidth=%lld Job=%127s"; + +/* Responses sent to Director */ +static char errmsg[] = "2999 Invalid command\n"; +static char no_auth[] = "2998 No Authorization\n"; +static char invalid_cmd[] = "2997 Invalid command for a Director with Monitor directive enabled.\n"; +static char OKBandwidth[] = "2000 OK Bandwidth\n"; +static char OKinc[] = "2000 OK include\n"; +static char OKest[] = "2000 OK estimate files=%s bytes=%s\n"; +static char OKlevel[] = "2000 OK level\n"; +static char OKbackup[] = "2000 OK backup\n"; +static char OKverify[] = "2000 OK verify\n"; +static char OKrestore[] = "2000 OK restore\n"; +static char OKsession[] = "2000 OK session\n"; +static char OKstore[] = "2000 OK storage\n"; +static char OKstoreend[] = "2000 OK storage end\n"; +static char OKjob[] = "2000 OK Job %s (%s) %s,%s,%s"; +static char OKsetdebug[] = "2000 OK setdebug=%ld trace=%ld hangup=%ld" + " blowup=%ld options=%s tags=%s\n"; +static char BADjob[] = "2901 Bad Job\n"; +static char EndJob[] = "2800 End Job TermCode=%d JobFiles=%d ReadBytes=%lld" + " JobBytes=%lld Errors=%d VSS=%d Encrypt=%d" + " CommBytes=%lld CompressCommBytes=%lld\n"; +static char OKRunBefore[] = "2000 OK RunBefore\n"; +static char OKRunBeforeNow[] = "2000 OK RunBeforeNow\n"; +static char OKRunAfter[] = "2000 OK RunAfter\n"; +static char OKRunScript[] = "2000 OK RunScript\n"; +static char BADcmd[] = "2902 Bad %s\n"; +static char OKRestoreObject[] = "2000 OK ObjectRestored\n"; +static char OKComponentInfo[] = "2000 OK ComponentInfo\n"; + + +/* Responses received from Storage Daemon */ +static char OK_end[] = "3000 OK end\n"; +static char OK_close[] = "3000 OK close Status = %d\n"; +static char OK_open[] = "3000 OK open ticket = %d\n"; +static char OK_data[] = "3000 OK data\n"; +static char OK_append[] = "3000 OK append data\n"; + + +/* Commands sent to Storage Daemon */ +static char append_open[] = "append open session\n"; +static char append_data[] = "append data %d\n"; +static char append_end[] = "append end session %d\n"; +static char append_close[] = "append close session %d\n"; +static char read_open[] = "read open session = %s %ld %ld %ld %ld %ld %ld\n"; +static char read_data[] = "read data %d\n"; +static char read_close[] = "read close session %d\n"; +static char read_ctrl[] = "read control %d\n"; + +/* Should tell us if a command is authorized or not */ +static bool access_ok(struct s_cmds *cmd, DIRRES* dir) +{ + if ((cmd->access & ACCESS_MONITOR) && dir->monitor) { + return true; + } + if ((cmd->access & ACCESS_REMOTE) && dir->remote) { + return true; + } + if (!dir->remote && !dir->monitor) { + return true; + } + return 
false; +} + +/* + * Accept requests from a Director + * + * NOTE! We are running as a separate thread + * + * Send output one line + * at a time followed by a zero length transmission. + * + * Return when the connection is terminated or there + * is an error. + * + * Basic task here is: + * Authenticate Director (during Hello command). + * Accept commands one at a time from the Director + * and execute them. + * + * Concerning ClientRunBefore/After, the sequence of events + * is rather critical. If they are not done in the right + * order one can easily get FD->SD timeouts if the script + * runs a long time. + * + * The current sequence of events is: + * 1. Dir starts job with FD + * 2. Dir connects to SD + * 3. Dir connects to FD + * 4. FD connects to SD + * 5. FD gets/runs ClientRunBeforeJob and sends ClientRunAfterJob + * 6. Dir sends include/exclude + * 7. FD sends the file data to SD + * 8. SD/FD disconnects while the SD despools data and attributes (optional) + * 9. FD runs ClientRunAfterJob + */ + +static void *handle_director_request(BSOCK *dir) +{ + int i; + bool found, quit; + bool first = true; + JCR *jcr; + const char jobname[12] = "*Director*"; + + prevent_os_suspensions(); /* do not suspend during backup/restore */ + jcr = new_jcr(sizeof(JCR), filed_free_jcr); /* create JCR */ + jcr->sd_calls_client_bsock = NULL; + jcr->sd_calls_client = false; + jcr->dir_bsock = dir; + jcr->ff = init_find_files(); + jcr->start_time = time(NULL); + jcr->RunScripts = New(alist(10, not_owned_by_alist)); + jcr->last_fname = get_pool_memory(PM_FNAME); + jcr->last_fname[0] = 0; + jcr->client_name = get_memory(strlen(my_name) + 1); + pm_strcpy(jcr->client_name, my_name); + bstrncpy(jcr->Job, jobname, sizeof(jobname)); /* dummy */ + jcr->crypto.pki_sign = me->pki_sign; + jcr->crypto.pki_encrypt = me->pki_encrypt; + jcr->crypto.pki_keypair = me->pki_keypair; + jcr->crypto.pki_signers = me->pki_signers; + jcr->crypto.pki_recipients = me->pki_recipients; + + dir->set_jcr(jcr); + jcr->set_killable(true); /* allow dir to kill/cancel job */ + /* Initialize SD start condition variable */ + int errstat = pthread_cond_init(&jcr->job_start_wait, NULL); + if (errstat != 0) { + berrno be; + Jmsg1(jcr, M_FATAL, 0, _("Unable to init job cond variable: ERR=%s\n"), be.bstrerror(errstat)); + goto bail_out; + } + enable_backup_privileges(NULL, 1 /* ignore_errors */); + + for (quit=false; !quit;) { + if (!first) { /* first call the read is done */ + /* Read command */ + if (dir->recv() < 0) { + break; /* connection terminated */ + } + } + if (dir->msglen == 0) { /* Bad connection */ + break; + } + first = false; + dir->msg[dir->msglen] = 0; + Dmsg1(100, "msg); + found = false; + for (i=0; cmds[i].cmd; i++) { + if (strncmp(cmds[i].cmd, dir->msg, strlen(cmds[i].cmd)) == 0) { + found = true; /* indicate command found */ + if (!jcr->authenticated && cmds[i].func != hello_cmd) { + dir->fsend(no_auth); + dir->signal(BNET_EOD); + break; + } + if (jcr->authenticated && !access_ok(&cmds[i], jcr->director)) { + Dmsg1(100, "Command \"%s\" is invalid.\n", cmds[i].cmd); + dir->fsend(invalid_cmd); + dir->signal(BNET_EOD); + break; + } + if ((me->disabled_cmds_array && me->disabled_cmds_array[i]) || + (jcr->director && jcr->director->disabled_cmds_array && + jcr->director->disabled_cmds_array[i])) { + Jmsg(jcr, M_FATAL, 0, _("Command: \"%s\" is disabled.\n"), cmds[i].cmd); + quit = true; + break; + } + Dmsg2(100, "Executing %s Dir %s command.\n", cmds[i].cmd, dir->msg); + if (!cmds[i].func(jcr)) { /* do command */ + quit = true; /* 
error or fully terminated, get out */ + Dmsg1(100, "Quit command loop. Canceled=%d\n", job_canceled(jcr)); + } + break; + } + } + if (!found) { /* command not found */ + dir->fsend(errmsg); + quit = true; + break; + } + } + + /* Inform Storage daemon that we are done */ + if (jcr->store_bsock) { + jcr->store_bsock->signal(BNET_TERMINATE); + } + + /* Run the after job */ + run_scripts(jcr, jcr->RunScripts, "ClientAfterJob"); + + /* send any queued messages before reporting the jobstatus to the director */ + dequeue_messages(jcr); + + if (jcr->JobId) { /* send EndJob if running a job */ + uint64_t CommBytes, CommCompressedBytes; + uint32_t vss, encrypt; + /* Send termination status back to Dir */ + if (jcr->store_bsock) { + CommBytes = jcr->store_bsock->CommBytes(); + CommCompressedBytes = jcr->store_bsock->CommCompressedBytes(); + } else { + CommBytes = CommCompressedBytes = 0; + } + encrypt = jcr->crypto.pki_encrypt; + vss = jcr->Snapshot; + dir->fsend(EndJob, jcr->JobStatus, jcr->JobFiles, + jcr->ReadBytes, jcr->JobBytes, jcr->JobErrors, vss, + encrypt, CommBytes, CommCompressedBytes); + //Dmsg0(0, dir->msg); + } + + generate_daemon_event(jcr, "JobEnd"); + generate_plugin_event(jcr, bEventJobEnd); + +bail_out: + dequeue_messages(jcr); /* send any queued messages, will no longer impact + * the job status... */ + dequeue_daemon_messages(jcr); + + /* Inform Director that we are done */ + dir->signal(BNET_TERMINATE); + + free_and_null_pool_memory(jcr->job_metadata); + + /* Clean up fileset */ + FF_PKT *ff = jcr->ff; + findFILESET *fileset = ff->fileset; + if (fileset) { + int i, j, k; + /* Delete FileSet Include lists */ + for (i=0; iinclude_list.size(); i++) { + findINCEXE *incexe = (findINCEXE *)fileset->include_list.get(i); + for (j=0; jopts_list.size(); j++) { + findFOPTS *fo = (findFOPTS *)incexe->opts_list.get(j); + if (fo->plugin) { + free(fo->plugin); + } + for (k=0; kregex.size(); k++) { + regfree((regex_t *)fo->regex.get(k)); + } + for (k=0; kregexdir.size(); k++) { + regfree((regex_t *)fo->regexdir.get(k)); + } + for (k=0; kregexfile.size(); k++) { + regfree((regex_t *)fo->regexfile.get(k)); + } + fo->regex.destroy(); + fo->regexdir.destroy(); + fo->regexfile.destroy(); + fo->wild.destroy(); + fo->wilddir.destroy(); + fo->wildfile.destroy(); + fo->wildbase.destroy(); + fo->base.destroy(); + fo->fstype.destroy(); + fo->drivetype.destroy(); + } + incexe->opts_list.destroy(); + incexe->name_list.destroy(); + incexe->plugin_list.destroy(); + if (incexe->ignoredir) { + free(incexe->ignoredir); + } + } + fileset->include_list.destroy(); + + /* Delete FileSet Exclude lists */ + for (i=0; iexclude_list.size(); i++) { + findINCEXE *incexe = (findINCEXE *)fileset->exclude_list.get(i); + for (j=0; jopts_list.size(); j++) { + findFOPTS *fo = (findFOPTS *)incexe->opts_list.get(j); + fo->regex.destroy(); + fo->regexdir.destroy(); + fo->regexfile.destroy(); + fo->wild.destroy(); + fo->wilddir.destroy(); + fo->wildfile.destroy(); + fo->wildbase.destroy(); + fo->base.destroy(); + fo->fstype.destroy(); + fo->drivetype.destroy(); + } + incexe->opts_list.destroy(); + incexe->name_list.destroy(); + incexe->plugin_list.destroy(); + if (incexe->ignoredir) { + free(incexe->ignoredir); + } + } + fileset->exclude_list.destroy(); + free(fileset); + } + ff->fileset = NULL; + ff->mount_points.destroy(); + Dmsg0(100, "Calling term_find_files\n"); + term_find_files(jcr->ff); + jcr->ff = NULL; + Dmsg0(100, "Done with term_find_files\n"); + pthread_cond_destroy(&jcr->job_start_wait); + free_jcr(jcr); /* destroy 
JCR record */ + Dmsg0(100, "Done with free_jcr\n"); + allow_os_suspensions(); /* FD can now be suspended */ + Dsm_check(100); + garbage_collect_memory_pool(); + return NULL; +} + + +/* + * Accept requests from a Director or a Storage daemon + */ +void *handle_connection_request(void *caller) +{ + BSOCK *bs = (BSOCK *)caller; + + if (bs->recv() > 0) { + if (strncmp(bs->msg, "Ping", 4) == 0) { + bs->fsend("2000 Ping OK\n"); + bs->destroy(); + return NULL; + } + if (bs->msglen < 25 || bs->msglen > 500) { + goto bail_out; + } + if (strncmp(bs->msg, "Hello FD: Bacula Storage", 20) ==0) { + return handle_storage_connection(bs); + } + if (strncmp(bs->msg, "Hello ", 5) == 0) { + return handle_director_request(bs); + } + } +bail_out: + Dmsg2(100, "Bad command from %s. Len=%d.\n", bs->who(), bs->msglen); + char addr[64]; + char *who = bs->get_peer(addr, sizeof(addr)) ? bs->who() : addr; + Qmsg2(NULL, M_SECURITY, 0, _("FD expecting Hello got bad command from %s. Len=%d.\n"), who, bs->msglen); + sleep(5); + bs->destroy(); + return NULL; +} + + +/* + * Test the Network between FD/SD + */ +static int fd_testnetwork_cmd(JCR *jcr) +{ + bool can_compress, ok=true; + BSOCK *sd = jcr->store_bsock; + int64_t nb=0, nb2=0; + char ed1[50]; + btime_t start, end; + + if (!sd || !jcr->dir_bsock) { + return 1; + } + if (sscanf(jcr->dir_bsock->msg, "testnetwork bytes=%lld", &nb) != 1 || nb <= 0) { + sd->fsend("2999 testnetwork command error\n"); + return 1; + } + + /* We disable the comline compression, else all numbers will be wrong */ + can_compress = sd->can_compress(); + + sd->fsend("testnetwork bytes=%lld\n", nb); + sd->clear_compress(); + + /* In the first step, we send X bytes to the SD */ + memset(sd->msg, 0xAA, sizeof_pool_memory(sd->msg)); + sd->msglen = sizeof_pool_memory(sd->msg); + + start = get_current_btime(); + for (nb2 = nb ; nb2 > 0 && ok ; nb2 -= sd->msglen) { + if (nb2 < sd->msglen) { + sd->msglen = nb2; + } + ok = sd->send(); + } + sd->signal(BNET_EOD); + end = get_current_btime() + 1; + + if (!ok) { + goto bail_out; + } + + jcr->dir_bsock->fsend("2000 OK bytes=%lld duration=%lldms write_speed=%sB/s\n", + nb, end/1000 - start/1000, + edit_uint64_with_suffix(nb * 1000000 / (end - start), ed1)); + + /* Now we receive X bytes from the SD */ + start = get_current_btime(); + for (nb2 = 0; sd->recv() > 0; nb2 += sd->msglen) { } + end = get_current_btime() + 1; + + jcr->dir_bsock->fsend("2000 OK bytes=%lld duration=%lldms read_speed=%sB/s\n", + nb2, end/1000 - start/1000, + edit_uint64_with_suffix(nb2 * 1000000 / (end - start), ed1)); + + jcr->dir_bsock->signal(BNET_CMD_OK); + +bail_out: + if (can_compress) { + sd->set_compress(); + } + if (!ok) { + jcr->dir_bsock->fsend("2999 network test failed ERR=%s\n", sd->errmsg); + jcr->dir_bsock->signal(BNET_CMD_FAILED); + } + + return 1; +} + +static int proxy_cmd(JCR *jcr) +{ + bool OK=true, fdcalled = false; + BSOCK *cons_bsock; + CONSRES *cons = jcr->director->console; + int v, maxfd; + fd_set fdset; + struct timeval tv; + + if (!cons) { + cons = (CONSRES *)GetNextRes(R_CONSOLE, NULL); + } + /* Here, dir_bsock is not really the director, this is a console */ + cons_bsock = connect_director(jcr, cons); + if (!cons_bsock) { + jcr->dir_bsock->signal(BNET_ERROR_MSG); + jcr->dir_bsock->fsend("2999 proxy error. 
ERR=%s\n", jcr->errmsg); + jcr->dir_bsock->signal(BNET_MAIN_PROMPT); + /* Error during the connect */ + return 1; + } + + /* Inform the console that the command is OK */ + jcr->dir_bsock->fsend("2000 proxy OK.\n"); + jcr->dir_bsock->signal(BNET_MAIN_PROMPT); + + maxfd = MAX(cons_bsock->m_fd, jcr->dir_bsock->m_fd) + 1; + + /* Start to forward events from one to the other + * It can be done with 2 threads, or with a select + */ + do { + FD_ZERO(&fdset); + FD_SET((unsigned)cons_bsock->m_fd, &fdset); + FD_SET((unsigned)jcr->dir_bsock->m_fd, &fdset); + + tv.tv_sec = 5; + tv.tv_usec = 0; + switch ((v = select(maxfd, &fdset, NULL, NULL, &tv))) { + case 0: /* timeout */ + OK = !jcr->is_canceled(); + break; + case -1: + Dmsg1(0, "Bad call to select ERR=%d\n", errno); + OK = false; + default: +#ifdef HAVE_TLS + if (cons_bsock->tls && !tls_bsock_probe(cons_bsock)) { + /* maybe a session key negotiation waked up the socket */ + FD_CLR(cons_bsock->m_fd, &fdset); + } + if (jcr->dir_bsock->tls && !tls_bsock_probe(jcr->dir_bsock)) { + /* maybe a session key negotiation waked up the socket */ + FD_CLR(jcr->dir_bsock->m_fd, &fdset); + } +#endif + break; + } + Dmsg1(DT_NETWORK, "select = %d\n", v); + if (OK) { + if (FD_ISSET(cons_bsock->m_fd, &fdset)) { + v = cons_bsock->recv(); + if (v == BNET_SIGNAL) { + if (cons_bsock->msglen == BNET_FDCALLED) { + OK = false; + fdcalled = true; + } else { + jcr->dir_bsock->signal(cons_bsock->msglen); + } + + } else if (v >= 0) { + jcr->dir_bsock->fsend("%s", cons_bsock->msg); + + } else { + /* We should not have such kind of message */ + OK = false; + } + } + if (FD_ISSET(jcr->dir_bsock->m_fd, &fdset)) { + v = jcr->dir_bsock->recv(); + if (v == BNET_SIGNAL) { + cons_bsock->signal(jcr->dir_bsock->msglen); + } else if (v >= 0) { + cons_bsock->fsend("%s", jcr->dir_bsock->msg); + } else { + /* We should not have such kind of message */ + OK = false; + } + } + } + if (cons_bsock->is_error() || jcr->dir_bsock->is_error()) { + OK = false; + } + } while (OK && !jcr->is_canceled()); + + /* Close the socket, nothing more will come */ + jcr->dir_bsock->signal(BNET_TERMINATE); + jcr->dir_bsock->close(); + if (fdcalled) { + handle_connection_request(cons_bsock); /* will release the socket */ + } else { + free_bsock(cons_bsock); + } + return 1; +} + +static int sm_dump_cmd(JCR *jcr) +{ + close_memory_pool(); + sm_dump(false, true); + jcr->dir_bsock->fsend("2000 sm_dump OK\n"); + return 1; +} + +#ifdef DEVELOPER +static int exit_cmd(JCR *jcr) +{ + jcr->dir_bsock->fsend("2000 exit OK\n"); + terminate_filed(0); + return 0; +} +#endif + +/* + * Hello from Director he must identify himself and provide his + * password. 
+ */ +static int hello_cmd(JCR *jcr) +{ + Dmsg0(120, "Calling Authenticate\n"); + if (!validate_dir_hello(jcr)) { + return 0; + } + if (!authenticate_director(jcr)) { + return 0; + } + Dmsg0(120, "OK Authenticate\n"); + jcr->authenticated = true; + + dequeue_messages(jcr); /* dequeue any daemon messages */ + return 1; +} + +/* + * Cancel a Job + */ +static int cancel_cmd(JCR *jcr) +{ + BSOCK *dir = jcr->dir_bsock; + char Job[MAX_NAME_LENGTH]; + JCR *cjcr; + int status; + const char *reason; + + if (sscanf(dir->msg, "cancel Job=%127s", Job) == 1) { + status = JS_Canceled; + reason = "canceled"; + } else if (sscanf(dir->msg, "stop Job=%127s", Job) == 1) { + status = JS_Incomplete; + reason = "stopped"; + } else { + dir->fsend(_("2902 Error scanning cancel command.\n")); + goto bail_out; + } + if (!(cjcr=get_jcr_by_full_name(Job))) { + dir->fsend(_("2901 Job %s not found.\n"), Job); + } else { + generate_plugin_event(cjcr, bEventCancelCommand, NULL); + cjcr->setJobStatus(status); + if (cjcr->store_bsock) { + cjcr->store_bsock->cancel(); + } + cjcr->my_thread_send_signal(TIMEOUT_SIGNAL); + free_jcr(cjcr); + dir->fsend(_("2001 Job \"%s\" marked to be %s.\n"), + Job, reason); + } + +bail_out: + dir->signal(BNET_EOD); + return 1; +} + +/** + * Set bandwidth limit as requested by the Director + * + */ +static int setbandwidth_cmd(JCR *jcr) +{ + BSOCK *dir = jcr->dir_bsock; + int64_t bw=0; + JCR *cjcr; + char Job[MAX_NAME_LENGTH]; + *Job=0; + + if (sscanf(dir->msg, setbandwidth, &bw, Job) != 2 || bw < 0) { + pm_strcpy(jcr->errmsg, dir->msg); + dir->fsend(_("2991 Bad setbandwidth command: %s\n"), jcr->errmsg); + return 0; + } + + if (*Job) { + if(!(cjcr=get_jcr_by_full_name(Job))) { + dir->fsend(_("2901 Job %s not found.\n"), Job); + } else { + cjcr->max_bandwidth = bw; + if (cjcr->store_bsock) { + cjcr->store_bsock->set_bwlimit(bw); + } + free_jcr(cjcr); + } + + } else { /* No job requested, apply globally */ + me->max_bandwidth_per_job = bw; /* Overwrite directive */ + foreach_jcr(cjcr) { + cjcr->max_bandwidth = bw; + if (cjcr->store_bsock) { + cjcr->store_bsock->set_bwlimit(bw); + } + } + endeach_jcr(cjcr); + } + + return dir->fsend(OKBandwidth); +} + +/** + * Set debug level as requested by the Director + * + */ +static int setdebug_cmd(JCR *jcr) +{ + BSOCK *dir = jcr->dir_bsock; + int32_t trace, lvl; + int32_t hangup = -1; + int32_t blowup = -1; + int64_t level, level_tags = 0; + int scan; + char options[60]; + char tags[512]; + + Dmsg1(50, "setdebug_cmd: %s", dir->msg); + tags[0] = options[0] = 0; + scan = sscanf(dir->msg, "setdebug=%ld trace=%ld hangup=%ld blowup=%ld" + " options=%55s tags=%511s", + &lvl, &trace, &hangup, &blowup, options, tags); + if (scan != 6) { + scan = sscanf(dir->msg, "setdebug=%ld trace=%ld hangup=%ld", + &lvl, &trace, &hangup); + if (scan != 3) { + Dmsg2(20, "sscanf failed: msg=%s scan=%d\n", dir->msg, scan); + if (sscanf(dir->msg, "setdebug=%ld trace=%ld", &lvl, &trace) != 2) { + pm_strcpy(jcr->errmsg, dir->msg); + dir->fsend(_("2991 Bad setdebug command: %s\n"), jcr->errmsg); + return 0; + } else { + hangup = -1; + } + } + } + level = lvl; + set_trace(trace); + set_hangup(hangup); + set_blowup(blowup); + if (!debug_parse_tags(tags, &level_tags)) { + *tags = 0; + } + if (level >= 0) { + debug_level = level; + } + debug_level_tags = level_tags; + + /* Parse specific FD options */ + for (char *p = options; *p ; p++) { + switch(*p) { + case 'i': + /* Turn on/off ignore bwrite() errors on restore */ + no_win32_write_errors = true; + break; + case 'd': + /* Turn on/off 
decomp of BackupRead() streams */ + win32decomp = true; + break; + } + } + + /* handle other options */ + set_debug_flags(options); + + Dmsg6(150, "level=%ld trace=%ld hangup=%ld blowup=%d options=%s tags=%s\n", + lvl, get_trace(), get_hangup(), get_blowup(), options, tags); + return dir->fsend(OKsetdebug, lvl, get_trace(), get_hangup(), + get_blowup(), options, tags); +} + + +static int estimate_cmd(JCR *jcr) +{ + BSOCK *dir = jcr->dir_bsock; + char ed1[50], ed2[50]; + + if (sscanf(dir->msg, estimatecmd, &jcr->listing) != 1) { + pm_strcpy(jcr->errmsg, dir->msg); + Jmsg(jcr, M_FATAL, 0, _("Bad estimate command: %s"), jcr->errmsg); + dir->fsend(_("2992 Bad estimate command.\n")); + return 0; + } + make_estimate(jcr); + dir->fsend(OKest, edit_uint64_with_commas(jcr->num_files_examined, ed1), + edit_uint64_with_commas(jcr->JobBytes, ed2)); + dir->signal(BNET_EOD); + return 1; +} + +/** + * Get JobId and Storage Daemon Authorization key from Director + */ +static int job_cmd(JCR *jcr) +{ + BSOCK *dir = jcr->dir_bsock; + POOL_MEM sd_auth_key(PM_MESSAGE); + sd_auth_key.check_size(dir->msglen); + + if (sscanf(dir->msg, jobcmd, &jcr->JobId, jcr->Job, + &jcr->VolSessionId, &jcr->VolSessionTime, + sd_auth_key.c_str()) != 5) { + pm_strcpy(jcr->errmsg, dir->msg); + Jmsg(jcr, M_FATAL, 0, _("Bad Job Command: %s"), jcr->errmsg); + dir->fsend(BADjob); + return 0; + } + set_jcr_in_tsd(jcr); + set_storage_auth_key(jcr, sd_auth_key.c_str()); + Dmsg2(120, "JobId=%d Auth=%s\n", jcr->JobId, jcr->sd_auth_key); + Mmsg(jcr->errmsg, "JobId=%d Job=%s", jcr->JobId, jcr->Job); + new_plugins(jcr); /* instantiate plugins for this jcr */ + generate_plugin_event(jcr, bEventJobStart, (void *)jcr->errmsg); +#ifdef HAVE_WIN32 + return dir->fsend(OKjob, VERSION, LSMDATE, win_os, DISTNAME, DISTVER); +#else + return dir->fsend(OKjob, VERSION, LSMDATE, HOST_OS, DISTNAME, DISTVER); +#endif +} + +extern "C" char *job_code_callback_filed(JCR *jcr, const char* param, char *buf, int buflen) +{ + switch (param[0]) { + case 'D': + if (jcr->director) { + return jcr->director->hdr.name; + } + break; + case 'S': + return jcr->PrevJob; + } + return NULL; + +} + +static int runbefore_cmd(JCR *jcr) +{ + bool ok; + BSOCK *dir = jcr->dir_bsock; + POOLMEM *cmd = get_memory(dir->msglen+1); + RUNSCRIPT *script; + + Dmsg1(100, "runbefore_cmd: %s", dir->msg); + if (sscanf(dir->msg, runbefore, cmd) != 1) { + pm_strcpy(jcr->errmsg, dir->msg); + Jmsg1(jcr, M_FATAL, 0, _("Bad RunBeforeJob command: %s\n"), jcr->errmsg); + dir->fsend(_("2905 Bad RunBeforeJob command.\n")); + free_memory(cmd); + return 0; + } + unbash_spaces(cmd); + + /* Run the command now */ + script = new_runscript(); + script->set_job_code_callback(job_code_callback_filed); + script->set_command(cmd); + script->when = SCRIPT_Before; + ok = script->run(jcr, "ClientRunBeforeJob"); + free_runscript(script); + + free_memory(cmd); + if (ok) { + dir->fsend(OKRunBefore); + return 1; + } else { + dir->fsend(_("2905 Bad RunBeforeJob command.\n")); + return 0; + } +} + +static int runbeforenow_cmd(JCR *jcr) +{ + BSOCK *dir = jcr->dir_bsock; + + run_scripts(jcr, jcr->RunScripts, "ClientBeforeJob"); + if (job_canceled(jcr)) { + dir->fsend(_("2905 Bad RunBeforeNow command.\n")); + Dmsg0(100, "Back from run_scripts ClientBeforeJob now: FAILED\n"); + return 0; + } else { + dir->fsend(OKRunBeforeNow); + Dmsg0(100, "Back from run_scripts ClientBeforeJob now: OK\n"); + return 1; + } +} + +static int runafter_cmd(JCR *jcr) +{ + BSOCK *dir = jcr->dir_bsock; + POOLMEM *msg = get_memory(dir->msglen+1); + 
RUNSCRIPT *cmd; + + Dmsg1(100, "runafter_cmd: %s", dir->msg); + if (sscanf(dir->msg, runafter, msg) != 1) { + pm_strcpy(jcr->errmsg, dir->msg); + Jmsg1(jcr, M_FATAL, 0, _("Bad RunAfter command: %s\n"), jcr->errmsg); + dir->fsend(_("2905 Bad RunAfterJob command.\n")); + free_memory(msg); + return 0; + } + unbash_spaces(msg); + + cmd = new_runscript(); + cmd->set_job_code_callback(job_code_callback_filed); + cmd->set_command(msg); + cmd->on_success = true; + cmd->on_failure = false; + cmd->when = SCRIPT_After; + + jcr->RunScripts->append(cmd); + + free_pool_memory(msg); + return dir->fsend(OKRunAfter); +} + +static int runscript_cmd(JCR *jcr) +{ + BSOCK *dir = jcr->dir_bsock; + POOLMEM *msg = get_memory(dir->msglen+1); + int on_success, on_failure, fail_on_error; + + RUNSCRIPT *cmd = new_runscript() ; + cmd->set_job_code_callback(job_code_callback_filed); + + Dmsg1(100, "runscript_cmd: '%s'\n", dir->msg); + /* Note, we cannot sscanf into bools */ + if (sscanf(dir->msg, runscript, &on_success, + &on_failure, + &fail_on_error, + &cmd->when, + msg) != 5) { + pm_strcpy(jcr->errmsg, dir->msg); + Jmsg1(jcr, M_FATAL, 0, _("Bad RunScript command: %s\n"), jcr->errmsg); + dir->fsend(_("2905 Bad RunScript command.\n")); + free_runscript(cmd); + free_memory(msg); + return 0; + } + cmd->on_success = on_success; + cmd->on_failure = on_failure; + cmd->fail_on_error = fail_on_error; + unbash_spaces(msg); + + cmd->set_command(msg); + cmd->debug(); + jcr->RunScripts->append(cmd); + + free_pool_memory(msg); + return dir->fsend(OKRunScript); +} + +/* + * This reads data sent from the Director from the + * RestoreObject table that allows us to get objects + * that were backed up (VSS .xml data) and are needed + * before starting the restore. + */ +static int restore_object_cmd(JCR *jcr) +{ + BSOCK *dir = jcr->dir_bsock; + int32_t FileIndex; + restore_object_pkt rop; + + memset(&rop, 0, sizeof(rop)); + rop.pkt_size = sizeof(rop); + rop.pkt_end = sizeof(rop); + + Dmsg1(100, "Enter restoreobject_cmd: %s", dir->msg); + if (strcmp(dir->msg, endrestoreobjectcmd) == 0) { + Dmsg0(20, "Got endrestoreobject\n"); + generate_plugin_event(jcr, bEventRestoreObject, NULL); + return dir->fsend(OKRestoreObject); + } + + rop.plugin_name = (char *)malloc(dir->msglen); + *rop.plugin_name = 0; + + if (sscanf(dir->msg, restoreobjcmd, &rop.JobId, &rop.object_len, + &rop.object_full_len, &rop.object_index, + &rop.object_type, &rop.object_compression, &FileIndex, + rop.plugin_name) != 8) { + + /* Old version, no plugin_name */ + if (sscanf(dir->msg, restoreobjcmd1, &rop.JobId, &rop.object_len, + &rop.object_full_len, &rop.object_index, + &rop.object_type, &rop.object_compression, &FileIndex) != 7) { + Dmsg0(5, "Bad restore object command\n"); + pm_strcpy(jcr->errmsg, dir->msg); + Jmsg1(jcr, M_FATAL, 0, _("Bad RestoreObject command: %s\n"), jcr->errmsg); + goto bail_out; + } + } + + unbash_spaces(rop.plugin_name); + + Dmsg7(100, "Recv object: JobId=%u objlen=%d full_len=%d objinx=%d objtype=%d " + "FI=%d plugin_name=%s\n", + rop.JobId, rop.object_len, rop.object_full_len, + rop.object_index, rop.object_type, FileIndex, rop.plugin_name); + /* Read Object name */ + if (dir->recv() < 0) { + goto bail_out; + } + Dmsg2(100, "Recv Oname object: len=%d Oname=%s\n", dir->msglen, dir->msg); + rop.object_name = bstrdup(dir->msg); + + /* Read Object */ + if (dir->recv() < 0) { + goto bail_out; + } + /* Transfer object from message buffer, and get new message buffer */ + rop.object = dir->msg; + dir->msg = get_pool_memory(PM_MESSAGE); + + /* If 
object is compressed, uncompress it */ + if (rop.object_compression == 1) { /* zlib level 9 */ + int stat; + int out_len = rop.object_full_len + 100; + POOLMEM *obj = get_memory(out_len); + Dmsg2(100, "Inflating from %d to %d\n", rop.object_len, rop.object_full_len); + stat = Zinflate(rop.object, rop.object_len, obj, out_len); + Dmsg1(100, "Zinflate stat=%d\n", stat); + if (out_len != rop.object_full_len) { + Jmsg3(jcr, M_ERROR, 0, ("Decompression failed. Len wanted=%d got=%d. Object=%s\n"), + rop.object_full_len, out_len, rop.object_name); + } + free_pool_memory(rop.object); /* release compressed object */ + rop.object = obj; /* new uncompressed object */ + rop.object_len = out_len; + } + Dmsg2(100, "Recv Object: len=%d Object=%s\n", rop.object_len, rop.object); + /* we still need to do this to detect a vss restore */ + if (strcmp(rop.object_name, "job_metadata.xml") == 0) { + Dmsg0(100, "got job metadata\n"); + jcr->got_metadata = true; + } + + generate_plugin_event(jcr, bEventRestoreObject, (void *)&rop); + + if (rop.object_name) { + free(rop.object_name); + } + if (rop.object) { + free_pool_memory(rop.object); + } + if (rop.plugin_name) { + free(rop.plugin_name); + } + + Dmsg1(100, "Send: %s", OKRestoreObject); + return 1; + +bail_out: + dir->fsend(_("2909 Bad RestoreObject command.\n")); + return 0; + +} + + +static bool init_fileset(JCR *jcr) +{ + FF_PKT *ff; + findFILESET *fileset; + + if (!jcr->ff) { + return false; + } + ff = jcr->ff; + if (ff->fileset) { + return false; + } + fileset = (findFILESET *)malloc(sizeof(findFILESET)); + memset(fileset, 0, sizeof(findFILESET)); + ff->fileset = fileset; + fileset->state = state_none; + fileset->include_list.init(1, true); + fileset->exclude_list.init(1, true); + return true; +} + +static void append_file(JCR *jcr, findINCEXE *incexe, + const char *buf, bool is_file) +{ + if (is_file) { +#ifdef HAVE_WIN32 + /* Special case for / under Win32, + * user is requesting to include all local drives + */ + if (strcmp(buf, "/") == 0) { + //list_drives(&incexe->name_list); + + } else { + incexe->name_list.append(new_dlistString(buf)); + } +#else + incexe->name_list.append(new_dlistString(buf)); +#endif /* HAVE_WIN32 */ + + } else if (me->plugin_directory) { + generate_plugin_event(jcr, bEventPluginCommand, (void *)buf); + incexe->plugin_list.append(new_dlistString(buf)); + } else { + Jmsg(jcr, M_FATAL, 0, + _("Plugin Directory not defined. Cannot use plugin: \"%s\"\n"), + buf); + } +} + +/** + * Add fname to include/exclude fileset list. First check for + * | and < and if necessary perform command. + */ +void add_file_to_fileset(JCR *jcr, const char *fname, bool is_file) +{ + findFILESET *fileset = jcr->ff->fileset; + char *p; + BPIPE *bpipe; + POOLMEM *fn; + FILE *ffd; + char buf[1000]; + int ch; + int stat; + + p = (char *)fname; + ch = (uint8_t)*p; + switch (ch) { + case '|': + p++; /* skip over | */ + fn = get_pool_memory(PM_FNAME); + fn = edit_job_codes(jcr, fn, p, "", job_code_callback_filed); + bpipe = open_bpipe(fn, 0, "r"); + if (!bpipe) { + berrno be; + Jmsg(jcr, M_FATAL, 0, _("Cannot run program: %s. ERR=%s\n"), + p, be.bstrerror()); + free_pool_memory(fn); + return; + } + free_pool_memory(fn); + while (fgets(buf, sizeof(buf), bpipe->rfd)) { + strip_trailing_junk(buf); + append_file(jcr, fileset->incexe, buf, is_file); + } + if ((stat=close_bpipe(bpipe)) != 0) { + berrno be; + Jmsg(jcr, M_FATAL, 0, _("Error running program: %s. 
stat=%d: ERR=%s\n"), + p, be.code(stat), be.bstrerror(stat)); + return; + } + break; + case '<': + Dmsg1(100, "Doing < of '%s' include on client.\n", p + 1); + p++; /* skip over < */ + if ((ffd = fopen(p, "rb")) == NULL) { + berrno be; + Jmsg(jcr, M_FATAL, 0, + _("Cannot open FileSet input file: %s. ERR=%s\n"), + p, be.bstrerror()); + return; + } + while (fgets(buf, sizeof(buf), ffd)) { + strip_trailing_junk(buf); + append_file(jcr, fileset->incexe, buf, is_file); + } + fclose(ffd); + break; + default: + append_file(jcr, fileset->incexe, fname, is_file); + break; + } +} + +findINCEXE *get_incexe(JCR *jcr) +{ + if (jcr->ff && jcr->ff->fileset) { + return jcr->ff->fileset->incexe; + } + return NULL; +} + +void set_incexe(JCR *jcr, findINCEXE *incexe) +{ + findFILESET *fileset = jcr->ff->fileset; + fileset->incexe = incexe; +} + + +/** + * Define a new Exclude block in the FileSet + */ +findINCEXE *new_exclude(JCR *jcr) +{ + findFILESET *fileset = jcr->ff->fileset; + + /* New exclude */ + fileset->incexe = (findINCEXE *)malloc(sizeof(findINCEXE)); + memset(fileset->incexe, 0, sizeof(findINCEXE)); + fileset->incexe->opts_list.init(1, true); + fileset->incexe->name_list.init(); + fileset->incexe->plugin_list.init(); + fileset->exclude_list.append(fileset->incexe); + return fileset->incexe; +} + +/** + * Define a new Include block in the FileSet + */ +findINCEXE *new_include(JCR *jcr) +{ + findFILESET *fileset = jcr->ff->fileset; + + /* New include */ + fileset->incexe = (findINCEXE *)malloc(sizeof(findINCEXE)); + memset(fileset->incexe, 0, sizeof(findINCEXE)); + fileset->incexe->opts_list.init(1, true); + fileset->incexe->name_list.init(); /* for dlist; was 1,true for alist */ + fileset->incexe->plugin_list.init(); + fileset->include_list.append(fileset->incexe); + return fileset->incexe; +} + +/** + * Define a new preInclude block in the FileSet + * That is the include is prepended to the other + * Includes. This is used for plugin exclusions. 
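add_file_to_fileset() above treats a leading '|' as "run this program and add each output line to the FileSet" and a leading '<' as "read entries from this file". Here is a standalone sketch of the '|' case using the standard popen()/pclose() in place of Bacula's open_bpipe()/close_bpipe(); add_files_from_command() is a hypothetical helper.

   #include <cstdio>
   #include <cstring>

   /* Read one entry per line from a program's output, the same idea as the
    * '|' form of a FileSet File= line. */
   static int add_files_from_command(const char *cmd)
   {
      FILE *fp = popen(cmd, "r");
      if (!fp) {
         fprintf(stderr, "Cannot run program: %s\n", cmd);
         return -1;
      }
      char buf[1000];
      int count = 0;
      while (fgets(buf, sizeof(buf), fp)) {
         buf[strcspn(buf, "\r\n")] = 0;          /* strip trailing junk */
         if (*buf) {
            printf("would add to include list: %s\n", buf);
            count++;
         }
      }
      int stat = pclose(fp);
      if (stat != 0) {
         fprintf(stderr, "Program %s returned status %d\n", cmd, stat);
         return -1;
      }
      return count;
   }

   int main()
   {
      add_files_from_command("ls /etc");
      return 0;
   }
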
+ */ +findINCEXE *new_preinclude(JCR *jcr) +{ + findFILESET *fileset = jcr->ff->fileset; + + /* New pre-include */ + fileset->incexe = (findINCEXE *)malloc(sizeof(findINCEXE)); + memset(fileset->incexe, 0, sizeof(findINCEXE)); + fileset->incexe->opts_list.init(1, true); + fileset->incexe->name_list.init(); /* for dlist; was 1,true for alist */ + fileset->incexe->plugin_list.init(); + fileset->include_list.prepend(fileset->incexe); + return fileset->incexe; +} + +static findFOPTS *start_options(FF_PKT *ff) +{ + int state = ff->fileset->state; + findINCEXE *incexe = ff->fileset->incexe; + + if (state != state_options) { + ff->fileset->state = state_options; + findFOPTS *fo = (findFOPTS *)malloc(sizeof(findFOPTS)); + memset(fo, 0, sizeof(findFOPTS)); + fo->regex.init(1, true); + fo->regexdir.init(1, true); + fo->regexfile.init(1, true); + fo->wild.init(1, true); + fo->wilddir.init(1, true); + fo->wildfile.init(1, true); + fo->wildbase.init(1, true); + fo->base.init(1, true); + fo->fstype.init(1, true); + fo->drivetype.init(1, true); + incexe->current_opts = fo; + incexe->opts_list.append(fo); + } + return incexe->current_opts; +} + +/* + * Used by plugins to define a new options block + */ +void new_options(JCR *jcr, findINCEXE *incexe) +{ + if (!incexe) { + incexe = jcr->ff->fileset->incexe; + } + findFOPTS *fo = (findFOPTS *)malloc(sizeof(findFOPTS)); + memset(fo, 0, sizeof(findFOPTS)); + fo->regex.init(1, true); + fo->regexdir.init(1, true); + fo->regexfile.init(1, true); + fo->wild.init(1, true); + fo->wilddir.init(1, true); + fo->wildfile.init(1, true); + fo->wildbase.init(1, true); + fo->base.init(1, true); + fo->fstype.init(1, true); + fo->drivetype.init(1, true); + incexe->current_opts = fo; + incexe->opts_list.prepend(fo); + jcr->ff->fileset->state = state_options; +} + +/** + * Add a regex to the current fileset + */ +int add_regex_to_fileset(JCR *jcr, const char *item, int type) +{ + findFOPTS *current_opts = start_options(jcr->ff); + regex_t *preg; + int rc; + char prbuf[500]; + + preg = (regex_t *)malloc(sizeof(regex_t)); + if (current_opts->flags & FO_IGNORECASE) { + rc = regcomp(preg, item, REG_EXTENDED|REG_ICASE); + } else { + rc = regcomp(preg, item, REG_EXTENDED); + } + if (rc != 0) { + regerror(rc, preg, prbuf, sizeof(prbuf)); + regfree(preg); + free(preg); + Jmsg(jcr, M_FATAL, 0, _("REGEX %s compile error. 
ERR=%s\n"), item, prbuf); + return state_error; + } + if (type == ' ') { + current_opts->regex.append(preg); + } else if (type == 'D') { + current_opts->regexdir.append(preg); + } else if (type == 'F') { + current_opts->regexfile.append(preg); + } else { + return state_error; + } + return state_options; +} + +/** + * Add a wild card to the current fileset + */ +int add_wild_to_fileset(JCR *jcr, const char *item, int type) +{ + findFOPTS *current_opts = start_options(jcr->ff); + + if (type == ' ') { + current_opts->wild.append(bstrdup(item)); + } else if (type == 'D') { + current_opts->wilddir.append(bstrdup(item)); + } else if (type == 'F') { + current_opts->wildfile.append(bstrdup(item)); + } else if (type == 'B') { + current_opts->wildbase.append(bstrdup(item)); + } else { + return state_error; + } + return state_options; +} + + +/** + * Add options to the current fileset + */ +int add_options_to_fileset(JCR *jcr, const char *item) +{ + findFOPTS *current_opts = start_options(jcr->ff); + + set_options(current_opts, item); + return state_options; +} + +static void add_fileset(JCR *jcr, const char *item) +{ + FF_PKT *ff = jcr->ff; + findFILESET *fileset = ff->fileset; + int state = fileset->state; + findFOPTS *current_opts; + + /* Get code, optional subcode, and position item past the dividing space */ + Dmsg1(100, "%s\n", item); + int code = item[0]; + if (code != '\0') { + ++item; + } + int subcode = ' '; /* A space is always a valid subcode */ + if (item[0] != '\0' && item[0] != ' ') { + subcode = item[0]; + ++item; + } + if (*item == ' ') { + ++item; + } + + /* Skip all lines we receive after an error */ + if (state == state_error) { + Dmsg0(100, "State=error return\n"); + return; + } + + /** + * The switch tests the code for validity. + * The subcode is always good if it is a space, otherwise we must confirm. + * We set state to state_error first assuming the subcode is invalid, + * requiring state to be set in cases below that handle subcodes. 
+ */
+   if (subcode != ' ') {
+      state = state_error;
+      Dmsg0(100, "Set state=error or double code.\n");
+   }
+   switch (code) {
+   case 'I':
+      (void)new_include(jcr);
+      break;
+   case 'E':
+      (void)new_exclude(jcr);
+      break;
+   case 'N':                            /* null */
+      state = state_none;
+      break;
+   case 'F':                            /* file = */
+      /* File item to include or exclude list */
+      state = state_include;
+      add_file_to_fileset(jcr, item, true);
+      break;
+   case 'P':                            /* plugin */
+      /* Plugin item to include list */
+      state = state_include;
+      add_file_to_fileset(jcr, item, false);
+      break;
+   case 'R':                            /* regex */
+      state = add_regex_to_fileset(jcr, item, subcode);
+      break;
+   case 'B':
+      current_opts = start_options(ff);
+      current_opts->base.append(bstrdup(item));
+      state = state_options;
+      break;
+   case 'X':                            /* Filetype or Drive type */
+      current_opts = start_options(ff);
+      state = state_options;
+      if (subcode == ' ') {
+         current_opts->fstype.append(bstrdup(item));
+      } else if (subcode == 'D') {
+         current_opts->drivetype.append(bstrdup(item));
+      } else {
+         state = state_error;
+      }
+      break;
+   case 'W':                            /* wild cards */
+      state = add_wild_to_fileset(jcr, item, subcode);
+      break;
+   case 'O':                            /* Options */
+      state = add_options_to_fileset(jcr, item);
+      break;
+   case 'Z':                            /* ignore dir */
+      state = state_include;
+      fileset->incexe->ignoredir = bstrdup(item);
+      break;
+   case 'D':
+      current_opts = start_options(ff);
+//    current_opts->reader = bstrdup(item); /* deprecated */
+      state = state_options;
+      break;
+   case 'T':
+      current_opts = start_options(ff);
+//    current_opts->writer = bstrdup(item); /* deprecated */
+      state = state_options;
+      break;
+   case 'G':                            /* Plugin command for this Option block */
+      current_opts = start_options(ff);
+      current_opts->plugin = bstrdup(item);
+      state = state_options;
+      break;
+   default:
+      Jmsg(jcr, M_FATAL, 0, _("Invalid FileSet command: %s\n"), item);
+      state = state_error;
+      break;
+   }
+   ff->fileset->state = state;
+}
+
+static bool term_fileset(JCR *jcr)
+{
+   FF_PKT *ff = jcr->ff;
+
+#ifdef xxx_DEBUG_CODE
+   findFILESET *fileset = ff->fileset;
+   int i, j, k;
+
+   for (i=0; i<fileset->include_list.size(); i++) {
+      findINCEXE *incexe = (findINCEXE *)fileset->include_list.get(i);
+      Dmsg0(400, "I\n");
+      for (j=0; j<incexe->opts_list.size(); j++) {
+         findFOPTS *fo = (findFOPTS *)incexe->opts_list.get(j);
+         for (k=0; k<fo->regex.size(); k++) {
+            Dmsg1(400, "R %s\n", (char *)fo->regex.get(k));
+         }
+         for (k=0; k<fo->regexdir.size(); k++) {
+            Dmsg1(400, "RD %s\n", (char *)fo->regexdir.get(k));
+         }
+         for (k=0; k<fo->regexfile.size(); k++) {
+            Dmsg1(400, "RF %s\n", (char *)fo->regexfile.get(k));
+         }
+         for (k=0; k<fo->wild.size(); k++) {
+            Dmsg1(400, "W %s\n", (char *)fo->wild.get(k));
+         }
+         for (k=0; k<fo->wilddir.size(); k++) {
+            Dmsg1(400, "WD %s\n", (char *)fo->wilddir.get(k));
+         }
+         for (k=0; k<fo->wildfile.size(); k++) {
+            Dmsg1(400, "WF %s\n", (char *)fo->wildfile.get(k));
+         }
+         for (k=0; k<fo->wildbase.size(); k++) {
+            Dmsg1(400, "WB %s\n", (char *)fo->wildbase.get(k));
+         }
+         for (k=0; k<fo->base.size(); k++) {
+            Dmsg1(400, "B %s\n", (char *)fo->base.get(k));
+         }
+         for (k=0; k<fo->fstype.size(); k++) {
+            Dmsg1(400, "X %s\n", (char *)fo->fstype.get(k));
+         }
+         for (k=0; k<fo->drivetype.size(); k++) {
+            Dmsg1(400, "XD %s\n", (char *)fo->drivetype.get(k));
+         }
+      }
+      if (incexe->ignoredir) {
+         Dmsg1(400, "Z %s\n", incexe->ignoredir);
+      }
+      dlistString *node;
+      foreach_dlist(node, &incexe->name_list) {
+         Dmsg1(400, "F %s\n", node->c_str());
+      }
+      foreach_dlist(node, &incexe->plugin_list) {
+         Dmsg1(400, "P %s\n", node->c_str());
+      }
+   }
+   for (i=0; i<fileset->exclude_list.size(); i++) {
+      findINCEXE *incexe = (findINCEXE *)fileset->exclude_list.get(i);
+      Dmsg0(400, "E\n");
+      for (j=0; j<incexe->opts_list.size(); j++) {
+         findFOPTS *fo = (findFOPTS *)incexe->opts_list.get(j);
+         for (k=0; k<fo->regex.size(); k++) {
+            Dmsg1(400, "R %s\n", (char *)fo->regex.get(k));
+         }
+         for (k=0; k<fo->regexdir.size(); k++) {
+            Dmsg1(400, "RD %s\n", (char *)fo->regexdir.get(k));
+         }
+         for (k=0; k<fo->regexfile.size(); k++) {
+            Dmsg1(400, "RF %s\n", (char *)fo->regexfile.get(k));
+         }
+         for (k=0; k<fo->wild.size(); k++) {
+            Dmsg1(400, "W %s\n", (char *)fo->wild.get(k));
+         }
+         for (k=0; k<fo->wilddir.size(); k++) {
+            Dmsg1(400, "WD %s\n", (char *)fo->wilddir.get(k));
+         }
+         for (k=0; k<fo->wildfile.size(); k++) {
+            Dmsg1(400, "WF %s\n", (char *)fo->wildfile.get(k));
+         }
+         for (k=0; k<fo->wildbase.size(); k++) {
+            Dmsg1(400, "WB %s\n", (char *)fo->wildbase.get(k));
+         }
+         for (k=0; k<fo->base.size(); k++) {
+            Dmsg1(400, "B %s\n", (char *)fo->base.get(k));
+         }
+         for (k=0; k<fo->fstype.size(); k++) {
+            Dmsg1(400, "X %s\n", (char *)fo->fstype.get(k));
+         }
+         for (k=0; k<fo->drivetype.size(); k++) {
+            Dmsg1(400, "XD %s\n", (char *)fo->drivetype.get(k));
+         }
+      }
+      dlistString *node;
+      foreach_dlist(node, &incexe->name_list) {
+         Dmsg1(400, "F %s\n", node->c_str());
+      }
+      foreach_dlist(node, &incexe->plugin_list) {
+         Dmsg1(400, "P %s\n", node->c_str());
+      }
+   }
+#endif
+   return ff->fileset->state != state_error;
+}
+
+
+/**
+ * As an optimization, we should do this during
+ * "compile" time in filed/job.c, and keep only a bit mask
+ * and the Verify options.
+ */
+static int set_options(findFOPTS *fo, const char *opts)
+{
+   int j;
+   const char *p;
+   char strip[100];
+
+// Commented out as it is not backward compatible - KES
+
+   for (p=opts; *p; p++) {
+      switch (*p) {
+      case 'a':                          /* always replace */
+      case '0':                          /* no option */
+         break;
+      case 'e':
+         fo->flags |= FO_EXCLUDE;
+         break;
+      case 'f':
+         fo->flags |= FO_MULTIFS;
+         break;
+      case 'h':                          /* no recursion */
+         fo->flags |= FO_NO_RECURSION;
+         break;
+      case 'H':                          /* no hard link handling */
+         fo->flags |= FO_NO_HARDLINK;
+         break;
+      case 'i':
+         fo->flags |= FO_IGNORECASE;
+         break;
+      case 'M':                          /* MD5 */
+         fo->flags |= FO_MD5;
+         break;
+      case 'n':
+         fo->flags |= FO_NOREPLACE;
+         break;
+      case 'p':                          /* use portable data format */
+         fo->flags |= FO_PORTABLE;
+         break;
+      case 'R':                          /* Resource forks and Finder Info */
+         fo->flags |= FO_HFSPLUS;
+         break;
+      case 'r':                          /* read fifo */
+         fo->flags |= FO_READFIFO;
+         break;
+      case 'S':
+         switch(*(p + 1)) {
+         case '1':
+            fo->flags |= FO_SHA1;
+            p++;
+            break;
+#ifdef HAVE_SHA2
+         case '2':
+            fo->flags |= FO_SHA256;
+            p++;
+            break;
+         case '3':
+            fo->flags |= FO_SHA512;
+            p++;
+            break;
+#endif
+         default:
+            /*
+             * If 2 or 3 is seen here, SHA2 is not configured, so
+             * eat the option, and drop back to SHA-1.
+ */ + if (p[1] == '2' || p[1] == '3') { + p++; + } + fo->flags |= FO_SHA1; + break; + } + break; + case 's': + fo->flags |= FO_SPARSE; + break; + case 'm': + fo->flags |= FO_MTIMEONLY; + break; + case 'k': + fo->flags |= FO_KEEPATIME; + break; + case 'A': + fo->flags |= FO_ACL; + break; + case 'V': /* verify options */ + /* Copy Verify Options */ + for (j=0; *p && *p != ':'; p++) { + fo->VerifyOpts[j] = *p; + if (j < (int)sizeof(fo->VerifyOpts) - 1) { + j++; + } + } + fo->VerifyOpts[j] = 0; + break; + case 'C': /* accurate options */ + /* Copy Accurate Options */ + for (j=0; *p && *p != ':'; p++) { + fo->AccurateOpts[j] = *p; + if (j < (int)sizeof(fo->AccurateOpts) - 1) { + j++; + } + } + fo->AccurateOpts[j] = 0; + break; + case 'J': /* Basejob options */ + /* Copy BaseJob Options */ + for (j=0; *p && *p != ':'; p++) { + fo->BaseJobOpts[j] = *p; + if (j < (int)sizeof(fo->BaseJobOpts) - 1) { + j++; + } + } + fo->BaseJobOpts[j] = 0; + break; + case 'P': /* strip path */ + /* Get integer */ + p++; /* skip P */ + for (j=0; *p && *p != ':'; p++) { + strip[j] = *p; + if (j < (int)sizeof(strip) - 1) { + j++; + } + } + strip[j] = 0; + fo->strip_path = atoi(strip); + fo->flags |= FO_STRIPPATH; + Dmsg2(100, "strip=%s strip_path=%d\n", strip, fo->strip_path); + break; + case 'w': + fo->flags |= FO_IF_NEWER; + break; + case 'W': + fo->flags |= FO_ENHANCEDWILD; + break; + case 'Z': /* compression */ + p++; /* skip Z */ + if (*p >= '0' && *p <= '9') { + fo->flags |= FO_COMPRESS; + fo->Compress_algo = COMPRESS_GZIP; + fo->Compress_level = *p - '0'; + } + else if (*p == 'o') { + fo->flags |= FO_COMPRESS; + fo->Compress_algo = COMPRESS_LZO1X; + fo->Compress_level = 1; /* not used with LZO */ + } + break; + case 'K': + fo->flags |= FO_NOATIME; + break; + case 'c': + fo->flags |= FO_CHKCHANGES; + break; + case 'N': + fo->flags |= FO_HONOR_NODUMP; + break; + case 'X': + fo->flags |= FO_XATTR; + break; + default: + Jmsg1(NULL, M_ERROR, 0, _("Unknown include/exclude option: %c\n"), *p); + break; + } + } + return state_options; +} + + +/** + * Director is passing his Fileset + */ +static int fileset_cmd(JCR *jcr) +{ + POOL_MEM buf(PM_MESSAGE); + BSOCK *dir = jcr->dir_bsock; + int rtnstat; + +#if HAVE_WIN32 + jcr->Snapshot = (strstr(dir->msg, "vss=1") != NULL); +#else + jcr->Snapshot = (strstr(dir->msg, "snap=1") != NULL); +#endif + if (!init_fileset(jcr)) { + return 0; + } + while (dir->recv() >= 0) { + strip_trailing_junk(dir->msg); + Dmsg1(500, "Fileset: %s\n", dir->msg); + pm_strcpy(buf, dir->msg); + add_fileset(jcr, buf.c_str()); + } + if (!term_fileset(jcr)) { + return 0; + } + rtnstat = dir->fsend(OKinc); + generate_plugin_event(jcr, bEventEndFileSet); + return rtnstat; +} + + +/* + * The Director sends us the component info file, which + * we will in turn pass to the VSS plugin. + */ +static int component_cmd(JCR *jcr) +{ + BSOCK *dir = jcr->dir_bsock; + + while (dir->recv() >= 0) { + Dmsg1(200, "filedmsg); + generate_plugin_event(jcr, bEventComponentInfo, (void *)dir->msg); + } + return dir->fsend(OKComponentInfo); +} + + +/** + * Get backup level from Director + * + * Note: there are odd things such as accurate_differential, + * and accurate_incremental that are passed in level, thus + * the calls to strstr() below. 
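
A standalone sketch (not part of the Bacula sources) of how add_fileset() above splits each
FileSet line sent by the Director into a code letter, an optional subcode and the argument.
The sample lines and the "Z6MS2" options string are hypothetical, but the letters follow the
switch in add_fileset() and the option characters handled by set_options().

#include <stdio.h>

static void split_fileset_line(const char *line)
{
   int code = line[0];                  /* command letter, e.g. 'I', 'F', 'O' */
   int subcode = ' ';                   /* a space is always a valid subcode  */
   if (code != '\0') {
      line++;
   }
   if (line[0] != '\0' && line[0] != ' ') {
      subcode = line[0];                /* e.g. 'D' in an "RD <regex>" line   */
      line++;
   }
   if (*line == ' ') {
      line++;                           /* skip the dividing space            */
   }
   printf("code=%c subcode=%c item=\"%s\"\n", code, subcode, line);
}

int main()
{
   split_fileset_line("I");             /* start a new Include block                        */
   split_fileset_line("F /etc");        /* add a path to the current block                  */
   split_fileset_line("RD .*tmp$");     /* directory regex (subcode 'D')                    */
   split_fileset_line("O Z6MS2");       /* options: GZIP level 6, MD5, SHA-256 if available */
   return 0;
}
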
+ * + */ +static int level_cmd(JCR *jcr) +{ + BSOCK *dir = jcr->dir_bsock; + POOLMEM *level, *buf = NULL; + int mtime_only; + + level = get_memory(dir->msglen+1); + Dmsg1(10, "level_cmd: %s", dir->msg); + + /* keep compatibility with older directors */ + if (strstr(dir->msg, "accurate")) { + jcr->accurate = true; + } + if (strstr(dir->msg, "rerunning")) { + jcr->rerunning = true; + } + if (sscanf(dir->msg, "level = %s ", level) != 1) { + goto bail_out; + } + /* Base backup requested? */ + if (strcasecmp(level, "base") == 0) { + jcr->setJobLevel(L_BASE); + /* Full backup requested? */ + } else if (strcasecmp(level, "full") == 0) { + jcr->setJobLevel(L_FULL); + } else if (strstr(level, "differential")) { + jcr->setJobLevel(L_DIFFERENTIAL); + free_memory(level); + return 1; + } else if (strstr(level, "incremental")) { + jcr->setJobLevel(L_INCREMENTAL); + free_memory(level); + return 1; + /* + * We get his UTC since time, then sync the clocks and correct it + * to agree with our clock. + */ + } else if (strcasecmp(level, "since_utime") == 0) { + buf = get_memory(dir->msglen+1); + utime_t since_time, adj; + btime_t his_time, bt_start, rt=0, bt_adj=0, his_time_prev=0, n=0; + if (jcr->getJobLevel() == L_NONE) { + jcr->setJobLevel(L_SINCE); /* if no other job level set, do it now */ + } + if (sscanf(dir->msg, "level = since_utime %s mtime_only=%d prev_job=%127s", + buf, &mtime_only, jcr->PrevJob) != 3) { + if (sscanf(dir->msg, "level = since_utime %s mtime_only=%d", + buf, &mtime_only) != 2) { + goto bail_out; + } + } + since_time = str_to_uint64(buf); /* this is the since time */ + Dmsg2(100, "since_time=%lld prev_job=%s\n", since_time, jcr->PrevJob); + char ed1[50], ed2[50]; + /* + * Sync clocks by polling him for the time. We take + * 10 samples of his time throwing out the first two. 
+ */ + for (int i=0; i<10; i++) { + bt_start = get_current_btime(); + dir->signal(BNET_BTIME); /* poll for time */ + if (dir->recv() <= 0) { /* get response */ + goto bail_out; + } + if (sscanf(dir->msg, "btime %s", buf) != 1) { + goto bail_out; + } + his_time = str_to_uint64(buf); + rt = get_current_btime() - bt_start; /* compute round trip time */ + /* skip first two results and check for leap second */ + /* if any of the FD or DIR went back in time, skip this iteration */ + if (i < 2 || (his_time_prev > 0 && his_time < his_time_prev) || rt<0) { + his_time_prev = his_time; + continue; + } + his_time_prev = his_time; + n++; + Dmsg2(100, "Dirtime=%s FDtime=%s\n", edit_uint64(his_time, ed1), + edit_uint64(bt_start, ed2)); + bt_adj += bt_start - his_time - rt/2; + Dmsg2(100, "rt=%s adj=%s\n", edit_uint64(rt, ed1), edit_uint64(bt_adj, ed2)); + } + adj = 0; + if (n > 0) { /* Should be 1 in the worst case */ + bt_adj = bt_adj / n; /* compute average time */ + Dmsg2(100, "rt=%s adj=%s\n", edit_uint64(rt, ed1), edit_uint64(bt_adj, ed2)); + adj = btime_to_utime(bt_adj); + since_time += adj; /* adjust for clock difference */ + } + /* Don't notify if time within 3 seconds */ + if (adj > 3 || adj < -3) { + int type; + if (adj > 600 || adj < -600) { + type = M_WARNING; + } else { + type = M_INFO; + } + Jmsg(jcr, type, 0, _("DIR and FD clocks differ by %lld seconds, FD automatically compensating.\n"), adj); + } + dir->signal(BNET_EOD); + + Dmsg2(100, "adj=%lld since_time=%lld\n", adj, since_time); + jcr->incremental = 1; /* set incremental or decremental backup */ + jcr->mtime = since_time; /* set since time */ + generate_plugin_event(jcr, bEventSince, (void *)(time_t)jcr->mtime); + } else { + Jmsg1(jcr, M_FATAL, 0, _("Unknown backup level: %s\n"), level); + free_memory(level); + return 0; + } + free_memory(level); + if (buf) { + free_memory(buf); + } + generate_plugin_event(jcr, bEventLevel, (void*)(intptr_t)jcr->getJobLevel()); + return dir->fsend(OKlevel); + +bail_out: + pm_strcpy(jcr->errmsg, dir->msg); + Jmsg1(jcr, M_FATAL, 0, _("Bad level command: %s\n"), jcr->errmsg); + free_memory(level); + if (buf) { + free_memory(buf); + } + return 0; +} + +/** + * Get session parameters from Director -- this is for a Restore command + * This is deprecated. It is now passed via the bsr. + */ +static int session_cmd(JCR *jcr) +{ + BSOCK *dir = jcr->dir_bsock; + + Dmsg1(100, "SessionCmd: %s", dir->msg); + if (sscanf(dir->msg, sessioncmd, jcr->VolumeName, + &jcr->VolSessionId, &jcr->VolSessionTime, + &jcr->StartFile, &jcr->EndFile, + &jcr->StartBlock, &jcr->EndBlock) != 7) { + pm_strcpy(jcr->errmsg, dir->msg); + Jmsg(jcr, M_FATAL, 0, _("Bad session command: %s"), jcr->errmsg); + return 0; + } + + return dir->fsend(OKsession); +} + +static void set_storage_auth_key(JCR *jcr, char *key) +{ + /* if no key don't update anything */ + if (!*key) { + return; + } + + /** + * We can be contacting multiple storage daemons. + * So, make sure that any old jcr->store_bsock is cleaned up. + */ + free_bsock(jcr->store_bsock); + + /** + * We can be contacting multiple storage daemons. + * So, make sure that any old jcr->sd_auth_key is cleaned up. 
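
A compact, self-contained sketch (not from the Bacula tree) of the clock sampling done in
level_cmd() above: the first two polls and any sample where a clock appears to run backwards
are discarded, each accepted sample contributes bt_start - his_time - rt/2, and the average
becomes the adjustment added to the Director's "since" time. The values in main() are made up.

#include <stdio.h>
#include <stdint.h>

static int64_t average_adjustment(const int64_t *bt_start, const int64_t *his_time,
                                  const int64_t *rt, int samples)
{
   int64_t sum = 0, prev_his = 0;
   int n = 0;

   for (int i = 0; i < samples; i++) {
      /* warm-up samples, backwards clocks and negative round trips are skipped */
      if (i < 2 || (prev_his > 0 && his_time[i] < prev_his) || rt[i] < 0) {
         prev_his = his_time[i];
         continue;
      }
      prev_his = his_time[i];
      sum += bt_start[i] - his_time[i] - rt[i] / 2;
      n++;
   }
   return n > 0 ? sum / n : 0;          /* average over the accepted samples */
}

int main()
{
   /* hypothetical btime-style samples: send times, Director replies, round trips */
   int64_t bt_start[4] = {100, 110, 120, 130};
   int64_t his_time[4] = { 96, 106, 116, 126};
   int64_t rt[4]       = {  2,   2,   2,   2};

   printf("adjustment=%lld\n", (long long)average_adjustment(bt_start, his_time, rt, 4));
   return 0;
}
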
+ */ + if (jcr->sd_auth_key) { + /* + * If we already have a Authorization key, director can do multi + * storage restore + */ + Dmsg0(5, "set multi_restore=true\n"); + jcr->multi_restore = true; + bfree(jcr->sd_auth_key); + } + + jcr->sd_auth_key = bstrdup(key); + Dmsg1(200, "set sd auth key %s\n", jcr->sd_auth_key); +} + +/** + * Get address of storage daemon from Director + * + */ +static int storage_cmd(JCR *jcr) +{ + int stored_port = 0; /* storage daemon port */ + int enable_ssl; /* enable ssl to sd */ + POOL_MEM sd_auth_key(PM_MESSAGE); + BSOCK *dir = jcr->dir_bsock; + BSOCK *sd; + + Dmsg1(100, "StorageCmd: %s", dir->msg); + sd_auth_key.check_size(dir->msglen); + if (sscanf(dir->msg, storaddr, &jcr->stored_addr, &stored_port, + &enable_ssl, sd_auth_key.c_str()) == 4) { + Dmsg1(100, "Set auth key %s\n", sd_auth_key.c_str()); + set_storage_auth_key(jcr, sd_auth_key.c_str()); + } else if (sscanf(dir->msg, storaddr_v1, &jcr->stored_addr, + &stored_port, &enable_ssl) != 3) { + pm_strcpy(jcr->errmsg, dir->msg); + Jmsg(jcr, M_FATAL, 0, _("Bad storage command: %s"), jcr->errmsg); + Pmsg1(010, "Bad storage command: %s", jcr->errmsg); + goto bail_out; + } + + + /* TODO: see if we put limit on restore and backup... */ + if (!jcr->max_bandwidth) { + if (jcr->director->max_bandwidth_per_job) { + jcr->max_bandwidth = jcr->director->max_bandwidth_per_job; + + } else if (me->max_bandwidth_per_job) { + jcr->max_bandwidth = me->max_bandwidth_per_job; + } + } + + if (stored_port != 0) { /* We are doing the connecting */ + Dmsg3(110, "Connect to storage: %s:%d ssl=%d\n", jcr->stored_addr, stored_port, + enable_ssl); + jcr->sd_calls_client = false; + sd = new_bsock(); + /* Open command communications with Storage daemon */ + /* Try to connect for 1 hour at 10 second intervals */ + sd->set_source_address(me->FDsrc_addr); + if (!sd->connect(jcr, 10, (int)me->SDConnectTimeout, me->heartbeat_interval, + _("Storage daemon"), jcr->stored_addr, NULL, stored_port, 1)) { + /* destroy() OK because sd is local */ + sd->destroy(); + Jmsg2(jcr, M_FATAL, 0, _("Failed to connect to Storage daemon: %s:%d\n"), + jcr->stored_addr, stored_port); + Dmsg2(100, "Failed to connect to Storage daemon: %s:%d\n", + jcr->stored_addr, stored_port); + goto bail_out; + } + + Dmsg0(110, "Connection OK to SD.\n"); + jcr->store_bsock = sd; + } else { /* The storage daemon called us */ + struct timeval tv; + struct timezone tz; + struct timespec timeout; + int errstat; + + free_bsock(jcr->store_bsock); + jcr->sd_calls_client = true; + + /* + * Wait for the Storage daemon to contact us to start the Job, + * when he does, we will be released, unless the 30 minutes + * expires. + */ + gettimeofday(&tv, &tz); + timeout.tv_nsec = tv.tv_usec * 1000; + timeout.tv_sec = tv.tv_sec + 30 * 60; /* wait 30 minutes */ + P(mutex); + while (jcr->sd_calls_client_bsock == NULL && !jcr->is_job_canceled()) { + errstat = pthread_cond_timedwait(&jcr->job_start_wait, &mutex, &timeout); + if (errstat == ETIMEDOUT || errstat == EINVAL || errstat == EPERM) { + break; + } + Dmsg1(800, "=== Auth cond errstat=%d\n", errstat); + } + V(mutex); + Dmsg2(800, "Auth fail or cancel for jid=%d %p\n", jcr->JobId, jcr); + + /* We should already have a storage connection! */ + if (jcr->sd_calls_client_bsock == NULL) { + Pmsg0(000, "Failed connect from Storage daemon. SD bsock=NULL.\n"); + Pmsg1(000, "Storagecmd: %s", dir->msg); + Jmsg0(jcr, M_FATAL, 0, _("Failed connect from Storage daemon. 
SD bsock=NULL.\n"));
+         goto bail_out;
+      }
+      if (jcr->is_job_canceled()) {
+         goto bail_out;
+      }
+      /* Assign the new socket to the main one */
+      jcr->lock_auth();
+      jcr->store_bsock = jcr->sd_calls_client_bsock;
+      jcr->sd_calls_client_bsock = NULL;
+      jcr->unlock_auth();
+   }
+   jcr->store_bsock->set_bwlimit(jcr->max_bandwidth);
+
+   if (!send_hello_sd(jcr, jcr->Job)) {
+      goto bail_out;
+   }
+
+   if (!authenticate_storagedaemon(jcr)) {
+      goto bail_out;
+   }
+   memset(jcr->sd_auth_key, 0, strlen(jcr->sd_auth_key));
+   Dmsg0(110, "Authenticated with SD.\n");
+
+   /* Send OK to Director */
+   return dir->fsend(OKstore);
+
+bail_out:
+   dir->fsend(BADcmd, "storage");
+   return 0;
+}
+
+#ifdef HAVE_WIN32
+/* TODO: merge find.c ? */
+static bool is_excluded(findFILESET *fileset, char *path)
+{
+   int fnm_flags=FNM_CASEFOLD;
+   int fnmode=0;
+
+   /* Now apply the Exclude { } directive */
+   for (int i=0; i<fileset->exclude_list.size(); i++) {
+      findINCEXE *incexe = (findINCEXE *)fileset->exclude_list.get(i);
+      dlistString *node;
+
+      foreach_dlist(node, &incexe->name_list) {
+         char *fname = node->c_str();
+         Dmsg2(DT_VOLUME|50, "Testing %s against %s\n", path, fname);
+         if (fnmatch(fname, path, fnmode|fnm_flags) == 0) {
+            Dmsg1(050, "Reject wild2: %s\n", path);
+            return true;
+         }
+         /* On windows, the path separator is a bit complex to handle. For
+          * example, in fnmatch(), \ is written as \\\\ in the config file, and / is
+          * different from \. So we have our own little strcmp for filenames.
+          */
+         char *p;
+         bool same=true;
+         for (p = path; *p && *fname && same ; p++, fname++) {
+            if (!((IsPathSeparator(*p) && IsPathSeparator(*fname)) ||
+                  (tolower(*p) == tolower(*fname)))) {
+               same = false;            /* Stop after the first one */
+            }
+         }
+
+         if (same) {
+            /* End of the for loop, strings look to be identical */
+            Dmsg1(DT_VOLUME|50, "Reject: %s\n", path);
+            return true;
+         }
+
+         /* Looks to be the same string, but with a trailing slash */
+         if (fname[0] && IsPathSeparator(fname[0]) && fname[1] == '\0'
+             && p[0] == '\0')
+         {
+            Dmsg1(DT_VOLUME|50, "Reject: %s\n", path);
+            return true;
+         }
+      }
+   }
+   return false;
+}
+
+/*
+ * For VSS we need to know which windows drives
+ * are used, because we create a snapshot of all used
+ * drives before operation
+ *
+ */
+static int
+get_win32_driveletters(JCR *jcr, FF_PKT *ff, char* szDrives)
+{
+   int nCount = 0;
+
+   findFILESET *fileset = ff->fileset;
+   int flags = 0;
+   char drive[4];
+
+   MTab mtab;
+   mtab.get();                          /* read the disk structure */
+
+   /* Keep this part for compatibility reasons */
+   strcpy(drive, "c:\\");
+   for (int i=0; szDrives[i] ; i++) {
+      drive[0] = szDrives[i];
+      if (mtab.addInSnapshotSet(drive)) { /* When all volumes are selected, we can stop */
+         Dmsg0(DT_VOLUME|50, "All Volumes are marked, stopping the loop here\n");
+         goto all_included;
+      }
+   }
+
+   if (fileset) {
+      dlistString *node;
+
+      for (int i=0; i<fileset->include_list.size(); i++) {
+
+         findFOPTS *fo;
+         findINCEXE *incexe = (findINCEXE *)fileset->include_list.get(i);
+
+         /* look through all files */
+         foreach_dlist(node, &incexe->name_list) {
+            char *fname = node->c_str();
+            if (mtab.addInSnapshotSet(fname)) {
+               /* When all volumes are selected, we can stop */
+               Dmsg0(DT_VOLUME|50, "All Volumes are marked, stopping the loop here\n");
+               goto all_included;
+            }
+         }
+
+         foreach_alist(fo, &incexe->opts_list) {
+            flags |= fo->flags;         /* We are looking for FO_MULTIFS and recurse */
+         }
+      }
+
+      /* TODO: it needs to be done Include by Include, but in the worst case,
+       * we take too many snapshots...
+ */ + if (flags & FO_MULTIFS) { + /* Need to add subdirectories */ + POOLMEM *fn = get_pool_memory(PM_FNAME); + MTabEntry *elt, *elt2; + int len; + + Dmsg0(DT_VOLUME|50, "OneFS is set, looking for remaining volumes\n"); + + foreach_rblist(elt, mtab.entries) { + if (elt->in_SnapshotSet) { + continue; /* Already in */ + } + /* A volume can have multiple mount points */ + for (wchar_t *p = elt->first() ; p && *p ; p = elt->next(p)) { + wchar_2_UTF8(&fn, p); + + Dmsg1(DT_VOLUME|50, "Looking for path %s\n", fn); + + /* First case, root drive (c:/, e:/, d:/), not a submount point */ + len = strlen(fn); + if (len <= 3) { + Dmsg1(DT_VOLUME|50, "Skiping %s\n", fn); + continue; + } + + /* First thing is to look in the exclude list to see if this directory + * is explicitely excluded + */ + if (is_excluded(fileset, fn)) { + Dmsg1(DT_VOLUME|50, "Looks to be excluded %s\n", fn); + continue; + } + + /* c:/vol/vol2/vol3 + * will look c:/, then c:/vol/, then c:/vol2/ and if one of them + * is selected, the sub volume will be directly marked. + */ + for (char *p1 = fn ; *p1 && !elt->in_SnapshotSet ; p1++) { + if (IsPathSeparator(*p1)) { + + char c = *(p1 + 1); + *(p1 + 1) = 0; + + /* We look for the previous directory, and if marked, we mark + * the current one as well + */ + Dmsg1(DT_VOLUME|50, "Looking for %s\n", fn); + elt2 = mtab.search(fn); + if (elt2 && elt2->in_SnapshotSet) { + Dmsg0(DT_VOLUME|50, "Put volume in SnapshotSet\n"); + elt->setInSnapshotSet(); + } + + *(p1 + 1) = c; /* restore path separator */ + } + } + } + } + free_pool_memory(fn); + } + all_included: + /* Now, we look the volume list to know which one to include */ + MTabEntry *elt; + foreach_rblist(elt, mtab.entries) { + if (elt->in_SnapshotSet) { + Dmsg1(DT_VOLUME|50,"Adding volume in mount_points list %ls\n",elt->volumeName); + nCount++; + ff->mount_points.append(bwcsdup(elt->volumeName)); + } + } + } + + return nCount; +} +#endif /* HAVE_WIN32 */ + +/* + * Do a backup. 
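
A toy illustration (not from the Bacula sources) of the mount-point walk just above: for each
volume not yet selected, the loop cuts the path after every separator and asks whether that
shorter prefix is already in the snapshot set; if a parent such as "c:/" is marked, the longer
mount point is marked as well. prefix_is_marked() is a hypothetical stand-in for mtab.search().

#include <stdio.h>
#include <string.h>

/* stand-in for the mtab lookup: in this toy only "c:/" is already marked */
static bool prefix_is_marked(const char *path)
{
   return strcmp(path, "c:/") == 0;
}

static bool parent_marked(char *path)
{
   for (char *p = path; *p; p++) {
      if (*p == '/') {
         char saved = p[1];
         p[1] = '\0';                   /* cut the path after this separator */
         bool hit = prefix_is_marked(path);
         p[1] = saved;                  /* restore it */
         if (hit) {
            return true;
         }
      }
   }
   return false;
}

int main()
{
   char mount[] = "c:/mnt/data/";
   printf("%s -> %s\n", mount, parent_marked(mount) ? "add to snapshot set" : "skip");
   return 0;
}
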
+ */ +static int backup_cmd(JCR *jcr) +{ + BSOCK *dir = jcr->dir_bsock; + BSOCK *sd = jcr->store_bsock; + int ok = 0; + int SDJobStatus; + int32_t FileIndex; + + if (sscanf(dir->msg, "backup FileIndex=%ld\n", &FileIndex) == 1) { + jcr->JobFiles = FileIndex; + Dmsg1(100, "JobFiles=%ld\n", jcr->JobFiles); + } + + /* + * If explicitly requesting FO_ACL or FO_XATTR, fail job if it + * is not available on Client machine + */ + if (jcr->ff->flags & FO_ACL && !(have_acl||have_win32)) { + Jmsg(jcr, M_FATAL, 0, _("ACL support not configured for Client.\n")); + goto cleanup; + } + if (jcr->ff->flags & FO_XATTR && !have_xattr) { + Jmsg(jcr, M_FATAL, 0, _("XATTR support not configured for Client.\n")); + goto cleanup; + } + jcr->setJobStatus(JS_Blocked); + jcr->setJobType(JT_BACKUP); + Dmsg1(100, "begin backup ff=%p\n", jcr->ff); + if (sd == NULL) { + Jmsg(jcr, M_FATAL, 0, _("Cannot contact Storage daemon\n")); + dir->fsend(BADcmd, "backup"); + goto cleanup; + } + + dir->fsend(OKbackup); + Dmsg1(110, "filed>dird: %s", dir->msg); + + /* + * Send Append Open Session to Storage daemon + */ + sd->fsend(append_open); + Dmsg1(110, ">stored: %s", sd->msg); + /** + * Expect to receive back the Ticket number + */ + if (bget_msg(sd) >= 0) { + Dmsg1(110, "msg); + if (sscanf(sd->msg, OK_open, &jcr->Ticket) != 1) { + Jmsg(jcr, M_FATAL, 0, _("Bad response to append open: %s\n"), sd->msg); + goto cleanup; + } + Dmsg1(110, "Got Ticket=%d\n", jcr->Ticket); + } else { + Jmsg(jcr, M_FATAL, 0, _("Bad response from stored to open command\n")); + goto cleanup; + } + + /** + * Send Append data command to Storage daemon + */ + sd->fsend(append_data, jcr->Ticket); + Dmsg1(110, ">stored: %s", sd->msg); + + /** + * Expect to get OK data + */ + Dmsg1(110, "msg); + if (!response(jcr, sd, OK_data, "Append Data")) { + goto cleanup; + } + + generate_daemon_event(jcr, "JobStart"); + generate_plugin_event(jcr, bEventStartBackupJob); + + if (jcr->Snapshot) { +#if defined(WIN32_VSS) + P(vss_mutex); + /* START VSS ON WIN32 */ + jcr->pVSSClient = VSSInit(); + if (jcr->pVSSClient->InitializeForBackup(jcr)) { + generate_plugin_event(jcr, bEventVssBackupAddComponents); + /* tell vss which drives to snapshot */ + char szWinDriveLetters[27]; + *szWinDriveLetters=0; + generate_plugin_event(jcr, bEventVssPrepareSnapshot, szWinDriveLetters); + if (get_win32_driveletters(jcr, jcr->ff, szWinDriveLetters)) { + Jmsg(jcr, M_INFO, 0, _("Generate VSS snapshots. Driver=\"%s\"\n"), + jcr->pVSSClient->GetDriverName()); + + if (!jcr->pVSSClient->CreateSnapshots(&jcr->ff->mount_points)) { + berrno be; + Jmsg(jcr, M_FATAL, 0, _("VSS CreateSnapshots failed. ERR=%s\n"), + be.bstrerror()); + } else { + /* inform user about writer states */ + for (int i=0; i < (int)jcr->pVSSClient->GetWriterCount(); i++) { + if (jcr->pVSSClient->GetWriterState(i) < 1) { + Jmsg(jcr, M_INFO, 0, _("VSS Writer (PrepareForBackup): %s\n"), + jcr->pVSSClient->GetWriterInfo(i)); + } + } + } + } else { + Jmsg(jcr, M_WARNING, 0, _("No drive letters found for generating VSS snapshots.\n")); + } + } else { + berrno be; + Jmsg(jcr, M_FATAL, 0, _("VSS was not initialized properly. 
ERR=%s\n"), + be.bstrerror()); + } + V(vss_mutex); +#else + Dmsg0(10, "Open a snapshot session\n"); + /* TODO: See if we abort the job */ + jcr->Snapshot = open_snapshot_backup_session(jcr); +#endif + } + /* Call RunScript just after the Snapshot creation, usually, we restart services */ + run_scripts(jcr, jcr->RunScripts, "ClientAfterVSS"); + + /* + * Send Files to Storage daemon + */ + Dmsg1(110, "begin blast ff=%p\n", (FF_PKT *)jcr->ff); + if (!blast_data_to_storage_daemon(jcr, NULL)) { + jcr->setJobStatus(JS_ErrorTerminated); + sd->suppress_error_messages(true); + Dmsg0(110, "Error in blast_data.\n"); + } else { + jcr->setJobStatus(JS_Terminated); + /* Note, the above set status will not override an error */ + if (!(jcr->JobStatus == JS_Terminated || jcr->JobStatus == JS_Warnings)) { + sd->suppress_error_messages(true); + goto cleanup; /* bail out now */ + } + /** + * Expect to get response to append_data from Storage daemon + */ + if (!response(jcr, sd, OK_append, "Append Data")) { + jcr->setJobStatus(JS_ErrorTerminated); + goto cleanup; + } + + /** + * Send Append End Data to Storage daemon + */ + sd->fsend(append_end, jcr->Ticket); + /* Get end OK */ + if (!response(jcr, sd, OK_end, "Append End")) { + jcr->setJobStatus(JS_ErrorTerminated); + goto cleanup; + } + + /** + * Send Append Close to Storage daemon + */ + sd->fsend(append_close, jcr->Ticket); + while (bget_msg(sd) >= 0) { /* stop on signal or error */ + if (sscanf(sd->msg, OK_close, &SDJobStatus) == 1) { + ok = 1; + Dmsg2(200, "SDJobStatus = %d %c\n", SDJobStatus, (char)SDJobStatus); + } + } + if (!ok) { + Jmsg(jcr, M_FATAL, 0, _("Append Close with SD failed.\n")); + goto cleanup; + } + if (!(SDJobStatus == JS_Terminated || SDJobStatus == JS_Warnings || + SDJobStatus == JS_Incomplete)) { + Jmsg(jcr, M_FATAL, 0, _("Bad status %d %c returned from Storage Daemon.\n"), + SDJobStatus, (char)SDJobStatus); + } + } + +cleanup: +#if defined(WIN32_VSS) + if (jcr->Snapshot) { + Win32ConvCleanupCache(); + if (jcr->pVSSClient) { + jcr->pVSSClient->DestroyWriterInfo(); + } + } +#endif + generate_plugin_event(jcr, bEventEndBackupJob); + return 0; /* return and stop command loop */ +} + +/** + * Do a Verify for Director + * + */ +static int verify_cmd(JCR *jcr) +{ + BSOCK *dir = jcr->dir_bsock; + BSOCK *sd = jcr->store_bsock; + char level[100]; + + jcr->setJobType(JT_VERIFY); + if (sscanf(dir->msg, verifycmd, level) != 1) { + dir->fsend(_("2994 Bad verify command: %s\n"), dir->msg); + return 0; + } + + if (strcasecmp(level, "init") == 0) { + jcr->setJobLevel(L_VERIFY_INIT); + } else if (strcasecmp(level, "catalog") == 0){ + jcr->setJobLevel(L_VERIFY_CATALOG); + } else if (strcasecmp(level, "volume") == 0){ + jcr->setJobLevel(L_VERIFY_VOLUME_TO_CATALOG); + } else if (strcasecmp(level, "data") == 0){ + jcr->setJobLevel(L_VERIFY_DATA); + } else if (strcasecmp(level, "disk_to_catalog") == 0) { + jcr->setJobLevel(L_VERIFY_DISK_TO_CATALOG); + } else { + dir->fsend(_("2994 Bad verify level: %s\n"), dir->msg); + return 0; + } + + dir->fsend(OKverify); + + generate_daemon_event(jcr, "JobStart"); + generate_plugin_event(jcr, bEventLevel,(void *)(intptr_t)jcr->getJobLevel()); + generate_plugin_event(jcr, bEventStartVerifyJob); + + Dmsg1(110, "filed>dird: %s", dir->msg); + + switch (jcr->getJobLevel()) { + case L_VERIFY_INIT: + case L_VERIFY_CATALOG: + do_verify(jcr); + break; + case L_VERIFY_DATA: + case L_VERIFY_VOLUME_TO_CATALOG: + if (!open_sd_read_session(jcr)) { + return 0; + } + start_dir_heartbeat(jcr); + do_verify_volume(jcr); + 
stop_dir_heartbeat(jcr); + /* + * Send Close session command to Storage daemon + */ + sd->fsend(read_close, jcr->Ticket); + Dmsg1(130, "filed>stored: %s", sd->msg); + + /* ****FIXME**** check response */ + bget_msg(sd); /* get OK */ + + /* Inform Storage daemon that we are done */ + sd->signal(BNET_TERMINATE); + + break; + case L_VERIFY_DISK_TO_CATALOG: + do_verify(jcr); + break; + default: + dir->fsend(_("2994 Bad verify level: %s\n"), dir->msg); + return 0; + } + + dir->signal(BNET_EOD); + generate_plugin_event(jcr, bEventEndVerifyJob); + return 0; /* return and terminate command loop */ +} + +/* + * Do a Restore for Director + * + */ +static int restore_cmd(JCR *jcr) +{ + BSOCK *dir = jcr->dir_bsock; + BSOCK *sd = jcr->store_bsock; + POOLMEM *args=NULL, *restore_where=NULL, *restore_rwhere=NULL; + bool use_regexwhere=false; + int prefix_links; + char replace; + bool scan_ok = true; + int files; + int ret = 0; + + /** + * Scan WHERE (base directory for restore) from command + */ + Dmsg0(100, "restore command\n"); +#if defined(WIN32_VSS) + + /** + * No need to enable VSS for restore if we do not have plugin + * data to restore + */ + jcr->Snapshot = jcr->got_metadata; +#endif + + /* Pickup where string */ + args = get_memory(dir->msglen+1); + *args = 0; + + restore_where = get_pool_memory(PM_FNAME); + restore_rwhere = get_pool_memory(PM_FNAME); + + /* We don't know the size of where/rwhere in advance, + * where= -> where=%202s\n + */ + Mmsg(restore_where, "%s%%%ds\n", restorefcmd, dir->msglen); + Mmsg(restore_rwhere, "%s%%%ds\n", restorefcmdR, dir->msglen); + + Dmsg2(200, "where=%srwhere=%s", restore_where, restore_rwhere); + + /* Scan for new form with number of files to restore */ + if (sscanf(dir->msg, restore_where, &files, &replace, &prefix_links, args) != 4) { + if (sscanf(dir->msg, restore_rwhere, &files, &replace, &prefix_links, args) != 4) { + if (sscanf(dir->msg, restorefcmd1, &files, &replace, &prefix_links) != 3) { + scan_ok = false; + } + *args = 0; /* No where argument */ + } else { + use_regexwhere = true; + } + } + + if (scan_ok) { + jcr->ExpectedFiles = files; + } else { + /* Scan for old form without number of files */ + jcr->ExpectedFiles = 0; + + /* where= -> where=%202s\n */ + Mmsg(restore_where, "%s%%%ds\n", restorecmd, dir->msglen); + Mmsg(restore_rwhere, "%s%%%ds\n", restorecmdR, dir->msglen); + + if (sscanf(dir->msg, restore_where, &replace, &prefix_links, args) != 3) { + if (sscanf(dir->msg, restore_rwhere, &replace, &prefix_links, args) != 3){ + if (sscanf(dir->msg, restorecmd1, &replace, &prefix_links) != 2) { + pm_strcpy(jcr->errmsg, dir->msg); + Jmsg(jcr, M_FATAL, 0, _("Bad replace command. CMD=%s\n"), jcr->errmsg); + goto free_mempool; + } + *args = 0; /* No where argument */ + } else { + use_regexwhere = true; + } + } + } + + /* Turn / into nothing */ + if (IsPathSeparator(args[0]) && args[1] == '\0') { + args[0] = '\0'; + } + + Dmsg2(150, "Got replace %c, where=%s\n", replace, args); + unbash_spaces(args); + + /* Keep track of newly created directories to apply them correct attributes */ + if (replace == REPLACE_NEVER || replace == REPLACE_IFNEWER) { + jcr->keep_path_list = true; + } + + if (use_regexwhere) { + jcr->where_bregexp = get_bregexps(args); + if (!jcr->where_bregexp) { + Jmsg(jcr, M_FATAL, 0, _("Bad where regexp. 
where=%s\n"), args); + goto free_mempool; + } + jcr->RegexWhere = bstrdup(args); + } else { + jcr->where = bstrdup(args); + } + + jcr->replace = replace; + jcr->prefix_links = prefix_links; + + dir->fsend(OKrestore); + Dmsg1(110, "filed>dird: %s", dir->msg); + + jcr->setJobType(JT_RESTORE); + + jcr->setJobStatus(JS_Blocked); + + if (!open_sd_read_session(jcr)) { + jcr->setJobStatus(JS_ErrorTerminated); + goto bail_out; + } + + jcr->setJobStatus(JS_Running); + + /** + * Do restore of files and data + */ + start_dir_heartbeat(jcr); + generate_daemon_event(jcr, "JobStart"); + generate_plugin_event(jcr, bEventStartRestoreJob); + +#if defined(WIN32_VSS) + /* START VSS ON WIN32 */ + if (jcr->Snapshot) { + jcr->pVSSClient = VSSInit(); + if (!jcr->pVSSClient->InitializeForRestore(jcr)) { + berrno be; + Jmsg(jcr, M_WARNING, 0, _("VSS was not initialized properly. VSS support is disabled. ERR=%s\n"), be.bstrerror()); + } + //free_and_null_pool_memory(jcr->job_metadata); + run_scripts(jcr, jcr->RunScripts, "ClientAfterVSS"); + } +#endif + + if (!jcr->is_canceled()) { + do_restore(jcr); + } + + stop_dir_heartbeat(jcr); + + jcr->setJobStatus(JS_Terminated); + if (jcr->JobStatus != JS_Terminated) { + sd->suppress_error_messages(true); + } + + /** + * Send Close session command to Storage daemon + */ + sd->fsend(read_close, jcr->Ticket); + Dmsg1(100, "filed>stored: %s", sd->msg); + + bget_msg(sd); /* get OK */ + + /* Inform Storage daemon that we are done */ + sd->signal(BNET_TERMINATE); + +#if defined(WIN32_VSS) + /* STOP VSS ON WIN32 */ + /* tell vss to close the restore session */ + Dmsg0(100, "About to call CloseRestore\n"); + if (jcr->Snapshot) { +#if 0 + generate_plugin_event(jcr, bEventVssBeforeCloseRestore); +#endif + Dmsg0(100, "Really about to call CloseRestore\n"); + if (jcr->pVSSClient->CloseRestore()) { + Dmsg0(100, "CloseRestore success\n"); +#if 0 + /* inform user about writer states */ + for (int i=0; i<(int)jcr->pVSSClient->GetWriterCount(); i++) { + int msg_type = M_INFO; + if (jcr->pVSSClient->GetWriterState(i) < 1) { + //msg_type = M_WARNING; + //jcr->JobErrors++; + } + Jmsg(jcr, msg_type, 0, _("VSS Writer (RestoreComplete): %s\n"), + jcr->pVSSClient->GetWriterInfo(i)); + } +#endif + } + else { + Dmsg1(100, "CloseRestore fail - %08x\n", errno); + } + } +#endif + +bail_out: + bfree_and_null(jcr->where); + bfree_and_null(jcr->RegexWhere); + + Dmsg0(100, "Done in job.c\n"); + + if (jcr->multi_restore) { + Dmsg0(100, OKstoreend); + dir->fsend(OKstoreend); + ret = 1; /* we continue the loop, waiting for next part */ + } else { + ret = 0; /* we stop here */ + } + + if (job_canceled(jcr)) { + ret = 0; /* we stop here */ + } + + if (ret == 0) { + end_restore_cmd(jcr); /* stopping so send bEventEndRestoreJob */ + } + +free_mempool: + free_and_null_pool_memory(args); + free_and_null_pool_memory(restore_where); + free_and_null_pool_memory(restore_rwhere); + + return ret; +} + +static int end_restore_cmd(JCR *jcr) +{ + Dmsg0(5, "end_restore_cmd\n"); + if (jcr->JobErrors) { + jcr->setJobStatus(JS_ErrorTerminated); + } + generate_plugin_event(jcr, bEventEndRestoreJob); + return 0; /* return and terminate command loop */ +} + +static int open_sd_read_session(JCR *jcr) +{ + BSOCK *sd = jcr->store_bsock; + + if (!sd) { + Jmsg(jcr, M_FATAL, 0, _("Improper calling sequence.\n")); + return 0; + } + Dmsg4(120, "VolSessId=%ld VolsessT=%ld SF=%ld EF=%ld\n", + jcr->VolSessionId, jcr->VolSessionTime, jcr->StartFile, jcr->EndFile); + Dmsg2(120, "JobId=%d vol=%s\n", jcr->JobId, "DummyVolume"); + /* + * Open 
Read Session with Storage daemon + */ + sd->fsend(read_open, "DummyVolume", + jcr->VolSessionId, jcr->VolSessionTime, jcr->StartFile, jcr->EndFile, + jcr->StartBlock, jcr->EndBlock); + Dmsg1(110, ">stored: %s", sd->msg); + + /* + * Get ticket number + */ + if (bget_msg(sd) >= 0) { + Dmsg1(110, "filedmsg); + if (sscanf(sd->msg, OK_open, &jcr->Ticket) != 1) { + Jmsg(jcr, M_FATAL, 0, _("Bad response to SD read open: %s\n"), sd->msg); + return 0; + } + Dmsg1(110, "filed: got Ticket=%d\n", jcr->Ticket); + } else { + Jmsg(jcr, M_FATAL, 0, _("Bad response from stored to read open command\n")); + return 0; + } + + /* + * Use interactive session for the current restore + */ + if (jcr->interactive_session) { + sd->fsend(read_ctrl, jcr->Ticket); + Dmsg1(110, ">stored: %s", sd->msg); + } + + /* + * Start read of data with Storage daemon + */ + sd->fsend(read_data, jcr->Ticket); + Dmsg1(110, ">stored: %s", sd->msg); + + /* + * Get OK data + */ + if (!response(jcr, sd, OK_data, "Read Data")) { + return 0; + } + return 1; +} + +/** + * Destroy the Job Control Record and associated + * resources (sockets). + */ +static void filed_free_jcr(JCR *jcr) +{ + if (jcr->dir_bsock) { + free_bsock(jcr->dir_bsock); + jcr->dir_bsock = NULL; + } + if (jcr->sd_calls_client_bsock) { + free_bsock(jcr->sd_calls_client_bsock); + jcr->sd_calls_client_bsock = NULL; + } + if (jcr->store_bsock) { + free_bsock(jcr->store_bsock); + jcr->store_bsock = NULL; + } + if (jcr->last_fname) { + free_pool_memory(jcr->last_fname); + } +#ifdef WIN32_VSS + VSSCleanup(jcr->pVSSClient); +#endif + free_plugins(jcr); /* release instantiated plugins */ + free_runscripts(jcr->RunScripts); + delete jcr->RunScripts; + free_path_list(jcr); + + if (jcr->JobId != 0) { + write_state_file(me->working_directory, "bacula-fd", get_first_port_host_order(me->FDaddrs)); + } + return; +} + +/** + * Get response from Storage daemon to a command we + * sent. Check that the response is OK. + * + * Returns: 0 on failure + * 1 on success + */ +int response(JCR *jcr, BSOCK *sd, char *resp, const char *cmd) +{ + int ret; + + if (sd->errors) { + return 0; + } + if ((ret = bget_msg(sd)) > 0) { + Dmsg0(110, sd->msg); + if (strcmp(sd->msg, resp) == 0) { + return 1; + } + } + if (job_canceled(jcr)) { + return 0; /* if canceled avoid useless error messages */ + } + if (sd->is_error()) { + Jmsg2(jcr, M_FATAL, 0, _("Comm error with SD. bad response to %s. ERR=%s\n"), + cmd, sd->bstrerror()); + } else { + char buf[256]; + if (ret > 0) { + Jmsg4(jcr, M_FATAL, 0, _("Bad response from SD to %s command. Wanted %s, got len=%ld msg=\"%s\"\n"), + cmd, resp, sd->msglen, smartdump(sd->msg, sd->msglen, buf, sizeof(buf))); + } else { + Jmsg3(jcr, M_FATAL, 0, _("Bad response from SD to %s command. Wanted %s, got SIGNAL %s\n"), + cmd, resp, bnet_sig_to_ascii(ret)); + } + } + return 0; +} diff --git a/src/filed/protos.h b/src/filed/protos.h new file mode 100644 index 00000000..8da10a98 --- /dev/null +++ b/src/filed/protos.h @@ -0,0 +1,86 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. 
+ + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Written by Kern Sibbald, MM + */ + +extern bool blast_data_to_storage_daemon(JCR *jcr, char *addr); +extern void do_verify_volume(JCR *jcr); +extern void do_restore(JCR *jcr); +extern int make_estimate(JCR *jcr); + +/* From restore.c */ +bool decompress_data(JCR *jcr, int32_t stream, char **data, uint32_t *length); + +/* From authenticate.c */ +bool authenticate_director(JCR *jcr); +bool authenticate_storagedaemon(JCR *jcr); + +/* From hello.c */ +bool validate_dir_hello(JCR *jcr); +bool send_hello_ok(BSOCK *bs); +bool send_sorry(BSOCK *bs); +bool send_hello_sd(JCR *jcr, char *Job); +void *handle_storage_connection(BSOCK *sd); +BSOCK *connect_director(JCR *jcr, CONSRES *dir); + +/* From verify.c */ +int digest_file(JCR *jcr, FF_PKT *ff_pkt, DIGEST *digest); +void do_verify(JCR *jcr); + +/* From heartbeat.c */ +void start_heartbeat_monitor(JCR *jcr); +void stop_heartbeat_monitor(JCR *jcr); +void start_dir_heartbeat(JCR *jcr); +void stop_dir_heartbeat(JCR *jcr); + +/* from accurate.c */ +bool accurate_finish(JCR *jcr); +bool accurate_check_file(JCR *jcr, FF_PKT *ff_pkt); +bool accurate_mark_file_as_seen(JCR *jcr, char *fname); +void accurate_free(JCR *jcr); +bool accurate_check_file(JCR *jcr, ATTR *attr, char *digest); + +/* from backup.c */ +void strip_path(FF_PKT *ff_pkt); +void unstrip_path(FF_PKT *ff_pkt); + +/* from job.c */ +findINCEXE *new_exclude(JCR *jcr); +findINCEXE *new_preinclude(JCR *jcr); +findINCEXE *get_incexe(JCR *jcr); +void set_incexe(JCR *jcr, findINCEXE *incexe); +void new_options(JCR *jcr, findINCEXE *incexe); +void add_file_to_fileset(JCR *jcr, const char *fname, bool is_file); +int add_options_to_fileset(JCR *jcr, const char *item); +int add_wild_to_fileset(JCR *jcr, const char *item, int type); +int add_regex_to_fileset(JCR *jcr, const char *item, int type); +findINCEXE *new_include(JCR *jcr); + +/* from snapshot.c */ +int snapshot_cmd(JCR *jcr); + +#ifdef HAVE_WIN32 +void VSSCleanup(VSSClient *c); +VSSClient *VSSInit(); +#endif + +/* Definition for encyption cipher/digest type */ +void store_cipher_type(LEX *lc, RES_ITEM *item, int index, int pass); +void store_digest_type(LEX *lc, RES_ITEM *item, int index, int pass); diff --git a/src/filed/restore.c b/src/filed/restore.c new file mode 100644 index 00000000..7f4c212d --- /dev/null +++ b/src/filed/restore.c @@ -0,0 +1,1810 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula File Daemon restore.c Restorefiles. 
+ * + * Kern Sibbald, November MM + */ + +#include "bacula.h" +#include "filed.h" +#include "ch.h" +#include "restore.h" + +#ifdef HAVE_DARWIN_OS +#include +const bool have_darwin_os = true; +#else +const bool have_darwin_os = false; +#endif + +#if defined(HAVE_CRYPTO) +const bool have_crypto = true; +#else +const bool have_crypto = false; +#endif + +#if defined(HAVE_ACL) +const bool have_acl = true; +#else +const bool have_acl = false; +#endif + +#ifdef HAVE_SHA2 +const bool have_sha2 = true; +#else +const bool have_sha2 = false; +#endif + +#if defined(HAVE_XATTR) +const bool have_xattr = true; +#else +const bool have_xattr = false; +#endif + +/* Data received from Storage Daemon */ +static char rec_header[] = "rechdr %ld %ld %ld %ld %ld"; + +/* Forward referenced functions */ +#if defined(HAVE_LIBZ) +static const char *zlib_strerror(int stat); +const bool have_libz = true; +#else +const bool have_libz = false; +#endif +#ifdef HAVE_LZO +const bool have_lzo = true; +#else +const bool have_lzo = false; +#endif + +static void deallocate_cipher(r_ctx &rctx); +static void deallocate_fork_cipher(r_ctx &rctx); +static bool verify_signature(r_ctx &rctx); +static void free_signature(r_ctx &rctx); +static void free_session(r_ctx &rctx); +static bool close_previous_stream(r_ctx &rctx); +static int32_t extract_data(r_ctx &rctx, POOLMEM *buf, int32_t buflen); +static bool flush_cipher(r_ctx &rctx, BFILE *bfd, uint64_t *addr, int flags, int32_t stream, + RESTORE_CIPHER_CTX *cipher_ctx); + +/* + * Close a bfd check that we are at the expected file offset. + * Makes use of some code from set_attributes(). + */ +static int bclose_chksize(r_ctx &rctx, BFILE *bfd, boffset_t osize) +{ + char ec1[50], ec2[50]; + boffset_t fsize; + JCR *jcr = rctx.jcr; + + fsize = blseek(bfd, 0, SEEK_CUR); + bclose(bfd); /* first close file */ + if (fsize > 0 && fsize != osize) { + Qmsg3(jcr, M_WARNING, 0, _("Size of data or stream of %s not correct. Original %s, restored %s.\n"), + jcr->last_fname, edit_uint64(osize, ec1), + edit_uint64(fsize, ec2)); + return -1; + } + return 0; +} + +#ifdef HAVE_DARWIN_OS +static bool restore_finderinfo(JCR *jcr, POOLMEM *buf, int32_t buflen) +{ + struct attrlist attrList; + + memset(&attrList, 0, sizeof(attrList)); + attrList.bitmapcount = ATTR_BIT_MAP_COUNT; + attrList.commonattr = ATTR_CMN_FNDRINFO; + + Dmsg0(130, "Restoring Finder Info\n"); + jcr->ff->flags |= FO_HFSPLUS; + if (buflen != 32) { + Jmsg(jcr, M_WARNING, 0, _("Invalid length of Finder Info (got %d, wanted 32)\n"), buflen); + return false; + } + + if (setattrlist(jcr->last_fname, &attrList, buf, buflen, 0) != 0) { + Jmsg(jcr, M_WARNING, 0, _("Error setting Finder Info on \"%s\"\n"), jcr->last_fname); + return false; + } + + return true; +} +#else + +static bool restore_finderinfo(JCR *jcr, POOLMEM *buf, int32_t buflen) +{ + return true; +} + +#endif + +/* + * Cleanup of delayed restore stack with streams for later processing. + */ +static void drop_delayed_restore_streams(r_ctx &rctx, bool reuse) +{ + RESTORE_DATA_STREAM *rds; + + if (!rctx.delayed_streams) { + if (reuse) { + rctx.delayed_streams = New(alist(10, owned_by_alist)); + } + return; + } + if (rctx.delayed_streams->empty()) { + return; + } + + foreach_alist(rds, rctx.delayed_streams) { + if (rds->content) { + free(rds->content); + rds->content = NULL; + } + } + rctx.delayed_streams->destroy(); + if (reuse) { + rctx.delayed_streams->init(10, owned_by_alist); + } +} + + +/* + * Push a data stream onto the delayed restore stack for + * later processing. 
+ */ +static inline void push_delayed_restore_stream(r_ctx &rctx, char *msg, int msglen) +{ + RESTORE_DATA_STREAM *rds; + + if (msglen <= 0) { + return; + } + if (!rctx.delayed_streams) { + rctx.delayed_streams = New(alist(10, owned_by_alist)); + } + + rds = (RESTORE_DATA_STREAM *)malloc(sizeof(RESTORE_DATA_STREAM)); + rds->stream = rctx.stream; + rds->content = (char *)malloc(msglen); + memcpy(rds->content, msg, msglen); + rds->content_length = msglen; + rctx.delayed_streams->append(rds); +} + +/* + * Perform a restore of an ACL using the stream received. + * This can either be a delayed restore or direct restore. + */ +static inline bool do_restore_acl(JCR *jcr, int stream, char *content, + uint32_t content_length) +{ +#ifdef HAVE_ACL + if (!jcr->bacl) { + return true; + } + switch (jcr->bacl->restore_acl(jcr, stream, content, content_length)) { + case bRC_BACL_fatal: + return false; + case bRC_BACL_error: + /* + * Non-fatal errors, count them and when the number is under ACL_MAX_ERROR_PRINT_PER_JOB + * print the error message set by the lower level routine in jcr->errmsg. + */ + if (jcr->bacl->get_acl_nr_errors() < ACL_MAX_ERROR_PRINT_PER_JOB) { + Jmsg(jcr, M_WARNING, 0, "%s", jcr->errmsg); + } + break; + default: + break; + } +#endif + return true; +} + +/* + * Perform a restore of an XATTR using the stream received. + * This can either be a delayed restore or direct restore. + */ +static inline bool do_restore_xattr(JCR *jcr, int stream, char *content, + uint32_t content_length) +{ +#ifdef HAVE_XATTR + if (!jcr->bxattr) { + return true; + } + + switch (jcr->bxattr->restore_xattr(jcr, stream, content, content_length)) { + case bRC_BXATTR_fatal: + return false; + case bRC_BXATTR_error: + /* + * Non-fatal errors, count them and when the number is under XATTR_MAX_ERROR_PRINT_PER_JOB + * print the error message set by the lower level routine in jcr->errmsg. + */ + if (jcr->bxattr->get_xattr_nr_errors() < XATTR_MAX_ERROR_PRINT_PER_JOB) { + Jmsg(jcr, M_WARNING, 0, "%s", jcr->errmsg); + } + break; + default: + break; + } +#endif + return true; +} + +/* + * Restore any data streams that are restored after the file + * is fully restored and has its attributes restored. Things + * like acls and xattr are restored after we set the file + * attributes otherwise we might clear some security flags + * by setting the attributes. + */ +static inline bool pop_delayed_data_streams(r_ctx &rctx) +{ + RESTORE_DATA_STREAM *rds; + JCR *jcr = rctx.jcr; + + /* + * See if there is anything todo. + */ + if (!rctx.delayed_streams || + rctx.delayed_streams->empty()) { + return true; + } + + /* + * Only process known delayed data streams here. + * If you start using more delayed data streams + * be sure to add them in this loop and add the + * proper calls here. 
+ * + * Currently we support delayed data stream + * processing for the following type of streams: + * - *_ACL_* + * - *_XATTR_* + */ + foreach_alist(rds, rctx.delayed_streams) { + Dmsg1(0, "Delayed Stream=%d\n", rds->stream); + switch (rds->stream) { + case STREAM_UNIX_ACCESS_ACL: + case STREAM_UNIX_DEFAULT_ACL: + case STREAM_XACL_AIX_TEXT: + case STREAM_XACL_DARWIN_ACCESS: + case STREAM_XACL_FREEBSD_DEFAULT: + case STREAM_XACL_FREEBSD_ACCESS: + case STREAM_XACL_HPUX_ACL_ENTRY: + case STREAM_XACL_IRIX_DEFAULT: + case STREAM_XACL_IRIX_ACCESS: + case STREAM_XACL_LINUX_DEFAULT: + case STREAM_XACL_LINUX_ACCESS: + case STREAM_XACL_TRU64_DEFAULT: + case STREAM_XACL_TRU64_DEFAULT_DIR: + case STREAM_XACL_TRU64_ACCESS: + case STREAM_XACL_SOLARIS_POSIX: + case STREAM_XACL_SOLARIS_NFS4: + case STREAM_XACL_AFS_TEXT: + case STREAM_XACL_AIX_AIXC: + case STREAM_XACL_AIX_NFS4: + case STREAM_XACL_FREEBSD_NFS4: + case STREAM_XACL_HURD_DEFAULT: + case STREAM_XACL_HURD_ACCESS: + case STREAM_XACL_PLUGIN_ACL: + if (!do_restore_acl(jcr, rds->stream, rds->content, rds->content_length)) { + goto get_out; + } + break; + case STREAM_XACL_PLUGIN_XATTR: + case STREAM_XACL_HURD_XATTR: + case STREAM_XACL_IRIX_XATTR: + case STREAM_XACL_TRU64_XATTR: + case STREAM_XACL_AIX_XATTR: + case STREAM_XACL_OPENBSD_XATTR: + case STREAM_XACL_SOLARIS_SYS_XATTR: + case STREAM_XACL_DARWIN_XATTR: + case STREAM_XACL_FREEBSD_XATTR: + case STREAM_XACL_LINUX_XATTR: + case STREAM_XACL_NETBSD_XATTR: + if (!do_restore_xattr(jcr, rds->stream, rds->content, rds->content_length)) { + goto get_out; + } + break; + default: + Jmsg(jcr, M_WARNING, 0, _("Unknown stream=%d ignored. This shouldn't happen!\n"), + rds->stream); + Dmsg2(0, "Unknown stream=%d data=%s\n", rds->stream, rds->content); + break; + } + if (rds->content) { + free(rds->content); + rds->content = NULL; + } + } + + drop_delayed_restore_streams(rctx, true); + return true; + +get_out: + drop_delayed_restore_streams(rctx, true); + return false; +} + + +/* + * Restore the requested files. + */ +void do_restore(JCR *jcr) +{ + BSOCK *sd; + uint32_t VolSessionId, VolSessionTime; + int32_t file_index; + char ec1[50]; /* Buffer printing huge values */ + uint32_t buf_size; /* client buffer size */ + int stat; + int64_t rsrc_len = 0; /* Original length of resource fork */ + r_ctx rctx; + ATTR *attr; + int bget_ret = 0; + /* ***FIXME*** make configurable */ + crypto_digest_t signing_algorithm = have_sha2 ? 
+ CRYPTO_DIGEST_SHA256 : CRYPTO_DIGEST_SHA1; + memset(&rctx, 0, sizeof(rctx)); + rctx.jcr = jcr; + + /* The following variables keep track of "known unknowns" */ + int non_suppored_data = 0; + int non_suppored_attr = 0; + int non_suppored_rsrc = 0; + int non_suppored_finfo = 0; + int non_suppored_acl = 0; + int non_suppored_progname = 0; + int non_suppored_crypto = 0; + int non_suppored_xattr = 0; + + sd = jcr->store_bsock; + jcr->setJobStatus(JS_Running); + + LockRes(); + CLIENT *client = (CLIENT *)GetNextRes(R_CLIENT, NULL); + UnlockRes(); + if (client) { + buf_size = client->max_network_buffer_size; + } else { + buf_size = 0; /* use default */ + } + if (!sd->set_buffer_size(buf_size, BNET_SETBUF_WRITE)) { + jcr->setJobStatus(JS_ErrorTerminated); + return; + } + jcr->buf_size = sd->msglen; + + /* use the same buffer size to decompress both gzip and lzo */ + if (have_libz || have_lzo) { + uint32_t compress_buf_size = jcr->buf_size + 12 + ((jcr->buf_size+999) / 1000) + 100; + jcr->compress_buf = get_memory(compress_buf_size); + jcr->compress_buf_size = compress_buf_size; + } + + GetMsg *fdmsg; + fdmsg = New(GetMsg(jcr, sd, rec_header, GETMSG_MAX_MSG_SIZE)); + + fdmsg->start_read_sock(); + bmessage *bmsg = fdmsg->new_msg(); /* get a message, to exchange with fdmsg */ + +#ifdef HAVE_LZO + if (lzo_init() != LZO_E_OK) { + Jmsg(jcr, M_FATAL, 0, _("LZO init failed\n")); + goto get_out; + } +#endif + + if (have_crypto) { + rctx.cipher_ctx.buf = get_memory(CRYPTO_CIPHER_MAX_BLOCK_SIZE); + if (have_darwin_os) { + rctx.fork_cipher_ctx.buf = get_memory(CRYPTO_CIPHER_MAX_BLOCK_SIZE); + } + } + + /* + * Get a record from the Storage daemon. We are guaranteed to + * receive records in the following order: + * 1. Stream record header + * 2. Stream data (one or more of the following in the order given) + * a. Attributes (Unix or Windows) + * b. Possibly stream encryption session data (e.g., symmetric session key) + * c. File data for the file + * d. Alternate data stream (e.g. Resource Fork) + * e. Finder info + * f. ACLs + * g. XATTRs + * h. Possibly a cryptographic signature + * i. Possibly MD5 or SHA1 record + * 3. Repeat step 1 + * + * NOTE: We keep track of two bacula file descriptors: + * 1. bfd for file data. + * This fd is opened for non empty files when an attribute stream is + * encountered and closed when we find the next attribute stream. + * 2. fork_bfd for alternate data streams + * This fd is opened every time we encounter a new alternate data + * stream for the current file. When we find any other stream, we + * close it again. + * The expected size of the stream, fork_len, should be set when + * opening the fd. + * 3. Not all the stream data records are required -- e.g. if there + * is no fork, there is no alternate data stream, no ACL, ... 
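
A small worked example (not from the Bacula tree) of the decompression-buffer sizing used in
do_restore() above: the network buffer size plus 12 bytes plus roughly 0.1%, plus 100 bytes of
slack. For a 64 KiB buffer this comes to 65714 bytes; the 12-byte/0.1% figure resembles the
overhead commonly quoted for zlib's compress() bound.

#include <stdio.h>
#include <stdint.h>

static uint32_t restore_compress_buf_size(uint32_t buf_size)
{
   /* same arithmetic as the compress_buf allocation in do_restore() */
   return buf_size + 12 + ((buf_size + 999) / 1000) + 100;
}

int main()
{
   printf("%u\n", (unsigned)restore_compress_buf_size(65536));   /* prints 65714 */
   return 0;
}
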
+ */ + binit(&rctx.bfd); + binit(&rctx.forkbfd); + attr = rctx.attr = new_attr(jcr); +#ifdef HAVE_ACL + jcr->bacl = (BACL*)new_bacl(); +#endif +#ifdef HAVE_XATTR + jcr->bxattr = (BXATTR*)new_bxattr(); +#endif + + Dsm_check(200); + while ((bget_ret = fdmsg->bget_msg(&bmsg)) >= 0 && !job_canceled(jcr)) { + time_t now = time(NULL); + if (jcr->last_stat_time == 0) { + jcr->last_stat_time = now; + jcr->stat_interval = 30; /* Default 30 seconds */ + } else if (now >= jcr->last_stat_time + jcr->stat_interval) { + jcr->dir_bsock->fsend("Progress JobId=%ld files=%ld bytes=%lld bps=%ld\n", + jcr->JobId, jcr->JobFiles, jcr->JobBytes, jcr->LastRate); + jcr->last_stat_time = now; + } + + /* Remember previous stream type */ + rctx.prev_stream = rctx.stream; + + /* First we expect a Stream Record Header */ + Dsm_check(200); + if (sscanf(bmsg->rbuf, rec_header, &VolSessionId, &VolSessionTime, &file_index, + &rctx.full_stream, &rctx.size) != 5) { + Jmsg1(jcr, M_FATAL, 0, _("Record header scan error: %s\n"), bmsg->rbuf); + goto get_out; + } + /* Strip off new stream high bits */ + rctx.stream = rctx.full_stream & STREAMMASK_TYPE; + + /* Now we expect the Stream Data */ + if ((bget_ret = fdmsg->bget_msg(&bmsg)) < 0) { + if (bget_ret != BNET_EXT_TERMINATE) { + Jmsg1(jcr, M_FATAL, 0, _("Data record error. ERR=%s\n"), sd->bstrerror()); + } else { + /* The error has been handled somewhere else, just quit */ + } + goto get_out; + } + if (rctx.size != (uint32_t)bmsg->origlen) { + Jmsg2(jcr, M_FATAL, 0, _("Actual data size %d not same as header %d\n"), + bmsg->origlen, rctx.size); + Dmsg2(50, "Actual data size %d not same as header %d\n", + bmsg->origlen, rctx.size); + goto get_out; + } + + /* If we change streams, close and reset alternate data streams */ + if (rctx.prev_stream != rctx.stream) { + if (is_bopen(&rctx.forkbfd)) { + deallocate_fork_cipher(rctx); + bclose_chksize(rctx, &rctx.forkbfd, rctx.fork_size); + } + /* Use an impossible value and set a proper one below */ + rctx.fork_size = -1; + rctx.fork_addr = 0; + } + + /* File Attributes stream */ + switch (rctx.stream) { + case STREAM_UNIX_ATTRIBUTES: + case STREAM_UNIX_ATTRIBUTES_EX: + /* if any previous stream open, close it */ + if (!close_previous_stream(rctx)) { + goto get_out; + } + + /* + * TODO: manage deleted files + */ + if (rctx.type == FT_DELETED) { /* deleted file */ + continue; + } + /* + * Restore objects should be ignored here -- they are + * returned at the beginning of the restore. + */ + if (IS_FT_OBJECT(rctx.type)) { + continue; + } + + /* + * Unpack attributes and do sanity check them + */ + if (!unpack_attributes_record(jcr, rctx.stream, bmsg->rbuf, bmsg->rbuflen, attr)) { + goto get_out; + } + + attr->data_stream = decode_stat(attr->attr, &attr->statp, sizeof(attr->statp), &attr->LinkFI); + + Dmsg5(100, "Stream %d: %s, File %s\nattrib=%s\nattribsEx=%s\n", + attr->data_stream, stream_to_ascii(attr->data_stream), + attr->fname, attr->attr, attr->attrEx); + Dmsg3(100, "=== msglen=%d attrExlen=%d msg=%s\n", bmsg->rbuflen, + strlen(attr->attrEx), bmsg->rbuf); + + if (!is_restore_stream_supported(attr->data_stream)) { + Dmsg2(15, "Non-supported data stream %d: %s\n", + attr->data_stream, stream_to_ascii(attr->data_stream)); + if (!non_suppored_data++) { + Jmsg(jcr, M_WARNING, 0, _("%s stream not supported on this Client.\n"), + stream_to_ascii(attr->data_stream)); + } + continue; + } + + build_attr_output_fnames(jcr, attr); + + /* + * Try to actually create the file, which returns a status telling + * us if we need to extract or not. 
+ */ + jcr->num_files_examined++; + rctx.extract = false; + stat = CF_CORE; /* By default, let Bacula's core handle it */ + + if (jcr->plugin) { + stat = plugin_create_file(jcr, attr, &rctx.bfd, jcr->replace); + } + + if (stat == CF_CORE) { + stat = create_file(jcr, attr, &rctx.bfd, jcr->replace); + } + jcr->lock(); + pm_strcpy(jcr->last_fname, attr->ofname); + jcr->last_type = attr->type; + jcr->unlock(); + Dmsg2(130, "Outfile=%s create_file stat=%d\n", attr->ofname, stat); + switch (stat) { + case CF_ERROR: + case CF_SKIP: + jcr->JobFiles++; + break; + case CF_EXTRACT: /* File created and we expect file data */ + rctx.extract = true; + /* FALLTHROUGH WANTED */ + case CF_CREATED: /* File created, but there is no content */ + /* File created, but there is no content */ + rctx.fileAddr = 0; + print_ls_output(jcr, attr); + + if (have_darwin_os) { + /* Only restore the resource fork for regular files */ + from_base64(&rsrc_len, attr->attrEx); + if (attr->type == FT_REG && rsrc_len > 0) { + rctx.extract = true; + } + + /* + * Do not count the resource forks as regular files being restored. + */ + if (rsrc_len == 0) { + jcr->JobFiles++; + } + } else { + jcr->JobFiles++; + } + + if (!rctx.extract) { + /* set attributes now because file will not be extracted */ + if (jcr->plugin) { + plugin_set_attributes(jcr, attr, &rctx.bfd); + } else { + set_attributes(jcr, attr, &rctx.bfd); + } + } + break; + } + + break; + + /* Data stream */ + case STREAM_ENCRYPTED_SESSION_DATA: + crypto_error_t cryptoerr; + + /* The current file will not be extracted, do not create a crypto session */ + if (!rctx.extract) { + break; + } + + /* Is this an unexpected session data entry? */ + if (rctx.cs) { + Jmsg0(jcr, M_ERROR, 0, _("Unexpected cryptographic session data stream.\n")); + rctx.extract = false; + bclose(&rctx.bfd); + continue; + } + + /* Do we have any keys at all? */ + if (!jcr->crypto.pki_recipients) { + Jmsg(jcr, M_ERROR, 0, _("No private decryption keys have been defined to decrypt encrypted backup data.\n")); + rctx.extract = false; + bclose(&rctx.bfd); + break; + } + + if (jcr->crypto.digest) { + crypto_digest_free(jcr->crypto.digest); + } + jcr->crypto.digest = crypto_digest_new(jcr, signing_algorithm); + if (!jcr->crypto.digest) { + Jmsg0(jcr, M_FATAL, 0, _("Could not create digest.\n")); + rctx.extract = false; + bclose(&rctx.bfd); + break; + } + + /* Decode and save session keys. */ + cryptoerr = crypto_session_decode((uint8_t *)bmsg->rbuf, (uint32_t)bmsg->rbuflen, + jcr->crypto.pki_recipients, &rctx.cs); + switch (cryptoerr) { + case CRYPTO_ERROR_NONE: + /* Success */ + break; + case CRYPTO_ERROR_NORECIPIENT: + Jmsg(jcr, M_ERROR, 0, _("Missing private key required to decrypt encrypted backup data.\n")); + break; + case CRYPTO_ERROR_DECRYPTION: + Jmsg(jcr, M_ERROR, 0, _("Decrypt of the session key failed.\n")); + break; + case CRYPTO_ERROR_NOSIGNER: + Jmsg(jcr, M_ERROR, 0, _("Signer not found. Decryption failed.\n")); + break; + case CRYPTO_ERROR_INVALID_DIGEST: + Jmsg(jcr, M_ERROR, 0, _("Unsupported digest algorithm. Decrypt failed.\n")); + break; + case CRYPTO_ERROR_INVALID_CRYPTO: + Jmsg(jcr, M_ERROR, 0, _("Unsupported encryption algorithm. 
Decrypt failed.\n")); + break; + default: + /* This shouldn't happen */ + Jmsg2(jcr, M_ERROR, 0, _("An error=%d occurred while decoding encrypted session data stream: ERR=%s\n"), + cryptoerr, crypto_strerror(cryptoerr)); + break; + } + + if (cryptoerr != CRYPTO_ERROR_NONE) { + rctx.extract = false; + bclose(&rctx.bfd); + continue; + } + + break; + + case STREAM_FILE_DATA: + case STREAM_SPARSE_DATA: + case STREAM_WIN32_DATA: + case STREAM_GZIP_DATA: + case STREAM_SPARSE_GZIP_DATA: + case STREAM_WIN32_GZIP_DATA: + case STREAM_COMPRESSED_DATA: + case STREAM_SPARSE_COMPRESSED_DATA: + case STREAM_WIN32_COMPRESSED_DATA: + case STREAM_ENCRYPTED_FILE_DATA: + case STREAM_ENCRYPTED_WIN32_DATA: + case STREAM_ENCRYPTED_FILE_GZIP_DATA: + case STREAM_ENCRYPTED_WIN32_GZIP_DATA: + case STREAM_ENCRYPTED_FILE_COMPRESSED_DATA: + case STREAM_ENCRYPTED_WIN32_COMPRESSED_DATA: + /* Force an expected, consistent stream type here */ + if (rctx.extract && (rctx.prev_stream == rctx.stream + || rctx.prev_stream == STREAM_UNIX_ATTRIBUTES + || rctx.prev_stream == STREAM_UNIX_ATTRIBUTES_EX + || rctx.prev_stream == STREAM_ENCRYPTED_SESSION_DATA)) { + rctx.flags = 0; + + if (rctx.stream == STREAM_SPARSE_DATA + || rctx.stream == STREAM_SPARSE_COMPRESSED_DATA + || rctx.stream == STREAM_SPARSE_GZIP_DATA) + { + rctx.flags |= FO_SPARSE; + } + + if (rctx.stream == STREAM_GZIP_DATA + || rctx.stream == STREAM_SPARSE_GZIP_DATA + || rctx.stream == STREAM_WIN32_GZIP_DATA + || rctx.stream == STREAM_ENCRYPTED_FILE_GZIP_DATA + || rctx.stream == STREAM_COMPRESSED_DATA + || rctx.stream == STREAM_SPARSE_COMPRESSED_DATA + || rctx.stream == STREAM_WIN32_COMPRESSED_DATA + || rctx.stream == STREAM_ENCRYPTED_FILE_COMPRESSED_DATA + || rctx.stream == STREAM_ENCRYPTED_WIN32_COMPRESSED_DATA + || rctx.stream == STREAM_ENCRYPTED_WIN32_GZIP_DATA) { + rctx.flags |= FO_COMPRESS; + rctx.comp_stream = rctx.stream; + } + + if (rctx.stream == STREAM_ENCRYPTED_FILE_DATA + || rctx.stream == STREAM_ENCRYPTED_FILE_GZIP_DATA + || rctx.stream == STREAM_ENCRYPTED_WIN32_DATA + || rctx.stream == STREAM_ENCRYPTED_FILE_COMPRESSED_DATA + || rctx.stream == STREAM_ENCRYPTED_WIN32_COMPRESSED_DATA + || rctx.stream == STREAM_ENCRYPTED_WIN32_GZIP_DATA) { + /* Set up a decryption context */ + if (!rctx.cipher_ctx.cipher) { + if (!rctx.cs) { + Jmsg1(jcr, M_ERROR, 0, _("Missing encryption session data stream for %s\n"), jcr->last_fname); + rctx.extract = false; + bclose(&rctx.bfd); + continue; + } + + if ((rctx.cipher_ctx.cipher = crypto_cipher_new(rctx.cs, false, + &rctx.cipher_ctx.block_size)) == NULL) { + Jmsg1(jcr, M_ERROR, 0, _("Failed to initialize decryption context for %s\n"), jcr->last_fname); + free_session(rctx); + rctx.extract = false; + bclose(&rctx.bfd); + continue; + } + } + rctx.flags |= FO_ENCRYPT; + } + + if (is_win32_stream(rctx.stream) && + (win32decomp || !have_win32_api())) { + set_portable_backup(&rctx.bfd); + rctx.flags |= FO_WIN32DECOMP; /* "decompose" BackupWrite data */ + } + + if (extract_data(rctx, bmsg->rbuf, bmsg->rbuflen) < 0) { + rctx.extract = false; + bclose(&rctx.bfd); + continue; + } + } + break; + + /* + * Resource fork stream - only recorded after a file to be restored + * Silently ignore if we cannot write - we already reported that + */ + case STREAM_ENCRYPTED_MACOS_FORK_DATA: + case STREAM_MACOS_FORK_DATA: + if (have_darwin_os) { + rctx.fork_flags = 0; + jcr->ff->flags |= FO_HFSPLUS; + + if (rctx.stream == STREAM_ENCRYPTED_MACOS_FORK_DATA) { + rctx.fork_flags |= FO_ENCRYPT; + + /* Set up a decryption context */ + if (rctx.extract && 
!rctx.fork_cipher_ctx.cipher) { + if (!rctx.cs) { + Jmsg1(jcr, M_ERROR, 0, _("Missing encryption session data stream for %s\n"), jcr->last_fname); + rctx.extract = false; + bclose(&rctx.bfd); + continue; + } + + if ((rctx.fork_cipher_ctx.cipher = crypto_cipher_new(rctx.cs, false, &rctx.fork_cipher_ctx.block_size)) == NULL) { + Jmsg1(jcr, M_ERROR, 0, _("Failed to initialize decryption context for %s\n"), jcr->last_fname); + free_session(rctx); + rctx.extract = false; + bclose(&rctx.bfd); + continue; + } + } + } + + if (rctx.extract) { + if (rctx.prev_stream != rctx.stream) { + if (bopen_rsrc(&rctx.forkbfd, jcr->last_fname, O_WRONLY | O_TRUNC | O_BINARY, 0) < 0) { + Jmsg(jcr, M_WARNING, 0, _("Cannot open resource fork for %s.\n"), jcr->last_fname); + rctx.extract = false; + continue; + } + + rctx.fork_size = rsrc_len; + Dmsg0(130, "Restoring resource fork\n"); + } + + if (extract_data(rctx, bmsg->rbuf, bmsg->rbuflen) < 0) { + rctx.extract = false; + bclose(&rctx.forkbfd); + continue; + } + } + } else { + non_suppored_rsrc++; + } + break; + + case STREAM_HFSPLUS_ATTRIBUTES: + if (have_darwin_os) { + if (!restore_finderinfo(jcr, bmsg->rbuf, bmsg->rbuflen)) { + continue; + } + } else { + non_suppored_finfo++; + } + break; + + case STREAM_UNIX_ACCESS_ACL: + case STREAM_UNIX_DEFAULT_ACL: + case STREAM_XACL_AIX_TEXT: + case STREAM_XACL_DARWIN_ACCESS: + case STREAM_XACL_FREEBSD_DEFAULT: + case STREAM_XACL_FREEBSD_ACCESS: + case STREAM_XACL_HPUX_ACL_ENTRY: + case STREAM_XACL_IRIX_DEFAULT: + case STREAM_XACL_IRIX_ACCESS: + case STREAM_XACL_LINUX_DEFAULT: + case STREAM_XACL_LINUX_ACCESS: + case STREAM_XACL_TRU64_DEFAULT: + case STREAM_XACL_TRU64_DEFAULT_DIR: + case STREAM_XACL_TRU64_ACCESS: + case STREAM_XACL_SOLARIS_POSIX: + case STREAM_XACL_SOLARIS_NFS4: + case STREAM_XACL_AFS_TEXT: + case STREAM_XACL_AIX_AIXC: + case STREAM_XACL_AIX_NFS4: + case STREAM_XACL_FREEBSD_NFS4: + case STREAM_XACL_HURD_DEFAULT: + case STREAM_XACL_HURD_ACCESS: + case STREAM_XACL_PLUGIN_ACL: + /* + * Do not restore ACLs when + * a) The current file is not extracted + * b) and it is not a directory (they are never "extracted") + * c) or the file name is empty + */ + if ((!rctx.extract && + jcr->last_type != FT_DIREND) || + (*jcr->last_fname == 0)) { + break; + } + if (have_acl) { + /* + * For anything that is not a directory we delay + * the restore of acls till a later stage. + */ + if (jcr->last_type != FT_DIREND) { + push_delayed_restore_stream(rctx, bmsg->rbuf, bmsg->rbuflen); + } else { + if (!do_restore_acl(jcr, rctx.stream, bmsg->rbuf, bmsg->rbuflen)) { + goto get_out; + } + } + } else { + non_suppored_acl++; + } + break; + + case STREAM_XACL_PLUGIN_XATTR: + case STREAM_XACL_HURD_XATTR: + case STREAM_XACL_IRIX_XATTR: + case STREAM_XACL_TRU64_XATTR: + case STREAM_XACL_AIX_XATTR: + case STREAM_XACL_OPENBSD_XATTR: + case STREAM_XACL_SOLARIS_SYS_XATTR: + case STREAM_XACL_DARWIN_XATTR: + case STREAM_XACL_FREEBSD_XATTR: + case STREAM_XACL_LINUX_XATTR: + case STREAM_XACL_NETBSD_XATTR: + /* + * Do not restore Extended Attributes when + * a) The current file is not extracted + * b) and it is not a directory (they are never "extracted") + * c) or the file name is empty + */ + if ((!rctx.extract && + jcr->last_type != FT_DIREND) || + (*jcr->last_fname == 0)) { + break; + } + if (have_xattr) { + /* + * For anything that is not a directory we delay + * the restore of xattr till a later stage. 
+ */ + if (jcr->last_type != FT_DIREND) { + push_delayed_restore_stream(rctx, bmsg->rbuf, bmsg->rbuflen); + } else { + if (!do_restore_xattr(jcr, rctx.stream, bmsg->rbuf, bmsg->rbuflen)) { + goto get_out; + } + } + } else { + non_suppored_xattr++; + } + break; + + case STREAM_XACL_SOLARIS_XATTR: + /* + * Do not restore Extended Attributes when + * a) The current file is not extracted + * b) and it is not a directory (they are never "extracted") + * c) or the file name is empty + */ + if ((!rctx.extract && + jcr->last_type != FT_DIREND) || + (*jcr->last_fname == 0)) { + break; + } + if (have_xattr) { + if (!do_restore_xattr(jcr, rctx.stream, bmsg->rbuf, bmsg->rbuflen)) { + goto get_out; + } + } else { + non_suppored_xattr++; + } + break; + + case STREAM_SIGNED_DIGEST: + /* Is this an unexpected signature? */ + if (rctx.sig) { + Jmsg0(jcr, M_ERROR, 0, _("Unexpected cryptographic signature data stream.\n")); + free_signature(rctx); + continue; + } + /* Save signature. */ + if (rctx.extract && (rctx.sig = crypto_sign_decode(jcr, (uint8_t *)bmsg->rbuf, (uint32_t)bmsg->rbuflen)) == NULL) { + Jmsg1(jcr, M_ERROR, 0, _("Failed to decode message signature for %s\n"), jcr->last_fname); + } + break; + + case STREAM_MD5_DIGEST: + case STREAM_SHA1_DIGEST: + case STREAM_SHA256_DIGEST: + case STREAM_SHA512_DIGEST: + break; + + case STREAM_PROGRAM_NAMES: + case STREAM_PROGRAM_DATA: + if (!non_suppored_progname) { + Pmsg0(000, "Got Program Name or Data Stream. Ignored.\n"); + non_suppored_progname++; + } + break; + + case STREAM_PLUGIN_NAME: + if (!close_previous_stream(rctx)) { + goto get_out; + } + Dmsg1(150, "restore stream_plugin_name=%s\n", bmsg->rbuf); + plugin_name_stream(jcr, bmsg->rbuf); + break; + + case STREAM_RESTORE_OBJECT: + break; /* these are sent by Director */ + + default: + if (!close_previous_stream(rctx)) { + goto get_out; + } + Jmsg(jcr, M_WARNING, 0, _("Unknown stream=%d ignored. This shouldn't happen!\n"), + rctx.stream); + Dmsg2(0, "Unknown stream=%d data=%s\n", rctx.stream, bmsg->rbuf); + break; + } /* end switch(stream) */ + + /* Debug code: check if we must hangup or blowup */ + if (handle_hangup_blowup(jcr, jcr->JobFiles, jcr->JobBytes)) { + goto get_out; + } + + Dsm_check(200); + } /* end while bufmsg->bget_msg(&bmsg)) */ + + if (bget_ret == BNET_EXT_TERMINATE) { + goto get_out; + } + /* + * If output file is still open, it was the last one in the + * archive since we just hit an end of file, so close the file. + */ + if (is_bopen(&rctx.forkbfd)) { + bclose_chksize(rctx, &rctx.forkbfd, rctx.fork_size); + } + + if (!close_previous_stream(rctx)) { + goto get_out; + } + jcr->setJobStatus(JS_Terminated); + goto ok_out; + +get_out: + jcr->setJobStatus(JS_ErrorTerminated); + +ok_out: + Dsm_check(200); + fdmsg->wait_read_sock(jcr->is_job_canceled()); + delete bmsg; + free_GetMsg(fdmsg); + Dsm_check(200); + /* + * First output the statistics. + */ + Dmsg2(10, "End Do Restore. 
Files=%d Bytes=%s\n", jcr->JobFiles,
+         edit_uint64(jcr->JobBytes, ec1));
+
+#ifdef HAVE_ACL
+   if (jcr->bacl && jcr->bacl->get_acl_nr_errors() > 0) {
+      Jmsg(jcr, M_WARNING, 0, _("Encountered %ld acl errors while doing restore\n"), jcr->bacl->get_acl_nr_errors());
+   }
+#endif
+#ifdef HAVE_XATTR
+   if (jcr->bxattr && jcr->bxattr->get_xattr_nr_errors() > 0) {
+      Jmsg(jcr, M_WARNING, 0, _("Encountered %ld xattr errors while doing restore\n"), jcr->bxattr->get_xattr_nr_errors());
+   }
+#endif
+
+   if (non_suppored_data > 1 || non_suppored_attr > 1) {
+      Jmsg(jcr, M_WARNING, 0, _("%d non-supported data streams and %d non-supported attrib streams ignored.\n"),
+           non_suppored_data, non_suppored_attr);
+   }
+   if (non_suppored_rsrc) {
+      Jmsg(jcr, M_INFO, 0, _("%d non-supported resource fork streams ignored.\n"), non_suppored_rsrc);
+   }
+   if (non_suppored_finfo) {
+      Jmsg(jcr, M_INFO, 0, _("%d non-supported Finder Info streams ignored.\n"), non_suppored_finfo);
+   }
+   if (non_suppored_acl) {
+      Jmsg(jcr, M_INFO, 0, _("%d non-supported acl streams ignored.\n"), non_suppored_acl);
+   }
+   if (non_suppored_crypto) {
+      Jmsg(jcr, M_INFO, 0, _("%d non-supported crypto streams ignored.\n"), non_suppored_crypto);
+   }
+   if (non_suppored_xattr) {
+      Jmsg(jcr, M_INFO, 0, _("%d non-supported xattr streams ignored.\n"), non_suppored_xattr);
+   }
+
+   /* Free Signature & Crypto Data */
+   free_signature(rctx);
+   free_session(rctx);
+   if (jcr->crypto.digest) {
+      crypto_digest_free(jcr->crypto.digest);
+      jcr->crypto.digest = NULL;
+   }
+
+   /* Free file cipher restore context */
+   if (rctx.cipher_ctx.cipher) {
+      crypto_cipher_free(rctx.cipher_ctx.cipher);
+      rctx.cipher_ctx.cipher = NULL;
+   }
+
+   if (rctx.cipher_ctx.buf) {
+      free_pool_memory(rctx.cipher_ctx.buf);
+      rctx.cipher_ctx.buf = NULL;
+   }
+
+   /* Free alternate stream cipher restore context */
+   if (rctx.fork_cipher_ctx.cipher) {
+      crypto_cipher_free(rctx.fork_cipher_ctx.cipher);
+      rctx.fork_cipher_ctx.cipher = NULL;
+   }
+   if (rctx.fork_cipher_ctx.buf) {
+      free_pool_memory(rctx.fork_cipher_ctx.buf);
+      rctx.fork_cipher_ctx.buf = NULL;
+   }
+
+   if (jcr->compress_buf) {
+      free_pool_memory(jcr->compress_buf);
+      jcr->compress_buf = NULL;
+      jcr->compress_buf_size = 0;
+   }
+
+#ifdef HAVE_ACL
+   if (jcr->bacl) {
+      delete(jcr->bacl);
+      jcr->bacl = NULL;
+   }
+#endif
+#ifdef HAVE_XATTR
+   if (jcr->bxattr) {
+      delete(jcr->bxattr);
+      jcr->bxattr = NULL;
+   }
+#endif
+
+   /* Free the delayed stream stack list. 
*/ + if (rctx.delayed_streams) { + drop_delayed_restore_streams(rctx, false); + delete rctx.delayed_streams; + } + + if (rctx.efs) { + rctx.efs->stop(); + rctx.efs->destroy(); + free(rctx.efs); + rctx.efs = NULL; + } + Dsm_check(200); + bclose(&rctx.forkbfd); + bclose(&rctx.bfd); + free_attr(rctx.attr); +} + +#ifdef HAVE_LIBZ +/* + * Convert ZLIB error code into an ASCII message + */ +static const char *zlib_strerror(int stat) +{ + if (stat >= 0) { + return _("None"); + } + switch (stat) { + case Z_ERRNO: + return _("Zlib errno"); + case Z_STREAM_ERROR: + return _("Zlib stream error"); + case Z_DATA_ERROR: + return _("Zlib data error"); + case Z_MEM_ERROR: + return _("Zlib memory error"); + case Z_BUF_ERROR: + return _("Zlib buffer error"); + case Z_VERSION_ERROR: + return _("Zlib version error"); + default: + return _("*none*"); + } +} +#endif + +static int do_file_digest(JCR *jcr, FF_PKT *ff_pkt, bool top_level) +{ + Dmsg1(50, "do_file_digest jcr=%p\n", jcr); + return (digest_file(jcr, ff_pkt, jcr->crypto.digest)); +} + +bool sparse_data(JCR *jcr, BFILE *bfd, uint64_t *addr, char **data, uint32_t *length, int flags) +{ + unser_declare; + uint64_t faddr; + char ec1[50]; + unser_begin(*data, OFFSET_FADDR_SIZE); + unser_uint64(faddr); + /* We seek only if we have a SPARSE stream, not for OFFSET */ + if ((flags & FO_SPARSE) && *addr != faddr) { + *addr = faddr; + if (blseek(bfd, (boffset_t)*addr, SEEK_SET) < 0) { + berrno be; + Jmsg3(jcr, M_ERROR, 0, _("Seek to %s error on %s: ERR=%s\n"), + edit_uint64(*addr, ec1), jcr->last_fname, + be.bstrerror(bfd->berrno)); + return false; + } + } + *data += OFFSET_FADDR_SIZE; + *length -= OFFSET_FADDR_SIZE; + return true; +} + +bool decompress_data(JCR *jcr, int32_t stream, char **data, uint32_t *length) +{ +#if defined(HAVE_LZO) || defined(HAVE_LIBZ) + char ec1[50]; /* Buffer printing huge values */ +#endif + + Dmsg1(200, "Stream found in decompress_data(): %d\n", stream); + if(stream == STREAM_COMPRESSED_DATA || stream == STREAM_SPARSE_COMPRESSED_DATA || stream == STREAM_WIN32_COMPRESSED_DATA + || stream == STREAM_ENCRYPTED_FILE_COMPRESSED_DATA || stream == STREAM_ENCRYPTED_WIN32_COMPRESSED_DATA) + { + uint32_t comp_magic, comp_len; + uint16_t comp_level, comp_version; +#ifdef HAVE_LZO + lzo_uint compress_len; + const unsigned char *cbuf; + int r, real_compress_len; +#endif + + /* read compress header */ + unser_declare; + unser_begin(*data, sizeof(comp_stream_header)); + unser_uint32(comp_magic); + unser_uint32(comp_len); + unser_uint16(comp_level); + unser_uint16(comp_version); + Dmsg4(200, "Compressed data stream found: magic=0x%x, len=%d, level=%d, ver=0x%x\n", comp_magic, comp_len, + comp_level, comp_version); + + /* version check */ + if (comp_version != COMP_HEAD_VERSION) { + Qmsg(jcr, M_ERROR, 0, _("Compressed header version error. Got=0x%x want=0x%x\n"), comp_version, COMP_HEAD_VERSION); + return false; + } + /* size check */ + if (comp_len + sizeof(comp_stream_header) != *length) { + Qmsg(jcr, M_ERROR, 0, _("Compressed header size error. 
comp_len=%d, msglen=%d\n"), + comp_len, *length); + return false; + } + switch(comp_magic) { +#ifdef HAVE_LZO + case COMPRESS_LZO1X: + compress_len = jcr->compress_buf_size; + cbuf = (const unsigned char*)*data + sizeof(comp_stream_header); + real_compress_len = *length - sizeof(comp_stream_header); + Dmsg2(200, "Comp_len=%d msglen=%d\n", compress_len, *length); + while ((r=lzo1x_decompress_safe(cbuf, real_compress_len, + (unsigned char *)jcr->compress_buf, &compress_len, NULL)) == LZO_E_OUTPUT_OVERRUN) + { + /* + * The buffer size is too small, try with a bigger one + */ + compress_len = jcr->compress_buf_size = jcr->compress_buf_size + (jcr->compress_buf_size >> 1); + Dmsg2(200, "Comp_len=%d msglen=%d\n", compress_len, *length); + jcr->compress_buf = check_pool_memory_size(jcr->compress_buf, + compress_len); + } + if (r != LZO_E_OK) { + Qmsg(jcr, M_ERROR, 0, _("LZO uncompression error on file %s. ERR=%d\n"), + jcr->last_fname, r); + return false; + } + *data = jcr->compress_buf; + *length = compress_len; + Dmsg2(200, "Write uncompressed %d bytes, total before write=%s\n", compress_len, edit_uint64(jcr->JobBytes, ec1)); + return true; +#endif + default: + Qmsg(jcr, M_ERROR, 0, _("Compression algorithm 0x%x found, but not supported!\n"), comp_magic); + return false; + } + } else { +#ifdef HAVE_LIBZ + uLong compress_len; + int stat; + + /* + * NOTE! We only use uLong and Byte because they are + * needed by the zlib routines, they should not otherwise + * be used in Bacula. + */ + compress_len = jcr->compress_buf_size; + Dmsg2(200, "Comp_len=%d msglen=%d\n", compress_len, *length); + while ((stat=uncompress((Byte *)jcr->compress_buf, &compress_len, + (const Byte *)*data, (uLong)*length)) == Z_BUF_ERROR) + { + /* The buffer size is too small, try with a bigger one. */ + compress_len = jcr->compress_buf_size = jcr->compress_buf_size + (jcr->compress_buf_size >> 1); + Dmsg2(200, "Comp_len=%d msglen=%d\n", compress_len, *length); + jcr->compress_buf = check_pool_memory_size(jcr->compress_buf, + compress_len); + } + if (stat != Z_OK) { + Qmsg(jcr, M_ERROR, 0, _("Uncompression error on file %s. 
ERR=%s\n"), + jcr->last_fname, zlib_strerror(stat)); + return false; + } + *data = jcr->compress_buf; + *length = compress_len; + Dmsg2(200, "Write uncompressed %d bytes, total before write=%s\n", compress_len, edit_uint64(jcr->JobBytes, ec1)); + return true; +#else + Qmsg(jcr, M_ERROR, 0, _("GZIP data stream found, but GZIP not configured!\n")); + return false; +#endif + } +} + +static void unser_crypto_packet_len(RESTORE_CIPHER_CTX *ctx) +{ + unser_declare; + if (ctx->packet_len == 0 && ctx->buf_len >= CRYPTO_LEN_SIZE) { + unser_begin(&ctx->buf[0], CRYPTO_LEN_SIZE); + unser_uint32(ctx->packet_len); + ctx->packet_len += CRYPTO_LEN_SIZE; + } +} + +static bool store_data(r_ctx &rctx, char *data, const int32_t length, bool win32_decomp) +{ + JCR *jcr = rctx.jcr; + BFILE *bfd = &rctx.bfd; + ssize_t wstat; + + if (jcr->crypto.digest) { + crypto_digest_update(jcr->crypto.digest, (uint8_t *)data, length); + } +#ifdef TEST_WORKER + if (!test_write_efs_data(rctx, data, length)) { + berrno be; + Jmsg2(jcr, M_ERROR, 0, _("Write error on %s: ERR=%s\n"), + jcr->last_fname, be.bstrerror(bfd->berrno)); + return false; + } + return true; +#endif + +#ifdef HAVE_WIN32 + if (bfd->fattrs & FILE_ATTRIBUTE_ENCRYPTED) { + if (!p_WriteEncryptedFileRaw) { + Jmsg0(jcr, M_FATAL, 0, _("Windows Encrypted data not supported on this OS.\n")); + return false; + } + if (!win_write_efs_data(rctx, data, length)) { + berrno be; + Jmsg2(jcr, M_ERROR, 0, _("Encrypted file write error on %s: ERR=%s\n"), + jcr->last_fname, be.bstrerror(bfd->berrno)); + return false; + } + return true; + } +#endif + if (win32_decomp) { + if (!processWin32BackupAPIBlock(bfd, data, length)) { + berrno be; + Jmsg2(jcr, M_ERROR, 0, _("Write error in Win32 Block Decomposition on %s: %s\n"), + jcr->last_fname, be.bstrerror(bfd->berrno)); + return false; + } + } else if ((wstat=bwrite(bfd, data, length)) != (ssize_t)length) { + berrno be; + int type = M_ERROR; + int len = strlen(jcr->last_fname); + /* + * If this is the first write and the "file" is a directory + * or a drive letter, then only issue a warning as we are + * not able to reset the metadata, then continue. + * If the above is true and we have an error code 91 + * (directory not empty), supress the error entirely. + */ + if (bfd->block == 0 && len >= 2 && (jcr->last_fname[len-1] == '/' || + jcr->last_fname[len-1] == ':')) { + type = M_WARNING; + if (bfd->lerror == 91) { /* Directory not empty */ + type = 0; /* suppress error */ + } + } + if (type != 0) { + if (wstat >= 0) { + /* Insufficient bytes written */ + Jmsg4(jcr, type, 0, _("Wrong write size error at byte=%lld block=%d wanted=%d wrote=%d\n"), + bfd->total_bytes, bfd->block, length, wstat); + } else { + /* Error */ + Jmsg6(jcr, type, 0, _("Write error at byte=%lld block=%d write_len=%d lerror=%d on %s: ERR=%s\n"), + bfd->total_bytes, bfd->block, length, bfd->lerror, + jcr->last_fname, be.bstrerror(bfd->berrno)); + } + } + + /* Ignore errors? */ + if (type == M_WARNING || type == 0 || no_win32_write_errors) { + return true; + } + return false; + } + return true; +} + +/* + * In the context of jcr, write data to bfd. + * We write buflen bytes in buf at addr. addr is updated in place. + * The flags specify whether to use sparse files or compression. + * Return value is the number of bytes written, or -1 on errors. 
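+ * A return of 0 is not an error: the data was only buffered (for example an incomplete encryption block) and will be written on a later call.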
+ */ +int32_t extract_data(r_ctx &rctx, POOLMEM *buf, int32_t buflen) +{ + JCR *jcr = rctx.jcr; + BFILE *bfd = &rctx.bfd; + int flags = rctx.flags; + int32_t stream = rctx.stream; + RESTORE_CIPHER_CTX *cipher_ctx = &rctx.cipher_ctx; + char *wbuf; /* write buffer */ + uint32_t wsize; /* write size */ + uint32_t rsize; /* read size */ + uint32_t decrypted_len = 0; /* Decryption output length */ + char ec1[50]; /* Buffer printing huge values */ + + rsize = buflen; + jcr->ReadBytes += rsize; + wsize = rsize; + wbuf = buf; + + if (flags & FO_ENCRYPT) { + ASSERT(cipher_ctx->cipher); + + /* + * Grow the crypto buffer, if necessary. + * crypto_cipher_update() will process only whole blocks, + * buffering the remaining input. + */ + cipher_ctx->buf = check_pool_memory_size(cipher_ctx->buf, + cipher_ctx->buf_len + wsize + cipher_ctx->block_size); + + /* Decrypt the input block */ + if (!crypto_cipher_update(cipher_ctx->cipher, + (const u_int8_t *)wbuf, + wsize, + (u_int8_t *)&cipher_ctx->buf[cipher_ctx->buf_len], + &decrypted_len)) { + /* Decryption failed. Shouldn't happen. */ + Jmsg(jcr, M_FATAL, 0, _("Decryption error\n")); + goto get_out; + } + + if (decrypted_len == 0) { + /* No full block of encrypted data available, write more data */ + return 0; + } + + Dmsg2(200, "decrypted len=%d encrypted len=%d\n", decrypted_len, wsize); + + cipher_ctx->buf_len += decrypted_len; + wbuf = cipher_ctx->buf; + + /* If one full preserved block is available, write it to disk, + * and then buffer any remaining data. This should be effecient + * as long as Bacula's block size is not significantly smaller than the + * encryption block size (extremely unlikely!) + */ + unser_crypto_packet_len(cipher_ctx); + Dmsg1(500, "Crypto unser block size=%d\n", cipher_ctx->packet_len - CRYPTO_LEN_SIZE); + + if (cipher_ctx->packet_len == 0 || cipher_ctx->buf_len < cipher_ctx->packet_len) { + /* No full preserved block is available. 
*/ + return 0; + } + + /* We have one full block, set up the filter input buffers */ + wsize = cipher_ctx->packet_len - CRYPTO_LEN_SIZE; + wbuf = &wbuf[CRYPTO_LEN_SIZE]; /* Skip the block length header */ + cipher_ctx->buf_len -= cipher_ctx->packet_len; + Dmsg2(130, "Encryption writing full block, %u bytes, remaining %u bytes in buffer\n", wsize, cipher_ctx->buf_len); + } + + if ((flags & FO_SPARSE) || (flags & FO_OFFSETS)) { + if (!sparse_data(jcr, bfd, &rctx.fileAddr, &wbuf, &wsize, flags)) { + goto get_out; + } + } + + if (flags & FO_COMPRESS) { + if (!decompress_data(jcr, stream, &wbuf, &wsize)) { + goto get_out; + } + } + + if (!store_data(rctx, wbuf, wsize, (flags & FO_WIN32DECOMP) != 0)) { + goto get_out; + } + jcr->JobBytes += wsize; + rctx.fileAddr += wsize; + Dmsg2(130, "Write %u bytes, JobBytes=%s\n", wsize, edit_uint64(jcr->JobBytes, ec1)); + + /* Clean up crypto buffers */ + if (flags & FO_ENCRYPT) { + /* Move any remaining data to start of buffer */ + if (cipher_ctx->buf_len > 0) { + Dmsg1(130, "Moving %u buffered bytes to start of buffer\n", cipher_ctx->buf_len); + memmove(cipher_ctx->buf, &cipher_ctx->buf[cipher_ctx->packet_len], + cipher_ctx->buf_len); + } + /* The packet was successfully written, reset the length so that + * the next packet length may be re-read by unser_crypto_packet_len() */ + cipher_ctx->packet_len = 0; + } + return wsize; + +get_out: + return -1; +} + +/* + * If extracting, close any previous stream + */ +static bool close_previous_stream(r_ctx &rctx) +{ + bool rtn = true; + + /* + * If extracting, it was from previous stream, so + * close the output file and validate the signature. + */ + if (rctx.extract) { + if (rctx.size > 0 && !is_bopen(&rctx.bfd)) { + Jmsg0(rctx.jcr, M_ERROR, 0, _("Logic error: output file should be open\n")); + Pmsg2(000, "=== logic error size=%d bopen=%d\n", rctx.size, + is_bopen(&rctx.bfd)); + } + + if (rctx.prev_stream != STREAM_ENCRYPTED_SESSION_DATA) { + deallocate_cipher(rctx); + deallocate_fork_cipher(rctx); + } + + if (rctx.efs) { + rctx.efs->finish_work(); + bclose(&rctx.bfd); + rctx.count = 0; + } + + if (rctx.jcr->plugin) { + plugin_set_attributes(rctx.jcr, rctx.attr, &rctx.bfd); + } else { + set_attributes(rctx.jcr, rctx.attr, &rctx.bfd); + } + rctx.extract = false; + + /* Now perform the delayed restore of some specific data streams. */ + rtn = pop_delayed_data_streams(rctx); + + /* Verify the cryptographic signature, if any */ + rctx.type = rctx.attr->type; + verify_signature(rctx); + + /* Free Signature */ + free_signature(rctx); + free_session(rctx); + rctx.jcr->ff->flags = 0; + Dmsg0(130, "Stop extracting.\n"); + } else if (is_bopen(&rctx.bfd)) { + Jmsg0(rctx.jcr, M_ERROR, 0, _("Logic error: output file should not be open\n")); + Pmsg0(000, "=== logic error !open\n"); + bclose(&rctx.bfd); + } + + return rtn; +} + +/* + * In the context of jcr, flush any remaining data from the cipher context, + * writing it to bfd. + * Return value is true on success, false on failure. 
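+ * If data is still buffered after the first write, one more pass is made (see second_pass) before the decryption state is reset.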
+ */ +bool flush_cipher(r_ctx &rctx, BFILE *bfd, uint64_t *addr, int flags, int32_t stream, + RESTORE_CIPHER_CTX *cipher_ctx) +{ + JCR *jcr = rctx.jcr; + uint32_t decrypted_len = 0; + char *wbuf; /* write buffer */ + uint32_t wsize; /* write size */ + char ec1[50]; /* Buffer printing huge values */ + bool second_pass = false; + +again: + /* Write out the remaining block and free the cipher context */ + cipher_ctx->buf = check_pool_memory_size(cipher_ctx->buf, + cipher_ctx->buf_len + cipher_ctx->block_size); + + if (!crypto_cipher_finalize(cipher_ctx->cipher, (uint8_t *)&cipher_ctx->buf[cipher_ctx->buf_len], + &decrypted_len)) { + /* Writing out the final, buffered block failed. Shouldn't happen. */ + Jmsg3(jcr, M_ERROR, 0, _("Decryption error. buf_len=%d decrypt_len=%d on file %s\n"), + cipher_ctx->buf_len, decrypted_len, jcr->last_fname); + } + + Dmsg2(130, "Flush decrypt len=%d buf_len=%d\n", decrypted_len, cipher_ctx->buf_len); + /* If nothing new was decrypted, and our output buffer is empty, return */ + if (decrypted_len == 0 && cipher_ctx->buf_len == 0) { + return true; + } + + cipher_ctx->buf_len += decrypted_len; + + unser_crypto_packet_len(cipher_ctx); + Dmsg1(500, "Crypto unser block size=%d\n", cipher_ctx->packet_len - CRYPTO_LEN_SIZE); + wsize = cipher_ctx->packet_len - CRYPTO_LEN_SIZE; + /* Decrypted, possibly decompressed output here. */ + wbuf = &cipher_ctx->buf[CRYPTO_LEN_SIZE]; /* Skip the block length header */ + cipher_ctx->buf_len -= cipher_ctx->packet_len; + Dmsg2(130, "Encryption writing full block, %u bytes, remaining %u bytes in buffer\n", wsize, cipher_ctx->buf_len); + + if ((flags & FO_SPARSE) || (flags & FO_OFFSETS)) { + if (!sparse_data(jcr, bfd, addr, &wbuf, &wsize, flags)) { + return false; + } + } + + if (flags & FO_COMPRESS) { + if (!decompress_data(jcr, stream, &wbuf, &wsize)) { + return false; + } + } + + Dmsg0(130, "Call store_data\n"); + if (!store_data(rctx, wbuf, wsize, (flags & FO_WIN32DECOMP) != 0)) { + return false; + } + jcr->JobBytes += wsize; + Dmsg2(130, "Flush write %u bytes, JobBytes=%s\n", wsize, edit_uint64(jcr->JobBytes, ec1)); + + /* Move any remaining data to start of buffer. 
*/ + if (cipher_ctx->buf_len > 0) { + Dmsg1(130, "Moving %u buffered bytes to start of buffer\n", cipher_ctx->buf_len); + memmove(cipher_ctx->buf, &cipher_ctx->buf[cipher_ctx->packet_len], + cipher_ctx->buf_len); + } + /* The packet was successfully written, reset the length so that the next + * packet length may be re-read by unser_crypto_packet_len() */ + cipher_ctx->packet_len = 0; + + if (cipher_ctx->buf_len >0 && !second_pass) { + second_pass = true; + goto again; + } + + /* Stop decryption */ + cipher_ctx->buf_len = 0; + cipher_ctx->packet_len = 0; + + return true; +} + +static void deallocate_cipher(r_ctx &rctx) +{ + /* Flush and deallocate previous stream's cipher context */ + if (rctx.cipher_ctx.cipher) { + flush_cipher(rctx, &rctx.bfd, &rctx.fileAddr, rctx.flags, rctx.comp_stream, &rctx.cipher_ctx); + crypto_cipher_free(rctx.cipher_ctx.cipher); + rctx.cipher_ctx.cipher = NULL; + } +} + +static void deallocate_fork_cipher(r_ctx &rctx) +{ + + /* Flush and deallocate previous stream's fork cipher context */ + if (rctx.fork_cipher_ctx.cipher) { + flush_cipher(rctx, &rctx.forkbfd, &rctx.fork_addr, rctx.fork_flags, rctx.comp_stream, &rctx.fork_cipher_ctx); + crypto_cipher_free(rctx.fork_cipher_ctx.cipher); + rctx.fork_cipher_ctx.cipher = NULL; + } +} + +static void free_signature(r_ctx &rctx) +{ + if (rctx.sig) { + crypto_sign_free(rctx.sig); + rctx.sig = NULL; + } +} + +static void free_session(r_ctx &rctx) +{ + if (rctx.cs) { + crypto_session_free(rctx.cs); + rctx.cs = NULL; + } +} + +/* + * Verify the signature for the last restored file + * Return value is either true (signature correct) + * or false (signature could not be verified). + * TODO landonf: Implement without using find_one_file and + * without re-reading the file. + */ +static bool verify_signature(r_ctx &rctx) +{ + JCR *jcr = rctx.jcr; + X509_KEYPAIR *keypair; + DIGEST *digest = NULL; + crypto_error_t err; + uint64_t saved_bytes; + crypto_digest_t signing_algorithm = have_sha2 ? + CRYPTO_DIGEST_SHA256 : CRYPTO_DIGEST_SHA1; + crypto_digest_t algorithm; + SIGNATURE *sig = rctx.sig; + + + if (!jcr->crypto.pki_sign) { + /* no signature OK */ + return true; + } + if (!sig) { + if (rctx.type == FT_REGE || rctx.type == FT_REG || rctx.type == FT_RAW) { + Jmsg1(jcr, M_ERROR, 0, _("Missing cryptographic signature for %s\n"), + jcr->last_fname); + goto get_out; + } + return true; + } + + /* Iterate through the trusted signers */ + foreach_alist(keypair, jcr->crypto.pki_signers) { + err = crypto_sign_get_digest(sig, jcr->crypto.pki_keypair, algorithm, &digest); + switch (err) { + case CRYPTO_ERROR_NONE: + Dmsg0(50, "== Got digest\n"); + /* + * We computed jcr->crypto.digest using signing_algorithm while writing + * the file. If it is not the same as the algorithm used for + * this file, punt by releasing the computed algorithm and + * computing by re-reading the file. + */ + if (algorithm != signing_algorithm) { + if (jcr->crypto.digest) { + crypto_digest_free(jcr->crypto.digest); + jcr->crypto.digest = NULL; + } + } + if (jcr->crypto.digest) { + /* Use digest computed while writing the file to verify + * the signature */ + if ((err = crypto_sign_verify(sig, keypair, jcr->crypto.digest)) != CRYPTO_ERROR_NONE) { + Dmsg1(50, "Bad signature on %s\n", jcr->last_fname); + Jmsg2(jcr, M_ERROR, 0, _("Signature validation failed for file %s: ERR=%s\n"), + jcr->last_fname, crypto_strerror(err)); + goto get_out; + } + } else { + /* Signature found, digest allocated. 
Old method, + * re-read the file and compute the digest */ + jcr->crypto.digest = digest; + + /* Checksum the entire file + * Make sure we don't modify JobBytes by saving and + * restoring it */ + saved_bytes = jcr->JobBytes; + if (find_one_file(jcr, jcr->ff, do_file_digest, jcr->last_fname, (dev_t)-1, 1) != 0) { + Jmsg(jcr, M_ERROR, 0, _("Digest one file failed for file: %s\n"), + jcr->last_fname); + jcr->JobBytes = saved_bytes; + goto get_out; + } + jcr->JobBytes = saved_bytes; + + /* Verify the signature */ + if ((err = crypto_sign_verify(sig, keypair, digest)) != CRYPTO_ERROR_NONE) { + Dmsg1(50, "Bad signature on %s\n", jcr->last_fname); + Jmsg2(jcr, M_ERROR, 0, _("Signature validation failed for file %s: ERR=%s\n"), + jcr->last_fname, crypto_strerror(err)); + goto get_out; + } + jcr->crypto.digest = NULL; + } + + /* Valid signature */ + Dmsg1(50, "Signature good on %s\n", jcr->last_fname); + crypto_digest_free(digest); + return true; + + case CRYPTO_ERROR_NOSIGNER: + /* Signature not found, try again */ + if (digest) { + crypto_digest_free(digest); + digest = NULL; + } + continue; + default: + /* Something strange happened (that shouldn't happen!)... */ + Qmsg2(jcr, M_ERROR, 0, _("Signature validation failed for %s: %s\n"), jcr->last_fname, crypto_strerror(err)); + goto get_out; + } + } + + /* No signer */ + Dmsg1(50, "Could not find a valid public key for signature on %s\n", jcr->last_fname); + +get_out: + if (digest) { + crypto_digest_free(digest); + } + return false; +} diff --git a/src/filed/restore.h b/src/filed/restore.h new file mode 100644 index 00000000..91aae066 --- /dev/null +++ b/src/filed/restore.h @@ -0,0 +1,73 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +#ifndef __RESTORE_H +#define __RESTORE_H + +struct RESTORE_DATA_STREAM { + int32_t stream; /* stream less new bits */ + char *content; /* stream data */ + uint32_t content_length; /* stream length */ +}; + +struct RESTORE_CIPHER_CTX { + CIPHER_CONTEXT *cipher; + uint32_t block_size; + + POOLMEM *buf; /* Pointer to descryption buffer */ + int32_t buf_len; /* Count of bytes currently in buf */ + int32_t packet_len; /* Total bytes in packet */ +}; + +struct r_ctx { + JCR *jcr; + int32_t stream; /* stream less new bits */ + int32_t prev_stream; /* previous stream */ + int32_t full_stream; /* full stream including new bits */ + int32_t comp_stream; /* last compressed stream found. 
needed only to restore encrypted compressed backup */ + BFILE bfd; /* File content */ + uint64_t fileAddr; /* file write address */ + uint32_t size; /* Size of file */ + int flags; /* Options for extract_data() */ + BFILE forkbfd; /* Alternative data stream */ + uint64_t fork_addr; /* Write address for alternative stream */ + int64_t fork_size; /* Size of alternate stream */ + int fork_flags; /* Options for extract_data() */ + int32_t type; /* file type FT_ */ + ATTR *attr; /* Pointer to attributes */ + bool extract; /* set when extracting */ + alist *delayed_streams; /* streams that should be restored as last */ + worker *efs; /* Windows EFS worker thread */ + int32_t count; /* Debug count */ + + SIGNATURE *sig; /* Cryptographic signature (if any) for file */ + CRYPTO_SESSION *cs; /* Cryptographic session data (if any) for file */ + RESTORE_CIPHER_CTX cipher_ctx; /* Cryptographic restore context (if any) for file */ + RESTORE_CIPHER_CTX fork_cipher_ctx; /* Cryptographic restore context (if any) for alternative stream */ +}; + +#endif + +#ifdef TEST_WORKER +bool test_write_efs_data(r_ctx &rctx, char *data, const int32_t length); +#endif + +#ifdef HAVE_WIN32 +bool win_write_efs_data(r_ctx &rctx, char *data, const int32_t length); +#endif diff --git a/src/filed/status.c b/src/filed/status.c new file mode 100644 index 00000000..dae07f50 --- /dev/null +++ b/src/filed/status.c @@ -0,0 +1,519 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Bacula File Daemon Status routines + * + * Kern Sibbald, August MMI + * + */ + +#include "bacula.h" +#include "filed.h" +#include "lib/status.h" + +extern void *start_heap; + +extern bool GetWindowsVersionString(char *buf, int maxsiz); + + +/* Forward referenced functions */ +static void list_running_jobs(STATUS_PKT *sp); +static void list_status_header(STATUS_PKT *sp); + +/* Static variables */ +static char qstatus1[] = ".status %127s\n"; +static char qstatus2[] = ".status %127s api=%d api_opts=%127s"; + +static char OKqstatus[] = "2000 OK .status\n"; +static char DotStatusJob[] = "JobId=%d JobStatus=%c JobErrors=%d\n"; + +#if defined(HAVE_WIN32) +static int privs = 0; +#endif +#ifdef WIN32_VSS +#include "vss.h" +#define VSS " VSS" +#else +#define VSS "" +#endif + +/* + * General status generator + */ +void output_status(STATUS_PKT *sp) +{ + list_status_header(sp); + list_running_jobs(sp); + list_terminated_jobs(sp); /* defined in lib/status.h */ +} + +#if defined(HAVE_LZO) +static const bool have_lzo = true; +#else +static const bool have_lzo = false; +#endif + + +static void api_list_status_header(STATUS_PKT *sp) +{ + char *p; + char buf[300]; + OutputWriter wt(sp->api_opts); + *buf = 0; + +#if defined(HAVE_WIN32) + if (!GetWindowsVersionString(buf, sizeof(buf))) { + *buf = 0; + } +#endif + + wt.start_group("header"); + wt.get_output( + OT_STRING, "name", my_name, + OT_STRING, "version", VERSION " (" BDATE ")", + OT_STRING, "uname", HOST_OS " " DISTNAME " " DISTVER, + OT_UTIME, "started", daemon_start_time, + OT_INT64, "pid", (int64_t)getpid(), + OT_INT, "jobs_run", num_jobs_run, + OT_INT, "jobs_running",job_count(), + OT_STRING, "winver", buf, + OT_INT64, "debug", debug_level, + OT_INT, "trace", get_trace(), + OT_INT64, "bwlimit", me->max_bandwidth_per_job, + OT_PLUGINS, "plugins", b_plugin_list, + OT_END); + p = wt.end_group(); + sendit(p, strlen(p), sp); +} + +static void list_status_header(STATUS_PKT *sp) +{ + POOL_MEM msg(PM_MESSAGE); + char b1[32], b2[32], b3[32], b4[32], b5[35]; + int64_t memused = (char *)sbrk(0)-(char *)start_heap; + int len; + char dt[MAX_TIME_LENGTH]; + + if (sp->api) { + api_list_status_header(sp); + return; + } + + len = Mmsg(msg, _("%s %sVersion: %s (%s) %s %s %s %s\n"), + my_name, BDEMO, VERSION, BDATE, VSS, HOST_OS, + DISTNAME, DISTVER); + sendit(msg.c_str(), len, sp); + bstrftime_nc(dt, sizeof(dt), daemon_start_time); + len = Mmsg(msg, _("Daemon started %s. 
Jobs: run=%d running=%d.\n"), + dt, num_jobs_run, job_count()); + sendit(msg.c_str(), len, sp); +#if defined(HAVE_WIN32) + char buf[300]; + if (GetWindowsVersionString(buf, sizeof(buf))) { + len = Mmsg(msg, "%s\n", buf); + sendit(msg.c_str(), len, sp); + } + memused = get_memory_info(buf, sizeof(buf)); + if (debug_level > 0) { + if (!privs) { + privs = enable_backup_privileges(NULL, 1); + } + len = Mmsg(msg, "Priv 0x%x\n", privs); + sendit(msg.c_str(), len, sp); + + /* Display detailed information that we got from get_memory_info() */ + len = Mmsg(msg, "Memory: %s\n", buf); + sendit(msg.c_str(), len, sp); + + len = Mmsg(msg, "APIs=%sOPT,%sATP,%sLPV,%sCFA,%sCFW,\n", + p_OpenProcessToken?"":"!", + p_AdjustTokenPrivileges?"":"!", + p_LookupPrivilegeValue?"":"!", + p_CreateFileA?"":"!", + p_CreateFileW?"":"!"); + sendit(msg.c_str(), len, sp); + len = Mmsg(msg, " %sWUL,%sWMKD,%sGFAA,%sGFAW,%sGFAEA,%sGFAEW,%sSFAA,%sSFAW,%sBR,%sBW,%sSPSP,\n", + p_wunlink?"":"!", + p_wmkdir?"":"!", + p_GetFileAttributesA?"":"!", + p_GetFileAttributesW?"":"!", + p_GetFileAttributesExA?"":"!", + p_GetFileAttributesExW?"":"!", + p_SetFileAttributesA?"":"!", + p_SetFileAttributesW?"":"!", + p_BackupRead?"":"!", + p_BackupWrite?"":"!", + p_SetProcessShutdownParameters?"":"!"); + sendit(msg.c_str(), len, sp); + len = Mmsg(msg, " %sWC2MB,%sMB2WC,%sFFFA,%sFFFW,%sFNFA,%sFNFW,%sSCDA,%sSCDW,\n", + p_WideCharToMultiByte?"":"!", + p_MultiByteToWideChar?"":"!", + p_FindFirstFileA?"":"!", + p_FindFirstFileW?"":"!", + p_FindNextFileA?"":"!", + p_FindNextFileW?"":"!", + p_SetCurrentDirectoryA?"":"!", + p_SetCurrentDirectoryW?"":"!"); + sendit(msg.c_str(), len, sp); + len = Mmsg(msg, " %sGCDA,%sGCDW,%sGVPNW,%sGVNFVMPW,%sLZO,%sEFS\n", + p_GetCurrentDirectoryA?"":"!", + p_GetCurrentDirectoryW?"":"!", + p_GetVolumePathNameW?"":"!", + p_GetVolumeNameForVolumeMountPointW?"":"!", + have_lzo?"":"!", + "!"); + sendit(msg.c_str(), len, sp); + } +#endif + len = Mmsg(msg, _(" Heap: heap=%s smbytes=%s max_bytes=%s bufs=%s max_bufs=%s\n"), + edit_uint64_with_commas(memused, b1), + edit_uint64_with_commas(sm_bytes, b2), + edit_uint64_with_commas(sm_max_bytes, b3), + edit_uint64_with_commas(sm_buffers, b4), + edit_uint64_with_commas(sm_max_buffers, b5)); + sendit(msg.c_str(), len, sp); + len = Mmsg(msg, _(" Sizes: boffset_t=%d size_t=%d debug=%s trace=%d " + "mode=%d,%d bwlimit=%skB/s\n"), + sizeof(boffset_t), sizeof(size_t), + edit_uint64(debug_level, b2), get_trace(), (int)DEVELOPER_MODE, 0, + edit_uint64_with_commas(me->max_bandwidth_per_job/1024, b1)); + sendit(msg.c_str(), len, sp); + if (b_plugin_list && b_plugin_list->size() > 0) { + Plugin *plugin; + int len; + pm_strcpy(msg, " Plugin: "); + foreach_alist(plugin, b_plugin_list) { + len = pm_strcat(msg, plugin->file); + /* Print plugin version when debug activated */ + if (debug_level > 0 && plugin->pinfo) { + pInfo *info = (pInfo *)plugin->pinfo; + pm_strcat(msg, "("); + pm_strcat(msg, NPRT(info->plugin_version)); + len = pm_strcat(msg, ")"); + } + if (len > 80) { + pm_strcat(msg, "\n "); + } else { + pm_strcat(msg, " "); + } + } + len = pm_strcat(msg, "\n"); + sendit(msg.c_str(), len, sp); + } +} + +/* + * List running jobs in for humans. 
+ */ +static void list_running_jobs_plain(STATUS_PKT *sp) +{ + int total_sec, inst_sec; + uint64_t total_bps, inst_bps; + POOL_MEM msg(PM_MESSAGE); + char b1[50], b2[50], b3[50], b4[50], b5[50], b6[50]; + int len; + bool found = false; + JCR *njcr; + time_t now = time(NULL); + char dt[MAX_TIME_LENGTH]; + + Dmsg0(1000, "Begin status jcr loop.\n"); + len = Mmsg(msg, _("\nRunning Jobs:\n")); + sendit(msg.c_str(), len, sp); + foreach_jcr(njcr) { + const char *vss = ""; +#ifdef WIN32_VSS + if (njcr->pVSSClient && njcr->pVSSClient->IsInitialized()) { + vss = "VSS "; + } +#endif + bstrftime_nc(dt, sizeof(dt), njcr->start_time); + if (njcr->JobId == 0) { + len = Mmsg(msg, _("Director connected %sat: %s\n"), + (njcr->dir_bsock && njcr->dir_bsock->tls)?_("using TLS "):"", + dt); + } else { + len = Mmsg(msg, _("JobId %d Job %s is running.\n"), + njcr->JobId, njcr->Job); + sendit(msg.c_str(), len, sp); + len = Mmsg(msg, _(" %s%s %s Job started: %s\n"), + vss, job_level_to_str(njcr->getJobLevel()), + job_type_to_str(njcr->getJobType()), dt); + } + sendit(msg.c_str(), len, sp); + if (njcr->JobId == 0) { + continue; + } + if (njcr->last_time == 0) { + njcr->last_time = njcr->start_time; + } + total_sec = now - njcr->start_time; + inst_sec = now - njcr->last_time; + if (total_sec <= 0) { + total_sec = 1; + } + if (inst_sec <= 0) { + inst_sec = 1; + } + /* Instanteous bps not smoothed */ + inst_bps = (njcr->JobBytes - njcr->LastJobBytes) / inst_sec; + if (njcr->LastRate <= 0) { + njcr->LastRate = inst_bps; + } + /* Smooth the instantaneous bps a bit */ + inst_bps = (2 * njcr->LastRate + inst_bps) / 3; + /* total bps (AveBytes/sec) since start of job */ + total_bps = njcr->JobBytes / total_sec; + len = Mmsg(msg, _(" Files=%s Bytes=%s AveBytes/sec=%s LastBytes/sec=%s Errors=%d\n" + " Bwlimit=%s ReadBytes=%s\n"), + edit_uint64_with_commas(njcr->JobFiles, b1), + edit_uint64_with_commas(njcr->JobBytes, b2), + edit_uint64_with_commas(total_bps, b3), + edit_uint64_with_commas(inst_bps, b4), + njcr->JobErrors, edit_uint64_with_commas(njcr->max_bandwidth, b5), + edit_uint64_with_commas(njcr->ReadBytes, b6)); + sendit(msg.c_str(), len, sp); + + if (njcr->is_JobType(JT_RESTORE)) { + if (njcr->ExpectedFiles > 0) { + len = Mmsg(msg, _(" Files: Restored=%s Expected=%s Completed=%d%%\n"), + edit_uint64_with_commas(njcr->num_files_examined, b1), + edit_uint64_with_commas(njcr->ExpectedFiles, b2), + (100*njcr->num_files_examined)/njcr->ExpectedFiles); + + } else { + len = Mmsg(msg, _(" Files: Restored=%s\n"), + edit_uint64_with_commas(njcr->num_files_examined, b1)); + } + } else { + len = Mmsg(msg, _(" Files: Examined=%s Backed up=%s\n"), + edit_uint64_with_commas(njcr->num_files_examined, b1), + edit_uint64_with_commas(njcr->JobFiles, b2)); + } + /* Update only every 10 seconds */ + if (now - njcr->last_time > 10) { + njcr->LastRate = inst_bps; + njcr->LastJobBytes = njcr->JobBytes; + njcr->last_time = now; + } + sendit(msg.c_str(), len, sp); + if (njcr->JobFiles > 0) { + njcr->lock(); + len = Mmsg(msg, _(" Processing file: %s\n"), njcr->last_fname); + njcr->unlock(); + sendit(msg.c_str(), len, sp); + } + + found = true; + if (njcr->store_bsock) { + len = Mmsg(msg, " SDReadSeqNo=%" lld " fd=%d SDtls=%d\n", + njcr->store_bsock->read_seqno, njcr->store_bsock->m_fd, + (njcr->store_bsock->tls)?1:0); + sendit(msg.c_str(), len, sp); + } else { + len = Mmsg(msg, _(" SDSocket closed.\n")); + sendit(msg.c_str(), len, sp); + } + } + endeach_jcr(njcr); + + if (!found) { + len = Mmsg(msg, _("No Jobs running.\n")); + 
sendit(msg.c_str(), len, sp);
+   }
+   sendit(_("====\n"), 5, sp);
+}
+
+/*
+ * List running jobs for Bat or Bweb in a format
+ * simpler to parse. Be careful when changing this
+ * subroutine.
+ */
+static void list_running_jobs_api(STATUS_PKT *sp)
+{
+   OutputWriter ow(sp->api_opts);
+   int sec, bps;
+   char *p;
+   JCR *njcr;
+
+   /* API v1, edit with comma, space before the name, sometimes ' ' as separator */
+
+   foreach_jcr(njcr) {
+      int vss = 0;
+#ifdef WIN32_VSS
+      if (njcr->pVSSClient && njcr->pVSSClient->IsInitialized()) {
+         vss = 1;
+      }
+#endif
+      p = ow.get_output(OT_CLEAR, OT_START_OBJ, OT_END);
+
+      if (njcr->JobId == 0) {
+         int val = (njcr->dir_bsock && njcr->dir_bsock->tls)?1:0;
+         ow.get_output(OT_UTIME, "DirectorConnected", njcr->start_time,
+                       OT_INT, "DirTLS", val,
+                       OT_END);
+      } else {
+         ow.get_output(OT_INT32, "JobId", njcr->JobId,
+                       OT_STRING, "Job", njcr->Job,
+                       OT_INT, "VSS", vss,
+                       OT_JOBLEVEL,"Level", njcr->getJobLevel(),
+                       OT_JOBTYPE, "Type", njcr->getJobType(),
+                       OT_JOBSTATUS, "Status", njcr->getJobStatus(),
+                       OT_UTIME, "StartTime", njcr->start_time,
+                       OT_END);
+
+      }
+      sendit(p, strlen(p), sp);
+      if (njcr->JobId == 0) {
+         continue;
+      }
+      sec = time(NULL) - njcr->start_time;
+      if (sec <= 0) {
+         sec = 1;
+      }
+      bps = (int)(njcr->JobBytes / sec);
+      ow.get_output(OT_CLEAR,
+                    OT_INT32, "JobFiles", njcr->JobFiles,
+                    OT_SIZE, "JobBytes", njcr->JobBytes,
+                    OT_INT, "Bytes/sec", bps,
+                    OT_INT, "Errors", njcr->JobErrors,
+                    OT_INT64, "Bwlimit", njcr->max_bandwidth,
+                    OT_SIZE, "ReadBytes", njcr->ReadBytes,
+                    OT_END);
+
+      ow.get_output(OT_INT32, "Files Examined", njcr->num_files_examined, OT_END);
+
+      if (njcr->is_JobType(JT_RESTORE) && njcr->ExpectedFiles > 0) {
+         ow.get_output(OT_INT32, "Expected Files", njcr->ExpectedFiles,
+                       OT_INT32, "Percent Complete", (100*njcr->num_files_examined)/njcr->ExpectedFiles,
+                       OT_END);
+      }
+
+      sendit(p, strlen(p), sp);
+      ow.get_output(OT_CLEAR, OT_END);
+
+      if (njcr->JobFiles > 0) {
+         njcr->lock();
+         ow.get_output(OT_STRING, "Processing file", njcr->last_fname, OT_END);
+         njcr->unlock();
+      }
+
+      if (njcr->store_bsock) {
+         int val = (njcr->store_bsock->tls)?1:0;
+         ow.get_output(OT_INT64, "SDReadSeqNo", (int64_t)njcr->store_bsock->read_seqno,
+                       OT_INT, "fd", njcr->store_bsock->m_fd,
+                       OT_INT, "SDtls", val,
+                       OT_END);
+      } else {
+         ow.get_output(OT_STRING, "SDSocket", "closed", OT_END);
+      }
+      ow.get_output(OT_END_OBJ, OT_END);
+      sendit(p, strlen(p), sp);
+   }
+   endeach_jcr(njcr);
+}
+
+static void list_running_jobs(STATUS_PKT *sp)
+{
+   if (sp->api) {
+      list_running_jobs_api(sp);
+   } else {
+      list_running_jobs_plain(sp);
+   }
+}
+
+/*
+ * Status command from Director
+ */
+int status_cmd(JCR *jcr)
+{
+   BSOCK *user = jcr->dir_bsock;
+   STATUS_PKT sp;
+
+   user->fsend("\n");
+   sp.bs = user;
+   sp.api = false;                 /* no API output */
+   output_status(&sp);
+
+   user->signal(BNET_EOD);
+   return 1;
+}
+
+/*
+ * .status command from Director
+ */
+int qstatus_cmd(JCR *jcr)
+{
+   BSOCK *dir = jcr->dir_bsock;
+   POOLMEM *cmd;
+   JCR *njcr;
+   s_last_job* job;
+   STATUS_PKT sp;
+
+   sp.bs = dir;
+   cmd = get_memory(dir->msglen+1);
+
+   if (sscanf(dir->msg, qstatus2, cmd, &sp.api, sp.api_opts) != 3) {
+      if (sscanf(dir->msg, qstatus1, cmd) != 1) {
+         pm_strcpy(&jcr->errmsg, dir->msg);
+         Jmsg1(jcr, M_FATAL, 0, _("Bad .status command: %s\n"), jcr->errmsg);
+         dir->fsend(_("2900 Bad .status command, missing argument.\n"));
+         dir->signal(BNET_EOD);
+         free_memory(cmd);
+         return 0;
+      }
+   }
+   unbash_spaces(cmd);
+
+   if (strcasecmp(cmd, "current") == 0) {
+      dir->fsend(OKqstatus, cmd); + 
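+      /* Answer with one DotStatusJob line (JobId, status character, error count) for each running job */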
foreach_jcr(njcr) { + if (njcr->JobId != 0) { + dir->fsend(DotStatusJob, njcr->JobId, njcr->JobStatus, njcr->JobErrors); + } + } + endeach_jcr(njcr); + } else if (strcasecmp(cmd, "last") == 0) { + dir->fsend(OKqstatus, cmd); + if ((last_jobs) && (last_jobs->size() > 0)) { + job = (s_last_job*)last_jobs->last(); + dir->fsend(DotStatusJob, job->JobId, job->JobStatus, job->Errors); + } + } else if (strcasecmp(cmd, "header") == 0) { + sp.api = true; + list_status_header(&sp); + } else if (strcasecmp(cmd, "running") == 0) { + sp.api = true; + list_running_jobs(&sp); + } else if (strcasecmp(cmd, "terminated") == 0) { + sp.api = MAX(sp.api, 1); + list_terminated_jobs(&sp); /* defined in lib/status.h */ + } else { + pm_strcpy(&jcr->errmsg, dir->msg); + Jmsg1(jcr, M_FATAL, 0, _("Bad .status command: %s\n"), jcr->errmsg); + dir->fsend(_("2900 Bad .status command, wrong argument.\n")); + dir->signal(BNET_EOD); + free_memory(cmd); + return 0; + } + + dir->signal(BNET_EOD); + free_memory(cmd); + return 1; +} diff --git a/src/filed/verify.c b/src/filed/verify.c new file mode 100644 index 00000000..427f711e --- /dev/null +++ b/src/filed/verify.c @@ -0,0 +1,375 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2015 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula File Daemon verify.c Verify files. + * + * Kern Sibbald, October MM + * + */ + +#include "bacula.h" +#include "filed.h" + +static int verify_file(JCR *jcr, FF_PKT *ff_pkt, bool); +static int read_digest(BFILE *bfd, DIGEST *digest, JCR *jcr); + +/* + * Find all the requested files and send attributes + * to the Director. + * + */ +void do_verify(JCR *jcr) +{ + jcr->setJobStatus(JS_Running); + jcr->buf_size = DEFAULT_NETWORK_BUFFER_SIZE; + if ((jcr->big_buf = (char *) malloc(jcr->buf_size)) == NULL) { + Jmsg1(jcr, M_ABORT, 0, _("Cannot malloc %d network read buffer\n"), + DEFAULT_NETWORK_BUFFER_SIZE); + } + set_find_options((FF_PKT *)jcr->ff, jcr->incremental, jcr->mtime); + Dmsg0(10, "Start find files\n"); + /* Subroutine verify_file() is called for each file */ + find_files(jcr, (FF_PKT *)jcr->ff, verify_file, NULL); + Dmsg0(10, "End find files\n"); + + if (jcr->big_buf) { + free(jcr->big_buf); + jcr->big_buf = NULL; + } + jcr->setJobStatus(JS_Terminated); +} + +/* + * Called here by find() for each file. 
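+ * Returns 1 to continue the traversal and 0 on fatal errors (canceled job or a network error while sending to the Director).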
+ * + * Find the file, compute the MD5 or SHA1 and send it back to the Director + */ +static int verify_file(JCR *jcr, FF_PKT *ff_pkt, bool top_level) +{ + char attribs[MAXSTRING]; + char attribsEx[MAXSTRING]; + int digest_stream = STREAM_NONE; + int stat; + DIGEST *digest = NULL; + BSOCK *dir; + + if (job_canceled(jcr)) { + return 0; + } + + dir = jcr->dir_bsock; + jcr->num_files_examined++; /* bump total file count */ + + switch (ff_pkt->type) { + case FT_LNKSAVED: /* Hard linked, file already saved */ + Dmsg2(30, "FT_LNKSAVED saving: %s => %s\n", ff_pkt->fname, ff_pkt->link); + break; + case FT_REGE: + Dmsg1(30, "FT_REGE saving: %s\n", ff_pkt->fname); + break; + case FT_REG: + Dmsg1(30, "FT_REG saving: %s\n", ff_pkt->fname); + break; + case FT_LNK: + Dmsg2(30, "FT_LNK saving: %s -> %s\n", ff_pkt->fname, ff_pkt->link); + break; + case FT_DIRBEGIN: + jcr->num_files_examined--; /* correct file count */ + return 1; /* ignored */ + case FT_REPARSE: + case FT_JUNCTION: + case FT_DIREND: + Dmsg1(30, "FT_DIR saving: %s\n", ff_pkt->fname); + break; + case FT_SPEC: + Dmsg1(30, "FT_SPEC saving: %s\n", ff_pkt->fname); + break; + case FT_RAW: + Dmsg1(30, "FT_RAW saving: %s\n", ff_pkt->fname); + break; + case FT_FIFO: + Dmsg1(30, "FT_FIFO saving: %s\n", ff_pkt->fname); + break; + case FT_NOACCESS: { + berrno be; + be.set_errno(ff_pkt->ff_errno); + Jmsg(jcr, M_NOTSAVED, 1, _(" Could not access %s: ERR=%s\n"), ff_pkt->fname, be.bstrerror()); + jcr->JobErrors++; + return 1; + } + case FT_NOFOLLOW: { + berrno be; + be.set_errno(ff_pkt->ff_errno); + Jmsg(jcr, M_NOTSAVED, 1, _(" Could not follow link %s: ERR=%s\n"), ff_pkt->fname, be.bstrerror()); + jcr->JobErrors++; + return 1; + } + case FT_NOSTAT: { + berrno be; + be.set_errno(ff_pkt->ff_errno); + Jmsg(jcr, M_NOTSAVED, 1, _(" Could not stat %s: ERR=%s\n"), ff_pkt->fname, be.bstrerror()); + jcr->JobErrors++; + return 1; + } + case FT_DIRNOCHG: + case FT_NOCHG: + Jmsg(jcr, M_SKIPPED, 1, _(" Unchanged file skipped: %s\n"), ff_pkt->fname); + return 1; + case FT_ISARCH: + Jmsg(jcr, M_SKIPPED, 1, _(" Archive file skipped: %s\n"), ff_pkt->fname); + return 1; + case FT_NORECURSE: + Jmsg(jcr, M_SKIPPED, 1, _(" Recursion turned off. Directory skipped: %s\n"), ff_pkt->fname); + ff_pkt->type = FT_DIREND; /* directory entry was backed up */ + break; + case FT_NOFSCHG: + Jmsg(jcr, M_SKIPPED, 1, _(" File system change prohibited. Directory skipped: %s\n"), ff_pkt->fname); + return 1; + case FT_PLUGIN_CONFIG: + case FT_RESTORE_FIRST: + return 1; /* silently skip */ + case FT_NOOPEN: { + berrno be; + be.set_errno(ff_pkt->ff_errno); + Jmsg(jcr, M_NOTSAVED, 1, _(" Could not open directory %s: ERR=%s\n"), ff_pkt->fname, be.bstrerror()); + jcr->JobErrors++; + return 1; + } + default: + Jmsg(jcr, M_NOTSAVED, 0, _(" Unknown file type %d: %s\n"), ff_pkt->type, ff_pkt->fname); + jcr->JobErrors++; + return 1; + } + + /* Encode attributes and possibly extend them */ + encode_stat(attribs, &ff_pkt->statp, sizeof(ff_pkt->statp), ff_pkt->LinkFI, 0); + encode_attribsEx(jcr, attribsEx, ff_pkt); + + jcr->lock(); + jcr->JobFiles++; /* increment number of files sent */ + pm_strcpy(jcr->last_fname, ff_pkt->fname); + jcr->unlock(); + + /* + * Send file attributes to Director + * File_index + * Stream + * Verify Options + * Filename (full path) + * Encoded attributes + * Link name (if type==FT_LNK) + * For a directory, link is the same as fname, but with trailing + * slash. For a linked file, link is the link. 
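+ * The leading fields are space separated; the filename, encoded attributes and link are each terminated by a binary zero (the %c arguments passed 0 below).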
+ */ + /* Send file attributes to Director (note different format than for Storage) */ + Dmsg2(400, "send ATTR inx=%d fname=%s\n", jcr->JobFiles, ff_pkt->fname); + if (ff_pkt->type == FT_LNK || ff_pkt->type == FT_LNKSAVED) { + stat = dir->fsend("%d %d %s %s%c%s%c%s%c", jcr->JobFiles, + STREAM_UNIX_ATTRIBUTES, ff_pkt->VerifyOpts, ff_pkt->fname, + 0, attribs, 0, ff_pkt->link, 0); + } else if (ff_pkt->type == FT_DIREND || ff_pkt->type == FT_REPARSE || + ff_pkt->type == FT_JUNCTION) { + /* Here link is the canonical filename (i.e. with trailing slash) */ + stat = dir->fsend("%d %d %s %s%c%s%c%c", jcr->JobFiles, + STREAM_UNIX_ATTRIBUTES, ff_pkt->VerifyOpts, ff_pkt->link, + 0, attribs, 0, 0); + } else { + stat = dir->fsend("%d %d %s %s%c%s%c%c", jcr->JobFiles, + STREAM_UNIX_ATTRIBUTES, ff_pkt->VerifyOpts, ff_pkt->fname, + 0, attribs, 0, 0); + } + Dmsg2(20, "bfiled>bdird: attribs len=%d: msg=%s\n", dir->msglen, dir->msg); + if (!stat) { + Jmsg(jcr, M_FATAL, 0, _("Network error in send to Director: ERR=%s\n"), dir->bstrerror()); + return 0; + } + + /* + * The remainder of the function is all about getting the checksum. + * First we initialise, then we read files, other streams and Finder Info. + */ + if (ff_pkt->type != FT_LNKSAVED && (S_ISREG(ff_pkt->statp.st_mode) && + ff_pkt->flags & (FO_MD5|FO_SHA1|FO_SHA256|FO_SHA512))) { + /* + * Create our digest context. If this fails, the digest will be set to NULL + * and not used. + */ + if (ff_pkt->flags & FO_MD5) { + digest = crypto_digest_new(jcr, CRYPTO_DIGEST_MD5); + digest_stream = STREAM_MD5_DIGEST; + + } else if (ff_pkt->flags & FO_SHA1) { + digest = crypto_digest_new(jcr, CRYPTO_DIGEST_SHA1); + digest_stream = STREAM_SHA1_DIGEST; + + } else if (ff_pkt->flags & FO_SHA256) { + digest = crypto_digest_new(jcr, CRYPTO_DIGEST_SHA256); + digest_stream = STREAM_SHA256_DIGEST; + + } else if (ff_pkt->flags & FO_SHA512) { + digest = crypto_digest_new(jcr, CRYPTO_DIGEST_SHA512); + digest_stream = STREAM_SHA512_DIGEST; + } + + /* Did digest initialization fail? */ + if (digest_stream != STREAM_NONE && digest == NULL) { + Jmsg(jcr, M_WARNING, 0, _("%s digest initialization failed\n"), + stream_to_ascii(digest_stream)); + } + + /* compute MD5 or SHA1 hash */ + if (digest) { + char md[CRYPTO_DIGEST_MAX_SIZE]; + uint32_t size; + + size = sizeof(md); + + if (digest_file(jcr, ff_pkt, digest) != 0) { + jcr->JobErrors++; + goto good_rtn; + } + + if (crypto_digest_finalize(digest, (uint8_t *)md, &size)) { + char *digest_buf; + const char *digest_name; + + digest_buf = (char *)malloc(BASE64_SIZE(size)); + digest_name = crypto_digest_name(digest); + + bin_to_base64(digest_buf, BASE64_SIZE(size), md, size, true); + Dmsg3(400, "send inx=%d %s=%s\n", jcr->JobFiles, digest_name, digest_buf); + dir->fsend("%d %d %s *%s-%d*", jcr->JobFiles, digest_stream, digest_buf, + digest_name, jcr->JobFiles); + Dmsg3(20, "bfiled>bdird: %s len=%d: msg=%s\n", digest_name, + dir->msglen, dir->msg); + + free(digest_buf); + } + } + } + +good_rtn: + if (digest) { + crypto_digest_free(digest); + } + return 1; +} + +/* + * Compute message digest for the file specified by ff_pkt. + * In case of errors we need the job control record and file name. + */ +int digest_file(JCR *jcr, FF_PKT *ff_pkt, DIGEST *digest) +{ + BFILE bfd; + + Dmsg0(50, "=== digest_file\n"); + binit(&bfd); + + if (ff_pkt->statp.st_size > 0 || ff_pkt->type == FT_RAW + || ff_pkt->type == FT_FIFO) { + int noatime = ff_pkt->flags & FO_NOATIME ? 
O_NOATIME : 0; + if ((bopen(&bfd, ff_pkt->fname, O_RDONLY | O_BINARY | noatime, 0)) < 0) { + ff_pkt->ff_errno = errno; + berrno be; + be.set_errno(bfd.berrno); + Dmsg2(100, "Cannot open %s: ERR=%s\n", ff_pkt->fname, be.bstrerror()); + Jmsg(jcr, M_ERROR, 1, _(" Cannot open %s: ERR=%s.\n"), + ff_pkt->fname, be.bstrerror()); + return 1; + } + read_digest(&bfd, digest, jcr); + bclose(&bfd); + } + +#ifdef HAVE_DARWIN_OS + /* Open resource fork if necessary */ + if (ff_pkt->flags & FO_HFSPLUS && ff_pkt->hfsinfo.rsrclength > 0) { + if (bopen_rsrc(&bfd, ff_pkt->fname, O_RDONLY | O_BINARY, 0) < 0) { + ff_pkt->ff_errno = errno; + berrno be; + Jmsg(jcr, M_ERROR, -1, _(" Cannot open resource fork for %s: ERR=%s.\n"), + ff_pkt->fname, be.bstrerror()); + if (is_bopen(&ff_pkt->bfd)) { + bclose(&ff_pkt->bfd); + } + return 1; + } + read_digest(&bfd, digest, jcr); + bclose(&bfd); + } + if (digest && ff_pkt->flags & FO_HFSPLUS) { + crypto_digest_update(digest, (uint8_t *)ff_pkt->hfsinfo.fndrinfo, 32); + } +#endif + return 0; +} + +/* + * Read message digest of bfd, updating digest + * In case of errors we need the job control record and file name. + */ +static int read_digest(BFILE *bfd, DIGEST *digest, JCR *jcr) +{ + char buf[DEFAULT_NETWORK_BUFFER_SIZE]; + int64_t n; + int64_t bufsiz = (int64_t)sizeof(buf); + FF_PKT *ff_pkt = (FF_PKT *)jcr->ff; + uint64_t fileAddr = 0; /* file address */ + + + Dmsg0(50, "=== read_digest\n"); + while ((n=bread(bfd, buf, bufsiz)) > 0) { + /* Check for sparse blocks */ + if (ff_pkt->flags & FO_SPARSE) { + bool allZeros = false; + if ((n == bufsiz && + fileAddr+n < (uint64_t)ff_pkt->statp.st_size) || + ((ff_pkt->type == FT_RAW || ff_pkt->type == FT_FIFO) && + (uint64_t)ff_pkt->statp.st_size == 0)) { + allZeros = is_buf_zero(buf, bufsiz); + } + fileAddr += n; /* update file address */ + /* Skip any block of all zeros */ + if (allZeros) { + continue; /* skip block of zeros */ + } + } + + crypto_digest_update(digest, (uint8_t *)buf, n); + + /* Can be used by BaseJobs or with accurate, update only for Verify + * jobs + */ + if (jcr->getJobType() == JT_VERIFY) { + jcr->JobBytes += n; + } + jcr->ReadBytes += n; + } + if (n < 0) { + berrno be; + be.set_errno(bfd->berrno); + Dmsg2(100, "Error reading file %s: ERR=%s\n", jcr->last_fname, be.bstrerror()); + Jmsg(jcr, M_ERROR, 1, _("Error reading file %s: ERR=%s\n"), + jcr->last_fname, be.bstrerror()); + jcr->JobErrors++; + return -1; + } + return 0; +} diff --git a/src/filed/verify_vol.c b/src/filed/verify_vol.c new file mode 100644 index 00000000..5fba40c3 --- /dev/null +++ b/src/filed/verify_vol.c @@ -0,0 +1,584 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Bacula File Daemon verify-vol.c Verify files on a Volume + * versus attributes in Catalog + * + * Kern Sibbald, July MMII + * + * Data verification added by Eric Bollengier + */ + +#include "bacula.h" +#include "filed.h" +#include "findlib/win32filter.h" + +#if defined(HAVE_LIBZ) +const bool have_libz = true; +#else +const bool have_libz = false; +#endif + +#ifdef HAVE_LZO +const bool have_lzo = true; +#else +const bool have_lzo = false; +#endif + +/* Context used during Verify Data job. We use it in the + * verify loop to compute checksums and check attributes. + */ +class v_ctx { +public: + JCR *jcr; + int32_t stream; /* stream less new bits */ + int32_t prev_stream; /* previous stream */ + int32_t full_stream; /* full stream including new bits */ + int32_t type; /* file type FT_ */ + int64_t size; /* current file size */ + ATTR *attr; /* Pointer to attributes */ + + bool check_size; /* Check or not the size attribute */ + bool check_chksum; /* Check the checksum */ + + crypto_digest_t digesttype; + Win32Filter win32filter; + char digest[BASE64_SIZE(CRYPTO_DIGEST_MAX_SIZE)]; /* current digest */ + + v_ctx(JCR *ajcr) : + jcr(ajcr), stream(0), prev_stream(0), full_stream(0), type(0), size(-1), + attr(new_attr(jcr)), check_size(false), check_chksum(false), + digesttype(CRYPTO_DIGEST_NONE), win32filter() + { + *digest = 0; + scan_fileset(); + }; + ~v_ctx() { + free_attr(attr); + }; + /* Call this function when we change the file + * We check the st_size and we compute the digest + */ + bool close_previous_stream(); + + /* Call when we have a sparse record */ + void skip_sparse_header(char **data, uint32_t *length); + + /* Scan the fileset to know if we want to check checksums or st_size */ + void scan_fileset(); + + /* Check the catalog to locate the file */ + void check_accurate(); + + /* In cleanup, we reset the current file size to -1 */ + void reset_size() { + size = -1; + }; + + /* Used for sparse files */ + void set_size(int64_t val) { + size = MAX(size, val); + }; + + void update_size(int64_t val) { + if (size == -1) { + size = 0; + } + size += val; + }; + + void update_checksum(char *wbuf, int32_t wsize) { + if (wsize > 0 && check_chksum) { + if (!jcr->crypto.digest) { + jcr->crypto.digest = crypto_digest_new(jcr, digesttype); + } + crypto_digest_update(jcr->crypto.digest, (uint8_t *)wbuf, wsize); + } + }; +}; + +/* Data received from Storage Daemon */ +static char rec_header[] = "rechdr %ld %ld %ld %ld %ld"; + +/* Forward referenced functions */ + +/* We don't know in advance which digest mode is needed, we do not + * want to store files on disk either to check afterward. So, we read + * the fileset definition and we try to guess the digest that will be + * used. If the FileSet uses multiple digests, it will not work. 
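+ *
+ * Illustrative sketch (hypothetical FileSet, not taken from the original
+ * source): with an Options clause containing verify = pins5 and
+ * signature = MD5, the scan below ends with check_size = true,
+ * check_chksum = true (the '5' character requests an MD5 check) and
+ * digesttype = CRYPTO_DIGEST_MD5.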
+ */
+void v_ctx::scan_fileset()
+{
+ findFILESET *fileset;
+
+ check_size = check_chksum = false;
+ digesttype = CRYPTO_DIGEST_NONE;
+
+ if (!jcr->ff || !jcr->ff->fileset) {
+ return;
+ }
+
+ fileset = jcr->ff->fileset;
+
+ for (int i=0; i<fileset->include_list.size(); i++) {
+ findINCEXE *incexe = (findINCEXE *)fileset->include_list.get(i);
+
+ for (int j=0; j<incexe->opts_list.size(); j++) {
+ findFOPTS *fo = (findFOPTS *)incexe->opts_list.get(j);
+ check_size = (strchr(fo->VerifyOpts, 's') != NULL);
+ if ((strchr(fo->VerifyOpts, '1') != NULL) ||
+ (strchr(fo->VerifyOpts, '5') != NULL))
+ {
+ check_chksum = true;
+ }
+
+ if (fo->flags & FO_MD5) {
+ digesttype = CRYPTO_DIGEST_MD5;
+ return;
+ }
+ if (fo->flags & FO_SHA1) {
+ digesttype = CRYPTO_DIGEST_SHA1;
+ return;
+ }
+ if (fo->flags & FO_SHA256) {
+ digesttype = CRYPTO_DIGEST_SHA256;
+ return;
+ }
+ if (fo->flags & FO_SHA512) {
+ digesttype = CRYPTO_DIGEST_SHA512;
+ return;
+ }
+ }
+ }
+ digesttype = CRYPTO_DIGEST_NONE;
+ if (check_chksum) {
+ Jmsg(jcr, M_WARNING, 0, _("Checksum verification required in Verify FileSet option, but no Signature found in the FileSet\n"));
+ check_chksum = false;
+ }
+}
+
+/* Compute the file size for sparse records and adjust the data */
+void v_ctx::skip_sparse_header(char **data, uint32_t *length)
+{
+ unser_declare;
+ uint64_t faddr;
+ unser_begin(*data, OFFSET_FADDR_SIZE);
+ unser_uint64(faddr);
+
+ /* For sparse, we assume that the file is at least big as faddr */
+ set_size(faddr);
+
+ *data += OFFSET_FADDR_SIZE;
+ *length -= OFFSET_FADDR_SIZE;
+}
+
+void v_ctx::check_accurate()
+{
+ attr->fname = jcr->last_fname; /* struct stat is still valid, but not the fname */
+ if (accurate_check_file(jcr, attr, digest)) {
+ jcr->setJobStatus(JS_Differences);
+ }
+}
+
+/*
+ * If extracting, close any previous stream
+ */
+bool v_ctx::close_previous_stream()
+{
+ bool rtn = true;
+ uint8_t buf[CRYPTO_DIGEST_MAX_SIZE];
+ uint32_t len = CRYPTO_DIGEST_MAX_SIZE;
+ char ed1[50], ed2[50];
+
+ /* Reset the win32 filter that strips header stream out of the file */
+ win32filter.init();
+
+ /* Check the size if possible */
+ if (check_size && size >= 0) {
+ if (attr->type == FT_REG && size != (int64_t)attr->statp.st_size) {
+ Dmsg1(50, "Size comparison failed for %s\n", jcr->last_fname);
+ Jmsg(jcr, M_INFO, 0,
+ _(" st_size differs on \"%s\". 
Vol: %s File: %s\n"), + jcr->last_fname, + edit_int64(size, ed1), + edit_int64((int64_t)attr->statp.st_size, ed2)); + jcr->setJobStatus(JS_Differences); + } + reset_size(); + } + + /* Compute the digest and store it */ + *digest = 0; + if (jcr->crypto.digest) { + if (!crypto_digest_finalize(jcr->crypto.digest, buf, &len)) { + Dmsg1(50, "Unable to finalize digest for %s\n", jcr->last_fname); + rtn = false; + + } else { + bin_to_base64(digest, sizeof(digest), (char *)buf, len, true); + } + crypto_digest_free(jcr->crypto.digest); + jcr->crypto.digest = NULL; + } + return rtn; +} + +/* + * Verify attributes or data of the requested files on the Volume + * + */ +void do_verify_volume(JCR *jcr) +{ + BSOCK *sd, *dir; + uint32_t size; + uint32_t VolSessionId, VolSessionTime, file_index; + char digest[BASE64_SIZE(CRYPTO_DIGEST_MAX_SIZE)]; + int stat; + int bget_ret = 0; + char *wbuf; /* write buffer */ + uint32_t wsize; /* write size */ + uint32_t rsize; /* read size */ + bool msg_encrypt = false, do_check_accurate=false; + v_ctx vctx(jcr); + ATTR *attr = vctx.attr; + + sd = jcr->store_bsock; + if (!sd) { + Jmsg(jcr, M_FATAL, 0, _("Storage command not issued before Verify.\n")); + jcr->setJobStatus(JS_FatalError); + return; + } + dir = jcr->dir_bsock; + jcr->setJobStatus(JS_Running); + + LockRes(); + CLIENT *client = (CLIENT *)GetNextRes(R_CLIENT, NULL); + UnlockRes(); + uint32_t buf_size; + if (client) { + buf_size = client->max_network_buffer_size; + } else { + buf_size = 0; /* use default */ + } + if (!sd->set_buffer_size(buf_size, BNET_SETBUF_WRITE)) { + jcr->setJobStatus(JS_FatalError); + return; + } + jcr->buf_size = sd->msglen; + + /* use the same buffer size to decompress both gzip and lzo */ + if (have_libz || have_lzo) { + uint32_t compress_buf_size = jcr->buf_size + 12 + ((jcr->buf_size+999) / 1000) + 100; + jcr->compress_buf = get_memory(compress_buf_size); + jcr->compress_buf_size = compress_buf_size; + } + + GetMsg *fdmsg; + fdmsg = New(GetMsg(jcr, sd, rec_header, GETMSG_MAX_MSG_SIZE)); + + fdmsg->start_read_sock(); + bmessage *bmsg = fdmsg->new_msg(); /* get a message, to exchange with fdmsg */ + + /* + * Get a record from the Storage daemon + */ + while ((bget_ret = fdmsg->bget_msg(&bmsg)) >= 0 && !job_canceled(jcr)) { + /* Remember previous stream type */ + vctx.prev_stream = vctx.stream; + + /* + * First we expect a Stream Record Header + */ + if (sscanf(bmsg->rbuf, rec_header, &VolSessionId, &VolSessionTime, &file_index, + &vctx.full_stream, &size) != 5) { + Jmsg1(jcr, M_FATAL, 0, _("Record header scan error: %s\n"), bmsg->rbuf); + goto bail_out; + } + vctx.stream = vctx.full_stream & STREAMMASK_TYPE; + Dmsg4(30, "Got hdr: FilInx=%d FullStream=%d Stream=%d size=%d.\n", + file_index, vctx.full_stream, vctx.stream, size); + + /* + * Now we expect the Stream Data + */ + if ((bget_ret = fdmsg->bget_msg(&bmsg)) < 0) { + if (bget_ret != BNET_EXT_TERMINATE) { + Jmsg1(jcr, M_FATAL, 0, _("Data record error. 
ERR=%s\n"), sd->bstrerror()); + } else { + /* The error has been handled somewhere else, just quit */ + } + goto bail_out; + } + if (size != ((uint32_t)bmsg->origlen)) { + Jmsg2(jcr, M_FATAL, 0, _("Actual data size %d not same as header %d\n"), bmsg->origlen, size); + goto bail_out; + } + Dmsg2(30, "Got stream data %s, len=%d\n", stream_to_ascii(vctx.stream), bmsg->rbuflen); + + /* File Attributes stream */ + switch (vctx.stream) { + case STREAM_UNIX_ATTRIBUTES: + case STREAM_UNIX_ATTRIBUTES_EX: + Dmsg0(400, "Stream=Unix Attributes.\n"); + if (!vctx.close_previous_stream()) { + goto bail_out; + } + if (do_check_accurate) { + vctx.check_accurate(); + } + /* Next loop, we want to check the file (or we do it with the md5) */ + do_check_accurate = true; + + /* + * Unpack attributes and do sanity check them + */ + if (!unpack_attributes_record(jcr, vctx.stream, + bmsg->rbuf, bmsg->rbuflen, attr)) { + goto bail_out; + } + + attr->data_stream = decode_stat(attr->attr, &attr->statp, + sizeof(attr->statp), &attr->LinkFI); + + jcr->lock(); + jcr->JobFiles++; + jcr->num_files_examined++; + pm_strcpy(jcr->last_fname, attr->fname); /* last file examined */ + jcr->unlock(); + + if (jcr->getJobLevel() == L_VERIFY_VOLUME_TO_CATALOG) { + /* + * Send file attributes to Director + * File_index + * Stream + * Verify Options + * Filename (full path) + * Encoded attributes + * Link name (if type==FT_LNK) + * For a directory, link is the same as fname, but with trailing + * slash. For a linked file, link is the link. + */ + /* Send file attributes to Director */ + Dmsg2(200, "send ATTR inx=%d fname=%s\n", jcr->JobFiles, attr->fname); + if (attr->type == FT_LNK || attr->type == FT_LNKSAVED) { + stat = dir->fsend("%d %d %s %s%c%s%c%s%c", jcr->JobFiles, + STREAM_UNIX_ATTRIBUTES, "pinsug5", attr->fname, + 0, attr->attr, 0, attr->lname, 0); + /* for a deleted record, we set fileindex=0 */ + } else if (attr->type == FT_DELETED) { + stat = dir->fsend("%d %d %s %s%c%s%c%c", 0, + STREAM_UNIX_ATTRIBUTES, "pinsug5", attr->fname, + 0, attr->attr, 0, 0); + } else { + stat = dir->fsend("%d %d %s %s%c%s%c%c", jcr->JobFiles, + STREAM_UNIX_ATTRIBUTES, "pinsug5", attr->fname, + 0, attr->attr, 0, 0); + } + Dmsg2(200, "bfiled>bdird: attribs len=%d: msg=%s\n", dir->msglen, dir->msg); + if (!stat) { + Jmsg(jcr, M_FATAL, 0, _("Network error in send to Director: ERR=%s\n"), dir->bstrerror()); + goto bail_out; + } + } + break; + + /* + * Restore stream object is counted, but not restored here + */ + case STREAM_RESTORE_OBJECT: + jcr->lock(); + jcr->JobFiles++; + jcr->num_files_examined++; + jcr->unlock(); + break; + + default: + break; + } + + const char *digest_code = NULL; + + switch(vctx.stream) { + case STREAM_MD5_DIGEST: + bin_to_base64(digest, sizeof(digest), (char *)bmsg->rbuf, CRYPTO_DIGEST_MD5_SIZE, true); + digest_code = "MD5"; + break; + + case STREAM_SHA1_DIGEST: + bin_to_base64(digest, sizeof(digest), (char *)bmsg->rbuf, CRYPTO_DIGEST_SHA1_SIZE, true); + digest_code = "SHA1"; + break; + + case STREAM_SHA256_DIGEST: + bin_to_base64(digest, sizeof(digest), (char *)bmsg->rbuf, CRYPTO_DIGEST_SHA256_SIZE, true); + digest_code = "SHA256"; + break; + + case STREAM_SHA512_DIGEST: + bin_to_base64(digest, sizeof(digest), (char *)bmsg->rbuf, CRYPTO_DIGEST_SHA512_SIZE, true); + digest_code = "SHA512"; + break; + + default: + *digest = 0; + break; + } + + if (digest_code && jcr->getJobLevel() == L_VERIFY_VOLUME_TO_CATALOG) { + dir->fsend("%d %d %s *%s-%d*", jcr->JobFiles, vctx.stream, + digest, digest_code, jcr->JobFiles); + + } else if 
(jcr->getJobLevel() == L_VERIFY_DATA) { + /* Compare digest */ + if (vctx.check_chksum && *digest) { + /* probably an empty file, we can create an empty crypto session */ + if (!jcr->crypto.digest) { + jcr->crypto.digest = crypto_digest_new(jcr, vctx.digesttype); + } + vctx.close_previous_stream(); + if (strncmp(digest, vctx.digest, + MIN(sizeof(digest), sizeof(vctx.digest))) != 0) + { + Jmsg(jcr, M_INFO, 0, + _(" %s differs on \"%s\". File=%s Vol=%s\n"), + stream_to_ascii(vctx.stream), jcr->last_fname, + vctx.digest, digest); + jcr->setJobStatus(JS_Differences); + Dmsg3(50, "Signature verification failed for %s %s != %s\n", + jcr->last_fname, digest, vctx.digest); + } + if (do_check_accurate) { + vctx.check_accurate(); + do_check_accurate = false; /* Don't do it in the next loop */ + } + } + + /* Compute size and checksum for level=Data */ + switch (vctx.stream) { + case STREAM_ENCRYPTED_FILE_DATA: + case STREAM_ENCRYPTED_WIN32_DATA: + case STREAM_ENCRYPTED_FILE_GZIP_DATA: + case STREAM_ENCRYPTED_WIN32_GZIP_DATA: + case STREAM_ENCRYPTED_FILE_COMPRESSED_DATA: + case STREAM_ENCRYPTED_WIN32_COMPRESSED_DATA: + if (!msg_encrypt) { + Jmsg(jcr, M_WARNING, 0, + _("Verification of encrypted file data is not supported.\n")); + msg_encrypt = true; + } + break; + + case STREAM_PLUGIN_DATA: + case STREAM_FILE_DATA: + case STREAM_SPARSE_DATA: + case STREAM_WIN32_DATA: + case STREAM_GZIP_DATA: + case STREAM_SPARSE_GZIP_DATA: + case STREAM_WIN32_GZIP_DATA: + case STREAM_COMPRESSED_DATA: + case STREAM_SPARSE_COMPRESSED_DATA: + case STREAM_WIN32_COMPRESSED_DATA: + if (!(attr->type == FT_RAW || attr->type == FT_FIFO || attr->type == FT_REG || attr->type == FT_REGE)) { + break; + } + + wbuf = bmsg->rbuf; + rsize = bmsg->rbuflen; + jcr->ReadBytes += rsize; + wsize = rsize; + + if (vctx.stream == STREAM_SPARSE_DATA + || vctx.stream == STREAM_SPARSE_COMPRESSED_DATA + || vctx.stream == STREAM_SPARSE_GZIP_DATA) { + vctx.skip_sparse_header(&wbuf, &wsize); + } + + if (vctx.stream == STREAM_GZIP_DATA + || vctx.stream == STREAM_SPARSE_GZIP_DATA + || vctx.stream == STREAM_WIN32_GZIP_DATA + || vctx.stream == STREAM_ENCRYPTED_FILE_GZIP_DATA + || vctx.stream == STREAM_COMPRESSED_DATA + || vctx.stream == STREAM_SPARSE_COMPRESSED_DATA + || vctx.stream == STREAM_WIN32_COMPRESSED_DATA + || vctx.stream == STREAM_ENCRYPTED_FILE_COMPRESSED_DATA + || vctx.stream == STREAM_ENCRYPTED_WIN32_COMPRESSED_DATA + || vctx.stream == STREAM_ENCRYPTED_WIN32_GZIP_DATA) { + + if (!decompress_data(jcr, vctx.stream, &wbuf, &wsize)) { + dequeue_messages(jcr); + goto bail_out; + } + } + + vctx.update_checksum(wbuf, wsize); + + if (vctx.stream == STREAM_WIN32_GZIP_DATA + || vctx.stream == STREAM_WIN32_DATA + || vctx.stream == STREAM_WIN32_COMPRESSED_DATA + || vctx.stream == STREAM_ENCRYPTED_WIN32_COMPRESSED_DATA + || vctx.stream == STREAM_ENCRYPTED_WIN32_GZIP_DATA) { + + int64_t wbuf_len = wsize; + int64_t wsize64 = 0; + if (vctx.win32filter.have_data(&wbuf, &wbuf_len, &wsize64)) { + wsize = wsize64; + } + } + jcr->JobBytes += wsize; + vctx.update_size(wsize); + break; + + /* TODO: Handle data to compute checksums */ + /* Ignore everything else */ + default: + break; + } + } /* end switch */ + } /* end while bnet_get */ + if (bget_ret == BNET_EXT_TERMINATE) { + goto bail_out; + } + if (!vctx.close_previous_stream()) { + goto bail_out; + } + /* Check the last file */ + if (do_check_accurate) { + vctx.check_accurate(); + } + if (!accurate_finish(jcr)) { + goto bail_out; + } + jcr->setJobStatus(JS_Terminated); + goto ok_out; + +bail_out: + 
jcr->setJobStatus(JS_ErrorTerminated); + +ok_out: + fdmsg->wait_read_sock(jcr->is_job_canceled()); + delete bmsg; + free_GetMsg(fdmsg); + if (jcr->compress_buf) { + free_pool_memory(jcr->compress_buf); + jcr->compress_buf = NULL; + } + /* TODO: We probably want to mark the job as failed if we have errors */ + Dmsg2(50, "End Verify-Vol. Files=%d Bytes=%" lld "\n", jcr->JobFiles, + jcr->JobBytes); +} diff --git a/src/filed/win_efs.c b/src/filed/win_efs.c new file mode 100644 index 00000000..b485980c --- /dev/null +++ b/src/filed/win_efs.c @@ -0,0 +1,310 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula File Daemon Windows EFS restore + * + * Kern Sibbald, September MMXIV + * + */ + +#include "bacula.h" +#include "filed.h" +#include "ch.h" +#include "restore.h" +#include "backup.h" + +#ifdef TEST_WORKER +/* + * This is the test version of the worker routines, which simulates + * Windows EFS backup on Linux. + * + * This subroutine is called back from the Windows + * WriteEncryptedFileRaw and returns a single buffer of data or + * sets ulLength = 0 to indicate the end. + */ +static uint32_t test_write_efs_data_cb(char *pbData, void *arctx, uint32_t *len) +{ + r_ctx *rctx = (r_ctx *)arctx; + worker *wrk = (worker *)rctx->efs; + char *head, *buf; + uint32_t data_len; + int32_t count; + + head = (char *)wrk->dequeue(); /* dequeue buffer to write */ + Dmsg1(200, "dequeue buffer. 
head=%p\n", head); + if (!head) { + *len = 0; + Dmsg0(200, "cb got NULL.\n"); + } else { + data_len = *(int32_t *)head; + Dmsg1(200, "data_len=%d\n", data_len); + if (data_len == 0) { + Dmsg0(200, "Length is zero.\n"); + wrk->push_free_buffer(head); + return ERROR_BUFFER_OVERFLOW; + } + if (data_len > *len) { + Dmsg2(200, "Restore data %ld bytes too long for Microsoft buffer %ld bytes.\n", + data_len, *len); + *len = 0; + errno = b_errno_win32; + wrk->push_free_buffer(head); + return ERROR_BUFFER_OVERFLOW; + } else { + buf = head + sizeof(uint32_t); /* Point to buffer */ + count = *(int32_t *)buf; + buf += sizeof(int32_t); + memcpy(pbData, buf, data_len); + *len = data_len; + Dmsg2(200, "Got count=%d len=%d\n", count, data_len); + wrk->push_free_buffer(head); + } + } + return ERROR_SUCCESS; +} + +/* + * Thread created to run the WriteEncryptedFileRaw code + */ +static void *test_efs_write_thread(void *awrk) +{ + ssize_t wstat; + worker *wrk = (worker *)awrk; + r_ctx *rctx; + uint32_t len; + uint32_t size = 100000; + char *buf = (char *)malloc(size); /* allocate working buffer */ + + rctx = (r_ctx *)wrk->get_ctx(); + Dmsg2(200, "rctx=%p wrk=%p\n", rctx, wrk); + wrk->set_running(); + + while (!wrk->is_quit_state()) { + if (wrk->is_wait_state()) { /* wait if so requested */ + Dmsg0(200, "Enter wait state\n"); + wrk->wait(); + Dmsg0(200, "Leave wait state\n"); + continue; + } + len = size; + if (test_write_efs_data_cb(buf, rctx, &len) != 0) { /* get a buffer */ + berrno be; + Qmsg2(rctx->jcr, M_FATAL, 0, _("Restore data %ld bytes too long for Microsoft buffer %ld bytes.\n"), + len, size); + break; + } + if (len == 0) { /* done? */ + Dmsg0(200, "Got len 0 set_wait_state.\n"); + continue; /* yes */ + } + Dmsg2(200, "Write buf=%p len=%d\n", buf, len); + if ((wstat=bwrite(&rctx->bfd, buf, len)) != (ssize_t)len) { + Dmsg4(000, "bwrite of %d error %d open=%d on file=%s\n", + len, wstat, is_bopen(&rctx->bfd), rctx->jcr->last_fname); + continue; + } + } + Dmsg0(200, "worker thread quiting\n"); + free(buf); + return NULL; +} + +/* + * If the writer thread is not created, create it, then queue + * a buffer to be written by the thread. + */ +bool test_write_efs_data(r_ctx &rctx, char *data, const int32_t length) +{ + POOLMEM *buf, *head; + + if (!rctx.efs) { + rctx.efs = New(worker(10)); + Dmsg2(200, "Start test_efs_write_thread rctx=%p work=%p\n", &rctx, rctx.efs); + rctx.efs->start(test_efs_write_thread, &rctx); + } + head = (POOLMEM *)rctx.efs->pop_free_buffer(); + if (!head) { + head = get_memory(length + 2*sizeof(int32_t)+1); + } else { + head = check_pool_memory_size(head, length+2*sizeof(int32_t)+1); + } + buf = head; + *(int32_t *)buf = length; + buf += sizeof(int32_t); + *(int32_t *)buf = ++rctx.count; + buf += sizeof(int32_t); + memcpy(buf, data, length); + Dmsg3(200, "Put count=%d len=%d head=%p\n", rctx.count, length, head); + rctx.efs->queue(head); + rctx.efs->set_run_state(); + return true; +} +#endif + + +#ifdef HAVE_WIN32 + +/* ============================================================= + * + * Win EFS functions for restore + * + * ============================================================= + */ + +/* + * This subroutine is called back from the Windows + * WriteEncryptedFileRaw. 
+ */ +static DWORD WINAPI write_efs_data_cb(PBYTE pbData, PVOID arctx, PULONG ulLength) +{ + r_ctx *rctx = (r_ctx *)arctx; + worker *wrk = (worker *)rctx->efs; + char *data; + char *buf; + uint32_t data_len; + JCR *jcr = rctx->jcr; + + data = (char *)rctx->efs->dequeue(); /* dequeue buffer to write */ + Dmsg1(200, "dequeue buffer. head=%p\n", data); + if (jcr->is_job_canceled()) { + return ERROR_CANCELLED; + } + if (!data) { + *ulLength = 0; + Dmsg0(200, "cb got NULL.\n"); + } else { + data_len = *(int32_t *)data; + if (data_len > *ulLength) { + Qmsg2(rctx->jcr, M_FATAL, 0, _("Restore data %ld bytes too long for Microsoft buffer %lld bytes.\n"), + data_len, *ulLength); + *ulLength = 0; + } else { + buf = data + sizeof(uint32_t); + memcpy(pbData, buf, data_len); + *ulLength = (ULONG)data_len; + Dmsg1(200, "Got len=%d\n", data_len); + } + wrk->push_free_buffer(data); + } + return ERROR_SUCCESS; +} + +/* + * Thread created to run the WriteEncryptedFileRaw code + */ +static void *efs_write_thread(void *awrk) +{ + worker *wrk = (worker *)awrk; + r_ctx *rctx; + + rctx = (r_ctx *)wrk->get_ctx(); + wrk->set_running(); + + while (!wrk->is_quit_state() && !rctx->jcr->is_job_canceled()) { + if (wrk->is_wait_state()) { /* wait if so requested */ + Dmsg0(200, "Enter wait state\n"); + wrk->wait(); + Dmsg0(200, "Leave wait state\n"); + continue; + } + if (p_WriteEncryptedFileRaw((PFE_IMPORT_FUNC)write_efs_data_cb, rctx, + rctx->bfd.pvContext)) { + berrno be; + Qmsg1(rctx->jcr, M_FATAL, 0, _("WriteEncryptedFileRaw failure: ERR=%s\n"), + be.bstrerror(b_errno_win32)); + return NULL; + } + Dmsg0(200, "Got return from WriteEncryptedFileRaw\n"); + } + return NULL; +} + +/* + * Called here from Bacula to write a block to a Windows EFS file. + * Since the Windows WriteEncryptedFileRaw function uses a callback + * subroutine to get the blocks to write, we create a writer thread, + * and queue the blocks (buffers) we get in this routine. That + * writer thread then hangs on the WriteEncryptedRaw file, calling + * back to the callback subroutine which then dequeues the blocks + * we have queued. + * + * If the writer thread is not created, create it, then queue + * a buffer to be written by the thread. 
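+ *
+ * Illustrative sketch of the resulting flow for one block (not taken
+ * from the original source):
+ *
+ *    restore path:  win_write_efs_data(rctx, data, len)  -> queue buffer
+ *    writer thread: efs_write_thread() -> WriteEncryptedFileRaw()
+ *                     -> write_efs_data_cb() -> dequeue buffer
+ *                     -> memcpy into the Microsoft-supplied buffer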
+ */ +bool win_write_efs_data(r_ctx &rctx, char *data, const int32_t length) +{ + POOLMEM *buf; + + if (!rctx.efs) { + rctx.efs = New(worker(10)); + rctx.efs->start(efs_write_thread, &rctx); + } + buf = (POOLMEM *)rctx.efs->pop_free_buffer(); + if (!buf) { + buf = get_memory(length + sizeof(int32_t)+1); + } else { + buf = check_pool_memory_size(buf, length+sizeof(int32_t)+1); + } + *(int32_t *)buf = length; + memcpy(buf+sizeof(int32_t), data, length); + Dmsg2(200, "Put len=%d head=%p\n", length, buf); + rctx.efs->queue(buf); + rctx.efs->set_run_state(); + return true; +} + +/* + * The ReadEncryptedFileRaw from bacula.c calls us back here + */ +DWORD WINAPI read_efs_data_cb(PBYTE pbData, PVOID pvCallbackContext, ULONG ulLength) +{ + bctx_t *ctx = (bctx_t *)pvCallbackContext; /* get our context */ + BSOCK *sd = ctx->jcr->store_bsock; + ULONG ulSent = 0; + + if (ctx->jcr->is_job_canceled()) { + return ERROR_CANCELLED; + } + if (ulLength == 0) { + Dmsg0(200, "ulLen=0 => done.\n"); + return ERROR_SUCCESS; /* all done */ + } + while (ulLength > 0) { + /* Get appropriate block length */ + if (ulLength <= (ULONG)ctx->rsize) { + sd->msglen = ulLength; + } else { + sd->msglen = (ULONG)ctx->rsize; + } + Dmsg5(200, "ctx->rbuf=%p msg=%p msgbuflen=%d ulSent=%d len=%d\n", + ctx->rbuf, sd->msg, ctx->rsize, ulSent, sd->msglen); + /* Copy data into Bacula buffer */ + memcpy(ctx->rbuf, pbData + ulSent, sd->msglen); + /* Update sent count and remaining count */ + ulSent += sd->msglen; + ulLength -= sd->msglen; + /* Send the data off to the SD */ + if (!process_and_send_data(*ctx)) { + return ERROR_UNEXP_NET_ERR; + } + } + return ERROR_SUCCESS; +} + +#endif diff --git a/src/fileopts.h b/src/fileopts.h new file mode 100644 index 00000000..7850add6 --- /dev/null +++ b/src/fileopts.h @@ -0,0 +1,66 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2019 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * File types + * + * Kern Sibbald MMI + * + * Extracted from findlib/find.h Nov 2010 + */ + +#ifndef __BFILEOPTS_H +#define __BFILEOPTS_H + +/* + * Options saved int "options" of the include/exclude lists. 
+ * They are directly jammed ito "flag" of ff packet + */ +#define FO_PORTABLE_DATA (1<<0) /* Data is portable (Not used) */ +#define FO_MD5 (1<<1) /* Do MD5 checksum */ +#define FO_COMPRESS (1<<2) /* Do compression */ +#define FO_NO_RECURSION (1<<3) /* no recursion in directories */ +#define FO_MULTIFS (1<<4) /* multiple file systems */ +#define FO_SPARSE (1<<5) /* do sparse file checking */ +#define FO_IF_NEWER (1<<6) /* replace if newer */ +#define FO_NOREPLACE (1<<7) /* never replace */ +#define FO_READFIFO (1<<8) /* read data from fifo */ +#define FO_SHA1 (1<<9) /* Do SHA1 checksum */ +#define FO_PORTABLE (1<<10) /* Use portable data format -- no BackupWrite */ +#define FO_MTIMEONLY (1<<11) /* Use mtime rather than mtime & ctime */ +#define FO_KEEPATIME (1<<12) /* Reset access time */ +#define FO_EXCLUDE (1<<13) /* Exclude file */ +#define FO_ACL (1<<14) /* Backup ACLs */ +#define FO_NO_HARDLINK (1<<15) /* don't handle hard links */ +#define FO_IGNORECASE (1<<16) /* Ignore file name case */ +#define FO_HFSPLUS (1<<17) /* Resource forks and Finder Info */ +#define FO_WIN32DECOMP (1<<18) /* Use BackupRead decomposition */ +#define FO_SHA256 (1<<19) /* Do SHA256 checksum */ +#define FO_SHA512 (1<<20) /* Do SHA512 checksum */ +#define FO_ENCRYPT (1<<21) /* Encrypt data stream */ +#define FO_NOATIME (1<<22) /* Use O_NOATIME to prevent atime change */ +#define FO_ENHANCEDWILD (1<<23) /* Enhanced wild card processing */ +#define FO_CHKCHANGES (1<<24) /* Check if file have been modified during backup */ +#define FO_STRIPPATH (1<<25) /* Check for stripping path */ +#define FO_HONOR_NODUMP (1<<26) /* honor NODUMP flag */ +#define FO_XATTR (1<<27) /* Backup Extended Attributes */ +#define FO_DELTA (1<<28) /* Delta data -- i.e. all copies returned on restore */ +#define FO_PLUGIN (1<<29) /* Plugin data stream -- return to plugin on restore */ +#define FO_OFFSETS (1<<30) /* Keep I/O file offsets */ + +#endif /* __BFILEOPTSS_H */ diff --git a/src/filetypes.h b/src/filetypes.h new file mode 100644 index 00000000..bb928176 --- /dev/null +++ b/src/filetypes.h @@ -0,0 +1,78 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2019 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/** + * Stream definitions. Split from baconfig.h Nov 2010 + * + * Kern Sibbald, MM + */ + +#ifndef __BFILETYPES_H +#define __BFILETYPES_H 1 + + +/** + * File type (Bacula defined). + * NOTE!!! These are saved in the Attributes record on the tape, so + * do not change them. If need be, add to them. + * + * This is stored as 32 bits on the Volume, but only FT_MASK (16) bits are + * used for the file type. The upper bits are used to indicate + * additional optional fields in the attribute record. 
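+ *
+ * Illustrative sketch (the variable name type_word is hypothetical, not
+ * taken from the original header): a reader of the attribute record
+ * would typically split the saved word as
+ *
+ *    int ftype = type_word & FT_MASK;                  // FT_REG, FT_DIREND, ...
+ *    bool have_ds = (type_word & AR_DATA_STREAM) != 0; // data stream id appended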
+ */ +#define FT_MASK 0xFFFF /* Bits used by FT (type) */ +#define FT_LNKSAVED 1 /* hard link to file already saved */ +#define FT_REGE 2 /* Regular file but empty */ +#define FT_REG 3 /* Regular file */ +#define FT_LNK 4 /* Soft Link */ +#define FT_DIREND 5 /* Directory at end (saved) */ +#define FT_SPEC 6 /* Special file -- chr, blk, fifo, sock */ +#define FT_NOACCESS 7 /* Not able to access */ +#define FT_NOFOLLOW 8 /* Could not follow link */ +#define FT_NOSTAT 9 /* Could not stat file */ +#define FT_NOCHG 10 /* Incremental option, file not changed */ +#define FT_DIRNOCHG 11 /* Incremental option, directory not changed */ +#define FT_ISARCH 12 /* Trying to save archive file */ +#define FT_NORECURSE 13 /* No recursion into directory */ +#define FT_NOFSCHG 14 /* Different file system, prohibited */ +#define FT_NOOPEN 15 /* Could not open directory */ +#define FT_RAW 16 /* Raw block device */ +#define FT_FIFO 17 /* Raw fifo device */ +/** + * The DIRBEGIN packet is sent to the FD file processing routine so + * that it can filter packets, but otherwise, it is not used + * or saved */ +#define FT_DIRBEGIN 18 /* Directory at beginning (not saved) */ +#define FT_INVALIDFS 19 /* File system not allowed for */ +#define FT_INVALIDDT 20 /* Drive type not allowed for */ +#define FT_REPARSE 21 /* Win NTFS reparse point */ +#define FT_PLUGIN 22 /* Plugin generated filename */ +#define FT_DELETED 23 /* Deleted file entry */ +#define FT_BASE 24 /* Duplicate base file entry */ +#define FT_RESTORE_FIRST 25 /* Restore this "object" first */ +#define FT_JUNCTION 26 /* Win32 Junction point */ +#define FT_PLUGIN_CONFIG 27 /* Object for Plugin configuration */ +#define FT_PLUGIN_CONFIG_FILLED 28 /* Object for Plugin configuration filled by Director */ + +/* Definitions for upper part of type word (see above). */ +#define AR_DATA_STREAM (1<<16) /* Data stream id present */ + +/* Quick way to know if a Filetype is about a plugin "Object" */ +#define IS_FT_OBJECT(x) (((x) == FT_RESTORE_FIRST) || ((x) == FT_PLUGIN_CONFIG_FILLED) || ((x) == FT_PLUGIN_CONFIG)) + +#endif /* __BFILETYPES_H */ diff --git a/src/findlib/Makefile.in b/src/findlib/Makefile.in new file mode 100644 index 00000000..2b92e337 --- /dev/null +++ b/src/findlib/Makefile.in @@ -0,0 +1,131 @@ +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# Find files library Makefile +# +@MCOMMON@ + +srcdir = . +VPATH = . +.PATH: . + +# one up +basedir = .. +# top dir +topdir = ../.. 
+# this dir relative to top dir +thisdir = src/findlib + +DEBUG=@DEBUG@ + +first_rule: all +dummy: + +# +# include files installed when using libtool +# +INCLUDE_FILES = bfile.h find.h protos.h win32filter.h + +# +LIBBACFIND_SRCS = find.c match.c find_one.c attribs.c create_file.c \ + bfile.c drivetype.c enable_priv.c fstype.c mkpath.c \ + savecwd.c namedpipe.c win32filter.c +LIBBACFIND_OBJS = $(LIBBACFIND_SRCS:.c=.o) +LIBBACFIND_LOBJS = $(LIBBACFIND_SRCS:.c=.lo) + +LIBBACFIND_LT_RELEASE = @LIBBACFIND_LT_RELEASE@ + +.SUFFIXES: .c .o .lo +.PHONY: +.DONTCARE: + +# inference rules +.c.o: + @echo "Compiling $<" + $(NO_ECHO)$(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) $< + +.c.lo: + @echo "Compiling $<" + $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) $< +#------------------------------------------------------------------------- +all: Makefile libbacfind$(DEFAULT_ARCHIVE_TYPE) ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) + @echo "==== Make of findlib is good ====" + @echo " " + +libbacfind.a: $(LIBBACFIND_OBJS) + @echo "Making $@ ..." + $(AR) rc $@ $(LIBBACFIND_OBJS) + $(RANLIB) $@ + +libbacfind.la: Makefile $(LIBBACFIND_LOBJS) + @echo "Making $@ ..." + $(LIBTOOL_LINK) $(CXX) $(DEFS) $(DEBUG) $(LDFLAGS) -o $@ $(LIBBACFIND_LOBJS) -export-dynamic -rpath $(libdir) -release $(LIBBACFIND_LT_RELEASE) + +Makefile: $(srcdir)/Makefile.in $(topdir)/config.status + cd $(topdir) \ + && CONFIG_FILES=$(thisdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status + +install-includes: + $(MKDIR) $(DESTDIR)/$(includedir)/bacula/findlib + for I in $(INCLUDE_FILES); do \ + $(INSTALL_DATA) $$I $(DESTDIR)$(includedir)/bacula/findlib/`basename $$I`; \ + done + +uninstall-includes: + for I in $(INCLUDE_FILES); do \ + $(RMF) $(DESTDIR)$(includedir)/bacula/findlib/`basename $$I`; \ + done + +libtool-install: all + $(MKDIR) $(DESTDIR)$(libdir) + $(RMF) $(DESTDIR)$(libdir)/libbacfind-*.so $(DESTDIR)$(libdir)/libbacfind.la + $(LIBTOOL_INSTALL_FINISH) $(INSTALL_LIB) libbacfind$(DEFAULT_ARCHIVE_TYPE) $(DESTDIR)$(libdir) + +libtool-uninstall: + $(LIBTOOL_UNINSTALL) $(RMF) $(DESTDIR)$(libdir)/libbacfind.la + +install: @LIBTOOL_INSTALL_TARGET@ @INCLUDE_INSTALL_TARGET@ + +uninstall: @LIBTOOL_UNINSTALL_TARGET@ @INCLUDE_UNINSTALL_TARGET@ + +libtool-clean: + @find . -name '*.lo' -print | xargs $(LIBTOOL_CLEAN) $(RMF) + @$(RMF) *.la + @$(RMF) -r .libs _libs + +clean: libtool-clean + @$(RMF) find core a.out *.o *.bak *~ *.intpro *.extpro 1 2 3 + +realclean: clean + @$(RMF) tags + +distclean: realclean + if test $(srcdir) = .; then $(MAKE) realclean; fi + (cd $(srcdir); $(RMF) Makefile) + +devclean: realclean + if test $(srcdir) = .; then $(MAKE) realclean; fi + (cd $(srcdir); $(RMF) Makefile) + +# Semi-automatic generation of dependencies: +# Use gcc -M because X11 `makedepend' doesn't work on all systems +# and it also includes system headers. +# `semi'-automatic since dependencies are generated at distribution time. 
+ +depend: + @$(MV) Makefile Makefile.bak + @$(SED) "/^# DO NOT DELETE:/,$$ d" Makefile.bak > Makefile + @$(ECHO) "# DO NOT DELETE: nice dependency list follows" >> Makefile + @for src in $(LIBBACFIND_SRCS); do \ + $(CXX) -S -M -MT `basename $$src .c`$(DEFAULT_OBJECT_TYPE) $(CPPFLAGS) $(XINC) -I$(srcdir) -I$(basedir) $$src >> Makefile; \ + done + @if test -f Makefile ; then \ + $(RMF) Makefile.bak; \ + else \ + $(MV) Makefile.bak Makefile; \ + echo " ===== Something went wrong in make depend ====="; \ + fi + +# ----------------------------------------------------------------------- +# DO NOT DELETE: nice dependency list follows diff --git a/src/findlib/attribs.c b/src/findlib/attribs.c new file mode 100644 index 00000000..10242233 --- /dev/null +++ b/src/findlib/attribs.c @@ -0,0 +1,986 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Encode and decode standard Unix attributes and + * Extended attributes for Win32 and + * other non-Unix systems, or Unix systems with ACLs, ... + * + * Kern Sibbald, October MMII + * + */ + +//#define _POSIX_C_SOURCE 200809L +//#define _BSD_SOURCE 1 + +#include "bacula.h" +#include "find.h" +#include "ch.h" + +static uid_t my_uid = 1; +static gid_t my_gid = 1; +static bool uid_set = false; + +#ifdef HAVE_WIN32 +/* Forward referenced Windows subroutines */ +static bool set_win32_attributes(JCR *jcr, ATTR *attr, BFILE *ofd); +void unix_name_to_win32(POOLMEM **win32_name, const char *name); +void win_error(JCR *jcr, int type, const char *prefix, POOLMEM *ofile); +void win_error(JCR *jcr, const char *prefix, POOLMEM *ofile); +HANDLE bget_handle(BFILE *bfd); +#endif /* HAVE_WIN32 */ + +/* + * For old systems that don't have lchown() or lchmod() + */ +#ifndef HAVE_LCHOWN +#define lchown chown +#endif +#ifndef HAVE_LCHMOD +#define lchmod chmod +#endif + +/*=============================================================*/ +/* */ +/* *** A l l S y s t e m s *** */ +/* */ +/*=============================================================*/ + +/* + * To turn off use of fchown(), fchmod(), or futimes(), + * uncomment one or more of the following. + */ +//#undef HAVE_FCHOWN +//#undef HAVE_FCHMOD +//#undef HAVE_FUTIMES + +/* + * Print errors only if debug level defined or we are root. + * root should not get errors. Errors for users causes + * too much output. + */ +#define print_error(jcr) (chk_dbglvl(100) || (my_uid == 0 && (!jcr || jcr->job_uid == 0))) + +/* + * Restore the owner and permissions (mode) of a Directory. + * See attribs.c for the equivalent for files. 
+ */ +void set_own_mod(ATTR *attr, char *path, uid_t owner, gid_t group, mode_t mode) +{ + if (lchown(path, owner, group) != 0 && print_error(attr->jcr) +#ifdef AFS + && errno != EPERM +#endif + ) { + berrno be; + Jmsg4(attr->jcr, M_WARNING, 0, _("Cannot change owner and/or group of %s: ERR=%s %d %d\n"), + path, be.bstrerror(), getuid(), attr->jcr->job_uid); + } + if (lchmod(path, mode) != 0 && print_error(attr->jcr)) { + berrno be; + Jmsg2(attr->jcr, M_WARNING, 0, _("Cannot change permissions of %s: ERR=%s\n"), + path, be.bstrerror()); + } +} + + +bool set_mod_own_time(JCR *jcr, BFILE *ofd, ATTR *attr) +{ + bool ok = true; + struct utimbuf ut; + + /* Do not try to set rights with f functions when using a plugin */ + if (is_bopen(ofd) && !ofd->cmd_plugin) { /* TODO: Look with opt_plugin */ + /* + * The #ifdefing is a bit ugly, but it is the only + * way we can ensure this works on older systems. + */ +#ifdef HAVE_FCHOWN + if (fchown(ofd->fid, attr->statp.st_uid, attr->statp.st_gid) < 0 && print_error(jcr)) { +#else + if (lchown(attr->ofname, attr->statp.st_uid, attr->statp.st_gid) < 0 && print_error(jcr)) { +#endif + berrno be; + Jmsg2(jcr, M_ERROR, 0, _("Unable to set file owner %s: ERR=%s\n"), + attr->ofname, be.bstrerror()); + ok = false; + } + +#ifdef HAVE_FCHMOD + if (fchmod(ofd->fid, attr->statp.st_mode) < 0 && print_error(jcr)) { +#else + if (lchmod(attr->ofname, attr->statp.st_mode) < 0 && print_error(jcr)) { +#endif + berrno be; + Jmsg2(jcr, M_ERROR, 0, _("Unable to set file modes %s: ERR=%s\n"), + attr->ofname, be.bstrerror()); + ok = false; + } + +#ifdef HAVE_FUTIMES + struct timeval times[2]; + times[0].tv_sec = attr->statp.st_atime; + times[0].tv_usec = 0; + times[1].tv_sec = attr->statp.st_mtime; + times[1].tv_usec = 0; + if (futimes(ofd->fid, times) < 0 && print_error(jcr)) { +#else + ut.actime = attr->statp.st_atime; + ut.modtime = attr->statp.st_mtime; + //bclose(ofd); + if (utime(attr->ofname, &ut) < 0 && print_error(jcr)) { +#endif + berrno be; + Jmsg2(jcr, M_ERROR, 0, _("Unable to set file times %s: ERR=%s\n"), + attr->ofname, be.bstrerror()); + ok = false; + } + } else { + if (lchown(attr->ofname, attr->statp.st_uid, attr->statp.st_gid) < 0 && print_error(jcr)) { + berrno be; + Jmsg2(jcr, M_ERROR, 0, _("Unable to set file owner %s: ERR=%s\n"), + attr->ofname, be.bstrerror()); + ok = false; + } + if (lchmod(attr->ofname, attr->statp.st_mode) < 0 && print_error(jcr)) { + berrno be; + Jmsg2(jcr, M_ERROR, 0, _("Unable to set file modes %s: ERR=%s\n"), + attr->ofname, be.bstrerror()); + ok = false; + } + /* + * Reset file times. 
+ */ + ut.actime = attr->statp.st_atime; + ut.modtime = attr->statp.st_mtime; + + if (utime(attr->ofname, &ut) < 0 && print_error(jcr)) { + berrno be; + Jmsg2(jcr, M_ERROR, 0, _("Unable to set file times %s: ERR=%s\n"), + attr->ofname, be.bstrerror()); + ok = false; + } + } + return ok; +} + +/* + * Return the data stream that will be used + */ +int select_data_stream(FF_PKT *ff_pkt) +{ + int stream; + + /* This is a plugin special restore object */ + if (ff_pkt->type == FT_RESTORE_FIRST) { + ff_pkt->flags = 0; + return STREAM_FILE_DATA; + } + + /* + * Fix all incompatible options + */ + /* No sparse option for encrypted data */ + if (ff_pkt->flags & FO_ENCRYPT) { + ff_pkt->flags &= ~FO_SPARSE; + } + + /* Note, no sparse option for win32_data */ + if (!is_portable_backup(&ff_pkt->bfd)) { + stream = STREAM_WIN32_DATA; + ff_pkt->flags &= ~FO_SPARSE; + } else if (ff_pkt->flags & FO_SPARSE) { + stream = STREAM_SPARSE_DATA; + } else { + stream = STREAM_FILE_DATA; + } + if (ff_pkt->flags & FO_OFFSETS) { + stream = STREAM_SPARSE_DATA; + } + + /* Encryption is only supported for file data */ + if (stream != STREAM_FILE_DATA && stream != STREAM_WIN32_DATA && + stream != STREAM_MACOS_FORK_DATA) { + ff_pkt->flags &= ~FO_ENCRYPT; + } + + /* Compression is not supported for Mac fork data */ + if (stream == STREAM_MACOS_FORK_DATA) { + ff_pkt->flags &= ~FO_COMPRESS; + } + + /* + * Handle compression and encryption options + */ +#if defined(HAVE_LIBZ) || defined(HAVE_LZO) + if (ff_pkt->flags & FO_COMPRESS) { + #ifdef HAVE_LIBZ + if(ff_pkt->Compress_algo == COMPRESS_GZIP) { + switch (stream) { + case STREAM_WIN32_DATA: + stream = STREAM_WIN32_GZIP_DATA; + break; + case STREAM_SPARSE_DATA: + stream = STREAM_SPARSE_GZIP_DATA; + break; + case STREAM_FILE_DATA: + stream = STREAM_GZIP_DATA; + break; + default: + /* + * All stream types that do not support compression should clear out + * FO_COMPRESS above, and this code block should be unreachable. + */ + ASSERT(!(ff_pkt->flags & FO_COMPRESS)); + return STREAM_NONE; + } + } + #endif + #ifdef HAVE_LZO + if(ff_pkt->Compress_algo == COMPRESS_LZO1X) { + switch (stream) { + case STREAM_WIN32_DATA: + stream = STREAM_WIN32_COMPRESSED_DATA; + break; + case STREAM_SPARSE_DATA: + stream = STREAM_SPARSE_COMPRESSED_DATA; + break; + case STREAM_FILE_DATA: + stream = STREAM_COMPRESSED_DATA; + break; + default: + /* + * All stream types that do not support compression should clear out + * FO_COMPRESS above, and this code block should be unreachable. + */ + ASSERT(!(ff_pkt->flags & FO_COMPRESS)); + return STREAM_NONE; + } + } + #endif + } +#endif +#ifdef HAVE_CRYPTO + if (ff_pkt->flags & FO_ENCRYPT) { + switch (stream) { + case STREAM_WIN32_DATA: + stream = STREAM_ENCRYPTED_WIN32_DATA; + break; + case STREAM_WIN32_GZIP_DATA: + stream = STREAM_ENCRYPTED_WIN32_GZIP_DATA; + break; + case STREAM_WIN32_COMPRESSED_DATA: + stream = STREAM_ENCRYPTED_WIN32_COMPRESSED_DATA; + break; + case STREAM_FILE_DATA: + stream = STREAM_ENCRYPTED_FILE_DATA; + break; + case STREAM_GZIP_DATA: + stream = STREAM_ENCRYPTED_FILE_GZIP_DATA; + break; + case STREAM_COMPRESSED_DATA: + stream = STREAM_ENCRYPTED_FILE_COMPRESSED_DATA; + break; + default: + /* All stream types that do not support encryption should clear out + * FO_ENCRYPT above, and this code block should be unreachable. */ + ASSERT(!(ff_pkt->flags & FO_ENCRYPT)); + return STREAM_NONE; + } + } +#endif + + return stream; +} + + +/* + * Encode a stat structure into a base64 character string + * All systems must create such a structure. 
+ * In addition, we tack on the LinkFI, which is non-zero in
+ * the case of a hard linked file that has no data. This
+ * is a File Index pointing to the link that does have the
+ * data (always the first one encountered in a save).
+ * You may piggyback attributes on this packet by encoding
+ * them in the encode_attribsEx() subroutine, but this is
+ * not recommended.
+ */
+void encode_stat(char *buf, struct stat *statp, int stat_size, int32_t LinkFI, int data_stream)
+{
+ char *p = buf;
+ /*
+ * We read the stat packet so make sure the caller's conception
+ * is the same as ours. They can be different if LARGEFILE is not
+ * the same when compiling this library and the calling program.
+ */
+ ASSERT(stat_size == (int)sizeof(struct stat));
+
+ /*
+ * Encode a stat packet. I should have done this more intelligently
+ * with a length so that it could be easily expanded.
+ */
+ p += to_base64((int64_t)statp->st_dev, p);
+ *p++ = ' '; /* separate fields with a space */
+ p += to_base64((int64_t)statp->st_ino, p);
+ *p++ = ' ';
+ p += to_base64((int64_t)statp->st_mode, p);
+ *p++ = ' ';
+ p += to_base64((int64_t)statp->st_nlink, p);
+ *p++ = ' ';
+ p += to_base64((int64_t)statp->st_uid, p);
+ *p++ = ' ';
+ p += to_base64((int64_t)statp->st_gid, p);
+ *p++ = ' ';
+ p += to_base64((int64_t)statp->st_rdev, p);
+ *p++ = ' ';
+ p += to_base64((int64_t)statp->st_size, p);
+ *p++ = ' ';
+#ifndef HAVE_MINGW
+ p += to_base64((int64_t)statp->st_blksize, p);
+ *p++ = ' ';
+ p += to_base64((int64_t)statp->st_blocks, p);
+ *p++ = ' ';
+#else
+ p += to_base64((int64_t)0, p); /* output place holder */
+ *p++ = ' ';
+ p += to_base64((int64_t)0, p); /* output place holder */
+ *p++ = ' ';
+#endif
+ p += to_base64((int64_t)statp->st_atime, p);
+ *p++ = ' ';
+ p += to_base64((int64_t)statp->st_mtime, p);
+ *p++ = ' ';
+ p += to_base64((int64_t)statp->st_ctime, p);
+ *p++ = ' ';
+ p += to_base64((int64_t)LinkFI, p);
+ *p++ = ' ';
+
+#ifdef HAVE_CHFLAGS
+ /* FreeBSD function */
+ p += to_base64((int64_t)statp->st_flags, p); /* output st_flags */
+#else
+ p += to_base64((int64_t)0, p); /* output place holder */
+#endif
+ *p++ = ' ';
+ p += to_base64((int64_t)data_stream, p);
+#ifdef HAVE_MINGW
+ *p++ = ' ';
+ p += to_base64((int64_t)statp->st_fattrs, p);
+#endif
+ *p = 0;
+ return;
+}
+
+/* Do casting according to unknown type to keep compiler happy */
+#ifdef HAVE_TYPEOF
+ #define plug(st, val) st = (typeof st)val
+#else
+ #if !HAVE_GCC & HAVE_SUN_OS
+ /* Sun compiler does not handle templates correctly */
+ #define plug(st, val) st = val
+ #elif __sgi
+ #define plug(st, val) st = val
+ #else
+ /* Use templates to do the casting */
+ template <class T> void plug(T &st, uint64_t val)
+ { st = static_cast<T>(val); }
+ #endif
+#endif
+
+
+/*
+ * Decode a stat packet from base64 characters
+ * returns: data_stream
+ */
+int decode_stat(char *buf, struct stat *statp, int stat_size, int32_t *LinkFI)
+{
+ char *p = buf;
+ int64_t val;
+ int data_stream;
+
+ /*
+ * We store into the stat packet so make sure the caller's conception
+ * is the same as ours. They can be different if LARGEFILE is not
+ * the same when compiling this library and the calling program. 
+ */ + ASSERT(stat_size == (int)sizeof(struct stat)); + + p += from_base64(&val, p); + plug(statp->st_dev, val); + p++; + p += from_base64(&val, p); + plug(statp->st_ino, val); + p++; + p += from_base64(&val, p); + plug(statp->st_mode, val); + p++; + p += from_base64(&val, p); + plug(statp->st_nlink, val); + p++; + p += from_base64(&val, p); + plug(statp->st_uid, val); + p++; + p += from_base64(&val, p); + plug(statp->st_gid, val); + p++; + p += from_base64(&val, p); + plug(statp->st_rdev, val); + p++; + p += from_base64(&val, p); + plug(statp->st_size, val); + p++; +#ifndef HAVE_MINGW + p += from_base64(&val, p); + plug(statp->st_blksize, val); + p++; + p += from_base64(&val, p); + plug(statp->st_blocks, val); + p++; +#else + p += from_base64(&val, p); +// plug(statp->st_blksize, val); + p++; + p += from_base64(&val, p); +// plug(statp->st_blocks, val); + p++; +#endif + p += from_base64(&val, p); + plug(statp->st_atime, val); + p++; + p += from_base64(&val, p); + plug(statp->st_mtime, val); + p++; + p += from_base64(&val, p); + plug(statp->st_ctime, val); + + /* Optional FileIndex of hard linked file data */ + if (*p == ' ' || (*p != 0 && *(p+1) == ' ')) { + p++; + p += from_base64(&val, p); + *LinkFI = (uint32_t)val; + } else { + *LinkFI = 0; + return 0; + } + + /* FreeBSD user flags */ + if (*p == ' ' || (*p != 0 && *(p+1) == ' ')) { + p++; + p += from_base64(&val, p); +#ifdef HAVE_CHFLAGS + plug(statp->st_flags, val); + } else { + statp->st_flags = 0; +#endif + } + + /* Look for data stream id */ + if (*p == ' ' || (*p != 0 && *(p+1) == ' ')) { + p++; + p += from_base64(&val, p); + } else { + val = 0; + } + data_stream = val; +#ifdef HAVE_MINGW + if (*p == ' ' || (*p != 0 && *(p+1) == ' ')) { + p++; + p += from_base64(&val, p); + plug(statp->st_fattrs, val); + } else { + statp->st_fattrs = 0; + val = 0; + } +#endif + return data_stream; +} + +/* Decode a LinkFI field of encoded stat packet */ +int32_t decode_LinkFI(char *buf, struct stat *statp, int stat_size) +{ + char *p = buf; + int64_t val; + /* + * We store into the stat packet so make sure the caller's conception + * is the same as ours. They can be different if LARGEFILE is not + * the same when compiling this library and the calling program. 
+ */ + ASSERT(stat_size == (int)sizeof(struct stat)); + + skip_nonspaces(&p); /* st_dev */ + p++; /* skip space */ + skip_nonspaces(&p); /* st_ino */ + p++; + p += from_base64(&val, p); + plug(statp->st_mode, val); /* st_mode */ + p++; + skip_nonspaces(&p); /* st_nlink */ + p++; + skip_nonspaces(&p); /* st_uid */ + p++; + skip_nonspaces(&p); /* st_gid */ + p++; + skip_nonspaces(&p); /* st_rdev */ + p++; + skip_nonspaces(&p); /* st_size */ + p++; + skip_nonspaces(&p); /* st_blksize */ + p++; + skip_nonspaces(&p); /* st_blocks */ + p++; + skip_nonspaces(&p); /* st_atime */ + p++; + skip_nonspaces(&p); /* st_mtime */ + p++; + skip_nonspaces(&p); /* st_ctime */ + + /* Optional FileIndex of hard linked file data */ + if (*p == ' ' || (*p != 0 && *(p+1) == ' ')) { + p++; + p += from_base64(&val, p); + return (int32_t)val; + } + return 0; +} + +/* + * Set file modes, permissions and times + * + * fname is the original filename + * ofile is the output filename (may be in a different directory) + * + * Returns: true on success + * false on failure + */ +bool set_attributes(JCR *jcr, ATTR *attr, BFILE *ofd) +{ + mode_t old_mask; + bool ok = true; + boffset_t fsize; + + if (!uid_set) { + my_uid = getuid(); + my_gid = getgid(); + uid_set = true; + } + +#ifdef HAVE_WIN32 + if (attr->stream == STREAM_UNIX_ATTRIBUTES_EX && + set_win32_attributes(jcr, attr, ofd)) { + if (is_bopen(ofd)) { + bclose(ofd); + } + pm_strcpy(attr->ofname, "*none*"); + return true; + } + if (attr->data_stream == STREAM_WIN32_DATA || + attr->data_stream == STREAM_WIN32_GZIP_DATA || + attr->data_stream == STREAM_WIN32_COMPRESSED_DATA) { + if (is_bopen(ofd)) { + bclose(ofd); + } + pm_strcpy(attr->ofname, "*none*"); + return true; + } + + /* + * If Windows stuff failed, e.g. attempt to restore Unix file + * to Windows, simply fall through and we will do it the + * universal way. + */ +#endif /* HAVE_WIN32 */ + + old_mask = umask(0); + if (is_bopen(ofd)) { + char ec1[50], ec2[50]; + fsize = blseek(ofd, 0, SEEK_END); + if (attr->type == FT_REG && fsize > 0 && attr->statp.st_size > 0 && + fsize != (boffset_t)attr->statp.st_size) { + Jmsg3(jcr, M_ERROR, 0, _("File size of restored file %s not correct. Original %s, restored %s.\n"), + attr->ofname, edit_uint64(attr->statp.st_size, ec1), + edit_uint64(fsize, ec2)); + } + } + + /* + * We do not restore sockets, so skip trying to restore their + * attributes. + */ + if (attr->type == FT_SPEC && S_ISSOCK(attr->statp.st_mode)) { + goto bail_out; + } + + /* ***FIXME**** optimize -- don't do if already correct */ + /* + * For link, change owner of link using lchown, but don't + * try to do a chmod as that will update the file behind it. 
+ */ + if (attr->type == FT_LNK) { +#ifdef HAVE_LCHOWN + /* Change owner of link, not of real file */ + if (lchown(attr->ofname, attr->statp.st_uid, attr->statp.st_gid) < 0 && print_error(jcr)) { + berrno be; + Jmsg2(jcr, M_ERROR, 0, _("Unable to set file owner %s: ERR=%s\n"), + attr->ofname, be.bstrerror()); + ok = false; + } +#endif +#ifdef HAVE_LUTIMES + struct timeval times[2]; + times[0].tv_sec = attr->statp.st_atime; + times[0].tv_usec = 0; + times[1].tv_sec = attr->statp.st_mtime; + times[1].tv_usec = 0; + if (lutimes(attr->ofname, times) < 0 && print_error(jcr)) { + berrno be; + Jmsg2(jcr, M_ERROR, 0, _("Unable to set file times %s: ERR=%s\n"), + attr->ofname, be.bstrerror()); + ok = false; + } +#endif + } else { + /* + * At this point, we have a file that is not a LINK + */ + ok = set_mod_own_time(jcr, ofd, attr); + +#ifdef HAVE_CHFLAGS + /* + * FreeBSD user flags + * + * Note, this should really be done before the utime() above, + * but if the immutable bit is set, it will make the utimes() + * fail. + */ + if (chflags(attr->ofname, attr->statp.st_flags) < 0 && print_error(jcr)) { + berrno be; + Jmsg2(jcr, M_ERROR, 0, _("Unable to set file flags %s: ERR=%s\n"), + attr->ofname, be.bstrerror()); + ok = false; + } +#endif + } + +bail_out: + if (is_bopen(ofd)) { + bclose(ofd); + } + pm_strcpy(attr->ofname, "*none*"); + umask(old_mask); + return ok; +} + + +/*=============================================================*/ +/* */ +/* * * * U n i x * * * * */ +/* */ +/*=============================================================*/ + +#ifndef HAVE_WIN32 + +/* + * It is possible to piggyback additional data e.g. ACLs on + * the encode_stat() data by returning the extended attributes + * here. They must be "self-contained" (i.e. you keep track + * of your own length), and they must be in ASCII string + * format. Using this feature is not recommended. + * The code below shows how to return nothing. See the Win32 + * code below for returning something in the attributes. + */ +int encode_attribsEx(JCR *jcr, char *attribsEx, FF_PKT *ff_pkt) +{ +#ifdef HAVE_DARWIN_OS + /* + * We save the Mac resource fork length so that on a + * restore, we can be sure we put back the whole resource. 
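
For symlinks (FT_LNK) the code above deliberately uses lchown() and, where available, lutimes(), so the link itself is updated rather than the file it points to. Below is a small sketch of that pair, not taken from the patch, assuming a system that provides lutimes() (glibc wants _GNU_SOURCE for it); the paths are placeholders:

#define _GNU_SOURCE                /* lutimes() on glibc */
#include <sys/stat.h>
#include <sys/time.h>
#include <unistd.h>
#include <stdio.h>
#include <time.h>

int main()
{
   const char *lnk = "/tmp/example-link";         /* placeholder name   */

   (void)unlink(lnk);
   if (symlink("/etc/hosts", lnk) < 0) {          /* placeholder target */
      perror("symlink");
      return 1;
   }

   /* chown() would follow the link; lchown() changes the link itself */
   if (lchown(lnk, getuid(), getgid()) < 0) {
      perror("lchown");
   }

   /* utimes() would follow the link; lutimes() sets the link's own times */
   struct timeval tv[2];
   tv[0].tv_sec = tv[1].tv_sec = time(NULL) - 3600;
   tv[0].tv_usec = tv[1].tv_usec = 0;
   if (lutimes(lnk, tv) < 0) {
      perror("lutimes");
   }
   return 0;
}
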
+ */ + char *p; + + *attribsEx = 0; /* no extended attributes (yet) */ + if (jcr->cmd_plugin || ff_pkt->type == FT_DELETED) { + return STREAM_UNIX_ATTRIBUTES; + } + p = attribsEx; + if (ff_pkt->flags & FO_HFSPLUS) { + p += to_base64((uint64_t)(ff_pkt->hfsinfo.rsrclength), p); + } + *p = 0; +#else + *attribsEx = 0; /* no extended attributes */ +#endif + return STREAM_UNIX_ATTRIBUTES; +} + +#endif + + + +/*=============================================================*/ +/* */ +/* * * * W i n 3 2 * * * * */ +/* */ +/*=============================================================*/ + +#ifdef HAVE_WIN32 + +int encode_attribsEx(JCR *jcr, char *attribsEx, FF_PKT *ff_pkt) +{ + char *p = attribsEx; + WIN32_FILE_ATTRIBUTE_DATA atts; + ULARGE_INTEGER li; + + attribsEx[0] = 0; /* no extended attributes */ + + if (jcr->cmd_plugin || ff_pkt->type == FT_DELETED) { + return STREAM_UNIX_ATTRIBUTES; + } + + unix_name_to_win32(&ff_pkt->sys_fname, ff_pkt->fname); + + /* try unicode version */ + if (p_GetFileAttributesExW) { + POOLMEM* pwszBuf = get_pool_memory(PM_FNAME); + make_win32_path_UTF8_2_wchar(&pwszBuf, ff_pkt->fname); + + BOOL b=p_GetFileAttributesExW((LPCWSTR)pwszBuf, GetFileExInfoStandard, + (LPVOID)&atts); + free_pool_memory(pwszBuf); + + if (!b) { + win_error(jcr, "GetFileAttributesExW:", ff_pkt->sys_fname); + return STREAM_UNIX_ATTRIBUTES; + } + } + else { + if (!p_GetFileAttributesExA) + return STREAM_UNIX_ATTRIBUTES; + + if (!p_GetFileAttributesExA(ff_pkt->sys_fname, GetFileExInfoStandard, + (LPVOID)&atts)) { + win_error(jcr, "GetFileAttributesExA:", ff_pkt->sys_fname); + return STREAM_UNIX_ATTRIBUTES; + } + } + + p += to_base64((uint64_t)atts.dwFileAttributes, p); + *p++ = ' '; /* separate fields with a space */ + li.LowPart = atts.ftCreationTime.dwLowDateTime; + li.HighPart = atts.ftCreationTime.dwHighDateTime; + p += to_base64((uint64_t)li.QuadPart, p); + *p++ = ' '; + li.LowPart = atts.ftLastAccessTime.dwLowDateTime; + li.HighPart = atts.ftLastAccessTime.dwHighDateTime; + p += to_base64((uint64_t)li.QuadPart, p); + *p++ = ' '; + li.LowPart = atts.ftLastWriteTime.dwLowDateTime; + li.HighPart = atts.ftLastWriteTime.dwHighDateTime; + p += to_base64((uint64_t)li.QuadPart, p); + *p++ = ' '; + p += to_base64((uint64_t)atts.nFileSizeHigh, p); + *p++ = ' '; + p += to_base64((uint64_t)atts.nFileSizeLow, p); + *p = 0; + return STREAM_UNIX_ATTRIBUTES_EX; +} + +/* Define attributes that are legal to set with SetFileAttributes() */ +#define SET_ATTRS ( \ + FILE_ATTRIBUTE_ARCHIVE| \ + FILE_ATTRIBUTE_HIDDEN| \ + FILE_ATTRIBUTE_NORMAL| \ + FILE_ATTRIBUTE_NOT_CONTENT_INDEXED| \ + FILE_ATTRIBUTE_OFFLINE| \ + FILE_ATTRIBUTE_READONLY| \ + FILE_ATTRIBUTE_SYSTEM| \ + FILE_ATTRIBUTE_TEMPORARY) + +/* + * Set Extended File Attributes for Win32 + * + * fname is the original filename + * ofile is the output filename (may be in a different directory) + * + * Returns: true on success + * false on failure + */ +static bool set_win32_attributes(JCR *jcr, ATTR *attr, BFILE *ofd) +{ + char *p = attr->attrEx; + int64_t val; + WIN32_FILE_ATTRIBUTE_DATA atts; + ULARGE_INTEGER li; + POOLMEM *win32_ofile; + + /* if we have neither Win ansi nor wchar API, get out */ + if (!(p_SetFileAttributesW || p_SetFileAttributesA)) { + return false; + } + + if (!p || !*p) { /* we should have attributes */ + Dmsg2(100, "Attributes missing. 
of=%s ofd=%d\n", attr->ofname, ofd->fid); + if (is_bopen(ofd)) { + bclose(ofd); + } + return false; + } else { + Dmsg2(100, "Attribs %s = %s\n", attr->ofname, attr->attrEx); + } + + p += from_base64(&val, p); + plug(atts.dwFileAttributes, val); + p++; /* skip space */ + p += from_base64(&val, p); + li.QuadPart = val; + atts.ftCreationTime.dwLowDateTime = li.LowPart; + atts.ftCreationTime.dwHighDateTime = li.HighPart; + p++; /* skip space */ + p += from_base64(&val, p); + li.QuadPart = val; + atts.ftLastAccessTime.dwLowDateTime = li.LowPart; + atts.ftLastAccessTime.dwHighDateTime = li.HighPart; + p++; /* skip space */ + p += from_base64(&val, p); + li.QuadPart = val; + atts.ftLastWriteTime.dwLowDateTime = li.LowPart; + atts.ftLastWriteTime.dwHighDateTime = li.HighPart; + p++; + p += from_base64(&val, p); + plug(atts.nFileSizeHigh, val); + p++; + p += from_base64(&val, p); + plug(atts.nFileSizeLow, val); + + /* Convert to Windows path format */ + win32_ofile = get_pool_memory(PM_FNAME); + unix_name_to_win32(&win32_ofile, attr->ofname); + + /* At this point, we have reconstructed the WIN32_FILE_ATTRIBUTE_DATA pkt */ + + if (!is_bopen(ofd)) { + Dmsg1(100, "File not open: %s\n", attr->ofname); + bopen(ofd, attr->ofname, O_WRONLY|O_BINARY, 0); /* attempt to open the file */ + } + + if (is_bopen(ofd)) { + Dmsg1(100, "SetFileTime %s\n", attr->ofname); + if (!SetFileTime(bget_handle(ofd), + &atts.ftCreationTime, + &atts.ftLastAccessTime, + &atts.ftLastWriteTime)) { + win_error(jcr, M_WARNING, "SetFileTime:", win32_ofile); + } + + /* + * Inform win32 api that the given file is a sparse file + */ + if (atts.dwFileAttributes & FILE_ATTRIBUTE_SPARSE_FILE) { + DWORD bytesReturned; + + Dmsg1(100, "Set FILE_ATTRIBUTE_SPARSE_FILE on %s\n", attr->ofname); + if (!DeviceIoControl(bget_handle(ofd), FSCTL_SET_SPARSE, + NULL, 0, NULL, 0, &bytesReturned, NULL)) + { + /* Not sure we really want to have a Warning for such attribute */ + win_error(jcr, M_WARNING, "set SPARSE_FILE:", win32_ofile); + } + } + + /* + * Restore the file as compressed. 
+ */ + if (atts.dwFileAttributes & FILE_ATTRIBUTE_COMPRESSED) { + int fmt = COMPRESSION_FORMAT_DEFAULT; + DWORD bytesReturned; + + Dmsg1(100, "Set FILE_ATTRIBUTE_COMPRESSED on %s\n", attr->ofname); + if (!DeviceIoControl(bget_handle(ofd), FSCTL_SET_COMPRESSION, + &fmt, sizeof(fmt), NULL, 0, &bytesReturned, NULL)) + { + /* Not sure we really want to have a Warning for such attribute */ + win_error(jcr, M_WARNING, "set COMPRESSED:", win32_ofile); + } + } + + bclose(ofd); + } + + Dmsg1(100, "SetFileAtts %s\n", attr->ofname); + if (!(atts.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)) { + if (p_SetFileAttributesW) { + POOLMEM* pwszBuf = get_pool_memory(PM_FNAME); + make_win32_path_UTF8_2_wchar(&pwszBuf, attr->ofname); + + BOOL b=p_SetFileAttributesW((LPCWSTR)pwszBuf, atts.dwFileAttributes & SET_ATTRS); + free_pool_memory(pwszBuf); + + if (!b) + win_error(jcr, M_WARNING, "SetFileAttributesW:", win32_ofile); + } + else { + if (!p_SetFileAttributesA(win32_ofile, atts.dwFileAttributes & SET_ATTRS)) { + win_error(jcr, M_WARNING, "SetFileAttributesA:", win32_ofile); + } + } + } + free_pool_memory(win32_ofile); + return true; +} + +void win_error(JCR *jcr, int type, const char *prefix, POOLMEM *win32_ofile) +{ + DWORD lerror = GetLastError(); + LPTSTR msg; + FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER| + FORMAT_MESSAGE_FROM_SYSTEM, + NULL, + lerror, + 0, + (LPTSTR)&msg, + 0, + NULL); + Dmsg3(100, "Error in %s on file %s: ERR=%s\n", prefix, win32_ofile, msg); + strip_trailing_junk(msg); + Jmsg3(jcr, type, 0, _("Error in %s file %s: ERR=%s\n"), prefix, win32_ofile, msg); + LocalFree(msg); +} + +void win_error(JCR *jcr, const char *prefix, POOLMEM *win32_ofile) +{ + win_error(jcr, M_ERROR, prefix, win32_ofile); +} + +void win_error(JCR *jcr, const char *prefix, DWORD lerror) +{ + LPTSTR msg; + FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER| + FORMAT_MESSAGE_FROM_SYSTEM, + NULL, + lerror, + 0, + (LPTSTR)&msg, + 0, + NULL); + strip_trailing_junk(msg); + if (jcr) { + Jmsg2(jcr, M_ERROR, 0, _("Error in %s: ERR=%s\n"), prefix, msg); + } + MessageBox(NULL, msg, prefix, MB_OK); + LocalFree(msg); +} +#endif /* HAVE_WIN32 */ diff --git a/src/findlib/bfile.c b/src/findlib/bfile.c new file mode 100644 index 00000000..da424832 --- /dev/null +++ b/src/findlib/bfile.c @@ -0,0 +1,1176 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula low level File I/O routines. This routine simulates + * open(), read(), write(), and close(), but using native routines. + * I.e. on Windows, we use Windows APIs. 
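
win_error() above turns GetLastError() into readable text with FormatMessage(), strips the trailing newline, and frees the system-allocated buffer with LocalFree(). The same idiom in isolation (not from the patch), using a deliberately provoked error code:

#include <windows.h>
#include <stdio.h>

/* Render a Win32 error code as text, print it, and free the buffer */
static void print_win32_error(const char *prefix, DWORD err)
{
   LPSTR msg = NULL;
   DWORD len = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                              FORMAT_MESSAGE_FROM_SYSTEM |
                              FORMAT_MESSAGE_IGNORE_INSERTS,
                              NULL, err, 0, (LPSTR)&msg, 0, NULL);
   if (len == 0) {
      fprintf(stderr, "%s: unknown error %lu\n", prefix, err);
      return;
   }
   /* Trim the trailing CR/LF that FormatMessage appends */
   while (len > 0 && (msg[len-1] == '\r' || msg[len-1] == '\n')) {
      msg[--len] = 0;
   }
   fprintf(stderr, "%s: ERR=%s\n", prefix, msg);
   LocalFree(msg);
}

int main()
{
   SetLastError(ERROR_FILE_NOT_FOUND);          /* force a known error */
   print_win32_error("CreateFile demo", GetLastError());
   return 0;
}
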
+ * + * Kern Sibbald, April MMIII + * + */ + +#include "bacula.h" +#include "find.h" + +const int dbglvl = 200; + +int (*plugin_bopen)(BFILE *bfd, const char *fname, uint64_t flags, mode_t mode) = NULL; +int (*plugin_bclose)(BFILE *bfd) = NULL; +ssize_t (*plugin_bread)(BFILE *bfd, void *buf, size_t count) = NULL; +ssize_t (*plugin_bwrite)(BFILE *bfd, void *buf, size_t count) = NULL; +boffset_t (*plugin_blseek)(BFILE *bfd, boffset_t offset, int whence) = NULL; + + +#ifdef HAVE_DARWIN_OS +#include +#endif + +#if !defined(HAVE_FDATASYNC) +#define fdatasync(fd) +#endif + +#ifdef HAVE_WIN32 +void pause_msg(const char *file, const char *func, int line, const char *msg) +{ + char buf[1000]; + if (msg) { + bsnprintf(buf, sizeof(buf), "%s:%s:%d %s", file, func, line, msg); + } else { + bsnprintf(buf, sizeof(buf), "%s:%s:%d", file, func, line); + } + MessageBox(NULL, buf, "Pause", MB_OK); +} +#endif + +/* =============================================================== + * + * U N I X AND W I N D O W S + * + * =============================================================== + */ + +bool is_win32_stream(int stream) +{ + switch (stream) { + case STREAM_WIN32_DATA: + case STREAM_WIN32_GZIP_DATA: + case STREAM_WIN32_COMPRESSED_DATA: + case STREAM_ENCRYPTED_WIN32_DATA: + case STREAM_ENCRYPTED_WIN32_GZIP_DATA: + case STREAM_ENCRYPTED_WIN32_COMPRESSED_DATA: + return true; + } + return false; +} + +const char *stream_to_ascii(int stream) +{ + static char buf[20]; + + switch (stream & STREAMMASK_TYPE) { + case STREAM_UNIX_ATTRIBUTES: + return _("Unix attributes"); + case STREAM_FILE_DATA: + return _("File data"); + case STREAM_MD5_DIGEST: + return _("MD5 digest"); + case STREAM_GZIP_DATA: + return _("GZIP data"); + case STREAM_COMPRESSED_DATA: + return _("Compressed data"); + case STREAM_UNIX_ATTRIBUTES_EX: + return _("Extended attributes"); + case STREAM_SPARSE_DATA: + return _("Sparse data"); + case STREAM_SPARSE_GZIP_DATA: + return _("GZIP sparse data"); + case STREAM_SPARSE_COMPRESSED_DATA: + return _("Compressed sparse data"); + case STREAM_PROGRAM_NAMES: + return _("Program names"); + case STREAM_PROGRAM_DATA: + return _("Program data"); + case STREAM_SHA1_DIGEST: + return _("SHA1 digest"); + case STREAM_WIN32_DATA: + return _("Win32 data"); + case STREAM_WIN32_GZIP_DATA: + return _("Win32 GZIP data"); + case STREAM_WIN32_COMPRESSED_DATA: + return _("Win32 compressed data"); + case STREAM_MACOS_FORK_DATA: + return _("MacOS Fork data"); + case STREAM_HFSPLUS_ATTRIBUTES: + return _("HFS+ attribs"); + case STREAM_UNIX_ACCESS_ACL: + return _("Standard Unix ACL attribs"); + case STREAM_UNIX_DEFAULT_ACL: + return _("Default Unix ACL attribs"); + case STREAM_SHA256_DIGEST: + return _("SHA256 digest"); + case STREAM_SHA512_DIGEST: + return _("SHA512 digest"); + case STREAM_SIGNED_DIGEST: + return _("Signed digest"); + case STREAM_ENCRYPTED_FILE_DATA: + return _("Encrypted File data"); + case STREAM_ENCRYPTED_WIN32_DATA: + return _("Encrypted Win32 data"); + case STREAM_ENCRYPTED_SESSION_DATA: + return _("Encrypted session data"); + case STREAM_ENCRYPTED_FILE_GZIP_DATA: + return _("Encrypted GZIP data"); + case STREAM_ENCRYPTED_FILE_COMPRESSED_DATA: + return _("Encrypted compressed data"); + case STREAM_ENCRYPTED_WIN32_GZIP_DATA: + return _("Encrypted Win32 GZIP data"); + case STREAM_ENCRYPTED_WIN32_COMPRESSED_DATA: + return _("Encrypted Win32 Compressed data"); + case STREAM_ENCRYPTED_MACOS_FORK_DATA: + return _("Encrypted MacOS fork data"); + case STREAM_PLUGIN_NAME: + return _("Plugin Name"); + case 
STREAM_PLUGIN_DATA: + return _("Plugin Data"); + case STREAM_RESTORE_OBJECT: + return _("Restore Object"); + case STREAM_XACL_AIX_TEXT: + return _("AIX ACL attribs"); + case STREAM_XACL_DARWIN_ACCESS: + return _("Darwin ACL attribs"); + case STREAM_XACL_FREEBSD_DEFAULT: + return _("FreeBSD Default ACL attribs"); + case STREAM_XACL_FREEBSD_ACCESS: + return _("FreeBSD Access ACL attribs"); + case STREAM_XACL_HPUX_ACL_ENTRY: + return _("HPUX ACL attribs"); + case STREAM_XACL_IRIX_DEFAULT: + return _("Irix Default ACL attribs"); + case STREAM_XACL_IRIX_ACCESS: + return _("Irix Access ACL attribs"); + case STREAM_XACL_LINUX_DEFAULT: + return _("Linux Default ACL attribs"); + case STREAM_XACL_LINUX_ACCESS: + return _("Linux Access ACL attribs"); + case STREAM_XACL_TRU64_DEFAULT: + return _("TRU64 Default ACL attribs"); + case STREAM_XACL_TRU64_ACCESS: + return _("TRU64 Access ACL attribs"); + case STREAM_XACL_SOLARIS_POSIX: + return _("Solaris POSIX ACL attribs"); + case STREAM_XACL_SOLARIS_NFS4: + return _("Solaris NFSv4/ZFS ACL attribs"); + case STREAM_XACL_AFS_TEXT: + return _("AFS ACL attribs"); + case STREAM_XACL_AIX_AIXC: + return _("AIX POSIX ACL attribs"); + case STREAM_XACL_AIX_NFS4: + return _("AIX NFSv4 ACL attribs"); + case STREAM_XACL_FREEBSD_NFS4: + return _("FreeBSD NFSv4/ZFS ACL attribs"); + case STREAM_XACL_HURD_DEFAULT: + return _("GNU Hurd Default ACL attribs"); + case STREAM_XACL_HURD_ACCESS: + return _("GNU Hurd Access ACL attribs"); + case STREAM_XACL_HURD_XATTR: + return _("GNU Hurd Extended attribs"); + case STREAM_XACL_IRIX_XATTR: + return _("IRIX Extended attribs"); + case STREAM_XACL_TRU64_XATTR: + return _("TRU64 Extended attribs"); + case STREAM_XACL_AIX_XATTR: + return _("AIX Extended attribs"); + case STREAM_XACL_OPENBSD_XATTR: + return _("OpenBSD Extended attribs"); + case STREAM_XACL_SOLARIS_SYS_XATTR: + return _("Solaris Extensible attribs or System Extended attribs"); + case STREAM_XACL_SOLARIS_XATTR: + return _("Solaris Extended attribs"); + case STREAM_XACL_DARWIN_XATTR: + return _("Darwin Extended attribs"); + case STREAM_XACL_FREEBSD_XATTR: + return _("FreeBSD Extended attribs"); + case STREAM_XACL_LINUX_XATTR: + return _("Linux Extended attribs"); + case STREAM_XACL_NETBSD_XATTR: + return _("NetBSD Extended attribs"); + default: + sprintf(buf, "%d", stream); + return (const char *)buf; + } +} + +/** + * Convert a 64 bit little endian to a big endian + */ +void int64_LE2BE(int64_t* pBE, const int64_t v) +{ + /* convert little endian to big endian */ + if (htonl(1) != 1L) { /* no work if on little endian machine */ + memcpy(pBE, &v, sizeof(int64_t)); + } else { + int i; + uint8_t rv[sizeof(int64_t)]; + uint8_t *pv = (uint8_t *) &v; + + for (i = 0; i < 8; i++) { + rv[i] = pv[7 - i]; + } + memcpy(pBE, &rv, sizeof(int64_t)); + } +} + +/** + * Convert a 32 bit little endian to a big endian + */ +void int32_LE2BE(int32_t* pBE, const int32_t v) +{ + /* convert little endian to big endian */ + if (htonl(1) != 1L) { /* no work if on little endian machine */ + memcpy(pBE, &v, sizeof(int32_t)); + } else { + int i; + uint8_t rv[sizeof(int32_t)]; + uint8_t *pv = (uint8_t *) &v; + + for (i = 0; i < 4; i++) { + rv[i] = pv[3 - i]; + } + memcpy(pBE, &rv, sizeof(int32_t)); + } +} + + +/** + * Read a BackupRead block and pull out the file data + */ +bool processWin32BackupAPIBlock (BFILE *bfd, void *pBuffer, ssize_t dwSize) +{ + int64_t len = dwSize; + char *dat=(char *)pBuffer; + int64_t use_len; + + while (len>0 && bfd->win32filter.have_data(&dat, &len, &use_len)) { + if 
(bwrite(bfd, dat, use_len) != (ssize_t)use_len) { + return false; + } + dat+=use_len; + } + return true; +} + +/* =============================================================== + * + * W I N D O W S + * + * =============================================================== + */ + +#if defined(HAVE_WIN32) + +void unix_name_to_win32(POOLMEM **win32_name, const char *name); +extern "C" HANDLE get_osfhandle(int fd); + + +void binit(BFILE *bfd) +{ + memset(bfd, 0, sizeof(BFILE)); + bfd->fid = -1; + bfd->mode = BF_CLOSED; + bfd->use_backup_api = have_win32_api(); + bfd->cmd_plugin = false; +} + +/* + * Enables using the Backup API (win32_data). + * Returns 1 if function worked + * Returns 0 if failed (i.e. do not have Backup API on this machine) + */ +bool set_win32_backup(BFILE *bfd) +{ + /* We enable if possible here */ + bfd->use_backup_api = have_win32_api(); + return bfd->use_backup_api; +} + +void set_fattrs(BFILE *bfd, struct stat *statp) +{ + bfd->fattrs = statp->st_fattrs; + Dmsg1(200, "set_fattrs 0x%x\n", bfd->fattrs); +} + +bool set_portable_backup(BFILE *bfd) +{ + bfd->use_backup_api = false; + return true; +} + +bool set_cmd_plugin(BFILE *bfd, JCR *jcr) +{ + bfd->cmd_plugin = true; + bfd->jcr = jcr; + return true; +} + +/* + * Return 1 if we are NOT using Win32 BackupWrite() + * return 0 if are + */ +bool is_portable_backup(BFILE *bfd) +{ + return !bfd->use_backup_api; +} + +bool is_plugin_data(BFILE *bfd) +{ + return bfd->cmd_plugin; +} + +bool have_win32_api() +{ + return p_BackupRead && p_BackupWrite; +} + + +/* + * Return true if we support the stream + * false if we do not support the stream + * + * This code is running under Win32, so we + * do not need #ifdef on MACOS ... + */ +bool is_restore_stream_supported(int stream) +{ + switch (stream) { + +/* Streams known not to be supported */ +#ifndef HAVE_LIBZ + case STREAM_GZIP_DATA: + case STREAM_SPARSE_GZIP_DATA: + case STREAM_WIN32_GZIP_DATA: +#endif +#ifndef HAVE_LZO + case STREAM_COMPRESSED_DATA: + case STREAM_SPARSE_COMPRESSED_DATA: + case STREAM_WIN32_COMPRESSED_DATA: + case STREAM_ENCRYPTED_FILE_COMPRESSED_DATA: + case STREAM_ENCRYPTED_WIN32_COMPRESSED_DATA: +#endif + case STREAM_MACOS_FORK_DATA: + case STREAM_HFSPLUS_ATTRIBUTES: + case STREAM_ENCRYPTED_MACOS_FORK_DATA: + return false; + + /* Known streams */ +#ifdef HAVE_LIBZ + case STREAM_GZIP_DATA: + case STREAM_SPARSE_GZIP_DATA: + case STREAM_WIN32_GZIP_DATA: +#endif +#ifdef HAVE_LZO + case STREAM_COMPRESSED_DATA: + case STREAM_SPARSE_COMPRESSED_DATA: + case STREAM_WIN32_COMPRESSED_DATA: +#endif + case STREAM_WIN32_DATA: + case STREAM_UNIX_ATTRIBUTES: + case STREAM_FILE_DATA: + case STREAM_MD5_DIGEST: + case STREAM_UNIX_ATTRIBUTES_EX: + case STREAM_SPARSE_DATA: + case STREAM_PROGRAM_NAMES: + case STREAM_PROGRAM_DATA: + case STREAM_SHA1_DIGEST: +#ifdef HAVE_SHA2 + case STREAM_SHA256_DIGEST: + case STREAM_SHA512_DIGEST: +#endif +#ifdef HAVE_CRYPTO + case STREAM_SIGNED_DIGEST: + case STREAM_ENCRYPTED_FILE_DATA: + case STREAM_ENCRYPTED_FILE_GZIP_DATA: + case STREAM_ENCRYPTED_WIN32_DATA: + case STREAM_ENCRYPTED_WIN32_GZIP_DATA: +#ifdef HAVE_LZO + case STREAM_ENCRYPTED_FILE_COMPRESSED_DATA: + case STREAM_ENCRYPTED_WIN32_COMPRESSED_DATA: +#endif +#endif /* !HAVE_CRYPTO */ + case 0: /* compatibility with old tapes */ + return true; + } + return false; +} + +HANDLE bget_handle(BFILE *bfd) +{ + return bfd->fh; +} + +/* + * The following code was contributed by Graham Keeling from his + * burp project. 
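
is_restore_stream_supported() above leans on the preprocessor for its answer: case labels for streams whose support library was not configured sit in a group that returns false, and the same labels reappear in the supported group when the library is present. A stripped-down sketch of that pattern, with invented stream IDs and an invented HAVE_ZLIB switch rather than Bacula's real ones:

#include <stdbool.h>
#include <stdio.h>

/* Made-up stream identifiers for illustration */
enum { ST_PLAIN = 1, ST_GZIP = 2, ST_SPARSE = 3 };

/* #define HAVE_ZLIB 1   -- would normally come from ./configure */

static bool stream_supported(int stream)
{
   switch (stream) {
#ifndef HAVE_ZLIB
   case ST_GZIP:                 /* built without zlib: unsupported */
      return false;
#endif

#ifdef HAVE_ZLIB
   case ST_GZIP:                 /* built with zlib: supported */
#endif
   case ST_PLAIN:
   case ST_SPARSE:
      return true;
   }
   return false;
}

int main()
{
   printf("gzip stream supported: %s\n",
          stream_supported(ST_GZIP) ? "yes" : "no");
   return 0;
}
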
August 2014 + */ +static int encrypt_bopen(BFILE *bfd, const char *fname, uint64_t flags, mode_t mode) +{ + ULONG ulFlags = 0; + POOLMEM *win32_fname; + POOLMEM *win32_fname_wchar; + + bfd->mode = BF_CLOSED; + bfd->fid = -1; + + if (!(p_OpenEncryptedFileRawA || p_OpenEncryptedFileRawW)) { + Dmsg0(50, "No OpenEncryptedFileRawA and no OpenEncryptedFileRawW APIs!!!\n"); + return -1; + } + + /* Convert to Windows path format */ + win32_fname = get_pool_memory(PM_FNAME); + win32_fname_wchar = get_pool_memory(PM_FNAME); + + unix_name_to_win32(&win32_fname, (char *)fname); + + if (p_CreateFileW && p_MultiByteToWideChar) { + make_win32_path_UTF8_2_wchar(&win32_fname_wchar, fname); + } + + if ((flags & O_CREAT) || (flags & O_WRONLY)) { + ulFlags = CREATE_FOR_IMPORT | OVERWRITE_HIDDEN; + if (bfd->fattrs & FILE_ATTRIBUTE_DIRECTORY) { + mkdir(fname, 0777); + ulFlags |= CREATE_FOR_DIR; + } + bfd->mode = BF_WRITE; + Dmsg0(200, "encrypt_bopen for write.\n"); + } else { + /* Open existing for read */ + ulFlags = CREATE_FOR_EXPORT; + bfd->mode = BF_READ; + Dmsg0(200, "encrypt_bopen for read.\n"); + } + + if (p_OpenEncryptedFileRawW && p_MultiByteToWideChar) { + /* unicode open */ + if (p_OpenEncryptedFileRawW((LPCWSTR)win32_fname_wchar, + ulFlags, &(bfd->pvContext))) { + bfd->mode = BF_CLOSED; + errno = b_errno_win32; + bfd->berrno = b_errno_win32; + } + } else { + /* ascii open */ + if (p_OpenEncryptedFileRawA(win32_fname, ulFlags, &(bfd->pvContext))) { + bfd->mode = BF_CLOSED; + errno = b_errno_win32; + bfd->berrno = b_errno_win32; + } + } + free_pool_memory(win32_fname_wchar); + free_pool_memory(win32_fname); + bfd->fid = (bfd->mode == BF_CLOSED) ? -1 : 0; + return bfd->mode==BF_CLOSED ? -1: 1; +} + +static int encrypt_bclose(BFILE *bfd) +{ + Dmsg0(200, "encrypt_bclose\n"); + if (p_CloseEncryptedFileRaw) { + p_CloseEncryptedFileRaw(bfd->pvContext); + } + bfd->pvContext = NULL; + bfd->mode = BF_CLOSED; + bfd->fattrs = 0; + bfd->fid = -1; + return 0; +} + +/* Windows */ +int bopen(BFILE *bfd, const char *fname, uint64_t flags, mode_t mode) +{ + POOLMEM *win32_fname; + POOLMEM *win32_fname_wchar; + + DWORD dwaccess, dwflags, dwshare; + + if (bfd->fattrs & FILE_ATTRIBUTE_ENCRYPTED) { + return encrypt_bopen(bfd, fname, flags, mode); + } + + /* Convert to Windows path format */ + win32_fname = get_pool_memory(PM_FNAME); + win32_fname_wchar = get_pool_memory(PM_FNAME); + + unix_name_to_win32(&win32_fname, (char *)fname); + + if (bfd->cmd_plugin && plugin_bopen) { + int rtnstat; + Dmsg1(50, "call plugin_bopen fname=%s\n", fname); + rtnstat = plugin_bopen(bfd, fname, flags, mode); + Dmsg1(50, "return from plugin_bopen status=%d\n", rtnstat); + if (rtnstat >= 0) { + if (flags & O_CREAT || flags & O_WRONLY) { /* Open existing for write */ + Dmsg1(50, "plugin_open for write OK file=%s.\n", fname); + bfd->mode = BF_WRITE; + } else { + Dmsg1(50, "plugin_open for read OK file=%s.\n", fname); + bfd->mode = BF_READ; + } + bfd->fid = -1; /* The file descriptor is invalid */ + } else { + bfd->mode = BF_CLOSED; + bfd->fid = -1; + Dmsg1(000, "==== plugin_bopen returned bad status=%d\n", rtnstat); + } + free_pool_memory(win32_fname_wchar); + free_pool_memory(win32_fname); + return bfd->mode == BF_CLOSED ? 
-1 : 1; + } + Dmsg0(100, "=== NO plugin\n"); + + if (!(p_CreateFileA || p_CreateFileW)) { + Dmsg0(50, "No CreateFileA and no CreateFileW!!!!!\n"); + return 0; + } + + if (p_CreateFileW && p_MultiByteToWideChar) { + make_win32_path_UTF8_2_wchar(&win32_fname_wchar, fname); + } + + if (flags & O_CREAT) { /* Create */ + if (bfd->use_backup_api) { + dwaccess = GENERIC_WRITE|FILE_ALL_ACCESS|WRITE_OWNER|WRITE_DAC|ACCESS_SYSTEM_SECURITY; + dwflags = FILE_FLAG_BACKUP_SEMANTICS; + } else { + dwaccess = GENERIC_WRITE; + dwflags = 0; + } + + if (p_CreateFileW && p_MultiByteToWideChar) { + // unicode open for create write + Dmsg1(100, "Create CreateFileW=%ls\n", win32_fname_wchar); + bfd->fh = p_CreateFileW((LPCWSTR)win32_fname_wchar, + dwaccess, /* Requested access */ + 0, /* Shared mode */ + NULL, /* SecurityAttributes */ + CREATE_ALWAYS, /* CreationDisposition */ + dwflags, /* Flags and attributes */ + NULL); /* TemplateFile */ + } else { + // ascii open + Dmsg1(100, "Create CreateFileA=%s\n", win32_fname); + bfd->fh = p_CreateFileA(win32_fname, + dwaccess, /* Requested access */ + 0, /* Shared mode */ + NULL, /* SecurityAttributes */ + CREATE_ALWAYS, /* CreationDisposition */ + dwflags, /* Flags and attributes */ + NULL); /* TemplateFile */ + } + + bfd->mode = BF_WRITE; + + } else if (flags & O_WRONLY) { /* Open existing for write */ + if (bfd->use_backup_api) { + dwaccess = GENERIC_READ|GENERIC_WRITE|WRITE_OWNER|WRITE_DAC; + /* If deduped we do not want to open the reparse point */ + if (bfd->fattrs & FILE_ATTRIBUTE_DEDUP) { + dwflags = FILE_FLAG_BACKUP_SEMANTICS; + } else { + dwflags = FILE_FLAG_BACKUP_SEMANTICS | FILE_FLAG_OPEN_REPARSE_POINT; + } + } else { + dwaccess = GENERIC_READ|GENERIC_WRITE; + dwflags = 0; + } + + if (p_CreateFileW && p_MultiByteToWideChar) { + // unicode open for open existing write + Dmsg1(100, "Write only CreateFileW=%s\n", win32_fname); + bfd->fh = p_CreateFileW((LPCWSTR)win32_fname_wchar, + dwaccess, /* Requested access */ + 0, /* Shared mode */ + NULL, /* SecurityAttributes */ + OPEN_EXISTING, /* CreationDisposition */ + dwflags, /* Flags and attributes */ + NULL); /* TemplateFile */ + } else { + // ascii open + Dmsg1(100, "Write only CreateFileA=%s\n", win32_fname); + bfd->fh = p_CreateFileA(win32_fname, + dwaccess, /* Requested access */ + 0, /* Shared mode */ + NULL, /* SecurityAttributes */ + OPEN_EXISTING, /* CreationDisposition */ + dwflags, /* Flags and attributes */ + NULL); /* TemplateFile */ + + } + + bfd->mode = BF_WRITE; + + } else { /* Read */ + if (bfd->use_backup_api) { + dwaccess = GENERIC_READ|READ_CONTROL|ACCESS_SYSTEM_SECURITY; + dwflags = FILE_FLAG_BACKUP_SEMANTICS | FILE_FLAG_SEQUENTIAL_SCAN | + FILE_FLAG_OPEN_REPARSE_POINT; + dwshare = FILE_SHARE_READ|FILE_SHARE_WRITE|FILE_SHARE_DELETE; + } else { + dwaccess = GENERIC_READ; + dwflags = 0; + dwshare = FILE_SHARE_READ|FILE_SHARE_WRITE; + } + + if (p_CreateFileW && p_MultiByteToWideChar) { + // unicode open for open existing read + Dmsg1(100, "Read CreateFileW=%ls\n", win32_fname_wchar); + bfd->fh = p_CreateFileW((LPCWSTR)win32_fname_wchar, + dwaccess, /* Requested access */ + dwshare, /* Share modes */ + NULL, /* SecurityAttributes */ + OPEN_EXISTING, /* CreationDisposition */ + dwflags, /* Flags and attributes */ + NULL); /* TemplateFile */ + } else { + // ascii open + Dmsg1(100, "Read CreateFileA=%s\n", win32_fname); + bfd->fh = p_CreateFileA(win32_fname, + dwaccess, /* Requested access */ + dwshare, /* Share modes */ + NULL, /* SecurityAttributes */ + OPEN_EXISTING, /* CreationDisposition */ + 
dwflags, /* Flags and attributes */ + NULL); /* TemplateFile */ + } + + bfd->mode = BF_READ; + } + + if (bfd->fh == INVALID_HANDLE_VALUE) { + berrno be; + bfd->lerror = GetLastError(); + bfd->berrno = b_errno_win32; + errno = b_errno_win32; + bfd->mode = BF_CLOSED; + Dmsg1(100, "Open failed: %s\n", be.bstrerror()); + } + bfd->block = 0; + bfd->total_bytes = 0; + bfd->errmsg = NULL; + bfd->lpContext = NULL; + bfd->win32filter.init(); + free_pool_memory(win32_fname_wchar); + free_pool_memory(win32_fname); + bfd->fid = (bfd->mode == BF_CLOSED) ? -1 : 0; + return bfd->mode == BF_CLOSED ? -1 : 1; +} + +/* + * Returns 0 on success + * -1 on error + */ +/* Windows */ +int bclose(BFILE *bfd) +{ + int stat = 0; + + if (bfd->mode == BF_CLOSED) { + Dmsg0(50, "=== BFD already closed.\n"); + return 0; + } + + if (bfd->cmd_plugin && plugin_bclose) { + stat = plugin_bclose(bfd); + Dmsg0(50, "==== BFD closed!!!\n"); + goto all_done; + } + + if (bfd->fattrs & FILE_ATTRIBUTE_ENCRYPTED) { + return encrypt_bclose(bfd); + } + + /* + * We need to tell the API to release the buffer it + * allocated in lpContext. We do so by calling the + * API one more time, but with the Abort bit set. + */ + if (bfd->use_backup_api && bfd->mode == BF_READ) { + BYTE buf[10]; + if (bfd->lpContext && !p_BackupRead(bfd->fh, + buf, /* buffer */ + (DWORD)0, /* bytes to read */ + &bfd->rw_bytes, /* bytes read */ + 1, /* Abort */ + 1, /* ProcessSecurity */ + &bfd->lpContext)) { /* Read context */ + errno = b_errno_win32; + stat = -1; + } + } else if (bfd->use_backup_api && bfd->mode == BF_WRITE) { + BYTE buf[10]; + if (bfd->lpContext && !p_BackupWrite(bfd->fh, + buf, /* buffer */ + (DWORD)0, /* bytes to read */ + &bfd->rw_bytes, /* bytes written */ + 1, /* Abort */ + 1, /* ProcessSecurity */ + &bfd->lpContext)) { /* Write context */ + errno = b_errno_win32; + stat = -1; + } + } + if (!CloseHandle(bfd->fh)) { + stat = -1; + errno = b_errno_win32; + } + +all_done: + if (bfd->errmsg) { + free_pool_memory(bfd->errmsg); + bfd->errmsg = NULL; + } + bfd->mode = BF_CLOSED; + bfd->fattrs = 0; + bfd->fid = -1; + bfd->lpContext = NULL; + bfd->cmd_plugin = false; + return stat; +} + +/* Returns: bytes read on success + * 0 on EOF + * -1 on error + */ +/* Windows */ +ssize_t bread(BFILE *bfd, void *buf, size_t count) +{ + bfd->rw_bytes = 0; + + if (bfd->cmd_plugin && plugin_bread) { + return plugin_bread(bfd, buf, count); + } + + if (bfd->use_backup_api) { + if (!p_BackupRead(bfd->fh, + (BYTE *)buf, + count, + &bfd->rw_bytes, + 0, /* no Abort */ + 1, /* Process Security */ + &bfd->lpContext)) { /* Context */ + berrno be; + bfd->lerror = GetLastError(); + bfd->berrno = b_errno_win32; + errno = b_errno_win32; + Dmsg1(100, "Read failed: %s\n", be.bstrerror()); + return -1; + } + } else { + if (!ReadFile(bfd->fh, + buf, + count, + &bfd->rw_bytes, + NULL)) { + bfd->lerror = GetLastError(); + bfd->berrno = b_errno_win32; + errno = b_errno_win32; + return -1; + } + } + bfd->block++; + if (bfd->rw_bytes > 0) { + bfd->total_bytes += bfd->rw_bytes; + } + return (ssize_t)bfd->rw_bytes; +} + +/* Windows */ +ssize_t bwrite(BFILE *bfd, void *buf, size_t count) +{ + bfd->rw_bytes = 0; + + if (bfd->cmd_plugin && plugin_bwrite) { + return plugin_bwrite(bfd, buf, count); + } + + if (bfd->use_backup_api) { + if (!p_BackupWrite(bfd->fh, + (BYTE *)buf, + count, + &bfd->rw_bytes, + 0, /* No abort */ + 1, /* Process Security */ + &bfd->lpContext)) { /* Context */ + berrno be; + bfd->lerror = GetLastError(); + bfd->berrno = b_errno_win32; + errno = b_errno_win32; + 
Dmsg1(100, "Write failed: %s\n", be.bstrerror()); + return -1; + } + } else { + if (!WriteFile(bfd->fh, + buf, + count, + &bfd->rw_bytes, + NULL)) { + bfd->lerror = GetLastError(); + bfd->berrno = b_errno_win32; + errno = b_errno_win32; + return -1; + } + } + bfd->block++; + if (bfd->rw_bytes > 0) { + bfd->total_bytes += bfd->rw_bytes; + } + return (ssize_t)bfd->rw_bytes; +} + +/* Windows */ +bool is_bopen(BFILE *bfd) +{ + return bfd->mode != BF_CLOSED; +} + +/* Windows */ +boffset_t blseek(BFILE *bfd, boffset_t offset, int whence) +{ + LONG offset_low = (LONG)offset; + LONG offset_high = (LONG)(offset >> 32); + DWORD dwResult; + + if (bfd->cmd_plugin && plugin_blseek) { + return plugin_blseek(bfd, offset, whence); + } + + dwResult = SetFilePointer(bfd->fh, offset_low, &offset_high, whence); + + if (dwResult == INVALID_SET_FILE_POINTER && GetLastError() != NO_ERROR) { + return (boffset_t)-1; + } + + return ((boffset_t)offset_high << 32) | dwResult; +} + +#else /* Unix systems */ + +/* =============================================================== + * + * U N I X + * + * =============================================================== + */ +/* Unix */ +void binit(BFILE *bfd) +{ + memset(bfd, 0, sizeof(BFILE)); + bfd->fid = -1; +} + +/* Unix */ +bool have_win32_api() +{ + return false; /* no can do */ +} + +/* + * Enables using the Backup API (win32_data). + * Returns true if function worked + * Returns false if failed (i.e. do not have Backup API on this machine) + */ +/* Unix */ +bool set_win32_backup(BFILE *bfd) +{ + return false; /* no can do */ +} + + +/* Unix */ +bool set_portable_backup(BFILE *bfd) +{ + return true; /* no problem */ +} + +bool is_plugin_data(BFILE *bfd) +{ + return bfd->cmd_plugin; +} + +/* + * Return true if we are writing in portable format + * return false if not + */ +/* Unix */ +bool is_portable_backup(BFILE *bfd) +{ + return true; /* portable by definition */ +} + +/* Unix */ +bool set_prog(BFILE *bfd, char *prog, JCR *jcr) +{ + return false; +} + +/* Unix */ +bool set_cmd_plugin(BFILE *bfd, JCR *jcr) +{ + bfd->cmd_plugin = true; + bfd->jcr = jcr; + return true; +} + +/* + * This code is running on a non-Win32 machine + */ +/* Unix */ +bool is_restore_stream_supported(int stream) +{ + /* No Win32 backup on this machine */ + switch (stream) { +#ifndef HAVE_LIBZ + case STREAM_GZIP_DATA: + case STREAM_SPARSE_GZIP_DATA: + case STREAM_WIN32_GZIP_DATA: +#endif +#ifndef HAVE_LZO + case STREAM_COMPRESSED_DATA: + case STREAM_SPARSE_COMPRESSED_DATA: + case STREAM_WIN32_COMPRESSED_DATA: + case STREAM_ENCRYPTED_FILE_COMPRESSED_DATA: + case STREAM_ENCRYPTED_WIN32_COMPRESSED_DATA: +#endif +#ifndef HAVE_DARWIN_OS + case STREAM_MACOS_FORK_DATA: + case STREAM_HFSPLUS_ATTRIBUTES: +#endif + return false; + + /* Known streams */ +#ifdef HAVE_LIBZ + case STREAM_GZIP_DATA: + case STREAM_SPARSE_GZIP_DATA: + case STREAM_WIN32_GZIP_DATA: +#endif +#ifdef HAVE_LZO + case STREAM_COMPRESSED_DATA: + case STREAM_SPARSE_COMPRESSED_DATA: + case STREAM_WIN32_COMPRESSED_DATA: + case STREAM_ENCRYPTED_FILE_COMPRESSED_DATA: + case STREAM_ENCRYPTED_WIN32_COMPRESSED_DATA: +#endif + case STREAM_WIN32_DATA: + case STREAM_UNIX_ATTRIBUTES: + case STREAM_FILE_DATA: + case STREAM_MD5_DIGEST: + case STREAM_UNIX_ATTRIBUTES_EX: + case STREAM_SPARSE_DATA: + case STREAM_PROGRAM_NAMES: + case STREAM_PROGRAM_DATA: + case STREAM_SHA1_DIGEST: +#ifdef HAVE_SHA2 + case STREAM_SHA256_DIGEST: + case STREAM_SHA512_DIGEST: +#endif +#ifdef HAVE_CRYPTO + case STREAM_SIGNED_DIGEST: + case STREAM_ENCRYPTED_FILE_DATA: + case 
STREAM_ENCRYPTED_FILE_GZIP_DATA: + case STREAM_ENCRYPTED_WIN32_DATA: + case STREAM_ENCRYPTED_WIN32_GZIP_DATA: +#endif +#ifdef HAVE_DARWIN_OS + case STREAM_MACOS_FORK_DATA: + case STREAM_HFSPLUS_ATTRIBUTES: +#ifdef HAVE_CRYPTO + case STREAM_ENCRYPTED_MACOS_FORK_DATA: +#endif /* HAVE_CRYPTO */ +#endif /* HAVE_DARWIN_OS */ + case 0: /* compatibility with old tapes */ + return true; + + } + return false; +} + +/* Unix */ +int bopen(BFILE *bfd, const char *fname, uint64_t flags, mode_t mode) +{ + if (bfd->cmd_plugin && plugin_bopen) { + Dmsg1(400, "call plugin_bopen fname=%s\n", fname); + bfd->fid = plugin_bopen(bfd, fname, flags, mode); + Dmsg2(400, "Plugin bopen fid=%d file=%s\n", bfd->fid, fname); + return bfd->fid; + } + + /* Normal file open */ + Dmsg1(dbglvl, "open file %s\n", fname); + + /* We use fnctl to set O_NOATIME if requested to avoid open error */ + bfd->fid = open(fname, (flags | O_CLOEXEC) & ~O_NOATIME, mode); + + /* Set O_NOATIME if possible */ + if (bfd->fid != -1 && flags & O_NOATIME) { + int oldflags = fcntl(bfd->fid, F_GETFL, 0); + if (oldflags == -1) { + bfd->berrno = errno; + close(bfd->fid); + bfd->fid = -1; + } else { + int ret = fcntl(bfd->fid, F_SETFL, oldflags | O_NOATIME); + /* EPERM means setting O_NOATIME was not allowed */ + if (ret == -1 && errno != EPERM) { + bfd->berrno = errno; + close(bfd->fid); + bfd->fid = -1; + } + } + } + bfd->berrno = errno; + bfd->m_flags = flags; + bfd->block = 0; + bfd->total_bytes = 0; + Dmsg1(400, "Open file %d\n", bfd->fid); + errno = bfd->berrno; + + bfd->win32filter.init(); + +#if defined(HAVE_POSIX_FADVISE) && defined(POSIX_FADV_WILLNEED) + /* If not RDWR or WRONLY must be Read Only */ + if (bfd->fid != -1 && !(flags & (O_RDWR|O_WRONLY))) { + int stat = posix_fadvise(bfd->fid, 0, 0, POSIX_FADV_WILLNEED); + Dmsg3(400, "Did posix_fadvise WILLNEED on %s fid=%d stat=%d\n", fname, bfd->fid, stat); + } +#endif + + return bfd->fid; +} + +#ifdef HAVE_DARWIN_OS +/* Open the resource fork of a file. 
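
The Unix bopen() above opens without O_NOATIME, then tries to add the flag with fcntl(F_SETFL) and treats EPERM as harmless (only the owner or a privileged process may suppress atime updates), and finally hints that the data will be read soon with posix_fadvise(POSIX_FADV_WILLNEED). A Linux-oriented sketch of that sequence on an arbitrary readable file, separate from the patch:

#define _GNU_SOURCE                 /* O_NOATIME on glibc */
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <stdio.h>

int main()
{
   const char *path = "/etc/hosts";            /* placeholder input file */

   int fd = open(path, O_RDONLY | O_CLOEXEC);  /* open without O_NOATIME */
   if (fd < 0) {
      perror("open");
      return 1;
   }

#ifdef O_NOATIME
   /* Try to add O_NOATIME afterwards; EPERM just means "not allowed" */
   int flags = fcntl(fd, F_GETFL, 0);
   if (flags >= 0 && fcntl(fd, F_SETFL, flags | O_NOATIME) < 0 &&
       errno != EPERM) {
      perror("fcntl(O_NOATIME)");
   }
#endif

#if defined(POSIX_FADV_WILLNEED)
   /* Tell the kernel we intend to read the whole file shortly */
   posix_fadvise(fd, 0, 0, POSIX_FADV_WILLNEED);
#endif

   char buf[4096];
   ssize_t n = read(fd, buf, sizeof(buf));
   printf("read %zd bytes from %s\n", n, path);
   close(fd);
   return 0;
}
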
*/ +int bopen_rsrc(BFILE *bfd, const char *fname, uint64_t flags, mode_t mode) +{ + POOLMEM *rsrc_fname; + + rsrc_fname = get_pool_memory(PM_FNAME); + pm_strcpy(rsrc_fname, fname); + pm_strcat(rsrc_fname, _PATH_RSRCFORKSPEC); + bopen(bfd, rsrc_fname, flags, mode); + free_pool_memory(rsrc_fname); + return bfd->fid; +} +#else /* Unix */ + +/* Unix */ +int bopen_rsrc(BFILE *bfd, const char *fname, uint64_t flags, mode_t mode) + { return -1; } + +#endif + + +/* Unix */ +int bclose(BFILE *bfd) +{ + int stat; + + Dmsg2(400, "Close bfd=%p file %d\n", bfd, bfd->fid); + + if (bfd->fid == -1) { + return 0; + } + if (bfd->cmd_plugin && plugin_bclose) { + stat = plugin_bclose(bfd); + bfd->fid = -1; + bfd->cmd_plugin = false; + } + +#if defined(HAVE_POSIX_FADVISE) && defined(POSIX_FADV_DONTNEED) + /* If not RDWR or WRONLY must be Read Only */ + if (!(bfd->m_flags & (O_RDWR|O_WRONLY))) { + fdatasync(bfd->fid); /* sync the file */ + /* Tell OS we don't need it any more */ + posix_fadvise(bfd->fid, 0, 0, POSIX_FADV_DONTNEED); + Dmsg1(400, "Did posix_fadvise DONTNEED on fid=%d\n", bfd->fid); + } +#endif + + /* Close normal file */ + stat = close(bfd->fid); + bfd->berrno = errno; + bfd->fid = -1; + bfd->cmd_plugin = false; + return stat; +} + +/* Unix */ +ssize_t bread(BFILE *bfd, void *buf, size_t count) +{ + ssize_t stat; + + if (bfd->cmd_plugin && plugin_bread) { + return plugin_bread(bfd, buf, count); + } + + stat = read(bfd->fid, buf, count); + bfd->berrno = errno; + bfd->block++; + if (stat > 0) { + bfd->total_bytes += stat; + } + return stat; +} + +/* Unix */ +ssize_t bwrite(BFILE *bfd, void *buf, size_t count) +{ + ssize_t stat; + + if (bfd->cmd_plugin && plugin_bwrite) { + return plugin_bwrite(bfd, buf, count); + } + stat = write(bfd->fid, buf, count); + bfd->berrno = errno; + bfd->block++; + if (stat > 0) { + bfd->total_bytes += stat; + } + return stat; +} + +/* Unix */ +bool is_bopen(BFILE *bfd) +{ + return bfd->fid >= 0; +} + +/* Unix */ +boffset_t blseek(BFILE *bfd, boffset_t offset, int whence) +{ + boffset_t pos; + + if (bfd->cmd_plugin && plugin_bwrite) { + return plugin_blseek(bfd, offset, whence); + } + pos = (boffset_t)lseek(bfd->fid, offset, whence); + bfd->berrno = errno; + return pos; +} + +#endif diff --git a/src/findlib/bfile.h b/src/findlib/bfile.h new file mode 100644 index 00000000..d7b4c51b --- /dev/null +++ b/src/findlib/bfile.h @@ -0,0 +1,138 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula low level File I/O routines. This routine simulates + * open(), read(), write(), and close(), but using native routines. + * I.e. on Windows, we use Windows APIs. 
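
bopen_rsrc() above reaches a file's resource fork simply by appending _PATH_RSRCFORKSPEC to the name before calling bopen(). The macOS-only sketch below is illustrative, not from the patch; it assumes that macro (from <sys/paths.h>) expands to the usual "/..namedfork/rsrc" suffix and falls back to that literal, and the file name is a placeholder:

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#ifdef __APPLE__
#include <sys/paths.h>            /* _PATH_RSRCFORKSPEC */
#endif

#ifndef _PATH_RSRCFORKSPEC
#define _PATH_RSRCFORKSPEC "/..namedfork/rsrc"   /* illustrative fallback */
#endif

int main()
{
   const char *fname = "/tmp/demo.txt";          /* placeholder */
   char rsrc[1024];

   snprintf(rsrc, sizeof(rsrc), "%s%s", fname, _PATH_RSRCFORKSPEC);

   /* Opens the file's resource fork rather than its data fork */
   int fd = open(rsrc, O_RDONLY);
   if (fd < 0) {
      perror(rsrc);
      return 1;
   }
   off_t len = lseek(fd, 0, SEEK_END);
   printf("resource fork of %s is %lld bytes\n", fname, (long long)len);
   close(fd);
   return 0;
}
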
+ * + * Kern Sibbald May MMIII + */ + +#ifndef __BFILE_H +#define __BFILE_H + +#include "win32filter.h" + +typedef struct _PROCESS_WIN32_BACKUPAPIBLOCK_CONTEXT { + int64_t liNextHeader; + bool bIsInData; + BWIN32_STREAM_ID header_stream; +} PROCESS_WIN32_BACKUPAPIBLOCK_CONTEXT; + +/* ======================================================= + * + * W I N D O W S + * + * ======================================================= + */ +#ifdef HAVE_WIN32 + +enum { + BF_CLOSED, + BF_READ, /* BackupRead */ + BF_WRITE /* BackupWrite */ +}; + +/* In bfile.c */ + +/* Basic Win32 low level I/O file packet */ +class BFILE { +public: + bool use_backup_api; /* set if using BackupRead/Write */ + int mode; /* set if file is open */ + HANDLE fh; /* Win32 file handle */ + int fid; /* fd if doing Unix style */ + LPVOID lpContext; /* BackupRead/Write context */ + PVOID pvContext; /* Windows encryption (EFS) context */ + POOLMEM *errmsg; /* error message buffer */ + DWORD rw_bytes; /* Bytes read or written */ + DWORD lerror; /* Last error code */ + DWORD fattrs; /* Windows file attributes */ + int berrno; /* errno */ + int block; /* Count of read/writes */ + uint64_t total_bytes; /* bytes written */ + boffset_t offset; /* Delta offset */ + JCR *jcr; /* jcr for editing job codes */ + Win32Filter win32filter; /* context for decomposition of win32 backup streams */ + int use_backup_decomp; /* set if using BackupRead Stream Decomposition */ + bool reparse_point; /* set if reparse point */ + bool cmd_plugin; /* set if we have a command plugin */ + bool const is_encrypted() const; +}; + +/* Windows encrypted file system */ +inline const bool BFILE::is_encrypted() const + { return (fattrs & FILE_ATTRIBUTE_ENCRYPTED) != 0; }; + +HANDLE bget_handle(BFILE *bfd); + + +#else /* Linux/Unix systems */ + +/* ======================================================= + * + * U N I X + * + * ======================================================= + */ + +/* Basic Unix low level I/O file packet */ +struct BFILE { + int fid; /* file id on Unix */ + int berrno; /* errno */ + int32_t lerror; /* not used - simplies Win32 builds */ + int block; /* Count of read/writes */ + uint64_t m_flags; /* open flags */ + uint64_t total_bytes; /* bytes written */ + boffset_t offset; /* Delta offset */ + JCR *jcr; /* jcr for editing job codes */ + Win32Filter win32filter; /* context for decomposition of win32 backup streams */ + int use_backup_decomp; /* set if using BackupRead Stream Decomposition */ + bool reparse_point; /* not used in Unix */ + bool cmd_plugin; /* set if we have a command plugin */ +}; + +#endif + +void binit(BFILE *bfd); +bool is_bopen(BFILE *bfd); +#ifdef HAVE_WIN32 +void set_fattrs(BFILE *bfd, struct stat *statp); +#else +#define set_fattrs(bfd, fattrs) +#endif +bool set_win32_backup(BFILE *bfd); +bool set_portable_backup(BFILE *bfd); +bool set_cmd_plugin(BFILE *bfd, JCR *jcr); +bool have_win32_api(); +bool is_portable_backup(BFILE *bfd); +bool is_plugin_data(BFILE *bfd); +bool is_restore_stream_supported(int stream); +bool is_win32_stream(int stream); +int bopen(BFILE *bfd, const char *fname, uint64_t flags, mode_t mode); +int bopen_rsrc(BFILE *bfd, const char *fname, uint64_t flags, mode_t mode); +int bclose(BFILE *bfd); +ssize_t bread(BFILE *bfd, void *buf, size_t count); +ssize_t bwrite(BFILE *bfd, void *buf, size_t count); +boffset_t blseek(BFILE *bfd, boffset_t offset, int whence); +const char *stream_to_ascii(int stream); + +bool processWin32BackupAPIBlock (BFILE *bfd, void *pBuffer, ssize_t dwSize); + +#endif /* __BFILE_H 
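
bfile.h above is the contract the rest of findlib codes against: binit()/bopen()/bread()/bwrite()/bclose()/blseek() behave the same whether the backend is a Unix fd, the Win32 Backup API, or a command plugin. The usage sketch below is hedged: it assumes compilation inside the Bacula tree (bacula.h and find.h on the include path), and copy_through_bfile() is an illustrative helper, not an existing function:

#include "bacula.h"
#include "find.h"

/* Copy 'src' to 'dst' through the portable BFILE layer; a real caller
 * would be one of the FD's backup/restore loops. */
static bool copy_through_bfile(const char *src, const char *dst)
{
   BFILE in, out;
   char buf[64 * 1024];
   ssize_t n;

   binit(&in);
   binit(&out);

   if (bopen(&in, src, O_RDONLY | O_BINARY, 0) < 0) {
      return false;
   }
   if (bopen(&out, dst, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, 0640) < 0) {
      bclose(&in);
      return false;
   }
   while ((n = bread(&in, buf, sizeof(buf))) > 0) {
      if (bwrite(&out, buf, n) != n) {
         break;
      }
   }
   bclose(&out);
   bclose(&in);
   return n >= 0;
}
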
*/ diff --git a/src/findlib/create_file.c b/src/findlib/create_file.c new file mode 100644 index 00000000..1e3b8f71 --- /dev/null +++ b/src/findlib/create_file.c @@ -0,0 +1,484 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2015 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Create a file, and reset the modes + * + * Kern Sibbald, November MM + * + */ + +#include "bacula.h" +#include "find.h" + +#ifndef S_IRWXUGO +#define S_IRWXUGO (S_IRWXU | S_IRWXG | S_IRWXO) +#endif + +#ifndef IS_CTG +#define IS_CTG(x) 0 +#define O_CTG 0 +#endif + +#ifndef O_EXCL +#define O_EXCL 0 +#endif +#ifndef O_NOFOLLOW +#define O_NOFOLLOW 0 +#endif + +static int separate_path_and_file(JCR *jcr, char *fname, char *ofile); +static int path_already_seen(JCR *jcr, char *path, int pnl); + + +/* + * Create the file, or the directory + * + * fname is the original filename + * ofile is the output filename (may be in a different directory) + * + * Returns: CF_SKIP if file should be skipped + * CF_ERROR on error + * CF_EXTRACT file created and data to restore + * CF_CREATED file created no data to restore + * + * Note, we create the file here, except for special files, + * we do not set the attributes because we want to first + * write the file, then when the writing is done, set the + * attributes. + * So, we return with the file descriptor open for normal + * files. + * + */ +int create_file(JCR *jcr, ATTR *attr, BFILE *bfd, int replace) +{ + mode_t new_mode, parent_mode; + int flags; + uid_t uid; + gid_t gid; + int pnl; + bool exists = false; + struct stat mstatp; + + bfd->reparse_point = false; + if (is_win32_stream(attr->data_stream)) { + set_win32_backup(bfd); + } else { + set_portable_backup(bfd); + } + + new_mode = attr->statp.st_mode; + Dmsg3(200, "type=%d newmode=%x file=%s\n", attr->type, new_mode, attr->ofname); + parent_mode = S_IWUSR | S_IXUSR | new_mode; + gid = attr->statp.st_gid; + uid = attr->statp.st_uid; + +#ifdef HAVE_WIN32 + if (!bfd->use_backup_api) { + // eliminate invalid windows filename characters from foreign filenames + char *ch = (char *)attr->ofname; + if (ch[0] != 0 && ch[1] != 0) { + ch += 2; + while (*ch) { + switch (*ch) { + case ':': + case '<': + case '>': + case '*': + case '?': + case '|': + *ch = '_'; + break; + } + ch++; + } + } + } +#endif + + Dmsg2(400, "Replace=%c %d\n", (char)replace, replace); + if (lstat(attr->ofname, &mstatp) == 0) { + exists = true; + switch (replace) { + case REPLACE_IFNEWER: + /* Set attributes if we created this directory */ + if (attr->type == FT_DIREND && path_list_lookup(jcr, attr->ofname)) { + break; + } + + if (attr->statp.st_mtime <= mstatp.st_mtime) { + Qmsg(jcr, M_SKIPPED, 0, _("File skipped. Not newer: %s\n"), attr->ofname); + return CF_SKIP; + } + break; + + case REPLACE_IFOLDER: + if (attr->statp.st_mtime >= mstatp.st_mtime) { + Qmsg(jcr, M_SKIPPED, 0, _("File skipped. 
Not older: %s\n"), attr->ofname); + return CF_SKIP; + } + break; + + case REPLACE_NEVER: + /* Set attributes if we created this directory */ + if (attr->type == FT_DIREND && path_list_lookup(jcr, attr->ofname)) { + break; + } + Qmsg(jcr, M_SKIPPED, 0, _("File skipped. Already exists: %s\n"), attr->ofname); + return CF_SKIP; + + case REPLACE_ALWAYS: + break; + } + } + switch (attr->type) { + case FT_RAW: /* raw device to be written */ + case FT_FIFO: /* FIFO to be written to */ + case FT_LNKSAVED: /* Hard linked, file already saved */ + case FT_LNK: + case FT_SPEC: /* fifo, ... to be backed up */ + case FT_REGE: /* empty file */ + case FT_REG: /* regular file */ + /* + * Note, we do not delete FT_RAW because these are device files + * or FIFOs that should already exist. If we blow it away, + * we may blow away a FIFO that is being used to read the + * restore data, or we may blow away a partition definition. + */ + if (exists && attr->type != FT_RAW && attr->type != FT_FIFO) { + /* Get rid of old copy */ + Dmsg1(400, "unlink %s\n", attr->ofname); + if (unlink(attr->ofname) == -1) { + berrno be; + Qmsg(jcr, M_ERROR, 0, _("File %s already exists and could not be replaced. ERR=%s.\n"), + attr->ofname, be.bstrerror()); + return CF_ERROR; + } + } + /* + * Here we do some preliminary work for all the above + * types to create the path to the file if it does + * not already exist. Below, we will split to + * do the file type specific work + */ + pnl = separate_path_and_file(jcr, attr->fname, attr->ofname); + if (pnl < 0) { + return CF_ERROR; + } + + /* + * If path length is <= 0 we are making a file in the root + * directory. Assume that the directory already exists. + */ + if (pnl > 0) { + char savechr; + savechr = attr->ofname[pnl]; + attr->ofname[pnl] = 0; /* terminate path */ + + if (!path_already_seen(jcr, attr->ofname, pnl)) { + Dmsg1(400, "Make path %s\n", attr->ofname); + /* + * If we need to make the directory, ensure that it is with + * execute bit set (i.e. parent_mode), and preserve what already + * exists. Normally, this should do nothing. + */ + if (!makepath(attr, attr->ofname, parent_mode, parent_mode, uid, gid, 1)) { + Dmsg1(10, "Could not make path. %s\n", attr->ofname); + attr->ofname[pnl] = savechr; /* restore full name */ + return CF_ERROR; + } + } + attr->ofname[pnl] = savechr; /* restore full name */ + } + + /* Now we do the specific work for each file type */ + switch(attr->type) { + case FT_REGE: + case FT_REG: + Dmsg1(100, "Create=%s\n", attr->ofname); + flags = O_WRONLY | O_CREAT | O_BINARY | O_EXCL; + if (IS_CTG(attr->statp.st_mode)) { + flags |= O_CTG; /* set contiguous bit if needed */ + } + if (is_bopen(bfd)) { + Qmsg1(jcr, M_ERROR, 0, _("bpkt already open fid=%d\n"), bfd->fid); + bclose(bfd); + } + + set_fattrs(bfd, &attr->statp); + if ((bopen(bfd, attr->ofname, flags, S_IRUSR | S_IWUSR)) < 0) { + berrno be; + be.set_errno(bfd->berrno); + Qmsg2(jcr, M_ERROR, 0, _("Could not create %s: ERR=%s\n"), + attr->ofname, be.bstrerror()); + Dmsg2(100,"Could not create %s: ERR=%s\n", attr->ofname, be.bstrerror()); + return CF_ERROR; + } + return CF_EXTRACT; + +#ifndef HAVE_WIN32 // none of these exist in MS Windows + case FT_RAW: /* Bacula raw device e.g. 
/dev/sda1 */ + case FT_FIFO: /* Bacula fifo to save data */ + case FT_SPEC: + if (S_ISFIFO(attr->statp.st_mode)) { + Dmsg1(400, "Restore fifo: %s\n", attr->ofname); + if (mkfifo(attr->ofname, attr->statp.st_mode) != 0 && errno != EEXIST) { + berrno be; + Qmsg2(jcr, M_ERROR, 0, _("Cannot make fifo %s: ERR=%s\n"), + attr->ofname, be.bstrerror()); + return CF_ERROR; + } + } else if (S_ISSOCK(attr->statp.st_mode)) { + Dmsg1(200, "Skipping restore of socket: %s\n", attr->ofname); +#ifdef S_IFDOOR // Solaris high speed RPC mechanism + } else if (S_ISDOOR(attr->statp.st_mode)) { + Dmsg1(200, "Skipping restore of door file: %s\n", attr->ofname); +#endif +#ifdef S_IFPORT // Solaris event port for handling AIO + } else if (S_ISPORT(attr->statp.st_mode)) { + Dmsg1(200, "Skipping restore of event port file: %s\n", attr->ofname); +#endif + } else { + Dmsg1(400, "Restore node: %s\n", attr->ofname); + if (mknod(attr->ofname, attr->statp.st_mode, attr->statp.st_rdev) != 0 && errno != EEXIST) { + berrno be; + Qmsg2(jcr, M_ERROR, 0, _("Cannot make node %s: ERR=%s\n"), + attr->ofname, be.bstrerror()); + return CF_ERROR; + } + } + /* + * Here we are going to attempt to restore to a FIFO, which + * means that the FIFO must already exist, AND there must + * be some process already attempting to read from the + * FIFO, so we open it write-only. + */ + if (attr->type == FT_RAW || attr->type == FT_FIFO) { + btimer_t *tid; + Dmsg1(400, "FT_RAW|FT_FIFO %s\n", attr->ofname); + flags = O_WRONLY | O_BINARY; + /* Timeout open() in 60 seconds */ + if (attr->type == FT_FIFO) { + Dmsg0(400, "Set FIFO timer\n"); + tid = start_thread_timer(jcr, pthread_self(), 60); + } else { + tid = NULL; + } + if (is_bopen(bfd)) { + Qmsg1(jcr, M_ERROR, 0, _("bpkt already open fid=%d\n"), bfd->fid); + } + Dmsg2(400, "open %s flags=0x%x\n", attr->ofname, flags); + set_fattrs(bfd, &attr->statp); + if ((bopen(bfd, attr->ofname, flags, 0)) < 0) { + berrno be; + be.set_errno(bfd->berrno); + Qmsg2(jcr, M_ERROR, 0, _("Could not open %s: ERR=%s\n"), + attr->ofname, be.bstrerror()); + Dmsg2(400, "Could not open %s: ERR=%s\n", attr->ofname, be.bstrerror()); + stop_thread_timer(tid); + return CF_ERROR; + } + stop_thread_timer(tid); + return CF_EXTRACT; + } + Dmsg1(400, "FT_SPEC %s\n", attr->ofname); + return CF_CREATED; + + case FT_LNK: + Dmsg2(130, "FT_LNK should restore: %s -> %s\n", attr->ofname, attr->olname); + if (symlink(attr->olname, attr->ofname) != 0 && errno != EEXIST) { + berrno be; + Qmsg3(jcr, M_ERROR, 0, _("Could not symlink %s -> %s: ERR=%s\n"), + attr->ofname, attr->olname, be.bstrerror()); + return CF_ERROR; + } + return CF_CREATED; + + case FT_LNKSAVED: /* Hard linked, file already saved */ + Dmsg2(130, "Hard link %s => %s\n", attr->ofname, attr->olname); + if (link(attr->olname, attr->ofname) != 0) { + berrno be; +#ifdef HAVE_CHFLAGS + struct stat s; + /* + * If using BSD user flags, maybe has a file flag + * preventing this. So attempt to disable, retry link, + * and reset flags. + * Note that BSD securelevel may prevent disabling flag. 
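
The hard-link branch above copes with BSD file flags: when link() fails, it clears the flags on the existing file with chflags(), retries, and then puts the original flags back (securelevel may still forbid the whole operation). A compact sketch of that clear/retry/restore sequence, not from the patch, guarded by a configure-style HAVE_CHFLAGS macro and using placeholder paths:

#include <sys/stat.h>
#include <unistd.h>
#include <errno.h>
#include <stdio.h>

/* Hard link 'newname' to 'existing'; if flags such as UF_IMMUTABLE get in
 * the way, clear them, retry, and restore them afterwards. */
static int link_with_flag_retry(const char *existing, const char *newname)
{
   if (link(existing, newname) == 0) {
      return 0;
   }
#ifdef HAVE_CHFLAGS                    /* assume a configure-style macro */
   struct stat s;
   if (stat(existing, &s) == 0 && s.st_flags != 0 &&
       chflags(existing, 0) == 0) {
      int ret = link(existing, newname);
      int saved = errno;
      if (chflags(existing, s.st_flags) < 0) {
         perror("chflags restore");
      }
      errno = saved;
      return ret;
   }
#endif
   return -1;
}

int main()
{
   /* Placeholder paths for illustration */
   if (link_with_flag_retry("/tmp/original.dat", "/tmp/hardlink.dat") < 0) {
      perror("link");
      return 1;
   }
   return 0;
}
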
+ */ + if (stat(attr->olname, &s) == 0 && s.st_flags != 0) { + if (chflags(attr->olname, 0) == 0) { + if (link(attr->olname, attr->ofname) != 0) { + /* restore original file flags even when linking failed */ + if (chflags(attr->olname, s.st_flags) < 0) { + Qmsg2(jcr, M_ERROR, 0, _("Could not restore file flags for file %s: ERR=%s\n"), + attr->olname, be.bstrerror()); + } +#endif /* HAVE_CHFLAGS */ + Qmsg3(jcr, M_ERROR, 0, _("Could not hard link %s -> %s: ERR=%s\n"), + attr->ofname, attr->olname, be.bstrerror()); + Dmsg3(200, "Could not hard link %s -> %s: ERR=%s\n", + attr->ofname, attr->olname, be.bstrerror()); + return CF_ERROR; +#ifdef HAVE_CHFLAGS + } + /* finally restore original file flags */ + if (chflags(attr->olname, s.st_flags) < 0) { + Qmsg2(jcr, M_ERROR, 0, _("Could not restore file flags for file %s: ERR=%s\n"), + attr->olname, be.bstrerror()); + } + } else { + Qmsg2(jcr, M_ERROR, 0, _("Could not reset file flags for file %s: ERR=%s\n"), + attr->olname, be.bstrerror()); + } + } else { + Qmsg3(jcr, M_ERROR, 0, _("Could not hard link %s -> %s: ERR=%s\n"), + attr->ofname, attr->olname, be.bstrerror()); + return CF_ERROR; + } +#endif /* HAVE_CHFLAGS */ + + } + return CF_CREATED; +#endif + } /* End inner switch */ + + case FT_REPARSE: + case FT_JUNCTION: + bfd->reparse_point = true; + /* Fall through wanted */ + case FT_DIRBEGIN: + case FT_DIREND: + Dmsg2(200, "Make dir mode=%o dir=%s\n", new_mode, attr->ofname); + if (!makepath(attr, attr->ofname, new_mode, parent_mode, uid, gid, 0)) { + return CF_ERROR; + } + /* + * If we are using the Win32 Backup API, we open the + * directory so that the security info will be read + * and saved. + */ + if (!is_portable_backup(bfd)) { + if (is_bopen(bfd)) { + Qmsg1(jcr, M_ERROR, 0, _("bpkt already open fid=%d\n"), bfd->fid); + } + set_fattrs(bfd, &attr->statp); + if ((bopen(bfd, attr->ofname, O_WRONLY|O_BINARY, 0)) < 0) { + berrno be; + be.set_errno(bfd->berrno); +#ifdef HAVE_WIN32 + /* Check for trying to create a drive, if so, skip */ + if (attr->ofname[1] == ':' && + IsPathSeparator(attr->ofname[2]) && + attr->ofname[3] == '\0') { + return CF_SKIP; + } +#endif + Qmsg2(jcr, M_ERROR, 0, _("Could not open %s: ERR=%s\n"), + attr->ofname, be.bstrerror()); + return CF_ERROR; + } + return CF_EXTRACT; + } else { + return CF_CREATED; + } + + case FT_DELETED: + Qmsg2(jcr, M_INFO, 0, _("Original file %s have been deleted: type=%d\n"), attr->fname, attr->type); + break; + /* The following should not occur */ + case FT_NOACCESS: + case FT_NOFOLLOW: + case FT_NOSTAT: + case FT_DIRNOCHG: + case FT_NOCHG: + case FT_ISARCH: + case FT_NORECURSE: + case FT_NOFSCHG: + case FT_NOOPEN: + Qmsg2(jcr, M_ERROR, 0, _("Original file %s not saved: type=%d\n"), attr->fname, attr->type); + break; + default: + Qmsg2(jcr, M_ERROR, 0, _("Unknown file type %d; not restored: %s\n"), attr->type, attr->fname); + Pmsg2(000, "Unknown file type %d; not restored: %s\n", attr->type, attr->fname); + break; + } + return CF_ERROR; +} + +/* + * Returns: > 0 index into path where last path char is. 
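
Throughout create_file() the output name is split in place: the byte at the end of the directory part is saved, overwritten with a NUL for makepath(), then restored; separate_path_and_file() below computes that split point. A tiny portable sketch of the same save/terminate/restore trick, independent of the patch:

#include <stdio.h>
#include <string.h>

/* Return the index just past the last '/' (0 if there is no path part). */
static int path_len(const char *name)
{
   const char *slash = strrchr(name, '/');
   return slash ? (int)(slash - name) + 1 : 0;
}

int main()
{
   char ofname[] = "/restore/tmp/etc/hosts";      /* placeholder */
   int pnl = path_len(ofname);

   if (pnl > 0) {
      char saved = ofname[pnl];
      ofname[pnl] = 0;                 /* terminate the path part */
      printf("would makepath(\"%s\")\n", ofname);
      ofname[pnl] = saved;             /* restore the full name */
   }
   printf("file part: %s\n", ofname + pnl);
   return 0;
}
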
+ * 0 no path + * -1 filename is zero length + */ +static int separate_path_and_file(JCR *jcr, char *fname, char *ofile) +{ + char *f, *p, *q; + int fnl, pnl; + + /* Separate pathname and filename */ + for (q=p=f=ofile; *p; p++) { +#ifdef HAVE_WIN32 + if (IsPathSeparator(*p)) { + f = q; + if (IsPathSeparator(p[1])) { + p++; + } + } + *q++ = *p; /* copy data */ +#else + if (IsPathSeparator(*p)) { + f = q; /* possible filename */ + } + q++; +#endif + } + + if (IsPathSeparator(*f)) { + f++; + } + *q = 0; /* terminate string */ + + fnl = q - f; + if (fnl == 0) { + /* The filename length must not be zero here because we + * are dealing with a file (i.e. FT_REGE or FT_REG). + */ + Jmsg1(jcr, M_ERROR, 0, _("Zero length filename: %s\n"), fname); + return -1; + } + pnl = f - ofile - 1; + return pnl; +} + +/* + * Primitive caching of path to prevent recreating a pathname + * each time as long as we remain in the same directory. + */ +static int path_already_seen(JCR *jcr, char *path, int pnl) +{ + if (!jcr->cached_path) { + jcr->cached_path = get_pool_memory(PM_FNAME); + } + if (jcr->cached_pnl == pnl && strcmp(path, jcr->cached_path) == 0) { + return 1; + } + pm_strcpy(jcr->cached_path, path); + jcr->cached_pnl = pnl; + return 0; +} diff --git a/src/findlib/drivetype.c b/src/findlib/drivetype.c new file mode 100644 index 00000000..cc559015 --- /dev/null +++ b/src/findlib/drivetype.c @@ -0,0 +1,118 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2015 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Implement routines to determine drive type (Windows specific). + * + * Written by Robert Nelson, June 2006 + * + */ + +#ifndef TEST_PROGRAM + +#include "bacula.h" +#include "find.h" + +#else /* Set up for testing a stand alone program */ + +#include +#include +#include +#define SUPPORTEDOSES \ + "HAVE_WIN32\n" +#define false 0 +#define true 1 +#define bstrncpy strncpy +#define Dmsg0(n,s) fprintf(stderr, s) +#define Dmsg1(n,s,a1) fprintf(stderr, s, a1) +#define Dmsg2(n,s,a1,a2) fprintf(stderr, s, a1, a2) +#endif + +/* + * These functions should be implemented for each OS + * + * bool drivetype(const char *fname, char *dt, int dtlen); + */ + +#if defined (HAVE_WIN32) +/* Windows */ + +bool drivetype(const char *fname, char *dt, int dtlen) +{ + CHAR rootpath[4]; + UINT type; + + /* Copy Drive Letter, colon, and backslash to rootpath */ + bstrncpy(rootpath, fname, 3); + rootpath[3] = '\0'; + + type = GetDriveType(rootpath); + + switch (type) { + case DRIVE_REMOVABLE: bstrncpy(dt, "removable", dtlen); return true; + case DRIVE_FIXED: bstrncpy(dt, "fixed", dtlen); return true; + case DRIVE_REMOTE: bstrncpy(dt, "remote", dtlen); return true; + case DRIVE_CDROM: bstrncpy(dt, "cdrom", dtlen); return true; + case DRIVE_RAMDISK: bstrncpy(dt, "ramdisk", dtlen); return true; + case DRIVE_UNKNOWN: + case DRIVE_NO_ROOT_DIR: + default: + return false; + } +} +/* Windows */ + +#else /* No recognised OS */ + +bool drivetype(const char *fname, char *dt, int dtlen) +{ + Dmsg0(10, "!!! 
drivetype() not implemented for this OS. !!!\n"); +#ifdef TEST_PROGRAM + Dmsg1(10, "Please define one of the following when compiling:\n\n%s\n", + SUPPORTEDOSES); + exit(EXIT_FAILURE); +#endif + + return false; +} +#endif + +#ifdef TEST_PROGRAM +int main(int argc, char **argv) +{ + char *p; + char dt[1000]; + int status = 0; + + if (argc < 2) { + p = (argc < 1) ? "drivetype" : argv[0]; + printf("usage:\t%s path ...\n" + "\t%s prints the drive type and pathname of the paths.\n", + p, p); + return EXIT_FAILURE; + } + while (*++argv) { + if (!drivetype(*argv, dt, sizeof(dt))) { + status = EXIT_FAILURE; + } else { + printf("%s\t%s\n", dt, *argv); + } + } + return status; +} +#endif diff --git a/src/findlib/enable_priv.c b/src/findlib/enable_priv.c new file mode 100644 index 00000000..a324cf89 --- /dev/null +++ b/src/findlib/enable_priv.c @@ -0,0 +1,156 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2015 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Enable backup privileges for Win32 systems. + * + * Kern Sibbald, May MMIII + * + */ + +#include "bacula.h" +#include "find.h" +#include "jcr.h" + + +/*=============================================================*/ +/* */ +/* * * * U n i x * * * * */ +/* */ +/*=============================================================*/ + +#if !defined(HAVE_WIN32) + +int enable_backup_privileges(JCR *jcr, int ignore_errors) + { return 0; } + + +#endif + + + +/*=============================================================*/ +/* */ +/* * * * W i n 3 2 * * * * */ +/* */ +/*=============================================================*/ + +#if defined(HAVE_WIN32) + +void win_error(JCR *jcr, const char *prefix, DWORD lerror); + +static int +enable_priv(JCR *jcr, HANDLE hToken, const char *name, int ignore_errors) +{ + TOKEN_PRIVILEGES tkp; + DWORD lerror; + + if (!(p_LookupPrivilegeValue && p_AdjustTokenPrivileges)) { + return 0; /* not avail on this OS */ + } + + // Get the LUID for the security privilege. + if (!p_LookupPrivilegeValue(NULL, name, &tkp.Privileges[0].Luid)) { + win_error(jcr, "LookupPrivilegeValue", GetLastError()); + return 0; + } + + /* Set the security privilege for this process. */ + tkp.PrivilegeCount = 1; + tkp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED; + p_AdjustTokenPrivileges(hToken, FALSE, &tkp, sizeof(TOKEN_PRIVILEGES), NULL, NULL); + lerror = GetLastError(); + if (lerror != ERROR_SUCCESS) { + if (!ignore_errors) { + char buf[200]; + strcpy(buf, _("AdjustTokenPrivileges set ")); + bstrncat(buf, name, sizeof(buf)); + win_error(jcr, buf, lerror); + } + return 0; + } + return 1; +} + +/* + * Setup privileges we think we will need. We probably do not need + * the SE_SECURITY_NAME, but since nothing seems to be working, + * we get it hoping to fix the problems. 
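For readers unfamiliar with the Win32 token API used by enable_priv() above, the bare pattern it wraps boils down to the standalone sketch below. This is illustrative only and not part of the imported Bacula source: there are no p_* dynamic-binding wrappers, no JCR, and error reporting is reduced to a return code.

// Minimal sketch of the LookupPrivilegeValue / AdjustTokenPrivileges pattern.
// Assumes only <windows.h>; the function name is invented for this example.
#include <windows.h>
#include <stdio.h>

static int enable_backup_privilege(void)
{
   HANDLE hToken;
   TOKEN_PRIVILEGES tp;

   if (!OpenProcessToken(GetCurrentProcess(),
                         TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &hToken)) {
      return 0;
   }
   if (!LookupPrivilegeValue(NULL, SE_BACKUP_NAME, &tp.Privileges[0].Luid)) {
      CloseHandle(hToken);
      return 0;
   }
   tp.PrivilegeCount = 1;
   tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
   // AdjustTokenPrivileges() can report success even when the privilege was
   // not actually granted, so GetLastError() must be checked afterwards.
   AdjustTokenPrivileges(hToken, FALSE, &tp, sizeof(tp), NULL, NULL);
   int ok = (GetLastError() == ERROR_SUCCESS) ? 1 : 0;
   CloseHandle(hToken);
   return ok;
}

int main(void)
{
   printf("SeBackupPrivilege %s\n",
          enable_backup_privilege() ? "enabled" : "not enabled");
   return 0;
}

On success the process can then open other users' files with FILE_FLAG_BACKUP_SEMANTICS, which is what the Win32 backup path depends on.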
+ */ +int enable_backup_privileges(JCR *jcr, int ignore_errors) +{ + HANDLE hToken, hProcess; + int stat = 0; + + if (!p_OpenProcessToken) { + return 0; /* No avail on this OS */ + } + + hProcess = OpenProcess(PROCESS_ALL_ACCESS, FALSE, GetCurrentProcessId()); + + // Get a token for this process. + if (!p_OpenProcessToken(hProcess, + TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &hToken)) { + if (!ignore_errors) { + win_error(jcr, "OpenProcessToken", GetLastError()); + } + /* Forge on anyway */ + } + + /* Return a bit map of permissions set. */ + if (enable_priv(jcr, hToken, SE_BACKUP_NAME, ignore_errors)) { + stat |= 1<<1; + } + if (enable_priv(jcr, hToken, SE_RESTORE_NAME, ignore_errors)) { + stat |= 1<<2; + } + if (enable_priv(jcr, hToken, SE_SECURITY_NAME, ignore_errors)) { + stat |= 1<<0; + } + if (enable_priv(jcr, hToken, SE_TAKE_OWNERSHIP_NAME, ignore_errors)) { + stat |= 1<<3; + } + if (enable_priv(jcr, hToken, SE_ASSIGNPRIMARYTOKEN_NAME, ignore_errors)) { + stat |= 1<<4; + } + if (enable_priv(jcr, hToken, SE_SYSTEM_ENVIRONMENT_NAME, ignore_errors)) { + stat |= 1<<5; + } + if (enable_priv(jcr, hToken, SE_CREATE_TOKEN_NAME, ignore_errors)) { + stat |= 1<<6; + } + if (enable_priv(jcr, hToken, SE_MACHINE_ACCOUNT_NAME, ignore_errors)) { + stat |= 1<<7; + } + if (enable_priv(jcr, hToken, SE_TCB_NAME, ignore_errors)) { + stat |= 1<<8; + } + if (enable_priv(jcr, hToken, SE_CREATE_PERMANENT_NAME, ignore_errors)) { + stat |= 1<<10; + } + + if (stat) { + stat |= 1<<9; + } + + CloseHandle(hToken); + CloseHandle(hProcess); + return stat; +} + +#endif /* HAVE_WIN32 */ diff --git a/src/findlib/find.c b/src/findlib/find.c new file mode 100644 index 00000000..47d78123 --- /dev/null +++ b/src/findlib/find.c @@ -0,0 +1,491 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2015 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Main routine for finding files on a file system. + * The heart of the work to find the files on the + * system is done in find_one.c. Here we have the + * higher level control as well as the matching + * routines for the new syntax Options resource. + * + * Kern E. 
Sibbald, MM + * + */ + + +#include "bacula.h" +#include "find.h" + +static const int dbglvl = 450; + +int32_t name_max; /* filename max length */ +int32_t path_max; /* path name max length */ + +#ifdef DEBUG +#undef bmalloc +#define bmalloc(x) sm_malloc(__FILE__, __LINE__, x) +#endif +static int our_callback(JCR *jcr, FF_PKT *ff, bool top_level); + +static const int fnmode = 0; + +/* + * Initialize the find files "global" variables + */ +FF_PKT *init_find_files() +{ + FF_PKT *ff; + + ff = (FF_PKT *)bmalloc(sizeof(FF_PKT)); + memset(ff, 0, sizeof(FF_PKT)); + + ff->sys_fname = get_pool_memory(PM_FNAME); + + /* Get system path and filename maximum lengths */ + path_max = pathconf(".", _PC_PATH_MAX); + if (path_max < 2048) { + path_max = 2048; + } + + name_max = pathconf(".", _PC_NAME_MAX); + if (name_max < 2048) { + name_max = 2048; + } + path_max++; /* add for EOS */ + name_max++; /* add for EOS */ + + Dmsg1(dbglvl, "init_find_files ff=%p\n", ff); + return ff; +} + +/* + * Set find_files options. For the moment, we only + * provide for full/incremental saves, and setting + * of save_time. For additional options, see above + */ +void +set_find_options(FF_PKT *ff, int incremental, time_t save_time) +{ + Dmsg0(dbglvl, "Enter set_find_options()\n"); + ff->incremental = incremental; + ff->save_time = save_time; + Dmsg0(dbglvl, "Leave set_find_options()\n"); +} + +void +set_find_changed_function(FF_PKT *ff, bool check_fct(JCR *jcr, FF_PKT *ff)) +{ + Dmsg0(dbglvl, "Enter set_find_changed_function()\n"); + ff->check_fct = check_fct; +} + +void +set_find_snapshot_function(FF_PKT *ff, + bool convert_path(JCR *jcr, FF_PKT *ff, dlist *filelist, dlistString *node)) +{ + ff->snapshot_convert_fct = convert_path; +} + +/* + * Call this subroutine with a callback subroutine as the first + * argument and a packet as the second argument, this packet + * will be passed back to the callback subroutine as the last + * argument. + * + */ +int +find_files(JCR *jcr, FF_PKT *ff, int file_save(JCR *jcr, FF_PKT *ff_pkt, bool top_level), + int plugin_save(JCR *jcr, FF_PKT *ff_pkt, bool top_level)) +{ + ff->file_save = file_save; + ff->plugin_save = plugin_save; + + /* This is the new way */ + findFILESET *fileset = ff->fileset; + if (fileset) { + int i, j; + /* TODO: We probably need be move the initialization in the fileset loop, + * at this place flags options are "concatenated" accross Include {} blocks + * (not only Options{} blocks inside a Include{}) + */ + ff->flags = 0; + for (i=0; iinclude_list.size(); i++) { + findINCEXE *incexe = (findINCEXE *)fileset->include_list.get(i); + fileset->incexe = incexe; + + /* Here, we reset some values between two different Include{} */ + strcpy(ff->VerifyOpts, "V"); + strcpy(ff->AccurateOpts, "Cmcs"); /* mtime+ctime+size by default */ + strcpy(ff->BaseJobOpts, "Jspug5"); /* size+perm+user+group+chk */ + ff->plugin = NULL; + ff->opt_plugin = false; + + /* + * By setting all options, we in effect OR the global options + * which is what we want. 
+ */ + for (j=0; jopts_list.size(); j++) { + findFOPTS *fo = (findFOPTS *)incexe->opts_list.get(j); + /* TODO options are "simply" reset by Options block that come next + * For example : + * Options { IgnoreCase = yes } + * ATTN: some plugins use AddOptions() that create extra Option block + * Also see accept_file() below that could suffer of the same problem + */ + ff->flags |= fo->flags; + /* If the compress option was set in the previous block, overwrite the + * algorithm only if defined + */ + if ((ff->flags & FO_COMPRESS) && fo->Compress_algo != 0) { + ff->Compress_algo = fo->Compress_algo; + ff->Compress_level = fo->Compress_level; + } + ff->strip_path = fo->strip_path; + ff->fstypes = fo->fstype; + ff->drivetypes = fo->drivetype; + if (fo->plugin != NULL) { + ff->plugin = fo->plugin; /* TODO: generate a plugin event ? */ + ff->opt_plugin = true; + } + bstrncat(ff->VerifyOpts, fo->VerifyOpts, sizeof(ff->VerifyOpts)); /* TODO: Concat or replace? */ + if (fo->AccurateOpts[0]) { + bstrncpy(ff->AccurateOpts, fo->AccurateOpts, sizeof(ff->AccurateOpts)); + } + if (fo->BaseJobOpts[0]) { + bstrncpy(ff->BaseJobOpts, fo->BaseJobOpts, sizeof(ff->BaseJobOpts)); + } + } + Dmsg4(50, "Verify=<%s> Accurate=<%s> BaseJob=<%s> flags=<%lld>\n", + ff->VerifyOpts, ff->AccurateOpts, ff->BaseJobOpts, ff->flags); + dlistString *node; + foreach_dlist(node, &incexe->name_list) { + char *fname = node->c_str(); + Dmsg1(dbglvl, "F %s\n", fname); + + ff->top_fname = fname; + /* Convert the filename if needed */ + if (ff->snapshot_convert_fct) { + ff->snapshot_convert_fct(jcr, ff, &incexe->name_list, node); + } + + if (find_one_file(jcr, ff, our_callback, ff->top_fname, (dev_t)-1, true) == 0) { + return 0; /* error return */ + } + + if (job_canceled(jcr)) { + return 0; + } + } + foreach_dlist(node, &incexe->plugin_list) { + char *fname = node->c_str(); + if (!plugin_save) { + Jmsg(jcr, M_FATAL, 0, _("Plugin: \"%s\" not found.\n"), fname); + return 0; + } + Dmsg1(dbglvl, "PluginCommand: %s\n", fname); + ff->top_fname = fname; + ff->cmd_plugin = true; + + /* Make sure that opt plugin is not set + * The current implementation doesn't allow option plugin + * and command plugin to run at the same time + */ + ff->opt_plugin = false; + ff->plugin = NULL; + + plugin_save(jcr, ff, true); + ff->cmd_plugin = false; + if (job_canceled(jcr)) { + return 0; + } + } + } + } + return 1; +} + +/* + * Test if the currently selected directory (in ff->fname) is + * explicitly in the Include list or explicitly in the Exclude + * list. 
+ */ +bool is_in_fileset(FF_PKT *ff) +{ + dlistString *node; + char *fname; + int i; + findINCEXE *incexe; + findFILESET *fileset = ff->fileset; + if (fileset) { + for (i=0; iinclude_list.size(); i++) { + incexe = (findINCEXE *)fileset->include_list.get(i); + foreach_dlist(node, &incexe->name_list) { + fname = node->c_str(); + Dmsg2(dbglvl, "Inc fname=%s ff->fname=%s\n", fname, ff->fname); + if (strcmp(fname, ff->fname) == 0) { + return true; + } + } + } + for (i=0; iexclude_list.size(); i++) { + incexe = (findINCEXE *)fileset->exclude_list.get(i); + foreach_dlist(node, &incexe->name_list) { + fname = node->c_str(); + Dmsg2(dbglvl, "Exc fname=%s ff->fname=%s\n", fname, ff->fname); + if (strcmp(fname, ff->fname) == 0) { + return true; + } + } + } + } + return false; +} + + +bool accept_file(FF_PKT *ff) +{ + int i, j, k; + int fnm_flags; + findFILESET *fileset = ff->fileset; + findINCEXE *incexe = fileset->incexe; + const char *basename; + int (*match_func)(const char *pattern, const char *string, int flags); + + Dmsg1(dbglvl, "enter accept_file: fname=%s\n", ff->fname); + if (ff->flags & FO_ENHANCEDWILD) { +// match_func = enh_fnmatch; + match_func = fnmatch; + if ((basename = last_path_separator(ff->fname)) != NULL) + basename++; + else + basename = ff->fname; + } else { + match_func = fnmatch; + basename = ff->fname; + } + + for (j = 0; j < incexe->opts_list.size(); j++) { + findFOPTS *fo = (findFOPTS *)incexe->opts_list.get(j); + ff->flags = fo->flags; + ff->Compress_algo = fo->Compress_algo; + ff->Compress_level = fo->Compress_level; + ff->fstypes = fo->fstype; + ff->drivetypes = fo->drivetype; + + fnm_flags = (ff->flags & FO_IGNORECASE) ? FNM_CASEFOLD : 0; + fnm_flags |= (ff->flags & FO_ENHANCEDWILD) ? FNM_PATHNAME : 0; + + if (S_ISDIR(ff->statp.st_mode)) { + for (k=0; kwilddir.size(); k++) { + if (match_func((char *)fo->wilddir.get(k), ff->fname, fnmode|fnm_flags) == 0) { + if (ff->flags & FO_EXCLUDE) { + Dmsg2(dbglvl, "Exclude wilddir: %s file=%s\n", (char *)fo->wilddir.get(k), + ff->fname); + return false; /* reject dir */ + } + return true; /* accept dir */ + } + } + } else { + for (k=0; kwildfile.size(); k++) { + if (match_func((char *)fo->wildfile.get(k), ff->fname, fnmode|fnm_flags) == 0) { + if (ff->flags & FO_EXCLUDE) { + Dmsg2(dbglvl, "Exclude wildfile: %s file=%s\n", (char *)fo->wildfile.get(k), + ff->fname); + return false; /* reject file */ + } + return true; /* accept file */ + } + } + + for (k=0; kwildbase.size(); k++) { + if (match_func((char *)fo->wildbase.get(k), basename, fnmode|fnm_flags) == 0) { + if (ff->flags & FO_EXCLUDE) { + Dmsg2(dbglvl, "Exclude wildbase: %s file=%s\n", (char *)fo->wildbase.get(k), + basename); + return false; /* reject file */ + } + return true; /* accept file */ + } + } + } + for (k=0; kwild.size(); k++) { + if (match_func((char *)fo->wild.get(k), ff->fname, fnmode|fnm_flags) == 0) { + if (ff->flags & FO_EXCLUDE) { + Dmsg2(dbglvl, "Exclude wild: %s file=%s\n", (char *)fo->wild.get(k), + ff->fname); + return false; /* reject file */ + } + return true; /* accept file */ + } + } + if (S_ISDIR(ff->statp.st_mode)) { + for (k=0; kregexdir.size(); k++) { + const int nmatch = 30; + regmatch_t pmatch[nmatch]; + if (regexec((regex_t *)fo->regexdir.get(k), ff->fname, nmatch, pmatch, 0) == 0) { + if (ff->flags & FO_EXCLUDE) { + return false; /* reject file */ + } + return true; /* accept file */ + } + } + } else { + for (k=0; kregexfile.size(); k++) { + const int nmatch = 30; + regmatch_t pmatch[nmatch]; + if (regexec((regex_t *)fo->regexfile.get(k), 
ff->fname, nmatch, pmatch, 0) == 0) { + if (ff->flags & FO_EXCLUDE) { + return false; /* reject file */ + } + return true; /* accept file */ + } + } + } + for (k=0; kregex.size(); k++) { + const int nmatch = 30; + regmatch_t pmatch[nmatch]; + if (regexec((regex_t *)fo->regex.get(k), ff->fname, nmatch, pmatch, 0) == 0) { + if (ff->flags & FO_EXCLUDE) { + return false; /* reject file */ + } + return true; /* accept file */ + } + } + /* + * If we have an empty Options clause with exclude, then + * exclude the file + */ + if (ff->flags & FO_EXCLUDE && + fo->regex.size() == 0 && fo->wild.size() == 0 && + fo->regexdir.size() == 0 && fo->wilddir.size() == 0 && + fo->regexfile.size() == 0 && fo->wildfile.size() == 0 && + fo->wildbase.size() == 0) { + return false; /* reject file */ + } + } + + /* Now apply the Exclude { } directive */ + for (i=0; iexclude_list.size(); i++) { + findINCEXE *incexe = (findINCEXE *)fileset->exclude_list.get(i); + for (j=0; jopts_list.size(); j++) { + findFOPTS *fo = (findFOPTS *)incexe->opts_list.get(j); + fnm_flags = (fo->flags & FO_IGNORECASE) ? FNM_CASEFOLD : 0; + for (k=0; kwild.size(); k++) { + if (fnmatch((char *)fo->wild.get(k), ff->fname, fnmode|fnm_flags) == 0) { + Dmsg1(dbglvl, "Reject wild1: %s\n", ff->fname); + return false; /* reject file */ + } + } + } + fnm_flags = (incexe->current_opts != NULL && incexe->current_opts->flags & FO_IGNORECASE) + ? FNM_CASEFOLD : 0; + dlistString *node; + foreach_dlist(node, &incexe->name_list) { + char *fname = node->c_str(); + if (fnmatch(fname, ff->fname, fnmode|fnm_flags) == 0) { + Dmsg1(dbglvl, "Reject wild2: %s\n", ff->fname); + return false; /* reject file */ + } + } + } + return true; +} + +/* + * The code comes here for each file examined. + * We filter the files, then call the user's callback if + * the file is included. 
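As a reading aid, here is a hypothetical, heavily simplified caller of the traversal interface declared earlier in this file. The callback name and body are invented, and the fileset is assumed to have been built elsewhere (in the real daemons it comes from the parsed FileSet resource), so this is a sketch rather than working daemon code.

// Hypothetical usage of the findlib API (not part of the imported source).
// Error handling, JCR setup and fileset construction are omitted.
static int my_save_file(JCR *jcr, FF_PKT *ff, bool top_level)
{
   // A real callback (e.g. save_file() in the File daemon) would open
   // ff->fname and ship its data; here we only log what the walk produced.
   Dmsg3(50, "top=%d type=%d fname=%s\n", top_level, ff->type, ff->fname);
   return 1;                        // a positive return keeps the walk going
}

static void walk_fileset(JCR *jcr, findFILESET *fileset)
{
   FF_PKT *ff = init_find_files();
   ff->fileset = fileset;           // assumed built by the caller
   set_find_options(ff, 0, 0);      // full save: not incremental, no save_time
   find_files(jcr, ff, my_save_file, NULL);   // NULL: no plugin callback
   term_find_files(ff);
}

In the real daemons the callback is save_file() for backup, with verify and estimate using their own equivalents.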
+ */ +static int our_callback(JCR *jcr, FF_PKT *ff, bool top_level) +{ + if (top_level) { + return ff->file_save(jcr, ff, top_level); /* accept file */ + } + switch (ff->type) { + case FT_NOACCESS: + case FT_NOFOLLOW: + case FT_NOSTAT: + case FT_NOCHG: + case FT_ISARCH: + case FT_NORECURSE: + case FT_NOFSCHG: + case FT_INVALIDFS: + case FT_INVALIDDT: + case FT_NOOPEN: +// return ff->file_save(jcr, ff, top_level); + + /* These items can be filtered */ + case FT_LNKSAVED: + case FT_REGE: + case FT_REG: + case FT_LNK: + case FT_DIRBEGIN: + case FT_DIREND: + case FT_RAW: + case FT_FIFO: + case FT_SPEC: + case FT_DIRNOCHG: + case FT_REPARSE: + case FT_JUNCTION: + if (accept_file(ff)) { + return ff->file_save(jcr, ff, top_level); + } else { + Dmsg1(dbglvl, "Skip file %s\n", ff->fname); + return -1; /* ignore this file */ + } + + default: + Dmsg1(000, "Unknown FT code %d\n", ff->type); + return 0; + } +} + + +/* + * Terminate find_files() and release + * all allocated memory + */ +int +term_find_files(FF_PKT *ff) +{ + int hard_links; + + free_pool_memory(ff->sys_fname); + if (ff->fname_save) { + free_pool_memory(ff->fname_save); + } + if (ff->link_save) { + free_pool_memory(ff->link_save); + } + if (ff->ignoredir_fname) { + free_pool_memory(ff->ignoredir_fname); + } + if (ff->snap_fname) { + free_pool_memory(ff->snap_fname); + } + if (ff->snap_top_fname) { + free_pool_memory(ff->snap_top_fname); + } + if (ff->mtab_list) { + delete ff->mtab_list; + } + hard_links = term_find_one(ff); + free(ff); + return hard_links; +} diff --git a/src/findlib/find.h b/src/findlib/find.h new file mode 100644 index 00000000..8403a181 --- /dev/null +++ b/src/findlib/find.h @@ -0,0 +1,231 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2015 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * File types as returned by find_files() + * + * Kern Sibbald MMI + */ + +#ifndef __FILES_H +#define __FILES_H + +#include "jcr.h" +#include "fileopts.h" +#include "bfile.h" +#include "../filed/fd_plugins.h" + +#ifdef HAVE_DIRENT_H +#include +#endif + +#include +#if !defined(HAVE_WIN32) || defined(HAVE_MINGW) +#include +#endif +#if HAVE_UTIME_H +#include +#else +struct utimbuf { + long actime; + long modtime; +}; +#endif + +#define MODE_RALL (S_IRUSR|S_IRGRP|S_IROTH) + +#include "lib/fnmatch.h" +// #include "lib/enh_fnmatch.h" + +#ifndef HAVE_REGEX_H +#include "lib/bregex.h" +#else +#include +#endif + +/* For options FO_xxx values see src/fileopts.h */ + +struct s_included_file { + struct s_included_file *next; + uint64_t options; /* backup options */ + uint32_t algo; /* compression algorithm. 
4 letters stored as an interger */ + int Compress_level; /* compression level */ + int len; /* length of fname */ + int pattern; /* set if wild card pattern */ + char VerifyOpts[20]; /* Options for verify */ + char fname[1]; +}; + +struct s_excluded_file { + struct s_excluded_file *next; + int len; + char fname[1]; +}; + +/* FileSet definitions very similar to the resource + * contained in the Director because the components + * of the structure are passed by the Director to the + * File daemon and recompiled back into this structure + */ +#undef MAX_FOPTS +#define MAX_FOPTS 30 + +enum { + state_none, + state_options, + state_include, + state_error +}; + +/* File options structure */ +struct findFOPTS { + uint64_t flags; /* options in bits */ + uint32_t Compress_algo; /* compression algorithm. 4 letters stored as an interger */ + int Compress_level; /* compression level */ + int strip_path; /* strip path count */ + char VerifyOpts[MAX_FOPTS]; /* verify options */ + char AccurateOpts[MAX_FOPTS]; /* accurate mode options */ + char BaseJobOpts[MAX_FOPTS]; /* basejob mode options */ + char *plugin; /* Plugin that handle this section */ + alist regex; /* regex string(s) */ + alist regexdir; /* regex string(s) for directories */ + alist regexfile; /* regex string(s) for files */ + alist wild; /* wild card strings */ + alist wilddir; /* wild card strings for directories */ + alist wildfile; /* wild card strings for files */ + alist wildbase; /* wild card strings for basenames */ + alist base; /* list of base names */ + alist fstype; /* file system type limitation */ + alist drivetype; /* drive type limitation */ +}; + + +/* This is either an include item or an exclude item */ +struct findINCEXE { + findFOPTS *current_opts; /* points to current options structure */ + alist opts_list; /* options list */ + dlist name_list; /* filename list -- holds dlistString */ + dlist plugin_list; /* plugin list -- holds dlistString */ + char *ignoredir; /* ignore directories with this file */ +}; + +/* + * FileSet Resource + * + */ +struct findFILESET { + int state; + findINCEXE *incexe; /* current item */ + alist include_list; + alist exclude_list; +}; + +struct HFSPLUS_INFO { + unsigned long length; /* Mandatory field */ + char fndrinfo[32]; /* Finder Info */ + off_t rsrclength; /* Size of resource fork */ +}; + +/* + * Definition of the find_files packet passed as the + * first argument to the find_files callback subroutine. 
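Before FF_PKT itself, one note on the Options structures above: the wild, wilddir, wildfile and wildbase lists are applied by accept_file() in find.c with fnmatch(), which adds FNM_CASEFOLD when IgnoreCase is set and FNM_PATHNAME when the EnhancedWild option (FO_ENHANCEDWILD) is in effect. A tiny standalone demonstration of those two flags, illustrative only and not Bacula code, is shown below; FNM_CASEFOLD is a GNU/BSD extension (Bacula bundles its own fnmatch in lib/fnmatch.h), hence _GNU_SOURCE when building against glibc.

// Standalone illustration of the fnmatch() flags used by accept_file().
#define _GNU_SOURCE
#include <fnmatch.h>
#include <stdio.h>

int main(void)
{
   // IgnoreCase = yes  ->  FNM_CASEFOLD
   printf("IgnoreCase:   %s\n",
          fnmatch("*.JPG", "photo.jpg", FNM_CASEFOLD) == 0 ? "match" : "no match");
   // Without FNM_PATHNAME a '*' happily crosses directory separators ...
   printf("default '*':  %s\n",
          fnmatch("/home/*", "/home/kern/file", 0) == 0 ? "match" : "no match");
   // ... with FNM_PATHNAME (EnhancedWild) it stops at '/'
   printf("FNM_PATHNAME: %s\n",
          fnmatch("/home/*", "/home/kern/file", FNM_PATHNAME) == 0 ? "match" : "no match");
   return 0;
}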
+ */ +struct FF_PKT { + char *top_fname; /* full filename before descending */ + char *fname; /* full filename */ + char *link; /* link if file linked */ + char *object_name; /* Object name */ + char *object; /* restore object */ + char *plugin; /* Current Options{Plugin=} name */ + + /* Specific snapshot part */ + char *volume_path; /* volume path */ + char *snapshot_path; /* snapshot path */ + char *top_fname_save; + POOLMEM *snap_fname; /* buffer used when stripping path */ + POOLMEM *snap_top_fname; + bool strip_snap_path; /* convert snapshot path or not */ + bool (*snapshot_convert_fct)(JCR *jcr, FF_PKT *ff, dlist *filelist, dlistString *node); + + POOLMEM *sys_fname; /* system filename */ + POOLMEM *fname_save; /* save when stripping path */ + POOLMEM *link_save; /* save when stripping path */ + POOLMEM *ignoredir_fname; /* used to ignore directories */ + char *digest; /* set to file digest when the file is a hardlink */ + struct stat statp; /* stat packet */ + uint32_t digest_len; /* set to the digest len when the file is a hardlink*/ + int32_t digest_stream; /* set to digest type when the file is hardlink */ + int32_t FileIndex; /* FileIndex of this file */ + int32_t LinkFI; /* FileIndex of main hard linked file */ + int32_t delta_seq; /* Delta Sequence number */ + int32_t object_index; /* Object index */ + int32_t object_len; /* Object length */ + int32_t object_compression; /* Type of compression for object */ + struct f_link *linked; /* Set if this file is hard linked */ + int type; /* FT_ type from above */ + int ff_errno; /* errno */ + BFILE bfd; /* Bacula file descriptor */ + time_t save_time; /* start of incremental time */ + bool accurate_found; /* Found in the accurate hash (valid after check_changes()) */ + bool dereference; /* follow links (not implemented) */ + bool null_output_device; /* using null output device */ + bool incremental; /* incremental save */ + bool no_read; /* Do not read this file when using Plugin */ + char VerifyOpts[20]; + char AccurateOpts[20]; + char BaseJobOpts[20]; + struct s_included_file *included_files_list; + struct s_excluded_file *excluded_files_list; + struct s_excluded_file *excluded_paths_list; + findFILESET *fileset; + int (*file_save)(JCR *, FF_PKT *, bool); /* User's callback */ + int (*plugin_save)(JCR *, FF_PKT *, bool); /* User's callback */ + bool (*check_fct)(JCR *, FF_PKT *); /* optionnal user fct to check file changes */ + + /* Values set by accept_file while processing Options */ + uint64_t flags; /* backup options */ + uint32_t Compress_algo; /* compression algorithm. 4 letters stored as an interger */ + int Compress_level; /* compression level */ + int strip_path; /* strip path count */ + bool cmd_plugin; /* set if we have a command plugin */ + bool opt_plugin; /* set if we have an option plugin */ + rblist *mtab_list; /* List of mtab entries */ + uint64_t last_fstype; /* cache last file system type */ + char last_fstypename[32]; /* cache last file system type name */ + alist fstypes; /* allowed file system types */ + alist drivetypes; /* allowed drive types */ + alist mount_points; /* Possible mount points to be snapshotted */ + + /* List of all hard linked files found */ + struct f_link **linkhash; /* hard linked files */ + + /* Darwin specific things. 
+ * To avoid clutter, we always include rsrc_bfd and volhas_attrlist */ + BFILE rsrc_bfd; /* fd for resource forks */ + bool volhas_attrlist; /* Volume supports getattrlist() */ + struct HFSPLUS_INFO hfsinfo; /* Finder Info and resource fork size */ +}; + +typedef void (mtab_handler_t)(void *user_ctx, struct stat *st, + const char *fstype, const char *mountpoint, + const char *mntopts, const char *fsname); +bool read_mtab(mtab_handler_t *mtab_handler, void *user_ctx); + +#include "protos.h" + +#endif /* __FILES_H */ diff --git a/src/findlib/find_one.c b/src/findlib/find_one.c new file mode 100644 index 00000000..764a3e5c --- /dev/null +++ b/src/findlib/find_one.c @@ -0,0 +1,857 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + + This file was derived from GNU TAR source code. Except for a few key + ideas, it has been entirely rewritten for Bacula. + + Kern Sibbald, MM + + Thanks to the TAR programmers. + + */ + +#include "bacula.h" +#include "find.h" +#ifdef HAVE_DARWIN_OS +#include +#include +#include +#endif + +int breaddir(DIR *dirp, POOLMEM *&d_name); + +extern int32_t name_max; /* filename max length */ +extern int32_t path_max; /* path name max length */ + +/* + * Structure for keeping track of hard linked files, we + * keep an entry for each hardlinked file that we save, + * which is the first one found. For all the other files that + * are linked to this one, we save only the directory + * entry so we can link it. + */ +struct f_link { + struct f_link *next; + dev_t dev; /* device */ + ino_t ino; /* inode with device is unique */ + int32_t FileIndex; /* Bacula FileIndex of this file */ + int32_t digest_stream; /* Digest type if needed */ + uint32_t digest_len; /* Digest len if needed */ + char *digest; /* Checksum of the file if needed */ + char name[1]; /* The name */ +}; + +typedef struct f_link link_t; +#define LINK_HASHTABLE_BITS 16 +#define LINK_HASHTABLE_SIZE (1<>= 16; + hash ^= i; + i >>= 16; + hash ^= i; + i >>= 16; + hash ^= i; + return hash & LINK_HASHTABLE_MASK; +} + +/* + * Create a new directory Find File packet, but copy + * some of the essential info from the current packet. + * However, be careful to zero out the rest of the + * packet. 
+ */ +static FF_PKT *new_dir_ff_pkt(FF_PKT *ff_pkt) +{ + FF_PKT *dir_ff_pkt = (FF_PKT *)bmalloc(sizeof(FF_PKT)); + memcpy(dir_ff_pkt, ff_pkt, sizeof(FF_PKT)); + dir_ff_pkt->fname = bstrdup(ff_pkt->fname); + dir_ff_pkt->link = bstrdup(ff_pkt->link); + dir_ff_pkt->sys_fname = get_pool_memory(PM_FNAME); + + if (ff_pkt->strip_snap_path) { + dir_ff_pkt->fname_save = get_pool_memory(PM_FNAME); + dir_ff_pkt->link_save = get_pool_memory(PM_FNAME); + pm_strcpy(dir_ff_pkt->fname_save, ff_pkt->fname_save); + pm_strcpy(dir_ff_pkt->link_save, ff_pkt->link_save); + + // need its own "working" buffers, some pm_strcat or pm_strcopy + // will realloc these ones, no need to copy the current values + dir_ff_pkt->snap_top_fname = get_pool_memory(PM_FNAME); + dir_ff_pkt->snap_fname = get_pool_memory(PM_FNAME); + } else { + dir_ff_pkt->fname_save = NULL; + dir_ff_pkt->link_save = NULL; + } + + dir_ff_pkt->included_files_list = NULL; + dir_ff_pkt->excluded_files_list = NULL; + dir_ff_pkt->excluded_paths_list = NULL; + dir_ff_pkt->linkhash = NULL; + dir_ff_pkt->ignoredir_fname = NULL; + return dir_ff_pkt; +} + +/* + * Free the temp directory ff_pkt + */ +static void free_dir_ff_pkt(FF_PKT *dir_ff_pkt) +{ + free(dir_ff_pkt->fname); + free(dir_ff_pkt->link); + free_pool_memory(dir_ff_pkt->sys_fname); + if (dir_ff_pkt->fname_save) { + free_pool_memory(dir_ff_pkt->fname_save); + } + if (dir_ff_pkt->link_save) { + free_pool_memory(dir_ff_pkt->link_save); + } + if (dir_ff_pkt->snap_top_fname) { + free_pool_memory(dir_ff_pkt->snap_top_fname); + free_pool_memory(dir_ff_pkt->snap_fname); + } + free(dir_ff_pkt); +} + +/* + * Check to see if we allow the file system type of a file or directory. + * If we do not have a list of file system types, we accept anything. + */ +static int accept_fstype(FF_PKT *ff, void *dummy) { + int i; + char fs[1000]; + bool accept = true; + + if (ff->fstypes.size()) { + accept = false; + if (!fstype(ff, fs, sizeof(fs))) { + Dmsg1(50, "Cannot determine file system type for \"%s\"\n", ff->fname); + } else { + for (i = 0; i < ff->fstypes.size(); ++i) { + if (strcmp(fs, (char *)ff->fstypes.get(i)) == 0) { + Dmsg2(100, "Accepting fstype %s for \"%s\"\n", fs, ff->fname); + accept = true; + break; + } + Dmsg3(200, "fstype %s for \"%s\" does not match %s\n", fs, + ff->fname, ff->fstypes.get(i)); + } + } + } + return accept; +} + +/* + * Check to see if we allow the drive type of a file or directory. + * If we do not have a list of drive types, we accept anything. + */ +static int accept_drivetype(FF_PKT *ff, void *dummy) { + int i; + char dt[100]; + bool accept = true; + + if (ff->drivetypes.size()) { + accept = false; + if (!drivetype(ff->fname, dt, sizeof(dt))) { + Dmsg1(50, "Cannot determine drive type for \"%s\"\n", ff->fname); + } else { + for (i = 0; i < ff->drivetypes.size(); ++i) { + if (strcmp(dt, (char *)ff->drivetypes.get(i)) == 0) { + Dmsg2(100, "Accepting drive type %s for \"%s\"\n", dt, ff->fname); + accept = true; + break; + } + Dmsg3(200, "drive type %s for \"%s\" does not match %s\n", dt, + ff->fname, ff->drivetypes.get(i)); + } + } + } + return accept; +} + +/* + * This function determines whether we can use getattrlist() + * It's odd, but we have to use the function to determine that... + * Also, the man pages talk about things as if they were implemented. + * + * On Mac OS X, this succesfully differentiates between HFS+ and UFS + * volumes, which makes me trust it is OK for others, too. 
+ */ +static bool volume_has_attrlist(const char *fname) +{ +#ifdef HAVE_DARWIN_OS + struct statfs st; + struct volinfo_struct { + unsigned long length; /* Mandatory field */ + vol_capabilities_attr_t info; /* Volume capabilities */ + } vol; + struct attrlist attrList; + + memset(&attrList, 0, sizeof(attrList)); + attrList.bitmapcount = ATTR_BIT_MAP_COUNT; + attrList.volattr = ATTR_VOL_INFO | ATTR_VOL_CAPABILITIES; + if (statfs(fname, &st) == 0) { + /* We need to check on the mount point */ + if (getattrlist(st.f_mntonname, &attrList, &vol, sizeof(vol), FSOPT_NOFOLLOW) == 0 + && (vol.info.capabilities[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_ATTRLIST) + && (vol.info.valid[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_ATTRLIST)) { + return true; + } + } +#endif + return false; +} + +/* + * check for BSD nodump flag + */ +static bool no_dump(JCR *jcr, FF_PKT *ff_pkt) +{ +#if defined(HAVE_CHFLAGS) && defined(UF_NODUMP) + if ( (ff_pkt->flags & FO_HONOR_NODUMP) && + (ff_pkt->statp.st_flags & UF_NODUMP) ) { + Jmsg(jcr, M_INFO, 1, _(" NODUMP flag set - will not process %s\n"), + ff_pkt->fname); + return true; /* do not backup this file */ + } +#endif + return false; /* do backup */ +} + +/* check if a file have changed during backup and display an error */ +bool has_file_changed(JCR *jcr, FF_PKT *ff_pkt) +{ + struct stat statp; + Dmsg1(500, "has_file_changed fname=%s\n",ff_pkt->fname); + + if (ff_pkt->type != FT_REG) { /* not a regular file */ + return false; + } + + if (lstat(ff_pkt->fname, &statp) != 0) { + berrno be; + Jmsg(jcr, M_WARNING, 0, + _("Cannot stat file %s: ERR=%s\n"),ff_pkt->fname,be.bstrerror()); + return true; + } + + if (statp.st_mtime != ff_pkt->statp.st_mtime) { + Jmsg(jcr, M_ERROR, 0, _("%s mtime changed during backup.\n"), ff_pkt->fname); + Dmsg3(50, "%s mtime (%lld) changed during backup (%lld).\n", ff_pkt->fname, + (int64_t)ff_pkt->statp.st_mtime, (int64_t)statp.st_mtime); + return true; + } + + if (statp.st_ctime != ff_pkt->statp.st_ctime) { + Jmsg(jcr, M_ERROR, 0, _("%s ctime changed during backup.\n"), ff_pkt->fname); + Dmsg3(50, "%s ctime (%lld) changed during backup (%lld).\n", ff_pkt->fname, + (int64_t)ff_pkt->statp.st_ctime, (int64_t)statp.st_ctime); + return true; + } + + if ((int64_t)statp.st_size != (int64_t)ff_pkt->statp.st_size) { + Jmsg(jcr, M_ERROR, 0, _("%s size of %lld changed during backup to %lld.n"),ff_pkt->fname, + (int64_t)ff_pkt->statp.st_size, (int64_t)statp.st_size); + Dmsg3(50, "%s size (%lld) changed during backup (%lld).\n", ff_pkt->fname, + (int64_t)ff_pkt->statp.st_size, (int64_t)statp.st_size); + return true; + } + + return false; +} + +/* + * For incremental/diffential or accurate backups, we + * determine if the current file has changed. + */ +bool check_changes(JCR *jcr, FF_PKT *ff_pkt) +{ + /* in special mode (like accurate backup), the programmer can + * choose his comparison function. 
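To make the hook concrete: a caller can install its own comparison function through set_find_changed_function() declared in find.c. The example below is hypothetical (the function name is invented; the real accurate-mode check lives in the File daemon) and only illustrates the signature and the meaning of the return value.

// Hypothetical check function (not part of the imported source): return
// true to back the file up, false to skip it.  Assumes <time.h>.
static bool newer_than_last_week(JCR *jcr, FF_PKT *ff)
{
   time_t cutoff = time(NULL) - 7 * 24 * 3600;
   return ff->statp.st_mtime >= cutoff || ff->statp.st_ctime >= cutoff;
}

// Installed once on the traversal packet, before find_files() runs:
//    set_find_changed_function(ff, newer_than_last_week);

Accurate mode uses this hook so that the file list sent by the Director, not just local timestamps, decides what has changed.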
+ */ + if (ff_pkt->check_fct) { + return ff_pkt->check_fct(jcr, ff_pkt); + } + + /* For normal backups (incr/diff), we use this default + * behaviour + */ + if (ff_pkt->incremental && + (ff_pkt->statp.st_mtime < ff_pkt->save_time && + ((ff_pkt->flags & FO_MTIMEONLY) || + ff_pkt->statp.st_ctime < ff_pkt->save_time))) + { + return false; + } + + return true; +} + +static bool have_ignoredir(FF_PKT *ff_pkt) +{ + struct stat sb; + char *ignoredir; + + /* Ensure that pointers are defined */ + if (!ff_pkt->fileset || !ff_pkt->fileset->incexe) { + return false; + } + ignoredir = ff_pkt->fileset->incexe->ignoredir; + + if (ignoredir) { + if (!ff_pkt->ignoredir_fname) { + ff_pkt->ignoredir_fname = get_pool_memory(PM_FNAME); + } + Mmsg(ff_pkt->ignoredir_fname, "%s/%s", ff_pkt->fname, ignoredir); + if (stat(ff_pkt->ignoredir_fname, &sb) == 0) { + Dmsg2(100, "Directory '%s' ignored (found %s)\n", + ff_pkt->fname, ignoredir); + return true; /* Just ignore this directory */ + } + } + return false; +} + +/* + * When the current file is a hardlink, the backup code can compute + * the checksum and store it into the link_t structure. + */ +void +ff_pkt_set_link_digest(FF_PKT *ff_pkt, + int32_t digest_stream, const char *digest, uint32_t len) +{ + if (ff_pkt->linked && !ff_pkt->linked->digest) { /* is a hardlink */ + ff_pkt->linked->digest = (char *) bmalloc(len); + memcpy(ff_pkt->linked->digest, digest, len); + ff_pkt->linked->digest_len = len; + ff_pkt->linked->digest_stream = digest_stream; + } +} + +/* + * Find a single file. + * handle_file is the callback for handling the file. + * p is the filename + * parent_device is the device we are currently on + * top_level is 1 when not recursing or 0 when + * descending into a directory. + */ +int +find_one_file(JCR *jcr, FF_PKT *ff_pkt, + int handle_file(JCR *jcr, FF_PKT *ff, bool top_level), + char *fname, dev_t parent_device, bool top_level) +{ + struct utimbuf restore_times; + int rtn_stat; + int len; + + ff_pkt->fname = ff_pkt->link = fname; + + if (lstat(fname, &ff_pkt->statp) != 0) { + /* Cannot stat file */ + ff_pkt->type = FT_NOSTAT; + ff_pkt->ff_errno = errno; + return handle_file(jcr, ff_pkt, top_level); + } + + Dmsg1(300, "File ----: %s\n", fname); + + /* Save current times of this directory in case we need to + * reset them because the user doesn't want them changed. + */ + restore_times.actime = ff_pkt->statp.st_atime; + restore_times.modtime = ff_pkt->statp.st_mtime; + + /* + * We check for allowed fstypes and drivetypes at top_level and fstype change (below). 
+ */ + if (top_level) { + if (!accept_fstype(ff_pkt, NULL)) { + ff_pkt->type = FT_INVALIDFS; + if (ff_pkt->flags & FO_KEEPATIME) { + utime(fname, &restore_times); + } + + char fs[100]; + + if (!fstype(ff_pkt, fs, sizeof(fs))) { + bstrncpy(fs, "unknown", sizeof(fs)); + } + + Jmsg(jcr, M_INFO, 0, _("Top level directory \"%s\" has unlisted fstype \"%s\"\n"), fname, fs); + return 1; /* Just ignore this error - or the whole backup is cancelled */ + } + if (!accept_drivetype(ff_pkt, NULL)) { + ff_pkt->type = FT_INVALIDDT; + if (ff_pkt->flags & FO_KEEPATIME) { + utime(fname, &restore_times); + } + + char dt[100]; + + if (!drivetype(ff_pkt->fname, dt, sizeof(dt))) { + bstrncpy(dt, "unknown", sizeof(dt)); + } + + Jmsg(jcr, M_INFO, 0, _("Top level directory \"%s\" has an unlisted drive type \"%s\"\n"), fname, dt); + return 1; /* Just ignore this error - or the whole backup is cancelled */ + } + ff_pkt->volhas_attrlist = volume_has_attrlist(fname); + } + + /* + * Ignore this entry if no_dump() returns true + */ + if (no_dump(jcr, ff_pkt)) { + Dmsg1(100, "'%s' ignored (NODUMP flag set)\n", + ff_pkt->fname); + return 1; + } + + /* + * If this is an Incremental backup, see if file was modified + * since our last "save_time", presumably the last Full save + * or Incremental. + */ + if ( !S_ISDIR(ff_pkt->statp.st_mode) + && !check_changes(jcr, ff_pkt)) + { + Dmsg1(500, "Non-directory incremental: %s\n", ff_pkt->fname); + ff_pkt->type = FT_NOCHG; + return handle_file(jcr, ff_pkt, top_level); + } + +#ifdef HAVE_DARWIN_OS + if (ff_pkt->flags & FO_HFSPLUS && ff_pkt->volhas_attrlist + && S_ISREG(ff_pkt->statp.st_mode)) { + /* TODO: initialise attrList once elsewhere? */ + struct attrlist attrList; + memset(&attrList, 0, sizeof(attrList)); + attrList.bitmapcount = ATTR_BIT_MAP_COUNT; + attrList.commonattr = ATTR_CMN_FNDRINFO; + attrList.fileattr = ATTR_FILE_RSRCLENGTH; + if (getattrlist(fname, &attrList, &ff_pkt->hfsinfo, + sizeof(ff_pkt->hfsinfo), FSOPT_NOFOLLOW) != 0) { + ff_pkt->type = FT_NOSTAT; + ff_pkt->ff_errno = errno; + return handle_file(jcr, ff_pkt, top_level); + } + return -1; /* ignore */ + } +#endif + + ff_pkt->LinkFI = 0; + /* + * Handle hard linked files + * + * Maintain a list of hard linked files already backed up. This + * allows us to ensure that the data of each file gets backed + * up only once. 
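The bucket index for that list comes from LINKHASH(), defined near the top of this file, which folds st_dev and st_ino into a 16-bit value. Spelled out as a plain, standalone reimplementation (illustrative only; the in-tree macro and constants remain authoritative), it is roughly:

// Sketch of the dev/ino bucketing used for the hard-link table.
#include <sys/types.h>

static inline int link_hash(dev_t dev, ino_t ino)
{
   int hash = (int)dev;
   unsigned long long i = (unsigned long long)ino;
   hash ^= (int)i;              // fold all 64 bits of the inode number
   hash ^= (int)(i >> 16);
   hash ^= (int)(i >> 32);
   hash ^= (int)(i >> 48);
   return hash & 0xffff;        // LINK_HASHTABLE_MASK: 2^16 buckets
}

The first file carrying a given (dev, ino) pair is entered into its bucket together with its FileIndex; every later name that matches on dev and ino is emitted as FT_LNKSAVED pointing back at that FileIndex, so the data itself is written only once.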
+ */ + if (!(ff_pkt->flags & FO_NO_HARDLINK) + && ff_pkt->statp.st_nlink > 1 + && (S_ISREG(ff_pkt->statp.st_mode) + || S_ISCHR(ff_pkt->statp.st_mode) + || S_ISBLK(ff_pkt->statp.st_mode) + || S_ISFIFO(ff_pkt->statp.st_mode) + || S_ISSOCK(ff_pkt->statp.st_mode))) { + + struct f_link *lp; + if (ff_pkt->linkhash == NULL) { + ff_pkt->linkhash = (link_t **)bmalloc(LINK_HASHTABLE_SIZE * sizeof(link_t *)); + memset(ff_pkt->linkhash, 0, LINK_HASHTABLE_SIZE * sizeof(link_t *)); + } + const int linkhash = LINKHASH(ff_pkt->statp); + + /* Search link list of hard linked files */ + for (lp = ff_pkt->linkhash[linkhash]; lp; lp = lp->next) + if (lp->ino == (ino_t)ff_pkt->statp.st_ino && + lp->dev == (dev_t)ff_pkt->statp.st_dev) { + /* If we have already backed up the hard linked file don't do it again */ + if (strcmp(lp->name, fname) == 0) { + Dmsg2(400, "== Name identical skip FI=%d file=%s\n", lp->FileIndex, fname); + return 1; /* ignore */ + } + ff_pkt->link = lp->name; + ff_pkt->type = FT_LNKSAVED; /* Handle link, file already saved */ + ff_pkt->LinkFI = lp->FileIndex; + ff_pkt->linked = 0; + ff_pkt->digest = lp->digest; + ff_pkt->digest_stream = lp->digest_stream; + ff_pkt->digest_len = lp->digest_len; + rtn_stat = handle_file(jcr, ff_pkt, top_level); + Dmsg3(400, "FT_LNKSAVED FI=%d LinkFI=%d file=%s\n", + ff_pkt->FileIndex, lp->FileIndex, lp->name); + return rtn_stat; + } + + /* File not previously dumped. Chain it into our list. */ + len = strlen(fname) + 1; + lp = (struct f_link *)bmalloc(sizeof(struct f_link) + len); + lp->digest = NULL; /* set later */ + lp->digest_stream = 0; /* set later */ + lp->digest_len = 0; /* set later */ + lp->ino = ff_pkt->statp.st_ino; + lp->dev = ff_pkt->statp.st_dev; + lp->FileIndex = 0; /* set later */ + bstrncpy(lp->name, fname, len); + lp->next = ff_pkt->linkhash[linkhash]; + ff_pkt->linkhash[linkhash] = lp; + ff_pkt->linked = lp; /* mark saved link */ + Dmsg2(400, "added to hash FI=%d file=%s\n", ff_pkt->FileIndex, lp->name); + } else { + ff_pkt->linked = NULL; + } + + /* This is not a link to a previously dumped file, so dump it. */ + if (S_ISREG(ff_pkt->statp.st_mode)) { + boffset_t sizeleft; + + sizeleft = ff_pkt->statp.st_size; + + /* Don't bother opening empty, world readable files. Also do not open + files when archive is meant for /dev/null. */ + if (ff_pkt->null_output_device || (sizeleft == 0 + && MODE_RALL == (MODE_RALL & ff_pkt->statp.st_mode))) { + ff_pkt->type = FT_REGE; + } else { + ff_pkt->type = FT_REG; + } + rtn_stat = handle_file(jcr, ff_pkt, top_level); + if (ff_pkt->linked) { + ff_pkt->linked->FileIndex = ff_pkt->FileIndex; + } + Dmsg3(400, "FT_REG FI=%d linked=%d file=%s\n", ff_pkt->FileIndex, + ff_pkt->linked ? 
1 : 0, fname); + if (ff_pkt->flags & FO_KEEPATIME) { + utime(fname, &restore_times); + } + return rtn_stat; + + + } else if (S_ISLNK(ff_pkt->statp.st_mode)) { /* soft link */ + int size; + char *buffer = (char *)alloca(path_max + name_max + 102); + + size = readlink(fname, buffer, path_max + name_max + 101); + if (size < 0) { + /* Could not follow link */ + ff_pkt->type = FT_NOFOLLOW; + ff_pkt->ff_errno = errno; + rtn_stat = handle_file(jcr, ff_pkt, top_level); + if (ff_pkt->linked) { + ff_pkt->linked->FileIndex = ff_pkt->FileIndex; + } + return rtn_stat; + } + buffer[size] = 0; + ff_pkt->link = buffer; /* point to link */ + ff_pkt->type = FT_LNK; /* got a real link */ + rtn_stat = handle_file(jcr, ff_pkt, top_level); + if (ff_pkt->linked) { + ff_pkt->linked->FileIndex = ff_pkt->FileIndex; + } + return rtn_stat; + + } else if (S_ISDIR(ff_pkt->statp.st_mode)) { + DIR *directory; + POOL_MEM dname(PM_FNAME); + char *link; + int link_len; + int len; + int status; + dev_t our_device = ff_pkt->statp.st_dev; + bool recurse = true; + bool volhas_attrlist = ff_pkt->volhas_attrlist; /* Remember this if we recurse */ + + /* + * Ignore this directory and everything below if the file .nobackup + * (or what is defined for IgnoreDir in this fileset) exists + */ + if (have_ignoredir(ff_pkt)) { + return 1; /* Just ignore this directory */ + } + + /* Build a canonical directory name with a trailing slash in link var */ + len = strlen(fname); + link_len = len + 200; + link = (char *)bmalloc(link_len + 2); + bstrncpy(link, fname, link_len); + /* Strip all trailing slashes */ + while (len >= 1 && IsPathSeparator(link[len - 1])) + len--; + link[len++] = '/'; /* add back one */ + link[len] = 0; + + ff_pkt->link = link; + if (!check_changes(jcr, ff_pkt)) { + /* Incremental/Full+Base option, directory entry not changed */ + ff_pkt->type = FT_DIRNOCHG; + } else { + ff_pkt->type = FT_DIRBEGIN; + } + /* + * We have set st_rdev to 1 if it is a reparse point, otherwise 0, + * if st_rdev is 2, it is a mount point + */ +#if defined(HAVE_WIN32) + /* + * A reparse point (WIN32_REPARSE_POINT) + * is something special like one of the following: + * IO_REPARSE_TAG_DFS 0x8000000A + * IO_REPARSE_TAG_DFSR 0x80000012 + * IO_REPARSE_TAG_HSM 0xC0000004 + * IO_REPARSE_TAG_HSM2 0x80000006 + * IO_REPARSE_TAG_SIS 0x80000007 + * IO_REPARSE_TAG_SYMLINK 0xA000000C + * + * A junction point is a: + * IO_REPARSE_TAG_MOUNT_POINT 0xA0000003 + * which can be either a link to a Volume (WIN32_MOUNT_POINT) + * or a link to a directory (WIN32_JUNCTION_POINT) + * + * Ignore WIN32_REPARSE_POINT and WIN32_JUNCTION_POINT + */ + if (ff_pkt->statp.st_rdev == WIN32_REPARSE_POINT) { + ff_pkt->type = FT_REPARSE; + } else if (ff_pkt->statp.st_rdev == WIN32_JUNCTION_POINT) { + ff_pkt->type = FT_JUNCTION; + } +#endif + /* + * Note, we return the directory to the calling program (handle_file) + * when we first see the directory (FT_DIRBEGIN. + * This allows the program to apply matches and make a + * choice whether or not to accept it. If it is accepted, we + * do not immediately save it, but do so only after everything + * in the directory is seen (i.e. the FT_DIREND). 
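Seen from the callback side, this two-phase protocol looks like the hypothetical fragment below (the function name is invented and the body does nothing useful; it only marks where a real consumer would act).

// Hypothetical callback fragment (not part of the imported source) showing
// how a consumer sees each accepted directory twice during one traversal.
static int my_handle_file(JCR *jcr, FF_PKT *ff, bool top_level)
{
   switch (ff->type) {
   case FT_DIRBEGIN:
      // First visit, before anything inside the directory: a chance to
      // prune the subtree.  Returning a value below 1 here skips it entirely.
      return 1;
   case FT_DIREND:
      // Second visit, after every entry below has been handled: only now is
      // the directory itself saved, so that a restore can put its modes and
      // times back after its contents have been written.
      return 1;
   default:
      return 1;
   }
}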
+ */ + rtn_stat = handle_file(jcr, ff_pkt, top_level); + if (rtn_stat < 1 || ff_pkt->type == FT_REPARSE || + ff_pkt->type == FT_JUNCTION) { /* ignore or error status */ + free(link); + return rtn_stat; + } + /* Done with DIRBEGIN, next call will be DIREND */ + if (ff_pkt->type == FT_DIRBEGIN) { + ff_pkt->type = FT_DIREND; + } + + /* + * Create a temporary ff packet for this directory + * entry, and defer handling the directory until + * we have recursed into it. This saves the + * directory after all files have been processed, and + * during the restore, the directory permissions will + * be reset after all the files have been restored. + */ + Dmsg1(300, "Create temp ff packet for dir: %s\n", ff_pkt->fname); + FF_PKT *dir_ff_pkt = new_dir_ff_pkt(ff_pkt); + + /* + * Do not descend into subdirectories (recurse) if the + * user has turned it off for this directory. + * + * If we are crossing file systems, we are either not allowed + * to cross, or we may be restricted by a list of permitted + * file systems. + */ + bool is_win32_mount_point = false; +#if defined(HAVE_WIN32) + is_win32_mount_point = ff_pkt->statp.st_rdev == WIN32_MOUNT_POINT; +#endif + if (!top_level && ff_pkt->flags & FO_NO_RECURSION) { + ff_pkt->type = FT_NORECURSE; + recurse = false; + } else if (!top_level && (parent_device != ff_pkt->statp.st_dev || + is_win32_mount_point)) { + if(!(ff_pkt->flags & FO_MULTIFS)) { + ff_pkt->type = FT_NOFSCHG; + recurse = false; + } else if (!accept_fstype(ff_pkt, NULL)) { + ff_pkt->type = FT_INVALIDFS; + recurse = false; + } else { + ff_pkt->volhas_attrlist = volume_has_attrlist(fname); + } + } + /* If not recursing, just backup dir and return */ + if (!recurse) { + rtn_stat = handle_file(jcr, ff_pkt, top_level); + if (ff_pkt->linked) { + ff_pkt->linked->FileIndex = ff_pkt->FileIndex; + } + free(link); + free_dir_ff_pkt(dir_ff_pkt); + ff_pkt->link = ff_pkt->fname; /* reset "link" */ + if (ff_pkt->flags & FO_KEEPATIME) { + utime(fname, &restore_times); + } + return rtn_stat; + } + + ff_pkt->link = ff_pkt->fname; /* reset "link" */ + + /* + * Descend into or "recurse" into the directory to read + * all the files in it. + */ + errno = 0; + if ((directory = opendir(fname)) == NULL) { + ff_pkt->type = FT_NOOPEN; + ff_pkt->ff_errno = errno; + rtn_stat = handle_file(jcr, ff_pkt, top_level); + if (ff_pkt->linked) { + ff_pkt->linked->FileIndex = ff_pkt->FileIndex; + } + free(link); + free_dir_ff_pkt(dir_ff_pkt); + return rtn_stat; + } + + /* + * Process all files in this directory entry (recursing). + * This would possibly run faster if we chdir to the directory + * before traversing it. + */ + rtn_stat = 1; + while (!job_canceled(jcr)) { + char *p, *q; + int l; + int i; + + status = breaddir(directory, dname.addr()); + if (status != 0) { + /* error or end of directory */ +// Dmsg1(99, "breaddir returned stat=%d\n", status); + break; + } + p = dname.c_str(); + /* Skip `.', `..', and excluded file names. */ + if (p[0] == '\0' || (p[0] == '.' && (p[1] == '\0' || + (p[1] == '.' 
&& p[2] == '\0')))) { + continue; + } + l = strlen(dname.c_str()); + if (l + len >= link_len) { + link_len = len + l + 1; + link = (char *)brealloc(link, link_len + 1); + } + q = link + len; + for (i=0; i < l; i++) { + *q++ = *p++; + } + *q = 0; + if (!file_is_excluded(ff_pkt, link)) { + rtn_stat = find_one_file(jcr, ff_pkt, handle_file, link, our_device, false); + if (ff_pkt->linked) { + ff_pkt->linked->FileIndex = ff_pkt->FileIndex; + } + } + } + closedir(directory); + free(link); + + /* + * Now that we have recursed through all the files in the + * directory, we "save" the directory so that after all + * the files are restored, this entry will serve to reset + * the directory modes and dates. Temp directory values + * were used without this record. + */ + handle_file(jcr, dir_ff_pkt, top_level); /* handle directory entry */ + if (ff_pkt->linked) { + ff_pkt->linked->FileIndex = dir_ff_pkt->FileIndex; + } + free_dir_ff_pkt(dir_ff_pkt); + + if (ff_pkt->flags & FO_KEEPATIME) { + utime(fname, &restore_times); + } + ff_pkt->volhas_attrlist = volhas_attrlist; /* Restore value in case it changed. */ + return rtn_stat; + } /* end check for directory */ + + /* + * If it is explicitly mentioned (i.e. top_level) and is + * a block device, we do a raw backup of it or if it is + * a fifo, we simply read it. + */ +#ifdef HAVE_FREEBSD_OS + /* + * On FreeBSD, all block devices are character devices, so + * to be able to read a raw disk, we need the check for + * a character device. + * crw-r----- 1 root operator - 116, 0x00040002 Jun 9 19:32 /dev/ad0s3 + * crw-r----- 1 root operator - 116, 0x00040002 Jun 9 19:32 /dev/rad0s3 + */ + if (top_level && (S_ISBLK(ff_pkt->statp.st_mode) || S_ISCHR(ff_pkt->statp.st_mode))) { +#else + if (top_level && S_ISBLK(ff_pkt->statp.st_mode)) { +#endif + ff_pkt->type = FT_RAW; /* raw partition */ + } else if (top_level && S_ISFIFO(ff_pkt->statp.st_mode) && + ff_pkt->flags & FO_READFIFO) { + ff_pkt->type = FT_FIFO; + } else { + /* The only remaining types are special (character, ...) files */ + ff_pkt->type = FT_SPEC; + } + rtn_stat = handle_file(jcr, ff_pkt, top_level); + if (ff_pkt->linked) { + ff_pkt->linked->FileIndex = ff_pkt->FileIndex; + } + return rtn_stat; +} + +int term_find_one(FF_PKT *ff) +{ + struct f_link *lp, *lc; + int count = 0; + int i; + + + if (ff->linkhash == NULL) return 0; + + for (i =0 ; i < LINK_HASHTABLE_SIZE; i ++) { + /* Free up list of hard linked files */ + lp = ff->linkhash[i]; + while (lp) { + lc = lp; + lp = lp->next; + if (lc) { + if (lc->digest) { + free(lc->digest); + } + free(lc); + count++; + } + } + ff->linkhash[i] = NULL; + } + free(ff->linkhash); + ff->linkhash = NULL; + return count; +} diff --git a/src/findlib/fstype.c b/src/findlib/fstype.c new file mode 100644 index 00000000..107f7a87 --- /dev/null +++ b/src/findlib/fstype.c @@ -0,0 +1,477 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Implement routines to determine file system types. 
+ * + * Written by Preben 'Peppe' Guldberg, December MMIV + * Updated by Kern Sibbald, April MMXV + */ + + +#ifndef TEST_PROGRAM + +#include "bacula.h" +#include "find.h" +#include +#include +#ifdef HAVE_SUN_OS + #include +#endif +#else /* Set up for testing a stand alone program */ + +#include +#include +#include +#define bstrncpy strncpy +#define Dmsg0(n,s) fprintf(stderr, s) +#define Dmsg1(n,s,a1) fprintf(stderr, s, a1) +#define Dmsg2(n,s,a1,a2) fprintf(stderr, s, a1, a2) +#endif + +#define is_rootfs(x) bstrcmp("rootfs", x) + +#if defined(HAVE_GETMNTINFO) || defined(HAVE_GETMNTENT) +static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; +#endif + +struct mtab_item { + rblink link; + uint64_t dev; + char fstype[1]; +}; + +/* Compare two device types */ +static int compare_mtab_items(void *item1, void *item2) +{ + mtab_item *mtab1, *mtab2; + mtab1 = (mtab_item *)item1; + mtab2 = (mtab_item *)item2; + if (mtab1->dev < mtab2->dev) return -1; + if (mtab1->dev > mtab2->dev) return 1; + return 0; +} + +void add_mtab_item(void *user_ctx, struct stat *st, const char *fstype, + const char *mountpoint, const char *mntopts, + const char *fsname) +{ + rblist *mtab_list = (rblist *)user_ctx; + mtab_item *item, *ritem; + int len = strlen(fstype) + 1; + + item = (mtab_item *)malloc(sizeof(mtab_item) + len); + item->dev = (uint64_t)st->st_dev; + bstrncpy(item->fstype, fstype, len); + ritem = (mtab_item *)mtab_list->insert((void *)item, compare_mtab_items); + if (ritem != item) { + /* Item already inserted, so we discard this one */ + free(item); + } +} + + +/* + * These functions should be implemented for each OS + * + * bool fstype(FF_PKT *ff_pkt, char *fs, int fslen); + */ +#if defined(HAVE_DARWIN_OS) \ + || defined(HAVE_FREEBSD_OS ) \ + || defined(HAVE_KFREEBSD_OS ) \ + || defined(HAVE_OPENBSD_OS) + +#include +#include + +bool fstype(FF_PKT *ff_pkt, char *fs, int fslen) +{ + char *fname = ff_pkt->fname; + struct statfs st; + + if (statfs(fname, &st) == 0) { + bstrncpy(fs, st.f_fstypename, fslen); + return true; + } + Dmsg1(50, "statfs() failed for \"%s\"\n", fname); + return false; +} +#elif defined(HAVE_NETBSD_OS) +#include +#include +#ifdef HAVE_SYS_STATVFS_H +#include +#else +#define statvfs statfs +#endif + +bool fstype(FF_PKT *ff_pkt, char *fs, int fslen) +{ + char *fname = ff_pkt->fname; + struct statvfs st; + if (statvfs(fname, &st) == 0) { + bstrncpy(fs, st.f_fstypename, fslen); + return true; + } + Dmsg1(50, "statfs() failed for \"%s\"\n", fname); + return false; +} +#elif defined(HAVE_HPUX_OS) \ + || defined(HAVE_IRIX_OS) + +#include +#include + +bool fstype(FF_PKT *ff_pkt, char *fs, int fslen) +{ + char *fname = ff_pkt->fname; + struct statvfs st; + if (statvfs(fname, &st) == 0) { + bstrncpy(fs, st.f_basetype, fslen); + return true; + } + Dmsg1(50, "statfs() failed for \"%s\"\n", fname); + return false; +} + +#elif defined(HAVE_LINUX_OS) + +#include +#include + +/* + * Linux statfs() does not return the filesystem name type. It + * only returns a binary fstype, so we must look up the type name + * in mtab. 
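A minimal out-of-tree illustration of this point (not part of the imported source): the numeric magic in f_type is all the kernel hands back, and everything that follows, the mtab cache and the hard-coded table in the function below, exists only to turn that number into a name.

// Standalone Linux example: statfs() yields a numeric filesystem magic.
#include <sys/vfs.h>       // statfs(), struct statfs
#include <stdio.h>

int main(int argc, char **argv)
{
   struct statfs st;
   const char *path = (argc > 1) ? argv[1] : "/";

   if (statfs(path, &st) != 0) {
      perror("statfs");
      return 1;
   }
   // 0xef53 is EXT2_SUPER_MAGIC, shared by ext2, ext3 and ext4.
   printf("%s: f_type=0x%lx%s\n", path, (long)st.f_type,
          (st.f_type == 0xef53) ? " (ext2/3/4)" : "");
   return 0;
}

Bacula additionally caches the last result in ff_pkt->last_fstype and last_fstypename, and consults the rblist built from mtab before falling back to the table of known magics.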
+ */ +bool fstype(FF_PKT *ff_pkt, char *fs, int fslen) +{ + char *fname = ff_pkt->fname; + struct statfs st; + const char *fstype; + + if (statfs(fname, &st) == 0) { + mtab_item *item, search_item; + if (*ff_pkt->last_fstypename && ff_pkt->last_fstype == (uint64_t)st.f_type) { + bstrncpy(fs, ff_pkt->last_fstypename, fslen); + return true; + } + if (!ff_pkt->mtab_list) { + ff_pkt->mtab_list = New(rblist()); + read_mtab(add_mtab_item, ff_pkt->mtab_list); + } + search_item.dev = st.f_type; + item = (mtab_item *)ff_pkt->mtab_list->search((void *)&search_item, compare_mtab_items); + if (item) { + ff_pkt->last_fstype = st.f_type; + bstrncpy(ff_pkt->last_fstypename, item->fstype, sizeof(ff_pkt->last_fstypename)); + bstrncpy(fs, ff_pkt->last_fstypename, fslen); + return true; + } + /* + * Values obtained from statfs(2), testing and + * + * $ grep -r SUPER_MAGIC /usr/include/linux + */ + switch (st.f_type) { + /* Known good values */ + /* ext2, ext3, and ext4 have the same code */ + case 0xef53: fstype = "ext2"; break; /* EXT2_SUPER_MAGIC */ + case 0x3153464a: fstype = "jfs"; break; /* JFS_SUPER_MAGIC */ + case 0x5346544e: fstype = "ntfs"; break; /* NTFS_SB_MAGIC */ + case 0x9fa0: fstype = "proc"; break; /* PROC_SUPER_MAGIC */ + case 0x52654973: fstype = "reiserfs"; break; /* REISERFS_SUPER_MAGIC */ + case 0x58465342: fstype = "xfs"; break; /* XFS_SB_MAGIC */ + case 0x9fa2: fstype = "usbdevfs"; break; /* USBDEVICE_SUPER_MAGIC */ + case 0x62656572: fstype = "sysfs"; break; /* SYSFS_MAGIC */ + case 0x517B: fstype = "smbfs"; break; /* SMB_SUPER_MAGIC */ + case 0x9660: fstype = "iso9660"; break; /* ISOFS_SUPER_MAGIC */ + case 0xadf5: fstype = "adfs"; break; /* ADFS_SUPER_MAGIC */ + case 0xadff: fstype = "affs"; break; /* AFFS_SUPER_MAGIC */ + case 0x42465331: fstype = "befs"; break; /* BEFS_SUPER_MAGIC */ + case 0xFF534D42: fstype = "cifs"; break; /* CIFS_MAGIC_NUMBER */ + case 0x73757245: fstype = "coda"; break; /* CODA_SUPER_MAGIC */ + case 0x012ff7b7: fstype = "coherent"; break; /* COH_SUPER_MAGIC */ + case 0x28cd3d45: fstype = "cramfs"; break; /* CRAMFS_MAGIC */ + case 0x1373: fstype = "devfs"; break; /* DEVFS_SUPER_MAGIC */ + case 0x414A53: fstype = "efs"; break; /* EFS_SUPER_MAGIC */ + case 0x137d: fstype = "ext"; break; /* EXT_SUPER_MAGIC */ + case 0xef51: fstype = "oldext2"; break; /* EXT2_OLD_SUPER_MAGIC */ + case 0x4244: fstype = "hfs"; break; /* EXT2_OLD_SUPER_MAGIC */ + case 0xf995e849: fstype = "hpfs"; break; /* HPFS_SUPER_MAGIC */ + case 0x958458f6: fstype = "hugetlbfs"; break; /* HUGETLBFS_MAGIC */ + case 0x72b6: fstype = "jffs2"; break; /* JFFS2_SUPER_MAGIC */ + case 0x2468: fstype = "minix"; break; /* MINIX2_SUPER_MAGIC */ + case 0x2478: fstype = "minix"; break; /* MINIX2_SUPER_MAGIC2 */ + case 0x137f: fstype = "minix"; break; /* MINIX_SUPER_MAGIC */ + case 0x138f: fstype = "minix"; break; /* MINIX_SUPER_MAGIC2 */ + case 0x4d44: fstype = "msdos"; break; /* MSDOS_SUPER_MAGIC */ + case 0x564c: fstype = "ncpfs"; break; /* NCP_SUPER_MAGIC */ + case 0x6969: fstype = "nfs"; break; /* NFS_SUPER_MAGIC */ + case 0x9fa1: fstype = "openpromfs"; break; /* OPENPROM_SUPER_MAGIC */ + case 0x002f: fstype = "qnx4"; break; /* QNX4_SUPER_MAGIC */ + case 0x7275: fstype = "romfs"; break; /* QNX4_SUPER_MAGIC */ + case 0x012ff7b6: fstype = "sysv2"; break; + case 0x012ff7b5: fstype = "sysv4"; break; + case 0x01021994: fstype = "tmpfs"; break; + case 0x15013346: fstype = "udf"; break; + case 0x00011954: fstype = "ufs"; break; + case 0xa501FCF5: fstype = "vxfs"; break; + case 0x012FF7B4: fstype = "xenix"; 
break; + case 0x012FD16D: fstype = "xiafs"; break; + case 0x9123683e: fstype = "btrfs"; break; + case 0x7461636f: fstype = "ocfs2"; break; /* OCFS2_SUPER_MAGIC */ + +#if 0 /* These need confirmation */ + case 0x6B414653: fstype = "afs"; break; /* AFS_FS_MAGIC */ + case 0x0187: fstype = "autofs"; break; /* AUTOFS_SUPER_MAGIC */ + case 0x62646576: fstype = "bdev"; break; /* ??? */ + case 0x1BADFACE: fstype = "bfs"; break; /* BFS_MAGIC */ + case 0x42494e4d: fstype = "binfmt_misc"; break; /* ??? */ + case (('C'<<8)|'N'): fstype = "capifs"; break; /* CAPIFS_SUPER_MAGIC */ + case 0x1cd1: fstype = "devpts"; break; /* ??? */ + case 0x03111965: fstype = "eventpollfs"; break; /* EVENTPOLLFS_MAGIC */ + case 0xBAD1DEA: fstype = "futexfs"; break; /* ??? */ + case 0xaee71ee7: fstype = "gadgetfs"; break; /* GADGETFS_MAGIC */ + case 0x00c0ffee: fstype = "hostfs"; break; /* HOSTFS_SUPER_MAGIC */ + case 0xb00000ee: fstype = "hppfs"; break; /* HPPFS_SUPER_MAGIC */ + case 0x12061983: fstype = "hwgfs"; break; /* HWGFS_MAGIC */ + case 0x66726f67: fstype = "ibmasmfs"; break; /* IBMASMFS_MAGIC */ + case 0x19800202: fstype = "mqueue"; break; /* MQUEUE_MAGIC */ + case 0x6f70726f: fstype = "oprofilefs"; break; /* OPROFILEFS_MAGIC */ + case 0xa0b4d889: fstype = "pfmfs"; break; /* PFMFS_MAGIC */ + case 0x50495045: fstype = "pipfs"; break; /* PIPEFS_MAGIC */ + case 0x858458f6: fstype = "ramfs"; break; /* RAMFS_MAGIC */ + case 0x7275: fstype = "romfs"; break; /* ROMFS_MAGIC */ + case 0x858458f6: fstype = "rootfs"; break; /* RAMFS_MAGIC */ + case 0x67596969: fstype = "rpc_pipefs"; break; /* RPCAUTH_GSSMAGIC */ + case 0x534F434B: fstype = "sockfs"; break; /* SOCKFS_MAGIC */ + case 0x858458f6: fstype = "tmpfs"; break; /* RAMFS_MAGIC */ + case 0x01021994: fstype = "tmpfs"; break; /* TMPFS_MAGIC */ +#endif + + default: + Dmsg2(10, "Unknown file system type \"0x%x\" for \"%s\".\n", st.f_type, + fname); + return false; + } + ff_pkt->last_fstype = st.f_type; + bstrncpy(ff_pkt->last_fstypename, fstype, sizeof(ff_pkt->last_fstypename)); + bstrncpy(fs, fstype, fslen); + return true; + } + Dmsg1(50, "statfs() failed for \"%s\"\n", fname); + return false; +} + +#elif defined(HAVE_SUN_OS) + +#include +#include + +bool fstype(FF_PKT *ff_pkt, char *fs, int fslen) +{ + /* Solaris has the filesystem type name in the lstat packet */ + bstrncpy(fs, ff_pkt->statp.st_fstype, fslen); + return true; +} + +#elif defined (__digital__) && defined (__unix__) /* Tru64 */ +/* Tru64 */ +#include +#include +#include + +bool fstype(FF_PKT *ff_pkt, char *fs, int fslen) +{ + char *fname = ff_pkt->fname; + struct statfs st; + if (statfs((char *)fname, &st) == 0) { + switch (st.f_type) { + /* Known good values */ + case 0xa: bstrncpy(fs, "advfs", fslen); return true; /* Tru64 AdvFS */ + case 0xe: bstrncpy(fs, "nfs", fslen); return true; /* Tru64 NFS */ + default: + Dmsg2(10, "Unknown file system type \"0x%x\" for \"%s\".\n", st.f_type, + fname); + return false; + } + } + Dmsg1(50, "statfs() failed for \"%s\"\n", fname); + return false; +} +/* Tru64 */ + +#elif defined (HAVE_WIN32) +/* Windows */ +bool fstype(FF_PKT *ff_pkt, char *fs, int fslen) +{ + char *fname = ff_pkt->fname; + DWORD componentlength; + DWORD fsflags; + CHAR rootpath[4]; + UINT oldmode; + BOOL result; + + /* Copy Drive Letter, colon, and backslash to rootpath */ + bstrncpy(rootpath, fname, sizeof(rootpath)); + + /* We don't want any popups if there isn't any media in the drive */ + oldmode = SetErrorMode(SEM_FAILCRITICALERRORS); + result = GetVolumeInformation(rootpath, NULL, 0, NULL, 
&componentlength, &fsflags, fs, fslen); + SetErrorMode(oldmode); + + if (result) { + /* Windows returns NTFS, FAT, etc. Make it lowercase to be consistent with other OSes */ + lcase(fs); + } else { + Dmsg2(10, "GetVolumeInformation() failed for \"%s\", Error = %d.\n", rootpath, GetLastError()); + } + return result != 0; +} +/* Windows */ + +#else /* No recognised OS */ + +bool fstype(FF_PKT *ff_pkt, char *fs, int fslen) +{ + char *fname = ff_pkt->fname; + Dmsg0(10, "!!! fstype() not implemented for this OS. !!!\n"); + return false; +} +#endif + +/* Read mtab entries */ +bool read_mtab(mtab_handler_t *mtab_handler, void *user_ctx) +{ +/* Debian stretch GNU/KFreeBSD has both getmntinfo and getmntent, but + only the first seems to work, so we skip over getmntent in this case */ +#ifndef HAVE_KFREEBSD_OS +#ifdef HAVE_GETMNTENT + FILE *mntfp; + struct stat st; + +#ifdef HAVE_LINUX_OS + struct mntent *mnt; + P(mutex); + if ((mntfp = setmntent("/proc/mounts", "r")) == NULL) { + if ((mntfp = setmntent(_PATH_MOUNTED, "r")) == NULL) { + V(mutex); + return false; + } + } + while ((mnt = getmntent(mntfp)) != NULL) { + if (is_rootfs(mnt->mnt_type)) { + continue; + } + + if (stat(mnt->mnt_dir, &st) < 0) { + continue; + } + mtab_handler(user_ctx, &st, mnt->mnt_type, mnt->mnt_dir, + mnt->mnt_opts, mnt->mnt_fsname); + } + endmntent(mntfp); + V(mutex); +#endif + +#ifdef HAVE_SUN_OS + struct mnttab mnt; + + P(mutex); + if ((mntfp = bfopen(MNTTAB, "r")) == NULL) { + V(mutex); + return false; + } + + while (getmntent(mntfp, &mnt) == 0) { + if (is_rootfs(mnt.mnt_fstype)) { + continue; + } + if (stat(mnt.mnt_mountp, &st) < 0) { + continue; + } + mtab_handler(user_ctx, &st, mnt.mnt_fstype, mnt.mnt_mountp, + mnt.mnt_mntopts, mnt.mnt_special); + } + fclose(mntfp); + V(mutex); +#endif + +#endif /* HAVE_GETMNTENT */ +#endif /* HAVE_KFREEBSD_OS */ + +#ifdef HAVE_GETMNTINFO + struct stat st; +#if defined(ST_NOWAIT) + int flags = ST_NOWAIT; +#elif defined(MNT_NOWAIT) + int flags = MNT_NOWAIT; +#else + int flags = 0; +#endif +#if defined(HAVE_NETBSD_OS) + struct statvfs *mntinfo; +#else + struct statfs *mntinfo; +#endif + int nument; + + P(mutex); + if ((nument = getmntinfo(&mntinfo, flags)) > 0) { + while (nument-- > 0) { + if (is_rootfs(mntinfo->f_fstypename)) { + continue; + } + if (stat(mntinfo->f_mntonname, &st) < 0) { + continue; + } + mtab_handler(user_ctx, &st, mntinfo->f_mntfromname, + mntinfo->f_mntonname, mntinfo->f_fstypename, NULL); + mntinfo++; + } + } + V(mutex); +#endif /* HAVE_GETMNTINFO */ + return true; +} + +#ifdef TEST_PROGRAM +int main(int argc, char **argv) +{ + char *p; + char fs[1000]; + int status = 0; + + if (argc < 2) { + p = (argc < 1) ? "fstype" : argv[0]; + printf("usage:\t%s path ...\n" + "\t%s prints the file system type and pathname of the paths.\n", + p, p); + return EXIT_FAILURE; + } + while (*++argv) { + if (!fstype(*argv, fs, sizeof(fs))) { + status = EXIT_FAILURE; + } else { + printf("%s\t%s\n", fs, *argv); + } + } + return status; +} +#endif diff --git a/src/findlib/match.c b/src/findlib/match.c new file mode 100644 index 00000000..b1214f3e --- /dev/null +++ b/src/findlib/match.c @@ -0,0 +1,404 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Old style + * + * Routines used to keep and match include and exclude + * filename/pathname patterns. + * + * Note, this file is used for the old style include and + * excludes, so is deprecated. The new style code is + * found in find.c. + * This code is still used for lists in testls and bextract. + * + * Kern E. Sibbald, December MMI + * + */ + +#include "bacula.h" +#include "find.h" +#include "ch.h" + +#include + +#ifndef FNM_LEADING_DIR +#define FNM_LEADING_DIR 0 +#endif + +/* Fold case in fnmatch() on Win32 */ +#ifdef HAVE_WIN32 +static const int fnmode = FNM_CASEFOLD; +#else +static const int fnmode = 0; +#endif + + +#undef bmalloc +#define bmalloc(x) sm_malloc(__FILE__, __LINE__, x) + + +int +match_files(JCR *jcr, FF_PKT *ff, int file_save(JCR *, FF_PKT *ff_pkt, bool)) +{ + ff->file_save = file_save; + + struct s_included_file *inc = NULL; + + /* This is the old deprecated way */ + while (!job_canceled(jcr) && (inc = get_next_included_file(ff, inc))) { + /* Copy options for this file */ + bstrncat(ff->VerifyOpts, inc->VerifyOpts, sizeof(ff->VerifyOpts)); + Dmsg1(100, "find_files: file=%s\n", inc->fname); + if (!file_is_excluded(ff, inc->fname)) { + if (find_one_file(jcr, ff, file_save, inc->fname, (dev_t)-1, 1) ==0) { + return 0; /* error return */ + } + } + } + return 1; +} + + +/* + * Done doing filename matching, release all + * resources used. 
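+ *
+ * A sketch of the old-style call sequence as used by testls/bextract
+ * (illustrative only; save_file stands for the caller's file callback
+ * and jcr for its job control record):
+ *
+ *   FF_PKT *ff = init_find_files();
+ *   add_fname_to_include_list(ff, 0, "/etc");
+ *   add_fname_to_exclude_list(ff, "*.tmp");
+ *   match_files(jcr, ff, save_file);
+ *   term_include_exclude_files(ff);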
+ */ +void term_include_exclude_files(FF_PKT *ff) +{ + struct s_included_file *inc, *next_inc; + struct s_excluded_file *exc, *next_exc; + + for (inc=ff->included_files_list; inc; ) { + next_inc = inc->next; + free(inc); + inc = next_inc; + } + ff->included_files_list = NULL; + + for (exc=ff->excluded_files_list; exc; ) { + next_exc = exc->next; + free(exc); + exc = next_exc; + } + ff->excluded_files_list = NULL; + + for (exc=ff->excluded_paths_list; exc; ) { + next_exc = exc->next; + free(exc); + exc = next_exc; + } + ff->excluded_paths_list = NULL; +} + +/* + * Add a filename to list of included files + */ +void add_fname_to_include_list(FF_PKT *ff, int prefixed, const char *fname) +{ + int len, j; + struct s_included_file *inc; + char *p; + const char *rp; + + len = strlen(fname); + + inc =(struct s_included_file *)bmalloc(sizeof(struct s_included_file) + len + 1); + inc->options = 0; + inc->VerifyOpts[0] = 'V'; + inc->VerifyOpts[1] = ':'; + inc->VerifyOpts[2] = 0; + + /* prefixed = preceded with options */ + if (prefixed) { + for (rp=fname; *rp && *rp != ' '; rp++) { + switch (*rp) { + case 'a': /* alway replace */ + case '0': /* no option */ + break; + case 'f': + inc->options |= FO_MULTIFS; + break; + case 'h': /* no recursion */ + inc->options |= FO_NO_RECURSION; + break; + case 'M': /* MD5 */ + inc->options |= FO_MD5; + break; + case 'n': + inc->options |= FO_NOREPLACE; + break; + case 'p': /* use portable data format */ + inc->options |= FO_PORTABLE; + break; + case 'r': /* read fifo */ + inc->options |= FO_READFIFO; + break; + case 'S': + inc->options |= FO_SHA1; + break; + case 's': + inc->options |= FO_SPARSE; + break; + case 'm': + inc->options |= FO_MTIMEONLY; + break; + case 'k': + inc->options |= FO_KEEPATIME; + break; + case 'V': /* verify options */ + /* Copy Verify Options */ + for (j=0; *rp && *rp != ':'; rp++) { + inc->VerifyOpts[j] = *rp; + if (j < (int)sizeof(inc->VerifyOpts) - 1) { + j++; + } + } + inc->VerifyOpts[j] = 0; + break; + case 'w': + inc->options |= FO_IF_NEWER; + break; + case 'A': + inc->options |= FO_ACL; + break; + case 'Z': /* compression */ + rp++; /* skip Z */ + if (*rp >= '0' && *rp <= '9') { + inc->options |= FO_COMPRESS; + inc->algo = COMPRESS_GZIP; + inc->Compress_level = *rp - '0'; + } + else if (*rp == 'o') { + inc->options |= FO_COMPRESS; + inc->algo = COMPRESS_LZO1X; + inc->Compress_level = 1; /* not used with LZO */ + } + Dmsg2(200, "Compression alg=%d level=%d\n", inc->algo, inc->Compress_level); + break; + case 'K': + inc->options |= FO_NOATIME; + break; + case 'X': + inc->options |= FO_XATTR; + break; + default: + Emsg1(M_ERROR, 0, _("Unknown include/exclude option: %c\n"), *rp); + break; + } + } + /* Skip past space(s) */ + for ( ; *rp == ' '; rp++) + {} + } else { + rp = fname; + } + + strcpy(inc->fname, rp); + p = inc->fname; + len = strlen(p); + /* Zap trailing slashes. 
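+ * For example, an entry given as "/home/user///" is stored as
+ * "/home/user", so the length and prefix comparisons done later in
+ * file_is_included() work on a canonical form.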
*/ + p += len - 1; + while (p > inc->fname && IsPathSeparator(*p)) { + *p-- = 0; + len--; + } + inc->len = len; + /* Check for wild cards */ + inc->pattern = 0; + for (p=inc->fname; *p; p++) { + if (*p == '*' || *p == '[' || *p == '?') { + inc->pattern = 1; + break; + } + } +#if defined(HAVE_WIN32) + /* Convert any \'s into /'s */ + for (p=inc->fname; *p; p++) { + if (*p == '\\') { + *p = '/'; + } + } +#endif + inc->next = NULL; + /* Chain this one on the end of the list */ + if (!ff->included_files_list) { + /* First one, so set head */ + ff->included_files_list = inc; + } else { + struct s_included_file *next; + /* Walk to end of list */ + for (next=ff->included_files_list; next->next; next=next->next) + { } + next->next = inc; + } + Dmsg4(100, "add_fname_to_include prefix=%d compres=%d alg= %d fname=%s\n", + prefixed, !!(inc->options & FO_COMPRESS), inc->algo, inc->fname); +} + +/* + * We add an exclude name to either the exclude path + * list or the exclude filename list. + */ +void add_fname_to_exclude_list(FF_PKT *ff, const char *fname) +{ + int len; + struct s_excluded_file *exc, **list; + + Dmsg1(20, "Add name to exclude: %s\n", fname); + + if (first_path_separator(fname) != NULL) { + list = &ff->excluded_paths_list; + } else { + list = &ff->excluded_files_list; + } + + len = strlen(fname); + + exc = (struct s_excluded_file *)bmalloc(sizeof(struct s_excluded_file) + len + 1); + exc->next = *list; + exc->len = len; + strcpy(exc->fname, fname); +#if defined(HAVE_WIN32) + /* Convert any \'s into /'s */ + for (char *p=exc->fname; *p; p++) { + if (*p == '\\') { + *p = '/'; + } + } +#endif + *list = exc; +} + + +/* + * Get next included file + */ +struct s_included_file *get_next_included_file(FF_PKT *ff, struct s_included_file *ainc) +{ + struct s_included_file *inc; + + if (ainc == NULL) { + inc = ff->included_files_list; + } else { + inc = ainc->next; + } + /* + * copy inc_options for this file into the ff packet + */ + if (inc) { + ff->flags = inc->options; + ff->Compress_algo = inc->algo; + ff->Compress_level = inc->Compress_level; + } + return inc; +} + +/* + * Walk through the included list to see if this + * file is included possibly with wild-cards. + */ + +int file_is_included(FF_PKT *ff, const char *file) +{ + struct s_included_file *inc = ff->included_files_list; + int len; + + for ( ; inc; inc=inc->next ) { + if (inc->pattern) { + if (fnmatch(inc->fname, file, fnmode|FNM_LEADING_DIR) == 0) { + return 1; + } + continue; + } + /* + * No wild cards. We accept a match to the + * end of any component. + */ + Dmsg2(900, "pat=%s file=%s\n", inc->fname, file); + len = strlen(file); + if (inc->len == len && strcmp(inc->fname, file) == 0) { + return 1; + } + if (inc->len < len && IsPathSeparator(file[inc->len]) && + strncmp(inc->fname, file, inc->len) == 0) { + return 1; + } + if (inc->len == 1 && IsPathSeparator(inc->fname[0])) { + return 1; + } + } + return 0; +} + + +/* + * This is the workhorse of excluded_file(). + * Determine if the file is excluded or not. 
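+ *
+ * The match uses fnmatch() with FNM_PATHNAME, so a wildcard never crosses
+ * a '/': for instance, the exclude pattern "*.o" matches the component
+ * "main.o" handed in by file_is_excluded(), but would not match the
+ * longer path "src/main.o" because '*' cannot span the '/'.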
+ */ +static int +file_in_excluded_list(struct s_excluded_file *exc, const char *file) +{ + if (exc == NULL) { + Dmsg0(900, "exc is NULL\n"); + } + for ( ; exc; exc=exc->next ) { + if (fnmatch(exc->fname, file, fnmode|FNM_PATHNAME) == 0) { + Dmsg2(900, "Match exc pat=%s: file=%s:\n", exc->fname, file); + return 1; + } + Dmsg2(900, "No match exc pat=%s: file=%s:\n", exc->fname, file); + } + return 0; +} + + +/* + * Walk through the excluded lists to see if this + * file is excluded, or if it matches a component + * of an excluded directory. + */ + +int file_is_excluded(FF_PKT *ff, const char *file) +{ + const char *p; + +#if defined(HAVE_WIN32) + /* + * ***NB*** this removes the drive from the exclude + * rule. Why????? + */ + if (file[1] == ':') { + file += 2; + } +#endif + + if (file_in_excluded_list(ff->excluded_paths_list, file)) { + return 1; + } + + /* Try each component */ + for (p = file; *p; p++) { + /* Match from the beginning of a component only */ + if ((p == file || (!IsPathSeparator(*p) && IsPathSeparator(p[-1]))) + && file_in_excluded_list(ff->excluded_files_list, p)) { + return 1; + } + } + return 0; +} diff --git a/src/findlib/mkpath.c b/src/findlib/mkpath.c new file mode 100644 index 00000000..e1e19be2 --- /dev/null +++ b/src/findlib/mkpath.c @@ -0,0 +1,323 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Kern Sibbald, September MMVII + * + * This is tricky code, especially when writing from scratch. Fortunately, + * a non-copyrighted version of mkdir was available to consult. + * + * ***FIXME*** the mkpath code could be significantly optimized by + * walking up the path chain from the bottom until it either gets + * to the top or finds an existing directory then walk back down + * creating the path components. Currently, it always starts at + * the top, which can be rather inefficient for long path names. 
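+ *
+ * A bottom-up variant would look roughly like this (sketch only, not
+ * implemented here): strip the last path component and stat() the
+ * remaining prefix, repeating until an existing directory is found, then
+ * mkdir() the stripped components on the way back down.  For
+ * /a/b/c/d/e with /a/b already present, only c, d and e would be probed
+ * and created, instead of walking all five components from the root.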
+ * + */ +#include "bacula.h" +#include "jcr.h" + +#define dbglvl 50 + +/* + * For old systems that don't have lchown() or lchmod() + */ +#ifndef HAVE_LCHOWN +#define lchown chown +#endif +#ifndef HAVE_LCHMOD +#define lchmod chmod +#endif + +/* Defined in attribs.c */ +void set_own_mod(ATTR *attr, char *path, uid_t owner, gid_t group, mode_t mode); + +typedef struct PrivateCurDir { + hlink link; + char fname[1]; +} CurDir; + +/* Initialize the path hash table */ +static bool path_list_init(JCR *jcr) +{ + CurDir *elt = NULL; + jcr->path_list = (htable *)malloc(sizeof(htable)); + + /* Hard to know in advance how many directories will + * be stored in this hash + */ + jcr->path_list->init(elt, &elt->link, 10000); + return true; +} + +/* Add a path to the hash when we create a directory + * with the replace=NEVER option + */ +bool path_list_add(JCR *jcr, uint32_t len, char *fname) +{ + bool ret = true; + CurDir *item; + + if (!jcr->path_list) { + path_list_init(jcr); + } + + /* we store CurDir, fname in the same chunk */ + item = (CurDir *)jcr->path_list->hash_malloc(sizeof(CurDir)+len+1); + + memset(item, 0, sizeof(CurDir)); + memcpy(item->fname, fname, len+1); + + jcr->path_list->insert(item->fname, item); + + Dmsg1(dbglvl, "add fname=<%s>\n", fname); + return ret; +} + +void free_path_list(JCR *jcr) +{ + if (jcr->path_list) { + jcr->path_list->destroy(); + free(jcr->path_list); + jcr->path_list = NULL; + } +} + +bool path_list_lookup(JCR *jcr, char *fname) +{ + bool found=false; + char bkp; + + if (!jcr->path_list) { + return false; + } + + /* Strip trailing / */ + int len = strlen(fname); + if (len == 0) { + return false; + } + len--; + bkp = fname[len]; + if (fname[len] == '/') { /* strip any trailing slash */ + fname[len] = 0; + } + + CurDir *temp = (CurDir *)jcr->path_list->lookup(fname); + if (temp) { + found=true; + } + + Dmsg2(dbglvl, "lookup <%s> %s\n", fname, found?"ok":"not ok"); + + fname[len] = bkp; /* restore last / */ + return found; +} + +static bool makedir(JCR *jcr, char *path, mode_t mode, int *created) +{ + struct stat statp; + + if (mkdir(path, mode) != 0) { + berrno be; + *created = false; + if (lstat(path, &statp) != 0) { + Jmsg2(jcr, M_ERROR, 0, _("Cannot create directory %s: ERR=%s\n"), + path, be.bstrerror()); + return false; + } else if (!S_ISDIR(statp.st_mode)) { + Jmsg1(jcr, M_ERROR, 0, _("%s exists but is not a directory.\n"), path); + return false; + } + return true; /* directory exists */ + } +#if 0 + /* TODO: This code rely on statp that is not initialized, we need to do a stat() */ + if (S_ISLNK(statp.st_mode)) { + /* + * Note, we created a directory, not a link, so if we find a + * link, there is a security problem here. + */ + Jmsg1(jcr, M_FATAL, 0, _("Security problem!! We created directory %s, but it is a link.\n"), + path); + return false; + } +#endif + if (jcr->keep_path_list) { + /* When replace=NEVER, we keep track of all directories newly created */ + path_list_add(jcr, strlen(path), path); + } + + *created = true; + return true; +} + +/* + * mode is the mode bits to use in creating a new directory + * + * parent_mode are the parent's modes if we need to create parent + * directories. 
+ * + * owner and group are to set on any created dirs + * + * keep_dir_modes if set means don't change mode bits if dir exists + */ +bool makepath(ATTR *attr, const char *apath, mode_t mode, mode_t parent_mode, + uid_t owner, gid_t group, int keep_dir_modes) +{ + struct stat statp; + mode_t omask, tmode; + char *path = (char *)apath; + char *p; + int len; + bool ok = false; + int created; + char new_dir[5000]; + int ndir = 0; + int i = 0; + int max_dirs = (int)sizeof(new_dir); + JCR *jcr = attr->jcr; + + if (stat(path, &statp) == 0) { /* Does dir exist? */ + if (!S_ISDIR(statp.st_mode)) { + Jmsg1(jcr, M_ERROR, 0, _("%s exists but is not a directory.\n"), path); + return false; + } + /* Full path exists */ + if (keep_dir_modes) { + return true; + } + set_own_mod(attr, path, owner, group, mode); + return true; + } + omask = umask(0); + umask(omask); + len = strlen(apath); + path = (char *)alloca(len+1); + bstrncpy(path, apath, len+1); + strip_trailing_slashes(path); + /* + * Now for one of the complexities. If we are not running as root, + * then if the parent_mode does not have wx user perms, or we are + * setting the userid or group, and the parent_mode has setuid, setgid, + * or sticky bits, we must create the dir with open permissions, then + * go back and patch all the dirs up with the correct perms. + * Solution, set everything to 0777, then go back and reset them at the + * end. + */ + tmode = 0777; + +#if defined(HAVE_WIN32) + /* Validate drive letter */ + if (path[1] == ':') { + char drive[4] = "X:\\"; + + drive[0] = path[0]; + + UINT drive_type = GetDriveType(drive); + + if (drive_type == DRIVE_UNKNOWN || drive_type == DRIVE_NO_ROOT_DIR) { + Jmsg1(jcr, M_ERROR, 0, _("%c: is not a valid drive.\n"), path[0]); + goto bail_out; + } + + if (path[2] == '\0') { /* attempt to create a drive */ + ok = true; + goto bail_out; /* OK, it is already there */ + } + + p = &path[3]; + } else { + p = path; + } +#else + p = path; +#endif + + /* Skip leading slash(es) */ + while (IsPathSeparator(*p)) { + p++; + } + while ((p = first_path_separator(p))) { + char save_p; + save_p = *p; + *p = 0; + if (!makedir(jcr, path, tmode, &created)) { + goto bail_out; + } + if (ndir < max_dirs) { + new_dir[ndir++] = created; + } + *p = save_p; + while (IsPathSeparator(*p)) { + p++; + } + } + /* Create final component */ + if (!makedir(jcr, path, tmode, &created)) { + goto bail_out; + } + if (ndir < max_dirs) { + new_dir[ndir++] = created; + } + if (ndir >= max_dirs) { + Jmsg0(jcr, M_WARNING, 0, _("Too many subdirectories. 
Some permissions not reset.\n")); + } + + /* Now set the proper owner and modes */ +#if defined(HAVE_WIN32) + + /* Don't propagate the hidden or encrypted attributes to parent directories */ + parent_mode &= ~S_ISVTX; + parent_mode &= ~S_ISGID; + + if (path[1] == ':') { + p = &path[3]; + } else { + p = path; + } +#else + p = path; +#endif + /* Skip leading slash(es) */ + while (IsPathSeparator(*p)) { + p++; + } + while ((p = first_path_separator(p))) { + char save_p; + save_p = *p; + *p = 0; + if (i < ndir && new_dir[i++] && !keep_dir_modes) { + set_own_mod(attr, path, owner, group, parent_mode); + } + *p = save_p; + while (IsPathSeparator(*p)) { + p++; + } + } + + /* Set for final component */ + if (i < ndir && new_dir[i++]) { + set_own_mod(attr, path, owner, group, mode); + } + + ok = true; +bail_out: + umask(omask); + return ok; +} diff --git a/src/findlib/namedpipe.c b/src/findlib/namedpipe.c new file mode 100644 index 00000000..fbfa2d07 --- /dev/null +++ b/src/findlib/namedpipe.c @@ -0,0 +1,338 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +#ifdef HAVE_WIN32 + +# include +# include +# include + +#else /* !HAVE_WIN32 */ + +# include +# include +# include +# include +# include + +#endif /* HAVE_WIN32 */ + +/* Common include */ +#include +#include +#include + +#include "namedpipe.h" + +#ifdef TEST_PROGRAM +# define Dmsg(level, ...) printf(__VA_ARGS__ ) +#endif + +#ifdef HAVE_WIN32 + +void namedpipe_init(NamedPipe *self) +{ + self->fd = INVALID_HANDLE_VALUE; + self->ifd = -1; +} + +void namedpipe_free(NamedPipe *self) +{ + if (self->fd != INVALID_HANDLE_VALUE) { + CloseHandle(self->fd); + self->fd = INVALID_HANDLE_VALUE; + self->ifd = -1; + } +} +#define BUFSIZE 8192 +int namedpipe_create(NamedPipe *self, const char *path, mode_t mode) +{ + /* On windows, */ + self->fd = CreateNamedPipeA( + path, // pipe name + PIPE_ACCESS_DUPLEX, // read/write access + PIPE_TYPE_MESSAGE | // message type pipe + PIPE_READMODE_MESSAGE | // message-read mode + PIPE_WAIT, // blocking mode + PIPE_UNLIMITED_INSTANCES, // max. instances + BUFSIZE, // output buffer size + BUFSIZE, // input buffer size + 0, // client time-out + NULL); // default security attribute + + if (self->fd == INVALID_HANDLE_VALUE) { + Dmsg(10, "CreateNamedPipe failed, ERR=%d.\n", (int)GetLastError()); + return -1; + } + + return 0; +} + +int namedpipe_open(NamedPipe *self, const char *path, mode_t mode) +{ + bool fConnected=false; + int retry = 30; + + if (self->fd != INVALID_HANDLE_VALUE) { /* server mode */ + + fConnected = ConnectNamedPipe(self->fd, NULL) ? 
+ TRUE : (GetLastError() == ERROR_PIPE_CONNECTED); + + } else { /* client mode */ + + /* Need to wait for creation */ + while (retry-- > 0) + { + self->fd = CreateFileA( + path, // pipe name + GENERIC_WRITE | GENERIC_READ, + 0, // no sharing + NULL, // default security attributes + OPEN_EXISTING, // opens existing pipe + 0, // default attributes + NULL); // no template file + + // Break if the pipe handle is valid. + if (self->fd != INVALID_HANDLE_VALUE) { + break; + } + + /* Wait a little bit for the other side to create the fifo */ + if (GetLastError() == ERROR_FILE_NOT_FOUND) { + Dmsg(10, "File not found, ERR=%d.\n", (int)GetLastError()); + Sleep(20000); + continue; + } + + // Exit if an error other than ERROR_PIPE_BUSY occurs. + if (GetLastError() != ERROR_PIPE_BUSY) { + Dmsg(10, "CreateFile failed, ERR=%d.\n", + (int)GetLastError()); + return -1; + } + + // All pipe instances are busy, so wait for 20 seconds. + if (!WaitNamedPipeA(path, 20000)) { + Dmsg(10, "WaitNamedPipe failed, ERR=%d.\n", + (int)GetLastError()); + return -1; + } + } + } + + DWORD dwMode = PIPE_READMODE_MESSAGE; + + fConnected = SetNamedPipeHandleState( + self->fd, // pipe handle + &dwMode, // new pipe mode + NULL, // don't set maximum bytes + NULL); // don't set maximum time + + if (!fConnected) { + Dmsg(10, "SetNamedPipeHandleState failed, ERR=%d.\n", + (int)GetLastError()); + } + + if (fConnected) { + int m = 0; + if (mode & O_WRONLY || mode & O_APPEND) { + m |= O_APPEND; + + } else if (mode & O_RDONLY) { + m |= O_RDONLY; + } + self->ifd = _open_osfhandle((intptr_t)self->fd, m); + } + + return self->ifd; +} + + +#else /* !HAVE_WIN32 */ + +void namedpipe_init(NamedPipe *self) +{ + self->fd = -1; + self->ifd = -1; + self->name = NULL; +} + +void namedpipe_free(NamedPipe *self) +{ + if (self->fd != -1) { + close(self->fd); + self->fd = -1; + self->ifd = -1; + } + if (self->name) { + unlink(self->name); + free(self->name); + self->name = NULL; + } +} + +int namedpipe_create(NamedPipe *self, const char *path, mode_t mode) +{ + self->name = (char *)malloc(strlen(path) + 1); + strcpy(self->name, path); + + if (mkfifo(path, mode) < 0 && errno != EEXIST) { + return -1; + } + + return 0; +} + +int namedpipe_open(NamedPipe *self, const char *path, mode_t mode) +{ + self->ifd = self->fd = open(path, mode); + return self->fd; +} + +#endif /* HAVE_WIN32 */ + +#ifdef TEST_PROGRAM + +#include +#include + +#define BUF "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" \ + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" \ + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" \ + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" \ + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" \ + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" \ + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" \ + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" \ + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n" + +int main(int argc, char **argv) +{ + FILE *fp; + NamedPipe p; + char buf[65*1024], file[128]; + int fd; + int mode; + int n, m, o; + if (argc < 4) { + printf("Usage: %s client|server pipe file\n", argv[0]); + exit(3); + } + + namedpipe_init(&p); + + if (strcmp(argv[1], "server") == 0) { + mode = O_WRONLY; + if (namedpipe_create(&p, argv[2], 0600) < 
0) { + exit(2); + } + + } else { + mode = O_RDONLY; + } + + printf("Trying to open %s mode=%d\n", argv[2], mode); + fd = namedpipe_open(&p, argv[2], mode); + + if (fd < 0) { + printf("Unable to open pipe\n"); + exit(1); + } + + if (strcmp(argv[1], "server") == 0) { + if (write(fd, BUF, strlen(BUF)+1) != strlen(BUF)+1) { + printf("Unable to write data\n"); + exit(4); + } + + if (write(fd, BUF, strlen(BUF)+1) != strlen(BUF)+1) { + printf("Unable to write data\n"); + exit(4); + } + + fp = bfopen(argv[3], "rb"); + if (!fp) { + printf("Unable to open %s for reading\n", argv[3]); + exit(4); + } + + fseek(fp, 0, SEEK_END); + m = ftell(fp); + fseek(fp, 0, SEEK_SET); + + snprintf(buf, sizeof(buf), "%.10d\n", m); + write(fd, buf, strlen(buf)+1); + + while (m > 0 && !feof(fp)) { + n = fread(buf, 1, sizeof(buf), fp); + Dmsg(000, "read %d from file\n", n); + if (write(fd, buf, n) != n) { + printf("Unable to write data from file\n"); + exit(5); + } + m -= n; + } + Dmsg(000, "EOF found\n"); + fclose(fp); + + } else { + if ((n = read(fd, buf, sizeof(buf))) != strlen(BUF)+1) { + Dmsg(000, "read failed (%d != %d), ERR=%d.\n", n, + (int)strlen(BUF)+1, errno); + exit(4); + } + if (read(fd, buf, sizeof(buf)) != strlen(BUF)+1) { + Dmsg(000, "read failed, ERR=%d.\n", errno); + exit(4); + } + + printf("buf=[%s]\n", buf); + + snprintf(file, sizeof(file), "%s.out", argv[3]); + fp = bfopen(file, "wb"); + if (!fp) { + printf("Unable to open %s for writing\n", buf); + exit(4); + } + + if ((n = read(fd, buf, sizeof(buf))) != 12) { + Dmsg(000, "read failed (%d != %d), ERR=%d.\n", n, 12, errno); + exit(4); + } + + m = atoi(buf); + Dmsg(000, "will read %d from fifo\n", m); + + while (m > 0) { + n = read(fd, buf, sizeof(buf)); + Dmsg(000, "Got %d bytes\n", n); + if ((o = fwrite(buf, n, 1, fp)) != 1) { + Dmsg(000, "write to file failed (%d != %d) ERR=%d.\n", o, n, errno); + exit(4); + } + m -= n; + } + fclose(fp); + } + + namedpipe_free(&p); + + exit(0); +} +#endif diff --git a/src/findlib/namedpipe.h b/src/findlib/namedpipe.h new file mode 100644 index 00000000..7e6757a3 --- /dev/null +++ b/src/findlib/namedpipe.h @@ -0,0 +1,48 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + Basic abstraction for named pipe between windows and linux + */ + +#ifndef NAMEDPIPE_H +#define NAMEDPIPE_H + +#ifdef HAVE_WIN32 +#include +#endif + +#include + +typedef struct { +#ifdef HAVE_WIN32 + HANDLE fd; +#else + char *name; + int fd; +#endif + int ifd; +} NamedPipe; + + +void namedpipe_init(NamedPipe *self); +void namedpipe_free(NamedPipe *self); +int namedpipe_create(NamedPipe *self, const char *path, mode_t mode); +int namedpipe_open(NamedPipe *self, const char *path, mode_t mode); + +#endif diff --git a/src/findlib/protos.h b/src/findlib/protos.h new file mode 100644 index 00000000..90eb6da7 --- /dev/null +++ b/src/findlib/protos.h @@ -0,0 +1,91 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Prototypes for finlib directory of Bacula + * + */ + +/* from attribs.c */ +bool check_directory_acl(char **last_dir, alist *dir_acl, const char *path); + +void encode_stat (char *buf, struct stat *statp, int stat_size, int32_t LinkFI, int data_stream); +int decode_stat (char *buf, struct stat *statp, int stat_size, int32_t *LinkFI); +int32_t decode_LinkFI (char *buf, struct stat *statp, int stat_size); +int encode_attribsEx (JCR *jcr, char *attribsEx, FF_PKT *ff_pkt); +bool set_attributes (JCR *jcr, ATTR *attr, BFILE *ofd); +int select_data_stream(FF_PKT *ff_pkt); + +/* from create_file.c */ +int create_file (JCR *jcr, ATTR *attr, BFILE *ofd, int replace); + +/* From find.c */ +FF_PKT *init_find_files(); +void set_find_snapshot_function(FF_PKT *ff, + bool convert_path(JCR *jcr, FF_PKT *ff, dlist *filelist, dlistString *node)); +void set_find_options(FF_PKT *ff, int incremental, time_t mtime); +void set_find_changed_function(FF_PKT *ff, bool check_fct(JCR *jcr, FF_PKT *ff)); +int find_files(JCR *jcr, FF_PKT *ff, int file_sub(JCR *, FF_PKT *ff_pkt, bool), + int plugin_sub(JCR *, FF_PKT *ff_pkt, bool)); +int match_files(JCR *jcr, FF_PKT *ff, int sub(JCR *, FF_PKT *ff_pkt, bool)); +int term_find_files(FF_PKT *ff); +bool is_in_fileset(FF_PKT *ff); +bool accept_file(FF_PKT *ff); + +/* From match.c */ +void init_include_exclude_files(FF_PKT *ff); +void term_include_exclude_files(FF_PKT *ff); +void add_fname_to_include_list(FF_PKT *ff, int prefixed, const char *fname); +void add_fname_to_exclude_list(FF_PKT *ff, const char *fname); +int file_is_excluded(FF_PKT *ff, const char *file); +int file_is_included(FF_PKT *ff, const char *file); +struct s_included_file *get_next_included_file(FF_PKT *ff, + struct s_included_file *inc); + +/* From find_one.c */ +int find_one_file(JCR *jcr, FF_PKT *ff, + int handle_file(JCR *jcr, FF_PKT *ff_pkt, bool top_level), + char *p, dev_t parent_device, bool top_level); +int term_find_one(FF_PKT *ff); +bool has_file_changed(JCR *jcr, FF_PKT *ff_pkt); +bool check_changes(JCR *jcr, FF_PKT *ff_pkt); +void ff_pkt_set_link_digest(FF_PKT *ff_pkt, + int32_t digest_stream, const char *digest, uint32_t len); + +/* From get_priv.c */ +int enable_backup_privileges(JCR *jcr, int 
ignore_errors); + + +/* from makepath.c */ +bool makepath(ATTR *attr, const char *path, mode_t mode, + mode_t parent_mode, uid_t owner, gid_t group, + int keep_dir_modes); +void free_path_list(JCR *jcr); +bool path_list_lookup(JCR *jcr, char *fname); +bool path_list_add(JCR *jcr, uint32_t len, char *fname); + + +/* from fstype.c */ +bool fstype(FF_PKT *ff_pkt, char *fs, int fslen); +bool fstype_equals(const char *fname, const char *fstype_name); + +/* from drivetype.c */ +bool drivetype(const char *fname, char *fs, int fslen); + +/* from bfile.c -- see bfile.h */ +/* from namedpipe.c -- see namedpipe.h */ diff --git a/src/findlib/savecwd.c b/src/findlib/savecwd.c new file mode 100644 index 00000000..236e4d08 --- /dev/null +++ b/src/findlib/savecwd.c @@ -0,0 +1,120 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2015 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +/* + * Kern Sibbald, August MMVII + * + */ + +#include "bacula.h" +#include "savecwd.h" + +/* + * Attempt to save the current working directory by various means so that + * we can optimize code by doing a cwd and then restore the cwd. + */ + +#ifdef HAVE_FCHDIR +static bool fchdir_failed = false; /* set if we get a fchdir failure */ +#else +static bool fchdir_failed = true; /* set if we get a fchdir failure */ +#endif + +/* + * Save current working directory. + * Returns: true if OK + * false if failed + */ +bool saveCWD::save(JCR *jcr) +{ + release(); /* clean up */ + if (!fchdir_failed) { + m_fd = open(".", O_RDONLY); + if (m_fd < 0) { + berrno be; + Jmsg1(jcr, M_ERROR, 0, _("Cannot open current directory: ERR=%s\n"), be.bstrerror()); + m_saved = false; + return false; + } + } + + if (fchdir_failed) { + POOLMEM *buf = get_memory(5000); + m_cwd = (POOLMEM *)getcwd(buf, sizeof_pool_memory(buf)); + if (m_cwd == NULL) { + berrno be; + Jmsg1(jcr, M_ERROR, 0, _("Cannot get current directory: ERR=%s\n"), be.bstrerror()); + free_pool_memory(buf); + m_saved = false; + return false; + } + } + m_saved = true; + return true; +} + +/* + * Restore previous working directory. 
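+ * Typical usage is the save/chdir/restore pattern (illustration only,
+ * "somedir" is a placeholder):
+ *
+ *   saveCWD cwd;
+ *   cwd.save(jcr);
+ *   chdir("somedir");
+ *   ... work in the other directory ...
+ *   cwd.restore(jcr);
+ *   cwd.release();
+ *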
+ * Returns: true if OK + * false if failed + */ +bool saveCWD::restore(JCR *jcr) +{ + if (!m_saved) { + return true; + } + m_saved = false; + if (m_fd >= 0) { + if (fchdir(m_fd) != 0) { + berrno be; + Jmsg1(jcr, M_ERROR, 0, _("Cannot reset current directory: ERR=%s\n"), be.bstrerror()); + close(m_fd); + m_fd = -1; + fchdir_failed = true; + chdir("/"); /* punt */ + return false; + } + return true; + } + if (chdir(m_cwd) < 0) { + berrno be; + Jmsg1(jcr, M_ERROR, 0, _("Cannot reset current directory: ERR=%s\n"), be.bstrerror()); + chdir("/"); + free_pool_memory(m_cwd); + m_cwd = NULL; + return false; + } + return true; +} + +void saveCWD::release() +{ + if (!m_saved) { + return; + } + m_saved = false; + if (m_fd >= 0) { + close(m_fd); + m_fd = -1; + } + if (m_cwd) { + free_pool_memory(m_cwd); + m_cwd = NULL; + } +} diff --git a/src/findlib/savecwd.h b/src/findlib/savecwd.h new file mode 100644 index 00000000..aa763e9a --- /dev/null +++ b/src/findlib/savecwd.h @@ -0,0 +1,42 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2015 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +/* + * Kern Sibbald, August MMVII + * + */ + +#ifndef _SAVECWD_H +#define _SAVECWD_H 1 + +class saveCWD { + bool m_saved; /* set if we should do chdir i.e. save_cwd worked */ + int m_fd; /* fd of current dir before chdir */ + char *m_cwd; /* cwd before chdir if fd fchdir() works */ + +public: + saveCWD() { m_saved=false; m_fd=-1; m_cwd=NULL; }; + ~saveCWD() { release(); }; + bool save(JCR *jcr); + bool restore(JCR *jcr); + void release(); + bool is_saved() { return m_saved; }; +}; + +#endif /* _SAVECWD_H */ diff --git a/src/findlib/win32filter.c b/src/findlib/win32filter.c new file mode 100644 index 00000000..1766bc89 --- /dev/null +++ b/src/findlib/win32filter.c @@ -0,0 +1,97 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2015 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +#include "win32filter.h" + +#define WIN32_STREAM_HEADER_SIZE 20 /* the size of the WIN32_STREAM_ID header without the name */ + +/* search in a record of a STREAM_WIN32_DATA for the true data + * when found: return true, '*raw' is set at the beginning of the data + * and *use_len is the length of data to read. + * *raw_len is decremented and contains the amount of data that as not + * been filtered yet. + * For this STREAM_WIN32_DATA, you can call have_data() only one + * per record. 
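+ * As a worked example: for a record holding one 20 byte backup-data
+ * header (dwStreamId == WIN32_BACKUP_DATA, dwStreamNameSize == 0,
+ * Size == 100) followed by its 100 data bytes, a call with
+ * *raw_len == 120 returns true, leaves *raw pointing at the data and
+ * sets *use_len == 100 and *raw_len == 0.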
+ * If the stream where the data is can be spread all around the stream + * you must call have_data() until *raw_len is zero and increment + * *data before the next call. + */ +bool Win32Filter::have_data(char **raw, int64_t *raw_len, int64_t *use_len) +{ + int64_t size; + char *orig=*raw; + initialized = true; + Dmsg1(100, "have_data(%lld)\n", *raw_len); + while (*raw_len > 0) { + /* In this rec, we could have multiple streams of data and headers + * to handle before to reach the data, then we must iterate + */ + + Dmsg4(100, "s off=%lld len=%lld skip_size=%lld data_size=%lld\n", *raw-orig, *raw_len, skip_size, data_size); + if (skip_size > 0) { + /* skip what the previous header told us to skip */ + size = *raw_len < skip_size ? *raw_len : skip_size; + skip_size -= size; + *raw_len -= size; + *raw += size; + } + + Dmsg4(100, "h off=%lld len=%lld skip_size=%lld data_size=%lld\n", *raw-orig, *raw_len, skip_size, data_size); + if (data_size == 0 && skip_size == 0 && *raw_len > 0) { + /* read a WIN32_STREAM header, merge it with the part that was read + * from the previous record, if any, if the header was split across + * 2 records. + */ + size = WIN32_STREAM_HEADER_SIZE - header_pos; + if (*raw_len < size) { + size = *raw_len; + } + memcpy((char *)&header + header_pos, *raw, size); + header_pos += size; + *raw_len -= size; + *raw += size; + if (header_pos == WIN32_STREAM_HEADER_SIZE) { + Dmsg5(100, "header pos=%d size=%lld name_size=%d len=%lld StreamId=0x%x\n", header_pos, size, + header.dwStreamNameSize, header.Size, header.dwStreamId); + header_pos = 0; + skip_size = header.dwStreamNameSize; /* skip the name of the stream */ + if (header.dwStreamId == WIN32_BACKUP_DATA) { + data_size = header.Size; + } else { + skip_size += header.Size; /* skip the all stream */ + } + } + Dmsg4(100, "H off=%lld len=%lld skip_size=%lld data_size=%lld\n", *raw-orig, *raw_len, skip_size, data_size); + } + + Dmsg4(100, "d off=%lld len=%lld skip_size=%lld data_size=%lld\n", *raw - orig, *raw_len, skip_size, data_size); + if (data_size > 0 && skip_size == 0 && *raw_len > 0) { + /* some data to read */ + size = *raw_len < data_size ? *raw_len : data_size; + data_size -= size; + *raw_len -= size; + *use_len = size; + Dmsg5(100, "D off=%lld len=%lld use_len=%lld skip_size=%lld data_size=%lld\n", *raw-orig, *raw_len, + *use_len, skip_size, data_size); + return true; + } + } + + return false; +} diff --git a/src/findlib/win32filter.h b/src/findlib/win32filter.h new file mode 100644 index 00000000..d65f073d --- /dev/null +++ b/src/findlib/win32filter.h @@ -0,0 +1,79 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +/* Pulled from other files by Alain Spineux */ + +#ifndef WIN32FILTER_H_ +#define WIN32FILTER_H_ + +#include "bacula.h" + +/* this should physically correspond to WIN32_STREAM_ID + * from winbase.h on Win32. We didn't inlcude cStreamName + * as we don't use it and don't need it for a correct struct size. 
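+ * The four fixed fields below are 4 + 4 + 8 + 4 = 20 bytes on the wire,
+ * which is the WIN32_STREAM_HEADER_SIZE used by the parser in
+ * win32filter.c; sizeof(BWIN32_STREAM_ID) itself may be larger because
+ * of trailing alignment padding, which is presumably why the parser
+ * copies WIN32_STREAM_HEADER_SIZE bytes rather than sizeof() bytes.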
+ */ + +#define WIN32_BACKUP_DATA 1 + +typedef struct _BWIN32_STREAM_ID { + int32_t dwStreamId; + int32_t dwStreamAttributes; + int64_t Size; + int32_t dwStreamNameSize; +} BWIN32_STREAM_ID, *LPBWIN32_STREAM_ID ; + +class Win32Filter +{ +public: + bool initialized; + int64_t skip_size; /* how many bytes we have to skip before next header */ + int64_t data_size; /* how many data are expected in the stream */ + int header_pos; /* the part of the header that was filled in by previous record */ + + BWIN32_STREAM_ID header; + + Win32Filter() { + init(); + }; + + void init() { + initialized = false; + skip_size = 0; + data_size = 0; + header_pos = 0; + }; + + void copy(Win32Filter *f) + { + skip_size = f->skip_size; + data_size = f->data_size; + header_pos = f->header_pos; + header = f->header; + initialized = (skip_size != 0 || + data_size != 0 || + header_pos != 0); + }; + + /* If the stream is HHHDDDDD, you can call have_data("HHHDDDDD", 8, ) + * and it will return "DDDDD", 0, 5 + */ + bool have_data(char **raw, int64_t *raw_len, int64_t *data_len); +}; + +#endif diff --git a/src/host.h.in b/src/host.h.in new file mode 100644 index 00000000..b110cdab --- /dev/null +++ b/src/host.h.in @@ -0,0 +1,32 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Define Host machine + * + */ + +#define HOST_OS "@host@@post_host@" +#define BACULA "@BACULA@" +#define DISTNAME "@DISTNAME@" +#define DISTVER "@DISTVER@" +#ifdef HAVE_WIN32 +#define HELPDIR "c://Program Files//Bacula//help" +#else +#define HELPDIR "@htmldir@" +#endif diff --git a/src/jcr.h b/src/jcr.h new file mode 100644 index 00000000..e8ecfedc --- /dev/null +++ b/src/jcr.h @@ -0,0 +1,593 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2019 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula JCR Structure definition for Daemons and the Library + * This definition consists of a "Global" definition common + * to all daemons and used by the library routines, and a + * daemon specific part that is enabled with #defines. + * + * Written by Kern Sibbald, Nov MM + */ + + +#ifndef __JCR_H_ +#define __JCR_H_ 1 + +/* Backup/Verify level code. 
These are stored in the DB */ +#define L_FULL 'F' /* Full backup */ +#define L_INCREMENTAL 'I' /* since last backup */ +#define L_DIFFERENTIAL 'D' /* since last full backup */ +#define L_SINCE 'S' +#define L_VERIFY_CATALOG 'C' /* verify from catalog */ +#define L_VERIFY_INIT 'V' /* verify save (init DB) */ +#define L_VERIFY_VOLUME_TO_CATALOG 'O' /* verify Volume to catalog entries */ +#define L_VERIFY_DISK_TO_CATALOG 'd' /* verify Disk attributes to catalog */ +#define L_VERIFY_DATA 'A' /* verify data on volume */ +#define L_BASE 'B' /* Base level job */ +#define L_NONE ' ' /* None, for Restore and Admin */ +#define L_VIRTUAL_FULL 'f' /* Virtual full backup */ + + +/* Job Types. These are stored in the DB */ +#define JT_BACKUP 'B' /* Backup Job */ +#define JT_MIGRATED_JOB 'M' /* A previous backup job that was migrated */ +#define JT_VERIFY 'V' /* Verify Job */ +#define JT_RESTORE 'R' /* Restore Job */ +#define JT_CONSOLE 'U' /* console program */ +#define JT_SYSTEM 'I' /* internal system "job" */ +#define JT_ADMIN 'D' /* admin job */ +#define JT_ARCHIVE 'A' /* Archive Job */ +#define JT_JOB_COPY 'C' /* Copy of a Job */ +#define JT_COPY 'c' /* Copy Job */ +#define JT_MIGRATE 'g' /* Migration Job */ +#define JT_SCAN 'S' /* Scan Job */ + +/* Used to handle ClientAcl in various commands, not stored in the DB */ +#define JT_BACKUP_RESTORE '*' /* Backup or Restore Job */ + +/* Job Status. Some of these are stored in the DB */ +#define JS_Canceled 'A' /* canceled by user */ +#define JS_Blocked 'B' /* blocked */ +#define JS_Created 'C' /* created but not yet running */ +#define JS_Differences 'D' /* Verify differences */ +#define JS_ErrorTerminated 'E' /* Job terminated in error */ +#define JS_WaitFD 'F' /* waiting on File daemon */ +#define JS_Incomplete 'I' /* Incomplete Job */ +#define JS_DataCommitting 'L' /* Committing data (last despool) */ +#define JS_WaitMount 'M' /* waiting for Mount */ +#define JS_Running 'R' /* running */ +#define JS_WaitSD 'S' /* waiting on the Storage daemon */ +#define JS_Terminated 'T' /* terminated normally */ +#define JS_Warnings 'W' /* Terminated normally with warnings */ + +#define JS_AttrDespooling 'a' /* SD despooling attributes */ +#define JS_WaitClientRes 'c' /* Waiting for Client resource */ +#define JS_WaitMaxJobs 'd' /* Waiting for maximum jobs */ +#define JS_Error 'e' /* Non-fatal error */ +#define JS_FatalError 'f' /* Fatal error */ +#define JS_AttrInserting 'i' /* Doing batch insert file records */ +#define JS_WaitJobRes 'j' /* Waiting for job resource */ +#define JS_DataDespooling 'l' /* Doing data despooling */ +#define JS_WaitMedia 'm' /* waiting for new media */ +#define JS_WaitPriority 'p' /* Waiting for higher priority jobs to finish */ +#define JS_WaitDevice 'q' /* Queued waiting for device */ +#define JS_WaitStoreRes 's' /* Waiting for storage resource */ +#define JS_WaitStartTime 't' /* Waiting for start time */ +#define JS_CloudUpload 'u' /* Cloud upload */ +#define JS_CloudDownload 'w' /* Cloud download */ + +/* Migration selection types. Do not change the order. 
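+ * (These selection types are exposed to the administrator through the
+ * Director's "Selection Type" directive of Migration/Copy jobs, e.g.
+ * "Selection Type = PoolOccupancy" or "Selection Type = OldestVolume".)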
*/ +enum { + MT_SMALLEST_VOL = 1, + MT_OLDEST_VOL, + MT_POOL_OCCUPANCY, + MT_POOL_TIME, + MT_POOL_UNCOPIED_JOBS, + MT_CLIENT, + MT_VOLUME, + MT_JOB, + MT_SQLQUERY +}; + +#define job_canceled(jcr) \ + (jcr->JobStatus == JS_Canceled || \ + jcr->JobStatus == JS_ErrorTerminated || \ + jcr->JobStatus == JS_FatalError \ + ) + +#define job_waiting(jcr) \ + (jcr->job_started && \ + (jcr->JobStatus == JS_WaitFD || \ + jcr->JobStatus == JS_WaitSD || \ + jcr->JobStatus == JS_WaitMedia || \ + jcr->JobStatus == JS_WaitMount || \ + jcr->JobStatus == JS_WaitStoreRes || \ + jcr->JobStatus == JS_WaitJobRes || \ + jcr->JobStatus == JS_WaitClientRes || \ + jcr->JobStatus == JS_WaitMaxJobs || \ + jcr->JobStatus == JS_WaitPriority || \ + jcr->SDJobStatus == JS_WaitMedia || \ + jcr->SDJobStatus == JS_WaitMount || \ + jcr->SDJobStatus == JS_WaitDevice || \ + jcr->SDJobStatus == JS_CloudUpload || \ + jcr->SDJobStatus == JS_CloudDownload || \ + jcr->SDJobStatus == JS_WaitMaxJobs)) + + + +#define foreach_jcr(jcr) \ + for (jcr=jcr_walk_start(); jcr; (jcr=jcr_walk_next(jcr)) ) + +#define endeach_jcr(jcr) jcr_walk_end(jcr) + +#define SD_APPEND true +#define SD_READ false + +/* Forward referenced structures */ +class JCR; +class BSOCK; +struct FF_PKT; +class BDB; +struct ATTR_DBR; +class Plugin; +struct save_pkt; +struct bpContext; + + +#ifdef FILE_DAEMON +class VSSClient; +class htable; +class BACL; +class BXATTR; +class snapshot_manager; + +struct CRYPTO_CTX { + bool pki_sign; /* Enable PKI Signatures? */ + bool pki_encrypt; /* Enable PKI Encryption? */ + DIGEST *digest; /* Last file's digest context */ + X509_KEYPAIR *pki_keypair; /* Encryption key pair */ + alist *pki_signers; /* Trusted Signers */ + alist *pki_recipients; /* Trusted Recipients */ + CRYPTO_SESSION *pki_session; /* PKE Public Keys + Symmetric Session Keys */ + POOLMEM *pki_session_encoded; /* Cached DER-encoded copy of pki_session */ + int32_t pki_session_encoded_size; /* Size of DER-encoded pki_session */ + POOLMEM *crypto_buf; /* Encryption/Decryption buffer */ +}; +#endif + +typedef void (JCR_free_HANDLER)(JCR *jcr); + +/* Job Control Record (JCR) */ +class JCR { +private: + pthread_mutex_t mutex; /* jcr mutex */ + pthread_mutex_t mutex_auth; /* used during authentication */ + volatile int32_t _use_count; /* use count */ + int32_t m_JobType; /* backup, restore, verify ... */ + int32_t m_JobLevel; /* Job level */ + bool my_thread_killable; /* can we kill the thread? 
*/ +public: + void lock() {P(mutex); }; + void unlock() {V(mutex); }; + void lock_auth() {P(mutex_auth);}; + void unlock_auth() {V(mutex_auth);}; + void inc_use_count(void) {lock(); _use_count++; unlock(); }; + void dec_use_count(void) {lock(); _use_count--; unlock(); }; + int32_t use_count() const { return _use_count; }; + void init_mutex(void) { + pthread_mutex_init(&mutex, NULL); + pthread_mutex_init(&mutex_auth, NULL); + }; + void destroy_mutex(void) { + pthread_mutex_destroy(&mutex_auth); + pthread_mutex_destroy(&mutex); + }; + bool is_internal_job() {return (JobId == 0 || m_JobType == JT_SYSTEM || m_JobType == JT_CONSOLE); }; + bool is_job_canceled() {return job_canceled(this); }; + bool is_canceled() {return job_canceled(this); }; + bool is_incomplete() { return JobStatus == JS_Incomplete; }; + bool is_JobLevel(int32_t JobLevel) { return JobLevel == m_JobLevel; }; + bool is_JobType(int32_t JobType) { return JobType == m_JobType; }; + bool is_JobStatus(int32_t aJobStatus) { return aJobStatus == JobStatus; }; + void setJobLevel(int32_t JobLevel) { m_JobLevel = JobLevel; }; + void setJobType(int32_t JobType) { m_JobType = JobType; }; + void forceJobStatus(int32_t aJobStatus) { JobStatus = aJobStatus; }; + void setJobStarted(); + int32_t getJobType() const { return m_JobType; }; + int32_t getJobLevel() const { return m_JobLevel; }; + int32_t getJobStatus() const { return JobStatus; }; + bool no_client_used() const { + return (m_JobLevel == L_VIRTUAL_FULL); + }; + bool can_be_stopped(); /* in lib/jcr.c */ + const char *get_OperationName(); /* in lib/jcr.c */ + const char *get_ActionName(bool past); /* in lib/jcr.c */ + void setJobStatus(int JobStatus); /* in lib/jcr.c */ + bool sendJobStatus(); /* in lib/jcr.c */ + bool sendJobStatus(int JobStatus); /* in lib/jcr.c */ + bool JobReads(); /* in lib/jcr.c */ + void my_thread_send_signal(int sig); /* in lib/jcr.c */ + void set_killable(bool killable); /* in lib/jcr.c */ + bool is_killable() const { return my_thread_killable; }; + + /* Global part of JCR common to all daemons */ + dlink link; /* JCR chain link */ + pthread_t my_thread_id; /* id of thread controlling jcr */ + BSOCK *dir_bsock; /* Director bsock or NULL if we are him */ + BSOCK *store_bsock; /* Storage connection socket */ + BSOCK *file_bsock; /* File daemon connection socket */ + JCR_free_HANDLER *daemon_free_jcr; /* Local free routine */ + dlist *msg_queue; /* Queued messages */ + pthread_mutex_t msg_queue_mutex; /* message queue mutex */ + bool dequeuing_msgs; /* Set when dequeuing messages */ + alist job_end_push; /* Job end pushed calls */ + POOLMEM *VolumeName; /* Volume name desired -- pool_memory */ + POOLMEM *errmsg; /* edited error message */ + char Job[MAX_NAME_LENGTH]; /* Unique name of this Job */ + char event[MAX_NAME_LENGTH]; /* Current event (python) */ + uint32_t eventType; /* Current event type (plugin) */ + + uint32_t JobId; /* Director's JobId */ + uint32_t VolSessionId; + uint32_t VolSessionTime; + uint32_t JobFiles; /* Number of files written, this job */ + uint32_t JobErrors; /* Number of non-fatal errors this job */ + uint32_t SDErrors; /* Number of non-fatal SD errors */ + uint32_t JobWarnings; /* Number of warning messages */ + uint32_t LastRate; /* Last sample bytes/sec */ + uint64_t JobBytes; /* Number of bytes processed this job */ + uint64_t LastJobBytes; /* Last sample number bytes */ + uint64_t ReadBytes; /* Bytes read -- before compression */ + uint64_t CommBytes; /* FD comm line bytes sent to SD */ + uint64_t CommCompressedBytes; /* FD comm 
line compressed bytes sent to SD */ + FileId_t FileId; /* Last FileId used */ + volatile int32_t JobStatus; /* ready, running, blocked, terminated */ + int32_t JobPriority; /* Job priority */ + time_t sched_time; /* job schedule time, i.e. when it should start */ + time_t initial_sched_time; /* original sched time before any reschedules are done */ + time_t start_time; /* when job actually started */ + time_t run_time; /* used for computing speed */ + time_t last_time; /* Last sample time */ + time_t end_time; /* job end time */ + time_t wait_time_sum; /* cumulative wait time since job start */ + time_t wait_time; /* timestamp when job have started to wait */ + time_t job_started_time; /* Time when the MaxRunTime start to count */ + POOLMEM *client_name; /* client name */ + POOLMEM *JobIds; /* User entered string of JobIds */ + POOLMEM *RestoreBootstrap; /* Bootstrap file to restore */ + POOLMEM *stime; /* start time for incremental/differential */ + char *sd_auth_key; /* SD auth key */ + MSGS *jcr_msgs; /* Copy of message resource -- actually used */ + uint32_t ClientId; /* Client associated with Job */ + char *where; /* prefix to restore files to */ + char *RegexWhere; /* file relocation in restore */ + alist *where_bregexp; /* BREGEXP alist for path manipulation */ + int32_t cached_pnl; /* cached path length */ + POOLMEM *cached_path; /* cached path */ + bool prefix_links; /* Prefix links with Where path */ + bool gui; /* set if gui using console */ + bool authenticated; /* set when client authenticated */ + bool cached_attribute; /* set if attribute is cached */ + bool batch_started; /* is batch mode already started ? */ + bool cmd_plugin; /* Set when processing a command Plugin = */ + bool opt_plugin; /* Set when processing an option Plugin = */ + bool keep_path_list; /* Keep newly created path in a hash */ + bool accurate; /* true if job is accurate */ + bool HasBase; /* True if job use base jobs */ + bool rerunning; /* rerunning an incomplete job */ + bool job_started; /* Set when the job is actually started */ + bool sd_calls_client; /* Set for SD to call client (FD/SD) */ + bool exiting; /* Set when exiting */ + + void *Python_job; /* Python Job Object */ + void *Python_events; /* Python Events Object */ + POOLMEM *attr; /* Attribute string from SD */ + BDB *db; /* database pointer */ + BDB *db_batch; /* database pointer for batch and accurate */ + uint64_t nb_base_files; /* Number of base files */ + uint64_t nb_base_files_used; /* Number of useful files in base */ + + ATTR_DBR *ar; /* DB attribute record */ + guid_list *id_list; /* User/group id to name list */ + + bpContext *plugin_ctx_list; /* list of contexts for plugins */ + bpContext *plugin_ctx; /* current plugin context */ + Plugin *plugin; /* plugin instance */ + save_pkt *plugin_sp; /* plugin save packet */ + char *plugin_options; /* user set options for plugin */ + POOLMEM *comment; /* Comment for this Job */ + int64_t max_bandwidth; /* Bandwidth limit for this Job */ + htable *path_list; /* Directory list (used by findlib) */ + int job_uid; /* UID used during job session */ + char *job_user; /* Specific permission for a job */ + char *job_group; /* Specific permission for a job */ + POOLMEM *StatusErrMsg; /* Error message displayed in the job report */ + uint32_t getErrors() { return JobErrors + SDErrors; }; /* Get error count */ + + /* Daemon specific part of JCR */ + /* This should be empty in the library */ + +#ifdef DIRECTOR_DAEMON + /* Director Daemon specific data part of JCR */ + bool SD_msg_chan_started; /* 
True if the msg thread is started */ + pthread_t SD_msg_chan; /* Message channel thread id */ + pthread_cond_t term_wait; /* Wait for job termination */ + workq_ele_t *work_item; /* Work queue item if scheduled */ + BSOCK *ua; /* User agent */ + JOB *job; /* Job resource */ + JOB *verify_job; /* Job resource of verify previous job */ + alist *plugin_config; /* List of ConfigFile needed for restore */ + alist *rstorage; /* Read storage possibilities */ + STORE *rstore; /* Selected read storage */ + alist *wstorage; /* Write storage possibilities */ + STORE *wstore; /* Selected write storage */ + CLIENT *client; /* Client resource */ + POOL *pool; /* Pool resource = write for migration */ + POOL *next_pool; /* Next pool override */ + POOL *rpool; /* Read pool. Used only in migration */ + POOL *full_pool; /* Full backup pool resource */ + POOL *vfull_pool; /* Virtual Full backup pool resource */ + POOL *inc_pool; /* Incremental backup pool resource */ + POOL *diff_pool; /* Differential backup pool resource */ + FILESET *fileset; /* FileSet resource */ + CAT *catalog; /* Catalog resource */ + MSGS *messages; /* Default message handler */ + uint32_t SDJobFiles; /* Number of files written, this job */ + uint64_t SDJobBytes; /* Number of bytes processed this job */ + volatile int32_t SDJobStatus; /* Storage Job Status */ + volatile int32_t FDJobStatus; /* File daemon Job Status */ + uint32_t ExpectedFiles; /* Expected restore files */ + uint32_t MediaId; /* DB record IDs associated with this job */ + int32_t FileIndex; /* Last FileIndex processed */ + utime_t MaxRunSchedTime; /* max run time in seconds from Initial Scheduled time */ + POOLMEM *fname; /* name to put into catalog */ + POOLMEM *component_fname; /* Component info file name */ + POOLMEM *media_type; /* Set if user supplied Storage */ + FILE *component_fd; /* Component info file desc */ + JOB_DBR jr; /* Job DB record for current job */ + JOB_DBR previous_jr; /* previous job database record */ + JOB *previous_job; /* Job resource of migration previous job */ + JCR *wjcr; /* JCR for migration/copy write job */ + char FSCreateTime[MAX_TIME_LENGTH]; /* FileSet CreateTime as returned from DB */ + char since[MAX_TIME_LENGTH]; /* since time */ + char PrevJob[MAX_NAME_LENGTH]; /* Previous job name assiciated with since time */ + union { + JobId_t RestoreJobId; /* Id specified by UA */ + JobId_t MigrateJobId; + }; + POOLMEM *client_uname; /* client uname */ + POOLMEM *pool_source; /* Where pool came from */ + POOLMEM *next_pool_source; /* Where next pool came from */ + POOLMEM *rpool_source; /* Where migrate read pool came from */ + POOLMEM *rstore_source; /* Where read storage came from */ + POOLMEM *wstore_source; /* Where write storage came from */ + POOLMEM *catalog_source; /* Where catalog came from */ + POOLMEM *next_vol_list; /* Volumes previously requested */ + rblist *bsr_list; /* Bootstrap that can be needed during restore */ + int32_t replace; /* Replace option */ + int32_t NumVols; /* Number of Volume used in pool */ + int32_t reschedule_count; /* Number of times rescheduled */ + int32_t FDVersion; /* File daemon version number */ + int32_t SDVersion; /* Storage daemon version number */ + int64_t spool_size; /* Spool size for this job */ + utime_t snapshot_retention; /* Snapshot retention (from Client/Job resource) */ + volatile bool sd_msg_thread_done; /* Set when Storage message thread done */ + bool wasVirtualFull; /* set if job was VirtualFull */ + bool IgnoreDuplicateJobChecking; /* set in migration jobs */ + bool spool_data; 
/* Spool data in SD */ + bool acquired_resource_locks; /* set if resource locks acquired */ + bool term_wait_inited; /* Set when cond var inited */ + bool fn_printed; /* printed filename */ + bool write_part_after_job; /* Write part after job in SD */ + bool needs_sd; /* set if SD needed by Job */ + bool cloned; /* set if cloned */ + bool unlink_bsr; /* Unlink bsr file created */ + bool Snapshot; /* Snapshot used by FD (VSS on Windows) */ + bool Encrypt; /* Encryption used by FD */ + bool stats_enabled; /* Keep all job records in a table for long term statistics */ + bool no_maxtime; /* Don't check Max*Time for this JCR */ + bool keep_sd_auth_key; /* Clear or not the SD auth key after connection*/ + bool use_accurate_chksum; /* Use or not checksum option in accurate code */ + bool run_pool_override; + bool cmdline_next_pool_override; /* Next pool is overridden */ + bool run_next_pool_override; /* Next pool is overridden */ + bool run_full_pool_override; + bool run_vfull_pool_override; + bool run_inc_pool_override; + bool run_diff_pool_override; + bool sd_canceled; /* set if SD canceled */ + bool RescheduleIncompleteJobs; /* set if incomplete can be rescheduled */ + bool use_all_JobIds; /* Use all jobids present in command line */ + bool sd_client; /* This job runs as SD client */ + bool dummy_jobmedia; /* Dummy JobMedia written */ +#endif /* DIRECTOR_DAEMON */ + + +#ifdef FILE_DAEMON + /* File Daemon specific part of JCR */ + BSOCK *sd_calls_client_bsock; /* Socket used by SDCallsClient feature */ + uint32_t num_files_examined; /* files examined this job */ + POOLMEM *last_fname; /* last file saved/verified */ + POOLMEM *job_metadata; /* VSS job metadata */ + pthread_cond_t job_start_wait; /* Wait for SD to start Job */ + BACL *bacl; /* ACLs for backup/restore */ + BXATTR *bxattr; /* Extended Attributes for backup/restore */ + int32_t last_type; /* type of last file saved/verified */ + int incremental; /* set if incremental for SINCE */ + time_t last_stat_time; /* Last time stats sent to Dir */ + time_t stat_interval; /* Stats send interval */ + utime_t mtime; /* begin time for SINCE */ + int listing; /* job listing in estimate */ + long Ticket; /* Ticket */ + char *big_buf; /* I/O buffer */ + POOLMEM *compress_buf; /* Compression buffer */ + int32_t compress_buf_size; /* Length of compression buffer */ + void *pZLIB_compress_workset; /* zlib compression session data */ + void *LZO_compress_workset; /* lzo compression session data */ + int32_t replace; /* Replace options */ + int32_t buf_size; /* length of buffer */ + FF_PKT *ff; /* Find Files packet */ + char stored_addr[MAX_NAME_LENGTH]; /* storage daemon address */ + char PrevJob[MAX_NAME_LENGTH]; /* Previous job name assiciated with since time */ + uint32_t ExpectedFiles; /* Expected restore files */ + uint32_t StartFile; + uint32_t EndFile; + uint32_t StartBlock; + uint32_t EndBlock; + pthread_t heartbeat_id; /* id of heartbeat thread */ + volatile bool hb_started; /* heartbeat running */ + BSOCK *hb_bsock; /* duped SD socket */ + BSOCK *hb_dir_bsock; /* duped DIR socket */ + alist *RunScripts; /* Commands to run before and after job */ + CRYPTO_CTX crypto; /* Crypto ctx */ + DIRRES* director; /* Director resource */ + bool Snapshot; /* Snapshot used by FD (or VSS) */ + bool got_metadata; /* set when found job_metatdata */ + bool multi_restore; /* Dir can do multiple storage restore */ + bool interactive_session; /* Use interactive session with the SD */ + htable *file_list; /* Previous file list (accurate mode) */ + uint64_t 
base_size; /* compute space saved with base job */ + utime_t snapshot_retention; /* Snapshot retention (from director) */ + snapshot_manager *snap_mgr; /* Snapshot manager */ + VSSClient *pVSSClient; /* VSS handler */ +#endif /* FILE_DAEMON */ + + +#ifdef STORAGE_DAEMON + /* Storage Daemon specific part of JCR */ + JCR *next_dev; /* next JCR attached to device */ + JCR *prev_dev; /* previous JCR attached to device */ + dlist *jobmedia_queue; /* JobMedia queue */ + char *dir_auth_key; /* Dir auth key */ + pthread_cond_t job_start_wait; /* Wait for FD to start Job */ + int32_t type; + DCR *read_dcr; /* device context for reading */ + DCR *dcr; /* device context record */ + alist *dcrs; /* list of dcrs open */ + POOLMEM *job_name; /* base Job name (not unique) */ + POOLMEM *fileset_name; /* FileSet */ + POOLMEM *fileset_md5; /* MD5 for FileSet */ + char stored_addr[MAX_NAME_LENGTH]; /* storage daemon address */ + char client_addr[MAX_NAME_LENGTH]; /* client daemon address */ + VOL_LIST *VolList; /* list to read, freed at the end of the job */ + int32_t NumWriteVolumes; /* number of volumes written */ + int32_t NumReadVolumes; /* total number of volumes to read */ + int32_t CurReadVolume; /* current read volume number */ + int32_t label_errors; /* count of label errors */ + int32_t DIRVersion; /* Director version number */ + int32_t FDVersion; /* File daemon version number */ + int32_t SDVersion; /* Storage daemon version number */ + bool session_opened; + bool interactive_session; /* Interactive session with the FD */ + bool is_ok_data_sent; /* the "3000 OK data" has been sent */ + long Ticket; /* ticket for this job */ + bool ignore_label_errors; /* ignore Volume label errors */ + bool spool_attributes; /* set if spooling attributes */ + bool no_attributes; /* set if no attributes wanted */ + int64_t spool_size; /* Spool size for this job */ + bool spool_data; /* set to spool data */ + int32_t CurVol; /* Current Volume count */ + DIRRES* director; /* Director resource */ + alist *write_store; /* list of write storage devices sent by DIR */ + alist *read_store; /* list of read devices sent by DIR */ + alist *reserve_msgs; /* reserve fail messages */ + bool write_part_after_job; /* Set to write part after job */ + bool PreferMountedVols; /* Prefer mounted vols rather than new */ + bool Resched; /* Job may be rescheduled */ + bool bscan_insert_jobmedia_records; /*Bscan: needs to insert job media records */ + bool sd_client; /* Set if acting as client */ + bool use_new_match_all; /* TODO: Remove when the match_bsr() will be well tested */ + + /* Parmaters for Open Read Session */ + BSR *bsr; /* Bootstrap record -- has everything */ + bool mount_next_volume; /* set to cause next volume mount */ + uint32_t read_VolSessionId; + uint32_t read_VolSessionTime; + uint32_t read_StartFile; + uint32_t read_EndFile; + uint32_t read_StartBlock; + uint32_t read_EndBlock; + /* Device wait times */ + int32_t min_wait; + int32_t max_wait; + int32_t max_num_wait; + int32_t wait_sec; + int32_t rem_wait_sec; + int32_t num_wait; +#endif /* STORAGE_DAEMON */ + +}; + +/* + * Setting a NULL in tsd doesn't clear the tsd but instead tells + * pthreads not to call the tsd destructor. Consequently, we + * define this *invalid* jcr address and stuff it in the tsd + * when the jcr is not valid. + */ +#define INVALID_JCR ((JCR *)(-1)) + + +/* + * Structure for all daemons that keeps some summary + * info on the last job run. 
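+ * + * (A note inferred from the declarations just below, an assumption rather than + * original comment text: each finished job is summarized in one s_last_job + * entry kept on the global last_jobs dlist, which is what the daemons report + * as their list of terminated jobs.)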
+ */ +struct s_last_job { + dlink link; + int32_t Errors; /* FD/SD errors */ + int32_t JobType; + int32_t JobStatus; + int32_t JobLevel; + uint32_t JobId; + uint32_t VolSessionId; + uint32_t VolSessionTime; + uint32_t JobFiles; + uint64_t JobBytes; + utime_t start_time; + utime_t end_time; + char Job[MAX_NAME_LENGTH]; +}; + +extern struct s_last_job last_job; +extern DLL_IMP_EXP dlist *last_jobs; + + +/* The following routines are found in lib/jcr.c */ +extern int get_next_jobid_from_list(char **p, uint32_t *JobId); +extern bool init_jcr_subsystem(void); +extern JCR *new_jcr(int size, JCR_free_HANDLER *daemon_free_jcr); +extern JCR *get_jcr_by_id(uint32_t JobId); +extern JCR *get_jcr_by_session(uint32_t SessionId, uint32_t SessionTime); +extern JCR *get_jcr_by_partial_name(char *Job); +extern JCR *get_jcr_by_full_name(char *Job); +extern JCR *get_next_jcr(JCR *jcr); +extern void set_jcr_job_status(JCR *jcr, int JobStatus); +extern int DLL_IMP_EXP num_jobs_run; + +#ifdef DEBUG +extern void b_free_jcr(const char *file, int line, JCR *jcr); +#define free_jcr(jcr) b_free_jcr(__FILE__, __LINE__, (jcr)) +#else +extern void free_jcr(JCR *jcr); +#endif + +/* Used to display specific job information after a fatal signal */ +typedef void (dbg_jcr_hook_t)(JCR *jcr, FILE *fp); +extern void dbg_jcr_add_hook(dbg_jcr_hook_t *fct); + +#endif /* __JCR_H_ */ diff --git a/src/lib/Makefile.in b/src/lib/Makefile.in new file mode 100644 index 00000000..a73cd9aa --- /dev/null +++ b/src/lib/Makefile.in @@ -0,0 +1,361 @@ +# +# Copyright (C) 2000-2017 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +@MCOMMON@ + +# Define library versions +LIBBAC_LT_RELEASE = @LIBBAC_LT_RELEASE@ +LIBBACCFG_LT_RELEASE = @LIBBACCFG_LT_RELEASE@ + +ZLIBS = @ZLIBS@ +DEBUG = @DEBUG@ +CAP_LIBS = @CAP_LIBS@ + +# +srcdir = . +VPATH = . +.PATH: . + +# one up +basedir = .. +# top dir +topdir = ../.. 
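+ +# Note (an inference, not original Makefile.in text): the LIBBAC_LT_RELEASE and +# LIBBACCFG_LT_RELEASE values above are substituted by configure and passed to +# libtool as "-release" in the libbac.la and libbaccfg.la link rules below, so +# a 9.4.2 build typically installs shared objects named like libbac-9.4.2.so +# and libbaccfg-9.4.2.so beside the .la archives (exact names are platform +# dependent, as the libbac-*.so cleanup in the libtool-install target suggests).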
+# this dir relative to top dir +thisdir = src/lib + +first_rule: all +dummy: + +# +# include files installed when using libtool +# +INCLUDE_FILES = ../baconfig.h ../bacula.h ../bc_types.h \ + ../config.h ../jcr.h ../version.h \ + address_conf.h alist.h attr.h base64.h bsockcore.h \ + berrno.h bits.h bjson.h bpipe.h breg.h bregex.h \ + bsock.h btime.h btimers.h crypto.h dlist.h \ + flist.h fnmatch.h guid_to_name.h htable.h lex.h \ + lib.h lz4.h md5.h mem_pool.h message.h \ + openssl.h parse_conf.h plugins.h protos.h queue.h rblist.h \ + runscript.h rwlock.h serial.h sellist.h sha1.h sha2.h \ + smartall.h status.h tls.h tree.h var.h \ + watchdog.h workq.h ini.h \ + lockmgr.h devlock.h output.h bwlimit.h + +# +# libbac +# +LIBBAC_SRCS = attr.c base64.c berrno.c bsys.c binflate.c bget_msg.c \ + bnet.c bnet_server.c bsock.c bpipe.c bsnprintf.c btime.c \ + cram-md5.c crc32.c crypto.c daemon.c edit.c fnmatch.c \ + guid_to_name.c hmac.c jcr.c lex.c lz4.c alist.c dlist.c \ + md5.c message.c mem_pool.c openssl.c \ + plugins.c priv.c queue.c bregex.c bsockcore.c \ + runscript.c rwlock.c scan.c sellist.c serial.c sha1.c sha2.c \ + signal.c smartall.c rblist.c tls.c tree.c \ + util.c var.c watchdog.c workq.c btimers.c \ + worker.c flist.c \ + address_conf.c breg.c htable.c lockmgr.c devlock.c output.c bwlimit.c + +LIBBAC_OBJS = $(LIBBAC_SRCS:.c=.o) +LIBBAC_LOBJS = $(LIBBAC_SRCS:.c=.lo) + +# +# libbaccfg (config functions) +# +LIBBACCFG_SRCS = ini.c parse_conf.c res.c bjson.c +LIBBACCFG_OBJS = $(LIBBACCFG_SRCS:.c=.o) +LIBBACCFG_LOBJS = $(LIBBACCFG_SRCS:.c=.lo) + +.SUFFIXES: .c .cc .o .lo .ch .dvi .pdf .tex .view .w .1 +.PHONY: +.DONTCARE: + +# inference rules +.c.o: + @echo "Compiling $<" + $(NO_ECHO)$(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) $< + +.c.lo: + @echo "Compiling $<" + $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) $< + +.cc.o: + @echo "Compiling $<" + $(NO_ECHO)$(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) $< + +.cc.lo: + @echo "Compiling $<" + $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) $< + +# specific build rules + +#------------------------------------------------------------------------- +all: Makefile libbac$(DEFAULT_ARCHIVE_TYPE) libbaccfg$(DEFAULT_ARCHIVE_TYPE) + @echo "==== Make of lib is good ====" + @echo " " + +libbac.a: $(LIBBAC_OBJS) + @echo "Making $@ ..." + $(AR) rc $@ $(LIBBAC_OBJS) + $(RANLIB) $@ + +libbac.la: Makefile $(LIBBAC_LOBJS) + @echo "Making $@ ..." + $(LIBTOOL_LINK) $(CXX) $(DEFS) $(DEBUG) $(LDFLAGS) -o $@ $(LIBBAC_LOBJS) -export-dynamic -rpath $(libdir) -release $(LIBBAC_LT_RELEASE) $(WRAPLIBS) $(CAP_LIBS) $(ZLIBS) $(OPENSSL_LIBS) $(LIBS) $(DLLIBS) + +libbaccfg.a: $(LIBBACCFG_OBJS) + @echo "Making $@ ..." + $(AR) rc $@ $(LIBBACCFG_OBJS) + $(RANLIB) $@ + +libbaccfg.la: Makefile $(LIBBACCFG_LOBJS) + @echo "Making $@ ..." 
+ $(LIBTOOL_LINK) $(CXX) $(DEFS) $(DEBUG) $(LDFLAGS) -o $@ $(LIBBACCFG_LOBJS) -export-dynamic -rpath $(libdir) -release $(LIBBACCFG_LT_RELEASE) $(OPENSSL_LIBS) $(LIBS) + +Makefile: $(srcdir)/Makefile.in $(topdir)/config.status + cd $(topdir) \ + && CONFIG_FILES=$(thisdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status + +wait_test: Makefile bsys.c + $(RMF) bsys.o + $(CXX) -DTEST_PROGRAM $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) bsys.c + $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L. -L../findlib -o $@ bsys.o $(DLIB) -lbac -lbacfind -lm $(LIBS) $(OPENSSL_LIBS) + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) $@ $(DESTDIR)$(sbindir)/ + $(RMF) bsys.o + $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) bsys.c + +output_test: Makefile output.c unittests.o + $(RMF) output.o + $(CXX) -DTEST_PROGRAM $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) output.c + $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L. -o $@ output.o unittests.o $(DLIB) -lbac -lm $(LIBS) $(OPENSSL_LIBS) + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) $@ $(DESTDIR)$(sbindir)/ + $(RMF) output.o + $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) output.c + +lockmgr_test: Makefile libbac.la lockmgr.c unittests.o + $(RMF) lockmgr.o + $(CXX) -DTEST_PROGRAM $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) lockmgr.c + $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L. -o $@ lockmgr.o unittests.o $(DLIB) -lbac -lm $(LIBS) $(OPENSSL_LIBS) + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) $@ $(DESTDIR)$(sbindir)/ + $(RMF) lockmgr.o + $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) lockmgr.c + +base64_test: Makefile base64.c unittests.o + $(RMF) base64.o + $(CXX) -DTEST_PROGRAM $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) base64.c + $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L. -o $@ base64.o unittests.o $(DLIB) -lbac -lm $(LIBS) $(OPENSSL_LIBS) + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) $@ $(DESTDIR)$(sbindir)/ + $(RMF) base64.o + $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) base64.c + +flist_test: Makefile flist.c unittests.o + $(RMF) flist.o + $(CXX) -DTEST_PROGRAM $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) flist.c + $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L. -o $@ flist.o unittests.o $(DLIB) -lbac -lm $(LIBS) $(OPENSSL_LIBS) + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) $@ $(DESTDIR)$(sbindir)/ + $(RMF) flist.o + $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) flist.c + +fnmatch_test: Makefile fnmatch.c unittests.o + $(RMF) fnmatch.o + $(CXX) -DTEST_PROGRAM $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) fnmatch.c + $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L. -o $@ fnmatch.o unittests.o $(DLIB) -lbac -lm $(LIBS) $(OPENSSL_LIBS) + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) $@ $(DESTDIR)$(sbindir)/ + $(RMF) fnmatch.o + $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) fnmatch.c + +worker_test: Makefile worker.c + $(RMF) worker.o + $(CXX) -DTEST_PROGRAM $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) worker.c + $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L. 
-o $@ worker.o $(DLIB) -lbac -lm $(LIBS) $(OPENSSL_LIBS) + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) $@ $(DESTDIR)$(sbindir)/ + $(RMF) worker.o + $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) worker.c + +workq_test: Makefile workq.c + $(RMF) workq.o + $(CXX) -DTEST_PROGRAM $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) workq.c + $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L. -o $@ workq.o $(DLIB) -lbac -lm $(LIBS) $(OPENSSL_LIBS) + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) $@ $(DESTDIR)$(sbindir)/ + $(RMF) workq.o + $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) workq.c + +rwlock_test: Makefile rwlock.c + $(RMF) rwlock.o + $(CXX) -DTEST_RWLOCK $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) rwlock.c + $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L. -o $@ rwlock.o $(DLIB) -lbac -lm $(LIBS) $(OPENSSL_LIBS) + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) $@ $(DESTDIR)$(sbindir)/ + $(RMF) rwlock.o + $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) rwlock.c + +devlock_test: Makefile + $(RMF) devlock.o + $(CXX) -DTEST_devlock $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) devlock.c + $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L. -o $@ devlock.o $(DLIB) -lbac -lm $(LIBS) $(OPENSSL_LIBS) + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) $@ $(DESTDIR)$(sbindir)/ + $(RMF) devlock.o + $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) devlock.c + +htable_test: Makefile libbac.la htable.c unittests.o + $(RMF) htable.o + $(CXX) -DTEST_SMALL_HTABLE -DTEST_NON_CHAR -DTEST_PROGRAM $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) htable.c + $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L. -o $@ htable.o unittests.o $(DLIB) -lbac -lm $(LIBS) $(OPENSSL_LIBS) + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) $@ $(DESTDIR)$(sbindir)/ + $(RMF) htable.o + $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) htable.c + +alist_test: Makefile alist.c unittests.o + $(RMF) alist.o + $(CXX) -DTEST_PROGRAM $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) alist.c + $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L. -o $@ alist.o unittests.o $(DLIB) -lbac -lm $(LIBS) $(OPENSSL_LIBS) + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) $@ $(DESTDIR)$(sbindir)/ + $(RMF) alist.o + $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) alist.c + +crc32_test: Makefile crc32.c unittests.o + $(RMF) crc32.o + $(CXX) -DTEST_PROGRAM $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) crc32.c + $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L. -o $@ crc32.o unittests.o $(DLIB) -lbac -lm $(LIBS) $(OPENSSL_LIBS) + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) $@ $(DESTDIR)$(sbindir)/ + $(RMF) crc32.o + $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) crc32.c + +sellist_test: Makefile libbac.la sellist.c unittests.o + $(RMF) sellist.o + $(CXX) -DTEST_PROGRAM $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) sellist.c + $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L. 
-o $@ sellist.o unittests.o $(DLIB) -lbac -lm $(LIBS) $(OPENSSL_LIBS) + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) $@ $(DESTDIR)$(sbindir)/ + $(RMF) sellist.o + $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) sellist.c + +xml_test: Makefile libbac.la xml.c + $(RMF) xml.o + $(CXX) -DTEST_PROG $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) xml.c + $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L. -o $@ xml.o $(DLIB) -lbac -lm $(LIBS) $(OPENSSL_LIBS) + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) $@ $(DESTDIR)$(sbindir)/ + $(RMF) xml.o + $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) xml.c + +md5sum: Makefile libbac.la md5.c + $(RMF) md5.o + $(CXX) -DMD5_SUM $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) md5.c + $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L. -o $@ md5.o $(DLIB) -lbac -lm $(LIBS) $(OPENSSL_LIBS) + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) $@ $(DESTDIR)$(sbindir)/ + $(RMF) md5.o + $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) md5.c + +sha1sum: Makefile libbac.la sha1.c + $(RMF) sha1.o + $(CXX) -DSHA1_SUM $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) sha1.c + $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L. -o $@ sha1.o $(DLIB) -lbac -lm $(LIBS) $(OPENSSL_LIBS) + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) $@ $(DESTDIR)$(sbindir)/ + $(RMF) sha1.o + $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) sha1.c + +sha1_test: Makefile libbac.la sha1.c unittests.o + $(RMF) sha1.o + $(CXX) -DTEST_PROGRAM $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) sha1.c + $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L. -o $@ sha1.o unittests.o $(DLIB) -lbac -lm $(LIBS) + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) $@ $(DESTDIR)$(sbindir)/ + $(RMF) sha1.o + $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) sha1.c + +bsnprintf_test: Makefile libbac.la bsnprintf.c unittests.o + $(RMF) bsnprintf.o + $(CXX) -DTEST_PROGRAM $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) bsnprintf.c + $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L. -o $@ bsnprintf.o unittests.o $(DLIB) -lbac -lm $(LIBS) $(OPENSSL_LIBS) + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) $@ $(DESTDIR)$(sbindir)/ + $(RMF) bsnprintf.o + $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) bsnprintf.c + +ini_test: Makefile libbac.la ini.c unittests.o + $(RMF) ini.o + $(CXX) -DTEST_PROGRAM $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) ini.c + $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L. -o $@ ini.o unittests.o -lbaccfg -lbac $(DLIB) -lm $(LIBS) $(OPENSSL_LIBS) + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) $@ $(DESTDIR)$(sbindir)/ + $(RMF) ini.o + $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) ini.c + +bsockcore_test: Makefile libbac.la bsockcore.c unittests.o + $(RMF) bsockcore.o + $(CXX) -DTEST_PROGRAM $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) bsockcore.c + $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L. 
-o $@ bsockcore.o unittests.o $(DLIB) -lbac -lm $(LIBS) + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) $@ $(DESTDIR)$(sbindir)/ + $(RMF) bsockcore.o + $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) bsockcore.c + +bsock_test: Makefile libbac.la bsock.c unittests.o + $(RMF) bsock.o + $(CXX) -DTEST_PROGRAM $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) bsock.c + $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L. -o $@ bsock.o unittests.o $(DLIB) -lbac -lm $(LIBS) + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) $@ $(DESTDIR)$(sbindir)/ + $(RMF) bsock.o + $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) bsock.c + +install-includes: + $(MKDIR) $(DESTDIR)/$(includedir)/bacula + for I in $(INCLUDE_FILES); do \ + $(INSTALL_DATA) $$I $(DESTDIR)$(includedir)/bacula/`basename $$I`; \ + done + +uninstall-includes: + for I in $(INCLUDE_FILES); do \ + $(RMF) $(DESTDIR)$(includedir)/bacula/`basename $$I`; \ + done + +libtool-install: all + $(MKDIR) $(DESTDIR)$(libdir) + $(RMF) $(DESTDIR)$(libdir)/libbac-*.so $(DESTDIR)$(libdir)/libbac.la + $(RMF) $(DESTDIR)$(libdir)/libbaccfg-*.so $(DESTDIR)$(libdir)/libbaccfg.la + $(RMF) $(DESTDIR)$(libdir)/libbacpy-*.so $(DESTDIR)$(libdir)/libbacpy.la + $(LIBTOOL_INSTALL_FINISH) $(INSTALL_LIB) libbac.la $(DESTDIR)$(libdir) + $(LIBTOOL_INSTALL_FINISH) $(INSTALL_LIB) libbaccfg.la $(DESTDIR)$(libdir) + +libtool-uninstall: + $(LIBTOOL_UNINSTALL) $(RMF) $(DESTDIR)$(libdir)/libbac.la + $(LIBTOOL_UNINSTALL) $(RMF) $(DESTDIR)$(libdir)/libbaccfg.la + $(LIBTOOL_UNINSTALL) $(RMF) $(DESTDIR)$(libdir)/libbacpy.la + +install: @LIBTOOL_INSTALL_TARGET@ @INCLUDE_INSTALL_TARGET@ + +uninstall: @LIBTOOL_UNINSTALL_TARGET@ @INCLUDE_UNINSTALL_TARGET@ + +libtool-clean: + @find . -name '*.lo' -print | xargs $(LIBTOOL_CLEAN) $(RMF) + @$(RMF) *.la + @$(RMF) -r .libs _libs + +clean: libtool-clean + @$(RMF) core a.out *.o *.bak *.tex *.pdf *~ *.intpro *.extpro 1 2 3 + @$(RMF) rwlock_test md5sum sha1sum + +realclean: clean + @$(RMF) tags + +distclean: realclean + if test $(srcdir) = .; then $(MAKE) realclean; fi + (cd $(srcdir); $(RMF) Makefile) + +# Semi-automatic generation of dependencies: +# Use gcc -M because X11 `makedepend' doesn't work on all systems +# and it also includes system headers. +# `semi'-automatic since dependencies are generated at distribution time. + +depend: + @$(MV) Makefile Makefile.bak + @$(SED) "/^# DO NOT DELETE:/,$$ d" Makefile.bak > Makefile + @$(ECHO) "# DO NOT DELETE: nice dependency list follows" >> Makefile + @for src in $(LIBBAC_SRCS) $(LIBBACCFG_SRCS); do \ + $(CXX) -S -M -MT `basename $$src .c`$(DEFAULT_OBJECT_TYPE) $(CPPFLAGS) $(XINC) -I$(srcdir) -I$(basedir) $$src >> Makefile; \ + done + @if test -f Makefile ; then \ + $(RMF) Makefile.bak; \ + else \ + $(MV) Makefile.bak Makefile; \ + echo " ===== Something went wrong in make depend ====="; \ + fi + +# ----------------------------------------------------------------------- +# DO NOT DELETE: nice dependency list follows diff --git a/src/lib/address_conf.c b/src/lib/address_conf.c new file mode 100644 index 00000000..ab96bd70 --- /dev/null +++ b/src/lib/address_conf.c @@ -0,0 +1,695 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Configuration file parser for IP addresses, ipv4 and ipv6 + * + * Written by Meno Abels, June MMIV + * + */ + + +#include "bacula.h" +#ifdef HAVE_ARPA_NAMESER_H +#include <arpa/nameser.h> +#endif +#ifdef HAVE_RESOLV_H +//#include <resolv.h> +#endif + +static int add_address(dlist **out, IPADDR::i_type type, unsigned short defaultport, int family, + const char *hostname_str, const char *port_str, char *buf, int buflen); + + +IPADDR::IPADDR(const IPADDR &src) : type(src.type) +{ + memcpy(&saddrbuf, &src.saddrbuf, sizeof(saddrbuf)); + saddr = &saddrbuf.dontuse; + saddr4 = &saddrbuf.dontuse4; +#ifdef HAVE_IPV6 + saddr6 = &saddrbuf.dontuse6; +#endif +} + +IPADDR::IPADDR(int af) : type(R_EMPTY) +{ +#ifdef HAVE_IPV6 + if (!(af == AF_INET6 || af == AF_INET)) { + Emsg1(M_ERROR_TERM, 0, _("Only ipv4 and ipv6 are supported (%d)\n"), af); + } +#else + if (af != AF_INET) { + Emsg1(M_ERROR_TERM, 0, _("Only ipv4 is supported (%d)\n"), af); + } +#endif + memset(&saddrbuf, 0, sizeof(saddrbuf)); + saddr = &saddrbuf.dontuse; + saddr4 = &saddrbuf.dontuse4; +#ifdef HAVE_IPV6 + saddr6 = &saddrbuf.dontuse6; +#endif + saddr->sa_family = af; + if (af == AF_INET) { + saddr4->sin_port = 0xffff; + } +#ifdef HAVE_IPV6 + else { + saddr6->sin6_port = 0xffff; + } +#endif +#ifdef HAVE_SA_LEN +#ifdef HAVE_IPV6 + saddr->sa_len = (af == AF_INET) ? sizeof(sockaddr_in) : sizeof(sockaddr_in6); +#else + saddr->sa_len = sizeof(sockaddr_in); +#endif +#endif + set_addr_any(); +} + +void IPADDR::set_type(i_type o) +{ + type = o; +} + +IPADDR::i_type IPADDR::get_type() const +{ + return type; +} + +unsigned short IPADDR::get_port_net_order() const +{ + unsigned short port = 0; + if (saddr->sa_family == AF_INET) { + port = saddr4->sin_port; + } +#ifdef HAVE_IPV6 + else { + port = saddr6->sin6_port; + } +#endif + return port; +} + +void IPADDR::set_port_net(unsigned short port) +{ + if (saddr->sa_family == AF_INET) { + saddr4->sin_port = port; + } +#ifdef HAVE_IPV6 + else { + saddr6->sin6_port = port; + } +#endif +} + +int IPADDR::get_family() const +{ + return saddr->sa_family; +} + +/* + * Note, this returns the address of the socket structure + * not the address of the socket address. + * This socket address is a union of the different types + * of sockets (IPv4, ...) available, so it is portable. + */ +struct sockaddr *IPADDR::get_sockaddr() +{ + return saddr; +} + +int IPADDR::get_sockaddr_len() +{ +#ifdef HAVE_IPV6 + return saddr->sa_family == AF_INET ? sizeof(*saddr4) : sizeof(*saddr6); +#else + return sizeof(*saddr4); +#endif +} +void IPADDR::copy_addr(IPADDR *src) +{ + if (saddr->sa_family == AF_INET) { + saddr4->sin_addr.s_addr = src->saddr4->sin_addr.s_addr; + } +#ifdef HAVE_IPV6 + else if (saddr->sa_family == AF_INET6) { + saddr6->sin6_addr = src->saddr6->sin6_addr; + } +#endif +} + +#ifdef NEED_IN6ADDR_ANY +/* The <netinet/in.h> header shall declare the following external variable. + * On OSes such as Solaris, it also requires defining XPG4_2 and EXTENSIONS, + * and we have no real idea what that will change.
+ */ +extern const struct in6_addr in6addr_any; +#endif + +void IPADDR::set_addr_any() +{ + if (saddr->sa_family == AF_INET) { + saddr4->sin_addr.s_addr = INADDR_ANY; + } +#ifdef HAVE_IPV6 + else if (saddr->sa_family == AF_INET6) { + saddr6->sin6_addr = in6addr_any; + } +#endif +} + +void IPADDR::set_addr4(struct in_addr *ip4) +{ + if (saddr->sa_family != AF_INET) { + Emsg1(M_ERROR_TERM, 0, _("You tried to assign a ipv6 address to an ipv4(%d)\n"), saddr->sa_family); + } + saddr4->sin_addr = *ip4; +} + +#ifdef HAVE_IPV6 +void IPADDR::set_addr6(struct in6_addr *ip6) +{ + if (saddr->sa_family != AF_INET6) { + Emsg1(M_ERROR_TERM, 0, _("You tried to assign an ipv4 address to an ipv6(%d)\n"), saddr->sa_family); + } + saddr6->sin6_addr = *ip6; +} +#endif + +const char *IPADDR::get_address(char *outputbuf, int outlen) +{ + outputbuf[0] = '\0'; +#ifdef HAVE_INET_NTOP +# ifdef HAVE_IPV6 + inet_ntop(saddr->sa_family, saddr->sa_family == AF_INET ? + (void*)&(saddr4->sin_addr) : (void*)&(saddr6->sin6_addr), + outputbuf, outlen); +# else + inet_ntop(saddr->sa_family, (void*)&(saddr4->sin_addr), outputbuf, outlen); +# endif +#else + bstrncpy(outputbuf, inet_ntoa(saddr4->sin_addr), outlen); +#endif + return outputbuf; +} + +const char *IPADDR::build_address_str(char *buf, int blen) +{ + char tmp[1024]; + if (get_family() == AF_INET) { + bsnprintf(buf, blen, "%s:%hu ", + get_address(tmp, sizeof(tmp) - 1), get_port_host_order()); + } else { + bsnprintf(buf, blen, "[%s]:%hu ", + get_address(tmp, sizeof(tmp) - 1), get_port_host_order()); + } + return buf; +} + +const char *build_addresses_str(dlist *addrs, char *buf, int blen) +{ + if (!addrs || addrs->size() == 0) { + bstrncpy(buf, "", blen); + return buf; + } + char *work = buf; + IPADDR *p; + foreach_dlist(p, addrs) { + char tmp[1024]; + int len = bsnprintf(work, blen, "%s", p->build_address_str(tmp, sizeof(tmp))); + if (len < 0) + break; + work += len; + blen -= len; + } + return buf; +} + +const char *get_first_address(dlist *addrs, char *outputbuf, int outlen) +{ + return ((IPADDR *)(addrs->first()))->get_address(outputbuf, outlen); +} + +int get_first_port_net_order(dlist *addrs) +{ + if (!addrs) { + return 0; + } else { + return ((IPADDR *)(addrs->first()))->get_port_net_order(); + } +} + +int get_first_port_host_order(dlist *addrs) +{ + if (!addrs) { + return 0; + } else { + return ((IPADDR *)(addrs->first()))->get_port_host_order(); + } +} + +void init_default_addresses(dlist **addr_list, int port) +{ + char buf[1024]; + unsigned short sport = port; + if (!add_address(addr_list, IPADDR::R_DEFAULT, htons(sport), AF_INET, 0, 0, buf, sizeof(buf))) { + Emsg1(M_ERROR_TERM, 0, _("Can't add default IPv4 address (%s)\n"), buf); + } + Dmsg1(20, "Initaddr %s\n", build_addresses_str(*addr_list, buf, sizeof(buf))); + +} + +static int add_address(dlist **out, IPADDR::i_type type, unsigned short defaultport, int family, + const char *hostname_str, const char *port_str, char *buf, int buflen) +{ + IPADDR *iaddr; + IPADDR *jaddr; + dlist *hostaddrs; + unsigned short port; + IPADDR::i_type intype = type; + + buf[0] = 0; + dlist *addrs = (dlist *)(*(out)); + if (!addrs) { + IPADDR *tmp = 0; + addrs = *out = New(dlist(tmp, &tmp->link)); + } + + type = (type == IPADDR::R_SINGLE_PORT || + type == IPADDR::R_SINGLE_ADDR) ? 
IPADDR::R_SINGLE : type; + if (type != IPADDR::R_DEFAULT) { + IPADDR *def = 0; + foreach_dlist(iaddr, addrs) { + if (iaddr->get_type() == IPADDR::R_DEFAULT) { + def = iaddr; + } else if (iaddr->get_type() != type) { + bsnprintf(buf, buflen, + _("Old style addresses cannot be mixed with new style. Try removing Port=nnn.")); + Dmsg1(20, "%s\n", buf); + return 0; + } + } + if (def) { + addrs->remove(def); + delete def; + } + } + + if (!port_str || port_str[0] == '\0') { + port = defaultport; + } else { + int pnum = atol(port_str); + if (0 < pnum && pnum < 0xffff) { + port = htons(pnum); + } else { + struct servent *s = getservbyname(port_str, "tcp"); + if (s) { + port = s->s_port; + } else { + bsnprintf(buf, buflen, _("Cannot resolve service(%s)"), port_str); + Dmsg1(20, "%s\n", buf); + return 0; + } + } + } + + const char *myerrstr; + hostaddrs = bnet_host2ipaddrs(hostname_str, family, &myerrstr); + if (!hostaddrs) { + bsnprintf(buf, buflen, _("Cannot resolve hostname(%s) %s"), hostname_str, + myerrstr); + Dmsg1(20, "%s\n", buf); + return 0; + } + + if (intype == IPADDR::R_SINGLE_PORT || intype == IPADDR::R_SINGLE_ADDR) { + IPADDR *addr; + if (addrs->size()) { + addr = (IPADDR *)addrs->first(); + } else { + addr = New(IPADDR(family)); + addr->set_type(type); + addr->set_port_net(defaultport); + addr->set_addr_any(); + addrs->append(addr); + } + if (intype == IPADDR::R_SINGLE_PORT) { + addr->set_port_net(port); + } + if (intype == IPADDR::R_SINGLE_ADDR) { + addr->copy_addr((IPADDR *)(hostaddrs->first())); + } + } else { + foreach_dlist(iaddr, hostaddrs) { + IPADDR *clone; + /* for duplicates */ + foreach_dlist(jaddr, addrs) { + if (iaddr->get_sockaddr_len() == jaddr->get_sockaddr_len() && + !memcmp(iaddr->get_sockaddr(), jaddr->get_sockaddr(), + iaddr->get_sockaddr_len())) + { + goto skip; /* no price */ + } + } + clone = New(IPADDR(*iaddr)); + clone->set_type(type); + clone->set_port_net(port); + addrs->append(clone); + skip: + continue; + } + } + free_addresses(hostaddrs); + return 1; +} + +/* + * Some IPv6 rules from Wikipedia: + * + * For convenience, an IPv6 address may be abbreviated to shorter + * notations by application of the following rules, where possible. + * + * 1. One or more leading zeroes from any groups of hexadecimal + * digits are removed; this is usually done to either all or none of + * the leading zeroes. For example, the group 0042 is converted to + * 42. + * + * 2. Consecutive sections of zeroes are replaced with a double + * colon (::). The double colon may only be used once in an + * address, as multiple use would render the address indeterminate. 
+ * RFC 5952 recommends that a double colon must not be used to + * denote an omitted single section of zeroes.[39] + * + * my tests + * positiv + * = { ip = { addr = 1.2.3.4; port = 1205; } ipv4 = { addr = 1.2.3.4; port = http; } } + * = { ip = { + * addr = 1.2.3.4; port = 1205; } + * ipv4 = { + * addr = 1.2.3.4; port = http; } + * ipv6 = { + * addr = 1.2.3.4; + * port = 1205; + * } + * ip = { + * addr = 1.2.3.4 + * port = 1205 + * } + * ip = { + * addr = 1.2.3.4 + * } + * ip = { + * addr = 2001:220:222::2 + * } + * ip = { + * addr = bluedot.thun.net + * } + * } + * + * negativ + * = { ip = { } } + * = { ipv4 { addr = doof.nowaytoheavenxyz.uhu; } } + * = { ipv4 { port = 4711 } } + */ +void store_addresses(LEX * lc, RES_ITEM * item, int index, int pass) +{ + int token; + enum { EMPTYLINE = 0, PORTLINE = 0x1, ADDRLINE = 0x2 } next_line = EMPTYLINE; + int exist; + char hostname_str[1024]; + char port_str[128]; + int family = 0; + char errmsg[1024]; + + + token = lex_get_token(lc, T_SKIP_EOL); + if (token != T_BOB) { + scan_err1(lc, _("Expected a block to begin with { but got: %s"), lc->str); + } + + token = lex_get_token(lc, T_SKIP_EOL); + if (token == T_EOB) { + scan_err0(lc, _("Empty addr block is not allowed")); + } + do { + if (!(token == T_UNQUOTED_STRING || token == T_IDENTIFIER)) { + scan_err1(lc, _("Expected a string but got: %s"), lc->str); + } + if (strcasecmp("ip", lc->str) == 0) { + family = AF_INET6; + } else if (strcasecmp("ipv4", lc->str) == 0) { + family = AF_INET; + } +#ifdef HAVE_IPV6 + else if (strcasecmp("ipv6", lc->str) == 0) { + family = AF_INET6; + } else { + scan_err1(lc, _("Expected a string [ip|ipv4|ipv6] but got: %s"), lc->str); + } +#else + else { + scan_err1(lc, _("Expected a string [ip|ipv4] but got: %s"), lc->str); + } +#endif + token = lex_get_token(lc, T_SKIP_EOL); + if (token != T_EQUALS) { + scan_err1(lc, _("Expected an equal = but got: %s"), lc->str); + } + token = lex_get_token(lc, T_SKIP_EOL); + if (token != T_BOB) { + scan_err1(lc, _("Expected a block to begin with { but got: %s"), lc->str); + } + token = lex_get_token(lc, T_SKIP_EOL); + exist = EMPTYLINE; + port_str[0] = hostname_str[0] = '\0'; + do { + if (token != T_IDENTIFIER) { + scan_err1(lc, _("Expected an identifier [addr|port] but got: %s"), lc->str); + } + if (strcasecmp("port", lc->str) == 0) { + next_line = PORTLINE; + if (exist & PORTLINE) { + scan_err0(lc, _("Only one port per address block")); + } + exist |= PORTLINE; + } else if (strcasecmp("addr", lc->str) == 0) { + next_line = ADDRLINE; + if (exist & ADDRLINE) { + scan_err0(lc, _("Only one addr per address block")); + } + exist |= ADDRLINE; + } else { + scan_err1(lc, _("Expected a identifier [addr|port] but got: %s"), lc->str); + } + token = lex_get_token(lc, T_SKIP_EOL); + if (token != T_EQUALS) { + scan_err1(lc, _("Expected a equal =, got: %s"), lc->str); + } + token = lex_get_token(lc, T_SKIP_EOL); + switch (next_line) { + case PORTLINE: + if (! 
+ (token == T_UNQUOTED_STRING || token == T_NUMBER || + token == T_IDENTIFIER)) { + scan_err1(lc, _("Expected a number or a string but got: %s"), lc->str); + } + bstrncpy(port_str, lc->str, sizeof(port_str)); + break; + case ADDRLINE: + if (!(token == T_UNQUOTED_STRING || token == T_IDENTIFIER)) { + scan_err1(lc, _("Expected an IP number or a hostname but got: %s"), + lc->str); + } + bstrncpy(hostname_str, lc->str, sizeof(hostname_str)); + break; + case EMPTYLINE: + scan_err0(lc, _("State machine mismatch")); + break; + } + token = lex_get_token(lc, T_SKIP_EOL); + } while (token == T_IDENTIFIER); + if (token != T_EOB) { + scan_err1(lc, _("Expected a end of block with } but got: %s"), lc->str); + } + + if (pass == 1 && !add_address((dlist **)(item->value), IPADDR::R_MULTIPLE, + htons(item->default_value), family, hostname_str, port_str, + errmsg, sizeof(errmsg))) { + scan_err3(lc, _("Cannot add hostname(%s) and port(%s) to addrlist (%s)"), + hostname_str, port_str, errmsg); + } + token = scan_to_next_not_eol(lc); + } while ((token == T_IDENTIFIER || token == T_UNQUOTED_STRING)); + if (token != T_EOB) { + scan_err1(lc, _("Expected an end of block with } but got: %s"), lc->str); + } +} + +void store_addresses_address(LEX * lc, RES_ITEM * item, int index, int pass) +{ + char errmsg[1024]; + int token = lex_get_token(lc, T_SKIP_EOL); + if (!(token == T_UNQUOTED_STRING || token == T_NUMBER || token == T_IDENTIFIER)) { + scan_err1(lc, _("Expected an IP number or a hostname, got: %s"), lc->str); + } + if (pass == 1 && !add_address((dlist **)(item->value), IPADDR::R_SINGLE_ADDR, + htons(item->default_value), AF_INET, lc->str, 0, + errmsg, sizeof(errmsg))) { + scan_err2(lc, _("Cannot add port (%s) to (%s)"), lc->str, errmsg); + } +} + +void store_addresses_port(LEX * lc, RES_ITEM * item, int index, int pass) +{ + char errmsg[1024]; + int token = lex_get_token(lc, T_SKIP_EOL); + if (!(token == T_UNQUOTED_STRING || token == T_NUMBER || token == T_IDENTIFIER)) { + scan_err1(lc, _("Expected a port number or string, got: %s"), lc->str); + } + if (pass == 1 && !add_address((dlist **)(item->value), IPADDR::R_SINGLE_PORT, + htons(item->default_value), AF_INET, 0, lc->str, + errmsg, sizeof(errmsg))) { + scan_err2(lc, _("Cannot add port (%s) to (%s)"), lc->str, errmsg); + } +} + +void free_addresses(dlist * addrs) +{ + while (!addrs->empty()) { + IPADDR *ptr = (IPADDR*)addrs->first(); + addrs->remove(ptr); + delete ptr; + } + delete addrs; +} + +int sockaddr_get_port_net_order(const struct sockaddr *client_addr) +{ + if (client_addr->sa_family == AF_INET) { + return ((struct sockaddr_in *)client_addr)->sin_port; + } +#ifdef HAVE_IPV6 + else { + return ((struct sockaddr_in6 *)client_addr)->sin6_port; + } +#endif + return -1; +} + +int sockaddr_get_port(const struct sockaddr *client_addr) +{ + if (client_addr->sa_family == AF_INET) { + return ntohs(((struct sockaddr_in *)client_addr)->sin_port); + } +#ifdef HAVE_IPV6 + else { + return ntohs(((struct sockaddr_in6 *)client_addr)->sin6_port); + } +#endif + return -1; +} + + +char *sockaddr_to_ascii(const struct sockaddr *sa, int socklen, char *buf, int buflen) +{ +#ifdef HAVE_GETNAMEINFO + /* This is the more modern way of doing it */ + char clienthost[NI_MAXHOST]; + char clientservice[NI_MAXSERV]; + int status = 1; + if (sa->sa_family == AF_INET) { + status = getnameinfo(sa, sizeof(sockaddr_in), clienthost, sizeof(clienthost), + clientservice, sizeof(clientservice), + NI_NUMERICHOST | NI_NUMERICSERV); + } +#ifdef HAVE_IPV6 + else { + status = getnameinfo(sa, 
sizeof(sockaddr_in6), clienthost, sizeof(clienthost), + clientservice, sizeof(clientservice), + NI_NUMERICHOST | NI_NUMERICSERV); + } +#endif + if (status == 0) { + /* Enclose IPv6 in [] */ + if (strchr(clienthost, ':') != NULL) { + bsnprintf(buf, buflen, "[%s]", clienthost); + } else { + bstrncpy(buf, clienthost, buflen); + } + } else { + bstrncpy(buf, "Hostname not found", buflen); + } + +#else +#ifdef HAVE_INET_NTOP + inet_ntop(sa->sa_family, +# ifdef HAVE_IPV6 + sa->sa_family == AF_INET ? + (void*)&(((struct sockaddr_in*)sa)->sin_addr) : + (void*)&(((struct sockaddr_in6*)sa)->sin6_addr), +# else + (void*)&(((struct sockaddr_in*)sa)->sin_addr), +# endif /* HAVE_IPV6 */ + buf, buflen); +#else + bstrncpy(buf, inet_ntoa(((struct sockaddr_in *)sa)->sin_addr), buflen); +#endif +#endif + return buf; +} + +/* + * Remove duplicate IP addresses. + */ +void remove_duplicate_addresses(dlist *addr_list) +{ + IPADDR *ipaddr, *next, *duplicate; + /* + * Remove any duplicate addresses. + */ + for (ipaddr = (IPADDR *)addr_list->first(); ipaddr; + ipaddr = (IPADDR *)addr_list->next(ipaddr)) { + for (next = (IPADDR *)addr_list->next(ipaddr); next; ) { + duplicate = NULL; + if (ipaddr->get_sockaddr_len() == next->get_sockaddr_len() && + memcmp(ipaddr->get_sockaddr(), next->get_sockaddr(), + ipaddr->get_sockaddr_len()) == 0) { + duplicate = next; + } + next = (IPADDR *)addr_list->next(next); + if (duplicate) { + addr_list->remove(duplicate); /* remove from list */ + delete duplicate; /* free it */ + } + } + } +} + +#ifdef HAVE_OLD_SOCKOPT +int inet_aton(const char *cp, struct in_addr *inp) +{ + struct in_addr inaddr; + + if((inaddr.s_addr = inet_addr(cp)) != INADDR_NONE) { + inp->s_addr = inaddr.s_addr; + return 1; + } + return 0; +} +#endif diff --git a/src/lib/address_conf.h b/src/lib/address_conf.h new file mode 100644 index 00000000..4c603464 --- /dev/null +++ b/src/lib/address_conf.h @@ -0,0 +1,94 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * + * Written by Meno Abels, June MMIV + * + */ + + +class IPADDR : public SMARTALLOC { + public: + typedef enum { R_SINGLE, R_SINGLE_PORT, R_SINGLE_ADDR, R_MULTIPLE, + R_DEFAULT, R_EMPTY + } i_type; + IPADDR(int af); + IPADDR(const IPADDR & src); + private: + IPADDR() { /* block this construction */ } + i_type type; + union { + struct sockaddr dontuse; + struct sockaddr_in dontuse4; +#ifdef HAVE_IPV6 + struct sockaddr_in6 dontuse6; +#endif + } saddrbuf; + struct sockaddr *saddr; + struct sockaddr_in *saddr4; +#ifdef HAVE_IPV6 + struct sockaddr_in6 *saddr6; +#endif + public: + void set_type(i_type o); + i_type get_type() const; + unsigned short get_port_net_order() const; + unsigned short get_port_host_order() const + { + return ntohs(get_port_net_order()); + } + void set_port_net(unsigned short port); + int get_family() const; + struct sockaddr *get_sockaddr(); + int get_sockaddr_len(); + void copy_addr(IPADDR * src); + void set_addr_any(); + void set_addr4(struct in_addr *ip4); +#ifdef HAVE_IPV6 + void set_addr6(struct in6_addr *ip6); +#endif + const char *get_address(char *outputbuf, int outlen); + + const char *build_address_str(char *buf, int blen); + + /* private */ + dlink link; +}; + +extern void store_addresses(LEX * lc, RES_ITEM * item, int index, int pass); +extern void free_addresses(dlist * addrs); +extern void store_addresses_address(LEX * lc, RES_ITEM * item, int index, int pass); +extern void store_addresses_port(LEX * lc, RES_ITEM * item, int index, int pass); +extern void init_default_addresses(dlist ** addr, int port); + +extern const char *get_first_address(dlist * addrs, char *outputbuf, int outlen); +extern int get_first_port_net_order(dlist * addrs); +extern int get_first_port_host_order(dlist * addrs); + +extern const char *build_addresses_str(dlist *addrs, char *buf, int blen); + +extern int sockaddr_get_port_net_order(const struct sockaddr *sa); +extern int sockaddr_get_port(const struct sockaddr *sa); +extern char *sockaddr_to_ascii(const struct sockaddr *sa, int socklen, char *buf, int buflen); +#ifdef WIN32 +#undef HAVE_OLD_SOCKOPT +#endif +#ifdef HAVE_OLD_SOCKOPT +extern int inet_aton(const char *cp, struct in_addr *inp); +#endif diff --git a/src/lib/alist.c b/src/lib/alist.c new file mode 100644 index 00000000..8d60bd55 --- /dev/null +++ b/src/lib/alist.c @@ -0,0 +1,473 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula array list routines + * + * alist is a simple malloc'ed array of pointers. For the moment, + * it simply malloc's a bigger array controlled by num_grow. + * Default is to realloc the pointer array for each new member. + * + * Note: the list can have holes (empty items). This is done by + * using get() and put(). If you are using this kind of indexed + * list, you cannot use: prepend() and remove() as they will + * reorder the list. So, in the ilist array, these functions are + * disabled and the put method is defined. 
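+ * + * A minimal usage sketch (an illustration inferred from the unit tests at the + * bottom of this file, not original comment text): + * + * alist *list = New(alist(10, owned_by_alist)); + * list->append(bstrdup("first")); // contiguous list, indexed from 0 + * char *item = (char *)list->get(0); // returns "first" + * list->destroy(); // owned_by_alist: destroy() frees the strings + * delete list; + * + * ilist *ids = New(ilist(10, owned_by_alist)); + * ids->put(42, bstrdup("sparse")); // holes allowed; prepend()/remove() are disabled + * char *p = (char *)ids->get(42); + * ids->destroy(); + * delete ids;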
+ * + * Kern Sibbald, June MMIII + * + */ + +#include "bacula.h" + +/* + * Private grow list function. Used to insure that + * at least one more "slot" is available. + */ +void baselist::grow_list() +{ + int i; + int new_max_items; + + /* put() can insert and item anywhere in the list so + it's important to allocate at least last_item+1 items */ + int min_grow = MAX(10, last_item+1); + if (num_grow < min_grow) { + num_grow = min_grow; /* default if not initialized */ + } + + if (items == NULL) { + items = (void **)malloc(num_grow * sizeof(void *)); + for (i=0; i= max_items) { + new_max_items = last_item + num_grow; + items = (void **)realloc(items, new_max_items * sizeof(void *)); + for (i=max_items; i= last_item) { + return NULL; + } else { + return items[cur_item++]; + } +} + +void *alist::prev() +{ + if (cur_item <= 1) { + return NULL; + } else { + return items[--cur_item]; + } +} + +/* + * prepend an item to the list -- i.e. add to beginning + */ +void alist::prepend(void *item) +{ + grow_list(); + if (num_items == 0) { + items[num_items++] = item; + if (num_items > last_item) { + last_item = num_items; + } + return; + } + for (int i=last_item; i > 0; i--) { + items[i] = items[i-1]; + } + items[0] = item; + num_items++; + last_item++; +} + + +/* + * Append an item to the list + */ +void baselist::append(void *item) +{ + grow_list(); + items[last_item++] = item; + num_items++; +} + +/* + * Put an item at a particular index + */ +void ilist::put(int index, void *item) +{ + if (index > last_item) { + last_item = index; + } + grow_list(); + if (items[index] == NULL) { + num_items++; + } + items[index] = item; +} + + +/* + * Remove an item from the list + * Note: you must free the item when + * you are done with it. + */ +void * baselist::remove_item(int index) +{ + void *item; + if (index < 0 || index >= last_item) { + return NULL; + } + item = items[index]; + + /* last_item is from 1..n, we work from 0..n-1 */ + for (int i=index; i < (last_item-1); i++) { + items[i] = items[i+1]; + } + + items[last_item-1] = NULL; /* The last item is shifted by one, the last slot is always free */ + + last_item--; /* We have shifted all items by 1 */ + num_items--; /* We have 1 item less */ + + return item; +} + + +/* Get the index item -- we should probably allow real indexing here */ +void * baselist::get(int index) +{ + if (items == NULL || index < 0 || index > last_item) { + return NULL; + } + return items[index]; +} + +/* Destroy the list and its contents */ +void baselist::destroy() +{ + if (items) { + if (own_items) { + for (int i=0; isize(); i++) { + sprintf(buf, "This is item %d", i); + if (strcmp(buf, (char*)mlist->get(i)) != 0){ + check_cont = false; + } + } + ok(check_cont, "Checking alist contents"); +}; + +void check_all_ilist_contents(ilist *vlist, int start) +{ + bool check_cont = true; + char buf[30]; + int i; + + for (i = start; i< vlist->size(); i++) { + sprintf(buf, "This is item %d", i); + if (strcmp(buf, (char*)vlist->get(i)) != 0){ + check_cont = false; + } + } + ok(check_cont, "Checking ilist contents"); +}; + +void check_all_alist_indexes(alist *mlist) +{ + bool check_cont = true; + char *bp; + int i = 0; + int nb; + + foreach_alist(bp, mlist) { + nb = atoi(bp); + if (nb != i++){ + check_cont = false; + } + } + ok(check_cont, "Check all alist indexes"); +}; + +void check_alist_destroy_and_delete(alist *mlist) +{ + mlist->destroy(); + ok(mlist->size() == 0, "Check alist size after destroy"); + ok(mlist->last() == NULL, "Check alist last after destroy"); + delete mlist; +}; + +void 
check_ilist_destroy_delete(ilist *vlist) +{ + vlist->destroy(); + ok(vlist->size() == 0, "Check ilist size after destroy"); + delete vlist; +} + +int main() +{ + Unittests alist_test("alist_test"); + FILESET *fileset; + char buf[30]; + alist *mlist; + ilist *vlist; + char *bp; + int i; + bool check_cont; + bool check_indx; + + Pmsg0(0, "Initialize tests ...\n"); + fileset = (FILESET *)malloc(sizeof(FILESET)); + bmemzero(fileset, sizeof(FILESET)); + fileset->mylist.init(); + ok(fileset && fileset->mylist.empty() && fileset->mylist.max_size() == 0, + "Default initialization"); + + Pmsg0(0, "Automatic allocation/destruction of alist:\n"); + + for (int i = 0; i < NUMITEMS; i++) { + sprintf(buf, "This is item %d", i); + fileset->mylist.append(bstrdup(buf)); + } + ok(fileset->mylist.size() == NUMITEMS, "Checking size"); + + check_all_alist_contents(&fileset->mylist); + fileset->mylist.destroy(); + ok(fileset->mylist.size() == 0, "Check size after delete"); + ok(fileset->mylist.last() == NULL, "Check last after delete"); + free(fileset); + + Pmsg0(0, "Allocation/destruction using new delete\n"); + + mlist = New(alist(50)); + ok(mlist && mlist->empty() && mlist->max_size() == 0, + "Constructor initialization"); + for (i = 0; i < NUMITEMS; i++) { + sprintf(buf, "This is item %d", i); + mlist->append(bstrdup(buf)); + } + ok(mlist->size() == NUMITEMS, "Checking size"); + check_all_alist_contents(mlist); + check_alist_destroy_and_delete(mlist); + + Pmsg0(0, "Test alist::remove(0)\n"); + mlist = New(alist(10, owned_by_alist)); + mlist->append(bstrdup("trash")); + mlist->append(bstrdup("0")); + mlist->append(bstrdup("1")); + mlist->append(bstrdup("2")); + mlist->append(bstrdup("3")); + ok(mlist && mlist->size() == 5, "Checking size"); + ok(mlist->last_index() == 5, "Check last_index"); + free(mlist->remove(0)); + ok(mlist->size() == 4, "Remove test size"); + ok(mlist->last_index() == 4, "Check last_index"); + check_all_alist_indexes(mlist); + check_alist_destroy_and_delete(mlist); + + Pmsg0(0, "Test alist::remove(3)\n"); + mlist = New(alist(10, owned_by_alist)); + mlist->append(bstrdup("0")); + mlist->append(bstrdup("1")); + mlist->append(bstrdup("2")); + mlist->append(bstrdup("trash")); + mlist->append(bstrdup("3")); + ok(mlist && mlist->size() == 5, "Checking size"); + ok(mlist->last_index() == 5, "Check last_index"); + free(mlist->remove(3)); + ok(mlist->size() == 4, "Remove test size"); + ok(mlist->last_index() == 4, "Check last_index"); + check_all_alist_indexes(mlist); + check_alist_destroy_and_delete(mlist); + + Pmsg0(0, "Test alist::remove(last)\n"); + mlist = New(alist(10, owned_by_alist)); + mlist->append(bstrdup("0")); + mlist->append(bstrdup("1")); + mlist->append(bstrdup("2")); + mlist->append(bstrdup("3")); + mlist->append(bstrdup("trash")); + ok(mlist && mlist->size() == 5, "Checking size"); + ok(mlist->last_index() == 5, "Check last_index"); + free(mlist->remove(4)); + ok(mlist->size() == 4, "Remove test size"); + check_all_alist_indexes(mlist); + check_alist_destroy_and_delete(mlist); + + Pmsg0(0, "Test alist::remove(last+1)\n"); + mlist = New(alist(10, owned_by_alist)); + mlist->append(bstrdup("0")); + mlist->append(bstrdup("1")); + mlist->append(bstrdup("2")); + mlist->append(bstrdup("3")); + mlist->append(bstrdup("4")); + ok(mlist && mlist->size() == 5, "Checking size"); + ok(mlist->last_index() == 5, "Check last_index"); + ok(mlist->remove(5) == NULL, "Check remove returns null"); + ok(mlist->size() == 5, "Remove test size"); + check_all_alist_indexes(mlist); + 
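   /* A minimal sketch (hypothetical local list) of the remove() semantics the
    * tests above exercise: remove(i) hands back the stored pointer and shifts
    * the tail down one slot, so size() and last_index() both drop by one. */
   alist tmp(5, not_owned_by_alist);        /* items below are literals, not freed by destroy() */
   tmp.append((void *)"a");
   tmp.append((void *)"b");
   tmp.append((void *)"c");
   char *gone = (char *)tmp.remove(1);      /* returns "b"; "c" moves down to index 1 */
   Dmsg3(0, "sketch: removed=%s size=%d last_index=%d\n", gone, tmp.size(), tmp.last_index());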
check_alist_destroy_and_delete(mlist); + + Pmsg0(0, "Test alist::pop()\n"); + mlist = New(alist(10, owned_by_alist)); + mlist->append(bstrdup("0")); + mlist->append(bstrdup("1")); + mlist->append(bstrdup("2")); + mlist->append(bstrdup("3")); + mlist->append(bstrdup("trash")); + ok(mlist && mlist->size() == 5, "Checking size"); + ok(mlist->last_index() == 5, "Check last_index"); + free(mlist->pop()); + ok(mlist->size() == 4, "Check last_index after pop()"); + check_all_alist_indexes(mlist); + check_alist_destroy_and_delete(mlist); + + Pmsg0(0, "Test ilist::put()\n"); + vlist = New(ilist(10, owned_by_alist)); + sprintf(buf, "This is item 10"); + vlist->put(10, bstrdup(buf)); + ok(vlist && vlist->size() == 1, "Checking size after put()"); + ok(vlist->last_index() == 10, "Check last_index"); + check_ilist_destroy_delete(vlist); + + Pmsg0(0, "Test ilist with multiple put()\n"); + vlist = New(ilist(50, owned_by_alist)); + sprintf(buf, "This is item 10"); + vlist->put(10, bstrdup(buf)); + ok(vlist && vlist->size() == 1, "Checking size after put()"); + ok(vlist->last_index() == 10, "Check last_index"); + sprintf(buf, "This is item 15"); + vlist->put(15, bstrdup(buf)); + ok(vlist->size() == 2, "Checking size after put()"); + ok(vlist->last_index() == 15, "Check last_index"); + for (i = NUMITEMS; i < NUMITEMS + MORENUMITEMS; i++) { + sprintf(buf, "This is item %d", i); + vlist->put(i, bstrdup(buf)); + } + ok(vlist->size() == 2 + MORENUMITEMS, "Checking size after put()"); + ok(vlist->last_index() == NUMITEMS + MORENUMITEMS - 1, "Check last_index"); + /* check contents, first two sparse elements */ + ok(strcmp("This is item 10", (char *)vlist->get(10)) == 0, "Check ilist content at 10"); + ok(strcmp("This is item 15", (char *)vlist->get(15)) == 0, "Check ilist content at 15"); + check_all_ilist_contents(vlist, NUMITEMS); + check_ilist_destroy_delete(vlist); + + Pmsg0(0, "Test alist::push()\n"); + mlist = New(alist(10, owned_by_alist)); + check_cont = true; + check_indx = true; + for (i = 0; i < NUMITEMS; i++) { + sprintf(buf, "This is item %d", i); + mlist->push(bstrdup(buf)); + if (mlist->size() != i + 1){ + check_cont = false; + } + if (mlist->last_index() != i + 1){ + check_indx = false; + } + } + ok(check_cont, "Check all sizes after push"); + ok(check_indx, "Check all last_indexes after push"); + Pmsg0(0, "Test alist::pop()\n"); + check_cont = true; + for (i = NUMITEMS-1; (bp = (char *)mlist->pop()); i--) { + sprintf(buf, "This is item %d", i); + if (strcmp(buf, bp) != 0){ + check_cont = false; + } + free(bp); + } + ok(check_cont, "Check alist content after pop()"); + ok(mlist->size() == 0, "Check alist size after pop()"); + ok(mlist->last_index() == 0, "Check alist last_index after pop()"); + /* check get after pop, it should be NULL */ + check_cont = true; + for (int i=0; imax_size(); i++) { + bp = (char *) mlist->get(i); + if (bp != NULL){ + check_cont = false; + } + } + ok(check_cont, "Check get() after pop() contents."); + check_alist_destroy_and_delete(mlist); + + return report(); +} +#endif diff --git a/src/lib/alist.h b/src/lib/alist.h new file mode 100644 index 00000000..bbe49260 --- /dev/null +++ b/src/lib/alist.h @@ -0,0 +1,172 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Kern Sibbald, June MMIII + */ + + +extern bool is_null(const void *ptr); + +/* + * There is a lot of extra casting here to work around the fact + * that some compilers (Sun and Visual C++) do not accept + * (void *) as an lvalue on the left side of an equal. + * + * Loop var through each member of list + */ +#ifdef HAVE_TYPEOF +#define foreach_alist(var, list) \ + for((var)=(typeof(var))(list)->first(); (var); (var)=(typeof(var))(list)->next() ) +#else +#define foreach_alist(var, list) \ + for((*((void **)&(var))=(void*)((list)->first())); \ + (var); \ + (*((void **)&(var))=(void*)((list)->next()))) +#endif + +#ifdef HAVE_TYPEOF +#define foreach_alist_index(inx, var, list) \ + for(inx=0; ((var)=(typeof(var))(list)->get(inx)); inx++ ) +#else +#define foreach_alist_index(inx, var, list) \ + for(inx=0; ((*((void **)&(var))=(void*)((list)->get(inx)))); inx++ ) +#endif + + + + +/* Second arg of init */ +enum { + owned_by_alist = true, + not_owned_by_alist = false +}; + +/* + * Array list -- much like a simplified STL vector + * array of pointers to inserted items. baselist is + * the common code between alist and ilist + */ +class baselist : public SMARTALLOC { +protected: + void **items; /* from 0..n-1 */ + int num_items; /* from 1..n */ + int last_item; /* maximum item index (1..n) */ + int max_items; /* maximum possible items (array size) (1..n) */ + int num_grow; + int cur_item; /* from 1..n */ + bool own_items; + void grow_list(void); + void *remove_item(int index); +public: + baselist(int num = 100, bool own=true); + ~baselist(); + void init(int num = 100, bool own=true); + void append(void *item); + void *get(int index); + bool empty() const; + int last_index() const { return last_item; }; + int max_size() const { return max_items; }; + void * operator [](int index) const; + int current() const { return cur_item; }; + int size() const; + void destroy(); + void grow(int num); + + /* Use it as a stack, pushing and poping from the end */ + void push(void *item) { append(item); }; + void *pop() { return remove_item(num_items-1); }; +}; + +class alist: public baselist +{ +public: + alist(int num = 100, bool own=true): baselist(num, own) {}; + void *prev(); + void *next(); + void *last(); + void *first(); + void prepend(void *item); + void *remove(int index) { return remove_item(index);}; +}; + +/* + * Indexed list -- much like a simplified STL vector + * array of pointers to inserted items + */ +class ilist : public baselist { +public: + ilist(int num = 100, bool own=true): baselist(num, own) {}; + /* put() is not compatible with remove(), prepend() or foreach_alist */ + void put(int index, void *item); +}; + +/* + * Define index operator [] + */ +inline void * baselist::operator [](int index) const { + if (index < 0 || index >= max_items) { + return NULL; + } + return items[index]; +} + +inline bool baselist::empty() const +{ + return num_items == 0; +} + +/* + * This allows us to do explicit initialization, + * allowing us to mix C++ classes inside malloc'ed + * C structures. Define before called in constructor. 
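   /* A short usage sketch of the classes declared above, following the idioms
    * used elsewhere in this patch (New/bstrdup/foreach_alist); the list name
    * and its contents are hypothetical: */
   alist *clients = New(alist(10, owned_by_alist));
   clients->append(bstrdup("fd-one"));
   clients->append(bstrdup("fd-two"));
   char *cli;
   foreach_alist(cli, clients) {            /* iterates via first()/next() */
      Dmsg1(50, "client=%s\n", cli);
   }
   delete clients;                          /* destructor -> destroy() frees the owned items */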
+ */ +inline void baselist::init(int num, bool own) +{ + items = NULL; + num_items = 0; + last_item = 0; + max_items = 0; + num_grow = num; + own_items = own; +} + +/* Constructor */ +inline baselist::baselist(int num, bool own) +{ + init(num, own); +} + +/* Destructor */ +inline baselist::~baselist() +{ + destroy(); +} + +/* Current size of list */ +inline int baselist::size() const +{ + if (is_null(this)) return 0; + return num_items; +} + +/* How much to grow by each time */ +inline void baselist::grow(int num) +{ + num_grow = num; +} diff --git a/src/lib/attr.c b/src/lib/attr.c new file mode 100644 index 00000000..497939aa --- /dev/null +++ b/src/lib/attr.c @@ -0,0 +1,301 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * attr.c Unpack an Attribute record returned from the tape + * + * Kern Sibbald, June MMIII (code pulled from filed/restore.c and updated) + * + */ + + +#include "bacula.h" +#include "jcr.h" +#include "lib/breg.h" + +static const int dbglvl = 150; + +ATTR *new_attr(JCR *jcr) +{ + ATTR *attr = (ATTR *)malloc(sizeof(ATTR)); + memset(attr, 0, sizeof(ATTR)); + attr->ofname = get_pool_memory(PM_FNAME); + attr->olname = get_pool_memory(PM_FNAME); + attr->attrEx = get_pool_memory(PM_FNAME); + attr->jcr = jcr; + attr->uid = getuid(); + return attr; +} + +void free_attr(ATTR *attr) +{ + free_pool_memory(attr->olname); + free_pool_memory(attr->ofname); + free_pool_memory(attr->attrEx); + free(attr); +} + +int unpack_attributes_record(JCR *jcr, int32_t stream, char *rec, int32_t reclen, ATTR *attr) +{ + char *p; + int object_len; + /* + * An Attributes record consists of: + * File_index + * Type (FT_types) + * Filename + * Attributes + * Link name (if file linked i.e. FT_LNK) + * Extended attributes (Win32) + * plus optional values determined by AR_ flags in upper bits of Type + * Data_stream + * + */ + attr->stream = stream; + Dmsg1(dbglvl, "Attr: %s\n", rec); + if (sscanf(rec, "%d %d", &attr->file_index, &attr->type) != 2) { + Jmsg(jcr, M_FATAL, 0, _("Error scanning attributes: %s\n"), rec); + Dmsg1(dbglvl, "\nError scanning attributes. %s\n", rec); + return 0; + } + Dmsg2(dbglvl, "Got Attr: FilInx=%d type=%d\n", attr->file_index, attr->type); + /* + * Note AR_DATA_STREAM should never be set since it is encoded + * at the end of the attributes. 
+ */ + if (attr->type & AR_DATA_STREAM) { + attr->data_stream = 1; + } else { + attr->data_stream = 0; + } + attr->type &= FT_MASK; /* keep only type bits */ + p = rec; + while (*p++ != ' ') /* skip record file index */ + { } + while (*p++ != ' ') /* skip type */ + { } + + attr->fname = p; /* set filename position */ + while (*p++ != 0) /* skip filename */ + { } + attr->attr = p; /* set attributes position */ + while (*p++ != 0) /* skip attributes */ + { } + attr->lname = p; /* set link position */ + while (*p++ != 0) /* skip link */ + { } + attr->delta_seq = 0; + if (attr->type == FT_RESTORE_FIRST) { + /* We have an object, so do a binary copy */ + object_len = reclen + rec - p; + attr->attrEx = check_pool_memory_size(attr->attrEx, object_len + 1); + memcpy(attr->attrEx, p, object_len); + /* Add a EOS for those who attempt to print the object */ + p = attr->attrEx + object_len; + *p = 0; + } else { + pm_strcpy(attr->attrEx, p); /* copy extended attributes, if any */ + if (attr->data_stream) { + int64_t val; + while (*p++ != 0) /* skip extended attributes */ + { } + from_base64(&val, p); + attr->data_stream = (int32_t)val; + } else { + while (*p++ != 0) /* skip extended attributes */ + { } + if (p - rec < reclen) { + attr->delta_seq = str_to_int32(p); /* delta_seq */ + } + } + } + Dmsg8(dbglvl, "unpack_attr FI=%d Type=%d fname=%s attr=%s lname=%s attrEx=%s datastr=%d delta_seq=%d\n", + attr->file_index, attr->type, attr->fname, attr->attr, attr->lname, + attr->attrEx, attr->data_stream, attr->delta_seq); + *attr->ofname = 0; + *attr->olname = 0; + return 1; +} + +#if defined(HAVE_WIN32) +static void strip_double_slashes(char *fname) +{ + char *p = fname; + while (p && *p) { + p = strpbrk(p, "/\\"); + if (p != NULL) { + if (IsPathSeparator(p[1])) { + strcpy(p, p+1); + } + p++; + } + } +} +#endif + +/* + * Build attr->ofname from attr->fname and + * attr->olname from attr->olname + */ +void build_attr_output_fnames(JCR *jcr, ATTR *attr) +{ + /* + * Prepend the where directory so that the + * files are put where the user wants. + * + * We do a little jig here to handle Win32 files with + * a drive letter -- we simply change the drive + * from, for example, c: to c/ for + * every filename if a prefix is supplied. 
+ * + */ + + if (jcr->where_bregexp) { + char *ret; + apply_bregexps(attr->fname, &attr->statp, jcr->where_bregexp, &ret); + pm_strcpy(attr->ofname, ret); + + if (attr->type == FT_LNKSAVED || attr->type == FT_LNK) { + /* Always add prefix to hard links (FT_LNKSAVED) and + * on user request to soft links + */ + + if ((attr->type == FT_LNKSAVED || jcr->prefix_links)) { + apply_bregexps(attr->lname, &attr->statp, jcr->where_bregexp, &ret); + pm_strcpy(attr->olname, ret); + + } else { + pm_strcpy(attr->olname, attr->lname); + } + } + + } else if (jcr->where[0] == 0) { + pm_strcpy(attr->ofname, attr->fname); + pm_strcpy(attr->olname, attr->lname); + + } else { + const char *fn; + int wherelen = strlen(jcr->where); + pm_strcpy(attr->ofname, jcr->where); /* copy prefix */ +#if defined(HAVE_WIN32) + if (attr->fname[1] == ':') { + attr->fname[1] = '/'; /* convert : to / */ + } +#endif + fn = attr->fname; /* take whole name */ + /* Ensure where is terminated with a slash */ + if (!IsPathSeparator(jcr->where[wherelen-1]) && !IsPathSeparator(fn[0])) { + pm_strcat(attr->ofname, "/"); + } + pm_strcat(attr->ofname, fn); /* copy rest of name */ + /* + * Fixup link name -- if it is an absolute path + */ + if (attr->type == FT_LNKSAVED || attr->type == FT_LNK) { + bool add_link; + /* Always add prefix to hard links (FT_LNKSAVED) and + * on user request to soft links + */ + if (IsPathSeparator(attr->lname[0]) && + (attr->type == FT_LNKSAVED || jcr->prefix_links)) { + pm_strcpy(attr->olname, jcr->where); + add_link = true; + } else { + attr->olname[0] = 0; + add_link = false; + } + +#if defined(HAVE_WIN32) + if (attr->lname[1] == ':') { + attr->lname[1] = '/'; /* turn : into / */ + } +#endif + fn = attr->lname; /* take whole name */ + /* Ensure where is terminated with a slash */ + if (add_link && + !IsPathSeparator(jcr->where[wherelen-1]) && + !IsPathSeparator(fn[0])) { + pm_strcat(attr->olname, "/"); + } + pm_strcat(attr->olname, fn); /* copy rest of link */ + } + } +#if defined(HAVE_WIN32) + strip_double_slashes(attr->ofname); + strip_double_slashes(attr->olname); +#endif +} + +extern char *getuser(uid_t uid, char *name, int len); +extern char *getgroup(gid_t gid, char *name, int len); + +/* + * Print an ls style message, also send M_RESTORED/M_SAVED + */ +void print_ls_output(JCR *jcr, ATTR *attr, int message_type /* M_RESTORED */) +{ + char buf[5000]; + char ec1[30]; + char en1[30], en2[30]; + char *p, *f; + guid_list *guid; + + /* No need to compute everything if it's not required */ + if (!chk_dbglvl(dbglvl) && !is_message_type_set(jcr, message_type)) { + return; + } + + if (attr->type == FT_DELETED) { /* TODO: change this to get last seen values */ + bsnprintf(buf, sizeof(buf), + "-*DELETED- - - - - ---------- -------- %s\n", attr->ofname); + Dmsg1(dbglvl, "%s", buf); + Jmsg(jcr, message_type, 1, "%s", buf); + return; + } + + if (!jcr->id_list) { + jcr->id_list = new_guid_list(); + } + guid = jcr->id_list; + p = encode_mode(attr->statp.st_mode, buf); + p += sprintf(p, " %2d ", (uint32_t)attr->statp.st_nlink); + p += sprintf(p, "%-8.8s %-8.8s", + guid->uid_to_name(attr->statp.st_uid, en1, sizeof(en1)), + guid->gid_to_name(attr->statp.st_gid, en2, sizeof(en2))); + p += sprintf(p, " %18.18s ", edit_int64(attr->statp.st_size, ec1)); + p = encode_time(attr->statp.st_ctime, p); + *p++ = ' '; + *p++ = ' '; + for (f=attr->ofname; *f && (p-buf) < (int)sizeof(buf)-10; ) { + *p++ = *f++; + } + if (attr->type == FT_LNK) { + *p++ = ' '; + *p++ = '-'; + *p++ = '>'; + *p++ = ' '; + /* Copy link name */ + for 
(f=attr->olname; *f && (p-buf) < (int)sizeof(buf)-10; ) { + *p++ = *f++; + } + } + *p++ = '\n'; + *p = 0; + Dmsg1(dbglvl, "%s", buf); + Jmsg(jcr, message_type, 1, "%s", buf); +} diff --git a/src/lib/attr.h b/src/lib/attr.h new file mode 100644 index 00000000..6c765546 --- /dev/null +++ b/src/lib/attr.h @@ -0,0 +1,53 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * attr.h Definition of attributes packet for unpacking from tape + * + * Kern Sibbald, June MMIII + * + */ + +#ifndef __ATTR_H_ +#define __ATTR_H_ 1 + + +struct ATTR { + int32_t stream; /* attribute stream id */ + int32_t data_stream; /* id of data stream to follow */ + int32_t type; /* file type FT */ + int32_t file_index; /* file index */ + int32_t LinkFI; /* file index to data if hard link */ + int32_t delta_seq; /* delta sequence numbr */ + uid_t uid; /* userid */ + struct stat statp; /* decoded stat packet */ + POOLMEM *attrEx; /* extended attributes if any */ + POOLMEM *ofname; /* output filename */ + POOLMEM *olname; /* output link name */ + /* + * Note the following three variables point into the + * current BSOCK record, so they are invalid after + * the next socket read! + */ + char *attr; /* attributes position */ + char *fname; /* filename */ + char *lname; /* link name if any */ + JCR *jcr; /* jcr pointer */ +}; + +#endif /* __ATTR_H_ */ diff --git a/src/lib/base64.c b/src/lib/base64.c new file mode 100644 index 00000000..28d0ce10 --- /dev/null +++ b/src/lib/base64.c @@ -0,0 +1,445 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Generic base 64 input and output routines + * + * Written by Kern E. Sibbald, March MM. 
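   /* A sketch of how the ATTR routines above are meant to be chained when an
    * attribute record arrives on a BSOCK; jcr, sd (the socket) and stream are
    * assumed to exist in the caller, and attr->fname/attr/lname stay valid
    * only while sd->msg is unchanged: */
   ATTR *attr = new_attr(jcr);                              /* pool buffers for ofname/olname/attrEx */
   if (unpack_attributes_record(jcr, stream, sd->msg, sd->msglen, attr)) {
      build_attr_output_fnames(jcr, attr);                  /* apply the restore "where" prefix */
      print_ls_output(jcr, attr, M_RESTORED);               /* ls-style line in the job report */
   }
   free_attr(attr);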
+ */ + + +#include "bacula.h" + + +#ifdef TEST_MODE +#include +#endif + + +static uint8_t const base64_digits[64] = +{ + 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', + 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', + 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', + 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', + '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', '/' +}; + +static int base64_inited = 0; +static uint8_t base64_map[256]; + + +/* Initialize the Base 64 conversion routines */ +void +base64_init(void) +{ + int i; + memset(base64_map, 0, sizeof(base64_map)); + for (i=0; i<64; i++) + base64_map[(uint8_t)base64_digits[i]] = i; + base64_inited = 1; +} + +/* Convert a value to base64 characters. + * The result is stored in where, which + * must be at least 8 characters long. + * + * Returns the number of characters + * stored (not including the EOS). + */ +int +to_base64(int64_t value, char *where) +{ + uint64_t val; + int i = 0; + int n; + + /* Handle negative values */ + if (value < 0) { + where[i++] = '-'; + value = -value; + } + + /* Determine output size */ + val = value; + do { + val >>= 6; + i++; + } while (val); + n = i; + + /* Output characters */ + val = value; + where[i] = 0; + do { + where[--i] = base64_digits[val & (uint64_t)0x3F]; + val >>= 6; + } while (val); + return n; +} + +/* + * Convert the Base 64 characters in where to + * a value. No checking is done on the validity + * of the characters!! + * + * Returns the value. + */ +int +from_base64(int64_t *value, char *where) +{ + uint64_t val = 0; + int i, neg; + + if (!base64_inited) + base64_init(); + /* Check if it is negative */ + i = neg = 0; + if (where[i] == '-') { + i++; + neg = 1; + } + /* Construct value */ + while (where[i] != 0 && where[i] != ' ') { + val <<= 6; + val += base64_map[(uint8_t)where[i++]]; + } + + *value = neg ? -(int64_t)val : (int64_t)val; + return i; +} + + +/* + * Encode binary data in bin of len bytes into + * buf as base64 characters. + * + * If compatible is true, the bin_to_base64 routine will be compatible + * with what the rest of the world uses. + * + * Returns: the number of characters stored not + * including the EOS + */ +int +bin_to_base64(char *buf, int buflen, char *bin, int binlen, int compatible) +{ + uint32_t reg, save, mask; + int rem, i; + int j = 0; + + reg = 0; + rem = 0; + buflen--; /* allow for storing EOS */ + for (i=0; i < binlen; ) { + if (rem < 6) { + reg <<= 8; + if (compatible) { + reg |= (uint8_t)bin[i++]; + } else { + reg |= (int8_t)bin[i++]; + } + rem += 8; + } + save = reg; + reg >>= (rem - 6); + if (j < buflen) { + buf[j++] = base64_digits[reg & 0x3F]; + } + reg = save; + rem -= 6; + } + if (rem && j < buflen) { + mask = (1 << rem) - 1; + if (compatible) { + buf[j++] = base64_digits[(reg & mask) << (6 - rem)]; + } else { + buf[j++] = base64_digits[reg & mask]; + } + } + buf[j] = 0; + return j; +} + +/* + * Decode base64 data in bin of len bytes into + * buf as binary characters. + * + * the base64_to_bin routine is compatible with what the rest of the world + * uses. + * + * 'dest_size' must be big enough! 
Giving the right size here could fail as + * we consider 'srclen' as an unpadded size, even if 'src' is padded + * we suggest to use dest_size=srclen for easiness or at least + * dest_size=((srclen + 3) / 4) * 3) for optimization lovers + * + * Returns: the number of characters stored not + * including the EOS + */ +int base64_to_bin(char *dest, int dest_size, char *src, int srclen) +{ + int nprbytes; + uint8_t *bufout; + uint8_t *bufplain = (uint8_t*) dest; + const uint8_t *bufin; + + if (!base64_inited) + base64_init(); + + if (dest_size < (((srclen + 3) / 4) * 3)) { + /* dest buffer too small */ + *dest = 0; + return 0; + } + + bufin = (const uint8_t *) src; + while ((*bufin != ' ') && (srclen != 0)) { + bufin++; + srclen--; + } + + nprbytes = bufin - (const uint8_t *) src; + bufin = (const uint8_t *) src; + bufout = (uint8_t *) bufplain; + + while (nprbytes > 4) + { + *(bufout++) = (base64_map[bufin[0]] << 2 | base64_map[bufin[1]] >> 4); + *(bufout++) = (base64_map[bufin[1]] << 4 | base64_map[bufin[2]] >> 2); + *(bufout++) = (base64_map[bufin[2]] << 6 | base64_map[bufin[3]]); + bufin += 4; + nprbytes -= 4; + } + + /* Bacula base64 strings are not always padded with = */ + if (nprbytes > 1) { + *(bufout++) = (base64_map[bufin[0]] << 2 | base64_map[bufin[1]] >> 4); + } + if (nprbytes > 2) { + *(bufout++) = (base64_map[bufin[1]] << 4 | base64_map[bufin[2]] >> 2); + } + if (nprbytes > 3) { + *(bufout++) = (base64_map[bufin[2]] << 6 | base64_map[bufin[3]]); + } + *bufout = 0; + + return (bufout - (uint8_t *) dest); +} + +#ifdef BIN_TEST +int main(int argc, char *argv[]) +{ + int len; + char buf[100]; + char junk[100]; + int i; + +#ifdef xxxx + int xx = 0; + for (i=0; i < 1000; i++) { + bin_to_base64(buf, sizeof(buf), (char *)&xx, 4, true); + printf("xx=%s\n", buf); + xx++; + } +#endif + junk[0] = 0xFF; + for (i=1; i<100; i++) { + junk[i] = junk[i-1]-1; + } + len = bin_to_base64(buf, sizeof(buf), junk, 16, true); + printf("len=%d junk=%s\n", len, buf); + + strcpy(junk, "This is a sample stringa"); + len = bin_to_base64(buf, sizeof(buf), junk, strlen(junk), true); + buf[len] = 0; + base64_to_bin(junk, sizeof(junk), buf, len); + printf("buf=<%s>\n", junk); + return 0; +} +#endif + +#ifdef TEST_MODE +static int errfunc(const char *epath, int eernoo) +{ + printf("in errfunc\n"); + return 1; +} + + +/* + * Test the base64 routines by encoding and decoding + * lstat() packets. 
+ */ +int main(int argc, char *argv[]) +{ + char where[500]; + int i; + glob_t my_glob; + char *fname; + struct stat statp; + struct stat statn; + int debug_level = 0; + char *p; + int32_t j; + time_t t = 1028712799; + + if (argc > 1 && strcmp(argv[1], "-v") == 0) + debug_level++; + + base64_init(); + + my_glob.gl_offs = 0; + glob("/etc/grub.conf", GLOB_MARK, errfunc, &my_glob); + + for (i=0; my_glob.gl_pathv[i]; i++) { + fname = my_glob.gl_pathv[i]; + if (lstat(fname, &statp) < 0) { + berrno be; + printf("Cannot stat %s: %s\n", fname, be.bstrerror(errno)); + continue; + } + encode_stat(where, &statp, sizeof(statp), 0, 0); + + printf("Encoded stat=%s\n", where); + +#ifdef xxx + p = where; + p += to_base64((int64_t)(statp.st_atime), p); + *p++ = ' '; + p += to_base64((int64_t)t, p); + printf("%s %s\n", fname, where); + + printf("%s %lld\n", "st_dev", (int64_t)statp.st_dev); + printf("%s %lld\n", "st_ino", (int64_t)statp.st_ino); + printf("%s %lld\n", "st_mode", (int64_t)statp.st_mode); + printf("%s %lld\n", "st_nlink", (int64_t)statp.st_nlink); + printf("%s %lld\n", "st_uid", (int64_t)statp.st_uid); + printf("%s %lld\n", "st_gid", (int64_t)statp.st_gid); + printf("%s %lld\n", "st_rdev", (int64_t)statp.st_rdev); + printf("%s %lld\n", "st_size", (int64_t)statp.st_size); + printf("%s %lld\n", "st_blksize", (int64_t)statp.st_blksize); + printf("%s %lld\n", "st_blocks", (int64_t)statp.st_blocks); + printf("%s %lld\n", "st_atime", (int64_t)statp.st_atime); + printf("%s %lld\n", "st_mtime", (int64_t)statp.st_mtime); + printf("%s %lld\n", "st_ctime", (int64_t)statp.st_ctime); +#endif + + if (debug_level) + printf("%s: len=%d val=%s\n", fname, strlen(where), where); + + decode_stat(where, &statn, sizeof(statn), &j); + + if (statp.st_dev != statn.st_dev || + statp.st_ino != statn.st_ino || + statp.st_mode != statn.st_mode || + statp.st_nlink != statn.st_nlink || + statp.st_uid != statn.st_uid || + statp.st_gid != statn.st_gid || + statp.st_rdev != statn.st_rdev || + statp.st_size != statn.st_size || + statp.st_blksize != statn.st_blksize || + statp.st_blocks != statn.st_blocks || + statp.st_atime != statn.st_atime || + statp.st_mtime != statn.st_mtime || + statp.st_ctime != statn.st_ctime) { + + printf("%s: %s\n", fname, where); + encode_stat(where, &statn, sizeof(statn), 0, 0); + printf("%s: %s\n", fname, where); + printf("NOT EQAL\n"); + } + + } + globfree(&my_glob); + + printf("%d files examined\n", i); + + to_base64(UINT32_MAX, where); + printf("UINT32_MAX=%s\n", where); + + return 0; +} +#endif + +#ifndef TEST_PROGRAM +#define TEST_PROGRAM_A +#endif + +#ifdef TEST_PROGRAM +#include "unittests.h" + +static const unsigned char rnddata[16] = { + 0xa5, 0x7d, 0xa3, 0xc4, 0x2c, 0xa0, 0x08, 0xe9, 0x32, 0xb9, 0xc7, 0x84, 0xf6, 0xd3, 0xdf, 0x4f +}; +static const char *resb16 = "pX2jxCygCOkyuceE9tPfTw"; +static const char *resb8 = "q83v7c"; +#define VARREF 0xABCDEFEDC + +int main() +{ + Unittests base64_test("base64_test"); + char buf[30]; + char binbuf[30]; + uint len; + bool check_cont; + int64_t var; + + base64_init(); +/* + for (int a=0; a < 16; a++){ + fprintf(stderr, "%c", rnddata[a]); + } +*/ + /* encode reference binary data to base64 */ + len = bin_to_base64(buf, 30, (char*)rnddata, 16, true); + ok(len == strlen(resb16), "Checking bin_to_base64 encoded length"); + ok(strcmp(resb16, buf) == 0, "Checking bin_to_base64 encoded data"); + /* decode reference base64 data to bin*/ + len = base64_to_bin(binbuf, 30, (char*)resb16, strlen(resb16)); + ok(len == 16, "Checking base64_to_bin decoded length"); 
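   /* A round-trip sketch for the integer helpers tested above (the value is
    * arbitrary): to_base64() writes the most-significant digit first and
    * NUL-terminates, from_base64() stops at NUL or space. */
   char b64[30];
   int64_t back = 0;
   to_base64(123456789, b64);               /* b64 now holds the base64 digits */
   from_base64(&back, b64);                 /* back == 123456789 again */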
+ check_cont = true; + for (uint a = 0; a < len; a++){ + if ((unsigned char)binbuf[a] != rnddata[a]){ + check_cont = false; + } + } + ok(check_cont, "Checking base64_to_bin decoded data"); + /* decode the encoded base64 data to bin */ + len = base64_to_bin(binbuf, 30, buf, strlen(buf)); + ok(len == 16, "Checking base64_to_bin decoded length - encoded"); + check_cont = true; + for (uint a = 0; a < len; a++){ + if ((unsigned char)binbuf[a] != rnddata[a]){ + check_cont = false; + } + } + ok(check_cont, "Checking base64_to_bin decoded data - encoded"); + /* encode reference variable to base64 */ + len = to_base64(VARREF, buf); + ok(len == 6, "Checking to_base64 encode length"); + ok(strcmp(resb8, buf) == 0, "Checking to_base64 encoded data"); + /* decode reference data to bin */ + len = from_base64(&var, (char*)resb8); + ok(var == VARREF, "Checking from_base64 decoded data"); + ok(len == 6, "Checking from_base64 decoded length"); + /* decode encoded data to bin */ + len = from_base64(&var, buf); + ok(var == VARREF, "Checking from_base64 decoded data - encoded"); + ok(len == 6, "Checking from_base64 decoded length - encoded"); + return report(); +}; +#endif /* TEST_PROGRAM */ diff --git a/src/lib/base64.h b/src/lib/base64.h new file mode 100644 index 00000000..ff48a715 --- /dev/null +++ b/src/lib/base64.h @@ -0,0 +1,28 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Generic base 64 input and output routines + * + * Written by Kern E. Sibbald, March MM. + */ + +/* Maximum size of len bytes after base64 encoding */ +#define BASE64_SIZE(len) ((4 * len + 2) / 3 + 1) + +// #define BASE64_SIZE(len) (((len + 3 - (len % 3)) / 3) * 4) diff --git a/src/lib/berrno.c b/src/lib/berrno.c new file mode 100644 index 00000000..747337db --- /dev/null +++ b/src/lib/berrno.c @@ -0,0 +1,105 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula errno handler + * + * berrno is a simplistic errno handler that works for + * Unix, Win32, and Bacula bpipes. + * + * See berrno.h for how to use berrno. 
+ * + * Kern Sibbald, July MMIV + * + * + */ + +#include "bacula.h" + +#ifndef HAVE_WIN32 +extern const char *get_signal_name(int sig); +extern int num_execvp_errors; +extern int execvp_errors[]; +#endif + +const char *berrno::bstrerror() +{ + *m_buf = 0; +#ifdef HAVE_WIN32 + if (m_berrno & (b_errno_win32 | b_errno_WSA)) { + format_win32_message(); + return (const char *)m_buf; + } +#else + int stat = 0; + + if (m_berrno & b_errno_exit) { + stat = (m_berrno & ~b_errno_exit); /* remove bit */ + if (stat == 0) { + return _("Child exited normally."); /* this really shouldn't happen */ + } else { + /* Maybe an execvp failure */ + if (stat >= 200) { + if (stat < 200 + num_execvp_errors) { + m_berrno = execvp_errors[stat - 200]; + } else { + return _("Unknown error during program execvp"); + } + } else { + Mmsg(m_buf, _("Child exited with code %d"), stat); + return m_buf; + } + /* If we drop out here, m_berrno is set to an execvp errno */ + } + } + if (m_berrno & b_errno_signal) { + stat = (m_berrno & ~b_errno_signal); /* remove bit */ + Mmsg(m_buf, _("Child died from signal %d: %s"), stat, get_signal_name(stat)); + return m_buf; + } +#endif + /* Normal errno */ + if (b_strerror(m_berrno, m_buf, sizeof_pool_memory(m_buf)) < 0) { + return _("Invalid errno. No error message possible."); + } + return m_buf; +} + +void berrno::format_win32_message() +{ +#ifdef HAVE_WIN32 + LPVOID msg; + FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | + FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, + NULL, + m_berrno & b_errno_WSA ? WSAGetLastError() : GetLastError(), + MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), + (LPTSTR)&msg, + 0, + NULL); + pm_strcpy(&m_buf, (const char *)msg); + LocalFree(msg); +#endif +} + +#ifdef TEST_PROGRAM + +int main() +{ +} +#endif diff --git a/src/lib/berrno.h b/src/lib/berrno.h new file mode 100644 index 00000000..e5c4b709 --- /dev/null +++ b/src/lib/berrno.h @@ -0,0 +1,88 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * Kern Sibbald, July MMIV + * + */ + +/* + * Extra bits set to interpret errno value differently from errno + */ +#ifdef HAVE_WIN32 +#define b_errno_win32 (1<<29) /* user reserved bit */ +#define b_errno_WSA (1<<26) +#else +#define b_errno_win32 0 /* On Unix/Linix system */ +#endif +#define b_errno_exit (1<<28) /* child exited, exit code returned */ +#define b_errno_signal (1<<27) /* child died, signal code returned */ + +/* + * A more generalized way of handling errno that works with Unix, Windows, + * and with Bacula bpipes. + * + * It works by picking up errno and creating a memory pool buffer + * for editing the message. strerror() does the actual editing, and + * it is thread safe. + * + * If bit 29 in m_berrno is set then it is a Win32 error, and we + * must do a GetLastError() to get the error code for formatting. + * If bit 29 in m_berrno is not set, then it is a Unix errno. 
+ * + */ +class berrno : public SMARTALLOC { + POOLMEM *m_buf; + int m_berrno; + void format_win32_message(); +public: + berrno(int pool=PM_EMSG); + ~berrno(); + const char *bstrerror(); + const char *bstrerror(int errnum); + void set_errno(int errnum); + int code() { return m_berrno & ~(b_errno_exit|b_errno_signal); } + int code(int stat) { return stat & ~(b_errno_exit|b_errno_signal); } +}; + +/* Constructor */ +inline berrno::berrno(int pool) +{ + m_berrno = errno; + m_buf = get_pool_memory(pool); + *m_buf = 0; + errno = m_berrno; +} + +inline berrno::~berrno() +{ + free_pool_memory(m_buf); +} + +inline const char *berrno::bstrerror(int errnum) +{ + m_berrno = errnum; + return berrno::bstrerror(); +} + + +inline void berrno::set_errno(int errnum) +{ + m_berrno = errnum; +} diff --git a/src/lib/bget_msg.c b/src/lib/bget_msg.c new file mode 100644 index 00000000..75385164 --- /dev/null +++ b/src/lib/bget_msg.c @@ -0,0 +1,168 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Subroutines to receive network data and handle + * network signals for the FD and the SD. + * + * Kern Sibbald, May MMI previously in src/stored/fdmsg.c + * + */ + +#include "bacula.h" +#include "jcr.h" + +static char OK_msg[] = "2000 OK\n"; +static char TERM_msg[] = "2999 Terminate\n"; + +#define msglvl 500 + +/* + * This routine does a bnet_recv(), then if a signal was + * sent, it handles it. The return codes are the same as + * bne_recv() except the BNET_SIGNAL messages that can + * be handled are done so without returning. 
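   /* Typical use of the berrno class defined above: the constructor snapshots
    * errno and bstrerror() edits a thread-safe message. The path and the
    * message wording are only an example: */
   const char *path = "/some/config/file";  /* example path */
   int fd = open(path, O_RDONLY);
   if (fd < 0) {
      berrno be;                            /* captures errno right here */
      Emsg2(M_ERROR, 0, _("Could not open %s: ERR=%s\n"), path, be.bstrerror());
   }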
+ * + * Returns number of bytes read (may return zero) + * Returns -1 on signal (BNET_SIGNAL) + * Returns -2 on hard end of file (BNET_HARDEOF) + * Returns -3 on error (BNET_ERROR) + * Returns -4 on Command (BNET_COMMAND) + */ +int bget_msg(BSOCK *sock) +{ + int n; + for ( ;; ) { + n = sock->recv(); + if (n >= 0) { /* normal return */ + return n; + } + if (sock->is_stop()) { /* error return */ + return n; + } + if (n == BNET_COMMAND) { + return n; + } + + /* BNET_SIGNAL (-1) return from bnet_recv() => network signal */ + switch (sock->msglen) { + case BNET_EOD: /* end of data */ + Dmsg0(msglvl, "Got BNET_EOD\n"); + return n; + case BNET_EOD_POLL: + Dmsg0(msglvl, "Got BNET_EOD_POLL\n"); + if (sock->is_terminated()) { + sock->fsend(TERM_msg); + } else { + sock->fsend(OK_msg); /* send response */ + } + return n; /* end of data */ + case BNET_TERMINATE: + Dmsg0(msglvl, "Got BNET_TERMINATE\n"); + sock->set_terminated(); + return n; + case BNET_POLL: + Dmsg0(msglvl, "Got BNET_POLL\n"); + if (sock->is_terminated()) { + sock->fsend(TERM_msg); + } else { + sock->fsend(OK_msg); /* send response */ + } + break; + case BNET_HEARTBEAT: + case BNET_HB_RESPONSE: + break; + case BNET_STATUS: + /* *****FIXME***** Implement BNET_STATUS */ + Dmsg0(msglvl, "Got BNET_STATUS\n"); + sock->fsend(_("Status OK\n")); + sock->signal(BNET_EOD); + break; + default: + Emsg1(M_ERROR, 0, _("bget_msg: unknown signal %d\n"), sock->msglen); + break; + } + } +} + +bmessage::bmessage(int bufsize) +{ + msg = get_pool_memory(PM_BSOCK); + msg = realloc_pool_memory(msg, bufsize); + status = bmessage::bm_busy; + jobbytes = 0; +} + +bmessage::~bmessage() +{ + free_pool_memory(msg); +} + +void bmessage::swap(BSOCK *sock) +{ + POOLMEM *swap = sock->msg; + sock->msg = msg; + msg = swap; +} + +GetMsg::GetMsg(JCR *a_jcr, BSOCK *a_bsock, const char *a_rec_header, int32_t a_bufsize): + jcr(a_jcr), + bsock(a_bsock), + rec_header(a_rec_header), + bufsize(a_bufsize), + m_is_stop(false), + m_is_done(false), + m_is_error(false), + m_use_count(1) +{ + jcr->inc_use_count(); /* We own a copy of the JCR */ + bmsg_aux = New(bmessage(bufsize)); + bmsg = bmsg_aux; + pthread_mutex_init(&mutex, 0); + pthread_cond_init(&cond, NULL); +}; + +GetMsg::~GetMsg() +{ + free_jcr(jcr); /* Release our copy of the JCR */ + delete bmsg_aux; + pthread_mutex_destroy(&mutex); + pthread_cond_destroy(&cond); +}; + +int GetMsg::bget_msg(bmessage **pbmsg) +{ + // Get our own local copy of the socket + + if (pbmsg == NULL) { + pbmsg = &bmsg_aux; + } + bmessage *bmsg = *pbmsg; + bmsg->ret = ::bget_msg(bsock); + bmsg->status = bmessage::bm_ready; + bmsg->rbuflen = bmsg->msglen = bmsg->origlen = bsock->msglen; +/* bmsg->is_header = !bmsg->is_header; ALAIN SAYS: I think this line is useless */ + /* swap msg instead of copying */ + bmsg->swap(bsock); + bmsg->rbuf = bmsg->msg; + + msglen = bmsg->msglen; + msg = bmsg->msg; + m_is_stop = bsock->is_stop() || bsock->is_error(); + return bmsg->ret; +} diff --git a/src/lib/bget_msg.h b/src/lib/bget_msg.h new file mode 100644 index 00000000..e8b3458b --- /dev/null +++ b/src/lib/bget_msg.h @@ -0,0 +1,110 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
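   /* The usual consumer pattern for bget_msg() above -- a sketch only; "sock"
    * is a connected BSOCK and handle_record() is a hypothetical callback: */
   int n;
   while ((n = bget_msg(sock)) >= 0) {
      handle_record(sock->msg, sock->msglen);   /* hypothetical per-record handler */
   }
   /* n is now one of the negative codes listed above: -1 after a signal such
    * as BNET_EOD, -2 on hard EOF, -3 on error, -4 when a command arrived */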
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +#ifndef __BGET_MSG_H_ +#define __BGET_MSG_H_ + +#include "bacula.h" + +typedef uint64_t blockaddr; +typedef int64_t blockidx; + +#ifdef COMMUNITY +#define BLOCK_HEAD_SIZE 4 // only one int +#define GETMSG_MAX_BLOCK_SIZE (65*1024-BLOCK_HEAD_SIZE) +#define GETMSG_MAX_HASH_SIZE 64 /* SHA512 */ +#define GETMSG_MAX_MSG_SIZE (GETMSG_MAX_BLOCK_SIZE+GETMSG_MAX_HASH_SIZE+sizeof(uint32_t)+OFFSET_FADDR_SIZE+100) +#endif + + +class bmessage: public SMARTALLOC +{ +public: + enum { bm_none, bm_ready, bm_busy, bm_ref }; + + POOLMEM *msg; // exchanged with BSOCK + int32_t msglen; // length from BSOCK + int32_t origlen; // length before rehydration, to be compared with length in header + char *rbuf; // adjusted to point to data inside *msg + int32_t rbuflen; // adjusted from msglen + int status; + int ret; // return value from bget_msg() + int jobbytes; // must be added to jcr->JobBytes if block is downloaded + + bmessage(int bufsize); + virtual ~bmessage(); + void swap(BSOCK *sock); +}; + +class GetMsg: public SMARTALLOC +{ +public: + JCR *jcr; + BSOCK *bsock; + const char *rec_header; /* Format of a header */ + int32_t bufsize; /* "ideal" bufsize from JCR */ + + bool m_is_stop; /* set by the read thread when bsock->is_stop() */ + bool m_is_done; /* set when the read thread finish (no more record will be pushed) */ + bool m_is_error; /* set when the read thread get an error */ + + int32_t m_use_count; + + pthread_mutex_t mutex; + pthread_cond_t cond; + bmessage *bmsg_aux; + bmessage *bmsg; // local bmsg used by bget_msg(NULL) + int32_t msglen; // used to mimic BSOCK, updated by bget_msg() + POOLMEM *msg; // used to mimic BSOCK, updated by bget_msg() + + void inc_use_count(void) {P(mutex); m_use_count++; V(mutex); }; + void dec_use_count(void) {P(mutex); m_use_count--; V(mutex); }; + int32_t use_count() { int32_t v; P(mutex); v = m_use_count; V(mutex); return v;}; + + + GetMsg(JCR *a_jcr, BSOCK *a_bsock, const char *a_rec_header, int32_t a_bufsize); + virtual ~GetMsg(); + + virtual int bget_msg(bmessage **pbmsg=NULL); + inline virtual void *do_read_sock_thread(void) { return NULL; }; + inline virtual int start_read_sock() { return 0; }; + inline virtual void *wait_read_sock(int /*emergency_quit*/) { return NULL;}; + + virtual bool is_stop() { return (m_is_stop!=false); }; + virtual bool is_done() { return (m_is_done!=false); }; + virtual bool is_error(){ return (m_is_error!=false); }; + + bmessage *new_msg() { return New(bmessage(bufsize)); }; + +}; + +/* Call this function to release the memory associated with the message queue + * The reading thread is using the BufferedMsgBase to work, so we need to free + * the memory only when the main thread and the reading thread agree + */ +inline void free_GetMsg(GetMsg *b) +{ + b->dec_use_count(); + ASSERT2(b->use_count() >= 0, "GetMsg use_count too low"); + if (b->use_count() == 0) { + delete b; + } +} + +#endif diff --git a/src/lib/binflate.c b/src/lib/binflate.c new file mode 100644 index 00000000..dcaa844a --- /dev/null +++ b/src/lib/binflate.c @@ -0,0 +1,103 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The 
original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula zlib compression wrappers + * + */ + +#include "bacula.h" +#ifdef HAVE_LIBZ +#include +#endif + +/* + * Deflate or compress and input buffer. You must supply an + * output buffer sufficiently long and the length of the + * output buffer. Generally, if the output buffer is the + * same size as the input buffer, it should work (at least + * for text). + */ +int Zdeflate(char *in, int in_len, char *out, int &out_len) +{ +#ifdef HAVE_LIBZ + z_stream strm; + int ret; + + /* allocate deflate state */ + strm.zalloc = Z_NULL; + strm.zfree = Z_NULL; + strm.opaque = Z_NULL; + ret = deflateInit(&strm, 9); + if (ret != Z_OK) { + Dmsg0(200, "deflateInit error\n"); + (void)deflateEnd(&strm); + return ret; + } + + strm.next_in = (Bytef *)in; + strm.avail_in = in_len; + Dmsg1(200, "In: %d bytes\n", strm.avail_in); + strm.avail_out = out_len; + strm.next_out = (Bytef *)out; + ret = deflate(&strm, Z_FINISH); + out_len = out_len - strm.avail_out; + Dmsg1(200, "compressed=%d\n", out_len); + (void)deflateEnd(&strm); + return ret; +#else + return 1; +#endif +} + +/* + * Inflate or uncompress an input buffer. You must supply + * and output buffer and an output length sufficiently long + * or there will be an error. This uncompresses in one call. + */ +int Zinflate(char *in, int in_len, char *out, int &out_len) +{ +#ifdef HAVE_LIBZ + z_stream strm; + int ret; + + /* allocate deflate state */ + strm.zalloc = Z_NULL; + strm.zfree = Z_NULL; + strm.opaque = Z_NULL; + strm.next_in = (Bytef *)in; + strm.avail_in = in_len; + ret = inflateInit(&strm); + if (ret != Z_OK) { + Dmsg0(200, "inflateInit error\n"); + (void)inflateEnd(&strm); + return ret; + } + + Dmsg1(200, "In len: %d bytes\n", strm.avail_in); + strm.avail_out = out_len; + strm.next_out = (Bytef *)out; + ret = inflate(&strm, Z_FINISH); + out_len -= strm.avail_out; + Dmsg1(200, "Uncompressed=%d\n", out_len); + (void)inflateEnd(&strm); + return ret; +#else + return 1; +#endif +} diff --git a/src/lib/bits.h b/src/lib/bits.h new file mode 100644 index 00000000..43a132b8 --- /dev/null +++ b/src/lib/bits.h @@ -0,0 +1,59 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
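   /* A sketch of the zlib wrappers above: out_len is passed by reference and
    * rewritten with the number of bytes actually produced; without libz both
    * routines are stubs that simply return 1. Sample text and buffer sizes
    * are arbitrary, and return-code checking is omitted: */
   char clear[256], packed[300], restored[256];
   int packed_len = sizeof(packed);
   int restored_len = sizeof(restored);
   bstrncpy(clear, "a little text that should shrink when deflated", sizeof(clear));
   Zdeflate(clear, strlen(clear) + 1, packed, packed_len);   /* packed_len := compressed size */
   Zinflate(packed, packed_len, restored, restored_len);     /* restored_len := original size */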
+*/ +/* Some elementary bit manipulations + * + * Kern Sibbald, MM + * + * NOTE: base 0 + * + */ + +#ifndef __BITS_H_ +#define __BITS_H_ + +/* number of bytes to hold n bits */ +#define nbytes_for_bits(n) ((((n)-1)>>3)+1) + +/* test if bit is set */ +#define bit_is_set(b, var) (((var)[(b)>>3] & (1<<((b)&0x7))) != 0) + +/* set bit */ +#define set_bit(b, var) ((var)[(b)>>3] |= (1<<((b)&0x7))) + +/* clear bit */ +#define clear_bit(b, var) ((var)[(b)>>3] &= ~(1<<((b)&0x7))) + +/* clear all bits */ +#define clear_all_bits(b, var) memset(var, 0, nbytes_for_bits(b)) + +/* set range of bits */ +#define set_bits(f, l, var) { \ + int i; \ + for (i=f; i<=l; i++) \ + set_bit(i, var); \ +} + +/* clear range of bits */ +#define clear_bits(f, l, var) { \ + int i; \ + for (i=f; i<=l; i++) \ + clear_bit(i, var); \ +} + +#endif /* __BITS_H_ */ diff --git a/src/lib/bjson.c b/src/lib/bjson.c new file mode 100644 index 00000000..7ea1d36b --- /dev/null +++ b/src/lib/bjson.c @@ -0,0 +1,408 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * Bacula Json library routines + * + * Kern Sibbald, September MMXII + * + */ + +#include "bacula.h" +#include "lib/breg.h" + +extern s_kw msg_types[]; +extern RES_TABLE resources[]; + +union URES { + MSGS res_msgs; + RES hdr; +}; + +#if defined(_MSC_VER) +extern "C" { // work around visual compiler mangling variables + extern URES res_all; +} +#else +extern URES res_all; +#endif + +struct display_filter +{ + /* default { { "Director": { "Name": aa, ...} }, { "Job": {..} */ + bool do_list; /* [ {}, {}, ..] or { "aa": {}, "bb": {}, ...} */ + bool do_one; /* { "Name": "aa", "Description": "test, ... } */ + bool do_only_data; /* [ {}, {}, {}, ] */ + char *resource_type; + char *resource_name; + regex_t directive_reg; +}; + +static void sendit(void *sock, const char *fmt, ...) +{ + char buf[3000]; + va_list arg_ptr; + + va_start(arg_ptr, fmt); + bvsnprintf(buf, sizeof(buf), (char *)fmt, arg_ptr); + va_end(arg_ptr); + fputs(buf, stdout); + fflush(stdout); +} + +void init_hpkt(HPKT &hpkt) +{ + memset(&hpkt, 0, sizeof(hpkt)); + hpkt.edbuf = get_pool_memory(PM_EMSG); + hpkt.edbuf2 = get_pool_memory(PM_EMSG); + hpkt.json = true; + hpkt.hfunc = HF_DISPLAY; + hpkt.sendit = sendit; +} + +void term_hpkt(HPKT &hpkt) +{ + free_pool_memory(hpkt.edbuf); + free_pool_memory(hpkt.edbuf2); + memset(&hpkt, 0, sizeof(hpkt)); +} + +/* + * Strip long options out of fo->opts string so that + * they will not give us false matches for regular + * 1 or 2 character options. 
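   /* Example use of the bit macros above (bit numbers are base 0); the choice
    * of 100 bits is arbitrary: */
   char seen[nbytes_for_bits(100)];         /* room for bits 0..99 */
   clear_all_bits(100, seen);
   set_bit(42, seen);
   if (bit_is_set(42, seen)) {
      clear_bit(42, seen);
   }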
+ */ +void strip_long_opts(char *out, const char *in) +{ + const char *p; + for (p=in; *p; p++) { + switch (*p) { + /* V, C, J, and P are long options, skip them */ + case 'V': + case 'C': + case 'J': + case 'P': + while (*p != ':') { + p++; /* skip to after : */ + } + break; + /* Copy everything else */ + default: + *out++ = *p; + break; + } + } + *out = 0; /* terminate string */ +} + +void edit_alist(HPKT &hpkt) +{ + bool f = true; + char *citem; + + pm_strcpy(hpkt.edbuf, " ["); + foreach_alist(citem, hpkt.list) { + if (!f) { + pm_strcat(hpkt.edbuf, ", "); + } + pm_strcat(hpkt.edbuf, quote_string(hpkt.edbuf2, citem)); + f = false; + } + pm_strcat(hpkt.edbuf, "]"); +} + +void edit_msg_types(HPKT &hpkt, DEST *dest) +{ + int i, j, count = 0; + bool first_type = true; + bool found; + + pm_strcpy(hpkt.edbuf, "["); + for (i=1; imsg_types)) { + found = false; + if (!first_type) pm_strcat(hpkt.edbuf, ","); + first_type = false; + for (j=0; msg_types[j].name; j++) { + if ((int)msg_types[j].token == i) { + pm_strcat(hpkt.edbuf, "\""); + pm_strcat(hpkt.edbuf, msg_types[j].name); + pm_strcat(hpkt.edbuf, "\""); + found = true; + break; + } + } + if (!found) { + sendit(NULL, "No find for type=%d\n", i); + } + count++; + } + } + /* + * Note, if we have more than half of the total items, + * redo using All and !item, which will give fewer items + * total. + */ + if (count > M_MAX/2) { + pm_strcpy(hpkt.edbuf, "[\"All\""); + for (i=1; imsg_types)) { + found = false; + pm_strcat(hpkt.edbuf, ","); + for (j=0; msg_types[j].name; j++) { + if ((int)msg_types[j].token == i) { + pm_strcat(hpkt.edbuf, "\"!"); + pm_strcat(hpkt.edbuf, msg_types[j].name); + pm_strcat(hpkt.edbuf, "\""); + found = true; + break; + } + } + if (!found) { + sendit(NULL, "No find for type=%d in second loop\n", i); + } + } else if (i == M_SAVED) { + /* Saved is not set by default, users must explicitly use it + * on the configuration line + */ + pm_strcat(hpkt.edbuf, ",\"Saved\""); + } + } + } + pm_strcat(hpkt.edbuf, "]"); +} + +bool display_global_item(HPKT &hpkt) +{ + bool found = true; + + if (hpkt.ritem->handler == store_res) { + display_res(hpkt); + } else if (hpkt.ritem->handler == store_str || + hpkt.ritem->handler == store_name || + hpkt.ritem->handler == store_password || + hpkt.ritem->handler == store_strname || + hpkt.ritem->handler == store_dir) { + display_string_pair(hpkt); + } else if (hpkt.ritem->handler == store_int32 || + hpkt.ritem->handler == store_pint32 || + hpkt.ritem->handler == store_size32) { + display_int32_pair(hpkt); + } else if (hpkt.ritem->handler == store_size64 || + hpkt.ritem->handler == store_int64 || + hpkt.ritem->handler == store_time || + hpkt.ritem->handler == store_speed) { + display_int64_pair(hpkt); + } else if (hpkt.ritem->handler == store_bool) { + display_bool_pair(hpkt); + } else if (hpkt.ritem->handler == store_msgs) { + display_msgs(hpkt); + } else if (hpkt.ritem->handler == store_bit) { + display_bit_pair(hpkt); + } else if (hpkt.ritem->handler == store_alist_res) { + found = display_alist_res(hpkt); /* In some cases, the list is null... */ + } else if (hpkt.ritem->handler == store_alist_str) { + found = display_alist_str(hpkt); /* In some cases, the list is null... 
*/ + } else { + found = false; + } + + return found; +} + +/* + * Called here for each store_msgs resource + */ +void display_msgs(HPKT &hpkt) +{ + MSGS *msgs = (MSGS *)hpkt.ritem->value; /* Message res */ + DEST *dest; /* destination chain */ + int first = true; + + if (!hpkt.in_store_msg) { + hpkt.in_store_msg = true; + sendit(NULL, "\n \"Destinations\": ["); + } + for (dest=msgs->dest_chain; dest; dest=dest->next) { + if (dest->dest_code == hpkt.ritem->code) { + if (!first) sendit(NULL, ","); + first = false; + edit_msg_types(hpkt, dest); + switch (hpkt.ritem->code) { + /* Output only message types */ + case MD_STDOUT: + case MD_STDERR: + case MD_SYSLOG: + case MD_CONSOLE: + case MD_CATALOG: + sendit(NULL, "\n {\n \"Type\": \"%s\"," + "\n \"MsgTypes\": %s\n }", + hpkt.ritem->name, hpkt.edbuf); + break; + /* Output MsgTypes, Where */ + case MD_DIRECTOR: + case MD_FILE: + case MD_APPEND: + sendit(NULL, "\n {\n \"Type\": \"%s\"," + "\n \"MsgTypes\": %s,\n", + hpkt.ritem->name, hpkt.edbuf); + sendit(NULL, " \"Where\": [%s]\n }", + quote_where(hpkt.edbuf, dest->where)); + break; + /* Now we edit MsgTypes, Where, and Command */ + case MD_MAIL: + case MD_OPERATOR: + case MD_MAIL_ON_ERROR: + case MD_MAIL_ON_SUCCESS: + sendit(NULL, "\n {\n \"Type\": \"%s\"," + "\n \"MsgTypes\": %s,\n", + hpkt.ritem->name, hpkt.edbuf); + sendit(NULL, " \"Where\": [%s],\n", + quote_where(hpkt.edbuf, dest->where)); + sendit(NULL, " \"Command\": %s\n }", + quote_string(hpkt.edbuf, dest->mail_cmd)); + break; + } + } + } +} + +/* + * Called here if the ITEM_LAST is set in flags, + * that means there are no more items to examine + * for this resource and that we can close any + * open json list. + */ +void display_last(HPKT &hpkt) +{ + if (hpkt.in_store_msg) { + hpkt.in_store_msg = false; + sendit(NULL, "\n ]"); + } +} + +void display_alist(HPKT &hpkt) +{ + edit_alist(hpkt); + sendit(NULL, "%s", hpkt.edbuf); +} + +bool display_alist_str(HPKT &hpkt) +{ + hpkt.list = (alist *)(*(hpkt.ritem->value)); + if (!hpkt.list) { + return false; + } + sendit(NULL, "\n \"%s\":", hpkt.ritem->name); + display_alist(hpkt); + return true; +} + +bool display_alist_res(HPKT &hpkt) +{ + bool f = true; + alist *list; + RES *res; + + list = (alist *)(*(hpkt.ritem->value)); + if (!list) { + return false; + } + sendit(NULL, "\n \"%s\":", hpkt.ritem->name); + sendit(NULL, " ["); + foreach_alist(res, list) { + if (!f) { + sendit(NULL, ", "); + } + sendit(NULL, "%s", quote_string(hpkt.edbuf, res->name)); + f = false; + } + sendit(NULL, "]"); + return true; +} + +void display_res(HPKT &hpkt) +{ + RES *res; + + res = (RES *)*hpkt.ritem->value; + sendit(NULL, "\n \"%s\": %s", hpkt.ritem->name, + quote_string(hpkt.edbuf, res->name)); +} + +void display_string_pair(HPKT &hpkt) +{ + sendit(NULL, "\n \"%s\": %s", hpkt.ritem->name, + quote_string(hpkt.edbuf, *hpkt.ritem->value)); +} + +void display_int32_pair(HPKT &hpkt) +{ + char ed1[50]; + sendit(NULL, "\n \"%s\": %s", hpkt.ritem->name, + edit_int64(*(int32_t *)hpkt.ritem->value, ed1)); +} + +void display_int64_pair(HPKT &hpkt) +{ + char ed1[50]; + sendit(NULL, "\n \"%s\": %s", hpkt.ritem->name, + edit_int64(*(int64_t *)hpkt.ritem->value, ed1)); +} + +void display_bool_pair(HPKT &hpkt) +{ + sendit(NULL, "\n \"%s\": %s", hpkt.ritem->name, + ((*(bool *)(hpkt.ritem->value)) == 0)?"false":"true"); +} + +void display_bit_pair(HPKT &hpkt) +{ + sendit(NULL, "\n \"%s\": %s", hpkt.ritem->name, + ((*(uint32_t *)(hpkt.ritem->value) & hpkt.ritem->code) + == 0)?"false":"true"); +} + +bool byte_is_set(char *byte, 
int num) +{ + int i; + bool found = false; + for (i=0; i +#endif +#include + +/* + * Structures and definitions for mag tape io control commands + */ + +/* structure for MTIOCTOP - mag tape op command */ +struct mtop { + short mt_op; /* operations defined below */ + int32_t mt_count; /* how many of them */ +}; + +/* operations */ +#define MTWEOF 0 /* write an end-of-file record */ +#define MTFSF 1 /* forward space file */ +#define MTBSF 2 /* backward space file */ +#define MTFSR 3 /* forward space record */ +#define MTBSR 4 /* backward space record */ +#define MTREW 5 /* rewind */ +#define MTOFFL 6 /* rewind and put the drive offline */ +#define MTNOP 7 /* no operation, sets status only */ +#define MTCACHE 8 /* enable controller cache */ +#define MTNOCACHE 9 /* disable controller cache */ + +#if defined(__FreeBSD__) +/* Set block size for device. If device is a variable size dev */ +/* a non zero parameter will change the device to a fixed block size */ +/* device with block size set to that of the parameter passed in. */ +/* Resetting the block size to 0 will restore the device to a variable */ +/* block size device. */ + +#define MTSETBSIZ 10 + +/* Set density values for device. Sets the value for the opened mode only. */ + +#define MTSETDNSTY 11 + +#define MTERASE 12 /* erase to EOM */ +#define MTEOD 13 /* Space to EOM */ +#define MTCOMP 14 /* select compression mode 0=off, 1=def */ +#define MTRETENS 15 /* re-tension tape */ +#define MTWSS 16 /* write setmark(s) */ +#define MTFSS 17 /* forward space setmark */ +#define MTBSS 18 /* backward space setmark */ + +#define MT_COMP_ENABLE 0xffffffff +#define MT_COMP_DISABLED 0xfffffffe +#define MT_COMP_UNSUPP 0xfffffffd + +/* + * Values in mt_dsreg that say what the device is doing + */ +#define MTIO_DSREG_NIL 0 /* Unknown */ +#define MTIO_DSREG_REST 1 /* Doing Nothing */ +#define MTIO_DSREG_RBSY 2 /* Communicating with tape (but no motion) */ +#define MTIO_DSREG_WR 20 /* Writing */ +#define MTIO_DSREG_FMK 21 /* Writing Filemarks */ +#define MTIO_DSREG_ZER 22 /* Erasing */ +#define MTIO_DSREG_RD 30 /* Reading */ +#define MTIO_DSREG_FWD 40 /* Spacing Forward */ +#define MTIO_DSREG_REV 41 /* Spacing Reverse */ +#define MTIO_DSREG_POS 42 /* Hardware Positioning (direction unknown) */ +#define MTIO_DSREG_REW 43 /* Rewinding */ +#define MTIO_DSREG_TEN 44 /* Retensioning */ +#define MTIO_DSREG_UNL 45 /* Unloading */ +#define MTIO_DSREG_LD 46 /* Loading */ + +#endif /* __FreeBSD__ */ + +/* structure for MTIOCGET - mag tape get status command */ + +struct mtget { + short mt_type; /* type of magtape device */ +/* the following two registers are grossly device dependent */ + short mt_dsreg; /* ``drive status'' register */ + short mt_erreg; /* ``error'' register */ +/* end device-dependent registers */ + /* + * Note that the residual count, while maintained, may be + * be nonsense because the size of the residual may (greatly) + * exceed 32 K-bytes. Use the MTIOCERRSTAT ioctl to get a + * more accurate count. 
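
The mtop structure and operation codes above are driven with a plain ioctl(2). A minimal sketch of issuing a rewind and a forward-space-file follows; the available op codes differ between platforms and the device path /dev/nst0 is only an example, not something this patch defines.

/* Sketch: rewind a tape and space forward one filemark with MTIOCTOP.
 * Assumes a POSIX system whose <sys/mtio.h> provides struct mtop,
 * MTREW and MTFSF as listed above. */
#include <sys/ioctl.h>
#include <sys/mtio.h>
#include <fcntl.h>
#include <unistd.h>
#include <cstdio>

int main()
{
   int fd = open("/dev/nst0", O_RDONLY);   /* example device name only */
   if (fd < 0) { perror("open"); return 1; }

   struct mtop op;
   op.mt_op = MTREW;                       /* rewind */
   op.mt_count = 1;
   if (ioctl(fd, MTIOCTOP, &op) < 0) perror("MTREW");

   op.mt_op = MTFSF;                       /* forward space one filemark */
   op.mt_count = 1;
   if (ioctl(fd, MTIOCTOP, &op) < 0) perror("MTFSF");

   close(fd);
   return 0;
}
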
+ */ + short mt_resid; /* residual count */ +#if defined (__FreeBSD__) + int32_t mt_blksiz; /* presently operating blocksize */ + int32_t mt_density; /* presently operating density */ + u_int32_t mt_comp; /* presently operating compression */ + int32_t mt_blksiz0; /* blocksize for mode 0 */ + int32_t mt_blksiz1; /* blocksize for mode 1 */ + int32_t mt_blksiz2; /* blocksize for mode 2 */ + int32_t mt_blksiz3; /* blocksize for mode 3 */ + int32_t mt_density0; /* density for mode 0 */ + int32_t mt_density1; /* density for mode 1 */ + int32_t mt_density2; /* density for mode 2 */ + int32_t mt_density3; /* density for mode 3 */ +/* the following are not yet implemented */ + u_int32_t mt_comp0; /* compression type for mode 0 */ + u_int32_t mt_comp1; /* compression type for mode 1 */ + u_int32_t mt_comp2; /* compression type for mode 2 */ + u_int32_t mt_comp3; /* compression type for mode 3 */ +/* end not yet implemented */ +#endif + int32_t mt_fileno; /* relative file number of current position */ + int32_t mt_blkno; /* relative block number of current position */ +}; + +/* structure for MTIOCERRSTAT - tape get error status command */ +/* really only supported for SCSI tapes right now */ +struct scsi_tape_errors { + /* + * These are latched from the last command that had a SCSI + * Check Condition noted for these operations. The act + * of issuing an MTIOCERRSTAT unlatches and clears them. + */ + u_int8_t io_sense[32]; /* Last Sense Data For Data I/O */ + int32_t io_resid; /* residual count from last Data I/O */ + u_int8_t io_cdb[16]; /* Command that Caused the Last Data Sense */ + u_int8_t ctl_sense[32]; /* Last Sense Data For Control I/O */ + int32_t ctl_resid; /* residual count from last Control I/O */ + u_int8_t ctl_cdb[16]; /* Command that Caused the Last Control Sense */ + /* + * These are the read and write cumulative error counters. + * (how to reset cumulative error counters is not yet defined). + * (not implemented as yet but space is being reserved for them) + */ + struct { + u_int32_t retries; /* total # retries performed */ + u_int32_t corrected; /* total # corrections performed */ + u_int32_t processed; /* total # corrections successful */ + u_int32_t failures; /* total # corrections/retries failed */ + u_int64_t nbytes; /* total # bytes processed */ + } wterr, rderr; +}; + +union mterrstat { + struct scsi_tape_errors scsi_errstat; + char _reserved_padding[256]; +}; + +/* + * Constants for mt_type byte. These are the same + * for controllers compatible with the types listed. 
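
As a companion to the structure above, the status side is read back with MTIOCGET. The sketch below prints only members that are common across platforms (the FreeBSD-only fields are omitted); the device path is again just an example.

#include <sys/ioctl.h>
#include <sys/mtio.h>
#include <fcntl.h>
#include <unistd.h>
#include <cstdio>

int main()
{
   int fd = open("/dev/nst0", O_RDONLY);   /* example device name only */
   if (fd < 0) { perror("open"); return 1; }

   struct mtget st;
   if (ioctl(fd, MTIOCGET, &st) < 0) {
      perror("MTIOCGET");
   } else {
      /* mt_fileno/mt_blkno: position relative to the beginning of tape */
      printf("type=0x%x fileno=%ld blkno=%ld\n",
             (unsigned)st.mt_type, (long)st.mt_fileno, (long)st.mt_blkno);
   }
   close(fd);
   return 0;
}
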
+ */ +#define MT_ISTS 0x01 /* TS-11 */ +#define MT_ISHT 0x02 /* TM03 Massbus: TE16, TU45, TU77 */ +#define MT_ISTM 0x03 /* TM11/TE10 Unibus */ +#define MT_ISMT 0x04 /* TM78/TU78 Massbus */ +#define MT_ISUT 0x05 /* SI TU-45 emulation on Unibus */ +#define MT_ISCPC 0x06 /* SUN */ +#define MT_ISAR 0x07 /* SUN */ +#define MT_ISTMSCP 0x08 /* DEC TMSCP protocol (TU81, TK50) */ +#define MT_ISCY 0x09 /* CCI Cipher */ +#define MT_ISCT 0x0a /* HP 1/4 tape */ +#define MT_ISFHP 0x0b /* HP 7980 1/2 tape */ +#define MT_ISEXABYTE 0x0c /* Exabyte */ +#define MT_ISEXA8200 0x0c /* Exabyte EXB-8200 */ +#define MT_ISEXA8500 0x0d /* Exabyte EXB-8500 */ +#define MT_ISVIPER1 0x0e /* Archive Viper-150 */ +#define MT_ISPYTHON 0x0f /* Archive Python (DAT) */ +#define MT_ISHPDAT 0x10 /* HP 35450A DAT drive */ +#define MT_ISMFOUR 0x11 /* M4 Data 1/2 9track drive */ +#define MT_ISTK50 0x12 /* DEC SCSI TK50 */ +#define MT_ISMT02 0x13 /* Emulex MT02 SCSI tape controller */ + +/* mag tape io control commands */ +#define MTIOCTOP _IOW('m', 1, struct mtop) /* do a mag tape op */ +#define MTIOCGET _IOR('m', 2, struct mtget) /* get tape status */ +/* these two do not appear to be used anywhere */ +#define MTIOCIEOT _IO('m', 3) /* ignore EOT error */ +#define MTIOCEEOT _IO('m', 4) /* enable EOT error */ +/* + * When more SCSI-3 SSC (streaming device) devices are out there + * that support the full 32 byte type 2 structure, we'll have to + * rethink these ioctls to support all the entities they haul into + * the picture (64 bit blocks, logical file record numbers, etc..). + */ +#define MTIOCRDSPOS _IOR('m', 5, u_int32_t) /* get logical blk addr */ +#define MTIOCRDHPOS _IOR('m', 6, u_int32_t) /* get hardware blk addr */ +#define MTIOCSLOCATE _IOW('m', 5, u_int32_t) /* seek to logical blk addr */ +#define MTIOCHLOCATE _IOW('m', 6, u_int32_t) /* seek to hardware blk addr */ +#define MTIOCERRSTAT _IOR('m', 7, union mterrstat) /* get tape errors */ +/* + * Set EOT model- argument is number of filemarks to end a tape with. + * Note that not all possible values will be accepted. + */ +#define MTIOCSETEOTMODEL _IOW('m', 8, u_int32_t) +/* Get current EOT model */ +#define MTIOCGETEOTMODEL _IOR('m', 8, u_int32_t) + +#ifndef _KERNEL +#define DEFTAPE "/dev/nsa0" +#endif + +#endif /* !_SYS_MTIO_H_ */ diff --git a/src/lib/bnet.c b/src/lib/bnet.c new file mode 100644 index 00000000..0d2ad257 --- /dev/null +++ b/src/lib/bnet.c @@ -0,0 +1,494 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Network Utility Routines + * + * by Kern Sibbald + * + * Adapted and enhanced for Bacula, originally written + * for inclusion in the Apcupsd package + * + */ + + +#include "bacula.h" +#include "jcr.h" +#include + +#ifndef INADDR_NONE +#define INADDR_NONE -1 +#endif + +#ifdef HAVE_WIN32 +#undef inet_pton +#define inet_pton binet_pton +#define socketRead(fd, buf, len) recv(fd, buf, len, 0) +#define socketWrite(fd, buf, len) send(fd, buf, len, 0) +#define socketClose(fd) closesocket(fd) +#else +#define socketRead(fd, buf, len) read(fd, buf, len) +#define socketWrite(fd, buf, len) write(fd, buf, len) +#define socketClose(fd) close(fd) +#endif + +#ifndef HAVE_GETADDRINFO +static pthread_mutex_t ip_mutex = PTHREAD_MUTEX_INITIALIZER; +#endif + +/* + * Establish a TLS connection -- server side + * Returns: true on success + * false on failure + */ +#ifdef HAVE_TLS +bool bnet_tls_server(TLS_CONTEXT *ctx, BSOCK * bsock, alist *verify_list) +{ + TLS_CONNECTION *tls; + JCR *jcr = bsock->jcr(); + + tls = new_tls_connection(ctx, bsock->m_fd); + if (!tls) { + Qmsg0(bsock->jcr(), M_FATAL, 0, _("TLS connection initialization failed.\n")); + return false; + } + + bsock->tls = tls; + + /* Initiate TLS Negotiation */ + if (!tls_bsock_accept(bsock)) { + Qmsg0(bsock->jcr(), M_FATAL, 0, _("TLS Negotiation failed.\n")); + goto err; + } + + if (verify_list) { + if (!tls_postconnect_verify_cn(jcr, tls, verify_list)) { + Qmsg1(bsock->jcr(), M_FATAL, 0, _("TLS certificate verification failed." + " Peer certificate did not match a required commonName\n"), + bsock->host()); + goto err; + } + } + Dmsg0(50, "TLS server negotiation established.\n"); + return true; + +err: + free_tls_connection(tls); + bsock->tls = NULL; + return false; +} + +/* + * Establish a TLS connection -- client side + * Returns: true on success + * false on failure + */ +bool bnet_tls_client(TLS_CONTEXT *ctx, BSOCK *bsock, alist *verify_list) +{ + TLS_CONNECTION *tls; + JCR *jcr = bsock->jcr(); + + tls = new_tls_connection(ctx, bsock->m_fd); + if (!tls) { + Qmsg0(bsock->jcr(), M_FATAL, 0, _("TLS connection initialization failed.\n")); + return false; + } + + bsock->tls = tls; + + /* Initiate TLS Negotiation */ + if (!tls_bsock_connect(bsock)) { + goto err; + } + + /* If there's an Allowed CN verify list, use that to validate the remote + * certificate's CN. Otherwise, we use standard host/CN matching. */ + if (verify_list) { + if (!tls_postconnect_verify_cn(jcr, tls, verify_list)) { + Qmsg1(bsock->jcr(), M_FATAL, 0, _("TLS certificate verification failed." + " Peer certificate did not match a required commonName\n"), + bsock->host()); + goto err; + } + } else if (!tls_postconnect_verify_host(jcr, tls, bsock->host())) { + /* If host is 127.0.0.1, try localhost */ + if (strcmp(bsock->host(), "127.0.0.1") != 0 || + !tls_postconnect_verify_host(jcr, tls, "localhost")) { + Qmsg1(bsock->jcr(), M_FATAL, 0, _("TLS host certificate verification failed. 
Host name \"%s\" did not match presented certificate\n"), + bsock->host()); + goto err; + } + } + Dmsg0(50, "TLS client negotiation established.\n"); + return true; + +err: + free_tls_connection(tls); + bsock->tls = NULL; + return false; +} +#else + +bool bnet_tls_server(TLS_CONTEXT *ctx, BSOCK * bsock, alist *verify_list) +{ + Jmsg(bsock->jcr(), M_ABORT, 0, _("TLS enabled but not configured.\n")); + return false; +} + +bool bnet_tls_client(TLS_CONTEXT *ctx, BSOCK * bsock, alist *verify_list) +{ + Jmsg(bsock->jcr(), M_ABORT, 0, _("TLS enable but not configured.\n")); + return false; +} + +#endif /* HAVE_TLS */ + +#ifndef NETDB_INTERNAL +#define NETDB_INTERNAL -1 /* See errno. */ +#endif +#ifndef NETDB_SUCCESS +#define NETDB_SUCCESS 0 /* No problem. */ +#endif +#ifndef HOST_NOT_FOUND +#define HOST_NOT_FOUND 1 /* Authoritative Answer Host not found. */ +#endif +#ifndef TRY_AGAIN +#define TRY_AGAIN 2 /* Non-Authoritative Host not found, or SERVERFAIL. */ +#endif +#ifndef NO_RECOVERY +#define NO_RECOVERY 3 /* Non recoverable errors, FORMERR, REFUSED, NOTIMP. */ +#endif +#ifndef NO_DATA +#define NO_DATA 4 /* Valid name, no data record of requested type. */ +#endif + +#if defined(HAVE_GETADDRINFO) +/* + * getaddrinfo.c - Simple example of using getaddrinfo(3) function. + * + * Michal Ludvig (c) 2002, 2003 + * http://www.logix.cz/michal/devel/ + * + * License: public domain. + */ +const char *resolv_host(int family, const char *host, dlist *addr_list) +{ + IPADDR *ipaddr; + struct addrinfo hints, *res, *rp; + int errcode; + //char addrstr[100]; + void *ptr; + + memset (&hints, 0, sizeof(hints)); + hints.ai_family = family; + hints.ai_socktype = SOCK_STREAM; + //hints.ai_flags |= AI_CANONNAME; + + errcode = getaddrinfo (host, NULL, &hints, &res); + if (errcode != 0) return gai_strerror(errcode); + + for (rp=res; res; res=res->ai_next) { + //inet_ntop (res->ai_family, res->ai_addr->sa_data, addrstr, 100); + switch (res->ai_family) { + case AF_INET: + ipaddr = New(IPADDR(rp->ai_addr->sa_family)); + ipaddr->set_type(IPADDR::R_MULTIPLE); + ptr = &((struct sockaddr_in *) res->ai_addr)->sin_addr; + ipaddr->set_addr4((in_addr *)ptr); + break; +#if defined(HAVE_IPV6) + case AF_INET6: + ipaddr = New(IPADDR(rp->ai_addr->sa_family)); + ipaddr->set_type(IPADDR::R_MULTIPLE); + ptr = &((struct sockaddr_in6 *) res->ai_addr)->sin6_addr; + ipaddr->set_addr6((in6_addr *)ptr); + break; +#endif + default: + continue; + } + //inet_ntop (res->ai_family, ptr, addrstr, 100); + //Pmsg3(000, "IPv%d address: %s (%s)\n", res->ai_family == PF_INET6 ? 6 : 4, + // addrstr, res->ai_canonname); + addr_list->append(ipaddr); + } + freeaddrinfo(rp); + return NULL; +} + +#else + +/* + * Get human readable error for gethostbyname() + */ +static const char *gethost_strerror() +{ + const char *msg; + berrno be; + switch (h_errno) { + case NETDB_INTERNAL: + msg = be.bstrerror(); + break; + case NETDB_SUCCESS: + msg = _("No problem."); + break; + case HOST_NOT_FOUND: + msg = _("Authoritative answer for host not found."); + break; + case TRY_AGAIN: + msg = _("Non-authoritative for host not found, or ServerFail."); + break; + case NO_RECOVERY: + msg = _("Non-recoverable errors, FORMERR, REFUSED, or NOTIMP."); + break; + case NO_DATA: + msg = _("Valid name, no data record of resquested type."); + break; + default: + msg = _("Unknown error."); + } + return msg; +} + +/* + * Note: this is the old way of resolving a host + * that does not use the new getaddrinfo() above. 
+ */ +static const char *resolv_host(int family, const char *host, dlist * addr_list) +{ + struct hostent *hp; + const char *errmsg; + + P(ip_mutex); /* gethostbyname() is not thread safe */ +#ifdef HAVE_GETHOSTBYNAME2 + if ((hp = gethostbyname2(host, family)) == NULL) { +#else + if ((hp = gethostbyname(host)) == NULL) { +#endif + /* may be the strerror give not the right result -:( */ + errmsg = gethost_strerror(); + V(ip_mutex); + return errmsg; + } else { + char **p; + for (p = hp->h_addr_list; *p != 0; p++) { + IPADDR *addr = New(IPADDR(hp->h_addrtype)); + addr->set_type(IPADDR::R_MULTIPLE); + if (addr->get_family() == AF_INET) { + addr->set_addr4((struct in_addr*)*p); + } +#ifdef HAVE_IPV6 + else { + addr->set_addr6((struct in6_addr*)*p); + } +#endif + addr_list->append(addr); + } + V(ip_mutex); + } + return NULL; +} +#endif + +static IPADDR *add_any(int family) +{ + IPADDR *addr = New(IPADDR(family)); + addr->set_type(IPADDR::R_MULTIPLE); + addr->set_addr_any(); + return addr; +} + +/* + * i host = 0 means INADDR_ANY only for IPv4 + */ +dlist *bnet_host2ipaddrs(const char *host, int family, const char **errstr) +{ + struct in_addr inaddr; + IPADDR *addr = 0; + const char *errmsg; +#ifdef HAVE_IPV6 + struct in6_addr inaddr6; +#endif + + dlist *addr_list = New(dlist(addr, &addr->link)); + if (!host || host[0] == '\0') { + if (family != 0) { + addr_list->append(add_any(family)); + } else { + addr_list->append(add_any(AF_INET)); +#ifdef HAVE_IPV6 + addr_list->append(add_any(AF_INET6)); +#endif + } + } else if (inet_aton(host, &inaddr)) { /* MA Bug 4 */ + addr = New(IPADDR(AF_INET)); + addr->set_type(IPADDR::R_MULTIPLE); + addr->set_addr4(&inaddr); + addr_list->append(addr); +#ifdef HAVE_IPV6 + } else if (inet_pton(AF_INET6, host, &inaddr6) == 1) { + addr = New(IPADDR(AF_INET6)); + addr->set_type(IPADDR::R_MULTIPLE); + addr->set_addr6(&inaddr6); + addr_list->append(addr); +#endif + } else { + if (family != 0) { + errmsg = resolv_host(family, host, addr_list); + if (errmsg) { + *errstr = errmsg; + free_addresses(addr_list); + return 0; + } + } else { +#ifdef HAVE_IPV6 + /* We try to resolv host for ipv6 and ipv4, the connection procedure + * will try to reach the host for each protocols. We report only "Host + * not found" ipv4 message (no need to have ipv6 and ipv4 messages). + */ + resolv_host(AF_INET6, host, addr_list); +#endif + errmsg = resolv_host(AF_INET, host, addr_list); + + if (addr_list->size() == 0) { + *errstr = errmsg; + free_addresses(addr_list); + return 0; + } + } + } + return addr_list; +} + +/* + * Convert a network "signal" code into + * human readable ASCII. 
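
bnet_host2ipaddrs() above first tries the string as a numeric IPv4 (inet_aton) or IPv6 (inet_pton) literal and only falls back to name resolution when both fail. A standalone version of that classification step might look like the sketch below; it uses inet_pton() for both families, which, unlike inet_aton(), rejects shorthand forms such as "127.1", so it is a simplification rather than a drop-in equivalent.

#include <arpa/inet.h>
#include <cstdio>

/* Returns AF_INET or AF_INET6 for a numeric literal, 0 for "resolve me". */
static int classify_host(const char *host)
{
   struct in_addr a4;
   struct in6_addr a6;
   if (inet_pton(AF_INET, host, &a4) == 1)  return AF_INET;
   if (inet_pton(AF_INET6, host, &a6) == 1) return AF_INET6;
   return 0;
}

int main()
{
   const char *samples[] = {"127.0.0.1", "::1", "backup.example.com"};
   for (const char *s : samples) {
      int f = classify_host(s);
      printf("%-20s -> %s\n", s,
             f == AF_INET ? "IPv4 literal" :
             f == AF_INET6 ? "IPv6 literal" : "hostname (resolve)");
   }
   return 0;
}
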
+ */ +const char *bnet_sig_to_ascii(int32_t msglen) +{ + static char buf[30]; + switch (msglen) { + case BNET_EOD: + return "BNET_EOD"; /* End of data stream, new data may follow */ + case BNET_EOD_POLL: + return "BNET_EOD_POLL"; /* End of data and poll all in one */ + case BNET_STATUS: + return "BNET_STATUS"; /* Send full status */ + case BNET_TERMINATE: + return "BNET_TERMINATE"; /* Conversation terminated, doing close() */ + case BNET_POLL: + return "BNET_POLL"; /* Poll request, I'm hanging on a read */ + case BNET_HEARTBEAT: + return "BNET_HEARTBEAT"; /* Heartbeat Response requested */ + case BNET_HB_RESPONSE: + return "BNET_HB_RESPONSE"; /* Only response permited to HB */ + case BNET_BTIME: + return "BNET_BTIME"; /* Send UTC btime */ + case BNET_BREAK: + return "BNET_BREAK"; /* Stop current command -- ctl-c */ + case BNET_START_SELECT: + return "BNET_START_SELECT"; /* Start of a selection list */ + case BNET_END_SELECT: + return "BNET_END_SELECT"; /* End of a select list */ + case BNET_INVALID_CMD: + return "BNET_INVALID_CMD"; /* Invalid command sent */ + case BNET_CMD_FAILED: + return "BNET_CMD_FAILED"; /* Command failed */ + case BNET_CMD_OK: + return "BNET_CMD_OK"; /* Command succeeded */ + case BNET_CMD_BEGIN: + return "BNET_CMD_BEGIN"; /* Start command execution */ + case BNET_MSGS_PENDING: + return "BNET_MSGS_PENDING"; /* Messages pending */ + case BNET_MAIN_PROMPT: + return "BNET_MAIN_PROMPT"; /* Server ready and waiting */ + case BNET_SELECT_INPUT: + return "BNET_SELECT_INPUT"; /* Return selection input */ + case BNET_WARNING_MSG: + return "BNET_WARNING_MSG"; /* Warning message */ + case BNET_ERROR_MSG: + return "BNET_ERROR_MSG"; /* Error message -- command failed */ + case BNET_INFO_MSG: + return "BNET_INFO_MSG"; /* Info message -- status line */ + case BNET_RUN_CMD: + return "BNET_RUN_CMD"; /* Run command follows */ + case BNET_YESNO: + return "BNET_YESNO"; /* Request yes no response */ + case BNET_START_RTREE: + return "BNET_START_RTREE"; /* Start restore tree mode */ + case BNET_END_RTREE: + return "BNET_END_RTREE"; /* End restore tree mode */ + case BNET_SUB_PROMPT: + return "BNET_SUB_PROMPT"; /* Indicate we are at a subprompt */ + case BNET_TEXT_INPUT: + return "BNET_TEXT_INPUT"; /* Get text input from user */ + case BNET_EXT_TERMINATE: + return "BNET_EXT_TERMINATE"; /* A Terminate condition has been met and + already reported somewhere else */ + case BNET_FDCALLED: + return "BNET_FDCALLED"; /* The FD should keep the connection for a new job */ + default: + bsnprintf(buf, sizeof(buf), _("Unknown sig %d"), (int)msglen); + return buf; + } +} + +int set_socket_errno(int sockstat) +{ +#ifdef HAVE_WIN32 + /* + * For Windows, we must simulate Unix errno on a socket + * error in order to handle errors correctly. 
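
The switch in bnet_sig_to_ascii() maps each BNET_* signal value to a printable name. The same mapping is sometimes kept as a table so the codes and names sit side by side; the sketch below uses made-up demo codes because the real BNET_* constants are defined elsewhere in the tree and are not reproduced here.

#include <cstdio>

/* Hypothetical codes for illustration only. */
enum { DEMO_EOD = -1, DEMO_TERMINATE = -5, DEMO_HEARTBEAT = -7 };

struct sig_name { int code; const char *name; };

static const sig_name sig_table[] = {
   { DEMO_EOD,       "BNET_EOD" },
   { DEMO_TERMINATE, "BNET_TERMINATE" },
   { DEMO_HEARTBEAT, "BNET_HEARTBEAT" },
   { 0, NULL }
};

static const char *sig_to_ascii(int code)
{
   for (int i = 0; sig_table[i].name; i++) {
      if (sig_table[i].code == code) return sig_table[i].name;
   }
   return "Unknown sig";
}

int main()
{
   printf("%s\n", sig_to_ascii(DEMO_TERMINATE));   /* BNET_TERMINATE */
   printf("%s\n", sig_to_ascii(-42));              /* Unknown sig */
   return 0;
}
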
+ */ + if (sockstat == SOCKET_ERROR) { + berrno be; + DWORD err = WSAGetLastError(); + if (err == WSAEINTR) { + errno = EINTR; + return sockstat; + } else if (err == WSAEWOULDBLOCK) { + errno = EAGAIN; + return sockstat; + } else { + errno = b_errno_win32 | b_errno_WSA; + } + Dmsg2(20, "Socket error: err=%d %s\n", err, be.bstrerror(err)); + } +#else + if (sockstat == SOCKET_ERROR) { + /* Handle errrors from prior connections as EAGAIN */ + switch (errno) { + case ENETDOWN: + case EPROTO: + case ENOPROTOOPT: + case EHOSTDOWN: +#ifdef ENONET + case ENONET: +#endif + case EHOSTUNREACH: + case EOPNOTSUPP: + case ENETUNREACH: + errno = EAGAIN; + break; + default: + break; + } + } +#endif + return sockstat; +} diff --git a/src/lib/bnet_server.c b/src/lib/bnet_server.c new file mode 100644 index 00000000..0d989468 --- /dev/null +++ b/src/lib/bnet_server.c @@ -0,0 +1,251 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + /* + * Originally written by Kern Sibbald for inclusion in apcupsd, + * but heavily modified for Bacula + * + */ + +#include "bacula.h" +#include +#include +#include +#include +#include +#ifdef HAVE_ARPA_NAMESER_H +#include +#endif +#ifdef HAVE_RESOLV_H +//#include +#endif + + +static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; + +#ifdef HAVE_LIBWRAP +#include "tcpd.h" +int allow_severity = LOG_NOTICE; +int deny_severity = LOG_WARNING; +#endif + +static bool quit = false; + +void bnet_stop_thread_server(pthread_t tid) +{ + quit = true; + if (!pthread_equal(tid, pthread_self())) { + pthread_kill(tid, TIMEOUT_SIGNAL); + } +} + + /* + Become Threaded Network Server + This function is able to handle multiple server ips in + ipv4 and ipv6 style. The Addresse are give in a comma + seperated string in bind_addr + In the moment it is inpossible to bind different ports. + */ +void bnet_thread_server(dlist *addrs, int max_clients, + workq_t *client_wq, void *handle_client_request(void *bsock)) +{ + int newsockfd, stat; + socklen_t clilen; + struct sockaddr_storage clientaddr; /* client's address */ + int tlog; + int turnon = 1; +#ifdef HAVE_LIBWRAP + struct request_info request; +#endif + IPADDR *addr; + struct s_sockfd { + dlink link; /* this MUST be the first item */ + int fd; + int port; + } *fd_ptr = NULL; + char buf[128]; + dlist sockfds; + + char allbuf[256 * 10]; + remove_duplicate_addresses(addrs); + Dmsg1(20, "Addresses %s\n", build_addresses_str(addrs, allbuf, sizeof(allbuf))); + /* + * Listen on each address provided. + */ + foreach_dlist(addr, addrs) { + /* Allocate on stack from -- no need to free */ + fd_ptr = (s_sockfd *)alloca(sizeof(s_sockfd)); + fd_ptr->port = addr->get_port_net_order(); + /* + * Open a TCP socket + */ + for (tlog= 60; (fd_ptr->fd=socket(addr->get_family(), SOCK_STREAM, 0)) < 0; tlog -= 10) { + if (tlog <= 0) { + berrno be; + char curbuf[256]; + Emsg3(M_ABORT, 0, _("Cannot open stream socket. ERR=%s. 
Current %s All %s\n"), + be.bstrerror(), + addr->build_address_str(curbuf, sizeof(curbuf)), + build_addresses_str(addrs, allbuf, sizeof(allbuf))); + } + bmicrosleep(10, 0); + } + /* + * Reuse old sockets + */ + if (setsockopt(fd_ptr->fd, SOL_SOCKET, SO_REUSEADDR, (sockopt_val_t)&turnon, + sizeof(turnon)) < 0) { + berrno be; + Emsg1(M_WARNING, 0, _("Cannot set SO_REUSEADDR on socket: %s\n"), + be.bstrerror()); + } + + int tmax = 1 * (60 / 5); /* wait 1 minute max */ + for (tlog = 0; bind(fd_ptr->fd, addr->get_sockaddr(), addr->get_sockaddr_len()) == SOCKET_ERROR; tlog -= 5) { + berrno be; + if (tlog <= 0) { + tlog = 1 * 60; /* Complain every 1 minute */ + Emsg2(M_WARNING, 0, _("Cannot bind port %d: ERR=%s: Retrying ...\n"), + ntohs(fd_ptr->port), be.bstrerror()); + Dmsg2(20, "Cannot bind port %d: ERR=%s: Retrying ...\n", + ntohs(fd_ptr->port), be.bstrerror()); + + } + bmicrosleep(5, 0); + if (--tmax <= 0) { + Emsg2(M_ABORT, 0, _("Cannot bind port %d: ERR=%s.\n"), ntohs(fd_ptr->port), + be.bstrerror()); + Pmsg2(000, "Aborting cannot bind port %d: ERR=%s.\n", ntohs(fd_ptr->port), + be.bstrerror()); + } + } + if (listen(fd_ptr->fd, 50) < 0) { /* tell system we are ready */ + berrno be; + Emsg2(M_ABORT, 0, _("Cannot bind port %d: ERR=%s.\n"), ntohs(fd_ptr->port), + be.bstrerror()); + } else { + sockfds.append(fd_ptr); + } + } + if (sockfds.size() == 0) { + Emsg0(M_ABORT, 0, _("No addr/port found to listen on.\n")); + } + /* Start work queue thread */ + if ((stat = workq_init(client_wq, max_clients, handle_client_request)) != 0) { + berrno be; + be.set_errno(stat); + Emsg1(M_ABORT, 0, _("Could not init client queue: ERR=%s\n"), be.bstrerror()); + } + /* + * Wait for a connection from the client process. + */ + for (; !quit;) { + unsigned int maxfd = 0; + fd_set sockset; + FD_ZERO(&sockset); + foreach_dlist(fd_ptr, &sockfds) { + FD_SET((unsigned)fd_ptr->fd, &sockset); + maxfd = maxfd > (unsigned)fd_ptr->fd ? maxfd : fd_ptr->fd; + } + errno = 0; + if ((stat = select(maxfd + 1, &sockset, NULL, NULL, NULL)) < 0) { + berrno be; /* capture errno */ + if (errno == EINTR) { + continue; + } + Emsg1(M_FATAL, 0, _("Error in select: %s\n"), be.bstrerror()); + break; + } + + foreach_dlist(fd_ptr, &sockfds) { + if (FD_ISSET(fd_ptr->fd, &sockset)) { + /* Got a connection, now accept it. */ + do { + clilen = sizeof(clientaddr); + newsockfd = baccept(fd_ptr->fd, (struct sockaddr *)&clientaddr, &clilen); + newsockfd = set_socket_errno(newsockfd); + } while (newsockfd == SOCKET_ERROR && (errno == EINTR || errno == EAGAIN)); + if (newsockfd == SOCKET_ERROR) { + Dmsg2(20, "Accept=%d errno=%d\n", newsockfd, errno); + continue; + } +#ifdef HAVE_LIBWRAP + P(mutex); /* hosts_access is not thread safe */ + request_init(&request, RQ_DAEMON, my_name, RQ_FILE, newsockfd, 0); + fromhost(&request); + if (!hosts_access(&request)) { + V(mutex); + Qmsg2(NULL, M_SECURITY, 0, + _("Connection from %s:%d refused by hosts.access\n"), + sockaddr_to_ascii((struct sockaddr *)&clientaddr, + sizeof(clientaddr), buf, sizeof(buf)), + sockaddr_get_port((struct sockaddr *)&clientaddr)); + close(newsockfd); + continue; + } + V(mutex); +#endif + + /* + * Receive notification when connection dies. + */ + if (setsockopt(newsockfd, SOL_SOCKET, SO_KEEPALIVE, (sockopt_val_t)&turnon, + sizeof(turnon)) < 0) { + berrno be; + Qmsg1(NULL, M_WARNING, 0, _("Cannot set SO_KEEPALIVE on socket: %s\n"), + be.bstrerror()); + } + + /* see who client is. i.e. who connected to us. 
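
The server loop above multiplexes several listening sockets with select() and retries accept() while errno is EINTR or EAGAIN. Reduced to a single IPv4 listener with plain POSIX calls (no libwrap, no work queue, example port only), the same skeleton is:

#include <sys/select.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>
#include <cerrno>
#include <cstdio>
#include <cstring>

int main()
{
   int lfd = socket(AF_INET, SOCK_STREAM, 0);
   int on = 1;
   setsockopt(lfd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));

   struct sockaddr_in sa;
   memset(&sa, 0, sizeof(sa));
   sa.sin_family = AF_INET;
   sa.sin_port = htons(9102);                 /* example port only */
   sa.sin_addr.s_addr = htonl(INADDR_ANY);
   if (bind(lfd, (struct sockaddr *)&sa, sizeof(sa)) < 0 || listen(lfd, 50) < 0) {
      perror("bind/listen"); return 1;
   }

   for (;;) {
      fd_set rset;
      FD_ZERO(&rset);
      FD_SET(lfd, &rset);
      if (select(lfd + 1, &rset, NULL, NULL, NULL) < 0) {
         if (errno == EINTR) continue;        /* interrupted: retry */
         perror("select"); break;
      }
      int cfd;
      do {
         cfd = accept(lfd, NULL, NULL);
      } while (cfd < 0 && (errno == EINTR || errno == EAGAIN));
      if (cfd < 0) { perror("accept"); continue; }
      setsockopt(cfd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
      /* hand cfd to a worker thread here */
      close(cfd);
   }
   close(lfd);
   return 0;
}
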
*/ + P(mutex); + sockaddr_to_ascii((struct sockaddr *)&clientaddr, sizeof(clientaddr), buf, sizeof(buf)); + V(mutex); + BSOCK *bs; + bs = init_bsock(NULL, newsockfd, "client", buf, + sockaddr_get_port((struct sockaddr *)&clientaddr), + (struct sockaddr *)&clientaddr); + if (bs == NULL) { + Qmsg0(NULL, M_ABORT, 0, _("Could not create client BSOCK.\n")); + } + + /* Queue client to be served */ + if ((stat = workq_add(client_wq, (void *)bs, NULL, 0)) != 0) { + berrno be; + be.set_errno(stat); + bs->destroy(); + Qmsg1(NULL, M_ABORT, 0, _("Could not add job to client queue: ERR=%s\n"), + be.bstrerror()); + } + } + } + } + + /* Cleanup open files and pointers to them */ + while ((fd_ptr = (s_sockfd *)sockfds.first())) { + close(fd_ptr->fd); + sockfds.remove(fd_ptr); /* don't free() item it is on stack */ + } + + /* Stop work queue thread */ + if ((stat = workq_destroy(client_wq)) != 0) { + berrno be; + be.set_errno(stat); + Jmsg1(NULL, M_FATAL, 0, _("Could not destroy client queue: ERR=%s\n"), + be.bstrerror()); + } +} diff --git a/src/lib/bpipe.c b/src/lib/bpipe.c new file mode 100644 index 00000000..20b46f77 --- /dev/null +++ b/src/lib/bpipe.c @@ -0,0 +1,586 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * bpipe.c bi-directional pipe + * + * Kern Sibbald, November MMII + * + */ + + +#include "bacula.h" +#include "jcr.h" + +#ifdef HAVE_GETRLIMIT +#include +#else +/* If not available, use a wrapper that will not use it */ +#define getrlimit(a,b) -1 +#endif + +int execvp_errors[] = { + EACCES, + ENOEXEC, + EFAULT, + EINTR, + E2BIG, + ENAMETOOLONG, + ENOMEM, +#ifndef HAVE_WIN32 + ETXTBSY, +#endif + ENOENT +}; +int num_execvp_errors = (int)(sizeof(execvp_errors)/sizeof(int)); + + +#define MAX_ARGV 100 + +#define MODE_READ 1 +#define MODE_WRITE 2 +#define MODE_SHELL 4 +#define MODE_STDERR 8 + +#if !defined(HAVE_WIN32) +static void build_argc_argv(char *cmd, int *bargc, char *bargv[], int max_arg); +static void set_keepalive(int sockfd); + +void build_sh_argc_argv(char *cmd, int *bargc, char *bargv[], int max_arg) +{ + bargv[0] = (char *)"/bin/sh"; + bargv[1] = (char *)"-c"; + bargv[2] = cmd; + bargv[3] = NULL; + *bargc = 3; +} + +/* + * Run an external program. Optionally wait a specified number + * of seconds. Program killed if wait exceeded. We open + * a bi-directional pipe so that the user can read from and + * write to the program. + */ +BPIPE *open_bpipe(char *prog, int wait, const char *mode, char *envp[]) +{ + char *bargv[MAX_ARGV]; + int bargc, i; + int readp[2], writep[2], errp[2]; + POOLMEM *tprog; + int mode_map = 0; + BPIPE *bpipe; + int save_errno; + struct rlimit rl; + int64_t rlimitResult=0; + + if (!prog || !*prog) { + /* execve(3) A component of the file does not name an existing file or file is an empty string. 
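
open_bpipe() builds a popen(3)-style helper whose read/write/stderr channels are selected by the mode string. The underlying mechanism is the usual pipe/fork/dup2/execvp sequence; the stripped-down, read-only sketch below (one pipe, no shell mode, no watchdog timer) is meant only to make that control flow easy to follow and is not the Bacula implementation.

#include <sys/wait.h>
#include <unistd.h>
#include <cstdio>

/* Run argv[] and return a FILE* connected to its stdout, or NULL. */
static FILE *open_read_pipe(char *const argv[], pid_t *pid_out)
{
   int readp[2];
   if (pipe(readp) == -1) return NULL;

   pid_t pid = fork();
   if (pid < 0) { close(readp[0]); close(readp[1]); return NULL; }
   if (pid == 0) {                       /* child */
      close(readp[0]);                   /* unused read end */
      dup2(readp[1], 1);                 /* stdout -> pipe */
      dup2(readp[1], 2);                 /* stderr -> same pipe */
      execvp(argv[0], argv);
      _exit(255);                        /* exec failed */
   }
   close(readp[1]);                      /* parent keeps read end only */
   *pid_out = pid;
   return fdopen(readp[0], "r");
}

int main()
{
   char *argv[] = {(char *)"ls", (char *)"-l", NULL};
   pid_t pid;
   FILE *rfd = open_read_pipe(argv, &pid);
   if (!rfd) return 1;

   char line[1024];
   while (fgets(line, sizeof(line), rfd)) {
      fputs(line, stdout);
   }
   fclose(rfd);
   int status;
   waitpid(pid, &status, 0);
   return 0;
}
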
*/ + errno = ENOENT; + return NULL; + } + + bpipe = (BPIPE *)malloc(sizeof(BPIPE)); + memset(bpipe, 0, sizeof(BPIPE)); + if (strchr(mode,'r')) mode_map|=MODE_READ; + if (strchr(mode,'w')) mode_map|=MODE_WRITE; + if (strchr(mode,'s')) mode_map|=MODE_SHELL; + if (strchr(mode,'e')) mode_map|=MODE_STDERR; + + /* Build arguments for running program. */ + tprog = get_pool_memory(PM_FNAME); + pm_strcpy(tprog, prog); + if (mode_map & MODE_SHELL) { + build_sh_argc_argv(tprog, &bargc, bargv, MAX_ARGV); + } else { + build_argc_argv(tprog, &bargc, bargv, MAX_ARGV); + } + + /* Unable to parse the command, avoid segfault after the fork() */ + if (bargc == 0 || bargv[0] == NULL) { + free_pool_memory(tprog); + free(bpipe); + /* execve(3) A component of the file does not name an existing file or file is an empty string. */ + errno = ENOENT; + return NULL; + } + +#ifdef xxxxxx + printf("argc=%d\n", bargc); + for (i=0; iworker_pid = fork()) { + case -1: /* error */ + save_errno = errno; + if (mode_map & MODE_WRITE) { + close(writep[0]); + close(writep[1]); + } + if (mode_map & MODE_READ) { + close(readp[0]); + close(readp[1]); + } + if (mode_map & MODE_STDERR) { + close(errp[0]); + close(errp[1]); + } + free(bpipe); + free_pool_memory(tprog); + errno = save_errno; + return NULL; + + case 0: /* child */ + if (mode_map & MODE_WRITE) { + close(writep[1]); + dup2(writep[0], 0); /* Dup our write to his stdin */ + } + if (mode_map & MODE_READ) { + close(readp[0]); /* Close unused child fds */ + dup2(readp[1], 1); /* dup our read to his stdout */ + if (mode_map & MODE_STDERR) { /* and handle stderr */ + close(errp[0]); + dup2(errp[1], 2); + } else { + dup2(readp[1], 2); + } + } + +#if HAVE_FCNTL_F_CLOSEM + fcntl(3, F_CLOSEM); +#elif HAVE_CLOSEFROM + closefrom(3); +#else + for (i=rlimitResult; i >= 3; i--) { + close(i); + } +#endif + + /* Setup the environment if requested, we do not use execvpe() + * because it's not wildly available + * TODO: Implement envp to windows version of bpipe + */ + setup_env(envp); + + execvp(bargv[0], bargv); /* call the program */ + /* Convert errno into an exit code for later analysis */ + for (i=0; i< num_execvp_errors; i++) { + if (execvp_errors[i] == errno) { + _exit(200 + i); /* exit code => errno */ + } + } + /* Do not flush stdio */ + _exit(255); /* unknown errno */ + + default: /* parent */ + break; + } + free_pool_memory(tprog); + if (mode_map & MODE_READ) { + close(readp[1]); /* close unused parent fds */ + set_keepalive(readp[0]); + bpipe->rfd = fdopen(readp[0], "r"); /* open file descriptor */ + } + if (mode_map & MODE_STDERR) { + close(errp[1]); /* close unused parent fds */ + set_keepalive(errp[0]); + bpipe->efd = fdopen(errp[0], "r"); /* open file descriptor */ + } + if (mode_map & MODE_WRITE) { + close(writep[0]); + set_keepalive(writep[1]); + bpipe->wfd = fdopen(writep[1], "w"); + } + bpipe->worker_stime = time(NULL); + bpipe->wait = wait; + if (wait > 0) { + bpipe->timer_id = start_child_timer(NULL, bpipe->worker_pid, wait); + } + return bpipe; +} + +/* Close the write pipe only + * BE careful ! 
return 1 if ok */ +int close_wpipe(BPIPE *bpipe) +{ + int stat = 1; + + if (bpipe->wfd) { + fflush(bpipe->wfd); + if (fclose(bpipe->wfd) != 0) { + stat = 0; + } + bpipe->wfd = NULL; + } + return stat; +} + +/* Close the stderror pipe only */ +int close_epipe(BPIPE *bpipe) +{ + int stat = 1; + + if (bpipe->efd) { + if (fclose(bpipe->efd) != 0) { + stat = 0; + } + bpipe->efd = NULL; + } + return stat; +} + +/* + * Close both pipes and free resources + * + * Returns: 0 on success + * berrno on failure + */ +int close_bpipe(BPIPE *bpipe) +{ + int chldstatus = 0; + int stat = 0; + int wait_option; + int remaining_wait; + pid_t wpid = 0; + + + /* Close pipes */ + if (bpipe->rfd) { + fclose(bpipe->rfd); + bpipe->rfd = NULL; + } + if (bpipe->wfd) { + fclose(bpipe->wfd); + bpipe->wfd = NULL; + } + if (bpipe->efd) { + fclose(bpipe->efd); + bpipe->efd = NULL; + } + + if (bpipe->wait == 0) { + wait_option = 0; /* wait indefinitely */ + } else { + wait_option = WNOHANG; /* don't hang */ + } + remaining_wait = bpipe->wait; + + /* wait for worker child to exit */ + for ( ;; ) { + Dmsg2(100, "Wait for %d opt=%d\n", bpipe->worker_pid, wait_option); + do { + wpid = waitpid(bpipe->worker_pid, &chldstatus, wait_option); + } while (wpid == -1 && (errno == EINTR || errno == EAGAIN)); + if (wpid == bpipe->worker_pid || wpid == -1) { + berrno be; + stat = errno; + Dmsg3(100, "Got break wpid=%d status=%d ERR=%s\n", wpid, chldstatus, + wpid==-1?be.bstrerror():"none"); + break; + } + Dmsg3(100, "Got wpid=%d status=%d ERR=%s\n", wpid, chldstatus, + wpid==-1?strerror(errno):"none"); + if (remaining_wait > 0) { + bmicrosleep(1, 0); /* wait one second */ + remaining_wait--; + } else { + stat = ETIME; /* set error status */ + wpid = -1; + break; /* don't wait any longer */ + } + } + if (wpid > 0) { + if (WIFEXITED(chldstatus)) { /* process exit()ed */ + stat = WEXITSTATUS(chldstatus); + if (stat != 0) { + Dmsg1(100, "Non-zero status %d returned from child.\n", stat); + stat |= b_errno_exit; /* exit status returned */ + } + Dmsg1(100, "child status=%d\n", stat & ~b_errno_exit); + } else if (WIFSIGNALED(chldstatus)) { /* process died */ +#ifndef HAVE_WIN32 + stat = WTERMSIG(chldstatus); +#else + stat = 1; /* fake child status */ +#endif + Dmsg1(100, "Child died from signal %d\n", stat); + stat |= b_errno_signal; /* exit signal returned */ + } + } + if (bpipe->timer_id) { + stop_child_timer(bpipe->timer_id); + } + free(bpipe); + Dmsg2(100, "returning stat=%d,%d\n", stat & ~(b_errno_exit|b_errno_signal), stat); + return stat; +} + +/* + * Build argc and argv from a string + */ +static void build_argc_argv(char *cmd, int *bargc, char *bargv[], int max_argv) +{ + int i; + char *p, *q, quote; + int argc = 0; + + argc = 0; + for (i=0; iSUCCESS), so we check if the watchdog killed the program. 
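
close_bpipe() above folds the child's exit code or terminating signal into one status word via the b_errno_exit/b_errno_signal bits. The waitpid() decoding it relies on is standard and can be seen in isolation:

#include <sys/wait.h>
#include <unistd.h>
#include <cstdio>

int main()
{
   pid_t pid = fork();
   if (pid == 0) {
      _exit(3);                          /* pretend the child failed */
   }
   int chldstatus = 0;
   if (waitpid(pid, &chldstatus, 0) == pid) {
      if (WIFEXITED(chldstatus)) {       /* normal exit() / _exit() */
         printf("child exit status=%d\n", WEXITSTATUS(chldstatus));
      } else if (WIFSIGNALED(chldstatus)) {
         printf("child killed by signal %d\n", WTERMSIG(chldstatus));
      }
   }
   return 0;
}
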
+ * + * Contrary to my normal calling conventions, this program + * + * Returns: 0 on success + * non-zero on error == berrno status + */ +int run_program(char *prog, int wait, POOLMEM *&results) +{ + BPIPE *bpipe; + int stat1, stat2; + char *mode; + + mode = (char *)"r"; + bpipe = open_bpipe(prog, wait, mode); + if (!bpipe) { + return ENOENT; + } + results[0] = 0; + int len = sizeof_pool_memory(results) - 1; + fgets(results, len, bpipe->rfd); + results[len] = 0; + if (feof(bpipe->rfd)) { + stat1 = 0; + } else { + stat1 = ferror(bpipe->rfd); + } + if (stat1 < 0) { + berrno be; + Dmsg2(100, "Run program fgets stat=%d ERR=%s\n", stat1, be.bstrerror(errno)); + } else if (stat1 != 0) { + Dmsg1(100, "Run program fgets stat=%d\n", stat1); + if (bpipe->timer_id) { + Dmsg1(100, "Run program fgets killed=%d\n", bpipe->timer_id->killed); + /* NB: I'm not sure it is really useful for run_program. Without the + * following lines run_program would not detect if the program was killed + * by the watchdog. */ + if (bpipe->timer_id->killed) { + stat1 = ETIME; + pm_strcpy(results, _("Program killed by Bacula (timeout)\n")); + } + } + } + stat2 = close_bpipe(bpipe); + stat1 = stat2 != 0 ? stat2 : stat1; + Dmsg1(100, "Run program returning %d\n", stat1); + return stat1; +} + +/* + * Run an external program. Optionally wait a specified number + * of seconds. Program killed if wait exceeded (it is done by the + * watchdog, as fgets is a blocking function). + * + * If the watchdog kills the program, fgets returns, and ferror is set + * to 1 (=>SUCCESS), so we check if the watchdog killed the program. + * + * Return the full output from the program (not only the first line). + * + * Contrary to my normal calling conventions, this program + * + * Returns: 0 on success + * non-zero on error == berrno status + * + */ +int run_program_full_output(char *prog, int wait, POOLMEM *&results, char *env[]) +{ + BPIPE *bpipe; + int stat1, stat2; + char *mode; + POOLMEM* tmp; + char *buf; + const int bufsize = 32000; + + + Dsm_check(200); + + tmp = get_pool_memory(PM_MESSAGE); + buf = (char *)malloc(bufsize+1); + + results[0] = 0; + mode = (char *)"r"; + bpipe = open_bpipe(prog, wait, mode, env); + if (!bpipe) { + stat1 = ENOENT; + goto bail_out; + } + + Dsm_check(200); + tmp[0] = 0; + while (1) { + buf[0] = 0; + fgets(buf, bufsize, bpipe->rfd); + buf[bufsize] = 0; + pm_strcat(tmp, buf); + if (feof(bpipe->rfd)) { + stat1 = 0; + Dmsg1(100, "Run program fgets stat=%d\n", stat1); + break; + } else { + stat1 = ferror(bpipe->rfd); + } + if (stat1 < 0) { + berrno be; + Dmsg2(100, "Run program fgets stat=%d ERR=%s\n", stat1, be.bstrerror()); + break; + } else if (stat1 != 0) { + Dmsg1(200, "Run program fgets stat=%d\n", stat1); + if (bpipe->timer_id && bpipe->timer_id->killed) { + Dmsg1(100, "Run program saw fgets killed=%d\n", bpipe->timer_id->killed); + break; + } + } + } + /* + * We always check whether the timer killed the program. We would see + * an eof even when it does so we just have to trust the killed flag + * and set the timer values to avoid edge cases where the program ends + * just as the timer kills it. + */ + if (bpipe->timer_id && bpipe->timer_id->killed) { + Dmsg1(100, "Run program fgets killed=%d\n", bpipe->timer_id->killed); + pm_strcpy(tmp, _("Program killed by Bacula (timeout)\n")); + stat1 = ETIME; + } + pm_strcpy(results, tmp); + Dmsg3(200, "resadr=0x%x reslen=%d res=%s\n", results, strlen(results), results); + stat2 = close_bpipe(bpipe); + stat1 = stat2 != 0 ? 
stat2 : stat1; + + Dmsg1(100, "Run program returning %d\n", stat1); +bail_out: + free_pool_memory(tmp); + free(buf); + return stat1; +} diff --git a/src/lib/bpipe.h b/src/lib/bpipe.h new file mode 100644 index 00000000..7d7786a2 --- /dev/null +++ b/src/lib/bpipe.h @@ -0,0 +1,32 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bi-directional pipe structure + */ + +class BPIPE { +public: + pid_t worker_pid; + time_t worker_stime; + int wait; + btimer_t *timer_id; + FILE *rfd; + FILE *wfd; + FILE *efd; +}; diff --git a/src/lib/breg.c b/src/lib/breg.c new file mode 100644 index 00000000..fbb71383 --- /dev/null +++ b/src/lib/breg.c @@ -0,0 +1,441 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
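
run_program_full_output() above keeps reading until EOF so the caller gets the child's complete output, not just the first line. Outside of Bacula's BPIPE and POOLMEM types, the same accumulate-until-EOF pattern can be shown with the standard popen(3) interface (example command only):

#include <cstdio>
#include <string>

int main()
{
   FILE *fp = popen("ls -l /tmp", "r");       /* example command only */
   if (!fp) { perror("popen"); return 1; }

   std::string results;
   char buf[4096];
   while (fgets(buf, sizeof(buf), fp)) {      /* keep reading until EOF */
      results += buf;
   }
   int status = pclose(fp);                   /* like close_bpipe(): child status */
   printf("collected %zu bytes, status=%d\n", results.size(), status);
   return 0;
}
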
+*/ +/* + * Manipulation routines for BREGEXP list + * + * Eric Bollengier, March 2007 + * + */ + + +#include "bacula.h" + +#include "breg.h" +#include "mem_pool.h" + +BREGEXP *new_bregexp(const char *motif) +{ + Dmsg0(500, "bregexp: creating new bregexp object\n"); + BREGEXP *self = (BREGEXP *)bmalloc(sizeof(BREGEXP)); + memset(self, 0, sizeof(BREGEXP)); + + if (!self->extract_regexp(motif)) { + Dmsg0(100, "bregexp: extract_regexp error\n"); + free_bregexp(self); + return NULL; + } + + self->result = get_pool_memory(PM_FNAME); + self->result[0] = '\0'; + + return self; +} + +void free_bregexp(BREGEXP *self) +{ + Dmsg0(500, "bregexp: freeing BREGEXP object\n"); + + if (!self) { + return; + } + + if (self->expr) { + bfree(self->expr); + } + if (self->result) { + free_pool_memory(self->result); + } + regfree(&self->preg); + bfree(self); +} + +/* Free a bregexps alist + */ +void free_bregexps(alist *bregexps) +{ + BREGEXP *elt; + Dmsg0(500, "bregexp: freeing all BREGEXP object\n"); + if (!bregexps) { + return; + } + foreach_alist(elt, bregexps) { + free_bregexp(elt); + } +} + + +/* Apply all regexps to fname + */ +bool apply_bregexps(const char *fname, alist *bregexps, char **result) +{ + return apply_bregexps(fname, NULL, bregexps, result); +} + +/* Apply all regexps to fname + */ +bool apply_bregexps(const char *fname, struct stat *sp, alist *bregexps, char **result) +{ + BREGEXP *elt; + bool ok=false; + + char *ret = (char *) fname; + foreach_alist(elt, bregexps) { + ret = elt->replace(ret, sp); + ok = ok || elt->success; + } + Dmsg2(500, "bregexp: fname=%s ret=%s\n", fname, ret); + + *result = ret; + return ok; +} + +/* return an alist of BREGEXP or return NULL if it's not a + * where=!tmp!opt!ig,!temp!opt!i + */ +alist *get_bregexps(const char *where) +{ + char *p = (char *)where; + alist *list = New(alist(10, not_owned_by_alist)); + BREGEXP *reg; + + reg = new_bregexp(p); + + while(reg) { + p = reg->eor; + list->append(reg); + reg = new_bregexp(p); + } + + if (list->size()) { + return list; + } else { + delete list; + return NULL; + } +} + +bool BREGEXP::extract_regexp(const char *motif) +{ + if ( !motif ) { + return false; + } + + char sep = motif[0]; + + if (!(sep == '!' 
|| + sep == ':' || + sep == ';' || + sep == '|' || + sep == ',' || + sep == '&' || + sep == '%' || + sep == '=' || + sep == '~' || + sep == '/' || + sep == '<' || + sep == '#' )) + { + return false; + } + + char *search = (char *) motif + 1; + int options = REG_EXTENDED | REG_NEWLINE; + bool ok = false; + + /* extract 1st part */ + char *dest = expr = bstrdup(motif); + + while (*search && !ok) { + if (search[0] == '\\' && search[1] == sep) { + *dest++ = *++search; /* we skip separator */ + + } else if (search[0] == '\\' && search[1] == '\\') { + *dest++ = *++search; /* we skip the second \ */ + + } else if (*search == sep) { /* we found end of expression */ + *dest++ = '\0'; + + if (subst) { /* already have found motif */ + ok = true; + + } else { + *dest++ = *++search; /* we skip separator */ + subst = dest; /* get replaced string */ + } + + } else { + *dest++ = *search++; + } + } + *dest = '\0'; /* in case of */ + + if (!ok || !subst) { + /* bad regexp */ + return false; + } + + ok = false; + /* find options */ + while (*search && !ok) { + if (*search == 'i') { + options |= REG_ICASE; + + } else if (*search == 'g') { + /* recherche multiple*/ + + } else if (*search == sep) { + /* skip separator */ + + } else { /* end of options */ + ok = true; + } + search++; + } + + int rc = regcomp(&preg, expr, options); + if (rc != 0) { + char prbuf[500]; + regerror(rc, &preg, prbuf, sizeof(prbuf)); + Dmsg1(100, "bregexp: compile error: %s\n", prbuf); + return false; + } + + eor = search; /* useful to find the next regexp in where */ + + return true; +} + +/* return regexp->result */ +char *BREGEXP::replace(const char *fname, struct stat *sp) +{ + success = false; /* use this.success to known if it's ok */ + int flen = strlen(fname); + int rc = regexec(&preg, fname, BREG_NREGS, regs, 0); + + if (rc == REG_NOMATCH) { + Dmsg0(500, "bregexp: regex mismatch\n"); + return return_fname(fname, flen); + } + + int len = compute_dest_len(fname, regs); + + if (len) { + result = check_pool_memory_size(result, len); + edit_subst(fname, sp, regs); + success = true; + Dmsg2(500, "bregexp: len = %i, result_len = %i\n", len, strlen(result)); + + } else { /* error in substitution */ + Dmsg0(100, "bregexp: error in substitution\n"); + return return_fname(fname, flen); + } + + return result; +} + +char *BREGEXP::return_fname(const char *fname, int len) +{ + result = check_pool_memory_size(result, len+1); + strcpy(result,fname); + return result; +} + +int BREGEXP::compute_dest_len(const char *fname, regmatch_t breg[]) +{ + int len=0; + char *p; + char *psubst = subst; + int no; + + if (!fname || !breg) { + return 0; + } + + /* match failed ? */ + if (breg[0].rm_so < 0) { + return 0; + } + + for (p = psubst++; *p ; p = psubst++) { + /* match a substitution with a struct stat field */ + if ((*p == '$') && (*psubst == 'm')) { + len += 50; /* Will add a integer */ + + /* match $1 \1 back references */ + } else if ((*p == '$' || *p == '\\') && ('0' <= *psubst && *psubst <= '9')) { + no = *psubst++ - '0'; + + /* we check if the back reference exists */ + /* references can not match if we are using (..)? 
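
BREGEXP::replace() is built on POSIX regcomp()/regexec(), using the regmatch_t offsets to splice the substitution and its $1/\1 back references into the result. The bare API usage, without Bacula's memory pools, is sketched below with an illustrative pattern equivalent in spirit to a motif such as !/prod(/.*)!/test$1!:

#include <regex.h>
#include <cstdio>
#include <string>

int main()
{
   regex_t preg;
   if (regcomp(&preg, "^/prod(/.*)$", REG_EXTENDED) != 0) return 1;

   const char *fname = "/prod/data.dat";
   regmatch_t regs[2];                        /* [0]=whole match, [1]=group 1 */
   std::string result = fname;                /* default: unchanged */

   if (regexec(&preg, fname, 2, regs, 0) == 0 && regs[1].rm_so >= 0) {
      result = "/test";
      result.append(fname + regs[1].rm_so, regs[1].rm_eo - regs[1].rm_so);
   }
   printf("%s -> %s\n", fname, result.c_str()); /* /prod/data.dat -> /test/data.dat */

   regfree(&preg);
   return 0;
}
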
*/ + + if (breg[no].rm_so >= 0 && breg[no].rm_eo >= 0) { + len += breg[no].rm_eo - breg[no].rm_so; + } + + } else { + len++; + } + } + + /* $0 is replaced by subst */ + len -= breg[0].rm_eo - breg[0].rm_so; + len += strlen(fname) + 1; + + return len; +} + +char *BREGEXP::edit_subst(const char *fname, struct stat *sp, regmatch_t breg[]) +{ + int i; + char *p; + char ed[50]; + char *psubst = subst; + int no; + int len; + + /* il faut recopier fname dans dest + * on recopie le debut fname -> breg->start[0] + */ + + for (i = 0; i < breg[0].rm_so ; i++) { + result[i] = fname[i]; + } + + /* on recopie le motif de remplacement (avec tous les $x) */ + for (p = psubst++; *p ; p = psubst++) { + /* match specific % variables, coming from struct stat */ + if ((*p == '$' && *psubst == 'm')) { + edit_uint64(sp?sp->st_mtime : 0, ed); + len = strlen(ed); + bstrncpy(result + i, ed, len+1); + i+=len; + psubst++; /* Eat m */ + + /* match $1 \1 back references */ + } else if ((*p == '$' || *p == '\\') && ('0' <= *psubst && *psubst <= '9')) { + no = *psubst++ - '0'; + + /* have a back reference ? */ + if (breg[no].rm_so >= 0 && breg[no].rm_eo >= 0) { + len = breg[no].rm_eo - breg[no].rm_so; + bstrncpy(result + i, fname + breg[no].rm_so, len + 1); + i += len ; + } + + } else { + result[i++] = *p; + } + } + + /* we copy what is out of the match */ + strcpy(result + i, fname + breg[0].rm_eo); + + return result; +} + +/* escape sep char and \ + * dest must be long enough (src*2+1) + * return end of the string */ +char *bregexp_escape_string(char *dest, const char *src, const char sep) +{ + char *ret = dest; + while (*src) + { + if (*src == sep) { + *dest++ = '\\'; + } else if (*src == '\\') { + *dest++ = '\\'; + } + *dest++ = *src++; + } + *dest = '\0'; + + return ret; +} + +static const char regexp_sep = '!'; +static const char *str_strip_prefix = "!%s!!i"; +static const char *str_add_prefix = "!^!%s!"; +static const char *str_add_suffix = "!([^/])$!$1%s!"; + +int bregexp_get_build_where_size(char *strip_prefix, + char *add_prefix, + char *add_suffix) +{ + int str_size = ((strip_prefix?strlen(strip_prefix)+strlen(str_strip_prefix):0) + + (add_prefix?strlen(add_prefix)+strlen(str_add_prefix) :0) + + (add_suffix?strlen(add_suffix)+strlen(str_add_suffix) :0) ) + /* escape + 3*, + \0 */ + * 2 + 3 + 1; + + Dmsg1(200, "bregexp_get_build_where_size = %i\n", str_size); + return str_size; +} + +/* build a regexp string with user arguments + * Usage : + * + * int len = bregexp_get_build_where_size(a,b,c) ; + * char *dest = (char *) bmalloc (len * sizeof(char)); + * bregexp_build_where(dest, len, a, b, c); + * bfree(dest); + * + */ +char *bregexp_build_where(char *dest, int str_size, + char *strip_prefix, + char *add_prefix, + char *add_suffix) +{ + int len=0; + + POOLMEM *str_tmp = get_memory(str_size); + + *str_tmp = *dest = '\0'; + + if (strip_prefix) { + len += bsnprintf(dest, str_size - len, str_strip_prefix, + bregexp_escape_string(str_tmp, strip_prefix, regexp_sep)); + } + + if (add_suffix) { + if (len) dest[len++] = ','; + + len += bsnprintf(dest + len, str_size - len, str_add_suffix, + bregexp_escape_string(str_tmp, add_suffix, regexp_sep)); + } + + if (add_prefix) { + if (len) dest[len++] = ','; + + len += bsnprintf(dest + len, str_size - len, str_add_prefix, + bregexp_escape_string(str_tmp, add_prefix, regexp_sep)); + } + + free_pool_memory(str_tmp); + + return dest; +} + + +void BREGEXP::debug() +{ + printf("expr=[%s]\n", expr); + printf("subst=[%s]\n", subst); + printf("result=%s\n", NPRT(result)); +} diff 
--git a/src/lib/breg.h b/src/lib/breg.h new file mode 100644 index 00000000..e2d9da09 --- /dev/null +++ b/src/lib/breg.h @@ -0,0 +1,110 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula BREGEXP Structure definition for FileDaemon + * Eric Bollengier March 2007 + */ + + +#ifndef __BREG_H_ +#define __BREG_H_ 1 + +//#undef HAVE_REGEX_H + +#ifndef HAVE_REGEX_H +#include "bregex.h" +#else +#include +#endif + +/* Usage: + * + * #include "lib/breg.h" + * + * BREGEXP *breg = new_bregexp("!/prod!/test!"); + * char *filename = breg->replace("/prod/data.dat"); + * or + * char *filename = breg->result; + * free_bregexp(breg); + */ + +#define BREG_NREGS 11 + +/* + * Structure for BREGEXP ressource + */ +class BREGEXP { +public: + POOLMEM *result; /* match result */ + bool success; /* match is ok */ + + char *replace(const char *fname, struct stat *sp=NULL); /* return this.result */ + void debug(); + + /* private */ + POOLMEM *expr; /* search epression */ + POOLMEM *subst; /* substitution */ + regex_t preg; /* regex_t result of regcomp() */ + regmatch_t regs[BREG_NREGS]; /* contains match */ + char *eor; /* end of regexp in expr */ + + char *return_fname(const char *fname, int len); /* return fname as result */ + char *edit_subst(const char *fname, struct stat *sp, regmatch_t breg[]); + int compute_dest_len(const char *fname, regmatch_t breg[]); + bool extract_regexp(const char *motif); +}; + +/* create new BREGEXP and compile regex_t */ +BREGEXP *new_bregexp(const char *motif); + +/* launch each bregexp on filename */ +int run_bregexp(alist *bregexps, const char *fname); + +/* free BREGEXP (and all POOLMEM) */ +void free_bregexp(BREGEXP *script); + +/* fill an alist with BREGEXP from where */ +alist *get_bregexps(const char *where); + +/* apply every regexps from the alist */ +bool apply_bregexps(const char *fname, struct stat *sp, alist *bregexps, char **result); +bool apply_bregexps(const char *fname, alist *bregexps, char **result); + +/* foreach_alist free RUNSCRIPT */ +void free_bregexps(alist *bregexps); /* you have to free alist */ + +/* get regexp size */ +int bregexp_get_build_where_size(char *strip_prefix, + char *add_prefix, + char *add_suffix); + +/* get a bregexp string from user arguments + * you must allocate it with bregexp_get_build_where_size(); + */ +char *bregexp_build_where(char *dest, int str_size, + char *strip_prefix, + char *add_prefix, + char *add_suffix); + +/* escape a string to regexp format (sep and \) + * dest must be long enough (dest = 2*src + 1) + */ +char *bregexp_escape_string(char *dest, const char *src, const char sep); + +#endif /* __BREG_H_ */ diff --git a/src/lib/bregex.c b/src/lib/bregex.c new file mode 100644 index 00000000..9b9a7b5a --- /dev/null +++ b/src/lib/bregex.c @@ -0,0 +1,2024 @@ +/* regexpr.c + * + * Author: Tatu Ylonen + * + * Copyright (c) 1991 Tatu Ylonen, Espoo, Finland + * + * Permission to use, copy, modify, distribute, and sell this software + * and its 
documentation for any purpose is hereby granted without + * fee, provided that the above copyright notice appear in all copies. + * This software is provided "as is" without express or implied + * warranty. + * + * Created: Thu Sep 26 17:14:05 1991 ylo + * Last modified: Mon Nov 4 17:06:48 1991 ylo + * Ported to Think C: 19 Jan 1992 guido@cwi.nl + * + * This code draws many ideas from the regular expression packages by + * Henry Spencer of the University of Toronto and Richard Stallman of + * the Free Software Foundation. + * + * Emacs-specific code and syntax table code is almost directly borrowed + * from GNU regexp. + * + * Bugs fixed and lots of reorganization by Jeffrey C. Ollie, April + * 1997 Thanks for bug reports and ideas from Andrew Kuchling, Tim + * Peters, Guido van Rossum, Ka-Ping Yee, Sjoerd Mullender, and + * probably one or two others that I'm forgetting. + * + * This file modified to work with Bacula and C++ by + * Kern Sibbald, April 2006 + * + * This file modified to work with REG_ICASE and Bacula by + * Eric Bollengier April 2007 + */ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + + +#include "bacula.h" +#include "bregex.h" + +#define set_error(x) bufp->errmsg=((char *)(x)) +#define got_error bufp->errmsg!=NULL + +/* The original code blithely assumed that sizeof(short) == 2. Not + * always true. Original instances of "(short)x" were replaced by + * SHORT(x), where SHORT is #defined below. */ + +#define SHORT(x) ((x) & 0x8000 ? (x) - 0x10000 : (x)) + +/* The stack implementation is taken from an idea by Andrew Kuchling. + * It's a doubly linked list of arrays. The advantages of this over a + * simple linked list are that the number of mallocs required are + * reduced. It also makes it possible to statically allocate enough + * space so that small patterns don't ever need to call malloc. + * + * The advantages over a single array is that is periodically + * realloced when more space is needed is that we avoid ever copying + * the stack. */ + +/* item_t is the basic stack element. Defined as a union of + * structures so that both registers, failure points, and counters can + * be pushed/popped from the stack. There's nothing built into the + * item to keep track of whether a certain stack item is a register, a + * failure point, or a counter. */ + +typedef union item_t { + struct { + int num; + int level; + unsigned char *start; + unsigned char *end; + } reg; + struct { + int count; + int level; + int phantom; + unsigned char *code; + unsigned char *text; + } fail; + struct { + int num; + int level; + int count; + } cntr; +} item_t; + +#define B_STACK_PAGE_SIZE 256 +#define NUM_REGISTERS 256 + +/* A 'page' of stack items. */ + +typedef struct item_page_t { + item_t items[B_STACK_PAGE_SIZE]; + struct item_page_t *prev; + struct item_page_t *next; +} item_page_t; + + +typedef struct match_state { + /* The number of registers that have been pushed onto the stack + * since the last failure point. 
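
The comments above describe the failure stack as a doubly linked list of fixed-size item pages, with the first page embedded in the state so small patterns never call malloc(). A compact, generic C++ rendering of that idea (not the item_t/STACK_* macros used here) is:

#include <cstdio>

const int PAGE_SIZE = 256;

template <class T>
struct PagedStack {
   struct Page {
      T items[PAGE_SIZE];
      Page *prev = nullptr;
      Page *next = nullptr;
   };
   Page first;                 /* first page lives inside the stack object */
   Page *current = &first;
   int index = 0;              /* next free slot in the current page */

   void push(const T &v) {
      if (index == PAGE_SIZE) {            /* current page is full */
         if (!current->next) {             /* allocate and link a new page */
            current->next = new Page;
            current->next->prev = current;
         }
         current = current->next;
         index = 0;
      }
      current->items[index++] = v;
   }
   bool pop(T &v) {
      if (index == 0) {
         if (!current->prev) return false; /* empty */
         current = current->prev;
         index = PAGE_SIZE;
      }
      v = current->items[--index];
      return true;
   }
   ~PagedStack() {                          /* free everything after page 1 */
      for (Page *p = first.next; p; ) { Page *n = p->next; delete p; p = n; }
   }
};

int main()
{
   PagedStack<int> st;
   for (int i = 0; i < 1000; i++) st.push(i);
   int v; st.pop(v);
   printf("top was %d\n", v);               /* 999 */
   return 0;
}
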
*/ + + int count; + + /* Used to control when registers need to be pushed onto the + * stack. */ + + int level; + + /* The number of failure points on the stack. */ + + int point; + + /* Storage for the registers. Each register consists of two + * pointers to characters. So register N is represented as + * start[N] and end[N]. The pointers must be converted to + * offsets from the beginning of the string before returning the + * registers to the calling program. */ + + unsigned char *start[NUM_REGISTERS]; + unsigned char *end[NUM_REGISTERS]; + + /* Keeps track of whether a register has changed recently. */ + + int changed[NUM_REGISTERS]; + + /* Structure to encapsulate the stack. */ + struct { + /* index into the current page. If index == 0 and you need + * to pop an item, move to the previous page and set index + * = B_STACK_PAGE_SIZE - 1. Otherwise decrement index to + * push a page. If index == B_STACK_PAGE_SIZE and you need + * to push a page move to the next page and set index = + * 0. If there is no new next page, allocate a new page + * and link it in. Otherwise, increment index to push a + * page. */ + + int index; + item_page_t *current; /* Pointer to the current page. */ + item_page_t first; /* First page is statically allocated. */ + } stack; +} match_state; + +/* Initialize a state object */ + +/* #define NEW_STATE(state) \ */ +/* memset(&state, 0, (void *)(&state.stack) - (void *)(&state)); \ */ +/* state.stack.current = &state.stack.first; \ */ +/* state.stack.first.prev = NULL; \ */ +/* state.stack.first.next = NULL; \ */ +/* state.stack.index = 0; \ */ +/* state.level = 1 */ + +#define NEW_STATE(state, nregs) \ +{ \ + int i; \ + for (i = 0; i < nregs; i++) \ + { \ + state.start[i] = NULL; \ + state.end[i] = NULL; \ + state.changed[i] = 0; \ + } \ + state.stack.current = &state.stack.first; \ + state.stack.first.prev = NULL; \ + state.stack.first.next = NULL; \ + state.stack.index = 0; \ + state.level = 1; \ + state.count = 0; \ + state.level = 0; \ + state.point = 0; \ +} + +/* Free any memory that might have been malloc'd */ + +#define FREE_STATE(state) \ +while(state.stack.first.next != NULL) \ +{ \ + state.stack.current = state.stack.first.next; \ + state.stack.first.next = state.stack.current->next; \ + free(state.stack.current); \ +} + +/* Discard the top 'count' stack items. */ + +#define STACK_DISCARD(stack, count, on_error) \ +stack.index -= count; \ +while (stack.index < 0) \ +{ \ + if (stack.current->prev == NULL) \ + on_error; \ + stack.current = stack.current->prev; \ + stack.index += B_STACK_PAGE_SIZE; \ +} + +/* Store a pointer to the previous item on the stack. Used to pop an + * item off of the stack. */ + +#define STACK_PREV(stack, top, on_error) \ +if (stack.index == 0) \ +{ \ + if (stack.current->prev == NULL) \ + on_error; \ + stack.current = stack.current->prev; \ + stack.index = B_STACK_PAGE_SIZE - 1; \ +} \ +else \ +{ \ + stack.index--; \ +} \ +top = &(stack.current->items[stack.index]) + +/* Store a pointer to the next item on the stack. Used to push an item + * on to the stack. 
*/ + +#define STACK_NEXT(stack, top, on_error) \ +if (stack.index == B_STACK_PAGE_SIZE) \ +{ \ + if (stack.current->next == NULL) \ + { \ + stack.current->next = (item_page_t *)malloc(sizeof(item_page_t)); \ + if (stack.current->next == NULL) \ + on_error; \ + stack.current->next->prev = stack.current; \ + stack.current->next->next = NULL; \ + } \ + stack.current = stack.current->next; \ + stack.index = 0; \ +} \ +top = &(stack.current->items[stack.index++]) + +/* Store a pointer to the item that is 'count' items back in the + * stack. STACK_BACK(stack, top, 1, on_error) is equivalent to + * STACK_TOP(stack, top, on_error). */ + +#define STACK_BACK(stack, top, count, on_error) \ +{ \ + int index; \ + item_page_t *current; \ + current = stack.current; \ + index = stack.index - (count); \ + while (index < 0) \ + { \ + if (current->prev == NULL) \ + on_error; \ + current = current->prev; \ + index += B_STACK_PAGE_SIZE; \ + } \ + top = &(current->items[index]); \ +} + +/* Store a pointer to the top item on the stack. Execute the + * 'on_error' code if there are no items on the stack. */ + +#define STACK_TOP(stack, top, on_error) \ +if (stack.index == 0) \ +{ \ + if (stack.current->prev == NULL) \ + on_error; \ + top = &(stack.current->prev->items[B_STACK_PAGE_SIZE - 1]); \ +} \ +else \ +{ \ + top = &(stack.current->items[stack.index - 1]); \ +} + +/* Test to see if the stack is empty */ + +#define STACK_EMPTY(stack) ((stack.index == 0) && \ + (stack.current->prev == NULL)) + +/* Return the start of register 'reg' */ + +#define GET_REG_START(state, reg) (state.start[reg]) + +/* Return the end of register 'reg' */ + +#define GET_REG_END(state, reg) (state.end[reg]) + +/* Set the start of register 'reg'. If the state of the register needs + * saving, push it on the stack. */ + +#define SET_REG_START(state, reg, text, on_error) \ +if(state.changed[reg] < state.level) \ +{ \ + item_t *item; \ + STACK_NEXT(state.stack, item, on_error); \ + item->reg.num = reg; \ + item->reg.start = state.start[reg]; \ + item->reg.end = state.end[reg]; \ + item->reg.level = state.changed[reg]; \ + state.changed[reg] = state.level; \ + state.count++; \ +} \ +state.start[reg] = text + +/* Set the end of register 'reg'. If the state of the register needs + * saving, push it on the stack. */ + +#define SET_REG_END(state, reg, text, on_error) \ +if(state.changed[reg] < state.level) \ +{ \ + item_t *item; \ + STACK_NEXT(state.stack, item, on_error); \ + item->reg.num = reg; \ + item->reg.start = state.start[reg]; \ + item->reg.end = state.end[reg]; \ + item->reg.level = state.changed[reg]; \ + state.changed[reg] = state.level; \ + state.count++; \ +} \ +state.end[reg] = text + +#define PUSH_FAILURE(state, xcode, xtext, on_error) \ +{ \ + item_t *item; \ + STACK_NEXT(state.stack, item, on_error); \ + item->fail.code = xcode; \ + item->fail.text = xtext; \ + item->fail.count = state.count; \ + item->fail.level = state.level; \ + item->fail.phantom = 0; \ + state.count = 0; \ + state.level++; \ + state.point++; \ +} + +/* Update the last failure point with a new position in the text. 
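The failure stack described above (a doubly linked list of fixed-size pages, with the first page embedded in the state so small patterns never call malloc) can be illustrated standalone. A minimal sketch with hypothetical names that are not part of the Bacula sources; it mirrors the STACK_NEXT/STACK_PREV logic with functions instead of macros:

#include <stdlib.h>

#define PAGE_SIZE 256

typedef struct page {
   int items[PAGE_SIZE];
   struct page *prev, *next;
} page_t;

typedef struct {
   int index;            /* next free slot in the current page */
   page_t *current;      /* page holding the top of the stack */
   page_t first;         /* first page lives inside the stack object */
} pstack_t;

static void pstack_init(pstack_t *s)
{
   s->index = 0;
   s->current = &s->first;
   s->first.prev = s->first.next = NULL;
}

/* push: return a slot to fill, growing by one page when needed */
static int *pstack_push(pstack_t *s)
{
   if (s->index == PAGE_SIZE) {              /* current page is full */
      if (!s->current->next) {               /* allocate and link a new page */
         page_t *p = (page_t *)malloc(sizeof(page_t));
         if (!p) return NULL;
         p->prev = s->current;
         p->next = NULL;
         s->current->next = p;
      }
      s->current = s->current->next;
      s->index = 0;
   }
   return &s->current->items[s->index++];
}

/* pop: return the most recently pushed slot, or NULL when empty */
static int *pstack_pop(pstack_t *s)
{
   if (s->index == 0) {                      /* step back a page */
      if (!s->current->prev) return NULL;
      s->current = s->current->prev;
      s->index = PAGE_SIZE;
   }
   return &s->current->items[--s->index];
}

Pages are never freed on pop, only relinked through prev/next, much as FREE_STATE above frees the extra pages in one pass when matching finishes.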
*/ + +#define UPDATE_FAILURE(state, xtext, on_error) \ +{ \ + item_t *item; \ + STACK_BACK(state.stack, item, state.count + 1, on_error); \ + if (!item->fail.phantom) \ + { \ + item_t *item2; \ + STACK_NEXT(state.stack, item2, on_error); \ + item2->fail.code = item->fail.code; \ + item2->fail.text = xtext; \ + item2->fail.count = state.count; \ + item2->fail.level = state.level; \ + item2->fail.phantom = 1; \ + state.count = 0; \ + state.level++; \ + state.point++; \ + } \ + else \ + { \ + STACK_DISCARD(state.stack, state.count, on_error); \ + STACK_TOP(state.stack, item, on_error); \ + item->fail.text = xtext; \ + state.count = 0; \ + state.level++; \ + } \ +} + +#define POP_FAILURE(state, xcode, xtext, on_empty, on_error) \ +{ \ + item_t *item; \ + do \ + { \ + while(state.count > 0) \ + { \ + STACK_PREV(state.stack, item, on_error); \ + state.start[item->reg.num] = item->reg.start; \ + state.end[item->reg.num] = item->reg.end; \ + state.changed[item->reg.num] = item->reg.level; \ + state.count--; \ + } \ + STACK_PREV(state.stack, item, on_empty); \ + xcode = item->fail.code; \ + xtext = item->fail.text; \ + state.count = item->fail.count; \ + state.level = item->fail.level; \ + state.point--; \ + } \ + while (item->fail.text == NULL); \ +} + +enum regexp_compiled_ops { /* opcodes for compiled regexp */ + Cend, /* end of pattern reached */ + Cbol, /* beginning of line */ + Ceol, /* end of line */ + Cset, /* character set. Followed by 32 bytes of set. */ + Cexact, /* followed by a byte to match */ + Canychar, /* matches any character except newline */ + Cstart_memory, /* set register start addr (followed by reg number) */ + Cend_memory, /* set register end addr (followed by reg number) */ + Cmatch_memory, /* match a duplicate of reg contents (regnum follows) */ + Cjump, /* followed by two bytes (lsb,msb) of displacement. */ + Cstar_jump, /* will change to jump/update_failure_jump at runtime */ + Cfailure_jump, /* jump to addr on failure */ + Cupdate_failure_jump, /* update topmost failure point and jump */ + Cdummy_failure_jump, /* push a dummy failure point and jump */ + Cbegbuf, /* match at beginning of buffer */ + Cendbuf, /* match at end of buffer */ + Cwordbeg, /* match at beginning of word */ + Cwordend, /* match at end of word */ + Cwordbound, /* match if at word boundary */ + Cnotwordbound, /* match if not at word boundary */ + Csyntaxspec, /* matches syntax code (1 byte follows) */ + Cnotsyntaxspec, /* matches if syntax code does not match (1 byte follows) */ + Crepeat1 +}; + +enum regexp_syntax_op { /* syntax codes for plain and quoted characters */ + Rend, /* special code for end of regexp */ + Rnormal, /* normal character */ + Ranychar, /* any character except newline */ + Rquote, /* the quote character */ + Rbol, /* match beginning of line */ + Reol, /* match end of line */ + Roptional, /* match preceding expression optionally */ + Rstar, /* match preceding expr zero or more times */ + Rplus, /* match preceding expr one or more times */ + Ror, /* match either of alternatives */ + Ropenpar, /* opening parenthesis */ + Rclosepar, /* closing parenthesis */ + Rmemory, /* match memory register */ + Rextended_memory, /* \vnn to match registers 10-99 */ + Ropenset, /* open set. Internal syntax hard-coded below. 
*/ + /* the following are gnu extensions to "normal" regexp syntax */ + Rbegbuf, /* beginning of buffer */ + Rendbuf, /* end of buffer */ + Rwordchar, /* word character */ + Rnotwordchar, /* not word character */ + Rwordbeg, /* beginning of word */ + Rwordend, /* end of word */ + Rwordbound, /* word bound */ + Rnotwordbound, /* not word bound */ + Rnum_ops +}; + +static int re_compile_initialized = 0; +static int regexp_syntax = RE_SYNTAX_EGREP; +int re_syntax = RE_SYNTAX_EGREP; /* Exported copy of regexp_syntax */ +static unsigned char plain_ops[256]; +static unsigned char quoted_ops[256]; +static unsigned char precedences[Rnum_ops]; +static int regexp_context_indep_ops; +static int regexp_ansi_sequences; + +#define NUM_LEVELS 5 /* number of precedence levels in use */ +#define MAX_NESTING 100 /* max nesting level of operators */ + +#define SYNTAX(ch) re_syntax_table[(unsigned char)(ch)] + +unsigned char re_syntax_table[256]; + +void re_compile_initialize(void) +{ + int a; + + static int syntax_table_inited = 0; + + if (!syntax_table_inited) { + syntax_table_inited = 1; + memset(re_syntax_table, 0, 256); + for (a = 'a'; a <= 'z'; a++) + re_syntax_table[a] = Sword; + for (a = 'A'; a <= 'Z'; a++) + re_syntax_table[a] = Sword; + for (a = '0'; a <= '9'; a++) + re_syntax_table[a] = Sword | Sdigit | Shexdigit; + for (a = '0'; a <= '7'; a++) + re_syntax_table[a] |= Soctaldigit; + for (a = 'A'; a <= 'F'; a++) + re_syntax_table[a] |= Shexdigit; + for (a = 'a'; a <= 'f'; a++) + re_syntax_table[a] |= Shexdigit; + re_syntax_table[(int)'_'] = Sword; + for (a = 9; a <= 13; a++) + re_syntax_table[a] = Swhitespace; + re_syntax_table[(int)' '] = Swhitespace; + } + re_compile_initialized = 1; + for (a = 0; a < 256; a++) { + plain_ops[a] = Rnormal; + quoted_ops[a] = Rnormal; + } + for (a = '0'; a <= '9'; a++) + quoted_ops[a] = Rmemory; + plain_ops[(int)'\134'] = Rquote; + if (regexp_syntax & RE_NO_BK_PARENS) { + plain_ops[(int)'('] = Ropenpar; + plain_ops[(int)')'] = Rclosepar; + } else { + quoted_ops[(int)'('] = Ropenpar; + quoted_ops[(int)')'] = Rclosepar; + } + if (regexp_syntax & RE_NO_BK_VBAR) { + plain_ops[(int)'\174'] = Ror; + } else { + quoted_ops[(int)'\174'] = Ror; + } + plain_ops[(int)'*'] = Rstar; + if (regexp_syntax & RE_BK_PLUS_QM) { + quoted_ops[(int)'+'] = Rplus; + quoted_ops[(int)'?'] = Roptional; + } else { + plain_ops[(int)'+'] = Rplus; + plain_ops[(int)'?'] = Roptional; + } + if (regexp_syntax & RE_NEWLINE_OR) { + plain_ops[(int)'\n'] = Ror; + } + plain_ops[(int)'\133'] = Ropenset; + plain_ops[(int)'\136'] = Rbol; + plain_ops[(int)'$'] = Reol; + plain_ops[(int)'.'] = Ranychar; + if (!(regexp_syntax & RE_NO_GNU_EXTENSIONS)) { + quoted_ops[(int)'w'] = Rwordchar; + quoted_ops[(int)'W'] = Rnotwordchar; + quoted_ops[(int)'<'] = Rwordbeg; + quoted_ops[(int)'>'] = Rwordend; + quoted_ops[(int)'b'] = Rwordbound; + quoted_ops[(int)'B'] = Rnotwordbound; + quoted_ops[(int)'`'] = Rbegbuf; + quoted_ops[(int)'\''] = Rendbuf; + } + if (regexp_syntax & RE_ANSI_HEX) { + quoted_ops[(int)'v'] = Rextended_memory; + } + for (a = 0; a < Rnum_ops; a++) { + precedences[a] = 4; + } + if (regexp_syntax & RE_TIGHT_VBAR) { + precedences[Ror] = 3; + precedences[Rbol] = 2; + precedences[Reol] = 2; + } else { + precedences[Ror] = 2; + precedences[Rbol] = 3; + precedences[Reol] = 3; + } + precedences[Rclosepar] = 1; + precedences[Rend] = 0; + regexp_context_indep_ops = (regexp_syntax & RE_CONTEXT_INDEP_OPS) != 0; + regexp_ansi_sequences = (regexp_syntax & RE_ANSI_HEX) != 0; +} + +int re_set_syntax(int syntax) { + int 
ret; + + ret = regexp_syntax; + regexp_syntax = syntax; + re_syntax = syntax; /* Exported copy */ + re_compile_initialize(); + return ret; +} + +static int hex_char_to_decimal(int ch) { + if (ch >= '0' && ch <= '9') + return ch - '0'; + if (ch >= 'a' && ch <= 'f') + return ch - 'a' + 10; + if (ch >= 'A' && ch <= 'F') + return ch - 'A' + 10; + return 16; +} + +static void re_compile_fastmap_aux(regex_t * bufp, unsigned char *code, int pos, + unsigned char *visited, + unsigned char *can_be_null, + unsigned char *fastmap) +{ + int a; + int b; + int syntaxcode; + + if (visited[pos]) + return; /* we have already been here */ + visited[pos] = 1; + for (;;) { + switch (code[pos++]) { + case Cend: + *can_be_null = 1; + return; + case Cbol: + case Cbegbuf: + case Cendbuf: + case Cwordbeg: + case Cwordend: + case Cwordbound: + case Cnotwordbound: + for (a = 0; a < 256; a++) + fastmap[a] = 1; + break; + case Csyntaxspec: + syntaxcode = code[pos++]; + for (a = 0; a < 256; a++) + if (SYNTAX(a) & syntaxcode) + fastmap[a] = 1; + return; + case Cnotsyntaxspec: + syntaxcode = code[pos++]; + for (a = 0; a < 256; a++) + if (!(SYNTAX(a) & syntaxcode)) + fastmap[a] = 1; + return; + case Ceol: + fastmap[(int)'\n'] = 1; + if (*can_be_null == 0) + *can_be_null = 2; /* can match null, but only at end of buffer */ + return; + case Cset: + for (a = 0; a < 256 / 8; a++) + if (code[pos + a] != 0) + for (b = 0; b < 8; b++) + if (code[pos + a] & (1 << b)) + fastmap[(a << 3) + b] = 1; + pos += 256 / 8; + return; + case Cexact: + fastmap[(unsigned char)code[pos]] = 1; + return; + case Canychar: + for (a = 0; a < 256; a++) + if (a != '\n') + fastmap[a] = 1; + return; + case Cstart_memory: + case Cend_memory: + pos++; + break; + case Cmatch_memory: + for (a = 0; a < 256; a++) + fastmap[a] = 1; + *can_be_null = 1; + return; + case Cjump: + case Cdummy_failure_jump: + case Cupdate_failure_jump: + case Cstar_jump: + a = (unsigned char)code[pos++]; + a |= (unsigned char)code[pos++] << 8; + pos += (int)SHORT(a); + if (visited[pos]) { + /* argh... the regexp contains empty loops. This is not + good, as this may cause a failure stack overflow when + matching. Oh well. */ + /* this path leads nowhere; pursue other paths. 
*/ + return; + } + visited[pos] = 1; + break; + case Cfailure_jump: + a = (unsigned char)code[pos++]; + a |= (unsigned char)code[pos++] << 8; + a = pos + (int)SHORT(a); + re_compile_fastmap_aux(bufp, code, a, visited, can_be_null, fastmap); + break; + case Crepeat1: + pos += 2; + break; + default: + set_error("Unknown regex opcode: memory corrupted?"); + return; + } + } +} + +static int re_do_compile_fastmap(regex_t * bufp, unsigned char *buffer, int used, + int pos, unsigned char *can_be_null, + unsigned char *fastmap) +{ + unsigned char small_visited[512], *visited; + + if (used <= (int)sizeof(small_visited)) + visited = small_visited; + else { + visited = (unsigned char *)malloc(used); + if (!visited) + return 0; + } + *can_be_null = 0; + memset(fastmap, 0, 256); + memset(visited, 0, used); + re_compile_fastmap_aux(bufp, buffer, pos, visited, can_be_null, fastmap); + if (visited != small_visited) + free(visited); + return 1; +} + +void re_compile_fastmap(regex_t * bufp) +{ + if (!bufp->fastmap || bufp->fastmap_accurate) + return; +// assert(bufp->used > 0); + if (!re_do_compile_fastmap(bufp, bufp->buffer, + bufp->used, 0, &bufp->can_be_null, bufp->fastmap)) + return; + if (got_error) + return; + if (bufp->buffer[0] == Cbol) + bufp->anchor = 1; /* begline */ + else if (bufp->buffer[0] == Cbegbuf) + bufp->anchor = 2; /* begbuf */ + else + bufp->anchor = 0; /* none */ + bufp->fastmap_accurate = 1; +} + +/* + * star is coded as: + * 1: failure_jump 2 + * ... code for operand of star + * star_jump 1 + * 2: ... code after star + * + * We change the star_jump to update_failure_jump if we can determine + * that it is safe to do so; otherwise we change it to an ordinary + * jump. + * + * plus is coded as + * + * jump 2 + * 1: failure_jump 3 + * 2: ... code for operand of plus + * star_jump 1 + * 3: ... code after plus + * + * For star_jump considerations this is processed identically to star. + * + */ + +static int re_optimize_star_jump(regex_t * bufp, unsigned char *code) +{ + unsigned char map[256]; + unsigned char can_be_null; + unsigned char *p1; + unsigned char *p2; + unsigned char ch; + int a; + int b; + int num_instructions = 0; + + a = (unsigned char)*code++; + a |= (unsigned char)*code++ << 8; + a = (int)SHORT(a); + + p1 = code + a + 3; /* skip the failure_jump */ + /* Check that the jump is within the pattern */ + if (p1 < bufp->buffer || bufp->buffer + bufp->used < p1) { + set_error("Regex VM jump out of bounds (failure_jump opt)"); + return 0; + } +// assert(p1[-3] == Cfailure_jump); + p2 = code; + /* p1 points inside loop, p2 points to after loop */ + if (!re_do_compile_fastmap(bufp, bufp->buffer, bufp->used, + (int)(p2 - bufp->buffer), &can_be_null, map)) + goto make_normal_jump; + + /* If we might introduce a new update point inside the + * loop, we can't optimize because then update_jump would + * update a wrong failure point. Thus we have to be + * quite careful here. 
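To see the opcode layout that the star/plus comment above describes, one way is to compile a small pattern and dump the raw bytes of the compiled buffer. A hedged sketch, assuming a Bacula build where bregex.c is linked in (regcomp and regex_t are remapped to b_regcomp and b_regex_t by bregex.h); the byte values correspond to the regexp_compiled_ops enum above:

#include "bacula.h"
#include "bregex.h"
#include <stdio.h>

static void dump_compiled(const char *pattern)
{
   regex_t re;

   if (regcomp(&re, pattern, 0) != 0) {
      fprintf(stderr, "compile failed\n");
      return;
   }
   printf("%s compiles to %d bytes:", pattern, re.used);
   for (int i = 0; i < re.used; i++) {
      printf(" %02x", re.buffer[i]);
   }
   printf("\n");
   regfree(&re);
}

For a pattern such as "a*b", the Cstar_jump emitted for "a*" will usually have been rewritten into Cupdate_failure_jump or Crepeat1 by re_optimize_star_jump() before the buffer is dumped, along the lines the comment above explains.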
+ */ + + /* loop until we find something that consumes a character */ + for (;;) { + num_instructions++; + switch (*p1++) { + case Cbol: + case Ceol: + case Cbegbuf: + case Cendbuf: + case Cwordbeg: + case Cwordend: + case Cwordbound: + case Cnotwordbound: + continue; + case Cstart_memory: + case Cend_memory: + p1++; + continue; + case Cexact: + ch = (unsigned char)*p1++; + if (map[(int)ch]) + goto make_normal_jump; + break; + case Canychar: + for (b = 0; b < 256; b++) + if (b != '\n' && map[b]) + goto make_normal_jump; + break; + case Cset: + for (b = 0; b < 256; b++) + if ((p1[b >> 3] & (1 << (b & 7))) && map[b]) + goto make_normal_jump; + p1 += 256 / 8; + break; + default: + goto make_normal_jump; + } + break; + } + /* now we know that we can't backtrack. */ + while (p1 != p2 - 3) { + num_instructions++; + switch (*p1++) { + case Cend: + return 0; + case Cbol: + case Ceol: + case Canychar: + case Cbegbuf: + case Cendbuf: + case Cwordbeg: + case Cwordend: + case Cwordbound: + case Cnotwordbound: + break; + case Cset: + p1 += 256 / 8; + break; + case Cexact: + case Cstart_memory: + case Cend_memory: + case Cmatch_memory: + case Csyntaxspec: + case Cnotsyntaxspec: + p1++; + break; + case Cjump: + case Cstar_jump: + case Cfailure_jump: + case Cupdate_failure_jump: + case Cdummy_failure_jump: + goto make_normal_jump; + default: + return 0; + } + } + + /* make_update_jump: */ + code -= 3; + a += 3; /* jump to after the Cfailure_jump */ + code[0] = Cupdate_failure_jump; + code[1] = a & 0xff; + code[2] = a >> 8; + if (num_instructions > 1) + return 1; +// assert(num_instructions == 1); + /* if the only instruction matches a single character, we can do + * better */ + p1 = code + 3 + a; /* start of sole instruction */ + if (*p1 == Cset || *p1 == Cexact || *p1 == Canychar || + *p1 == Csyntaxspec || *p1 == Cnotsyntaxspec) + code[0] = Crepeat1; + return 1; + + make_normal_jump: + code -= 3; + *code = Cjump; + return 1; +} + +static int re_optimize(regex_t * bufp) +{ + unsigned char *code; + + code = bufp->buffer; + + while (1) { + switch (*code++) { + case Cend: + return 1; + case Canychar: + case Cbol: + case Ceol: + case Cbegbuf: + case Cendbuf: + case Cwordbeg: + case Cwordend: + case Cwordbound: + case Cnotwordbound: + break; + case Cset: + code += 256 / 8; + break; + case Cexact: + case Cstart_memory: + case Cend_memory: + case Cmatch_memory: + case Csyntaxspec: + case Cnotsyntaxspec: + code++; + break; + case Cstar_jump: + if (!re_optimize_star_jump(bufp, code)) { + return 0; + } + /* fall through */ + case Cupdate_failure_jump: + case Cjump: + case Cdummy_failure_jump: + case Cfailure_jump: + case Crepeat1: + code += 2; + break; + default: + return 0; + } + } +} + +#define NEXTCHAR(var) \ +{ \ + if (pos >= size) \ + goto ends_prematurely; \ + (var) = regex[pos]; \ + pos++; \ +} + +#define ALLOC(amount) \ +{ \ + if (pattern_offset+(amount) > alloc) \ + { \ + alloc += 256 + (amount); \ + pattern = (unsigned char *)realloc(pattern, alloc); \ + if (!pattern) \ + goto out_of_memory; \ + } \ +} + +#define STORE(ch) pattern[pattern_offset++] = (ch) + +#define CURRENT_LEVEL_START (starts[starts_base + current_level]) + +#define SET_LEVEL_START starts[starts_base + current_level] = pattern_offset + +#define PUSH_LEVEL_STARTS \ +if (starts_base < (MAX_NESTING-1)*NUM_LEVELS) \ + starts_base += NUM_LEVELS; \ +else \ + goto too_complex \ + +#define POP_LEVEL_STARTS starts_base -= NUM_LEVELS + +#define PUT_ADDR(offset,addr) \ +{ \ + int disp = (addr) - (offset) - 2; \ + pattern[(offset)] = disp & 0xff; \ 
+ pattern[(offset)+1] = (disp>>8) & 0xff; \ +} + +#define INSERT_JUMP(pos,type,addr) \ +{ \ + int a, p = (pos), t = (type), ad = (addr); \ + for (a = pattern_offset - 1; a >= p; a--) \ + pattern[a + 3] = pattern[a]; \ + pattern[p] = t; \ + PUT_ADDR(p+1,ad); \ + pattern_offset += 3; \ +} + +#define SETBIT(buf,offset,bit) (buf)[(offset)+(bit)/8] |= (1<<((bit) & 7)) + +#define SET_FIELDS \ +{ \ + bufp->allocated = alloc; \ + bufp->buffer = pattern; \ + bufp->used = pattern_offset; \ +} + +#define GETHEX(var) \ +{ \ + unsigned char gethex_ch, gethex_value; \ + NEXTCHAR(gethex_ch); \ + gethex_value = hex_char_to_decimal(gethex_ch); \ + if (gethex_value == 16) \ + goto hex_error; \ + NEXTCHAR(gethex_ch); \ + gethex_ch = hex_char_to_decimal(gethex_ch); \ + if (gethex_ch == 16) \ + goto hex_error; \ + (var) = gethex_value * 16 + gethex_ch; \ +} + +#define ANSI_TRANSLATE(ch) \ +{ \ + switch (ch) \ + { \ + case 'a': \ + case 'A': \ + { \ + ch = 7; /* audible bell */ \ + break; \ + } \ + case 'b': \ + case 'B': \ + { \ + ch = 8; /* backspace */ \ + break; \ + } \ + case 'f': \ + case 'F': \ + { \ + ch = 12; /* form feed */ \ + break; \ + } \ + case 'n': \ + case 'N': \ + { \ + ch = 10; /* line feed */ \ + break; \ + } \ + case 'r': \ + case 'R': \ + { \ + ch = 13; /* carriage return */ \ + break; \ + } \ + case 't': \ + case 'T': \ + { \ + ch = 9; /* tab */ \ + break; \ + } \ + case 'v': \ + case 'V': \ + { \ + ch = 11; /* vertical tab */ \ + break; \ + } \ + case 'x': /* hex code */ \ + case 'X': \ + { \ + GETHEX(ch); \ + break; \ + } \ + default: \ + { \ + /* other characters passed through */ \ + if (translate) \ + ch = translate[(unsigned char)ch]; \ + break; \ + } \ + } \ +} + +const char *re_compile_pattern(regex_t * bufp, unsigned char *regex) +{ + int a; + int pos; + int op; + int current_level; + int level; + int opcode; + int pattern_offset = 0, alloc; + int starts[NUM_LEVELS * MAX_NESTING]; + int starts_base; + int future_jumps[MAX_NESTING]; + int num_jumps; + unsigned char ch = '\0'; + unsigned char *pattern; + unsigned char *translate; + int next_register; + int paren_depth; + int num_open_registers; + int open_registers[RE_NREGS]; + int beginning_context; + int size = strlen((char *)regex); + + if (!re_compile_initialized) + re_compile_initialize(); + bufp->used = 0; + bufp->fastmap_accurate = 0; + bufp->uses_registers = 1; + bufp->num_registers = 1; + translate = bufp->translate; + pattern = bufp->buffer; + alloc = bufp->allocated; + if (alloc == 0 || pattern == NULL) { + alloc = 256; + bufp->buffer = pattern = (unsigned char *)malloc(alloc); + if (!pattern) + goto out_of_memory; + } + pattern_offset = 0; + starts_base = 0; + num_jumps = 0; + current_level = 0; + SET_LEVEL_START; + num_open_registers = 0; + next_register = 1; + paren_depth = 0; + beginning_context = 1; + op = -1; + /* we use Rend dummy to ensure that pending jumps are updated + (due to low priority of Rend) before exiting the loop. 
*/ + pos = 0; + while (op != Rend) { + if (pos >= size) + op = Rend; + else { + NEXTCHAR(ch); + if (translate) + ch = translate[(unsigned char)ch]; + op = plain_ops[(unsigned char)ch]; + if (op == Rquote) { + NEXTCHAR(ch); + op = quoted_ops[(unsigned char)ch]; + if (op == Rnormal && regexp_ansi_sequences) + ANSI_TRANSLATE(ch); + } + } + level = precedences[op]; + /* printf("ch='%c' op=%d level=%d current_level=%d + curlevstart=%d\n", ch, op, level, current_level, + CURRENT_LEVEL_START); */ + if (level > current_level) { + for (current_level++; current_level < level; current_level++) + SET_LEVEL_START; + SET_LEVEL_START; + } else if (level < current_level) { + current_level = level; + for (; num_jumps > 0 && + future_jumps[num_jumps - 1] >= CURRENT_LEVEL_START; num_jumps--) + PUT_ADDR(future_jumps[num_jumps - 1], pattern_offset); + } + switch (op) { + case Rend: + break; + case Rnormal: + normal_char: + opcode = Cexact; + store_opcode_and_arg: /* opcode & ch must be set */ + SET_LEVEL_START; + ALLOC(2); + STORE(opcode); + STORE(ch); + break; + case Ranychar: + opcode = Canychar; + store_opcode: + SET_LEVEL_START; + ALLOC(1); + STORE(opcode); + break; + case Rquote: + set_error("Rquote"); + /*NOTREACHED*/ case Rbol: + if (!beginning_context) { + if (regexp_context_indep_ops) + goto op_error; + else + goto normal_char; + } + opcode = Cbol; + goto store_opcode; + case Reol: + if (!((pos >= size) || + ((regexp_syntax & RE_NO_BK_VBAR) ? + (regex[pos] == '\174') : + (pos + 1 < size && regex[pos] == '\134' && + regex[pos + 1] == '\174')) || + ((regexp_syntax & RE_NO_BK_PARENS) ? + (regex[pos] == ')') : + (pos + 1 < size && regex[pos] == '\134' && + regex[pos + 1] == ')')))) { + if (regexp_context_indep_ops) + goto op_error; + else + goto normal_char; + } + opcode = Ceol; + goto store_opcode; + /* NOTREACHED */ + break; + case Roptional: + if (beginning_context) { + if (regexp_context_indep_ops) + goto op_error; + else + goto normal_char; + } + if (CURRENT_LEVEL_START == pattern_offset) + break; /* ignore empty patterns for ? 
*/ + ALLOC(3); + INSERT_JUMP(CURRENT_LEVEL_START, Cfailure_jump, pattern_offset + 3); + break; + case Rstar: + case Rplus: + if (beginning_context) { + if (regexp_context_indep_ops) + goto op_error; + else + goto normal_char; + } + if (CURRENT_LEVEL_START == pattern_offset) + break; /* ignore empty patterns for + and * */ + ALLOC(9); + INSERT_JUMP(CURRENT_LEVEL_START, Cfailure_jump, pattern_offset + 6); + INSERT_JUMP(pattern_offset, Cstar_jump, CURRENT_LEVEL_START); + if (op == Rplus) /* jump over initial failure_jump */ + INSERT_JUMP(CURRENT_LEVEL_START, Cdummy_failure_jump, + CURRENT_LEVEL_START + 6); + break; + case Ror: + ALLOC(6); + INSERT_JUMP(CURRENT_LEVEL_START, Cfailure_jump, pattern_offset + 6); + if (num_jumps >= MAX_NESTING) + goto too_complex; + STORE(Cjump); + future_jumps[num_jumps++] = pattern_offset; + STORE(0); + STORE(0); + SET_LEVEL_START; + break; + case Ropenpar: + { + SET_LEVEL_START; + if (next_register < RE_NREGS) { + bufp->uses_registers = 1; + ALLOC(2); + STORE(Cstart_memory); + STORE(next_register); + open_registers[num_open_registers++] = next_register; + bufp->num_registers++; + next_register++; + } + paren_depth++; + PUSH_LEVEL_STARTS; + current_level = 0; + SET_LEVEL_START; + break; + } + case Rclosepar: + if (paren_depth <= 0) + goto parenthesis_error; + POP_LEVEL_STARTS; + current_level = precedences[Ropenpar]; + paren_depth--; + if (paren_depth < num_open_registers) { + bufp->uses_registers = 1; + ALLOC(2); + STORE(Cend_memory); + num_open_registers--; + STORE(open_registers[num_open_registers]); + } + break; + case Rmemory: + if (ch == '0') + goto bad_match_register; + if (!(ch >= '0' && ch <= '9')) { + goto bad_match_register; + } + bufp->uses_registers = 1; + opcode = Cmatch_memory; + ch -= '0'; + goto store_opcode_and_arg; + case Rextended_memory: + NEXTCHAR(ch); + if (ch < '0' || ch > '9') + goto bad_match_register; + NEXTCHAR(a); + if (a < '0' || a > '9') + goto bad_match_register; + ch = 10 * (a - '0') + ch - '0'; + if (ch == 0 || ch >= RE_NREGS) + goto bad_match_register; + bufp->uses_registers = 1; + opcode = Cmatch_memory; + goto store_opcode_and_arg; + case Ropenset: + { + int complement; + int prev; + int offset; + int range; + int firstchar; + + SET_LEVEL_START; + ALLOC(1 + 256 / 8); + STORE(Cset); + offset = pattern_offset; + for (a = 0; a < 256 / 8; a++) + STORE(0); + NEXTCHAR(ch); + if (translate) + ch = translate[(unsigned char)ch]; + if (ch == '\136') { + complement = 1; + NEXTCHAR(ch); + if (translate) + ch = translate[(unsigned char)ch]; + } else + complement = 0; + prev = -1; + range = 0; + firstchar = 1; + while (ch != '\135' || firstchar) { + firstchar = 0; + if (regexp_ansi_sequences && ch == '\134') { + NEXTCHAR(ch); + ANSI_TRANSLATE(ch); + } + if (range) { + for (a = prev; a <= (int)ch; a++) + SETBIT(pattern, offset, a); + prev = -1; + range = 0; + } else if (prev != -1 && ch == '-') + range = 1; + else { + SETBIT(pattern, offset, ch); + prev = ch; + } + NEXTCHAR(ch); + if (translate) + ch = translate[(unsigned char)ch]; + } + if (range) + SETBIT(pattern, offset, '-'); + if (complement) { + for (a = 0; a < 256 / 8; a++) + pattern[offset + a] ^= 0xff; + } + break; + } + case Rbegbuf: + { + opcode = Cbegbuf; + goto store_opcode; + } + case Rendbuf: + { + opcode = Cendbuf; + goto store_opcode; + } + case Rwordchar: + { + opcode = Csyntaxspec; + ch = Sword; + goto store_opcode_and_arg; + } + case Rnotwordchar: + { + opcode = Cnotsyntaxspec; + ch = Sword; + goto store_opcode_and_arg; + } + case Rwordbeg: + { + opcode = Cwordbeg; + 
goto store_opcode; + } + case Rwordend: + { + opcode = Cwordend; + goto store_opcode; + } + case Rwordbound: + { + opcode = Cwordbound; + goto store_opcode; + } + case Rnotwordbound: + { + opcode = Cnotwordbound; + goto store_opcode; + } + default: + { + abort(); + } + } + beginning_context = (op == Ropenpar || op == Ror); + } + if (starts_base != 0) + goto parenthesis_error; +// assert(num_jumps == 0); + ALLOC(1); + STORE(Cend); + SET_FIELDS; + if (!re_optimize(bufp)) + return "Optimization error"; + return NULL; + + op_error: + SET_FIELDS; + return "Badly placed special character"; + + bad_match_register: + SET_FIELDS; + return "Bad match register number"; + + hex_error: + SET_FIELDS; + return "Bad hexadecimal number"; + + parenthesis_error: + SET_FIELDS; + return "Badly placed parenthesis"; + + out_of_memory: + SET_FIELDS; + return "Out of memory"; + + ends_prematurely: + SET_FIELDS; + return "Regular expression ends prematurely"; + + too_complex: + SET_FIELDS; + return "Regular expression too complex"; +} + +#undef CHARAT +#undef NEXTCHAR +#undef GETHEX +#undef ALLOC +#undef STORE +#undef CURRENT_LEVEL_START +#undef SET_LEVEL_START +#undef PUSH_LEVEL_STARTS +#undef POP_LEVEL_STARTS +#undef PUT_ADDR +#undef INSERT_JUMP +#undef SETBIT +#undef SET_FIELDS + +#define PREFETCH if (text == textend) goto fail + +#define NEXTCHAR(var) \ +PREFETCH; \ +var = (unsigned char)*text++; \ +if (translate) \ + var = translate[var] + + +int regcomp(regex_t * bufp, const char *regex, int cflags) +{ + memset(bufp, 0, sizeof(regex_t)); + bufp->cflags = cflags; + if (bufp->cflags & REG_ICASE) { + char *p, *lcase = bstrdup(regex); + for( p = lcase; *p ; p++) { + *p = tolower(*p); + } + re_compile_pattern(bufp, (unsigned char *)lcase); + bfree(lcase); + } else { + re_compile_pattern(bufp, (unsigned char *)regex); + } + if (got_error) { + return -1; + } + return 0; +} + +void re_registers_to_regmatch(regexp_registers_t old_regs, + regmatch_t pmatch[], + size_t nmatch) +{ + size_t i=0; + + /* We have to set the last entry to -1 */ + nmatch = nmatch - 1; + for (i=0; (i < nmatch) && (old_regs->start[i] > -1) ; i++) { + pmatch[i].rm_so = old_regs->start[i]; + pmatch[i].rm_eo = old_regs->end[i]; + } + + pmatch[i].rm_eo = pmatch[i].rm_so = -1; +} + +int regexec(regex_t * preg, const char *string, size_t nmatch, + regmatch_t pmatch[], int eflags) +{ + int stat; + int len = strlen(string); + struct re_registers regs; + stat = re_search(preg, (unsigned char *)string, len, 0, len, ®s); + if (stat >= 0 && nmatch > 0) { + re_registers_to_regmatch(®s, pmatch, nmatch); + } + /* stat is the start position in the string base 0 where + * the pattern was found or negative if not found. + */ + return stat < 0 ? 
-1 : 0; +} + +size_t regerror(int errcode, regex_t * preg, char *errbuf, size_t errbuf_size) +{ + bstrncpy(errbuf, preg->errmsg, errbuf_size); + return 0; +} + +void regfree(regex_t * preg) +{ + if (preg->lcase) { + free_pool_memory(preg->lcase); + preg->lcase = NULL; + } + if (preg->buffer) { + free(preg->buffer); + preg->buffer = NULL; + } +} + +int re_match(regex_t * bufp, unsigned char *string, int size, int pos, + regexp_registers_t old_regs) +{ + unsigned char *code; + unsigned char *translate; + unsigned char *text; + unsigned char *textstart; + unsigned char *textend; + int a; + int b; + int ch; + int reg; + int match_end; + unsigned char *regstart; + unsigned char *regend; + int regsize; + match_state state; + +// assert(pos >= 0 && size >= 0); +// assert(pos <= size); + + text = string + pos; + textstart = string; + textend = string + size; + + code = bufp->buffer; + + translate = bufp->translate; + + NEW_STATE(state, bufp->num_registers); + + continue_matching: + switch (*code++) { + case Cend: + { + match_end = text - textstart; + if (old_regs) { + old_regs->start[0] = pos; + old_regs->end[0] = match_end; + if (!bufp->uses_registers) { + for (a = 1; a < RE_NREGS; a++) { + old_regs->start[a] = -1; + old_regs->end[a] = -1; + } + } else { + for (a = 1; a < bufp->num_registers; a++) { + if ((GET_REG_START(state, a) == NULL) || + (GET_REG_END(state, a) == NULL)) { + old_regs->start[a] = -1; + old_regs->end[a] = -1; + continue; + } + old_regs->start[a] = GET_REG_START(state, a) - textstart; + old_regs->end[a] = GET_REG_END(state, a) - textstart; + } + for (; a < RE_NREGS; a++) { + old_regs->start[a] = -1; + old_regs->end[a] = -1; + } + } + } + FREE_STATE(state); + return match_end - pos; + } + case Cbol: + { + if (text == textstart || text[-1] == '\n') + goto continue_matching; + goto fail; + } + case Ceol: + { + if (text == textend || *text == '\n') + goto continue_matching; + goto fail; + } + case Cset: + { + NEXTCHAR(ch); + if (code[ch / 8] & (1 << (ch & 7))) { + code += 256 / 8; + goto continue_matching; + } + goto fail; + } + case Cexact: + { + NEXTCHAR(ch); + if (ch != (unsigned char)*code++) + goto fail; + goto continue_matching; + } + case Canychar: + { + NEXTCHAR(ch); + if (ch == '\n') + goto fail; + goto continue_matching; + } + case Cstart_memory: + { + reg = *code++; + SET_REG_START(state, reg, text, goto error); + goto continue_matching; + } + case Cend_memory: + { + reg = *code++; + SET_REG_END(state, reg, text, goto error); + goto continue_matching; + } + case Cmatch_memory: + { + reg = *code++; + regstart = GET_REG_START(state, reg); + regend = GET_REG_END(state, reg); + if ((regstart == NULL) || (regend == NULL)) + goto fail; /* or should we just match nothing? 
*/ + regsize = regend - regstart; + + if (regsize > (textend - text)) + goto fail; + if (translate) { + for (; regstart < regend; regstart++, text++) + if (translate[*regstart] != translate[*text]) + goto fail; + } else + for (; regstart < regend; regstart++, text++) + if (*regstart != *text) + goto fail; + goto continue_matching; + } + case Cupdate_failure_jump: + { + UPDATE_FAILURE(state, text, goto error); + /* fall to next case */ + } + /* treat Cstar_jump just like Cjump if it hasn't been optimized */ + case Cstar_jump: + case Cjump: + { + a = (unsigned char)*code++; + a |= (unsigned char)*code++ << 8; + code += (int)SHORT(a); + if (code < bufp->buffer || bufp->buffer + bufp->used < code) { + set_error("Regex VM jump out of bounds (Cjump)"); + FREE_STATE(state); + return -2; + } + goto continue_matching; + } + case Cdummy_failure_jump: + { + unsigned char *failuredest; + + a = (unsigned char)*code++; + a |= (unsigned char)*code++ << 8; + a = (int)SHORT(a); +// assert(*code == Cfailure_jump); + b = (unsigned char)code[1]; + b |= (unsigned char)code[2] << 8; + failuredest = code + (int)SHORT(b) + 3; + if (failuredest < bufp->buffer || bufp->buffer + bufp->used < failuredest) { + set_error + ("Regex VM jump out of bounds (Cdummy_failure_jump failuredest)"); + FREE_STATE(state); + return -2; + } + PUSH_FAILURE(state, failuredest, NULL, goto error); + code += a; + if (code < bufp->buffer || bufp->buffer + bufp->used < code) { + set_error("Regex VM jump out of bounds (Cdummy_failure_jump code)"); + FREE_STATE(state); + return -2; + } + goto continue_matching; + } + case Cfailure_jump: + { + a = (unsigned char)*code++; + a |= (unsigned char)*code++ << 8; + a = (int)SHORT(a); + if (code + a < bufp->buffer || bufp->buffer + bufp->used < code + a) { + set_error("Regex VM jump out of bounds (Cfailure_jump)"); + FREE_STATE(state); + return -2; + } + PUSH_FAILURE(state, code + a, text, goto error); + goto continue_matching; + } + case Crepeat1: + { + unsigned char *pinst; + a = (unsigned char)*code++; + a |= (unsigned char)*code++ << 8; + a = (int)SHORT(a); + pinst = code + a; + if (pinst < bufp->buffer || bufp->buffer + bufp->used < pinst) { + set_error("Regex VM jump out of bounds (Crepeat1)"); + FREE_STATE(state); + return -2; + } + /* pinst is sole instruction in loop, and it matches a + * single character. Since Crepeat1 was originally a + * Cupdate_failure_jump, we also know that backtracking + * is useless: so long as the single-character + * expression matches, it must be used. Also, in the + * case of +, we've already matched one character, so + + * can't fail: nothing here can cause a failure. 
*/ + switch (*pinst++) { + case Cset: + { + if (translate) { + while (text < textend) { + ch = translate[(unsigned char)*text]; + if (pinst[ch / 8] & (1 << (ch & 7))) + text++; + else + break; + } + } else { + while (text < textend) { + ch = (unsigned char)*text; + if (pinst[ch / 8] & (1 << (ch & 7))) + text++; + else + break; + } + } + break; + } + case Cexact: + { + ch = (unsigned char)*pinst; + if (translate) { + while (text < textend && translate[(unsigned char)*text] == ch) + text++; + } else { + while (text < textend && (unsigned char)*text == ch) + text++; + } + break; + } + case Canychar: + { + while (text < textend && (unsigned char)*text != '\n') + text++; + break; + } + case Csyntaxspec: + { + a = (unsigned char)*pinst; + if (translate) { + while (text < textend && (SYNTAX(translate[*text]) & a)) + text++; + } else { + while (text < textend && (SYNTAX(*text) & a)) + text++; + } + break; + } + case Cnotsyntaxspec: + { + a = (unsigned char)*pinst; + if (translate) { + while (text < textend && !(SYNTAX(translate[*text]) & a)) + text++; + } else { + while (text < textend && !(SYNTAX(*text) & a)) + text++; + } + break; + } + default: + { + FREE_STATE(state); + set_error("Unknown regex opcode: memory corrupted?"); + return -2; + /*NOTREACHED*/} + } + /* due to the funky way + and * are compiled, the top + * failure- stack entry at this point is actually a + * success entry -- update it & pop it */ + UPDATE_FAILURE(state, text, goto error); + goto fail; /* i.e., succeed */ + } + case Cbegbuf: + { + if (text == textstart) + goto continue_matching; + goto fail; + } + case Cendbuf: + { + if (text == textend) + goto continue_matching; + goto fail; + } + case Cwordbeg: + { + if (text == textend) + goto fail; + if (!(SYNTAX(*text) & Sword)) + goto fail; + if (text == textstart) + goto continue_matching; + if (!(SYNTAX(text[-1]) & Sword)) + goto continue_matching; + goto fail; + } + case Cwordend: + { + if (text == textstart) + goto fail; + if (!(SYNTAX(text[-1]) & Sword)) + goto fail; + if (text == textend) + goto continue_matching; + if (!(SYNTAX(*text) & Sword)) + goto continue_matching; + goto fail; + } + case Cwordbound: + { + /* Note: as in gnu regexp, this also matches at the + * beginning and end of buffer. */ + + if (text == textstart || text == textend) + goto continue_matching; + if ((SYNTAX(text[-1]) & Sword) ^ (SYNTAX(*text) & Sword)) + goto continue_matching; + goto fail; + } + case Cnotwordbound: + { + /* Note: as in gnu regexp, this never matches at the + * beginning and end of buffer. 
*/ + if (text == textstart || text == textend) + goto fail; + if (!((SYNTAX(text[-1]) & Sword) ^ (SYNTAX(*text) & Sword))) + goto continue_matching; + goto fail; + } + case Csyntaxspec: + { + NEXTCHAR(ch); + if (!(SYNTAX(ch) & (unsigned char)*code++)) + goto fail; + goto continue_matching; + } + case Cnotsyntaxspec: + { + NEXTCHAR(ch); + if (SYNTAX(ch) & (unsigned char)*code++) + goto fail; + goto continue_matching; + } + default: + { + FREE_STATE(state); + set_error("Unknown regex opcode: memory corrupted?"); + return -2; + /*NOTREACHED*/} + } + + + +#if 0 /* This line is never reached --Guido */ + abort(); +#endif + /* + *NOTREACHED + */ + + /* Using "break;" in the above switch statement is equivalent to "goto fail;" */ + fail: + POP_FAILURE(state, code, text, goto done_matching, goto error); + goto continue_matching; + + done_matching: +/* if(translated != NULL) */ +/* free(translated); */ + FREE_STATE(state); + return -1; + + error: +/* if (translated != NULL) */ +/* free(translated); */ + FREE_STATE(state); + return -2; +} + + +#undef PREFETCH +#undef NEXTCHAR + +int re_search(regex_t * bufp, unsigned char *str, int size, int pos, + int range, regexp_registers_t regs) +{ + unsigned char *fastmap; + unsigned char *translate; + unsigned char *text; + unsigned char *partstart; + unsigned char *partend; + int dir; + int ret; + unsigned char anchor; + unsigned char *string = str; + + if (bufp->cflags & REG_ICASE) { /* we must use string in lowercase */ + int len = strlen((const char *)str); + if (!bufp->lcase) { + bufp->lcase = get_pool_memory(PM_FNAME); + } + bufp->lcase = check_pool_memory_size(bufp->lcase, len+1); + unsigned char *dst = (unsigned char *)bufp->lcase; + while (*string) { + *dst++ = tolower(*string++); + } + *dst = '\0'; + string = (unsigned char *)bufp->lcase; + } + +// assert(size >= 0 && pos >= 0); +// assert(pos + range >= 0 && pos + range <= size); /* Bugfix by ylo */ + + fastmap = bufp->fastmap; + translate = bufp->translate; + if (fastmap && !bufp->fastmap_accurate) { + re_compile_fastmap(bufp); + if (got_error) + return -2; + } + + anchor = bufp->anchor; + if (bufp->can_be_null == 1) /* can_be_null == 2: can match null at eob */ + fastmap = NULL; + + if (range < 0) { + dir = -1; + range = -range; + } else + dir = 1; + + if (anchor == 2) { + if (pos != 0) + return -1; + else + range = 0; + } + + for (; range >= 0; range--, pos += dir) { + if (fastmap) { + if (dir == 1) { /* searching forwards */ + + text = string + pos; + partend = string + size; + partstart = text; + if (translate) + while (text != partend && + !fastmap[(unsigned char)translate[(unsigned char)*text]]) + text++; + else + while (text != partend && !fastmap[(unsigned char)*text]) + text++; + pos += text - partstart; + range -= text - partstart; + if (pos == size && bufp->can_be_null == 0) + return -1; + } else { /* searching backwards */ + text = string + pos; + partstart = string + pos - range; + partend = text; + if (translate) + while (text != partstart && !fastmap[(unsigned char) + translate[(unsigned char)*text]]) + text--; + else + while (text != partstart && !fastmap[(unsigned char)*text]) + text--; + pos -= partend - text; + range -= partend - text; + } + } + if (anchor == 1) { /* anchored to begline */ + if (pos > 0 && (string[pos - 1] != '\n')) + continue; + } +// assert(pos >= 0 && pos <= size); + ret = re_match(bufp, string, size, pos, regs); + if (ret >= 0) + return pos; + if (ret == -2) + return -2; + } + return -1; +} + +/* +** Local Variables: +** mode: c +** c-file-style: "python" 
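With re_match() and re_search() now complete, here is a hedged sketch of driving the engine directly with the GNU-style word-boundary operator handled just above (Cwordbound, which the \b escape compiles to under the default egrep syntax). It assumes a Bacula build environment; the sample text and the expected offset are worked out by hand from the code above, not taken from a test suite:

#include "bacula.h"
#include "bregex.h"
#include <string.h>
#include <stdio.h>

static void word_boundary_example(void)
{
   regex_t re;
   const char *text = "concatenate a cat";
   int len = strlen(text);
   int pos;

   if (regcomp(&re, "\\bcat\\b", 0) != 0) {
      return;
   }
   /* re_search() returns the offset of the first match, or -1/-2 */
   pos = re_search(&re, (unsigned char *)text, len, 0, len, NULL);
   printf("match at %d\n", pos);   /* expected: 14, the standalone "cat" */
   regfree(&re);
}

The "cat" embedded in "concatenate" is skipped because both neighbours of its 'c' are word characters, so Cwordbound fails there; the final "cat" matches, with the trailing \b satisfied at end of buffer as the comment above notes.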
+** End: +*/ diff --git a/src/lib/bregex.h b/src/lib/bregex.h new file mode 100644 index 00000000..654e569e --- /dev/null +++ b/src/lib/bregex.h @@ -0,0 +1,196 @@ + +#ifndef __b_REGEXPR_H__ +#define __b_REGEXPR_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * regexpr.h + * + * Author: Tatu Ylonen + * + * Copyright (c) 1991 Tatu Ylonen, Espoo, Finland + * + * Permission to use, copy, modify, distribute, and sell this software + * and its documentation for any purpose is hereby granted without fee, + * provided that the above copyright notice appear in all copies. This + * software is provided "as is" without express or implied warranty. + * + * Created: Thu Sep 26 17:15:36 1991 ylo + * Last modified: Mon Nov 4 15:49:46 1991 ylo + * + * Modified to work with C++ for use in Bacula, + * Kern Sibbald April, 2006 + */ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +#ifndef REGEXPR_H +#define REGEXPR_H + +/* If we pull in this header, make sure we only get our own library + * bregex.c + */ +#define regex_t b_regex_t +#define regmatch_t b_regmatch_t +#define re_syntax b_re_syntax +#define re_syntax_table b_re_syntax_table +#define re_compile_initialize b_re_compile_initialize +#define re_set_syntax b_re_set_syntax +#define re_compile_pattern b_re_compile_pattern +#define re_match b_re_match +#define re_search b_re_search +#define re_compile_fastmap b_re_compile_fastmap +#define re_comp b_re_comp +#define re_exec b_re_exec +#define regcomp b_regcomp +#define regexec b_regexec +#define regerror b_regerror +#define regfree b_regfree + + +#define RE_NREGS 100 /* number of registers available */ + +#define regoff_t int + +typedef struct { + regoff_t rm_so; + regoff_t rm_eo; +} regmatch_t; + + +#define REG_EXTENDED (1<<1) +#define REG_ICASE (1<<2) +#define REG_NOSUB (1<<3) +#define REG_NEWLINE (1<<4) +#define REG_NOTBOL (1<<5) + +#define REG_NOMATCH -1 + +struct regex_t +{ + unsigned char *buffer; /* compiled pattern */ + int allocated; /* allocated size of compiled pattern */ + int used; /* actual length of compiled pattern */ + unsigned char *fastmap; /* fastmap[ch] is true if ch can start pattern */ + unsigned char *translate; /* translation to apply during compilation/matching */ + unsigned char fastmap_accurate; /* true if fastmap is valid */ + unsigned char can_be_null; /* true if can match empty string */ + unsigned char uses_registers; /* registers are used and need to be initialized */ + int num_registers; /* number of registers used */ + unsigned char anchor; /* anchor: 0=none 1=begline 2=begbuf */ + char *errmsg; + int cflags; /* compilation flags */ + POOLMEM *lcase; /* used by REG_ICASE */ +}; + + +typedef struct re_registers +{ + int start[RE_NREGS]; /* start offset of region */ + int end[RE_NREGS]; /* end offset of region */ +} *regexp_registers_t; + +/* bit definitions for syntax */ +#define RE_NO_BK_PARENS 1 /* no quoting for parentheses */ +#define RE_NO_BK_VBAR 2 /* no quoting for vertical bar */ 
+#define RE_BK_PLUS_QM 4 /* quoting needed for + and ? */ +#define RE_TIGHT_VBAR 8 /* | binds tighter than ^ and $ */ +#define RE_NEWLINE_OR 16 /* treat newline as or */ +#define RE_CONTEXT_INDEP_OPS 32 /* ^$?*+ are special in all contexts */ +#define RE_ANSI_HEX 64 /* ansi sequences (\n etc) and \xhh */ +#define RE_NO_GNU_EXTENSIONS 128 /* no gnu extensions */ + +/* definitions for some common regexp styles */ +#define RE_SYNTAX_AWK (RE_NO_BK_PARENS|RE_NO_BK_VBAR|RE_CONTEXT_INDEP_OPS) +#define RE_SYNTAX_EGREP (RE_SYNTAX_AWK|RE_NEWLINE_OR) +#define RE_SYNTAX_GREP (RE_BK_PLUS_QM|RE_NEWLINE_OR) +#define RE_SYNTAX_EMACS 0 + +#define Sword 1 +#define Swhitespace 2 +#define Sdigit 4 +#define Soctaldigit 8 +#define Shexdigit 16 + +/* Rename all exported symbols to avoid conflicts with similarly named + symbols in some systems' standard C libraries... */ + + +extern int re_syntax; +/* This is the actual syntax mask. It was added so that Python could do + * syntax-dependent munging of patterns before compilation. */ + +extern unsigned char re_syntax_table[256]; + +void re_compile_initialize(void); + +int re_set_syntax(int syntax); +/* This sets the syntax to use and returns the previous syntax. The + * syntax is specified by a bit mask of the above defined bits. */ + +const char *re_compile_pattern(regex_t *compiled, unsigned char *regex); +/* This compiles the regexp (given in regex and length in regex_size). + * This returns NULL if the regexp compiled successfully, and an error + * message if an error was encountered. The buffer field must be + * initialized to a memory area allocated by malloc (or to NULL) before + * use, and the allocated field must be set to its length (or 0 if + * buffer is NULL). Also, the translate field must be set to point to a + * valid translation table, or NULL if it is not used. */ + +int re_match(regex_t *compiled, unsigned char *string, int size, int pos, + regexp_registers_t old_regs); +/* This tries to match the regexp against the string. This returns the + * length of the matched portion, or -1 if the pattern could not be + * matched and -2 if an error (such as failure stack overflow) is + * encountered. */ + +int re_search(regex_t *compiled, unsigned char *string, int size, int startpos, + int range, regexp_registers_t regs); +/* This searches for a substring matching the regexp. This returns the + * first index at which a match is found. range specifies at how many + * positions to try matching; positive values indicate searching + * forwards, and negative values indicate searching backwards. mstop + * specifies the offset beyond which a match must not go. This returns + * -1 if no match is found, and -2 if an error (such as failure stack + * overflow) is encountered. */ + +void re_compile_fastmap(regex_t *compiled); +/* This computes the fastmap for the regexp. For this to have any effect, + * the calling program must have initialized the fastmap field to point + * to an array of 256 characters. 
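A usage sketch for the POSIX-style wrappers whose prototypes follow (remapped to b_regcomp, b_regexec and so on by the defines earlier in this header). REG_ICASE is handled by lower-casing the pattern in regcomp() and the subject string in re_search(), as implemented in bregex.c; the helper name and the sample strings here are illustrative only:

#include "bacula.h"
#include "bregex.h"

static bool ends_in_pdf(const char *fname)
{
   regex_t re;
   regmatch_t pmatch[2];
   bool match = false;

   if (regcomp(&re, "\\.pdf$", REG_ICASE) == 0) {
      match = (regexec(&re, fname, 2, pmatch, 0) == 0);
      regfree(&re);
   }
   return match;
}

With this engine, ends_in_pdf("Report.PDF") and ends_in_pdf("notes.pdf") should both return true, while ends_in_pdf("report.txt") should not.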
*/ + + +int regcomp(regex_t *preg, const char *regex, int cflags); +int regexec(regex_t *preg, const char *string, size_t nmatch, + regmatch_t pmatch[], int eflags); +size_t regerror(int errcode, regex_t *preg, char *errbuf, + size_t errbuf_size); +void regfree(regex_t *preg); + +#endif /* REGEXPR_H */ + + + +#ifdef __cplusplus +} +#endif +#endif /* !__b_REGEXPR_H__ */ diff --git a/src/lib/bsnprintf.c b/src/lib/bsnprintf.c new file mode 100644 index 00000000..09be60da --- /dev/null +++ b/src/lib/bsnprintf.c @@ -0,0 +1,1071 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Copyright Patrick Powell 1995 + * + * This code is based on code written by Patrick Powell + * (papowell@astart.com) It may be used for any purpose as long + * as this notice remains intact on all source code distributions. + * + * Adapted for Bacula -- note there were lots of bugs in + * the original code: %lld and %s were seriously broken, and + * with FP turned off %f seg faulted. + * + * Kern Sibbald, November MMV + * + */ + + + +#include "bacula.h" +#include + +#define FP_OUTPUT 1 /* Bacula uses floating point */ + +/* Define the following if you want all the features of + * normal printf, but with all the security problems. + * For Bacula we turn this off, and it silently ignores + * formats that could pose a security problem. + */ +#undef SECURITY_PROBLEM + +#ifdef USE_BSNPRINTF + +#ifdef HAVE_LONG_DOUBLE +#define LDOUBLE long double +#else +#define LDOUBLE double +#endif + +int bvsnprintf(char *buffer, int32_t maxlen, const char *format, va_list args); +static int32_t fmtstr(char *buffer, int32_t currlen, int32_t maxlen, + const char *value, int flags, int min, int max); +static int32_t fmtwstr(char *buffer, int32_t currlen, int32_t maxlen, + const wchar_t *value, int flags, int min, int max); +static int32_t fmtint(char *buffer, int32_t currlen, int32_t maxlen, + int64_t value, int base, int min, int max, int flags); + +#ifdef FP_OUTPUT +# ifdef HAVE_FCVTL +# define fcvt fcvtl +# endif +static int32_t fmtfp(char *buffer, int32_t currlen, int32_t maxlen, + LDOUBLE fvalue, int min, int max, int flags); +#else +#define fmtfp(b, c, m, f, min, max, fl) currlen +#endif + +/* + * NOTE!!!! do not use this #define with a construct such + * as outch(--place);. It just will NOT work, because the + * decrement of place is done ONLY if there is room in the + * output buffer. 
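Ahead of the conversion machinery that follows, a hedged usage sketch of bsnprintf() itself. As the comment before bsnprintf() further down spells out, %d/%ld are treated as 32 bit, %lld/%q as 64 bit, and %n is compiled out unless SECURITY_PROBLEM is defined; this assumes a build with USE_BSNPRINTF defined and the usual Bacula headers, and the names and sample format are illustrative only:

#include "bacula.h"
#include <stdio.h>

static void bsnprintf_example(void)
{
   char buf[64];
   int64_t bytes = 123456789012LL;
   int len;

   /* 64-bit values must be passed with %lld (or %q); %d reads 32 bits */
   len = bsnprintf(buf, sizeof(buf), "JobBytes=%lld Name=%s",
                   bytes, "backup-1");

   /* len is the number of characters actually stored (capped by the
    * buffer size), not the C99 "would have been written" length;
    * the result is always nul-terminated and silently truncated. */
   printf("%d: %s\n", len, buf);
}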
+ */ +#define outch(c) {int len=currlen; if (currlen < maxlen) \ + { buffer[len] = (c); currlen++; }} + +/* format read states */ +#define DP_S_DEFAULT 0 +#define DP_S_FLAGS 1 +#define DP_S_MIN 2 +#define DP_S_DOT 3 +#define DP_S_MAX 4 +#define DP_S_MOD 5 +#define DP_S_CONV 6 +#define DP_S_DONE 7 + +/* format flags - Bits */ +#define DP_F_MINUS (1 << 0) +#define DP_F_PLUS (1 << 1) +#define DP_F_SPACE (1 << 2) +#define DP_F_NUM (1 << 3) +#define DP_F_ZERO (1 << 4) +#define DP_F_UP (1 << 5) +#define DP_F_UNSIGNED (1 << 6) +#define DP_F_DOT (1 << 7) + +/* Conversion Flags */ +#define DP_C_INT16 1 +#define DP_C_INT32 2 +#define DP_C_LDOUBLE 3 +#define DP_C_INT64 4 +#define DP_C_WCHAR 5 /* wide characters */ +#define DP_C_SIZE_T 6 + +#define char_to_int(p) ((p)- '0') +#undef MAX +#define MAX(p,q) (((p) >= (q)) ? (p) : (q)) + +/* + You might ask why does Bacula have it's own printf routine? Well, + There are two reasons: 1. Here (as opposed to library routines), we + define %d and %ld to be 32 bit; %lld and %q to be 64 bit. 2. We + disable %n for security reasons. + */ + +int bsnprintf(char *str, int32_t size, const char *fmt, ...) +{ + va_list arg_ptr; + int len; + + va_start(arg_ptr, fmt); + len = bvsnprintf(str, size, fmt, arg_ptr); + va_end(arg_ptr); + return len; +} + + +int bvsnprintf(char *buffer, int32_t maxlen, const char *format, va_list args) +{ + char ch; + int64_t value; + char *strvalue; + wchar_t *wstrvalue; + int min; + int max; + int state; + int flags; + int cflags; + int32_t currlen; + int base; +#ifdef FP_OUTPUT + LDOUBLE fvalue; +#endif + + state = DP_S_DEFAULT; + currlen = flags = cflags = min = 0; + max = -1; + ch = *format++; + *buffer = 0; + + while (state != DP_S_DONE) { + if ((ch == '\0') || (currlen >= maxlen)) { + state = DP_S_DONE; + } + switch (state) { + case DP_S_DEFAULT: + if (ch == '%') { + state = DP_S_FLAGS; + } else { + outch(ch); + } + ch = *format++; + break; + case DP_S_FLAGS: + switch (ch) { + case '-': + flags |= DP_F_MINUS; + ch = *format++; + break; + case '+': + flags |= DP_F_PLUS; + ch = *format++; + break; + case ' ': + flags |= DP_F_SPACE; + ch = *format++; + break; + case '#': + flags |= DP_F_NUM; + ch = *format++; + break; + case '0': + flags |= DP_F_ZERO; + ch = *format++; + break; + default: + state = DP_S_MIN; + break; + } + break; + case DP_S_MIN: + if (isdigit((unsigned char)ch)) { + min = 10 * min + char_to_int(ch); + ch = *format++; + } else if (ch == '*') { + min = va_arg(args, int); + ch = *format++; + state = DP_S_DOT; + } else + state = DP_S_DOT; + break; + case DP_S_DOT: + if (ch == '.') { + state = DP_S_MAX; + flags |= DP_F_DOT; + ch = *format++; + } else + state = DP_S_MOD; + break; + case DP_S_MAX: + if (isdigit((unsigned char)ch)) { + if (max < 0) + max = 0; + max = 10 * max + char_to_int(ch); + ch = *format++; + } else if (ch == '*') { + max = va_arg(args, int); + ch = *format++; + state = DP_S_MOD; + } else + state = DP_S_MOD; + break; + case DP_S_MOD: + switch (ch) { + case 'h': + cflags = DP_C_INT16; + ch = *format++; + break; + case 'l': + cflags = DP_C_INT32; + ch = *format++; + if (ch == 's') { + cflags = DP_C_WCHAR; + } else if (ch == 'l') { /* It's a long long */ + cflags = DP_C_INT64; + ch = *format++; + } + break; + case 'z': + cflags = DP_C_SIZE_T; + ch = *format++; + break; + case 'L': + cflags = DP_C_LDOUBLE; + ch = *format++; + break; + case 'q': /* same as long long */ + cflags = DP_C_INT64; + ch = *format++; + break; + default: + break; + } + state = DP_S_CONV; + break; + case DP_S_CONV: + switch (ch) { + case 
'd': + case 'i': + if (cflags == DP_C_INT16) { + value = va_arg(args, int32_t); + } else if (cflags == DP_C_INT32) { + value = va_arg(args, int32_t); + } else if (cflags == DP_C_INT64) { + value = va_arg(args, int64_t); + } else if (cflags == DP_C_SIZE_T) { + value = va_arg(args, ssize_t); + } else { + value = va_arg(args, int); + } + currlen = fmtint(buffer, currlen, maxlen, value, 10, min, max, flags); + break; + case 'X': + case 'x': + case 'o': + case 'u': + if (ch == 'o') { + base = 8; + } else if (ch == 'x') { + base = 16; + } else if (ch == 'X') { + base = 16; + flags |= DP_F_UP; + } else { + base = 10; + } + flags |= DP_F_UNSIGNED; + if (cflags == DP_C_INT16) { + value = va_arg(args, uint32_t); + } else if (cflags == DP_C_INT32) { + value = va_arg(args, uint32_t); + } else if (cflags == DP_C_INT64) { + value = va_arg(args, uint64_t); + } else if (cflags == DP_C_SIZE_T) { + value = va_arg(args, size_t); + } else { + value = va_arg(args, unsigned int); + } + currlen = fmtint(buffer, currlen, maxlen, value, base, min, max, flags); + break; + case 'f': + if (cflags == DP_C_LDOUBLE) { + fvalue = va_arg(args, LDOUBLE); + } else { + fvalue = va_arg(args, double); + } + currlen = fmtfp(buffer, currlen, maxlen, fvalue, min, max, flags); + break; + case 'E': + flags |= DP_F_UP; + case 'e': + if (cflags == DP_C_LDOUBLE) { + fvalue = va_arg(args, LDOUBLE); + } else { + fvalue = va_arg(args, double); + } + currlen = fmtfp(buffer, currlen, maxlen, fvalue, min, max, flags); + break; + case 'G': + flags |= DP_F_UP; + case 'g': + if (cflags == DP_C_LDOUBLE) { + fvalue = va_arg(args, LDOUBLE); + } else { + fvalue = va_arg(args, double); + } + currlen = fmtfp(buffer, currlen, maxlen, fvalue, min, max, flags); + break; + case 'c': + ch = va_arg(args, int); + outch(ch); + break; + case 's': + if (cflags != DP_C_WCHAR) { + strvalue = va_arg(args, char *); + if (!strvalue) { + strvalue = (char *)""; + } + currlen = fmtstr(buffer, currlen, maxlen, strvalue, flags, min, max); + } else { + /* %ls means to edit wide characters */ + wstrvalue = va_arg(args, wchar_t *); + if (!wstrvalue) { + wstrvalue = (wchar_t *)L""; + } + currlen = fmtwstr(buffer, currlen, maxlen, wstrvalue, flags, min, max); + } + break; + case 'p': + flags |= DP_F_UNSIGNED; + if (sizeof(char *) == 4) { + value = va_arg(args, uint32_t); + } else if (sizeof(char *) == 8) { + value = va_arg(args, uint64_t); + } else { + value = 0; /* we have a problem */ + } + currlen = fmtint(buffer, currlen, maxlen, value, 16, min, max, flags); + break; + +#ifdef SECURITY_PROBLEM + case 'n': + if (cflags == DP_C_INT16) { + int16_t *num; + num = va_arg(args, int16_t *); + *num = currlen; + } else if (cflags == DP_C_INT32) { + int32_t *num; + num = va_arg(args, int32_t *); + *num = (int32_t)currlen; + } else if (cflags == DP_C_INT64) { + int64_t *num; + num = va_arg(args, int64_t *); + *num = (int64_t)currlen; + } else { + int32_t *num; + num = va_arg(args, int32_t *); + *num = (int32_t)currlen; + } + break; +#endif + case '%': + outch(ch); + break; + case 'w': + /* not supported yet, treat as next char */ + ch = *format++; + break; + default: + /* Unknown, skip */ + break; + } + ch = *format++; + state = DP_S_DEFAULT; + flags = cflags = min = 0; + max = -1; + break; + case DP_S_DONE: + break; + default: + /* hmm? 
*/ + break; /* some picky compilers need this */ + } + } + if (currlen < maxlen - 1) { + buffer[currlen] = '\0'; + } else { + buffer[maxlen - 1] = '\0'; + } + return currlen; +} + +static int32_t fmtstr(char *buffer, int32_t currlen, int32_t maxlen, + const char *value, int flags, int min, int max) +{ + int padlen, strln; /* amount to pad */ + int cnt = 0; + char ch; + + + if (flags & DP_F_DOT && max < 0) { /* Max not specified */ + max = 0; + } else if (max < 0) { + max = maxlen; + } + strln = strlen(value); + if (strln > max) { + strln = max; /* truncate to max */ + } + padlen = min - strln; + if (padlen < 0) { + padlen = 0; + } + if (flags & DP_F_MINUS) { + padlen = -padlen; /* Left Justify */ + } + + while (padlen > 0) { + outch(' '); + --padlen; + } + while (*value && (cnt < max)) { + ch = *value++; + outch(ch); + ++cnt; + } + while (padlen < 0) { + outch(' '); + ++padlen; + } + return currlen; +} + +static int32_t fmtwstr(char *buffer, int32_t currlen, int32_t maxlen, + const wchar_t *value, int flags, int min, int max) +{ + int padlen, strln; /* amount to pad */ + int cnt = 0; + char ch; + + + if (flags & DP_F_DOT && max < 0) { /* Max not specified */ + max = 0; + } else if (max < 0) { + max = maxlen; + } + strln = wcslen(value); + if (strln > max) { + strln = max; /* truncate to max */ + } + padlen = min - strln; + if (padlen < 0) { + padlen = 0; + } + if (flags & DP_F_MINUS) { + padlen = -padlen; /* Left Justify */ + } + + while (padlen > 0) { + outch(' '); + --padlen; + } + while (*value && (cnt < max)) { + + ch = (*value++) & 0xff; + outch(ch); + ++cnt; + } + while (padlen < 0) { + outch(' '); + ++padlen; + } + return currlen; +} + +/* Have to handle DP_F_NUM (ie 0x and 0 alternates) */ + +static int32_t fmtint(char *buffer, int32_t currlen, int32_t maxlen, + int64_t value, int base, int min, int max, int flags) +{ + int signvalue = 0; + uint64_t uvalue; + char convert[25]; + int place = 0; + int spadlen = 0; /* amount to space pad */ + int zpadlen = 0; /* amount to zero pad */ + int caps = 0; + const char *cvt_string; + + if (max < 0) { + max = 0; + } + + uvalue = value; + + if (!(flags & DP_F_UNSIGNED)) { + if (value < 0) { + signvalue = '-'; + uvalue = -value; + } else if (flags & DP_F_PLUS) { /* Do a sign (+/i) */ + signvalue = '+'; + } else if (flags & DP_F_SPACE) { + signvalue = ' '; + } + } + + if (flags & DP_F_UP) { + caps = 1; /* Should characters be upper case? */ + } + + cvt_string = caps ? "0123456789ABCDEF" : "0123456789abcdef"; + do { + convert[place++] = cvt_string[uvalue % (unsigned)base]; + uvalue = (uvalue / (unsigned)base); + } while (uvalue && (place < (int)sizeof(convert))); + if (place == (int)sizeof(convert)) { + place--; + } + convert[place] = 0; + + zpadlen = max - place; + spadlen = min - MAX(max, place) - (signvalue ? 
1 : 0); + if (zpadlen < 0) + zpadlen = 0; + if (spadlen < 0) + spadlen = 0; + if (flags & DP_F_ZERO) { + zpadlen = MAX(zpadlen, spadlen); + spadlen = 0; + } + if (flags & DP_F_MINUS) + spadlen = -spadlen; /* Left Justifty */ + +#ifdef DEBUG_SNPRINTF + printf("zpad: %d, spad: %d, min: %d, max: %d, place: %d\n", + zpadlen, spadlen, min, max, place); +#endif + + /* Spaces */ + while (spadlen > 0) { + outch(' '); + --spadlen; + } + + /* Sign */ + if (signvalue) { + outch(signvalue); + } + + /* Zeros */ + if (zpadlen > 0) { + while (zpadlen > 0) { + outch('0'); + --zpadlen; + } + } + + /* Output digits backward giving correct order */ + while (place > 0) { + place--; + outch(convert[place]); + } + + /* Left Justified spaces */ + while (spadlen < 0) { + outch(' '); + ++spadlen; + } + return currlen; +} + +#ifdef FP_OUTPUT + +static LDOUBLE abs_val(LDOUBLE value) +{ + LDOUBLE result = value; + + if (value < 0) + result = -value; + + return result; +} + +static LDOUBLE pow10(int exp) +{ + LDOUBLE result = 1; + + while (exp) { + result *= 10; + exp--; + } + + return result; +} + +static int64_t round(LDOUBLE value) +{ + int64_t intpart; + + intpart = (int64_t)value; + value = value - intpart; + if (value >= 0.5) + intpart++; + + return intpart; +} + +static int32_t fmtfp(char *buffer, int32_t currlen, int32_t maxlen, + LDOUBLE fvalue, int min, int max, int flags) +{ + int signvalue = 0; + LDOUBLE ufvalue; +#ifndef HAVE_FCVT + char iconvert[311]; + char fconvert[311]; +#else + char iconvert[311]; + char fconvert[311]; + char *result; + char dummy[10]; + int dec_pt, sig; + int r_length; + extern char *fcvt(double value, int ndigit, int *decpt, int *sign); +#endif + int iplace = 0; + int fplace = 0; + int padlen = 0; /* amount to pad */ + int zpadlen = 0; + int64_t intpart; + int64_t fracpart; + const char *cvt_str; + + /* + * AIX manpage says the default is 0, but Solaris says the default + * is 6, and sprintf on AIX defaults to 6 + */ + if (max < 0) + max = 6; + + ufvalue = abs_val(fvalue); + + if (fvalue < 0) + signvalue = '-'; + else if (flags & DP_F_PLUS) /* Do a sign (+/i) */ + signvalue = '+'; + else if (flags & DP_F_SPACE) + signvalue = ' '; + +#ifndef HAVE_FCVT + intpart = (int64_t)ufvalue; + + /* + * Sorry, we only support 9 digits past the decimal because of our + * conversion method + */ + if (max > 9) + max = 9; + + /* We "cheat" by converting the fractional part to integer by + * multiplying by a factor of 10 + */ + fracpart = round((pow10(max)) * (ufvalue - intpart)); + + if (fracpart >= pow10(max)) { + intpart++; + fracpart -= (int64_t)pow10(max); + } + +#ifdef DEBUG_SNPRINTF + printf("fmtfp: %g %lld.%lld min=%d max=%d\n", + (double)fvalue, intpart, fracpart, min, max); +#endif + + /* Convert integer part */ + cvt_str = "0123456789"; + do { + iconvert[iplace++] = cvt_str[(int)(intpart % 10)]; + intpart = (intpart / 10); + } while (intpart && (iplace < (int)sizeof(iconvert))); + + if (iplace == (int)sizeof(fconvert)) { + iplace--; + } + iconvert[iplace] = 0; + + /* Convert fractional part */ + cvt_str = "0123456789"; + for (int fiter = max; fiter > 0; fiter--) { + fconvert[fplace++] = cvt_str[fracpart % 10]; + fracpart = (fracpart / 10); + } + + if (fplace == (int)sizeof(fconvert)) { + fplace--; + } + fconvert[fplace] = 0; +#else /* use fcvt() */ + if (max > 310) { + max = 310; + } +# ifdef HAVE_FCVTL + result = fcvtl(ufvalue, max, &dec_pt, &sig); +# else + result = fcvt(ufvalue, max, &dec_pt, &sig); +# endif + + if (!result) { + r_length = 0; + dummy[0] = 0; + result = dummy; + } else 
{ + r_length = strlen(result); + } + + /* + * Fix broken fcvt implementation returns.. + */ + + if (r_length == 0) { + result[0] = '0'; + result[1] = '\0'; + r_length = 1; + } + + if (r_length < dec_pt) + dec_pt = r_length; + + if (dec_pt <= 0) { + iplace = 1; + iconvert[0] = '0'; + iconvert[1] = '\0'; + + fplace = 0; + + while (r_length) { + fconvert[fplace++] = result[--r_length]; + } + + while ((dec_pt < 0) && (fplace < max)) { + fconvert[fplace++] = '0'; + dec_pt++; + } + } else { + int c; + + iplace = 0; + for (c = dec_pt; c; iconvert[iplace++] = result[--c]); + iconvert[iplace] = '\0'; + + result += dec_pt; + fplace = 0; + + for (c = (r_length - dec_pt); c; fconvert[fplace++] = result[--c]); + } +#endif /* HAVE_FCVT */ + + /* -1 for decimal point, another -1 if we are printing a sign */ + padlen = min - iplace - max - 1 - ((signvalue) ? 1 : 0); + zpadlen = max - fplace; + if (zpadlen < 0) { + zpadlen = 0; + } + if (padlen < 0) { + padlen = 0; + } + if (flags & DP_F_MINUS) { + padlen = -padlen; /* Left Justifty */ + } + + if ((flags & DP_F_ZERO) && (padlen > 0)) { + if (signvalue) { + outch(signvalue); + --padlen; + signvalue = 0; + } + while (padlen > 0) { + outch('0'); + --padlen; + } + } + while (padlen > 0) { + outch(' '); + --padlen; + } + if (signvalue) { + outch(signvalue); + } + + while (iplace > 0) { + iplace--; + outch(iconvert[iplace]); + } + + +#ifdef DEBUG_SNPRINTF + printf("fmtfp: fplace=%d zpadlen=%d\n", fplace, zpadlen); +#endif + + /* + * Decimal point. This should probably use locale to find the correct + * char to print out. + */ + if (max > 0) { + outch('.'); + while (fplace > 0) { + fplace--; + outch(fconvert[fplace]); + } + } + + while (zpadlen > 0) { + outch('0'); + --zpadlen; + } + + while (padlen < 0) { + outch(' '); + ++padlen; + } + return currlen; +} +#endif /* FP_OUTPUT */ + +#ifdef TEST_PROGRAM +#include "unittests.h" + +#ifndef LONG_STRING +#define LONG_STRING 1024 +#endif +#define MSGLEN 80 + +int main(int argc, char *argv[]) +{ + Unittests bsnprintf_test("bsnprintf_test"); + char buf1[LONG_STRING]; + char buf2[LONG_STRING]; + char msg[MSGLEN]; + bool check_cont, check_nr; + int pcount, bcount; + +#ifdef FP_OUTPUT + const char *fp_fmt[] = { + "%-1.5f", + "%1.5f", + "%123.9f", + "%10.5f", + "% 10.5f", + "%+22.9f", + "%+4.9f", + "%01.3f", + "%4f", + "%3.1f", + "%3.2f", + "%.0f", + "%.1f", + "%.2f", + NULL + }; + double fp_nums[] = { 1.05, -1.5, 134.21, 91340.2, 341.1234, 0203.9, 0.96, 0.996, + 0.9996, 1.996, 4.136, 6442452944.1234, 0, 23365.5 + }; +#endif + const char *int_fmt[] = { + "%-1.5d", + "%1.5d", + "%123.9d", + "%5.5d", + "%10.5d", + "% 10.5d", + "%+22.33d", + "%01.3d", + "%4d", + "%-1.5ld", + "%1.5ld", + "%123.9ld", + "%5.5ld", + "%10.5ld", + "% 10.5ld", + "%+22.33ld", + "%01.3ld", + "%4ld", + NULL + }; + long int_nums[] = { -1, 134, 91340, 341, 0203, 0 }; + + const char *ll_fmt[] = { + "%-1.8lld", + "%1.8lld", + "%123.9lld", + "%5.8lld", + "%10.5lld", + "% 10.8lld", + "%+22.33lld", + "%01.3lld", + "%4lld", + NULL + }; + int64_t ll_nums[] = { -1976, 789134567890LL, 91340, 34123, 0203, 0 }; + + const char *s_fmt[] = { + "%-1.8s", + "%1.8s", + "%123.9s", + "%5.8s", + "%10.5s", + "% 10.3s", + "%+22.1s", + "%01.3s", + "%s", + "%10s", + "%3s", + "%3.0s", + "%3.s", + NULL + }; + const char *s_nums[] = { "abc", "def", "ghi", "123", "4567", "a", "bb", "ccccccc", NULL}; + + const char *ls_fmt[] = { + "%-1.8ls", + "%1.8ls", + "%123.9ls", + "%5.8ls", + "%10.5ls", + "% 10.3ls", + "%+22.1ls", + "%01.3ls", + "%ls", + "%10ls", + "%3ls", + "%3.0ls", + "%3.ls", 
+ NULL + }; + const wchar_t *ls_nums[] = { L"abc", L"def", L"ghi", L"123", L"4567", L"a", L"bb", L"ccccccc", NULL}; + + int x, y; + + printf("\n\tTesting bsnprintf against system sprintf...\n\n"); +#ifdef FP_OUTPUT + printf("Testing bsnprintf float format codes\n"); + for (x = 0; fp_fmt[x] != NULL; x++){ + check_cont = true; + check_nr = true; + for (y = 0; fp_nums[y] != 0; y++) { + bcount = bsnprintf(buf1, sizeof(buf1), fp_fmt[x], fp_nums[y]); + pcount = sprintf(buf2, fp_fmt[x], fp_nums[y]); + if (bcount != pcount) { + check_nr = false; + } + if (strcmp(buf1, buf2) != 0){ + check_cont = false; + } + } + snprintf(msg, MSGLEN, "Checking return length for format %s", fp_fmt[x]); + ok(check_nr, msg); + snprintf(msg, MSGLEN, "Checking format %s", fp_fmt[x]); + ok(check_cont, msg); + } +#endif + + printf("Testing bsnprintf int format codes\n"); + for (x = 0; int_fmt[x] != NULL; x++){ + check_cont = true; + check_nr = true; + for (y = 0; int_nums[y] != 0; y++) { + bcount = bsnprintf(buf1, sizeof(buf1), int_fmt[x], int_nums[y]); + pcount = sprintf(buf2, int_fmt[x], int_nums[y]); + if (bcount != pcount) { + check_nr = false; + } + if (strcmp(buf1, buf2) != 0) { + check_cont = false; + } + } + snprintf(msg, MSGLEN, "Checking return length for format %s", int_fmt[x]); + ok(check_nr, msg); + snprintf(msg, MSGLEN, "Checking format %s", int_fmt[x]); + ok(check_cont, msg); + } + + printf("Testing bsnprintf long format codes\n"); + for (x = 0; ll_fmt[x] != NULL; x++) { + check_cont = true; + check_nr = true; + for (y = 0; ll_nums[y] != 0; y++) { + bcount = bsnprintf(buf1, sizeof(buf1), ll_fmt[x], ll_nums[y]); + pcount = sprintf(buf2, ll_fmt[x], ll_nums[y]); + if (bcount != pcount) { + check_nr = false; + } + if (strcmp(buf1, buf2) != 0) { + check_cont = false; + } + } + snprintf(msg, MSGLEN, "Checking return length for format %s", ll_fmt[x]); + ok(check_nr, msg); + snprintf(msg, MSGLEN, "Checking format %s", ll_fmt[x]); + ok(check_cont, msg); + } + + printf("Testing bsnprintf str format codes\n"); + for (x = 0; s_fmt[x] != NULL; x++) { + check_cont = true; + check_nr = true; + for (y = 0; s_nums[y] != 0; y++) { + bcount = bsnprintf(buf1, sizeof(buf1), s_fmt[x], s_nums[y]); + pcount = sprintf(buf2, s_fmt[x], s_nums[y]); + if (bcount != pcount) { + check_nr = false; + } + if (strcmp(buf1, buf2) != 0) { + check_cont = false; + } + } + snprintf(msg, MSGLEN, "Checking return length for format %s", s_fmt[x]); + ok(check_nr, msg); + snprintf(msg, MSGLEN, "Checking format %s", s_fmt[x]); + ok(check_cont, msg); + } + + printf("Testing bsnprintf long str format codes\n"); + for (x = 0; ls_fmt[x] != NULL; x++) { + check_cont = true; + check_nr = true; + for (y = 0; ls_nums[y] != 0; y++) { + bcount = bsnprintf(buf1, sizeof(buf1), ls_fmt[x], ls_nums[y]); + pcount = sprintf(buf2, ls_fmt[x], ls_nums[y]); + if (bcount != pcount) { + check_nr = false; + } + if (strcmp(buf1, buf2) != 0) { + check_cont = false; + } + } + snprintf(msg, MSGLEN, "Checking return length for format %s", ls_fmt[x]); + ok(check_nr, msg); + snprintf(msg, MSGLEN, "Checking format %s", ls_fmt[x]); + ok(check_cont, msg); + } + + return report(); +} +#endif /* TEST_PROGRAM */ + +#endif /* USE_BSNPRINTF */ diff --git a/src/lib/bsock.c b/src/lib/bsock.c new file mode 100644 index 00000000..c21b293a --- /dev/null +++ b/src/lib/bsock.c @@ -0,0 +1,1001 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be 
found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Network Utility Routines + * + * The new code inherit common functions from BSOCKCORE class + * and implement BSOCK/Bacula specific protocols and data flow. + * + * Written by Kern Sibbald + * + * Major refactoring of BSOCK code written by: + * + * Radosław Korzeniewski, MMXVIII + * radoslaw@korzeniewski.net, radekk@inteos.pl + * Inteos Sp. z o.o. http://www.inteos.pl/ + * + */ + +#include "bacula.h" +#include "jcr.h" +#include "lz4.h" +#include +#include + +#define BSOCK_DEBUG_LVL 900 + +#if !defined(ENODATA) /* not defined on BSD systems */ +#define ENODATA EPIPE +#endif + +/* Commands sent to Director */ +static char hello[] = "Hello %s calling\n"; + +/* Response from Director */ +static char OKhello[] = "1000 OK:"; + + +/* + * BSOCK default constructor - initializes object. + */ +BSOCK::BSOCK() +{ + init(); +}; + +/* + * BSOCK special constructor initializes object and sets proper socked descriptor. + */ +BSOCK::BSOCK(int sockfd): + BSOCKCORE(), + m_spool_fd(NULL), + cmsg(NULL), + m_data_end(0), + m_last_data_end(0), + m_FileIndex(0), + m_lastFileIndex(0), + m_spool(false), + m_compress(false), + m_CommBytes(0), + m_CommCompressedBytes(0) +{ + init(); + m_terminated = false; + m_closed = false; + m_fd = sockfd; +}; + +/* + * BSOCK default destructor. + */ +BSOCK::~BSOCK() +{ + Dmsg0(BSOCK_DEBUG_LVL, "BSOCK::~BSOCK()\n"); + _destroy(); +}; + +/* + * BSOCK initialization method handles bsock specific variables. + */ +void BSOCK::init() +{ + /* the BSOCKCORE::init() is executed in base class constructor */ + timeout = BSOCK_TIMEOUT; + m_spool_fd = NULL; + cmsg = get_pool_memory(PM_BSOCK); +} + +/* + * BSOCK private destroy method releases bsock specific variables. + */ +void BSOCK::_destroy() +{ + Dmsg0(BSOCK_DEBUG_LVL, "BSOCK::_destroy()\n"); + if (cmsg) { + free_pool_memory(cmsg); + cmsg = NULL; + } +}; + +/* + * Authenticate Director + */ +bool BSOCK::authenticate_director(const char *name, const char *password, + TLS_CONTEXT *tls_ctx, char *errmsg, int errmsg_len) +{ + int tls_local_need = BNET_TLS_NONE; + int tls_remote_need = BNET_TLS_NONE; + int compatible = true; + char bashed_name[MAX_NAME_LENGTH]; + BSOCK *dir = this; /* for readability */ + + *errmsg = 0; + /* + * Send my name to the Director then do authentication + */ + + /* Timeout Hello after 15 secs */ + dir->start_timer(15); + dir->fsend(hello, bashed_name); + + if (get_tls_enable(tls_ctx)) { + tls_local_need = get_tls_enable(tls_ctx) ? 
BNET_TLS_REQUIRED : BNET_TLS_OK; + } + + /* respond to Dir challenge */ + if (!cram_md5_respond(dir, password, &tls_remote_need, &compatible) || + /* Now challenge dir */ + !cram_md5_challenge(dir, password, tls_local_need, compatible)) { + bsnprintf(errmsg, errmsg_len, _("Director authorization error at \"%s:%d\"\n"), + dir->host(), dir->port()); + goto bail_out; + } + + /* Verify that the remote host is willing to meet our TLS requirements */ + if (tls_remote_need < tls_local_need && tls_local_need != BNET_TLS_OK && tls_remote_need != BNET_TLS_OK) { + bsnprintf(errmsg, errmsg_len, _("Authorization error:" + " Remote server at \"%s:%d\" did not advertise required TLS support.\n"), + dir->host(), dir->port()); + goto bail_out; + } + + /* Verify that we are willing to meet the remote host's requirements */ + if (tls_remote_need > tls_local_need && tls_local_need != BNET_TLS_OK && tls_remote_need != BNET_TLS_OK) { + bsnprintf(errmsg, errmsg_len, _("Authorization error with Director at \"%s:%d\":" + " Remote server requires TLS.\n"), + dir->host(), dir->port()); + + goto bail_out; + } + + /* Is TLS Enabled? */ + if (have_tls) { + if (tls_local_need >= BNET_TLS_OK && tls_remote_need >= BNET_TLS_OK) { + /* Engage TLS! Full Speed Ahead! */ + if (!bnet_tls_client(tls_ctx, dir, NULL)) { + bsnprintf(errmsg, errmsg_len, _("TLS negotiation failed with Director at \"%s:%d\"\n"), + dir->host(), dir->port()); + goto bail_out; + } + } + } + + Dmsg1(6, ">dird: %s", dir->msg); + if (dir->recv() <= 0) { + dir->stop_timer(); + bsnprintf(errmsg, errmsg_len, _("Bad errmsg to Hello command: ERR=%s\n" + "The Director at \"%s:%d\" may not be running.\n"), + dir->bstrerror(), dir->host(), dir->port()); + return false; + } + + dir->stop_timer(); + Dmsg1(10, "msg); + if (strncmp(dir->msg, OKhello, sizeof(OKhello)-1) != 0) { + bsnprintf(errmsg, errmsg_len, _("Director at \"%s:%d\" rejected Hello command\n"), + dir->host(), dir->port()); + return false; + } else { + bsnprintf(errmsg, errmsg_len, "%s", dir->msg); + } + return true; + +bail_out: + dir->stop_timer(); + bsnprintf(errmsg, errmsg_len, _("Authorization error with Director at \"%s:%d\"\n" + "Most likely the passwords do not agree.\n" + "If you are using TLS, there may have been a certificate validation error during the TLS handshake.\n" + "For help, please see: " MANUAL_AUTH_URL "\n"), + dir->host(), dir->port()); + return false; +} + +/* + * Send a message over the network. Everything is sent in one + * write request, but depending on the mode you are using + * there will be either two or three read requests done. + * Read 1: 32 bits, gets essentially the packet length, but may + * have the upper bits set to indicate compression or + * an extended header packet. + * Read 2: 32 bits, this read is done only of BNET_HDR_EXTEND is set. + * In this case the top 16 bits of this 32 bit word are reserved + * for flags and the lower 16 bits for data. This word will be + * stored in the field "flags" in the BSOCKCORE packet. + * Read 2 or 3: depending on if Read 2 is done. This is the data. + * + * For normal comm line compression, the whole data packet is compressed + * but not the msglen (first read). + * To do data compression rather than full comm line compression, prior to + * call send(flags) where the lower 32 bits is the offset to the data to + * be compressed. The top 32 bits are reserved for flags that can be + * set. 
The are: + * BNET_IS_CMD We are sending a command + * BNET_OFFSET An offset is specified (this implies data compression) + * BNET_NOCOMPRESS Inhibit normal comm line compression + * BNET_DATACOMPRESSED The data using the specified offset was + * compressed, and normal comm line compression will + * not be done. + * If any of the above bits are set, then BNET_HDR_EXTEND will be set + * in the top bits of msglen, and the full set of flags + the offset + * will be passed as a 32 bit word just after the msglen, and then + * followed by any data that is either compressed or not. + * + * Note, neither comm line nor data compression is not + * guaranteed since it may result in more data, in which case, the + * record is sent uncompressed and there will be no offset. + * On the receive side, if BNET_OFFSET is set, then the data is compressed. + * + * Returns: false on failure + * true on success + */ +#define display_errmsg() (!m_suppress_error_msgs && m_jcr && m_jcr->JobId != 0) + +bool BSOCK::send(int aflags) +{ + int32_t rc; + int32_t pktsiz; + int32_t *hdrptr; + int offset; + int hdrsiz; + bool ok = true; + int32_t save_msglen; + POOLMEM *save_msg; + bool compressed; + bool locked = false; + + if (is_closed()) { + if (display_errmsg()) { + Qmsg0(m_jcr, M_ERROR, 0, _("Socket is closed\n")); + } + return false; + } + if (errors) { + if (display_errmsg()) { + Qmsg4(m_jcr, M_ERROR, 0, _("Socket has errors=%d on call to %s:%s:%d\n"), + errors, m_who, m_host, m_port); + } + return false; + } + if (is_terminated()) { + if (display_errmsg()) { + Qmsg4(m_jcr, M_ERROR, 0, _("Bsock send while terminated=%d on call to %s:%s:%d\n"), + is_terminated(), m_who, m_host, m_port); + } + return false; + } + + if (msglen > 4000000) { + if (!m_suppress_error_msgs) { + Qmsg4(m_jcr, M_ERROR, 0, + _("Write socket has insane msglen=%d on call to %s:%s:%d\n"), + msglen, m_who, m_host, m_port); + } + return false; + } + + if (send_hook_cb) { + if (!send_hook_cb->bsock_send_cb()) { + Dmsg3(1, "Flowcontrol failure on %s:%s:%d\n", m_who, m_host, m_port); + Qmsg3(m_jcr, M_ERROR, 0, + _("Flowcontrol failure on %s:%s:%d\n"), + m_who, m_host, m_port); + return false; + } + } + if (m_use_locking) { + pP(pm_wmutex); + locked = true; + } + save_msglen = msglen; + save_msg = msg; + m_flags = aflags; + + offset = aflags & 0xFF; /* offset is 16 bits */ + if (offset) { + m_flags |= BNET_OFFSET; + } + if (m_flags & BNET_DATACOMPRESSED) { /* Check if already compressed */ + compressed = true; + } else if (m_flags & BNET_NOCOMPRESS) { + compressed = false; + } else { + compressed = comm_compress(); /* do requested compression */ + } + if (offset && compressed) { + m_flags |= BNET_DATACOMPRESSED; + } + if (!compressed) { + m_flags &= ~BNET_COMPRESSED; + } + + /* Compute total packet length */ + if (msglen <= 0) { + hdrsiz = sizeof(pktsiz); + pktsiz = hdrsiz; /* signal, no data */ + } else if (m_flags) { + hdrsiz = 2 * sizeof(pktsiz); /* have 64 bit header */ + pktsiz = msglen + hdrsiz; + } else { + hdrsiz = sizeof(pktsiz); /* have 32 bit header */ + pktsiz = msglen + hdrsiz; + } + + /* Set special bits */ + if (m_flags & BNET_OFFSET) { /* if data compression on */ + compressed = false; /* no comm compression */ + } + if (compressed) { + msglen |= BNET_COMPRESSED; /* comm line compression */ + } + + if (m_flags) { + msglen |= BNET_HDR_EXTEND; /* extended header */ + } + + /* + * Store packet length at head of message -- note, we + * have reserved an int32_t just before msg, so we can + * store there + */ + hdrptr = (int32_t *)(msg - 
hdrsiz); + *hdrptr = htonl(msglen); /* store signal/length */ + if (m_flags) { + *(hdrptr+1) = htonl(m_flags); /* store flags */ + } + + (*pout_msg_no)++; /* increment message number */ + + /* send data packet */ + timer_start = watchdog_time; /* start timer */ + clear_timed_out(); + /* Full I/O done in one write */ + rc = write_nbytes((char *)hdrptr, pktsiz); + if (chk_dbglvl(DT_NETWORK|1900)) dump_bsock_msg(m_fd, *pout_msg_no, "SEND", rc, msglen, m_flags, save_msg, save_msglen); + timer_start = 0; /* clear timer */ + if (rc != pktsiz) { + errors++; + if (errno == 0) { + b_errno = EIO; + } else { + b_errno = errno; + } + if (rc < 0) { + if (!m_suppress_error_msgs) { + Qmsg5(m_jcr, M_ERROR, 0, + _("Write error sending %d bytes to %s:%s:%d: ERR=%s\n"), + pktsiz, m_who, + m_host, m_port, this->bstrerror()); + } + } else { + Qmsg5(m_jcr, M_ERROR, 0, + _("Wrote %d bytes to %s:%s:%d, but only %d accepted.\n"), + pktsiz, m_who, m_host, m_port, rc); + } + ok = false; + } +// Dmsg4(000, "cmpr=%d ext=%d cmd=%d m_flags=0x%x\n", msglen&BNET_COMPRESSED?1:0, +// msglen&BNET_HDR_EXTEND?1:0, msglen&BNET_CMD_BIT?1:0, m_flags); + msglen = save_msglen; + msg = save_msg; + if (locked) pV(pm_wmutex); + return ok; +} + +/* + * Receive a message from the other end. Each message consists of + * two packets. The first is a header that contains the size + * of the data that follows in the second packet. + * Returns number of bytes read (may return zero) + * Returns -1 on signal (BNET_SIGNAL) + * Returns -2 on hard end of file (BNET_HARDEOF) + * Returns -3 on error (BNET_ERROR) + * Returns -4 on COMMAND (BNET_COMMAND) + * Unfortunately, it is a bit complicated because we have these + * four return types: + * 1. Normal data + * 2. Signal including end of data stream + * 3. Hard end of file + * 4. Error + * Using bsock->is_stop() and bsock->is_error() you can figure this all out. + */ +int32_t BSOCK::recv() +{ + int32_t nbytes; + int32_t pktsiz; + int32_t o_pktsiz = 0; + bool compressed = false; + bool command = false; + bool locked = false; + + cmsg[0] = msg[0] = 0; + msglen = 0; + m_flags = 0; + if (errors || is_terminated() || is_closed()) { + return BNET_HARDEOF; + } + if (m_use_locking) { + pP(pm_rmutex); + locked = true; + } + + read_seqno++; /* bump sequence number */ + timer_start = watchdog_time; /* set start wait time */ + clear_timed_out(); + /* get data size -- in int32_t */ + if ((nbytes = read_nbytes((char *)&pktsiz, sizeof(int32_t))) <= 0) { + timer_start = 0; /* clear timer */ + /* probably pipe broken because client died */ + if (errno == 0) { + b_errno = ENODATA; + } else { + b_errno = errno; + } + errors++; + nbytes = BNET_HARDEOF; /* assume hard EOF received */ + goto get_out; + } + timer_start = 0; /* clear timer */ + if (nbytes != sizeof(int32_t)) { + errors++; + b_errno = EIO; + Qmsg5(m_jcr, M_ERROR, 0, _("Read expected %d got %d from %s:%s:%d\n"), + sizeof(int32_t), nbytes, m_who, m_host, m_port); + nbytes = BNET_ERROR; + goto get_out; + } + + pktsiz = ntohl(pktsiz); /* decode no. 
of bytes that follow */ + o_pktsiz = pktsiz; + /* If extension, read it */ + if (pktsiz > 0 && (pktsiz & BNET_HDR_EXTEND)) { + timer_start = watchdog_time; /* set start wait time */ + clear_timed_out(); + if ((nbytes = read_nbytes((char *)&m_flags, sizeof(int32_t))) <= 0) { + timer_start = 0; /* clear timer */ + /* probably pipe broken because client died */ + if (errno == 0) { + b_errno = ENODATA; + } else { + b_errno = errno; + } + errors++; + nbytes = BNET_HARDEOF; /* assume hard EOF received */ + goto get_out; + } + timer_start = 0; /* clear timer */ + if (nbytes != sizeof(int32_t)) { + errors++; + b_errno = EIO; + Qmsg5(m_jcr, M_ERROR, 0, _("Read expected %d got %d from %s:%s:%d\n"), + sizeof(int32_t), nbytes, m_who, m_host, m_port); + nbytes = BNET_ERROR; + goto get_out; + } + pktsiz &= ~BNET_HDR_EXTEND; + m_flags = ntohl(m_flags); + } + + if (pktsiz > 0 && (pktsiz & BNET_COMPRESSED)) { + compressed = true; + pktsiz &= ~BNET_COMPRESSED; + } + + if (m_flags & BNET_IS_CMD) { + command = true; + } + if (m_flags & BNET_OFFSET) { + compressed = true; + } + + if (pktsiz == 0) { /* No data transferred */ + timer_start = 0; /* clear timer */ + in_msg_no++; + msglen = 0; + nbytes = 0; /* zero bytes read */ + goto get_out; + } + + /* If signal or packet size too big */ + if (pktsiz < 0 || pktsiz > 1000000) { + if (pktsiz > 0) { /* if packet too big */ + if (m_jcr) { + Qmsg4(m_jcr, M_FATAL, 0, + _("Packet size=%d too big from \"%s:%s:%d\". Maximum permitted 1000000. Terminating connection.\n"), + pktsiz, m_who, m_host, m_port); + } + pktsiz = BNET_TERMINATE; /* hang up */ + } + if (pktsiz == BNET_TERMINATE) { + set_terminated(); + } + timer_start = 0; /* clear timer */ + b_errno = ENODATA; + msglen = pktsiz; /* signal code */ + nbytes = BNET_SIGNAL; /* signal */ + goto get_out; + } + + /* Make sure the buffer is big enough + one byte for EOS */ + if (pktsiz >= (int32_t) sizeof_pool_memory(msg)) { + msg = realloc_pool_memory(msg, pktsiz + 100); + } + + timer_start = watchdog_time; /* set start wait time */ + clear_timed_out(); + /* now read the actual data */ + if ((nbytes = read_nbytes(msg, pktsiz)) <= 0) { + timer_start = 0; /* clear timer */ + if (errno == 0) { + b_errno = ENODATA; + } else { + b_errno = errno; + } + errors++; + Qmsg4(m_jcr, M_ERROR, 0, _("Read error from %s:%s:%d: ERR=%s\n"), + m_who, m_host, m_port, this->bstrerror()); + nbytes = BNET_ERROR; + goto get_out; + } + timer_start = 0; /* clear timer */ + in_msg_no++; + msglen = nbytes; + if (nbytes != pktsiz) { + b_errno = EIO; + errors++; + Qmsg5(m_jcr, M_ERROR, 0, _("Read expected %d got %d from %s:%s:%d\n"), + pktsiz, nbytes, m_who, m_host, m_port); + nbytes = BNET_ERROR; + goto get_out; + } + /* If compressed uncompress it */ + if (compressed) { + int offset = 0; + int psize = nbytes * 4; + if (psize >= ((int32_t)sizeof_pool_memory(cmsg))) { + cmsg = realloc_pool_memory(cmsg, psize); + } + psize = sizeof_pool_memory(cmsg); + if (m_flags & BNET_OFFSET) { + offset = m_flags & 0xFF; + msg += offset; + msglen -= offset; + } + /* Grow buffer to max approx 4MB */ + for (int i=0; i < 7; i++) { + nbytes = LZ4_decompress_safe(msg, cmsg, msglen, psize); + if (nbytes >= 0) { + break; + } + if (psize < 65536) { + psize = 65536; + } else { + psize = psize * 2; + } + if (psize >= ((int32_t)sizeof_pool_memory(cmsg))) { + cmsg = realloc_pool_memory(cmsg, psize + 100); + } + } + if (m_flags & BNET_OFFSET) { + msg -= offset; + msglen += offset; + } + if (nbytes < 0) { + Jmsg1(m_jcr, M_ERROR, 0, "Decompress error!!!! 
ERR=%d\n", nbytes); + Pmsg3(000, "Decompress error!! pktsiz=%d cmsgsiz=%d nbytes=%d\n", pktsiz, + psize, nbytes); + b_errno = EIO; + errors++; + Qmsg5(m_jcr, M_ERROR, 0, _("Read expected %d got %d from %s:%s:%d\n"), + pktsiz, nbytes, m_who, m_host, m_port); + nbytes = BNET_ERROR; + goto get_out; + } + msglen = nbytes; + /* Make sure the buffer is big enough + one byte for EOS */ + if (msglen >= (int32_t)sizeof_pool_memory(msg)) { + msg = realloc_pool_memory(msg, msglen + 100); + } + /* If this is a data decompress, leave msg compressed */ + if (!(m_flags & BNET_OFFSET)) { + memcpy(msg, cmsg, msglen); + } + } + + /* always add a zero by to properly terminate any + * string that was send to us. Note, we ensured above that the + * buffer is at least one byte longer than the message length. + */ + msg[nbytes] = 0; /* terminate in case it is a string */ + /* + * The following uses *lots* of resources so turn it on only for + * serious debugging. + */ + Dsm_check(300); + +get_out: + if ((chk_dbglvl(DT_NETWORK|1900))) dump_bsock_msg(m_fd, read_seqno, "RECV", nbytes, o_pktsiz, m_flags, msg, msglen); + if (nbytes != BNET_ERROR && command) { + nbytes = BNET_COMMAND; + } + + if (locked) pV(pm_rmutex); + return nbytes; /* return actual length of message */ +} + +/* + * Send a signal + */ +bool BSOCK::signal(int signal) +{ + msglen = signal; + if (signal == BNET_TERMINATE) { + m_suppress_error_msgs = true; + } + return send(); +} + +/* + * Despool spooled attributes + */ +bool BSOCK::despool(void update_attr_spool_size(ssize_t size), ssize_t tsize) +{ + int32_t pktsiz; + size_t nbytes; + ssize_t last = 0, size = 0; + int count = 0; + JCR *jcr = get_jcr(); + + rewind(m_spool_fd); + +#if defined(HAVE_POSIX_FADVISE) && defined(POSIX_FADV_WILLNEED) + posix_fadvise(fileno(m_spool_fd), 0, 0, POSIX_FADV_WILLNEED); +#endif + + while (fread((char *)&pktsiz, 1, sizeof(int32_t), m_spool_fd) == + sizeof(int32_t)) { + size += sizeof(int32_t); + msglen = ntohl(pktsiz); + if (msglen > 0) { + if (msglen > (int32_t)sizeof_pool_memory(msg)) { + msg = realloc_pool_memory(msg, msglen + 1); + } + nbytes = fread(msg, 1, msglen, m_spool_fd); + if (nbytes != (size_t)msglen) { + berrno be; + Dmsg2(400, "nbytes=%d msglen=%d\n", nbytes, msglen); + Qmsg2(get_jcr(), M_FATAL, 0, _("fread attr spool error. Wanted=%d got=%d bytes.\n"), + msglen, nbytes); + update_attr_spool_size(tsize - last); + return false; + } + size += nbytes; + if ((++count & 0x3F) == 0) { + update_attr_spool_size(size - last); + last = size; + } + } + send(); + if (jcr && job_canceled(jcr)) { + return false; + } + } + update_attr_spool_size(tsize - last); + if (ferror(m_spool_fd)) { + Qmsg(jcr, M_FATAL, 0, _("fread attr spool I/O error.\n")); + return false; + } + return true; +} + +/* + * Open a TCP connection to the server + * Returns NULL + * Returns BSOCKCORE * pointer on success + */ +bool BSOCK::open(JCR *jcr, const char *name, char *host, char *service, + int port, utime_t heart_beat, int *fatal) +{ + bool status = BSOCKCORE::open(jcr, name, host, service, port, heart_beat, fatal); + m_spool = false; + return status; +}; + +/* + * Do comm line compression (LZ4) of a bsock message. + * Returns: true if the compression was done + * false if no compression was done + * The "offset" defines where to start compressing the message. This + * allows passing "header" information uncompressed and the actual + * data part compressed. 
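+ *
+ * Hedged usage sketch (editorial, not upstream code): a sender that wants
+ * the first hdr_len bytes of msg left uncompressed passes that offset in
+ * the flags argument of send(), e.g.
+ *
+ *   bsock->send(hdr_len);      the offset must stay below 255 (see ASSERT2 below)
+ *
+ * send() then sets BNET_OFFSET, and only the bytes from msg+hdr_len onward
+ * are handed to LZ4 here.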
+ * + * Note, we don't compress lines less than 20 characters because we + * want to save at least 10 characters otherwise compression doesn't + * help enough to warrant doing the decompression. + */ +bool BSOCK::comm_compress() +{ + bool compress = false; + bool compressed = false; + int offset = m_flags & 0xFF; + + /* + * Enable compress if allowed and not spooling and the + * message is long enough (>20) to get some reasonable savings. + */ + if (msglen > 20) { + compress = can_compress() && !is_spooling(); + } + m_CommBytes += msglen; /* uncompressed bytes */ + Dmsg4(DT_NETWORK|200, "can_compress=%d compress=%d CommBytes=%lld CommCompresedBytes=%lld\n", + can_compress(), compress, m_CommBytes, m_CommCompressedBytes); + if (compress) { + int clen; + int need_size; + + ASSERT2(offset <= msglen, "Comm offset bigger than message\n"); + ASSERT2(offset < 255, "Offset greater than 254\n"); + need_size = LZ4_compressBound(msglen); + if (need_size >= ((int32_t)sizeof_pool_memory(cmsg))) { + cmsg = realloc_pool_memory(cmsg, need_size + 100); + } + msglen -= offset; + msg += offset; + cmsg += offset; + clen = LZ4_compress_default(msg, cmsg, msglen, msglen); + //Dmsg2(000, "clen=%d msglen=%d\n", clen, msglen); + /* Compression should save at least 10 characters */ + if (clen > 0 && clen + 10 <= msglen) { + +#ifdef xxx_debug + /* Debug code -- decompress and compare */ + int blen, rlen, olen; + olen = msglen; + POOLMEM *rmsg = get_pool_memory(PM_BSOCK); + blen = sizeof_pool_memory(msg) * 2; + if (blen >= sizeof_pool_memory(rmsg)) { + rmsg = realloc_pool_memory(rmsg, blen); + } + rlen = LZ4_decompress_safe(cmsg, rmsg, clen, blen); + //Dmsg4(000, "blen=%d clen=%d olen=%d rlen=%d\n", blen, clen, olen, rlen); + ASSERT(olen == rlen); + ASSERT(memcmp(msg, rmsg, olen) == 0); + free_pool_memory(rmsg); + /* end Debug code */ +#endif + + msg = cmsg; + msglen = clen; + compressed = true; + } + msglen += offset; + msg -= offset; + cmsg -= offset; + } + m_CommCompressedBytes += msglen; + return compressed; +} + +/* + * Note, this routine closes the socket, but leaves the + * bsock memory in place. + * every thread is responsible of closing and destroying its own duped or not + * duped BSOCKCORE + */ +void BSOCK::close() +{ + Dmsg0(BSOCK_DEBUG_LVL, "BSOCK::close()\n"); + BSOCKCORE::close(); + return; +} + +/* + * Write nbytes to the network. + * It may require several writes. + */ + +int32_t BSOCK::write_nbytes(char *ptr, int32_t nbytes) +{ + int32_t nwritten; + + if (is_spooling()) { + nwritten = fwrite(ptr, 1, nbytes, m_spool_fd); + if (nwritten != nbytes) { + berrno be; + b_errno = errno; + Qmsg3(jcr(), M_FATAL, 0, _("Attr spool write error. wrote=%d wanted=%d bytes. ERR=%s\n"), + nbytes, nwritten, be.bstrerror()); + Dmsg2(400, "nwritten=%d nbytes=%d.\n", nwritten, nbytes); + errno = b_errno; + return -1; + } + return nbytes; + } + + /* reuse base code */ + return BSOCKCORE::write_nbytes(ptr, nbytes); +} + +/* + * This is a non-class BSOCK "constructor" because we want to + * call the Bacula smartalloc routines instead of new. + */ +BSOCK *new_bsock() +{ + BSOCK *bsock = New(BSOCK); + return bsock; +} + + +/* Initialize internal socket structure. 
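+ *
+ * Editorial usage sketch (hypothetical server-side accept loop; the
+ * variable names are illustrative only):
+ *
+ *   struct sockaddr_in cli; socklen_t len = sizeof(cli);
+ *   int fd = accept(listenfd, (struct sockaddr *)&cli, &len);
+ *   BSOCK *bs = init_bsock(jcr, fd, "client-fd", host, port,
+ *                          (struct sockaddr *)&cli);
+ *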
+ * This probably should be done in bsock.c + */ +BSOCK *init_bsock(JCR *jcr, int sockfd, const char *who, + const char *host, int port, struct sockaddr *client_addr) +{ + Dmsg4(100, "socket=%d who=%s host=%s port=%d\n", sockfd, who, host, port); + BSOCK *bsock = New(BSOCK(sockfd)); + bsock->m_master = bsock; /* don't use set_master() here */ + bsock->set_who(bstrdup(who)); + bsock->set_host(bstrdup(host)); + bsock->set_port(port); + bmemzero(&bsock->peer_addr, sizeof(bsock->peer_addr)); + memcpy(&bsock->client_addr, client_addr, sizeof(bsock->client_addr)); + bsock->set_jcr(jcr); + return bsock; +} + +BSOCK *dup_bsock(BSOCK *osock) +{ + POOLMEM *cmsg; + POOLMEM *msg; + POOLMEM *errmsg; + + osock->set_locking(); + BSOCK *bsock = New(BSOCK); + // save already allocated variables + msg = bsock->msg; + cmsg = bsock->cmsg; + errmsg = bsock->errmsg; + // with this we make virtually the same job as with memcpy() + *bsock = *osock; + // restore saved variables + bsock->msg = msg; + bsock->cmsg = cmsg; + bsock->errmsg = errmsg; + if (osock->who()) { + bsock->set_who(bstrdup(osock->who())); + } + if (osock->host()) { + bsock->set_host(bstrdup(osock->host())); + } + if (osock->src_addr) { + bsock->src_addr = New( IPADDR( *(osock->src_addr)) ); + } + bsock->set_duped(); + bsock->set_master(osock); + return bsock; +} + +#ifndef TEST_PROGRAM +#define TEST_PROGRAM_A +#endif + +void BSOCK::dump() +{ +#ifdef TEST_PROGRAM + char ed1[50]; + BSOCKCORE::dump(); + Pmsg1(-1, "BSOCK::dump(): %p\n", this); + Pmsg1(-1, "\tm_spool_fd: %p\n", m_spool_fd); + Pmsg1(-1, "\tcmsg: %p\n", cmsg); + Pmsg1(-1, "\tm_data_end: %s\n", edit_int64(m_data_end, ed1)); + Pmsg1(-1, "\tm_last_data_end: %s\n", edit_int64(m_last_data_end, ed1)); + Pmsg1(-1, "\tm_FileIndex: %s\n", edit_int64(m_FileIndex, ed1)); + Pmsg1(-1, "\tm_lastFileIndex: %s\n", edit_int64(m_lastFileIndex, ed1)); + Pmsg1(-1, "\tm_spool: %s\n", m_spool?"true":"false"); + Pmsg1(-1, "\tm_compress: %s\n", m_compress?"true":"false"); + Pmsg1(-1, "\tm_CommBytes: %s\n", edit_uint64(m_CommBytes, ed1)); + Pmsg1(-1, "\tm_CommCompressedBytes: %s\n", edit_uint64(m_CommCompressedBytes, ed1)); +#endif +}; + +#ifdef TEST_PROGRAM +#include "unittests.h" + +void free_my_jcr(JCR *jcr){ + /* TODO: handle full JCR free */ + free_jcr(jcr); +}; + +#define ofnamefmt "/tmp/bsock.%d.test" +const char *data = "This is a BSOCK communication test: 1234567\n"; +const char *hexdata = "< 00000000 00 00 00 2c 54 68 69 73 20 69 73 20 61 20 42 53 # ...,This is a BS\n" \ + "< 00000010 4f 43 4b 20 63 6f 6d 6d 75 6e 69 63 61 74 69 6f # OCK communicatio\n" \ + "< 00000020 6e 20 74 65 73 74 3a 20 31 32 33 34 35 36 37 0a # n test: 1234567.\n"; + +int main() +{ + Unittests bsock_test("bsock_test", true); + BSOCK *bs; + BSOCK *bsdup; + pid_t pid; + int rc; + char *host = (char*)"localhost"; + char *name = (char*)"Test"; + JCR *jcr; + bool btest; + char buf[256]; // extend this buffer when hexdata becomes longer + int fd; + + Pmsg0(0, "Initialize tests ...\n"); + + jcr = new_jcr(sizeof(JCR), NULL); + bs = New(BSOCK); + bs->set_jcr(jcr); + ok(bs != NULL && bs->jcr() == jcr, + "Default initialization"); + + Pmsg0(0, "Preparing fork\n"); + pid = fork(); + if (0 == pid){ + Pmsg0(0, "Prepare to execute netcat\n"); + pid_t mypid = getpid(); + char ofname[30]; + snprintf(ofname, sizeof(ofname), ofnamefmt, mypid); + rc = execl("/bin/netcat", "netcat", "-v", "-p", "20000", "-l", "-o", ofname, NULL); + Pmsg1(0, "Error executing netcat: %s\n", strerror(rc)); + exit(1); + } + Pmsg1(0, "After fork: %d\n", pid); + 
bmicrosleep(2, 0); // we wait a bit to netcat to start + btest = bs->connect(jcr, 1, 10, 0, name, host, NULL, 20000, 0); + ok(btest, "BSOCK connection test"); + if (btest) { + /* connected */ + bsdup = dup_bsock(bs); + ok(bsdup->is_duped() && bsdup->jcr() == jcr, + "Check duped BSOCK"); + delete(bsdup); + /* we are connected, so send some data */ + bs->fsend("%s", data); + bmicrosleep(2, 0); // wait until data received by netcat + bs->close(); + ok(bs->is_closed(), "Close bsock"); + /* now check what netcat received */ + char ofname[30]; + snprintf(ofname, sizeof(ofname), ofnamefmt, pid); + fd = open(ofname, O_RDONLY); + btest = false; + if (fd > 0){ + btest = true; + read(fd, buf, strlen(hexdata)); + close(fd); + unlink(ofname); + } + ok(btest, "Output file available"); + ok(strcmp(buf, hexdata) == 0, "Communication data"); + } + kill(pid, SIGTERM); + delete(bs); + free_my_jcr(jcr); + term_last_jobs_list(); + + return report(); +}; +#endif /* TEST_PROGRAM */ diff --git a/src/lib/bsock.h b/src/lib/bsock.h new file mode 100644 index 00000000..fd4e76f1 --- /dev/null +++ b/src/lib/bsock.h @@ -0,0 +1,219 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula Sock Class definition + * Note, the old non-class code is in bnet.c, and the + * new class code associated with this file is in bsock.c + * + * The new code inherit common functions from BSOCKCORE class + * and implement BSOCK/Bacula specific protocols and data flow. + * + * Kern Sibbald, May MM + * + * Major refactoring of BSOCK code written by: + * + * Radosław Korzeniewski, MMXVIII + * radoslaw@korzeniewski.net, radekk@inteos.pl + * Inteos Sp. z o.o. http://www.inteos.pl/ + * + * Zero msglen from other end indicates soft eof (usually + * end of some binary data stream, but not end of conversation). + * + * Negative msglen, is special "signal" (no data follows). + * See below for SIGNAL codes. 
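+ *
+ * Editorial sketch of the usual receive loop built on these conventions
+ * (the surrounding job logic is hypothetical):
+ *
+ *   int32_t n;
+ *   while ((n = bs->recv()) > 0) {
+ *      ... process n bytes found in bs->msg ...
+ *   }
+ *
+ * n == 0 is the soft eof described above; n == BNET_SIGNAL means the
+ * peer sent a signal and bs->msglen holds one of the codes below.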
+ */ + +#ifndef __BSOCK_H_ +#define __BSOCK_H_ + +#define BSOCK_TIMEOUT 3600 * 24 * 200; /* default 200 days */ + +class BSOCK: public BSOCKCORE { +public: + FILE *m_spool_fd; /* spooling file */ + POOLMEM *cmsg; /* Compress buffer */ + +private: + boffset_t m_data_end; /* offset of data written */ + boffset_t m_last_data_end; /* offset of last valid data written */ + int32_t m_FileIndex; /* attr spool FI */ + int32_t m_lastFileIndex; /* last valid attr spool FI */ + bool m_spool: 1; /* set for spooling */ + bool m_compress: 1; /* set to use comm line compression */ + uint64_t m_CommBytes; /* Bytes sent */ + uint64_t m_CommCompressedBytes; /* Compressed bytes sent */ + + bool open(JCR *jcr, const char *name, char *host, char *service, + int port, utime_t heart_beat, int *fatal); + void init(); + void _destroy(); + int32_t write_nbytes(char *ptr, int32_t nbytes); + +public: + BSOCK(); + BSOCK(int sockfd); + ~BSOCK(); + // int32_t recv(int /*len*/) { return recv(); }; + int32_t recv(); + bool send() { return send(0); }; + bool send(int flags); + bool signal(int signal); + void close(); /* close connection and destroy packet */ + bool comm_compress(); /* in bsock.c */ + bool despool(void update_attr_spool_size(ssize_t size), ssize_t tsize); + bool authenticate_director(const char *name, const char *password, + TLS_CONTEXT *tls_ctx, char *response, int response_len); + + /* Inline functions */ + bool is_spooling() const { return m_spool; }; + bool can_compress() const { return m_compress; }; + void set_data_end(int32_t FileIndex) { + if (m_spool && FileIndex > m_FileIndex) { + m_lastFileIndex = m_FileIndex; + m_last_data_end = m_data_end; + m_FileIndex = FileIndex; + m_data_end = ftello(m_spool_fd); + } + }; + boffset_t get_last_data_end() { return m_last_data_end; }; + int32_t get_lastFileIndex() { return m_lastFileIndex; }; + uint32_t CommBytes() { return m_CommBytes; }; + uint32_t CommCompressedBytes() { return m_CommCompressedBytes; }; + void set_spooling() { m_spool = true; }; + void clear_spooling() { m_spool = false; }; + void set_compress() { m_compress = true; }; + void clear_compress() { m_compress = false; }; + void dump(); +}; + +/* + * Signal definitions for use in bsock->signal() + * Note! These must be negative. There are signals that are generated + * by the bsock software not by the OS ... 
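+ *
+ * For example (editorial sketch), a sender marks the end of a data
+ * stream with
+ *
+ *   bs->signal(BNET_EOD);
+ *
+ * and the peer sees recv() return BNET_SIGNAL with msglen == BNET_EOD.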
+ */ +enum { + BNET_EOD = -1, /* End of data stream, new data may follow */ + BNET_EOD_POLL = -2, /* End of data and poll all in one */ + BNET_STATUS = -3, /* Send full status */ + BNET_TERMINATE = -4, /* Conversation terminated, doing close() */ + BNET_POLL = -5, /* Poll request, I'm hanging on a read */ + BNET_HEARTBEAT = -6, /* Heartbeat Response requested */ + BNET_HB_RESPONSE = -7, /* Only response permited to HB */ + BNET_xxxxxxPROMPT = -8, /* No longer used -- Prompt for subcommand */ + BNET_BTIME = -9, /* Send UTC btime */ + BNET_BREAK = -10, /* Stop current command -- ctl-c */ + BNET_START_SELECT = -11, /* Start of a selection list */ + BNET_END_SELECT = -12, /* End of a select list */ + BNET_INVALID_CMD = -13, /* Invalid command sent */ + BNET_CMD_FAILED = -14, /* Command failed */ + BNET_CMD_OK = -15, /* Command succeeded */ + BNET_CMD_BEGIN = -16, /* Start command execution */ + BNET_MSGS_PENDING = -17, /* Messages pending */ + BNET_MAIN_PROMPT = -18, /* Server ready and waiting */ + BNET_SELECT_INPUT = -19, /* Return selection input */ + BNET_WARNING_MSG = -20, /* Warning message */ + BNET_ERROR_MSG = -21, /* Error message -- command failed */ + BNET_INFO_MSG = -22, /* Info message -- status line */ + BNET_RUN_CMD = -23, /* Run command follows */ + BNET_YESNO = -24, /* Request yes no response */ + BNET_START_RTREE = -25, /* Start restore tree mode */ + BNET_END_RTREE = -26, /* End restore tree mode */ + BNET_SUB_PROMPT = -27, /* Indicate we are at a subprompt */ + BNET_TEXT_INPUT = -28, /* Get text input from user */ + BNET_EXT_TERMINATE = -29, /* A Terminate condition has been met and + already reported somewhere else */ + BNET_FDCALLED = -30 /* The FD should keep the connection for a new job */ +}; + +/* + * These bits ares set in the packet length field. Attempt to + * keep the number of bits to a minimum and instead use the new + * flag field for passing bits using the BNET_HDR_EXTEND bit. + * Note: we must not set the high bit as that indicates a signal. + */ +#define BNET_COMPRESSED (1<<30) /* set for lz4 compressed data */ +#define BNET_HDR_EXTEND (1<<29) /* extended header */ + +/* + * The following bits are kept in flags. The high 16 bits are + * for flags, and the low 16 bits are for other info such as + * compressed data offset (BNET_OFFSET) + */ +#define BNET_IS_CMD (1<<28) /* set for command data */ +#define BNET_OFFSET (1<<27) /* Data compression offset specified */ +#define BNET_NOCOMPRESS (1<<25) /* Disable compression */ +#define BNET_DATACOMPRESSED (1<<24) /* Data compression */ + +#define BNET_SETBUF_READ 1 /* Arg for bnet_set_buffer_size */ +#define BNET_SETBUF_WRITE 2 /* Arg for bnet_set_buffer_size */ + +/* + * Return status from bnet_recv() + * Note, the HARDEOF and ERROR refer to comm status/problems + * rather than the BNET_xxx above, which are software signals. 
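+ *
+ * (Editorial example: an orderly shutdown arrives as a software signal,
+ * i.e. recv() == BNET_SIGNAL with msglen == BNET_TERMINATE, whereas a
+ * dropped connection surfaces as the comm status BNET_HARDEOF.)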
+ */ +enum { + BNET_SIGNAL = -1, + BNET_HARDEOF = -2, + BNET_ERROR = -3, + BNET_COMMAND = -4 +}; + +/* + * Inter-daemon commands + * When BNET_IS_CMD is on, the next int32 is a command + */ +#define BNET_CMD_SIZE sizeof(int32_t) + +enum { + BNET_CMD_NONE = 0, /* reserved */ + BNET_CMD_ACK_HASH = 1, /* backup SD->FD SD already know this hash, don't need the block */ + BNET_CMD_UNK_HASH = 2, /* restore SD->FD hash is unknown */ + BNET_CMD_GET_HASH = 3, /* backup SD->FD SD ask FD to send the corresponding block */ + /* restore FD->SD FD ask SD to send the corresponding block */ + BNET_CMD_STO_BLOCK = 4, /* backup FD->SD FD send requested block */ + BNET_CMD_REC_ACK = 5, /* restore FD->SD FD has consumed records from the buffer */ + BNET_CMD_STP_THREAD = 6, /* restore FD->SD SD must stop thread */ + BNET_CMD_STP_FLOWCTRL = 7 /* backup FD->SD SD must stop sending flowcontrol information */ +}; + +/* + * TLS enabling values. Value is important for comparison, ie: + * if (tls_remote_need < BNET_TLS_REQUIRED) { ... } + */ +enum { + BNET_TLS_NONE = 0, /* cannot do TLS */ + BNET_TLS_OK = 1, /* can do, but not required on my end */ + BNET_TLS_REQUIRED = 2 /* TLS is required */ +}; + +const char *bnet_cmd_to_name(int val); + +BSOCK *new_bsock(); +/* + * Completely release the socket packet, and NULL the pointer + */ +#define free_bsock(a) free_bsockcore(a) + +/* + * Does the socket exist and is it open? + */ +#define is_bsock_open(a) is_bsockcore_open(a) + +#endif /* __BSOCK_H_ */ diff --git a/src/lib/bsockcore.c b/src/lib/bsockcore.c new file mode 100644 index 00000000..9b68c1d8 --- /dev/null +++ b/src/lib/bsockcore.c @@ -0,0 +1,1340 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula Core Sock Class definition + * + * Kern Sibbald, May MM + * + * Major refactoring of BSOCK code written by: + * + * Radosław Korzeniewski, MMXVIII + * radoslaw@korzeniewski.net, radekk@inteos.pl + * Inteos Sp. z o.o. http://www.inteos.pl/ + * + * This is a common class for socket network communication derived from + * BSOCK class. It acts as a base class for non-Bacula network communication + * and as a base class for standard BSOCK implementation. Basically the BSOCK + * class did not changed its functionality for any Bacula specific part. + * Now you can use a BSOCKCLASS for other network communication. 
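+ *
+ * Editorial usage sketch, mirroring the TEST_PROGRAM at the end of
+ * bsock.c (host and port values are illustrative):
+ *
+ *   BSOCK *bs = New(BSOCK);
+ *   bs->set_jcr(jcr);
+ *   if (bs->connect(jcr, 1, 10, 0, "Test", host, NULL, 20000, 0)) {
+ *      bs->fsend("%s", "hello\n");
+ *      bs->close();
+ *   }
+ *   delete bs;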
+ */ + +#include "bacula.h" +#include "jcr.h" +#include +#include + +#define BSOCKCORE_DEBUG_LVL 900 + +#if !defined(ENODATA) /* not defined on BSD systems */ +#define ENODATA EPIPE +#endif + +#if !defined(SOL_TCP) /* Not defined on some systems */ +#define SOL_TCP IPPROTO_TCP +#endif + +#ifdef HAVE_WIN32 +#include +static void win_close_wait(int fd); +#ifndef SOCK_CLOEXEC +#define SOCK_CLOEXEC 0 +#endif +#endif + +/* + * make a nice dump of a message + */ +void dump_bsock_msg(int sock, uint32_t msgno, const char *what, uint32_t rc, int32_t pktsize, uint32_t flags, + POOLMEM *msg, int32_t msglen) +{ + char buf[54]; + bool is_ascii; + int dbglvl = DT_ASX; + + if (msglen<0) { + Dmsg4(dbglvl, "%s %d:%d SIGNAL=%s\n", what, sock, msgno, bnet_sig_to_ascii(msglen)); + // data + smartdump(msg, msglen, buf, sizeof(buf)-9, &is_ascii); + if (is_ascii) { + Dmsg5(dbglvl, "%s %d:%d len=%d \"%s\"\n", what, sock, msgno, msglen, buf); + } else { + Dmsg5(dbglvl, "%s %d:%d len=%d %s\n", what, sock, msgno, msglen, buf); + } + } +} + +BSOCKCallback::BSOCKCallback() +{ +} + +BSOCKCallback::~BSOCKCallback() +{ +} + +/* + * Default constructor does class initialization. + */ +BSOCKCORE::BSOCKCORE() : + msg(NULL), + errmsg(NULL), + res(NULL), + tls(NULL), + src_addr(NULL), + read_seqno(0), + in_msg_no(0), + out_msg_no(0), + pout_msg_no(NULL), + msglen(0), + timer_start(0), + timeout(0), + m_fd(-1), + b_errno(0), + m_blocking(0), + errors(0), + m_suppress_error_msgs(false), + send_hook_cb(NULL), + m_next(NULL), + m_jcr(NULL), + pm_rmutex(NULL), + pm_wmutex(NULL), + m_who(NULL), + m_host(NULL), + m_port(0), + m_tid(NULL), + m_flags(0), + m_timed_out(false), + m_terminated(false), + m_closed(false), + m_duped(false), + m_use_locking(false), + m_bwlimit(0), + m_nb_bytes(0), + m_last_tick(0) +{ + pthread_mutex_init(&m_rmutex, NULL); + pthread_mutex_init(&m_wmutex, NULL); + pthread_mutex_init(&m_mmutex, NULL); + bmemzero(&peer_addr, sizeof(peer_addr)); + bmemzero(&client_addr, sizeof(client_addr)); + init(); +}; + +/* + * Default destructor releases resources. + */ +BSOCKCORE::~BSOCKCORE() +{ + Dmsg0(BSOCKCORE_DEBUG_LVL, "BSOCKCORE::~BSOCKCORE()\n"); + _destroy(); +}; + +/* + * Initialization method. + */ +void BSOCKCORE::init() +{ + m_master = this; + set_closed(); + set_terminated(); + m_blocking = 1; + msg = get_pool_memory(PM_BSOCK); + errmsg = get_pool_memory(PM_MESSAGE); + timeout = BSOCKCORE_TIMEOUT; + pout_msg_no = &out_msg_no; +} + +void BSOCKCORE::free_tls() +{ + free_tls_connection(this->tls); + this->tls = NULL; +} + +/* + * Try to connect to host for max_retry_time at retry_time intervals. + * Note, you must have called the constructor prior to calling + * this routine. + */ +bool BSOCKCORE::connect(JCR * jcr, int retry_interval, utime_t max_retry_time, + utime_t heart_beat, + const char *name, char *host, char *service, int port, + int verbose) +{ + bool ok = false; + int i; + int fatal = 0; + time_t begin_time = time(NULL); + time_t now; + btimer_t *tid = NULL; + + /* Try to trap out of OS call when time expires */ + if (max_retry_time) { + tid = start_thread_timer(jcr, pthread_self(), (uint32_t)max_retry_time); + } + + for (i = 0; !open(jcr, name, host, service, port, heart_beat, &fatal); + i -= retry_interval) { + berrno be; + if (fatal || (jcr && job_canceled(jcr))) { + goto bail_out; + } + Dmsg4(50, "Unable to connect to %s on %s:%d. 
ERR=%s\n", + name, host, port, be.bstrerror()); + if (i < 0) { + i = 60 * 5; /* complain again in 5 minutes */ + if (verbose) + Qmsg4(jcr, M_WARNING, 0, _( + "Could not connect to %s on %s:%d. ERR=%s\n" + "Retrying ...\n"), name, host, port, be.bstrerror()); + } + bmicrosleep(retry_interval, 0); + now = time(NULL); + if (begin_time + max_retry_time <= now) { + Qmsg4(jcr, M_FATAL, 0, _("Unable to connect to %s on %s:%d. ERR=%s\n"), + name, host, port, be.bstrerror()); + goto bail_out; + } + } + ok = true; + +bail_out: + if (tid) { + stop_thread_timer(tid); + } + return ok; +} + +/* + * Finish initialization of the packet structure. + */ +void BSOCKCORE::fin_init(JCR * jcr, int sockfd, const char *who, const char *host, int port, + struct sockaddr *lclient_addr) +{ + Dmsg3(100, "who=%s host=%s port=%d\n", who, host, port); + m_fd = sockfd; + if (m_who) { + free(m_who); + } + if (m_host) { + free(m_host); + } + set_who(bstrdup(who)); + set_host(bstrdup(host)); + set_port(port); + memcpy(&client_addr, lclient_addr, sizeof(client_addr)); + set_jcr(jcr); +} + +/* + * Copy the address from the configuration dlist that gets passed in + */ +void BSOCKCORE::set_source_address(dlist *src_addr_list) +{ + IPADDR *addr = NULL; + + // delete the object we already have, if it's allocated + if (src_addr) { + /* TODO: Why free() instead of delete as src_addr is a IPADDR class */ + free( src_addr); + src_addr = NULL; + } + + if (src_addr_list) { + addr = (IPADDR*) src_addr_list->first(); + src_addr = New( IPADDR(*addr)); + } +} + +/* + * Open a TCP connection to the server + * Returns true when connection was successful or false otherwise. + */ +bool BSOCKCORE::open(JCR *jcr, const char *name, char *host, char *service, + int port, utime_t heart_beat, int *fatal) +{ + int sockfd = -1; + dlist *addr_list; + IPADDR *ipaddr; + bool connected = false; + int turnon = 1; + const char *errstr; + int save_errno = 0; + + /* + * Fill in the structure serv_addr with the address of + * the server that we want to connect with. + */ + if ((addr_list = bnet_host2ipaddrs(host, 0, &errstr)) == NULL) { + /* Note errstr is not malloc'ed */ + Qmsg2(jcr, M_ERROR, 0, _("gethostbyname() for host \"%s\" failed: ERR=%s\n"), + host, errstr); + Dmsg2(100, "bnet_host2ipaddrs() for host %s failed: ERR=%s\n", + host, errstr); + *fatal = 1; + return false; + } + + remove_duplicate_addresses(addr_list); + foreach_dlist(ipaddr, addr_list) { + ipaddr->set_port_net(htons(port)); + char allbuf[256 * 10]; + char curbuf[256]; + Dmsg2(100, "Current %sAll %s\n", + ipaddr->build_address_str(curbuf, sizeof(curbuf)), + build_addresses_str(addr_list, allbuf, sizeof(allbuf))); + /* Open a TCP socket */ + if ((sockfd = socket(ipaddr->get_family(), SOCK_STREAM|SOCK_CLOEXEC, 0)) < 0) { + berrno be; + save_errno = errno; + switch (errno) { +#ifdef EAFNOSUPPORT + case EAFNOSUPPORT: + /* + * The name lookup of the host returned an address in a protocol family + * we don't support. Suppress the error and try the next address. + */ + break; +#endif +#ifdef EPROTONOSUPPORT + /* See above comments */ + case EPROTONOSUPPORT: + break; +#endif +#ifdef EPROTOTYPE + /* See above comments */ + case EPROTOTYPE: + break; +#endif + default: + *fatal = 1; + Qmsg3(jcr, M_ERROR, 0, _("Socket open error. proto=%d port=%d. ERR=%s\n"), + ipaddr->get_family(), ipaddr->get_port_host_order(), be.bstrerror()); + Pmsg3(300, _("Socket open error. proto=%d port=%d. 
ERR=%s\n"), + ipaddr->get_family(), ipaddr->get_port_host_order(), be.bstrerror()); + break; + } + continue; + } + + /* Bind to the source address if it is set */ + if (src_addr) { + if (bind(sockfd, src_addr->get_sockaddr(), src_addr->get_sockaddr_len()) < 0) { + berrno be; + save_errno = errno; + *fatal = 1; + Qmsg2(jcr, M_ERROR, 0, _("Source address bind error. proto=%d. ERR=%s\n"), + src_addr->get_family(), be.bstrerror() ); + Pmsg2(000, _("Source address bind error. proto=%d. ERR=%s\n"), + src_addr->get_family(), be.bstrerror() ); + if (sockfd >= 0) socketClose(sockfd); + continue; + } + } + + /* + * Keep socket from timing out from inactivity + */ + if (setsockopt(sockfd, SOL_SOCKET, SO_KEEPALIVE, (sockopt_val_t)&turnon, sizeof(turnon)) < 0) { + berrno be; + Qmsg1(jcr, M_WARNING, 0, _("Cannot set SO_KEEPALIVE on socket: %s\n"), + be.bstrerror()); + } +#if defined(TCP_KEEPIDLE) + if (heart_beat) { + int opt = heart_beat; + if (setsockopt(sockfd, SOL_TCP, TCP_KEEPIDLE, (sockopt_val_t)&opt, sizeof(opt)) < 0) { + berrno be; + Qmsg1(jcr, M_WARNING, 0, _("Cannot set TCP_KEEPIDLE on socket: %s\n"), + be.bstrerror()); + } + } +#endif + + /* connect to server */ + if (::connect(sockfd, ipaddr->get_sockaddr(), ipaddr->get_sockaddr_len()) < 0) { + save_errno = errno; + if (sockfd >= 0) socketClose(sockfd); + continue; + } + *fatal = 0; + connected = true; + break; + } + + if (!connected) { + berrno be; + free_addresses(addr_list); + errno = save_errno | b_errno_win32; + Dmsg4(50, "Could not connect to server %s %s:%d. ERR=%s\n", + name, host, port, be.bstrerror()); + return false; + } + /* + * Keep socket from timing out from inactivity + * Do this a second time out of paranoia + */ + if (setsockopt(sockfd, SOL_SOCKET, SO_KEEPALIVE, (sockopt_val_t)&turnon, sizeof(turnon)) < 0) { + berrno be; + Qmsg1(jcr, M_WARNING, 0, _("Cannot set SO_KEEPALIVE on socket: %s\n"), + be.bstrerror()); + } + fin_init(jcr, sockfd, name, host, port, ipaddr->get_sockaddr()); + free_addresses(addr_list); + + /* Clean the packet a bit */ + m_closed = false; + m_duped = false; + // Moved to BSOCK m_spool = false; + m_use_locking = false; + m_timed_out = false; + m_terminated = false; + m_suppress_error_msgs = false; + errors = 0; + m_blocking = 0; + + Dmsg3(50, "OK connected to server %s %s:%d.\n", + name, host, port); + + return true; +} + +/* + * Force read/write to use locking + */ +bool BSOCKCORE::set_locking() +{ + int stat; + if (m_use_locking) { + return true; /* already set */ + } + pm_rmutex = &m_rmutex; + pm_wmutex = &m_wmutex; + if ((stat = pthread_mutex_init(pm_rmutex, NULL)) != 0) { + berrno be; + Qmsg(m_jcr, M_FATAL, 0, _("Could not init bsockcore read mutex. ERR=%s\n"), + be.bstrerror(stat)); + return false; + } + if ((stat = pthread_mutex_init(pm_wmutex, NULL)) != 0) { + berrno be; + Qmsg(m_jcr, M_FATAL, 0, _("Could not init bsockcore write mutex. ERR=%s\n"), + be.bstrerror(stat)); + return false; + } + if ((stat = pthread_mutex_init(&m_mmutex, NULL)) != 0) { + berrno be; + Qmsg(m_jcr, M_FATAL, 0, _("Could not init bsockcore attribute mutex. ERR=%s\n"), + be.bstrerror(stat)); + return false; + } + m_use_locking = true; + return true; +} + +void BSOCKCORE::clear_locking() +{ + if (!m_use_locking || m_duped) { + return; + } + m_use_locking = false; + pthread_mutex_destroy(pm_rmutex); + pthread_mutex_destroy(pm_wmutex); + pthread_mutex_destroy(&m_mmutex); + pm_rmutex = NULL; + pm_wmutex = NULL; + return; +} + +/* + * Send a message over the network. Everything is sent in one write request. 
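
The open() code above enables SO_KEEPALIVE unconditionally and sets TCP_KEEPIDLE from the heartbeat value only where the platform defines it. Below is a minimal standalone sketch of that pattern using plain POSIX calls; setup_keepalive() is a hypothetical helper, and the feature test mirrors the #if defined(TCP_KEEPIDLE) guard in the code above.

    // Standalone sketch of the keep-alive setup used when opening a socket.
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <unistd.h>
    #include <cstdio>

    static void setup_keepalive(int sockfd, int idle_seconds)
    {
       int turnon = 1;
       if (setsockopt(sockfd, SOL_SOCKET, SO_KEEPALIVE, &turnon, sizeof(turnon)) < 0) {
          perror("SO_KEEPALIVE");
       }
    #ifdef TCP_KEEPIDLE            /* Linux-specific idle time before probing */
       if (idle_seconds > 0 &&
           setsockopt(sockfd, IPPROTO_TCP, TCP_KEEPIDLE, &idle_seconds, sizeof(idle_seconds)) < 0) {
          perror("TCP_KEEPIDLE");
       }
    #endif
    }

    int main()
    {
       int fd = socket(AF_INET, SOCK_STREAM, 0);
       if (fd >= 0) {
          setup_keepalive(fd, 300);   /* start probing after 5 idle minutes */
          close(fd);
       }
       return 0;
    }
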
+ * + * Returns: false on failure + * true on success + */ +bool BSOCKCORE::send() +{ + int32_t rc; + bool ok = true; + bool locked = false; + + if (is_closed()) { + if (!m_suppress_error_msgs) { + Qmsg0(m_jcr, M_ERROR, 0, _("Socket is closed\n")); + } + return false; + } + if (errors) { + if (!m_suppress_error_msgs) { + Qmsg4(m_jcr, M_ERROR, 0, _("Socket has errors=%d on call to %s:%s:%d\n"), + errors, m_who, m_host, m_port); + } + return false; + } + if (is_terminated()) { + if (!m_suppress_error_msgs) { + Qmsg4(m_jcr, M_ERROR, 0, _("BSOCKCORE send while terminated=%d on call to %s:%s:%d\n"), + is_terminated(), m_who, m_host, m_port); + } + return false; + } + + if (msglen > 4000000) { + if (!m_suppress_error_msgs) { + Qmsg4(m_jcr, M_ERROR, 0, + _("Socket has insane msglen=%d on call to %s:%s:%d\n"), + msglen, m_who, m_host, m_port); + } + return false; + } + + if (send_hook_cb) { + if (!send_hook_cb->bsock_send_cb()) { + Dmsg3(1, "Flowcontrol failure on %s:%s:%d\n", m_who, m_host, m_port); + Qmsg3(m_jcr, M_ERROR, 0, _("Flowcontrol failure on %s:%s:%d\n"), m_who, m_host, m_port); + return false; + } + } + if (m_use_locking) { + pP(pm_wmutex); + locked = true; + } + + (*pout_msg_no)++; /* increment message number */ + + /* send data packet */ + timer_start = watchdog_time; /* start timer */ + clear_timed_out(); + /* Full I/O done in one write */ + rc = write_nbytes(msg, msglen); + if (chk_dbglvl(DT_NETWORK|1900)) dump_bsock_msg(m_fd, *pout_msg_no, "SEND", rc, msglen, m_flags, msg, msglen); + timer_start = 0; /* clear timer */ + if (rc != msglen) { + errors++; + if (errno == 0) { + b_errno = EIO; + } else { + b_errno = errno; + } + if (rc < 0) { + if (!m_suppress_error_msgs) { + Qmsg5(m_jcr, M_ERROR, 0, + _("Write error sending %d bytes to %s:%s:%d: ERR=%s\n"), + msglen, m_who, + m_host, m_port, this->bstrerror()); + } + } else { + Qmsg5(m_jcr, M_ERROR, 0, + _("Wrote %d bytes to %s:%s:%d, but only %d accepted.\n"), + msglen, m_who, m_host, m_port, rc); + } + ok = false; + } +// Dmsg4(000, "cmpr=%d ext=%d cmd=%d m_flags=0x%x\n", msglen&BNET_COMPRESSED?1:0, +// msglen&BNET_HDR_EXTEND?1:0, msglen&BNET_CMD_BIT?1:0, m_flags); + if (locked) pV(pm_wmutex); + return ok; +} + +/* + * Format and send a message + * Returns: false on error + * true on success + */ +bool BSOCKCORE::fsend(const char *fmt, ...) +{ + va_list arg_ptr; + int maxlen; + + if (is_null(this)) { + return false; /* do not seg fault */ + } + if (errors || is_terminated() || is_closed()) { + return false; + } + /* This probably won't work, but we vsnprintf, then if we + * get a negative length or a length greater than our buffer + * (depending on which library is used), the printf was truncated, so + * get a bigger buffer and try again. + */ + for (;;) { + maxlen = sizeof_pool_memory(msg) - 1; + va_start(arg_ptr, fmt); + msglen = bvsnprintf(msg, maxlen, fmt, arg_ptr); + va_end(arg_ptr); + if (msglen >= 0 && msglen < (maxlen - 5)) { + break; + } + msg = realloc_pool_memory(msg, maxlen + maxlen / 2); + } + return send(); +} + +/* + * Receive a data from the other end. + * The number of expected bytes in len. + * Returns number of bytes read (may return zero), the msglen is set accordingly. + * Returns -1 on error so msglen will be zero. 
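
fsend() above formats into a pool-memory buffer and, when bvsnprintf() reports truncation (a negative length or one near the buffer size), grows the buffer and formats again. A standalone sketch of the same grow-and-retry loop with plain malloc()/realloc(); format_message() is a hypothetical name and error handling is trimmed.

    // Standalone sketch of the grow-and-retry formatting loop behind fsend().
    #include <cstdarg>
    #include <cstdio>
    #include <cstdlib>

    static char *format_message(const char *fmt, ...)
    {
       size_t cap = 64;
       char *buf = (char *)malloc(cap);
       for (;;) {
          va_list ap;
          va_start(ap, fmt);
          int len = vsnprintf(buf, cap, fmt, ap);
          va_end(ap);
          if (len >= 0 && (size_t)len < cap) {
             return buf;                                  /* it fit */
          }
          cap = (len >= 0) ? (size_t)len + 1 : cap * 2;   /* grow and retry */
          buf = (char *)realloc(buf, cap);
       }
    }

    int main()
    {
       char *msg = format_message("Hello %s 2000 OK %d\n", "Director", 42);
       fputs(msg, stdout);
       free(msg);
       return 0;
    }
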
+ */ +int32_t BSOCKCORE::recvn(int len) +{ + /* The method has to be redesigned from scratch */ + int32_t nbytes; + bool locked = false; + + msglen = nbytes = 0; + msg[msglen] = 0; + if (errors || is_terminated() || is_closed()) { + /* error, cannot receive */ + return -1; + } + + if (len > 0) { + /* do read only when len > 0 */ + if (m_use_locking) { + pP(pm_rmutex); + locked = true; + } + read_seqno++; /* bump sequence number */ + timer_start = watchdog_time; /* set start wait time */ + clear_timed_out(); + /* Make sure the buffer is big enough + one byte for EOS */ + if (len >= (int32_t) sizeof_pool_memory(msg)) { + msg = realloc_pool_memory(msg, len + 100); + } + timer_start = watchdog_time; /* set start wait time */ + clear_timed_out(); + if ((nbytes = read_nbytes(msg, len)) <= 0) { + timer_start = 0; /* clear timer */ + /* probably pipe broken because client died */ + if (errno == 0) { + b_errno = ENODATA; + } else { + b_errno = errno; + } + nbytes = -1; + errors++; + msglen = 0; /* assume hard EOF received */ + Qmsg4(m_jcr, M_ERROR, 0, _("Read error from %s:%s:%d: ERR=%s\n"), + m_who, m_host, m_port, this->bstrerror()); + goto bailout; + } + timer_start = 0; /* clear timer */ + in_msg_no++; + msglen = nbytes; + /* + * always add a zero by to properly terminate any + * string that was send to us. Note, we ensured above that the + * buffer is at least one byte longer than the message length. + */ + msg[nbytes] = 0; /* terminate in case it is a string */ + /* + * The following uses *lots* of resources so turn it on only for + * serious debugging. + */ + Dsm_check(300); + } + +bailout: + if ((chk_dbglvl(DT_NETWORK|1900))) dump_bsock_msg(m_fd, read_seqno, "GRECV", nbytes, len, m_flags, msg, msglen); + + if (locked) pV(pm_rmutex); + return nbytes; /* return actual length of message or -1 */ +} + +/* + * Return the string for the error that occurred + * on the socket. Only the first error is retained. + */ +const char *BSOCKCORE::bstrerror() +{ + berrno be; + if (errmsg == NULL) { + errmsg = get_pool_memory(PM_MESSAGE); + } + if (b_errno == 0) { + pm_strcpy(errmsg, "I/O Error"); + } else { + pm_strcpy(errmsg, be.bstrerror(b_errno)); + } + return errmsg; +} + +int BSOCKCORE::get_peer(char *buf, socklen_t buflen) +{ +#if !defined(HAVE_WIN32) + if (peer_addr.sin_family == 0) { + socklen_t salen = sizeof(peer_addr); + int rval = (getpeername)(m_fd, (struct sockaddr *)&peer_addr, &salen); + if (rval < 0) return rval; + } + if (!inet_ntop(peer_addr.sin_family, &peer_addr.sin_addr, buf, buflen)) + return -1; + + return 0; +#else + return -1; +#endif +} + +/* + * Set the network buffer size, suggested size is in size. + * Actual size obtained is returned in bs->msglen + * + * Returns: false on failure + * true on success + */ +bool BSOCKCORE::set_buffer_size(uint32_t size, int rw) +{ + uint32_t dbuf_size, start_size; + +#if defined(IP_TOS) && defined(IPTOS_THROUGHPUT) + int opt; + opt = IPTOS_THROUGHPUT; + setsockopt(m_fd, IPPROTO_IP, IP_TOS, (sockopt_val_t)&opt, sizeof(opt)); +#endif + + if (size != 0) { + dbuf_size = size; + } else { + dbuf_size = DEFAULT_NETWORK_BUFFER_SIZE; + } + start_size = dbuf_size; + /* The extra 512 can hold data such as Sparse/Offset pointer */ + if ((msg = realloc_pool_memory(msg, dbuf_size + 512)) == NULL) { + Qmsg0(get_jcr(), M_FATAL, 0, _("Could not malloc BSOCKCORE data buffer\n")); + return false; + } + + /* + * If user has not set the size, use the OS default -- i.e. do not + * try to set it. 
This allows sys admins to set the size they + * want in the OS, and Bacula will comply. See bug #1493 + */ + if (size == 0) { + msglen = dbuf_size; + return true; + } + + if (rw & BNET_SETBUF_READ) { + while ((dbuf_size > TAPE_BSIZE) && (setsockopt(m_fd, SOL_SOCKET, + SO_RCVBUF, (sockopt_val_t) & dbuf_size, sizeof(dbuf_size)) < 0)) { + berrno be; + Qmsg1(get_jcr(), M_ERROR, 0, _("sockopt error: %s\n"), be.bstrerror()); + dbuf_size -= TAPE_BSIZE; + } + Dmsg1(200, "set network buffer size=%d\n", dbuf_size); + if (dbuf_size != start_size) { + Qmsg1(get_jcr(), M_WARNING, 0, + _("Warning network buffer = %d bytes not max size.\n"), dbuf_size); + } + } + if (size != 0) { + dbuf_size = size; + } else { + dbuf_size = DEFAULT_NETWORK_BUFFER_SIZE; + } + start_size = dbuf_size; + if (rw & BNET_SETBUF_WRITE) { + while ((dbuf_size > TAPE_BSIZE) && (setsockopt(m_fd, SOL_SOCKET, + SO_SNDBUF, (sockopt_val_t) & dbuf_size, sizeof(dbuf_size)) < 0)) { + berrno be; + Qmsg1(get_jcr(), M_ERROR, 0, _("sockopt error: %s\n"), be.bstrerror()); + dbuf_size -= TAPE_BSIZE; + } + Dmsg1(900, "set network buffer size=%d\n", dbuf_size); + if (dbuf_size != start_size) { + Qmsg1(get_jcr(), M_WARNING, 0, + _("Warning network buffer = %d bytes not max size.\n"), dbuf_size); + } + } + + msglen = dbuf_size; + return true; +} + +/* + * Set socket non-blocking + * Returns previous socket flag + */ +int BSOCKCORE::set_nonblocking() +{ + int oflags; + + /* Get current flags */ + if ((oflags = fcntl(m_fd, F_GETFL, 0)) < 0) { + berrno be; + Qmsg1(get_jcr(), M_ABORT, 0, _("fcntl F_GETFL error. ERR=%s\n"), be.bstrerror()); + } + + /* Set O_NONBLOCK flag */ + if ((fcntl(m_fd, F_SETFL, oflags|O_NONBLOCK)) < 0) { + berrno be; + Qmsg1(get_jcr(), M_ABORT, 0, _("fcntl F_SETFL error. ERR=%s\n"), be.bstrerror()); + } + + m_blocking = 0; + return oflags; +} + +/* + * Set socket blocking + * Returns previous socket flags + */ +int BSOCKCORE::set_blocking() +{ + int oflags; + /* Get current flags */ + if ((oflags = fcntl(m_fd, F_GETFL, 0)) < 0) { + berrno be; + Qmsg1(get_jcr(), M_ABORT, 0, _("fcntl F_GETFL error. ERR=%s\n"), be.bstrerror()); + } + + /* Set O_NONBLOCK flag */ + if ((fcntl(m_fd, F_SETFL, oflags & ~O_NONBLOCK)) < 0) { + berrno be; + Qmsg1(get_jcr(), M_ABORT, 0, _("fcntl F_SETFL error. ERR=%s\n"), be.bstrerror()); + } + + m_blocking = 1; + return oflags; +} + +void BSOCKCORE::set_killable(bool killable) +{ + if (m_jcr) { + m_jcr->set_killable(killable); + } +} + +/* + * Restores socket flags + */ +void BSOCKCORE::restore_blocking (int flags) +{ + if ((fcntl(m_fd, F_SETFL, flags)) < 0) { + berrno be; + Qmsg1(get_jcr(), M_ABORT, 0, _("fcntl F_SETFL error. ERR=%s\n"), be.bstrerror()); + } + + m_blocking = (flags & O_NONBLOCK) ? true : false; +} + +/* + * Wait for a specified time for data to appear on + * the BSOCKCORE connection. 
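
set_nonblocking(), set_blocking() and restore_blocking() above all follow the classic fcntl() pattern: read the current flags, set or clear O_NONBLOCK, and hand the old flags back so the caller can restore the previous state. A standalone illustration of that save/set/restore sequence (stdin is used only so the example runs as-is):

    // Standalone sketch of the O_NONBLOCK save/set/restore pattern.
    #include <fcntl.h>
    #include <cstdio>

    int main()
    {
       int fd = 0;                               /* stdin, just for illustration */
       int oflags = fcntl(fd, F_GETFL, 0);       /* remember the current flags   */
       if (oflags < 0) { perror("F_GETFL"); return 1; }

       if (fcntl(fd, F_SETFL, oflags | O_NONBLOCK) < 0) {   /* go non-blocking */
          perror("F_SETFL");
          return 1;
       }

       /* ... non-blocking I/O would happen here ... */

       if (fcntl(fd, F_SETFL, oflags) < 0) {     /* restore the previous state */
          perror("F_SETFL restore");
          return 1;
       }
       return 0;
    }
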
+ * + * Returns: 1 if data available + * 0 if timeout + * -1 if error + */ +int BSOCKCORE::wait_data(int sec, int msec) +{ + for (;;) { + switch (fd_wait_data(m_fd, WAIT_READ, sec, msec)) { + case 0: /* timeout */ + b_errno = 0; + return 0; + case -1: + b_errno = errno; + if (errno == EINTR) { + continue; + } + return -1; /* error return */ + default: + b_errno = 0; +#ifdef HAVE_TLS + if (this->tls && !tls_bsock_probe(this)) { + continue; /* false alarm, maybe a session key negotiation in progress on the socket */ + } +#endif + return 1; + } + } +} + +/* + * As above, but returns on interrupt + */ +int BSOCKCORE::wait_data_intr(int sec, int msec) +{ + switch (fd_wait_data(m_fd, WAIT_READ, sec, msec)) { + case 0: /* timeout */ + b_errno = 0; + return 0; + case -1: + b_errno = errno; + return -1; /* error return */ + default: + b_errno = 0; +#ifdef HAVE_TLS + if (this->tls && !tls_bsock_probe(this)) { + /* maybe a session key negotiation waked up the socket */ + return 0; + } +#endif + break; + } + return 1; +} + +/* + * This routine closes the current BSOCKCORE. + * It does not delete the socket packet + * resources, which are released in bsock->destroy(). + */ +#ifndef SHUT_RDWR +#define SHUT_RDWR 2 +#endif + +/* + * The JCR is canceled, set terminate for chained BSOCKCOREs starting from master + */ +void BSOCKCORE::cancel() +{ + master_lock(); + for (BSOCKCORE *next = m_master; next != NULL; next = next->m_next) { + if (!next->m_closed) { + next->m_terminated = true; + next->m_timed_out = true; + } + } + master_unlock(); +} + +/* + * Note, this routine closes the socket, but leaves the + * bsockcore memory in place. + * every thread is responsible of closing and destroying its own duped or not + * duped BSOCKCORE + */ +void BSOCKCORE::close() +{ + BSOCKCORE *bsock = this; + + Dmsg0(BSOCKCORE_DEBUG_LVL, "BSOCKCORE::close()\n"); + if (bsock->is_closed()) { + return; + } + if (!m_duped) { + clear_locking(); + } + bsock->set_closed(); + bsock->set_terminated(); + if (!bsock->m_duped) { + /* Shutdown tls cleanly. */ + if (bsock->tls) { + tls_bsock_shutdown(bsock); + free_tls_connection(bsock->tls); + bsock->tls = NULL; + } + +#ifdef HAVE_WIN32 + if (!bsock->is_timed_out()) { + win_close_wait(bsock->m_fd); /* Ensure that data is not discarded */ + } +#else + if (bsock->is_timed_out()) { + shutdown(bsock->m_fd, SHUT_RDWR); /* discard any pending I/O */ + } +#endif + /* On Windows this discards data if we did not do a close_wait() */ + socketClose(bsock->m_fd); /* normal close */ + } + return; +} + +/* + * Destroy the socket (i.e. release all resources) + */ +void BSOCKCORE::_destroy() +{ + Dmsg0(BSOCKCORE_DEBUG_LVL, "BSOCKCORE::_destroy()\n"); + this->close(); /* Ensure that socket is closed */ + if (msg) { + free_pool_memory(msg); + msg = NULL; + } else { + ASSERT2(1 == 0, "Two calls to destroy socket"); /* double destroy */ + } + if (errmsg) { + free_pool_memory(errmsg); + errmsg = NULL; + } + if (m_who) { + free(m_who); + m_who = NULL; + } + if (m_host) { + free(m_host); + m_host = NULL; + } + if (src_addr) { + free(src_addr); + src_addr = NULL; + } +} + +/* + * Destroy the socket (i.e. release all resources) + * including duped sockets. 
+ * should not be called from duped BSOCKCORE + */ +void BSOCKCORE::destroy() +{ + Dmsg0(BSOCKCORE_DEBUG_LVL, "BSOCKCORE::destroy()\n"); + ASSERTD(reinterpret_cast(m_next) != 0xaaaaaaaaaaaaaaaa, "BSOCKCORE::destroy() already called\n") + ASSERTD(this == m_master, "BSOCKCORE::destroy() called by a non master BSOCKCORE\n") + ASSERTD(!m_duped, "BSOCKCORE::destroy() called by a duped BSOCKCORE\n") + /* I'm the master I must destroy() all the duped BSOCKCOREs */ + master_lock(); + BSOCKCORE *ahead; + for (BSOCKCORE *next = m_next; next != NULL; next = ahead) { + ahead = next->m_next; + Dmsg1(BSOCKCORE_DEBUG_LVL, "BSOCKCORE::destroy():delete(%p)\n", next); + delete(next); + } + master_unlock(); + Dmsg0(BSOCKCORE_DEBUG_LVL, "BSOCKCORE::destroy():delete(this)\n"); + delete(this); +} + +/* Try to limit the bandwidth of a network connection + */ +void BSOCKCORE::control_bwlimit(int bytes) +{ + btime_t now, temp; + if (bytes == 0) { + return; + } + + now = get_current_btime(); /* microseconds */ + temp = now - m_last_tick; /* microseconds */ + + m_nb_bytes += bytes; + + if (temp < 0 || temp > 10000000) { /* Take care of clock problems (>10s) or back in time */ + m_nb_bytes = bytes; + m_last_tick = now; + return; + } + + /* Less than 0.1ms since the last call, see the next time */ + if (temp < 100) { + return; + } + + /* Remove what was authorised to be written in temp us */ + m_nb_bytes -= (int64_t)(temp * ((double)m_bwlimit / 1000000.0)); + + if (m_nb_bytes < 0) { + m_nb_bytes = 0; + } + + /* What exceed should be converted in sleep time */ + int64_t usec_sleep = (int64_t)(m_nb_bytes /((double)m_bwlimit / 1000000.0)); + if (usec_sleep > 100) { + bmicrosleep(usec_sleep/1000000, usec_sleep%1000000); /* TODO: Check that bmicrosleep slept enough or sleep again */ + m_last_tick = get_current_btime(); + m_nb_bytes = 0; + } else { + m_last_tick = now; + } +} + +/* + * Write nbytes to the network. + * It may require several writes. + */ + +int32_t BSOCKCORE::write_nbytes(char *ptr, int32_t nbytes) +{ + int32_t nleft, nwritten; + +#ifdef HAVE_TLS + if (tls) { + /* TLS enabled */ + return (tls_bsock_writen((BSOCK*)this, ptr, nbytes)); + } +#endif /* HAVE_TLS */ + + nleft = nbytes; + while (nleft > 0) { + do { + errno = 0; + nwritten = socketWrite(m_fd, ptr, nleft); + if (is_timed_out() || is_terminated()) { + return -1; + } + +#ifdef HAVE_WIN32 + /* + * We simulate errno on Windows for a socket + * error in order to handle errors correctly. + */ + if (nwritten == SOCKET_ERROR) { + DWORD err = WSAGetLastError(); + nwritten = -1; + if (err == WSAEINTR) { + errno = EINTR; + } else if (err == WSAEWOULDBLOCK) { + errno = EAGAIN; + } else { + errno = EIO; /* some other error */ + } + } +#endif + + } while (nwritten == -1 && errno == EINTR); + /* + * If connection is non-blocking, we will get EAGAIN, so + * use select()/poll to keep from consuming all the CPU + * and try again. + */ + if (nwritten == -1 && errno == EAGAIN) { + fd_wait_data(m_fd, WAIT_WRITE, 1, 0); + continue; + } + if (nwritten <= 0) { + return -1; /* error */ + } + nleft -= nwritten; + ptr += nwritten; + if (use_bwlimit()) { + control_bwlimit(nwritten); + } + } + return nbytes - nleft; +} + +/* + * Read a nbytes from the network. 
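
write_nbytes() above (and read_nbytes() below) loop until the full count has been transferred, restarting on EINTR and waiting for the descriptor to become writable on EAGAIN (fd_wait_data() in the code above, poll() here) instead of spinning. A standalone sketch of that short-write handling; write_all() is a hypothetical name.

    // Standalone sketch of a "write exactly N bytes" loop with EINTR/EAGAIN handling.
    #include <unistd.h>
    #include <poll.h>
    #include <cerrno>

    static ssize_t write_all(int fd, const char *ptr, size_t nbytes)
    {
       size_t nleft = nbytes;
       while (nleft > 0) {
          ssize_t nwritten = write(fd, ptr, nleft);
          if (nwritten < 0) {
             if (errno == EINTR) {
                continue;                      /* interrupted: just retry */
             }
             if (errno == EAGAIN || errno == EWOULDBLOCK) {
                struct pollfd pfd = { fd, POLLOUT, 0 };
                poll(&pfd, 1, 1000);           /* wait up to 1s for writability */
                continue;
             }
             return -1;                        /* real error */
          }
          nleft -= (size_t)nwritten;
          ptr   += nwritten;
       }
       return (ssize_t)(nbytes - nleft);
    }

    int main()
    {
       const char msg[] = "hello\n";
       return write_all(1, msg, sizeof(msg) - 1) < 0 ? 1 : 0;
    }
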
+ * It is possible that the total bytes require in several + * read requests + */ + +int32_t BSOCKCORE::read_nbytes(char *ptr, int32_t nbytes) +{ + int32_t nleft, nread; + +#ifdef HAVE_TLS + if (tls) { + /* TLS enabled */ + return (tls_bsock_readn((BSOCK*)this, ptr, nbytes)); + } +#endif /* HAVE_TLS */ + + nleft = nbytes; + while (nleft > 0) { + errno = 0; + nread = socketRead(m_fd, ptr, nleft); + if (is_timed_out() || is_terminated()) { + return -1; + } + +#ifdef HAVE_WIN32 + /* + * We simulate errno on Windows for a socket + * error in order to handle errors correctly. + */ + if (nread == SOCKET_ERROR) { + DWORD err = WSAGetLastError(); + nread = -1; + if (err == WSAEINTR) { + errno = EINTR; + } else if (err == WSAEWOULDBLOCK) { + errno = EAGAIN; + } else { + errno = EIO; /* some other error */ + } + } +#endif + + if (nread == -1) { + if (errno == EINTR) { + continue; + } + if (errno == EAGAIN) { + bmicrosleep(0, 20000); /* try again in 20ms */ + continue; + } + } + if (nread <= 0) { + return -1; /* error, or EOF */ + } + nleft -= nread; + ptr += nread; + if (use_bwlimit()) { + control_bwlimit(nread); + } + } + return nbytes - nleft; /* return >= 0 */ +} + +#ifdef HAVE_WIN32 +/* + * closesocket is supposed to do a graceful disconnect under Window + * but it doesn't. Comments on http://msdn.microsoft.com/en-us/li + * confirm this behaviour. DisconnectEx is required instead, but + * that function needs to be retrieved via WS IOCTL + */ +static void +win_close_wait(int fd) +{ + int ret; + GUID disconnectex_guid = WSAID_DISCONNECTEX; + DWORD bytes_returned; + LPFN_DISCONNECTEX DisconnectEx; + ret = WSAIoctl(fd, SIO_GET_EXTENSION_FUNCTION_POINTER, &disconnectex_guid, sizeof(disconnectex_guid), &DisconnectEx, sizeof(DisconnectEx), &bytes_returned, NULL, NULL); + Dmsg1(100, "WSAIoctl(SIO_GET_EXTENSION_FUNCTION_POINTER, WSAID_DISCONNECTEX) ret = %d\n", ret); + if (!ret) { + DisconnectEx(fd, NULL, 0, 0); + } +} +#endif + +#ifndef TEST_PROGRAM +#define TEST_PROGRAM_A +#endif + +void BSOCKCORE::dump() +{ +#ifdef TEST_PROGRAM + char ed1[50]; + Pmsg1(-1, "BSOCKCORE::dump(): %p\n", this); + Pmsg1(-1, "\tmsg: %p\n", msg); + Pmsg1(-1, "\terrmsg: %p\n", errmsg); + Pmsg1(-1, "\tres: %p\n", res); + Pmsg1(-1, "\ttls: %p\n", tls); + Pmsg1(-1, "\tsrc_addr: %p\n", src_addr); + Pmsg1(-1, "\tread_seqno: %s\n", edit_uint64(read_seqno, ed1)); + Pmsg1(-1, "\tin_msg_no: %s\n", edit_uint64(in_msg_no, ed1)); + Pmsg1(-1, "\tout_msg_no: %s\n", edit_uint64(out_msg_no, ed1)); + Pmsg1(-1, "\tpout_msg_no: %p\n", pout_msg_no); + Pmsg1(-1, "\tmsglen: %s\n", edit_int64(msglen, ed1)); + Pmsg1(-1, "\ttimer_start: %ld\n", timer_start); + Pmsg1(-1, "\ttimeout: %ld\n", timeout); + Pmsg1(-1, "\tm_fd: %d\n", m_fd); + Pmsg1(-1, "\tb_errno: %d\n", b_errno); + Pmsg1(-1, "\tm_blocking: %d\n", m_blocking); + Pmsg1(-1, "\terrors: %d\n", errors); + Pmsg1(-1, "\tm_suppress_error_msgs: %s\n", m_suppress_error_msgs?"true":"false"); +// Pmsg1(0, "\tclient_addr:{ } struct sockaddr client_addr; /* client's IP address */ +// Pmsg1(0, "\tstruct sockaddr_in peer_addr; /* peer's IP address */ + Pmsg1(-1, "\tsend_hook_cb: %p\n", send_hook_cb); + Pmsg1(-1, "\tm_master: %p\n", m_master); + Pmsg1(-1, "\tm_next: %p\n", m_next); + Pmsg1(-1, "\tm_jcr: %p\n", m_jcr); +// pthread_mutex_t m_rmutex; /* for read locking if use_locking set */ +// pthread_mutex_t m_wmutex; /* for write locking if use_locking set */ +// mutable pthread_mutex_t m_mmutex; /* when accessing the master/next chain */ +// pthread_mutex_t *pm_rmutex; /* Pointer to the read mutex */ +// 
pthread_mutex_t *pm_wmutex; /* Pointer to the write mutex */ + Pmsg1(-1, "\tm_who: %p\n", m_who); + Pmsg1(-1, "\tm_host: %p\n", m_host); + Pmsg1(-1, "\tm_port: %d\n", m_port); + Pmsg1(-1, "\tm_tid: %p\n", m_tid); + Pmsg1(-1, "\tm_flags: %s\n", edit_uint64(m_flags, ed1)); + Pmsg1(-1, "\tm_timed_out: %s\n", m_timed_out?"true":"false"); + Pmsg1(-1, "\tm_terminated: %s\n", m_terminated?"true":"false"); + Pmsg1(-1, "\tm_closed: %s\n", m_closed?"true":"false"); + Pmsg1(-1, "\tm_duped: %s\n", m_duped?"true":"false"); + Pmsg1(-1, "\tm_use_locking: %s\n", m_use_locking?"true":"false"); + Pmsg1(-1, "\tm_bwlimit: %s\n", edit_int64(m_bwlimit, ed1)); + Pmsg1(-1, "\tm_nb_bytes: %s\n", edit_int64(m_nb_bytes, ed1)); + Pmsg1(-1, "\tm_last_tick: %s\n", edit_int64(m_last_tick, ed1)); +#endif +}; + + +#ifdef TEST_PROGRAM +#include "unittests.h" + +void free_my_jcr(JCR *jcr){ + /* TODO: handle full JCR free */ + free_jcr(jcr); +}; + +#define ofnamefmt "/tmp/bsockcore.%d.test" +const char *data = "This is a BSOCKCORE communication test: 1234567\n"; +const char *hexdata = "< 00000000 54 68 69 73 20 69 73 20 61 20 42 53 4f 43 4b 43 # This is a BSOCKC\n" \ + "< 00000010 4f 52 45 20 63 6f 6d 6d 75 6e 69 63 61 74 69 6f # ORE communicatio\n" \ + "< 00000020 6e 20 74 65 73 74 3a 20 31 32 33 34 35 36 37 0a # n test: 1234567.\n"; + +int main() +{ + Unittests bsockcore_test("bsockcore_test", true); + BSOCKCORE *bs; + pid_t pid; + int rc; + char *host = (char*)"localhost"; + char *name = (char*)"Test"; + JCR *jcr; + bool btest; + char buf[256]; // extend this buffer when hexdata becomes longer + int fd; + + Pmsg0(0, "Initialize tests ...\n"); + + jcr = new_jcr(sizeof(JCR), NULL); + bs = New(BSOCKCORE); + bs->set_jcr(jcr); + ok(bs != NULL && bs->jcr() == jcr, + "Default initialization"); + + Pmsg0(0, "Preparing fork\n"); + pid = fork(); + if (0 == pid){ + Pmsg0(0, "Prepare to execute netcat\n"); + pid_t mypid = getpid(); + char ofname[30]; + snprintf(ofname, sizeof(ofname), ofnamefmt, mypid); + rc = execl("/bin/netcat", "netcat", "-v", "-p", "20000", "-l", "-o", ofname, NULL); + Pmsg1(0, "Error executing netcat: %s\n", strerror(rc)); + exit(1); + } + Pmsg1(0, "After fork: %d\n", pid); + bmicrosleep(2, 0); // we wait a bit to netcat to start + btest = bs->connect(jcr, 1, 10, 0, name, host, NULL, 20000, 0); + ok(btest, "BSOCKCORE connection test"); + if (btest){ + /* we are connected, so send some data */ + bs->fsend("%s", data); + bmicrosleep(2, 0); // wait until data received by netcat + bs->close(); + ok(bs->is_closed(), "Close bsockcore"); + /* now check what netcat received */ + char ofname[30]; + snprintf(ofname, sizeof(ofname), ofnamefmt, pid); + fd = open(ofname, O_RDONLY); + btest = false; + if (fd > 0){ + btest = true; + read(fd, buf, strlen(hexdata)); + close(fd); + unlink(ofname); + } + ok(btest, "Output file available"); + ok(strcmp(buf, hexdata) == 0, "Communication data"); + } + kill(pid, SIGTERM); + delete(bs); + free_my_jcr(jcr); + term_last_jobs_list(); + return report(); +}; +#endif /* TEST_PROGRAM */ diff --git a/src/lib/bsockcore.h b/src/lib/bsockcore.h new file mode 100644 index 00000000..a9cba851 --- /dev/null +++ b/src/lib/bsockcore.h @@ -0,0 +1,219 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula Core Sock Class definition + * + * Kern Sibbald, May MM + * + * Major refactoring of BSOCK code written by: + * + * Radosław Korzeniewski, MMXVIII + * radoslaw@korzeniewski.net, radekk@inteos.pl + * Inteos Sp. z o.o. http://www.inteos.pl/ + * + * This is a common class for socket network communication derived from + * BSOCK class. It acts as a base class for non-Bacula network communication + * and as a base class for standard BSOCK implementation. Basically the BSOCK + * class did not changed its functionality for any Bacula specific part. + * Now you can use a BSOCKCLASS for other network communication. + */ + +#ifndef __BSOCKCORE_H_ +#define __BSOCKCORE_H_ + +#define BSOCKCORE_TIMEOUT 3600 * 24 * 5; /* default 5 days */ + +struct btimer_t; /* forward reference */ +class BSOCKCORE; +btimer_t *start_bsock_timer(BSOCKCORE *bs, uint32_t wait); +void stop_bsock_timer(btimer_t *wid); +void dump_bsock_msg(int sock, uint32_t msgno, const char *what, uint32_t rc, int32_t pktsize, uint32_t flags, + POOLMEM *msg, int32_t msglen); + +class BSOCKCallback { +public: + BSOCKCallback(); + virtual ~BSOCKCallback(); + virtual bool bsock_send_cb() = 0; +}; + +class BSOCKCORE: public SMARTALLOC { +/* + * Note, keep this public part before the private otherwise + * bat breaks on some systems such as RedHat. + */ +public: + POOLMEM *msg; /* message pool buffer */ + POOLMEM *errmsg; /* edited error message */ + RES *res; /* Resource to which we are connected */ + TLS_CONNECTION *tls; /* associated tls connection */ + IPADDR *src_addr; /* IP address to source connections from */ + uint64_t read_seqno; /* read sequence number */ + uint32_t in_msg_no; /* input message number */ + uint32_t out_msg_no; /* output message number */ + uint32_t *pout_msg_no; /* pointer to the above */ + int32_t msglen; /* message length */ + volatile time_t timer_start; /* time started read/write */ + volatile time_t timeout; /* timeout BSOCKCORE after this interval */ + int m_fd; /* socket file descriptor */ + int b_errno; /* bsockcore errno */ + int m_blocking; /* blocking state (0 = nonblocking, 1 = blocking) */ + volatile int errors; /* incremented for each error on socket */ + volatile bool m_suppress_error_msgs; /* set to suppress error messages */ + /* when "installed", send_hook_cb->bsock_send_cb() is called before + * any ::send(). */ + BSOCKCallback *send_hook_cb; + struct sockaddr client_addr; /* client's IP address */ + struct sockaddr_in peer_addr; /* peer's IP address */ + +protected: + /* m_master is used by "duped" BSOCKCORE to access some attributes of the "parent" + * thread to have an up2date status (for example when the job is canceled, + * the "parent" BSOCKCORE is "terminated", but the duped BSOCKCORE is unchanged) + * In the future more attributes and method could use the "m_master" + * indirection. + * master->m_rmutex could replace pm_rmutex, idem for the (w)rite" mutex + * "m_master->error" should be incremented instead of "error", but + * this require a lock. + * + * USAGE: the parent thread MUST be sure that the child thread have quit + * before to free the "parent" BSOCKCORE. 
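
The m_master/m_next bookkeeping described in the comment that follows forms a singly linked chain: every duped socket is inserted behind its master, and cancel() walks the chain from the master to flag each member. A much-reduced standalone sketch of that structure (hypothetical type and field names, no locking, not Bacula code):

    // Standalone sketch of the master/dup chain and chain-wide cancellation.
    #include <cstdio>

    struct Sock {
       Sock *master;              // "this" for the master, the parent otherwise
       Sock *next;                // next duped socket in the chain
       bool  terminated;
       Sock() : master(this), next(nullptr), terminated(false) {}
       void dup_from(Sock *m) {   // mirrors set_master(): insert behind the master
          master = m;
          next = m->next;
          m->next = this;
       }
       void cancel() {            // mirrors cancel(): flag the whole chain
          for (Sock *s = master; s != nullptr; s = s->next) {
             s->terminated = true;
          }
       }
    };

    int main()
    {
       Sock parent, dup1, dup2;
       dup1.dup_from(&parent);
       dup2.dup_from(&parent);
       dup1.cancel();             // cancelling via any member reaches them all
       printf("parent=%d dup1=%d dup2=%d\n",
              parent.terminated, dup1.terminated, dup2.terminated);
       return 0;
    }
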
+ */ + BSOCKCORE *m_next; /* next BSOCKCORE if duped (not actually used) */ + JCR *m_jcr; /* jcr or NULL for error msgs */ + pthread_mutex_t m_rmutex; /* for read locking if use_locking set */ + pthread_mutex_t m_wmutex; /* for write locking if use_locking set */ + mutable pthread_mutex_t m_mmutex; /* when accessing the master/next chain */ + pthread_mutex_t *pm_rmutex; /* Pointer to the read mutex */ + pthread_mutex_t *pm_wmutex; /* Pointer to the write mutex */ + char *m_who; /* Name of daemon to which we are talking */ + char *m_host; /* Host name/IP */ + int m_port; /* desired port */ + btimer_t *m_tid; /* timer id */ + uint32_t m_flags; /* Special flags */ + volatile bool m_timed_out: 1; /* timed out in read/write */ + volatile bool m_terminated: 1; /* set when BNET_TERMINATE arrives */ + bool m_closed: 1; /* set when socket is closed */ + bool m_duped: 1; /* set if duped BSOCKCORE */ + bool m_use_locking; /* set to use locking (out of a bitfield */ + /* to avoid race conditions) */ + int64_t m_bwlimit; /* set to limit bandwidth */ + int64_t m_nb_bytes; /* bytes sent/recv since the last tick */ + btime_t m_last_tick; /* last tick used by bwlimit */ + + void fin_init(JCR * jcr, int sockfd, const char *who, const char *host, int port, + struct sockaddr *lclient_addr); + virtual bool open(JCR *jcr, const char *name, char *host, char *service, + int port, utime_t heart_beat, int *fatal); + void master_lock() const { if (m_use_locking) pP((&m_mmutex)); }; + void master_unlock() const { if (m_use_locking) pV((&m_mmutex)); }; + virtual void init(); + void _destroy(); /* called by destroy() */ + virtual int32_t write_nbytes(char *ptr, int32_t nbytes); + virtual int32_t read_nbytes(char *ptr, int32_t nbytes); + +public: + BSOCKCORE *m_master; /* "this" or the "parent" BSOCK if duped */ + /* methods -- in bsockcore.c */ + BSOCKCORE(); + virtual ~BSOCKCORE(); + void free_tls(); + bool connect(JCR * jcr, int retry_interval, utime_t max_retry_time, + utime_t heart_beat, const char *name, char *host, + char *service, int port, int verbose); + virtual int32_t recvn(int /*len*/); + virtual bool send(); + bool fsend(const char*, ...); + void close(); /* close connection and destroy packet */ + void destroy(); /* destroy socket packet */ + const char *bstrerror(); /* last error on socket */ + int get_peer(char *buf, socklen_t buflen); + bool set_buffer_size(uint32_t size, int rw); + int set_nonblocking(); + int set_blocking(); + void restore_blocking(int flags); + void set_killable(bool killable); + int wait_data(int sec, int msec=0); + int wait_data_intr(int sec, int msec=0); + bool set_locking(); + void clear_locking(); + void set_source_address(dlist *src_addr_list); + void control_bwlimit(int bytes); + + /* Inline functions */ + void suppress_error_messages(bool flag) { m_suppress_error_msgs = flag; }; + void set_jcr(JCR *jcr) { m_jcr = jcr; }; + void set_who(char *who) { m_who = who; }; + void set_host(char *host) { m_host = host; }; + void set_port(int port) { m_port = port; }; + char *who() const { return m_who; }; + char *host() const { return m_host; }; + int port() const { return m_port; }; + JCR *jcr() const { return m_jcr; }; + JCR *get_jcr() const { return m_jcr; }; + bool is_duped() const { return m_duped; }; + bool is_terminated() const { return m_terminated; }; + bool is_timed_out() const { return m_timed_out; }; + bool is_closed() const { return m_closed; }; + bool is_open() const { return !m_closed; }; + bool is_stop() const { return errors || is_terminated() || is_closed(); }; + bool 
is_error() { errno = b_errno; return errors; }; + void set_bwlimit(int64_t maxspeed) { m_bwlimit = maxspeed; }; + bool use_bwlimit() { return m_bwlimit > 0;}; + void set_duped() { m_duped = true; }; + void set_master(BSOCKCORE *master) { + master_lock(); + m_master = master; + m_next = master->m_next; + master->m_next = this; + master_unlock(); + }; + void set_timed_out() { m_timed_out = true; }; + void clear_timed_out() { m_timed_out = false; }; + void set_terminated() { m_terminated = true; }; + void set_closed() { m_closed = true; }; + void start_timer(int sec) { m_tid = start_bsock_timer(this, sec); }; + void stop_timer() { stop_bsock_timer(m_tid); }; + void swap_msgs(); + void install_send_hook_cb(BSOCKCallback *obj) { send_hook_cb=obj; }; + void uninstall_send_hook_cb() { send_hook_cb=NULL; }; + void cancel(); /* call it when JCR is canceled */ +#ifdef HAVE_WIN32 + int socketRead(int fd, void *buf, size_t len) { return ::recv(fd, (char *)buf, len, 0); }; + int socketWrite(int fd, void *buf, size_t len) { return ::send(fd, (const char*)buf, len, 0); }; + int socketClose(int fd) { return ::closesocket(fd); }; +#else + int socketRead(int fd, void *buf, size_t len) { return ::read(fd, buf, len); }; + int socketWrite(int fd, void *buf, size_t len) { return ::write(fd, buf, len); }; + int socketClose(int fd) { return ::close(fd); }; +#endif + void dump(); +}; + +/* + * Completely release the socket packet, and NULL the pointer + */ +#define free_bsockcore(a) do{if(a){(a)->destroy(); (a)=NULL;}} while(0) + +/* + * Does the socket exist and is it open? + */ +#define is_bsockcore_open(a) ((a) && (a)->is_open()) + +#endif /* __BSOCKCORE_H_ */ diff --git a/src/lib/bsys.c b/src/lib/bsys.c new file mode 100644 index 00000000..d69e3b2d --- /dev/null +++ b/src/lib/bsys.c @@ -0,0 +1,1468 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Miscellaneous Bacula memory and thread safe routines + * Generally, these are interfaces to system or standard + * library routines. + * + * Bacula utility functions are in util.c + * + */ + +#include "bacula.h" +#ifndef HAVE_REGEX_H +#include "lib/bregex.h" +#else +#include +#endif + +static pthread_mutex_t timer_mutex = PTHREAD_MUTEX_INITIALIZER; +static pthread_cond_t timer = PTHREAD_COND_INITIALIZER; + +/* bacula => Bacula + * Works only for standard ASCII strings + */ +char *ucfirst(char *dst, const char *src, int len) +{ + int i=0; + len--; /* Keep the last byte for \0 */ + for (i=0; src[i] && i < len ; i++) { + dst[i] = (i == 0) ? 
toupper(src[i]) : tolower(src[i]); + } + dst[i] = 0; + return dst; +} + +/* + * Quote a string + */ +POOLMEM *quote_string(POOLMEM *snew, const char *old) +{ + char *n; + int i; + + if (!old) { + strcpy(snew, "null"); + return snew; + } + n = snew; + *n++ = '"'; + for (i=0; old[i]; i++) { + switch (old[i]) { + case '"': + *n++ = '\\'; + *n++ = '"'; + break; + case '\\': + *n++ = '\\'; + *n++ = '\\'; + break; + case '\r': + *n++ = '\\'; + *n++ = 'r'; + break; + case '\n': + *n++ = '\\'; + *n++ = 'n'; + break; + default: + *n++ = old[i]; + break; + } + } + *n++ = '"'; + *n = 0; + return snew; +} + +/* + * Quote a where (list of addresses separated by spaces) + */ +POOLMEM *quote_where(POOLMEM *snew, const char *old) +{ + char *n; + int i; + + if (!old) { + strcpy(snew, "null"); + return snew; + } + n = snew; + *n++ = '"'; + for (i=0; old[i]; i++) { + switch (old[i]) { + case ' ': + *n++ = '"'; + *n++ = ','; + *n++ = '"'; + break; + case '"': + *n++ = '\\'; + *n++ = '"'; + break; + case '\\': + *n++ = '\\'; + *n++ = '\\'; + break; + default: + *n++ = old[i]; + break; + } + } + *n++ = '"'; + *n = 0; + return snew; +} + +/* + * This routine is a somewhat safer unlink in that it + * allows you to run a regex on the filename before + * excepting it. It also requires the file to be in + * the working directory. + */ +int safer_unlink(const char *pathname, const char *regx) +{ + int rc; + regex_t preg1; + char prbuf[500]; + const int nmatch = 30; + regmatch_t pmatch[nmatch]; + int rtn; + + /* Name must start with working directory */ + if (strncmp(pathname, working_directory, strlen(working_directory)) != 0) { + Pmsg1(000, "Safe_unlink excluded: %s\n", pathname); + return EROFS; + } + + /* Compile regex expression */ + rc = regcomp(&preg1, regx, REG_EXTENDED); + if (rc != 0) { + regerror(rc, &preg1, prbuf, sizeof(prbuf)); + Pmsg2(000, _("safe_unlink could not compile regex pattern \"%s\" ERR=%s\n"), + regx, prbuf); + return ENOENT; + } + + /* Unlink files that match regexes */ + if (regexec(&preg1, pathname, nmatch, pmatch, 0) == 0) { + Dmsg1(100, "safe_unlink unlinking: %s\n", pathname); + rtn = unlink(pathname); + } else { + Pmsg2(000, "safe_unlink regex failed: regex=%s file=%s\n", regx, pathname); + rtn = EROFS; + } + regfree(&preg1); + return rtn; +} + +/* + * This routine will sleep (sec, microsec). Note, however, that if a + * signal occurs, it will return early. It is up to the caller + * to recall this routine if he/she REALLY wants to sleep the + * requested time. 
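
quote_string() above wraps a string in double quotes and backslash-escapes quotes, backslashes and CR/LF so the result can travel safely inside a command. A standalone sketch of the same escaping using std::string instead of pool memory; quote() is a hypothetical helper.

    // Standalone sketch of the quote_string() escaping rules.
    #include <cstdio>
    #include <string>

    static std::string quote(const char *old)
    {
       if (!old) return "null";
       std::string out = "\"";
       for (const char *p = old; *p; p++) {
          switch (*p) {
          case '"':  out += "\\\""; break;
          case '\\': out += "\\\\"; break;
          case '\r': out += "\\r";  break;
          case '\n': out += "\\n";  break;
          default:   out += *p;     break;
          }
       }
       out += '"';
       return out;
    }

    int main()
    {
       printf("%s\n", quote("say \"hello\"\n").c_str());   /* -> "say \"hello\"\n" */
       return 0;
    }
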
+ */ +int bmicrosleep(int32_t sec, int32_t usec) +{ + struct timespec timeout; + struct timeval tv; + struct timezone tz; + int stat; + + timeout.tv_sec = sec; + timeout.tv_nsec = usec * 1000; + +#ifdef HAVE_NANOSLEEP + stat = nanosleep(&timeout, NULL); + if (!(stat < 0 && errno == ENOSYS)) { + return stat; + } + /* If we reach here it is because nanosleep is not supported by the OS */ +#endif + + /* Do it the old way */ + gettimeofday(&tv, &tz); + timeout.tv_nsec += tv.tv_usec * 1000; + timeout.tv_sec += tv.tv_sec; + while (timeout.tv_nsec >= 1000000000) { + timeout.tv_nsec -= 1000000000; + timeout.tv_sec++; + } + + Dmsg2(200, "pthread_cond_timedwait sec=%d usec=%d\n", sec, usec); + /* Note, this unlocks mutex during the sleep */ + P(timer_mutex); + stat = pthread_cond_timedwait(&timer, &timer_mutex, &timeout); + if (stat != 0) { + berrno be; + Dmsg2(200, "pthread_cond_timedwait stat=%d ERR=%s\n", stat, + be.bstrerror(stat)); + } + V(timer_mutex); + return stat; +} + +/* + * Guarantee that the string is properly terminated */ +char *bstrncpy(char *dest, const char *src, int maxlen) +{ + strncpy(dest, src, maxlen-1); + dest[maxlen-1] = 0; + return dest; +} + +/* + * Guarantee that the string is properly terminated */ +char *bstrncpy(char *dest, POOL_MEM &src, int maxlen) +{ + strncpy(dest, src.c_str(), maxlen-1); + dest[maxlen-1] = 0; + return dest; +} + +/* + * Note: Here the maxlen is the maximum length permitted + * stored in dest, while on Unix systems, it is the maximum characters + * that may be copied from src. + */ +char *bstrncat(char *dest, const char *src, int maxlen) +{ + int len = strlen(dest); + if (len < maxlen-1) { + strncpy(dest+len, src, maxlen-len-1); + } + dest[maxlen-1] = 0; + return dest; +} + +/* + * Note: Here the maxlen is the maximum length permitted + * stored in dest, while on Unix systems, it is the maximum characters + * that may be copied from src. 
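
The note above is worth stressing: for bstrncpy()/bstrncat() the maxlen argument is the total size of dest and the result is always NUL-terminated, whereas libc strncat() takes the number of characters to append from src and can still overflow a small buffer. A standalone comparison using a local copy of the same logic (my_bstrncat() is a hypothetical name):

    // Standalone demo of the "maxlen is the destination size" contract.
    #include <cstdio>
    #include <cstring>

    static char *my_bstrncat(char *dest, const char *src, int maxlen)
    {
       int len = (int)strlen(dest);
       if (len < maxlen - 1) {
          strncpy(dest + len, src, maxlen - len - 1);
       }
       dest[maxlen - 1] = 0;        /* always terminated, never past the buffer */
       return dest;
    }

    int main()
    {
       char buf[8] = "abc";
       my_bstrncat(buf, "defghij", sizeof(buf));       /* never writes past buf[7] */
       printf("'%s' (len=%zu)\n", buf, strlen(buf));   /* -> 'abcdefg' (len=7)     */
       return 0;
    }
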
+ */ +char *bstrncat(char *dest, POOL_MEM &src, int maxlen) +{ + int len = strlen(dest); + if (len < maxlen-1) { + strncpy(dest+len, src.c_str(), maxlen-len-1); + } + dest[maxlen-1] = 0; + return dest; +} + +/* + * Allows one or both pointers to be NULL + */ +bool bstrcmp(const char *s1, const char *s2) +{ + if (s1 == s2) return true; + if (s1 == NULL || s2 == NULL) return false; + return strcmp(s1, s2) == 0; +} + +/* + * Allows one or both pointers to be NULL + */ +bool bstrcasecmp(const char *s1, const char *s2) +{ + if (s1 == s2) return true; + if (s1 == NULL || s2 == NULL) return false; + return strcasecmp(s1, s2) == 0; +} + + +/* + * Get character length of UTF-8 string + * + * Valid UTF-8 codes + * U-00000000 - U-0000007F: 0xxxxxxx + * U-00000080 - U-000007FF: 110xxxxx 10xxxxxx + * U-00000800 - U-0000FFFF: 1110xxxx 10xxxxxx 10xxxxxx + * U-00010000 - U-001FFFFF: 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + * U-00200000 - U-03FFFFFF: 111110xx 10xxxxxx 10xxxxxx 10xxxxxx 10xxxxxx + * U-04000000 - U-7FFFFFFF: 1111110x 10xxxxxx 10xxxxxx 10xxxxxx 10xxxxxx 10xxxxxx + */ +int cstrlen(const char *str) +{ + uint8_t *p = (uint8_t *)str; + int len = 0; + if (str == NULL) { + return 0; + } + while (*p) { + if ((*p & 0xC0) != 0xC0) { + p++; + len++; + continue; + } + if ((*p & 0xD0) == 0xC0) { + p += 2; + len++; + continue; + } + if ((*p & 0xF0) == 0xD0) { + p += 3; + len++; + continue; + } + if ((*p & 0xF8) == 0xF0) { + p += 4; + len++; + continue; + } + if ((*p & 0xFC) == 0xF8) { + p += 5; + len++; + continue; + } + if ((*p & 0xFE) == 0xFC) { + p += 6; + len++; + continue; + } + p++; /* Shouln't get here but must advance */ + } + return len; +} + +/* We need to disable the malloc() macro if SMARTALLOC is not used, + * else, it points to b_malloc() and causes problems. + */ +#ifndef SMARTALLOC + #ifdef malloc + #undef malloc + #endif +#endif + +#ifndef bmalloc +void *bmalloc(size_t size) +{ + void *buf; + +#ifdef SMARTALLOC + buf = sm_malloc(file, line, size); +#else + buf = malloc(size); +#endif + if (buf == NULL) { + berrno be; + Emsg1(M_ABORT, 0, _("Out of memory: ERR=%s\n"), be.bstrerror()); + } + return buf; +} +#endif + +void *b_malloc(const char *file, int line, size_t size) +{ + void *buf; + +#ifdef SMARTALLOC + buf = sm_malloc(file, line, size); +#else + buf = malloc(size); +#endif + if (buf == NULL) { + berrno be; + e_msg(file, line, M_ABORT, 0, _("Out of memory: ERR=%s\n"), be.bstrerror()); + } + return buf; +} + + +void bfree(void *buf) +{ +#ifdef SMARTALLOC + sm_free(__FILE__, __LINE__, buf); +#else + free(buf); +#endif +} + +void *brealloc (void *buf, size_t size) +{ +#ifdef SMARTALOC + buf = sm_realloc(__FILE__, __LINE__, buf, size); +#else + buf = realloc(buf, size); +#endif + if (buf == NULL) { + berrno be; + Emsg1(M_ABORT, 0, _("Out of memory: ERR=%s\n"), be.bstrerror()); + } + return buf; +} + + +void *bcalloc(size_t size1, size_t size2) +{ + void *buf; + + buf = calloc(size1, size2); + if (buf == NULL) { + berrno be; + Emsg1(M_ABORT, 0, _("Out of memory: ERR=%s\n"), be.bstrerror()); + } + return buf; +} + +/* Code now in src/lib/bsnprintf.c */ +#ifndef USE_BSNPRINTF + +#define BIG_BUF 5000 +/* + * Implement snprintf + */ +int bsnprintf(char *str, int32_t size, const char *fmt, ...) 
+{ + va_list arg_ptr; + int len; + + va_start(arg_ptr, fmt); + len = bvsnprintf(str, size, fmt, arg_ptr); + va_end(arg_ptr); + return len; +} + +/* + * Implement vsnprintf() + */ +int bvsnprintf(char *str, int32_t size, const char *format, va_list ap) +{ +#ifdef HAVE_VSNPRINTF + int len; + len = vsnprintf(str, size, format, ap); + str[size-1] = 0; + return len; + +#else + + int len, buflen; + char *buf; + buflen = size > BIG_BUF ? size : BIG_BUF; + buf = get_memory(buflen); + len = vsprintf(buf, format, ap); + if (len >= buflen) { + Emsg0(M_ABORT, 0, _("Buffer overflow.\n")); + } + memcpy(str, buf, len); + str[len] = 0; /* len excludes the null */ + free_memory(buf); + return len; +#endif +} +#endif /* USE_BSNPRINTF */ + +#ifndef HAVE_LOCALTIME_R + +struct tm *localtime_r(const time_t *timep, struct tm *tm) +{ + static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; + struct tm *ltm, + + P(mutex); + ltm = localtime(timep); + if (ltm) { + memcpy(tm, ltm, sizeof(struct tm)); + } + V(mutex); + return ltm ? tm : NULL; +} +#endif /* HAVE_LOCALTIME_R */ + +#ifndef HAVE_WIN32 +#include +/* + * This is bacula own readdir function, that should be used instead of any + * other function + * This function is thread safe. + * Not all supported systems have a thread safe readdir() function + * This is why we are using a mutex. + * + * The name of the "next" file or directory is returned into d_name + * that can be resized to fit the size of the entry + * + * return 0 for OK + * return -1 for EOF + * return >0 is for error, the value returned is errno +*/ +int breaddir(DIR *dirp, POOLMEM *&d_name) +{ + static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; + + P(mutex); + errno = 0; + struct dirent *d=readdir(dirp); + int ret = errno; + if (d != NULL) { + pm_strcpy(d_name, d->d_name); + ret=0; + } else { + ret = errno==0?-1:errno; // -1 for EOF or errno for error + } + V(mutex); + return ret; +} +#endif + +int b_strerror(int errnum, char *buf, size_t bufsiz) +{ + static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; + int stat = 0; + const char *msg; + + P(mutex); + + msg = strerror(errnum); + if (!msg) { + msg = _("Bad errno"); + stat = -1; + } + bstrncpy(buf, msg, bufsiz); + V(mutex); + return stat; +} + +#ifdef DEBUG_MEMSET +/* These routines are not normally turned on */ +#undef memset +void b_memset(const char *file, int line, void *mem, int val, size_t num) +{ + /* Testing for 2000 byte zero at beginning of Volume block */ + if (num > 1900 && num < 3000) { + Pmsg3(000, _("Memset for %d bytes at %s:%d\n"), (int)num, file, line); + } + memset(mem, val, num); +} +#endif + +#if !defined(HAVE_WIN32) +static int del_pid_file_ok = FALSE; +#endif +static int pid_fd = -1; + +#ifdef HAVE_FCNTL_LOCK +/* a convenient function [un]lock file using fnctl() + * code must be in F_UNLCK, F_RDLCK, F_WRLCK + * return -1 for error and errno is set + */ +int fcntl_lock(int fd, int code) +{ + struct flock l; + l.l_type = code; + l.l_whence = l.l_start = l.l_len = 0; + l.l_len = 1; + return fcntl(fd, F_SETLK, &l); +} +#endif + +/* Create a disk pid "lock" file + * returns + * 0: Error with the error message in errmsg + * 1: Succcess + * 2: Successs, but a previous file was found + */ +#if !defined(HAVE_FCNTL_LOCK) || defined(HAVE_WIN32) +int create_lock_file(char *fname, const char *progname, const char *filetype, POOLMEM **errmsg, int *fd) +{ + int ret = 1; +#if !defined(HAVE_WIN32) + int pidfd, len; + int oldpid; + char pidbuf[20]; + struct stat statp; + + if (stat(fname, &statp) == 0) { + /* File exists, see 
what we have */ + *pidbuf = 0; + if ((pidfd = open(fname, O_RDONLY|O_BINARY, 0)) < 0 || + read(pidfd, &pidbuf, sizeof(pidbuf)) < 0 || + sscanf(pidbuf, "%d", &oldpid) != 1) { + berrno be; + Mmsg(errmsg, _("Cannot open %s file. %s ERR=%s\n"), filetype, fname, + be.bstrerror()); + close(pidfd); /* if it was successfully opened */ + return 0; + } + /* Some OSes (IRIX) don't bother to clean out the old pid files after a crash, and + * since they use a deterministic algorithm for assigning PIDs, we can have + * pid conflicts with the old PID file after a reboot. + * The intent the following code is to check if the oldpid read from the pid + * file is the same as the currently executing process's pid, + * and if oldpid == getpid(), skip the attempt to + * kill(oldpid,0), since the attempt is guaranteed to succeed, + * but the success won't actually mean that there is an + * another Bacula process already running. + * For more details see bug #797. + */ + if ((oldpid != (int)getpid()) && (kill(oldpid, 0) != -1 || errno != ESRCH)) { + Mmsg(errmsg, _("%s is already running. pid=%d\nCheck file %s\n"), + progname, oldpid, fname); + return 0; + } + /* He is not alive, so take over file ownership */ + unlink(fname); /* remove stale pid file */ + ret = 2; + } + /* Create new pid file */ + if ((pidfd = open(fname, O_CREAT|O_TRUNC|O_WRONLY|O_BINARY, 0640)) >= 0) { + len = sprintf(pidbuf, "%d\n", (int)getpid()); + write(pidfd, pidbuf, len); + close(pidfd); + /* ret is already 1 */ + } else { + berrno be; + Mmsg(errmsg, _("Could not open %s file. %s ERR=%s\n"), filetype, fname, be.bstrerror()); + return 0; + } +#endif + return ret; +} +#else /* defined(HAVE_FCNTL_LOCK) */ +int create_lock_file(char *fname, const char *progname, const char *filetype, POOLMEM **errmsg, int *fd) +{ + int len; + int oldpid; + char pidbuf[20]; + + /* Open the pidfile for writing */ + if ((*fd = open(fname, O_CREAT|O_RDWR, 0640)) >= 0) { + if (fcntl_lock(*fd, F_WRLCK) == -1) { + berrno be; + /* already locked by someone else, try to read the pid */ + if (read(*fd, &pidbuf, sizeof(pidbuf)) > 0 && + sscanf(pidbuf, "%d", &oldpid) == 1) { + Mmsg(errmsg, _("%s is already running. pid=%d, check file %s\n"), + progname, oldpid, fname); + } else { + Mmsg(errmsg, _("Cannot lock %s file. %s ERR=%s\n"), filetype, fname, be.bstrerror()); + } + close(*fd); + *fd=-1; + return 0; + } + /* write the pid */ + len = sprintf(pidbuf, "%d\n", (int)getpid()); + write(*fd, pidbuf, len); + /* KEEP THE FILE OPEN TO KEEP THE LOCK !!! */ + return 1; + } else { + berrno be; + Mmsg(errmsg, _("Cannot not open %s file. %s ERR=%s\n"), filetype, fname, be.bstrerror()); + return 0; + } +} +#endif + +/* + * Create a standard "Unix" pid file. 
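
The fcntl() variant of create_lock_file() above relies on the kernel releasing the lock automatically when the process exits, which is why the descriptor is kept open for the daemon's lifetime. A standalone sketch of taking such a write lock on a pid file; take_pid_lock() and the /tmp path are hypothetical, and error handling is trimmed.

    // Standalone sketch of an fcntl() write lock on a pid file.
    #include <fcntl.h>
    #include <unistd.h>
    #include <cstdio>
    #include <cstring>

    /* Returns the locked fd, or -1 if another process already holds the lock. */
    static int take_pid_lock(const char *fname)
    {
       int fd = open(fname, O_CREAT | O_RDWR, 0640);
       if (fd < 0) return -1;

       struct flock fl;
       memset(&fl, 0, sizeof(fl));
       fl.l_type = F_WRLCK;            /* exclusive write lock */
       fl.l_whence = SEEK_SET;
       fl.l_start = 0;
       fl.l_len = 1;                   /* lock the first byte  */
       if (fcntl(fd, F_SETLK, &fl) == -1) {
          close(fd);                   /* somebody else owns it */
          return -1;
       }

       char buf[32];
       int len = snprintf(buf, sizeof(buf), "%d\n", (int)getpid());
       if (write(fd, buf, len) != len) { /* best effort */ }
       return fd;                      /* keep open: closing releases the lock */
    }

    int main()
    {
       int fd = take_pid_lock("/tmp/example.pid");
       printf(fd >= 0 ? "lock acquired\n" : "already running\n");
       return 0;
    }
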
+ */ +void create_pid_file(char *dir, const char *progname, int port) +{ + POOLMEM *errmsg = get_pool_memory(PM_MESSAGE); + POOLMEM *fname = get_pool_memory(PM_FNAME); + + Mmsg(fname, "%s/%s.%d.pid", dir, progname, port); + if (create_lock_file(fname, progname, "pid", &errmsg, &pid_fd) == 0) { + Emsg1(M_ERROR_TERM, 0, "%s", errmsg); + /* never return */ + } +#if !defined(HAVE_WIN32) + del_pid_file_ok = TRUE; /* we created it so we can delete it */ +#endif + + free_pool_memory(fname); + free_pool_memory(errmsg); +} + +/* + * Delete the pid file if we created it + */ +int delete_pid_file(char *dir, const char *progname, int port) +{ +#if !defined(HAVE_WIN32) + POOLMEM *fname = get_pool_memory(PM_FNAME); + if (pid_fd!=-1) { + close(pid_fd); + } + if (!del_pid_file_ok) { + free_pool_memory(fname); + return 0; + } + del_pid_file_ok = FALSE; + Mmsg(&fname, "%s/%s.%d.pid", dir, progname, port); + unlink(fname); + free_pool_memory(fname); +#endif + return 1; +} + +struct s_state_hdr { + char id[14]; + int32_t version; + uint64_t last_jobs_addr; + uint64_t reserved[20]; +}; + +static struct s_state_hdr state_hdr = { + "Bacula State\n", + 4, + 0 +}; + +/* + * Open and read the state file for the daemon + */ +void read_state_file(char *dir, const char *progname, int port) +{ + int sfd; + ssize_t stat; + bool ok = false; + POOLMEM *fname = get_pool_memory(PM_FNAME); + struct s_state_hdr hdr; + int hdr_size = sizeof(hdr); + + Mmsg(&fname, "%s/%s.%d.state", dir, progname, port); + /* If file exists, see what we have */ +// Dmsg1(10, "O_BINARY=%d\n", O_BINARY); + if ((sfd = open(fname, O_RDONLY|O_BINARY)) < 0) { + berrno be; + Dmsg3(010, "Could not open state file. sfd=%d size=%d: ERR=%s\n", + sfd, (int)sizeof(hdr), be.bstrerror()); + goto bail_out; + } + if ((stat=read(sfd, &hdr, hdr_size)) != hdr_size) { + berrno be; + Dmsg4(010, "Could not read state file. sfd=%d stat=%d size=%d: ERR=%s\n", + sfd, (int)stat, hdr_size, be.bstrerror()); + goto bail_out; + } + if (hdr.version != state_hdr.version) { + Dmsg2(010, "Bad hdr version. Wanted %d got %d\n", + state_hdr.version, hdr.version); + goto bail_out; + } + hdr.id[13] = 0; + if (strcmp(hdr.id, state_hdr.id) != 0) { + Dmsg0(000, "State file header id invalid.\n"); + goto bail_out; + } +// Dmsg1(010, "Read header of %d bytes.\n", sizeof(hdr)); + if (!read_last_jobs_list(sfd, hdr.last_jobs_addr)) { + goto bail_out; + } + ok = true; +bail_out: + if (sfd >= 0) { + close(sfd); + } + if (!ok) { + unlink(fname); + } + free_pool_memory(fname); +} + +/* + * Write the state file + */ +static pthread_mutex_t state_mutex = PTHREAD_MUTEX_INITIALIZER; + +void write_state_file(char *dir, const char *progname, int port) +{ + int sfd; + bool ok = false; + POOLMEM *fname = get_pool_memory(PM_FNAME); + + P(state_mutex); /* Only one job at a time can call here */ + Mmsg(&fname, "%s/%s.%d.state", dir, progname, port); + /* Create new state file */ + unlink(fname); + if ((sfd = open(fname, O_CREAT|O_WRONLY|O_BINARY, 0640)) < 0) { + berrno be; + Dmsg2(000, "Could not create state file. %s ERR=%s\n", fname, be.bstrerror()); + Emsg2(M_ERROR, 0, _("Could not create state file. 
%s ERR=%s\n"), fname, be.bstrerror()); + goto bail_out; + } + if (write(sfd, &state_hdr, sizeof(state_hdr)) != sizeof(state_hdr)) { + berrno be; + Dmsg1(000, "Write hdr error: ERR=%s\n", be.bstrerror()); + goto bail_out; + } +// Dmsg1(010, "Wrote header of %d bytes\n", sizeof(state_hdr)); + state_hdr.last_jobs_addr = sizeof(state_hdr); + state_hdr.reserved[0] = write_last_jobs_list(sfd, state_hdr.last_jobs_addr); +// Dmsg1(010, "write last job end = %d\n", (int)state_hdr.reserved[0]); + if (lseek(sfd, 0, SEEK_SET) < 0) { + berrno be; + Dmsg1(000, "lseek error: ERR=%s\n", be.bstrerror()); + goto bail_out; + } + if (write(sfd, &state_hdr, sizeof(state_hdr)) != sizeof(state_hdr)) { + berrno be; + Pmsg1(000, _("Write final hdr error: ERR=%s\n"), be.bstrerror()); + goto bail_out; + } + ok = true; +// Dmsg1(010, "rewrote header = %d\n", sizeof(state_hdr)); +bail_out: + if (sfd >= 0) { + close(sfd); + } + if (!ok) { + unlink(fname); + } + V(state_mutex); + free_pool_memory(fname); +} + + +/* BSDI does not have this. This is a *poor* simulation */ +#ifndef HAVE_STRTOLL +long long int +strtoll(const char *ptr, char **endptr, int base) +{ + return (long long int)strtod(ptr, endptr); +} +#endif + +/* + * Bacula's implementation of fgets(). The difference is that it handles + * being interrupted by a signal (e.g. a SIGCHLD). + */ +#undef fgetc +char *bfgets(char *s, int size, FILE *fd) +{ + char *p = s; + int ch; + *p = 0; + for (int i=0; i < size-1; i++) { + do { + errno = 0; + ch = fgetc(fd); + } while (ch == EOF && ferror(fd) && (errno == EINTR || errno == EAGAIN)); + if (ch == EOF) { + if (i == 0) { + return NULL; + } else { + return s; + } + } + *p++ = ch; + *p = 0; + if (ch == '\r') { /* Support for Mac/Windows file format */ + ch = fgetc(fd); + if (ch != '\n') { /* Mac (\r only) */ + (void)ungetc(ch, fd); /* Push next character back to fd */ + } + p[-1] = '\n'; + break; + } + if (ch == '\n') { + break; + } + } + return s; +} + +/* + * Bacula's implementation of fgets(). The difference is that it handles + * being interrupted by a signal (e.g. a SIGCHLD) and it has a + * different calling sequence which implements input lines of + * up to a million characters. + */ +char *bfgets(POOLMEM *&s, FILE *fd) +{ + int ch; + int soft_max; + int i = 0; + + s[0] = 0; + soft_max = sizeof_pool_memory(s) - 10; + for ( ;; ) { + do { + errno = 0; + ch = fgetc(fd); + } while (ch == EOF && ferror(fd) && (errno == EINTR || errno == EAGAIN)); + if (ch == EOF) { + if (i == 0) { + return NULL; + } else { + return s; + } + } + if (i > soft_max) { + /* Insanity check */ + if (soft_max > 1000000) { + return s; + } + s = check_pool_memory_size(s, soft_max+10000); + soft_max = sizeof_pool_memory(s) - 10; + } + s[i++] = ch; + s[i] = 0; + if (ch == '\r') { /* Support for Mac/Windows file format */ + ch = fgetc(fd); + if (ch != '\n') { /* Mac (\r only) */ + (void)ungetc(ch, fd); /* Push next character back to fd */ + } + s[i-1] = '\n'; + break; + } + if (ch == '\n') { + break; + } + } + return s; +} + +/* + * Make a "unique" filename. It is important that if + * called again with the same "what" that the result + * will be identical. This allows us to use the file + * without saving its name, and re-generate the name + * so that it can be deleted. 
+ */ +void make_unique_filename(POOLMEM **name, int Id, char *what) +{ + Mmsg(name, "%s/%s.%s.%d.tmp", working_directory, my_name, what, Id); +} + +char *escape_filename(const char *file_path) +{ + if (file_path == NULL || strpbrk(file_path, "\"\\") == NULL) { + return NULL; + } + + char *escaped_path = (char *)bmalloc(2 * (strlen(file_path) + 1)); + char *cur_char = escaped_path; + + while (*file_path) { + if (*file_path == '\\' || *file_path == '"') { + *cur_char++ = '\\'; + } + + *cur_char++ = *file_path++; + } + + *cur_char = '\0'; + + return escaped_path; +} + +/* + * For the moment preventing suspensions is only + * implemented on Windows. + */ +#ifndef HAVE_WIN32 +void prevent_os_suspensions() +{ } + +void allow_os_suspensions() +{ } +#endif + + +#if HAVE_BACKTRACE && HAVE_GCC +/* if some names are not resolved you can try using : addr2line, like this + * $ addr2line -e bin/bacula-sd -a 0x43cd11 + * OR + * use the the -rdynamic option in the linker, like this + * $ LDFLAGS="-rdynamic" make setup + */ +#include +#include +void stack_trace() +{ + const size_t max_depth = 100; + size_t stack_depth; + void *stack_addrs[max_depth]; + char **stack_strings; + + stack_depth = backtrace(stack_addrs, max_depth); + stack_strings = backtrace_symbols(stack_addrs, stack_depth); + + for (size_t i = 3; i < stack_depth; i++) { + size_t sz = 200; /* just a guess, template names will go much wider */ + char *function = (char *)actuallymalloc(sz); + char *begin = 0, *end = 0; + /* find the parentheses and address offset surrounding the mangled name */ + for (char *j = stack_strings[i]; *j; ++j) { + if (*j == '(') { + begin = j; + } else if (*j == '+') { + end = j; + } + } + if (begin && end) { + *begin++ = '\0'; + *end = '\0'; + /* found our mangled name, now in [begin, end] */ + + int status; + char *ret = abi::__cxa_demangle(begin, function, &sz, &status); + if (ret) { + /* return value may be a realloc() of the input */ + function = ret; + } else { + /* demangling failed, just pretend it's a C function with no args */ + strncpy(function, begin, sz); + strncat(function, "()", sz); + function[sz-1] = '\0'; + } + Pmsg2(000, " %s:%s\n", stack_strings[i], function); + + } else { + /* didn't find the mangled name, just print the whole line */ + Pmsg1(000, " %s\n", stack_strings[i]); + } + actuallyfree(function); + } + actuallyfree(stack_strings); /* malloc()ed by backtrace_symbols */ +} +#else /* HAVE_BACKTRACE && HAVE_GCC */ +void stack_trace() {} +#endif /* HAVE_BACKTRACE && HAVE_GCC */ + +#ifdef HAVE_SYS_STATVFS_H +#include +#else +#define statvfs statfs +#endif +/* statvfs.h defines ST_APPEND, which is also used by Bacula */ +#undef ST_APPEND + + +int fs_get_free_space(const char *path, int64_t *freeval, int64_t *totalval) +{ +#if defined(HAVE_SYS_STATVFS_H) || !defined(HAVE_WIN32) + struct statvfs st; + + if (statvfs(path, &st) == 0) { + *freeval = (uint64_t)st.f_bavail * (uint64_t)st.f_frsize; + *totalval = (uint64_t)st.f_blocks * (uint64_t)st.f_frsize; + return 0; + } +#endif + + *totalval = *freeval = 0; + return -1; +} + +/* This function is used after a fork, the memory manager is not be initialized + * properly, so we must stay simple. 
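A small illustration of fs_get_free_space() above (report_spool_space is an illustrative name, not in the source; the sketch assumes the declaration is visible through bacula.h):

#include "bacula.h"

static void report_spool_space(const char *spool_dir)
{
   int64_t free_bytes = 0, total_bytes = 0;

   if (fs_get_free_space(spool_dir, &free_bytes, &total_bytes) == 0) {
      Pmsg3(000, "%s: %lld of %lld bytes free\n", spool_dir,
            (long long)free_bytes, (long long)total_bytes);
   } else {
      Pmsg1(000, "No statvfs()/statfs() information for %s\n", spool_dir);
   }
}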
+ */ +void setup_env(char *envp[]) +{ + if (envp) { +#if defined(HAVE_SETENV) + char *p; + for (int i=0; envp[i] ; i++) { + p = strchr(envp[i], '='); /* HOME=/tmp */ + if (p) { + *p=0; /* HOME\0tmp\0 */ + setenv(envp[i], p+1, true); + *p='='; + } + } +#elif defined(HAVE_PUTENV) + for (int i=0; envp[i] ; i++) { + putenv(envp[i]); + } +#else +#error "putenv() and setenv() are not available on this system" +#endif + } +} + +/* Small function to copy a file somewhere else, + * for debug purpose. + */ +int copyfile(const char *src, const char *dst) +{ + int fd_src=-1, fd_dst=-1; + ssize_t len, lenw; + char buf[4096]; + berrno be; + fd_src = open(src, O_RDONLY); + if (fd_src < 0) { + Dmsg2(0, "Unable to open %s ERR=%s\n", src, be.bstrerror(errno)); + goto bail_out; + } + fd_dst = open(dst, O_WRONLY | O_CREAT | O_EXCL, 0600); + if (fd_dst < 0) { + Dmsg2(0, "Unable to open %s ERR=%s\n", dst, be.bstrerror(errno)); + goto bail_out; + } + + while ((len = read(fd_src, buf, sizeof(buf))) > 0) + { + char *out_ptr = buf; + do { + lenw = write(fd_dst, out_ptr, len); + if (lenw >= 0) { + len -= lenw; + out_ptr += lenw; + } else if (errno != EINTR) { + Dmsg3(0, "Unable to write %d bytes in %s. ERR=%s\n", len, dst, be.bstrerror(errno)); + goto bail_out; + } + } while (len > 0); + } + + if (len == 0) { + close(fd_src); + if (close(fd_dst) < 0) { + Dmsg2(0, "Unable to close %s properly. ERR=%s\n", dst, be.bstrerror(errno)); + return -1; + } + /* Success! */ + return 0; + } +bail_out: + close(fd_src); + close(fd_dst); + return -1; +} + +/* The poll() code is currently disabled */ +#ifdef HAVE_POLL + +#include +#define NB_EVENT 1 + +int fd_wait_data(int fd, fd_wait_mode mode, int sec, int msec) +{ + int ret; + struct pollfd fds[NB_EVENT]; /* The structure for one event */ + + fds[0].fd = fd; + fds[0].events = (mode == WAIT_READ) ? POLLIN : POLLOUT; + + ret = poll(fds, NB_EVENT, sec * 1000 + msec); + + /* Check if poll actually succeed */ + switch(ret) { + case 0: /* timeout; no event detected */ + return 0; + + case -1: /* report error and abort */ + return -1; + + default: + if (fds[0].revents & POLLIN || fds[0].revents & POLLOUT) { + return 1; + + } else { + return -1; /* unexpected... */ + } + } + return -1; /* unexpected... 
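Illustrative use of setup_env() above (apply_job_environment is not in the source). Note that the strings must be writable: setup_env() temporarily overwrites the '=' while handing name and value to setenv():

#include "bacula.h"                    /* assumed to declare setup_env() */

static void apply_job_environment(void)
{
   static char e1[] = "TMPDIR=/var/tmp";
   static char e2[] = "LANG=C";
   char *envp[] = { e1, e2, NULL };    /* NULL terminated, writable strings */

   setup_env(envp);                    /* each "NAME=value" ends up in the
                                        * environment via setenv() or putenv() */
}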
*/ +} +#else + +/* The select() code with a bigger fd_set was tested on Linux, FreeBSD and SunOS */ +#if defined(HAVE_LINUX_OS) || defined(HAVE_FREEBSD_OS) || defined(HAVE_SUN_OS) || defined(HAVE_WIN32) + #define SELECT_MAX_FD 7990 +#else + #define SELECT_MAX_FD 1023 /* For others, we keep it low */ +#endif + +int fd_wait_data(int fd, fd_wait_mode mode, int sec, int msec) +{ + union { + fd_set fdset; + char bfd_buf[1000]; + }; + + fd_set *pfdset=NULL, *tmp=NULL; + struct timeval tv; + int ret; + + /* If the amount of static memory is not big enough to handle the file + * descriptor, we allocate a new buffer ourself + */ + if (fd > SELECT_MAX_FD) { + int len = (fd+1+1024) * sizeof(char); + tmp = (fd_set *) malloc(len); + pfdset = tmp; + memset(tmp, 0, len); /* FD_ZERO() */ + + } else { + pfdset = &fdset; + memset(&bfd_buf, 0, sizeof(bfd_buf)); /* FD_ZERO(&fdset) */ + } + + FD_SET((unsigned)fd, pfdset); + + tv.tv_sec = sec; + tv.tv_usec = msec * 1000; + + if (mode == WAIT_READ) { + ret = select(fd + 1, pfdset, NULL, NULL, &tv); + + } else { /* WAIT_WRITE */ + ret = select(fd + 1, NULL, pfdset, NULL, &tv); + } + if (tmp) { + free(tmp); + } + switch (ret) { + case 0: /* timeout */ + return 0; + case -1: + return -1; /* error return */ + default: + break; + } + return 1; +} +#endif + +/* Use SOCK_CLOEXEC option when calling accept(). If not available, + * do it ourself (but with a race condition...) + */ +int baccept(int sockfd, struct sockaddr *addr, socklen_t *addrlen) +{ + int fd; +#ifdef HAVE_ACCEPT4 + fd = accept4(sockfd, addr, addrlen, SOCK_CLOEXEC); +#else + fd = accept(sockfd, addr, addrlen); + +# ifdef HAVE_DECL_FD_CLOEXEC + if (fd >= 0) { + int tmp_errno = errno; + if (fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC) < 0) { + berrno be; + Dmsg2(0, "Unable to set the CLOEXEC flag on fd=%d ERR=%s\n", fd, be.bstrerror()); + } + errno = tmp_errno; + } + +# endif /* HAVE_DECL_FD_CLOEXEC */ +#endif /* HAVE_ACCEPT4 */ + return fd; +} + +#undef fopen +FILE *bfopen(const char *path, const char *mode) +{ + FILE *fp; + char options[50]; + + bstrncpy(options, mode, sizeof(options)); + +#if defined(HAVE_STREAM_CLOEXEC) + bstrncat(options, STREAM_CLOEXEC, sizeof(options)); +#endif + + fp = fopen(path, options); + +#if !defined(HAVE_STREAM_CLOEXEC) && defined(HAVE_DECL_FD_CLOEXEC) + if (fp) { + int fd = fileno(fp); + if (fd >= 0) { + int tmp_errno = errno; + if (fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC) < 0) { + berrno be; + Dmsg2(0, "Unable to set the CLOEXEC flag on fd=%d ERR=%s\n", fd, be.bstrerror()); + } + errno = tmp_errno; + } + } +#endif + return fp; +} + +#ifdef TEST_PROGRAM +/* The main idea of the test is pretty simple, we have a writer and a reader, and + * they wait a little bit to read or send data over the fifo. + * So, for the first packets, the writer will wait, then the reader will wait + * read/write requests should always be fast. Only the time of the fd_wait_data() + * should be long. 
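Outside this test harness, the intended calling pattern for fd_wait_data() is simply to wait before a blocking read() or write(). A sketch under the assumption that WAIT_READ and the prototype are visible via bacula.h (read_with_timeout is an illustrative name):

#include "bacula.h"
#include <errno.h>
#include <unistd.h>

static ssize_t read_with_timeout(int fd, char *buf, size_t len)
{
   switch (fd_wait_data(fd, WAIT_READ, 30, 0)) {   /* 30s + 0ms timeout */
   case 1:
      return read(fd, buf, len);       /* data is there, read() will not block */
   case 0:
      errno = ETIMEDOUT;               /* nothing arrived within 30s */
      return -1;
   default:
      return -1;                       /* select()/poll() reported an error */
   }
}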
+ */ +#include "findlib/namedpipe.h" +#define PIPENAME "/tmp/wait.pipe.%d" + +#define NBPACKETS 10 +#define BUFSIZE 128*512 /* The pipe size looks to be 65K */ + +typedef struct { + int nb; + pthread_t writer; + pthread_t reader; +} job; + +pthread_cond_t cond = PTHREAD_COND_INITIALIZER; +pthread_mutex_t cond_mutex = PTHREAD_MUTEX_INITIALIZER; +int nb_ready=0; + +void *th1(void *a) +{ + NamedPipe p; + int fd, r; + btime_t s, e; + ssize_t nb; + char buf[BUFSIZE]; + job *j = (job *)a; + + namedpipe_init(&p); + bsnprintf(buf, sizeof(buf), PIPENAME, j->nb); + if (namedpipe_create(&p, buf, 0600) < 0) { + berrno be; + Dmsg2(0, "R: Unable to create the fifo %s. ERR=%s\n", buf, be.bstrerror()); + namedpipe_free(&p); + exit(2); + } + fd = namedpipe_open(&p, buf, O_RDONLY); + if (fd < 0) { + berrno be; + Dmsg2(0, "R: Unable to open the fifo %s. ERR=%s\n", buf, be.bstrerror()); + return NULL; + } + P(cond_mutex); + nb_ready++; + pthread_cond_wait(&cond, &cond_mutex); + V(cond_mutex); + for (int i = 0; i < NBPACKETS; i++) { + if (i < (NBPACKETS/2)) { + bmicrosleep(5, 0); + } + s = get_current_btime(); + r = fd_wait_data(fd, WAIT_READ, 10, 0); + if (r > 0) { + e = get_current_btime(); + Dmsg2(0, "Wait to read pkt %d %lldms\n",i, (int64_t) (e - s)); + + if (i <= NBPACKETS/2) { + ASSERT2((e-s) < 10000, "In the 1st phase, we are blocking the process"); + } else { + ASSERT2((e-s) > 10000, "In the 2nd phase, the writer is slowing down things"); + } + + s = get_current_btime(); + nb = read(fd, buf, sizeof(buf)); + e = get_current_btime(); + Dmsg3(0, "Read pkt %d %d bytes in %lldms\n",i, (int)nb, (int64_t) (e - s)); + ASSERT2((e-s) < 10000, "The read operation should be FAST"); + } + } + namedpipe_free(&p); + return NULL; +} + +void *th2(void *a) +{ + NamedPipe p; + btime_t s, e; + job *j = (job *)a; + char buf[BUFSIZE]; + int fd; + ssize_t nb; + + bsnprintf(buf, sizeof(buf), PIPENAME, j->nb); + namedpipe_init(&p); + if (namedpipe_create(&p, buf, 0600) < 0) { + berrno be; + Dmsg2(0, "W: Unable to create the fifo %s. ERR=%s\n", buf, be.bstrerror()); + namedpipe_free(&p); + exit(2); + } + + fd = namedpipe_open(&p, buf, O_WRONLY); + if (fd < 0) { + berrno be; + Dmsg2(0, "W: Unable to open the fifo %s. ERR=%s\n", buf, be.bstrerror()); + namedpipe_free(&p); + exit(2); + } + + P(cond_mutex); + nb_ready++; + pthread_cond_wait(&cond, &cond_mutex); + V(cond_mutex); + + unlink(buf); + + for (int i=0; i < NBPACKETS; i++) { + if (i > (NBPACKETS/2)) { + bmicrosleep(5, 0); + } + s = get_current_btime(); + if (fd_wait_data(fd, WAIT_WRITE, 10, 0) > 0) { + e = get_current_btime(); + Dmsg2(0, "Wait to write pkt %d %lldms\n",i, (int64_t) (e - s)); + + if (i == 0 || i > NBPACKETS/2) { /* The first packet doesn't count */ + ASSERT2((e-s) < 100000, "In the 2nd phase, it's fast to send, we are the blocker"); + } else { + ASSERT2((e-s) > 100000, "In the 1st phase, we wait for the reader"); + } + + s = get_current_btime(); + nb = write(fd, buf, sizeof(buf)); + e = get_current_btime(); + Dmsg3(0, "Wrote pkt %d %d bytes in %lldms\n", i, (int)nb, (int64_t) (e - s)); + ASSERT2((e-s) < 100000, "The write operation should never block"); + } + } + namedpipe_free(&p); + return NULL; +} + +int main(int argc, char **argv) +{ + job pthread_list[10000]; + int j = (argc >= 2) ? atoi(argv[1]) : 1; + int maxfd = (argc == 3) ? 
atoi(argv[2]) : 0; + + j = MIN(10000, j); + + lmgr_init_thread(); + set_debug_flags((char *)"h"); + + for (int i=3; i < maxfd; i++) { + open("/dev/null", O_RDONLY); + } + + for (int i=0; i < j; i++) { + pthread_list[i].nb=i; + pthread_create(&pthread_list[i].writer, NULL, th2, &pthread_list[i]); + pthread_create(&pthread_list[i].reader, NULL, th1, &pthread_list[i]); + } + + while (nb_ready < j*2) { + bmicrosleep(1, 0); + } + + Dmsg0(0, "All threads are started\n"); + P(cond_mutex); + pthread_cond_broadcast(&cond); + V(cond_mutex); + + for (int i=0; i < j; i++) { + pthread_join(pthread_list[i].writer, NULL); + pthread_join(pthread_list[i].reader, NULL); + } + + for (int i=3; i < maxfd; i++) { + close(i); + } + return 0; +} +#endif diff --git a/src/lib/btime.c b/src/lib/btime.c new file mode 100644 index 00000000..3f0f548e --- /dev/null +++ b/src/lib/btime.c @@ -0,0 +1,468 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula floating point time and date routines -- John Walker + * + * Later double precision integer time/date routines -- Kern Sibbald + * + */ + +/* + * Concerning times. There are a number of different time standards + * in Bacula (fdate_t, ftime_t, time_t (Unix standard), btime_t, and + * utime_t). fdate_t and ftime_t are deprecated and should no longer + * be used, and in general, Unix time time_t should no longer be used, + * it is being phased out. + * + * Epoch is the base of Unix time in seconds (time_t, ...) 
+ * and is 1 Jan 1970 at 0:0 UTC + * + * The major two times that should be left are: + * btime_t (64 bit integer in microseconds base Epoch) + * utime_t (64 bit integer in seconds base Epoch) + */ + +#include "bacula.h" +#include + +/* Formatted time for user display: dd-Mon-yyyy hh:mm */ +char *bstrftime(char *dt, int maxlen, utime_t utime) +{ + time_t time = (time_t)utime; + struct tm tm; + + /* ***FIXME**** the format and localtime_r() should be user configurable */ + (void)localtime_r(&time, &tm); + strftime(dt, maxlen, "%d-%b-%Y %H:%M", &tm); + return dt; +} + +/* Formatted time for user display: dd-Mon-yyyy hh:mm:ss */ +char *bstrftimes(char *dt, int maxlen, utime_t utime) +{ + time_t time = (time_t)utime; + struct tm tm; + + /* ***FIXME**** the format and localtime_r() should be user configurable */ + (void)localtime_r(&time, &tm); + strftime(dt, maxlen, "%d-%b-%Y %H:%M:%S", &tm); + return dt; +} + +/* Formatted time with day name for user display: dd-Mon hh:mm */ +char *bstrftime_dn(char *dt, int maxlen, utime_t utime) +{ + time_t time = (time_t)utime; + struct tm tm; + + /* ***FIXME**** the format and localtime_r() should be user configurable */ + (void)localtime_r(&time, &tm); + strftime(dt, maxlen, "%a %d-%b %H:%M", &tm); + return dt; +} + +/* Formatted time (no year) for user display: dd-Mon hh:mm */ +char *bstrftime_ny(char *dt, int maxlen, utime_t utime) +{ + time_t time = (time_t)utime; + struct tm tm; + + /* ***FIXME**** the format and localtime_r() should be user configurable */ + (void)localtime_r(&time, &tm); + strftime(dt, maxlen, "%d-%b %H:%M", &tm); + return dt; +} + + +/* Formatted time for user display: dd-Mon-yy hh:mm (no century) */ +char *bstrftime_nc(char *dt, int maxlen, utime_t utime) +{ + time_t time = (time_t)utime; + struct tm tm; + char *p, *q; + + /* ***FIXME**** the format and localtime_r() should be user configurable */ + (void)localtime_r(&time, &tm); + /* NOTE! since the compiler complains about %y, I use %y and cut the century */ + strftime(dt, maxlen, "%d-%b-%Y %H:%M", &tm); + /* overlay the century */ + p = dt+7; + q = dt+9; + while (*q) { + *p++ = *q++; + } + *p = 0; + return dt; +} + + +/* Unix time to standard time string yyyy-mm-dd hh:mm:ss */ +char *bstrutime(char *dt, int maxlen, utime_t utime) +{ + time_t time = (time_t)utime; + struct tm tm; + (void)localtime_r(&time, &tm); + strftime(dt, maxlen, "%Y-%m-%d %H:%M:%S", &tm); + return dt; +} + +/* Convert standard time string yyyy-mm-dd hh:mm:ss to Unix time */ +utime_t str_to_utime(char *str) +{ + struct tm tm; + time_t time; + + /* Check for bad argument */ + if (!str || *str == 0) { + return 0; + } + + if (sscanf(str, "%d-%d-%d %d:%d:%d", &tm.tm_year, &tm.tm_mon, &tm.tm_mday, + &tm.tm_hour, &tm.tm_min, &tm.tm_sec) != 6) { + return 0; + } + if (tm.tm_mon > 0) { + tm.tm_mon--; + } else { + return 0; + } + if (tm.tm_year >= 1900) { + tm.tm_year -= 1900; + } else { + return 0; + } + tm.tm_wday = tm.tm_yday = 0; + tm.tm_isdst = -1; + time = mktime(&tm); + if (time == -1) { + time = 0; + } + return (utime_t)time; +} + + +/* + * Bacula's time (btime_t) is an unsigned 64 bit integer that contains + * the number of microseconds since Epoch Time (1 Jan 1970) UTC. 
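A short sketch of the two remaining units (time_example is illustrative, not part of the source): str_to_utime() and bstrutime() round-trip a local-time string in whole seconds, while get_current_btime() below works in microseconds:

#include "bacula.h"

static void time_example(void)
{
   char buf[50];
   utime_t t = str_to_utime((char *)"2019-02-05 00:10:15");  /* seconds since Epoch */
   btime_t now = get_current_btime();                        /* microseconds since Epoch */

   if (t == 0) {
      return;                          /* parse error */
   }
   bstrutime(buf, sizeof(buf), t);     /* gives "2019-02-05 00:10:15" back */
   Pmsg2(000, "parsed=%s now=%lld us\n", buf, (long long)now);
}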
+ */ + +btime_t get_current_btime() +{ + struct timeval tv; + if (gettimeofday(&tv, NULL) != 0) { + tv.tv_sec = (long)time(NULL); /* fall back to old method */ + tv.tv_usec = 0; + } + return ((btime_t)tv.tv_sec) * 1000000 + (btime_t)tv.tv_usec; +} + +/* Convert btime to Unix time */ +time_t btime_to_unix(btime_t bt) +{ + return (time_t)(bt/1000000); +} + +/* Convert btime to utime */ +utime_t btime_to_utime(btime_t bt) +{ + return (utime_t)(bt/1000000); +} + +/* + * Definition of a leap year from Wikipedia. + * I knew it anyway but better check. + */ +static bool is_leap_year(int year) +{ + if (year % 400 == 0) return true; + if (year % 100 == 0) return false; + if (year % 4 == 0) return true; + return false; +} + +/* + * Return the last day of the month, base 0 + * month=0-11, year is actual year + * ldom is base 0 + */ +int tm_ldom(int month, int year) +{ /* jan feb mar apr may jun jul aug sep oct nov dec */ + static int dom[12] = {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; + + if (is_leap_year(year) && month == 1) return 28; + return dom[month] - 1; +} + +/* + * Return the week of the month, base 0 (wom) + * given tm_mday and tm_wday. Value returned + * can be from 0 to 5 => week1, ... week6 + */ +int tm_wom(int mday, int wday) +{ + int fs; /* first sunday */ + fs = (mday%7) - wday; + if (fs <= 0) { + fs += 7; + } + if (mday <= fs) { +// Dmsg3(100, "mday=%d wom=0 wday=%d <= fs=%d\n", mday, wday, fs); + return 0; + } + int wom = 1 + (mday - fs - 1) / 7; +// Dmsg4(100, "mday=%d wom=%d wday=%d fs=%d\n", mday, wom, wday, fs); + return wom; +} + +/* + * Given a Unix date return the week of the year. + * The returned value can be 0-53. Officially + * the weeks are numbered from 1 to 53 where week1 + * is the week in which the first Thursday of the + * year occurs (alternatively, the week which contains + * the 4th of January). We return 0, if the week of the + * year does not fall in the current year. + */ +int tm_woy(time_t stime) +{ + int woy, fty, tm_yday; + time_t time4; + struct tm tm; + + memset(&tm, 0, sizeof(struct tm)); + (void)localtime_r(&stime, &tm); + tm_yday = tm.tm_yday; + tm.tm_mon = 0; + tm.tm_mday = 4; + tm.tm_isdst = 0; /* 4 Jan is not DST */ + time4 = mktime(&tm); + (void)localtime_r(&time4, &tm); + fty = 1 - tm.tm_wday; + if (fty <= 0) { + fty += 7; + } + woy = tm_yday - fty + 4; + if (woy < 0) { + return 0; + } + return 1 + woy / 7; +} + +/* Deprecated. Do not use. */ +void get_current_time(struct date_time *dt) +{ + struct tm tm; + time_t now; + + now = time(NULL); + (void)gmtime_r(&now, &tm); + Dmsg6(200, "m=%d d=%d y=%d h=%d m=%d s=%d\n", tm.tm_mon+1, tm.tm_mday, tm.tm_year+1900, + tm.tm_hour, tm.tm_min, tm.tm_sec); + tm_encode(dt, &tm); +#ifdef DEBUG + Dmsg2(200, "jday=%f jmin=%f\n", dt->julian_day_number, dt->julian_day_fraction); + tm_decode(dt, &tm); + Dmsg6(200, "m=%d d=%d y=%d h=%d m=%d s=%d\n", tm.tm_mon+1, tm.tm_mday, tm.tm_year+1900, + tm.tm_hour, tm.tm_min, tm.tm_sec); +#endif +} + + + +/* date_encode -- Encode civil date as a Julian day number. */ +/* Deprecated. Do not use. */ +fdate_t date_encode(uint32_t year, uint8_t month, uint8_t day) +{ + + /* Algorithm as given in Meeus, Astronomical Algorithms, Chapter 7, page 61 */ + + int32_t a, b, m; + uint32_t y; + + ASSERT(month < 13); + ASSERT(day > 0 && day < 32); + + m = month; + y = year; + + if (m <= 2) { + y--; + m += 12; + } + + /* Determine whether date is in Julian or Gregorian calendar based on + canonical date of calendar reform. 
*/ + + if ((year < 1582) || ((year == 1582) && ((month < 9) || (month == 9 && day < 5)))) { + b = 0; + } else { + a = ((int) (y / 100)); + b = 2 - a + (a / 4); + } + + return (((int32_t) (365.25 * (y + 4716))) + ((int) (30.6001 * (m + 1))) + + day + b - 1524.5); +} + +/* time_encode -- Encode time from hours, minutes, and seconds + into a fraction of a day. */ + +/* Deprecated. Do not use. */ +ftime_t time_encode(uint8_t hour, uint8_t minute, uint8_t second, + float32_t second_fraction) +{ + ASSERT((second_fraction >= 0.0) || (second_fraction < 1.0)); + return (ftime_t) (((second + 60L * (minute + 60L * hour)) / 86400.0)) + + second_fraction; +} + +/* date_time_encode -- Set day number and fraction from date + and time. */ + +/* Deprecated. Do not use. */ +void date_time_encode(struct date_time *dt, + uint32_t year, uint8_t month, uint8_t day, + uint8_t hour, uint8_t minute, uint8_t second, + float32_t second_fraction) +{ + dt->julian_day_number = date_encode(year, month, day); + dt->julian_day_fraction = time_encode(hour, minute, second, second_fraction); +} + +/* date_decode -- Decode a Julian day number into civil date. */ + +/* Deprecated. Do not use. */ +void date_decode(fdate_t date, uint32_t *year, uint8_t *month, + uint8_t *day) +{ + fdate_t z, f, a, alpha, b, c, d, e; + + date += 0.5; + z = floor(date); + f = date - z; + + if (z < 2299161.0) { + a = z; + } else { + alpha = floor((z - 1867216.25) / 36524.25); + a = z + 1 + alpha - floor(alpha / 4); + } + + b = a + 1524; + c = floor((b - 122.1) / 365.25); + d = floor(365.25 * c); + e = floor((b - d) / 30.6001); + + *day = (uint8_t) (b - d - floor(30.6001 * e) + f); + *month = (uint8_t) ((e < 14) ? (e - 1) : (e - 13)); + *year = (uint32_t) ((*month > 2) ? (c - 4716) : (c - 4715)); +} + +/* time_decode -- Decode a day fraction into civil time. */ + +/* Deprecated. Do not use. */ +void time_decode(ftime_t time, uint8_t *hour, uint8_t *minute, + uint8_t *second, float32_t *second_fraction) +{ + uint32_t ij; + + ij = (uint32_t) ((time - floor(time)) * 86400.0); + *hour = (uint8_t) (ij / 3600L); + *minute = (uint8_t) ((ij / 60L) % 60L); + *second = (uint8_t) (ij % 60L); + if (second_fraction != NULL) { + *second_fraction = (float32_t)(time - floor(time)); + } +} + +/* date_time_decode -- Decode a Julian day and day fraction + into civil date and time. */ + +/* Deprecated. Do not use. */ +void date_time_decode(struct date_time *dt, + uint32_t *year, uint8_t *month, uint8_t *day, + uint8_t *hour, uint8_t *minute, uint8_t *second, + float32_t *second_fraction) +{ + date_decode(dt->julian_day_number, year, month, day); + time_decode(dt->julian_day_fraction, hour, minute, second, second_fraction); +} + +/* tm_encode -- Encode a civil date and time from a tm structure + * to a Julian day and day fraction. + */ + +/* Deprecated. Do not use. */ +void tm_encode(struct date_time *dt, + struct tm *tm) +{ + uint32_t year; + uint8_t month, day, hour, minute, second; + + year = tm->tm_year + 1900; + month = tm->tm_mon + 1; + day = tm->tm_mday; + hour = tm->tm_hour; + minute = tm->tm_min; + second = tm->tm_sec; + dt->julian_day_number = date_encode(year, month, day); + dt->julian_day_fraction = time_encode(hour, minute, second, 0.0); +} + + +/* tm_decode -- Decode a Julian day and day fraction + into civil date and time in tm structure */ + +/* Deprecated. Do not use. 
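A worked example for the deprecated Julian-day routines above, shown only to make the encoding concrete (julian_example is illustrative): 1 Jan 2000 encodes to Julian day 2451544.5, and date_decode() inverts it.

#include "bacula.h"

static void julian_example(void)
{
   uint32_t year;
   uint8_t month, day;
   fdate_t jd = date_encode(2000, 1, 1);

   Pmsg1(000, "date_encode(2000,1,1) = %.1f\n", jd);         /* 2451544.5 */
   date_decode(jd, &year, &month, &day);
   Pmsg3(000, "date_decode() -> %u-%02u-%02u\n",
         (unsigned)year, (unsigned)month, (unsigned)day);    /* 2000-01-01 */
}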
*/ +void tm_decode(struct date_time *dt, + struct tm *tm) +{ + uint32_t year; + uint8_t month, day, hour, minute, second; + + date_decode(dt->julian_day_number, &year, &month, &day); + time_decode(dt->julian_day_fraction, &hour, &minute, &second, NULL); + tm->tm_year = year - 1900; + tm->tm_mon = month - 1; + tm->tm_mday = day; + tm->tm_hour = hour; + tm->tm_min = minute; + tm->tm_sec = second; +} + + +/* date_time_compare -- Compare two dates and times and return + the relationship as follows: + + -1 dt1 < dt2 + 0 dt1 = dt2 + 1 dt1 > dt2 +*/ + +/* Deprecated. Do not use. */ +int date_time_compare(struct date_time *dt1, struct date_time *dt2) +{ + if (dt1->julian_day_number == dt2->julian_day_number) { + if (dt1->julian_day_fraction == dt2->julian_day_fraction) { + return 0; + } + return (dt1->julian_day_fraction < dt2->julian_day_fraction) ? -1 : 1; + } + return (dt1->julian_day_number - dt2->julian_day_number) ? -1 : 1; +} diff --git a/src/lib/btime.h b/src/lib/btime.h new file mode 100644 index 00000000..6d339b21 --- /dev/null +++ b/src/lib/btime.h @@ -0,0 +1,97 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +/* + * See btime.c for defintions. + * Kern Sibbald, MM + */ + + +#ifndef __btime_INCLUDED +#define __btime_INCLUDED + +/* New btime definition -- use this */ +btime_t get_current_btime(void); +time_t btime_to_unix(btime_t bt); /* bacula time to epoch time */ +utime_t btime_to_utime(btime_t bt); /* bacula time to utime_t */ + +int tm_wom(int mday, int wday); +int tm_woy(time_t stime); +int tm_ldom(int month, int year); + +char *bstrutime(char *dt, int maxlen, utime_t tim); +char *bstrftime(char *dt, int maxlen, utime_t tim); +char *bstrftimes(char *dt, int maxlen, utime_t tim); +char *bstrftime_ny(char *dt, int maxlen, utime_t tim); +char *bstrftime_nc(char *dt, int maxlen, utime_t tim); +char *bstrftime_dn(char *dt, int maxlen, utime_t tim); +utime_t str_to_utime(char *str); + + +/* =========================================================== */ +/* old code deprecated below. Do not use. */ + +typedef float64_t fdate_t; /* Date type */ +typedef float64_t ftime_t; /* Time type */ + +struct date_time { + fdate_t julian_day_number; /* Julian day number */ + ftime_t julian_day_fraction; /* Julian day fraction */ +}; + +/* In arguments and results of the following functions, + quantities are expressed as follows. + + year Year in the Common Era. The canonical + date of adoption of the Gregorian calendar + (October 5, 1582 in the Julian calendar) + is assumed. + + month Month index with January 0, December 11. + + day Day number of month, 1 to 31. 
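Note that tm_ldom() and tm_wom() declared above use zero-based months and return zero-based results, while tm_encode()/date_encode() work with months 1-12. A quick illustration (month_helpers_example is not in the source):

#include "bacula.h"

static void month_helpers_example(void)
{
   int ldom = tm_ldom(1, 2016);        /* month 1 == February, 2016 is a leap year */

   Pmsg1(000, "last day of Feb 2016 (base 0) = %d\n", ldom); /* 28, i.e. the 29th */
}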
+ +*/ + + +extern fdate_t date_encode(uint32_t year, uint8_t month, uint8_t day); +extern ftime_t time_encode(uint8_t hour, uint8_t minute, uint8_t second, + float32_t second_fraction); +extern void date_time_encode(struct date_time *dt, + uint32_t year, uint8_t month, uint8_t day, + uint8_t hour, uint8_t minute, uint8_t second, + float32_t second_fraction); + +extern void date_decode(fdate_t date, uint32_t *year, uint8_t *month, + uint8_t *day); +extern void time_decode(ftime_t time, uint8_t *hour, uint8_t *minute, + uint8_t *second, float32_t *second_fraction); +extern void date_time_decode(struct date_time *dt, + uint32_t *year, uint8_t *month, uint8_t *day, + uint8_t *hour, uint8_t *minute, uint8_t *second, + float32_t *second_fraction); + +extern int date_time_compare(struct date_time *dt1, struct date_time *dt2); + +extern void tm_encode(struct date_time *dt, struct tm *tm); +extern void tm_decode(struct date_time *dt, struct tm *tm); +extern void get_current_time(struct date_time *dt); + + +#endif /* __btime_INCLUDED */ diff --git a/src/lib/btimers.c b/src/lib/btimers.c new file mode 100644 index 00000000..64bbdcf4 --- /dev/null +++ b/src/lib/btimers.c @@ -0,0 +1,280 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Process and thread timer routines, built on top of watchdogs. + * + * Nic Bellamy , October 2004. + * +*/ + +#include "bacula.h" +#include "jcr.h" + +const int dbglvl = 900; + +/* Forward referenced functions */ +static void stop_btimer(btimer_t *wid); +static btimer_t *btimer_start_common(uint32_t wait); + +/* Forward referenced callback functions */ +static void callback_child_timer(watchdog_t *self); +static void callback_thread_timer(watchdog_t *self); +#ifdef xxx +static void destructor_thread_timer(watchdog_t *self); +static void destructor_child_timer(watchdog_t *self); +#endif + +/* + * Start a timer on a child process of pid, kill it after wait seconds. 
+ * + * Returns: btimer_t *(pointer to btimer_t struct) on success + * NULL on failure + */ +btimer_t *start_child_timer(JCR *jcr, pid_t pid, uint32_t wait) +{ + btimer_t *wid; + + wid = btimer_start_common(wait); + if (wid == NULL) { + return NULL; + } + wid->type = TYPE_CHILD; + wid->pid = pid; + wid->killed = false; + wid->jcr = jcr; + + wid->wd->callback = callback_child_timer; + wid->wd->one_shot = false; + wid->wd->interval = wait; + register_watchdog(wid->wd); + + Dmsg3(dbglvl, "Start child timer %p, pid %d for %d secs.\n", wid, pid, wait); + return wid; +} + +/* + * Stop child timer + */ +void stop_child_timer(btimer_t *wid) +{ + if (wid == NULL) { + return; + } + Dmsg2(dbglvl, "Stop child timer %p pid %d\n", wid, wid->pid); + stop_btimer(wid); +} + +#ifdef xxx +static void destructor_child_timer(watchdog_t *self) +{ + btimer_t *wid = (btimer_t *)self->data; + free(wid->wd); + free(wid); +} +#endif + +static void callback_child_timer(watchdog_t *self) +{ + btimer_t *wid = (btimer_t *)self->data; + + if (!wid->killed) { + /* First kill attempt; try killing it softly (kill -SONG) first */ + wid->killed = true; + + Dmsg2(dbglvl, "watchdog %p term PID %d\n", self, wid->pid); + + /* Kill -TERM the specified PID, and reschedule a -KILL for 5 seconds + * later. (Warning: this should let dvd-writepart enough time to term + * and kill growisofs, which takes 3 seconds, so the interval must not + * be less than 5 seconds) + */ + kill(wid->pid, SIGTERM); + self->interval = 10; + } else { + /* This is the second call - terminate with prejudice. */ + Dmsg2(dbglvl, "watchdog %p kill PID %d\n", self, wid->pid); + + kill(wid->pid, SIGKILL); + + /* Setting one_shot to true before we leave ensures we don't get + * rescheduled. + */ + self->one_shot = true; + } +} + +/* + * Start a timer on a thread. kill it after wait seconds. + * + * Returns: btimer_t *(pointer to btimer_t struct) on success + * NULL on failure + */ +btimer_t *start_thread_timer(JCR *jcr, pthread_t tid, uint32_t wait) +{ + btimer_t *wid; + wid = btimer_start_common(wait); + if (wid == NULL) { + Dmsg1(dbglvl, "start_thread_timer return NULL from common. wait=%d.\n", wait); + return NULL; + } + wid->type = TYPE_PTHREAD; + wid->tid = tid; + wid->jcr = jcr; + + wid->wd->callback = callback_thread_timer; + wid->wd->one_shot = true; + wid->wd->interval = wait; + register_watchdog(wid->wd); + + Dmsg3(dbglvl, "Start thread timer %p tid %p for %d secs.\n", wid, tid, wait); + + return wid; +} + +/* + * Start a timer on a BSOCK. kill it after wait seconds. + * + * Returns: btimer_t *(pointer to btimer_t struct) on success + * NULL on failure + */ +btimer_t *_start_bsock_timer(BSOCK *bsock, uint32_t wait) +{ + btimer_t *wid; + if (wait <= 0) { /* wait should be > 0 */ + return NULL; + } + wid = btimer_start_common(wait); + if (wid == NULL) { + return NULL; + } + wid->type = TYPE_BSOCK; + wid->tid = pthread_self(); + wid->bsock = bsock; + wid->jcr = bsock->jcr(); + + wid->wd->callback = callback_thread_timer; + wid->wd->one_shot = true; + wid->wd->interval = wait; + register_watchdog(wid->wd); + + Dmsg4(dbglvl, "Start bsock timer %p tid=%p for %d secs at %d\n", wid, + wid->tid, wait, time(NULL)); + + return wid; +} + +/* + * Start a timer on a BSOCK. kill it after wait seconds. + * + * Returns: btimer_t *(pointer to btimer_t struct) on success + * NULL on failure + */ +btimer_t *start_bsock_timer(BSOCKCORE *bsock, uint32_t wait) +{ + return _start_bsock_timer((BSOCK*)bsock, wait); +}; + +/* + * Start a timer on a BSOCK. 
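A typical calling pattern for the child timer above (run_guarded_child is an illustrative helper, not in the source): start the timer right after the fork, wait for the child, and always stop the timer afterwards; if the child overruns, the watchdog delivers SIGTERM and then SIGKILL on its behalf.

#include "bacula.h"
#include "jcr.h"
#include <sys/wait.h>

static int run_guarded_child(JCR *jcr, pid_t pid)
{
   int status = 0;
   btimer_t *wid = start_child_timer(jcr, pid, 5 * 60);   /* kill after 5 minutes */

   waitpid(pid, &status, 0);           /* returns normally, or after the watchdog
                                        * has killed the runaway child */
   stop_child_timer(wid);              /* safe even if wid is NULL */
   return status;
}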
kill it after wait seconds. + * + * Returns: btimer_t *(pointer to btimer_t struct) on success + * NULL on failure + */ +btimer_t *start_bsock_timer(BSOCK *bsock, uint32_t wait) +{ + return _start_bsock_timer(bsock, wait); +}; + +/* + * Stop bsock timer + */ +void stop_bsock_timer(btimer_t *wid) +{ + if (wid == NULL) { + return; + } + Dmsg3(dbglvl, "Stop bsock timer %p tid=%p at %d.\n", wid, wid->tid, time(NULL)); + stop_btimer(wid); +} + + +/* + * Stop thread timer + */ +void stop_thread_timer(btimer_t *wid) +{ + if (wid == NULL) { + return; + } + Dmsg2(dbglvl, "Stop thread timer %p tid=%p.\n", wid, wid->tid); + stop_btimer(wid); +} + +#ifdef xxx +static void destructor_thread_timer(watchdog_t *self) +{ + btimer_t *wid = (btimer_t *)self->data; + free(wid->wd); + free(wid); +} +#endif + +static void callback_thread_timer(watchdog_t *self) +{ + btimer_t *wid = (btimer_t *)self->data; + + Dmsg4(dbglvl, "thread timer %p kill %s tid=%p at %d.\n", self, + wid->type == TYPE_BSOCK ? "bsock" : "thread", wid->tid, time(NULL)); + if (wid->jcr) { + Dmsg2(dbglvl, "killed jid=%u Job=%s\n", wid->jcr->JobId, wid->jcr->Job); + } + + if (wid->type == TYPE_BSOCK && wid->bsock) { + wid->bsock->set_timed_out(); + } + pthread_kill(wid->tid, TIMEOUT_SIGNAL); +} + +static btimer_t *btimer_start_common(uint32_t wait) +{ + btimer_t *wid = (btimer_t *)malloc(sizeof(btimer_t)); + + wid->wd = new_watchdog(); + if (wid->wd == NULL) { + free(wid); + return NULL; + } + wid->wd->data = wid; + wid->killed = false; + + return wid; +} + +/* + * Stop btimer + */ +static void stop_btimer(btimer_t *wid) +{ + if (wid == NULL) { + Emsg0(M_ABORT, 0, _("stop_btimer called with NULL btimer_id\n")); + } + unregister_watchdog(wid->wd); + free(wid->wd); + free(wid); +} diff --git a/src/lib/btimers.h b/src/lib/btimers.h new file mode 100644 index 00000000..0b14bb49 --- /dev/null +++ b/src/lib/btimers.h @@ -0,0 +1,39 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Process and thread timer routines, built on top of watchdogs. + * + * Nic Bellamy , October 2003. + * +*/ + +#ifndef __BTIMERS_H_ +#define __BTIMERS_H_ + +struct btimer_t { + watchdog_t *wd; /* Parent watchdog */ + int type; + bool killed; + pid_t pid; /* process id if TYPE_CHILD */ + pthread_t tid; /* thread id if TYPE_PTHREAD */ + BSOCK *bsock; /* Pointer to BSOCK */ + JCR *jcr; /* Pointer to job control record */ +}; + +#endif /* __BTIMERS_H_ */ diff --git a/src/lib/bwlimit.c b/src/lib/bwlimit.c new file mode 100644 index 00000000..8d24f958 --- /dev/null +++ b/src/lib/bwlimit.c @@ -0,0 +1,133 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +#include "bacula.h" +#include "bwlimit.h" + +#define ONE_SEC 1000000L /* number of microseconds in a second */ + +void bwlimit::reset_sample() +{ + memset(samples_time, '\0', sizeof(samples_time)); + memset(samples_byte, '\0', sizeof(samples_byte)); + memset(samples_sleep, '\0', sizeof(samples_sleep)); + total_time=total_byte=total_sleep=0; + current_sample=0; + current_byte=0; +} + +void bwlimit::push_sample(int64_t t, int64_t bytes, int64_t sleep) +{ + // accumulate data in current sample + current_time+=t; + current_byte+=bytes; + current_sleep+=sleep; + if (current_time>ONE_SEC) { + // we have accumulated enough data for this sample go to the next one + // First "pop" the data from the total + total_time-=samples_time[current_sample]; + total_byte-=samples_byte[current_sample]; + total_sleep-=samples_sleep[current_sample]; + // Push the new one + total_time+=current_time; + total_byte+=current_byte; + total_sleep+=current_sleep; + // record the data in the table + samples_time[current_sample]=current_time; + samples_byte[current_sample]=current_byte; + samples_sleep[current_sample]=current_sleep; + // be ready for next sample + current_time=0; + current_byte=0; + current_sleep=0; + current_sample=(current_sample+1)%sample_capacity; + } +} + +void bwlimit::get_total(int64_t *t, int64_t *bytes, int64_t *sleep) +{ + pthread_mutex_lock(&m_bw_mutex); + *t=total_time+current_time; + *bytes=total_byte+current_byte; + *sleep=total_sleep+current_sleep; + pthread_mutex_unlock(&m_bw_mutex); +} + +int64_t bwlimit::get_bw() +{ + int64_t bw = 0; + btime_t temp = get_current_btime() - m_last_tick; + if (temp < 0) { + temp = 0; + } + pthread_mutex_lock(&m_bw_mutex); + if (total_time+current_time>0) { + bw=(total_byte+current_byte)*ONE_SEC/(total_time+current_time+temp); + } + pthread_mutex_unlock(&m_bw_mutex); + return bw; +} + +void bwlimit::control_bwlimit(int bytes) +{ + btime_t now, temp; + if (bytes == 0 || m_bwlimit == 0) { + return; + } + + lock_guard lg(m_bw_mutex); /* Release the mutex automatically when we quit the function*/ + now = get_current_btime(); /* microseconds */ + temp = now - m_last_tick; /* microseconds */ + + if (temp < 0 || temp > m_backlog_limit) { /* Take care of clock problems (>10s) or back in time */ + m_nb_bytes = bytes; + m_last_tick = now; + reset_sample(); + return; + } + + /* remove what as been consumed */ + m_nb_bytes -= bytes; + + /* Less than 0.1ms since the last call, see the next time */ + if (temp < 100) { + push_sample(temp, bytes, 0); + return; + } + + /* Add what is authorized to be written in temp us */ + m_nb_bytes += (int64_t)(temp * ((double)m_bwlimit / ONE_SEC)); + m_last_tick = now; + + /* limit the backlog */ + if (m_nb_bytes > m_backlog_limit*m_bwlimit) { + m_nb_bytes = m_backlog_limit*m_bwlimit; + push_sample(temp, bytes, 0); + } else if (m_nb_bytes < 0) { + /* What exceed should be converted in sleep time */ + int64_t usec_sleep = (int64_t)(-m_nb_bytes /((double)m_bwlimit / ONE_SEC)); + if (usec_sleep > 100) { + pthread_mutex_unlock(&m_bw_mutex); + bmicrosleep(usec_sleep / ONE_SEC, usec_sleep % ONE_SEC); + pthread_mutex_lock(&m_bw_mutex); + } + 
push_sample(temp, bytes, usec_sleep>100?usec_sleep:0); + /* m_nb_bytes & m_last_tick will be updated at next iteration */ + } +} diff --git a/src/lib/bwlimit.h b/src/lib/bwlimit.h new file mode 100644 index 00000000..3b4312a1 --- /dev/null +++ b/src/lib/bwlimit.h @@ -0,0 +1,63 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +#ifndef BWLIMIT_H +#define BWLIMIT_H + +class bwlimit: public SMARTALLOC +{ + static const int sample_capacity=10; + +private: + int64_t m_bwlimit; /* set to limit bandwidth */ + int64_t m_nb_bytes; /* bytes sent/recv since the last tick */ + btime_t m_last_tick; /* last tick used by bwlimit */ + btime_t m_backlog_limit; /* don't handle more backlog thna this us */ + pthread_mutex_t m_bw_mutex; + + int64_t samples_time[sample_capacity]; + int64_t samples_byte[sample_capacity]; + int64_t samples_sleep[sample_capacity]; + int64_t total_time, total_byte, total_sleep; + int64_t current_time, current_byte, current_sleep; + int64_t current_sample; + + void push_sample(int64_t t, int64_t bytes, int64_t sleep); + +public: + bwlimit(int64_t speed=0): m_bwlimit(speed), m_nb_bytes(0), m_last_tick(0), + m_backlog_limit(10*1000*1000), + total_time(0), total_byte(0), total_sleep(0), current_sample(0) + { + pthread_mutex_init(&m_bw_mutex, NULL); + reset_sample(); + }; + ~bwlimit() { + pthread_mutex_destroy(&m_bw_mutex); + }; + + void control_bwlimit(int bytes); + void set_bwlimit(int64_t maxspeed) { m_bwlimit = maxspeed; }; + int64_t get_bwlimit() { return m_bwlimit; }; + bool use_bwlimit() { return m_bwlimit > 0;}; + int64_t get_bw(); + void get_total(int64_t *t, int64_t *bytes, int64_t *sleep); + void reset_sample(); +}; +#endif diff --git a/src/lib/cmd_parser.h b/src/lib/cmd_parser.h new file mode 100644 index 00000000..750a254d --- /dev/null +++ b/src/lib/cmd_parser.h @@ -0,0 +1,196 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
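Illustrative use of the bwlimit class above (throttled_send is not in the source): report every chunk to control_bwlimit(), which sleeps whenever the caller gets ahead of the configured bytes-per-second budget.

#include "bacula.h"
#include "bwlimit.h"
#include <unistd.h>

static void throttled_send(int fd, const char *buf, int len)
{
   static bwlimit limiter(1000000);    /* roughly 1 MB/s */

   if (write(fd, buf, len) == len) {   /* error handling omitted in this sketch */
      limiter.control_bwlimit(len);    /* may sleep to stay under the limit */
   }
}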
+*/ + +#ifndef CMD_PARSER_H +#define CMD_PARSER_H + +extern int parse_args(POOLMEM *cmd, POOLMEM **args, int *argc, + char **argk, char **argv, int max_args); +extern int parse_args_only(POOLMEM *cmd, POOLMEM **args, int *argc, + char **argk, char **argv, int max_args); + +class cmd_parser { +public: + POOLMEM *args; + POOLMEM *cmd; /* plugin command line */ + POOLMEM *org; /* original command line */ + + char **argk; /* Argument keywords */ + char **argv; /* Argument values */ + int argc; /* Number of arguments */ + int max_cmd; /* Max number of arguments */ + bool handle_plugin_name; /* Search for : */ + + cmd_parser(bool handle_plugin_name=true) + : handle_plugin_name(handle_plugin_name) + { + org = get_pool_memory(PM_FNAME); + args = get_pool_memory(PM_FNAME); + cmd = get_pool_memory(PM_FNAME); + *args = *org = *cmd = 0; + argc = 0; + max_cmd = MAX_CMD_ARGS; + argk = argv = NULL; + }; + + virtual ~cmd_parser() { + free_pool_memory(org); + free_pool_memory(cmd); + free_pool_memory(args); + if (argk) { + free(argk); + } + if (argv) { + free(argv); + } + } + + /* + * Given a single keyword, find it in the argument list, but + * it must have a value + * Returns: -1 if not found or no value + * list index (base 0) on success + */ + int find_arg_with_value(const char *keyword) + { + for (int i=handle_plugin_name?1:0; imsg); + if (!bs->send()) { + Dmsg1(dbglvl, "Send challenge failed. ERR=%s\n", bs->bstrerror()); + return false; + } + Dmsg1(99, "sending resp to challenge: %s\n", bs->msg); + if (bs->wait_data(180) <= 0 || bs->recv() <= 0) { + Dmsg1(dbglvl, "Receive cram-md5 response failed. ERR=%s\n", bs->bstrerror()); + bmicrosleep(5, 0); + return false; + } + if (strcmp(bs->msg, "1000 OK auth\n") == 0) { + return true; + } + Dmsg1(dbglvl, "Received bad response: %s\n", bs->msg); + bmicrosleep(5, 0); + return false; +} diff --git a/src/lib/crc32.c b/src/lib/crc32.c new file mode 100644 index 00000000..a177f98b --- /dev/null +++ b/src/lib/crc32.c @@ -0,0 +1,546 @@ +/* + Bacula® - The Network Backup Solution + + Copyright (c) 2010-2015, Joakim Tjernlund + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ +/* + * Original 32 bit CRC. 
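As context for the table-driven CRC-32 code that follows: the tables are generated from the reflected polynomial 0xedb88320, and a bit-at-a-time reference (crc32_bitwise is illustrative, assuming the conventional RFC 2083 pre/post inversion) looks like this:

#include <stdint.h>
#include <stddef.h>

static uint32_t crc32_bitwise(const unsigned char *buf, size_t len)
{
   uint32_t crc = 0xffffffffUL;        /* initialise to all ones */

   while (len--) {
      crc ^= *buf++;
      for (int j = 0; j < 8; j++) {
         if (crc & 1) {
            crc = 0xedb88320UL ^ (crc >> 1);
         } else {
            crc = crc >> 1;
         }
      }
   }
   return crc ^ 0xffffffffUL;          /* final inversion */
}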
Algorithm from RFC 2083 (png format) + * + * By Kern Sibbald, January 2001 + * + * Improved, faster version + * + * By Joakim Tjernlund, 2010 + */ + + +#ifdef GENERATE_STATIC_CRC_TABLE +/* + * The following code can be used to generate the static CRC table. + * + * Note, the magic number 0xedb88320L below comes from the terms + * of the defining polynomial x^n, + * where n=0,1,2,4,5,7,8,10,11,12,16,22,23,26 + */ +#include + +main() +{ + unsigned long crc; + unsigned long buf[5]; + int i, j, k; + k = 0; + for (i = 0; i < 256; i++) { + crc = (unsigned long)i; + for (j = 0; j < 8; j++) { + if (crc & 1) { + crc = 0xedb88320L ^ (crc >> 1); + } else { + crc = crc >> 1; + } + } + buf[k++] = crc; + if (k == 5) { + k = 0; + printf(" 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x,\n", + buf[0], buf[1], buf[2], buf[3], buf[4]); + } + } + printf(" 0x%08x\n", buf[0]); +} +#endif + +#include "bacula.h" + +#ifdef HAVE_DARWIN_OS +#if !defined(HAVE_LITTLE_ENDIAN) && !defined(HAVE_BIG_ENDIAN) +#define HAVE_LITTLE_ENDIAN +#endif +#endif + +#if !defined(HAVE_LITTLE_ENDIAN) && !defined(HAVE_BIG_ENDIAN) +#error Either HAVE_LITTLE_ENDIAN or HAVE_BIG_ENDIAN must be defined! +#endif + +/* tole == To Little Endian */ +#ifdef HAVE_BIG_ENDIAN +#define tole(x) \ + ((uint32_t)( \ + (((uint32_t)(x) & (uint32_t)0x000000ffUL) << 24) | \ + (((uint32_t)(x) & (uint32_t)0x0000ff00UL) << 8) | \ + (((uint32_t)(x) & (uint32_t)0x00ff0000UL) >> 8) | \ + (((uint32_t)(x) & (uint32_t)0xff000000UL) >> 24) )) + +#else +#define tole(x) x +#endif + +/* + * The magic number 0xedb88320L below comes from the terms + * of the defining polynomial x^n, + * where n=0,1,2,4,5,7,8,10,11,12,16,22,23,26 + */ + +static const uint32_t tab[4][256] = {{ +tole(0x00000000L), tole(0x77073096L), tole(0xee0e612cL), tole(0x990951baL), +tole(0x076dc419L), tole(0x706af48fL), tole(0xe963a535L), tole(0x9e6495a3L), +tole(0x0edb8832L), tole(0x79dcb8a4L), tole(0xe0d5e91eL), tole(0x97d2d988L), +tole(0x09b64c2bL), tole(0x7eb17cbdL), tole(0xe7b82d07L), tole(0x90bf1d91L), +tole(0x1db71064L), tole(0x6ab020f2L), tole(0xf3b97148L), tole(0x84be41deL), +tole(0x1adad47dL), tole(0x6ddde4ebL), tole(0xf4d4b551L), tole(0x83d385c7L), +tole(0x136c9856L), tole(0x646ba8c0L), tole(0xfd62f97aL), tole(0x8a65c9ecL), +tole(0x14015c4fL), tole(0x63066cd9L), tole(0xfa0f3d63L), tole(0x8d080df5L), +tole(0x3b6e20c8L), tole(0x4c69105eL), tole(0xd56041e4L), tole(0xa2677172L), +tole(0x3c03e4d1L), tole(0x4b04d447L), tole(0xd20d85fdL), tole(0xa50ab56bL), +tole(0x35b5a8faL), tole(0x42b2986cL), tole(0xdbbbc9d6L), tole(0xacbcf940L), +tole(0x32d86ce3L), tole(0x45df5c75L), tole(0xdcd60dcfL), tole(0xabd13d59L), +tole(0x26d930acL), tole(0x51de003aL), tole(0xc8d75180L), tole(0xbfd06116L), +tole(0x21b4f4b5L), tole(0x56b3c423L), tole(0xcfba9599L), tole(0xb8bda50fL), +tole(0x2802b89eL), tole(0x5f058808L), tole(0xc60cd9b2L), tole(0xb10be924L), +tole(0x2f6f7c87L), tole(0x58684c11L), tole(0xc1611dabL), tole(0xb6662d3dL), +tole(0x76dc4190L), tole(0x01db7106L), tole(0x98d220bcL), tole(0xefd5102aL), +tole(0x71b18589L), tole(0x06b6b51fL), tole(0x9fbfe4a5L), tole(0xe8b8d433L), +tole(0x7807c9a2L), tole(0x0f00f934L), tole(0x9609a88eL), tole(0xe10e9818L), +tole(0x7f6a0dbbL), tole(0x086d3d2dL), tole(0x91646c97L), tole(0xe6635c01L), +tole(0x6b6b51f4L), tole(0x1c6c6162L), tole(0x856530d8L), tole(0xf262004eL), +tole(0x6c0695edL), tole(0x1b01a57bL), tole(0x8208f4c1L), tole(0xf50fc457L), +tole(0x65b0d9c6L), tole(0x12b7e950L), tole(0x8bbeb8eaL), tole(0xfcb9887cL), +tole(0x62dd1ddfL), tole(0x15da2d49L), tole(0x8cd37cf3L), 
tole(0xfbd44c65L), +tole(0x4db26158L), tole(0x3ab551ceL), tole(0xa3bc0074L), tole(0xd4bb30e2L), +tole(0x4adfa541L), tole(0x3dd895d7L), tole(0xa4d1c46dL), tole(0xd3d6f4fbL), +tole(0x4369e96aL), tole(0x346ed9fcL), tole(0xad678846L), tole(0xda60b8d0L), +tole(0x44042d73L), tole(0x33031de5L), tole(0xaa0a4c5fL), tole(0xdd0d7cc9L), +tole(0x5005713cL), tole(0x270241aaL), tole(0xbe0b1010L), tole(0xc90c2086L), +tole(0x5768b525L), tole(0x206f85b3L), tole(0xb966d409L), tole(0xce61e49fL), +tole(0x5edef90eL), tole(0x29d9c998L), tole(0xb0d09822L), tole(0xc7d7a8b4L), +tole(0x59b33d17L), tole(0x2eb40d81L), tole(0xb7bd5c3bL), tole(0xc0ba6cadL), +tole(0xedb88320L), tole(0x9abfb3b6L), tole(0x03b6e20cL), tole(0x74b1d29aL), +tole(0xead54739L), tole(0x9dd277afL), tole(0x04db2615L), tole(0x73dc1683L), +tole(0xe3630b12L), tole(0x94643b84L), tole(0x0d6d6a3eL), tole(0x7a6a5aa8L), +tole(0xe40ecf0bL), tole(0x9309ff9dL), tole(0x0a00ae27L), tole(0x7d079eb1L), +tole(0xf00f9344L), tole(0x8708a3d2L), tole(0x1e01f268L), tole(0x6906c2feL), +tole(0xf762575dL), tole(0x806567cbL), tole(0x196c3671L), tole(0x6e6b06e7L), +tole(0xfed41b76L), tole(0x89d32be0L), tole(0x10da7a5aL), tole(0x67dd4accL), +tole(0xf9b9df6fL), tole(0x8ebeeff9L), tole(0x17b7be43L), tole(0x60b08ed5L), +tole(0xd6d6a3e8L), tole(0xa1d1937eL), tole(0x38d8c2c4L), tole(0x4fdff252L), +tole(0xd1bb67f1L), tole(0xa6bc5767L), tole(0x3fb506ddL), tole(0x48b2364bL), +tole(0xd80d2bdaL), tole(0xaf0a1b4cL), tole(0x36034af6L), tole(0x41047a60L), +tole(0xdf60efc3L), tole(0xa867df55L), tole(0x316e8eefL), tole(0x4669be79L), +tole(0xcb61b38cL), tole(0xbc66831aL), tole(0x256fd2a0L), tole(0x5268e236L), +tole(0xcc0c7795L), tole(0xbb0b4703L), tole(0x220216b9L), tole(0x5505262fL), +tole(0xc5ba3bbeL), tole(0xb2bd0b28L), tole(0x2bb45a92L), tole(0x5cb36a04L), +tole(0xc2d7ffa7L), tole(0xb5d0cf31L), tole(0x2cd99e8bL), tole(0x5bdeae1dL), +tole(0x9b64c2b0L), tole(0xec63f226L), tole(0x756aa39cL), tole(0x026d930aL), +tole(0x9c0906a9L), tole(0xeb0e363fL), tole(0x72076785L), tole(0x05005713L), +tole(0x95bf4a82L), tole(0xe2b87a14L), tole(0x7bb12baeL), tole(0x0cb61b38L), +tole(0x92d28e9bL), tole(0xe5d5be0dL), tole(0x7cdcefb7L), tole(0x0bdbdf21L), +tole(0x86d3d2d4L), tole(0xf1d4e242L), tole(0x68ddb3f8L), tole(0x1fda836eL), +tole(0x81be16cdL), tole(0xf6b9265bL), tole(0x6fb077e1L), tole(0x18b74777L), +tole(0x88085ae6L), tole(0xff0f6a70L), tole(0x66063bcaL), tole(0x11010b5cL), +tole(0x8f659effL), tole(0xf862ae69L), tole(0x616bffd3L), tole(0x166ccf45L), +tole(0xa00ae278L), tole(0xd70dd2eeL), tole(0x4e048354L), tole(0x3903b3c2L), +tole(0xa7672661L), tole(0xd06016f7L), tole(0x4969474dL), tole(0x3e6e77dbL), +tole(0xaed16a4aL), tole(0xd9d65adcL), tole(0x40df0b66L), tole(0x37d83bf0L), +tole(0xa9bcae53L), tole(0xdebb9ec5L), tole(0x47b2cf7fL), tole(0x30b5ffe9L), +tole(0xbdbdf21cL), tole(0xcabac28aL), tole(0x53b39330L), tole(0x24b4a3a6L), +tole(0xbad03605L), tole(0xcdd70693L), tole(0x54de5729L), tole(0x23d967bfL), +tole(0xb3667a2eL), tole(0xc4614ab8L), tole(0x5d681b02L), tole(0x2a6f2b94L), +tole(0xb40bbe37L), tole(0xc30c8ea1L), tole(0x5a05df1bL), tole(0x2d02ef8dL)}, +{ +tole(0x00000000L), tole(0x191b3141L), tole(0x32366282L), tole(0x2b2d53c3L), +tole(0x646cc504L), tole(0x7d77f445L), tole(0x565aa786L), tole(0x4f4196c7L), +tole(0xc8d98a08L), tole(0xd1c2bb49L), tole(0xfaefe88aL), tole(0xe3f4d9cbL), +tole(0xacb54f0cL), tole(0xb5ae7e4dL), tole(0x9e832d8eL), tole(0x87981ccfL), +tole(0x4ac21251L), tole(0x53d92310L), tole(0x78f470d3L), tole(0x61ef4192L), +tole(0x2eaed755L), tole(0x37b5e614L), tole(0x1c98b5d7L), 
tole(0x05838496L), +tole(0x821b9859L), tole(0x9b00a918L), tole(0xb02dfadbL), tole(0xa936cb9aL), +tole(0xe6775d5dL), tole(0xff6c6c1cL), tole(0xd4413fdfL), tole(0xcd5a0e9eL), +tole(0x958424a2L), tole(0x8c9f15e3L), tole(0xa7b24620L), tole(0xbea97761L), +tole(0xf1e8e1a6L), tole(0xe8f3d0e7L), tole(0xc3de8324L), tole(0xdac5b265L), +tole(0x5d5daeaaL), tole(0x44469febL), tole(0x6f6bcc28L), tole(0x7670fd69L), +tole(0x39316baeL), tole(0x202a5aefL), tole(0x0b07092cL), tole(0x121c386dL), +tole(0xdf4636f3L), tole(0xc65d07b2L), tole(0xed705471L), tole(0xf46b6530L), +tole(0xbb2af3f7L), tole(0xa231c2b6L), tole(0x891c9175L), tole(0x9007a034L), +tole(0x179fbcfbL), tole(0x0e848dbaL), tole(0x25a9de79L), tole(0x3cb2ef38L), +tole(0x73f379ffL), tole(0x6ae848beL), tole(0x41c51b7dL), tole(0x58de2a3cL), +tole(0xf0794f05L), tole(0xe9627e44L), tole(0xc24f2d87L), tole(0xdb541cc6L), +tole(0x94158a01L), tole(0x8d0ebb40L), tole(0xa623e883L), tole(0xbf38d9c2L), +tole(0x38a0c50dL), tole(0x21bbf44cL), tole(0x0a96a78fL), tole(0x138d96ceL), +tole(0x5ccc0009L), tole(0x45d73148L), tole(0x6efa628bL), tole(0x77e153caL), +tole(0xbabb5d54L), tole(0xa3a06c15L), tole(0x888d3fd6L), tole(0x91960e97L), +tole(0xded79850L), tole(0xc7cca911L), tole(0xece1fad2L), tole(0xf5facb93L), +tole(0x7262d75cL), tole(0x6b79e61dL), tole(0x4054b5deL), tole(0x594f849fL), +tole(0x160e1258L), tole(0x0f152319L), tole(0x243870daL), tole(0x3d23419bL), +tole(0x65fd6ba7L), tole(0x7ce65ae6L), tole(0x57cb0925L), tole(0x4ed03864L), +tole(0x0191aea3L), tole(0x188a9fe2L), tole(0x33a7cc21L), tole(0x2abcfd60L), +tole(0xad24e1afL), tole(0xb43fd0eeL), tole(0x9f12832dL), tole(0x8609b26cL), +tole(0xc94824abL), tole(0xd05315eaL), tole(0xfb7e4629L), tole(0xe2657768L), +tole(0x2f3f79f6L), tole(0x362448b7L), tole(0x1d091b74L), tole(0x04122a35L), +tole(0x4b53bcf2L), tole(0x52488db3L), tole(0x7965de70L), tole(0x607eef31L), +tole(0xe7e6f3feL), tole(0xfefdc2bfL), tole(0xd5d0917cL), tole(0xcccba03dL), +tole(0x838a36faL), tole(0x9a9107bbL), tole(0xb1bc5478L), tole(0xa8a76539L), +tole(0x3b83984bL), tole(0x2298a90aL), tole(0x09b5fac9L), tole(0x10aecb88L), +tole(0x5fef5d4fL), tole(0x46f46c0eL), tole(0x6dd93fcdL), tole(0x74c20e8cL), +tole(0xf35a1243L), tole(0xea412302L), tole(0xc16c70c1L), tole(0xd8774180L), +tole(0x9736d747L), tole(0x8e2de606L), tole(0xa500b5c5L), tole(0xbc1b8484L), +tole(0x71418a1aL), tole(0x685abb5bL), tole(0x4377e898L), tole(0x5a6cd9d9L), +tole(0x152d4f1eL), tole(0x0c367e5fL), tole(0x271b2d9cL), tole(0x3e001cddL), +tole(0xb9980012L), tole(0xa0833153L), tole(0x8bae6290L), tole(0x92b553d1L), +tole(0xddf4c516L), tole(0xc4eff457L), tole(0xefc2a794L), tole(0xf6d996d5L), +tole(0xae07bce9L), tole(0xb71c8da8L), tole(0x9c31de6bL), tole(0x852aef2aL), +tole(0xca6b79edL), tole(0xd37048acL), tole(0xf85d1b6fL), tole(0xe1462a2eL), +tole(0x66de36e1L), tole(0x7fc507a0L), tole(0x54e85463L), tole(0x4df36522L), +tole(0x02b2f3e5L), tole(0x1ba9c2a4L), tole(0x30849167L), tole(0x299fa026L), +tole(0xe4c5aeb8L), tole(0xfdde9ff9L), tole(0xd6f3cc3aL), tole(0xcfe8fd7bL), +tole(0x80a96bbcL), tole(0x99b25afdL), tole(0xb29f093eL), tole(0xab84387fL), +tole(0x2c1c24b0L), tole(0x350715f1L), tole(0x1e2a4632L), tole(0x07317773L), +tole(0x4870e1b4L), tole(0x516bd0f5L), tole(0x7a468336L), tole(0x635db277L), +tole(0xcbfad74eL), tole(0xd2e1e60fL), tole(0xf9ccb5ccL), tole(0xe0d7848dL), +tole(0xaf96124aL), tole(0xb68d230bL), tole(0x9da070c8L), tole(0x84bb4189L), +tole(0x03235d46L), tole(0x1a386c07L), tole(0x31153fc4L), tole(0x280e0e85L), +tole(0x674f9842L), tole(0x7e54a903L), tole(0x5579fac0L), 
tole(0x4c62cb81L), +tole(0x8138c51fL), tole(0x9823f45eL), tole(0xb30ea79dL), tole(0xaa1596dcL), +tole(0xe554001bL), tole(0xfc4f315aL), tole(0xd7626299L), tole(0xce7953d8L), +tole(0x49e14f17L), tole(0x50fa7e56L), tole(0x7bd72d95L), tole(0x62cc1cd4L), +tole(0x2d8d8a13L), tole(0x3496bb52L), tole(0x1fbbe891L), tole(0x06a0d9d0L), +tole(0x5e7ef3ecL), tole(0x4765c2adL), tole(0x6c48916eL), tole(0x7553a02fL), +tole(0x3a1236e8L), tole(0x230907a9L), tole(0x0824546aL), tole(0x113f652bL), +tole(0x96a779e4L), tole(0x8fbc48a5L), tole(0xa4911b66L), tole(0xbd8a2a27L), +tole(0xf2cbbce0L), tole(0xebd08da1L), tole(0xc0fdde62L), tole(0xd9e6ef23L), +tole(0x14bce1bdL), tole(0x0da7d0fcL), tole(0x268a833fL), tole(0x3f91b27eL), +tole(0x70d024b9L), tole(0x69cb15f8L), tole(0x42e6463bL), tole(0x5bfd777aL), +tole(0xdc656bb5L), tole(0xc57e5af4L), tole(0xee530937L), tole(0xf7483876L), +tole(0xb809aeb1L), tole(0xa1129ff0L), tole(0x8a3fcc33L), tole(0x9324fd72L)}, +{ +tole(0x00000000L), tole(0x01c26a37L), tole(0x0384d46eL), tole(0x0246be59L), +tole(0x0709a8dcL), tole(0x06cbc2ebL), tole(0x048d7cb2L), tole(0x054f1685L), +tole(0x0e1351b8L), tole(0x0fd13b8fL), tole(0x0d9785d6L), tole(0x0c55efe1L), +tole(0x091af964L), tole(0x08d89353L), tole(0x0a9e2d0aL), tole(0x0b5c473dL), +tole(0x1c26a370L), tole(0x1de4c947L), tole(0x1fa2771eL), tole(0x1e601d29L), +tole(0x1b2f0bacL), tole(0x1aed619bL), tole(0x18abdfc2L), tole(0x1969b5f5L), +tole(0x1235f2c8L), tole(0x13f798ffL), tole(0x11b126a6L), tole(0x10734c91L), +tole(0x153c5a14L), tole(0x14fe3023L), tole(0x16b88e7aL), tole(0x177ae44dL), +tole(0x384d46e0L), tole(0x398f2cd7L), tole(0x3bc9928eL), tole(0x3a0bf8b9L), +tole(0x3f44ee3cL), tole(0x3e86840bL), tole(0x3cc03a52L), tole(0x3d025065L), +tole(0x365e1758L), tole(0x379c7d6fL), tole(0x35dac336L), tole(0x3418a901L), +tole(0x3157bf84L), tole(0x3095d5b3L), tole(0x32d36beaL), tole(0x331101ddL), +tole(0x246be590L), tole(0x25a98fa7L), tole(0x27ef31feL), tole(0x262d5bc9L), +tole(0x23624d4cL), tole(0x22a0277bL), tole(0x20e69922L), tole(0x2124f315L), +tole(0x2a78b428L), tole(0x2bbade1fL), tole(0x29fc6046L), tole(0x283e0a71L), +tole(0x2d711cf4L), tole(0x2cb376c3L), tole(0x2ef5c89aL), tole(0x2f37a2adL), +tole(0x709a8dc0L), tole(0x7158e7f7L), tole(0x731e59aeL), tole(0x72dc3399L), +tole(0x7793251cL), tole(0x76514f2bL), tole(0x7417f172L), tole(0x75d59b45L), +tole(0x7e89dc78L), tole(0x7f4bb64fL), tole(0x7d0d0816L), tole(0x7ccf6221L), +tole(0x798074a4L), tole(0x78421e93L), tole(0x7a04a0caL), tole(0x7bc6cafdL), +tole(0x6cbc2eb0L), tole(0x6d7e4487L), tole(0x6f38fadeL), tole(0x6efa90e9L), +tole(0x6bb5866cL), tole(0x6a77ec5bL), tole(0x68315202L), tole(0x69f33835L), +tole(0x62af7f08L), tole(0x636d153fL), tole(0x612bab66L), tole(0x60e9c151L), +tole(0x65a6d7d4L), tole(0x6464bde3L), tole(0x662203baL), tole(0x67e0698dL), +tole(0x48d7cb20L), tole(0x4915a117L), tole(0x4b531f4eL), tole(0x4a917579L), +tole(0x4fde63fcL), tole(0x4e1c09cbL), tole(0x4c5ab792L), tole(0x4d98dda5L), +tole(0x46c49a98L), tole(0x4706f0afL), tole(0x45404ef6L), tole(0x448224c1L), +tole(0x41cd3244L), tole(0x400f5873L), tole(0x4249e62aL), tole(0x438b8c1dL), +tole(0x54f16850L), tole(0x55330267L), tole(0x5775bc3eL), tole(0x56b7d609L), +tole(0x53f8c08cL), tole(0x523aaabbL), tole(0x507c14e2L), tole(0x51be7ed5L), +tole(0x5ae239e8L), tole(0x5b2053dfL), tole(0x5966ed86L), tole(0x58a487b1L), +tole(0x5deb9134L), tole(0x5c29fb03L), tole(0x5e6f455aL), tole(0x5fad2f6dL), +tole(0xe1351b80L), tole(0xe0f771b7L), tole(0xe2b1cfeeL), tole(0xe373a5d9L), +tole(0xe63cb35cL), tole(0xe7fed96bL), tole(0xe5b86732L), 
tole(0xe47a0d05L), +tole(0xef264a38L), tole(0xeee4200fL), tole(0xeca29e56L), tole(0xed60f461L), +tole(0xe82fe2e4L), tole(0xe9ed88d3L), tole(0xebab368aL), tole(0xea695cbdL), +tole(0xfd13b8f0L), tole(0xfcd1d2c7L), tole(0xfe976c9eL), tole(0xff5506a9L), +tole(0xfa1a102cL), tole(0xfbd87a1bL), tole(0xf99ec442L), tole(0xf85cae75L), +tole(0xf300e948L), tole(0xf2c2837fL), tole(0xf0843d26L), tole(0xf1465711L), +tole(0xf4094194L), tole(0xf5cb2ba3L), tole(0xf78d95faL), tole(0xf64fffcdL), +tole(0xd9785d60L), tole(0xd8ba3757L), tole(0xdafc890eL), tole(0xdb3ee339L), +tole(0xde71f5bcL), tole(0xdfb39f8bL), tole(0xddf521d2L), tole(0xdc374be5L), +tole(0xd76b0cd8L), tole(0xd6a966efL), tole(0xd4efd8b6L), tole(0xd52db281L), +tole(0xd062a404L), tole(0xd1a0ce33L), tole(0xd3e6706aL), tole(0xd2241a5dL), +tole(0xc55efe10L), tole(0xc49c9427L), tole(0xc6da2a7eL), tole(0xc7184049L), +tole(0xc25756ccL), tole(0xc3953cfbL), tole(0xc1d382a2L), tole(0xc011e895L), +tole(0xcb4dafa8L), tole(0xca8fc59fL), tole(0xc8c97bc6L), tole(0xc90b11f1L), +tole(0xcc440774L), tole(0xcd866d43L), tole(0xcfc0d31aL), tole(0xce02b92dL), +tole(0x91af9640L), tole(0x906dfc77L), tole(0x922b422eL), tole(0x93e92819L), +tole(0x96a63e9cL), tole(0x976454abL), tole(0x9522eaf2L), tole(0x94e080c5L), +tole(0x9fbcc7f8L), tole(0x9e7eadcfL), tole(0x9c381396L), tole(0x9dfa79a1L), +tole(0x98b56f24L), tole(0x99770513L), tole(0x9b31bb4aL), tole(0x9af3d17dL), +tole(0x8d893530L), tole(0x8c4b5f07L), tole(0x8e0de15eL), tole(0x8fcf8b69L), +tole(0x8a809decL), tole(0x8b42f7dbL), tole(0x89044982L), tole(0x88c623b5L), +tole(0x839a6488L), tole(0x82580ebfL), tole(0x801eb0e6L), tole(0x81dcdad1L), +tole(0x8493cc54L), tole(0x8551a663L), tole(0x8717183aL), tole(0x86d5720dL), +tole(0xa9e2d0a0L), tole(0xa820ba97L), tole(0xaa6604ceL), tole(0xaba46ef9L), +tole(0xaeeb787cL), tole(0xaf29124bL), tole(0xad6fac12L), tole(0xacadc625L), +tole(0xa7f18118L), tole(0xa633eb2fL), tole(0xa4755576L), tole(0xa5b73f41L), +tole(0xa0f829c4L), tole(0xa13a43f3L), tole(0xa37cfdaaL), tole(0xa2be979dL), +tole(0xb5c473d0L), tole(0xb40619e7L), tole(0xb640a7beL), tole(0xb782cd89L), +tole(0xb2cddb0cL), tole(0xb30fb13bL), tole(0xb1490f62L), tole(0xb08b6555L), +tole(0xbbd72268L), tole(0xba15485fL), tole(0xb853f606L), tole(0xb9919c31L), +tole(0xbcde8ab4L), tole(0xbd1ce083L), tole(0xbf5a5edaL), tole(0xbe9834edL)}, +{ +tole(0x00000000L), tole(0xb8bc6765L), tole(0xaa09c88bL), tole(0x12b5afeeL), +tole(0x8f629757L), tole(0x37def032L), tole(0x256b5fdcL), tole(0x9dd738b9L), +tole(0xc5b428efL), tole(0x7d084f8aL), tole(0x6fbde064L), tole(0xd7018701L), +tole(0x4ad6bfb8L), tole(0xf26ad8ddL), tole(0xe0df7733L), tole(0x58631056L), +tole(0x5019579fL), tole(0xe8a530faL), tole(0xfa109f14L), tole(0x42acf871L), +tole(0xdf7bc0c8L), tole(0x67c7a7adL), tole(0x75720843L), tole(0xcdce6f26L), +tole(0x95ad7f70L), tole(0x2d111815L), tole(0x3fa4b7fbL), tole(0x8718d09eL), +tole(0x1acfe827L), tole(0xa2738f42L), tole(0xb0c620acL), tole(0x087a47c9L), +tole(0xa032af3eL), tole(0x188ec85bL), tole(0x0a3b67b5L), tole(0xb28700d0L), +tole(0x2f503869L), tole(0x97ec5f0cL), tole(0x8559f0e2L), tole(0x3de59787L), +tole(0x658687d1L), tole(0xdd3ae0b4L), tole(0xcf8f4f5aL), tole(0x7733283fL), +tole(0xeae41086L), tole(0x525877e3L), tole(0x40edd80dL), tole(0xf851bf68L), +tole(0xf02bf8a1L), tole(0x48979fc4L), tole(0x5a22302aL), tole(0xe29e574fL), +tole(0x7f496ff6L), tole(0xc7f50893L), tole(0xd540a77dL), tole(0x6dfcc018L), +tole(0x359fd04eL), tole(0x8d23b72bL), tole(0x9f9618c5L), tole(0x272a7fa0L), +tole(0xbafd4719L), tole(0x0241207cL), tole(0x10f48f92L), 
tole(0xa848e8f7L), +tole(0x9b14583dL), tole(0x23a83f58L), tole(0x311d90b6L), tole(0x89a1f7d3L), +tole(0x1476cf6aL), tole(0xaccaa80fL), tole(0xbe7f07e1L), tole(0x06c36084L), +tole(0x5ea070d2L), tole(0xe61c17b7L), tole(0xf4a9b859L), tole(0x4c15df3cL), +tole(0xd1c2e785L), tole(0x697e80e0L), tole(0x7bcb2f0eL), tole(0xc377486bL), +tole(0xcb0d0fa2L), tole(0x73b168c7L), tole(0x6104c729L), tole(0xd9b8a04cL), +tole(0x446f98f5L), tole(0xfcd3ff90L), tole(0xee66507eL), tole(0x56da371bL), +tole(0x0eb9274dL), tole(0xb6054028L), tole(0xa4b0efc6L), tole(0x1c0c88a3L), +tole(0x81dbb01aL), tole(0x3967d77fL), tole(0x2bd27891L), tole(0x936e1ff4L), +tole(0x3b26f703L), tole(0x839a9066L), tole(0x912f3f88L), tole(0x299358edL), +tole(0xb4446054L), tole(0x0cf80731L), tole(0x1e4da8dfL), tole(0xa6f1cfbaL), +tole(0xfe92dfecL), tole(0x462eb889L), tole(0x549b1767L), tole(0xec277002L), +tole(0x71f048bbL), tole(0xc94c2fdeL), tole(0xdbf98030L), tole(0x6345e755L), +tole(0x6b3fa09cL), tole(0xd383c7f9L), tole(0xc1366817L), tole(0x798a0f72L), +tole(0xe45d37cbL), tole(0x5ce150aeL), tole(0x4e54ff40L), tole(0xf6e89825L), +tole(0xae8b8873L), tole(0x1637ef16L), tole(0x048240f8L), tole(0xbc3e279dL), +tole(0x21e91f24L), tole(0x99557841L), tole(0x8be0d7afL), tole(0x335cb0caL), +tole(0xed59b63bL), tole(0x55e5d15eL), tole(0x47507eb0L), tole(0xffec19d5L), +tole(0x623b216cL), tole(0xda874609L), tole(0xc832e9e7L), tole(0x708e8e82L), +tole(0x28ed9ed4L), tole(0x9051f9b1L), tole(0x82e4565fL), tole(0x3a58313aL), +tole(0xa78f0983L), tole(0x1f336ee6L), tole(0x0d86c108L), tole(0xb53aa66dL), +tole(0xbd40e1a4L), tole(0x05fc86c1L), tole(0x1749292fL), tole(0xaff54e4aL), +tole(0x322276f3L), tole(0x8a9e1196L), tole(0x982bbe78L), tole(0x2097d91dL), +tole(0x78f4c94bL), tole(0xc048ae2eL), tole(0xd2fd01c0L), tole(0x6a4166a5L), +tole(0xf7965e1cL), tole(0x4f2a3979L), tole(0x5d9f9697L), tole(0xe523f1f2L), +tole(0x4d6b1905L), tole(0xf5d77e60L), tole(0xe762d18eL), tole(0x5fdeb6ebL), +tole(0xc2098e52L), tole(0x7ab5e937L), tole(0x680046d9L), tole(0xd0bc21bcL), +tole(0x88df31eaL), tole(0x3063568fL), tole(0x22d6f961L), tole(0x9a6a9e04L), +tole(0x07bda6bdL), tole(0xbf01c1d8L), tole(0xadb46e36L), tole(0x15080953L), +tole(0x1d724e9aL), tole(0xa5ce29ffL), tole(0xb77b8611L), tole(0x0fc7e174L), +tole(0x9210d9cdL), tole(0x2aacbea8L), tole(0x38191146L), tole(0x80a57623L), +tole(0xd8c66675L), tole(0x607a0110L), tole(0x72cfaefeL), tole(0xca73c99bL), +tole(0x57a4f122L), tole(0xef189647L), tole(0xfdad39a9L), tole(0x45115eccL), +tole(0x764dee06L), tole(0xcef18963L), tole(0xdc44268dL), tole(0x64f841e8L), +tole(0xf92f7951L), tole(0x41931e34L), tole(0x5326b1daL), tole(0xeb9ad6bfL), +tole(0xb3f9c6e9L), tole(0x0b45a18cL), tole(0x19f00e62L), tole(0xa14c6907L), +tole(0x3c9b51beL), tole(0x842736dbL), tole(0x96929935L), tole(0x2e2efe50L), +tole(0x2654b999L), tole(0x9ee8defcL), tole(0x8c5d7112L), tole(0x34e11677L), +tole(0xa9362eceL), tole(0x118a49abL), tole(0x033fe645L), tole(0xbb838120L), +tole(0xe3e09176L), tole(0x5b5cf613L), tole(0x49e959fdL), tole(0xf1553e98L), +tole(0x6c820621L), tole(0xd43e6144L), tole(0xc68bceaaL), tole(0x7e37a9cfL), +tole(0xd67f4138L), tole(0x6ec3265dL), tole(0x7c7689b3L), tole(0xc4caeed6L), +tole(0x591dd66fL), tole(0xe1a1b10aL), tole(0xf3141ee4L), tole(0x4ba87981L), +tole(0x13cb69d7L), tole(0xab770eb2L), tole(0xb9c2a15cL), tole(0x017ec639L), +tole(0x9ca9fe80L), tole(0x241599e5L), tole(0x36a0360bL), tole(0x8e1c516eL), +tole(0x866616a7L), tole(0x3eda71c2L), tole(0x2c6fde2cL), tole(0x94d3b949L), +tole(0x090481f0L), tole(0xb1b8e695L), tole(0xa30d497bL), 
tole(0x1bb12e1eL), +tole(0x43d23e48L), tole(0xfb6e592dL), tole(0xe9dbf6c3L), tole(0x516791a6L), +tole(0xccb0a91fL), tole(0x740cce7aL), tole(0x66b96194L), tole(0xde0506f1L)}, +}; + +/* + * Calculate the PNG 32 bit CRC on a buffer + */ +uint32_t bcrc32(unsigned char*buf, int len) +{ +# if defined(HAVE_LITTLE_ENDIAN) +# define DO_CRC(x) crc = tab[0][(crc ^ (x)) & 255 ] ^ (crc >> 8) +# define DO_CRC4 crc = tab[3][(crc) & 255 ] ^ \ + tab[2][(crc >> 8) & 255 ] ^ \ + tab[1][(crc >> 16) & 255 ] ^ \ + tab[0][(crc >> 24) & 255 ] +# else +# define DO_CRC(x) crc = tab[0][((crc >> 24) ^ (x)) & 255] ^ (crc << 8) +# define DO_CRC4 crc = tab[0][(crc) & 255 ] ^ \ + tab[1][(crc >> 8) & 255 ] ^ \ + tab[2][(crc >> 16) & 255 ] ^ \ + tab[3][(crc >> 24) & 255 ] +# endif + const uint32_t *b; + size_t rem_len; + uint32_t crc = tole(~0); + + /* Align it */ + if ((intptr_t)buf & 3 && len) { + do { + DO_CRC(*buf++); + } while ((--len) && ((intptr_t)buf)&3); + } + rem_len = len & 3; + /* load data 32 bits wide, xor data 32 bits wide. */ + b = (const uint32_t *)buf; + len = len >> 2; + for (--b; len; --len) { + crc ^= *++b; /* use pre increment for speed */ + DO_CRC4; + } + len = rem_len; + /* And the last few bytes */ + if (len) { + uint8_t *p = (uint8_t *)(b + 1) - 1; + do { + DO_CRC(*++p); /* use pre increment for speed */ + } while (--len); + } + return tole(crc) ^ ~0; +} + + +#ifdef CRC32_SUM + +static void usage() +{ + fprintf(stderr, +"\n" +"Usage: crc32 \n" +" -? print this message.\n" +"\n\n"); + + exit(1); +} + +/* + * Reads a single ASCII file and prints the HEX md5 sum. + */ +#include +int main(int argc, char *argv[]) +{ + FILE *fd; + char buf[5000]; + int ch; + + while ((ch = getopt(argc, argv, "h?")) != -1) { + switch (ch) { + case 'h': + case '?': + default: + usage(); + } + } + + argc -= optind; + argv += optind; + + if (argc < 1) { + printf("Must have filename\n"); + exit(1); + } + + fd = fopen(argv[0], "rb"); + if (!fd) { + printf("Could not open %s: ERR=%s\n", argv[0], strerror(errno)); + exit(1); + } + uint32_t res; + while (fgets(buf, sizeof(buf), fd)) { + res = bcrc32((unsigned char *)buf, strlen(buf)); + printf("%02x\n", res); + } + printf(" %s\n", argv[0]); + fclose(fd); +} +#endif + +#ifndef TEST_PROGRAM +#define TEST_PROGRAM_A +#endif + +#ifdef TEST_PROGRAM +#include "unittests.h" + +static const unsigned char rnddata[512] = { + 0xa5, 0x7d, 0xa3, 0xc4, 0x2c, 0xa0, 0x08, 0xe9, 0x32, 0xb9, 0xc7, 0x84, 0xf6, 0xd3, 0xdf, 0x4f, + 0xec, 0xb4, 0x6d, 0x66, 0x19, 0x98, 0x58, 0x26, 0x7d, 0xc8, 0x82, 0xfb, 0x03, 0x76, 0x6f, 0x9e, + 0xbc, 0x51, 0xda, 0xe7, 0xf8, 0xca, 0x70, 0x4b, 0xcb, 0x9a, 0xad, 0x64, 0x76, 0x8c, 0xe2, 0x00, + 0x8c, 0xb0, 0x58, 0x16, 0x2b, 0xe2, 0x19, 0x8c, 0xd1, 0xa2, 0x19, 0x4a, 0x6a, 0x40, 0x17, 0x3e, + 0x0b, 0x59, 0x5f, 0x4c, 0x23, 0x12, 0x00, 0xea, 0xbc, 0x58, 0xa3, 0x25, 0xe1, 0x8e, 0x01, 0x91, + 0x9b, 0x44, 0x21, 0x8b, 0x24, 0x7c, 0x3a, 0x34, 0xce, 0xd9, 0x51, 0xbc, 0x62, 0x28, 0x18, 0x76, + 0x32, 0x89, 0x14, 0xae, 0x7c, 0x17, 0xae, 0xb4, 0xb7, 0xb0, 0x5e, 0xb4, 0x56, 0x01, 0x01, 0x5c, + 0x23, 0x03, 0x0f, 0x30, 0xa1, 0x5b, 0xd5, 0xb7, 0xeb, 0x23, 0x74, 0xbd, 0xc6, 0x81, 0x7c, 0x11, + 0x84, 0x90, 0x04, 0x8c, 0xe5, 0xda, 0x46, 0x0f, 0x06, 0x51, 0x05, 0xb9, 0x41, 0x10, 0xce, 0xfb, + 0x21, 0x55, 0xb3, 0x78, 0xe9, 0x8c, 0x72, 0x14, 0xc4, 0x13, 0x5c, 0x09, 0xd7, 0x45, 0xb1, 0x1f, + 0xde, 0x13, 0x51, 0x5c, 0xe8, 0xb9, 0xfa, 0x5d, 0x0d, 0x46, 0x20, 0x8e, 0xbf, 0x41, 0x04, 0x14, + 0xa4, 0xf3, 0x71, 0x73, 0x62, 0x76, 0x1e, 0x19, 0xc4, 0xfe, 0x1a, 0xb4, 0xc9, 0xc1, 0xf3, 0x41, + 0xb9, 0xd9, 
0x45, 0x4f, 0xc4, 0xe7, 0xb5, 0x2b, 0x31, 0x1c, 0x5e, 0xcc, 0xe4, 0xca, 0x5b, 0x81, + 0x6b, 0x97, 0xbd, 0x89, 0xf3, 0x3c, 0x76, 0xf0, 0x6c, 0x50, 0x62, 0x46, 0xbb, 0x0d, 0xbb, 0x5b, + 0x90, 0xaa, 0x25, 0x7b, 0x70, 0x2c, 0x37, 0xab, 0xba, 0x44, 0x5b, 0xa5, 0x15, 0x3e, 0xa6, 0x3d, + 0x5b, 0x7f, 0x1a, 0x36, 0xfb, 0x0d, 0x38, 0xe8, 0x42, 0x84, 0xf1, 0x74, 0xde, 0x7b, 0x17, 0x6f, + 0x68, 0x61, 0x6f, 0x93, 0xaa, 0xe2, 0xcc, 0x47, 0xfc, 0x95, 0x4d, 0x24, 0xc4, 0x26, 0x6c, 0x90, + 0xb3, 0x21, 0x16, 0x9f, 0x2c, 0x82, 0xb7, 0x99, 0x2f, 0x37, 0xb4, 0x5f, 0xad, 0x95, 0xec, 0x9e, + 0x84, 0xd8, 0xb9, 0x03, 0xc8, 0x61, 0x6e, 0xd5, 0xb6, 0x26, 0xb5, 0xa1, 0x08, 0x25, 0x15, 0xa3, + 0x0f, 0xc0, 0x0e, 0x53, 0x52, 0x7b, 0x4d, 0x96, 0xb2, 0x26, 0x8f, 0xe2, 0xc8, 0x35, 0xbe, 0xfa, + 0x18, 0x6a, 0xa1, 0x78, 0xc1, 0x5b, 0xf5, 0x2b, 0x25, 0xb7, 0x7a, 0xb5, 0x43, 0x08, 0x4d, 0xd3, + 0x3b, 0x71, 0xa4, 0x5e, 0xba, 0xba, 0x02, 0x67, 0x33, 0x11, 0x5b, 0xde, 0xa4, 0xc5, 0x11, 0x59, + 0xdf, 0xa1, 0x8c, 0x81, 0x51, 0x09, 0x87, 0xc4, 0x4e, 0xf4, 0x34, 0x3d, 0xcd, 0xd4, 0x04, 0x78, + 0xba, 0x3e, 0xc6, 0xdb, 0x5f, 0x9d, 0xac, 0x29, 0xa9, 0x7c, 0x7b, 0x59, 0x50, 0xe6, 0xc2, 0xb6, + 0xd8, 0x2d, 0xcb, 0x44, 0xf4, 0xf7, 0xb5, 0x64, 0xce, 0xd4, 0x8a, 0xd7, 0x28, 0x6b, 0xf8, 0x4c, + 0x8f, 0xc0, 0xcf, 0x20, 0xba, 0xf3, 0xe6, 0xe0, 0xd9, 0xca, 0x0f, 0x74, 0x84, 0xe8, 0x70, 0x25, + 0x0a, 0x8f, 0x2f, 0xc8, 0x7d, 0x06, 0x24, 0x38, 0x0d, 0x46, 0xc9, 0x00, 0x3a, 0xe3, 0x14, 0x88, + 0xfb, 0xde, 0x40, 0x21, 0xd9, 0x14, 0xab, 0x27, 0x3f, 0x35, 0x3b, 0x48, 0x8f, 0x47, 0x0c, 0x50, + 0x3f, 0x6c, 0x2d, 0x11, 0x24, 0xe2, 0xdf, 0x88, 0x1f, 0xcd, 0x00, 0x71, 0x62, 0x33, 0x5a, 0x0c, + 0xaf, 0x8c, 0x8c, 0xa1, 0xce, 0xd5, 0x74, 0x94, 0x12, 0xa4, 0xad, 0x59, 0xc3, 0x17, 0xad, 0x11, + 0x2b, 0x2c, 0x56, 0x8c, 0xe3, 0xbe, 0xda, 0x34, 0xbf, 0xf2, 0x1c, 0xf9, 0x5c, 0xb9, 0x24, 0x7d, + 0x74, 0xac, 0x54, 0x41, 0xce, 0x20, 0x13, 0xeb, 0xcd, 0xf5, 0xf0, 0x41, 0x66, 0x4e, 0x10, 0x2f, +}; + +/* These are a CRC32 of rnddata computed by external utility */ +const uint32_t res128 = 0x3D7E7BB7; +const uint32_t res128of = 0xAD78C1B6; +const uint32_t res256 = 0x86CA2622; +const uint32_t res256of = 0x0486BB38; +const uint32_t res512 = 0xE727C7DA; + +int main() +{ + Unittests crc32_test("crc32_test"); + uint32_t res; + + res = bcrc32((unsigned char *)&rnddata, 128); + // printf ("R: %X\n", res); + ok(res == res128, "Check CRC32 on 128B random data"); + res = bcrc32((unsigned char *)&rnddata, 256); + // printf ("R: %X\n", res); + ok(res == res256, "Check CRC32 on 256B random data"); + res = bcrc32((unsigned char *)&rnddata, 512); + // printf ("R: %X\n", res); + ok(res == res512, "Check CRC32 on 512B random data"); + res = bcrc32((unsigned char *)&rnddata[128], 128); + // printf ("R: %X\n", res); + ok(res == res128of, "Check CRC32 on 128B random data at offset"); + res = bcrc32((unsigned char *)&rnddata[256], 256); + // printf ("R: %X\n", res); + ok(res == res256of, "Check CRC32 on 256B random data at offset"); + + return report(); +}; +#endif /* TEST_PROGRAM */ \ No newline at end of file diff --git a/src/lib/crypto.c b/src/lib/crypto.c new file mode 100644 index 00000000..c8e9b2a3 --- /dev/null +++ b/src/lib/crypto.c @@ -0,0 +1,1585 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * crypto.c Encryption support functions + * + * Author: Landon Fuller + * + * This file was contributed to the Bacula project by Landon Fuller. + * + * Landon Fuller has been granted a perpetual, worldwide, non-exclusive, + * no-charge, royalty-free, irrevocable copyright license to reproduce, + * prepare derivative works of, publicly display, publicly perform, + * sublicense, and distribute the original work contributed by Landon Fuller + * to the Bacula project in source or object form. + * + * If you wish to license these contributions under an alternate open source + * license please contact Landon Fuller . + */ + + +#include "bacula.h" +#include "jcr.h" +#include + +/** + * For OpenSSL version 1.x, EVP_PKEY_encrypt no longer + * exists. It was not an official API. + */ +#ifdef HAVE_OPENSSLv1 +#define EVP_PKEY_encrypt EVP_PKEY_encrypt_old +#define EVP_PKEY_decrypt EVP_PKEY_decrypt_old +#endif + +/* + * Bacula ASN.1 Syntax + * + * OID Allocation: + * Prefix: iso.org.dod.internet.private.enterprise.threerings.external.bacula (1.3.6.1.4.1.22054.500.2) + * Organization: Bacula Project + * Contact Name: Kern Sibbald + * Contact E-mail: kern@sibbald.com + * + * Top Level Allocations - 500.2 + * 1 - Published Allocations + * 1.1 - Bacula Encryption + * + * Bacula Encryption - 500.2.1.1 + * 1 - ASN.1 Modules + * 1.1 - BaculaCrypto + * 2 - ASN.1 Object Identifiers + * 2.1 - SignatureData + * 2.2 - SignerInfo + * 2.3 - CryptoData + * 2.4 - RecipientInfo + * + * BaculaCrypto { iso(1) identified-organization(3) usdod(6) + * internet(1) private(4) enterprises(1) three-rings(22054) + * external(500) bacula(2) published(1) bacula-encryption(1) + * asn1-modules(1) bacula-crypto(1) } + * + * DEFINITIONS AUTOMATIC TAGS ::= + * BEGIN + * + * SignatureData ::= SEQUENCE { + * version Version DEFAULT v0, + * signerInfo SignerInfo } + * + * CryptoData ::= SEQUENCE { + * version Version DEFAULT v0, + * contentEncryptionAlgorithm ContentEncryptionAlgorithmIdentifier, + * iv InitializationVector, + * recipientInfo RecipientInfo + * } + * + * SignerInfo ::= SET OF SignerInfo + * RecipientInfo ::= SET OF RecipientInfo + * + * Version ::= INTEGER { v0(0) } + * + * SignerInfo ::= SEQUENCE { + * version Version, + * subjectKeyIdentifier SubjectKeyIdentifier, + * digestAlgorithm DigestAlgorithmIdentifier, + * signatureAlgorithm SignatureAlgorithmIdentifier, + * signature SignatureValue } + * + * RecipientInfo ::= SEQUENCE { + * version Version + * subjectKeyIdentifier SubjectKeyIdentifier + * keyEncryptionAlgorithm KeyEncryptionAlgorithmIdentifier + * encryptedKey EncryptedKey + * } + * + * SubjectKeyIdentifier ::= OCTET STRING + * + * DigestAlgorithmIdentifier ::= AlgorithmIdentifier + * + * SignatureAlgorithmIdentifier ::= AlgorithmIdentifier + * + * KeyEncryptionAlgorithmIdentifier ::= AlgorithmIdentifier + * + * ContentEncryptionAlgorithmIdentifier ::= AlgorithmIdentifier + * + * InitializationVector ::= OCTET STRING + * + * SignatureValue ::= OCTET STRING + * + * EncryptedKey ::= OCTET STRING + * + * AlgorithmIdentifier ::= OBJECT IDENTIFIER + * + * END + */ + +#ifdef HAVE_CRYPTO /* Is encryption enabled? 
*/ +#ifdef HAVE_OPENSSL /* How about OpenSSL? */ + +#include "openssl-compat.h" + +/* ASN.1 Declarations */ +#define BACULA_ASN1_VERSION 0 + +typedef struct { + ASN1_INTEGER *version; + ASN1_OCTET_STRING *subjectKeyIdentifier; + ASN1_OBJECT *digestAlgorithm; + ASN1_OBJECT *signatureAlgorithm; + ASN1_OCTET_STRING *signature; +} SignerInfo; + +typedef struct { + ASN1_INTEGER *version; + ASN1_OCTET_STRING *subjectKeyIdentifier; + ASN1_OBJECT *keyEncryptionAlgorithm; + ASN1_OCTET_STRING *encryptedKey; +} RecipientInfo; + +ASN1_SEQUENCE(SignerInfo) = { + ASN1_SIMPLE(SignerInfo, version, ASN1_INTEGER), + ASN1_SIMPLE(SignerInfo, subjectKeyIdentifier, ASN1_OCTET_STRING), + ASN1_SIMPLE(SignerInfo, digestAlgorithm, ASN1_OBJECT), + ASN1_SIMPLE(SignerInfo, signatureAlgorithm, ASN1_OBJECT), + ASN1_SIMPLE(SignerInfo, signature, ASN1_OCTET_STRING) +} ASN1_SEQUENCE_END(SignerInfo); + +ASN1_SEQUENCE(RecipientInfo) = { + ASN1_SIMPLE(RecipientInfo, version, ASN1_INTEGER), + ASN1_SIMPLE(RecipientInfo, subjectKeyIdentifier, ASN1_OCTET_STRING), + ASN1_SIMPLE(RecipientInfo, keyEncryptionAlgorithm, ASN1_OBJECT), + ASN1_SIMPLE(RecipientInfo, encryptedKey, ASN1_OCTET_STRING), +} ASN1_SEQUENCE_END(RecipientInfo); + +typedef struct { + ASN1_INTEGER *version; + STACK_OF(SignerInfo) *signerInfo; +} SignatureData; + +typedef struct { + ASN1_INTEGER *version; + ASN1_OBJECT *contentEncryptionAlgorithm; + ASN1_OCTET_STRING *iv; + STACK_OF(RecipientInfo) *recipientInfo; +} CryptoData; + +ASN1_SEQUENCE(SignatureData) = { + ASN1_SIMPLE(SignatureData, version, ASN1_INTEGER), + ASN1_SET_OF(SignatureData, signerInfo, SignerInfo), +} ASN1_SEQUENCE_END(SignatureData); + +ASN1_SEQUENCE(CryptoData) = { + ASN1_SIMPLE(CryptoData, version, ASN1_INTEGER), + ASN1_SIMPLE(CryptoData, contentEncryptionAlgorithm, ASN1_OBJECT), + ASN1_SIMPLE(CryptoData, iv, ASN1_OCTET_STRING), + ASN1_SET_OF(CryptoData, recipientInfo, RecipientInfo) +} ASN1_SEQUENCE_END(CryptoData); + +IMPLEMENT_ASN1_FUNCTIONS(SignerInfo) +IMPLEMENT_ASN1_FUNCTIONS(RecipientInfo) +IMPLEMENT_ASN1_FUNCTIONS(SignatureData) +IMPLEMENT_ASN1_FUNCTIONS(CryptoData) + +#if defined(DEFINE_STACK_OF) +DEFINE_STACK_OF(SignerInfo); +DEFINE_STACK_OF(RecipientInfo); +#else +/* + * SignerInfo and RecipientInfo stack macros, generated by OpenSSL's util/mkstack.pl. 
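+ *
+ * Note (editor's addition): these wrappers are only compiled when the
+ * OpenSSL headers do not provide DEFINE_STACK_OF (typically pre-1.1.0
+ * releases, the case selected by the #else above); newer releases use
+ * the DEFINE_STACK_OF() declarations instead.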
+ */ +#define sk_SignerInfo_new(st) SKM_sk_new(SignerInfo, (st)) +#define sk_SignerInfo_new_null() SKM_sk_new_null(SignerInfo) +#define sk_SignerInfo_free(st) SKM_sk_free(SignerInfo, (st)) +#define sk_SignerInfo_num(st) SKM_sk_num(SignerInfo, (st)) +#define sk_SignerInfo_value(st, i) SKM_sk_value(SignerInfo, (st), (i)) +#define sk_SignerInfo_set(st, i, val) SKM_sk_set(SignerInfo, (st), (i), (val)) +#define sk_SignerInfo_zero(st) SKM_sk_zero(SignerInfo, (st)) +#define sk_SignerInfo_push(st, val) SKM_sk_push(SignerInfo, (st), (val)) +#define sk_SignerInfo_unshift(st, val) SKM_sk_unshift(SignerInfo, (st), (val)) +#define sk_SignerInfo_find(st, val) SKM_sk_find(SignerInfo, (st), (val)) +#define sk_SignerInfo_delete(st, i) SKM_sk_delete(SignerInfo, (st), (i)) +#define sk_SignerInfo_delete_ptr(st, ptr) SKM_sk_delete_ptr(SignerInfo, (st), (ptr)) +#define sk_SignerInfo_insert(st, val, i) SKM_sk_insert(SignerInfo, (st), (val), (i)) +#define sk_SignerInfo_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(SignerInfo, (st), (cmp)) +#define sk_SignerInfo_dup(st) SKM_sk_dup(SignerInfo, st) +#define sk_SignerInfo_pop_free(st, free_func) SKM_sk_pop_free(SignerInfo, (st), (free_func)) +#define sk_SignerInfo_shift(st) SKM_sk_shift(SignerInfo, (st)) +#define sk_SignerInfo_pop(st) SKM_sk_pop(SignerInfo, (st)) +#define sk_SignerInfo_sort(st) SKM_sk_sort(SignerInfo, (st)) +#define sk_SignerInfo_is_sorted(st) SKM_sk_is_sorted(SignerInfo, (st)) + +#define d2i_ASN1_SET_OF_SignerInfo(st, pp, length, d2i_func, free_func, ex_tag, ex_class) \ + SKM_ASN1_SET_OF_d2i(SignerInfo, (st), (pp), (length), (d2i_func), (free_func), (ex_tag), (ex_class)) +#define i2d_ASN1_SET_OF_SignerInfo(st, pp, i2d_func, ex_tag, ex_class, is_set) \ + SKM_ASN1_SET_OF_i2d(SignerInfo, (st), (pp), (i2d_func), (ex_tag), (ex_class), (is_set)) +#define ASN1_seq_pack_SignerInfo(st, i2d_func, buf, len) \ + SKM_ASN1_seq_pack(SignerInfo, (st), (i2d_func), (buf), (len)) +#define ASN1_seq_unpack_SignerInfo(buf, len, d2i_func, free_func) \ + SKM_ASN1_seq_unpack(SignerInfo, (buf), (len), (d2i_func), (free_func)) + +#define sk_RecipientInfo_new(st) SKM_sk_new(RecipientInfo, (st)) +#define sk_RecipientInfo_new_null() SKM_sk_new_null(RecipientInfo) +#define sk_RecipientInfo_free(st) SKM_sk_free(RecipientInfo, (st)) +#define sk_RecipientInfo_num(st) SKM_sk_num(RecipientInfo, (st)) +#define sk_RecipientInfo_value(st, i) SKM_sk_value(RecipientInfo, (st), (i)) +#define sk_RecipientInfo_set(st, i, val) SKM_sk_set(RecipientInfo, (st), (i), (val)) +#define sk_RecipientInfo_zero(st) SKM_sk_zero(RecipientInfo, (st)) +#define sk_RecipientInfo_push(st, val) SKM_sk_push(RecipientInfo, (st), (val)) +#define sk_RecipientInfo_unshift(st, val) SKM_sk_unshift(RecipientInfo, (st), (val)) +#define sk_RecipientInfo_find(st, val) SKM_sk_find(RecipientInfo, (st), (val)) +#define sk_RecipientInfo_delete(st, i) SKM_sk_delete(RecipientInfo, (st), (i)) +#define sk_RecipientInfo_delete_ptr(st, ptr) SKM_sk_delete_ptr(RecipientInfo, (st), (ptr)) +#define sk_RecipientInfo_insert(st, val, i) SKM_sk_insert(RecipientInfo, (st), (val), (i)) +#define sk_RecipientInfo_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(RecipientInfo, (st), (cmp)) +#define sk_RecipientInfo_dup(st) SKM_sk_dup(RecipientInfo, st) +#define sk_RecipientInfo_pop_free(st, free_func) SKM_sk_pop_free(RecipientInfo, (st), (free_func)) +#define sk_RecipientInfo_shift(st) SKM_sk_shift(RecipientInfo, (st)) +#define sk_RecipientInfo_pop(st) SKM_sk_pop(RecipientInfo, (st)) +#define sk_RecipientInfo_sort(st) SKM_sk_sort(RecipientInfo, (st)) 
+#define sk_RecipientInfo_is_sorted(st) SKM_sk_is_sorted(RecipientInfo, (st)) + +#define d2i_ASN1_SET_OF_RecipientInfo(st, pp, length, d2i_func, free_func, ex_tag, ex_class) \ + SKM_ASN1_SET_OF_d2i(RecipientInfo, (st), (pp), (length), (d2i_func), (free_func), (ex_tag), (ex_class)) +#define i2d_ASN1_SET_OF_RecipientInfo(st, pp, i2d_func, ex_tag, ex_class, is_set) \ + SKM_ASN1_SET_OF_i2d(RecipientInfo, (st), (pp), (i2d_func), (ex_tag), (ex_class), (is_set)) +#define ASN1_seq_pack_RecipientInfo(st, i2d_func, buf, len) \ + SKM_ASN1_seq_pack(RecipientInfo, (st), (i2d_func), (buf), (len)) +#define ASN1_seq_unpack_RecipientInfo(buf, len, d2i_func, free_func) \ + SKM_ASN1_seq_unpack(RecipientInfo, (buf), (len), (d2i_func), (free_func)) +/* End of util/mkstack.pl block */ +#endif + +/* X509 Public/Private Key Pair Structure */ +struct X509_Keypair { + ASN1_OCTET_STRING *keyid; + EVP_PKEY *pubkey; + EVP_PKEY *privkey; +}; + +/* Message Digest Structure */ +struct Digest { + crypto_digest_t type; + JCR *jcr; + EVP_MD_CTX *ctx; +}; + +/* Message Signature Structure */ +struct Signature { + SignatureData *sigData; + JCR *jcr; +}; + +/* Encryption Session Data */ +struct Crypto_Session { + CryptoData *cryptoData; /* ASN.1 Structure */ + unsigned char *session_key; /* Private symmetric session key */ + size_t session_key_len; /* Symmetric session key length */ +}; + +/* Symmetric Cipher Context */ +struct Cipher_Context { + EVP_CIPHER_CTX *ctx; +}; + +/* PEM Password Dispatch Context */ +typedef struct PEM_CB_Context { + CRYPTO_PEM_PASSWD_CB *pem_callback; + const void *pem_userdata; +} PEM_CB_CONTEXT; + +/* + * Extract subjectKeyIdentifier from x509 certificate. + * Returns: On success, an ASN1_OCTET_STRING that must be freed via ASN1_OCTET_STRING_free(). + * NULL on failure. + */ +static ASN1_OCTET_STRING *openssl_cert_keyid(X509 *cert) { + X509_EXTENSION *ext; + const X509V3_EXT_METHOD *method; + ASN1_OCTET_STRING *keyid; + int i; + const ASN1_STRING *asn1_ext_val; + const unsigned char *ext_value_data; + + /* Find the index to the subjectKeyIdentifier extension */ + i = X509_get_ext_by_NID(cert, NID_subject_key_identifier, -1); + if (i < 0) { + /* Not found */ + return NULL; + } + + /* Grab the extension */ + ext = X509_get_ext(cert, i); + + /* Get x509 extension method structure */ + if (!(method = X509V3_EXT_get(ext))) { + return NULL; + } + + asn1_ext_val = X509_EXTENSION_get_data(ext); + ext_value_data = ASN1_STRING_get0_data(asn1_ext_val); + + if (method->it) { + /* New style ASN1 */ + + /* Decode ASN1 item in data */ + keyid = (ASN1_OCTET_STRING *) ASN1_item_d2i(NULL, &ext_value_data, ASN1_STRING_length(asn1_ext_val), + ASN1_ITEM_ptr(method->it)); + } else { + /* Old style ASN1 */ + + /* Decode ASN1 item in data */ + keyid = (ASN1_OCTET_STRING *) method->d2i(NULL, &ext_value_data, ASN1_STRING_length(asn1_ext_val)); + } + + return keyid; +} + +/* + * Create a new keypair object. + * Returns: A pointer to a X509 KEYPAIR object on success. + * NULL on failure. + */ +X509_KEYPAIR *crypto_keypair_new(void) +{ + X509_KEYPAIR *keypair; + + /* Allocate our keypair structure */ + keypair = (X509_KEYPAIR *)malloc(sizeof(X509_KEYPAIR)); + + /* Initialize our keypair structure */ + keypair->keyid = NULL; + keypair->pubkey = NULL; + keypair->privkey = NULL; + + return keypair; +} + +/* + * Create a copy of a keypair object. The underlying + * EVP objects are not duplicated, as no EVP_PKEY_dup() + * API is available. Instead, the reference count is + * incremented. 
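+ *
+ * Illustrative setup for the keypair being duplicated (a sketch only,
+ * not part of the original source; the certificate and key file names
+ * are hypothetical):
+ *
+ *   X509_KEYPAIR *kp = crypto_keypair_new();
+ *   if (kp && crypto_keypair_load_cert(kp, "fd.cert") &&
+ *       crypto_keypair_load_key(kp, "fd.key", NULL, NULL)) {
+ *      X509_KEYPAIR *copy = crypto_keypair_dup(kp);
+ *      ...
+ *      if (copy) crypto_keypair_free(copy);
+ *   }
+ *   if (kp) crypto_keypair_free(kp);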
+ */ +X509_KEYPAIR *crypto_keypair_dup(X509_KEYPAIR *keypair) +{ + X509_KEYPAIR *newpair; + int ret; + + newpair = crypto_keypair_new(); + + if (!newpair) { + /* Allocation failed */ + return NULL; + } + + /* Increment the public key ref count */ + if (keypair->pubkey) { + ret = EVP_PKEY_up_ref(keypair->pubkey); + if (ret == 0) + goto out_free_new; + newpair->pubkey = keypair->pubkey; + } + + /* Increment the private key ref count */ + if (keypair->privkey) { + ret = EVP_PKEY_up_ref(keypair->privkey); + if (ret == 0) + goto out_free_new; + newpair->privkey = keypair->privkey; + } + + /* Duplicate the keyid */ + if (keypair->keyid) { + newpair->keyid = ASN1_OCTET_STRING_dup(keypair->keyid); + if (!newpair->keyid) + goto out_free_new; + } + + return newpair; + +out_free_new: + crypto_keypair_free(newpair); + return NULL; +} + + +/* + * Load a public key from a PEM-encoded x509 certificate. + * Returns: true on success + * false on failure + */ +int crypto_keypair_load_cert(X509_KEYPAIR *keypair, const char *file) +{ + BIO *bio; + X509 *cert; + + /* Open the file */ + if (!(bio = BIO_new_file(file, "r"))) { + openssl_post_errors(M_ERROR, _("Unable to open certificate file")); + return false; + } + + cert = PEM_read_bio_X509(bio, NULL, NULL, NULL); + BIO_free(bio); + if (!cert) { + openssl_post_errors(M_ERROR, _("Unable to read certificate from file")); + return false; + } + + /* Extract the public key */ + if (!(keypair->pubkey = X509_get_pubkey(cert))) { + openssl_post_errors(M_ERROR, _("Unable to extract public key from certificate")); + goto err; + } + + /* Extract the subjectKeyIdentifier extension field */ + if ((keypair->keyid = openssl_cert_keyid(cert)) == NULL) { + Jmsg0(NULL, M_ERROR, 0, + _("Provided certificate does not include the required subjectKeyIdentifier extension.")); + goto err; + } + + /* Validate the public key type (only RSA is supported) */ + if (EVP_PKEY_base_id(keypair->pubkey) != EVP_PKEY_RSA) { + Jmsg1(NULL, M_ERROR, 0, + _("Unsupported key type provided: %d\n"), EVP_PKEY_id(keypair->pubkey)); + goto err; + } + + X509_free(cert); + return true; + +err: + X509_free(cert); + if (keypair->pubkey) { + EVP_PKEY_free(keypair->pubkey); + } + return false; +} + +/* Dispatch user PEM encryption callbacks */ +static int crypto_pem_callback_dispatch (char *buf, int size, int rwflag, void *userdata) +{ + PEM_CB_CONTEXT *ctx = (PEM_CB_CONTEXT *) userdata; + return (ctx->pem_callback(buf, size, ctx->pem_userdata)); +} + +/* + * Check a PEM-encoded file + * for the existence of a private key. + * Returns: true if a private key is found + * false otherwise + */ +bool crypto_keypair_has_key(const char *file) { + BIO *bio; + char *name = NULL; + char *header = NULL; + unsigned char *data = NULL; + bool retval = false; + long len; + + if (!(bio = BIO_new_file(file, "r"))) { + openssl_post_errors(M_ERROR, _("Unable to open private key file")); + return false; + } + + while (PEM_read_bio(bio, &name, &header, &data, &len)) { + /* We don't care what the data is, just that it's there */ + OPENSSL_free(header); + OPENSSL_free(data); + + /* + * PEM Header Found, check for a private key + * Due to OpenSSL limitations, we must specifically + * list supported PEM private key encodings. 
+ */ + if (strcmp(name, PEM_STRING_RSA) == 0 + || strcmp(name, PEM_STRING_DSA) == 0 + || strcmp(name, PEM_STRING_PKCS8) == 0 + || strcmp(name, PEM_STRING_PKCS8INF) == 0) { + retval = true; + OPENSSL_free(name); + break; + } else { + OPENSSL_free(name); + } + } + + /* Free our bio */ + BIO_free(bio); + + /* Post PEM-decoding error messages, if any */ + openssl_post_errors(M_ERROR, _("Unable to read private key from file")); + return retval; +} + +/* + * Load a PEM-encoded private key. + * Returns: true on success + * false on failure + */ +int crypto_keypair_load_key(X509_KEYPAIR *keypair, const char *file, + CRYPTO_PEM_PASSWD_CB *pem_callback, + const void *pem_userdata) +{ + BIO *bio; + PEM_CB_CONTEXT ctx; + + /* Open the file */ + if (!(bio = BIO_new_file(file, "r"))) { + openssl_post_errors(M_ERROR, _("Unable to open private key file")); + return false; + } + + /* Set up PEM encryption callback */ + if (pem_callback) { + ctx.pem_callback = pem_callback; + ctx.pem_userdata = pem_userdata; + } else { + ctx.pem_callback = crypto_default_pem_callback; + ctx.pem_userdata = NULL; + } + + keypair->privkey = PEM_read_bio_PrivateKey(bio, NULL, crypto_pem_callback_dispatch, &ctx); + BIO_free(bio); + if (!keypair->privkey) { + openssl_post_errors(M_ERROR, _("Unable to read private key from file")); + return false; + } + + return true; +} + +/* + * Free memory associated with a keypair object. + */ +void crypto_keypair_free(X509_KEYPAIR *keypair) +{ + if (keypair->pubkey) { + EVP_PKEY_free(keypair->pubkey); + } + if (keypair->privkey) { + EVP_PKEY_free(keypair->privkey); + } + if (keypair->keyid) { + ASN1_OCTET_STRING_free(keypair->keyid); + } + free(keypair); +} + +/* + * Create a new message digest context of the specified type + * Returns: A pointer to a DIGEST object on success. + * NULL on failure. + */ +DIGEST *crypto_digest_new(JCR *jcr, crypto_digest_t type) +{ + DIGEST *digest; + const EVP_MD *md = NULL; /* Quell invalid uninitialized warnings */ + + digest = (DIGEST *)malloc(sizeof(DIGEST)); + digest->type = type; + digest->jcr = jcr; + Dmsg1(150, "crypto_digest_new jcr=%p\n", jcr); + + /* Initialize the OpenSSL message digest context */ + digest->ctx = EVP_MD_CTX_new(); + if (!digest->ctx) + goto err; + EVP_MD_CTX_reset(digest->ctx); + + /* Determine the correct OpenSSL message digest type */ + switch (type) { + case CRYPTO_DIGEST_MD5: + md = EVP_md5(); + break; + case CRYPTO_DIGEST_SHA1: + md = EVP_sha1(); + break; +#ifdef HAVE_SHA2 + case CRYPTO_DIGEST_SHA256: + md = EVP_sha256(); + break; + case CRYPTO_DIGEST_SHA512: + md = EVP_sha512(); + break; +#endif + default: + Jmsg1(jcr, M_ERROR, 0, _("Unsupported digest type: %d\n"), type); + goto err; + } + + /* Initialize the backing OpenSSL context */ + if (EVP_DigestInit_ex(digest->ctx, md, NULL) == 0) { + goto err; + } + + return digest; + +err: + /* This should not happen, but never say never ... */ + Dmsg0(150, "Digest init failed.\n"); + openssl_post_errors(jcr, M_ERROR, _("OpenSSL digest initialization failed")); + crypto_digest_free(digest); + return NULL; +} + +/* + * Hash length bytes of data into the provided digest context. 
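+ *
+ * Typical call sequence (an illustrative sketch, not part of the
+ * original source; jcr, buf and buf_len are assumed to exist, and
+ * CRYPTO_DIGEST_MAX_SIZE is assumed to be the bound declared in
+ * crypto.h):
+ *
+ *   DIGEST *d = crypto_digest_new(jcr, CRYPTO_DIGEST_SHA1);
+ *   uint8_t md[CRYPTO_DIGEST_MAX_SIZE];
+ *   uint32_t md_len = sizeof(md);
+ *   if (d && crypto_digest_update(d, buf, buf_len) &&
+ *       crypto_digest_finalize(d, md, &md_len)) {
+ *      ... md now holds md_len digest bytes ...
+ *   }
+ *   if (d) crypto_digest_free(d);
+ *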
+ * Returns: true on success + * false on failure + */ +bool crypto_digest_update(DIGEST *digest, const uint8_t *data, uint32_t length) +{ + if (EVP_DigestUpdate(digest->ctx, data, length) == 0) { + Dmsg0(150, "digest update failed\n"); + openssl_post_errors(digest->jcr, M_ERROR, _("OpenSSL digest update failed")); + return false; + } else { + return true; + } +} + +/* + * Finalize the data in digest, storing the result in dest and the result size + * in length. The result size can be determined with crypto_digest_size(). + * + * Returns: true on success + * false on failure + */ +bool crypto_digest_finalize(DIGEST *digest, uint8_t *dest, uint32_t *length) +{ + if (!EVP_DigestFinal(digest->ctx, dest, (unsigned int *)length)) { + Dmsg0(150, "digest finalize failed\n"); + openssl_post_errors(digest->jcr, M_ERROR, _("OpenSSL digest finalize failed")); + return false; + } else { + return true; + } +} + +/* + * Free memory associated with a digest object. + */ +void crypto_digest_free(DIGEST *digest) +{ + EVP_MD_CTX_free(digest->ctx); + free(digest); +} + +/* + * Create a new message signature context. + * Returns: A pointer to a SIGNATURE object on success. + * NULL on failure. + */ +SIGNATURE *crypto_sign_new(JCR *jcr) +{ + SIGNATURE *sig; + + sig = (SIGNATURE *)malloc(sizeof(SIGNATURE)); + if (!sig) { + return NULL; + } + + sig->sigData = SignatureData_new(); + sig->jcr = jcr; + Dmsg1(150, "crypto_sign_new jcr=%p\n", jcr); + + if (!sig->sigData) { + /* Allocation failed in OpenSSL */ + free(sig); + return NULL; + } + + /* Set the ASN.1 structure version number */ + ASN1_INTEGER_set(sig->sigData->version, BACULA_ASN1_VERSION); + + return sig; +} + +/* + * For a given public key, find the associated SignatureInfo record + * and create a digest context for signature validation + * + * Returns: CRYPTO_ERROR_NONE on success, with the newly allocated DIGEST in digest. + * A crypto_error_t value on failure. 
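+ *
+ * Sketch of the surrounding verification flow (illustrative only, not
+ * part of the original source; sig_buf, sig_len, data and data_len are
+ * hypothetical):
+ *
+ *   SIGNATURE *sig = crypto_sign_decode(jcr, sig_buf, sig_len);
+ *   DIGEST *digest = NULL;
+ *   crypto_digest_t type;
+ *   if (sig && crypto_sign_get_digest(sig, keypair, type, &digest) == CRYPTO_ERROR_NONE) {
+ *      crypto_digest_update(digest, data, data_len);   (repeated per data block)
+ *      crypto_error_t err = crypto_sign_verify(sig, keypair, digest);
+ *      ...
+ *      crypto_digest_free(digest);
+ *   }
+ *   if (sig) crypto_sign_free(sig);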
+ */ +crypto_error_t crypto_sign_get_digest(SIGNATURE *sig, X509_KEYPAIR *keypair, + crypto_digest_t &type, DIGEST **digest) +{ + STACK_OF(SignerInfo) *signers; + SignerInfo *si; + int i; + + signers = sig->sigData->signerInfo; + + for (i = 0; i < sk_SignerInfo_num(signers); i++) { + si = sk_SignerInfo_value(signers, i); + if (ASN1_OCTET_STRING_cmp(keypair->keyid, si->subjectKeyIdentifier) == 0) { + /* Get the digest algorithm and allocate a digest context */ + Dmsg1(150, "crypto_sign_get_digest jcr=%p\n", sig->jcr); + switch (OBJ_obj2nid(si->digestAlgorithm)) { + case NID_md5: + Dmsg0(100, "sign digest algorithm is MD5\n"); + type = CRYPTO_DIGEST_MD5; + *digest = crypto_digest_new(sig->jcr, CRYPTO_DIGEST_MD5); + break; + case NID_sha1: + Dmsg0(100, "sign digest algorithm is SHA1\n"); + type = CRYPTO_DIGEST_SHA1; + *digest = crypto_digest_new(sig->jcr, CRYPTO_DIGEST_SHA1); + break; +#ifdef HAVE_SHA2 + case NID_sha256: + Dmsg0(100, "sign digest algorithm is SHA256\n"); + type = CRYPTO_DIGEST_SHA256; + *digest = crypto_digest_new(sig->jcr, CRYPTO_DIGEST_SHA256); + break; + case NID_sha512: + Dmsg0(100, "sign digest algorithm is SHA512\n"); + type = CRYPTO_DIGEST_SHA512; + *digest = crypto_digest_new(sig->jcr, CRYPTO_DIGEST_SHA512); + break; +#endif + default: + type = CRYPTO_DIGEST_NONE; + *digest = NULL; + return CRYPTO_ERROR_INVALID_DIGEST; + } + + /* Shouldn't happen */ + if (*digest == NULL) { + openssl_post_errors(sig->jcr, M_ERROR, _("OpenSSL digest_new failed")); + return CRYPTO_ERROR_INVALID_DIGEST; + } else { + return CRYPTO_ERROR_NONE; + } + } else { + openssl_post_errors(sig->jcr, M_ERROR, _("OpenSSL sign get digest failed")); + } + + } + + return CRYPTO_ERROR_NOSIGNER; +} + +/* + * For a given signature, public key, and digest, verify the SIGNATURE. + * Returns: CRYPTO_ERROR_NONE on success. + * A crypto_error_t value on failure. + */ +crypto_error_t crypto_sign_verify(SIGNATURE *sig, X509_KEYPAIR *keypair, DIGEST *digest) +{ + STACK_OF(SignerInfo) *signers; + SignerInfo *si; + int ok, i; + unsigned int sigLen; + const unsigned char *sigData; + + signers = sig->sigData->signerInfo; + + /* Find the signer */ + for (i = 0; i < sk_SignerInfo_num(signers); i++) { + si = sk_SignerInfo_value(signers, i); + if (ASN1_OCTET_STRING_cmp(keypair->keyid, si->subjectKeyIdentifier) == 0) { + /* Extract the signature data */ + sigLen = ASN1_STRING_length(si->signature); + sigData = ASN1_STRING_get0_data(si->signature); + + ok = EVP_VerifyFinal(digest->ctx, sigData, sigLen, keypair->pubkey); + if (ok >= 1) { + return CRYPTO_ERROR_NONE; + } else if (ok == 0) { + openssl_post_errors(sig->jcr, M_ERROR, _("OpenSSL digest Verify final failed")); + return CRYPTO_ERROR_BAD_SIGNATURE; + } else if (ok < 0) { + /* Shouldn't happen */ + openssl_post_errors(sig->jcr, M_ERROR, _("OpenSSL digest Verify final failed")); + return CRYPTO_ERROR_INTERNAL; + } + } + } + Jmsg(sig->jcr, M_ERROR, 0, _("No signers found for crypto verify.\n")); + /* Signer wasn't found. 
*/ + return CRYPTO_ERROR_NOSIGNER; +} + + +/* + * Add a new signer + * Returns: true on success + * false on failure + */ +int crypto_sign_add_signer(SIGNATURE *sig, DIGEST *digest, X509_KEYPAIR *keypair) +{ + SignerInfo *si = NULL; + unsigned char *buf = NULL; + unsigned int len; + + si = SignerInfo_new(); + + if (!si) { + /* Allocation failed in OpenSSL */ + return false; + } + + /* Set the ASN.1 structure version number */ + ASN1_INTEGER_set(si->version, BACULA_ASN1_VERSION); + + /* Set the digest algorithm identifier */ + switch (digest->type) { + case CRYPTO_DIGEST_MD5: + si->digestAlgorithm = OBJ_nid2obj(NID_md5); + break; + case CRYPTO_DIGEST_SHA1: + si->digestAlgorithm = OBJ_nid2obj(NID_sha1); + break; +#ifdef HAVE_SHA2 + case CRYPTO_DIGEST_SHA256: + si->digestAlgorithm = OBJ_nid2obj(NID_sha256); + break; + case CRYPTO_DIGEST_SHA512: + si->digestAlgorithm = OBJ_nid2obj(NID_sha512); + break; +#endif + default: + /* This should never happen */ + goto err; + } + + /* Drop the string allocated by OpenSSL, and add our subjectKeyIdentifier */ + ASN1_OCTET_STRING_free(si->subjectKeyIdentifier); + si->subjectKeyIdentifier = ASN1_OCTET_STRING_dup(keypair->keyid); + + /* Set our signature algorithm. We currently require RSA */ + assert(EVP_PKEY_base_id(keypair->pubkey) == EVP_PKEY_RSA); + /* This is slightly evil. Reach into the MD structure and grab the key type */ + si->signatureAlgorithm = OBJ_nid2obj(EVP_MD_pkey_type(EVP_MD_CTX_md(digest->ctx))); + + /* Finalize/Sign our Digest */ + len = EVP_PKEY_size(keypair->privkey); + buf = (unsigned char *) malloc(len); + if (!EVP_SignFinal(digest->ctx, buf, &len, keypair->privkey)) { + openssl_post_errors(M_ERROR, _("Signature creation failed")); + goto err; + } + + /* Add the signature to the SignerInfo structure */ + if (!ASN1_OCTET_STRING_set(si->signature, buf, len)) { + /* Allocation failed in OpenSSL */ + goto err; + } + + /* No longer needed */ + free(buf); + + /* Push the new SignerInfo structure onto the stack */ + sk_SignerInfo_push(sig->sigData->signerInfo, si); + + return true; + +err: + if (si) { + SignerInfo_free(si); + } + if (buf) { + free(buf); + } + + return false; +} + +/* + * Encodes the SignatureData structure. The length argument is used to specify the + * size of dest. A length of 0 will cause no data to be written to dest, and the + * required length to be written to length. The caller can then allocate sufficient + * space for the output. + * + * Returns: true on success, stores the encoded data in dest, and the size in length. + * false on failure. + */ +int crypto_sign_encode(SIGNATURE *sig, uint8_t *dest, uint32_t *length) +{ + if (*length == 0) { + *length = i2d_SignatureData(sig->sigData, NULL); + return true; + } + + *length = i2d_SignatureData(sig->sigData, (unsigned char **)&dest); + return true; +} + +/* + * Decodes the SignatureData structure. The length argument is used to specify the + * size of sigData. + * + * Returns: SIGNATURE instance on success. + * NULL on failure. 
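+ *
+ * The matching encode side typically uses the two-call length-query
+ * pattern documented for crypto_sign_encode() above (a sketch, not
+ * part of the original source; error handling omitted):
+ *
+ *   uint32_t len = 0;
+ *   crypto_sign_encode(sig, NULL, &len);       query the required size
+ *   uint8_t *buf = (uint8_t *)malloc(len);
+ *   crypto_sign_encode(sig, buf, &len);        encode into buf
+ *   SIGNATURE *copy = crypto_sign_decode(jcr, buf, len);
+ *   free(buf);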
+ + */ + +SIGNATURE *crypto_sign_decode(JCR *jcr, const uint8_t *sigData, uint32_t length) +{ + SIGNATURE *sig; + const unsigned char *p = (const unsigned char *) sigData; + + sig = (SIGNATURE *)malloc(sizeof(SIGNATURE)); + if (!sig) { + return NULL; + } + sig->jcr = jcr; + + /* d2i_SignatureData modifies the supplied pointer */ + sig->sigData = d2i_SignatureData(NULL, &p, length); + + if (!sig->sigData) { + /* Allocation / Decoding failed in OpenSSL */ + openssl_post_errors(jcr, M_ERROR, _("Signature decoding failed")); + free(sig); + return NULL; + } + + return sig; +} + +/* + * Free memory associated with a signature object. + */ +void crypto_sign_free(SIGNATURE *sig) +{ + SignatureData_free(sig->sigData); + free (sig); +} + +/* + * Create a new encryption session. + * Returns: A pointer to a CRYPTO_SESSION object on success. + * NULL on failure. + * + * Note! Bacula malloc() fails if out of memory. + */ +CRYPTO_SESSION *crypto_session_new (crypto_cipher_t cipher, alist *pubkeys) +{ + CRYPTO_SESSION *cs; + X509_KEYPAIR *keypair; + const EVP_CIPHER *ec; + unsigned char *iv; + int iv_len; + + /* Allocate our session description structures */ + cs = (CRYPTO_SESSION *)malloc(sizeof(CRYPTO_SESSION)); + + /* Initialize required fields */ + cs->session_key = NULL; + + /* Allocate a CryptoData structure */ + cs->cryptoData = CryptoData_new(); + + if (!cs->cryptoData) { + /* Allocation failed in OpenSSL */ + free(cs); + return NULL; + } + + /* Set the ASN.1 structure version number */ + ASN1_INTEGER_set(cs->cryptoData->version, BACULA_ASN1_VERSION); + + /* + * Acquire a cipher instance and set the ASN.1 cipher NID + */ + switch (cipher) { + case CRYPTO_CIPHER_AES_128_CBC: + /* AES 128 bit CBC */ + cs->cryptoData->contentEncryptionAlgorithm = OBJ_nid2obj(NID_aes_128_cbc); + ec = EVP_aes_128_cbc(); + break; +#ifndef HAVE_OPENSSL_EXPORT_LIBRARY + case CRYPTO_CIPHER_AES_192_CBC: + /* AES 192 bit CBC */ + cs->cryptoData->contentEncryptionAlgorithm = OBJ_nid2obj(NID_aes_192_cbc); + ec = EVP_aes_192_cbc(); + break; + case CRYPTO_CIPHER_AES_256_CBC: + /* AES 256 bit CBC */ + cs->cryptoData->contentEncryptionAlgorithm = OBJ_nid2obj(NID_aes_256_cbc); + ec = EVP_aes_256_cbc(); + break; +#endif + case CRYPTO_CIPHER_BLOWFISH_CBC: + /* Blowfish CBC */ + cs->cryptoData->contentEncryptionAlgorithm = OBJ_nid2obj(NID_bf_cbc); + ec = EVP_bf_cbc(); + break; + default: + Jmsg0(NULL, M_ERROR, 0, _("Unsupported cipher type specified\n")); + crypto_session_free(cs); + return NULL; + } + + /* Generate a symmetric session key */ + cs->session_key_len = EVP_CIPHER_key_length(ec); + cs->session_key = (unsigned char *) malloc(cs->session_key_len); + if (RAND_bytes(cs->session_key, cs->session_key_len) <= 0) { + /* OpenSSL failure */ + crypto_session_free(cs); + return NULL; + } + + /* Generate an IV if possible */ + if ((iv_len = EVP_CIPHER_iv_length(ec))) { + iv = (unsigned char *)malloc(iv_len); + + /* Generate random IV */ + if (RAND_bytes(iv, iv_len) <= 0) { + /* OpenSSL failure */ + crypto_session_free(cs); + free(iv); + return NULL; + } + + /* Store it in our ASN.1 structure */ + if (!ASN1_OCTET_STRING_set(cs->cryptoData->iv, iv, iv_len)) { + /* Allocation failed in OpenSSL */ + crypto_session_free(cs); + free(iv); + return NULL; + } + free(iv); + } + + /* + * Create RecipientInfo structures for supplied + * public keys. 
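+ * Each recipient receives its own copy of the symmetric session key,
+ * encrypted with that recipient's RSA public key, so any matching
+ * private key can later recover the session key in
+ * crypto_session_decode().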
+ */ + foreach_alist(keypair, pubkeys) { + RecipientInfo *ri; + unsigned char *ekey; + int ekey_len; + + ri = RecipientInfo_new(); + if (!ri) { + /* Allocation failed in OpenSSL */ + crypto_session_free(cs); + return NULL; + } + + /* Set the ASN.1 structure version number */ + ASN1_INTEGER_set(ri->version, BACULA_ASN1_VERSION); + + /* Drop the string allocated by OpenSSL, and add our subjectKeyIdentifier */ + ASN1_OCTET_STRING_free(ri->subjectKeyIdentifier); + ri->subjectKeyIdentifier = ASN1_OCTET_STRING_dup(keypair->keyid); + + /* Set our key encryption algorithm. We currently require RSA */ + assert(keypair->pubkey && EVP_PKEY_base_id(keypair->pubkey) == EVP_PKEY_RSA); + ri->keyEncryptionAlgorithm = OBJ_nid2obj(NID_rsaEncryption); + + /* Encrypt the session key */ + ekey = (unsigned char *)malloc(EVP_PKEY_size(keypair->pubkey)); + + if ((ekey_len = EVP_PKEY_encrypt(ekey, cs->session_key, cs->session_key_len, keypair->pubkey)) <= 0) { + /* OpenSSL failure */ + RecipientInfo_free(ri); + crypto_session_free(cs); + free(ekey); + return NULL; + } + + /* Store it in our ASN.1 structure */ + if (!ASN1_OCTET_STRING_set(ri->encryptedKey, ekey, ekey_len)) { + /* Allocation failed in OpenSSL */ + RecipientInfo_free(ri); + crypto_session_free(cs); + free(ekey); + return NULL; + } + + /* Free the encrypted key buffer */ + free(ekey); + + /* Push the new RecipientInfo structure onto the stack */ + sk_RecipientInfo_push(cs->cryptoData->recipientInfo, ri); + } + + return cs; +} + +/* + * Encodes the CryptoData structure. The length argument is used to specify the + * size of dest. A length of 0 will cause no data to be written to dest, and the + * required length to be written to length. The caller can then allocate sufficient + * space for the output. + * + * Returns: true on success, stores the encoded data in dest, and the size in length. + * false on failure. + */ +bool crypto_session_encode(CRYPTO_SESSION *cs, uint8_t *dest, uint32_t *length) +{ + if (*length == 0) { + *length = i2d_CryptoData(cs->cryptoData, NULL); + return true; + } + + *length = i2d_CryptoData(cs->cryptoData, &dest); + return true; +} + +/* + * Decodes the CryptoData structure. The length argument is + * used to specify the size of data. + * + * Returns: CRYPTO_SESSION instance on success. + * NULL on failure. + * Returns: CRYPTO_ERROR_NONE and a pointer to a newly allocated CRYPTO_SESSION structure in *session on success. + * A crypto_error_t value on failure. + */ +crypto_error_t crypto_session_decode(const uint8_t *data, uint32_t length, alist *keypairs, CRYPTO_SESSION **session) +{ + CRYPTO_SESSION *cs; + X509_KEYPAIR *keypair; + STACK_OF(RecipientInfo) *recipients; + crypto_error_t retval = CRYPTO_ERROR_NONE; + const unsigned char *p = (const unsigned char *)data; + + /* bacula-fd.conf doesn't contains any key */ + if (!keypairs) { + return CRYPTO_ERROR_NORECIPIENT; + } + + cs = (CRYPTO_SESSION *)malloc(sizeof(CRYPTO_SESSION)); + + /* Initialize required fields */ + cs->session_key = NULL; + + /* d2i_CryptoData modifies the supplied pointer */ + cs->cryptoData = d2i_CryptoData(NULL, &p, length); + + if (!cs->cryptoData) { + /* Allocation / Decoding failed in OpenSSL */ + openssl_post_errors(M_ERROR, _("CryptoData decoding failed")); + retval = CRYPTO_ERROR_INTERNAL; + goto err; + } + + recipients = cs->cryptoData->recipientInfo; + + /* + * Find a matching RecipientInfo structure for a supplied + * public key + */ + foreach_alist(keypair, keypairs) { + RecipientInfo *ri; + int i; + + /* Private key available? 
*/ + if (keypair->privkey == NULL) { + continue; + } + + for (i = 0; i < sk_RecipientInfo_num(recipients); i++) { + ri = sk_RecipientInfo_value(recipients, i); + + /* Match against the subjectKeyIdentifier */ + if (ASN1_OCTET_STRING_cmp(keypair->keyid, ri->subjectKeyIdentifier) == 0) { + /* Match found, extract symmetric encryption session data */ + + /* RSA is required. */ + assert(EVP_PKEY_base_id(keypair->privkey) == EVP_PKEY_RSA); + + /* If we recieve a RecipientInfo structure that does not use + * RSA, return an error */ + if (OBJ_obj2nid(ri->keyEncryptionAlgorithm) != NID_rsaEncryption) { + retval = CRYPTO_ERROR_INVALID_CRYPTO; + goto err; + } + + /* Decrypt the session key */ + /* Allocate sufficient space for the largest possible decrypted data */ + cs->session_key = (unsigned char *)malloc(EVP_PKEY_size(keypair->privkey)); + cs->session_key_len = EVP_PKEY_decrypt(cs->session_key, ASN1_STRING_get0_data(ri->encryptedKey), + ASN1_STRING_length(ri->encryptedKey), keypair->privkey); + + if (cs->session_key_len <= 0) { + openssl_post_errors(M_ERROR, _("Failure decrypting the session key")); + retval = CRYPTO_ERROR_DECRYPTION; + goto err; + } + + /* Session key successfully extracted, return the CRYPTO_SESSION structure */ + *session = cs; + return CRYPTO_ERROR_NONE; + } + } + } + + /* No matching recipient found */ + return CRYPTO_ERROR_NORECIPIENT; + +err: + crypto_session_free(cs); + return retval; +} + +/* + * Free memory associated with a crypto session object. + */ +void crypto_session_free(CRYPTO_SESSION *cs) +{ + if (cs->cryptoData) { + CryptoData_free(cs->cryptoData); + } + if (cs->session_key){ + free(cs->session_key); + } + free(cs); +} + +/* + * Create a new crypto cipher context with the specified session object + * Returns: A pointer to a CIPHER_CONTEXT object on success. The cipher block size is returned in blocksize. + * NULL on failure. 
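+ *
+ * Illustrative encryption sketch (not part of the original source; cs
+ * comes from crypto_session_new() or crypto_session_decode(), and the
+ * plain/out buffers are hypothetical; out must have room for plain_len
+ * plus up to one extra cipher block of padding):
+ *
+ *   uint32_t blocksize, written, total = 0;
+ *   CIPHER_CONTEXT *cipher = crypto_cipher_new(cs, true, &blocksize);
+ *   if (cipher) {
+ *      crypto_cipher_update(cipher, plain, plain_len, out, &written);
+ *      total += written;
+ *      crypto_cipher_finalize(cipher, out + total, &written);
+ *      total += written;
+ *      crypto_cipher_free(cipher);
+ *   }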
+ */ +CIPHER_CONTEXT *crypto_cipher_new(CRYPTO_SESSION *cs, bool encrypt, uint32_t *blocksize) +{ + CIPHER_CONTEXT *cipher_ctx; + const EVP_CIPHER *ec; + + cipher_ctx = (CIPHER_CONTEXT *)malloc(sizeof(CIPHER_CONTEXT)); + if (!cipher_ctx) + return NULL; + + cipher_ctx->ctx = EVP_CIPHER_CTX_new(); + if (!cipher_ctx->ctx) + goto err; + + /* + * Acquire a cipher instance for the given ASN.1 cipher NID + */ + if ((ec = EVP_get_cipherbyobj(cs->cryptoData->contentEncryptionAlgorithm)) == NULL) { + Jmsg1(NULL, M_ERROR, 0, + _("Unsupported contentEncryptionAlgorithm: %d\n"), OBJ_obj2nid(cs->cryptoData->contentEncryptionAlgorithm)); + free(cipher_ctx); + return NULL; + } + + /* Initialize the OpenSSL cipher context */ + EVP_CIPHER_CTX_reset(cipher_ctx->ctx); + if (encrypt) { + /* Initialize for encryption */ + if (!EVP_CipherInit_ex(cipher_ctx->ctx, ec, NULL, NULL, NULL, 1)) { + openssl_post_errors(M_ERROR, _("OpenSSL cipher context initialization failed")); + goto err; + } + } else { + /* Initialize for decryption */ + if (!EVP_CipherInit_ex(cipher_ctx->ctx, ec, NULL, NULL, NULL, 0)) { + openssl_post_errors(M_ERROR, _("OpenSSL cipher context initialization failed")); + goto err; + } + } + + /* Set the key size */ + if (!EVP_CIPHER_CTX_set_key_length(cipher_ctx->ctx, cs->session_key_len)) { + openssl_post_errors(M_ERROR, _("Encryption session provided an invalid symmetric key")); + goto err; + } + + /* Validate the IV length */ + if (EVP_CIPHER_iv_length(ec) != ASN1_STRING_length(cs->cryptoData->iv)) { + openssl_post_errors(M_ERROR, _("Encryption session provided an invalid IV")); + goto err; + } + + /* Add the key and IV to the cipher context */ + if (!EVP_CipherInit_ex(cipher_ctx->ctx, NULL, NULL, cs->session_key, ASN1_STRING_get0_data(cs->cryptoData->iv), -1)) { + openssl_post_errors(M_ERROR, _("OpenSSL cipher context key/IV initialization failed")); + goto err; + } + + *blocksize = EVP_CIPHER_CTX_block_size(cipher_ctx->ctx); + return cipher_ctx; + +err: + crypto_cipher_free(cipher_ctx); + return NULL; +} + + +/* + * Encrypt/Decrypt length bytes of data using the provided cipher context + * Returns: true on success, number of bytes output in written + * false on failure + */ +bool crypto_cipher_update(CIPHER_CONTEXT *cipher_ctx, const uint8_t *data, uint32_t length, const uint8_t *dest, uint32_t *written) +{ + if (!EVP_CipherUpdate(cipher_ctx->ctx, (unsigned char *)dest, (int *)written, (const unsigned char *)data, length)) { + /* This really shouldn't fail */ + return false; + } else { + return true; + } +} + +/* + * Finalize the cipher context, writing any remaining data and necessary padding + * to dest, and the size in written. + * The result size will either be one block of data or zero. + * + * Returns: true on success + * false on failure + */ +bool crypto_cipher_finalize (CIPHER_CONTEXT *cipher_ctx, uint8_t *dest, uint32_t *written) +{ + if (!EVP_CipherFinal_ex(cipher_ctx->ctx, (unsigned char *)dest, (int *) written)) { + /* This really shouldn't fail */ + return false; + } else { + return true; + } +} + + +/* + * Free memory associated with a cipher context. 
+ */ +void crypto_cipher_free (CIPHER_CONTEXT *cipher_ctx) +{ + EVP_CIPHER_CTX_free(cipher_ctx->ctx); + free (cipher_ctx); +} + +#else /* HAVE_OPENSSL */ +# error No encryption library available +#endif /* HAVE_OPENSSL */ + +#else /* HAVE_CRYPTO */ + +/* + * Cryptography Support Disabled + */ + +/* Message Digest Structure */ +struct Digest { + crypto_digest_t type; + JCR *jcr; + union { + SHA1Context sha1; + MD5Context md5; + }; +}; + +/* Dummy Signature Structure */ +struct Signature { + JCR *jcr; +}; + +DIGEST *crypto_digest_new(JCR *jcr, crypto_digest_t type) +{ + DIGEST *digest; + + digest = (DIGEST *)malloc(sizeof(DIGEST)); + digest->type = type; + digest->jcr = jcr; + + switch (type) { + case CRYPTO_DIGEST_MD5: + MD5Init(&digest->md5); + break; + case CRYPTO_DIGEST_SHA1: + SHA1Init(&digest->sha1); + break; + default: + Jmsg1(jcr, M_ERROR, 0, _("Unsupported digest type=%d specified\n"), type); + free(digest); + return NULL; + } + + return (digest); +} + +bool crypto_digest_update(DIGEST *digest, const uint8_t *data, uint32_t length) +{ + switch (digest->type) { + case CRYPTO_DIGEST_MD5: + /* Doesn't return anything ... */ + MD5Update(&digest->md5, (unsigned char *) data, length); + return true; + case CRYPTO_DIGEST_SHA1: + int ret; + if ((ret = SHA1Update(&digest->sha1, (const u_int8_t *) data, length)) == shaSuccess) { + return true; + } else { + Jmsg1(NULL, M_ERROR, 0, _("SHA1Update() returned an error: %d\n"), ret); + return false; + } + break; + default: + return false; + } +} + +bool crypto_digest_finalize(DIGEST *digest, uint8_t *dest, uint32_t *length) +{ + switch (digest->type) { + case CRYPTO_DIGEST_MD5: + /* Guard against programmer error by either the API client or + * an out-of-sync CRYPTO_DIGEST_MAX_SIZE */ + assert(*length >= CRYPTO_DIGEST_MD5_SIZE); + *length = CRYPTO_DIGEST_MD5_SIZE; + /* Doesn't return anything ... 
*/ + MD5Final((unsigned char *)dest, &digest->md5); + return true; + case CRYPTO_DIGEST_SHA1: + /* Guard against programmer error by either the API client or + * an out-of-sync CRYPTO_DIGEST_MAX_SIZE */ + assert(*length >= CRYPTO_DIGEST_SHA1_SIZE); + *length = CRYPTO_DIGEST_SHA1_SIZE; + if (SHA1Final(&digest->sha1, (u_int8_t *) dest) == shaSuccess) { + return true; + } else { + return false; + } + break; + default: + return false; + } + + return false; +} + +void crypto_digest_free(DIGEST *digest) +{ + free(digest); +} + +SIGNATURE *crypto_sign_new(JCR *jcr) { return NULL; } + +crypto_error_t crypto_sign_get_digest (SIGNATURE *sig, X509_KEYPAIR *keypair, + crypto_digest_t &type, DIGEST **digest) + { return CRYPTO_ERROR_INTERNAL; } + +crypto_error_t crypto_sign_verify (SIGNATURE *sig, X509_KEYPAIR *keypair, DIGEST *digest) { return CRYPTO_ERROR_INTERNAL; } + +int crypto_sign_add_signer (SIGNATURE *sig, DIGEST *digest, X509_KEYPAIR *keypair) { return false; } +int crypto_sign_encode (SIGNATURE *sig, uint8_t *dest, uint32_t *length) { return false; } + +SIGNATURE *crypto_sign_decode (JCR *jcr, const uint8_t *sigData, uint32_t length) { return NULL; } +void crypto_sign_free (SIGNATURE *sig) { } + + +X509_KEYPAIR *crypto_keypair_new(void) { return NULL; } +X509_KEYPAIR *crypto_keypair_dup (X509_KEYPAIR *keypair) { return NULL; } +int crypto_keypair_load_cert (X509_KEYPAIR *keypair, const char *file) { return false; } +bool crypto_keypair_has_key (const char *file) { return false; } +int crypto_keypair_load_key (X509_KEYPAIR *keypair, const char *file, CRYPTO_PEM_PASSWD_CB *pem_callback, const void *pem_userdata) { return false; } +void crypto_keypair_free (X509_KEYPAIR *keypair) { } + +CRYPTO_SESSION *crypto_session_new (crypto_cipher_t cipher, alist *pubkeys) { return NULL; } +void crypto_session_free (CRYPTO_SESSION *cs) { } +bool crypto_session_encode (CRYPTO_SESSION *cs, uint8_t *dest, uint32_t *length) { return false; } +crypto_error_t crypto_session_decode(const uint8_t *data, uint32_t length, alist *keypairs, CRYPTO_SESSION **session) { return CRYPTO_ERROR_INTERNAL; } + +CIPHER_CONTEXT *crypto_cipher_new (CRYPTO_SESSION *cs, bool encrypt, uint32_t *blocksize) { return NULL; } +bool crypto_cipher_update (CIPHER_CONTEXT *cipher_ctx, const uint8_t *data, uint32_t length, const uint8_t *dest, uint32_t *written) { return false; } +bool crypto_cipher_finalize (CIPHER_CONTEXT *cipher_ctx, uint8_t *dest, uint32_t *written) { return false; } +void crypto_cipher_free (CIPHER_CONTEXT *cipher_ctx) { } + +#endif /* HAVE_CRYPTO */ + +/* Shared Code */ + +/* + * Default PEM encryption passphrase callback. + * Returns an empty password. + */ +int crypto_default_pem_callback(char *buf, int size, const void *userdata) +{ + bstrncpy(buf, "", size); + return (strlen(buf)); +} + +/* + * Returns the ASCII name of the digest type. + * Returns: ASCII name of digest type. + */ +const char *crypto_digest_name(DIGEST *digest) +{ + switch (digest->type) { + case CRYPTO_DIGEST_MD5: + return "MD5"; + case CRYPTO_DIGEST_SHA1: + return "SHA1"; + case CRYPTO_DIGEST_SHA256: + return "SHA256"; + case CRYPTO_DIGEST_SHA512: + return "SHA512"; + case CRYPTO_DIGEST_NONE: + return "None"; + default: + return "Invalid Digest Type"; + } + +} + +/* + * Given a stream type, returns the associated + * crypto_digest_t value. 
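+ * Unknown stream types map to CRYPTO_DIGEST_NONE.
+ *
+ * Illustrative sketch only (not part of the original sources; "jcr" is an
+ * assumption):
+ *
+ *    crypto_digest_t type = crypto_digest_stream_type(STREAM_SHA1_DIGEST);
+ *    if (type != CRYPTO_DIGEST_NONE) {
+ *       DIGEST *digest = crypto_digest_new(jcr, type);
+ *    }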
+ */ +crypto_digest_t crypto_digest_stream_type(int stream) +{ + switch (stream) { + case STREAM_MD5_DIGEST: + return CRYPTO_DIGEST_MD5; + case STREAM_SHA1_DIGEST: + return CRYPTO_DIGEST_SHA1; + case STREAM_SHA256_DIGEST: + return CRYPTO_DIGEST_SHA256; + case STREAM_SHA512_DIGEST: + return CRYPTO_DIGEST_SHA512; + default: + return CRYPTO_DIGEST_NONE; + } +} + +/* + * * Given a crypto_error_t value, return the associated + * * error string + * */ +const char *crypto_strerror(crypto_error_t error) { + switch (error) { + case CRYPTO_ERROR_NONE: + return _("No error"); + case CRYPTO_ERROR_NOSIGNER: + return _("Signer not found"); + case CRYPTO_ERROR_NORECIPIENT: + return _("Recipient not found"); + case CRYPTO_ERROR_INVALID_DIGEST: + return _("Unsupported digest algorithm"); + case CRYPTO_ERROR_INVALID_CRYPTO: + return _("Unsupported encryption algorithm"); + case CRYPTO_ERROR_BAD_SIGNATURE: + return _("Signature is invalid"); + case CRYPTO_ERROR_DECRYPTION: + return _("Decryption error"); + case CRYPTO_ERROR_INTERNAL: + /* This shouldn't happen */ + return _("Internal error"); + default: + return _("Unknown error"); + } +} diff --git a/src/lib/crypto.h b/src/lib/crypto.h new file mode 100644 index 00000000..f0366718 --- /dev/null +++ b/src/lib/crypto.h @@ -0,0 +1,131 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * crypto.h Encryption support functions + * + * Author: Landon Fuller + * + * Version $Id$ + * + * This file was contributed to the Bacula project by Landon Fuller. + * + * Landon Fuller has been granted a perpetual, worldwide, non-exclusive, + * no-charge, royalty-free, irrevocable copyright * license to reproduce, + * prepare derivative works of, publicly display, publicly perform, + * sublicense, and distribute the original work contributed by Landon Fuller + * to the Bacula project in source or object form. + * + * If you wish to license these contributions under an alternate open source + * license please contact Landon Fuller . 
+ */ + +#ifndef __CRYPTO_H_ +#define __CRYPTO_H_ + +/* Opaque X509 Public/Private Key Pair Structure */ +typedef struct X509_Keypair X509_KEYPAIR; + +/* Opaque Message Digest Structure */ +/* Digest is defined (twice) in crypto.c */ +typedef struct Digest DIGEST; + +/* Opaque Message Signature Structure */ +typedef struct Signature SIGNATURE; + +/* Opaque PKI Symmetric Key Data Structure */ +typedef struct Crypto_Session CRYPTO_SESSION; + +/* Opaque Encryption/Decryption Context Structure */ +typedef struct Cipher_Context CIPHER_CONTEXT; + +/* PEM Decryption Passphrase Callback */ +typedef int (CRYPTO_PEM_PASSWD_CB) (char *buf, int size, const void *userdata); + +/* Digest Types */ +typedef enum { + /* These are stored on disk and MUST NOT change */ + CRYPTO_DIGEST_NONE = 0, + CRYPTO_DIGEST_MD5 = 1, + CRYPTO_DIGEST_SHA1 = 2, + CRYPTO_DIGEST_SHA256 = 3, + CRYPTO_DIGEST_SHA512 = 4 +} crypto_digest_t; + + +#ifdef HAVE_SHA2 +# define CRYPTO_DIGEST_DEFAULT CRYPTO_DIGEST_SHA256 +#else +# define CRYPTO_DIGEST_DEFAULT CRYPTO_DIGEST_SHA1 +#endif + +/* Cipher Types */ +typedef enum { + /* These are not stored on disk */ + CRYPTO_CIPHER_AES_128_CBC, /* Keep AES128 as the first one */ + CRYPTO_CIPHER_AES_192_CBC, + CRYPTO_CIPHER_AES_256_CBC, + CRYPTO_CIPHER_BLOWFISH_CBC +} crypto_cipher_t; + +/* Crypto API Errors */ +typedef enum { + CRYPTO_ERROR_NONE = 0, /* No error */ + CRYPTO_ERROR_NOSIGNER = 1, /* Signer not found */ + CRYPTO_ERROR_NORECIPIENT = 2, /* Recipient not found */ + CRYPTO_ERROR_INVALID_DIGEST = 3, /* Unsupported digest algorithm */ + CRYPTO_ERROR_INVALID_CRYPTO = 4, /* Unsupported encryption algorithm */ + CRYPTO_ERROR_BAD_SIGNATURE = 5, /* Signature is invalid */ + CRYPTO_ERROR_DECRYPTION = 6, /* Decryption error */ + CRYPTO_ERROR_INTERNAL = 7 /* Internal Error */ +} crypto_error_t; + +/* Message Digest Sizes */ +#define CRYPTO_DIGEST_MD5_SIZE 16 /* 128 bits */ +#define CRYPTO_DIGEST_SHA1_SIZE 20 /* 160 bits */ +#define CRYPTO_DIGEST_SHA256_SIZE 32 /* 256 bits */ +#define CRYPTO_DIGEST_SHA512_SIZE 64 /* 512 bits */ + +/* Maximum Message Digest Size */ +#ifdef HAVE_OPENSSL + +/* Let OpenSSL define a few things */ +#define CRYPTO_DIGEST_MAX_SIZE EVP_MAX_MD_SIZE +#define CRYPTO_CIPHER_MAX_BLOCK_SIZE EVP_MAX_BLOCK_LENGTH + +#else /* HAVE_OPENSSL */ + +/* + * This must be kept in sync with the available message digest algorithms. + * Just in case someone forgets, I've added assertions + * to crypto_digest_finalize(). + * MD5: 128 bits + * SHA-1: 160 bits + */ +#ifndef HAVE_SHA2 +#define CRYPTO_DIGEST_MAX_SIZE CRYPTO_DIGEST_SHA1_SIZE +#else +#define CRYPTO_DIGEST_MAX_SIZE CRYPTO_DIGEST_SHA512_SIZE +#endif + +/* Dummy Value */ +#define CRYPTO_CIPHER_MAX_BLOCK_SIZE 0 + +#endif /* HAVE_OPENSSL */ + +#endif /* __CRYPTO_H_ */ diff --git a/src/lib/daemon.c b/src/lib/daemon.c new file mode 100644 index 00000000..de644690 --- /dev/null +++ b/src/lib/daemon.c @@ -0,0 +1,134 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * daemon.c by Kern Sibbald 2000 + * + * This code is inspired by the Prentice Hall book + * "Unix Network Programming" by W. Richard Stevens + * and later updated from his book "Advanced Programming + * in the UNIX Environment" + * + * Initialize a daemon process completely detaching us from + * any terminal processes. + * + */ + + +#include "bacula.h" + +#ifdef HAVE_GETRLIMIT +#include +#else +/* If not available, use a wrapper that will not use it */ +#define getrlimit(a,b) -1 +struct rlimit { + int64_t rlim_max; +}; +#endif + +void +daemon_start() +{ +#ifndef HAVE_WIN32 + int i, fd, next_fd; + pid_t cpid; + mode_t oldmask; +#ifdef DEVELOPER + next_fd = 3; +#else + next_fd = 0; +#endif + /* + * Become a daemon. + */ + Dmsg0(900, "Enter daemon_start\n"); + if ( (cpid = fork() ) < 0) { + berrno be; + Emsg1(M_ABORT, 0, _("Cannot fork to become daemon: ERR=%s\n"), be.bstrerror()); + } else if (cpid > 0) { + exit(0); /* parent exits */ + } + + /* Child continues */ + setsid(); + + /* In the PRODUCTION system, we close ALL + * file descriptors except stdin, stdout, and stderr. + */ + if (debug_level > 0) { + next_fd = 3; /* don't close debug output */ + } + +#if defined(HAVE_FCNTL_F_CLOSEM) + fcntl(next_fd, F_CLOSEM); +#elif defined(HAVE_CLOSEFROM) + closefrom(next_fd); +#else + struct rlimit rl; + int64_t rlimitResult=0; + + /* Many systems doesn't have the correct system call + * to determine the FD list to close. + */ + if (getrlimit(RLIMIT_NOFILE, &rl) == -1) { + rlimitResult = sysconf(_SC_OPEN_MAX); + } else { + rlimitResult = rl.rlim_max; + } + + for (i=rlimitResult; i >= next_fd; i--) { + close(i); + } +#endif + + /* Move to root directory. For debug we stay + * in current directory so dumps go there. + */ +#ifndef DEBUG + chdir("/"); +#endif + + /* + * Avoid creating files 666 but don't override any + * more restrictive mask set by the user. + */ + oldmask = umask(026); + oldmask |= 026; + umask(oldmask); + + + /* + * Make sure we have fd's 0, 1, 2 open + * If we don't do this one of our sockets may open + * there and if we then use stdout, it could + * send total garbage to our socket. + * + */ + fd = open("/dev/null", O_RDONLY, 0644); + if (fd > 2) { + close(fd); + } else { + for(i=1; fd + i <= 2; i++) { + dup2(fd, fd+i); + } + } + +#endif /* HAVE_WIN32 */ + Dmsg0(900, "Exit daemon_start\n"); +} diff --git a/src/lib/devlock.c b/src/lib/devlock.c new file mode 100644 index 00000000..5975f6a1 --- /dev/null +++ b/src/lib/devlock.c @@ -0,0 +1,739 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula Thread Read/Write locking code. It permits + * multiple readers but only one writer. Note, however, + * that the writer thread is permitted to make multiple + * nested write lock calls. + * + * Kern Sibbald, January MMI + * + * This code adapted from "Programming with POSIX Threads", by + * David R. 
Butenhof + * + */ + +#define LOCKMGR_COMPLIANT +#include "bacula.h" +#include "devlock.h" + +/* + * Initialize a read/write lock + * + * Returns: 0 on success + * errno on failure + */ + +devlock *new_devlock() +{ + devlock *lock; + lock = (devlock *)malloc(sizeof (devlock)); + bmemset(lock, 0, sizeof(devlock)); + return lock; +} + +int devlock::init(int init_priority) +{ + int stat; + devlock *rwl = this; + + rwl->r_active = rwl->w_active = 0; + rwl->r_wait = rwl->w_wait = 0; + rwl->priority = init_priority; + if ((stat = pthread_mutex_init(&rwl->mutex, NULL)) != 0) { + return stat; + } + if ((stat = pthread_cond_init(&rwl->read, NULL)) != 0) { + pthread_mutex_destroy(&rwl->mutex); + return stat; + } + if ((stat = pthread_cond_init(&rwl->write, NULL)) != 0) { + pthread_cond_destroy(&rwl->read); + pthread_mutex_destroy(&rwl->mutex); + return stat; + } + rwl->valid = DEVLOCK_VALID; + return 0; +} + +/* + * Destroy a read/write lock + * + * Returns: 0 on success + * errno on failure + */ +int devlock::destroy() +{ + devlock *rwl = this; + int stat, stat1, stat2; + + if (rwl->valid != DEVLOCK_VALID) { + return EINVAL; + } + if ((stat = pthread_mutex_lock(&rwl->mutex)) != 0) { + return stat; + } + + /* + * If any threads are active, report EBUSY + */ + if (rwl->r_active > 0 || rwl->w_active) { + pthread_mutex_unlock(&rwl->mutex); + return EBUSY; + } + + /* + * If any threads are waiting, report EBUSY + */ + if (rwl->r_wait > 0 || rwl->w_wait > 0) { + pthread_mutex_unlock(&rwl->mutex); + return EBUSY; + } + + rwl->valid = 0; + if ((stat = pthread_mutex_unlock(&rwl->mutex)) != 0) { + return stat; + } + stat = pthread_mutex_destroy(&rwl->mutex); + stat1 = pthread_cond_destroy(&rwl->read); + stat2 = pthread_cond_destroy(&rwl->write); + return (stat != 0 ? stat : (stat1 != 0 ? stat1 : stat2)); +} + +/* + * Handle cleanup when the read lock condition variable + * wait is released. + */ +static void devlock_read_release(void *arg) +{ + devlock *rwl = (devlock *)arg; + rwl->read_release(); +} + +void devlock::read_release() +{ + r_wait--; + pthread_mutex_unlock(&mutex); +} + +/* + * Handle cleanup when the write lock condition variable wait + * is released. + */ +static void devlock_write_release(void *arg) +{ + devlock *rwl = (devlock *)arg; + rwl->write_release(); +} + +void devlock::write_release() +{ + w_wait--; + pthread_mutex_unlock(&mutex); +} + +/* + * Lock for read access, wait until locked (or error). + */ +int devlock::readlock() +{ + devlock *rwl = this; + int stat; + + if (rwl->valid != DEVLOCK_VALID) { + return EINVAL; + } + if ((stat = pthread_mutex_lock(&rwl->mutex)) != 0) { + return stat; + } + if (rwl->w_active) { + rwl->r_wait++; /* indicate that we are waiting */ + pthread_cleanup_push(devlock_read_release, (void *)rwl); + while (rwl->w_active) { + stat = pthread_cond_wait(&rwl->read, &rwl->mutex); + if (stat != 0) { + break; /* error, bail out */ + } + } + pthread_cleanup_pop(0); + rwl->r_wait--; /* we are no longer waiting */ + } + if (stat == 0) { + rwl->r_active++; /* we are running */ + } + pthread_mutex_unlock(&rwl->mutex); + return stat; +} + +/* + * Attempt to lock for read access, don't wait + */ +int devlock::readtrylock() +{ + devlock *rwl = this; + int stat, stat2; + + if (rwl->valid != DEVLOCK_VALID) { + return EINVAL; + } + if ((stat = pthread_mutex_lock(&rwl->mutex)) != 0) { + return stat; + } + if (rwl->w_active) { + stat = EBUSY; + } else { + rwl->r_active++; /* we are running */ + } + stat2 = pthread_mutex_unlock(&rwl->mutex); + return (stat == 0 ? 
stat2 : stat); +} + +/* + * Unlock read lock + */ +int devlock::readunlock() +{ + devlock *rwl = this; + int stat, stat2; + + if (rwl->valid != DEVLOCK_VALID) { + return EINVAL; + } + if ((stat = pthread_mutex_lock(&rwl->mutex)) != 0) { + return stat; + } + rwl->r_active--; + if (rwl->r_active == 0 && rwl->w_wait > 0) { /* if writers waiting */ + stat = pthread_cond_broadcast(&rwl->write); + } + stat2 = pthread_mutex_unlock(&rwl->mutex); + return (stat == 0 ? stat2 : stat); +} + + +/* + * Lock for write access, wait until locked (or error). + * Multiple nested write locking is permitted. + */ +int devlock::writelock(int areason, bool acan_take) +{ + devlock *rwl = this; + int stat; + + if (rwl->valid != DEVLOCK_VALID) { + return EINVAL; + } + if ((stat = pthread_mutex_lock(&rwl->mutex)) != 0) { + return stat; + } + if (rwl->w_active && pthread_equal(rwl->writer_id, pthread_self())) { + rwl->w_active++; + pthread_mutex_unlock(&rwl->mutex); + return 0; + } + lmgr_pre_lock(rwl, rwl->priority, __FILE__, __LINE__); + if (rwl->w_active || rwl->r_active > 0) { + rwl->w_wait++; /* indicate that we are waiting */ + pthread_cleanup_push(devlock_write_release, (void *)rwl); + while (rwl->w_active || rwl->r_active > 0) { + if ((stat = pthread_cond_wait(&rwl->write, &rwl->mutex)) != 0) { + lmgr_do_unlock(rwl); + break; /* error, bail out */ + } + } + pthread_cleanup_pop(0); + rwl->w_wait--; /* we are no longer waiting */ + } + if (stat == 0) { + rwl->w_active++; /* we are running */ + rwl->writer_id = pthread_self(); /* save writer thread's id */ + lmgr_post_lock(); + } + rwl->reason = areason; + rwl->can_take = acan_take; + pthread_mutex_unlock(&rwl->mutex); + return stat; +} + +/* + * Attempt to lock for write access, don't wait + */ +int devlock::writetrylock() +{ + devlock *rwl = this; + int stat, stat2; + + if (rwl->valid != DEVLOCK_VALID) { + return EINVAL; + } + if ((stat = pthread_mutex_lock(&rwl->mutex)) != 0) { + return stat; + } + if (rwl->w_active && pthread_equal(rwl->writer_id, pthread_self())) { + rwl->w_active++; + pthread_mutex_unlock(&rwl->mutex); + return 0; + } + if (rwl->w_active || rwl->r_active > 0) { + stat = EBUSY; + } else { + rwl->w_active = 1; /* we are running */ + rwl->writer_id = pthread_self(); /* save writer thread's id */ + lmgr_do_lock(rwl, rwl->priority, __FILE__, __LINE__); + } + stat2 = pthread_mutex_unlock(&rwl->mutex); + return (stat == 0 ? stat2 : stat); +} + +/* + * Unlock write lock + * Start any waiting writers in preference to waiting readers + */ +int devlock::writeunlock() +{ + devlock *rwl = this; + int stat, stat2; + + if (rwl->valid != DEVLOCK_VALID) { + return EINVAL; + } + if ((stat = pthread_mutex_lock(&rwl->mutex)) != 0) { + return stat; + } + if (rwl->w_active <= 0) { + pthread_mutex_unlock(&rwl->mutex); + Jmsg0(NULL, M_ABORT, 0, _("writeunlock called too many times.\n")); + } + rwl->w_active--; + if (!pthread_equal(pthread_self(), rwl->writer_id)) { + pthread_mutex_unlock(&rwl->mutex); + Jmsg0(NULL, M_ABORT, 0, _("writeunlock by non-owner.\n")); + } + if (rwl->w_active > 0) { + stat = 0; /* writers still active */ + } else { + lmgr_do_unlock(rwl); + /* No more writers, awaken someone */ + if (rwl->r_wait > 0) { /* if readers waiting */ + stat = pthread_cond_broadcast(&rwl->read); + } else if (rwl->w_wait > 0) { + stat = pthread_cond_broadcast(&rwl->write); + } + } + stat2 = pthread_mutex_unlock(&rwl->mutex); + return (stat == 0 ? 
stat2 : stat); +} + +int devlock::take_lock(take_lock_t *hold, int areason) +{ + int stat; + + if (valid != DEVLOCK_VALID) { + return EINVAL; + } + if ((stat = pthread_mutex_lock(&mutex)) != 0) { + return stat; + } + hold->reason = reason; + hold->prev_reason = prev_reason; + hold->writer_id = writer_id; + reason = areason; + writer_id = pthread_self(); + stat = pthread_mutex_unlock(&mutex); + return stat; +} + +int devlock::return_lock(take_lock_t *hold) +{ + int stat, stat2; + + if (valid != DEVLOCK_VALID) { + return EINVAL; + } + if ((stat = pthread_mutex_lock(&mutex)) != 0) { + return stat; + } + reason = hold->reason; + prev_reason = hold->prev_reason; + writer_id = hold->writer_id; + writer_id = pthread_self(); + stat2 = pthread_mutex_unlock(&mutex); + if (w_active || w_wait) { + stat = pthread_cond_broadcast(&write); + } + return (stat == 0 ? stat2 : stat); + +} + +#ifdef TEST_RWLOCK + +#define THREADS 300 +#define DATASIZE 15 +#define ITERATIONS 1000000 + +/* + * Keep statics for each thread. + */ +typedef struct thread_tag { + int thread_num; + pthread_t thread_id; + int writes; + int reads; + int interval; +} thread_t; + +/* + * Read/write lock and shared data. + */ +typedef struct data_tag { + brwlock_t lock; + int data; + int writes; +} data_t; + +static thread_t threads[THREADS]; +static data_t data[DATASIZE]; + +/* + * Thread start routine that uses read/write locks. + */ +void *thread_routine(void *arg) +{ + thread_t *self = (thread_t *)arg; + int repeats = 0; + int iteration; + int element = 0; + int status; + + for (iteration=0; iteration < ITERATIONS; iteration++) { + /* + * Each "self->interval" iterations, perform an + * update operation (write lock instead of read + * lock). + */ +// if ((iteration % self->interval) == 0) { + status = writelock(&data[element].lock); + if (status != 0) { + berrno be; + printf("Write lock failed. ERR=%s\n", be.bstrerror(status)); + exit(1); + } + data[element].data = self->thread_num; + data[element].writes++; + self->writes++; + status = writelock(&data[element].lock); + if (status != 0) { + berrno be; + printf("Write lock failed. ERR=%s\n", be.bstrerror(status)); + exit(1); + } + data[element].data = self->thread_num; + data[element].writes++; + self->writes++; + status = writeunlock(&data[element].lock); + if (status != 0) { + berrno be; + printf("Write unlock failed. ERR=%s\n", be.bstrerror(status)); + exit(1); + } + status = writeunlock(&data[element].lock); + if (status != 0) { + berrno be; + printf("Write unlock failed. ERR=%s\n", be.bstrerror(status)); + exit(1); + } + +#ifdef xxx + } else { + /* + * Look at the current data element to see whether + * the current thread last updated it. Count the + * times to report later. + */ + status = readlock(&data[element].lock); + if (status != 0) { + berrno be; + printf("Read lock failed. ERR=%s\n", be.bstrerror(status)); + exit(1); + } + self->reads++; + if (data[element].data == self->thread_num) + repeats++; + status = readunlock(&data[element].lock); + if (status != 0) { + berrno be; + printf("Read unlock failed. 
ERR=%s\n", be.bstrerror(status)); + exit(1); + } + } +#endif + element++; + if (element >= DATASIZE) { + element = 0; + } + } + if (repeats > 0) { + Pmsg2(000, _("Thread %d found unchanged elements %d times\n"), + self->thread_num, repeats); + } + return NULL; +} + +int main (int argc, char *argv[]) +{ + int count; + int data_count; + int status; + unsigned int seed = 1; + int thread_writes = 0; + int data_writes = 0; + + /* + * For Solaris 2.5,2.6,7 and 8 threads are not timesliced. + * To ensure our threads can run concurrently, we specifically + * set the concurrency level to THREADS. + */ + thr_setconcurrency(THREADS); /* Turned on only for Solaris */ + + /* + * Initialize the shared data. + */ + for (data_count = 0; data_count < DATASIZE; data_count++) { + data[data_count].data = 0; + data[data_count].writes = 0; + status = rwl_init(&data[data_count].lock); + if (status != 0) { + berrno be; + printf("Init rwlock failed. ERR=%s\n", be.bstrerror(status)); + exit(1); + } + } + + /* + * Create THREADS threads to access shared data. + */ + for (count = 0; count < THREADS; count++) { + threads[count].thread_num = count + 1; + threads[count].writes = 0; + threads[count].reads = 0; + threads[count].interval = rand_r(&seed) % 71; + if (threads[count].interval <= 0) { + threads[count].interval = 1; + } + status = pthread_create (&threads[count].thread_id, + NULL, thread_routine, (void*)&threads[count]); + if (status != 0 || (int)threads[count].thread_id == 0) { + berrno be; + printf("Create thread failed. ERR=%s\n", be.bstrerror(status)); + exit(1); + } + } + + /* + * Wait for all threads to complete, and collect + * statistics. + */ + for (count = 0; count < THREADS; count++) { + status = pthread_join (threads[count].thread_id, NULL); + if (status != 0) { + berrno be; + printf("Join thread failed. ERR=%s\n", be.bstrerror(status)); + exit(1); + } + thread_writes += threads[count].writes; + printf (_("%02d: interval %d, writes %d, reads %d\n"), + count, threads[count].interval, + threads[count].writes, threads[count].reads); + } + + /* + * Collect statistics for the data. + */ + for (data_count = 0; data_count < DATASIZE; data_count++) { + data_writes += data[data_count].writes; + printf (_("data %02d: value %d, %d writes\n"), + data_count, data[data_count].data, data[data_count].writes); + rwl_destroy (&data[data_count].lock); + } + + printf (_("Total: %d thread writes, %d data writes\n"), + thread_writes, data_writes); + return 0; +} + +#endif + +#ifdef TEST_RW_TRY_LOCK +/* + * brwlock_try_main.c + * + * Demonstrate use of non-blocking read-write locks. + * + * On older Solaris system, call thr_setconcurrency() + * to allow interleaved thread execution, since threads are not + * timesliced. + */ +#include +#include "rwlock.h" +#include "errors.h" + +#define THREADS 5 +#define ITERATIONS 1000 +#define DATASIZE 15 + +/* + * Keep statistics for each thread. 
+ */ +typedef struct thread_tag { + int thread_num; + pthread_t thread_id; + int r_collisions; + int w_collisions; + int updates; + int interval; +} thread_t; + +/* + * Read-write lock and shared data + */ +typedef struct data_tag { + brwlock_t lock; + int data; + int updates; +} data_t; + +thread_t threads[THREADS]; +data_t data[DATASIZE]; + +/* + * Thread start routine that uses read-write locks + */ +void *thread_routine (void *arg) +{ + thread_t *self = (thread_t*)arg; + int iteration; + int element; + int status; + lmgr_init_thread(); + element = 0; /* Current data element */ + + for (iteration = 0; iteration < ITERATIONS; iteration++) { + if ((iteration % self->interval) == 0) { + status = rwl_writetrylock (&data[element].lock); + if (status == EBUSY) + self->w_collisions++; + else if (status == 0) { + data[element].data++; + data[element].updates++; + self->updates++; + rwl_writeunlock (&data[element].lock); + } else + err_abort (status, _("Try write lock")); + } else { + status = rwl_readtrylock (&data[element].lock); + if (status == EBUSY) + self->r_collisions++; + else if (status != 0) { + err_abort (status, _("Try read lock")); + } else { + if (data[element].data != data[element].updates) + printf ("%d: data[%d] %d != %d\n", + self->thread_num, element, + data[element].data, data[element].updates); + rwl_readunlock (&data[element].lock); + } + } + + element++; + if (element >= DATASIZE) + element = 0; + } + lmgr_cleanup_thread(); + return NULL; +} + +int main (int argc, char *argv[]) +{ + int count, data_count; + unsigned int seed = 1; + int thread_updates = 0, data_updates = 0; + int status; + + /* + * For Solaris 2.5,2.6,7 and 8 threads are not timesliced. + * To ensure our threads can run concurrently, we specifically + * set the concurrency level to THREADS. + */ + DPRINTF (("Setting concurrency level to %d\n", THREADS)); + thr_setconcurrency(THREADS); /* Turned on only for Solaris */ + + /* + * Initialize the shared data. + */ + for (data_count = 0; data_count < DATASIZE; data_count++) { + data[data_count].data = 0; + data[data_count].updates = 0; + rwl_init(&data[data_count].lock); + } + + /* + * Create THREADS threads to access shared data. + */ + for (count = 0; count < THREADS; count++) { + threads[count].thread_num = count; + threads[count].r_collisions = 0; + threads[count].w_collisions = 0; + threads[count].updates = 0; + threads[count].interval = rand_r (&seed) % ITERATIONS; + status = pthread_create (&threads[count].thread_id, + NULL, thread_routine, (void*)&threads[count]); + if (status != 0) + err_abort (status, _("Create thread")); + } + + /* + * Wait for all threads to complete, and collect + * statistics. + */ + for (count = 0; count < THREADS; count++) { + status = pthread_join (threads[count].thread_id, NULL); + if (status != 0) + err_abort (status, _("Join thread")); + thread_updates += threads[count].updates; + printf (_("%02d: interval %d, updates %d, " + "r_collisions %d, w_collisions %d\n"), + count, threads[count].interval, + threads[count].updates, + threads[count].r_collisions, threads[count].w_collisions); + } + + /* + * Collect statistics for the data. 
+ */ + for (data_count = 0; data_count < DATASIZE; data_count++) { + data_updates += data[data_count].updates; + printf (_("data %02d: value %d, %d updates\n"), + data_count, data[data_count].data, data[data_count].updates); + rwl_destroy (&data[data_count].lock); + } + + return 0; +} + +#endif diff --git a/src/lib/devlock.h b/src/lib/devlock.h new file mode 100644 index 00000000..7824d0d9 --- /dev/null +++ b/src/lib/devlock.h @@ -0,0 +1,86 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula Thread Read/Write locking code. It permits + * multiple readers but only one writer. + * + * Kern Sibbald, January MMI + * + * This code adapted from "Programming with POSIX Threads", by + * David R. Butenhof + * + */ + +#ifndef __DEVLOCK_H +#define __DEVLOCK_H 1 + +struct take_lock_t { + pthread_t writer_id; /* id of writer */ + int reason; /* save reason */ + int prev_reason; /* previous reason */ +}; + + +class devlock { +private: + pthread_mutex_t mutex; + pthread_cond_t read; /* wait for read */ + pthread_cond_t write; /* wait for write */ + pthread_t writer_id; /* writer's thread id */ + int priority; /* used in deadlock detection */ + int valid; /* set when valid */ + int r_active; /* readers active */ + int w_active; /* writers active */ + int r_wait; /* readers waiting */ + int w_wait; /* writers waiting */ + int reason; /* reason for lock */ + int prev_reason; /* previous reason */ + bool can_take; /* can the lock be taken? */ + + +public: + devlock(int reason, bool can_take=false); + ~devlock(); + int init(int init_priority); + int destroy(); + int take_lock(take_lock_t *hold, int reason); + int return_lock(take_lock_t *hold); + void new_reason(int nreason) { prev_reason = reason; reason = nreason; }; + void restore_reason() { reason = prev_reason; prev_reason = 0; }; + + int writelock(int reason, bool can_take=false); + int writetrylock(); + int writeunlock(); + void write_release(); + + int readunlock(); + int readlock(); + int readtrylock(); + void read_release(); + +}; + + +#define DEVLOCK_VALID 0xfadbec + +#define DEVLOCK_INIIALIZER \ + {PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, \ + PTHREAD_COND_INITIALIZER, DEVOCK_VALID, 0, 0, 0, 0} + +#endif /* __DEVLOCK_H */ diff --git a/src/lib/dlist.c b/src/lib/dlist.c new file mode 100644 index 00000000..5e821ba0 --- /dev/null +++ b/src/lib/dlist.c @@ -0,0 +1,546 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. 
+ + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula doubly linked list routines. + * + * dlist is a doubly linked list with the links being in the + * list data item. + * + * Kern Sibbald, July MMIII + * + */ + +#include "bacula.h" + +/* =================================================================== + * dlist + */ + +/* + * Append an item to the list + */ +void dlist::append(void *item) +{ + set_next(item, NULL); + set_prev(item, tail); + if (tail) { + set_next(tail, item); + } + tail = item; + if (head == NULL) { /* if empty list, */ + head = item; /* item is head as well */ + } + num_items++; +} + +/* + * Append an item to the list + */ +void dlist::prepend(void *item) +{ + set_next(item, head); + set_prev(item, NULL); + if (head) { + set_prev(head, item); + } + head = item; + if (tail == NULL) { /* if empty list, */ + tail = item; /* item is tail too */ + } + num_items++; +} + +void dlist::insert_before(void *item, void *where) +{ + dlink *where_link = get_link(where); + + set_next(item, where); + set_prev(item, where_link->prev); + + if (where_link->prev) { + set_next(where_link->prev, item); + } + where_link->prev = item; + if (head == where) { + head = item; + } + num_items++; +} + +void dlist::insert_after(void *item, void *where) +{ + dlink *where_link = get_link(where); + + set_next(item, where_link->next); + set_prev(item, where); + + if (where_link->next) { + set_prev(where_link->next, item); + } + where_link->next = item; + if (tail == where) { + tail = item; + } + num_items++; +} + +/* + * Insert an item in the list, but only if it is unique + * otherwise, the item is returned non inserted + * + * Returns: item if item inserted + * other_item if same value already exists (item not inserted) + */ +void *dlist::binary_insert(void *item, int compare(void *item1, void *item2)) +{ + int comp; + int low, high, cur; + void *cur_item; + + if (num_items == 0) { + //Dmsg0(000, "Append first.\n"); + append(item); + return item; + } + if (num_items == 1) { + comp = compare(item, first()); + if (comp < 0) { + prepend(item); + //Dmsg0(000, "Insert before first.\n"); + return item; + } else if (comp > 0) { + insert_after(item, first()); + //Dmsg0(000, "Insert after first.\n"); + return item; + } else { + //Dmsg0(000, "Same as first.\n"); + return first(); + } + } + /* Check against last item */ + comp = compare(item, last()); + if (comp > 0) { + append(item); + //Dmsg0(000, "Appended item.\n"); + return item; + } else if (comp == 0) { + //Dmsg0(000, "Same as last.\n"); + return last(); + } + /* Check against first item */ + comp = compare(item, first()); + if (comp < 0) { + prepend(item); + //Dmsg0(000, "Inserted item before.\n"); + return item; + } else if (comp == 0) { + //Dmsg0(000, "Same as first.\n"); + return first(); + } + if (num_items == 2) { + insert_after(item, first()); + //Dmsg0(000, "Inserted item after.\n"); + return item; + } + low = 1; + high = num_items; + cur = 1; + cur_item = first(); + while (low < high) { + int nxt; + nxt = (low + high) / 2; + while (nxt > cur) { + cur_item = next(cur_item); + cur++; + } + while (nxt < cur) { + cur_item = prev(cur_item); + cur--; + } + //Dmsg1(000, "Compare item to %d\n", cur); + comp = compare(item, cur_item); + //Dmsg2(000, "Compare item to %d = %d\n", cur, comp); + if (comp < 0) { + high = cur; + //Dmsg2(000, "set high; low=%d high=%d\n", low, high); + } else if (comp > 0) { + low = cur + 1; + //Dmsg2(000, "set low; low=%d high=%d\n", low, high); + } else { + //Dmsg1(000, "Same as item %d\n", cur); + 
return cur_item; + } + } + if (high == cur) { + insert_before(item, cur_item); + //Dmsg1(000, "Insert before item %d\n", cur); + } else { + insert_after(item, cur_item); + //Dmsg1(000, "Insert after item %d\n", cur); + } + return item; +} + +/* + * Insert an item in the list, regardless if it is unique + * or not. + */ +void dlist::binary_insert_multiple(void *item, int compare(void *item1, void *item2)) +{ + void *ins_item = binary_insert(item, compare); + /* If identical, insert after the one found */ + if (ins_item != item) { + insert_after(item, ins_item); + } +} + +/* + * Search for item + */ +void *dlist::binary_search(void *item, int compare(void *item1, void *item2)) +{ + int comp; + int low, high, cur; + void *cur_item; + + + if (num_items == 0) { + return NULL; + } + cur_item = first(); + if (num_items == 1) { + comp = compare(item, cur_item); + if (comp == 0) { + return cur_item; + } else { + return NULL; + } + } + low = 1; + high = num_items; + cur = 1; + cur_item = first(); + while (low < high) { + int nxt; + nxt = (low + high) / 2; + /* Now get cur pointing to nxt */ + while (nxt > cur) { + cur_item = next(cur_item); + cur++; + } + while (nxt < cur) { + cur_item = prev(cur_item); + cur--; + } + comp = compare(item, cur_item); + //Dmsg2(000, "Compare item to %d = %d\n", cur, comp); + if (comp < 0) { + high = cur; + //Dmsg2(000, "set high; low=%d high=%d\n", low, high); + } else if (comp > 0) { + low = cur + 1; + //Dmsg2(000, "set low; low=%d high=%d\n", low, high); + } else { + return cur_item; + } + } + /* + * low == high can only happen if low just + * got incremented from cur, and we have + * not yet tested cur+1 + */ + if (low == high) { + cur_item = next(cur_item); + comp = compare(item, cur_item); + if (comp == 0) { + return cur_item; + } + } + return NULL; +} + + +void dlist::remove(void *item) +{ + void *xitem; + dlink *ilink = get_link(item); /* item's link */ + if (item == head) { + head = ilink->next; + if (head) { + set_prev(head, NULL); + } + if (item == tail) { + tail = ilink->prev; + } + } else if (item == tail) { + tail = ilink->prev; + if (tail) { + set_next(tail, NULL); + } + } else { + xitem = ilink->next; + set_prev(xitem, ilink->prev); + xitem = ilink->prev; + set_next(xitem, ilink->next); + } + num_items--; + if (num_items == 0) { + head = tail = NULL; + } + ilink->prev = ilink->next = NULL; +} + +void *dlist::next(void *item) +{ + if (item == NULL) { + return head; + } + return get_next(item); +} + +void *dlist::prev(void *item) +{ + if (item == NULL) { + return tail; + } + return get_prev(item); +} + + +/* Destroy the list contents */ +void dlist::destroy() +{ + for (void *n=head; n; ) { + void *ni = get_next(n); + free(n); + n = ni; + } + num_items = 0; + head = tail = NULL; +} + +/* String helpers for dlist usage */ + +dlistString *new_dlistString(const char *str) +{ + return new_dlistString(str, strlen(str)); +} + +dlistString *new_dlistString(const char *str, int len) +{ + dlistString *node; + node = (dlistString *)malloc(sizeof(dlink) + len +1); + bstrncpy(node->c_str(), str, len + 1); + return node; +} + +#ifdef TEST_PROGRAM + +struct MYJCR { + char *buf; + dlink link; +}; + +static int my_compare(void *item1, void *item2) +{ + MYJCR *jcr1, *jcr2; + int comp; + jcr1 = (MYJCR *)item1; + jcr2 = (MYJCR *)item2; + comp = strcmp(jcr1->buf, jcr2->buf); + //Dmsg3(000, "compare=%d: %s to %s\n", comp, jcr1->buf, jcr2->buf); + return comp; +} + +int main() +{ + char buf[30]; + dlist *jcr_chain; + MYJCR *jcr = NULL; + MYJCR *jcr1; + MYJCR *save_jcr = NULL; + 
MYJCR *next_jcr; + int count; + + jcr_chain = (dlist *)malloc(sizeof(dlist)); + jcr_chain->init(jcr, &jcr->link); + + printf("Prepend 20 items 0-19\n"); + for (int i=0; i<20; i++) { + sprintf(buf, "This is dlist item %d", i); + jcr = (MYJCR *)malloc(sizeof(MYJCR)); + jcr->buf = bstrdup(buf); + jcr_chain->prepend(jcr); + if (i == 10) { + save_jcr = jcr; + } + } + + next_jcr = (MYJCR *)jcr_chain->next(save_jcr); + printf("11th item=%s\n", next_jcr->buf); + jcr1 = (MYJCR *)malloc(sizeof(MYJCR)); + jcr1->buf = save_jcr->buf; + printf("Remove 10th item\n"); + jcr_chain->remove(save_jcr); + free(save_jcr); + printf("Re-insert 10th item\n"); + jcr_chain->insert_before(jcr1, next_jcr); + + printf("Print remaining list.\n"); + foreach_dlist(jcr, jcr_chain) { + printf("Dlist item = %s\n", jcr->buf); + free(jcr->buf); + } + jcr_chain->destroy(); + free(jcr_chain); + + /* The following may seem a bit odd, but we create a chaing + * of jcr objects. Within a jcr object, there is a buf + * that points to a malloced string containing data + */ + jcr_chain = New(dlist(jcr, &jcr->link)); + printf("append 20 items 0-19\n"); + for (int i=0; i<20; i++) { + sprintf(buf, "This is dlist item %d", i); + jcr = (MYJCR *)malloc(sizeof(MYJCR)); + jcr->buf = bstrdup(buf); + jcr_chain->append(jcr); + if (i == 10) { + save_jcr = jcr; + } + } + + next_jcr = (MYJCR *)jcr_chain->next(save_jcr); + printf("11th item=%s\n", next_jcr->buf); + jcr = (MYJCR *)malloc(sizeof(MYJCR)); + jcr->buf = save_jcr->buf; + printf("Remove 10th item\n"); + jcr_chain->remove(save_jcr); + free(save_jcr); + printf("Re-insert 10th item\n"); + jcr_chain->insert_before(jcr, next_jcr); + + printf("Print remaining list.\n"); + foreach_dlist (jcr, jcr_chain) { + printf("Dlist item = %s\n", jcr->buf); + free(jcr->buf); + } + + delete jcr_chain; + + + /* Now do a binary insert for the list */ + jcr_chain = New(dlist(jcr, &jcr->link)); +#define CNT 26 + printf("append %d items\n", CNT*CNT*CNT); + strcpy(buf, "ZZZ"); + count = 0; + for (int i=0; ibuf = bstrdup(buf); + jcr1 = (MYJCR *)jcr_chain->binary_insert(jcr, my_compare); + if (jcr != jcr1) { + Dmsg2(000, "Insert of %s vs %s failed.\n", jcr->buf, jcr1->buf); + } + buf[1]--; + } + buf[1] = 'Z'; + buf[2]--; + } + buf[2] = 'Z'; + buf[0]--; + } + + jcr = (MYJCR *)malloc(sizeof(MYJCR)); + strcpy(buf, "a"); + jcr->buf = bstrdup(buf); + if (jcr_chain->binary_search(jcr, my_compare)) { + printf("One less failed!!!!\n"); + } else { + printf("One less: OK\n"); + } + free(jcr->buf); + strcpy(buf, "ZZZZZZZZZZZZZZZZ"); + jcr->buf = bstrdup(buf); + if (jcr_chain->binary_search(jcr, my_compare)) { + printf("One greater failed!!!!\n"); + } else { + printf("One greater: OK\n"); + } + free(jcr->buf); + free(jcr); + + + printf("Find each of %d items in list.\n", count); + foreach_dlist (jcr, jcr_chain) { + if (!jcr_chain->binary_search(jcr, my_compare)) { + printf("Dlist binary_search item not found = %s\n", jcr->buf); + } + } + printf("Free each of %d items in list.\n", count); + foreach_dlist (jcr, jcr_chain) { + free(jcr->buf); + jcr->buf = NULL; + } + delete jcr_chain; + + /* Finally, do a test using the dlistString string helper, which + * allocates a dlist node and stores the string directly in + * it. 
+ */ + dlist chain; + chain.append(new_dlistString("This is a long test line")); +#define CNT 26 + printf("append %d dlistString items\n", CNT*CNT*CNT); + strcpy(buf, "ZZZ"); + count = 0; + for (int i=0; ic_str()); + } + printf("destroy dlistString chain\n"); + chain.destroy(); + + sm_dump(false); /* unit test */ + +} +#endif diff --git a/src/lib/dlist.h b/src/lib/dlist.h new file mode 100644 index 00000000..414999cb --- /dev/null +++ b/src/lib/dlist.h @@ -0,0 +1,207 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Written by Kern Sibbald MMIV + * + */ + + +/* ======================================================================== + * + * Doubly linked list -- dlist + * + * See the end of the file for the dlistString class which + * facilitates storing strings in a dlist. + * + * Kern Sibbald, MMIV and MMVII + * + */ + +#define M_ABORT 1 + +/* In case you want to specifically specify the offset to the link */ +#define OFFSET(item, link) (int)((char *)(link) - (char *)(item)) +/* + * There is a lot of extra casting here to work around the fact + * that some compilers (Sun and Visual C++) do not accept + * (void *) as an lvalue on the left side of an equal. + * + * Loop var through each member of list + */ +#ifdef HAVE_TYPEOF +#define foreach_dlist(var, list) \ + for((var)=NULL; ((var)=(typeof(var))(list)->next(var)); ) +#else +#define foreach_dlist(var, list) \ + for((var)=NULL; (*((void **)&(var))=(void*)((list)->next(var))); ) +#endif + +struct dlink { + void *next; + void *prev; +}; + +class dlist : public SMARTALLOC { + void *head; + void *tail; + int16_t loffset; + uint32_t num_items; +public: + dlist(void *item, dlink *link); + dlist(void); + ~dlist() { destroy(); } + void init(void *item, dlink *link); + void init(); + void prepend(void *item); + void append(void *item); + void set_prev(void *item, void *prev); + void set_next(void *item, void *next); + void *get_prev(void *item); + void *get_next(void *item); + dlink *get_link(void *item); + void insert_before(void *item, void *where); + void insert_after(void *item, void *where); + void *binary_insert(void *item, int compare(void *item1, void *item2)); + void *binary_search(void *item, int compare(void *item1, void *item2)); + void binary_insert_multiple(void *item, int compare(void *item1, void *item2)); + void remove(void *item); + bool empty() const; + int size() const; + void *next(void *item); + void *prev(void *item); + void destroy(); + void *first() const; + void *last() const; +}; + + +/* + * This allows us to do explicit initialization, + * allowing us to mix C++ classes inside malloc'ed + * C structures. Define before called in constructor. 
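+ *
+ * Illustrative sketch only (not part of the original header; MYITEM is an
+ * assumed caller-side structure, mirroring the MYJCR example in dlist.c):
+ *
+ *    struct MYITEM {
+ *       char *name;
+ *       dlink link;                    // link embedded in the item
+ *    };
+ *    MYITEM *item = (MYITEM *)malloc(sizeof(MYITEM));
+ *    dlist *list = (dlist *)malloc(sizeof(dlist));
+ *    list->init(item, &item->link);    // records the link offset in MYITEM
+ *    list->append(item);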
+ */ +inline void dlist::init(void *item, dlink *link) +{ + head = tail = NULL; + loffset = (int)((char *)link - (char *)item); + if (loffset < 0 || loffset > 5000) { + Emsg0(M_ABORT, 0, "Improper dlist initialization.\n"); + } + num_items = 0; +} + +inline void dlist::init() +{ + head = tail = NULL; + loffset = 0; + num_items = 0; +} + + +/* + * Constructor called with the address of a + * member of the list (not the list head), and + * the address of the link within that member. + * If the link is at the beginning of the list member, + * then there is no need to specify the link address + * since the offset is zero. + */ +inline dlist::dlist(void *item, dlink *link) +{ + init(item, link); +} + +/* Constructor with link at head of item */ +inline dlist::dlist(void) : head(0), tail(0), loffset(0), num_items(0) +{ +} + +inline void dlist::set_prev(void *item, void *prev) +{ + ((dlink *)(((char *)item)+loffset))->prev = prev; +} + +inline void dlist::set_next(void *item, void *next) +{ + ((dlink *)(((char *)item)+loffset))->next = next; +} + +inline void *dlist::get_prev(void *item) +{ + return ((dlink *)(((char *)item)+loffset))->prev; +} + +inline void *dlist::get_next(void *item) +{ + return ((dlink *)(((char *)item)+loffset))->next; +} + + +inline dlink *dlist::get_link(void *item) +{ + return (dlink *)(((char *)item)+loffset); +} + + + +inline bool dlist::empty() const +{ + return head == NULL; +} + +inline int dlist::size() const +{ + return num_items; +} + + + +inline void * dlist::first() const +{ + return head; +} + +inline void * dlist::last() const +{ + return tail; +} + +/* + * C string helper routines for dlist + * The string (char *) is kept in the node + * + * Kern Sibbald, February 2007 + * + */ +class dlistString +{ +public: + char *c_str() { return m_str; }; + dlink get_link() { return m_link; }; /* eliminate clang compiler warning */ + +private: + dlink m_link; + char m_str[1]; + /* !!! Don't put anything after this as this space is used + * to hold the string in inline + */ +}; + +extern dlistString *new_dlistString(const char *str, int len); +extern dlistString *new_dlistString(const char *str); diff --git a/src/lib/edit.c b/src/lib/edit.c new file mode 100644 index 00000000..fbfc7f12 --- /dev/null +++ b/src/lib/edit.c @@ -0,0 +1,578 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * edit.c edit string to ascii, and ascii to internal + * + * Kern Sibbald, December MMII + * + */ + +#include "bacula.h" +#include + +/* We assume ASCII input and don't worry about overflow */ +uint64_t str_to_uint64(char *str) +{ + char *p = str; + uint64_t value = 0; + + if (!p) { + return 0; + } + while (B_ISSPACE(*p)) { + p++; + } + if (*p == '+') { + p++; + } + if (*p == '0' && *(p+1) == 'x') { + p = p + 2; /* skip 0x */ + + while (B_ISXDIGIT(*p)) { + if (B_ISDIGIT(*p)) { + value = (value<<4) + (*p - '0'); + + } else { + value = (value<<4) + (tolower(*p) - 'a' + 10); + } + p++; + } + } else { + while (B_ISDIGIT(*p)) { + value = B_TIMES10(value) + *p - '0'; + p++; + } + } + return value; +} + +int64_t str_to_int64(char *str) +{ + char *p = str; + int64_t value; + bool negative = false; + + if (!p) { + return 0; + } + while (B_ISSPACE(*p)) { + p++; + } + if (*p == '+') { + p++; + } else if (*p == '-') { + negative = true; + p++; + } + value = str_to_uint64(p); + if (negative) { + value = -value; + } + return value; +} + + +/* + * Edit an integer number with commas, the supplied buffer + * must be at least 27 bytes long. The incoming number + * is always widened to 64 bits. + */ +char *edit_uint64_with_commas(uint64_t val, char *buf) +{ + edit_uint64(val, buf); + return add_commas(buf, buf); +} + +/* + * Edit an integer into "human-readable" format with four or fewer + * significant digits followed by a suffix that indicates the scale + * factor. The buf array inherits a 27 byte minimim length + * requirement from edit_unit64_with_commas(), although the output + * string is limited to eight characters. + */ +char *edit_uint64_with_suffix(uint64_t val, char *buf) +{ + int commas = 0; + char *c, mbuf[50]; + const char *suffix[] = + { "", "K", "M", "G", "T", "P", "E", "Z", "Y", "FIX ME" }; + int suffixes = sizeof(suffix) / sizeof(*suffix); + + edit_uint64_with_commas(val, mbuf); + + if ((c = strchr(mbuf, ',')) != NULL) { + commas++; + *c++ = '.'; + while ((c = strchr(c, ',')) != NULL) { + commas++; + *c++ = '\0'; + } + mbuf[5] = '\0'; // drop this to get '123.456 TB' rather than '123.4 TB' + } + + if (commas >= suffixes) + commas = suffixes - 1; + bsnprintf(buf, 27, "%s %s", mbuf, suffix[commas]); + return buf; +} + +/* + * Edit an integer number, the supplied buffer + * must be at least 27 bytes long. The incoming number + * is always widened to 64 bits. + */ +char *edit_uint64(uint64_t val, char *buf) +{ + /* + * Replacement for sprintf(buf, "%" llu, val) + */ + char mbuf[50]; + mbuf[sizeof(mbuf)-1] = 0; + int i = sizeof(mbuf)-2; /* edit backward */ + if (val == 0) { + mbuf[i--] = '0'; + } else { + while (val != 0) { + mbuf[i--] = "0123456789"[val%10]; + val /= 10; + } + } + bstrncpy(buf, &mbuf[i+1], 27); + return buf; +} + +char *edit_int64(int64_t val, char *buf) +{ + /* + * Replacement for sprintf(buf, "%" llu, val) + */ + char mbuf[50]; + bool negative = false; + mbuf[sizeof(mbuf)-1] = 0; + int i = sizeof(mbuf)-2; /* edit backward */ + if (val == 0) { + mbuf[i--] = '0'; + } else { + if (val < 0) { + negative = true; + val = -val; + } + while (val != 0) { + mbuf[i--] = "0123456789"[val%10]; + val /= 10; + } + } + if (negative) { + mbuf[i--] = '-'; + } + bstrncpy(buf, &mbuf[i+1], 27); + return buf; +} + +/* + * Edit an integer number with commas, the supplied buffer + * must be at least 27 bytes long. The incoming number + * is always widened to 64 bits. 
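+ *
+ * Illustrative sketch only (not part of the original sources; "num_bytes"
+ * is an assumption):
+ *
+ *    char ed1[50];
+ *    printf("Wrote %s bytes\n", edit_int64_with_commas(num_bytes, ed1));
+ *
+ * With num_bytes == 1234567 the buffer would hold "1,234,567".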
+ */ +char *edit_int64_with_commas(int64_t val, char *buf) +{ + edit_int64(val, buf); + return add_commas(buf, buf); +} + +/* + * Given a string "str", separate the numeric part into + * str, and the modifier into mod. + */ +static bool get_modifier(char *str, char *num, int num_len, char *mod, int mod_len) +{ + int i, len, num_begin, num_end, mod_begin, mod_end; + + strip_trailing_junk(str); + len = strlen(str); + + for (i=0; i (num_end - num_begin + 1)) { + num_len = num_end - num_begin + 1; + } + if (num_len == 0) { + return false; + } + /* Eat any spaces in front of modifier */ + for ( ; i (mod_end - mod_begin + 1)) { + mod_len = mod_end - mod_begin + 1; + } + Dmsg5(900, "str=%s: num_beg=%d num_end=%d mod_beg=%d mod_end=%d\n", + str, num_begin, num_end, mod_begin, mod_end); + bstrncpy(num, &str[num_begin], num_len); + bstrncpy(mod, &str[mod_begin], mod_len); + if (!is_a_number(num)) { + return false; + } + bstrncpy(str, &str[mod_end], len); + Dmsg2(900, "num=%s mod=%s\n", num, mod); + + return true; +} + +/* + * Convert a string duration to utime_t (64 bit seconds) + * Returns false: if error + true: if OK, and value stored in value + */ +bool duration_to_utime(char *str, utime_t *value) +{ + int i, mod_len; + double val, total = 0.0; + char mod_str[20]; + char num_str[50]; + /* + * The "n" = mins and months appears before minutes so that m maps + * to months. These "kludges" make it compatible with pre 1.31 + * Baculas. + */ + static const char *mod[] = {"n", "seconds", "months", "minutes", "mins", + "hours", "days", "weeks", "quarters", "years", NULL}; + static const int32_t mult[] = {60, 1, 60*60*24*30, 60, 60, + 3600, 3600*24, 3600*24*7, 3600*24*91, 3600*24*365}; + + while (*str) { + if (!get_modifier(str, num_str, sizeof(num_str), mod_str, sizeof(mod_str))) { + return false; + } + /* Now find the multiplier corresponding to the modifier */ + mod_len = strlen(mod_str); + if (mod_len == 0) { + i = 1; /* default to seconds */ + } else { + for (i=0; mod[i]; i++) { + if (strncasecmp(mod_str, mod[i], mod_len) == 0) { + break; + } + } + if (mod[i] == NULL) { + return false; + } + } + Dmsg2(900, "str=%s: mult=%d\n", num_str, mult[i]); + errno = 0; + val = strtod(num_str, NULL); + if (errno != 0 || val < 0) { + return false; + } + total += val * mult[i]; + } + *value = (utime_t)total; + return true; +} + +/* + * Edit a utime "duration" into ASCII + */ +char *edit_utime(utime_t val, char *buf, int buf_len) +{ + char mybuf[200]; + static const int32_t mult[] = {60*60*24*365, 60*60*24*30, 60*60*24, 60*60, 60}; + static const char *mod[] = {"year", "month", "day", "hour", "min"}; + int i; + uint32_t times; + + *buf = 0; + for (i=0; i<5; i++) { + times = (uint32_t)(val / mult[i]); + if (times > 0) { + val = val - (utime_t)times * mult[i]; + bsnprintf(mybuf, sizeof(mybuf), "%d %s%s ", times, mod[i], times>1?"s":""); + bstrncat(buf, mybuf, buf_len); + } + } + if (val == 0 && strlen(buf) == 0) { + bstrncat(buf, "0 secs", buf_len); + } else if (val != 0) { + bsnprintf(mybuf, sizeof(mybuf), "%d sec%s", (uint32_t)val, val>1?"s":""); + bstrncat(buf, mybuf, buf_len); + } + return buf; +} + +static bool strunit_to_uint64(char *str, int str_len, uint64_t *value, + const char **mod) +{ + int i, mod_len; + double val; + char mod_str[20]; + char num_str[50]; + const int64_t mult[] = {1, /* byte */ + 1024, /* kilobyte */ + 1000, /* kb kilobyte */ + 1048576, /* megabyte */ + 1000000, /* mb megabyte */ + 1073741824, /* gigabyte */ + 1000000000, /* gb gigabyte */ + 1099511627776LL, /* terabyte */ + 
1000000000000LL}; /* tb terabyte */ + + if (!get_modifier(str, num_str, sizeof(num_str), mod_str, sizeof(mod_str))) { + return 0; + } + /* Now find the multiplier corresponding to the modifier */ + mod_len = strlen(mod_str); + if (mod_len == 0) { + i = 0; /* default with no modifier = 1 */ + } else { + for (i=0; mod[i]; i++) { + if (strncasecmp(mod_str, mod[i], mod_len) == 0) { + break; + } + } + if (mod[i] == NULL) { + return false; + } + } + Dmsg2(900, "str=%s: mult=%d\n", str, mult[i]); + errno = 0; + val = strtod(num_str, NULL); + if (errno != 0 || val < 0) { + return false; + } + *value = (utime_t)(val * mult[i]); + return true; +} + +/* + * Convert a size in bytes to uint64_t + * Returns false: if error + true: if OK, and value stored in value + */ +bool size_to_uint64(char *str, int str_len, uint64_t *value) +{ + /* first item * not used */ + static const char *mod[] = {"*", "k", "kb", "m", "mb", + "g", "gb", "t", "tb", NULL}; + return strunit_to_uint64(str, str_len, value, mod); +} + +/* + * Convert a speed in bytes/s to uint64_t + * Returns false: if error + true: if OK, and value stored in value + */ +bool speed_to_uint64(char *str, int str_len, uint64_t *value) +{ + /* first item * not used */ + static const char *mod[] = {"*", "k/s", "kb/s", "m/s", "mb/s", NULL}; + return strunit_to_uint64(str, str_len, value, mod); +} + +/* + * Check if specified string is a number or not. + * Taken from SQLite, cool, thanks. + */ +bool is_a_number(const char *n) +{ + bool digit_seen = false; + + if (n == NULL) { + return false; + } + + if( *n == '-' || *n == '+' ) { + n++; + } + while (B_ISDIGIT(*n)) { + digit_seen = true; + n++; + } + if (digit_seen && *n == '.') { + n++; + while (B_ISDIGIT(*n)) { n++; } + } + if (digit_seen && (*n == 'e' || *n == 'E') + && (B_ISDIGIT(n[1]) || ((n[1]=='-' || n[1] == '+') && B_ISDIGIT(n[2])))) { + n += 2; /* skip e- or e+ or e digit */ + while (B_ISDIGIT(*n)) { n++; } + } + return digit_seen && *n==0; +} + +/* + * Check if specified string is a list of numbers or not + */ +bool is_a_number_list(const char *n) +{ + bool previous_digit = false; + bool digit_seen = false; + if (n == NULL) { + return false; + } + while (*n) { + if (B_ISDIGIT(*n)) { + previous_digit=true; + digit_seen = true; + } else if (*n == ',' && previous_digit) { + previous_digit = false; + } else { + return false; + } + n++; + } + return digit_seen && *n==0; +} + +/* + * Check if the specified string is an integer + */ +bool is_an_integer(const char *n) +{ + bool digit_seen = false; + if (n == NULL) { + return false; + } + while (B_ISDIGIT(*n)) { + digit_seen = true; + n++; + } + return digit_seen && *n==0; +} + +/* + * Check if Bacula Resoure Name is valid + */ +/* + * Check if the Volume name has legal characters + * If ua is non-NULL send the message + */ +bool is_name_valid(const char *name, POOLMEM **msg) +{ + int len; + const char *p; + /* Special characters to accept */ + const char *accept = ":.-_ "; + + /* No name is invalid */ + if (!name) { + if (msg) { + Mmsg(msg, _("Empty name not allowed.\n")); + } + return false; + } + /* Restrict the characters permitted in the Volume name */ + for (p=name; *p; p++) { + if (B_ISALPHA(*p) || B_ISDIGIT(*p) || strchr(accept, (int)(*p))) { + continue; + } + if (msg) { + Mmsg(msg, _("Illegal character \"%c\" in name.\n"), *p); + } + return false; + } + len = p - name; + if (len >= MAX_NAME_LENGTH) { + if (msg) { + Mmsg(msg, _("Name too long.\n")); + } + return false; + } + if (len == 0) { + if (msg) { + Mmsg(msg, _("Volume name must be at least 
one character long.\n")); + } + return false; + } + return true; +} + + + +/* + * Add commas to a string, which is presumably + * a number. + */ +char *add_commas(char *val, char *buf) +{ + int len, nc; + char *p, *q; + int i; + + if (val != buf) { + strcpy(buf, val); + } + len = strlen(buf); + if (len < 1) { + len = 1; + } + nc = (len - 1) / 3; + p = buf+len; + q = p + nc; + *q-- = *p--; + for ( ; nc; nc--) { + for (i=0; i < 3; i++) { + *q-- = *p--; + } + *q-- = ','; + } + return buf; +} + +#ifdef TEST_PROGRAM +void d_msg(const char*, int, int, const char*, ...) +{} +int main(int argc, char *argv[]) +{ + char *str[] = {"3", "3n", "3 hours", "3.5 day", "3 week", "3 m", "3 q", "3 years"}; + utime_t val; + char buf[100]; + char outval[100]; + + for (int i=0; i<8; i++) { + strcpy(buf, str[i]); + if (!duration_to_utime(buf, &val)) { + printf("Error return from duration_to_utime for in=%s\n", str[i]); + continue; + } + edit_utime(val, outval); + printf("in=%s val=%" lld " outval=%s\n", str[i], val, outval); + } +} +#endif diff --git a/src/lib/flist.c b/src/lib/flist.c new file mode 100644 index 00000000..935e7f19 --- /dev/null +++ b/src/lib/flist.c @@ -0,0 +1,191 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula fifo list routines + * + * flist is a simple malloc'ed array of pointers. Derived from alist. 
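+ *
+ * A minimal usage sketch (illustration only) of the queue()/dequeue()
+ * interface defined below; the capacity and payload strings are arbitrary
+ * example values:
+ *
+ *   flist *q = New(flist(2));        // FIFO with room for two items
+ *   q->queue((void *)"first");       // true: item stored
+ *   q->queue((void *)"second");      // true: item stored
+ *   q->queue((void *)"third");       // false: list is full
+ *   char *p = (char *)q->dequeue();  // returns "first" (FIFO order)
+ *   delete q;                        // destructor calls destroy()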
+ * + * Kern Sibbald, August 2014 + * + */ + +#include "bacula.h" + +void *flist::dequeue() +{ + void *item; + + if (num_items == 0) { + return NULL; + } + num_items--; + item = items[get_item]; + items[get_item++] = NULL; + if (get_item >= max_items) { + get_item = 0; + } + return item; +} + + +/* + * Queue an item to the list + */ +bool flist::queue(void *item) +{ + if (num_items == max_items) { + return false; + } + num_items++; + items[add_item++] = item; + if (add_item >= max_items) { + add_item = 0; + } + return true; +} + +/* Destroy the list and its contents */ +void flist::destroy() +{ + if (num_items && own_items) { + for (int i=0; imylist.init(); + ok(fileset && fileset->mylist.empty(), "Default initialization"); + + Pmsg0(0, "Manual allocation/destruction of flist:\n"); + fileset->mylist.queue((void*)"first"); + fileset->mylist.queue((void*)"second"); + fileset->mylist.queue((void*)"third"); + q = (char*)fileset->mylist.dequeue(); + ok(strcmp("first", q) == 0, "Checking first dequeue"); + q = (char*)fileset->mylist.dequeue(); + ok(strcmp("second", q) == 0, "Checking second dequeue"); + q = (char*)fileset->mylist.dequeue(); + ok(strcmp("third", q) == 0, "Checking third dequeue"); + ok(fileset->mylist.empty(), "Checking if empty"); + + Pmsg0(0, "automatic allocation/destruction of flist:\n"); + + dn = 0; + check_dq = true; + check_qa = true; + for (i = dn; i < NUMITEMS; i++) { + sprintf(buf, "This is item %d", i); + p = bstrdup(buf); + if (!fileset->mylist.queue(p)) { + q = (char *)fileset->mylist.dequeue(); + sprintf(buftmp, "This is item %d", dn++); + if (strcmp(q, buftmp) != 0){ + check_dq = false; + } + free(q); + if (!fileset->mylist.queue(p)) { + check_qa = false; + } + } + } + ok(check_dq, "Checking first out dequeue"); + ok(check_qa, "Checking queue again after dequeue"); + /* dequeue rest of the list */ + check_dq = true; + while ((q=(char *)fileset->mylist.dequeue())) { + sprintf(buftmp, "This is item %d", dn++); + if (strcmp(q, buftmp) != 0){ + check_dq = false; + } + free(q); + } + ok(check_dq, "Checking dequeue rest of the list"); + ok(fileset->mylist.empty(), "Checking if list is empty"); + ok(fileset->mylist.dequeue() == NULL, "Checking empty dequeue"); + fileset->mylist.destroy(); + ok(fileset->mylist.size() == 0, "Check size after destroy"); + free(fileset); + + Pmsg0(0, "Allocation/destruction using new delete\n"); + + mlist = New(flist(10)); + ok(mlist && mlist->empty(), "Constructor initialization"); + + for (i = 0; i < NUMITEMS; i++) { + sprintf(buf, "This is item %d", i); + p = bstrdup(buf); + if (!mlist->queue(p)) { + free(p); + break; + } + } + ok(mlist->size() == 10, "Checking list size"); + dn = 0; + check_dq = true; + for (i=1; !mlist->empty(); i++) { + p = (char *)mlist->dequeue(); + sprintf(buf, "This is item %d", dn++); + if (strcmp(p, buf) != 0){ + check_dq = false; + } + free(p); + } + ok(check_dq, "Checking dequeue list"); + ok(mlist->empty(), "Checking list empty"); + + delete mlist; + + return report(); +} +#endif /* TEST_PROGRAM */ diff --git a/src/lib/flist.h b/src/lib/flist.h new file mode 100644 index 00000000..cb0ddf69 --- /dev/null +++ b/src/lib/flist.h @@ -0,0 +1,95 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Kern Sibbald, August 2014. + */ + +/* Second arg of init */ +enum { + owned_by_flist = true, + not_owned_by_flist = false +}; + +/* + * Fifo list -- derived from alist + */ +class flist : public SMARTALLOC { + void **items; + int num_items; + int max_items; + int add_item; + int get_item; + bool own_items; +public: + flist(int num = 10, bool own=false); + ~flist(); + void init(int num = 10, bool own=false); + bool queue(void *item); + void *dequeue(); + bool empty() const; + bool full() const; + int size() const; + void destroy(); +}; + +inline bool flist::empty() const +{ + return num_items == 0; +} + +inline bool flist::full() const +{ + return num_items == max_items; +} + +/* + * This allows us to do explicit initialization, + * allowing us to mix C++ classes inside malloc'ed + * C structures. Define before called in constructor. + */ +inline void flist::init(int num, bool own) +{ + items = NULL; + num_items = 0; + max_items = num; + own_items = own; + add_item = 0; + get_item = 0; + items = (void **)malloc(max_items * sizeof(void *)); +} + +/* Constructor */ +inline flist::flist(int num, bool own) +{ + init(num, own); +} + +/* Destructor */ +inline flist::~flist() +{ + destroy(); +} + + + +/* Current size of list */ +inline int flist::size() const +{ + return num_items; +} diff --git a/src/lib/fnmatch.c b/src/lib/fnmatch.c new file mode 100644 index 00000000..ae8d9f4d --- /dev/null +++ b/src/lib/fnmatch.c @@ -0,0 +1,343 @@ +/* + * Copyright (c) 1989, 1993, 1994 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Guido van Rossum. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* OpenBSD: fnmatch.c,v 1.6 1998/03/19 00:29:59 millert */ + +/* + * Function fnmatch() as specified in POSIX 1003.2-1992, section B.6. + * Compares a filename or pathname to a pattern. + */ + +/* Define SYS to use the system fnmatch() rather than ours */ +/* #define SYS 1 */ + +#include "bacula.h" +#ifdef SYS +#include +#else +#include "fnmatch.h" +#endif + +#undef EOS +#define EOS '\0' + +#define RANGE_MATCH 1 +#define RANGE_NOMATCH 0 +#define RANGE_ERROR (-1) + +/* Limit of recursion during matching attempts. */ +#define FNM_MAX_RECUR 64 + +#define ISSET(x, y) ((x) & (y)) +#define FOLD(c) ((flags & FNM_CASEFOLD) && B_ISUPPER(c) ? tolower(c) : (c)) + +static int rangematch(const char *, char, int, char **); +static int r_fnmatch(const char *, const char *, int, int); + +#ifdef SYS +int xfnmatch(const char *pattern, const char *string, int flags) +#else +int fnmatch(const char *pattern, const char *string, int flags) +#endif +{ + int e; + + e = r_fnmatch(pattern, string, flags, FNM_MAX_RECUR); + if (e == -1) { /* Too much recursion */ + e = FNM_NOMATCH; + } + return (e); +} + +static +int r_fnmatch(const char *pattern, const char *string, int flags, int recur) +{ + const char *stringstart; + char *newp; + char c, test; + int e; + + if (recur-- <= 0) { + return (-1); + } + + stringstart = string; + for ( ;; ) { + switch (c = *pattern++) { + case EOS: + if (ISSET(flags, FNM_LEADING_DIR) && IsPathSeparator(*string)) + return (0); + return (*string == EOS ? 0 : FNM_NOMATCH); + case '?': + if (*string == EOS) + return (FNM_NOMATCH); + if (IsPathSeparator(*string) && ISSET(flags, FNM_PATHNAME)) + return (FNM_NOMATCH); + if (*string == '.' && ISSET(flags, FNM_PERIOD) && + (string == stringstart || + (ISSET(flags, FNM_PATHNAME) && IsPathSeparator(*(string - 1))))) + return (FNM_NOMATCH); + ++string; + break; + case '*': + c = *pattern; + /* Collapse multiple stars. */ + while (c == '*') { + c = *++pattern; + } + + if (*string == '.' && ISSET(flags, FNM_PERIOD) && + (string == stringstart || + (ISSET(flags, FNM_PATHNAME) && IsPathSeparator(*(string - 1))))) { + return (FNM_NOMATCH); + } + + /* Optimize for pattern with * at end or before /. */ + if (c == EOS) { + if (ISSET(flags, FNM_PATHNAME)) { + return (ISSET(flags, FNM_LEADING_DIR) || + strchr(string, '/') == NULL ? 0 : FNM_NOMATCH); + } else { + return (0); + } + } else if (IsPathSeparator(c) && ISSET(flags, FNM_PATHNAME)) { + if ((string = strchr(string, '/')) == NULL) + return (FNM_NOMATCH); + break; + } + + /* General case, use recursion. */ + while ((test = *string) != EOS) { + e = r_fnmatch(pattern, string, flags & ~FNM_PERIOD, recur); + if (e != FNM_NOMATCH) { /* can be NOMATCH, -1 or MATCH */ + return (e); + } + if (test == '/' && ISSET(flags, FNM_PATHNAME)) { + break; + } + ++string; + } + return (FNM_NOMATCH); + case '[': + if (*string == EOS) + return (FNM_NOMATCH); + if (IsPathSeparator(*string) && ISSET(flags, FNM_PATHNAME)) + return (FNM_NOMATCH); + if (*string == '.' 
&& ISSET(flags, FNM_PERIOD) && + (string == stringstart || + (ISSET(flags, FNM_PATHNAME) && IsPathSeparator(*(string - 1))))) + return (FNM_NOMATCH); + + switch (rangematch(pattern, *string, flags, &newp)) { + case RANGE_ERROR: + /* not a good range, treat as normal text */ + goto normal; + case RANGE_MATCH: + pattern = newp; + break; + case RANGE_NOMATCH: + return (FNM_NOMATCH); + } + ++string; + break; + + case '\\': + if (!ISSET(flags, FNM_NOESCAPE)) { + if ((c = *pattern++) == EOS) { + c = '\\'; + --pattern; + } + } + /* FALLTHROUGH */ + default: +normal: + if (FOLD(c) != FOLD(*string)) { + return (FNM_NOMATCH); + } + ++string; + break; + } + } + /* NOTREACHED */ +} + +static int rangematch(const char *pattern, char test, int flags, + char **newp) +{ + int negate, ok; + char c, c2; + + /* + * A bracket expression starting with an unquoted circumflex + * character produces unspecified results (IEEE 1003.2-1992, + * 3.13.2). This implementation treats it like '!', for + * consistency with the regular expression syntax. + * J.T. Conklin (conklin@ngai.kaleida.com) + */ + if ((negate = (*pattern == '!' || *pattern == '^'))) + ++pattern; + + test = FOLD(test); + + /* + * A right bracket shall lose its special meaning and represent + * itself in a bracket expression if it occurs first in the list. + * -- POSIX.2 2.8.3.2 + */ + ok = 0; + c = *pattern++; + do { + if (c == '\\' && !ISSET(flags, FNM_NOESCAPE)) + c = *pattern++; + if (c == EOS) + return (RANGE_ERROR); + if (c == '/' && ISSET(flags, FNM_PATHNAME)) + return (RANGE_NOMATCH); + c = FOLD(c); + if (*pattern == '-' && (c2 = *(pattern + 1)) != EOS && c2 != ']') { + pattern += 2; + if (c2 == '\\' && !ISSET(flags, FNM_NOESCAPE)) + c2 = *pattern++; + if (c2 == EOS) + return (RANGE_ERROR); + c2 = FOLD(c2); + if (c <= test && test <= c2) + ok = 1; + } else if (c == test) + ok = 1; + } while ((c = *pattern++) != ']'); + + *newp = (char *) pattern; + return (ok == negate ? RANGE_NOMATCH : RANGE_MATCH); +} + +#ifndef TEST_PROGRAM +#define TEST_PROGRAM_A +#endif + +#ifdef TEST_PROGRAM +#include "unittests.h" + +struct test { + int nr; + const char *pattern; + const char *string; + const int options; + const int result; +}; + +/* + * Note, some of these tests were duplicated from a patch file I found + * in an email, so I am unsure what the license is. Since this code is + * never turned on in any release, it probably doesn't matter at all. + * If by some chance someone finds this to be a problem please let + * me know. + */ +static struct test tests[] = { + { 1, "x", "x", FNM_PATHNAME | FNM_LEADING_DIR, 0}, + { 2, "x", "x/y", FNM_PATHNAME | FNM_LEADING_DIR, 0}, + { 3, "x", "x/y/z", FNM_PATHNAME | FNM_LEADING_DIR, 0}, + { 4, "*", "x", FNM_PATHNAME | FNM_LEADING_DIR, 0}, + { 5, "*", "x/y", FNM_PATHNAME | FNM_LEADING_DIR, 0}, + { 6, "*", "x/y/z", FNM_PATHNAME | FNM_LEADING_DIR, 0}, + { 7, "*x", "x", FNM_PATHNAME | FNM_LEADING_DIR, 0}, + { 8, "*x", "x/y", FNM_PATHNAME | FNM_LEADING_DIR, 0}, + { 9, "*x", "x/y/z", FNM_PATHNAME | FNM_LEADING_DIR, 0}, + {10, "x*", "x", FNM_PATHNAME | FNM_LEADING_DIR, 0}, + {11, "x*", "x/y", FNM_PATHNAME | FNM_LEADING_DIR, 0}, + {12, "x*", "x/y/z", FNM_PATHNAME | FNM_LEADING_DIR, 0}, + {0, "a*b/*", "abbb/.x", FNM_PATHNAME|FNM_PERIOD, FNM_NOMATCH}, // TODO: This test currently fail. 
+ {14, "a*b/*", "abbb/xy", FNM_PATHNAME|FNM_PERIOD, 0}, + {15, "[A-[]", "A", 0, 0}, + {16, "[A-[]", "a", 0, FNM_NOMATCH}, + {17, "[a-{]", "A", 0, FNM_NOMATCH}, + {18, "[a-{]", "a", 0, 0}, + {19, "[A-[]", "A", FNM_CASEFOLD, FNM_NOMATCH}, + {20, "[A-[]", "a", FNM_CASEFOLD, FNM_NOMATCH}, + {21, "[a-{]", "A", FNM_CASEFOLD, 0}, + {22, "[a-{]", "a", FNM_CASEFOLD, 0}, + {23, "*LIB*", "lib", FNM_PERIOD, FNM_NOMATCH }, + {24, "*LIB*", "lib", FNM_CASEFOLD, 0}, + {25, "a[/]b", "a/b", 0, 0}, + {26, "a[/]b", "a/b", FNM_PATHNAME, FNM_NOMATCH }, + {27, "[a-z]/[a-z]", "a/b", 0, 0 }, + {28, "a/b", "*", FNM_PATHNAME, FNM_NOMATCH }, + {29, "*", "a/b", FNM_PATHNAME, FNM_NOMATCH }, + {30, "*[/]b", "a/b", FNM_PATHNAME, FNM_NOMATCH }, + {31, "\\[/b", "[/b", 0, 0 }, + {32, "?\?/b", "aa/b", 0, 0 }, + {33, "???b", "aa/b", 0, 0 }, + {34, "???b", "aa/b", FNM_PATHNAME, FNM_NOMATCH }, + {35, "?a/b", ".a/b", FNM_PATHNAME|FNM_PERIOD, FNM_NOMATCH }, + {36, "a/?b", "a/.b", FNM_PATHNAME|FNM_PERIOD, FNM_NOMATCH }, + {37, "*a/b", ".a/b", FNM_PATHNAME|FNM_PERIOD, FNM_NOMATCH }, + {38, "a/*b", "a/.b", FNM_PATHNAME|FNM_PERIOD, FNM_NOMATCH }, + {39, "[.]a/b", ".a/b", FNM_PATHNAME|FNM_PERIOD, FNM_NOMATCH }, + {40, "a/[.]b", "a/.b", FNM_PATHNAME|FNM_PERIOD, FNM_NOMATCH }, + {41, "*/?", "a/b", FNM_PATHNAME|FNM_PERIOD, 0 }, + {42, "?/*", "a/b", FNM_PATHNAME|FNM_PERIOD, 0 }, + {43, ".*/?", ".a/b", FNM_PATHNAME|FNM_PERIOD, 0 }, + {44, "*/.?", "a/.b", FNM_PATHNAME|FNM_PERIOD, 0 }, + {45, "*/*", "a/.b", FNM_PATHNAME|FNM_PERIOD, FNM_NOMATCH }, + {46, "*[.]/b", "a./b", FNM_PATHNAME|FNM_PERIOD, 0 }, + {47, "a?b", "a.b", FNM_PATHNAME|FNM_PERIOD, 0 }, + {48, "a*b", "a.b", FNM_PATHNAME|FNM_PERIOD, 0 }, + {49, "a[.]b", "a.b", FNM_PATHNAME|FNM_PERIOD, 0 }, + {50, "*a*", "a/b", FNM_PATHNAME|FNM_LEADING_DIR, 0 }, + {51, "[/b", "[/b", 0, 0}, +#ifdef FULL_TEST + /* This test takes a *long* time */ + {52, "a*b*c*d*e*f*g*h*i*j*k*l*m*n*o*p*q*r*s*t*u*v*w*x*y*z*", + "aaaabbbbccccddddeeeeffffgggghhhhiiiijjjjkkkkllllmmmm" + "nnnnooooppppqqqqrrrrssssttttuuuuvvvvwwwwxxxxyyyy", 0, FNM_NOMATCH}, +#endif + /* Keep dummy last to avoid compiler warnings */ + {0, "dummy", "dummy", 0, 0} +}; + +#define ntests ((int)(sizeof(tests)/sizeof(struct test))) + +int main() +{ + Unittests fnmatch_test("fnmatch_test"); + char buf[30]; + + for (int i = 0; i < ntests; i++) { + if (tests[i].nr > 0){ + sprintf(buf, "Checking format test: %d - %s", tests[i].nr, tests[i].pattern); + ok(fnmatch(tests[i].pattern, tests[i].string, tests[i].options) == tests[i].result, buf); + } + } + + return report(); +} +#endif /* TEST_PROGRAM */ diff --git a/src/lib/fnmatch.h b/src/lib/fnmatch.h new file mode 100644 index 00000000..b1c6a86e --- /dev/null +++ b/src/lib/fnmatch.h @@ -0,0 +1,55 @@ +/* $OpenBSD: fnmatch.h,v 1.8 2005/12/13 00:35:22 millert Exp $ */ + +/*- + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)fnmatch.h 8.1 (Berkeley) 6/2/93 + */ + +#ifndef _FNMATCH_H_ +#define _FNMATCH_H_ + +#undef FNM_PATHNAME +#undef FNM_NOESCAPE +#undef FNM_PERIOD + +#define FNM_NOMATCH 1 /* Match failed. */ +#define FNM_MATCH 0 /* Match succeeded */ + +#define FNM_PATHNAME 0x01 /* Slash must be matched by slash. */ +#define FNM_NOESCAPE 0x02 /* Disable backslash escaping. */ +#define FNM_PERIOD 0x04 /* Period must be matched by period. */ +#define FNM_LEADING_DIR 0x08 /* Ignore / after Imatch. */ +#define FNM_CASEFOLD 0x10 /* Case insensitive search. */ + +#define FNM_IGNORECASE FNM_CASEFOLD +#define FNM_FILE_NAME FNM_PATHNAME + +extern "C" int fnmatch(const char *, const char *, int); + +#endif /* !_FNMATCH_H_ */ diff --git a/src/lib/guid_to_name.c b/src/lib/guid_to_name.c new file mode 100644 index 00000000..fe9c244d --- /dev/null +++ b/src/lib/guid_to_name.c @@ -0,0 +1,197 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Written by Kern Sibbald, July 2007 to replace idcache.c + * + * Program to convert uid and gid into names, and cache the results + * for preformance reasons. 
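+ *
+ * A minimal usage sketch (illustration only) of the interface declared in
+ * guid_to_name.h; the uid/gid values and buffer sizes are arbitrary
+ * example values:
+ *
+ *   guid_list *gl = new_guid_list();
+ *   char user[50], group[50];                // 50 bytes is an arbitrary size
+ *   gl->uid_to_name(0, user, sizeof(user));  // typically "root"; falls back
+ *                                            // to "0" if no passwd entry
+ *   gl->gid_to_name(0, group, sizeof(group));
+ *   free_guid_list(gl);                      // releases the cached entries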
+ * + */ + +#include "bacula.h" + +#ifndef WIN32 +static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; +#endif + +struct guitem { + dlink link; + char *name; + union { + uid_t uid; + gid_t gid; + }; +}; + + +guid_list *new_guid_list() +{ + guid_list *list; + guitem *item = NULL; + list = (guid_list *)malloc(sizeof(guid_list)); + list->uid_list = New(dlist(item, &item->link)); + list->gid_list = New(dlist(item, &item->link)); + return list; +} + +void free_guid_list(guid_list *list) +{ + guitem *item; + foreach_dlist(item, list->uid_list) { + free(item->name); + } + foreach_dlist(item, list->gid_list) { + free(item->name); + } + delete list->uid_list; + delete list->gid_list; + free(list); +} + +static int uid_compare(void *item1, void *item2) +{ + guitem *i1 = (guitem *)item1; + guitem *i2 = (guitem *)item2; + if (i1->uid < i2->uid) { + return -1; + } else if (i1->uid > i2->uid) { + return 1; + } else { + return 0; + } +} + +static int gid_compare(void *item1, void *item2) +{ + guitem *i1 = (guitem *)item1; + guitem *i2 = (guitem *)item2; + if (i1->gid < i2->gid) { + return -1; + } else if (i1->gid > i2->gid) { + return 1; + } else { + return 0; + } +} + + +static void get_uidname(uid_t uid, guitem *item) +{ +#ifndef HAVE_WIN32 + struct passwd *pwbuf; + P(mutex); + pwbuf = getpwuid(uid); + if (pwbuf != NULL && strcmp(pwbuf->pw_name, "????????") != 0) { + item->name = bstrdup(pwbuf->pw_name); + } + V(mutex); +#endif +} + +static void get_gidname(gid_t gid, guitem *item) +{ +#ifndef HAVE_WIN32 + struct group *grbuf; + P(mutex); + grbuf = getgrgid(gid); + if (grbuf != NULL && strcmp(grbuf->gr_name, "????????") != 0) { + item->name = bstrdup(grbuf->gr_name); + } + V(mutex); +#endif +} + + +char *guid_list::uid_to_name(uid_t uid, char *name, int maxlen) +{ + guitem sitem, *item, *fitem; + sitem.uid = uid; + char buf[50]; + + item = (guitem *)uid_list->binary_search(&sitem, uid_compare); + Dmsg2(900, "uid=%d item=%p\n", uid, item); + if (!item) { + item = (guitem *)malloc(sizeof(guitem)); + item->uid = uid; + item->name = NULL; + get_uidname(uid, item); + if (!item->name) { + item->name = bstrdup(edit_int64(uid, buf)); + Dmsg2(900, "set uid=%d name=%s\n", uid, item->name); + } + fitem = (guitem *)uid_list->binary_insert(item, uid_compare); + if (fitem != item) { /* item already there this shouldn't happen */ + free(item->name); + free(item); + item = fitem; + } + } + bstrncpy(name, item->name, maxlen); + return name; +} + +char *guid_list::gid_to_name(gid_t gid, char *name, int maxlen) +{ + guitem sitem, *item, *fitem; + sitem.gid = gid; + char buf[50]; + + item = (guitem *)gid_list->binary_search(&sitem, gid_compare); + if (!item) { + item = (guitem *)malloc(sizeof(guitem)); + item->gid = gid; + item->name = NULL; + get_gidname(gid, item); + if (!item->name) { + item->name = bstrdup(edit_int64(gid, buf)); + } + fitem = (guitem *)gid_list->binary_insert(item, gid_compare); + if (fitem != item) { /* item already there this shouldn't happen */ + free(item->name); + free(item); + item = fitem; + } + } + + bstrncpy(name, item->name, maxlen); + return name; +} + +#ifdef TEST_PROGRAM + +int main() +{ + int i; + guid_list *list; + char ed1[50], ed2[50]; + list = new_guid_list(); + for (i=0; i<1001; i++) { + printf("uid=%d name=%s gid=%d name=%s\n", i, list->uid_to_name(i, ed1, sizeof(ed1)), + i, list->gid_to_name(i, ed2, sizeof(ed2))); + printf("uid=%d name=%s gid=%d name=%s\n", i, list->uid_to_name(i, ed1, sizeof(ed1)), + i, list->gid_to_name(i, ed2, sizeof(ed2))); + } + + free_guid_list(list); + 
sm_dump(false); /* unit test */ + + return 0; +} + +#endif diff --git a/src/lib/guid_to_name.h b/src/lib/guid_to_name.h new file mode 100644 index 00000000..5c9d63d2 --- /dev/null +++ b/src/lib/guid_to_name.h @@ -0,0 +1,36 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Written by Kern Sibbald, July 2007 to replace idcache.c + * + * Program to convert uid and gid into names, and cache the results + * for preformance reasons. + */ + +class guid_list { +public: + dlist *uid_list; + dlist *gid_list; + + char *uid_to_name(uid_t uid, char *name, int maxlen); + char *gid_to_name(gid_t gid, char *name, int maxlen); +}; + +guid_list *new_guid_list(); +void free_guid_list(guid_list *list); diff --git a/src/lib/hmac.c b/src/lib/hmac.c new file mode 100644 index 00000000..a8d5e3dc --- /dev/null +++ b/src/lib/hmac.c @@ -0,0 +1,117 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +/* + * Hashed Message Authentication Code using MD5 (HMAC-MD5) + * + * hmac_md5 was based on sample code in RFC2104 (thanks guys). + * + * Adapted to Bacula by Kern E. Sibbald, February MMI. 
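+ *
+ * A minimal usage sketch (illustration only); the text and key below are
+ * arbitrary example values, not Bacula's actual challenge format:
+ *
+ *   uint8_t digest[16];                        // MD5 digest is 16 bytes
+ *   const char *chal = "1234.5678 challenge";  // data being authenticated
+ *   const char *pass = "console-password";     // shared secret (key)
+ *   hmac_md5((uint8_t *)chal, strlen(chal),
+ *            (uint8_t *)pass, strlen(pass), digest);
+ *   // digest now holds MD5(key XOR opad, MD5(key XOR ipad, text))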
+ */ +#include "bacula.h" + +#define PAD_LEN 64 /* PAD length */ +#define SIG_LEN MD5HashSize /* MD5 digest length */ + +void +hmac_md5( + uint8_t* text, /* pointer to data stream */ + int text_len, /* length of data stream */ + uint8_t* key, /* pointer to authentication key */ + int key_len, /* length of authentication key */ + uint8_t *hmac) /* returned hmac-md5 */ +{ + MD5Context md5c; + uint8_t k_ipad[PAD_LEN]; /* inner padding - key XORd with ipad */ + uint8_t k_opad[PAD_LEN]; /* outer padding - key XORd with opad */ + uint8_t keysig[SIG_LEN]; + int i; + + /* if key is longer than PAD length, reset it to key=MD5(key) */ + if (key_len > PAD_LEN) { + MD5Context md5key; + + MD5Init(&md5key); + MD5Update(&md5key, key, key_len); + MD5Final(keysig, &md5key); + + key = keysig; + key_len = SIG_LEN; + } + + /* + * the HMAC_MD5 transform looks like: + * + * MD5(Key XOR opad, MD5(Key XOR ipad, text)) + * + * where Key is an n byte key + * ipad is the byte 0x36 repeated 64 times + + * opad is the byte 0x5c repeated 64 times + * and text is the data being protected + */ + + /* Zero pads and store key */ + memset(k_ipad, 0, PAD_LEN); + memcpy(k_ipad, key, key_len); + memcpy(k_opad, k_ipad, PAD_LEN); + + /* XOR key with ipad and opad values */ + for (i=0; inext = mem_block; + mem_block = hmem; + hmem->mem = mem_block->first; + hmem->rem = (char *)hmem + size - hmem->mem; + Dmsg3(100, "malloc buf=%p size=%d rem=%d\n", hmem, size, hmem->rem); +} + +/* This routine frees the whole tree */ +void htable::hash_big_free() +{ + struct h_mem *hmem, *rel; + + for (hmem=mem_block; hmem; ) { + rel = hmem; + hmem = hmem->next; + Dmsg1(100, "free malloc buf=%p\n", rel); + free(rel); + } +} + +#endif + +/* + * Normal hash malloc routine that gets a + * "small" buffer from the big buffer + */ +char *htable::hash_malloc(int size) +{ +#ifdef BIG_MALLOC + char *buf; + int asize = BALIGN(size); + + if (mem_block->rem < asize) { + uint32_t mb_size; + if (total_size >= 1000000) { + mb_size = 1000000; + } else { + mb_size = 100000; + } + malloc_big_buf(mb_size); + } + mem_block->rem -= asize; + buf = mem_block->mem; + mem_block->mem += asize; + return buf; +#else + total_size += size; + blocks++; + return (char *)malloc(size); +#endif +} + + + + +/* + * Create hash of key, stored in hash then + * create and return the pseudo random bucket index + */ +void htable::hash_index(char *key) +{ + hash = 0; + for (char *p=key; *p; p++) { + hash += ((hash << 5) | (hash >> (sizeof(hash)*8-5))) + (uint32_t)*p; + } + /* Multiply by large prime number, take top bits, mask for remainder */ + index = ((hash * 1103515249LL) >> rshift) & mask; + Dmsg2(dbglvl, "Leave hash_index hash=0x%x index=%d\n", hash, index); +} + +void htable::hash_index(uint64_t ikey) +{ + hash = ikey; /* already have starting binary hash */ + /* Same algorithm as for char * */ + index = ((hash * 1103515249LL) >> rshift) & mask; + Dmsg2(dbglvl, "Leave hash_index hash=0x%x index=%d\n", hash, index); +} + +/* + * tsize is the estimated number of entries in the hash table + */ +htable::htable(void *item, void *link, int tsize) +{ + init(item, link, tsize); +} + +void htable::init(void *item, void *link, int tsize) +{ + int pwr; + + bmemzero(this, sizeof(htable)); + if (tsize < 31) { + tsize = 31; + } + tsize >>= 2; + for (pwr=0; tsize; pwr++) { + tsize >>= 1; + } + loffset = (char *)link - (char *)item; + mask = ~((~0)< table size = 8 */ + rshift = 30 - pwr; /* start using bits 28, 29, 30 */ + buckets = 1<next); + j++; + } + if (j > max) { + max = j; + } + if (j < 
MAX_COUNT) { + hits[j]++; + } + } + for (i=0; i < MAX_COUNT; i++) { + printf("%2d: %d\n",i, hits[i]); + } + printf("buckets=%d num_items=%d max_items=%d\n", buckets, num_items, max_items); + printf("max hits in a bucket = %d\n", max); +#ifdef BIG_MALLOC + char ed1[100]; + edit_uint64(total_size, ed1); + printf("total bytes malloced = %s\n", ed1); + printf("total blocks malloced = %d\n", blocks); +#endif +} + +void htable::grow_table() +{ + Dmsg1(100, "Grow called old size = %d\n", buckets); + /* Setup a bigger table */ + htable *big = (htable *)malloc(sizeof(htable)); + memcpy(big, this, sizeof(htable)); /* start with original class data */ + big->loffset = loffset; + big->mask = mask<<1 | 1; + big->rshift = rshift - 1; + big->num_items = 0; + big->buckets = buckets * 2; + big->max_items = big->buckets * 4; + /* Create a bigger hash table */ + big->table = (hlink **)malloc(big->buckets * sizeof(hlink *)); + bmemzero(big->table, big->buckets * sizeof(hlink *)); + big->walkptr = NULL; + big->walk_index = 0; + /* Insert all the items in the new hash table */ + Dmsg1(100, "Before copy num_items=%d\n", num_items); + /* + * We walk through the old smaller tree getting items, + * but since we are overwriting the colision links, we must + * explicitly save the item->next pointer and walk each + * colision chain ourselves. We do use next() for getting + * to the next bucket. + */ + for (void *item=first(); item; ) { + void *ni = ((hlink *)((char *)item+loffset))->next; /* save link overwritten by insert */ + hlink *hp = (hlink *)((char *)item+loffset); + if (hp->is_ikey) { + Dmsg1(100, "Grow insert: %lld\n", hp->key.ikey); + big->insert(hp->key.ikey, item); + } else { + Dmsg1(100, "Grow insert: %s\n", hp->key.key); + big->insert(hp->key.key, item); + } + if (ni) { + item = (void *)((char *)ni-loffset); + } else { + walkptr = NULL; + item = next(); + } + } + Dmsg1(100, "After copy new num_items=%d\n", big->num_items); + if (num_items != big->num_items) { + Dmsg0(000, "****** Big problems num_items mismatch ******\n"); + } + free(table); + memcpy(this, big, sizeof(htable)); /* move everything across */ + free(big); + Dmsg0(100, "Exit grow.\n"); +} + +bool htable::insert(char *key, void *item) +{ + hlink *hp; + if (lookup(key)) { + return false; /* already exists */ + } + ASSERT(index < buckets); + Dmsg2(dbglvl, "Insert: hash=%p index=%d\n", hash, index); + hp = (hlink *)(((char *)item)+loffset); + Dmsg4(dbglvl, "Insert hp=%p index=%d item=%p offset=%u\n", hp, + index, item, loffset); + hp->next = table[index]; + hp->hash = hash; + hp->key.key = key; + hp->is_ikey = false; + table[index] = hp; + Dmsg3(dbglvl, "Insert hp->next=%p hp->hash=0x%x hp->key=%s\n", + hp->next, hp->hash, hp->key.key); + + if (++num_items >= max_items) { + Dmsg2(dbglvl, "num_items=%d max_items=%d\n", num_items, max_items); + grow_table(); + } + Dmsg3(dbglvl, "Leave insert index=%d num_items=%d key=%s\n", index, num_items, key); + return true; +} + +void *htable::lookup(char *key) +{ + hash_index(key); + for (hlink *hp=table[index]; hp; hp=(hlink *)hp->next) { +// Dmsg2(100, "hp=%p key=%s\n", hp, hp->key.key); + if (hash == hp->hash && strcmp(key, hp->key.key) == 0) { + Dmsg1(dbglvl, "lookup return %p\n", ((char *)hp)-loffset); + return ((char *)hp)-loffset; + } + } + return NULL; +} + +bool htable::insert(uint64_t ikey, void *item) +{ + hlink *hp; + if (lookup(ikey)) { + return false; /* already exists */ + } + ASSERT(index < buckets); + Dmsg2(dbglvl, "Insert: hash=%p index=%d\n", hash, index); + hp = (hlink *)(((char 
*)item)+loffset); + Dmsg4(dbglvl, "Insert hp=%p index=%d item=%p offset=%u\n", hp, index, + item, loffset); + hp->next = table[index]; + hp->hash = hash; + hp->key.ikey = ikey; + hp->is_ikey = true; + table[index] = hp; + Dmsg3(dbglvl, "Insert hp->next=%p hp->hash=0x%x hp->ikey=%lld\n", hp->next, + hp->hash, hp->key.ikey); + + if (++num_items >= max_items) { + Dmsg2(dbglvl, "num_items=%d max_items=%d\n", num_items, max_items); + grow_table(); + } + Dmsg3(dbglvl, "Leave insert index=%d num_items=%d key=%lld\n", + index, num_items, ikey); + return true; +} + +void *htable::lookup(uint64_t ikey) +{ + hash_index(ikey); + for (hlink *hp=table[index]; hp; hp=(hlink *)hp->next) { +// Dmsg2(100, "hp=%p key=%lld\n", hp, hp->key.ikey); + if (hash == hp->hash && ikey == hp->key.ikey) { + Dmsg1(dbglvl, "lookup return %p\n", ((char *)hp)-loffset); + return ((char *)hp)-loffset; + } + } + return NULL; +} + +void *htable::next() +{ + Dmsg1(dbglvl, "Enter next: walkptr=%p\n", walkptr); + if (walkptr) { + walkptr = (hlink *)(walkptr->next); + } + while (!walkptr && walk_index < buckets) { + walkptr = table[walk_index++]; + if (walkptr) { + Dmsg3(dbglvl, "new walkptr=%p next=%p inx=%d\n", walkptr, + walkptr->next, walk_index-1); + } + } + if (walkptr) { + Dmsg2(dbglvl, "next: rtn %p walk_index=%d\n", + ((char *)walkptr)-loffset, walk_index); + return ((char *)walkptr)-loffset; + } + Dmsg0(dbglvl, "next: return NULL\n"); + return NULL; +} + +void *htable::first() +{ + Dmsg0(dbglvl, "Enter first\n"); + walkptr = table[0]; /* get first bucket */ + walk_index = 1; /* Point to next index */ + while (!walkptr && walk_index < buckets) { + walkptr = table[walk_index++]; /* go to next bucket */ + if (walkptr) { + Dmsg3(dbglvl, "first new walkptr=%p next=%p inx=%d\n", walkptr, + walkptr->next, walk_index-1); + } + } + if (walkptr) { + Dmsg1(dbglvl, "Leave first walkptr=%p\n", walkptr); + return ((char *)walkptr)-loffset; + } + Dmsg0(dbglvl, "Leave first walkptr=NULL\n"); + return NULL; +} + +/* Destroy the table and its contents */ +void htable::destroy() +{ +#ifdef BIG_MALLOC + hash_big_free(); +#else + void *ni; + void *li = first(); + + while (li) { + ni = next(); + free(li); + li=ni; + } +#endif + + free(table); + table = NULL; + Dmsg0(100, "Done destroy.\n"); +} + +#ifndef TEST_PROGRAM +#define TEST_PROGRAM_A +#endif + + +#ifdef TEST_PROGRAM +#include "unittests.h" + +struct MYJCR { + char *key; + hlink link; +}; + +#define NITEMS 5000000 + +int main() +{ + Unittests htable_test("htable_test"); + char mkey[30]; + htable *jcrtbl; + MYJCR *save_jcr = NULL, *item; + MYJCR *jcr = NULL; + int count = 0; + int i; + int len; + bool check_cont; + + Pmsg0(0, "Initialize tests ...\n"); + jcrtbl = (htable *)malloc(sizeof(htable)); + jcrtbl->init(jcr, &jcr->link, NITEMS); + ok(jcrtbl && jcrtbl->size() == 0, "Default initialization"); + + Pmsg1(0, "Inserting %d items\n", NITEMS); + for (i = 0; i < NITEMS; i++) { + len = sprintf(mkey, "This is htable item %d", i) + 1; + + jcr = (MYJCR *)jcrtbl->hash_malloc(sizeof(MYJCR)); + jcr->key = (char *)jcrtbl->hash_malloc(len); + memcpy(jcr->key, mkey, len); + Dmsg2(100, "link=%p jcr=%p\n", jcr->link, jcr); + + jcrtbl->insert(jcr->key, jcr); + if (i == 10) { + save_jcr = jcr; + } + } + ok(jcrtbl->size() == NITEMS, "Checking size"); + item = (MYJCR *)jcrtbl->lookup(save_jcr->key); + ok(item != NULL, "Checking saved key lookup"); + ok(item != NULL && strcmp(save_jcr->key, item->key) == 0, "Checking key"); + + /* some stats for human to consider */ + jcrtbl->stats(); + + Pmsg0(0, "Walk 
the hash table\n"); + check_cont = true; + for (i = 0; i < NITEMS; i++) { + sprintf(mkey, "This is htable item %d", i); + item = (MYJCR *)jcrtbl->lookup(mkey); + if (!item){ + check_cont = false; + } + } + ok(check_cont, "Checking htable content"); + + foreach_htable (jcr, jcrtbl) { +#ifndef BIG_MALLOC + free(jcr->key); +#endif + count++; + } + ok(count == NITEMS, "Checking number of items"); + printf("Calling destroy\n"); + jcrtbl->destroy(); + free(jcrtbl); + + return report(); +} +#endif diff --git a/src/lib/htable.h b/src/lib/htable.h new file mode 100644 index 00000000..31b20a0c --- /dev/null +++ b/src/lib/htable.h @@ -0,0 +1,118 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Written by Kern Sibbald, MMIV + */ + +#ifndef _HTABLE_H_ +#define _HTABLE_H_ + +/* + * + * Written by Kern Sibbald, MMIV + * + */ + +/* ======================================================================== + * + * Hash table class -- htable + * + */ + +/* + * BIG_MALLOC is to provide a large malloc service to htable + */ +#define BIG_MALLOC + +/* + * Loop var through each member of table + */ +#ifdef HAVE_TYPEOF +#define foreach_htable(var, tbl) \ + for((var)=(typeof(var))((tbl)->first()); \ + (var); \ + (var)=(typeof(var))((tbl)->next())) +#else +#define foreach_htable(var, tbl) \ + for((*((void **)&(var))=(void *)((tbl)->first())); \ + (var); \ + (*((void **)&(var))=(void *)((tbl)->next()))) +#endif + +union key_val { + char *key; /* char key */ + uint64_t ikey; /* integer key */ +}; + +struct hlink { + void *next; /* next hash item */ + uint64_t hash; /* 64 bit hash for this key */ + union key_val key; /* key value this key */ + bool is_ikey; /* set if integer key */ +}; + +struct h_mem { + struct h_mem *next; /* next buffer */ + char *mem; /* memory pointer */ + int64_t rem; /* remaining bytes in big_buffer */ + char first[1]; /* first byte */ +}; + +class htable : public SMARTALLOC { + hlink **table; /* hash table */ + uint64_t hash; /* temp storage */ + uint64_t total_size; /* total bytes malloced */ + int loffset; /* link offset in item */ + uint32_t num_items; /* current number of items */ + uint32_t max_items; /* maximum items before growing */ + uint32_t buckets; /* size of hash table */ + uint32_t index; /* temp storage */ + uint32_t mask; /* "remainder" mask */ + uint32_t rshift; /* amount to shift down */ + hlink *walkptr; /* table walk pointer */ + uint32_t walk_index; /* table walk index */ + uint32_t blocks; /* blocks malloced */ +#ifdef BIG_MALLOC + struct h_mem *mem_block; /* malloc'ed memory block chain */ + void malloc_big_buf(int size); /* Get a big buffer */ +#endif + void hash_index(char *key); /* produce hash key,index */ + void hash_index(uint64_t ikey); /* produce hash key,index */ + void grow_table(); /* grow the table */ + +public: + htable(void *item, void *link, int tsize = 31); + ~htable() { destroy(); } + void init(void *item, void *link, int tsize = 31); + bool insert(char *key, void *item); /* 
char key */ + bool insert(uint64_t ikey, void *item); /* 64 bit key */ + void *lookup(char *key); /* char key */ + void *lookup(uint64_t ikey); /* 64 bit key */ + void *first(); /* get first item in table */ + void *next(); /* get next item in table */ + void destroy(); + void stats(); /* print stats about the table */ + uint32_t size(); /* return size of table */ + char *hash_malloc(int size); /* malloc bytes for a hash entry */ +#ifdef BIG_MALLOC + void hash_big_free(); /* free all hash allocated big buffers */ +#endif +}; + +#endif diff --git a/src/lib/ini.c b/src/lib/ini.c new file mode 100644 index 00000000..f3c188f9 --- /dev/null +++ b/src/lib/ini.c @@ -0,0 +1,897 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Handle simple configuration file such as "ini" files. + * key1 = val # comment + * key2 = val # + * + */ + +#include "bacula.h" +#include "ini.h" + +#define bfree_and_null_const(a) do{if(a){free((void *)a); (a)=NULL;}} while(0) +static int dbglevel = 100; + +/* We use this structure to associate a key to the function */ +struct ini_store { + const char *key; + const char *comment; + INI_ITEM_HANDLER *handler; +}; + +static struct ini_store funcs[] = { + {"@INT32@", "Integer", ini_store_int32}, + {"@PINT32@", "Integer", ini_store_pint32}, + {"@PINT64@", "Positive Integer", ini_store_pint64}, + {"@INT64@", "Integer", ini_store_int64}, + {"@NAME@", "Simple String", ini_store_name}, + {"@STR@", "String", ini_store_str}, + {"@BOOL@", "on/off", ini_store_bool}, + {"@ALIST@", "String list", ini_store_alist_str}, + {"@DATE@", "Date", ini_store_date}, +/* TODO: Add protocol for the FD @ASKFD@ */ + {NULL, NULL, NULL} +}; + +/* + * Get handler code from handler @ + */ +const char *ini_get_store_code(INI_ITEM_HANDLER *handler) +{ + for (int i = 0; funcs[i].key ; i++) { + if (funcs[i].handler == handler) { + return funcs[i].key; + } + } + return NULL; +} + +/* + * Get handler function from handler name + */ +INI_ITEM_HANDLER *ini_get_store_handler(const char *key) +{ + for (int i = 0; funcs[i].key ; i++) { + if (!strcmp(funcs[i].key, key)) { + return funcs[i].handler; + } + } + return NULL; +} + +/* + * Format a scanner error message + */ +static void s_err(const char *file, int line, LEX *lc, const char *msg, ...) 
+{ + ConfigFile *ini = (ConfigFile *)(lc->caller_ctx); + va_list arg_ptr; + char buf[MAXSTRING]; + + va_start(arg_ptr, msg); + bvsnprintf(buf, sizeof(buf), msg, arg_ptr); + va_end(arg_ptr); + +#ifdef TEST_PROGRAM + printf("ERROR: Config file error: %s\n" + " : Line %d, col %d of file %s\n%s\n", + buf, lc->line_no, lc->col_no, lc->fname, lc->line); +#endif + + if (ini->jcr) { /* called from core */ + Jmsg(ini->jcr, M_ERROR, 0, _("Config file error: %s\n" + " : Line %d, col %d of file %s\n%s\n"), + buf, lc->line_no, lc->col_no, lc->fname, lc->line); + +// } else if (ini->ctx) { /* called from plugin */ +// ini->bfuncs->JobMessage(ini->ctx, __FILE__, __LINE__, M_FATAL, 0, +// _("Config file error: %s\n" +// " : Line %d, col %d of file %s\n%s\n"), +// buf, lc->line_no, lc->col_no, lc->fname, lc->line); +// + } else { /* called from ??? */ + e_msg(file, line, M_ERROR, 0, + _("Config file error: %s\n" + " : Line %d, col %d of file %s\n%s\n"), + buf, lc->line_no, lc->col_no, lc->fname, lc->line); + } +} + +/* Reset free items */ +void ConfigFile::clear_items() +{ + if (!items) { + return; + } + + for (int i=0; items[i].name; i++) { + if (items[i].found) { + /* special members require delete or free */ + if (items[i].handler == ini_store_str) { + free(items[i].val.strval); + items[i].val.strval = NULL; + + } else if (items[i].handler == ini_store_alist_str) { + delete items[i].val.alistval; + items[i].val.alistval = NULL; + } + items[i].found = false; + } + } +} + +void ConfigFile::free_items() +{ + if (items_allocated) { + for (int i=0; items[i].name; i++) { + bfree_and_null_const(items[i].name); + bfree_and_null_const(items[i].comment); + bfree_and_null_const(items[i].default_value); + } + } + if (items) { + free(items); + } + items = NULL; + items_allocated = false; +} + +/* Get a particular item from the items list */ +int ConfigFile::get_item(const char *name) +{ + if (!items) { + return -1; + } + + for (int i=0; i < MAX_INI_ITEMS && items[i].name; i++) { + if (strcasecmp(name, items[i].name) == 0) { + return i; + } + } + return -1; +} + +/* Dump a buffer to a file in the working directory + * Needed to unserialise() a config + */ +bool ConfigFile::dump_string(const char *buf, int32_t len) +{ + FILE *fp; + bool ret=false; + + if (!out_fname) { + out_fname = get_pool_memory(PM_FNAME); + make_unique_filename(&out_fname, (int)(intptr_t)this, (char*)"configfile"); + } + + fp = bfopen(out_fname, "wb"); + if (!fp) { + return ret; + } + + if (fwrite(buf, len, 1, fp) == 1) { + ret = true; + } + + fclose(fp); + return ret; +} + +/* Dump the item table format to a text file (used by plugin) */ +bool ConfigFile::serialize(const char *fname) +{ + FILE *fp; + POOLMEM *tmp; + int32_t len; + bool ret = false; + + if (!items) { + return ret; + } + + fp = bfopen(fname, "w"); + if (!fp) { + return ret; + } + + tmp = get_pool_memory(PM_MESSAGE); + len = serialize(&tmp); + if (fwrite(tmp, len, 1, fp) == 1) { + ret = true; + } + free_pool_memory(tmp); + + fclose(fp); + return ret; +} + +/* Dump the item table format to a text file (used by plugin) */ +int ConfigFile::serialize(POOLMEM **buf) +{ + int len; + POOLMEM *tmp, *tmp2; + if (!items) { + **buf = 0; + return 0; + } + + len = Mmsg(buf, "# Plugin configuration file\n# Version %d\n", version); + + tmp = get_pool_memory(PM_MESSAGE); + tmp2 = get_pool_memory(PM_MESSAGE); + + for (int i=0; items[i].name ; i++) { + if (items[i].comment) { + Mmsg(tmp, "OptPrompt=%s\n", quote_string(tmp2, items[i].comment)); + pm_strcat(buf, tmp); + } + if (items[i].default_value) 
{ + Mmsg(tmp, "OptDefault=%s\n", + quote_string(tmp2, items[i].default_value)); + pm_strcat(buf, tmp); + } + if (items[i].required) { + Mmsg(tmp, "OptRequired=yes\n"); + pm_strcat(buf, tmp); + } + + Mmsg(tmp, "%s=%s\n\n", + items[i].name, ini_get_store_code(items[i].handler)); + + /* variable = @INT64@ */ + len = pm_strcat(buf, tmp); + } + free_pool_memory(tmp); + free_pool_memory(tmp2); + + return len ; +} + +/* Dump the item table content to a text file (used by director) */ +int ConfigFile::dump_results(POOLMEM **buf) +{ + int len; + POOLMEM *tmp, *tmp2; + if (!items) { + **buf = 0; + return 0; + } + len = Mmsg(buf, "# Plugin configuration file\n# Version %d\n", version); + + tmp = get_pool_memory(PM_MESSAGE); + tmp2 = get_pool_memory(PM_MESSAGE); + + for (int i=0; items[i].name ; i++) { + bool process= items[i].found; + if (items[i].found) { + items[i].handler(NULL, this, &items[i]); + } + if (!items[i].found && items[i].required && items[i].default_value) { + pm_strcpy(this->edit, items[i].default_value); + process = true; + } + if (process) { + if (items[i].comment && *items[i].comment) { + Mmsg(tmp, "# %s\n", items[i].comment); + pm_strcat(buf, tmp); + } + if (items[i].handler == ini_store_str || + items[i].handler == ini_store_name || + items[i].handler == ini_store_date) + { + Mmsg(tmp, "%s=%s\n\n", + items[i].name, + quote_string(tmp2, this->edit)); + + } else { + Mmsg(tmp, "%s=%s\n\n", items[i].name, this->edit); + } + len = pm_strcat(buf, tmp); + } + } + free_pool_memory(tmp); + free_pool_memory(tmp2); + + return len ; +} + +bool ConfigFile::parse() +{ + int token, i; + bool ret = false; + bool found; + + lc->options |= LOPT_NO_EXTERN; + lc->caller_ctx = (void *)this; + + while ((token=lex_get_token(lc, T_ALL)) != T_EOF) { + if (token == T_EOL) { + continue; + } + found = false; + for (i=0; items[i].name; i++) { + if (strcasecmp(items[i].name, lc->str) == 0) { + if ((token = lex_get_token(lc, T_EQUALS)) == T_ERROR) { + Dmsg2(dbglevel, "in T_IDENT got token=%s str=%s\n", lex_tok_to_str(token), lc->str); + break; + } + Dmsg2(dbglevel, "parse got token=%s str=%s\n", lex_tok_to_str(token), lc->str); + Dmsg1(dbglevel, "calling handler for %s\n", items[i].name); + /* Call item handler */ + ret = items[i].found = items[i].handler(lc, this, &items[i]); + found = true; + break; + } + } + if (!found) { + Dmsg1(dbglevel, "Unfound keyword=%s\n", lc->str); + scan_err1(lc, "Keyword %s not found", lc->str); + /* We can raise an error here */ + break; + } else { + Dmsg1(dbglevel, "Found keyword=%s\n", items[i].name); + } + if (!ret) { + Dmsg1(dbglevel, "Error getting value for keyword=%s\n", items[i].name); + break; + } + Dmsg0(dbglevel, "Continue with while(token) loop\n"); + } + + for (i=0; items[i].name; i++) { + if (items[i].required && !items[i].found) { + scan_err1(lc, "%s required but not found", items[i].name); + ret = false; + } + } + + lc = lex_close_file(lc); + return ret; +} + +/* Parse a config file used by Plugin/Director */ +bool ConfigFile::parse(const char *fname) +{ + if (!items) { + return false; + } + + if ((lc = lex_open_file(lc, fname, s_err)) == NULL) { + berrno be; + Emsg2(M_ERROR, 0, _("Cannot open config file %s: %s\n"), + fname, be.bstrerror()); + return false; + } + return parse(); /* Parse file */ +} + +/* Parse a config buffer used by Plugin/Director */ +bool ConfigFile::parse_buf(const char *buffer) +{ + if (!items) { + return false; + } + + if ((lc = lex_open_buf(lc, buffer, s_err)) == NULL) { + Emsg0(M_ERROR, 0, _("Cannot open lex\n")); + return false; + } + 
return parse(); /* Parse memory buffer */ +} + + +/* Analyse the content of a ini file to build the item list + * It uses special syntax for datatype. Used by Director on Restore object + * + * OptPrompt = "Variable1" + * OptRequired + * OptDefault = 100 + * Variable1 = @PINT32@ + * ... + */ +bool ConfigFile::unserialize(const char *fname) +{ + int token, i, nb = 0; + bool ret=false; + const char **assign; + + /* At this time, we allow only 32 different items */ + int s = MAX_INI_ITEMS * sizeof (struct ini_items); + + items = (struct ini_items *) malloc (s); + memset(items, 0, s); + items_allocated = true; + + /* parse the file and generate the items structure on the fly */ + if ((lc = lex_open_file(lc, fname, s_err)) == NULL) { + berrno be; + Emsg2(M_ERROR, 0, _("Cannot open config file %s: %s\n"), + fname, be.bstrerror()); + return false; + } + lc->options |= LOPT_NO_EXTERN; + lc->caller_ctx = (void *)this; + + while ((token=lex_get_token(lc, T_ALL)) != T_EOF) { + Dmsg1(dbglevel, "parse got token=%s\n", lex_tok_to_str(token)); + + if (token == T_EOL) { + continue; + } + + ret = false; + assign = NULL; + + if (nb >= MAX_INI_ITEMS) { + break; + } + + if (strcasecmp("optprompt", lc->str) == 0) { + assign = &(items[nb].comment); + + } else if (strcasecmp("optdefault", lc->str) == 0) { + assign = &(items[nb].default_value); + + } else if (strcasecmp("optrequired", lc->str) == 0) { + items[nb].required = true; /* Don't use argument */ + scan_to_eol(lc); + continue; + + } else { + items[nb].name = bstrdup(lc->str); + } + + token = lex_get_token(lc, T_ALL); + Dmsg1(dbglevel, "in T_IDENT got token=%s\n", lex_tok_to_str(token)); + + if (token != T_EQUALS) { + scan_err1(lc, "expected an equals, got: %s", lc->str); + break; + } + + /* We may allow blank variable */ + if (lex_get_token(lc, T_STRING) == T_ERROR) { + break; + } + + if (assign) { + *assign = bstrdup(lc->str); + + } else { + if ((items[nb].handler = ini_get_store_handler(lc->str)) == NULL) { + scan_err1(lc, "expected a data type, got: %s", lc->str); + break; + } + nb++; + } + scan_to_eol(lc); + ret = true; + } + + if (!ret) { + for (i = 0; i < nb ; i++) { + bfree_and_null_const(items[i].name); + bfree_and_null_const(items[i].comment); + bfree_and_null_const(items[i].default_value); + items[i].handler = NULL; + items[i].required = false; + } + } + + lc = lex_close_file(lc); + return ret; +} + +/* ---------------------------------------------------------------- + * Handle data type. 
Import/Export + * ---------------------------------------------------------------- + */ +bool ini_store_str(LEX *lc, ConfigFile *inifile, ini_items *item) +{ + if (!lc) { + Mmsg(inifile->edit, "%s", item->val.strval); + return true; + } + if (lex_get_token(lc, T_STRING) == T_ERROR) { + return false; + } + /* If already allocated, free first */ + if (item->found && item->val.strval) { + free(item->val.strval); + } + item->val.strval = bstrdup(lc->str); + scan_to_eol(lc); + return true; +} + +bool ini_store_name(LEX *lc, ConfigFile *inifile, ini_items *item) +{ + if (!lc) { + Mmsg(inifile->edit, "%s", item->val.nameval); + return true; + } + if (lex_get_token(lc, T_NAME) == T_ERROR) { + Dmsg0(dbglevel, "Want token=T_NAME got T_ERROR\n"); + return false; + } + Dmsg1(dbglevel, "ini_store_name: %s\n", lc->str); + strncpy(item->val.nameval, lc->str, sizeof(item->val.nameval)); + scan_to_eol(lc); + return true; +} + +bool ini_store_alist_str(LEX *lc, ConfigFile *inifile, ini_items *item) +{ + alist *list = item->val.alistval; + if (!lc) { + /* TODO, write back the alist to edit buffer */ + return true; + } + + for (;;) { + if (lex_get_token(lc, T_STRING) == T_ERROR) { + return false; + } + + if (list == NULL) { + list = New(alist(10, owned_by_alist)); + } + list->append(bstrdup(lc->str)); + + if (lc->ch != ',') { /* if no other item follows */ + if (!lex_check_eol(lc)) { + /* found garbage at the end of the line */ + return false; + } + break; /* get out */ + } + lex_get_token(lc, T_ALL); /* eat comma */ + } + + item->val.alistval = list; + scan_to_eol(lc); + return true; +} + +bool ini_store_pint64(LEX *lc, ConfigFile *inifile, ini_items *item) +{ + if (!lc) { + Mmsg(inifile->edit, "%lld", item->val.int64val); + return true; + } + if (lex_get_token(lc, T_PINT64) == T_ERROR) { + return false; + } + item->val.int64val = lc->pint64_val; + scan_to_eol(lc); + return true; +} + +bool ini_store_int64(LEX *lc, ConfigFile *inifile, ini_items *item) +{ + if (!lc) { + Mmsg(inifile->edit, "%lld", item->val.int64val); + return true; + } + if (lex_get_token(lc, T_INT64) == T_ERROR) { + return false; + } + item->val.int64val = lc->int64_val; + scan_to_eol(lc); + return true; +} + +bool ini_store_pint32(LEX *lc, ConfigFile *inifile, ini_items *item) +{ + if (!lc) { + Mmsg(inifile->edit, "%d", item->val.int32val); + return true; + } + if (lex_get_token(lc, T_PINT32) == T_ERROR) { + return false; + } + item->val.int32val = lc->pint32_val; + scan_to_eol(lc); + return true; +} + +bool ini_store_int32(LEX *lc, ConfigFile *inifile, ini_items *item) +{ + if (!lc) { + Mmsg(inifile->edit, "%d", item->val.int32val); + return true; + } + if (lex_get_token(lc, T_INT32) == T_ERROR) { + return false; + } + item->val.int32val = lc->int32_val; + scan_to_eol(lc); + return true; +} + +bool ini_store_bool(LEX *lc, ConfigFile *inifile, ini_items *item) +{ + if (!lc) { + Mmsg(inifile->edit, "%s", item->val.boolval?"yes":"no"); + return true; + } + if (lex_get_token(lc, T_NAME) == T_ERROR) { + return false; + } + if (strcasecmp(lc->str, "yes") == 0 || + strcasecmp(lc->str, "true") == 0 || + strcasecmp(lc->str, "on") == 0 || + strcasecmp(lc->str, "1") == 0) + { + item->val.boolval = true; + + } else if (strcasecmp(lc->str, "no") == 0 || + strcasecmp(lc->str, "false") == 0 || + strcasecmp(lc->str, "off") == 0 || + strcasecmp(lc->str, "0") == 0) + { + item->val.boolval = false; + + } else { + /* YES and NO must not be translated */ + scan_err2(lc, _("Expect %s, got: %s"), "YES, NO, ON, OFF, 0, 1, TRUE, or FALSE", lc->str); + return 
false; + } + scan_to_eol(lc); + return true; +} + +bool ini_store_date(LEX *lc, ConfigFile *inifile, ini_items *item) +{ + if (!lc) { + bstrutime(inifile->edit,sizeof_pool_memory(inifile->edit),item->val.btimeval); + return true; + } + if (lex_get_token(lc, T_STRING) == T_ERROR) { + return false; + } + item->val.btimeval = str_to_utime(lc->str); + if (item->val.btimeval == 0) { + return false; + } + scan_to_eol(lc); + return true; +} + +#ifndef TEST_PROGRAM +#define TEST_PROGRAM_A +#endif + +/* ---------------------------------------------------------------- */ +#ifdef TEST_PROGRAM +#include "unittests.h" +/* make ini_test + * export LD_LIBRARY_PATH=.libs/ + * ./.libs/ini_test + */ + +struct ini_items membuf_items[] = { + /* name handler comment req */ + {"client", ini_store_name, "Client name", 0}, + {"serial", ini_store_int32, "Serial number", 1}, + {"max_clients", ini_store_int32, "Max Clients", 0}, + {NULL, NULL, NULL, 0} +}; + +struct ini_items test_items[] = { + /* name handler comment req */ + {"datastore", ini_store_name, "Target Datastore", 0}, + {"newhost", ini_store_str, "New Hostname", 1}, + {"int64val", ini_store_int64, "Int64", 1}, + {"list", ini_store_alist_str, "list", 0}, + {"bool", ini_store_bool, "Bool", 0}, + {"pint64", ini_store_pint64, "pint", 0}, + {"int32", ini_store_int32, "int 32bit", 0}, + {"plugin.test", ini_store_str, "test with .", 0}, + {"adate", ini_store_date, "test with date", 0}, + {NULL, NULL, NULL, 0} +}; + +/* In order to link with libbaccfg */ +int32_t r_last; +int32_t r_first; +RES_HEAD **res_head; +bool save_resource(RES_HEAD **rhead, int type, RES_ITEM *items, int pass){return false;} +bool save_resource(CONFIG*, int, RES_ITEM*, int) {return false;} +void dump_resource(int type, RES *ares, void sendit(void *sock, const char *fmt, ...), void *sock){} +void free_resource(RES *rres, int type){} +union URES {}; +RES_TABLE resources[] = {}; +URES res_all; + +int main() +{ + Unittests ini_test("ini_test"); + FILE *fp; + int pos, size; + ConfigFile *ini = new ConfigFile(); + POOLMEM *buf = get_pool_memory(PM_BSOCK); + char buffer[2000]; + + //debug_level=500; + Pmsg0(0, "Begin Memory buffer Test\n"); + ok(ini->register_items(membuf_items, sizeof(struct ini_items)), "Check sizeof ini_items"); + + if ((fp = bfopen("test.cfg", "w")) == NULL) { + exit (1); + } + fprintf(fp, "client=JohnDoe\n"); + fprintf(fp, "serial=2\n"); + fprintf(fp, "max_clients=3\n"); + fflush(fp); + fseek(fp, 0, SEEK_END); + size = ftell(fp); + fclose(fp); + + if ((fp = bfopen("test.cfg", "rb")) == NULL) { + printf("Could not open file test.cfg\n"); + exit (1); + } + + size = fread(buffer, 1, size, fp); + buffer[size] = 0; + //printf("size of read buffer is: %d\n", size); + //printf("====buf=%s\n", buffer); + + ok(ini->parse_buf(buffer), "Test memory read with all members"); + + ini->clear_items(); + ini->free_items(); + fclose(fp); + + //debug_level = 0; + Pmsg0(0, "Begin Original Full Tests\n"); + nok(ini->register_items(test_items, 5), "Check bad sizeof ini_items"); + ok(ini->register_items(test_items, sizeof(struct ini_items)), "Check sizeof ini_items"); + + if ((fp = bfopen("test.cfg", "w")) == NULL) { + exit (1); + } + fprintf(fp, "# this is a comment\ndatastore=datastore1\nnewhost=\"host1\"\n"); + fflush(fp); + + nok(ini->parse("test.cfg"), "Test missing member"); + ini->clear_items(); + + fprintf(fp, "int64val=12 # with a comment\n"); + fprintf(fp, "int64val=10 # with a comment\n"); + fprintf(fp, "int32=100\n"); + fprintf(fp, "bool=yes\n"); + fprintf(fp, 
"plugin.test=parameter\n"); + fprintf(fp, "adate=\"1970-01-02 12:00:00\"\n"); + + fflush(fp); + + ok(ini->parse("test.cfg"), "Test with all members"); + + ok(ini->items[0].found, "Test presence of char[]"); + ok(!strcmp(ini->items[0].val.nameval, "datastore1"), "Test char[]"); + ok(ini->items[1].found, "Test presence of char*"); + ok(!strcmp(ini->items[1].val.strval, "host1"), "Test char*"); + ok(ini->items[2].found, "Test presence of int"); + ok(ini->items[2].val.int64val == 10, "Test int"); + ok(ini->items[4].val.boolval == true, "Test bool"); + ok(ini->items[6].val.int32val == 100, "Test int 32"); + ok(ini->items[6].val.btimeval != 126000, "Test btime"); + + alist *list = ini->items[3].val.alistval; + nok(ini->items[3].found, "Test presence of alist"); + + fprintf(fp, "list=a\nlist=b\nlist=c,d,e\n"); + fflush(fp); + + ini->clear_items(); + ok(ini->parse("test.cfg"), "Test with all members"); + + list = ini->items[3].val.alistval; + ok(ini->items[3].found, "Test presence of alist"); + ok(list != NULL, "Test list member"); + ok(list->size() == 5, "Test list size"); + + ok(!strcmp((char *)list->get(0), "a"), "Testing alist[0]"); + ok(!strcmp((char *)list->get(1), "b"), "Testing alist[1]"); + ok(!strcmp((char *)list->get(2), "c"), "Testing alist[2]"); + + system("cp -f test.cfg test3.cfg"); + + fprintf(fp, "pouet='10, 11, 12'\n"); + fprintf(fp, "pint=-100\n"); + fprintf(fp, "int64val=-100\n"); /* TODO: fix negative numbers */ + fflush(fp); + + ini->clear_items(); + ok(ini->parse("test.cfg"), "Test with errors"); + nok(ini->items[5].found, "Test presence of positive int"); + + fclose(fp); + ini->clear_items(); + ini->free_items(); + + /* Test */ + if ((fp = bfopen("test2.cfg", "w")) == NULL) { + exit (1); + } + fprintf(fp, + "# this is a comment\n" + "optprompt=\"Datastore Name\"\n" + "datastore=@NAME@\n" + "optprompt=\"New Hostname to create\"\n" + "newhost=@STR@\n" + "optprompt=\"Some 64 integer\"\n" + "optrequired=yes\n" + "int64val=@INT64@\n" + "list=@ALIST@\n" + "bool=@BOOL@\n" + "pint64=@PINT64@\n" + "pouet=@STR@\n" + "int32=@INT32@\n" + "plugin.test=@STR@\n" + "adate=@DATE@\n" + ); + fclose(fp); + + ok(ini->unserialize("test2.cfg"), "Test dynamic parse"); + ok(ini->serialize("test4.cfg"), "Try to dump the item table in a file"); + ok(ini->serialize(&buf) > 0, "Try to dump the item table in a buffer"); + ok(ini->parse("test3.cfg"), "Parse test file with dynamic grammar"); + + ok((pos = ini->get_item("datastore")) == 0, "Check datastore definition"); + ok(ini->items[pos].found, "Test presence of char[]"); + ok(!strcmp(ini->items[pos].val.nameval, "datastore1"), "Test char[]"); + ok(!strcmp(ini->items[pos].comment, "Datastore Name"), "Check comment"); + ok(ini->items[pos].required == false, "Check required"); + + ok((pos = ini->get_item("newhost")) == 1, "Check newhost definition"); + ok(ini->items[pos].found, "Test presence of char*"); + ok(!strcmp(ini->items[pos].val.strval, "host1"), "Test char*"); + ok(ini->items[pos].required == false, "Check required"); + + ok((pos = ini->get_item("int64val")) == 2, "Check int64val definition"); + ok(ini->items[pos].found, "Test presence of int"); + ok(ini->items[pos].val.int64val == 10, "Test int"); + ok(ini->items[pos].required == true, "Check required"); + + ok((pos = ini->get_item("bool")) == 4, "Check bool definition"); + ok(ini->items[pos].val.boolval == true, "Test bool"); + + ok((pos = ini->get_item("adate")) == 9, "Check adate definition"); + ok(ini->items[pos].val.btimeval == 126000, "Test date"); + + ok(ini->dump_results(&buf), "Test to 
dump results"); + printf("<%s>\n", buf); + + ini->clear_items(); + ini->free_items(); + delete(ini); + free_pool_memory(buf); + /* clean after tests */ + unlink("test.cfg"); + unlink("test2.cfg"); + unlink("test3.cfg"); + unlink("test4.cfg"); + + return report(); +} +#endif /* TEST_PROGRAM */ diff --git a/src/lib/ini.h b/src/lib/ini.h new file mode 100644 index 00000000..f6e86c82 --- /dev/null +++ b/src/lib/ini.h @@ -0,0 +1,249 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +#ifndef INI_H +#define INI_H + +/* + * Plugin has a internal C structure that describes the configuration: + * struct ini_items[] + * + * The ConfigFile object can generate a text file that describes the C + * structure. This text format is saved as RestoreObject in the catalog. + * + * struct ini_items[] -> register_items() -> serialize() -> RestoreObject R1 + * + * On the Director side, at the restore time, we can analyse this text to + * get the C structure. + * + * RestoreObject R1 -> write to disk -> unserialize() -> struct ini_items[] + * + * Once done, we can ask questions to the user at the restore time and fill + * the C struct with answers. The Director can send back as a RestoreObject + * the result of the questionnaire. + * + * struct ini_items[] -> UAContext -> dump_result() -> FD as RestoreObject R2 + * + * On the Plugin side, it can get back the C structure and use it. + * RestoreObject R2 -> parse() -> struct ini_items[] + */ + +class ConfigFile; +struct ini_items; + +/* Used to store result */ +typedef union { + char *strval; + char nameval[MAX_NAME_LENGTH]; + int64_t int64val; + int32_t int32val; + btime_t btimeval; + alist *alistval; + bool boolval; +} item_value; + +/* These functions are used to convert a string to the appropriate value */ +typedef +bool (INI_ITEM_HANDLER)(LEX *lc, ConfigFile *inifile, + struct ini_items *item); + +/* If no items are registred at the scan time, we detect this list from + * the file itself + */ +struct ini_items { + const char *name; /* keyword name */ + INI_ITEM_HANDLER *handler; /* type accepted */ + const char *comment; /* comment associated, used in prompt */ + + int required; /* optional required or not */ + const char *default_value; /* optional default value */ + + const char *re_value; /* optional regexp associated */ + const char *in_values; /* optional list of values */ + + bool found; /* if val is set */ + item_value val; /* val contains the value */ +}; + +/* When reading a ini file, we limit the number of items that we + * can create + */ +#define MAX_INI_ITEMS 32 + +/* Special RestoreObject name used to get user input at restore time */ +#define INI_RESTORE_OBJECT_NAME "RestoreOptions" + +/* Can be used to set re_value, in_value, default_value, found and val to 0 + * G++ looks to allow partial declaration, let see with an other compiler + */ +#define ITEMS_DEFAULT NULL,NULL,NULL,0,{0} + +/* + * Handle simple configuration file such as "ini" files. 
+ * key1 = val # comment + * OptPrompt=comment + * key2 = val + * + * For usage example, see ini.c TEST_PROGRAM + */ + +class ConfigFile +{ +private: + LEX *lc; /* Lex parser */ + bool items_allocated; + +public: + JCR *jcr; /* JCR needed for Jmsg */ + int version; /* Internal version check */ + int sizeof_ini_items; /* Extra check when using dynamic loading */ + bool unlink_temp_file; /* Unlink temp file when destroying object */ + struct ini_items *items; /* Structure of the config file */ + POOLMEM *out_fname; /* Can be used to dump config to disk */ + POOLMEM *edit; /* Can be used to build result file */ + char *plugin_name; /* Used to store owner of this ConfigFile */ + + ConfigFile() { + lc = NULL; + jcr = NULL; + items = NULL; + out_fname = NULL; + plugin_name = NULL; + + version = 1; + items_allocated = false; + unlink_temp_file = true; + edit = get_pool_memory(PM_FNAME); + edit[0] = 0; + sizeof_ini_items = sizeof(struct ini_items); + } + + virtual ~ConfigFile() { + if (lc) { + lex_close_file(lc); + } + if (edit) { + free_pool_memory(edit); + } + if (out_fname) { + if (unlink_temp_file) { + unlink(out_fname); + } + free_pool_memory(out_fname); + } + if (plugin_name) { + free(plugin_name); + } + clear_items(); + free_items(); + } + + /* Dump a config string to out_fname */ + bool dump_string(const char *buf, int32_t len); + + void set_plugin_name(char *n) { + if (plugin_name) { + free(plugin_name); + } + plugin_name = bstrdup(n); + } + + /* JCR needed for Jmsg */ + void set_jcr(JCR *ajcr) { + jcr = ajcr; + } + + void set_unlink_temp_file(bool val) { + unlink_temp_file = val; + } + + /* Free malloced items such as char* or alist or items */ + void free_items(); + + /* Clear items member */ + void clear_items(); + + /* Dump the item table to a file (used on plugin side) */ + bool serialize(const char *fname); + + /* Dump the item table format to a buffer (used on plugin side) + * returns the length of the buffer, -1 if error + */ + int serialize(POOLMEM **buf); + + /* Dump the item table content to a buffer */ + int dump_results(POOLMEM **buf); + + /* Get item position in items list (useful when dynamic) */ + virtual int get_item(const char *name); + + /* Register config file structure, if size doesn't match */ + bool register_items(struct ini_items *aitems, int size) { + int i; + if (sizeof_ini_items == size) { + for (i = 0; aitems[i].name ; i++); + items = (struct ini_items*) malloc((i+1) * size); /* NULL terminated */ + memcpy(items, aitems, (i+1) * size); + items_allocated = false; /* we copy only pointers, don't free them */ + return true; + } + return false; + } + + /* Parse an ini file with a item list previously registred (plugin side) */ + bool parse(const char *filename); + + /* Parse an ini buffer */ + bool parse_buf(const char *buf); + + /* Parse file or buffer already setup */ + bool parse(); + + /* Create a item list from a ini file (director side) */ + bool unserialize(const char *filename); + + /* Get Menu for an entry */ + char *get_menu(int index, POOLMEM **dest); + + /* Check if an entry is a part of a menu */ + bool is_in_menu(int index, const char *menu); +}; + +/* + * Standard global parsers defined in ini.c + * When called with lc=NULL, it converts the item value back in inifile->edit + * buffer. 
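 + *
 + * Editor's illustrative note (not part of the upstream header): every
 + * handler follows the same two-mode contract, e.g. for an int32 item:
 + *
 + *    ini_store_int32(NULL, inifile, item);   // export: format item->val
 + *                                            //         into inifile->edit
 + *    ini_store_int32(lc, inifile, item);     // import: read the next token
 + *                                            //         from lc, fill item->val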
+ */ +bool ini_store_str(LEX *lc, ConfigFile *inifile, ini_items *item); +bool ini_store_name(LEX *lc, ConfigFile *inifile, ini_items *item); +bool ini_store_alist_str(LEX *lc, ConfigFile *inifile, ini_items *item); +bool ini_store_pint64(LEX *lc, ConfigFile *inifile, ini_items *item); +bool ini_store_int64(LEX *lc, ConfigFile *inifile, ini_items *item); +bool ini_store_pint32(LEX *lc, ConfigFile *inifile, ini_items *item); +bool ini_store_int32(LEX *lc, ConfigFile *inifile, ini_items *item); +bool ini_store_bool(LEX *lc, ConfigFile *inifile, ini_items *item); +bool ini_store_date(LEX *lc, ConfigFile *inifile, ini_items *item); + +/* Get handler code from handler @ */ +const char *ini_get_store_code(INI_ITEM_HANDLER *handler); + +/* Get handler function from handler name */ +INI_ITEM_HANDLER *ini_get_store_handler(const char *); + +#endif diff --git a/src/lib/jcr.c b/src/lib/jcr.c new file mode 100644 index 00000000..ba9bf0d1 --- /dev/null +++ b/src/lib/jcr.c @@ -0,0 +1,1265 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Manipulation routines for Job Control Records and + * handling of last_jobs_list. + * + * Kern E. Sibbald, December 2000 + * + * These routines are thread safe. + * + * The job list routines were re-written in May 2005 to + * eliminate the global lock while traversing the list, and + * to use the dlist subroutines. The locking is now done + * on the list each time the list is modified or traversed. + * That is it is "micro-locked" rather than globally locked. + * The result is that there is one lock/unlock for each entry + * in the list while traversing it rather than a single lock + * at the beginning of a traversal and one at the end. This + * incurs slightly more overhead, but effectively eliminates + * the possibilty of race conditions. In addition, with the + * exception of the global locking of the list during the + * re-reading of the config file, no recursion is needed. 
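 + *
 + * Editor's illustrative sketch (not upstream text): lookup routines such
 + * as get_jcr_by_id() increment the JCR use count, so the caller owns a
 + * reference and must release it with free_jcr() when done:
 + *
 + *    JCR *jcr = get_jcr_by_id(JobId);
 + *    if (jcr) {
 + *       jcr->sendJobStatus();     // use the JCR while holding the reference
 + *       free_jcr(jcr);            // drop the reference taken by the lookup
 + *    }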
+ * + */ + +#include "bacula.h" +#include "jcr.h" + +const int dbglvl = 3400; + +/* External variables we reference */ + +/* External referenced functions */ +void free_bregexps(alist *bregexps); + +/* Forward referenced functions */ +extern "C" void timeout_handler(int sig); +static void jcr_timeout_check(watchdog_t *self); +#ifdef TRACE_JCR_CHAIN +static void b_lock_jcr_chain(const char *filen, int line); +static void b_unlock_jcr_chain(const char *filen, int line); +#define lock_jcr_chain() b_lock_jcr_chain(__FILE__, __LINE__); +#define unlock_jcr_chain() b_unlock_jcr_chain(__FILE__, __LINE__); +#else +static void lock_jcr_chain(); +static void unlock_jcr_chain(); +#endif + + +int num_jobs_run; +dlist *last_jobs = NULL; +const int max_last_jobs = 10; + +static dlist *jcrs = NULL; /* JCR chain */ +static pthread_mutex_t jcr_lock = PTHREAD_MUTEX_INITIALIZER; + +static pthread_mutex_t job_start_mutex = PTHREAD_MUTEX_INITIALIZER; + +static pthread_mutex_t last_jobs_mutex = PTHREAD_MUTEX_INITIALIZER; + +static pthread_key_t jcr_key; /* Pointer to jcr for each thread */ + +pthread_once_t key_once = PTHREAD_ONCE_INIT; + +static char Job_status[] = "Status JobId=%ld JobStatus=%d\n"; + + +void lock_jobs() +{ + P(job_start_mutex); +} + +void unlock_jobs() +{ + V(job_start_mutex); +} + +void init_last_jobs_list() +{ + JCR *jcr = NULL; + struct s_last_job *job_entry = NULL; + if (!last_jobs) { + last_jobs = New(dlist(job_entry, &job_entry->link)); + } + if (!jcrs) { + jcrs = New(dlist(jcr, &jcr->link)); + } +} + +void term_last_jobs_list() +{ + if (last_jobs) { + lock_last_jobs_list(); + while (!last_jobs->empty()) { + void *je = last_jobs->first(); + last_jobs->remove(je); + free(je); + } + delete last_jobs; + last_jobs = NULL; + unlock_last_jobs_list(); + } + if (jcrs) { + delete jcrs; + jcrs = NULL; + } +} + +bool read_last_jobs_list(int fd, uint64_t addr) +{ + struct s_last_job *je, job; + uint32_t num; + bool ok = true; + + Dmsg1(100, "read_last_jobs seek to %d\n", (int)addr); + if (addr == 0 || lseek(fd, (boffset_t)addr, SEEK_SET) < 0) { + return false; + } + if (read(fd, &num, sizeof(num)) != sizeof(num)) { + return false; + } + Dmsg1(100, "Read num_items=%d\n", num); + if (num > 4 * max_last_jobs) { /* sanity check */ + return false; + } + lock_last_jobs_list(); + for ( ; num; num--) { + if (read(fd, &job, sizeof(job)) != sizeof(job)) { + berrno be; + Pmsg1(000, "Read job entry. 
ERR=%s\n", be.bstrerror()); + ok = false; + break; + } + if (job.JobId > 0) { + je = (struct s_last_job *)malloc(sizeof(struct s_last_job)); + memcpy((char *)je, (char *)&job, sizeof(job)); + if (!last_jobs) { + init_last_jobs_list(); + } + last_jobs->append(je); + if (last_jobs->size() > max_last_jobs) { + je = (struct s_last_job *)last_jobs->first(); + last_jobs->remove(je); + free(je); + } + } + } + unlock_last_jobs_list(); + return ok; +} + +uint64_t write_last_jobs_list(int fd, uint64_t addr) +{ + struct s_last_job *je; + uint32_t num; + ssize_t stat; + + Dmsg1(100, "write_last_jobs seek to %d\n", (int)addr); + if (lseek(fd, (boffset_t)addr, SEEK_SET) < 0) { + return 0; + } + if (last_jobs) { + lock_last_jobs_list(); + /* First record is number of entires */ + num = last_jobs->size(); + if (write(fd, &num, sizeof(num)) != sizeof(num)) { + berrno be; + Pmsg1(000, "Error writing num_items: ERR=%s\n", be.bstrerror()); + goto bail_out; + } + foreach_dlist(je, last_jobs) { + if (write(fd, je, sizeof(struct s_last_job)) != sizeof(struct s_last_job)) { + berrno be; + Pmsg1(000, "Error writing job: ERR=%s\n", be.bstrerror()); + goto bail_out; + } + } + unlock_last_jobs_list(); + } + /* Return current address */ + stat = lseek(fd, 0, SEEK_CUR); + if (stat < 0) { + stat = 0; + } + return stat; + +bail_out: + unlock_last_jobs_list(); + return 0; +} + +void lock_last_jobs_list() +{ + P(last_jobs_mutex); +} + +void unlock_last_jobs_list() +{ + V(last_jobs_mutex); +} + +/* Get an ASCII representation of the Operation being performed as an english Noun */ +const char *JCR::get_OperationName() +{ + switch(m_JobType) { + case JT_BACKUP: + return _("Backup"); + case JT_VERIFY: + return _("Verifying"); + case JT_RESTORE: + return _("Restoring"); + case JT_ARCHIVE: + return _("Archiving"); + case JT_COPY: + return _("Copying"); + case JT_MIGRATE: + return _("Migration"); + case JT_SCAN: + return _("Scanning"); + default: + return _("Unknown operation"); + } +} + +/* Get an ASCII representation of the Action being performed either an english Verb or Adjective */ +const char *JCR::get_ActionName(bool past) +{ + switch(m_JobType) { + case JT_BACKUP: + return _("backup"); + case JT_VERIFY: + return (past == true) ? _("verified") : _("verify"); + case JT_RESTORE: + return (past == true) ? _("restored") : _("restore"); + case JT_ARCHIVE: + return (past == true) ? _("archived") : _("archive"); + case JT_COPY: + return (past == true) ? _("copied") : _("copy"); + case JT_MIGRATE: + return (past == true) ? _("migrated") : _("migrate"); + case JT_SCAN: + return (past == true) ? _("scanned") : _("scan"); + default: + return _("unknown action"); + } +} + +bool JCR::JobReads() +{ + switch (m_JobType) { + case JT_VERIFY: + case JT_RESTORE: + case JT_COPY: + case JT_MIGRATE: + return true; + case JT_BACKUP: + if (m_JobLevel == L_VIRTUAL_FULL) { + return true; + } + break; + default: + break; + } + return false; +} + +/* We can stop only Backup jobs connected to a client. It doesn't make sens at + * this time to stop a copy, migraton, restore or a verify job. The specific + * code should be implemented first. + */ +bool JCR::can_be_stopped() +{ + bool ok=true; + if (getJobType() == JT_BACKUP) { /* Is a Backup */ + if (getJobLevel() == L_VIRTUAL_FULL) { /* Is a VirtualFull */ + ok = false; + } + } else { /* Is not a backup (so, copy, migration, admin, verify, ... 
*/ + ok = false; + } + return ok; +} + +/* + * Push a subroutine address into the job end callback stack + */ +void job_end_push(JCR *jcr, void job_end_cb(JCR *jcr,void *), void *ctx) +{ + jcr->job_end_push.append((void *)job_end_cb); + jcr->job_end_push.append(ctx); +} + +/* DELETE ME when bugs in MA1512, MA1632 MA1639 are fixed */ +void (*MA1512_reload_job_end_cb)(JCR *,void *) = NULL; + +/* Pop each job_end subroutine and call it */ +static void job_end_pop(JCR *jcr) +{ + void (*job_end_cb)(JCR *jcr, void *ctx); + void *ctx; + for (int i=jcr->job_end_push.size()-1; i > 0; ) { + ctx = jcr->job_end_push.get(i--); + job_end_cb = (void (*)(JCR *,void *))jcr->job_end_push.get(i--); + /* check for bug MA1512, MA1632 MA1639, + * today, job_end_cb can only be reload_job_end_cb() from DIR */ + if (job_end_cb != MA1512_reload_job_end_cb && MA1512_reload_job_end_cb != NULL) { + Tmsg2(0, "Bug 'job_end_pop' detected, skip ! job_end_cb=0x%p ctx=0x%p\n", job_end_cb, ctx); + Tmsg0(0, "Display job_end_push list\n"); + for (int j=jcr->job_end_push.size()-1; j > 0; ) { + void *ctx2 = jcr->job_end_push.get(j--); + void *job_end_cb2 = jcr->job_end_push.get(j--); + Tmsg3(0, "Bug 'job_end_pop' entry[%d] job_end_cb=0x%p ctx=0x%p\n", j+1, job_end_cb2, ctx2); + } + } else + { + job_end_cb(jcr, ctx); + } + } +} + +/* + * Create thread key for thread specific data + */ +void create_jcr_key() +{ + int status = pthread_key_create(&jcr_key, NULL); + if (status != 0) { + berrno be; + Jmsg1(NULL, M_ABORT, 0, _("pthread key create failed: ERR=%s\n"), + be.bstrerror(status)); + } +} + +/* + * Create a Job Control Record and link it into JCR chain + * Returns newly allocated JCR + * Note, since each daemon has a different JCR, he passes + * us the size. + */ +JCR *new_jcr(int size, JCR_free_HANDLER *daemon_free_jcr) +{ + JCR *jcr; + MQUEUE_ITEM *item = NULL; + int status; + + Dmsg0(dbglvl, "Enter new_jcr\n"); + status = pthread_once(&key_once, create_jcr_key); + if (status != 0) { + berrno be; + Jmsg1(NULL, M_ABORT, 0, _("pthread_once failed. ERR=%s\n"), be.bstrerror(status)); + } + jcr = (JCR *)malloc(size); + bmemzero(jcr, size); + /* Note for the director, this value is changed in jobq.c */ + jcr->my_thread_id = pthread_self(); + jcr->msg_queue = New(dlist(item, &item->link)); + if ((status = pthread_mutex_init(&jcr->msg_queue_mutex, NULL)) != 0) { + berrno be; + Jmsg(NULL, M_ABORT, 0, _("Could not init msg_queue mutex. 
ERR=%s\n"), + be.bstrerror(status)); + } + jcr->job_end_push.init(1, false); + jcr->sched_time = time(NULL); + jcr->initial_sched_time = jcr->sched_time; + jcr->daemon_free_jcr = daemon_free_jcr; /* plug daemon free routine */ + jcr->init_mutex(); + jcr->inc_use_count(); + jcr->VolumeName = get_pool_memory(PM_FNAME); + jcr->VolumeName[0] = 0; + jcr->errmsg = get_pool_memory(PM_MESSAGE); + jcr->errmsg[0] = 0; + jcr->comment = get_pool_memory(PM_FNAME); + jcr->comment[0] = 0; + jcr->StatusErrMsg = get_pool_memory(PM_FNAME); + jcr->StatusErrMsg[0] = 0; + jcr->job_uid = -1; + /* Setup some dummy values */ + bstrncpy(jcr->Job, "*System*", sizeof(jcr->Job)); + jcr->JobId = 0; + jcr->setJobType(JT_SYSTEM); /* internal job until defined */ + jcr->setJobLevel(L_NONE); + jcr->setJobStatus(JS_Created); /* ready to run */ +#ifndef HAVE_WIN32 + struct sigaction sigtimer; + sigtimer.sa_flags = 0; + sigtimer.sa_handler = timeout_handler; + sigfillset(&sigtimer.sa_mask); + sigaction(TIMEOUT_SIGNAL, &sigtimer, NULL); +#endif + + /* + * Locking jobs is a global lock that is needed + * so that the Director can stop new jobs from being + * added to the jcr chain while it processes a new + * conf file and does the job_end_push(). + */ + lock_jobs(); + lock_jcr_chain(); + if (!jcrs) { + jcrs = New(dlist(jcr, &jcr->link)); + } + jcrs->append(jcr); + unlock_jcr_chain(); + unlock_jobs(); + + return jcr; +} + + +/* + * Remove a JCR from the chain + * NOTE! The chain must be locked prior to calling + * this routine. + */ +static void remove_jcr(JCR *jcr) +{ + Dmsg0(dbglvl, "Enter remove_jcr\n"); + if (!jcr) { + Emsg0(M_ABORT, 0, _("NULL jcr.\n")); + } + jcrs->remove(jcr); + Dmsg0(dbglvl, "Leave remove_jcr\n"); +} + +/* + * Free stuff common to all JCRs. N.B. Be careful to include only + * generic stuff in the common part of the jcr. 
+ */ +static void free_common_jcr(JCR *jcr) +{ + /* Uses jcr lock/unlock */ + remove_jcr_from_tsd(jcr); + jcr->set_killable(false); + + jcr->destroy_mutex(); + + if (jcr->msg_queue) { + delete jcr->msg_queue; + jcr->msg_queue = NULL; + pthread_mutex_destroy(&jcr->msg_queue_mutex); + } + + /* do this after closing messages */ + free_and_null_pool_memory(jcr->JobIds); + free_and_null_pool_memory(jcr->client_name); + free_and_null_pool_memory(jcr->attr); + free_and_null_pool_memory(jcr->VolumeName); + free_and_null_pool_memory(jcr->errmsg); + free_and_null_pool_memory(jcr->StatusErrMsg); + + if (jcr->sd_auth_key) { + free(jcr->sd_auth_key); + jcr->sd_auth_key = NULL; + } + + free_bsock(jcr->dir_bsock); + + if (jcr->where) { + free(jcr->where); + jcr->where = NULL; + } + if (jcr->RegexWhere) { + free(jcr->RegexWhere); + jcr->RegexWhere = NULL; + } + if (jcr->where_bregexp) { + free_bregexps(jcr->where_bregexp); + delete jcr->where_bregexp; + jcr->where_bregexp = NULL; + } + if (jcr->cached_path) { + free_pool_memory(jcr->cached_path); + jcr->cached_path = NULL; + jcr->cached_pnl = 0; + } + if (jcr->id_list) { + free_guid_list(jcr->id_list); + jcr->id_list = NULL; + } + if (jcr->comment) { + free_pool_memory(jcr->comment); + jcr->comment = NULL; + } + free(jcr); +} + +/* + * Global routine to free a jcr + */ +#ifdef DEBUG +void b_free_jcr(const char *file, int line, JCR *jcr) +{ + struct s_last_job *je; + + Dmsg3(dbglvl, "Enter free_jcr jid=%u from %s:%d\n", jcr->JobId, file, line); + +#else + +void free_jcr(JCR *jcr) +{ + struct s_last_job *je; + + Dmsg3(dbglvl, "Enter free_jcr jid=%u use_count=%d Job=%s\n", + jcr->JobId, jcr->use_count(), jcr->Job); + +#endif + + lock_jcr_chain(); + jcr->dec_use_count(); /* decrement use count */ + ASSERT2(jcr->use_count() >= 0, "JCR use_count < 0"); + // Jmsg2(jcr, M_ERROR, 0, _("JCR use_count=%d JobId=%d\n"), + // jcr->use_count(), jcr->JobId); + //} + if (jcr->JobId > 0) { + Dmsg3(dbglvl, "Dec free_jcr jid=%u use_count=%d Job=%s\n", + jcr->JobId, jcr->use_count(), jcr->Job); + } + if (jcr->use_count() > 0) { /* if in use */ + unlock_jcr_chain(); + return; + } + if (jcr->JobId > 0) { + Dmsg3(dbglvl, "remove jcr jid=%u use_count=%d Job=%s\n", + jcr->JobId, jcr->use_count(), jcr->Job); + } + jcr->exiting = true; + remove_jcr(jcr); /* remove Jcr from chain */ + unlock_jcr_chain(); + + if (jcr->JobId > 0) { + dequeue_messages(jcr); + dequeue_daemon_messages(jcr); + } + close_msg(jcr); /* close messages for this job */ + job_end_pop(jcr); /* pop and call hooked routines */ + + Dmsg1(dbglvl, "End job=%d\n", jcr->JobId); + + /* Keep some statistics */ + switch (jcr->getJobType()) { + case JT_BACKUP: + case JT_VERIFY: + case JT_RESTORE: + case JT_MIGRATE: + case JT_COPY: + case JT_ADMIN: + /* Keep list of last jobs, but not Console where JobId==0 */ + if (jcr->JobId > 0) { + lock_last_jobs_list(); + num_jobs_run++; + je = (struct s_last_job *)malloc(sizeof(struct s_last_job)); + memset(je, 0, sizeof(struct s_last_job)); /* zero in case unset fields */ + je->Errors = jcr->JobErrors; + je->JobType = jcr->getJobType(); + je->JobId = jcr->JobId; + je->VolSessionId = jcr->VolSessionId; + je->VolSessionTime = jcr->VolSessionTime; + bstrncpy(je->Job, jcr->Job, sizeof(je->Job)); + je->JobFiles = jcr->JobFiles; + je->JobBytes = jcr->JobBytes; + je->JobStatus = jcr->JobStatus; + je->JobLevel = jcr->getJobLevel(); + je->start_time = jcr->start_time; + je->end_time = time(NULL); + + if (!last_jobs) { + init_last_jobs_list(); + } + last_jobs->append(je); + if (last_jobs->size() 
> max_last_jobs) { + je = (struct s_last_job *)last_jobs->first(); + last_jobs->remove(je); + free(je); + } + unlock_last_jobs_list(); + } + break; + default: + break; + } + + if (jcr->daemon_free_jcr) { + jcr->daemon_free_jcr(jcr); /* call daemon free routine */ + } + + free_common_jcr(jcr); + close_msg(NULL); /* flush any daemon messages */ + Dmsg0(dbglvl, "Exit free_jcr\n"); +} + +/* + * Remove jcr from thread specific data, but + * but make sure it is us who are attached. + */ +void remove_jcr_from_tsd(JCR *jcr) +{ + JCR *tjcr = get_jcr_from_tsd(); + if (tjcr == jcr) { + set_jcr_in_tsd(INVALID_JCR); + } +} + +void JCR::set_killable(bool killable) +{ + lock(); + my_thread_killable = killable; + unlock(); +} + +/* + * Put this jcr in the thread specifc data + * if update_thread_info is true and the jcr is valide, + * we update the my_thread_id in the JCR + */ +void set_jcr_in_tsd(JCR *jcr) +{ + int status = pthread_setspecific(jcr_key, (void *)jcr); + if (status != 0) { + berrno be; + Jmsg1(jcr, M_ABORT, 0, _("pthread_setspecific failed: ERR=%s\n"), + be.bstrerror(status)); + } +} + +void JCR::my_thread_send_signal(int sig) +{ + lock_jcr_chain(); /* use global lock */ + this->lock(); + if (this->exiting) { + goto get_out; + } + if (this->is_killable() && + !pthread_equal(this->my_thread_id, pthread_self())) + { + Dmsg1(800, "Send kill to jid=%d\n", this->JobId); + pthread_kill(this->my_thread_id, sig); + this->exiting = true; + + } else if (!this->is_killable()) { + Dmsg1(10, "Warning, cannot send kill to jid=%d marked not killable.\n", this->JobId); + } +get_out: + this->unlock(); + unlock_jcr_chain(); +} + +/* + * Give me the jcr that is attached to this thread + */ +JCR *get_jcr_from_tsd() +{ + JCR *jcr = (JCR *)pthread_getspecific(jcr_key); +// printf("get_jcr_from_tsd: jcr=%p\n", jcr); + /* set any INVALID_JCR to NULL which the rest of Bacula understands */ + if (jcr == INVALID_JCR) { + jcr = NULL; + } + return jcr; +} + + +/* + * Find which JobId corresponds to the current thread + */ +uint32_t get_jobid_from_tsd() +{ + JCR *jcr; + uint32_t JobId = 0; + jcr = get_jcr_from_tsd(); +// printf("get_jobid_from_tsr: jcr=%p\n", jcr); + if (jcr) { + JobId = (uint32_t)jcr->JobId; + } + return JobId; +} + +/* + * Given a JobId, find the JCR + * Returns: jcr on success + * NULL on failure + */ +JCR *get_jcr_by_id(uint32_t JobId) +{ + JCR *jcr; + + foreach_jcr(jcr) { + if (jcr->JobId == JobId) { + jcr->inc_use_count(); + Dmsg3(dbglvl, "Inc get_jcr jid=%u use_count=%d Job=%s\n", + jcr->JobId, jcr->use_count(), jcr->Job); + break; + } + } + endeach_jcr(jcr); + return jcr; +} + +/* + * Given a thread id, find the JobId + * Returns: JobId on success + * 0 on failure + */ +uint32_t get_jobid_from_tid(pthread_t tid) +{ + JCR *jcr = NULL; + bool found = false; + + foreach_jcr(jcr) { + if (pthread_equal(jcr->my_thread_id, tid)) { + found = true; + break; + } + } + endeach_jcr(jcr); + if (found) { + return jcr->JobId; + } + return 0; +} + + +/* + * Given a SessionId and SessionTime, find the JCR + * Returns: jcr on success + * NULL on failure + */ +JCR *get_jcr_by_session(uint32_t SessionId, uint32_t SessionTime) +{ + JCR *jcr; + + foreach_jcr(jcr) { + if (jcr->VolSessionId == SessionId && + jcr->VolSessionTime == SessionTime) { + jcr->inc_use_count(); + Dmsg3(dbglvl, "Inc get_jcr jid=%u use_count=%d Job=%s\n", + jcr->JobId, jcr->use_count(), jcr->Job); + break; + } + } + endeach_jcr(jcr); + return jcr; +} + + +/* + * Given a Job, find the JCR + * compares on the number of characters in Job + * thus 
allowing partial matches. + * Returns: jcr on success + * NULL on failure + */ +JCR *get_jcr_by_partial_name(char *Job) +{ + JCR *jcr; + int len; + + if (!Job) { + return NULL; + } + len = strlen(Job); + foreach_jcr(jcr) { + if (strncmp(Job, jcr->Job, len) == 0) { + jcr->inc_use_count(); + Dmsg3(dbglvl, "Inc get_jcr jid=%u use_count=%d Job=%s\n", + jcr->JobId, jcr->use_count(), jcr->Job); + break; + } + } + endeach_jcr(jcr); + return jcr; +} + + +/* + * Given a Job, find the JCR + * requires an exact match of names. + * Returns: jcr on success + * NULL on failure + */ +JCR *get_jcr_by_full_name(char *Job) +{ + JCR *jcr; + + if (!Job) { + return NULL; + } + foreach_jcr(jcr) { + if (strcmp(jcr->Job, Job) == 0) { + jcr->inc_use_count(); + Dmsg3(dbglvl, "Inc get_jcr jid=%u use_count=%d Job=%s\n", + jcr->JobId, jcr->use_count(), jcr->Job); + break; + } + } + endeach_jcr(jcr); + return jcr; +} + +static void update_wait_time(JCR *jcr, int newJobStatus) +{ + bool enter_in_waittime; + int oldJobStatus = jcr->JobStatus; + + switch (newJobStatus) { + case JS_WaitFD: + case JS_WaitSD: + case JS_WaitMedia: + case JS_WaitMount: + case JS_WaitStoreRes: + case JS_WaitJobRes: + case JS_WaitClientRes: + case JS_WaitMaxJobs: + case JS_WaitPriority: + enter_in_waittime = true; + break; + default: + enter_in_waittime = false; /* not a Wait situation */ + break; + } + + /* + * If we were previously waiting and are not any more + * we want to update the wait_time variable, which is + * the start of waiting. + */ + switch (oldJobStatus) { + case JS_WaitFD: + case JS_WaitSD: + case JS_WaitMedia: + case JS_WaitMount: + case JS_WaitStoreRes: + case JS_WaitJobRes: + case JS_WaitClientRes: + case JS_WaitMaxJobs: + case JS_WaitPriority: + if (!enter_in_waittime) { /* we get out the wait time */ + jcr->wait_time_sum += (time(NULL) - jcr->wait_time); + jcr->wait_time = 0; + } + break; + + /* if wait state is new, we keep current time for watchdog MaxWaitTime */ + default: + if (enter_in_waittime) { + jcr->wait_time = time(NULL); + } + break; + } +} + +/* + * Priority runs from 0 (lowest) to 10 (highest) + */ +static int get_status_priority(int JobStatus) +{ + int priority = 0; + switch (JobStatus) { + case JS_Incomplete: + priority = 10; + break; + case JS_ErrorTerminated: + case JS_FatalError: + case JS_Canceled: + priority = 9; + break; + case JS_Error: + priority = 8; + break; + case JS_Differences: + priority = 7; + break; + } + return priority; +} + +/* + * Send Job status to Director + */ +bool JCR::sendJobStatus() +{ + if (dir_bsock) { + return dir_bsock->fsend(Job_status, JobId, JobStatus); + } + return true; +} + +/* + * Set and send Job status to Director + */ +bool JCR::sendJobStatus(int aJobStatus) +{ + if (!is_JobStatus(aJobStatus)) { + setJobStatus(aJobStatus); + if (dir_bsock) { + return dir_bsock->fsend(Job_status, JobId, JobStatus); + } + } + return true; +} + +void JCR::setJobStarted() +{ + job_started = true; + job_started_time = time(NULL); +} + +static pthread_mutex_t status_lock = PTHREAD_MUTEX_INITIALIZER; + +void JCR::setJobStatus(int newJobStatus) +{ + int priority, old_priority; + int oldJobStatus = JobStatus; + + P(status_lock); + priority = get_status_priority(newJobStatus); + old_priority = get_status_priority(oldJobStatus); + + Dmsg2(800, "set_jcr_job_status(%ld, %c)\n", JobId, newJobStatus); + + /* Update wait_time depending on newJobStatus and oldJobStatus */ + update_wait_time(this, newJobStatus); + + /* + * For a set of errors, ... keep the current status + * so it isn't lost. 
For all others, set it. + */ + Dmsg2(800, "OnEntry JobStatus=%c newJobstatus=%c\n", (oldJobStatus==0)?'0':oldJobStatus, newJobStatus); + /* + * If status priority is > than proposed new status, change it. + * If status priority == new priority and both are zero, take + * the new status. + * If it is not zero, then we keep the first non-zero "error" that + * occurred. + */ + if (priority > old_priority || ( + priority == 0 && old_priority == 0)) { + Dmsg4(800, "Set new stat. old: %c,%d new: %c,%d\n", + (oldJobStatus==0)?'0':oldJobStatus, old_priority, newJobStatus, priority); + JobStatus = newJobStatus; /* replace with new status */ + } + + if (oldJobStatus != JobStatus) { + Dmsg2(800, "leave setJobStatus old=%c new=%c\n", (oldJobStatus==0)?'0':oldJobStatus, newJobStatus); +// generate_plugin_event(this, bEventStatusChange, NULL); + } + V(status_lock); +} + +#ifdef TRACE_JCR_CHAIN +static int lock_count = 0; +#endif + +/* + * Lock the chain + */ +#ifdef TRACE_JCR_CHAIN +static void b_lock_jcr_chain(const char *fname, int line) +#else +static void lock_jcr_chain() +#endif +{ +#ifdef TRACE_JCR_CHAIN + Dmsg3(dbglvl, "Lock jcr chain %d from %s:%d\n", ++lock_count, fname, line); +#endif + P(jcr_lock); +} + +/* + * Unlock the chain + */ +#ifdef TRACE_JCR_CHAIN +static void b_unlock_jcr_chain(const char *fname, int line) +#else +static void unlock_jcr_chain() +#endif +{ +#ifdef TRACE_JCR_CHAIN + Dmsg3(dbglvl, "Unlock jcr chain %d from %s:%d\n", lock_count--, fname, line); +#endif + V(jcr_lock); +} + +/* + * Start walk of jcr chain + * The proper way to walk the jcr chain is: + * JCR *jcr; + * foreach_jcr(jcr) { + * ... + * } + * endeach_jcr(jcr); + * + * It is possible to leave out the endeach_jcr(jcr), but + * in that case, the last jcr referenced must be explicitly + * released with: + * + * free_jcr(jcr); + * + */ +JCR *jcr_walk_start() +{ + JCR *jcr; + lock_jcr_chain(); + jcr = (JCR *)jcrs->first(); + if (jcr) { + jcr->inc_use_count(); + if (jcr->JobId > 0) { + Dmsg3(dbglvl, "Inc walk_start jid=%u use_count=%d Job=%s\n", + jcr->JobId, jcr->use_count(), jcr->Job); + } + } + unlock_jcr_chain(); + return jcr; +} + +/* + * Get next jcr from chain, and release current one + */ +JCR *jcr_walk_next(JCR *prev_jcr) +{ + JCR *jcr; + + lock_jcr_chain(); + jcr = (JCR *)jcrs->next(prev_jcr); + if (jcr) { + jcr->inc_use_count(); + if (jcr->JobId > 0) { + Dmsg3(dbglvl, "Inc walk_next jid=%u use_count=%d Job=%s\n", + jcr->JobId, jcr->use_count(), jcr->Job); + } + } + unlock_jcr_chain(); + if (prev_jcr) { + free_jcr(prev_jcr); + } + return jcr; +} + +/* + * Release last jcr referenced + */ +void jcr_walk_end(JCR *jcr) +{ + if (jcr) { + if (jcr->JobId > 0) { + Dmsg3(dbglvl, "Free walk_end jid=%u use_count=%d Job=%s\n", + jcr->JobId, jcr->use_count(), jcr->Job); + } + free_jcr(jcr); + } +} + +/* + * Return number of Jobs + */ +int job_count() +{ + JCR *jcr; + int count = 0; + + lock_jcr_chain(); + for (jcr = (JCR *)jcrs->first(); (jcr = (JCR *)jcrs->next(jcr)); ) { + if (jcr->JobId > 0) { + count++; + } + } + unlock_jcr_chain(); + return count; +} + + +/* + * Setup to call the timeout check routine every 30 seconds + * This routine will check any timers that have been enabled. 
+ */ +bool init_jcr_subsystem(void) +{ + watchdog_t *wd = new_watchdog(); + + wd->one_shot = false; + wd->interval = 30; /* FIXME: should be configurable somewhere, even + if only with a #define */ + wd->callback = jcr_timeout_check; + + register_watchdog(wd); + + return true; +} + +static void jcr_timeout_check(watchdog_t *self) +{ + JCR *jcr; + BSOCK *bs; + time_t timer_start; + + Dmsg0(dbglvl, "Start JCR timeout checks\n"); + + /* Walk through all JCRs checking if any one is + * blocked for more than specified max time. + */ + foreach_jcr(jcr) { + Dmsg2(dbglvl, "jcr_timeout_check JobId=%u jcr=0x%x\n", jcr->JobId, jcr); + if (jcr->JobId == 0) { + continue; + } + bs = jcr->store_bsock; + if (bs) { + timer_start = bs->timer_start; + if (timer_start && (watchdog_time - timer_start) > bs->timeout) { + bs->timer_start = 0; /* turn off timer */ + bs->set_timed_out(); + Qmsg(jcr, M_ERROR, 0, _( +"Watchdog sending kill after %d secs to thread stalled reading Storage daemon.\n"), + (int)(watchdog_time - timer_start)); + jcr->my_thread_send_signal(TIMEOUT_SIGNAL); + } + } + bs = jcr->file_bsock; + if (bs) { + timer_start = bs->timer_start; + if (timer_start && (watchdog_time - timer_start) > bs->timeout) { + bs->timer_start = 0; /* turn off timer */ + bs->set_timed_out(); + Qmsg(jcr, M_ERROR, 0, _( +"Watchdog sending kill after %d secs to thread stalled reading File daemon.\n"), + (int)(watchdog_time - timer_start)); + jcr->my_thread_send_signal(TIMEOUT_SIGNAL); + } + } + bs = jcr->dir_bsock; + if (bs) { + timer_start = bs->timer_start; + if (timer_start && (watchdog_time - timer_start) > bs->timeout) { + bs->timer_start = 0; /* turn off timer */ + bs->set_timed_out(); + Qmsg(jcr, M_ERROR, 0, _( +"Watchdog sending kill after %d secs to thread stalled reading Director.\n"), + (int)(watchdog_time - timer_start)); + jcr->my_thread_send_signal(TIMEOUT_SIGNAL); + } + } + } + endeach_jcr(jcr); + + Dmsg0(dbglvl, "Finished JCR timeout checks\n"); +} + +/* + * Return next JobId from comma separated list + * + * Returns: + * 1 if next JobId returned + * 0 if no more JobIds are in list + * -1 there is an error + */ +int get_next_jobid_from_list(char **p, uint32_t *JobId) +{ + const int maxlen = 30; + char jobid[maxlen+1]; + char *q = *p; + + jobid[0] = 0; + for (int i=0; isize()); + + for (JCR *jcr = (JCR *)jcrs->first(); jcr ; jcr = (JCR *)jcrs->next(jcr)) { + fprintf(fp, "threadid=%p JobId=%d JobStatus=%c jcr=%p name=%s\n", + get_threadid(jcr->my_thread_id), (int)jcr->JobId, jcr->JobStatus, jcr, jcr->Job); + fprintf(fp, "\tuse_count=%i killable=%d\n", + jcr->use_count(), jcr->is_killable()); + fprintf(fp, "\tJobType=%c JobLevel=%c\n", + jcr->getJobType(), jcr->getJobLevel()); + bstrftime(buf1, sizeof(buf1), jcr->sched_time); + bstrftime(buf2, sizeof(buf2), jcr->start_time); + bstrftime(buf3, sizeof(buf3), jcr->end_time); + bstrftime(buf4, sizeof(buf4), jcr->wait_time); + fprintf(fp, "\tsched_time=%s start_time=%s\n\tend_time=%s wait_time=%s\n", + buf1, buf2, buf3, buf4); + fprintf(fp, "\tdb=%p db_batch=%p batch_started=%i\n", + jcr->db, jcr->db_batch, jcr->batch_started); + + /* + * Call all the jcr debug hooks + */ + for(int i=0; i < dbg_jcr_handler_count; i++) { + dbg_jcr_hook_t *hook = dbg_jcr_hooks[i]; + hook(jcr, fp); + } + } +} diff --git a/src/lib/lex.c b/src/lib/lex.c new file mode 100644 index 00000000..5b4780aa --- /dev/null +++ b/src/lib/lex.c @@ -0,0 +1,901 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern 
Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Lexical scanner for Bacula configuration file + * + * Kern Sibbald, 2000 + * + */ + +#include "bacula.h" +#include "lex.h" + +/* Debug level for this source file */ +static const int dbglvl = 5000; + +/* + * Return false if the end of the line contains anything other + * than spaces, or a semicolon or a comment. + */ +bool lex_check_eol(LEX *lf) +{ + char *ch = lf->line+lf->col_no; + while (*ch != '\0' && *ch != '#' && B_ISSPACE(*ch) && *ch != ';') { + ch++; + } + return *ch == '\0' || *ch == '#' || *ch == ';'; +} + +/* + * Scan to "logical" end of line. I.e. end of line, + * or semicolon, but stop on T_EOB (same as end of + * line except it is not eaten). + */ +void scan_to_eol(LEX *lc) +{ + int token; + Dmsg0(dbglvl, "start scan to eol\n"); + while ((token = lex_get_token(lc, T_ALL)) != T_EOL) { + if (token == T_EOB) { + lex_unget_char(lc); + return; + } + if (token == T_EOF) { + return; + } + } +} + +/* + * Get next token, but skip EOL + */ +int scan_to_next_not_eol(LEX * lc) +{ + int token; + do { + token = lex_get_token(lc, T_ALL); + } while (token == T_EOL); + return token; +} + +/* + * Format a scanner error message + */ +static void s_err(const char *file, int line, LEX *lc, const char *msg, ...) +{ + va_list arg_ptr; + char buf[MAXSTRING]; + char more[MAXSTRING]; + + va_start(arg_ptr, msg); + bvsnprintf(buf, sizeof(buf), msg, arg_ptr); + va_end(arg_ptr); + + if (lc->err_type == 0) { /* M_ERROR_TERM by default */ + lc->err_type = M_ERROR_TERM; + } + + if (lc->line_no > lc->begin_line_no) { + bsnprintf(more, sizeof(more), + _("Problem probably begins at line %d.\n"), lc->begin_line_no); + } else { + more[0] = 0; + } + if (lc->line_no > 0) { + e_msg(file, line, lc->err_type, 0, _("Config error: %s\n" +" : line %d, col %d of file %s\n%s\n%s"), + buf, lc->line_no, lc->col_no, lc->fname, lc->line, more); + } else { + e_msg(file, line, lc->err_type, 0, _("Config error: %s\n"), buf); + } +} + +void lex_set_default_error_handler(LEX *lf) +{ + lf->scan_error = s_err; +} + +/* + * Set err_type used in error_handler + * return the old value + */ +int lex_set_error_handler_error_type(LEX *lf, int err_type) +{ + int old = lf->err_type; + lf->err_type = err_type; + return old; +} + +/* Store passwords in clear text or with MD5 encoding */ +void lex_store_clear_passwords(LEX *lf) +{ + lf->options |= LOPT_NO_MD5; +} + +/* + * Free the current file, and retrieve the contents + * of the previous packet if any. 
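 + *
 + * Editor's illustrative sketch (not upstream text): the usual scan cycle
 + * pairs lex_open_file() with lex_close_file(); fname below stands for the
 + * configuration file path, and a NULL handler installs the default one:
 + *
 + *    LEX *lc = lex_open_file(NULL, fname, NULL);
 + *    if (lc) {
 + *       int token;
 + *       while ((token = lex_get_token(lc, T_ALL)) != T_EOF) {
 + *          Dmsg1(50, "token=%s\n", lex_tok_to_str(token));
 + *       }
 + *       lc = lex_close_file(lc);
 + *    }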
+ */ +LEX *lex_close_file(LEX *lf) +{ + LEX *of; + + if (lf == NULL) { + Emsg0(M_ABORT, 0, _("Close of NULL file\n")); + } + Dmsg1(dbglvl, "Close lex file: %s\n", lf->fname); + + of = lf->next; + if (lf->bpipe) { + close_bpipe(lf->bpipe); + lf->bpipe = NULL; + } else if (lf->fd) { + fclose(lf->fd); + } + Dmsg1(dbglvl, "Close cfg file %s\n", lf->fname); + if (lf->fname) { + free(lf->fname); + } + free_memory(lf->line); + lf->line = NULL; + free_memory(lf->str); + lf->str = NULL; + if (of) { + of->options = lf->options; /* preserve options */ + memcpy(lf, of, sizeof(LEX)); + Dmsg1(dbglvl, "Restart scan of cfg file %s\n", of->fname); + } else { + of = lf; + lf = NULL; + } + if (of) { + free(of); + } + return lf; +} + +/* + * Open a configuration in memory buffer. We push the + * state of the current file (lf) so that we + * can do includes. This is a bit of a hammer. + * Instead of passing back the pointer to the + * new packet, I simply replace the contents + * of the caller's packet with the new packet, + * and link the contents of the old packet into + * the next field. + * + */ +LEX *lex_open_buf(LEX *lf, const char *buffer, LEX_ERROR_HANDLER *scan_error) + +{ + LEX *nf; + + Dmsg0(400, "Open config buffer\n"); + nf = (LEX *)malloc(sizeof(LEX)); + if (lf) { + memcpy(nf, lf, sizeof(LEX)); + memset(lf, 0, sizeof(LEX)); + lf->next = nf; /* if have lf, push it behind new one */ + lf->options = nf->options; /* preserve user options */ + /* + * preserve err_type to prevent bacula exiting on 'reload' + * if config is invalid. Fixes bug #877 + */ + lf->err_type = nf->err_type; + } else { + lf = nf; /* start new packet */ + memset(lf, 0, sizeof(LEX)); + lex_set_error_handler_error_type(lf, M_ERROR_TERM); + } + if (scan_error) { + lf->scan_error = scan_error; + } else { + lex_set_default_error_handler(lf); + } + lf->fd = NULL; + lf->bpipe = NULL; + lf->fname = NULL; + lf->line = get_memory(5000); + pm_strcpy(lf->line, buffer); + pm_strcat(lf->line, ""); + lf->state = lex_none; + lf->ch = 0; + lf->str = get_memory(5000); + return lf; +} + +/* + * Open a new configuration file. We push the + * state of the current file (lf) so that we + * can do includes. This is a bit of a hammer. + * Instead of passing back the pointer to the + * new packet, I simply replace the contents + * of the caller's packet with the new packet, + * and link the contents of the old packet into + * the next field. + * + */ +LEX *lex_open_file(LEX *lf, const char *filename, LEX_ERROR_HANDLER *scan_error) + +{ + LEX *nf; + FILE *fd; + BPIPE *bpipe = NULL; + char *fname = bstrdup(filename); + + if (fname[0] == '|') { + if ((bpipe = open_bpipe(fname+1, 0, "rb")) == NULL) { + free(fname); + return NULL; + } + fd = bpipe->rfd; + } else if ((fd = fopen(fname, "rb")) == NULL) { + free(fname); + return NULL; + } + Dmsg1(400, "Open config file: %s\n", fname); + nf = (LEX *)malloc(sizeof(LEX)); + if (lf) { + memcpy(nf, lf, sizeof(LEX)); + memset(lf, 0, sizeof(LEX)); + lf->next = nf; /* if have lf, push it behind new one */ + lf->options = nf->options; /* preserve user options */ + /* + * preserve err_type to prevent bacula exiting on 'reload' + * if config is invalid. 
Fixes bug #877 + */ + lf->err_type = nf->err_type; + } else { + lf = nf; /* start new packet */ + memset(lf, 0, sizeof(LEX)); + lex_set_error_handler_error_type(lf, M_ERROR_TERM); + } + if (scan_error) { + lf->scan_error = scan_error; + } else { + lex_set_default_error_handler(lf); + } + lf->fd = fd; + lf->bpipe = bpipe; + lf->fname = fname; + lf->line = get_memory(5000); + lf->state = lex_none; + lf->ch = L_EOL; + lf->str = get_memory(5000); + Dmsg1(dbglvl, "Return lex=%x\n", lf); + return lf; +} + +/* + * Get the next character from the input. + * Returns the character or + * L_EOF if end of file + * L_EOL if end of line + */ +int lex_get_char(LEX *lf) +{ + if (lf->ch == L_EOF) { + Emsg0(M_ABORT, 0, _("get_char: called after EOF." + " You may have a open double quote without the closing double quote.\n")); + } + if (lf->fd && lf->ch == L_EOL) { + if (bfgets(lf->line, lf->fd) == NULL) { + lf->ch = L_EOF; + if (lf->next) { + lex_close_file(lf); + } + return lf->ch; + } + lf->line_no++; + lf->col_no = 0; + Dmsg2(1000, "fget line=%d %s", lf->line_no, lf->line); + } else if (lf->ch == L_EOL) { + lf->line_no++; + lf->col_no++; + } + lf->ch = (uint8_t)lf->line[lf->col_no]; + if (lf->fd) { + if (lf->ch == 0) { + lf->ch = L_EOL; /* reached end of line, force bfgets */ + } else { + lf->col_no++; + } + } else { + if (lf->ch == 0) { /* End of buffer, stop scan */ + lf->ch = L_EOF; + if (lf->next) { + lex_close_file(lf); + } + return lf->ch; + } else if (lf->ch == '\n') { /* End of line */ + Dmsg0(dbglvl, "Found newline return L_EOL\n"); + lf->ch = L_EOL; + } else { + lf->col_no++; + } + } + Dmsg3(dbglvl, "lex_get_char: %c %d col=%d\n", lf->ch, lf->ch, lf->col_no); + return lf->ch; +} + +void lex_unget_char(LEX *lf) +{ + if (lf->ch == L_EOL) { + lf->ch = 0; /* End of line, force read of next one */ + } else { + lf->col_no--; /* Backup to re-read char */ + } +} + + +/* + * Add a character to the current string + */ +static void add_str(LEX *lf, int ch) +{ + if (lf->str_len >= sizeof_pool_memory(lf->str)) { + Emsg3(M_ERROR_TERM, 0, _( + _("Config token too long, file: %s, line %d, begins at line %d\n")), + lf->fname, lf->line_no, lf->begin_line_no); + } + lf->str[lf->str_len++] = ch; + lf->str[lf->str_len] = 0; +} + +/* + * Begin the string + */ +static void begin_str(LEX *lf, int ch) +{ + lf->str_len = 0; + lf->str[0] = 0; + if (ch != 0) { + add_str(lf, ch); + } + lf->begin_line_no = lf->line_no; /* save start string line no */ +} + +#ifdef DEBUG +static const char *lex_state_to_str(int state) +{ + switch (state) { + case lex_none: return _("none"); + case lex_comment: return _("comment"); + case lex_number: return _("number"); + case lex_ip_addr: return _("ip_addr"); + case lex_identifier: return _("identifier"); + case lex_string: return _("string"); + case lex_quoted_string: return _("quoted_string"); + case lex_include: return _("include"); + case lex_include_quoted_string: return _("include_quoted_string"); + case lex_utf8_bom: return _("UTF-8 Byte Order Mark"); + case lex_utf16_le_bom: return _("UTF-16le Byte Order Mark"); + default: return "??????"; + } +} +#endif + +/* + * Convert a lex token to a string + * used for debug/error printing. 
+ */ +const char *lex_tok_to_str(int token) +{ + switch(token) { + case L_EOF: return "L_EOF"; + case L_EOL: return "L_EOL"; + case T_NONE: return "T_NONE"; + case T_NUMBER: return "T_NUMBER"; + case T_IPADDR: return "T_IPADDR"; + case T_IDENTIFIER: return "T_IDENTIFIER"; + case T_UNQUOTED_STRING: return "T_UNQUOTED_STRING"; + case T_QUOTED_STRING: return "T_QUOTED_STRING"; + case T_BOB: return "T_BOB"; + case T_EOB: return "T_EOB"; + case T_EQUALS: return "T_EQUALS"; + case T_ERROR: return "T_ERROR"; + case T_EOF: return "T_EOF"; + case T_COMMA: return "T_COMMA"; + case T_EOL: return "T_EOL"; + case T_UTF8_BOM: return "T_UTF8_BOM"; + case T_UTF16_BOM: return "T_UTF16_BOM"; + default: return "??????"; + } +} + +static uint32_t scan_pint(LEX *lf, char *str) +{ + int64_t val = 0; + if (!is_a_number(str)) { + scan_err1(lf, _("expected a positive integer number, got: %s"), str); + /* NOT REACHED */ + } else { + errno = 0; + val = str_to_int64(str); + if (errno != 0 || val < 0) { + scan_err1(lf, _("expected a positive integer number, got: %s"), str); + /* NOT REACHED */ + } + } + return (uint32_t)val; +} + +static uint64_t scan_pint64(LEX *lf, char *str) +{ + uint64_t val = 0; + if (!is_a_number(str)) { + scan_err1(lf, _("expected a positive integer number, got: %s"), str); + /* NOT REACHED */ + } else { + errno = 0; + val = str_to_uint64(str); + if (errno != 0) { + scan_err1(lf, _("expected a positive integer number, got: %s"), str); + /* NOT REACHED */ + } + } + return val; +} + +/* + * + * Get the next token from the input + * + */ +int +lex_get_token(LEX *lf, int expect) +{ + int ch, nch; + int token = T_NONE; + bool esc_next = false; + /* Unicode files, especially on Win32, may begin with a "Byte Order Mark" + to indicate which transmission format the file is in. The codepoint for + this mark is U+FEFF and is represented as the octets EF-BB-BF in UTF-8 + and as FF-FE in UTF-16le(little endian) and FE-FF in UTF-16(big endian). + We use a distinct state for UTF-8 and UTF-16le, and use bom_bytes_seen + to tell which byte we are expecting. 
*/ + int bom_bytes_seen = 0; + + Dmsg1(dbglvl, "enter lex_get_token state=%s\n", lex_state_to_str(lf->state)); + while (token == T_NONE) { + ch = lex_get_char(lf); + switch (lf->state) { + case lex_none: + Dmsg2(dbglvl, "Lex state lex_none ch=%c,%d\n", ch, ch); + if (B_ISSPACE(ch)) + break; + if (B_ISALPHA(ch)) { + if (lf->options & LOPT_NO_IDENT || lf->options & LOPT_STRING) { + lf->state = lex_string; + } else { + lf->state = lex_identifier; + } + begin_str(lf, ch); + break; + } + if (B_ISDIGIT(ch)) { + if (lf->options & LOPT_STRING) { + lf->state = lex_string; + } else { + lf->state = lex_number; + } + begin_str(lf, ch); + break; + } + Dmsg0(dbglvl, "Enter lex_none switch\n"); + switch (ch) { + case L_EOF: + token = T_EOF; + Dmsg0(dbglvl, "got L_EOF set token=T_EOF\n"); + break; + case '\\': + nch = lex_get_char(lf); + if (nch == ' ' || nch == '\n' || nch == '\r' || nch == L_EOL) { + lf->ch = L_EOL; /* force end of line */ + } + break; + case '#': + lf->state = lex_comment; + break; + case '{': + token = T_BOB; + begin_str(lf, ch); + break; + case '}': + token = T_EOB; + begin_str(lf, ch); + break; + case '"': + lf->state = lex_quoted_string; + begin_str(lf, 0); + break; + case '=': + token = T_EQUALS; + begin_str(lf, ch); + break; + case ',': + token = T_COMMA; + begin_str(lf, ch); + break; + case ';': + if (expect != T_SKIP_EOL) { + token = T_EOL; /* treat ; like EOL */ + } + break; + case L_EOL: + Dmsg0(dbglvl, "got L_EOL set token=T_EOL\n"); + if (expect != T_SKIP_EOL) { + token = T_EOL; + } + break; + case '@': + /* In NO_EXTERN mode, @ is part of a string */ + if (lf->options & LOPT_NO_EXTERN) { + lf->state = lex_string; + begin_str(lf, ch); + } else { + lf->state = lex_include; + begin_str(lf, 0); + } + break; + case 0xEF: /* probably a UTF-8 BOM */ + case 0xFF: /* probably a UTF-16le BOM */ + case 0xFE: /* probably a UTF-16be BOM (error)*/ + if (lf->line_no != 1 || lf->col_no != 1) + { + lf->state = lex_string; + begin_str(lf, ch); + } else { + bom_bytes_seen = 1; + if (ch == 0xEF) { + lf->state = lex_utf8_bom; + } else if (ch == 0xFF) { + lf->state = lex_utf16_le_bom; + } else { + scan_err0(lf, _("This config file appears to be in an " + "unsupported Unicode format (UTF-16be). 
Please resave as UTF-8\n")); + return T_ERROR; + } + } + break; + default: + lf->state = lex_string; + begin_str(lf, ch); + break; + } + break; + case lex_comment: + Dmsg1(dbglvl, "Lex state lex_comment ch=%x\n", ch); + if (ch == L_EOL) { + lf->state = lex_none; + if (expect != T_SKIP_EOL) { + token = T_EOL; + } + } else if (ch == L_EOF) { + token = T_ERROR; + } + break; + case lex_number: + Dmsg2(dbglvl, "Lex state lex_number ch=%x %c\n", ch, ch); + if (ch == L_EOF) { + token = T_ERROR; + break; + } + /* Might want to allow trailing specifications here */ + if (B_ISDIGIT(ch)) { + add_str(lf, ch); + break; + } + + /* A valid number can be terminated by the following */ + if (B_ISSPACE(ch) || ch == L_EOL || ch == ',' || ch == ';') { + token = T_NUMBER; + lf->state = lex_none; + } else { + lf->state = lex_string; + } + lex_unget_char(lf); + break; + case lex_ip_addr: + if (ch == L_EOF) { + token = T_ERROR; + break; + } + Dmsg1(dbglvl, "Lex state lex_ip_addr ch=%x\n", ch); + break; + case lex_string: + Dmsg1(dbglvl, "Lex state lex_string ch=%x\n", ch); + if (ch == L_EOF) { + token = T_ERROR; + break; + } + if (ch == '\n' || ch == L_EOL || ch == '=' || ch == '}' || ch == '{' || + ch == '\r' || ch == ';' || ch == ',' || ch == '#' || (B_ISSPACE(ch)) ) { + lex_unget_char(lf); + token = T_UNQUOTED_STRING; + lf->state = lex_none; + break; + } + add_str(lf, ch); + break; + case lex_identifier: + Dmsg2(dbglvl, "Lex state lex_identifier ch=%x %c\n", ch, ch); + if (B_ISALPHA(ch)) { + add_str(lf, ch); + break; + } else if (B_ISSPACE(ch)) { + break; + } else if (ch == '\n' || ch == L_EOL || ch == '=' || ch == '}' || ch == '{' || + ch == '\r' || ch == ';' || ch == ',' || ch == '"' || ch == '#') { + lex_unget_char(lf); + token = T_IDENTIFIER; + lf->state = lex_none; + break; + } else if (ch == L_EOF) { + token = T_ERROR; + lf->state = lex_none; + begin_str(lf, ch); + break; + } + /* Some non-alpha character => string */ + lf->state = lex_string; + add_str(lf, ch); + break; + case lex_quoted_string: + Dmsg2(dbglvl, "Lex state lex_quoted_string ch=%x %c\n", ch, ch); + if (ch == L_EOF) { + token = T_ERROR; + break; + } + if (ch == L_EOL) { + esc_next = false; + break; + } + if (esc_next) { + add_str(lf, ch); + esc_next = false; + break; + } + if (ch == '\\') { + esc_next = true; + break; + } + if (ch == '"') { + token = T_QUOTED_STRING; + /* + * Since we may be scanning a quoted list of names, + * we get the next character (a comma indicates another + * one), then we put it back for rescanning. + */ + lex_get_char(lf); + lex_unget_char(lf); + lf->state = lex_none; + break; + } + add_str(lf, ch); + break; + case lex_include_quoted_string: + if (ch == L_EOF) { + token = T_ERROR; + break; + } + if (esc_next) { + add_str(lf, ch); + esc_next = false; + break; + } + if (ch == '\\') { + esc_next = true; + break; + } + if (ch == '"') { + /* Keep the original LEX so we can print an error if the included file can't be opened. 
*/ + LEX* lfori = lf; + /* Skip the double quote when restarting parsing */ + lex_get_char(lf); + + lf->state = lex_none; + lf = lex_open_file(lf, lf->str, lf->scan_error); + if (lf == NULL) { + berrno be; + scan_err2(lfori, _("Cannot open included config file %s: %s\n"), + lfori->str, be.bstrerror()); + return T_ERROR; + } + break; + } + add_str(lf, ch); + break; + case lex_include: /* scanning a filename */ + if (ch == L_EOF) { + token = T_ERROR; + break; + } + if (ch == '"') { + lf->state = lex_include_quoted_string; + break; + } + + + if (B_ISSPACE(ch) || ch == '\n' || ch == L_EOL || ch == '}' || ch == '{' || + ch == ';' || ch == ',' || ch == '"' || ch == '#') { + /* Keep the original LEX so we can print an error if the included file can't be opened. */ + LEX* lfori = lf; + + lf->state = lex_none; + lf = lex_open_file(lf, lf->str, lf->scan_error); + if (lf == NULL) { + berrno be; + scan_err2(lfori, _("Cannot open included config file %s: %s\n"), + lfori->str, be.bstrerror()); + return T_ERROR; + } + break; + } + add_str(lf, ch); + break; + case lex_utf8_bom: + /* we only end up in this state if we have read an 0xEF + as the first byte of the file, indicating we are probably + reading a UTF-8 file */ + if (ch == 0xBB && bom_bytes_seen == 1) { + bom_bytes_seen++; + } else if (ch == 0xBF && bom_bytes_seen == 2) { + token = T_UTF8_BOM; + lf->state = lex_none; + } else { + token = T_ERROR; + } + break; + case lex_utf16_le_bom: + /* we only end up in this state if we have read an 0xFF + as the first byte of the file -- indicating that we are + probably dealing with an Intel based (little endian) UTF-16 file*/ + if (ch == 0xFE) { + token = T_UTF16_BOM; + lf->state = lex_none; + } else { + token = T_ERROR; + } + break; + } + Dmsg4(dbglvl, "ch=%d state=%s token=%s %c\n", ch, lex_state_to_str(lf->state), + lex_tok_to_str(token), ch); + } + Dmsg2(dbglvl, "lex returning: line %d token: %s\n", lf->line_no, lex_tok_to_str(token)); + lf->token = token; + + /* + * Here is where we check to see if the user has set certain + * expectations (e.g. 32 bit integer). If so, we do type checking + * and possible additional scanning (e.g. for range). 
+ */ + switch (expect) { + case T_PINT32: + lf->pint32_val = scan_pint(lf, lf->str); + lf->pint32_val2 = lf->pint32_val; + token = T_PINT32; + break; + + case T_PINT32_RANGE: + if (token == T_NUMBER) { + lf->pint32_val = scan_pint(lf, lf->str); + lf->pint32_val2 = lf->pint32_val; + token = T_PINT32; + } else { + char *p = strchr(lf->str, '-'); + if (!p) { + scan_err2(lf, _("expected an integer or a range, got %s: %s"), + lex_tok_to_str(token), lf->str); + token = T_ERROR; + break; + } + *p++ = 0; /* terminate first half of range */ + lf->pint32_val = scan_pint(lf, lf->str); + lf->pint32_val2 = scan_pint(lf, p); + token = T_PINT32_RANGE; + } + break; + + case T_INT32: + if (token != T_NUMBER || !is_a_number(lf->str)) { + scan_err2(lf, _("expected an integer number, got %s: %s"), + lex_tok_to_str(token), lf->str); + token = T_ERROR; + break; + } + errno = 0; + lf->int32_val = (int32_t)str_to_int64(lf->str); + if (errno != 0) { + scan_err2(lf, _("expected an integer number, got %s: %s"), + lex_tok_to_str(token), lf->str); + token = T_ERROR; + } else { + token = T_INT32; + } + break; + + case T_INT64: + Dmsg2(dbglvl, "int64=:%s: %f\n", lf->str, strtod(lf->str, NULL)); + if (token != T_NUMBER || !is_a_number(lf->str)) { + scan_err2(lf, _("expected an integer number, got %s: %s"), + lex_tok_to_str(token), lf->str); + token = T_ERROR; + break; + } + errno = 0; + lf->int64_val = str_to_int64(lf->str); + if (errno != 0) { + scan_err2(lf, _("expected an integer number, got %s: %s"), + lex_tok_to_str(token), lf->str); + token = T_ERROR; + } else { + token = T_INT64; + } + break; + + case T_PINT64_RANGE: + if (token == T_NUMBER) { + lf->pint64_val = scan_pint64(lf, lf->str); + lf->pint64_val2 = lf->pint64_val; + token = T_PINT64; + } else { + char *p = strchr(lf->str, '-'); + if (!p) { + scan_err2(lf, _("expected an integer or a range, got %s: %s"), + lex_tok_to_str(token), lf->str); + token = T_ERROR; + break; + } + *p++ = 0; /* terminate first half of range */ + lf->pint64_val = scan_pint64(lf, lf->str); + lf->pint64_val2 = scan_pint64(lf, p); + token = T_PINT64_RANGE; + } + break; + + case T_NAME: + if (token != T_IDENTIFIER && token != T_UNQUOTED_STRING && token != T_QUOTED_STRING) { + scan_err2(lf, _("expected a name, got %s: %s"), + lex_tok_to_str(token), lf->str); + token = T_ERROR; + } else if (lf->str_len > MAX_RES_NAME_LENGTH) { + scan_err3(lf, _("name %s length %d too long, max is %d\n"), lf->str, + lf->str_len, MAX_RES_NAME_LENGTH); + token = T_ERROR; + } + break; + + case T_STRING: + if (token != T_IDENTIFIER && token != T_UNQUOTED_STRING && token != T_QUOTED_STRING) { + scan_err2(lf, _("expected a string, got %s: %s"), + lex_tok_to_str(token), lf->str); + token = T_ERROR; + } else { + token = T_STRING; + } + break; + + + default: + break; /* no expectation given */ + } + lf->token = token; /* set possible new token */ + return token; +} diff --git a/src/lib/lex.h b/src/lib/lex.h new file mode 100644 index 00000000..ae73ad13 --- /dev/null +++ b/src/lib/lex.h @@ -0,0 +1,135 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. 
+ + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * lex.h + * + * Lexical scanning of configuration files, used by parsers. + * + * Kern Sibbald, MM + * + */ + +#ifndef _LEX_H +#define _LEX_H + +/* Lex get_char() return values */ +#define L_EOF (-1) +#define L_EOL (-2) + +/* Internal tokens */ +#define T_NONE 100 + +/* Tokens returned by get_token() */ +#define T_EOF 101 +#define T_NUMBER 102 +#define T_IPADDR 103 +#define T_IDENTIFIER 104 +#define T_UNQUOTED_STRING 105 +#define T_QUOTED_STRING 106 +#define T_BOB 108 /* begin block */ +#define T_EOB 109 /* end of block */ +#define T_EQUALS 110 +#define T_COMMA 111 +#define T_EOL 112 +#define T_ERROR 200 +#define T_UTF8_BOM 201 /* File starts with a UTF-8 BOM*/ +#define T_UTF16_BOM 202 /* File starts with a UTF-16LE BOM*/ + +/* + * The following will be returned only if + * the appropriate expect flag has been set + */ +#define T_SKIP_EOL 113 /* scan through EOLs */ +#define T_PINT32 114 /* positive integer */ +#define T_PINT32_RANGE 115 /* positive integer range */ +#define T_INT32 116 /* integer */ +#define T_INT64 117 /* 64 bit integer */ +#define T_NAME 118 /* name max 128 chars */ +#define T_STRING 119 /* string */ +#define T_PINT64_RANGE 120 /* positive integer range */ +#define T_PINT64 121 /* positive integer range */ + +#define T_ALL 0 /* no expectations */ + +/* Lexical state */ +enum lex_state { + lex_none, + lex_comment, + lex_number, + lex_ip_addr, + lex_identifier, + lex_string, + lex_quoted_string, + lex_include_quoted_string, + lex_include, + lex_utf8_bom, /* we are parsing out a utf8 byte order mark */ + lex_utf16_le_bom /* we are parsing out a utf-16 (little endian) byte order mark */ +}; + +/* Lex scan options */ +#define LOPT_NO_IDENT 0x1 /* No Identifiers -- use string */ +#define LOPT_STRING 0x2 /* Force scan for string */ +#define LOPT_NO_EXTERN 0x4 /* Don't follow @ command */ +#define LOPT_NO_MD5 0x8 /* Do not encode passwords with MD5 */ + +class BPIPE; /* forward reference */ + +/* Lexical context */ +typedef struct s_lex_context { + struct s_lex_context *next; /* pointer to next lexical context */ + int options; /* scan options */ + char *fname; /* filename */ + FILE *fd; /* file descriptor */ + POOLMEM *line; /* input line */ + POOLMEM *str; /* string being scanned */ + int str_len; /* length of string */ + int line_no; /* file line number */ + int col_no; /* char position on line */ + int begin_line_no; /* line no of beginning of string */ + enum lex_state state; /* lex_state variable */ + int ch; /* last char/L_VAL returned by get_char */ + int token; + uint32_t pint32_val; + uint32_t pint32_val2; + int32_t int32_val; + int64_t int64_val; + uint64_t pint64_val; + uint64_t pint64_val2; + void (*scan_error)(const char *file, int line, struct s_lex_context *lc, const char *msg, ...); + int err_type; /* message level for scan_error (M_..) 
*/ + void *caller_ctx; /* caller private data */ + BPIPE *bpipe; /* set if we are piping */ +} LEX; + +typedef void (LEX_ERROR_HANDLER)(const char *file, int line, LEX *lc, const char *msg, ...); + +/* Lexical scanning errors in parsing conf files */ +#define scan_err0(lc, msg) lc->scan_error(__FILE__, __LINE__, lc, msg) +#define scan_err1(lc, msg, a1) lc->scan_error(__FILE__, __LINE__, lc, msg, a1) +#define scan_err2(lc, msg, a1, a2) lc->scan_error(__FILE__, __LINE__, lc, msg, a1, a2) +#define scan_err3(lc, msg, a1, a2, a3) lc->scan_error(__FILE__, __LINE__, lc, msg, a1, a2, a3) +#define scan_err4(lc, msg, a1, a2, a3, a4) lc->scan_error(__FILE__, __LINE__, lc, msg, a1, a2, a3, a4) +#define scan_err5(lc, msg, a1, a2, a3, a4, a5) lc->scan_error(__FILE__, __LINE__, lc, msg, a1, a2, a3, a4, a5) +#define scan_err6(lc, msg, a1, a2, a3, a4, a5, a6) lc->scan_error(__FILE__, __LINE__, lc, msg, a1, a2, a3, a4, a5, a6) + +void scan_to_eol(LEX *lc); +int scan_to_next_not_eol(LEX * lc); +void lex_store_clear_passwords(LEX *lf); + +#endif /* _LEX_H */ diff --git a/src/lib/lib.h b/src/lib/lib.h new file mode 100644 index 00000000..11f970c6 --- /dev/null +++ b/src/lib/lib.h @@ -0,0 +1,70 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Library includes for Bacula lib directory + * + * This file contains an include for each library file + * that we use within Bacula. bacula.h includes this + * file and thus picks up everything we need in lib. + * + */ + +#include "smartall.h" +#include "lockmgr.h" +#include "alist.h" +#include "dlist.h" +#include "flist.h" +#include "rblist.h" +#include "worker.h" +#include "base64.h" +#include "bits.h" +#include "btime.h" +#include "crypto.h" +#include "mem_pool.h" +#include "rwlock.h" +#include "queue.h" +#include "serial.h" +#include "message.h" +#include "openssl.h" +#include "lex.h" +#include "parse_conf.h" +#include "bjson.h" +#include "tls.h" +#include "address_conf.h" +#include "bsockcore.h" +#include "bsock.h" +#include "workq.h" +#ifndef HAVE_FNMATCH +#include "fnmatch.h" +#endif +#include "md5.h" +#include "sha1.h" +#include "tree.h" +#include "watchdog.h" +#include "btimers.h" +#include "berrno.h" +#include "bpipe.h" +#include "attr.h" +#include "var.h" +#include "guid_to_name.h" +#include "htable.h" +#include "sellist.h" +#include "output.h" +#include "protos.h" +#include "bget_msg.h" diff --git a/src/lib/lockmgr.c b/src/lib/lockmgr.c new file mode 100644 index 00000000..3ec1c2e5 --- /dev/null +++ b/src/lib/lockmgr.c @@ -0,0 +1,1663 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
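A minimal consumer of the lexer above might look like the following sketch. It is illustrative only: lex_open_file() and lex_close_file() are assumed to be declared earlier in lex.c/lex.h (only lex_get_token(), lex_tok_to_str(), the token constants and the LEX fields shown above come from this patch), and dump_tokens()/my_scan_error() are invented names.

   #include "bacula.h"   /* bacula.h pulls in lib.h, which includes lex.h */

   /* Error callback matching the LEX_ERROR_HANDLER typedef in lex.h.
    * A real handler would format msg with the va_list; this one only
    * reports where in the config file the scanner gave up. */
   static void my_scan_error(const char *file, int line, LEX *lc, const char *msg, ...)
   {
      fprintf(stderr, "Config error in %s at line %d (reported from %s:%d)\n",
              lc->fname, lc->line_no, file, line);
   }

   /* Print every token of a configuration file (hypothetical helper). */
   void dump_tokens(const char *fname)
   {
      LEX *lc = lex_open_file(NULL, fname, my_scan_error);   /* assumed API */
      if (!lc) {
         return;                        /* file could not be opened */
      }
      int tok;
      while ((tok = lex_get_token(lc, T_ALL)) != T_EOF && tok != T_ERROR) {
         printf("%-18s %s\n", lex_tok_to_str(tok), lc->str);
      }
      lex_close_file(lc);               /* assumed counterpart of lex_open_file() */
   }

Passing an expect value such as T_PINT32_RANGE instead of T_ALL makes lex_get_token() also validate the value and fill pint32_val/pint32_val2, as in the switch at the end of lex_get_token() above.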
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + How to use mutex with bad order usage detection + ------------------------------------------------ + + Note: see file mutex_list.h for current mutexes with + defined priorities. + + Instead of using: + pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; + P(mutex); + .. + V(mutex); + + use: + bthread_mutex_t mutex = BTHREAD_MUTEX_PRIORITY(1); + P(mutex); + ... + V(mutex); + + Mutex that doesn't need this extra check can be declared as pthread_mutex_t. + You can use this object on pthread_mutex_lock/unlock/cond_wait/cond_timewait. + + With dynamic creation, you can use: + bthread_mutex_t mutex; + pthread_mutex_init(&mutex); + bthread_mutex_set_priority(&mutex, 10); + pthread_mutex_destroy(&mutex); + + */ + +#define LOCKMGR_COMPLIANT +#include "bacula.h" + +#undef ASSERT +#define ASSERT(x) if (!(x)) { \ + char *jcr = NULL; \ + Pmsg3(000, _("ASSERT failed at %s:%i: %s\n"), __FILE__, __LINE__, #x); \ + jcr[0] = 0; } + +#define ASSERT_p(x,f,l) if (!(x)) { \ + char *jcr = NULL; \ + Pmsg3(000, _("ASSERT failed at %s:%i: %s \n"), f, l, #x); \ + jcr[0] = 0; } + +#define ASSERT2_p(x,m,f,l) if (!(x)) { \ + char *jcr = NULL; \ + set_assert_msg(f, l, m); \ + Pmsg4(000, _("ASSERT failed at %s:%i: %s (%s)\n"), f, l, #x, m); \ + jcr[0] = 0; } + +/* for lockmgr unit tests we have to clean up developer flags and asserts which breaks our tests */ +#ifdef TEST_PROGRAM +#ifdef DEVELOPER +#undef DEVELOPER +#endif +#ifdef ASSERTD +#undef ASSERTD +#define ASSERTD(x, y) +#endif +#endif + +/* + Inspired from + http://www.cs.berkeley.edu/~kamil/teaching/sp03/041403.pdf + + This lock manager will replace some pthread calls. It can be + enabled with USE_LOCKMGR + + Some part of the code can't use this manager, for example the + rwlock object or the smartalloc lib. To disable LMGR, just add + LOCKMGR_COMPLIANT before the inclusion of "bacula.h" + + cd build/src/lib + g++ -g -c lockmgr.c -I.. -I../lib -DUSE_LOCKMGR -DTEST_PROGRAM + g++ -o lockmgr lockmgr.o -lbac -L../lib/.libs -lssl -lpthread + +*/ + +#define DBGLEVEL_EVENT 50 + +/* + * pthread_mutex_lock for memory allocator and other + * parts that are LOCKMGR_COMPLIANT + */ +void lmgr_p(pthread_mutex_t *m) +{ + int errstat; + if ((errstat=pthread_mutex_lock(m))) { + berrno be; + e_msg(__FILE__, __LINE__, M_ABORT, 0, _("Mutex lock failure. ERR=%s\n"), + be.bstrerror(errstat)); + } +} + +void lmgr_v(pthread_mutex_t *m) +{ + int errstat; + if ((errstat=pthread_mutex_unlock(m))) { + berrno be; + e_msg(__FILE__, __LINE__, M_ABORT, 0, _("Mutex unlock failure. ERR=%s\n"), + be.bstrerror(errstat)); + } +} + +#ifdef USE_LOCKMGR + +typedef enum +{ + LMGR_WHITE, /* never seen */ + LMGR_BLACK, /* no loop */ + LMGR_GRAY /* already seen */ +} lmgr_color_t; + +/* + * Node used by the Lock Manager + * If the lock is GRANTED, we have mutex -> proc, else it's a proc -> mutex + * relation. + * + * Note, each mutex can be GRANTED once, and each proc can have only one WANTED + * mutex. 
+ */ +class lmgr_node_t: public SMARTALLOC +{ +public: + dlink link; + void *node; + void *child; + lmgr_color_t seen; + + lmgr_node_t() { + child = node = NULL; + seen = LMGR_WHITE; + } + + lmgr_node_t(void *n, void *c) { + init(n,c); + } + + void init(void *n, void *c) { + node = n; + child = c; + seen = LMGR_WHITE; + } + + void mark_as_seen(lmgr_color_t c) { + seen = c; + } + + ~lmgr_node_t() {printf("delete node\n");} +}; + +typedef enum { + LMGR_LOCK_EMPTY = 'E', /* unused */ + LMGR_LOCK_WANTED = 'W', /* before mutex_lock */ + LMGR_LOCK_GRANTED = 'G' /* after mutex_lock */ +} lmgr_state_t; + +/* + * Object associated with each mutex per thread + */ +class lmgr_lock_t: public SMARTALLOC +{ +public: + dlink link; + void *lock; /* Link to the mutex (or any value) */ + lmgr_state_t state; + int max_priority; + int priority; /* Current node priority */ + + const char *file; + int line; + + lmgr_lock_t() { + lock = NULL; + state = LMGR_LOCK_EMPTY; + priority = max_priority = 0; + } + + lmgr_lock_t(void *l) { + lock = l; + state = LMGR_LOCK_WANTED; + } + + void set_granted() { + state = LMGR_LOCK_GRANTED; + } + + ~lmgr_lock_t() {} + +}; + +/* + * Get the child list, ret must be already allocated + */ +static void search_all_node(dlist *g, lmgr_node_t *v, alist *ret) +{ + lmgr_node_t *n; + foreach_dlist(n, g) { + if (v->child == n->node) { + ret->append(n); + } + } +} + +static bool visit(dlist *g, lmgr_node_t *v) +{ + bool ret=false; + lmgr_node_t *n; + v->mark_as_seen(LMGR_GRAY); + + alist *d = New(alist(5, false)); /* use alist because own=false */ + search_all_node(g, v, d); + + //foreach_alist(n, d) { + // printf("node n=%p c=%p s=%c\n", n->node, n->child, n->seen); + //} + + foreach_alist(n, d) { + if (n->seen == LMGR_GRAY) { /* already seen this node */ + ret = true; + goto bail_out; + } else if (n->seen == LMGR_WHITE) { + if (visit(g, n)) { + ret = true; + goto bail_out; + } + } + } + v->mark_as_seen(LMGR_BLACK); /* no loop detected, node is clean */ +bail_out: + delete d; + return ret; +} + +static bool contains_cycle(dlist *g) +{ + lmgr_node_t *n; + foreach_dlist(n, g) { + if (n->seen == LMGR_WHITE) { + if (visit(g, n)) { + return true; + } + } + } + return false; +} + +/****************************************************************/ + +/* lmgr_thread_event struct, some call can add events, and they will + * be dumped during a lockdump + */ +typedef struct +{ + int32_t id; /* Id of the event */ + int32_t global_id; /* Current global id */ + int32_t flags; /* Flags for this event */ + + int32_t line; /* from which line in filename */ + const char *from; /* From where in the code (filename) */ + + char *comment; /* Comment */ + intptr_t user_data; /* Optionnal user data (will print address) */ + +} lmgr_thread_event; + +static int32_t global_event_id=0; + +static int global_int_thread_id=0; /* Keep an integer for each thread */ + +/* Keep this number of event per thread */ +#ifdef TEST_PROGRAM +# define LMGR_THREAD_EVENT_MAX 15 +#else +# define LMGR_THREAD_EVENT_MAX 1024 +#endif + +#define lmgr_thread_event_get_pos(x) ((x) % LMGR_THREAD_EVENT_MAX) + +class lmgr_thread_t: public SMARTALLOC +{ +public: + dlink link; + pthread_mutex_t mutex; + pthread_t thread_id; + intptr_t int_thread_id; + lmgr_lock_t lock_list[LMGR_MAX_LOCK]; + int current; + int max; + int max_priority; + + lmgr_thread_event events[LMGR_THREAD_EVENT_MAX]; + int event_id; + + lmgr_thread_t() { + int status; + if ((status = pthread_mutex_init(&mutex, NULL)) != 0) { + berrno be; + Pmsg1(000, _("pthread key create 
failed: ERR=%s\n"), + be.bstrerror(status)); + ASSERT2(0, "pthread_mutex_init failed"); + } + event_id = 0; + thread_id = pthread_self(); + current = -1; + max = 0; + max_priority = 0; + } + + /* Add event to the event list of the thread */ + void add_event(const char *comment, intptr_t user_data, int32_t flags, + const char *from, int32_t line) + { + char *p; + int32_t oldflags; + int i = lmgr_thread_event_get_pos(event_id); + + oldflags = events[i].flags; + p = events[i].comment; + events[i].flags = LMGR_EVENT_INVALID; + events[i].comment = (char *)"*Freed*"; + + /* Shared between thread, just an indication about timing */ + events[i].global_id = global_event_id++; + events[i].id = event_id; + events[i].line = line; + events[i].from = from; + + /* It means we are looping over the ring, so we need + * to check if the memory need to be freed + */ + if (event_id >= LMGR_THREAD_EVENT_MAX) { + if (oldflags & LMGR_EVENT_FREE) { + free(p); + } + } + + /* We need to copy the memory */ + if (flags & LMGR_EVENT_DUP) { + events[i].comment = bstrdup(comment); + flags |= LMGR_EVENT_FREE; /* force the free */ + + } else { + events[i].comment = (char *)comment; + } + events[i].user_data = user_data; + events[i].flags = flags; /* mark it valid */ + event_id++; + } + + void free_event_list() { + /* We first check how far we go in the event list */ + int max = MIN(event_id, LMGR_THREAD_EVENT_MAX); + char *p; + + for (int i = 0; i < max ; i++) { + if (events[i].flags & LMGR_EVENT_FREE) { + p = events[i].comment; + events[i].flags = LMGR_EVENT_INVALID; + events[i].comment = (char *)"*Freed*"; + free(p); + } + } + } + + void print_event(lmgr_thread_event *ev, FILE *fp) { + if (ev->flags & LMGR_EVENT_INVALID) { + return; + } + fprintf(fp, " %010d id=%010d %s data=%p at %s:%d\n", + ev->global_id, + ev->id, + NPRT(ev->comment), + (void *)ev->user_data, + ev->from, + ev->line); + } + + void _dump(FILE *fp) { +#ifdef HAVE_WIN32 + fprintf(fp, "thread_id=%p int_threadid=%p max=%i current=%i\n", + (void *)(intptr_t)GetCurrentThreadId(), (void *)int_thread_id, max, current); +#else + fprintf(fp, "threadid=%p max=%i current=%i\n", + (void *)thread_id, max, current); +#endif + for(int i=0; i<=current; i++) { + fprintf(fp, " lock=%p state=%s priority=%i %s:%i\n", + lock_list[i].lock, + (lock_list[i].state=='W')?"Wanted ":"Granted", + lock_list[i].priority, + lock_list[i].file, lock_list[i].line); + } + + if (debug_flags & DEBUG_PRINT_EVENT) { + /* Debug events */ + fprintf(fp, " events:\n"); + + /* Display events between (event_id % LMGR_THREAD_EVENT_MAX) and LMGR_THREAD_EVENT_MAX */ + if (event_id > LMGR_THREAD_EVENT_MAX) { + for (int i = event_id % LMGR_THREAD_EVENT_MAX ; i < LMGR_THREAD_EVENT_MAX ; i++) + { + print_event(&events[i], fp); + } + } + + /* Display events between 0 and event_id % LMGR_THREAD_EVENT_MAX*/ + for (int i = 0 ; i < (event_id % LMGR_THREAD_EVENT_MAX) ; i++) + { + print_event(&events[i], fp); + } + } + } + + void dump(FILE *fp) { + lmgr_p(&mutex); + { + _dump(fp); + } + lmgr_v(&mutex); + } + + /* + * Call before a lock operation (mark mutex as WANTED) + */ + virtual void pre_P(void *m, int priority, + const char *f="*unknown*", int l=0) + { + int max_prio = max_priority; + + if (chk_dbglvl(DBGLEVEL_EVENT) && debug_flags & DEBUG_MUTEX_EVENT) { + /* Keep track of this event */ + add_event("P()", (intptr_t)m, 0, f, l); + } + + /* Fail if too many locks in use */ + ASSERT2_p(current < LMGR_MAX_LOCK, "Too many locks in use", f, l); + /* Fail if the "current" value is out of bounds */ + 
ASSERT2_p(current >= -1, "current lock value is out of bounds", f, l); + lmgr_p(&mutex); + { + current++; + lock_list[current].lock = m; + lock_list[current].state = LMGR_LOCK_WANTED; + lock_list[current].file = f; + lock_list[current].line = l; + lock_list[current].priority = priority; + lock_list[current].max_priority = MAX(priority, max_priority); + max = MAX(current, max); + max_priority = MAX(priority, max_priority); + } + lmgr_v(&mutex); + + /* Fail if we tried to lock a mutex with a lower priority than + * the current value. It means that you need to lock mutex in a + * different order to ensure that the priority field is always + * increasing. The mutex priority list is defined in mutex_list.h. + * + * Look the *.lockdump generated to get the list of all mutexes, + * and where they were granted to find the priority problem. + */ + ASSERT2_p(!priority || priority >= max_prio, + "Mutex priority problem found, locking done in wrong order", + f, l); + } + + /* + * Call after the lock operation (mark mutex as GRANTED) + */ + virtual void post_P() { + ASSERT2(current >= 0, "Lock stack when negative"); + ASSERT(lock_list[current].state == LMGR_LOCK_WANTED); + lock_list[current].state = LMGR_LOCK_GRANTED; + } + + /* Using this function is some sort of bug */ + void shift_list(int i) { + for(int j=i+1; j<=current; j++) { + lock_list[i] = lock_list[j]; + } + if (current >= 0) { + lock_list[current].lock = NULL; + lock_list[current].state = LMGR_LOCK_EMPTY; + } + /* rebuild the priority list */ + max_priority = 0; + for(int j=0; j< current; j++) { + max_priority = MAX(lock_list[j].priority, max_priority); + lock_list[j].max_priority = max_priority; + } + } + + /* + * Remove the mutex from the list + */ + virtual void do_V(void *m, const char *f="*unknown*", int l=0) { + int old_current = current; + + /* Keep track of this event */ + if (chk_dbglvl(DBGLEVEL_EVENT) && debug_flags & DEBUG_MUTEX_EVENT) { + add_event("V()", (intptr_t)m, 0, f, l); + } + + ASSERT2_p(current >= 0, "No previous P found, the mutex list is empty", f, l); + lmgr_p(&mutex); + { + if (lock_list[current].lock == m) { + lock_list[current].lock = NULL; + lock_list[current].state = LMGR_LOCK_EMPTY; + current--; + } else { + Pmsg3(0, "ERROR: V out of order lock=%p %s:%i dumping locks...\n", m, f, l); + Pmsg4(000, " wrong P/V order pos=%i lock=%p %s:%i\n", + current, lock_list[current].lock, lock_list[current].file, + lock_list[current].line); + for (int i=current-1; i >= 0; i--) { /* already seen current */ + Pmsg4(000, " wrong P/V order pos=%i lock=%p %s:%i\n", + i, lock_list[i].lock, lock_list[i].file, lock_list[i].line); + if (lock_list[i].lock == m) { + Pmsg3(000, "ERROR: FOUND P for out of order V at pos=%i %s:%i\n", i, f, l); + shift_list(i); + current--; + break; + } + } + } + /* reset max_priority to the last one */ + if (current >= 0) { + max_priority = lock_list[current].max_priority; + } else { + max_priority = 0; + } + } + lmgr_v(&mutex); + /* ASSERT2 should be called outside from the mutex lock */ + ASSERT2_p(current != old_current, "V() called without a previous P()", f, l); + } + + virtual ~lmgr_thread_t() {destroy();} + + void destroy() { + free_event_list(); + pthread_mutex_destroy(&mutex); + } +} ; + +class lmgr_dummy_thread_t: public lmgr_thread_t +{ + void do_V(void *m, const char *file, int l) {} + void post_P() {} + void pre_P(void *m, int priority, const char *file, int l) {} +}; + +/* + * LMGR - Lock Manager + * + * + * + */ + +pthread_once_t key_lmgr_once = PTHREAD_ONCE_INIT; +static pthread_key_t 
lmgr_key; /* used to get lgmr_thread_t object */ + +static dlist *global_mgr = NULL; /* used to store all lgmr_thread_t objects */ +static pthread_mutex_t lmgr_global_mutex = PTHREAD_MUTEX_INITIALIZER; +static pthread_t undertaker; +static pthread_cond_t undertaker_cond; +static pthread_mutex_t undertaker_mutex = PTHREAD_MUTEX_INITIALIZER; +static bool use_undertaker = true; +static bool do_quit = false; + + +#define lmgr_is_active() (global_mgr != NULL) + +/* + * Add a new lmgr_thread_t object to the global list + */ +void lmgr_register_thread(lmgr_thread_t *item) +{ + lmgr_p(&lmgr_global_mutex); + { + item->int_thread_id = ++global_int_thread_id; + global_mgr->prepend(item); + } + lmgr_v(&lmgr_global_mutex); +} + +/* + * Call this function to cleanup specific lock thread data + */ +void lmgr_unregister_thread(lmgr_thread_t *item) +{ + if (!lmgr_is_active()) { + return; + } + lmgr_p(&lmgr_global_mutex); + { + global_mgr->remove(item); +#ifdef DEVELOPER + for(int i=0; i<=item->current; i++) { + lmgr_lock_t *lock = &item->lock_list[i]; + if (lock->state == LMGR_LOCK_GRANTED) { + ASSERT2(0, "Thread exits with granted locks"); + } + } +#endif + } + lmgr_v(&lmgr_global_mutex); +} + +#ifdef HAVE_WIN32 +# define TID int_thread_id +#else +# define TID thread_id +#endif +/* + * Search for a deadlock when it's secure to walk across + * locks list. (after lmgr_detect_deadlock or a fatal signal) + */ +bool lmgr_detect_deadlock_unlocked() +{ + bool ret=false; + lmgr_node_t *node=NULL; + lmgr_lock_t *lock; + lmgr_thread_t *item; + dlist *g = New(dlist(node, &node->link)); + + /* First, get a list of all node */ + foreach_dlist(item, global_mgr) { + for(int i=0; i<=item->current; i++) { + node = NULL; + lock = &item->lock_list[i]; + /* Depending if the lock is granted or not, it's a child or a root + * Granted: Mutex -> Thread + * Wanted: Thread -> Mutex + * + * Note: a Mutex can be locked only once, a thread can request only + * one mutex. + * + */ + if (lock->state == LMGR_LOCK_GRANTED) { + node = New(lmgr_node_t((void*)lock->lock, (void*)item->TID)); + } else if (lock->state == LMGR_LOCK_WANTED) { + node = New(lmgr_node_t((void*)item->TID, (void*)lock->lock)); + } + if (node) { + g->append(node); + } + } + } + + //foreach_dlist(node, g) { + // printf("g n=%p c=%p\n", node->node, node->child); + //} + + ret = contains_cycle(g); + if (ret) { + printf("Found a deadlock !!!!\n"); + } + + delete g; + return ret; +} + +/* + * Search for a deadlock in during the runtime + * It will lock all thread specific lock manager, nothing + * can be locked during this check. + */ +bool lmgr_detect_deadlock() +{ + bool ret=false; + if (!lmgr_is_active()) { + return ret; + } + + lmgr_p(&lmgr_global_mutex); + { + lmgr_thread_t *item; + foreach_dlist(item, global_mgr) { + lmgr_p(&item->mutex); + } + + ret = lmgr_detect_deadlock_unlocked(); + + foreach_dlist(item, global_mgr) { + lmgr_v(&item->mutex); + } + } + lmgr_v(&lmgr_global_mutex); + + return ret; +} + +/* + * !!! WARNING !!! 
+ * Use this function is used only after a fatal signal + * We don't use locking to display the information + */ +void dbg_print_lock(FILE *fp) +{ + fprintf(fp, "Attempt to dump locks\n"); + if (!lmgr_is_active()) { + return ; + } + lmgr_thread_t *item; + foreach_dlist(item, global_mgr) { + item->_dump(fp); + } +} + +/* + * Dump each lmgr_thread_t object + */ +void lmgr_dump() +{ + lmgr_p(&lmgr_global_mutex); + { + lmgr_thread_t *item; + foreach_dlist(item, global_mgr) { + item->dump(stderr); + } + } + lmgr_v(&lmgr_global_mutex); +} + +void cln_hdl(void *a) +{ + lmgr_cleanup_thread(); +} + +void *check_deadlock(void *) +{ + lmgr_init_thread(); + pthread_cleanup_push(cln_hdl, NULL); + + while (!do_quit) { + struct timeval tv; + struct timezone tz; + struct timespec timeout; + + gettimeofday(&tv, &tz); + timeout.tv_nsec = 0; + timeout.tv_sec = tv.tv_sec + 30; + + pthread_mutex_lock(&undertaker_mutex); + pthread_cond_timedwait(&undertaker_cond, &undertaker_mutex, &timeout); + pthread_mutex_unlock(&undertaker_mutex); + + if(do_quit) { + goto bail_out; + } + + if (lmgr_detect_deadlock()) { + /* If we have information about P()/V(), display them */ + if (debug_flags & DEBUG_MUTEX_EVENT && chk_dbglvl(DBGLEVEL_EVENT)) { + debug_flags |= DEBUG_PRINT_EVENT; + } + lmgr_dump(); + ASSERT2(0, "Lock deadlock"); /* Abort if we found a deadlock */ + } + } + +bail_out: + Dmsg0(100, "Exit check_deadlock.\n"); + pthread_cleanup_pop(1); + return NULL; +} + +/* This object is used when LMGR is not initialized */ +static lmgr_dummy_thread_t dummy_lmgr; + +/* + * Retrieve the lmgr_thread_t object from the stack + */ +inline lmgr_thread_t *lmgr_get_thread_info() +{ + if (lmgr_is_active()) { + return (lmgr_thread_t *)pthread_getspecific(lmgr_key); + } else { + return &dummy_lmgr; + } +} + +/* + * Know if the current thread is registred (used when we + * do not control thread creation) + */ +bool lmgr_thread_is_initialized() +{ + return pthread_getspecific(lmgr_key) != NULL; +} + +/* On windows, the thread id is a struct, and sometime (for debug or openssl), + * we need a int + */ +intptr_t bthread_get_thread_id() +{ + lmgr_thread_t *self = lmgr_get_thread_info(); + return self->int_thread_id; +} + +/* + * launch once for all threads + */ +void create_lmgr_key() +{ + int status = pthread_key_create(&lmgr_key, NULL); + if (status != 0) { + berrno be; + Pmsg1(000, _("pthread key create failed: ERR=%s\n"), + be.bstrerror(status)); + ASSERT2(0, "pthread_key_create failed"); + } + + lmgr_thread_t *n=NULL; + global_mgr = New(dlist(n, &n->link)); + + if (use_undertaker) { + /* Create condwait */ + status = pthread_cond_init(&undertaker_cond, NULL); + if (status != 0) { + berrno be; + Pmsg1(000, _("pthread_cond_init failed: ERR=%s\n"), + be.bstrerror(status)); + ASSERT2(0, "pthread_cond_init failed"); + } + status = pthread_create(&undertaker, NULL, check_deadlock, NULL); + if (status != 0) { + berrno be; + Pmsg1(000, _("pthread_create failed: ERR=%s\n"), + be.bstrerror(status)); + ASSERT2(0, "pthread_create failed"); + } + } +} + +/* + * Each thread have to call this function to put a lmgr_thread_t object + * in the stack and be able to call mutex_lock/unlock + */ +void lmgr_init_thread() +{ + int status = pthread_once(&key_lmgr_once, create_lmgr_key); + if (status != 0) { + berrno be; + Pmsg1(000, _("pthread key create failed: ERR=%s\n"), + be.bstrerror(status)); + ASSERT2(0, "pthread_once failed"); + } + lmgr_thread_t *l = New(lmgr_thread_t()); + pthread_setspecific(lmgr_key, l); + lmgr_register_thread(l); +} + +/* + * 
Call this function at the end of the thread + */ +void lmgr_cleanup_thread() +{ + if (!lmgr_is_active()) { + return ; + } + lmgr_thread_t *self = lmgr_get_thread_info(); + lmgr_unregister_thread(self); + delete(self); +} + +/* + * This function should be call at the end of the main thread + * Some thread like the watchdog are already present, so the global_mgr + * list is never empty. Should carefully clear the memory. + */ +void lmgr_cleanup_main() +{ + dlist *temp; + + if (!global_mgr) { + return; + } + if (use_undertaker) { + /* Signal to the check_deadlock thread to stop itself */ + pthread_mutex_lock(&undertaker_mutex); + do_quit = true; + pthread_cond_signal(&undertaker_cond); + pthread_mutex_unlock(&undertaker_mutex); + /* Should avoid memory leak reporting */ + pthread_join(undertaker, NULL); + pthread_cond_destroy(&undertaker_cond); + } + lmgr_cleanup_thread(); + lmgr_p(&lmgr_global_mutex); + { + temp = global_mgr; + global_mgr = NULL; + delete temp; + } + lmgr_v(&lmgr_global_mutex); +} + +void lmgr_add_event_p(const char *comment, intptr_t user_data, int32_t flags, + const char *file, int32_t line) +{ + lmgr_thread_t *self = lmgr_get_thread_info(); + self->add_event(comment, user_data, flags, file, line); +} + +/* + * Set the priority of the lmgr mutex object + */ +void bthread_mutex_set_priority(bthread_mutex_t *m, int prio) +{ +#ifdef USE_LOCKMGR_PRIORITY + m->priority = prio; +#endif +} + +/* + * Replacement for pthread_mutex_init() + */ +int pthread_mutex_init(bthread_mutex_t *m, const pthread_mutexattr_t *attr) +{ + m->priority = 0; + return pthread_mutex_init(&m->mutex, attr); +} + +/* + * Replacement for pthread_mutex_destroy() + */ +int pthread_mutex_destroy(bthread_mutex_t *m) +{ + return pthread_mutex_destroy(&m->mutex); +} + +/* + * Replacement for pthread_kill (only with USE_LOCKMGR_SAFEKILL) + */ +int bthread_kill(pthread_t thread, int sig, + const char *file, int line) +{ + bool thread_found_in_process=false; + int ret=-1; + /* We dont allow to send signal to ourself */ + if (pthread_equal(thread, pthread_self())) { + ASSERTD(!pthread_equal(thread, pthread_self()), "Wanted to pthread_kill ourself"); + Dmsg3(10, "%s:%d send kill to self thread %p\n", file, line, thread); + errno = EINVAL; + return -1; + } + + /* This loop isn't very efficient with dozens of threads but we don't use + * signal very much + */ + lmgr_p(&lmgr_global_mutex); + { + lmgr_thread_t *item; + foreach_dlist(item, global_mgr) { + if (pthread_equal(thread, item->thread_id)) { + ret = pthread_kill(thread, sig); + thread_found_in_process=true; + break; + } + } + } + lmgr_v(&lmgr_global_mutex); + + /* Sending a signal to non existing thread can create problem */ + if (!thread_found_in_process) { + ASSERTD(thread_found_in_process, "Wanted to pthread_kill non-existant thread"); + Dmsg3(10, "%s:%d send kill to non-existant thread %p\n", file, line, thread); + errno=ECHILD; + } + return ret; +} + +/* + * Replacement for pthread_mutex_lock() + * Returns always ok + */ +int bthread_mutex_lock_p(bthread_mutex_t *m, const char *file, int line) +{ + lmgr_thread_t *self = lmgr_get_thread_info(); + self->pre_P(m, m->priority, file, line); + lmgr_p(&m->mutex); + self->post_P(); + return 0; +} + +/* + * Replacement for pthread_mutex_unlock() + * Returns always ok + */ +int bthread_mutex_unlock_p(bthread_mutex_t *m, const char *file, int line) +{ + lmgr_thread_t *self = lmgr_get_thread_info(); + self->do_V(m, file, line); + lmgr_v(&m->mutex); + return 0; +} + +/* + * Replacement for pthread_mutex_lock() but with 
real pthread_mutex_t + * Returns always ok + */ +int bthread_mutex_lock_p(pthread_mutex_t *m, const char *file, int line) +{ + lmgr_thread_t *self = lmgr_get_thread_info(); + self->pre_P(m, 0, file, line); + lmgr_p(m); + self->post_P(); + return 0; +} + +/* + * Replacement for pthread_mutex_unlock() but with real pthread_mutex_t + * Returns always ok + */ +int bthread_mutex_unlock_p(pthread_mutex_t *m, const char *file, int line) +{ + lmgr_thread_t *self = lmgr_get_thread_info(); + self->do_V(m, file, line); + lmgr_v(m); + return 0; +} + + +/* TODO: check this + */ +int bthread_cond_wait_p(pthread_cond_t *cond, + pthread_mutex_t *m, + const char *file, int line) +{ + int ret; + lmgr_thread_t *self = lmgr_get_thread_info(); + self->do_V(m, file, line); + ret = pthread_cond_wait(cond, m); + self->pre_P(m, 0, file, line); + self->post_P(); + return ret; +} + +/* TODO: check this + */ +int bthread_cond_timedwait_p(pthread_cond_t *cond, + pthread_mutex_t *m, + const struct timespec * abstime, + const char *file, int line) +{ + int ret; + lmgr_thread_t *self = lmgr_get_thread_info(); + self->do_V(m, file, line); + ret = pthread_cond_timedwait(cond, m, abstime); + self->pre_P(m, 0, file, line); + self->post_P(); + return ret; +} + +/* TODO: check this + */ +int bthread_cond_wait_p(pthread_cond_t *cond, + bthread_mutex_t *m, + const char *file, int line) +{ + int ret; + lmgr_thread_t *self = lmgr_get_thread_info(); + self->do_V(m, file, line); + ret = pthread_cond_wait(cond, &m->mutex); + self->pre_P(m, m->priority, file, line); + self->post_P(); + return ret; +} + +/* TODO: check this + */ +int bthread_cond_timedwait_p(pthread_cond_t *cond, + bthread_mutex_t *m, + const struct timespec * abstime, + const char *file, int line) +{ + int ret; + lmgr_thread_t *self = lmgr_get_thread_info(); + self->do_V(m, file, line); + ret = pthread_cond_timedwait(cond, &m->mutex, abstime); + self->pre_P(m, m->priority, file, line); + self->post_P(); + return ret; +} + +/* Test if this mutex is locked by the current thread + * returns: + * 0 - unlocked + * 1 - locked by the current thread + * 2 - locked by an other thread + */ +int lmgr_mutex_is_locked(void *m) +{ + lmgr_thread_t *self = lmgr_get_thread_info(); + + for(int i=0; i <= self->current; i++) { + if (self->lock_list[i].lock == m) { + return 1; /* locked by us */ + } + } + + return 0; /* not locked by us */ +} + +/* + * Use this function when the caller handle the mutex directly + * + * lmgr_pre_lock(m, 10); + * pthread_mutex_lock(m); + * lmgr_post_lock(m); + */ +void lmgr_pre_lock(void *m, int prio, const char *file, int line) +{ + lmgr_thread_t *self = lmgr_get_thread_info(); + self->pre_P(m, prio, file, line); +} + +/* + * Use this function when the caller handle the mutex directly + */ +void lmgr_post_lock() +{ + lmgr_thread_t *self = lmgr_get_thread_info(); + self->post_P(); +} + +/* + * Do directly pre_P and post_P (used by trylock) + */ +void lmgr_do_lock(void *m, int prio, const char *file, int line) +{ + lmgr_thread_t *self = lmgr_get_thread_info(); + self->pre_P(m, prio, file, line); + self->post_P(); +} + +/* + * Use this function when the caller handle the mutex directly + */ +void lmgr_do_unlock(void *m) +{ + lmgr_thread_t *self = lmgr_get_thread_info(); + self->do_V(m); +} + +typedef struct { + void *(*start_routine)(void*); + void *arg; +} lmgr_thread_arg_t; + +extern "C" +void *lmgr_thread_launcher(void *x) +{ + void *ret=NULL; + lmgr_init_thread(); + pthread_cleanup_push(cln_hdl, NULL); + + lmgr_thread_arg_t arg; + lmgr_thread_arg_t *a = 
(lmgr_thread_arg_t *)x; + arg.start_routine = a->start_routine; + arg.arg = a->arg; + free(a); + + ret = arg.start_routine(arg.arg); + pthread_cleanup_pop(1); + return ret; +} + +int lmgr_thread_create(pthread_t *thread, + const pthread_attr_t *attr, + void *(*start_routine)(void*), void *arg) +{ + /* lmgr should be active (lmgr_init_thread() call in main()) */ + ASSERT2(lmgr_is_active(), "Lock manager not active"); + /* Will be freed by the child */ + lmgr_thread_arg_t *a = (lmgr_thread_arg_t*) malloc(sizeof(lmgr_thread_arg_t)); + a->start_routine = start_routine; + a->arg = arg; + return pthread_create(thread, attr, lmgr_thread_launcher, a); +} + +#else /* USE_LOCKMGR */ + +intptr_t bthread_get_thread_id() +{ +# ifdef HAVE_WIN32 + return (intptr_t)GetCurrentThreadId(); +# else + return (intptr_t)pthread_self(); +# endif +} + +/* + * !!! WARNING !!! + * Use this function is used only after a fatal signal + * We don't use locking to display information + */ +void dbg_print_lock(FILE *fp) +{ + Pmsg0(000, "lockmgr disabled\n"); +} + +#endif /* USE_LOCKMGR */ + +#ifdef HAVE_LINUX_OS +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif +#include +#endif + +/* + * Set the Thread Id of the current thread to limit I/O operations + */ +int bthread_change_uid(uid_t uid, gid_t gid) +{ +#if defined(HAVE_WIN32) || defined(HAVE_WIN64) + /* TODO: Check the cygwin code for the implementation of setuid() */ + errno = ENOSYS; + return -1; + +#elif defined(HAVE_LINUX_OS) + /* It can be also implemented with setfsuid() and setfsgid() */ + int ret=0; + ret = syscall(SYS_setregid, getgid(), gid); + if (ret == -1) { + return -1; + } + return syscall(SYS_setreuid, getuid(), uid); + +#elif defined(HAVE_PTHREAD_SETUGID_NP) + return pthread_setugid_np(uid, gid); + +#endif + errno = ENOSYS; + return -1; +} + +#ifndef TEST_PROGRAM +#define TEST_PROGRAM_A +#endif + +#ifdef TEST_PROGRAM +#include "bacula.h" +#include "unittests.h" +#include "lockmgr.h" +#undef P +#undef V +#define P(x) bthread_mutex_lock_p(&(x), __FILE__, __LINE__) +#define V(x) bthread_mutex_unlock_p(&(x), __FILE__, __LINE__) +#define pthread_create(a, b, c, d) lmgr_thread_create(a,b,c,d) + +bthread_mutex_t mutex1 = BTHREAD_MUTEX_NO_PRIORITY; +bthread_mutex_t mutex2 = BTHREAD_MUTEX_NO_PRIORITY; +bthread_mutex_t mutex3 = BTHREAD_MUTEX_NO_PRIORITY; +bthread_mutex_t mutex4 = BTHREAD_MUTEX_NO_PRIORITY; +bthread_mutex_t mutex5 = BTHREAD_MUTEX_NO_PRIORITY; +bthread_mutex_t mutex6 = BTHREAD_MUTEX_NO_PRIORITY; +bthread_mutex_t mutex_p1 = BTHREAD_MUTEX_PRIORITY(1); +bthread_mutex_t mutex_p2 = BTHREAD_MUTEX_PRIORITY(2); +bthread_mutex_t mutex_p3 = BTHREAD_MUTEX_PRIORITY(3); +static const char *my_prog; +static bool thevent1ok = false; +static bool thevent2ok = false; + +void *self_lock(void *temp) +{ + P(mutex1); + P(mutex1); + V(mutex1); + + return NULL; +} + +void *nolock(void *temp) +{ + P(mutex2); + sleep(5); + V(mutex2); + return NULL; +} + +void *locker(void *temp) +{ + bthread_mutex_t *m = (bthread_mutex_t*) temp; + P(*m); + V(*m); + return NULL; +} + +void *rwlocker(void *temp) +{ + brwlock_t *m = (brwlock_t*) temp; + rwl_writelock(m); + rwl_writelock(m); + + rwl_writeunlock(m); + rwl_writeunlock(m); + return NULL; +} + +void *mix_rwl_mutex(void *temp) +{ + brwlock_t *m = (brwlock_t*) temp; + P(mutex1); + rwl_writelock(m); + rwl_writeunlock(m); + V(mutex1); + return NULL; +} + +void *thuid(void *temp) +{ +// char buf[512]; +// if (restrict_job_permissions("eric", "users", buf, sizeof(buf)) < 0) { + if (bthread_change_uid(2, 100) == -1) { + berrno be; + 
fprintf(stderr, "Unable to change the uid err=%s\n", be.bstrerror()); + } else { + fprintf(stderr, "UID set! %d:%d\n", (int)getuid(), (int)getgid()); + mkdir("/tmp/adirectory", 0755); + system("touch /tmp/afile"); + system("id"); + fclose(fopen("/tmp/aaa", "a")); + } + if (bthread_change_uid(0, 0) == -1) { + berrno be; + fprintf(stderr, "Unable to change the uid err=%s\n", be.bstrerror()); + } else { + fprintf(stderr, "UID set! %d:%d\n", (int)getuid(), (int)getgid()); + sleep(5); + mkdir("/tmp/adirectory2", 0755); + system("touch /tmp/afile2"); + system("id"); + fclose(fopen("/tmp/aaa2", "a")); + } + + return NULL; +} + +void *th2(void *temp) +{ + P(mutex2); + P(mutex1); + + lmgr_dump(); + + sleep(10); + + V(mutex1); + V(mutex2); + + lmgr_dump(); + return NULL; +} +void *th1(void *temp) +{ + P(mutex1); + sleep(2); + P(mutex2); + + lmgr_dump(); + + sleep(10); + + V(mutex2); + V(mutex1); + + lmgr_dump(); + return NULL; +} + +void *thx(void *temp) +{ + int s= 1 + (int) (500.0 * (rand() / (RAND_MAX + 1.0))) + 200; + P(mutex1); + bmicrosleep(0,s); + P(mutex2); + bmicrosleep(0,s); + + V(mutex2); + V(mutex1); + return NULL; +} + +void *th3(void *a) { + while (1) { + fprintf(stderr, "undertaker sleep()\n"); + sleep(10); + lmgr_dump(); + if (lmgr_detect_deadlock()) { + lmgr_dump(); + exit(1); + } + } + return NULL; +} + +void *th_prio(void *a) { + char buf[512]; + bstrncpy(buf, my_prog, sizeof(buf)); + bstrncat(buf, " priority", sizeof(buf)); + intptr_t ret = system(buf); + return (void*) ret; +} + +void *th_event1(void *a) { + lmgr_thread_t *self = lmgr_get_thread_info(); + for (int i=0; i < 10000; i++) { + if ((i % 7) == 0) { + lmgr_add_event_flag("strdup test", i, LMGR_EVENT_DUP); + } else { + lmgr_add_event("My comment", i); + } + } + thevent1ok = self->event_id == 10000; + sleep(5); + return NULL; +} + +void *th_event2(void *a) { + lmgr_thread_t *self = lmgr_get_thread_info(); + for (int i=0; i < 10000; i++) { + if ((i % 2) == 0) { + lmgr_add_event_flag(bstrdup("free test"), i, LMGR_EVENT_FREE); + } else { + lmgr_add_event("My comment", i); + } + } + thevent2ok = self->event_id == 10000; + sleep(5); + return NULL; +} + +/* + * TODO: + * - Must detect multiple lock + * - lock/unlock in wrong order + * - deadlock with 2 or 3 threads + */ +int main(int argc, char **argv) +{ + Unittests lmgr_test("lockmgr_test", true, argc != 2); + void *ret=NULL; + lmgr_thread_t *self; + pthread_t id1, id2, id3, id4, id5, tab[200]; + bthread_mutex_t bmutex1; + pthread_mutex_t pmutex2; + + use_undertaker = false; + my_prog = argv[0]; + self = lmgr_get_thread_info(); + + /* below is used for checking forced SIGSEGV in separate process */ + if (argc == 2) { /* do priority check */ + P(mutex_p2); /* not permited */ + P(mutex_p1); + V(mutex_p1); /* never goes here */ + V(mutex_p2); + return 0; + } + + /* workaround for bthread_change_uid() failure for non-root */ + if (getuid() == 0){ + /* we can change uid/git, so proceed the test */ + pthread_create(&id5, NULL, thuid, NULL); + pthread_join(id5, NULL); + Pmsg2(0, "UID %d:%d\n", (int)getuid(), (int)getgid()); + } else { + Pmsg0(0, "Skipped bthread_change_uid() for non-root\n"); + } + + Pmsg0(0, "Starting mutex priority test\n"); + pthread_mutex_init(&bmutex1, NULL); + bthread_mutex_set_priority(&bmutex1, 10); + + pthread_mutex_init(&pmutex2, NULL); + P(bmutex1); + ok(self->max_priority == 10, "Check self max_priority"); + P(pmutex2); + ok(bmutex1.priority == 10, "Check bmutex_set_priority()"); + V(pmutex2); + V(bmutex1); + ok(self->max_priority == 0, "Check self 
max_priority"); + + Pmsg0(0, "Starting self deadlock tests\n"); + pthread_create(&id1, NULL, self_lock, NULL); + sleep(2); + ok(lmgr_detect_deadlock(), "Check self deadlock"); + lmgr_v(&mutex1.mutex); /* a bit dirty */ + pthread_join(id1, NULL); + + Pmsg0(0, "Starting thread kill tests\n"); + pthread_create(&id1, NULL, nolock, NULL); + sleep(2); + ok(bthread_kill(id1, SIGUSR2) == 0, "Kill existing thread"); + pthread_join(id1, NULL); + ok(bthread_kill(id1, SIGUSR2) == -1, "Kill non-existing thread"); + ok(bthread_kill(pthread_self(), SIGUSR2) == -1, "Kill self"); + + Pmsg0(0, "Starting thread locks tests\n"); + pthread_create(&id1, NULL, nolock, NULL); + sleep(2); + nok(lmgr_detect_deadlock(), "Check for nolock"); + pthread_join(id1, NULL); + + P(mutex1); + pthread_create(&id1, NULL, locker, &mutex1); + pthread_create(&id2, NULL, locker, &mutex1); + pthread_create(&id3, NULL, locker, &mutex1); + sleep(2); + nok(lmgr_detect_deadlock(), "Check for multiple lock"); + V(mutex1); + pthread_join(id1, NULL); + pthread_join(id2, NULL); + pthread_join(id3, NULL); + + brwlock_t wr; + rwl_init(&wr); + rwl_writelock(&wr); + rwl_writelock(&wr); + pthread_create(&id1, NULL, rwlocker, &wr); + pthread_create(&id2, NULL, rwlocker, &wr); + pthread_create(&id3, NULL, rwlocker, &wr); + nok(lmgr_detect_deadlock(), "Check for multiple rwlock"); + rwl_writeunlock(&wr); + nok(lmgr_detect_deadlock(), "Check for simple rwlock"); + rwl_writeunlock(&wr); + nok(lmgr_detect_deadlock(), "Check for multiple rwlock"); + + pthread_join(id1, NULL); + pthread_join(id2, NULL); + pthread_join(id3, NULL); + + rwl_writelock(&wr); + P(mutex1); + pthread_create(&id1, NULL, mix_rwl_mutex, &wr); + nok(lmgr_detect_deadlock(), "Check for mix rwlock/mutex"); + V(mutex1); + nok(lmgr_detect_deadlock(), "Check for mix rwlock/mutex"); + rwl_writeunlock(&wr); + nok(lmgr_detect_deadlock(), "Check for mix rwlock/mutex"); + pthread_join(id1, NULL); + + P(mutex5); + P(mutex6); + V(mutex5); + V(mutex6); + + nok(lmgr_detect_deadlock(), "Check for wrong order"); + + for(int j=0; j<200; j++) { + pthread_create(&tab[j], NULL, thx, NULL); + } + for(int j=0; j<200; j++) { + pthread_join(tab[j], NULL); + if (j%3) { lmgr_detect_deadlock();} + } + nok(lmgr_detect_deadlock(), "Check 200 lockers"); + + P(mutex4); + P(mutex5); + P(mutex6); + ok(lmgr_mutex_is_locked(&mutex6) == 1, "Check if mutex is locked"); + V(mutex6); + ok(lmgr_mutex_is_locked(&mutex6) == 0, "Check if mutex is unlocked"); + V(mutex5); + V(mutex4); + + Pmsg0(0, "Starting threads deadlock tests\n"); + pthread_create(&id1, NULL, th1, NULL); + sleep(1); + pthread_create(&id2, NULL, th2, NULL); + sleep(1); + ok(lmgr_detect_deadlock(), "Check for deadlock"); + + Pmsg0(0, "Starting for max_priority locks tests\n"); + pthread_create(&id3, NULL, th_prio, NULL); + pthread_join(id3, &ret); + ok(ret != 0, "Check for priority segfault"); + + P(mutex_p1); + ok(self->max_priority == 1, "Check max_priority 1/4"); + P(mutex_p2); + ok(self->max_priority == 2, "Check max_priority 2/4"); + P(mutex_p3); + ok(self->max_priority == 3, "Check max_priority 3/4"); + P(mutex6); + ok(self->max_priority == 3, "Check max_priority 4/4"); + V(mutex6); + ok(self->max_priority == 3, "Check max_priority 1/5"); + V(mutex_p3); + ok(self->max_priority == 2, "Check max_priority 4/5"); + V(mutex_p2); + ok(self->max_priority == 1, "Check max_priority 4/5"); + V(mutex_p1); + ok(self->max_priority == 0, "Check max_priority 5/5"); + + + P(mutex_p1); + P(mutex_p2); + P(mutex_p3); + P(mutex6); + ok(self->max_priority == 3, "Check 
max_priority mixed"); + V(mutex_p2); + ok(self->max_priority == 3, "Check max_priority mixed"); + V(mutex_p1); + ok(self->max_priority == 3, "Check max_priority mixed"); + V(mutex_p3); + ok(self->max_priority == 0, "Check max_priority mixed"); + V(mutex6); + ok(self->max_priority == 0, "Check max_priority mixed"); + + P(mutex_p1); + P(mutex_p2); + V(mutex_p1); + V(mutex_p2); + + Pmsg0(0, "Start lmgr_add_even tests\n"); + for (int i=0; i < 10000; i++) { + if ((i % 7) == 0) { + lmgr_add_event_flag("xxxxxxxxxxxxxxxx strdup test xxxxxxxxxxxxxxxx", i, LMGR_EVENT_DUP); + } else { + lmgr_add_event("My comment", i); + } + } + ok(self->event_id == 10000, "Checking registered events in self"); + + pthread_create(&id4, NULL, th_event1, NULL); + pthread_create(&id5, NULL, th_event2, NULL); + + sleep(2); + + pthread_join(id4, NULL); + pthread_join(id5, NULL); + + ok(thevent1ok, "Checking registered events in thread1"); + ok(thevent2ok, "Checking registered events in thread2"); + + return report(); +} +#endif /* TEST_PROGRAM */ diff --git a/src/lib/lockmgr.h b/src/lib/lockmgr.h new file mode 100644 index 00000000..65a444fa --- /dev/null +++ b/src/lib/lockmgr.h @@ -0,0 +1,298 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ + +#ifndef LOCKMGR_H +#define LOCKMGR_H 1 + +#include "mutex_list.h" /* Manage mutex with priority in a central place */ + +/* + * P and V op that don't use the lock manager (for memory allocation or on + * win32) + */ +void lmgr_p(pthread_mutex_t *m); +void lmgr_v(pthread_mutex_t *m); + +/* + * Get integer thread id + */ +intptr_t bthread_get_thread_id(); + +/* + * Set the Thread Id of the current thread to limit I/O operations + */ +int bthread_change_uid(uid_t uid, gid_t gid); + +#ifdef USE_LOCKMGR + +typedef struct bthread_mutex_t +{ + pthread_mutex_t mutex; + int priority; +} bthread_mutex_t; + +/* + * We decide that a thread won't lock more than LMGR_MAX_LOCK at the same time + */ +#define LMGR_MAX_LOCK 32 + +int bthread_cond_wait_p(pthread_cond_t *cond, + bthread_mutex_t *mutex, + const char *file="*unknown*", int line=0); + +int bthread_cond_timedwait_p(pthread_cond_t *cond, + bthread_mutex_t *mutex, + const struct timespec * abstime, + const char *file="*unknown*", int line=0); + +/* Same with real pthread_mutex_t */ +int bthread_cond_wait_p(pthread_cond_t *cond, + pthread_mutex_t *mutex, + const char *file="*unknown*", int line=0); + +int bthread_cond_timedwait_p(pthread_cond_t *cond, + pthread_mutex_t *mutex, + const struct timespec * abstime, + const char *file="*unknown*", int line=0); + +/* Replacement of pthread_mutex_lock() but with real pthread_mutex_t */ +int bthread_mutex_lock_p(pthread_mutex_t *m, + const char *file="*unknown*", int line=0); + +/* Replacement for pthread_mutex_unlock() but with real pthread_mutex_t */ +int bthread_mutex_unlock_p(pthread_mutex_t *m, + const char *file="*unknown*", int line=0); + +/* Replacement of pthread_mutex_lock() */ +int bthread_mutex_lock_p(bthread_mutex_t *m, + const char *file="*unknown*", int line=0); + +/* Replacement of pthread_mutex_unlock() */ +int bthread_mutex_unlock_p(bthread_mutex_t *m, + const char *file="*unknown*", int line=0); + +/* Test if this mutex is locked by the current thread + * 0 - not locked by the current thread + * 1 - locked by the current thread + */ +int lmgr_mutex_is_locked(void *m); + +/* + * Use them when you want use your lock yourself (ie rwlock) + */ + +/* Call before requesting the lock */ +void lmgr_pre_lock(void *m, int prio=0, + const char *file="*unknown*", int line=0); + +/* Call after getting it */ +void lmgr_post_lock(); + +/* Same as pre+post lock */ +void lmgr_do_lock(void *m, int prio=0, + const char *file="*unknown*", int line=0); + +/* Call just before releasing the lock */ +void lmgr_do_unlock(void *m); + +/* We use C++ mangling to make integration eaysier */ +int pthread_mutex_init(bthread_mutex_t *m, const pthread_mutexattr_t *attr); +int pthread_mutex_destroy(bthread_mutex_t *m); + +void bthread_mutex_set_priority(bthread_mutex_t *m, int prio); + +/* + * Each thread have to call this function to put a lmgr_thread_t object + * in the stack and be able to call mutex_lock/unlock + */ +void lmgr_init_thread(); + +/* + * Know if the current thread is registred (used when we + * do not control thread creation) + */ +bool lmgr_thread_is_initialized(); + +/* + * Call this function at the end of the thread + */ +void lmgr_cleanup_thread(); + +/* + * Call this at the end of the program, it will release the + * global lock manager + */ +void lmgr_cleanup_main(); + +/* + * Dump each lmgr_thread_t object to stdout + */ +void lmgr_dump(); + +/* + * Search a deadlock + */ +bool lmgr_detect_deadlock(); + +/* Bit flags */ +#define LMGR_EVENT_NONE 0 +#define LMGR_EVENT_DUP 1 /* use strdup() to 
copy the comment (will set FREE) */ +#define LMGR_EVENT_FREE 2 /* use free() when overwriting/deleting the comment */ +#define LMGR_EVENT_INVALID 4 /* Used to mark the record invalid */ + +/* + * Add event to the thread event list + */ +void lmgr_add_event_p(const char *comment, intptr_t user_data, int32_t flags, const char *file, int32_t line); +#define lmgr_add_event(c, u) lmgr_add_event_p(c, u, 0, __FILE__, __LINE__) +#define lmgr_add_event_flag(c, u, f) lmgr_add_event_p(c, u, (f), __FILE__, __LINE__) + +/* + * Search a deadlock after a fatal signal + * no lock are granted, so the program must be + * stopped. + */ +bool lmgr_detect_deadlock_unlocked(); + +/* + * This function will run your thread with lmgr_init_thread() and + * lmgr_cleanup_thread(). + */ +int lmgr_thread_create(pthread_t *thread, + const pthread_attr_t *attr, + void *(*start_routine)(void*), void *arg); + +/* + * Can use SAFEKILL to check if the argument is a valid threadid + */ +int bthread_kill(pthread_t thread, int sig, + const char *file="*unknown*", int line=0); + +#define BTHREAD_MUTEX_NO_PRIORITY {PTHREAD_MUTEX_INITIALIZER, 0} +#define BTHREAD_MUTEX_INITIALIZER BTHREAD_MUTEX_NO_PRIORITY + +/* Define USE_LOCKMGR_PRIORITY to detect mutex wrong order */ +#ifdef USE_LOCKMGR_PRIORITY +# define BTHREAD_MUTEX_PRIORITY(p) {PTHREAD_MUTEX_INITIALIZER, p} +#else +# define BTHREAD_MUTEX_PRIORITY(p) BTHREAD_MUTEX_NO_PRIORITY +#endif + +#define bthread_mutex_lock(x) bthread_mutex_lock_p(x, __FILE__, __LINE__) +#define bthread_mutex_unlock(x) bthread_mutex_unlock_p(x, __FILE__, __LINE__) +#define bthread_cond_wait(x,y) bthread_cond_wait_p(x,y, __FILE__, __LINE__) +#define bthread_cond_timedwait(x,y,z) bthread_cond_timedwait_p(x,y,z, __FILE__, __LINE__) + +/* + * Define LOCKMGR_COMPLIANT to use real pthread functions + */ +#define real_P(x) lmgr_p(&(x)) +#define real_V(x) lmgr_v(&(x)) + +#ifdef LOCKMGR_COMPLIANT +# define P(x) lmgr_p(&(x)) +# define pP(x) lmgr_p(x) +# define V(x) lmgr_v(&(x)) +# define pV(x) lmgr_v(x) +#else +# define P(x) bthread_mutex_lock_p(&(x), __FILE__, __LINE__) +# define pP(x) bthread_mutex_lock_p((x), __FILE__, __LINE__) +# define V(x) bthread_mutex_unlock_p(&(x), __FILE__, __LINE__) +# define pV(x) bthread_mutex_unlock_p((x), __FILE__, __LINE__) +# define pthread_create(a, b, c, d) lmgr_thread_create(a,b,c,d) +# define pthread_mutex_lock(x) bthread_mutex_lock(x) +# define pthread_mutex_unlock(x) bthread_mutex_unlock(x) +# define pthread_cond_wait(x,y) bthread_cond_wait(x,y) +# define pthread_cond_timedwait(x,y,z) bthread_cond_timedwait(x,y,z) + +# ifdef USE_LOCKMGR_SAFEKILL +# define pthread_kill(a,b) bthread_kill((a),(b), __FILE__, __LINE__) +# endif +#endif + +#else /* !USE_LOCKMGR */ + +# define lmgr_detect_deadlock() +# define lmgr_add_event_p(c, u, f, l) +# define lmgr_add_event(c, u) +# define lmgr_dump() +# define lmgr_thread_is_initialized() (1) +# define lmgr_init_thread() +# define lmgr_cleanup_thread() +# define lmgr_pre_lock(m, prio, f, l) +# define lmgr_post_lock() +# define lmgr_do_lock(m, prio, f, l) +# define lmgr_do_unlock(m) +# define lmgr_cleanup_main() +# define bthread_mutex_set_priority(a,b) +# define bthread_mutex_lock(a) pthread_mutex_lock(a) +# define bthread_mutex_lock_p(a, f, l) pthread_mutex_lock(a) +# define bthread_mutex_unlock(a) pthread_mutex_unlock(a) +# define bthread_mutex_unlock_p(a, f, l) pthread_mutex_unlock(a) +# define lmgr_cond_wait(a,b) pthread_cond_wait(a,b) +# define lmgr_cond_timedwait(a,b,c) pthread_cond_timedwait(a,b,c) +# define bthread_mutex_t 
pthread_mutex_t +# define P(x) lmgr_p(&(x)) +# define pP(x) lmgr_p((x)) +# define V(x) lmgr_v(&(x)) +# define pV(x) lmgr_v((x)) +# define BTHREAD_MUTEX_PRIORITY(p) PTHREAD_MUTEX_INITIALIZER +# define BTHREAD_MUTEX_NO_PRIORITY PTHREAD_MUTEX_INITIALIZER +# define BTHREAD_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER +# define lmgr_mutex_is_locked(m) (1) +# define bthread_cond_wait_p(w, x, y, z) pthread_cond_wait(w,x) +#endif /* USE_LOCKMGR */ + +/* a very basic lock_guard implementation : + * Lock_guard is mostly usefull to garanty mutex unlocking. Also, it's exception safe. + * usage example: + * void foobar() + * { + * lock_guard protector(m_mutex); // m_mutex is locked + * // the following section is protected until the function exits and/or returns + * + * if (case == TRUE) + * { + * return; // when returning, m_mutex is unlocked + * } + * . + * . + * . + * + * // when the method exits, m_mutex is unlocked. + * } + */ +class lock_guard +{ +public: + + pthread_mutex_t &m_mutex; /* the class keeps a reference on the mutex*/ + + explicit lock_guard(pthread_mutex_t &mutex) : m_mutex(mutex) + { + P(m_mutex); /* constructor locks the mutex*/ + } + + ~lock_guard() + { + V(m_mutex); /* destructor unlocks the mutex*/ + } +}; + +#endif /* LOCKMGR_H */ diff --git a/src/lib/lz4.c b/src/lib/lz4.c new file mode 100644 index 00000000..38fa16a2 --- /dev/null +++ b/src/lib/lz4.c @@ -0,0 +1,1480 @@ +/* + LZ4 - Fast LZ compression algorithm + Copyright (C) 2011-2017, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - LZ4 homepage : http://www.lz4.org + - LZ4 source repository : https://github.com/lz4/lz4 + + Tweaked for proper compliation in Bacula by KES 27 Jan 2019 +*/ + + +/*-************************************ +* Tuning parameters +**************************************/ +/* + * LZ4_HEAPMODE : + * Select how default compression functions will allocate memory for their hash table, + * in memory stack (0:default, fastest), or in memory heap (1:requires malloc()). 
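+ * Editorial illustration (not part of the upstream comment): with the default
+ * LZ4_HEAPMODE==0, one-shot entry points such as LZ4_compress_default() and
+ * LZ4_compress_fast() keep their LZ4_stream_t working state on the stack,
+ * roughly 16 KB with the default LZ4_MEMORY_USAGE of 14. Building the file with
+ *     cc -DLZ4_HEAPMODE=1 -c lz4.c
+ * makes them obtain that state through ALLOCATOR()/FREEMEM instead, which can be
+ * preferable for threads created with small stacks.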
+ */
+#ifndef LZ4_HEAPMODE
+# define LZ4_HEAPMODE 0
+#endif
+
+/*
+ * ACCELERATION_DEFAULT :
+ * Select "acceleration" for LZ4_compress_fast() when parameter value <= 0
+ */
+#define ACCELERATION_DEFAULT 1
+
+
+/*-************************************
+* CPU Feature Detection
+**************************************/
+/* LZ4_FORCE_MEMORY_ACCESS
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
+ * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
+ * The switch below allows selecting a different access method for improved performance.
+ * Method 0 (default) : use `memcpy()`. Safe and portable.
+ * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).
+ * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
+ * Method 2 : direct access. This method is portable but violates the C standard.
+ * It can generate buggy code on targets whose assembly generation depends on alignment.
+ * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
+ * See https://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
+ * Prefer these methods in priority order (0 > 1 > 2)
+ */
+#ifndef LZ4_FORCE_MEMORY_ACCESS /* can be defined externally */
+# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
+# define LZ4_FORCE_MEMORY_ACCESS 2
+# elif defined(__INTEL_COMPILER) || defined(__GNUC__)
+# define LZ4_FORCE_MEMORY_ACCESS 1
+# endif
+#endif
+
+/*
+ * LZ4_FORCE_SW_BITCOUNT
+ * Define this parameter if your target system or compiler does not support hardware bit count
+ */
+#if defined(_MSC_VER) && defined(_WIN32_WCE) /* Visual Studio for Windows CE does not support Hardware bit count */
+# define LZ4_FORCE_SW_BITCOUNT
+#endif
+
+
+/*-************************************
+* Dependency
+**************************************/
+#include "lz4.h"
+/* see also "memory routines" below */
+
+
+/*-************************************
+* Compiler Options
+**************************************/
+#ifdef _MSC_VER /* Visual Studio */
+# include <intrin.h>
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+# pragma warning(disable : 4293) /* disable: C4293: too large shift (32-bits) */
+#endif /* _MSC_VER */
+
+#ifndef LZ4_FORCE_INLINE
+# ifdef _MSC_VER /* Visual Studio */
+# define LZ4_FORCE_INLINE static __forceinline
+# else
+# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
+# ifdef __GNUC__
+# define LZ4_FORCE_INLINE static inline __attribute__((always_inline))
+# else
+# define LZ4_FORCE_INLINE static inline
+# endif
+# else
+# define LZ4_FORCE_INLINE static
+# endif /* __STDC_VERSION__ */
+# endif /* _MSC_VER */
+#endif /* LZ4_FORCE_INLINE */
+
+#if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__)
+# define expect(expr,value) (__builtin_expect ((expr),(value)) )
+#else
+# define expect(expr,value) (expr)
+#endif
+
+#define likely(expr) expect((expr) != 0, 1)
+#define unlikely(expr) expect((expr) != 0, 0)
+
+
+/*-************************************
+* Memory routines
+**************************************/
+#include <stdlib.h> /* malloc, calloc, free */
+#define ALLOCATOR(n,s) calloc(n,s)
+#define FREEMEM free
+#include <string.h> /* memset, memcpy */
+#define MEM_INIT memset
+
+
+/*-************************************
+* Basic Types
+**************************************/
+#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+# include <stdint.h>
+ typedef uint8_t BYTE;
+ typedef uint16_t U16;
+ typedef uint32_t U32;
+ typedef int32_t S32;
+ typedef uint64_t U64;
+ typedef uintptr_t uptrval;
+#else
+ typedef unsigned char BYTE;
+ typedef unsigned short U16;
+ typedef unsigned int U32;
+ typedef signed int S32;
+ typedef unsigned long long U64;
+ typedef size_t uptrval; /* generally true, except OpenVMS-64 */
+#endif
+
+#if defined(__x86_64__)
+ typedef U64 reg_t; /* 64-bits in x32 mode */
+#else
+ typedef size_t reg_t; /* 32-bits in x32 mode */
+#endif
+
+/*-************************************
+* Reading and writing into memory
+**************************************/
+static unsigned LZ4_isLittleEndian(void)
+{
+ const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
+ return one.c[0];
+}
+
+
+#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2)
+/* lie to the compiler about data alignment; use with caution */
+
+static U16 LZ4_read16(const void* memPtr) { return *(const U16*) memPtr; }
+static U32 LZ4_read32(const void* memPtr) { return *(const U32*) memPtr; }
+static reg_t LZ4_read_ARCH(const void* memPtr) { return *(const reg_t*) memPtr; }
+
+static void LZ4_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
+static void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
+
+#elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==1)
+
+/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
+/* currently only defined for gcc and icc */
+typedef union { U16 u16; U32 u32; reg_t uArch; } __attribute__((packed)) unalign;
+
+static U16 LZ4_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
+static U32 LZ4_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
+static reg_t LZ4_read_ARCH(const void* ptr) { return ((const unalign*)ptr)->uArch; }
+
+static void LZ4_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
+static void LZ4_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = value; }
+
+#else /* safe and portable access through memcpy() */
+
+static U16 LZ4_read16(const void* memPtr)
+{
+ U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+static U32 LZ4_read32(const void* memPtr)
+{
+ U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+static reg_t LZ4_read_ARCH(const void* memPtr)
+{
+ reg_t val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+static void LZ4_write16(void* memPtr, U16 value)
+{
+ memcpy(memPtr, &value, sizeof(value));
+}
+
+static void LZ4_write32(void* memPtr, U32 value)
+{
+ memcpy(memPtr, &value, sizeof(value));
+}
+
+#endif /* LZ4_FORCE_MEMORY_ACCESS */
+
+
+static U16 LZ4_readLE16(const void* memPtr)
+{
+ if (LZ4_isLittleEndian()) {
+ return LZ4_read16(memPtr);
+ } else {
+ const BYTE* p = (const BYTE*)memPtr;
+ return (U16)((U16)p[0] + (p[1]<<8));
+ }
+}
+
+static void LZ4_writeLE16(void* memPtr, U16 value)
+{
+ if (LZ4_isLittleEndian()) {
+ LZ4_write16(memPtr, value);
+ } else {
+ BYTE* p = (BYTE*)memPtr;
+ p[0] = (BYTE) value;
+ p[1] = (BYTE)(value>>8);
+ }
+}
+
+static void LZ4_copy8(void* dst, const void* src)
+{
+ memcpy(dst,src,8);
+}
+
+/* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */
+static void LZ4_wildCopy(void* dstPtr, const void* srcPtr, void* dstEnd) +{ + BYTE* d = (BYTE*)dstPtr; + const BYTE* s = (const BYTE*)srcPtr; + BYTE* const e = (BYTE*)dstEnd; + + do { LZ4_copy8(d,s); d+=8; s+=8; } while (d=2) +# include +# define DEBUGLOG(l, ...) { \ + if (l<=LZ4_DEBUG) { \ + fprintf(stderr, __FILE__ ": "); \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, " \n"); \ + } } +#else +# define DEBUGLOG(l, ...) {} /* disabled */ +#endif + + +/*-************************************ +* Common functions +**************************************/ +static unsigned LZ4_NbCommonBytes (reg_t val) +{ + if (LZ4_isLittleEndian()) { + if (sizeof(val)==8) { +# if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT) + unsigned long r = 0; + _BitScanForward64( &r, (U64)val ); + return (int)(r>>3); +# elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT) + return (__builtin_ctzll((U64)val) >> 3); +# else + static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 }; + return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58]; +# endif + } else /* 32 bits */ { +# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT) + unsigned long r; + _BitScanForward( &r, (U32)val ); + return (int)(r>>3); +# elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT) + return (__builtin_ctz((U32)val) >> 3); +# else + static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 }; + return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27]; +# endif + } + } else /* Big Endian CPU */ { + if (sizeof(val)==8) { +# if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT) + unsigned long r = 0; + _BitScanReverse64( &r, val ); + return (unsigned)(r>>3); +# elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT) + return (__builtin_clzll((U64)val) >> 3); +# else + unsigned r; + if (!(val>>32)) { r=4; } else { r=0; val>>=32; } + if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; } + r += (!val); + return r; +# endif + } else /* 32 bits */ { +# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT) + unsigned long r = 0; + _BitScanReverse( &r, (unsigned long)val ); + return (unsigned)(r>>3); +# elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT) + return (__builtin_clz((U32)val) >> 3); +# else + unsigned r; + if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; } + r += (!val); + return r; +# endif + } + } +} + +#define STEPSIZE sizeof(reg_t) +static unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit) +{ + const BYTE* const pStart = pIn; + + while (likely(pIn compression run slower on incompressible data */ + + +/*-************************************ +* Local Structures and types +**************************************/ +typedef enum { notLimited = 0, limitedOutput = 1 } limitedOutput_directive; +typedef enum { byPtr, byU32, byU16 } tableType_t; + +typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive; +typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive; + +typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive; +typedef enum { 
full = 0, partial = 1 } earlyEnd_directive; + + +/*-************************************ +* Local Utils +**************************************/ +int LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; } +const char* LZ4_versionString(void) { return LZ4_VERSION_STRING; } +int LZ4_compressBound(int isize) { return LZ4_COMPRESSBOUND(isize); } +int LZ4_sizeofState() { return LZ4_STREAMSIZE; } + + +/*-****************************** +* Compression functions +********************************/ +static U32 LZ4_hash4(U32 sequence, tableType_t const tableType) +{ + if (tableType == byU16) + return ((sequence * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1))); + else + return ((sequence * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG)); +} + +static U32 LZ4_hash5(U64 sequence, tableType_t const tableType) +{ + static const U64 prime5bytes = 889523592379ULL; + static const U64 prime8bytes = 11400714785074694791ULL; + const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG+1 : LZ4_HASHLOG; + if (LZ4_isLittleEndian()) + return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog)); + else + return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog)); +} + +LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType) +{ + if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType); + return LZ4_hash4(LZ4_read32(p), tableType); +} + +static void LZ4_putPositionOnHash(const BYTE* p, U32 h, void* tableBase, tableType_t const tableType, const BYTE* srcBase) +{ + switch (tableType) + { + case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = p; return; } + case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); return; } + case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); return; } + } +} + +LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase) +{ + U32 const h = LZ4_hashPosition(p, tableType); + LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase); +} + +static const BYTE* LZ4_getPositionOnHash(U32 h, void* tableBase, tableType_t tableType, const BYTE* srcBase) +{ + if (tableType == byPtr) { const BYTE** hashTable = (const BYTE**) tableBase; return hashTable[h]; } + if (tableType == byU32) { const U32* const hashTable = (U32*) tableBase; return hashTable[h] + srcBase; } + { const U16* const hashTable = (U16*) tableBase; return hashTable[h] + srcBase; } /* default, to ensure a return */ +} + +LZ4_FORCE_INLINE const BYTE* LZ4_getPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase) +{ + U32 const h = LZ4_hashPosition(p, tableType); + return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase); +} + + +/** LZ4_compress_generic() : + inlined, to ensure branches are decided at compilation time */ +LZ4_FORCE_INLINE int LZ4_compress_generic( + LZ4_stream_t_internal* const cctx, + const char* const source, + char* const dest, + const int inputSize, + const int maxOutputSize, + const limitedOutput_directive outputLimited, + const tableType_t tableType, + const dict_directive dict, + const dictIssue_directive dictIssue, + const U32 acceleration) +{ + const BYTE* ip = (const BYTE*) source; + const BYTE* base; + const BYTE* lowLimit; + const BYTE* const lowRefLimit = ip - cctx->dictSize; + const BYTE* const dictionary = cctx->dictionary; + const BYTE* const dictEnd = dictionary + cctx->dictSize; + const ptrdiff_t dictDelta = dictEnd - (const BYTE*)source; + const BYTE* anchor = 
(const BYTE*) source; + const BYTE* const iend = ip + inputSize; + const BYTE* const mflimit = iend - MFLIMIT; + const BYTE* const matchlimit = iend - LASTLITERALS; + + BYTE* op = (BYTE*) dest; + BYTE* const olimit = op + maxOutputSize; + + U32 forwardH; + + /* Init conditions */ + if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported inputSize, too large (or negative) */ + switch(dict) + { + case noDict: + default: + base = (const BYTE*)source; + lowLimit = (const BYTE*)source; + break; + case withPrefix64k: + base = (const BYTE*)source - cctx->currentOffset; + lowLimit = (const BYTE*)source - cctx->dictSize; + break; + case usingExtDict: + base = (const BYTE*)source - cctx->currentOffset; + lowLimit = (const BYTE*)source; + break; + } + if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) return 0; /* Size too large (not within 64K limit) */ + if (inputSizehashTable, tableType, base); + ip++; forwardH = LZ4_hashPosition(ip, tableType); + + /* Main Loop */ + for ( ; ; ) { + ptrdiff_t refDelta = 0; + const BYTE* match; + BYTE* token; + + /* Find a match */ + { const BYTE* forwardIp = ip; + unsigned step = 1; + unsigned searchMatchNb = acceleration << LZ4_skipTrigger; + do { + U32 const h = forwardH; + ip = forwardIp; + forwardIp += step; + step = (searchMatchNb++ >> LZ4_skipTrigger); + + if (unlikely(forwardIp > mflimit)) goto _last_literals; + + match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType, base); + if (dict==usingExtDict) { + if (match < (const BYTE*)source) { + refDelta = dictDelta; + lowLimit = dictionary; + } else { + refDelta = 0; + lowLimit = (const BYTE*)source; + } } + forwardH = LZ4_hashPosition(forwardIp, tableType); + LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base); + + } while ( ((dictIssue==dictSmall) ? (match < lowRefLimit) : 0) + || ((tableType==byU16) ? 
0 : (match + MAX_DISTANCE < ip)) + || (LZ4_read32(match+refDelta) != LZ4_read32(ip)) ); + } + + /* Catch up */ + while (((ip>anchor) & (match+refDelta > lowLimit)) && (unlikely(ip[-1]==match[refDelta-1]))) { ip--; match--; } + + /* Encode Literals */ + { unsigned const litLength = (unsigned)(ip - anchor); + token = op++; + if ((outputLimited) && /* Check output buffer overflow */ + (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit))) + return 0; + if (litLength >= RUN_MASK) { + int len = (int)litLength-RUN_MASK; + *token = (RUN_MASK<= 255 ; len-=255) *op++ = 255; + *op++ = (BYTE)len; + } + else *token = (BYTE)(litLength< matchlimit) limit = matchlimit; + matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, limit); + ip += MINMATCH + matchCode; + if (ip==limit) { + unsigned const more = LZ4_count(ip, (const BYTE*)source, matchlimit); + matchCode += more; + ip += more; + } + } else { + matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit); + ip += MINMATCH + matchCode; + } + + if ( outputLimited && /* Check output buffer overflow */ + (unlikely(op + (1 + LASTLITERALS) + (matchCode>>8) > olimit)) ) + return 0; + if (matchCode >= ML_MASK) { + *token += ML_MASK; + matchCode -= ML_MASK; + LZ4_write32(op, 0xFFFFFFFF); + while (matchCode >= 4*255) op+=4, LZ4_write32(op, 0xFFFFFFFF), matchCode -= 4*255; + op += matchCode / 255; + *op++ = (BYTE)(matchCode % 255); + } else + *token += (BYTE)(matchCode); + } + + anchor = ip; + + /* Test end of chunk */ + if (ip > mflimit) break; + + /* Fill table */ + LZ4_putPosition(ip-2, cctx->hashTable, tableType, base); + + /* Test next position */ + match = LZ4_getPosition(ip, cctx->hashTable, tableType, base); + if (dict==usingExtDict) { + if (match < (const BYTE*)source) { + refDelta = dictDelta; + lowLimit = dictionary; + } else { + refDelta = 0; + lowLimit = (const BYTE*)source; + } } + LZ4_putPosition(ip, cctx->hashTable, tableType, base); + if ( ((dictIssue==dictSmall) ? (match>=lowRefLimit) : 1) + && (match+MAX_DISTANCE>=ip) + && (LZ4_read32(match+refDelta)==LZ4_read32(ip)) ) + { token=op++; *token=0; goto _next_match; } + + /* Prepare next loop */ + forwardH = LZ4_hashPosition(++ip, tableType); + } + +_last_literals: + /* Encode Last Literals */ + { size_t const lastRun = (size_t)(iend - anchor); + if ( (outputLimited) && /* Check output buffer overflow */ + ((op - (BYTE*)dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)maxOutputSize) ) + return 0; + if (lastRun >= RUN_MASK) { + size_t accumulator = lastRun - RUN_MASK; + *op++ = RUN_MASK << ML_BITS; + for(; accumulator >= 255 ; accumulator-=255) *op++ = 255; + *op++ = (BYTE) accumulator; + } else { + *op++ = (BYTE)(lastRun<internal_donotuse; + LZ4_resetStream((LZ4_stream_t*)state); + if (acceleration < 1) acceleration = ACCELERATION_DEFAULT; + + if (maxOutputSize >= LZ4_compressBound(inputSize)) { + if (inputSize < LZ4_64Klimit) + return LZ4_compress_generic(ctx, source, dest, inputSize, 0, notLimited, byU16, noDict, noDictIssue, acceleration); + else + return LZ4_compress_generic(ctx, source, dest, inputSize, 0, notLimited, (sizeof(void*)==8) ? byU32 : byPtr, noDict, noDictIssue, acceleration); + } else { + if (inputSize < LZ4_64Klimit) + return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration); + else + return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, limitedOutput, (sizeof(void*)==8) ? 
byU32 : byPtr, noDict, noDictIssue, acceleration); + } +} + + +int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration) +{ +#if (LZ4_HEAPMODE) + void* ctxPtr = ALLOCATOR(1, sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */ +#else + LZ4_stream_t ctx; + void* const ctxPtr = &ctx; +#endif + + int const result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize, maxOutputSize, acceleration); + +#if (LZ4_HEAPMODE) + FREEMEM(ctxPtr); +#endif + return result; +} + + +int LZ4_compress_default(const char* source, char* dest, int inputSize, int maxOutputSize) +{ + return LZ4_compress_fast(source, dest, inputSize, maxOutputSize, 1); +} + + +/* hidden debug function */ +/* strangely enough, gcc generates faster code when this function is uncommented, even if unused */ +int LZ4_compress_fast_force(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration) +{ + LZ4_stream_t ctx; + LZ4_resetStream(&ctx); + + if (inputSize < LZ4_64Klimit) + return LZ4_compress_generic(&ctx.internal_donotuse, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration); + else + return LZ4_compress_generic(&ctx.internal_donotuse, source, dest, inputSize, maxOutputSize, limitedOutput, sizeof(void*)==8 ? byU32 : byPtr, noDict, noDictIssue, acceleration); +} + + +/*-****************************** +* *_destSize() variant +********************************/ + +static int LZ4_compress_destSize_generic( + LZ4_stream_t_internal* const ctx, + const char* const src, + char* const dst, + int* const srcSizePtr, + const int targetDstSize, + const tableType_t tableType) +{ + const BYTE* ip = (const BYTE*) src; + const BYTE* base = (const BYTE*) src; + const BYTE* lowLimit = (const BYTE*) src; + const BYTE* anchor = ip; + const BYTE* const iend = ip + *srcSizePtr; + const BYTE* const mflimit = iend - MFLIMIT; + const BYTE* const matchlimit = iend - LASTLITERALS; + + BYTE* op = (BYTE*) dst; + BYTE* const oend = op + targetDstSize; + BYTE* const oMaxLit = op + targetDstSize - 2 /* offset */ - 8 /* because 8+MINMATCH==MFLIMIT */ - 1 /* token */; + BYTE* const oMaxMatch = op + targetDstSize - (LASTLITERALS + 1 /* token */); + BYTE* const oMaxSeq = oMaxLit - 1 /* token */; + + U32 forwardH; + + + /* Init conditions */ + if (targetDstSize < 1) return 0; /* Impossible to store anything */ + if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported input size, too large (or negative) */ + if ((tableType == byU16) && (*srcSizePtr>=LZ4_64Klimit)) return 0; /* Size too large (not within 64K limit) */ + if (*srcSizePtrhashTable, tableType, base); + ip++; forwardH = LZ4_hashPosition(ip, tableType); + + /* Main Loop */ + for ( ; ; ) { + const BYTE* match; + BYTE* token; + + /* Find a match */ + { const BYTE* forwardIp = ip; + unsigned step = 1; + unsigned searchMatchNb = 1 << LZ4_skipTrigger; + + do { + U32 h = forwardH; + ip = forwardIp; + forwardIp += step; + step = (searchMatchNb++ >> LZ4_skipTrigger); + + if (unlikely(forwardIp > mflimit)) goto _last_literals; + + match = LZ4_getPositionOnHash(h, ctx->hashTable, tableType, base); + forwardH = LZ4_hashPosition(forwardIp, tableType); + LZ4_putPositionOnHash(ip, h, ctx->hashTable, tableType, base); + + } while ( ((tableType==byU16) ? 
0 : (match + MAX_DISTANCE < ip)) + || (LZ4_read32(match) != LZ4_read32(ip)) ); + } + + /* Catch up */ + while ((ip>anchor) && (match > lowLimit) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; } + + /* Encode Literal length */ + { unsigned litLength = (unsigned)(ip - anchor); + token = op++; + if (op + ((litLength+240)/255) + litLength > oMaxLit) { + /* Not enough space for a last match */ + op--; + goto _last_literals; + } + if (litLength>=RUN_MASK) { + unsigned len = litLength - RUN_MASK; + *token=(RUN_MASK<= 255 ; len-=255) *op++ = 255; + *op++ = (BYTE)len; + } + else *token = (BYTE)(litLength< oMaxMatch) { + /* Match description too long : reduce it */ + matchLength = (15-1) + (oMaxMatch-op) * 255; + } + ip += MINMATCH + matchLength; + + if (matchLength>=ML_MASK) { + *token += ML_MASK; + matchLength -= ML_MASK; + while (matchLength >= 255) { matchLength-=255; *op++ = 255; } + *op++ = (BYTE)matchLength; + } + else *token += (BYTE)(matchLength); + } + + anchor = ip; + + /* Test end of block */ + if (ip > mflimit) break; + if (op > oMaxSeq) break; + + /* Fill table */ + LZ4_putPosition(ip-2, ctx->hashTable, tableType, base); + + /* Test next position */ + match = LZ4_getPosition(ip, ctx->hashTable, tableType, base); + LZ4_putPosition(ip, ctx->hashTable, tableType, base); + if ( (match+MAX_DISTANCE>=ip) + && (LZ4_read32(match)==LZ4_read32(ip)) ) + { token=op++; *token=0; goto _next_match; } + + /* Prepare next loop */ + forwardH = LZ4_hashPosition(++ip, tableType); + } + +_last_literals: + /* Encode Last Literals */ + { size_t lastRunSize = (size_t)(iend - anchor); + if (op + 1 /* token */ + ((lastRunSize+240)/255) /* litLength */ + lastRunSize /* literals */ > oend) { + /* adapt lastRunSize to fill 'dst' */ + lastRunSize = (oend-op) - 1; + lastRunSize -= (lastRunSize+240)/255; + } + ip = anchor + lastRunSize; + + if (lastRunSize >= RUN_MASK) { + size_t accumulator = lastRunSize - RUN_MASK; + *op++ = RUN_MASK << ML_BITS; + for(; accumulator >= 255 ; accumulator-=255) *op++ = 255; + *op++ = (BYTE) accumulator; + } else { + *op++ = (BYTE)(lastRunSize<= LZ4_compressBound(*srcSizePtr)) { /* compression success is guaranteed */ + return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, 1); + } else { + if (*srcSizePtr < LZ4_64Klimit) + return LZ4_compress_destSize_generic(&state->internal_donotuse, src, dst, srcSizePtr, targetDstSize, byU16); + else + return LZ4_compress_destSize_generic(&state->internal_donotuse, src, dst, srcSizePtr, targetDstSize, sizeof(void*)==8 ? 
byU32 : byPtr); + } +} + + +int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize) +{ +#if (LZ4_HEAPMODE) + LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOCATOR(1, sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */ +#else + LZ4_stream_t ctxBody; + LZ4_stream_t* ctx = &ctxBody; +#endif + + int result = LZ4_compress_destSize_extState(ctx, src, dst, srcSizePtr, targetDstSize); + +#if (LZ4_HEAPMODE) + FREEMEM(ctx); +#endif + return result; +} + + + +/*-****************************** +* Streaming functions +********************************/ + +LZ4_stream_t* LZ4_createStream(void) +{ + LZ4_stream_t* lz4s = (LZ4_stream_t*)ALLOCATOR(8, LZ4_STREAMSIZE_U64); + LZ4_STATIC_ASSERT(LZ4_STREAMSIZE >= sizeof(LZ4_stream_t_internal)); /* A compilation error here means LZ4_STREAMSIZE is not large enough */ + LZ4_resetStream(lz4s); + return lz4s; +} + +void LZ4_resetStream (LZ4_stream_t* LZ4_stream) +{ + MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t)); +} + +int LZ4_freeStream (LZ4_stream_t* LZ4_stream) +{ + if (!LZ4_stream) return 0; /* support free on NULL */ + FREEMEM(LZ4_stream); + return (0); +} + + +#define HASH_UNIT sizeof(reg_t) +int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize) +{ + LZ4_stream_t_internal* dict = &LZ4_dict->internal_donotuse; + const BYTE* p = (const BYTE*)dictionary; + const BYTE* const dictEnd = p + dictSize; + const BYTE* base; + + if ((dict->initCheck) || (dict->currentOffset > 1 GB)) /* Uninitialized structure, or reuse overflow */ + LZ4_resetStream(LZ4_dict); + + if (dictSize < (int)HASH_UNIT) { + dict->dictionary = NULL; + dict->dictSize = 0; + return 0; + } + + if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB; + dict->currentOffset += 64 KB; + base = p - dict->currentOffset; + dict->dictionary = p; + dict->dictSize = (U32)(dictEnd - p); + dict->currentOffset += dict->dictSize; + + while (p <= dictEnd-HASH_UNIT) { + LZ4_putPosition(p, dict->hashTable, byU32, base); + p+=3; + } + + return dict->dictSize; +} + + +static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, const BYTE* src) +{ + if ((LZ4_dict->currentOffset > 0x80000000) || + ((uptrval)LZ4_dict->currentOffset > (uptrval)src)) { /* address space overflow */ + /* rescale hash table */ + U32 const delta = LZ4_dict->currentOffset - 64 KB; + const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize; + int i; + for (i=0; ihashTable[i] < delta) LZ4_dict->hashTable[i]=0; + else LZ4_dict->hashTable[i] -= delta; + } + LZ4_dict->currentOffset = 64 KB; + if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB; + LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize; + } +} + + +int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration) +{ + LZ4_stream_t_internal* streamPtr = &LZ4_stream->internal_donotuse; + const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize; + + const BYTE* smallest = (const BYTE*) source; + if (streamPtr->initCheck) return 0; /* Uninitialized structure detected */ + if ((streamPtr->dictSize>0) && (smallest>dictEnd)) smallest = dictEnd; + LZ4_renormDictT(streamPtr, smallest); + if (acceleration < 1) acceleration = ACCELERATION_DEFAULT; + + /* Check overlapping input/dictionary space */ + { const BYTE* sourceEnd = (const BYTE*) source + inputSize; + if ((sourceEnd > streamPtr->dictionary) && (sourceEnd < dictEnd)) { + streamPtr->dictSize = (U32)(dictEnd - sourceEnd); + if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB; + 
if (streamPtr->dictSize < 4) streamPtr->dictSize = 0; + streamPtr->dictionary = dictEnd - streamPtr->dictSize; + } + } + + /* prefix mode : source data follows dictionary */ + if (dictEnd == (const BYTE*)source) { + int result; + if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) + result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, withPrefix64k, dictSmall, acceleration); + else + result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, withPrefix64k, noDictIssue, acceleration); + streamPtr->dictSize += (U32)inputSize; + streamPtr->currentOffset += (U32)inputSize; + return result; + } + + /* external dictionary mode */ + { int result; + if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) + result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, usingExtDict, dictSmall, acceleration); + else + result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, usingExtDict, noDictIssue, acceleration); + streamPtr->dictionary = (const BYTE*)source; + streamPtr->dictSize = (U32)inputSize; + streamPtr->currentOffset += (U32)inputSize; + return result; + } +} + + +/* Hidden debug function, to force external dictionary mode */ +int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int inputSize) +{ + LZ4_stream_t_internal* streamPtr = &LZ4_dict->internal_donotuse; + int result; + const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize; + + const BYTE* smallest = dictEnd; + if (smallest > (const BYTE*) source) smallest = (const BYTE*) source; + LZ4_renormDictT(streamPtr, smallest); + + result = LZ4_compress_generic(streamPtr, source, dest, inputSize, 0, notLimited, byU32, usingExtDict, noDictIssue, 1); + + streamPtr->dictionary = (const BYTE*)source; + streamPtr->dictSize = (U32)inputSize; + streamPtr->currentOffset += (U32)inputSize; + + return result; +} + + +/*! LZ4_saveDict() : + * If previously compressed data block is not guaranteed to remain available at its memory location, + * save it into a safer place (char* safeBuffer). + * Note : you don't need to call LZ4_loadDict() afterwards, + * dictionary is immediately usable, you can therefore call LZ4_compress_fast_continue(). + * Return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error. + */ +int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize) +{ + LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse; + const BYTE* const previousDictEnd = dict->dictionary + dict->dictSize; + + if ((U32)dictSize > 64 KB) dictSize = 64 KB; /* useless to define a dictionary > 64 KB */ + if ((U32)dictSize > dict->dictSize) dictSize = dict->dictSize; + + memmove(safeBuffer, previousDictEnd - dictSize, dictSize); + + dict->dictionary = (const BYTE*)safeBuffer; + dict->dictSize = (U32)dictSize; + + return dictSize; +} + + + +/*-***************************** +* Decompression functions +*******************************/ +/*! LZ4_decompress_generic() : + * This generic decompression function cover all use cases. + * It shall be instantiated several times, using different sets of directives + * Note that it is important this generic function is really inlined, + * in order to remove useless branches during compilation optimization. 
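+ * As an editorial illustration of how it is instantiated (the combinations below
+ * are taken from the public wrappers further down in this file):
+ *    LZ4_decompress_safe()          -> endOnInputSize,  full,    noDict
+ *    LZ4_decompress_safe_partial()  -> endOnInputSize,  partial, noDict
+ *    LZ4_decompress_fast()          -> endOnOutputSize, full,    withPrefix64k
+ * so each public entry point compiles into a specialized decoder with the
+ * unused branches removed.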
+ */ +LZ4_FORCE_INLINE int LZ4_decompress_generic( + const char* const source, + char* const dest, + int inputSize, + int outputSize, /* If endOnInput==endOnInputSize, this value is the max size of Output Buffer. */ + + int endOnInput, /* endOnOutputSize, endOnInputSize */ + int partialDecoding, /* full, partial */ + int targetOutputSize, /* only used if partialDecoding==partial */ + int dict, /* noDict, withPrefix64k, usingExtDict */ + const BYTE* const lowPrefix, /* == dest when no prefix */ + const BYTE* const dictStart, /* only if dict==usingExtDict */ + const size_t dictSize /* note : = 0 if noDict */ + ) +{ + /* Local Variables */ + const BYTE* ip = (const BYTE*) source; + const BYTE* const iend = ip + inputSize; + + BYTE* op = (BYTE*) dest; + BYTE* const oend = op + outputSize; + BYTE* cpy; + BYTE* oexit = op + targetOutputSize; + const BYTE* const lowLimit = lowPrefix - dictSize; + + const BYTE* const dictEnd = (const BYTE*)dictStart + dictSize; + const unsigned dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4}; + const int dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3}; + + const int safeDecode = (endOnInput==endOnInputSize); + const int checkOffset = ((safeDecode) && (dictSize < (int)(64 KB))); + + + /* Special cases */ + if ((partialDecoding) && (oexit > oend-MFLIMIT)) oexit = oend-MFLIMIT; /* targetOutputSize too high => decode everything */ + if ((endOnInput) && (unlikely(outputSize==0))) return ((inputSize==1) && (*ip==0)) ? 0 : -1; /* Empty output buffer */ + if ((!endOnInput) && (unlikely(outputSize==0))) return (*ip==0?1:-1); + + /* Main Loop : decode sequences */ + while (1) { + size_t length; + const BYTE* match; + size_t offset; + + /* get literal length */ + unsigned const token = *ip++; + if ((length=(token>>ML_BITS)) == RUN_MASK) { + unsigned s; + do { + s = *ip++; + length += s; + } while ( likely(endOnInput ? 
ip(partialDecoding?oexit:oend-MFLIMIT)) || (ip+length>iend-(2+1+LASTLITERALS))) ) + || ((!endOnInput) && (cpy>oend-WILDCOPYLENGTH)) ) + { + if (partialDecoding) { + if (cpy > oend) goto _output_error; /* Error : write attempt beyond end of output buffer */ + if ((endOnInput) && (ip+length > iend)) goto _output_error; /* Error : read attempt beyond end of input buffer */ + } else { + if ((!endOnInput) && (cpy != oend)) goto _output_error; /* Error : block decoding must stop exactly there */ + if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) goto _output_error; /* Error : input must be consumed */ + } + memcpy(op, ip, length); + ip += length; + op += length; + break; /* Necessarily EOF, due to parsing restrictions */ + } + LZ4_wildCopy(op, ip, cpy); + ip += length; op = cpy; + + /* get offset */ + offset = LZ4_readLE16(ip); ip+=2; + match = op - offset; + if ((checkOffset) && (unlikely(match < lowLimit))) goto _output_error; /* Error : offset outside buffers */ + LZ4_write32(op, (U32)offset); /* costs ~1%; silence an msan warning when offset==0 */ + + /* get matchlength */ + length = token & ML_MASK; + if (length == ML_MASK) { + unsigned s; + do { + s = *ip++; + if ((endOnInput) && (ip > iend-LASTLITERALS)) goto _output_error; + length += s; + } while (s==255); + if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)op)) goto _output_error; /* overflow detection */ + } + length += MINMATCH; + + /* check external dictionary */ + if ((dict==usingExtDict) && (match < lowPrefix)) { + if (unlikely(op+length > oend-LASTLITERALS)) goto _output_error; /* doesn't respect parsing restriction */ + + if (length <= (size_t)(lowPrefix-match)) { + /* match can be copied as a single segment from external dictionary */ + memmove(op, dictEnd - (lowPrefix-match), length); + op += length; + } else { + /* match encompass external dictionary and current block */ + size_t const copySize = (size_t)(lowPrefix-match); + size_t const restSize = length - copySize; + memcpy(op, dictEnd - copySize, copySize); + op += copySize; + if (restSize > (size_t)(op-lowPrefix)) { /* overlap copy */ + BYTE* const endOfMatch = op + restSize; + const BYTE* copyFrom = lowPrefix; + while (op < endOfMatch) *op++ = *copyFrom++; + } else { + memcpy(op, lowPrefix, restSize); + op += restSize; + } } + continue; + } + + /* copy match within block */ + cpy = op + length; + if (unlikely(offset<8)) { + const int dec64 = dec64table[offset]; + op[0] = match[0]; + op[1] = match[1]; + op[2] = match[2]; + op[3] = match[3]; + match += dec32table[offset]; + memcpy(op+4, match, 4); + match -= dec64; + } else { LZ4_copy8(op, match); match+=8; } + op += 8; + + if (unlikely(cpy>oend-12)) { + BYTE* const oCopyLimit = oend-(WILDCOPYLENGTH-1); + if (cpy > oend-LASTLITERALS) goto _output_error; /* Error : last LASTLITERALS bytes must be literals (uncompressed) */ + if (op < oCopyLimit) { + LZ4_wildCopy(op, match, oCopyLimit); + match += oCopyLimit - op; + op = oCopyLimit; + } + while (op16) LZ4_wildCopy(op+8, match+8, cpy); + } + op=cpy; /* correction */ + } + + /* end of decoding */ + if (endOnInput) + return (int) (((char*)op)-dest); /* Nb of output bytes decoded */ + else + return (int) (((const char*)ip)-source); /* Nb of input bytes read */ + + /* Overflow error detected */ +_output_error: + return (int) (-(((const char*)ip)-source))-1; +} + + +int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize) +{ + return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, 
endOnInputSize, full, 0, noDict, (BYTE*)dest, NULL, 0); +} + +int LZ4_decompress_safe_partial(const char* source, char* dest, int compressedSize, int targetOutputSize, int maxDecompressedSize) +{ + return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, endOnInputSize, partial, targetOutputSize, noDict, (BYTE*)dest, NULL, 0); +} + +int LZ4_decompress_fast(const char* source, char* dest, int originalSize) +{ + return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)(dest - 64 KB), NULL, 64 KB); +} + + +/*===== streaming decompression functions =====*/ + +LZ4_streamDecode_t* LZ4_createStreamDecode(void) +{ + LZ4_streamDecode_t* lz4s = (LZ4_streamDecode_t*) ALLOCATOR(1, sizeof(LZ4_streamDecode_t)); + return lz4s; +} + +int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream) +{ + if (!LZ4_stream) return 0; /* support free on NULL */ + FREEMEM(LZ4_stream); + return 0; +} + +/*! + * LZ4_setStreamDecode() : + * Use this function to instruct where to find the dictionary. + * This function is not necessary if previous data is still available where it was decoded. + * Loading a size of 0 is allowed (same effect as no dictionary). + * Return : 1 if OK, 0 if error + */ +int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize) +{ + LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse; + lz4sd->prefixSize = (size_t) dictSize; + lz4sd->prefixEnd = (const BYTE*) dictionary + dictSize; + lz4sd->externalDict = NULL; + lz4sd->extDictSize = 0; + return 1; +} + +/* +*_continue() : + These decoding functions allow decompression of multiple blocks in "streaming" mode. + Previously decoded blocks must still be available at the memory position where they were decoded. 
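+    As an editorial illustration (not upstream text), a common double-buffer decode
+    loop looks roughly like this; read_block() and consume() are hypothetical helpers
+    and the 64 KB block size is only an example:
+
+       LZ4_streamDecode_t sd;
+       static char ring[2][64 * 1024];          // decoded blocks stay addressable
+       char comp[LZ4_COMPRESSBOUND(64 * 1024)];
+       int idx = 0, n;
+       LZ4_setStreamDecode(&sd, NULL, 0);       // start with no dictionary
+       while ((n = read_block(comp, sizeof(comp))) > 0) {
+          int dlen = LZ4_decompress_safe_continue(&sd, comp, ring[idx], n, (int)sizeof(ring[idx]));
+          if (dlen < 0) break;                  // malformed input
+          consume(ring[idx], dlen);
+          idx ^= 1;                             // previous block remains valid
+       }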
+ If it's not possible, save the relevant part of decoded data into a safe buffer, + and indicate where it stands using LZ4_setStreamDecode() +*/ +int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize) +{ + LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse; + int result; + + if (lz4sd->prefixEnd == (BYTE*)dest) { + result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, + endOnInputSize, full, 0, + usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize); + if (result <= 0) return result; + lz4sd->prefixSize += result; + lz4sd->prefixEnd += result; + } else { + lz4sd->extDictSize = lz4sd->prefixSize; + lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize; + result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, + endOnInputSize, full, 0, + usingExtDict, (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize); + if (result <= 0) return result; + lz4sd->prefixSize = result; + lz4sd->prefixEnd = (BYTE*)dest + result; + } + + return result; +} + +int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int originalSize) +{ + LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse; + int result; + + if (lz4sd->prefixEnd == (BYTE*)dest) { + result = LZ4_decompress_generic(source, dest, 0, originalSize, + endOnOutputSize, full, 0, + usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize); + if (result <= 0) return result; + lz4sd->prefixSize += originalSize; + lz4sd->prefixEnd += originalSize; + } else { + lz4sd->extDictSize = lz4sd->prefixSize; + lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize; + result = LZ4_decompress_generic(source, dest, 0, originalSize, + endOnOutputSize, full, 0, + usingExtDict, (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize); + if (result <= 0) return result; + lz4sd->prefixSize = originalSize; + lz4sd->prefixEnd = (BYTE*)dest + originalSize; + } + + return result; +} + + +/* +Advanced decoding functions : +*_usingDict() : + These decoding functions work the same as "_continue" ones, + the dictionary must be explicitly provided within parameters +*/ + +LZ4_FORCE_INLINE int LZ4_decompress_usingDict_generic(const char* source, char* dest, int compressedSize, int maxOutputSize, int safe, const char* dictStart, int dictSize) +{ + if (dictSize==0) + return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, (BYTE*)dest, NULL, 0); + if (dictStart+dictSize == dest) { + if (dictSize >= (int)(64 KB - 1)) + return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, withPrefix64k, (BYTE*)dest-64 KB, NULL, 0); + return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, (BYTE*)dest-dictSize, NULL, 0); + } + return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, usingExtDict, (BYTE*)dest, (const BYTE*)dictStart, dictSize); +} + +int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize) +{ + return LZ4_decompress_usingDict_generic(source, dest, compressedSize, maxOutputSize, 1, dictStart, dictSize); +} + +int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize) +{ + return 
LZ4_decompress_usingDict_generic(source, dest, 0, originalSize, 0, dictStart, dictSize); +} + +/* debug function */ +int LZ4_decompress_safe_forceExtDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize) +{ + return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, usingExtDict, (BYTE*)dest, (const BYTE*)dictStart, dictSize); +} + + +/*=************************************************* +* Obsolete Functions +***************************************************/ +/* obsolete compression functions */ +int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize) { return LZ4_compress_default(source, dest, inputSize, maxOutputSize); } +int LZ4_compress(const char* source, char* dest, int inputSize) { return LZ4_compress_default(source, dest, inputSize, LZ4_compressBound(inputSize)); } +int LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize) { return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1); } +int LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize) { return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1); } +int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, maxDstSize, 1); } +int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize) { return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1); } + +/* +These function names are deprecated and should no longer be used. +They are only provided here for compatibility with older user programs. 
+- LZ4_uncompress is totally equivalent to LZ4_decompress_fast +- LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe +*/ +int LZ4_uncompress (const char* source, char* dest, int outputSize) { return LZ4_decompress_fast(source, dest, outputSize); } +int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize) { return LZ4_decompress_safe(source, dest, isize, maxOutputSize); } + + +/* Obsolete Streaming functions */ + +int LZ4_sizeofStreamState() { return LZ4_STREAMSIZE; } + +static void LZ4_init(LZ4_stream_t* lz4ds, BYTE* base) +{ + MEM_INIT(lz4ds, 0, sizeof(LZ4_stream_t)); + lz4ds->internal_donotuse.bufferStart = base; +} + +int LZ4_resetStreamState(void* state, char* inputBuffer) +{ + if ((((uptrval)state) & 3) != 0) return 1; /* Error : pointer is not aligned on 4-bytes boundary */ + LZ4_init((LZ4_stream_t*)state, (BYTE*)inputBuffer); + return 0; +} + +void* LZ4_create (char* inputBuffer) +{ + LZ4_stream_t* lz4ds = (LZ4_stream_t*)ALLOCATOR(8, sizeof(LZ4_stream_t)); + LZ4_init (lz4ds, (BYTE*)inputBuffer); + return lz4ds; +} + +char* LZ4_slideInputBuffer (void* LZ4_Data) +{ + LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)LZ4_Data)->internal_donotuse; + int dictSize = LZ4_saveDict((LZ4_stream_t*)LZ4_Data, (char*)ctx->bufferStart, 64 KB); + return (char*)(ctx->bufferStart + dictSize); +} + +/* Obsolete streaming decompression functions */ + +int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize) +{ + return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB); +} + +int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize) +{ + return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB); +} + +#endif /* LZ4_COMMONDEFS_ONLY */ diff --git a/src/lib/lz4.h b/src/lib/lz4.h new file mode 100644 index 00000000..d284d630 --- /dev/null +++ b/src/lib/lz4.h @@ -0,0 +1,475 @@ +/* + * LZ4 - Fast LZ compression algorithm + * Header File + * Copyright (C) 2011-2017, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - LZ4 homepage : http://www.lz4.org + - LZ4 source repository : https://github.com/lz4/lz4 +*/ +#if defined (__cplusplus) +extern "C" { +#endif + +#ifndef LZ4_H_2983827168210 +#define LZ4_H_2983827168210 + +/* --- Dependency --- */ +#include /* size_t */ + + +/** + Introduction + + LZ4 is lossless compression algorithm, providing compression speed at 400 MB/s per core, + scalable with multi-cores CPU. It features an extremely fast decoder, with speed in + multiple GB/s per core, typically reaching RAM speed limits on multi-core systems. + + The LZ4 compression library provides in-memory compression and decompression functions. + Compression can be done in: + - a single step (described as Simple Functions) + - a single step, reusing a context (described in Advanced Functions) + - unbounded multiple steps (described as Streaming compression) + + lz4.h provides block compression functions. It gives full buffer control to user. + Decompressing an lz4-compressed block also requires metadata (such as compressed size). + Each application is free to encode such metadata in whichever way it wants. + + An additional format, called LZ4 frame specification (doc/lz4_Frame_format.md), + take care of encoding standard metadata alongside LZ4-compressed blocks. + If your application requires interoperability, it's recommended to use it. + A library is provided to take care of it, see lz4frame.h. +*/ + +/*^*************************************************************** +* Export parameters +*****************************************************************/ +/* +* LZ4_DLL_EXPORT : +* Enable exporting of functions when building a Windows DLL +* LZ4LIB_API : +* Control library symbols visibility. 
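+* Editorial illustration (not upstream text): a DLL build would typically compile
+* the library with LZ4_DLL_EXPORT=1 and its clients with LZ4_DLL_IMPORT=1, e.g.
+*     cl /LD /DLZ4_DLL_EXPORT=1 lz4.c
+* while on GCC/Clang the visibility attribute below is what keeps the API exported
+* when the rest of the build uses -fvisibility=hidden.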
+*/ +#if defined(LZ4_DLL_EXPORT) && (LZ4_DLL_EXPORT==1) +# define LZ4LIB_API __declspec(dllexport) +#elif defined(LZ4_DLL_IMPORT) && (LZ4_DLL_IMPORT==1) +# define LZ4LIB_API __declspec(dllimport) /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ +#elif defined(__GNUC__) && (__GNUC__ >= 4) +# define LZ4LIB_API __attribute__ ((__visibility__ ("default"))) +#else +# define LZ4LIB_API +#endif + + +/*------ Version ------*/ +#define LZ4_VERSION_MAJOR 1 /* for breaking interface changes */ +#define LZ4_VERSION_MINOR 8 /* for new (non-breaking) interface capabilities */ +#define LZ4_VERSION_RELEASE 0 /* for tweaks, bug-fixes, or development */ + +#define LZ4_VERSION_NUMBER (LZ4_VERSION_MAJOR *100*100 + LZ4_VERSION_MINOR *100 + LZ4_VERSION_RELEASE) + +#define LZ4_LIB_VERSION LZ4_VERSION_MAJOR.LZ4_VERSION_MINOR.LZ4_VERSION_RELEASE +#define LZ4_QUOTE(str) #str +#define LZ4_EXPAND_AND_QUOTE(str) LZ4_QUOTE(str) +#define LZ4_VERSION_STRING LZ4_EXPAND_AND_QUOTE(LZ4_LIB_VERSION) + +LZ4LIB_API int LZ4_versionNumber (void); /**< library version number; to be used when checking dll version */ +LZ4LIB_API const char* LZ4_versionString (void); /**< library version string; to be used when checking dll version */ + + +/*-************************************ +* Tuning parameter +**************************************/ +/*! + * LZ4_MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache + */ +#ifndef LZ4_MEMORY_USAGE +# define LZ4_MEMORY_USAGE 14 +#endif + +/*-************************************ +* Simple Functions +**************************************/ +/*! LZ4_compress_default() : + Compresses 'sourceSize' bytes from buffer 'source' + into already allocated 'dest' buffer of size 'maxDestSize'. + Compression is guaranteed to succeed if 'maxDestSize' >= LZ4_compressBound(sourceSize). + It also runs faster, so it's a recommended setting. + If the function cannot compress 'source' into a more limited 'dest' budget, + compression stops *immediately*, and the function result is zero. + As a consequence, 'dest' content is not valid. + This function never writes outside 'dest' buffer, nor read outside 'source' buffer. + sourceSize : Max supported value is LZ4_MAX_INPUT_VALUE + maxDestSize : full or partial size of buffer 'dest' (which must be already allocated) + return : the number of bytes written into buffer 'dest' (necessarily <= maxOutputSize) + or 0 if compression fails */ +LZ4LIB_API int LZ4_compress_default(const char* source, char* dest, int sourceSize, int maxDestSize); + +/*! LZ4_decompress_safe() : + compressedSize : is the precise full size of the compressed block. + maxDecompressedSize : is the size of destination buffer, which must be already allocated. + return : the number of bytes decompressed into destination buffer (necessarily <= maxDecompressedSize) + If destination buffer is not large enough, decoding will stop and output an error code (<0). + If the source stream is detected malformed, the function will stop decoding and return a negative result. + This function is protected against buffer overflow exploits, including malicious data packets. + It never writes outside output buffer, nor reads outside input buffer. 
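+
+   A minimal round-trip through the two simple functions above (illustrative only,
+   not part of the original header; error handling is abbreviated, and the
+   LZ4_COMPRESSBOUND() macro is defined further below in this header) :
+
+     char src[]  = "data to compress, data to compress, data to compress";
+     char cbuf[LZ4_COMPRESSBOUND(sizeof(src))];
+     char dbuf[sizeof(src)];
+     int  csize = LZ4_compress_default(src, cbuf, (int)sizeof(src), (int)sizeof(cbuf));
+     int  dsize = LZ4_decompress_safe(cbuf, dbuf, csize, (int)sizeof(dbuf));
+
+   On success csize > 0 and dsize == sizeof(src), with 'dbuf' holding the original
+   bytes; csize == 0 signals a compression failure and dsize < 0 a malformed input.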
+*/ +LZ4LIB_API int LZ4_decompress_safe (const char* source, char* dest, int compressedSize, int maxDecompressedSize); + + +/*-************************************ +* Advanced Functions +**************************************/ +#define LZ4_MAX_INPUT_SIZE 0x7E000000 /* 2 113 929 216 bytes */ +#define LZ4_COMPRESSBOUND(isize) ((unsigned)(isize) > (unsigned)LZ4_MAX_INPUT_SIZE ? 0 : (isize) + ((isize)/255) + 16) + +/*! +LZ4_compressBound() : + Provides the maximum size that LZ4 compression may output in a "worst case" scenario (input data not compressible) + This function is primarily useful for memory allocation purposes (destination buffer size). + Macro LZ4_COMPRESSBOUND() is also provided for compilation-time evaluation (stack memory allocation for example). + Note that LZ4_compress_default() compress faster when dest buffer size is >= LZ4_compressBound(srcSize) + inputSize : max supported value is LZ4_MAX_INPUT_SIZE + return : maximum output size in a "worst case" scenario + or 0, if input size is too large ( > LZ4_MAX_INPUT_SIZE) +*/ +LZ4LIB_API int LZ4_compressBound(int inputSize); + +/*! +LZ4_compress_fast() : + Same as LZ4_compress_default(), but allows to select an "acceleration" factor. + The larger the acceleration value, the faster the algorithm, but also the lesser the compression. + It's a trade-off. It can be fine tuned, with each successive value providing roughly +~3% to speed. + An acceleration value of "1" is the same as regular LZ4_compress_default() + Values <= 0 will be replaced by ACCELERATION_DEFAULT (see lz4.c), which is 1. +*/ +LZ4LIB_API int LZ4_compress_fast (const char* source, char* dest, int sourceSize, int maxDestSize, int acceleration); + + +/*! +LZ4_compress_fast_extState() : + Same compression function, just using an externally allocated memory space to store compression state. + Use LZ4_sizeofState() to know how much memory must be allocated, + and allocate it on 8-bytes boundaries (using malloc() typically). + Then, provide it as 'void* state' to compression function. +*/ +LZ4LIB_API int LZ4_sizeofState(void); +LZ4LIB_API int LZ4_compress_fast_extState (void* state, const char* source, char* dest, int inputSize, int maxDestSize, int acceleration); + + +/*! +LZ4_compress_destSize() : + Reverse the logic, by compressing as much data as possible from 'source' buffer + into already allocated buffer 'dest' of size 'targetDestSize'. + This function either compresses the entire 'source' content into 'dest' if it's large enough, + or fill 'dest' buffer completely with as much data as possible from 'source'. + *sourceSizePtr : will be modified to indicate how many bytes where read from 'source' to fill 'dest'. + New value is necessarily <= old value. + return : Nb bytes written into 'dest' (necessarily <= targetDestSize) + or 0 if compression fails +*/ +LZ4LIB_API int LZ4_compress_destSize (const char* source, char* dest, int* sourceSizePtr, int targetDestSize); + + +/*! +LZ4_decompress_fast() : + originalSize : is the original and therefore uncompressed size + return : the number of bytes read from the source buffer (in other words, the compressed size) + If the source stream is detected malformed, the function will stop decoding and return a negative result. + Destination buffer must be already allocated. Its size must be a minimum of 'originalSize' bytes. + note : This function fully respect memory boundaries for properly formed compressed data. + It is a bit faster than LZ4_decompress_safe(). 
+ However, it does not provide any protection against intentionally modified data stream (malicious input). + Use this function in trusted environment only (data to decode comes from a trusted source). +*/ +LZ4LIB_API int LZ4_decompress_fast (const char* source, char* dest, int originalSize); + +/*! +LZ4_decompress_safe_partial() : + This function decompress a compressed block of size 'compressedSize' at position 'source' + into destination buffer 'dest' of size 'maxDecompressedSize'. + The function tries to stop decompressing operation as soon as 'targetOutputSize' has been reached, + reducing decompression time. + return : the number of bytes decoded in the destination buffer (necessarily <= maxDecompressedSize) + Note : this number can be < 'targetOutputSize' should the compressed block to decode be smaller. + Always control how many bytes were decoded. + If the source stream is detected malformed, the function will stop decoding and return a negative result. + This function never writes outside of output buffer, and never reads outside of input buffer. It is therefore protected against malicious data packets +*/ +LZ4LIB_API int LZ4_decompress_safe_partial (const char* source, char* dest, int compressedSize, int targetOutputSize, int maxDecompressedSize); + + +/*-********************************************* +* Streaming Compression Functions +***********************************************/ +typedef union LZ4_stream_u LZ4_stream_t; /* incomplete type (defined later) */ + +/*! LZ4_createStream() and LZ4_freeStream() : + * LZ4_createStream() will allocate and initialize an `LZ4_stream_t` structure. + * LZ4_freeStream() releases its memory. + */ +LZ4LIB_API LZ4_stream_t* LZ4_createStream(void); +LZ4LIB_API int LZ4_freeStream (LZ4_stream_t* streamPtr); + +/*! LZ4_resetStream() : + * An LZ4_stream_t structure can be allocated once and re-used multiple times. + * Use this function to start compressing a new stream. + */ +LZ4LIB_API void LZ4_resetStream (LZ4_stream_t* streamPtr); + +/*! LZ4_loadDict() : + * Use this function to load a static dictionary into LZ4_stream_t. + * Any previous data will be forgotten, only 'dictionary' will remain in memory. + * Loading a size of 0 is allowed, and is the same as reset. + * @return : dictionary size, in bytes (necessarily <= 64 KB) + */ +LZ4LIB_API int LZ4_loadDict (LZ4_stream_t* streamPtr, const char* dictionary, int dictSize); + +/*! LZ4_compress_fast_continue() : + * Compress content into 'src' using data from previously compressed blocks, improving compression ratio. + * 'dst' buffer must be already allocated. + * If dstCapacity >= LZ4_compressBound(srcSize), compression is guaranteed to succeed, and runs faster. + * + * Important : Up to 64KB of previously compressed data is assumed to remain present and unmodified in memory ! + * Special 1 : If input buffer is a double-buffer, it can have any size, including < 64 KB. + * Special 2 : If input buffer is a ring-buffer, it can have any size, including < 64 KB. + * + * @return : size of compressed block + * or 0 if there is an error (typically, compressed data cannot fit into 'dst') + * After an error, the stream status is invalid, it can only be reset or freed. + */ +LZ4LIB_API int LZ4_compress_fast_continue (LZ4_stream_t* streamPtr, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration); + +/*! LZ4_saveDict() : + * If previously compressed data block is not guaranteed to remain available at its current memory location, + * save it into a safer place (char* safeBuffer). 
+ * Note : it's not necessary to call LZ4_loadDict() after LZ4_saveDict(), dictionary is immediately usable. + * @return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error. + */ +LZ4LIB_API int LZ4_saveDict (LZ4_stream_t* streamPtr, char* safeBuffer, int dictSize); + + +/*-********************************************** +* Streaming Decompression Functions +* Bufferless synchronous API +************************************************/ +typedef union LZ4_streamDecode_u LZ4_streamDecode_t; /* incomplete type (defined later) */ + +/*! LZ4_createStreamDecode() and LZ4_freeStreamDecode() : + * creation / destruction of streaming decompression tracking structure. + * A tracking structure can be re-used multiple times sequentially. */ +LZ4LIB_API LZ4_streamDecode_t* LZ4_createStreamDecode(void); +LZ4LIB_API int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream); + +/*! LZ4_setStreamDecode() : + * An LZ4_streamDecode_t structure can be allocated once and re-used multiple times. + * Use this function to start decompression of a new stream of blocks. + * A dictionary can optionnally be set. Use NULL or size 0 for a simple reset order. + * @return : 1 if OK, 0 if error + */ +LZ4LIB_API int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize); + +/*! LZ4_decompress_*_continue() : + * These decoding functions allow decompression of consecutive blocks in "streaming" mode. + * A block is an unsplittable entity, it must be presented entirely to a decompression function. + * Decompression functions only accept one block at a time. + * Previously decoded blocks *must* remain available at the memory position where they were decoded (up to 64 KB). + * + * Special : if application sets a ring buffer for decompression, it must respect one of the following conditions : + * - Exactly same size as encoding buffer, with same update rule (block boundaries at same positions) + * In which case, the decoding & encoding ring buffer can have any size, including very small ones ( < 64 KB). + * - Larger than encoding buffer, by a minimum of maxBlockSize more bytes. + * maxBlockSize is implementation dependent. It's the maximum size of any single block. + * In which case, encoding and decoding buffers do not need to be synchronized, + * and encoding ring buffer can have any size, including small ones ( < 64 KB). + * - _At least_ 64 KB + 8 bytes + maxBlockSize. + * In which case, encoding and decoding buffers do not need to be synchronized, + * and encoding ring buffer can have any size, including larger than decoding buffer. + * Whenever these conditions are not possible, save the last 64KB of decoded data into a safe buffer, + * and indicate where it is saved using LZ4_setStreamDecode() before decompressing next block. +*/ +LZ4LIB_API int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxDecompressedSize); +LZ4LIB_API int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int originalSize); + + +/*! LZ4_decompress_*_usingDict() : + * These decoding functions work the same as + * a combination of LZ4_setStreamDecode() followed by LZ4_decompress_*_continue() + * They are stand-alone, and don't need an LZ4_streamDecode_t structure. 
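+ *
+ * Illustrative sketch (not part of the original header; 'cbuf', 'csize', 'dict'
+ * and 'dictSize' stand for caller-provided compressed data and dictionary) :
+ *
+ *   char out[64 * 1024];   (output bound chosen by the caller)
+ *   int n = LZ4_decompress_safe_usingDict(cbuf, out, csize, (int)sizeof(out),
+ *                                         dict, dictSize);
+ *
+ * A negative n signals malformed input; otherwise n bytes were written into 'out'.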
+ */ +LZ4LIB_API int LZ4_decompress_safe_usingDict (const char* source, char* dest, int compressedSize, int maxDecompressedSize, const char* dictStart, int dictSize); +LZ4LIB_API int LZ4_decompress_fast_usingDict (const char* source, char* dest, int originalSize, const char* dictStart, int dictSize); + + +/*^********************************************** + * !!!!!! STATIC LINKING ONLY !!!!!! + ***********************************************/ +/*-************************************ + * Private definitions + ************************************** + * Do not use these definitions. + * They are exposed to allow static allocation of `LZ4_stream_t` and `LZ4_streamDecode_t`. + * Using these definitions will expose code to API and/or ABI break in future versions of the library. + **************************************/ +#define LZ4_HASHLOG (LZ4_MEMORY_USAGE-2) +#define LZ4_HASHTABLESIZE (1 << LZ4_MEMORY_USAGE) +#define LZ4_HASH_SIZE_U32 (1 << LZ4_HASHLOG) /* required as macro for static allocation */ + +#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) +#include + +typedef struct { + uint32_t hashTable[LZ4_HASH_SIZE_U32]; + uint32_t currentOffset; + uint32_t initCheck; + const uint8_t* dictionary; + uint8_t* bufferStart; /* obsolete, used for slideInputBuffer */ + uint32_t dictSize; +} LZ4_stream_t_internal; + +typedef struct { + const uint8_t* externalDict; + size_t extDictSize; + const uint8_t* prefixEnd; + size_t prefixSize; +} LZ4_streamDecode_t_internal; + +#else + +typedef struct { + unsigned int hashTable[LZ4_HASH_SIZE_U32]; + unsigned int currentOffset; + unsigned int initCheck; + const unsigned char* dictionary; + unsigned char* bufferStart; /* obsolete, used for slideInputBuffer */ + unsigned int dictSize; +} LZ4_stream_t_internal; + +typedef struct { + const unsigned char* externalDict; + size_t extDictSize; + const unsigned char* prefixEnd; + size_t prefixSize; +} LZ4_streamDecode_t_internal; + +#endif + +/*! + * LZ4_stream_t : + * information structure to track an LZ4 stream. + * init this structure before first use. + * note : only use in association with static linking ! + * this definition is not API/ABI safe, + * it may change in a future version ! + */ +#define LZ4_STREAMSIZE_U64 ((1 << (LZ4_MEMORY_USAGE-3)) + 4) +#define LZ4_STREAMSIZE (LZ4_STREAMSIZE_U64 * sizeof(unsigned long long)) +union LZ4_stream_u { + unsigned long long table[LZ4_STREAMSIZE_U64]; + LZ4_stream_t_internal internal_donotuse; +} ; /* previously typedef'd to LZ4_stream_t */ + + +/*! + * LZ4_streamDecode_t : + * information structure to track an LZ4 stream during decompression. + * init this structure using LZ4_setStreamDecode (or memset()) before first use + * note : only use in association with static linking ! + * this definition is not API/ABI safe, + * and may change in a future version ! + */ +#define LZ4_STREAMDECODESIZE_U64 4 +#define LZ4_STREAMDECODESIZE (LZ4_STREAMDECODESIZE_U64 * sizeof(unsigned long long)) +union LZ4_streamDecode_u { + unsigned long long table[LZ4_STREAMDECODESIZE_U64]; + LZ4_streamDecode_t_internal internal_donotuse; +} ; /* previously typedef'd to LZ4_streamDecode_t */ + + +/*-************************************ +* Obsolete Functions +**************************************/ + +/*! Deprecation warnings + Should deprecation warnings be a problem, + it is generally possible to disable them, + typically with -Wno-deprecated-declarations for gcc + or _CRT_SECURE_NO_WARNINGS in Visual. 
+ Otherwise, it's also possible to define LZ4_DISABLE_DEPRECATE_WARNINGS */ +#ifdef LZ4_DISABLE_DEPRECATE_WARNINGS +# define LZ4_DEPRECATED(message) /* disable deprecation warnings */ +#else +# define LZ4_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) +# if defined(__clang__) /* clang doesn't handle mixed C++11 and CNU attributes */ +# define LZ4_DEPRECATED(message) __attribute__((deprecated(message))) +# elif defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */ +# define LZ4_DEPRECATED(message) [[deprecated(message)]] +# elif (LZ4_GCC_VERSION >= 405) +# define LZ4_DEPRECATED(message) __attribute__((deprecated(message))) +# elif (LZ4_GCC_VERSION >= 301) +# define LZ4_DEPRECATED(message) __attribute__((deprecated)) +# elif defined(_MSC_VER) +# define LZ4_DEPRECATED(message) __declspec(deprecated(message)) +# else +# pragma message("WARNING: You need to implement LZ4_DEPRECATED for this compiler") +# define LZ4_DEPRECATED(message) +# endif +#endif /* LZ4_DISABLE_DEPRECATE_WARNINGS */ + +/* Obsolete compression functions */ +LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_default() instead") int LZ4_compress (const char* source, char* dest, int sourceSize); +LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_default() instead") int LZ4_compress_limitedOutput (const char* source, char* dest, int sourceSize, int maxOutputSize); +LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") int LZ4_compress_withState (void* state, const char* source, char* dest, int inputSize); +LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") int LZ4_compress_limitedOutput_withState (void* state, const char* source, char* dest, int inputSize, int maxOutputSize); +LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") int LZ4_compress_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize); +LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize, int maxOutputSize); + +/* Obsolete decompression functions */ +LZ4LIB_API LZ4_DEPRECATED("use LZ4_decompress_fast() instead") int LZ4_uncompress (const char* source, char* dest, int outputSize); +LZ4LIB_API LZ4_DEPRECATED("use LZ4_decompress_safe() instead") int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize); + +/* Obsolete streaming functions; use new streaming interface whenever possible */ +LZ4LIB_API LZ4_DEPRECATED("use LZ4_createStream() instead") void* LZ4_create (char* inputBuffer); +LZ4LIB_API LZ4_DEPRECATED("use LZ4_createStream() instead") int LZ4_sizeofStreamState(void); +LZ4LIB_API LZ4_DEPRECATED("use LZ4_resetStream() instead") int LZ4_resetStreamState(void* state, char* inputBuffer); +LZ4LIB_API LZ4_DEPRECATED("use LZ4_saveDict() instead") char* LZ4_slideInputBuffer (void* state); + +/* Obsolete streaming decoding functions */ +LZ4LIB_API LZ4_DEPRECATED("use LZ4_decompress_safe_usingDict() instead") int LZ4_decompress_safe_withPrefix64k (const char* src, char* dst, int compressedSize, int maxDstSize); +LZ4LIB_API LZ4_DEPRECATED("use LZ4_decompress_fast_usingDict() instead") int LZ4_decompress_fast_withPrefix64k (const char* src, char* dst, int originalSize); + +#endif /* LZ4_H_2983827168210 */ + + +#if defined (__cplusplus) +} +#endif diff --git a/src/lib/lz4_encoder.h b/src/lib/lz4_encoder.h new file mode 100644 index 00000000..94ebda16 --- /dev/null +++ b/src/lib/lz4_encoder.h @@ -0,0 +1,258 @@ +/* 
+ LZ4 Encoder - Part of LZ4 compression algorithm
+ Copyright (C) 2011-2013, Yann Collet.
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
+ - LZ4 source repository : http://code.google.com/p/lz4/
+*/
+
+/* lz4_encoder.h must be included into lz4.c
+ The objective of this file is to create a single LZ4 compression function source
+ which will be instantiated multiple times with minor variations
+ depending on a set of #define.
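+
+ Illustrative instantiation sketch (the defines actually used by the including
+ .c file are not shown in this patch, and the function name below is hypothetical;
+ FUNCTION_NAME, COMPRESS_64K and USE_HEAPMEMORY are the switches checked below) :
+
+   #define FUNCTION_NAME  LZ4_compress64kCtx
+   #define COMPRESS_64K
+   #define USE_HEAPMEMORY
+   #include "lz4_encoder.h"
+   #undef FUNCTION_NAME
+   #undef COMPRESS_64K
+   #undef USE_HEAPMEMORY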
+*/ + + + +//**************************** +// Check required defines +//**************************** + +#ifndef FUNCTION_NAME +# error "FUNTION_NAME is not defined" +#endif + + +//**************************** +// Local definitions +//**************************** + +#ifdef COMPRESS_64K +# define HASHLOG (MEMORY_USAGE-1) +# define CURRENT_H_TYPE U16 +# define CURRENTBASE(base) const BYTE* const base = ip +#else +# define HASHLOG (MEMORY_USAGE-2) +# define CURRENT_H_TYPE HTYPE +# define CURRENTBASE(base) INITBASE(base) +#endif + +#define HASHTABLE_NBCELLS (1U<> ((MINMATCH*8)-HASHLOG)) +#define LZ4_HASHVALUE(p) LZ4_HASH(A32(p)) + + + +//**************************** +// Function code +//**************************** + +int FUNCTION_NAME( +#ifdef USE_HEAPMEMORY + void* ctx, +#endif + const char* source, + char* dest, + int inputSize +#ifdef LIMITED_OUTPUT + ,int maxOutputSize +#endif + ) +{ +#ifdef USE_HEAPMEMORY + CURRENT_H_TYPE* HashTable = (CURRENT_H_TYPE*)ctx; +#else + CURRENT_H_TYPE HashTable[HASHTABLE_NBCELLS] = {0}; +#endif + + const BYTE* ip = (BYTE*) source; + CURRENTBASE(base); + const BYTE* anchor = ip; + const BYTE* const iend = ip + inputSize; + const BYTE* const mflimit = iend - MFLIMIT; +#define matchlimit (iend - LASTLITERALS) + + BYTE* op = (BYTE*) dest; +#ifdef LIMITED_OUTPUT + BYTE* const oend = op + maxOutputSize; +#endif + + int length; + const int skipStrength = SKIPSTRENGTH; + U32 forwardH; + + + // Init + if (inputSizeLZ4_64KLIMIT) return 0; // Size too large (not within 64K limit) +#endif +#ifdef USE_HEAPMEMORY + memset((void*)HashTable, 0, HASHTABLESIZE); +#endif + + // First Byte + HashTable[LZ4_HASHVALUE(ip)] = (CURRENT_H_TYPE)(ip - base); + ip++; forwardH = LZ4_HASHVALUE(ip); + + // Main Loop + for ( ; ; ) + { + int findMatchAttempts = (1U << skipStrength) + 3; + const BYTE* forwardIp = ip; + const BYTE* ref; + BYTE* token; + + // Find a match + do { + U32 h = forwardH; + int step = findMatchAttempts++ >> skipStrength; + ip = forwardIp; + forwardIp = ip + step; + + if unlikely(forwardIp > mflimit) { goto _last_literals; } + + forwardH = LZ4_HASHVALUE(forwardIp); + ref = base + HashTable[h]; + HashTable[h] = (CURRENT_H_TYPE)(ip - base); + + } while ((ref < ip - MAX_DISTANCE) || (A32(ref) != A32(ip))); + + // Catch up + while ((ip>anchor) && (ref>(BYTE*)source) && unlikely(ip[-1]==ref[-1])) { ip--; ref--; } + + // Encode Literal length + length = (int)(ip - anchor); + token = op++; +#ifdef LIMITED_OUTPUT + if unlikely(op + length + (2 + 1 + LASTLITERALS) + (length>>8) > oend) return 0; // Check output limit +#endif + if (length>=(int)RUN_MASK) + { + int len = length-RUN_MASK; + *token=(RUN_MASK<= 255 ; len-=255) *op++ = 255; + *op++ = (BYTE)len; + } + else *token = (BYTE)(length<>8) > oend) return 0; // Check output limit +#endif + if (length>=(int)ML_MASK) + { + *token += ML_MASK; + length -= ML_MASK; + for (; length > 509 ; length-=510) { *op++ = 255; *op++ = 255; } + if (length >= 255) { length-=255; *op++ = 255; } + *op++ = (BYTE)length; + } + else *token += (BYTE)length; + + // Test end of chunk + if (ip > mflimit) { anchor = ip; break; } + + // Fill table + HashTable[LZ4_HASHVALUE(ip-2)] = (CURRENT_H_TYPE)(ip - 2 - base); + + // Test next position + ref = base + HashTable[LZ4_HASHVALUE(ip)]; + HashTable[LZ4_HASHVALUE(ip)] = (CURRENT_H_TYPE)(ip - base); + if ((ref >= ip - MAX_DISTANCE) && (A32(ref) == A32(ip))) { token = op++; *token=0; goto _next_match; } + + // Prepare next loop + anchor = ip++; + forwardH = LZ4_HASHVALUE(ip); + } + +_last_literals: + // Encode 
Last Literals + { + int lastRun = (int)(iend - anchor); +#ifdef LIMITED_OUTPUT + if (((char*)op - dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)maxOutputSize) return 0; // Check output limit +#endif + if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<= 255 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; } + else *op++ = (BYTE)(lastRun<buf[0] = 0x67452301; + ctx->buf[1] = 0xefcdab89; + ctx->buf[2] = 0x98badcfe; + ctx->buf[3] = 0x10325476; + + ctx->bits[0] = 0; + ctx->bits[1] = 0; +} + +/* + * Update context to reflect the concatenation of another buffer full + * of bytes. + */ +void MD5Update(struct MD5Context *ctx, unsigned char *buf, unsigned len) +{ + uint32_t t; + + /* Update bitcount */ + + t = ctx->bits[0]; + if ((ctx->bits[0] = t + ((uint32_t) len << 3)) < t) + ctx->bits[1]++; /* Carry from low to high */ + ctx->bits[1] += len >> 29; + + t = (t >> 3) & 0x3f; /* Bytes already in shsInfo->data */ + + /* Handle any leading odd-sized chunks */ + + if (t) { + unsigned char *p = (unsigned char *) ctx->in + t; + + t = 64 - t; + if (len < t) { + memcpy(p, buf, len); + return; + } + memcpy(p, buf, t); + byteReverse(ctx->in, 16); + MD5Transform(ctx->buf, (uint32_t *) ctx->in); + buf += t; + len -= t; + } + /* Process data in 64-byte chunks */ + + while (len >= 64) { + memcpy(ctx->in, buf, 64); + byteReverse(ctx->in, 16); + MD5Transform(ctx->buf, (uint32_t *) ctx->in); + buf += 64; + len -= 64; + } + + /* Handle any remaining bytes of data. */ + + memcpy(ctx->in, buf, len); +} + +/* + * Final wrapup - pad to 64-byte boundary with the bit pattern + * 1 0* (64-bit count of bits processed, MSB-first) + */ +void MD5Final(unsigned char digest[16], struct MD5Context *ctx) +{ + unsigned count; + unsigned char *p; + + /* Compute number of bytes mod 64 */ + count = (ctx->bits[0] >> 3) & 0x3F; + + /* Set the first char of padding to 0x80. This is safe since there is + always at least one byte free */ + p = ctx->in + count; + *p++ = 0x80; + + /* Bytes of padding needed to make 64 bytes */ + count = 64 - 1 - count; + + /* Pad out to 56 mod 64 */ + if (count < 8) { + /* Two lots of padding: Pad the first block to 64 bytes */ + memset(p, 0, count); + byteReverse(ctx->in, 16); + MD5Transform(ctx->buf, (uint32_t *) ctx->in); + + /* Now fill the next block with 56 bytes */ + memset(ctx->in, 0, 56); + } else { + /* Pad block to 56 bytes */ + memset(p, 0, count - 8); + } + byteReverse(ctx->in, 14); + + /* Append length in bits and transform */ + ((uint32_t *) ctx->in)[14] = ctx->bits[0]; + ((uint32_t *) ctx->in)[15] = ctx->bits[1]; + + MD5Transform(ctx->buf, (uint32_t *) ctx->in); + byteReverse((unsigned char *) ctx->buf, 4); + memcpy(digest, ctx->buf, 16); + memset(ctx, 0, sizeof(struct MD5Context)); /* In case it's sensitive */ +} + + +/* The four core functions - F1 is optimized somewhat */ + +/* #define F1(x, y, z) (x & y | ~x & z) */ +#define F1(x, y, z) (z ^ (x & (y ^ z))) +#define F2(x, y, z) F1(z, x, y) +#define F3(x, y, z) (x ^ y ^ z) +#define F4(x, y, z) (y ^ (x | ~z)) + +/* This is the central step in the MD5 algorithm. */ +#define MD5STEP(f, w, x, y, z, data, s) \ + ( w += f(x, y, z) + data, w = w<>(32-s), w += x ) + +/* + * The core of the MD5 algorithm, this alters an existing MD5 hash to + * reflect the addition of 16 longwords of new data. MD5Update blocks + * the data and converts bytes into longwords for this routine. 
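+ *
+ * Typical use of the public entry points (illustrative only, not taken from
+ * this file; 'data' and 'len' stand for the caller's buffer) :
+ *
+ *   struct MD5Context ctx;
+ *   unsigned char digest[16];
+ *   MD5Init(&ctx);
+ *   MD5Update(&ctx, (unsigned char *)data, (unsigned)len);
+ *   MD5Final(digest, &ctx);
+ *
+ * MD5Update() may be called repeatedly, one chunk at a time; MD5Final() then
+ * writes the 16-byte binary digest and clears the context.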
+ */ +void MD5Transform(uint32_t buf[4], uint32_t in[16]) +{ + uint32_t a, b, c, d; + + a = buf[0]; + b = buf[1]; + c = buf[2]; + d = buf[3]; + + MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7); + MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12); + MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17); + MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22); + MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7); + MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12); + MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17); + MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22); + MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7); + MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12); + MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17); + MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22); + MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7); + MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12); + MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17); + MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22); + + MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5); + MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9); + MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14); + MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20); + MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5); + MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9); + MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14); + MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20); + MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5); + MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9); + MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14); + MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20); + MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5); + MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9); + MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14); + MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20); + + MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4); + MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11); + MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16); + MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23); + MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4); + MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11); + MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16); + MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23); + MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4); + MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11); + MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16); + MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23); + MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4); + MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11); + MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16); + MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23); + + MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6); + MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10); + MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15); + MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21); + MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6); + MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10); + MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15); + MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21); + MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6); + MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10); + MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15); + MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21); + MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6); + MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10); + MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15); + MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21); + + buf[0] += a; + buf[1] += b; + buf[2] += c; + buf[3] += d; +} + +#ifdef MD5_SUM +#define OUTPUT_BASE64 1 + +static void usage() +{ + 
fprintf(stderr, +"\n" +"Usage: md5sum [-d decode] \n" +" -d decode the data file\n" +" -? print this message.\n" +"\n\n"); + + exit(1); +} + +static bool decode = false; + +/* + * Reads a single ASCII file and prints the HEX md5 sum. + */ +#include +int main(int argc, char *argv[]) +{ + FILE *fd; + MD5Context ctx; + char buf[5000]; + char signature[20]; + int ch; + + while ((ch = getopt(argc, argv, "d?")) != -1) { + switch (ch) { + case 'd': + decode = true; + break; + case '?': + default: + usage(); + } + } + + argc -= optind; + argv += optind; + + if (argc < 1) { + printf("Must have filename\n"); + exit(1); + } + + fd = fopen(argv[0], "rb"); + if (!fd) { + printf("Could not open %s: ERR=%s\n", argv[0], strerror(errno)); + exit(1); + } + if (decode) { + goto decode_it; + } + MD5Init(&ctx); + while (fgets(buf, sizeof(buf), fd)) { + MD5Update(&ctx, (unsigned char *)buf, strlen(buf)); + } + MD5Final((unsigned char *)signature, &ctx); + for (int i=0; i < 16; i++) { + printf("%02x", signature[i]& 0xFF); + } +#ifdef OUTPUT_BASE64 + char MD5buf[40]; /* 24 should do */ + memset(MD5buf, 0, 40); + bin_to_base64(MD5buf, sizeof(MD5buf), (char *)signature, 16, true); /* encode 16 bytes */ + printf(" %s", MD5buf); +#endif + printf(" %s\n", argv[0]); + exit(0); + +decode_it: + while (fgets(buf, sizeof(buf), fd)) { + char bin[40]; + unsigned char *p = (unsigned char *)buf; + unsigned char ch; + int val; + for (int i=0; i < 16; i++) { + if (*p <= '9') { + val = *p - '0'; + } else { + val = *p - 'a' + 10; + } + ch = val << 4; + p++; + if (*p <= '9') { + val = *p - '0'; + } else { + val = *p - 'a' + 10; + } + signature[i] = ch + val; + p++; + } + signature[16] = 0; + printf("%s", buf); + bin_to_base64(bin, sizeof(bin), (char *)signature, 16, true); + printf("%s\n", bin); + } + fclose(fd); +} +#endif diff --git a/src/lib/md5.h b/src/lib/md5.h new file mode 100644 index 00000000..f1f5a8e1 --- /dev/null +++ b/src/lib/md5.h @@ -0,0 +1,43 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula MD5 definitions + * + * Kern Sibbald, 2001 + */ + +#ifndef __BMD5_H +#define __BMD5_H + +#define MD5HashSize 16 + +struct MD5Context { + uint32_t buf[4]; + uint32_t bits[2]; + uint8_t in[64]; +}; + +typedef struct MD5Context MD5Context; + +extern void MD5Init(struct MD5Context *ctx); +extern void MD5Update(struct MD5Context *ctx, unsigned char *buf, unsigned len); +extern void MD5Final(unsigned char digest[16], struct MD5Context *ctx); +extern void MD5Transform(uint32_t buf[4], uint32_t in[16]); + +#endif /* !__BMD5_H */ diff --git a/src/lib/mem_pool.c b/src/lib/mem_pool.c new file mode 100644 index 00000000..feedc2d6 --- /dev/null +++ b/src/lib/mem_pool.c @@ -0,0 +1,668 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula memory pool routines. + * + * The idea behind these routines is that there will be + * pools of memory that are pre-allocated for quick + * access. The pools will have a fixed memory size on allocation + * but if need be, the size can be increased. This is + * particularly useful for filename + * buffers where 256 bytes should be sufficient in 99.99% + * of the cases, but when it isn't we want to be able to + * increase the size. + * + * A major advantage of the pool memory aside from the speed + * is that the buffer carrys around its size, so to ensure that + * there is enough memory, simply call the check_pool_memory_size() + * with the desired size and it will adjust only if necessary. + * + * Kern E. Sibbald + * + */ + +#include "bacula.h" +#define dbglvl DT_MEMORY|800 + +#ifdef HAVE_MALLOC_TRIM +extern "C" int malloc_trim (size_t pad); +#endif + +struct s_pool_ctl { + int32_t size; /* default size */ + int32_t max_allocated; /* max allocated */ + int32_t max_used; /* max buffers used */ + int32_t in_use; /* number in use */ + struct abufhead *free_buf; /* pointer to free buffers */ +}; + +/* Bacula Name length plus extra */ +#define NLEN (MAX_NAME_LENGTH+2) + +/* #define STRESS_TEST_POOL */ +#ifndef STRESS_TEST_POOL +/* + * Define default Pool buffer sizes + */ +static struct s_pool_ctl pool_ctl[] = { + { 256, 256, 0, 0, NULL }, /* PM_NOPOOL no pooling */ + { NLEN, NLEN,0, 0, NULL }, /* PM_NAME Bacula name */ + { 256, 256, 0, 0, NULL }, /* PM_FNAME filename buffers */ + { 512, 512, 0, 0, NULL }, /* PM_MESSAGE message buffer */ + { 1024, 1024, 0, 0, NULL }, /* PM_EMSG error message buffer */ + { 4096, 4096, 0, 0, NULL } /* PM_BSOCK message buffer */ +}; +#else + +/* This is used ONLY when stress testing the code */ +static struct s_pool_ctl pool_ctl[] = { + { 20, 20, 0, 0, NULL }, /* PM_NOPOOL no pooling */ + { NLEN, NLEN,0, 0, NULL }, /* PM_NAME Bacula name */ + { 20, 20, 0, 0, NULL }, /* PM_FNAME filename buffers */ + { 20, 20, 0, 0, NULL }, /* PM_MESSAGE message buffer */ + { 20, 20, 0, 0, NULL }, /* PM_EMSG error message buffer */ + { 20, 20, 0, 0, NULL } /* PM_BSOCK message buffer */ +}; +#endif + + +/* Memory allocation control structures and storage. 
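+
+   A usage sketch for the pool interface described above (illustrative only, not
+   taken from this file; 'path' stands for any NUL-terminated caller string) :
+
+     POOLMEM *fname = get_pool_memory(PM_FNAME);
+     fname = check_pool_memory_size(fname, 2000);
+     pm_strcpy(fname, path);
+     free_pool_memory(fname);
+
+   check_pool_memory_size() reallocates only when the requested size exceeds the
+   current buffer size, and pm_strcpy() performs that check itself before copying.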
*/ +struct abufhead { + int32_t ablen; /* Buffer length in bytes */ + int32_t pool; /* pool */ + struct abufhead *next; /* pointer to next free buffer */ + int32_t bnet_size; /* dummy for bnet_send() */ + int32_t bnet_extension; /* dummy for bnet extension */ +}; + +static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; + +#define HEAD_SIZE BALIGN(sizeof(struct abufhead)) + +#ifdef SMARTALLOC + +POOLMEM *sm_get_pool_memory(const char *fname, int lineno, int pool) +{ + struct abufhead *buf; + + if (pool > PM_MAX) { + Emsg2(M_ABORT, 0, _("MemPool index %d larger than max %d\n"), pool, PM_MAX); + } + P(mutex); + if (pool_ctl[pool].free_buf) { + buf = pool_ctl[pool].free_buf; + pool_ctl[pool].free_buf = buf->next; + pool_ctl[pool].in_use++; + if (pool_ctl[pool].in_use > pool_ctl[pool].max_used) { + pool_ctl[pool].max_used = pool_ctl[pool].in_use; + } + V(mutex); + Dmsg3(dbglvl, "sm_get_pool_memory reuse %p to %s:%d\n", buf, fname, lineno); + sm_new_owner(fname, lineno, (char *)buf); + return (POOLMEM *)((char *)buf+HEAD_SIZE); + } + + if ((buf = (struct abufhead *)sm_malloc(fname, lineno, pool_ctl[pool].size+HEAD_SIZE)) == NULL) { + V(mutex); + Emsg1(M_ABORT, 0, _("Out of memory requesting %d bytes\n"), pool_ctl[pool].size); + } + buf->ablen = pool_ctl[pool].size; + buf->pool = pool; + pool_ctl[pool].in_use++; + if (pool_ctl[pool].in_use > pool_ctl[pool].max_used) { + pool_ctl[pool].max_used = pool_ctl[pool].in_use; + } + V(mutex); + Dmsg3(dbglvl, "sm_get_pool_memory give %p to %s:%d\n", buf, fname, lineno); + return (POOLMEM *)((char *)buf+HEAD_SIZE); +} + +/* Get nonpool memory of size requested */ +POOLMEM *sm_get_memory(const char *fname, int lineno, int32_t size) +{ + struct abufhead *buf; + int pool = 0; + + if ((buf = (struct abufhead *)sm_malloc(fname, lineno, size+HEAD_SIZE)) == NULL) { + Emsg1(M_ABORT, 0, _("Out of memory requesting %d bytes\n"), size); + } + buf->ablen = size; + buf->pool = pool; + buf->next = NULL; + P(mutex); + pool_ctl[pool].in_use++; + if (pool_ctl[pool].in_use > pool_ctl[pool].max_used) + pool_ctl[pool].max_used = pool_ctl[pool].in_use; + V(mutex); + return (POOLMEM *)(((char *)buf)+HEAD_SIZE); +} + +/* Return the size of a memory buffer */ +int32_t sm_sizeof_pool_memory(const char *fname, int lineno, POOLMEM *obuf) +{ + char *cp = (char *)obuf; + + if (obuf == NULL) { + Emsg0(M_ABORT, 0, _("obuf is NULL\n")); + } + cp -= HEAD_SIZE; + return ((struct abufhead *)cp)->ablen; +} + +/* Realloc pool memory buffer */ +POOLMEM *sm_realloc_pool_memory(const char *fname, int lineno, POOLMEM *obuf, int32_t size) +{ + char *cp = (char *)obuf; + void *buf; + int pool; + + ASSERT(obuf); + P(mutex); + cp -= HEAD_SIZE; + buf = sm_realloc(fname, lineno, cp, size+HEAD_SIZE); + if (buf == NULL) { + V(mutex); + Emsg1(M_ABORT, 0, _("Out of memory requesting %d bytes\n"), size); + } + ((struct abufhead *)buf)->ablen = size; + pool = ((struct abufhead *)buf)->pool; + if (size > pool_ctl[pool].max_allocated) { + pool_ctl[pool].max_allocated = size; + } + V(mutex); + return (POOLMEM *)(((char *)buf)+HEAD_SIZE); +} + +POOLMEM *sm_check_pool_memory_size(const char *fname, int lineno, POOLMEM *obuf, int32_t size) +{ + ASSERT(obuf); + if (size <= sizeof_pool_memory(obuf)) { + return obuf; + } + return realloc_pool_memory(obuf, size); +} + +/* Free a memory buffer */ +void sm_free_pool_memory(const char *fname, int lineno, POOLMEM *obuf) +{ + struct abufhead *buf; + int pool; + + ASSERT(obuf); + P(mutex); + buf = (struct abufhead *)((char *)obuf - HEAD_SIZE); + pool = buf->pool; + 
pool_ctl[pool].in_use--; + if (pool == 0) { + free((char *)buf); /* free nonpooled memory */ + } else { /* otherwise link it to the free pool chain */ + + /* Disabled because it hangs in #5507 */ +#ifdef xDEBUG + struct abufhead *next; + /* Don't let him free the same buffer twice */ + for (next=pool_ctl[pool].free_buf; next; next=next->next) { + if (next == buf) { + Dmsg4(dbglvl, "free_pool_memory %p pool=%d from %s:%d\n", buf, pool, fname, lineno); + Dmsg4(dbglvl, "bad free_pool_memory %p pool=%d from %s:%d\n", buf, pool, fname, lineno); + V(mutex); /* unblock the pool */ + ASSERT(next != buf); /* attempt to free twice */ + } + } +#endif + buf->next = pool_ctl[pool].free_buf; + pool_ctl[pool].free_buf = buf; + } + Dmsg4(dbglvl, "free_pool_memory %p pool=%d from %s:%d\n", buf, pool, fname, lineno); + V(mutex); +} + +#else + +/* ========= NO SMARTALLOC ========================================= */ + +POOLMEM *get_pool_memory(int pool) +{ + struct abufhead *buf; + + P(mutex); + if (pool_ctl[pool].free_buf) { + buf = pool_ctl[pool].free_buf; + pool_ctl[pool].free_buf = buf->next; + V(mutex); + return (POOLMEM *)((char *)buf+HEAD_SIZE); + } + + if ((buf=(struct abufhead*)malloc(pool_ctl[pool].size+HEAD_SIZE)) == NULL) { + V(mutex); + Emsg1(M_ABORT, 0, _("Out of memory requesting %d bytes\n"), pool_ctl[pool].size); + } + buf->ablen = pool_ctl[pool].size; + buf->pool = pool; + buf->next = NULL; + pool_ctl[pool].in_use++; + if (pool_ctl[pool].in_use > pool_ctl[pool].max_used) { + pool_ctl[pool].max_used = pool_ctl[pool].in_use; + } + V(mutex); + return (POOLMEM *)(((char *)buf)+HEAD_SIZE); +} + +/* Get nonpool memory of size requested */ +POOLMEM *get_memory(int32_t size) +{ + struct abufhead *buf; + int pool = 0; + + if ((buf=(struct abufhead *)malloc(size+HEAD_SIZE)) == NULL) { + Emsg1(M_ABORT, 0, _("Out of memory requesting %d bytes\n"), size); + } + buf->ablen = size; + buf->pool = pool; + buf->next = NULL; + pool_ctl[pool].in_use++; + if (pool_ctl[pool].in_use > pool_ctl[pool].max_used) { + pool_ctl[pool].max_used = pool_ctl[pool].in_use; + } + return (POOLMEM *)(((char *)buf)+HEAD_SIZE); +} + +/* Return the size of a memory buffer */ +int32_t sizeof_pool_memory(POOLMEM *obuf) +{ + char *cp = (char *)obuf; + + ASSERT(obuf); + cp -= HEAD_SIZE; + return ((struct abufhead *)cp)->ablen; +} + +/* Realloc pool memory buffer */ +POOLMEM *realloc_pool_memory(POOLMEM *obuf, int32_t size) +{ + char *cp = (char *)obuf; + void *buf; + int pool; + + ASSERT(obuf); + P(mutex); + cp -= HEAD_SIZE; + buf = realloc(cp, size+HEAD_SIZE); + if (buf == NULL) { + V(mutex); + Emsg1(M_ABORT, 0, _("Out of memory requesting %d bytes\n"), size); + } + ((struct abufhead *)buf)->ablen = size; + pool = ((struct abufhead *)buf)->pool; + if (size > pool_ctl[pool].max_allocated) { + pool_ctl[pool].max_allocated = size; + } + V(mutex); + return (POOLMEM *)(((char *)buf)+HEAD_SIZE); +} + +POOLMEM *check_pool_memory_size(POOLMEM *obuf, int32_t size) +{ + ASSERT(obuf); + if (size <= sizeof_pool_memory(obuf)) { + return obuf; + } + return realloc_pool_memory(obuf, size); +} + +/* Free a memory buffer */ +void free_pool_memory(POOLMEM *obuf) +{ + struct abufhead *buf; + int pool; + + ASSERT(obuf); + P(mutex); + buf = (struct abufhead *)((char *)obuf - HEAD_SIZE); + pool = buf->pool; + pool_ctl[pool].in_use--; + if (pool == 0) { + free((char *)buf); /* free nonpooled memory */ + } else { /* otherwise link it to the free pool chain */ +#ifdef DEBUG + struct abufhead *next; + /* Don't let him free the same buffer twice */ + for 
(next=pool_ctl[pool].free_buf; next; next=next->next) { + if (next == buf) { + V(mutex); + ASSERT(next != buf); /* attempt to free twice */ + } + } +#endif + buf->next = pool_ctl[pool].free_buf; + pool_ctl[pool].free_buf = buf; + } + Dmsg2(dbglvl, "free_pool_memory %p pool=%d\n", buf, pool); + V(mutex); +} +#endif /* SMARTALLOC */ + +/* + * Clean up memory pool periodically + * + */ +static time_t last_garbage_collection = 0; +const int garbage_interval = 24 * 60 * 60; /* garbage collect every 24 hours */ + +void garbage_collect_memory_pool() +{ + time_t now; + + Dmsg0(200, "garbage collect memory pool\n"); + P(mutex); + if (last_garbage_collection == 0) { + last_garbage_collection = time(NULL); + V(mutex); + return; + } + now = time(NULL); + if (now >= last_garbage_collection + garbage_interval || + sm_bytes > 500000) { + last_garbage_collection = now; + V(mutex); + garbage_collect_memory(); + } else { + V(mutex); + } +} + +/* Release all freed pooled memory */ +void close_memory_pool() +{ + struct abufhead *buf, *next; + int count = 0; + uint64_t bytes = 0; + char ed1[50]; + + sm_check(__FILE__, __LINE__, false); + P(mutex); + for (int i=1; i<=PM_MAX; i++) { + buf = pool_ctl[i].free_buf; + while (buf) { + next = buf->next; + count++; + bytes += sizeof_pool_memory((char *)buf); + free((char *)buf); + buf = next; + } + pool_ctl[i].free_buf = NULL; + } + Dmsg2(DT_MEMORY|001, "Freed mem_pool count=%d size=%s\n", count, edit_uint64_with_commas(bytes, ed1)); + if (chk_dbglvl(DT_MEMORY|1)) { + print_memory_pool_stats(); + } + V(mutex); + +} + +/* + * Garbage collect and trim memory if possible + * This should be called after all big memory usages + * if possible. + */ +void garbage_collect_memory() +{ + close_memory_pool(); /* release free chain */ +#ifdef HAVE_MALLOC_TRIM + P(mutex); + malloc_trim(8192); + V(mutex); +#endif +} + +#ifdef DEBUG +static const char *pool_name(int pool) +{ + static const char *name[] = {"NoPool", "NAME ", "FNAME ", "MSG ", "EMSG ", "BSOCK "}; + static char buf[30]; + + if (pool >= 0 && pool <= PM_MAX) { + return name[pool]; + } + sprintf(buf, "%-6d", pool); + return buf; +} + +/* Print staticstics on memory pool usage + */ +void print_memory_pool_stats() +{ + Pmsg0(-1, "Pool Maxsize Maxused Inuse\n"); + for (int i=0; i<=PM_MAX; i++) + Pmsg4(-1, "%5s %7d %7d %5d\n", pool_name(i), pool_ctl[i].max_allocated, + pool_ctl[i].max_used, pool_ctl[i].in_use); + + Pmsg0(-1, "\n"); +} + +#else +void print_memory_pool_stats() {} +#endif /* DEBUG */ + +/* + * Concatenate a string (str) onto a pool memory buffer pm + * Returns: length of concatenated string + */ +int pm_strcat(POOLMEM **pm, const char *str) +{ + int pmlen = strlen(*pm); + int len; + + if (!str) str = ""; + + len = strlen(str) + 1; + *pm = check_pool_memory_size(*pm, pmlen + len); + memcpy(*pm+pmlen, str, len); + return pmlen + len - 1; +} + +int pm_strcat(POOLMEM *&pm, const char *str) +{ + int pmlen = strlen(pm); + int len; + + if (!str) str = ""; + + len = strlen(str) + 1; + pm = check_pool_memory_size(pm, pmlen + len); + memcpy(pm+pmlen, str, len); + return pmlen + len - 1; +} + +int pm_strcat(POOLMEM *&pm, POOL_MEM &str) +{ + int pmlen = strlen(pm); + int len = strlen(str.c_str()) + 1; + + pm = check_pool_memory_size(pm, pmlen + len); + memcpy(pm+pmlen, str.c_str(), len); + return pmlen + len - 1; +} + +int pm_strcat(POOL_MEM &pm, const char *str) +{ + int pmlen = strlen(pm.c_str()); + int len; + + if (!str) str = ""; + + len = strlen(str) + 1; + pm.check_size(pmlen + len); + memcpy(pm.c_str()+pmlen, str, 
len); + return pmlen + len - 1; +} + +int pm_strcat(POOL_MEM &pm, POOL_MEM &str) +{ + int pmlen = strlen(pm.c_str()); + int len; + + len = strlen(str.c_str()) + 1; + pm.check_size(pmlen + len); + memcpy(pm.c_str()+pmlen, str.c_str(), len); + return pmlen + len - 1; +} + +/* + * Copy a string (str) into a pool memory buffer pm + * Returns: length of string copied + */ +int pm_strcpy(POOLMEM **pm, const char *str) +{ + int len; + + if (!str) str = ""; + + len = strlen(str) + 1; + *pm = check_pool_memory_size(*pm, len); + memcpy(*pm, str, len); + return len - 1; +} + +int pm_strcpy(POOLMEM *&pm, const char *str) +{ + int len; + + if (!str) str = ""; + + len = strlen(str) + 1; + pm = check_pool_memory_size(pm, len); + memcpy(pm, str, len); + return len - 1; +} + +int pm_strcpy(POOLMEM *&pm, POOL_MEM &str) +{ + int len = strlen(str.c_str()) + 1; + + pm = check_pool_memory_size(pm, len); + memcpy(pm, str.c_str(), len); + return len - 1; +} + +int pm_strcpy(POOL_MEM &pm, const char *str) +{ + int len; + + if (!str) str = ""; + + len = strlen(str) + 1; + pm.check_size(len); + memcpy(pm.c_str(), str, len); + return len - 1; +} + +/* + * Copy data into a pool memory buffer pm + * Returns: length of data copied + */ +int pm_memcpy(POOLMEM **pm, const char *data, int32_t n) +{ + *pm = check_pool_memory_size(*pm, n); + memcpy(*pm, data, n); + return n; +} + +int pm_memcpy(POOLMEM *&pm, const char *data, int32_t n) +{ + pm = check_pool_memory_size(pm, n); + memcpy(pm, data, n); + return n; +} + +int pm_memcpy(POOLMEM *&pm, POOL_MEM &data, int32_t n) +{ + pm = check_pool_memory_size(pm, n); + memcpy(pm, data.c_str(), n); + return n; +} + +int pm_memcpy(POOL_MEM &pm, const char *data, int32_t n) +{ + pm.check_size(n); + memcpy(pm.c_str(), data, n); + return n; +} + +/* ============== CLASS POOL_MEM ============== */ + +/* Return the size of a memory buffer */ +int32_t POOL_MEM::max_size() +{ + int32_t size; + char *cp = mem; + cp -= HEAD_SIZE; + size = ((struct abufhead *)cp)->ablen; + Dmsg1(900, "max_size=%d\n", size); + return size; +} + +void POOL_MEM::realloc_pm(int32_t size) +{ + char *cp = mem; + char *buf; + int pool; + + P(mutex); + cp -= HEAD_SIZE; + buf = (char *)realloc(cp, size+HEAD_SIZE); + if (buf == NULL) { + V(mutex); + Emsg1(M_ABORT, 0, _("Out of memory requesting %d bytes\n"), size); + } + Dmsg2(900, "Old buf=%p new buf=%p\n", cp, buf); + ((struct abufhead *)buf)->ablen = size; + pool = ((struct abufhead *)buf)->pool; + if (size > pool_ctl[pool].max_allocated) { + pool_ctl[pool].max_allocated = size; + } + mem = buf+HEAD_SIZE; + V(mutex); + Dmsg3(900, "Old buf=%p new buf=%p mem=%p\n", cp, buf, mem); +} + +int POOL_MEM::strcat(const char *str) +{ + int pmlen = strlen(mem); + int len; + + if (!str) str = ""; + + len = strlen(str) + 1; + check_size(pmlen + len); + memcpy(mem+pmlen, str, len); + return pmlen + len - 1; +} + +int POOL_MEM::strcpy(const char *str) +{ + int len; + + if (!str) str = ""; + + len = strlen(str) + 1; + check_size(len); + memcpy(mem, str, len); + return len - 1; +} diff --git a/src/lib/mem_pool.h b/src/lib/mem_pool.h new file mode 100644 index 00000000..95172cb8 --- /dev/null +++ b/src/lib/mem_pool.h @@ -0,0 +1,116 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Memory Pool prototypes + * + * Kern Sibbald, MM + * + */ + +#ifndef __MEM_POOL_H_ +#define __MEM_POOL_H_ + + +#ifdef SMARTALLOC + +#define get_pool_memory(pool) sm_get_pool_memory(__FILE__, __LINE__, pool) +extern POOLMEM *sm_get_pool_memory(const char *file, int line, int pool); + +#define get_memory(size) sm_get_memory(__FILE__, __LINE__, size) +extern POOLMEM *sm_get_memory(const char *fname, int line, int32_t size); + +#define sizeof_pool_memory(buf) sm_sizeof_pool_memory(__FILE__, __LINE__, buf) +extern int32_t sm_sizeof_pool_memory(const char *fname, int line, POOLMEM *buf); + +#define realloc_pool_memory(buf,size) sm_realloc_pool_memory(__FILE__, __LINE__, buf, size) +extern POOLMEM *sm_realloc_pool_memory(const char *fname, int line, POOLMEM *buf, int32_t size); + +#define check_pool_memory_size(buf,size) sm_check_pool_memory_size(__FILE__, __LINE__, buf, size) +extern POOLMEM *sm_check_pool_memory_size(const char *fname, int line, POOLMEM *buf, int32_t size); + +#define free_pool_memory(x) sm_free_pool_memory(__FILE__, __LINE__, x) +#define free_memory(x) sm_free_pool_memory(__FILE__, __LINE__, x) +extern void sm_free_pool_memory(const char *fname, int line, POOLMEM *buf); + +#else + +extern POOLMEM *get_pool_memory(int pool); +extern POOLMEM *get_memory(int32_t size); +extern int32_t sizeof_pool_memory(POOLMEM *buf); +extern POOLMEM *realloc_pool_memory(POOLMEM *buf, int32_t size); +extern POOLMEM *check_pool_memory_size(POOLMEM *buf, int32_t size); +#define free_memory(x) free_pool_memory(x) +extern void free_pool_memory(POOLMEM *buf); + +#endif + +/* Macro to simplify free/reset pointers */ +#define free_and_null_pool_memory(a) do{if(a){free_pool_memory(a); (a)=NULL;}} while(0) + +extern void garbage_collect_memory_pool(); +extern void close_memory_pool(); +extern void print_memory_pool_stats(); + +extern void garbage_collect_memory(); + + +#define PM_NOPOOL 0 /* nonpooled memory */ +#define PM_NAME 1 /* Bacula name */ +#define PM_FNAME 2 /* file name buffer */ +#define PM_MESSAGE 3 /* daemon message */ +#define PM_EMSG 4 /* error message */ +#define PM_BSOCK 5 /* BSOCK buffer */ +#define PM_MAX PM_BSOCK /* Number of types */ + +class POOL_MEM { + char *mem; +public: + POOL_MEM() { mem = get_pool_memory(PM_NAME); *mem = 0; } + POOL_MEM(int pool) { mem = get_pool_memory(pool); *mem = 0; } + ~POOL_MEM() { free_pool_memory(mem); mem = NULL; } + char *c_str() const { return mem; } + POOLMEM *&addr() { return mem; } + int size() const { return sizeof_pool_memory(mem); } + char *check_size(int32_t size) { + mem = check_pool_memory_size(mem, size); + return mem; + } + int32_t max_size(); + void realloc_pm(int32_t size); + int strcpy(const char *str); + int strcat(const char *str); +}; + +int pm_strcat(POOLMEM **pm, const char *str); +int pm_strcat(POOLMEM *&pm, const char *str); +int pm_strcat(POOL_MEM &pm, const char *str); +int pm_strcat(POOLMEM *&pm, POOL_MEM &str); +int pm_strcat(POOL_MEM &pm, POOL_MEM &str); + +int pm_strcpy(POOLMEM **pm, const char *str); +int pm_strcpy(POOLMEM *&pm, const char *str); +int pm_strcpy(POOL_MEM &pm, const char *str); +int pm_strcpy(POOLMEM 
*&pm, POOL_MEM &str); + +int pm_memcpy(POOLMEM **pm, const char *data, int32_t n); +int pm_memcpy(POOLMEM *&pm, const char *data, int32_t n); +int pm_memcpy(POOL_MEM &pm, const char *data, int32_t n); +int pm_memcpy(POOLMEM *&pm, POOL_MEM &data, int32_t n); + +#endif diff --git a/src/lib/message.c b/src/lib/message.c new file mode 100644 index 00000000..b8e78ed5 --- /dev/null +++ b/src/lib/message.c @@ -0,0 +1,2002 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula message handling routines + * + * NOTE: don't use any Jmsg or Qmsg calls within this file, + * except in q_msg or j_msg (setup routines), + * otherwise you may get into recursive calls if there are + * errors, and that can lead to looping or deadlocks. + * + * Kern Sibbald, April 2000 + * + */ + +#include "bacula.h" +#include "jcr.h" + +sql_query_call p_sql_query = NULL; +sql_escape_call p_sql_escape = NULL; + +#define FULL_LOCATION 1 /* set for file:line in Debug messages */ + +/* + * This is where we define "Globals" because all the + * daemons include this file. + */ +dlist *daemon_msg_queue = NULL; +pthread_mutex_t daemon_msg_queue_mutex = PTHREAD_MUTEX_INITIALIZER; +static bool dequeuing_daemon_msgs = false; +const char *working_directory = NULL; /* working directory path stored here */ +const char *assert_msg = NULL; /* ASSERT2 error message */ +const char *version = VERSION " (" BDATE ")"; +const char *dist_name = DISTNAME " " DISTVER; +char *exepath = (char *)NULL; +char *exename = (char *)NULL; +char db_engine_name[50] = {0}; /* Database engine name or type */ +char con_fname[500]; /* Console filename */ +char my_name[MAX_NAME_LENGTH] = {0}; /* daemon name is stored here */ +char host_name[50] = {0}; /* host machine name */ +char fail_time[30] = {0}; /* Time of failure */ +int verbose = 0; /* increase User messages */ +int64_t debug_level = 0; /* debug level */ +int64_t debug_level_tags = 0; /* debug tags */ +int32_t debug_flags = 0; /* debug flags */ +bool console_msg_pending = false; +utime_t daemon_start_time = 0; /* Daemon start time */ +FILE *con_fd = NULL; /* Console file descriptor */ +brwlock_t con_lock; /* Console lock structure */ +bool dbg_timestamp = false; /* print timestamp in debug output */ +bool dbg_thread = false; /* add thread_id to details */ +bool prt_kaboom = false; /* Print kaboom output */ +job_code_callback_t message_job_code_callback = NULL; /* Job code callback. Only used by director. 
*/ + +/* Forward referenced functions */ + +/* Imported functions */ +void create_jcr_key(); + +/* Static storage */ + +/* Exclude spaces but require .mail at end */ +#define MAIL_REGEX "^[^ ]+\\.mail$" + +/* Allow only one thread to tweak d->fd at a time */ +static pthread_mutex_t fides_mutex = PTHREAD_MUTEX_INITIALIZER; +static MSGS *daemon_msgs; /* global messages */ +static void (*message_callback)(int type, char *msg) = NULL; +static FILE *trace_fd = NULL; +#if defined(HAVE_WIN32) +static bool trace = true; +#else +static bool trace = false; +#endif +static int hangup = 0; +static int blowup = 0; + +/* Constants */ +const char *host_os = HOST_OS; +const char *distname = DISTNAME; +const char *distver = DISTVER; + +/* + * Walk back in a string from end looking for a + * path separator. + * This routine is passed the start of the string and + * the end of the string, it returns either the beginning + * of the string or where it found a path separator. + */ +static const char *bstrrpath(const char *start, const char *end) +{ + while ( end > start ) { + end--; + if (IsPathSeparator(*end)) { + break; + } + } + return end; +} + +/* Some message class methods */ +void MSGS::lock() +{ + P(fides_mutex); +} + +void MSGS::unlock() +{ + V(fides_mutex); +} + +/* + * Wait for not in use variable to be clear + */ +void MSGS::wait_not_in_use() /* leaves fides_mutex set */ +{ + lock(); + while (m_in_use || m_closing) { + unlock(); + bmicrosleep(0, 200); /* wait */ + lock(); + } +} + +/* + * Handle message delivery errors + */ +static void delivery_error(const char *fmt,...) +{ + va_list arg_ptr; + int i, len, maxlen; + POOLMEM *pool_buf; + char dt[MAX_TIME_LENGTH]; + int dtlen; + + pool_buf = get_pool_memory(PM_EMSG); + + bstrftime_ny(dt, sizeof(dt), time(NULL)); + dtlen = strlen(dt); + dt[dtlen++] = ' '; + dt[dtlen] = 0; + + i = Mmsg(pool_buf, "%s Message delivery ERROR: ", dt); + + for (;;) { + maxlen = sizeof_pool_memory(pool_buf) - i - 1; + va_start(arg_ptr, fmt); + len = bvsnprintf(pool_buf+i, maxlen, fmt, arg_ptr); + va_end(arg_ptr); + if (len < 0 || len >= (maxlen-5)) { + pool_buf = realloc_pool_memory(pool_buf, maxlen + i + maxlen/2); + continue; + } + break; + } + + fputs(pool_buf, stdout); /* print this here to INSURE that it is printed */ + fflush(stdout); + syslog(LOG_DAEMON|LOG_ERR, "%s", pool_buf); + free_memory(pool_buf); +} + +void set_debug_flags(char *options) +{ + for (char *p = options; *p ; p++) { + switch(*p) { + case '0': /* clear flags */ + debug_flags = 0; + break; + + case 'i': /* used by FD */ + case 'd': /* used by FD */ + break; + + case 't': + dbg_timestamp = true; + break; + + case 'T': + dbg_timestamp = false; + break; + + case 'h': + dbg_thread = true; + break; + + case 'H': + dbg_thread = false; + break; + + case 'c': + /* truncate the trace file */ + if (trace && trace_fd) { + ftruncate(fileno(trace_fd), 0); + } + break; + + case 'l': + /* Turn on/off add_events for P()/V() */ + debug_flags |= DEBUG_MUTEX_EVENT; + break; + + case 'p': + /* Display event stack during lockdump */ + debug_flags |= DEBUG_PRINT_EVENT; + break; + + default: + Dmsg1(000, "Unknown debug flag %c\n", *p); + } + } +} + +void register_message_callback(void msg_callback(int type, char *msg)) +{ + message_callback = msg_callback; +} + + +/* + * Set daemon name. Also, find canonical execution + * path. Note, exepath has spare room for tacking on + * the exename so that we can reconstruct the full name. 
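+ *
+ * For illustration (the path is only an assumed example): if the daemon
+ * is started as /opt/bacula/bin/bacula-fd, exename ends up as "bacula-fd"
+ * and exepath as "/opt/bacula/bin/" (trailing separator kept).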
+ * + * Note, this routine can get called multiple times + * The second time is to put the name as found in the + * Resource record. On the second call, generally, + * argv is NULL to avoid doing the path code twice. + */ +void my_name_is(int argc, char *argv[], const char *name) +{ + char *l, *p; + char *cpath; + char *cargv0; + int len; + int path_max; + bool respath; + + if (gethostname(host_name, sizeof(host_name)) != 0) { + bstrncpy(host_name, "Hostname unknown", sizeof(host_name)); + } + bstrncpy(my_name, name, sizeof(my_name)); + + if (argc>0 && argv && argv[0]) { + /* use a dynamic PATH_MAX and allocate temporary variables */ + path_max = pathconf(argv[0], _PC_PATH_MAX); + if (path_max < 4096){ + path_max = 4096; + } + cpath = (char *)malloc(path_max); + cargv0 = (char *)malloc(path_max); + + respath = false; +#ifdef HAVE_REALPATH + /* make a canonical argv[0] */ + if (realpath(argv[0], cargv0) != NULL){ + respath = true; + } +#endif + if (!respath){ + /* no resolved_path available in cargv0, so populate it */ + strncpy(cargv0, argv[0], path_max); + } + /* strip trailing filename and save exepath */ + for (l=p=cargv0; *p; p++) { + if (IsPathSeparator(*p)) { + l = p; /* set pos of last path separator */ + } + } + if (IsPathSeparator(*l)) { + l++; + } else { + l = cargv0; +#if defined(HAVE_WIN32) + /* On Windows allow c: drive specification */ + if (l[1] == ':') { + l += 2; + } +#endif + } + len = strlen(l) + 1; + if (exename) { + free(exename); + } + exename = (char *)malloc(len); + strcpy(exename, l); + if (exepath) { + free(exepath); + } + /* separate exepath from exename */ + *l = 0; + exepath = bstrdup(cargv0); + if (strstr(exepath, PathSeparatorUp) != NULL || strstr(exepath, PathSeparatorCur) != NULL || !IsPathSeparator(exepath[0])) { + /* fallback to legacy code */ + if (getcwd(cpath, path_max)) { + free(exepath); + exepath = (char *)malloc(strlen(cpath) + 1 + len); + strcpy(exepath, cpath); + } + } + Dmsg2(500, "exepath=%s\nexename=%s\n", exepath, exename); + free(cpath); + free(cargv0); + } +} + +/* Set special ASSERT2 message where debugger can find it */ +void +set_assert_msg(const char *file, int line, const char *msg) +{ + char buf[2000]; + bsnprintf(buf, sizeof(buf), "ASSERT at %s:%d-%u ERR=%s", + get_basename(file), line, get_jobid_from_tsd(), msg); + assert_msg = bstrdup(buf); +} + +void set_db_engine_name(const char *name) +{ + bstrncpy(db_engine_name, name, sizeof(db_engine_name)-1); +} + +/* + * Initialize message handler for a daemon or a Job + * We make a copy of the MSGS resource passed, so it belows + * to the job or daemon and thus can be modified. + * + * NULL for jcr -> initialize global messages for daemon + * non-NULL -> initialize jcr using Message resource + */ +void +init_msg(JCR *jcr, MSGS *msg, job_code_callback_t job_code_callback) +{ + DEST *d, *dnew, *temp_chain = NULL; + int i; + + if (jcr == NULL && msg == NULL) { + init_last_jobs_list(); + /* Create a daemon key then set invalid jcr */ + /* Maybe we should give the daemon a jcr??? */ + create_jcr_key(); + set_jcr_in_tsd(INVALID_JCR); + } + + message_job_code_callback = job_code_callback; + +#if !defined(HAVE_WIN32) + /* + * Make sure we have fd's 0, 1, 2 open + * If we don't do this one of our sockets may open + * there and if we then use stdout, it could + * send total garbage to our socket. 
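+ *
+ * (That is why the code below opens /dev/null and dup2()s it onto
+ * whichever of descriptors 0, 1 and 2 are not already open.)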
+ * + */ + int fd; + fd = open("/dev/null", O_RDONLY, 0644); + if (fd > 2) { + close(fd); + } else { + for(i=1; fd + i <= 2; i++) { + dup2(fd, fd+i); + } + } + +#endif + /* + * If msg is NULL, initialize global chain for STDOUT and syslog + */ + if (msg == NULL) { + daemon_msgs = (MSGS *)malloc(sizeof(MSGS)); + memset(daemon_msgs, 0, sizeof(MSGS)); + for (i=1; i<=M_MAX; i++) { + add_msg_dest(daemon_msgs, MD_STDOUT, i, NULL, NULL); + } + Dmsg1(050, "Create daemon global message resource %p\n", daemon_msgs); + return; + } + + /* + * Walk down the message resource chain duplicating it + * for the current Job. + */ + for (d=msg->dest_chain; d; d=d->next) { + dnew = (DEST *)malloc(sizeof(DEST)); + memcpy(dnew, d, sizeof(DEST)); + dnew->next = temp_chain; + dnew->fd = NULL; + dnew->mail_filename = NULL; + if (d->mail_cmd) { + dnew->mail_cmd = bstrdup(d->mail_cmd); + } + if (d->where) { + dnew->where = bstrdup(d->where); + } + temp_chain = dnew; + } + + if (jcr) { + jcr->jcr_msgs = (MSGS *)malloc(sizeof(MSGS)); + memset(jcr->jcr_msgs, 0, sizeof(MSGS)); + jcr->jcr_msgs->dest_chain = temp_chain; + memcpy(jcr->jcr_msgs->send_msg, msg->send_msg, sizeof(msg->send_msg)); + } else { + /* If we have default values, release them now */ + if (daemon_msgs) { + free_msgs_res(daemon_msgs); + } + daemon_msgs = (MSGS *)malloc(sizeof(MSGS)); + memset(daemon_msgs, 0, sizeof(MSGS)); + daemon_msgs->dest_chain = temp_chain; + memcpy(daemon_msgs->send_msg, msg->send_msg, sizeof(msg->send_msg)); + } + + Dmsg2(250, "Copy message resource %p to %p\n", msg, temp_chain); +} + +/* Initialize so that the console (User Agent) can + * receive messages -- stored in a file. + */ +void init_console_msg(const char *wd) +{ + int fd; + + bsnprintf(con_fname, sizeof(con_fname), "%s%c%s.conmsg", wd, PathSeparator, my_name); + fd = open(con_fname, O_CREAT|O_RDWR|O_BINARY, 0600); + if (fd == -1) { + berrno be; + Emsg2(M_ERROR_TERM, 0, _("Could not open console message file %s: ERR=%s\n"), + con_fname, be.bstrerror()); + } + if (lseek(fd, 0, SEEK_END) > 0) { + console_msg_pending = true; + } + close(fd); + con_fd = bfopen(con_fname, "a+b"); + if (!con_fd) { + berrno be; + Emsg2(M_ERROR, 0, _("Could not open console message file %s: ERR=%s\n"), + con_fname, be.bstrerror()); + } + if (rwl_init(&con_lock) != 0) { + berrno be; + Emsg1(M_ERROR_TERM, 0, _("Could not get con mutex: ERR=%s\n"), + be.bstrerror()); + } +} + +/* + * Called only during parsing of the config file. + * + * Add a message destination. I.e. associate a message type with + * a destination (code). + * Note, where in the case of dest_code FILE is a filename, + * but in the case of MAIL is a space separated list of + * email addresses, ... + */ +void add_msg_dest(MSGS *msg, int dest_code, int msg_type, char *where, char *mail_cmd) +{ + DEST *d; + /* + * First search the existing chain and see if we + * can simply add this msg_type to an existing entry. 
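+ *
+ * Entries are created by calls such as the daemon default built in
+ * init_msg() above, roughly (one call per message type):
+ *
+ *    add_msg_dest(daemon_msgs, MD_STDOUT, M_INFO, NULL, NULL);
+ *
+ * so a repeated (destination, where) pair only sets one more type bit.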
+ */ + for (d=msg->dest_chain; d; d=d->next) { + if (dest_code == d->dest_code && ((where == NULL && d->where == NULL) || + bstrcmp(where, d->where))) { + Dmsg4(850, "Add to existing d=%p msgtype=%d destcode=%d where=%s\n", + d, msg_type, dest_code, NPRT(where)); + set_bit(msg_type, d->msg_types); + set_bit(msg_type, msg->send_msg); /* set msg_type bit in our local */ + return; + } + } + /* Not found, create a new entry */ + d = (DEST *)malloc(sizeof(DEST)); + memset(d, 0, sizeof(DEST)); + d->next = msg->dest_chain; + d->dest_code = dest_code; + set_bit(msg_type, d->msg_types); /* set type bit in structure */ + set_bit(msg_type, msg->send_msg); /* set type bit in our local */ + if (where) { + d->where = bstrdup(where); + } + if (mail_cmd) { + d->mail_cmd = bstrdup(mail_cmd); + } + Dmsg5(850, "add new d=%p msgtype=%d destcode=%d where=%s mailcmd=%s\n", + d, msg_type, dest_code, NPRT(where), NPRT(d->mail_cmd)); + msg->dest_chain = d; +} + +/* + * Called only during parsing of the config file. + * + * Remove a message destination + */ +void rem_msg_dest(MSGS *msg, int dest_code, int msg_type, char *where) +{ + DEST *d; + + for (d=msg->dest_chain; d; d=d->next) { + Dmsg2(850, "Remove_msg_dest d=%p where=%s\n", d, NPRT(d->where)); + if (bit_is_set(msg_type, d->msg_types) && (dest_code == d->dest_code) && + ((where == NULL && d->where == NULL) || + (strcmp(where, d->where) == 0))) { + Dmsg3(850, "Found for remove d=%p msgtype=%d destcode=%d\n", + d, msg_type, dest_code); + clear_bit(msg_type, d->msg_types); + Dmsg0(850, "Return rem_msg_dest\n"); + return; + } + } +} + + +/* + * Create a unique filename for the mail command + */ +static void make_unique_mail_filename(JCR *jcr, POOLMEM *&name, DEST *d) +{ + if (jcr) { + Mmsg(name, "%s/%s.%s.%d.mail", working_directory, my_name, + jcr->Job, (int)(intptr_t)d); + } else { + Mmsg(name, "%s/%s.%s.%d.mail", working_directory, my_name, + my_name, (int)(intptr_t)d); + } + Dmsg1(850, "mailname=%s\n", name); +} + +/* + * Open a mail pipe + */ +static BPIPE *open_mail_pipe(JCR *jcr, POOLMEM *&cmd, DEST *d) +{ + BPIPE *bpipe; + + if (d->mail_cmd) { + cmd = edit_job_codes(jcr, cmd, d->mail_cmd, d->where, message_job_code_callback); + } else { + Mmsg(cmd, "/usr/lib/sendmail -F Bacula %s", d->where); + } + fflush(stdout); + + if ((bpipe = open_bpipe(cmd, 120, "rw"))) { + /* If we had to use sendmail, add subject */ + if (!d->mail_cmd) { + fprintf(bpipe->wfd, "Subject: %s\r\n\r\n", _("Bacula Message")); + } + } else { + berrno be; + delivery_error(_("open mail pipe %s failed: ERR=%s\n"), + cmd, be.bstrerror()); + } + return bpipe; +} + +/* + * Close the messages for this Messages resource, which means to close + * any open files, and dispatch any pending email messages. 
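+ *
+ * (For the MAIL, MAIL_ON_ERROR and MAIL_ON_SUCCESS destinations the
+ * messages were spooled to a temporary file; that file is piped into
+ * the mail command here and then removed.)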
+ */ +void close_msg(JCR *jcr) +{ + MSGS *msgs; + DEST *d; + BPIPE *bpipe; + POOLMEM *cmd, *line; + int len, stat; + + Dmsg1(580, "Close_msg jcr=%p\n", jcr); + + if (jcr == NULL) { /* NULL -> global chain */ + msgs = daemon_msgs; + } else { + msgs = jcr->jcr_msgs; + jcr->jcr_msgs = NULL; + } + if (msgs == NULL) { + return; + } + + /* Wait for item to be not in use, then mark closing */ + if (msgs->is_closing()) { + return; + } + msgs->wait_not_in_use(); /* leaves fides_mutex set */ + /* Note get_closing() does not lock because we are already locked */ + if (msgs->get_closing()) { + msgs->unlock(); + return; + } + msgs->set_closing(); + msgs->unlock(); + + Dmsg1(850, "===Begin close msg resource at %p\n", msgs); + cmd = get_pool_memory(PM_MESSAGE); + for (d=msgs->dest_chain; d; ) { + bool success; + if (d->fd) { + switch (d->dest_code) { + case MD_FILE: + case MD_APPEND: + if (d->fd) { + fclose(d->fd); /* close open file descriptor */ + d->fd = NULL; + } + break; + case MD_MAIL: + case MD_MAIL_ON_ERROR: + case MD_MAIL_ON_SUCCESS: + Dmsg0(850, "Got MD_MAIL, MD_MAIL_ON_ERROR or MD_MAIL_ON_SUCCESS\n"); + if (!d->fd) { + break; + } + success = jcr && (jcr->JobStatus == JS_Terminated || jcr->JobStatus == JS_Warnings); + + if (d->dest_code == MD_MAIL_ON_ERROR && success) { + goto rem_temp_file; /* no mail */ + } else if (d->dest_code == MD_MAIL_ON_SUCCESS && !success) { + goto rem_temp_file; /* no mail */ + } + + if (!(bpipe=open_mail_pipe(jcr, cmd, d))) { + Pmsg0(000, _("open mail pipe failed.\n")); + goto rem_temp_file; /* error get out */ + } + Dmsg0(850, "Opened mail pipe\n"); + len = d->max_len+10; + line = get_memory(len); + rewind(d->fd); + while (fgets(line, len, d->fd)) { + fputs(line, bpipe->wfd); + } + if (!close_wpipe(bpipe)) { /* close write pipe sending mail */ + berrno be; + Pmsg1(000, _("close error: ERR=%s\n"), be.bstrerror()); + } + + /* + * Since we are closing all messages, before "recursing" + * make sure we are not closing the daemon messages, otherwise + * kaboom. + */ + if (msgs != daemon_msgs) { + /* read what mail prog returned -- should be nothing */ + while (fgets(line, len, bpipe->rfd)) { + delivery_error(_("Mail prog: %s"), line); + } + } + + stat = close_bpipe(bpipe); + if (stat != 0 && msgs != daemon_msgs) { + berrno be; + be.set_errno(stat); + Dmsg1(850, "Calling emsg. 
CMD=%s\n", cmd); + delivery_error(_("Mail program terminated in error.\n" + "CMD=%s\n" + "ERR=%s\n"), cmd, be.bstrerror()); + } + free_memory(line); + +rem_temp_file: + /* Remove temp mail file */ + if (d->fd) { + fclose(d->fd); + d->fd = NULL; + } + /* Exclude spaces in mail_filename */ + if (d->mail_filename) { + safer_unlink(d->mail_filename, MAIL_REGEX); + free_pool_memory(d->mail_filename); + d->mail_filename = NULL; + } + Dmsg0(850, "end mail or mail on error\n"); + break; + default: + break; + } + d->fd = NULL; + } + d = d->next; /* point to next buffer */ + } + free_pool_memory(cmd); + Dmsg0(850, "Done walking message chain.\n"); + if (jcr) { + free_msgs_res(msgs); + msgs = NULL; + } else { + msgs->clear_closing(); + } + Dmsg0(850, "===End close msg resource\n"); +} + +/* + * Free memory associated with Messages resource + */ +void free_msgs_res(MSGS *msgs) +{ + DEST *d, *old; + + /* Walk down the message chain releasing allocated buffers */ + for (d=msgs->dest_chain; d; ) { + if (d->where) { + free(d->where); + d->where = NULL; + } + if (d->mail_cmd) { + free(d->mail_cmd); + d->mail_cmd = NULL; + } + old = d; /* save pointer to release */ + d = d->next; /* point to next buffer */ + free(old); /* free the destination item */ + } + msgs->dest_chain = NULL; + free(msgs); /* free the head */ +} + + +/* + * Terminate the message handler for good. + * Release the global destination chain. + * + * Also, clean up a few other items (cons, exepath). Note, + * these really should be done elsewhere. + */ +void term_msg() +{ + Dmsg0(850, "Enter term_msg\n"); + close_msg(NULL); /* close global chain */ + free_msgs_res(daemon_msgs); /* free the resources */ + daemon_msgs = NULL; + if (con_fd) { + fflush(con_fd); + fclose(con_fd); + con_fd = NULL; + } + if (exepath) { + free(exepath); + exepath = NULL; + } + if (exename) { + free(exename); + exename = NULL; + } + if (trace_fd) { + fclose(trace_fd); + trace_fd = NULL; + trace = false; + } + working_directory = NULL; + term_last_jobs_list(); +} + +static bool open_dest_file(JCR *jcr, DEST *d, const char *mode) +{ + d->fd = bfopen(d->where, mode); + if (!d->fd) { + berrno be; + delivery_error(_("fopen %s failed: ERR=%s\n"), d->where, be.bstrerror()); + return false; + } + return true; +} + +/* Split the output for syslog (it converts \n to ' ' and is + * limited to 1024 characters per syslog message + */ +static void send_to_syslog(int mode, const char *msg) +{ + int len; + char buf[1024]; + const char *p2; + const char *p = msg; + + while (*p && ((p2 = strchr(p, '\n')) != NULL)) { + len = MIN((int)sizeof(buf) - 1, p2 - p + 1); /* Add 1 to keep \n */ + strncpy(buf, p, len); + buf[len] = 0; + syslog(mode, "%s", buf); + p = p2+1; /* skip \n */ + } + if (*p != 0) { /* no \n at the end ? */ + syslog(mode, "%s", p); + } +} + +/* + * Handle sending the message to the appropriate place + */ +void dispatch_message(JCR *jcr, int type, utime_t mtime, char *msg) +{ + DEST *d; + char dt[MAX_TIME_LENGTH]; + POOLMEM *mcmd; + int len, dtlen; + MSGS *msgs; + BPIPE *bpipe; + const char *mode; + bool created_jcr = false; + + Dmsg2(850, "Enter dispatch_msg type=%d msg=%s", type, msg); + + /* + * Most messages are prefixed by a date and time. If mtime is + * zero, then we use the current time. If mtime is 1 (special + * kludge), we do not prefix the date and time. Otherwise, + * we assume mtime is a utime_t and use it. 
+ */ + if (mtime == 0) { + mtime = time(NULL); + } + if (mtime == 1) { + *dt = 0; + dtlen = 0; + mtime = time(NULL); /* get time for SQL log */ + } else { + bstrftime_ny(dt, sizeof(dt), mtime); + dtlen = strlen(dt); + dt[dtlen++] = ' '; + dt[dtlen] = 0; + } + + /* If the program registered a callback, send it there */ + if (message_callback) { + message_callback(type, msg); + return; + } + + /* For serious errors make sure message is printed or logged */ + if (type == M_ABORT || type == M_ERROR_TERM) { + fputs(dt, stdout); + fputs(msg, stdout); + fflush(stdout); + if (type == M_ABORT) { + syslog(LOG_DAEMON|LOG_ERR, "%s", msg); + } + } + + + /* Now figure out where to send the message */ + msgs = NULL; + if (!jcr) { + jcr = get_jcr_from_tsd(); + } + +/* Temporary fix for a deadlock in the reload command when + * the configuration has a problem. The JCR chain is locked + * during the reload, we cannot create a new JCR. + */ +#if 0 + if (!jcr) { + jcr = new_jcr(sizeof(JCR), NULL); + created_jcr = true; + } +#endif + + if (jcr) { + msgs = jcr->jcr_msgs; + } + if (msgs == NULL) { + msgs = daemon_msgs; + } + /* + * If closing this message resource, print and send to syslog, + * then get out. + */ + if (msgs->is_closing()) { + fputs(dt, stdout); + fputs(msg, stdout); + fflush(stdout); + syslog(LOG_DAEMON|LOG_ERR, "%s", msg); + return; + } + + + for (d=msgs->dest_chain; d; d=d->next) { + if (bit_is_set(type, d->msg_types)) { + bool ok; + switch (d->dest_code) { + case MD_CATALOG: + char ed1[50]; + if (!jcr || !jcr->db) { + break; + } + if (p_sql_query && p_sql_escape) { + POOLMEM *cmd = get_pool_memory(PM_MESSAGE); + POOLMEM *esc_msg = get_pool_memory(PM_MESSAGE); + + int len = strlen(msg) + 1; + esc_msg = check_pool_memory_size(esc_msg, len*2+1); + ok = p_sql_escape(jcr, jcr->db, esc_msg, msg, len); + if (ok) { + bstrutime(dt, sizeof(dt), mtime); + Mmsg(cmd, "INSERT INTO Log (JobId, Time, LogText) VALUES (%s,'%s','%s')", + edit_int64(jcr->JobId, ed1), dt, esc_msg); + ok = p_sql_query(jcr, cmd); + } + if (!ok) { + delivery_error(_("Message delivery error: Unable to store data in database.\n")); + } + free_pool_memory(cmd); + free_pool_memory(esc_msg); + } + break; + case MD_CONSOLE: + Dmsg1(850, "CONSOLE for following msg: %s", msg); + if (!con_fd) { + con_fd = bfopen(con_fname, "a+b"); + Dmsg0(850, "Console file not open.\n"); + } + if (con_fd) { + Pw(con_lock); /* get write lock on console message file */ + errno = 0; + if (dtlen) { + (void)fwrite(dt, dtlen, 1, con_fd); + } + len = strlen(msg); + if (len > 0) { + (void)fwrite(msg, len, 1, con_fd); + if (msg[len-1] != '\n') { + (void)fwrite("\n", 2, 1, con_fd); + } + } else { + (void)fwrite("\n", 2, 1, con_fd); + } + fflush(con_fd); + console_msg_pending = true; + Vw(con_lock); + } + break; + case MD_SYSLOG: + Dmsg1(850, "SYSLOG for following msg: %s\n", msg); + /* + * We really should do an openlog() here. 
+ */ + send_to_syslog(LOG_DAEMON|LOG_ERR, msg); + break; + case MD_OPERATOR: + Dmsg1(850, "OPERATOR for following msg: %s\n", msg); + mcmd = get_pool_memory(PM_MESSAGE); + if ((bpipe=open_mail_pipe(jcr, mcmd, d))) { + int stat; + fputs(dt, bpipe->wfd); + fputs(msg, bpipe->wfd); + /* Messages to the operator go one at a time */ + stat = close_bpipe(bpipe); + if (stat != 0) { + berrno be; + be.set_errno(stat); + delivery_error(_("Msg delivery error: Operator mail program terminated in error.\n" + "CMD=%s\n" + "ERR=%s\n"), mcmd, be.bstrerror()); + } + } + free_pool_memory(mcmd); + break; + case MD_MAIL: + case MD_MAIL_ON_ERROR: + case MD_MAIL_ON_SUCCESS: + Dmsg1(850, "MAIL for following msg: %s", msg); + if (msgs->is_closing()) { + break; + } + msgs->set_in_use(); + if (!d->fd) { + POOLMEM *name = get_pool_memory(PM_MESSAGE); + make_unique_mail_filename(jcr, name, d); + d->fd = bfopen(name, "w+b"); + if (!d->fd) { + berrno be; + delivery_error(_("Msg delivery error: fopen %s failed: ERR=%s\n"), name, + be.bstrerror()); + free_pool_memory(name); + msgs->clear_in_use(); + break; + } + d->mail_filename = name; + } + fputs(dt, d->fd); + len = strlen(msg) + dtlen;; + if (len > d->max_len) { + d->max_len = len; /* keep max line length */ + } + fputs(msg, d->fd); + msgs->clear_in_use(); + break; + case MD_APPEND: + Dmsg1(850, "APPEND for following msg: %s", msg); + mode = "ab"; + goto send_to_file; + case MD_FILE: + Dmsg1(850, "FILE for following msg: %s", msg); + mode = "w+b"; +send_to_file: + if (msgs->is_closing()) { + break; + } + msgs->set_in_use(); + if (!d->fd && !open_dest_file(jcr, d, mode)) { + msgs->clear_in_use(); + break; + } + fputs(dt, d->fd); + fputs(msg, d->fd); + /* On error, we close and reopen to handle log rotation */ + if (ferror(d->fd)) { + fclose(d->fd); + d->fd = NULL; + if (open_dest_file(jcr, d, mode)) { + fputs(dt, d->fd); + fputs(msg, d->fd); + } + } + msgs->clear_in_use(); + break; + case MD_DIRECTOR: + Dmsg1(850, "DIRECTOR for following msg: %s", msg); + if (jcr && jcr->dir_bsock && !jcr->dir_bsock->errors) { + jcr->dir_bsock->fsend("Jmsg JobId=%ld type=%d level=%lld %s", + jcr->JobId, type, mtime, msg); + } else { + Dmsg1(800, "no jcr for following msg: %s", msg); + } + break; + case MD_STDOUT: + Dmsg1(850, "STDOUT for following msg: %s", msg); + if (type != M_ABORT && type != M_ERROR_TERM) { /* already printed */ + fputs(dt, stdout); + fputs(msg, stdout); + fflush(stdout); + } + break; + case MD_STDERR: + Dmsg1(850, "STDERR for following msg: %s", msg); + fputs(dt, stderr); + fputs(msg, stderr); + fflush(stdout); + break; + default: + break; + } + } + } + if (created_jcr) { + free_jcr(jcr); + } +} + +/********************************************************************* + * + * This subroutine returns the filename portion of a path. + * It is used because some compilers set __FILE__ + * to the full path. Try to return base + next higher path. + */ + +const char *get_basename(const char *pathname) +{ + const char *basename; + + if ((basename = bstrrpath(pathname, pathname+strlen(pathname))) == pathname) { + /* empty */ + } else if ((basename = bstrrpath(pathname, basename-1)) == pathname) { + /* empty */ + } else { + basename++; + } + return basename; +} + +/* + * print or write output to trace file + */ +static void pt_out(char *buf) +{ + /* + * Used the "trace on" command in the console to turn on + * output to the trace file. "trace off" will close the file. 
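+ *
+ * The trace file is created in the working directory as <my_name>.trace,
+ * e.g. /opt/bacula/working/bacula-dir.trace (example path, assuming the
+ * usual working directory setting).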
+ */ + if (trace) { + if (!trace_fd) { + char fn[200]; + bsnprintf(fn, sizeof(fn), "%s/%s.trace", working_directory ? working_directory : "./", my_name); + trace_fd = bfopen(fn, "a+b"); + } + if (trace_fd) { + fputs(buf, trace_fd); + fflush(trace_fd); + return; + } else { + /* Some problem, turn off tracing */ + trace = false; + } + } + /* not tracing */ + fputs(buf, stdout); + fflush(stdout); +} + +/********************************************************************* + * + * This subroutine prints a debug message if the level number + * is less than or equal the debug_level. File and line numbers + * are included for more detail if desired, but not currently + * printed. + * + * If the level is negative, the details of file and line number + * are not printed. + * + */ +void +vd_msg(const char *file, int line, int64_t level, const char *fmt, va_list arg_ptr) +{ + char buf[5000]; + int len = 0; /* space used in buf */ + bool details = true; + utime_t mtime; + + if (level < 0) { + details = false; + level = -level; + } + + if (chk_dbglvl(level)) { + if (dbg_timestamp) { + mtime = time(NULL); + bstrftimes(buf+len, sizeof(buf)-len, mtime); + len = strlen(buf); + buf[len++] = ' '; + } + +#ifdef FULL_LOCATION + if (details) { + if (dbg_thread) { + len += bsnprintf(buf+len, sizeof(buf)-len, "%s[%lld]: %s:%d-%u ", + my_name, bthread_get_thread_id(), + get_basename(file), line, get_jobid_from_tsd()); + } else { + len += bsnprintf(buf+len, sizeof(buf)-len, "%s: %s:%d-%u ", + my_name, get_basename(file), line, get_jobid_from_tsd()); + } + } +#endif + bvsnprintf(buf+len, sizeof(buf)-len, (char *)fmt, arg_ptr); + + pt_out(buf); + } +} + +void +d_msg(const char *file, int line, int64_t level, const char *fmt,...) +{ + va_list arg_ptr; + va_start(arg_ptr, fmt); + vd_msg(file, line, level, fmt, arg_ptr); /* without tags */ + va_end(arg_ptr); +} + + +/* + * Set trace flag on/off. If argument is negative, there is no change + */ +void set_trace(int trace_flag) +{ + if (trace_flag < 0) { + return; + } else if (trace_flag > 0) { + trace = true; + } else { + trace = false; + } + if (!trace && trace_fd) { + FILE *ltrace_fd = trace_fd; + trace_fd = NULL; + bmicrosleep(0, 100000); /* yield to prevent seg faults */ + fclose(ltrace_fd); + } +} + +/* + * Can be called by Bacula's tools that use Bacula's libraries, to control where + * to redirect Dmsg() emitted by the code inside the Bacula's library. + * This should not be called by the main daemon, this is a Hack ! 
+ * See in bsnapshot.c how it is used + * In your tools be careful to no call any function that here in messages.c + * that modify "trace" or close() or re-open() trace_fd + */ +void set_trace_for_tools(FILE *new_trace_fd) +{ + // don't call fclose(trace_fd) here + trace = true; + trace_fd = new_trace_fd; +} + +void set_hangup(int hangup_value) +{ + if (hangup_value != -1) { + hangup = hangup_value; + } +} + +int get_hangup(void) +{ + return hangup; +} + +void set_blowup(int blowup_value) +{ + if (blowup_value != -1) { + blowup = blowup_value; + } +} + +int get_blowup(void) +{ + return blowup; +} + +bool handle_hangup_blowup(JCR *jcr, uint32_t file_count, uint64_t byte_count) +{ + if (hangup == 0 && blowup == 0) { + /* quick check */ + return false; + } + /* Debug code: check if we must hangup or blowup */ + if ((hangup > 0 && (file_count > (uint32_t)hangup)) || + (hangup < 0 && (byte_count/1024 > (uint32_t)-hangup))) { + jcr->setJobStatus(JS_Incomplete); + if (hangup > 0) { + Jmsg1(jcr, M_FATAL, 0, "Debug hangup requested after %d files.\n", hangup); + } else { + Jmsg1(jcr, M_FATAL, 0, "Debug hangup requested after %d Kbytes.\n", -hangup); + } + set_hangup(0); + return true; + } + if ((blowup > 0 && (file_count > (uint32_t)blowup)) || + (blowup < 0 && (byte_count/1024 > (uint32_t)-blowup))) { + if (blowup > 0) { + Jmsg1(jcr, M_ABORT, 0, "Debug blowup requested after %d files.\n", blowup); + } else { + Jmsg1(jcr, M_ABORT, 0, "Debug blowup requested after %d Kbytes.\n", -blowup); + } + /* will never reach this line */ + return true; + } + return false; +} + +bool get_trace(void) +{ + return trace; +} + +/********************************************************************* + * + * This subroutine prints a message regardless of the debug level + * + * If the level is negative, the details of file and line number + * are not printed. + */ +void +p_msg(const char *file, int line, int level, const char *fmt,...) +{ + char buf[5000]; + int len = 0; /* space used in buf */ + va_list arg_ptr; + + if (dbg_timestamp) { + utime_t mtime = time(NULL); + bstrftimes(buf+len, sizeof(buf)-len, mtime); + len = strlen(buf); + buf[len++] = ' '; + } + +#ifdef FULL_LOCATION + if (level >= 0) { + len += bsnprintf(buf+len, sizeof(buf)-len, "%s: %s:%d-%u ", + my_name, get_basename(file), line, get_jobid_from_tsd()); + } +#endif + + va_start(arg_ptr, fmt); + bvsnprintf(buf+len, sizeof(buf)-len, (char *)fmt, arg_ptr); + va_end(arg_ptr); + + pt_out(buf); +} + + +/********************************************************************* + * + * subroutine writes a debug message to the trace file if the level number + * is less than or equal the debug_level. File and line numbers + * are included for more detail if desired, but not currently + * printed. + * + * If the level is negative, the details of file and line number + * are not printed. + */ +void +t_msg(const char *file, int line, int64_t level, const char *fmt,...) +{ + char buf[5000]; + int len; + va_list arg_ptr; + int details = TRUE; + + level = level & ~DT_ALL; /* level should be tag free */ + + if (level < 0) { + details = FALSE; + level = -level; + } + + if (level <= debug_level) { + if (!trace_fd) { + bsnprintf(buf, sizeof(buf), "%s/%s.trace", working_directory ? 
working_directory : ".", my_name); + trace_fd = bfopen(buf, "a+b"); + } + +#ifdef FULL_LOCATION + if (details) { + len = bsnprintf(buf, sizeof(buf), "%s: %s:%d ", my_name, get_basename(file), line); + } else { + len = 0; + } +#else + len = 0; +#endif + va_start(arg_ptr, fmt); + bvsnprintf(buf+len, sizeof(buf)-len, (char *)fmt, arg_ptr); + va_end(arg_ptr); + if (trace_fd != NULL) { + fputs(buf, trace_fd); + fflush(trace_fd); + } + } +} + +/* ********************************************************* + * + * print an error message + * + */ +void +e_msg(const char *file, int line, int type, int level, const char *fmt,...) +{ + char buf[5000]; + va_list arg_ptr; + int len; + + /* + * Check if we have a message destination defined. + * We always report M_ABORT and M_ERROR_TERM + */ + if (!daemon_msgs || ((type != M_ABORT && type != M_ERROR_TERM) && + !bit_is_set(type, daemon_msgs->send_msg))) { + return; /* no destination */ + } + switch (type) { + case M_ABORT: + len = bsnprintf(buf, sizeof(buf), _("%s: ABORTING due to ERROR in %s:%d\n"), + my_name, get_basename(file), line); + break; + case M_ERROR_TERM: + len = bsnprintf(buf, sizeof(buf), _("%s: ERROR TERMINATION at %s:%d\n"), + my_name, get_basename(file), line); + break; + case M_FATAL: + if (level == -1) /* skip details */ + len = bsnprintf(buf, sizeof(buf), _("%s: Fatal Error because: "), my_name); + else + len = bsnprintf(buf, sizeof(buf), _("%s: Fatal Error at %s:%d because:\n"), my_name, get_basename(file), line); + break; + case M_ERROR: + if (level == -1) /* skip details */ + len = bsnprintf(buf, sizeof(buf), _("%s: ERROR: "), my_name); + else + len = bsnprintf(buf, sizeof(buf), _("%s: ERROR in %s:%d "), my_name, get_basename(file), line); + break; + case M_WARNING: + len = bsnprintf(buf, sizeof(buf), _("%s: Warning: "), my_name); + break; + case M_SECURITY: + len = bsnprintf(buf, sizeof(buf), _("%s: Security Alert: "), my_name); + break; + default: + len = bsnprintf(buf, sizeof(buf), "%s: ", my_name); + break; + } + + va_start(arg_ptr, fmt); + bvsnprintf(buf+len, sizeof(buf)-len, (char *)fmt, arg_ptr); + va_end(arg_ptr); + + pt_out(buf); + dispatch_message(NULL, type, 0, buf); + + if (type == M_ABORT) { + char *p = 0; + p[0] = 0; /* generate segmentation violation */ + } + if (type == M_ERROR_TERM) { + exit(1); + } +} + +/* Check in the msgs resource if a given type is defined */ +bool is_message_type_set(JCR *jcr, int type) +{ + MSGS *msgs = NULL; + if (jcr) { + msgs = jcr->jcr_msgs; + } + if (!msgs) { + msgs = daemon_msgs; /* if no jcr, we use daemon handler */ + } + if (msgs && (type != M_ABORT && type != M_ERROR_TERM) && + !bit_is_set(type, msgs->send_msg)) { + return false; /* no destination */ + } + return true; +} + +/* ********************************************************* + * + * Generate a Job message + * + */ +void +Jmsg(JCR *jcr, int type, utime_t mtime, const char *fmt,...) +{ + char rbuf[5000]; + va_list arg_ptr; + int len; + MSGS *msgs; + uint32_t JobId = 0; + + + Dmsg1(850, "Enter Jmsg type=%d\n", type); + + /* + * Special case for the console, which has a dir_bsock and JobId==0, + * in that case, we send the message directly back to the + * dir_bsock. + * This allow commands such as "estimate" to work. + * It probably should be restricted to work only in the FD. 
+ */ + if (jcr && jcr->JobId == 0 && jcr->dir_bsock && type != M_SECURITY) { + BSOCK *dir = jcr->dir_bsock; + va_start(arg_ptr, fmt); + dir->msglen = bvsnprintf(dir->msg, sizeof_pool_memory(dir->msg), + fmt, arg_ptr); + va_end(arg_ptr); + jcr->dir_bsock->send(); + return; + } + + /* The watchdog thread can't use Jmsg directly, we always queued it */ + if (is_watchdog()) { + va_start(arg_ptr, fmt); + bvsnprintf(rbuf, sizeof(rbuf), fmt, arg_ptr); + va_end(arg_ptr); + Qmsg(jcr, type, mtime, "%s", rbuf); + return; + } + + msgs = NULL; + if (!jcr) { + jcr = get_jcr_from_tsd(); + } + if (jcr) { + if (!jcr->dequeuing_msgs) { /* Avoid recursion */ + /* Dequeue messages to keep the original order */ + dequeue_messages(jcr); + } + msgs = jcr->jcr_msgs; + JobId = jcr->JobId; + } + if (!msgs) { + msgs = daemon_msgs; /* if no jcr, we use daemon handler */ + } + + /* + * Check if we have a message destination defined. + * We always report M_ABORT and M_ERROR_TERM + */ + if (msgs && (type != M_ABORT && type != M_ERROR_TERM) && + !bit_is_set(type, msgs->send_msg)) { + return; /* no destination */ + } + switch (type) { + case M_ABORT: + len = bsnprintf(rbuf, sizeof(rbuf), _("%s ABORTING due to ERROR\n"), my_name); + break; + case M_ERROR_TERM: + len = bsnprintf(rbuf, sizeof(rbuf), _("%s ERROR TERMINATION\n"), my_name); + break; + case M_FATAL: + len = bsnprintf(rbuf, sizeof(rbuf), _("%s JobId %u: Fatal error: "), my_name, JobId); + if (jcr) { + jcr->setJobStatus(JS_FatalError); + } + if (jcr && jcr->JobErrors == 0) { + jcr->JobErrors = 1; + } + break; + case M_ERROR: + len = bsnprintf(rbuf, sizeof(rbuf), _("%s JobId %u: Error: "), my_name, JobId); + if (jcr) { + jcr->JobErrors++; + } + break; + case M_WARNING: + len = bsnprintf(rbuf, sizeof(rbuf), _("%s JobId %u: Warning: "), my_name, JobId); + if (jcr) { + jcr->JobWarnings++; + } + break; + case M_SECURITY: + len = bsnprintf(rbuf, sizeof(rbuf), _("%s JobId %u: Security Alert: "), + my_name, JobId); + break; + default: + len = bsnprintf(rbuf, sizeof(rbuf), "%s JobId %u: ", my_name, JobId); + break; + } + + va_start(arg_ptr, fmt); + bvsnprintf(rbuf+len, sizeof(rbuf)-len, fmt, arg_ptr); + va_end(arg_ptr); + + dispatch_message(jcr, type, mtime, rbuf); + + if (type == M_ABORT){ + char *p = 0; + printf("Bacula forced SEG FAULT to obtain traceback.\n"); + syslog(LOG_DAEMON|LOG_ERR, "Bacula forced SEG FAULT to obtain traceback.\n"); + p[0] = 0; /* generate segmentation violation */ + } + if (type == M_ERROR_TERM) { + exit(1); + } +} + +/* + * If we come here, prefix the message with the file:line-number, + * then pass it on to the normal Jmsg routine. + */ +void j_msg(const char *file, int line, JCR *jcr, int type, utime_t mtime, const char *fmt,...) +{ + va_list arg_ptr; + int i, len, maxlen; + POOLMEM *pool_buf; + + va_start(arg_ptr, fmt); + vd_msg(file, line, 0, fmt, arg_ptr); + va_end(arg_ptr); + + pool_buf = get_pool_memory(PM_EMSG); + i = Mmsg(pool_buf, "%s:%d ", get_basename(file), line); + + for (;;) { + maxlen = sizeof_pool_memory(pool_buf) - i - 1; + va_start(arg_ptr, fmt); + len = bvsnprintf(pool_buf+i, maxlen, fmt, arg_ptr); + va_end(arg_ptr); + if (len < 0 || len >= (maxlen-5)) { + pool_buf = realloc_pool_memory(pool_buf, maxlen + i + maxlen/2); + continue; + } + break; + } + + Jmsg(jcr, type, mtime, "%s", pool_buf); + free_memory(pool_buf); +} + + +/* + * Edit a message into a Pool memory buffer, with file:lineno + */ +int m_msg(const char *file, int line, POOLMEM **pool_buf, const char *fmt, ...) 
+{ + va_list arg_ptr; + int i, len, maxlen; + + i = sprintf(*pool_buf, "%s:%d ", get_basename(file), line); + + for (;;) { + maxlen = sizeof_pool_memory(*pool_buf) - i - 1; + va_start(arg_ptr, fmt); + len = bvsnprintf(*pool_buf+i, maxlen, fmt, arg_ptr); + va_end(arg_ptr); + if (len < 0 || len >= (maxlen-5)) { + *pool_buf = realloc_pool_memory(*pool_buf, maxlen + i + maxlen/2); + continue; + } + break; + } + return len; +} + +int m_msg(const char *file, int line, POOLMEM *&pool_buf, const char *fmt, ...) +{ + va_list arg_ptr; + int i, len, maxlen; + + i = sprintf(pool_buf, "%s:%d ", get_basename(file), line); + + for (;;) { + maxlen = sizeof_pool_memory(pool_buf) - i - 1; + va_start(arg_ptr, fmt); + len = bvsnprintf(pool_buf+i, maxlen, fmt, arg_ptr); + va_end(arg_ptr); + if (len < 0 || len >= (maxlen-5)) { + pool_buf = realloc_pool_memory(pool_buf, maxlen + i + maxlen/2); + continue; + } + break; + } + return len; +} + + +/* + * Edit a message into a Pool Memory buffer NO file:lineno + * Returns: string length of what was edited. + */ +int Mmsg(POOLMEM **pool_buf, const char *fmt, ...) +{ + va_list arg_ptr; + int len, maxlen; + + for (;;) { + maxlen = sizeof_pool_memory(*pool_buf) - 1; + va_start(arg_ptr, fmt); + len = bvsnprintf(*pool_buf, maxlen, fmt, arg_ptr); + va_end(arg_ptr); + if (len < 0 || len >= (maxlen-5)) { + *pool_buf = realloc_pool_memory(*pool_buf, maxlen + maxlen/2); + continue; + } + break; + } + return len; +} + +int Mmsg(POOLMEM *&pool_buf, const char *fmt, ...) +{ + va_list arg_ptr; + int len, maxlen; + + for (;;) { + maxlen = sizeof_pool_memory(pool_buf) - 1; + va_start(arg_ptr, fmt); + len = bvsnprintf(pool_buf, maxlen, fmt, arg_ptr); + va_end(arg_ptr); + if (len < 0 || len >= (maxlen-5)) { + pool_buf = realloc_pool_memory(pool_buf, maxlen + maxlen/2); + continue; + } + break; + } + return len; +} + +int Mmsg(POOL_MEM &pool_buf, const char *fmt, ...) +{ + va_list arg_ptr; + int len, maxlen; + + for (;;) { + maxlen = pool_buf.max_size() - 1; + va_start(arg_ptr, fmt); + len = bvsnprintf(pool_buf.c_str(), maxlen, fmt, arg_ptr); + va_end(arg_ptr); + if (len < 0 || len >= (maxlen-5)) { + pool_buf.realloc_pm(maxlen + maxlen/2); + continue; + } + break; + } + return len; +} + + +/* + * We queue messages rather than print them directly. This + * is generally used in low level routines (msg handler, bnet) + * to prevent recursion (i.e. if you are in the middle of + * sending a message, it is a bit messy to recursively call + * yourself when the bnet packet is not reentrant). + */ +void Qmsg(JCR *jcr, int type, utime_t mtime, const char *fmt,...) 
+{ + va_list arg_ptr; + int len, maxlen; + POOLMEM *pool_buf; + MQUEUE_ITEM *item, *last_item; + + pool_buf = get_pool_memory(PM_EMSG); + + for (;;) { + maxlen = sizeof_pool_memory(pool_buf) - 1; + va_start(arg_ptr, fmt); + len = bvsnprintf(pool_buf, maxlen, fmt, arg_ptr); + va_end(arg_ptr); + if (len < 0 || len >= (maxlen-5)) { + pool_buf = realloc_pool_memory(pool_buf, maxlen + maxlen/2); + continue; + } + break; + } + item = (MQUEUE_ITEM *)malloc(sizeof(MQUEUE_ITEM) + strlen(pool_buf) + 1); + item->type = type; + item->repeat = 0; + item->mtime = time(NULL); + strcpy(item->msg, pool_buf); + if (!jcr) { + jcr = get_jcr_from_tsd(); + } + + if (jcr && type==M_FATAL) { + jcr->setJobStatus(JS_FatalError); + } + + /* If no jcr or no queue or dequeuing send to syslog */ + if (!jcr || !jcr->msg_queue || jcr->dequeuing_msgs) { + syslog(LOG_DAEMON|LOG_ERR, "%s", item->msg); + P(daemon_msg_queue_mutex); + if (daemon_msg_queue) { + if (item->type == M_SECURITY) { /* can be repeated */ + /* Keep repeat count of identical messages */ + last_item = (MQUEUE_ITEM *)daemon_msg_queue->last(); + if (last_item) { + if (strcmp(last_item->msg, item->msg) == 0) { + last_item->repeat++; + free(item); + item = NULL; + } + } + } + if (item) { + daemon_msg_queue->append(item); + } + } + V(daemon_msg_queue_mutex); + } else { + /* Queue message for later sending */ + P(jcr->msg_queue_mutex); + jcr->msg_queue->append(item); + V(jcr->msg_queue_mutex); + } + free_memory(pool_buf); +} + +/* + * Dequeue daemon messages + */ +void dequeue_daemon_messages(JCR *jcr) +{ + MQUEUE_ITEM *item; + JobId_t JobId; + + /* Dequeue daemon messages */ + if (daemon_msg_queue && !dequeuing_daemon_msgs) { + P(daemon_msg_queue_mutex); + dequeuing_daemon_msgs = true; + jcr->dequeuing_msgs = true; + JobId = jcr->JobId; + jcr->JobId = 0; /* set daemon JobId == 0 */ + if (jcr->dir_bsock) jcr->dir_bsock->suppress_error_messages(true); + foreach_dlist(item, daemon_msg_queue) { + if (item->type == M_FATAL || item->type == M_ERROR) { + item->type = M_SECURITY; + } + if (item->repeat == 0) { + Jmsg(jcr, item->type, item->mtime, "%s", item->msg); + } else { + Jmsg(jcr, item->type, item->mtime, "Message repeated %d times: %s", + item->repeat, item->msg); + } + } + if (jcr->dir_bsock) jcr->dir_bsock->suppress_error_messages(false); + /* Remove messages just sent */ + daemon_msg_queue->destroy(); + jcr->JobId = JobId; /* restore JobId */ + jcr->dequeuing_msgs = false; + dequeuing_daemon_msgs = false; + V(daemon_msg_queue_mutex); + } +} + +/* + * Dequeue messages + */ +void dequeue_messages(JCR *jcr) +{ + MQUEUE_ITEM *item; + + /* Avoid bad calls and recursion */ + if (!jcr || jcr->dequeuing_msgs) { + return; + } + + + /* Dequeue Job specific messages */ + if (!jcr->msg_queue || jcr->dequeuing_msgs) { + return; + } + P(jcr->msg_queue_mutex); + jcr->dequeuing_msgs = true; + if (jcr->dir_bsock) jcr->dir_bsock->suppress_error_messages(true); + foreach_dlist(item, jcr->msg_queue) { + Jmsg(jcr, item->type, item->mtime, "%s", item->msg); + } + if (jcr->dir_bsock) jcr->dir_bsock->suppress_error_messages(false); + /* Remove messages just sent */ + jcr->msg_queue->destroy(); + jcr->dequeuing_msgs = false; + V(jcr->msg_queue_mutex); +} + + +/* + * If we come here, prefix the message with the file:line-number, + * then pass it on to the normal Qmsg routine. + */ +void q_msg(const char *file, int line, JCR *jcr, int type, utime_t mtime, const char *fmt,...) 
+{ + va_list arg_ptr; + int i, len, maxlen; + POOLMEM *pool_buf; + + pool_buf = get_pool_memory(PM_EMSG); + i = Mmsg(pool_buf, "%s:%d ", get_basename(file), line); + + for (;;) { + maxlen = sizeof_pool_memory(pool_buf) - i - 1; + va_start(arg_ptr, fmt); + len = bvsnprintf(pool_buf+i, maxlen, fmt, arg_ptr); + va_end(arg_ptr); + if (len < 0 || len >= (maxlen-5)) { + pool_buf = realloc_pool_memory(pool_buf, maxlen + i + maxlen/2); + continue; + } + break; + } + + Qmsg(jcr, type, mtime, "%s", pool_buf); + free_memory(pool_buf); +} + + +/* not all in alphabetical order. New commands are added after existing commands with similar letters + to prevent breakage of existing user scripts. */ +struct debugtags { + const char *tag; /* command */ + int64_t bit; /* bit to set */ + const char *help; /* main purpose */ +}; + +/* setdebug tag=all,-plugin */ +static struct debugtags debug_tags[] = { + { NT_("lock"), DT_LOCK, _("Debug lock information")}, + { NT_("network"), DT_NETWORK, _("Debug network information")}, + { NT_("plugin"), DT_PLUGIN, _("Debug plugin information")}, + { NT_("volume"), DT_VOLUME, _("Debug volume information")}, + { NT_("sql"), DT_SQL, _("Debug SQL queries")}, + { NT_("bvfs"), DT_BVFS, _("Debug BVFS queries")}, + { NT_("memory"), DT_MEMORY, _("Debug memory allocation")}, + { NT_("scheduler"), DT_SCHEDULER,_("Debug scheduler information")}, + { NT_("protocol"), DT_PROTOCOL, _("Debug protocol information")}, + { NT_("snapshot"), DT_SNAPSHOT, _("Debug snapshots")}, + { NT_("record"), DT_RECORD, _("Debug records")}, + { NT_("asx"), DT_ASX, _("ASX personal's debugging")}, + { NT_("all"), DT_ALL, _("Debug all information")}, + { NULL, 0, NULL} +}; + +#define MAX_TAG (sizeof(debug_tags) / sizeof(struct debugtags)) + +const char *debug_get_tag(uint32_t pos, const char **desc) +{ + if (pos < MAX_TAG) { + if (desc) { + *desc = debug_tags[pos].help; + } + return debug_tags[pos].tag; + } + return NULL; +} + +/* Allow +-, */ +bool debug_find_tag(const char *tagname, bool add, int64_t *current_level) +{ + Dmsg3(010, "add=%d tag=%s level=%lld\n", add, tagname, *current_level); + if (!*tagname) { + /* Nothing in the buffer */ + return true; + } + for (int i=0; debug_tags[i].tag ; i++) { + if (strcasecmp(debug_tags[i].tag, tagname) == 0) { + if (add) { + *current_level |= debug_tags[i].bit; + } else { + *current_level &= ~(debug_tags[i].bit); + } + return true; + } + } + return false; +} + +bool debug_parse_tags(const char *options, int64_t *current_level) +{ + bool operation; /* + => true, - false */ + char *p, *t, tag[256]; + int max = sizeof(tag) - 1; + bool ret=true; + int64_t level= *current_level; + + t = tag; + *tag = 0; + operation = true; /* add by default */ + + if (!options) { + Dmsg0(100, "No options for tags\n"); + return false; + } + + for (p = (char *)options; *p ; p++) { + if (*p == ',' || *p == '+' || *p == '-' || *p == '!') { + /* finish tag keyword */ + *t = 0; + /* handle tag */ + ret &= debug_find_tag(tag, operation, &level); + + if (*p == ',') { + /* reset tag */ + t = tag; + *tag = 0; + operation = true; + + } else { + /* reset tag */ + t = tag; + *tag = 0; + operation = (*p == '+'); + } + + } else if (isalpha(*p) && (t - tag) < max) { + *t++ = *p; + + } else { /* not isalpha or too long */ + Dmsg1(010, "invalid %c\n", *p); + return false; + } + } + + /* At the end, finish the string and look it */ + *t = 0; + if (t > tag) { /* something found */ + /* handle tag */ + ret &= debug_find_tag(tag, operation, &level); + } + + *current_level = level; + return ret; +} + +int 
generate_daemon_event(JCR *jcr, const char *event) { return 0; } + +void setup_daemon_message_queue() +{ + static MQUEUE_ITEM *item = NULL; + daemon_msg_queue = New(dlist(item, &item->link)); +} + +void free_daemon_message_queue() +{ + P(daemon_msg_queue_mutex); + daemon_msg_queue->destroy(); + free(daemon_msg_queue); + V(daemon_msg_queue_mutex); +} diff --git a/src/lib/message.h b/src/lib/message.h new file mode 100644 index 00000000..ed23ce39 --- /dev/null +++ b/src/lib/message.h @@ -0,0 +1,203 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Define Message Types for Bacula + * Kern Sibbald, 2000 + * + */ + + +#include "bits.h" + +#undef M_DEBUG +#undef M_ABORT +#undef M_FATAL +#undef M_ERROR +#undef M_WARNING +#undef M_INFO +#undef M_MOUNT +#undef M_ERROR_TERM +#undef M_TERM +#undef M_RESTORED +#undef M_SECURITY +#undef M_ALERT +#undef M_VOLMGMT + +/* + * Most of these message levels are more or less obvious. + * They have evolved somewhat during the development of Bacula, + * and here are some of the details of where I am trying to + * head (in the process of changing the code) as of 15 June 2002. + * + * M_ABORT Bacula immediately aborts and tries to produce a traceback + * This is for really serious errors like segmentation fault. + * M_ERROR_TERM Bacula immediately terminates but no dump. This is for + * "obvious" serious errors like daemon already running or + * cannot open critical file, ... where a dump is not wanted. + * M_TERM Bacula daemon shutting down because of request (SIGTERM). + * + * The remaining apply to Jobs rather than the daemon. + * + * M_FATAL Bacula detected a fatal Job error. The Job will be killed, + * but Bacula continues running. + * M_ERROR Bacula detected a Job error. The Job will continue running + * but the termination status will be error. + * M_WARNING Job warning message. + * M_INFO Job information message. + * + * M_RESTORED An ls -l of each restored file. + * + * M_SECURITY For security alerts. This is equivalent to FATAL, but + * generally occurs in a daemon with no JCR. + * + * M_ALERT For Tape Alert messages. 
+ * + * M_VOLMGMT Volume Management message + * + * M_DEBUG and M_SAVED are excluded from M_ALL by default + */ + +enum { + /* Keep M_ABORT=1 for dlist.h */ + M_ABORT = 1, /* MUST abort immediately */ + M_DEBUG, /* debug message */ + M_FATAL, /* Fatal error, stopping job */ + M_ERROR, /* Error, but recoverable */ + M_WARNING, /* Warning message */ + M_INFO, /* Informational message */ + M_SAVED, /* Info on saved file */ + M_NOTSAVED, /* Info on notsaved file */ + M_SKIPPED, /* File skipped during backup by option setting */ + M_MOUNT, /* Mount requests */ + M_ERROR_TERM, /* Error termination request (no dump) */ + M_TERM, /* Terminating daemon normally */ + M_RESTORED, /* ls -l of restored files */ + M_SECURITY, /* security violation */ + M_ALERT, /* tape alert messages */ + M_VOLMGMT /* Volume management messages */ +}; + +#define M_MAX M_VOLMGMT /* keep this updated ! */ + +/* Define message destination structure */ +/* *** FIXME **** where should be extended to handle multiple values */ +typedef struct s_dest { + struct s_dest *next; + int dest_code; /* destination (one of the MD_ codes) */ + int max_len; /* max mail line length */ + FILE *fd; /* file descriptor */ + char msg_types[nbytes_for_bits(M_MAX+1)]; /* message type mask */ + char *where; /* filename/program name */ + char *mail_cmd; /* mail command */ + POOLMEM *mail_filename; /* unique mail filename */ +} DEST; + +/* Message Destination values for dest field of DEST */ +enum { + MD_SYSLOG = 1, /* send msg to syslog */ + MD_MAIL, /* email group of messages */ + MD_FILE, /* write messages to a file */ + MD_APPEND, /* append messages to a file */ + MD_STDOUT, /* print messages */ + MD_STDERR, /* print messages to stderr */ + MD_DIRECTOR, /* send message to the Director */ + MD_OPERATOR, /* email a single message to the operator */ + MD_CONSOLE, /* send msg to UserAgent or console */ + MD_MAIL_ON_ERROR, /* email messages if job errors */ + MD_MAIL_ON_SUCCESS, /* email messages if job succeeds */ + MD_CATALOG /* sent to catalog Log table */ +}; + +/* Queued message item */ +struct MQUEUE_ITEM { + dlink link; + int type; /* message M_ type */ + int repeat; /* count of identical messages */ + utime_t mtime; /* time stamp message queued */ + char msg[1]; /* message text */ +}; + +/* Debug options */ +#define DEBUG_CLEAR_FLAGS /* 0 clear debug_flags */ +#define DEBUG_NO_WIN32_WRITE_ERROR /* i continue even after win32 errors */ +#define DEBUG_WIN32DECOMP /* d */ +#define DEBUG_DBG_TIMESTAMP /* t turn on timestamp in trace file */ +#define DEBUG_DBG_NO_TIMESTAMP /* T turn off timestamp in trace file */ +#define DEBUG_TRUNCATE_TRACE /* c clear trace file if opened */ + +/* Bits (1, 2, 4, ...) 
for debug_flags used by set_debug_flags() */ +#define DEBUG_MUTEX_EVENT (1 << 0) /* l */ +#define DEBUG_PRINT_EVENT (1 << 1) /* p */ + +/* Tags that can be used with the setdebug command + * We can extend this list to use 64bit + * When adding new ones, keep existing one + * Corresponding strings are defined in messages.c (debugtags) + */ +#define DT_LOCK (1<<30) /* lock */ +#define DT_NETWORK (1<<29) /* network */ +#define DT_PLUGIN (1<<28) /* plugin */ +#define DT_VOLUME (1<<27) /* volume */ +#define DT_SQL (1<<26) /* sql */ +#define DT_BVFS (1<<25) /* bvfs */ +#define DT_MEMORY (1<<24) /* memory */ +#define DT_SCHEDULER (1<<23) /* scheduler */ +#define DT_PROTOCOL (1<<22) /* protocol */ +#define DT_xxxxx (1<<21) /* reserved BEE */ +#define DT_xxx (1<<20) /* reserved BEE */ +#define DT_SNAPSHOT (1<<19) /* Snapshot */ +#define DT_RECORD (1<<18) /* Record/block */ +#define DT_ASX (1<<16) /* used by Alain for personal debugging */ +#define DT_ALL (0x7FFF0000) /* all (up to debug_level 65635, 15 flags available) */ + +const char *debug_get_tag(uint32_t pos, const char **desc); +bool debug_find_tag(const char *tagname, bool add, int64_t *current_level); +bool debug_parse_tags(const char *options, int64_t *current_level); + + +void d_msg(const char *file, int line, int64_t level, const char *fmt,...) CHECK_FORMAT(printf, 4, 5); +void e_msg(const char *file, int line, int type, int level, const char *fmt,...) CHECK_FORMAT(printf, 5, 6);; +void Jmsg(JCR *jcr, int type, utime_t mtime, const char *fmt,...) CHECK_FORMAT(printf, 4, 5); +void Qmsg(JCR *jcr, int type, utime_t mtime, const char *fmt,...) CHECK_FORMAT(printf, 4, 5); +bool get_trace(void); +void set_debug_flags(char *options); +const char *get_basename(const char *pathname); +bool is_message_type_set(JCR *jcr, int type); +void set_trace_for_tools(FILE *new_trace_fd); // called by Bacula's tools only + +class BDB; /* define forward reference */ +typedef bool (*sql_query_call)(JCR *jcr, const char *cmd); +typedef bool (*sql_escape_call)(JCR *jcr, BDB *db, char *snew, char *sold, int len); + +extern DLL_IMP_EXP sql_query_call p_sql_query; +extern DLL_IMP_EXP sql_escape_call p_sql_escape; + +extern DLL_IMP_EXP int64_t debug_level; +extern DLL_IMP_EXP int64_t debug_level_tags; +extern DLL_IMP_EXP int32_t debug_flags; +extern DLL_IMP_EXP bool dbg_timestamp; /* print timestamp in debug output */ +extern DLL_IMP_EXP bool prt_kaboom; /* Print kaboom output */ +extern DLL_IMP_EXP int verbose; +extern DLL_IMP_EXP char my_name[]; +extern DLL_IMP_EXP const char * working_directory; +extern DLL_IMP_EXP utime_t daemon_start_time; + +extern DLL_IMP_EXP bool console_msg_pending; +extern DLL_IMP_EXP FILE * con_fd; /* Console file descriptor */ +extern DLL_IMP_EXP brwlock_t con_lock; /* Console lock structure */ diff --git a/src/lib/mutex_list.h b/src/lib/mutex_list.h new file mode 100644 index 00000000..759c9606 --- /dev/null +++ b/src/lib/mutex_list.h @@ -0,0 +1,36 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. 
+ + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +#ifndef MUTEX_LIST_H +#define MUTEX_LIST_H 1 + +/* + * Use this list to manage lock order and protect the Bacula from + * race conditions and dead locks + */ + +#define PRIO_SD_DEV_ACQUIRE 4 /* dev.acquire_mutex */ +#define PRIO_SD_DEV_ACCESS 5 /* dev.m_mutex */ +#define PRIO_SD_VOL_LIST 0 /* vol_list_lock */ +#define PRIO_SD_VOL_INFO 12 /* vol_info_mutex */ +#define PRIO_SD_READ_VOL_LIST 13 /* read_vol_list */ +#define PRIO_SD_DEV_SPOOL 14 /* dev.spool_mutex */ +#define PRIO_SD_ACH_ACCESS 16 /* autochanger lock mutex */ + +#endif diff --git a/src/lib/openssl-compat.h b/src/lib/openssl-compat.h new file mode 100644 index 00000000..e811a4b9 --- /dev/null +++ b/src/lib/openssl-compat.h @@ -0,0 +1,43 @@ +#ifndef __OPENSSL_COPMAT__H__ +#define __OPENSSL_COPMAT__H__ + +#if (OPENSSL_VERSION_NUMBER < 0x10100000L) +static inline int EVP_PKEY_up_ref(EVP_PKEY *pkey) +{ + CRYPTO_add(&pkey->references, 1, CRYPTO_LOCK_EVP_PKEY); + return 1; +} + +static inline void EVP_CIPHER_CTX_reset(EVP_CIPHER_CTX *ctx) +{ + EVP_CIPHER_CTX_init(ctx); +} + +static inline void EVP_MD_CTX_reset(EVP_MD_CTX *ctx) +{ + EVP_MD_CTX_init(ctx); +} + +static inline EVP_MD_CTX *EVP_MD_CTX_new(void) +{ + EVP_MD_CTX *ctx; + + ctx = (EVP_MD_CTX *)OPENSSL_malloc(sizeof(EVP_MD_CTX)); + if (ctx) + memset(ctx, 0, sizeof(EVP_MD_CTX)); + return ctx; +} + +static inline void EVP_MD_CTX_free(EVP_MD_CTX *ctx) +{ + EVP_MD_CTX_reset(ctx); + OPENSSL_free(ctx); +} + +static inline const unsigned char *ASN1_STRING_get0_data(const ASN1_STRING *asn1) +{ + return asn1->data; +} +#endif + +#endif diff --git a/src/lib/openssl.c b/src/lib/openssl.c new file mode 100644 index 00000000..7499a301 --- /dev/null +++ b/src/lib/openssl.c @@ -0,0 +1,341 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * openssl.c OpenSSL support functions + * + * Author: Landon Fuller + * + * This file was contributed to the Bacula project by Landon Fuller. + * + * Landon Fuller has been granted a perpetual, worldwide, non-exclusive, + * no-charge, royalty-free, irrevocable copyright license to reproduce, + * prepare derivative works of, publicly display, publicly perform, + * sublicense, and distribute the original work contributed by Landon Fuller + * to the Bacula project in source or object form. + * + * If you wish to license these contributions under an alternate open source + * license please contact Landon Fuller . + */ + + +#include "bacula.h" +#include + +#ifdef HAVE_OPENSSL + +/* Are we initialized? */ +static int crypto_initialized = false; +/* + * ***FIXME*** this is a sort of dummy to avoid having to + * change all the existing code to pass either a jcr or + * a NULL. 
Passing a NULL causes the messages to be + * printed by the daemon -- not very good :-( + */ +void openssl_post_errors(int code, const char *errstring) +{ + openssl_post_errors(NULL, code, errstring); +} + +/* + * Post all per-thread openssl errors + */ +void openssl_post_errors(JCR *jcr, int code, const char *errstring) +{ + char buf[512]; + unsigned long sslerr; + + /* Pop errors off of the per-thread queue */ + while((sslerr = ERR_get_error()) != 0) { + /* Acquire the human readable string */ + ERR_error_string_n(sslerr, buf, sizeof(buf)); + Dmsg3(50, "jcr=%p %s: ERR=%s\n", jcr, errstring, buf); + Qmsg2(jcr, M_ERROR, 0, "%s: ERR=%s\n", errstring, buf); + } +} + +#if (OPENSSL_VERSION_NUMBER < 0x10100000L) || defined(LIBRESSL_VERSION_NUMBER) +/* Array of mutexes for use with OpenSSL static locking */ +static pthread_mutex_t *mutexes; + +/* OpenSSL dynamic locking structure */ +struct CRYPTO_dynlock_value { + pthread_mutex_t mutex; +}; + +/* + * Return an OpenSSL thread ID + * Returns: thread ID + * + */ +static unsigned long get_openssl_thread_id(void) +{ +#ifdef HAVE_WIN32 + return (unsigned long)GetCurrentThreadId(); +#else + /* + * Comparison without use of pthread_equal() is mandated by the OpenSSL API + * + * Note: this creates problems with the new Win32 pthreads + * emulation code, which defines pthread_t as a structure. + */ + return ((unsigned long)pthread_self()); +#endif +} + +/* + * Allocate a dynamic OpenSSL mutex + */ +static struct CRYPTO_dynlock_value *openssl_create_dynamic_mutex (const char *file, int line) +{ + struct CRYPTO_dynlock_value *dynlock; + int stat; + + dynlock = (struct CRYPTO_dynlock_value *)malloc(sizeof(struct CRYPTO_dynlock_value)); + + if ((stat = pthread_mutex_init(&dynlock->mutex, NULL)) != 0) { + berrno be; + Jmsg1(NULL, M_ABORT, 0, _("Unable to init mutex: ERR=%s\n"), be.bstrerror(stat)); + } + + return dynlock; +} + +static void openssl_update_dynamic_mutex(int mode, struct CRYPTO_dynlock_value *dynlock, const char *file, int line) +{ + if (mode & CRYPTO_LOCK) { + P(dynlock->mutex); + } else { + V(dynlock->mutex); + } +} + +static void openssl_destroy_dynamic_mutex(struct CRYPTO_dynlock_value *dynlock, const char *file, int line) +{ + int stat; + + if ((stat = pthread_mutex_destroy(&dynlock->mutex)) != 0) { + berrno be; + Jmsg1(NULL, M_ABORT, 0, _("Unable to destroy mutex: ERR=%s\n"), be.bstrerror(stat)); + } + + free(dynlock); +} + +/* + * (Un)Lock a static OpenSSL mutex + */ +static void openssl_update_static_mutex (int mode, int i, const char *file, int line) +{ + if (mode & CRYPTO_LOCK) { + P(mutexes[i]); + } else { + V(mutexes[i]); + } +} + +/* + * Initialize OpenSSL thread support + * Returns: 0 on success + * errno on failure + */ +static int openssl_init_threads (void) +{ + int i, numlocks; + int stat; + + /* Set thread ID callback */ + CRYPTO_set_id_callback(get_openssl_thread_id); + + /* Initialize static locking */ + numlocks = CRYPTO_num_locks(); + mutexes = (pthread_mutex_t *) malloc(numlocks * sizeof(pthread_mutex_t)); + for (i = 0; i < numlocks; i++) { + if ((stat = pthread_mutex_init(&mutexes[i], NULL)) != 0) { + berrno be; + Jmsg1(NULL, M_FATAL, 0, _("Unable to init mutex: ERR=%s\n"), be.bstrerror(stat)); + return stat; + } + } + + /* Set static locking callback */ + CRYPTO_set_locking_callback(openssl_update_static_mutex); + + /* Initialize dyanmic locking */ + CRYPTO_set_dynlock_create_callback(openssl_create_dynamic_mutex); + CRYPTO_set_dynlock_lock_callback(openssl_update_dynamic_mutex); + 
CRYPTO_set_dynlock_destroy_callback(openssl_destroy_dynamic_mutex); + + return 0; +} + +/* + * Clean up OpenSSL threading support + */ +static void openssl_cleanup_threads(void) +{ + int i, numlocks; + int stat; + + /* Unset thread ID callback */ + CRYPTO_set_id_callback(NULL); + + /* Deallocate static lock mutexes */ + numlocks = CRYPTO_num_locks(); + for (i = 0; i < numlocks; i++) { + if ((stat = pthread_mutex_destroy(&mutexes[i])) != 0) { + berrno be; + /* We don't halt execution, reporting the error should be sufficient */ + Jmsg1(NULL, M_ERROR, 0, _("Unable to destroy mutex: ERR=%s\n"), + be.bstrerror(stat)); + } + } + + /* Unset static locking callback */ + CRYPTO_set_locking_callback(NULL); + + /* Free static lock array */ + free(mutexes); + + /* Unset dynamic locking callbacks */ + CRYPTO_set_dynlock_create_callback(NULL); + CRYPTO_set_dynlock_lock_callback(NULL); + CRYPTO_set_dynlock_destroy_callback(NULL); +} + +#endif + +/* + * Seed OpenSSL PRNG + * Returns: 1 on success + * 0 on failure + */ +static int openssl_seed_prng (void) +{ + const char *names[] = { "/dev/urandom", "/dev/random", NULL }; + int i; + + // ***FIXME*** + // Win32 Support + // Read saved entropy? + + for (i = 0; names[i]; i++) { + if (RAND_load_file(names[i], 1024) != -1) { + /* Success */ + return 1; + } + } + + /* Fail */ + return 0; +} + +/* + * Save OpenSSL Entropy + * Returns: 1 on success + * 0 on failure + */ +static int openssl_save_prng (void) +{ + // ***FIXME*** + // Implement PRNG state save + return 1; +} + +/* + * Perform global initialization of OpenSSL + * This function is not thread safe. + * Returns: 0 on success + * errno on failure + */ +int init_crypto (void) +{ + int stat = 0; + +#if (OPENSSL_VERSION_NUMBER < 0x10100000L) || defined(LIBRESSL_VERSION_NUMBER) + if ((stat = openssl_init_threads()) != 0) { + berrno be; + Jmsg1(NULL, M_ABORT, 0, + _("Unable to init OpenSSL threading: ERR=%s\n"), be.bstrerror(stat)); + } + + /* Load libssl and libcrypto human-readable error strings */ + SSL_load_error_strings(); + + /* Initialize OpenSSL SSL library */ + SSL_library_init(); + + /* Register OpenSSL ciphers and digests */ + OpenSSL_add_all_algorithms(); +#endif + + if (!openssl_seed_prng()) { + Jmsg0(NULL, M_ERROR_TERM, 0, _("Failed to seed OpenSSL PRNG\n")); + } + + crypto_initialized = true; + + return stat; +} + +/* + * Perform global cleanup of OpenSSL + * All cryptographic operations must be completed before calling this function. + * This function is not thread safe. + * Returns: 0 on success + * errno on failure + */ +int cleanup_crypto (void) +{ + /* + * Ensure that we've actually been initialized; Doing this here decreases the + * complexity of client's termination/cleanup code. 
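+ *
+ * As an illustrative sketch only (not code taken from the daemons), the
+ * intended call order is simply:
+ *
+ *    init_crypto();        at daemon startup, before any TLS/digest work
+ *    ...crypto operations...
+ *    cleanup_crypto();     once, at shutdown
+ *
+ * so a caller that never got as far as init_crypto() can still invoke
+ * cleanup_crypto() unconditionally.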
+ */ + if (!crypto_initialized) { + return 0; + } + + if (!openssl_save_prng()) { + Jmsg0(NULL, M_ERROR, 0, _("Failed to save OpenSSL PRNG\n")); + } + +#if (OPENSSL_VERSION_NUMBER < 0x10100000L) || defined(LIBRESSL_VERSION_NUMBER) + openssl_cleanup_threads(); + + /* Free libssl and libcrypto error strings */ + ERR_free_strings(); + + /* Free all ciphers and digests */ + EVP_cleanup(); + + /* Free memory used by PRNG */ + RAND_cleanup(); +#endif + + crypto_initialized = false; + + return 0; +} + +#else + +/* Dummy routines */ +int init_crypto (void) { return 0; } +int cleanup_crypto (void) { return 0; } + +#endif /* HAVE_OPENSSL */ diff --git a/src/lib/openssl.h b/src/lib/openssl.h new file mode 100644 index 00000000..9374a581 --- /dev/null +++ b/src/lib/openssl.h @@ -0,0 +1,44 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * openssl.h OpenSSL support functions + * + * Author: Landon Fuller + * + * This file was contributed to the Bacula project by Landon Fuller. + * + * Landon Fuller has been granted a perpetual, worldwide, non-exclusive, + * no-charge, royalty-free, irrevocable copyright * license to reproduce, + * prepare derivative works of, publicly display, publicly perform, + * sublicense, and distribute the original work contributed by Landon Fuller + * to the Bacula project in source or object form. + * + * If you wish to license these contributions under an alternate open source + * license please contact Landon Fuller . + */ + +#ifndef __OPENSSL_H_ +#define __OPENSSL_H_ + +#ifdef HAVE_OPENSSL +void openssl_post_errors (int code, const char *errstring); +void openssl_post_errors (JCR *jcr, int code, const char *errstring); +#endif /* HAVE_OPENSSL */ + +#endif /* __OPENSSL_H_ */ diff --git a/src/lib/output.c b/src/lib/output.c new file mode 100644 index 00000000..ae12c162 --- /dev/null +++ b/src/lib/output.c @@ -0,0 +1,480 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Written by: Eric Bollengier, December MMXIII + */ + +#define OUTPUT_C /* control dll export in output.h */ +#include "output.h" +#include "plugins.h" + +/* use new output (lowercase, no special char) */ +#define OF_USE_NEW_OUTPUT 1 + +void OutputWriter::parse_options(const char *options) +{ + int nb=0; + const char *p = options; + while (*p) { + nb=0; + + switch(*p) { + case 'C': + flags = 0; + set_time_format(OW_DEFAULT_TIMEFORMAT); + set_separator(OW_DEFAULT_SEPARATOR); + break; + + case 'S': /* object separator */ + while(isdigit(*(p+1))) { + nb = nb*10 + (*(++p) - '0'); + } + if (isascii(nb)) { + set_object_separator((char) nb); + } + break; + + case 'o': + flags |= OF_USE_NEW_OUTPUT; /* lowercase and only isalpha */ + break; + + case 't': /* Time format */ + if (isdigit(*(p+1))) { + nb = (*(++p) - '0'); + set_time_format((OutputTimeType) nb); + } + break; + + case 's': /* Separator */ + while(isdigit(*(p+1))) { + nb = nb*10 + (*(++p) - '0'); + } + if (isascii(nb)) { + set_separator((char) nb); + } + break; + default: + break; + } + p++; + } +} + +char *OutputWriter::get_options(char *dest) +{ + char ed1[50]; + *dest = *ed1 = 0; + if (separator != OW_DEFAULT_SEPARATOR) { + snprintf(dest, 50, "s%d", (int)separator); + } + if (object_separator) { + snprintf(ed1, sizeof(ed1), "S%d", (int) object_separator); + bstrncat(dest, ed1, sizeof(ed1)); + } + if (timeformat != OW_DEFAULT_TIMEFORMAT) { + snprintf(ed1, sizeof(ed1), "t%d", (int) timeformat); + bstrncat(dest, ed1, sizeof(ed1)); + } + if (flags & OF_USE_NEW_OUTPUT) { + bstrncat(dest, "o", 1); + } + return dest; +} + +void OutputWriter::get_buf(bool append) +{ + if (!buf) { + buf = get_pool_memory(PM_MESSAGE); + *buf = 0; + + } else if (!append) { + *buf = 0; + } +} + +char *OutputWriter::start_group(const char *name, bool append) +{ + get_buf(append); + pm_strcat(buf, name); + pm_strcat(buf, ":\n"); + return buf; +} + +char *OutputWriter::end_group(bool append) +{ + get_buf(append); + pm_strcat(buf, "\n"); + + return buf; +} + +char *OutputWriter::start_list(const char *name, bool append) +{ + get_buf(append); + pm_strcat(buf, name); + pm_strcat(buf, ": [\n"); + return buf; +} + +char *OutputWriter::end_list(bool append) +{ + get_buf(append); + pm_strcat(buf, "]\n"); + + return buf; +} + +/* Usage: + * get_output( + * OT_STRING, "name", "value", + * OT_PINT32, "age", 10, + * OT_TIME, "birth-date", 1120202002, + * OT_PINT64, "weight", 100, + * OT_END); + * + * + * "name=value\nage=10\nbirt-date=2012-01-12 10:20:00\nweight=100\n" + * + */ +char *OutputWriter::get_output(OutputType first, ...) +{ + char *ret; + va_list arg_ptr; + + get_buf(true); /* Append to the current string */ + + va_start(arg_ptr, first); + ret = get_output(arg_ptr, &buf, first); + va_end(arg_ptr); + + return ret; +} + +/* Usage: + * get_output(&out, + * OT_STRING, "name", "value", + * OT_PINT32, "age", 10, + * OT_TIME, "birth-date", 1120202002, + * OT_PINT64, "weight", 100, + * OT_END); + * + * + * "name=value\nage=10\nbirt-date=2012-01-12 10:20:00\nweight=100\n" + * + */ +char *OutputWriter::get_output(POOLMEM **out, OutputType first, ...) 
+{ + va_list arg_ptr; + char *ret; + + va_start(arg_ptr, first); + ret = get_output(arg_ptr, out, first); + va_end(arg_ptr); + + return ret; +} + +char *OutputWriter::get_output(va_list ap, POOLMEM **out, OutputType first) +{ + char ed1[MAX_TIME_LENGTH]; + int i; + int64_t i64; + uint64_t u64; + int32_t i32; + double d; + btime_t bt; + char *s = NULL, *k = NULL; + alist *lst; + Plugin *plug; + POOLMEM *tmp2 = get_pool_memory(PM_FNAME); + POOLMEM *tmp = get_pool_memory(PM_FNAME); + OutputType val = first; + + while (val != OT_END) { + + *tmp = 0; + + /* Some arguments are not using a keyword */ + switch (val) { + case OT_END: + case OT_START_OBJ: + case OT_END_OBJ: + case OT_CLEAR: + break; + + default: + k = va_arg(ap, char *); /* Get the variable name */ + + /* If requested, we can put the keyword in lowercase */ + if (flags & OF_USE_NEW_OUTPUT) { + tmp2 = check_pool_memory_size(tmp2, strlen(k)+1); + for (i = 0; k[i] ; i++) { + if (isalnum(k[i])) { + tmp2[i] = tolower(k[i]); + } else { + tmp2[i] = '_'; + } + } + tmp2[i] = 0; + k = tmp2; + } + } + + //Dmsg2(000, "%d - %s\n", val, k); + + switch (val) { + case OT_ALIST_STR: + lst = va_arg(ap, alist *); + i = 0; + Mmsg(tmp, "%s=", k); + if (lst) { + foreach_alist(s, lst) { + if (i++ > 0) { + pm_strcat(tmp, ","); + } + pm_strcat(tmp, s); + } + } + pm_strcat(tmp, separator_str); + break; + case OT_PLUGINS: + lst = va_arg(ap, alist *); + i = 0; + pm_strcpy(tmp, "plugins="); + if (lst) { + foreach_alist(plug, lst) { + if (i++ > 0) { + pm_strcat(tmp, ","); + } + pm_strcat(tmp, plug->file); + } + } + pm_strcat(tmp, separator_str); + break; + case OT_RATIO: + d = va_arg(ap, double); + Mmsg(tmp, "%s=%.2f%c", k, d, separator); + break; + + case OT_STRING: + s = va_arg(ap, char *); + Mmsg(tmp, "%s=%s%c", k, NPRTB(s), separator) ; + break; + + case OT_INT32: + i32 = va_arg(ap, int32_t); + Mmsg(tmp, "%s=%d%c", k, i32, separator); + break; + + case OT_UTIME: + case OT_BTIME: + if (val == OT_UTIME) { + bt = va_arg(ap, utime_t); + } else { + bt = va_arg(ap, btime_t); + } + switch (timeformat) { + case OTT_TIME_NC: /* Formatted time for user display: dd-Mon hh:mm */ + bstrftime_ny(ed1, sizeof(ed1), bt); + break; + + case OTT_TIME_UNIX: /* unix timestamp */ + bsnprintf(ed1, sizeof(ed1), "%lld", bt); + break; + + case OTT_TIME_ISO: + /* wanted fallback */ + default: + bstrutime(ed1, sizeof(ed1), bt); + } + Mmsg(tmp, "%s_epoch=%lld%c%s=%s%c", k, bt, separator, k, ed1, separator); + break; + + case OT_DURATION: + bt = va_arg(ap, utime_t); + bstrutime(ed1, sizeof(ed1), bt); + Mmsg(tmp, "%s=%lld%c%s_str=%s%c", k, bt, separator, k, edit_utime(bt, ed1, sizeof(ed1)), separator); + break; + + case OT_SIZE: + case OT_INT64: + i64 = va_arg(ap, int64_t); + Mmsg(tmp, "%s=%lld%c", k, i64, separator); + break; + + case OT_PINT64: + u64 = va_arg(ap, uint64_t); + Mmsg(tmp, "%s=%llu%c", k, u64, separator); + break; + + case OT_INT: + i64 = va_arg(ap, int); + Mmsg(tmp, "%s=%lld%c", k, i64, separator); + break; + + case OT_JOBLEVEL: + case OT_JOBTYPE: + case OT_JOBSTATUS: + i32 = va_arg(ap, int32_t); + if (i32 == 0) { + Mmsg(tmp, "%s=%c", k, separator); + } else { + Mmsg(tmp, "%s=%c%c", k, (char)i32, separator); + } + break; + + case OT_CLEAR: + **out = 0; + break; + + case OT_END_OBJ: + pm_strcpy(tmp, "\n"); + break; + + case OT_START_OBJ: + i=0; + if (object_separator) { + for(; i < 32 ; i++) { + tmp[i] = object_separator; + } + } + tmp[i++] = '\n'; + tmp[i] = 0; + break; + + case OT_END: + /* wanted fallback */ + default: + val = OT_END; + } + + if (val != OT_END) { + 
pm_strcat(out, tmp); + val = (OutputType) va_arg(ap, int); /* OutputType is promoted to int when using ... */ + } + } + + free_pool_memory(tmp); + free_pool_memory(tmp2); + //Dmsg1(000, "%s", *out); + return *out; +} + +#ifndef TEST_PROGRAM +#define TEST_PROGRAM_A +#endif + +#ifdef TEST_PROGRAM +#include "unittests.h" + +int main(int argc, char **argv) +{ + Unittests output_test("output_test"); + char ed1[50]; + OutputWriter wt; + POOLMEM *tmp = get_pool_memory(PM_FNAME); + *tmp = 0; + + int nb = 10000; + const char *ptr = "my value"; + char *str = bstrdup("ptr"); + int32_t nb32 = -1; + int64_t nb64 = -1; + btime_t t = time(NULL); + + ok(strcmp(wt.get_options(ed1), "") == 0, "Default options"); + + Pmsg1(000, "%s", wt.start_group("test")); + + wt.get_output(&tmp, OT_CLEAR, + OT_STRING, "test", "my value", + OT_STRING, "test2", ptr, + OT_STRING, "test3", str, + OT_INT, "nb", nb, + OT_INT32, "nb32", nb32, + OT_INT64, "nb64", nb64, + OT_BTIME, "now", t, + OT_END); + + Pmsg1(000, "%s", tmp); + + free_pool_memory(tmp); + + Pmsg1(000, "%s", + wt.get_output(OT_CLEAR, + OT_START_OBJ, + OT_STRING, "test", "my value", + OT_STRING, "test2", ptr, + OT_STRING, "test3", str, + OT_INT, "nb", nb, + OT_INT32, "nb32", nb32, + OT_INT64, "nb64", nb64, + OT_BTIME, "now", t, + OT_END_OBJ, + OT_END)); + + wt.set_time_format(OTT_TIME_UNIX); + ok(strcmp("t1", wt.get_options(ed1)) == 0, "Check unix time format"); + + Pmsg1(000, "%s", + wt.get_output(OT_CLEAR, + OT_BTIME, "now", t, + OT_END)); + + wt.set_time_format(OTT_TIME_NC); + ok(strcmp("t2", wt.get_options(ed1)) == 0, "Check NC time format"); + + Pmsg1(000, "%s", + wt.get_output(OT_CLEAR, + OT_BTIME, "now", t, + OT_END)); + + Pmsg1(000, "%s", wt.end_group(false)); + + wt.parse_options("s43t1O"); + ok(strcmp(wt.get_options(ed1), "s43t1") == 0, "Check options after parsing"); + + ok(strstr( + wt.get_output(OT_CLEAR, + OT_BTIME, "now", t, + OT_STRING, "brazil", "test", + OT_END), + "+brazil=test+") != NULL, + "Check separator"); + + wt.parse_options("CS35"); + ok(strcmp(wt.get_options(ed1), "S35") == 0, "Check options after parsing"); + + Pmsg1(000, "%s", + wt.get_output(OT_CLEAR, + OT_START_OBJ, + OT_STRING, "test", "my value", + OT_STRING, "test2", ptr, + OT_END_OBJ, + OT_START_OBJ, + OT_STRING, "test", "my value", + OT_STRING, "test2", ptr, + OT_END_OBJ, + OT_END)); + + free(str); + + return report(); +} +#endif /* TEST_PROGRAM */ diff --git a/src/lib/output.h b/src/lib/output.h new file mode 100644 index 00000000..97e4e3e1 --- /dev/null +++ b/src/lib/output.h @@ -0,0 +1,162 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2019 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Written by: Eric Bollengier, December MMXIII + */ + +#ifndef OUTPUT_H +#define OUTPUT_H + +#include "bacula.h" + +enum _OutputType { + OT_INT, /* Integer */ + OT_SIZE, /* int64 size */ + OT_PINT32, /* Uint32 */ + OT_INT32, + OT_PINT64, /* Uint64 */ + OT_INT64, + OT_STRING, + OT_BTIME, /* btime_t */ + OT_UTIME, /* utime_t */ + OT_JOBTYPE, + OT_JOBLEVEL, + OT_JOBSTATUS, + OT_PLUGINS, /* Plugin alist */ + OT_RATIO, /* Double %.2f format */ + OT_ALIST_STR, + + OT_END, /* Last operator (no extra arg) */ + OT_START_OBJ, /* Skip a line to start a new object (no extra arg) */ + OT_END_OBJ, /* Skip a line to end current object (no extra arg) */ + OT_CLEAR, /* truncate current buffer (no extra arg) */ + OT_DURATION /* time duration in second */ +}; +/* Force OutputType to int to avoid compiler default conversion warnings */ +typedef int OutputType; + +/* Keep the same order for get_options/parse_options */ +typedef enum { + OTT_TIME_ISO = 0, + OTT_TIME_UNIX = 1, /* unix time stamp */ + OTT_TIME_NC = 2 /* Formatted time for user display: dd-Mon hh:mm */ +} OutputTimeType; + +#define OW_DEFAULT_SEPARATOR '\n' +#define OW_DEFAULT_TIMEFORMAT OTT_TIME_ISO + +/* If included from output.c, mark the class as export (else, symboles are + * exported from all files... + */ +#ifdef OUTPUT_C +# define OUTPUT_EXPORT DLL_IMP_EXP +#else +# define OUTPUT_EXPORT +#endif + +class OUTPUT_EXPORT OutputWriter: public SMARTALLOC +{ +private: + void init() { + buf = NULL; + separator = OW_DEFAULT_SEPARATOR; + separator_str[0] = OW_DEFAULT_SEPARATOR; + separator_str[1] = 0; + timeformat = OW_DEFAULT_TIMEFORMAT; + object_separator = 0; + flags = 0; + }; + +protected: + virtual char *get_output(va_list ap, POOLMEM **out, OutputType first); + void get_buf(bool append); /* Allocate buf if needed */ + + int flags; + char separator; + char separator_str[2]; + char object_separator; + OutputTimeType timeformat; + POOLMEM *buf; + +public: + OutputWriter(const char *opts) { + init(); + parse_options(opts); + }; + + OutputWriter() { + init(); + }; + + virtual ~OutputWriter() { + free_and_null_pool_memory(buf); + }; + + /* s[ascii code]t[0-3] + * ^^^ ^^ + * separator time format + * "s43" => + will be used as separator + * "s43t1" => + as separator and time as unix timestamp + */ + virtual void parse_options(const char *opts); + virtual char *get_options(char *dest_l128); /* MAX_NAME_LENGTH mini */ + + /* Make a clear separation in the output*/ + virtual char *start_group(const char *name, bool append=true); + virtual char *end_group(bool append=true); + + /* Make a clear separation in the output for list*/ + virtual char *start_list(const char *name, bool append=true); + virtual char *end_list(bool append=true); + + /* \n by default, can be \t for example */ + void set_separator(char sep) { + separator = sep; + separator_str[0] = sep; + }; + + void set_object_separator(char sep) { + object_separator = sep; + }; + + void set_time_format(OutputTimeType fmt) { + timeformat = fmt; + }; + +/* Usage: + * get_output(&out, + * OT_STRING, "name", "value", + * OT_PINT32, "age", 10, + * OT_TIME, "birth-date", 1120202002, + * OT_PINT64, "weight", 100, + * OT_END); + * + * + * "name=value\nage=10\nbirt-date=2012-01-12 10:20:00\nweight=100\n" + * + */ + + /* Use a user supplied buffer */ + char *get_output(POOLMEM **out, OutputType first, ...); + + /* Use the internal buffer */ + char *get_output(OutputType first, ...); +}; + +#endif diff --git a/src/lib/parse_conf.c b/src/lib/parse_conf.c new file mode 100644 index 00000000..d907a7cd 
--- /dev/null +++ b/src/lib/parse_conf.c @@ -0,0 +1,1272 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Master Configuration routines. + * + * This file contains the common parts of the Bacula + * configuration routines. + * + * Note, the configuration file parser consists of three parts + * + * 1. The generic lexical scanner in lib/lex.c and lib/lex.h + * + * 2. The generic config scanner in lib/parse_conf.c and + * lib/parse_conf.h. + * These files contain the parser code, some utility + * routines, and the common store routines (name, int, + * string, time, int64, size, ...). + * + * 3. The daemon-specific file, which contains the Resource + * definitions as well as any specific store routines + * for the resource records. + * + * N.B. This is a two-pass parser, so if you malloc() a string + * in a "store" routine, you must ensure you do it during + * only one of the two passes, or free it between the passes. + * Also, note that the resource record is malloced and + * saved in save_resource() during pass 1. Anything that + * you want saved after pass two (e.g. resource pointers) + * must be done explicitly in save_resource(). Take a look + * at the Job resource in src/dird/dird_conf.c to see how + * it is done. + * + * Kern Sibbald, January MM + * + */ + + +#include "bacula.h" + +#if defined(HAVE_WIN32) +#include "shlobj.h" +#else +#define MAX_PATH 1024 +#endif + +/* + * Define the Union of all the common resource structure definitions. + */ +union URES { + MSGS res_msgs; + RES hdr; +}; + +#if defined(_MSC_VER) +// work around visual studio name mangling preventing external linkage since res_all +// is declared as a different type when instantiated. +extern "C" URES res_all; +#else +extern URES res_all; +#endif + +extern brwlock_t res_lock; /* resource lock */ + + +/* Forward referenced subroutines */ +static void scan_types(LEX *lc, MSGS *msg, int dest, char *where, char *cmd); +static const char *get_default_configdir(); + +/* Common Resource definitions */ + +/* + * Message resource directives + * Note: keep all store_msgs items last in the list, as they are all + * output in JSON as a list. + * Also, the last store_msgs item must have its flags set to ITEM_LAST + * so that the list editor (bjson.c) knows when to stop.
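+ *
+ * For illustration only (a hypothetical conf fragment, not part of this
+ * file), the directives declared below are what let a stanza such as the
+ * following be parsed; the addresses and the log path are placeholders:
+ *
+ *   Messages {
+ *     Name = Standard
+ *     MailCommand = "/usr/sbin/bsmtp -h localhost -f bacula@example.org -s \"Bacula %t %e\" %r"
+ *     Mail = admin@example.org = all, !skipped
+ *     Console = all, !skipped, !saved
+ *     Append = "/opt/bacula/log/bacula.log" = all, !skipped
+ *   }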
+ * + * name handler value code flags default_value + */ +RES_ITEM msgs_items[] = { + {"Name", store_name, ITEM(res_msgs.hdr.name), 0, ITEM_REQUIRED, 0}, + {"Description", store_str, ITEM(res_msgs.hdr.desc), 0, 0, 0}, + {"MailCommand", store_str, ITEM(res_msgs.mail_cmd), 0, ITEM_ALLOW_DUPS, 0}, + {"OperatorCommand", store_str, ITEM(res_msgs.operator_cmd), 0, ITEM_ALLOW_DUPS, 0}, + /* See comments above */ + {"Syslog", store_msgs, ITEM(res_msgs), MD_SYSLOG, 0, 0}, + {"Mail", store_msgs, ITEM(res_msgs), MD_MAIL, 0, 0}, + {"MailOnError", store_msgs, ITEM(res_msgs), MD_MAIL_ON_ERROR, 0, 0}, + {"MailOnSuccess", store_msgs, ITEM(res_msgs), MD_MAIL_ON_SUCCESS, 0, 0}, + {"File", store_msgs, ITEM(res_msgs), MD_FILE, 0, 0}, + {"Append", store_msgs, ITEM(res_msgs), MD_APPEND, 0, 0}, + {"Stdout", store_msgs, ITEM(res_msgs), MD_STDOUT, 0, 0}, + {"Stderr", store_msgs, ITEM(res_msgs), MD_STDERR, 0, 0}, + {"Director", store_msgs, ITEM(res_msgs), MD_DIRECTOR, 0, 0}, + {"Console", store_msgs, ITEM(res_msgs), MD_CONSOLE, 0, 0}, + {"Operator", store_msgs, ITEM(res_msgs), MD_OPERATOR, 0, 0}, + {"Catalog", store_msgs, ITEM(res_msgs), MD_CATALOG, ITEM_LAST, 0}, + {NULL, NULL, {0}, 0, 0, 0} +}; + +/* Various message types */ +s_kw msg_types[] = { + {"Debug", M_DEBUG}, /* Keep 1st place */ + {"Saved", M_SAVED}, /* Keep 2nd place */ + {"Abort", M_ABORT}, + {"Fatal", M_FATAL}, + {"Error", M_ERROR}, + {"Warning", M_WARNING}, + {"Info", M_INFO}, + {"NotSaved", M_NOTSAVED}, + {"Skipped", M_SKIPPED}, + {"Mount", M_MOUNT}, + {"Terminate", M_TERM}, + {"Restored", M_RESTORED}, + {"Security", M_SECURITY}, + {"Alert", M_ALERT}, + {"VolMgmt", M_VOLMGMT}, + {"ErrorTerm", M_ERROR_TERM}, + {"All", M_MAX+1}, + {NULL, 0} +}; + + +/* + * Tape Label types permitted in Pool records + * + * tape label label code = token + */ +s_kw tapelabels[] = { + {"Bacula", B_BACULA_LABEL}, + {"ANSI", B_ANSI_LABEL}, + {"IBM", B_IBM_LABEL}, + {NULL, 0} +}; + + +/* Simply print a message */ +static void prtmsg(void *sock, const char *fmt, ...) +{ + va_list arg_ptr; + + va_start(arg_ptr, fmt); + vfprintf(stdout, fmt, arg_ptr); + va_end(arg_ptr); +} + +const char *res_to_str(int rcode) +{ + if (rcode < r_first || rcode > r_last) { + return _("***UNKNOWN***"); + } else { + return resources[rcode-r_first].name; + } +} + +/* + * Create a new res_head pointer to a list of res_heads + */ +void CONFIG::init_res_head(RES_HEAD ***rhead, int32_t rfirst, int32_t rlast) +{ + int num = rlast - rfirst + 1; + RES *res = NULL; + RES_HEAD **rh; + rh = *rhead = (RES_HEAD **)malloc(num * sizeof(RES_HEAD)); + for (int i=0; i<num; i++) { + rh[i] = (RES_HEAD *)malloc(sizeof(RES_HEAD)); + rh[i]->res_list = New(rblist(res, &res->link)); + rh[i]->first = NULL; + rh[i]->last = NULL; + } +} + +/* + * Insert the resource in res_all into the + * resource list.
+ */ +bool CONFIG::insert_res(int rindex, int size) +{ + RES *res; + rblist *list = m_res_head[rindex]->res_list; + res = (RES *)malloc(size); + memcpy(res, m_res_all, size); + if (list->empty()) { + list->insert(res, res_compare); + m_res_head[rindex]->first = res; + m_res_head[rindex]->last = res; + } else { + RES *item, *prev; + prev = m_res_head[rindex]->last; + item = (RES *)list->insert(res, res_compare); + if (item != res) { + Mmsg(m_errmsg, _("Attempt to define second \"%s\" resource named \"%s\" is not permitted.\n"), + resources[rindex].name, ((URES *)res)->hdr.name); + return false; + } + prev->res_next = res; + m_res_head[rindex]->last = res; + } + Dmsg2(900, _("Inserted res: %s index=%d\n"), ((URES *)res)->hdr.name, rindex); + return true; +} + +/* + * Initialize the static structure to zeros, then + * apply all the default values. + */ +static void init_resource(CONFIG *config, int type, RES_ITEM *items, int pass) +{ + int i; + int rindex = type - r_first; + + memset(config->m_res_all, 0, config->m_res_all_size); + res_all.hdr.rcode = type; + res_all.hdr.refcnt = 1; + + /* Set defaults in each item */ + for (i=0; items[i].name; i++) { + Dmsg3(900, "Item=%s def=%s defval=%d\n", items[i].name, + (items[i].flags & ITEM_DEFAULT) ? "yes" : "no", + items[i].default_value); + if (items[i].flags & ITEM_DEFAULT && items[i].default_value != 0) { + if (items[i].handler == store_bit) { + *(uint32_t *)(items[i].value) |= items[i].code; + } else if (items[i].handler == store_bool) { + *(bool *)(items[i].value) = items[i].default_value != 0; + } else if (items[i].handler == store_pint32 || + items[i].handler == store_int32 || + items[i].handler == store_size32) { + *(uint32_t *)(items[i].value) = items[i].default_value; + } else if (items[i].handler == store_int64) { + *(int64_t *)(items[i].value) = items[i].default_value; + } else if (items[i].handler == store_size64) { + *(uint64_t *)(items[i].value) = (uint64_t)items[i].default_value; + } else if (items[i].handler == store_speed) { + *(uint64_t *)(items[i].value) = (uint64_t)items[i].default_value; + } else if (items[i].handler == store_time) { + *(utime_t *)(items[i].value) = (utime_t)items[i].default_value; + } else if (pass == 1 && items[i].handler == store_addresses) { + init_default_addresses((dlist**)items[i].value, items[i].default_value); + } + } + /* If this triggers, take a look at lib/parse_conf.h */ + if (i >= MAX_RES_ITEMS) { + Emsg1(M_ERROR_TERM, 0, _("Too many directives in \"%s\" resource\n"), resources[rindex].name); + } + } +} + +/* Initialize a resouce with default values */ +bool init_resource(CONFIG *config, uint32_t type, void *res) +{ + RES_ITEM *items; + for (int i=0; resources[i].name; i++) { + if (resources[i].rcode == type) { + items = resources[i].items; + if (!items) { + return false; + } + init_resource(config, type, items, 1); + memcpy(res, config->m_res_all, config->m_res_all_size); + return true; + } + } + return false; +} + +/* + * Dump each resource of type + */ +void dump_each_resource(int type, void sendit(void *sock, const char *fmt, ...), void *sock) +{ + RES *res = NULL; + + if (type < 0) { /* no recursion */ + type = -type; + } + foreach_res(res, type) { + dump_resource(-type, res, sendit, sock); + } +} + + +/* Store Messages Destination information */ +void store_msgs(LEX *lc, RES_ITEM *item, int index, int pass) +{ + int token; + char *cmd; + POOLMEM *dest; + int dest_len; + + Dmsg2(900, "store_msgs pass=%d code=%d\n", pass, item->code); + if (pass == 1) { + switch (item->code) { + case 
MD_STDOUT: + case MD_STDERR: + case MD_SYSLOG: /* syslog */ + case MD_CONSOLE: + case MD_CATALOG: + scan_types(lc, (MSGS *)(item->value), item->code, NULL, NULL); + break; + case MD_OPERATOR: /* send to operator */ + case MD_DIRECTOR: /* send to Director */ + case MD_MAIL: /* mail */ + case MD_MAIL_ON_ERROR: /* mail if Job errors */ + case MD_MAIL_ON_SUCCESS: /* mail if Job succeeds */ + if (item->code == MD_OPERATOR) { + cmd = res_all.res_msgs.operator_cmd; + } else { + cmd = res_all.res_msgs.mail_cmd; + } + dest = get_pool_memory(PM_MESSAGE); + dest[0] = 0; + dest_len = 0; + /* Pick up comma separated list of destinations */ + for ( ;; ) { + token = lex_get_token(lc, T_NAME); /* scan destination */ + dest = check_pool_memory_size(dest, dest_len + lc->str_len + 2); + if (dest[0] != 0) { + pm_strcat(dest, " "); /* separate multiple destinations with space */ + dest_len++; + } + pm_strcat(dest, lc->str); + dest_len += lc->str_len; + Dmsg2(900, "store_msgs newdest=%s: dest=%s:\n", lc->str, NPRT(dest)); + token = lex_get_token(lc, T_SKIP_EOL); + if (token == T_COMMA) { + continue; /* get another destination */ + } + if (token != T_EQUALS) { + scan_err1(lc, _("expected an =, got: %s"), lc->str); + return; + } + break; + } + Dmsg1(900, "mail_cmd=%s\n", NPRT(cmd)); + scan_types(lc, (MSGS *)(item->value), item->code, dest, cmd); + free_pool_memory(dest); + Dmsg0(900, "done with dest codes\n"); + break; + + case MD_FILE: /* file */ + case MD_APPEND: /* append */ + dest = get_pool_memory(PM_MESSAGE); + /* Pick up a single destination */ + token = lex_get_token(lc, T_NAME); /* scan destination */ + pm_strcpy(dest, lc->str); + dest_len = lc->str_len; + token = lex_get_token(lc, T_SKIP_EOL); + Dmsg1(900, "store_msgs dest=%s:\n", NPRT(dest)); + if (token != T_EQUALS) { + scan_err1(lc, _("expected an =, got: %s"), lc->str); + return; + } + scan_types(lc, (MSGS *)(item->value), item->code, dest, NULL); + free_pool_memory(dest); + Dmsg0(900, "done with dest codes\n"); + break; + + default: + scan_err1(lc, _("Unknown item code: %d\n"), item->code); + return; + } + } + scan_to_eol(lc); + set_bit(index, res_all.hdr.item_present); + Dmsg0(900, "Done store_msgs\n"); +} + +/* + * Scan for message types and add them to the message + * destination. The basic job here is to connect message types + * (WARNING, ERROR, FATAL, INFO, ...) with an appropriate + * destination (MAIL, FILE, OPERATOR, ...) + */ +static void scan_types(LEX *lc, MSGS *msg, int dest_code, char *where, char *cmd) +{ + int i; + bool found, is_not; + int msg_type = 0; + char *str; + + for ( ;; ) { + lex_get_token(lc, T_NAME); /* expect at least one type */ + found = false; + if (lc->str[0] == '!') { + is_not = true; + str = &lc->str[1]; + } else { + is_not = false; + str = &lc->str[0]; + } + for (i=0; msg_types[i].name; i++) { + if (strcasecmp(str, msg_types[i].name) == 0) { + msg_type = msg_types[i].token; + found = true; + break; + } + } + if (!found) { + scan_err1(lc, _("message type: %s not found"), str); + return; + } + + if (msg_type == M_MAX+1) { /* all? 
*/ + for (i=2; i<=M_MAX; i++) { /* yes set all types except Debug and Saved */ + add_msg_dest(msg, dest_code, msg_types[i].token, where, cmd); + } + } else if (is_not) { + rem_msg_dest(msg, dest_code, msg_type, where); + } else { + add_msg_dest(msg, dest_code, msg_type, where, cmd); + } + if (lc->ch != ',') { + break; + } + Dmsg0(900, "call lex_get_token() to eat comma\n"); + lex_get_token(lc, T_ALL); /* eat comma */ + } + Dmsg0(900, "Done scan_types()\n"); +} + + +/* + * This routine is ONLY for resource names + * Store a name at specified address. + */ +void store_name(LEX *lc, RES_ITEM *item, int index, int pass) +{ + POOLMEM *msg = get_pool_memory(PM_EMSG); + + lex_get_token(lc, T_NAME); + if (!is_name_valid(lc->str, &msg)) { + scan_err1(lc, "%s\n", msg); + return; + } + free_pool_memory(msg); + /* Store the name both pass 1 and pass 2 */ + if (*(item->value)) { + scan_err5(lc, _("Attempt to redefine \"%s\" from \"%s\" to \"%s\" referenced on line %d : %s\n"), + item->name, *(item->value), lc->str, lc->line_no, lc->line); + return; + } + *(item->value) = bstrdup(lc->str); + scan_to_eol(lc); + set_bit(index, res_all.hdr.item_present); +} + + +/* + * Store a name string at specified address + * A name string is limited to MAX_RES_NAME_LENGTH + */ +void store_strname(LEX *lc, RES_ITEM *item, int index, int pass) +{ + lex_get_token(lc, T_NAME); + /* Store the name */ + if (pass == 1) { + if (*(item->value)) { + scan_err5(lc, _("Attempt to redefine \"%s\" from \"%s\" to \"%s\" referenced on line %d : %s\n"), + item->name, *(item->value), lc->str, lc->line_no, lc->line); + return; + } + *(item->value) = bstrdup(lc->str); + } + scan_to_eol(lc); + set_bit(index, res_all.hdr.item_present); +} + +/* Store a string at specified address */ +void store_str(LEX *lc, RES_ITEM *item, int index, int pass) +{ + lex_get_token(lc, T_STRING); + if (pass == 1) { + if (*(item->value) && (item->flags & ITEM_ALLOW_DUPS)) { + free(*(item->value)); + *(item->value) = NULL; + } + if (*(item->value)) { + scan_err5(lc, _("Attempt to redefine \"%s\" from \"%s\" to \"%s\" referenced on line %d : %s\n"), + item->name, *(item->value), lc->str, lc->line_no, lc->line); + return; + } + *(item->value) = bstrdup(lc->str); + } + scan_to_eol(lc); + set_bit(index, res_all.hdr.item_present); +} + +/* + * Store a directory name at specified address. Note, we do + * shell expansion except if the string begins with a vertical + * bar (i.e. it will likely be passed to the shell later). 
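+ *
+ * Purely illustrative values (hypothetical directives, not from a shipped
+ * conf file):
+ *
+ *    WorkingDirectory = "$HOME/bacula/working"     expanded by do_shell_expansion()
+ *    SomeDirective    = "|/path/to/generate-list"  left untouched for the shell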
+ */ +void store_dir(LEX *lc, RES_ITEM *item, int index, int pass) +{ + lex_get_token(lc, T_STRING); + if (pass == 1) { + if (lc->str[0] != '|') { + do_shell_expansion(lc->str, sizeof_pool_memory(lc->str)); + } +#ifdef STANDARDIZED_DIRECTORY_USAGE + // TODO ASX we should store all directory without the ending slash to + // avoid the need of testing its presence + int len=strlen(lc->str); + if (len>0 && IsPathSeparator(lc->str[len-1])) { + lc->str[len-1]='\0'; + } +#endif + if (*(item->value)) { + scan_err5(lc, _("Attempt to redefine \"%s\" from \"%s\" to \"%s\" referenced on line %d : %s\n"), + item->name, *(item->value), lc->str, lc->line_no, lc->line); + return; + } + *(item->value) = bstrdup(lc->str); + } + scan_to_eol(lc); + set_bit(index, res_all.hdr.item_present); +} + + +/* Store a password specified address in MD5 coding */ +void store_password(LEX *lc, RES_ITEM *item, int index, int pass) +{ + unsigned int i, j; + struct MD5Context md5c; + unsigned char digest[CRYPTO_DIGEST_MD5_SIZE]; + char sig[100]; + + if (lc->options & LOPT_NO_MD5) { + store_str(lc, item, index, pass); + + } else { + lex_get_token(lc, T_STRING); + if (pass == 1) { + MD5Init(&md5c); + MD5Update(&md5c, (unsigned char *) (lc->str), lc->str_len); + MD5Final(digest, &md5c); + for (i = j = 0; i < sizeof(digest); i++) { + sprintf(&sig[j], "%02x", digest[i]); + j += 2; + } + if (*(item->value)) { + scan_err5(lc, _("Attempt to redefine \"%s\" from \"%s\" to \"%s\" referenced on line %d : %s\n"), + item->name, *(item->value), lc->str, lc->line_no, lc->line); + return; + } + *(item->value) = bstrdup(sig); + } + scan_to_eol(lc); + set_bit(index, res_all.hdr.item_present); + } +} + + +/* Store a resource at specified address. + * If we are in pass 2, do a lookup of the + * resource. + */ +void store_res(LEX *lc, RES_ITEM *item, int index, int pass) +{ + RES *res; + + lex_get_token(lc, T_NAME); + if (pass == 2) { + res = GetResWithName(item->code, lc->str); + if (res == NULL) { + scan_err3(lc, _("Could not find config Resource \"%s\" referenced on line %d : %s\n"), + lc->str, lc->line_no, lc->line); + return; + } + if (*(item->value)) { + scan_err3(lc, _("Attempt to redefine resource \"%s\" referenced on line %d : %s\n"), + item->name, lc->line_no, lc->line); + return; + } + *(item->value) = (char *)res; + } + scan_to_eol(lc); + set_bit(index, res_all.hdr.item_present); +} + +/* + * Store a resource pointer in an alist. default_value indicates how many + * times this routine can be called -- i.e. how many alists + * there are. + * If we are in pass 2, do a lookup of the + * resource. + */ +void store_alist_res(LEX *lc, RES_ITEM *item, int index, int pass) +{ + RES *res; + int count = item->default_value; + int i = 0; + alist *list; + + if (pass == 2) { + if (count == 0) { /* always store in item->value */ + i = 0; + if ((item->value)[i] == NULL) { + list = New(alist(10, not_owned_by_alist)); + } else { + list = (alist *)(item->value)[i]; + } + } else { + /* Find empty place to store this directive */ + while ((item->value)[i] != NULL && i++ < count) { } + if (i >= count) { + scan_err4(lc, _("Too many %s directives. Max. is %d. 
line %d: %s\n"), + lc->str, count, lc->line_no, lc->line); + return; + } + list = New(alist(10, not_owned_by_alist)); + } + + for (;;) { + lex_get_token(lc, T_NAME); /* scan next item */ + res = GetResWithName(item->code, lc->str); + if (res == NULL) { + scan_err3(lc, _("Could not find config Resource \"%s\" referenced on line %d : %s\n"), + lc->str, lc->line_no, lc->line); + return; + } + Dmsg5(900, "Append %p to alist %p size=%d i=%d %s\n", + res, list, list->size(), i, item->name); + list->append(res); + (item->value)[i] = (char *)list; + if (lc->ch != ',') { /* if no other item follows */ + if (!lex_check_eol(lc)) { + /* found garbage at the end of the line */ + scan_err3(lc, _("Found unexpected characters resource list in Directive \"%s\" at the end of line %d : %s\n"), + item->name, lc->line_no, lc->line); + } + break; /* get out */ + } + lex_get_token(lc, T_ALL); /* eat comma */ + } + } + scan_to_eol(lc); + set_bit(index, res_all.hdr.item_present); +} + + +/* + * Store a string in an alist. + */ +void store_alist_str(LEX *lc, RES_ITEM *item, int index, int pass) +{ + alist *list; + + if (pass == 2) { + if (*(item->value) == NULL) { + list = New(alist(10, owned_by_alist)); + *(item->value) = (char *)list; + } else { + list = (alist *)(*(item->value)); + } + for (;;) { + lex_get_token(lc, T_STRING); /* scan next item */ + Dmsg4(900, "Append %s to alist 0x%p size=%d %s\n", + lc->str, list, list->size(), item->name); + list->append(bstrdup(lc->str)); + if (lc->ch != ',') { /* if no other item follows */ + if (!lex_check_eol(lc)) { + /* found garbage at the end of the line */ + scan_err3(lc, _("Found unexpected characters in resource list in Directive \"%s\" at the end of line %d : %s\n"), + item->name, lc->line_no, lc->line); + } + break; /* get out */ + } + lex_get_token(lc, T_ALL); /* eat comma */ + } + } + scan_to_eol(lc); + set_bit(index, res_all.hdr.item_present); +} + + + +/* + * Store default values for Resource from xxxDefs + * If we are in pass 2, do a lookup of the + * resource and store everything not explicitly set + * in main resource. + * + * Note, here item points to the main resource (e.g. Job, not + * the jobdefs, which we look up). 
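+ *
+ * Illustrative (hypothetical) resources, to make the roles concrete:
+ *
+ *    JobDefs {
+ *      Name = "DefaultJob"
+ *      ...
+ *    }
+ *    Job {
+ *      Name = "BackupClient1"
+ *      JobDefs = "DefaultJob"
+ *      ...
+ *    }
+ *
+ * Here lc->str is "DefaultJob" and item->code selects the JobDefs resource
+ * type; pass 2 below only checks that the referenced resource exists, the
+ * copying of the defaulted values being left to the daemon-specific code.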
+ */ +void store_defs(LEX *lc, RES_ITEM *item, int index, int pass) +{ + RES *res; + + lex_get_token(lc, T_NAME); + if (pass == 2) { + Dmsg2(900, "Code=%d name=%s\n", item->code, lc->str); + res = GetResWithName(item->code, lc->str); + if (res == NULL) { + scan_err3(lc, _("Missing config Resource \"%s\" referenced on line %d : %s\n"), + lc->str, lc->line_no, lc->line); + return; + } + } + scan_to_eol(lc); +} + + + +/* Store an integer at specified address */ +void store_int32(LEX *lc, RES_ITEM *item, int index, int pass) +{ + lex_get_token(lc, T_INT32); + *(uint32_t *)(item->value) = lc->int32_val; + scan_to_eol(lc); + set_bit(index, res_all.hdr.item_present); +} + +/* Store a positive integer at specified address */ +void store_pint32(LEX *lc, RES_ITEM *item, int index, int pass) +{ + lex_get_token(lc, T_PINT32); + *(uint32_t *)(item->value) = lc->pint32_val; + scan_to_eol(lc); + set_bit(index, res_all.hdr.item_present); +} + + +/* Store an 64 bit integer at specified address */ +void store_int64(LEX *lc, RES_ITEM *item, int index, int pass) +{ + lex_get_token(lc, T_INT64); + *(int64_t *)(item->value) = lc->int64_val; + scan_to_eol(lc); + set_bit(index, res_all.hdr.item_present); +} + +enum store_unit_type { + STORE_SIZE, + STORE_SPEED +} ; + +/* + * This routine stores either a 32 or a 64 bit value (size32) + * and either a size (in bytes) or a speed (bytes per second). + */ +static void store_int_unit(LEX *lc, RES_ITEM *item, int index, int pass, + bool size32, enum store_unit_type type) +{ + int token; + uint64_t uvalue; + char bsize[500]; + + Dmsg0(900, "Enter store_unit\n"); + token = lex_get_token(lc, T_SKIP_EOL); + errno = 0; + switch (token) { + case T_NUMBER: + case T_IDENTIFIER: + case T_UNQUOTED_STRING: + bstrncpy(bsize, lc->str, sizeof(bsize)); /* save first part */ + /* if terminated by space, scan and get modifier */ + while (lc->ch == ' ') { + token = lex_get_token(lc, T_ALL); + switch (token) { + case T_NUMBER: + case T_IDENTIFIER: + case T_UNQUOTED_STRING: + bstrncat(bsize, lc->str, sizeof(bsize)); + break; + } + } + if (type == STORE_SIZE) { + if (!size_to_uint64(bsize, strlen(bsize), &uvalue)) { + scan_err1(lc, _("expected a size number, got: %s"), lc->str); + return; + } + } else { + if (!speed_to_uint64(bsize, strlen(bsize), &uvalue)) { + scan_err1(lc, _("expected a speed number, got: %s"), lc->str); + return; + } + } + if (size32) { + *(uint32_t *)(item->value) = (uint32_t)uvalue; + } else { + *(uint64_t *)(item->value) = uvalue; + } + break; + default: + scan_err2(lc, _("expected a %s, got: %s"), + (type == STORE_SIZE)?_("size"):_("speed"), lc->str); + return; + } + if (token != T_EOL) { + scan_to_eol(lc); + } + set_bit(index, res_all.hdr.item_present); + Dmsg0(900, "Leave store_unit\n"); +} + +/* Store a size in bytes */ +void store_size32(LEX *lc, RES_ITEM *item, int index, int pass) +{ + store_int_unit(lc, item, index, pass, true /* 32 bit */, STORE_SIZE); +} + +/* Store a size in bytes */ +void store_size64(LEX *lc, RES_ITEM *item, int index, int pass) +{ + store_int_unit(lc, item, index, pass, false /* not 32 bit */, STORE_SIZE); +} + +/* Store a speed in bytes/s */ +void store_speed(LEX *lc, RES_ITEM *item, int index, int pass) +{ + store_int_unit(lc, item, index, pass, false /* 64 bit */, STORE_SPEED); +} + +/* Store a time period in seconds */ +void store_time(LEX *lc, RES_ITEM *item, int index, int pass) +{ + int token; + utime_t utime; + char period[500]; + + token = lex_get_token(lc, T_SKIP_EOL); + errno = 0; + switch (token) { + case T_NUMBER: + case 
T_IDENTIFIER: + case T_UNQUOTED_STRING: + bstrncpy(period, lc->str, sizeof(period)); /* get first part */ + /* if terminated by space, scan and get modifier */ + while (lc->ch == ' ') { + token = lex_get_token(lc, T_ALL); + switch (token) { + case T_NUMBER: + case T_IDENTIFIER: + case T_UNQUOTED_STRING: + bstrncat(period, lc->str, sizeof(period)); + break; + } + } + if (!duration_to_utime(period, &utime)) { + scan_err1(lc, _("expected a time period, got: %s"), period); + return; + } + *(utime_t *)(item->value) = utime; + break; + default: + scan_err1(lc, _("expected a time period, got: %s"), lc->str); + return; + } + if (token != T_EOL) { + scan_to_eol(lc); + } + set_bit(index, res_all.hdr.item_present); +} + + +/* Store a yes/no in a bit field */ +void store_bit(LEX *lc, RES_ITEM *item, int index, int pass) +{ + lex_get_token(lc, T_NAME); + if (strcasecmp(lc->str, "yes") == 0 || strcasecmp(lc->str, "true") == 0) { + *(uint32_t *)(item->value) |= item->code; + } else if (strcasecmp(lc->str, "no") == 0 || strcasecmp(lc->str, "false") == 0) { + *(uint32_t *)(item->value) &= ~(item->code); + } else { + scan_err2(lc, _("Expect %s, got: %s"), "YES, NO, TRUE, or FALSE", lc->str); /* YES and NO must not be translated */ + return; + } + scan_to_eol(lc); + set_bit(index, res_all.hdr.item_present); +} + +/* Store a bool in a bit field */ +void store_bool(LEX *lc, RES_ITEM *item, int index, int pass) +{ + lex_get_token(lc, T_NAME); + if (strcasecmp(lc->str, "yes") == 0 || strcasecmp(lc->str, "true") == 0) { + *(bool *)(item->value) = true; + } else if (strcasecmp(lc->str, "no") == 0 || strcasecmp(lc->str, "false") == 0) { + *(bool *)(item->value) = false; + } else { + scan_err2(lc, _("Expect %s, got: %s"), "YES, NO, TRUE, or FALSE", lc->str); /* YES and NO must not be translated */ + return; + } + scan_to_eol(lc); + set_bit(index, res_all.hdr.item_present); +} + + +/* + * Store Tape Label Type (Bacula, ANSI, IBM) + * + */ +void store_label(LEX *lc, RES_ITEM *item, int index, int pass) +{ + int i; + + lex_get_token(lc, T_NAME); + /* Store the label pass 2 so that type is defined */ + for (i=0; tapelabels[i].name; i++) { + if (strcasecmp(lc->str, tapelabels[i].name) == 0) { + *(uint32_t *)(item->value) = tapelabels[i].token; + i = 0; + break; + } + } + if (i != 0) { + scan_err1(lc, _("Expected a Tape Label keyword, got: %s"), lc->str); + return; + } + scan_to_eol(lc); + set_bit(index, res_all.hdr.item_present); +} + + +/* Parser state */ +enum parse_state { + p_none, + p_resource +}; + +void CONFIG::init( + const char *cf, + LEX_ERROR_HANDLER *scan_error, + int32_t err_type, + void *vres_all, + int32_t res_all_size, + int32_t r_first, + int32_t r_last, + RES_TABLE *resources, + RES_HEAD ***res_head) +{ + m_cf = cf; + m_scan_error = scan_error; + m_err_type = err_type; + m_res_all = vres_all; + m_res_all_size = res_all_size; + m_r_first = r_first; + m_r_last = r_last; + m_resources = resources; + init_res_head(res_head, r_first, r_last); + m_res_head = *res_head; +} + +/********************************************************************* + * + * Parse configuration file + * + * Return 0 if reading failed, 1 otherwise + * Note, the default behavior unless you have set an alternate + * scan_error handler is to die on an error. 
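+ *
+ * A minimal usage sketch (simplified; each daemon wraps this in its own
+ * parse_xxx_config() helper, and the globals named below are assumed to
+ * exist in the calling daemon):
+ *
+ *    CONFIG *config = New(CONFIG());
+ *    config->init(configfile, NULL, M_ERROR_TERM, (void *)&res_all,
+ *                 sizeof(res_all), r_first, r_last, resources, &res_head);
+ *    if (!config->parse_config()) {
+ *       Emsg1(M_ERROR_TERM, 0, _("Unable to parse config file \"%s\"\n"), configfile);
+ *    }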
+ */ +bool CONFIG::parse_config() +{ + LEX *lc = NULL; + int token, i, pass; + int res_type = 0; + enum parse_state state = p_none; + RES_ITEM *items = NULL; + int level = 0; + static bool first = true; + int errstat; + const char *cf = m_cf; + LEX_ERROR_HANDLER *scan_error = m_scan_error; + int err_type = m_err_type; + //HPKT hpkt; + + if (first && (errstat=rwl_init(&res_lock)) != 0) { + berrno be; + Jmsg1(NULL, M_ABORT, 0, _("Unable to initialize resource lock. ERR=%s\n"), + be.bstrerror(errstat)); + } + first = false; + + char *full_path = (char *)alloca(MAX_PATH + 1); + + if (!find_config_file(cf, full_path, MAX_PATH +1)) { + Jmsg0(NULL, M_ABORT, 0, _("Config filename too long.\n")); + } + cf = full_path; + + /* Make two passes. The first builds the name symbol table, + * and the second picks up the items. + */ + Dmsg0(900, "Enter parse_config()\n"); + for (pass=1; pass <= 2; pass++) { + Dmsg1(900, "parse_config pass %d\n", pass); + if ((lc = lex_open_file(lc, cf, scan_error)) == NULL) { + berrno be; + /* We must create a lex packet to print the error */ + lc = (LEX *)malloc(sizeof(LEX)); + memset(lc, 0, sizeof(LEX)); + lc->str = get_memory(5000); + if (scan_error) { + lc->scan_error = scan_error; + } else { + lex_set_default_error_handler(lc); + } + lex_set_error_handler_error_type(lc, err_type) ; + pm_strcpy(lc->str, cf); + lc->fname = lc->str; + scan_err2(lc, _("Cannot open config file \"%s\": %s\n"), + lc->str, be.bstrerror()); + free_pool_memory(lc->str); + free(lc); + return 0; + } + if (!m_encode_pass) { + lex_store_clear_passwords(lc); + } + lex_set_error_handler_error_type(lc, err_type) ; + while ((token=lex_get_token(lc, T_ALL)) != T_EOF) { + Dmsg3(900, "parse state=%d pass=%d got token=%s\n", state, pass, + lex_tok_to_str(token)); + switch (state) { + case p_none: + if (token == T_EOL) { + break; + } else if (token == T_UTF8_BOM) { + /* We can assume the file is UTF-8 as we have seen a UTF-8 BOM */ + break; + } else if (token == T_UTF16_BOM) { + scan_err0(lc, _("Currently we cannot handle UTF-16 source files. 
" + "Please convert the conf file to UTF-8\n")); + goto bail_out; + } else if (token != T_IDENTIFIER) { + scan_err1(lc, _("Expected a Resource name identifier, got: %s"), lc->str); + goto bail_out; + } + for (i=0; resources[i].name; i++) { + if (strcasecmp(resources[i].name, lc->str) == 0) { + items = resources[i].items; + if (!items) { + break; + } + state = p_resource; + res_type = resources[i].rcode; + init_resource(this, res_type, items, pass); + break; + } + } + if (state == p_none) { + scan_err1(lc, _("expected resource name, got: %s"), lc->str); + goto bail_out; + } + break; + case p_resource: + switch (token) { + case T_BOB: + level++; + break; + case T_IDENTIFIER: + if (level != 1) { + scan_err1(lc, _("not in resource definition: %s"), lc->str); + goto bail_out; + } + for (i=0; items[i].name; i++) { + //hpkt.pass = pass; + //hpkt.ritem = &items[i]; + //hpkt.edbuf = NULL; + //hpkt.index = i; + //hpkt.lc = lc; + //hpkt.hfunc = HF_STORE; + if (strcasecmp(items[i].name, lc->str) == 0) { + /* If the ITEM_NO_EQUALS flag is set we do NOT + * scan for = after the keyword */ + if (!(items[i].flags & ITEM_NO_EQUALS)) { + token = lex_get_token(lc, T_SKIP_EOL); + Dmsg1 (900, "in T_IDENT got token=%s\n", lex_tok_to_str(token)); + if (token != T_EQUALS) { + scan_err1(lc, _("expected an equals, got: %s"), lc->str); + goto bail_out; + } + } + Dmsg1(800, "calling handler for %s\n", items[i].name); + /* Call item handler */ + items[i].handler(lc, &items[i], i, pass); + i = -1; + break; + } + } + if (i >= 0) { + Dmsg2(900, "level=%d id=%s\n", level, lc->str); + Dmsg1(900, "Keyword = %s\n", lc->str); + scan_err1(lc, _("Keyword \"%s\" not permitted in this resource.\n" + "Perhaps you left the trailing brace off of the previous resource."), lc->str); + goto bail_out; + } + break; + + case T_EOB: + level--; + state = p_none; + Dmsg0(900, "T_EOB => define new resource\n"); + if (res_all.hdr.name == NULL) { + scan_err0(lc, _("Name not specified for resource")); + goto bail_out; + } + if (!save_resource(this, res_type, items, pass)) { /* save resource */ + scan_err1(lc, "%s", m_errmsg); + goto bail_out; + } + break; + + case T_EOL: + break; + + default: + scan_err2(lc, _("unexpected token %d %s in resource definition"), + token, lex_tok_to_str(token)); + goto bail_out; + } + break; + default: + scan_err1(lc, _("Unknown parser state %d\n"), state); + goto bail_out; + } + } + if (state != p_none) { + scan_err0(lc, _("End of conf file reached with unclosed resource.")); + goto bail_out; + } + if (chk_dbglvl(900) && pass == 2) { + int i; + for (i=m_r_first; i<=m_r_last; i++) { + dump_each_resource(i, prtmsg, NULL); + } + } + lc = lex_close_file(lc); + } + Dmsg0(900, "Leave parse_config()\n"); + return 1; +bail_out: + if (lc) { + lc = lex_close_file(lc); + } + return 0; +} + +const char *get_default_configdir() +{ + return SYSCONFDIR; +} + +#ifdef xxx_not_used + HRESULT hr; + static char szConfigDir[MAX_PATH + 1] = { 0 }; + if (!p_SHGetFolderPath) { + bstrncpy(szConfigDir, DEFAULT_CONFIGDIR, sizeof(szConfigDir)); + return szConfigDir; + } + if (szConfigDir[0] == '\0') { + hr = p_SHGetFolderPath(NULL, CSIDL_COMMON_APPDATA, NULL, 0, szConfigDir); + if (SUCCEEDED(hr)) { + bstrncat(szConfigDir, "\\Bacula", sizeof(szConfigDir)); + } else { + bstrncpy(szConfigDir, DEFAULT_CONFIGDIR, sizeof(szConfigDir)); + } + } + return szConfigDir; +#endif + + +/* + * Returns false on error + * true on OK, with full_path set to where config file should be + */ +bool +find_config_file(const char *config_file, char *full_path, int 
max_path) +{ + int file_length = strlen(config_file) + 1; + + /* If a full path specified, use it */ + if (first_path_separator(config_file) != NULL) { + if (file_length > max_path) { + return false; + } + bstrncpy(full_path, config_file, file_length); + return true; + } + + /* config_file is default file name, now find default dir */ + const char *config_dir = get_default_configdir(); + int dir_length = strlen(config_dir); + + if ((dir_length + 1 + file_length) > max_path) { + return false; + } + + memcpy(full_path, config_dir, dir_length + 1); + + if (!IsPathSeparator(full_path[dir_length - 1])) { + full_path[dir_length++] = '/'; + } + + memcpy(&full_path[dir_length], config_file, file_length); + + return true; +} + +/********************************************************************* + * + * Free configuration resources + * + */ +void CONFIG::free_all_resources() +{ + RES *next, *res; + if (m_res_head == NULL) { + return; + } + /* Walk down chain of res_heads */ + for (int i=m_r_first; i<=m_r_last; i++) { + if (m_res_head[i-m_r_first]) { + next = m_res_head[i-m_r_first]->first; + Dmsg2(500, "i=%d, next=%p\n", i, next); + /* Walk down resource chain freeing them */ + for ( ; next; ) { + res = next; + next = res->res_next; + free_resource(res, i); + } + free(m_res_head[i-m_r_first]->res_list); + free(m_res_head[i-m_r_first]); + m_res_head[i-m_r_first] = NULL; + } + } +} + +CONFIG::CONFIG() +: m_cf(NULL), + m_scan_error(NULL), + m_err_type(0), + m_res_all(NULL), + m_res_all_size(0), + m_encode_pass(true), + m_r_first(0), + m_r_last(0), + m_resources(NULL), + m_res_head(NULL) +{ + m_errmsg = get_pool_memory(PM_EMSG); + *m_errmsg = 0; +} + +CONFIG::~CONFIG() { + free_all_resources(); + free_pool_memory(m_errmsg); +} + +void CONFIG::encode_password(bool a) +{ + m_encode_pass = a; +} diff --git a/src/lib/parse_conf.h b/src/lib/parse_conf.h new file mode 100644 index 00000000..3a8f2e60 --- /dev/null +++ b/src/lib/parse_conf.h @@ -0,0 +1,293 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * Kern Sibbald, January MM + * + */ +#pragma once +/* Used for certain keyword tables */ +struct s_kw { + const char *name; + int token; +}; + +struct RES_ITEM; /* Declare forward referenced structure */ +struct RES_ITEM1; +struct RES_ITEM2; /* Declare forward referenced structure */ +class RES; /* Declare forward referenced structure */ +struct HPKT; /* Declare forward referenced structure */ +typedef void (RES_HANDLER)(HPKT &hpkt); +typedef void (MSG_RES_HANDLER)(LEX *lc, RES_ITEM *item, int index, int pass); +/* The INC_RES handler has an extra argument */ +typedef void (INC_RES_HANDLER)(LEX *lc, RES_ITEM2 *item, int index, int pass, bool exclude); + +/* This is the structure that defines + * the record types (items) permitted within each + * resource. It is used to define the configuration + * tables. + */ +struct RES_ITEM { + const char *name; /* Resource name i.e. Director, ... 
*/ + MSG_RES_HANDLER *handler; /* Routine storing the resource item */ + union { + char **value; /* Where to store the item */ + char **charvalue; + uint32_t ui32value; + int32_t i32value; + uint64_t ui64value; + int64_t i64value; + bool boolvalue; + utime_t utimevalue; + RES *resvalue; + RES **presvalue; + }; + int32_t code; /* item code/additional info */ + uint32_t flags; /* flags: default, required, ... */ + int32_t default_value; /* default value */ +}; + +/* + * This handler takes only the RPKT as an argument + */ +struct RES_ITEM1 { + const char *name; /* Resource name i.e. Director, ... */ + RES_HANDLER *handler; /* Routine storing/displaying the resource */ + union { + char **value; /* Where to store the item */ + char **charvalue; + uint32_t ui32value; + int32_t i32value; + uint64_t ui64value; + int64_t i64value; + bool boolvalue; + utime_t utimevalue; + RES *resvalue; + RES **presvalue; + }; + int32_t code; /* item code/additional info */ + uint32_t flags; /* flags: default, required, ... */ + int32_t default_value; /* default value */ +}; + +/* INC_RES_HANDLER has exclude argument */ +struct RES_ITEM2 { + const char *name; /* Resource name i.e. Director, ... */ + INC_RES_HANDLER *handler; /* Routine storing the resource item */ + union { + char **value; /* Where to store the item */ + char **charvalue; + uint32_t ui32value; + int32_t i32value; + uint64_t ui64value; + int64_t i64value; + bool boolvalue; + utime_t utimevalue; + RES *resvalue; + RES **presvalue; + }; + int32_t code; /* item code/additional info */ + uint32_t flags; /* flags: default, required, ... */ + int32_t default_value; /* default value */ +}; + + +/* For storing name_addr items in res_items table */ +#define ITEM(x) {(char **)&res_all.x} + +#define MAX_RES_ITEMS 100 /* maximum resource items per RES */ + +class RES_HEAD { +public: + rblist *res_list; /* Resource list */ + RES *first; /* First RES item in list */ + RES *last; /* Last RES item inserted */ +}; + +/* + * This is the universal header that is + * at the beginning of every resource + * record. + */ +class RES { +public: + rblink link; /* red-black link */ + RES *res_next; /* pointer to next resource of this type */ + char *name; /* resource name */ + char *desc; /* resource description */ + uint32_t rcode; /* resource id or type */ + int32_t refcnt; /* reference count for releasing */ + char item_present[MAX_RES_ITEMS]; /* set if item is present in conf file */ +}; + + +/* + * Master Resource configuration structure definition + * This is the structure that defines the + * resources that are available to this daemon. 
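 *
 * Editor's note (illustration only, not part of the imported source): each
 * daemon fills a table of the RES_TABLE structure declared just below to
 * register its resource types. A hedged sketch, with hypothetical names --
 * dir_items, cli_items, R_DIRECTOR and R_CLIENT stand in for whatever the
 * daemon actually defines:
 *
 *   static RES_TABLE resources[] = {
 *      {"director", dir_items, R_DIRECTOR},   // keyword, item table, code
 *      {"client",   cli_items, R_CLIENT},
 *      {NULL,       NULL,      0}             // NULL name terminates the table
 *   };
 *
 * parse_config() scans this table to match the identifier read from the
 * conf file against a resource name and then dispatches to its item list.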
+ */ +struct RES_TABLE { + const char *name; /* resource name */ + RES_ITEM *items; /* list of resource keywords */ + uint32_t rcode; /* code if needed */ +}; + +/* Common Resource definitions */ + +#define MAX_RES_NAME_LENGTH MAX_NAME_LENGTH-1 /* maximum resource name length */ + +/* Permitted bits in Flags field */ +#define ITEM_REQUIRED (1<<0) /* item required */ +#define ITEM_DEFAULT (1<<1) /* default supplied */ +#define ITEM_NO_EQUALS (1<<2) /* Don't scan = after name */ +#define ITEM_LAST (1<<3) /* Last item in list */ +#define ITEM_ALLOW_DUPS (1<<4) /* Allow duplicate directives */ + +/* Message Resource */ +class MSGS { +public: + RES hdr; + char *mail_cmd; /* mail command */ + char *operator_cmd; /* Operator command */ + DEST *dest_chain; /* chain of destinations */ + char send_msg[nbytes_for_bits(M_MAX+1)]; /* bit array of types */ + +private: + bool m_in_use; /* set when using to send a message */ + bool m_closing; /* set when closing message resource */ + +public: + /* Methods */ + char *name() const; + void clear_in_use() { lock(); m_in_use=false; unlock(); } + void set_in_use() { wait_not_in_use(); m_in_use=true; unlock(); } + void set_closing() { m_closing=true; } + bool get_closing() { return m_closing; } + void clear_closing() { lock(); m_closing=false; unlock(); } + bool is_closing() { lock(); bool rtn=m_closing; unlock(); return rtn; } + + void wait_not_in_use(); /* in message.c */ + void lock(); /* in message.c */ + void unlock(); /* in message.c */ +}; + +inline char *MSGS::name() const { return hdr.name; } + +/* + * New C++ configuration routines + */ + +class CONFIG: public SMARTALLOC { +public: + const char *m_cf; /* config file */ + LEX_ERROR_HANDLER *m_scan_error; /* error handler if non-null */ + int32_t m_err_type; /* the way to terminate on failure */ + void *m_res_all; /* pointer to res_all buffer */ + int32_t m_res_all_size; /* length of buffer */ + bool m_encode_pass; /* Encode passwords with MD5 or not */ + + /* The below are not yet implemented */ + int32_t m_r_first; /* first daemon resource type */ + int32_t m_r_last; /* last daemon resource type */ + RES_TABLE *m_resources; /* pointer to table of permitted resources */ + RES_HEAD **m_res_head; /* pointer to list of resources this type */ + brwlock_t m_res_lock; /* resource lock */ + POOLMEM *m_errmsg; + + /* functions */ + void init( + const char *cf, + LEX_ERROR_HANDLER *scan_error, + int32_t err_type, + void *vres_all, + int32_t res_all_size, + int32_t r_first, + int32_t r_last, + RES_TABLE *resources, + RES_HEAD ***res_head); + + CONFIG(); + ~CONFIG(); + void encode_password(bool encode); + bool parse_config(); + void free_all_resources(); + bool insert_res(int rindex, int size); + RES_HEAD **save_resources(); + RES_HEAD **new_res_head(); + void init_res_head(RES_HEAD ***rhead, int32_t first, int32_t last); +}; + +/* Resource routines */ +int res_compare(void *item1, void *item2); +RES *GetResWithName(int rcode, const char *name); +RES *GetNextRes(int rcode, RES *res); +RES *GetNextRes(RES_HEAD **rhead, int rcode, RES *res); +void b_LockRes(const char *file, int line); +void b_UnlockRes(const char *file, int line); +void dump_resource(int type, RES *res, void sendmsg(void *sock, const char *fmt, ...), void *sock); +void dump_each_resource(int type, void sendmsg(void *sock, const char *fmt, ...), void *sock); +void free_resource(RES *res, int type); +bool init_resource(CONFIG *config, uint32_t type, void *res); +bool save_resource(CONFIG *config, int type, RES_ITEM *item, int pass); +void 
unstrip_password(RES_TABLE *resources); /* Used for json stuff */ +void strip_password(RES_TABLE *resources); /* Used for tray monitor */ +const char *res_to_str(int rcode); +bool find_config_file(const char *config_file, char *full_path, int max_path); + +/* Loop through each resource of type, returning in var */ +#ifdef HAVE_TYPEOF +#define foreach_res(var, type) \ + for((var)=NULL; ((var)=(typeof(var))GetNextRes((type), (RES *)var));) +#else +#define foreach_res(var, type) \ + for(var=NULL; (*((void **)&(var))=(void *)GetNextRes((type), (RES *)var));) +#endif + + +/* + * Standard global parsers defined in parse_config.c + */ +void store_str(LEX *lc, RES_ITEM *item, int index, int pass); +void store_dir(LEX *lc, RES_ITEM *item, int index, int pass); +void store_clear_password(LEX *lc, RES_ITEM *item, int index, int pass); +void store_password(LEX *lc, RES_ITEM *item, int index, int pass); +void store_name(LEX *lc, RES_ITEM *item, int index, int pass); +void store_strname(LEX *lc, RES_ITEM *item, int index, int pass); +void store_res(LEX *lc, RES_ITEM *item, int index, int pass); +void store_alist_res(LEX *lc, RES_ITEM *item, int index, int pass); +void store_alist_str(LEX *lc, RES_ITEM *item, int index, int pass); +void store_int32(LEX *lc, RES_ITEM *item, int index, int pass); +void store_pint32(LEX *lc, RES_ITEM *item, int index, int pass); +void store_msgs(LEX *lc, RES_ITEM *item, int index, int pass); +void store_int64(LEX *lc, RES_ITEM *item, int index, int pass); +void store_bit(LEX *lc, RES_ITEM *item, int index, int pass); +void store_bool(LEX *lc, RES_ITEM *item, int index, int pass); +void store_time(LEX *lc, RES_ITEM *item, int index, int pass); +void store_size64(LEX *lc, RES_ITEM *item, int index, int pass); +void store_size32(LEX *lc, RES_ITEM *item, int index, int pass); +void store_speed(LEX *lc, RES_ITEM *item, int index, int pass); +void store_defs(LEX *lc, RES_ITEM *item, int index, int pass); +void store_label(LEX *lc, RES_ITEM *item, int index, int pass); + +/* ***FIXME*** eliminate these globals */ +extern int32_t r_first; +extern int32_t r_last; +extern RES_TABLE resources[]; +extern RES_HEAD **res_head; +extern int32_t res_all_size; diff --git a/src/lib/plugins.c b/src/lib/plugins.c new file mode 100644 index 00000000..9a3628fc --- /dev/null +++ b/src/lib/plugins.c @@ -0,0 +1,261 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Plugin load/unloader for all Bacula daemons + * + * Kern Sibbald, October 2007 + */ + +#include "bacula.h" +#include <dlfcn.h> +#ifdef HAVE_DIRENT_H +#include <dirent.h> +#endif +int breaddir(DIR *dirp, POOLMEM *&d_name); + +#ifndef RTLD_NOW +#define RTLD_NOW 2 +#endif + +#include "plugins.h" + +static const int dbglvl = 50; + +/* + * List of all loaded plugins. + * + * NOTE!!! This is a global do not try walking it with + * foreach_alist, you must use foreach_alist_index !!!!!!
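 *
 * Editor's note (illustration only, not part of the imported source): the
 * indexed walk referred to above looks roughly like this, assuming the
 * foreach_alist_index(index, item, list) form of the macro from the alist
 * header (the argument order is an assumption here; check lib/alist.h):
 *
 *   Plugin *plugin;
 *   int i;
 *   foreach_alist_index(i, plugin, b_plugin_list) {
 *      Dmsg2(dbglvl, "plugin %d: %s\n", i, plugin->file);
 *   }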
+ */ +alist *b_plugin_list = NULL; + +/* + * Create a new plugin "class" entry and enter it in the + * list of plugins. Note, this is not the same as + * an instance of the plugin. + */ +Plugin *new_plugin() +{ + Plugin *plugin; + + plugin = (Plugin *)malloc(sizeof(Plugin)); + memset(plugin, 0, sizeof(Plugin)); + return plugin; +} + +static void close_plugin(Plugin *plugin) +{ + if (plugin->file) { + Dmsg1(50, "Got plugin=%s but not accepted.\n", plugin->file); + } + if (plugin->unloadPlugin) { + plugin->unloadPlugin(); + } + if (plugin->pHandle) { + dlclose(plugin->pHandle); + } + if (plugin->file) { + free(plugin->file); + } + free(plugin); +} + +/* + * Load all the plugins in the specified directory. + */ +bool load_plugins(void *binfo, void *bfuncs, const char *plugin_dir, + const char *type, bool is_plugin_compatible(Plugin *plugin)) +{ + bool found = false; + t_loadPlugin loadPlugin; + Plugin *plugin = NULL; + DIR* dp = NULL; + int name_max; + struct stat statp; + POOL_MEM fname(PM_FNAME); + POOL_MEM dname(PM_FNAME); + bool need_slash = false; + int len, type_len; + + + Dmsg0(dbglvl, "load_plugins\n"); + name_max = pathconf(".", _PC_NAME_MAX); + if (name_max < 1024) { + name_max = 1024; + } + + if (!(dp = opendir(plugin_dir))) { + berrno be; + Jmsg(NULL, M_ERROR_TERM, 0, _("Failed to open Plugin directory %s: ERR=%s\n"), + plugin_dir, be.bstrerror()); + Dmsg2(dbglvl, "Failed to open Plugin directory %s: ERR=%s\n", + plugin_dir, be.bstrerror()); + goto get_out; + } + + len = strlen(plugin_dir); + if (len > 0) { + need_slash = !IsPathSeparator(plugin_dir[len - 1]); + } + for ( ;; ) { + plugin = NULL; /* Start from a fresh plugin */ + + if ((breaddir(dp, dname.addr()) != 0)) { + if (!found) { + Dmsg1(dbglvl, "Failed to find any plugins in %s\n", plugin_dir); + } + break; + } + if (strcmp(dname.c_str(), ".") == 0 || + strcmp(dname.c_str(), "..") == 0) { + continue; + } + + len = strlen(dname.c_str()); + type_len = strlen(type); + if (len < type_len+1 || strcmp(&dname.c_str()[len-type_len], type) != 0) { + Dmsg3(dbglvl, "Rejected plugin: want=%s name=%s len=%d\n", type, dname.c_str(), len); + continue; + } + Dmsg2(dbglvl, "Found plugin: name=%s len=%d\n", dname.c_str(), len); + + pm_strcpy(fname, plugin_dir); + if (need_slash) { + pm_strcat(fname, "/"); + } + pm_strcat(fname, dname); + if (lstat(fname.c_str(), &statp) != 0 || !S_ISREG(statp.st_mode)) { + continue; /* ignore directories & special files */ + } + + plugin = new_plugin(); + plugin->file = bstrdup(dname.c_str()); + plugin->file_len = strstr(plugin->file, type) - plugin->file; + plugin->pHandle = dlopen(fname.c_str(), RTLD_NOW); + if (!plugin->pHandle) { + const char *error = dlerror(); + Jmsg(NULL, M_ERROR, 0, _("dlopen plugin %s failed: ERR=%s\n"), + fname.c_str(), NPRT(error)); + Dmsg2(dbglvl, "dlopen plugin %s failed: ERR=%s\n", fname.c_str(), + NPRT(error)); + close_plugin(plugin); + continue; + } + + /* Get two global entry points */ + loadPlugin = (t_loadPlugin)dlsym(plugin->pHandle, "loadPlugin"); + if (!loadPlugin) { + Jmsg(NULL, M_ERROR, 0, _("Lookup of loadPlugin in plugin %s failed: ERR=%s\n"), + fname.c_str(), NPRT(dlerror())); + Dmsg2(dbglvl, "Lookup of loadPlugin in plugin %s failed: ERR=%s\n", + fname.c_str(), NPRT(dlerror())); + close_plugin(plugin); + continue; + } + plugin->unloadPlugin = (t_unloadPlugin)dlsym(plugin->pHandle, "unloadPlugin"); + if (!plugin->unloadPlugin) { + Jmsg(NULL, M_ERROR, 0, _("Lookup of unloadPlugin in plugin %s failed: ERR=%s\n"), + fname.c_str(), NPRT(dlerror())); + Dmsg2(dbglvl, 
"Lookup of unloadPlugin in plugin %s failed: ERR=%s\n", + fname.c_str(), NPRT(dlerror())); + close_plugin(plugin); + continue; + } + + /* Initialize the plugin */ + if (loadPlugin(binfo, bfuncs, &plugin->pinfo, &plugin->pfuncs) != bRC_OK) { + close_plugin(plugin); + continue; + } + if (!is_plugin_compatible) { + Dmsg0(50, "Plugin compatibility pointer not set.\n"); + } else if (!is_plugin_compatible(plugin)) { + close_plugin(plugin); + continue; + } + + found = true; /* found a plugin */ + b_plugin_list->append(plugin); + } + +get_out: + if (!found && plugin) { + close_plugin(plugin); + } + if (dp) { + closedir(dp); + } + return found; +} + +/* + * Unload all the loaded plugins + */ +void unload_plugins() +{ + Plugin *plugin; + + if (!b_plugin_list) { + return; + } + foreach_alist(plugin, b_plugin_list) { + /* Shut it down and unload it */ + plugin->unloadPlugin(); + dlclose(plugin->pHandle); + if (plugin->file) { + free(plugin->file); + } + free(plugin); + } + delete b_plugin_list; + b_plugin_list = NULL; +} + +/* + * Dump plugin information + * Each daemon can register a hook that will be called + * after a fatal signal. + */ +#define DBG_MAX_HOOK 10 +static dbg_plugin_hook_t *dbg_plugin_hooks[DBG_MAX_HOOK]; +static int dbg_plugin_hook_count=0; + +void dbg_plugin_add_hook(dbg_plugin_hook_t *fct) +{ + ASSERT(dbg_plugin_hook_count < DBG_MAX_HOOK); + dbg_plugin_hooks[dbg_plugin_hook_count++] = fct; +} + +void dbg_print_plugin(FILE *fp) +{ + Plugin *plugin; + fprintf(fp, "List plugins. Hook count=%d\n", dbg_plugin_hook_count); + + if (!b_plugin_list) { + return; + } + foreach_alist(plugin, b_plugin_list) { + for(int i=0; i < dbg_plugin_hook_count; i++) { +// dbg_plugin_hook_t *fct = dbg_plugin_hooks[i]; + fprintf(fp, "Plugin %p name=\"%s\" disabled=%d\n", + plugin, plugin->file, plugin->disabled); +// fct(plugin, fp); + } + } +} diff --git a/src/lib/plugins.h b/src/lib/plugins.h new file mode 100644 index 00000000..d4640f2c --- /dev/null +++ b/src/lib/plugins.h @@ -0,0 +1,90 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Common plugin definitions + * + * Kern Sibbald, October 2007 + */ +#ifndef __PLUGINS_H +#define __PLUGINS_H + +/**************************************************************************** + * * + * Common definitions for all plugins * + * * + ****************************************************************************/ + +#ifndef BUILD_PLUGIN +extern DLL_IMP_EXP alist *b_plugin_list; +#endif + +/* Universal return codes from all plugin functions */ +typedef enum { + bRC_OK = 0, /* OK */ + bRC_Stop = 1, /* Stop calling other plugins */ + bRC_Error = 2, /* Some kind of error */ + bRC_More = 3, /* More files to backup */ + bRC_Term = 4, /* Unload me */ + bRC_Seen = 5, /* Return code from checkFiles */ + bRC_Core = 6, /* Let Bacula core handles this file */ + bRC_Skip = 7, /* Skip the proposed file */ + bRC_Cancel = 8, /* Job cancelled */ + + bRC_Max = 9999 /* Max code Bacula can use */ +} bRC; + + + +/* Context packet as first argument of all functions */ +struct bpContext { + void *bContext; /* Bacula private context */ + void *pContext; /* Plugin private context */ +}; + +extern "C" { +typedef bRC (*t_loadPlugin)(void *binfo, void *bfuncs, void **pinfo, void **pfuncs); +typedef bRC (*t_unloadPlugin)(void); +} + +class Plugin { +public: + char *file; + int32_t file_len; + t_unloadPlugin unloadPlugin; + void *pinfo; + void *pfuncs; + void *pHandle; + bool disabled; + bool restoreFileStarted; + bool createFileCalled; +}; + +/* Functions */ +extern Plugin *new_plugin(); +extern bool load_plugins(void *binfo, void *bfuncs, const char *plugin_dir, + const char *type, bool is_plugin_compatible(Plugin *plugin)); +extern void unload_plugins(); + +/* Each daemon can register a debug hook that will be called + * after a fatal signal + */ +typedef void (dbg_plugin_hook_t)(Plugin *plug, FILE *fp); +extern void dbg_plugin_add_hook(dbg_plugin_hook_t *fct); + +#endif /* __PLUGINS_H */ diff --git a/src/lib/priv.c b/src/lib/priv.c new file mode 100644 index 00000000..ba07529d --- /dev/null +++ b/src/lib/priv.c @@ -0,0 +1,129 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +#include "bacula.h" + +#undef ENABLE_KEEP_READALL_CAPS_SUPPORT +#if defined(HAVE_SYS_PRCTL_H) && defined(HAVE_SYS_CAPABILITY_H) && \ + defined(HAVE_PRCTL) && defined(HAVE_SETREUID) && defined(HAVE_LIBCAP) +# include <sys/prctl.h> +# include <sys/capability.h> +# if defined(PR_SET_KEEPCAPS) +# define ENABLE_KEEP_READALL_CAPS_SUPPORT +# endif +#endif + +#ifdef HAVE_AIX_OS +# ifndef _AIX51 +extern "C" int initgroups(const char *,int); +# endif +#endif + +/* + * Lower privileges by switching to new UID and GID if non-NULL. + * If requested, keep readall capabilities after switch.
+ */ +void drop(char *uname, char *gname, bool keep_readall_caps) +{ +#if defined(HAVE_PWD_H) && defined(HAVE_GRP_H) + struct passwd *passw = NULL; + struct group *group = NULL; + gid_t gid; + uid_t uid; + char username[1000]; + + Dmsg2(900, "uname=%s gname=%s\n", uname?uname:"NONE", gname?gname:"NONE"); + if (!uname && !gname) { + return; /* Nothing to do */ + } + + if (uname) { + if ((passw = getpwnam(uname)) == NULL) { + berrno be; + Emsg2(M_ERROR_TERM, 0, _("Could not find userid=%s: ERR=%s\n"), uname, + be.bstrerror()); + } + } else { + if ((passw = getpwuid(getuid())) == NULL) { + berrno be; + Emsg1(M_ERROR_TERM, 0, _("Could not find password entry. ERR=%s\n"), + be.bstrerror()); + } else { + uname = passw->pw_name; + } + } + /* Any OS uname pointer may get overwritten, so save name, uid, and gid */ + bstrncpy(username, uname, sizeof(username)); + uid = passw->pw_uid; + gid = passw->pw_gid; + if (gname) { + if ((group = getgrnam(gname)) == NULL) { + berrno be; + Emsg2(M_ERROR_TERM, 0, _("Could not find group=%s: ERR=%s\n"), gname, + be.bstrerror()); + } + gid = group->gr_gid; + } + if (initgroups(username, gid)) { + berrno be; + if (gname) { + Emsg3(M_ERROR_TERM, 0, _("Could not initgroups for group=%s, userid=%s: ERR=%s\n"), + gname, username, be.bstrerror()); + } else { + Emsg2(M_ERROR_TERM, 0, _("Could not initgroups for userid=%s: ERR=%s\n"), + username, be.bstrerror()); + } + } + if (gname) { + if (setgid(gid)) { + berrno be; + Emsg2(M_ERROR_TERM, 0, _("Could not set group=%s: ERR=%s\n"), gname, + be.bstrerror()); + } + } + if (keep_readall_caps) { +#ifdef ENABLE_KEEP_READALL_CAPS_SUPPORT + cap_t caps; + + if (prctl(PR_SET_KEEPCAPS, 1)) { + berrno be; + Emsg1(M_ERROR_TERM, 0, _("prctl failed: ERR=%s\n"), be.bstrerror()); + } + if (setreuid(uid, uid)) { + berrno be; + Emsg1(M_ERROR_TERM, 0, _("setreuid failed: ERR=%s\n"), be.bstrerror()); + } + if (!(caps = cap_from_text("cap_dac_read_search=ep"))) { + berrno be; + Emsg1(M_ERROR_TERM, 0, _("cap_from_text failed: ERR=%s\n"), be.bstrerror()); + } + if (cap_set_proc(caps) < 0) { + berrno be; + Emsg1(M_ERROR_TERM, 0, _("cap_set_proc failed: ERR=%s\n"), be.bstrerror()); + } + cap_free(caps); +#else + Emsg0(M_ERROR_TERM, 0, _("Keep readall caps not implemented this OS or missing libraries.\n")); +#endif + } else if (setuid(uid)) { + berrno be; + Emsg1(M_ERROR_TERM, 0, _("Could not set specified userid: %s\n"), username); + } +#endif +} diff --git a/src/lib/protos.h b/src/lib/protos.h new file mode 100644 index 00000000..53c5a470 --- /dev/null +++ b/src/lib/protos.h @@ -0,0 +1,399 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Prototypes for lib directory of Bacula + * + */ + +#ifndef __LIBPROTOS_H +#define __LIBPROTOS_H + +class JCR; + +/* address_conf.c */ +void remove_duplicate_addresses(dlist *addr_list); + +/* attr.c */ +ATTR *new_attr(JCR *jcr); +void free_attr(ATTR *attr); +int unpack_attributes_record(JCR *jcr, int32_t stream, char *rec, int32_t reclen, ATTR *attr); +void build_attr_output_fnames(JCR *jcr, ATTR *attr); +void print_ls_output(JCR *jcr, ATTR *attr, int message_type=M_RESTORED); + +/* base64.c */ +void base64_init (void); +int to_base64 (int64_t value, char *where); +int from_base64 (int64_t *value, char *where); +int bin_to_base64 (char *buf, int buflen, char *bin, int binlen, + int compatible); +int base64_to_bin(char *dest, int destlen, char *src, int srclen); + +/* bjson.c */ +void strip_long_opts(char *out, const char *in); +void edit_alist(HPKT &hpkt); +void edit_msg_types(HPKT &hpkt, DEST *dest); +void display_msgs(HPKT &hpkt); +void display_alist(HPKT &hpkt); +bool display_alist_str(HPKT &hpkt); +bool display_alist_res(HPKT &hpkt); +void display_res(HPKT &hpkt); +void display_string_pair(HPKT &hpkt); +void display_int32_pair(HPKT &hpkt); +void display_int64_pair(HPKT &hpkt); +void display_bool_pair(HPKT &hpkt); +void display_bit_pair(HPKT &hpkt); +bool byte_is_set(char *byte, int num); +void display_bit_array(char *array, int num); +void display_last(HPKT &hpkt); +void init_hpkt(HPKT &hpkt); +void term_hpkt(HPKT &hpkt); +bool display_global_item(HPKT &hpkt); + +/* bsys.c */ +char *ucfirst(char *dest, const char *src, int len); +typedef enum { + WAIT_READ = 1, + WAIT_WRITE = 2 +} fd_wait_mode; +int fd_wait_data(int fd, fd_wait_mode mode, int sec, int msec); +FILE *bfopen(const char *path, const char *mode); +int baccept(int sockfd, struct sockaddr *addr, socklen_t *addrlen); +int copyfile(const char *src, const char *dst); +void setup_env(char *envp[]); +POOLMEM *quote_string (POOLMEM *snew, const char *old); +POOLMEM *quote_where (POOLMEM *snew, const char *old); +char *bstrncpy (char *dest, const char *src, int maxlen); +char *bstrncpy (char *dest, POOL_MEM &src, int maxlen); +char *bstrncat (char *dest, const char *src, int maxlen); +char *bstrncat (char *dest, POOL_MEM &src, int maxlen); +bool bstrcmp (const char *s1, const char *s2); +bool bstrcasecmp (const char *s1, const char *s2); +int cstrlen (const char *str); +void *b_malloc (const char *file, int line, size_t size); +#ifndef bmalloc +void *bmalloc (size_t size); +#endif +void bfree (void *buf); +void *brealloc (void *buf, size_t size); +void *bcalloc (size_t size1, size_t size2); +int bsnprintf (char *str, int32_t size, const char *format, ...); +int bvsnprintf (char *str, int32_t size, const char *format, va_list ap); +int pool_sprintf (char *pool_buf, const char *fmt, ...); +int create_lock_file (char *fname, const char *progname, const char *filetype, POOLMEM **errmsg, int *fd); +void create_pid_file (char *dir, const char *progname, int port); +int delete_pid_file (char *dir, const char *progname, int port); +void drop (char *uid, char *gid, bool keep_readall_caps); +int reset_job_user(); +int change_job_user(char *uname, char *gname, char *errmsg, int errlen); + +int bmicrosleep (int32_t sec, int32_t usec); +char *bfgets (char *s, int size, FILE *fd); +char *bfgets (POOLMEM *&s, FILE *fd); +void make_unique_filename (POOLMEM **name, int Id, char *what); +#ifndef HAVE_STRTOLL +long long int strtoll (const char *ptr, char **endptr, int base); +#endif +void read_state_file(char *dir, const char *progname, 
int port); +int b_strerror(int errnum, char *buf, size_t bufsiz); +char *escape_filename(const char *file_path); +int Zdeflate(char *in, int in_len, char *out, int &out_len); +int Zinflate(char *in, int in_len, char *out, int &out_len); +void stack_trace(); +int safer_unlink(const char *pathname, const char *regex); +int fs_get_free_space(const char *path, int64_t *freeval, int64_t *totalval); + +/* bnet.c */ +bool bnet_tls_server (TLS_CONTEXT *ctx, BSOCK *bsock, + alist *verify_list); +bool bnet_tls_client (TLS_CONTEXT *ctx, BSOCK *bsock, + alist *verify_list); +BSOCK * init_bsock (JCR *jcr, int sockfd, const char *who, const char *ip, + int port, struct sockaddr *client_addr); +#ifdef HAVE_WIN32 +#ifndef socklen_t +#define socklen_t int +#endif +#endif +int bnet_get_peer (BSOCK *bs, char *buf, socklen_t buflen); +BSOCK * dup_bsock (BSOCK *bsock); +void term_bsock (BSOCK *bsock); +const char *bnet_strerror (BSOCK *bsock); +const char *bnet_sig_to_ascii (int32_t msglen); +dlist *bnet_host2ipaddrs(const char *host, int family, const char **errstr); +void bnet_restore_blocking (BSOCK *sock, int flags); +int set_socket_errno(int sockstat); + +/* bget_msg.c */ +int bget_msg(BSOCK *sock); + +/* bpipe.c */ +BPIPE * open_bpipe(char *prog, int wait, const char *mode, char *envp[]=NULL); +int close_wpipe(BPIPE *bpipe); +int close_epipe(BPIPE *bpipe); +int close_bpipe(BPIPE *bpipe); + +/* cram-md5.c */ +bool cram_md5_respond(BSOCK *bs, const char *password, int *tls_remote_need, int *compatible); +bool cram_md5_challenge(BSOCK *bs, const char *password, int tls_local_need, int compatible); +void hmac_md5(uint8_t* text, int text_len, uint8_t* key, int key_len, uint8_t *hmac); + +/* crc32.c */ + +uint32_t bcrc32(unsigned char *buf, int len); + + +/* crypto.c */ +int init_crypto (void); +int cleanup_crypto (void); +DIGEST * crypto_digest_new (JCR *jcr, crypto_digest_t type); +bool crypto_digest_update (DIGEST *digest, const uint8_t *data, uint32_t length); +bool crypto_digest_finalize (DIGEST *digest, uint8_t *dest, uint32_t *length); +void crypto_digest_free (DIGEST *digest); +SIGNATURE * crypto_sign_new (JCR *jcr); +crypto_error_t crypto_sign_get_digest (SIGNATURE *sig, X509_KEYPAIR *keypair, + crypto_digest_t &algorithm, DIGEST **digest); +crypto_error_t crypto_sign_verify (SIGNATURE *sig, X509_KEYPAIR *keypair, DIGEST *digest); +int crypto_sign_add_signer (SIGNATURE *sig, DIGEST *digest, X509_KEYPAIR *keypair); +int crypto_sign_encode (SIGNATURE *sig, uint8_t *dest, uint32_t *length); +SIGNATURE * crypto_sign_decode (JCR *jcr, const uint8_t *sigData, uint32_t length); +void crypto_sign_free (SIGNATURE *sig); +CRYPTO_SESSION * crypto_session_new (crypto_cipher_t cipher, alist *pubkeys); +void crypto_session_free (CRYPTO_SESSION *cs); +bool crypto_session_encode (CRYPTO_SESSION *cs, uint8_t *dest, uint32_t *length); +crypto_error_t crypto_session_decode (const uint8_t *data, uint32_t length, alist *keypairs, CRYPTO_SESSION **session); +CRYPTO_SESSION * crypto_session_decode (const uint8_t *data, uint32_t length); +CIPHER_CONTEXT * crypto_cipher_new (CRYPTO_SESSION *cs, bool encrypt, uint32_t *blocksize); +bool crypto_cipher_update (CIPHER_CONTEXT *cipher_ctx, const uint8_t *data, uint32_t length, const uint8_t *dest, uint32_t *written); +bool crypto_cipher_finalize (CIPHER_CONTEXT *cipher_ctx, uint8_t *dest, uint32_t *written); +void crypto_cipher_free (CIPHER_CONTEXT *cipher_ctx); +X509_KEYPAIR * crypto_keypair_new (void); +X509_KEYPAIR * crypto_keypair_dup (X509_KEYPAIR *keypair); +int 
crypto_keypair_load_cert (X509_KEYPAIR *keypair, const char *file); +bool crypto_keypair_has_key (const char *file); +int crypto_keypair_load_key (X509_KEYPAIR *keypair, const char *file, CRYPTO_PEM_PASSWD_CB *pem_callback, const void *pem_userdata); +void crypto_keypair_free (X509_KEYPAIR *keypair); +int crypto_default_pem_callback (char *buf, int size, const void *userdata); +const char * crypto_digest_name (DIGEST *digest); +crypto_digest_t crypto_digest_stream_type (int stream); +const char * crypto_strerror (crypto_error_t error); + +/* daemon.c */ +void daemon_start (); + +/* edit.c */ +uint64_t str_to_uint64(char *str); +int64_t str_to_int64(char *str); +#define str_to_int32(str) ((int32_t)str_to_int64(str)) +char * edit_uint64_with_commas (uint64_t val, char *buf); +char * edit_uint64_with_suffix (uint64_t val, char *buf); +char * add_commas (char *val, char *buf); +char * edit_uint64 (uint64_t val, char *buf); +char * edit_int64 (int64_t val, char *buf); +char * edit_int64_with_commas (int64_t val, char *buf); +bool duration_to_utime (char *str, utime_t *value); +bool size_to_uint64(char *str, int str_len, uint64_t *rtn_value); +bool speed_to_uint64(char *str, int str_len, uint64_t *rtn_value); +char *edit_utime (utime_t val, char *buf, int buf_len); +bool is_a_number (const char *num); +bool is_a_number_list (const char *n); +bool is_an_integer (const char *n); +bool is_name_valid (const char *name, POOLMEM **msg); + +/* jcr.c (most definitions are in src/jcr.h) */ +void init_last_jobs_list(); +void term_last_jobs_list(); +void lock_last_jobs_list(); +void unlock_last_jobs_list(); +bool read_last_jobs_list(int fd, uint64_t addr); +uint64_t write_last_jobs_list(int fd, uint64_t addr); +void write_state_file(char *dir, const char *progname, int port); +void job_end_push(JCR *jcr, void job_end_cb(JCR *jcr,void *), void *ctx); +void lock_jobs(); +void unlock_jobs(); +JCR *jcr_walk_start(); +JCR *jcr_walk_next(JCR *prev_jcr); +void jcr_walk_end(JCR *jcr); +int job_count(); +JCR *get_jcr_from_tsd(); +void set_jcr_in_tsd(JCR *jcr); +void remove_jcr_from_tsd(JCR *jcr); +uint32_t get_jobid_from_tsd(); +uint32_t get_jobid_from_tid(pthread_t tid); + + +/* lex.c */ +LEX * lex_close_file (LEX *lf); +LEX * lex_open_file (LEX *lf, const char *fname, LEX_ERROR_HANDLER *scan_error); +LEX * lex_open_buf (LEX *lf, const char *buf, LEX_ERROR_HANDLER *scan_error); +int lex_get_char (LEX *lf); +void lex_unget_char (LEX *lf); +const char * lex_tok_to_str (int token); +int lex_get_token (LEX *lf, int expect); +void lex_set_default_error_handler (LEX *lf); +int lex_set_error_handler_error_type (LEX *lf, int err_type); +bool lex_check_eol (LEX *lf); + +/* Required typedef, not in a C file */ +extern "C" { + typedef char *(*job_code_callback_t)(JCR *, const char *, char *, int); +} + +/* message.c */ +void my_name_is (int argc, char *argv[], const char *name); +void init_msg (JCR *jcr, MSGS *msg, job_code_callback_t job_code_callback = NULL); +void term_msg (void); +void close_msg (JCR *jcr); +void add_msg_dest (MSGS *msg, int dest, int type, char *where, char *dest_code); +void rem_msg_dest (MSGS *msg, int dest, int type, char *where); +void Jmsg (JCR *jcr, int type, utime_t mtime, const char *fmt, ...); +void dispatch_message (JCR *jcr, int type, utime_t mtime, char *buf); +void init_console_msg (const char *wd); +void free_msgs_res (MSGS *msgs); +void dequeue_messages (JCR *jcr); +void dequeue_daemon_messages (JCR *jcr); +void set_db_engine_name (const char *name); +void set_trace (int trace_flag); 
+bool get_trace (void); +void set_hangup (int hangup_value); +void set_blowup (int blowup_value); +int get_hangup (void); +int get_blowup (void); +bool handle_hangup_blowup (JCR *jcr, uint32_t file_count, uint64_t byte_count); +void set_assert_msg (const char *file, int line, const char *msg); +void register_message_callback(void msg_callback(int type, char *msg)); +void setup_daemon_message_queue(); +void free_daemon_message_queue(); + +/* bnet_server.c */ +void bnet_thread_server(dlist *addr_list, int max_clients, + workq_t *client_wq, void *handle_client_request(void *bsock)); +void bnet_stop_thread_server(pthread_t tid); +void bnet_server (int port, void handle_client_request(BSOCK *bsock)); +int net_connect (int port); +BSOCK * bnet_bind (int port); +BSOCK * bnet_accept (BSOCK *bsock, char *who); + +/* message.c */ +typedef int (EVENT_HANDLER)(JCR *jcr, const char *event); +int generate_daemon_event(JCR *jcr, const char *event); + +/* signal.c */ +void init_signals (void terminate(int sig)); +void init_stack_dump (void); + +/* Used to display specific job information after a fatal signal */ +typedef void (dbg_hook_t)(FILE *fp); +void dbg_add_hook(dbg_hook_t *fct); + +/* scan.c */ +char *next_name(char **s); +void strip_leading_space (char *str); +char *strip_trailing_junk (char *str); +char *strip_trailing_newline (char *str); +char *strip_trailing_slashes (char *dir); +bool skip_spaces (char **msg); +bool skip_nonspaces (char **msg); +int fstrsch (const char *a, const char *b); +char *next_arg(char **s); +int parse_args(POOLMEM *cmd, POOLMEM **args, int *argc, + char **argk, char **argv, int max_args); +int parse_args_only(POOLMEM *cmd, POOLMEM **args, int *argc, + char **argk, char **argv, int max_args); +void split_path_and_filename(const char *fname, POOLMEM **path, + int *pnl, POOLMEM **file, int *fnl); +int bsscanf(const char *buf, const char *fmt, ...); + + +/* tls.c */ +TLS_CONTEXT *new_tls_context (const char *ca_certfile, + const char *ca_certdir, + const char *certfile, + const char *keyfile, + CRYPTO_PEM_PASSWD_CB *pem_callback, + const void *pem_userdata, + const char *dhfile, + bool verify_peer); +void free_tls_context (TLS_CONTEXT *ctx); +#ifdef HAVE_TLS +bool tls_postconnect_verify_host(JCR *jcr, TLS_CONNECTION *tls, + const char *host); +bool tls_postconnect_verify_cn(JCR *jcr, TLS_CONNECTION *tls, + alist *verify_list); +TLS_CONNECTION *new_tls_connection (TLS_CONTEXT *ctx, int fd); +bool tls_bsock_accept (BSOCK *bsock); +int tls_bsock_writen (BSOCK *bsock, char *ptr, int32_t nbytes); +int tls_bsock_readn (BSOCK *bsock, char *ptr, int32_t nbytes); +bool tls_bsock_probe (BSOCKCORE *bsock); +#endif /* HAVE_TLS */ +bool tls_bsock_connect (BSOCK *bsock); +void tls_bsock_shutdown (BSOCKCORE *bsock); +void free_tls_connection (TLS_CONNECTION *tls); +bool get_tls_require (TLS_CONTEXT *ctx); +bool get_tls_enable (TLS_CONTEXT *ctx); + + +/* util.c */ +void bmemzero (void *buf, size_t size); +bool is_null (const void *ptr); +bool is_buf_zero (const char *buf, int len); +void lcase (char *str); +void bash_spaces (char *str); +void bash_spaces (POOL_MEM &pm); +void unbash_spaces (char *str); +void unbash_spaces (POOL_MEM &pm); +char * encode_time (utime_t time, char *buf); +char * encode_mode (mode_t mode, char *buf); +char * hexdump(const char *data, int len, char *buf, int capacity, bool add_spaces=true); +char * asciidump(const char *data, int len, char *buf, int capacity); +char * smartdump(const char *data, int len, char *buf, int capacity, bool *is_ascii=NULL); +int 
is_power_of_two (uint64_t x); +int do_shell_expansion (char *name, int name_len); +void jobstatus_to_ascii (int JobStatus, char *msg, int maxlen); +void jobstatus_to_ascii_gui (int JobStatus, char *msg, int maxlen); +int run_program (char *prog, int wait, POOLMEM *&results); +int run_program_full_output (char *prog, int wait, POOLMEM *&results, char *env[]=NULL); +char * action_on_purge_to_string(int aop, POOL_MEM &ret); +const char * job_type_to_str (int type); +const char * job_status_to_str (int stat, int errors); +const char * job_level_to_str (int level); +const char * volume_status_to_str (const char *status); +void make_session_key (char *key, char *seed, int mode); +void encode_session_key (char *encode, char *session, char *key, int maxlen); +void decode_session_key (char *decode, char *session, char *key, int maxlen); +POOLMEM * edit_job_codes (JCR *jcr, char *omsg, char *imsg, const char *to, job_code_callback_t job_code_callback = NULL); +void set_working_directory (char *wd); +const char * last_path_separator (const char *str); + +/* watchdog.c */ +int start_watchdog(void); +int stop_watchdog(void); +watchdog_t *new_watchdog(void); +bool register_watchdog(watchdog_t *wd); +bool unregister_watchdog(watchdog_t *wd); +bool is_watchdog(); + +/* timers.c */ +btimer_t *start_child_timer(JCR *jcr, pid_t pid, uint32_t wait); +void stop_child_timer(btimer_t *wid); +btimer_t *start_thread_timer(JCR *jcr, pthread_t tid, uint32_t wait); +void stop_thread_timer(btimer_t *wid); +btimer_t *start_bsock_timer(BSOCK *bs, uint32_t wait); +void stop_bsock_timer(btimer_t *wid); + +#endif /* __LIBPROTOS_H */ diff --git a/src/lib/queue.c b/src/lib/queue.c new file mode 100644 index 00000000..c703546f --- /dev/null +++ b/src/lib/queue.c @@ -0,0 +1,138 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + + Q U E U E + Queue Handling Routines + + Taken from smartall written by John Walker. + + http://www.fourmilab.ch/smartall/ + + + +*/ + +#include "bacula.h" + +/* General purpose queue */ + +#ifdef REALLY_NEEDED +struct b_queue { + struct b_queue *qnext, /* Next item in queue */ + *qprev; /* Previous item in queue */ +}; +#endif + +/* + * To define a queue, use the following + * + * static BQUEUE xyz = { &xyz, &xyz }; + * + * Also, note, that the only real requirement is that + * the object that is passed to these routines contain + * a BQUEUE object as the very first member. The + * rest of the structure may be anything. + * + * NOTE!!!! The casting here is REALLY painful, but this avoids + * doing ugly casting every where else in the code. + */ + + +/* Queue manipulation functions. 
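 *
 * Editor's note (illustration only, not part of the imported source): a
 * minimal use of the pattern described above, relying only on the
 * declarations in queue.h -- the BQUEUE link must be the very first member
 * so the casts below are valid:
 *
 *   struct my_item {
 *      BQUEUE link;                      // must be first
 *      int payload;
 *   };
 *   static BQUEUE my_queue = { &my_queue, &my_queue };
 *
 *   struct my_item *it = (struct my_item *)malloc(sizeof(struct my_item));
 *   it->payload = 42;
 *   qinsert(&my_queue, (BQUEUE *)it);                      // append at tail
 *   struct my_item *got = (struct my_item *)qremove(&my_queue);  // FIFO pop, NULL if empty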
*/ + + +/* QINSERT -- Insert object at end of queue */ + +void qinsert(BQUEUE *qhead, BQUEUE *object) +{ +#define qh ((BQUEUE *)qhead) +#define obj ((BQUEUE *)object) + + ASSERT(qh->qprev->qnext == qh); + ASSERT(qh->qnext->qprev == qh); + + obj->qnext = qh; + obj->qprev = qh->qprev; + qh->qprev = obj; + obj->qprev->qnext = obj; +#undef qh +#undef obj +} + + +/* QREMOVE -- Remove next object from the queue given + the queue head (or any item). + Returns NULL if queue is empty */ + +BQUEUE *qremove(BQUEUE *qhead) +{ +#define qh ((BQUEUE *)qhead) + BQUEUE *object; + + ASSERT(qh->qprev->qnext == qh); + ASSERT(qh->qnext->qprev == qh); + + if ((object = qh->qnext) == qh) + return NULL; + qh->qnext = object->qnext; + object->qnext->qprev = qh; + return object; +#undef qh +} + +/* QNEXT -- Return next item from the queue + * returns NULL at the end of the queue. + * If qitem is NULL, the first item from + * the queue is returned. + */ + +BQUEUE *qnext(BQUEUE *qhead, BQUEUE *qitem) +{ +#define qh ((BQUEUE *)qhead) +#define qi ((BQUEUE *)qitem) + + BQUEUE *object; + + if (qi == NULL) + qitem = qhead; + ASSERT(qi->qprev->qnext == qi); + ASSERT(qi->qnext->qprev == qi); + + if ((object = qi->qnext) == qh) + return NULL; + return object; +#undef qh +#undef qi +} + + +/* QDCHAIN -- Dequeue an item from the middle of a queue. Passed + the queue item, returns the (now dechained) queue item. */ + +BQUEUE *qdchain(BQUEUE *qitem) +{ +#define qi ((BQUEUE *)qitem) + + ASSERT(qi->qprev->qnext == qi); + ASSERT(qi->qnext->qprev == qi); + + return qremove(qi->qprev); +#undef qi +} diff --git a/src/lib/queue.h b/src/lib/queue.h new file mode 100644 index 00000000..c64754b8 --- /dev/null +++ b/src/lib/queue.h @@ -0,0 +1,37 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Written by John Walker MM + */ + +/* General purpose queue */ + +struct b_queue { + struct b_queue *qnext, /* Next item in queue */ + *qprev; /* Previous item in queue */ +}; + +typedef struct b_queue BQUEUE; + +/* Queue functions */ + +void qinsert(BQUEUE *qhead, BQUEUE *object); +BQUEUE *qnext(BQUEUE *qhead, BQUEUE *qitem); +BQUEUE *qdchain(BQUEUE *qitem); +BQUEUE *qremove(BQUEUE *qhead); diff --git a/src/lib/rblist.c b/src/lib/rblist.c new file mode 100644 index 00000000..3c5bb7cb --- /dev/null +++ b/src/lib/rblist.c @@ -0,0 +1,488 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Bacula red-black binary tree routines. + * + * rblist is a binary tree with the links being in the data item. + * + * Developped in part from ideas obtained from several online University + * courses. + * + * Kern Sibbald, November MMV + * + */ + +#include "bacula.h" + +/* =================================================================== + * rblist + */ + +/* + * Insert an item in the tree, but only if it is unique + * otherwise, the item is returned non inserted + * The big trick is keeping the tree balanced after the + * insert. We use a parent pointer to make it simpler and + * to avoid recursion. + * + * Returns: item if item inserted + * other_item if same value already exists (item not inserted) + */ +void *rblist::insert(void *item, int compare(void *item1, void *item2)) +{ + void *x, *y; + void *last = NULL; /* last leaf if not found */ + void *found = NULL; + int comp = 0; + + /* Search */ + x = head; + while (x && !found) { + last = x; + comp = compare(item, x); + if (comp < 0) { + x = left(x); + } else if (comp > 0) { + x = right(x); + } else { + found = x; + } + } + + if (found) { /* found? */ + return found; /* yes, return item found */ + } + set_left(item, NULL); + set_right(item, NULL); + set_parent(item, NULL); + set_red(item, false); + /* Handle empty tree */ + if (num_items == 0) { + head = item; + num_items++; + return item; + } + x = last; + /* Not found, so insert it on appropriate side of tree */ + if (comp < 0) { + set_left(last, item); + } else { + set_right(last, item); + } + set_red(last, true); + set_parent(item, last); + num_items++; + + /* Now we must walk up the tree balancing it */ + x = last; + while (x != head && red(parent(x))) { + if (parent(x) == left(parent(parent(x)))) { + /* Look at the right side of our grandparent */ + y = right(parent(parent(x))); + if (y && red(y)) { + /* our parent must be black */ + set_red(parent(x), false); + set_red(y, false); + set_red(parent(parent(x)), true); + x = parent(parent(x)); /* move up to grandpa */ + } else { + if (x == right(parent(x))) { /* right side of parent? */ + x = parent(x); + left_rotate(x); + } + /* make parent black too */ + set_red(parent(x), false); + set_red(parent(parent(x)), true); + right_rotate(parent(parent(x))); + } + } else { + /* Look at left side of our grandparent */ + y = left(parent(parent(x))); + if (y && red(y)) { + set_red(parent(x), false); + set_red(y, false); + set_red(parent(parent(x)), true); + x = parent(parent(x)); /* move up to grandpa */ + } else { + if (x == left(parent(x))) { + x = parent(x); + right_rotate(x); + } + /* make parent black too */ + set_red(parent(x), false); + set_red(parent(parent(x)), true); + left_rotate(parent(parent(x))); + } + } + } + /* Make sure the head is always black */ + set_red(head, false); + return item; +} + +/* + * Search for item + */ +void *rblist::search(void *item, int compare(void *item1, void *item2)) +{ + void *found = NULL; + void *x; + int comp; + + x = head; + while (x) { + comp = compare(item, x); + if (comp < 0) { + x = left(x); + } else if (comp > 0) { + x = right(x); + } else { + found = x; + break; + } + } + return found; +} + +/* + * Get first item (i.e. lowest value) + */ +void *rblist::first(void) +{ + void *x; + + x = head; + down = true; + while (x) { + if (left(x)) { + x = left(x); + continue; + } + return x; + } + /* Tree is empty */ + return NULL; +} + +/* + * This is a non-recursive btree walk routine that returns + * the items one at a time in order. 
I've never seen a + * non-recursive tree walk routine published that returns + * one item at a time rather than doing a callback. + * + * Return the next item in sorted order. We assume first() + * was called once before calling this routine. + * We always go down as far as we can to the left, then up, and + * down one to the right, and again down as far as we can to the + * left. etc. + * + * Returns: pointer to next larger item + * NULL when no more items in tree + */ +void *rblist::next(void *item) +{ + void *x; + + if (!item) { + return first(); + } + + x = item; + if ((down && !left(x) && right(x)) || (!down && right(x))) { + /* Move down to right one */ + down = true; + x = right(x); + /* Then all the way down left */ + while (left(x)) { + x = left(x); + } + return x; + } + + /* We have gone down all we can, so now go up */ + for ( ;; ) { + /* If at head, we are done */ + if (!parent(x)) { + return NULL; + } + /* Move up in tree */ + down = false; + /* if coming from right, continue up */ + if (right(parent(x)) == x) { + x = parent(x); + continue; + } + /* Coming from left, go up one -- ie. return parent */ + return parent(x); + } +} + +/* + * Similer to next(), but visits all right nodes when + * coming up the tree. + */ +void *rblist::any(void *item) +{ + void *x; + + if (!item) { + return NULL; + } + + x = item; + if ((down && !left(x) && right(x)) || (!down && right(x))) { + /* Move down to right one */ + down = true; + x = right(x); + /* Then all the way down left */ + while (left(x)) { + x = left(x); + } + return x; + } + + /* We have gone down all we can, so now go up */ + for ( ;; ) { + /* If at head, we are done */ + if (!parent(x)) { + return NULL; + } + down = false; + /* Go up one and return parent */ + return parent(x); + } +} + + +/* x is item, y is below and to right, then rotated to below left */ +void rblist::left_rotate(void *item) +{ + void *y; + void *x; + + x = item; + y = right(x); + set_right(x, left(y)); + if (left(y)) { + set_parent(left(y), x); + } + set_parent(y, parent(x)); + /* if no parent then we have a new head */ + if (!parent(x)) { + head = y; + } else if (x == left(parent(x))) { + set_left(parent(x), y); + } else { + set_right(parent(x), y); + } + set_left(y, x); + set_parent(x, y); +} + +void rblist::right_rotate(void *item) +{ + void *x, *y; + + y = item; + x = left(y); + set_left(y, right(x)); + if (right(x)) { + set_parent(right(x), y); + } + set_parent(x, parent(y)); + /* if no parent then we have a new head */ + if (!parent(y)) { + head = x; + } else if (y == left(parent(y))) { + set_left(parent(y), x); + } else { + set_right(parent(y), x); + } + set_right(x, y); + set_parent(y, x); +} + + +void rblist::remove(void *item) +{ +} + +/* Destroy the tree contents. 
Not totally working */ +void rblist::destroy() +{ + void *x, *y = NULL; + + x = first(); +// printf("head=%p first=%p left=%p right=%p\n", head, x, left(x), right(x)); + for ( ; (y=any(x)); ) { + /* Prune the last item */ + if (parent(x)) { + if (x == left(parent(x))) { + set_left(parent(x), NULL); + } else if (x == right(parent(x))) { + set_right(parent(x), NULL); + } + } + if (!left(x) && !right(x)) { + if (head == x) { + head = NULL; + } +// if (num_items<30) { +// printf("free nitems=%d item=%p left=%p right=%p\n", num_items, x, left(x), right(x)); +// } + free((void *)x); /* free previous node */ + num_items--; + } + x = y; /* save last node */ + } + if (x) { + if (x == head) { + head = NULL; + } +// printf("free nitems=%d item=%p left=%p right=%p\n", num_items, x, left(x), right(x)); + free((void *)x); + num_items--; + } + if (head) { +// printf("Free head\n"); + free((void *)head); + } +// printf("free nitems=%d\n", num_items); + + head = NULL; +} + + + +#ifdef TEST_PROGRAM + +struct MYJCR { + rblink link; + char *buf; +}; + +static int my_compare(void *item1, void *item2) +{ + MYJCR *jcr1, *jcr2; + int comp; + jcr1 = (MYJCR *)item1; + jcr2 = (MYJCR *)item2; + comp = strcmp(jcr1->buf, jcr2->buf); + //Dmsg3(000, "compare=%d: %s to %s\n", comp, jcr1->buf, jcr2->buf); + return comp; +} + +int main() +{ + char buf[30]; + rblist *jcr_chain; + MYJCR *jcr = NULL; + MYJCR *jcr1; + + + /* Now do a binary insert for the tree */ + jcr_chain = New(rblist()); +#define CNT 26 + printf("append %d items\n", CNT*CNT*CNT); + strcpy(buf, "ZZZ"); + int count = 0; + for (int i=0; i<CNT; i++) { + for (int j=0; j<CNT; j++) { + for (int k=0; k<CNT; k++) { + count++; + jcr = (MYJCR *)malloc(sizeof(MYJCR)); + jcr->buf = bstrdup(buf); +// printf("buf=%p %s\n", jcr, jcr->buf); + jcr1 = (MYJCR *)jcr_chain->insert((void *)jcr, my_compare); + if (jcr != jcr1) { + Dmsg2(000, "Insert of %s vs %s failed.\n", jcr->buf, jcr1->buf); + } + buf[1]--; + } + buf[1] = 'Z'; + buf[2]--; + } + buf[2] = 'Z'; + buf[0]--; + } + printf("%d items appended\n", CNT*CNT*CNT); + printf("num_items=%d\n", jcr_chain->size()); + + jcr = (MYJCR *)malloc(sizeof(MYJCR)); + bmemzero(jcr, sizeof(MYJCR)); + + jcr->buf = bstrdup("a"); + if ((jcr1=(MYJCR *)jcr_chain->search((void *)jcr, my_compare))) { + printf("One less failed!!!! Got: %s\n", jcr1->buf); + } else { + printf("One less: OK\n"); + } + free(jcr->buf); + + jcr->buf = bstrdup("ZZZZZZZZZZZZZZZZ"); + if ((jcr1=(MYJCR *)jcr_chain->search((void *)jcr, my_compare))) { + printf("One greater failed!!!! 
Got:%s\n", jcr1->buf); + } else { + printf("One greater: OK\n"); + } + free(jcr->buf); + + jcr->buf = bstrdup("AAA"); + if ((jcr1=(MYJCR *)jcr_chain->search((void *)jcr, my_compare))) { + printf("Search for AAA got %s\n", jcr1->buf); + } else { + printf("Search for AAA not found\n"); + } + free(jcr->buf); + + jcr->buf = bstrdup("ZZZ"); + if ((jcr1 = (MYJCR *)jcr_chain->search((void *)jcr, my_compare))) { + printf("Search for ZZZ got %s\n", jcr1->buf); + } else { + printf("Search for ZZZ not found\n"); + } + free(jcr->buf); + free(jcr); + + + printf("Find each of %d items in tree.\n", count); + for (jcr=(MYJCR *)jcr_chain->first(); jcr; (jcr=(MYJCR *)jcr_chain->next((void *)jcr)) ) { +// printf("Got: %s\n", jcr->buf); + if (!jcr_chain->search((void *)jcr, my_compare)) { + printf("rblist binary_search item not found = %s\n", jcr->buf); + } + } + printf("Free each of %d items in tree.\n", count); + for (jcr=(MYJCR *)jcr_chain->first(); jcr; (jcr=(MYJCR *)jcr_chain->next((void *)jcr)) ) { +// printf("Free: %p %s\n", jcr, jcr->buf); + free(jcr->buf); + jcr->buf = NULL; + } + printf("num_items=%d\n", jcr_chain->size()); + delete jcr_chain; + + + sm_dump(true); /* unit test */ + +} +#endif diff --git a/src/lib/rblist.h b/src/lib/rblist.h new file mode 100644 index 00000000..2c651d97 --- /dev/null +++ b/src/lib/rblist.h @@ -0,0 +1,156 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* ======================================================================== + * + * red-black binary tree routines -- rblist.h + * + * Kern Sibbald, MMV + * + */ + +#define M_ABORT 1 + +/* + * There is a lot of extra casting here to work around the fact + * that some compilers (Sun and Visual C++) do not accept + * (bnode *) as an lvalue on the left side of an equal. 
+ * + * Loop var through each member of list + */ +#ifdef HAVE_TYPEOF +#define foreach_rblist(var, tree) \ + for (((var)=(typeof(var))(tree)->first()); (var); ((var)=(typeof(var))(tree)->next(var))) +#else +#define foreach_rblist(var, tree) \ + for ((*((void **)&(var))=(void*)((tree)->first())); (var); (*((void **)&(var))=(void*)((tree)->next(var)))) +#endif + +struct rblink { + void *parent; + void *left; + void *right; + bool red; +}; + +class rblist : public SMARTALLOC { + void *head; + int16_t loffset; + uint32_t num_items; + bool down; + void left_rotate(void *item); + void right_rotate(void *item); +public: + rblist(void *item, rblink *link); + rblist(void); + ~rblist(void) { destroy(); } + void init(void *item, rblink *link); + void set_parent(void *item, void *parent); + void set_left(void *item, void *left); + void set_right(void *item, void *right); + void set_red(void *item, bool red); + void *parent(const void *item) const; + void *left(const void *item) const; + void *right(const void *item) const; + bool red(const void *item) const; + void *insert(void *item, int compare(void *item1, void *item2)); + void *search(void *item, int compare(void *item1, void *item2)); + void *first(void); + void *next(void *item); + void *any(void *item); + void remove(void *item); + bool empty(void) const; + int size(void) const; + void destroy(void); +}; + + +/* + * This allows us to do explicit initialization, + * allowing us to mix C++ classes inside malloc'ed + * C structures. Define before called in constructor. + */ +inline void rblist::init(void *item, rblink *link) +{ + head = NULL; + loffset = (int)((char *)link - (char *)item); + if (loffset < 0 || loffset > 5000) { + Emsg0(M_ABORT, 0, "Improper rblist initialization.\n"); + } + num_items = 0; +} + +inline rblist::rblist(void *item, rblink *link) +{ + init(item, link); +} + +/* Constructor with link at head of item */ +inline rblist::rblist(void): head(0), loffset(0), num_items(0) +{ +} + +inline void rblist::set_parent(void *item, void *parent) +{ + ((rblink *)(((char *)item)+loffset))->parent = parent; +} + +inline void rblist::set_left(void *item, void *left) +{ + ((rblink *)(((char *)item)+loffset))->left = left; +} + +inline void rblist::set_right(void *item, void *right) +{ + ((rblink *)(((char *)item)+loffset))->right = right; +} + +inline void rblist::set_red(void *item, bool red) +{ + ((rblink *)(((char *)item)+loffset))->red = red; +} + +inline bool rblist::empty(void) const +{ + return head == NULL; +} + +inline int rblist::size() const +{ + return num_items; +} + +inline void *rblist::parent(const void *item) const +{ + return ((rblink *)(((char *)item)+loffset))->parent; +} + +inline void *rblist::left(const void *item) const +{ + return ((rblink *)(((char *)item)+loffset))->left; +} + +inline void *rblist::right(const void *item) const +{ + return ((rblink *)(((char *)item)+loffset))->right; +} + +inline bool rblist::red(const void *item) const +{ + return ((rblink *)(((char *)item)+loffset))->red; +} diff --git a/src/lib/res.c b/src/lib/res.c new file mode 100644 index 00000000..414b1c9c --- /dev/null +++ b/src/lib/res.c @@ -0,0 +1,148 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
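/*
 * A minimal usage sketch for the rblist interface above, assuming the usual
 * bacula.h environment.  The TEST_PROGRAM walks the tree with explicit
 * first()/next() calls; the foreach_rblist macro expresses the same
 * traversal.  MYITEM, its fields and walk_items() are hypothetical names
 * chosen only for illustration.
 */
struct MYITEM {
   rblink link;              /* placed first so the default rblist() ctor (loffset == 0) finds it */
   char *name;
};

static void walk_items(rblist *tree)
{
   MYITEM *item;
   foreach_rblist(item, tree) {   /* first()/next() under the hood */
      printf("%s\n", item->name);
   }
}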
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * This file handles locking and seaching resources + * + * Kern Sibbald, January MM + * Split from parse_conf.c April MMV + * + */ + +#include "bacula.h" + +/* Each daemon has a slightly different set of + * resources, so it will define the following + * global values. + */ +extern int32_t r_first; +extern int32_t r_last; +extern RES_TABLE resources[]; +extern RES_HEAD **res_head; + +brwlock_t res_lock; /* resource lock */ +static int res_locked = 0; /* resource chain lock count -- for debug */ + + +/* #define TRACE_RES */ + +void b_LockRes(const char *file, int line) +{ + int errstat; +#ifdef TRACE_RES + Pmsg4(000, "LockRes locked=%d w_active=%d at %s:%d\n", + res_locked, res_lock.w_active, file, line); + if (res_locked) { + Pmsg2(000, "LockRes writerid=%d myid=%d\n", res_lock.writer_id, + pthread_self()); + } +#endif + if ((errstat=rwl_writelock(&res_lock)) != 0) { + Emsg3(M_ABORT, 0, _("rwl_writelock failure at %s:%d: ERR=%s\n"), + file, line, strerror(errstat)); + } + res_locked++; +} + +void b_UnlockRes(const char *file, int line) +{ + int errstat; + if ((errstat=rwl_writeunlock(&res_lock)) != 0) { + Emsg3(M_ABORT, 0, _("rwl_writeunlock failure at %s:%d:. ERR=%s\n"), + file, line, strerror(errstat)); + } + res_locked--; +#ifdef TRACE_RES + Pmsg4(000, "UnLockRes locked=%d wactive=%d at %s:%d\n", + res_locked, res_lock.w_active, file, line); +#endif +} + +/* + * Compare two resource names + */ +int res_compare(void *item1, void *item2) +{ + RES *res1 = (RES *)item1; + RES *res2 = (RES *)item2; + return strcmp(res1->name, res2->name); +} + +/* + * Return resource of type rcode that matches name + */ +RES * +GetResWithName(int rcode, const char *name) +{ + RES_HEAD *reshead; + int rindex = rcode - r_first; + RES item, *res; + + LockRes(); + reshead = res_head[rindex]; + item.name = (char *)name; + res = (RES *)reshead->res_list->search(&item, res_compare); + UnlockRes(); + return res; + +} + +/* + * Return next resource of type rcode. On first + * call second arg (res) is NULL, on subsequent + * calls, it is called with previous value. + */ +RES * +GetNextRes(int rcode, RES *res) +{ + RES *nres; + int rindex = rcode - r_first; + + if (res == NULL) { + nres = (RES *)res_head[rindex]->first; + } else { + nres = res->res_next; + } + return nres; +} + +/* + * Return next resource of type rcode. On first + * call second arg (res) is NULL, on subsequent + * calls, it is called with previous value. + */ +RES * +GetNextRes(RES_HEAD **rhead, int rcode, RES *res) +{ + RES *nres; + int rindex = rcode - r_first; + + if (res == NULL) { + nres = (RES *)rhead[rindex]->first; + } else { + nres = res->res_next; + } + return nres; +} + + +/* Parser state */ +enum parse_state { + p_none, + p_resource +}; diff --git a/src/lib/runscript.c b/src/lib/runscript.c new file mode 100644 index 00000000..3aa7af50 --- /dev/null +++ b/src/lib/runscript.c @@ -0,0 +1,302 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
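/*
 * A short sketch of walking one resource chain with the helpers above.
 * LockRes()/UnlockRes() are the same wrappers GetResWithName() uses;
 * "rcode" is whatever resource code the calling daemon defines between
 * r_first and r_last, and list_resource_names() is a hypothetical caller.
 */
static void list_resource_names(int rcode)
{
   RES *res = NULL;

   LockRes();                                /* serialize access to the chains */
   while ((res = GetNextRes(rcode, res))) {
      printf("resource: %s\n", res->name);   /* RES::name, as used by res_compare() */
   }
   UnlockRes();
}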
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Manipulation routines for RunScript list + * + * Eric Bollengier, May 2006 + * + */ + + +#include "bacula.h" +#include "jcr.h" +#include "runscript.h" + +/* + * This function pointer is set only by the Director (dird.c), + * and is not set in the File daemon, because the File + * daemon cannot run console commands. + */ +bool (*console_command)(JCR *jcr, const char *cmd) = NULL; + + +RUNSCRIPT *new_runscript() +{ + Dmsg0(500, "runscript: creating new RUNSCRIPT object\n"); + RUNSCRIPT *cmd = (RUNSCRIPT *)malloc(sizeof(RUNSCRIPT)); + memset(cmd, 0, sizeof(RUNSCRIPT)); + cmd->reset_default(); + + return cmd; +} + +void RUNSCRIPT::reset_default(bool free_strings) +{ + if (free_strings && command) { + free_pool_memory(command); + } + if (free_strings && target) { + free_pool_memory(target); + } + + target = NULL; + command = NULL; + on_success = true; + on_failure = false; + fail_on_error = true; + when = SCRIPT_Never; + old_proto = false; /* TODO: drop this with bacula 1.42 */ + job_code_callback = NULL; +} + +RUNSCRIPT *copy_runscript(RUNSCRIPT *src) +{ + Dmsg0(500, "runscript: creating new RUNSCRIPT object from other\n"); + + RUNSCRIPT *dst = (RUNSCRIPT *)malloc(sizeof(RUNSCRIPT)); + memcpy(dst, src, sizeof(RUNSCRIPT)); + + dst->command = NULL; + dst->target = NULL; + + dst->set_command(src->command, src->cmd_type); + dst->set_target(src->target); + + return dst; +} + +void free_runscript(RUNSCRIPT *script) +{ + Dmsg0(500, "runscript: freeing RUNSCRIPT object\n"); + + if (script->command) { + free_pool_memory(script->command); + } + if (script->target) { + free_pool_memory(script->target); + } + free(script); +} + +int run_scripts(JCR *jcr, alist *runscripts, const char *label) +{ + Dmsg2(200, "runscript: running all RUNSCRIPT object (%s) JobStatus=%c\n", label, jcr->JobStatus); + + RUNSCRIPT *script; + bool runit; + + int when; + + if (strstr(label, NT_("Before"))) { + when = SCRIPT_Before; + } else if (bstrcmp(label, NT_("ClientAfterVSS"))) { + when = SCRIPT_AfterVSS; + } else { + when = SCRIPT_After; + } + + if (runscripts == NULL) { + Dmsg0(100, "runscript: WARNING RUNSCRIPTS list is NULL\n"); + return 0; + } + + foreach_alist(script, runscripts) { + Dmsg2(200, "runscript: try to run %s:%s\n", NPRT(script->target), NPRT(script->command)); + runit = false; + + if ((script->when & SCRIPT_Before) && (when & SCRIPT_Before)) { + if ((script->on_success && + (jcr->JobStatus == JS_Running || jcr->JobStatus == JS_Created)) + || (script->on_failure && + (job_canceled(jcr) || jcr->JobStatus == JS_Differences)) + ) + { + Dmsg4(200, "runscript: Run it because SCRIPT_Before (%s,%i,%i,%c)\n", + script->command, script->on_success, script->on_failure, + jcr->JobStatus ); + runit = true; + } + } + + if ((script->when & SCRIPT_AfterVSS) && (when & SCRIPT_AfterVSS)) { + if ((script->on_success && (jcr->JobStatus == JS_Blocked)) + || (script->on_failure && job_canceled(jcr)) + ) + { + Dmsg4(200, "runscript: Run it because SCRIPT_AfterVSS (%s,%i,%i,%c)\n", + script->command, script->on_success, script->on_failure, + jcr->JobStatus ); + runit = true; + } + } + + if ((script->when & SCRIPT_After) && (when 
& SCRIPT_After)) { + if ((script->on_success && + (jcr->JobStatus == JS_Terminated || jcr->JobStatus == JS_Warnings)) + || (script->on_failure && + (job_canceled(jcr) || jcr->JobStatus == JS_Differences)) + ) + { + Dmsg4(200, "runscript: Run it because SCRIPT_After (%s,%i,%i,%c)\n", + script->command, script->on_success, script->on_failure, + jcr->JobStatus ); + runit = true; + } + } + + if (!script->is_local()) { + runit = false; + } + + /* we execute it */ + if (runit) { + script->run(jcr, label); + } + } + return 1; +} + +bool RUNSCRIPT::is_local() +{ + if (!target || (strcmp(target, "") == 0)) { + return true; + } else { + return false; + } +} + +/* set this->command to cmd */ +void RUNSCRIPT::set_command(const char *cmd, int acmd_type) +{ + Dmsg1(500, "runscript: setting command = %s\n", NPRT(cmd)); + + if (!cmd) { + return; + } + + if (!command) { + command = get_pool_memory(PM_FNAME); + } + + pm_strcpy(command, cmd); + cmd_type = acmd_type; +} + +/* set this->target to client_name */ +void RUNSCRIPT::set_target(const char *client_name) +{ + Dmsg1(500, "runscript: setting target = %s\n", NPRT(client_name)); + + if (!client_name) { + return; + } + + if (!target) { + target = get_pool_memory(PM_FNAME); + } + + pm_strcpy(target, client_name); +} + +bool RUNSCRIPT::run(JCR *jcr, const char *name) +{ + Dmsg1(100, "runscript: running a RUNSCRIPT object type=%d\n", cmd_type); + POOLMEM *ecmd = get_pool_memory(PM_FNAME); + int status; + BPIPE *bpipe; + char line[MAXSTRING]; + + ecmd = edit_job_codes(jcr, ecmd, this->command, "", this->job_code_callback); + Dmsg1(100, "runscript: running '%s'...\n", ecmd); + Jmsg(jcr, M_INFO, 0, _("%s: run %s \"%s\"\n"), + cmd_type==SHELL_CMD?"shell command":"console command", name, ecmd); + + switch (cmd_type) { + case SHELL_CMD: + bpipe = open_bpipe(ecmd, 0, "r"); + free_pool_memory(ecmd); + if (bpipe == NULL) { + berrno be; + Jmsg(jcr, M_ERROR, 0, _("Runscript: %s could not execute. ERR=%s\n"), name, + be.bstrerror()); + goto bail_out; + } + while (fgets(line, sizeof(line), bpipe->rfd)) { + int len = strlen(line); + if (len > 0 && line[len-1] == '\n') { + line[len-1] = 0; + } + Jmsg(jcr, M_INFO, 0, _("%s: %s\n"), name, line); + } + status = close_bpipe(bpipe); + if (status != 0) { + berrno be; + Jmsg(jcr, M_ERROR, 0, _("Runscript: %s returned non-zero status=%d. ERR=%s\n"), name, + be.code(status), be.bstrerror(status)); + goto bail_out; + } + Dmsg0(100, "runscript OK\n"); + break; + case CONSOLE_CMD: + if (console_command) { /* can we run console command? */ + if (!console_command(jcr, ecmd)) { /* yes, do so */ + goto bail_out; + } + } + break; + } + return true; + +bail_out: + /* cancel running job properly */ + if (fail_on_error) { + jcr->setJobStatus(JS_ErrorTerminated); + } + Dmsg1(100, "runscript failed. 
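/*
 * A sketch of the intended calling sequence, following the Usage comment in
 * runscript.h below, but with the JCR argument that RUNSCRIPT::run() takes.
 * The command string and the "AfterJob" label are placeholders; run() reports
 * the label in the job messages.
 */
static void run_after_script(JCR *jcr)
{
   RUNSCRIPT *script = new_runscript();      /* zeroed, then reset_default() applied */
   script->set_command("/bin/sleep 20");     /* SHELL_CMD by default */
   script->on_failure = true;                /* also run when the job failed */
   script->when = SCRIPT_After;              /* checked by run_scripts(), not by run() */
   script->run(jcr, "AfterJob");
   free_runscript(script);                   /* releases the POOLMEM strings too */
}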
fail_on_error=%d\n", fail_on_error); + return false; +} + +void free_runscripts(alist *runscripts) +{ + Dmsg0(500, "runscript: freeing all RUNSCRIPTS object\n"); + + if (runscripts){ + RUNSCRIPT *elt; + foreach_alist(elt, runscripts) { + free_runscript(elt); + } + } +} + +void RUNSCRIPT::debug() +{ + Dmsg0(200, "runscript: debug\n"); + Dmsg0(200, _(" --> RunScript\n")); + Dmsg1(200, _(" --> Command=%s\n"), NPRT(command)); + Dmsg1(200, _(" --> Target=%s\n"), NPRT(target)); + Dmsg1(200, _(" --> RunOnSuccess=%u\n"), on_success); + Dmsg1(200, _(" --> RunOnFailure=%u\n"), on_failure); + Dmsg1(200, _(" --> FailJobOnError=%u\n"), fail_on_error); + Dmsg1(200, _(" --> RunWhen=%u\n"), when); +} + +void RUNSCRIPT::set_job_code_callback(job_code_callback_t arg_job_code_callback) +{ + this->job_code_callback = arg_job_code_callback; +} diff --git a/src/lib/runscript.h b/src/lib/runscript.h new file mode 100644 index 00000000..98b4d512 --- /dev/null +++ b/src/lib/runscript.h @@ -0,0 +1,106 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula RUNSCRIPT Structure definition for FileDaemon and Director + * Eric Bollengier May 2006 + */ + + +#ifndef __RUNSCRIPT_H_ +#define __RUNSCRIPT_H_ 1 + +#include "protos.h" + +/* Usage: + * + * #define USE_RUNSCRIPT + * #include "lib/runscript.h" + * + * RUNSCRIPT *script = new_runscript(); + * script->set_command("/bin/sleep 20"); + * script->on_failure = true; + * script->when = SCRIPT_After; + * + * script->run("LabelBefore"); // the label must contain "Before" or "After" special keyword + * free_runscript(script); + */ + +/* + * RUNSCRIPT->when can take following bit values: + */ +enum { + SCRIPT_Never = 0, + SCRIPT_After = (1<<0), /* AfterJob */ + SCRIPT_Before = (1<<1), /* BeforeJob */ + SCRIPT_AfterVSS = (1<<2), /* BeforeJob and After VSS */ + SCRIPT_Any = SCRIPT_Before | SCRIPT_After +}; + +enum { + SHELL_CMD = '|', + CONSOLE_CMD = '@' +}; + +/* + * Structure for RunScript ressource + */ +class RUNSCRIPT { +public: + POOLMEM *command; /* command string */ + POOLMEM *target; /* host target */ + int when; /* SCRIPT_Before|Script_After BEFORE/AFTER JOB*/ + int cmd_type; /* Command type -- Shell, Console */ + char level; /* Base|Full|Incr...|All (NYI) */ + bool on_success; /* execute command on job success (After) */ + bool on_failure; /* execute command on job failure (After) */ + bool fail_on_error; /* abort job on error (Before) */ + /* TODO : drop this with bacula 1.42 */ + bool old_proto; /* used by old 1.3X protocol */ + job_code_callback_t job_code_callback; + /* Optional callback function passed to edit_job_code */ + alist *commands; /* Use during parsing */ + bool run(JCR *job, const char *name=""); /* name must contain "Before" or "After" keyword */ + bool can_run_at_level(int JobLevel) { return true;}; /* TODO */ + void set_command(const char *cmd, int cmd_type = SHELL_CMD); + void set_target(const char *client_name); + void reset_default(bool free_string = false); + 
bool is_local(); /* true if running on local host */ + void debug(); + + void set_job_code_callback(job_code_callback_t job_code_callback); +}; + +/* create new RUNSCRIPT (set all value to 0) */ +RUNSCRIPT *new_runscript(); + +/* create new RUNSCRIPT from an other */ +RUNSCRIPT *copy_runscript(RUNSCRIPT *src); + +/* launch each script from runscripts*/ +int run_scripts(JCR *jcr, alist *runscripts, const char *name); + +/* free RUNSCRIPT (and all POOLMEM) */ +void free_runscript(RUNSCRIPT *script); + +/* foreach_alist free RUNSCRIPT */ +void free_runscripts(alist *runscripts); /* you have to free alist */ + +extern DLL_IMP_EXP bool (*console_command)(JCR *jcr, const char *cmd); + +#endif /* __RUNSCRIPT_H_ */ diff --git a/src/lib/rwlock.c b/src/lib/rwlock.c new file mode 100644 index 00000000..cad04b4c --- /dev/null +++ b/src/lib/rwlock.c @@ -0,0 +1,677 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula Thread Read/Write locking code. It permits + * multiple readers but only one writer. Note, however, + * that the writer thread is permitted to make multiple + * nested write lock calls. + * + * Kern Sibbald, January MMI + * + * This code adapted from "Programming with POSIX Threads", by + * David R. Butenhof + * + */ + +#define LOCKMGR_COMPLIANT +#include "bacula.h" + +/* + * Initialize a read/write lock + * + * Returns: 0 on success + * errno on failure + */ +int rwl_init(brwlock_t *rwl, int priority) +{ + int stat; + + rwl->r_active = rwl->w_active = 0; + rwl->r_wait = rwl->w_wait = 0; + rwl->priority = priority; + if ((stat = pthread_mutex_init(&rwl->mutex, NULL)) != 0) { + return stat; + } + if ((stat = pthread_cond_init(&rwl->read, NULL)) != 0) { + pthread_mutex_destroy(&rwl->mutex); + return stat; + } + if ((stat = pthread_cond_init(&rwl->write, NULL)) != 0) { + pthread_cond_destroy(&rwl->read); + pthread_mutex_destroy(&rwl->mutex); + return stat; + } + rwl->valid = RWLOCK_VALID; + return 0; +} + +/* + * Destroy a read/write lock + * + * Returns: 0 on success + * errno on failure + */ +int rwl_destroy(brwlock_t *rwl) +{ + int stat, stat1, stat2; + + if (rwl->valid != RWLOCK_VALID) { + return EINVAL; + } + if ((stat = pthread_mutex_lock(&rwl->mutex)) != 0) { + return stat; + } + + /* + * If any threads are active, report EBUSY + */ + if (rwl->r_active > 0 || rwl->w_active) { + pthread_mutex_unlock(&rwl->mutex); + return EBUSY; + } + + /* + * If any threads are waiting, report EBUSY + */ + if (rwl->r_wait > 0 || rwl->w_wait > 0) { + pthread_mutex_unlock(&rwl->mutex); + return EBUSY; + } + + rwl->valid = 0; + if ((stat = pthread_mutex_unlock(&rwl->mutex)) != 0) { + return stat; + } + stat = pthread_mutex_destroy(&rwl->mutex); + stat1 = pthread_cond_destroy(&rwl->read); + stat2 = pthread_cond_destroy(&rwl->write); + return (stat != 0 ? stat : (stat1 != 0 ? stat1 : stat2)); +} + +/* + * Handle cleanup when the read lock condition variable + * wait is released. 
+ */ +static void rwl_read_release(void *arg) +{ + brwlock_t *rwl = (brwlock_t *)arg; + + rwl->r_wait--; + pthread_mutex_unlock(&rwl->mutex); +} + +/* + * Handle cleanup when the write lock condition variable wait + * is released. + */ +static void rwl_write_release(void *arg) +{ + brwlock_t *rwl = (brwlock_t *)arg; + + rwl->w_wait--; + pthread_mutex_unlock(&rwl->mutex); +} + +/* + * Lock for read access, wait until locked (or error). + */ +int rwl_readlock(brwlock_t *rwl) +{ + int stat; + + if (rwl->valid != RWLOCK_VALID) { + return EINVAL; + } + if ((stat = pthread_mutex_lock(&rwl->mutex)) != 0) { + return stat; + } + if (rwl->w_active) { + rwl->r_wait++; /* indicate that we are waiting */ + pthread_cleanup_push(rwl_read_release, (void *)rwl); + while (rwl->w_active) { + stat = pthread_cond_wait(&rwl->read, &rwl->mutex); + if (stat != 0) { + break; /* error, bail out */ + } + } + pthread_cleanup_pop(0); + rwl->r_wait--; /* we are no longer waiting */ + } + if (stat == 0) { + rwl->r_active++; /* we are running */ + } + pthread_mutex_unlock(&rwl->mutex); + return stat; +} + +/* + * Attempt to lock for read access, don't wait + */ +int rwl_readtrylock(brwlock_t *rwl) +{ + int stat, stat2; + + if (rwl->valid != RWLOCK_VALID) { + return EINVAL; + } + if ((stat = pthread_mutex_lock(&rwl->mutex)) != 0) { + return stat; + } + if (rwl->w_active) { + stat = EBUSY; + } else { + rwl->r_active++; /* we are running */ + } + stat2 = pthread_mutex_unlock(&rwl->mutex); + return (stat == 0 ? stat2 : stat); +} + +/* + * Unlock read lock + */ +int rwl_readunlock(brwlock_t *rwl) +{ + int stat, stat2; + + if (rwl->valid != RWLOCK_VALID) { + return EINVAL; + } + if ((stat = pthread_mutex_lock(&rwl->mutex)) != 0) { + return stat; + } + rwl->r_active--; + if (rwl->r_active == 0 && rwl->w_wait > 0) { /* if writers waiting */ + stat = pthread_cond_broadcast(&rwl->write); + } + stat2 = pthread_mutex_unlock(&rwl->mutex); + return (stat == 0 ? stat2 : stat); +} + + +/* + * Lock for write access, wait until locked (or error). + * Multiple nested write locking is permitted. 
+ */ +int rwl_writelock_p(brwlock_t *rwl, const char *file, int line) +{ + int stat; + + if (rwl->valid != RWLOCK_VALID) { + return EINVAL; + } + if ((stat = pthread_mutex_lock(&rwl->mutex)) != 0) { + return stat; + } + if (rwl->w_active && pthread_equal(rwl->writer_id, pthread_self())) { + rwl->w_active++; + pthread_mutex_unlock(&rwl->mutex); + return 0; + } + lmgr_pre_lock(rwl, rwl->priority, file, line); + if (rwl->w_active || rwl->r_active > 0) { + rwl->w_wait++; /* indicate that we are waiting */ + pthread_cleanup_push(rwl_write_release, (void *)rwl); + while (rwl->w_active || rwl->r_active > 0) { + if ((stat = pthread_cond_wait(&rwl->write, &rwl->mutex)) != 0) { + lmgr_do_unlock(rwl); + break; /* error, bail out */ + } + } + pthread_cleanup_pop(0); + rwl->w_wait--; /* we are no longer waiting */ + } + if (stat == 0) { + rwl->w_active++; /* we are running */ + rwl->writer_id = pthread_self(); /* save writer thread's id */ + lmgr_post_lock(); + } + pthread_mutex_unlock(&rwl->mutex); + return stat; +} + +/* + * Attempt to lock for write access, don't wait + */ +int rwl_writetrylock(brwlock_t *rwl) +{ + int stat, stat2; + + if (rwl->valid != RWLOCK_VALID) { + return EINVAL; + } + if ((stat = pthread_mutex_lock(&rwl->mutex)) != 0) { + return stat; + } + if (rwl->w_active && pthread_equal(rwl->writer_id, pthread_self())) { + rwl->w_active++; + pthread_mutex_unlock(&rwl->mutex); + return 0; + } + if (rwl->w_active || rwl->r_active > 0) { + stat = EBUSY; + } else { + rwl->w_active = 1; /* we are running */ + rwl->writer_id = pthread_self(); /* save writer thread's id */ + lmgr_do_lock(rwl, rwl->priority, __FILE__, __LINE__); + } + stat2 = pthread_mutex_unlock(&rwl->mutex); + return (stat == 0 ? stat2 : stat); +} + +/* + * Unlock write lock + * Start any waiting writers in preference to waiting readers + */ +int rwl_writeunlock(brwlock_t *rwl) +{ + int stat, stat2; + + if (rwl->valid != RWLOCK_VALID) { + return EINVAL; + } + if ((stat = pthread_mutex_lock(&rwl->mutex)) != 0) { + return stat; + } + if (rwl->w_active <= 0) { + pthread_mutex_unlock(&rwl->mutex); + Jmsg0(NULL, M_ABORT, 0, _("rwl_writeunlock called too many times.\n")); + } + rwl->w_active--; + if (!pthread_equal(pthread_self(), rwl->writer_id)) { + pthread_mutex_unlock(&rwl->mutex); + Jmsg0(NULL, M_ABORT, 0, _("rwl_writeunlock by non-owner.\n")); + } + if (rwl->w_active > 0) { + stat = 0; /* writers still active */ + } else { + lmgr_do_unlock(rwl); + /* No more writers, awaken someone */ + if (rwl->r_wait > 0) { /* if readers waiting */ + stat = pthread_cond_broadcast(&rwl->read); + } else if (rwl->w_wait > 0) { + stat = pthread_cond_broadcast(&rwl->write); + } + } + stat2 = pthread_mutex_unlock(&rwl->mutex); + return (stat == 0 ? stat2 : stat); +} + +bool is_rwl_valid(brwlock_t *rwl) +{ + return (rwl->valid == RWLOCK_VALID); +} + + +#ifdef TEST_RWLOCK + +#define THREADS 300 +#define DATASIZE 15 +#define ITERATIONS 1000000 + +/* + * Keep statics for each thread. + */ +typedef struct thread_tag { + int thread_num; + pthread_t thread_id; + int writes; + int reads; + int interval; +} thread_t; + +/* + * Read/write lock and shared data. + */ +typedef struct data_tag { + brwlock_t lock; + int data; + int writes; +} data_t; + +static thread_t threads[THREADS]; +static data_t data[DATASIZE]; + +/* + * Thread start routine that uses read/write locks. 
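/*
 * A minimal sketch of the brwlock_t calling pattern implemented above:
 * many concurrent readers or one writer, and the writer may nest write
 * locks.  rwl_writelock() is the macro from rwlock.h; counter_lock,
 * shared_counter and the helper names are placeholders.
 */
static brwlock_t counter_lock;
static int shared_counter = 0;

static int counter_setup(void)
{
   return rwl_init(&counter_lock);           /* 0 on success, errno value on failure */
}

static void counter_bump(void)
{
   if (rwl_writelock(&counter_lock) == 0) {  /* may be nested by the owning thread */
      shared_counter++;
      rwl_writeunlock(&counter_lock);
   }
}

static int counter_read(void)
{
   int val = -1;
   if (rwl_readlock(&counter_lock) == 0) {   /* shared with other readers */
      val = shared_counter;
      rwl_readunlock(&counter_lock);
   }
   return val;
}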
+ */ +void *thread_routine(void *arg) +{ + thread_t *self = (thread_t *)arg; + int repeats = 0; + int iteration; + int element = 0; + int status; + + for (iteration=0; iteration < ITERATIONS; iteration++) { + /* + * Each "self->interval" iterations, perform an + * update operation (write lock instead of read + * lock). + */ +// if ((iteration % self->interval) == 0) { + status = rwl_writelock(&data[element].lock); + if (status != 0) { + berrno be; + printf("Write lock failed. ERR=%s\n", be.bstrerror(status)); + exit(1); + } + data[element].data = self->thread_num; + data[element].writes++; + self->writes++; + status = rwl_writelock(&data[element].lock); + if (status != 0) { + berrno be; + printf("Write lock failed. ERR=%s\n", be.bstrerror(status)); + exit(1); + } + data[element].data = self->thread_num; + data[element].writes++; + self->writes++; + status = rwl_writeunlock(&data[element].lock); + if (status != 0) { + berrno be; + printf("Write unlock failed. ERR=%s\n", be.bstrerror(status)); + exit(1); + } + status = rwl_writeunlock(&data[element].lock); + if (status != 0) { + berrno be; + printf("Write unlock failed. ERR=%s\n", be.bstrerror(status)); + exit(1); + } + +#ifdef xxx + } else { + /* + * Look at the current data element to see whether + * the current thread last updated it. Count the + * times to report later. + */ + status = rwl_readlock(&data[element].lock); + if (status != 0) { + berrno be; + printf("Read lock failed. ERR=%s\n", be.bstrerror(status)); + exit(1); + } + self->reads++; + if (data[element].data == self->thread_num) + repeats++; + status = rwl_readunlock(&data[element].lock); + if (status != 0) { + berrno be; + printf("Read unlock failed. ERR=%s\n", be.bstrerror(status)); + exit(1); + } + } +#endif + element++; + if (element >= DATASIZE) { + element = 0; + } + } + if (repeats > 0) { + Pmsg2(000, _("Thread %d found unchanged elements %d times\n"), + self->thread_num, repeats); + } + return NULL; +} + +int main (int argc, char *argv[]) +{ + int count; + int data_count; + int status; + unsigned int seed = 1; + int thread_writes = 0; + int data_writes = 0; + + /* + * For Solaris 2.5,2.6,7 and 8 threads are not timesliced. + * Ensure our threads can run concurrently. + */ + +#ifdef USE_THR_SETCONCURRENCY + thr_setconcurrency(THREADS); /* Only implemented on Solaris */ +#endif + + /* + * Initialize the shared data. + */ + for (data_count = 0; data_count < DATASIZE; data_count++) { + data[data_count].data = 0; + data[data_count].writes = 0; + status = rwl_init(&data[data_count].lock); + if (status != 0) { + berrno be; + printf("Init rwlock failed. ERR=%s\n", be.bstrerror(status)); + exit(1); + } + } + + /* + * Create THREADS threads to access shared data. + */ + for (count = 0; count < THREADS; count++) { + threads[count].thread_num = count + 1; + threads[count].writes = 0; + threads[count].reads = 0; + threads[count].interval = rand_r(&seed) % 71; + if (threads[count].interval <= 0) { + threads[count].interval = 1; + } + status = pthread_create (&threads[count].thread_id, + NULL, thread_routine, (void*)&threads[count]); + if (status != 0 || (int)threads[count].thread_id == 0) { + berrno be; + printf("Create thread failed. ERR=%s\n", be.bstrerror(status)); + exit(1); + } + } + + /* + * Wait for all threads to complete, and collect + * statistics. + */ + for (count = 0; count < THREADS; count++) { + status = pthread_join (threads[count].thread_id, NULL); + if (status != 0) { + berrno be; + printf("Join thread failed. 
ERR=%s\n", be.bstrerror(status)); + exit(1); + } + thread_writes += threads[count].writes; + printf (_("%02d: interval %d, writes %d, reads %d\n"), + count, threads[count].interval, + threads[count].writes, threads[count].reads); + } + + /* + * Collect statistics for the data. + */ + for (data_count = 0; data_count < DATASIZE; data_count++) { + data_writes += data[data_count].writes; + printf (_("data %02d: value %d, %d writes\n"), + data_count, data[data_count].data, data[data_count].writes); + rwl_destroy (&data[data_count].lock); + } + + printf (_("Total: %d thread writes, %d data writes\n"), + thread_writes, data_writes); + return 0; +} + +#endif + +#ifdef TEST_RW_TRY_LOCK +/* + * brwlock_try_main.c + * + * Demonstrate use of non-blocking read-write locks. + * + * On older Solaris systems, call thr_setconcurrency() + * to allow interleaved thread execution, since threads are not + * timesliced. + */ +#include +#include "rwlock.h" +#include "errors.h" + +#define THREADS 5 +#define ITERATIONS 1000 +#define DATASIZE 15 + +/* + * Keep statistics for each thread. + */ +typedef struct thread_tag { + int thread_num; + pthread_t thread_id; + int r_collisions; + int w_collisions; + int updates; + int interval; +} thread_t; + +/* + * Read-write lock and shared data + */ +typedef struct data_tag { + brwlock_t lock; + int data; + int updates; +} data_t; + +thread_t threads[THREADS]; +data_t data[DATASIZE]; + +/* + * Thread start routine that uses read-write locks + */ +void *thread_routine (void *arg) +{ + thread_t *self = (thread_t*)arg; + int iteration; + int element; + int status; + lmgr_init_thread(); + element = 0; /* Current data element */ + + for (iteration = 0; iteration < ITERATIONS; iteration++) { + if ((iteration % self->interval) == 0) { + status = rwl_writetrylock (&data[element].lock); + if (status == EBUSY) + self->w_collisions++; + else if (status == 0) { + data[element].data++; + data[element].updates++; + self->updates++; + rwl_writeunlock (&data[element].lock); + } else + err_abort (status, _("Try write lock")); + } else { + status = rwl_readtrylock (&data[element].lock); + if (status == EBUSY) + self->r_collisions++; + else if (status != 0) { + err_abort (status, _("Try read lock")); + } else { + if (data[element].data != data[element].updates) + printf ("%d: data[%d] %d != %d\n", + self->thread_num, element, + data[element].data, data[element].updates); + rwl_readunlock (&data[element].lock); + } + } + + element++; + if (element >= DATASIZE) + element = 0; + } + lmgr_cleanup_thread(); + return NULL; +} + +int main (int argc, char *argv[]) +{ + int count, data_count; + unsigned int seed = 1; + int thread_updates = 0, data_updates = 0; + int status; + + /* + * For Solaris 2.5,2.6,7 and 8 threads are not timesliced. + * Ensure our threads can run concurrently. + */ + DPRINTF (("Setting concurrency level to %d\n", THREADS)); + thr_setconcurrency(THREADS); /* Only implemented on Solaris */ + + /* + * Initialize the shared data. + */ + for (data_count = 0; data_count < DATASIZE; data_count++) { + data[data_count].data = 0; + data[data_count].updates = 0; + rwl_init(&data[data_count].lock); + } + + /* + * Create THREADS threads to access shared data. 
+ */ + for (count = 0; count < THREADS; count++) { + threads[count].thread_num = count; + threads[count].r_collisions = 0; + threads[count].w_collisions = 0; + threads[count].updates = 0; + threads[count].interval = rand_r (&seed) % ITERATIONS; + status = pthread_create (&threads[count].thread_id, + NULL, thread_routine, (void*)&threads[count]); + if (status != 0) + err_abort (status, _("Create thread")); + } + + /* + * Wait for all threads to complete, and collect + * statistics. + */ + for (count = 0; count < THREADS; count++) { + status = pthread_join (threads[count].thread_id, NULL); + if (status != 0) + err_abort (status, _("Join thread")); + thread_updates += threads[count].updates; + printf (_("%02d: interval %d, updates %d, " + "r_collisions %d, w_collisions %d\n"), + count, threads[count].interval, + threads[count].updates, + threads[count].r_collisions, threads[count].w_collisions); + } + + /* + * Collect statistics for the data. + */ + for (data_count = 0; data_count < DATASIZE; data_count++) { + data_updates += data[data_count].updates; + printf (_("data %02d: value %d, %d updates\n"), + data_count, data[data_count].data, data[data_count].updates); + rwl_destroy (&data[data_count].lock); + } + + return 0; +} + +#endif diff --git a/src/lib/rwlock.h b/src/lib/rwlock.h new file mode 100644 index 00000000..a366202b --- /dev/null +++ b/src/lib/rwlock.h @@ -0,0 +1,73 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula Thread Read/Write locking code. It permits + * multiple readers but only one writer. + * + * Kern Sibbald, January MMI + * + * This code adapted from "Programming with POSIX Threads", by + * David R. 
Butenhof + */ + +#ifndef __RWLOCK_H +#define __RWLOCK_H 1 + +typedef struct s_rwlock_tag { + pthread_mutex_t mutex; + pthread_cond_t read; /* wait for read */ + pthread_cond_t write; /* wait for write */ + pthread_t writer_id; /* writer's thread id */ + int priority; /* used in deadlock detection */ + int valid; /* set when valid */ + int r_active; /* readers active */ + int w_active; /* writers active */ + int r_wait; /* readers waiting */ + int w_wait; /* writers waiting */ +} brwlock_t; + +typedef struct s_rwsteal_tag { + pthread_t writer_id; /* writer's thread id */ + int state; +} brwsteal_t; + + +#define RWLOCK_VALID 0xfacade + +#define RWL_INIIALIZER \ + {PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, \ + PTHREAD_COND_INITIALIZER, RWLOCK_VALID, 0, 0, 0, 0} + +#define rwl_writelock(x) rwl_writelock_p((x), __FILE__, __LINE__) + +/* + * read/write lock prototypes + */ +extern int rwl_init(brwlock_t *wrlock, int priority=0); +extern int rwl_destroy(brwlock_t *rwlock); +extern int rwl_readlock(brwlock_t *rwlock); +extern int rwl_readtrylock(brwlock_t *rwlock); +extern int rwl_readunlock(brwlock_t *rwlock); +extern int rwl_writelock_p(brwlock_t *rwlock, + const char *file="*unknown*", int line=0); +extern int rwl_writetrylock(brwlock_t *rwlock); +extern int rwl_writeunlock(brwlock_t *rwlock); +extern bool is_rwl_valid(brwlock_t *rwl); + +#endif /* __RWLOCK_H */ diff --git a/src/lib/scan.c b/src/lib/scan.c new file mode 100644 index 00000000..77fb50c8 --- /dev/null +++ b/src/lib/scan.c @@ -0,0 +1,620 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * scan.c -- scanning routines for Bacula + * + * Kern Sibbald, MM separated from util.c MMIII + * + */ + + +#include "bacula.h" +#include "jcr.h" +#include "findlib/find.h" + +/* Strip leading space from command line arguments */ +void strip_leading_space(char *str) +{ + char *p = str; + while (B_ISSPACE(*p)) { + p++; + } + if (str != p) { + do { + *str++ = *p; + } while (*p++ != 0); + } +} + +/* Strip any trailing junk from the command */ +char *strip_trailing_junk(char *cmd) +{ + char *p; + + /* strip trailing junk from command */ + p = cmd - 1 + strlen(cmd); + while ((p >= cmd) && (B_ISSPACE(*p) || *p == '\n' || *p == '\r')) { + *p-- = 0; + } + return cmd; +} + +/* Strip any trailing newline characters from the string */ +char *strip_trailing_newline(char *cmd) +{ + char *p; + p = cmd - 1 + strlen(cmd); + while ((p >= cmd) && (*p == '\n' || *p == '\r')) *p-- = 0; + return cmd; +} + +/* Strip any trailing slashes from a directory path */ +char *strip_trailing_slashes(char *dir) +{ + char *p; + + /* strip trailing slashes */ + p = dir -1 + strlen(dir); + while (p >= dir && IsPathSeparator(*p)) *p-- = 0; + return dir; +} + +/* + * Skip spaces + * Returns: 0 on failure (EOF) + * 1 on success + * new address in passed parameter + */ +bool skip_spaces(char **msg) +{ + char *p = *msg; + if (!p) { + return false; + } + while (*p && B_ISSPACE(*p)) { + p++; + } + *msg = p; + return *p ? true : false; +} + +/* + * Skip nonspaces + * Returns: 0 on failure (EOF) + * 1 on success + * new address in passed parameter + */ +bool skip_nonspaces(char **msg) +{ + char *p = *msg; + + if (!p) { + return false; + } + while (*p && !B_ISSPACE(*p)) { + p++; + } + *msg = p; + return *p ? true : false; +} + +/* folded search for string - case insensitive */ +int +fstrsch(const char *a, const char *b) /* folded case search */ +{ + const char *s1,*s2; + char c1, c2; + + s1=a; + s2=b; + while (*s1) { /* do it the fast way */ + if ((*s1++ | 0x20) != (*s2++ | 0x20)) + return 0; /* failed */ + } + while (*a) { /* do it over the correct slow way */ + if (B_ISUPPER(c1 = *a)) { + c1 = tolower((int)c1); + } + if (B_ISUPPER(c2 = *b)) { + c2 = tolower((int)c2); + } + if (c1 != c2) { + return 0; + } + a++; + b++; + } + return 1; +} + + +/* + * Return next argument from command line. Note, this + * routine is destructive because it stored 0 at the end + * of each argument. + * Called with pointer to pointer to command line. This + * pointer is updated to point to the remainder of the + * command line. + * + * Returns pointer to next argument -- don't store the result + * in the pointer you passed as an argument ... + * The next argument is terminated by a space unless within + * quotes. Double quote characters (unless preceded by a \) are + * stripped. + * + */ +char *next_arg(char **s) +{ + char *p, *q, *n; + bool in_quote = false; + + /* skip past spaces to next arg */ + for (p=*s; *p && B_ISSPACE(*p); ) { + p++; + } + Dmsg1(900, "Next arg=%s\n", p); + for (n = q = p; *p ; ) { + if (*p == '\\') { /* slash? */ + p++; /* yes, skip it */ + if (*p) { + *q++ = *p++; + } else { + *q++ = *p; + } + continue; + } + if (*p == '"') { /* start or end of quote */ + p++; + in_quote = !in_quote; /* change state */ + continue; + } + if (!in_quote && B_ISSPACE(*p)) { /* end of field */ + p++; + break; + } + *q++ = *p++; + } + *q = 0; + *s = p; + Dmsg2(900, "End arg=%s next=%s\n", n, p); + return n; +} + +/* + * This routine parses the input command line. 
+ * It makes a copy in args, then builds an + * argc, argk, argv list where: + * + * argc = count of arguments + * argk[i] = argument keyword (part preceding =) + * argv[i] = argument value (part after =) + * + * example: arg1 arg2=abc arg3= + * + * argc = c + * argk[0] = arg1 + * argv[0] = NULL + * argk[1] = arg2 + * argv[1] = abc + * argk[2] = arg3 + * argv[2] = + */ +int parse_args(POOLMEM *cmd, POOLMEM **args, int *argc, + char **argk, char **argv, int max_args) +{ + char *p; + + parse_args_only(cmd, args, argc, argk, argv, max_args); + + /* Separate keyword and value */ + for (int i=0; i < *argc; i++) { + p = strchr(argk[i], '='); + if (p) { + *p++ = 0; /* terminate keyword and point to value */ + } + argv[i] = p; /* save ptr to value or NULL */ + } +#ifdef xxx_debug + for (int i=0; i < *argc; i++) { + Pmsg3(000, "Arg %d: kw=%s val=%s\n", i, argk[i], argv[i]?argv[i]:"NULL"); + } +#endif + return 1; +} + + +/* + * This routine parses the input command line. + * It makes a copy in args, then builds an + * argc, argk, but no argv (values). + * This routine is useful for scanning command lines where the data + * is a filename and no keywords are expected. If we scan a filename + * for keywords, any = in the filename will be interpreted as the + * end of a keyword, and this is not good. + * + * argc = count of arguments + * argk[i] = argument keyword (part preceding =) + * argv[i] = NULL + * + * example: arg1 arg2=abc arg3= + * + * argc = c + * argk[0] = arg1 + * argv[0] = NULL + * argk[1] = arg2=abc + * argv[1] = NULL + * argk[2] = arg3 + * argv[2] = + */ +int parse_args_only(POOLMEM *cmd, POOLMEM **args, int *argc, + char **argk, char **argv, int max_args) +{ + char *p, *n; + + pm_strcpy(args, cmd); + strip_trailing_junk(*args); + p = *args; + *argc = 0; + /* Pick up all arguments */ + while (*argc < max_args) { + n = next_arg(&p); + if (*n) { + argk[*argc] = n; + argv[(*argc)++] = NULL; + } else { + break; + } + } + return 1; +} + + +/* + * Given a full filename, split it into its path + * and filename parts. They are returned in pool memory + * in the arguments provided. + */ +void split_path_and_filename(const char *fname, POOLMEM **path, int *pnl, + POOLMEM **file, int *fnl) +{ + const char *f; + int slen; + int len = slen = strlen(fname); + + /* + * Find path without the filename. + * I.e. everything after the last / is a "filename". + * OK, maybe it is a directory name, but we treat it like + * a filename. If we don't find a / then the whole name + * must be a path name (e.g. c:). + */ + f = fname + len - 1; + /* "strip" any trailing slashes */ + while (slen > 1 && IsPathSeparator(*f)) { + slen--; + f--; + } + /* Walk back to last slash -- begin of filename */ + while (slen > 0 && !IsPathSeparator(*f)) { + slen--; + f--; + } + if (IsPathSeparator(*f)) { /* did we find a slash? */ + f++; /* yes, point to filename */ + } else { /* no, whole thing must be path name */ + f = fname; + } + Dmsg2(200, "after strip len=%d f=%s\n", len, f); + *fnl = fname - f + len; + if (*fnl > 0) { + *file = check_pool_memory_size(*file, *fnl+1); + memcpy(*file, f, *fnl); /* copy filename */ + } + (*file)[*fnl] = 0; + + *pnl = f - fname; + if (*pnl > 0) { + *path = check_pool_memory_size(*path, *pnl+1); + memcpy(*path, fname, *pnl); + } + (*path)[*pnl] = 0; + + Dmsg2(200, "pnl=%d fnl=%d\n", *pnl, *fnl); + Dmsg3(200, "split fname=%s path=%s file=%s\n", fname, *path, *file); +} + +/* + * Extremely simple sscanf. 
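/*
 * A sketch of the keyword=value split described above, using the example
 * string from the comment.  DEMO_MAX_ARGS and demo_parse_args() are
 * hypothetical names; real callers supply their own argument limits.
 */
#define DEMO_MAX_ARGS 30

static void demo_parse_args(void)
{
   POOLMEM *cmd  = get_pool_memory(PM_FNAME);
   POOLMEM *args = get_pool_memory(PM_FNAME);
   char *argk[DEMO_MAX_ARGS];
   char *argv[DEMO_MAX_ARGS];
   int argc;

   pm_strcpy(cmd, "arg1 arg2=abc arg3=");
   parse_args(cmd, &args, &argc, argk, argv, DEMO_MAX_ARGS);
   for (int i = 0; i < argc; i++) {
      /* argv[i] is NULL when the argument had no '=' (e.g. arg1) */
      printf("argk=%s argv=%s\n", argk[i], argv[i] ? argv[i] : "NULL");
   }
   free_pool_memory(args);
   free_pool_memory(cmd);
}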
Handles only %(u,d,ld,qd,qu,lu,lld,llu,c,nns) + * + * Note, BIG is the default maximum length when no length + * has been specified for %s. If it is not big enough, then + * simply add a length such as %10000s. + */ +const int BIG = 1000; +int bsscanf(const char *buf, const char *fmt, ...) +{ + va_list ap; + int count = 0; + void *vp; + char *cp; + int l = 0; + int max_len = BIG; + uint64_t value; + bool error = false; + bool negative; + + va_start(ap, fmt); + while (*fmt && !error) { +// Dmsg1(000, "fmt=%c\n", *fmt); + if (*fmt == '%') { + fmt++; +// Dmsg1(000, "Got %% nxt=%c\n", *fmt); +switch_top: + switch (*fmt++) { + case 'u': + value = 0; + while (B_ISDIGIT(*buf)) { + value = B_TIMES10(value) + *buf++ - '0'; + } + vp = (void *)va_arg(ap, void *); +// Dmsg2(000, "val=%lld at 0x%lx\n", value, (long unsigned)vp); + if (l == 0) { + *((int *)vp) = (int)value; + } else if (l == 1) { + *((uint32_t *)vp) = (uint32_t)value; +// Dmsg0(000, "Store 32 bit int\n"); + } else { + *((uint64_t *)vp) = (uint64_t)value; +// Dmsg0(000, "Store 64 bit int\n"); + } + count++; + l = 0; + break; + case 'd': + value = 0; + if (*buf == '-') { + negative = true; + buf++; + } else { + negative = false; + } + while (B_ISDIGIT(*buf)) { + value = B_TIMES10(value) + *buf++ - '0'; + } + if (negative) { + value = -value; + } + vp = (void *)va_arg(ap, void *); +// Dmsg2(000, "val=%lld at 0x%lx\n", value, (long unsigned)vp); + if (l == 0) { + *((int *)vp) = (int)value; + } else if (l == 1) { + *((int32_t *)vp) = (int32_t)value; +// Dmsg0(000, "Store 32 bit int\n"); + } else { + *((int64_t *)vp) = (int64_t)value; +// Dmsg0(000, "Store 64 bit int\n"); + } + count++; + l = 0; + break; + case 'l': +// Dmsg0(000, "got l\n"); + l = 1; + if (*fmt == 'l') { + l++; + fmt++; + } + if (*fmt == 'd' || *fmt == 'u') { + goto switch_top; + } +// Dmsg1(000, "fmt=%c !=d,u\n", *fmt); + error = true; + break; + case 'q': + l = 2; + if (*fmt == 'd' || *fmt == 'u') { + goto switch_top; + } +// Dmsg1(000, "fmt=%c !=d,u\n", *fmt); + error = true; + break; + case 's': +// Dmsg1(000, "Store string max_len=%d\n", max_len); + cp = (char *)va_arg(ap, char *); + while (*buf && !B_ISSPACE(*buf) && max_len-- > 0) { + *cp++ = *buf++; + } + *cp = 0; + count++; + max_len = BIG; + break; + case 'c': + cp = (char *)va_arg(ap, char *); + *cp = *buf++; + count++; + break; + case '%': + if (*buf++ != '%') { + error = true; + } + break; + default: + fmt--; + max_len = 0; + while (B_ISDIGIT(*fmt)) { + max_len = B_TIMES10(max_len) + *fmt++ - '0'; + } +// Dmsg1(000, "Default max_len=%d\n", max_len); + if (*fmt == 's') { + goto switch_top; + } +// Dmsg1(000, "Default c=%c\n", *fmt); + error = true; + break; /* error: unknown format */ + } + continue; + + /* White space eats zero or more whitespace */ + } else if (B_ISSPACE(*fmt)) { + fmt++; + while (B_ISSPACE(*buf)) { + buf++; + } + /* Plain text must match */ + } else if (*buf++ != *fmt++) { +// Dmsg2(000, "Mismatch buf=%c fmt=%c\n", *--buf, *--fmt); + error = true; + break; + } + } + va_end(ap); +// Dmsg2(000, "Error=%d count=%d\n", error, count); + if (error) { + count = -1; + } + return count; +} + +/* + * Return next name from a comma separated list. Note, this + * routine is destructive because it stored 0 at the end + * of each argument. + * Called with pointer to pointer to command line. This + * pointer is updated to point to the remainder of the + * command line. + * + * Returns pointer to next name -- don't store the result + * in the pointer you passed as an argument ... 
+ * The next argument is terminated by a , unless within + * quotes. Double quote characters (unless preceded by a \) are + * stripped. + * + */ +char *next_name(char **s) +{ + char *p, *q, *n; + bool in_quote = false; + + if (s == NULL || *s == NULL || **s == '\0') { + return NULL; + } + p = *s; + Dmsg1(900, "Next name=%s\n", p); + for (n = q = p; *p ; ) { + if (*p == '\\') { /* slash? */ + p++; /* yes, skip it */ + if (*p) { + *q++ = *p++; + } else { + *q++ = *p; + } + continue; + } + if (*p == '"') { /* start or end of quote */ + p++; + in_quote = !in_quote; /* change state */ + continue; + } + if (!in_quote && *p == ',') { /* end of field */ + p++; + break; + } + *q++ = *p++; + } + *q = 0; + *s = p; + Dmsg2(900, "End arg=%s next=%s\n", n, p); + return n; +} + +#ifdef TEST_PROGRAM +int main(int argc, char *argv[]) +{ + char buf[100]; + uint32_t val32; + uint64_t val64; + uint32_t FirstIndex, LastIndex, StartFile, EndFile, StartBlock, EndBlock; + char Job[200]; + int cnt; + char *helloreq= "Hello *UserAgent* calling\n"; + char *hello = "Hello %127s calling\n"; + char *catreq = +"CatReq Job=NightlySave.2004-06-11_19.11.32 CreateJobMedia FirstIndex=1 LastIndex=114 StartFile=0 EndFile=0 StartBlock=208 EndBlock=2903248"; +static char Create_job_media[] = "CatReq Job=%127s CreateJobMedia " + "FirstIndex=%u LastIndex=%u StartFile=%u EndFile=%u " + "StartBlock=%u EndBlock=%u\n"; +static char OK_media[] = "1000 OK VolName=%127s VolJobs=%u VolFiles=%u" + " VolBlocks=%u VolBytes=%" lld " VolMounts=%u VolErrors=%u VolWrites=%u" + " MaxVolBytes=%" lld " VolCapacityBytes=%" lld " VolStatus=%20s" + " Slot=%d MaxVolJobs=%u MaxVolFiles=%u InChanger=%d" + " VolReadTime=%" lld " VolWriteTime=%" lld; + char *media = +"1000 OK VolName=TestVolume001 VolJobs=0 VolFiles=0 VolBlocks=0 VolBytes=1 VolMounts=0 VolErrors=0 VolWrites=0 MaxVolBytes=0 VolCapacityBytes=0 VolStatus=Append Slot=0 MaxVolJobs=0 MaxVolFiles=0 InChanger=1 VolReadTime=0 VolWriteTime=0"; +struct VOLUME_CAT_INFO { + /* Media info for the current Volume */ + uint32_t VolCatJobs; /* number of jobs on this Volume */ + uint32_t VolCatFiles; /* Number of files */ + uint32_t VolCatBlocks; /* Number of blocks */ + uint64_t VolCatBytes; /* Number of bytes written */ + uint32_t VolCatMounts; /* Number of mounts this volume */ + uint32_t VolCatErrors; /* Number of errors this volume */ + uint32_t VolCatWrites; /* Number of writes this volume */ + uint32_t VolCatReads; /* Number of reads this volume */ + uint64_t VolCatRBytes; /* Number of bytes read */ + uint32_t VolCatRecycles; /* Number of recycles this volume */ + int32_t Slot; /* Slot in changer */ + bool InChanger; /* Set if vol in current magazine */ + uint32_t VolCatMaxJobs; /* Maximum Jobs to write to volume */ + uint32_t VolCatMaxFiles; /* Maximum files to write to volume */ + uint64_t VolCatMaxBytes; /* Max bytes to write to volume */ + uint64_t VolCatCapacityBytes; /* capacity estimate */ + uint64_t VolReadTime; /* time spent reading */ + uint64_t VolWriteTime; /* time spent writing this Volume */ + char VolCatStatus[20]; /* Volume status */ + char VolCatName[MAX_NAME_LENGTH]; /* Desired volume to mount */ +}; + struct VOLUME_CAT_INFO vol; + +#ifdef xxx + bsscanf("Hello_world 123 1234", "%120s %ld %lld", buf, &val32, &val64); + printf("%s %d %lld\n", buf, val32, val64); + + *Job=0; + cnt = bsscanf(catreq, Create_job_media, &Job, + &FirstIndex, &LastIndex, &StartFile, &EndFile, + &StartBlock, &EndBlock); + printf("cnt=%d Job=%s\n", cnt, Job); + cnt = bsscanf(helloreq, hello, &Job); + 
printf("cnt=%d Agent=%s\n", cnt, Job); +#endif + cnt = bsscanf(media, OK_media, + vol.VolCatName, + &vol.VolCatJobs, &vol.VolCatFiles, + &vol.VolCatBlocks, &vol.VolCatBytes, + &vol.VolCatMounts, &vol.VolCatErrors, + &vol.VolCatWrites, &vol.VolCatMaxBytes, + &vol.VolCatCapacityBytes, vol.VolCatStatus, + &vol.Slot, &vol.VolCatMaxJobs, &vol.VolCatMaxFiles, + &vol.InChanger, &vol.VolReadTime, &vol.VolWriteTime); + printf("cnt=%d Vol=%s\n", cnt, vol.VolCatName); + +} + +#endif diff --git a/src/lib/sellist.c b/src/lib/sellist.c new file mode 100644 index 00000000..25365a97 --- /dev/null +++ b/src/lib/sellist.c @@ -0,0 +1,268 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Kern Sibbald, January MMXII + * + * Selection list. A string of integers separated by commas + * representing items selected. Ranges of the form nn-mm + * are also permitted. + */ + +#include "bacula.h" + +/* + * Returns next item + * error if returns -1 and errmsg set + * end of items if returns -1 and errmsg NULL + */ +int64_t sellist::next() +{ + errmsg = NULL; + if (beg <= end) { /* scan done? */ + //printf("Return %lld\n", beg); + return beg++; + } + if (e == NULL) { + goto bail_out; /* nothing to scan */ + } + /* + * As we walk the list, we set EOF in + * the end of the next item to ease scanning, + * but save and then restore the character. + */ + for (p=e; p && *p; p=e) { + esave = hsave = 0; + /* Check for list */ + e = strpbrk(p, ", "); + if (e) { /* have list */ + esave = *e; + *e++ = 0; + } + /* Check for range */ + h = strchr(p, '-'); /* range? */ + if (h == p) { + errmsg = _("Negative numbers not permitted.\n"); + goto bail_out; + } + if (h) { /* have range */ + hsave = *h; + *h++ = 0; + if (!is_an_integer(h)) { + errmsg = _("Range end is not integer.\n"); + goto bail_out; + } + skip_spaces(&p); + if (!is_an_integer(p)) { + errmsg = _("Range start is not an integer.\n"); + goto bail_out; + } + beg = str_to_int64(p); + end = str_to_int64(h); + //printf("beg=%lld end=%lld\n", beg, end); + if (end <= beg) { + errmsg = _("Range end not bigger than start.\n"); + goto bail_out; + } + } else { /* not list, not range */ + skip_spaces(&p); + /* Check for abort (.) */ + if (*p == '.') { + errmsg = _("User cancel requested.\n"); + goto bail_out; + } + /* Check for all keyword */ + if (strncasecmp(p, "all", 3) == 0) { + all = true; + errmsg = NULL; + //printf("Return 0 i.e. all\n"); + return 0; + } + if (!is_an_integer(p)) { + errmsg = _("Input value is not an integer.\n"); + goto bail_out; + } + beg = end = str_to_int64(p); + } + if (esave) { + *(e-1) = esave; + } + if (hsave) { + *(h-1) = hsave; + } + if (beg <= 0 || end <= 0) { + errmsg = _("Selection items must be be greater than zero.\n"); + goto bail_out; + } + if (beg <= end) { + //printf("Return %lld\n", beg); + return beg++; + } + } + //printf("Rtn=-1. 
End of items\n"); + /* End of items */ + begin(); + e = NULL; + return -1; /* No error */ + +bail_out: + if (errmsg) { + //printf("Bail out rtn=-1. p=%c err=%s\n", *p, errmsg); + } else { + //printf("Rtn=-1. End of items\n"); + } + e = NULL; + return -1; /* Error, errmsg set */ +} + + +/* + * Set selection string and optionally scan it + * returns false on error in string + * returns true if OK + */ +bool sellist::set_string(const char *string, bool scan=true) +{ + bool ok = true; + /* + * Copy string, because we write into it, + * then scan through it once to find any + * errors. + */ + if (expanded) { + free(expanded); + expanded = NULL; + } + if (str) { + free(str); + } + str = bstrdup(string); + begin(); + num_items = 0; + if (scan) { + while (next() >= 0) { + num_items++; + } + ok = get_errmsg() == NULL; + } + if (ok) { + begin(); + } else { + e = NULL; + } + return ok; +} + +/* + * Get the expanded list of values separated by commas, + * useful for SQL queries + */ +char *sellist::get_expanded_list() +{ + int32_t expandedsize = 512; + int32_t len; + int64_t val; + char *p, *tmp; + char ed1[50]; + + if (!expanded) { + p = expanded = (char *)malloc(expandedsize * sizeof(char)); + *p = 0; + + while ((val = next()) >= 0) { + edit_int64(val, ed1); + len = strlen(ed1); + + /* Alloc more space if needed */ + if ((p + len + 1) > (expanded + expandedsize)) { + expandedsize = expandedsize * 2; + + tmp = (char *) realloc(expanded, expandedsize); + + /* Compute new addresses for p and expanded */ + p = tmp + (p - expanded); + expanded = tmp; + } + + /* If not at the begining of the string, add a "," */ + if (p != expanded) { + strcpy(p, ","); + p++; + } + + strcpy(p, ed1); + p += len; + } + } + return expanded; +} + +#ifndef TEST_PROGRAM +#define TEST_PROGRAM_A +#endif + +#ifdef TEST_PROGRAM +#include "unittests.h" + +struct test { + const int nr; + const char *sinp; + const char *sout; + const bool res; +}; + +static struct test tests[] = { + { 1, "1,70", "1,70", true, }, + { 2, "1", "1", true, }, + { 3, "256", "256", true, }, + { 4, "1-5", "1,2,3,4,5", true, }, + { 5, "1-5,7", "1,2,3,4,5,7", true, }, + { 6, "1 10 20 30", "1,10,20,30", true, }, + { 7, "1-5,7,20 21", "1,2,3,4,5,7,20,21", true, }, + { 8, "all", "0", true, }, + { 9, "12a", "", false, }, + {10, "12-11", "", false, }, + {11, "12-13a", "", false, }, + {12, "a123", "", false, }, + {13, "1 3", "", false, }, + {0, "dummy", "dummy", false, }, +}; + +#define ntests ((int)(sizeof(tests)/sizeof(struct test))) +#define MSGLEN 80 + +int main() +{ + Unittests sellist_test("sellist_test"); + const char *msg; + sellist sl; + char buf[MSGLEN]; + bool check_rc; + + for (int i = 0; i < ntests; i++) { + if (tests[i].nr > 0){ + snprintf(buf, MSGLEN, "Checking test: %d - %s", tests[i].nr, tests[i].sinp); + check_rc = sl.set_string(tests[i].sinp, true); + msg = sl.get_expanded_list(); + ok(check_rc == tests[i].res && strcmp(msg, tests[i].sout) == 0, buf); + } + } + + return report(); +} +#endif /* TEST_PROGRAM */ diff --git a/src/lib/sellist.h b/src/lib/sellist.h new file mode 100644 index 00000000..ba554d03 --- /dev/null +++ b/src/lib/sellist.h @@ -0,0 +1,113 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
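/*
 * A sketch of consuming a selection string with the foreach_sellist macro
 * declared in sellist.h below; the input mirrors one of the unit test
 * cases above, and demo_sellist() is a hypothetical caller.
 */
static void demo_sellist(void)
{
   sellist sl;
   int64_t id;

   if (!sl.set_string("1-5,7", true)) {      /* scan=true validates the string */
      printf("error: %s", sl.get_errmsg());
      return;
   }
   foreach_sellist(id, &sl) {                /* yields 1,2,3,4,5,7 */
      printf("%lld\n", (long long)id);
   }
}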
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Kern Sibbald, January MMXII + * + * Selection list. A string of integers separated by commas + * representing items selected. Ranges of the form nn-mm + * are also permitted. + */ + +#ifndef __SELLIST_H_ +#define __SELLIST_H_ + +/* + * Loop var through each member of list + */ +#define foreach_sellist(var, list) \ + for((var)=(list)->first(); (var)>=0; (var)=(list)->next() ) + + +class sellist : public SMARTALLOC { + const char *errmsg; + char *p, *e, *h; + char esave, hsave; + bool all; + int64_t beg, end; + int num_items; + char *str; + char *expanded; +public: + sellist(); + ~sellist(); + bool set_string(const char *string, bool scan); + bool is_all() { return all; }; + int64_t first(); + int64_t next(); + void begin(); + /* size() valid only if scan enabled on string */ + int size() const { return num_items; }; + char *get_list() { return str; }; + /* get the list of all jobids */ + char *get_expanded_list(); + /* if errmsg == NULL, no error */ + const char *get_errmsg() { return errmsg; }; +}; + +/* + * Initialize the list structure + */ +inline sellist::sellist() +{ + num_items = 0; + expanded = NULL; + str = NULL; + e = NULL; + errmsg = NULL; +} + +/* + * Destroy the list + */ +inline sellist::~sellist() +{ + if (str) { + free(str); + str = NULL; + } + if (expanded) { + free(expanded); + expanded = NULL; + } +} + +/* + * Returns first item + * error if returns -1 and errmsg set + * end of items if returns -1 and errmsg NULL + */ +inline int64_t sellist::first() +{ + begin(); + return next(); +} + +/* + * Reset to walk list from beginning + */ +inline void sellist::begin() +{ + e = str; + end = 0; + beg = 1; + all = false; + errmsg = NULL; +} + +#endif /* __SELLIST_H_ */ diff --git a/src/lib/serial.c b/src/lib/serial.c new file mode 100644 index 00000000..a7d62551 --- /dev/null +++ b/src/lib/serial.c @@ -0,0 +1,332 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + + Serialisation Support Functions + John Walker +*/ + + +#include "bacula.h" +#include "serial.h" + +/* + + NOTE: The following functions should work on any + vaguely contemporary platform. Production + builds should use optimised macros (void + on platforms with network byte order and IEEE + floating point format as native. + +*/ + +/* serial_int16 -- Serialise a signed 16 bit integer. */ + +void serial_int16(uint8_t * * const ptr, const int16_t v) +{ + int16_t vo = htons(v); + + memcpy(*ptr, &vo, sizeof vo); + *ptr += sizeof vo; +} + +/* serial_uint16 -- Serialise an unsigned 16 bit integer. 
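+
+   A minimal round-trip sketch for this family of helpers (illustrative
+   only; the buffer, values and variable names are assumptions, not part
+   of this file):
+
+     uint8_t buf[6], *p = buf;
+     serial_uint16(&p, 0x1234);           // p advances by 2 bytes
+     serial_uint32(&p, 0x89ABCDEFu);      // p advances by 4 bytes
+     p = buf;
+     uint16_t a = unserial_uint16(&p);    // a == 0x1234
+     uint32_t b = unserial_uint32(&p);    // b == 0x89ABCDEF
+
+   The same values come back on any host because everything is stored
+   in network byte order.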
*/ + +void serial_uint16(uint8_t * * const ptr, const uint16_t v) +{ + uint16_t vo = htons(v); + + memcpy(*ptr, &vo, sizeof vo); + *ptr += sizeof vo; +} + +/* serial_int32 -- Serialise a signed 32 bit integer. */ + +void serial_int32(uint8_t * * const ptr, const int32_t v) +{ + int32_t vo = htonl(v); + + memcpy(*ptr, &vo, sizeof vo); + *ptr += sizeof vo; +} + +/* serial_uint32 -- Serialise an unsigned 32 bit integer. */ + +void serial_uint32(uint8_t * * const ptr, const uint32_t v) +{ + uint32_t vo = htonl(v); + + memcpy(*ptr, &vo, sizeof vo); + *ptr += sizeof vo; +} + +/* serial_int64 -- Serialise a signed 64 bit integer. */ + +void serial_int64(uint8_t * * const ptr, const int64_t v) +{ + if (bigendian()) { + memcpy(*ptr, &v, sizeof(int64_t)); + } else { + int i; + uint8_t rv[sizeof(int64_t)]; + uint8_t *pv = (uint8_t *) &v; + + for (i = 0; i < 8; i++) { + rv[i] = pv[7 - i]; + } + memcpy(*ptr, &rv, sizeof(int64_t)); + } + *ptr += sizeof(int64_t); +} + + +/* serial_uint64 -- Serialise an unsigned 64 bit integer. */ + +void serial_uint64(uint8_t * * const ptr, const uint64_t v) +{ + if (bigendian()) { + memcpy(*ptr, &v, sizeof(uint64_t)); + } else { + int i; + uint8_t rv[sizeof(uint64_t)]; + uint8_t *pv = (uint8_t *) &v; + + for (i = 0; i < 8; i++) { + rv[i] = pv[7 - i]; + } + memcpy(*ptr, &rv, sizeof(uint64_t)); + } + *ptr += sizeof(uint64_t); +} + + +/* serial_btime -- Serialise an btime_t 64 bit integer. */ + +void serial_btime(uint8_t * * const ptr, const btime_t v) +{ + if (bigendian()) { + memcpy(*ptr, &v, sizeof(btime_t)); + } else { + int i; + uint8_t rv[sizeof(btime_t)]; + uint8_t *pv = (uint8_t *) &v; + + for (i = 0; i < 8; i++) { + rv[i] = pv[7 - i]; + } + memcpy(*ptr, &rv, sizeof(btime_t)); + } + *ptr += sizeof(btime_t); +} + + + +/* serial_float64 -- Serialise a 64 bit IEEE floating point number. + This code assumes that the host floating point + format is IEEE and that floating point quantities + are stored in IEEE format either LSB first or MSB + first. More creative host formats will require + additional transformations here. */ + +void serial_float64(uint8_t * * const ptr, const float64_t v) +{ + if (bigendian()) { + memcpy(*ptr, &v, sizeof(float64_t)); + } else { + int i; + uint8_t rv[sizeof(float64_t)]; + uint8_t *pv = (uint8_t *) &v; + + for (i = 0; i < 8; i++) { + rv[i] = pv[7 - i]; + } + memcpy(*ptr, &rv, sizeof(float64_t)); + } + *ptr += sizeof(float64_t); +} + +void serial_string(uint8_t * * const ptr, const char * const str) +{ + int i; + char *dest = (char *)*ptr; + char *src = (char *)str; + for (i=0; src[i] != 0; i++) { + dest[i] = src[i]; + } + dest[i++] = 0; /* terminate output string */ + *ptr += i; /* update pointer */ +// Dmsg2(000, "ser src=%s dest=%s\n", src, dest); +} + + +/* unserial_int16 -- Unserialise a signed 16 bit integer. */ + +int16_t unserial_int16(uint8_t * * const ptr) +{ + int16_t vo; + + memcpy(&vo, *ptr, sizeof vo); + *ptr += sizeof vo; + return ntohs(vo); +} + +/* unserial_uint16 -- Unserialise an unsigned 16 bit integer. */ + +uint16_t unserial_uint16(uint8_t * * const ptr) +{ + uint16_t vo; + + memcpy(&vo, *ptr, sizeof vo); + *ptr += sizeof vo; + return ntohs(vo); +} + +/* unserial_int32 -- Unserialise a signed 32 bit integer. */ + +int32_t unserial_int32(uint8_t * * const ptr) +{ + int32_t vo; + + memcpy(&vo, *ptr, sizeof vo); + *ptr += sizeof vo; + return ntohl(vo); +} + +/* unserial_uint32 -- Unserialise an unsigned 32 bit integer. 
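+
+   Note that the byte stream carries no type information, so the
+   unserial_* calls must mirror the corresponding serial_* calls in
+   both order and width, or the values read back will be garbage.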
*/ + +uint32_t unserial_uint32(uint8_t * * const ptr) +{ + uint32_t vo; + + memcpy(&vo, *ptr, sizeof vo); + *ptr += sizeof vo; + return ntohl(vo); +} + +/* unserial_int64 -- Unserialise a 64 bit integer. */ + +int64_t unserial_int64(uint8_t * * const ptr) +{ + int64_t v; + + if (bigendian()) { + memcpy(&v, *ptr, sizeof(int64_t)); + } else { + int i; + uint8_t rv[sizeof(int64_t)]; + uint8_t *pv = (uint8_t *) &v; + + memcpy(&v, *ptr, sizeof(uint64_t)); + for (i = 0; i < 8; i++) { + rv[i] = pv[7 - i]; + } + memcpy(&v, &rv, sizeof(uint64_t)); + } + *ptr += sizeof(uint64_t); + return v; +} + +/* unserial_uint64 -- Unserialise an unsigned 64 bit integer. */ + +uint64_t unserial_uint64(uint8_t * * const ptr) +{ + uint64_t v; + + if (bigendian()) { + memcpy(&v, *ptr, sizeof(uint64_t)); + } else { + int i; + uint8_t rv[sizeof(uint64_t)]; + uint8_t *pv = (uint8_t *) &v; + + memcpy(&v, *ptr, sizeof(uint64_t)); + for (i = 0; i < 8; i++) { + rv[i] = pv[7 - i]; + } + memcpy(&v, &rv, sizeof(uint64_t)); + } + *ptr += sizeof(uint64_t); + return v; +} + +/* unserial_btime -- Unserialise a btime_t 64 bit integer. */ + +btime_t unserial_btime(uint8_t * * const ptr) +{ + btime_t v; + + if (bigendian()) { + memcpy(&v, *ptr, sizeof(btime_t)); + } else { + int i; + uint8_t rv[sizeof(btime_t)]; + uint8_t *pv = (uint8_t *) &v; + + memcpy(&v, *ptr, sizeof(btime_t)); + for (i = 0; i < 8; i++) { + rv[i] = pv[7 - i]; + } + memcpy(&v, &rv, sizeof(btime_t)); + } + *ptr += sizeof(btime_t); + return v; +} + + + +/* unserial_float64 -- Unserialise a 64 bit IEEE floating point number. + This code assumes that the host floating point + format is IEEE and that floating point quantities + are stored in IEEE format either LSB first or MSB + first. More creative host formats will require + additional transformations here. */ + +float64_t unserial_float64(uint8_t * * const ptr) +{ + float64_t v; + + if (bigendian()) { + memcpy(&v, *ptr, sizeof(float64_t)); + } else { + int i; + uint8_t rv[sizeof(float64_t)]; + uint8_t *pv = (uint8_t *) &v; + + memcpy(&v, *ptr, sizeof(float64_t)); + for (i = 0; i < 8; i++) { + rv[i] = pv[7 - i]; + } + memcpy(&v, &rv, sizeof(float64_t)); + } + *ptr += sizeof(float64_t); + return v; +} + +void unserial_string(uint8_t * * const ptr, char * const str, int max) +{ + int i; + char *src = (char*)(*ptr); + char *dest = str; + for (i=0; i (included via "sha1.h" to define 32 and 8 + * bit unsigned integer types. If your C compiler does not + * support 32 bit unsigned integers, this code is not + * appropriate. + * + * Caveats: + * SHA-1 is designed to work with messages less than 2^64 bits + * long. Although SHA-1 allows a message digest to be generated + * for messages of any number of bits less than 2^64, this + * implementation only works with messages with a length that is + * a multiple of the size of an 8-bit character. + * + * See sha1.h for copyright + */ + +#include "sha1.h" + +/* + * Define the SHA1 circular left shift macro + */ +#define SHA1CircularShift(bits,word) \ + (((word) << (bits)) | ((word) >> (32-(bits)))) + +/* Local Function Prototyptes */ +static void SHA1PadMessage(SHA1Context *); +static void SHA1ProcessMessageBlock(SHA1Context *); + +/* + * SHA1Init + * + * Description: + * This function will initialize the SHA1Context in preparation + * for computing a new SHA1 message digest. + * + * Parameters: + * context: [in/out] + * The context to reset. + * + * Returns: + * sha Error Code. 
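+ *
+ * A typical use of the whole API, shown only as an illustration (the
+ * data and data_len names are assumptions, not part of this file):
+ *
+ *      SHA1Context ctx;
+ *      uint8_t digest[SHA1HashSize];
+ *      SHA1Init(&ctx);
+ *      SHA1Update(&ctx, (const uint8_t *)data, data_len);
+ *      SHA1Final(&ctx, digest);     digest now holds the 20-byte hash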
+ * + */ +int SHA1Init(SHA1Context *context) +{ + if (!context) + { + return shaNull; + } + + context->Length_Low = 0; + context->Length_High = 0; + context->Message_Block_Index = 0; + + context->Intermediate_Hash[0] = 0x67452301; + context->Intermediate_Hash[1] = 0xEFCDAB89; + context->Intermediate_Hash[2] = 0x98BADCFE; + context->Intermediate_Hash[3] = 0x10325476; + context->Intermediate_Hash[4] = 0xC3D2E1F0; + + context->Computed = 0; + context->Corrupted = 0; + + return shaSuccess; +} + +/* + * SHA1Final + * + * Description: + * This function will return the 160-bit message digest into the + * Message_Digest array provided by the caller. + * NOTE: The first octet of hash is stored in the 0th element, + * the last octet of hash in the 19th element. + * + * Parameters: + * context: [in/out] + * The context to use to calculate the SHA-1 hash. + * Message_Digest: [out] + * Where the digest is returned. + * + * Returns: + * sha Error Code. + * + */ +int SHA1Final(SHA1Context *context, + uint8_t Message_Digest[SHA1HashSize]) +{ + int i; + + if (!context || !Message_Digest) { + return shaNull; + } + + if (context->Corrupted) { + return context->Corrupted; + } + + if (!context->Computed) { + SHA1PadMessage(context); + for(i=0; i<64; ++i) { + /* message may be sensitive, clear it out */ + context->Message_Block[i] = 0; + } + context->Length_Low = 0; /* and clear length */ + context->Length_High = 0; + context->Computed = 1; + + } + + for(i = 0; i < SHA1HashSize; ++i) { + Message_Digest[i] = context->Intermediate_Hash[i>>2] + >> 8 * ( 3 - ( i & 0x03 ) ); + } + + return shaSuccess; +} + +/* + * SHA1Update + * + * Description: + * This function accepts an array of octets as the next portion + * of the message. + * + * Parameters: + * context: [in/out] + * The SHA context to update + * message_array: [in] + * An array of characters representing the next portion of + * the message. + * length: [in] + * The length of the message in message_array + * + * Returns: + * sha Error Code. + * + */ +int SHA1Update(SHA1Context *context, + const uint8_t *message_array, + unsigned length) +{ + if (!length) { + return shaSuccess; + } + + if (!context || !message_array) { + return shaNull; + } + + if (context->Computed) { + context->Corrupted = shaStateError; + + return shaStateError; + } + + if (context->Corrupted) { + return context->Corrupted; + } + while(length-- && !context->Corrupted) { + context->Message_Block[context->Message_Block_Index++] = + (*message_array & 0xFF); + + context->Length_Low += 8; + if (context->Length_Low == 0) { + context->Length_High++; + if (context->Length_High == 0) { + /* Message is too long */ + context->Corrupted = 1; + } + } + + if (context->Message_Block_Index == 64) { + SHA1ProcessMessageBlock(context); + } + + message_array++; + } + + return shaSuccess; +} + +/* + * SHA1ProcessMessageBlock + * + * Description: + * This function will process the next 512 bits of the message + * stored in the Message_Block array. + * + * Parameters: + * None. + * + * Returns: + * Nothing. + * + * Comments: + + * Many of the variable names in this code, especially the + * single character names, were used because those were the + * names used in the publication. 
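+ *      The four 20-iteration loops below correspond to the four round
+ *      functions and constants K[0..3] defined in FIPS PUB 180-1.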
+ * + * + */ +static void SHA1ProcessMessageBlock(SHA1Context *context) +{ + const uint32_t K[] = { /* Constants defined in SHA-1 */ + 0x5A827999, + 0x6ED9EBA1, + 0x8F1BBCDC, + 0xCA62C1D6 + }; + int t; /* Loop counter */ + uint32_t temp; /* Temporary word value */ + uint32_t W[80]; /* Word sequence */ + uint32_t A, B, C, D, E; /* Word buffers */ + + /* + * Initialize the first 16 words in the array W + */ + for(t = 0; t < 16; t++) { + W[t] = context->Message_Block[t * 4] << 24; + W[t] |= context->Message_Block[t * 4 + 1] << 16; + W[t] |= context->Message_Block[t * 4 + 2] << 8; + W[t] |= context->Message_Block[t * 4 + 3]; + } + + for(t = 16; t < 80; t++) { + W[t] = SHA1CircularShift(1,W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16]); + } + + A = context->Intermediate_Hash[0]; + B = context->Intermediate_Hash[1]; + C = context->Intermediate_Hash[2]; + D = context->Intermediate_Hash[3]; + E = context->Intermediate_Hash[4]; + + for(t = 0; t < 20; t++) { + temp = SHA1CircularShift(5,A) + + ((B & C) | ((~B) & D)) + E + W[t] + K[0]; + E = D; + D = C; + C = SHA1CircularShift(30,B); + + B = A; + A = temp; + } + + for(t = 20; t < 40; t++) { + temp = SHA1CircularShift(5,A) + (B ^ C ^ D) + E + W[t] + K[1]; + E = D; + D = C; + C = SHA1CircularShift(30,B); + B = A; + A = temp; + } + + for(t = 40; t < 60; t++) { + temp = SHA1CircularShift(5,A) + + ((B & C) | (B & D) | (C & D)) + E + W[t] + K[2]; + E = D; + D = C; + C = SHA1CircularShift(30,B); + B = A; + A = temp; + } + + for(t = 60; t < 80; t++) { + temp = SHA1CircularShift(5,A) + (B ^ C ^ D) + E + W[t] + K[3]; + E = D; + D = C; + C = SHA1CircularShift(30,B); + B = A; + A = temp; + } + + context->Intermediate_Hash[0] += A; + context->Intermediate_Hash[1] += B; + context->Intermediate_Hash[2] += C; + context->Intermediate_Hash[3] += D; + context->Intermediate_Hash[4] += E; + + context->Message_Block_Index = 0; +} + +/* + * SHA1PadMessage + * + + * Description: + * According to the standard, the message must be padded to an even + * 512 bits. The first padding bit must be a '1'. The last 64 + * bits represent the length of the original message. All bits in + * between should be 0. This function will pad the message + * according to those rules by filling the Message_Block array + * accordingly. It will also call the ProcessMessageBlock function + * provided appropriately. When it returns, it can be assumed that + * the message digest has been computed. + * + * Parameters: + * context: [in/out] + * The context to pad + * ProcessMessageBlock: [in] + * The appropriate SHA*ProcessMessageBlock function + * Returns: + * Nothing. + * + */ + +static void SHA1PadMessage(SHA1Context *context) +{ + /* + * Check to see if the current message block is too small to hold + * the initial padding bits and length. If so, we will pad the + * block, process it, and then continue padding into a second + * block. 
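+     * The padding always consists of a single 0x80 byte immediately
+     * after the message, then zero bytes, and finally the original
+     * message length in bits stored big-endian in the last 8 octets
+     * (offsets 56..63) of the last block, exactly as written below.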
+ */ + if (context->Message_Block_Index > 55) { + context->Message_Block[context->Message_Block_Index++] = 0x80; + while(context->Message_Block_Index < 64) { + context->Message_Block[context->Message_Block_Index++] = 0; + } + + SHA1ProcessMessageBlock(context); + + while(context->Message_Block_Index < 56) { + context->Message_Block[context->Message_Block_Index++] = 0; + } + } else { + context->Message_Block[context->Message_Block_Index++] = 0x80; + while(context->Message_Block_Index < 56) { + + context->Message_Block[context->Message_Block_Index++] = 0; + } + } + + /* + * Store the message length as the last 8 octets + */ + context->Message_Block[56] = context->Length_High >> 24; + context->Message_Block[57] = context->Length_High >> 16; + context->Message_Block[58] = context->Length_High >> 8; + context->Message_Block[59] = context->Length_High; + context->Message_Block[60] = context->Length_Low >> 24; + context->Message_Block[61] = context->Length_Low >> 16; + context->Message_Block[62] = context->Length_Low >> 8; + context->Message_Block[63] = context->Length_Low; + + SHA1ProcessMessageBlock(context); +} + + +#ifndef TEST_PROGRAM +#define TEST_PROGRAM_A +#endif + +#ifdef TEST_PROGRAM +/* + * sha1test.c + * + * Description: + * This file will exercise the SHA-1 code performing the three + * tests documented in FIPS PUB 180-1 plus one which calls + * SHA1Input with an exact multiple of 512 bits, plus a few + * error test checks. + * + * Portability Issues: + * None. + * + */ + +#include +#include +#include +#include "sha1.h" +#include "unittests.h" + +#define NRTESTS 4 + +/* + * Define patterns for testing + */ +#define TEST1 "abc" +#define TEST2a "abcdbcdecdefdefgefghfghighijhi" + +#define TEST2b "jkijkljklmklmnlmnomnopnopq" +#define TEST2 TEST2a TEST2b +#define TEST3 "a" +#define TEST4a "01234567012345670123456701234567" +#define TEST4b "01234567012345670123456701234567" + /* an exact multiple of 512 bits */ +#define TEST4 TEST4a TEST4b + +const char *testarray[NRTESTS] = +{ + TEST1, + TEST2, + TEST3, + TEST4 +}; + +int repeatcount[NRTESTS] = { 1, 1, 1000000, 10 }; + +const uint8_t resultarray[NRTESTS][20] = +{ + { 0xA9, 0x99, 0x3E, 0x36, 0x47, 0x06, 0x81, 0x6A, 0xBA, 0x3E, 0x25, 0x71, 0x78, 0x50, 0xC2, 0x6C, 0x9C, 0xD0, 0xD8, 0x9D }, + { 0x84, 0x98, 0x3E, 0x44, 0x1C, 0x3B, 0xD2, 0x6E, 0xBA, 0xAE, 0x4A, 0xA1, 0xF9, 0x51, 0x29, 0xE5, 0xE5, 0x46, 0x70, 0xF1 }, + { 0x34, 0xAA, 0x97, 0x3C, 0xD4, 0xC4, 0xDA, 0xA4, 0xF6, 0x1E, 0xEB, 0x2B, 0xDB, 0xAD, 0x27, 0x31, 0x65, 0x34, 0x01, 0x6F }, + { 0xDE, 0xA3, 0x56, 0xA2, 0xCD, 0xDD, 0x90, 0xC7, 0xA7, 0xEC, 0xED, 0xC5, 0xEB, 0xB5, 0x63, 0x93, 0x4F, 0x46, 0x04, 0x52 }, +}; + +int main() +{ + Unittests sha1_test("sha1_test"); + SHA1Context sha; + int i, j, err; + uint8_t Message_Digest[20]; + bool check_cont; + bool ct; + + /* + * Perform SHA-1 tests + */ + for(j = 0; j < 4; ++j) { + // printf( "\nTest %d: %d, '%s'\n", j+1, repeatcount[j], testarray[j]); + err = SHA1Init(&sha); + nok(err, "Test SHA1Init"); + if (err) { + break; /* out of for j loop */ + } + ct = true; + for(i = 0; i < repeatcount[j]; ++i) { + err = SHA1Update(&sha, (const unsigned char *) testarray[j], strlen(testarray[j])); + if (i < 5){ + nok(err, "Test SHA1Update"); + } + if (ct && repeatcount[j] > 4 && i > 4){ + ct = false; + printf("...\n"); + } + if (err) { + nok(err, "Test SHA1Update"); + break; /* out of for i loop */ + } + } + + err = SHA1Final(&sha, Message_Digest); + nok(err, "Test SHA1Final"); + check_cont = true; + for(i = 0; i < 20 ; ++i) { + if (Message_Digest[i] != 
resultarray[j][i]){ + check_cont = false; + } + } + ok(check_cont, "Checking expected result"); + } + + /* Test some error returns */ + err = SHA1Update(&sha,(const unsigned char *) testarray[1], 1); + ok(err == shaStateError, "Checking for shaStateError"); + err = SHA1Init(0); + ok(err == shaNull, "Checking for shaNull"); + + return report(); +} + +#endif /* TEST_PROGRAM */ + +#ifdef SHA1_SUM +/* + * Reads a single ASCII file and prints the HEX sha1 sum. + */ +#include +int main(int argc, char *argv[]) +{ + FILE *fd; + SHA1Context ctx; + char buf[5000]; + char signature[25]; + + if (argc < 1) { + printf("Must have filename\n"); + exit(1); + } + fd = fopen(argv[1], "rb"); + if (!fd) { + berrno be; + printf("Could not open %s: ERR=%s\n", argv[1], be.bstrerror(errno)); + exit(1); + } + SHA1Init(&ctx); + while (fgets(buf, sizeof(buf), fd)) { + SHA1Update(&ctx, (unsigned char *)buf, strlen(buf)); + } + SHA1Final(&ctx, (unsigned char *)signature); + for (int i=0; i < 20; i++) { + printf("%02x", signature[i]& 0xFF); + } + printf(" %s\n", argv[1]); + fclose(fd); +} +#endif /* SHA1_SUM */ diff --git a/src/lib/sha1.h b/src/lib/sha1.h new file mode 100644 index 00000000..437fe19c --- /dev/null +++ b/src/lib/sha1.h @@ -0,0 +1,107 @@ +/* + * sha1.h + * + * Description: + * This is the header file for code which implements the Secure + * Hashing Algorithm 1 as defined in FIPS PUB 180-1 published + * April 17, 1995. + * + * Many of the variable names in this code, especially the + * single character names, were used because those were the names + * used in the publication. + * + * Please read the file sha1.c for more information. + * + * Full Copyright Statement + * + * Copyright (C) The Internet Society (2001). All Rights Reserved. + * + * This document and translations of it may be copied and furnished to + * others, and derivative works that comment on or otherwise explain it + * or assist in its implementation may be prepared, copied, published + * and distributed, in whole or in part, without restriction of any + * kind, provided that the above copyright notice and this paragraph are + * included on all such copies and derivative works. However, this + * document itself may not be modified in any way, such as by removing + * the copyright notice or references to the Internet Society or other + * Internet organizations, except as needed for the purpose of + * developing Internet standards in which case the procedures for + * copyrights defined in the Internet Standards process must be + * followed, or as required to translate it into languages other than + * English. + * + * The limited permissions granted above are perpetual and will not be + * revoked by the Internet Society or its successors or assigns. + * + * This document and the information contained herein is provided on an + * "AS IS" basis and THE INTERNET SOCIETY AND THE INTERNET ENGINEERING + * TASK FORCE DISCLAIMS ALL WARRANTIES, EXPRESS OR IMPLIED, INCLUDING + * BUT NOT LIMITED TO ANY WARRANTY THAT THE USE OF THE INFORMATION + * HEREIN WILL NOT INFRINGE ANY RIGHTS OR ANY IMPLIED WARRANTIES OF + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. + * + * Acknowledgement + * + * Funding for the RFC Editor function is currently provided by the + * Internet Society. 
+ * + */ + +#ifndef _SHA1_H_ +#define _SHA1_H_ + +#include "bacula.h" + +/* + * If you do not have the ISO standard stdint.h header file, then you + * must typdef the following: + * name meaning + * uint32_t unsigned 32 bit integer + * uint8_t unsigned 8 bit integer (i.e., unsigned char) + * int32_t integer of 32 bits + * + */ + +#ifndef _SHA_enum_ +#define _SHA_enum_ +enum +{ + shaSuccess = 0, + shaNull, /* Null pointer parameter */ + shaInputTooLong, /* input data too long */ + shaStateError /* called Input after Result */ +}; +#endif +#define SHA1HashSize 20 + +/* + * This structure will hold context information for the SHA-1 + * hashing operation + */ +typedef struct SHA1Context +{ + uint32_t Intermediate_Hash[SHA1HashSize/4]; /* Message Digest */ + + uint32_t Length_Low; /* Message length in bits */ + uint32_t Length_High; /* Message length in bits */ + + /* Index into message block array */ + int32_t Message_Block_Index; + uint8_t Message_Block[64]; /* 512-bit message blocks */ + + int Computed; /* Is the digest computed? */ + int Corrupted; /* Is the message digest corrupted? */ +} SHA1Context; + +/* + * Function Prototypes + */ + +int SHA1Init(SHA1Context *); +int SHA1Update(SHA1Context *, + const uint8_t *, + unsigned int); +int SHA1Final(SHA1Context *, + uint8_t Message_Digest[SHA1HashSize]); + +#endif diff --git a/src/lib/sha2.c b/src/lib/sha2.c new file mode 100644 index 00000000..56fb5180 --- /dev/null +++ b/src/lib/sha2.c @@ -0,0 +1,950 @@ +/* + * FIPS 180-2 SHA-224/256/384/512 implementation + * Last update: 02/02/2007 + * Issue date: 04/30/2005 + * + * Copyright (C) 2005, 2007 Olivier Gay + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#if 0 +#define UNROLL_LOOPS /* Enable loops unrolling */ +#endif + +#include "sha2.h" + +#ifndef HAVE_SHA2 + +#define SHFR(x, n) (x >> n) +#define ROTR(x, n) ((x >> n) | (x << ((sizeof(x) << 3) - n))) +#define ROTL(x, n) ((x << n) | (x >> ((sizeof(x) << 3) - n))) +#define CH(x, y, z) ((x & y) ^ (~x & z)) +#define MAJ(x, y, z) ((x & y) ^ (x & z) ^ (y & z)) + +#define SHA256_F1(x) (ROTR(x, 2) ^ ROTR(x, 13) ^ ROTR(x, 22)) +#define SHA256_F2(x) (ROTR(x, 6) ^ ROTR(x, 11) ^ ROTR(x, 25)) +#define SHA256_F3(x) (ROTR(x, 7) ^ ROTR(x, 18) ^ SHFR(x, 3)) +#define SHA256_F4(x) (ROTR(x, 17) ^ ROTR(x, 19) ^ SHFR(x, 10)) + +#define SHA512_F1(x) (ROTR(x, 28) ^ ROTR(x, 34) ^ ROTR(x, 39)) +#define SHA512_F2(x) (ROTR(x, 14) ^ ROTR(x, 18) ^ ROTR(x, 41)) +#define SHA512_F3(x) (ROTR(x, 1) ^ ROTR(x, 8) ^ SHFR(x, 7)) +#define SHA512_F4(x) (ROTR(x, 19) ^ ROTR(x, 61) ^ SHFR(x, 6)) + +#define UNPACK32(x, str) \ +{ \ + *((str) + 3) = (uint8_t) ((x) ); \ + *((str) + 2) = (uint8_t) ((x) >> 8); \ + *((str) + 1) = (uint8_t) ((x) >> 16); \ + *((str) + 0) = (uint8_t) ((x) >> 24); \ +} + +#define PACK32(str, x) \ +{ \ + *(x) = ((uint32_t) *((str) + 3) ) \ + | ((uint32_t) *((str) + 2) << 8) \ + | ((uint32_t) *((str) + 1) << 16) \ + | ((uint32_t) *((str) + 0) << 24); \ +} + +#define UNPACK64(x, str) \ +{ \ + *((str) + 7) = (uint8_t) ((x) ); \ + *((str) + 6) = (uint8_t) ((x) >> 8); \ + *((str) + 5) = (uint8_t) ((x) >> 16); \ + *((str) + 4) = (uint8_t) ((x) >> 24); \ + *((str) + 3) = (uint8_t) ((x) >> 32); \ + *((str) + 2) = (uint8_t) ((x) >> 40); \ + *((str) + 1) = (uint8_t) ((x) >> 48); \ + *((str) + 0) = (uint8_t) ((x) >> 56); \ +} + +#define PACK64(str, x) \ +{ \ + *(x) = ((uint64_t) *((str) + 7) ) \ + | ((uint64_t) *((str) + 6) << 8) \ + | ((uint64_t) *((str) + 5) << 16) \ + | ((uint64_t) *((str) + 4) << 24) \ + | ((uint64_t) *((str) + 3) << 32) \ + | ((uint64_t) *((str) + 2) << 40) \ + | ((uint64_t) *((str) + 1) << 48) \ + | ((uint64_t) *((str) + 0) << 56); \ +} + +/* Macros used for loops unrolling */ + +#define SHA256_SCR(i) \ +{ \ + w[i] = SHA256_F4(w[i - 2]) + w[i - 7] \ + + SHA256_F3(w[i - 15]) + w[i - 16]; \ +} + +#define SHA512_SCR(i) \ +{ \ + w[i] = SHA512_F4(w[i - 2]) + w[i - 7] \ + + SHA512_F3(w[i - 15]) + w[i - 16]; \ +} + +#define SHA256_EXP(a, b, c, d, e, f, g, h, j) \ +{ \ + t1 = wv[h] + SHA256_F2(wv[e]) + CH(wv[e], wv[f], wv[g]) \ + + sha256_k[j] + w[j]; \ + t2 = SHA256_F1(wv[a]) + MAJ(wv[a], wv[b], wv[c]); \ + wv[d] += t1; \ + wv[h] = t1 + t2; \ +} + +#define SHA512_EXP(a, b, c, d, e, f, g ,h, j) \ +{ \ + t1 = wv[h] + SHA512_F2(wv[e]) + CH(wv[e], wv[f], wv[g]) \ + + sha512_k[j] + w[j]; \ + t2 = SHA512_F1(wv[a]) + MAJ(wv[a], wv[b], wv[c]); \ + wv[d] += t1; \ + wv[h] = t1 + t2; \ +} + +uint32_t sha224_h0[8] = + {0xc1059ed8, 0x367cd507, 0x3070dd17, 0xf70e5939, + 0xffc00b31, 0x68581511, 0x64f98fa7, 0xbefa4fa4}; + +uint32_t sha256_h0[8] = + {0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, + 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19}; + +uint64_t sha384_h0[8] = + {0xcbbb9d5dc1059ed8ULL, 0x629a292a367cd507ULL, + 0x9159015a3070dd17ULL, 0x152fecd8f70e5939ULL, + 0x67332667ffc00b31ULL, 0x8eb44a8768581511ULL, + 0xdb0c2e0d64f98fa7ULL, 0x47b5481dbefa4fa4ULL}; + +uint64_t sha512_h0[8] = + {0x6a09e667f3bcc908ULL, 0xbb67ae8584caa73bULL, + 0x3c6ef372fe94f82bULL, 0xa54ff53a5f1d36f1ULL, + 0x510e527fade682d1ULL, 0x9b05688c2b3e6c1fULL, + 0x1f83d9abfb41bd6bULL, 0x5be0cd19137e2179ULL}; + +uint32_t sha256_k[64] = + {0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, + 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, + 
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, + 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, + 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, + 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, + 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, + 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, + 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, + 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, + 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, + 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, + 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, + 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, + 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, + 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2}; + +uint64_t sha512_k[80] = + {0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL, + 0xb5c0fbcfec4d3b2fULL, 0xe9b5dba58189dbbcULL, + 0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL, + 0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL, + 0xd807aa98a3030242ULL, 0x12835b0145706fbeULL, + 0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL, + 0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL, + 0x9bdc06a725c71235ULL, 0xc19bf174cf692694ULL, + 0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL, + 0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL, + 0x2de92c6f592b0275ULL, 0x4a7484aa6ea6e483ULL, + 0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL, + 0x983e5152ee66dfabULL, 0xa831c66d2db43210ULL, + 0xb00327c898fb213fULL, 0xbf597fc7beef0ee4ULL, + 0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL, + 0x06ca6351e003826fULL, 0x142929670a0e6e70ULL, + 0x27b70a8546d22ffcULL, 0x2e1b21385c26c926ULL, + 0x4d2c6dfc5ac42aedULL, 0x53380d139d95b3dfULL, + 0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL, + 0x81c2c92e47edaee6ULL, 0x92722c851482353bULL, + 0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL, + 0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL, + 0xd192e819d6ef5218ULL, 0xd69906245565a910ULL, + 0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL, + 0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL, + 0x2748774cdf8eeb99ULL, 0x34b0bcb5e19b48a8ULL, + 0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL, + 0x5b9cca4f7763e373ULL, 0x682e6ff3d6b2b8a3ULL, + 0x748f82ee5defb2fcULL, 0x78a5636f43172f60ULL, + 0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL, + 0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL, + 0xbef9a3f7b2c67915ULL, 0xc67178f2e372532bULL, + 0xca273eceea26619cULL, 0xd186b8c721c0c207ULL, + 0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL, + 0x06f067aa72176fbaULL, 0x0a637dc5a2c898a6ULL, + 0x113f9804bef90daeULL, 0x1b710b35131c471bULL, + 0x28db77f523047d84ULL, 0x32caab7b40c72493ULL, + 0x3c9ebe0a15c9bebcULL, 0x431d67c49c100d4cULL, + 0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL, + 0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL}; + +/* SHA-256 functions */ + +void sha256_transf(sha256_ctx *ctx, const unsigned char *message, + unsigned int block_nb) +{ + uint32_t w[64]; + uint32_t wv[8]; + uint32_t t1, t2; + const unsigned char *sub_block; + int i; + +#ifndef UNROLL_LOOPS + int j; +#endif + + for (i = 0; i < (int) block_nb; i++) { + sub_block = message + (i << 6); + +#ifndef UNROLL_LOOPS + for (j = 0; j < 16; j++) { + PACK32(&sub_block[j << 2], &w[j]); + } + + for (j = 16; j < 64; j++) { + SHA256_SCR(j); + } + + for (j = 0; j < 8; j++) { + wv[j] = ctx->h[j]; + } + + for (j = 0; j < 64; j++) { + t1 = wv[7] + SHA256_F2(wv[4]) + CH(wv[4], wv[5], wv[6]) + + sha256_k[j] + w[j]; + t2 = SHA256_F1(wv[0]) + MAJ(wv[0], wv[1], wv[2]); + wv[7] = wv[6]; + wv[6] = wv[5]; + wv[5] = wv[4]; + wv[4] = wv[3] + t1; + wv[3] = wv[2]; + wv[2] = wv[1]; + wv[1] = wv[0]; + wv[0] = t1 + t2; + } + + for (j = 0; j < 8; j++) { + ctx->h[j] += wv[j]; 
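+            /* Fold the working variables back into the chaining value, as required by FIPS 180-2 */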
+ } +#else + PACK32(&sub_block[ 0], &w[ 0]); PACK32(&sub_block[ 4], &w[ 1]); + PACK32(&sub_block[ 8], &w[ 2]); PACK32(&sub_block[12], &w[ 3]); + PACK32(&sub_block[16], &w[ 4]); PACK32(&sub_block[20], &w[ 5]); + PACK32(&sub_block[24], &w[ 6]); PACK32(&sub_block[28], &w[ 7]); + PACK32(&sub_block[32], &w[ 8]); PACK32(&sub_block[36], &w[ 9]); + PACK32(&sub_block[40], &w[10]); PACK32(&sub_block[44], &w[11]); + PACK32(&sub_block[48], &w[12]); PACK32(&sub_block[52], &w[13]); + PACK32(&sub_block[56], &w[14]); PACK32(&sub_block[60], &w[15]); + + SHA256_SCR(16); SHA256_SCR(17); SHA256_SCR(18); SHA256_SCR(19); + SHA256_SCR(20); SHA256_SCR(21); SHA256_SCR(22); SHA256_SCR(23); + SHA256_SCR(24); SHA256_SCR(25); SHA256_SCR(26); SHA256_SCR(27); + SHA256_SCR(28); SHA256_SCR(29); SHA256_SCR(30); SHA256_SCR(31); + SHA256_SCR(32); SHA256_SCR(33); SHA256_SCR(34); SHA256_SCR(35); + SHA256_SCR(36); SHA256_SCR(37); SHA256_SCR(38); SHA256_SCR(39); + SHA256_SCR(40); SHA256_SCR(41); SHA256_SCR(42); SHA256_SCR(43); + SHA256_SCR(44); SHA256_SCR(45); SHA256_SCR(46); SHA256_SCR(47); + SHA256_SCR(48); SHA256_SCR(49); SHA256_SCR(50); SHA256_SCR(51); + SHA256_SCR(52); SHA256_SCR(53); SHA256_SCR(54); SHA256_SCR(55); + SHA256_SCR(56); SHA256_SCR(57); SHA256_SCR(58); SHA256_SCR(59); + SHA256_SCR(60); SHA256_SCR(61); SHA256_SCR(62); SHA256_SCR(63); + + wv[0] = ctx->h[0]; wv[1] = ctx->h[1]; + wv[2] = ctx->h[2]; wv[3] = ctx->h[3]; + wv[4] = ctx->h[4]; wv[5] = ctx->h[5]; + wv[6] = ctx->h[6]; wv[7] = ctx->h[7]; + + SHA256_EXP(0,1,2,3,4,5,6,7, 0); SHA256_EXP(7,0,1,2,3,4,5,6, 1); + SHA256_EXP(6,7,0,1,2,3,4,5, 2); SHA256_EXP(5,6,7,0,1,2,3,4, 3); + SHA256_EXP(4,5,6,7,0,1,2,3, 4); SHA256_EXP(3,4,5,6,7,0,1,2, 5); + SHA256_EXP(2,3,4,5,6,7,0,1, 6); SHA256_EXP(1,2,3,4,5,6,7,0, 7); + SHA256_EXP(0,1,2,3,4,5,6,7, 8); SHA256_EXP(7,0,1,2,3,4,5,6, 9); + SHA256_EXP(6,7,0,1,2,3,4,5,10); SHA256_EXP(5,6,7,0,1,2,3,4,11); + SHA256_EXP(4,5,6,7,0,1,2,3,12); SHA256_EXP(3,4,5,6,7,0,1,2,13); + SHA256_EXP(2,3,4,5,6,7,0,1,14); SHA256_EXP(1,2,3,4,5,6,7,0,15); + SHA256_EXP(0,1,2,3,4,5,6,7,16); SHA256_EXP(7,0,1,2,3,4,5,6,17); + SHA256_EXP(6,7,0,1,2,3,4,5,18); SHA256_EXP(5,6,7,0,1,2,3,4,19); + SHA256_EXP(4,5,6,7,0,1,2,3,20); SHA256_EXP(3,4,5,6,7,0,1,2,21); + SHA256_EXP(2,3,4,5,6,7,0,1,22); SHA256_EXP(1,2,3,4,5,6,7,0,23); + SHA256_EXP(0,1,2,3,4,5,6,7,24); SHA256_EXP(7,0,1,2,3,4,5,6,25); + SHA256_EXP(6,7,0,1,2,3,4,5,26); SHA256_EXP(5,6,7,0,1,2,3,4,27); + SHA256_EXP(4,5,6,7,0,1,2,3,28); SHA256_EXP(3,4,5,6,7,0,1,2,29); + SHA256_EXP(2,3,4,5,6,7,0,1,30); SHA256_EXP(1,2,3,4,5,6,7,0,31); + SHA256_EXP(0,1,2,3,4,5,6,7,32); SHA256_EXP(7,0,1,2,3,4,5,6,33); + SHA256_EXP(6,7,0,1,2,3,4,5,34); SHA256_EXP(5,6,7,0,1,2,3,4,35); + SHA256_EXP(4,5,6,7,0,1,2,3,36); SHA256_EXP(3,4,5,6,7,0,1,2,37); + SHA256_EXP(2,3,4,5,6,7,0,1,38); SHA256_EXP(1,2,3,4,5,6,7,0,39); + SHA256_EXP(0,1,2,3,4,5,6,7,40); SHA256_EXP(7,0,1,2,3,4,5,6,41); + SHA256_EXP(6,7,0,1,2,3,4,5,42); SHA256_EXP(5,6,7,0,1,2,3,4,43); + SHA256_EXP(4,5,6,7,0,1,2,3,44); SHA256_EXP(3,4,5,6,7,0,1,2,45); + SHA256_EXP(2,3,4,5,6,7,0,1,46); SHA256_EXP(1,2,3,4,5,6,7,0,47); + SHA256_EXP(0,1,2,3,4,5,6,7,48); SHA256_EXP(7,0,1,2,3,4,5,6,49); + SHA256_EXP(6,7,0,1,2,3,4,5,50); SHA256_EXP(5,6,7,0,1,2,3,4,51); + SHA256_EXP(4,5,6,7,0,1,2,3,52); SHA256_EXP(3,4,5,6,7,0,1,2,53); + SHA256_EXP(2,3,4,5,6,7,0,1,54); SHA256_EXP(1,2,3,4,5,6,7,0,55); + SHA256_EXP(0,1,2,3,4,5,6,7,56); SHA256_EXP(7,0,1,2,3,4,5,6,57); + SHA256_EXP(6,7,0,1,2,3,4,5,58); SHA256_EXP(5,6,7,0,1,2,3,4,59); + SHA256_EXP(4,5,6,7,0,1,2,3,60); SHA256_EXP(3,4,5,6,7,0,1,2,61); + 
SHA256_EXP(2,3,4,5,6,7,0,1,62); SHA256_EXP(1,2,3,4,5,6,7,0,63); + + ctx->h[0] += wv[0]; ctx->h[1] += wv[1]; + ctx->h[2] += wv[2]; ctx->h[3] += wv[3]; + ctx->h[4] += wv[4]; ctx->h[5] += wv[5]; + ctx->h[6] += wv[6]; ctx->h[7] += wv[7]; +#endif /* !UNROLL_LOOPS */ + } +} + +void sha256(const unsigned char *message, unsigned int len, unsigned char *digest) +{ + sha256_ctx ctx; + + sha256_init(&ctx); + sha256_update(&ctx, message, len); + sha256_final(&ctx, digest); +} + +void sha256_init(sha256_ctx *ctx) +{ +#ifndef UNROLL_LOOPS + int i; + for (i = 0; i < 8; i++) { + ctx->h[i] = sha256_h0[i]; + } +#else + ctx->h[0] = sha256_h0[0]; ctx->h[1] = sha256_h0[1]; + ctx->h[2] = sha256_h0[2]; ctx->h[3] = sha256_h0[3]; + ctx->h[4] = sha256_h0[4]; ctx->h[5] = sha256_h0[5]; + ctx->h[6] = sha256_h0[6]; ctx->h[7] = sha256_h0[7]; +#endif /* !UNROLL_LOOPS */ + + ctx->len = 0; + ctx->tot_len = 0; +} + +void sha256_update(sha256_ctx *ctx, const unsigned char *message, + unsigned int len) +{ + unsigned int block_nb; + unsigned int new_len, rem_len, tmp_len; + const unsigned char *shifted_message; + + tmp_len = SHA256_BLOCK_SIZE - ctx->len; + rem_len = len < tmp_len ? len : tmp_len; + + memcpy(&ctx->block[ctx->len], message, rem_len); + + if (ctx->len + len < SHA256_BLOCK_SIZE) { + ctx->len += len; + return; + } + + new_len = len - rem_len; + block_nb = new_len / SHA256_BLOCK_SIZE; + + shifted_message = message + rem_len; + + sha256_transf(ctx, ctx->block, 1); + sha256_transf(ctx, shifted_message, block_nb); + + rem_len = new_len % SHA256_BLOCK_SIZE; + + memcpy(ctx->block, &shifted_message[block_nb << 6], + rem_len); + + ctx->len = rem_len; + ctx->tot_len += (block_nb + 1) << 6; +} + +void sha256_final(sha256_ctx *ctx, unsigned char *digest) +{ + unsigned int block_nb; + unsigned int pm_len; + unsigned int len_b; + +#ifndef UNROLL_LOOPS + int i; +#endif + + block_nb = (1 + ((SHA256_BLOCK_SIZE - 9) + < (ctx->len % SHA256_BLOCK_SIZE))); + + len_b = (ctx->tot_len + ctx->len) << 3; + pm_len = block_nb << 6; + + memset(ctx->block + ctx->len, 0, pm_len - ctx->len); + ctx->block[ctx->len] = 0x80; + UNPACK32(len_b, ctx->block + pm_len - 4); + + sha256_transf(ctx, ctx->block, block_nb); + +#ifndef UNROLL_LOOPS + for (i = 0 ; i < 8; i++) { + UNPACK32(ctx->h[i], &digest[i << 2]); + } +#else + UNPACK32(ctx->h[0], &digest[ 0]); + UNPACK32(ctx->h[1], &digest[ 4]); + UNPACK32(ctx->h[2], &digest[ 8]); + UNPACK32(ctx->h[3], &digest[12]); + UNPACK32(ctx->h[4], &digest[16]); + UNPACK32(ctx->h[5], &digest[20]); + UNPACK32(ctx->h[6], &digest[24]); + UNPACK32(ctx->h[7], &digest[28]); +#endif /* !UNROLL_LOOPS */ +} + +/* SHA-512 functions */ + +void sha512_transf(sha512_ctx *ctx, const unsigned char *message, + unsigned int block_nb) +{ + uint64_t w[80]; + uint64_t wv[8]; + uint64_t t1, t2; + const unsigned char *sub_block; + int i, j; + + for (i = 0; i < (int) block_nb; i++) { + sub_block = message + (i << 7); + +#ifndef UNROLL_LOOPS + for (j = 0; j < 16; j++) { + PACK64(&sub_block[j << 3], &w[j]); + } + + for (j = 16; j < 80; j++) { + SHA512_SCR(j); + } + + for (j = 0; j < 8; j++) { + wv[j] = ctx->h[j]; + } + + for (j = 0; j < 80; j++) { + t1 = wv[7] + SHA512_F2(wv[4]) + CH(wv[4], wv[5], wv[6]) + + sha512_k[j] + w[j]; + t2 = SHA512_F1(wv[0]) + MAJ(wv[0], wv[1], wv[2]); + wv[7] = wv[6]; + wv[6] = wv[5]; + wv[5] = wv[4]; + wv[4] = wv[3] + t1; + wv[3] = wv[2]; + wv[2] = wv[1]; + wv[1] = wv[0]; + wv[0] = t1 + t2; + } + + for (j = 0; j < 8; j++) { + ctx->h[j] += wv[j]; + } +#else + PACK64(&sub_block[ 0], &w[ 0]); PACK64(&sub_block[ 8], 
&w[ 1]); + PACK64(&sub_block[ 16], &w[ 2]); PACK64(&sub_block[ 24], &w[ 3]); + PACK64(&sub_block[ 32], &w[ 4]); PACK64(&sub_block[ 40], &w[ 5]); + PACK64(&sub_block[ 48], &w[ 6]); PACK64(&sub_block[ 56], &w[ 7]); + PACK64(&sub_block[ 64], &w[ 8]); PACK64(&sub_block[ 72], &w[ 9]); + PACK64(&sub_block[ 80], &w[10]); PACK64(&sub_block[ 88], &w[11]); + PACK64(&sub_block[ 96], &w[12]); PACK64(&sub_block[104], &w[13]); + PACK64(&sub_block[112], &w[14]); PACK64(&sub_block[120], &w[15]); + + SHA512_SCR(16); SHA512_SCR(17); SHA512_SCR(18); SHA512_SCR(19); + SHA512_SCR(20); SHA512_SCR(21); SHA512_SCR(22); SHA512_SCR(23); + SHA512_SCR(24); SHA512_SCR(25); SHA512_SCR(26); SHA512_SCR(27); + SHA512_SCR(28); SHA512_SCR(29); SHA512_SCR(30); SHA512_SCR(31); + SHA512_SCR(32); SHA512_SCR(33); SHA512_SCR(34); SHA512_SCR(35); + SHA512_SCR(36); SHA512_SCR(37); SHA512_SCR(38); SHA512_SCR(39); + SHA512_SCR(40); SHA512_SCR(41); SHA512_SCR(42); SHA512_SCR(43); + SHA512_SCR(44); SHA512_SCR(45); SHA512_SCR(46); SHA512_SCR(47); + SHA512_SCR(48); SHA512_SCR(49); SHA512_SCR(50); SHA512_SCR(51); + SHA512_SCR(52); SHA512_SCR(53); SHA512_SCR(54); SHA512_SCR(55); + SHA512_SCR(56); SHA512_SCR(57); SHA512_SCR(58); SHA512_SCR(59); + SHA512_SCR(60); SHA512_SCR(61); SHA512_SCR(62); SHA512_SCR(63); + SHA512_SCR(64); SHA512_SCR(65); SHA512_SCR(66); SHA512_SCR(67); + SHA512_SCR(68); SHA512_SCR(69); SHA512_SCR(70); SHA512_SCR(71); + SHA512_SCR(72); SHA512_SCR(73); SHA512_SCR(74); SHA512_SCR(75); + SHA512_SCR(76); SHA512_SCR(77); SHA512_SCR(78); SHA512_SCR(79); + + wv[0] = ctx->h[0]; wv[1] = ctx->h[1]; + wv[2] = ctx->h[2]; wv[3] = ctx->h[3]; + wv[4] = ctx->h[4]; wv[5] = ctx->h[5]; + wv[6] = ctx->h[6]; wv[7] = ctx->h[7]; + + j = 0; + + do { + SHA512_EXP(0,1,2,3,4,5,6,7,j); j++; + SHA512_EXP(7,0,1,2,3,4,5,6,j); j++; + SHA512_EXP(6,7,0,1,2,3,4,5,j); j++; + SHA512_EXP(5,6,7,0,1,2,3,4,j); j++; + SHA512_EXP(4,5,6,7,0,1,2,3,j); j++; + SHA512_EXP(3,4,5,6,7,0,1,2,j); j++; + SHA512_EXP(2,3,4,5,6,7,0,1,j); j++; + SHA512_EXP(1,2,3,4,5,6,7,0,j); j++; + } while (j < 80); + + ctx->h[0] += wv[0]; ctx->h[1] += wv[1]; + ctx->h[2] += wv[2]; ctx->h[3] += wv[3]; + ctx->h[4] += wv[4]; ctx->h[5] += wv[5]; + ctx->h[6] += wv[6]; ctx->h[7] += wv[7]; +#endif /* !UNROLL_LOOPS */ + } +} + +void sha512(const unsigned char *message, unsigned int len, + unsigned char *digest) +{ + sha512_ctx ctx; + + sha512_init(&ctx); + sha512_update(&ctx, message, len); + sha512_final(&ctx, digest); +} + +void sha512_init(sha512_ctx *ctx) +{ +#ifndef UNROLL_LOOPS + int i; + for (i = 0; i < 8; i++) { + ctx->h[i] = sha512_h0[i]; + } +#else + ctx->h[0] = sha512_h0[0]; ctx->h[1] = sha512_h0[1]; + ctx->h[2] = sha512_h0[2]; ctx->h[3] = sha512_h0[3]; + ctx->h[4] = sha512_h0[4]; ctx->h[5] = sha512_h0[5]; + ctx->h[6] = sha512_h0[6]; ctx->h[7] = sha512_h0[7]; +#endif /* !UNROLL_LOOPS */ + + ctx->len = 0; + ctx->tot_len = 0; +} + +void sha512_update(sha512_ctx *ctx, const unsigned char *message, + unsigned int len) +{ + unsigned int block_nb; + unsigned int new_len, rem_len, tmp_len; + const unsigned char *shifted_message; + + tmp_len = SHA512_BLOCK_SIZE - ctx->len; + rem_len = len < tmp_len ? 
len : tmp_len; + + memcpy(&ctx->block[ctx->len], message, rem_len); + + if (ctx->len + len < SHA512_BLOCK_SIZE) { + ctx->len += len; + return; + } + + new_len = len - rem_len; + block_nb = new_len / SHA512_BLOCK_SIZE; + + shifted_message = message + rem_len; + + sha512_transf(ctx, ctx->block, 1); + sha512_transf(ctx, shifted_message, block_nb); + + rem_len = new_len % SHA512_BLOCK_SIZE; + + memcpy(ctx->block, &shifted_message[block_nb << 7], + rem_len); + + ctx->len = rem_len; + ctx->tot_len += (block_nb + 1) << 7; +} + +void sha512_final(sha512_ctx *ctx, unsigned char *digest) +{ + unsigned int block_nb; + unsigned int pm_len; + unsigned int len_b; + +#ifndef UNROLL_LOOPS + int i; +#endif + + block_nb = 1 + ((SHA512_BLOCK_SIZE - 17) + < (ctx->len % SHA512_BLOCK_SIZE)); + + len_b = (ctx->tot_len + ctx->len) << 3; + pm_len = block_nb << 7; + + memset(ctx->block + ctx->len, 0, pm_len - ctx->len); + ctx->block[ctx->len] = 0x80; + UNPACK32(len_b, ctx->block + pm_len - 4); + + sha512_transf(ctx, ctx->block, block_nb); + +#ifndef UNROLL_LOOPS + for (i = 0 ; i < 8; i++) { + UNPACK64(ctx->h[i], &digest[i << 3]); + } +#else + UNPACK64(ctx->h[0], &digest[ 0]); + UNPACK64(ctx->h[1], &digest[ 8]); + UNPACK64(ctx->h[2], &digest[16]); + UNPACK64(ctx->h[3], &digest[24]); + UNPACK64(ctx->h[4], &digest[32]); + UNPACK64(ctx->h[5], &digest[40]); + UNPACK64(ctx->h[6], &digest[48]); + UNPACK64(ctx->h[7], &digest[56]); +#endif /* !UNROLL_LOOPS */ +} + +/* SHA-384 functions */ + +void sha384(const unsigned char *message, unsigned int len, + unsigned char *digest) +{ + sha384_ctx ctx; + + sha384_init(&ctx); + sha384_update(&ctx, message, len); + sha384_final(&ctx, digest); +} + +void sha384_init(sha384_ctx *ctx) +{ +#ifndef UNROLL_LOOPS + int i; + for (i = 0; i < 8; i++) { + ctx->h[i] = sha384_h0[i]; + } +#else + ctx->h[0] = sha384_h0[0]; ctx->h[1] = sha384_h0[1]; + ctx->h[2] = sha384_h0[2]; ctx->h[3] = sha384_h0[3]; + ctx->h[4] = sha384_h0[4]; ctx->h[5] = sha384_h0[5]; + ctx->h[6] = sha384_h0[6]; ctx->h[7] = sha384_h0[7]; +#endif /* !UNROLL_LOOPS */ + + ctx->len = 0; + ctx->tot_len = 0; +} + +void sha384_update(sha384_ctx *ctx, const unsigned char *message, + unsigned int len) +{ + unsigned int block_nb; + unsigned int new_len, rem_len, tmp_len; + const unsigned char *shifted_message; + + tmp_len = SHA384_BLOCK_SIZE - ctx->len; + rem_len = len < tmp_len ? 
len : tmp_len; + + memcpy(&ctx->block[ctx->len], message, rem_len); + + if (ctx->len + len < SHA384_BLOCK_SIZE) { + ctx->len += len; + return; + } + + new_len = len - rem_len; + block_nb = new_len / SHA384_BLOCK_SIZE; + + shifted_message = message + rem_len; + + sha512_transf(ctx, ctx->block, 1); + sha512_transf(ctx, shifted_message, block_nb); + + rem_len = new_len % SHA384_BLOCK_SIZE; + + memcpy(ctx->block, &shifted_message[block_nb << 7], + rem_len); + + ctx->len = rem_len; + ctx->tot_len += (block_nb + 1) << 7; +} + +void sha384_final(sha384_ctx *ctx, unsigned char *digest) +{ + unsigned int block_nb; + unsigned int pm_len; + unsigned int len_b; + +#ifndef UNROLL_LOOPS + int i; +#endif + + block_nb = (1 + ((SHA384_BLOCK_SIZE - 17) + < (ctx->len % SHA384_BLOCK_SIZE))); + + len_b = (ctx->tot_len + ctx->len) << 3; + pm_len = block_nb << 7; + + memset(ctx->block + ctx->len, 0, pm_len - ctx->len); + ctx->block[ctx->len] = 0x80; + UNPACK32(len_b, ctx->block + pm_len - 4); + + sha512_transf(ctx, ctx->block, block_nb); + +#ifndef UNROLL_LOOPS + for (i = 0 ; i < 6; i++) { + UNPACK64(ctx->h[i], &digest[i << 3]); + } +#else + UNPACK64(ctx->h[0], &digest[ 0]); + UNPACK64(ctx->h[1], &digest[ 8]); + UNPACK64(ctx->h[2], &digest[16]); + UNPACK64(ctx->h[3], &digest[24]); + UNPACK64(ctx->h[4], &digest[32]); + UNPACK64(ctx->h[5], &digest[40]); +#endif /* !UNROLL_LOOPS */ +} + +/* SHA-224 functions */ + +void sha224(const unsigned char *message, unsigned int len, + unsigned char *digest) +{ + sha224_ctx ctx; + + sha224_init(&ctx); + sha224_update(&ctx, message, len); + sha224_final(&ctx, digest); +} + +void sha224_init(sha224_ctx *ctx) +{ +#ifndef UNROLL_LOOPS + int i; + for (i = 0; i < 8; i++) { + ctx->h[i] = sha224_h0[i]; + } +#else + ctx->h[0] = sha224_h0[0]; ctx->h[1] = sha224_h0[1]; + ctx->h[2] = sha224_h0[2]; ctx->h[3] = sha224_h0[3]; + ctx->h[4] = sha224_h0[4]; ctx->h[5] = sha224_h0[5]; + ctx->h[6] = sha224_h0[6]; ctx->h[7] = sha224_h0[7]; +#endif /* !UNROLL_LOOPS */ + + ctx->len = 0; + ctx->tot_len = 0; +} + +void sha224_update(sha224_ctx *ctx, const unsigned char *message, + unsigned int len) +{ + unsigned int block_nb; + unsigned int new_len, rem_len, tmp_len; + const unsigned char *shifted_message; + + tmp_len = SHA224_BLOCK_SIZE - ctx->len; + rem_len = len < tmp_len ? 
len : tmp_len; + + memcpy(&ctx->block[ctx->len], message, rem_len); + + if (ctx->len + len < SHA224_BLOCK_SIZE) { + ctx->len += len; + return; + } + + new_len = len - rem_len; + block_nb = new_len / SHA224_BLOCK_SIZE; + + shifted_message = message + rem_len; + + sha256_transf(ctx, ctx->block, 1); + sha256_transf(ctx, shifted_message, block_nb); + + rem_len = new_len % SHA224_BLOCK_SIZE; + + memcpy(ctx->block, &shifted_message[block_nb << 6], + rem_len); + + ctx->len = rem_len; + ctx->tot_len += (block_nb + 1) << 6; +} + +void sha224_final(sha224_ctx *ctx, unsigned char *digest) +{ + unsigned int block_nb; + unsigned int pm_len; + unsigned int len_b; + +#ifndef UNROLL_LOOPS + int i; +#endif + + block_nb = (1 + ((SHA224_BLOCK_SIZE - 9) + < (ctx->len % SHA224_BLOCK_SIZE))); + + len_b = (ctx->tot_len + ctx->len) << 3; + pm_len = block_nb << 6; + + memset(ctx->block + ctx->len, 0, pm_len - ctx->len); + ctx->block[ctx->len] = 0x80; + UNPACK32(len_b, ctx->block + pm_len - 4); + + sha256_transf(ctx, ctx->block, block_nb); + +#ifndef UNROLL_LOOPS + for (i = 0 ; i < 7; i++) { + UNPACK32(ctx->h[i], &digest[i << 2]); + } +#else + UNPACK32(ctx->h[0], &digest[ 0]); + UNPACK32(ctx->h[1], &digest[ 4]); + UNPACK32(ctx->h[2], &digest[ 8]); + UNPACK32(ctx->h[3], &digest[12]); + UNPACK32(ctx->h[4], &digest[16]); + UNPACK32(ctx->h[5], &digest[20]); + UNPACK32(ctx->h[6], &digest[24]); +#endif /* !UNROLL_LOOPS */ +} +#endif /* HAVE_SHA2 */ + +#ifdef TEST_VECTORS + +/* FIPS 180-2 Validation tests */ + +#include +#include + +void test(const char *vector, unsigned char *digest, + unsigned int digest_size) +{ + char output[2 * SHA512_DIGEST_SIZE + 1]; + int i; + + output[2 * digest_size] = '\0'; + + for (i = 0; i < (int) digest_size ; i++) { + sprintf(output + 2 * i, "%02x", digest[i]); + } + + printf("H: %s\n", output); + if (strcmp(vector, output)) { + fprintf(stderr, "Test failed.\n"); + exit(EXIT_FAILURE); + } +} + +int main(void) +{ + static const char *vectors[4][3] = + { /* SHA-224 */ + { + "23097d223405d8228642a477bda255b32aadbce4bda0b3f7e36c9da7", + "75388b16512776cc5dba5da1fd890150b0c6455cb4f58b1952522525", + "20794655980c91d8bbb4c1ea97618a4bf03f42581948b2ee4ee7ad67", + }, + /* SHA-256 */ + { + "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad", + "248d6a61d20638b8e5c026930c3e6039a33ce45964ff2167f6ecedd419db06c1", + "cdc76e5c9914fb9281a1c7e284d73e67f1809a48a497200e046d39ccc7112cd0", + }, + /* SHA-384 */ + { + "cb00753f45a35e8bb5a03d699ac65007272c32ab0eded1631a8b605a43ff5bed" + "8086072ba1e7cc2358baeca134c825a7", + "09330c33f71147e83d192fc782cd1b4753111b173b3b05d22fa08086e3b0f712" + "fcc7c71a557e2db966c3e9fa91746039", + "9d0e1809716474cb086e834e310a4a1ced149e9c00f248527972cec5704c2a5b" + "07b8b3dc38ecc4ebae97ddd87f3d8985", + }, + /* SHA-512 */ + { + "ddaf35a193617abacc417349ae20413112e6fa4e89a97ea20a9eeee64b55d39a" + "2192992a274fc1a836ba3c23a3feebbd454d4423643ce80e2a9ac94fa54ca49f", + "8e959b75dae313da8cf4f72814fc143f8f7779c6eb9f7fa17299aeadb6889018" + "501d289e4900f7e4331b99dec4b5433ac7d329eeb6dd26545e96e55b874be909", + "e718483d0ce769644e2e42c7bc15b4638e1f98b13b2044285632a803afa973eb" + "de0ff244877ea60a4cb0432ce577c31beb009c5c2c49aa2e4eadb217ad8cc09b" + } + }; + + static const char message1[] = "abc"; + static const char message2a[] = "abcdbcdecdefdefgefghfghighijhi" + "jkijkljklmklmnlmnomnopnopq"; + static const char message2b[] = "abcdefghbcdefghicdefghijdefghijkefghij" + "klfghijklmghijklmnhijklmnoijklmnopjklm" + "nopqklmnopqrlmnopqrsmnopqrstnopqrstu"; + unsigned char *message3; + 
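+    /* message3 is the third FIPS 180-2 test input: one million 'a'
+     * characters, allocated and filled with memset() just below */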
unsigned int message3_len = 1000000; + unsigned char digest[SHA512_DIGEST_SIZE]; + + message3 = malloc(message3_len); + if (message3 == NULL) { + fprintf(stderr, "Can't allocate memory\n"); + return -1; + } + memset(message3, 'a', message3_len); + + printf("SHA-2 FIPS 180-2 Validation tests\n\n"); + printf("SHA-224 Test vectors\n"); + + sha224((const unsigned char *) message1, strlen(message1), digest); + test(vectors[0][0], digest, SHA224_DIGEST_SIZE); + sha224((const unsigned char *) message2a, strlen(message2a), digest); + test(vectors[0][1], digest, SHA224_DIGEST_SIZE); + sha224(message3, message3_len, digest); + test(vectors[0][2], digest, SHA224_DIGEST_SIZE); + printf("\n"); + + printf("SHA-256 Test vectors\n"); + + sha256((const unsigned char *) message1, strlen(message1), digest); + test(vectors[1][0], digest, SHA256_DIGEST_SIZE); + sha256((const unsigned char *) message2a, strlen(message2a), digest); + test(vectors[1][1], digest, SHA256_DIGEST_SIZE); + sha256(message3, message3_len, digest); + test(vectors[1][2], digest, SHA256_DIGEST_SIZE); + printf("\n"); + + printf("SHA-384 Test vectors\n"); + + sha384((const unsigned char *) message1, strlen(message1), digest); + test(vectors[2][0], digest, SHA384_DIGEST_SIZE); + sha384((const unsigned char *)message2b, strlen(message2b), digest); + test(vectors[2][1], digest, SHA384_DIGEST_SIZE); + sha384(message3, message3_len, digest); + test(vectors[2][2], digest, SHA384_DIGEST_SIZE); + printf("\n"); + + printf("SHA-512 Test vectors\n"); + + sha512((const unsigned char *) message1, strlen(message1), digest); + test(vectors[3][0], digest, SHA512_DIGEST_SIZE); + sha512((const unsigned char *) message2b, strlen(message2b), digest); + test(vectors[3][1], digest, SHA512_DIGEST_SIZE); + sha512(message3, message3_len, digest); + test(vectors[3][2], digest, SHA512_DIGEST_SIZE); + printf("\n"); + + printf("All tests passed.\n"); + + return 0; +} + +#endif /* TEST_VECTORS */ + diff --git a/src/lib/sha2.h b/src/lib/sha2.h new file mode 100644 index 00000000..8c29f5be --- /dev/null +++ b/src/lib/sha2.h @@ -0,0 +1,118 @@ +/* + * FIPS 180-2 SHA-224/256/384/512 implementation + * Last update: 02/02/2007 + * Issue date: 04/30/2005 + * + * Copyright (C) 2005, 2007 Olivier Gay + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#ifndef SHA2_H +#define SHA2_H + +#include "bacula.h" + +#ifndef HAVE_SHA2 + +#define SHA224_DIGEST_SIZE ( 224 / 8) +#define SHA256_DIGEST_SIZE ( 256 / 8) +#define SHA384_DIGEST_SIZE ( 384 / 8) +#define SHA512_DIGEST_SIZE ( 512 / 8) + +#define SHA256_BLOCK_SIZE ( 512 / 8) +#define SHA512_BLOCK_SIZE (1024 / 8) +#define SHA384_BLOCK_SIZE SHA512_BLOCK_SIZE +#define SHA224_BLOCK_SIZE SHA256_BLOCK_SIZE + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct { + unsigned int tot_len; + unsigned int len; + unsigned char block[2 * SHA256_BLOCK_SIZE]; + uint32_t h[8]; +} sha256_ctx; + +typedef struct { + unsigned int tot_len; + unsigned int len; + unsigned char block[2 * SHA512_BLOCK_SIZE]; + uint64_t h[8]; +} sha512_ctx; + +typedef sha512_ctx sha384_ctx; +typedef sha256_ctx sha224_ctx; + +void sha224_init(sha224_ctx *ctx); +void sha224_update(sha224_ctx *ctx, const unsigned char *message, + unsigned int len); +void sha224_final(sha224_ctx *ctx, unsigned char *digest); +void sha224(const unsigned char *message, unsigned int len, + unsigned char *digest); + +void sha256_init(sha256_ctx * ctx); +void sha256_update(sha256_ctx *ctx, const unsigned char *message, + unsigned int len); +void sha256_final(sha256_ctx *ctx, unsigned char *digest); +void sha256(const unsigned char *message, unsigned int len, + unsigned char *digest); + +void sha384_init(sha384_ctx *ctx); +void sha384_update(sha384_ctx *ctx, const unsigned char *message, + unsigned int len); +void sha384_final(sha384_ctx *ctx, unsigned char *digest); +void sha384(const unsigned char *message, unsigned int len, + unsigned char *digest); + +void sha512_init(sha512_ctx *ctx); +void sha512_update(sha512_ctx *ctx, const unsigned char *message, + unsigned int len); +void sha512_final(sha512_ctx *ctx, unsigned char *digest); +void sha512(const unsigned char *message, unsigned int len, + unsigned char *digest); + +#ifdef __cplusplus +} +#endif + +/* implement the openssl fall back */ +typedef sha256_ctx SHA256_CTX; +#define SHA256_Init(ctx) sha256_init(ctx) +#define SHA256_Update(ctx, msg, len) sha256_update(ctx, (const unsigned char *)(msg), len) +#define SHA256_Final(digest, ctx) sha256_final(ctx, digest) + +typedef sha512_ctx SHA512_CTX; +#define SHA512_Init(ctx) sha512_init(ctx) +#define SHA512_Update(ctx, msg, len) sha512_update(ctx, (const unsigned char *)(msg), len) +#define SHA512_Final(digest, ctx) sha512_final(ctx, digest) + +#endif /* HAVE_SHA2 */ + +#endif /* !SHA2_H */ + diff --git a/src/lib/signal.c b/src/lib/signal.c new file mode 100644 index 00000000..cdb9824e --- /dev/null +++ b/src/lib/signal.c @@ -0,0 +1,444 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Signal handlers for Bacula daemons + * + * Kern Sibbald, April 2000 + * + * Note, we probably should do a core dump for the serious + * signals such as SIGBUS, SIGPFE, ... + * Also, for SIGHUP and SIGUSR1, we should re-read the + * configuration file. However, since this is a "general" + * routine, we leave it to the individual daemons to + * tweek their signals after calling this routine. + * + */ + +#ifndef HAVE_WIN32 +#include "bacula.h" + +#ifndef _NSIG +#define BA_NSIG 100 +#else +#define BA_NSIG _NSIG +#endif + +extern char my_name[]; +extern char fail_time[]; +extern char *exepath; +extern char *exename; +extern bool prt_kaboom; + +static const char *sig_names[BA_NSIG+1]; + +typedef void (SIG_HANDLER)(int sig); +static SIG_HANDLER *exit_handler; + +/* main process id */ +static pid_t main_pid = 0; + +const char *get_signal_name(int sig) +{ + if (sig < 0 || sig > BA_NSIG || !sig_names[sig]) { + return _("Invalid signal number"); + } else { + return sig_names[sig]; + } +} + +/* defined in jcr.c */ +extern void dbg_print_jcr(FILE *fp); +/* defined in plugins.c */ +extern void dbg_print_plugin(FILE *fp); +/* defined in lockmgr.c */ +extern void dbg_print_lock(FILE *fp); + +#define MAX_DBG_HOOK 10 +static dbg_hook_t *dbg_hooks[MAX_DBG_HOOK]; +static int dbg_handler_count=0; + +void dbg_add_hook(dbg_hook_t *hook) +{ + ASSERT(dbg_handler_count < MAX_DBG_HOOK); + dbg_hooks[dbg_handler_count++] = hook; +} + +/* + * !!! WARNING !!! + * + * This function should be used ONLY after a violent signal. We walk through the + * JCR chain without locking, Bacula should not be running. + */ +static void dbg_print_bacula() +{ + char buf[512]; + + snprintf(buf, sizeof(buf), "%s/bacula.%d.traceback", working_directory, main_pid); + FILE *fp = bfopen(buf, "a+") ; + if (!fp) { + fp = stderr; + } + + fprintf(stderr, "LockDump: %s\n", buf); + + /* Print also BDB and RWLOCK structure + * Can add more info about JCR with dbg_jcr_add_hook() + */ + dbg_print_lock(fp); + dbg_print_jcr(fp); + dbg_print_plugin(fp); + + for(int i=0; i < dbg_handler_count ; i++) { + dbg_hooks[i](fp); + } + + if (fp != stderr) { + fclose(fp); + } +} + +/* + * Handle signals here + */ +extern "C" void signal_handler(int sig) +{ + static int already_dead = 0; + int chld_status=-1; + utime_t now; + + Dmsg2(900, "sig=%d %s\n", sig, sig_names[sig]); + /* Ignore certain signals -- SIGUSR2 used to interrupt threads */ + if (sig == SIGCHLD || sig == SIGUSR2) { + return; + } + /* FreeBSD seems to generate a signal of 0, which is of course undefined */ + if (sig == 0) { + return; + } + /* If we come back more than once, get out fast! 
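+ *
+ * (Usage sketch, not code from this file: a daemon normally wires these
+ * routines up once at startup and may afterwards re-point individual
+ * signals, as suggested at the top of this file.  The terminate_mydaemon
+ * and my_dump_hook names are invented; the FILE* signature of the hook is
+ * inferred from how dbg_hooks[] is invoked in dbg_print_bacula() above.)
+ *
+ *    static void my_dump_hook(FILE *fp)
+ *    {
+ *       fprintf(fp, "mydaemon: extra state for the traceback\n");
+ *    }
+ *
+ *    init_stack_dump();                  // record the main pid for btraceback
+ *    init_signals(terminate_mydaemon);   // install signal_handler() for most signals
+ *    dbg_add_hook(my_dump_hook);         // extra output in bacula.PID.traceback
+ *    // a daemon may then re-point SIGHUP at its own reload handler with sigaction()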
*/ + if (already_dead) { + exit(1); + } + already_dead++; + /* Don't use Emsg here as it may lock and thus block us */ + if (sig == SIGTERM || sig == SIGINT) { + syslog(LOG_DAEMON|LOG_ERR, "Shutting down Bacula service: %s ...\n", my_name); + } else { + fprintf(stderr, _("Bacula interrupted by signal %d: %s\n"), sig, get_signal_name(sig)); + syslog(LOG_DAEMON|LOG_ERR, + _("Bacula interrupted by signal %d: %s\n"), sig, get_signal_name(sig)); + /* Edit current time for showing in the dump */ + now = time(NULL); + bstrftimes(fail_time, 30, now); + } + +#ifdef TRACEBACK + if (sig != SIGTERM && sig != SIGINT) { + struct sigaction sigdefault; + static char *argv[5]; + static char pid_buf[20]; + static char btpath[400]; + char buf[400]; + pid_t pid; + int exelen = strlen(exepath); + + fprintf(stderr, _("Kaboom! %s, %s got signal %d - %s at %s. Attempting traceback.\n"), + exename, my_name, sig, get_signal_name(sig), fail_time); + fprintf(stderr, _("Kaboom! exepath=%s\n"), exepath); + + if (exelen + 12 > (int)sizeof(btpath)) { + bstrncpy(btpath, "btraceback", sizeof(btpath)); + } else { + bstrncpy(btpath, exepath, sizeof(btpath)); + if (IsPathSeparator(btpath[exelen-1])) { + btpath[exelen-1] = 0; + } + bstrncat(btpath, "/btraceback", sizeof(btpath)); + } + if (!IsPathSeparator(exepath[exelen - 1])) { + strcat(exepath, "/"); + } + strcat(exepath, exename); + if (!working_directory) { + working_directory = buf; + *buf = 0; + } + if (*working_directory == 0) { + strcpy((char *)working_directory, "/tmp/"); + } + if (chdir(working_directory) != 0) { /* dump in working directory */ + berrno be; + Pmsg2(000, "chdir to %s failed. ERR=%s\n", working_directory, be.bstrerror()); + strcpy((char *)working_directory, "/tmp/"); + } + unlink("./core"); /* get rid of any old core file */ + + sprintf(pid_buf, "%d", (int)main_pid); + snprintf(buf, sizeof(buf), "%s/bacula.%s.traceback", working_directory, pid_buf); + unlink(buf); /* Remove the previous backtrace file if exist */ + +#ifdef DEVELOPER /* When DEVELOPER not set, this is done below */ + /* print information about the current state into working/.traceback */ + dbg_print_bacula(); +#endif + + Dmsg1(300, "Working=%s\n", working_directory); + Dmsg1(300, "btpath=%s\n", btpath); + Dmsg1(300, "exepath=%s\n", exepath); + switch (pid = fork()) { + case -1: /* error */ + fprintf(stderr, _("Fork error: ERR=%s\n"), strerror(errno)); + break; + case 0: /* child */ + argv[0] = btpath; /* path to btraceback */ + argv[1] = exepath; /* path to exe */ + argv[2] = pid_buf; + argv[3] = (char *)working_directory; + argv[4] = (char *)NULL; + fprintf(stderr, _("Calling: %s %s %s %s\n"), btpath, exepath, pid_buf, + working_directory); + if (execv(btpath, argv) != 0) { + berrno be; + printf(_("execv: %s failed: ERR=%s\n"), btpath, be.bstrerror()); + } + exit(-1); + default: /* parent */ + break; + } + + /* Parent continue here, waiting for child */ + sigdefault.sa_flags = 0; + sigdefault.sa_handler = SIG_DFL; + sigfillset(&sigdefault.sa_mask); + + sigaction(sig, &sigdefault, NULL); + if (pid > 0) { + Dmsg0(500, "Doing waitpid\n"); + waitpid(pid, &chld_status, 0); /* wait for child to produce dump */ + Dmsg0(500, "Done waitpid\n"); + } else { + Dmsg0(500, "Doing sleep\n"); + bmicrosleep(30, 0); + } + if (WEXITSTATUS(chld_status) == 0) { + fprintf(stderr, "%s", _("It looks like the traceback worked...\n")); + } else { + fprintf(stderr, _("The btraceback call returned %d\n"), + WEXITSTATUS(chld_status)); + } + +#ifndef DEVELOPER /* When DEVELOPER set, this is done above */ + /* 
print information about the current state into working/.traceback */ + dbg_print_bacula(); +#endif + + /* If we want it printed, do so */ +#ifdef direct_print + if (prt_kaboom) { + FILE *fd; + snprintf(buf, sizeof(buf), "%s/bacula.%s.traceback", working_directory, pid_buf); + fd = bfopen(buf, "r"); + if (fd != NULL) { + printf("\n\n ==== Traceback output ====\n\n"); + while (fgets(buf, (int)sizeof(buf), fd) != NULL) { + printf("%s", buf); + } + fclose(fd); + printf(" ==== End traceback output ====\n\n"); + } + } +#else + if (prt_kaboom) { + snprintf(buf, sizeof(buf), "/bin/cat %s/bacula.%s.traceback", working_directory, pid_buf); + fprintf(stderr, "\n\n ==== Traceback output ====\n\n"); + system(buf); + fprintf(stderr, " ==== End traceback output ====\n\n"); + } +#endif + + } +#endif + exit_handler(sig); + Dmsg0(500, "Done exit_handler\n"); +} + +/* + * Init stack dump by saving main process id -- + * needed by debugger to attach to this program. + */ +void init_stack_dump(void) +{ + main_pid = getpid(); /* save main thread's pid */ +} + +/* + * Initialize signals + */ +void init_signals(void terminate(int sig)) +{ + struct sigaction sighandle; + struct sigaction sigignore; + struct sigaction sigdefault; +#ifdef _sys_nsig + int i; + + exit_handler = terminate; + if (BA_NSIG < _sys_nsig) + Emsg2(M_ABORT, 0, _("BA_NSIG too small (%d) should be (%d)\n"), BA_NSIG, _sys_nsig); + + for (i=0; i<_sys_nsig; i++) + sig_names[i] = _sys_siglist[i]; +#else + exit_handler = terminate; + sig_names[0] = _("UNKNOWN SIGNAL"); + sig_names[SIGHUP] = _("Hangup"); + sig_names[SIGINT] = _("Interrupt"); + sig_names[SIGQUIT] = _("Quit"); + sig_names[SIGILL] = _("Illegal instruction");; + sig_names[SIGTRAP] = _("Trace/Breakpoint trap"); + sig_names[SIGABRT] = _("Abort"); +#ifdef SIGEMT + sig_names[SIGEMT] = _("EMT instruction (Emulation Trap)"); +#endif +#ifdef SIGIOT + sig_names[SIGIOT] = _("IOT trap"); +#endif + sig_names[SIGBUS] = _("BUS error"); + sig_names[SIGFPE] = _("Floating-point exception"); + sig_names[SIGKILL] = _("Kill, unblockable"); + sig_names[SIGUSR1] = _("User-defined signal 1"); + sig_names[SIGSEGV] = _("Segmentation violation"); + sig_names[SIGUSR2] = _("User-defined signal 2"); + sig_names[SIGPIPE] = _("Broken pipe"); + sig_names[SIGALRM] = _("Alarm clock"); + sig_names[SIGTERM] = _("Termination"); +#ifdef SIGSTKFLT + sig_names[SIGSTKFLT] = _("Stack fault"); +#endif + sig_names[SIGCHLD] = _("Child status has changed"); + sig_names[SIGCONT] = _("Continue"); + sig_names[SIGSTOP] = _("Stop, unblockable"); + sig_names[SIGTSTP] = _("Keyboard stop"); + sig_names[SIGTTIN] = _("Background read from tty"); + sig_names[SIGTTOU] = _("Background write to tty"); + sig_names[SIGURG] = _("Urgent condition on socket"); + sig_names[SIGXCPU] = _("CPU limit exceeded"); + sig_names[SIGXFSZ] = _("File size limit exceeded"); + sig_names[SIGVTALRM] = _("Virtual alarm clock"); + sig_names[SIGPROF] = _("Profiling alarm clock"); + sig_names[SIGWINCH] = _("Window size change"); + sig_names[SIGIO] = _("I/O now possible"); +#ifdef SIGPWR + sig_names[SIGPWR] = _("Power failure restart"); +#endif +#ifdef SIGWAITING + sig_names[SIGWAITING] = _("No runnable lwp"); +#endif +#ifdef SIGLWP + sig_names[SIGLWP] = _("SIGLWP special signal used by thread library"); +#endif +#ifdef SIGFREEZE + sig_names[SIGFREEZE] = _("Checkpoint Freeze"); +#endif +#ifdef SIGTHAW + sig_names[SIGTHAW] = _("Checkpoint Thaw"); +#endif +#ifdef SIGCANCEL + sig_names[SIGCANCEL] = _("Thread Cancellation"); +#endif +#ifdef SIGLOST + sig_names[SIGLOST] = 
_("Resource Lost (e.g. record-lock lost)"); +#endif +#endif + + +/* Now setup signal handlers */ + sighandle.sa_flags = 0; + sighandle.sa_handler = signal_handler; + sigfillset(&sighandle.sa_mask); + sigignore.sa_flags = 0; + sigignore.sa_handler = SIG_IGN; + sigfillset(&sigignore.sa_mask); + sigdefault.sa_flags = 0; + sigdefault.sa_handler = SIG_DFL; + sigfillset(&sigdefault.sa_mask); + + + sigaction(SIGPIPE, &sigignore, NULL); + sigaction(SIGCHLD, &sighandle, NULL); + sigaction(SIGCONT, &sigignore, NULL); + sigaction(SIGPROF, &sigignore, NULL); + sigaction(SIGWINCH, &sigignore, NULL); + sigaction(SIGIO, &sighandle, NULL); + + sigaction(SIGINT, &sighandle, NULL); + sigaction(SIGXCPU, &sigdefault, NULL); + sigaction(SIGXFSZ, &sigdefault, NULL); + + sigaction(SIGHUP, &sigignore, NULL); + sigaction(SIGQUIT, &sighandle, NULL); + sigaction(SIGILL, &sighandle, NULL); + sigaction(SIGTRAP, &sighandle, NULL); + sigaction(SIGABRT, &sighandle, NULL); +#ifdef SIGEMT + sigaction(SIGEMT, &sighandle, NULL); +#endif +#ifdef SIGIOT + sigaction(SIGIOT, &sighandle, NULL); +#endif + sigaction(SIGBUS, &sighandle, NULL); + sigaction(SIGFPE, &sighandle, NULL); +/* sigaction(SIGKILL, &sighandle, NULL); cannot be trapped */ + sigaction(SIGUSR1, &sighandle, NULL); + sigaction(SIGSEGV, &sighandle, NULL); + sigaction(SIGUSR2, &sighandle, NULL); + sigaction(SIGALRM, &sighandle, NULL); + sigaction(SIGTERM, &sighandle, NULL); +#ifdef SIGSTKFLT + sigaction(SIGSTKFLT, &sighandle, NULL); +#endif +/* sigaction(SIGSTOP, &sighandle, NULL); cannot be trapped */ + sigaction(SIGTSTP, &sighandle, NULL); + sigaction(SIGTTIN, &sighandle, NULL); + sigaction(SIGTTOU, &sighandle, NULL); + sigaction(SIGURG, &sighandle, NULL); + sigaction(SIGVTALRM, &sighandle, NULL); +#ifdef SIGPWR + sigaction(SIGPWR, &sighandle, NULL); +#endif +#ifdef SIGWAITING + sigaction(SIGWAITING,&sighandle, NULL); +#endif +#ifdef SIGLWP + sigaction(SIGLWP, &sighandle, NULL); +#endif +#ifdef SIGFREEZE + sigaction(SIGFREEZE, &sighandle, NULL); +#endif +#ifdef SIGTHAW + sigaction(SIGTHAW, &sighandle, NULL); +#endif +#ifdef SIGCANCEL + sigaction(SIGCANCEL, &sighandle, NULL); +#endif +#ifdef SIGLOST + sigaction(SIGLOST, &sighandle, NULL); +#endif +} +#endif diff --git a/src/lib/smartall.c b/src/lib/smartall.c new file mode 100644 index 00000000..49a8c9fe --- /dev/null +++ b/src/lib/smartall.c @@ -0,0 +1,565 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + + S M A R T A L L O C + Smart Memory Allocator + + Evolved over several years, starting with the initial + SMARTALLOC code for AutoSketch in 1986, guided by the Blind + Watchbreaker, John Walker. Isolated in this general-purpose + form in September of 1989. Updated with be more POSIX + compliant and to include Web-friendly HTML documentation in + October of 1998 by the same culprit. 
For additional + information and the current version visit the Web page: + + http://www.fourmilab.ch/smartall/ + +*/ + +#define LOCKMGR_COMPLIANT + +#include "bacula.h" +/* Use the real routines here */ +#undef realloc +#undef calloc +#undef malloc +#undef free + +/* We normally turn off debugging here. + * If you want it, simply #ifdef all the + * following off. + */ +#ifdef no_debug_xxxxx +#undef Dmsg1 +#undef Dmsg2 +#undef Dmsg3 +#undef Dmsg4 +#define Dmsg1(l,f,a1) +#define Dmsg2(l,f,a1,a2) +#define Dmsg3(l,f,a1,a2,a3) +#define Dmsg4(l,f,a1,a2,a3,a4) +#endif + + +uint64_t sm_max_bytes = 0; +uint64_t sm_bytes = 0; +uint32_t sm_max_buffers = 0; +uint32_t sm_buffers = 0; + +#ifdef SMARTALLOC + +static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; + +extern char my_name[]; /* daemon name */ + +#define EOS '\0' /* End of string sentinel */ +#define sm_min(a, b) ((a) < (b) ? (a) : (b)) + +/* Queue data structures */ + +/* Memory allocation control structures and storage. */ + +struct abufhead { + struct b_queue abq; /* Links on allocated queue */ + uint32_t ablen; /* Buffer length in bytes */ + const char *abfname; /* File name pointer */ + uint32_t ablineno; /* Line number of allocation */ + bool abin_use; /* set when malloced and cleared when free */ +}; + +static struct b_queue abqueue = { /* Allocated buffer queue */ + &abqueue, &abqueue +}; + + +static bool bufimode = false; /* Buffers not tracked when True */ + +#define HEAD_SIZE BALIGN(sizeof(struct abufhead)) + + +/* SMALLOC -- Allocate buffer, enqueing on the orphaned buffer + tracking list. */ + +static void *smalloc(const char *fname, int lineno, unsigned int nbytes) +{ + char *buf; + + /* Note: Unix MALLOC actually permits a zero length to be + passed and allocates a valid block with zero user bytes. + Such a block can later be expanded with realloc(). We + disallow this based on the belief that it's better to make + a special case and allocate one byte in the rare case this + is desired than to miss all the erroneous occurrences where + buffer length calculation code results in a zero. */ + + if (nbytes == 0) { + Tmsg3(0, "Invalid memory allocation. %u bytes %s:%d\n", nbytes, fname, lineno); + ASSERT(nbytes > 0); + } + + nbytes += HEAD_SIZE + 1; + if ((buf = (char *)malloc(nbytes)) != NULL) { + struct abufhead *head = (struct abufhead *)buf; + P(mutex); + /* Enqueue buffer on allocated list */ + qinsert(&abqueue, (struct b_queue *) buf); + head->ablen = nbytes; + head->abfname = bufimode ? NULL : fname; + head->ablineno = (uint32_t)lineno; + head->abin_use = true; + /* Emplace end-clobber detector at end of buffer */ + buf[nbytes - 1] = (uint8_t)((((intptr_t) buf) & 0xFF) ^ 0xC5); + buf += HEAD_SIZE; /* Increment to user data start */ + if (++sm_buffers > sm_max_buffers) { + sm_max_buffers = sm_buffers; + } + sm_bytes += nbytes; + if (sm_bytes > sm_max_bytes) { + sm_max_bytes = sm_bytes; + } + V(mutex); + } else { + Emsg0(M_ABORT, 0, _("Out of memory\n")); + } + Dmsg4(DT_MEMORY|1050, "smalloc %d at %p from %s:%d\n", nbytes, buf, fname, lineno); +#if SMALLOC_SANITY_CHECK > 0 + if (sm_bytes > SMALLOC_SANITY_CHECK) { + Emsg0(M_ABORT, 0, _("Too much memory used.")); + } +#endif + return (void *)buf; +} + +/* SM_NEW_OWNER -- Update the File and line number for a buffer + This is to accomodate mem_pool. */ + +void sm_new_owner(const char *fname, int lineno, char *buf) +{ + buf -= HEAD_SIZE; /* Decrement to header */ + P(mutex); + ((struct abufhead *)buf)->abfname = bufimode ? 
NULL : fname; + ((struct abufhead *)buf)->ablineno = (uint32_t) lineno; + ((struct abufhead *)buf)->abin_use = true; + V(mutex); + return; +} + +/* SM_FREE -- Update free pool availability. FREE is never called + except through this interface or by actuallyfree(). + free(x) is defined to generate a call to this + routine. */ + +void sm_free(const char *file, int line, void *fp) +{ + char *cp = (char *) fp; + struct b_queue *qp; + uint32_t lineno = line; + + if (cp == NULL) { + Emsg2(M_ABORT, 0, _("Attempt to free NULL called from %s:%d\n"), file, lineno); + } + + cp -= HEAD_SIZE; + qp = (struct b_queue *)cp; + struct abufhead *head = (struct abufhead *)cp; + + P(mutex); + Dmsg4(DT_MEMORY|1050, "sm_free %d at %p from %s:%d\n", + head->ablen, fp, + get_basename(head->abfname), head->ablineno); + + if (!head->abin_use) { + V(mutex); + Emsg2(M_ABORT, 0, _("in-use bit not set: double free from %s:%d\n"), file, lineno); + } + head->abin_use = false; + + /* The following assertions will catch virtually every release + of an address which isn't an allocated buffer. */ + if (qp->qnext->qprev != qp) { + V(mutex); + Emsg2(M_ABORT, 0, _("qp->qnext->qprev != qp called from %s:%d\n"), file, lineno); + } + if (qp->qprev->qnext != qp) { + V(mutex); + Emsg2(M_ABORT, 0, _("qp->qprev->qnext != qp called from %s:%d\n"), file, lineno); + } + + /* The following assertion detects storing off the end of the + allocated space in the buffer by comparing the end of buffer + checksum with the address of the buffer. */ + + if (((unsigned char *)cp)[head->ablen - 1] != ((((intptr_t) cp) & 0xFF) ^ 0xC5)) { + V(mutex); + Emsg6(M_ABORT, 0, _("Overrun buffer: len=%d addr=%p allocated: %s:%d called from %s:%d\n"), + head->ablen, fp, get_basename(head->abfname), head->ablineno, file, line); + } + if (sm_buffers > 0) { + sm_buffers--; + sm_bytes -= head->ablen; + } + + qdchain(qp); + V(mutex); + + /* Now we wipe the contents of the just-released buffer with + "designer garbage" (Duff Kurland's phrase) of alternating + bits. This is intended to ruin the day for any miscreant who + attempts to access data through a pointer into storage that's + been previously released. + + Modified, kes May, 2007 to not zap the header. This allows us + to check the in_use bit and detect doubly freed buffers. + */ + + memset(cp+HEAD_SIZE, 0xAA, (int)(head->ablen - HEAD_SIZE)); + + free(cp); +} + +/* SM_MALLOC -- Allocate buffer. NULL is returned if no memory + was available. */ + +void *sm_malloc(const char *fname, int lineno, unsigned int nbytes) +{ + void *buf; + + if ((buf = smalloc(fname, lineno, nbytes)) != NULL) { + + /* To catch sloppy code that assumes buffers obtained from + malloc() are zeroed, we preset the buffer contents to + "designer garbage" consisting of alternating bits. */ + + memset(buf, 0x55, (int) nbytes); + } else { + Emsg0(M_ABORT, 0, _("Out of memory\n")); + } + return buf; +} + +/* SM_CALLOC -- Allocate an array and clear it to zero. */ + +void *sm_calloc(const char *fname, int lineno, + unsigned int nelem, unsigned int elsize) +{ + void *buf; + + if ((buf = smalloc(fname, lineno, nelem * elsize)) != NULL) { + memset(buf, 0, (int) (nelem * elsize)); + } else { + Emsg0(M_ABORT, 0, _("Out of memory\n")); + } + return buf; +} + +/* SM_REALLOC -- Adjust the size of a previously allocated buffer. + Note that the trick of "resurrecting" a previously + freed buffer with realloc() is NOT supported by this + function. 
Further, because of the need to maintain + our control storage, SM_REALLOC must always allocate + a new block and copy the data in the old block. + This may result in programs which make heavy use of + realloc() running much slower than normally. */ + +void *sm_realloc(const char *fname, int lineno, void *ptr, unsigned int size) +{ + unsigned osize; + void *buf; + char *cp = (char *) ptr; + + Dmsg4(DT_MEMORY|1050, "sm_realloc %s:%d %p %d\n", get_basename(fname), (uint32_t)lineno, ptr, size); + if (size <= 0) { + e_msg(fname, lineno, M_ABORT, 0, _("sm_realloc size: %d\n"), size); + } + + /* If the old block pointer is NULL, treat realloc() as a + malloc(). SVID is silent on this, but many C libraries + permit this. */ + if (ptr == NULL) { + return sm_malloc(fname, lineno, size); + } + + /* If the old and new sizes are the same, be a nice guy and just + return the buffer passed in. */ + cp -= HEAD_SIZE; + struct abufhead *head = (struct abufhead *)cp; + osize = head->ablen - (HEAD_SIZE + 1); + if (size == osize) { + return ptr; + } + + /* Sizes differ. Allocate a new buffer of the requested size. + If we can't obtain such a buffer, act as defined in SVID: + return NULL from realloc() and leave the buffer in PTR + intact. */ + +// sm_buffers--; +// sm_bytes -= head->ablen; + + if ((buf = smalloc(fname, lineno, size)) != NULL) { + memcpy(buf, ptr, (int)sm_min(size, osize)); + /* If the new buffer is larger than the old, fill the balance + of it with "designer garbage". */ + if (size > osize) { + memset(((char *) buf) + osize, 0x55, (int) (size - osize)); + } + + /* All done. Free and dechain the original buffer. */ + sm_free(fname, lineno, ptr); + } + Dmsg4(DT_MEMORY|1060, _("sm_realloc %d at %p from %s:%d\n"), size, buf, get_basename(fname), (uint32_t)lineno); + return buf; +} + +/* ACTUALLYMALLOC -- Call the system malloc() function to obtain + storage which will eventually be released + by system or library routines not compiled + using SMARTALLOC. */ + +void *actuallymalloc(unsigned int size) +{ + return malloc(size); +} + +/* ACTUALLYCALLOC -- Call the system calloc() function to obtain + storage which will eventually be released + by system or library routines not compiled + using SMARTALLOC. */ + +void *actuallycalloc(unsigned int nelem, unsigned int elsize) +{ + return calloc(nelem, elsize); +} + +/* ACTUALLYREALLOC -- Call the system realloc() function to obtain + storage which will eventually be released + by system or library routines not compiled + using SMARTALLOC. */ + +void *actuallyrealloc(void *ptr, unsigned int size) +{ + Dmsg2(DT_MEMORY|1040, "Actuallyrealloc %p %d\n", ptr, size); + return realloc(ptr, size); +} + +/* ACTUALLYFREE -- Interface to system free() function to release + buffers allocated by low-level routines. */ + +void actuallyfree(void *cp) +{ + free(cp); +} + +/* SM_DUMP -- Print orphaned buffers (and dump them if BUFDUMP is + * True). + */ +void sm_dump(bool bufdump, bool in_use) +{ + struct abufhead *ap; + + P(mutex); + + ap = (struct abufhead *)abqueue.qnext; + + while (ap != (struct abufhead *) &abqueue) { + + if ((ap == NULL) || + (ap->abq.qnext->qprev != (struct b_queue *) ap) || + (ap->abq.qprev->qnext != (struct b_queue *) ap)) { + Pmsg1(0, _( + "\nOrphaned buffers exist. 
Dump terminated following\n" + " discovery of bad links in chain of orphaned buffers.\n" + " Buffer address with bad links: %p\n"), ap); + break; + } + + if (ap->abfname != NULL) { + char errmsg[500]; + uint32_t memsize = ap->ablen - (HEAD_SIZE + 1); + char *cp = ((char *)ap) + HEAD_SIZE; + + Pmsg6(0, "%s buffer: %s %d bytes at %p from %s:%d\n", + in_use?"In use":"Orphaned", + my_name, memsize, cp, get_basename(ap->abfname), ap->ablineno); + if (bufdump) { + char buf[20]; + unsigned llen = 0; + + errmsg[0] = EOS; + while (memsize) { + if (llen >= 16) { + bstrncat(errmsg, "\n", sizeof(errmsg)); + llen = 0; + Pmsg1(0, "%s", errmsg); + errmsg[0] = EOS; + } + bsnprintf(buf, sizeof(buf), " %02X", + (*cp++) & 0xFF); + bstrncat(errmsg, buf, sizeof(errmsg)); + llen++; + memsize--; + } + Pmsg1(0, "%s\n", errmsg); + } + } + ap = (struct abufhead *) ap->abq.qnext; + } + V(mutex); +} + +#undef sm_check +/* SM_CHECK -- Check the buffers and dump if any damage exists. */ +void sm_check(const char *fname, int lineno, bool bufdump) +{ + if (!sm_check_rtn(fname, lineno, bufdump)) { + Emsg2(M_ABORT, 0, _("Damaged buffer found. Called from %s:%d\n"), + get_basename(fname), (uint32_t)lineno); + } +} + +#undef sm_check_rtn +/* SM_CHECK_RTN -- Check the buffers and return 1 if OK otherwise 0 */ +int sm_check_rtn(const char *fname, int lineno, bool bufdump) +{ + struct abufhead *ap; + int bad, badbuf = 0; + + P(mutex); + ap = (struct abufhead *) abqueue.qnext; + while (ap != (struct abufhead *)&abqueue) { + bad = 0; + if (ap != NULL) { + if (ap->abq.qnext->qprev != (struct b_queue *)ap) { + bad = 0x1; + } + if (ap->abq.qprev->qnext != (struct b_queue *)ap) { + bad |= 0x2; + } + if (((unsigned char *) ap)[((struct abufhead *)ap)->ablen - 1] != + ((((intptr_t) ap) & 0xFF) ^ 0xC5)) { + bad |= 0x4; + } + } else { + bad = 0x8; + } + badbuf |= bad; + if (bad) { + Pmsg2(0, + _("\nDamaged buffers found at %s:%d\n"), get_basename(fname), (uint32_t)lineno); + + if (bad & 0x1) { + Pmsg0(0, _(" discovery of bad prev link.\n")); + } + if (bad & 0x2) { + Pmsg0(0, _(" discovery of bad next link.\n")); + } + if (bad & 0x4) { + Pmsg0(0, _(" discovery of data overrun.\n")); + } + if (bad & 0x8) { + Pmsg0(0, _(" NULL pointer.\n")); + } + + if (!ap) { + goto get_out; + } + Pmsg1(0, _(" Buffer address: %p\n"), ap); + + if (ap->abfname != NULL) { + uint32_t memsize = ap->ablen - (HEAD_SIZE + 1); + char errmsg[80]; + + Pmsg4(0, + _("Damaged buffer: %6u bytes allocated at line %d of %s %s\n"), + memsize, ap->ablineno, my_name, get_basename(ap->abfname) + ); + if (bufdump) { + unsigned llen = 0; + char *cp = ((char *) ap) + HEAD_SIZE; + + errmsg[0] = EOS; + while (memsize) { + if (llen >= 16) { + strcat(errmsg, "\n"); + llen = 0; + Pmsg1(0, "%s", errmsg); + errmsg[0] = EOS; + } + if (*cp < 0x20) { + sprintf(errmsg + strlen(errmsg), " %02X", + (*cp++) & 0xFF); + } else { + sprintf(errmsg + strlen(errmsg), " %c ", + (*cp++) & 0xFF); + } + llen++; + memsize--; + } + Pmsg1(0, "%s\n", errmsg); + } + } + } + ap = (struct abufhead *)ap->abq.qnext; + } +get_out: + V(mutex); + return badbuf ? 0 : 1; +} + + +/* SM_STATIC -- Orphaned buffer detection can be disabled (for such + items as buffers allocated during initialisation) by + calling sm_static(1). Normal orphaned buffer + detection can be re-enabled with sm_static(0). Note + that all the other safeguards still apply to buffers + allocated when sm_static(1) mode is in effect. 
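+
+   (Usage sketch, assuming a debug build with SMARTALLOC and SMCHECK
+   defined; the caller names are invented.)
+
+      sm_static(true);            // one-time tables: don't report as orphans
+      init_static_tables();       // hypothetical initialisation code
+      sm_static(false);
+
+      Dsm_check(100);             // verify chain integrity if debug_level >= 100
+
+      sm_dump(false, false);      // at shutdown: list leaked buffers, no hex dump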
*/ + +void sm_static(bool mode) +{ + bufimode = mode; +} + +/* + * Here we overload C++'s global new and delete operators + * so that the memory is allocated through smartalloc. + */ + +#ifdef xxx +void * operator new(size_t size) +{ +// Dmsg1(000, "new called %d\n", size); + return sm_malloc(__FILE__, __LINE__, size); +} + +void operator delete(void *buf) +{ +// Dmsg1(000, "free called %p\n", buf); + sm_free(__FILE__, __LINE__, buf); +} +#endif + +#endif + +/* Avoid aggressive optimization */ +void *bmemset(void *s, int c, size_t n) +{ + void *ret = memset(s, c, n); + return ret; +} diff --git a/src/lib/smartall.h b/src/lib/smartall.h new file mode 100644 index 00000000..49af5816 --- /dev/null +++ b/src/lib/smartall.h @@ -0,0 +1,173 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + + Definitions for the smart memory allocator + +*/ + +#ifndef SMARTALLOC_H +#define SMARTALLOC_H + +extern uint64_t DLL_IMP_EXP sm_max_bytes; +extern uint64_t DLL_IMP_EXP sm_bytes; +extern uint32_t DLL_IMP_EXP sm_max_buffers; +extern uint32_t DLL_IMP_EXP sm_buffers; + +#ifdef SMARTALLOC +#undef SMARTALLOC +#define SMARTALLOC SMARTALLOC + + +/* Avoid aggressive GCC optimization */ +extern void *bmemset(void *s, int c, size_t n); + +extern void *sm_malloc(const char *fname, int lineno, unsigned int nbytes), + *sm_calloc(const char *fname, int lineno, + unsigned int nelem, unsigned int elsize), + *sm_realloc(const char *fname, int lineno, void *ptr, unsigned int size), + *actuallymalloc(unsigned int size), + *actuallycalloc(unsigned int nelem, unsigned int elsize), + *actuallyrealloc(void *ptr, unsigned int size); +extern void sm_free(const char *fname, int lineno, void *fp); +extern void actuallyfree(void *cp), + sm_dump(bool bufdump, bool in_use=false), sm_static(int mode); +extern void sm_new_owner(const char *fname, int lineno, char *buf); + +#ifdef SMCHECK +#define Dsm_check(lvl) if ((lvl)<=debug_level) sm_check(__FILE__, __LINE__, true) +extern void sm_check(const char *fname, int lineno, bool bufdump); +extern int sm_check_rtn(const char *fname, int lineno, bool bufdump); +#else +#define Dsm_check(lvl) +#define sm_check(f, l, fl) +#define sm_check_rtn(f, l, fl) 1 +#endif + + +/* Redefine standard memory allocator calls to use our routines + instead. */ + +#define free(x) sm_free(__FILE__, __LINE__, (void *)(x)) +#define cfree(x) sm_free(__FILE__, __LINE__, (void *)(x)) +#define malloc(x) sm_malloc(__FILE__, __LINE__, (x)) +#define calloc(n,e) sm_calloc(__FILE__, __LINE__, (n), (e)) +#define realloc(p,x) sm_realloc(__FILE__, __LINE__, (p), (x)) + +#else + +/* If SMARTALLOC is disabled, define its special calls to default to + the standard routines. 
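+
+   (When SMARTALLOC is enabled, a class opts into this tracking through the
+   base class and New() macro defined further down in this header; MYCLASS
+   is an invented example.)
+
+      class MYCLASS: public SMARTALLOC {
+      public:
+         int counter;
+      };
+
+      MYCLASS *obj = New(MYCLASS);   // records __FILE__/__LINE__, zero-fills the object
+      delete obj;                    // released through sm_free(), so leaks show in sm_dump()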
*/ + +#define actuallyfree(x) free(x) +#define actuallymalloc(x) malloc(x) +#define actuallycalloc(x,y) calloc(x,y) +#define actuallyrealloc(x,y) realloc(x,y) +inline void sm_dump(int x, int y=0) {} /* with default arguments, we can't use a #define */ +#define sm_static(x) +#define sm_new_owner(a, b, c) +#define sm_malloc(f, l, n) malloc(n) +#define sm_free(f, l, n) free(n) +#define sm_check(f, l, fl) +#define sm_check_rtn(f, l, fl) 1 + +extern void *b_malloc(const char *file, int line, size_t size); +#define malloc(x) b_malloc(__FILE__, __LINE__, (x)) + +#define Dsm_check(lvl) +#define sm_check(f, l, fl) +#define sm_check_rtn(f, l, fl) 1 + +#endif + +#ifdef SMARTALLOC + +#define New(type) new(__FILE__, __LINE__) type + +/* We do memset(0) because it's not possible to memset a class when + * using subclass with virtual functions + */ + +class SMARTALLOC +{ +public: + +void *operator new(size_t s, const char *fname, int line) +{ + size_t size = s > sizeof(int) ? (unsigned int)s : sizeof(int); + void *p = sm_malloc(fname, line, size); + return bmemset(p, 0, size); /* return memset() result to avoid GCC 6.1 issue */ +} +void *operator new[](size_t s, const char *fname, int line) +{ + size_t size = s > sizeof(int) ? (unsigned int)s : sizeof(int); + void *p = sm_malloc(fname, line, size); + return bmemset(p, 0, size); /* return memset() result to avoid GCC 6.1 issue */ +} + +void operator delete(void *ptr) +{ + free(ptr); +} +void operator delete[](void *ptr, size_t /*i*/) +{ + free(ptr); +} + +void operator delete(void *ptr, const char * /*fname*/, int /*line*/) +{ + free(ptr); +} +void operator delete[](void *ptr, size_t /*i*/, + const char * /*fname*/, int /*line*/) +{ + free(ptr); +} + +private: +void *operator new(size_t s) throw() { (void)s; return 0; } +void *operator new[](size_t s) throw() { (void)s; return 0; } +}; + +#else + +#define New(type) new type + +class SMARTALLOC +{ + public: + void *operator new(size_t s) { + void *p = malloc(s); + bmemset(p, 0, s); + return p; + } + void *operator new[](size_t s) { + void *p = malloc(s); + bmemset(p, 0, s); + return p; + } + void operator delete(void *ptr) { + free(ptr); + } + void operator delete[](void *ptr, size_t i) { + free(ptr); + } +}; +#endif /* SMARTALLOC */ +#endif /* !SMARTALLOC_H */ diff --git a/src/lib/status.h b/src/lib/status.h new file mode 100644 index 00000000..fd41d03c --- /dev/null +++ b/src/lib/status.h @@ -0,0 +1,265 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Status packet definition that is used in both the SD and FD. It + * permits Win32 to call output_status() and get the output back + * at the callback address line by line, and for Linux code, + * the output can be sent directly to a BSOCK. 
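+ *
+ * A rough usage sketch for the callback path (the packet class is defined
+ * below; my_status_cb and its context are invented):
+ *
+ *    static void my_status_cb(const char *msg, int len, void *context)
+ *    {
+ *       fwrite(msg, 1, len, (FILE *)context);
+ *    }
+ *
+ *    STATUS_PKT sp;                 // constructor zeroes every field
+ *    sp.callback = my_status_cb;
+ *    sp.context  = stderr;
+ *    output_status(&sp);            // sendit() uses the callback because sp.bs is NULL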
+ * + * Kern Sibbald, March MMVII + * + */ + +#ifndef __STATUS_H_ +#define __STATUS_H_ + +#ifdef HAVE_GETRLIMIT + #include + #include +#endif + +/* + * Packet to send to output_status() + */ +class STATUS_PKT { +public: + BSOCK *bs; /* used on Unix machines */ + void *context; /* Win32 */ + void (*callback)(const char *msg, int len, void *context); /* Win32 */ + char api_opts[MAX_NAME_LENGTH]; + int api; /* set if we want API output, with api level */ + + /* Methods */ + STATUS_PKT() { memset(this, 0, sizeof(STATUS_PKT)); }; + ~STATUS_PKT() { }; +}; + +extern void output_status(STATUS_PKT *sp); + +/* + * Send to bsock (Director or Console) + */ +static void sendit(const char *msg, int len, STATUS_PKT *sp) +{ + if (sp->bs) { + BSOCK *user = sp->bs; + user->msg = check_pool_memory_size(user->msg, len+1); + memcpy(user->msg, msg, len+1); + user->msglen = len+1; + user->send(); + } else { + sp->callback(msg, len, sp->context); + } +} + +#ifndef STATUS_FUNCTIONS +#define STATUS_FUNCTIONS + +/* common to SD/FD */ +static void list_terminated_jobs(STATUS_PKT *sp) +{ + OutputWriter ow(sp->api_opts); + char dt[MAX_TIME_LENGTH], b1[30], b2[30]; + char level[10]; + struct s_last_job *je; + const char *msg; + char *p; + + msg = _("\nTerminated Jobs:\n"); + if (!sp->api) sendit(msg, strlen(msg), sp); + if (last_jobs->size() == 0) { + if (!sp->api) sendit("====\n", 5, sp); + return; + } + lock_last_jobs_list(); + msg = _(" JobId Level Files Bytes Status Finished Name \n"); + if (!sp->api) sendit(msg, strlen(msg), sp); + msg = _("===================================================================\n"); + if (!sp->api) sendit(msg, strlen(msg), sp); + if (sp->api > 1) { + p = ow.start_group("terminated"); + sendit(p, strlen(p), sp); + } + foreach_dlist(je, last_jobs) { + char JobName[MAX_NAME_LENGTH]; + const char *termstat; + char buf[1000]; + + bstrftime_nc(dt, sizeof(dt), je->end_time); + switch (je->JobType) { + case JT_ADMIN: + bstrncpy(level, "Admn", sizeof(level)); + break; + case JT_RESTORE: + bstrncpy(level, "Rest", sizeof(level)); + break; + default: + bstrncpy(level, job_level_to_str(je->JobLevel), sizeof(level)); + level[4] = 0; + break; + } + switch (je->JobStatus) { + case JS_Created: + termstat = _("Created"); + break; + case JS_FatalError: + case JS_ErrorTerminated: + termstat = _("Error"); + break; + case JS_Differences: + termstat = _("Diffs"); + break; + case JS_Canceled: + termstat = _("Cancel"); + break; + case JS_Terminated: + termstat = _("OK"); + break; + case JS_Warnings: + termstat = _("OK -- with warnings"); + break; + case JS_Incomplete: + termstat = _("Incomplete"); + break; + default: + termstat = _("Other"); + break; + } + bstrncpy(JobName, je->Job, sizeof(JobName)); + /* There are three periods after the Job name */ + char *p; + for (int i=0; i<3; i++) { + if ((p=strrchr(JobName, '.')) != NULL) { + *p = 0; + } + } + if (sp->api == 1) { + bsnprintf(buf, sizeof(buf), _("%6d\t%-6s\t%8s\t%10s\t%-7s\t%-8s\t%s\n"), + je->JobId, + level, + edit_uint64_with_commas(je->JobFiles, b1), + edit_uint64_with_suffix(je->JobBytes, b2), + termstat, + dt, JobName); + + } else if (sp->api > 1) { + p = ow.get_output(OT_CLEAR, + OT_START_OBJ, + OT_INT, "jobid", je->JobId, + OT_JOBLEVEL,"level", je->JobLevel, + OT_JOBTYPE, "type", je->JobType, + OT_JOBSTATUS,"status", je->JobStatus, + OT_STRING, "status_desc",termstat, + OT_SIZE, "jobbytes", je->JobBytes, + OT_INT32, "jobfiles", je->JobFiles, + OT_STRING, "job", je->Job, + OT_STRING, "name", JobName, + OT_UTIME, "starttime", je->start_time, + 
OT_UTIME, "endtime", je->end_time, + OT_INT, "errors", je->Errors, + OT_END_OBJ, + OT_END); + sendit(p, strlen(p), sp); + + + } else { + bsnprintf(buf, sizeof(buf), _("%6d %-6s %8s %10s %-7s %-8s %s\n"), + je->JobId, + level, + edit_uint64_with_commas(je->JobFiles, b1), + edit_uint64_with_suffix(je->JobBytes, b2), + termstat, + dt, JobName); + } + sendit(buf, strlen(buf), sp); + } + unlock_last_jobs_list(); + if (!sp->api) { + sendit("====\n", 5, sp); + } else if (sp->api > 1) { + p = ow.end_group(false); + sendit(p, strlen(p), sp); + } +} + +#if defined(HAVE_WIN32) +int bacstat = 0; + +#ifdef FILE_DAEMON +# define BAC_COMPONENT "Client" +#else +# define BAC_COMPONENT "Storage" +#endif + +/* Return a one line status for the tray monitor */ +char *bac_status(char *buf, int buf_len) +{ + JCR *njcr; + const char *termstat = _("Bacula " BAC_COMPONENT ": Idle"); + struct s_last_job *job; + int stat = 0; /* Idle */ + + if (!last_jobs) { + goto done; + } + Dmsg0(1000, "Begin bac_status jcr loop.\n"); + foreach_jcr(njcr) { + if (njcr->JobId != 0) { + stat = JS_Running; + termstat = _("Bacula " BAC_COMPONENT ": Running"); + break; + } + } + endeach_jcr(njcr); + + if (stat != 0) { + goto done; + } + if (last_jobs->size() > 0) { + job = (struct s_last_job *)last_jobs->last(); + stat = job->JobStatus; + switch (job->JobStatus) { + case JS_Canceled: + termstat = _("Bacula " BAC_COMPONENT ": Last Job Canceled"); + break; + case JS_ErrorTerminated: + case JS_FatalError: + termstat = _("Bacula " BAC_COMPONENT ": Last Job Failed"); + break; + default: + if (job->Errors) { + termstat = _("Bacula " BAC_COMPONENT ": Last Job had Warnings"); + } + break; + } + } + Dmsg0(1000, "End bac_status jcr loop.\n"); +done: + bacstat = stat; + if (buf) { + bstrncpy(buf, termstat, buf_len); + } + return buf; +} + +#endif /* HAVE_WIN32 */ + +#endif /* ! STATUS_FUNCTIONS */ + +#endif diff --git a/src/lib/tcpd.h b/src/lib/tcpd.h new file mode 100644 index 00000000..a8a7f946 --- /dev/null +++ b/src/lib/tcpd.h @@ -0,0 +1,227 @@ + /* + * @(#) tcpd.h 1.5 96/03/19 16:22:24 + * + * Author: Wietse Venema, Eindhoven University of Technology, The Netherlands. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/* Structure to describe one communications endpoint. */ + +#define STRING_LENGTH 128 /* hosts, users, processes */ + +struct host_info { + char name[STRING_LENGTH]; /* access via eval_hostname(host) */ + char addr[STRING_LENGTH]; /* access via eval_hostaddr(host) */ + struct sockaddr_in *sin; /* socket address or 0 */ + struct t_unitdata *unit; /* TLI transport address or 0 */ + struct request_info *request; /* for shared information */ +}; + +/* Structure to describe what we know about a service request. */ + +struct request_info { + int fd; /* socket handle */ + char user[STRING_LENGTH]; /* access via eval_user(request) */ + char daemon[STRING_LENGTH]; /* access via eval_daemon(request) */ + char pid[10]; /* access via eval_pid(request) */ + struct host_info client[1]; /* client endpoint info */ + struct host_info server[1]; /* server endpoint info */ + void (*sink) (); /* datagram sink function or 0 */ + void (*hostname) (); /* address to printable hostname */ + void (*hostaddr) (); /* address to printable address */ + void (*cleanup) (); /* cleanup function or 0 */ + struct netconfig *config; /* netdir handle */ +}; + +/* Common string operations. Less clutter should be more readable. 
*/ + +#define STRN_CPY(d,s,l) { strncpy((d),(s),(l)); (d)[(l)-1] = 0; } + +#define STRN_EQ(x,y,l) (strncasecmp((x),(y),(l)) == 0) +#define STRN_NE(x,y,l) (strncasecmp((x),(y),(l)) != 0) +#define STR_EQ(x,y) (strcasecmp((x),(y)) == 0) +#define STR_NE(x,y) (strcasecmp((x),(y)) != 0) + + /* + * Initially, all above strings have the empty value. Information that + * cannot be determined at runtime is set to "unknown", so that we can + * distinguish between `unavailable' and `not yet looked up'. A hostname + * that we do not believe in is set to "paranoid". + */ + +#define STRING_UNKNOWN "unknown" /* lookup failed */ +#define STRING_PARANOID "paranoid" /* hostname conflict */ + +extern char unknown[]; +extern char paranoid[]; + +#define HOSTNAME_KNOWN(s) (STR_NE((s),unknown) && STR_NE((s),paranoid)) + +#define NOT_INADDR(s) (s[strspn(s,"01234567890./")] != 0) + +/* Global functions. */ + +#if defined(TLI) || defined(PTX) || defined(TLI_SEQUENT) +extern void fromhost(); /* get/validate client host info */ +#else +#define fromhost sock_host /* no TLI support needed */ +#endif + +extern int hosts_access(struct request_info *); /* access control */ +extern void shell_cmd(); /* execute shell command */ +extern char *percent_x(); /* do % expansion */ +extern void rfc931(); /* client name from RFC 931 daemon */ +extern void clean_exit(); /* clean up and exit */ +extern void refuse(); /* clean up and exit */ +extern char *xgets(); /* fgets() on steroids */ +extern char *split_at(); /* strchr() and split */ +extern unsigned long dot_quad_addr(); /* restricted inet_addr() */ + +/* Global variables. */ + +extern int allow_severity; /* for connection logging */ +extern int deny_severity; /* for connection logging */ +extern char *hosts_allow_table; /* for verification mode redirection */ +extern char *hosts_deny_table; /* for verification mode redirection */ +extern int hosts_access_verbose; /* for verbose matching mode */ +extern int rfc931_timeout; /* user lookup timeout */ +extern int resident; /* > 0 if resident process */ + + /* + * Routines for controlled initialization and update of request structure + * attributes. Each attribute has its own key. + */ + +#ifdef __STDC__ +extern struct request_info *request_init(struct request_info *,...); +extern struct request_info *request_set(struct request_info *,...); +#else +extern struct request_info *request_init(); /* initialize request */ +extern struct request_info *request_set(); /* update request structure */ +#endif + +#define RQ_FILE 1 /* file descriptor */ +#define RQ_DAEMON 2 /* server process (argv[0]) */ +#define RQ_USER 3 /* client user name */ +#define RQ_CLIENT_NAME 4 /* client host name */ +#define RQ_CLIENT_ADDR 5 /* client host address */ +#define RQ_CLIENT_SIN 6 /* client endpoint (internal) */ +#define RQ_SERVER_NAME 7 /* server host name */ +#define RQ_SERVER_ADDR 8 /* server host address */ +#define RQ_SERVER_SIN 9 /* server endpoint (internal) */ + + /* + * Routines for delayed evaluation of request attributes. Each attribute + * type has its own access method. The trivial ones are implemented by + * macros. The other ones are wrappers around the transport-specific host + * name, address, and client user lookup methods. The request_info and + * host_info structures serve as caches for the lookup results. 
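+ *
+ * Canonical calling sequence for these routines (the daemon name and the
+ * sockfd descriptor are placeholders):
+ *
+ *    struct request_info req;
+ *
+ *    request_init(&req, RQ_DAEMON, "bacula-fd", RQ_FILE, sockfd, 0);
+ *    fromhost(&req);                  // fill in the client endpoint info
+ *    if (!hosts_access(&req)) {
+ *       refuse(&req);                 // logs the refusal and does not return
+ *    }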
+ */ + +extern char *eval_user(); /* client user */ +extern char *eval_hostname(); /* printable hostname */ +extern char *eval_hostaddr(); /* printable host address */ +extern char *eval_hostinfo(); /* host name or address */ +extern char *eval_client(struct request_info *); /* whatever is available */ +extern char *eval_server(); /* whatever is available */ +#define eval_daemon(r) ((r)->daemon) /* daemon process name */ +#define eval_pid(r) ((r)->pid) /* process id */ + +/* Socket-specific methods, including DNS hostname lookups. */ + +extern void sock_host(struct request_info *); +extern void sock_hostname(); /* translate address to hostname */ +extern void sock_hostaddr(); /* address to printable address */ +#define sock_methods(r) \ + { (r)->hostname = sock_hostname; (r)->hostaddr = sock_hostaddr; } + +/* The System V Transport-Level Interface (TLI) interface. */ + +#if defined(TLI) || defined(PTX) || defined(TLI_SEQUENT) +extern void tli_host(); /* look up endpoint addresses etc. */ +#endif + + /* + * Problem reporting interface. Additional file/line context is reported + * when available. The jump buffer (tcpd_buf) is not declared here, or + * everyone would have to include . + */ + +#ifdef __STDC__ +extern void tcpd_warn(char *, ...); /* report problem and proceed */ +extern void tcpd_jump(char *, ...); /* report problem and jump */ +#else +extern void tcpd_warn(); +extern void tcpd_jump(); +#endif + +struct tcpd_context { + char *file; /* current file */ + int line; /* current line */ +}; +extern struct tcpd_context tcpd_context; + + /* + * While processing access control rules, error conditions are handled by + * jumping back into the hosts_access() routine. This is cleaner than + * checking the return value of each and every silly little function. The + * (-1) returns are here because zero is already taken by longjmp(). + */ + +#define AC_PERMIT 1 /* permit access */ +#define AC_DENY (-1) /* deny_access */ +#define AC_ERROR AC_DENY /* XXX */ + + /* + * In verification mode an option function should just say what it would do, + * instead of really doing it. An option function that would not return + * should clear the dry_run flag to inform the caller of this unusual + * behavior. + */ + +extern void process_options(); /* execute options */ +extern int dry_run; /* verification flag */ + +/* Bug workarounds. 
*/ + +#ifdef INET_ADDR_BUG /* inet_addr() returns struct */ +#define inet_addr fix_inet_addr +extern long fix_inet_addr(); +#endif + +#ifdef BROKEN_FGETS /* partial reads from sockets */ +#define fgets fix_fgets +extern char *fix_fgets(); +#endif + +#ifdef RECVFROM_BUG /* no address family info */ +#define recvfrom fix_recvfrom +extern int fix_recvfrom(); +#endif + +#ifdef GETPEERNAME_BUG /* claims success with UDP */ +#define getpeername fix_getpeername +extern int fix_getpeername(); +#endif + +#ifdef SOLARIS_24_GETHOSTBYNAME_BUG /* lists addresses as aliases */ +#define gethostbyname fix_gethostbyname +extern struct hostent *fix_gethostbyname(); +#endif + +#ifdef USE_STRSEP /* libc calls strtok() */ +#define strtok fix_strtok +extern char *fix_strtok(); +#endif + +#ifdef LIBC_CALLS_STRTOK /* libc calls strtok() */ +#define strtok my_strtok +extern char *my_strtok(); +#endif + +#ifdef __cplusplus +} +#endif diff --git a/src/lib/tls.c b/src/lib/tls.c new file mode 100644 index 00000000..604ccaee --- /dev/null +++ b/src/lib/tls.c @@ -0,0 +1,775 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * tls.c TLS support functions + * + * Author: Landon Fuller + * + * This file was contributed to the Bacula project by Landon Fuller + * and Three Rings Design, Inc. + * + * Three Rings Design, Inc. has been granted a perpetual, worldwide, + * non-exclusive, no-charge, royalty-free, irrevocable copyright + * license to reproduce, prepare derivative works of, publicly + * display, publicly perform, sublicense, and distribute the original + * work contributed by Three Rings Design, Inc. and its employees to + * the Bacula project in source or object form. + * + * If you wish to license contributions from Three Rings Design, Inc, + * under an alternate open source license please contact + * Landon Fuller . + */ + + +#include "bacula.h" +#include + + +#ifdef HAVE_TLS /* Is TLS enabled? */ + +#ifdef HAVE_OPENSSL /* How about OpenSSL? */ + +#include "openssl-compat.h" + +/* No anonymous ciphers, no <128 bit ciphers, no export ciphers, no MD5 ciphers */ +#define TLS_DEFAULT_CIPHERS "ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH" + +/* TLS Context Structure */ +struct TLS_Context { + SSL_CTX *openssl; + CRYPTO_PEM_PASSWD_CB *pem_callback; + const void *pem_userdata; + bool tls_enable; + bool tls_require; +}; + +struct TLS_Connection { + SSL *openssl; + pthread_mutex_t wlock; /* make openssl_bsock_readwrite() atomic when writing */ + pthread_mutex_t rwlock; /* only one SSL_read() or SSL_write() at a time */ +}; + +/* + * OpenSSL certificate verification callback. + * OpenSSL has already performed internal certificate verification. + * We just report any errors that occured. 
+ */ +static int openssl_verify_peer(int ok, X509_STORE_CTX *store) +{ + if (!ok) { + X509 *cert = X509_STORE_CTX_get_current_cert(store); + int depth = X509_STORE_CTX_get_error_depth(store); + int err = X509_STORE_CTX_get_error(store); + char issuer[256]; + char subject[256]; + + if (err == X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT || + err == X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN) + { + /* It seems that the error can be also + * 24 X509_V_ERR_INVALID_CA: invalid CA certificate + * But it's not very specific... + */ + Jmsg0(NULL, M_ERROR, 0, _("CA certificate is self signed. With OpenSSL 1.1, enforce basicConstraints = CA:true in the certificate creation to avoid this issue\n")); + } + X509_NAME_oneline(X509_get_issuer_name(cert), issuer, 256); + X509_NAME_oneline(X509_get_subject_name(cert), subject, 256); + + Jmsg5(NULL, M_ERROR, 0, _("Error with certificate at depth: %d, issuer = %s," + " subject = %s, ERR=%d:%s\n"), depth, issuer, + subject, err, X509_verify_cert_error_string(err)); + + } + + return ok; +} + +/* Dispatch user PEM encryption callbacks */ +static int tls_pem_callback_dispatch (char *buf, int size, int rwflag, void *userdata) +{ + TLS_CONTEXT *ctx = (TLS_CONTEXT *)userdata; + return (ctx->pem_callback(buf, size, ctx->pem_userdata)); +} + +/* + * Create a new TLS_CONTEXT instance. + * Returns: Pointer to TLS_CONTEXT instance on success + * NULL on failure; + */ +TLS_CONTEXT *new_tls_context(const char *ca_certfile, const char *ca_certdir, + const char *certfile, const char *keyfile, + CRYPTO_PEM_PASSWD_CB *pem_callback, + const void *pem_userdata, const char *dhfile, + bool verify_peer) +{ + TLS_CONTEXT *ctx; + BIO *bio; + DH *dh; + + ctx = (TLS_CONTEXT *)malloc(sizeof(TLS_CONTEXT)); + + /* Allocate our OpenSSL TLS Context */ +#if (OPENSSL_VERSION_NUMBER >= 0x10100000L) + /* Allows SSLv3, TLSv1, TLSv1.1 and TLSv1.2 protocols */ + ctx->openssl = SSL_CTX_new(TLS_method()); + +#else + /* Allows most all protocols */ + ctx->openssl = SSL_CTX_new(SSLv23_method()); + +#endif + + /* Use SSL_OP_ALL to turn on all "rather harmless" workarounds that + * OpenSSL offers + */ + SSL_CTX_set_options(ctx->openssl, SSL_OP_ALL); + + /* Now disable old broken SSLv3 and SSLv2 protocols */ + SSL_CTX_set_options(ctx->openssl, SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3); + + if (!ctx->openssl) { + openssl_post_errors(M_FATAL, _("Error initializing SSL context")); + goto err; + } + + /* Set up pem encryption callback */ + if (pem_callback) { + ctx->pem_callback = pem_callback; + ctx->pem_userdata = pem_userdata; + } else { + ctx->pem_callback = crypto_default_pem_callback; + ctx->pem_userdata = NULL; + } + SSL_CTX_set_default_passwd_cb(ctx->openssl, tls_pem_callback_dispatch); + SSL_CTX_set_default_passwd_cb_userdata(ctx->openssl, (void *) ctx); + + /* + * Set certificate verification paths. This requires that at least one + * value be non-NULL + */ + if (ca_certfile || ca_certdir) { + if (!SSL_CTX_load_verify_locations(ctx->openssl, ca_certfile, ca_certdir)) { + openssl_post_errors(M_FATAL, _("Error loading certificate verification stores")); + goto err; + } + } else if (verify_peer) { + /* At least one CA is required for peer verification */ + Jmsg0(NULL, M_ERROR, 0, _("Either a certificate file or a directory must be" + " specified as a verification store\n")); + goto err; + } + + /* + * Load our certificate file, if available. This file may also contain a + * private key, though this usage is somewhat unusual. 
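+ *
+ * (Typical call, with purely illustrative paths; as checked above, at
+ * least one CA location must be given when verify_peer is requested.)
+ *
+ *    TLS_CONTEXT *ctx = new_tls_context("/etc/bacula/ssl/ca.pem",  // ca_certfile
+ *                                       NULL,                      // ca_certdir
+ *                                       "/etc/bacula/ssl/fd.pem",  // certfile
+ *                                       "/etc/bacula/ssl/fd.key",  // keyfile
+ *                                       NULL, NULL,                // pem callback/userdata
+ *                                       NULL,                      // dhfile
+ *                                       true);                     // verify_peer
+ *    if (!ctx) {
+ *       // errors were already queued via openssl_post_errors()
+ *    }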
+ */ + if (certfile) { + if (!SSL_CTX_use_certificate_chain_file(ctx->openssl, certfile)) { + openssl_post_errors(M_FATAL, _("Error loading certificate file")); + goto err; + } + } + + /* Load our private key. */ + if (keyfile) { + if (!SSL_CTX_use_PrivateKey_file(ctx->openssl, keyfile, SSL_FILETYPE_PEM)) { + openssl_post_errors(M_FATAL, _("Error loading private key")); + goto err; + } + } + + /* Load Diffie-Hellman Parameters. */ + if (dhfile) { + if (!(bio = BIO_new_file(dhfile, "r"))) { + openssl_post_errors(M_FATAL, _("Unable to open DH parameters file")); + goto err; + } + dh = PEM_read_bio_DHparams(bio, NULL, NULL, NULL); + BIO_free(bio); + if (!dh) { + openssl_post_errors(M_FATAL, _("Unable to load DH parameters from specified file")); + goto err; + } + if (!SSL_CTX_set_tmp_dh(ctx->openssl, dh)) { + openssl_post_errors(M_FATAL, _("Failed to set TLS Diffie-Hellman parameters")); + DH_free(dh); + goto err; + } + /* Enable Single-Use DH for Ephemeral Keying */ + SSL_CTX_set_options(ctx->openssl, SSL_OP_SINGLE_DH_USE); + } + + if (SSL_CTX_set_cipher_list(ctx->openssl, TLS_DEFAULT_CIPHERS) != 1) { + Jmsg0(NULL, M_ERROR, 0, + _("Error setting cipher list, no valid ciphers available\n")); + goto err; + } + + /* Verify Peer Certificate */ + if (verify_peer) { + /* SSL_VERIFY_FAIL_IF_NO_PEER_CERT has no effect in client mode */ + SSL_CTX_set_verify(ctx->openssl, + SSL_VERIFY_PEER|SSL_VERIFY_FAIL_IF_NO_PEER_CERT, + openssl_verify_peer); + } + return ctx; + +err: + /* Clean up after ourselves */ + if(ctx->openssl) { + SSL_CTX_free(ctx->openssl); + } + free(ctx); + return NULL; +} + +/* + * Free TLS_CONTEXT instance + */ +void free_tls_context(TLS_CONTEXT *ctx) +{ + SSL_CTX_free(ctx->openssl); + free(ctx); +} + +bool get_tls_require(TLS_CONTEXT *ctx) +{ + return ctx->tls_require; +} + +bool get_tls_enable(TLS_CONTEXT *ctx) +{ + return ctx->tls_enable; +} + + +/* + * Verifies a list of common names against the certificate + * commonName attribute. + * Returns: true on success + * false on failure + */ +bool tls_postconnect_verify_cn(JCR *jcr, TLS_CONNECTION *tls, alist *verify_list) +{ + SSL *ssl = tls->openssl; + X509 *cert; + X509_NAME *subject; + bool auth_success = false; + char data[256]; + + /* Check if peer provided a certificate */ + if (!(cert = SSL_get_peer_certificate(ssl))) { + Qmsg0(jcr, M_ERROR, 0, _("Peer failed to present a TLS certificate\n")); + return false; + } + + if ((subject = X509_get_subject_name(cert)) != NULL) { + if (X509_NAME_get_text_by_NID(subject, NID_commonName, data, sizeof(data)) > 0) { + char *cn; + /* NULL terminate data */ + data[255] = 0; + + /* Try all the CNs in the list */ + foreach_alist(cn, verify_list) { + if (strcasecmp(data, cn) == 0) { + auth_success = true; + } + } + } + } + + X509_free(cert); + return auth_success; +} + +/* + * Verifies a peer's hostname against the subjectAltName and commonName + * attributes. 
+ * Returns: true on success + * false on failure + */ +bool tls_postconnect_verify_host(JCR *jcr, TLS_CONNECTION *tls, const char *host) +{ + SSL *ssl = tls->openssl; + X509 *cert; + X509_NAME *subject; + bool auth_success = false; + int extensions; + int i, j; + const char *pval, *phost; + + int cnLastPos = -1; + X509_NAME_ENTRY *neCN; + ASN1_STRING *asn1CN; + + /* Check if peer provided a certificate */ + if (!(cert = SSL_get_peer_certificate(ssl))) { + Qmsg1(jcr, M_ERROR, 0, + _("Peer %s failed to present a TLS certificate\n"), host); + Dmsg1(250, _("Peer %s failed to present a TLS certificate\n"), host); + return false; + } + + /* Check subjectAltName extensions first */ + if ((extensions = X509_get_ext_count(cert)) > 0) { + for (i = 0; i < extensions; i++) { + X509_EXTENSION *ext; + const char *extname; + + ext = X509_get_ext(cert, i); + extname = OBJ_nid2sn(OBJ_obj2nid(X509_EXTENSION_get_object(ext))); + + if (strcmp(extname, "subjectAltName") == 0) { +#ifdef HAVE_OPENSSLv1 + const X509V3_EXT_METHOD *method; +#else + X509V3_EXT_METHOD *method; +#endif + STACK_OF(CONF_VALUE) *val; + CONF_VALUE *nval; + void *extstr = NULL; + const unsigned char *ext_value_data; + const ASN1_STRING *asn1_ext_val; + + /* Get x509 extension method structure */ + if (!(method = X509V3_EXT_get(ext))) { + break; + } + + asn1_ext_val = X509_EXTENSION_get_data(ext); + ext_value_data = ASN1_STRING_get0_data(asn1_ext_val); + + if (method->it) { + /* New style ASN1 */ + + /* Decode ASN1 item in data */ + extstr = ASN1_item_d2i(NULL, &ext_value_data, ASN1_STRING_length(asn1_ext_val), + ASN1_ITEM_ptr(method->it)); + } else { + /* Old style ASN1 */ + + /* Decode ASN1 item in data */ + extstr = method->d2i(NULL, &ext_value_data, ASN1_STRING_length(asn1_ext_val)); + } + + /* Iterate through to find the dNSName field(s) */ + val = method->i2v(method, extstr, NULL); + + /* dNSName shortname is "DNS" */ + Dmsg0(250, "Check DNS name\n"); + for (j = 0; j < sk_CONF_VALUE_num(val); j++) { + nval = sk_CONF_VALUE_value(val, j); + if (strcmp(nval->name, "DNS") == 0) { + if (strncasecmp(nval->value, "*.", 2) == 0) { + Dmsg0(250, "Wildcard Certificate\n"); + pval = strstr(nval->value, "."); + phost = strstr(host, "."); + if (pval && phost && (strcasecmp(pval, phost) == 0)) { + auth_success = true; + goto success; + } + } else if (strcasecmp(nval->value, host) == 0) { + auth_success = true; + goto success; + } + Dmsg2(250, "No DNS name match. Host=%s cert=%s\n", host, nval->value); + } + } + } + } + } + + /* Try verifying against the subject name */ + if (!auth_success) { + Dmsg0(250, "Check subject name name\n"); + if ((subject = X509_get_subject_name(cert)) != NULL) { + /* Loop through all CNs */ + for (;;) { + cnLastPos = X509_NAME_get_index_by_NID(subject, NID_commonName, cnLastPos); + if (cnLastPos == -1) { + break; + } + neCN = X509_NAME_get_entry(subject, cnLastPos); + asn1CN = X509_NAME_ENTRY_get_data(neCN); + if (strncasecmp((const char*)asn1CN->data, "*.", 2) == 0) { + /* wildcard certificate */ + Dmsg0(250, "Wildcard Certificate\n"); + pval = strstr((const char*)asn1CN->data, "."); + phost = strstr(host, "."); + if (pval && phost && (strcasecmp(pval, phost) == 0)) { + auth_success = true; + goto success; + } + } else if (strcasecmp((const char*)asn1CN->data, host) == 0) { + auth_success = true; + break; + } + Dmsg2(250, "No subject name match. Host=%s cert=%s\n", host, (const char*)asn1CN->data); + } + } + } + +success: + X509_free(cert); + return auth_success; +} + +/* + * Create a new TLS_CONNECTION instance. 
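+ *
+ * (Sketch of how the verification helpers above are used once the
+ * handshake has completed; jcr, bsock and the host name are stand-ins.)
+ *
+ *    if (tls_bsock_connect(bsock) &&
+ *        !tls_postconnect_verify_host(jcr, bsock->tls, "fd.example.org")) {
+ *       // certificate does not match the expected host: tear the session down
+ *    }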
+ * + * Returns: Pointer to TLS_CONNECTION instance on success + * NULL on failure; + */ +TLS_CONNECTION *new_tls_connection(TLS_CONTEXT *ctx, int fd) +{ + BIO *bio; + + /* + * Create a new BIO and assign the fd. + * The caller will remain responsible for closing the associated fd + */ + bio = BIO_new(BIO_s_socket()); + if (!bio) { + /* Not likely, but never say never */ + openssl_post_errors(M_FATAL, _("Error creating file descriptor-based BIO")); + return NULL; /* Nothing allocated, nothing to clean up */ + } + BIO_set_fd(bio, fd, BIO_NOCLOSE); + + /* Allocate our new tls connection */ + TLS_CONNECTION *tls = (TLS_CONNECTION *)malloc(sizeof(TLS_CONNECTION)); + + /* Create the SSL object and attach the socket BIO */ + if ((tls->openssl = SSL_new(ctx->openssl)) == NULL) { + /* Not likely, but never say never */ + openssl_post_errors(M_FATAL, _("Error creating new SSL object")); + goto err; + } + + SSL_set_bio(tls->openssl, bio, bio); + + /* Non-blocking partial writes */ + SSL_set_mode(tls->openssl, SSL_MODE_ENABLE_PARTIAL_WRITE|SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER); + + pthread_mutex_init(&tls->wlock, NULL); + pthread_mutex_init(&tls->rwlock, NULL); + + return tls; + +err: + /* Clean up */ + BIO_free(bio); + SSL_free(tls->openssl); + free(tls); + return NULL; +} + +/* + * Free TLS_CONNECTION instance + */ +void free_tls_connection(TLS_CONNECTION *tls) +{ + pthread_mutex_destroy(&tls->rwlock); + pthread_mutex_destroy(&tls->wlock); + SSL_free(tls->openssl); + free(tls); +} + +/* Does all the manual labor for tls_bsock_accept() and tls_bsock_connect() */ +static inline bool openssl_bsock_session_start(BSOCK *bsock, bool server) +{ + TLS_CONNECTION *tls = bsock->tls; + int err; + int flags; + int stat = true; + + /* Ensure that socket is non-blocking */ + flags = bsock->set_nonblocking(); + + /* start timer */ + bsock->timer_start = watchdog_time; + bsock->clear_timed_out(); + bsock->set_killable(false); + + for (;;) { + if (server) { + err = SSL_accept(tls->openssl); + } else { + err = SSL_connect(tls->openssl); + } + + /* Handle errors */ + switch (SSL_get_error(tls->openssl, err)) { + case SSL_ERROR_NONE: + stat = true; + goto cleanup; + case SSL_ERROR_ZERO_RETURN: + /* TLS connection was cleanly shut down */ + openssl_post_errors(bsock->get_jcr(), M_FATAL, _("Connect failure")); + stat = false; + goto cleanup; + case SSL_ERROR_WANT_READ: + /* Block until we can read */ + fd_wait_data(bsock->m_fd, WAIT_READ, 10, 0); + break; + case SSL_ERROR_WANT_WRITE: + /* Block until we can write */ + fd_wait_data(bsock->m_fd, WAIT_WRITE, 10, 0); + break; + default: + /* Socket Error Occurred */ + openssl_post_errors(bsock->get_jcr(), M_FATAL, _("Connect failure")); + stat = false; + goto cleanup; + } + + if (bsock->is_timed_out()) { + goto cleanup; + } + } + +cleanup: + /* Restore saved flags */ + bsock->restore_blocking(flags); + /* Clear timer */ + bsock->timer_start = 0; + bsock->set_killable(true); + + return stat; +} + +/* + * Initiates a TLS connection with the server. + * Returns: true on success + * false on failure + */ +bool tls_bsock_connect(BSOCK *bsock) +{ + /* SSL_connect(bsock->tls) */ + return openssl_bsock_session_start(bsock, false); +} + +/* + * Listens for a TLS connection from a client. 
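+ * A hedged server-side sketch (the allowed-CN alist is an illustrative
+ * assumption): after a successful accept the peer is normally checked
+ * with one of the post-connect verifiers, e.g.
+ *    if (tls_bsock_accept(bsock) &&
+ *        tls_postconnect_verify_cn(jcr, bsock->tls, allowed_cns)) {
+ *       ... peer authenticated ...
+ *    }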
+ * Returns: true on success + * false on failure + */ +bool tls_bsock_accept(BSOCK *bsock) +{ + /* SSL_accept(bsock->tls) */ + return openssl_bsock_session_start(bsock, true); +} + +/* + * Shutdown TLS_CONNECTION instance + */ +void tls_bsock_shutdown(BSOCKCORE *bsock) +{ + /* + * SSL_shutdown must be called twice to fully complete the process - + * The first time to initiate the shutdown handshake, and the second to + * receive the peer's reply. + * + * In addition, if the underlying socket is blocking, SSL_shutdown() + * will not return until the current stage of the shutdown process has + * completed or an error has occured. By setting the socket blocking + * we can avoid the ugly for()/switch()/select() loop. + */ + int err; + + btimer_t *tid; + + /* Set socket blocking for shutdown */ + bsock->set_blocking(); + + tid = start_bsock_timer(bsock, 60 * 2); + err = SSL_shutdown(bsock->tls->openssl); + stop_bsock_timer(tid); + if (err == 0) { + /* Complete shutdown */ + tid = start_bsock_timer(bsock, 60 * 2); + err = SSL_shutdown(bsock->tls->openssl); + stop_bsock_timer(tid); + } + + + switch (SSL_get_error(bsock->tls->openssl, err)) { + case SSL_ERROR_NONE: + break; + case SSL_ERROR_ZERO_RETURN: + /* TLS connection was shut down on us via a TLS protocol-level closure */ + openssl_post_errors(bsock->get_jcr(), M_ERROR, _("TLS shutdown failure.")); + break; + default: + /* Socket Error Occurred */ + openssl_post_errors(bsock->get_jcr(), M_ERROR, _("TLS shutdown failure.")); + break; + } +} + +/* Does all the manual labor for tls_bsock_readn() and tls_bsock_writen() */ +static inline int openssl_bsock_readwrite(BSOCK *bsock, char *ptr, int nbytes, bool write) +{ + TLS_CONNECTION *tls = bsock->tls; + int flags; + int nleft = 0; + int nwritten = 0; + + /* Ensure that socket is non-blocking */ + flags = bsock->set_nonblocking(); + + /* start timer */ + bsock->timer_start = watchdog_time; + bsock->clear_timed_out(); + bsock->set_killable(false); + + nleft = nbytes; + + if (write) { + pthread_mutex_lock(&tls->wlock); + } + while (nleft > 0) { + + pthread_mutex_lock(&tls->rwlock); + if (write) { + nwritten = SSL_write(tls->openssl, ptr, nleft); + } else { + nwritten = SSL_read(tls->openssl, ptr, nleft); + } + pthread_mutex_unlock(&tls->rwlock); + + /* Handle errors */ + switch (SSL_get_error(tls->openssl, nwritten)) { + case SSL_ERROR_NONE: + nleft -= nwritten; + if (nleft) { + ptr += nwritten; + } + break; + + case SSL_ERROR_SYSCALL: + if (nwritten == -1) { + if (errno == EINTR) { + continue; + } + if (errno == EAGAIN) { + bmicrosleep(0, 20000); /* try again in 20 ms */ + continue; + } + } + openssl_post_errors(bsock->get_jcr(), M_FATAL, _("TLS read/write failure.")); + goto cleanup; + + case SSL_ERROR_WANT_READ: + /* Block until we can read */ + fd_wait_data(bsock->m_fd, WAIT_READ, 10, 0); + break; + + case SSL_ERROR_WANT_WRITE: + /* Block until we can read */ + fd_wait_data(bsock->m_fd, WAIT_WRITE, 10, 0); + break; + + case SSL_ERROR_ZERO_RETURN: + /* TLS connection was cleanly shut down */ + /* Fall through wanted */ + default: + /* Socket Error Occured */ + openssl_post_errors(bsock->get_jcr(), M_FATAL, _("TLS read/write failure.")); + goto cleanup; + } + + /* Everything done? 
*/ + if (nleft == 0) { + goto cleanup; + } + + /* Timeout/Termination, let's take what we can get */ + if (bsock->is_timed_out() || bsock->is_terminated()) { + goto cleanup; + } + } + +cleanup: + if (write) { + pthread_mutex_unlock(&tls->wlock); + } + /* Restore saved flags */ + bsock->restore_blocking(flags); + + /* Clear timer */ + bsock->timer_start = 0; + bsock->set_killable(true); + return nbytes - nleft; +} + + +int tls_bsock_writen(BSOCK *bsock, char *ptr, int32_t nbytes) +{ + /* SSL_write(bsock->tls->openssl, ptr, nbytes) */ + return openssl_bsock_readwrite(bsock, ptr, nbytes, true); +} + +int tls_bsock_readn(BSOCK *bsock, char *ptr, int32_t nbytes) +{ + /* SSL_read(bsock->tls->openssl, ptr, nbytes) */ + return openssl_bsock_readwrite(bsock, ptr, nbytes, false); +} + +/* test if 4 bytes can be read without "blocking" */ +bool tls_bsock_probe(BSOCKCORE *bsock) +{ + int32_t pktsiz; + return SSL_peek(bsock->tls->openssl, &pktsiz, sizeof(pktsiz))==sizeof(pktsiz); +} + +#else /* HAVE_OPENSSL */ +# error No TLS implementation available. +#endif /* !HAVE_OPENSSL */ + + +#else /* TLS NOT enabled, dummy routines substituted */ + + +/* Dummy routines */ +TLS_CONTEXT *new_tls_context(const char *ca_certfile, const char *ca_certdir, + const char *certfile, const char *keyfile, + CRYPTO_PEM_PASSWD_CB *pem_callback, + const void *pem_userdata, const char *dhfile, + bool verify_peer) +{ + return NULL; +} +void free_tls_context(TLS_CONTEXT *ctx) { } + +void tls_bsock_shutdown(BSOCKCORE *bsock) { } + +void free_tls_connection(TLS_CONNECTION *tls) { } + +bool get_tls_require(TLS_CONTEXT *ctx) +{ + return false; +} + +bool get_tls_enable(TLS_CONTEXT *ctx) +{ + return false; +} + +#endif /* HAVE_TLS */ diff --git a/src/lib/tls.h b/src/lib/tls.h new file mode 100644 index 00000000..45fdd192 --- /dev/null +++ b/src/lib/tls.h @@ -0,0 +1,51 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * tls.h TLS support functions + * + * Author: Landon Fuller + * + * This file was contributed to the Bacula project by Landon Fuller + * and Three Rings Design, Inc. + * + * Three Rings Design, Inc. has been granted a perpetual, worldwide, + * non-exclusive, no-charge, royalty-free, irrevocable copyright + * license to reproduce, prepare derivative works of, publicly + * display, publicly perform, sublicense, and distribute the original + * work contributed by Three Rings Design, Inc. and its employees to + * the Bacula project in source or object form. + * + * If you wish to license contributions from Three Rings Design, Inc, + * under an alternate open source license please contact + * Landon Fuller . + */ + +#ifndef __TLS_H_ +#define __TLS_H_ + +/* + * Opaque TLS Context Structure. + * New TLS Connections are manufactured from this context. 
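+ * A typical lifetime, sketched from the routines defined in tls.c, is:
+ *    new_tls_context() -> new_tls_connection() ->
+ *    tls_bsock_connect() or tls_bsock_accept() -> tls_bsock_shutdown() ->
+ *    free_tls_connection() -> free_tls_context()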
+ */ +typedef struct TLS_Context TLS_CONTEXT; + +/* Opaque TLS Connection Structure */ +typedef struct TLS_Connection TLS_CONNECTION; + +#endif /* __TLS_H_ */ diff --git a/src/lib/tree.c b/src/lib/tree.c new file mode 100644 index 00000000..572d9cd8 --- /dev/null +++ b/src/lib/tree.c @@ -0,0 +1,604 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Directory tree build/traverse routines + * + * Kern Sibbald, June MMII + * +*/ + + +#include "bacula.h" +#include "findlib/find.h" + +#define B_PAGE_SIZE 4096 +#define MAX_PAGES 2400 +#define MAX_BUF_SIZE (MAX_PAGES * B_PAGE_SIZE) /* approx 10MB */ + +/* Forward referenced subroutines */ +static TREE_NODE *search_and_insert_tree_node(char *fname, int type, + TREE_ROOT *root, TREE_NODE *parent); +static char *tree_alloc(TREE_ROOT *root, int size); + +/* + * NOTE !!!!! we turn off Debug messages for performance reasons. + */ +#undef Dmsg0 +#undef Dmsg1 +#undef Dmsg2 +#undef Dmsg3 +#define Dmsg0(n,f) +#define Dmsg1(n,f,a1) +#define Dmsg2(n,f,a1,a2) +#define Dmsg3(n,f,a1,a2,a3) + +/* + * This subroutine gets a big buffer. + */ +static void malloc_buf(TREE_ROOT *root, int size) +{ + struct s_mem *mem; + + mem = (struct s_mem *)malloc(size); + root->total_size += size; + root->blocks++; + mem->next = root->mem; + root->mem = mem; + mem->mem = mem->first; + mem->rem = (char *)mem + size - mem->mem; + Dmsg2(200, "malloc buf size=%d rem=%d\n", size, mem->rem); +} + + +/* + * Note, we allocate a big buffer in the tree root + * from which we allocate nodes. This runs more + * than 100 times as fast as directly using malloc() + * for each of the nodes. + */ +TREE_ROOT *new_tree(int count) +{ + TREE_ROOT *root; + uint32_t size; + + if (count < 1000) { /* minimum tree size */ + count = 1000; + } + root = (TREE_ROOT *)malloc(sizeof(TREE_ROOT)); + bmemset(root, 0, sizeof(TREE_ROOT)); + /* Assume filename + node = 40 characters average length */ + size = count * (BALIGN(sizeof(TREE_NODE)) + 40); + if (count > 1000000 || size > (MAX_BUF_SIZE / 2)) { + size = MAX_BUF_SIZE; + } + Dmsg2(400, "count=%d size=%d\n", count, size); + malloc_buf(root, size); + root->cached_path_len = -1; + root->cached_path = get_pool_memory(PM_FNAME); + root->type = TN_ROOT; + root->fname = ""; + root->can_access = 1; + HL_ENTRY* entry = NULL; + root->hardlinks.init(entry, &entry->link, 0); + return root; +} + +/* + * Create a new tree node. + */ +static TREE_NODE *new_tree_node(TREE_ROOT *root) +{ + TREE_NODE *node; + int size = sizeof(TREE_NODE); + node = (TREE_NODE *)tree_alloc(root, size); + bmemset(node, 0, size); + node->delta_seq = -1; + node->can_access = 1; + return node; +} + +/* + * This routine can be called to release the + * previously allocated tree node. 
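+ * (it simply rolls back the most recent allocation in the current s_mem
+ * buffer; tree_remove_node() below verifies that the node really is the
+ * last allocation before calling this)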
+ */ +static void free_tree_node(TREE_ROOT *root) +{ + int asize = BALIGN(sizeof(TREE_NODE)); + root->mem->rem += asize; + root->mem->mem -= asize; +} + +void tree_remove_node(TREE_ROOT *root, TREE_NODE *node) +{ + int asize = BALIGN(sizeof(TREE_NODE)); + node->parent->child.remove(node); + if ((root->mem->mem - asize) == (char *)node) { + free_tree_node(root); + } else { + Dmsg0(0, "Can't release tree node\n"); + } +} + +/* + * Allocate bytes for filename in tree structure. + * Keep the pointers properly aligned by allocating + * sizes that are aligned. + */ +static char *tree_alloc(TREE_ROOT *root, int size) +{ + char *buf; + int asize = BALIGN(size); + + if (root->mem->rem < asize) { + uint32_t mb_size; + if (root->total_size >= (MAX_BUF_SIZE / 2)) { + mb_size = MAX_BUF_SIZE; + } else { + mb_size = MAX_BUF_SIZE / 2; + } + malloc_buf(root, mb_size); + } + root->mem->rem -= asize; + buf = root->mem->mem; + root->mem->mem += asize; + return buf; +} + + +/* This routine frees the whole tree */ +void free_tree(TREE_ROOT *root) +{ + struct s_mem *mem, *rel; + uint32_t freed_blocks = 0; + + root->hardlinks.destroy(); + for (mem=root->mem; mem; ) { + rel = mem; + mem = mem->next; + free(rel); + freed_blocks++; + } + if (root->cached_path) { + free_pool_memory(root->cached_path); + root->cached_path = NULL; + } + Dmsg3(100, "Total size=%u blocks=%u freed_blocks=%u\n", root->total_size, root->blocks, freed_blocks); + free(root); + garbage_collect_memory(); + return; +} + +/* Add Delta part for this node */ +void tree_add_delta_part(TREE_ROOT *root, TREE_NODE *node, + JobId_t JobId, int32_t FileIndex) +{ + struct delta_list *elt = + (struct delta_list*) tree_alloc(root, sizeof(struct delta_list)); + + elt->next = node->delta_list; + elt->JobId = JobId; + elt->FileIndex = FileIndex; + node->delta_list = elt; +} + +/* + * Insert a node in the tree. This is the main subroutine + * called when building a tree. 
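+ * A minimal usage sketch (the path and file name are illustrative, and
+ * writable buffers are used because the routine temporarily patches the
+ * path in place before restoring it; root comes from new_tree()):
+ *    char path[] = "/etc/", fname[] = "hosts";
+ *    TREE_NODE *n = insert_tree_node(path, fname, TN_FILE, root, NULL);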
+ * + */ +TREE_NODE *insert_tree_node(char *path, char *fname, int type, + TREE_ROOT *root, TREE_NODE *parent) +{ + char *p, *q; + int path_len = strlen(path); + TREE_NODE *node; + + Dmsg1(100, "insert_tree_node: %s\n", path); + /* + * If trailing slash on path, strip it + */ + if (path_len > 0) { + q = path + path_len - 1; + if (IsPathSeparator(*q)) { + *q = 0; /* strip trailing slash */ + } else { + q = NULL; /* no trailing slash */ + } + } else { + q = NULL; /* no trailing slash */ + } + /* If no filename, strip last component of path as "filename" */ + if (*fname == 0) { + p = (char *)last_path_separator(path); /* separate path and filename */ + if (p) { + fname = p + 1; /* set new filename */ + *p = '\0'; /* terminate new path */ + } + } else { + p = NULL; + } + if (*fname) { + if (!parent) { /* if no parent, we need to make one */ + Dmsg1(100, "make_tree_path for %s\n", path); + path_len = strlen(path); /* get new length */ + if (path_len == root->cached_path_len && + strcmp(path, root->cached_path) == 0) { + parent = root->cached_parent; + } else { + root->cached_path_len = path_len; + pm_strcpy(&root->cached_path, path); + parent = make_tree_path(path, root); + root->cached_parent = parent; + } + Dmsg1(100, "parent=%s\n", parent->fname); + } + } else { + fname = path; + if (!parent) { + parent = (TREE_NODE *)root; + type = TN_DIR_NLS; + } + Dmsg1(100, "No / found: %s\n", path); + } + + node = search_and_insert_tree_node(fname, 0, root, parent); + if (q) { /* if trailing slash on entry */ + *q = '/'; /* restore it */ + } + if (p) { /* if slash in path trashed */ + *p = '/'; /* restore full path */ + } + return node; +} + +/* + * Ensure that all appropriate nodes for a full path exist in + * the tree. + */ +TREE_NODE *make_tree_path(char *path, TREE_ROOT *root) +{ + TREE_NODE *parent, *node; + char *fname, *p; + int type = TN_NEWDIR; + + Dmsg1(100, "make_tree_path: %s\n", path); + if (*path == 0) { + Dmsg0(100, "make_tree_path: parent=*root*\n"); + return (TREE_NODE *)root; + } + p = (char *)last_path_separator(path); /* get last dir component of path */ + if (p) { + fname = p + 1; + *p = 0; /* terminate path */ + parent = make_tree_path(path, root); + *p = '/'; /* restore full name */ + } else { + fname = path; + parent = (TREE_NODE *)root; + type = TN_DIR_NLS; + } + node = search_and_insert_tree_node(fname, type, root, parent); + return node; +} + +static int node_compare(void *item1, void *item2) +{ + TREE_NODE *tn1 = (TREE_NODE *)item1; + TREE_NODE *tn2 = (TREE_NODE *)item2; + if (tn1->fname[0] > tn2->fname[0]) { + return 1; + } else if (tn1->fname[0] < tn2->fname[0]) { + return -1; + } + return strcmp(tn1->fname, tn2->fname); +} + +/* + * See if the fname already exists. If not insert a new node for it. 
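+ * (children are kept in the parent's rblist ordered by node_compare(),
+ * i.e. by first character and then full strcmp(); the "inserted" flag of
+ * the returned node tells the caller whether a new entry was created)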
+ */ +static TREE_NODE *search_and_insert_tree_node(char *fname, int type, + TREE_ROOT *root, TREE_NODE *parent) +{ + TREE_NODE *node, *found_node; + node = new_tree_node(root); + node->fname = fname; + found_node = (TREE_NODE *)parent->child.insert(node, node_compare); + if (found_node != node) { /* already in list */ + free_tree_node(root); /* free node allocated above */ + found_node->inserted = false; + return found_node; + } + /* It was not found, but is now inserted */ + node->fname_len = strlen(fname); + node->fname = tree_alloc(root, node->fname_len + 1); + strcpy(node->fname, fname); + node->parent = parent; + node->type = type; + /* Maintain a linear chain of nodes */ + if (!root->first) { + root->first = node; + root->last = node; + } else { + root->last->next = node; + root->last = node; + } + node->inserted = true; /* inserted into tree */ + return node; + +} + +int tree_getpath(TREE_NODE *node, char *buf, int buf_size) +{ + if (!node) { + buf[0] = 0; + return 1; + } + tree_getpath(node->parent, buf, buf_size); + /* + * Fixup for Win32. If we have a Win32 directory and + * there is only a / in the buffer, remove it since + * win32 names don't generally start with / + */ + if (node->type == TN_DIR_NLS && IsPathSeparator(buf[0]) && buf[1] == '\0') { + buf[0] = '\0'; + } + bstrncat(buf, node->fname, buf_size); + /* Add a slash for all directories unless we are at the root, + * also add a slash to a soft linked file if it has children + * i.e. it is linked to a directory. + */ + if ((node->type != TN_FILE && !(IsPathSeparator(buf[0]) && buf[1] == '\0')) || + (node->soft_link && tree_node_has_child(node))) { + bstrncat(buf, "/", buf_size); + } + return 1; +} + +/* + * Change to specified directory + */ +TREE_NODE *tree_cwd(char *path, TREE_ROOT *root, TREE_NODE *node) +{ + if (path[0] == '.' && path[1] == '\0') { + return node; + } + /* Handle relative path */ + if (path[0] == '.' && path[1] == '.' && (IsPathSeparator(path[2]) || path[2] == '\0')) { + TREE_NODE *parent = node->parent ? node->parent : node; + if (path[2] == 0) { + return parent; + } else { + return tree_cwd(path+3, root, parent); + } + } + if (IsPathSeparator(path[0])) { + Dmsg0(100, "Doing absolute lookup.\n"); + return tree_relcwd(path+1, root, (TREE_NODE *)root); + } + Dmsg0(100, "Doing relative lookup.\n"); + return tree_relcwd(path, root, node); +} + + +/* + * Do a relative cwd -- i.e. relative to current node rather than root node + */ +TREE_NODE *tree_relcwd(char *path, TREE_ROOT *root, TREE_NODE *node) +{ + char *p; + int len; + TREE_NODE *cd; + char save_char; + int match; + + if (*path == 0) { + return node; + } + /* Check the current segment only */ + if ((p = first_path_separator(path)) != NULL) { + len = p - path; + } else { + len = strlen(path); + } + Dmsg2(100, "tree_relcwd: len=%d path=%s\n", len, path); + foreach_child(cd, node) { + Dmsg1(100, "tree_relcwd: test cd=%s\n", cd->fname); + if (cd->fname[0] == path[0] && len == (int)strlen(cd->fname) + && strncmp(cd->fname, path, len) == 0) { + break; + } + /* fnmatch has no len in call so we truncate the string */ + save_char = path[len]; + path[len] = 0; + match = fnmatch(path, cd->fname, 0) == 0; + path[len] = save_char; + if (match) { + break; + } + } + if (!cd || (cd->type == TN_FILE && !tree_node_has_child(cd))) { + return NULL; + } + if (!cd->can_access) { /* Will display permission denied */ + return cd; + } + if (!p) { + Dmsg0(100, "tree_relcwd: no more to lookup. 
found.\n"); + return cd; + } + Dmsg2(100, "recurse tree_relcwd with path=%s, cd=%s\n", p+1, cd->fname); + /* Check the next segment if any */ + return tree_relcwd(p+1, root, cd); +} + + + +#ifdef BUILD_TEST_PROGRAM + +void FillDirectoryTree(char *path, TREE_ROOT *root, TREE_NODE *parent); + +static uint32_t FileIndex = 0; +/* + * Simple test program for tree routines + */ +int main(int argc, char *argv[]) +{ + TREE_ROOT *root; + TREE_NODE *node; + char buf[MAXPATHLEN]; + + root = new_tree(); + root->fname = tree_alloc(root, 1); + *root->fname = 0; + root->fname_len = 0; + + FillDirectoryTree("/home/kern/bacula/k", root, NULL); + + for (node = first_tree_node(root); node; node=next_tree_node(node)) { + tree_getpath(node, buf, sizeof(buf)); + Dmsg2(100, "%d: %s\n", node->FileIndex, buf); + } + + node = (TREE_NODE *)root; + Pmsg0(000, "doing cd /home/kern/bacula/k/techlogs\n"); + node = tree_cwd("/home/kern/bacula/k/techlogs", root, node); + if (node) { + tree_getpath(node, buf, sizeof(buf)); + Dmsg2(100, "findex=%d: cwd=%s\n", node->FileIndex, buf); + } + + Pmsg0(000, "doing cd /home/kern/bacula/k/src/testprogs\n"); + node = tree_cwd("/home/kern/bacula/k/src/testprogs", root, node); + if (node) { + tree_getpath(node, buf, sizeof(buf)); + Dmsg2(100, "findex=%d: cwd=%s\n", node->FileIndex, buf); + } else { + Dmsg0(100, "testprogs not found.\n"); + } + + free_tree((TREE_NODE *)root); + + return 0; +} + +void FillDirectoryTree(char *path, TREE_ROOT *root, TREE_NODE *parent) +{ + TREE_NODE *newparent = NULL; + TREE_NODE *node; + struct stat statbuf; + DIR *dp; + POOL_MEM dname(PM_FNAME); + char pathbuf[MAXPATHLEN]; + char file[MAXPATHLEN]; + int type; + int i; + + Dmsg1(100, "FillDirectoryTree: %s\n", path); + dp = opendir(path); + if (!dp) { + return; + } + while (readdir(dp, dname.addr()) != 0) { + if (strcmp(dname.c_str(), ".") == 0 || strcmp(dname.c_str(), "..") == 0) { + continue; + } + bstrncpy(file, dname.c_str(), sizeof(file)); + snprintf(pathbuf, MAXPATHLEN-1, "%s/%s", path, file); + if (lstat(pathbuf, &statbuf) < 0) { + berrno be; + printf("lstat() failed. ERR=%s\n", be.bstrerror(errno)); + continue; + } +// printf("got file=%s, pathbuf=%s\n", file, pathbuf); + type = TN_FILE; + if (S_ISLNK(statbuf.st_mode)) + type = TN_FILE; /* link */ + else if (S_ISREG(statbuf.st_mode)) + type = TN_FILE; + else if (S_ISDIR(statbuf.st_mode)) { + type = TN_DIR; + } else if (S_ISCHR(statbuf.st_mode)) + type = TN_FILE; /* char dev */ + else if (S_ISBLK(statbuf.st_mode)) + type = TN_FILE; /* block dev */ + else if (S_ISFIFO(statbuf.st_mode)) + type = TN_FILE; /* fifo */ + else if (S_ISSOCK(statbuf.st_mode)) + type = TN_FILE; /* sock */ + else { + type = TN_FILE; + printf("Unknown file type: 0x%x\n", statbuf.st_mode); + } + + Dmsg2(100, "Doing: %d %s\n", type, pathbuf); + node = new_tree_node(root); + node->FileIndex = ++FileIndex; + parent = insert_tree_node(pathbuf, node, root, parent); + if (S_ISDIR(statbuf.st_mode) && !S_ISLNK(statbuf.st_mode)) { + Dmsg2(100, "calling fill. 
pathbuf=%s, file=%s\n", pathbuf, file); + FillDirectoryTree(pathbuf, root, node); + } + } + closedir(dp); +} + +#ifndef MAXPATHLEN +#define MAXPATHLEN 2000 +#endif + +void print_tree(char *path, TREE_NODE *tree) +{ + char buf[MAXPATHLEN]; + char *termchr; + + if (!tree) { + return; + } + switch (tree->type) { + case TN_DIR_NLS: + case TN_DIR: + case TN_NEWDIR: + termchr = "/"; + break; + case TN_ROOT: + case TN_FILE: + default: + termchr = ""; + break; + } + Pmsg3(-1, "%s/%s%s\n", path, tree->fname, termchr); + switch (tree->type) { + case TN_FILE: + case TN_NEWDIR: + case TN_DIR: + case TN_DIR_NLS: + bsnprintf(buf, sizeof(buf), "%s/%s", path, tree->fname); + print_tree(buf, first_child(tree)); + break; + case TN_ROOT: + print_tree(path, first_child(tree)); + break; + default: + Pmsg1(000, "Unknown node type %d\n", tree->type); + } + print_tree(path, tree->sibling_); + return; +} + +#endif diff --git a/src/lib/tree.h b/src/lib/tree.h new file mode 100644 index 00000000..7a2467cc --- /dev/null +++ b/src/lib/tree.h @@ -0,0 +1,149 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Directory tree build/traverse routines + * + * Kern Sibbald, June MMII + * +*/ + +#include "htable.h" + +struct s_mem { + struct s_mem *next; /* next buffer */ + int rem; /* remaining bytes */ + char *mem; /* memory pointer */ + char first[1]; /* first byte */ +}; + +#define USE_DLIST + +#define foreach_child(var, list) \ + for((var)=NULL; (*((TREE_NODE **)&(var))=(TREE_NODE*)(list->child.next(var))); ) + +#define tree_node_has_child(node) \ + ((node)->child.size() > 0) + +#define first_child(node) \ + ((TREE_NODE *)(node->child.first()) + +struct delta_list { + struct delta_list *next; + JobId_t JobId; + int32_t FileIndex; +}; + +/* + * Keep this node as small as possible because + * there is one for each file. 
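+ * (this is why the flags below are 1-bit bitfields, type is 8 bits, and
+ * the file name is stored out of line via tree_alloc())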
+ */ +struct s_tree_node { + /* KEEP sibling as the first member to avoid having to + * do initialization of child */ + rblink sibling; + rblist child; + char *fname; /* file name */ + int32_t FileIndex; /* file index */ + uint32_t JobId; /* JobId */ + int32_t delta_seq; /* current delta sequence */ + uint16_t fname_len; /* filename length */ + int type: 8; /* node type */ + unsigned int extract: 1; /* extract item */ + unsigned int extract_dir: 1; /* extract dir entry only */ + unsigned int hard_link: 1; /* set if have hard link */ + unsigned int soft_link: 1; /* set if is soft link */ + unsigned int inserted: 1; /* set when node newly inserted */ + unsigned int loaded: 1; /* set when the dir is in the tree */ + unsigned int can_access: 1; /* Can access to this node */ + struct s_tree_node *parent; + struct s_tree_node *next; /* next hash of FileIndex */ + struct delta_list *delta_list; /* delta parts for this node */ +}; +typedef struct s_tree_node TREE_NODE; + +struct s_tree_root { + /* KEEP sibling as the first member to avoid having to + * do initialization of child */ + rblink sibling; + rblist child; + const char *fname; /* file name */ + int32_t FileIndex; /* file index */ + uint32_t JobId; /* JobId */ + int32_t delta_seq; /* current delta sequence */ + uint16_t fname_len; /* filename length */ + unsigned int type: 8; /* node type */ + unsigned int extract: 1; /* extract item */ + unsigned int extract_dir: 1; /* extract dir entry only */ + unsigned int hard_link: 1; /* set if have hard link */ + unsigned int soft_link: 1; /* set if is soft link */ + unsigned int inserted: 1; /* set when newly inserted */ + unsigned int loaded: 1; /* set when the dir is in the tree */ + unsigned int can_access: 1; /* Can access to this node */ + struct s_tree_node *parent; + struct s_tree_node *next; /* next hash of FileIndex */ + struct delta_list *delta_list; /* delta parts for this node */ + + /* The above ^^^ must be identical to a TREE_NODE structure */ + struct s_tree_node *first; /* first entry in the tree */ + struct s_tree_node *last; /* last entry in tree */ + struct s_mem *mem; /* tree memory */ + uint32_t total_size; /* total bytes allocated */ + uint32_t blocks; /* total mallocs */ + int cached_path_len; /* length of cached path */ + char *cached_path; /* cached current path */ + TREE_NODE *cached_parent; /* cached parent for above path */ + htable hardlinks; /* references to first occurrence of hardlinks */ +}; +typedef struct s_tree_root TREE_ROOT; + +/* hardlink hashtable entry */ +struct s_hl_entry { + uint64_t key; + hlink link; + TREE_NODE *node; +}; +typedef struct s_hl_entry HL_ENTRY; + +/* type values */ +#define TN_ROOT 1 /* root node */ +#define TN_NEWDIR 2 /* created directory to fill path */ +#define TN_DIR 3 /* directory entry */ +#define TN_DIR_NLS 4 /* directory -- no leading slash -- win32 */ +#define TN_FILE 5 /* file entry */ + +/* External interface */ +TREE_ROOT *new_tree(int count); +TREE_NODE *insert_tree_node(char *path, char *fname, int type, + TREE_ROOT *root, TREE_NODE *parent); +TREE_NODE *make_tree_path(char *path, TREE_ROOT *root); +TREE_NODE *tree_cwd(char *path, TREE_ROOT *root, TREE_NODE *node); +TREE_NODE *tree_relcwd(char *path, TREE_ROOT *root, TREE_NODE *node); +void tree_add_delta_part(TREE_ROOT *root, TREE_NODE *node, + JobId_t JobId, int32_t FileIndex); +void free_tree(TREE_ROOT *root); +int tree_getpath(TREE_NODE *node, char *buf, int buf_size); +void tree_remove_node(TREE_ROOT *root, TREE_NODE *node); + +/* + * Use the following for traversing 
the whole tree. It will be + * traversed in the order the entries were inserted into the + * tree. + */ +#define first_tree_node(r) (r)->first +#define next_tree_node(n) (n)->next diff --git a/src/lib/unittests.c b/src/lib/unittests.c new file mode 100644 index 00000000..be9d04f8 --- /dev/null +++ b/src/lib/unittests.c @@ -0,0 +1,127 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Support routines for Unit Tests. + */ + +/* + * This is an example how to use unittests in your code: + +int main() +{ + int rc; + + prolog("app_test"); + + ( ... your test application goes here ...) + + rc = report(); + epilog(); + return rc; +} + + * and a C++ approach for any C++ geeks: + +int main() +{ + Unittests tests("app_test"); + + ( ... your test application goes here ...) + + return report(); +} + + */ + +#include +#include "bacula.h" + +static int err=0; +static int nb=0; +static bool lmgrinit = false; + +/* + * Test success if value is not zero. + */ +void _ok(const char *file, int l, const char *op, int value, const char *label) +{ + nb++; + if (!value) { + err++; + Pmsg4(-1, "ERR %.80s %s:%i on %s\n", label, file, l, op); + } else { + Pmsg1(-1, "OK %.80s\n", label); + } +} + +/* + * Test success if value is zero. + */ +void _nok(const char *file, int l, const char *op, int value, const char *label) +{ + nb++; + if (value) { + err++; + Pmsg4(-1, "ERR %.80s %s:%i on !%s\n", label, file, l, op); + } else { + Pmsg1(-1, "OK %.80s\n", label); + } +} + +/* + * Short report of successful/all tests. + */ +int report() +{ + Pmsg0(-1, "==== Report ====\n"); + Pmsg2(-1, "Result %i/%i OK\n", nb - err, nb); + return err > 0; +} + +void terminate(int sig) {}; + +/* + * Initializes the application env, including lockmanager. + */ +void prolog(const char *name, bool lmgr=false, bool motd=true) +{ + if (motd) + Pmsg1(-1, "==== Starting %s ... ====\n", name); + my_name_is(0, NULL, name); + init_signals(terminate); + if (lmgr){ + lmgr_init_thread(); /* initialize the lockmanager stack */ + lmgrinit = true; + } +}; + +/* + * Finish the application, shows report about memory leakage and terminates the lockmanager. + */ +void epilog() +{ + Pmsg0(-1, "\n"); + stop_watchdog(); + if (lmgrinit){ + lmgr_cleanup_main(); + } + close_memory_pool(); + sm_dump(false); + Pmsg0(-1, "==== Finish ====\n"); +}; diff --git a/src/lib/unittests.h b/src/lib/unittests.h new file mode 100644 index 00000000..453822ce --- /dev/null +++ b/src/lib/unittests.h @@ -0,0 +1,62 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. 
+ + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Support routines for Unit Tests. + */ + +#ifndef _UNITTESTS_H_ +#define _UNITTESTS_H_ + +// Test success if value x is not zero +#define ok(x, label) _ok(__FILE__, __LINE__, #x, (x), label) +// Test success if value x is zero +#define nok(x, label) _nok(__FILE__, __LINE__, #x, (x), label) + +/* TODO: log() ported from BEE it should be updated. */ +#ifdef RTEST_LOG_THREADID +#define log(format, ...) do { \ + pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL); \ + printf("%p: " format "\n", (void *)pthread_self(), ##__VA_ARGS__ ); \ + pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL); \ + } while (0) +#else +#define log(format, ...) do { \ + printf("\n------------------------------------------\n" format "\n------------------------------------------\n", ##__VA_ARGS__ ); \ + } while (0) +#endif + +void _ok(const char *file, int l, const char *op, int value, const char *label); +void _nok(const char *file, int l, const char *op, int value, const char *label); +int report(); +void terminate(int sig); +void prolog(const char *name, bool lmgr=false, bool motd=true); +void epilog(); + +/* The class based approach for C++ geeks */ +class Unittests +{ +public: + Unittests(const char *name, bool lmgr=false, bool motd=true) + { + prolog(name, lmgr, motd); + }; + ~Unittests() { epilog(); }; +}; + +#endif /* _UNITTESTS_H_ */ \ No newline at end of file diff --git a/src/lib/util.c b/src/lib/util.c new file mode 100644 index 00000000..78c78efa --- /dev/null +++ b/src/lib/util.c @@ -0,0 +1,1036 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * util.c miscellaneous utility subroutines for Bacula + * + * Kern Sibbald, MM + */ + +#include "bacula.h" +#include "jcr.h" +#include "findlib/find.h" + +/* + * Various Bacula Utility subroutines + * + */ + +bool is_null(const void *ptr) +{ + return ptr == NULL; +} + +/* Return true of buffer has all zero bytes */ +bool is_buf_zero(const char *buf, int len) +{ + uint64_t *ip; + const char *p; + int i, len64, done, rem; + + if (buf[0] != 0) { + return false; + } + ip = (uint64_t *)buf; + /* Optimize by checking uint64_t for zero */ + len64 = len / sizeof(uint64_t); + for (i=0; i < len64; i++) { + if (ip[i] != 0) { + return false; + } + } + done = len64 * sizeof(uint64_t); /* bytes already checked */ + p = buf + done; + rem = len - done; + for (i = 0; i < rem; i++) { + if (p[i] != 0) { + return false; + } + } + return true; +} + +/* + * Subroutine that cannot be suppressed by GCC 6.0 + */ +void bmemzero(void *buf, size_t size) +{ + memset(buf, 0, size); + return; +} + + +/* Convert a string in place to lower case */ +void lcase(char *str) +{ + while (*str) { + if (B_ISUPPER(*str)) { + *str = tolower((int)(*str)); + } + str++; + } +} + +/* Convert spaces to non-space character. + * This makes scanf of fields containing spaces easier. 
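+ * For example "my file" becomes "my\001file"; unbash_spaces() below
+ * restores the spaces.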
+ */ +void +bash_spaces(char *str) +{ + while (*str) { + if (*str == ' ') + *str = 0x1; + str++; + } +} + +/* Convert spaces to non-space character. + * This makes scanf of fields containing spaces easier. + */ +void +bash_spaces(POOL_MEM &pm) +{ + char *str = pm.c_str(); + while (*str) { + if (*str == ' ') + *str = 0x1; + str++; + } +} + + +/* Convert non-space characters (0x1) back into spaces */ +void +unbash_spaces(char *str) +{ + while (*str) { + if (*str == 0x1) + *str = ' '; + str++; + } +} + +/* Convert non-space characters (0x1) back into spaces */ +void +unbash_spaces(POOL_MEM &pm) +{ + char *str = pm.c_str(); + while (*str) { + if (*str == 0x1) + *str = ' '; + str++; + } +} + +char *encode_time(utime_t utime, char *buf) +{ + struct tm tm; + int n = 0; + time_t time = utime; + +#if defined(HAVE_WIN32) + /* + * Avoid a seg fault in Microsoft's CRT localtime_r(), + * which incorrectly references a NULL returned from gmtime() if + * time is negative before or after the timezone adjustment. + */ + struct tm *gtm; + + if ((gtm = gmtime(&time)) == NULL) { + return buf; + } + + if (gtm->tm_year == 1970 && gtm->tm_mon == 1 && gtm->tm_mday < 3) { + return buf; + } +#endif + + if (localtime_r(&time, &tm)) { + n = sprintf(buf, "%04d-%02d-%02d %02d:%02d:%02d", + tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, + tm.tm_hour, tm.tm_min, tm.tm_sec); + } + return buf+n; +} + + + +static char hexatable[]="0123456789abcdef"; + +/* + * do an hexadump of data[0:len] into buf[0:capacity] + * a space is inserted between every 4 bytes + * usage: + * char buf[10]; + * Dmsg2("msglen=%d msg=%s", fd->msglen, hexdump(fd->msg, fd->msglen, buf, sizeof(buf)); + * ==> + * msglen=36 msg=12345678 12345678 + */ +char *hexdump(const char *data, int len, char *buf, int capacity, bool add_spaces) +{ + char *b=buf; + int i=0; + while (i2) { + if (add_spaces && i>0 && i%4==0 ) { + *(b++)=' '; + capacity--; + } + if (capacity>2) { + *(b++)=hexatable[(data[i]&0xF0)>>4]; + *(b++)=hexatable[data[i++]&0x0F]; + } + capacity-=2; + } + *b='\0'; + return buf; +} + +/* + * do an ASCII dump of data[0:len] into buf[0:capacity] + * non printable chars are replaced by hexa "\xx" + * usage: + * char buf[10]; + * Dmsg2("msglen=%d msg=%s", fd->msglen, asciidump(fd->msg, fd->msglen, buf, sizeof(buf)); + * ==> + * msglen=5 msg=abcd\10 + */ +char *asciidump(const char *data, int len, char *buf, int capacity) +{ + char *b=buf; + const unsigned char *p=(const unsigned char *)data; + if (!data) { + strncpy(buf, "", capacity); + return buf; + } + while (len>0 && capacity>1) { + if (isprint(*p)) { + *(b++)=*(p++); + capacity--; + } else { + if (capacity>3) { + *(b++)='\\'; + *(b++)=hexatable[((*p)&0xF0)>>4]; + *(b++)=hexatable[(*(p++))&0x0F]; + } + capacity-=3; + } + len--; + } + *b='\0'; + return buf; +} + +char *smartdump(const char *data, int len, char *buf, int capacity, bool *is_ascii) +{ + char *b=buf; + int l=len; + int c=capacity; + const unsigned char *p=(const unsigned char *)data; + if (!data) { + strncpy(buf, "", capacity); + return buf; + } + if (is_ascii != NULL) { + *is_ascii = false; + } + while (l>0 && c>1) { + if (isprint(*p)) { + *(b++)=*(p++); + } else if (isspace(*p) || *p=='\0') { + *(b++)=' '; + p++; + } else { + return hexdump(data, len, buf, capacity); + } + c--; + l--; + } + *b='\0'; + if (is_ascii != NULL) { + *is_ascii = true; + } + return buf; +} + +/* + * check if x is a power two + */ +int is_power_of_two(uint64_t x) +{ + while ( x%2 == 0 && x > 1) { + x /= 2; + } + return (x == 1); +} + +/* + * Convert a JobStatus 
code into a human readable form + */ +void jobstatus_to_ascii(int JobStatus, char *msg, int maxlen) +{ + const char *jobstat; + char buf[100]; + + switch (JobStatus) { + case JS_Created: + jobstat = _("Created"); + break; + case JS_Running: + jobstat = _("Running"); + break; + case JS_Blocked: + jobstat = _("Blocked"); + break; + case JS_Terminated: + jobstat = _("OK"); + break; + case JS_Incomplete: + jobstat = _("Incomplete job"); + break; + case JS_FatalError: + case JS_ErrorTerminated: + jobstat = _("Error"); + break; + case JS_Error: + jobstat = _("Non-fatal error"); + break; + case JS_Warnings: + jobstat = _("OK -- with warnings"); + break; + case JS_Canceled: + jobstat = _("Canceled"); + break; + case JS_Differences: + jobstat = _("Verify differences"); + break; + case JS_WaitFD: + jobstat = _("Waiting on FD"); + break; + case JS_WaitSD: + jobstat = _("Wait on SD"); + break; + case JS_WaitMedia: + jobstat = _("Wait for new Volume"); + break; + case JS_WaitMount: + jobstat = _("Waiting for mount"); + break; + case JS_WaitStoreRes: + jobstat = _("Waiting for Storage resource"); + break; + case JS_WaitJobRes: + jobstat = _("Waiting for Job resource"); + break; + case JS_WaitClientRes: + jobstat = _("Waiting for Client resource"); + break; + case JS_WaitMaxJobs: + jobstat = _("Waiting on Max Jobs"); + break; + case JS_WaitStartTime: + jobstat = _("Waiting for Start Time"); + break; + case JS_WaitPriority: + jobstat = _("Waiting on Priority"); + break; + case JS_DataCommitting: + jobstat = _("SD committing Data"); + break; + case JS_DataDespooling: + jobstat = _("SD despooling Data"); + break; + case JS_AttrDespooling: + jobstat = _("SD despooling Attributes"); + break; + case JS_AttrInserting: + jobstat = _("Dir inserting Attributes"); + break; + + default: + if (JobStatus == 0) { + buf[0] = 0; + } else { + bsnprintf(buf, sizeof(buf), _("Unknown Job termination status=%d"), JobStatus); + } + jobstat = buf; + break; + } + bstrncpy(msg, jobstat, maxlen); +} + +/* + * Convert a JobStatus code into a human readable form - gui version + */ +void jobstatus_to_ascii_gui(int JobStatus, char *msg, int maxlen) +{ + const char *cnv = NULL; + switch (JobStatus) { + case JS_Terminated: + cnv = _("Completed successfully"); + break; + case JS_Warnings: + cnv = _("Completed with warnings"); + break; + case JS_ErrorTerminated: + cnv = _("Terminated with errors"); + break; + case JS_FatalError: + cnv = _("Fatal error"); + break; + case JS_Created: + cnv = _("Created, not yet running"); + break; + case JS_Canceled: + cnv = _("Canceled by user"); + break; + case JS_Differences: + cnv = _("Verify found differences"); + break; + case JS_WaitFD: + cnv = _("Waiting for File daemon"); + break; + case JS_WaitSD: + cnv = _("Waiting for Storage daemon"); + break; + case JS_WaitPriority: + cnv = _("Waiting for higher priority jobs"); + break; + case JS_AttrInserting: + cnv = _("Batch inserting file records"); + break; + }; + + if (cnv) { + bstrncpy(msg, cnv, maxlen); + } else { + jobstatus_to_ascii(JobStatus, msg, maxlen); + } +} + +/* + * Convert Job Termination Status into a string + */ +const char *job_status_to_str(int status, int errors) +{ + const char *str; + + switch (status) { + case JS_Terminated: + if (errors > 0) { + str = _("OK -- with warnings"); + } else { + str = _("OK"); + } + break; + case JS_Warnings: + str = _("OK -- with warnings"); + break; + case JS_ErrorTerminated: + case JS_Error: + str = _("Error"); + break; + case JS_FatalError: + str = _("Fatal Error"); + break; + case JS_Canceled: + str 
= _("Canceled"); + break; + case JS_Differences: + str = _("Differences"); + break; + case JS_Created: + str = _("Created"); + break; + case JS_Incomplete: + str = _("Incomplete"); + break; + default: + str = _("Unknown term code"); + break; + } + return str; +} + + +/* + * Convert Job Type into a string + */ +const char *job_type_to_str(int type) +{ + const char *str = NULL; + + switch (type) { + case JT_BACKUP: + str = _("Backup"); + break; + case JT_MIGRATED_JOB: + str = _("Migrated Job"); + break; + case JT_VERIFY: + str = _("Verify"); + break; + case JT_RESTORE: + str = _("Restore"); + break; + case JT_CONSOLE: + str = _("Console"); + break; + case JT_SYSTEM: + str = _("System or Console"); + break; + case JT_ADMIN: + str = _("Admin"); + break; + case JT_ARCHIVE: + str = _("Archive"); + break; + case JT_JOB_COPY: + str = _("Job Copy"); + break; + case JT_COPY: + str = _("Copy"); + break; + case JT_MIGRATE: + str = _("Migrate"); + break; + case JT_SCAN: + str = _("Scan"); + break; + } + if (!str) { + str = _("Unknown Type"); + } + return str; +} + +/* Convert ActionOnPurge to string (Truncate, Erase, Destroy) + */ +char *action_on_purge_to_string(int aop, POOL_MEM &ret) +{ + if (aop & ON_PURGE_TRUNCATE) { + pm_strcpy(ret, _("Truncate")); + } + if (!aop) { + pm_strcpy(ret, _("None")); + } + return ret.c_str(); +} + +/* + * Convert Job Level into a string + */ +const char *job_level_to_str(int level) +{ + const char *str; + + switch (level) { + case L_BASE: + str = _("Base"); + break; + case L_FULL: + str = _("Full"); + break; + case L_INCREMENTAL: + str = _("Incremental"); + break; + case L_DIFFERENTIAL: + str = _("Differential"); + break; + case L_SINCE: + str = _("Since"); + break; + case L_VERIFY_CATALOG: + str = _("Verify Catalog"); + break; + case L_VERIFY_INIT: + str = _("Verify Init Catalog"); + break; + case L_VERIFY_VOLUME_TO_CATALOG: + str = _("Verify Volume to Catalog"); + break; + case L_VERIFY_DISK_TO_CATALOG: + str = _("Verify Disk to Catalog"); + break; + case L_VERIFY_DATA: + str = _("Verify Data"); + break; + case L_VIRTUAL_FULL: + str = _("Virtual Full"); + break; + case L_NONE: + str = " "; + break; + default: + str = _("Unknown Job Level"); + break; + } + return str; +} + +const char *volume_status_to_str(const char *status) +{ + int pos; + const char *vs[] = { + NT_("Append"), _("Append"), + NT_("Archive"), _("Archive"), + NT_("Disabled"), _("Disabled"), + NT_("Full"), _("Full"), + NT_("Used"), _("Used"), + NT_("Cleaning"), _("Cleaning"), + NT_("Purged"), _("Purged"), + NT_("Recycle"), _("Recycle"), + NT_("Read-Only"), _("Read-Only"), + NT_("Error"), _("Error"), + NULL, NULL}; + + if (status) { + for (pos = 0 ; vs[pos] ; pos += 2) { + if ( !strcmp(vs[pos],status) ) { + return vs[pos+1]; + } + } + } + + return _("Invalid volume status"); +} + + +/*********************************************************************** + * Encode the mode bits into a 10 character string like LS does + ***********************************************************************/ + +char *encode_mode(mode_t mode, char *buf) +{ + char *cp = buf; + + *cp++ = S_ISDIR(mode) ? 'd' : S_ISBLK(mode) ? 'b' : S_ISCHR(mode) ? 'c' : + S_ISLNK(mode) ? 'l' : S_ISFIFO(mode) ? 'f' : S_ISSOCK(mode) ? 's' : '-'; + *cp++ = mode & S_IRUSR ? 'r' : '-'; + *cp++ = mode & S_IWUSR ? 'w' : '-'; + *cp++ = (mode & S_ISUID + ? (mode & S_IXUSR ? 's' : 'S') + : (mode & S_IXUSR ? 'x' : '-')); + *cp++ = mode & S_IRGRP ? 'r' : '-'; + *cp++ = mode & S_IWGRP ? 'w' : '-'; + *cp++ = (mode & S_ISGID + ? (mode & S_IXGRP ? 
's' : 'S') + : (mode & S_IXGRP ? 'x' : '-')); + *cp++ = mode & S_IROTH ? 'r' : '-'; + *cp++ = mode & S_IWOTH ? 'w' : '-'; + *cp++ = (mode & S_ISVTX + ? (mode & S_IXOTH ? 't' : 'T') + : (mode & S_IXOTH ? 'x' : '-')); + *cp = '\0'; + return cp; +} + +#if defined(HAVE_WIN32) +int do_shell_expansion(char *name, int name_len) +{ + char *src = bstrdup(name); + + ExpandEnvironmentStrings(src, name, name_len); + + free(src); + + return 1; +} +#else +int do_shell_expansion(char *name, int name_len) +{ + static char meta[] = "~\\$[]*?`'<>\""; + bool found = false; + int len, i, stat; + POOLMEM *cmd; + BPIPE *bpipe; + char line[MAXSTRING]; + const char *shellcmd; + + /* Check if any meta characters are present */ + len = strlen(meta); + for (i = 0; i < len; i++) { + if (strchr(name, meta[i])) { + found = true; + break; + } + } + if (found) { + cmd = get_pool_memory(PM_FNAME); + /* look for shell */ + if ((shellcmd = getenv("SHELL")) == NULL) { + shellcmd = "/bin/sh"; + } + pm_strcpy(&cmd, shellcmd); + pm_strcat(&cmd, " -c \"echo "); + pm_strcat(&cmd, name); + pm_strcat(&cmd, "\""); + Dmsg1(400, "Send: %s\n", cmd); + if ((bpipe = open_bpipe(cmd, 0, "r"))) { + *line = 0; + fgets(line, sizeof(line), bpipe->rfd); + strip_trailing_junk(line); + stat = close_bpipe(bpipe); + Dmsg2(400, "stat=%d got: %s\n", stat, line); + } else { + stat = 1; /* error */ + } + free_pool_memory(cmd); + if (stat == 0) { + bstrncpy(name, line, name_len); + } + } + return 1; +} +#endif + + +/* MAKESESSIONKEY -- Generate session key with optional start + key. If mode is TRUE, the key will be + translated to a string, otherwise it is + returned as 16 binary bytes. + + from SpeakFreely by John Walker */ + +void make_session_key(char *key, char *seed, int mode) +{ + int j, k; + struct MD5Context md5c; + unsigned char md5key[16], md5key1[16]; + char s[1024]; + +#define ss sizeof(s) + + s[0] = 0; + if (seed != NULL) { + bstrncat(s, seed, sizeof(s)); + } + + /* The following creates a seed for the session key generator + based on a collection of volatile and environment-specific + information unlikely to be vulnerable (as a whole) to an + exhaustive search attack. If one of these items isn't + available on your machine, replace it with something + equivalent or, if you like, just delete it. 
*/ + +#if defined(HAVE_WIN32) + { + LARGE_INTEGER li; + DWORD length; + FILETIME ft; + + bsnprintf(s + strlen(s), ss, "%lu", (uint32_t)GetCurrentProcessId()); + (void)getcwd(s + strlen(s), 256); + bsnprintf(s + strlen(s), ss, "%lu", (uint32_t)GetTickCount()); + QueryPerformanceCounter(&li); + bsnprintf(s + strlen(s), ss, "%lu", (uint32_t)li.LowPart); + GetSystemTimeAsFileTime(&ft); + bsnprintf(s + strlen(s), ss, "%lu", (uint32_t)ft.dwLowDateTime); + bsnprintf(s + strlen(s), ss, "%lu", (uint32_t)ft.dwHighDateTime); + length = 256; + GetComputerName(s + strlen(s), &length); + length = 256; + GetUserName(s + strlen(s), &length); + } +#else + bsnprintf(s + strlen(s), ss, "%lu", (uint32_t)getpid()); + bsnprintf(s + strlen(s), ss, "%lu", (uint32_t)getppid()); + (void)getcwd(s + strlen(s), 256); + bsnprintf(s + strlen(s), ss, "%lu", (uint32_t)clock()); + bsnprintf(s + strlen(s), ss, "%lu", (uint32_t)time(NULL)); +#if defined(Solaris) + sysinfo(SI_HW_SERIAL,s + strlen(s), 12); +#endif +#if defined(HAVE_GETHOSTID) + bsnprintf(s + strlen(s), ss, "%lu", (uint32_t) gethostid()); +#endif + gethostname(s + strlen(s), 256); + bsnprintf(s + strlen(s), ss, "%lu", (uint32_t)getuid()); + bsnprintf(s + strlen(s), ss, "%lu", (uint32_t)getgid()); +#endif + MD5Init(&md5c); + MD5Update(&md5c, (uint8_t *)s, strlen(s)); + MD5Final(md5key, &md5c); + bsnprintf(s + strlen(s), ss, "%lu", (uint32_t)((time(NULL) + 65121) ^ 0x375F)); + MD5Init(&md5c); + MD5Update(&md5c, (uint8_t *)s, strlen(s)); + MD5Final(md5key1, &md5c); +#define nextrand (md5key[j] ^ md5key1[j]) + if (mode) { + for (j = k = 0; j < 16; j++) { + unsigned char rb = nextrand; + +#define Rad16(x) ((x) + 'A') + key[k++] = Rad16((rb >> 4) & 0xF); + key[k++] = Rad16(rb & 0xF); +#undef Rad16 + if (j & 1) { + key[k++] = '-'; + } + } + key[--k] = 0; + } else { + for (j = 0; j < 16; j++) { + key[j] = nextrand; + } + } +} +#undef nextrand + +void encode_session_key(char *encode, char *session, char *key, int maxlen) +{ + int i; + for (i=0; (i < maxlen-1) && session[i]; i++) { + if (session[i] == '-') { + encode[i] = '-'; + } else { + encode[i] = ((session[i] - 'A' + key[i]) & 0xF) + 'A'; + } + } + encode[i] = 0; + Dmsg3(000, "Session=%s key=%s encode=%s\n", session, key, encode); +} + +void decode_session_key(char *decode, char *session, char *key, int maxlen) +{ + int i, x; + + for (i=0; (i < maxlen-1) && session[i]; i++) { + if (session[i] == '-') { + decode[i] = '-'; + } else { + x = (session[i] - 'A' - key[i]) & 0xF; + if (x < 0) { + x += 16; + } + decode[i] = x + 'A'; + } + } + decode[i] = 0; + Dmsg3(000, "Session=%s key=%s decode=%s\n", session, key, decode); +} + + + +/* + * Edit job codes into main command line + * %% = % + * %b = Job Bytes + * %c = Client's name + * %C = If the job is a Cloned job (Only on director side) + * %d = Director's name (also valid on file daemon) + * %e = Job Exit code + * %E = Non-fatal Job Errors + * %f = Job FileSet (Only on director side) + * %F = Job Files + * %h = Client address (Only on director side) + * %i = JobId + * %j = Unique Job id + * %l = job level + * %n = Unadorned Job name + * %o = Job Priority + * %p = Pool name (Director) + * %P = Process PID + * %w = Write Store (Director) + * %x = Spool Data (Director) + * %D = Director name (Director/FileDaemon) + * %C = Cloned (Director) + * %I = wjcr->JobId (Director) + * %s = Since time + * %S = Previous Job name (FileDaemon) for Incremental/Differential + * %t = Job type (Backup, ...) 
+ * %r = Recipients + * %v = Volume name + * %R = Job ReadBytes + * + * omsg = edited output message + * imsg = input string containing edit codes (%x) + * to = recepients list + * + */ +POOLMEM *edit_job_codes(JCR *jcr, char *omsg, char *imsg, const char *to, job_code_callback_t callback) +{ + char *p, *q; + const char *str; + char add[50]; + char name[MAX_ESCAPE_NAME_LENGTH]; + int i; + + *omsg = 0; + Dmsg1(200, "edit_job_codes: %s\n", imsg); + for (p=imsg; *p; p++) { + if (*p == '%') { + switch (*++p) { + case '%': + str = "%"; + break; + case 'c': + if (jcr) { + str = jcr->client_name; + } else { + str = _("*none*"); + } + break; + case 'd': + str = my_name; /* Director's name */ + break; + case 'e': + if (jcr) { + str = job_status_to_str(jcr->JobStatus, jcr->getErrors()); + } else { + str = _("*none*"); + } + break; + case 'E': /* Job Errors */ + str = edit_uint64(jcr->getErrors(), add); + break; + case 'i': + if (jcr) { + bsnprintf(add, sizeof(add), "%d", jcr->JobId); + str = add; + } else { + str = _("*none*"); + } + break; + case 'j': /* Job name */ + if (jcr) { + str = jcr->Job; + } else { + str = _("*none*"); + } + break; + case 'l': + if (jcr) { + str = job_level_to_str(jcr->getJobLevel()); + } else { + str = _("*none*"); + } + break; + case 'n': + if (jcr) { + bstrncpy(name, jcr->Job, sizeof(name)); + /* There are three periods after the Job name */ + for (i=0; i<3; i++) { + if ((q=strrchr(name, '.')) != NULL) { + *q = 0; + } + } + str = name; + } else { + str = _("*none*"); + } + break; + case 'r': + str = to; + break; + case 's': /* since time */ + if (jcr && jcr->stime) { + str = jcr->stime; + } else { + str = _("*none*"); + } + break; + case 'F': /* Job Files */ + str = edit_uint64(jcr->JobFiles, add); + break; + case 'b': /* Job Bytes */ + str = edit_uint64(jcr->JobBytes, add); + break; + case 't': + if (jcr) { + str = job_type_to_str(jcr->getJobType()); + } else { + str = _("*none*"); + } + break; + case 'v': + if (jcr) { + if (jcr->VolumeName && jcr->VolumeName[0]) { + str = jcr->VolumeName; + } else { + str = ""; + } + } else { + str = _("*none*"); + } + break; + case 'o': + edit_uint64(jcr->JobPriority, add); + str = add; + break; + case 'P': + edit_uint64(getpid(), add); + str = add; + break; + case 'R': /* Job ReadBytes */ + str = edit_uint64(jcr->ReadBytes, add); + break; + default: + str = NULL; + if (callback != NULL) { + str = callback(jcr, p, name, sizeof(name)); + } + + if (!str) { + add[0] = '%'; + add[1] = *p; + add[2] = 0; + str = add; + } + break; + } + } else { + add[0] = *p; + add[1] = 0; + str = add; + } + Dmsg1(1200, "add_str %s\n", str); + pm_strcat(&omsg, str); + Dmsg1(1200, "omsg=%s\n", omsg); + } + return omsg; +} + +void set_working_directory(char *wd) +{ + struct stat stat_buf; + + if (wd == NULL) { + Emsg0(M_ERROR_TERM, 0, _("Working directory not defined. Cannot continue.\n")); + } + if (stat(wd, &stat_buf) != 0) { + Emsg1(M_ERROR_TERM, 0, _("Working Directory: \"%s\" not found. Cannot continue.\n"), + wd); + } + if (!S_ISDIR(stat_buf.st_mode)) { + Emsg1(M_ERROR_TERM, 0, _("Working Directory: \"%s\" is not a directory. 
Cannot continue.\n"), + wd); + } + working_directory = wd; /* set global */ +} + +const char *last_path_separator(const char *str) +{ + if (*str != '\0') { + for (const char *p = &str[strlen(str) - 1]; p >= str; p--) { + if (IsPathSeparator(*p)) { + return p; + } + } + } + return NULL; +} diff --git a/src/lib/var.c b/src/lib/var.c new file mode 100644 index 00000000..510deeb9 --- /dev/null +++ b/src/lib/var.c @@ -0,0 +1,2720 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* +** OSSP var - Variable Expansion +** Copyright (c) 2001-2002 Ralf S. Engelschall +** Copyright (c) 2001-2002 The OSSP Project (http://www.ossp.org/) +** Copyright (c) 2001-2002 Cable & Wireless Deutschland (http://www.cw.com/de/) +** +** This file is part of OSSP var, a variable expansion +** library which can be found at http://www.ossp.org/pkg/lib/var/. +** +** Permission to use, copy, modify, and distribute this software for +** any purpose with or without fee is hereby granted, provided that +** the above copyright notice and this permission notice appear in all +** copies. +** +** For disclaimer see below. +*/ +/* + * Adapted by Kern Sibbald to Bacula June 2003 + */ + +#include "bacula.h" +#if defined(HAVE_PCREPOSIX) +# include +#elif defined(HAVE_WIN32) +# include "bregex.h" +#else +# include +#endif +#include "var.h" + +/* support for OSSP ex based exception throwing */ +#ifdef WITH_EX +#include "ex.h" +#define VAR_RC(rv) \ + ( (rv) != VAR_OK && (ex_catching && !ex_shielding) \ + ? 
(ex_throw(var_id, NULL, (rv)), (rv)) : (rv) ) +#else +#define VAR_RC(rv) (rv) +#endif /* WITH_EX */ + +#ifndef EOS +#define EOS '\0' +#endif + +/* +** +** ==== INTERNAL DATA STRUCTURES ==== +** +*/ + +typedef char char_class_t[256]; /* 256 == 2 ^ sizeof(unsigned char)*8 */ + +/* the external context structure */ +struct var_st { + var_syntax_t syntax; + char_class_t syntax_nameclass; + var_cb_value_t cb_value_fct; + void *cb_value_ctx; + var_cb_operation_t cb_operation_fct; + void *cb_operation_ctx; +}; + +/* the internal expansion context structure */ +struct var_parse_st { + struct var_parse_st *lower; + int force_expand; + int rel_lookup_flag; + int rel_lookup_cnt; + int index_this; +}; +typedef struct var_parse_st var_parse_t; + +/* the default syntax configuration */ +static const var_syntax_t var_syntax_default = { + '\\', /* escape */ + '$', /* delim_init */ + '{', /* delim_open */ + '}', /* delim_close */ + '[', /* index_open */ + ']', /* index_close */ + '#', /* index_mark */ + "a-zA-Z0-9_" /* name_chars */ +}; + +/* +** +** ==== FORMATTING FUNCTIONS ==== +** +*/ + +/* minimal output-independent vprintf(3) variant which supports %{c,s,d,%} only */ +static int +var_mvxprintf( + int (*output)(void *ctx, const char *buffer, int bufsize), void *ctx, + const char *format, va_list ap) +{ + /* sufficient integer buffer: x log_10(2) + safety */ + char ibuf[((sizeof(int)*8)/3)+10]; + const char *cp; + char c; + int d; + int n; + int bytes; + + if (format == NULL) + return -1; + bytes = 0; + while (*format != '\0') { + if (*format == '%') { + c = *(format+1); + if (c == '%') { + /* expand "%%" */ + cp = &c; + n = sizeof(char); + } + else if (c == 'c') { + /* expand "%c" */ + c = (char)va_arg(ap, int); + cp = &c; + n = sizeof(char); + } + else if (c == 's') { + /* expand "%s" */ + if ((cp = (char *)va_arg(ap, char *)) == NULL) + cp = "(null)"; + n = strlen(cp); + } + else if (c == 'd') { + /* expand "%d" */ + d = (int)va_arg(ap, int); + bsnprintf(ibuf, sizeof(ibuf), "%d", d); /* explicitly secure */ + cp = ibuf; + n = strlen(cp); + } + else { + /* any other "%X" */ + cp = (char *)format; + n = 2; + } + format += 2; + } + else { + /* plain text */ + cp = (char *)format; + if ((format = strchr(cp, '%')) == NULL) + format = strchr(cp, '\0'); + n = format - cp; + } + /* perform output operation */ + if (output != NULL) + if ((n = output(ctx, cp, n)) == -1) + break; + bytes += n; + } + return bytes; +} + +/* output callback function context for var_mvsnprintf() */ +typedef struct { + char *bufptr; + int buflen; +} var_mvsnprintf_cb_t; + +/* output callback function for var_mvsnprintf() */ +static int +var_mvsnprintf_cb( + void *_ctx, + const char *buffer, int bufsize) +{ + var_mvsnprintf_cb_t *ctx = (var_mvsnprintf_cb_t *)_ctx; + + if (bufsize > ctx->buflen) + return -1; + memcpy(ctx->bufptr, buffer, bufsize); + ctx->bufptr += bufsize; + ctx->buflen -= bufsize; + return bufsize; +} + +/* minimal vsnprintf(3) variant which supports %{c,s,d} only */ +static int +var_mvsnprintf( + char *buffer, int bufsize, + const char *format, va_list ap) +{ + int n; + var_mvsnprintf_cb_t ctx; + + if (format == NULL) + return -1; + if (buffer != NULL && bufsize == 0) + return -1; + if (buffer == NULL) + /* just determine output length */ + n = var_mvxprintf(NULL, NULL, format, ap); + else { + /* perform real output */ + ctx.bufptr = buffer; + ctx.buflen = bufsize; + n = var_mvxprintf(var_mvsnprintf_cb, &ctx, format, ap); + if (n != -1 && ctx.buflen == 0) + n = -1; + if (n != -1) + *(ctx.bufptr) = '\0'; + } + 
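+    /* Purely illustrative, with made-up values: since the underlying
+     * var_mvxprintf() understands only %%, %c, %s and %d, formatting
+     * "%s-%d" with the arguments ("vol", 42) yields "vol-42", while any
+     * other %X sequence is copied through literally as the two
+     * characters '%' and 'X' rather than being expanded. */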
return n; +} + +/* +** +** ==== PARSE CONTEXT FUNCTIONS ==== +** +*/ + +static var_parse_t * +var_parse_push( + var_parse_t *lower, var_parse_t *upper) +{ + if (upper == NULL) + return NULL; + memcpy(upper, lower, sizeof(var_parse_t)); + upper->lower = lower; + return upper; +} + +static var_parse_t * +var_parse_pop( + var_parse_t *upper) +{ + if (upper == NULL) + return NULL; + return upper->lower; +} + +/* +** +** ==== TOKEN BUFFER FUNCTIONS ==== +** +*/ + +#define TOKENBUF_INITIAL_BUFSIZE 64 + +typedef struct { + const char *begin; + const char *end; + int buffer_size; +} tokenbuf_t; + +static void +tokenbuf_init( + tokenbuf_t *buf) +{ + buf->begin = NULL; + buf->end = NULL; + buf->buffer_size = 0; + return; +} + +static int +tokenbuf_isundef( + tokenbuf_t *buf) +{ + if (buf->begin == NULL && buf->end == NULL) + return 1; + return 0; +} + +static int +tokenbuf_isempty( + tokenbuf_t *buf) +{ + if (buf->begin == buf->end) + return 1; + return 0; +} + +static void +tokenbuf_set( + tokenbuf_t *buf, const char *begin, const char *end, int buffer_size) +{ + buf->begin = begin; + buf->end = end; + buf->buffer_size = buffer_size; + return; +} + +static void +tokenbuf_move( + tokenbuf_t *src, tokenbuf_t *dst) +{ + dst->begin = src->begin; + dst->end = src->end; + dst->buffer_size = src->buffer_size; + tokenbuf_init(src); + return; +} + +static int +tokenbuf_assign( + tokenbuf_t *buf, const char *data, int len) +{ + char *p; + + if ((p = (char *)malloc(len + 1)) == NULL) + return 0; + memcpy(p, data, len); + buf->begin = p; + buf->end = p + len; + buf->buffer_size = len + 1; + *((char *)(buf->end)) = EOS; + return 1; +} + +static int +tokenbuf_append( + tokenbuf_t *output, const char *data, int len) +{ + char *new_buffer; + int new_size; + char *tmp; + + /* Is the tokenbuffer initialized at all? If not, allocate a + standard-sized buffer to begin with. */ + if (output->begin == NULL) { + if ((output->begin = output->end = (const char *)malloc(TOKENBUF_INITIAL_BUFSIZE)) == NULL) + return 0; + output->buffer_size = TOKENBUF_INITIAL_BUFSIZE; + } + + /* does the token contain text, but no buffer has been allocated yet? */ + if (output->buffer_size == 0) { + /* check whether data borders to output. If, we can append + simly by increasing the end pointer. */ + if (output->end == data) { + output->end += len; + return 1; + } + /* ok, so copy the contents of output into an allocated buffer + so that we can append that way. */ + if ((tmp = (char *)malloc(output->end - output->begin + len + 1)) == NULL) + return 0; + memcpy(tmp, output->begin, output->end - output->begin); + output->buffer_size = output->end - output->begin; + output->begin = tmp; + output->end = tmp + output->buffer_size; + output->buffer_size += len + 1; + } + + /* does the token fit into the current buffer? If not, realloc a + larger buffer that fits. */ + if ((output->buffer_size - (output->end - output->begin)) <= len) { + new_size = output->buffer_size; + do { + new_size *= 2; + } while ((new_size - (output->end - output->begin)) <= len); + if ((new_buffer = (char *)realloc((char *)output->begin, new_size)) == NULL) + return 0; + output->end = new_buffer + (output->end - output->begin); + output->begin = new_buffer; + output->buffer_size = new_size; + } + + /* append the data at the end of the current buffer. 
*/ + if (len > 0) + memcpy((char *)output->end, data, len); + output->end += len; + *((char *)output->end) = EOS; + return 1; +} + +static int +tokenbuf_merge( + tokenbuf_t *output, tokenbuf_t *input) +{ + return tokenbuf_append(output, input->begin, input->end - input->begin); +} + +static void +tokenbuf_free( + tokenbuf_t *buf) +{ + if (buf->begin != NULL && buf->buffer_size > 0) + free((char *)buf->begin); + buf->begin = buf->end = NULL; + buf->buffer_size = 0; + return; +} + +/* +** +** ==== CHARACTER CLASS EXPANSION ==== +** +*/ + +static void +expand_range(char a, char b, char_class_t chrclass) +{ + do { + chrclass[(int)a] = 1; + } while (++a <= b); + return; +} + +static var_rc_t +expand_character_class(const char *desc, char_class_t chrclass) +{ + int i; + + /* clear the class array. */ + for (i = 0; i < 256; ++i) + chrclass[i] = 0; + + /* walk through class description and set appropriate entries in array */ + while (*desc != EOS) { + if (desc[1] == '-' && desc[2] != EOS) { + if (desc[0] > desc[2]) + return VAR_ERR_INCORRECT_CLASS_SPEC; + expand_range(desc[0], desc[2], chrclass); + desc += 3; + } else { + chrclass[(int) *desc] = 1; + desc++; + } + } + return VAR_OK; +} + +/* +** +** ==== ESCAPE SEQUENCE EXPANSION FUNCTIONS ==== +** +*/ + +static int +expand_isoct( + int c) +{ + if (c >= '0' && c <= '7') + return 1; + else + return 0; +} + +static var_rc_t +expand_octal( + const char **src, char **dst, const char *end) +{ + int c; + + if (end - *src < 3) + return VAR_ERR_INCOMPLETE_OCTAL; + if ( !expand_isoct(**src) + || !expand_isoct((*src)[1]) + || !expand_isoct((*src)[2])) + return VAR_ERR_INVALID_OCTAL; + + c = **src - '0'; + if (c > 3) + return VAR_ERR_OCTAL_TOO_LARGE; + c *= 8; + (*src)++; + + c += **src - '0'; + c *= 8; + (*src)++; + + c += **src - '0'; + + **dst = (char) c; + (*dst)++; + return VAR_OK; +} + +static int +expand_ishex( + int c) +{ + if ((c >= '0' && c <= '9') || + (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F')) + return 1; + else + return 0; +} + +static var_rc_t +expand_simple_hex( + const char **src, char **dst, const char *end) +{ + int c = 0; + + if (end - *src < 2) + return VAR_ERR_INCOMPLETE_HEX; + if ( !expand_ishex(**src) + || !expand_ishex((*src)[1])) + return VAR_ERR_INVALID_HEX; + + if (**src >= '0' && **src <= '9') + c = **src - '0'; + else if (**src >= 'a' && **src <= 'f') + c = **src - 'a' + 10; + else if (**src >= 'A' && **src <= 'F') + c = **src - 'A' + 10; + + c = c << 4; + (*src)++; + + if (**src >= '0' && **src <= '9') + c += **src - '0'; + else if (**src >= 'a' && **src <= 'f') + c += **src - 'a' + 10; + else if (**src >= 'A' && **src <= 'F') + c += **src - 'A' + 10; + + **dst = (char)c; + (*dst)++; + return VAR_OK; +} + +static var_rc_t +expand_grouped_hex( + const char **src, char **dst, const char *end) +{ + var_rc_t rc; + + while (*src < end && **src != '}') { + if ((rc = expand_simple_hex(src, dst, end)) != VAR_OK) + return rc; + (*src)++; + } + if (*src == end) + return VAR_ERR_INCOMPLETE_GROUPED_HEX; + + return VAR_OK; +} + +static var_rc_t +expand_hex( + const char **src, char **dst, const char *end) +{ + if (*src == end) + return VAR_ERR_INCOMPLETE_HEX; + if (**src == '{') { + (*src)++; + return expand_grouped_hex(src, dst, end); + } else + return expand_simple_hex(src, dst, end); +} + +/* +** +** ==== RECURSIVE-DESCEND VARIABLE EXPANSION PARSER ==== +** +*/ + +/* forward declarations */ +static int parse_variable(var_t *var, var_parse_t *ctx, const char *begin, const char *end, tokenbuf_t *result); +static int parse_numexp 
(var_t *var, var_parse_t *ctx, const char *begin, const char *end, int *result, int *failed); +static int parse_name (var_t *var, var_parse_t *ctx, const char *begin, const char *end); + +/* parse pattern text */ +static int +parse_pattern( + var_t *var, var_parse_t *ctx, + const char *begin, const char *end) +{ + const char *p; + + /* parse until '/' */ + for (p = begin; p != end && *p != '/'; p++) { + if (*p == var->syntax.escape) { + if (p + 1 == end) + return VAR_ERR_INCOMPLETE_QUOTED_PAIR; + p++; + } + } + return (p - begin); +} + +/* parse substitution text */ +static int +parse_substext( + var_t *var, var_parse_t *ctx, + const char *begin, const char *end) +{ + const char *p; + + /* parse until delim_init or '/' */ + for (p = begin; p != end && *p != var->syntax.delim_init && *p != '/'; p++) { + if (*p == var->syntax.escape) { + if (p + 1 == end) + return VAR_ERR_INCOMPLETE_QUOTED_PAIR; + p++; + } + } + return (p - begin); +} + +/* parse expression text */ +static int +parse_exptext( + var_t *var, var_parse_t *ctx, + const char *begin, const char *end) +{ + const char *p; + + /* parse until delim_init or delim_close or ':' */ + for (p = begin; p != end + && *p != var->syntax.delim_init + && *p != var->syntax.delim_close + && *p != ':'; p++) { + if (*p == var->syntax.escape) { + if (p + 1 == end) + return VAR_ERR_INCOMPLETE_QUOTED_PAIR; + p++; + } + } + return (p - begin); +} + +/* parse opertion argument text */ +static int +parse_opargtext( + var_t *var, var_parse_t *ctx, + const char *begin, const char *end) +{ + const char *p; + + /* parse until delim_init or ')' */ + for (p = begin; p != end && *p != var->syntax.delim_init && *p != ')'; p++) { + if (*p == var->syntax.escape) { + if (p + 1 == end) + return VAR_ERR_INCOMPLETE_QUOTED_PAIR; + p++; + } + } + return (p - begin); +} + +static int +parse_opargtext_or_variable( + var_t *var, var_parse_t *ctx, + const char *begin, const char *end, + tokenbuf_t *result) +{ + const char *p; + tokenbuf_t tmp; + int rc; + + tokenbuf_init(result); + tokenbuf_init(&tmp); + p = begin; + if (p == end) + return 0; + do { + rc = parse_opargtext(var, ctx, p, end); + if (rc < 0) + goto error_return; + if (rc > 0) { + if (!tokenbuf_append(result, p, rc)) { + rc = VAR_ERR_OUT_OF_MEMORY; + goto error_return; + } + p += rc; + } + rc = parse_variable(var, ctx, p, end, &tmp); + if (rc < 0) + goto error_return; + if (rc > 0) { + p += rc; + if (!tokenbuf_merge(result, &tmp)) { + rc = VAR_ERR_OUT_OF_MEMORY; + goto error_return; + } + } + tokenbuf_free(&tmp); /* KES 11/9/2003 */ + } while (rc > 0); + tokenbuf_free(&tmp); + return (p - begin); + +error_return: + tokenbuf_free(&tmp); + tokenbuf_free(result); + return rc; +} + +/* parse expression or variable */ +static int +parse_exptext_or_variable( + var_t *var, var_parse_t *ctx, + const char *begin, const char *end, + tokenbuf_t *result) +{ + const char *p = begin; + tokenbuf_t tmp; + int rc; + + tokenbuf_init(result); + tokenbuf_init(&tmp); + if (begin == end) + return 0; + do { + /* try to parse expression text */ + rc = parse_exptext(var, ctx, p, end); + if (rc < 0) + goto error_return; + if (rc > 0) { + if (!tokenbuf_append(result, p, rc)) { + rc = VAR_ERR_OUT_OF_MEMORY; + goto error_return; + } + p += rc; + } + + /* try to parse variable construct */ + rc = parse_variable(var, ctx, p, end, &tmp); + if (rc < 0) + goto error_return; + if (rc > 0) { + p += rc; + if (!tokenbuf_merge(result, &tmp)) { + rc = VAR_ERR_OUT_OF_MEMORY; + goto error_return; + } + } + tokenbuf_free(&tmp); /* KES 11/9/2003 */ + } 
while (rc > 0); + + tokenbuf_free(&tmp); + return (p - begin); + +error_return: + tokenbuf_free(&tmp); + tokenbuf_free(result); + return rc; +} + +/* parse substitution text or variable */ +static int +parse_substext_or_variable( + var_t *var, var_parse_t *ctx, + const char *begin, const char *end, + tokenbuf_t *result) +{ + const char *p = begin; + tokenbuf_t tmp; + int rc; + + tokenbuf_init(result); + tokenbuf_init(&tmp); + if (begin == end) + return 0; + do { + /* try to parse substitution text */ + rc = parse_substext(var, ctx, p, end); + if (rc < 0) + goto error_return; + if (rc > 0) { + if (!tokenbuf_append(result, p, rc)) { + rc = VAR_ERR_OUT_OF_MEMORY; + goto error_return; + } + p += rc; + } + + /* try to parse substitution text */ + rc = parse_variable(var, ctx, p, end, &tmp); + if (rc < 0) + goto error_return; + if (rc > 0) { + p += rc; + if (!tokenbuf_merge(result, &tmp)) { + rc = VAR_ERR_OUT_OF_MEMORY; + goto error_return; + } + } + tokenbuf_free(&tmp); /* KES 11/9/2003 */ + } while (rc > 0); + + tokenbuf_free(&tmp); + return (p - begin); + +error_return: + tokenbuf_free(&tmp); + tokenbuf_free(result); + return rc; +} + +/* parse class description */ +static int +parse_class_description( + var_t *var, var_parse_t *ctx, + tokenbuf_t *src, tokenbuf_t *dst) +{ + unsigned char c, d; + const char *p; + + p = src->begin; + while (p != src->end) { + if ((src->end - p) >= 3 && p[1] == '-') { + if (*p > p[2]) + return VAR_ERR_INCORRECT_TRANSPOSE_CLASS_SPEC; + for (c = *p, d = p[2]; c <= d; ++c) { + if (!tokenbuf_append(dst, (char *)&c, 1)) + return VAR_ERR_OUT_OF_MEMORY; + } + p += 3; + } else { + if (!tokenbuf_append(dst, p, 1)) + return VAR_ERR_OUT_OF_MEMORY; + p++; + } + } + return VAR_OK; +} + +/* parse regex replace part */ +static int +parse_regex_replace( + var_t *var, var_parse_t *ctx, + const char *data, + tokenbuf_t *orig, + regmatch_t *pmatch, + tokenbuf_t *expanded) +{ + const char *p; + int i; + + p = orig->begin; + tokenbuf_init(expanded); + + while (p != orig->end) { + if (*p == '\\') { + if (orig->end - p <= 1) { + tokenbuf_free(expanded); + return VAR_ERR_INCOMPLETE_QUOTED_PAIR; + } + p++; + if (*p == '\\') { + if (!tokenbuf_append(expanded, p, 1)) { + tokenbuf_free(expanded); + return VAR_ERR_OUT_OF_MEMORY; + } + p++; + continue; + } + if (!isdigit((int)*p)) { + tokenbuf_free(expanded); + return VAR_ERR_UNKNOWN_QUOTED_PAIR_IN_REPLACE; + } + i = (*p - '0'); + p++; + if (pmatch[i].rm_so == -1 || pmatch[i].rm_eo == -1) { + tokenbuf_free(expanded); + return VAR_ERR_SUBMATCH_OUT_OF_RANGE; + } + if (!tokenbuf_append(expanded, data + pmatch[i].rm_so, + pmatch[i].rm_eo - pmatch[i].rm_so)) { + tokenbuf_free(expanded); + return VAR_ERR_OUT_OF_MEMORY; + } + } else { + if (!tokenbuf_append(expanded, p, 1)) { + tokenbuf_free(expanded); + return VAR_ERR_OUT_OF_MEMORY; + } + p++; + } + } + + return VAR_OK; +} + +/* operation: transpose */ +static int +op_transpose( + var_t *var, var_parse_t *ctx, + tokenbuf_t *data, + tokenbuf_t *search, + tokenbuf_t *replace) +{ + tokenbuf_t srcclass, dstclass; + const char *p; + int rc; + int i; + + tokenbuf_init(&srcclass); + tokenbuf_init(&dstclass); + if ((rc = parse_class_description(var, ctx, search, &srcclass)) != VAR_OK) + goto error_return; + if ((rc = parse_class_description(var, ctx, replace, &dstclass)) != VAR_OK) + goto error_return; + if (srcclass.begin == srcclass.end) { + rc = VAR_ERR_EMPTY_TRANSPOSE_CLASS; + goto error_return; + } + if ((srcclass.end - srcclass.begin) != (dstclass.end - dstclass.begin)) { + rc = 
VAR_ERR_TRANSPOSE_CLASSES_MISMATCH; + goto error_return; + } + if (data->buffer_size == 0) { + tokenbuf_t tmp; + if (!tokenbuf_assign(&tmp, data->begin, data->end - data->begin)) { + rc = VAR_ERR_OUT_OF_MEMORY; + goto error_return; + } + tokenbuf_move(&tmp, data); + } + for (p = data->begin; p != data->end; ++p) { + for (i = 0; i <= (srcclass.end - srcclass.begin); ++i) { + if (*p == srcclass.begin[i]) { + *((char *)p) = dstclass.begin[i]; + break; + } + } + } + tokenbuf_free(&srcclass); + tokenbuf_free(&dstclass); + return VAR_OK; + +error_return: + tokenbuf_free(search); + tokenbuf_free(replace); + tokenbuf_free(&srcclass); + tokenbuf_free(&dstclass); + return rc; +} + +/* operation: search & replace */ +static int +op_search_and_replace( + var_t *var, var_parse_t *ctx, + tokenbuf_t *data, + tokenbuf_t *search, + tokenbuf_t *replace, + tokenbuf_t *flags) +{ + tokenbuf_t tmp; + const char *p; + int case_insensitive = 0; + int multiline = 0; + int global = 0; + int no_regex = 0; + int rc; + + if (search->begin == search->end) + return VAR_ERR_EMPTY_SEARCH_STRING; + + for (p = flags->begin; p != flags->end; p++) { + switch (tolower(*p)) { + case 'm': + multiline = 1; + break; + case 'i': + case_insensitive = 1; + break; + case 'g': + global = 1; + break; + case 't': + no_regex = 1; + break; + default: + return VAR_ERR_UNKNOWN_REPLACE_FLAG; + } + } + + if (no_regex) { + /* plain text pattern based operation */ + tokenbuf_init(&tmp); + for (p = data->begin; p != data->end;) { + if (case_insensitive) + rc = strncasecmp(p, search->begin, search->end - search->begin); + else + rc = strncmp(p, search->begin, search->end - search->begin); + if (rc != 0) { + /* not matched, copy character */ + if (!tokenbuf_append(&tmp, p, 1)) { + tokenbuf_free(&tmp); + return VAR_ERR_OUT_OF_MEMORY; + } + p++; + } else { + /* matched, copy replacement string */ + tokenbuf_merge(&tmp, replace); + p += (search->end - search->begin); + if (!global) { + /* append remaining text */ + if (!tokenbuf_append(&tmp, p, data->end - p)) { + tokenbuf_free(&tmp); + return VAR_ERR_OUT_OF_MEMORY; + } + break; + } + } + } + tokenbuf_free(data); + tokenbuf_move(&tmp, data); + } else { + /* regular expression pattern based operation */ + tokenbuf_t mydata; + tokenbuf_t myreplace; + regex_t preg; + regmatch_t pmatch[10]; + int regexec_flag; + + /* copy pattern and data to own buffer to make sure they are EOS-terminated */ + if (!tokenbuf_assign(&tmp, search->begin, search->end - search->begin)) + return VAR_ERR_OUT_OF_MEMORY; + if (!tokenbuf_assign(&mydata, data->begin, data->end - data->begin)) { + tokenbuf_free(&tmp); + return VAR_ERR_OUT_OF_MEMORY; + } + + /* compile the pattern. */ + rc = regcomp(&preg, tmp.begin, + ( REG_EXTENDED + | (multiline ? REG_NEWLINE : 0) + | (case_insensitive ? 
REG_ICASE : 0))); + tokenbuf_free(&tmp); + if (rc != 0) { + tokenbuf_free(&mydata); + return VAR_ERR_INVALID_REGEX_IN_REPLACE; + } + + /* match the pattern and create the result string in the tmp buffer */ + tokenbuf_append(&tmp, "", 0); + for (p = mydata.begin; p < mydata.end; ) { + if (p == mydata.begin || p[-1] == '\n') + regexec_flag = 0; + else + regexec_flag = REG_NOTBOL; + rc = regexec(&preg, p, sizeof(pmatch) / sizeof(regmatch_t), pmatch, regexec_flag); + if (rc != 0) { + /* no (more) matching */ + tokenbuf_append(&tmp, p, mydata.end - p); + break; + } + else if ( multiline + && (p + pmatch[0].rm_so) == mydata.end + && (pmatch[0].rm_eo - pmatch[0].rm_so) == 0) { + /* special case: found empty pattern (usually /^/ or /$/ only) + in multi-line at end of data (after the last newline) */ + tokenbuf_append(&tmp, p, mydata.end - p); + break; + } + else { + /* append prolog string */ + if (!tokenbuf_append(&tmp, p, pmatch[0].rm_so)) { + regfree(&preg); + tokenbuf_free(&tmp); + tokenbuf_free(&mydata); + return VAR_ERR_OUT_OF_MEMORY; + } + /* create replace string */ + rc = parse_regex_replace(var, ctx, p, replace, pmatch, &myreplace); + if (rc != VAR_OK) { + regfree(&preg); + tokenbuf_free(&tmp); + tokenbuf_free(&mydata); + return rc; + } + /* append replace string */ + if (!tokenbuf_merge(&tmp, &myreplace)) { + regfree(&preg); + tokenbuf_free(&tmp); + tokenbuf_free(&mydata); + tokenbuf_free(&myreplace); + return VAR_ERR_OUT_OF_MEMORY; + } + tokenbuf_free(&myreplace); + /* skip now processed data */ + p += pmatch[0].rm_eo; + /* if pattern matched an empty part (think about + anchor-only regular expressions like /^/ or /$/) we + skip the next character to make sure we do not enter + an infinitive loop in matching */ + if ((pmatch[0].rm_eo - pmatch[0].rm_so) == 0) { + if (p >= mydata.end) + break; + if (!tokenbuf_append(&tmp, p, 1)) { + regfree(&preg); + tokenbuf_free(&tmp); + tokenbuf_free(&mydata); + return VAR_ERR_OUT_OF_MEMORY; + } + p++; + } + /* append prolog string and stop processing if we + do not perform the search & replace globally */ + if (!global) { + if (!tokenbuf_append(&tmp, p, mydata.end - p)) { + regfree(&preg); + tokenbuf_free(&tmp); + tokenbuf_free(&mydata); + return VAR_ERR_OUT_OF_MEMORY; + } + break; + } + } + } + regfree(&preg); + tokenbuf_free(data); + tokenbuf_move(&tmp, data); + tokenbuf_free(&mydata); + } + + return VAR_OK; +} + +/* operation: offset substring */ +static int +op_offset( + var_t *var, var_parse_t *ctx, + tokenbuf_t *data, + int num1, + int num2, + int isrange) +{ + tokenbuf_t res; + const char *p; + + /* determine begin of result string */ + if ((data->end - data->begin) < num1) + return VAR_ERR_OFFSET_OUT_OF_BOUNDS; + p = data->begin + num1; + + /* if num2 is zero, we copy the rest from there. */ + if (num2 == 0) { + if (!tokenbuf_assign(&res, p, data->end - p)) + return VAR_ERR_OUT_OF_MEMORY; + } else { + /* ok, then use num2. 
*/ + if (isrange) { + if ((p + num2) > data->end) + return VAR_ERR_RANGE_OUT_OF_BOUNDS; + if (!tokenbuf_assign(&res, p, num2)) + return VAR_ERR_OUT_OF_MEMORY; + } else { + if (num2 < num1) + return VAR_ERR_OFFSET_LOGIC; + if ((data->begin + num2) > data->end) + return VAR_ERR_RANGE_OUT_OF_BOUNDS; + if (!tokenbuf_assign(&res, p, num2 - num1 + 1)) + return VAR_ERR_OUT_OF_MEMORY; + } + } + tokenbuf_free(data); + tokenbuf_move(&res, data); + return VAR_OK; +} + +/* operation: padding */ +static int +op_padding( + var_t *var, var_parse_t *ctx, + tokenbuf_t *data, + int width, + tokenbuf_t *fill, + char position) +{ + tokenbuf_t result; + int i; + + if (fill->begin == fill->end) + return VAR_ERR_EMPTY_PADDING_FILL_STRING; + tokenbuf_init(&result); + if (position == 'l') { + /* left padding */ + i = width - (data->end - data->begin); + if (i > 0) { + i = i / (fill->end - fill->begin); + while (i > 0) { + if (!tokenbuf_append(data, fill->begin, fill->end - fill->begin)) + return VAR_ERR_OUT_OF_MEMORY; + i--; + } + i = (width - (data->end - data->begin)) % (fill->end - fill->begin); + if (!tokenbuf_append(data, fill->begin, i)) + return VAR_ERR_OUT_OF_MEMORY; + } + } else if (position == 'r') { + /* right padding */ + i = width - (data->end - data->begin); + if (i > 0) { + i = i / (fill->end - fill->begin); + while (i > 0) { + if (!tokenbuf_merge(&result, fill)) { + tokenbuf_free(&result); + return VAR_ERR_OUT_OF_MEMORY; + } + i--; + } + i = (width - (data->end - data->begin)) % (fill->end - fill->begin); + if (!tokenbuf_append(&result, fill->begin, i)) { + tokenbuf_free(&result); + return VAR_ERR_OUT_OF_MEMORY; + } + if (!tokenbuf_merge(&result, data)) { + tokenbuf_free(&result); + return VAR_ERR_OUT_OF_MEMORY; + } + /* move string from temporary buffer to data buffer */ + tokenbuf_free(data); + tokenbuf_move(&result, data); + } + } else if (position == 'c') { + /* centered padding */ + i = (width - (data->end - data->begin)) / 2; + if (i > 0) { + /* create the prefix */ + i = i / (fill->end - fill->begin); + while (i > 0) { + if (!tokenbuf_merge(&result, fill)) { + tokenbuf_free(&result); + return VAR_ERR_OUT_OF_MEMORY; + } + i--; + } + i = ((width - (data->end - data->begin)) / 2) + % (fill->end - fill->begin); + if (!tokenbuf_append(&result, fill->begin, i)) { + tokenbuf_free(&result); + return VAR_ERR_OUT_OF_MEMORY; + } + /* append the actual data string */ + if (!tokenbuf_merge(&result, data)) { + tokenbuf_free(&result); + return VAR_ERR_OUT_OF_MEMORY; + } + /* append the suffix */ + i = width - (result.end - result.begin); + i = i / (fill->end - fill->begin); + while (i > 0) { + if (!tokenbuf_merge(&result, fill)) { + tokenbuf_free(&result); + return VAR_ERR_OUT_OF_MEMORY; + } + i--; + } + i = width - (result.end - result.begin); + if (!tokenbuf_append(&result, fill->begin, i)) { + tokenbuf_free(&result); + return VAR_ERR_OUT_OF_MEMORY; + } + /* move string from temporary buffer to data buffer */ + tokenbuf_free(data); + tokenbuf_move(&result, data); + } + } + return VAR_OK; +} + +/* parse an integer number ("123") */ +static int +parse_integer( + var_t *var, var_parse_t *ctx, + const char *begin, const char *end, + int *result) +{ + const char *p; + int num; + + p = begin; + num = 0; + while (isdigit(*p) && p != end) { + num *= 10; + num += (*p - '0'); + p++; + } + if (result != NULL) + *result = num; + return (p - begin); +} + +/* parse an operation (":x...") */ +static int +parse_operation( + var_t *var, var_parse_t *ctx, + const char *begin, const char *end, + tokenbuf_t *data) +{ + 
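+    /* A sketch of the post-operation forms dispatched by the switch
+     * below ("name" and "text" are illustrative placeholders only):
+     *   ${name:l}  ${name:u}              lower-/upper-case the value
+     *   ${name:o2,6}  ${name:o2-4}        cut a substring (start,end or start-length)
+     *   ${name:#}                         length of the value
+     *   ${name:-text}  ${name:*text}  ${name:+text}
+     *                                     substitute depending on empty/non-empty value
+     *   ${name:s/pattern/replace/flags}   search and replace (flags g,i,m,t)
+     *   ${name:y/abc/xyz/}                transpose character classes
+     *   ${name:p/width/fill/l}            pad ('l' left, 'r' right, 'c' centered)
+     *   ${name:%op(arg)}                  user-supplied operation callback
+     */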
const char *p; + tokenbuf_t tmptokbuf; + tokenbuf_t search, replace, flags; + tokenbuf_t number1, number2; + int num1, num2; + int isrange; + int rc; + char *ptr; + + /* initialization */ + tokenbuf_init(&tmptokbuf); + tokenbuf_init(&search); + tokenbuf_init(&replace); + tokenbuf_init(&flags); + tokenbuf_init(&number1); + tokenbuf_init(&number2); + p = begin; + if (p == end) + return 0; + + /* dispatch through the first operation character */ + switch (tolower(*p)) { + case 'l': { + /* turn value to lowercase. */ + if (data->begin != NULL) { + /* if the buffer does not live in an allocated buffer, + we have to copy it before modifying the contents. */ + if (data->buffer_size == 0) { + if (!tokenbuf_assign(data, data->begin, data->end - data->begin)) { + rc = VAR_ERR_OUT_OF_MEMORY; + goto error_return; + } + } + /* convert value */ + for (ptr = (char *)data->begin; ptr != data->end; ptr++) + *ptr = (char)tolower((int)(*ptr)); + } + p++; + break; + } + case 'u': { + /* turn value to uppercase. */ + if (data->begin != NULL) { + /* if the buffer does not live in an allocated buffer, + we have to copy it before modifying the contents. */ + if (data->buffer_size == 0) { + if (!tokenbuf_assign(data, data->begin, data->end - data->begin)) { + rc = VAR_ERR_OUT_OF_MEMORY; + goto error_return; + } + } + /* convert value */ + for (ptr = (char *)data->begin; ptr != data->end; ptr++) + *ptr = (char)toupper((int)(*ptr)); + } + p++; + break; + } + case 'o': { + /* cut out substring of value. */ + p++; + rc = parse_integer(var, ctx, p, end, &num1); + if (rc == 0) { + rc = VAR_ERR_MISSING_START_OFFSET; + goto error_return; + } + else if (rc < 0) + goto error_return; + p += rc; + if (*p == ',') { + isrange = 0; + p++; + } else if (*p == '-') { + isrange = 1; + p++; + } else { + rc = VAR_ERR_INVALID_OFFSET_DELIMITER; + goto error_return; + } + rc = parse_integer(var, ctx, p, end, &num2); + p += rc; + if (data->begin != NULL) { + rc = op_offset(var, ctx, data, num1, num2, isrange); + if (rc < 0) + goto error_return; + } + break; + } + case '#': { + /* determine length of the value */ + if (data->begin != NULL) { + char buf[((sizeof(int)*8)/3)+10]; /* sufficient size: <#bits> x log_10(2) + safety */ + sprintf(buf, "%d", (int)(data->end - data->begin)); + tokenbuf_free(data); + if (!tokenbuf_assign(data, buf, strlen(buf))) { + rc = VAR_ERR_OUT_OF_MEMORY; + goto error_return; + } + } + p++; + break; + } + case '-': { + /* substitute parameter if data is empty */ + p++; + rc = parse_exptext_or_variable(var, ctx, p, end, &tmptokbuf); + if (rc < 0) + goto error_return; + if (rc == 0) { + rc = VAR_ERR_MISSING_PARAMETER_IN_COMMAND; + goto error_return; + } + p += rc; + if (tokenbuf_isundef(data)) + tokenbuf_move(&tmptokbuf, data); + else if (tokenbuf_isempty(data)) { + tokenbuf_free(data); + tokenbuf_move(&tmptokbuf, data); + } + break; + } + case '*': { + /* substitute empty string if data is not empty, parameter otherwise. */ + p++; + rc = parse_exptext_or_variable(var, ctx, p, end, &tmptokbuf); + if (rc < 0) + goto error_return; + if (rc == 0) { + rc = VAR_ERR_MISSING_PARAMETER_IN_COMMAND; + goto error_return; + } + p += rc; + if (data->begin != NULL) { + if (data->begin == data->end) { + tokenbuf_free(data); + tokenbuf_move(&tmptokbuf, data); + } else { + tokenbuf_free(data); + data->begin = data->end = ""; + data->buffer_size = 0; + } + } + break; + } + case '+': { + /* substitute parameter if data is not empty. 
*/ + p++; + rc = parse_exptext_or_variable(var, ctx, p, end, &tmptokbuf); + if (rc < 0) + goto error_return; + if (rc == 0) { + rc = VAR_ERR_MISSING_PARAMETER_IN_COMMAND; + goto error_return; + } + p += rc; + if (data->begin != NULL && data->begin != data->end) { + tokenbuf_free(data); + tokenbuf_move(&tmptokbuf, data); + } + break; + } + case 's': { + /* search and replace. */ + p++; + if (*p != '/') + return VAR_ERR_MALFORMATTED_REPLACE; + p++; + rc = parse_pattern(var, ctx, p, end); + if (rc < 0) + goto error_return; + tokenbuf_set(&search, p, p + rc, 0); + p += rc; + if (*p != '/') { + rc = VAR_ERR_MALFORMATTED_REPLACE; + goto error_return; + } + p++; + rc = parse_substext_or_variable(var, ctx, p, end, &replace); + if (rc < 0) + goto error_return; + p += rc; + if (*p != '/') { + rc = VAR_ERR_MALFORMATTED_REPLACE; + goto error_return; + } + p++; + rc = parse_exptext(var, ctx, p, end); + if (rc < 0) + goto error_return; + tokenbuf_set(&flags, p, p + rc, 0); + p += rc; + if (data->begin != NULL) { + rc = op_search_and_replace(var, ctx, data, &search, &replace, &flags); + if (rc < 0) + goto error_return; + } + break; + } + case 'y': { + /* transpose characters from class A to class B. */ + p++; + if (*p != '/') + return VAR_ERR_MALFORMATTED_TRANSPOSE; + p++; + rc = parse_substext_or_variable(var, ctx, p, end, &search); + if (rc < 0) + goto error_return; + p += rc; + if (*p != '/') { + rc = VAR_ERR_MALFORMATTED_TRANSPOSE; + goto error_return; + } + p++; + rc = parse_substext_or_variable(var, ctx, p, end, &replace); + if (rc < 0) + goto error_return; + p += rc; + if (*p != '/') { + rc = VAR_ERR_MALFORMATTED_TRANSPOSE; + goto error_return; + } else + p++; + if (data->begin) { + rc = op_transpose(var, ctx, data, &search, &replace); + if (rc < 0) + goto error_return; + } + break; + } + case 'p': { + /* padding. 
*/ + p++; + if (*p != '/') + return VAR_ERR_MALFORMATTED_PADDING; + p++; + rc = parse_integer(var, ctx, p, end, &num1); + if (rc == 0) { + rc = VAR_ERR_MISSING_PADDING_WIDTH; + goto error_return; + } + p += rc; + if (*p != '/') { + rc = VAR_ERR_MALFORMATTED_PADDING; + goto error_return; + } + p++; + rc = parse_substext_or_variable(var, ctx, p, end, &replace); + if (rc < 0) + goto error_return; + p += rc; + if (*p != '/') { + rc = VAR_ERR_MALFORMATTED_PADDING; + goto error_return; + } + p++; + if (*p != 'l' && *p != 'c' && *p != 'r') { + rc = VAR_ERR_MALFORMATTED_PADDING; + goto error_return; + } + p++; + if (data->begin) { + rc = op_padding(var, ctx, data, num1, &replace, p[-1]); + if (rc < 0) + goto error_return; + } + break; + } + case '%': { + /* operation callback function */ + const char *op_ptr; + int op_len; + const char *arg_ptr; + int arg_len; + const char *val_ptr; + int val_len; + const char *out_ptr; + int out_len; + int out_size; + tokenbuf_t args; + + p++; + rc = parse_name(var, ctx, p, end); + if (rc < 0) + goto error_return; + op_ptr = p; + op_len = rc; + p += rc; + if (*p == '(') { + p++; + tokenbuf_init(&args); + rc = parse_opargtext_or_variable(var, ctx, p, end, &args); + if (rc < 0) + goto error_return; + p += rc; + arg_ptr = args.begin; + arg_len = args.end - args.begin; + if (*p != ')') { + rc = VAR_ERR_MALFORMED_OPERATION_ARGUMENTS; + goto error_return; + } + p++; + } + else { + arg_ptr = NULL; + arg_len = 0; + } + val_ptr = data->begin; + val_len = data->end - data->begin; + + if (data->begin != NULL && var->cb_operation_fct != NULL) { + /* call operation callback function */ + rc = (*var->cb_operation_fct)(var, var->cb_operation_ctx, + op_ptr, op_len, + arg_ptr, arg_len, + val_ptr, val_len, + &out_ptr, &out_len, &out_size); + if (rc < 0) { + if (arg_ptr != NULL) + free((void *)arg_ptr); + goto error_return; + } + tokenbuf_free(data); + tokenbuf_set(data, out_ptr, out_ptr+out_len, out_size); + } + if (arg_ptr != NULL) + free((void *)arg_ptr); + break; + } + default: + return VAR_ERR_UNKNOWN_COMMAND_CHAR; + } + + /* return successfully */ + tokenbuf_free(&tmptokbuf); + tokenbuf_free(&search); + tokenbuf_free(&replace); + tokenbuf_free(&flags); + tokenbuf_free(&number1); + tokenbuf_free(&number2); + return (p - begin); + + /* return with an error */ +error_return: + tokenbuf_free(data); + tokenbuf_free(&tmptokbuf); + tokenbuf_free(&search); + tokenbuf_free(&replace); + tokenbuf_free(&flags); + tokenbuf_free(&number1); + tokenbuf_free(&number2); + return rc; +} + +/* parse numerical expression operand */ +static int +parse_numexp_operand( + var_t *var, var_parse_t *ctx, + const char *begin, const char *end, + int *result, int *failed) +{ + const char *p; + tokenbuf_t tmp; + int rc; + var_parse_t myctx; + + /* initialization */ + p = begin; + tokenbuf_init(&tmp); + if (p == end) + return VAR_ERR_INCOMPLETE_INDEX_SPEC; + + /* parse opening numerical expression */ + if (*p == '(') { + /* parse inner numerical expression */ + rc = parse_numexp(var, ctx, ++p, end, result, failed); + if (rc < 0) + return rc; + p += rc; + if (p == end) + return VAR_ERR_INCOMPLETE_INDEX_SPEC; + /* parse closing parenthesis */ + if (*p != ')') + return VAR_ERR_UNCLOSED_BRACKET_IN_INDEX; + p++; + } + /* parse contained variable */ + else if (*p == var->syntax.delim_init) { + /* parse variable with forced expansion */ + ctx = var_parse_push(ctx, &myctx); + ctx->force_expand = 1; + rc = parse_variable(var, ctx, p, end, &tmp); + ctx = var_parse_pop(ctx); + + if (rc == VAR_ERR_UNDEFINED_VARIABLE) { 
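+        /* Illustration with hypothetical names: an index expression such
+         * as ${name[${idx}+1]} where ${idx} is undefined ends up here;
+         * the failure is noted, the construct is re-parsed without forced
+         * expansion so the input is still consumed, and the surrounding
+         * variable construct can then be passed through unexpanded. */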
+ *failed = 1; + /* parse variable without forced expansion */ + ctx = var_parse_push(ctx, &myctx); + ctx->force_expand = 0; + rc = parse_variable(var, ctx, p, end, &tmp); + ctx = var_parse_pop(ctx); + if (rc < 0) + return rc; + p += rc; + *result = 0; + tokenbuf_free(&tmp); /* KES 11/9/2003 */ + } else if (rc < 0) { + return rc; + } else { + p += rc; + /* parse remaining numerical expression */ + rc = parse_numexp(var, ctx, tmp.begin, tmp.end, result, failed); + tokenbuf_free(&tmp); + if (rc < 0) + return rc; + } + } + /* parse relative index mark ("#") */ + else if ( var->syntax.index_mark != EOS + && *p == var->syntax.index_mark) { + p++; + *result = ctx->index_this; + if (ctx->rel_lookup_flag) + ctx->rel_lookup_cnt++; + } + /* parse plain integer number */ + else if (isdigit(*p)) { + rc = parse_integer(var, ctx, p, end, result); + p += rc; + } + /* parse signed positive integer number */ + else if (*p == '+') { + if ((end - p) > 1 && isdigit(p[1])) { + p++; + rc = parse_integer(var, ctx, p, end, result); + p += rc; + } + else + return VAR_ERR_INVALID_CHAR_IN_INDEX_SPEC; + } + /* parse signed negative integer number */ + else if (*p == '-') { + if (end - p > 1 && isdigit(p[1])) { + p++; + rc = parse_integer(var, ctx, p, end, result); + *result = -(*result); + p += rc; + } + else + return VAR_ERR_INVALID_CHAR_IN_INDEX_SPEC; + } + /* else we failed to parse anything reasonable */ + else + return VAR_ERR_INVALID_CHAR_IN_INDEX_SPEC; + + return (p - begin); +} + +/* parse numerical expression ("x+y") */ +static int +parse_numexp( + var_t *var, var_parse_t *ctx, + const char *begin, const char *end, + int *result, int *failed) +{ + const char *p; + char op; + int right; + int rc; + + /* initialization */ + p = begin; + if (p == end) + return VAR_ERR_INCOMPLETE_INDEX_SPEC; + + /* parse left numerical operand */ + rc = parse_numexp_operand(var, ctx, p, end, result, failed); + if (rc < 0) + return rc; + p += rc; + + /* parse numerical operator */ + while (p != end) { + if (*p == '+' || *p == '-') { + op = *p++; + /* recursively parse right operand (light binding) */ + rc = parse_numexp(var, ctx, p, end, &right, failed); + if (rc < 0) + return rc; + p += rc; + if (op == '+') + *result = (*result + right); + else + *result = (*result - right); + } + else if (*p == '*' || *p == '/' || *p == '%') { + op = *p++; + /* recursively parse right operand (string binding) */ + rc = parse_numexp_operand(var, ctx, p, end, &right, failed); + if (rc < 0) + return rc; + p += rc; + if (op == '*') + *result = (*result * right); + else if (op == '/') { + if (right == 0) { + if (*failed) + *result = 0; + else + return VAR_ERR_DIVISION_BY_ZERO_IN_INDEX; + } + else + *result = (*result / right); + } + else if (op == '%') { + if (right == 0) { + if (*failed) + *result = 0; + else + return VAR_ERR_DIVISION_BY_ZERO_IN_INDEX; + } + else + *result = (*result % right); + } + } + else + break; + } + + /* return amount of parsed input */ + return (p - begin); +} + +/* parse variable name ("abc") */ +static int +parse_name( + var_t *var, var_parse_t *ctx, + const char *begin, const char *end) +{ + const char *p; + + /* parse as long as name class characters are found */ + for (p = begin; p != end && var->syntax_nameclass[(int)(*p)]; p++) + ; + return (p - begin); +} + +/* lookup a variable value through the callback function */ +static int +lookup_value( + var_t *var, var_parse_t *ctx, + const char *var_ptr, int var_len, int var_inc, int var_idx, + const char **val_ptr, int *val_len, int *val_size) +{ + char buf[1]; + int rc; 
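+    /* A sketch of the contract assumed for the registered var_cb_value_t
+     * callback: on success it fills *val_ptr/*val_len (setting *val_size
+     * greater than zero only when it hands over an allocated buffer that
+     * the tokenbuf code may later free) and returns VAR_OK; for an
+     * unknown name it returns VAR_ERR_UNDEFINED_VARIABLE.  Any other
+     * error code is passed straight back to the parser below. */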
+ + /* pass through to original callback */ + rc = (*var->cb_value_fct)(var, var->cb_value_ctx, + var_ptr, var_len, var_inc, var_idx, + val_ptr, val_len, val_size); + + /* convert undefined variable into empty variable if relative + lookups are counted. This is the case inside an active loop + construct if no limits are given. There the parse_input() + has to proceed until all variables have undefined values. + This trick here allows it to determine this case. */ + if (ctx->rel_lookup_flag && rc == VAR_ERR_UNDEFINED_VARIABLE) { + ctx->rel_lookup_cnt--; + buf[0] = EOS; + /* ****FIXME**** passing back stack variable!!! */ + *val_ptr = buf; + *val_len = 0; + *val_size = 0; + return VAR_OK; + } + + return rc; +} + +/* parse complex variable construct ("${name...}") */ +static int +parse_variable_complex( + var_t *var, var_parse_t *ctx, + const char *begin, const char *end, + tokenbuf_t *result) +{ + const char *p; + const char *data; + int len, buffer_size; + int failed = 0; + int rc; + int idx = 0; + int inc; + tokenbuf_t name; + tokenbuf_t tmp; + + /* initializations */ + p = begin; + tokenbuf_init(&name); + tokenbuf_init(&tmp); + tokenbuf_init(result); + + /* parse open delimiter */ + if (p == end || *p != var->syntax.delim_open) + return 0; + p++; + if (p == end) + return VAR_ERR_INCOMPLETE_VARIABLE_SPEC; + + /* parse name of variable to expand. The name may consist of an + arbitrary number of variable name character and contained variable + constructs. */ + do { + /* parse a variable name */ + rc = parse_name(var, ctx, p, end); + if (rc < 0) + goto error_return; + if (rc > 0) { + if (!tokenbuf_append(&name, p, rc)) { + rc = VAR_ERR_OUT_OF_MEMORY; + goto error_return; + } + p += rc; + } + + /* parse an (embedded) variable */ + rc = parse_variable(var, ctx, p, end, &tmp); + if (rc < 0) + goto error_return; + if (rc > 0) { + if (!tokenbuf_merge(&name, &tmp)) { + rc = VAR_ERR_OUT_OF_MEMORY; + goto error_return; + } + p += rc; + } + tokenbuf_free(&tmp); /* KES 11/9/2003 */ + } while (rc > 0); + + /* we must have the complete expanded variable name now, + so make sure we really do. */ + if (name.begin == name.end) { + if (ctx->force_expand) { + rc = VAR_ERR_INCOMPLETE_VARIABLE_SPEC; + goto error_return; + } + else { + /* If no force_expand is requested, we have to back-off. + We're not sure whether our approach here is 100% correct, + because it _could_ have side-effects according to Peter + Simons, but as far as we know and tried it, it is + correct. 
But be warned -- RSE */ + tokenbuf_set(result, begin - 1, p, 0); + goto goahead; + } + } + + /* parse an optional index specification */ + if ( var->syntax.index_open != EOS + && *p == var->syntax.index_open) { + p++; + rc = parse_numexp(var, ctx, p, end, &idx, &failed); + if (rc < 0) + goto error_return; + if (rc == 0) { + rc = VAR_ERR_INCOMPLETE_INDEX_SPEC; + goto error_return; + } + p += rc; + if (p == end) { + rc = VAR_ERR_INCOMPLETE_INDEX_SPEC; + goto error_return; + } + if (*p != var->syntax.index_close) { + rc = VAR_ERR_INVALID_CHAR_IN_INDEX_SPEC; + goto error_return; + } + p++; + } + + /* parse end of variable construct or start of post-operations */ + if (p == end || (*p != var->syntax.delim_close && *p != ':' && *p != '+')) { + rc = VAR_ERR_INCOMPLETE_VARIABLE_SPEC; + goto error_return; + } + if ((inc = (*p++ == '+'))) { + p++; /* skip the + */ + } + + /* lookup the variable value now */ + if (failed) { + tokenbuf_set(result, begin - 1, p, 0); + } else { + rc = lookup_value(var, ctx, + name.begin, name.end-name.begin, inc, idx, + &data, &len, &buffer_size); + if (rc == VAR_ERR_UNDEFINED_VARIABLE) { + tokenbuf_init(result); /* delayed handling of undefined variable */ + } else if (rc < 0) { + goto error_return; + } else { + /* the preliminary result is the raw value of the variable. + This may be modified by the operations that may follow. */ + tokenbuf_set(result, data, data + len, buffer_size); + } + } + + /* parse optional post-operations */ +goahead: + if (p[-1] == ':') { + tokenbuf_free(&tmp); + tokenbuf_init(&tmp); + p--; + while (p != end && *p == ':') { + p++; + if (!failed) + rc = parse_operation(var, ctx, p, end, result); + else + rc = parse_operation(var, ctx, p, end, &tmp); + if (rc < 0) + goto error_return; + p += rc; + if (failed) + result->end += rc; + } + if (p == end || *p != var->syntax.delim_close) { + rc = VAR_ERR_INCOMPLETE_VARIABLE_SPEC; + goto error_return; + } + p++; + if (failed) + result->end++; + } else if (p[-1] == '+') { + p++; + } + + /* lazy handling of undefined variable */ + if (!failed && tokenbuf_isundef(result)) { + if (ctx->force_expand) { + rc = VAR_ERR_UNDEFINED_VARIABLE; + goto error_return; + } else { + tokenbuf_set(result, begin - 1, p, 0); + } + } + + /* return successfully */ + tokenbuf_free(&name); + tokenbuf_free(&tmp); + return (p - begin); + + /* return with an error */ +error_return: + tokenbuf_free(&name); + tokenbuf_free(&tmp); + tokenbuf_free(result); + return rc; +} + +/* parse variable construct ("$name" or "${name...}") */ +static int +parse_variable( + var_t *var, var_parse_t *ctx, + const char *begin, const char *end, + tokenbuf_t *result) +{ + const char *p; + const char *data; + int len, buffer_size; + int rc, rc2; + int inc; + + /* initialization */ + p = begin; + tokenbuf_init(result); + + /* parse init delimiter */ + if (p == end || *p != var->syntax.delim_init) + return 0; + p++; + if (p == end) + return VAR_ERR_INCOMPLETE_VARIABLE_SPEC; + + /* parse a simple variable name. 
+ (if this fails, we're try to parse a complex variable construct) */ + rc = parse_name(var, ctx, p, end); + if (rc < 0) + return rc; + if (rc > 0) { + inc = (p[rc] == '+'); + rc2 = lookup_value(var, ctx, p, rc, inc, 0, &data, &len, &buffer_size); + if (rc2 == VAR_ERR_UNDEFINED_VARIABLE && !ctx->force_expand) { + tokenbuf_set(result, begin, begin + 1 + rc, 0); + return (1 + rc); + } + if (rc2 < 0) + return rc2; + tokenbuf_set(result, data, data + len, buffer_size); + return (1 + rc); + } + + /* parse a complex variable construct (else case) */ + rc = parse_variable_complex(var, ctx, p, end, result); + if (rc > 0) + rc++; + return rc; +} + +/* parse loop construct limits ("[...]{b,s,e}") */ +static var_rc_t +parse_looplimits( + var_t *var, var_parse_t *ctx, + const char *begin, const char *end, + int *start, int *step, int *stop, int *open_stop) +{ + const char *p; + int rc; + int failed; + + /* initialization */ + p = begin; + + /* we are happy if nothing is to left to parse */ + if (p == end) + return VAR_OK; + + /* parse start delimiter */ + if (*p != var->syntax.delim_open) + return VAR_OK; + p++; + + /* parse loop start value */ + failed = 0; + rc = parse_numexp(var, ctx, p, end, start, &failed); + if (rc == VAR_ERR_INVALID_CHAR_IN_INDEX_SPEC) + *start = 0; /* use default */ + else if (rc < 0) + return (var_rc_t)rc; + else + p += rc; + if (failed) + return VAR_ERR_UNDEFINED_VARIABLE; + + /* parse separator */ + if (*p != ',') + return VAR_ERR_INVALID_CHAR_IN_LOOP_LIMITS; + p++; + + /* parse loop step value */ + failed = 0; + rc = parse_numexp(var, ctx, p, end, step, &failed); + if (rc == VAR_ERR_INVALID_CHAR_IN_INDEX_SPEC) + *step = 1; /* use default */ + else if (rc < 0) + return (var_rc_t)rc; + else + p += rc; + if (failed) + return VAR_ERR_UNDEFINED_VARIABLE; + + /* parse separator */ + if (*p != ',') { + /* if not found, parse end delimiter */ + if (*p != var->syntax.delim_close) + return VAR_ERR_INVALID_CHAR_IN_LOOP_LIMITS; + p++; + + /* shift step value to stop value */ + *stop = *step; + *step = 1; + + /* determine whether loop end is open */ + if (rc > 0) + *open_stop = 0; + else + *open_stop = 1; + return (var_rc_t)(p - begin); + } + p++; + + /* parse loop stop value */ + failed = 0; + rc = parse_numexp(var, ctx, p, end, stop, &failed); + if (rc == VAR_ERR_INVALID_CHAR_IN_INDEX_SPEC) { + *stop = 0; /* use default */ + *open_stop = 1; + } + else if (rc < 0) + return (var_rc_t)rc; + else { + *open_stop = 0; + p += rc; + } + if (failed) + return VAR_ERR_UNDEFINED_VARIABLE; + + /* parse end delimiter */ + if (*p != var->syntax.delim_close) + return VAR_ERR_INVALID_CHAR_IN_LOOP_LIMITS; + p++; + + /* return amount of parsed input */ + return (var_rc_t)(p - begin); +} + +/* parse plain text */ +static int +parse_text( + var_t *var, var_parse_t *ctx, + const char *begin, const char *end) +{ + const char *p; + + /* parse until delim_init (variable construct) + or index_open (loop construct) is found */ + for (p = begin; p != end; p++) { + if (*p == var->syntax.escape) { + p++; /* skip next character */ + if (p == end) + return VAR_ERR_INCOMPLETE_QUOTED_PAIR; + } + else if (*p == var->syntax.delim_init) + break; + else if ( var->syntax.index_open != EOS + && ( *p == var->syntax.index_open + || *p == var->syntax.index_close)) + break; + } + return (p - begin); +} + +/* expand input in general */ +static var_rc_t +parse_input( + var_t *var, var_parse_t *ctx, + const char *begin, const char *end, + tokenbuf_t *output, int recursion_level) +{ + const char *p; + int rc, rc2; + tokenbuf_t 
result; + int start, step, stop, open_stop; + int i; + int output_backup; + int rel_lookup_cnt; + int loop_limit_length; + var_parse_t myctx; + + /* initialization */ + p = begin; + + do { + /* try to parse a loop construct */ + if ( p != end + && var->syntax.index_open != EOS + && *p == var->syntax.index_open) { + p++; + + /* loop preparation */ + loop_limit_length = -1; + rel_lookup_cnt = ctx->rel_lookup_cnt; + open_stop = 1; + rc = 0; + start = 0; + step = 1; + stop = 0; + output_backup = 0; + + /* iterate over loop construct, either as long as there is + (still) nothing known about the limit, or there is an open + (=unknown) limit stop and there are still defined variables + or there is a stop limit known and it is still not reached */ + re_loop: + for (i = start; + ( ( open_stop + && ( loop_limit_length < 0 + || rel_lookup_cnt > ctx->rel_lookup_cnt)) + || ( !open_stop + && i <= stop) ); + i += step) { + + /* remember current output end for restoring */ + output_backup = (output->end - output->begin); + + /* open temporary context for recursion */ + ctx = var_parse_push(ctx, &myctx); + ctx->force_expand = 1; + ctx->rel_lookup_flag = 1; + ctx->index_this = i; + + /* recursive parse input through ourself */ + rc = parse_input(var, ctx, p, end, + output, recursion_level+1); + + /* retrieve info and close temporary context */ + rel_lookup_cnt = ctx->rel_lookup_cnt; + ctx = var_parse_pop(ctx); + + /* error handling */ + if (rc < 0) + goto error_return; + + /* make sure the loop construct is closed */ + if (p[rc] != var->syntax.index_close) { + rc = VAR_ERR_UNTERMINATED_LOOP_CONSTRUCT; + goto error_return; + } + + /* try to parse loop construct limit specification */ + if (loop_limit_length < 0) { + rc2 = parse_looplimits(var, ctx, p+rc+1, end, + &start, &step, &stop, &open_stop); + if (rc2 < 0) + goto error_return; + else if (rc2 == 0) + loop_limit_length = 0; + else if (rc2 > 0) { + loop_limit_length = rc2; + /* restart loop from scratch */ + output->end = (output->begin + output_backup); + goto re_loop; + } + } + } + + /* if stop value is open, restore to the output end + because the last iteration was just to determine the loop + termination and its result has to be discarded */ + if (open_stop) + output->end = (output->begin + output_backup); + + /* skip parsed loop construct */ + p += rc; + p++; + p += loop_limit_length; + + continue; + } + + /* try to parse plain text */ + rc = parse_text(var, ctx, p, end); + if (rc > 0) { + if (!tokenbuf_append(output, p, rc)) { + rc = VAR_ERR_OUT_OF_MEMORY; + goto error_return; + } + p += rc; + continue; + } else if (rc < 0) + goto error_return; + + /* try to parse a variable construct */ + tokenbuf_init(&result); + rc = parse_variable(var, ctx, p, end, &result); + if (rc > 0) { + if (!tokenbuf_merge(output, &result)) { + tokenbuf_free(&result); + rc = VAR_ERR_OUT_OF_MEMORY; + goto error_return; + } + tokenbuf_free(&result); + p += rc; + continue; + } + tokenbuf_free(&result); + if (rc < 0) + goto error_return; + + } while (p != end && rc > 0); + + /* We do not know whether this really could happen, but because we + are paranoid, report an error at the outer most parsing level if + there is still any input. Because this would mean that we are no + longer able to parse the remaining input as a loop construct, a + text or a variable construct. This would be very strange, but + could perhaps happen in case of configuration errors!?... 
*/ + if (recursion_level == 0 && p != end) { + rc = VAR_ERR_INPUT_ISNT_TEXT_NOR_VARIABLE; + goto error_return; + } + + /* return amount of parsed text */ + return (var_rc_t)(p - begin); + + /* return with an error where as a special case the output begin is + set to the input begin and the output end to the last input parsing + position. */ + error_return: + tokenbuf_free(output); + tokenbuf_set(output, begin, p, 0); + return (var_rc_t)rc; +} + +/* +** +** ==== APPLICATION PROGRAMMING INTERFACE (API) ==== +** +*/ + +/* create variable expansion context */ +var_rc_t +var_create( + var_t **pvar) +{ + var_t *var; + + if (pvar == NULL) + return VAR_RC(VAR_ERR_INVALID_ARGUMENT); + if ((var = (var_t *)malloc(sizeof(var_t))) == NULL) + return VAR_RC(VAR_ERR_OUT_OF_MEMORY); + memset(var, 0, sizeof(var_t)); + var_config(var, VAR_CONFIG_SYNTAX, &var_syntax_default); + *pvar = var; + return VAR_OK; +} + +/* destroy variable expansion context */ +var_rc_t +var_destroy( + var_t *var) +{ + if (var == NULL) + return VAR_RC(VAR_ERR_INVALID_ARGUMENT); + free(var); + return VAR_OK; +} + +/* configure variable expansion context */ +var_rc_t +var_config( + var_t *var, + var_config_t mode, + ...) +{ + va_list ap; + var_rc_t rc = VAR_OK; + + if (var == NULL) + return VAR_RC(VAR_ERR_INVALID_ARGUMENT); + va_start(ap, mode); + switch (mode) { + case VAR_CONFIG_SYNTAX: { + var_syntax_t *s; + s = (var_syntax_t *)va_arg(ap, void *); + if (s == NULL) + return VAR_RC(VAR_ERR_INVALID_ARGUMENT); + var->syntax.escape = s->escape; + var->syntax.delim_init = s->delim_init; + var->syntax.delim_open = s->delim_open; + var->syntax.delim_close = s->delim_close; + var->syntax.index_open = s->index_open; + var->syntax.index_close = s->index_close; + var->syntax.index_mark = s->index_mark; + var->syntax.name_chars = NULL; /* unused internally */ + if ((rc = expand_character_class(s->name_chars, var->syntax_nameclass)) != VAR_OK) + return VAR_RC(rc); + if ( var->syntax_nameclass[(int)var->syntax.delim_init] + || var->syntax_nameclass[(int)var->syntax.delim_open] + || var->syntax_nameclass[(int)var->syntax.delim_close] + || var->syntax_nameclass[(int)var->syntax.escape]) + return VAR_RC(VAR_ERR_INVALID_CONFIGURATION); + break; + } + case VAR_CONFIG_CB_VALUE: { + var_cb_value_t fct; + void *ctx; + fct = (var_cb_value_t)va_arg(ap, void *); + ctx = (void *)va_arg(ap, void *); + var->cb_value_fct = fct; + var->cb_value_ctx = ctx; + break; + } + case VAR_CONFIG_CB_OPERATION: { + var_cb_operation_t fct; + void *ctx; + fct = (var_cb_operation_t)va_arg(ap, void *); + ctx = (void *)va_arg(ap, void *); + var->cb_operation_fct = fct; + var->cb_operation_ctx = ctx; + break; + } + default: + return VAR_RC(VAR_ERR_INVALID_ARGUMENT); + } + va_end(ap); + return VAR_OK; +} + +/* perform unescape operation on a buffer */ +var_rc_t +var_unescape( + var_t *var, + const char *src, int srclen, + char *dst, int dstlen, + int all) +{ + const char *end; + var_rc_t rc; + + if (var == NULL || src == NULL || dst == NULL) + return VAR_RC(VAR_ERR_INVALID_ARGUMENT); + end = src + srclen; + while (src < end) { + if (*src == '\\') { + if (++src == end) + return VAR_RC(VAR_ERR_INCOMPLETE_NAMED_CHARACTER); + switch (*src) { + case '\\': + if (!all) { + *dst++ = '\\'; + } + *dst++ = '\\'; + break; + case 'n': + *dst++ = '\n'; + break; + case 't': + *dst++ = '\t'; + break; + case 'r': + *dst++ = '\r'; + break; + case 'x': + ++src; + if ((rc = expand_hex(&src, &dst, end)) != VAR_OK) + return VAR_RC(rc); + break; + case '0': case '1': case '2': case '3': case '4': + case 
'5': case '6': case '7': case '8': case '9': + if ( end - src >= 3 + && isdigit((int)src[1]) + && isdigit((int)src[2])) { + if ((rc = expand_octal(&src, &dst, end)) != 0) + return VAR_RC(rc); + break; + } + default: + if (!all) { + *dst++ = '\\'; + } + *dst++ = *src; + } + ++src; + } else + *dst++ = *src++; + } + *dst = EOS; + return VAR_OK; +} + +/* perform expand operation on a buffer */ +var_rc_t +var_expand( + var_t *var, + const char *src_ptr, int src_len, + char **dst_ptr, int *dst_len, + int force_expand) +{ + var_parse_t ctx; + tokenbuf_t output; + var_rc_t rc; + + /* argument sanity checks */ + if (var == NULL || src_ptr == NULL || src_len == 0 || dst_ptr == NULL) + return VAR_RC(VAR_ERR_INVALID_ARGUMENT); + + /* prepare internal expansion context */ + ctx.lower = NULL; + ctx.force_expand = force_expand; + ctx.rel_lookup_flag = 0; + ctx.rel_lookup_cnt = 0; + ctx.index_this = 0; + + /* start the parsing */ + tokenbuf_init(&output); + rc = parse_input(var, &ctx, src_ptr, src_ptr+src_len, &output, 0); + + /* post-processing */ + if (rc >= 0) { + /* always EOS-terminate output for convinience reasons + but do not count the EOS-terminator in the length */ + if (!tokenbuf_append(&output, "\0", 1)) { + tokenbuf_free(&output); + return VAR_RC(VAR_ERR_OUT_OF_MEMORY); + } + output.end--; + + /* provide result */ + *dst_ptr = (char *)output.begin; + if (dst_len != NULL) + *dst_len = (output.end - output.begin); + rc = VAR_OK; + } + else { + /* provide result */ + if (dst_len != NULL) + *dst_len = (output.end - output.begin); + } + + return VAR_RC(rc); +} + +/* format and expand a string */ +var_rc_t +var_formatv( + var_t *var, + char **dst_ptr, int force_expand, + const char *fmt, va_list ap) +{ + var_rc_t rc; + char *cpBuf; + int nBuf = 5000; + + /* argument sanity checks */ + if (var == NULL || dst_ptr == NULL || fmt == NULL) + return VAR_RC(VAR_ERR_INVALID_ARGUMENT); + + /* perform formatting */ + if ((cpBuf = (char *)malloc(nBuf+1)) == NULL) + return VAR_RC(VAR_ERR_OUT_OF_MEMORY); + nBuf = var_mvsnprintf(cpBuf, nBuf+1, fmt, ap); + if (nBuf == -1) { + free(cpBuf); + return VAR_RC(VAR_ERR_FORMATTING_FAILURE); + } + + /* perform expansion */ + if ((rc = var_expand(var, cpBuf, nBuf, dst_ptr, NULL, force_expand)) != VAR_OK) { + free(cpBuf); + return VAR_RC(rc); + } + + /* cleanup */ + free(cpBuf); + + return VAR_OK; +} + +/* format and expand a string */ +var_rc_t +var_format( + var_t *var, + char **dst_ptr, int force_expand, + const char *fmt, ...) 
+{ + var_rc_t rc; + va_list ap; + + /* argument sanity checks */ + if (var == NULL || dst_ptr == NULL || fmt == NULL) + return VAR_RC(VAR_ERR_INVALID_ARGUMENT); + + va_start(ap, fmt); + rc = var_formatv(var, dst_ptr, force_expand, fmt, ap); + va_end(ap); + + return VAR_RC(rc); +} + +/* var_rc_t to string mapping table */ +static const char *var_errors[] = { + _("everything ok"), /* VAR_OK = 0 */ + _("incomplete named character"), /* VAR_ERR_INCOMPLETE_NAMED_CHARACTER */ + _("incomplete hexadecimal value"), /* VAR_ERR_INCOMPLETE_HEX */ + _("invalid hexadecimal value"), /* VAR_ERR_INVALID_HEX */ + _("octal value too large"), /* VAR_ERR_OCTAL_TOO_LARGE */ + _("invalid octal value"), /* VAR_ERR_INVALID_OCTAL */ + _("incomplete octal value"), /* VAR_ERR_INCOMPLETE_OCTAL */ + _("incomplete grouped hexadecimal value"), /* VAR_ERR_INCOMPLETE_GROUPED_HEX */ + _("incorrect character class specification"), /* VAR_ERR_INCORRECT_CLASS_SPEC */ + _("invalid expansion configuration"), /* VAR_ERR_INVALID_CONFIGURATION */ + _("out of memory"), /* VAR_ERR_OUT_OF_MEMORY */ + _("incomplete variable specification"), /* VAR_ERR_INCOMPLETE_VARIABLE_SPEC */ + _("undefined variable"), /* VAR_ERR_UNDEFINED_VARIABLE */ + _("input is neither text nor variable"), /* VAR_ERR_INPUT_ISNT_TEXT_NOR_VARIABLE */ + _("unknown command character in variable"), /* VAR_ERR_UNKNOWN_COMMAND_CHAR */ + _("malformatted search and replace operation"), /* VAR_ERR_MALFORMATTED_REPLACE */ + _("unknown flag in search and replace operation"), /* VAR_ERR_UNKNOWN_REPLACE_FLAG */ + _("invalid regex in search and replace operation"), /* VAR_ERR_INVALID_REGEX_IN_REPLACE */ + _("missing parameter in command"), /* VAR_ERR_MISSING_PARAMETER_IN_COMMAND */ + _("empty search string in search and replace operation"), /* VAR_ERR_EMPTY_SEARCH_STRING */ + _("start offset missing in cut operation"), /* VAR_ERR_MISSING_START_OFFSET */ + _("offsets in cut operation delimited by unknown character"), /* VAR_ERR_INVALID_OFFSET_DELIMITER */ + _("range out of bounds in cut operation"), /* VAR_ERR_RANGE_OUT_OF_BOUNDS */ + _("offset out of bounds in cut operation"), /* VAR_ERR_OFFSET_OUT_OF_BOUNDS */ + _("logic error in cut operation"), /* VAR_ERR_OFFSET_LOGIC */ + _("malformatted transpose operation"), /* VAR_ERR_MALFORMATTED_TRANSPOSE */ + _("source and target class mismatch in transpose operation"), /* VAR_ERR_TRANSPOSE_CLASSES_MISMATCH */ + _("empty character class in transpose operation"), /* VAR_ERR_EMPTY_TRANSPOSE_CLASS */ + _("incorrect character class in transpose operation"), /* VAR_ERR_INCORRECT_TRANSPOSE_CLASS_SPEC */ + _("malformatted padding operation"), /* VAR_ERR_MALFORMATTED_PADDING */ + _("width parameter missing in padding operation"), /* VAR_ERR_MISSING_PADDING_WIDTH */ + _("fill string missing in padding operation"), /* VAR_ERR_EMPTY_PADDING_FILL_STRING */ + _("unknown quoted pair in search and replace operation"), /* VAR_ERR_UNKNOWN_QUOTED_PAIR_IN_REPLACE */ + _("sub-matching reference out of range"), /* VAR_ERR_SUBMATCH_OUT_OF_RANGE */ + _("invalid argument"), /* VAR_ERR_INVALID_ARGUMENT */ + _("incomplete quoted pair"), /* VAR_ERR_INCOMPLETE_QUOTED_PAIR */ + _("lookup function does not support variable arrays"), /* VAR_ERR_ARRAY_LOOKUPS_ARE_UNSUPPORTED */ + _("index of array variable contains an invalid character"), /* VAR_ERR_INVALID_CHAR_IN_INDEX_SPEC */ + _("index of array variable is incomplete"), /* VAR_ERR_INCOMPLETE_INDEX_SPEC */ + _("bracket expression in array variable's index not closed"), /* VAR_ERR_UNCLOSED_BRACKET_IN_INDEX */ + 
_("division by zero error in index specification"), /* VAR_ERR_DIVISION_BY_ZERO_IN_INDEX */ + _("unterminated loop construct"), /* VAR_ERR_UNTERMINATED_LOOP_CONSTRUCT */ + _("invalid character in loop limits"), /* VAR_ERR_INVALID_CHAR_IN_LOOP_LIMITS */ + _("malformed operation argument list"), /* VAR_ERR_MALFORMED_OPERATION_ARGUMENTS */ + _("undefined operation"), /* VAR_ERR_UNDEFINED_OPERATION */ + _("formatting failure") /* VAR_ERR_FORMATTING_FAILURE */ +}; + +/* translate a return code into its corresponding descriptive text */ +const char *var_strerror(var_t *var, var_rc_t rc) +{ + const char *str; + rc = (var_rc_t)(0 - rc); + if (rc < 0 || rc >= (int)sizeof(var_errors) / (int)sizeof(char *)) { + str = _("unknown error"); + } else { + str = (char *)var_errors[rc]; + } + return str; +} diff --git a/src/lib/var.h b/src/lib/var.h new file mode 100644 index 00000000..212e928d --- /dev/null +++ b/src/lib/var.h @@ -0,0 +1,123 @@ +/* +** OSSP var - Variable Expansion +** Copyright (c) 2001-2002 Ralf S. Engelschall +** Copyright (c) 2001-2002 The OSSP Project (http://www.ossp.org/) +** Copyright (c) 2001-2002 Cable & Wireless Deutschland (http://www.cw.com/de/) +** +** This file is part of OSSP var, a variable expansion +** library which can be found at http://www.ossp.org/pkg/lib/var/. +** +** Permission to use, copy, modify, and distribute this software for +** any purpose with or without fee is hereby granted, provided that +** the above copyright notice and this permission notice appear in all +** copies. +** +** For disclaimer see below. +*/ +/* + * Modified for use with Bacula by Kern Sibbald, June 2003, Jan 2019 + */ + + + +#ifndef __VAR_H__ +#define __VAR_H__ + +/* Error codes */ +typedef enum { + VAR_ERR_CALLBACK = -64, + VAR_ERR_FORMATTING_FAILURE = -45, + VAR_ERR_UNDEFINED_OPERATION = -44, + VAR_ERR_MALFORMED_OPERATION_ARGUMENTS = -43, + VAR_ERR_INVALID_CHAR_IN_LOOP_LIMITS = -42, + VAR_ERR_UNTERMINATED_LOOP_CONSTRUCT = -41, + VAR_ERR_DIVISION_BY_ZERO_IN_INDEX = -40, + VAR_ERR_UNCLOSED_BRACKET_IN_INDEX = -39, + VAR_ERR_INCOMPLETE_INDEX_SPEC = -38, + VAR_ERR_INVALID_CHAR_IN_INDEX_SPEC = -37, + VAR_ERR_ARRAY_LOOKUPS_ARE_UNSUPPORTED = -36, + VAR_ERR_INCOMPLETE_QUOTED_PAIR = -35, + VAR_ERR_INVALID_ARGUMENT = -34, + VAR_ERR_SUBMATCH_OUT_OF_RANGE = -33, + VAR_ERR_UNKNOWN_QUOTED_PAIR_IN_REPLACE = -32, + VAR_ERR_EMPTY_PADDING_FILL_STRING = -31, + VAR_ERR_MISSING_PADDING_WIDTH = -30, + VAR_ERR_MALFORMATTED_PADDING = -29, + VAR_ERR_INCORRECT_TRANSPOSE_CLASS_SPEC = -28, + VAR_ERR_EMPTY_TRANSPOSE_CLASS = -27, + VAR_ERR_TRANSPOSE_CLASSES_MISMATCH = -26, + VAR_ERR_MALFORMATTED_TRANSPOSE = -25, + VAR_ERR_OFFSET_LOGIC = -24, + VAR_ERR_OFFSET_OUT_OF_BOUNDS = -23, + VAR_ERR_RANGE_OUT_OF_BOUNDS = -22, + VAR_ERR_INVALID_OFFSET_DELIMITER = -21, + VAR_ERR_MISSING_START_OFFSET = -20, + VAR_ERR_EMPTY_SEARCH_STRING = -19, + VAR_ERR_MISSING_PARAMETER_IN_COMMAND = -18, + VAR_ERR_INVALID_REGEX_IN_REPLACE = -17, + VAR_ERR_UNKNOWN_REPLACE_FLAG = -16, + VAR_ERR_MALFORMATTED_REPLACE = -15, + VAR_ERR_UNKNOWN_COMMAND_CHAR = -14, + VAR_ERR_INPUT_ISNT_TEXT_NOR_VARIABLE = -13, + VAR_ERR_UNDEFINED_VARIABLE = -12, + VAR_ERR_INCOMPLETE_VARIABLE_SPEC = -11, + VAR_ERR_OUT_OF_MEMORY = -10, + VAR_ERR_INVALID_CONFIGURATION = -9, + VAR_ERR_INCORRECT_CLASS_SPEC = -8, + VAR_ERR_INCOMPLETE_GROUPED_HEX = -7, + VAR_ERR_INCOMPLETE_OCTAL = -6, + VAR_ERR_INVALID_OCTAL = -5, + VAR_ERR_OCTAL_TOO_LARGE = -4, + VAR_ERR_INVALID_HEX = -3, + VAR_ERR_INCOMPLETE_HEX = -2, + VAR_ERR_INCOMPLETE_NAMED_CHARACTER = -1, + VAR_OK = 0 +} var_rc_t; +
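+/*
+ * Illustrative sketch (not part of the OSSP var API proper): a typical
+ * caller of the functions declared below checks the var_rc_t result and
+ * maps failures with var_strerror().  The names my_lookup_cb, my_ctx and
+ * in are hypothetical.
+ *
+ *    var_t *vc;
+ *    char *out = NULL;
+ *    if (var_create(&vc) == VAR_OK) {
+ *       var_config(vc, VAR_CONFIG_CB_VALUE, my_lookup_cb, my_ctx);
+ *       var_rc_t rc = var_expand(vc, in, strlen(in), &out, NULL, 1);
+ *       if (rc != VAR_OK) {
+ *          fprintf(stderr, "var: %s\n", var_strerror(vc, rc));
+ *       }
+ *       var_destroy(vc);
+ *    }
+ */
+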
+struct var_st; +typedef struct var_st var_t; + +enum _var_config_t { + VAR_CONFIG_SYNTAX, + VAR_CONFIG_CB_VALUE, + VAR_CONFIG_CB_OPERATION +}; +/* Force var_config_t to int to avoid compiler default conversion warnings */ +typedef int var_config_t; + +typedef struct { + char escape; /* default: '\' */ + char delim_init; /* default: '$' */ + char delim_open; /* default: '{' */ + char delim_close; /* default: '}' */ + char index_open; /* default: '[' */ + char index_close; /* default: ']' */ + char index_mark; /* default: '#' */ + const char *name_chars; /* default: "a-zA-Z0-9_" */ +} var_syntax_t; + +typedef var_rc_t (*var_cb_value_t)( + var_t *var, void *ctx, + const char *var_ptr, int var_len, int var_inc, int var_idx, + const char **val_ptr, int *val_len, int *val_size +); + +typedef var_rc_t (*var_cb_operation_t)( + var_t *var, void *ctx, + const char *op_ptr, int op_len, + const char *arg_ptr, int arg_len, + const char *val_ptr, int val_len, + const char **out_ptr, int *out_len, int *out_size +); + + +var_rc_t var_create (var_t **var); +var_rc_t var_destroy (var_t *var); +var_rc_t var_config (var_t *var, var_config_t mode, ...); +var_rc_t var_unescape (var_t *var, const char *src_ptr, int src_len, char *dst_ptr, int dst_len, int all); +var_rc_t var_expand (var_t *var, const char *src_ptr, int src_len, char **dst_ptr, int *dst_len, int force_expand); +var_rc_t var_formatv (var_t *var, char **dst_ptr, int force_expand, const char *fmt, va_list ap); +var_rc_t var_format (var_t *var, char **dst_ptr, int force_expand, const char *fmt, ...); +const char *var_strerror (var_t *var, var_rc_t rc); + +#endif /* __VAR_H__ */ diff --git a/src/lib/waitq.h b/src/lib/waitq.h new file mode 100644 index 00000000..7d62ecc7 --- /dev/null +++ b/src/lib/waitq.h @@ -0,0 +1,58 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula wait queue routines. Permits waiting for something + * to be done. I.e. for operator to mount new volume. + * + * Kern Sibbald, March MMI + * + * This code inspired from "Programming with POSIX Threads", by + * David R. 
Butenhof + * + */ + +#ifndef __WAITQ_H +#define __WAITQ_H 1 + +/* + * Structure to keep track of wait queue request + */ +typedef struct waitq_ele_tag { + struct waitq_ele_tag *next; + int done_flag; /* predicate for wait */ + pthread_cond_t done; /* wait for completion */ + void *msg; /* message to be passed */ +} waitq_ele_t; + +/* + * Structure describing a wait queue + */ +typedef struct waitq_tag { + pthread_mutex_t mutex; /* queue access control */ + pthread_cond_t wait_req; /* wait for OK */ + int num_msgs; /* number of waiters */ + waitq_ele_t *first; /* wait queue first item */ + waitq_ele_t *last; /* wait queue last item */ +} waitq_t; + +extern int waitq_init(waitq_t *wq); +extern int waitq_destroy(waitq_t *wq); +extern int waitq_add(waitq_t *wq, void *msg); + +#endif /* __WAITQ_H */ diff --git a/src/lib/watchdog.c b/src/lib/watchdog.c new file mode 100644 index 00000000..4b7afeb3 --- /dev/null +++ b/src/lib/watchdog.c @@ -0,0 +1,335 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula thread watchdog routine. General routine that + * allows setting a watchdog timer with a callback that is + * called when the timer goes off. + * + * Kern Sibbald, January MMII + * + */ + +#include "bacula.h" +#include "jcr.h" + +/* Exported globals */ +utime_t watchdog_time = 0; /* this has granularity of SLEEP_TIME */ +utime_t watchdog_sleep_time = 60; /* examine things every 60 seconds */ + +/* Locals */ +static pthread_mutex_t timer_mutex = PTHREAD_MUTEX_INITIALIZER; +static pthread_cond_t timer = PTHREAD_COND_INITIALIZER; + +/* Forward referenced functions */ +extern "C" void *watchdog_thread(void *arg); + +static void wd_lock(); +static void wd_unlock(); + +/* Static globals */ +static bool quit = false; +static bool wd_is_init = false; +static brwlock_t lock; /* watchdog lock */ + +static pthread_t wd_tid; +static dlist *wd_queue; +static dlist *wd_inactive; + +/* + * Returns: 0 if the current thread is NOT the watchdog + * 1 if the current thread is the watchdog + */ +bool is_watchdog() +{ + if (wd_is_init && pthread_equal(pthread_self(), wd_tid)) { + return true; + } else { + return false; + } +} + +/* + * Start watchdog thread + * + * Returns: 0 on success + * errno on failure + */ +int start_watchdog(void) +{ + int stat; + watchdog_t *dummy = NULL; + int errstat; + + if (wd_is_init) { + return 0; + } + Dmsg0(800, "Initialising NicB-hacked watchdog thread\n"); + watchdog_time = time(NULL); + + if ((errstat=rwl_init(&lock)) != 0) { + berrno be; + Jmsg1(NULL, M_ABORT, 0, _("Unable to initialize watchdog lock. ERR=%s\n"), + be.bstrerror(errstat)); + } + wd_queue = New(dlist(dummy, &dummy->link)); + wd_inactive = New(dlist(dummy, &dummy->link)); + wd_is_init = true; + + if ((stat = pthread_create(&wd_tid, NULL, watchdog_thread, NULL)) != 0) { + return stat; + } + return 0; +} + +/* + * Wake watchdog timer thread so that it walks the + * queue and adjusts its wait time (or exits).
+ */ +static void ping_watchdog() +{ + P(timer_mutex); + pthread_cond_signal(&timer); + V(timer_mutex); + bmicrosleep(0, 100); +} + +/* + * Terminate the watchdog thread + * + * Returns: 0 on success + * errno on failure + */ +int stop_watchdog(void) +{ + int stat; + watchdog_t *p; + + if (!wd_is_init) { + return 0; + } + + quit = true; /* notify watchdog thread to stop */ + ping_watchdog(); + + stat = pthread_join(wd_tid, NULL); + + while (!wd_queue->empty()) { + void *item = wd_queue->first(); + wd_queue->remove(item); + p = (watchdog_t *)item; + if (p->destructor != NULL) { + p->destructor(p); + } + free(p); + } + delete wd_queue; + wd_queue = NULL; + + while (!wd_inactive->empty()) { + void *item = wd_inactive->first(); + wd_inactive->remove(item); + p = (watchdog_t *)item; + if (p->destructor != NULL) { + p->destructor(p); + } + free(p); + } + delete wd_inactive; + wd_inactive = NULL; + rwl_destroy(&lock); + wd_is_init = false; + + return stat; +} + +watchdog_t *new_watchdog(void) +{ + watchdog_t *wd = (watchdog_t *)malloc(sizeof(watchdog_t)); + + if (!wd_is_init) { + start_watchdog(); + } + + if (wd == NULL) { + return NULL; + } + wd->one_shot = true; + wd->interval = 0; + wd->callback = NULL; + wd->destructor = NULL; + wd->data = NULL; + + return wd; +} + +bool register_watchdog(watchdog_t *wd) +{ + if (!wd_is_init) { + Jmsg0(NULL, M_ABORT, 0, _("BUG! register_watchdog called before start_watchdog\n")); + } + if (wd->callback == NULL) { + Jmsg1(NULL, M_ABORT, 0, _("BUG! Watchdog %p has NULL callback\n"), wd); + } + if (wd->interval == 0) { + Jmsg1(NULL, M_ABORT, 0, _("BUG! Watchdog %p has zero interval\n"), wd); + } + + wd_lock(); + wd->next_fire = watchdog_time + wd->interval; + wd_queue->append(wd); + Dmsg3(800, "Registered watchdog %p, interval %d%s\n", + wd, wd->interval, wd->one_shot ? " one shot" : ""); + wd_unlock(); + ping_watchdog(); + + return false; +} + +bool unregister_watchdog(watchdog_t *wd) +{ + watchdog_t *p; + bool ok = false; + + if (!wd_is_init) { + Jmsg0(NULL, M_ABORT, 0, _("BUG! unregister_watchdog_unlocked called before start_watchdog\n")); + } + + wd_lock(); + foreach_dlist(p, wd_queue) { + if (wd == p) { + wd_queue->remove(wd); + Dmsg1(800, "Unregistered watchdog %p\n", wd); + ok = true; + goto get_out; + } + } + + foreach_dlist(p, wd_inactive) { + if (wd == p) { + wd_inactive->remove(wd); + Dmsg1(800, "Unregistered inactive watchdog %p\n", wd); + ok = true; + goto get_out; + } + } + + Dmsg1(800, "Failed to unregister watchdog %p\n", wd); + +get_out: + wd_unlock(); + ping_watchdog(); + return ok; +} + +/* + * This is the thread that walks the watchdog queue + * and when a queue item fires, the callback is + * invoked. If it is a one shot, the queue item + * is moved to the inactive queue. 
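+ *
+ * For illustration only (hypothetical caller; my_timeout_cb is not a real
+ * routine in this file), a timer is typically armed through the API above:
+ *
+ *    watchdog_t *wd = new_watchdog();
+ *    wd->callback = my_timeout_cb;      // void my_timeout_cb(watchdog_t *)
+ *    wd->interval = 30;                 // fire roughly every 30 seconds
+ *    wd->one_shot = false;              // rearm after each firing
+ *    register_watchdog(wd);
+ *    ...
+ *    unregister_watchdog(wd);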
+ */ +extern "C" void *watchdog_thread(void *arg) +{ + struct timespec timeout; + struct timeval tv; + struct timezone tz; + utime_t next_time; + + set_jcr_in_tsd(INVALID_JCR); + Dmsg0(800, "NicB-reworked watchdog thread entered\n"); + + while (!quit) { + watchdog_t *p; + + wd_lock(); + +walk_list: + watchdog_time = time(NULL); + next_time = watchdog_time + watchdog_sleep_time; + foreach_dlist(p, wd_queue) { + if (p->next_fire <= watchdog_time) { + /* Run the callback */ + Dmsg2(3400, "Watchdog callback p=0x%p fire=%d\n", p, p->next_fire); + p->callback(p); + + /* Reschedule (or move to inactive list if it's a one-shot timer) */ + if (p->one_shot) { + wd_queue->remove(p); + wd_inactive->append(p); + goto walk_list; + } else { + p->next_fire = watchdog_time + p->interval; + } + } + if (p->next_fire <= next_time) { + next_time = p->next_fire; + } + } + wd_unlock(); + + /* + * Wait sleep time or until someone wakes us + */ + gettimeofday(&tv, &tz); + timeout.tv_nsec = tv.tv_usec * 1000; + timeout.tv_sec = tv.tv_sec + next_time - time(NULL); + while (timeout.tv_nsec >= 1000000000) { + timeout.tv_nsec -= 1000000000; + timeout.tv_sec++; + } + + Dmsg1(1900, "pthread_cond_timedwait %d\n", timeout.tv_sec - tv.tv_sec); + /* Note, this unlocks mutex during the sleep */ + P(timer_mutex); + pthread_cond_timedwait(&timer, &timer_mutex, &timeout); + V(timer_mutex); + } + + Dmsg0(800, "NicB-reworked watchdog thread exited\n"); + return NULL; +} + +/* + * Watchdog lock, this can be called multiple times by the same + * thread without blocking, but must be unlocked the number of + * times it was locked. + */ +static void wd_lock() +{ + int errstat; + if ((errstat=rwl_writelock(&lock)) != 0) { + berrno be; + Jmsg1(NULL, M_ABORT, 0, _("rwl_writelock failure. ERR=%s\n"), + be.bstrerror(errstat)); + } +} + +/* + * Unlock the watchdog. This can be called multiple times by the + * same thread up to the number of times that thread called + * wd_ lock()/ + */ +static void wd_unlock() +{ + int errstat; + if ((errstat=rwl_writeunlock(&lock)) != 0) { + berrno be; + Jmsg1(NULL, M_ABORT, 0, _("rwl_writeunlock failure. ERR=%s\n"), + be.bstrerror(errstat)); + } +} diff --git a/src/lib/watchdog.h b/src/lib/watchdog.h new file mode 100644 index 00000000..34a69408 --- /dev/null +++ b/src/lib/watchdog.h @@ -0,0 +1,48 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Watchdog timer routines + * + * Kern Sibbald, December MMII + * +*/ + +enum { + TYPE_CHILD = 1, + TYPE_PTHREAD, + TYPE_BSOCK +}; + +#define TIMEOUT_SIGNAL SIGUSR2 + +struct s_watchdog_t { + bool one_shot; + utime_t interval; + void (*callback)(struct s_watchdog_t *wd); + void (*destructor)(struct s_watchdog_t *wd); + void *data; + /* Private data below - don't touch outside of watchdog.c */ + dlink link; + utime_t next_fire; +}; +typedef struct s_watchdog_t watchdog_t; + +/* Exported globals */ +extern utime_t DLL_IMP_EXP watchdog_time; /* this has granularity of SLEEP_TIME */ +extern utime_t DLL_IMP_EXP watchdog_sleep_time; /* examine things every 60 seconds */ diff --git a/src/lib/worker.c b/src/lib/worker.c new file mode 100644 index 00000000..35397fcd --- /dev/null +++ b/src/lib/worker.c @@ -0,0 +1,437 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula worker class. It permits creating a worker thread, + * then sending data via a fifo queue to it. + * + * Kern Sibbald, August 2014 + * + */ + +#define LOCKMGR_COMPLIANT +#include "bacula.h" +#include "worker.h" + +int worker::init(int fifo_size) +{ + int stat; + + if ((stat = pthread_mutex_init(&mutex, NULL)) != 0) { + return stat; + } + if ((stat = pthread_mutex_init(&fmutex, NULL)) != 0) { + pthread_mutex_destroy(&mutex); + return stat; + } + if ((stat = pthread_cond_init(&full_wait, NULL)) != 0) { + pthread_mutex_destroy(&mutex); + pthread_mutex_destroy(&fmutex); + return stat; + } + if ((stat = pthread_cond_init(&empty_wait, NULL)) != 0) { + pthread_cond_destroy(&full_wait); + pthread_mutex_destroy(&mutex); + pthread_mutex_destroy(&fmutex); + return stat; + } + if ((stat = pthread_cond_init(&m_wait, NULL)) != 0) { + pthread_cond_destroy(&empty_wait); + pthread_cond_destroy(&full_wait); + pthread_mutex_destroy(&mutex); + pthread_mutex_destroy(&fmutex); + return stat; + } + valid = WORKER_VALID; + fifo = New(flist(fifo_size)); + fpool = New(alist(fifo_size + 2, false)); + worker_running = false; + set_wait_state(); + return 0; +} + +/* + * Handle cleanup when the lock is released. 
+ */ +static void worker_cleanup(void *arg) +{ + worker *wrk = (worker *)arg; + wrk->release_lock(); +} + + +void worker::release_lock() +{ + pthread_mutex_unlock(&mutex); +} + + +void worker::set_wait_state() +{ + m_state = WORKER_WAIT; +} + +void worker::set_run_state() +{ + if (is_quit_state()) return; + m_state = WORKER_RUN; + if (worker_waiting) { + pthread_cond_signal(&m_wait); + } +} + +void worker::set_quit_state() +{ + P(mutex); + m_state = WORKER_QUIT; + pthread_cond_signal(&m_wait); + pthread_cond_signal(&empty_wait); + V(mutex); +} + + +/* Empty the fifo putting in free pool */ +void worker::discard_queue() +{ + void *item; + + P(mutex); + P(fmutex); + while ((item = fifo->dequeue())) { + fpool->push(item); + } + V(fmutex); + V(mutex); +} + +/* + * Destroy a read/write lock + * + * Returns: 0 on success + * errno on failure + */ +int worker::destroy() +{ + int stat, stat1, stat2, stat3, stat4; + POOLMEM *item; + + m_state = WORKER_QUIT; + pthread_cond_signal(&m_wait); + pthread_cond_signal(&empty_wait); + + P(fmutex); + /* Release free pool */ + while ((item = (POOLMEM *)fpool->pop())) { + free_pool_memory(item); + } + V(fmutex); + fpool->destroy(); + free(fpool); + + /* Release work queue */ + while ((item = (POOLMEM *)fifo->dequeue())) { + free_pool_memory(item); + } + valid = 0; + worker_running = false; + + fifo->destroy(); + free(fifo); + + stat = pthread_mutex_destroy(&mutex); + stat1 = pthread_mutex_destroy(&fmutex); + stat2 = pthread_cond_destroy(&full_wait); + stat3 = pthread_cond_destroy(&empty_wait); + stat4 = pthread_cond_destroy(&m_wait); + if (stat != 0) return stat; + if (stat1 != 0) return stat1; + if (stat2 != 0) return stat2; + if (stat3 != 0) return stat3; + if (stat4 != 0) return stat4; + return 0; +} + + +/* Start the worker thread */ +int worker::start(void *(*auser_sub)(void *), void *auser_ctx) +{ + int stat; + int i; + if (valid != WORKER_VALID) { + return EINVAL; + } + user_sub = auser_sub; + user_ctx = auser_ctx; + if ((stat = pthread_create(&worker_id, NULL, user_sub, this) != 0)) { + return stat; + } + /* Wait for thread to start, but not too long */ + for (i=0; i<100 && !is_running(); i++) { + bmicrosleep(0, 5000); + } + set_run_state(); + return 0; +} + +/* Wait for the worker thread to empty the queue */ +void worker::wait_queue_empty() +{ + if (is_quit_state()) { + return; + } + P(mutex); + while (!empty() && !is_quit_state()) { + pthread_cond_wait(&empty_wait, &mutex); + } + V(mutex); + return; +} + +/* Wait for the main thread to release us */ +void worker::wait() +{ + P(mutex); + pthread_cleanup_push(worker_cleanup, (void *)this); + while (is_wait_state() && !is_quit_state()) { + worker_waiting = true; + pthread_cond_signal(&m_wait); + pthread_cond_wait(&m_wait, &mutex); + } + pthread_cleanup_pop(0); + worker_waiting = false; + V(mutex); +} + +/* Stop the worker thread */ +int worker::stop() +{ + if (valid != WORKER_VALID) { + return EINVAL; + } + m_state = WORKER_QUIT; + pthread_cond_signal(&m_wait); + pthread_cond_signal(&empty_wait); + + if (!pthread_equal(worker_id, pthread_self())) { + pthread_cancel(worker_id); + pthread_join(worker_id, NULL); + } + return 0; +} + + +/* + * Queue an item for the worker thread. Called by main thread. 
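+ *
+ * Blocks while the fifo is full and wakes the worker when the queue goes
+ * from empty to non-empty.  A minimal producer sketch (make_buffer() and
+ * fill_buffer() are hypothetical helpers):
+ *
+ *    void *buf = wrk->pop_free_buffer();
+ *    if (!buf) {
+ *       buf = make_buffer();
+ *    }
+ *    fill_buffer(buf);
+ *    wrk->queue(buf);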
+ */ +bool worker::queue(void *item) +{ + bool was_empty = false;; + + if (valid != WORKER_VALID || is_quit_state()) { + return EINVAL; + } + P(mutex); + done = false; + //pthread_cleanup_push(worker_cleanup, (void *)this); + while (full() && !is_quit_state()) { + pthread_cond_wait(&full_wait, &mutex); + } + //pthread_cleanup_pop(0); + /* Maybe this should be worker_running */ + was_empty = empty(); + if (!fifo->queue(item)) { + /* Since we waited for !full this cannot happen */ + V(mutex); + ASSERT2(1, "Fifo queue failed.\n"); + } + if (was_empty) { + pthread_cond_signal(&empty_wait); + } + m_state = WORKER_RUN; + if (worker_waiting) { + pthread_cond_signal(&m_wait); + } + V(mutex); + return 1; +} + +/* + * Wait for work to complete + */ +void worker::finish_work() +{ + P(mutex); + while (!empty() && !is_quit_state()) { + pthread_cond_wait(&empty_wait, &mutex); + } + done = true; /* Tell worker that work is done */ + m_state = WORKER_WAIT; /* force worker into wait state */ + V(mutex); /* pause for state transition */ + if (waiting_on_empty) pthread_cond_signal(&empty_wait); + P(mutex); + /* Wait until worker in wait state */ + while (!worker_waiting && !is_quit_state()) { + if (waiting_on_empty) pthread_cond_signal(&empty_wait); + pthread_cond_wait(&m_wait, &mutex); + } + V(mutex); + discard_queue(); +} + +/* + * Dequeue a work item. Called by worker thread. + */ +void *worker::dequeue() +{ + bool was_full = false;; + void *item = NULL; + + if (valid != WORKER_VALID || done || is_quit_state()) { + return NULL; + } + P(mutex); + //pthread_cleanup_push(worker_cleanup, (void *)this); + while (empty() && !done && !is_quit_state()) { + waiting_on_empty = true; + pthread_cond_wait(&empty_wait, &mutex); + } + waiting_on_empty = false; + //pthread_cleanup_pop(0); + was_full = full(); + item = fifo->dequeue(); + if (was_full) { + pthread_cond_signal(&full_wait); + } + if (empty()) { + pthread_cond_signal(&empty_wait); + } + V(mutex); + return item; +} + +/* + * Pop a free buffer from the list, if one exists. + * Called by main thread to get a free buffer. + * If none exists (NULL returned), it must allocate + * one. + */ +void *worker::pop_free_buffer() +{ + void *free_buf; + + P(fmutex); + free_buf = fpool->pop(); + V(fmutex); + return free_buf; +} + +/* + * Once a work item (buffer) has been processed by the + * worker thread, it will put it on the free buffer list + * (fpool). 
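+ *
+ * Illustrative consumer loop (compare the TEST_PROGRAM at the end of this
+ * file; process() is a hypothetical routine):
+ *
+ *    while (!wrk->is_quit_state()) {
+ *       if (wrk->is_wait_state()) {
+ *          wrk->wait();
+ *          continue;
+ *       }
+ *       void *buf = wrk->dequeue();
+ *       if (!buf) {
+ *          break;                       // told to stop
+ *       }
+ *       process(buf);
+ *       wrk->push_free_buffer(buf);
+ *    }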
+ */ +void worker::push_free_buffer(void *buf) +{ + P(fmutex); + fpool->push(buf); + V(fmutex); +} + + +//================================================= + +#ifdef TEST_PROGRAM + +void *worker_prog(void *wctx) +{ + POOLMEM *buf; + worker *wrk = (worker *)wctx; + + wrk->set_running(); + + while (!wrk->is_quit_state()) { + if (wrk->is_wait_state()) { + wrk->wait(); + continue; + } + buf = (POOLMEM *)wrk->dequeue(); + if (!buf) { + printf("worker: got null stop\n"); + return NULL; + } + printf("ctx=%lld worker: %s\n", (long long int)wrk->get_ctx(), buf); + wrk->push_free_buffer(buf); + } + printf("worker: asked to stop"); + return NULL; +} + +int main(int argc, char *argv[]) +{ + POOLMEM *buf; + int i; + worker *wrk; + void *ctx; + + wrk = New(worker(10)); + ctx = (void *)1; + wrk->start(worker_prog, ctx); + + for (i=1; i<=40; i++) { + buf = (POOLMEM *)wrk->pop_free_buffer(); + if (!buf) { + buf = get_pool_memory(PM_BSOCK); + printf("Alloc %p\n", buf); + } + sprintf(buf, "This is item %d", i); + wrk->queue(buf); + //printf("back from queue %d\n", i); + } + wrk->wait_queue_empty(); + wrk->set_wait_state(); + printf("======\n"); + for (i=1; i<=5; i++) { + buf = (POOLMEM *)wrk->pop_free_buffer(); + if (!buf) { + buf = get_pool_memory(PM_BSOCK); + printf("Alloc %p\n", buf); + } + sprintf(buf, "This is item %d", i); + wrk->queue(buf); + //printf("back from queue %d\n", i); + } + wrk->set_run_state(); + for (i=6; i<=40; i++) { + buf = (POOLMEM *)wrk->pop_free_buffer(); + if (!buf) { + buf = get_pool_memory(PM_BSOCK); + printf("Alloc %p\n", buf); + } + sprintf(buf, "This is item %d", i); + wrk->queue(buf); + //printf("back from queue %d\n", i); + } + wrk->wait_queue_empty(); + wrk->stop(); + wrk->destroy(); + free(wrk); + + close_memory_pool(); + sm_dump(false); /* test program */ +} +#endif diff --git a/src/lib/worker.h b/src/lib/worker.h new file mode 100644 index 00000000..cea16a4b --- /dev/null +++ b/src/lib/worker.h @@ -0,0 +1,103 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Bacula worker thread class + * + * Kern Sibbald, August 2014 + * + */ + +#ifndef __WORKER_H +#define __WORKER_H 1 + +enum WORKER_STATE { + WORKER_WAIT, + WORKER_RUN, + WORKER_QUIT +}; + +class worker : public SMARTALLOC { +private: + pthread_mutex_t mutex; /* main fifo mutex */ + pthread_mutex_t fmutex; /* free pool mutex */ + pthread_cond_t full_wait; /* wait because full */ + pthread_cond_t empty_wait; /* wait because empty */ + pthread_cond_t m_wait; /* wait state */ + pthread_t worker_id; /* worker's thread id */ + void *(*user_sub)(void *); /* user subroutine */ + void *user_ctx; /* user context */ + flist *fifo; /* work fifo */ + alist *fpool; /* free pool */ + int valid; /* set when valid */ + WORKER_STATE m_state; /* worker state */ + bool worker_running; /* set when worker running */ + bool worker_waiting; /* set when worker is waiting */ + bool done; /* work done */ + bool waiting_on_empty; /* hung waiting on empty queue */ + + +public: + worker(int fifo_size = 10); + ~worker(); + int init(int fifo_size = 10); + int destroy(); + + bool queue(void *item); + void *dequeue(); + int start(void *(*sub)(void *), void *wctx); /* Start worker */ + void wait_queue_empty(); /* Main thread wait for fifo to be emptied */ + void discard_queue(); /* Discard the fifo queue */ + int stop(); /* Stop worker */ + void wait(); /* Wait for main thread to release us */ + void set_wait_state(); + void set_run_state(); + void set_quit_state(); + void finish_work(); + + void *pop_free_buffer(); + void push_free_buffer(void *buf); + + inline void set_running() { worker_running = true; }; + inline bool is_running() const { return worker_running; }; + inline void *get_ctx() const { return user_ctx; }; + + void release_lock(); /* Cleanup release lock */ + + inline bool empty() const { return fifo->empty(); }; + inline bool full() const { return fifo->full(); }; + inline int size() const { return fifo->size(); }; + inline bool is_quit_state() const { return m_state == WORKER_QUIT; }; + inline bool is_wait_state() const { return m_state == WORKER_WAIT; }; +}; + + +inline worker::worker(int fifo_size) +{ + init(fifo_size); +} + +inline worker::~worker() +{ + destroy(); +} + + +#define WORKER_VALID 0xfadbec + +#endif /* __WORKER_H */ diff --git a/src/lib/workq.c b/src/lib/workq.c new file mode 100644 index 00000000..b41bd4a2 --- /dev/null +++ b/src/lib/workq.c @@ -0,0 +1,522 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula work queue routines. Permits passing work to + * multiple threads. + * + * Kern Sibbald, January MMI + * + * This code adapted from "Programming with POSIX Threads", by + * David R. 
Butenhof + * + * Example: + * + * static workq_t job_wq; define work queue + * + * Initialize queue + * if ((stat = workq_init(&job_wq, max_workers, job_thread)) != 0) { + * berrno be; + * Emsg1(M_ABORT, 0, "Could not init job work queue: ERR=%s\n", be.bstrerror(errno)); + * } + * + * Add an item to the queue + * if ((stat = workq_add(&job_wq, (void *)jcr)) != 0) { + * berrno be; + * Emsg1(M_ABORT, 0, "Could not add job to work queue: ERR=%s\n", be.bstrerror(errno)); + * } + * + * Wait for all queued work to be completed + * if ((stat = workq_wait_idle(&job_wq, (void *)jcr)) != 0) { + * berrno be; + * Emsg1(M_ABORT, 0, "Could not wait for idle: ERR=%s\n", be.bstrerror(errno)); + * } + * + * Terminate the queue + * workq_destroy(workq_t *wq); + * + */ + +#include "bacula.h" +#include "jcr.h" + +/* Forward referenced functions */ +extern "C" void *workq_server(void *arg); + +/* + * Initialize a work queue + * + * Returns: 0 on success + * errno on failure + */ +int workq_init(workq_t *wq, int threads, void *(*engine)(void *arg)) +{ + int stat; + + if ((stat = pthread_attr_init(&wq->attr)) != 0) { + return stat; + } + if ((stat = pthread_attr_setdetachstate(&wq->attr, PTHREAD_CREATE_DETACHED)) != 0) { + pthread_attr_destroy(&wq->attr); + return stat; + } + if ((stat = pthread_mutex_init(&wq->mutex, NULL)) != 0) { + pthread_attr_destroy(&wq->attr); + return stat; + } + if ((stat = pthread_cond_init(&wq->work, NULL)) != 0) { + pthread_mutex_destroy(&wq->mutex); + pthread_attr_destroy(&wq->attr); + return stat; + } + if ((stat = pthread_cond_init(&wq->idle, NULL)) != 0) { + pthread_mutex_destroy(&wq->mutex); + pthread_attr_destroy(&wq->attr); + pthread_cond_destroy(&wq->work); + return stat; + } + wq->quit = 0; + wq->first = wq->last = NULL; + wq->max_workers = threads; /* max threads to create */ + wq->num_workers = 0; /* no threads yet */ + wq->num_running = 0; /* no running threads */ + wq->idle_workers = 0; /* no idle threads */ + wq->engine = engine; /* routine to run */ + wq->valid = WORKQ_VALID; + return 0; +} + +/* + * Destroy a work queue + * + * Returns: 0 on success + * errno on failure + */ +int workq_destroy(workq_t *wq) +{ + int stat, stat1, stat2, stat3; + + if (wq->valid != WORKQ_VALID) { + return EINVAL; + } + P(wq->mutex); + wq->valid = 0; /* prevent any more operations */ + + /* + * If any threads are active, wake them + */ + if (wq->num_workers > 0) { + wq->quit = 1; + if (wq->idle_workers) { + if ((stat = pthread_cond_broadcast(&wq->work)) != 0) { + V(wq->mutex); + return stat; + } + } + while (wq->num_workers > 0) { + if ((stat = pthread_cond_wait(&wq->work, &wq->mutex)) != 0) { + V(wq->mutex); + return stat; + } + } + } + V(wq->mutex); + stat = pthread_mutex_destroy(&wq->mutex); + stat1 = pthread_cond_destroy(&wq->work); + stat2 = pthread_attr_destroy(&wq->attr); + stat3 = pthread_cond_destroy(&wq->idle); + if (stat != 0) return stat; + if (stat1 != 0) return stat1; + if (stat2 != 0) return stat2; + if (stat3 != 0) return stat3; + return 0; +} + +/* + * Wait for work to terminate + * + * Returns: 0 on success + * errno on failure + */ +int workq_wait_idle(workq_t *wq) +{ + int stat; + + if (wq->valid != WORKQ_VALID) { + return EINVAL; + } + P(wq->mutex); + + /* While there is work, wait */ + while (wq->num_running || wq->first != NULL) { + if ((stat = pthread_cond_wait(&wq->idle, &wq->mutex)) != 0) { + V(wq->mutex); + return stat; + } + } + V(wq->mutex); + return 0; +} + + + +/* + * Add work to a queue + * wq is a queue that was created with workq_init + * element is a 
user unique item that will be passed to the + * processing routine + * work_item will get internal work queue item -- if it is not NULL + * priority if non-zero will cause the item to be placed on the + * head of the list instead of the tail. + */ +int workq_add(workq_t *wq, void *element, workq_ele_t **work_item, int priority) +{ + int stat=0; + workq_ele_t *item; + pthread_t id; + + Dmsg0(1400, "workq_add\n"); + if (wq->valid != WORKQ_VALID) { + return EINVAL; + } + + if ((item = (workq_ele_t *)malloc(sizeof(workq_ele_t))) == NULL) { + return ENOMEM; + } + item->data = element; + item->next = NULL; + P(wq->mutex); + + Dmsg0(1400, "add item to queue\n"); + if (priority) { + /* Add to head of queue */ + if (wq->first == NULL) { + wq->first = item; + wq->last = item; + } else { + item->next = wq->first; + wq->first = item; + } + } else { + /* Add to end of queue */ + if (wq->first == NULL) { + wq->first = item; + } else { + wq->last->next = item; + } + wq->last = item; + } + + /* if any threads are idle, wake one */ + if (wq->idle_workers > 0) { + Dmsg0(1400, "Signal worker\n"); + if ((stat = pthread_cond_broadcast(&wq->work)) != 0) { + V(wq->mutex); + return stat; + } + } else if (wq->num_workers < wq->max_workers) { + Dmsg0(1400, "Create worker thread\n"); + /* No idle threads so create a new one */ + set_thread_concurrency(wq->max_workers + 1); + if ((stat = pthread_create(&id, &wq->attr, workq_server, (void *)wq)) != 0) { + V(wq->mutex); + return stat; + } + wq->num_workers++; + } + V(wq->mutex); + Dmsg0(1400, "Return workq_add\n"); + /* Return work_item if requested */ + if (work_item) { + *work_item = item; + } + return stat; +} + +/* + * Remove work from a queue + * wq is a queue that was created with workq_init + * work_item is an element of work + * + * Note, it is "removed" by immediately calling a processing routine. + * if you want to cancel it, you need to provide some external means + * of doing so. + */ +int workq_remove(workq_t *wq, workq_ele_t *work_item) +{ + int stat, found = 0; + pthread_t id; + workq_ele_t *item, *prev; + + Dmsg0(1400, "workq_remove\n"); + if (wq->valid != WORKQ_VALID) { + return EINVAL; + } + + P(wq->mutex); + + for (prev=item=wq->first; item; item=item->next) { + if (item == work_item) { + found = 1; + break; + } + prev = item; + } + if (!found) { + return EINVAL; + } + + /* Move item to be first on list */ + if (wq->first != work_item) { + prev->next = work_item->next; + if (wq->last == work_item) { + wq->last = prev; + } + work_item->next = wq->first; + wq->first = work_item; + } + + /* if any threads are idle, wake one */ + if (wq->idle_workers > 0) { + Dmsg0(1400, "Signal worker\n"); + if ((stat = pthread_cond_broadcast(&wq->work)) != 0) { + V(wq->mutex); + return stat; + } + } else { + Dmsg0(1400, "Create worker thread\n"); + /* No idle threads so create a new one */ + set_thread_concurrency(wq->max_workers + 1); + if ((stat = pthread_create(&id, &wq->attr, workq_server, (void *)wq)) != 0) { + V(wq->mutex); + return stat; + } + wq->num_workers++; + } + V(wq->mutex); + Dmsg0(1400, "Return workq_remove\n"); + return stat; +} + + +/* + * This is the worker thread that serves the work queue. + * In due course, it will call the user's engine. 
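+ *
+ * The engine is the routine given to workq_init(); each element queued
+ * with workq_add() is handed to it as the single argument.  Sketch of a
+ * hypothetical engine:
+ *
+ *    static void *job_engine(void *arg)
+ *    {
+ *       JCR *jcr = (JCR *)arg;          // the element passed to workq_add()
+ *       ...do the work...
+ *       return NULL;
+ *    }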
+ */ +extern "C" +void *workq_server(void *arg) +{ + struct timespec timeout; + workq_t *wq = (workq_t *)arg; + workq_ele_t *we; + int stat, timedout; + + Dmsg0(1400, "Start workq_server\n"); + P(wq->mutex); + set_jcr_in_tsd(INVALID_JCR); + + for (;;) { + struct timeval tv; + struct timezone tz; + + Dmsg0(1400, "Top of for loop\n"); + timedout = 0; + Dmsg0(1400, "gettimeofday()\n"); + gettimeofday(&tv, &tz); + timeout.tv_nsec = 0; + timeout.tv_sec = tv.tv_sec + 2; + + while (wq->first == NULL && !wq->quit) { + /* + * Wait 2 seconds, then if no more work, exit + */ + Dmsg0(1400, "pthread_cond_timedwait()\n"); + stat = pthread_cond_timedwait(&wq->work, &wq->mutex, &timeout); + Dmsg1(1400, "timedwait=%d\n", stat); + if (stat == ETIMEDOUT) { + timedout = 1; + break; + } else if (stat != 0) { + /* This shouldn't happen */ + Dmsg0(1400, "This shouldn't happen\n"); + wq->num_workers--; + V(wq->mutex); + return NULL; + } + } + we = wq->first; + if (we != NULL) { + wq->first = we->next; + if (wq->last == we) { + wq->last = NULL; + } + wq->num_running++; + V(wq->mutex); + /* Call user's routine here */ + Dmsg0(1400, "Calling user engine.\n"); + wq->engine(we->data); + Dmsg0(1400, "Back from user engine.\n"); + free(we); /* release work entry */ + Dmsg0(1400, "relock mutex\n"); + P(wq->mutex); + wq->num_running--; + Dmsg0(1400, "Done lock mutex\n"); + } + if (wq->first == NULL && !wq->num_running) { + pthread_cond_broadcast(&wq->idle); + } + /* + * If no more work request, and we are asked to quit, then do it + */ + if (wq->first == NULL && wq->quit) { + wq->num_workers--; + if (wq->num_workers == 0) { + Dmsg0(1400, "Wake up destroy routine\n"); + /* Wake up destroy routine if he is waiting */ + pthread_cond_broadcast(&wq->work); + } + Dmsg0(1400, "Unlock mutex\n"); + V(wq->mutex); + Dmsg0(1400, "Return from workq_server\n"); + return NULL; + } + Dmsg0(1400, "Check for work request\n"); + /* + * If no more work requests, and we waited long enough, quit + */ + Dmsg1(1400, "wq->first==NULL = %d\n", wq->first==NULL); + Dmsg1(1400, "timedout=%d\n", timedout); + if (wq->first == NULL && timedout) { + Dmsg0(1400, "break big loop\n"); + wq->num_workers--; + break; + } + Dmsg0(1400, "Loop again\n"); + } /* end of big for loop */ + + Dmsg0(1400, "unlock mutex\n"); + V(wq->mutex); + Dmsg0(1400, "End workq_server\n"); + return NULL; +} + + +//================================================= +#ifdef TEST_PROGRAM + +#define TEST_SLEEP_TIME_IN_SECONDS 3 +#define TEST_MAX_NUM_WORKERS 5 +#define TEST_NUM_WORKS 10 + + +void *callback(void *ctx) +{ + JCR* jcr = (JCR*)ctx; + + if (jcr) + { + Jmsg1(jcr, M_INFO, 0, _("workq_test: thread %d : now starting work....\n"), (int)pthread_self()); + sleep(TEST_SLEEP_TIME_IN_SECONDS); + Jmsg1(jcr, M_INFO, 0, _("workq_test: thread %d : ...work completed.\n"), (int)pthread_self()); + } + return NULL; +} + + +char *configfile = NULL; +//STORES *me = NULL; /* our Global resource */ +bool forge_on = false; /* proceed inspite of I/O errors */ +pthread_mutex_t device_release_mutex = PTHREAD_MUTEX_INITIALIZER; +pthread_cond_t wait_device_release = PTHREAD_COND_INITIALIZER; + +int main (int argc, char *argv[]) +{ + pthread_attr_t attr; + + void * start_heap = sbrk(0); + (void)start_heap; + + setlocale(LC_ALL, ""); + bindtextdomain("bacula", LOCALEDIR); + textdomain("bacula"); + init_stack_dump(); + my_name_is(argc, argv, "workq_test"); + init_msg(NULL, NULL); + daemon_start_time = time(NULL); + set_thread_concurrency(150); + lmgr_init_thread(); /* initialize the lockmanager stack */ + 
pthread_attr_init(&attr); + + int stat(-1); + berrno be; + + workq_t queue; + /* Start work queues */ + if ((stat = workq_init(&queue, TEST_MAX_NUM_WORKERS, callback)) != 0) + { + be.set_errno(stat); + Emsg1(M_ABORT, 0, _("Could not init work queue: ERR=%s\n"), be.bstrerror()); + } + + /* job1 is created and pseudo-submits some work to the work queue*/ + JCR *jcr1 = new_jcr(sizeof(JCR), NULL); + jcr1->JobId = 1; + workq_ele_t * ret(0); + for (int w=0; wJobId = 2; + for (int w=0; wsize, bfuncs->version); + + *pinfo = &pluginInfo; /* return pointer to our info */ + *pfuncs = &pluginFuncs; /* return pointer to our functions */ + + return bRC_OK; +} + +bRC unloadPlugin() +{ + printf("plugin: Unloaded\n"); + return bRC_OK; +} + +static bRC newPlugin(bpContext *ctx) +{ + int JobId = 0; + bfuncs->getBaculaValue(ctx, bDirVarJobId, (void *)&JobId); + printf("plugin: newPlugin JobId=%d\n", JobId); + bfuncs->registerBaculaEvents(ctx, 1, 2, 0); + return bRC_OK; +} + +static bRC freePlugin(bpContext *ctx) +{ + int JobId = 0; + bfuncs->getBaculaValue(ctx, bDirVarJobId, (void *)&JobId); + printf("plugin: freePlugin JobId=%d\n", JobId); + return bRC_OK; +} + +static bRC getPluginValue(bpContext *ctx, pDirVariable var, void *value) +{ + printf("plugin: getPluginValue var=%d\n", var); + return bRC_OK; +} + +static bRC setPluginValue(bpContext *ctx, pDirVariable var, void *value) +{ + printf("plugin: setPluginValue var=%d\n", var); + return bRC_OK; +} + +static bRC handlePluginEvent(bpContext *ctx, bDirEvent *event, void *value) +{ + char *name; + int val; + switch (event->eventType) { + case bDirEventJobStart: + printf("plugin: HandleEvent JobStart\n"); + break; + case bDirEventJobEnd: + printf("plugin: HandleEvent JobEnd\n"); + bfuncs->getBaculaValue(ctx, bDirVarJob, (void *)&name); + printf("plugin: bDirVarJob=%s\n", name); + bfuncs->getBaculaValue(ctx, bDirVarJobId, (void *)&val); + printf("plugin: bDirVarJobId=%d\n", val); + bfuncs->getBaculaValue(ctx, bDirVarType, (void *)&val); + printf("plugin: bDirVarType=%c\n", val); + bfuncs->getBaculaValue(ctx, bDirVarLevel, (void *)&val); + printf("plugin: bDirVarLevel=%c\n", val); + bfuncs->getBaculaValue(ctx, bDirVarClient, (void *)&name); + printf("plugin: bDirVarClient=%s\n", name); + bfuncs->getBaculaValue(ctx, bDirVarCatalog, (void *)&name); + printf("plugin: bDirVarCatalog=%s\n", name); + bfuncs->getBaculaValue(ctx, bDirVarPool, (void *)&name); + printf("plugin: bDirVarPool=%s\n", name); + bfuncs->getBaculaValue(ctx, bDirVarStorage, (void *)&name); + printf("plugin: bDirVarStorage=%s\n", name); + bfuncs->getBaculaValue(ctx, bDirVarJobErrors, (void *)&val); + printf("plugin: bDirVarJobErrors=%d\n", val); + bfuncs->getBaculaValue(ctx, bDirVarJobFiles, (void *)&val); + printf("plugin: bDirVarJobFiles=%d\n", val); + bfuncs->getBaculaValue(ctx, bDirVarNumVols, (void *)&val); + printf("plugin: bDirVarNumVols=%d\n", val); + + break; + } + bfuncs->getBaculaValue(ctx, bDirVarJobName, (void *)&name); + printf("Job Name=%s\n", name); + bfuncs->JobMessage(ctx, __FILE__, __LINE__, M_INFO, 0, "JobMesssage message"); + bfuncs->DebugMessage(ctx, __FILE__, __LINE__, 1, "DebugMesssage message"); + return bRC_OK; +} + +#ifdef __cplusplus +} +#endif diff --git a/src/plugins/fd/Makefile.in b/src/plugins/fd/Makefile.in new file mode 100644 index 00000000..914d865f --- /dev/null +++ b/src/plugins/fd/Makefile.in @@ -0,0 +1,77 @@ +# +# Simple Makefile for building test FD plugins for Bacula +# +# Copyright (C) 2000-2015 by Kern Sibbald +# License: BSD 2-Clause; see file 
LICENSE-FOSS +# +# +@MCOMMON@ + + +# No optimization for now for easy debugging + +FDDIR=../../filed +SRCDIR=../.. +LIBDIR=../../lib + +.SUFFIXES: .c .lo + +.c.lo: + $(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(CPPFLAGS) -I${SRCDIR} -I${FDDIR} -DTEST_PROGRAM -c $< + +all: bpipe-fd.la test-plugin-fd.la test-deltaseq-fd.la + +example-plugin-fd.lo: example-plugin-fd.c ${FDDIR}/fd_plugins.h + $(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(CPPFLAGS) $(CFLAGS) -I../.. -I${FDDIR} -c example-plugin-fd.c + +example-plugin-fd.la: Makefile example-plugin-fd$(DEFAULT_OBJECT_TYPE) + $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -shared example-plugin-fd.lo -o $@ -rpath $(plugindir) -module -export-dynamic -avoid-version + +bpipe-fd.lo: bpipe-fd.c ${FDDIR}/fd_plugins.h + $(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(CPPFLAGS) $(CFLAGS) -I../.. -I${FDDIR} -c bpipe-fd.c + +bpipe-fd.la: Makefile bpipe-fd$(DEFAULT_OBJECT_TYPE) + $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -shared bpipe-fd.lo -o $@ -rpath $(plugindir) -module -export-dynamic -avoid-version + +test-deltaseq-fd.lo: test-deltaseq-fd.c ${FDDIR}/fd_plugins.h fd_common.h + $(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(CPPFLAGS) $(CFLAGS) -I../.. -I${FDDIR} -c test-deltaseq-fd.c + +test-deltaseq-fd.la: Makefile test-deltaseq-fd$(DEFAULT_OBJECT_TYPE) + $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -shared test-deltaseq-fd.lo -o $@ -rpath $(plugindir) -module -export-dynamic -avoid-version + +test-plugin-fd.lo: test-plugin-fd.c ${FDDIR}/fd_plugins.h + $(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(CPPFLAGS) $(CFLAGS) -I../.. -I${FDDIR} -c test-plugin-fd.c + +test-plugin-fd.la: Makefile test-plugin-fd$(DEFAULT_OBJECT_TYPE) + $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -shared test-plugin-fd.lo -o $@ -rpath $(plugindir) -module -export-dynamic -avoid-version + +install: all + $(MKDIR) $(DESTDIR)$(plugindir) + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) bpipe-fd.la $(DESTDIR)$(plugindir) + $(RMF) $(DESTDIR)$(plugindir)/bpipe-fd.la + +install-test-plugin: all + $(MKDIR) $(DESTDIR)$(plugindir) + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) test-plugin-fd.la $(DESTDIR)$(plugindir) + $(RMF) $(DESTDIR)$(plugindir)/test-plugin-fd.la + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) test-deltaseq-fd.la $(DESTDIR)$(plugindir) + $(RMF) $(DESTDIR)$(plugindir)/test-deltaseq-fd.la + +libtool-clean: + @find . -name '*.lo' -print | xargs $(LIBTOOL_CLEAN) $(RMF) + @$(RMF) *.la + @$(RMF) -r .libs _libs + +clean: libtool-clean + @rm -f main *.so *.o 1 2 3 + +distclean: clean + @rm -f Makefile *.la *.lo + @rm -rf .libs + +libtool-uninstall: + $(LIBTOOL_UNINSTALL) $(RMF) $(DESTDIR)$(plugindir)/bpipe-fd.so + +uninstall: @LIBTOOL_UNINSTALL_TARGET@ + +depend: diff --git a/src/plugins/fd/bpipe-fd.c b/src/plugins/fd/bpipe-fd.c new file mode 100644 index 00000000..df3695f2 --- /dev/null +++ b/src/plugins/fd/bpipe-fd.c @@ -0,0 +1,743 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * A simple pipe plugin for the Bacula File Daemon + * + * Kern Sibbald, October 2007 + * + */ +#include "bacula.h" +#define USE_FULL_WRITE +#include "fd_common.h" +#include "fd_plugins.h" +#include "lib/ini.h" + +#undef malloc +#undef free +#undef strdup + +#define fi __FILE__ +#define li __LINE__ + +#ifdef __cplusplus +extern "C" { +#endif + +static const int dbglvl = 150; + +#define PLUGIN_LICENSE "AGPLv3" +#define PLUGIN_AUTHOR "Kern Sibbald" +#define PLUGIN_DATE "January 2008" +#define PLUGIN_VERSION "1" +#define PLUGIN_DESCRIPTION "Bacula Pipe File Daemon Plugin" + +/* Forward referenced functions */ +static bRC newPlugin(bpContext *ctx); +static bRC freePlugin(bpContext *ctx); +static bRC getPluginValue(bpContext *ctx, pVariable var, void *value); +static bRC setPluginValue(bpContext *ctx, pVariable var, void *value); +static bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value); +static bRC startBackupFile(bpContext *ctx, struct save_pkt *sp); +static bRC endBackupFile(bpContext *ctx); +static bRC pluginIO(bpContext *ctx, struct io_pkt *io); +static bRC startRestoreFile(bpContext *ctx, const char *cmd); +static bRC endRestoreFile(bpContext *ctx); +static bRC createFile(bpContext *ctx, struct restore_pkt *rp); +static bRC setFileAttributes(bpContext *ctx, struct restore_pkt *rp); +static bRC checkFile(bpContext *ctx, char *fname); +static bRC handleXACLdata(bpContext *ctx, struct xacl_pkt *xacl); + +static char *apply_rp_codes(struct plugin_ctx * p_ctx); + +/* Pointers to Bacula functions */ +static bFuncs *bfuncs = NULL; +static bInfo *binfo = NULL; + +/* Plugin Information block */ +static pInfo pluginInfo = { + sizeof(pluginInfo), + FD_PLUGIN_INTERFACE_VERSION, + FD_PLUGIN_MAGIC, + PLUGIN_LICENSE, + PLUGIN_AUTHOR, + PLUGIN_DATE, + PLUGIN_VERSION, + PLUGIN_DESCRIPTION +}; + +/* Plugin entry points for Bacula */ +static pFuncs pluginFuncs = { + sizeof(pluginFuncs), + FD_PLUGIN_INTERFACE_VERSION, + + /* Entry points into plugin */ + newPlugin, /* new plugin instance */ + freePlugin, /* free plugin instance */ + getPluginValue, + setPluginValue, + handlePluginEvent, + startBackupFile, + endBackupFile, + startRestoreFile, + endRestoreFile, + pluginIO, + createFile, + setFileAttributes, + checkFile, + handleXACLdata, + NULL /* No checkStream */ +}; + +/* + * Plugin private context + */ +struct plugin_ctx { + boffset_t offset; + BPIPE *pfd; /* bpipe file descriptor */ + int efd; /* stderr */ + int rfd; /* stdout */ + int wfd; /* stdin */ + int maxfd; /* max(stderr, stdout) */ + bool backup; /* set when the backup is done */ + bool canceled; + char *cmd; /* plugin command line */ + char *fname; /* filename to "backup/restore" */ + char *reader; /* reader program for backup */ + char *writer; /* writer program for backup */ + char where[512]; + int replace; + int job_level; + int estimate_mode; + int64_t total_bytes; /* number of bytes read/write */ +}; + +/* + * loadPlugin() and unloadPlugin() are entry points that are + * exported, so Bacula can directly call these two entry points + * they are common to all Bacula plugins. 
+ */ +/* + * External entry point called by Bacula to "load the plugin + */ +bRC loadPlugin(bInfo *lbinfo, bFuncs *lbfuncs, pInfo **pinfo, pFuncs **pfuncs) +{ + bfuncs = lbfuncs; /* set Bacula funct pointers */ + binfo = lbinfo; + *pinfo = &pluginInfo; /* return pointer to our info */ + *pfuncs = &pluginFuncs; /* return pointer to our functions */ + + return bRC_OK; +} + +/* + * External entry point to unload the plugin + */ +bRC unloadPlugin() +{ +// printf("bpipe-fd: Unloaded\n"); + return bRC_OK; +} + +/* + * The following entry points are accessed through the function + * pointers we supplied to Bacula. Each plugin type (dir, fd, sd) + * has its own set of entry points that the plugin must define. + */ +/* + * Create a new instance of the plugin i.e. allocate our private storage + */ +static bRC newPlugin(bpContext *ctx) +{ + struct plugin_ctx *p_ctx = (struct plugin_ctx *)malloc(sizeof(struct plugin_ctx)); + if (!p_ctx) { + return bRC_Error; + } + memset(p_ctx, 0, sizeof(struct plugin_ctx)); + ctx->pContext = (void *)p_ctx; /* set our context pointer */ + return bRC_OK; +} + +/* + * Free a plugin instance, i.e. release our private storage + */ +static bRC freePlugin(bpContext *ctx) +{ + struct plugin_ctx *p_ctx = (struct plugin_ctx *)ctx->pContext; + if (!p_ctx) { + return bRC_Error; + } + if (p_ctx->cmd) { + free(p_ctx->cmd); /* free any allocated command string */ + } + free(p_ctx); /* free our private context */ + p_ctx = NULL; + return bRC_OK; +} + +/* + * Return some plugin value (none defined) + */ +static bRC getPluginValue(bpContext *ctx, pVariable var, void *value) +{ + return bRC_OK; +} + +/* + * Set a plugin value (none defined) + */ +static bRC setPluginValue(bpContext *ctx, pVariable var, void *value) +{ + return bRC_OK; +} + +/* + * Handle an event that was generated in Bacula + */ +static bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value) +{ + struct plugin_ctx *p_ctx = (struct plugin_ctx *)ctx->pContext; + + if (!p_ctx) { + return bRC_Error; + } + +// char *name; + + /* + * Most events don't interest us so we ignore them. + * the printfs are so that plugin writers can enable them to see + * what is really going on. + */ + switch (event->eventType) { + case bEventLevel: + p_ctx->job_level = ((intptr_t)value); + break; + + case bEventCancelCommand: + p_ctx->canceled = true; + break; + + case bEventPluginCommand: + bfuncs->DebugMessage(ctx, fi, li, dbglvl, + "bpipe-fd: PluginCommand=%s\n", (char *)value); + break; + case bEventJobStart: + bfuncs->DebugMessage(ctx, fi, li, dbglvl, "bpipe-fd: JobStart=%s\n", (char *)value); + break; + case bEventJobEnd: +// printf("bpipe-fd: JobEnd\n"); + break; + case bEventStartBackupJob: +// printf("bpipe-fd: StartBackupJob\n"); + break; + case bEventEndBackupJob: +// printf("bpipe-fd: EndBackupJob\n"); + break; + case bEventSince: +// printf("bpipe-fd: since=%d\n", (int)value); + break; + case bEventStartRestoreJob: +// printf("bpipe-fd: StartRestoreJob\n"); + break; + + case bEventEndRestoreJob: +// printf("bpipe-fd: EndRestoreJob\n"); + break; + + /* Plugin command e.g. 
plugin = ::read command:write command */ + case bEventEstimateCommand: + p_ctx->estimate_mode = true; + /* Fall-through wanted */ + case bEventRestoreCommand: +// printf("bpipe-fd: EventRestoreCommand cmd=%s\n", (char *)value); + /* Fall-through wanted */ + case bEventBackupCommand: + char *p; + bfuncs->DebugMessage(ctx, fi, li, dbglvl, "bpipe-fd: pluginEvent cmd=%s\n", (char *)value); + p_ctx->backup = false; + p_ctx->cmd = strdup((char *)value); + p = strchr(p_ctx->cmd, ':'); + if (!p) { + bfuncs->JobMessage(ctx, fi, li, M_FATAL, 0, "Plugin terminator not found: %s\n", (char *)value); + return bRC_Error; + } + *p++ = 0; /* terminate plugin */ + p_ctx->fname = p; + p = strchr(p, ':'); + if (!p) { + bfuncs->JobMessage(ctx, fi, li, M_FATAL, 0, "File terminator not found: %s\n", (char *)value); + return bRC_Error; + } + *p++ = 0; /* terminate file */ + p_ctx->reader = p; + p = strchr(p, ':'); + if (!p) { + bfuncs->JobMessage(ctx, fi, li, M_FATAL, 0, "Reader terminator not found: %s\n", (char *)value); + return bRC_Error; + } + *p++ = 0; /* terminate reader string */ + p_ctx->writer = p; + +// printf("bpipe-fd: plugin=%s fname=%s reader=%s writer=%s\n", +// p_ctx->cmd, p_ctx->fname, p_ctx->reader, p_ctx->writer); + break; + + default: +// printf("bpipe-fd: unknown event=%d\n", event->eventType); + break; + } + return bRC_OK; +} + + +/* + * Start the backup of a specific file + */ +static bRC startBackupFile(bpContext *ctx, struct save_pkt *sp) +{ + struct plugin_ctx *p_ctx = (struct plugin_ctx *)ctx->pContext; + if (!p_ctx) { + return bRC_Error; + } + + time_t now = time(NULL); + sp->fname = p_ctx->fname; + sp->type = FT_REG; + sp->statp.st_mode = 0700 | S_IFREG; + sp->statp.st_ctime = now; + sp->statp.st_mtime = now; + sp->statp.st_atime = now; + sp->statp.st_size = -1; + sp->statp.st_blksize = 4096; + sp->statp.st_blocks = 1; + p_ctx->backup = true; +// printf("bpipe-fd: startBackupFile\n"); + return bRC_OK; +} + +/* + * Done with backup of this file + */ +static bRC endBackupFile(bpContext *ctx) +{ + struct plugin_ctx *p_ctx = (struct plugin_ctx *)ctx->pContext; + if (!p_ctx) { + return bRC_Error; + } + + /* + * We would return bRC_More if we wanted startBackupFile to be + * called again to backup another file + */ + if (!p_ctx->backup) { + return bRC_More; + } + return bRC_OK; +} + +static void send_log(bpContext *ctx, char *buf) +{ + struct plugin_ctx *p_ctx = (struct plugin_ctx *)ctx->pContext; + strip_trailing_newline(buf); + bfuncs->JobMessage(ctx, fi, li, M_INFO, 0, "%s: %s\n", p_ctx->fname, buf); +} + +/* + * Bacula is calling us to do the actual I/O + */ +static bRC pluginIO(bpContext *ctx, struct io_pkt *io) +{ + fd_set rfds; + fd_set wfds; + bool ok=false; + char buf[1024]; + struct plugin_ctx *p_ctx = (struct plugin_ctx *)ctx->pContext; + if (!p_ctx) { + return bRC_Error; + } + + io->status = -1; + io->io_errno = 0; + switch(io->func) { + case IO_OPEN: + p_ctx->total_bytes = 0; + p_ctx->wfd = p_ctx->efd = p_ctx->rfd = -1; + bfuncs->DebugMessage(ctx, fi, li, dbglvl, "bpipe-fd: IO_OPEN\n"); + if (io->flags & (O_CREAT | O_WRONLY)) { + char *writer_codes = apply_rp_codes(p_ctx); + + p_ctx->pfd = open_bpipe(writer_codes, 0, "rws"); + bfuncs->DebugMessage(ctx, fi, li, dbglvl, "bpipe-fd: IO_OPEN fd=%p writer=%s\n", + p_ctx->pfd, writer_codes); + if (!p_ctx->pfd) { + io->io_errno = errno; + bfuncs->JobMessage(ctx, fi, li, M_FATAL, 0, + "Open pipe writer=%s failed: ERR=%s\n", writer_codes, strerror(errno)); + if (writer_codes) { + free(writer_codes); + } + return bRC_Error; + } + if 
(writer_codes) { + free(writer_codes); + } + /* We need to read from stdout/stderr for messages to display to the user */ + p_ctx->rfd = fileno(p_ctx->pfd->rfd); + p_ctx->wfd = fileno(p_ctx->pfd->wfd); + p_ctx->maxfd = MAX(p_ctx->wfd, p_ctx->rfd); + io->status = p_ctx->wfd; + + } else { + /* Use shell mode and split stderr/stdout */ + p_ctx->pfd = open_bpipe(p_ctx->reader, 0, "rse"); + bfuncs->DebugMessage(ctx, fi, li, dbglvl, "bpipe-fd: IO_OPEN fd=%p reader=%s\n", + p_ctx->pfd, p_ctx->reader); + if (!p_ctx->pfd) { + io->io_errno = errno; + bfuncs->JobMessage(ctx, fi, li, M_FATAL, 0, + "Open pipe reader=%s failed: ERR=%s\n", p_ctx->reader, strerror(errno)); + return bRC_Error; + } + /* We need to read from stderr for job log and stdout for the data */ + p_ctx->efd = fileno(p_ctx->pfd->efd); + p_ctx->rfd = fileno(p_ctx->pfd->rfd); + p_ctx->maxfd = MAX(p_ctx->efd, p_ctx->rfd); + io->status = p_ctx->rfd; + } + sleep(1); /* let pipe connect */ + break; + + case IO_READ: + if (!p_ctx->pfd) { + bfuncs->JobMessage(ctx, fi, li, M_FATAL, 0, "Logic error: NULL read FD\n"); + return bRC_Error; + } + + /* We first try to read stderr, but keep monitoring for data on stdout (when stderr is empty) */ + while (!p_ctx->canceled) { + FD_ZERO(&rfds); + FD_SET(p_ctx->rfd, &rfds); + FD_SET(p_ctx->efd, &rfds); + select(p_ctx->maxfd+1, &rfds, NULL, NULL, NULL); + + if (!FD_ISSET(p_ctx->efd, &rfds)) { + /* nothing in stderr, then we should have something in stdout */ + break; + } + int ret = read(p_ctx->efd, buf, sizeof(buf)); + if (ret <= 0) { + /* stderr is closed or in error, stdout should be in the same state */ + /* let handle it at the stdout level */ + break; + } + /* TODO: buffer and split lines */ + buf[ret]=0; + send_log(ctx, buf); + } + + io->status = read(p_ctx->rfd, io->buf, io->count); +// bfuncs->DebugMessage(ctx, fi, li, dbglvl, "bpipe-fd: IO_READ buf=%p len=%d\n", io->buf, io->status); + if (io->status < 0) { + berrno be; + bfuncs->JobMessage(ctx, fi, li, M_FATAL, 0, + "Pipe read error: ERR=%s\n", be.bstrerror()); + bfuncs->DebugMessage(ctx, fi, li, dbglvl, + "Pipe read error: count=%lld errno=%d ERR=%s\n", + p_ctx->total_bytes, (int)errno, be.bstrerror()); + return bRC_Error; + } + p_ctx->total_bytes += io->status; + break; + + case IO_WRITE: + if (!p_ctx->pfd) { + bfuncs->JobMessage(ctx, fi, li, M_FATAL, 0, "Logic error: NULL write FD\n"); + return bRC_Error; + } + + /* When we write, we must check for the error channel (stdout+stderr) as well */ + while (!ok && !p_ctx->canceled) { + FD_ZERO(&wfds); + FD_SET(p_ctx->wfd, &wfds); + FD_ZERO(&rfds); + FD_SET(p_ctx->rfd, &rfds); + + select(p_ctx->maxfd+1, &rfds, &wfds, NULL, NULL); + + if (FD_ISSET(p_ctx->rfd, &rfds)) { + int ret = read(p_ctx->rfd, buf, sizeof(buf)); /* TODO: simulate fgets() */ + if (ret > 0) { + buf[ret]=0; + send_log(ctx, buf); + } else { + ok = true; /* nothing to read */ + } + } + + if (FD_ISSET(p_ctx->wfd, &wfds)) { + ok = true; + } + } + +// printf("bpipe-fd: IO_WRITE fd=%p buf=%p len=%d\n", p_ctx->fd, io->buf, io->count); + io->status = full_write(p_ctx->wfd, io->buf, io->count, &p_ctx->canceled); +// printf("bpipe-fd: IO_WRITE buf=%p len=%d\n", io->buf, io->status); + if (io->status <= 0) { + berrno be; + bfuncs->JobMessage(ctx, fi, li, M_FATAL, 0, + "Pipe write error: ERR=%s\n", be.bstrerror()); + bfuncs->DebugMessage(ctx, fi, li, dbglvl, + "Pipe write error: count=%lld errno=%d ERR=%s\n", + p_ctx->total_bytes, (int)errno, be.bstrerror()); + return bRC_Error; + } + p_ctx->total_bytes += io->status; + break; + + case 
IO_CLOSE: + if (!p_ctx->pfd) { + bfuncs->JobMessage(ctx, fi, li, M_FATAL, 0, "Logic error: NULL FD on bpipe close\n"); + return bRC_Error; + } + + /* We inform the other side that we have nothing more to send */ + if (p_ctx->wfd >= 0) { + int ret = close_wpipe(p_ctx->pfd); + if (ret == 0) { + bfuncs->JobMessage(ctx, fi, li, M_ERROR, 0, "bpipe-fd: Error closing for file %s: %d\n", + p_ctx->fname, ret); + } + } + + /* We flush what the other program has to say */ + while (!ok && !p_ctx->canceled) { + struct timeval tv = {10, 0}; // sleep for 10secs + FD_ZERO(&rfds); + p_ctx->maxfd = -1; + + if (p_ctx->rfd >= 0) { + FD_SET(p_ctx->rfd, &rfds); + p_ctx->maxfd = MAX(p_ctx->maxfd, p_ctx->rfd); + } + + if (p_ctx->efd >= 0) { + FD_SET(p_ctx->efd, &rfds); + p_ctx->maxfd = MAX(p_ctx->maxfd, p_ctx->efd); + } + + if (p_ctx->maxfd == -1) { + ok = true; /* exit the loop */ + } else { + select(p_ctx->maxfd+1, &rfds, NULL, NULL, &tv); + } + + if (p_ctx->rfd >= 0 && FD_ISSET(p_ctx->rfd, &rfds)) { + int ret = read(p_ctx->rfd, buf, sizeof(buf)); + if (ret > 0) { + buf[ret]=0; + send_log(ctx, buf); + } else { + p_ctx->rfd = -1; /* closed, keep the reference in bpipe */ + } + } + + /* The stderr can be melted with stdout or not */ + if (p_ctx->efd >= 0 && FD_ISSET(p_ctx->efd, &rfds)) { + int ret = read(p_ctx->efd, buf, sizeof(buf)); + if (ret > 0) { + buf[ret]=0; + send_log(ctx, buf); + } else { + p_ctx->efd = -1; /* closed, keep the reference in bpipe */ + } + } + } + + io->status = close_bpipe(p_ctx->pfd); + if (io->status != 0) { + bfuncs->JobMessage(ctx, fi, li, M_ERROR, 0, "bpipe-fd: Error closing for file %s: %d\n", + p_ctx->fname, io->status); + } + break; + + case IO_SEEK: + io->offset = p_ctx->offset; + io->status = 0; + break; + } + return bRC_OK; +} + +/* + * Bacula is notifying us that a plugin name string was found, and + * passing us the plugin command, so we can prepare for a restore. + */ +static bRC startRestoreFile(bpContext *ctx, const char *cmd) +{ +// printf("bpipe-fd: startRestoreFile cmd=%s\n", cmd); + return bRC_OK; +} + +/* + * Bacula is notifying us that the plugin data has terminated, so + * the restore for this particular file is done. + */ +static bRC endRestoreFile(bpContext *ctx) +{ +// printf("bpipe-fd: endRestoreFile\n"); + return bRC_OK; +} + +/* + * This is called during restore to create the file (if necessary) + * We must return in rp->create_status: + * + * CF_ERROR -- error + * CF_SKIP -- skip processing this file + * CF_EXTRACT -- extract the file (i.e.call i/o routines) + * CF_CREATED -- created, but no content to extract (typically directories) + * + */ +static bRC createFile(bpContext *ctx, struct restore_pkt *rp) +{ +// printf("bpipe-fd: createFile\n"); + if (strlen(rp->where) > 512) { + printf("Restore target dir too long. Restricting to first 512 bytes.\n"); + } + strncpy(((struct plugin_ctx *)ctx->pContext)->where, rp->where, 512); + ((struct plugin_ctx *)ctx->pContext)->replace = rp->replace; + rp->create_status = CF_EXTRACT; + return bRC_OK; +} + +/* + * We will get here if the File is a directory after everything + * is written in the directory. 
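+ *
+ * bpipe has nothing to do at this point.  As a hedged illustration only
+ * (not part of the original plugin, and assuming the restore packet
+ * exposes the stat data saved at backup time as rp->statp), a plugin
+ * that wanted to put back permissions and timestamps could do roughly:
+ *
+ *    #include <utime.h>
+ *    chmod(rp->ofname, rp->statp.st_mode & 07777);
+ *    struct utimbuf ut;
+ *    ut.actime  = rp->statp.st_atime;
+ *    ut.modtime = rp->statp.st_mtime;
+ *    utime(rp->ofname, &ut);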
+ */ +static bRC setFileAttributes(bpContext *ctx, struct restore_pkt *rp) +{ +// printf("bpipe-fd: setFileAttributes\n"); + return bRC_OK; +} + +/* When using Incremental dump, all previous dumps are necessary */ +static bRC checkFile(bpContext *ctx, char *fname) +{ + return bRC_OK; +} + +/* + * New Bacula Plugin API require this + */ +static bRC handleXACLdata(bpContext *ctx, struct xacl_pkt *xacl) +{ + return bRC_OK; +} + +/************************************************************************* + * Apply codes in writer command: + * %w -> "where" + * %r -> "replace" + * + * Replace: + * 'always' => 'a', chr(97) + * 'ifnewer' => 'w', chr(119) + * 'ifolder' => 'o', chr(111) + * 'never' => 'n', chr(110) + * + * This function will allocate the required amount of memory with malloc. + * Need to be free()d manually. + * Inspired by edit_job_codes in lib/util.c + */ + +static char *apply_rp_codes(struct plugin_ctx * p_ctx) +{ + char *p, *q; + const char *str; + char add[10]; + int w_count = 0, r_count = 0; + char *omsg; + + char *imsg = p_ctx->writer; + + if (!imsg) { + return NULL; + } + + if ((p = imsg)) { + while ((q = strstr(p, "%w"))) { + w_count++; + p=q+1; + } + + p = imsg; + while ((q = strstr(p, "%r"))) { + r_count++; + p=q+1; + } + } + + /* Required mem: + * len(imsg) + * + number of "where" codes * (len(where)-2) + * - number of "replace" codes + */ + omsg = (char*)malloc(strlen(imsg) + (w_count * (strlen(p_ctx->where)-2)) - r_count + 1); + if (!omsg) { + fprintf(stderr, "Out of memory."); + return NULL; + } + + *omsg = 0; + //printf("apply_rp_codes: %s\n", imsg); + for (p=imsg; *p; p++) { + if (*p == '%') { + switch (*++p) { + case '%': + str = "%"; + break; + case 'w': + str = p_ctx->where; + break; + case 'r': + snprintf(add, 2, "%c", p_ctx->replace); + str = add; + break; + default: + add[0] = '%'; + add[1] = *p; + add[2] = 0; + str = add; + break; + } + } else { + add[0] = *p; + add[1] = 0; + str = add; + } + //printf("add_str %s\n", str); + strcat(omsg, str); + //printf("omsg=%s\n", omsg); + } + return omsg; +} + +#ifdef __cplusplus +} +#endif diff --git a/src/plugins/fd/example-plugin-fd.c b/src/plugins/fd/example-plugin-fd.c new file mode 100644 index 00000000..c087704c --- /dev/null +++ b/src/plugins/fd/example-plugin-fd.c @@ -0,0 +1,311 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2015 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +#define BUILD_PLUGIN +#define BUILDING_DLL /* required for Windows plugin */ + +#include "bacula.h" +#include "fd_plugins.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define PLUGIN_LICENSE "AGPLv3" +#define PLUGIN_AUTHOR "Your name" +#define PLUGIN_DATE "January 2010" +#define PLUGIN_VERSION "1" +#define PLUGIN_DESCRIPTION "Test File Daemon Plugin" + +/* Forward referenced functions */ +static bRC newPlugin(bpContext *ctx); +static bRC freePlugin(bpContext *ctx); +static bRC getPluginValue(bpContext *ctx, pVariable var, void *value); +static bRC setPluginValue(bpContext *ctx, pVariable var, void *value); +static bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value); +static bRC startBackupFile(bpContext *ctx, struct save_pkt *sp); +static bRC endBackupFile(bpContext *ctx); +static bRC pluginIO(bpContext *ctx, struct io_pkt *io); +static bRC startRestoreFile(bpContext *ctx, const char *cmd); +static bRC endRestoreFile(bpContext *ctx); +static bRC createFile(bpContext *ctx, struct restore_pkt *rp); +static bRC setFileAttributes(bpContext *ctx, struct restore_pkt *rp); +static bRC checkFile(bpContext *ctx, char *fname); +static bRC handleXACLdata(bpContext *ctx, struct xacl_pkt *xacl); + +/* Pointers to Bacula functions */ +static bFuncs *bfuncs = NULL; +static bInfo *binfo = NULL; + +static pInfo pluginInfo = { + sizeof(pluginInfo), + FD_PLUGIN_INTERFACE_VERSION, + FD_PLUGIN_MAGIC, + PLUGIN_LICENSE, + PLUGIN_AUTHOR, + PLUGIN_DATE, + PLUGIN_VERSION, + PLUGIN_DESCRIPTION, +}; + +static pFuncs pluginFuncs = { + sizeof(pluginFuncs), + FD_PLUGIN_INTERFACE_VERSION, + + /* Entry points into plugin */ + newPlugin, /* new plugin instance */ + freePlugin, /* free plugin instance */ + getPluginValue, + setPluginValue, + handlePluginEvent, + startBackupFile, + endBackupFile, + startRestoreFile, + endRestoreFile, + pluginIO, + createFile, + setFileAttributes, + checkFile, + handleXACLdata +}; + +/* + * Plugin called here when it is first loaded + */ +bRC DLL_IMP_EXP +loadPlugin(bInfo *lbinfo, bFuncs *lbfuncs, pInfo **pinfo, pFuncs **pfuncs) +{ + bfuncs = lbfuncs; /* set Bacula funct pointers */ + binfo = lbinfo; + printf("plugin: Loaded: size=%d version=%d\n", bfuncs->size, bfuncs->version); + + *pinfo = &pluginInfo; /* return pointer to our info */ + *pfuncs = &pluginFuncs; /* return pointer to our functions */ + + return bRC_OK; +} + +/* + * Plugin called here when it is unloaded, normally when + * Bacula is going to exit. + */ +bRC DLL_IMP_EXP +unloadPlugin() +{ + printf("plugin: Unloaded\n"); + return bRC_OK; +} + +/* + * Called here to make a new instance of the plugin -- i.e. when + * a new Job is started. There can be multiple instances of + * each plugin that are running at the same time. Your + * plugin instance must be thread safe and keep its own + * local data. + */ +static bRC newPlugin(bpContext *ctx) +{ + int JobId = 0; + bfuncs->getBaculaValue(ctx, bVarJobId, (void *)&JobId); +// printf("plugin: newPlugin JobId=%d\n", JobId); + bfuncs->registerBaculaEvents(ctx, 1, 2, 0); + return bRC_OK; +} + +/* + * Release everything concerning a particular instance of a + * plugin. Normally called when the Job terminates. + */ +static bRC freePlugin(bpContext *ctx) +{ + int JobId = 0; + bfuncs->getBaculaValue(ctx, bVarJobId, (void *)&JobId); +// printf("plugin: freePlugin JobId=%d\n", JobId); + return bRC_OK; +} + +/* + * Called by core code to get a variable from the plugin. + * Not currently used. 
+ */ +static bRC getPluginValue(bpContext *ctx, pVariable var, void *value) +{ +// printf("plugin: getPluginValue var=%d\n", var); + return bRC_OK; +} + +/* + * Called by core code to set a plugin variable. + * Not currently used. + */ +static bRC setPluginValue(bpContext *ctx, pVariable var, void *value) +{ +// printf("plugin: setPluginValue var=%d\n", var); + return bRC_OK; +} + +/* + * Called by Bacula when there are certain events that the + * plugin might want to know. The value depends on the + * event. + */ +static bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value) +{ + char *name; + + switch (event->eventType) { + case bEventJobStart: + printf("plugin: JobStart=%s\n", NPRT((char *)value)); + break; + case bEventJobEnd: + printf("plugin: JobEnd\n"); + break; + case bEventStartBackupJob: + printf("plugin: BackupStart\n"); + break; + case bEventEndBackupJob: + printf("plugin: BackupEnd\n"); + break; + case bEventLevel: + printf("plugin: JobLevel=%c %d\n", (int64_t)value, (int64_t)value); + break; + case bEventSince: + printf("plugin: since=%d\n", (int64_t)value); + break; + case bEventStartRestoreJob: + printf("plugin: StartRestoreJob\n"); + break; + case bEventEndRestoreJob: + printf("plugin: EndRestoreJob\n"); + break; + /* Plugin command e.g. plugin = ::command */ + case bEventRestoreCommand: + printf("plugin: backup command=%s\n", NPRT((char *)value)); + break; + case bEventBackupCommand: + printf("plugin: backup command=%s\n", NPRT((char *)value)); + break; + + case bEventComponentInfo: + printf("plugin: Component=%s\n", NPRT((char *)value)); + break; + + default: + printf("plugin: unknown event=%d\n", event->eventType); + } + bfuncs->getBaculaValue(ctx, bVarFDName, (void *)&name); +// printf("FD Name=%s\n", name); +// bfuncs->JobMessage(ctx, __FILE__, __LINE__, 1, 0, "JobMesssage message"); +// bfuncs->DebugMessage(ctx, __FILE__, __LINE__, 1, "DebugMesssage message"); + return bRC_OK; +} + +/* + * Called when starting to backup a file. Here the plugin must + * return the "stat" packet for the directory/file and provide + * certain information so that Bacula knows what the file is. + * The plugin can create "Virtual" files by giving them a + * name that is not normally found on the file system. + */ +static bRC startBackupFile(bpContext *ctx, struct save_pkt *sp) +{ + return bRC_OK; +} + +/* + * Done backing up a file. + */ +static bRC endBackupFile(bpContext *ctx) +{ + return bRC_OK; +} + +/* + * Do actual I/O. Bacula calls this after startBackupFile + * or after startRestoreFile to do the actual file + * input or output. + */ +static bRC pluginIO(bpContext *ctx, struct io_pkt *io) +{ + io->status = 0; + io->io_errno = 0; + switch(io->func) { + case IO_OPEN: + printf("plugin: IO_OPEN\n"); + break; + case IO_READ: + printf("plugin: IO_READ buf=%p len=%d\n", io->buf, io->count); + break; + case IO_WRITE: + printf("plugin: IO_WRITE buf=%p len=%d\n", io->buf, io->count); + break; + case IO_CLOSE: + printf("plugin: IO_CLOSE\n"); + break; + } + return bRC_OK; +} + +static bRC startRestoreFile(bpContext *ctx, const char *cmd) +{ + return bRC_OK; +} + +static bRC endRestoreFile(bpContext *ctx) +{ + return bRC_OK; +} + +/* + * Called here to give the plugin the information needed to + * re-create the file on a restore. It basically gets the + * stat packet that was created during the backup phase. + * This data is what is needed to create the file, but does + * not contain actual file data. 
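+ *
+ * This example simply returns bRC_OK; a working plugin would normally
+ * report the outcome through rp->create_status, as the bpipe plugin
+ * does.  A minimal sketch, using the status values documented for
+ * bpipe above (the condition is hypothetical):
+ *
+ *    if (!want_this_file) {              // hypothetical check
+ *       rp->create_status = CF_SKIP;     // skip processing this file
+ *    } else {
+ *       rp->create_status = CF_EXTRACT;  // extract: Bacula will call pluginIO
+ *    }
+ *    return bRC_OK;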
+ */ +static bRC createFile(bpContext *ctx, struct restore_pkt *rp) +{ + return bRC_OK; +} + +/* + * Called after the file has been restored. This can be used to + * set directory permissions, ... + */ +static bRC setFileAttributes(bpContext *ctx, struct restore_pkt *rp) +{ + return bRC_OK; +} + +/* When using Incremental dump, all previous dumps are necessary */ +static bRC checkFile(bpContext *ctx, char *fname) +{ + return bRC_OK; +} + +/* + * New Bacula Plugin API require this + */ +static bRC handleXACLdata(bpContext *ctx, struct xacl_pkt *xacl) +{ + return bRC_OK; +} + +#ifdef __cplusplus +} +#endif diff --git a/src/plugins/fd/fd_common.h b/src/plugins/fd/fd_common.h new file mode 100644 index 00000000..5d98bee3 --- /dev/null +++ b/src/plugins/fd/fd_common.h @@ -0,0 +1,730 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +/* You can include this file to your plugin to have + * access to some common tools and utilities provided by Bacula + */ + +#ifndef PCOMMON_H +#define PCOMMON_H + +#define JT_BACKUP 'B' /* Backup Job */ +#define JT_RESTORE 'R' /* Restore Job */ + +#define L_FULL 'F' /* Full backup */ +#define L_INCREMENTAL 'I' /* since last backup */ +#define L_DIFFERENTIAL 'D' /* since last full backup */ + +#ifndef DLL_IMP_EXP +# if defined(BUILDING_DLL) +# define DLL_IMP_EXP __declspec(dllexport) +# elif defined(USING_DLL) +# define DLL_IMP_EXP __declspec(dllimport) +# else +# define DLL_IMP_EXP +# endif +#endif + +#ifdef SMARTALLOC +DLL_IMP_EXP void *sm_malloc(const char *fname, int lineno, unsigned int nbytes); +DLL_IMP_EXP void sm_free(const char *file, int line, void *fp); +DLL_IMP_EXP void *reallymalloc(const char *fname, int lineno, unsigned int nbytes); +DLL_IMP_EXP void reallyfree(const char *file, int line, void *fp); + +#ifndef bmalloc +# define bmalloc(s) sm_malloc(__FILE__, __LINE__, (s)) +# define bfree(o) sm_free(__FILE__, __LINE__, (o)) +#endif + +#define SM_CHECK sm_check(__FILE__, __LINE__, false) + +#ifdef malloc +#undef malloc +#undef free +#endif + +#define malloc(s) sm_malloc(__FILE__, __LINE__, (s)) +#define free(o) sm_free(__FILE__, __LINE__, (o)) + +/* Looks to be broken on scientific linux */ +#ifdef xxxx +inline void *operator new(size_t size, char const * file, int line) +{ + void *pnew = sm_malloc(file,line, size); + memset((char *)pnew, 0, size); + return pnew; +} + +inline void *operator new[](size_t size, char const * file, int line) +{ + void *pnew = sm_malloc(file, line, size); + memset((char *)pnew, 0, size); + return pnew; +} + +inline void *operator new(size_t size) +{ + void *pnew = sm_malloc(__FILE__, __LINE__, size); + memset((char *)pnew, 0, size); + return pnew; +} + +inline void *operator new[](size_t size) +{ + void *pnew = sm_malloc(__FILE__, __LINE__, size); + memset((char *)pnew, 0, size); + return pnew; +} + +#define new new(__FILE__, __LINE__) + +inline void operator delete(void *buf) +{ + sm_free( __FILE__, __LINE__, buf); +} + +inline void operator 
delete[] (void *buf) +{ + sm_free(__FILE__, __LINE__, buf); +} + +inline void operator delete[] (void *buf, char const * file, int line) +{ + sm_free(file, line, buf); +} + +inline void operator delete(void *buf, char const * file, int line) +{ + sm_free(file, line, buf); +} + +#endif +#endif /* !SMARTALLOC */ + +#define Dmsg(context, level, ...) bfuncs->DebugMessage(context, __FILE__, __LINE__, level, __VA_ARGS__ ) +#define Jmsg(context, type, ...) bfuncs->JobMessage(context, __FILE__, __LINE__, type, 0, __VA_ARGS__ ) + + +#ifdef USE_CMD_PARSER +#include "lib/cmd_parser.h" +#endif /* USE_CMD_PARSER */ + +#ifdef USE_ADD_DRIVE +/* Keep drive letters for windows vss snapshot */ +static void add_drive(char *drives, int *nCount, char *fname) { + if (strlen(fname) >= 2 && B_ISALPHA(fname[0]) && fname[1] == ':') { + /* always add in uppercase */ + char ch = toupper(fname[0]); + /* if not found in string, add drive letter */ + if (!strchr(drives,ch)) { + drives[*nCount] = ch; + drives[*nCount+1] = 0; + (*nCount)++; + } + } +} + +/* Copy our drive list to Bacula core list */ +static void copy_drives(char *drives, char *dest) { + int last = strlen(dest); /* dest is 27 bytes long */ + for (char *p = drives; *p && last < 26; p++) { + if (!strchr(dest, *p)) { + dest[last++] = *p; + dest[last] = 0; + } + } +} +#endif /* USE_ADD_DRIVE */ + +#endif /* ! PCOMMON_H */ + +#ifdef USE_JOB_LIST + +/* This class is used to store locally the job history, you can attach data + * to it such as snapshot names + * !!! Don't forget that this file may be deleted by the user. !!! + */ + +class joblist: public SMARTALLOC +{ +private: + bpContext *ctx; + +public: + char level; /* level of the job */ + + char base[MAX_NAME_LENGTH]; /* base name */ + char key[MAX_NAME_LENGTH]; /* group of backup */ + char name[MAX_NAME_LENGTH]; /* job name */ + char prev[MAX_NAME_LENGTH]; /* based on jobname */ + char root[MAX_NAME_LENGTH]; /* root of this branch */ + char rootdiff[MAX_NAME_LENGTH]; /* root of diff if any */ + + btime_t job_time; /* job time */ + + void init() { + level = 0; + job_time = 0; + *key = *name = *prev = *root = *rootdiff = 0; + set_base("jobs.dat"); + ctx = NULL; + } + + void set_base(const char *b) { + strncpy(base, b, sizeof(base)); + } + + joblist(bpContext *actx) { init(); ctx = actx; } + + joblist(bpContext *actx, + const char *akey, + const char *jobname, + const char *prevjobname, + char joblevel) + { + init(); + ctx = actx; + if (jobname) { + strncpy(name, jobname, MAX_NAME_LENGTH); + } + + if (prevjobname) { + strncpy(prev, prevjobname, MAX_NAME_LENGTH); + } + + level = joblevel; + + if (akey) { + strncpy(key, akey, MAX_NAME_LENGTH); + + } else { + get_key_from_name(); + } + } + + ~joblist() { } + + /* Will extract the name from the full job name */ + bool get_key_from_name() { + // pluginTest.2012-07-19_16.59.21_11 + int l = strlen(name); + int dlen = 23; // strlen(".2012-07-19_16.59.21_11"); + + if (l > dlen) { /* we probably have a key */ + int start = l - dlen; + if (name[start] == '.' 
&& + B_ISDIGIT(name[start + 1]) && // 2 + B_ISDIGIT(name[start + 2]) && // 0 + B_ISDIGIT(name[start + 3]) && // 1 + B_ISDIGIT(name[start + 4]) && // 2 + name[start + 5] == '-' && // - + B_ISDIGIT(name[start + 6]) && // 0 + B_ISDIGIT(name[start + 7])) // 7 + { + bstrncpy(key, name, start + 1); + Dmsg(ctx, dbglvl+100, "key is %s from jobname %s\n", key, name); + return true; + } + } + Dmsg(ctx, dbglvl+100, "Unable to get key from jobname %s\n", name); + return false; + } + + bool find_job(const char *name, POOLMEM **data=NULL); /* set root, job_time */ + bool find_root_job(); + void store_job(char *data); + void prune_jobs(char *build_cmd(void *arg, const char *data, const char *job), + void *arg, alist *jobs); +}; + +static pthread_mutex_t joblist_mutex = PTHREAD_MUTEX_INITIALIZER; + +bool joblist::find_job(const char *name, POOLMEM **data) +{ + BFILE fp; + FILE *f; + POOLMEM *tmp; + char buf[1024]; + char curkey[MAX_NAME_LENGTH]; /* key */ + char curjobname[MAX_NAME_LENGTH]; /* jobname */ + char prevjob[MAX_NAME_LENGTH]; /* last jobname */ + char rootjob[MAX_NAME_LENGTH]; /* root jobname */ + char t[MAX_NAME_LENGTH]; + char curlevel; + bool ok=false; + + *root = 0; + job_time = 0; + *rootdiff = 0; + + binit(&fp); + set_portable_backup(&fp); + + tmp = get_pool_memory(PM_FNAME); + Mmsg(tmp, "%s/%s", working, base); + + P(joblist_mutex); + if (bopen(&fp, tmp, O_RDONLY, 0) < 0) { + berrno be; + Jmsg(ctx, M_ERROR, "Unable to open job database %s for reading. ERR=%s\n", + tmp, be.bstrerror(errno)); + goto bail_out; + } + + f = fdopen(fp.fid, "r"); + if (!f) { + berrno be; + Jmsg(ctx, M_ERROR, "Unable to open job database. ERR=%s\n", + be.bstrerror(errno)); + goto bail_out; + } + + while (!ok && fgets(buf, sizeof(buf), f) != NULL) { + *curkey = *curjobname = *rootjob = *prevjob = 0; + + Dmsg(ctx, dbglvl+100, "line = [%s]\n", buf); + + if (sscanf(buf, "time=%60s level=%c key=%127s name=%127s root=%127s prev=%127s", + t, &curlevel, curkey, curjobname, rootjob, prevjob) != 6) { + + if (sscanf(buf, "time=%60s level=F key=%127s name=%127s", + t, curkey, curjobname) != 3) { + Dmsg(ctx, dbglvl+100, "Bad line l=[%s]\n", buf); + continue; + } + } + + if (strcmp(name, curjobname) == 0 && + strcmp(key, curkey) == 0) + { + job_time = str_to_uint64(t); + bstrncpy(root, rootjob, MAX_NAME_LENGTH); + if (curlevel == 'D') { + bstrncpy(rootdiff, curjobname, MAX_NAME_LENGTH); + } + + if (data) { + pm_strcpy(data, strstr(buf, " vol=") + 5); + strip_trailing_newline(*data); + unbash_spaces(*data); + } + + ok = true; + Dmsg(ctx, dbglvl+100, "Found job root %s -> %s -> %s\n", + rootdiff, root, curjobname); + } + } + + fclose(f); + +bail_out: + V(joblist_mutex); + free_pool_memory(tmp); + return ok; + +} + +/* Find the root job for the current job */ +bool joblist::find_root_job() +{ + BFILE fp; + FILE *f; + POOLMEM *tmp; + char buf[1024]; + char curkey[MAX_NAME_LENGTH]; /* key */ + char curjobname[MAX_NAME_LENGTH]; /* jobname */ + char prevjob[MAX_NAME_LENGTH]; /* last jobname */ + char rootjob[MAX_NAME_LENGTH]; /* root jobname */ + char t[MAX_NAME_LENGTH]; + char curlevel; + bool ok=false; + + *root = 0; + job_time = 0; + + if (level == 'F') { + bstrncpy(root, name, MAX_NAME_LENGTH); + return true; + } + + binit(&fp); + set_portable_backup(&fp); + + tmp = get_pool_memory(PM_FNAME); + Mmsg(tmp, "%s/%s", working, base); + + P(joblist_mutex); + if (bopen(&fp, tmp, O_RDONLY, 0) < 0) { + berrno be; + Jmsg(ctx, M_ERROR, "Unable to prune previous jobs. 
" + "Can't open %s for reading ERR=%s\n", + tmp, be.bstrerror(errno)); + goto bail_out; + } + + f = fdopen(fp.fid, "r"); + if (!f) { + berrno be; + Jmsg(ctx, M_ERROR, "Unable to prune previous jobs. ERR=%s\n", + be.bstrerror(errno)); + goto bail_out; + } + + while (!ok && fgets(buf, sizeof(buf), f) != NULL) { + *curkey = *curjobname = *rootjob = *prevjob = 0; + + Dmsg(ctx, dbglvl+100, "line = [%s]\n", buf); + + if (sscanf(buf, "time=%60s level=%c key=%127s name=%127s root=%127s prev=%127s", + t, &curlevel, curkey, curjobname, rootjob, prevjob) != 6) { + + if (sscanf(buf, "time=%60s level=F key=%127s name=%127s", + t, curkey, curjobname) == 3) { + bstrncpy(rootjob, curjobname, MAX_NAME_LENGTH); + *prevjob = 0; + curlevel = 'F'; + + } else { + Dmsg(ctx, dbglvl+100, "Bad line l=[%s]\n", buf); + continue; + } + } + + if (strcmp(key, curkey) == 0 && + strcmp(prev, curjobname) == 0) + { + bstrncpy(root, rootjob, MAX_NAME_LENGTH); + + if (curlevel == 'D') { + bstrncpy(rootdiff, curjobname, MAX_NAME_LENGTH); + } + ok = true; + Dmsg(ctx, dbglvl+100, "Found job root %s -> %s -> %s\n", + rootdiff, root, curjobname); + } + } + + fclose(f); + +bail_out: + V(joblist_mutex); + free_pool_memory(tmp); + return true; +} + +/* Store the current job in the jobs.dat for a specific data list */ +void joblist::store_job(char *data) +{ + BFILE fp; + int l; + POOLMEM *tmp = NULL; + btime_t now; + + /* Not initialized, no need to store jobs */ + if (*name == 0 || !level) { + Dmsg(ctx, dbglvl+100, "store_job fail name=%s level=%d\n", name, level); + return; + } + + find_root_job(); + + binit(&fp); + set_portable_backup(&fp); + + P(joblist_mutex); + + tmp = get_pool_memory(PM_FNAME); + Mmsg(tmp, "%s/%s", working, base); + if (bopen(&fp, tmp, O_WRONLY|O_CREAT|O_APPEND, 0600) < 0) { + berrno be; + Jmsg(ctx, M_ERROR, "Unable to update the job history. ERR=%s\n", + be.bstrerror(errno)); + goto bail_out; + } + + now = time(NULL); + + bash_spaces(data); + + if (level == 'F') { + l = Mmsg(tmp, "time=%lld level=%c key=%s name=%s vollen=%d vol=%s\n", + now, level, key, name, strlen(data), data); + + } else { + l = Mmsg(tmp, "time=%lld level=%c key=%s name=%s root=%s prev=%s vollen=%d vol=%s\n", + now, level, key, name, root, prev, strlen(data), data); + } + + if (bwrite(&fp, tmp, l) != l) { + berrno be; + Jmsg(ctx, M_ERROR, "Unable to update the job history. 
ERR=%s\n", + be.bstrerror(errno)); + } + + bclose(&fp); + +bail_out: + V(joblist_mutex); + free_pool_memory(tmp); +} + +/* Prune jobs at the end of the job, this function can generate commands + * in order to cleanup something + */ +void joblist::prune_jobs(char *build_cmd(void *arg, const char *data, const char *job), + void *arg, alist *jobs) +{ + BFILE fp, fpout; + FILE *f=NULL; + POOLMEM *tmp; + POOLMEM *tmpout; + POOLMEM *data; + POOLMEM *buf; + char curkey[MAX_NAME_LENGTH]; /* key */ + char jobname[MAX_NAME_LENGTH]; /* jobname */ + char prevjob[MAX_NAME_LENGTH]; /* last jobname */ + char rootjob[MAX_NAME_LENGTH]; /* root jobname */ + char t[MAX_NAME_LENGTH]; + uint32_t datalen; + char curlevel; + bool keep; + bool ok=false; + int count=0, len; + + /* In Incremental, it means that the previous Full/Diff is well terminated */ + if (level != 'I') { + return; + } + + find_root_job(); + + binit(&fp); + set_portable_backup(&fp); + + binit(&fpout); + set_portable_backup(&fpout); + + tmp = get_pool_memory(PM_FNAME); + Mmsg(tmp, "%s/%s", working, base); + + tmpout = get_pool_memory(PM_FNAME); + Mmsg(tmpout, "%s/%s.swap", working, base); + + buf = get_pool_memory(PM_FNAME); + data = get_pool_memory(PM_FNAME); + *buf = *data = 0; + + P(joblist_mutex); + if (bopen(&fp, tmp, O_RDONLY, 0) < 0) { + berrno be; + Jmsg(ctx, M_ERROR, "Unable to prune previous jobs. " + "Can't open %s for reading ERR=%s\n", + tmp, be.bstrerror(errno)); + goto bail_out; + } + if (bopen(&fpout, tmpout, O_CREAT|O_WRONLY, 0600) < 0) { + berrno be; + Jmsg(ctx, M_ERROR, "Unable to prune previous jobs. " + "Can't open %s for writing ERR=%s\n", + tmpout, be.bstrerror(errno)); + goto bail_out; + } + + f = fdopen(fp.fid, "r"); /* we use fgets from open() */ + if (!f) { + berrno be; + Jmsg(ctx, M_ERROR, "Unable to prune previous jobs. 
ERR=%s\n", + be.bstrerror(errno)); + goto bail_out; + } + + while (fgets(buf, sizeof_pool_memory(buf), f) != NULL) { + *data = *curkey = *jobname = *rootjob = *prevjob = 0; + keep = false; + datalen = 0; + + len = strlen(buf); + if (len > 0 && buf[len -1] != '\n') { + /* The line is larger than the buffer, we need to capture the rest */ + bool ok=false; + while (!ok) { + Dmsg(ctx, dbglvl+100, "Reading extra 1024 bytes, len=%d\n", len); + buf = check_pool_memory_size(buf, sizeof_pool_memory(buf) + 1024); + if (fgets(buf + len, 1023, f) == NULL) { + ok = true; + } + len = strlen(buf); + if (buf[len - 1] == '\n') { + ok = true; + } + if (len > 32000) { /* sanity check */ + ok = true; + } + } + } + + /* We don't capture the vol list, because our sscanf is limited to 1000 bytes */ + if (sscanf(buf, "time=%60s level=%c key=%127s name=%127s root=%127s prev=%127s vollen=%d vol=", + t, &curlevel, curkey, jobname, rootjob, prevjob, &datalen) != 7) { + + if (sscanf(buf, "time=%60s level=F key=%127s name=%127s vollen=%d vol=", + t, curkey, jobname, &datalen) == 4) { + *rootdiff = *rootjob = *prevjob = 0; + curlevel = 'F'; + + } else { + Dmsg(ctx, dbglvl+100, "Bad line l=[%s]\n", buf); + keep = true; + } + } + + if (!keep) { + pm_strcpy(data, strstr(buf, " vol=") + 5); + strip_trailing_newline(data); + unbash_spaces(data); + + if (datalen != strlen(data)) { + Dmsg(ctx, dbglvl+100, "Bad data line datalen != strlen(data) %d != %d\n", datalen, strlen(data)); + Dmsg(ctx, dbglvl+100, "v=[%s]\n", data); + } + } + + + if (!keep && + (strcmp(key, curkey) != 0 || + strcmp(name, jobname) == 0 || + strcmp(prev, jobname) == 0 || + strcmp(root, jobname) == 0 || + strcmp(rootdiff, jobname) == 0)) + { + keep = true; + } + + if (keep) { + if (bwrite(&fpout, buf, len) < 0) { + berrno be; + Jmsg(ctx, M_ERROR, "Unable to update the job history. ERR=%s\n", + be.bstrerror(errno)); + goto bail_out; + } + + } else if (build_cmd) { + count++; + Dmsg(ctx, dbglvl+100, "Can prune jobname %s\n", jobname); + + char *p2 = data; + for(char *p = data; *p; p++) { + if (*p == ',') { + *p = 0; + jobs->append(bstrdup(build_cmd(arg, p2, jobname))); + p2 = p + 1 ; + } + } + jobs->append(bstrdup(build_cmd(arg, p2, jobname))); + + } else if (jobs) { + jobs->append(bstrdup(data)); + } + } + + ok = true; + +bail_out: + if (f) { + fclose(f); + } + if (is_bopen(&fpout)) { + bclose(&fpout); + } + + /* We can switch the file */ + if (ok) { + unlink(tmp); + + if (rename(tmpout, tmp) < 0) { + berrno be; + Jmsg(ctx, M_ERROR, "Unable to update the job history. ERR=%s\n", + be.bstrerror(errno)); + } + } + + V(joblist_mutex); + free_pool_memory(tmp); + free_pool_memory(tmpout); + free_pool_memory(data); + free_pool_memory(buf); + + Dmsg(ctx, dbglvl+100, "Pruning %d jobs\n", count); +} + + +#endif /* ! 
USE_JOB_LIST */ + +#ifdef USE_FULL_WRITE +static int32_t full_write(int fd, const char *ptr, int32_t nbytes, bool *canceled=NULL) +{ + ssize_t nleft, nwritten; + nleft = nbytes; + while (nleft > 0 && (canceled == NULL || *canceled == false)) { + do { + errno = 0; + nwritten = write(fd, ptr, nleft); + } while (nwritten == -1 && errno == EINTR && (canceled == NULL || *canceled == false)); + + if (nwritten <= 0) { + return nwritten; /* error */ + } + nleft -= nwritten; + ptr += nwritten; + } + return nbytes - nleft; +} +#endif + +#ifdef USE_MAKEDIR +/* Skip leading slash(es) */ +static bool makedir(char *path, mode_t mode) +{ + struct stat statp; + char *p = path; + + while (IsPathSeparator(*p)) { + p++; + } + while ((p = first_path_separator(p))) { + char save_p; + save_p = *p; + *p = 0; + if (mkdir(path, mode) != 0) { + if (stat(path, &statp) != 0) { + *p = save_p; + return false; + } else if (!S_ISDIR(statp.st_mode)) { + *p = save_p; + return false; + } + } + *p = save_p; + while (IsPathSeparator(*p)) { + p++; + } + } + return true; +} +#endif diff --git a/src/plugins/fd/test-deltaseq-fd.c b/src/plugins/fd/test-deltaseq-fd.c new file mode 100644 index 00000000..d7a305e6 --- /dev/null +++ b/src/plugins/fd/test-deltaseq-fd.c @@ -0,0 +1,507 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2015 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * A simple delta plugin for the Bacula File Daemon + * + * + */ +#include "bacula.h" +#include "fd_plugins.h" +#include "fd_common.h" + +#undef malloc +#undef free +#undef strdup + +#ifdef __cplusplus +extern "C" { +#endif + +static const int dbglvl = 0; + +#define PLUGIN_LICENSE "AGPLv3" +#define PLUGIN_AUTHOR "Eric Bollengier" +#define PLUGIN_DATE "November 2010" +#define PLUGIN_VERSION "1" +#define PLUGIN_DESCRIPTION "Bacula Delta Test Plugin" + +/* Forward referenced functions */ +static bRC newPlugin(bpContext *ctx); +static bRC freePlugin(bpContext *ctx); +static bRC getPluginValue(bpContext *ctx, pVariable var, void *value); +static bRC setPluginValue(bpContext *ctx, pVariable var, void *value); +static bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value); +static bRC startBackupFile(bpContext *ctx, struct save_pkt *sp); +static bRC endBackupFile(bpContext *ctx); +static bRC pluginIO(bpContext *ctx, struct io_pkt *io); +static bRC startRestoreFile(bpContext *ctx, const char *cmd); +static bRC endRestoreFile(bpContext *ctx); +static bRC createFile(bpContext *ctx, struct restore_pkt *rp); +static bRC setFileAttributes(bpContext *ctx, struct restore_pkt *rp); +static bRC checkFile(bpContext *ctx, char *fname); +static bRC handleXACLdata(bpContext *ctx, struct xacl_pkt *xacl); + +/* Pointers to Bacula functions */ +static bFuncs *bfuncs = NULL; +static bInfo *binfo = NULL; + +/* Plugin Information block */ +static pInfo pluginInfo = { + sizeof(pluginInfo), + FD_PLUGIN_INTERFACE_VERSION, + FD_PLUGIN_MAGIC, + PLUGIN_LICENSE, + PLUGIN_AUTHOR, + PLUGIN_DATE, + PLUGIN_VERSION, + PLUGIN_DESCRIPTION +}; + +/* Plugin entry points for Bacula */ +static pFuncs pluginFuncs = { + sizeof(pluginFuncs), + FD_PLUGIN_INTERFACE_VERSION, + + /* Entry points into plugin */ + newPlugin, /* new plugin instance */ + freePlugin, /* free plugin instance */ + getPluginValue, + setPluginValue, + handlePluginEvent, + startBackupFile, + endBackupFile, + startRestoreFile, + endRestoreFile, + pluginIO, + createFile, + setFileAttributes, + checkFile, + handleXACLdata +}; + +#define get_self(x) ((delta_test*)((x)->pContext)) +#define FO_DELTA (1<<28) /* Do delta on file */ +#define FO_OFFSETS (1<<30) /* Keep block offsets */ + +class delta_test +{ +private: + bpContext *ctx; + +public: + POOLMEM *fname; /* Filename to save */ + int32_t delta; + FILE *fd; + bool done; + int level; + + delta_test(bpContext *bpc) { + fd = NULL; + ctx = bpc; + done = false; + level = 0; + delta = 0; + fname = get_pool_memory(PM_FNAME); + } + ~delta_test() { + free_and_null_pool_memory(fname); + } +}; + +/* + * loadPlugin() and unloadPlugin() are entry points that are + * exported, so Bacula can directly call these two entry points + * they are common to all Bacula plugins. + */ +/* + * External entry point called by Bacula to "load the plugin + */ +bRC loadPlugin(bInfo *lbinfo, bFuncs *lbfuncs, pInfo **pinfo, pFuncs **pfuncs) +{ + bfuncs = lbfuncs; /* set Bacula funct pointers */ + binfo = lbinfo; + *pinfo = &pluginInfo; /* return pointer to our info */ + *pfuncs = &pluginFuncs; /* return pointer to our functions */ + + /* Activate this plugin only in developer mode */ +#ifdef DEVELOPER + return bRC_OK; +#else + return bRC_Error; +#endif +} + +/* + * External entry point to unload the plugin + */ +bRC unloadPlugin() +{ +// Dmsg(NULL, dbglvl, "delta-test-fd: Unloaded\n"); + return bRC_OK; +} + +/* + * The following entry points are accessed through the function + * pointers we supplied to Bacula. 
Each plugin type (dir, fd, sd) + * has its own set of entry points that the plugin must define. + */ +/* + * Create a new instance of the plugin i.e. allocate our private storage + */ +static bRC newPlugin(bpContext *ctx) +{ + delta_test *self = new delta_test(ctx); + if (!self) { + return bRC_Error; + } + ctx->pContext = (void *)self; /* set our context pointer */ + return bRC_OK; +} + +/* + * Free a plugin instance, i.e. release our private storage + */ +static bRC freePlugin(bpContext *ctx) +{ + delta_test *self = get_self(ctx); + if (!self) { + return bRC_Error; + } + delete self; + return bRC_OK; +} + +/* + * Return some plugin value (none defined) + */ +static bRC getPluginValue(bpContext *ctx, pVariable var, void *value) +{ + return bRC_OK; +} + +/* + * Set a plugin value (none defined) + */ +static bRC setPluginValue(bpContext *ctx, pVariable var, void *value) +{ + return bRC_OK; +} + +/* + * Handle an event that was generated in Bacula + */ +static bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value) +{ + delta_test *self = get_self(ctx); + int accurate=0; + + if (!self) { + return bRC_Error; + } + +// char *name; + + /* + * Most events don't interest us so we ignore them. + * the printfs are so that plugin writers can enable them to see + * what is really going on. + */ + switch (event->eventType) { + case bEventPluginCommand: +// Dmsg(ctx, dbglvl, +// "delta-test-fd: PluginCommand=%s\n", (char *)value); + break; + case bEventJobStart: +// Dmsg(ctx, dbglvl, "delta-test-fd: JobStart=%s\n", (char *)value); + break; + case bEventJobEnd: +// Dmsg(ctx, dbglvl, "delta-test-fd: JobEnd\n"); + break; + case bEventStartBackupJob: +// Dmsg(ctx, dbglvl, "delta-test-fd: StartBackupJob\n"); + break; + case bEventEndBackupJob: +// Dmsg(ctx, dbglvl, "delta-test-fd: EndBackupJob\n"); + break; + case bEventLevel: +// Dmsg(ctx, dbglvl, "delta-test-fd: JobLevel=%c %d\n", (int)value, (int)value); + self->level = (int)(intptr_t)value; + break; + case bEventSince: +// Dmsg(ctx, dbglvl, "delta-test-fd: since=%d\n", (int)value); + break; + + case bEventStartRestoreJob: +// Dmsg(ctx, dbglvl, "delta-test-fd: StartRestoreJob\n"); + break; + + case bEventEndRestoreJob: +// Dmsg(ctx, dbglvl, "delta-test-fd: EndRestoreJob\n"); + break; + + /* Plugin command e.g. 
plugin = ::read command:write command */ + case bEventRestoreCommand: +// Dmsg(ctx, dbglvl, "delta-test-fd: EventRestoreCommand cmd=%s\n", (char *)value); + /* Fall-through wanted */ + break; + case bEventBackupCommand: + Dmsg(ctx, dbglvl, "delta-test-fd: pluginEvent cmd=%s\n", (char *)value); + if (self->level == 'I' || self->level == 'D') { + bfuncs->getBaculaValue(ctx, bVarAccurate, (void *)&accurate); + if (!accurate) { /* can be changed to FATAL */ + Jmsg(ctx, M_FATAL, + "Accurate mode should be turned on when using the " + "delta-test plugin\n"); + return bRC_Error; + } + } + break; + + default: +// Dmsg(ctx, dbglvl, "delta-test-fd: unknown event=%d\n", event->eventType); + break; + } + return bRC_OK; +} + +static const char *files[] = { + "/etc/passwd", + "/etc/group", + "/etc/hosts", + "/etc/services" +}; +static int nb_files = 4; + +/* + * Start the backup of a specific file + */ +static bRC startBackupFile(bpContext *ctx, struct save_pkt *sp) +{ + delta_test *self = get_self(ctx); + if (!self) { + return bRC_Error; + } + time_t now = time(NULL); + sp->fname = (char *)"/delta.txt"; + sp->type = FT_REG; + sp->statp.st_mode = 0700 | S_IFREG; + sp->statp.st_ctime = now; + sp->statp.st_mtime = now; + sp->statp.st_atime = now; + sp->statp.st_size = -1; + sp->statp.st_blksize = 4096; + sp->statp.st_blocks = 1; + if (self->level == 'I' || self->level == 'D') { + bRC state = bfuncs->checkChanges(ctx, sp); + /* Should always be bRC_OK */ + sp->type = (state == bRC_Seen)? FT_NOCHG : FT_REG; + sp->flags |= (FO_DELTA|FO_OFFSETS); + self->delta = sp->delta_seq + 1; + } + pm_strcpy(self->fname, files[self->delta % nb_files]); + Dmsg(ctx, dbglvl, "delta-test-fd: delta_seq=%i delta=%i fname=%s\n", + sp->delta_seq, self->delta, self->fname); +// Dmsg(ctx, dbglvl, "delta-test-fd: startBackupFile\n"); + return bRC_OK; +} + +/* + * Done with backup of this file + */ +static bRC endBackupFile(bpContext *ctx) +{ + /* + * We would return bRC_More if we wanted startBackupFile to be + * called again to backup another file + */ + return bRC_OK; +} + + +/* + * Bacula is calling us to do the actual I/O + */ +static bRC pluginIO(bpContext *ctx, struct io_pkt *io) +{ + delta_test *self = get_self(ctx); + struct stat statp; + if (!self) { + return bRC_Error; + } + + io->status = 0; + io->io_errno = 0; + switch(io->func) { + case IO_OPEN: + Dmsg(ctx, dbglvl, "delta-test-fd: IO_OPEN\n"); + if (io->flags & (O_CREAT | O_WRONLY)) { + /* TODO: if the file already exists, the result is undefined */ + if (stat(io->fname, &statp) == 0) { /* file exists */ + self->fd = fopen(io->fname, "r+"); + } else { + self->fd = fopen(io->fname, "w"); /* file doesn't exist,create it */ + } + if (!self->fd) { + io->io_errno = errno; + Jmsg(ctx, M_FATAL, + "Open failed: ERR=%s\n", strerror(errno)); + return bRC_Error; + } + + } else { + self->fd = fopen(self->fname, "r"); + if (!self->fd) { + io->io_errno = errno; + Jmsg(ctx, M_FATAL, + "Open failed: ERR=%s\n", strerror(errno)); + return bRC_Error; + } + } + break; + + case IO_READ: + if (!self->fd) { + Jmsg(ctx, M_FATAL, "Logic error: NULL read FD\n"); + return bRC_Error; + } + if (self->done) { + io->status = 0; + } else { + /* first time, read 300, then replace 50-250 by other data */ + if (self->delta == 0) { + io->status = fread(io->buf, 1, 400, self->fd); + } else { + io->offset = self->delta * 100 / 2; /* chunks are melted */ + io->status = fread(io->buf, 1, 100, self->fd); + } + Dmsg(ctx, dbglvl, "delta-test-fd: READ offset=%lld\n", (int64_t)io->offset); + self->done = true; + 
} + if (io->status == 0 && ferror(self->fd)) { + Jmsg(ctx, M_FATAL, + "Pipe read error: ERR=%s\n", strerror(errno)); + Dmsg(ctx, dbglvl, + "Pipe read error: ERR=%s\n", strerror(errno)); + return bRC_Error; + } + Dmsg(ctx, dbglvl, "offset=%d\n", io->offset); + break; + + case IO_WRITE: + if (!self->fd) { + Jmsg(ctx, M_FATAL, "Logic error: NULL write FD\n"); + return bRC_Error; + } + Dmsg(ctx, dbglvl, "delta-test-fd: WRITE count=%lld\n", (int64_t)io->count); + io->status = fwrite(io->buf, 1, io->count, self->fd); + if (io->status == 0 && ferror(self->fd)) { + Jmsg(ctx, M_FATAL, + "Pipe write error\n"); + Dmsg(ctx, dbglvl, + "Pipe read error: ERR=%s\n", strerror(errno)); + return bRC_Error; + } + break; + + case IO_CLOSE: + if (!self->fd) { + Jmsg(ctx, M_FATAL, "Logic error: NULL FD on delta close\n"); + return bRC_Error; + } + io->status = fclose(self->fd); + break; + + case IO_SEEK: + if (!self->fd) { + Jmsg(ctx, M_FATAL, "Logic error: NULL FD on delta close\n"); + return bRC_Error; + } + Dmsg(ctx, dbglvl, "delta-test-fd: SEEK offset=%lld\n", (int64_t)io->offset); + io->status = fseek(self->fd, io->offset, io->whence); + Dmsg(ctx, dbglvl, "after SEEK=%lld\n", (int64_t)ftell(self->fd)); + break; + } + return bRC_OK; +} + +/* + * Bacula is notifying us that a plugin name string was found, and + * passing us the plugin command, so we can prepare for a restore. + */ +static bRC startRestoreFile(bpContext *ctx, const char *cmd) +{ +// Dmsg(ctx, dbglvl, "delta-test-fd: startRestoreFile cmd=%s\n", cmd); + return bRC_OK; +} + +/* + * Bacula is notifying us that the plugin data has terminated, so + * the restore for this particular file is done. + */ +static bRC endRestoreFile(bpContext *ctx) +{ +// Dmsg(ctx, dbglvl, "delta-test-fd: endRestoreFile\n"); + return bRC_OK; +} + +/* + * This is called during restore to create the file (if necessary) + * We must return in rp->create_status: + * + * CF_ERROR -- error + * CF_SKIP -- skip processing this file + * CF_EXTRACT -- extract the file (i.e.call i/o routines) + * CF_CREATED -- created, but no content to extract (typically directories) + * + */ +static bRC createFile(bpContext *ctx, struct restore_pkt *rp) +{ + delta_test *self = get_self(ctx); + pm_strcpy(self->fname, rp->ofname); + rp->create_status = CF_EXTRACT; + return bRC_OK; +} + +/* + * We will get here if the File is a directory after everything + * is written in the directory. + */ +static bRC setFileAttributes(bpContext *ctx, struct restore_pkt *rp) +{ +// Dmsg(ctx, dbglvl, "delta-test-fd: setFileAttributes\n"); + return bRC_OK; +} + +/* When using Incremental dump, all previous dumps are necessary */ +static bRC checkFile(bpContext *ctx, char *fname) +{ + return bRC_OK; +} + +/* + * New Bacula Plugin API require this + */ +static bRC handleXACLdata(bpContext *ctx, struct xacl_pkt *xacl) +{ + return bRC_OK; +} + +#ifdef __cplusplus +} +#endif diff --git a/src/plugins/fd/test-plugin-fd.c b/src/plugins/fd/test-plugin-fd.c new file mode 100644 index 00000000..0a6373a2 --- /dev/null +++ b/src/plugins/fd/test-plugin-fd.c @@ -0,0 +1,710 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2015 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * A simple test plugin for the Bacula File Daemon derived from + * the bpipe plugin, but used for testing new features. + * + * Kern Sibbald, October 2007 + * + */ +#include "bacula.h" +#include "fd_plugins.h" +#include "lib/ini.h" +#include + +#undef malloc +#undef free +#undef strdup + +#define fi __FILE__ +#define li __LINE__ + +#ifdef __cplusplus +extern "C" { +#endif + +static const int dbglvl = 000; + +#define PLUGIN_LICENSE "AGPLv3" +#define PLUGIN_AUTHOR "Kern Sibbald" +#define PLUGIN_DATE "May 2011" +#define PLUGIN_VERSION "3" +#define PLUGIN_DESCRIPTION "Bacula Test File Daemon Plugin" + +/* Forward referenced functions */ +static bRC newPlugin(bpContext *ctx); +static bRC freePlugin(bpContext *ctx); +static bRC getPluginValue(bpContext *ctx, pVariable var, void *value); +static bRC setPluginValue(bpContext *ctx, pVariable var, void *value); +static bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value); +static bRC startBackupFile(bpContext *ctx, struct save_pkt *sp); +static bRC endBackupFile(bpContext *ctx); +static bRC pluginIO(bpContext *ctx, struct io_pkt *io); +static bRC startRestoreFile(bpContext *ctx, const char *cmd); +static bRC endRestoreFile(bpContext *ctx); +static bRC createFile(bpContext *ctx, struct restore_pkt *rp); +static bRC setFileAttributes(bpContext *ctx, struct restore_pkt *rp); +static bRC checkFile(bpContext *ctx, char *fname); +static bRC handleXACLdata(bpContext *ctx, struct xacl_pkt *xacl); + +/* Pointers to Bacula functions */ +static bFuncs *bfuncs = NULL; +static bInfo *binfo = NULL; + +/* Plugin Information block */ +static pInfo pluginInfo = { + sizeof(pluginInfo), + FD_PLUGIN_INTERFACE_VERSION, + FD_PLUGIN_MAGIC, + PLUGIN_LICENSE, + PLUGIN_AUTHOR, + PLUGIN_DATE, + PLUGIN_VERSION, + PLUGIN_DESCRIPTION +}; + +/* Plugin entry points for Bacula */ +static pFuncs pluginFuncs = { + sizeof(pluginFuncs), + FD_PLUGIN_INTERFACE_VERSION, + + /* Entry points into plugin */ + newPlugin, /* new plugin instance */ + freePlugin, /* free plugin instance */ + getPluginValue, + setPluginValue, + handlePluginEvent, + startBackupFile, + endBackupFile, + startRestoreFile, + endRestoreFile, + pluginIO, + createFile, + setFileAttributes, + checkFile, + handleXACLdata +}; + +static struct ini_items test_items[] = { + // name handler comment required + { "string1", ini_store_str, "Special String", 1}, + { "string2", ini_store_str, "2nd String", 0}, + { "ok", ini_store_bool, "boolean", 0}, + +// We can also use the ITEMS_DEFAULT +// { "ok", ini_store_bool, "boolean", 0, ITEMS_DEFAULT}, + { NULL, NULL, NULL, 0} +}; + +/* + * Plugin private context + */ +struct plugin_ctx { + boffset_t offset; + FILE *fd; /* pipe file descriptor */ + char *cmd; /* plugin command line */ + char *fname; /* filename to "backup/restore" */ + char *reader; /* reader program for backup */ + char *writer; /* writer program for backup */ + + char where[1000]; + int replace; + + int nb_obj; /* Number of objects created */ + POOLMEM *buf; /* store ConfigFile */ +}; + +/* + * loadPlugin() and unloadPlugin() are entry points that are + * exported, so Bacula can directly call these two 
entry points + * they are common to all Bacula plugins. + */ +/* + * External entry point called by Bacula to "load the plugin + */ +bRC loadPlugin(bInfo *lbinfo, bFuncs *lbfuncs, pInfo **pinfo, pFuncs **pfuncs) +{ + bfuncs = lbfuncs; /* set Bacula funct pointers */ + binfo = lbinfo; + *pinfo = &pluginInfo; /* return pointer to our info */ + *pfuncs = &pluginFuncs; /* return pointer to our functions */ + + return bRC_OK; +} + +/* + * External entry point to unload the plugin + */ +bRC unloadPlugin() +{ +// printf("test-plugin-fd: Unloaded\n"); + return bRC_OK; +} + +/* + * The following entry points are accessed through the function + * pointers we supplied to Bacula. Each plugin type (dir, fd, sd) + * has its own set of entry points that the plugin must define. + */ +/* + * Create a new instance of the plugin i.e. allocate our private storage + */ +static bRC newPlugin(bpContext *ctx) +{ + struct plugin_ctx *p_ctx = (struct plugin_ctx *)malloc(sizeof(struct plugin_ctx)); + if (!p_ctx) { + return bRC_Error; + } + memset(p_ctx, 0, sizeof(struct plugin_ctx)); + ctx->pContext = (void *)p_ctx; /* set our context pointer */ + return bRC_OK; +} + +/* + * Free a plugin instance, i.e. release our private storage + */ +static bRC freePlugin(bpContext *ctx) +{ + struct plugin_ctx *p_ctx = (struct plugin_ctx *)ctx->pContext; + if (!p_ctx) { + return bRC_Error; + } + if (p_ctx->buf) { + free_pool_memory(p_ctx->buf); + } + if (p_ctx->cmd) { + free(p_ctx->cmd); /* free any allocated command string */ + } + free(p_ctx); /* free our private context */ + ctx->pContext = NULL; + return bRC_OK; +} + +/* + * Return some plugin value (none defined) + */ +static bRC getPluginValue(bpContext *ctx, pVariable var, void *value) +{ + return bRC_OK; +} + +/* + * Set a plugin value (none defined) + */ +static bRC setPluginValue(bpContext *ctx, pVariable var, void *value) +{ + return bRC_OK; +} + +/* + * Handle an event that was generated in Bacula + */ +static bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value) +{ + struct plugin_ctx *p_ctx = (struct plugin_ctx *)ctx->pContext; + restore_object_pkt *rop; + if (!p_ctx) { + return bRC_Error; + } + +// char *name; + + /* + * Most events don't interest us so we ignore them. + * the printfs are so that plugin writers can enable them to see + * what is really going on. 
+ */ + switch (event->eventType) { + case bEventJobStart: + bfuncs->DebugMessage(ctx, fi, li, dbglvl, "test-plugin-fd: JobStart=%s\n", (char *)value); + break; + case bEventJobEnd: + case bEventEndBackupJob: + case bEventLevel: + case bEventSince: + case bEventStartRestoreJob: + case bEventEndRestoreJob: + break; + /* End of Dir FileSet commands, now we can add excludes */ + case bEventEndFileSet: + bfuncs->NewOptions(ctx); + bfuncs->AddWild(ctx, "*.c", ' '); + bfuncs->AddWild(ctx, "*.cpp", ' '); + bfuncs->AddOptions(ctx, "ei"); /* exclude, ignore case */ + bfuncs->AddExclude(ctx, "/home/kern/bacula/regress/README"); + break; + case bEventStartBackupJob: + break; + case bEventRestoreObject: + { + printf("Plugin RestoreObject\n"); + if (!value) { + bfuncs->DebugMessage(ctx, fi, li, dbglvl, "test-plugin-fd: End restore objects\n"); + break; + } + rop = (restore_object_pkt *)value; + bfuncs->DebugMessage(ctx, fi, li, dbglvl, + "Get RestoreObject len=%d JobId=%d oname=%s type=%d data=%.127s\n", + rop->object_len, rop->JobId, rop->object_name, rop->object_type, + rop->object); + FILE *fp; + POOLMEM *q; + char *working; + static int _nb=0; + q = get_pool_memory(PM_FNAME); + + bfuncs->getBaculaValue(ctx, bVarWorkingDir, &working); + Mmsg(q, "%s/restore.%d", working, _nb++); + if ((fp = fopen(q, "w")) != NULL) { + fwrite(rop->object, rop->object_len, 1, fp); + fclose(fp); + } + + free_pool_memory(q); + + if (!strcmp(rop->object_name, INI_RESTORE_OBJECT_NAME)) { + ConfigFile ini; + if (!ini.dump_string(rop->object, rop->object_len)) { + break; + } + ini.register_items(test_items, sizeof(struct ini_items)); + if (ini.parse(ini.out_fname)) { + bfuncs->JobMessage(ctx, fi, li, M_INFO, 0, "string1 = %s\n", + ini.items[0].val.strval); + } else { + bfuncs->JobMessage(ctx, fi, li, M_ERROR, 0, "Can't parse config\n"); + } + } + + break; + } + /* Plugin command e.g. 
plugin = ::read command:write command */ + case bEventRestoreCommand: + /* Fall-through wanted */ + case bEventEstimateCommand: + /* Fall-through wanted */ + case bEventBackupCommand: + { + char *p; + bfuncs->DebugMessage(ctx, fi, li, dbglvl, "test-plugin-fd: pluginEvent cmd=%s\n", (char *)value); + p_ctx->cmd = strdup((char *)value); + p = strchr(p_ctx->cmd, ':'); + if (!p) { + bfuncs->JobMessage(ctx, fi, li, M_FATAL, 0, "Plugin terminator not found: %s\n", (char *)value); + return bRC_Error; + } + *p++ = 0; /* terminate plugin */ + p_ctx->fname = p; + p = strchr(p, ':'); + if (!p) { + bfuncs->JobMessage(ctx, fi, li, M_FATAL, 0, "File terminator not found: %s\n", (char *)value); + return bRC_Error; + } + *p++ = 0; /* terminate file */ + p_ctx->reader = p; + p = strchr(p, ':'); + if (!p) { + bfuncs->JobMessage(ctx, fi, li, M_FATAL, 0, "Reader terminator not found: %s\n", (char *)value); + return bRC_Error; + } + *p++ = 0; /* terminate reader string */ + p_ctx->writer = p; + printf("test-plugin-fd: plugin=%s fname=%s reader=%s writer=%s\n", + p_ctx->cmd, p_ctx->fname, p_ctx->reader, p_ctx->writer); + break; + } + case bEventPluginCommand: + break; + case bEventVssBeforeCloseRestore: + break; + case bEventComponentInfo: + printf("plugin: Component=%s\n", NPRT((char *)value)); + break; + + default: + printf("test-plugin-fd: unknown event=%d\n", event->eventType); + break; + } + return bRC_OK; +} + +/* + * Start the backup of a specific file + */ +static bRC startBackupFile(bpContext *ctx, struct save_pkt *sp) +{ + struct plugin_ctx *p_ctx = (struct plugin_ctx *)ctx->pContext; + if (!p_ctx) { + return bRC_Error; + } + + if (p_ctx->nb_obj == 0) { + sp->fname = (char *)"takeme.h"; + bfuncs->DebugMessage(ctx, fi, li, dbglvl, "AcceptFile=%s = %d\n", + sp->fname, bfuncs->AcceptFile(ctx, sp)); + + sp->fname = (char *)"/path/to/excludeme.o"; + bfuncs->DebugMessage(ctx, fi, li, dbglvl, "AcceptFile=%s = %d\n", + sp->fname, bfuncs->AcceptFile(ctx, sp)); + + sp->fname = (char *)"/path/to/excludeme.c"; + bfuncs->DebugMessage(ctx, fi, li, dbglvl, "AcceptFile=%s = %d\n", + sp->fname, bfuncs->AcceptFile(ctx, sp)); + } + + if (p_ctx->nb_obj == 0) { + sp->object_name = (char *)"james.xml"; + sp->object = (char *)"This is test data for the restore object. 
" + "garbage=aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + 
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + 
"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + 
"cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + "\0secret"; + sp->object_len = strlen(sp->object)+1+6+1; /* str + 0 + secret + 0 */ + sp->type = FT_RESTORE_FIRST; + + static int _nb=0; + POOLMEM *q = get_pool_memory(PM_FNAME); + char *working; + FILE *fp; + + bfuncs->getBaculaValue(ctx, bVarWorkingDir, &working); + Mmsg(q, "%s/torestore.%d", working, _nb++); + if ((fp = fopen(q, "w")) != NULL) { + fwrite(sp->object, sp->object_len, 1, fp); + fclose(fp); + } + free_pool_memory(q); + + } else if (p_ctx->nb_obj == 1) { + ConfigFile ini; + p_ctx->buf = 
get_pool_memory(PM_BSOCK); + ini.register_items(test_items, sizeof(struct ini_items)); + + sp->object_name = (char*)INI_RESTORE_OBJECT_NAME; + sp->object_len = ini.serialize(&p_ctx->buf); + sp->object = p_ctx->buf; + sp->type = FT_PLUGIN_CONFIG; + + Dmsg1(0, "RestoreOptions=<%s>\n", p_ctx->buf); + } + + time_t now = time(NULL); + sp->index = ++p_ctx->nb_obj; + sp->statp.st_mode = 0700 | S_IFREG; + sp->statp.st_ctime = now; + sp->statp.st_mtime = now; + sp->statp.st_atime = now; + sp->statp.st_size = sp->object_len; + sp->statp.st_blksize = 4096; + sp->statp.st_blocks = 1; + bfuncs->DebugMessage(ctx, fi, li, dbglvl, + "Creating RestoreObject len=%d oname=%s data=%.127s\n", + sp->object_len, sp->object_name, sp->object); + + printf("test-plugin-fd: startBackupFile\n"); + return bRC_OK; +} + +/* + * Done with backup of this file + */ +static bRC endBackupFile(bpContext *ctx) +{ + /* + * We would return bRC_More if we wanted startBackupFile to be + * called again to backup another file + */ + return bRC_OK; +} + + +/* + * Bacula is calling us to do the actual I/O + */ +static bRC pluginIO(bpContext *ctx, struct io_pkt *io) +{ + struct plugin_ctx *p_ctx = (struct plugin_ctx *)ctx->pContext; + if (!p_ctx) { + return bRC_Error; + } + + io->status = 0; + io->io_errno = 0; + return bRC_OK; +} + +/* + * Bacula is notifying us that a plugin name string was found, and + * passing us the plugin command, so we can prepare for a restore. + */ +static bRC startRestoreFile(bpContext *ctx, const char *cmd) +{ + printf("test-plugin-fd: startRestoreFile cmd=%s\n", cmd); + return bRC_OK; +} + +/* + * Bacula is notifying us that the plugin data has terminated, so + * the restore for this particular file is done. + */ +static bRC endRestoreFile(bpContext *ctx) +{ + printf("test-plugin-fd: endRestoreFile\n"); + return bRC_OK; +} + +/* + * This is called during restore to create the file (if necessary) + * We must return in rp->create_status: + * + * CF_ERROR -- error + * CF_SKIP -- skip processing this file + * CF_EXTRACT -- extract the file (i.e.call i/o routines) + * CF_CREATED -- created, but no content to extract (typically directories) + * + */ +static bRC createFile(bpContext *ctx, struct restore_pkt *rp) +{ + struct plugin_ctx *pctx = (struct plugin_ctx *)ctx->pContext; + printf("test-plugin-fd: createFile\n"); + if (strlen(rp->where) > 990) { + printf("Restore target dir too long. Restricting to first 990 bytes.\n"); + } + strncpy(pctx->where, rp->where, sizeof(pctx->where)); + pctx->replace = rp->replace; + rp->create_status = CF_CORE; + return bRC_OK; +} + +/* + * We will get here if the File is a directory after everything + * is written in the directory. 
+ */ +static bRC setFileAttributes(bpContext *ctx, struct restore_pkt *rp) +{ + printf("test-plugin-fd: setFileAttributes\n"); + return bRC_OK; +} + +/* When using Incremental dump, all previous dumps are necessary */ +static bRC checkFile(bpContext *ctx, char *fname) +{ + return bRC_OK; +} + +/* + * New Bacula Plugin API require this + */ +static bRC handleXACLdata(bpContext *ctx, struct xacl_pkt *xacl) +{ + return bRC_OK; +} + +#ifdef __cplusplus +} +#endif diff --git a/src/plugins/sd/Makefile.in b/src/plugins/sd/Makefile.in new file mode 100644 index 00000000..ef5797d8 --- /dev/null +++ b/src/plugins/sd/Makefile.in @@ -0,0 +1,50 @@ +# +# Simple Makefile for building test SD plugins for Bacula +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +@MCOMMON@ + + +# No optimization for now for easy debugging + +SDDIR=../../stored +SRCDIR=../.. +LIBDIR=../../lib + +.SUFFIXES: .c .lo + +.c.lo: + $(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(CPPFLAGS) -I${SRCDIR} -I${SDDIR} -DTEST_PROGRAM -c $< + +all: example-plugin-sd.la + +example-plugin-sd.lo: example-plugin-sd.c + $(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(CFLAGS) -I../.. -I${SDDIR} -c example-plugin-sd.c + +example-plugin-sd.la: Makefile example-plugin-sd$(DEFAULT_OBJECT_TYPE) + $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -shared example-plugin-sd.lo -o $@ -rpath $(plugindir) -module -export-dynamic -avoid-version + +install: all + $(MKDIR) $(DESTDIR)$(plugindir) + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) example-plugin-sd$(DEFAULT_SHARED_OBJECT_TYPE) $(DESTDIR)$(plugindir) + $(RMF) $(DESTDIR)$(plugindir)/example-plugin-sd.la + +libtool-clean: + find . -name '*.lo' -print | xargs $(LIBTOOL_CLEAN) $(RMF) + $(RMF) *.la + $(RMF) -r .libs _libs + +clean: @LIBTOOL_CLEAN_TARGET@ + rm -f main *.la *.so *.o 1 2 3 + +distclean: clean + rm -f Makefile + +libtool-uninstall: + $(LIBTOOL_UNINSTALL) $(RMF) $(DESTDIR)$(plugindir)/example-plugin-sd.so + +uninstall: @LIBTOOL_UNINSTALL_TARGET@ + +depend: diff --git a/src/plugins/sd/example-plugin-sd.c b/src/plugins/sd/example-plugin-sd.c new file mode 100644 index 00000000..742f3d30 --- /dev/null +++ b/src/plugins/sd/example-plugin-sd.c @@ -0,0 +1,181 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Sample Storage daemon Plugin program + * + * Kern Sibbald, October 2007 + */ +#include "bacula.h" /* General Bacula headers */ +#include "stored.h" /* Pull in storage daemon headers */ + +#ifdef __cplusplus +extern "C" { +#endif + +#define PLUGIN_LICENSE "AGPLv3" +#define PLUGIN_AUTHOR "Kern Sibbald" +#define PLUGIN_DATE "November 2011" +#define PLUGIN_VERSION "2" +#define PLUGIN_DESCRIPTION "Test Storage Daemon Plugin" + +/* Forward referenced functions */ +static bRC newPlugin(bpContext *ctx); +static bRC freePlugin(bpContext *ctx); +static bRC getPluginValue(bpContext *ctx, psdVariable var, void *value); +static bRC setPluginValue(bpContext *ctx, psdVariable var, void *value); +static bRC handlePluginEvent(bpContext *ctx, bsdEvent *event, void *value); +static bRC handleGlobalPluginEvent(bsdEvent *event, void *value); + + +/* Pointers to Bacula functions */ +static bsdFuncs *bfuncs = NULL; +static bsdInfo *binfo = NULL; + +static psdInfo pluginInfo = { + sizeof(pluginInfo), + SD_PLUGIN_INTERFACE_VERSION, + SD_PLUGIN_MAGIC, + PLUGIN_LICENSE, + PLUGIN_AUTHOR, + PLUGIN_DATE, + PLUGIN_VERSION, + PLUGIN_DESCRIPTION +}; + +static psdFuncs pluginFuncs = { + sizeof(pluginFuncs), + SD_PLUGIN_INTERFACE_VERSION, + + /* Entry points into plugin */ + newPlugin, /* new plugin instance */ + freePlugin, /* free plugin instance */ + getPluginValue, + setPluginValue, + handlePluginEvent, + handleGlobalPluginEvent +}; + +/* + * loadPlugin() and unloadPlugin() are entry points that are + * exported, so Bacula can directly call these two entry points + * they are common to all Bacula plugins. + * + * External entry point called by Bacula to "load the plugin + */ +bRC DLL_IMP_EXP +loadPlugin(bsdInfo *lbinfo, bsdFuncs *lbfuncs, psdInfo **pinfo, psdFuncs **pfuncs) +{ + bfuncs = lbfuncs; /* set Bacula funct pointers */ + binfo = lbinfo; + printf("example-plugin-sd: Loaded: size=%d version=%d\n", bfuncs->size, bfuncs->version); + *pinfo = &pluginInfo; /* return pointer to our info */ + *pfuncs = &pluginFuncs; /* return pointer to our functions */ + printf("example-plugin-sd: Loaded\n"); + return bRC_OK; +} + +/* + * External entry point to unload the plugin + */ +bRC DLL_IMP_EXP +unloadPlugin() +{ + printf("example-plugin-sd: Unloaded\n"); + return bRC_OK; +} + +/* + * The following entry points are accessed through the function + * pointers we supplied to Bacula. Each plugin type (dir, fd, sd) + * has its own set of entry points that the plugin must define. + */ +/* + * Create a new instance of the plugin i.e. allocate our private storage + */ +static bRC newPlugin(bpContext *ctx) +{ + int JobId = 0; + bfuncs->getBaculaValue(ctx, bsdVarJobId, (void *)&JobId); + printf("example-plugin-sd: newPlugin JobId=%d\n", JobId); + bfuncs->registerBaculaEvents(ctx, 1, 2, 0); + return bRC_OK; +} + +/* + * Free a plugin instance, i.e. 
release our private storage + */ +static bRC freePlugin(bpContext *ctx) +{ + int JobId = 0; + bfuncs->getBaculaValue(ctx, bsdVarJobId, (void *)&JobId); + printf("example-plugin-sd: freePlugin JobId=%d\n", JobId); + return bRC_OK; +} + +/* + * Return some plugin value (none defined) + */ +static bRC getPluginValue(bpContext *ctx, psdVariable var, void *value) +{ + printf("example-plugin-sd: getPluginValue var=%d\n", var); + return bRC_OK; +} + +/* + * Set a plugin value (none defined) + */ +static bRC setPluginValue(bpContext *ctx, psdVariable var, void *value) +{ + printf("example-plugin-sd: setPluginValue var=%d\n", var); + return bRC_OK; +} + +/* + * Handle an event that was generated in Bacula + */ +static bRC handlePluginEvent(bpContext *ctx, bsdEvent *event, void *value) +{ + char *name; + switch (event->eventType) { + case bsdEventJobStart: + printf("example-plugin-sd: HandleEvent JobStart :%s:\n", (char *)value); + break; + case bsdEventJobEnd: + printf("example-plugin-sd: HandleEvent JobEnd\n"); + break; + } + bfuncs->getBaculaValue(ctx, bsdVarJobName, (void *)&name); + printf("Job Name=%s\n", name); + bfuncs->JobMessage(ctx, __FILE__, __LINE__, 1, 0, "JobMesssage message"); + bfuncs->DebugMessage(ctx, __FILE__, __LINE__, 1, "DebugMesssage message"); + return bRC_OK; +} + +/* + * Handle a Global event -- no context + */ +static bRC handleGlobalPluginEvent(bsdEvent *event, void *value) +{ + return bRC_OK; +} + + +#ifdef __cplusplus +} +#endif diff --git a/src/plugins/sd/main.c b/src/plugins/sd/main.c new file mode 100644 index 00000000..2b97ce8e --- /dev/null +++ b/src/plugins/sd/main.c @@ -0,0 +1,110 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Main program to test loading and running Bacula plugins. + * Destined to become Bacula pluginloader, ... 
+ * + * Kern Sibbald, October 2007 + */ +#include "bacula.h" +#include +#include "lib/plugin.h" +#include "plugin-sd.h" + +const char *plugin_type = "-sd.so"; + + +/* Forward referenced functions */ +static bpError baculaGetValue(bpContext *ctx, bVariable var, void *value); +static bpError baculaSetValue(bpContext *ctx, bVariable var, void *value); + + +/* Bacula entry points */ +static bFuncs bfuncs = { + sizeof(bFuncs), + PLUGIN_INTERFACE, + baculaGetValue, + baculaSetValue, + NULL, + NULL +}; + + + + +int main(int argc, char *argv[]) +{ + char plugin_dir[1000]; + bpContext ctx; + bEvent event; + Plugin *plugin; + + b_plugin_list = New(alist(10, not_owned_by_alist)); + + ctx.bContext = NULL; + ctx.pContext = NULL; + getcwd(plugin_dir, sizeof(plugin_dir)-1); + + load_plugins((void *)&bfuncs, plugin_dir, plugin_type); + + foreach_alist(plugin, b_plugin_list) { + printf("bacula: plugin_size=%d plugin_version=%d\n", + pref(plugin)->size, pref(plugin)->interface); + printf("License: %s\nAuthor: %s\nDate: %s\nVersion: %s\nDescription: %s\n", + pref(plugin)->plugin_license, pref(plugin)->plugin_author, + pref(plugin)->plugin_date, pref(plugin)->plugin_version, + pref(plugin)->plugin_description); + + /* Start a new instance of the plugin */ + pref(plugin)->newPlugin(&ctx); + event.eventType = bEventNewVolume; + pref(plugin)->handlePluginEvent(&ctx, &event); + /* Free the plugin instance */ + pref(plugin)->freePlugin(&ctx); + + /* Start a new instance of the plugin */ + pref(plugin)->newPlugin(&ctx); + event.eventType = bEventNewVolume; + pref(plugin)->handlePluginEvent(&ctx, &event); + /* Free the plugin instance */ + pref(plugin)->freePlugin(&ctx); + } + + unload_plugins(); + + printf("bacula: OK ...\n"); + close_memory_pool(); + sm_dump(false); + return 0; +} + +static bpError baculaGetValue(bpContext *ctx, bVariable var, void *value) +{ + printf("bacula: baculaGetValue var=%d\n", var); + if (value) { + *((int *)value) = 100; + } + return 0; +} + +static bpError baculaSetValue(bpContext *ctx, bVariable var, void *value) +{ + printf("bacula: baculaSetValue var=%d\n", var); + return 0; +} diff --git a/src/qt-console/COMMANDS b/src/qt-console/COMMANDS new file mode 100644 index 00000000..e1560166 --- /dev/null +++ b/src/qt-console/COMMANDS @@ -0,0 +1,125 @@ +cancel ujobid= + +delete pool= + should ask if this would have bad effects if say media existed in the pool + the pool existed in the configuration file. + +list + list ujobid (list job with unique name) + list nextvol job= days=nnn + +unmount [ jobid= | job= ] +Mmmmm + +update + /* Attempted, having problems */ + Volume from Pool All Volumes from Pool + +CONFIG FILE +=========================== +add [pool= storage= jobid=] +These are done from config files + +create [pool=] +Done in a config file + +CHOOSE NOT TO (for now at least) +=========================== +autodisplay on/off +could be done in the context of the console + This may interfere with our connection issues + Also, there is a configurable qt timer to automatically ask for messages + +python +not needed from bat?? + +setdebug +I'd say we could choose not to implement this in bat + +quit +I'd like to have a disconnect graphical option. 
+ +exit +Not really needed + +use +still need to make decisions about how to handle multiple catalogs + +version +could be done in console as long as it is explicite about that it is the version +of the director + +var +Mmmmmm + +wait +Mmmmmmm + +DONE +=========================== +automount on/off +Added buttons to the label dialog box to execute automount command + +cancel jobid= job= + +delete [volume= job jobid=] + +disable job and enable job +Could be done in the context of a jobs resource window which is not yet created + +estimate +Could be a dialog in the context of a jobs window. + +list + list jobid= (list jobid id) + list files jobid= + list jobmedia jobid= + list volumes jobid= + list jobtotals (from page selector on joblist widget) + The next few are accomplishable graphically with joblist class + list jobs + list job= (list all jobs with "job-name") + list jobname= (same as above) + list jobmedia + list jobmedia job= + The next few are accomplishable graphically with the medialist class + list volumes + list volumes pool= + list volume= + list pools + Accomplishable with the clients class graphically + list clients + list files job= + do this as a context option on page selector on joblist + list volumes job= + list nextvolume job= + list nextvol job= + +help + context sensitive from page selector console + +label +Done by kern before I started + +prune files|jobs|volume client= volume= +could add as a dialog box from both client and medialist + +purge files jobid=|job=|client= +purge volume|volume= (of all jobs) +purge jobs client= (of all jobs) + +relabel + +release storage= +Would need to explain what this does in bat with a dialog + +run + +reload + +status + +unmount storage= [ drive=\lt{}num\gt{} ] + +update volume (via mediaedit) + "update slots" "update slots scan" (context of storage) diff --git a/src/qt-console/External-qt-console b/src/qt-console/External-qt-console new file mode 100644 index 00000000..2a494904 --- /dev/null +++ b/src/qt-console/External-qt-console @@ -0,0 +1,26 @@ +# This file provides information about the External dependencies required by +# Bacula. +# +# There are four fields delimited by |. Only the first two fields are +# required. The other two are used when the top level directory of the +# archive is not the same as the file name with any suffixes removed. +# +# Field 1 is the name of the dependency. It is used to define the +# name of the three variables which are assigned the values of fields 2 to 4. +# +# Field 2 is the URL of the archive. It is assigned to the variable +# URL_[field1]. +# +# Field 3 is the top directory of the archive or the name of a directory that +# must be created and the archive extracted into it. It is assigned to the +# variable DIR_[field1]. +# +# Field 4 indicates if the directory specified in field 3 must be created +# first and the archive extracted into it. 
It is assigned to the variable +# MKD_[field1] +# +QWT|http://www.bacula.org/depkgs/qwt-5.0.2.tar.bz2|qwt-5.0.2|1 +# +# Original location +# +#QWT|http://superb-west.dl.sourceforge.net/sourceforge/qwt/qwt-5.0.2.tar.bz2|qwt-5.0.2|1 diff --git a/src/qt-console/PAGES b/src/qt-console/PAGES new file mode 100644 index 00000000..ec019637 --- /dev/null +++ b/src/qt-console/PAGES @@ -0,0 +1,44 @@ +Each page should have the following in the constructor +/* this is the text identifying the page in the tree widget */ +m_name = "Restore"; + +/* this sets values for the class based on the extern mainWin-> it sets + m_parent (the stacked widget), + m_console (from current console) + creates the page selector tree widget and sets it's name and inserts + itself in the double direction hashes */ +pgInitialize(parentwidgetitem); or +pgInitialize(); which will have the director as parent in page selector + +/* is this window always present or can the user remove it */ +m_closeable = true; + +/* put the page in the stack widget */ +dockPage(); +/* bring the page to the top of the widget by "selecting" the tree widget + item in the page selector */ +setCurrent(); + +Watch out for console not being connected yet gotchya's. The console is not yet +connected when the first base set of widgets are created on the stack. Use a +function like populate() to place objects in the window. Call populate from +virtual function void ClassName::currentStackItem(). Embed inside condition +similar to if(!m_populated) {} so as to populate only the first time the page +comes to the front. + +The major features that the pages class provides: +dockPage, undockPage and togglePageDocking for docking. +closeEvent to redock an undocked page when it is closed. +virtual functions PgSeltreeWidgetClicked() and currentStackItem() to give pages +the opportunity to have behaviours on events +closeStackPage() to delete both the page object and it's page selector widget. +consoleCommand(QString &command) to execute a console command +setTitle() for setting the title of a window that will display when undocked. +setCurrent() for making the page and tree widget item of an object selected and +in the front of the stack. + +Closing +Use the function closeStackPage() to close from within the class. Otherwise, if +there are pointers which need to be deleted, use a destructor. The m_closeable +page member will determine whether the option to close will appear in context +menu of page selector. diff --git a/src/qt-console/PREFS b/src/qt-console/PREFS new file mode 100644 index 00000000..8985954d --- /dev/null +++ b/src/qt-console/PREFS @@ -0,0 +1,9 @@ +To create a preference variable + +Create member in mainwin.h +use designer to add the widget to modify in prefs.ui +modify the following functions in mainwin.cpp +MainWin::setPreferences() +prefsDialog::accept() +MainWin::readPreferences() + diff --git a/src/qt-console/README b/src/qt-console/README new file mode 100644 index 00000000..6f392ffc --- /dev/null +++ b/src/qt-console/README @@ -0,0 +1,118 @@ + +This directory contains the Bacula Admin Tool (bat). + +At the current time, the contents of this directory are under +development. If you want to help, please contact Kern directly. +If you want to build it, you need Qt4 loaded and setup as your +default Qt or with the appropriate Qt Environment variables set. + +6/24/07 +There is now one dependency, it is qwt. It compiles just fine with +either qwt-5.0.2 or qwt-5.0.1. 
You can either install the qwt package +yourself or if your distro does not have it, we have included the source +in depkgs-qt, which you can download from the Bacula Source Forge +download area. + +Building and running bat is done much like bconsole, the gnome console, +or the wxWidgets console. You add the appropriate options to your +./configure, then simply do a make. Please see the Installation chapter +of the manual for more details. + + +Win32 mingw infos for QT4 : + - http://silmor.de/29 + - http://doc.qtfr.org/post/2007/04/10/Cross-Compilation-Native-dapplication-Qt-depuis-Linux + +Development status as of 05/06/07 + +Items not implemented: +- Nothing on the brestore page + +Translations: +- All translatable strings should be written as tr("string") ... +- To extract the strings for translation run: + + lupdate bat.pro + +- To translate the strings, do: + + linguist ts/bat_xx.ts + + where xx is the country code (e.g. fr or de) + +- To "compile" the translated strings do: + + lrelease bat.pro + + The necessary binary files will be in ts/bat_xx.qm + As far as I can tell, these files must be on your path or + in the same directory as bat for them to be used, otherwise + it reverts to English. Selecting the translation is based on + how your system is setup or the LANG environment variable. + +Design decisions: +- If possible all code for a particular component will be kept in + an appropriate subdirectory. +- All private class variables are named "m_xxx" this makes it very + clear if one is referencing a class variable or a local. +- All signal/slots are connected by explicit code (most all are + done in the MainWin constructor), rather than using designer. +- Each page has a separate designer .ui file in a subdirectory. +- All windows are created with designer and have + a name such as xxxForm i.e. the main window is MainForm and kept + in main.ui. + +Major projects: +- Implement other restore interfaces such as brestore ... +- Implement a database browser +- Implement a resource (conf file) browser +- Implement a reports page -- e.g. something similar to bweb +- Implement Qt plugins to add new functionality to bat +- Implement a GUI configuration file editor (something like JBacula). +... + +Partially Done: +=========================== +- Implement graphical commands that allow updating most aspects of + the database (i.e. commands for label, update Volume, ...) + still need to be able to edit a pool object + +- None of the menu items except About, Select Font, and Quit. + Print and save don't do anything, does save need to?? +Done: +============================ +Design/implementation considerations: +- Need icons in front of the Director. +- The console page should be in a DockWidget so it can be removed + from the main window. It is currently in a dock window, but it + does not remove properly -- more research needed. +- Need to figure out a good implementation of adding pages and even + having plugins that load as pages. Currently the page mechanism + is a bit kludged. + +- We need to have multiple Directors +- Each Director should have its own console +- The Console class needs to be a list or be attached to the + currently active Director. +- Will automatically connect to the first Director in the + conf file. Doesn't know about multiple Directors. + +- The Label menu bar item, prints on the shell window what you entered. +- The Run menu bar item, prints on the console window what you entered. 
+- The Restore menu bar item, brings up dialog, then when OK is + clicked, it goes on to the next dialog, which is meant to be + a tree view, but for the moment does nothing ... It is a bit + ugly. Canceling it should get you back to the normal command prompt. + +- Implement a restore page that does a directory tree restore selection + much like wx-console does. + +Not working: +- The left selection window and the right window (where the console + is) are dockable windows so should be movable once they are properly + clicked. Well, they sort of move, but then get stuck. I haven't figured + out what is going on, so for the current time, I am implementing most + stuff through dialogs. + +Items implemented: + See RELEASEFEATURES diff --git a/src/qt-console/README.mingw32 b/src/qt-console/README.mingw32 new file mode 100644 index 00000000..0422c1ff --- /dev/null +++ b/src/qt-console/README.mingw32 @@ -0,0 +1,120 @@ + +BUILD SYSTEM: Ubuntu gutsy +STATUS: Works + +REQUIRE: + - Bacula cross compilation tool (must be able to compile bacula-fd.exe) + - wine (apt-get install wine) + - qt mingw32 distribution + +ORIGINAL HOWTO (french): +http://doc.qtfr.org/post/2007/04/10/Cross-Compilation-Native-dapplication-Qt-depuis-Linux + +Legend: +# comment +$ shell command +* tips + +Directory setup +--------------- +$ cd bacula/src/win32 +$ ./build-win32-cross-tools +$ ./build-depkgs-mingw32 + +It will result something like : + +./ +|-- depkgs-mingw32 +|-- cross-tools +`-- bacula + +Linux bacula setup -- Note: I believe that this is *required* before trying + to build the Win32 bat. +------------ +$ cd bacula +$ ./configure +$ cd bacula/src/win32 +$ make + +Make sure that bacula/src/win32/release/bacula.dll is built + +QT4 setup +---------- + +Install QT for mingw + +Get the mingw installation from http://trolltech.com/developer/downloads/qt/windows +(Try to get the same version than your linux installation) +ftp://ftp.qtsoftware.com/qt/source + + +$ wine qt-win-opensource-4.3.5-mingw.exe + * Install under c:\Qt (no space) + * no worry about mingw installation + +$ cp -r ~/.wine/drive_c/Qt/4.3.5/src/ depkgs-mingw32/include +$ cp -r ~/.wine/drive_c/Qt/4.3.5/include depkgs-mingw32/include/qt +$ cp -r ~/.wine/drive_c/Qt/4.3.5/lib depkgs-mingw32/lib/qt +# cp ~/.wine/drive_c/Qt/4.3.5/bin/QtCore4.dll src/win32/release32 +# cp ~/.wine/drive_c/Qt/4.3.5/bin/QtGui4.dll src/win32/release32 + +MINGW setup +----------- +I think this only needs to be done once ... + +--- cross-tools/mingw32/mingw32/include/wchar.h.org 2008-07-13 15:18:52.000000000 +0200 ++++ cross-tools/mingw32/mingw32/include/wchar.h 2008-07-12 14:47:10.000000000 +0200 +@@ -394,7 +394,7 @@ + time_t st_ctime; /* Creation time */ + }; + +-#ifndef _NO_OLDNAMES ++#ifdef _NO_OLDNAMES_DISABLE + /* NOTE: Must be the same as _stat above. */ + struct stat + { + +Compile bat +----------- + +$ cd bacula/src/qt-console +$ export DEPKGS="directory above cross-tools and depkgs" +$ ./make-win32 clean +$ ./make-win32 + +Cleanup +------- +$ cd bacula/src/qt-console +$ ./make-win32 clean + +The bat.exe will be in src/qt-console/debug/bat.exe + + + +Run Bat on Windows +------------------ + +You'll need + zlib1.dll + ssleay32.dll + libeay32.dll + QtCore4.dll + QtGui4.dll + bacula.dll + pthreadGCE.dll + mingwm10.dll + bat.conf + +You can find the Qt dlls in ~/.wine/drive_c/Qt/4.3.5/bin + +Run Bat with wine +----------------- +$ cd bacula/src/qt-console/debug + +# configure a bat.conf +# copy all dlls to this directory + +$ wine bat + + +That all, easy isn't it ? 
diff --git a/src/qt-console/RELEASEFEATURES b/src/qt-console/RELEASEFEATURES new file mode 100644 index 00000000..feca7f34 --- /dev/null +++ b/src/qt-console/RELEASEFEATURES @@ -0,0 +1,73 @@ +Original release, August 2007. + +Graphical features: + Undockable (floating) windows of all interfaces. + Page selector for deciding and finding interfaces to view + Console command window for entering standard text console commands. + A preferences interface for listing limits and debugging. + +Graphical console + A console to perform all the commands available from bconsole. + +Graphical restoring + High performance restore of backup with GUI tree browsing and file and + directory selecting. Pre-restore Interface to assist in selecting jobs for + this restore method. + Restore by browsing and selecting from all cataloged versions of files. This + method allows for Multiple simultaneous views of catalog data. The only + selection limitation for this browsing method is one client at a time. + +Graphical listing Interfaces: + List the backup jobs that have run. ** see details below + List the clients and perform 4 commands on the client within context. + List the Filesets + List the Job resources and perform 8 commands on the job within context. + List the Pools and the volumes in each pool. Pefrom 7 possible commands on + each media. + List the storage resources and perform any of 5 commands on the storage + resource. + +Graphical Media Management + Modify Volume parameters + Label a new volume + Relabel an existing volume + Select jobs on a volume in Job List directly from media interface + Delete a Volume + Purge Jobs on volume + Update Volume Parameters from Pool Parameters + +Graphical Graphing + Interface to plot the files and bytes of backup jobs. + +Other Graphical interfaces: + Open the cataloged Log messages of a job that has run + Run a job manually and modify job defaults before running. + Estimate a job. Determine the files and bytes that would be backed up if + the job were run now. + +** JobList features + As many joblist windows at a time as desired. + 9 different selection criterion: client, volume, job, Fileset, level, status, + purged or not, record limit and days limit + Lists the 11 most commonly desired job attributes. + Run the following console commands by issuing the command within the context + of the know job number: List job, list files on job, list jobmedia, list + volumes, delete job and purge files. + Open other interfaces knowing the jobid: show the cataloged log of the running + of the job, restore from that job only browsing the filestructure, restore + from the job end time to get the most recent version of a restore after the + job completed running and browse the combined filestucture, jump directly + to view a plot of the selected jobs files and bytes backed up. + +There is even a button to perform status dir at any time. What more can you + ask for. ;-) An are you sure dialog box will even attempt to prevent the + user from deleting important stuff. + +Almost all console commands can be run graphically without touching a keyboard. +Help me out by letting me know what still cannot be done. I know of the +following: show, run a migration job, and editing resources in configuration +files. The last is not a console command, so it does not count. + +See the TODO file for future plans. The bacula develompment team is interested +in your feedback and your assistance. If you would like to get involved, join +the users and/or the developers mailing list. 
diff --git a/src/qt-console/TODO b/src/qt-console/TODO new file mode 100644 index 00000000..aa2ab983 --- /dev/null +++ b/src/qt-console/TODO @@ -0,0 +1,313 @@ +dhb +==================================================== +can "schedule" be a member of job_defs and populated?? + +========LOW priority items: +Human readable in joblist for purged, joblevel and job type. + +Possibly a stack of past screens so that when you open a window from another, +closing would bring the previous one back. + +======================================================== +This release or next: + +A page showing a list of schedule resources. + +A page list of message resources?? + +Kern discussed windows showing statistics like web based interfaces. + + I think the above is very important. + +======================================================== +Future Releases : + +The ablility to modify configuration files to create jobs, schedules, filesets +and any other director resources. + +The ablility to modify configuration files to create storage resources. + +Add a status dir graphical interface. It would auto update every ?? seconds +and have a list of scheduled jobs, and in the que to run jobs that could be +cancelled graphically. + +Add a status client window. Keep updating showing what file is being +processed. + +Documentation, Documentation, Documentaion. Help. Add help documentation. +Have context sensitve help. + +bRestore add code to get working. + +May be in brestore, find a file by name, find a directory by name + +Interfaces to commands like bextract, bscan, bcopy, btape????? + +Is there a way to query the director/database for whether a storage is currently +mounted so I am not presenting both mount and unmount to the user?? + Yes, but it requires being able to directly connect to the SD (at least + for the moment). + +Is there a way to identify a slot as having a cleaning tape??? +(Kern says more work needs to be done in bacula with autochangers) + Yes, there is a cleaning prefix for Volume labels defined in + the DIR (not currently available to bat). Typically it is CLNxxx + and by looking at the database, you can see these cleaning + volumes. + +Migration Jobs?? +=========================================================== +NOT SURE +=========================================================== + +I'm not sure about this one?? Things seem to work and I did not do a +thing to make it happen: the "dir" is a member of Console + +- We also must somehow make the low level I/O routines know which +director/console to use. Currently they always use the single +global one defined in the mainWin class (if I remember right). + + I'm working on this (kes). It is not so simple, but moving forward + gradually ... + +Create edit pool interface. + This is done from config file + +============================================================ +CALLING GOOD: +============================================================ +See if there is a solution to images fun with designer other than: +%s/[\.\/]*:images/images/g +%s/images/..\/images/g + Images that are in the binary are referenced with :/images/... + This is a Qt convention. If the image is in a file, it + can be referenced directly, but for the most part, I prefer + images in the binary (not lost, not accidently deleted, no + installation problems, ... + Utilizing designer to select the main.qrc resource file seems to do the + job. Designer then puts the : in front of images and work. 
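For reference, the resource convention described in the item above, in code. This is an
illustrative sketch only, not part of the imported tree; it assumes images/status.png is
listed in main.qrc (bat.pro.in installs it from images/), which may differ from the actual
entries.

   /* Sketch: the ":/..." prefix makes Qt read the image from resources
    * compiled into the binary via main.qrc instead of a file on disk. */
   #include <QIcon>

   static QIcon loadStatusIcon(bool fromResources)
   {
      if (fromResources) {
         return QIcon(":/images/status.png");   /* compiled-in resource (main.qrc) */
      }
      return QIcon("images/status.png");        /* plain file path, read at run time */
   }

The compiled-in form is what designer produces once main.qrc is selected, and it matches the
preference stated above: the image cannot be lost, deleted, or missed during installation.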
+ +Think about good ways to clean up the Console creation part of the +loop creating pages. + I don't think it is bad as it is. There is, however, a construct + called a Factory that could more or less automate this in one + big for loop. + Probably not neccesary for the time being. +============================================================ +DONE: +============================================================ +Use settings object to size the restore window. Similar to the saving of the +state of the main window. + +Add context sensitive options for most commands +see COMMANDS file + +A window showing a list of jobs and defaults. Defaults can be gotten in manner +similar to what the first restore window does. + +status dir on page select director item +All items with jobid= that I thought could work from joblist are done. +As well as many more +update slots scan +Preferences for the messages timer. + +Get the 5 second bring to bottom of console to stop + +joblist cancel a running job. + +Fixes to final restore widgets. + +Set default for replace in run restore job to "always"?????? + +Option in joblist like with restore from jobid but restore populating timestamp +of the selected job. + +User preferences. With log to stdout options. +Have settings for defaults of limits on joblist + +Resolve issue of connection during restore selection. Could go with preempt of +connections. Temporary resolution is in. (Kern is to work on) + +Further testing of restore with .mod + Tested a few things, not all. + +Add fileset to joblist. + +Test left pane of restore with 2 windows drives in one backup job. + Yup, id didn't work, now it does. + +Purging not working from console or from context sensitive. + This was a confusion with the restore command. Now resolved. + +Can produce a segfault by attempting to restore from a restore job. In +pre-restore, prevent a job in the list from being a restore job. + +Need to figure out the functionality and inteligence that the last restore +window should have and give it to it. Right now it shows drop downs with no +options. + +Allow for selecting multiple jobs to restore from in joblist. Right click +restore from job works, but not with multiple selected jobs. + +See if it would be possible to have user provided console text show up in a +color + +Get status codes in dropdown for joblist select. + +Create class to display messages from a specific job. Want the ability to +create an instance of that class from joblist. + +Color code termination code in joblist. I also want a table to convert +termination code into human readable text. + +show purged flag in joblist. Don't have purge option show if already purged. + +move behavior of: + MainWin::setContextMenuDockText + MainWin::setTreeWidgetItemDockColor +to the pages class + +preempt all connections to console with +if (!is_connectedGui()) +or some other mechanism. May find more as users start finding them. + +Create documentation for any other developers interested in creating +new classes to add more pages. Explain how to use the pages class +and about not populating until the tree widget is clicked etc... + +Add numerous are you sure dialog boxes. Like are you sure you want to +delete/purge that volume. Show a little of the documentation about what +the consequences of delete or purging are. + +A Tree widget context sensitive menu option and class to jump from known job +to surf the filestructure on the job. 
+ This was future, but it is kind of done with restore from jobid + +Get rid of "Warning: name layoutWidget is already used" when make uic's restore.ui + +Create the ability to start a restore from joblist. Right click, select +"restore from Jobid=xx" create an instance of restore defaulting in the jobid +or a list of selected jobs. + +Update README describe bat.conf.example to bat.conf + +Test restore and get anything not working, working. +Add inteligence to prerestore. + +Color code Media Red->Error Append->green Full/Used->Yellow + +Get restore into stack. + Should the jobs dialog be turned into a page as well?? +Possilbe: Turn run and label into docked pages. (remove button bar buttons??) + +Where and bootstrap are confused in runjobs of restore. + This was just the labels. + +Create list of what does not work. +From what I can tell, just the restore window on the left. + +Add option to LIMIT the number of jobs shown in all jobs page for users with +multiple hundreds to thousands of jobs. + +Play with includes to Make these compiles shorter. + moved includes of of includes and into files only console.h should be long + +relabel storage=DDS3 oldvolume=ddsvol003 volume=dds3vol003 slot=3 pool=dds3_hope +in label slot spinner, limit the upper to the value of slots for that storage. + +Fix bug in myth box not working with .sql query="" command. +This was a fix in mysql + +Figure out how to get tables like Joblist to do the equivalent of double clicking +on the separating lines between each of the headings. +Tried the hard way first. Oops. + +If the console command line entry docked widget gets the focus, make +m_currentConsole the top widget by setting the treewidgetitem selected. +Did this in MainWin::input_line almost better to let the person see +whatever they want until they hit enter. + +Set Window titles to reflect what director it is. + +Must:: get page selector to follow undocked windows. Otherwise +current console won't be current. + +Re-add class for storage, I accidentally reverted because I left +it on my laptop. This is why I like committing often. + +Add class for FileSets + +Another idea for what you have implemented: +- I think that the dynamic pages that you create on the fly +should be nested under the item that creates them more like a +directory tree. + +For example: Jobs on Volume xxx, probably should be shown under +"All Jobs" (or Media if that is what created it) and "Jobs of +Client Rufus" probably should be shown under "Clients". I base +this on looking at the Select page list after I have brought up 3 +or 4 dynamic pages. Once there are a good number, I get a bit +confused where they came from. This would also permit selecting +multipe Volumes then displaying multiple pages, one for each +Volume selected. If they are nested, then that nested level can +be expanded or collapsed, which would be pretty cool at keeping +information, but getting it out of the way, sort of like what +happens for a directory tree. + +dhb: ref above +My original concept was to put these in a tabbed widget. Your Idea may +make for a cleaner user experience. I like it. It could save the +effort of getting a tabbed widget to work. + + +- I think we need to make the current Director more explicit, by +perhaps highlighting it in the page selector when it is current +and unhighlighting it when it is not (we could use color as we do +for the console, though some color blind people may have +problems. 
+
+- When any director is clicked, we need to set it as the current
+director and set the current console as well.
+
+Remove DoubleClicking from the pages class; not needed any more.
+
+Broken with multiple directors:
+- If you click on the second director, it will probably open, but
+none of the pages that are defined below it will be able to talk
+to it. They will most likely talk to the first director.
+
+- When any console is clicked we need to set it as the current
+console (m_console) and also set its director as the current
+director (m_topItem). These are in the mainwin class.
+
+- When any page is selected, we must set both the current
+director (m_topItem) and current console (m_console) that this
+page is connected to.
+
+dhb:
+m_topItem has been changed to Console::directorTreeItem()
+m_currentConsole->directorTreeItem() returns the desired treeWidgetItem
+
+- We also need a concept of a "local" director/console for each
+page, so the page knows who it is talking to -- this doesn't
+currently exist, so I think we must pass the director and console
+to each page widget constructor.
+
+dhb:
+m_currentConsole is saved in each page subclass's m_console. This value
+is set by all but the console class calling Pages::pgInitialize() in its
+constructor.
+
+In short, there is a lot of work to be done to make multiple
+simultaneous directors work.
+
+dhb:
+this may be moot:
+
+If the above proves to be too much, we might consider only having
+a single director at a time, and simply letting the user select
+which director he wants to connect to (one at a time, but
+dynamically). In the end, this may be the best thing to do, so
+any user who wishes to connect to multiple directors would run
+two instances of bat. I am a bit unsure now, but the above list
+of things to do is much bigger than I thought it was going to be.
diff --git a/src/qt-console/bat.conf.example b/src/qt-console/bat.conf.example
new file mode 100644
index 00000000..fa5f9bcd
--- /dev/null
+++ b/src/qt-console/bat.conf.example
@@ -0,0 +1,10 @@
+#
+# Bacula User Agent (or Console) Configuration File
+#
+
+Director {
+  Name = rufus-dir
+  DIRport = 8101
+  address = localhost
+  Password = UA_password
+}
diff --git a/src/qt-console/bat.conf.in b/src/qt-console/bat.conf.in
new file mode 100644
index 00000000..a70e9617
--- /dev/null
+++ b/src/qt-console/bat.conf.in
@@ -0,0 +1,10 @@
+#
+# Bacula Administration Tool (bat) configuration file
+#
+
+Director {
+  Name = @basename@-dir
+  DIRport = @dir_port@
+  address = @hostname@
+  Password = "@dir_password@"
+}
diff --git a/src/qt-console/bat.h b/src/qt-console/bat.h
new file mode 100644
index 00000000..65ed2ad2
--- /dev/null
+++ b/src/qt-console/bat.h
@@ -0,0 +1,64 @@
+#ifndef _BAT_H_
+#define _BAT_H_
+
+/*
+   Bacula(R) - The Network Backup Solution
+
+   Copyright (C) 2000-2016 Kern Sibbald
+
+   The original author of Bacula is Kern Sibbald, with contributions
+   from many others, a complete list can be found in the file AUTHORS.
+
+   You may use this file and others of this release according to the
+   license defined in the LICENSE file, which includes the Affero General
+   Public License, v3.0 ("AGPLv3") and some additional permissions and
+   terms pursuant to its AGPLv3 Section 7.
+
+   This notice must be preserved when any source code is
+   conveyed and/or propagated.
+
+   Bacula(R) is a registered trademark of Kern Sibbald.
+*/ +/* + * Kern Sibbald, January 2007 + */ + +#if defined(HAVE_WIN32) +#if !defined(_STAT_H) +#define _STAT_H /* don't pull in MinGW stat.h */ +#define _STAT_DEFINED /* don't pull in MinGW stat.h */ +#endif +#endif + +#if defined(HAVE_WIN32) +#if defined(HAVE_MINGW) +#include "mingwconfig.h" +#else +#include "winconfig.h" +#endif +#else +#include "config.h" +#endif +#define __CONFIG_H + +#if QT_VERSION >= 0x050000 +#include +#else +#include +#endif +#include +#include "bacula.h" + +#ifndef TRAY_MONITOR +#include "mainwin.h" +#include "bat_conf.h" +#include "jcr.h" +#include "console.h" + +extern MainWin *mainWin; +extern QApplication *app; +#endif + +bool isWin32Path(QString &fullPath); + +#endif /* _BAT_H_ */ diff --git a/src/qt-console/bat.pro.in b/src/qt-console/bat.pro.in new file mode 100644 index 00000000..67ca841f --- /dev/null +++ b/src/qt-console/bat.pro.in @@ -0,0 +1,189 @@ +###################################################################### +# +# !!!!!!! IMPORTANT !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +# +# Edit only bat.pro.in -- bat.pro is built by the ./configure program +# +# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +# +CONFIG += qt debug @QWT@ + + +greaterThan(QT_MAJOR_VERSION, 4): QT += widgets + +bins.path = /$(DESTDIR)@sbindir@ +bins.files = bat +confs.path = /$(DESTDIR)@sysconfdir@ +confs.commands = ./install_conf_file +help.path = /$(DESTDIR)@docdir@ +help.files = help/*.html images/status.png images/mail-message-new.png + +datarootdir = @datarootdir@ +TEMPLATE = app +TARGET = bat +DEPENDPATH += . +INCLUDEPATH += .. . ./console ./restore ./select +LIBS += -L../lib -lbaccfg -lbac -L../findlib -lbacfind @OPENSSL_LIBS@ +LIBTOOL_LINK = @QMAKE_LIBTOOL@ --silent --tag=CXX --mode=link +LIBTOOL_INSTALL = @QMAKE_LIBTOOL@ --silent --mode=install +QMAKE_LINK = $${LIBTOOL_LINK} $(CXX) +QMAKE_INSTALL_PROGRAM = $${LIBTOOL_INSTALL} install -m @SBINPERM@ -p +QMAKE_CLEAN += .libs/* bat + +qwt { + INCLUDEPATH += @QWT_INC@ + LIBS += @QWT_LDFLAGS@ @QWT_LIB@ +} + +macx { + ICON = images/bat_icon.icns +} + +RESOURCES = main.qrc +MOC_DIR = moc +OBJECTS_DIR = obj +UI_DIR = ui + +# Main window +FORMS += main.ui +FORMS += prefs.ui +FORMS += label/label.ui +FORMS += relabel/relabel.ui +FORMS += mount/mount.ui +FORMS += console/console.ui +FORMS += restore/restore.ui restore/prerestore.ui restore/brestore.ui +FORMS += restore/runrestore.ui +FORMS += restore/restoretree.ui +FORMS += run/run.ui run/runcmd.ui run/estimate.ui run/prune.ui +FORMS += select/select.ui select/textinput.ui +FORMS += medialist/medialist.ui mediaedit/mediaedit.ui joblist/joblist.ui +FORMS += medialist/mediaview.ui +FORMS += clients/clients.ui storage/storage.ui fileset/fileset.ui +FORMS += joblog/joblog.ui jobs/jobs.ui job/job.ui +FORMS += help/help.ui mediainfo/mediainfo.ui +FORMS += status/dirstat.ui storage/content.ui +FORMS += status/clientstat.ui +FORMS += status/storstat.ui +qwt { + FORMS += jobgraphs/jobplotcontrols.ui +} + +# Main directory +HEADERS += mainwin.h bat.h bat_conf.h qstd.h pages.h +SOURCES += main.cpp bat_conf.cpp mainwin.cpp qstd.cpp pages.cpp + +# bcomm +HEADERS += bcomm/dircomm.h +SOURCES += bcomm/dircomm.cpp bcomm/dircomm_auth.cpp + +# Console +HEADERS += console/console.h +SOURCES += console/console.cpp + +# Restore +HEADERS += restore/restore.h +SOURCES += restore/prerestore.cpp restore/restore.cpp restore/brestore.cpp + +# Label dialog +HEADERS += label/label.h +SOURCES += label/label.cpp + +# Relabel dialog +HEADERS += relabel/relabel.h 
+SOURCES += relabel/relabel.cpp + +# Mount dialog +HEADERS += mount/mount.h +SOURCES += mount/mount.cpp + +# Run dialog +HEADERS += run/run.h +SOURCES += run/run.cpp run/runcmd.cpp run/estimate.cpp run/prune.cpp + +# Select dialog +HEADERS += select/select.h select/textinput.h +SOURCES += select/select.cpp select/textinput.cpp + +## MediaList +HEADERS += medialist/medialist.h +SOURCES += medialist/medialist.cpp + +# MediaView +HEADERS += medialist/mediaview.h +SOURCES += medialist/mediaview.cpp + +## MediaEdit +HEADERS += mediaedit/mediaedit.h +SOURCES += mediaedit/mediaedit.cpp + +## JobList +HEADERS += joblist/joblist.h +SOURCES += joblist/joblist.cpp + +## Clients +HEADERS += clients/clients.h +SOURCES += clients/clients.cpp + +## Storage +HEADERS += storage/storage.h +SOURCES += storage/storage.cpp + +## Storage content +HEADERS += storage/content.h +SOURCES += storage/content.cpp + +## Fileset +HEADERS += fileset/fileset.h +SOURCES += fileset/fileset.cpp + +## Job log +HEADERS += joblog/joblog.h +SOURCES += joblog/joblog.cpp + +## Job +HEADERS += job/job.h +SOURCES += job/job.cpp + +## Jobs +HEADERS += jobs/jobs.h +SOURCES += jobs/jobs.cpp + +## RestoreTree +HEADERS += restore/restoretree.h +SOURCES += restore/restoretree.cpp + +## Job Step Graphs +qwt { + HEADERS += jobgraphs/jobplot.h + SOURCES += jobgraphs/jobplot.cpp +} + +# Help dialog +HEADERS += help/help.h +SOURCES += help/help.cpp + +# Media info dialog +HEADERS += mediainfo/mediainfo.h +SOURCES += mediainfo/mediainfo.cpp + +## Status Dir +HEADERS += status/dirstat.h +SOURCES += status/dirstat.cpp + +## Status Client +HEADERS += status/clientstat.h +SOURCES += status/clientstat.cpp + +## Status Client +HEADERS += status/storstat.h +SOURCES += status/storstat.cpp + +# Utility sources +HEADERS += util/fmtwidgetitem.h util/comboutil.h +SOURCES += util/fmtwidgetitem.cpp util/comboutil.cpp + +INSTALLS = bins confs help + +QMAKE_EXTRA_TARGETS += depend + +TRANSLATIONS += ts/bat_fr.ts ts/bat_de.ts diff --git a/src/qt-console/bat.pro.mingw32.in b/src/qt-console/bat.pro.mingw32.in new file mode 100644 index 00000000..23ac93a4 --- /dev/null +++ b/src/qt-console/bat.pro.mingw32.in @@ -0,0 +1,191 @@ +###################################################################### +# +# !!!!!!! IMPORTANT !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +# +# Edit only bat.pro.mingw32.in -- bat.pro.mingw32 is built by the ./configure program +# +# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +# +CONFIG += qt cross-win32 +# CONFIG += debug + +bins.path = ./ +bins.files = ./bat +confs.path = ./ +confs.commands = ./install_conf_file + +TEMPLATE = app +TARGET = bat +DEPENDPATH += . +INCLUDEPATH += .. . 
./console ./restore ./select + +cross-win32 { +# LIBS += ../win32/dll/bacula.a + LIBS += -mwindows -L../win32/release32 -lbacula +} +!cross-win32 { + LIBS += -L../lib -lbac -L../findlib -lbacfind @OPENSSL_LIBS@ +} + +qwt { + INCLUDEPATH += @QWT_INC@ + LIBS += @QWT_LDFLAGS@ @QWT_LIB@ +} + +RESOURCES = main.qrc +MOC_DIR = moc32 +OBJECTS_DIR = obj32 +UI_DIR = ui32 +QMAKE_CC = i686-w64-mingw32-gcc +QMAKE_CXX = i686-w64-mingw32-g++ +QMAKE_INCDIR = $(DEPKGS)/depkgs-mingw32/include/pthreads $(DEPKGS)/depkgs-mingw32/include/ ../win32/compat +QMAKE_INCDIR_QT = $(DEPKGS)/depkgs-mingw32/include/qt +QMAKE_LIBDIR_QT = $(DEPKGS)/depkgs-mingw32/lib/qt +QMAKE_LINK = i686-w64-mingw32-g++ +QMAKE_LFLAGS = -mthreads -Wl,-enable-stdcall-fixup -Wl,-enable-auto-import -m32 -fno-strict-aliasing -Wl,-enable-runtime-pseudo-reloc +QMAKE_LIB = i686-w64-mingw32-ar -ru +QMAKE_RC = i686-w64-mingw32-windres + +# Main window +FORMS += main.ui +FORMS += prefs.ui +FORMS += label/label.ui +FORMS += relabel/relabel.ui +FORMS += mount/mount.ui +FORMS += console/console.ui +FORMS += restore/restore.ui restore/prerestore.ui restore/brestore.ui +FORMS += restore/runrestore.ui restore/restoretree.ui +FORMS += run/run.ui run/runcmd.ui run/estimate.ui run/prune.ui +FORMS += select/select.ui select/textinput.ui +FORMS += medialist/medialist.ui mediaedit/mediaedit.ui joblist/joblist.ui +FORMS += medialist/mediaview.ui +FORMS += clients/clients.ui storage/storage.ui fileset/fileset.ui +FORMS += joblog/joblog.ui jobs/jobs.ui job/job.ui +FORMS += help/help.ui mediainfo/mediainfo.ui +FORMS += status/dirstat.ui storage/content.ui +FORMS += status/clientstat.ui +FORMS += status/storstat.ui +qwt { + FORMS += jobgraphs/jobplotcontrols.ui +} + +# Main directory +HEADERS += mainwin.h bat.h bat_conf.h qstd.h pages.h +SOURCES += main.cpp bat_conf.cpp mainwin.cpp qstd.cpp pages.cpp + +# bcomm +HEADERS += bcomm/dircomm.h +SOURCES += bcomm/dircomm.cpp bcomm/dircomm_auth.cpp + +# Console +HEADERS += console/console.h +SOURCES += console/console.cpp + +# Restore +HEADERS += restore/restore.h +SOURCES += restore/prerestore.cpp restore/restore.cpp restore/brestore.cpp + +# Label dialog +HEADERS += label/label.h +SOURCES += label/label.cpp + +# Relabel dialog +HEADERS += relabel/relabel.h +SOURCES += relabel/relabel.cpp + +# Mount dialog +HEADERS += mount/mount.h +SOURCES += mount/mount.cpp + +# Run dialog +HEADERS += run/run.h +SOURCES += run/run.cpp run/runcmd.cpp run/estimate.cpp run/prune.cpp + +# Select dialog +HEADERS += select/select.h select/textinput.h +SOURCES += select/select.cpp select/textinput.cpp + +## MediaList +HEADERS += medialist/medialist.h +SOURCES += medialist/medialist.cpp + +# MediaView +HEADERS += medialist/mediaview.h +SOURCES += medialist/mediaview.cpp + +## MediaEdit +HEADERS += mediaedit/mediaedit.h +SOURCES += mediaedit/mediaedit.cpp + +## JobList +HEADERS += joblist/joblist.h +SOURCES += joblist/joblist.cpp + +## Clients +HEADERS += clients/clients.h +SOURCES += clients/clients.cpp + +## Storage +HEADERS += storage/storage.h +SOURCES += storage/storage.cpp + +## Storage content +HEADERS += storage/content.h +SOURCES += storage/content.cpp + +## Fileset +HEADERS += fileset/fileset.h +SOURCES += fileset/fileset.cpp + +## Job log +HEADERS += joblog/joblog.h +SOURCES += joblog/joblog.cpp + +## Job +HEADERS += job/job.h +SOURCES += job/job.cpp + +## Jobs +HEADERS += jobs/jobs.h +SOURCES += jobs/jobs.cpp + +## RestoreTree +HEADERS += restore/restoretree.h +SOURCES += restore/restoretree.cpp + +## Job Step Graphs +qwt { + HEADERS 
+= jobgraphs/jobplot.h + SOURCES += jobgraphs/jobplot.cpp +} + +# Help dialog +HEADERS += help/help.h +SOURCES += help/help.cpp + +# Media info dialog +HEADERS += mediainfo/mediainfo.h +SOURCES += mediainfo/mediainfo.cpp + +## Status Dir +HEADERS += status/dirstat.h +SOURCES += status/dirstat.cpp + +## Status Client +HEADERS += status/clientstat.h +SOURCES += status/clientstat.cpp + +## Status Client +HEADERS += status/storstat.h +SOURCES += status/storstat.cpp + +# Utility sources +HEADERS += util/fmtwidgetitem.h util/comboutil.h +SOURCES += util/fmtwidgetitem.cpp util/comboutil.cpp + +INSTALLS += bins +INSTALLS += confs + +QMAKE_EXTRA_TARGETS += depend + +TRANSLATIONS += ts/bat_fr.ts ts/bat_de.ts diff --git a/src/qt-console/bat.pro.mingw64 b/src/qt-console/bat.pro.mingw64 new file mode 100644 index 00000000..4812feca --- /dev/null +++ b/src/qt-console/bat.pro.mingw64 @@ -0,0 +1,191 @@ +###################################################################### +# +# !!!!!!! IMPORTANT !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +# +# Edit only bat.pro.mingw64.in -- bat.pro.mingw64 is built by the ./configure program +# +# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +# +CONFIG += qt cross-win32 +# CONFIG += debug + +bins.path = ./ +bins.files = ./bat +confs.path = ./ +confs.commands = ./install_conf_file + +TEMPLATE = app +TARGET = bat +DEPENDPATH += . +INCLUDEPATH += .. . ./console ./restore ./select + +cross-win32 { +# LIBS += ../win32/dll/bacula.a + LIBS += -mwindows -L../win32/release64 -lbacula +} +!cross-win32 { + LIBS += -L../lib -lbac -L../findlib -lbacfind -lssl -lcrypto +} + +qwt { + INCLUDEPATH += + LIBS += +} + +RESOURCES = main.qrc +MOC_DIR = moc64 +OBJECTS_DIR = obj64 +UI_DIR = ui64 +QMAKE_CC = x86_64-w64-mingw32-gcc +QMAKE_CXX = x86_64-w64-mingw32-g++ +QMAKE_INCDIR = $(DEPKGS)/depkgs-mingw-w64/include/pthreads $(DEPKGS)/depkgs-mingw-w64/include/ ../win32/compat +QMAKE_INCDIR_QT = $(DEPKGS)/depkgs-mingw-w64/include/qt +QMAKE_LIBDIR_QT = $(DEPKGS)/depkgs-mingw-w64/lib/qt +QMAKE_LINK = x86_64-w64-mingw32-g++ +QMAKE_LFLAGS = -mthreads -Wl,-enable-stdcall-fixup -Wl,-enable-auto-import -m64 -fno-strict-aliasing -Wl,-enable-runtime-pseudo-reloc +QMAKE_LIB = x86_64-w64-mingw32-ar -ru +QMAKE_RC = x86_64-w64-mingw32-windres + +# Main window +FORMS += main.ui +FORMS += prefs.ui +FORMS += label/label.ui +FORMS += relabel/relabel.ui +FORMS += mount/mount.ui +FORMS += console/console.ui +FORMS += restore/restore.ui restore/prerestore.ui restore/brestore.ui +FORMS += restore/runrestore.ui restore/restoretree.ui +FORMS += run/run.ui run/runcmd.ui run/estimate.ui run/prune.ui +FORMS += select/select.ui select/textinput.ui +FORMS += medialist/medialist.ui mediaedit/mediaedit.ui joblist/joblist.ui +FORMS += medialist/mediaview.ui +FORMS += clients/clients.ui storage/storage.ui fileset/fileset.ui +FORMS += joblog/joblog.ui jobs/jobs.ui job/job.ui +FORMS += help/help.ui mediainfo/mediainfo.ui +FORMS += status/dirstat.ui storage/content.ui +FORMS += status/clientstat.ui +FORMS += status/storstat.ui +qwt { + FORMS += jobgraphs/jobplotcontrols.ui +} + +# Main directory +HEADERS += mainwin.h bat.h bat_conf.h qstd.h pages.h +SOURCES += main.cpp bat_conf.cpp mainwin.cpp qstd.cpp pages.cpp + +# bcomm +HEADERS += bcomm/dircomm.h +SOURCES += bcomm/dircomm.cpp bcomm/dircomm_auth.cpp + +# Console +HEADERS += console/console.h +SOURCES += console/console.cpp + +# Restore +HEADERS += restore/restore.h +SOURCES += restore/prerestore.cpp restore/restore.cpp 
restore/brestore.cpp + +# Label dialog +HEADERS += label/label.h +SOURCES += label/label.cpp + +# Relabel dialog +HEADERS += relabel/relabel.h +SOURCES += relabel/relabel.cpp + +# Mount dialog +HEADERS += mount/mount.h +SOURCES += mount/mount.cpp + +# Run dialog +HEADERS += run/run.h +SOURCES += run/run.cpp run/runcmd.cpp run/estimate.cpp run/prune.cpp + +# Select dialog +HEADERS += select/select.h select/textinput.h +SOURCES += select/select.cpp select/textinput.cpp + +## MediaList +HEADERS += medialist/medialist.h +SOURCES += medialist/medialist.cpp + +# MediaView +HEADERS += medialist/mediaview.h +SOURCES += medialist/mediaview.cpp + +## MediaEdit +HEADERS += mediaedit/mediaedit.h +SOURCES += mediaedit/mediaedit.cpp + +## JobList +HEADERS += joblist/joblist.h +SOURCES += joblist/joblist.cpp + +## Clients +HEADERS += clients/clients.h +SOURCES += clients/clients.cpp + +## Storage +HEADERS += storage/storage.h +SOURCES += storage/storage.cpp + +## Storage content +HEADERS += storage/content.h +SOURCES += storage/content.cpp + +## Fileset +HEADERS += fileset/fileset.h +SOURCES += fileset/fileset.cpp + +## Job log +HEADERS += joblog/joblog.h +SOURCES += joblog/joblog.cpp + +## Job +HEADERS += job/job.h +SOURCES += job/job.cpp + +## Jobs +HEADERS += jobs/jobs.h +SOURCES += jobs/jobs.cpp + +## RestoreTree +HEADERS += restore/restoretree.h +SOURCES += restore/restoretree.cpp + +## Job Step Graphs +qwt { + HEADERS += jobgraphs/jobplot.h + SOURCES += jobgraphs/jobplot.cpp +} + +# Help dialog +HEADERS += help/help.h +SOURCES += help/help.cpp + +# Media info dialog +HEADERS += mediainfo/mediainfo.h +SOURCES += mediainfo/mediainfo.cpp + +## Status Dir +HEADERS += status/dirstat.h +SOURCES += status/dirstat.cpp + +## Status Client +HEADERS += status/clientstat.h +SOURCES += status/clientstat.cpp + +## Status Client +HEADERS += status/storstat.h +SOURCES += status/storstat.cpp + +# Utility sources +HEADERS += util/fmtwidgetitem.h util/comboutil.h +SOURCES += util/fmtwidgetitem.cpp util/comboutil.cpp + +INSTALLS += bins +INSTALLS += confs + +QMAKE_EXTRA_TARGETS += depend + +TRANSLATIONS += ts/bat_fr.ts ts/bat_de.ts diff --git a/src/qt-console/bat.pro.mingw64.in b/src/qt-console/bat.pro.mingw64.in new file mode 100644 index 00000000..1c22a2a5 --- /dev/null +++ b/src/qt-console/bat.pro.mingw64.in @@ -0,0 +1,191 @@ +###################################################################### +# +# !!!!!!! IMPORTANT !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +# +# Edit only bat.pro.mingw64.in -- bat.pro.mingw64 is built by the ./configure program +# +# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +# +CONFIG += qt cross-win32 +# CONFIG += debug + +bins.path = ./ +bins.files = ./bat +confs.path = ./ +confs.commands = ./install_conf_file + +TEMPLATE = app +TARGET = bat +DEPENDPATH += . +INCLUDEPATH += .. . 
./console ./restore ./select + +cross-win32 { +# LIBS += ../win32/dll/bacula.a + LIBS += -mwindows -L../win32/release64 -lbacula +} +!cross-win32 { + LIBS += -L../lib -lbac -L../findlib -lbacfind @OPENSSL_LIBS@ +} + +qwt { + INCLUDEPATH += @QWT_INC@ + LIBS += @QWT_LDFLAGS@ @QWT_LIB@ +} + +RESOURCES = main.qrc +MOC_DIR = moc64 +OBJECTS_DIR = obj64 +UI_DIR = ui64 +QMAKE_CC = x86_64-w64-mingw32-gcc +QMAKE_CXX = x86_64-w64-mingw32-g++ +QMAKE_INCDIR = $(DEPKGS)/depkgs-mingw-w64/include/pthreads $(DEPKGS)/depkgs-mingw-w64/include/ ../win32/compat +QMAKE_INCDIR_QT = $(DEPKGS)/depkgs-mingw-w64/include/qt +QMAKE_LIBDIR_QT = $(DEPKGS)/depkgs-mingw-w64/lib/qt +QMAKE_LINK = x86_64-w64-mingw32-g++ +QMAKE_LFLAGS = -mthreads -Wl,-enable-stdcall-fixup -Wl,-enable-auto-import -m64 -fno-strict-aliasing -Wl,-enable-runtime-pseudo-reloc +QMAKE_LIB = x86_64-w64-mingw32-ar -ru +QMAKE_RC = x86_64-w64-mingw32-windres + +# Main window +FORMS += main.ui +FORMS += prefs.ui +FORMS += label/label.ui +FORMS += relabel/relabel.ui +FORMS += mount/mount.ui +FORMS += console/console.ui +FORMS += restore/restore.ui restore/prerestore.ui restore/brestore.ui +FORMS += restore/runrestore.ui restore/restoretree.ui +FORMS += run/run.ui run/runcmd.ui run/estimate.ui run/prune.ui +FORMS += select/select.ui select/textinput.ui +FORMS += medialist/medialist.ui mediaedit/mediaedit.ui joblist/joblist.ui +FORMS += medialist/mediaview.ui +FORMS += clients/clients.ui storage/storage.ui fileset/fileset.ui +FORMS += joblog/joblog.ui jobs/jobs.ui job/job.ui +FORMS += help/help.ui mediainfo/mediainfo.ui +FORMS += status/dirstat.ui storage/content.ui +FORMS += status/clientstat.ui +FORMS += status/storstat.ui +qwt { + FORMS += jobgraphs/jobplotcontrols.ui +} + +# Main directory +HEADERS += mainwin.h bat.h bat_conf.h qstd.h pages.h +SOURCES += main.cpp bat_conf.cpp mainwin.cpp qstd.cpp pages.cpp + +# bcomm +HEADERS += bcomm/dircomm.h +SOURCES += bcomm/dircomm.cpp bcomm/dircomm_auth.cpp + +# Console +HEADERS += console/console.h +SOURCES += console/console.cpp + +# Restore +HEADERS += restore/restore.h +SOURCES += restore/prerestore.cpp restore/restore.cpp restore/brestore.cpp + +# Label dialog +HEADERS += label/label.h +SOURCES += label/label.cpp + +# Relabel dialog +HEADERS += relabel/relabel.h +SOURCES += relabel/relabel.cpp + +# Mount dialog +HEADERS += mount/mount.h +SOURCES += mount/mount.cpp + +# Run dialog +HEADERS += run/run.h +SOURCES += run/run.cpp run/runcmd.cpp run/estimate.cpp run/prune.cpp + +# Select dialog +HEADERS += select/select.h select/textinput.h +SOURCES += select/select.cpp select/textinput.cpp + +## MediaList +HEADERS += medialist/medialist.h +SOURCES += medialist/medialist.cpp + +# MediaView +HEADERS += medialist/mediaview.h +SOURCES += medialist/mediaview.cpp + +## MediaEdit +HEADERS += mediaedit/mediaedit.h +SOURCES += mediaedit/mediaedit.cpp + +## JobList +HEADERS += joblist/joblist.h +SOURCES += joblist/joblist.cpp + +## Clients +HEADERS += clients/clients.h +SOURCES += clients/clients.cpp + +## Storage +HEADERS += storage/storage.h +SOURCES += storage/storage.cpp + +## Storage content +HEADERS += storage/content.h +SOURCES += storage/content.cpp + +## Fileset +HEADERS += fileset/fileset.h +SOURCES += fileset/fileset.cpp + +## Job log +HEADERS += joblog/joblog.h +SOURCES += joblog/joblog.cpp + +## Job +HEADERS += job/job.h +SOURCES += job/job.cpp + +## Jobs +HEADERS += jobs/jobs.h +SOURCES += jobs/jobs.cpp + +## RestoreTree +HEADERS += restore/restoretree.h +SOURCES += restore/restoretree.cpp + +## Job Step Graphs 
+qwt { + HEADERS += jobgraphs/jobplot.h + SOURCES += jobgraphs/jobplot.cpp +} + +# Help dialog +HEADERS += help/help.h +SOURCES += help/help.cpp + +# Media info dialog +HEADERS += mediainfo/mediainfo.h +SOURCES += mediainfo/mediainfo.cpp + +## Status Dir +HEADERS += status/dirstat.h +SOURCES += status/dirstat.cpp + +## Status Client +HEADERS += status/clientstat.h +SOURCES += status/clientstat.cpp + +## Status Client +HEADERS += status/storstat.h +SOURCES += status/storstat.cpp + +# Utility sources +HEADERS += util/fmtwidgetitem.h util/comboutil.h +SOURCES += util/fmtwidgetitem.cpp util/comboutil.cpp + +INSTALLS += bins +INSTALLS += confs + +QMAKE_EXTRA_TARGETS += depend + +TRANSLATIONS += ts/bat_fr.ts ts/bat_de.ts diff --git a/src/qt-console/bat_conf.cpp b/src/qt-console/bat_conf.cpp new file mode 100644 index 00000000..ce2ab36f --- /dev/null +++ b/src/qt-console/bat_conf.cpp @@ -0,0 +1,330 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Main configuration file parser for Bacula User Agent + * some parts may be split into separate files such as + * the schedule configuration (sch_config.c). + * + * Note, the configuration file parser consists of three parts + * + * 1. The generic lexical scanner in lib/lex.c and lib/lex.h + * + * 2. The generic config scanner in lib/parse_config.c and + * lib/parse_config.h. + * These files contain the parser code, some utility + * routines, and the common store routines (name, int, + * string). + * + * 3. The daemon specific file, which contains the Resource + * definitions as well as any specific store routines + * for the resource records. + * + * Kern Sibbald, January MM, September MM + * + */ + +#include "bacula.h" +#include "bat_conf.h" + +/* Define the first and last resource ID record + * types. Note, these should be unique for each + * daemon though not a requirement. + */ +int32_t r_first = R_FIRST; +int32_t r_last = R_LAST; +RES_HEAD **res_head; + +/* Forward referenced subroutines */ + + +/* We build the current resource here as we are + * scanning the resource configuration definition, + * then move it to allocated memory when the resource + * scan is complete. + */ +#if defined(_MSC_VER) +extern "C" URES res_all; /* declare as C to avoid name mangling by visual c */ +#endif +URES res_all; +int32_t res_all_size = sizeof(res_all); + +/* Definition of records permitted within each + * resource with the routine to process the record + * information. 
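 * (Editor's note, not part of the original source: each RES_ITEM row below
 *  pairs a lower-case directive keyword with its store routine, the offset of
 *  the destination member inside the resource union, option flags and a
 *  default value.  For example, the existing row
 *     {"dirport", store_pint32, ITEM(dir_res.DIRport), 0, ITEM_DEFAULT, 9101},
 *  makes 9101 the default DIRport of a Director resource unless the
 *  configuration file overrides it.)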
+ */ +static RES_ITEM dir_items[] = { + {"name", store_name, ITEM(dir_res.hdr.name), 0, ITEM_REQUIRED, 0}, + {"description", store_str, ITEM(dir_res.hdr.desc), 0, 0, 0}, + {"dirport", store_pint32, ITEM(dir_res.DIRport), 0, ITEM_DEFAULT, 9101}, + {"address", store_str, ITEM(dir_res.address), 0, ITEM_REQUIRED, 0}, + {"password", store_password, ITEM(dir_res.password), 0, 0, 0}, + {"tlsauthenticate",store_bool, ITEM(dir_res.tls_authenticate), 0, 0, 0}, + {"tlsenable", store_bool, ITEM(dir_res.tls_enable), 0, 0, 0}, + {"tlsrequire", store_bool, ITEM(dir_res.tls_require), 0, 0, 0}, + {"tlscacertificatefile", store_dir, ITEM(dir_res.tls_ca_certfile), 0, 0, 0}, + {"tlscacertificatedir", store_dir, ITEM(dir_res.tls_ca_certdir), 0, 0, 0}, + {"tlscertificate", store_dir, ITEM(dir_res.tls_certfile), 0, 0, 0}, + {"tlskey", store_dir, ITEM(dir_res.tls_keyfile), 0, 0, 0}, + {"heartbeatinterval", store_time, ITEM(dir_res.heartbeat_interval), 0, ITEM_DEFAULT, 5 * 60}, + {NULL, NULL, {0}, 0, 0, 0} +}; + +static RES_ITEM con_items[] = { + {"name", store_name, ITEM(con_res.hdr.name), 0, ITEM_REQUIRED, 0}, + {"description", store_str, ITEM(con_res.hdr.desc), 0, 0, 0}, + {"password", store_password, ITEM(con_res.password), 0, ITEM_REQUIRED, 0}, + {"tlsauthenticate",store_bool, ITEM(con_res.tls_authenticate), 0, 0, 0}, + {"tlsenable", store_bool, ITEM(con_res.tls_enable), 0, 0, 0}, + {"tlsrequire", store_bool, ITEM(con_res.tls_require), 0, 0, 0}, + {"tlscacertificatefile", store_dir, ITEM(con_res.tls_ca_certfile), 0, 0, 0}, + {"tlscacertificatedir", store_dir, ITEM(con_res.tls_ca_certdir), 0, 0, 0}, + {"tlscertificate", store_dir, ITEM(con_res.tls_certfile), 0, 0, 0}, + {"tlskey", store_dir, ITEM(con_res.tls_keyfile), 0, 0, 0}, + {"heartbeatinterval", store_time, ITEM(con_res.heartbeat_interval), 0, ITEM_DEFAULT, 5 * 60}, + {"director", store_str, ITEM(con_res.director), 0, 0, 0}, + {"CommCompression", store_bool, ITEM(con_res.comm_compression), 0, ITEM_DEFAULT, true}, + {NULL, NULL, {0}, 0, 0, 0} +}; + +static RES_ITEM con_font_items[] = { + {"name", store_name, ITEM(con_font.hdr.name), 0, ITEM_REQUIRED, 0}, + {"description", store_str, ITEM(con_font.hdr.desc), 0, 0, 0}, + {"font", store_str, ITEM(con_font.fontface), 0, 0, 0}, + {NULL, NULL, {0}, 0, 0, 0} +}; + + +/* + * This is the master resource definition. + * It must have one item for each of the resources. + */ +RES_TABLE resources[] = { + {"director", dir_items, R_DIRECTOR}, + {"console", con_items, R_CONSOLE}, + {"consolefont", con_font_items, R_CONSOLE_FONT}, + {NULL, NULL, 0} +}; + + +/* Dump contents of resource */ +void dump_resource(int type, RES *ares, void sendit(void *sock, const char *fmt, ...), void *sock) +{ + RES *next; + URES *res = (URES *)ares; + bool recurse = true; + + if (res == NULL) { + printf(_("No record for %d %s\n"), type, res_to_str(type)); + return; + } + if (type < 0) { /* no recursion */ + type = - type; + recurse = false; + } + switch (type) { + case R_DIRECTOR: + printf(_("Director: name=%s address=%s DIRport=%d\n"), ares->name, + res->dir_res.address, res->dir_res.DIRport); + break; + case R_CONSOLE: + printf(_("Console: name=%s\n"), ares->name); + break; + case R_CONSOLE_FONT: + printf(_("ConsoleFont: name=%s font face=%s\n"), + ares->name, NPRT(res->con_font.fontface)); + break; + default: + printf(_("Unknown resource type %d\n"), type); + } + if (recurse) { + next = GetNextRes(0, (RES *)res); + if (next) { + dump_resource(type, next, sendit, sock); + } + } +} + +/* + * Free memory of resource. 
+ * NB, we don't need to worry about freeing any references + * to other resources as they will be freed when that + * resource chain is traversed. Mainly we worry about freeing + * allocated strings (names). + */ +void free_resource(RES *sres, int type) +{ + URES *res = (URES *)sres; + + if (res == NULL) + return; + + /* common stuff -- free the resource name */ + if (res->dir_res.hdr.name) { + free(res->dir_res.hdr.name); + } + if (res->dir_res.hdr.desc) { + free(res->dir_res.hdr.desc); + } + + switch (type) { + case R_DIRECTOR: + if (res->dir_res.address) { + free(res->dir_res.address); + } + if (res->dir_res.tls_ctx) { + free_tls_context(res->dir_res.tls_ctx); + } + if (res->dir_res.tls_ca_certfile) { + free(res->dir_res.tls_ca_certfile); + } + if (res->dir_res.tls_ca_certdir) { + free(res->dir_res.tls_ca_certdir); + } + if (res->dir_res.tls_certfile) { + free(res->dir_res.tls_certfile); + } + if (res->dir_res.tls_keyfile) { + free(res->dir_res.tls_keyfile); + } + break; + case R_CONSOLE: + if (res->con_res.password) { + free(res->con_res.password); + } + if (res->con_res.tls_ctx) { + free_tls_context(res->con_res.tls_ctx); + } + if (res->con_res.tls_ca_certfile) { + free(res->con_res.tls_ca_certfile); + } + if (res->con_res.tls_ca_certdir) { + free(res->con_res.tls_ca_certdir); + } + if (res->con_res.tls_certfile) { + free(res->con_res.tls_certfile); + } + if (res->con_res.tls_keyfile) { + free(res->con_res.tls_keyfile); + } + if (res->con_res.director) { + free(res->con_res.director); + } + break; + case R_CONSOLE_FONT: + if (res->con_font.fontface) { + free(res->con_font.fontface); + } + break; + default: + printf(_("Unknown resource type %d\n"), type); + } + /* Common stuff again -- free the resource, recurse to next one */ + if (res) { + free(res); + } +} + +/* Save the new resource by chaining it into the head list for + * the resource. If this is pass 2, we update any resource + * pointers (currently only in the Job resource). + */ +bool save_resource(CONFIG *config, int type, RES_ITEM *items, int pass) +{ + int rindex = type - r_first; + int i, size = 0; + int error = 0; + + /* + * Ensure that all required items are present + */ + for (i=0; items[i].name; i++) { + if (items[i].flags & ITEM_REQUIRED) { + if (!bit_is_set(i, res_all.dir_res.hdr.item_present)) { + Mmsg(config->m_errmsg, _("\"%s\" directive is required in \"%s\" resource, but not found.\n"), + items[i].name, resources[rindex].name); + return false; + } + } + } + + /* During pass 2, we looked up pointers to all the resources + * referrenced in the current resource, , now we + * must copy their address from the static record to the allocated + * record. + */ + if (pass == 2) { + switch (type) { + /* Resources not containing a resource */ + case R_DIRECTOR: + break; + + case R_CONSOLE: + case R_CONSOLE_FONT: + break; + + default: + Emsg1(M_ERROR, 0, _("Unknown resource type %d\n"), type); + error = 1; + break; + } + /* Note, the resoure name was already saved during pass 1, + * so here, we can just release it. 
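 * (Editor's note, not part of the original source: pass 1 fills the static
 *  res_all record and inserts a copy of it into the resource chain; pass 2
 *  would fix up cross-resource pointers, which for bat's three resource
 *  types is nothing, so only the already-saved name and description are
 *  released there.)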
+ */ + if (res_all.dir_res.hdr.name) { + free(res_all.dir_res.hdr.name); + res_all.dir_res.hdr.name = NULL; + } + if (res_all.dir_res.hdr.desc) { + free(res_all.dir_res.hdr.desc); + res_all.dir_res.hdr.desc = NULL; + } + return true; + } + + /* The following code is only executed during pass 1 */ + switch (type) { + case R_DIRECTOR: + size = sizeof(DIRRES); + break; + case R_CONSOLE_FONT: + size = sizeof(CONFONTRES); + break; + case R_CONSOLE: + size = sizeof(CONRES); + break; + default: + printf(_("Unknown resource type %d\n"), type); + error = 1; + break; + } + /* Common */ + if (!error) { + if (!config->insert_res(rindex, size)) { + return false; + } + } + return true; +} + +bool parse_bat_config(CONFIG *config, const char *configfile, int exit_code) +{ + config->init(configfile, NULL, exit_code, (void *)&res_all, res_all_size, + r_first, r_last, resources, &res_head); + return config->parse_config(); +} diff --git a/src/qt-console/bat_conf.h b/src/qt-console/bat_conf.h new file mode 100644 index 00000000..7321f3dd --- /dev/null +++ b/src/qt-console/bat_conf.h @@ -0,0 +1,122 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula Adminstration Tool (bat) + * + * Kern Sibbald, March 2002 + */ + +#ifndef _BAT_CONF_H_ +#define _BAT_CONF_H_ + +/* + * Resource codes -- they must be sequential for indexing + */ + +enum { + R_DIRECTOR = 1001, + R_CONSOLE, + R_CONSOLE_FONT, + R_FIRST = R_DIRECTOR, + R_LAST = R_CONSOLE_FONT /* Keep this updated */ +}; + +/* + * Some resource attributes + */ +enum { + R_NAME = 1020, + R_ADDRESS, + R_PASSWORD, + R_TYPE, + R_BACKUP +}; + + +/* Definition of the contents of each Resource */ +class DIRRES { +public: + RES hdr; + uint32_t DIRport; /* UA server port */ + char *address; /* UA server address */ + char *password; /* UA server password */ + bool tls_authenticate; /* Authenticate with tls */ + bool tls_enable; /* Enable TLS */ + bool tls_require; /* Require TLS */ + char *tls_ca_certfile; /* TLS CA Certificate File */ + char *tls_ca_certdir; /* TLS CA Certificate Directory */ + char *tls_certfile; /* TLS Client Certificate File */ + char *tls_keyfile; /* TLS Client Key File */ + utime_t heartbeat_interval; /* Dir heartbeat interval */ + + TLS_CONTEXT *tls_ctx; /* Shared TLS Context */ + + /* Methods */ + char *name() const; +}; + +inline char *DIRRES::name() const { return hdr.name; } + + +struct CONFONTRES { + RES hdr; + char *fontface; /* Console Font specification */ +}; + +class CONRES { +public: + RES hdr; + char *password; /* UA server password */ + bool comm_compression; /* Enable comm line compression */ + bool tls_authenticate; /* Authenticate with tls */ + bool tls_enable; /* Enable TLS on all connections */ + bool tls_require; /* Require TLS on all connections */ + char *tls_ca_certfile; /* TLS CA Certificate File */ + char *tls_ca_certdir; /* TLS CA Certificate Directory */ + char *tls_certfile; /* TLS Client Certificate File */ + char *tls_keyfile; /* TLS Client 
Key File */ + char *director; /* bind to director */ + utime_t heartbeat_interval; /* Cons heartbeat interval */ + + TLS_CONTEXT *tls_ctx; /* Shared TLS Context */ + + /* Methods */ + char *name() const; +}; + +inline char *CONRES::name() const { return hdr.name; } + + +/* Define the Union of all the above + * resource structure definitions. + */ +union u_res { + DIRRES dir_res; + CONRES con_res; + CONFONTRES con_font; + RES hdr; +}; + +typedef union u_res URES; + +#define GetConsoleResWithName(x) ((CONRES *)GetResWithName(R_CONSOLE, (x))) +#define GetDirResWithName(x) ((DIRRES *)GetResWithName(R_DIRECTOR, (x))) + + +#endif /* _BAT_CONF_H_ */ diff --git a/src/qt-console/bcomm/dircomm.cpp b/src/qt-console/bcomm/dircomm.cpp new file mode 100644 index 00000000..ccbf788a --- /dev/null +++ b/src/qt-console/bcomm/dircomm.cpp @@ -0,0 +1,575 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * DirComm, Director communications,class + * + * Kern Sibbald, January MMVII + * + */ + +#include "bat.h" +#include "console.h" +#include "restore.h" +#include "select.h" +#include "textinput.h" +#include "run/run.h" + +static int tls_pem_callback(char *buf, int size, const void *userdata); + +DirComm::DirComm(Console *parent, int conn): m_notifier(NULL), m_api_set(false) +{ + m_console = parent; + m_sock = NULL; + m_at_prompt = false; + m_at_main_prompt = false; + m_sent_blank = false; + m_conn = conn; + m_in_command = 0; + m_in_select = false; + m_notify = false; +} + +DirComm::~DirComm() +{ +} + +/* Terminate any open socket */ +void DirComm::terminate() +{ + if (m_sock) { + if (m_notifier) { + m_notifier->setEnabled(false); + delete m_notifier; + m_notifier = NULL; + m_notify = false; + } + if (mainWin->m_connDebug) + Pmsg2(000, "DirComm %i terminating connections %s\n", m_conn, m_console->m_dir->name()); + free_bsock(m_sock); + } +} + +/* + * Connect to Director. 
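 * (Editor's summary of the sequence below, added for orientation: select the
 *  Console resource whose Director directive matches this Director, falling
 *  back to an unbound Console and then to the first one; initialize the
 *  Console and Director TLS contexts on first use; open the BSOCK with the
 *  configured heartbeat; run authenticate_director(); then attach a
 *  QSocketNotifier and send ".api 1" to switch the Director to API output.)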
+ */ +bool DirComm::connect_dir() +{ + JCR *jcr = new JCR; + utime_t heart_beat; + char buf[1024]; + CONRES *cons; + int numcon = 0; + int i = 0; + + buf[0] = 0; + + foreach_res(cons, R_CONSOLE) { + numcon++; + } + + if (m_sock && !is_bsock_open(m_sock)) { + mainWin->set_status( tr("Already connected.")); + m_console->display_textf(_("Already connected\"%s\".\n"), + m_console->m_dir->name()); + if (mainWin->m_connDebug) { + Pmsg2(000, "DirComm %i BAILING already connected %s\n", m_conn, m_console->m_dir->name()); + } + goto bail_out; + } + + if (mainWin->m_connDebug)Pmsg2(000, "DirComm %i connecting %s\n", m_conn, m_console->m_dir->name()); + memset(jcr, 0, sizeof(JCR)); + + mainWin->set_statusf(_("Connecting to Director %s:%d"), m_console->m_dir->address, m_console->m_dir->DIRport); + if (m_conn == 0) { + m_console->display_textf(_("Connecting to Director %s:%d\n\n"), m_console->m_dir->address, m_console->m_dir->DIRport); + } + + /* Give GUI a chance */ + app->processEvents(); + + LockRes(); + /* If cons==NULL, default console will be used */ + for (i=0; idirector && strcasecmp(cons->director, m_console->m_dir->name()) == 0) { + break; + } + if (i == (numcon - 1)) { + cons = NULL; + } + } + + /* Look for the first non-linked console */ + if (cons == NULL) { + for (i=0; idirector == NULL) { + break; + } + if (i == (numcon - 1)) { + cons = NULL; + } + } + } + + /* If no console, take first one */ + if (!cons) { + cons = (CONRES *)GetNextRes(R_CONSOLE, (RES *)NULL); + } + UnlockRes(); + + /* Initialize Console TLS context once */ + if (cons && !cons->tls_ctx && (cons->tls_enable || cons->tls_require)) { + /* Generate passphrase prompt */ + bsnprintf(buf, sizeof(buf), "Passphrase for Console \"%s\" TLS private key: ", + cons->name()); + + /* Initialize TLS context: + * Args: CA certfile, CA certdir, Certfile, Keyfile, + * Keyfile PEM Callback, Keyfile CB Userdata, DHfile, Verify Peer + */ + cons->tls_ctx = new_tls_context(cons->tls_ca_certfile, + cons->tls_ca_certdir, cons->tls_certfile, + cons->tls_keyfile, tls_pem_callback, &buf, NULL, true); + + if (!cons->tls_ctx) { + m_console->display_textf(_("Failed to initialize TLS context for Console \"%s\".\n"), + m_console->m_dir->name()); + if (mainWin->m_connDebug) { + Pmsg2(000, "DirComm %i BAILING Failed to initialize TLS context for Console %s\n", m_conn, m_console->m_dir->name()); + } + goto bail_out; + } + } + + /* Initialize Director TLS context once */ + if (!m_console->m_dir->tls_ctx && (m_console->m_dir->tls_enable || m_console->m_dir->tls_require)) { + /* Generate passphrase prompt */ + bsnprintf(buf, sizeof(buf), "Passphrase for Director \"%s\" TLS private key: ", + m_console->m_dir->name()); + + /* Initialize TLS context: + * Args: CA certfile, CA certdir, Certfile, Keyfile, + * Keyfile PEM Callback, Keyfile CB Userdata, DHfile, Verify Peer */ + m_console->m_dir->tls_ctx = new_tls_context(m_console->m_dir->tls_ca_certfile, + m_console->m_dir->tls_ca_certdir, m_console->m_dir->tls_certfile, + m_console->m_dir->tls_keyfile, tls_pem_callback, &buf, NULL, true); + + if (!m_console->m_dir->tls_ctx) { + m_console->display_textf(_("Failed to initialize TLS context for Director \"%s\".\n"), + m_console->m_dir->name()); + mainWin->set_status("Connection failed"); + if (mainWin->m_connDebug) { + Pmsg2(000, "DirComm %i BAILING Failed to initialize TLS context for Director %s\n", m_conn, m_console->m_dir->name()); + } + goto bail_out; + } + } + + if (m_console->m_dir->heartbeat_interval) { + heart_beat = m_console->m_dir->heartbeat_interval; + } 
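   /* Editor's note (not in the original source): the heartbeat interval used
      for this connection falls back from the Director resource's setting to
      the matched Console resource's setting, and finally to 0, i.e. no
      application-level heartbeat. */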
else if (cons) { + heart_beat = cons->heartbeat_interval; + } else { + heart_beat = 0; + } + + if (!m_sock) { + m_sock = new_bsock(); + } + if (!m_sock->connect(NULL, 5, 15, heart_beat, + _("Director daemon"), m_console->m_dir->address, + NULL, m_console->m_dir->DIRport, 0)) { + m_sock->destroy(); + m_sock = NULL; + } + if (m_sock == NULL) { + mainWin->set_status("Connection failed"); + if (mainWin->m_connDebug) { + Pmsg2(000, "DirComm %i BAILING Connection failed %s\n", m_conn, m_console->m_dir->name()); + } + goto bail_out; + } else { + /* Update page selector to green to indicate that Console is connected */ + mainWin->actionConnect->setIcon(QIcon(":images/connected.png")); + QBrush greenBrush(Qt::green); + QTreeWidgetItem *item = mainWin->getFromHash(m_console); + if (item) { + item->setForeground(0, greenBrush); + } + } + + jcr->dir_bsock = m_sock; + + if (!authenticate_director(jcr, m_console->m_dir, cons, buf, sizeof(buf))) { + m_console->display_text(buf); + if (mainWin->m_connDebug) { + Pmsg2(000, "DirComm %i BAILING Connection failed %s\n", m_conn, m_console->m_dir->name()); + } + goto bail_out; + } + + if (buf[0]) { + m_console->display_text(buf); + } + + /* Give GUI a chance */ + app->processEvents(); + + mainWin->set_status(_("Initializing ...")); + + /* + * Set up input notifier + */ + m_notifier = new QSocketNotifier(m_sock->m_fd, QSocketNotifier::Read, 0); + QObject::connect(m_notifier, SIGNAL(activated(int)), this, SLOT(notify_read_dir(int))); + m_notifier->setEnabled(true); + m_notify = true; + + write(".api 1"); + m_api_set = true; + m_console->displayToPrompt(m_conn); + + m_console->beginNewCommand(m_conn); + + mainWin->set_status(_("Connected")); + + if (mainWin->m_connDebug) { + Pmsg2(000, "Returning TRUE from DirComm->connect_dir : %i %s\n", m_conn, m_console->m_dir->name()); + } + return true; + +bail_out: + if (mainWin->m_connDebug) { + Pmsg2(000, "Returning FALSE from DirComm->connect_dir : %i %s\n", m_conn, m_console->m_dir->name()); + } + delete jcr; + return false; +} + +/* + * This should be moved into a bSocket class + */ +char *DirComm::msg() +{ + if (m_sock) { + return m_sock->msg; + } + return NULL; +} + +int DirComm::write(const QString msg) +{ + return write(msg.toUtf8().data()); +} + +int DirComm::write(const char *msg) +{ + if (!m_sock) { + return -1; + } + m_sock->msglen = pm_strcpy(m_sock->msg, msg); + m_at_prompt = false; + m_at_main_prompt = false; + if (mainWin->m_commDebug) Pmsg2(000, "conn %i send: %s\n", m_conn, msg); + /* + * Ensure we send only one blank line. Multiple blank lines are + * simply discarded, it keeps the console output looking nicer. 
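 * (Editor's note, not part of the original source: m_sent_blank remembers
 *  that the previous send was empty, so a run of empty command lines from
 *  the user results in a single newline being sent to the Director.)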
+ */ + if (m_sock->msglen == 0 || (m_sock->msglen == 1 && *m_sock->msg == '\n')) { + if (!m_sent_blank) { + m_sent_blank = true; + return m_sock->send(); + } else { + return -1; /* discard multiple blanks */ + } + } + m_sent_blank = false; /* clear flag */ + return m_sock->send(); +} + +int DirComm::sock_read() +{ + int stat; +#ifdef HAVE_WIN32 + bool wasEnabled = notify(false); + stat = m_sock->recv(); + notify(wasEnabled); +#else + stat = m_sock->recv(); +#endif + return stat; +} + +/* + * Blocking read from director + */ +int DirComm::read() +{ + int stat = -1; + + if (!m_sock) { + return -1; + } + while (m_sock) { + /* Poll DIR every 50 milliseconds */ + for (;;) { + if (!m_sock) break; + stat = m_sock->wait_data_intr(0, 50); /* wait 50 milliseconds */ + if (stat > 0) { + break; + } + app->processEvents(); + if (m_api_set && m_console->is_messagesPending() && is_notify_enabled() && m_console->hasFocus()) { + if (mainWin->m_commDebug) Pmsg1(000, "conn %i process_events\n", m_conn); + m_console->messagesPending(false); + m_console->write_dir(m_conn, ".messages", false); + } + } + if (!m_sock) { + return -1; + } + m_sock->msg[0] = 0; + stat = sock_read(); + if (stat >= 0) { + if (mainWin->m_commDebug) Pmsg2(000, "conn %i got: %s\n", m_conn, m_sock->msg); + if (m_at_prompt) { + m_console->display_text("\n"); + m_at_prompt = false; + m_at_main_prompt = false; + } + } + switch (m_sock->msglen) { + case BNET_MSGS_PENDING : + if (is_notify_enabled() && m_console->hasFocus()) { + m_console->messagesPending(false); + if (mainWin->m_commDebug) Pmsg1(000, "conn %i MSGS PENDING\n", m_conn); + m_console->write_dir(m_conn, ".messages", false); + m_console->displayToPrompt(m_conn); + continue; + } + m_console->messagesPending(true); + continue; + case BNET_CMD_OK: + if (mainWin->m_commDebug) Pmsg1(000, "conn %i CMD OK\n", m_conn); + m_at_prompt = false; + m_at_main_prompt = false; + if (--m_in_command < 0) { + m_in_command = 0; + } + mainWin->set_status(_("Command completed ...")); + continue; + case BNET_CMD_BEGIN: + if (mainWin->m_commDebug) Pmsg1(000, "conn %i CMD BEGIN\n", m_conn); + m_at_prompt = false; + m_at_main_prompt = false; + m_in_command++; + mainWin->set_status(_("Processing command ...")); + continue; + case BNET_MAIN_PROMPT: + if (mainWin->m_commDebug) Pmsg1(000, "conn %i MAIN PROMPT\n", m_conn); + if (!m_at_prompt && ! 
m_at_main_prompt) { + m_at_prompt = true; + m_at_main_prompt = true; + mainWin->set_status(_("At main prompt waiting for input ...")); + } + break; + case BNET_SUB_PROMPT: + if (mainWin->m_commDebug) Pmsg2(000, "conn %i SUB_PROMPT m_in_select=%d\n", m_conn, m_in_select); + m_at_prompt = true; + m_at_main_prompt = false; + mainWin->set_status(_("At prompt waiting for input ...")); + break; + case BNET_TEXT_INPUT: + if (mainWin->m_commDebug) Pmsg4(000, "conn %i TEXT_INPUT at_prompt=%d m_in_select=%d notify=%d\n", + m_conn, m_at_prompt, m_in_select, is_notify_enabled()); + if (!m_in_select && is_notify_enabled()) { + new textInputDialog(m_console, m_conn); + if (mainWin->m_commDebug) Pmsg0(000, "!m_in_select && is_notify_enabled\n"); + m_at_prompt = true; + m_at_main_prompt = false; + mainWin->set_status(_("At prompt waiting for input ...")); + } + break; + case BNET_CMD_FAILED: + if (mainWin->m_commDebug) Pmsg1(000, "CMD FAILED\n", m_conn); + if (--m_in_command < 0) { + m_in_command = 0; + } + mainWin->set_status(_("Command failed.")); + break; + /* We should not get this one */ + case BNET_EOD: + if (mainWin->m_commDebug) Pmsg1(000, "conn %i EOD\n", m_conn); + mainWin->set_status_ready(); + if (!m_api_set) { + break; + } + continue; + case BNET_START_SELECT: + if (mainWin->m_commDebug) Pmsg1(000, "conn %i START SELECT\n", m_conn); + m_in_select = true; + new selectDialog(m_console, m_conn); + m_in_select = false; + break; + case BNET_YESNO: + if (mainWin->m_commDebug) Pmsg1(000, "conn %i YESNO\n", m_conn); + new yesnoPopUp(m_console, m_conn); + break; + case BNET_RUN_CMD: + if (mainWin->m_commDebug) Pmsg1(000, "conn %i RUN CMD\n", m_conn); + new runCmdPage(m_conn); + break; + case BNET_START_RTREE: + if (mainWin->m_commDebug) Pmsg1(000, "conn %i START RTREE CMD\n", m_conn); + new restorePage(m_conn); + break; + case BNET_END_RTREE: + if (mainWin->m_commDebug) Pmsg1(000, "conn %i END RTREE CMD\n", m_conn); + break; + case BNET_ERROR_MSG: + if (mainWin->m_commDebug) Pmsg1(000, "conn %i ERROR MSG\n", m_conn); + stat = sock_read(); /* get the message */ + m_console->display_text(msg()); + QMessageBox::critical(m_console, "Error", msg(), QMessageBox::Ok); + m_console->beginNewCommand(m_conn); + mainWin->waitExit(); + break; + case BNET_WARNING_MSG: + if (mainWin->m_commDebug) Pmsg1(000, "conn %i WARNING MSG\n", m_conn); + stat = sock_read(); /* get the message */ + if (!m_console->m_warningPrevent) { + QMessageBox::critical(m_console, "Warning", msg(), QMessageBox::Ok); + } + break; + case BNET_INFO_MSG: + if (mainWin->m_commDebug) Pmsg1(000, "conn %i INFO MSG\n", m_conn); + stat = sock_read(); /* get the message */ + m_console->display_text(msg()); + mainWin->set_status(msg()); + break; + } + + if (!m_sock) { + stat = BNET_HARDEOF; + return stat; + } + if (m_sock->is_stop()) { /* error or term request */ + if (mainWin->m_commDebug) Pmsg1(000, "conn %i BNET STOP\n", m_conn); + m_console->stopTimer(); + free_bsock(m_sock); + mainWin->actionConnect->setIcon(QIcon(":images/disconnected.png")); + QBrush redBrush(Qt::red); + QTreeWidgetItem *item = mainWin->getFromHash(m_console); + item->setForeground(0, redBrush); + if (m_notifier) { + m_notifier->setEnabled(false); + delete m_notifier; + m_notifier = NULL; + m_notify = false; + } + mainWin->set_status(_("Director disconnected.")); + stat = BNET_HARDEOF; + } + break; + } + return stat; +} + +/* Called by signal when the Director has output for us */ +void DirComm::notify_read_dir(int /* fd */) +{ + int stat; + if (!mainWin->m_notify) { + return; + 
} + if (mainWin->m_commDebug) Pmsg1(000, "enter read_dir conn %i read_dir\n", m_conn); + stat = m_sock->wait_data(0, 5000); + if (stat > 0) { + if (mainWin->m_commDebug) Pmsg2(000, "read_dir conn %i stat=%d\n", m_conn, stat); + while (read() >= 0) { + m_console->display_text(msg()); + } + } + if (mainWin->m_commDebug) Pmsg2(000, "exit read_dir conn %i stat=%d\n", m_conn, stat); +} + +/* + * When the notifier is enabled, read_dir() will automatically be + * called by the Qt event loop when ever there is any output + * from the Director, and read_dir() will then display it on + * the console. + * + * When we are in a bat dialog, we want to control *all* output + * from the Directory, so we set notify to off. + * m_console->notify(false); + */ +bool DirComm::notify(bool enable) +{ + bool prev_enabled = false; + /* Set global flag */ + mainWin->m_notify = enable; + if (m_notifier) { + prev_enabled = m_notifier->isEnabled(); + m_notifier->setEnabled(enable); + m_notify = enable; + if (mainWin->m_connDebug) Pmsg3(000, "conn=%i set_notify=%d prev=%d\n", m_conn, enable, prev_enabled); + } else if (mainWin->m_connDebug) { + Pmsg2(000, "m_notifier does not exist: %i %s\n", m_conn, m_console->m_dir->name()); + } + return prev_enabled; +} + +bool DirComm::is_notify_enabled() const +{ + return m_notify; +} + +/* + * Call-back for reading a passphrase for an encrypted PEM file + * This function uses getpass(), + * which uses a static buffer and is NOT thread-safe. + */ +static int tls_pem_callback(char *buf, int size, const void *userdata) +{ + (void)size; + (void)userdata; +#ifdef HAVE_TLS +# if defined(HAVE_WIN32) + //sendit(prompt); + if (win32_cgets(buf, size) == NULL) { + buf[0] = 0; + return 0; + } else { + return strlen(buf); + } +# else + const char *prompt = (const char *)userdata; + char *passwd; + + passwd = getpass(prompt); + bstrncpy(buf, passwd, size); + return strlen(buf); +# endif +#else + buf[0] = 0; + return 0; +#endif +} diff --git a/src/qt-console/bcomm/dircomm.h b/src/qt-console/bcomm/dircomm.h new file mode 100644 index 00000000..aca39d2f --- /dev/null +++ b/src/qt-console/bcomm/dircomm.h @@ -0,0 +1,83 @@ +#ifndef _DIRCOMM_H_ +#define _DIRCOMM_H_ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Kern Sibbald, January 2007 + */ + +#if QT_VERSION >= 0x050000 +#include +#else +#include +#endif +#include "pages.h" +#include "ui_console.h" +#include + +#ifndef MAX_NAME_LENGTH +#define MAX_NAME_LENGTH 128 +#endif + +class DIRRES; +class BSOCK; +class JCR; +class CONRES; + +//class DirComm : public QObject +class DirComm : public QObject +{ + Q_OBJECT + friend class Console; +public: + DirComm(Console *parent, int conn); + ~DirComm(); + Console *m_console; + int sock_read(); + bool authenticate_director(JCR *jcr, DIRRES *director, CONRES *cons, + char *buf, int buflen); + bool is_connected() { return m_sock != NULL; } + bool is_ready() { return is_connected() && m_at_prompt && m_at_main_prompt; } + char *msg(); + bool notify(bool enable); // enables/disables socket notification - returns the previous state + bool is_notify_enabled() const; + bool is_in_command() const { return m_in_command > 0; } + void terminate(); + bool connect_dir(); + int read(void); + int write(const char *msg); + int write(QString msg); + +public slots: + void notify_read_dir(int fd); + +private: + BSOCK *m_sock; + bool m_at_prompt; + bool m_at_main_prompt; + bool m_sent_blank; + bool m_notify; + int m_in_command; + QSocketNotifier *m_notifier; + bool m_api_set; + int m_conn; + bool m_in_select; +}; + +#endif /* _DIRCOMM_H_ */ diff --git a/src/qt-console/bcomm/dircomm_auth.cpp b/src/qt-console/bcomm/dircomm_auth.cpp new file mode 100644 index 00000000..9da403e8 --- /dev/null +++ b/src/qt-console/bcomm/dircomm_auth.cpp @@ -0,0 +1,188 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * Bacula UA authentication. Provides authentication with + * the Director. 
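 * (Editor's outline of the exchange implemented below, added for orientation:
 *  bat sends the Hello line with the bashed Console name and BAT_VERSION,
 *  both sides complete a mutual CRAM-MD5 exchange, the advertised TLS
 *  requirements are compared and TLS is started when both ends allow it, and
 *  the "1000 OK" reply is parsed; a Director version of 1 or newer enables
 *  comm line compression.)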
+ * + * Kern Sibbald, June MMI adapted to bat, Jan MMVI + * + */ + + +#include "bat.h" + +/* + * Version at end of Hello + * prior to 06Aug13 no version + * 1 21Oct13 - added comm line compression + */ +#define BAT_VERSION 1 + + +/* Commands sent to Director */ +static char hello[] = "Hello %s calling %d\n"; + +/* Response from Director */ +static char oldOKhello[] = "1000 OK:"; +static char newOKhello[] = "1000 OK: %d"; +static char FDOKhello[] = "2000 OK Hello %d"; + +/* Forward referenced functions */ + +/* + * Authenticate Director + */ +bool DirComm::authenticate_director(JCR *jcr, DIRRES *director, CONRES *cons, + char *errmsg, int errmsg_len) +{ + BSOCK *dir = jcr->dir_bsock; + int tls_local_need = BNET_TLS_NONE; + int tls_remote_need = BNET_TLS_NONE; + int dir_version = 0; + bool tls_authenticate; + int compatible = true; + char bashed_name[MAX_NAME_LENGTH]; + char *password; + TLS_CONTEXT *tls_ctx = NULL; + + errmsg[0] = 0; + /* + * Send my name to the Director then do authentication + */ + if (cons) { + bstrncpy(bashed_name, cons->hdr.name, sizeof(bashed_name)); + bash_spaces(bashed_name); + password = cons->password; + /* TLS Requirement */ + if (cons->tls_enable) { + if (cons->tls_require) { + tls_local_need = BNET_TLS_REQUIRED; + } else { + tls_local_need = BNET_TLS_OK; + } + } + tls_authenticate = cons->tls_authenticate; + tls_ctx = cons->tls_ctx; + } else { + bstrncpy(bashed_name, "*UserAgent*", sizeof(bashed_name)); + password = director->password; + /* TLS Requirement */ + if (director->tls_enable) { + if (director->tls_require) { + tls_local_need = BNET_TLS_REQUIRED; + } else { + tls_local_need = BNET_TLS_OK; + } + } + + tls_authenticate = director->tls_authenticate; + tls_ctx = director->tls_ctx; + } + if (tls_authenticate) { + tls_local_need = BNET_TLS_REQUIRED; + } + + /* Timeout Hello after 15 secs */ + dir->start_timer(15); + dir->fsend(hello, bashed_name, BAT_VERSION); + + /* respond to Dir challenge */ + if (!cram_md5_respond(dir, password, &tls_remote_need, &compatible) || + /* Now challenge dir */ + !cram_md5_challenge(dir, password, tls_local_need, compatible)) { + bsnprintf(errmsg, errmsg_len, _("Director authorization problem at \"%s:%d\"\n"), + dir->host(), dir->port()); + goto bail_out; + } + + /* Verify that the remote host is willing to meet our TLS requirements */ + if (tls_remote_need < tls_local_need && tls_local_need != BNET_TLS_OK && tls_remote_need != BNET_TLS_OK) { + bsnprintf(errmsg, errmsg_len, _("Authorization problem:" + " Remote server at \"%s:%d\" did not advertise required TLS support.\n"), + dir->host(), dir->port()); + goto bail_out; + } + + /* Verify that we are willing to meet the remote host's requirements */ + if (tls_remote_need > tls_local_need && tls_local_need != BNET_TLS_OK && tls_remote_need != BNET_TLS_OK) { + bsnprintf(errmsg, errmsg_len, _("Authorization problem with Director at \"%s:%d\":" + " Remote server requires TLS.\n"), + dir->host(), dir->port()); + + goto bail_out; + } + + /* Is TLS Enabled? */ + if (tls_local_need >= BNET_TLS_OK && tls_remote_need >= BNET_TLS_OK) { + /* Engage TLS! Full Speed Ahead! */ + if (!bnet_tls_client(tls_ctx, dir, NULL)) { + bsnprintf(errmsg, errmsg_len, _("TLS negotiation failed with Director at \"%s:%d\"\n"), + dir->host(), dir->port()); + goto bail_out; + } + if (tls_authenticate) { /* authenticate only? 
*/ + dir->free_tls(); /* Yes, shutdown tls */ + } + } + + Dmsg1(6, ">dird: %s", dir->msg); + if (dir->recv() <= 0) { + dir->stop_timer(); + bsnprintf(errmsg, errmsg_len, _("Bad response to Hello command: ERR=%s\n" + "The Director at \"%s:%d\" is probably not running.\n"), + dir->bstrerror(), dir->host(), dir->port()); + return false; + } + + dir->stop_timer(); + Dmsg1(10, "<dird: %s", dir->msg); + if (strncmp(dir->msg, oldOKhello, sizeof(oldOKhello)-1) == 0) { + /* If Dir version exists, get it */ + sscanf(dir->msg, newOKhello, &dir_version); + + /* We do not check the last %d */ + } else if (strncmp(dir->msg, FDOKhello, sizeof(FDOKhello)-3) == 0) { + sscanf(dir->msg, FDOKhello, &dir_version); + // TODO: Keep somewhere that we need a proxy command, or run it directly? + } else { + bsnprintf(errmsg, errmsg_len, _("Director at \"%s:%d\" rejected Hello command\n"), + dir->host(), dir->port()); + return false; + } + + /* Turn on compression for newer Directors */ + if (dir_version >= 1 && (!cons || cons->comm_compression)) { + dir->set_compress(); + } + + if (m_conn == 0) { + bsnprintf(errmsg, errmsg_len, "%s", dir->msg); + } + return true; + +bail_out: + dir->stop_timer(); + bsnprintf(errmsg, errmsg_len, _("Authorization problem with Director at \"%s:%d\"\n" + "Most likely the passwords do not agree.\n" + "If you are using TLS, there may have been a certificate validation error during the TLS handshake.\n" + "For help, please see " MANUAL_AUTH_URL "\n"), + dir->host(), dir->port()); + return false; +} diff --git a/src/qt-console/build-depkgs-qt-console b/src/qt-console/build-depkgs-qt-console new file mode 100755 index 00000000..6a762246 --- /dev/null +++ b/src/qt-console/build-depkgs-qt-console @@ -0,0 +1,165 @@ +#!/bin/sh +# +# This file is driven by the parameters that are defined in +# the file External-qt-console +# + +usage() +{ + echo "usage: $0 [-h] [-C] [<dependency>] [<dependency>] ..." + echo " -h Displays this usage" + echo " -C Clobbers (overwrites) the source code by " + echo " reextracting the archive and reapplying the" + echo " patches." + echo "" + echo " <dependency> Optional dependency, If none are given then all" + echo " of them will be built." + echo "" + echo "Valid dependencies are:" + grep -v '^#' < External-qt-console | cut -d'|' -f1 | cut -d'_' -f1 | tr A-Z a-z | sort -u | awk '{ print " " $1 }' +} + +CLOBBER_SOURCE= + +while getopts "hHC" opt; do + case ${opt} in + H|h|\?) usage;exit 1;; + C) CLOBBER_SOURCE=true;; + esac +done + +[ ${OPTIND} -gt 1 ] && shift `expr ${OPTIND} - 1` + +cwd=`pwd` +cd `dirname $0` +SCRIPT_DIR=`pwd` + +TOP_DIR=`pwd` + +[ !
-e ${TOP_DIR}/depkgs ] && mkdir ${TOP_DIR}/depkgs +cd ${TOP_DIR}/depkgs +DEPPKG_DIR=`pwd` + +OLD_IFS=${IFS};IFS="|"; +while read package url dir mkd; do + case ${package} in + \#*) ;; + *) eval "URL_${package}=${url};DIR_${package}=${dir};MKD_${package}=${mkd}";; + esac +done < ${SCRIPT_DIR}/External-qt-console +IFS=${OLD_IFS};unset OLD_IFS + +get_source() +{ + URL=$1 + SRC_DIR=$2 + MAKE_DIR=$3 + ARCHIVE=`basename ${URL}` +echo "in get_source URL is $URL SRC_DIR is $SRC_DIR MAKE_DIR is $MAKE_DIR ARCHIVE is $ARCHIVE" + + case ${ARCHIVE} in + *.tar.gz) ARCHIVER="tar xzf"; [ -z "${SRC_DIR}" ] && SRC_DIR=`expr "${ARCHIVE}" : '\(.*\)\.tar\.gz'`;; + *.tar.bz2) ARCHIVER="tar xjf"; [ -z "${SRC_DIR}" ] && SRC_DIR=`expr "${ARCHIVE}" : '\(.*\)\.tar\.bz2'`;; + *.zip) ARCHIVER="unzip -q"; [ -z "${SRC_DIR}" ] && SRC_DIR=`expr "${ARCHIVE}" : '\(.*\)\.zip'`;; + *.exe) ARCHIVER=""; [ -z "${SRC_DIR}" ] && SRC_DIR=`expr "${ARCHIVE}" : '\(.*\)\.zip'`;; + *) echo "Unsupported archive type - $ARCHIVE"; exit 1;; + esac + +# cd ${DEPPKG_DIR}/src + + if [ ! -e "${ARCHIVE}" ] + then + echo "Downloading ${URL}" + if wget --passive-ftp "${URL}" + then + : + else + echo "Unable to download ${ARCHIVE}" + exit 1 + fi + fi + + [ -z "${ARCHIVER}" ] && return 0 + + if [ ! -e "${SRC_DIR}" -o "${CLOBBER_SOURCE}" = "true" ] + then + rm -rf ${SRC_DIR} + echo "Extracting ${ARCHIVE}" + if [ "${MAKE_DIR}" = "true" ] + then + mkdir ${SRC_DIR} + cd ${SRC_DIR} + ${ARCHIVER} ../${ARCHIVE} > ../${ARCHIVE}.log 2>&1 + else + ${ARCHIVER} ${ARCHIVE} > ${ARCHIVE}.log 2>&1 + cd ${SRC_DIR} + fi + return 0 + fi + + cd ${SRC_DIR} + return 1 +} + +parse_output() +{ + sed -ne '/\\$/N' -e 's/\\\n//' -e 's/\t\+/ /g' -e 's/ \+/ /g' \ + -e '/ error: /p' \ + -e "s%.*Entering directory[ ]\\+.${DEPPKG_DIR}/\\([^ ]\+\).%Entering \\1%p" \ + -e "s%.*Leaving directory[ ]\\+.${DEPPKG_DIR}/\\([^ ]\+.\).%Leaving \\1%p" \ + -e '/gcc \|g\+\+ \|ar /!d' \ + -e 's/ \(\.\.\/\)\+/ /g' \ + -e 's/.* \([^ ]\+\(\.c\|\.cpp\|\.cc\|\.cxx\)\)\( .*\|\)$/Compiling \1/p' \ + -e 's/.* \([^ ]\+\.s\)\( .*\|\)$/Assembling \1/p' \ + -e 's/.*ar [^ ]\+ \([^ ]\+\)\(\( [^ ]\+\.o\)\+\)/Updating \1 -\2/p' \ + -e 's/.* -o \([^ ]\+\)\( .*\|\)$/Linking \1/p' +} + +do_patch() +{ + PATCH_FILE=${SCRIPT_DIR}/patches/$1; shift + + if patch -f -p0 "$@" >>patch.log < ${PATCH_FILE} + then + : + else + echo "Patch failed - Check `pwd`/patch.log" > /dev/tty + exit 1 + fi +} + +do_make() +{ + if make -f "$@" 2>&1 + then + : + else + echo "Make failed - Check `pwd`/make.log" > /dev/tty + exit 1 + fi | tee -a make.log #| parse_output +} + +process_qwt() +{ + get_source "${URL_QWT}" "${DIR_QWT}" "${MKD_QWT}" + echo "Building qwt graphics library. This takes some time ..." 
+ echo "unix {" >${TOP_DIR}/depkgs/qwt-5.0.2/qwtconfig.pri + echo " INSTALLBASE = ${TOP_DIR}/qwt" >>${TOP_DIR}/depkgs/qwt-5.0.2/qwtconfig.pri + echo "}" >>${TOP_DIR}/depkgs/qwt-5.0.2/qwtconfig.pri + cat ${TOP_DIR}/qwtconfig.pri >>${TOP_DIR}/depkgs/qwt-5.0.2/qwtconfig.pri + qmake >make.log + do_make Makefile >>make.log + do_make Makefile install >>make.log + echo "In case of problems see: `pwd`/make.log" +} + +if [ "$#" -eq 0 ] +then + process_qwt +else + for dependency in "$@" + do + eval "process_${dependency}" + done + +fi diff --git a/src/qt-console/clients/clients.cpp b/src/qt-console/clients/clients.cpp new file mode 100644 index 00000000..37f288a9 --- /dev/null +++ b/src/qt-console/clients/clients.cpp @@ -0,0 +1,355 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +/* + * Clients Class + * + * Dirk Bartley, March 2007 + * + */ + +#include "bat.h" +#include +#include +#include "clients/clients.h" +#include "run/run.h" +#include "status/clientstat.h" +#include "util/fmtwidgetitem.h" + +Clients::Clients() : Pages() +{ + setupUi(this); + m_name = tr("Clients"); + pgInitialize(); + QTreeWidgetItem* thisitem = mainWin->getFromHash(this); + thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/network-server.png"))); + + /* tableWidget, Storage Tree Tree Widget inherited from ui_client.h */ + m_populated = false; + m_checkcurwidget = true; + m_closeable = false; + m_firstpopulation = true; + /* add context sensitive menu items specific to this classto the page + * selector tree. m_contextActions is QList of QActions */ + m_contextActions.append(actionRefreshClients); + createContextMenu(); +} + +Clients::~Clients() +{ +} + +/* + * The main meat of the class!! The function that queries the director and + * creates the widgets with appropriate values. 
+ */ +void Clients::populateTable() +{ + m_populated = true; + + Freeze frz(*tableWidget); /* disable updating*/ + + QStringList headerlist = (QStringList() << tr("Client Name") << tr("File Retention") + << tr("Job Retention") << tr("AutoPrune") << tr("ClientId") << tr("Uname") ); + + int sortcol = headerlist.indexOf(tr("Client Name")); + Qt::SortOrder sortord = Qt::AscendingOrder; + if (tableWidget->rowCount()) { + sortcol = tableWidget->horizontalHeader()->sortIndicatorSection(); + sortord = tableWidget->horizontalHeader()->sortIndicatorOrder(); + } + + m_checkcurwidget = false; + tableWidget->clear(); + m_checkcurwidget = true; + + tableWidget->setColumnCount(headerlist.count()); + tableWidget->setHorizontalHeaderLabels(headerlist); + tableWidget->horizontalHeader()->setHighlightSections(false); + tableWidget->setRowCount(m_console->client_list.count()); + tableWidget->verticalHeader()->hide(); + tableWidget->setSelectionBehavior(QAbstractItemView::SelectRows); + tableWidget->setSelectionMode(QAbstractItemView::SingleSelection); + tableWidget->setSortingEnabled(false); /* rows move on insert if sorting enabled */ + bool first = true; + QString client_comsep(""); + foreach (QString clientName, m_console->client_list){ + if (first) { + client_comsep += "'" + clientName + "'"; + first = false; + } + else + client_comsep += ",'" + clientName + "'"; + } + + if (client_comsep != "") { + QString query(""); + query += "SELECT Name, FileRetention, JobRetention, AutoPrune, ClientId, Uname" + " FROM Client" + " WHERE ClientId IN (SELECT MAX(ClientId) FROM Client WHERE"; + query += " Name IN (" + client_comsep + ")"; + query += " GROUP BY Name) ORDER BY Name"; + + QStringList results; + if (mainWin->m_sqlDebug) + Pmsg1(000, "Clients query cmd : %s\n",query.toUtf8().data()); + if (m_console->sql_cmd(query, results)) { + int row = 0; + + /* Iterate through the record returned from the query */ + foreach (QString resultline, results) { + QStringList fieldlist = resultline.split("\t"); + + if (fieldlist.size() < 5) { // Uname is checked after + Pmsg1(0, "Unexpected line %s\n", resultline.toUtf8().data()); + continue; + } + if (m_firstpopulation) { + settingsOpenStatus(fieldlist[0]); + } + + TableItemFormatter item(*tableWidget, row); + + /* Iterate through fields in the record */ + QStringListIterator fld(fieldlist); + int col = 0; + + /* name */ + item.setTextFld(col++, fld.next()); + + /* file retention */ + item.setDurationFld(col++, fld.next()); + + /* job retention */ + item.setDurationFld(col++, fld.next()); + + /* autoprune */ + item.setBoolFld(col++, fld.next()); + + /* client id */ + item.setNumericFld(col++, fld.next()); + + /* uname */ + if (fld.hasNext()) { + item.setTextFld(col++, fld.next()); + } else { + item.setTextFld(col++, ""); + } + + row++; + } + } + } + /* set default sorting */ + tableWidget->sortByColumn(sortcol, sortord); + tableWidget->setSortingEnabled(true); + + /* Resize rows and columns */ + tableWidget->resizeColumnsToContents(); + tableWidget->resizeRowsToContents(); + + /* make read only */ + int rcnt = tableWidget->rowCount(); + int ccnt = tableWidget->columnCount(); + for(int r=0; r < rcnt; r++) { + for(int c=0; c < ccnt; c++) { + QTableWidgetItem* item = tableWidget->item(r, c); + if (item) { + item->setFlags(Qt::ItemFlags(item->flags() & (~Qt::ItemIsEditable))); + } + } + } + m_firstpopulation = false; +} + +/* + * When the treeWidgetItem in the page selector tree is singleclicked, Make sure + * The tree has been populated. 
+ */ +void Clients::PgSeltreeWidgetClicked() +{ + if(!m_populated) { + populateTable(); + } + if (!isOnceDocked()) { + dockPage(); + } +} + +/* + * Added to set the context menu policy based on currently active treeWidgetItem + * signaled by currentItemChanged + */ +void Clients::tableItemChanged(QTableWidgetItem *currentwidgetitem, QTableWidgetItem *previouswidgetitem ) +{ + /* m_checkcurwidget checks to see if this is during a refresh, which will segfault */ + if (m_checkcurwidget) { + int currentRow = currentwidgetitem->row(); + QTableWidgetItem *currentrowzeroitem = tableWidget->item(currentRow, 0); + m_currentlyselected = currentrowzeroitem->text(); + + /* The Previous item */ + if (previouswidgetitem) { /* avoid a segfault if first time */ + tableWidget->removeAction(actionListJobsofClient); + tableWidget->removeAction(actionStatusClientWindow); + tableWidget->removeAction(actionPurgeJobs); + tableWidget->removeAction(actionPrune); + } + + if (m_currentlyselected.length() != 0) { + /* set a hold variable to the client name in case the context sensitive + * menu is used */ + tableWidget->addAction(actionListJobsofClient); + tableWidget->addAction(actionStatusClientWindow); + tableWidget->addAction(actionPurgeJobs); + tableWidget->addAction(actionPrune); + } + } +} + +/* + * Setup a context menu + * Made separate from populate so that it would not create context menu over and + * over as the tree is repopulated. + */ +void Clients::createContextMenu() +{ + tableWidget->setContextMenuPolicy(Qt::ActionsContextMenu); + tableWidget->addAction(actionRefreshClients); + /* for the tableItemChanged to maintain m_currentJob */ + connect(tableWidget, SIGNAL( + currentItemChanged(QTableWidgetItem *, QTableWidgetItem *)), + this, SLOT(tableItemChanged(QTableWidgetItem *, QTableWidgetItem *))); + + /* connect to the action specific to this pages class */ + connect(actionRefreshClients, SIGNAL(triggered()), this, + SLOT(populateTable())); + connect(actionListJobsofClient, SIGNAL(triggered()), this, + SLOT(showJobs())); + connect(actionStatusClientWindow, SIGNAL(triggered()), this, + SLOT(statusClientWindow())); + connect(actionPurgeJobs, SIGNAL(triggered()), this, + SLOT(consolePurgeJobs())); + connect(actionPrune, SIGNAL(triggered()), this, + SLOT(prune())); +} + +/* + * Function responding to actionListJobsofClient which calls mainwin function + * to create a window of a list of jobs of this client. + */ +void Clients::showJobs() +{ + QTreeWidgetItem *parentItem = mainWin->getFromHash(this); + mainWin->createPageJobList("", m_currentlyselected, "", "", parentItem); +} + +/* + * Function responding to actionListJobsofClient which calls mainwin function + * to create a window of a list of jobs of this client. + */ +void Clients::consoleStatusClient() +{ + QString cmd("status client="); + cmd += m_currentlyselected; + consoleCommand(cmd); +} + +/* + * Virtual function which is called when this page is visible on the stack + */ +void Clients::currentStackItem() +{ + if(!m_populated) { + populateTable(); + /* Create the context menu for the client table */ + } +} + +/* + * Function responding to actionPurgeJobs + */ +void Clients::consolePurgeJobs() +{ + if (QMessageBox::warning(this, "Bat", + tr("Are you sure you want to purge all jobs of client \"%1\" ?\n" +"The Purge command will delete associated Catalog database records from Jobs and" +" Volumes without considering the retention period. Purge works only on the" +" Catalog database and does not affect data written to Volumes. 
This command can" +" be dangerous because you can delete catalog records associated with current" +" backups of files, and we recommend that you do not use it unless you know what" +" you are doing.\n\n" +" Is there any way I can get you to click Cancel here? You really don't want to do" +" this\n\n" + "Press OK to proceed with the purge operation?").arg(m_currentlyselected), + QMessageBox::Ok | QMessageBox::Cancel, + QMessageBox::Cancel) + == QMessageBox::Cancel) { return; } + + QString cmd("purge jobs client="); + cmd += m_currentlyselected; + consoleCommand(cmd); +} + +/* + * Function responding to actionPrune + */ +void Clients::prune() +{ + new prunePage("", m_currentlyselected); +} + +/* + * Function responding to action to create new client status window + */ +void Clients::statusClientWindow() +{ + /* if one exists, then just set it current */ + bool found = false; + foreach(Pages *page, mainWin->m_pagehash) { + if (mainWin->currentConsole() == page->console()) { + if (page->name() == tr("Client Status %1").arg(m_currentlyselected)) { + found = true; + page->setCurrent(); + } + } + } + if (!found) { + QTreeWidgetItem *parentItem = mainWin->getFromHash(this); + new ClientStat(m_currentlyselected, parentItem); + } +} + +/* + * If first time, then check to see if there were status pages open the last time closed + * if so open + */ +void Clients::settingsOpenStatus(QString &client) +{ + QSettings settings(m_console->m_dir->name(), "bat"); + + settings.beginGroup("OpenOnExit"); + QString toRead = "ClientStatus_" + client; + if (settings.value(toRead) == 1) { + new ClientStat(client, mainWin->getFromHash(this)); + setCurrent(); + mainWin->getFromHash(this)->setExpanded(true); + } + settings.endGroup(); +} diff --git a/src/qt-console/clients/clients.h b/src/qt-console/clients/clients.h new file mode 100644 index 00000000..8852fa9d --- /dev/null +++ b/src/qt-console/clients/clients.h @@ -0,0 +1,64 @@ +#ifndef _CLIENTS_H_ +#define _CLIENTS_H_ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Dirk Bartley, March 2007 + */ + +#if QT_VERSION >= 0x050000 +#include +#else +#include +#endif +#include "ui_clients.h" +#include "console.h" +#include "pages.h" + +class Clients : public Pages, public Ui::ClientForm +{ + Q_OBJECT + +public: + Clients(); + ~Clients(); + virtual void PgSeltreeWidgetClicked(); + virtual void currentStackItem(); + +public slots: + void tableItemChanged(QTableWidgetItem *, QTableWidgetItem *); + +private slots: + void populateTable(); + void showJobs(); + void consoleStatusClient(); + void statusClientWindow(); + void consolePurgeJobs(); + void prune(); + +private: + void createContextMenu(); + void settingsOpenStatus(QString& client); + QString m_currentlyselected; + bool m_populated; + bool m_firstpopulation; + bool m_checkcurwidget; +}; + +#endif /* _CLIENTS_H_ */ diff --git a/src/qt-console/clients/clients.ui b/src/qt-console/clients/clients.ui new file mode 100644 index 00000000..b6fd03a5 --- /dev/null +++ b/src/qt-console/clients/clients.ui @@ -0,0 +1,82 @@ + + ClientForm + + + + 0 + 0 + 492 + 428 + + + + Client Tree + + + + + + + + + + :/images/view-refresh.png:/images/view-refresh.png + + + Refresh Client List + + + Requery the director for the list of clients. + + + + + + :/images/emblem-system.png:/images/emblem-system.png + + + List Jobs of Client + + + Open a joblist page selecting this client. + + + + + + :/images/weather-severe-alert.png:/images/weather-severe-alert.png + + + Purge Jobs + + + Purge jobs peformed from this client. + + + + + + :/images/edit-cut.png:/images/edit-cut.png + + + Prune Jobs + + + Open the diaolog to prune for this client. + + + + + + :/images/status.png:/images/status.png + + + Status Client + + + + + + + + diff --git a/src/qt-console/console/console.cpp b/src/qt-console/console/console.cpp new file mode 100644 index 00000000..a183b919 --- /dev/null +++ b/src/qt-console/console/console.cpp @@ -0,0 +1,931 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Console Class + * + * Written by Kern Sibbald, January MMVII + * + */ + +#include "bat.h" +#include "console.h" +#include "restore.h" +#include "select.h" +#include "run/run.h" + +Console::Console(QTabWidget *parent) : Pages() +{ + QFont font; + m_name = tr("Console"); + m_messages_pending = false; + m_parent = parent; + m_closeable = false; + m_console = this; + m_warningPrevent = false; + m_dircommCounter = 0; + + /* + * Create a connection to the Director and put it in a hash table + */ + m_dircommHash.insert(m_dircommCounter, new DirComm(this, m_dircommCounter)); + + setupUi(this); + m_textEdit = textEdit; /* our console screen */ + m_cursor = new QTextCursor(m_textEdit->document()); + mainWin->actionConnect->setIcon(QIcon(":images/disconnected.png")); + + m_timer = NULL; + m_contextActions.append(actionStatusDir); + m_contextActions.append(actionConsoleHelp); + m_contextActions.append(actionRequestMessages); + m_contextActions.append(actionConsoleReload); + connect(actionStatusDir, SIGNAL(triggered()), this, SLOT(status_dir())); + connect(actionConsoleHelp, SIGNAL(triggered()), this, SLOT(consoleHelp())); + connect(actionConsoleReload, SIGNAL(triggered()), this, SLOT(consoleReload())); + connect(actionRequestMessages, SIGNAL(triggered()), this, SLOT(messages())); +} + +Console::~Console() +{ +} + +void Console::startTimer() +{ + m_timer = new QTimer(this); + QWidget::connect(m_timer, SIGNAL(timeout()), this, SLOT(poll_messages())); + m_timer->start(mainWin->m_checkMessagesInterval*30000); +} + +void Console::stopTimer() +{ + if (m_timer) { + QWidget::disconnect(m_timer, SIGNAL(timeout()), this, SLOT(poll_messages())); + m_timer->stop(); + delete m_timer; + m_timer = NULL; + } +} + +/* slot connected to the timer + * requires preferences of check messages and operates at interval */ +void Console::poll_messages() +{ + int conn; + + /* Do not poll if notifier off */ + if (!mainWin->m_notify) { + return; + } + + /* + * Note if we call getDirComm here, we continuously consume + * file descriptors. + */ + if (!findDirComm(conn)) { /* find a free DirComm */ + return; /* try later */ + } + + DirComm *dircomm = m_dircommHash.value(conn); + if (mainWin->m_checkMessages && dircomm->m_at_main_prompt && hasFocus() && !mainWin->getWaitState()){ + dircomm->write(".messages"); + displayToPrompt(conn); + messagesPending(false); + } +} + +/* + * Connect to Director. This does not connect to the director, dircomm does. 
+ * This creates the first and possibly 2nd dircomm instance + */ +void Console::connect_dir() +{ + DirComm *dircomm = m_dircommHash.value(0); + + if (!m_console->m_dir) { + mainWin->set_status( tr("No Director found.")); + return; + } + + m_textEdit = textEdit; /* our console screen */ + + if (dircomm->connect_dir()) { + if (mainWin->m_connDebug) { + Pmsg1(000, "DirComm 0 Seems to have Connected %s\n", m_dir->name()); + } + beginNewCommand(0); + } + mainWin->set_status(_("Connected")); + + startTimer(); /* start message timer */ +} + +/* + * A function created to separate out the population of the lists + * from the Console::connect_dir function + */ +void Console::populateLists(bool /*forcenew*/) +{ + int conn; + if (!getDirComm(conn)) { + if (mainWin->m_connDebug) Pmsg0(000, "call newDirComm\n"); + if (!newDirComm(conn)) { + Emsg1(M_INFO, 0, "Failed to connect to %s for populateLists.\n", m_dir->name()); + return; + } + } + populateLists(conn); + notify(conn, true); +} + +void Console::populateLists(int conn) +{ + job_list.clear(); + restore_list.clear(); + client_list.clear(); + fileset_list.clear(); + messages_list.clear(); + pool_list.clear(); + storage_list.clear(); + type_list.clear(); + level_list.clear(); + volstatus_list.clear(); + mediatype_list.clear(); + dir_cmd(conn, ".jobs", job_list); + dir_cmd(conn, ".jobs type=R", restore_list); + dir_cmd(conn, ".clients", client_list); + dir_cmd(conn, ".filesets", fileset_list); + dir_cmd(conn, ".msgs", messages_list); + dir_cmd(conn, ".pools", pool_list); + dir_cmd(conn, ".storage", storage_list); + dir_cmd(conn, ".types", type_list); + dir_cmd(conn, ".levels", level_list); + dir_cmd(conn, ".volstatus", volstatus_list); + dir_cmd(conn, ".mediatypes", mediatype_list); + dir_cmd(conn, ".locations", location_list); + + if (mainWin->m_connDebug) { + QString dbgmsg = QString("jobs=%1 clients=%2 filesets=%3 msgs=%4 pools=%5 storage=%6 types=%7 levels=%8 conn=%9 %10\n") + .arg(job_list.count()).arg(client_list.count()).arg(fileset_list.count()).arg(messages_list.count()) + .arg(pool_list.count()).arg(storage_list.count()).arg(type_list.count()).arg(level_list.count()) + .arg(conn).arg(m_dir->name()); + Pmsg1(000, "%s", dbgmsg.toUtf8().data()); + } + job_list.sort(); + client_list.sort(); + fileset_list.sort(); + messages_list.sort(); + pool_list.sort(); + storage_list.sort(); + type_list.sort(); + level_list.sort(); +} + +/* + * Overload function for dir_cmd with a QString + * Ease of use + */ +bool Console::dir_cmd(QString &cmd, QStringList &results) +{ + return dir_cmd(cmd.toUtf8().data(), results); +} + +/* + * Overload function for dir_cmd, this is if connection is not worried about + */ +bool Console::dir_cmd(const char *cmd, QStringList &results) +{ + int conn; + if (getDirComm(conn)) { + dir_cmd(conn, cmd, results); + return true; + } else { + Pmsg1(000, "dir_cmd failed to connect to %s\n", m_dir->name()); + return false; + } +} + +/* + * Send a command to the Director, and return the + * results in a QStringList. 
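+ * Typical call pattern from a page, mirroring populateLists() above (the
+ * dot command shown is just an example):
+ *   QStringList results;
+ *   int conn;
+ *   if (getDirComm(conn)) {
+ *      dir_cmd(conn, ".clients", results);   (one client name per entry)
+ *   }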
+ */ +bool Console::dir_cmd(int conn, const char *cmd, QStringList &results) +{ + mainWin->waitEnter(); + DirComm *dircomm = m_dircommHash.value(conn); + int stat; + bool prev_notify = is_notify_enabled(conn); + + if (mainWin->m_connDebug) { + QString dbgmsg = QString("dir_cmd conn %1 %2 %3\n").arg(conn).arg(m_dir->name()).arg(cmd); + Pmsg1(000, "%s", dbgmsg.toUtf8().data()); + } + if (prev_notify) { + notify(conn, false); + } + dircomm->write(cmd); + while ((stat = dircomm->read()) > 0 && dircomm->is_in_command()) { + if (mainWin->m_displayAll) display_text(dircomm->msg()); + strip_trailing_junk(dircomm->msg()); + results << dircomm->msg(); + } + if (stat > 0 && mainWin->m_displayAll) display_text(dircomm->msg()); + if (prev_notify) { + notify(conn, true); /* turn it back on */ + } + discardToPrompt(conn); + mainWin->waitExit(); + return true; /* ***FIXME*** return any command error */ +} + +/* + * OverLoads for sql_cmd + */ +bool Console::sql_cmd(int &conn, QString &query, QStringList &results) +{ + return sql_cmd(conn, query.toUtf8().data(), results, false); +} + +bool Console::sql_cmd(QString &query, QStringList &results) +{ + int conn; + if (!getDirComm(conn)) { + return false; + } + return sql_cmd(conn, query.toUtf8().data(), results, true); +} + +bool Console::sql_cmd(const char *query, QStringList &results) +{ + int conn; + if (!getDirComm(conn)) { + return false; + } + return sql_cmd(conn, query, results, true); +} + +/* + * Send an sql query to the Director, and return the + * results in a QStringList. + */ +bool Console::sql_cmd(int &conn, const char *query, QStringList &results, bool donotify) +{ + DirComm *dircomm = m_dircommHash.value(conn); + int stat; + POOL_MEM cmd(PM_MESSAGE); + bool prev_notify = is_notify_enabled(conn); + + if (!is_connectedGui()) { + return false; + } + + if (mainWin->m_connDebug) Pmsg2(000, "sql_cmd conn %i %s\n", conn, query); + if (donotify) { + dircomm->notify(false); + } + mainWin->waitEnter(); + + pm_strcpy(cmd, ".sql query=\""); + pm_strcat(cmd, query); + pm_strcat(cmd, "\""); + dircomm->write(cmd.c_str()); + while ((stat = dircomm->read()) > 0) { + bool first = true; + if (mainWin->m_displayAll) { + display_text(dircomm->msg()); + display_text("\n"); + } + strip_trailing_junk(dircomm->msg()); + bool doappend = true; + if (first) { + QString dum = dircomm->msg(); + if ((dum.left(6) == "*None*")) doappend = false; + } + if (doappend) { + results << dircomm->msg(); + } + first = false; + } + if (donotify && prev_notify) { + dircomm->notify(true); + } + discardToPrompt(conn); + mainWin->waitExit(); + if (donotify && prev_notify) { + dircomm->notify(true); + } + return !mainWin->isClosing(); /* return false if closing */ +} + +/* + * Overloads for + * Sending a command to the Director + */ +int Console::write_dir(const char *msg) +{ + int conn; + if (getDirComm(conn)) { + write_dir(conn, msg); + } + return conn; +} + +int Console::write_dir(const char *msg, bool dowait) +{ + int conn; + if (getDirComm(conn)) { + write_dir(conn, msg, dowait); + } + return conn; +} + +void Console::write_dir(int conn, const char *msg) +{ + write_dir(conn, msg, true); +} + +/* + * Send a command to the Director + */ +void Console::write_dir(int conn, const char *msg, bool dowait) +{ + DirComm *dircomm = m_dircommHash.value(conn); + + if (dircomm->m_sock) { + mainWin->set_status(_("Processing command ...")); + if (dowait) + mainWin->waitEnter(); + dircomm->write(msg); + if (dowait) + mainWin->waitExit(); + } else { + mainWin->set_status( tr(" Director not connected. 
Click on connect button.")); + mainWin->actionConnect->setIcon(QIcon(":images/disconnected.png")); + QBrush redBrush(Qt::red); + QTreeWidgetItem *item = mainWin->getFromHash(this); + item->setForeground(0, redBrush); + dircomm->m_at_prompt = false; + dircomm->m_at_main_prompt = false; + } +} + +/* + * get_job_defaults overload + */ +bool Console::get_job_defaults(struct job_defaults &job_defs) +{ + int conn; + getDirComm(conn); + return get_job_defaults(conn, job_defs, true); +} + +bool Console::get_job_defaults(int &conn, struct job_defaults &job_defs) +{ + return get_job_defaults(conn, job_defs, true); +} + +/* + * Send a job name to the director, and read all the resulting + * defaults. + */ +bool Console::get_job_defaults(int &conn, struct job_defaults &job_defs, bool donotify) +{ + QString scmd; + int stat; + char *def; + bool prev_notify = is_notify_enabled(conn); + bool rtn = false; + DirComm *dircomm = m_dircommHash.value(conn); + + if (donotify) { + dircomm->notify(false); + } + beginNewCommand(conn); + bool prevWaitState = mainWin->getWaitState(); + if (!prevWaitState) { + mainWin->waitEnter(); + } + if (mainWin->m_connDebug) { + Pmsg2(000, "job_defaults conn %i %s\n", conn, m_dir->name()); + } + scmd = QString(".defaults job=\"%1\"").arg(job_defs.job_name); + dircomm->write(scmd); + while ((stat = dircomm->read()) > 0) { + if (mainWin->m_displayAll) display_text(dircomm->msg()); + def = strchr(dircomm->msg(), '='); + if (!def) { + continue; + } + /* Pointer to default value */ + *def++ = 0; + strip_trailing_junk(def); + + if (strcmp(dircomm->msg(), "job") == 0) { + if (strcmp(def, job_defs.job_name.toUtf8().data()) != 0) { + goto bail_out; + } + continue; + } + if (strcmp(dircomm->msg(), "pool") == 0) { + job_defs.pool_name = def; + continue; + } + if (strcmp(dircomm->msg(), "messages") == 0) { + job_defs.messages_name = def; + continue; + } + if (strcmp(dircomm->msg(), "client") == 0) { + job_defs.client_name = def; + continue; + } + if (strcmp(dircomm->msg(), "storage") == 0) { + job_defs.store_name = def; + continue; + } + if (strcmp(dircomm->msg(), "where") == 0) { + job_defs.where = def; + continue; + } + if (strcmp(dircomm->msg(), "level") == 0) { + job_defs.level = def; + continue; + } + if (strcmp(dircomm->msg(), "type") == 0) { + job_defs.type = def; + continue; + } + if (strcmp(dircomm->msg(), "fileset") == 0) { + job_defs.fileset_name = def; + continue; + } + if (strcmp(dircomm->msg(), "catalog") == 0) { + job_defs.catalog_name = def; + continue; + } + if (strcmp(dircomm->msg(), "enabled") == 0) { + job_defs.enabled = *def == '1' ? 
true : false; + continue; + } + } + rtn = true; + /* Fall through wanted */ +bail_out: + if (donotify && prev_notify) { + notify(conn, true); + } + if (!prevWaitState) { + mainWin->waitExit(); + } + return rtn; +} + + +/* + * Save user settings associated with this console + */ +void Console::writeSettings() +{ + QFont font = get_font(); + + QSettings settings(m_dir->name(), "bat"); + settings.beginGroup("Console"); + settings.setValue("consoleFont", font.family()); + settings.setValue("consolePointSize", font.pointSize()); + settings.setValue("consoleFixedPitch", font.fixedPitch()); + settings.endGroup(); +} + +/* + * Read and restore user settings associated with this console + */ +void Console::readSettings() +{ + QFont font = get_font(); + + QSettings settings(m_dir->name(), "bat"); + settings.beginGroup("Console"); + font.setFamily(settings.value("consoleFont", "Courier").value<QString>()); + font.setPointSize(settings.value("consolePointSize", 10).toInt()); + font.setFixedPitch(settings.value("consoleFixedPitch", true).toBool()); + settings.endGroup(); + m_textEdit->setFont(font); +} + +/* + * Set the console textEdit font + */ +void Console::set_font() +{ + bool ok; + QFont font = QFontDialog::getFont(&ok, QFont(m_textEdit->font()), this); + if (ok) { + m_textEdit->setFont(font); + } +} + +/* + * Get the console text edit font + */ +const QFont Console::get_font() +{ + return m_textEdit->font(); +} + +/* + * Slot for responding to status dir button on button bar + */ +void Console::status_dir() +{ + QString cmd("status dir"); + consoleCommand(cmd); +} + +/* + * Slot for responding to messages button on button bar + * Here we want to bring the console to the front so use pages' consoleCommand + */ +void Console::messages() +{ + QString cmd(".messages"); + consoleCommand(cmd); + messagesPending(false); +} + +/* + * Put text into the console window + */ +void Console::display_textf(const char *fmt, ...)
+{ + va_list arg_ptr; + char buf[1000]; + va_start(arg_ptr, fmt); + bvsnprintf(buf, sizeof(buf), fmt, arg_ptr); + va_end(arg_ptr); + display_text(buf); +} + +void Console::display_text(const QString buf) +{ + if (mainWin->isClosing()) return; + if (buf.size() != 0) { + m_cursor->insertText(buf); + update_cursor(); + } +} + + +void Console::display_text(const char *buf) +{ + if (mainWin->isClosing()) return; + if (*buf != 0) { + m_cursor->insertText(buf); + update_cursor(); + } +} + +void Console::display_html(const QString buf) +{ + if (mainWin->isClosing()) return; + if (buf.size() != 0) { + m_cursor->insertHtml(buf); + update_cursor(); + } +} + +/* Position cursor to end of screen */ +void Console::update_cursor() +{ + m_textEdit->moveCursor(QTextCursor::End); + m_textEdit->ensureCursorVisible(); +} + +void Console::beginNewCommand(int conn) +{ + DirComm *dircomm = m_dircommHash.value(conn); + + if (dircomm->m_at_main_prompt) { + return; + } + for (int i=0; i < 3; i++) { + dircomm->write("."); + while (dircomm->read() > 0) { + if (mainWin->m_commDebug) Pmsg2(000, "begin new command loop %i %s\n", i, m_dir->name()); + if (mainWin->m_displayAll) display_text(dircomm->msg()); + } + if (dircomm->m_at_main_prompt) { + break; + } + } + //display_text("\n"); +} + +void Console::displayToPrompt(int conn) +{ + DirComm *dircomm = m_dircommHash.value(conn); + + int stat = 0; + QString buf; + if (mainWin->m_commDebug) Pmsg1(000, "DisplaytoPrompt %s\n", m_dir->name()); + while (!dircomm->m_at_prompt) { + if ((stat=dircomm->read()) > 0) { + buf += dircomm->msg(); + if (buf.size() >= 8196 || m_messages_pending) { + display_text(buf); + buf.clear(); + messagesPending(false); + } + } + } + display_text(buf); + if (mainWin->m_commDebug) Pmsg2(000, "endDisplaytoPrompt=%d %s\n", stat, m_dir->name()); +} + +void Console::discardToPrompt(int conn) +{ + DirComm *dircomm = m_dircommHash.value(conn); + + int stat = 0; + if (mainWin->m_commDebug) Pmsg1(000, "discardToPrompt %s\n", m_dir->name()); + if (mainWin->m_displayAll) { + displayToPrompt(conn); + } else { + while (!dircomm->m_at_prompt) { + stat = dircomm->read(); + if (stat < 0) { + break; + } + } + } + if (mainWin->m_commDebug) { + Pmsg2(000, "endDiscardToPrompt conn=%i %s\n", conn, m_dir->name()); + } +} + +QString Console::returnFromPrompt(int conn) +{ + DirComm *dircomm = m_dircommHash.value(conn); + QString text(""); + + int stat = 0; + text = ""; + dircomm->read(); + text += dircomm->msg(); + if (mainWin->m_commDebug) Pmsg1(000, "returnFromPrompt %s\n", m_dir->name()); + while (!dircomm->m_at_prompt) { + if ((stat=dircomm->read()) > 0) { + text += dircomm->msg(); + } + } + if (mainWin->m_commDebug) Pmsg2(000, "endreturnFromPrompt=%d %s\n", stat, m_dir->name()); + return text; +} + +/* + * When the notifier is enabled, read_dir() will automatically be + * called by the Qt event loop when ever there is any output + * from the Director, and read_dir() will then display it on + * the console. + * + * When we are in a bat dialog, we want to control *all* output + * from the Director, so we set notify to off. 
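+ * The bracket used by dir_cmd() and sql_cmd() above looks like:
+ *   bool prev = is_notify_enabled(conn);
+ *   notify(conn, false);            take the socket away from the event loop
+ *   ... write() / read() loop ...
+ *   if (prev) notify(conn, true);   give it back once the output is consumed
+ * From a dialog the call is simply: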
+ * m_console->notifiy(false); + */ + +/* dual purpose function to turn notify off and return a connection */ +int Console::notifyOff() +{ + int conn = 0; + if (getDirComm(conn)) { + notify(conn, false); + } + return conn; +} + +/* knowing a connection, turn notify off or on */ +bool Console::notify(int conn, bool enable) +{ + DirComm *dircomm = m_dircommHash.value(conn); + if (dircomm) { + return dircomm->notify(enable); + } else { + return false; + } +} + +/* knowing a connection, return notify state */ +bool Console::is_notify_enabled(int conn) const +{ + DirComm *dircomm = m_dircommHash.value(conn); + if (dircomm) { + return dircomm->is_notify_enabled(); + } else { + return false; + } +} + +void Console::setDirectorTreeItem(QTreeWidgetItem *item) +{ + m_directorTreeItem = item; +} + +void Console::setDirRes(DIRRES *dir) +{ + m_dir = dir; +} + +/* + * To have the ability to get the name of the director resource. + */ +void Console::getDirResName(QString &name_returned) +{ + name_returned = m_dir->name(); +} + +/* Slot for responding to page selectors status help command */ +void Console::consoleHelp() +{ + QString cmd("help"); + consoleCommand(cmd); +} + +/* Slot for responding to page selectors reload bacula-dir.conf */ +void Console::consoleReload() +{ + QString cmd("reload"); + consoleCommand(cmd); +} + +/* For suppressing .messages + * This may be rendered not needed if the multiple connections feature gets working */ +bool Console::hasFocus() +{ + if (mainWin->tabWidget->currentIndex() == mainWin->tabWidget->indexOf(this)) + return true; + else + return false; +} + +/* For adding feature to have the gui's messages button change when + * messages are pending */ +bool Console::messagesPending(bool pend) +{ + bool prev = m_messages_pending; + m_messages_pending = pend; + mainWin->setMessageIcon(); + return prev; +} + +/* terminate all existing connections */ +void Console::terminate() +{ + foreach(DirComm* dircomm, m_dircommHash) { + dircomm->terminate(); + } + m_console->stopTimer(); +} + +/* Maybe this should be checking the list, for the moment lets check 0 which should be connected */ +bool Console::is_connectedGui() +{ + if (is_connected(0)) { + return true; + } else { + QString message = tr("Director is currently disconnected\nPlease reconnect!"); + QMessageBox::warning(this, "Bat", message, QMessageBox::Ok ); + return false; + } +} + +int Console::read(int conn) +{ + DirComm *dircomm = m_dircommHash.value(conn); + return dircomm->read(); +} + +char *Console::msg(int conn) +{ + DirComm *dircomm = m_dircommHash.value(conn); + return dircomm->msg(); +} + +int Console::write(int conn, const QString msg) +{ + DirComm *dircomm = m_dircommHash.value(conn); + mainWin->waitEnter(); + int ret = dircomm->write(msg); + mainWin->waitExit(); + return ret; +} + +int Console::write(int conn, const char *msg) +{ + DirComm *dircomm = m_dircommHash.value(conn); + mainWin->waitEnter(); + int ret = dircomm->write(msg); + mainWin->waitExit(); + return ret; +} + +/* This checks to see if any is connected */ +bool Console::is_connected() +{ + bool connected = false; + foreach(DirComm* dircomm, m_dircommHash) { + if (dircomm->is_connected()) + return true; + } + return connected; +} + +/* knowing the connection id, is it connected */ +bool Console::is_connected(int conn) +{ + DirComm *dircomm = m_dircommHash.value(conn); + return dircomm->is_connected(); +} + +/* + * Need a connection. 
Check existing connections or create one + */ +bool Console::getDirComm(int &conn) +{ + if (findDirComm(conn)) { + return true; + } + if (mainWin->m_connDebug) Pmsg0(000, "call newDirComm\n"); + return newDirComm(conn); +} + + +/* + * Try to find a free (unused but established) connection + * KES: Note, I think there is a problem here because for + * some reason, the notifier is often turned off on file + * descriptors that seem to me to be available. That means + * that we do not use a free descriptor and thus we will create + * a new connection that is maybe not necessary. Someone needs + * to look into whether or not notify() is correctly turned on + * when we are back at the command prompt and idle. + * + */ +bool Console::findDirComm(int &conn) +{ + QHash<int, DirComm*>::const_iterator iter = m_dircommHash.constBegin(); + while (iter != m_dircommHash.constEnd()) { + DirComm *dircomm = iter.value(); + if (dircomm->m_at_prompt && dircomm->m_at_main_prompt && dircomm->is_notify_enabled()) { + conn = dircomm->m_conn; + return true; + } + if (mainWin->m_connDebug) { + Pmsg4(000, "currentDirComm=%d at_prompt=%d at_main=%d && notify=%d\n", + dircomm->m_conn, dircomm->m_at_prompt, dircomm->m_at_main_prompt, dircomm->is_notify_enabled()); + } + ++iter; + } + return false; +} + +/* + * Create a new connection + */ +bool Console::newDirComm(int &conn) +{ + m_dircommCounter++; + if (mainWin->m_connDebug) { + Pmsg2(000, "newDirComm=%i to: %s\n", m_dircommCounter, m_dir->name()); + } + DirComm *dircomm = new DirComm(this, m_dircommCounter); + m_dircommHash.insert(m_dircommCounter, dircomm); + bool success = dircomm->connect_dir(); + if (mainWin->m_connDebug) { + if (success) { + Pmsg2(000, "newDirComm=%i Connected %s\n", m_dircommCounter, m_dir->name()); + } else { + Emsg2(M_ERROR, 0, "DirComm=%i. Unable to connect to %s\n", + m_dircommCounter, m_dir->name()); + } + } + if (!success) { + m_dircommHash.remove(m_dircommCounter); + delete dircomm; + m_dircommCounter--; + } + conn = m_dircommCounter; + return success; +} diff --git a/src/qt-console/console/console.h b/src/qt-console/console/console.h new file mode 100644 index 00000000..4f3179c7 --- /dev/null +++ b/src/qt-console/console/console.h @@ -0,0 +1,160 @@ +#ifndef _CONSOLE_H_ +#define _CONSOLE_H_ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald.
+*/ +/* + * Kern Sibbald, January 2007 + */ + +#if QT_VERSION >= 0x050000 +#include <QtWidgets> +#else +#include <QtGui> +#endif +#include "pages.h" +#include "ui_console.h" +#include "bcomm/dircomm.h" + +#ifndef MAX_NAME_LENGTH +#define MAX_NAME_LENGTH 128 +#endif + +/* + * Structure for obtaining the defaults for a job + */ +struct job_defaults { + QString job_name; + QString pool_name; + QString messages_name; + QString client_name; + QString store_name; + QString where; + QString level; + QString type; + QString fileset_name; + QString catalog_name; + bool enabled; +}; + +//class DIRRES; +//class BSOCK; +//class JCR; +//class CONRES; + +class Console : public Pages, public Ui::ConsoleForm +{ + Q_OBJECT + friend class DirComm; + +public: + Console(QTabWidget *parent); + ~Console(); + int read(int conn); + char *msg(int conn); + void discardToPrompt(int conn); + int write(int conn, const char *msg); + int write(int conn, QString msg); + int notifyOff(); // enables/disables socket notification - returns the previous state + bool notify(int conn, bool enable); // enables/disables socket notification - returns the previous state + bool is_notify_enabled(int conn) const; + bool getDirComm(int &conn); + bool findDirComm(int &conn); + void displayToPrompt(int conn); + QString returnFromPrompt(int conn); + + bool dir_cmd(int conn, const char *cmd, QStringList &results); + bool dir_cmd(const char *cmd, QStringList &results); + bool dir_cmd(QString &cmd, QStringList &results); + bool sql_cmd(const char *cmd, QStringList &results); + bool sql_cmd(QString &cmd, QStringList &results); + bool sql_cmd(int &conn, QString &cmd, QStringList &results); + bool sql_cmd(int &conn, const char *cmd, QStringList &results, bool donotify); + int write_dir(const char *buf); + int write_dir(const char *buf, bool dowait); + void write_dir(int conn, const char *buf); + void write_dir(int conn, const char *buf, bool dowait); + void getDirResName(QString &); + void setDirRes(DIRRES *dir); + void writeSettings(); + void readSettings(); + void setDirectorTreeItem(QTreeWidgetItem *); + void terminate(); + bool is_messagesPending() { return m_messages_pending; }; + bool is_connected(); + bool is_connected(int conn); + QTreeWidgetItem *directorTreeItem() { return m_directorTreeItem; }; + void startTimer(); + void display_text(const char *buf); + void display_text(const QString buf); + void display_textf(const char *fmt, ...); + void display_html(const QString buf); + bool get_job_defaults(struct job_defaults &); + bool get_job_defaults(int &conn, struct job_defaults &); + const QFont get_font(); + void beginNewCommand(int conn); + void populateLists(bool forcenew); + +private: + bool get_job_defaults(int &conn, struct job_defaults &, bool donotify); + void update_cursor(void); + void stopTimer(); + bool is_connectedGui(); + bool newDirComm(int &conn); + void populateLists(int conn); + +public: + QStringList job_list; + QStringList restore_list; + QStringList client_list; + QStringList fileset_list; + QStringList messages_list; + QStringList pool_list; + QStringList storage_list; + QStringList type_list; + QStringList level_list; + QStringList volstatus_list; + QStringList mediatype_list; + QStringList location_list; + +public slots: + void connect_dir(); + void status_dir(void); + void messages(void); + void set_font(void); + void poll_messages(void); + void consoleHelp(); + void consoleReload(); + +public: + DIRRES *m_dir; /* so various pages can reference it */ + bool m_warningPrevent; + +private: + QTextEdit *m_textEdit; + QTextCursor
*m_cursor; + QTreeWidgetItem *m_directorTreeItem; + bool m_messages_pending; + QTimer *m_timer; + bool messagesPending(bool pend); + bool hasFocus(); + QHash m_dircommHash; + int m_dircommCounter; +}; + +#endif /* _CONSOLE_H_ */ diff --git a/src/qt-console/console/console.ui b/src/qt-console/console/console.ui new file mode 100644 index 00000000..6d8bcd92 --- /dev/null +++ b/src/qt-console/console/console.ui @@ -0,0 +1,117 @@ + + ConsoleForm + + + + 0 + 0 + 432 + 456 + + + + Console + + + + 9 + + + 6 + + + + + + 7 + 7 + 200 + 0 + + + + + 0 + 0 + + + + + 1 + 0 + + + + Qt::StrongFocus + + + false + + + + + + + + + + + + Qt::ScrollBarAsNeeded + + + QTextEdit::AutoNone + + + false + + + + + + QTextEdit::NoWrap + + + true + + + + + + + :/images/status.png + + + StatusDir + + + + + :/images/utilities-terminal.png + + + Console Help + + + + + :/images/utilities-terminal.png + + + Request Messages + + + + + :/images/utilities-terminal.png + + + Reload bacula-dir.conf + + + + + + + + diff --git a/src/qt-console/fileset/fileset.cpp b/src/qt-console/fileset/fileset.cpp new file mode 100644 index 00000000..5287c941 --- /dev/null +++ b/src/qt-console/fileset/fileset.cpp @@ -0,0 +1,284 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +/* + * FileSet Class + * + * Dirk Bartley, March 2007 + * + */ + +#include "bat.h" +#include +#include +#include "fileset/fileset.h" +#include "util/fmtwidgetitem.h" + +FileSet::FileSet() : Pages() +{ + setupUi(this); + m_name = tr("FileSets"); + pgInitialize(); + QTreeWidgetItem* thisitem = mainWin->getFromHash(this); + thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/system-file-manager.png"))); + + /* tableWidget, FileSet Tree Tree Widget inherited from ui_fileset.h */ + m_populated = false; + m_checkcurwidget = true; + m_closeable = false; + readSettings(); + /* add context sensitive menu items specific to this classto the page + * selector tree. m_contextActions is QList of QActions */ + m_contextActions.append(actionRefreshFileSet); +} + +FileSet::~FileSet() +{ + writeSettings(); +} + +/* + * The main meat of the class!! The function that querries the director and + * creates the widgets with appropriate values. 
+ */ +void FileSet::populateTable() +{ + + Freeze frz(*tableWidget); /* disable updating*/ + + m_checkcurwidget = false; + tableWidget->clear(); + m_checkcurwidget = true; + + QStringList headerlist = (QStringList() << tr("FileSet Name") << tr("FileSet Id") + << tr("Create Time")); + + tableWidget->setColumnCount(headerlist.count()); + tableWidget->setHorizontalHeaderLabels(headerlist); + tableWidget->horizontalHeader()->setHighlightSections(false); + tableWidget->verticalHeader()->hide(); + tableWidget->setSelectionBehavior(QAbstractItemView::SelectRows); + tableWidget->setSelectionMode(QAbstractItemView::SingleSelection); + tableWidget->setSortingEnabled(false); /* rows move on insert if sorting enabled */ + + QString fileset_comsep(""); + bool first = true; + QStringList notFoundList = m_console->fileset_list; + foreach(QString filesetName, m_console->fileset_list) { + if (first) { + fileset_comsep += "'" + filesetName + "'"; + first = false; + } + else + fileset_comsep += ",'" + filesetName + "'"; + } + + int row = 0; + tableWidget->setRowCount(m_console->fileset_list.count()); + if (fileset_comsep != "") { + /* Set up query QString and header QStringList */ + QString query(""); + query += "SELECT FileSet AS Name, FileSetId AS Id, CreateTime" + " FROM FileSet" + " WHERE FileSetId IN (SELECT MAX(FileSetId) FROM FileSet WHERE"; + query += " FileSet IN (" + fileset_comsep + ")"; + query += " GROUP BY FileSet) ORDER BY FileSet"; + + QStringList results; + if (mainWin->m_sqlDebug) { + Pmsg1(000, "FileSet query cmd : %s\n",query.toUtf8().data()); + } + if (m_console->sql_cmd(query, results)) { + QStringList fieldlist; + + /* Iterate through the record returned from the query */ + foreach (QString resultline, results) { + fieldlist = resultline.split("\t"); + if (fieldlist.size() != 3) { + Pmsg1(000, "Unexpected line %s", resultline.toUtf8().data()); + continue; + } + + /* remove this fileSet from notFoundList */ + int indexOf = notFoundList.indexOf(fieldlist[0]); + if (indexOf != -1) { notFoundList.removeAt(indexOf); } + + TableItemFormatter item(*tableWidget, row); + + /* Iterate through fields in the record */ + QStringListIterator fld(fieldlist); + int col = 0; + + /* name */ + item.setTextFld(col++, fld.next()); + + /* id */ + item.setNumericFld(col++, fld.next()); + + /* creation time */ + item.setTextFld(col++, fld.next()); + + row++; + } + } + } + foreach(QString filesetName, notFoundList) { + TableItemFormatter item(*tableWidget, row); + item.setTextFld(0, filesetName); + row++; + } + + /* set default sorting */ + tableWidget->sortByColumn(headerlist.indexOf(tr("FileSet Name")), Qt::AscendingOrder); + tableWidget->setSortingEnabled(true); + + /* Resize rows and columns */ + tableWidget->resizeColumnsToContents(); + tableWidget->resizeRowsToContents(); + + /* make read only */ + int rcnt = tableWidget->rowCount(); + int ccnt = tableWidget->columnCount(); + for(int r=0; r < rcnt; r++) { + for(int c=0; c < ccnt; c++) { + QTableWidgetItem* item = tableWidget->item(r, c); + if (item) { + item->setFlags(Qt::ItemFlags(item->flags() & (~Qt::ItemIsEditable))); + } + } + } + m_populated = true; +} + +/* + * When the treeWidgetItem in the page selector tree is singleclicked, Make sure + * The tree has been populated. 
+ */ +void FileSet::PgSeltreeWidgetClicked() +{ + if (!m_populated) { + populateTable(); + createContextMenu(); + } + if (!isOnceDocked()) { + dockPage(); + } +} + +/* + * Added to set the context menu policy based on currently active treeWidgetItem + * signaled by currentItemChanged + */ +void FileSet::tableItemChanged(QTableWidgetItem *currentwidgetitem, QTableWidgetItem *previouswidgetitem) +{ + /* m_checkcurwidget checks to see if this is during a refresh, which will segfault */ + if (m_checkcurwidget && currentwidgetitem) { + int currentRow = currentwidgetitem->row(); + QTableWidgetItem *currentrowzeroitem = tableWidget->item(currentRow, 0); + m_currentlyselected = currentrowzeroitem->text(); + + /* The Previous item */ + if (previouswidgetitem) { /* avoid a segfault if first time */ + tableWidget->removeAction(actionStatusFileSetInConsole); + tableWidget->removeAction(actionShowJobs); + } + + if (m_currentlyselected.length() != 0) { + /* set a hold variable to the fileset name in case the context sensitive + * menu is used */ + tableWidget->addAction(actionStatusFileSetInConsole); + tableWidget->addAction(actionShowJobs); + } + } +} + +/* + * Setup a context menu + * Made separate from populate so that it would not create context menu over and + * over as the tree is repopulated. + */ +void FileSet::createContextMenu() +{ + tableWidget->setContextMenuPolicy(Qt::ActionsContextMenu); + tableWidget->addAction(actionRefreshFileSet); + connect(tableWidget, SIGNAL( + currentItemChanged(QTableWidgetItem *, QTableWidgetItem *)), + this, SLOT(tableItemChanged(QTableWidgetItem *, QTableWidgetItem *))); + /* connect to the action specific to this pages class */ + connect(actionRefreshFileSet, SIGNAL(triggered()), this, + SLOT(populateTable())); + connect(actionStatusFileSetInConsole, SIGNAL(triggered()), this, + SLOT(consoleShowFileSet())); + connect(actionShowJobs, SIGNAL(triggered()), this, + SLOT(showJobs())); +} + +/* + * Function responding to actionListJobsofFileSet which calls mainwin function + * to create a window of a list of jobs of this fileset. 
+ */ +void FileSet::consoleShowFileSet() +{ + QString cmd("show fileset=\""); + cmd += m_currentlyselected + "\""; + consoleCommand(cmd); +} + +/* + * Virtual function which is called when this page is visible on the stack + */ +void FileSet::currentStackItem() +{ + if (!m_populated) { + populateTable(); + /* Create the context menu for the fileset table */ + createContextMenu(); + } +} + +/* + * Save user settings associated with this page + */ +void FileSet::writeSettings() +{ + QSettings settings(m_console->m_dir->name(), "bat"); + settings.beginGroup("FileSet"); + settings.setValue("geometry", saveGeometry()); + settings.endGroup(); +} + +/* + * Read and restore user settings associated with this page + */ +void FileSet::readSettings() +{ + QSettings settings(m_console->m_dir->name(), "bat"); + settings.beginGroup("FileSet"); + restoreGeometry(settings.value("geometry").toByteArray()); + settings.endGroup(); +} + +/* + * Create a JobList object pre-populating a fileset + */ +void FileSet::showJobs() +{ + QTreeWidgetItem *parentItem = mainWin->getFromHash(this); + mainWin->createPageJobList("", "", "", m_currentlyselected, parentItem); +} diff --git a/src/qt-console/fileset/fileset.h b/src/qt-console/fileset/fileset.h new file mode 100644 index 00000000..03220fef --- /dev/null +++ b/src/qt-console/fileset/fileset.h @@ -0,0 +1,61 @@ +#ifndef _FILESET_H_ +#define _FILESET_H_ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Dirk Bartley, March 2007 + */ + +#if QT_VERSION >= 0x050000 +#include +#else +#include +#endif +#include "ui_fileset.h" +#include "console.h" +#include "pages.h" + +class FileSet : public Pages, public Ui::FileSetForm +{ + Q_OBJECT + +public: + FileSet(); + ~FileSet(); + virtual void PgSeltreeWidgetClicked(); + virtual void currentStackItem(); + +public slots: + void tableItemChanged(QTableWidgetItem *, QTableWidgetItem *); + +private slots: + void populateTable(); + void consoleShowFileSet(); + void showJobs(); + +private: + void writeSettings(); + void readSettings(); + void createContextMenu(); + QString m_currentlyselected; + bool m_populated; + bool m_checkcurwidget; +}; + +#endif /* _FILESET_H_ */ diff --git a/src/qt-console/fileset/fileset.ui b/src/qt-console/fileset/fileset.ui new file mode 100644 index 00000000..f08e06b9 --- /dev/null +++ b/src/qt-console/fileset/fileset.ui @@ -0,0 +1,52 @@ + + FileSetForm + + + + 0 + 0 + 341 + 277 + + + + FileSet Tree + + + + + + + + + :/images/view-refresh.png + + + Refresh FileSet List + + + Requery the director for the list of storage objects. + + + + + :/images/status-console.png + + + Show FileSet In Console + + + + + :/images/emblem-system.png + + + ShowJobs + + + + + + + + diff --git a/src/qt-console/help/clients.html b/src/qt-console/help/clients.html new file mode 100644 index 00000000..3bdf5aa6 --- /dev/null +++ b/src/qt-console/help/clients.html @@ -0,0 +1,35 @@ + + + +Bat User's Guide + + + + + + + + +

Bat User's Guide

+
+ +

The Clients Interface

+ + +

The Clients interface shows a list of configured client resources. This +list of clients is a tree structure and shows a number of attributes from the +database for each client. + +

The user may right click on a client item to display a context-sensitive menu +of the actions that can be performed on a client. This menu has +options that allow the user to perform commands in the console. These +commands include requesting the status of the client, and pruning or purging +jobs and files from the catalog. There is also an option to open up a JobList +interface with the client control already prepopulated with the selected +client item. + +
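A rough sketch of the corresponding console commands; the client name "orion-fd" is hypothetical and some of these prompt for confirmation:
+
+  status client=orion-fd
+  prune files client=orion-fd
+  purge jobs client=orion-fd
+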

To understand pruning and purging, please read the Bacula documentation at +the following URL: http://www.bacula.org/rel-manual/Bacula_Console.html + + + diff --git a/src/qt-console/help/console.html b/src/qt-console/help/console.html new file mode 100644 index 00000000..04f2cf24 --- /dev/null +++ b/src/qt-console/help/console.html @@ -0,0 +1,36 @@ + + + +Bat User's Guide + + + + + + + + +

Bat User's Guide

+
+ +

The Console

+ + +

The Console is the interface that displays all of the text that would +normally be displayed in bconsole. Use the command window at the bottom of +the interface to manually enter any command as with bconsole. + +

There are two icons in the taskbar that are designed to run console commands. + +

Use the status +  icon in the taskbar to run the status dir command. + +

Use the messages   icon in the taskbar to run the messages command. + +
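These two icons simply issue the corresponding console commands, which can also be typed +into the command window at the bottom of this interface:
+
+  status dir
+  messages
+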

There will be no attempt to document console commands here. An alphabetic list of +the commands can be found on the following web URL +http://www.bacula.org/rel-manual/Bacula_Console.html + + + diff --git a/src/qt-console/help/filesets.html b/src/qt-console/help/filesets.html new file mode 100644 index 00000000..c93397bb --- /dev/null +++ b/src/qt-console/help/filesets.html @@ -0,0 +1,29 @@ + + + +Bat User's Guide + + + + + + + + +

Bat User's Guide

+
+ +

The Filesets Interface

+ + +

The Filesets interface shows a list of filesets and allows the +user to right click on a specific fileset to pop up its context-sensitive menu. +This interface is a tree structure and each item displays a number of +attributes from the database for that fileset. + +
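In the fileset page source shown earlier in this patch, the context-menu console action described below builds a show fileset command; a minimal sketch, with a hypothetical fileset name:
+
+  show fileset="Full Set"
+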

The context sensitive menu allows the user to perform the command status +fileset in the console. The user can also open up a new JobList window with the +Fileset dropdown pre-populated with the selected fileset. + + + diff --git a/src/qt-console/help/help.cpp b/src/qt-console/help/help.cpp new file mode 100644 index 00000000..2967ce1b --- /dev/null +++ b/src/qt-console/help/help.cpp @@ -0,0 +1,68 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +/* + * Help Window class + * + * Kern Sibbald, May MMVII + * + */ + +#include "bat.h" +#include "help.h" + +/* + * Note: HELPDIR is defined in src/host.h + */ + +Help::Help(const QString &path, const QString &file, QWidget *parent) : + QWidget(parent) +{ + setAttribute(Qt::WA_DeleteOnClose); /* Make sure we go away */ + setAttribute(Qt::WA_GroupLeader); /* allow calling from modal dialog */ + + setupUi(this); /* create window */ + + textBrowser->setSearchPaths(QStringList() << HELPDIR << path << ":/images"); + textBrowser->setSource(file); + //textBrowser->setCurrentFont(mainWin->m_consoleHash.values()[0]->get_font()); + + connect(textBrowser, SIGNAL(sourceChanged(const QUrl &)), this, SLOT(updateTitle())); + connect(closeButton, SIGNAL(clicked()), this, SLOT(close())); + connect(homeButton, SIGNAL(clicked()), textBrowser, SLOT(home())); + connect(backButton, SIGNAL(clicked()), textBrowser, SLOT(backward())); + this->show(); +} + +void Help::updateTitle() +{ + setWindowTitle(tr("Help: %1").arg(textBrowser->documentTitle())); +} + +void Help::displayFile(const QString &file) +{ + QRegExp rx; + rx.setPattern("/\\.libs"); + QString path = QApplication::applicationDirPath(); + int pos = rx.indexIn(path); + if (pos) + path = path.remove(pos, 6); + path += "/help"; + new Help(path, file); +} diff --git a/src/qt-console/help/help.h b/src/qt-console/help/help.h new file mode 100644 index 00000000..c6e0ff5d --- /dev/null +++ b/src/qt-console/help/help.h @@ -0,0 +1,51 @@ +#ifndef _HELP_H_ +#define _HELP_H_ + +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +/* + * Help Window class + * + * It reads an html file and displays it in a "browser" window. 
+ * + * Kern Sibbald, May MMVII + * + * $Id$ + */ + +#include "bat.h" +#include "ui_help.h" + +class Help : public QWidget, public Ui::helpForm +{ + Q_OBJECT + +public: + Help(const QString &path, const QString &file, QWidget *parent = NULL); + virtual ~Help() { }; + static void displayFile(const QString &file); + +public slots: + void updateTitle(); + +private: +}; + +#endif /* _HELP_H_ */ diff --git a/src/qt-console/help/help.ui b/src/qt-console/help/help.ui new file mode 100644 index 00000000..546b1f6c --- /dev/null +++ b/src/qt-console/help/help.ui @@ -0,0 +1,73 @@ + + helpForm + + + + 0 + 0 + 762 + 497 + + + + Form + + + + 9 + + + 6 + + + + + + + + 0 + + + 6 + + + + + &Home + + + + + + + &Back + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + Close + + + + + + + + + + diff --git a/src/qt-console/help/index.html b/src/qt-console/help/index.html new file mode 100644 index 00000000..c0ff20ee --- /dev/null +++ b/src/qt-console/help/index.html @@ -0,0 +1,53 @@ + + + +Bat User's Guide + + + + + + + + +

Bat User's Guide

+ +
The User's Guide for the Bacula Administration Tool. + +
+

Welcome to the BAT

+ +

Contents.

+ + +

The Console Interface +

The Clients Interface +

The FileSets Interface +

The Jobs Interface +

The JobList Interface +

The JobPlot Interface +

The Media Interface +

The Storage Interface +

The two Restore Interfaces + +

What is the BAT.

+ +

Welcome to the Bacula Administration Tool. Bat is the graphical interface +used to communicate with and administer the Bacula director daemon. If you have been +using Bacula for a while, you are familiar with bconsole, the text-based interface +used to administer Bacula. With bat, the user can perform any command that can be +run from bconsole, and more. + +

Bat is bconsole with extra communication capabilities and a set of +graphical user interfaces that utilize those capabilities. A bat programmer is +able to create interfaces that interact with the director in all the ways that a user +can, by executing commands. When a command is entered by an interface, bat +types the command into the console. Often the visible interface is +changed to the console so that the user can read the command as well as the +resulting output from the director. Bat can also make requests of the +daemon that the user would not normally make. These requests include the ability to +retrieve lists of various resources and to instruct the director +to query the SQL server. The director then forwards the query results to bat. + + + diff --git a/src/qt-console/help/joblist.html b/src/qt-console/help/joblist.html new file mode 100644 index 00000000..a9c492f2 --- /dev/null +++ b/src/qt-console/help/joblist.html @@ -0,0 +1,84 @@ + + + +Bat User's Guide + + + + + + + + +

Bat User's Guide

+
+ +

The JobList Interface

+ + +

The JobList interface displays a table of jobids, which are instances of +a job having been run. The table displays data for each job stored in the +daemon's SQL backend. The table currently includes the job name, the client name, +the job type, the job level, the number of files, the number of bytes, the status, +a flag indicating whether the job's files have been purged, and the job's +fileset. + +

The job type is either "B" for backup or "R" for restore. + +

The job level is one of "I" for incremental, "F" for full, or "D" for differential. + +

A purged value of yes means that the files for that job have been purged from the +catalog's file table. The files of a purged job cannot be browsed when +restoring from a job with that jobid. + +

This interface uses a splitter to divide the display of the table and the controls +used to modify the selection criteria. There are seven drop-down boxes that can be used +to filter the records in the table and view only the items you are interested +in. There are two limiting spin boxes and related check boxes that can be used to +limit the number and the age of the records that are displayed. These limiting +controls begin with default values that can be configured in the Settings->Preferences +dialog box on the JobList tab. + +

Pushing the Refresh button causes the interface to re-query the database. The +database is also re-queried every time this interface comes back to the top of the stack of +interfaces in the main window. Another refresh option exists in the popup shown when right +clicking on the JobList item in the Page Selector. + +

Pushing the Graph button opens up a new JobPlot interface. All of the +drop-downs and limiting controls in the JobPlot window default to the current +settings of the controls in the JobList interface. If the Graph button is not there, +bat was not compiled with the Qwt libraries. + +

There are many options in the context-sensitive popup that appears when the user +right clicks on an item in the job table. + +

Listing console commands can be run to list the jobid, the files for that jobid, +the jobmedia for that jobid, and the volumes used for that jobid. There is +a preferences item that determines whether a long list or a short list command is run. +It can be found from the menu by following Settings -> Preferences, then the Miscellaneous +tab. + +
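A minimal sketch of these listing commands as they would be typed in the console; the jobid 123 is hypothetical, and the "llist" prefix gives the long form:
+
+  list jobid=123
+  list files jobid=123
+  list jobmedia jobid=123
+  list volumes jobid=123
+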

The dangerous delete and purge commands are preceded by an "Are You Sure?" dialog +box. + +

The logs that have been stored in the database for the jobid +can be viewed by using the show log for job option. +If the database does not have a log, a popup appears explaining the modification that +may be made to the bacula-dir.conf file. The change is to add +"catalog = all" to the messages stanza of the Messages resource that this job uses. + +
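A sketch of that change as it would appear in bacula-dir.conf; the resource name "Standard" is hypothetical and the existing destination lines are elided:
+
+  Messages {
+    Name = Standard
+    catalog = all
+    # existing mail/console/append destinations stay as they are
+  }
+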

Restore from job opens up the Select Jobs interface. It opens prepopulated +with the correct data to open up a standard restore interface displaying the +file structure backed up when the job ran. It displays only the files from this job +if you don't change the controls in the Select Jobs interface. + +

Restore from time populates the Select Jobs interface with the endtime of the job +populated in the Before Time entry box. This causes the daemon to use the Before +Time to select the appropriate job id's for the restore. This contains the most recent +full backup, the most recent differential backup done since the most recent full, and +all the incremental backups done since the most recent full or differential. This job +set is considered the best possible to restore to what the filesystem looked like +after that job ran. + + + diff --git a/src/qt-console/help/jobplot.html b/src/qt-console/help/jobplot.html new file mode 100644 index 00000000..2d4624e9 --- /dev/null +++ b/src/qt-console/help/jobplot.html @@ -0,0 +1,28 @@ + + + +Bat User's Guide + + + + + + + + +

Bat User's Guide

+
+ +

The JobPlot Interface

+ + +

The JobPlot interface has all of the same selection controls as the +JobList interface. It also has controls that affect the plot's +display. These settings are saved and used the next time the interface +is opened. The user can change the Graph type to one of sticks, lines, steps +or none. The user can also turn one or both of the plots off with the File +Data and the Byte Data check boxes. The symbol type for each plot's points +can be changed as well. + + + diff --git a/src/qt-console/help/jobs.html b/src/qt-console/help/jobs.html new file mode 100644 index 00000000..27f02126 --- /dev/null +++ b/src/qt-console/help/jobs.html @@ -0,0 +1,58 @@ + + + +Bat User's Guide + + + + + + + + +

Bat User's Guide

+
+ +

The Jobs Interface

+ + +

The Jobs interface shows a tree of configured job resources. The data displayed +for each job in this list are the defaults for the job resource as defined in +the bacula-dir configuration file. When a job is run due to being scheduled, +these defaults are the values that are used. They are also the values +that are populated by default if the job is run manually. + +

The context sensitive popup menu includes a number of options. There are +options to run commands and there is an option to open up a different +interface. + +

The user can run a list files command in the console that displays a +list of jobids that have been run and some associated data. The output is similar +to what can be seen in the JobList interface. + +

The list volumes console command can be run to output a list of volumes that +have this job's files stored on them. + +

The list nextvolume console command tells the user the next volume that +will be asked for when the job is run. It also shows a list of past jobs. + +
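For example, the next-volume query described above corresponds to a console command of roughly this form; the job name "BackupClient1" is hypothetical:
+
+  list nextvolume job=BackupClient1
+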

There is a preferences item that determines whether a long list or a short list +command is run. It can be found from the menu by following Settings -> +Preferences, then the Miscellaneous tab. + +

There are options to run commands to either enable or disable the job as scheduled. +If disabled, the job is re-enabled when the director daemon is restarted. + +

The cancel job command cancels any running jobids that belong to this job +definition. + +
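A minimal sketch of the enable, disable, and cancel actions above as console commands; the job name and jobid are hypothetical:
+
+  enable job=BackupClient1
+  disable job=BackupClient1
+  cancel jobid=123
+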

The open joblist option opens a new JobList interface and pre-populates +the job dropdown list with the job resource's name. + +

The run job option is to open the Run a Job interface prepopulating the job +dropdown box with the selected job. With the job name in the job dropdown box +of the Run a Job interface, the remainder of the controls are populated with +the jobs defaults. + + + diff --git a/src/qt-console/help/media.html b/src/qt-console/help/media.html new file mode 100644 index 00000000..c30726f2 --- /dev/null +++ b/src/qt-console/help/media.html @@ -0,0 +1,41 @@ + + + +Bat User's Guide + + + + + + + + +

Bat User's Guide

+
+ +

The Media Interface

+ + +

The Media interface displays a tree structure of pools; under each +pool branch are items representing the volumes in the pool. The display +shows data that are the values in the database for each volume. With this +interface, the user can manage tape volumes and file volumes. + +

When the user right clicks on a volume item, a context-sensitive menu appears +that shows the actions that can be performed on this volume. This menu allows +the user to open up other interfaces and to perform commands in the console. +There are command options that modify the catalog. These commands include +deleting the volume, purging the jobs/files which have been written to the volume, and pruning the +jobs/files that meet the pruning criteria configured in +bacula-dir.conf. There is also a command to reset the parameters on the volumes +based on the parameters of the pool resource. + +
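A rough sketch of the catalog-modifying actions as console commands; the volume name "Vol0001" is hypothetical and these commands prompt for confirmation:
+
+  prune volume=Vol0001
+  purge volume=Vol0001
+  delete volume=Vol0001
+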

Other interfaces can be opened up from the context sensitive menu. +The user can open up the Relabel a Volume dialog box. The user can also open +up the Edit a Volume interface which allows the user to modify all user +editable data items for the volume. Another interface that may be opened is +the JobList. When created the media drop down control is preselected to the +volume. It lists jobs which have been written to the volume. + + + diff --git a/src/qt-console/help/restore.html b/src/qt-console/help/restore.html new file mode 100644 index 00000000..cbae9570 --- /dev/null +++ b/src/qt-console/help/restore.html @@ -0,0 +1,139 @@ + + + +Bat User's Guide + + + + + + + + +

Bat User's Guide

+
+ + +

The Two Restore Interfaces

+ +

Both interfaces accomplish the same three steps. +The steps are to select jobs to restore from, give the user the +opportunity to select files/directories, then indicate details such as +the host and path to restore to and trigger the job to run. + +

The Standard Restore Interface

+ +

Start the standard restore procedure by pressing the restore button +in the task bar. There are also two options in the joblist context sensitive +menu to start a restore. They are Restore From Time or Restore From Job. + +

This restore method is intended as a high-performance option. +It is a server-side process. These interfaces assist the user in utilizing the +text-based restore capabilities of the standard console. They interpret the text +to display the information in a way that simplifies the restore procedure. + +
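Behind this interface is the standard console restore command. A minimal sketch of one non-interactive form, with a hypothetical client name and restore location:
+
+  restore client=orion-fd where=/tmp/bacula-restores select current all done
+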

The opening interface allows the user to choose selection criteria to +inform the server how to determine the set of backup jobids to use in the +restore. This best possible set is the most recent full backup, the most +recent differential backup done since the most recent full, and all the +incremental backups done since the most recent full or differential. The +server then uses this set of jobs to create a file structure that holds the +most recent version of each file found in this job list. + +

The second interface allows the user to browse this file structure and +choose the files and directories to restore. This is done in an explorer +type interface with a directory tree on the left. In the right pane of a +splitter is a table showing a list of files and directories that are the +contents of the directory selected in the left pane. The user can mark and +unmark either with the buttons on the top or by double clicking on the check +mark icon to toggle whether an item is selected or not. Double clicking an +item which is a directory on a part of the table which is not the check icon +opens that directory. Clicking 'OK' completes the selection process. + +

The third step is the Restore Run interface. Its purpose is to allow the +user to give the Bacula server the details of the host and path to restore to, the +replacement rules, when to restore, and what priority to give the restore job. + +

The Version Browser Restore Interface

+ +

This restore interface is NOT intended to perform major restores of directory +structures with large numbers of directories and files. It should work; however, +it is a chatty interface. This is due to the number of SQL queries made of the server, +which is proportional to the number of files and directories selected plus the number of +exceptions to the defaults selected. It IS intended to allow the user to browse for +specific files and choose between the different versions of those files that +are available in the catalog to restore. + +

The interface contains a horizontal splitter. The bottom pane contains +some controls for the interface. +The top portion contains a vertical splitter with 4 panes for viewing the +cataloged information. The left pane is for viewing and further sub-selecting +jobs. The second pane is for viewing the directory tree. The third is for +viewing a list of files in the directory that has been selected. +Lastly, the fourth pane is for viewing a table of versions of a single file +that has been selected from the file table. + +

The version browser accomplishes the three restore steps differently. + +

To select jobs and populate the directory tree, press the refresh button. +The job table contains the selected jobs. The selection criteria of the three +dropdowns and the two limits are used to filter the jobs +that populate the job table the first time the refresh button is pushed. +After the refresh button has been pushed, the job table has check marks that +can be used to select and unselect jobs. Re-pressing the refresh button does one of two +things, depending on whether the controls in the bottom pane +display the same data as the previous time the refresh button was pressed. If +they changed, the job table is repopulated from the selection criteria. If +they are unchanged, any jobs that have been unchecked are excluded from the +process of selecting directories, files and versions. The directory tree is +repopulated whenever the refresh button is pushed. There is a text label underneath the +refresh button to inform the user which of the two will occur when refresh is pressed. + +

The user can browse the directory tree and click on a directory folder, which +then populates the file table with the files that are contained in the +selected directory path. Selecting or unselecting a directory also selects or +unselects all files and all directories in the tree hierarchy beneath it. If +there are any exceptions already selected beneath that directory, those +exceptions are deleted. + +

With the file table populated, the user can unselect a file in a selected +directory and also select a file in an unselected directory. + +

With a file selected, the version table is populated with all the instances in which +the file has been written to tape. The user can choose a specific version of a +file to restore and override the default, which is to restore the most recent +version. + +

Pressing the restore button initiates a procedure that prepares to +perform the restore of the requested files. The same Restore Run interface +that was the third step in the standard restore is then displayed. It +allows the user to give the Bacula server the details of what host +and what path to restore the files to. This part of the restore takes control +of the connection to the server and does not allow any other communication +with the server by the other interfaces. + +

There are two progress bars that appear when refreshing or after pressing +Restore. They indicate progress on tasks that may take a long time to complete. + +

A Version Browser Limitation

+ +There is an important limitation of the version browser. If a fileset +specifically points to a file instead of a directory that contains files, it +will not be seen in the version browser. This is due to the way that the +version browser searches for directories first, then for files contained in +those directories. A common example is the catalog job, which +by default points directly to the one file created by a database dump. + +

Version Browser Performance

+ +

If you have used the version browser with a large database, you may have +noticed that performance can become quite slow. A way to improve the +response time of the database server is to add indexes that assist a +couple of the specific queries that are made. + +
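As an illustrative sketch only (not taken from the Bacula sources), such an index on the catalog's File table might look like the line below; the useful columns depend on your catalog schema and the exact queries your version of bat issues, so verify against your own database before creating it:
+
+  CREATE INDEX file_jobid_pathid_idx ON File (JobId, PathId);
+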

If you have sqlite and would be willing to test out the creation of these +indexes to see if they work, please let me know the commands. + + + diff --git a/src/qt-console/help/storage.html b/src/qt-console/help/storage.html new file mode 100644 index 00000000..fa6fbb4b --- /dev/null +++ b/src/qt-console/help/storage.html @@ -0,0 +1,33 @@ + + + +Bat User's Guide + + + + + + + + +

Bat User's Guide

+
+ +

The Storage Interface

+ + +

The Storage interface shows a list of configured storage device resources. +The list is a tree structure and shows the storage id and the auto changer flag. + +

The interface allows the user to right click on an item to pop up a context-sensitive +menu of the actions that can be performed with that storage device. +The actions allow performing commands in the console. The commands include +requesting the status of the storage, and mounting, unmounting or releasing the media +in the storage device. + +
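A rough sketch of these actions as console commands; the storage name "LTO-4" is hypothetical:
+
+  status storage=LTO-4
+  mount storage=LTO-4
+  unmount storage=LTO-4
+  release storage=LTO-4
+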

If the autochanger flag is true, two additional options in the context +sensitive menu are found. These are options to run the command to update slots +and the command to update slots scan. + + + diff --git a/src/qt-console/images/0p.png b/src/qt-console/images/0p.png new file mode 100644 index 00000000..3a1b1d00 Binary files /dev/null and b/src/qt-console/images/0p.png differ diff --git a/src/qt-console/images/16p.png b/src/qt-console/images/16p.png new file mode 100644 index 00000000..bae7a483 Binary files /dev/null and b/src/qt-console/images/16p.png differ diff --git a/src/qt-console/images/32p.png b/src/qt-console/images/32p.png new file mode 100644 index 00000000..83bfa863 Binary files /dev/null and b/src/qt-console/images/32p.png differ diff --git a/src/qt-console/images/48p.png b/src/qt-console/images/48p.png new file mode 100644 index 00000000..93a098a3 Binary files /dev/null and b/src/qt-console/images/48p.png differ diff --git a/src/qt-console/images/64p.png b/src/qt-console/images/64p.png new file mode 100644 index 00000000..f1d5e20e Binary files /dev/null and b/src/qt-console/images/64p.png differ diff --git a/src/qt-console/images/80p.png b/src/qt-console/images/80p.png new file mode 100644 index 00000000..01c69a10 Binary files /dev/null and b/src/qt-console/images/80p.png differ diff --git a/src/qt-console/images/96p.png b/src/qt-console/images/96p.png new file mode 100644 index 00000000..440c19ac Binary files /dev/null and b/src/qt-console/images/96p.png differ diff --git a/src/qt-console/images/A.png b/src/qt-console/images/A.png new file mode 100644 index 00000000..89c21249 Binary files /dev/null and b/src/qt-console/images/A.png differ diff --git a/src/qt-console/images/R.png b/src/qt-console/images/R.png new file mode 100644 index 00000000..166c2201 Binary files /dev/null and b/src/qt-console/images/R.png differ diff --git a/src/qt-console/images/T.png b/src/qt-console/images/T.png new file mode 100644 index 00000000..d92d88d8 Binary files /dev/null and b/src/qt-console/images/T.png differ diff --git a/src/qt-console/images/W.png b/src/qt-console/images/W.png new file mode 100644 index 00000000..6d7dbadd Binary files /dev/null and b/src/qt-console/images/W.png differ diff --git a/src/qt-console/images/ajax-loader-big.gif b/src/qt-console/images/ajax-loader-big.gif new file mode 100644 index 00000000..3288d103 Binary files /dev/null and b/src/qt-console/images/ajax-loader-big.gif differ diff --git a/src/qt-console/images/applications-graphics.png b/src/qt-console/images/applications-graphics.png new file mode 100644 index 00000000..e392c298 Binary files /dev/null and b/src/qt-console/images/applications-graphics.png differ diff --git a/src/qt-console/images/applications-graphics.svg b/src/qt-console/images/applications-graphics.svg new file mode 100644 index 00000000..0438325e --- /dev/null +++ b/src/qt-console/images/applications-graphics.svg @@ -0,0 +1,545 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + Graphics Category + + + + Jakub Steiner + + + + + + graphics + category + pixel + vector + editor + draw + paint + + + http://tango-project.org + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/qt-console/images/backup.png b/src/qt-console/images/backup.png new file mode 100644 index 00000000..5111dd0b Binary files /dev/null and b/src/qt-console/images/backup.png differ diff --git a/src/qt-console/images/bat.png 
b/src/qt-console/images/bat.png new file mode 100644 index 00000000..7812eea3 Binary files /dev/null and b/src/qt-console/images/bat.png differ diff --git a/src/qt-console/images/bat_icon.icns b/src/qt-console/images/bat_icon.icns new file mode 100644 index 00000000..b76fe1d3 Binary files /dev/null and b/src/qt-console/images/bat_icon.icns differ diff --git a/src/qt-console/images/bat_icon.png b/src/qt-console/images/bat_icon.png new file mode 100644 index 00000000..c017f9e0 Binary files /dev/null and b/src/qt-console/images/bat_icon.png differ diff --git a/src/qt-console/images/browse.png b/src/qt-console/images/browse.png new file mode 100644 index 00000000..fa1b8776 Binary files /dev/null and b/src/qt-console/images/browse.png differ diff --git a/src/qt-console/images/browse.svg b/src/qt-console/images/browse.svg new file mode 100644 index 00000000..f56c565d --- /dev/null +++ b/src/qt-console/images/browse.svg @@ -0,0 +1,620 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + Folder Icon + + + + Jakub Steiner + + + + http://jimmac.musichall.cz + + + folder + directory + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/qt-console/images/cartridge-edit.png b/src/qt-console/images/cartridge-edit.png new file mode 100644 index 00000000..e686ca1d Binary files /dev/null and b/src/qt-console/images/cartridge-edit.png differ diff --git a/src/qt-console/images/cartridge-edit.svg b/src/qt-console/images/cartridge-edit.svg new file mode 100644 index 00000000..80c59405 --- /dev/null +++ b/src/qt-console/images/cartridge-edit.svg @@ -0,0 +1,277 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/qt-console/images/cartridge.png b/src/qt-console/images/cartridge.png new file mode 100644 index 00000000..8df920af Binary files /dev/null and b/src/qt-console/images/cartridge.png differ diff --git a/src/qt-console/images/cartridge.svg b/src/qt-console/images/cartridge.svg new file mode 100644 index 00000000..946e6906 --- /dev/null +++ b/src/qt-console/images/cartridge.svg @@ -0,0 +1,148 @@ + + + + + + + + + + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/qt-console/images/cartridge1.png b/src/qt-console/images/cartridge1.png new file mode 100644 index 00000000..8ae4406a Binary files /dev/null and b/src/qt-console/images/cartridge1.png differ diff --git a/src/qt-console/images/check.png b/src/qt-console/images/check.png new file mode 100644 index 00000000..84055cc7 Binary files /dev/null and b/src/qt-console/images/check.png differ diff --git a/src/qt-console/images/check.svg b/src/qt-console/images/check.svg new file mode 100644 index 00000000..81095a37 --- /dev/null +++ b/src/qt-console/images/check.svg @@ -0,0 +1,94 @@ + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + + + + diff --git a/src/qt-console/images/connected.png b/src/qt-console/images/connected.png new file mode 100644 index 00000000..9a6a3980 Binary files /dev/null and b/src/qt-console/images/connected.png differ diff --git a/src/qt-console/images/copy.png b/src/qt-console/images/copy.png new file mode 100644 index 00000000..f2c04dfe Binary files /dev/null and b/src/qt-console/images/copy.png differ diff --git a/src/qt-console/images/cut.png 
b/src/qt-console/images/cut.png new file mode 100644 index 00000000..54638e93 Binary files /dev/null and b/src/qt-console/images/cut.png differ diff --git a/src/qt-console/images/disconnected.png b/src/qt-console/images/disconnected.png new file mode 100644 index 00000000..57bdd27b Binary files /dev/null and b/src/qt-console/images/disconnected.png differ diff --git a/src/qt-console/images/edit-cut.png b/src/qt-console/images/edit-cut.png new file mode 100644 index 00000000..dc9eb9a7 Binary files /dev/null and b/src/qt-console/images/edit-cut.png differ diff --git a/src/qt-console/images/edit-delete.png b/src/qt-console/images/edit-delete.png new file mode 100644 index 00000000..ba2e237d Binary files /dev/null and b/src/qt-console/images/edit-delete.png differ diff --git a/src/qt-console/images/edit-delete.svg b/src/qt-console/images/edit-delete.svg new file mode 100644 index 00000000..146d32db --- /dev/null +++ b/src/qt-console/images/edit-delete.svg @@ -0,0 +1,882 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + Delete + + + + Jakub Steiner + + + + + edit + delete + shredder + + + + + Novell, Inc. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/qt-console/images/edit.png b/src/qt-console/images/edit.png new file mode 100644 index 00000000..36775390 Binary files /dev/null and b/src/qt-console/images/edit.png differ diff --git a/src/qt-console/images/emblem-system.png b/src/qt-console/images/emblem-system.png new file mode 100644 index 00000000..89082dad Binary files /dev/null and b/src/qt-console/images/emblem-system.png differ diff --git a/src/qt-console/images/emblem-system.svg b/src/qt-console/images/emblem-system.svg new file mode 100644 index 00000000..1d2b5bb9 --- /dev/null +++ b/src/qt-console/images/emblem-system.svg @@ -0,0 +1,235 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + Jakub Steiner + + + http://jimmac.musichall.cz + + Emblem System + + + emblem + system + library + crucial + base + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/qt-console/images/estimate-job.png b/src/qt-console/images/estimate-job.png new file mode 100644 index 00000000..b47d13cc Binary files /dev/null and b/src/qt-console/images/estimate-job.png differ diff --git a/src/qt-console/images/estimate-job.svg b/src/qt-console/images/estimate-job.svg new file mode 100644 index 00000000..16326edf --- /dev/null +++ b/src/qt-console/images/estimate-job.svg @@ -0,0 +1,222 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + Jakub Steiner + + + http://jimmac.musichall.cz + + Emblem System + + + emblem + system + library + crucial + base + + + + + + + + + + + + + + + + + + + ? 
+ + diff --git a/src/qt-console/images/extern.png b/src/qt-console/images/extern.png new file mode 100644 index 00000000..cfc3b355 Binary files /dev/null and b/src/qt-console/images/extern.png differ diff --git a/src/qt-console/images/f.png b/src/qt-console/images/f.png new file mode 100644 index 00000000..89c21249 Binary files /dev/null and b/src/qt-console/images/f.png differ diff --git a/src/qt-console/images/folder.png b/src/qt-console/images/folder.png new file mode 100644 index 00000000..6a0be938 Binary files /dev/null and b/src/qt-console/images/folder.png differ diff --git a/src/qt-console/images/folder.svg b/src/qt-console/images/folder.svg new file mode 100644 index 00000000..2027f568 --- /dev/null +++ b/src/qt-console/images/folder.svg @@ -0,0 +1,422 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + Folder Icon + + + + Jakub Steiner + + + + http://jimmac.musichall.cz + + + folder + directory + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/qt-console/images/folderbothchecked.png b/src/qt-console/images/folderbothchecked.png new file mode 100644 index 00000000..fdb993ba Binary files /dev/null and b/src/qt-console/images/folderbothchecked.png differ diff --git a/src/qt-console/images/folderbothchecked.svg b/src/qt-console/images/folderbothchecked.svg new file mode 100644 index 00000000..6fe48e60 --- /dev/null +++ b/src/qt-console/images/folderbothchecked.svg @@ -0,0 +1,438 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + Folder Icon + + + + Jakub Steiner + + + + http://jimmac.musichall.cz + + + folder + directory + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/qt-console/images/folderchecked.png b/src/qt-console/images/folderchecked.png new file mode 100644 index 00000000..97cbf19b Binary files /dev/null and b/src/qt-console/images/folderchecked.png differ diff --git a/src/qt-console/images/folderchecked.svg b/src/qt-console/images/folderchecked.svg new file mode 100644 index 00000000..c87014ad --- /dev/null +++ b/src/qt-console/images/folderchecked.svg @@ -0,0 +1,430 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + Folder Icon + + + + Jakub Steiner + + + + http://jimmac.musichall.cz + + + folder + directory + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/qt-console/images/folderunchecked.png b/src/qt-console/images/folderunchecked.png new file mode 100644 index 00000000..13a8de2f Binary files /dev/null and b/src/qt-console/images/folderunchecked.png differ diff --git a/src/qt-console/images/folderunchecked.svg b/src/qt-console/images/folderunchecked.svg new file mode 100644 index 00000000..cbe7627c --- /dev/null +++ b/src/qt-console/images/folderunchecked.svg @@ -0,0 +1,430 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + Folder Icon + + + + Jakub Steiner + + + + http://jimmac.musichall.cz + + + folder + directory + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/qt-console/images/go-down.png b/src/qt-console/images/go-down.png new file mode 100644 index 00000000..3dd7fccd Binary files /dev/null and b/src/qt-console/images/go-down.png differ diff 
--git a/src/qt-console/images/go-down.svg b/src/qt-console/images/go-down.svg new file mode 100644 index 00000000..18dadc49 --- /dev/null +++ b/src/qt-console/images/go-down.svg @@ -0,0 +1,199 @@ + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + Jakub Steiner + + + http://jimmac.musichall.cz + + Go Down + + + go + lower + down + arrow + pointer + > + + + + + Andreas Nilsson + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/qt-console/images/go-jump.png b/src/qt-console/images/go-jump.png new file mode 100644 index 00000000..34dc4c04 Binary files /dev/null and b/src/qt-console/images/go-jump.png differ diff --git a/src/qt-console/images/go-jump.svg b/src/qt-console/images/go-jump.svg new file mode 100644 index 00000000..4832fe94 --- /dev/null +++ b/src/qt-console/images/go-jump.svg @@ -0,0 +1,204 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + Jakub Steiner + + + http://jimmac.musichall.cz + + Go Jump + + + go + jump + seek + arrow + pointer + + + + + + + + + + + + + + + + diff --git a/src/qt-console/images/go-up.png b/src/qt-console/images/go-up.png new file mode 100644 index 00000000..fa9a7d71 Binary files /dev/null and b/src/qt-console/images/go-up.png differ diff --git a/src/qt-console/images/go-up.svg b/src/qt-console/images/go-up.svg new file mode 100644 index 00000000..0e3d01d1 --- /dev/null +++ b/src/qt-console/images/go-up.svg @@ -0,0 +1,195 @@ + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + Jakub Steiner + + + http://jimmac.musichall.cz + + Go Up + + + go + higher + up + arrow + pointer + > + + + + + Andreas Nilsson + + + + + + + + + + + + + + + + + + + + diff --git a/src/qt-console/images/graph1.png b/src/qt-console/images/graph1.png new file mode 100644 index 00000000..9e12db09 Binary files /dev/null and b/src/qt-console/images/graph1.png differ diff --git a/src/qt-console/images/graph1.svg b/src/qt-console/images/graph1.svg new file mode 100644 index 00000000..ecde0e9f --- /dev/null +++ b/src/qt-console/images/graph1.svg @@ -0,0 +1,380 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + System Monitor + 2005-10-10 + + + Andreas Nilsson + + + + + system + monitor + performance + + + + + + Jakub Steiner + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/qt-console/images/help-browser.png b/src/qt-console/images/help-browser.png new file mode 100644 index 00000000..e712112a Binary files /dev/null and b/src/qt-console/images/help-browser.png differ diff --git a/src/qt-console/images/help-browser.svg b/src/qt-console/images/help-browser.svg new file mode 100644 index 00000000..cc80f776 --- /dev/null +++ b/src/qt-console/images/help-browser.svg @@ -0,0 +1,215 @@ + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + Help Browser + 2005-11-06 + + + Tuomas Kuosmanen + + + + + help + browser + documentation + docs + man + info + + + + + + Jakub Steiner, Andreas Nilsson + + + http://tigert.com + + + + + + + + + + + + + + + + + + diff --git a/src/qt-console/images/home.png b/src/qt-console/images/home.png new file mode 100644 index 00000000..045339ce Binary files /dev/null and b/src/qt-console/images/home.png differ diff --git a/src/qt-console/images/inflag0.png b/src/qt-console/images/inflag0.png new file mode 100644 index 00000000..6478554f Binary files /dev/null and b/src/qt-console/images/inflag0.png differ diff --git a/src/qt-console/images/inflag1.png 
b/src/qt-console/images/inflag1.png new file mode 100644 index 00000000..e061e7f1 Binary files /dev/null and b/src/qt-console/images/inflag1.png differ diff --git a/src/qt-console/images/inflag2.png b/src/qt-console/images/inflag2.png new file mode 100644 index 00000000..dbff8454 Binary files /dev/null and b/src/qt-console/images/inflag2.png differ diff --git a/src/qt-console/images/intern.png b/src/qt-console/images/intern.png new file mode 100644 index 00000000..64b8300d Binary files /dev/null and b/src/qt-console/images/intern.png differ diff --git a/src/qt-console/images/joblog.png b/src/qt-console/images/joblog.png new file mode 100644 index 00000000..6728b1dc Binary files /dev/null and b/src/qt-console/images/joblog.png differ diff --git a/src/qt-console/images/joblog.svg b/src/qt-console/images/joblog.svg new file mode 100644 index 00000000..b776f6a4 --- /dev/null +++ b/src/qt-console/images/joblog.svg @@ -0,0 +1,252 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + Jakub Steiner + + + http://jimmac.musichall.cz + + Emblem System + + + emblem + system + library + crucial + base + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/qt-console/images/label.png b/src/qt-console/images/label.png new file mode 100644 index 00000000..23a5f7cb Binary files /dev/null and b/src/qt-console/images/label.png differ diff --git a/src/qt-console/images/mail-message-new.png b/src/qt-console/images/mail-message-new.png new file mode 100644 index 00000000..2588cbb1 Binary files /dev/null and b/src/qt-console/images/mail-message-new.png differ diff --git a/src/qt-console/images/mail-message-new.svg b/src/qt-console/images/mail-message-new.svg new file mode 100644 index 00000000..4bad1cad --- /dev/null +++ b/src/qt-console/images/mail-message-new.svg @@ -0,0 +1,465 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + Mail New + + + Jakub Steiner + + + + + Andreas Nilsson, Steven Garrity + + + + + + mail + e-mail + MUA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/qt-console/images/mail-message-pending.png b/src/qt-console/images/mail-message-pending.png new file mode 100644 index 00000000..5b1680c8 Binary files /dev/null and b/src/qt-console/images/mail-message-pending.png differ diff --git a/src/qt-console/images/mail-message-pending.svg b/src/qt-console/images/mail-message-pending.svg new file mode 100644 index 00000000..59df1b59 --- /dev/null +++ b/src/qt-console/images/mail-message-pending.svg @@ -0,0 +1,467 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + Mail New + + + Jakub Steiner + + + + + Andreas Nilsson, Steven Garrity + + + + + + mail + e-mail + MUA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/qt-console/images/mark.png b/src/qt-console/images/mark.png new file mode 100644 index 00000000..f066077e Binary files /dev/null and b/src/qt-console/images/mark.png differ diff --git a/src/qt-console/images/media-floppy.svg b/src/qt-console/images/media-floppy.svg new file mode 100644 index 00000000..b3a2de91 --- /dev/null +++ b/src/qt-console/images/media-floppy.svg @@ -0,0 +1,340 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + Media Floppy + + + Tuomas Kuosmanen + + + 
http://www.tango-project.org + + + save + document + store + file + io + floppy + media + + + + + Jakub Steiner + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/qt-console/images/network-server.png b/src/qt-console/images/network-server.png new file mode 100644 index 00000000..c22fbb04 Binary files /dev/null and b/src/qt-console/images/network-server.png differ diff --git a/src/qt-console/images/network-server.svg b/src/qt-console/images/network-server.svg new file mode 100644 index 00000000..cd1515e3 --- /dev/null +++ b/src/qt-console/images/network-server.svg @@ -0,0 +1,1005 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + Server + + + Jakub Steiner + + + http://jimmac.musichall.cz + + + + server + daemon + comupetr + lan + service + provider + + + + + Garrett LeSage + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/qt-console/images/new.png b/src/qt-console/images/new.png new file mode 100644 index 00000000..12131b01 Binary files /dev/null and b/src/qt-console/images/new.png differ diff --git a/src/qt-console/images/next.png b/src/qt-console/images/next.png new file mode 100644 index 00000000..0d15331f Binary files /dev/null and b/src/qt-console/images/next.png differ diff --git a/src/qt-console/images/open.png b/src/qt-console/images/open.png new file mode 100644 index 00000000..45fa2883 Binary files /dev/null and b/src/qt-console/images/open.png differ diff --git a/src/qt-console/images/package-x-generic.png b/src/qt-console/images/package-x-generic.png new file mode 100644 index 00000000..4311b579 Binary files /dev/null and b/src/qt-console/images/package-x-generic.png differ diff --git a/src/qt-console/images/package-x-generic.svg b/src/qt-console/images/package-x-generic.svg new file mode 100644 index 00000000..dced3c42 --- /dev/null +++ b/src/qt-console/images/package-x-generic.svg @@ -0,0 +1,483 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + Package + + + Jakub Steiner + + + http://jimmac.musichall.cz/ + + + package + archive + tarball + tar + bzip + gzip + zip + arj + tar + jar + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/qt-console/images/page-next.gif b/src/qt-console/images/page-next.gif new file mode 100644 index 00000000..96016353 Binary files /dev/null and b/src/qt-console/images/page-next.gif differ diff --git a/src/qt-console/images/page-prev.gif b/src/qt-console/images/page-prev.gif new file mode 100644 index 00000000..eb70cf8f Binary files /dev/null and b/src/qt-console/images/page-prev.gif differ diff --git a/src/qt-console/images/paste.png b/src/qt-console/images/paste.png new file mode 100644 index 00000000..e38bbe5f Binary files /dev/null and b/src/qt-console/images/paste.png differ diff --git a/src/qt-console/images/prev.png b/src/qt-console/images/prev.png new file mode 100644 index 00000000..991a49dc Binary files /dev/null and b/src/qt-console/images/prev.png differ diff --git a/src/qt-console/images/print.png b/src/qt-console/images/print.png new file mode 100644 index 00000000..2afb769e Binary files /dev/null and b/src/qt-console/images/print.png differ diff --git a/src/qt-console/images/purge.png 
b/src/qt-console/images/purge.png new file mode 100644 index 00000000..9ba7c532 Binary files /dev/null and b/src/qt-console/images/purge.png differ diff --git a/src/qt-console/images/restore.png b/src/qt-console/images/restore.png new file mode 100644 index 00000000..563d4067 Binary files /dev/null and b/src/qt-console/images/restore.png differ diff --git a/src/qt-console/images/run.png b/src/qt-console/images/run.png new file mode 100644 index 00000000..d3853864 Binary files /dev/null and b/src/qt-console/images/run.png differ diff --git a/src/qt-console/images/runit.png b/src/qt-console/images/runit.png new file mode 100644 index 00000000..4dd2e8fb Binary files /dev/null and b/src/qt-console/images/runit.png differ diff --git a/src/qt-console/images/save.png b/src/qt-console/images/save.png new file mode 100644 index 00000000..daba865f Binary files /dev/null and b/src/qt-console/images/save.png differ diff --git a/src/qt-console/images/server.png b/src/qt-console/images/server.png new file mode 100644 index 00000000..b21caca7 Binary files /dev/null and b/src/qt-console/images/server.png differ diff --git a/src/qt-console/images/status-console.png b/src/qt-console/images/status-console.png new file mode 100644 index 00000000..ea88abed Binary files /dev/null and b/src/qt-console/images/status-console.png differ diff --git a/src/qt-console/images/status-console.svg b/src/qt-console/images/status-console.svg new file mode 100644 index 00000000..f3caf8a3 --- /dev/null +++ b/src/qt-console/images/status-console.svg @@ -0,0 +1,606 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + Terminal + 2005-10-15 + + + Andreas Nilsson + + + + + terminal + emulator + term + command line + + + + + + Jakub Steiner + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/qt-console/images/status.png b/src/qt-console/images/status.png new file mode 100644 index 00000000..ea06f93b Binary files /dev/null and b/src/qt-console/images/status.png differ diff --git a/src/qt-console/images/status.svg b/src/qt-console/images/status.svg new file mode 100644 index 00000000..ecde0e9f --- /dev/null +++ b/src/qt-console/images/status.svg @@ -0,0 +1,380 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + System Monitor + 2005-10-10 + + + Andreas Nilsson + + + + + system + monitor + performance + + + + + + Jakub Steiner + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/qt-console/images/system-file-manager.png b/src/qt-console/images/system-file-manager.png new file mode 100644 index 00000000..97513c1c Binary files /dev/null and b/src/qt-console/images/system-file-manager.png differ diff --git a/src/qt-console/images/system-file-manager.svg b/src/qt-console/images/system-file-manager.svg new file mode 100644 index 00000000..4af259b1 --- /dev/null +++ b/src/qt-console/images/system-file-manager.svg @@ -0,0 +1,318 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + File Manager + + + Jakub Steiner + + + http://jimmac.musichall.cz + + + file + manager + copy + move + filesystem + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/qt-console/images/unchecked.png b/src/qt-console/images/unchecked.png new file mode 100644 index 00000000..ccd9e8b3 
Binary files /dev/null and b/src/qt-console/images/unchecked.png differ diff --git a/src/qt-console/images/unchecked.svg b/src/qt-console/images/unchecked.svg new file mode 100644 index 00000000..a4dc5795 --- /dev/null +++ b/src/qt-console/images/unchecked.svg @@ -0,0 +1,97 @@ + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + + + + diff --git a/src/qt-console/images/undo.png b/src/qt-console/images/undo.png new file mode 100644 index 00000000..eee23d24 Binary files /dev/null and b/src/qt-console/images/undo.png differ diff --git a/src/qt-console/images/unmark.png b/src/qt-console/images/unmark.png new file mode 100644 index 00000000..4f7ea940 Binary files /dev/null and b/src/qt-console/images/unmark.png differ diff --git a/src/qt-console/images/up.png b/src/qt-console/images/up.png new file mode 100644 index 00000000..fabe7bf7 Binary files /dev/null and b/src/qt-console/images/up.png differ diff --git a/src/qt-console/images/utilities-terminal.png b/src/qt-console/images/utilities-terminal.png new file mode 100644 index 00000000..c76370f5 Binary files /dev/null and b/src/qt-console/images/utilities-terminal.png differ diff --git a/src/qt-console/images/utilities-terminal.svg b/src/qt-console/images/utilities-terminal.svg new file mode 100644 index 00000000..45b2e7e3 --- /dev/null +++ b/src/qt-console/images/utilities-terminal.svg @@ -0,0 +1,498 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + Terminal + 2005-10-15 + + + Andreas Nilsson + + + + + terminal + emulator + term + command line + + + + + + Jakub Steiner + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/qt-console/images/view-refresh.png b/src/qt-console/images/view-refresh.png new file mode 100644 index 00000000..b6ae9bbf Binary files /dev/null and b/src/qt-console/images/view-refresh.png differ diff --git a/src/qt-console/images/view-refresh.svg b/src/qt-console/images/view-refresh.svg new file mode 100644 index 00000000..2209874b --- /dev/null +++ b/src/qt-console/images/view-refresh.svg @@ -0,0 +1,391 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + Jakub Steiner + + + http://jimmac.musichall.cz + + View Refresh + + + reload + refresh + view + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/qt-console/images/weather-severe-alert.png b/src/qt-console/images/weather-severe-alert.png new file mode 100644 index 00000000..7670df3f Binary files /dev/null and b/src/qt-console/images/weather-severe-alert.png differ diff --git a/src/qt-console/images/weather-severe-alert.svg b/src/qt-console/images/weather-severe-alert.svg new file mode 100644 index 00000000..2e96850d --- /dev/null +++ b/src/qt-console/images/weather-severe-alert.svg @@ -0,0 +1,4699 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + weather-severe-alert + January 2006 + + + Ryan Collier (pseudo) + + + + + http://www.tango-project.org + + + http://www.pseudocode.org + + + weather + applet + notify + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/qt-console/images/zoom.png b/src/qt-console/images/zoom.png new file mode 100644 index 00000000..1ac4864d Binary files /dev/null and b/src/qt-console/images/zoom.png differ diff --git a/src/qt-console/install_conf_file.in b/src/qt-console/install_conf_file.in new file mode 100755 index 00000000..ab3592ce --- /dev/null +++ b/src/qt-console/install_conf_file.in @@ -0,0 +1,16 @@ +#!/bin/sh + +sbindir=@sbindir@ +sysconfdir=@sysconfdir@ +INSTALL_CONFIG="@INSTALL@ -m 640" +DESTDIR=`echo ${DESTDIR}` + +srcconf=bat.conf +if test -f ${DESTDIR}${sysconfdir}/${srcconf}; then + destconf=${srcconf}.new + echo " ==> Found existing $srcconf, installing new conf file as ${destconf}" +else + destconf=${srcconf} +fi +echo "${INSTALL_CONFIG} ${srcconf} ${DESTDIR}${sysconfdir}/${destconf}" +${INSTALL_CONFIG} ${srcconf} ${DESTDIR}${sysconfdir}/${destconf} diff --git a/src/qt-console/job/job.cpp b/src/qt-console/job/job.cpp new file mode 100644 index 00000000..fce65003 --- /dev/null +++ b/src/qt-console/job/job.cpp @@ -0,0 +1,494 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ + +#include "bat.h" +#include "job.h" +#include "util/fmtwidgetitem.h" +#include "mediainfo/mediainfo.h" +#include "run/run.h" + +Job::Job(QString &jobId, QTreeWidgetItem *parentTreeWidgetItem) : Pages() +{ + setupUi(this); + pgInitialize(tr("Job"), parentTreeWidgetItem); + QTreeWidgetItem* thisitem = mainWin->getFromHash(this); + thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/joblog.png"))); + m_cursor = new QTextCursor(textJobLog->document()); + + m_bwlimit = 0; + m_jobId = jobId; + m_timer = NULL; + getFont(); + + connect(pbRefresh, SIGNAL(clicked()), this, SLOT(populateAll())); + connect(pbDelete, SIGNAL(clicked()), this, SLOT(deleteJob())); + connect(pbCancel, SIGNAL(clicked()), this, SLOT(cancelJob())); + connect(pbRun, SIGNAL(clicked()), this, SLOT(rerun())); + connect(list_Volume, SIGNAL(itemDoubleClicked(QListWidgetItem*)), this, SLOT(showInfoVolume(QListWidgetItem *))); + connect(spin_Bwlimit, SIGNAL(valueChanged(int)), this, SLOT(storeBwLimit(int))); + + populateAll(); + dockPage(); + setCurrent(); +} + +void Job::rerun() +{ + new runPage(label_Name->text(), + label_Level->text(), + label_Pool->text(), + QString(""), // storage + label_Client->text(), + label_FileSet->text()); +} + +void Job::showInfoVolume(QListWidgetItem *item) +{ + QString s= item->text(); + QTreeWidgetItem* pageSelectorTreeWidgetItem = mainWin->getFromHash(this); + + MediaInfo *m = new MediaInfo(pageSelectorTreeWidgetItem, s); + connect(m, SIGNAL(destroyed()), this, SLOT(populateTree())); +} + +void Job::deleteJob() +{ + if (QMessageBox::warning(this, "Bat", + tr("Are you sure you want to delete?? !!!.\n" +"This delete command is used to delete a Job record and all associated catalog" +" records that were created. This command operates only on the Catalog" +" database and has no effect on the actual data written to a Volume. This" +" command can be dangerous and we strongly recommend that you do not use" +" it unless you know what you are doing. The Job and all its associated" +" records (File and JobMedia) will be deleted from the catalog." 
+ "Press OK to proceed with delete operation.?"), + QMessageBox::Ok | QMessageBox::Cancel) + == QMessageBox::Cancel) { return; } + + QString cmd("delete job jobid="); + cmd += m_jobId; + consoleCommand(cmd, false); + closeStackPage(); +} + +void Job::cancelJob() +{ + if (QMessageBox::warning(this, "Bat", + tr("Are you sure you want to cancel this job?"), + QMessageBox::Ok | QMessageBox::Cancel) + == QMessageBox::Cancel) { return; } + + QString cmd("cancel jobid="); + cmd += m_jobId; + consoleCommand(cmd, false); +} + +void Job::getFont() +{ + QFont font = textJobLog->font(); + + QString dirname; + m_console->getDirResName(dirname); + QSettings settings(dirname, "bat"); + settings.beginGroup("Console"); + font.setFamily(settings.value("consoleFont", "Courier").value()); + font.setPointSize(settings.value("consolePointSize", 10).toInt()); + font.setFixedPitch(settings.value("consoleFixedPitch", true).toBool()); + settings.endGroup(); + textJobLog->setFont(font); +} + +void Job::populateAll() +{ +// Pmsg0(50, "populateAll()\n"); + populateText(); + populateForm(); + populateVolumes(); +} + +/* + * Populate the text in the window + * TODO: Just append new text instead of clearing the window + */ +void Job::populateText() +{ + textJobLog->clear(); + QString query; + query = "SELECT Time, LogText FROM Log WHERE JobId='" + m_jobId + "' order by Time"; + + /* This could be a log item */ + if (mainWin->m_sqlDebug) { + Pmsg1(000, "Log query cmd : %s\n", query.toUtf8().data()); + } + + QStringList results; + if (m_console->sql_cmd(query, results)) { + + if (!results.size()) { + QMessageBox::warning(this, tr("Bat"), + tr("There were no results!\n" + "It is possible you may need to add \"catalog = all\" " + "to the Messages resource for this job.\n"), QMessageBox::Ok); + return; + } + + QString jobstr("JobId "); /* FIXME: should this be translated ? */ + jobstr += m_jobId; + + QString htmlbuf("

");
+  
+      /* Iterate through the lines of results. */
+      QString field;
+      QStringList fieldlist;
+      QString lastTime;
+      QString lastSvc;
+      foreach (QString resultline, results) {
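+         /* each result line is "Time<tab>LogText", matching the columns selected above */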
+         fieldlist = resultline.split("\t");
+         
+         if (fieldlist.size() < 2)
+            continue;
+
+         QString curTime = fieldlist[0].trimmed();
+
+         field = fieldlist[1].trimmed();
+         int colon = field.indexOf(":");
+         if (colon > 0) {
+            /* the LogText field looks like "service JobId xxxx: message..."
+             * we split at ':' then remove the "JobId xxxx" string (always the same) */ 
+            QString curSvc(field.left(colon).replace(jobstr,"").trimmed());
+            if (curSvc == lastSvc  && curTime == lastTime) {
+               curTime.clear();
+               curSvc.clear(); 
+            } else {
+               lastTime = curTime;
+               lastSvc = curSvc;
+            }
+//          htmlbuf += "
"; + htmlbuf += "\n" + curSvc + " "; + + /* rest of string is marked as pre-formatted (here trimming should + * be avoided, to preserve original formatting) */ + QString msg(field.mid(colon+2)); + if (msg.startsWith( tr("Error:")) ) { /* FIXME: should really be translated ? */ + /* error msg, use a specific class */ + htmlbuf += "
" + msg + "
";
+            } else {
+               htmlbuf += msg ;
+            }
+         } else {
+            /* non standard string, place as-is */
+            if (curTime == lastTime) {
+               curTime.clear();
+            } else {
+               lastTime = curTime;
+            }
+//          htmlbuf += "
"; + htmlbuf += "\n" + field ; + } + + } /* foreach resultline */ + + htmlbuf += ""; + + /* full text ready. Here a custom sheet is used to align columns */ + QString logSheet(".err {color:#FF0000;}"); + textJobLog->document()->setDefaultStyleSheet(logSheet); + textJobLog->document()->setHtml(htmlbuf); + textJobLog->moveCursor(QTextCursor::Start); + + } /* if results from query */ + +} + +void Job::storeBwLimit(int val) +{ + m_bwlimit = val; +} + +void Job::updateRunInfo() +{ + QString cmd; + QStringList results; + QStringList lst; + bool parseit=false; + +#ifdef xxx + /* This doesn't seem like the right thing to do */ + if (m_bwlimit >= 100) { + cmd = QString("setbandwidth limit=" + QString::number(m_bwlimit) + + " jobid=" + m_jobId); + m_console->dir_cmd(cmd, results); + results.clear(); + m_bwlimit = 0; + } +#endif + + cmd = QString(".status client=\"" + m_client + "\" running"); +/* + * JobId 5 Job backup.2010-12-21_09.28.17_03 is running. + * VSS Full Backup Job started: 21-Dec-10 09:28 + * Files=4 Bytes=610,976 Bytes/sec=87,282 Errors=0 + * Files Examined=4 + * Processing file: /tmp/regress/build/po/de.po + * SDReadSeqNo=5 fd=5 + * + * Or + * JobId=5 + * Job=backup.2010-12-21_09.28.17_03 + * VSS=1 + * Files=4 + * Bytes=610976 + * + */ + QRegExp jobline("(JobId) (\\d+) Job "); + QRegExp itemline("([\\w /]+)[:=]\\s*(.+)"); + QRegExp filesline("Files: Examined=([\\d,]+) Backed up=([\\d,])"); + QRegExp oldline("Files=([\\d,]+) Bytes=([\\d,]+) Bytes/sec=([\\d,]+) Errors=([\\d,]+)"); + QRegExp restoreline("Files: Restored=([\\d,]+) Expected=([\\d,]+) Completed=([\\d,]+)%"); + QRegExp restoreline2("Files Examined=([\\d,]+) Expected Files=([\\d,]+) Percent Complete=([\\d,]+)"); + + QString com(","); + QString empty(""); + + if (m_console->dir_cmd(cmd, results)) { + foreach (QString mline, results) { + foreach (QString line, mline.split("\n")) { + line = line.trimmed(); + if (oldline.indexIn(line) >= 0) { + if (parseit) { + lst = oldline.capturedTexts(); + label_JobErrors->setText(lst[4]); + label_Speed->setText(convertBytesSI(lst[3].replace(com, empty).toULongLong())+"/s"); + label_JobFiles->setText(lst[1]); + label_JobBytes->setText(convertBytesSI(lst[2].replace(com, empty).toULongLong())); + } + continue; + + } else if (filesline.indexIn(line) >= 0) { + if (parseit) { + lst = filesline.capturedTexts(); // Will also catch Backed up + label_FilesExamined->setText(lst[1]); + } + continue; + +// TODO: Need to be fixed +// } else if (restoreline2.indexIn(line) >= 0) { +// if (parseit) { +// lst = filesline.capturedTexts(); +// label_FilesExamined->setText(lst[1]); // Can also handle Expected and Completed +// } +// continue; + + } else if (jobline.indexIn(line) >= 0) { + lst = jobline.capturedTexts(); + lst.removeFirst(); + + } else if (itemline.indexIn(line) >= 0) { + lst = itemline.capturedTexts(); + lst.removeFirst(); + + } else { + if (mainWin->m_miscDebug) + Pmsg1(0, "bad line=%s\n", line.toUtf8().data()); + continue; + } + if (lst.count() < 2) { + if (mainWin->m_miscDebug) + Pmsg2(0, "bad line=%s count=%d\n", line.toUtf8().data(), lst.count()); + } + if (lst[0] == "JobId") { + if (lst[1] == m_jobId) { + parseit = true; + } else { + parseit = false; + } + } + if (!parseit) { + continue; + } + +// } else if (lst[0] == "Job") { +// grpRun->setTitle(lst[1]); + +// +// } else if (lst[0] == "VSS") { + +// } else if (lst[0] == "Level") { +// Info->setText(lst[1]); +// +// } else if (lst[0] == "JobType" || lst[0] == "Type") { +// +// } else if (lst[0] == "JobStarted" || lst[0] == "StartTime") { 
+// Started->setText(lst[1]); + +#ifdef xxx + if (lst[0] == "Bwlimit") { + int val = lst[1].toInt(); + if (val > 0) { + chk_Bwlimit->setChecked(true); + spin_Bwlimit->setEnabled(true); + spin_Bwlimit->setValue(lst[1].toInt()/1024); + } else { + chk_Bwlimit->setEnabled(false); + spin_Bwlimit->setEnabled(false); + spin_Bwlimit->setValue(0); + } +#endif + + if (lst[0] == "Errors") { + label_JobErrors->setText(lst[1]); + + } else if (lst[0] == "Bytes/sec") { + label_Speed->setText(convertBytesSI(lst[1].toULongLong())+"/s"); + + } else if (lst[0] == "Files" || lst[0] == "JobFiles") { + label_JobFiles->setText(lst[1]); + + } else if (lst[0] == "Bytes" || lst[0] == "JobBytes") { + label_JobBytes->setText(convertBytesSI(lst[1].toULongLong())); + + } else if (lst[0] == "Examined") { + label_FilesExamined->setText(lst[1]); + + } else if (lst[0] == "Files Examined") { + label_FilesExamined->setText(lst[1]); + + } else if (lst[0] == "Processing file") { + label_CurrentFile->setText(lst[1]); + } + } + } + } +} + +/* + * Populate the text in the window + */ +void Job::populateForm() +{ + QString stat, err; + char buf[256]; + QString query = + "SELECT JobId, Job.Name, Level, Client.Name, Pool.Name, FileSet," + "SchedTime, StartTime, EndTime, EndTime-StartTime AS Duration, " + "JobBytes, JobFiles, JobErrors, JobStatus, PurgedFiles " + "FROM Job JOIN Client USING (ClientId) " + "LEFT JOIN Pool ON (Job.PoolId = Pool.PoolId) " + "LEFT JOIN FileSet ON (Job.FileSetId = FileSet.FileSetId)" + "WHERE JobId=" + m_jobId; + QStringList results; + if (m_console->sql_cmd(query, results)) { + QString resultline, duration; + QStringList fieldlist; + + foreach (resultline, results) { // should have only one result + fieldlist = resultline.split("\t"); + if (fieldlist.size() != 15) { + Pmsg1(000, "Unexpected line %s", resultline.toUtf8().data()); + continue; + } + QStringListIterator fld(fieldlist); + label_JobId->setText(fld.next()); + label_Name->setText(fld.next()); + + label_Level->setText(job_level_to_str(fld.next()[0].toLatin1())); + + m_client = fld.next(); + label_Client->setText(m_client); + label_Pool->setText(fld.next()); + label_FileSet->setText(fld.next()); + label_SchedTime->setText(fld.next()); + label_StartTime->setText(fld.next()); + label_EndTime->setText(fld.next()); + duration = fld.next(); + /* + * Note: if we have a negative duration, it is because the EndTime + * is zero (i.e. the Job is still running). 
We should use + * duration = StartTime - current_time + */ + if (duration.left(1) == "-") { + duration = "0.0"; + } + label_Duration->setText(duration); + + label_JobBytes->setText(convertBytesSI(fld.next().toULongLong())); + label_JobFiles->setText(fld.next()); + err = fld.next(); + label_JobErrors->setText(err); + + stat = fld.next(); + if (stat == "T" && err.toInt() > 0) { + stat = "W"; + } + if (stat == "R") { + pbDelete->setVisible(false); + pbCancel->setVisible(true); + grpRun->setVisible(true); + if (!m_timer) { + m_timer = new QTimer(this); + connect(m_timer, SIGNAL(timeout()), this, SLOT(populateAll())); + m_timer->start(30000); + } + updateRunInfo(); + } else { + pbDelete->setVisible(true); + pbCancel->setVisible(false); + grpRun->setVisible(false); + if (m_timer) { + m_timer->stop(); + delete m_timer; + m_timer = NULL; + } + } + label_JobStatus->setPixmap(QPixmap(":/images/" + stat + ".png")); + jobstatus_to_ascii_gui(stat[0].toLatin1(), buf, sizeof(buf)); + stat = buf; + label_JobStatus->setToolTip(stat); + + chkbox_PurgedFiles->setCheckState(fld.next().toInt()?Qt::Checked:Qt::Unchecked); + } + } +} + +void Job::populateVolumes() +{ + + QString query = + "SELECT DISTINCT VolumeName, InChanger, Slot " + "FROM Job JOIN JobMedia USING (JobId) JOIN Media USING (MediaId) " + "WHERE JobId=" + m_jobId + " ORDER BY VolumeName "; + if (mainWin->m_sqlDebug) Pmsg1(0, "Query cmd : %s\n",query.toUtf8().data()); + + + QStringList results; + if (m_console->sql_cmd(query, results)) { + QString resultline; + QStringList fieldlist; + list_Volume->clear(); + foreach (resultline, results) { // should have only one result + fieldlist = resultline.split("\t"); + QStringListIterator fld(fieldlist); +// QListWidgetItem(QIcon(":/images/inchanger" + fld.next() + ".png"), +// fld.next(), list_Volume); + list_Volume->addItem(fld.next()); + } + } +} + +//QListWidgetItem ( const QIcon & icon, const QString & text, QListWidget * parent = 0, int type = Type ) diff --git a/src/qt-console/job/job.h b/src/qt-console/job/job.h new file mode 100644 index 00000000..a1c6ab23 --- /dev/null +++ b/src/qt-console/job/job.h @@ -0,0 +1,60 @@ +#ifndef _JOB_H_ +#define _JOB_H_ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ + +#if QT_VERSION >= 0x050000 +#include +#else +#include +#endif +#include "ui_job.h" +#include "console.h" + +class Job : public Pages, public Ui::JobForm +{ + Q_OBJECT + +public: + Job(QString &jobId, QTreeWidgetItem *parentTreeWidgetItem); + +public slots: + void populateAll(); + void deleteJob(); + void cancelJob(); + void showInfoVolume(QListWidgetItem *); + void rerun(); + void storeBwLimit(int val); + +private slots: + +private: + void updateRunInfo(); + void populateText(); + void populateForm(); + void populateVolumes(); + void getFont(); + QTextCursor *m_cursor; + QString m_jobId; + QString m_client; + QTimer *m_timer; + int m_bwlimit; +}; + +#endif /* _JOB_H_ */ diff --git a/src/qt-console/job/job.ui b/src/qt-console/job/job.ui new file mode 100644 index 00000000..a60cb747 --- /dev/null +++ b/src/qt-console/job/job.ui @@ -0,0 +1,776 @@ + + + JobForm + + + + 0 + 0 + 975 + 631 + + + + Form + + + + + + + + + + Cancel + + + + :/images/A.png:/images/A.png + + + true + + + + + + + Delete + + + + :/images/purge.png:/images/purge.png + + + true + + + + + + + false + + + View errors for this Job + + + Errors + + + + :/images/zoom.png:/images/zoom.png + + + true + + + + + + + false + + + Media + + + + :/images/zoom.png:/images/zoom.png + + + true + + + + + + + false + + + History + + + + :/images/zoom.png:/images/zoom.png + + + true + + + + + + + + 0 + 0 + + + + Run again + + + + :/images/R.png:/images/R.png + + + true + + + + + + + false + + + Read doc + + + true + + + + + + + false + + + FileSet + + + + :/images/zoom.png:/images/zoom.png + + + true + + + + + + + false + + + Stats + + + + :/images/zoom.png:/images/zoom.png + + + true + + + + + + + Refresh + + + + :/images/view-refresh.png:/images/view-refresh.png + + + true + + + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + + + + + + 0 + 0 + + + + + 230 + 180 + + + + + 230 + 180 + + + + Basic Information + + + + + + JobId: + + + + + + + 2 + + + Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse + + + + + + + Job Name: + + + + + + + Test + + + Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse + + + + + + + Level: + + + + + + + VirtualFull + + + Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse + + + + + + + Client: + + + + + + + client-fd + + + Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse + + + + + + + FileSet: + + + + + + + TheFileSet + + + Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse + + + + + + + Pool: + + + + + + + ThePool + + + Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse + + + + + + + + + + + 0 + 0 + + + + + 160 + 180 + + + + + 160 + 180 + + + + Status + + + + + + Status: + + + + + + + + + + :/images/T.png + + + false + + + + + + + Errors: + + + + + + + 0 + + + Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse + + + + + + + Files: + + + + + + + 1,924 + + + Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse + + + + + + + Bytes: + + + + + + + 109 MB + + + Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse + + + + + + + Purged: + + + + + + + false + + + + + + true + + + + + + + + + + + 0 + 0 + + + + + 260 + 180 + + + + + 260 + 180 + + + + Times + + + + + + Sched Time: + + + + + + + 2009-07-31 00:10:00 + + + Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse + + + + + + + Start Time: + + + + + + + 2009-07-31 00:10:00 + + + Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse + + + + + + + End Time: + + + + + + + 2009-07-31 00:20:00 + + + Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse + + + + + + + Duration: + + + + + + + 00:10:00 + + + 
Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse + + + + + + + + + + + 0 + 0 + + + + + 170 + 181 + + + + + 170 + 181 + + + + + 170 + 180 + + + + Volume Used + + + + + + + 0 + 0 + + + + + 149 + 140 + + + + + 149 + 140 + + + + + 149 + 140 + + + + + Vol0001 + + + + :/images/inflag1.png:/images/inflag1.png + + + + + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + + + + + true + + + + 500 + 0 + + + + Running Information + + + + + + Speed: + + + + + + + Files Examined: + + + + + + + Current File: + + + + + + + + 250 + 0 + + + + /var/www/bacula/spool + + + + + + + 100,000 + + + + + + + 100 MB/s + + + + + + + false + + + kB/s + + + 100 + + + 200000 + + + 100 + + + 200000 + + + + + + + Bandwidth Limit: + + + + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + + + <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> +<html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal;"> +<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:'DejaVu Sans'; font-size:10pt;"></p></body></html> + + + + + + + + + + + chk_Bwlimit + clicked(bool) + spin_Bwlimit + setEnabled(bool) + + + 51 + 324 + + + 302 + 328 + + + + + diff --git a/src/qt-console/jobgraphs/jobplot.cpp b/src/qt-console/jobgraphs/jobplot.cpp new file mode 100644 index 00000000..f3a938f7 --- /dev/null +++ b/src/qt-console/jobgraphs/jobplot.cpp @@ -0,0 +1,571 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +/* + * JobPlots Class + * + * Dirk Bartley, March 2007 + * + */ + +#include "bat.h" +#if QT_VERSION >= 0x050000 +#include +#else +#include +#endif +#include "util/comboutil.h" +#include "jobgraphs/jobplot.h" + + +JobPlotPass::JobPlotPass() +{ + use = false; +} + +JobPlotPass& JobPlotPass::operator=(const JobPlotPass &cp) +{ + use = cp.use; + recordLimitCheck = cp.recordLimitCheck; + daysLimitCheck = cp.daysLimitCheck; + recordLimitSpin = cp.recordLimitSpin; + daysLimitSpin = cp.daysLimitSpin; + jobCombo = cp.jobCombo; + clientCombo = cp.clientCombo; + volumeCombo = cp.volumeCombo; + fileSetCombo = cp.fileSetCombo; + purgedCombo = cp.purgedCombo; + levelCombo = cp.levelCombo; + statusCombo = cp.statusCombo; + return *this; +} + +/* + * Constructor for the controls class which inherits QScrollArea and a ui header + */ +JobPlotControls::JobPlotControls() +{ + setupUi(this); +} + +/* + * Constructor, this class does not inherit anything but pages. 
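+ * The JobPlotPass argument carries any filter values preselected in JobList.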
+ */ +JobPlot::JobPlot(QTreeWidgetItem *parentTreeWidgetItem, JobPlotPass &passVals) + : Pages() +{ + setupUserInterface(); + pgInitialize(tr("JobPlot"), parentTreeWidgetItem); + readSplitterSettings(); + QTreeWidgetItem* thisitem = mainWin->getFromHash(this); + thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/applications-graphics.png"))); + m_drawn = false; + + /* this invokes the pass values = operator function */ + m_pass = passVals; + dockPage(); + /* If the values of the controls are predetermined (from joblist), then set + * this class as current window at the front of the stack */ + if (m_pass.use) + setCurrent(); + m_jobPlot->replot(); +} + +/* + * Kill, crush Destroy + */ +JobPlot::~JobPlot() +{ + if (m_drawn) + writeSettings(); + m_pjd.clear(); +} + +/* + * This is called when the page selector has this page selected + */ +void JobPlot::currentStackItem() +{ + if (!m_drawn) { + setupControls(); + reGraph(); + m_drawn=true; + } +} + +/* + * Slot for the refresh push button, also called from constructor. + */ +void JobPlot::reGraph() +{ + /* clear m_pjd */ + m_pjd.clear(); + runQuery(); + m_jobPlot->clear(); + addCurve(); + m_jobPlot->replot(); +} + +/* + * Setup the control widgets for the graph, this are the objects from JobPlotControls + */ +void JobPlot::setupControls() +{ + QStringList graphType = QStringList() << /* tr("Fitted") <<*/ tr("Sticks") + << tr("Lines") << tr("Steps") << tr("None"); + controls->plotTypeCombo->addItems(graphType); + + fillSymbolCombo(controls->fileSymbolTypeCombo); + fillSymbolCombo(controls->byteSymbolTypeCombo); + + readControlSettings(); + + controls->fileCheck->setCheckState(Qt::Checked); + controls->byteCheck->setCheckState(Qt::Checked); + connect(controls->plotTypeCombo, SIGNAL(currentIndexChanged(QString)), this, SLOT(setPlotType(QString))); + connect(controls->fileSymbolTypeCombo, SIGNAL(currentIndexChanged(int)), this, SLOT(setFileSymbolType(int))); + connect(controls->byteSymbolTypeCombo, SIGNAL(currentIndexChanged(int)), this, SLOT(setByteSymbolType(int))); + connect(controls->fileCheck, SIGNAL(stateChanged(int)), this, SLOT(fileCheckChanged(int))); + connect(controls->byteCheck, SIGNAL(stateChanged(int)), this, SLOT(byteCheckChanged(int))); + connect(controls->refreshButton, SIGNAL(pressed()), this, SLOT(reGraph())); + + controls->clientComboBox->addItem(tr("Any")); + controls->clientComboBox->addItems(m_console->client_list); + + QStringList volumeList; + getVolumeList(volumeList); + controls->volumeComboBox->addItem(tr("Any")); + controls->volumeComboBox->addItems(volumeList); + controls->jobComboBox->addItem(tr("Any")); + controls->jobComboBox->addItems(m_console->job_list); + + levelComboFill(controls->levelComboBox); + + boolComboFill(controls->purgedComboBox); + + controls->fileSetComboBox->addItem(tr("Any")); + controls->fileSetComboBox->addItems(m_console->fileset_list); + QStringList statusLongList; + getStatusList(statusLongList); + controls->statusComboBox->addItem(tr("Any")); + controls->statusComboBox->addItems(statusLongList); + + if (m_pass.use) { + controls->limitCheckBox->setCheckState(m_pass.recordLimitCheck); + controls->limitSpinBox->setValue(m_pass.recordLimitSpin); + controls->daysCheckBox->setCheckState(m_pass.daysLimitCheck); + controls->daysSpinBox->setValue(m_pass.daysLimitSpin); + + comboSel(controls->jobComboBox, m_pass.jobCombo); + comboSel(controls->clientComboBox, m_pass.clientCombo); + comboSel(controls->volumeComboBox, m_pass.volumeCombo); + comboSel(controls->fileSetComboBox, m_pass.fileSetCombo); + 
comboSel(controls->purgedComboBox, m_pass.purgedCombo); + comboSel(controls->levelComboBox, m_pass.levelCombo); + comboSel(controls->statusComboBox, m_pass.statusCombo); + + } else { + /* Set Defaults for check and spin for limits */ + controls->limitCheckBox->setCheckState(mainWin->m_recordLimitCheck ? Qt::Checked : Qt::Unchecked); + controls->limitSpinBox->setValue(mainWin->m_recordLimitVal); + controls->daysCheckBox->setCheckState(mainWin->m_daysLimitCheck ? Qt::Checked : Qt::Unchecked); + controls->daysSpinBox->setValue(mainWin->m_daysLimitVal); + } +} + +/* + * Setup the control widgets for the graph, this are the objects from JobPlotControls + */ +void JobPlot::runQuery() +{ + /* Set up query */ + QString query(""); + query += "SELECT DISTINCT " + " Job.Starttime AS JobStart," + " Job.Jobfiles AS FileCount," + " Job.JobBytes AS Bytes," + " Job.JobId AS JobId" + " FROM Job" + " JOIN Client ON (Client.ClientId=Job.ClientId)" + " JOIN Status ON (Job.JobStatus=Status.JobStatus)" + " LEFT OUTER JOIN FileSet ON (FileSet.FileSetId=Job.FileSetId)"; + + QStringList conditions; + comboCond(conditions, controls->jobComboBox, "Job.Name"); + comboCond(conditions, controls->clientComboBox, "Client.Name"); + int volumeIndex = controls->volumeComboBox->currentIndex(); + if ((volumeIndex != -1) && (controls->volumeComboBox->itemText(volumeIndex) != tr("Any"))) { + query += " LEFT OUTER JOIN JobMedia ON (JobMedia.JobId=Job.JobId)" + " LEFT OUTER JOIN Media ON (JobMedia.MediaId=Media.MediaId)"; + conditions.append("Media.VolumeName='" + controls->volumeComboBox->itemText(volumeIndex) + "'"); + } + comboCond(conditions, controls->fileSetComboBox, "FileSet.FileSet"); + boolComboCond(conditions, controls->purgedComboBox, "Job.PurgedFiles"); + levelComboCond(conditions, controls->levelComboBox, "Job.Level"); + comboCond(conditions, controls->statusComboBox, "Status.JobStatusLong"); + + /* If Limit check box For limit by days is checked */ + if (controls->daysCheckBox->checkState() == Qt::Checked) { + QDateTime stamp = QDateTime::currentDateTime().addDays(-controls->daysSpinBox->value()); + QString since = stamp.toString(Qt::ISODate); + conditions.append("Job.Starttime>'" + since + "'"); + } + bool first = true; + foreach (QString condition, conditions) { + if (first) { + query += " WHERE " + condition; + first = false; + } else { + query += " AND " + condition; + } + } + /* Descending */ + query += " ORDER BY Job.Starttime DESC, Job.JobId DESC"; + /* If Limit check box for limit records returned is checked */ + if (controls->limitCheckBox->checkState() == Qt::Checked) { + QString limit; + limit.setNum(controls->limitSpinBox->value()); + query += " LIMIT " + limit; + } + + if (mainWin->m_sqlDebug) { + Pmsg1(000, "Query cmd : %s\n",query.toUtf8().data()); + } + QString resultline; + QStringList results; + if (m_console->sql_cmd(query, results)) { + + QString field; + QStringList fieldlist; + + int row = 0; + /* Iterate through the record returned from the query */ + foreach (resultline, results) { + PlotJobData *plotJobData = new PlotJobData(); + fieldlist = resultline.split("\t"); + int column = 0; + QString statusCode(""); + /* Iterate through fields in the record */ + foreach (field, fieldlist) { + field = field.trimmed(); /* strip leading & trailing spaces */ + if (column == 0) { + plotJobData->dt = QDateTime::fromString(field, mainWin->m_dtformat); + } else if (column == 1) { + plotJobData->files = field.toDouble(); + } else if (column == 2) { + plotJobData->bytes = field.toDouble(); + } + column++; + 
m_pjd.prepend(plotJobData); + } + row++; + } + } + if ((controls->volumeComboBox->itemText(volumeIndex) != tr("Any")) && (results.count() == 0)){ + /* for context sensitive searches, let the user know if there were no + * * results */ + QMessageBox::warning(this, "Bat", + tr("The Jobs query returned no results.\n" + "Press OK to continue?"), QMessageBox::Ok ); + } +} + +/* + * The user interface that used to be in the ui header. I wanted to have a + * scroll area which is not in designer. + */ +void JobPlot::setupUserInterface() +{ + QSizePolicy sizePolicy(static_cast(1), static_cast(5)); + sizePolicy.setHorizontalStretch(0); + sizePolicy.setVerticalStretch(0); + sizePolicy.setVerticalStretch(0); + sizePolicy.setVerticalPolicy(QSizePolicy::Ignored); + sizePolicy.setHorizontalPolicy(QSizePolicy::Ignored); + m_gridLayout = new QGridLayout(this); + m_gridLayout->setSpacing(6); + m_gridLayout->setMargin(9); + m_gridLayout->setObjectName(QString::fromUtf8("m_gridLayout")); + m_splitter = new QSplitter(this); + m_splitter->setObjectName(QString::fromUtf8("m_splitter")); + m_splitter->setOrientation(Qt::Horizontal); + m_jobPlot = new QwtPlot(m_splitter); + m_jobPlot->setObjectName(QString::fromUtf8("m_jobPlot")); + m_jobPlot->setSizePolicy(sizePolicy); + m_jobPlot->setMinimumSize(QSize(0, 0)); + QScrollArea *area = new QScrollArea(m_splitter); + area->setObjectName(QString::fromUtf8("area")); + controls = new JobPlotControls(); + area->setWidget(controls); + + m_splitter->addWidget(m_jobPlot); + m_splitter->addWidget(area); + + m_gridLayout->addWidget(m_splitter, 0, 0, 1, 1); +} + +/* + * Add the curves to the plot + */ +void JobPlot::addCurve() +{ + m_jobPlot->setTitle(tr("Files and Bytes backed up")); + m_jobPlot->insertLegend(new QwtLegend(), QwtPlot::RightLegend); + + // Set axis titles + m_jobPlot->enableAxis(QwtPlot::yRight); + m_jobPlot->setAxisTitle(QwtPlot::yRight, tr("<-- Bytes Kb")); + m_jobPlot->setAxisTitle(m_jobPlot->xBottom, tr("date of backup -->")); + m_jobPlot->setAxisTitle(m_jobPlot->yLeft, tr("Number of Files -->")); + m_jobPlot->setAxisScaleDraw(QwtPlot::xBottom, new DateTimeScaleDraw()); + + // Insert new curves + m_fileCurve = new QwtPlotCurve( tr("Files") ); + m_fileCurve->setPen(QPen(Qt::red)); + m_fileCurve->setCurveType(m_fileCurve->Yfx); + m_fileCurve->setYAxis(QwtPlot::yLeft); + + m_byteCurve = new QwtPlotCurve(tr("Bytes")); + m_byteCurve->setPen(QPen(Qt::blue)); + m_byteCurve->setCurveType(m_byteCurve->Yfx); + m_byteCurve->setYAxis(QwtPlot::yRight); + setPlotType(controls->plotTypeCombo->currentText()); + setFileSymbolType(controls->fileSymbolTypeCombo->currentIndex()); + setByteSymbolType(controls->byteSymbolTypeCombo->currentIndex()); + + m_fileCurve->attach(m_jobPlot); + m_byteCurve->attach(m_jobPlot); + + // attach data + int size = m_pjd.count(); + int j = 0; +#if defined(__GNU_C) + double tval[size]; + double fval[size]; + double bval[size]; +#else + double *tval; + double *fval; + double *bval; + + tval = (double *)malloc(size * sizeof(double)); + fval = (double *)malloc(size * sizeof(double)); + bval = (double *)malloc(size * sizeof(double)); +#endif + + foreach (PlotJobData* plotJobData, m_pjd) { +// printf("%.0f %.0f %s\n", plotJobData->bytes, plotJobData->files, +// plotJobData->dt.toString(mainWin->m_dtformat).toUtf8().data()); + fval[j] = plotJobData->files; + bval[j] = plotJobData->bytes / 1024; + tval[j] = plotJobData->dt.toTime_t(); +// printf("%i %.0f %.0f %.0f\n", j, tval[j], fval[j], bval[j]); + j++; + } + m_fileCurve->setData(tval,fval,size); + 
m_byteCurve->setData(tval,bval,size); + + for (int year=2000; year<2010; year++) { + for (int month=1; month<=12; month++) { + QString monthBegin; + if (month > 9) { + QTextStream(&monthBegin) << year << "-" << month << "-01 00:00:00"; + } else { + QTextStream(&monthBegin) << year << "-0" << month << "-01 00:00:00"; + } + QDateTime mdt = QDateTime::fromString(monthBegin, mainWin->m_dtformat); + double monbeg = mdt.toTime_t(); + + // ...a vertical line at the first of each month + QwtPlotMarker *mX = new QwtPlotMarker(); + mX->setLabel(mdt.toString("MMM-d")); + mX->setLabelAlignment(Qt::AlignRight|Qt::AlignTop); + mX->setLineStyle(QwtPlotMarker::VLine); + QPen pen(Qt::darkGray); + pen.setStyle(Qt::DashDotDotLine); + mX->setLinePen(pen); + mX->setXValue(monbeg); + mX->attach(m_jobPlot); + } + } + +#if !defined(__GNU_C) + free(tval); + free(fval); + free(bval); +#endif +} + +/* + * slot to respond to the plot type combo changing + */ +void JobPlot::setPlotType(QString currentText) +{ + QwtPlotCurve::CurveStyle style = QwtPlotCurve::NoCurve; + if (currentText == tr("Fitted")) { + style = QwtPlotCurve::Lines; + m_fileCurve->setCurveAttribute(QwtPlotCurve::Fitted); + m_byteCurve->setCurveAttribute(QwtPlotCurve::Fitted); + } else if (currentText == tr("Sticks")) { + style = QwtPlotCurve::Sticks; + } else if (currentText == tr("Lines")) { + style = QwtPlotCurve::Lines; + m_fileCurve->setCurveAttribute(QwtPlotCurve::Fitted); + m_byteCurve->setCurveAttribute(QwtPlotCurve::Fitted); + } else if (currentText == tr("Steps")) { + style = QwtPlotCurve::Steps; + } else if (currentText == tr("None")) { + style = QwtPlotCurve::NoCurve; + } + m_fileCurve->setStyle(style); + m_byteCurve->setStyle(style); + m_jobPlot->replot(); +} + +void JobPlot::fillSymbolCombo(QComboBox *q) +{ + q->addItem( tr("Ellipse"), (int)QwtSymbol::Ellipse); + q->addItem( tr("Rect"), (int)QwtSymbol::Rect); + q->addItem( tr("Diamond"), (int)QwtSymbol::Diamond); + q->addItem( tr("Triangle"), (int)QwtSymbol::Triangle); + q->addItem( tr("DTrianle"), (int)QwtSymbol::DTriangle); + q->addItem( tr("UTriangle"), (int)QwtSymbol::UTriangle); + q->addItem( tr("LTriangle"), (int)QwtSymbol::LTriangle); + q->addItem( tr("RTriangle"), (int)QwtSymbol::RTriangle); + q->addItem( tr("Cross"), (int)QwtSymbol::Cross); + q->addItem( tr("XCross"), (int)QwtSymbol::XCross); + q->addItem( tr("HLine"), (int)QwtSymbol::HLine); + q->addItem( tr("Vline"), (int)QwtSymbol::VLine); + q->addItem( tr("Star1"), (int)QwtSymbol::Star1); + q->addItem( tr("Star2"), (int)QwtSymbol::Star2); + q->addItem( tr("Hexagon"), (int)QwtSymbol::Hexagon); + q->addItem( tr("None"), (int)QwtSymbol::NoSymbol); +} + + +/* + * slot to respond to the symbol type combo changing + */ +void JobPlot::setFileSymbolType(int index) +{ + setSymbolType(index, 0); +} + +void JobPlot::setByteSymbolType(int index) +{ + setSymbolType(index, 1); +} +void JobPlot::setSymbolType(int index, int type) +{ + QwtSymbol sym; + sym.setPen(QColor(Qt::black)); + sym.setSize(7); + + QVariant style; + if (0 == type) { + style = controls->fileSymbolTypeCombo->itemData(index); + sym.setStyle( (QwtSymbol::Style)style.toInt() ); + sym.setBrush(QColor(Qt::yellow)); + m_fileCurve->setSymbol(sym); + + } else { + style = controls->byteSymbolTypeCombo->itemData(index); + sym.setStyle( (QwtSymbol::Style)style.toInt() ); + sym.setBrush(QColor(Qt::blue)); + m_byteCurve->setSymbol(sym); + + } + m_jobPlot->replot(); +} + +/* + * slot to respond to the file check box changing state + */ +void JobPlot::fileCheckChanged(int newstate) +{ 
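+   /* detach or re-attach the file-count curve and its left Y axis to match
+    * the checkbox state, then redraw the plot */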
+ if (newstate == Qt::Unchecked) { + m_fileCurve->detach(); + m_jobPlot->enableAxis(QwtPlot::yLeft, false); + } else { + m_fileCurve->attach(m_jobPlot); + m_jobPlot->enableAxis(QwtPlot::yLeft); + } + m_jobPlot->replot(); +} + +/* + * slot to respond to the byte check box changing state + */ +void JobPlot::byteCheckChanged(int newstate) +{ + if (newstate == Qt::Unchecked) { + m_byteCurve->detach(); + m_jobPlot->enableAxis(QwtPlot::yRight, false); + } else { + m_byteCurve->attach(m_jobPlot); + m_jobPlot->enableAxis(QwtPlot::yRight); + } + m_jobPlot->replot(); +} + +/* + * Save user settings associated with this page + */ +void JobPlot::writeSettings() +{ + QSettings settings(m_console->m_dir->name(), "bat"); + settings.beginGroup("JobPlot"); + settings.setValue("m_splitterSizes", m_splitter->saveState()); + settings.setValue("fileSymbolTypeCombo", controls->fileSymbolTypeCombo->currentText()); + settings.setValue("byteSymbolTypeCombo", controls->byteSymbolTypeCombo->currentText()); + settings.setValue("plotTypeCombo", controls->plotTypeCombo->currentText()); + settings.endGroup(); +} + +/* + * Read settings values for Controls + */ +void JobPlot::readControlSettings() +{ + QSettings settings(m_console->m_dir->name(), "bat"); + settings.beginGroup("JobPlot"); + int fileSymbolTypeIndex = controls->fileSymbolTypeCombo->findText(settings.value("fileSymbolTypeCombo").toString(), Qt::MatchExactly); + if (fileSymbolTypeIndex == -1) fileSymbolTypeIndex = 2; + controls->fileSymbolTypeCombo->setCurrentIndex(fileSymbolTypeIndex); + int byteSymbolTypeIndex = controls->byteSymbolTypeCombo->findText(settings.value("byteSymbolTypeCombo").toString(), Qt::MatchExactly); + if (byteSymbolTypeIndex == -1) byteSymbolTypeIndex = 3; + controls->byteSymbolTypeCombo->setCurrentIndex(byteSymbolTypeIndex); + int plotTypeIndex = controls->plotTypeCombo->findText(settings.value("plotTypeCombo").toString(), Qt::MatchExactly); + if (plotTypeIndex == -1) plotTypeIndex = 2; + controls->plotTypeCombo->setCurrentIndex(plotTypeIndex); + settings.endGroup(); +} + +/* + * Read and restore user settings associated with this page + */ +void JobPlot::readSplitterSettings() +{ + QSettings settings(m_console->m_dir->name(), "bat"); + settings.beginGroup("JobPlot"); + if (settings.contains("m_splitterSizes")) { + m_splitter->restoreState(settings.value("m_splitterSizes").toByteArray()); + } + settings.endGroup(); +} diff --git a/src/qt-console/jobgraphs/jobplot.h b/src/qt-console/jobgraphs/jobplot.h new file mode 100644 index 00000000..b6bea40d --- /dev/null +++ b/src/qt-console/jobgraphs/jobplot.h @@ -0,0 +1,145 @@ +#ifndef _JOBPLOT_H_ +#define _JOBPLOT_H_ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Dirk Bartley, March 2007 + */ + +#if QT_VERSION >= 0x050000 +#include +#else +#include +#endif +#include "pages.h" +#include "ui_jobplotcontrols.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Structure to hold data items of jobs when and how much. + * If I worked at it I could eliminate this. It's just the way it evolved. + */ +struct PlotJobData +{ + double files; + double bytes; + QDateTime dt; +}; + +/* + * Class for the purpose of having a single object to pass data to the JobPlot + * Constructor. The other option was a constructor with this many passed + * values or some sort of code to parse a list. I liked this best at the time. + */ +class JobPlotPass +{ +public: + JobPlotPass(); + JobPlotPass& operator=(const JobPlotPass&); + bool use; + Qt::CheckState recordLimitCheck; + Qt::CheckState daysLimitCheck; + int recordLimitSpin; + int daysLimitSpin; + QString jobCombo; + QString clientCombo; + QString volumeCombo; + QString fileSetCombo; + QString purgedCombo; + QString levelCombo; + QString statusCombo; +}; + +/* + * Class to Change the display of the time scale to display dates. + */ +class DateTimeScaleDraw : public QwtScaleDraw +{ +public: + virtual QwtText label(double v) const + { + QDateTime dtlabel(QDateTime::fromTime_t((uint)v)); + return dtlabel.toString("M-d-yy"); + } +}; + +/* + * These are the user interface control widgets as a separate class. + * Separately for the purpos of having the controls in a Scroll Area. + */ +class JobPlotControls : public QWidget, public Ui::JobPlotControlsForm +{ + Q_OBJECT + +public: + JobPlotControls(); +}; + +/* + * The main class + */ +class JobPlot : public Pages +{ + Q_OBJECT + +public: + JobPlot(QTreeWidgetItem *parentTreeWidgetItem, JobPlotPass &); + ~JobPlot(); + virtual void currentStackItem(); + +private slots: + void setPlotType(QString); + void setFileSymbolType(int); + void setByteSymbolType(int); + void fileCheckChanged(int); + void byteCheckChanged(int); + void reGraph(); + +private: + void fillSymbolCombo(QComboBox *q); + void setSymbolType(int, int type); + void addCurve(); + void writeSettings(); + void readSplitterSettings(); + void readControlSettings(); + void setupControls(); + void runQuery(); + bool m_drawn; + JobPlotPass m_pass; + JobPlotControls* controls; + QList m_pjd; + QwtPlotCurve *m_fileCurve; + QwtPlotCurve *m_byteCurve; + /* from the user interface before using scroll area */ + void setupUserInterface(); + QGridLayout *m_gridLayout; + QSplitter *m_splitter; + QwtPlot *m_jobPlot; +}; + +#endif /* _JOBPLOT_H_ */ diff --git a/src/qt-console/jobgraphs/jobplotcontrols.ui b/src/qt-console/jobgraphs/jobplotcontrols.ui new file mode 100644 index 00000000..135511d8 --- /dev/null +++ b/src/qt-console/jobgraphs/jobplotcontrols.ui @@ -0,0 +1,337 @@ + + JobPlotControlsForm + + + + 0 + 0 + 308 + 535 + + + + + 5 + 0 + 0 + 0 + + + + + 0 + 0 + + + + Form + + + + 9 + + + 6 + + + + + Qt::Vertical + + + + 236 + 16 + + + + + + + + 0 + + + 6 + + + + + Qt::Horizontal + + + + 21 + 20 + + + + + + + + 0 + + + 6 + + + + + File Data + + + + + + + Byte Data + + + + + + + + + Qt::Horizontal + + + + 21 + 20 + + + + + + + + + + 0 + + + 6 + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + + 4 + 0 + 0 + 0 + + + + + 82 + 30 + + + + Refresh + + + :/images/view-refresh.png + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 5 + 0 + 0 + 0 + + + + 7 + + + + + + + + 5 + 0 + 0 + 0 + + + + 10000 + + + 1 + 
+ + 25 + + + + + + + + + + + + + Status + + + + + + + Level + + + + + + + Purged + + + + + + + + 16777215 + 20 + + + + FileSet + + + + + + + Volume + + + + + + + Client + + + + + + + Job + + + + + + + Days Limit + + + + + + + + 0 + 0 + 0 + 0 + + + + Record Limit + + + + + + + Byte Symbol Type + + + + + + + File Symbol Type + + + + + + + Graph Type + + + + + + + + diff --git a/src/qt-console/joblist/joblist.cpp b/src/qt-console/joblist/joblist.cpp new file mode 100644 index 00000000..4440722a --- /dev/null +++ b/src/qt-console/joblist/joblist.cpp @@ -0,0 +1,728 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Dirk Bartley, March 2007 + */ + +#include "bat.h" +#include +#include +#include "joblist.h" +#include "restore.h" +#include "job/job.h" +#include "joblog/joblog.h" +#ifdef HAVE_QWT +#include "jobgraphs/jobplot.h" +#endif +#include "util/fmtwidgetitem.h" +#include "util/comboutil.h" + +/* + * Constructor for the class + */ +JobList::JobList(const QString &mediaName, const QString &clientName, + const QString &jobName, const QString &filesetName, QTreeWidgetItem *parentTreeWidgetItem) + : Pages() +{ + setupUi(this); + m_name = "Jobs Run"; /* treeWidgetName has a virtual override in this class */ + m_mediaName = mediaName; + m_clientName = clientName; + m_jobName = jobName; + m_filesetName = filesetName; + pgInitialize("", parentTreeWidgetItem); + QTreeWidgetItem* thisitem = mainWin->getFromHash(this); + thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/emblem-system.png"))); + + m_resultCount = 0; + m_populated = false; + m_closeable = false; + if ((m_mediaName != "") || (m_clientName != "") || (m_jobName != "") || (m_filesetName != "")) { + m_closeable=true; + } + m_checkCurrentWidget = true; + + /* Set Defaults for check and spin for limits */ + limitCheckBox->setCheckState(mainWin->m_recordLimitCheck ? Qt::Checked : Qt::Unchecked); + limitSpinBox->setValue(mainWin->m_recordLimitVal); + daysCheckBox->setCheckState(mainWin->m_daysLimitCheck ? Qt::Checked : Qt::Unchecked); + daysSpinBox->setValue(mainWin->m_daysLimitVal); + + QGridLayout *gridLayout = new QGridLayout(this); + gridLayout->setSpacing(6); + gridLayout->setMargin(9); + gridLayout->setObjectName(QString::fromUtf8("gridLayout")); + + m_splitter = new QSplitter(Qt::Vertical, this); + QScrollArea *area = new QScrollArea(); + area->setObjectName(QString::fromUtf8("area")); + area->setWidget(frame); + area->setWidgetResizable(true); + m_splitter->addWidget(area); + m_splitter->addWidget(mp_tableWidget); + + gridLayout->addWidget(m_splitter, 0, 0, 1, 1); + createConnections(); + readSettings(); + if (m_closeable) { dockPage(); } +} + +/* + * Write the m_splitter settings in the destructor + */ +JobList::~JobList() +{ + writeSettings(); +} + +/* + * The Meat of the class. 
+ * This function will populate the QTableWidget, mp_tablewidget, with + * QTableWidgetItems representing the results of a query for what jobs exist on + * the media name passed from the constructor stored in m_mediaName. + */ +void JobList::populateTable() +{ + /* Can't do this in constructor because not neccesarily conected in constructor */ + prepareFilterWidgets(); + m_populated = true; + + Freeze frz(*mp_tableWidget); /* disable updating*/ + + /* Set up query */ + QString query; + fillQueryString(query); + + /* Set up the Header for the table */ + QStringList headerlist = (QStringList() + << tr("Job Id") << tr("Job Name") << tr("Client") << tr("Job Starttime") + << tr("Job Type") << tr("Job Level") << tr("Job Files") + << tr("Job Bytes") << tr("Job Status") << tr("Purged") << tr("File Set") + << tr("Pool Name") << tr("First Volume") << tr("VolCount")); + + m_jobIdIndex = headerlist.indexOf(tr("Job Id")); + m_purgedIndex = headerlist.indexOf(tr("Purged")); + m_typeIndex = headerlist.indexOf(tr("Job Type")); + m_statusIndex = headerlist.indexOf(tr("Job Status")); + m_startIndex = headerlist.indexOf(tr("Job Starttime")); + m_filesIndex = headerlist.indexOf(tr("Job Files")); + m_bytesIndex = headerlist.indexOf(tr("Job Bytes")); + m_levelIndex = headerlist.indexOf(tr("Job Level")); + m_nameIndex = headerlist.indexOf(tr("Job Name")); + m_filesetIndex = headerlist.indexOf(tr("File Set")); + m_clientIndex = headerlist.indexOf(tr("Client")); + + /* Initialize the QTableWidget */ + m_checkCurrentWidget = false; + mp_tableWidget->clear(); + m_checkCurrentWidget = true; + mp_tableWidget->setColumnCount(headerlist.size()); + mp_tableWidget->setHorizontalHeaderLabels(headerlist); + mp_tableWidget->horizontalHeader()->setHighlightSections(false); + mp_tableWidget->setSelectionBehavior(QAbstractItemView::SelectRows); + mp_tableWidget->setSortingEnabled(false); /* rows move on insert if sorting enabled */ + + if (mainWin->m_sqlDebug) { + Pmsg1(000, "Query cmd : %s\n",query.toUtf8().data()); + } + + QStringList results; + if (m_console->sql_cmd(query, results)) { + m_resultCount = results.count(); + + QStringList fieldlist; + mp_tableWidget->setRowCount(results.size()); + + int row = 0; + /* Iterate through the record returned from the query */ + QString resultline; + foreach (resultline, results) { + fieldlist = resultline.split("\t"); + if (fieldlist.size() < 13) + continue; /* some fields missing, ignore row */ + + TableItemFormatter jobitem(*mp_tableWidget, row); + + /* Iterate through fields in the record */ + QStringListIterator fld(fieldlist); + int col = 0; + + /* job id */ + jobitem.setNumericFld(col++, fld.next()); + + /* job name */ + jobitem.setTextFld(col++, fld.next()); + + /* client */ + jobitem.setTextFld(col++, fld.next()); + + /* job starttime */ + jobitem.setTextFld(col++, fld.next(), true); + + /* job type */ + jobitem.setJobTypeFld(col++, fld.next()); + + /* job level */ + jobitem.setJobLevelFld(col++, fld.next()); + + /* job files */ + jobitem.setNumericFld(col++, fld.next()); + + /* job bytes */ + jobitem.setBytesFld(col++, fld.next()); + + /* job status */ + jobitem.setJobStatusFld(col++, fld.next()); + + /* purged */ + jobitem.setBoolFld(col++, fld.next()); + + /* fileset */ + jobitem.setTextFld(col++, fld.next()); + + /* pool name */ + jobitem.setTextFld(col++, fld.next()); + + /* First Media */ + jobitem.setTextFld(col++, fld.next()); + + /* Medias count */ + jobitem.setNumericFld(col++, fld.next()); + row++; + } + } + /* set default sorting */ + 
mp_tableWidget->sortByColumn(m_jobIdIndex, Qt::DescendingOrder); + mp_tableWidget->setSortingEnabled(true); + + /* Resize the columns */ + mp_tableWidget->resizeColumnsToContents(); + mp_tableWidget->resizeRowsToContents(); + mp_tableWidget->verticalHeader()->hide(); + if ((m_mediaName != tr("Any")) && (m_resultCount == 0)){ + /* for context sensitive searches, let the user know if there were no + * results */ + QMessageBox::warning(this, "Bat", + tr("The Jobs query returned no results.\n" + "Press OK to continue?"), QMessageBox::Ok ); + } + + /* make read only */ + mp_tableWidget->setEditTriggers(QAbstractItemView::NoEditTriggers); +} + +void JobList::prepareFilterWidgets() +{ + if (!m_populated) { + clientComboBox->addItem(tr("Any")); + clientComboBox->addItems(m_console->client_list); + comboSel(clientComboBox, m_clientName); + + QStringList volumeList; + getVolumeList(volumeList); + volumeComboBox->addItem(tr("Any")); + volumeComboBox->addItems(volumeList); + comboSel(volumeComboBox, m_mediaName); + + jobComboBox->addItem(tr("Any")); + jobComboBox->addItems(m_console->job_list); + comboSel(jobComboBox, m_jobName); + + levelComboFill(levelComboBox); + + boolComboFill(purgedComboBox); + + fileSetComboBox->addItem(tr("Any")); + fileSetComboBox->addItems(m_console->fileset_list); + comboSel(fileSetComboBox, m_filesetName); + + poolComboBox->addItem(tr("Any")); + poolComboBox->addItems(m_console->pool_list); + + jobStatusComboFill(statusComboBox); + } +} + +void JobList::fillQueryString(QString &query) +{ + query = ""; + int volumeIndex = volumeComboBox->currentIndex(); + if (volumeIndex != -1) + m_mediaName = volumeComboBox->itemText(volumeIndex); + QString distinct = ""; + if (m_mediaName != tr("Any")) { distinct = "DISTINCT "; } + query += "SELECT " + distinct + "Job.JobId AS JobId, Job.Name AS JobName, " + " Client.Name AS Client," + " Job.Starttime AS JobStart, Job.Type AS JobType," + " Job.Level AS BackupLevel, Job.Jobfiles AS FileCount," + " Job.JobBytes AS Bytes, Job.JobStatus AS Status," + " Job.PurgedFiles AS Purged, FileSet.FileSet," + " Pool.Name AS Pool," + " (SELECT Media.VolumeName FROM JobMedia JOIN Media ON JobMedia.MediaId=Media.MediaId WHERE JobMedia.JobId=Job.JobId ORDER BY JobMediaId LIMIT 1) AS FirstVolume," + " (SELECT count(DISTINCT MediaId) FROM JobMedia WHERE JobMedia.JobId=Job.JobId) AS Volumes" + " FROM Job" + " JOIN Client ON (Client.ClientId=Job.ClientId)" + " LEFT OUTER JOIN FileSet ON (FileSet.FileSetId=Job.FileSetId) " + " LEFT OUTER JOIN Pool ON Job.PoolId = Pool.PoolId "; + QStringList conditions; + if (m_mediaName != tr("Any")) { + query += " LEFT OUTER JOIN JobMedia ON (JobMedia.JobId=Job.JobId) " + " LEFT OUTER JOIN Media ON (JobMedia.MediaId=Media.MediaId) "; + conditions.append("Media.VolumeName='" + m_mediaName + "'"); + } + + comboCond(conditions, clientComboBox, "Client.Name"); + comboCond(conditions, jobComboBox, "Job.Name"); + levelComboCond(conditions, levelComboBox, "Job.Level"); + jobStatusComboCond(conditions, statusComboBox, "Job.JobStatus"); + boolComboCond(conditions, purgedComboBox, "Job.PurgedFiles"); + comboCond(conditions, fileSetComboBox, "FileSet.FileSet"); + comboCond(conditions, poolComboBox, "Pool.Name"); + + /* If Limit check box For limit by days is checked */ + if (daysCheckBox->checkState() == Qt::Checked) { + QDateTime stamp = QDateTime::currentDateTime().addDays(-daysSpinBox->value()); + QString since = stamp.toString(Qt::ISODate); + conditions.append("Job.Starttime > '" + since + "'"); + } + if 
(filterCopyCheckBox->checkState() == Qt::Checked) { + conditions.append("Job.Type != 'c'" ); + } + if (filterMigrationCheckBox->checkState() == Qt::Checked) { + conditions.append("Job.Type != 'g'" ); + } + bool first = true; + foreach (QString condition, conditions) { + if (first) { + query += " WHERE " + condition; + first = false; + } else { + query += " AND " + condition; + } + } + /* Descending */ + query += " ORDER BY Job.JobId DESC"; + /* If Limit check box for limit records returned is checked */ + if (limitCheckBox->checkState() == Qt::Checked) { + QString limit; + limit.setNum(limitSpinBox->value()); + query += " LIMIT " + limit; + } +} + +/* + * When the treeWidgetItem in the page selector tree is singleclicked, Make sure + * The tree has been populated. + */ +void JobList::PgSeltreeWidgetClicked() +{ + if (!m_populated) { + populateTable(); + /* Lets make sure the splitter is not all the way to size index 0 == 0 */ + QList sizes = m_splitter->sizes(); + if (sizes[0] == 0) { + int frameMax = frame->maximumHeight(); + int sizeSum = 0; + foreach(int size, sizes) { sizeSum += size; } + int tabHeight = mainWin->tabWidget->geometry().height(); + sizes[0] = frameMax; + sizes[1] = tabHeight - frameMax; + m_splitter->setSizes(sizes); + } + } + if (!isOnceDocked()) { + dockPage(); + } +} + +/* + * Virtual function override of pages function which is called when this page + * is visible on the stack + */ +void JobList::currentStackItem() +{ +/* if (!m_populated) populate every time user comes back to this object */ + populateTable(); +} + +/* + * Virtual Function to return the name for the medialist tree widget + */ +void JobList::treeWidgetName(QString &desc) +{ + if (m_mediaName != "" ) { + desc = tr("Jobs Run on Volume %1").arg(m_mediaName); + } else if (m_clientName != "" ) { + desc = tr("Jobs Run from Client %1").arg(m_clientName); + } else if (m_jobName != "" ) { + desc = tr("Jobs Run of Job %1").arg(m_jobName); + } else if (m_filesetName != "" ) { + desc = tr("Jobs Run with fileset %1").arg(m_filesetName); + } else { + desc = tr("Jobs Run"); + } +} + +/* + * Function to create connections for context sensitive menu for this and + * the page selector + */ +void JobList::createConnections() +{ + /* connect to the action specific to this pages class that shows up in the + * page selector tree */ + connect(actionRefreshJobList, SIGNAL(triggered()), this, SLOT(populateTable())); + connect(refreshButton, SIGNAL(pressed()), this, SLOT(populateTable())); +#ifdef HAVE_QWT + connect(graphButton, SIGNAL(pressed()), this, SLOT(graphTable())); +#else + graphButton->setEnabled(false); + graphButton->setVisible(false); +#endif + /* for the selectionChanged to maintain m_currentJob and a delete selection */ + connect(mp_tableWidget, SIGNAL(itemSelectionChanged()), this, SLOT(selectionChanged())); + connect(mp_tableWidget, SIGNAL(itemDoubleClicked(QTableWidgetItem*)), this, SLOT(showInfoForJob())); + + /* Do what is required for the local context sensitive menu */ + + + /* setContextMenuPolicy is required */ + mp_tableWidget->setContextMenuPolicy(Qt::ActionsContextMenu); + + connect(actionListFilesOnJob, SIGNAL(triggered()), this, SLOT(consoleListFilesOnJob())); + connect(actionListJobMedia, SIGNAL(triggered()), this, SLOT(consoleListJobMedia())); + connect(actionDeleteJob, SIGNAL(triggered()), this, SLOT(consoleDeleteJob())); + connect(actionRestartJob, SIGNAL(triggered()), this, SLOT(consoleRestartJob())); + connect(actionPurgeFiles, SIGNAL(triggered()), this, SLOT(consolePurgeFiles())); + 
connect(actionRestoreFromJob, SIGNAL(triggered()), this, SLOT(preRestoreFromJob())); + connect(actionRestoreFromTime, SIGNAL(triggered()), this, SLOT(preRestoreFromTime())); + connect(actionShowLogForJob, SIGNAL(triggered()), this, SLOT(showLogForJob())); + connect(actionShowInfoForJob, SIGNAL(triggered()), this, SLOT(showInfoForJob())); + connect(actionCancelJob, SIGNAL(triggered()), this, SLOT(consoleCancelJob())); + connect(actionListJobTotals, SIGNAL(triggered()), this, SLOT(consoleListJobTotals())); + connect(m_splitter, SIGNAL(splitterMoved(int, int)), this, SLOT(splitterMoved(int, int))); + + m_contextActions.append(actionRefreshJobList); + m_contextActions.append(actionListJobTotals); +} + +/* + * Functions to respond to local context sensitive menu sending console commands + * If I could figure out how to make these one function passing a string, Yaaaaaa + */ +void JobList::consoleListFilesOnJob() +{ + QString cmd("list files jobid="); + cmd += m_currentJob; + if (mainWin->m_longList) { cmd.prepend("l"); } + consoleCommand(cmd); +} +void JobList::consoleListJobMedia() +{ + QString cmd("list jobmedia jobid="); + cmd += m_currentJob; + if (mainWin->m_longList) { cmd.prepend("l"); } + consoleCommand(cmd); +} + +void JobList::consoleListJobTotals() +{ + QString cmd("list jobtotals"); + if (mainWin->m_longList) { cmd.prepend("l"); } + consoleCommand(cmd); +} + +void JobList::consoleDeleteJob() +{ + if (QMessageBox::warning(this, "Bat", + tr("Are you sure you want to delete?? !!!.\n" +"This delete command is used to delete a Job record and all associated catalog" +" records that were created. This command operates only on the Catalog" +" database and has no effect on the actual data written to a Volume. This" +" command can be dangerous and we strongly recommend that you do not use" +" it unless you know what you are doing. The Job and all its associated" +" records (File and JobMedia) will be deleted from the catalog." + "Press OK to proceed with delete operation.?"), + QMessageBox::Ok | QMessageBox::Cancel) + == QMessageBox::Cancel) { return; } + + QString cmd("delete job jobid="); + cmd += m_selectedJobs; + consoleCommand(cmd, false); + populateTable(); +} + +void JobList::consoleRestartJob() +{ + QString cmd; + + cmd = tr("run job=\"%1\" client=\"%2\" level=%3").arg(m_jobName).arg(m_clientName).arg(m_levelName); + if (m_filesetName != "" && m_filesetName != "*None*") { + cmd += tr(" fileset=\"%1\"").arg(m_filesetName); + } + + if (mainWin->m_commandDebug) Pmsg1(000, "Run cmd : %s\n",cmd.toUtf8().data()); + consoleCommand(cmd, false); + populateTable(); +} + + + +void JobList::consolePurgeFiles() +{ + if (QMessageBox::warning(this, "Bat", + tr("Are you sure you want to purge ?? !!!.\n" +"The Purge command will delete associated Catalog database records from Jobs and" +" Volumes without considering the retention period. Purge works only on the" +" Catalog database and does not affect data written to Volumes. 
This command can" +" be dangerous because you can delete catalog records associated with current" +" backups of files, and we recommend that you do not use it unless you know what" +" you are doing.\n" + "Press OK to proceed with the purge operation?"), + QMessageBox::Ok | QMessageBox::Cancel) + == QMessageBox::Cancel) { return; } + + m_console->m_warningPrevent = true; + foreach(QString job, m_selectedJobsList) { + QString cmd("purge files jobid="); + cmd += job; + consoleCommand(cmd, false); + } + m_console->m_warningPrevent = false; + populateTable(); +} + +/* + * Subroutine to call preRestore to restore from a select job + */ +void JobList::preRestoreFromJob() +{ + new prerestorePage(m_currentJob, R_JOBIDLIST); +} + +/* + * Subroutine to call preRestore to restore from a select job + */ +void JobList::preRestoreFromTime() +{ + new prerestorePage(m_currentJob, R_JOBDATETIME); +} + +/* + * Subroutine to call class to show the log in the database from that job + */ +void JobList::showLogForJob() +{ + QTreeWidgetItem* pageSelectorTreeWidgetItem = mainWin->getFromHash(this); + new JobLog(m_currentJob, pageSelectorTreeWidgetItem); +} + +/* + * Subroutine to call class to show the log in the database from that job + */ +void JobList::showInfoForJob(QTableWidgetItem * /*item*/) +{ + QTreeWidgetItem* pageSelectorTreeWidgetItem = mainWin->getFromHash(this); + new Job(m_currentJob, pageSelectorTreeWidgetItem); +} + +/* + * Cancel a running job + */ +void JobList::consoleCancelJob() +{ + QString cmd("cancel jobid="); + cmd += m_currentJob; + consoleCommand(cmd); +} + +/* + * Graph this table + */ +void JobList::graphTable() +{ +#ifdef HAVE_QWT + JobPlotPass pass; + pass.recordLimitCheck = limitCheckBox->checkState(); + pass.daysLimitCheck = daysCheckBox->checkState(); + pass.recordLimitSpin = limitSpinBox->value(); + pass.daysLimitSpin = daysSpinBox->value(); + pass.jobCombo = jobComboBox->currentText(); + pass.clientCombo = clientComboBox->currentText(); + pass.volumeCombo = volumeComboBox->currentText(); + pass.fileSetCombo = fileSetComboBox->currentText(); + pass.purgedCombo = purgedComboBox->currentText(); + pass.levelCombo = levelComboBox->currentText(); + pass.statusCombo = statusComboBox->currentText(); + pass.use = true; + QTreeWidgetItem* pageSelectorTreeWidgetItem = mainWin->getFromHash(this); + new JobPlot(pageSelectorTreeWidgetItem, pass); +#endif +} + +/* + * Save user settings associated with this page + */ +void JobList::writeSettings() +{ + QSettings settings(m_console->m_dir->name(), "bat"); + settings.beginGroup(m_groupText); + settings.setValue(m_splitText, m_splitter->saveState()); + settings.setValue("FilterCopyCheckState", filterCopyCheckBox->checkState()); + settings.setValue("FilterMigrationCheckState", filterMigrationCheckBox->checkState()); + settings.endGroup(); +} + +/* + * Read and restore user settings associated with this page + */ +void JobList::readSettings() +{ + m_groupText = "JobListPage"; + m_splitText = "splitterSizes_2"; + QSettings settings(m_console->m_dir->name(), "bat"); + settings.beginGroup(m_groupText); + if (settings.contains(m_splitText)) { + m_splitter->restoreState(settings.value(m_splitText).toByteArray()); + } + filterCopyCheckBox->setCheckState((Qt::CheckState)settings.value("FilterCopyCheckState").toInt()); + filterMigrationCheckBox->setCheckState((Qt::CheckState)settings.value("FilterMigrationCheckState").toInt()); + settings.endGroup(); +} + +/* + * Function to fill m_selectedJobsCount and m_selectedJobs with selected values + */ +void 
JobList::selectionChanged() +{ + QList rowList; + QList sitems = mp_tableWidget->selectedItems(); + foreach (QTableWidgetItem *sitem, sitems) { + int row = sitem->row(); + if (!rowList.contains(row)) { + rowList.append(row); + } + } + + m_selectedJobs = ""; + m_selectedJobsList.clear(); + bool first = true; + foreach(int row, rowList) { + QTableWidgetItem * sitem = mp_tableWidget->item(row, m_jobIdIndex); + if (!first) m_selectedJobs.append(","); + else first = false; + m_selectedJobs.append(sitem->text()); + m_selectedJobsList.append(sitem->text()); + } + m_selectedJobsCount = rowList.count(); + if (m_selectedJobsCount > 1) { + QString text = QString( tr("Delete list of %1 Jobs")).arg(m_selectedJobsCount); + actionDeleteJob->setText(text); + text = QString( tr("Purge Files from list of %1 Jobs")).arg(m_selectedJobsCount); + actionPurgeFiles->setText(text); + } else { + actionDeleteJob->setText(tr("Delete Single Job")); + actionPurgeFiles->setText(tr("Purge Files from single job")); + } + + /* remove all actions */ + foreach(QAction* mediaAction, mp_tableWidget->actions()) { + mp_tableWidget->removeAction(mediaAction); + } + + /* Add Actions */ + mp_tableWidget->addAction(actionRefreshJobList); + if (m_selectedJobsCount == 1) { + mp_tableWidget->addAction(actionListFilesOnJob); + mp_tableWidget->addAction(actionListJobMedia); + mp_tableWidget->addAction(actionRestartJob); + mp_tableWidget->addAction(actionRestoreFromJob); + mp_tableWidget->addAction(actionRestoreFromTime); + mp_tableWidget->addAction(actionShowLogForJob); + mp_tableWidget->addAction(actionShowInfoForJob); + } + if (m_selectedJobsCount >= 1) { + mp_tableWidget->addAction(actionDeleteJob); + mp_tableWidget->addAction(actionPurgeFiles); + } + + /* Make Connections */ + if (m_checkCurrentWidget) { + int row = mp_tableWidget->currentRow(); + QTableWidgetItem* jobitem = mp_tableWidget->item(row, 0); + m_currentJob = jobitem->text(); /* get JobId */ + jobitem = mp_tableWidget->item(row, m_clientIndex); + m_clientName = jobitem->text(); /* get Client Name */ + jobitem = mp_tableWidget->item(row, m_nameIndex); + m_jobName = jobitem->text(); /* get Job Name */ + jobitem = mp_tableWidget->item(row, m_levelIndex); + m_levelName = jobitem->text(); /* get level */ + jobitem = mp_tableWidget->item(row, m_filesetIndex); + if (jobitem) { + m_filesetName = jobitem->text(); /* get FileSet Name */ + } else { + m_filesetName = ""; + } + + /* include purged action or not */ + jobitem = mp_tableWidget->item(row, m_purgedIndex); + QString purged = jobitem->text(); +/* mp_tableWidget->removeAction(actionPurgeFiles); + if (purged == tr("No") ) { + mp_tableWidget->addAction(actionPurgeFiles); + }*/ + + /* include restore from time and job action or not */ + jobitem = mp_tableWidget->item(row, m_typeIndex); + QString type = jobitem->text(); + if (m_selectedJobsCount == 1) { + mp_tableWidget->removeAction(actionRestoreFromJob); + mp_tableWidget->removeAction(actionRestoreFromTime); + if (type == tr("Backup")) { + mp_tableWidget->addAction(actionRestoreFromJob); + mp_tableWidget->addAction(actionRestoreFromTime); + } + } + + /* include cancel action or not */ + jobitem = mp_tableWidget->item(row, m_statusIndex); + QString status = jobitem->text(); + mp_tableWidget->removeAction(actionCancelJob); + if (status == tr("Running") || status == tr("Created, not yet running")) { + mp_tableWidget->addAction(actionCancelJob); + } + } +} + +/* + * Function to prevent the splitter from making index 0 of the size larger than it + * needs to be + */ +void 
JobList::splitterMoved(int /*pos*/, int /*index*/) +{ + int frameMax = frame->maximumHeight(); + QList sizes = m_splitter->sizes(); + int sizeSum = 0; + foreach(int size, sizes) { sizeSum += size; } + if (sizes[0] > frameMax) { + sizes[0] = frameMax; + sizes[1] = sizeSum - frameMax; + m_splitter->setSizes(sizes); + } +} diff --git a/src/qt-console/joblist/joblist.h b/src/qt-console/joblist/joblist.h new file mode 100644 index 00000000..d1033554 --- /dev/null +++ b/src/qt-console/joblist/joblist.h @@ -0,0 +1,101 @@ +#ifndef _JOBLIST_H_ +#define _JOBLIST_H_ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Dirk Bartley, March 2007 + */ + +#if QT_VERSION >= 0x050000 +#include +#else +#include +#endif +#include "ui_joblist.h" +#include "console.h" +#include "pages.h" + +class JobList : public Pages, public Ui::JobListForm +{ + Q_OBJECT + +public: + JobList(const QString &medianame, const QString &clientname, + const QString &jobname, const QString &filesetname, QTreeWidgetItem *); + ~JobList(); + virtual void PgSeltreeWidgetClicked(); + virtual void currentStackItem(); + int m_resultCount; + +public slots: + void populateTable(); + virtual void treeWidgetName(QString &); + void selectionChanged(); + +private slots: + void consoleListFilesOnJob(); + void consoleListJobMedia(); + void consoleListJobTotals(); + void consoleDeleteJob(); + void consoleRestartJob(); + void consolePurgeFiles(); + void preRestoreFromJob(); + void preRestoreFromTime(); + void showLogForJob(); + void showInfoForJob(QTableWidgetItem * item=NULL); + void consoleCancelJob(); + void graphTable(); + void splitterMoved(int pos, int index); + +private: + void createConnections(); + void writeSettings(); + void readSettings(); + void prepareFilterWidgets(); + void fillQueryString(QString &query); + QSplitter *m_splitter; + + QString m_groupText; + QString m_splitText; + QString m_mediaName; + QString m_clientName; + QString m_jobName; + QString m_filesetName; + QString m_currentJob; + QString m_levelName; + + bool m_populated; + bool m_checkCurrentWidget; + int m_jobIdIndex; + int m_purgedIndex; + int m_typeIndex; + int m_levelIndex; + int m_clientIndex; + int m_nameIndex; + int m_filesetIndex; + int m_statusIndex; + int m_startIndex; + int m_bytesIndex; + int m_filesIndex; + int m_selectedJobsCount; + QString m_selectedJobs; + QStringList m_selectedJobsList; +}; + +#endif /* _JOBLIST_H_ */ diff --git a/src/qt-console/joblist/joblist.ui b/src/qt-console/joblist/joblist.ui new file mode 100644 index 00000000..5d594755 --- /dev/null +++ b/src/qt-console/joblist/joblist.ui @@ -0,0 +1,486 @@ + + + JobListForm + + + + 0 + 0 + 696 + 456 + + + + Form + + + + + 60 + 10 + 457 + 131 + + + + + + + 50 + 190 + 541 + 171 + + + + + 900 + 172 + + + + QFrame::StyledPanel + + + QFrame::Raised + + + + + + 6 + + + 3 + + + + + 6 + + + 3 + + + + + Record Limit + + + + + + + 1 + + + 10000 + + + 25 + + + + + + + + + 6 + + + 3 + + + + + Days Limit + + + + + + + 7 + + + + + + 
+ + + + + 6 + + + 3 + + + + + 6 + + + 3 + + + + + Clients + + + + + + + + + + + + 6 + + + 3 + + + + + Volume + + + + + + + + + + + + + + 6 + + + 3 + + + + + 6 + + + 3 + + + + + Job + + + + + + + + + + + + 6 + + + 3 + + + + + Level + + + + + + + + + + + + + + 6 + + + 3 + + + + + 6 + + + 3 + + + + + Status + + + + + + + + + + + + 6 + + + 3 + + + + + Purged + + + + + + + + + + + + + + 3 + + + + + 6 + + + 3 + + + + + FileSet + + + + + + + + + + + + 6 + + + 3 + + + + + Pool + + + + + + + + + + + + + + + + 3 + + + 3 + + + + + Refresh + + + + :/images/view-refresh.png:/images/view-refresh.png + + + + + + + Graph + + + + :/images/applications-graphics.png:/images/applications-graphics.png + + + + + + + + + Filter Copy Jobs + + + + + + + Filter Migration Jobs + + + + + + + + + + + :/images/view-refresh.png:/images/view-refresh.png + + + Refresh Job List + + + Requery the director for the list of jobs. + + + + + + :/images/utilities-terminal.png:/images/utilities-terminal.png + + + List Files On Job + + + + + + :/images/utilities-terminal.png:/images/utilities-terminal.png + + + List Job Volumes + + + + + + :/images/utilities-terminal.png:/images/utilities-terminal.png + + + List Volumes + + + + + + :/images/weather-severe-alert.png:/images/weather-severe-alert.png + + + Delete Job + + + + + + :/images/weather-severe-alert.png:/images/weather-severe-alert.png + + + Purge Files + + + + + + :/images/weather-severe-alert.png:/images/weather-severe-alert.png + + + Restart Job + + + + + + :/images/restore.png:/images/restore.png + + + Restore From Job + + + + + + :/images/restore.png:/images/restore.png + + + Restore From Time + + + + + + :/images/joblog.png:/images/joblog.png + + + Show Job Log + + + + + + :/images/joblog.png:/images/joblog.png + + + Show Job Info + + + + + + :/images/unmark.png:/images/unmark.png + + + Cancel Currently Running Job + + + + + + :/images/utilities-terminal.png:/images/utilities-terminal.png + + + List Job Totals on Console + + + + + + + + diff --git a/src/qt-console/joblog/joblog.cpp b/src/qt-console/joblog/joblog.cpp new file mode 100644 index 00000000..46ae5546 --- /dev/null +++ b/src/qt-console/joblog/joblog.cpp @@ -0,0 +1,157 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ + +/* + * JobLog Class + * + * Dirk Bartley, March 2007 + * + */ + +#include "bat.h" +#include "joblog.h" + +JobLog::JobLog(QString &jobId, QTreeWidgetItem *parentTreeWidgetItem) : Pages() +{ + setupUi(this); + pgInitialize(tr("JobLog"), parentTreeWidgetItem); + QTreeWidgetItem* thisitem = mainWin->getFromHash(this); + thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/joblog.png"))); + m_cursor = new QTextCursor(textEdit->document()); + + m_jobId = jobId; + getFont(); + populateText(); + + dockPage(); + setCurrent(); +} + +void JobLog::getFont() +{ + QFont font = textEdit->font(); + + QString dirname; + m_console->getDirResName(dirname); + QSettings settings(dirname, "bat"); + settings.beginGroup("Console"); + font.setFamily(settings.value("consoleFont", "Courier").value()); + font.setPointSize(settings.value("consolePointSize", 10).toInt()); + font.setFixedPitch(settings.value("consoleFixedPitch", true).toBool()); + settings.endGroup(); + textEdit->setFont(font); +} + +/* + * Populate the text in the window + */ +void JobLog::populateText() +{ + QString query; + query = "SELECT Time, LogText FROM Log WHERE JobId='" + m_jobId + "' order by Time"; + + /* This could be a log item */ + if (mainWin->m_sqlDebug) { + Pmsg1(000, "Log query cmd : %s\n", query.toUtf8().data()); + } + + QStringList results; + if (m_console->sql_cmd(query, results)) { + + if (!results.size()) { + QMessageBox::warning(this, tr("Bat"), + tr("There were no results!\n" + "It is possible you may need to add \"catalog = all\" " + "to the Messages resource for this job.\n"), QMessageBox::Ok); + return; + } + + QString jobstr("JobId "); /* FIXME: should this be translated ? */ + jobstr += m_jobId; + + QString htmlbuf("" + tr("Log records for job %1").arg(m_jobId) ); + htmlbuf += "
" + curTime + "" + curTime + "
"; + + /* Iterate through the lines of results. */ + QString field; + QStringList fieldlist; + QString lastTime; + QString lastSvc; + foreach (QString resultline, results) { + fieldlist = resultline.split("\t"); + + if (fieldlist.size() < 2) + continue; + + htmlbuf +=""; + + QString curTime = fieldlist[0].trimmed(); + + field = fieldlist[1].trimmed(); + int colon = field.indexOf(":"); + if (colon > 0) { + /* string is like : ..." + * we split at ':' then remove the jobId xxxx string (always the same) */ + QString curSvc(field.left(colon).replace(jobstr,"").trimmed()); + if (curSvc == lastSvc && curTime == lastTime) { + curTime.clear(); + curSvc.clear(); + } else { + lastTime = curTime; + lastSvc = curSvc; + } + htmlbuf += ""; + htmlbuf += ""; + + /* rest of string is marked as pre-formatted (here trimming should + * be avoided, to preserve original formatting) */ + QString msg(field.mid(colon+2)); + if (msg.startsWith( tr("Error:")) ) { /* FIXME: should really be translated ? */ + /* error msg, use a specific class */ + htmlbuf += ""; + } else { + htmlbuf += ""; + } + } else { + /* non standard string, place as-is */ + if (curTime == lastTime) { + curTime.clear(); + } else { + lastTime = curTime; + } + htmlbuf += ""; + htmlbuf += ""; + } + + htmlbuf += ""; + + } /* foreach resultline */ + + htmlbuf += "
" + curTime + "

" + curSvc + "

" + msg + "
" + msg + "
" + curTime + "
" + field + "
"; + + /* full text ready. Here a custom sheet is used to align columns */ + QString logSheet("p,pre,.err {margin-left: 10px} .err {color:#FF0000;}"); + textEdit->document()->setDefaultStyleSheet(logSheet); + textEdit->document()->setHtml(htmlbuf); + textEdit->moveCursor(QTextCursor::Start); + + } /* if results from query */ + +} + diff --git a/src/qt-console/joblog/joblog.h b/src/qt-console/joblog/joblog.h new file mode 100644 index 00000000..3011ea80 --- /dev/null +++ b/src/qt-console/joblog/joblog.h @@ -0,0 +1,51 @@ +#ifndef _JOBLOG_H_ +#define _JOBLOG_H_ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Dirk Bartley, March 2007 + */ + +#if QT_VERSION >= 0x050000 +#include +#else +#include +#endif +#include "ui_joblog.h" +#include "console.h" + +class JobLog : public Pages, public Ui::JobLogForm +{ + Q_OBJECT + +public: + JobLog(QString &jobId, QTreeWidgetItem *parentTreeWidgetItem); + +public slots: + +private slots: + +private: + void populateText(); + void getFont(); + QTextCursor *m_cursor; + QString m_jobId; +}; + +#endif /* _JOBLOG_H_ */ diff --git a/src/qt-console/joblog/joblog.ui b/src/qt-console/joblog/joblog.ui new file mode 100644 index 00000000..a9239706 --- /dev/null +++ b/src/qt-console/joblog/joblog.ui @@ -0,0 +1,85 @@ + + JobLogForm + + + + 0 + 0 + 432 + 456 + + + + Job Log + + + + 9 + + + 6 + + + + + + 7 + 7 + 200 + 0 + + + + + 0 + 0 + + + + + 1 + 0 + + + + Qt::StrongFocus + + + false + + + + + + + + + + + + Qt::ScrollBarAsNeeded + + + QTextEdit::AutoNone + + + false + + + + + + QTextEdit::NoWrap + + + true + + + + + + + + + + diff --git a/src/qt-console/jobs/jobs.cpp b/src/qt-console/jobs/jobs.cpp new file mode 100644 index 00000000..a6df56e8 --- /dev/null +++ b/src/qt-console/jobs/jobs.cpp @@ -0,0 +1,264 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ + +/* + * Jobs Class + * + * Dirk Bartley, March 2007 + */ + +#include "bat.h" +#include "jobs/jobs.h" +#include "run/run.h" +#include "util/fmtwidgetitem.h" + +Jobs::Jobs() : Pages() +{ + setupUi(this); + m_name = tr("Jobs"); + pgInitialize(); + QTreeWidgetItem* thisitem = mainWin->getFromHash(this); + thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/run.png"))); + + /* tableWidget, Storage Tree Tree Widget inherited from ui_client.h */ + m_populated = false; + m_checkcurwidget = true; + m_closeable = false; + /* add context sensitive menu items specific to this classto the page + * selector tree. m_contextActions is QList of QActions */ + m_contextActions.append(actionRefreshJobs); + createContextMenu(); + + connect(tableWidget, SIGNAL(itemDoubleClicked(QTableWidgetItem*)), + this, SLOT(runJob())); +} + +Jobs::~Jobs() +{ +} + +/* + * The main meat of the class!! The function that querries the director and + * creates the widgets with appropriate values. + */ +void Jobs::populateTable() +{ + m_populated = true; + mainWin->waitEnter(); + + Freeze frz(*tableWidget); /* disable updating*/ + + QBrush blackBrush(Qt::black); + m_checkcurwidget = false; + tableWidget->clear(); + m_checkcurwidget = true; + QStringList headerlist = (QStringList() << tr("Job Name") + << tr("Pool") << tr("Messages") << tr("Client") + << tr("Storage") << tr("Level") << tr("Type") + << tr("FileSet") << tr("Catalog") << tr("Enabled") + << tr("Where")); + + m_typeIndex = headerlist.indexOf(tr("Type")); + + tableWidget->setColumnCount(headerlist.count()); + tableWidget->setHorizontalHeaderLabels(headerlist); + tableWidget->horizontalHeader()->setHighlightSections(false); + tableWidget->setRowCount(m_console->job_list.count()); + tableWidget->verticalHeader()->hide(); + tableWidget->setSelectionBehavior(QAbstractItemView::SelectRows); + tableWidget->setSelectionMode(QAbstractItemView::SingleSelection); + tableWidget->setSortingEnabled(false); /* rows move on insert if sorting enabled */ + + int row = 0; + foreach (QString jobName, m_console->job_list){ + job_defaults job_defs; + job_defs.job_name = jobName; + if (m_console->get_job_defaults(job_defs)) { + int col = 0; + TableItemFormatter jobsItem(*tableWidget, row); + jobsItem.setTextFld(col++, jobName); + jobsItem.setTextFld(col++, job_defs.pool_name); + jobsItem.setTextFld(col++, job_defs.messages_name); + jobsItem.setTextFld(col++, job_defs.client_name); + jobsItem.setTextFld(col++, job_defs.store_name); + jobsItem.setTextFld(col++, job_defs.level); + jobsItem.setTextFld(col++, job_defs.type); + jobsItem.setTextFld(col++, job_defs.fileset_name); + jobsItem.setTextFld(col++, job_defs.catalog_name); + jobsItem.setBoolFld(col++, job_defs.enabled); + jobsItem.setTextFld(col++, job_defs.where); + } + row++; + } + /* set default sorting */ + tableWidget->sortByColumn(headerlist.indexOf(tr("Job Name")), Qt::AscendingOrder); + tableWidget->setSortingEnabled(true); + + /* Resize rows and columns */ + tableWidget->resizeColumnsToContents(); + tableWidget->resizeRowsToContents(); + + /* make read only */ + tableWidget->setEditTriggers(QAbstractItemView::NoEditTriggers); + + mainWin->waitExit(); + dockPage(); +} + +/* + * When the treeWidgetItem in the page selector tree is singleclicked, Make sure + * The tree has been populated. 
+ */ +void Jobs::PgSeltreeWidgetClicked() +{ + if(!m_populated) { + populateTable(); + } +} + +/* + * Added to set the context menu policy based on currently active tableWidgetItem + * signaled by currentItemChanged + */ +void Jobs::tableItemChanged(QTableWidgetItem *currentwidgetitem, QTableWidgetItem *previouswidgetitem ) +{ + /* m_checkcurwidget checks to see if this is during a refresh, which will segfault */ + if (m_checkcurwidget && currentwidgetitem) { + /* The Previous item */ + if (previouswidgetitem) { /* avoid a segfault if first time */ + foreach(QAction* jobAction, tableWidget->actions()) { + tableWidget->removeAction(jobAction); + } + } + int currentRow = currentwidgetitem->row(); + QTableWidgetItem *currentrowzeroitem = tableWidget->item(currentRow, 0); + m_currentlyselected = currentrowzeroitem->text(); + QTableWidgetItem *currenttypeitem = tableWidget->item(currentRow, m_typeIndex); + QString type = currenttypeitem->text(); + + if (m_currentlyselected.length() != 0) { + /* set a hold variable to the client name in case the context sensitive + * menu is used */ + tableWidget->addAction(actionRefreshJobs); + tableWidget->addAction(actionConsoleListFiles); + tableWidget->addAction(actionConsoleListVolumes); + tableWidget->addAction(actionConsoleListNextVolume); + tableWidget->addAction(actionConsoleEnableJob); + tableWidget->addAction(actionConsoleDisableJob); + tableWidget->addAction(actionConsoleCancel); + tableWidget->addAction(actionJobListQuery); + tableWidget->addAction(actionRunJob); + } + } +} + +/* + * Setup a context menu + * Made separate from populate so that it would not create context menu over and + * over as the table is repopulated. + */ +void Jobs::createContextMenu() +{ + tableWidget->setContextMenuPolicy(Qt::ActionsContextMenu); + tableWidget->addAction(actionRefreshJobs); + connect(tableWidget, SIGNAL( + currentItemChanged(QTableWidgetItem *, QTableWidgetItem *)), + this, SLOT(tableItemChanged(QTableWidgetItem *, QTableWidgetItem *))); + /* connect to the action specific to this pages class */ + connect(actionRefreshJobs, SIGNAL(triggered()), this, + SLOT(populateTable())); + connect(actionConsoleListFiles, SIGNAL(triggered()), this, SLOT(consoleListFiles())); + connect(actionConsoleListVolumes, SIGNAL(triggered()), this, SLOT(consoleListVolume())); + connect(actionConsoleListNextVolume, SIGNAL(triggered()), this, SLOT(consoleListNextVolume())); + connect(actionConsoleEnableJob, SIGNAL(triggered()), this, SLOT(consoleEnable())); + connect(actionConsoleDisableJob, SIGNAL(triggered()), this, SLOT(consoleDisable())); + connect(actionConsoleCancel, SIGNAL(triggered()), this, SLOT(consoleCancel())); + connect(actionJobListQuery, SIGNAL(triggered()), this, SLOT(listJobs())); + connect(actionRunJob, SIGNAL(triggered()), this, SLOT(runJob())); +} + +/* + * Virtual function which is called when this page is visible on the stack + */ +void Jobs::currentStackItem() +{ + if(!m_populated) { + /* Create the context menu for the client table */ + populateTable(); + } +} + +/* + * The following functions are slots responding to users clicking on the context + * sensitive menu + */ + +void Jobs::consoleListFiles() +{ + QString cmd = "list files job=\"" + m_currentlyselected + "\""; + if (mainWin->m_longList) { cmd.prepend("l"); } + consoleCommand(cmd); +} + +void Jobs::consoleListVolume() +{ + QString cmd = "list volumes job=\"" + m_currentlyselected + "\""; + if (mainWin->m_longList) { cmd.prepend("l"); } + consoleCommand(cmd); +} + +void Jobs::consoleListNextVolume() +{ 
+ QString cmd = "list nextvolume job=\"" + m_currentlyselected + "\""; + if (mainWin->m_longList) { cmd.prepend("l"); } + consoleCommand(cmd); +} + +void Jobs::consoleEnable() +{ + QString cmd = "enable job=\"" + m_currentlyselected + "\""; + consoleCommand(cmd); +} + +void Jobs::consoleDisable() +{ + QString cmd = "disable job=\"" + m_currentlyselected + "\""; + consoleCommand(cmd); +} + +void Jobs::consoleCancel() +{ + QString cmd = "cancel job=\"" + m_currentlyselected + "\""; + consoleCommand(cmd); +} + +void Jobs::listJobs() +{ + QTreeWidgetItem *parentItem = mainWin->getFromHash(this); + mainWin->createPageJobList("", "", m_currentlyselected, "", parentItem); +} + +/* + * Open a new job run page with the currently selected job + * defaulted In + */ +void Jobs::runJob() +{ + new runPage(m_currentlyselected); +} diff --git a/src/qt-console/jobs/jobs.h b/src/qt-console/jobs/jobs.h new file mode 100644 index 00000000..96bb5eab --- /dev/null +++ b/src/qt-console/jobs/jobs.h @@ -0,0 +1,66 @@ +#ifndef _JOBS_H_ +#define _JOBS_H_ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Dirk Bartley, March 2007 + */ + +#if QT_VERSION >= 0x050000 +#include +#else +#include +#endif +#include "ui_jobs.h" +#include "console.h" +#include "pages.h" + +class Jobs : public Pages, public Ui::jobsForm +{ + Q_OBJECT + +public: + Jobs(); + ~Jobs(); + virtual void PgSeltreeWidgetClicked(); + virtual void currentStackItem(); + +public slots: + void tableItemChanged(QTableWidgetItem *, QTableWidgetItem *); + +private slots: + void populateTable(); + void consoleListFiles(); + void consoleListVolume(); + void consoleListNextVolume(); + void consoleEnable(); + void consoleDisable(); + void consoleCancel(); + void listJobs(); + void runJob(); + +private: + void createContextMenu(); + QString m_currentlyselected; + bool m_populated; + bool m_checkcurwidget; + int m_typeIndex; +}; + +#endif /* _JOBS_H_ */ diff --git a/src/qt-console/jobs/jobs.ui b/src/qt-console/jobs/jobs.ui new file mode 100644 index 00000000..e07c030b --- /dev/null +++ b/src/qt-console/jobs/jobs.ui @@ -0,0 +1,157 @@ + + jobsForm + + + + 0 + 0 + 449 + 307 + + + + Client Tree + + + + + + + + + :/images/view-refresh.png + + + Refresh Jobs List + + + Requery the director for the list of clients. 
+ + + + + :/images/utilities-terminal.png + + + List Files Command + + + List Files Command + + + List Files Command + + + List Files Command + + + + + :/images/utilities-terminal.png + + + List Volumes Command + + + List Volumes Command + + + List Volumes Command + + + List Volumes Command + + + + + :/images/utilities-terminal.png + + + List Next Volume Command + + + List Next Volume Command + + + List Next Volume Command + + + List Next Volume Command + + + + + :/images/utilities-terminal.png + + + Enable Job Command + + + Enable Job Command + + + Enable Job Command + + + Enable Job Command + + + + + :/images/utilities-terminal.png + + + Disable Job Command + + + Disable Job Command + + + Disable Job Command + + + Disable Job Command + + + + + :/images/emblem-system.png + + + Open JobList on Job + + + Open JobList on Job + + + Open JobList on Job + + + Open JobList on Job + + + + + :/images/utilities-terminal.png + + + Cancel Job Command + + + Cancel Job Command + + + + + :/images/run.png + + + RunJob + + + + + + + + diff --git a/src/qt-console/label/label.cpp b/src/qt-console/label/label.cpp new file mode 100644 index 00000000..664d0e50 --- /dev/null +++ b/src/qt-console/label/label.cpp @@ -0,0 +1,127 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +/* + * Label Page class + * + * Kern Sibbald, February MMVII + * + */ + +#include "bat.h" +#include "label.h" +#include + +labelPage::labelPage() : Pages() +{ + QString deflt(""); + m_closeable = false; + showPage(deflt); +} + +/* + * An overload of the constructor to have a default storage show in the + * combobox on start. Used from context sensitive in storage class. + */ +labelPage::labelPage(QString &defString) : Pages() +{ + showPage(defString); +} + +/* + * moved the constructor code here for the overload. 
+ */ +void labelPage::showPage(QString &defString) +{ + m_name = "Label"; + pgInitialize(); + setupUi(this); + m_conn = m_console->notifyOff(); + QTreeWidgetItem* thisitem = mainWin->getFromHash(this); + thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/label.png"))); + + storageCombo->addItems(m_console->storage_list); + int index = storageCombo->findText(defString, Qt::MatchExactly); + if (index != -1) { + storageCombo->setCurrentIndex(index); + } + poolCombo->addItems(m_console->pool_list); + connect(okButton, SIGNAL(pressed()), this, SLOT(okButtonPushed())); + connect(cancelButton, SIGNAL(pressed()), this, SLOT(cancelButtonPushed())); + connect(automountOnButton, SIGNAL(pressed()), this, SLOT(automountOnButtonPushed())); + connect(automountOffButton, SIGNAL(pressed()), this, SLOT(automountOffButtonPushed())); + dockPage(); + setCurrent(); + this->show(); +} + + +void labelPage::okButtonPushed() +{ + QString scmd; + if (volumeName->text().toUtf8().data()[0] == 0) { + QMessageBox::warning(this, "No Volume name", "No Volume name given", + QMessageBox::Ok, QMessageBox::Ok); + return; + } + this->hide(); + scmd = QString("label volume=\"%1\" pool=\"%2\" storage=\"%3\" slot=%4\n") + .arg(volumeName->text()) + .arg(poolCombo->currentText()) + .arg(storageCombo->currentText()) + .arg(slotSpin->value()); + if (mainWin->m_commandDebug) { + Pmsg1(000, "sending command : %s\n", scmd.toUtf8().data()); + } + if (m_console) { + m_console->write_dir(scmd.toUtf8().data()); + m_console->displayToPrompt(m_conn); + m_console->notify(m_conn, true); + } else { + Pmsg0(000, "m_console==NULL !!!!!!\n"); + } + closeStackPage(); + mainWin->resetFocus(); +} + +void labelPage::cancelButtonPushed() +{ + this->hide(); + if (m_console) { + m_console->notify(m_conn, true); + } else { + Pmsg0(000, "m_console==NULL !!!!!!\n"); + } + closeStackPage(); + mainWin->resetFocus(); +} + +/* turn automount on */ +void labelPage::automountOnButtonPushed() +{ + QString cmd("automount on"); + consoleCommand(cmd); +} + +/* turn automount off */ +void labelPage::automountOffButtonPushed() +{ + QString cmd("automount off"); + consoleCommand(cmd); +} diff --git a/src/qt-console/label/label.h b/src/qt-console/label/label.h new file mode 100644 index 00000000..094f6a43 --- /dev/null +++ b/src/qt-console/label/label.h @@ -0,0 +1,54 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Kern Sibbald, February MMVII + */ + +#ifndef _LABEL_H_ +#define _LABEL_H_ + +#if QT_VERSION >= 0x050000 +#include +#else +#include +#endif +#include "ui_label.h" +#include "console.h" +#include "pages.h" + +class labelPage : public Pages, public Ui::labelForm +{ + Q_OBJECT + +public: + labelPage(); + labelPage(QString &defString); + void showPage(QString &defString); + +private slots: + void okButtonPushed(); + void cancelButtonPushed(); + void automountOnButtonPushed(); + void automountOffButtonPushed(); + +private: + int m_conn; +}; + +#endif /* _LABEL_H_ */ diff --git a/src/qt-console/label/label.ui b/src/qt-console/label/label.ui new file mode 100644 index 00000000..49db6e4c --- /dev/null +++ b/src/qt-console/label/label.ui @@ -0,0 +1,303 @@ + + + labelForm + + + + 0 + 0 + 560 + 357 + + + + Qt::NoFocus + + + Form + + + + 9 + + + 6 + + + + + Qt::Vertical + + + QSizePolicy::Expanding + + + + 421 + 48 + + + + + + + + Qt::Vertical + + + QSizePolicy::Expanding + + + + 431 + 48 + + + + + + + + 0 + + + 6 + + + + + 6 + + + 0 + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + OK + + + true + + + + + + + Cancel + + + + + + + + + Volume Name: + + + volumeName + + + + + + + + + + Storage: + + + storageCombo + + + + + + + + 200 + 0 + + + + Qt::StrongFocus + + + Enter Name of Volume to create + + + + + + + 10000 + + + + + + + Slot: + + + slotSpin + + + + + + + 6 + + + 0 + + + + + Execute Automount + + + + + + + On + + + + + + + Off + + + + + + + Qt::Vertical + + + + 20 + 61 + + + + + + + + + + + + + Pool: + + + poolCombo + + + + + + + 6 + + + 0 + + + + + Qt::Horizontal + + + + 71 + 21 + + + + + + + + + 16777215 + 30 + + + + <html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:12pt; font-weight:600;">Label a Volume</span></p></body></html> + + + + + + + Qt::Horizontal + + + + 81 + 20 + + + + + + + + + + + + Qt::Horizontal + + + + 40 + 131 + + + + + + + + Qt::Horizontal + + + + 40 + 121 + + + + + + + + + diff --git a/src/qt-console/main.cpp b/src/qt-console/main.cpp new file mode 100644 index 00000000..3c1924c9 --- /dev/null +++ b/src/qt-console/main.cpp @@ -0,0 +1,269 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Main program for bat (qt-console) + * + * Written by Kern Sibbald, January MMVII + * + */ + + +#include "bat.h" +#include +#include + +/* + * We need Qt version 4.8.4 or later to be able to comple correctly + */ +#if QT_VERSION < 0x040804 +#error "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" +#error "You need Qt version 4.8.4 or later to build Bat" +#error "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" 
+#endif + +MainWin *mainWin; +QApplication *app; + +/* Forward referenced functions */ +void terminate_console(int sig); +static void usage(); +static int check_resources(); + +extern bool parse_bat_config(CONFIG *config, const char *configfile, int exit_code); +extern void message_callback(int /* type */, char *msg); + + +#define CONFIG_FILE "bat.conf" /* default configuration file */ + +/* Static variables */ +static CONFIG *config; +static char *configfile = NULL; + +int main(int argc, char *argv[]) +{ + int ch; + bool no_signals = true; + bool test_config = false; + + + app = new QApplication(argc, argv); + app->setQuitOnLastWindowClosed(true); +#if QT_VERSION < 0x050000 + app->setStyle(new QPlastiqueStyle()); + QTextCodec::setCodecForCStrings(QTextCodec::codecForName("UTF-8")); +#endif + + QTranslator qtTranslator; + qtTranslator.load(QString("qt_") + QLocale::system().name(),QLibraryInfo::location(QLibraryInfo::TranslationsPath)); + app->installTranslator(&qtTranslator); + + QTranslator batTranslator; + batTranslator.load(QString("bat_") + QLocale::system().name(),QLibraryInfo::location(QLibraryInfo::TranslationsPath)); + app->installTranslator(&batTranslator); + + register_message_callback(message_callback); + +#ifdef xENABLE_NLS + setlocale(LC_ALL, ""); + bindtextdomain("bacula", LOCALEDIR); + textdomain("bacula"); +#endif + +#ifdef HAVE_WIN32 + set_trace(true); /* output to trace file */ +#endif + + init_stack_dump(); + my_name_is(argc, argv, "bat"); + lmgr_init_thread(); + init_msg(NULL, NULL); + working_directory = "/tmp"; + +#ifndef HAVE_WIN32 + struct sigaction sigignore; + sigignore.sa_flags = 0; + sigignore.sa_handler = SIG_IGN; + sigfillset(&sigignore.sa_mask); + sigaction(SIGPIPE, &sigignore, NULL); + sigaction(SIGUSR2, &sigignore, NULL); +#endif + + while ((ch = getopt(argc, argv, "bc:d:r:st?")) != -1) { + switch (ch) { + case 'c': /* configuration file */ + if (configfile != NULL) { + free(configfile); + } + configfile = bstrdup(optarg); + break; + + case 'd': + debug_level = atoi(optarg); + if (debug_level <= 0) + debug_level = 1; + break; + + case 's': /* turn off signals */ + no_signals = true; + break; + + case 't': + test_config = true; + break; + + case '?': + default: + usage(); + } + } + argc -= optind; + argv += optind; + + + if (!no_signals) { + init_signals(terminate_console); + } + + if (argc) { + usage(); + } + + OSDependentInit(); +#ifdef HAVE_WIN32 + WSA_Init(); /* Initialize Windows sockets */ +#endif + + if (configfile == NULL) { + configfile = bstrdup(CONFIG_FILE); + } + + config = New(CONFIG()); + parse_bat_config(config, configfile, M_ERROR_TERM); + + if (init_crypto() != 0) { + Emsg0(M_ERROR_TERM, 0, _("Cryptography library initialization failed.\n")); + } + + if (!check_resources()) { + Emsg1(M_ERROR_TERM, 0, _("Please correct configuration file: %s\n"), configfile); + } + if (test_config) { + exit(0); + } + + mainWin = new MainWin; + mainWin->show(); + + return app->exec(); +} + +void terminate_console(int /*sig*/) +{ +#ifdef HAVE_WIN32 + WSACleanup(); /* TODO: check when we have to call it */ +#endif + exit(0); +} + +static void usage() +{ + fprintf(stderr, _( +PROG_COPYRIGHT +"\n%sVersion: %s (%s) %s %s %s\n\n" +"Usage: bat [-s] [-c config_file] [-d debug_level] [config_file]\n" +" -c set configuration file to file\n" +" -dnn set debug level to nn\n" +" -s no signals\n" +" -t test - read configuration and exit\n" +" -? 
print this message.\n" +"\n"), 2007, BDEMO, VERSION, BDATE, HOST_OS, DISTNAME, DISTVER); + + exit(1); +} + +/* + * Make a quick check to see that we have all the + * resources needed. + */ +static int check_resources() +{ + bool ok = true; + DIRRES *director; + int numdir; + bool tls_needed; + + LockRes(); + + numdir = 0; + foreach_res(director, R_DIRECTOR) { + numdir++; + /* tls_require implies tls_enable */ + if (director->tls_require) { + if (have_tls) { + director->tls_enable = true; + } else { + Jmsg(NULL, M_FATAL, 0, _("TLS required but not configured in Bacula.\n")); + ok = false; + continue; + } + } + tls_needed = director->tls_enable || director->tls_authenticate; + + if ((!director->tls_ca_certfile && !director->tls_ca_certdir) && tls_needed) { + Emsg2(M_FATAL, 0, _("Neither \"TLS CA Certificate\"" + " or \"TLS CA Certificate Dir\" are defined for Director \"%s\" in %s." + " At least one CA certificate store is required.\n"), + director->hdr.name, configfile); + ok = false; + } + } + + if (numdir == 0) { + Emsg1(M_FATAL, 0, _("No Director resource defined in %s\n" + "Without that I don't how to speak to the Director :-(\n"), configfile); + ok = false; + } + + CONRES *cons; + /* Loop over Consoles */ + foreach_res(cons, R_CONSOLE) { + /* tls_require implies tls_enable */ + if (cons->tls_require) { + if (have_tls) { + cons->tls_enable = true; + } else { + Jmsg(NULL, M_FATAL, 0, _("TLS required but not configured in Bacula.\n")); + ok = false; + continue; + } + } + tls_needed = cons->tls_enable || cons->tls_authenticate; + + if ((!cons->tls_ca_certfile && !cons->tls_ca_certdir) && tls_needed) { + Emsg2(M_FATAL, 0, _("Neither \"TLS CA Certificate\"" + " or \"TLS CA Certificate Dir\" are defined for Console \"%s\" in %s.\n"), + cons->hdr.name, configfile); + ok = false; + } + } + + UnlockRes(); + + return ok; +} diff --git a/src/qt-console/main.qrc b/src/qt-console/main.qrc new file mode 100644 index 00000000..5401d154 --- /dev/null +++ b/src/qt-console/main.qrc @@ -0,0 +1,77 @@ + + + images/ajax-loader-big.gif + images/page-prev.gif + images/page-next.gif + images/0p.png + images/16p.png + images/32p.png + images/48p.png + images/64p.png + images/80p.png + images/96p.png + images/A.png + images/R.png + images/T.png + images/W.png + images/applications-graphics.png + images/bat.png + images/bat_icon.png + images/browse.png + images/cartridge-edit.png + images/cartridge.png + images/cartridge1.png + images/check.png + images/connected.png + images/copy.png + images/cut.png + images/disconnected.png + images/edit-cut.png + images/edit-delete.png + images/edit.png + images/emblem-system.png + images/estimate-job.png + images/extern.png + images/f.png + images/folder.png + images/folderbothchecked.png + images/folderchecked.png + images/folderunchecked.png + images/go-down.png + images/go-jump.png + images/go-up.png + images/graph1.png + images/help-browser.png + images/inflag0.png + images/inflag1.png + images/inflag2.png + images/intern.png + images/joblog.png + images/label.png + images/mail-message-new.png + images/mail-message-pending.png + images/mark.png + images/network-server.png + images/new.png + images/open.png + images/package-x-generic.png + images/paste.png + images/print.png + images/purge.png + images/restore.png + images/run.png + images/runit.png + images/save.png + images/status-console.png + images/status.png + images/system-file-manager.png + images/unchecked.png + images/undo.png + images/unmark.png + images/up.png + images/utilities-terminal.png + 
images/view-refresh.png + images/weather-severe-alert.png + images/zoom.png + + diff --git a/src/qt-console/main.ui b/src/qt-console/main.ui new file mode 100644 index 00000000..0cd7a870 --- /dev/null +++ b/src/qt-console/main.ui @@ -0,0 +1,581 @@ + + + Kern Sibbald + MainForm + + + + 0 + 0 + 785 + 660 + + + + bat - Bacula Admin Tool + + + + :/images/bat.png:/images/bat.png + + + + + + Bacula Administration Tool + + + It's a Dock widget to allow page selection + + + + + 9 + + + 6 + + + + + -1 + + + + + + + + + 0 + 0 + 785 + 25 + + + + + Settings + + + + + + + &Help + + + + + + + &File + + + + + + + + + + + + + Current Status + + + Current Status + + + + + + 51 + 39 + + + + Tool Bar + + + Qt::Horizontal + + + TopToolBarArea + + + false + + + + + + + + + + + + + + + + + + + 0 + 1 + + + + + 112 + 179 + + + + + 524287 + 524287 + + + + Qt::StrongFocus + + + + + + + + + false + + + QDockWidget::DockWidgetFloatable|QDockWidget::DockWidgetMovable + + + Qt::AllDockWidgetAreas + + + + + + 1 + + + + + + + + 1 + 1 + + + + + 0 + 0 + + + + + 16777215 + 16777215 + + + + + 0 + 0 + + + + Qt::StrongFocus + + + Selects panel window + + + Use items in this tree to select what window is in panel + + + true + + + + 1 + + + + + + + + Command: + + + + + + + Click here to enter command + + + + + + + + + &Quit + + + Ctrl+Q + + + + + + :/images/bat.png:/images/bat.png + + + &About bat + + + + + + :/images/copy.png:/images/copy.png + + + &Copy + + + + + + :/images/cut.png:/images/cut.png + + + Cu&t + + + + + + :/images/new.png:/images/new.png + + + new + + + + + + :/images/open.png:/images/open.png + + + open + + + + + + :/images/paste.png:/images/paste.png + + + &Paste + + + + + + :/images/print.png:/images/print.png + + + &Print + + + Print + + + + + + :/images/save.png:/images/save.png + + + &Save + + + Save (not implemented) + + + + + + :/images/disconnected.png:/images/disconnected.png + + + Connect + + + Connect/disconnect + + + + + + :/images/label.png:/images/label.png + + + Label + + + Label a Volume + + + Label a Volume + + + + + + :/images/restore.png:/images/restore.png + + + Restore + + + Restore Files + + + + + false + + + + :/images/run.png:/images/run.png + + + Run Job + + + Run Job + + + Run Job + + + Run a Job + + + + + false + + + + :/images/estimate-job.png:/images/estimate-job.png + + + Estimate Job + + + Estimate Job + + + Estimate Job + + + Estimate a Job + + + + + + :/images/status-console.png:/images/status-console.png + + + Status Dir + + + Status Dir + + + Query status of director in console + + + Query status of director in console + + + true + + + + + &Select Font ... + + + + + + :/images/up.png:/images/up.png + + + Undock Window + + + Undock Current Window + + + + + + :/images/up.png:/images/up.png + + + ToggleDock + + + Toggle Dock Status + + + Toggle Dock Status + + + + + + :/images/unmark.png:/images/unmark.png + + + Close Page + + + Close The Current Page + + + + + + :/images/mail-message-new.png:/images/mail-message-new.png + + + Messages + + + Display any messages queued at the director + + + false + + + + + &Preferences ... 
+ + + Set Preferences + + + Set Preferences + + + + + + :/images/help-browser.png:/images/help-browser.png + + + bat &Help + + + + + false + + + + :/images/browse.png:/images/browse.png + + + Browse + + + Browse Cataloged Files + + + Browse Cataloged Files + + + + + + :/images/applications-graphics.png:/images/applications-graphics.png + + + JobPlot + + + Plot Job Files and Bytes + + + Plot Job Files and Bytes + + + + + + :/images/status.png:/images/status.png + + + Status Dir Page + + + Director Status Page + + + + + + :/images/mark.png:/images/mark.png + + + Repop Lists + + + + + + :/images/mark.png:/images/mark.png + + + Reload and Repop + + + + + + :/images/go-jump.png:/images/go-jump.png + + + back + + + Previous Page + + + Previous Page + + + + + + + + diff --git a/src/qt-console/mainwin.cpp b/src/qt-console/mainwin.cpp new file mode 100644 index 00000000..9b70903d --- /dev/null +++ b/src/qt-console/mainwin.cpp @@ -0,0 +1,1027 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +/* + * + * Main Window control for bat (qt-console) + * + * Kern Sibbald, January MMVII + * + */ + +#include "bat.h" +#include "version.h" +#include "joblist/joblist.h" +#include "storage/storage.h" +#include "fileset/fileset.h" +#include "label/label.h" +#include "run/run.h" +#include "pages.h" +#include "restore/restore.h" +#include "medialist/medialist.h" +#include "joblist/joblist.h" +#include "clients/clients.h" +#include "restore/restoretree.h" +#include "help/help.h" +#include "jobs/jobs.h" +#include "medialist/mediaview.h" +#ifdef HAVE_QWT +#include "jobgraphs/jobplot.h" +#endif +#include "status/dirstat.h" +#include "util/fmtwidgetitem.h" + +/* + * Daemon message callback + */ +void message_callback(int /* type */, char *msg) +{ + QMessageBox::warning(mainWin, "Bat", msg, QMessageBox::Ok); +} + +MainWin::MainWin(QWidget *parent) : QMainWindow(parent) +{ + app->setOverrideCursor(QCursor(Qt::WaitCursor)); + m_isClosing = false; + m_waitState = false; + m_doConnect = false; + m_treeStackTrap = false; + m_dtformat = "yyyy-MM-dd HH:mm:ss"; + mainWin = this; + setupUi(this); /* Setup UI defined by main.ui (designer) */ + register_message_callback(message_callback); + readPreferences(); + treeWidget->clear(); + treeWidget->setColumnCount(1); + treeWidget->setHeaderLabel( tr("Select Page") ); + treeWidget->setContextMenuPolicy(Qt::ActionsContextMenu); + tabWidget->setTabsClosable(true); /* wait for QT 4.5 */ + createPages(); + + resetFocus(); /* lineEdit->setFocus() */ + +#ifndef HAVE_QWT + actionJobPlot->setEnabled(false); + actionJobPlot->setVisible(false); +#endif + + this->show(); + + readSettings(); + + foreach(Console *console, m_consoleHash) { + console->connect_dir(); + } + /* + * Note, the notifier is now a global flag, although each notifier + * can be individually turned on and off at a socket level. 
Once + * the notifier is turned off, we don't accept anything from anyone + * this prevents unwanted messages from getting into the input + * dialogs such as restore that read from the director and "know" + * what to expect. + */ + m_notify = true; + m_currentConsole = (Console*)getFromHash(m_firstItem); + QTimer::singleShot(2000, this, SLOT(popLists())); + if (m_miscDebug) { + QString directoryResourceName; + m_currentConsole->getDirResName(directoryResourceName); + Pmsg1(100, "Setting initial window to %s\n", directoryResourceName.toUtf8().data()); + } + app->restoreOverrideCursor(); +} + +void MainWin::popLists() +{ + foreach(Console *console, m_consoleHash) { + console->populateLists(true); + } + m_doConnect = true; + connectConsoleSignals(); + connectSignals(); + app->restoreOverrideCursor(); + m_currentConsole->setCurrent(); +} + +void MainWin::createPages() +{ + DIRRES *dir; + QTreeWidgetItem *item, *topItem; + m_firstItem = NULL; + + LockRes(); + foreach_res(dir, R_DIRECTOR) { + + /* Create console tree stacked widget item */ + m_currentConsole = new Console(tabWidget); + m_currentConsole->setDirRes(dir); + m_currentConsole->readSettings(); + + /* The top tree item representing the director */ + topItem = new QTreeWidgetItem(treeWidget); + topItem->setText(0, dir->name()); + topItem->setIcon(0, QIcon(":images/server.png")); + /* Set background to grey for ease of identification of inactive Director */ + QBrush greyBrush(Qt::lightGray); + topItem->setBackground(0, greyBrush); + m_currentConsole->setDirectorTreeItem(topItem); + m_consoleHash.insert(topItem, m_currentConsole); + + /* Create Tree Widget Item */ + item = new QTreeWidgetItem(topItem); + item->setText(0, tr("Console")); + if (!m_firstItem){ m_firstItem = item; } + item->setIcon(0,QIcon(QString::fromUtf8(":images/utilities-terminal.png"))); + + /* insert the cosole and tree widget item into the hashes */ + hashInsert(item, m_currentConsole); + m_currentConsole->dockPage(); + + /* Set Color of treeWidgetItem for the console + * It will be set to green in the console class if the connection is made. + */ + QBrush redBrush(Qt::red); + item->setForeground(0, redBrush); + + /* + * Create instances in alphabetic order of the rest + * of the classes that will by default exist under each Director. 
+ */ + new bRestore(); + new Clients(); + new FileSet(); + new Jobs(); + createPageJobList("", "", "", "", NULL); +#ifdef HAVE_QWT + JobPlotPass pass; + pass.use = false; + if (m_openPlot) + new JobPlot(NULL, pass); +#endif + new MediaList(); + new MediaView(); + new Storage(); +// if (m_openBrowser) { +// new restoreTree(); +// } + if (m_openDirStat) { + new DirStat(); + } + treeWidget->expandItem(topItem); + tabWidget->setCurrentWidget(m_currentConsole); + } + UnlockRes(); +} + +/* + * create an instance of the the joblist class on the stack + */ +void MainWin::createPageJobList(const QString &media, const QString &client, + const QString &job, const QString &fileset, QTreeWidgetItem *parentTreeWidgetItem) +{ + QTreeWidgetItem *holdItem; + + /* save current tree widget item in case query produces no results */ + holdItem = treeWidget->currentItem(); + JobList* joblist = new JobList(media, client, job, fileset, parentTreeWidgetItem); + /* If this is a query of jobs on a specific media */ + if ((media != "") || (client != "") || (job != "") || (fileset != "")) { + joblist->setCurrent(); + /* did query produce results, if not close window and set back to hold */ + if (joblist->m_resultCount == 0) { + joblist->closeStackPage(); + treeWidget->setCurrentItem(holdItem); + } + } +} + +/* + * Handle up and down arrow keys for the command line + * history. + */ +void MainWin::keyPressEvent(QKeyEvent *event) +{ + if (m_cmd_history.size() == 0) { + event->ignore(); + return; + } + switch (event->key()) { + case Qt::Key_Down: + if (m_cmd_last < 0 || m_cmd_last >= (m_cmd_history.size()-1)) { + event->ignore(); + return; + } + m_cmd_last++; + break; + case Qt::Key_Up: + if (m_cmd_last == 0) { + event->ignore(); + return; + } + if (m_cmd_last < 0 || m_cmd_last > (m_cmd_history.size()-1)) { + m_cmd_last = m_cmd_history.size() - 1; + } else { + m_cmd_last--; + } + break; + default: + event->ignore(); + return; + } + lineEdit->setText(m_cmd_history[m_cmd_last]); +} + +void MainWin::connectSignals() +{ + /* Connect signals to slots */ + connect(lineEdit, SIGNAL(returnPressed()), this, SLOT(input_line())); + connect(actionAbout_bat, SIGNAL(triggered()), this, SLOT(about())); + connect(actionBat_Help, SIGNAL(triggered()), this, SLOT(help())); + connect(treeWidget, SIGNAL(itemClicked(QTreeWidgetItem *, int)), this, SLOT(treeItemClicked(QTreeWidgetItem *, int))); + connect(treeWidget, SIGNAL(currentItemChanged(QTreeWidgetItem *, QTreeWidgetItem *)), this, SLOT(treeItemChanged(QTreeWidgetItem *, QTreeWidgetItem *))); + connect(tabWidget, SIGNAL(currentChanged(int)), this, SLOT(stackItemChanged(int))); + connect(tabWidget, SIGNAL(tabCloseRequested(int)), this, SLOT(closePage(int))); + connect(actionQuit, SIGNAL(triggered()), app, SLOT(closeAllWindows())); + connect(actionLabel, SIGNAL(triggered()), this, SLOT(labelButtonClicked())); + connect(actionRun, SIGNAL(triggered()), this, SLOT(runButtonClicked())); + connect(actionEstimate, SIGNAL(triggered()), this, SLOT(estimateButtonClicked())); + connect(actionBrowse, SIGNAL(triggered()), this, SLOT(browseButtonClicked())); + connect(actionStatusDirPage, SIGNAL(triggered()), this, SLOT(statusPageButtonClicked())); +#ifdef HAVE_QWT + connect(actionJobPlot, SIGNAL(triggered()), this, SLOT(jobPlotButtonClicked())); +#endif + connect(actionRestore, SIGNAL(triggered()), this, SLOT(restoreButtonClicked())); + connect(actionUndock, SIGNAL(triggered()), this, SLOT(undockWindowButton())); + connect(actionToggleDock, SIGNAL(triggered()), this, SLOT(toggleDockContextWindow())); 
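/*
 * Editorial sketch, not part of the imported source: the up/down-arrow
 * command-history handling in MainWin::keyPressEvent() above, reduced to a
 * plain helper so the index arithmetic is easier to follow. The class name
 * CmdHistory and its methods are illustrative assumptions only.
 */
#include <QString>
#include <QStringList>

class CmdHistory {
public:
   void add(const QString &cmd) { m_items.append(cmd); m_pos = m_items.size(); }
   /* Key_Up: step back toward the oldest entry */
   QString previous() {
      if (m_items.isEmpty()) return QString();
      if (m_pos > 0) m_pos--;
      return m_items.at(m_pos);
   }
   /* Key_Down: step forward toward the newest entry; empty string means "stay put" */
   QString next() {
      if (m_items.isEmpty() || m_pos >= m_items.size() - 1) return QString();
      return m_items.at(++m_pos);
   }
private:
   QStringList m_items;
   int m_pos = 0;
};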
+ connect(actionClosePage, SIGNAL(triggered()), this, SLOT(closeCurrentPage())); + connect(actionPreferences, SIGNAL(triggered()), this, SLOT(setPreferences())); + connect(actionRepopLists, SIGNAL(triggered()), this, SLOT(repopLists())); + connect(actionReloadRepop, SIGNAL(triggered()), this, SLOT(reloadRepopLists())); +} + +void MainWin::disconnectSignals() +{ + /* Connect signals to slots */ + disconnect(lineEdit, SIGNAL(returnPressed()), this, SLOT(input_line())); + disconnect(actionAbout_bat, SIGNAL(triggered()), this, SLOT(about())); + disconnect(actionBat_Help, SIGNAL(triggered()), this, SLOT(help())); + disconnect(treeWidget, SIGNAL(itemClicked(QTreeWidgetItem *, int)), this, SLOT(treeItemClicked(QTreeWidgetItem *, int))); + disconnect(treeWidget, SIGNAL( currentItemChanged(QTreeWidgetItem *, QTreeWidgetItem *)), this, SLOT(treeItemChanged(QTreeWidgetItem *, QTreeWidgetItem *))); + disconnect(tabWidget, SIGNAL(currentChanged(int)), this, SLOT(stackItemChanged(int))); + disconnect(tabWidget, SIGNAL(tabCloseRequested(int)), this, SLOT(closePage(int))); + disconnect(actionQuit, SIGNAL(triggered()), app, SLOT(closeAllWindows())); + disconnect(actionLabel, SIGNAL(triggered()), this, SLOT(labelButtonClicked())); + disconnect(actionRun, SIGNAL(triggered()), this, SLOT(runButtonClicked())); + disconnect(actionEstimate, SIGNAL(triggered()), this, SLOT(estimateButtonClicked())); + disconnect(actionBrowse, SIGNAL(triggered()), this, SLOT(browseButtonClicked())); + disconnect(actionStatusDirPage, SIGNAL(triggered()), this, SLOT(statusPageButtonClicked())); +#ifdef HAVE_QWT + disconnect(actionJobPlot, SIGNAL(triggered()), this, SLOT(jobPlotButtonClicked())); +#endif + disconnect(actionRestore, SIGNAL(triggered()), this, SLOT(restoreButtonClicked())); + disconnect(actionUndock, SIGNAL(triggered()), this, SLOT(undockWindowButton())); + disconnect(actionToggleDock, SIGNAL(triggered()), this, SLOT(toggleDockContextWindow())); + disconnect(actionClosePage, SIGNAL(triggered()), this, SLOT(closeCurrentPage())); + disconnect(actionPreferences, SIGNAL(triggered()), this, SLOT(setPreferences())); + disconnect(actionRepopLists, SIGNAL(triggered()), this, SLOT(repopLists())); + disconnect(actionReloadRepop, SIGNAL(triggered()), this, SLOT(reloadRepopLists())); +} + +/* + * Enter wait state + */ +void MainWin::waitEnter() +{ + if (m_waitState || m_isClosing) { + return; + } + m_waitState = true; + if (mainWin->m_connDebug) Pmsg0(000, "Entering Wait State\n"); + app->setOverrideCursor(QCursor(Qt::WaitCursor)); + disconnectSignals(); + disconnectConsoleSignals(m_currentConsole); + m_waitTreeItem = treeWidget->currentItem(); +} + +/* + * Leave wait state + */ +void MainWin::waitExit() +{ + if (!m_waitState || m_isClosing) { + return; + } + if (mainWin->m_connDebug) Pmsg0(000, "Exiting Wait State\n"); + if (m_waitTreeItem && (m_waitTreeItem != treeWidget->currentItem())) { + treeWidget->setCurrentItem(m_waitTreeItem); + } + if (m_doConnect) { + connectSignals(); + connectConsoleSignals(); + } + app->restoreOverrideCursor(); + m_waitState = false; +} + +void MainWin::connectConsoleSignals() +{ + connect(actionConnect, SIGNAL(triggered()), m_currentConsole, SLOT(connect_dir())); + connect(actionSelectFont, SIGNAL(triggered()), m_currentConsole, SLOT(set_font())); + connect(actionMessages, SIGNAL(triggered()), m_currentConsole, SLOT(messages())); +} + +void MainWin::disconnectConsoleSignals(Console *console) +{ + disconnect(actionConnect, SIGNAL(triggered()), console, SLOT(connect_dir())); + 
disconnect(actionMessages, SIGNAL(triggered()), console, SLOT(messages())); + disconnect(actionSelectFont, SIGNAL(triggered()), console, SLOT(set_font())); +} + + +/* + * Two functions to respond to menu items to repop lists and execute reload and repopulate + * the lists for jobs, clients, filesets .. .. + */ +void MainWin::repopLists() +{ + m_currentConsole->populateLists(false); +} +void MainWin::reloadRepopLists() +{ + QString cmd = "reload"; + m_currentConsole->consoleCommand(cmd); + m_currentConsole->populateLists(false); +} + +/* + * Reimplementation of QWidget closeEvent virtual function + */ +void MainWin::closeEvent(QCloseEvent *event) +{ + m_isClosing = true; + writeSettings(); + /* Remove all groups from settings for OpenOnExit so that we can start some of the status windows */ + foreach(Console *console, m_consoleHash){ + QSettings settings(console->m_dir->name(), "bat"); + settings.beginGroup("OpenOnExit"); + settings.remove(""); + settings.endGroup(); + } + /* close all non console pages, this will call settings in destructors */ + while (m_consoleHash.count() < m_pagehash.count()) { + foreach(Pages *page, m_pagehash) { + if (page != page->console()) { + QTreeWidgetItem* pageSelectorTreeWidgetItem = mainWin->getFromHash(page); + if (pageSelectorTreeWidgetItem->childCount() == 0) { + page->console()->setCurrent(); + page->closeStackPage(); + } + } + } + } + foreach(Console *console, m_consoleHash){ + console->writeSettings(); + console->terminate(); + console->closeStackPage(); + } + event->accept(); +} + +void MainWin::writeSettings() +{ + QSettings settings("bacula.org", "bat"); + + settings.beginGroup("MainWin"); + settings.setValue("winSize", size()); + settings.setValue("winPos", pos()); + settings.setValue("state", saveState()); + settings.endGroup(); + +} + +void MainWin::readSettings() +{ + QSettings settings("bacula.org", "bat"); + + settings.beginGroup("MainWin"); + resize(settings.value("winSize", QSize(1041, 801)).toSize()); + move(settings.value("winPos", QPoint(200, 150)).toPoint()); + restoreState(settings.value("state").toByteArray()); + settings.endGroup(); +} + +/* + * This subroutine is called with an item in the Page Selection window + * is clicked + */ +void MainWin::treeItemClicked(QTreeWidgetItem *item, int /*column*/) +{ + /* Is this a page that has been inserted into the hash */ + Pages* page = getFromHash(item); + if (page) { + int stackindex = tabWidget->indexOf(page); + + if (stackindex >= 0) { + tabWidget->setCurrentWidget(page); + } + page->dockPage(); + /* run the virtual function in case this class overrides it */ + page->PgSeltreeWidgetClicked(); + } else { + Dmsg0(000, "Page not in hash"); + } +} + +/* + * Called with a change of the highlighed tree widget item in the page selector. 
+ */ +void MainWin::treeItemChanged(QTreeWidgetItem *currentitem, QTreeWidgetItem *previousitem) +{ + if (m_isClosing) return; /* if closing the application, do nothing here */ + + Pages *previousPage, *nextPage; + Console *previousConsole = NULL; + Console *nextConsole; + + /* remove all actions before adding actions appropriate for new page */ + foreach(QAction* pageAction, treeWidget->actions()) { + treeWidget->removeAction(pageAction); + } + + /* first determine the next item */ + + /* knowing the treeWidgetItem, get the page from the hash */ + nextPage = getFromHash(currentitem); + nextConsole = m_consoleHash.value(currentitem); + /* Is this a page that has been inserted into the hash */ + if (nextPage) { + nextConsole = nextPage->console(); + /* then is it a treeWidgetItem representing a director */ + } else if (nextConsole) { + /* let the next page BE the console */ + nextPage = nextConsole; + } else { + /* Should never get here */ + nextPage = NULL; + nextConsole = NULL; + } + + /* The Previous item */ + + /* this condition prevents a segfault. The first time there is no previousitem*/ + if (previousitem) { + if (m_treeStackTrap == false) { /* keep track of previous items for going Back */ + m_treeWidgetStack.append(previousitem); + } + /* knowing the treeWidgetItem, get the page from the hash */ + previousPage = getFromHash(previousitem); + previousConsole = m_consoleHash.value(previousitem); + if (previousPage) { + previousConsole = previousPage->console(); + } else if (previousConsole) { + previousPage = previousConsole; + } + if ((previousPage) || (previousConsole)) { + if (nextConsole != previousConsole) { + /* remove connections to the current console */ + disconnectConsoleSignals(previousConsole); + QTreeWidgetItem *dirItem = previousConsole->directorTreeItem(); + QBrush greyBrush(Qt::lightGray); + dirItem->setBackground(0, greyBrush); + } + } + } + + /* process the current (next) item */ + + if ((nextPage) || (nextConsole)) { + if (nextConsole != previousConsole) { + /* make connections to the current console */ + m_currentConsole = nextConsole; + connectConsoleSignals(); + setMessageIcon(); + /* Set director's tree widget background to magenta for ease of identification */ + QTreeWidgetItem *dirItem = m_currentConsole->directorTreeItem(); + QBrush magentaBrush(Qt::magenta); + dirItem->setBackground(0, magentaBrush); + } + /* set the value for the currently active console */ + int stackindex = tabWidget->indexOf(nextPage); + nextPage->firstUseDock(); + + /* Is this page currently on the stack or is it undocked */ + if (stackindex >= 0) { + /* put this page on the top of the stack */ + tabWidget->setCurrentIndex(stackindex); + } else { + /* it is undocked, raise it to the front */ + nextPage->raise(); + } + /* for the page selectors menu action to dock or undock, set the text */ + nextPage->setContextMenuDockText(); + + treeWidget->addAction(actionToggleDock); + /* if this page is closeable, and it has no childern, then add that action */ + if ((nextPage->isCloseable()) && (currentitem->child(0) == NULL)) + treeWidget->addAction(actionClosePage); + + /* Add the actions to the Page Selectors tree widget that are part of the + * current items list of desired actions regardless of whether on top of stack*/ + treeWidget->addActions(nextPage->m_contextActions); + } +} + +void MainWin::labelButtonClicked() +{ + new labelPage(); +} + +void MainWin::runButtonClicked() +{ + new runPage(""); +} + +void MainWin::estimateButtonClicked() +{ + new estimatePage(); +} + +void 
MainWin::browseButtonClicked() +{ +// new restoreTree(); +} + +void MainWin::statusPageButtonClicked() +{ + /* if one exists, then just set it current */ + bool found = false; + foreach(Pages *page, m_pagehash) { + if (m_currentConsole == page->console()) { + if (page->name() == tr("Director Status")) { + found = true; + page->setCurrent(); + } + } + } + if (!found) { + new DirStat(); + } +} + +void MainWin::restoreButtonClicked() +{ + new prerestorePage(); + if (mainWin->m_miscDebug) Pmsg0(000, "in restoreButtonClicked after prerestorePage\n"); +} + +void MainWin::jobPlotButtonClicked() +{ +#ifdef HAVE_QWT + JobPlotPass pass; + pass.use = false; + new JobPlot(NULL, pass); +#endif +} + +/* + * The user just finished typing a line in the command line edit box + */ +void MainWin::input_line() +{ + int conn; + QString cmdStr = lineEdit->text(); /* Get the text */ + lineEdit->clear(); /* clear the lineEdit box */ + if (m_currentConsole->is_connected()) { + if (m_currentConsole->findDirComm(conn)) { + m_currentConsole->consoleCommand(cmdStr, conn); + } else { + /* Use consoleCommand to allow typing anything */ + m_currentConsole->consoleCommand(cmdStr); + } + } else { + set_status(tr("Director not connected. Click on connect button.")); + } + m_cmd_history.append(cmdStr); + m_cmd_last = -1; + if (treeWidget->currentItem() != getFromHash(m_currentConsole)) + m_currentConsole->setCurrent(); +} + + +void MainWin::about() +{ + QMessageBox::about(this, tr("About bat"), + tr("
<br><h2>Bacula Bat %1 (%2)</h2>" + "<p>Copyright © 2007-%3 Kern Sibbald" + "<p>
The bat is an administrative console" + " interface to the Director.").arg(VERSION).arg(BDATE).arg(BYEAR)); +} + +void MainWin::help() +{ + Help::displayFile("index.html"); +} + +void MainWin::set_statusf(const char *fmt, ...) +{ + va_list arg_ptr; + char buf[1000]; + va_start(arg_ptr, fmt); + bvsnprintf(buf, sizeof(buf), fmt, arg_ptr); + va_end(arg_ptr); + set_status(buf); +} + +void MainWin::set_status_ready() +{ + set_status(tr(" Ready")); +} + +void MainWin::set_status(const QString &str) +{ + statusBar()->showMessage(str); +} + +void MainWin::set_status(const char *buf) +{ + statusBar()->showMessage(buf); +} + +/* + * Function to respond to the button bar button to undock + */ +void MainWin::undockWindowButton() +{ + Pages* page = (Pages*)tabWidget->currentWidget(); + if (page) { + page->togglePageDocking(); + } +} + +/* + * Function to respond to action on page selector context menu to toggle the + * dock status of the window associated with the page selectors current + * tree widget item. + */ +void MainWin::toggleDockContextWindow() +{ + QTreeWidgetItem *currentitem = treeWidget->currentItem(); + + /* Is this a page that has been inserted into the hash */ + if (getFromHash(currentitem)) { + Pages* page = getFromHash(currentitem); + if (page) { + page->togglePageDocking(); + } + } +} + +/* + * This function is called when the stack item is changed. Call + * the virtual function here. Avoids a window being undocked leaving + * a window at the top of the stack unpopulated. + */ +void MainWin::stackItemChanged(int) +{ + if (m_isClosing) return; /* if closing the application, do nothing here */ + Pages* page = (Pages*)tabWidget->currentWidget(); + /* run the virtual function in case this class overrides it */ + if (page) { + page->currentStackItem(); + } + if (!m_waitState) { + disconnect(treeWidget, SIGNAL(itemClicked(QTreeWidgetItem *, int)), this, SLOT(treeItemClicked(QTreeWidgetItem *, int))); + disconnect(treeWidget, SIGNAL( currentItemChanged(QTreeWidgetItem *, QTreeWidgetItem *)), this, SLOT(treeItemChanged(QTreeWidgetItem *, QTreeWidgetItem *))); + treeWidget->setCurrentItem(getFromHash(page)); + connect(treeWidget, SIGNAL(itemClicked(QTreeWidgetItem *, int)), this, SLOT(treeItemClicked(QTreeWidgetItem *, int))); + connect(treeWidget, SIGNAL(currentItemChanged(QTreeWidgetItem *, QTreeWidgetItem *)), this, SLOT(treeItemChanged(QTreeWidgetItem *, QTreeWidgetItem *))); + } +} + +/* + * Function to simplify insertion of QTreeWidgetItem <-> Page association + * into a double direction hash. + */ +void MainWin::hashInsert(QTreeWidgetItem *item, Pages *page) +{ + m_pagehash.insert(item, page); + m_widgethash.insert(page, item); +} + +/* + * Function to simplify removal of QTreeWidgetItem <-> Page association + * into a double direction hash. + */ +void MainWin::hashRemove(QTreeWidgetItem *item, Pages *page) +{ + /* I had all sorts of return status checking code here. Do we have a log + * level capability in bat. I would have left it in but it used printf's + * and it should really be some kind of log level facility ??? + * ******FIXME********/ + m_pagehash.remove(item); + m_widgethash.remove(page); +} + +/* + * Function to retrieve a Page* when the item in the page selector's tree is + * known. + */ +Pages* MainWin::getFromHash(QTreeWidgetItem *item) +{ + return m_pagehash.value(item); +} + +/* + * Function to retrieve the page selectors tree widget item when the page is + * known. 
+ */ +QTreeWidgetItem* MainWin::getFromHash(Pages *page) +{ + return m_widgethash.value(page); +} + +void MainWin::closeCurrentPage() +{ + closePage(-1); +} + +/* + * Function to respond to action on page selector context menu to close the + * current window. + */ +void MainWin::closePage(int item) +{ + QTreeWidgetItem *currentitem; + Pages *page = NULL; + + if (item >= 0) { + page = (Pages *)tabWidget->widget(item); + } else { + currentitem = treeWidget->currentItem(); + /* Is this a page that has been inserted into the hash */ + if (getFromHash(currentitem)) { + page = getFromHash(currentitem); + } + } + + if (page) { + if (page->isCloseable()) { + page->closeStackPage(); + } else { + page->hidePage(); + } + } +} + +/* Quick function to return the current console */ +Console *MainWin::currentConsole() +{ + return m_currentConsole; +} + +/* Quick function to return the tree item for the director */ +QTreeWidgetItem *MainWin::currentTopItem() +{ + return m_currentConsole->directorTreeItem(); +} + +/* Preferences menu item clicked */ +void MainWin::setPreferences() +{ + prefsDialog prefs; + prefs.commDebug->setCheckState(m_commDebug ? Qt::Checked : Qt::Unchecked); + prefs.connDebug->setCheckState(m_connDebug ? Qt::Checked : Qt::Unchecked); + prefs.displayAll->setCheckState(m_displayAll ? Qt::Checked : Qt::Unchecked); + prefs.sqlDebug->setCheckState(m_sqlDebug ? Qt::Checked : Qt::Unchecked); + prefs.commandDebug->setCheckState(m_commandDebug ? Qt::Checked : Qt::Unchecked); + prefs.miscDebug->setCheckState(m_miscDebug ? Qt::Checked : Qt::Unchecked); + prefs.recordLimit->setCheckState(m_recordLimitCheck ? Qt::Checked : Qt::Unchecked); + prefs.recordSpinBox->setValue(m_recordLimitVal); + prefs.daysLimit->setCheckState(m_daysLimitCheck ? Qt::Checked : Qt::Unchecked); + prefs.daysSpinBox->setValue(m_daysLimitVal); + prefs.checkMessages->setCheckState(m_checkMessages ? Qt::Checked : Qt::Unchecked); + prefs.checkMessagesSpin->setValue(m_checkMessagesInterval); + prefs.executeLongCheckBox->setCheckState(m_longList ? Qt::Checked : Qt::Unchecked); + prefs.rtPopDirCheckBox->setCheckState(m_rtPopDirDebug ? Qt::Checked : Qt::Unchecked); + prefs.rtDirCurICCheckBox->setCheckState(m_rtDirCurICDebug ? Qt::Checked : Qt::Unchecked); + prefs.rtDirICCheckBox->setCheckState(m_rtDirICDebug ? Qt::Checked : Qt::Unchecked); + prefs.rtFileTabICCheckBox->setCheckState(m_rtFileTabICDebug ? Qt::Checked : Qt::Unchecked); + prefs.rtVerTabICCheckBox->setCheckState(m_rtVerTabICDebug ? Qt::Checked : Qt::Unchecked); + prefs.rtUpdateFTCheckBox->setCheckState(m_rtUpdateFTDebug ? Qt::Checked : Qt::Unchecked); + prefs.rtUpdateVTCheckBox->setCheckState(m_rtUpdateVTDebug ? Qt::Checked : Qt::Unchecked); + prefs.rtChecksCheckBox->setCheckState(m_rtChecksDebug ? Qt::Checked : Qt::Unchecked); + prefs.rtIconStateCheckBox->setCheckState(m_rtIconStateDebug ? Qt::Checked : Qt::Unchecked); + prefs.rtRestore1CheckBox->setCheckState(m_rtRestore1Debug ? Qt::Checked : Qt::Unchecked); + prefs.rtRestore2CheckBox->setCheckState(m_rtRestore2Debug ? Qt::Checked : Qt::Unchecked); + prefs.rtRestore3CheckBox->setCheckState(m_rtRestore3Debug ? Qt::Checked : Qt::Unchecked); + switch (ItemFormatterBase::getBytesConversion()) { + case ItemFormatterBase::BYTES_CONVERSION_NONE: + prefs.radioConvertOff->setChecked(true); + break; + case ItemFormatterBase::BYTES_CONVERSION_IEC: + prefs.radioConvertIEC->setChecked(true); + break; + default: + prefs.radioConvertStandard->setChecked(true); + break; + } + prefs.openPlotCheckBox->setCheckState(m_openPlot ? 
Qt::Checked : Qt::Unchecked); +#ifndef HAVE_QWT + prefs.openPlotCheckBox->setVisible(false); +#endif + prefs.openBrowserCheckBox->setCheckState(m_openBrowser ? Qt::Checked : Qt::Unchecked); + prefs.openDirStatCheckBox->setCheckState(m_openDirStat ? Qt::Checked : Qt::Unchecked); + prefs.exec(); +} + +/* Preferences dialog */ +prefsDialog::prefsDialog() : QDialog() +{ + setupUi(this); +} + +void prefsDialog::accept() +{ + this->hide(); + mainWin->m_commDebug = this->commDebug->checkState() == Qt::Checked; + mainWin->m_connDebug = this->connDebug->checkState() == Qt::Checked; + mainWin->m_displayAll = this->displayAll->checkState() == Qt::Checked; + mainWin->m_sqlDebug = this->sqlDebug->checkState() == Qt::Checked; + mainWin->m_commandDebug = this->commandDebug->checkState() == Qt::Checked; + mainWin->m_miscDebug = this->miscDebug->checkState() == Qt::Checked; + mainWin->m_recordLimitCheck = this->recordLimit->checkState() == Qt::Checked; + mainWin->m_recordLimitVal = this->recordSpinBox->value(); + mainWin->m_daysLimitCheck = this->daysLimit->checkState() == Qt::Checked; + mainWin->m_daysLimitVal = this->daysSpinBox->value(); + mainWin->m_checkMessages = this->checkMessages->checkState() == Qt::Checked; + mainWin->m_checkMessagesInterval = this->checkMessagesSpin->value(); + mainWin->m_longList = this->executeLongCheckBox->checkState() == Qt::Checked; + + mainWin->m_rtPopDirDebug = this->rtPopDirCheckBox->checkState() == Qt::Checked; + mainWin->m_rtDirCurICDebug = this->rtDirCurICCheckBox->checkState() == Qt::Checked; + mainWin->m_rtDirICDebug = this->rtDirICCheckBox->checkState() == Qt::Checked; + mainWin->m_rtFileTabICDebug = this->rtFileTabICCheckBox->checkState() == Qt::Checked; + mainWin->m_rtVerTabICDebug = this->rtVerTabICCheckBox->checkState() == Qt::Checked; + mainWin->m_rtUpdateFTDebug = this->rtUpdateFTCheckBox->checkState() == Qt::Checked; + mainWin->m_rtUpdateVTDebug = this->rtUpdateVTCheckBox->checkState() == Qt::Checked; + mainWin->m_rtChecksDebug = this->rtChecksCheckBox->checkState() == Qt::Checked; + mainWin->m_rtIconStateDebug = this->rtIconStateCheckBox->checkState() == Qt::Checked; + mainWin->m_rtRestore1Debug = this->rtRestore1CheckBox->checkState() == Qt::Checked; + mainWin->m_rtRestore2Debug = this->rtRestore2CheckBox->checkState() == Qt::Checked; + mainWin->m_rtRestore3Debug = this->rtRestore3CheckBox->checkState() == Qt::Checked; + if (this->radioConvertOff->isChecked()) { + ItemFormatterBase::setBytesConversion(ItemFormatterBase::BYTES_CONVERSION_NONE); + } else if (this->radioConvertIEC->isChecked()){ + ItemFormatterBase::setBytesConversion(ItemFormatterBase::BYTES_CONVERSION_IEC); + } else { + ItemFormatterBase::setBytesConversion(ItemFormatterBase::BYTES_CONVERSION_SI); + } + mainWin->m_openPlot = this->openPlotCheckBox->checkState() == Qt::Checked; + mainWin->m_openBrowser = this->openBrowserCheckBox->checkState() == Qt::Checked; + mainWin->m_openDirStat = this->openDirStatCheckBox->checkState() == Qt::Checked; + + QSettings settings("www.bacula.org", "bat"); + settings.beginGroup("Debug"); + settings.setValue("commDebug", mainWin->m_commDebug); + settings.setValue("connDebug", mainWin->m_connDebug); + settings.setValue("displayAll", mainWin->m_displayAll); + settings.setValue("sqlDebug", mainWin->m_sqlDebug); + settings.setValue("commandDebug", mainWin->m_commandDebug); + settings.setValue("miscDebug", mainWin->m_miscDebug); + settings.endGroup(); + settings.beginGroup("JobList"); + settings.setValue("recordLimitCheck", mainWin->m_recordLimitCheck); + 
settings.setValue("recordLimitVal", mainWin->m_recordLimitVal); + settings.setValue("daysLimitCheck", mainWin->m_daysLimitCheck); + settings.setValue("daysLimitVal", mainWin->m_daysLimitVal); + settings.endGroup(); + settings.beginGroup("Timers"); + settings.setValue("checkMessages", mainWin->m_checkMessages); + settings.setValue("checkMessagesInterval", mainWin->m_checkMessagesInterval); + settings.endGroup(); + settings.beginGroup("Misc"); + settings.setValue("longList", mainWin->m_longList); + settings.setValue("byteConvert", ItemFormatterBase::getBytesConversion()); + settings.setValue("openplot", mainWin->m_openPlot); + settings.setValue("openbrowser", mainWin->m_openBrowser); + settings.setValue("opendirstat", mainWin->m_openDirStat); + settings.endGroup(); + settings.beginGroup("RestoreTree"); + settings.setValue("rtPopDirDebug", mainWin->m_rtPopDirDebug); + settings.setValue("rtDirCurICDebug", mainWin->m_rtDirCurICDebug); + settings.setValue("rtDirCurICRetDebug", mainWin->m_rtDirICDebug); + settings.setValue("rtFileTabICDebug", mainWin->m_rtFileTabICDebug); + settings.setValue("rtVerTabICDebug", mainWin->m_rtVerTabICDebug); + settings.setValue("rtUpdateFTDebug", mainWin->m_rtUpdateFTDebug); + settings.setValue("rtUpdateVTDebug", mainWin->m_rtUpdateVTDebug); + settings.setValue("rtChecksDebug", mainWin->m_rtChecksDebug); + settings.setValue("rtIconStateDebug", mainWin->m_rtIconStateDebug); + settings.setValue("rtRestore1Debug", mainWin->m_rtRestore1Debug); + settings.setValue("rtRestore2Debug", mainWin->m_rtRestore2Debug); + settings.setValue("rtRestore3Debug", mainWin->m_rtRestore3Debug); + settings.endGroup(); +} + +void prefsDialog::reject() +{ + this->hide(); + mainWin->set_status(tr("Canceled")); +} + +/* read preferences for the prefences dialog box */ +void MainWin::readPreferences() +{ + QSettings settings("www.bacula.org", "bat"); + settings.beginGroup("Debug"); + m_commDebug = settings.value("commDebug", false).toBool(); + m_connDebug = settings.value("connDebug", false).toBool(); + m_displayAll = settings.value("displayAll", false).toBool(); + m_sqlDebug = settings.value("sqlDebug", false).toBool(); + m_commandDebug = settings.value("commandDebug", false).toBool(); + m_miscDebug = settings.value("miscDebug", false).toBool(); + settings.endGroup(); + settings.beginGroup("JobList"); + m_recordLimitCheck = settings.value("recordLimitCheck", true).toBool(); + m_recordLimitVal = settings.value("recordLimitVal", 50).toInt(); + m_daysLimitCheck = settings.value("daysLimitCheck", false).toBool(); + m_daysLimitVal = settings.value("daysLimitVal", 28).toInt(); + settings.endGroup(); + settings.beginGroup("Timers"); + m_checkMessages = settings.value("checkMessages", false).toBool(); + m_checkMessagesInterval = settings.value("checkMessagesInterval", 28).toInt(); + settings.endGroup(); + settings.beginGroup("Misc"); + m_longList = settings.value("longList", false).toBool(); + ItemFormatterBase::setBytesConversion( + (ItemFormatterBase::BYTES_CONVERSION) settings.value("byteConvert", + ItemFormatterBase::BYTES_CONVERSION_IEC).toInt()); + m_openPlot = settings.value("openplot", false).toBool(); + m_openBrowser = settings.value("openbrowser", false).toBool(); + m_openDirStat = settings.value("opendirstat", false).toBool(); + settings.endGroup(); + settings.beginGroup("RestoreTree"); + m_rtPopDirDebug = settings.value("rtPopDirDebug", false).toBool(); + m_rtDirCurICDebug = settings.value("rtDirCurICDebug", false).toBool(); + m_rtDirICDebug = settings.value("rtDirCurICRetDebug", 
false).toBool(); + m_rtFileTabICDebug = settings.value("rtFileTabICDebug", false).toBool(); + m_rtVerTabICDebug = settings.value("rtVerTabICDebug", false).toBool(); + m_rtUpdateFTDebug = settings.value("rtUpdateFTDebug", false).toBool(); + m_rtUpdateVTDebug = settings.value("rtUpdateVTDebug", false).toBool(); + m_rtChecksDebug = settings.value("rtChecksDebug", false).toBool(); + m_rtIconStateDebug = settings.value("rtIconStateDebug", false).toBool(); + m_rtRestore1Debug = settings.value("rtRestore1Debug", false).toBool(); + m_rtRestore2Debug = settings.value("rtRestore2Debug", false).toBool(); + m_rtRestore3Debug = settings.value("rtRestore3Debug", false).toBool(); + settings.endGroup(); +} + +void MainWin::setMessageIcon() +{ + if (m_currentConsole->is_messagesPending()) + actionMessages->setIcon(QIcon(QString::fromUtf8(":/images/mail-message-pending.png"))); + else + actionMessages->setIcon(QIcon(QString::fromUtf8(":/images/mail-message-new.png"))); +} + +void MainWin::goToPreviousPage() +{ + m_treeStackTrap = true; + bool done = false; + while (!done) { + /* If stack list is emtpty, then done */ + if (m_treeWidgetStack.isEmpty()) { + done = true; + } else { + QTreeWidgetItem* testItem = m_treeWidgetStack.takeLast(); + QTreeWidgetItemIterator it(treeWidget); + /* lets avoid a segfault by setting an item current that no longer exists */ + while (*it) { + if (*it == testItem) { + if (testItem != treeWidget->currentItem()) { + treeWidget->setCurrentItem(testItem); + done = true; + } + break; + } + ++it; + } + } + } + m_treeStackTrap = false; +} diff --git a/src/qt-console/mainwin.h b/src/qt-console/mainwin.h new file mode 100644 index 00000000..5fd76c0f --- /dev/null +++ b/src/qt-console/mainwin.h @@ -0,0 +1,171 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * qt-console main window class definition. 
+ * + * Written by Kern Sibbald, January MMVII + */ + +#ifndef _MAINWIN_H_ +#define _MAINWIN_H_ + +#if QT_VERSION >= 0x050000 +#include +#else +#include +#endif +#include +#include "ui_main.h" + +class Console; +class Pages; + +class MainWin : public QMainWindow, public Ui::MainForm +{ + Q_OBJECT + +public: + MainWin(QWidget *parent = 0); + void set_statusf(const char *fmt, ...); + void set_status_ready(); + void set_status(const char *buf); + void set_status(const QString &str); + void writeSettings(); + void readSettings(); + void resetFocus() { lineEdit->setFocus(); }; + void hashInsert(QTreeWidgetItem *, Pages *); + void hashRemove(Pages *); + void hashRemove(QTreeWidgetItem *, Pages *); + void setMessageIcon(); + bool getWaitState() {return m_waitState; }; + bool isClosing() {return m_isClosing; }; + Console *currentConsole(); + QTreeWidgetItem *currentTopItem(); + Pages* getFromHash(QTreeWidgetItem *); + QTreeWidgetItem* getFromHash(Pages *); + /* This hash is to get the page when the page selector widget is known */ + QHash m_pagehash; + /* This hash is to get the page selector widget when the page is known */ + QHash m_widgethash; + /* This is a list of consoles */ + QHash m_consoleHash; + void createPageJobList(const QString &, const QString &, + const QString &, const QString &, QTreeWidgetItem *); + QString m_dtformat; + /* Begin Preferences variables */ + bool m_commDebug; + bool m_connDebug; + bool m_displayAll; + bool m_sqlDebug; + bool m_commandDebug; + bool m_miscDebug; + bool m_recordLimitCheck; + int m_recordLimitVal; + bool m_daysLimitCheck; + int m_daysLimitVal; + bool m_checkMessages; + int m_checkMessagesInterval; + bool m_longList; + bool m_rtPopDirDebug; + bool m_rtDirCurICDebug; + bool m_rtDirICDebug; + bool m_rtFileTabICDebug; + bool m_rtVerTabICDebug; + bool m_rtUpdateFTDebug; + bool m_rtUpdateVTDebug; + bool m_rtChecksDebug; + bool m_rtIconStateDebug; + bool m_rtRestore1Debug; + bool m_rtRestore2Debug; + bool m_rtRestore3Debug; + bool m_openBrowser; + bool m_openPlot; + bool m_openDirStat; + + /* Global */ + bool m_notify; /* global flag to turn on/off all notifiers */ + +public slots: + void input_line(); + void about(); + void help(); + void treeItemClicked(QTreeWidgetItem *item, int column); + void labelButtonClicked(); + void runButtonClicked(); + void estimateButtonClicked(); + void browseButtonClicked(); + void statusPageButtonClicked(); + void jobPlotButtonClicked(); + void restoreButtonClicked(); + void undockWindowButton(); + void treeItemChanged(QTreeWidgetItem *, QTreeWidgetItem *); + void stackItemChanged(int); + void toggleDockContextWindow(); + void closePage(int item); + void closeCurrentPage(); + void setPreferences(); + void readPreferences(); + void waitEnter(); + void waitExit(); + void repopLists(); + void reloadRepopLists(); + void popLists(); + void goToPreviousPage(); + +protected: + void closeEvent(QCloseEvent *event); + void keyPressEvent(QKeyEvent *event); + +private: + void connectConsole(); + void createPages(); + void connectSignals(); + void disconnectSignals(); + void connectConsoleSignals(); + void disconnectConsoleSignals(Console *console); + +private: + Console *m_currentConsole; + Pages *m_pagespophold; + QStringList m_cmd_history; + int m_cmd_last; + QTreeWidgetItem *m_firstItem; + QTreeWidgetItem *m_waitTreeItem; + bool m_isClosing; + bool m_waitState; + bool m_doConnect; + QList m_treeWidgetStack; + bool m_treeStackTrap; +}; + +#include "ui_prefs.h" + +class prefsDialog : public QDialog, public Ui::PrefsForm +{ + Q_OBJECT + 
+public: + prefsDialog(); + +private slots: + void accept(); + void reject(); +}; + +#endif /* _MAINWIN_H_ */ diff --git a/src/qt-console/make-win32 b/src/qt-console/make-win32 new file mode 100755 index 00000000..76a70599 --- /dev/null +++ b/src/qt-console/make-win32 @@ -0,0 +1,77 @@ +#!/bin/sh +# +# Used to build the Win32/Win64 version of bat +# +# +# Copyright (C) 2000-2018 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# + +prepare_bat_build () +{ + local version=$1 + local dpkgs_mingw + if test -d ../win32/release${version}; then + + + if [ ${version} -eq 32 ] + then + depkgs_mingw='depkgs-mingw32' + + elif [ ${version} -eq 64 ] + then + depkgs_mingw='depkgs-mingw-w64' + else + echo "wrong version - '${version}'." + exit 1 + fi + + cp -f ${DEPKGS}/${depkgs_mingw}/lib/qt/QtGui4.dll ../win32/release${version} + cp -f ${DEPKGS}/${depkgs_mingw}/lib/qt/QtCore4.dll ../win32/release${version} + + rm -rf ../win32/release${version}/help + mkdir ../win32/release${version}/help + cp -f help/*.html ../win32/release${version}/help/ + cp -f images/status.png ../win32/release${version}/help/ + cp -f images/mail-message-new.png ../win32/release${version}/help/ + qmake -spec win32 -unix -o Makefile.mingw${version} bat.pro.mingw${version} + echo "Make Windows bat" + make -j3 -f Makefile.mingw${version} $2 + if test -f release/bat.exe; then + cp -f release/bat.exe ../win32/release${version} + else + cp -f debug/bat.exe ../win32/release${version} + fi + fi +} + +prepare_tray_monitor_build () +{ + local version=$1 + cd tray-monitor + qmake -spec ../win32 -unix -o Makefile.mingw${version} tray-monitor.pro.mingw${version} + echo "Make Windows tray-monitor" + make -j3 -f Makefile.mingw${version} $2 + if test -f release/bacula-tray-monitor.exe; then + cp -f release/bacula-tray-monitor.exe ../../win32/release${version} + else + cp -f debug/bacula-tray-monitor.exe ../../win32/release${version} + fi + rm -f release/bacula-tray-monitor.exe debug/bacula-tray-monitor.exe + cd .. +} + +readonly BUILD_ARCH="$1" + +rm -f debug/bat.exe +if test -f ../config.h ; then + mv -f ../config.h ../config.h.orig +fi + +prepare_bat_build ${BUILD_ARCH:-64} + +if test -e ../config.h.orig ; then + mv -f ../config.h.orig ../config.h +fi + +prepare_tray_monitor_build ${BUILD_ARCH:-64} diff --git a/src/qt-console/mediaedit/mediaedit.cpp b/src/qt-console/mediaedit/mediaedit.cpp new file mode 100644 index 00000000..9662e376 --- /dev/null +++ b/src/qt-console/mediaedit/mediaedit.cpp @@ -0,0 +1,420 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Dirk Bartley, March 2007 + */ + +#include "bat.h" +#include +#include +#include +#include "mediaedit.h" + +/* + * A constructor + */ +MediaEdit::MediaEdit(QTreeWidgetItem *parentWidget, QString &mediaId) + : Pages() +{ + setupUi(this); + pgInitialize(tr("Media Edit"), parentWidget); + QTreeWidgetItem* thisitem = mainWin->getFromHash(this); + thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/cartridge-edit.png"))); + dockPage(); + setCurrent(); + + connect(okButton, SIGNAL(pressed()), this, SLOT(okButtonPushed())); + connect(cancelButton, SIGNAL(pressed()), this, SLOT(cancelButtonPushed())); + connectSpins(); + connect(retentionSpin, SIGNAL(valueChanged(int)), this, SLOT(retentionChanged())); + connect(useDurationSpin, SIGNAL(valueChanged(int)), this, SLOT(useDurationChanged())); + connect(retentionRadio, SIGNAL(pressed()), this, SLOT(retentionRadioPressed())); + connect(useDurationRadio, SIGNAL(pressed()), this, SLOT(useDurationRadioPressed())); + + m_pool = ""; + m_recyclePool = ""; + m_status = ""; + m_slot = 0; + + /* The media's pool */ + poolCombo->addItems(m_console->pool_list); + + /* The media's Status */ + QStringList statusList = (QStringList() << "Full" << "Used" << "Append" + << "Error" << "Purged" << "Recycle" << "Read-Only" << "Cleaning"); + statusCombo->addItems(statusList); + + /* Set up the query for the default values */ + QStringList FieldList = (QStringList() + << "Media.VolumeName" << "Pool.Name" << "Media.VolStatus" << "Media.Slot" + << "Media.VolRetention" << "Media.VolUseDuration" << "Media.MaxVolJobs" + << "Media.MaxVolFiles" << "Media.MaxVolBytes" << "Media.Recycle" << "Media.Enabled" + << "Pol.Name"); + QStringList AsList = (QStringList() + << "VolumeName" << "PoolName" << "Status" << "Slot" + << "Retention" << "UseDuration" << "MaxJobs" + << "MaxFiles" << "MaxBytes" << "Recycle" << "Enabled" + << "RecyclePool"); + int i = 0; + QString query("SELECT "); + foreach (QString field, FieldList) { + if (i != 0) { + query += ", "; + } + query += field + " AS " + AsList[i]; + i += 1; + } + + QString where = " WHERE Media.VolumeName = '" + mediaId + "' "; + if (mediaId.contains(QRegExp("^[0-9]+$"))) { + where = " WHERE Media.MediaId=" + mediaId; + } + query += " FROM Media" + " JOIN Pool ON (Media.PoolId=Pool.PoolId)" + " LEFT OUTER JOIN Pool AS Pol ON (Media.RecyclePoolId=Pol.PoolId)" + + where; + + if (mainWin->m_sqlDebug) { + Pmsg1(000, "MediaList query cmd : %s\n",query.toUtf8().data()); + } + QStringList results; + if (m_console->sql_cmd(query, results)) { + QString field; + QStringList fieldlist; + + /* Iterate through the lines of results, there should only be one. 
*/ + foreach (QString resultline, results) { + fieldlist = resultline.split("\t"); + i = 0; + + /* Iterate through fields in the record */ + foreach (field, fieldlist) { + field = field.trimmed(); /* strip leading & trailing spaces */ + bool ok; + if (i == 0) { + m_mediaName = field; + volumeLabel->setText(QString("Volume : %1").arg(m_mediaName)); + } else if (i == 1) { + m_pool = field; + } else if (i == 2) { + m_status = field; + } else if (i == 3) { + m_slot = field.toInt(&ok, 10); + if (!ok){ m_slot = 0; } + } else if (i == 4) { + m_retention = field.toInt(&ok, 10); + if (!ok){ m_retention = 0; } + } else if (i == 5) { + m_useDuration = field.toInt(&ok, 10); + if (!ok){ m_useDuration = 0; } + } else if (i == 6) { + m_maxVolJobs = field.toInt(&ok, 10); + if (!ok){ m_maxVolJobs = 0; } + } else if (i == 7) { + m_maxVolFiles = field.toInt(&ok, 10); + if (!ok){ m_maxVolFiles = 0; } + } else if (i == 8) { + m_maxVolBytes = field.toInt(&ok, 10); + if (!ok){ m_maxVolBytes = 0; } + } else if (i == 9) { + if (field == "1") m_recycle = true; + else m_recycle = false; + } else if (i == 10) { + if (field == "1") m_enabled = true; + else m_enabled = false; + } else if (i == 11) { + m_recyclePool = field; + } + i++; + } /* foreach field */ + } /* foreach resultline */ + } /* if results from query */ + + if (m_mediaName != "") { + int index; + /* default value for pool */ + index = poolCombo->findText(m_pool, Qt::MatchExactly); + if (index != -1) { + poolCombo->setCurrentIndex(index); + } + + /* default value for status */ + index = statusCombo->findText(m_status, Qt::MatchExactly); + if (index != -1) { + statusCombo->setCurrentIndex(index); + } + slotSpin->setValue(m_slot); + retentionSpin->setValue(m_retention); + useDurationSpin->setValue(m_useDuration); + setSpins(retentionSpin->value()); + retentionRadio->setChecked(true); + maxJobsSpin->setValue(m_maxVolJobs); + maxFilesSpin->setValue(m_maxVolFiles); + maxBytesSpin->setValue(m_maxVolBytes); + if (m_recycle) recycleCheck->setCheckState(Qt::Checked); + else recycleCheck->setCheckState(Qt::Unchecked); + if (m_enabled) enabledCheck->setCheckState(Qt::Checked); + else enabledCheck->setCheckState(Qt::Unchecked); + /* default for recycle pool */ + recyclePoolCombo->addItems(m_console->pool_list); + recyclePoolCombo->insertItem(0, "*None*"); + index = recyclePoolCombo->findText(m_recyclePool, Qt::MatchExactly); + if (index == -1) { + index = 0; + } + recyclePoolCombo->setCurrentIndex(index); + } else { + QMessageBox::warning(this, tr("No Volume name"), tr("No Volume name given"), + QMessageBox::Ok, QMessageBox::Ok); + return; + } +} + +/* + * Function to handle updating the record then closing the page + */ +void MediaEdit::okButtonPushed() +{ + QString scmd; + this->hide(); + bool docmd = false; + scmd = QString("update volume=\"%1\"") + .arg(m_mediaName); + if (m_pool != poolCombo->currentText()) { + scmd += " pool=\"" + poolCombo->currentText() + "\""; + docmd = true; + } + if (m_status != statusCombo->currentText()) { + scmd += " volstatus=\"" + statusCombo->currentText() + "\""; + docmd = true; + } + if (m_slot != slotSpin->value()) { + scmd += " slot=" + QString().setNum(slotSpin->value()); + docmd = true; + } + if (m_retention != retentionSpin->value()) { + scmd += " VolRetention=" + QString().setNum(retentionSpin->value()); + docmd = true; + } + if (m_useDuration != useDurationSpin->value()) { + scmd += " VolUse=" + QString().setNum(useDurationSpin->value()); + docmd = true; + } + if (m_maxVolJobs != maxJobsSpin->value()) { + scmd += " 
MaxVolJobs=" + QString().setNum(maxJobsSpin->value()); + docmd = true; + } + if (m_maxVolFiles != maxFilesSpin->value()) { + scmd += " MaxVolFiles=" + QString().setNum(maxFilesSpin->value()); + docmd = true; + } + if (m_maxVolBytes != maxBytesSpin->value()) { + scmd += " MaxVolBytes=" + QString().setNum(maxBytesSpin->value()); + docmd = true; + } + if ((m_recycle) && (recycleCheck->checkState() == Qt::Unchecked)) { + scmd += " Recycle=no"; + docmd = true; + } + if ((!m_recycle) && (recycleCheck->checkState() == Qt::Checked)) { + scmd += " Recycle=yes"; + docmd = true; + } + if ((m_enabled) && (enabledCheck->checkState() == Qt::Unchecked)) { + scmd += " enabled=no"; + docmd = true; + } + if ((!m_enabled) && (enabledCheck->checkState() == Qt::Checked)) { + scmd += " enabled=yes"; + docmd = true; + } + if (m_recyclePool != recyclePoolCombo->currentText()) { + scmd += " recyclepool=\""; + if (recyclePoolCombo->currentText() != "*None*") { + scmd += recyclePoolCombo->currentText(); + } + scmd += "\""; + docmd = true; + } + if (docmd) { + if (mainWin->m_commandDebug) { + Pmsg1(000, "sending command : %s\n",scmd.toUtf8().data()); + } + consoleCommand(scmd); + } + closeStackPage(); +} + +/* close if cancel */ +void MediaEdit::cancelButtonPushed() +{ + closeStackPage(); +} + +/* + * Slot for user changed retention + */ +void MediaEdit::retentionChanged() +{ + retentionRadio->setChecked(true); + setSpins(retentionSpin->value()); +} + +/* + * Slot for user changed the use duration + */ +void MediaEdit::useDurationChanged() +{ + useDurationRadio->setChecked(true); + setSpins(useDurationSpin->value()); +} + +/* + * Set the 5 duration spins from a known duration value + */ +void MediaEdit::setSpins(int value) +{ + int years, months, days, hours, minutes, seconds, left; + + years = abs(value / 31536000); + left = value - years * 31536000; + months = abs(left / 2592000); + left = left - months * 2592000; + days = abs(left / 86400); + left = left - days * 86400; + hours = abs(left / 3600); + left = left - hours * 3600; + minutes = abs(left / 60); + seconds = left - minutes * 60; + disconnectSpins(); + yearsSpin->setValue(years); + monthsSpin->setValue(months); + daysSpin->setValue(days); + hoursSpin->setValue(hours); + minutesSpin->setValue(minutes); + secondsSpin->setValue(seconds); + connectSpins(); +} + +/* + * This slot is called any time any one of the 5 duration spins a changed. 
+ */ +void MediaEdit::durationChanged() +{ + disconnectSpins(); + if (secondsSpin->value() == -1) { + secondsSpin->setValue(59); + minutesSpin->setValue(minutesSpin->value()-1); + } + if (minutesSpin->value() == -1) { + minutesSpin->setValue(59); + hoursSpin->setValue(hoursSpin->value()-1); + } + if (hoursSpin->value() == -1) { + hoursSpin->setValue(23); + daysSpin->setValue(daysSpin->value()-1); + } + if (daysSpin->value() == -1) { + daysSpin->setValue(29); + monthsSpin->setValue(monthsSpin->value()-1); + } + if (monthsSpin->value() == -1) { + monthsSpin->setValue(11); + yearsSpin->setValue(yearsSpin->value()-1); + } + if (yearsSpin->value() == -1) { + yearsSpin->setValue(0); + } + + if (secondsSpin->value() == 60) { + secondsSpin->setValue(0); + minutesSpin->setValue(minutesSpin->value()+1); + } + if (minutesSpin->value() == 60) { + minutesSpin->setValue(0); + hoursSpin->setValue(hoursSpin->value()+1); + } + if (hoursSpin->value() == 24) { + hoursSpin->setValue(0); + daysSpin->setValue(daysSpin->value()+1); + } + if (daysSpin->value() == 30) { + daysSpin->setValue(0); + monthsSpin->setValue(monthsSpin->value()+1); + } + if (monthsSpin->value() == 12) { + monthsSpin->setValue(0); + yearsSpin->setValue(yearsSpin->value()+1); + } + connectSpins(); + if (retentionRadio->isChecked()) { + int retention; + retention = secondsSpin->value() + minutesSpin->value() * 60 + + hoursSpin->value() * 3600 + daysSpin->value() * 86400 + + monthsSpin->value() * 2592000 + + yearsSpin->value() * 31536000; + disconnect(retentionSpin, SIGNAL(valueChanged(int)), this, SLOT(retentionChanged())); + retentionSpin->setValue(retention); + connect(retentionSpin, SIGNAL(valueChanged(int)), this, SLOT(retentionChanged())); + } + if (useDurationRadio->isChecked()) { + int useDuration; + useDuration = secondsSpin->value() + minutesSpin->value() * 60 + + hoursSpin->value() * 3600 + daysSpin->value() * 86400 + + monthsSpin->value() * 2592000 + + yearsSpin->value() * 31536000; + disconnect(useDurationSpin, SIGNAL(valueChanged(int)), this, SLOT(useDurationChanged())); + useDurationSpin->setValue(useDuration); + connect(useDurationSpin, SIGNAL(valueChanged(int)), this, SLOT(useDurationChanged())); + } +} + +/* Connect the spins */ +void MediaEdit::connectSpins() +{ + connect(secondsSpin, SIGNAL(valueChanged(int)), this, SLOT(durationChanged())); + connect(minutesSpin, SIGNAL(valueChanged(int)), this, SLOT(durationChanged())); + connect(hoursSpin, SIGNAL(valueChanged(int)), this, SLOT(durationChanged())); + connect(daysSpin, SIGNAL(valueChanged(int)), this, SLOT(durationChanged())); + connect(monthsSpin, SIGNAL(valueChanged(int)), this, SLOT(durationChanged())); + connect(yearsSpin, SIGNAL(valueChanged(int)), this, SLOT(durationChanged())); +} + +/* disconnect spins so that we can set the value of other spin from changed duration spin */ +void MediaEdit::disconnectSpins() +{ + disconnect(secondsSpin, SIGNAL(valueChanged(int)), this, SLOT(durationChanged())); + disconnect(minutesSpin, SIGNAL(valueChanged(int)), this, SLOT(durationChanged())); + disconnect(hoursSpin, SIGNAL(valueChanged(int)), this, SLOT(durationChanged())); + disconnect(daysSpin, SIGNAL(valueChanged(int)), this, SLOT(durationChanged())); + disconnect(monthsSpin, SIGNAL(valueChanged(int)), this, SLOT(durationChanged())); + disconnect(yearsSpin, SIGNAL(valueChanged(int)), this, SLOT(durationChanged())); +} + +/* slot for setting spins when retention radio checked */ +void MediaEdit::retentionRadioPressed() +{ + setSpins(retentionSpin->value()); +} + +/* slot for 
setting spins when duration radio checked */ +void MediaEdit::useDurationRadioPressed() +{ + setSpins(useDurationSpin->value()); +} diff --git a/src/qt-console/mediaedit/mediaedit.h b/src/qt-console/mediaedit/mediaedit.h new file mode 100644 index 00000000..89ed3dba --- /dev/null +++ b/src/qt-console/mediaedit/mediaedit.h @@ -0,0 +1,68 @@ +#ifndef _MEDIAEDIT_H_ +#define _MEDIAEDIT_H_ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Dirk Bartley, March 2007 + */ + +#if QT_VERSION >= 0x050000 +#include +#else +#include +#endif +#include "ui_mediaedit.h" +#include "console.h" +#include "pages.h" + +class MediaEdit : public Pages, public Ui::mediaEditForm +{ + Q_OBJECT + +public: + MediaEdit(QTreeWidgetItem *parentWidget, QString &mediaId); + +private slots: + void okButtonPushed(); + void cancelButtonPushed(); + void retentionChanged(); + void durationChanged(); + void useDurationChanged(); + void setSpins(int value); + void retentionRadioPressed(); + void useDurationRadioPressed(); + +private: + void connectSpins(); + void disconnectSpins(); + QString m_mediaName; + QString m_pool; + QString m_status; + int m_slot; + int m_retention; + int m_useDuration; + int m_maxVolJobs; + int m_maxVolFiles; + int m_maxVolBytes; + bool m_recycle; + bool m_enabled; + QString m_recyclePool; +}; + +#endif /* _MEDIAEDIT_H_ */ diff --git a/src/qt-console/mediaedit/mediaedit.ui b/src/qt-console/mediaedit/mediaedit.ui new file mode 100644 index 00000000..aaabf014 --- /dev/null +++ b/src/qt-console/mediaedit/mediaedit.ui @@ -0,0 +1,585 @@ + + mediaEditForm + + + + 0 + 0 + 487 + 470 + + + + Form + + + + 9 + + + 6 + + + + + 0 + + + 6 + + + + + Pool: + + + poolCombo + + + + + + + Volume Status: + + + + + + + Max Volume Bytes: + + + slotSpin + + + + + + + + + + + + + Slot: + + + slotSpin + + + + + + + Max Volume Jobs: + + + slotSpin + + + + + + + Use Duration: + + + slotSpin + + + + + + + 0 + + + 6 + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + OK + + + + + + + Cancel + + + + + + + + + 2147483647 + + + + + + + Retention: + + + slotSpin + + + + + + + Recycle Pool: + + + slotSpin + + + + + + + 0 + + + 6 + + + + + Qt::RightToLeft + + + Enabled + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + + + 2147483647 + + + + + + + Max Volume Files: + + + slotSpin + + + + + + + 0 + + + 6 + + + + + Qt::Vertical + + + + 97 + 21 + + + + + + + + 999 + + + -1 + + + + + + + Years + + + + + + + Seconds + + + + + + + 30 + + + -1 + + + + + + + Use Duration + + + + + + + Qt::Vertical + + + + 97 + 31 + + + + + + + + Days + + + + + + + 60 + + + -1 + + + + + + + Hours + + + + + + + Months + + + + + + + 60 + + + -1 + + + + + + + 24 + + + -1 + + + 0 + + + + + + + Retention + + + + + + + 12 + + + -1 + + + + + + + Minutes + + + + + + + + + 2147483647 + + + + + + + 0 + + + 6 + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + + 16777215 + 48 + + + + Volume : + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + + + + + + 2147483647 + + + 
+ + + + 0 + + + 6 + + + + + Qt::RightToLeft + + + Recycle + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + + + 0 + + + 6 + + + + + Qt::Horizontal + + + + 71 + 21 + + + + + + + + + 16777215 + 30 + + + + <html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:12pt; font-weight:600;">Edit a Volume</span></p></body></html> + + + + + + + Qt::Horizontal + + + + 81 + 20 + + + + + + + + + + 10000 + + + + + + + 2147483647 + + + + + + + + + Qt::Vertical + + + + 20 + 40 + + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + Qt::Vertical + + + + 20 + 40 + + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + + diff --git a/src/qt-console/mediainfo/mediainfo.cpp b/src/qt-console/mediainfo/mediainfo.cpp new file mode 100644 index 00000000..2a45bdbc --- /dev/null +++ b/src/qt-console/mediainfo/mediainfo.cpp @@ -0,0 +1,257 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +#include "bat.h" +#include +#include +#include +#include "mediaedit/mediaedit.h" +#include "relabel/relabel.h" +#include "run/run.h" +#include "mediainfo.h" +#include "util/fmtwidgetitem.h" +#include "job/job.h" + +/* + * A constructor + */ +MediaInfo::MediaInfo(QTreeWidgetItem *parentWidget, QString &mediaName) + : Pages() +{ + setupUi(this); + pgInitialize(tr("Media Info"), parentWidget); + QTreeWidgetItem* thisitem = mainWin->getFromHash(this); + thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/cartridge-edit.png"))); + m_mediaName = mediaName; + connect(pbPrune, SIGNAL(clicked()), this, SLOT(pruneVol())); + connect(pbPurge, SIGNAL(clicked()), this, SLOT(purgeVol())); + connect(pbDelete, SIGNAL(clicked()), this, SLOT(deleteVol())); + connect(pbEdit, SIGNAL(clicked()), this, SLOT(editVol())); + connect(tableJob, SIGNAL(itemDoubleClicked(QTableWidgetItem*)), this, SLOT(showInfoForJob(QTableWidgetItem *))); + + dockPage(); + setCurrent(); + populateForm(); +} + +/* + * Subroutine to call class to show the log in the database from that job + */ +void MediaInfo::showInfoForJob(QTableWidgetItem * item) +{ + QTreeWidgetItem* pageSelectorTreeWidgetItem = mainWin->getFromHash(this); + int row = item->row(); + QString jobid = tableJob->item(row, 0)->text(); + new Job(jobid, pageSelectorTreeWidgetItem); +// connect(j, SIGNAL(destroyed()), this, SLOT(populateTree())); +} + +void MediaInfo::pruneVol() +{ + new prunePage(m_mediaName, ""); +// connect(prune, SIGNAL(destroyed()), this, SLOT(populateTree())); +} + +// TODO: use same functions as in medialist.cpp +void MediaInfo::purgeVol() +{ + if (QMessageBox::warning(this, "Bat", + tr("Are you sure you want to purge ?? 
!!!.\n" +"The Purge command will delete associated Catalog database records from Jobs and" +" Volumes without considering the retention period. Purge works only on the" +" Catalog database and does not affect data written to Volumes. This command can" +" be dangerous because you can delete catalog records associated with current" +" backups of files, and we recommend that you do not use it unless you know what" +" you are doing.\n" + "Press OK to proceed with the purge operation?"), + QMessageBox::Ok | QMessageBox::Cancel) + == QMessageBox::Cancel) { return; } + + QString cmd("purge volume="); + cmd += m_mediaName; + consoleCommand(cmd); +} + +void MediaInfo::deleteVol() +{ + if (QMessageBox::warning(this, "Bat", + tr("Are you sure you want to delete?? !!!.\n" +"This delete command is used to delete a Volume record and all associated catalog" +" records that were created. This command operates only on the Catalog" +" database and has no effect on the actual data written to a Volume. This" +" command can be dangerous and we strongly recommend that you do not use" +" it unless you know what you are doing. All Jobs and all associated" +" records (File and JobMedia) will be deleted from the catalog." + "Press OK to proceed with delete operation.?"), + QMessageBox::Ok | QMessageBox::Cancel) + == QMessageBox::Cancel) { return; } + + QString cmd("delete volume="); + cmd += m_mediaName; + consoleCommand(cmd); +} + +void MediaInfo::editVol() +{ + new MediaEdit(mainWin->getFromHash(this), m_mediaId); +// connect(edit, SIGNAL(destroyed()), this, SLOT(populateTree())); +} + +/* + * Populate the text in the window + */ +void MediaInfo::populateForm() +{ + utime_t t; + time_t ttime; + + QString stat, LastWritten; + struct tm tm; + char buf[256]; + QString query = + "SELECT MediaId, VolumeName, Pool.Name, MediaType, FirstWritten," + "LastWritten, VolMounts, VolBytes, Media.Enabled," + "Location.Location, VolStatus, RecyclePool.Name, Media.Recycle, " + "VolReadTime/1000000, VolWriteTime/1000000, Media.VolUseDuration, " + "Media.MaxVolJobs, Media.MaxVolFiles, Media.MaxVolBytes, " + "Media.VolRetention,InChanger,Slot " + "FROM Media JOIN Pool USING (PoolId) LEFT JOIN Pool AS RecyclePool " + "ON (Media.RecyclePoolId=RecyclePool.PoolId) " + "LEFT JOIN Location ON (Media.LocationId=Location.LocationId) " + "WHERE Media.VolumeName='" + m_mediaName + "'"; + + if (mainWin->m_sqlDebug) { + Pmsg1(000, "MediaInfo query cmd : %s\n",query.toUtf8().data()); + } + QStringList results; + if (m_console->sql_cmd(query, results)) { + QString resultline; + QStringList fieldlist; + + foreach (resultline, results) { // should have only one result + fieldlist = resultline.split("\t"); + QStringListIterator fld(fieldlist); + m_mediaId = fld.next(); + + label_VolumeName->setText(fld.next()); + label_Pool->setText(fld.next()); + label_MediaType->setText(fld.next()); + label_FirstWritten->setText(fld.next()); + LastWritten = fld.next(); + label_LastWritten->setText(LastWritten); +// label_VolFiles->setText(fld.next()); + label_VolMounts->setText(fld.next()); + label_VolBytes->setText(convertBytesSI(fld.next().toULongLong())); + label_Enabled->setPixmap(QPixmap(":/images/inflag" + fld.next() + ".png")); + label_Location->setText(fld.next()); + label_VolStatus->setText(fld.next()); + label_RecyclePool->setText(fld.next()); + chkbox_Recycle->setCheckState(fld.next().toInt()?Qt::Checked:Qt::Unchecked); + edit_utime(fld.next().toULongLong(), buf, sizeof(buf)); + label_VolReadTime->setText(QString(buf)); + + 
edit_utime(fld.next().toULongLong(), buf, sizeof(buf)); + label_VolWriteTime->setText(QString(buf)); + + edit_utime(fld.next().toULongLong(), buf, sizeof(buf)); + label_VolUseDuration->setText(QString(buf)); + + label_MaxVolJobs->setText(fld.next()); + label_MaxVolFiles->setText(fld.next()); + label_MaxVolBytes->setText(fld.next()); + + stat = fld.next(); + edit_utime(stat.toULongLong(), buf, sizeof(buf)); + label_VolRetention->setText(QString(buf)); + + if (LastWritten != "") { + t = str_to_utime(LastWritten.toLatin1().data()); + t = t + stat.toULongLong(); + ttime = t; + localtime_r(&ttime, &tm); + strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", &tm); + label_Expire->setText(QString(buf)); + } + label_Online->setPixmap(QPixmap(":/images/inflag"+fld.next()+".png")); +// label_VolFiles->setText(fld.next()); +// label_VolErrors->setText(fld.next()); + +// stat=fld.next(); + +// jobstatus_to_ascii_gui(stat[0].toLatin1(), buf, sizeof(buf)); +// stat = buf; +// + } + } + + query = + "SELECT DISTINCT JobId, Name, StartTime, Type, Level, JobFiles," + "JobBytes,JobStatus " + "FROM Job JOIN JobMedia USING (JobId) JOIN Media USING (MediaId) " + "WHERE Media.VolumeName = '" + m_mediaName + "'"; + + if (mainWin->m_sqlDebug) { + Pmsg1(000, "MediaInfo query cmd : %s\n",query.toUtf8().data()); + } + results.clear(); + if (m_console->sql_cmd(query, results)) { + QString resultline; + QStringList fieldlist; + int row = 0; + tableJob->setRowCount(results.size()); + foreach (resultline, results) { + fieldlist = resultline.split("\t"); + QStringListIterator fld(fieldlist); + int index=0; + TableItemFormatter jobitem(*tableJob, row); + + /* JobId */ + jobitem.setNumericFld(index++, fld.next()); + + /* job name */ + jobitem.setTextFld(index++, fld.next()); + + /* job starttime */ + jobitem.setTextFld(index++, fld.next(), true); + + /* job type */ + jobitem.setJobTypeFld(index++, fld.next()); + + /* job level */ + jobitem.setJobLevelFld(index++, fld.next()); + + /* job files */ + jobitem.setNumericFld(index++, fld.next()); + + /* job bytes */ + jobitem.setBytesFld(index++, fld.next()); + + /* job status */ + jobitem.setJobStatusFld(index++, fld.next()); + row++; + } + } + + tableJob->resizeColumnsToContents(); + tableJob->resizeRowsToContents(); + tableJob->verticalHeader()->hide(); + + /* make read only */ + tableJob->setEditTriggers(QAbstractItemView::NoEditTriggers); +} diff --git a/src/qt-console/mediainfo/mediainfo.h b/src/qt-console/mediainfo/mediainfo.h new file mode 100644 index 00000000..7f3c732b --- /dev/null +++ b/src/qt-console/mediainfo/mediainfo.h @@ -0,0 +1,54 @@ +#ifndef _MEDIAINFO_H_ +#define _MEDIAINFO_H_ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
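Everything in these bat pages reads the catalog through Console::sql_cmd(), which fills a QStringList with one entry per row and separates columns with tab characters; MediaInfo::populateForm() above walks each row with a QStringListIterator in exactly the column order of its SELECT. A minimal sketch of that convention, assuming a hypothetical query that selected only VolumeName and VolBytes:

#include <QStringList>

/* Parse tab-separated rows in the format Console::sql_cmd() returns.
 * Sketch only: assumes the query selected VolumeName, VolBytes in that order. */
static void parseVolumeRows(const QStringList &results)
{
   foreach (const QString &resultline, results) {
      QStringList fieldlist = resultline.split("\t");
      if (fieldlist.size() < 2) {
         continue;                   /* skip short rows, as the pages above do */
      }
      QStringListIterator fld(fieldlist);
      QString volname = fld.next();
      qulonglong bytes = fld.next().toULongLong();
      Q_UNUSED(volname);             /* a real page hands these to a widget formatter */
      Q_UNUSED(bytes);
   }
}

The field order has to match the SELECT list exactly, which is why these pages spell out the column list in the query string instead of using SELECT *.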
+*/ +/* + * + */ + +#if QT_VERSION >= 0x050000 +#include +#else +#include +#endif +#include "ui_mediainfo.h" +#include "console.h" +#include "pages.h" + +class MediaInfo : public Pages, public Ui::mediaInfoForm +{ + Q_OBJECT + +public: + MediaInfo(QTreeWidgetItem *parentWidget, QString &mediaId); + +private slots: + void pruneVol(); + void purgeVol(); + void deleteVol(); + void editVol(); + void showInfoForJob(QTableWidgetItem * item); + +private: + void populateForm(); + QString m_mediaName; + QString m_mediaId; +}; + +#endif /* _MEDIAINFO_H_ */ diff --git a/src/qt-console/mediainfo/mediainfo.ui b/src/qt-console/mediainfo/mediainfo.ui new file mode 100644 index 00000000..9dc9fa74 --- /dev/null +++ b/src/qt-console/mediainfo/mediainfo.ui @@ -0,0 +1,701 @@ + + mediaInfoForm + + + + 0 + 0 + 828 + 814 + + + + Form + + + + + + + + Edit + + + + :/images/edit.png:/images/edit.png + + + true + + + + + + + Purge + + + + :/images/purge.png:/images/purge.png + + + true + + + + + + + Delete + + + + :/images/purge.png:/images/purge.png + + + true + + + + + + + Prune + + + + :/images/edit-cut.png:/images/edit-cut.png + + + true + + + + + + + false + + + Load + + + + :/images/intern.png:/images/intern.png + + + true + + + + + + + false + + + Unload + + + + :/images/extern.png:/images/extern.png + + + true + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + + + QLayout::SetFixedSize + + + + + + 0 + 0 + + + + + 251 + 241 + + + + + 251 + 243 + + + + Information + + + + + + Name: + + + + + + + Vol0001 + + + Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse + + + + + + + Pool: + + + + + + + Default + + + Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse + + + + + + + Online: + + + + + + + + + + :/images/inflag0.png + + + + + + + Enabled: + + + + + + + yes + + + + + + + Location: + + + + + + + Vault + + + Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse + + + + + + + Status: + + + + + + + Append + + + Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse + + + + + + + Media Type: + + + + + + + File + + + Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse + + + + + + + Recycle Pool: + + + + + + + Scratch + + + Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse + + + + + + + + + + + 0 + 0 + + + + + 261 + 241 + + + + + 261 + 241 + + + + Statistics + + + + + + Vol Bytes: + + + + + + + 19.8 MB + + + Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse + + + + + + + Vol Mounts: + + + + + + + 10 + + + Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse + + + + + + + Recycle count: + + + + + + + 5 + + + Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse + + + + + + + Read time: + + + + + + + 10 mins + + + Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse + + + + + + + Write time: + + + + + + + 20 mins + + + Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse + + + + + + + Errors: + + + + + + + 0 + + + Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse + + + + + + + Last Written: + + + + + + + 2009-07-05 12:23:00 + + + Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse + + + + + + + First Written: + + + + + + + 2009-06-05 10:00:00 + + + Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse + + + + + + + + + + + 0 + 0 + + + + + 200 + 241 + + + + + 200 + 241 + + + + Limits + + + + + + Use duration: + + + + + + + 0 + + + Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse + + + + + + + Max jobs: + + + + + + + 0 + + + Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse + + + + + + + Max files: + + + + + + + 0 + + + Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse + + + + + + + Max 
bytes: + + + + + + + 0 + + + Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse + + + + + + + Recycle: + + + + + + + false + + + + + + + + + + Retention: + + + + + + + 365 days + + + Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse + + + + + + + Expire: + + + + + + + 2010-08-03 23:10:03 + + + Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse + + + + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + + + + 0 + 0 + + + + Jobs + + + + + + true + + + + JobId + + + + true + + + + Name + + + + true + + + + Start Time + + + + true + + + + Type + + + + true + + + + Level + + + + true + + + + Files + + + + true + + + + Bytes + + + + true + + + + Status + + + + + + + + + + + + + + diff --git a/src/qt-console/medialist/medialist.cpp b/src/qt-console/medialist/medialist.cpp new file mode 100644 index 00000000..04ce282b --- /dev/null +++ b/src/qt-console/medialist/medialist.cpp @@ -0,0 +1,480 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +/* + * MediaList Class + * + * Dirk Bartley, March 2007 + * + */ + +#include "bat.h" +#include +#include +#include +#include "medialist.h" +#include "mediaedit/mediaedit.h" +#include "mediainfo/mediainfo.h" +#include "joblist/joblist.h" +#include "relabel/relabel.h" +#include "run/run.h" +#include "util/fmtwidgetitem.h" + +MediaList::MediaList() : Pages() +{ + setupUi(this); + m_name = tr("Pools"); + pgInitialize(); + QTreeWidgetItem* thisitem = mainWin->getFromHash(this); + thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/cartridge.png"))); + + /* mp_treeWidget, Storage Tree Tree Widget inherited from ui_medialist.h */ + m_populated = false; + m_checkcurwidget = true; + m_closeable = false; + /* add context sensitive menu items specific to this classto the page + * selector tree. m_contextActions is QList of QActions */ + m_contextActions.append(actionRefreshMediaList); +} + +MediaList::~MediaList() +{ + if (m_populated) + writeExpandedSettings(); +} + +/* + * The main meat of the class!! The function that querries the director and + * creates the widgets with appropriate values. 
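The populateTree() that follows builds its SQL IN-list of pool names by hand, quoting the first name and prepending a comma to the rest. The same string can be produced with QStringList::join(); the helper name below is hypothetical and, like the original loop, it does not escape quotes embedded in a pool name:

#include <QStringList>

/* Turn ("Default", "Scratch") into "'Default','Scratch'" for a SQL IN (...) clause. */
static QString quotedCsv(const QStringList &pools)
{
   QStringList quoted;
   foreach (const QString &p, pools) {
      quoted << "'" + p + "'";
   }
   return quoted.join(",");
}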
+ */ +void MediaList::populateTree() +{ + QTreeWidgetItem *pooltreeitem = NULL; + + if (m_populated) { + writeExpandedSettings(); + } + m_populated = true; + + Freeze frz(*mp_treeWidget); /* disable updating*/ + + QStringList headerlist = (QStringList() + << tr("Volume Name") << tr("Id") << tr("Status") << tr("Enabled") << tr("Bytes") << tr("Files") + << tr("Jobs") << tr("Retention") << tr("Media Type") << tr("Slot") << tr("Use Duration") + << tr("Max Jobs") << tr("Max Files") << tr("Max Bytes") << tr("Recycle") + << tr("Last Written") << tr("First Written") << tr("Read Time") + << tr("Write Time") << tr("Recycle Count") << tr("Recycle Pool")); + + m_checkcurwidget = false; + mp_treeWidget->clear(); + m_checkcurwidget = true; + mp_treeWidget->setColumnCount(headerlist.count()); + m_topItem = new QTreeWidgetItem(mp_treeWidget); + m_topItem->setText(0, tr("Pools")); + m_topItem->setData(0, Qt::UserRole, 0); + m_topItem->setExpanded(true); + + mp_treeWidget->setHeaderLabels(headerlist); + + QSettings settings(m_console->m_dir->name(), "bat"); + settings.beginGroup("MediaListTreeExpanded"); + QString query; + + + /* Comma separated list of pools first */ + bool first = true; + QString pool_comsep(""); + foreach (QString pool_listItem, m_console->pool_list) { + if (first) { + pool_comsep += "'" + pool_listItem + "'"; + first = false; + } + else + pool_comsep += ",'" + pool_listItem + "'"; + } + /* Now use pool_comsep list to perform just one query */ + if (pool_comsep != "") { + query = "SELECT Pool.Name AS pul," + " Media.VolumeName AS Media, " + " Media.MediaId AS Id, Media.VolStatus AS VolStatus," + " Media.Enabled AS Enabled, Media.VolBytes AS Bytes," + " Media.VolFiles AS FileCount, Media.VolJobs AS JobCount," + " Media.VolRetention AS VolumeRetention, Media.MediaType AS MediaType," + " Media.InChanger AS InChanger, Media.Slot AS Slot, " + " Media.VolUseDuration AS UseDuration," + " Media.MaxVolJobs AS MaxJobs, Media.MaxVolFiles AS MaxFiles," + " Media.MaxVolBytes AS MaxBytes, Media.Recycle AS Recycle," + " Media.LastWritten AS LastWritten," + " Media.FirstWritten AS FirstWritten," + " (VolReadTime/1000000) AS ReadTime, (VolWriteTime/1000000) AS WriteTime," + " RecycleCount AS ReCyCount," + " RecPool.Name AS RecyclePool" + " FROM Media" + " JOIN Pool ON (Media.PoolId=Pool.PoolId)" + " LEFT OUTER JOIN Pool AS RecPool ON (Media.RecyclePoolId=RecPool.PoolId)" + " WHERE "; + query += " Pool.Name IN (" + pool_comsep + ")"; + query += " ORDER BY Pool.Name, Media"; + + if (mainWin->m_sqlDebug) { + Pmsg1(000, "MediaList query cmd : %s\n",query.toUtf8().data()); + } + QStringList results; + int counter = 0; + if (m_console->sql_cmd(query, results)) { + QStringList fieldlist; + QString prev_pool(""); + QString this_pool(""); + + /* Iterate through the lines of results. 
*/ + foreach (QString resultline, results) { + fieldlist = resultline.split("\t"); + this_pool = fieldlist.takeFirst(); + if (prev_pool != this_pool) { + prev_pool = this_pool; + pooltreeitem = new QTreeWidgetItem(m_topItem); + pooltreeitem->setText(0, this_pool); + pooltreeitem->setData(0, Qt::UserRole, 1); + } + if(settings.contains(this_pool)) { + pooltreeitem->setExpanded(settings.value(this_pool).toBool()); + } else { + pooltreeitem->setExpanded(true); + } + + if (fieldlist.size() < 21) { // Handle recyclepool specifically, and pool is already removed + Pmsg2(000, "Unexpected line %s %d", resultline.toUtf8().data(), fieldlist.size()); + continue; // some fields missing, ignore row + } + int index = 0; + TreeItemFormatter mediaitem(*pooltreeitem, 2); + + /* Iterate through fields in the record */ + QStringListIterator fld(fieldlist); + + /* volname */ + mediaitem.setTextFld(index++, fld.next()); + + /* id */ + mediaitem.setNumericFld(index++, fld.next()); + + /* status */ + mediaitem.setVolStatusFld(index++, fld.next()); + + /* enabled */ + mediaitem.setBoolFld(index++, fld.next()); + + /* bytes */ + mediaitem.setBytesFld(index++, fld.next()); + + /* files */ + mediaitem.setNumericFld(index++, fld.next()); + + /* jobs */ + mediaitem.setNumericFld(index++, fld.next()); + + /* retention */ + mediaitem.setDurationFld(index++, fld.next()); + + /* media type */ + mediaitem.setTextFld(index++, fld.next()); + + /* inchanger + slot */ + int inchanger = fld.next().toInt(); + if (inchanger) { + mediaitem.setNumericFld(index++, fld.next()); + } + else { + /* volume not in changer, show blank slot */ + mediaitem.setNumericFld(index++, ""); + fld.next(); + } + + /* use duration */ + mediaitem.setDurationFld(index++, fld.next()); + + /* max jobs */ + mediaitem.setNumericFld(index++, fld.next()); + + /* max files */ + mediaitem.setNumericFld(index++, fld.next()); + + /* max bytes */ + mediaitem.setBytesFld(index++, fld.next()); + + /* recycle */ + mediaitem.setBoolFld(index++, fld.next()); + + /* last written */ + mediaitem.setTextFld(index++, fld.next()); + + /* first written */ + mediaitem.setTextFld(index++, fld.next()); + + /* read time */ + mediaitem.setDurationFld(index++, fld.next()); + + /* write time */ + mediaitem.setDurationFld(index++, fld.next()); + + /* Recycle Count */ + mediaitem.setNumericFld(index++, fld.next()); + + /* recycle pool */ + if (fld.hasNext()) { + mediaitem.setTextFld(index++, fld.next()); + } else { + mediaitem.setTextFld(index++, ""); + } + + } /* foreach resultline */ + counter += 1; + } /* if results from query */ + } /* foreach pool_listItem */ + settings.endGroup(); + /* Resize the columns */ + for(int cnter=0; cnterresizeColumnToContents(cnter); + } +} + +/* + * Called from the signal of the context sensitive menu! + */ +void MediaList::editVolume() +{ + MediaEdit* edit = new MediaEdit(mainWin->getFromHash(this), m_currentVolumeId); + connect(edit, SIGNAL(destroyed()), this, SLOT(populateTree())); +} + +/* + * Called from the signal of the context sensitive menu! + */ +void MediaList::showJobs() +{ + QTreeWidgetItem *parentItem = mainWin->getFromHash(this); + mainWin->createPageJobList(m_currentVolumeName, "", "", "", parentItem); +} + +/* + * Called from the signal of the context sensitive menu! 
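The per-pool expansion state used above is a plain QSettings round trip: populateTree() reads the "MediaListTreeExpanded" group keyed by pool name, and writeExpandedSettings() later in this file writes the current state back. Both sides condensed into standalone helpers (the dirName argument mirrors m_console->m_dir->name()):

#include <QSettings>

static void rememberExpanded(const QString &dirName, const QString &pool, bool expanded)
{
   QSettings settings(dirName, "bat");              /* same org/app pair as above */
   settings.beginGroup("MediaListTreeExpanded");
   settings.setValue(pool, expanded);               /* write side */
   settings.endGroup();
}

static bool wasExpanded(const QString &dirName, const QString &pool)
{
   QSettings settings(dirName, "bat");
   settings.beginGroup("MediaListTreeExpanded");
   bool expanded = settings.value(pool, true).toBool();   /* default to expanded */
   settings.endGroup();
   return expanded;
}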
+ */ +void MediaList::viewVolume() +{ + QTreeWidgetItem *parentItem = mainWin->getFromHash(this); + MediaInfo* view = new MediaInfo(parentItem, m_currentVolumeName); + connect(view, SIGNAL(destroyed()), this, SLOT(populateTree())); + +} + +/* + * When the treeWidgetItem in the page selector tree is singleclicked, Make sure + * The tree has been populated. + */ +void MediaList::PgSeltreeWidgetClicked() +{ + if (!m_populated) { + populateTree(); + createContextMenu(); + } + if (!isOnceDocked()) { + dockPage(); + } +} + +/* + * Added to set the context menu policy based on currently active treeWidgetItem + * signaled by currentItemChanged + */ +void MediaList::treeItemChanged(QTreeWidgetItem *currentwidgetitem, QTreeWidgetItem *previouswidgetitem ) +{ + /* m_checkcurwidget checks to see if this is during a refresh, which will segfault */ + if (m_checkcurwidget) { + /* The Previous item */ + if (previouswidgetitem) { /* avoid a segfault if first time */ + foreach(QAction* mediaAction, mp_treeWidget->actions()) { + mp_treeWidget->removeAction(mediaAction); + } + } + + int treedepth = currentwidgetitem->data(0, Qt::UserRole).toInt(); + m_currentVolumeName=currentwidgetitem->text(0); + mp_treeWidget->addAction(actionRefreshMediaList); + if (treedepth == 2){ + m_currentVolumeId=currentwidgetitem->text(1); + mp_treeWidget->addAction(actionEditVolume); + mp_treeWidget->addAction(actionListJobsOnVolume); + mp_treeWidget->addAction(actionDeleteVolume); + mp_treeWidget->addAction(actionPruneVolume); + mp_treeWidget->addAction(actionPurgeVolume); + mp_treeWidget->addAction(actionRelabelVolume); + mp_treeWidget->addAction(actionVolumeFromPool); + } else if (treedepth == 1) { + mp_treeWidget->addAction(actionAllVolumesFromPool); + } + } +} + +/* + * Setup a context menu + * Made separate from populate so that it would not create context menu over and + * over as the tree is repopulated. + */ +void MediaList::createContextMenu() +{ + mp_treeWidget->setContextMenuPolicy(Qt::ActionsContextMenu); + connect(mp_treeWidget, SIGNAL(itemDoubleClicked(QTreeWidgetItem *, int)), this, SLOT(viewVolume())); + connect(actionEditVolume, SIGNAL(triggered()), this, SLOT(editVolume())); + connect(actionListJobsOnVolume, SIGNAL(triggered()), this, SLOT(showJobs())); + connect(actionDeleteVolume, SIGNAL(triggered()), this, SLOT(deleteVolume())); + connect(actionPurgeVolume, SIGNAL(triggered()), this, SLOT(purgeVolume())); + connect(actionPruneVolume, SIGNAL(triggered()), this, SLOT(pruneVolume())); + connect(actionRelabelVolume, SIGNAL(triggered()), this, SLOT(relabelVolume())); + connect(mp_treeWidget, SIGNAL( + currentItemChanged(QTreeWidgetItem *, QTreeWidgetItem *)), + this, SLOT(treeItemChanged(QTreeWidgetItem *, QTreeWidgetItem *))); + /* connect to the action specific to this pages class */ + connect(actionRefreshMediaList, SIGNAL(triggered()), this, + SLOT(populateTree())); + connect(actionAllVolumes, SIGNAL(triggered()), this, SLOT(allVolumes())); + connect(actionAllVolumesFromPool, SIGNAL(triggered()), this, SLOT(allVolumesFromPool())); + connect(actionVolumeFromPool, SIGNAL(triggered()), this, SLOT(volumeFromPool())); +} + +/* + * Virtual function which is called when this page is visible on the stack + */ +void MediaList::currentStackItem() +{ + if(!m_populated) { + populateTree(); + /* Create the context menu for the medialist tree */ + createContextMenu(); + } +} + +/* + * Called from the signal of the context sensitive menu to delete a volume! 
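The context menu handling just shown relies on Qt::ActionsContextMenu: whatever QAction objects are currently attached to the tree widget become its right-click menu, so treeItemChanged() only has to add or remove actions according to the depth stored in the item's Qt::UserRole data (1 for a pool, 2 for a volume). A stripped-down sketch of that idea, with refreshAct and editAct standing in for the actions above:

#include <QTreeWidget>
#include <QAction>

static void updateActions(QTreeWidget *tree, QTreeWidgetItem *item,
                          QAction *refreshAct, QAction *editAct)
{
   tree->setContextMenuPolicy(Qt::ActionsContextMenu);
   foreach (QAction *old, tree->actions()) {
      tree->removeAction(old);                  /* start from a clean slate */
   }
   tree->addAction(refreshAct);                 /* always offered */
   if (item && item->data(0, Qt::UserRole).toInt() == 2) {
      tree->addAction(editAct);                 /* volume-level action only */
   }
}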
+ */ +void MediaList::deleteVolume() +{ + if (QMessageBox::warning(this, "Bat", + tr("Are you sure you want to delete?? !!!.\n" +"This delete command is used to delete a Volume record and all associated catalog" +" records that were created. This command operates only on the Catalog" +" database and has no effect on the actual data written to a Volume. This" +" command can be dangerous and we strongly recommend that you do not use" +" it unless you know what you are doing. All Jobs and all associated" +" records (File and JobMedia) will be deleted from the catalog." + "Press OK to proceed with delete operation.?"), + QMessageBox::Ok | QMessageBox::Cancel) + == QMessageBox::Cancel) { return; } + + QString cmd("delete volume="); + cmd += m_currentVolumeName; + consoleCommand(cmd); +} + +/* + * Called from the signal of the context sensitive menu to purge! + */ +void MediaList::purgeVolume() +{ + if (QMessageBox::warning(this, "Bat", + tr("Are you sure you want to purge ?? !!!.\n" +"The Purge command will delete associated Catalog database records from Jobs and" +" Volumes without considering the retention period. Purge works only on the" +" Catalog database and does not affect data written to Volumes. This command can" +" be dangerous because you can delete catalog records associated with current" +" backups of files, and we recommend that you do not use it unless you know what" +" you are doing.\n" + "Press OK to proceed with the purge operation?"), + QMessageBox::Ok | QMessageBox::Cancel) + == QMessageBox::Cancel) { return; } + + QString cmd("purge volume="); + cmd += m_currentVolumeName; + consoleCommand(cmd); + populateTree(); +} + +/* + * Called from the signal of the context sensitive menu to prune! + */ +void MediaList::pruneVolume() +{ + new prunePage(m_currentVolumeName, ""); +} + +/* + * Called from the signal of the context sensitive menu to relabel! + */ +void MediaList::relabelVolume() +{ + setConsoleCurrent(); + new relabelDialog(m_console, m_currentVolumeName); +} + +/* + * Called from the signal of the context sensitive menu to purge! + */ +void MediaList::allVolumesFromPool() +{ + QString cmd = "update volume AllFromPool=" + m_currentVolumeName; + consoleCommand(cmd); + populateTree(); +} + +void MediaList::allVolumes() +{ + QString cmd = "update volume fromallpools"; + consoleCommand(cmd); + populateTree(); +} + +/* + * Called from the signal of the context sensitive menu to purge! 
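Each destructive slot above has the same shape: show a QMessageBox::warning, return early on Cancel, then hand a single-line command string to consoleCommand(). Condensed into one hypothetical helper (like the originals, the volume name is appended unquoted):

#include <QMessageBox>

static void confirmedVolumeCommand(QWidget *parent, const QString &verb,
                                   const QString &volume)
{
   if (QMessageBox::warning(parent, "Bat",
          QString("Really %1 volume %2?").arg(verb).arg(volume),
          QMessageBox::Ok | QMessageBox::Cancel) == QMessageBox::Cancel) {
      return;
   }
   QString cmd = verb + " volume=" + volume;     /* e.g. "purge volume=Vol0001" */
   Q_UNUSED(cmd);                                /* pass to Pages::consoleCommand() in the real class */
}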
+ */ +void MediaList::volumeFromPool() +{ + QTreeWidgetItem *currentItem = mp_treeWidget->currentItem(); + QTreeWidgetItem *parent = currentItem->parent(); + QString pool = parent->text(0); + QString cmd; + cmd = "update volume=" + m_currentVolumeName + " frompool=" + pool; + consoleCommand(cmd); + populateTree(); +} + +/* + * Write settings to save expanded states of the pools + */ +void MediaList::writeExpandedSettings() +{ + if (m_topItem) { + QSettings settings(m_console->m_dir->name(), "bat"); + settings.beginGroup("MediaListTreeExpanded"); + int childcount = m_topItem->childCount(); + for (int cnt=0; cntchild(cnt); + settings.setValue(poolitem->text(0), poolitem->isExpanded()); + } + settings.endGroup(); + } +} diff --git a/src/qt-console/medialist/medialist.h b/src/qt-console/medialist/medialist.h new file mode 100644 index 00000000..487742ff --- /dev/null +++ b/src/qt-console/medialist/medialist.h @@ -0,0 +1,70 @@ +#ifndef _MEDIALIST_H_ +#define _MEDIALIST_H_ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Dirk Bartley, March 2007 + */ + +#if QT_VERSION >= 0x050000 +#include +#else +#include +#endif +#include "ui_medialist.h" +#include "console.h" +#include + +class MediaList : public Pages, public Ui::MediaListForm +{ + Q_OBJECT + +public: + MediaList(); + ~MediaList(); + virtual void PgSeltreeWidgetClicked(); + virtual void currentStackItem(); + +public slots: + void treeItemChanged(QTreeWidgetItem *, QTreeWidgetItem *); + +private slots: + void populateTree(); + void showJobs(); + void viewVolume(); + void editVolume(); + void deleteVolume(); + void purgeVolume(); + void pruneVolume(); + void relabelVolume(); + void allVolumesFromPool(); + void allVolumes(); + void volumeFromPool(); + +private: + void createContextMenu(); + void writeExpandedSettings(); + QString m_currentVolumeName; + QString m_currentVolumeId; + bool m_populated; + bool m_checkcurwidget; + QTreeWidgetItem *m_topItem; +}; + +#endif /* _MEDIALIST_H_ */ diff --git a/src/qt-console/medialist/medialist.ui b/src/qt-console/medialist/medialist.ui new file mode 100644 index 00000000..058a4f82 --- /dev/null +++ b/src/qt-console/medialist/medialist.ui @@ -0,0 +1,132 @@ + + MediaListForm + + + + 0 + 0 + 490 + 303 + + + + Media Tree + + + + 9 + + + 6 + + + + + + + + :/images/view-refresh.png + + + Refresh Media List + + + Requery the director for the list of media. 
+ + + + + :/images/cartridge-edit.png + + + Edit Volume + + + + + :/images/emblem-system.png + + + List Jobs On Volume + + + + + :/images/edit-delete.png + + + Delete Volume + + + + + :/images/edit-cut.png + + + Prune Volume + + + + + :/images/weather-severe-alert.png + + + Purge Volume + + + + + :/images/label.png + + + Relabel Volume + + + + + :/images/cartridge-edit.png + + + Update all Volumes From Pool + + + Update all Volumes From Pool + + + Update all Volumes From Pool + + + Update all Volumes From Pool + + + + + :/images/cartridge-edit.png + + + Update all Volumes from all Pools + + + Update all Volumes from all Pools + + + Update all Volumes from all Pools + + + Update all Volumes from all Pools + + + + + :/images/cartridge-edit.png + + + Volume From Pool + + + + + + + + diff --git a/src/qt-console/medialist/mediaview.cpp b/src/qt-console/medialist/mediaview.cpp new file mode 100644 index 00000000..acc7fd60 --- /dev/null +++ b/src/qt-console/medialist/mediaview.cpp @@ -0,0 +1,458 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +#include "bat.h" +#include +#include +#include +#include "mediaview.h" +#include "mediaedit/mediaedit.h" +#include "mediainfo/mediainfo.h" +#include "joblist/joblist.h" +#include "relabel/relabel.h" +#include "run/run.h" +#include "util/fmtwidgetitem.h" + +MediaView::MediaView() : Pages() +{ + setupUi(this); + m_name = tr("Media"); + pgInitialize(); + QTreeWidgetItem* thisitem = mainWin->getFromHash(this); + thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/cartridge.png"))); + connect(m_pbApply, SIGNAL(pressed()), this, SLOT(applyPushed())); + connect(m_pbEdit, SIGNAL(pressed()), this, SLOT(editPushed())); + connect(m_pbPurge, SIGNAL(pressed()), this, SLOT(purgePushed())); + connect(m_pbDelete, SIGNAL(pressed()), this, SLOT(deletePushed())); + connect(m_pbPrune, SIGNAL(pressed()), this, SLOT(prunePushed())); + connect(m_tableMedia, SIGNAL(itemDoubleClicked(QTableWidgetItem*)), + this, SLOT(showInfoForMedia(QTableWidgetItem *))); + + /* mp_treeWidget, Storage Tree Tree Widget inherited from ui_medialist.h */ + m_populated = false; + m_checkcurwidget = true; + m_closeable = false; +} + +void MediaView::showInfoForMedia(QTableWidgetItem * item) +{ + QTreeWidgetItem* pageSelectorTreeWidgetItem = mainWin->getFromHash(this); + int row = item->row(); + QString vol = m_tableMedia->item(row, 0)->text(); + new MediaInfo(pageSelectorTreeWidgetItem, vol); +// connect(j, SIGNAL(destroyed()), this, SLOT(populateTree())); +} + +MediaView::~MediaView() +{ +} + +void MediaView::applyPushed() +{ + populateTable(); +} + +void MediaView::editPushed() +{ + QStringList sel; + QString cmd; + getSelection(sel); + + for(int i=0; igetFromHash(this), cmd); + } +} + +void MediaView::purgePushed() +{ + if (QMessageBox::warning(this, "Bat", + tr("Are you sure you want to purge ?? 
!!!.\n" +"The Purge command will delete associated Catalog database records from Jobs and" +" Volumes without considering the retention period. Purge works only on the" +" Catalog database and does not affect data written to Volumes. This command can" +" be dangerous because you can delete catalog records associated with current" +" backups of files, and we recommend that you do not use it unless you know what" +" you are doing.\n" + "Press OK to proceed with the purge operation?"), + QMessageBox::Ok | QMessageBox::Cancel) + == QMessageBox::Cancel) { return; } + + QStringList lst; + QString cmd; + getSelection(lst); + for(int i=0; i items = m_tableMedia->selectedItems(); + int row; + int nrows; /* number of rows */ + bool *tab; + int nb = items.count(); + if (!nb) { + return false; + } + nrows = m_tableMedia->rowCount(); + tab = (bool *) malloc (nrows * sizeof(bool)); + memset(tab, 0, sizeof(bool)*nrows); + for (int i = 0; i < nb; ++i) { + row = items[i]->row(); + if (!tab[row]) { + tab[row] = true; + it = m_tableMedia->item(row, 0); + list.append(it->text()); + } + } + free(tab); + return list.count() > 0; +} + +void MediaView::prunePushed() +{ + QStringList sel; + QString cmd; + getSelection(sel); + + for(int i=0; iclear(); + m_cbPool->addItem(""); + m_cbPool->addItems(m_console->pool_list); + + m_cbStatus->clear(); + m_cbStatus->addItem(""); + m_cbStatus->addItems(m_console->volstatus_list); + + m_cbMediaType->clear(); + m_cbMediaType->addItem(""); + m_cbMediaType->addItems(m_console->mediatype_list); + + m_cbLocation->clear(); + m_cbLocation->addItem(""); + m_cbLocation->addItems(m_console->location_list); +} + +/* + * If chkExpired button is checked, we can remove all non Expired + * entries + */ +void MediaView::filterExipired(QStringList &list) +{ + utime_t t, now = time(NULL); + QString resultline, stat, LastWritten; + QStringList fieldlist; + + /* We should now in advance how many rows we will have */ + if (m_chkExpired->isChecked()) { + for (int i=list.size() -1; i >= 0; i--) { + fieldlist = list.at(i).split("\t"); + ASSERT(fieldlist.size() != 9); + LastWritten = fieldlist.at(7); + if (LastWritten == "") { + list.removeAt(i); + + } else { + stat = fieldlist.at(8); + t = str_to_utime(LastWritten.toLatin1().data()); + t = t + stat.toULongLong(); + if (t > now) { + list.removeAt(i); + } + } + } + } +} + +/* + * The main meat of the class!! The function that querries the director and + * creates the widgets with appropriate values. 
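MediaView::getSelection() above dedupes the selected cells by row with a malloc'ed bool array sized to the table. The same bookkeeping can be written with QSet; this sketch assumes, as that class does, that column 0 of the table holds the volume name:

#include <QSet>
#include <QStringList>
#include <QTableWidget>

/* Collect the volume name of every selected row exactly once. */
static QStringList selectedVolumes(QTableWidget *table)
{
   QSet<int> seen;
   QStringList names;
   foreach (QTableWidgetItem *item, table->selectedItems()) {
      int row = item->row();
      if (!seen.contains(row)) {
         seen.insert(row);
         names << table->item(row, 0)->text();
      }
   }
   return names;
}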
+ */ +void MediaView::populateTable() +{ + utime_t t; + time_t ttime; + QString stat, resultline, query; + QString str_usage; + QHash hash_size; + QStringList fieldlist, results; + char buf[256]; + float usage; + struct tm tm; + + m_populated = true; + + Freeze frz(*m_tableMedia); /* disable updating*/ + QStringList where; + QString cmd; + if (m_cbPool->currentText() != "") { + cmd = " Pool.Name = '" + m_cbPool->currentText() + "'"; + where.append(cmd); + } + + if (m_cbStatus->currentText() != "") { + cmd = " Media.VolStatus = '" + m_cbStatus->currentText() + "'"; + where.append(cmd); + } + + if (m_cbStatus->currentText() != "") { + cmd = " Media.VolStatus = '" + m_cbStatus->currentText() + "'"; + where.append(cmd); + } + + if (m_cbMediaType->currentText() != "") { + cmd = " Media.MediaType = '" + m_cbMediaType->currentText() + "'"; + where.append(cmd); + } + + if (m_cbLocation->currentText() != "") { + cmd = " Location.Location = '" + m_cbLocation->currentText() + "'"; + where.append(cmd); + } + + if (m_textName->text() != "") { + cmd = " Media.VolumeName like '%" + m_textName->text() + "%'"; + where.append(cmd); + } + + if (where.size() > 0) { + cmd = " WHERE " + where.join(" AND "); + } else { + cmd = ""; + } + + query = + "SELECT AVG(VolBytes) AS size, COUNT(1) as nb, " + "MediaType FROM Media " + "WHERE VolStatus IN ('Full', 'Used') " + "GROUP BY MediaType"; + + if (mainWin->m_sqlDebug) { + Pmsg1(000, "MediaView query cmd : %s\n",query.toUtf8().data()); + } + if (m_console->sql_cmd(query, results)) { + foreach (resultline, results) { + fieldlist = resultline.split("\t"); + if (fieldlist.size() != 3) { + Pmsg1(000, "Unexpected line %s", resultline.toUtf8().data()); + continue; + } + if (fieldlist.at(1).toInt() >= 1) { + // MediaType + hash_size[fieldlist.at(2)] + = fieldlist.at(0).toFloat(); + } + } + } + + m_tableMedia->clearContents(); + query = + "SELECT VolumeName, InChanger, " + "Slot, MediaType, VolStatus, VolBytes, Pool.Name, " + "LastWritten, Media.VolRetention " + "FROM Media JOIN Pool USING (PoolId) " + "LEFT JOIN Location ON (Media.LocationId=Location.LocationId) " + + cmd + + " ORDER BY VolumeName LIMIT " + m_sbLimit->cleanText(); + + m_tableMedia->sortByColumn(0, Qt::AscendingOrder); + m_tableMedia->setSortingEnabled(false); /* Don't sort during insert */ + results.clear(); + if (mainWin->m_sqlDebug) { + Pmsg1(000, "MediaView query cmd : %s\n",query.toUtf8().data()); + } + if (m_console->sql_cmd(query, results)) { + int row=0; + filterExipired(results); + m_tableMedia->setRowCount(results.size()); + foreach (resultline, results) { // should have only one result + int index = 0; + QString Slot, VolBytes, MediaType, LastWritten, VolStatus; + fieldlist = resultline.split("\t"); + if (fieldlist.size() != 9) { + Pmsg1(000, "Unexpected line %s", resultline.toUtf8().data()); + continue; + } + QStringListIterator fld(fieldlist); + TableItemFormatter mediaitem(*m_tableMedia, row); + + /* VolumeName */ + mediaitem.setTextFld(index++, fld.next()); + + /* Online */ + mediaitem.setInChanger(index++, fld.next()); + + Slot = fld.next(); // Slot + mediaitem.setNumericFld(index++, Slot); + + MediaType = fld.next(); + VolStatus = fld.next(); + + /* Volume bytes */ + VolBytes = fld.next(); + mediaitem.setBytesFld(index++, VolBytes); + + /* Usage */ + usage = 0; + if (hash_size.contains(MediaType) && + hash_size[MediaType] != 0) { + usage = VolBytes.toLongLong() * 100 / hash_size[MediaType]; + } + mediaitem.setPercent(index++, usage); + + /* Volstatus */ + mediaitem.setVolStatusFld(index++, 
VolStatus); + + /* Pool */ + mediaitem.setTextFld(index++, fld.next()); + + /* MediaType */ + mediaitem.setTextFld(index++, MediaType); + + LastWritten = fld.next(); + buf[0] = 0; + if (LastWritten != "") { + stat = fld.next(); // VolUseDuration + t = str_to_utime(LastWritten.toLatin1().data()); + t = t + stat.toULongLong(); + ttime = t; + localtime_r(&ttime, &tm); + strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", &tm); + } + + /* LastWritten */ + mediaitem.setTextFld(index++, LastWritten); + + /* When expired */ + mediaitem.setTextFld(index++, buf); + row++; + } + } + m_tableMedia->resizeColumnsToContents(); + m_tableMedia->resizeRowsToContents(); + m_tableMedia->verticalHeader()->hide(); + m_tableMedia->setSortingEnabled(true); + + /* make read only */ + m_tableMedia->setEditTriggers(QAbstractItemView::NoEditTriggers); +} + +/* + * When the treeWidgetItem in the page selector tree is singleclicked, Make sure + * The tree has been populated. + */ +void MediaView::PgSeltreeWidgetClicked() +{ + if (!m_populated) { + populateForm(); + populateTable(); + } + if (!isOnceDocked()) { + dockPage(); + } +} + +/* + * Virtual function which is called when this page is visible on the stack + */ +void MediaView::currentStackItem() +{ + if(!m_populated) { + populateForm(); + populateTable(); + } +} + +// /* +// * Called from the signal of the context sensitive menu to relabel! +// */ +// void MediaView::relabelVolume() +// { +// setConsoleCurrent(); +// new relabelDialog(m_console, m_currentVolumeName); +// } +// +// /* +// * Called from the signal of the context sensitive menu to purge! +// */ +// void MediaView::allVolumesFromPool() +// { +// QString cmd = "update volume AllFromPool=" + m_currentVolumeName; +// consoleCommand(cmd); +// populateTable(); +// } +// +// void MediaView::allVolumes() +// { +// QString cmd = "update volume allfrompools"; +// consoleCommand(cmd); +// populateTable(); +// } +// +// /* +// * Called from the signal of the context sensitive menu to purge! +// */ +// void MediaView::volumeFromPool() +// { +// QTreeWidgetItem *currentItem = mp_treeWidget->currentItem(); +// QTreeWidgetItem *parent = currentItem->parent(); +// QString pool = parent->text(0); +// QString cmd; +// cmd = "update volume=" + m_currentVolumeName + " frompool=" + pool; +// consoleCommand(cmd); +// populateTable(); +// } +// diff --git a/src/qt-console/medialist/mediaview.h b/src/qt-console/medialist/mediaview.h new file mode 100644 index 00000000..8b46c7e6 --- /dev/null +++ b/src/qt-console/medialist/mediaview.h @@ -0,0 +1,63 @@ +#ifndef _MEDIAVIEW_H_ +#define _MEDIAVIEW_H_ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
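The expiry column filled in above is simply LastWritten plus the volume retention, turned back into a local timestamp. Bacula's str_to_utime() does the parsing; the remaining arithmetic is plain C time handling, sketched here with an already-parsed time_t:

#include <time.h>

/* Format last_written + retention_secs as "YYYY-MM-DD HH:MM:SS",
 * the same computation populateTable() and MediaInfo::populateForm() perform. */
static void formatExpiry(time_t last_written, unsigned long long retention_secs,
                         char *buf, size_t buflen)
{
   struct tm tm;
   time_t expires = last_written + (time_t)retention_secs;
   localtime_r(&expires, &tm);
   strftime(buf, buflen, "%Y-%m-%d %H:%M:%S", &tm);
}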
+*/ + +#if QT_VERSION >= 0x050000 +#include +#else +#include +#endif +#include "ui_mediaview.h" +#include "console.h" +#include + +class MediaView : public Pages, public Ui::MediaViewForm +{ + Q_OBJECT + +public: + MediaView(); + ~MediaView(); + +private slots: + void populateTable(); + void populateForm(); + void PgSeltreeWidgetClicked(); + void currentStackItem(); + void applyPushed(); + void editPushed(); + void purgePushed(); + void prunePushed(); + void deletePushed(); + bool getSelection(QStringList &ret); + void showInfoForMedia(QTableWidgetItem * item); + void filterExipired(QStringList &list); +// void relabelVolume(); +// void allVolumesFromPool(); +// void allVolumes(); +// void volumeFromPool(); + +private: + bool m_populated; + bool m_checkcurwidget; + QTreeWidgetItem *m_topItem; +}; + +#endif /* _MEDIAVIEW_H_ */ diff --git a/src/qt-console/medialist/mediaview.ui b/src/qt-console/medialist/mediaview.ui new file mode 100644 index 00000000..1fe54180 --- /dev/null +++ b/src/qt-console/medialist/mediaview.ui @@ -0,0 +1,289 @@ + + + MediaViewForm + + + + 0 + 0 + 949 + 638 + + + + Form + + + + + + + + Edit + + + + :/images/edit.png:/images/edit.png + + + true + + + + + + + Purge + + + + :/images/purge.png:/images/purge.png + + + true + + + + + + + Delete + + + + :/images/purge.png:/images/purge.png + + + true + + + + + + + Prune + + + + :/images/edit-cut.png:/images/edit-cut.png + + + true + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + + + Filter + + + + + + Media Type: + + + + + + + + + + Status: + + + + + + + + + + Limit: + + + + + + + 1 + + + 1000 + + + 100 + + + + + + + Name: + + + + + + + + 0 + 0 + + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + Pool: + + + + + + + + + + Location: + + + + + + + + + + Expired + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + Apply + + + + :/images/view-refresh.png:/images/view-refresh.png + + + + + + + + + + QAbstractItemView::ExtendedSelection + + + QAbstractItemView::SelectRows + + + true + + + + Volume Name + + + + + Online + + + + + Slot + + + + + Vol Bytes + + + + + Vol Usage + + + + + Vol Status + + + + + Pool + + + + + Media Type + + + + + Last Written + + + + + When expire? + + + + + + + + + + + diff --git a/src/qt-console/mount/mount.cpp b/src/qt-console/mount/mount.cpp new file mode 100644 index 00000000..2d466e19 --- /dev/null +++ b/src/qt-console/mount/mount.cpp @@ -0,0 +1,80 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ + +/* + * Label Dialog class + * + * Kern Sibbald, February MMVII + * + */ + +#include "bat.h" +#include "mount/mount.h" +#include + +/* + * A constructor + */ +mountDialog::mountDialog(Console *console, QString &storageName) : QDialog() +{ + m_console = console; + m_storageName = storageName; + m_conn = m_console->notifyOff(); + setupUi(this); + this->show(); + + QString labelText( tr("Storage : %1").arg(storageName) ); + storageLabel->setText(labelText); +} + +void mountDialog::accept() +{ + QString scmd; + if (m_storageName == "") { + QMessageBox::warning(this, tr("No Storage name"), tr("No Storage name given"), + QMessageBox::Ok, QMessageBox::Ok); + return; + } + this->hide(); + scmd = QString("mount storage=\"%1\" slot=%2") + .arg(m_storageName) + .arg(slotSpin->value()); + if (mainWin->m_commandDebug) { + Pmsg1(000, "sending command : %s\n",scmd.toUtf8().data()); + } + + m_console->display_text( tr("Context sensitive command :\n\n")); + m_console->display_text("**** "); + m_console->display_text(scmd + " ****\n"); + m_console->display_text(tr("Director Response :\n\n")); + + m_console->write_dir(scmd.toUtf8().data()); + m_console->displayToPrompt(m_conn); + m_console->notify(m_conn, true); + delete this; + mainWin->resetFocus(); +} + +void mountDialog::reject() +{ + this->hide(); + m_console->notify(m_conn, true); + delete this; + mainWin->resetFocus(); +} diff --git a/src/qt-console/mount/mount.h b/src/qt-console/mount/mount.h new file mode 100644 index 00000000..d2886909 --- /dev/null +++ b/src/qt-console/mount/mount.h @@ -0,0 +1,51 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
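mountDialog::accept() above shows the pattern the small dialogs in this tree share: switch the console notifier off in the constructor, format one console command with QString::arg(), write it to the director, then re-enable notification and delete the dialog. The command formatting on its own, for a hypothetical storage name and slot:

#include <QString>

static QString mountCommand(const QString &storage, int slot)
{
   /* yields e.g.:  mount storage="Autochanger" slot=3  */
   return QString("mount storage=\"%1\" slot=%2").arg(storage).arg(slot);
}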
+*/ +/* + * Kern Sibbald, February MMVII + */ + +#ifndef _MOUNT_H_ +#define _MOUNT_H_ + +#if QT_VERSION >= 0x050000 +#include +#else +#include +#endif +#include "ui_mount.h" +#include "console.h" + +class mountDialog : public QDialog, public Ui::mountForm +{ + Q_OBJECT + +public: + mountDialog(Console *console, QString &storage); + +private slots: + void accept(); + void reject(); + +private: + Console *m_console; + QString m_storageName; + int m_conn; +}; + +#endif /* _MOUNT_H_ */ diff --git a/src/qt-console/mount/mount.ui b/src/qt-console/mount/mount.ui new file mode 100644 index 00000000..fbd2c51e --- /dev/null +++ b/src/qt-console/mount/mount.ui @@ -0,0 +1,199 @@ + + mountForm + + + Qt::WindowModal + + + + 0 + 0 + 400 + 300 + + + + Label + + + + 9 + + + 6 + + + + + + 16777215 + 20 + + + + TextLabel + + + Qt::AlignCenter + + + + + + + 0 + + + 6 + + + + + Slot: + + + slotSpin + + + + + + + 10000 + + + + + + + + + Qt::Horizontal + + + QDialogButtonBox::Cancel|QDialogButtonBox::NoButton|QDialogButtonBox::Ok + + + + + + + Qt::Vertical + + + QSizePolicy::Maximum + + + + 21 + 16 + + + + + + + + 0 + + + 6 + + + + + Qt::Horizontal + + + + 71 + 21 + + + + + + + + + 16777215 + 30 + + + + <html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:12pt; font-weight:600;">Mount a Slot</span></p></body></html> + + + + + + + Qt::Horizontal + + + + 81 + 20 + + + + + + + + + + Qt::Vertical + + + QSizePolicy::Maximum + + + + 382 + 16 + + + + + + + + + + buttonBox + accepted() + mountForm + accept() + + + 248 + 254 + + + 157 + 274 + + + + + buttonBox + rejected() + mountForm + reject() + + + 316 + 260 + + + 286 + 274 + + + + + diff --git a/src/qt-console/pages.cpp b/src/qt-console/pages.cpp new file mode 100644 index 00000000..0866817f --- /dev/null +++ b/src/qt-console/pages.cpp @@ -0,0 +1,445 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Dirk Bartley, March 2007 + */ + +#include "bat.h" +#include "pages.h" + +/* A global function */ +bool isWin32Path(QString &fullPath) +{ + if (fullPath.size()<2) { + return false; + } + + bool toret = fullPath[1].toLatin1() == ':' && fullPath[0].isLetter(); + if (mainWin->m_miscDebug) { + if (toret) + Pmsg1(000, "returning from isWin32Path true %s\n", fullPath.toUtf8().data()); + else + Pmsg1(000, "returning from isWin32Path false %s\n", fullPath.toUtf8().data()); + } + return toret; +} + +/* Need to initialize variables here */ +Pages::Pages() : QWidget() +{ + m_docked = false; + m_onceDocked = false; + m_closeable = true; + m_dockOnFirstUse = true; + m_console = NULL; + m_parent = NULL; +} + +/* first Use Dock */ +void Pages::firstUseDock() +{ + if (!m_onceDocked && m_dockOnFirstUse) { + dockPage(); + } +} + +/* + * dockPage + * This function is intended to be called from within the Pages class to pull + * a window from floating to in the stack widget. + */ + +void Pages::dockPage() +{ + if (isDocked()) { + return; + } + + /* These two lines are for making sure if it is being changed from a window + * that it has the proper window flag and parent. + */ + setWindowFlags(Qt::Widget); + + /* calculate the index that the tab should be inserted into */ + int tabPos = 0; + QTreeWidgetItemIterator it(mainWin->treeWidget); + while (*it) { + Pages *somepage = mainWin->getFromHash(*it); + if (this == somepage) { + tabPos += 1; + break; + } + int pageindex = mainWin->tabWidget->indexOf(somepage); + if (pageindex != -1) { tabPos = pageindex; } + ++it; + } + + /* This was being done already */ + m_parent->insertTab(tabPos, this, m_name); + + /* Set docked flag */ + m_docked = true; + m_onceDocked = true; + mainWin->tabWidget->setCurrentWidget(this); + /* lets set the page selectors action for docking or undocking */ + setContextMenuDockText(); +} + +/* + * undockPage + * This function is intended to be called from within the Pages class to put + * a window from the stack widget to a floating window. + */ + +void Pages::undockPage() +{ + if (!isDocked()) { + return; + } + + /* Change from a stacked widget to a normal window */ + m_parent->removeTab(m_parent->indexOf(this)); + setWindowFlags(Qt::Window); + show(); + /* Clear docked flag */ + m_docked = false; + /* The window has been undocked, lets change the context menu */ + setContextMenuDockText(); +} + +/* + * This function is intended to be called with the subclasses. When it is + * called the specific sublclass does not have to be known to Pages. When it + * is called this function will change the page from it's current state of being + * docked or undocked and change it to the other. + */ + +void Pages::togglePageDocking() +{ + if (m_docked) { + undockPage(); + } else { + dockPage(); + } +} + +/* + * This function is because I wanted for some reason to keep it protected but still + * give any subclasses the ability to find out if it is currently stacked or not. + */ +bool Pages::isDocked() +{ + return m_docked; +} + +/* + * This function is because after the tabbed widget was added I could not tell + * from is docked if it had been docked yet. 
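dockPage() and undockPage() above move a page between the main QTabWidget and a free-floating top-level window purely by re-parenting: removeTab()/insertTab() plus flipping the window flags between Qt::Widget and Qt::Window. The core of that round trip, without the page-selector bookkeeping this class also does:

#include <QTabWidget>

static void toggleDock(QTabWidget *tabs, QWidget *page, const QString &title)
{
   int idx = tabs->indexOf(page);
   if (idx != -1) {                        /* currently docked: float it */
      tabs->removeTab(idx);
      page->setWindowFlags(Qt::Window);
      page->show();                        /* setWindowFlags() hides the widget */
   } else {                                /* currently floating: dock it */
      page->setWindowFlags(Qt::Widget);
      tabs->addTab(page, title);
      tabs->setCurrentWidget(page);
   }
}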
To prevent status pages from requesting + * status from the director + */ +bool Pages::isOnceDocked() +{ + return m_onceDocked; +} + + +/* + * To keep m_closeable protected as well + */ +bool Pages::isCloseable() +{ + return m_closeable; +} + +void Pages::hidePage() +{ + if (!m_parent || (m_parent->indexOf(this) <= 0)) { + return; + } + /* Remove any tab that may exist */ + m_parent->removeTab(m_parent->indexOf(this)); + hide(); + /* Clear docked flag */ + m_docked = false; + /* The window has been undocked, lets change the context menu */ + setContextMenuDockText(); +} + +/* + * When a window is closed, this slot is called. The idea is to put it back in the + * stack here, and it works. I wanted to get it to the top of the stack so that the + * user immediately sees where his window went. Also, if he undocks the window, then + * closes it with the tree item highlighted, it may be confusing that the highlighted + * treewidgetitem is not the stack item in the front. + */ + +void Pages::closeEvent(QCloseEvent* event) +{ + /* A Widget was closed, lets toggle it back into the window, and set it in front. */ + dockPage(); + + /* this fixes my woes of getting the widget to show up on top when closed */ + event->ignore(); + + /* Set the current tree widget item in the Page Selector window to the item + * which represents "this" + * Which will also bring "this" to the top of the stacked widget */ + setCurrent(); +} + +/* + * The next three are virtual functions. The idea here is that each subclass will have the + * built in virtual function to override if the programmer wants to populate the window + * when it it is first clicked. + */ +void Pages::PgSeltreeWidgetClicked() +{ +} + +/* + * Virtual function which is called when this page is visible on the stack. + * This will be overridden by classes that want to populate if they are on the + * top. 
+ */ +void Pages::currentStackItem() +{ +} + +/* + * Function to close the stacked page and remove the widget from the + * Page selector window + */ +void Pages::closeStackPage() +{ + /* First get the tree widget item and destroy it */ + QTreeWidgetItem *item=mainWin->getFromHash(this); + /* remove the QTreeWidgetItem <-> page from the hash */ + if (item) { + mainWin->hashRemove(item, this); + /* remove the item from the page selector by destroying it */ + delete item; + } + /* remove this */ + delete this; +} + +/* + * Function to set members from the external mainwin and it's overload being + * passed a specific QTreeWidgetItem to be it's parent on the tree + */ +void Pages::pgInitialize() +{ + pgInitialize(QString(), NULL); +} + +void Pages::pgInitialize(const QString &name) +{ + pgInitialize(name, NULL); +} + +void Pages::pgInitialize(const QString &tname, QTreeWidgetItem *parentTreeWidgetItem) +{ + m_docked = false; + m_onceDocked = false; + if (tname.size()) { + m_name = tname; + } + m_parent = mainWin->tabWidget; + m_console = mainWin->currentConsole(); + + if (!parentTreeWidgetItem) { + parentTreeWidgetItem = m_console->directorTreeItem(); + } + + QTreeWidgetItem *item = new QTreeWidgetItem(parentTreeWidgetItem); + QString name; + treeWidgetName(name); + item->setText(0, name); + mainWin->hashInsert(item, this); + setTitle(); +} + +/* + * Virtual Function to return a name + * All subclasses should override this function + */ +void Pages::treeWidgetName(QString &name) +{ + name = m_name; +} + +/* + * Function to simplify executing a console command and bringing the + * console to the front of the stack + */ +void Pages::consoleCommand(QString &command) +{ + consoleCommand(command, true); +} + +void Pages::consoleCommand(QString &command, bool setCurrent) +{ + int conn; + if (m_console->getDirComm(conn)) { + consoleCommand(command, conn, setCurrent, true); + } +} + +/* + * Lowest level of console command method. + * "notify" parameter default is set to true by higher level console command call. + * In most cases "notify" parameter should be set to true value because after console + * command sent, notifier should be always enabled for catch all Director responses. + */ +void Pages::consoleCommand(QString &command, int conn, bool setCurrent, bool notify) +{ + if (notify) { + m_console->notify(conn, true); + } + /* Bring this director's console to the front of the stack */ + if (setCurrent) { setConsoleCurrent(); } + QString displayhtml(""); + displayhtml += command + "\n"; + m_console->display_html(displayhtml); + m_console->display_text("\n"); + mainWin->waitEnter(); + m_console->write_dir(conn, command.toUtf8().data(), false); + m_console->displayToPrompt(conn); + mainWin->waitExit(); +} + +/* + * Function for handling undocked windows becoming active. + * Change the currently selected item in the page selector window to the now + * active undocked window. This will also make the console for the undocked + * window m_currentConsole. 
+ */ +void Pages::changeEvent(QEvent *event) +{ + if ((event->type() == QEvent::ActivationChange) && (isActiveWindow())) { + setCurrent(); + } +} + +/* + * Function to simplify getting the name of the class and the director into + * the caption or title of the window + */ +void Pages::setTitle() +{ + QString wdgname, director; + treeWidgetName(wdgname); + m_console->getDirResName(director); + QString title = tr("%1 of Director %2").arg(wdgname).arg(director); + setWindowTitle(title); +} + +/* + * Bring the current directors console window to the top of the stack. + */ +void Pages::setConsoleCurrent() +{ + mainWin->treeWidget->setCurrentItem(mainWin->getFromHash(m_console)); +} + +/* + * Bring this window to the top of the stack. + */ +void Pages::setCurrent() +{ + mainWin->treeWidget->setCurrentItem(mainWin->getFromHash(this)); +} + +/* + * Function to set the text of the toggle dock context menu when page and + * widget item are NOT known. + */ +void Pages::setContextMenuDockText() +{ + QTreeWidgetItem *item = mainWin->getFromHash(this); + QString docktext; + if (isDocked()) { + docktext = tr("UnDock %1 Window").arg(item->text(0)); + } else { + docktext = tr("ReDock %1 Window").arg(item->text(0)); + } + + mainWin->actionToggleDock->setText(docktext); + setTreeWidgetItemDockColor(); +} + +/* + * Function to set the color of the tree widget item based on whether it is + * docked or not. + */ +void Pages::setTreeWidgetItemDockColor() +{ + QTreeWidgetItem* item = mainWin->getFromHash(this); + if (item) { + if (item->text(0) != tr("Console")) { + if (isDocked()) { + /* Set the brush to blue if undocked */ + QBrush blackBrush(Qt::black); + item->setForeground(0, blackBrush); + } else { + /* Set the brush back to black if docked */ + QBrush blueBrush(Qt::blue); + item->setForeground(0, blueBrush); + } + } + } +} + +/* Function to get a list of volumes */ +void Pages::getVolumeList(QStringList &volumeList) +{ + QString query("SELECT VolumeName AS Media FROM Media ORDER BY Media"); + if (mainWin->m_sqlDebug) { + Pmsg1(000, "Query cmd : %s\n",query.toUtf8().data()); + } + QStringList results; + if (m_console->sql_cmd(query, results)) { + QString field; + QStringList fieldlist; + /* Iterate through the lines of results. */ + foreach (QString resultline, results) { + fieldlist = resultline.split("\t"); + volumeList.append(fieldlist[0]); + } /* foreach resultline */ + } /* if results from query */ +} + +/* Function to get a list of volumes */ +void Pages::getStatusList(QStringList &statusLongList) +{ + QString statusQuery("SELECT JobStatusLong FROM Status"); + if (mainWin->m_sqlDebug) { + Pmsg1(000, "Query cmd : %s\n",statusQuery.toUtf8().data()); + } + QStringList statusResults; + if (m_console->sql_cmd(statusQuery, statusResults)) { + QString field; + QStringList fieldlist; + /* Iterate through the lines of results. */ + foreach (QString resultline, statusResults) { + fieldlist = resultline.split("\t"); + statusLongList.append(fieldlist[0]); + } /* foreach resultline */ + } /* if results from statusquery */ +} diff --git a/src/qt-console/pages.h b/src/qt-console/pages.h new file mode 100644 index 00000000..4a16582f --- /dev/null +++ b/src/qt-console/pages.h @@ -0,0 +1,106 @@ +#ifndef _PAGES_H_ +#define _PAGES_H_ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Dirk Bartley, March 2007 + */ + +#include +#if QT_VERSION >= 0x050000 +#include +#else +#include +#endif +#include + +/* + * The Pages Class + * + * This class is inherited by all widget windows which are on the stack + * It is for the purpose of having a consistent set of functions and properties + * in all of the subclasses to accomplish tasks such as pulling a window out + * of or into the stack. It also provides virtual functions called + * from in mainwin so that subclasses can contain functions to allow them + * to populate the screens at the time of first viewing, (when selected) as + * opposed to the first creation of the console connection. The + * console is not connected until after the page selector tree has been + * populated. + */ + +class Console; + +class Pages : public QWidget +{ + Q_OBJECT + +public: + /* methods */ + Pages(); + void dockPage(); + void undockPage(); + void hidePage(); + void togglePageDocking(); + bool isDocked(); + bool isOnceDocked(); + bool isCloseable(); + virtual void PgSeltreeWidgetClicked(); + virtual void currentStackItem(); + void closeStackPage(); + Console *console() { return m_console; }; + void setCurrent(); + void setContextMenuDockText(); + void setTreeWidgetItemDockColor(); + void consoleCommand(QString &); + void consoleCommand(QString &, bool setCurrent); + void consoleCommand(QString &, int conn, bool setCurrent=true, bool notify=true); + QString &name() { return m_name; }; + void getVolumeList(QStringList &); + void getStatusList(QStringList &); + void firstUseDock(); + + /* members */ + QTabWidget *m_parent; + QList m_contextActions; + + +public slots: + /* closeEvent is a virtual function inherited from QWidget */ + virtual void closeEvent(QCloseEvent* event); + +protected: + /* methods */ + void pgInitialize(); + void pgInitialize(const QString &); + void pgInitialize(const QString &, QTreeWidgetItem *); + virtual void treeWidgetName(QString &); + virtual void changeEvent(QEvent *event); + void setConsoleCurrent(); + void setTitle(); + + /* members */ + bool m_closeable; + bool m_docked; + bool m_onceDocked; + bool m_dockOnFirstUse; + Console *m_console; + QString m_name; +}; + +#endif /* _PAGES_H_ */ diff --git a/src/qt-console/prefs.ui b/src/qt-console/prefs.ui new file mode 100644 index 00000000..e86f10cc --- /dev/null +++ b/src/qt-console/prefs.ui @@ -0,0 +1,733 @@ + + PrefsForm + + + + 0 + 0 + 470 + 533 + + + + + 0 + 0 + + + + Preferences + + + images/bat.png + + + + 9 + + + 9 + + + 9 + + + 9 + + + 6 + + + 6 + + + + + 0 + + + + Timers + + + + + 60 + 60 + 180 + 106 + + + + + 0 + 0 + + + + Messages Options + + + + + 11 + 81 + 158 + 16 + + + + Message check interval in seconds + + + + + + 11 + 25 + 158 + 20 + + + + Check Messages + + + + + + 11 + 51 + 158 + 24 + + + + 3600 + + + + + + + Joblist + + + + 9 + + + 9 + + + 9 + + + 9 + + + 6 + + + 6 + + + + + + 0 + 0 + + + + Joblist Limit Options + + + + 9 + + + 9 + + + 9 + + + 9 + + + 6 + + + 6 + + + + + 0 + + + 0 + + + 0 + + + 0 + + + 6 + + + 6 + + + + + Days Limit + + + + + + + 1 + + + 10000 + + + 7 + + + + + + + 1 + + + 10000 + + + 25 + + + + + + + Record Limit + + + + + + + + + 
+ + + + Misc + + + + + 30 + 220 + 311 + 111 + + + + Convert + + + + + 20 + 20 + 231 + 22 + + + + Convert Off + + + + + + 20 + 50 + 231 + 22 + + + + Display Bytes using IEC units (1024B = 1 KiB) + + + + + + 20 + 80 + 231 + 22 + + + + Display Bytes using SI units (1000B = 1KB) + + + + + + + 30 + 10 + 311 + 61 + + + + Context Sensitive List Commands + + + + 9 + + + 9 + + + 9 + + + 9 + + + 6 + + + 6 + + + + + Execute Long List + + + + + + + + + 30 + 80 + 311 + 121 + + + + Open Pages + + + + + 10 + 30 + 241 + 20 + + + + Open Plot page on startup + + + + + + 10 + 60 + 241 + 20 + + + + Open Browser page on startup + + + + + + 10 + 90 + 241 + 20 + + + + Open Director Status page on startup + + + + + + + Debug + + + + 9 + + + 9 + + + 9 + + + 9 + + + 6 + + + 6 + + + + + + 0 + 0 + + + + Debugging Options + + + + + + Debug comm + + + + + + + Debug multiple connection + + + + + + + Display all messages in console + + + + + + + Debug Sql queries + + + + + + + Debug Commands + + + + + + + Debug Miscellaneous Items + + + + + + + + + + + RestoreTree + + + + 9 + + + 9 + + + 9 + + + 9 + + + 6 + + + 6 + + + + + + 0 + 0 + + + + Debugging Options + + + + 9 + + + 9 + + + 9 + + + 9 + + + 6 + + + 6 + + + + + 0 + + + 0 + + + 0 + + + 0 + + + 6 + + + 6 + + + + + Restore Debug 2 + + + + + + + Directory Item Changed + + + + + + + Restore Debug 1 + + + + + + + Directory Current Item Changed Debug + + + + + + + Update File Table Debug + + + + + + + Version Table Item Changed Debug + + + + + + + File Table Item Changed Debug + + + + + + + Icon State Debug + + + + + + + Update Checks Debug + + + + + + + Restore Debug 3 + + + + + + + Update Version Table Debug + + + + + + + Populate Directory Debug + + + + + + + + + + + + + + + + Qt::Horizontal + + + QDialogButtonBox::Cancel|QDialogButtonBox::NoButton|QDialogButtonBox::Ok + + + + + + + 6 + + + 0 + + + 0 + + + 0 + + + 0 + + + + + Qt::Horizontal + + + + 81 + 20 + + + + + + + + + 0 + 0 + + + + <h2>Preferences</h2> + + + + + + + Qt::Horizontal + + + + 101 + 20 + + + + + + + + + + + + buttonBox + accepted() + PrefsForm + accept() + + + 248 + 254 + + + 157 + 274 + + + + + buttonBox + rejected() + PrefsForm + reject() + + + 316 + 260 + + + 286 + 274 + + + + + diff --git a/src/qt-console/qstd.cpp b/src/qt-console/qstd.cpp new file mode 100644 index 00000000..8093d78c --- /dev/null +++ b/src/qt-console/qstd.cpp @@ -0,0 +1,104 @@ +//start id=namespace +#include "qstd.h" + +/* QTextStreams look a lot like iostreams, +we just have to point them to the right place. */ + +//start id=streamdefs +QTextStream qstd::cin(stdin, QIODevice::ReadOnly); +QTextStream qstd::cout(stdout, QIODevice::WriteOnly); +QTextStream qstd::cerr(stderr, QIODevice::WriteOnly); +//end + + +/* Namespace members are like static class members */ +bool qstd::yes(QString question) { + QString ans; + cout << QString(" %1 [y/n]? ").arg(question); + cout.flush(); + ans = cin.readLine(); + return (ans.toUpper().startsWith("Y", Qt::CaseInsensitive)); +} +//end + +bool qstd::more(QString s) { + return yes(QString("Another %1").arg(s)); +} + + +int qstd::promptInt(int base /* =10 */) { /* Usage: int n = promptInt(); */ + QString numstr; + int result; + bool ok; + cout << ": " << flush; + while (1) { + numstr = cin.readLine(); + result = numstr.toInt(&ok, base); + if (!ok) { + cout << "Invalid number. 
Try again: "; + cout.flush(); + } + else + return result; + } +} + + +double qstd::promptDouble() { /* Usage: double d = promptDouble(); */ + QString numstr; + double result; + bool ok; + while (1) { + numstr = cin.readLine(); + result = numstr.toDouble(&ok); + if (!ok) { + cout << "Invalid number. Try again: "; + cout.flush(); + } + else + return result; + } +} + + +void qstd::promptOutputFile(QFile& outfile) { + QString filename; + while (1) { + cout << "Please enter the file name for saving this data: "; + cout.flush(); + filename = cin.readLine(); + outfile.setFileName(filename); + bool fileExists = outfile.open(QIODevice::ReadOnly); + if (!fileExists) + break; + if (yes("File already exists ... Ok to overwrite")) + break; + outfile.close(); + outfile.reset(); + } + outfile.close(); + outfile.reset(); + outfile.open(QIODevice::WriteOnly); + cout << filename << " open for writing ...\n"; + cout.flush(); +} + + +void qstd::promptInputFile(QFile& infile) { + QString filename; + while (1) { + cout << "Name of the file to be read: "; + cout.flush(); + filename = cin.readLine(); + infile.setFileName(filename); + bool fileExists = infile.open(QIODevice::ReadOnly); + if (fileExists) + break; + cout << "File does not exist ... Please try again. \n"; + cout.flush(); + infile.reset(); + } + cout << filename << " open for reading ...\n"; + cout.flush(); +} + diff --git a/src/qt-console/qstd.h b/src/qt-console/qstd.h new file mode 100644 index 00000000..27b7b998 --- /dev/null +++ b/src/qt-console/qstd.h @@ -0,0 +1,79 @@ +#ifndef QSTD_H +#define QSTD_H + +#include +#include +#include + +/** @short helper objects and functions which help reduce the + need for char[] and the standard library. + + defines three @ref QTextStream instances + which behave like the c++ standard iostreams, bound to the + standard in/out/error. + + Also provided, some helper functions for writing + interactive stdin/stdout applications. +*/ +//start +namespace qstd { + + /** @short An alias for standard input + */ + extern QTextStream cin; /* declared only, defined in the .cpp file */ + /** @short An alias for standard output + */ + extern QTextStream cout; + /** @short An alias for standard error + */ + extern QTextStream cerr; + /** yes/no prompt + interactive stdin UI - prompts user with + a yes/no question. Repeatedly-asks + until user supplies a valid answer. + + @param yesNoQuestion the yes/no question + @return true/false depending on what the + user responded. + */ + bool yes(QString yesNoQuestion); + /** Convenience function that feeds a specific question + to the yes() function. + @usage do {.....} while(more ("foobar")); + so that user sees the question: "Another foobar (y/n)? " + @param name of the item being handled by the loop. + */ + bool more(QString prompt); + /** A function for safely taking an int from the keyboard. + Takes data into a QString and tests to make sure it + can be converted to int before returning. + @param base allows choice of number base. + @return returns validated int. + */ + int promptInt(int base = 10); + /** A function for safely taking a double from the keyboard. + Takes data into a QString and tests to make sure it + can be converted to double before returning. + @return returns validated int. + */ + double promptDouble(); + /** Complete dialog for opening a file for output. + Asks user for file name, checks to see if + file already exists and, if so, asks the user if + it is ok to overwrite. + @param Reference QFile parameter is set to point + to the (eventually) opened file. 
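       @usage (a minimal sketch; names are illustrative):
           QFile out;
           promptOutputFile(out);            // leaves the file open for writing
           QTextStream(&out) << "data\n";
           out.close();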
+ */ + /** @short Dialog for a output file prompt + */ + void promptOutputFile(QFile& outfile); + + /** @short Dialog for input file prompt */ + void promptInputFile(QFile& infile); + + +//end +} + +#endif + diff --git a/src/qt-console/qwtconfig.pri b/src/qt-console/qwtconfig.pri new file mode 100644 index 00000000..7422712e --- /dev/null +++ b/src/qt-console/qwtconfig.pri @@ -0,0 +1,88 @@ + +# +# Inserted by build script +# +# unix { +# INSTALLBASE = /usr +#} +# +#win32 { +# INSTALLBASE = C:/Qwt-5.0.2 +#} + +target.path = $$INSTALLBASE/lib +headers.path = $$INSTALLBASE/include +# doc.path = $$INSTALLBASE/doc + +###################################################################### +# qmake internal options +###################################################################### + +CONFIG += qt # Also for Qtopia Core! +CONFIG += warn_on +CONFIG += thread + +###################################################################### +# release/debug mode +# The designer plugin is always built in release mode. +# If want to change this, you have to edit designer/designer.pro. +###################################################################### + +CONFIG += release # release/debug + +###################################################################### +# Build the static/shared libraries. +# If QwtDll is enabled, a shared library is built, otherwise +# it will be a static library. +###################################################################### + +# CONFIG += QwtDll + +###################################################################### +# QwtPlot enables all classes, that are needed to use the QwtPlot +# widget. +###################################################################### + +CONFIG += QwtPlot + +###################################################################### +# QwtWidgets enables all classes, that are needed to use the all other +# widgets (sliders, dials, ...), beside QwtPlot. +###################################################################### + +# CONFIG += QwtWidgets + +###################################################################### +# If you want to display svg imageson the plot canvas, enable the +# line below. Note that Qwt needs the svg+xml, when enabling +# QwtSVGItem. +###################################################################### + +#CONFIG += QwtSVGItem + +###################################################################### +# If you have a commercial license you can use the MathML renderer +# of the Qt solutions package to enable MathML support in Qwt. +# So if you want this, copy qtmmlwidget.h + qtmmlwidget.cpp to +# textengines/mathml and enable the line below. +###################################################################### + +#CONFIG += QwtMathML + +###################################################################### +# If you want to build the Qwt designer plugin, +# enable the line below. +# Otherwise you have to build it from the designer directory. +###################################################################### + +# CONFIG += QwtDesigner + +###################################################################### +# If you want to auto build the examples, enable the line below +# Otherwise you have to build them from the examples directory. 
+###################################################################### + +# CONFIG += QwtExamples +unix { + INSTALLBASE = /home/kern/bacula/x/src/qt-console/qwt +} diff --git a/src/qt-console/relabel/relabel.cpp b/src/qt-console/relabel/relabel.cpp new file mode 100644 index 00000000..b416f7a3 --- /dev/null +++ b/src/qt-console/relabel/relabel.cpp @@ -0,0 +1,116 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +/* + * Label Dialog class + * + * Kern Sibbald, February MMVII + * + */ + +#include "bat.h" +#include "relabel.h" +#include + +/* + * An overload of the constructor to have a default storage show in the + * combobox on start. Used from context sensitive in storage class. + */ +relabelDialog::relabelDialog(Console *console, QString &fromVolume) +{ + m_console = console; + m_fromVolume = fromVolume; + m_conn = m_console->notifyOff(); + setupUi(this); + storageCombo->addItems(console->storage_list); + poolCombo->addItems(console->pool_list); + volumeName->setText(fromVolume); + QString fromText(tr("From Volume : ")); + fromText += fromVolume; + fromLabel->setText(fromText); + QStringList defFields; + if (getDefs(defFields) >= 1) { + poolCombo->setCurrentIndex(poolCombo->findText(defFields[1], Qt::MatchExactly)); + storageCombo->setCurrentIndex(storageCombo->findText(defFields[0], Qt::MatchExactly)); + } + this->show(); +} + +/* + * Use an sql statment to get some defaults + */ +int relabelDialog::getDefs(QStringList &fieldlist) +{ + QString job, client, fileset; + QString query(""); + query = "SELECT MediaType AS MediaType, Pool.Name AS PoolName" + " FROM Media" + " LEFT OUTER JOIN Pool ON Media.PoolId = Pool.PoolId" + " WHERE VolumeName = \'" + m_fromVolume + "\'"; + if (mainWin->m_sqlDebug) { Pmsg1(000, "query = %s\n", query.toUtf8().data()); } + QStringList results; + if (m_console->sql_cmd(query, results)) { + QString field; + /* Iterate through the lines of results, there should only be one. 
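       * Each line should be tab-separated in the order of the SELECT above:
       * MediaType, then PoolName; the constructor uses index 0 for the
       * storage combo and index 1 for the pool combo.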
*/ + foreach (QString resultline, results) { + fieldlist = resultline.split("\t"); + } /* foreach resultline */ + } /* if results from query */ + return results.count(); +} + +void relabelDialog::accept() +{ + QString scmd; + if (volumeName->text().toUtf8().data()[0] == 0) { + QMessageBox::warning(this, tr("No Volume name"), tr("No Volume name given"), + QMessageBox::Ok, QMessageBox::Ok); + return; + } + if (m_fromVolume == volumeName->text().toUtf8()) { + QMessageBox::warning(this, tr("New name must be different"), + tr("New name must be different"), + QMessageBox::Ok, QMessageBox::Ok); + return; + } + + this->hide(); + scmd = QString("relabel storage=\"%1\" oldvolume=\"%2\" volume=\"%3\" pool=\"%4\" slot=%5") + .arg(storageCombo->currentText()) + .arg(m_fromVolume) + .arg(volumeName->text()) + .arg(poolCombo->currentText()) + .arg(slotSpin->value()); + if (mainWin->m_commandDebug) { + Pmsg1(000, "sending command : %s\n",scmd.toUtf8().data()); + } + m_console->write_dir(scmd.toUtf8().data()); + m_console->displayToPrompt(m_conn); + m_console->notify(m_conn, true); + delete this; + mainWin->resetFocus(); +} + +void relabelDialog::reject() +{ + this->hide(); + m_console->notify(m_conn, true); + delete this; + mainWin->resetFocus(); +} diff --git a/src/qt-console/relabel/relabel.h b/src/qt-console/relabel/relabel.h new file mode 100644 index 00000000..c1aeb260 --- /dev/null +++ b/src/qt-console/relabel/relabel.h @@ -0,0 +1,54 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Kern Sibbald, February MMVII + */ + +#ifndef _RELABEL_H_ +#define _RELABEL_H_ + +#if QT_VERSION >= 0x050000 +#include +#else +#include +#endif +#include "ui_relabel.h" +#include "console.h" + +class relabelDialog : public QDialog, public Ui::relabelForm +{ + Q_OBJECT + +public: + relabelDialog(Console *console, QString &fromVolume); + +private: + int getDefs(QStringList &fieldlist); + +private slots: + void accept(); + void reject(); + +private: + Console *m_console; + QString m_fromVolume; + int m_conn; +}; + +#endif /* _RELABEL_H_ */ diff --git a/src/qt-console/relabel/relabel.ui b/src/qt-console/relabel/relabel.ui new file mode 100644 index 00000000..4a8b20e8 --- /dev/null +++ b/src/qt-console/relabel/relabel.ui @@ -0,0 +1,245 @@ + + relabelForm + + + Qt::WindowModal + + + + 0 + 0 + 400 + 212 + + + + Label + + + + 9 + + + 6 + + + + + + 16777215 + 20 + + + + From Volume : + + + Qt::AlignCenter + + + + + + + Qt::Vertical + + + QSizePolicy::Maximum + + + + 382 + 16 + + + + + + + + Qt::Vertical + + + QSizePolicy::Maximum + + + + 21 + 16 + + + + + + + + Qt::Horizontal + + + QDialogButtonBox::Cancel|QDialogButtonBox::NoButton|QDialogButtonBox::Ok + + + + + + + 0 + + + 6 + + + + + 10000 + + + + + + + Pool: + + + poolCombo + + + + + + + Storage: + + + storageCombo + + + + + + + + 200 + 0 + + + + + + + + + + + New Volume Name: + + + volumeName + + + + + + + + + + Slot: + + + slotSpin + + + + + + + + + 0 + + + 6 + + + + + Qt::Horizontal + + + + 71 + 21 + + + + + + + + + 16777215 + 30 + + + + <html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:12pt; font-weight:600;">Relabel a Volume</span></p></body></html> + + + + + + + Qt::Horizontal + + + + 81 + 20 + + + + + + + + + + + + buttonBox + accepted() + relabelForm + accept() + + + 248 + 254 + + + 157 + 274 + + + + + buttonBox + rejected() + relabelForm + reject() + + + 316 + 260 + + + 286 + 274 + + + + + diff --git a/src/qt-console/restore/brestore.cpp b/src/qt-console/restore/brestore.cpp new file mode 100644 index 00000000..341a9984 --- /dev/null +++ b/src/qt-console/restore/brestore.cpp @@ -0,0 +1,714 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ + +/* + * + * bRestore Class (Eric's brestore) + * + * Kern Sibbald, January MMVII + * + */ + +#include "bat.h" +#include "restore.h" +#include "util/fmtwidgetitem.h" + +bRestore::bRestore() : Pages() +{ + m_name = tr("bRestore"); + m_client = ""; + setupUi(this); + pgInitialize(); + QTreeWidgetItem* thisitem = mainWin->getFromHash(this); + thisitem->setIcon(0, QIcon(QString::fromUtf8(":images/browse.png"))); + m_populated = false; + m_closeable = false; + m_current = NULL; + RestoreList->setAcceptDrops(true); +} + +// Populate client table and job associated +void bRestore::setClient() +{ + // Select the same client, don't touch + if (m_client == ClientList->currentText()) { + return; + } + m_client = ClientList->currentText(); + FileList->clearContents(); + FileRevisions->clearContents(); + JobList->clear(); + JobList->setEnabled(true); + LocationEntry->clear(); + m_path = ""; + m_pathid = 0; + + if (ClientList->currentIndex() < 1) { + JobList->setEnabled(false); + return; + } + + JobList->addItem("Job list for " + m_client); + + QString jobQuery = + "SELECT Job.Jobid AS JobId, Job.StartTime AS StartTime," + " Job.Level AS Level," + " Job.Name AS Name" + " FROM Job JOIN Client USING (ClientId)" + " WHERE" + " Job.JobStatus IN ('T','W') AND Job.Type='B' AND" + " Client.Name='" + m_client + "' ORDER BY StartTime DESC" ; + + QString job; + QStringList results; + QStringList fieldlist; + if (m_console->sql_cmd(jobQuery, results)) { + /* Iterate through the record returned from the query */ + foreach (QString resultline, results) { + // 0 1 2 3 + // JobId, StartTime, Level, Name + fieldlist = resultline.split("\t"); + job = fieldlist[1] + " " + fieldlist[3] + "(" + fieldlist[2] + ") " + fieldlist[0]; + JobList->addItem(job, QVariant(fieldlist[0])); // set also private value + } + } +} + +// Compute job associated and update the job cache if needed +void bRestore::setJob() +{ + if (JobList->currentIndex() < 1) { + FileList->clearContents(); + FileList->setRowCount(0); + FileRevisions->clearContents(); + FileRevisions->setRowCount(0); + return ; + } + QStringList results; + QVariant tmp = JobList->itemData(JobList->currentIndex(), Qt::UserRole); + + m_jobids = tmp.toString(); + QString cmd = ".bvfs_get_jobids jobid=" + m_jobids; + if (MergeChk->checkState() == Qt::Checked) { + cmd.append(" all"); + } + + m_console->dir_cmd(cmd, results); + + if (results.size() < 1) { + FileList->clearContents(); + FileList->setRowCount(0); + FileRevisions->clearContents(); + FileRevisions->setRowCount(0); + return; + } + + // TODO: Can take some time if the job contains many dirs + m_jobids = results.at(0); + cmd = ".bvfs_update jobid=" + m_jobids; + m_console->dir_cmd(cmd, results); + + Pmsg1(0, "jobids=%s\n", m_jobids.toLocal8Bit().constData()); + + displayFiles(m_pathid, QString("")); + Pmsg0(000, "update done\n"); +} + +extern int decode_stat(char *buf, struct stat *statp, int stat_size, int32_t *LinkFI); + +// refresh button with a filter or limit/offset change +void bRestore::refreshView() +{ + displayFiles(m_pathid, m_path); +} + +void bRestore::displayFiles(int64_t pathid, QString path) +{ + QString arg; + QStringList results; + QStringList fieldlist; + struct stat statp; + int32_t LinkFI; + int nb = 0; + int row = 0; + Freeze frz_lst(*FileList); /* disable updating*/ + Freeze frz_rev(*FileRevisions); /* disable updating*/ + FileList->clearContents(); + FileRevisions->clearContents(); + FileRevisions->setRowCount(0); + + // If we provide pathid, use it (path can be altered by encoding conversion) + 
if (pathid > 0) { + arg = " pathid=" + QString().setNum(pathid); + + // Choose .. update current path to parent dir + if (path == "..") { + if (m_path == "/") { + m_path = ""; + } else { + m_path.remove(QRegExp("[^/]+/$")); + } + + } else if (path == "/" && m_path == "") { + m_path += path; + + } else if (path != "/" && path != ".") { + m_path += path; + } + } else { + m_path = path; + arg = " path=\"" + m_path + "\""; + } + + // If a filter is set, add it to the current query + if (FilterEntry->text() != "") { + QString tmp = FilterEntry->text(); + tmp.replace("\"", "."); // basic escape of " + arg += " pattern=\"" + tmp + "\""; + } + + LocationEntry->setText(m_path); + QString offset = QString().setNum(Offset1Spin->value()); + QString limit=QString().setNum(Offset2Spin->value() - Offset1Spin->value()); + QString q = ".bvfs_lsdir jobid=" + m_jobids + arg + + " limit=" + limit + " offset=" + offset ; + if (mainWin->m_miscDebug) qDebug() << q; + if (m_console->dir_cmd(q, results)) { + nb = results.size(); + FileList->setRowCount(nb); + foreach (QString resultline, results) { + int col=0; + //PathId, FilenameId, fileid, jobid, lstat, path + fieldlist = resultline.split("\t"); + /* + * Note, the next line zaps variable "item", probably + * because the input data in fieldlist is bad. + */ + decode_stat(fieldlist.at(4).toLocal8Bit().data(), &statp, sizeof(statp), &LinkFI); + TableItemFormatter item(*FileList, row++); + item.setFileType(col++, QString("folder")); // folder or file + item.setTextFld(col++, fieldlist.at(5)); // path + item.setBytesFld(col++, QString().setNum(statp.st_size)); + item.setDateFld(col++, statp.st_mtime); // date + fieldlist.replace(3, m_jobids); // use current jobids selection + // keep original info on the first cel that is never empty + item.widget(1)->setData(Qt::UserRole, fieldlist.join("\t")); + } + } + + results.clear(); + q = ".bvfs_lsfiles jobid=" + m_jobids + arg + + " limit=" + limit + " offset=" + offset ; + if (m_console->dir_cmd(q, results)) { + FileList->setRowCount(results.size() + nb); + foreach (QString resultline, results) { + int col=1; // skip icon + //PathId, FilenameId, fileid, jobid, lstat, name + fieldlist = resultline.split("\t"); + TableItemFormatter item(*FileList, row++); + item.setTextFld(col++, fieldlist.at(5)); // name + decode_stat(fieldlist.at(4).toLocal8Bit().data(), + &statp, sizeof(statp), &LinkFI); + item.setBytesFld(col++, QString().setNum(statp.st_size)); + item.setDateFld(col++, statp.st_mtime); + // keep original info on the first cel that is never empty + item.widget(1)->setData(Qt::UserRole, fieldlist.join("\t")); // keep info + } + } + FileList->verticalHeader()->hide(); + FileList->resizeColumnsToContents(); + FileList->resizeRowsToContents(); + FileList->setEditTriggers(QAbstractItemView::NoEditTriggers); +} + +void bRestore::PgSeltreeWidgetClicked() +{ + if(!m_populated) { + setupPage(); + } + if (!isOnceDocked()) { + dockPage(); + } +} + +// Display all versions of a file for this client +void bRestore::displayFileVersion(QString pathid, QString fnid, + QString client, QString filename) +{ + int row=0; + struct stat statp; + int32_t LinkFI; + Freeze frz_rev(*FileRevisions); /* disable updating*/ + FileRevisions->clearContents(); + + QString q = ".bvfs_versions jobid=" + m_jobids + + " pathid=" + pathid + + " fnid=" + fnid + + " client=" + client; + + if (VersionsChk->checkState() == Qt::Checked) { + q.append(" versions"); + } + + QStringList results; + QStringList fieldlist; + QString tmp; + if (m_console->dir_cmd(q, 
results)) { + FileRevisions->setRowCount(results.size()); + foreach (QString resultline, results) { + int col=0; + // 0 1 2 3 4 5 6 7 + //PathId, FilenameId, fileid, jobid, lstat, Md5, VolName, Inchanger + fieldlist = resultline.split("\t"); + TableItemFormatter item(*FileRevisions, row++); + item.setInChanger(col++, fieldlist.at(7)); // inchanger + item.setTextFld(col++, fieldlist.at(6)); // Volume + item.setNumericFld(col++, fieldlist.at(3)); // JobId + decode_stat(fieldlist.at(4).toLocal8Bit().data(), + &statp, sizeof(statp), &LinkFI); + item.setBytesFld(col++, QString().setNum(statp.st_size)); // size + item.setDateFld(col++, statp.st_mtime); // date + item.setTextFld(col++, fieldlist.at(5)); // chksum + + // Adjust the fieldlist for drag&drop + fieldlist.removeLast(); // inchanger + fieldlist.removeLast(); // volname + fieldlist.removeLast(); // md5 + fieldlist << m_path + filename; + + // keep original info on the first cel that is never empty + item.widget(1)->setData(Qt::UserRole, fieldlist.join("\t")); + } + } + FileRevisions->verticalHeader()->hide(); + FileRevisions->resizeColumnsToContents(); + FileRevisions->resizeRowsToContents(); + FileRevisions->setEditTriggers(QAbstractItemView::NoEditTriggers); +} + +void bRestore::showInfoForFile(QTableWidgetItem *widget) +{ + m_current = widget; + QTableWidgetItem *first = FileList->item(widget->row(), 1); + QStringList lst = first->data(Qt::UserRole).toString().split("\t"); + if (lst.at(1) == "0") { // no filenameid, should be a path + displayFiles(lst.at(0).toLongLong(), lst.at(5)); + } else { + displayFileVersion(lst.at(0), lst.at(1), m_client, lst.at(5)); + } +} + +void bRestore::applyLocation() +{ + displayFiles(0, LocationEntry->text()); +} + +void bRestore::clearVersions(QTableWidgetItem *item) +{ + if (item != m_current) { + FileRevisions->clearContents(); + FileRevisions->setRowCount(0); + } + m_current = item ; +} + +void bRestore::clearRestoreList() +{ + RestoreList->clearContents(); + RestoreList->setRowCount(0); +} + +void bRestore::runRestore() +{ + bRunRestore *r = new bRunRestore(this); + r->setVisible(true); +} + +void bRestore::setupPage() +{ + ClientList->addItem("Client list"); + ClientList->addItems(m_console->client_list); + connect(ClientList, SIGNAL(currentIndexChanged(int)), this, SLOT(setClient())); + connect(JobList, SIGNAL(currentIndexChanged(int)), this, SLOT(setJob())); + connect(FileList, SIGNAL(itemClicked(QTableWidgetItem*)), + this, SLOT(clearVersions(QTableWidgetItem *))); + connect(FileList, SIGNAL(itemDoubleClicked(QTableWidgetItem*)), + this, SLOT(showInfoForFile(QTableWidgetItem *))); + connect(LocationBp, SIGNAL(pressed()), this, SLOT(applyLocation())); + connect(MergeChk, SIGNAL(clicked()), this, SLOT(setJob())); + connect(ClearBp, SIGNAL(clicked()), this, SLOT(clearRestoreList())); + connect(RestoreBp, SIGNAL(clicked()), this, SLOT(runRestore())); + connect(FilterBp, SIGNAL(clicked()), this, SLOT(refreshView())); + m_populated = true; +} + +bRestore::~bRestore() +{ +} + +// Drag & Drop handling, not so easy... 
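// The File list and File revisions tables act as drag sources and the
// Restore list as the drop target. The payload is the tab-separated string
// kept in Qt::UserRole on column 1 of the dragged row (PathId, FilenameId,
// FileId, JobId, lstat, name), which dropEvent() below splits back into
// fields to populate the restore selection.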
+void bRestoreTable::mousePressEvent(QMouseEvent *event) +{ + QTableWidget::mousePressEvent(event); + + if (event->button() == Qt::LeftButton) { + dragStartPosition = event->pos(); + } +} + +// This event permits to send set custom data on drag&drop +// Don't forget to call original class if we are not interested +void bRestoreTable::mouseMoveEvent(QMouseEvent *event) +{ + int lastrow=-1; + + // Look just for drag&drop + if (!(event->buttons() & Qt::LeftButton)) { + QTableWidget::mouseMoveEvent(event); + return; + } + if ((event->pos() - dragStartPosition).manhattanLength() + < QApplication::startDragDistance()) + { + QTableWidget::mouseMoveEvent(event); + return; + } + + QList lst = selectedItems(); + if (mainWin->m_miscDebug) qDebug() << this << " selectedItems: " << lst; + if (lst.isEmpty()) { + return; + } + + QDrag *drag = new QDrag(this); + QMimeData *mimeData = new QMimeData; + for (int i=0; i < lst.size(); i++) { + if (lastrow != lst[i]->row()) { + lastrow = lst[i]->row(); + QTableWidgetItem *it = item(lastrow, 1); + mimeData->setText(it->data(Qt::UserRole).toString()); + break; // at this time, we do it one by one + } + } + drag->setMimeData(mimeData); + drag->exec(); +} + +// This event is called when the drag item enters in the destination area +void bRestoreTable::dragEnterEvent(QDragEnterEvent *event) +{ + if (event->source() == this) { + event->ignore(); + return; + } + if (event->mimeData()->hasText()) { + event->acceptProposedAction(); + } else { + event->ignore(); + } +} + +// It should not be essential to redefine this event, but it +// doesn't work if not defined +void bRestoreTable::dragMoveEvent(QDragMoveEvent *event) +{ + if (event->mimeData()->hasText()) { + event->acceptProposedAction(); + } else { + event->ignore(); + } +} + +// When user releases the button +void bRestoreTable::dropEvent(QDropEvent *event) +{ + int col=1; + struct stat statp; + int32_t LinkFI; + if (event->mimeData()->hasText()) { + TableItemFormatter item(*this, rowCount()); + setRowCount(rowCount() + 1); + QStringList fields = event->mimeData()->text().split("\t"); + if (fields.size() != 6) { + event->ignore(); + return; + } + if (fields.at(1) == "0") { + item.setFileType(0, "folder"); + } + item.setTextFld(col++, fields.at(5)); // filename + decode_stat(fields.at(4).toLocal8Bit().data(), + &statp, sizeof(statp), &LinkFI); + item.setBytesFld(col++, QString().setNum(statp.st_size)); // size + item.setDateFld(col++, statp.st_mtime); // date + item.setNumericFld(col++, fields.at(3)); // jobid + item.setNumericFld(col++, fields.at(2)); // fileid + // keep original info on the first cel that is never empty + item.widget(1)->setData(Qt::UserRole, event->mimeData()->text()); + event->acceptProposedAction(); + } else { + event->ignore(); + } +} + +// Use File Relocation bp +void bRunRestore::UFRcb() +{ + if (UseFileRelocationChk->checkState() == Qt::Checked) { + WhereEntry->setEnabled(false); + UseRegexpChk->setEnabled(true); + if (UseRegexpChk->checkState() == Qt::Checked) { + AddSuffixEntry->setEnabled(false); + AddPrefixEntry->setEnabled(false); + StripPrefixEntry->setEnabled(false); + WhereRegexpEntry->setEnabled(true); + } else { + AddSuffixEntry->setEnabled(true); + AddPrefixEntry->setEnabled(true); + StripPrefixEntry->setEnabled(true); + WhereRegexpEntry->setEnabled(false); + } + } else { + WhereEntry->setEnabled(true); + AddSuffixEntry->setEnabled(false); + AddPrefixEntry->setEnabled(false); + StripPrefixEntry->setEnabled(false); + UseRegexpChk->setEnabled(false); + 
WhereRegexpEntry->setEnabled(false); + } +} + +// Expert mode for file relocation +void bRunRestore::useRegexp() +{ + if (UseRegexpChk->checkState() == Qt::Checked) { + AddSuffixEntry->setEnabled(false); + AddPrefixEntry->setEnabled(false); + StripPrefixEntry->setEnabled(false); + WhereRegexpEntry->setEnabled(true); + } else { + AddSuffixEntry->setEnabled(true); + AddPrefixEntry->setEnabled(true); + StripPrefixEntry->setEnabled(true); + WhereRegexpEntry->setEnabled(false); + } +} + +// Display Form to run the restore job +bRunRestore::bRunRestore(bRestore *parent) +{ + brestore = parent; + setupUi(this); + ClientCb->addItems(parent->console()->client_list); + int i = ClientCb->findText(parent->m_client); + if (i >= 0) { + ClientCb->setCurrentIndex(i); + } + StorageCb->addItem(QString("")); + RestoreCb->addItems(parent->console()->restore_list); + WhenEditor->setDateTime(QDateTime::currentDateTime()); + StorageCb->addItems(parent->console()->storage_list); + connect(UseFileRelocationChk, SIGNAL(clicked()), this, SLOT(UFRcb())); + connect(UseRegexpChk, SIGNAL(clicked()), this, SLOT(useRegexp())); + connect(ActionBp, SIGNAL(accepted()), this, SLOT(computeRestore())); + // TODO: handle multiple restore job + struct job_defaults jd; + if (parent->console()->restore_list.size() > 0) { + jd.job_name = parent->console()->restore_list[0]; + brestore->console()->get_job_defaults(jd); + WhereEntry->setText(jd.where); + } + computeVolumeList(); +} + +void bRestore::get_info_from_selection(QStringList &fileids, + QStringList &jobids, + QStringList &dirids, + QStringList &findexes) +{ + struct stat statp; + int32_t LinkFI; + for (int i=0; i < RestoreList->rowCount(); i++) { + QTableWidgetItem *item = RestoreList->item(i, 1); + QString data = item->data(Qt::UserRole).toString(); + QStringList lst = data.split("\t"); + if (lst.at(1) != "0") { // skip path + fileids << lst.at(2); + jobids << lst.at(3); + decode_stat(lst.at(4).toLocal8Bit().data(), + &statp, sizeof(statp), &LinkFI); + if (LinkFI) { + findexes << lst.at(3) + "," + QString().setNum(LinkFI); + } + } else { + dirids << lst.at(0); + jobids << lst.at(3).split(","); // Can have multiple jobids + } + } + fileids.removeDuplicates(); + jobids.removeDuplicates(); + dirids.removeDuplicates(); + findexes.removeDuplicates(); +} + +// To compute volume list with directories, query is much slower +void bRunRestore::computeVolumeList() +{ + brestore->get_info_from_selection(m_fileids, m_jobids, m_dirids, m_findexes); + if (m_fileids.size() == 0) { + return; + } + + Freeze frz_lst(*TableMedia); /* disable updating*/ + QString q = +" SELECT DISTINCT VolumeName, Enabled, InChanger " + " FROM File, " + " ( " // -- Get all media from this job + " SELECT MIN(FirstIndex) AS FirstIndex, MAX(LastIndex) AS LastIndex, " + " VolumeName, Enabled, Inchanger " + " FROM JobMedia JOIN Media USING (MediaId) " + " WHERE JobId IN (" + m_jobids.join(",") + ") " + " GROUP BY VolumeName,Enabled,InChanger " + " ) AS allmedia " + " WHERE File.FileId IN (" + m_fileids.join(",") + ") " + " AND File.FileIndex >= allmedia.FirstIndex " + " AND File.FileIndex <= allmedia.LastIndex "; + int row=0; + QStringList results; + if (brestore->console()->sql_cmd(q, results)) { + QStringList fieldlist; + TableMedia->setRowCount(results.size()); + /* Iterate through the record returned from the query */ + foreach (QString resultline, results) { + // 0 1 2 + //volname, enabled, inchanger + fieldlist = resultline.split("\t"); + int col=0; + TableItemFormatter item(*TableMedia, row++); + 
item.setInChanger(col++, fieldlist.at(2)); // inchanger + item.setTextFld(col++, fieldlist.at(0)); // Volume + } + } + TableMedia->verticalHeader()->hide(); + TableMedia->resizeColumnsToContents(); + TableMedia->resizeRowsToContents(); + TableMedia->setEditTriggers(QAbstractItemView::NoEditTriggers); +} + +int64_t bRunRestore::runRestore(QString tablename) +{ + QString q; + QString tmp; + + tmp = ClientCb->currentText(); + if (tmp == "") { + return 0; + } + q = "restore client=" + tmp; + + tmp = CommentEntry->text(); + if (tmp != "") { + tmp.replace("\"", " "); + q += " comment=\"" + tmp + "\""; + } + + tmp = StorageCb->currentText(); + if (tmp != "") { + q += " storage=" + tmp; + } + + if (UseFileRelocationChk->checkState() == Qt::Checked) { + if (UseRegexpChk->checkState() == Qt::Checked) { + tmp = WhereRegexpEntry->text(); + if (tmp != "") { + tmp.replace("\"", ""); + q += " regexwhere=\"" + tmp + "\""; + } + } else { + QStringList lst; + tmp = StripPrefixEntry->text(); + if (tmp != "") { + tmp.replace("\"", ""); + lst.append("!" + tmp + "!!i"); + } + tmp = AddPrefixEntry->text(); + if (tmp != "") { + tmp.replace("\"", ""); + lst.append("!^!" + tmp + "!"); + } + tmp = AddSuffixEntry->text(); + if (tmp != "") { + tmp.replace("\"", ""); + lst.append("!([^/])$!$1" + tmp + "!"); + } + if (lst.size() > 0) { + q += " regexwhere=\"" + lst.join(",") + "\""; + } + } + } else { + tmp = WhereEntry->text(); + if (tmp != "") { + tmp.replace("\"", ""); + q += " where=\"" + tmp + "\""; + } + } + +// q += " priority=" + tmp.setNum(PrioritySb->value()); +// q += " job=\"" + RestoreCb->currentText() + "\""; + q += " file=\"?" + tablename + "\""; + q += " when=\"" + WhenEditor->dateTime().toString("yyyy-MM-dd hh:mm:ss") + "\""; + q += " done yes"; + + if (mainWin->m_miscDebug) qDebug() << q; + QStringList results; + if (brestore->console()->dir_cmd(q, results)) { + foreach (QString resultline, results) { + QStringList fieldlist = resultline.split("="); + if (fieldlist.size() == 2) { + return fieldlist.at(1).toLongLong(); + } + } + } + return 0; +} + +void bRunRestore::computeRestore() +{ + QString q = ".bvfs_restore path=b2123 jobid=" + m_jobids.join(","); + if (m_fileids.size() > 0) { + q += " fileid=" + m_fileids.join(","); + } + if (m_dirids.size() > 0) { + q += " dirid=" + m_dirids.join(","); + } + if (m_findexes.size() > 0) { + q += " hardlink=" + m_findexes.join(","); + } + if (mainWin->m_miscDebug) qDebug() << q; + + QStringList results; + if (brestore->console()->dir_cmd(q, results)) { + if (results.size() == 1 && results[0] == "OK") { + int64_t jobid = runRestore("b2123"); + if (mainWin->m_miscDebug) qDebug() << "jobid=" << jobid; + q = ".bvfs_cleanup path=b2123"; + brestore->console()->dir_cmd(q, results); + } + } +} diff --git a/src/qt-console/restore/brestore.ui b/src/qt-console/restore/brestore.ui new file mode 100644 index 00000000..921df790 --- /dev/null +++ b/src/qt-console/restore/brestore.ui @@ -0,0 +1,666 @@ + + + bRestoreForm + + + + 0 + 0 + 1011 + 650 + + + + brestore + + + + 9 + + + 6 + + + + + 6 + + + 0 + + + + + + 0 + 0 + + + + QComboBox::AdjustToContents + + + + + + + false + + + + 0 + 0 + + + + QComboBox::AdjustToContents + + + + + + + Qt::Horizontal + + + QSizePolicy::Minimum + + + + 40 + 20 + + + + + + + + true + + + Merge Jobs + + + false + + + + + + + View all Versions + + + + + + + Current Directory + + + + + + + + 150 + 0 + + + + + 150 + 0 + + + + + + + + + + Qt::Vertical + + + + Qt::Horizontal + + + + + 0 + 0 + + + + File list -- drag below for restore + + + + + + + 5 + 5 
+ + + + Double Click File Name to decend + + + true + + + QAbstractItemView::DragOnly + + + true + + + QAbstractItemView::SelectRows + + + false + + + false + + + false + + + false + + + + Type + + + + + File Name + + + Double Click to decend + + + + + Size + + + + + Date + + + + + + + + QLayout::SetMinimumSize + + + + + Selection Range + + + + + + + + 0 + 0 + + + + + 25 + 25 + + + + + + + + :/images/page-prev.gif:/images/page-prev.gif + + + + + + + QAbstractSpinBox::NoButtons + + + 9000000 + + + 500 + + + + + + + + 0 + 0 + + + + + 16 + 16 + + + + - + + + + + + + QAbstractSpinBox::NoButtons + + + false + + + 10 + + + 9999999 + + + 500 + + + 500 + + + + + + + + 0 + 0 + + + + + 25 + 25 + + + + + + + + :/images/page-next.gif:/images/page-next.gif + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + File Filter + + + + + + + + 150 + 0 + + + + + 111 + 23 + + + + + + + + + 25 + 25 + + + + + + + + :/images/view-refresh.png:/images/view-refresh.png + + + + + + + + + + + 0 + 0 + + + + File revisions -- drag below for restore + + + + 6 + + + 9 + + + + + + 0 + 0 + + + + true + + + QAbstractItemView::DragOnly + + + true + + + QAbstractItemView::SingleSelection + + + QAbstractItemView::SelectRows + + + false + + + + InChanger + + + + + Volume + + + + + JobId + + + + + Size + + + + + Date + + + + + Chksum + + + + + + + + + + + 6 + + + 0 + + + + + 6 + + + 0 + + + + + <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> +<html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal;"> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Drag and drop <span style=" font-weight:600;">File list</span> and/or <span style=" font-weight:600;">File revisions</span> items here for Restore</p></body></html> + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + Clear + + + + + + + false + + + Estimate + + + + + + + Restore... + + + + + + + + + true + + + false + + + QAbstractItemView::DropOnly + + + Qt::MoveAction + + + QAbstractItemView::MultiSelection + + + QAbstractItemView::SelectRows + + + false + + + false + + + + Type + + + + + FileName + + + + + Size + + + + + Date + + + + + JobId + + + + + FileIndex + + + + + Nb Files + + + + + + + + + + + + Select from Client list drop down then select from Job list drop down + + + + + + + + bRestoreTable + QTableWidget +

restore.h
+ + + + + + + + OffsetNextBp + clicked() + Offset2Spin + stepUp() + + + 275 + 279 + + + 232 + 279 + + + + + OffsetNextBp + clicked() + Offset1Spin + stepUp() + + + 272 + 281 + + + 92 + 279 + + + + + OffsetPrevBp + clicked() + Offset1Spin + stepDown() + + + 44 + 287 + + + 123 + 282 + + + + + OffsetPrevBp + clicked() + Offset2Spin + stepDown() + + + 50 + 284 + + + 221 + 282 + + + + + diff --git a/src/qt-console/restore/prerestore.cpp b/src/qt-console/restore/prerestore.cpp new file mode 100644 index 00000000..f2adb1f9 --- /dev/null +++ b/src/qt-console/restore/prerestore.cpp @@ -0,0 +1,389 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +/* + * preRestore -> dialog put up to determine the restore type + * + * Kern Sibbald, February MMVII + * + */ + +#include "bat.h" +#include "restore.h" + +/* Constructor to have jobid list default in */ +prerestorePage::prerestorePage(QString &data, unsigned int datatype) : Pages() +{ + m_dataIn = data; + m_dataInType = datatype; + buildPage(); +} + +/* Basic Constructor */ +prerestorePage::prerestorePage() +{ + m_dataIn = ""; + m_dataInType = R_NONE; + buildPage(); +} + +/* + * This is really the constructor + */ +void prerestorePage::buildPage() +{ + m_name = tr("Restore"); + setupUi(this); + pgInitialize(); + m_conn = m_console->notifyOff(); + QTreeWidgetItem* thisitem = mainWin->getFromHash(this); + thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/restore.png"))); + + jobCombo->addItems(m_console->job_list); + filesetCombo->addItems(m_console->fileset_list); + clientCombo->addItems(m_console->client_list); + poolCombo->addItem(tr("Any")); + poolCombo->addItems(m_console->pool_list); + storageCombo->addItems(m_console->storage_list); + /* current or before . . 
Start out with current checked */ + recentCheckBox->setCheckState(Qt::Checked); + beforeDateTime->setDisplayFormat(mainWin->m_dtformat); + beforeDateTime->setDateTime(QDateTime::currentDateTime()); + beforeDateTime->setEnabled(false); + selectFilesRadio->setChecked(true); + if (m_dataInType == R_NONE) { + selectJobRadio->setChecked(true); + selectJobIdsRadio->setChecked(false); + jobIdEdit->setText(tr("Comma separated list of JobIds")); + jobIdEdit->setEnabled(false); + } else if (m_dataInType == R_JOBIDLIST) { + selectJobIdsRadio->setChecked(true); + selectJobRadio->setChecked(false); + jobIdEdit->setText(m_dataIn); + jobRadioClicked(false); + QStringList fieldlist; + if (jobdefsFromJob(fieldlist, m_dataIn) == 1) { + filesetCombo->setCurrentIndex(filesetCombo->findText(fieldlist[2], Qt::MatchExactly)); + clientCombo->setCurrentIndex(clientCombo->findText(fieldlist[1], Qt::MatchExactly)); + jobCombo->setCurrentIndex(jobCombo->findText(fieldlist[0], Qt::MatchExactly)); + } + } else if (m_dataInType == R_JOBDATETIME) { + selectJobRadio->setChecked(true); + selectJobIdsRadio->setChecked(false); + jobIdEdit->setText(tr("Comma separated list of JobIds")); + jobIdEdit->setEnabled(false); + recentCheckBox->setCheckState(Qt::Unchecked); + jobRadioClicked(true); + QStringList fieldlist; + if (jobdefsFromJob(fieldlist, m_dataIn) == 1) { + filesetCombo->setCurrentIndex(filesetCombo->findText(fieldlist[2], Qt::MatchExactly)); + clientCombo->setCurrentIndex(clientCombo->findText(fieldlist[1], Qt::MatchExactly)); + jobCombo->setCurrentIndex(jobCombo->findText(fieldlist[0], Qt::MatchExactly)); + beforeDateTime->setDateTime(QDateTime::fromString(fieldlist[3], mainWin->m_dtformat)); + } + } + job_name_change(0); + connect(jobCombo, SIGNAL(currentIndexChanged(int)), this, SLOT(job_name_change(int))); + connect(okButton, SIGNAL(pressed()), this, SLOT(okButtonPushed())); + connect(cancelButton, SIGNAL(pressed()), this, SLOT(cancelButtonPushed())); + connect(recentCheckBox, SIGNAL(stateChanged(int)), this, SLOT(recentChanged(int))); + connect(selectJobRadio, SIGNAL(clicked(bool)), this, SLOT(jobRadioClicked(bool))); + connect(selectJobIdsRadio, SIGNAL(clicked(bool)), this, SLOT(jobidsRadioClicked(bool))); + connect(jobIdEdit, SIGNAL(editingFinished()), this, SLOT(jobIdEditFinished())); + + dockPage(); + setCurrent(); + this->show(); + if (mainWin->m_miscDebug) Pmsg0(000, "Leave preRestore\n"); +} + + +/* + * Check to make sure all is ok then start either the select window or the restore + * run window + */ +void prerestorePage::okButtonPushed() +{ + if (!selectJobRadio->isChecked()) { + if (!checkJobIdList()) { + return; + } + } + QString cmd; + + this->hide(); + + + cmd = QString("restore"); + cmd += " fileset=\"" + filesetCombo->currentText() + "\""; + cmd += " client=\"" + clientCombo->currentText() + "\""; + if (selectJobRadio->isChecked()) { + if (poolCombo->currentText() != tr("Any") ){ + cmd += " pool=\"" + poolCombo->currentText() + "\""; + } + cmd += " storage=\"" + storageCombo->currentText() + "\""; + if (recentCheckBox->checkState() == Qt::Checked) { + cmd += " current"; + } else { + QDateTime stamp = beforeDateTime->dateTime(); + QString before = stamp.toString(mainWin->m_dtformat); + cmd += " before=\"" + before + "\""; + } + } else { + cmd += " jobid=\"" + jobIdEdit->text() + "\""; + } + if (selectFilesRadio->isChecked()) { + if (!selectJobIdsRadio->isChecked()) + cmd += " select"; + } else { + cmd += " all done"; + } + + if (mainWin->m_commandDebug) { + Pmsg1(000, "preRestore command \'%s\'\n", 
cmd.toUtf8().data()); + } + /* + * Send off command that looks something like: + * + * restore fileset="Full Set" client="timmy-fd" + * storage="File" current select + */ + m_console->write_dir(m_conn, cmd.toUtf8().data()); + + /* Note, do not turn notifier back on here ... */ + if (selectFilesRadio->isChecked()) { + setConsoleCurrent(); + closeStackPage(); + /* wait will be exited in the restore page constructor */ + mainWin->waitEnter(); + } else { + closeStackPage(); + mainWin->resetFocus(); + } + m_console->notify(m_conn, true); + if (mainWin->m_miscDebug) Pmsg0(000, "preRestore OK pressed\n"); +} + + +/* + * Destroy the instace of the class + */ +void prerestorePage::cancelButtonPushed() +{ + mainWin->set_status(tr("Canceled")); + this->hide(); + m_console->notify(m_conn, true); + closeStackPage(); +} + + +/* + * Handle updating the other widget with job defaults when the job combo is changed. + */ +void prerestorePage::job_name_change(int index) +{ + job_defaults job_defs; + + (void)index; + job_defs.job_name = jobCombo->currentText(); + if (m_console->get_job_defaults(m_conn, job_defs)) { + filesetCombo->setCurrentIndex(filesetCombo->findText(job_defs.fileset_name, Qt::MatchExactly)); + clientCombo->setCurrentIndex(clientCombo->findText(job_defs.client_name, Qt::MatchExactly)); + poolCombo->setCurrentIndex(poolCombo->findText(tr("Any"), Qt::MatchExactly)); + storageCombo->setCurrentIndex(storageCombo->findText(job_defs.store_name, Qt::MatchExactly)); + } +} + +/* + * Handle the change of enabled of input widgets when the recent checkbox state + * is changed. + */ +void prerestorePage::recentChanged(int state) +{ + if ((state == Qt::Unchecked) && (selectJobRadio->isChecked())) { + beforeDateTime->setEnabled(true); + } else { + beforeDateTime->setEnabled(false); + } +} + + +/* + * For when jobs list is to be used, return a list which is the needed items from + * the job record + */ +int prerestorePage::jobdefsFromJob(QStringList &fieldlist, QString &jobId) +{ + QString job, client, fileset; + QString query(""); + query = "SELECT DISTINCT Job.Name AS JobName, Client.Name AS Client," + " FileSet.FileSet AS FileSet, Job.EndTime AS JobEnd," + " Job.Type AS JobType" + " From Job, Client, FileSet" + " WHERE Job.FileSetId=FileSet.FileSetId AND Job.ClientId=Client.ClientId" + " AND JobId=\'" + jobId + "\'"; + if (mainWin->m_sqlDebug) { Pmsg1(000, "query = %s\n", query.toUtf8().data()); } + QStringList results; + if (m_console->sql_cmd(m_conn, query, results)) { + QString field; + + /* Iterate through the lines of results, there should only be one. 
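       * Each line is tab-separated in the order of the SELECT above:
       * JobName, Client, FileSet, JobEnd, JobType; callers index
       * fieldlist accordingly.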
*/ + foreach (QString resultline, results) { + fieldlist = resultline.split("\t"); + } /* foreach resultline */ + } /* if results from query */ + + /* ***FIXME*** This should not ever be getting more than one */ + return results.count() >= 1; +} + +/* + * Function to handle when the jobidlist line edit input loses focus or is entered + */ +void prerestorePage::jobIdEditFinished() +{ + checkJobIdList(); +} + +bool prerestorePage::checkJobIdList() +{ + /* Need to check and make sure the text is a comma separated list of integers */ + QString line = jobIdEdit->text(); + if (line.contains(" ")) { + QMessageBox::warning(this, "Bat", + tr("There can be no spaces in the text for the joblist.\n" + "Press OK to continue?"), QMessageBox::Ok ); + return false; + } + QStringList joblist = line.split(",", QString::SkipEmptyParts); + bool allintokay = true, alljobok = true, allisjob = true; + QString jobName(""), clientName(""); + foreach (QString job, joblist) { + bool intok; + job.toInt(&intok, 10); + if (intok) { + /* are the integers representing a list of jobs all with the same job + * and client */ + QStringList fields; + if (jobdefsFromJob(fields, job) == 1) { + if (jobName == "") + jobName = fields[0]; + else if (jobName != fields[0]) + alljobok = false; + if (clientName == "") + clientName = fields[1]; + else if (clientName != fields[1]) + alljobok = false; + } else { + allisjob = false; + } + } else { + allintokay = false; + } + } + if (!allintokay){ + QMessageBox::warning(this, "Bat", + tr("The string is not a comma separated list of integers.\n" + "Press OK to continue?"), QMessageBox::Ok ); + return false; + } + if (!allisjob){ + QMessageBox::warning(this, tr("Bat"), + tr("At least one of the jobs is not a valid job of type \"Backup\".\n" + "Press OK to continue?"), QMessageBox::Ok ); + return false; + } + if (!alljobok){ + QMessageBox::warning(this, "Bat", + tr("All jobs in the list must be of the same jobName and same client.\n" + "Press OK to continue?"), QMessageBox::Ok ); + return false; + } + return true; +} + +/* + * Handle the change of enabled of input widgets when the job radio buttons + * are changed. 
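 * jobRadioClicked() and jobidsRadioClicked() mirror each other: selecting one
 * source enables its own inputs, disables the other's, and keeps both radio
 * buttons' checked states consistent.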
+ */ +void prerestorePage::jobRadioClicked(bool checked) +{ + if (checked) { + jobCombo->setEnabled(true); + filesetCombo->setEnabled(true); + clientCombo->setEnabled(true); + poolCombo->setEnabled(true); + storageCombo->setEnabled(true); + recentCheckBox->setEnabled(true); + if (!recentCheckBox->isChecked()) { + beforeDateTime->setEnabled(true); + } + jobIdEdit->setEnabled(false); + selectJobRadio->setChecked(true); + selectJobIdsRadio->setChecked(false); + } else { + jobCombo->setEnabled(false); + filesetCombo->setEnabled(false); + clientCombo->setEnabled(false); + poolCombo->setEnabled(false); + storageCombo->setEnabled(false); + recentCheckBox->setEnabled(false); + beforeDateTime->setEnabled(false); + jobIdEdit->setEnabled(true); + selectJobRadio->setChecked(false); + selectJobIdsRadio->setChecked(true); + } + if (mainWin->m_miscDebug) { + Pmsg2(000, "jobRadio=%d jobidsRadio=%d\n", selectJobRadio->isChecked(), + selectJobIdsRadio->isChecked()); + } +} + +void prerestorePage::jobidsRadioClicked(bool checked) +{ + if (checked) { + jobCombo->setEnabled(false); + filesetCombo->setEnabled(false); + clientCombo->setEnabled(false); + poolCombo->setEnabled(false); + storageCombo->setEnabled(false); + recentCheckBox->setEnabled(false); + beforeDateTime->setEnabled(false); + jobIdEdit->setEnabled(true); + selectJobRadio->setChecked(false); + selectJobIdsRadio->setChecked(true); + } else { + jobCombo->setEnabled(true); + filesetCombo->setEnabled(true); + clientCombo->setEnabled(true); + poolCombo->setEnabled(true); + storageCombo->setEnabled(true); + recentCheckBox->setEnabled(true); + if (!recentCheckBox->isChecked()) { + beforeDateTime->setEnabled(true); + } + jobIdEdit->setEnabled(false); + selectJobRadio->setChecked(true); + selectJobIdsRadio->setChecked(false); + } + if (mainWin->m_miscDebug) { + Pmsg2(000, "jobRadio=%d jobidsRadio=%d\n", selectJobRadio->isChecked(), + selectJobIdsRadio->isChecked()); + } +} diff --git a/src/qt-console/restore/prerestore.ui b/src/qt-console/restore/prerestore.ui new file mode 100644 index 00000000..5f375b2e --- /dev/null +++ b/src/qt-console/restore/prerestore.ui @@ -0,0 +1,414 @@ + + prerestoreForm + + + + 0 + 0 + 589 + 897 + + + + Form + + + true + + + + 9 + + + 6 + + + + + 0 + + + 6 + + + + + QFrame::NoFrame + + + QFrame::Plain + + + + 0 + + + 6 + + + + + All Files + + + false + + + + + + + Select Files + + + false + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + + + + 0 + + + 6 + + + + + + 7 + 0 + 0 + 0 + + + + + 600 + 600 + + + + + + + + Job + + + + + + + + + + JobIds + + + + + + + + + Qt::Vertical + + + QSizePolicy::Fixed + + + + 20 + 21 + + + + + + + + 0 + + + 6 + + + + + + 2000 + 1 + 1 + + + + yyyy-mm-dd h:mm:ss + + + true + + + + + + + Qt::LeftToRight + + + Use Most Recent + + + + + + + File Set: + + + filesetCombo + + + + + + + Client: + + + clientCombo + + + + + + + + + + Storage: + + + storageCombo + + + + + + + + + + Before: + + + beforeDateTime + + + + + + + + + + Pool: + + + poolCombo + + + + + + + + + + Qt::Horizontal + + + + 262 + 21 + + + + + + + + 0 + + + 6 + + + + + Qt::Horizontal + + + + 171 + 20 + + + + + + + + OK + + + + + + + Cancel + + + + + + + + + + + + + Qt::Vertical + + + + 71 + 41 + + + + + + + + Qt::Vertical + + + + 387 + 161 + + + + + + + + Qt::Horizontal + + + + 41 + 139 + + + + + + + + Qt::Horizontal + + + + 21 + 139 + + + + + + + + 0 + + + 6 + + + + + Qt::Horizontal + + + + 71 + 21 + + + + + + + + + 16777215 + 30 + + + + <h3>Select Jobs</h3> + + + + + + + 
Qt::Horizontal + + + + 81 + 20 + + + + + + + + + + + diff --git a/src/qt-console/restore/restore.cpp b/src/qt-console/restore/restore.cpp new file mode 100644 index 00000000..c8c0770f --- /dev/null +++ b/src/qt-console/restore/restore.cpp @@ -0,0 +1,498 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +/* + * Restore Class + * + * Kern Sibbald, February MMVII + * + */ + +#include "bat.h" +#include "restore.h" + +static const int dbglvl = 100; + +restorePage::restorePage(int conn) : Pages() +{ + Dmsg1(dbglvl, "Construcing restorePage Instance connection %i\n", conn); + m_conn = conn; + QStringList titles; + + setupUi(this); + m_name = tr("Restore Select"); + pgInitialize(); + QTreeWidgetItem* thisitem = mainWin->getFromHash(this); + thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/restore.png"))); + + m_console->notify(m_conn, false); /* this should already be off */ + + connect(fileWidget, SIGNAL(itemDoubleClicked(QTreeWidgetItem*, int)), + this, SLOT(fileDoubleClicked(QTreeWidgetItem *, int))); + connect(directoryWidget, SIGNAL( + currentItemChanged(QTreeWidgetItem *, QTreeWidgetItem *)), + this, SLOT(directoryItemChanged(QTreeWidgetItem *, QTreeWidgetItem *))); + connect(upButton, SIGNAL(pressed()), this, SLOT(upButtonPushed())); + connect(markButton, SIGNAL(pressed()), this, SLOT(markButtonPushed())); + connect(unmarkButton, SIGNAL(pressed()), this, SLOT(unmarkButtonPushed())); + connect(okButton, SIGNAL(pressed()), this, SLOT(okButtonPushed())); + connect(cancelButton, SIGNAL(pressed()), this, SLOT(cancelButtonPushed())); + + fileWidget->setContextMenuPolicy(Qt::ActionsContextMenu); + fileWidget->addAction(actionMark); + fileWidget->addAction(actionUnMark); + connect(actionMark, SIGNAL(triggered()), this, SLOT(markButtonPushed())); + connect(actionUnMark, SIGNAL(triggered()), this, SLOT(unmarkButtonPushed())); + + setFont(m_console->get_font()); + m_console->displayToPrompt(m_conn); + + titles << tr("Mark") << tr("File") << tr("Mode") << tr("User") + << tr("Group") << tr("Size") << tr("Date"); + fileWidget->setHeaderLabels(titles); + + get_cwd(); + + readSettings(); + /* wait was entered from pre-restore + * will exit, but will reenter in fillDirectory */ + mainWin->waitExit(); + fillDirectory(); + dockPage(); + setCurrent(); + this->show(); + if (mainWin->m_miscDebug) Pmsg0(000, "Leave restorePage\n"); +} + +restorePage::~restorePage() +{ + writeSettings(); +} + +/* + * Fill the fileWidget box with the contents of the current directory + */ +void restorePage::fillDirectory() +{ + mainWin->waitEnter(); + char modes[20], user[20], group[20], size[20], date[30]; + char marked[10]; + int pnl, fnl; + POOLMEM *file = get_pool_memory(PM_FNAME); + POOLMEM *path = get_pool_memory(PM_FNAME); + + fileWidget->clear(); + m_console->write_dir(m_conn, "dir", false); + QList treeItemList; + QStringList item; + m_rx.setPattern("has no children\\.$"); + bool first = true; + while (m_console->read(m_conn) > 0) { + char *p = 
m_console->msg(m_conn); + char *l; + strip_trailing_junk(p); + if (*p == '$' || !*p) { continue; } + if (first) { + if (m_rx.indexIn(QString(p)) != -1) { continue; } + first = false; + } + l = p; + skip_nonspaces(&p); /* permissions */ + *p++ = 0; + bstrncpy(modes, l, sizeof(modes)); + skip_spaces(&p); + skip_nonspaces(&p); /* link count */ + *p++ = 0; + skip_spaces(&p); + l = p; + skip_nonspaces(&p); /* user */ + *p++ = 0; + skip_spaces(&p); + bstrncpy(user, l, sizeof(user)); + l = p; + skip_nonspaces(&p); /* group */ + *p++ = 0; + bstrncpy(group, l, sizeof(group)); + skip_spaces(&p); + l = p; + skip_nonspaces(&p); /* size */ + *p++ = 0; + bstrncpy(size, l, sizeof(size)); + skip_spaces(&p); + l = p; + skip_nonspaces(&p); /* date/time */ + skip_spaces(&p); + skip_nonspaces(&p); + *p++ = 0; + bstrncpy(date, l, sizeof(date)); + skip_spaces(&p); + if (*p == '*') { + bstrncpy(marked, "*", sizeof(marked)); + p++; + } else { + bstrncpy(marked, " ", sizeof(marked)); + } + split_path_and_filename(p, &path, &pnl, &file, &fnl); + item.clear(); + item << "" << file << modes << user << group << size << date; + if (item[1].endsWith("/")) { + addDirectory(item[1]); + } + QTreeWidgetItem *ti = new QTreeWidgetItem((QTreeWidget *)0, item); + ti->setTextAlignment(5, Qt::AlignRight); /* right align size */ + if (strcmp(marked, "*") == 0) { + ti->setIcon(0, QIcon(QString::fromUtf8(":images/check.png"))); + ti->setData(0, Qt::UserRole, true); + } else { + ti->setIcon(0, QIcon(QString::fromUtf8(":images/unchecked.png"))); + ti->setData(0, Qt::UserRole, false); + } + treeItemList.append(ti); + } + fileWidget->clear(); + fileWidget->insertTopLevelItems(0, treeItemList); + for (int i=0; i<7; i++) { + fileWidget->resizeColumnToContents(i); + } + + free_pool_memory(file); + free_pool_memory(path); + mainWin->waitExit(); +} + +/* + * Function called from fill directory when a directory is found to see if this + * directory exists in the directory pane and then add it to the directory pane + */ +void restorePage::addDirectory(QString &newdirr) +{ + QString newdir = newdirr; + QString fullpath = m_cwd + newdirr; + bool ok = true; + + if (mainWin->m_miscDebug) { + QString msg = QString(tr("In addDirectory cwd \"%1\" newdir \"%2\" fullpath \"%3\"\n")) + .arg(m_cwd) + .arg(newdir) + .arg(fullpath); + Pmsg1(dbglvl, "%s\n", msg.toUtf8().data()); + } + + if (isWin32Path(fullpath)) { + if (mainWin->m_miscDebug) Pmsg0(dbglvl, "Windows drive\n"); + if (fullpath.left(1) == "/") { + fullpath.replace(0, 1, ""); /* strip leading / */ + } + /* If drive and not already in add it */ + if (fullpath.length() == 3 && !m_dirPaths.contains(fullpath)) { + QTreeWidgetItem *item = new QTreeWidgetItem(directoryWidget); + item->setIcon(0,QIcon(QString::fromUtf8(":images/folder.png"))); + item->setText(0, fullpath.toUtf8().data()); + if (mainWin->m_miscDebug) { + Pmsg1(dbglvl, "Pre Inserting %s\n",fullpath.toUtf8().data()); + } + m_dirPaths.insert(fullpath, item); + m_dirTreeItems.insert(item, fullpath); + directoryWidget->setCurrentItem(NULL); + } + } else { + // Unix add / first if not already there + if (m_dirPaths.empty()) { + QTreeWidgetItem *item = new QTreeWidgetItem(directoryWidget); + item->setIcon(0,QIcon(QString::fromUtf8(":images/folder.png"))); + + QString text("/"); + item->setText(0, text.toUtf8().data()); + if (mainWin->m_miscDebug) { + Pmsg1(dbglvl, "Pre Inserting %s\n",text.toUtf8().data()); + } + m_dirPaths.insert(text, item); + m_dirTreeItems.insert(item, text); + } + } + + /* Does it already exist ?? 
*/ + if (!m_dirPaths.contains(fullpath)) { + QTreeWidgetItem *item = NULL; + if (isWin32Path(fullpath)) { + /* this is the base widget */ + item = new QTreeWidgetItem(directoryWidget); + item->setText(0, fullpath.toUtf8().data()); + if (mainWin->m_miscDebug) Pmsg1(dbglvl, "Windows: %s\n", fullpath.toUtf8().data()); + item->setIcon(0,QIcon(QString::fromUtf8(":images/folder.png"))); + } else { + QTreeWidgetItem *parent = m_dirPaths.value(m_cwd); + if (parent) { + /* new directories to add */ + item = new QTreeWidgetItem(parent); + item->setText(0, newdir.toUtf8().data()); + item->setIcon(0,QIcon(QString::fromUtf8(":images/folder.png"))); + directoryWidget->expandItem(parent); + if (mainWin->m_miscDebug) { + Pmsg1(dbglvl, "%s\n", newdir.toUtf8().data()); + } + } else { + ok = false; + if (mainWin->m_miscDebug) { + QString msg = QString(tr("In else of if parent cwd \"%1\" newdir \"%2\"\n")) + .arg(m_cwd) + .arg(newdir); + Pmsg1(dbglvl, "%s\n", msg.toUtf8().data()); + } + } + } + /* insert into both forward and reverse hash */ + if (ok) { + if (mainWin->m_miscDebug) { + Pmsg1(dbglvl, "Inserting %s\n",fullpath.toUtf8().data()); + } + m_dirPaths.insert(fullpath, item); + m_dirTreeItems.insert(item, fullpath); + } + } +} + +/* + * Executed when the tree item in the directory pane is changed. This will + * allow us to populate the file pane and make this the cwd. + */ +void restorePage::directoryItemChanged(QTreeWidgetItem *currentitem, + QTreeWidgetItem * /*previousitem*/) +{ + QString fullpath = m_dirTreeItems.value(currentitem); + statusLine->setText(""); + if (fullpath != "") { + cwd(fullpath.toUtf8().data()); + fillDirectory(); + } +} + +void restorePage::okButtonPushed() +{ + this->hide(); + m_console->write(m_conn, "done"); + m_console->notify(m_conn, true); + setConsoleCurrent(); + closeStackPage(); + mainWin->resetFocus(); +} + + +void restorePage::cancelButtonPushed() +{ + this->hide(); + m_console->write(m_conn, "quit"); + m_console->displayToPrompt(m_conn); + mainWin->set_status(tr("Canceled")); + closeStackPage(); + m_console->notify(m_conn, true); + mainWin->resetFocus(); +} + +void restorePage::fileDoubleClicked(QTreeWidgetItem *item, int column) +{ + char cmd[1000]; + statusLine->setText(""); + if (column == 0) { /* mark/unmark */ + mainWin->waitEnter(); + if (item->data(0, Qt::UserRole).toBool()) { + bsnprintf(cmd, sizeof(cmd), "unmark \"%s\"", item->text(1).toUtf8().data()); + item->setIcon(0, QIcon(QString::fromUtf8(":images/unchecked.png"))); + item->setData(0, Qt::UserRole, false); + } else { + bsnprintf(cmd, sizeof(cmd), "mark \"%s\"", item->text(1).toUtf8().data()); + item->setIcon(0, QIcon(QString::fromUtf8(":images/check.png"))); + item->setData(0, Qt::UserRole, true); + } + m_console->write_dir(m_conn, cmd, false); + if (m_console->read(m_conn) > 0) { + strip_trailing_junk(m_console->msg(m_conn)); + statusLine->setText(m_console->msg(m_conn)); + } + m_console->displayToPrompt(m_conn); + mainWin->waitExit(); + return; + } + /* + * Double clicking other than column 0 means to decend into + * the directory -- or nothing if it is not a directory. 
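+ *
+ * For reference, this page drives the director's interactive restore
+ * commands over the existing console connection. A rough trace of the
+ * traffic the UI generates (command names taken from the calls in this
+ * file; the quoted arguments are placeholders for illustration only):
+ *
+ *    mark "somefile"        Mark button or column-0 double click
+ *    unmark "somefile"      Unmark button or column-0 double click
+ *    cd "/some/dir"         descending into a directory (cwd())
+ *    .pwd                   get_cwd(), query the current directory
+ *    done                   OK button, leave tree mode
+ *    quit                   Cancel button, abandon the restore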
+ */ + if (item->text(1).endsWith("/")) { + QString fullpath = m_cwd + item->text(1); + QTreeWidgetItem *item = m_dirPaths.value(fullpath); + if (mainWin->m_miscDebug) { + Pmsg1(dbglvl, "%s\n", fullpath.toUtf8().data()); + } + if (item) { + directoryWidget->setCurrentItem(item); + } else { + QString msg = QString("DoubleClick else of item column %1 fullpath %2\n") + .arg(column,10) + .arg(fullpath); + if (mainWin->m_miscDebug) Pmsg1(dbglvl, "%s\n", msg.toUtf8().data()); + } + } +} + +/* + * If up button pushed, making the parent tree widget current will call fill + * directory. + */ +void restorePage::upButtonPushed() +{ + cwd(".."); + QTreeWidgetItem *item = m_dirPaths.value(m_cwd); + if (item) { + directoryWidget->setCurrentItem(item); + } + statusLine->setText(""); +} + +/* + * Mark selected items + */ +void restorePage::markButtonPushed() +{ + mainWin->waitEnter(); + QList treeItemList = fileWidget->selectedItems(); + QTreeWidgetItem *item; + char cmd[1000]; + int count = 0; + statusLine->setText(""); + foreach (item, treeItemList) { + count++; + bsnprintf(cmd, sizeof(cmd), "mark \"%s\"", item->text(1).toUtf8().data()); + item->setIcon(0, QIcon(QString::fromUtf8(":images/check.png"))); + m_console->write_dir(m_conn, cmd, false); + if (m_console->read(m_conn) > 0) { + strip_trailing_junk(m_console->msg(m_conn)); + statusLine->setText(m_console->msg(m_conn)); + } + Dmsg1(dbglvl, "cmd=%s\n", cmd); + m_console->discardToPrompt(m_conn); + } + if (count == 0) { + mainWin->set_status("Nothing selected, nothing done"); + statusLine->setText("Nothing selected, nothing done"); + } + mainWin->waitExit(); +} + +/* + * Unmark selected items + */ +void restorePage::unmarkButtonPushed() +{ + mainWin->waitEnter(); + QList treeItemList = fileWidget->selectedItems(); + QTreeWidgetItem *item; + char cmd[1000]; + int count = 0; + statusLine->setText(""); + foreach (item, treeItemList) { + count++; + bsnprintf(cmd, sizeof(cmd), "unmark \"%s\"", item->text(1).toUtf8().data()); + item->setIcon(0, QIcon(QString::fromUtf8(":images/unchecked.png"))); + m_console->write_dir(m_conn, cmd, false); + if (m_console->read(m_conn) > 0) { + strip_trailing_junk(m_console->msg(m_conn)); + statusLine->setText(m_console->msg(m_conn)); + } + Dmsg1(dbglvl, "cmd=%s\n", cmd); + m_console->discardToPrompt(m_conn); + } + if (count == 0) { + mainWin->set_status(tr("Nothing selected, nothing done")); + statusLine->setText(tr("Nothing selected, nothing done")); + } + mainWin->waitExit(); +} + +/* + * Change current working directory + */ +bool restorePage::cwd(const char *dir) +{ + int stat; + char cd_cmd[MAXSTRING]; + + mainWin->waitEnter(); + statusLine->setText(""); + bsnprintf(cd_cmd, sizeof(cd_cmd), "cd \"%s\"", dir); + Dmsg2(dbglvl, "dir=%s cmd=%s\n", dir, cd_cmd); + m_console->write_dir(m_conn, cd_cmd, false); + lineEdit->clear(); + if ((stat = m_console->read(m_conn)) > 0) { + m_cwd = m_console->msg(m_conn); + lineEdit->insert(m_cwd); + Dmsg2(dbglvl, "cwd=%s msg=%s\n", m_cwd.toUtf8().data(), m_console->msg(m_conn)); + } else { + Dmsg1(dbglvl, "stat=%d\n", stat); + QMessageBox::critical(this, "Error", tr("cd command failed"), QMessageBox::Ok); + } + m_console->discardToPrompt(m_conn); + mainWin->waitExit(); + return true; /* ***FIXME*** return real status */ +} + +/* + * Return cwd when in tree restore mode + */ +char *restorePage::get_cwd() +{ + int stat; + mainWin->waitEnter(); + m_console->write_dir(m_conn, ".pwd", false); + Dmsg0(dbglvl, "send: .pwd\n"); + if ((stat = m_console->read(m_conn)) > 0) { + m_cwd = 
m_console->msg(m_conn); + Dmsg2(dbglvl, "cwd=%s msg=%s\n", m_cwd.toUtf8().data(), m_console->msg(m_conn)); + } else { + Dmsg1(dbglvl, "Something went wrong read stat=%d\n", stat); + QMessageBox::critical(this, "Error", tr(".pwd command failed"), QMessageBox::Ok); + } + m_console->discardToPrompt(m_conn); + mainWin->waitExit(); + return m_cwd.toUtf8().data(); +} + +/* + * Save user settings associated with this page + */ +void restorePage::writeSettings() +{ + QSettings settings(m_console->m_dir->name(), "bat"); + settings.beginGroup("RestorePage"); + settings.setValue(m_splitText, splitter->saveState()); + settings.endGroup(); +} + +/* + * Read and restore user settings associated with this page + */ +void restorePage::readSettings() +{ + m_splitText = "splitterSizes_2"; + QSettings settings(m_console->m_dir->name(), "bat"); + settings.beginGroup("RestorePage"); + if (settings.contains(m_splitText)) { + splitter->restoreState(settings.value(m_splitText).toByteArray()); + } + settings.endGroup(); +} diff --git a/src/qt-console/restore/restore.h b/src/qt-console/restore/restore.h new file mode 100644 index 00000000..542c953a --- /dev/null +++ b/src/qt-console/restore/restore.h @@ -0,0 +1,184 @@ +#ifndef _RESTORE_H_ +#define _RESTORE_H_ + +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * Kern Sibbald, February 2007 + */ + +#include +#include /* Needed for some systems */ +#if QT_VERSION >= 0x050000 +#include +#else +#include +#endif +#include "pages.h" +#include "ui_runrestore.h" + +class bRestoreTable : public QTableWidget +{ + Q_OBJECT +private: + QPoint dragStartPosition; +public: + bRestoreTable(QWidget *parent) + : QTableWidget(parent) + { + } + void mousePressEvent(QMouseEvent *event); + void mouseMoveEvent(QMouseEvent *event); + + void dragEnterEvent(QDragEnterEvent *event); + void dragMoveEvent(QDragMoveEvent *event); + void dropEvent(QDropEvent *event); +}; + +#include "ui_brestore.h" +#include "ui_restore.h" +#include "ui_prerestore.h" + +enum { + R_NONE, + R_JOBIDLIST, + R_JOBDATETIME +}; + +/* + * The pre-restore dialog selects the Job/Client to be restored + * It really could use considerable enhancement. + */ +class prerestorePage : public Pages, public Ui::prerestoreForm +{ + Q_OBJECT + +public: + prerestorePage(); + prerestorePage(QString &data, unsigned int); + +private slots: + void okButtonPushed(); + void cancelButtonPushed(); + void job_name_change(int index); + void recentChanged(int); + void jobRadioClicked(bool); + void jobidsRadioClicked(bool); + void jobIdEditFinished(); + +private: + int m_conn; + int jobdefsFromJob(QStringList &, QString &); + void buildPage(); + bool checkJobIdList(); + QString m_dataIn; + unsigned int m_dataInType; +}; + +/* + * The restore dialog is brought up once we are in the Bacula + * restore tree routines. It handles putting up a GUI tree + * representation of the files to be restored. 
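+ *
+ * A minimal usage sketch (assuming the director has already been put
+ * into its interactive tree-restore mode on console connection 'conn';
+ * 'conn' and 'rp' are illustrative names only):
+ *
+ *    restorePage *rp = new restorePage(conn);
+ *
+ * The constructor docks the page, fills the directory pane through
+ * fillDirectory() and makes itself current; the OK and Cancel handlers
+ * tear the page down again via closeStackPage().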
+ */
+class restorePage : public Pages, public Ui::restoreForm
+{
+   Q_OBJECT
+
+public:
+   restorePage(int conn);
+   ~restorePage();
+   void fillDirectory();
+   char *get_cwd();
+   bool cwd(const char *);
+
+private slots:
+   void okButtonPushed();
+   void cancelButtonPushed();
+   void fileDoubleClicked(QTreeWidgetItem *item, int column);
+   void directoryItemChanged(QTreeWidgetItem *, QTreeWidgetItem *);
+   void upButtonPushed();
+   void unmarkButtonPushed();
+   void markButtonPushed();
+   void addDirectory(QString &);
+
+private:
+   int m_conn;
+   void writeSettings();
+   void readSettings();
+   QString m_cwd;
+   QHash<QString, QTreeWidgetItem *> m_dirPaths;
+   QHash<QTreeWidgetItem *, QString> m_dirTreeItems;
+   QRegExp m_rx;
+   QString m_splitText;
+};
+
+class bRestore : public Pages, public Ui::bRestoreForm
+{
+   Q_OBJECT
+
+public:
+   bRestore();
+   ~bRestore();
+   void PgSeltreeWidgetClicked();
+   QString m_client;
+   QString m_jobids;
+   void get_info_from_selection(QStringList &fileids, QStringList &jobids,
+       QStringList &dirids, QStringList &fileindexes);
+
+public slots:
+   void setClient();
+   void setJob();
+   void showInfoForFile(QTableWidgetItem *);
+   void applyLocation();
+   void clearVersions(QTableWidgetItem *);
+   void clearRestoreList();
+   void runRestore();
+   void refreshView();
+private:
+   QString m_path;
+   int64_t m_pathid;
+   QTableWidgetItem *m_current;
+   void setupPage();
+   bool m_populated;
+   void displayFiles(int64_t pathid, QString path);
+   void displayFileVersion(QString pathid, QString fnid,
+       QString client, QString filename);
+};
+
+class bRunRestore : public QDialog, public Ui::bRunRestoreForm
+{
+   Q_OBJECT
+private:
+   bRestore *brestore;
+   QStringList m_fileids, m_jobids, m_dirids, m_findexes;
+
+public:
+   bRunRestore(bRestore *parent);
+   ~bRunRestore() {}
+   void computeVolumeList();
+   int64_t runRestore(QString tablename);
+
+public slots:
+   void useRegexp();
+   void UFRcb();
+   void computeRestore();
+};
+
+#endif /* _RESTORE_H_ */
diff --git a/src/qt-console/restore/restore.ui b/src/qt-console/restore/restore.ui
new file mode 100644
index 00000000..968e9e8c
--- /dev/null
+++ b/src/qt-console/restore/restore.ui
@@ -0,0 +1,400 @@
+[restore.ui: 400 lines of Qt Designer XML for the "Restore Select" form; the XML markup was lost in extraction. Still recoverable from the remaining text: a 796x597 form with an "<h3>Restore Select</h3>" heading, Up (:/images/up.png), Mark (:/images/check.png) and Unmark (:/images/unchecked.png) tool buttons, a "<h2>Files</h2>" heading, a horizontal splitter holding a "Directories" tree ("Select a Directory") and a seven-column file view, a "Current Dir:" label with its lineEdit buddy, OK and Cancel buttons, a "Status:" line, and Mark/UnMark context-menu actions using the same check/unchecked icons.]
diff --git a/src/qt-console/restore/restoretree.cpp b/src/qt-console/restore/restoretree.cpp
new file mode 100644
index 00000000..143f3c53
--- /dev/null
+++ b/src/qt-console/restore/restoretree.cpp
@@ -0,0 +1,1765 @@
+/*
+   Bacula(R) - The Network Backup Solution
+
+   Copyright (C) 2000-2016 Kern Sibbald
+
+   The original author of Bacula is Kern Sibbald, with contributions
+   from many others, a complete list can be found in the file AUTHORS.
+
+   You may use this file and others of this release according to the
+   license defined in the LICENSE file, which includes the Affero General
+   Public License, v3.0 ("AGPLv3") and some additional permissions and
+   terms pursuant to its AGPLv3 Section 7.
+
+   This notice must be preserved when any source code is
+   conveyed and/or propagated.
+
+   Bacula(R) is a registered trademark of Kern Sibbald.
+*/
+
+/*
+ *
+ *  Restore Class
+ *
+ *   Kern Sibbald, February MMVII
+ *
+ */
+
+#include "bat.h"
+#include "restoretree.h"
+#include "pages.h"
+
+restoreTree::restoreTree() : Pages()
+{
+   setupUi(this);
+   m_name = tr("Version Browser");
+   pgInitialize();
+   QTreeWidgetItem* thisitem = mainWin->getFromHash(this);
+   thisitem->setIcon(0, QIcon(QString::fromUtf8(":images/browse.png")));
+
+   m_populated = false;
+
+   m_debugCnt = 0;
+   m_debugTrap = true;
+
+   QGridLayout *gridLayout = new QGridLayout(this);
+   gridLayout->setSpacing(6);
+   gridLayout->setMargin(9);
+   gridLayout->setObjectName(QString::fromUtf8("gridLayout"));
+
+   m_splitter = new QSplitter(Qt::Vertical, this);
+   QScrollArea *area = new QScrollArea();
+   area->setObjectName(QString::fromUtf8("area"));
+   area->setWidget(widget);
+   area->setWidgetResizable(true);
+   m_splitter->addWidget(area);
+   m_splitter->addWidget(splitter);
+   splitter->setChildrenCollapsible(false);
+
+   gridLayout->addWidget(m_splitter, 0, 0, 1, 1);
+
+   /* progress widgets */
+   prBar1->setVisible(false);
+   prBar2->setVisible(false);
+   prLabel1->setVisible(false);
+   prLabel2->setVisible(false);
+
+   /* Set Defaults for check and spin for limits */
+   limitCheckBox->setCheckState(mainWin->m_recordLimitCheck ? Qt::Checked : Qt::Unchecked);
+   limitSpinBox->setValue(mainWin->m_recordLimitVal);
+   daysCheckBox->setCheckState(mainWin->m_daysLimitCheck ? Qt::Checked : Qt::Unchecked);
+   daysSpinBox->setValue(mainWin->m_daysLimitVal);
+   readSettings();
+   m_nullFileNameId = -1;
+   dockPage();
+   setCurrent();
+}
+
+restoreTree::~restoreTree()
+{
+   writeSettings();
+}
+
+/*
+ * Called from the constructor to set up the page widgets and connections.
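+ * (In this version the call is actually made lazily from
+ * currentStackItem(), the first time the page becomes visible, rather
+ * than directly from the constructor; see currentStackItem() below.)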
+ */ +void restoreTree::setupPage() +{ + connect(refreshButton, SIGNAL(pressed()), this, SLOT(refreshButtonPushed())); + connect(restoreButton, SIGNAL(pressed()), this, SLOT(restoreButtonPushed())); + connect(jobCombo, SIGNAL(currentIndexChanged(int)), this, SLOT(jobComboChanged(int))); + connect(jobCombo, SIGNAL(currentIndexChanged(int)), this, SLOT(updateRefresh())); + connect(clientCombo, SIGNAL(currentIndexChanged(int)), this, SLOT(updateRefresh())); + connect(fileSetCombo, SIGNAL(currentIndexChanged(int)), this, SLOT(updateRefresh())); + connect(limitCheckBox, SIGNAL(stateChanged(int)), this, SLOT(updateRefresh())); + connect(daysCheckBox, SIGNAL(stateChanged(int)), this, SLOT(updateRefresh())); + connect(daysSpinBox, SIGNAL(valueChanged(int)), this, SLOT(updateRefresh())); + connect(limitSpinBox, SIGNAL(valueChanged(int)), this, SLOT(updateRefresh())); + connect(directoryTree, SIGNAL(currentItemChanged(QTreeWidgetItem *, QTreeWidgetItem *)), + this, SLOT(directoryCurrentItemChanged(QTreeWidgetItem *, QTreeWidgetItem *))); + connect(directoryTree, SIGNAL(itemExpanded(QTreeWidgetItem *)), + this, SLOT(directoryItemExpanded(QTreeWidgetItem *))); + connect(directoryTree, SIGNAL(itemChanged(QTreeWidgetItem *, int)), + this, SLOT(directoryItemChanged(QTreeWidgetItem *, int))); + connect(fileTable, SIGNAL(currentItemChanged(QTableWidgetItem *, QTableWidgetItem *)), + this, SLOT(fileCurrentItemChanged(QTableWidgetItem *, QTableWidgetItem *))); + connect(jobTable, SIGNAL(cellClicked(int, int)), + this, SLOT(jobTableCellClicked(int, int))); + + QStringList titles = QStringList() << tr("Directories"); + directoryTree->setHeaderLabels(titles); + clientCombo->addItems(m_console->client_list); + fileSetCombo->addItem(tr("Any")); + fileSetCombo->addItems(m_console->fileset_list); + jobCombo->addItem(tr("Any")); + jobCombo->addItems(m_console->job_list); + + directoryTree->setContextMenuPolicy(Qt::ActionsContextMenu); +} + +void restoreTree::updateRefresh() +{ + if (mainWin->m_rtPopDirDebug) Pmsg2(000, "testing prev=\"%s\" current=\"%s\"\n", m_prevJobCombo.toUtf8().data(), jobCombo->currentText().toUtf8().data()); + m_dropdownChanged = (m_prevJobCombo != jobCombo->currentText()) + || (m_prevClientCombo != clientCombo->currentText()) + || (m_prevFileSetCombo != fileSetCombo->currentText() + || (m_prevLimitSpinBox != limitSpinBox->value()) + || (m_prevDaysSpinBox != daysSpinBox->value()) + || (m_prevLimitCheckState != limitCheckBox->checkState()) + || (m_prevDaysCheckState != daysCheckBox->checkState()) + ); + if (m_dropdownChanged) { + if (mainWin->m_rtPopDirDebug) Pmsg0(000, "In restoreTree::updateRefresh Is CHANGED\n"); + refreshLabel->setText(tr("Refresh From Re-Select")); + } else { + if (mainWin->m_rtPopDirDebug) Pmsg0(000, "In restoreTree::updateRefresh Is not Changed\n"); + refreshLabel->setText(tr("Refresh From JobChecks")); + } +} + +/* + * When refresh button is pushed, perform a query getting the directories and + * use parseDirectory and addDirectory to populate the directory tree with items. 
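+ *
+ * The query assembled below boils down to the following shape (the
+ * bracketed values are placeholders, not literal SQL):
+ *
+ *    SELECT DISTINCT Path.Path AS Path, File.PathId AS PathId
+ *      FROM File
+ *      INNER JOIN Path ON (File.PathId=Path.PathId)
+ *      WHERE File.FilenameId = [id of the empty filename]
+ *        AND File.JobId IN ([comma separated list of checked JobIds])
+ *
+ * i.e. only File rows whose filename is empty (directory entries) and
+ * which belong to the jobs checked in the job table are considered.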
+ */ +void restoreTree::populateDirectoryTree() +{ + m_debugTrap = true; + m_debugCnt = 0; + m_slashTrap = false; + m_dirPaths.clear(); + directoryTree->clear(); + fileTable->clear(); + fileTable->setRowCount(0); + fileTable->setColumnCount(0); + versionTable->clear(); + versionTable->setRowCount(0); + versionTable->setColumnCount(0); + m_fileExceptionHash.clear(); + m_fileExceptionMulti.clear(); + m_versionExceptionHash.clear(); + m_directoryIconStateHash.clear(); + + updateRefresh(); + int taskcount = 3, ontask = 1; + if (m_dropdownChanged) taskcount += 1; + + /* Set progress bars and repaint */ + prBar1->setVisible(true); + prBar1->setRange(0,taskcount); + prBar1->setValue(0); + prLabel1->setText(tr("Task %1 of %2").arg(ontask).arg(taskcount)); + prLabel1->setVisible(true); + prBar2->setVisible(true); + prBar2->setRange(0,0); + prLabel2->setText(tr("Querying Database")); + prLabel2->setVisible(true); + repaint(); + + if (m_dropdownChanged) { + m_prevJobCombo = jobCombo->currentText(); + m_prevClientCombo = clientCombo->currentText(); + m_prevFileSetCombo = fileSetCombo->currentText(); + m_prevLimitSpinBox = limitSpinBox->value(); + m_prevDaysSpinBox = daysSpinBox->value(); + m_prevLimitCheckState = limitCheckBox->checkState(); + m_prevDaysCheckState = daysCheckBox->checkState(); + updateRefresh(); + prBar1->setValue(ontask++); + prLabel1->setText(tr("Task %1 of %2").arg(ontask).arg(taskcount)); + prBar2->setValue(0); + prBar2->setRange(0,0); + prLabel2->setText(tr("Querying Jobs")); + repaint(); + populateJobTable(); + } + setJobsCheckedList(); + if (mainWin->m_rtPopDirDebug) Pmsg0(000, "Repopulating from checks in Job Table\n"); + + if (m_checkedJobs != "") { + /* First get the filenameid of where the nae is null. These will be the directories + * This could be done in a subquery but postgres's query analyzer won't do the right + * thing like I want */ + if (m_nullFileNameId == -1) { + QString cmd = "SELECT FilenameId FROM Filename WHERE name=''"; + if (mainWin->m_sqlDebug) + Pmsg1(000, "Query cmd : %s\n", cmd.toUtf8().data()); + QStringList qres; + if (m_console->sql_cmd(cmd, qres)) { + if (qres.count()) { + QStringList fieldlist = qres[0].split("\t"); + QString field = fieldlist[0]; + bool ok; + int val = field.toInt(&ok, 10); + if (ok) m_nullFileNameId = val; + } + } + } + /* now create the query to get the list of paths */ + QString cmd = + "SELECT DISTINCT Path.Path AS Path, File.PathId AS PathId" + " FROM File" + " INNER JOIN Path ON (File.PathId=Path.PathId)"; + if (m_nullFileNameId != -1) + cmd += " WHERE File.FilenameId=" + QString("%1").arg(m_nullFileNameId); + else + cmd += " WHERE File.FilenameId IN (SELECT FilenameId FROM Filename WHERE Name='')"; + cmd += " AND File.Jobid IN (" + m_checkedJobs + ")"; + if (mainWin->m_sqlDebug) + Pmsg1(000, "Query cmd : %s\n", cmd.toUtf8().data()); + prBar1->setValue(ontask++); + prLabel1->setText(tr("Task %1 of %2").arg(ontask).arg(taskcount)); + prBar2->setValue(0); + prBar2->setRange(0,0); + prLabel2->setText(tr("Querying for Directories")); + repaint(); + QStringList results; + m_directoryPathIdHash.clear(); + bool querydone = false; + if (m_console->sql_cmd(cmd, results)) { + if (!querydone) { + querydone = true; + prLabel2->setText(tr("Processing Directories")); + prBar2->setRange(0,results.count()); + repaint(); + } + if (mainWin->m_miscDebug) + Pmsg1(000, "Done with query %i results\n", results.count()); + QStringList fieldlist; + foreach(const QString &resultline, results) { + /* Update progress bar periodically */ + if 
((++m_debugCnt && 0x3FF) == 0) { + prBar2->setValue(m_debugCnt); + } + fieldlist = resultline.split("\t"); + int fieldcnt = 0; + /* Iterate through fields in the record */ + foreach (const QString &field, fieldlist) { + if (fieldcnt == 0 ) { + parseDirectory(field); + } else if (fieldcnt == 1) { + bool ok; + int pathid = field.toInt(&ok, 10); + if (ok) + m_directoryPathIdHash.insert(fieldlist[0], pathid); + } + fieldcnt += 1; + } + } + } else { + return; + } + } else { + QMessageBox::warning(this, "Bat", + tr("No jobs were selected in the job query !!!.\n" + "Press OK to continue"), + QMessageBox::Ok ); + } + prBar1->setVisible(false); + prBar2->setVisible(false); + prLabel1->setVisible(false); + prLabel2->setVisible(false); +} + +/* + * Function to set m_checkedJobs from the jobs that are checked in the table + * of jobs + */ +void restoreTree::setJobsCheckedList() +{ + m_JobsCheckedList = ""; + bool first = true; + /* Update the items in the version table */ + int cnt = jobTable->rowCount(); + for (int row=0; rowitem(row, 0); + if (jobItem->checkState() == Qt::Checked) { + if (!first) + m_JobsCheckedList += ","; + m_JobsCheckedList += jobItem->text(); + first = false; + jobItem->setBackground(Qt::green); + } else { + if (jobItem->flags()) + jobItem->setBackground(Qt::gray); + else + jobItem->setBackground(Qt::darkYellow); + } + } + m_checkedJobs = m_JobsCheckedList; +} + +/* + * Function to parse a directory into all possible subdirectories, then add to + * The tree. + */ +void restoreTree::parseDirectory(const QString &dir_in) +{ + // bail out if already processed + if (m_dirPaths.contains(dir_in)) + return; + // search for parent... + int pos=dir_in.lastIndexOf("/",-2); + + if (pos != -1) + { + QString parent=dir_in.left(pos+1); + QString subdir=dir_in.mid(pos+1); + + QTreeWidgetItem *item = NULL; + QTreeWidgetItem *parentItem = m_dirPaths.value(parent); + + if (parentItem==0) { + // recurse to build parent... + parseDirectory(parent); + parentItem = m_dirPaths.value(parent); + } + + /* new directories to add */ + item = new QTreeWidgetItem(parentItem); + item->setText(0, subdir); + item->setData(0, Qt::UserRole, QVariant(dir_in)); + item->setCheckState(0, Qt::Unchecked); + /* Store the current state of the check status in column 1, which at + * this point has no text*/ + item->setData(1, Qt::UserRole, QVariant(Qt::Unchecked)); + m_dirPaths.insert(dir_in,item); + } + else + { + QTreeWidgetItem *item = new QTreeWidgetItem(directoryTree); + item->setText(0, dir_in); + item->setData(0, Qt::UserRole, QVariant(dir_in)); + item->setData(1, Qt::UserRole, QVariant(Qt::Unchecked)); + item->setIcon(0, QIcon(QString::fromUtf8(":images/folder.png"))); + m_dirPaths.insert(dir_in,item); + } +} + +/* + * Virtual function which is called when this page is visible on the stack + */ +void restoreTree::currentStackItem() +{ + if(!m_populated) { + setupPage(); + m_populated = true; + } +} + +/* + * Populate the tree when refresh button pushed. 
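+ * (The button is wired up in setupPage() with
+ *    connect(refreshButton, SIGNAL(pressed()), this, SLOT(refreshButtonPushed()));
+ * so this slot simply delegates to populateDirectoryTree().)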
+ */ +void restoreTree::refreshButtonPushed() +{ + populateDirectoryTree(); +} + +/* + * Set the values of non-job combo boxes to the job defaults + */ +void restoreTree::jobComboChanged(int) +{ + if (jobCombo->currentText() == tr("Any")) { + fileSetCombo->setCurrentIndex(fileSetCombo->findText(tr("Any"), Qt::MatchExactly)); + return; + } + job_defaults job_defs; + + //(void)index; + job_defs.job_name = jobCombo->currentText(); + if (m_console->get_job_defaults(job_defs)) { + fileSetCombo->setCurrentIndex(fileSetCombo->findText(job_defs.fileset_name, Qt::MatchExactly)); + clientCombo->setCurrentIndex(clientCombo->findText(job_defs.client_name, Qt::MatchExactly)); + } +} + +/* + * Function to populate the file list table + */ +void restoreTree::directoryCurrentItemChanged(QTreeWidgetItem *item, QTreeWidgetItem *) +{ + if (item == NULL) + return; + + fileTable->clear(); + /* Also clear the version table here */ + versionTable->clear(); + versionFileLabel->setText(""); + versionTable->setRowCount(0); + versionTable->setColumnCount(0); + + QStringList headerlist = (QStringList() << tr("File Name") << tr("Filename Id")); + fileTable->setColumnCount(headerlist.size()); + fileTable->setHorizontalHeaderLabels(headerlist); + fileTable->setRowCount(0); + + m_fileCheckStateList.clear(); + disconnect(fileTable, SIGNAL(itemChanged(QTableWidgetItem *)), + this, SLOT(fileTableItemChanged(QTableWidgetItem *))); + QBrush blackBrush(Qt::black); + QString directory = item->data(0, Qt::UserRole).toString(); + directoryLabel->setText(tr("Present Working Directory: %1").arg(directory)); + int pathid = m_directoryPathIdHash.value(directory, -1); + if (pathid != -1) { + QString cmd = + "SELECT DISTINCT Filename.Name AS FileName, Filename.FilenameId AS FilenameId" + " FROM File " + " INNER JOIN Filename on (Filename.FilenameId=File.FilenameId)" + " WHERE File.PathId=" + QString("%1").arg(pathid) + + " AND File.Jobid IN (" + m_checkedJobs + ")" + " AND Filename.Name!=''" + " ORDER BY FileName"; + if (mainWin->m_sqlDebug) Pmsg1(000, "Query cmd : %s\n", cmd.toUtf8().data()); + + QStringList results; + if (m_console->sql_cmd(cmd, results)) { + + QTableWidgetItem* tableItem; + QString field; + QStringList fieldlist; + fileTable->setRowCount(results.size()); + + int row = 0; + /* Iterate through the record returned from the query */ + foreach (QString resultline, results) { + /* Iterate through fields in the record */ + int column = 0; + fieldlist = resultline.split("\t"); + foreach (field, fieldlist) { + field = field.trimmed(); /* strip leading & trailing spaces */ + tableItem = new QTableWidgetItem(field, 1); + /* Possible flags are Qt::ItemFlags flag = Qt::ItemIsSelectable | Qt::ItemIsEditablex + * | Qt::ItemIsDragEnabled | Qt::ItemIsDropEnabled | Qt::ItemIsUserCheckable + * | Qt::ItemIsEnabled | Qt::ItemIsTristate; */ + tableItem->setForeground(blackBrush); + /* Just in case a column ever gets added */ + if (mainWin->m_sqlDebug) Pmsg1(000, "Column=%d\n", column); + if (column == 0) { + Qt::ItemFlags flag = Qt::ItemIsUserCheckable | Qt::ItemIsEnabled | Qt::ItemIsTristate; + tableItem->setFlags(flag); + tableItem->setData(Qt::UserRole, QVariant(directory)); + fileTable->setItem(row, column, tableItem); + m_fileCheckStateList.append(Qt::Unchecked); + tableItem->setCheckState(Qt::Unchecked); + } else if (column == 1) { + Qt::ItemFlags flag = Qt::ItemIsEnabled; + tableItem->setFlags(flag); + bool ok; + int filenameid = field.toInt(&ok, 10); + if (!ok) filenameid = -1; + tableItem->setData(Qt::UserRole, 
QVariant(filenameid)); + fileTable->setItem(row, column, tableItem); + } + column++; + } + row++; + } + fileTable->setRowCount(row); + } + fileTable->resizeColumnsToContents(); + fileTable->resizeRowsToContents(); + fileTable->verticalHeader()->hide(); + fileTable->hideColumn(1); + if (mainWin->m_rtDirCurICDebug) Pmsg0(000, "will update file table checks\n"); + updateFileTableChecks(); + } else if (mainWin->m_sqlDebug) + Pmsg1(000, "did not perform query, pathid=%i not found\n", pathid); + connect(fileTable, SIGNAL(itemChanged(QTableWidgetItem *)), + this, SLOT(fileTableItemChanged(QTableWidgetItem *))); +} + +/* + * Function to populate the version table + */ +void restoreTree::fileCurrentItemChanged(QTableWidgetItem *currentFileTableItem, QTableWidgetItem *) +{ + if (currentFileTableItem == NULL) + return; + + int currentRow = fileTable->row(currentFileTableItem); + QTableWidgetItem *fileTableItem = fileTable->item(currentRow, 0); + QTableWidgetItem *fileNameIdTableItem = fileTable->item(currentRow, 1); + int fileNameId = fileNameIdTableItem->data(Qt::UserRole).toInt(); + + m_versionCheckStateList.clear(); + disconnect(versionTable, SIGNAL(itemChanged(QTableWidgetItem *)), + this, SLOT(versionTableItemChanged(QTableWidgetItem *))); + + QString file = fileTableItem->text(); + versionFileLabel->setText(file); + QString directory = fileTableItem->data(Qt::UserRole).toString(); + + QBrush blackBrush(Qt::black); + + QStringList headerlist = (QStringList() + << tr("Job Id") << tr("Type") << tr("End Time") << tr("Hash") << tr("FileId") << tr("Job Type") << tr("First Volume")); + versionTable->clear(); + versionTable->setColumnCount(headerlist.size()); + versionTable->setHorizontalHeaderLabels(headerlist); + versionTable->setRowCount(0); + + int pathid = m_directoryPathIdHash.value(directory, -1); + if ((pathid != -1) && (fileNameId != -1)) { + QString cmd = + "SELECT Job.JobId AS JobId, Job.Level AS Type," + " Job.EndTime AS EndTime, File.MD5 AS MD5," + " File.FileId AS FileId, Job.Type AS JobType," + " (SELECT Media.VolumeName FROM JobMedia JOIN Media ON JobMedia.MediaId=Media.MediaId WHERE JobMedia.JobId=Job.JobId ORDER BY JobMediaId LIMIT 1) AS FirstVolume" + " FROM File" + " INNER JOIN Filename on (Filename.FilenameId=File.FilenameId)" + " INNER JOIN Path ON (Path.PathId=File.PathId)" + " INNER JOIN Job ON (File.JobId=Job.JobId)" + " WHERE Path.PathId=" + QString("%1").arg(pathid) + + //" AND Filename.Name='" + file + "'" + " AND Filename.FilenameId=" + QString("%1").arg(fileNameId) + + " AND Job.Jobid IN (" + m_checkedJobs + ")" + " ORDER BY Job.EndTime DESC"; + + if (mainWin->m_sqlDebug) Pmsg1(000, "Query cmd : %s\n", cmd.toUtf8().data()); + QStringList results; + if (m_console->sql_cmd(cmd, results)) { + + QTableWidgetItem* tableItem; + QString field; + QStringList fieldlist; + versionTable->setRowCount(results.size()); + + int row = 0; + /* Iterate through the record returned from the query */ + foreach (QString resultline, results) { + fieldlist = resultline.split("\t"); + int column = 0; + /* remove directory */ + if (fieldlist[0].trimmed() != "") { + /* Iterate through fields in the record */ + foreach (field, fieldlist) { + field = field.trimmed(); /* strip leading & trailing spaces */ + if (column == 5 ) { + QByteArray jtype(field.trimmed().toLatin1()); + if (jtype.size()) { + field = job_type_to_str(jtype[0]); + } + } + tableItem = new QTableWidgetItem(field, 1); + tableItem->setFlags(0); + tableItem->setForeground(blackBrush); + tableItem->setData(Qt::UserRole, 
QVariant(directory)); + versionTable->setItem(row, column, tableItem); + if (mainWin->m_sqlDebug) Pmsg1(000, "Column=%d\n", column); + if (column == 0) { + Qt::ItemFlags flag = Qt::ItemIsUserCheckable | Qt::ItemIsEnabled | Qt::ItemIsTristate; + tableItem->setFlags(flag); + m_versionCheckStateList.append(Qt::Unchecked); + tableItem->setCheckState(Qt::Unchecked); + } + column++; + } + row++; + } + } + } + versionTable->resizeColumnsToContents(); + versionTable->resizeRowsToContents(); + versionTable->verticalHeader()->hide(); + updateVersionTableChecks(); + } else { + if (mainWin->m_sqlDebug) + Pmsg2(000, "not querying : pathid=%i fileNameId=%i\n", pathid, fileNameId); + } + connect(versionTable, SIGNAL(itemChanged(QTableWidgetItem *)), + this, SLOT(versionTableItemChanged(QTableWidgetItem *))); +} + +/* + * Save user settings associated with this page + */ +void restoreTree::writeSettings() +{ + QSettings settings(m_console->m_dir->name(), "bat"); + settings.beginGroup(m_groupText); + settings.setValue(m_splitText1, m_splitter->saveState()); + settings.setValue(m_splitText2, splitter->saveState()); + settings.endGroup(); +} + +/* + * Read and restore user settings associated with this page + */ +void restoreTree::readSettings() +{ + m_groupText = tr("RestoreTreePage"); + m_splitText1 = "splitterSizes1_3"; + m_splitText2 = "splitterSizes2_3"; + QSettings settings(m_console->m_dir->name(), "bat"); + settings.beginGroup(m_groupText); + if (settings.contains(m_splitText1)) { m_splitter->restoreState(settings.value(m_splitText1).toByteArray()); } + if (settings.contains(m_splitText2)) { splitter->restoreState(settings.value(m_splitText2).toByteArray()); } + settings.endGroup(); +} + +/* + * This is a funcion to accomplish the one thing I struggled to figure out what + * was taking so long. It add the icons, but after the tree is made. Seemed to + * work fast after changing from png to png file for graphic. + */ +void restoreTree::directoryItemExpanded(QTreeWidgetItem *item) +{ + int childCount = item->childCount(); + for (int i=0; ichild(i); + if (child->icon(0).isNull()) + child->setIcon(0, QIcon(QString::fromUtf8(":images/folder.png"))); + } +} + +/* + * Show what jobs meet the criteria and are being used to + * populate the directory tree and file and version tables. 
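+ *
+ * The job list comes from a catalog query of roughly this shape (the
+ * bracketed clauses are added only when the corresponding control on
+ * the page is set; literal values are placeholders):
+ *
+ *    SELECT Job.JobId, Job.EndTime, Job.Level, Job.Type, Job.Name, Job.PurgedFiles
+ *      FROM Job
+ *      INNER JOIN Client  ON (Job.ClientId=Client.ClientId)
+ *      INNER JOIN FileSet ON (Job.FileSetId=FileSet.FileSetId)
+ *      WHERE Job.JobStatus IN ('T','W') AND Job.Type='B'
+ *        AND Client.Name='[client]'
+ *        [AND Job.Name='[job]'] [AND FileSet.FileSet='[fileset]']
+ *        [AND Job.StartTime>'[now minus days limit]']
+ *      ORDER BY Job.EndTime DESC [LIMIT record-limit]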
+ */ +void restoreTree::populateJobTable() +{ + QBrush blackBrush(Qt::black); + + if (mainWin->m_rtPopDirDebug) Pmsg0(000, "Repopulating the Job Table\n"); + QStringList headerlist = (QStringList() + << tr("Job Id") << tr("End Time") << tr("Level") << tr("Type") + << tr("Name") << tr("Purged") << tr("TU") << tr("TD")); + m_toggleUpIndex = headerlist.indexOf(tr("TU")); + m_toggleDownIndex = headerlist.indexOf(tr("TD")); + int purgedIndex = headerlist.indexOf(tr("Purged")); + int typeIndex = headerlist.indexOf(tr("Type")); + jobTable->clear(); + jobTable->setColumnCount(headerlist.size()); + jobTable->setHorizontalHeaderLabels(headerlist); + QString jobQuery = + "SELECT Job.Jobid AS Id, Job.EndTime AS EndTime," + " Job.Level AS Level, Job.Type AS Type," + " Job.Name AS JobName, Job.purgedfiles AS Purged" + " FROM Job" + /* INNER JOIN FileSet eliminates all restore jobs */ + " INNER JOIN Client ON (Job.ClientId=Client.ClientId)" + " INNER JOIN FileSet ON (Job.FileSetId=FileSet.FileSetId)" + " WHERE" + " Job.JobStatus IN ('T','W') AND Job.Type='B' AND" + " Client.Name='" + clientCombo->currentText() + "'"; + if ((jobCombo->currentIndex() >= 0) && (jobCombo->currentText() != tr("Any"))) { + jobQuery += " AND Job.name = '" + jobCombo->currentText() + "'"; + } + if ((fileSetCombo->currentIndex() >= 0) && (fileSetCombo->currentText() != tr("Any"))) { + jobQuery += " AND FileSet.FileSet='" + fileSetCombo->currentText() + "'"; + } + /* If Limit check box For limit by days is checked */ + if (daysCheckBox->checkState() == Qt::Checked) { + QDateTime stamp = QDateTime::currentDateTime().addDays(-daysSpinBox->value()); + QString since = stamp.toString(Qt::ISODate); + jobQuery += " AND Job.Starttime>'" + since + "'"; + } + //jobQuery += " AND Job.purgedfiles=0"; + jobQuery += " ORDER BY Job.EndTime DESC"; + /* If Limit check box for limit records returned is checked */ + if (limitCheckBox->checkState() == Qt::Checked) { + QString limit; + limit.setNum(limitSpinBox->value()); + jobQuery += " LIMIT " + limit; + } + if (mainWin->m_sqlDebug) Pmsg1(000, "Query cmd : %s\n", jobQuery.toUtf8().data()); + + + QStringList results; + if (m_console->sql_cmd(jobQuery, results)) { + + QTableWidgetItem* tableItem; + QString field; + QStringList fieldlist; + jobTable->setRowCount(results.size()); + + int row = 0; + /* Iterate through the record returned from the query */ + foreach (QString resultline, results) { + fieldlist = resultline.split("\t"); + int column = 0; + /* remove directory */ + if (fieldlist[0].trimmed() != "") { + /* Iterate through fields in the record */ + foreach (field, fieldlist) { + field = field.trimmed(); /* strip leading & trailing spaces */ + if (field != "") { + if (column == typeIndex) { + QByteArray jtype(field.trimmed().toLatin1()); + if (jtype.size()) { + field = job_type_to_str(jtype[0]); + } + } + tableItem = new QTableWidgetItem(field, 1); + tableItem->setFlags(0); + tableItem->setForeground(blackBrush); + jobTable->setItem(row, column, tableItem); + if (mainWin->m_sqlDebug) Pmsg1(000, "Column=%d\n", column); + if (column == 0) { + bool ok; + int purged = fieldlist[purgedIndex].toInt(&ok, 10); + if (!((ok) && (purged == 1))) { + Qt::ItemFlags flag = Qt::ItemIsUserCheckable | Qt::ItemIsEnabled | Qt::ItemIsTristate; + tableItem->setFlags(flag); + tableItem->setCheckState(Qt::Checked); + tableItem->setBackground(Qt::green); + } else { + tableItem->setFlags(0); + tableItem->setCheckState(Qt::Unchecked); + } + } + column++; + } + } + tableItem = new 
QTableWidgetItem(QIcon(QString::fromUtf8(":images/go-up.png")), "", 1); + tableItem->setFlags(0); + tableItem->setForeground(blackBrush); + jobTable->setItem(row, column, tableItem); + column++; + tableItem = new QTableWidgetItem(QIcon(QString::fromUtf8(":images/go-down.png")), "", 1); + tableItem->setFlags(0); + tableItem->setForeground(blackBrush); + jobTable->setItem(row, column, tableItem); + row++; + } + } + } + jobTable->resizeColumnsToContents(); + jobTable->resizeRowsToContents(); + jobTable->verticalHeader()->hide(); + jobTable->hideColumn(purgedIndex); +} + +void restoreTree::jobTableCellClicked(int row, int column) +{ + if (column == m_toggleUpIndex){ + int cnt; + for (cnt=0; cntitem(cnt, 0); + if (item->flags()) { + Qt::CheckState state = item->checkState(); + if (state == Qt::Checked) + item->setCheckState(Qt::Unchecked); + else if (state == Qt::Unchecked) + item->setCheckState(Qt::Checked); + } + } + } + if (column == m_toggleDownIndex){ + int cnt, max = jobTable->rowCount(); + for (cnt=row; cntitem(cnt, 0); + if (item->flags()) { + Qt::CheckState state = item->checkState(); + if (state == Qt::Checked) + item->setCheckState(Qt::Unchecked); + else if (state == Qt::Unchecked) + item->setCheckState(Qt::Checked); + } + } + } +} + +/* + * When a directory item is "changed" check the state of the checkable item + * to see if it is different than what it was which is stored in Qt::UserRole + * of the 2nd column, column 1, of the tree widget. + */ +void restoreTree::directoryItemChanged(QTreeWidgetItem *item, int /*column*/) +{ + Qt::CheckState prevState = (Qt::CheckState)item->data(1, Qt::UserRole).toInt(); + Qt::CheckState curState = item->checkState(0); + QTreeWidgetItem* parent = item->parent(); + Qt::CheckState parState; + if (parent) parState = parent->checkState(0); + else parState = (Qt::CheckState)3; + if (mainWin->m_rtDirICDebug) { + QString msg = QString("directory item OBJECT has changed prev=%1 cur=%2 par=%3 dir=%4\n") + .arg(prevState).arg(curState).arg(parState).arg(item->text(0)); + Pmsg1(000, "%s", msg.toUtf8().data()); } + /* I only care when the check state changes */ + if (prevState == curState) { + if (mainWin->m_rtDirICDebug) Pmsg0(000, "Returning Early\n"); + return; + } + + if ((prevState == Qt::Unchecked) && (curState == Qt::Checked) && (parState != Qt::Unchecked)) { + if (mainWin->m_rtDirICDebug) Pmsg0(000, "Disconnected Setting to Qt::PartiallyChecked\n"); + directoryTreeDisconnectedSet(item, Qt::PartiallyChecked); + curState = Qt::PartiallyChecked; + } + if ((prevState == Qt::PartiallyChecked) && (curState == Qt::Checked)) { + if (mainWin->m_rtDirICDebug) Pmsg0(000, "Disconnected Setting to Qt::Unchecked\n"); + directoryTreeDisconnectedSet(item, Qt::Unchecked); + curState = Qt::Unchecked; + } + if (mainWin->m_rtDirICDebug) { + QString msg = QString("directory item CHECKSTATE has changed prev=%1 cur=%2 par=%3 dir=%4\n") + .arg(prevState).arg(curState).arg(parState).arg(item->text(0)); + Pmsg1(000, "%s", msg.toUtf8().data()); } + + item->setData(1, Qt::UserRole, QVariant(curState)); + Qt::CheckState childState = curState; + if (childState == Qt::Checked) + childState = Qt::PartiallyChecked; + setCheckofChildren(item, childState); + + /* Remove items from the exception lists. 
The multi exception list is my index + * of what exceptions can be removed when the directory is known*/ + QString directory = item->data(0, Qt::UserRole).toString(); + QStringList fullPathList = m_fileExceptionMulti.values(directory); + int fullPathListCount = fullPathList.count(); + if ((mainWin->m_rtDirICDebug) && fullPathListCount) Pmsg2(000, "Will attempt to remove file exceptions for %s count %i\n", directory.toUtf8().data(), fullPathListCount); + foreach (QString fullPath, fullPathList) { + /* If there is no value in the hash for the key fullPath a value of 3 will be returned + * which will match no Qt::xxx values */ + Qt::CheckState hashState = m_fileExceptionHash.value(fullPath, (Qt::CheckState)3); + if (mainWin->m_rtDirICDebug) Pmsg2(000, "hashState=%i childState=%i\n", hashState, childState); + if (hashState == Qt::Unchecked) { + fileExceptionRemove(fullPath, directory); + m_versionExceptionHash.remove(fullPath); + if (mainWin->m_rtDirICDebug) Pmsg0(000, "Attempted Removal A\n"); + } + if (hashState == Qt::Checked) { + fileExceptionRemove(fullPath, directory); + m_versionExceptionHash.remove(fullPath); + if (mainWin->m_rtDirICDebug) Pmsg0(000, "Attempted Removal B\n"); + } + } + + if (item == directoryTree->currentItem()) { + if (mainWin->m_rtDirICDebug) Pmsg0(000, "Will attempt to update File Table Checks\n"); + updateFileTableChecks(); + versionTable->clear(); + versionTable->setRowCount(0); + versionTable->setColumnCount(0); + } + if (mainWin->m_rtDirICDebug) Pmsg0(000, "Returning At End\n"); +} + +/* + * When a directory item check state is changed, this function iterates through + * all subdirectories and sets all to the passed state, which is either partially + * checked or unchecked. + */ +void restoreTree::setCheckofChildren(QTreeWidgetItem *item, Qt::CheckState state) +{ + int childCount; + childCount = item->childCount(); + for (int i=0; ichild(i); + child->setData(1, Qt::UserRole, QVariant(state)); + child->setCheckState(0, state); + setCheckofChildren(child, state); + } +} + +/* + * When a File Table Item is "changed" check to see if the state of the checkable + * item has changed which is stored in m_fileCheckStateList + * If changed store in a hash m_fileExceptionHash that whether this file should be + * restored or not. 
+ * Called as a slot, connected after populated (after directory current changed called) + */ +void restoreTree::fileTableItemChanged(QTableWidgetItem *item) +{ + /* get the previous and current check states */ + int row = fileTable->row(item); + Qt::CheckState prevState; + /* prevent a segfault */ + prevState = m_fileCheckStateList[row]; + Qt::CheckState curState = item->checkState(); + + /* deterimine the default state from the state of the directory */ + QTreeWidgetItem *dirTreeItem = directoryTree->currentItem(); + Qt::CheckState dirState = (Qt::CheckState)dirTreeItem->data(1, Qt::UserRole).toInt(); + Qt::CheckState defState = Qt::PartiallyChecked; + if (dirState == Qt::Unchecked) defState = Qt::Unchecked; + + /* determine if it is already in the m_fileExceptionHash */ + QString directory = directoryTree->currentItem()->data(0, Qt::UserRole).toString(); + QString file = item->text(); + QString fullPath = directory + file; + Qt::CheckState hashState = m_fileExceptionHash.value(fullPath, (Qt::CheckState)3); + int verJobNum = m_versionExceptionHash.value(fullPath, 0); + + if (mainWin->m_rtFileTabICDebug) { + QString msg = QString("filerow=%1 prev=%2 cur=%3 def=%4 hash=%5 dir=%6 verJobNum=%7\n") + .arg(row).arg(prevState).arg(curState).arg(defState).arg(hashState).arg(dirState).arg(verJobNum); + Pmsg1(000, "%s", msg.toUtf8().data()); } + + /* Remove the hash if currently checked previously unchecked and directory is checked or partial */ + if ((prevState == Qt::Checked) && (curState == Qt::Unchecked) && (dirState == Qt::Unchecked)) { + /* it can behave as defaulted so current of unchecked is fine */ + if (mainWin->m_rtFileTabICDebug) Pmsg0(000, "Will fileExceptionRemove and m_versionExceptionHash.remove here\n"); + fileExceptionRemove(fullPath, directory); + m_versionExceptionHash.remove(fullPath); + } else if ((prevState == Qt::PartiallyChecked) && (curState == Qt::Checked) && (dirState != Qt::Unchecked) && (verJobNum == 0)) { + if (mainWin->m_rtFileTabICDebug) Pmsg0(000, "Will fileExceptionInsert here\n"); + fileExceptionInsert(fullPath, directory, Qt::Unchecked); + } else if ((prevState == Qt::Unchecked) && (curState == Qt::Checked) && (dirState != Qt::Unchecked) && (verJobNum == 0) && (defState == Qt::PartiallyChecked)) { + /* filerow=2 prev=0 cur=2 def=1 hash=0 dir=2 verJobNum=0 */ + if (mainWin->m_rtFileTabICDebug) Pmsg0(000, "Will fileExceptionRemove here\n"); + fileExceptionRemove(fullPath, directory); + } else if ((prevState == Qt::Checked) && (curState == Qt::Unchecked) && (defState == Qt::PartiallyChecked) && (verJobNum != 0) && (hashState == Qt::Checked)) { + /* Check dir, check version, attempt uncheck in file + * filerow=4 prev=2 cur=0 def=1 hash=2 dir=2 verJobNum=53 */ + if (mainWin->m_rtFileTabICDebug) Pmsg0(000, "Will fileExceptionRemove and m_versionExceptionHash.remove here\n"); + fileExceptionRemove(fullPath, directory); + m_versionExceptionHash.remove(fullPath); + } else if ((prevState == Qt::Unchecked) && (curState == Qt::Checked) && (dirState != Qt::Unchecked) && (verJobNum == 0)) { + /* filerow=0 prev=0 cur=2 def=1 hash=0 dirState=2 verJobNum */ + if (mainWin->m_rtFileTabICDebug) Pmsg0(000, "Will Not remove here\n"); + } else if (prevState != curState) { + if (mainWin->m_rtFileTabICDebug) Pmsg2(000, " THE STATE OF THE Check has changed, Setting StateList[%i] to %i\n", row, curState); + /* A user did not set the check state to Partially checked, ignore if so */ + if (curState != Qt::PartiallyChecked) { + if ((defState == Qt::Unchecked) && (prevState == 
Qt::PartiallyChecked) && (curState == Qt::Unchecked)) { + if (mainWin->m_rtFileTabICDebug) Pmsg0(000, " got here\n"); + } else { + if (mainWin->m_rtFileTabICDebug) Pmsg2(000, " Inserting into m_fileExceptionHash %s, %i\n", fullPath.toUtf8().data(), curState); + fileExceptionInsert(fullPath, directory, curState); + } + } else { + if (mainWin->m_rtFileTabICDebug) Pmsg1(000, "Removing version hash for %s\n", fullPath.toUtf8().data()); + /* programattically been changed back to a default state of Qt::PartiallyChecked remove the version hash here */ + m_versionExceptionHash.remove(fullPath); + } + } + + updateFileTableChecks(); + updateVersionTableChecks(); +} + +/* + * function to insert keys and values to both m_fileExceptionHash and m_fileExceptionMulti + */ +void restoreTree::fileExceptionInsert(QString &fullPath, QString &direcotry, Qt::CheckState state) +{ + m_fileExceptionHash.insert(fullPath, state); + m_fileExceptionMulti.insert(direcotry, fullPath); + directoryIconStateInsert(fullPath, state); +} + +/* + * function to remove keys from both m_fileExceptionHash and m_fileExceptionMulti + */ +void restoreTree::fileExceptionRemove(QString &fullPath, QString &directory) +{ + m_fileExceptionHash.remove(fullPath); + /* pull the list of values in the multi */ + QStringList fullPathList = m_fileExceptionMulti.values(directory); + /* get the index of the fullpath to remove */ + int index = fullPathList.indexOf(fullPath); + if (index != -1) { + /* remove the desired item in the list */ + fullPathList.removeAt(index); + /* remove the entire list from the multi */ + m_fileExceptionMulti.remove(directory); + /* readd the remaining */ + foreach (QString fp, fullPathList) { + m_fileExceptionMulti.insert(directory, fp); + } + } + directoryIconStateRemove(); +} + +/* + * Overloaded function to be called from the slot and from other places to set the state + * of the check marks in the version table + */ +void restoreTree::versionTableItemChanged(QTableWidgetItem *item) +{ + /* get the previous and current check states */ + int row = versionTable->row(item); + QTableWidgetItem *colZeroItem = versionTable->item(row, 0); + Qt::CheckState prevState = m_versionCheckStateList[row]; + Qt::CheckState curState = (Qt::CheckState)colZeroItem->checkState(); + m_versionCheckStateList[row] = curState; + + /* deterimine the default state from the state of the file */ + QTableWidgetItem *fileTableItem = fileTable->currentItem(); + Qt::CheckState fileState = (Qt::CheckState)fileTableItem->checkState(); + + /* determine the default state */ + Qt::CheckState defState; + if (mainWin->m_sqlDebug) Pmsg1(000, "row=%d\n", row); + if (row == 0) { + defState = Qt::PartiallyChecked; + if (fileState == Qt::Unchecked) + defState = Qt::Unchecked; + } else { + defState = Qt::Unchecked; + } + + /* determine if it is already in the versionExceptionHash */ + QString directory = directoryTree->currentItem()->data(0, Qt::UserRole).toString(); + Qt::CheckState dirState = directoryTree->currentItem()->checkState(0); + QString file = fileTableItem->text(); + QString fullPath = directory + file; + int thisJobNum = colZeroItem->text().toInt(); + int hashJobNum = m_versionExceptionHash.value(fullPath, 0); + + if (mainWin->m_rtVerTabICDebug) { + QString msg = QString("versrow=%1 prev=%2 cur=%3 def=%4 dir=%5 hashJobNum=%6 thisJobNum=%7 filestate=%8 fec=%9 vec=%10\n") + .arg(row).arg(prevState).arg(curState).arg(defState).arg(dirState).arg(hashJobNum).arg(thisJobNum).arg(fileState) + 
.arg(m_fileExceptionHash.count()).arg(m_versionExceptionHash.count()); + Pmsg1(000, "%s", msg.toUtf8().data()); } + /* if changed from partially checked to checked, make it unchecked */ + if ((curState == Qt::Checked) && (row == 0) && (fileState == Qt::Unchecked)) { + if (mainWin->m_rtVerTabICDebug) Pmsg0(000, "Setting to Qt::Checked\n"); + fileTableItem->setCheckState(Qt::Checked); + } else if ((prevState == Qt::PartiallyChecked) && (curState == Qt::Checked) && (row == 0) && (fileState == Qt::Checked) && (dirState == Qt::Unchecked)) { + //versrow=0 prev=1 cur=2 def=1 dir=0 hashJobNum=0 thisJobNum=64 filestate=2 fec=1 vec=0 + if (mainWin->m_rtVerTabICDebug) Pmsg1(000, "fileExceptionRemove %s, %i\n", fullPath.toUtf8().data()); + fileExceptionRemove(fullPath, directory); + } else if ((curState == Qt::Checked) && (row == 0) && (hashJobNum != 0) && (dirState != Qt::Unchecked)) { + //versrow=0 prev=0 cur=2 def=1 dir=2 hashJobNum=53 thisJobNum=64 filestate=2 fec=1 vec=1 + if (mainWin->m_rtVerTabICDebug) Pmsg1(000, "m_versionExceptionHash.remove %s\n", fullPath.toUtf8().data()); + m_versionExceptionHash.remove(fullPath); + fileExceptionRemove(fullPath, directory); + } else if ((curState == Qt::Checked) && (row == 0)) { + if (mainWin->m_rtVerTabICDebug) Pmsg1(000, "m_versionExceptionHash.remove %s\n", fullPath.toUtf8().data()); + m_versionExceptionHash.remove(fullPath); + } else if (prevState != curState) { + if (mainWin->m_rtVerTabICDebug) Pmsg2(000, " THE STATE OF THE version Check has changed, Setting StateList[%i] to %i\n", row, curState); + if ((curState == Qt::Checked) || (curState == Qt::PartiallyChecked)) { + if (mainWin->m_rtVerTabICDebug) Pmsg2(000, "Inserting into m_versionExceptionHash %s, %i\n", fullPath.toUtf8().data(), thisJobNum); + m_versionExceptionHash.insert(fullPath, thisJobNum); + if (fileState != Qt::Checked) { + if (mainWin->m_rtVerTabICDebug) Pmsg2(000, "Inserting into m_fileExceptionHash %s, %i\n", fullPath.toUtf8().data(), curState); + fileExceptionInsert(fullPath, directory, curState); + } + } else { + if (mainWin->m_rtVerTabICDebug) Pmsg0(000, "got here\n"); + } + } else { + if (mainWin->m_rtVerTabICDebug) Pmsg0(000, "no conditions met\n"); + } + + updateFileTableChecks(); + updateVersionTableChecks(); +} + +/* + * Simple function to set the check state in the file table by disconnecting the + * signal/slot the setting then reconnecting the signal/slot + */ +void restoreTree::fileTableDisconnectedSet(QTableWidgetItem *item, Qt::CheckState state, bool color) +{ + disconnect(fileTable, SIGNAL(itemChanged(QTableWidgetItem *)), + this, SLOT(fileTableItemChanged(QTableWidgetItem *))); + item->setCheckState(state); + if (color) item->setBackground(Qt::yellow); + else item->setBackground(Qt::white); + connect(fileTable, SIGNAL(itemChanged(QTableWidgetItem *)), + this, SLOT(fileTableItemChanged(QTableWidgetItem *))); +} + +/* + * Simple function to set the check state in the version table by disconnecting the + * signal/slot the setting then reconnecting the signal/slot + */ +void restoreTree::versionTableDisconnectedSet(QTableWidgetItem *item, Qt::CheckState state) +{ + disconnect(versionTable, SIGNAL(itemChanged(QTableWidgetItem *)), + this, SLOT(versionTableItemChanged(QTableWidgetItem *))); + item->setCheckState(state); + connect(versionTable, SIGNAL(itemChanged(QTableWidgetItem *)), + this, SLOT(versionTableItemChanged(QTableWidgetItem *))); +} + +/* + * Simple function to set the check state in the directory tree by disconnecting the + * signal/slot the setting then 
reconnecting the signal/slot + */ +void restoreTree::directoryTreeDisconnectedSet(QTreeWidgetItem *item, Qt::CheckState state) +{ + disconnect(directoryTree, SIGNAL(itemChanged(QTreeWidgetItem *, int)), + this, SLOT(directoryItemChanged(QTreeWidgetItem *, int))); + item->setCheckState(0, state); + connect(directoryTree, SIGNAL(itemChanged(QTreeWidgetItem *, int)), + this, SLOT(directoryItemChanged(QTreeWidgetItem *, int))); +} + +/* + * Simplify the updating of the check state in the File table by iterating through + * each item in the file table to determine its appropriate state. + * !! Will probably want to concoct a way to do this without iterating for the possibility + * of very large directories. + */ +void restoreTree::updateFileTableChecks() +{ + /* determine the default state from the state of the directory */ + QTreeWidgetItem *dirTreeItem = directoryTree->currentItem(); + Qt::CheckState dirState = dirTreeItem->checkState(0); + + QString dirName = dirTreeItem->data(0, Qt::UserRole).toString(); + + /* Update the items in the file table */ + int rcnt = fileTable->rowCount(); + for (int row=0; row<rcnt; row++) { + QTableWidgetItem *item = fileTable->item(row, 0); + if (!item) { return; } + + Qt::CheckState curState = item->checkState(); + Qt::CheckState newState = Qt::PartiallyChecked; + if (dirState == Qt::Unchecked) newState = Qt::Unchecked; + + /* determine if it is already in the m_fileExceptionHash */ + QString file = item->text(); + QString fullPath = dirName + file; + Qt::CheckState hashState = m_fileExceptionHash.value(fullPath, (Qt::CheckState)3); + int hashJobNum = m_versionExceptionHash.value(fullPath, 0); + + if (hashState != 3) newState = hashState; + + if (mainWin->m_rtUpdateFTDebug) { + QString msg = QString("file row=%1 cur=%2 hash=%3 new=%4 dirState=%5\n") + .arg(row).arg(curState).arg(hashState).arg(newState).arg(dirState); + Pmsg1(000, "%s", msg.toUtf8().data()); + } + + bool docolor = false; + if (hashJobNum != 0) docolor = true; + bool isyellow = item->background().color() == QColor(Qt::yellow); + if ((newState != curState) || (hashState == 3) || ((isyellow && !docolor) || (!isyellow && docolor))) + fileTableDisconnectedSet(item, newState, docolor); + m_fileCheckStateList[row] = newState; + } +} + +/* + * Simplify the updating of the check state in the Version table by iterating through + * each item in the version table to determine its appropriate state. 
+ */ +void restoreTree::updateVersionTableChecks() +{ + /* deterimine the default state from the state of the directory */ + QTreeWidgetItem *dirTreeItem = directoryTree->currentItem(); + Qt::CheckState dirState = dirTreeItem->checkState(0); + QString dirName = dirTreeItem->data(0, Qt::UserRole).toString(); + + /* deterimine the default state from the state of the file */ + QTableWidgetItem *fileTableItem = fileTable->item(fileTable->currentRow(), 0); + if (!fileTableItem) { return; } + Qt::CheckState fileState = fileTableItem->checkState(); + QString file = fileTableItem->text(); + QString fullPath = dirName + file; + int hashJobNum = m_versionExceptionHash.value(fullPath, 0); + + /* Update the items in the version table */ + int cnt = versionTable->rowCount(); + for (int row=0; rowitem(row, 0); + if (!item) { break; } + + Qt::CheckState curState = item->checkState(); + Qt::CheckState newState = Qt::Unchecked; + + if ((row == 0) && (fileState != Qt::Unchecked) && (hashJobNum == 0)) + newState = Qt::PartiallyChecked; + /* determine if it is already in the versionExceptionHash */ + if (hashJobNum) { + int thisJobNum = item->text().toInt(); + if (thisJobNum == hashJobNum) + newState = Qt::Checked; + } + if (mainWin->m_rtChecksDebug) { + QString msg = QString("ver row=%1 cur=%2 hashJobNum=%3 new=%4 dirState=%5\n") + .arg(row).arg(curState).arg(hashJobNum).arg(newState).arg(dirState); + Pmsg1(000, "%s", msg.toUtf8().data()); + } + if (newState != curState) + versionTableDisconnectedSet(item, newState); + m_versionCheckStateList[row] = newState; + } +} + +/* + * Quick subroutine to "return via subPaths" a list of subpaths when passed a fullPath + */ +void restoreTree::fullPathtoSubPaths(QStringList &subPaths, QString &fullPath_in) +{ + int index; + bool done = false; + QString fullPath = fullPath_in; + QString direct, path; + while (((index = fullPath.lastIndexOf("/", -2)) != -1) && (!done)) { + direct = path = fullPath; + path.replace(index+1, fullPath.length()-index-1, ""); + direct.replace(0, index+1, ""); + if (false) { + QString msg = QString("length = \"%1\" index = \"%2\" Considering \"%3\" \"%4\"\n") + .arg(fullPath.length()).arg(index).arg(path).arg(direct); + Pmsg0(000, msg.toUtf8().data()); + } + fullPath = path; + subPaths.append(fullPath); + } +} + +/* + * A Function to set the icon state and insert a record into + * m_directoryIconStateHash when an exception is added by the user + */ +void restoreTree::directoryIconStateInsert(QString &fullPath, Qt::CheckState excpState) +{ + QStringList paths; + fullPathtoSubPaths(paths, fullPath); + /* an exception that causes the item in the file table to be "Checked" has occured */ + if (excpState == Qt::Checked) { + bool foundAsUnChecked = false; + QTreeWidgetItem *firstItem = m_dirPaths.value(paths[0]); + if (firstItem) { + if (firstItem->checkState(0) == Qt::Unchecked) + foundAsUnChecked = true; + } + if (foundAsUnChecked) { + /* as long as directory item is Unchecked, set icon state to "green check" */ + bool done = false; + QListIterator siter(paths); + while (siter.hasNext() && !done) { + QString path = siter.next(); + QTreeWidgetItem *item = m_dirPaths.value(path); + if (item) { + if (item->checkState(0) != Qt::Unchecked) + done = true; + else { + directorySetIcon(1, FolderGreenChecked, path, item); + if (mainWin->m_rtIconStateDebug) Pmsg1(000, "In restoreTree::directoryIconStateInsert inserting %s\n", path.toUtf8().data()); + } + } + } + } else { + /* if it is partially checked or fully checked insert green Check until a unchecked is 
found in the path */ + if (mainWin->m_rtIconStateDebug) Pmsg1(000, "In restoreTree::directoryIconStateInsert Aqua %s\n", paths[0].toUtf8().data()); + bool done = false; + QListIterator siter(paths); + while (siter.hasNext() && !done) { + QString path = siter.next(); + QTreeWidgetItem *item = m_dirPaths.value(path); + if (item) { /* if the directory item is checked, set icon state to unchecked "green check" */ + if (item->checkState(0) == Qt::Checked) + done = true; + directorySetIcon(1, FolderGreenChecked, path, item); + if (mainWin->m_rtIconStateDebug) Pmsg1(000, "In restoreTree::directoryIconStateInsert boogie %s\n", path.toUtf8().data()); + } + } + } + } + /* an exception that causes the item in the file table to be "Unchecked" has occured */ + if (excpState == Qt::Unchecked) { + bool done = false; + QListIterator siter(paths); + while (siter.hasNext() && !done) { + QString path = siter.next(); + QTreeWidgetItem *item = m_dirPaths.value(path); + if (item) { /* if the directory item is checked, set icon state to unchecked "white check" */ + if (item->checkState(0) == Qt::Checked) + done = true; + directorySetIcon(1, FolderWhiteChecked, path, item); + if (mainWin->m_rtIconStateDebug) Pmsg1(000, "In restoreTree::directoryIconStateInsert boogie %s\n", path.toUtf8().data()); + } + } + } +} + +/* + * A function to set the icon state back to "folder" and to remove a record from + * m_directoryIconStateHash when an exception is removed by a user. + */ +void restoreTree::directoryIconStateRemove() +{ + QHash shouldBeIconStateHash; + /* First determine all paths with icons that should be checked with m_fileExceptionHash */ + /* Use iterator tera to iterate through m_fileExceptionHash */ + QHashIterator tera(m_fileExceptionHash); + while (tera.hasNext()) { + tera.next(); + if (mainWin->m_rtIconStateDebug) Pmsg2(000, "Alpha Key %s value %i\n", tera.key().toUtf8().data(), tera.value()); + + QString keyPath = tera.key(); + Qt::CheckState state = tera.value(); + + QStringList paths; + fullPathtoSubPaths(paths, keyPath); + /* if the state of the item in m_fileExceptionHash is checked + * each of the subpaths should be "Checked Green" */ + if (state == Qt::Checked) { + + bool foundAsUnChecked = false; + QTreeWidgetItem *firstItem = m_dirPaths.value(paths[0]); + if (firstItem) { + if (firstItem->checkState(0) == Qt::Unchecked) + foundAsUnChecked = true; + } + if (foundAsUnChecked) { + /* The right most directory is Unchecked, iterate leftwards + * as long as directory item is Unchecked, set icon state to "green check" */ + bool done = false; + QListIterator siter(paths); + while (siter.hasNext() && !done) { + QString path = siter.next(); + QTreeWidgetItem *item = m_dirPaths.value(path); + if (item) { + if (item->checkState(0) != Qt::Unchecked) + done = true; + else { + shouldBeIconStateHash.insert(path, FolderGreenChecked); + if (mainWin->m_rtIconStateDebug) Pmsg1(000, "In restoreTree::directoryIconStateInsert inserting %s\n", path.toUtf8().data()); + } + } + } + } + else { + /* The right most directory is Unchecked, iterate leftwards + * until directory item is Checked, set icon state to "green check" */ + bool done = false; + QListIterator siter(paths); + while (siter.hasNext() && !done) { + QString path = siter.next(); + QTreeWidgetItem *item = m_dirPaths.value(path); + if (item) { + if (item->checkState(0) == Qt::Checked) + done = true; + shouldBeIconStateHash.insert(path, FolderGreenChecked); + } + } + } + } + /* if the state of the item in m_fileExceptionHash is UNChecked + * each of the subpaths 
should be "Checked white" until the tree item + * which represents that path is Qt::Checked */ + if (state == Qt::Unchecked) { + bool done = false; + QListIterator siter(paths); + while (siter.hasNext() && !done) { + QString path = siter.next(); + QTreeWidgetItem *item = m_dirPaths.value(path); + if (item) { + if (item->checkState(0) == Qt::Checked) + done = true; + shouldBeIconStateHash.insert(path, FolderWhiteChecked); + } + } + } + } + /* now iterate through m_directoryIconStateHash which are the items that are checked + * and remove all of those that are not in shouldBeIconStateHash */ + QHashIterator iter(m_directoryIconStateHash); + while (iter.hasNext()) { + iter.next(); + if (mainWin->m_rtIconStateDebug) Pmsg2(000, "Beta Key %s value %i\n", iter.key().toUtf8().data(), iter.value()); + + QString keyPath = iter.key(); + if (shouldBeIconStateHash.value(keyPath)) { + if (mainWin->m_rtIconStateDebug) Pmsg1(000, "WAS found in shouldBeStateHash %s\n", keyPath.toUtf8().data()); + //newval = m_directoryIconStateHash.value(path, FolderUnchecked) & (~change); + int newval = shouldBeIconStateHash.value(keyPath); + newval = ~newval; + newval = newval & FolderBothChecked; + QTreeWidgetItem *item = m_dirPaths.value(keyPath); + if (item) + directorySetIcon(0, newval, keyPath, item); + } else { + if (mainWin->m_rtIconStateDebug) Pmsg1(000, "NOT found in shouldBeStateHash %s\n", keyPath.toUtf8().data()); + QTreeWidgetItem *item = m_dirPaths.value(keyPath); + if (item) + directorySetIcon(0, FolderBothChecked, keyPath, item); + //item->setIcon(0,QIcon(QString::fromUtf8(":images/folder.png"))); + //m_directoryIconStateHash.remove(keyPath); + } + } +} + +void restoreTree::directorySetIcon(int operation, int change, QString &path, QTreeWidgetItem* item) { + int newval; + /* we are adding a check type white or green */ + if (operation > 0) { + /* get the old val and "bitwise OR" with the change */ + newval = m_directoryIconStateHash.value(path, FolderUnchecked) | change; + if (mainWin->m_rtIconStateDebug) Pmsg2(000, "Inserting into m_directoryIconStateHash path=%s newval=%i\n", path.toUtf8().data(), newval); + m_directoryIconStateHash.insert(path, newval); + } else { + /* we are removing a check type white or green */ + newval = m_directoryIconStateHash.value(path, FolderUnchecked) & (~change); + if (newval == 0) { + if (mainWin->m_rtIconStateDebug) Pmsg2(000, "Removing from m_directoryIconStateHash path=%s newval=%i\n", path.toUtf8().data(), newval); + m_directoryIconStateHash.remove(path); + } + else { + if (mainWin->m_rtIconStateDebug) Pmsg2(000, "Inserting into m_directoryIconStateHash path=%s newval=%i\n", path.toUtf8().data(), newval); + m_directoryIconStateHash.insert(path, newval); + } + } + if (newval == FolderUnchecked) + item->setIcon(0, QIcon(QString::fromUtf8(":images/folder.png"))); + else if (newval == FolderGreenChecked) + item->setIcon(0, QIcon(QString::fromUtf8(":images/folderchecked.png"))); + else if (newval == FolderWhiteChecked) + item->setIcon(0, QIcon(QString::fromUtf8(":images/folderunchecked.png"))); + else if (newval == FolderBothChecked) + item->setIcon(0, QIcon(QString::fromUtf8(":images/folderbothchecked.png"))); +} + +/* + * Restore Button + */ +void restoreTree::restoreButtonPushed() +{ + /* Set progress bars and repaint */ + prLabel1->setVisible(true); + prLabel1->setText(tr("Task 1 of 3")); + prLabel2->setVisible(true); + prLabel2->setText(tr("Processing Checked directories")); + prBar1->setVisible(true); + prBar1->setRange(0, 3); + prBar1->setValue(0); + 
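(Aside: directorySetIcon above keeps one small bitmask per directory path in m_directoryIconStateHash: FolderGreenChecked (1) and FolderWhiteChecked (2) combine to FolderBothChecked (3), and a value of FolderUnchecked (0) means the entry is dropped. The following minimal, self-contained sketch shows just that arithmetic with plain QtCore types; the helper names addIconFlag/removeIconFlag and the sample path are invented for illustration and are not part of this source.)

#include <QHash>
#include <QString>
#include <QtGlobal>

enum { FolderUnchecked = 0, FolderGreenChecked = 1, FolderWhiteChecked = 2, FolderBothChecked = 3 };

/* OR a check-type bit into the stored state for a path */
static void addIconFlag(QHash<QString, int> &state, const QString &path, int flag)
{
   state.insert(path, state.value(path, FolderUnchecked) | flag);
}

/* clear a check-type bit; drop the entry once no bits remain */
static void removeIconFlag(QHash<QString, int> &state, const QString &path, int flag)
{
   int newval = state.value(path, FolderUnchecked) & ~flag;
   if (newval == FolderUnchecked) state.remove(path);
   else state.insert(path, newval);
}

int main()
{
   QHash<QString, int> state;
   addIconFlag(state, "/etc/", FolderGreenChecked);
   addIconFlag(state, "/etc/", FolderWhiteChecked);
   Q_ASSERT(state.value("/etc/") == FolderBothChecked);  /* both check types present */
   removeIconFlag(state, "/etc/", FolderGreenChecked);
   Q_ASSERT(state.value("/etc/") == FolderWhiteChecked); /* only the white check left */
   removeIconFlag(state, "/etc/", FolderWhiteChecked);
   Q_ASSERT(!state.contains("/etc/"));                   /* back to the plain folder icon */
   return 0;
}

(In the code above the resulting value additionally selects one of the four folder icons: folder.png, folderchecked.png, folderunchecked.png or folderbothchecked.png.)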
prBar2->setVisible(true); + prBar2->setRange(0, 0); + repaint(); + QMultiHash versionFilesMulti; + int vFMCounter = 0; + QHash fullPathDone; + QHash fileIndexHash; + if ((mainWin->m_rtRestore1Debug) || (mainWin->m_rtRestore2Debug) || (mainWin->m_rtRestore3Debug)) + Pmsg0(000, "In restoreTree::restoreButtonPushed\n"); + /* Use a tree widget item iterator to count directories for the progress bar */ + QTreeWidgetItemIterator diterc(directoryTree, QTreeWidgetItemIterator::Checked); + int ditcount = 0; + while (*diterc) { + ditcount += 1; + ++diterc; + } /* while (*diterc) */ + prBar2->setRange(0, ditcount); + prBar2->setValue(0); + ditcount = 0; + /* Use a tree widget item iterator filtering for Checked Items */ + QTreeWidgetItemIterator diter(directoryTree, QTreeWidgetItemIterator::Checked); + while (*diter) { + QString directory = (*diter)->data(0, Qt::UserRole).toString(); + int pathid = m_directoryPathIdHash.value(directory, -1); + if (pathid != -1) { + if (mainWin->m_rtRestore1Debug) + Pmsg1(000, "Directory Checked=\"%s\"\n", directory.toUtf8().data()); + /* With a checked directory, query for the files in the directory */ + + QString cmd = + "SELECT Filename.Name AS Filename, t1.JobId AS JobId, File.FileIndex AS FileIndex" + " FROM" + " ( SELECT File.FilenameId AS FilenameId, MAX(Job.JobId) AS JobId" + " FROM File" + " INNER JOIN Job ON (Job.JobId=File.JobId)" + " WHERE File.PathId=" + QString("%1").arg(pathid) + + " AND Job.Jobid IN (" + m_checkedJobs + ")" + " GROUP BY File.FilenameId" + ") t1, File " + " INNER JOIN Filename on (Filename.FilenameId=File.FilenameId)" + " INNER JOIN Job ON (Job.JobId=File.JobId)" + " WHERE File.PathId=" + QString("%1").arg(pathid) + + " AND File.FilenameId=t1.FilenameId" + " AND Job.Jobid=t1.JobId" + " ORDER BY Filename"; + + if (mainWin->m_sqlDebug) Pmsg1(000, "Query cmd : %s\n", cmd.toUtf8().data()); + QStringList results; + if (m_console->sql_cmd(cmd, results)) { + QStringList fieldlist; + + int row = 0; + /* Iterate through the record returned from the query */ + foreach (QString resultline, results) { + /* Iterate through fields in the record */ + int column = 0; + QString fullPath = ""; + Qt::CheckState fileExcpState = (Qt::CheckState)4; + fieldlist = resultline.split("\t"); + int version = 0; + int fileIndex = 0; + foreach (QString field, fieldlist) { + if (column == 0) { + fullPath = directory + field; + } + if (column == 1) { + version = field.toInt(); + } + if (column == 2) { + fileIndex = field.toInt(); + } + column++; + } + fileExcpState = m_fileExceptionHash.value(fullPath, (Qt::CheckState)3); + + int excpVersion = m_versionExceptionHash.value(fullPath, 0); + if (fileExcpState != Qt::Unchecked) { + QString debugtext; + if (excpVersion != 0) { + debugtext = QString("*E* version=%1").arg(excpVersion); + version = excpVersion; + fileIndex = queryFileIndex(fullPath, excpVersion); + } else + debugtext = QString("___ version=%1").arg(version); + if (mainWin->m_rtRestore1Debug) + Pmsg2(000, "Restoring %s File %s\n", debugtext.toUtf8().data(), fullPath.toUtf8().data()); + fullPathDone.insert(fullPath, 1); + fileIndexHash.insert(fullPath, fileIndex); + versionFilesMulti.insert(version, fullPath); + vFMCounter += 1; + } + row++; + } + } + } + ditcount += 1; + prBar2->setValue(ditcount); + ++diter; + } /* while (*diter) */ + prBar1->setValue(1); + prLabel1->setText( tr("Task 2 of 3")); + prLabel2->setText(tr("Processing Exceptions")); + prBar2->setRange(0, 0); + repaint(); + + /* There may be some exceptions not accounted for yet with fullPathDone */ 
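(Before that exception pass, a minimal standalone sketch of the QMultiHash used here may help: versionFilesMulti groups every selected path under the JobId it will be restored from, and each distinct JobId later becomes one batch of rows for the temporary restore table. The paths and JobIds below are invented sample data, and uniqueKeys() is shown only as one alternative to the doneKeys bookkeeping used further down.)

#include <QMultiHash>
#include <QString>
#include <QtDebug>

int main()
{
   /* group full paths under the JobId (version) they should be restored from */
   QMultiHash<int, QString> versionFilesMulti;
   versionFilesMulti.insert(64, "/etc/passwd");   /* invented sample data */
   versionFilesMulti.insert(64, "/etc/group");
   versionFilesMulti.insert(53, "/etc/hosts");

   /* one pass per distinct JobId, mirroring the temporary-table fill below */
   foreach (int version, versionFilesMulti.uniqueKeys()) {
      qDebug() << "JobId" << version << "restores" << versionFilesMulti.values(version);
   }
   return 0;
}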
+ QHashIterator ftera(m_fileExceptionHash); + while (ftera.hasNext()) { + ftera.next(); + QString fullPath = ftera.key(); + Qt::CheckState state = ftera.value(); + if (state != 0) { + /* now we don't want the ones already done */ + if (fullPathDone.value(fullPath, 0) == 0) { + int version = m_versionExceptionHash.value(fullPath, 0); + int fileIndex = 0; + QString debugtext = ""; + if (version != 0) { + fileIndex = queryFileIndex(fullPath, version); + debugtext = QString("E1* version=%1 fileid=%2").arg(version).arg(fileIndex); + } else { + version = mostRecentVersionfromFullPath(fullPath); + if (version) { + fileIndex = queryFileIndex(fullPath, version); + debugtext = QString("E2* version=%1 fileid=%2").arg(version).arg(fileIndex); + } else + debugtext = QString("Error det vers").arg(version); + } + if (mainWin->m_rtRestore1Debug) + Pmsg2(000, "Restoring %s file %s\n", debugtext.toUtf8().data(), fullPath.toUtf8().data()); + versionFilesMulti.insert(version, fullPath); + vFMCounter += 1; + fileIndexHash.insert(fullPath, fileIndex); + } /* if fullPathDone.value(fullPath, 0) == 0 */ + } /* if state != 0 */ + } /* while ftera.hasNext */ + /* The progress bars for the next step */ + prBar1->setValue(2); + prLabel1->setText(tr("Task 3 of 3")); + prLabel2->setText(tr("Filling Database Table")); + prBar2->setRange(0, vFMCounter); + vFMCounter = 0; + prBar2->setValue(vFMCounter); + repaint(); + + /* now for the final spit out of the versions and lists of files for each version */ + QHash doneKeys; + QHashIterator vFMiter(versionFilesMulti); + QString tempTable = ""; + QList jobList; + while (vFMiter.hasNext()) { + vFMiter.next(); + int fversion = vFMiter.key(); + /* did not succeed in getting an iterator to work as expected on versionFilesMulti so use doneKeys */ + if (doneKeys.value(fversion, 0) == 0) { + if (tempTable == "") { + QSettings settings("www.bacula.org", "bat"); + settings.beginGroup("Restore"); + int counter = settings.value("Counter", 1).toInt(); + settings.setValue("Counter", counter+1); + settings.endGroup(); + tempTable = "restore_" + QString("%1").arg(qrand()) + "_" + QString("%1").arg(counter); + QString sqlcmd = "CREATE TEMPORARY TABLE " + tempTable + " (JobId INTEGER, FileIndex INTEGER)"; + if (mainWin->m_sqlDebug) + Pmsg1(000, "Query cmd : %s ;\n", sqlcmd.toUtf8().data()); + QStringList results; + if (!m_console->sql_cmd(sqlcmd, results)) + Pmsg1(000, "CREATE TABLE FAILED!!!! %s\n", sqlcmd.toUtf8().data()); + } + + if (mainWin->m_rtRestore2Debug) Pmsg1(000, "Version->%i\n", fversion); + QStringList fullPathList = versionFilesMulti.values(fversion); + /* create the command to perform the restore */ + foreach(QString ffullPath, fullPathList) { + int fileIndex = fileIndexHash.value(ffullPath); + if (mainWin->m_rtRestore2Debug) Pmsg2(000, " file->%s id %i\n", ffullPath.toUtf8().data(), fileIndex); + QString sqlcmd = "INSERT INTO " + tempTable + " (JobId, FileIndex) VALUES (" + QString("%1").arg(fversion) + ", " + QString("%1").arg(fileIndex) + ")"; + if (mainWin->m_rtRestore3Debug) + Pmsg1(000, "Insert cmd : %s\n", sqlcmd.toUtf8().data()); + QStringList results; + if (!m_console->sql_cmd(sqlcmd, results)) + Pmsg1(000, "INSERT INTO FAILED!!!! 
%s\n", sqlcmd.toUtf8().data()); + prBar2->setValue(++vFMCounter); + } /* foreach fullPathList */ + doneKeys.insert(fversion,1); + jobList.append(fversion); + } /* if (doneKeys.value(fversion, 0) == 0) */ + } /* while (vFMiter.hasNext()) */ + if (tempTable != "") { + /* a table was made, lets run the job */ + QString jobOption = " jobid=\""; + bool first = true; + /* create a list of jobs comma separated */ + foreach (int job, jobList) { + if (first) first = false; + else jobOption += ","; + jobOption += QString("%1").arg(job); + } + jobOption += "\""; + QString cmd = QString("restore"); + cmd += jobOption + + " client=\"" + m_prevClientCombo + "\"" + + " file=\"?" + tempTable + "\" done"; + if (mainWin->m_commandDebug) + Pmsg1(000, "preRestore command \'%s\'\n", cmd.toUtf8().data()); + consoleCommand(cmd); + } + /* turn off the progress widgets */ + prBar1->setVisible(false); + prBar2->setVisible(false); + prLabel1->setVisible(false); + prLabel2->setVisible(false); +} + +int restoreTree::mostRecentVersionfromFullPath(QString &fullPath) +{ + int qversion = 0; + QString directory, fileName; + int index = fullPath.lastIndexOf("/", -2); + if (index != -1) { + directory = fileName = fullPath; + directory.replace(index+1, fullPath.length()-index-1, ""); + fileName.replace(0, index+1, ""); + if (false) { + QString msg = QString("length = \"%1\" index = \"%2\" Considering \"%3\" \"%4\"\n") + .arg(fullPath.length()).arg(index).arg(fileName).arg(directory); + Pmsg0(000, msg.toUtf8().data()); + } + int pathid = m_directoryPathIdHash.value(directory, -1); + if (pathid != -1) { + /* so now we need the latest version from the database */ + QString cmd = + "SELECT MAX(Job.JobId)" + " FROM File " + " INNER JOIN Filename on (Filename.FilenameId=File.FilenameId)" + " INNER JOIN Job ON (File.JobId=Job.JobId)" + " WHERE File.PathId=" + QString("%1").arg(pathid) + + " AND Job.Jobid IN (" + m_checkedJobs + ")" + " AND Filename.Name='" + fileName + "'" + " AND File.FilenameId!=" + QString("%1").arg(m_nullFileNameId) + + " GROUP BY Filename.Name"; + + if (mainWin->m_sqlDebug) Pmsg1(000, "Query cmd : %s\n", cmd.toUtf8().data()); + QStringList results; + if (m_console->sql_cmd(cmd, results)) { + QStringList fieldlist; + int row = 0; + /* Iterate through the record returned from the query */ + foreach (QString resultline, results) { + /* Iterate through fields in the record */ + int column = 0; + fieldlist = resultline.split("\t"); + foreach (QString field, fieldlist) { + if (column == 0) { + qversion = field.toInt(); + } + column++; + } + row++; + } + } + } + } /* if (index != -1) */ + return qversion; +} + + +int restoreTree::queryFileIndex(QString &fullPath, int jobId) +{ + int qfileIndex = 0; + QString directory, fileName; + int index = fullPath.lastIndexOf("/", -2); + if (mainWin->m_sqlDebug) Pmsg1(000, "Index=%d\n", index); + if (index != -1) { + directory = fileName = fullPath; + directory.replace(index+1, fullPath.length()-index-1, ""); + fileName.replace(0, index+1, ""); + if (false) { + QString msg = QString("length = \"%1\" index = \"%2\" Considering \"%3\" \"%4\"\n") + .arg(fullPath.length()).arg(index).arg(fileName).arg(directory); + Pmsg0(000, msg.toUtf8().data()); + } + int pathid = m_directoryPathIdHash.value(directory, -1); + if (pathid != -1) { + /* so now we need the latest version from the database */ + QString cmd = + "SELECT" + " File.FileIndex" + " FROM File" + " INNER JOIN Filename on (Filename.FilenameId=File.FilenameId)" + " INNER JOIN Job ON (File.JobId=Job.JobId)" + " WHERE File.PathId=" 
+ QString("%1").arg(pathid) + + " AND Filename.Name='" + fileName + "'" + " AND Job.Jobid='" + QString("%1").arg(jobId) + "'" + " GROUP BY File.FileIndex"; + if (mainWin->m_sqlDebug) Pmsg1(000, "Query cmd : %s\n", cmd.toUtf8().data()); + QStringList results; + if (m_console->sql_cmd(cmd, results)) { + QStringList fieldlist; + int row = 0; + /* Iterate through the record returned from the query */ + foreach (QString resultline, results) { + /* Iterate through fields in the record */ + int column = 0; + fieldlist = resultline.split("\t"); + foreach (QString field, fieldlist) { + if (column == 0) { + qfileIndex = field.toInt(); + } + column++; + } + row++; + } + } + } + } /* if (index != -1) */ + if (mainWin->m_sqlDebug) Pmsg1(000, "qfileIndex=%d\n", qfileIndex); + return qfileIndex; +} + + +void restoreTree::PgSeltreeWidgetClicked() +{ + if (!isOnceDocked()) { + dockPage(); + } +} diff --git a/src/qt-console/restore/restoretree.h b/src/qt-console/restore/restoretree.h new file mode 100644 index 00000000..3bbef589 --- /dev/null +++ b/src/qt-console/restore/restoretree.h @@ -0,0 +1,115 @@ +#ifndef _RESTORETREE_H_ +#define _RESTORETREE_H_ + +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Kern Sibbald, February 2007 + */ + +#if QT_VERSION >= 0x050000 +#include +#else +#include +#endif +#include "pages.h" +#include "ui_restoretree.h" + + +/* + * A restore tree to view files in the catalog + */ +class restoreTree : public Pages, public Ui::restoreTreeForm +{ + Q_OBJECT + +public: + restoreTree(); + ~restoreTree(); + virtual void PgSeltreeWidgetClicked(); + virtual void currentStackItem(); + enum folderCheckState + { + FolderUnchecked = 0, + FolderGreenChecked = 1, + FolderWhiteChecked = 2, + FolderBothChecked = 3 + }; + +private slots: + void refreshButtonPushed(); + void restoreButtonPushed(); + void jobComboChanged(int); + void directoryCurrentItemChanged(QTreeWidgetItem *, QTreeWidgetItem *); + void fileCurrentItemChanged(QTableWidgetItem *,QTableWidgetItem *); + void directoryItemExpanded(QTreeWidgetItem *); + void setCheckofChildren(QTreeWidgetItem *item, Qt::CheckState); + void directoryItemChanged(QTreeWidgetItem *, int); + void fileTableItemChanged(QTableWidgetItem *); + void versionTableItemChanged(QTableWidgetItem *); + void updateRefresh(); + void jobTableCellClicked(int, int); + +private: + void populateDirectoryTree(); + void populateJobTable(); + void parseDirectory(const QString &dir_in); + void setupPage(); + void writeSettings(); + void readSettings(); + void fileExceptionInsert(QString &, QString &, Qt::CheckState); + void fileExceptionRemove(QString &, QString &); + void directoryTreeDisconnectedSet(QTreeWidgetItem *, Qt::CheckState); + void fileTableDisconnectedSet(QTableWidgetItem *, Qt::CheckState, bool color); + void versionTableDisconnectedSet(QTableWidgetItem *, Qt::CheckState); + void updateFileTableChecks(); + void updateVersionTableChecks(); + void directoryIconStateInsert(QString &, Qt::CheckState); + void directoryIconStateRemove(); + void directorySetIcon(int operation, int change, QString &, QTreeWidgetItem* item); + void fullPathtoSubPaths(QStringList &, QString &); + int mostRecentVersionfromFullPath(QString &); + void setJobsCheckedList(); + int queryFileIndex(QString &fullPath, int jobID); + + QSplitter *m_splitter; + QString m_groupText; + QString m_splitText1; + QString m_splitText2; + bool m_populated; + bool m_dropdownChanged; + bool m_slashTrap; + QHash m_dirPaths; + QString m_checkedJobs, m_prevJobCombo, m_prevClientCombo, m_prevFileSetCombo; + int m_prevLimitSpinBox, m_prevDaysSpinBox; + Qt::CheckState m_prevLimitCheckState, m_prevDaysCheckState; + QString m_JobsCheckedList; + int m_debugCnt; + bool m_debugTrap; + QList m_fileCheckStateList; + QList m_versionCheckStateList; + QHash m_fileExceptionHash; + QMultiHash m_fileExceptionMulti; + QHash m_versionExceptionHash; + QHash m_directoryIconStateHash; + QHash m_directoryPathIdHash; + int m_toggleUpIndex, m_toggleDownIndex, m_nullFileNameId; +}; + +#endif /* _RESTORETREE_H_ */ diff --git a/src/qt-console/restore/restoretree.ui b/src/qt-console/restore/restoretree.ui new file mode 100644 index 00000000..77f1affd --- /dev/null +++ b/src/qt-console/restore/restoretree.ui @@ -0,0 +1,428 @@ + + + restoreTreeForm + + + + 0 + 0 + 695 + 432 + + + + Form + + + + + 120 + 190 + 382 + 221 + + + + Qt::Horizontal + + + + + 6 + + + 0 + + + + + Jobs + + + Qt::AlignCenter + + + + + + + + + + TextLabel + + + Qt::AlignCenter + + + + + + + true + + + 24 + + + Qt::Horizontal + + + + + + + TextLabel + + + Qt::AlignCenter + + + + + + + true + + + 24 + + + Qt::Horizontal + + + + + + + + + 1 + + + + + + + 6 + + + 0 + + + + + Files + + + Qt::AlignCenter + + + + + + + + + + + + 6 + + + 0 + + + + + 
Versions of File + + + Qt::AlignCenter + + + + + + + FileName + + + Qt::AlignCenter + + + + + + + + + + + + + 20 + 59 + 669 + 95 + + + + + 900 + 95 + + + + + 3 + + + 2 + + + + + 4 + + + 2 + + + + + 2 + + + 3 + + + + + Refresh + + + + :/images/view-refresh.png:/images/view-refresh.png + + + + + + + TextLabel + + + + + + + Restore + + + + :/images/restore.png:/images/restore.png + + + + + + + + + 3 + + + 2 + + + + + Job + + + + + + + + 0 + 0 + + + + Job List Job Criterion Selector + + + Job List Job Criterion Selector + + + + + + + + + 3 + + + 2 + + + + + Client + + + + + + + + 0 + 0 + + + + Job List Client Criterion Selector + + + Job List Client Criterion Selector + + + + + + + + + 3 + + + 2 + + + + + FileSet + + + + + + + + 0 + 0 + + + + Job List Fileset Criterion Selector + + + Job List Fileset Criterion Selector + + + + + + + + + 3 + + + 2 + + + + + Record Limit + + + + + + + 1 + + + 10000 + + + 25 + + + + + + + + + 3 + + + 2 + + + + + Days Limit + + + + + + + 7 + + + + + + + + + + + + 0 + 0 + + + + PointingHandCursor + + + Directory + + + + + + + + + + + + + Select Directory + + + + + + + + + + UnselectDirectory + + + + + + + + diff --git a/src/qt-console/restore/runrestore.ui b/src/qt-console/restore/runrestore.ui new file mode 100644 index 00000000..20ff1609 --- /dev/null +++ b/src/qt-console/restore/runrestore.ui @@ -0,0 +1,366 @@ + + + bRunRestoreForm + + + + 0 + 0 + 385 + 438 + + + + Run restore + + + + + + 0 + + + + Standard + + + + + + Restore options + + + + QFormLayout::ExpandingFieldsGrow + + + + + Client: + + + Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter + + + + + + + + + + Where: + + + Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter + + + + + + + + + + Replace: + + + Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter + + + + + + + + + + Comment: + + + Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter + + + + + + + + + + + + + Media needed + + + + + + false + + + Qt::NoPen + + + + InChanger + + + + + Volume + + + + + + + + Compute with directories + + + false + + + + + + + + + + + Advanced + + + + + + File Relocation + + + + + + Use file +relocation: + + + + + + + + + + + + + + Strip prefix: + + + + + + + false + + + + + + + Add prefix: + + + + + + + false + + + + + + + Add suffix: + + + + + + + false + + + + + + + Use regexp: + + + + + + + false + + + + + + + + + + Where +regexp: + + + + + + + false + + + + + + + + + + Other options + + + + + + When: + + + + + + + Priority: + + + + + + + 10 + + + + + + + Storage: + + + + + + + + + + + + + true + + + + + + + Job: + + + + + + + + + + + + + + + + + Qt::Horizontal + + + QDialogButtonBox::Cancel|QDialogButtonBox::Ok + + + + + + + + + ActionBp + accepted() + bRunRestoreForm + accept() + + + 222 + 422 + + + 157 + 274 + + + + + ActionBp + rejected() + bRunRestoreForm + reject() + + + 290 + 428 + + + 286 + 274 + + + + + UseFileRelocationChk + clicked(bool) + WhereEntry + setDisabled(bool) + + + 107 + 72 + + + 225 + 102 + + + + + diff --git a/src/qt-console/run/estimate.cpp b/src/qt-console/run/estimate.cpp new file mode 100644 index 00000000..8a31697f --- /dev/null +++ b/src/qt-console/run/estimate.cpp @@ -0,0 +1,114 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +/* + * Run Dialog class + * + * Kern Sibbald, February MMVII + * + * $Id$ + */ + +#include "bat.h" +#include "run.h" + +/* + * Setup all the combo boxes and display the dialog + */ +estimatePage::estimatePage() : Pages() +{ + QDateTime dt; + + m_name = tr("Estimate"); + pgInitialize(); + setupUi(this); + m_conn = m_console->notifyOff(); + + m_console->beginNewCommand(m_conn); + jobCombo->addItems(m_console->job_list); + filesetCombo->addItems(m_console->fileset_list); + levelCombo->addItems(m_console->level_list); + clientCombo->addItems(m_console->client_list); + job_name_change(0); + connect(jobCombo, SIGNAL(currentIndexChanged(int)), this, SLOT(job_name_change(int))); + connect(okButton, SIGNAL(pressed()), this, SLOT(okButtonPushed())); + connect(cancelButton, SIGNAL(pressed()), this, SLOT(cancelButtonPushed())); + QTreeWidgetItem* thisitem = mainWin->getFromHash(this); + thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/estimate-job.png"))); + + dockPage(); + setCurrent(); + this->show(); + m_aButtonPushed = false; +} + +void estimatePage::okButtonPushed() +{ + if (m_aButtonPushed) return; + m_aButtonPushed = true; + this->hide(); + QString cmd; + QTextStream(&cmd) << "estimate" << + " job=\"" << jobCombo->currentText() << "\"" << + " fileset=\"" << filesetCombo->currentText() << "\"" << + " level=\"" << levelCombo->currentText() << "\"" << + " client=\"" << clientCombo->currentText() << "\""; + if (listingCheckBox->checkState() == Qt::Checked) { + cmd += " listing"; + } + + if (mainWin->m_commandDebug) { + Pmsg1(000, "command : %s\n", cmd.toUtf8().data()); + } + + consoleCommand(cmd, m_conn, true, true); + m_console->notify(m_conn, true); + closeStackPage(); + mainWin->resetFocus(); +} + + +void estimatePage::cancelButtonPushed() +{ + if (m_aButtonPushed) return; + m_aButtonPushed = true; + mainWin->set_status(" Canceled"); + this->hide(); + m_console->notify(m_conn, true); + closeStackPage(); + mainWin->resetFocus(); +} + +/* + * Called here when the jobname combo box is changed. + * We load the default values for the new job in the + * other combo boxes. 
+ */ +void estimatePage::job_name_change(int index) +{ + job_defaults job_defs; + + (void)index; + job_defs.job_name = jobCombo->currentText(); + if (m_console->get_job_defaults(m_conn, job_defs)) { + filesetCombo->setCurrentIndex(filesetCombo->findText(job_defs.fileset_name, Qt::MatchExactly)); + levelCombo->setCurrentIndex(levelCombo->findText(job_defs.level, Qt::MatchExactly)); + clientCombo->setCurrentIndex(clientCombo->findText(job_defs.client_name, Qt::MatchExactly)); + } +} diff --git a/src/qt-console/run/estimate.ui b/src/qt-console/run/estimate.ui new file mode 100644 index 00000000..bc5e6382 --- /dev/null +++ b/src/qt-console/run/estimate.ui @@ -0,0 +1,281 @@ + + estimateForm + + + + 0 + 0 + 562 + 308 + + + + Form + + + + 9 + + + 6 + + + + + 0 + + + 6 + + + + + 0 + + + 6 + + + + + Qt::RightToLeft + + + List Files + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + + + 0 + + + 6 + + + + + + + + Level: + + + levelCombo + + + + + + + + + + + + + Client: + + + clientCombo + + + + + + + + + + + 65 + 16777215 + + + + Job: + + + jobCombo + + + + + + + FileSet: + + + filesetCombo + + + + + + + + + 0 + + + 6 + + + + + Qt::Horizontal + + + + 71 + 21 + + + + + + + + + 16777215 + 30 + + + + <h3>Estimate a backup Job</h3> + + + + + + + Qt::Horizontal + + + + 81 + 20 + + + + + + + + + + 0 + + + 6 + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + OK + + + + + + + Cancel + + + + + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + Qt::Vertical + + + QSizePolicy::Expanding + + + + 351 + 16 + + + + + + + + Qt::Vertical + + + QSizePolicy::Expanding + + + + 351 + 16 + + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + + diff --git a/src/qt-console/run/prune.cpp b/src/qt-console/run/prune.cpp new file mode 100644 index 00000000..64955770 --- /dev/null +++ b/src/qt-console/run/prune.cpp @@ -0,0 +1,134 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +/* + * Run Dialog class + * + * Kern Sibbald, February MMVII + * + * $Id$ + */ + +#include "bat.h" +#include "run.h" + +/* + * Setup all the combo boxes and display the dialog + */ +prunePage::prunePage(const QString &volume, const QString &client) : Pages() +{ + QDateTime dt; + + m_name = tr("Prune"); + pgInitialize(); + setupUi(this); + m_conn = m_console->notifyOff(); + + QString query("SELECT VolumeName AS Media FROM Media ORDER BY Media"); + if (mainWin->m_sqlDebug) { + Pmsg1(000, "Query cmd : %s\n",query.toUtf8().data()); + } + QStringList results, volumeList; + if (m_console->sql_cmd(query, results)) { + QString field; + QStringList fieldlist; + /* Iterate through the lines of results. 
*/ + foreach (QString resultline, results) { + fieldlist = resultline.split("\t"); + volumeList.append(fieldlist[0]); + } /* foreach resultline */ + } /* if results from query */ + + volumeCombo->addItem(tr("Any")); + volumeCombo->addItems(volumeList); + clientCombo->addItem(tr("Any")); + clientCombo->addItems(m_console->client_list); + connect(okButton, SIGNAL(pressed()), this, SLOT(okButtonPushed())); + connect(cancelButton, SIGNAL(pressed()), this, SLOT(cancelButtonPushed())); + filesRadioButton->setChecked(true); + if (clientCombo->findText(client, Qt::MatchExactly) != -1) + clientCombo->setCurrentIndex(clientCombo->findText(client, Qt::MatchExactly)); + else + clientCombo->setCurrentIndex(0); + if (volumeCombo->findText(volume, Qt::MatchExactly) != -1) + volumeCombo->setCurrentIndex(volumeCombo->findText(volume, Qt::MatchExactly)); + else + volumeCombo->setCurrentIndex(0); + connect(volumeCombo, SIGNAL(currentIndexChanged(int)), this, SLOT(volumeChanged())); + connect(clientCombo, SIGNAL(currentIndexChanged(int)), this, SLOT(clientChanged())); + + dockPage(); + setCurrent(); + this->show(); +} + +void prunePage::okButtonPushed() +{ + this->hide(); + QString cmd("prune"); + if (filesRadioButton->isChecked()) { + cmd += " files"; + } + if (jobsRadioButton->isChecked()) { + cmd += " jobs"; + } + if (filesRadioButton->isChecked()) { + cmd += " volume"; + } + if (volumeCombo->currentText() != tr("Any")) { + cmd += " volume=\"" + volumeCombo->currentText() + "\""; + } + if (clientCombo->currentText() != tr("Any")) { + cmd += " client=\"" + clientCombo->currentText() + "\""; + } + cmd += " yes"; + + if (mainWin->m_commandDebug) { + Pmsg1(000, "command : %s\n", cmd.toUtf8().data()); + } + + consoleCommand(cmd); + m_console->notify(m_conn, true); + closeStackPage(); + mainWin->resetFocus(); +} + + +void prunePage::cancelButtonPushed() +{ + mainWin->set_status(tr(" Canceled")); + this->hide(); + m_console->notify(m_conn, true); + closeStackPage(); + mainWin->resetFocus(); +} + +void prunePage::volumeChanged() +{ + if ((volumeCombo->currentText() == tr("Any")) && (clientCombo->currentText() == tr("Any"))) { + clientCombo->setCurrentIndex(1); + } +} + +void prunePage::clientChanged() +{ + if ((volumeCombo->currentText() == tr("Any")) && (clientCombo->currentText() == tr("Any"))) { + volumeCombo->setCurrentIndex(1); + } +} diff --git a/src/qt-console/run/prune.ui b/src/qt-console/run/prune.ui new file mode 100644 index 00000000..15d151bf --- /dev/null +++ b/src/qt-console/run/prune.ui @@ -0,0 +1,302 @@ + + pruneForm + + + + 0 + 0 + 514 + 363 + + + + Form + + + + 9 + + + 6 + + + + + 0 + + + 6 + + + + + 0 + + + 6 + + + + + Prune Files + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + + + Volume: + + + volumeCombo + + + + + + + + + + 0 + + + 6 + + + + + Qt::Horizontal + + + + 71 + 21 + + + + + + + + + 16777215 + 30 + + + + <h3>Prune Files/Jobs/Volumes</h3> + + + + + + + Qt::Horizontal + + + + 81 + 20 + + + + + + + + + + 0 + + + 6 + + + + + Prune Jobs + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + + + 0 + + + 6 + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + OK + + + + + + + Cancel + + + + + + + + + + + + + 65 + 16777215 + + + + Client: + + + clientCombo + + + + + + + 0 + + + 6 + + + + + Prune Volumes + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + + + + + Qt::Vertical + + + QSizePolicy::Expanding + + + + 351 + 16 + + + + + + + + Qt::Vertical + + + QSizePolicy::Expanding + + + + 351 + 16 + + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + 
Qt::Horizontal + + + + 40 + 20 + + + + + + + + + diff --git a/src/qt-console/run/run.cpp b/src/qt-console/run/run.cpp new file mode 100644 index 00000000..c8199e5e --- /dev/null +++ b/src/qt-console/run/run.cpp @@ -0,0 +1,186 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +/* + * Run Dialog class + * + * Kern Sibbald, February MMVII + * + */ + +#include "bat.h" +#include "run.h" + + +runPage::runPage() : Pages() +{ + init(); + show(); +} + +runPage::runPage(const QString &defJob) : Pages() +{ + m_dockOnFirstUse = false; + init(); + if (defJob != "") + jobCombo->setCurrentIndex(jobCombo->findText(defJob, Qt::MatchExactly)); + show(); +} + + +runPage::runPage(const QString &defJob, const QString &level, + const QString &pool, const QString &storage, + const QString &client, const QString &fileset) + : Pages() +{ + m_dockOnFirstUse = false; + init(); + jobCombo->setCurrentIndex(jobCombo->findText(defJob, Qt::MatchExactly)); + job_name_change(0); + filesetCombo->setCurrentIndex(filesetCombo->findText(fileset, + Qt::MatchExactly)); + levelCombo->setCurrentIndex(levelCombo->findText(level, Qt::MatchExactly)); + clientCombo->setCurrentIndex(clientCombo->findText(client,Qt::MatchExactly)); + poolCombo->setCurrentIndex(poolCombo->findText(pool, Qt::MatchExactly)); + + if (storage != "") { // TODO: enable storage + storageCombo->setCurrentIndex(storageCombo->findText(storage, + Qt::MatchExactly)); + } + show(); +} + + +/* + * Setup all the combo boxes and display the dialog + */ +void runPage::init() +{ + QDateTime dt; + QDesktopWidget *desk = QApplication::desktop(); + QRect scrn; + + m_name = tr("Run"); + pgInitialize(); + setupUi(this); + /* Get screen rectangle */ + scrn = desk->screenGeometry(desk->primaryScreen()); + /* Position this window in the middle of the screen */ + this->move((scrn.width()-this->width())/2, (scrn.height()-this->height())/2); + QTreeWidgetItem* thisitem = mainWin->getFromHash(this); + thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/run.png"))); + m_conn = m_console->notifyOff(); + + m_console->beginNewCommand(m_conn); + jobCombo->addItems(m_console->job_list); + filesetCombo->addItems(m_console->fileset_list); + levelCombo->addItems(m_console->level_list); + clientCombo->addItems(m_console->client_list); + poolCombo->addItems(m_console->pool_list); + storageCombo->addItems(m_console->storage_list); + dateTimeEdit->setDisplayFormat(mainWin->m_dtformat); + dateTimeEdit->setDateTime(dt.currentDateTime()); + /*printf("listing messages resources"); ***FIME *** + foreach(QString mes, m_console->messages_list) { + printf("%s\n", mes.toUtf8().data()); + }*/ + messagesCombo->addItems(m_console->messages_list); + messagesCombo->setEnabled(false); + job_name_change(0); + connect(jobCombo, SIGNAL(currentIndexChanged(int)), this, SLOT(job_name_change(int))); + connect(okButton, SIGNAL(pressed()), this, SLOT(okButtonPushed())); + connect(cancelButton, SIGNAL(pressed()), this, 
SLOT(cancelButtonPushed())); + + // find a way to place the new window at the cursor position + // or in the middle of the page +// dockPage(); + setCurrent(); +} + +void runPage::okButtonPushed() +{ + this->hide(); + QString cmd; + QTextStream(&cmd) << "run" << + " job=\"" << jobCombo->currentText() << "\"" << + " fileset=\"" << filesetCombo->currentText() << "\"" << + " level=\"" << levelCombo->currentText() << "\"" << + " client=\"" << clientCombo->currentText() << "\"" << + " pool=\"" << poolCombo->currentText() << "\"" << + " storage=\"" << storageCombo->currentText() << "\"" << + " priority=\"" << prioritySpin->value() << "\"" + " when=\"" << dateTimeEdit->dateTime().toString(mainWin->m_dtformat) << "\""; +#ifdef xxx + " messages=\"" << messagesCombo->currentText() << "\""; + /* FIXME when there is an option to modify the messages resoruce associated + * with a job */ +#endif + if (bootstrap->text() != "") { + cmd += " bootstrap=\"" + bootstrap->text() + "\""; + } + cmd += " yes"; + + if (mainWin->m_commandDebug) { + Pmsg1(000, "command : %s\n", cmd.toUtf8().data()); + } + + consoleCommand(cmd); + m_console->notify(m_conn, true); + closeStackPage(); + mainWin->resetFocus(); +} + + +void runPage::cancelButtonPushed() +{ + mainWin->set_status(tr(" Canceled")); + this->hide(); + m_console->notify(m_conn, true); + closeStackPage(); + mainWin->resetFocus(); +} + +/* + * Called here when the jobname combo box is changed. + * We load the default values for the new job in the + * other combo boxes. + */ +void runPage::job_name_change(int index) +{ + job_defaults job_defs; + + (void)index; + job_defs.job_name = jobCombo->currentText(); + if (m_console->get_job_defaults(job_defs)) { + QString cmd; + typeLabel->setText("
<H3>"+job_defs.type+"</H3>
"); + filesetCombo->setCurrentIndex(filesetCombo->findText(job_defs.fileset_name, Qt::MatchExactly)); + levelCombo->setCurrentIndex(levelCombo->findText(job_defs.level, Qt::MatchExactly)); + clientCombo->setCurrentIndex(clientCombo->findText(job_defs.client_name, Qt::MatchExactly)); + poolCombo->setCurrentIndex(poolCombo->findText(job_defs.pool_name, Qt::MatchExactly)); + storageCombo->setCurrentIndex(storageCombo->findText(job_defs.store_name, Qt::MatchExactly)); + messagesCombo->setCurrentIndex(messagesCombo->findText(job_defs.messages_name, Qt::MatchExactly)); + m_console->level_list.clear(); + cmd = ".levels " + job_defs.type; + m_console->dir_cmd(cmd, m_console->level_list); + levelCombo->clear(); + levelCombo->addItems(m_console->level_list); + levelCombo->setCurrentIndex(levelCombo->findText(job_defs.level, 0 /*Qt::MatchExactly*/)); + } +} diff --git a/src/qt-console/run/run.h b/src/qt-console/run/run.h new file mode 100644 index 00000000..edbbcd90 --- /dev/null +++ b/src/qt-console/run/run.h @@ -0,0 +1,110 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +#ifndef _RUN_H_ +#define _RUN_H_ + +#if QT_VERSION >= 0x050000 +#include +#else +#include +#endif +#include "ui_run.h" +#include "ui_runcmd.h" +#include "ui_estimate.h" +#include "ui_prune.h" +#include "console.h" + +class runPage : public Pages, public Ui::runForm +{ + Q_OBJECT + +public: + runPage(); + + runPage(const QString &defJob); + + runPage(const QString &defJob, + const QString &level, + const QString &pool, + const QString &storage, + const QString &client, + const QString &fileset); + +public slots: + void okButtonPushed(); + void cancelButtonPushed(); + void job_name_change(int index); + +private: + void init(); + int m_conn; +}; + +class runCmdPage : public Pages, public Ui::runCmdForm +{ + Q_OBJECT + +public: + runCmdPage(int conn); + +public slots: + void okButtonPushed(); + void cancelButtonPushed(); + +private: + void fill(); + int m_conn; +}; + +class estimatePage : public Pages, public Ui::estimateForm +{ + Q_OBJECT + +public: + estimatePage(); + +public slots: + void okButtonPushed(); + void cancelButtonPushed(); + void job_name_change(int index); + +private: + int m_conn; + bool m_aButtonPushed; +}; + +class prunePage : public Pages, public Ui::pruneForm +{ + Q_OBJECT + +public: + prunePage(const QString &volume, const QString &client); + +public slots: + void okButtonPushed(); + void cancelButtonPushed(); + void volumeChanged(); + void clientChanged(); + +private: + int m_conn; +}; + +#endif /* _RUN_H_ */ diff --git a/src/qt-console/run/run.ui b/src/qt-console/run/run.ui new file mode 100644 index 00000000..0fc3eae8 --- /dev/null +++ b/src/qt-console/run/run.ui @@ -0,0 +1,357 @@ + + runForm + + + + 0 + 0 + 594 + 415 + + + + + 0 + 0 + + + + Run job + + + + + + + 16777215 + 30 + + + + + 11 + + + + <h3>Run a Job</h3> + + + + + + + + 0 + 0 + + + + + 0 + 5 + + + + Qt::Horizontal + + + + + + + + + + + + :/images/runit.png + + + + + + + 
Job properties + + + + + + Job: + + + jobCombo + + + + + + + + + + 6 + + + 0 + + + 0 + + + 0 + + + 0 + + + + + Type: + + + + + + + <h3>Backup<h3/> + + + + + + + + + Client: + + + clientCombo + + + + + + + + + + Priority: + + + prioritySpin + + + + + + + 1 + + + 10000 + + + 10 + + + + + + + Level: + + + levelCombo + + + + + + + + + + Qt::Vertical + + + + 56 + 151 + + + + + + + + FileSet: + + + filesetCombo + + + + + + + + + + Pool: + + + poolCombo + + + + + + + + + + Storage: + + + storageCombo + + + + + + + + + + Messages: + + + messagesCombo + + + + + + + + + + When: + + + dateTimeEdit + + + + + + + + 0 + 2 + 0 + 2000 + 1 + 1 + + + + yyyy-mm-dd hh:mm:ss + + + true + + + + + + + Bootstrap: + + + true + + + bootstrap + + + + + + + true + + + + 200 + 0 + + + + false + + + + + + + + + + + + + 0 + 0 + + + + + 0 + 5 + + + + Qt::Horizontal + + + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + OK + + + + + + + Cancel + + + + + + + + + + + + diff --git a/src/qt-console/run/runadmin.ui b/src/qt-console/run/runadmin.ui new file mode 100644 index 00000000..68d87631 --- /dev/null +++ b/src/qt-console/run/runadmin.ui @@ -0,0 +1,405 @@ + + runAdminForm + + + + 0 + 0 + 704 + 466 + + + + Form + + + + 9 + + + 6 + + + + + 0 + + + 6 + + + + + 0 + + + 6 + + + + + Priority: + + + prioritySpin + + + + + + + 10000 + + + 1 + + + 12 + + + + + + + + + Qt::Vertical + + + + 20 + 171 + + + + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + Qt::Vertical + + + QSizePolicy::MinimumExpanding + + + + 572 + 16 + + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + Qt::Vertical + + + QSizePolicy::MinimumExpanding + + + + 572 + 16 + + + + + + + + 0 + + + 6 + + + + + + 0 + 2 + 0 + 2000 + 1 + 1 + + + + yyyy-mm-dd hh:mm:ss + + + true + + + + + + + When: + + + dateTimeEdit + + + + + + + Where: + + + true + + + where + + + + + + + Bootstrap: + + + bootstrap + + + + + + + + + + + + + Job: + + + jobCombo + + + + + + + true + + + + 200 + 0 + + + + false + + + + + + + Storage: + + + storageCombo + + + + + + + FileSet: + + + filesetCombo + + + + + + + true + + + + 200 + 0 + + + + false + + + + + + + + + + + + + + + + Replace: + + + + + + + + + + To client: + + + clientCombo + + + + + + + Catalog: + + + catalogCombo + + + + + + + + + 0 + + + 6 + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + OK + + + + + + + Cancel + + + + + + + + + 0 + + + 6 + + + + + Qt::Horizontal + + + QSizePolicy::Fixed + + + + 131 + 25 + + + + + + + + + 16777215 + 30 + + + + <h3>Run Restore Job</h3> + + + Qt::AlignCenter + + + + + + + Qt::Horizontal + + + QSizePolicy::Fixed + + + + 131 + 25 + + + + + + + + + + + diff --git a/src/qt-console/run/runbackup.ui b/src/qt-console/run/runbackup.ui new file mode 100644 index 00000000..bb46cf26 --- /dev/null +++ b/src/qt-console/run/runbackup.ui @@ -0,0 +1,405 @@ + + runBackupForm + + + + 0 + 0 + 704 + 466 + + + + Form + + + + 9 + + + 6 + + + + + 0 + + + 6 + + + + + 0 + + + 6 + + + + + Priority: + + + prioritySpin + + + + + + + 10000 + + + 1 + + + 12 + + + + + + + + + Qt::Vertical + + + + 20 + 171 + + + + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + Qt::Vertical + + + QSizePolicy::MinimumExpanding + + + + 572 + 16 + + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + Qt::Vertical + + + QSizePolicy::MinimumExpanding + + + + 572 + 16 + + + + + + + + 0 + + + 6 + + + + + + 0 + 2 + 0 + 2000 + 1 + 1 + + + + yyyy-mm-dd hh:mm:ss + + + true + + + + + + + When: + + + dateTimeEdit + + + + + + + Where: + + + true + + + where + + + + + + + Bootstrap: + + + bootstrap + + + + + + 
+ + + + + + + Job: + + + jobCombo + + + + + + + true + + + + 200 + 0 + + + + false + + + + + + + Storage: + + + storageCombo + + + + + + + FileSet: + + + filesetCombo + + + + + + + true + + + + 200 + 0 + + + + false + + + + + + + + + + + + + + + + Replace: + + + + + + + + + + To client: + + + clientCombo + + + + + + + Catalog: + + + catalogCombo + + + + + + + + + 0 + + + 6 + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + OK + + + + + + + Cancel + + + + + + + + + 0 + + + 6 + + + + + Qt::Horizontal + + + QSizePolicy::Fixed + + + + 131 + 25 + + + + + + + + + 16777215 + 30 + + + + <h3>Run Restore Job</h3> + + + Qt::AlignCenter + + + + + + + Qt::Horizontal + + + QSizePolicy::Fixed + + + + 131 + 25 + + + + + + + + + + + diff --git a/src/qt-console/run/runcmd.cpp b/src/qt-console/run/runcmd.cpp new file mode 100644 index 00000000..c7616b0a --- /dev/null +++ b/src/qt-console/run/runcmd.cpp @@ -0,0 +1,171 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +/* + * Run Command Dialog class + * + * This is called when a Run Command signal is received from the + * Director. We parse the Director's output and throw up a + * dialog box. This happens, for example, after the user finishes + * selecting files to be restored. The Director will then submit a + * run command, that causes this page to be popped up. 
+ * + * Kern Sibbald, March MMVII + * + */ + +#include "bat.h" +#include "run.h" + +/* + * Setup all the combo boxes and display the dialog + */ +runCmdPage::runCmdPage(int conn) : Pages() +{ + m_name = tr("Restore Run"); + pgInitialize(); + setupUi(this); + QTreeWidgetItem* thisitem = mainWin->getFromHash(this); + thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/restore.png"))); + m_conn = conn; + m_console->notify(conn, false); + + fill(); + + connect(okButton, SIGNAL(pressed()), this, SLOT(okButtonPushed())); + connect(cancelButton, SIGNAL(pressed()), this, SLOT(cancelButtonPushed())); + //dockPage(); + setCurrent(); + this->show(); + +} + +void runCmdPage::fill() +{ + QString item, val; + QStringList items; + QRegExp rx("^.*:\\s*(\\S.*$)"); /* Regex to get value */ + + clientCombo->addItems(m_console->client_list); + filesetCombo->addItems(m_console->fileset_list); + replaceCombo->addItems(QStringList() << tr("never") << tr("always") << tr("ifnewer") + << tr("ifolder")); + replaceCombo->setCurrentIndex(replaceCombo->findText(tr("never"), Qt::MatchExactly)); + storageCombo->addItems(m_console->storage_list); + dateTimeEdit->setDisplayFormat(mainWin->m_dtformat); + + m_console->read(m_conn); + item = m_console->msg(m_conn); + items = item.split("\n"); + foreach(item, items) { + rx.indexIn(item); + val = rx.cap(1); + Dmsg1(100, "Item=%s\n", item.toUtf8().data()); + Dmsg1(100, "Value=%s\n", val.toUtf8().data()); + + if (item.startsWith("Title:")) { + run->setText(val); + } + if (item.startsWith("JobName:")) { + jobCombo->addItem(val); + continue; + } + if (item.startsWith("Bootstrap:")) { + bootstrap->setText(val); + continue; + } + if (item.startsWith("Backup Client:")) { + clientCombo->setCurrentIndex(clientCombo->findText(val, Qt::MatchExactly)); + continue; + } + if (item.startsWith("Storage:")) { + storageCombo->setCurrentIndex(storageCombo->findText(val, Qt::MatchExactly)); + continue; + } + if (item.startsWith("Where:")) { + where->setText(val); + continue; + } + if (item.startsWith("When:")) { + dateTimeEdit->setDateTime(QDateTime::fromString(val,mainWin->m_dtformat)); + continue; + } + if (item.startsWith("Catalog:")) { + catalogCombo->addItem(val); + continue; + } + if (item.startsWith("FileSet:")) { + filesetCombo->setCurrentIndex(filesetCombo->findText(val, Qt::MatchExactly)); + continue; + } + if (item.startsWith("Priority:")) { + bool okay; + int pri = val.toInt(&okay, 10); + if (okay) + prioritySpin->setValue(pri); + continue; + } + if (item.startsWith("Replace:")) { + int replaceIndex = replaceCombo->findText(val, Qt::MatchExactly); + if (replaceIndex >= 0) + replaceCombo->setCurrentIndex(replaceIndex); + continue; + } + } +} + +void runCmdPage::okButtonPushed() +{ + QString cmd(".mod"); + cmd += " restoreclient=\"" + clientCombo->currentText() + "\""; + cmd += " fileset=\"" + filesetCombo->currentText() + "\""; + cmd += " storage=\"" + storageCombo->currentText() + "\""; + cmd += " replace=\"" + replaceCombo->currentText() + "\""; + cmd += " when=\"" + dateTimeEdit->dateTime().toString(mainWin->m_dtformat) + "\""; + cmd += " bootstrap=\"" + bootstrap->text() + "\""; + cmd += " where=\"" + where->text() + "\""; + QString pri; + QTextStream(&pri) << " priority=\"" << prioritySpin->value() << "\""; + cmd += pri; + cmd += " yes\n"; + + setConsoleCurrent(); + QString displayhtml(""); + displayhtml += cmd + "\n"; + m_console->display_html(displayhtml); + m_console->display_text("\n"); + m_console->write_dir(m_conn, cmd.toUtf8().data()); + m_console->displayToPrompt(m_conn); 
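(A short aside on the value parsing in runCmdPage::fill() above: the pattern "^.*:\\s*(\\S.*$)" is greedy, so cap(1) returns the text after the last colon on each line, with leading whitespace stripped. A minimal self-contained check follows; the sample lines are invented stand-ins for the Director's output, and QRegExp is used to match the code above even though newer Qt prefers QRegularExpression.)

#include <QRegExp>
#include <QString>
#include <QStringList>
#include <QtDebug>

int main()
{
   QRegExp rx("^.*:\\s*(\\S.*$)");        /* same pattern as runCmdPage::fill() */
   QStringList samples;
   samples << "JobName: RestoreFiles"     /* invented sample lines */
           << "Bootstrap: /tmp/restore.bsr"
           << "Replace: never";
   foreach (QString line, samples) {
      if (rx.indexIn(line) != -1) {
         qDebug() << line.section(':', 0, 0) << "->" << rx.cap(1);
      }
   }
   return 0;
}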
+// consoleCommand(cmd); ***FIXME set back to consoleCommand when connection issue is resolved + + m_console->notify(m_conn, true); + closeStackPage(); +} + + +void runCmdPage::cancelButtonPushed() +{ + this->hide(); + m_console->write_dir(m_conn, "no"); + m_console->displayToPrompt(m_conn); + m_console->notify(m_conn, true); + mainWin->set_status(tr(" Canceled")); + closeStackPage(); + mainWin->resetFocus(); +} diff --git a/src/qt-console/run/runcmd.ui b/src/qt-console/run/runcmd.ui new file mode 100644 index 00000000..2f32c5cf --- /dev/null +++ b/src/qt-console/run/runcmd.ui @@ -0,0 +1,405 @@ + + runCmdForm + + + + 0 + 0 + 704 + 466 + + + + Form + + + + 9 + + + 6 + + + + + 0 + + + 6 + + + + + 0 + + + 6 + + + + + Priority: + + + prioritySpin + + + + + + + 10000 + + + 1 + + + 12 + + + + + + + + + Qt::Vertical + + + + 20 + 171 + + + + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + Qt::Vertical + + + QSizePolicy::MinimumExpanding + + + + 572 + 16 + + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + Qt::Vertical + + + QSizePolicy::MinimumExpanding + + + + 572 + 16 + + + + + + + + 0 + + + 6 + + + + + + 0 + 2 + 0 + 2000 + 1 + 1 + + + + yyyy-mm-dd hh:mm:ss + + + true + + + + + + + When: + + + dateTimeEdit + + + + + + + Where: + + + true + + + where + + + + + + + Bootstrap: + + + bootstrap + + + + + + + + + + + + + Job: + + + jobCombo + + + + + + + true + + + + 200 + 0 + + + + false + + + + + + + Storage: + + + storageCombo + + + + + + + FileSet: + + + filesetCombo + + + + + + + true + + + + 200 + 0 + + + + false + + + + + + + + + + + + + + + + Replace: + + + + + + + + + + To client: + + + clientCombo + + + + + + + Catalog: + + + catalogCombo + + + + + + + + + 0 + + + 6 + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + OK + + + + + + + Cancel + + + + + + + + + 0 + + + 6 + + + + + Qt::Horizontal + + + QSizePolicy::Fixed + + + + 131 + 25 + + + + + + + + + 16777215 + 30 + + + + <h3>Run Restore Job</h3> + + + Qt::AlignCenter + + + + + + + Qt::Horizontal + + + QSizePolicy::Fixed + + + + 131 + 25 + + + + + + + + + + + diff --git a/src/qt-console/run/runcopy.ui b/src/qt-console/run/runcopy.ui new file mode 100644 index 00000000..6b05075c --- /dev/null +++ b/src/qt-console/run/runcopy.ui @@ -0,0 +1,405 @@ + + runCopyForm + + + + 0 + 0 + 704 + 466 + + + + Form + + + + 9 + + + 6 + + + + + 0 + + + 6 + + + + + 0 + + + 6 + + + + + Priority: + + + prioritySpin + + + + + + + 10000 + + + 1 + + + 12 + + + + + + + + + Qt::Vertical + + + + 20 + 171 + + + + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + Qt::Vertical + + + QSizePolicy::MinimumExpanding + + + + 572 + 16 + + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + Qt::Vertical + + + QSizePolicy::MinimumExpanding + + + + 572 + 16 + + + + + + + + 0 + + + 6 + + + + + + 0 + 2 + 0 + 2000 + 1 + 1 + + + + yyyy-mm-dd hh:mm:ss + + + true + + + + + + + When: + + + dateTimeEdit + + + + + + + Where: + + + true + + + where + + + + + + + Bootstrap: + + + bootstrap + + + + + + + + + + + + + Job: + + + jobCombo + + + + + + + true + + + + 200 + 0 + + + + false + + + + + + + Storage: + + + storageCombo + + + + + + + FileSet: + + + filesetCombo + + + + + + + true + + + + 200 + 0 + + + + false + + + + + + + + + + + + + + + + Replace: + + + + + + + + + + To client: + + + clientCombo + + + + + + + Catalog: + + + catalogCombo + + + + + + + + + 0 + + + 6 + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + OK + + + + + + + Cancel + + + + + + + + + 0 + + + 6 + + + + + Qt::Horizontal + + + QSizePolicy::Fixed + + + + 131 + 25 + + + 
+ + + + + + 16777215 + 30 + + + + <h3>Run Restore Job</h3> + + + Qt::AlignCenter + + + + + + + Qt::Horizontal + + + QSizePolicy::Fixed + + + + 131 + 25 + + + + + + + + + + + diff --git a/src/qt-console/run/runmigration.ui b/src/qt-console/run/runmigration.ui new file mode 100644 index 00000000..f5dcd81e --- /dev/null +++ b/src/qt-console/run/runmigration.ui @@ -0,0 +1,405 @@ + + runMigrationForm + + + + 0 + 0 + 704 + 466 + + + + Form + + + + 9 + + + 6 + + + + + 0 + + + 6 + + + + + 0 + + + 6 + + + + + Priority: + + + prioritySpin + + + + + + + 10000 + + + 1 + + + 12 + + + + + + + + + Qt::Vertical + + + + 20 + 171 + + + + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + Qt::Vertical + + + QSizePolicy::MinimumExpanding + + + + 572 + 16 + + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + Qt::Vertical + + + QSizePolicy::MinimumExpanding + + + + 572 + 16 + + + + + + + + 0 + + + 6 + + + + + + 0 + 2 + 0 + 2000 + 1 + 1 + + + + yyyy-mm-dd hh:mm:ss + + + true + + + + + + + When: + + + dateTimeEdit + + + + + + + Where: + + + true + + + where + + + + + + + Bootstrap: + + + bootstrap + + + + + + + + + + + + + Job: + + + jobCombo + + + + + + + true + + + + 200 + 0 + + + + false + + + + + + + Storage: + + + storageCombo + + + + + + + FileSet: + + + filesetCombo + + + + + + + true + + + + 200 + 0 + + + + false + + + + + + + + + + + + + + + + Replace: + + + + + + + + + + To client: + + + clientCombo + + + + + + + Catalog: + + + catalogCombo + + + + + + + + + 0 + + + 6 + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + OK + + + + + + + Cancel + + + + + + + + + 0 + + + 6 + + + + + Qt::Horizontal + + + QSizePolicy::Fixed + + + + 131 + 25 + + + + + + + + + 16777215 + 30 + + + + <h3>Run Restore Job</h3> + + + Qt::AlignCenter + + + + + + + Qt::Horizontal + + + QSizePolicy::Fixed + + + + 131 + 25 + + + + + + + + + + + diff --git a/src/qt-console/run/runrestore.ui b/src/qt-console/run/runrestore.ui new file mode 100644 index 00000000..81690cbd --- /dev/null +++ b/src/qt-console/run/runrestore.ui @@ -0,0 +1,405 @@ + + runRestoreForm + + + + 0 + 0 + 704 + 466 + + + + Form + + + + 9 + + + 6 + + + + + 0 + + + 6 + + + + + 0 + + + 6 + + + + + Priority: + + + prioritySpin + + + + + + + 10000 + + + 1 + + + 12 + + + + + + + + + Qt::Vertical + + + + 20 + 171 + + + + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + Qt::Vertical + + + QSizePolicy::MinimumExpanding + + + + 572 + 16 + + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + Qt::Vertical + + + QSizePolicy::MinimumExpanding + + + + 572 + 16 + + + + + + + + 0 + + + 6 + + + + + + 0 + 2 + 0 + 2000 + 1 + 1 + + + + yyyy-mm-dd hh:mm:ss + + + true + + + + + + + When: + + + dateTimeEdit + + + + + + + Where: + + + true + + + where + + + + + + + Bootstrap: + + + bootstrap + + + + + + + + + + + + + Job: + + + jobCombo + + + + + + + true + + + + 200 + 0 + + + + false + + + + + + + Storage: + + + storageCombo + + + + + + + FileSet: + + + filesetCombo + + + + + + + true + + + + 200 + 0 + + + + false + + + + + + + + + + + + + + + + Replace: + + + + + + + + + + To client: + + + clientCombo + + + + + + + Catalog: + + + catalogCombo + + + + + + + + + 0 + + + 6 + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + OK + + + + + + + Cancel + + + + + + + + + 0 + + + 6 + + + + + Qt::Horizontal + + + QSizePolicy::Fixed + + + + 131 + 25 + + + + + + + + + 16777215 + 30 + + + + <h3>Run Restore Job</h3> + + + Qt::AlignCenter + + + + + + + Qt::Horizontal + + + QSizePolicy::Fixed + + + + 131 + 25 + + + + + + + + + + + diff --git 
a/src/qt-console/select/select.cpp b/src/qt-console/select/select.cpp new file mode 100644 index 00000000..f026920c --- /dev/null +++ b/src/qt-console/select/select.cpp @@ -0,0 +1,118 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +/* + * Select dialog class + * + * Kern Sibbald, March MMVII + * + */ + +#include "bat.h" +#include "select.h" + +/* + * Read the items for the selection + */ +selectDialog::selectDialog(Console *console, int conn) : QDialog() +{ + m_conn = conn; + QDateTime dt; + int stat; + QListWidgetItem *item; + int row = 0; + + m_console = console; + m_console->notify(m_conn, false); + setupUi(this); + connect(listBox, SIGNAL(currentRowChanged(int)), this, SLOT(index_change(int))); + setAttribute(Qt::WA_DeleteOnClose); + m_console->read(m_conn); /* get title */ + labelWidget->setText(m_console->msg(m_conn)); + while ((stat=m_console->read(m_conn)) > 0) { + item = new QListWidgetItem; + item->setText(m_console->msg(m_conn)); + listBox->insertItem(row++, item); + } + m_console->displayToPrompt(m_conn); + this->show(); +} + +void selectDialog::accept() +{ + char cmd[100]; + + this->hide(); + bsnprintf(cmd, sizeof(cmd), "%d", m_index+1); + m_console->write_dir(m_conn, cmd); + m_console->displayToPrompt(m_conn); + this->close(); + mainWin->resetFocus(); + m_console->displayToPrompt(m_conn); + m_console->notify(m_conn, true); +} + + +void selectDialog::reject() +{ + this->hide(); + mainWin->set_status(tr(" Canceled")); + this->close(); + mainWin->resetFocus(); + m_console->beginNewCommand(m_conn); + m_console->notify(m_conn, true); +} + +/* + * Called here when the jobname combo box is changed. + * We load the default values for the new job in the + * other combo boxes. + */ +void selectDialog::index_change(int index) +{ + m_index = index; +} + +/* + * Handle yesno PopUp when Bacula asks a yes/no question. + */ +/* + * Read the items for the selection + */ +yesnoPopUp::yesnoPopUp(Console *console, int conn) : QDialog() +{ + QMessageBox msgBox; + + setAttribute(Qt::WA_DeleteOnClose); + console->read(conn); /* get yesno question */ + msgBox.setWindowTitle(tr("Bat Question")); + msgBox.setText(console->msg(conn)); + msgBox.setStandardButtons(QMessageBox::Yes | QMessageBox::No); + console->displayToPrompt(conn); + switch (msgBox.exec()) { + case QMessageBox::Yes: + console->write_dir(conn, "yes"); + break; + case QMessageBox::No: + console->write_dir(conn, "no"); + break; + } + console->displayToPrompt(conn); + mainWin->resetFocus(); +} diff --git a/src/qt-console/select/select.h b/src/qt-console/select/select.h new file mode 100644 index 00000000..057fa874 --- /dev/null +++ b/src/qt-console/select/select.h @@ -0,0 +1,59 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +#ifndef _SELECT_H_ +#define _SELECT_H_ + +#if QT_VERSION >= 0x050000 +#include +#else +#include +#endif +#include "ui_select.h" +#include "console.h" + +class selectDialog : public QDialog, public Ui::selectForm +{ + Q_OBJECT + +public: + selectDialog(Console *console, int conn); + +public slots: + void accept(); + void reject(); + void index_change(int index); + +private: + Console *m_console; + int m_index; + int m_conn; +}; + +class yesnoPopUp : public QDialog +{ + Q_OBJECT + +public: + yesnoPopUp(Console *console, int conn); + +}; + + +#endif /* _SELECT_H_ */ diff --git a/src/qt-console/select/select.ui b/src/qt-console/select/select.ui new file mode 100644 index 00000000..8595a650 --- /dev/null +++ b/src/qt-console/select/select.ui @@ -0,0 +1,82 @@ + + selectForm + + + Qt::NonModal + + + + 0 + 0 + 377 + 323 + + + + Selection dialog + + + + 9 + + + 6 + + + + + Qt::Horizontal + + + QDialogButtonBox::Cancel|QDialogButtonBox::NoButton|QDialogButtonBox::Ok + + + + + + + + + + + + + + + + + + + buttonBox + accepted() + selectForm + accept() + + + 248 + 254 + + + 157 + 274 + + + + + buttonBox + rejected() + selectForm + reject() + + + 316 + 260 + + + 286 + 274 + + + + + diff --git a/src/qt-console/select/textinput.cpp b/src/qt-console/select/textinput.cpp new file mode 100644 index 00000000..a32e1ff2 --- /dev/null +++ b/src/qt-console/select/textinput.cpp @@ -0,0 +1,67 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ + +/* + * Select dialog class + * + * Kern Sibbald, March MMVII + * + */ + +#include "bat.h" +#include "textinput.h" + +/* + * Read input text box + */ +textInputDialog::textInputDialog(Console *console, int conn) +{ + m_conn = conn; + QDateTime dt; + + m_console = console; + m_console->notify(m_conn, false); + setupUi(this); + setAttribute(Qt::WA_DeleteOnClose); + m_console->read(m_conn); /* get title */ + labelWidget->setText(m_console->msg(m_conn)); + this->show(); +} + +void textInputDialog::accept() +{ + this->hide(); + m_console->write_dir(m_conn, lineEdit->text().toUtf8().data()); + /* Do not displayToPrompt because there may be another Text Input required */ + this->close(); + mainWin->resetFocus(); + m_console->notify(m_conn, true); +} + + +void textInputDialog::reject() +{ + this->hide(); + mainWin->set_status(tr(" Canceled")); + m_console->write_dir(m_conn, "."); + this->close(); + mainWin->resetFocus(); + m_console->beginNewCommand(m_conn); + m_console->notify(m_conn, true); +} diff --git a/src/qt-console/select/textinput.h b/src/qt-console/select/textinput.h new file mode 100644 index 00000000..68c97c4d --- /dev/null +++ b/src/qt-console/select/textinput.h @@ -0,0 +1,47 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ + +#ifndef _TEXTENTRY_H_ +#define _TEXTENTRY_H_ + +#if QT_VERSION >= 0x050000 +#include +#else +#include +#endif +#include "ui_textinput.h" +#include "console.h" + +class textInputDialog : public QDialog, public Ui::textInputForm +{ + Q_OBJECT + +public: + textInputDialog(Console *console, int conn); + +public slots: + void accept(); + void reject(); + +private: + Console *m_console; + int m_conn; +}; + +#endif /* _TEXTENTRY_H_ */ diff --git a/src/qt-console/select/textinput.ui b/src/qt-console/select/textinput.ui new file mode 100644 index 00000000..4991b8f2 --- /dev/null +++ b/src/qt-console/select/textinput.ui @@ -0,0 +1,143 @@ + + + textInputForm + + + Qt::ApplicationModal + + + + 0 + 0 + 430 + 96 + + + + Qt::StrongFocus + + + Text input dialog + + + + + + + 0 + 0 + + + + + 50 + 0 + + + + + 100 + 16777215 + + + + Qt::TabFocus + + + + + + + + 0 + 0 + + + + Message + + + + + + + 5 + + + + + Qt::Horizontal + + + QSizePolicy::Expanding + + + + 26 + 9 + + + + + + + + + 0 + 0 + + + + + 16777215 + 38 + + + + Qt::Horizontal + + + QDialogButtonBox::Cancel|QDialogButtonBox::Ok + + + + + + + + + + + buttonBox + accepted() + textInputForm + accept() + + + 248 + 254 + + + 157 + 274 + + + + + buttonBox + rejected() + textInputForm + reject() + + + 316 + 260 + + + 286 + 274 + + + + + diff --git a/src/qt-console/status/clientstat.cpp b/src/qt-console/status/clientstat.cpp new file mode 100644 index 00000000..d11ae977 --- /dev/null +++ b/src/qt-console/status/clientstat.cpp @@ -0,0 +1,280 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Dirk Bartley, March 2007 + */ + +#include "bat.h" +#include +#include +#include "clientstat.h" + +/* This probably should be on a mutex */ +static bool working = false; /* prevent timer recursion */ + +/* + * Constructor for the class + */ +ClientStat::ClientStat(QString &client, QTreeWidgetItem *parentTreeWidgetItem) + : Pages() +{ + m_client = client; + setupUi(this); + pgInitialize(tr("Client Status %1").arg(m_client), parentTreeWidgetItem); + QTreeWidgetItem* thisitem = mainWin->getFromHash(this); + thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/status.png"))); + m_cursor = new QTextCursor(textEditHeader->document()); + + readSettings(); + dockPage(); + m_timer = new QTimer(this); + + createConnections(); + m_timer->start(1000); + setCurrent(); +} + +void ClientStat::getFont() +{ + QFont font = textEditHeader->font(); + + QString dirname; + m_console->getDirResName(dirname); + QSettings settings(dirname, "bat"); + settings.beginGroup("Console"); + font.setFamily(settings.value("consoleFont", "Courier").value()); + font.setPointSize(settings.value("consolePointSize", 10).toInt()); + font.setFixedPitch(settings.value("consoleFixedPitch", true).toBool()); + settings.endGroup(); + textEditHeader->setFont(font); +} + +/* + * Write the m_splitter settings in the destructor + */ +ClientStat::~ClientStat() +{ + writeSettings(); +} + +/* + * Populate all tables and header widgets + */ +void ClientStat::populateAll() +{ + populateTerminated(); + populateCurrentTab(tabWidget->currentIndex()); +} + +/* + * Timer is triggered, see if is current and repopulate. + */ +void ClientStat::timerTriggered() +{ + double value = timerDisplay->value(); + value -= 1; + if (value <= 0 && !working) { + working = true; + value = spinBox->value(); + bool iscurrent = mainWin->tabWidget->currentIndex() == mainWin->tabWidget->indexOf(this); + if (((isDocked() && iscurrent) || (!isDocked())) && (checkBox->checkState() == Qt::Checked)) { + populateAll(); + } + working = false; + } + timerDisplay->display(value); +} + + +void ClientStat::populateCurrentTab(int index) +{ + if (index == 0) + populateRunning(); + if (index == 1) + populateHeader(); +} + +/* + * Populate header text widget + */ +void ClientStat::populateHeader() +{ + QString command = QString(".status client=\"" + m_client + "\" header"); + if (mainWin->m_commandDebug) + Pmsg1(000, "sending command : %s\n",command.toUtf8().data()); + QStringList results; + textEditHeader->clear(); + + if (m_console->dir_cmd(command, results)) { + foreach (QString line, results) { + line += "\n"; + textEditHeader->insertPlainText(line); + } + } +} + +/* + * Populate teminated table + */ +void ClientStat::populateTerminated() +{ + QString command = QString(".status client=\"" + m_client + "\" terminated"); + if (mainWin->m_commandDebug) + Pmsg1(000, "sending command : %s\n",command.toUtf8().data()); + QStringList results; + QBrush blackBrush(Qt::black); + + terminatedTable->clear(); + QStringList headerlist = (QStringList() + << tr("Job Id") << tr("Job Level") << tr("Job Files") + << tr("Job Bytes") << tr("Job Status") << tr("Job Time") + << tr("Job Name")); + QStringList flaglist = (QStringList() + << "R" << "L" << "R" << "R" << "LC" + << "L" << "L"); + + terminatedTable->setColumnCount(headerlist.size()); + terminatedTable->setHorizontalHeaderLabels(headerlist); + + if (m_console->dir_cmd(command, results)) { + int row = 0; + QTableWidgetItem* p_tableitem; + terminatedTable->setRowCount(results.size()); + foreach (QString line, results) { + /* Iterate 
through the record returned from the query */ + QStringList fieldlist = line.split("\t"); + int column = 0; + QString statusCode(""); + /* Iterate through fields in the record */ + foreach (QString field, fieldlist) { + field = field.trimmed(); /* strip leading & trailing spaces */ + p_tableitem = new QTableWidgetItem(field, 1); + p_tableitem->setForeground(blackBrush); + p_tableitem->setFlags(0); + if (flaglist[column].contains("R")) + p_tableitem->setTextAlignment(Qt::AlignRight); + if (flaglist[column].contains("C")) { + if (field == "OK") + p_tableitem->setBackground(Qt::green); + else + p_tableitem->setBackground(Qt::red); + } + terminatedTable->setItem(results.size() - row - 1, column, p_tableitem); + column += 1; + } + row += 1; + } + } + terminatedTable->resizeColumnsToContents(); + terminatedTable->resizeRowsToContents(); + terminatedTable->verticalHeader()->hide(); +} + +/* + * Populate running text + */ +void ClientStat::populateRunning() +{ + QString command = QString(".status client=\"" + m_client + "\" running"); + if (mainWin->m_commandDebug) + Pmsg1(000, "sending command : %s\n",command.toUtf8().data()); + QStringList results; + textEditRunning->clear(); + + if (m_console->dir_cmd(command, results)) { + foreach (QString line, results) { + line += "\n"; + textEditRunning->insertPlainText(line); + } + } +} + +/* + * When the treeWidgetItem in the page selector tree is single clicked, Make sure + * The tree has been populated. + */ +void ClientStat::PgSeltreeWidgetClicked() +{ + if (!m_populated) { + populateAll(); + m_populated=true; + } +} + +/* + * Virtual function override of pages function which is called when this page + * is visible on the stack + */ +void ClientStat::currentStackItem() +{ + populateAll(); + timerDisplay->display(spinBox->value()); + + if (!m_populated) { + m_populated=true; + } +} + +/* + * Function to create connections for context sensitive menu for this and + * the page selector + */ +void ClientStat::createConnections() +{ + connect(actionRefresh, SIGNAL(triggered()), this, SLOT(populateAll())); + connect(tabWidget, SIGNAL(currentChanged(int)), this, SLOT(populateCurrentTab(int))); + connect(m_timer, SIGNAL(timeout()), this, SLOT(timerTriggered())); + terminatedTable->setContextMenuPolicy(Qt::ActionsContextMenu); + terminatedTable->addAction(actionRefresh); +} + +/* + * Save user settings associated with this page + */ +void ClientStat::writeSettings() +{ + QSettings settings(m_console->m_dir->name(), "bat"); + settings.beginGroup(m_groupText); + settings.setValue(m_splitText, splitter->saveState()); + settings.setValue("refreshInterval", spinBox->value()); + settings.setValue("refreshCheck", checkBox->checkState()); + settings.endGroup(); + + settings.beginGroup("OpenOnExit"); + QString toWrite = "ClientStatus_" + m_client; + settings.setValue(toWrite, 1); + settings.endGroup(); +} + +/* + * Read and restore user settings associated with this page + */ +void ClientStat::readSettings() +{ + m_groupText = "ClientStatPage"; + m_splitText = "splitterSizes_1"; + QSettings settings(m_console->m_dir->name(), "bat"); + settings.beginGroup(m_groupText); + if (settings.contains(m_splitText)) { splitter->restoreState(settings.value(m_splitText).toByteArray()); } + spinBox->setValue(settings.value("refreshInterval", 28).toInt()); + checkBox->setCheckState((Qt::CheckState)settings.value("refreshCheck", Qt::Checked).toInt()); + settings.endGroup(); + + timerDisplay->display(spinBox->value()); +} diff --git a/src/qt-console/status/clientstat.h 
b/src/qt-console/status/clientstat.h new file mode 100644 index 00000000..895f0590 --- /dev/null +++ b/src/qt-console/status/clientstat.h @@ -0,0 +1,66 @@ +#ifndef _CLIENTSTAT_H_ +#define _CLIENTSTAT_H_ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Dirk Bartley, March 2007 + */ + +#if QT_VERSION >= 0x050000 +#include +#else +#include +#endif +#include "ui_clientstat.h" +#include "console.h" +#include "pages.h" + +class ClientStat : public Pages, public Ui::ClientStatForm +{ + Q_OBJECT + +public: + ClientStat(QString&, QTreeWidgetItem*); + ~ClientStat(); + virtual void PgSeltreeWidgetClicked(); + virtual void currentStackItem(); + +public slots: + void populateHeader(); + void populateTerminated(); + void populateRunning(); + void populateAll(); + void populateCurrentTab(int); + +private slots: + void timerTriggered(); + +private: + void createConnections(); + void writeSettings(); + void readSettings(); + bool m_populated; + QTextCursor *m_cursor; + void getFont(); + QString m_groupText, m_splitText; + QTimer *m_timer; + QString m_client; +}; + +#endif /* _CLIENTSTAT_H_ */ diff --git a/src/qt-console/status/clientstat.ui b/src/qt-console/status/clientstat.ui new file mode 100644 index 00000000..483f567c --- /dev/null +++ b/src/qt-console/status/clientstat.ui @@ -0,0 +1,215 @@ + + ClientStatForm + + + + 0 + 0 + 557 + 350 + + + + Form + + + + + + Qt::Vertical + + + + + + + 0 + + + + Running + + + + + + + + + + Header + + + + + + + 200 + 0 + + + + + 0 + 0 + + + + + 16777215 + 16777215 + + + + + 1 + 0 + + + + Qt::StrongFocus + + + false + + + + + + + + + + + + Qt::ScrollBarAsNeeded + + + QTextEdit::AutoNone + + + false + + + + + + QTextEdit::NoWrap + + + true + + + + + + + + + + + + 141 + 0 + + + + + 141 + 16777215 + + + + Refresh Timer + + + + + 20 + 50 + 111 + 24 + + + + 5 + + + 999 + + + + + + 20 + 20 + 101 + 20 + + + + Do Refresh + + + + + + 20 + 80 + 101 + 31 + + + + + + + + + + + + + Qt::LeftToRight + + + <html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:13pt; font-weight:600;">Terminated Jobs</span></p></body></html> + + + Qt::AlignCenter + + + + + + + + + + + + + + :/images/view-refresh.png + + + Refresh + + + + + :/images/utilities-terminal.png + + + Cancel Running Job + + + + + + + + diff --git a/src/qt-console/status/dirstat.cpp b/src/qt-console/status/dirstat.cpp new file mode 100644 index 00000000..e7db91d2 --- /dev/null +++ b/src/qt-console/status/dirstat.cpp @@ -0,0 +1,391 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be 
found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * Dirk Bartley, March 2007 + */ + +#include "bat.h" +#include +#include +#include "dirstat.h" + +static bool working = false; /* prevent timer recursion */ + +/* + * Constructor for the class + */ +DirStat::DirStat() : Pages() +{ + setupUi(this); + m_name = tr("Director Status"); + pgInitialize(); + QTreeWidgetItem* thisitem = mainWin->getFromHash(this); + thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/status.png"))); + m_cursor = new QTextCursor(textEdit->document()); + + m_timer = new QTimer(this); + readSettings(); + m_timer->start(1000); + + createConnections(); + setCurrent(); +} + +void DirStat::getFont() +{ + QFont font = textEdit->font(); + + QString dirname; + m_console->getDirResName(dirname); + QSettings settings(dirname, "bat"); + settings.beginGroup("Console"); + font.setFamily(settings.value("consoleFont", "Courier").value()); + font.setPointSize(settings.value("consolePointSize", 10).toInt()); + font.setFixedPitch(settings.value("consoleFixedPitch", true).toBool()); + settings.endGroup(); + textEdit->setFont(font); +} + +/* + * Write the m_splitter settings in the destructor + */ +DirStat::~DirStat() +{ + writeSettings(); +} + +/* + * Populate all tables and header widgets + */ +void DirStat::populateAll() +{ + populateHeader(); + populateTerminated(); + populateScheduled(); + populateRunning(); +} + +/* + * Timer is triggered, see if is current and repopulate. 
+ */ +void DirStat::timerTriggered() +{ + double value = timerDisplay->value(); + value -= 1; + if (value <= 0 && !working) { + working = true; + value = spinBox->value(); + bool iscurrent = mainWin->tabWidget->currentIndex() == mainWin->tabWidget->indexOf(this); + if (((isDocked() && iscurrent) || ((!isDocked()) && isOnceDocked())) && (checkBox->checkState() == Qt::Checked)) { + populateAll(); + } + working = false; + } + timerDisplay->display(value); +} + +/* + * Populate header text widget + */ +void DirStat::populateHeader() +{ + QString command = QString(".status dir header"); + if (mainWin->m_commandDebug) + Pmsg1(000, "sending command : %s\n",command.toUtf8().data()); + QStringList results; + textEdit->clear(); + + if (m_console->dir_cmd(command, results)) { + foreach (QString line, results) { + line += "\n"; + textEdit->insertPlainText(line); + } + } +} + +/* + * Populate teminated table + */ +void DirStat::populateTerminated() +{ + QString command = QString(".status dir terminated"); + if (mainWin->m_commandDebug) + Pmsg1(000, "sending command : %s\n",command.toUtf8().data()); + QStringList results; + QBrush blackBrush(Qt::black); + + terminatedTable->clear(); + QStringList headerlist = (QStringList() + << tr("Job Id") << tr("Job Level") << tr("Job Files") + << tr("Job Bytes") << tr("Job Status") << tr("Job Time") + << tr("Job Name")); + QStringList flaglist = (QStringList() + << "R" << "L" << "R" << "R" << "LC" + << "L" << "L"); + + terminatedTable->setColumnCount(headerlist.size()); + terminatedTable->setHorizontalHeaderLabels(headerlist); + + if (m_console->dir_cmd(command, results)) { + int row = 0; + QTableWidgetItem* p_tableitem; + terminatedTable->setRowCount(results.size()); + foreach (QString line, results) { + /* Iterate through the record returned from the query */ + QStringList fieldlist = line.split("\t"); + int column = 0; + QString statusCode(""); + /* Iterate through fields in the record */ + foreach (QString field, fieldlist) { + field = field.trimmed(); /* strip leading & trailing spaces */ + p_tableitem = new QTableWidgetItem(field, 1); + p_tableitem->setForeground(blackBrush); + p_tableitem->setFlags(0); + if (flaglist[column].contains("R")) + p_tableitem->setTextAlignment(Qt::AlignRight); + if (flaglist[column].contains("C")) { + if (field == "OK") + p_tableitem->setBackground(Qt::green); + else + p_tableitem->setBackground(Qt::red); + } + terminatedTable->setItem(results.size() - row - 1, column, p_tableitem); + column += 1; + } + row += 1; + } + } + terminatedTable->resizeColumnsToContents(); + terminatedTable->resizeRowsToContents(); + terminatedTable->verticalHeader()->hide(); +} + +/* + * Populate scheduled table + */ +void DirStat::populateScheduled() +{ + QString command = QString(".status dir scheduled"); + if (mainWin->m_commandDebug) + Pmsg1(000, "sending command : %s\n",command.toUtf8().data()); + QStringList results; + QBrush blackBrush(Qt::black); + + scheduledTable->clear(); + QStringList headerlist = (QStringList() + << tr("Job Level") << tr("Job Type") << tr("Priority") << tr("Job Time") + << tr("Job Name") << tr("Volume")); + QStringList flaglist = (QStringList() + << "L" << "L" << "R" << "L" << "L" << "L"); + + scheduledTable->setColumnCount(headerlist.size()); + scheduledTable->setHorizontalHeaderLabels(headerlist); + scheduledTable->setSelectionBehavior(QAbstractItemView::SelectRows); + scheduledTable->setSelectionMode(QAbstractItemView::SingleSelection); + + if (m_console->dir_cmd(command, results)) { + int row = 0; + QTableWidgetItem* 
p_tableitem; + scheduledTable->setRowCount(results.size()); + foreach (QString line, results) { + /* Iterate through the record returned from the query */ + QStringList fieldlist = line.split("\t"); + int column = 0; + QString statusCode(""); + /* Iterate through fields in the record */ + foreach (QString field, fieldlist) { + field = field.trimmed(); /* strip leading & trailing spaces */ + p_tableitem = new QTableWidgetItem(field, 1); + p_tableitem->setForeground(blackBrush); + scheduledTable->setItem(row, column, p_tableitem); + column += 1; + } + row += 1; + } + } + scheduledTable->resizeColumnsToContents(); + scheduledTable->resizeRowsToContents(); + scheduledTable->verticalHeader()->hide(); +} + +/* + * Populate running table + */ +void DirStat::populateRunning() +{ + QString command = QString(".status dir running"); + if (mainWin->m_commandDebug) + Pmsg1(000, "sending command : %s\n",command.toUtf8().data()); + QStringList results; + QBrush blackBrush(Qt::black); + + runningTable->clear(); + QStringList headerlist = (QStringList() + << tr("Job Id") << tr("Job Level") << tr("Job Data") << tr("Job Info")); + + runningTable->setColumnCount(headerlist.size()); + runningTable->setHorizontalHeaderLabels(headerlist); + runningTable->setSelectionBehavior(QAbstractItemView::SelectRows); + + if (m_console->dir_cmd(command, results)) { + int row = 0; + QTableWidgetItem* p_tableitem; + runningTable->setRowCount(results.size()); + foreach (QString line, results) { + /* Iterate through the record returned from the query */ + QStringList fieldlist = line.split("\t"); + int column = 0; + QString statusCode(""); + /* Iterate through fields in the record */ + foreach (QString field, fieldlist) { + field = field.trimmed(); /* strip leading & trailing spaces */ + p_tableitem = new QTableWidgetItem(field, 1); + p_tableitem->setForeground(blackBrush); + runningTable->setItem(row, column, p_tableitem); + column += 1; + } + row += 1; + } + } + runningTable->resizeColumnsToContents(); + runningTable->resizeRowsToContents(); + runningTable->verticalHeader()->hide(); +} + +/* + * When the treeWidgetItem in the page selector tree is singleclicked, Make sure + * The tree has been populated. 
+ */ +void DirStat::PgSeltreeWidgetClicked() +{ + if (!m_populated) { + populateAll(); + m_populated=true; + } + if (!isOnceDocked()) { + dockPage(); + } +} + +/* + * Virtual function override of pages function which is called when this page + * is visible on the stack + */ +void DirStat::currentStackItem() +{ + populateAll(); + timerDisplay->display(spinBox->value()); + if (!m_populated) { + m_populated=true; + } +} + +/* + * Function to create connections for context sensitive menu for this and + * the page selector + */ +void DirStat::createConnections() +{ + connect(actionRefresh, SIGNAL(triggered()), this, SLOT(populateAll())); + connect(actionCancelRunning, SIGNAL(triggered()), this, SLOT(consoleCancelJob())); + connect(actionDisableScheduledJob, SIGNAL(triggered()), this, SLOT(consoleDisableJob())); + connect(m_timer, SIGNAL(timeout()), this, SLOT(timerTriggered())); + + scheduledTable->setContextMenuPolicy(Qt::ActionsContextMenu); + scheduledTable->addAction(actionRefresh); + scheduledTable->addAction(actionDisableScheduledJob); + terminatedTable->setContextMenuPolicy(Qt::ActionsContextMenu); + terminatedTable->addAction(actionRefresh); + runningTable->setContextMenuPolicy(Qt::ActionsContextMenu); + runningTable->addAction(actionRefresh); + runningTable->addAction(actionCancelRunning); +} + +/* + * Save user settings associated with this page + */ +void DirStat::writeSettings() +{ + QSettings settings(m_console->m_dir->name(), "bat"); + settings.beginGroup(m_groupText); + settings.setValue(m_splitText, splitter->saveState()); + settings.setValue("refreshInterval", spinBox->value()); + settings.setValue("refreshCheck", checkBox->checkState()); + settings.endGroup(); +} + +/* + * Read and restore user settings associated with this page + */ +void DirStat::readSettings() +{ + m_groupText = "DirStatPage"; + m_splitText = "splitterSizes_0"; + QSettings settings(m_console->m_dir->name(), "bat"); + settings.beginGroup(m_groupText); + if (settings.contains(m_splitText)) { splitter->restoreState(settings.value(m_splitText).toByteArray()); } + spinBox->setValue(settings.value("refreshInterval", 28).toInt()); + checkBox->setCheckState((Qt::CheckState)settings.value("refreshCheck", Qt::Checked).toInt()); + settings.endGroup(); + + timerDisplay->display(spinBox->value()); +} + +/* + * Cancel a running job + */ +void DirStat::consoleCancelJob() +{ + QList rowList; + QList sitems = runningTable->selectedItems(); + foreach (QTableWidgetItem *sitem, sitems) { + int row = sitem->row(); + if (!rowList.contains(row)) { + rowList.append(row); + } + } + + QStringList selectedJobsList; + foreach(int row, rowList) { + QTableWidgetItem * sitem = runningTable->item(row, 0); + selectedJobsList.append(sitem->text()); + } + foreach( QString job, selectedJobsList ) + { + QString cmd("cancel jobid="); + cmd += job; + consoleCommand(cmd); + } +} + +/* + * Disable a scheduled Job + */ +void DirStat::consoleDisableJob() +{ + int currentrow = scheduledTable->currentRow(); + QTableWidgetItem *item = scheduledTable->item(currentrow, 4); + if (item) { + QString text = item->text(); + QString cmd("disable job=\""); + cmd += text + '"'; + consoleCommand(cmd); + } +} diff --git a/src/qt-console/status/dirstat.h b/src/qt-console/status/dirstat.h new file mode 100644 index 00000000..d07b5dfc --- /dev/null +++ b/src/qt-console/status/dirstat.h @@ -0,0 +1,67 @@ +#ifndef _DIRSTAT_H_ +#define _DIRSTAT_H_ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is 
Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Dirk Bartley, March 2007 + */ + +#if QT_VERSION >= 0x050000 +#include +#else +#include +#endif +#include "ui_dirstat.h" +#include "console.h" +#include "pages.h" + +class DirStat : public Pages, public Ui::DirStatForm +{ + Q_OBJECT + +public: + DirStat(); + ~DirStat(); + virtual void PgSeltreeWidgetClicked(); + virtual void currentStackItem(); + +public slots: + void populateHeader(); + void populateTerminated(); + void populateScheduled(); + void populateRunning(); + void populateAll(); + +private slots: + void timerTriggered(); + void consoleCancelJob(); + void consoleDisableJob(); + +private: + void createConnections(); + void writeSettings(); + void readSettings(); + bool m_populated; + QTextCursor *m_cursor; + void getFont(); + QString m_groupText, m_splitText; + QTimer *m_timer; +}; + +#endif /* _DIRSTAT_H_ */ diff --git a/src/qt-console/status/dirstat.ui b/src/qt-console/status/dirstat.ui new file mode 100644 index 00000000..41a4b40d --- /dev/null +++ b/src/qt-console/status/dirstat.ui @@ -0,0 +1,296 @@ + + DirStatForm + + + + 0 + 0 + 514 + 425 + + + + Form + + + + + + Qt::Vertical + + + + + + + + 0 + 0 + + + + + 0 + 0 + + + + + 16777215 + 100 + + + + + 1 + 0 + + + + Qt::StrongFocus + + + false + + + + + + + + + + + + Qt::ScrollBarAsNeeded + + + QTextEdit::AutoNone + + + false + + + + + + QTextEdit::NoWrap + + + true + + + + + + + + 0 + 0 + + + + + 221 + 100 + + + + + 221 + 100 + + + + Refresh Timer + + + + + 20 + 60 + 111 + 24 + + + + 5 + + + 999 + + + + + + 20 + 20 + 101 + 20 + + + + Do Refresh + + + + + + 110 + 20 + 101 + 31 + + + + + + + + + + + 6 + + + 0 + + + 0 + + + 0 + + + 0 + + + + + Qt::LeftToRight + + + <html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:13pt; font-weight:600;">Scheduled Jobs</span></p></body></html> + + + Qt::AlignCenter + + + + + + + + + + + + 6 + + + 0 + + + 0 + + + 0 + + + 0 + + + + + Qt::LeftToRight + + + <html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:13pt; font-weight:600;">Running Jobs</span></p></body></html> + + + Qt::AlignCenter + + + + + + + + + + + + 6 + + + 0 + + + 0 + + + 0 + + + 0 + + + + + Qt::LeftToRight + + + <html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> +<p style=" margin-top:0px; 
margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:13pt; font-weight:600;">Terminated Jobs</span></p></body></html> + + + Qt::AlignCenter + + + + + + + + + + + + + + :/images/view-refresh.png + + + Refresh + + + + + :/images/utilities-terminal.png + + + Cancel Selected Running Jobs + + + + + :/images/utilities-terminal.png + + + Disable Scheduled Job + + + + + + + + diff --git a/src/qt-console/status/storstat.cpp b/src/qt-console/status/storstat.cpp new file mode 100644 index 00000000..5eb326ee --- /dev/null +++ b/src/qt-console/status/storstat.cpp @@ -0,0 +1,443 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * Dirk Bartley, March 2007 + */ + +#include "bat.h" +#include +#include +#include "storstat.h" +#include "mount/mount.h" +#include "label/label.h" + +static bool working = false; /* prevent timer recursion */ + +/* +.status storage= +where is the storage name in the Director, and + is one of the following: +header +running +terminated + +waitreservation +devices +volumes +spooling +*/ + +/* + * Constructor for the class + */ +StorStat::StorStat(QString &storage, QTreeWidgetItem *parentTreeWidgetItem) + : Pages() +{ + m_storage = storage; + setupUi(this); + pgInitialize(tr("Storage Status %1").arg(m_storage), parentTreeWidgetItem); + QTreeWidgetItem* thisitem = mainWin->getFromHash(this); + thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/status.png"))); + m_cursor = new QTextCursor(textEditHeader->document()); + + m_timer = new QTimer(this); + readSettings(); + + createConnections(); + m_timer->start(1000); + setCurrent(); + + dockPage(); +} + +void StorStat::getFont() +{ + QFont font = textEditHeader->font(); + + QString dirname; + m_console->getDirResName(dirname); + QSettings settings(dirname, "bat"); + settings.beginGroup("Console"); + font.setFamily(settings.value("consoleFont", "Courier").value()); + font.setPointSize(settings.value("consolePointSize", 10).toInt()); + font.setFixedPitch(settings.value("consoleFixedPitch", true).toBool()); + settings.endGroup(); + textEditHeader->setFont(font); +} + +/* + * Write the m_splitter settings in the destructor + */ +StorStat::~StorStat() +{ + writeSettings(); +} + +/* + * Populate all tables and header widgets + */ +void StorStat::populateAll() +{ + populateTerminated(); + populateCurrentTab(tabWidget->currentIndex()); +} + +/* + * Timer is triggered, see if is current and repopulate. 
+ */ +void StorStat::timerTriggered() +{ + double value = timerDisplay->value(); + value -= 1; + if (value <= 0 && !working) { + working = true; + value = spinBox->value(); + bool iscurrent = mainWin->tabWidget->currentIndex() == mainWin->tabWidget->indexOf(this); + if (((isDocked() && iscurrent) || (!isDocked())) && (checkBox->checkState() == Qt::Checked)) { + populateAll(); + } + working = false; + } + timerDisplay->display(value); +} + +/* + * Populate header text widget + */ +void StorStat::populateHeader() +{ + QString command = QString(".status storage=\"" + m_storage + "\" header"); + if (mainWin->m_commandDebug) + Pmsg1(000, "sending command : %s\n",command.toUtf8().data()); + QStringList results; + textEditHeader->clear(); + + if (m_console->dir_cmd(command, results)) { + foreach (QString line, results) { + line += "\n"; + textEditHeader->insertPlainText(line); + } + } +} + +void StorStat::populateWaitReservation() +{ + QString command = QString(".status storage=\"" + m_storage + "\" waitreservation"); + if (mainWin->m_commandDebug) + Pmsg1(000, "sending command : %s\n",command.toUtf8().data()); + QStringList results; + textEditWaitReservation->clear(); + + if (m_console->dir_cmd(command, results)) { + foreach (QString line, results) { + line += "\n"; + textEditWaitReservation->insertPlainText(line); + } + } +} + +void StorStat::populateDevices() +{ + QString command = QString(".status storage=\"" + m_storage + "\" devices"); + if (mainWin->m_commandDebug) + Pmsg1(000, "sending command : %s\n",command.toUtf8().data()); + QStringList results; + textEditDevices->clear(); + + if (m_console->dir_cmd(command, results)) { + foreach (QString line, results) { + line += "\n"; + textEditDevices->insertPlainText(line); + } + } +} + +void StorStat::populateVolumes() +{ + QString command = QString(".status storage=\"" + m_storage + "\" volumes"); + if (mainWin->m_commandDebug) + Pmsg1(000, "sending command : %s\n",command.toUtf8().data()); + QStringList results; + textEditVolumes->clear(); + + if (m_console->dir_cmd(command, results)) { + foreach (QString line, results) { + line += "\n"; + textEditVolumes->insertPlainText(line); + } + } +} + +void StorStat::populateSpooling() +{ + QString command = QString(".status storage=\"" + m_storage + "\" spooling"); + if (mainWin->m_commandDebug) + Pmsg1(000, "sending command : %s\n",command.toUtf8().data()); + QStringList results; + textEditSpooling->clear(); + + if (m_console->dir_cmd(command, results)) { + foreach (QString line, results) { + line += "\n"; + textEditSpooling->insertPlainText(line); + } + } +} + +void StorStat::populateRunning() +{ + QString command = QString(".status storage=\"" + m_storage + "\" running"); + if (mainWin->m_commandDebug) + Pmsg1(000, "sending command : %s\n",command.toUtf8().data()); + QStringList results; + textEditRunning->clear(); + + if (m_console->dir_cmd(command, results)) { + foreach (QString line, results) { + line += "\n"; + textEditRunning->insertPlainText(line); + } + } +} + +/* + * Populate teminated table + */ +void StorStat::populateTerminated() +{ + QString command = QString(".status storage=\"" + m_storage + "\" terminated"); + if (mainWin->m_commandDebug) + Pmsg1(000, "sending command : %s\n",command.toUtf8().data()); + QStringList results; + QBrush blackBrush(Qt::black); + + terminatedTable->clear(); + QStringList headerlist = (QStringList() + << tr("Job Id") << tr("Job Level") << tr("Job Files") + << tr("Job Bytes") << tr("Job Status") << tr("Job Time") + << tr("Job Name")); + QStringList flaglist = 
(QStringList() + << "R" << "L" << "R" << "R" << "LC" + << "L" << "L"); + + terminatedTable->setColumnCount(headerlist.size()); + terminatedTable->setHorizontalHeaderLabels(headerlist); + + if (m_console->dir_cmd(command, results)) { + int row = 0; + QTableWidgetItem* p_tableitem; + terminatedTable->setRowCount(results.size()); + foreach (QString line, results) { + /* Iterate through the record returned from the query */ + QStringList fieldlist = line.split("\t"); + int column = 0; + QString statusCode(""); + /* Iterate through fields in the record */ + foreach (QString field, fieldlist) { + field = field.trimmed(); /* strip leading & trailing spaces */ + p_tableitem = new QTableWidgetItem(field, 1); + p_tableitem->setForeground(blackBrush); + p_tableitem->setFlags(0); + if (flaglist[column].contains("R")) + p_tableitem->setTextAlignment(Qt::AlignRight); + if (flaglist[column].contains("C")) { + if (field == "OK") + p_tableitem->setBackground(Qt::green); + else + p_tableitem->setBackground(Qt::red); + } + terminatedTable->setItem(row, column, p_tableitem); + column += 1; + } + row += 1; + } + } + terminatedTable->resizeColumnsToContents(); + terminatedTable->resizeRowsToContents(); + terminatedTable->verticalHeader()->hide(); +} + +/* + * When the treeWidgetItem in the page selector tree is singleclicked, Make sure + * The tree has been populated. + */ +void StorStat::PgSeltreeWidgetClicked() +{ + if (!m_populated) { + populateAll(); + m_populated=true; + } +} + +/* + * Virtual function override of pages function which is called when this page + * is visible on the stack + */ +void StorStat::currentStackItem() +{ + populateAll(); + timerDisplay->display(spinBox->value()); + if (!m_populated) { + m_populated=true; + } +} + +/* + * Function to create connections for context sensitive menu for this and + * the page selector + */ +void StorStat::createConnections() +{ + connect(actionRefresh, SIGNAL(triggered()), this, SLOT(populateAll())); + connect(tabWidget, SIGNAL(currentChanged(int)), this, SLOT(populateCurrentTab(int))); + connect(mountButton, SIGNAL(pressed()), this, SLOT(mountButtonPushed())); + connect(umountButton, SIGNAL(pressed()), this, SLOT(umountButtonPushed())); + connect(labelButton, SIGNAL(pressed()), this, SLOT(labelButtonPushed())); + connect(releaseButton, SIGNAL(pressed()), this, SLOT(releaseButtonPushed())); + terminatedTable->setContextMenuPolicy(Qt::ActionsContextMenu); + terminatedTable->addAction(actionRefresh); + connect(m_timer, SIGNAL(timeout()), this, SLOT(timerTriggered())); +} + +/* + * Save user settings associated with this page + */ +void StorStat::writeSettings() +{ + QSettings settings(m_console->m_dir->name(), "bat"); + settings.beginGroup(m_groupText); + settings.setValue(m_splitText, splitter->saveState()); + settings.setValue("refreshInterval", spinBox->value()); + settings.setValue("refreshCheck", checkBox->checkState()); + settings.endGroup(); + + settings.beginGroup("OpenOnExit"); + QString toWrite = "StorageStatus_" + m_storage; + settings.setValue(toWrite, 1); + settings.endGroup(); +} + +/* + * Read and restore user settings associated with this page + */ +void StorStat::readSettings() +{ + m_groupText = "StorStatPage"; + m_splitText = "splitterSizes_0"; + QSettings settings(m_console->m_dir->name(), "bat"); + settings.beginGroup(m_groupText); + if (settings.contains(m_splitText)) { splitter->restoreState(settings.value(m_splitText).toByteArray()); } + spinBox->setValue(settings.value("refreshInterval", 28).toInt()); + 
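/*
 * Illustrative sketch (editor's addition, not part of the Bacula tree):
 * the writeSettings()/readSettings() pair above persists per-page state by
 * grouping keys in QSettings and falling back to a default the first time
 * the page is opened.  A minimal standalone version of that pattern (the
 * organization, application, group and function names are invented for the
 * example) is:
 */
#include <QSettings>

static void saveRefreshInterval(int seconds)
{
   QSettings settings("example-org", "example-app");
   settings.beginGroup("StatusPage");
   settings.setValue("refreshInterval", seconds);
   settings.endGroup();
}

static int loadRefreshInterval()
{
   QSettings settings("example-org", "example-app");
   settings.beginGroup("StatusPage");
   int seconds = settings.value("refreshInterval", 28).toInt();  /* default 28s */
   settings.endGroup();
   return seconds;
}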
checkBox->setCheckState((Qt::CheckState)settings.value("refreshCheck", Qt::Checked).toInt()); + settings.endGroup(); + + timerDisplay->display(spinBox->value()); +} + +/* + * Populate the text edit window in the current tab + */ +void StorStat::populateCurrentTab(int index) +{ + if (index == 0) + populateHeader(); + if (index == 1) + populateWaitReservation(); + if (index == 2) + populateDevices(); + if (index == 3) + populateVolumes(); + if (index == 4) + populateSpooling(); + if (index == 5) + populateRunning(); +} + +/* + * execute mount in console + */ +void StorStat::mountButtonPushed() +{ + int haschanger = 3; + + /* Set up query QString and header QStringList */ + QString query("SELECT AutoChanger AS Changer" + " FROM Storage WHERE Name='" + m_storage + "'" + " ORDER BY Name" ); + + QStringList results; + /* This could be a log item */ + if (mainWin->m_sqlDebug) { + Pmsg1(000, "Storage query cmd : %s\n",query.toUtf8().data()); + } + if (m_console->sql_cmd(query, results)) { + int resultCount = results.count(); + if (resultCount == 1){ + QString resultline; + QString field; + QStringList fieldlist; + /* there will only be one of these */ + foreach (resultline, results) { + fieldlist = resultline.split("\t"); + int index = 0; + /* Iterate through fields in the record */ + foreach (field, fieldlist) { + field = field.trimmed(); /* strip leading & trailing spaces */ + haschanger = field.toInt(); + index++; + } + } + } + } + + Pmsg1(000, "haschanger is : %i\n", haschanger); + if (haschanger == 0){ + /* no autochanger, just execute the command in the console */ + QString cmd("mount storage=" + m_storage); + consoleCommand(cmd); + } else if (haschanger != 3) { + setConsoleCurrent(); + /* if this storage is an autochanger, lets ask for the slot */ + new mountDialog(m_console, m_storage); + } +} + +/* + * execute umount in console + */ +void StorStat::umountButtonPushed() +{ + QString cmd("umount storage=" + m_storage); + consoleCommand(cmd); +} + +/* Release a tape in the drive */ +void StorStat::releaseButtonPushed() +{ + QString cmd("release storage="); + cmd += m_storage; + consoleCommand(cmd); +} + +/* Label Media populating current storage by default */ +void StorStat::labelButtonPushed() +{ + new labelPage(m_storage); +} diff --git a/src/qt-console/status/storstat.h b/src/qt-console/status/storstat.h new file mode 100644 index 00000000..ab0dbb4e --- /dev/null +++ b/src/qt-console/status/storstat.h @@ -0,0 +1,75 @@ +#ifndef _STORSTAT_H_ +#define _STORSTAT_H_ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Dirk Bartley, March 2007 + */ + +#if QT_VERSION >= 0x050000 +#include +#else +#include +#endif +#include "ui_storstat.h" +#include "console.h" +#include "pages.h" + +class StorStat : public Pages, public Ui::StorStatForm +{ + Q_OBJECT + +public: + StorStat(QString &, QTreeWidgetItem *); + ~StorStat(); + virtual void PgSeltreeWidgetClicked(); + virtual void currentStackItem(); + +public slots: + void populateHeader(); + void populateTerminated(); + void populateRunning(); + void populateWaitReservation(); + void populateDevices(); + void populateVolumes(); + void populateSpooling(); + void populateAll(); + +private slots: + void timerTriggered(); + void populateCurrentTab(int); + void mountButtonPushed(); + void umountButtonPushed(); + void releaseButtonPushed(); + void labelButtonPushed(); + +private: + void createConnections(); + void writeSettings(); + void readSettings(); + bool m_populated; + QTextCursor *m_cursor; + void getFont(); + QString m_groupText; + QString m_splitText; + QTimer *m_timer; + QString m_storage; +}; + +#endif /* _STORSTAT_H_ */ diff --git a/src/qt-console/status/storstat.ui b/src/qt-console/status/storstat.ui new file mode 100644 index 00000000..a650b1ee --- /dev/null +++ b/src/qt-console/status/storstat.ui @@ -0,0 +1,250 @@ + + StorStatForm + + + + 0 + 0 + 694 + 393 + + + + Form + + + + + + Qt::Vertical + + + + + + + 6 + + + + Header + + + + + + + + + + Waitreservation + + + + + + + + + + Devices + + + + + + + + + + Volumes + + + + + + + + + + Spooling + + + + + + + + + + Running + + + + + + + + + + Misc + + + + + 10 + 10 + 168 + 60 + + + + + + + Mount + + + + + + + UMount + + + + + + + Label + + + + + + + Release + + + + + + + + + + + + + 141 + 0 + + + + + 141 + 16777215 + + + + Refresh Timer + + + + + 20 + 50 + 111 + 24 + + + + 5 + + + 999 + + + + + + 20 + 20 + 101 + 20 + + + + Do Refresh + + + + + + 20 + 80 + 101 + 31 + + + + + + + + + + + + + Qt::LeftToRight + + + <html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:13pt; font-weight:600;">Terminated Jobs</span></p></body></html> + + + Qt::AlignCenter + + + + + + + + + + + + + + :/images/view-refresh.png + + + Refresh + + + + + :/images/utilities-terminal.png + + + Cancel Running Job + + + + + :/images/utilities-terminal.png + + + Disable Scheduled Job + + + + + + + + diff --git a/src/qt-console/storage/content.cpp b/src/qt-console/storage/content.cpp new file mode 100644 index 00000000..f8e937dc --- /dev/null +++ b/src/qt-console/storage/content.cpp @@ -0,0 +1,345 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ + +#include "bat.h" +#include +#include +#include "content.h" +#include "label/label.h" +#include "mediainfo/mediainfo.h" +#include "mount/mount.h" +#include "util/fmtwidgetitem.h" +#include "status/storstat.h" + +// +// TODO: List tray +// List drives in autochanger +// use user selection to add slot= argument +// + +Content::Content(QString storage, QTreeWidgetItem *parentWidget) : Pages() +{ + setupUi(this); + pgInitialize(storage, parentWidget); + QTreeWidgetItem* thisitem = mainWin->getFromHash(this); + thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/package-x-generic.png"))); + + m_populated = false; + m_firstpopulation = true; + m_checkcurwidget = true; + m_currentStorage = storage; + + connect(pbUpdate, SIGNAL(clicked()), this, + SLOT(consoleUpdateSlots())); + + connect(pbLabel, SIGNAL(clicked()), this, + SLOT(consoleLabelStorage())); + + connect(pbMount, SIGNAL(clicked()), this, + SLOT(consoleMountStorage())); + + connect(pbUnmount, SIGNAL(clicked()), this, + SLOT(consoleUnMountStorage())); + + connect(pbStatus, SIGNAL(clicked()), this, + SLOT(statusStorageWindow())); + + connect(pbRelease, SIGNAL(clicked()), this, + SLOT(consoleRelease())); + + connect(tableContent, SIGNAL(itemDoubleClicked(QTableWidgetItem*)), this, + SLOT(showMediaInfo(QTableWidgetItem *))); + + dockPage(); + setCurrent(); +} + + +/* + * Subroutine to call class to show the log in the database from that job + */ +void Content::showMediaInfo(QTableWidgetItem * item) +{ + QTreeWidgetItem* pageSelectorTreeWidgetItem = mainWin->getFromHash(this); + int row = item->row(); + QString vol = tableContent->item(row, 1)->text(); + if (vol != "") { + new MediaInfo(pageSelectorTreeWidgetItem, vol); + } +} + +void table_get_selection(QTableWidget *table, QString &sel) +{ + QTableWidgetItem *item; + int current; + + /* The QT selection returns each cell, so you + * have x times the same row number... + * We take only one instance + */ + int s = table->rowCount(); + if (s == 0) { + /* No selection?? 
*/ + return; + } + bool *tab = (bool *)malloc(s * sizeof(bool)); + memset(tab, 0, s); + + foreach (item, table->selectedItems()) { + current = item->row(); + tab[current]=true; + } + + sel += "="; + + for(int i=0; i<s; i++) { + if (tab[i]) { + sel += table->item(i, 0)->text(); + sel += ","; + } + } + sel.chop(1); // remove trailing , or useless = + free(tab); +} + +/* Label Media populating current storage by default */ +void Content::consoleLabelStorage() +{ + QString sel; + table_get_selection(tableContent, sel); + if (sel == "") { + new labelPage(m_currentStorage); + } else { + QString cmd = "label barcodes slots"; + cmd += sel; + cmd += " storage=" + m_currentStorage; + consoleCommand(cmd); + } +} + +/* Mount currently selected storage */ +void Content::consoleMountStorage() +{ + setConsoleCurrent(); + /* if this storage is an autochanger, let's ask for the slot */ + new mountDialog(m_console, m_currentStorage); +} + +/* Unmount currently selected storage */ +void Content::consoleUnMountStorage() +{ + QString cmd("umount storage="); + cmd += m_currentStorage; + consoleCommand(cmd); +} + +void Content::statusStorageWindow() +{ + /* if one exists, then just set it current */ + bool found = false; + foreach(Pages *page, mainWin->m_pagehash) { + if (mainWin->currentConsole() == page->console()) { + if (page->name() == tr("Storage Status %1").arg(m_currentStorage)) { + found = true; + page->setCurrent(); + } + } + } + if (!found) { + QTreeWidgetItem *parentItem = mainWin->getFromHash(this); + new StorStat(m_currentStorage, parentItem); + } +} + +/* + * The main meat of the class!! The function that queries the director and + * creates the widgets with appropriate values. + */ +void Content::populateContent() +{ + char buf[200]; + time_t tim; + struct tm tm; + + QStringList results_all; + QString cmd("status slots drive=0 storage=\"" + m_currentStorage + "\""); + m_console->dir_cmd(cmd, results_all); + + Freeze frz(*tableContent); /* disable updating */ + Freeze frz2(*tableTray); + Freeze frz3(*tableDrive); + + tableContent->clearContents(); + tableTray->clearContents(); + tableDrive->clearContents(); + + // take only valid records, TODO: Add D to get drive status + QStringList results = results_all.filter(QRegExp("^[IS]\\|[0-9]+\\|")); + tableContent->setRowCount(results.size()); + + QStringList io_results = results_all.filter(QRegExp("^I\\|[0-9]+\\|")); + tableTray->setRowCount(io_results.size()); + + QString resultline; + QStringList fieldlist; + int row = 0, row_io=0; + + foreach (resultline, results) { + fieldlist = resultline.split("|"); + if (fieldlist.size() < 10) { + Pmsg1(0, "Discarding %s\n", resultline.toUtf8().data()); + continue; /* some fields missing, ignore row */ + } + + int index=0; + QStringListIterator fld(fieldlist); + TableItemFormatter slotitem(*tableContent, row); + + /* Slot type */ + if (fld.next() == "I") { + TableItemFormatter ioitem(*tableTray, row_io++); + ioitem.setNumericFld(0, fieldlist[1]); + ioitem.setTextFld(1, fieldlist[3]); + } + + /* Slot */ + slotitem.setNumericFld(index++, fld.next()); + + /* Real Slot */ + if (fld.next() != "") { + + /* Volume */ + slotitem.setTextFld(index++, fld.next()); + + /* Bytes */ + slotitem.setBytesFld(index++, fld.next()); + + /* Status */ + slotitem.setVolStatusFld(index++, fld.next()); + + /* MediaType */ + slotitem.setTextFld(index++, fld.next()); + + /* Pool */ + slotitem.setTextFld(index++, fld.next()); + + tim = fld.next().toInt(); + if (tim > 0) { + /* LastW */ + localtime_r(&tim, &tm); + strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", &tm); +
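+ /* buf now holds the last-written (LastW) time as YYYY-MM-DD HH:MM:SS; it is stored in the next column below, and the expiration time that follows is formatted the same way */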
slotitem.setTextFld(index++, QString(buf)); + + /* Expire */ + tim = fld.next().toInt(); + localtime_r(&tim, &tm); + strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", &tm); + slotitem.setTextFld(index++, QString(buf)); + } + } + row++; + } + + + tableContent->verticalHeader()->hide(); + tableContent->sortByColumn(0, Qt::AscendingOrder); + tableContent->setSortingEnabled(true); + tableContent->resizeColumnsToContents(); + tableContent->resizeRowsToContents(); + + tableContent->setEditTriggers(QAbstractItemView::NoEditTriggers); + m_populated = true; + + tableTray->verticalHeader()->hide(); + tableTray->setEditTriggers(QAbstractItemView::NoEditTriggers); + + tableDrive->verticalHeader()->hide(); + /* Get count of rows needed (Drives) */ + QStringList drives = results_all.filter(QRegExp("^D\\|[0-9]+\\|")); + /* Ensure we have sufficient rows for Drive display */ + tableDrive->setRowCount(drives.size()); + row = 0; + foreach (resultline, drives) { + fieldlist = resultline.split("|"); + if (fieldlist.size() < 4) { + continue; /* some fields missing, ignore row */ + } + int index=0; + QStringListIterator fld(fieldlist); + TableItemFormatter slotitem(*tableDrive, row); + + /* Drive type */ + fld.next(); + + /* Number */ + slotitem.setNumericFld(index++, fld.next()); + + /* Slot */ + fld.next(); + + /* Volume */ + slotitem.setTextFld(index++, fld.next()); + + row++; + } + + tableDrive->resizeRowsToContents(); + tableDrive->setEditTriggers(QAbstractItemView::NoEditTriggers); +} + +/* + * Virtual function which is called when this page is visible on the stack + */ +void Content::currentStackItem() +{ + if(!m_populated) { + populateContent(); + } +} + +void Content::treeItemChanged(QTreeWidgetItem *, QTreeWidgetItem *) +{ + +} + +/* Update Slots */ +void Content::consoleUpdateSlots() +{ + QString sel = ""; + table_get_selection(tableContent, sel); + + QString cmd("update slots"); + if (sel != "") { + cmd += sel; + } + cmd += " drive=0 storage=" + m_currentStorage; + + Pmsg1(0, "cmd=%s\n", cmd.toUtf8().data()); + + consoleCommand(cmd); + populateContent(); +} + +/* Release a tape in the drive */ +void Content::consoleRelease() +{ + QString cmd("release storage="); + cmd += m_currentStorage; + consoleCommand(cmd); +} diff --git a/src/qt-console/storage/content.h b/src/qt-console/storage/content.h new file mode 100644 index 00000000..36fa7b4e --- /dev/null +++ b/src/qt-console/storage/content.h @@ -0,0 +1,62 @@ +#ifndef _CONTENT_H_ +#define _CONTENT_H_ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ + +#if QT_VERSION >= 0x050000 +#include +#else +#include +#endif +#include "ui_content.h" +#include "console.h" +#include "pages.h" + +class Content : public Pages, public Ui::ContentForm +{ + Q_OBJECT + +public: + Content(QString storage, QTreeWidgetItem *parentWidget); +// virtual void PgSeltreeWidgetClicked(); + virtual void currentStackItem(); + +public slots: + void treeItemChanged(QTreeWidgetItem *, QTreeWidgetItem *); + + void consoleRelease(); + void consoleUpdateSlots(); + void consoleLabelStorage(); + void consoleMountStorage(); + void statusStorageWindow(); + void consoleUnMountStorage(); + void showMediaInfo(QTableWidgetItem * item); + +private slots: + void populateContent(); + +private: + bool m_currentAutoChanger; + bool m_populated; + bool m_firstpopulation; + bool m_checkcurwidget; + QString m_currentStorage; +}; + +#endif /* _STORAGE_H_ */ diff --git a/src/qt-console/storage/content.ui b/src/qt-console/storage/content.ui new file mode 100644 index 00000000..eb115f6e --- /dev/null +++ b/src/qt-console/storage/content.ui @@ -0,0 +1,295 @@ + + ContentForm + + + + 0 + 0 + 949 + 695 + + + + Form + + + + + + + + + 0 + 0 + + + + + 240 + 110 + + + + Actions + + + + + + Update slots + + + :/images/view-refresh.png + + + + + + + Label + + + :/images/label.png + + + + + + + false + + + Move to tray + + + :/images/extern.png + + + + + + + false + + + Empty tray + + + :/images/intern.png + + + + + + + Mount + + + + + + + Unmount + + + + + + + Status + + + + + + + Release + + + + + + + + + + + 0 + 0 + + + + + 241 + 221 + + + + + 241 + 16777215 + + + + Drives + + + + + + + 0 + 0 + + + + + 220 + 192 + + + + QAbstractItemView::SelectRows + + + + Drive + + + + + Volume + + + + + + + + + + + + 0 + 0 + + + + + 240 + 16777215 + + + + Import/Export + + + + + + + 0 + 0 + + + + + 100 + 0 + + + + QAbstractItemView::SelectRows + + + false + + + + Slot + + + + + Volume + + + + + + + + + + + + + Content + + + + + + true + + + QAbstractItemView::ExtendedSelection + + + QAbstractItemView::SelectRows + + + 0 + + + + Slot + + + + + Volume + + + + + Bytes + + + + + Status + + + + + Media Type + + + + + Pool + + + + + Last Written + + + + + When expire? + + + + + + + + + + + + + + diff --git a/src/qt-console/storage/storage.cpp b/src/qt-console/storage/storage.cpp new file mode 100644 index 00000000..a21f995f --- /dev/null +++ b/src/qt-console/storage/storage.cpp @@ -0,0 +1,474 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ + +/* + * + * Storage Class + * + * Dirk Bartley, March 2007 + * + */ + +#include "bat.h" +#include +#include +#include "storage.h" +#include "content.h" +#include "label/label.h" +#include "mount/mount.h" +#include "status/storstat.h" +#include "util/fmtwidgetitem.h" + +Storage::Storage() : Pages() +{ + setupUi(this); + pgInitialize(tr("Storage")); + QTreeWidgetItem* thisitem = mainWin->getFromHash(this); + thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/package-x-generic.png"))); + + /* mp_treeWidget, Storage Tree Tree Widget inherited from ui_storage.h */ + m_populated = false; + m_firstpopulation = true; + m_checkcurwidget = true; + m_closeable = false; + m_currentStorage = ""; + /* add context sensitive menu items specific to this classto the page + * selector tree. m_contextActions is QList of QActions */ + m_contextActions.append(actionRefreshStorage); +} + +Storage::~Storage() +{ + if (m_populated) + writeExpandedSettings(); +} + +/* + * The main meat of the class!! The function that querries the director and + * creates the widgets with appropriate values. + */ +void Storage::populateTree() +{ + if (m_populated) + writeExpandedSettings(); + m_populated = true; + + Freeze frz(*mp_treeWidget); /* disable updating */ + + m_checkcurwidget = false; + mp_treeWidget->clear(); + m_checkcurwidget = true; + + QStringList headerlist = (QStringList() << tr("Name") << tr("Id") + << tr("Changer") << tr("Slot") << tr("Status") << tr("Enabled") << tr("Pool") + << tr("Media Type") ); + + m_topItem = new QTreeWidgetItem(mp_treeWidget); + m_topItem->setText(0, tr("Storage")); + m_topItem->setData(0, Qt::UserRole, 0); + m_topItem->setExpanded(true); + + mp_treeWidget->setColumnCount(headerlist.count()); + mp_treeWidget->setHeaderLabels(headerlist); + + QSettings settings(m_console->m_dir->name(), "bat"); + settings.beginGroup("StorageTreeExpanded"); + + bool first = true; + QString storage_comsep(""); + QString storageName; + foreach(storageName, m_console->storage_list){ + if (first) { + storage_comsep += "'" + storageName + "'"; + first = false; + } + else + storage_comsep += ",'" + storageName + "'"; + } + if (storage_comsep != "") { + + /* Set up query QString and header QStringList */ + QString query("SELECT" + " Name AS StorageName," + " StorageId AS ID, AutoChanger AS Changer" + " FROM Storage " + " WHERE StorageId IN (SELECT MAX(StorageId) FROM Storage WHERE"); + query += " Name IN (" + storage_comsep + ")"; + query += " GROUP BY Name) ORDER BY Name"; + + QStringList results; + /* This could be a log item */ + if (mainWin->m_sqlDebug) { + Pmsg1(000, "Storage query cmd : %s\n",query.toUtf8().data()); + } + if (m_console->sql_cmd(query, results)) { + + QStringList fieldlist; + foreach (QString resultline, results) { + fieldlist = resultline.split("\t"); + if (fieldlist.size() != 3) { + Pmsg1(000, "Unexpected line %s", resultline.toUtf8().data()); + continue; + } + storageName = fieldlist.takeFirst(); + if (m_firstpopulation) { + settingsOpenStatus(storageName); + } + TreeItemFormatter storageItem(*m_topItem, 1); + storageItem.setTextFld(0, storageName); + if(settings.contains(storageName)) + storageItem.widget()->setExpanded(settings.value(storageName).toBool()); + else + storageItem.widget()->setExpanded(true); + + int index = 1; + QStringListIterator fld(fieldlist); + + /* storage id */ + storageItem.setNumericFld(index++, fld.next() ); + + /* changer */ + QString changer = fld.next(); + storageItem.setBoolFld(index++, changer); + + if (changer == "1") + 
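+ /* only autochangers get a child list of their loaded volumes; fieldlist.first() is the StorageId here, since the Name was removed above with takeFirst() */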
mediaList(storageItem.widget(), fieldlist.first()); + } + } + } + /* Resize the columns */ + for(int cnter=0; cnter<headerlist.count(); cnter++) { + mp_treeWidget->resizeColumnToContents(cnter); + } + m_firstpopulation = false; +} + +/* + * For autochangers, a query to show the tapes in the changer. + */ +void Storage::mediaList(QTreeWidgetItem *parent, const QString &storageID) +{ + QString query("SELECT Media.VolumeName AS Media, Media.Slot AS Slot," + " Media.VolStatus AS VolStatus, Media.Enabled AS Enabled," + " Pool.Name AS MediaPool, Media.MediaType AS MediaType" + " From Media" + " JOIN Pool ON (Media.PoolId=Pool.PoolId)" + " WHERE Media.StorageId='" + storageID + "'" + " AND Media.InChanger<>0" + " ORDER BY Media.Slot"); + + QStringList results; + /* This could be a log item */ + if (mainWin->m_sqlDebug) { + Pmsg1(000, "Storage query cmd : %s\n",query.toUtf8().data()); + } + if (m_console->sql_cmd(query, results)) { + QString resultline; + QString field; + QStringList fieldlist; + + foreach (resultline, results) { + fieldlist = resultline.split("\t"); + if (fieldlist.size() < 6) + continue; + + /* Iterate through fields in the record */ + QStringListIterator fld(fieldlist); + int index = 0; + TreeItemFormatter fmt(*parent, 2); + + /* volname */ + fmt.setTextFld(index++, fld.next()); + + /* skip the next two columns, unused by media */ + index += 2; + + /* slot */ + fmt.setNumericFld(index++, fld.next()); + + /* status */ + fmt.setVolStatusFld(index++, fld.next()); + + /* enabled */ + fmt.setBoolFld(index++, fld.next()); + + /* pool */ + fmt.setTextFld(index++, fld.next()); + + /* media type */ + fmt.setTextFld(index++, fld.next()); + + } + } +} + +/* + * When the treeWidgetItem in the page selector tree is single-clicked, make sure + * the tree has been populated. + */ +void Storage::PgSeltreeWidgetClicked() +{ + if(!m_populated) { + populateTree(); + createContextMenu(); + } + if (!isOnceDocked()) { + dockPage(); + } +} + +/* + * Added to set the context menu policy based on currently active treeWidgetItem + * signaled by currentItemChanged + */ +void Storage::treeItemChanged(QTreeWidgetItem *currentwidgetitem, QTreeWidgetItem *previouswidgetitem ) +{ + /* m_checkcurwidget checks whether this is during a refresh, which would segfault */ + if (m_checkcurwidget) { + /* The Previous item */ + if (previouswidgetitem) { /* avoid a segfault if first time */ + int treedepth = previouswidgetitem->data(0, Qt::UserRole).toInt(); + if (treedepth == 1){ + mp_treeWidget->removeAction(actionStatusStorageInConsole); + mp_treeWidget->removeAction(actionStatusStorageWindow); + mp_treeWidget->removeAction(actionLabelStorage); + mp_treeWidget->removeAction(actionMountStorage); + mp_treeWidget->removeAction(actionUnMountStorage); + mp_treeWidget->removeAction(actionUpdateSlots); + mp_treeWidget->removeAction(actionUpdateSlotsScan); + mp_treeWidget->removeAction(actionRelease); + } + } + + int treedepth = currentwidgetitem->data(0, Qt::UserRole).toInt(); + if (treedepth == 1){ + /* set a hold variable to the storage name in case the context sensitive + * menu is used */ + m_currentStorage = currentwidgetitem->text(0); + m_currentAutoChanger = currentwidgetitem->text(2) == tr("Yes"); + mp_treeWidget->addAction(actionStatusStorageInConsole); + mp_treeWidget->addAction(actionStatusStorageWindow); + mp_treeWidget->addAction(actionLabelStorage); + mp_treeWidget->addAction(actionMountStorage); + mp_treeWidget->addAction(actionUnMountStorage); + mp_treeWidget->addAction(actionRelease); + QString text; + text = tr("Status Storage 
\"%1\"").arg(m_currentStorage);; + actionStatusStorageInConsole->setText(text); + text = tr("Status Storage \"%1\" in Window").arg(m_currentStorage);; + actionStatusStorageWindow->setText(text); + text = tr("Label media in Storage \"%1\"").arg(m_currentStorage); + actionLabelStorage->setText(text); + text = tr("Mount media in Storage \"%1\"").arg(m_currentStorage); + actionMountStorage->setText(text); + text = tr("\"UN\" Mount media in Storage \"%1\"").arg(m_currentStorage); + text = tr("Release media in Storage \"%1\"").arg(m_currentStorage); + actionRelease->setText(text); + if (m_currentAutoChanger) { + mp_treeWidget->addAction(actionUpdateSlots); + mp_treeWidget->addAction(actionUpdateSlotsScan); + text = tr("Barcode Scan media in Storage \"%1\"").arg(m_currentStorage); + actionUpdateSlots->setText(text); + text = tr("Read scan media in Storage \"%1\"").arg( m_currentStorage); + actionUpdateSlotsScan->setText(text); + } + } + } +} + +/* + * Setup a context menu + * Made separate from populate so that it would not create context menu over and + * over as the tree is repopulated. + */ +void Storage::createContextMenu() +{ + mp_treeWidget->setContextMenuPolicy(Qt::ActionsContextMenu); + mp_treeWidget->addAction(actionRefreshStorage); + connect(mp_treeWidget, SIGNAL( + currentItemChanged(QTreeWidgetItem *, QTreeWidgetItem *)), + this, SLOT(treeItemChanged(QTreeWidgetItem *, QTreeWidgetItem *))); + /* connect to the action specific to this pages class */ + connect(actionRefreshStorage, SIGNAL(triggered()), this, + SLOT(populateTree())); + connect(actionStatusStorageInConsole, SIGNAL(triggered()), this, + SLOT(consoleStatusStorage())); + connect(actionLabelStorage, SIGNAL(triggered()), this, + SLOT(consoleLabelStorage())); + connect(actionMountStorage, SIGNAL(triggered()), this, + SLOT(consoleMountStorage())); + connect(actionUnMountStorage, SIGNAL(triggered()), this, + SLOT(consoleUnMountStorage())); + connect(actionUpdateSlots, SIGNAL(triggered()), this, + SLOT(consoleUpdateSlots())); + connect(actionUpdateSlotsScan, SIGNAL(triggered()), this, + SLOT(consoleUpdateSlotsScan())); + connect(actionRelease, SIGNAL(triggered()), this, + SLOT(consoleRelease())); + connect(actionStatusStorageWindow, SIGNAL(triggered()), this, + SLOT(statusStorageWindow())); + connect(mp_treeWidget, SIGNAL(itemDoubleClicked(QTreeWidgetItem *, int)), + this, SLOT(contentWindow())); + +} + +void Storage::contentWindow() +{ + if (m_currentStorage != "" && m_currentAutoChanger) { + QTreeWidgetItem *parentItem = mainWin->getFromHash(this); + new Content(m_currentStorage, parentItem); + } +} + +/* + * Virtual function which is called when this page is visible on the stack + */ +void Storage::currentStackItem() +{ + if(!m_populated) { + populateTree(); + /* Create the context menu for the storage tree */ + createContextMenu(); + } +} + +/* + * Functions to respond to local context sensitive menu sending console commands + * If I could figure out how to make these one function passing a string, Yaaaaaa + */ +void Storage::consoleStatusStorage() +{ + QString cmd("status storage="); + cmd += m_currentStorage; + consoleCommand(cmd); +} + +/* Label Media populating current storage by default */ +void Storage::consoleLabelStorage() +{ + new labelPage(m_currentStorage); +} + +/* Mount currently selected storage */ +void Storage::consoleMountStorage() +{ + if (m_currentAutoChanger == 0){ + /* no autochanger, just execute the command in the console */ + QString cmd("mount storage="); + cmd += m_currentStorage; + 
consoleCommand(cmd); + } else { + setConsoleCurrent(); + /* if this storage is an autochanger, let's ask for the slot */ + new mountDialog(m_console, m_currentStorage); + } +} + +/* Unmount currently selected storage */ +void Storage::consoleUnMountStorage() +{ + QString cmd("umount storage="); + cmd += m_currentStorage; + consoleCommand(cmd); +} + +/* Update Slots */ +void Storage::consoleUpdateSlots() +{ + QString cmd("update slots storage="); + cmd += m_currentStorage; + consoleCommand(cmd); +} + +/* Update Slots Scan */ +void Storage::consoleUpdateSlotsScan() +{ + QString cmd("update slots scan storage="); + cmd += m_currentStorage; + consoleCommand(cmd); +} + +/* Release a tape in the drive */ +void Storage::consoleRelease() +{ + QString cmd("release storage="); + cmd += m_currentStorage; + consoleCommand(cmd); +} + +/* + * Open a status storage window + */ +void Storage::statusStorageWindow() +{ + /* if one exists, then just set it current */ + bool found = false; + foreach(Pages *page, mainWin->m_pagehash) { + if (mainWin->currentConsole() == page->console()) { + if (page->name() == tr("Storage Status %1").arg(m_currentStorage)) { + found = true; + page->setCurrent(); + } + } + } + if (!found) { + QTreeWidgetItem *parentItem = mainWin->getFromHash(this); + new StorStat(m_currentStorage, parentItem); + } +} + +/* + * Write settings to save the expanded state of each storage item in the tree + */ +void Storage::writeExpandedSettings() +{ + QSettings settings(m_console->m_dir->name(), "bat"); + settings.beginGroup("StorageTreeExpanded"); + int childcount = m_topItem->childCount(); + for (int cnt=0; cnt<childcount; cnt++) { + QTreeWidgetItem *item = m_topItem->child(cnt); + settings.setValue(item->text(0), item->isExpanded()); + } + settings.endGroup(); +} + +/* + * On the first population, check whether storage status pages were open when the + * program last closed; if so, open them again. + */ +void Storage::settingsOpenStatus(QString &storage) +{ + QSettings settings(m_console->m_dir->name(), "bat"); + + settings.beginGroup("OpenOnExit"); + QString toRead = "StorageStatus_" + storage; + if (settings.value(toRead) == 1) { + if (mainWin->m_sqlDebug) { + Pmsg1(000, "Do open Storage Status window for : %s\n", storage.toUtf8().data()); + } + new StorStat(storage, mainWin->getFromHash(this)); + setCurrent(); + mainWin->getFromHash(this)->setExpanded(true); + } else { + if (mainWin->m_sqlDebug) { + Pmsg1(000, "Do NOT open Storage Status window for : %s\n", storage.toUtf8().data()); + } + } + settings.endGroup(); +} diff --git a/src/qt-console/storage/storage.h b/src/qt-console/storage/storage.h new file mode 100644 index 00000000..5352c48b --- /dev/null +++ b/src/qt-console/storage/storage.h @@ -0,0 +1,74 @@ +#ifndef _STORAGE_H_ +#define _STORAGE_H_ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Dirk Bartley, March 2007 + */ + +#if QT_VERSION >= 0x050000 +#include +#else +#include +#endif +#include "ui_storage.h" +#include "console.h" +#include "pages.h" + +class Storage : public Pages, public Ui::StorageForm +{ + Q_OBJECT + +public: + Storage(); + ~Storage(); + virtual void PgSeltreeWidgetClicked(); + virtual void currentStackItem(); + +public slots: + void treeItemChanged(QTreeWidgetItem *, QTreeWidgetItem *); + +private slots: + void populateTree(); + void consoleStatusStorage(); + void consoleLabelStorage(); + void consoleMountStorage(); + void consoleUnMountStorage(); + void consoleUpdateSlots(); + void consoleUpdateSlotsScan(); + void consoleRelease(); + void statusStorageWindow(); + void contentWindow(); + +private: + void createContextMenu(); + void mediaList(QTreeWidgetItem *parent, const QString &storageID); + void settingsOpenStatus(QString& storage); + QString m_currentStorage; + bool m_currentAutoChanger; + bool m_populated; + bool m_firstpopulation; + bool m_checkcurwidget; + void writeExpandedSettings(); + QTreeWidgetItem *m_topItem; +}; + +void table_get_selection(QTableWidget *table, QString &sel); + +#endif /* _STORAGE_H_ */ diff --git a/src/qt-console/storage/storage.ui b/src/qt-console/storage/storage.ui new file mode 100644 index 00000000..3ed7a475 --- /dev/null +++ b/src/qt-console/storage/storage.ui @@ -0,0 +1,142 @@ + + StorageForm + + + + 0 + 0 + 467 + 383 + + + + Storage Tree + + + + 9 + + + 9 + + + 9 + + + 9 + + + 6 + + + 6 + + + + + + 1 + + + + + + + + :/images/view-refresh.png + + + Refresh Storage List + + + Requery the director for the list of storage objects. + + + + + :/images/status-console.png + + + Status Storage In Console + + + Status Storage In Console + + + + + :/images/label.png + + + Label Storage + + + Label Storage + + + + + :/images/cartridge.png + + + MountStorage + + + MountStorage + + + + + :/images/cartridge.png + + + UnMount Storage + + + UnMount Storage + + + + + :/images/package-x-generic.png + + + Update Slots + + + Update Slots + + + + + :/images/package-x-generic.png + + + Update Slots Scan + + + Update Slots Scan + + + + + :/images/cartridge.png + + + Release + + + + + :/images/status.png + + + Status Storage Window + + + + + + + + diff --git a/src/qt-console/testprogs/examp/dock.pro b/src/qt-console/testprogs/examp/dock.pro new file mode 100644 index 00000000..a372d54b --- /dev/null +++ b/src/qt-console/testprogs/examp/dock.pro @@ -0,0 +1,15 @@ +CONFIG += qt debug + +TEMPLATE = app +TARGET = main +DEPENDPATH += . +INCLUDEPATH += .. 
+MOC_DIR = moc +OBJECTS_DIR = obj +UI_DIR = ui + +# Main window +#RESOURCES = dockwidgets.qrc +HEADERS += mainwindow.h +SOURCES += mainwindow.cpp main.cpp + diff --git a/src/qt-console/testprogs/examp/dockwidgets.qrc b/src/qt-console/testprogs/examp/dockwidgets.qrc new file mode 100644 index 00000000..4414c630 --- /dev/null +++ b/src/qt-console/testprogs/examp/dockwidgets.qrc @@ -0,0 +1,8 @@ + + + images/new.png + images/print.png + images/save.png + images/undo.png + + diff --git a/src/qt-console/testprogs/examp/main.cpp b/src/qt-console/testprogs/examp/main.cpp new file mode 100644 index 00000000..7786e16b --- /dev/null +++ b/src/qt-console/testprogs/examp/main.cpp @@ -0,0 +1,12 @@ +#include + + #include "mainwindow.h" + + int main(int argc, char *argv[]) + { + QApplication app(argc, argv); +// Q_INIT_RESOURCE(dockwidgets); + MainWindow mainWin; + mainWin.show(); + return app.exec(); + } diff --git a/src/qt-console/testprogs/examp/mainwindow.cpp b/src/qt-console/testprogs/examp/mainwindow.cpp new file mode 100644 index 00000000..ebe4e80e --- /dev/null +++ b/src/qt-console/testprogs/examp/mainwindow.cpp @@ -0,0 +1,302 @@ +/**************************************************************************** + * ** + * ** Copyright (C) 2005-2007 Trolltech ASA. All rights reserved. + * ** + * ** This file is part of the example classes of the Qt Toolkit. + * ** + * ** This file may be used under the terms of the GNU General Public + * ** License version 2.0 as published by the Free Software Foundation + * ** and appearing in the file LICENSE.GPL included in the packaging of + * ** this file. Please review the following information to ensure GNU + * ** General Public Licensing requirements will be met: + * ** http://www.trolltech.com/products/qt/opensource.html + * ** + * ** If you are unsure which license is appropriate for your use, please + * ** review the following information: + * ** http://www.trolltech.com/products/qt/licensing.html or contact the + * ** sales department at sales@trolltech.com. + * ** + * ** This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE + * ** WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * ** + * ****************************************************************************/ + + #include + + #include "mainwindow.h" + + MainWindow::MainWindow() + { + textEdit = new QTextEdit; + setCentralWidget(textEdit); + + createActions(); + createMenus(); + createToolBars(); + createStatusBar(); + createDockWindows(); + + setWindowTitle(tr("Dock Widgets")); + + newLetter(); + } + + void MainWindow::newLetter() + { + textEdit->clear(); + + QTextCursor cursor(textEdit->textCursor()); + cursor.movePosition(QTextCursor::Start); + QTextFrame *topFrame = cursor.currentFrame(); + QTextFrameFormat topFrameFormat = topFrame->frameFormat(); + topFrameFormat.setPadding(16); + topFrame->setFrameFormat(topFrameFormat); + + QTextCharFormat textFormat; + QTextCharFormat boldFormat; + boldFormat.setFontWeight(QFont::Bold); + QTextCharFormat italicFormat; + italicFormat.setFontItalic(true); + + QTextTableFormat tableFormat; + tableFormat.setBorder(1); + tableFormat.setCellPadding(16); + tableFormat.setAlignment(Qt::AlignRight); + cursor.insertTable(1, 1, tableFormat); + cursor.insertText("The Firm", boldFormat); + cursor.insertBlock(); + cursor.insertText("321 City Street", textFormat); + cursor.insertBlock(); + cursor.insertText("Industry Park"); + cursor.insertBlock(); + cursor.insertText("Some Country"); + cursor.setPosition(topFrame->lastPosition()); + cursor.insertText(QDate::currentDate().toString("d MMMM yyyy"), textFormat); + cursor.insertBlock(); + cursor.insertBlock(); + cursor.insertText("Dear ", textFormat); + cursor.insertText("NAME", italicFormat); + cursor.insertText(",", textFormat); + for (int i = 0; i < 3; ++i) + cursor.insertBlock(); + cursor.insertText(tr("Yours sincerely,"), textFormat); + for (int i = 0; i < 3; ++i) + cursor.insertBlock(); + cursor.insertText("The Boss", textFormat); + cursor.insertBlock(); + cursor.insertText("ADDRESS", italicFormat); + } + + void MainWindow::print() + { + QTextDocument *document = textEdit->document(); + QPrinter printer; + + QPrintDialog *dlg = new QPrintDialog(&printer, this); + if (dlg->exec() != QDialog::Accepted) + return; + + document->print(&printer); + + statusBar()->showMessage(tr("Ready"), 2000); + } + + void MainWindow::save() + { + QString fileName = QFileDialog::getSaveFileName(this, + tr("Choose a file name"), ".", + tr("HTML (*.html *.htm)")); + if (fileName.isEmpty()) + return; + QFile file(fileName); + if (!file.open(QFile::WriteOnly | QFile::Text)) { + QMessageBox::warning(this, tr("Dock Widgets"), + tr("Cannot write file %1:\n%2.") + .arg(fileName) + .arg(file.errorString())); + return; + } + + QTextStream out(&file); + QApplication::setOverrideCursor(Qt::WaitCursor); + out << textEdit->toHtml(); + QApplication::restoreOverrideCursor(); + + statusBar()->showMessage(tr("Saved '%1'").arg(fileName), 2000); + } + + void MainWindow::undo() + { + QTextDocument *document = textEdit->document(); + document->undo(); + } + + void MainWindow::insertCustomer(const QString &customer) + { + if (customer.isEmpty()) + return; + QStringList customerList = customer.split(", "); + QTextDocument *document = textEdit->document(); + QTextCursor cursor = document->find("NAME"); + if (!cursor.isNull()) { + cursor.beginEditBlock(); + cursor.insertText(customerList.at(0)); + QTextCursor oldcursor = cursor; + cursor = document->find("ADDRESS"); + if (!cursor.isNull()) { + for (int i = 1; i < customerList.size(); ++i) { + cursor.insertBlock(); + cursor.insertText(customerList.at(i)); + } + cursor.endEditBlock(); + } + else + oldcursor.endEditBlock(); + } 
+ } + + void MainWindow::addParagraph(const QString ¶graph) + { + if (paragraph.isEmpty()) + return; + QTextDocument *document = textEdit->document(); + QTextCursor cursor = document->find(tr("Yours sincerely,")); + if (cursor.isNull()) + return; + cursor.beginEditBlock(); + cursor.movePosition(QTextCursor::PreviousBlock, QTextCursor::MoveAnchor, 2); + cursor.insertBlock(); + cursor.insertText(paragraph); + cursor.insertBlock(); + cursor.endEditBlock(); + + } + + void MainWindow::about() + { + QMessageBox::about(this, tr("About Dock Widgets"), + tr("The Dock Widgets example demonstrates how to " + "use Qt's dock widgets. You can enter your own text, " + "click a customer to add a customer name and " + "address, and click standard paragraphs to add them.")); + } + + void MainWindow::createActions() + { + newLetterAct = new QAction(QIcon(":/images/new.png"), tr("&New Letter"), + this); + newLetterAct->setShortcut(tr("Ctrl+N")); + newLetterAct->setStatusTip(tr("Create a new form letter")); + connect(newLetterAct, SIGNAL(triggered()), this, SLOT(newLetter())); + + saveAct = new QAction(QIcon(":/images/save.png"), tr("&Save..."), this); + saveAct->setShortcut(tr("Ctrl+S")); + saveAct->setStatusTip(tr("Save the current form letter")); + connect(saveAct, SIGNAL(triggered()), this, SLOT(save())); + + printAct = new QAction(QIcon(":/images/print.png"), tr("&Print..."), this); + printAct->setShortcut(tr("Ctrl+P")); + printAct->setStatusTip(tr("Print the current form letter")); + connect(printAct, SIGNAL(triggered()), this, SLOT(print())); + + undoAct = new QAction(QIcon(":/images/undo.png"), tr("&Undo"), this); + undoAct->setShortcut(tr("Ctrl+Z")); + undoAct->setStatusTip(tr("Undo the last editing action")); + connect(undoAct, SIGNAL(triggered()), this, SLOT(undo())); + + quitAct = new QAction(tr("&Quit"), this); + quitAct->setShortcut(tr("Ctrl+Q")); + quitAct->setStatusTip(tr("Quit the application")); + connect(quitAct, SIGNAL(triggered()), this, SLOT(close())); + + aboutAct = new QAction(tr("&About"), this); + aboutAct->setStatusTip(tr("Show the application's About box")); + connect(aboutAct, SIGNAL(triggered()), this, SLOT(about())); + + aboutQtAct = new QAction(tr("About &Qt"), this); + aboutQtAct->setStatusTip(tr("Show the Qt library's About box")); + connect(aboutQtAct, SIGNAL(triggered()), qApp, SLOT(aboutQt())); + } + + void MainWindow::createMenus() + { + fileMenu = menuBar()->addMenu(tr("&File")); + fileMenu->addAction(newLetterAct); + fileMenu->addAction(saveAct); + fileMenu->addAction(printAct); + fileMenu->addSeparator(); + fileMenu->addAction(quitAct); + + editMenu = menuBar()->addMenu(tr("&Edit")); + editMenu->addAction(undoAct); + + viewMenu = menuBar()->addMenu(tr("&View")); + + menuBar()->addSeparator(); + + helpMenu = menuBar()->addMenu(tr("&Help")); + helpMenu->addAction(aboutAct); + helpMenu->addAction(aboutQtAct); + } + + void MainWindow::createToolBars() + { + fileToolBar = addToolBar(tr("File")); + fileToolBar->addAction(newLetterAct); + fileToolBar->addAction(saveAct); + fileToolBar->addAction(printAct); + + editToolBar = addToolBar(tr("Edit")); + editToolBar->addAction(undoAct); + } + + void MainWindow::createStatusBar() + { + statusBar()->showMessage(tr("Ready")); + } + + void MainWindow::createDockWindows() + { + QDockWidget *dock = new QDockWidget(tr("Customers"), this); + dock->setAllowedAreas(Qt::LeftDockWidgetArea | Qt::RightDockWidgetArea); + customerList = new QListWidget(dock); + customerList->addItems(QStringList() + << "John Doe, Harmony Enterprises, 12 Lakeside, 
Ambleton" + << "Jane Doe, Memorabilia, 23 Watersedge, Beaton" + << "Tammy Shea, Tiblanka, 38 Sea Views, Carlton" + << "Tim Sheen, Caraba Gifts, 48 Ocean Way, Deal" + << "Sol Harvey, Chicos Coffee, 53 New Springs, Eccleston" + << "Sally Hobart, Tiroli Tea, 67 Long River, Fedula"); + dock->setWidget(customerList); + addDockWidget(Qt::RightDockWidgetArea, dock); + viewMenu->addAction(dock->toggleViewAction()); + + dock = new QDockWidget(tr("Paragraphs"), this); + paragraphsList = new QListWidget(dock); + paragraphsList->addItems(QStringList() + << "Thank you for your payment which we have received today." + << "Your order has been dispatched and should be with you " + "within 28 days." + << "We have dispatched those items that were in stock. The " + "rest of your order will be dispatched once all the " + "remaining items have arrived at our warehouse. No " + "additional shipping charges will be made." + << "You made a small overpayment (less than $5) which we " + "will keep on account for you, or return at your request." + << "You made a small underpayment (less than $1), but we have " + "sent your order anyway. We'll add this underpayment to " + "your next bill." + << "Unfortunately you did not send enough money. Please remit " + "an additional $. Your order will be dispatched as soon as " + "the complete amount has been received." + << "You made an overpayment (more than $5). Do you wish to " + "buy more items, or should we return the excess to you?"); + dock->setWidget(paragraphsList); + addDockWidget(Qt::RightDockWidgetArea, dock); + viewMenu->addAction(dock->toggleViewAction()); + + connect(customerList, SIGNAL(currentTextChanged(const QString &)), + this, SLOT(insertCustomer(const QString &))); + connect(paragraphsList, SIGNAL(currentTextChanged(const QString &)), + this, SLOT(addParagraph(const QString &))); + } diff --git a/src/qt-console/testprogs/examp/mainwindow.h b/src/qt-console/testprogs/examp/mainwindow.h new file mode 100644 index 00000000..ffa7ba4c --- /dev/null +++ b/src/qt-console/testprogs/examp/mainwindow.h @@ -0,0 +1,76 @@ + /**************************************************************************** + * ** + * ** Copyright (C) 2005-2007 Trolltech ASA. All rights reserved. + * ** + * ** This file is part of the example classes of the Qt Toolkit. + * ** + * ** This file may be used under the terms of the GNU General Public + * ** License version 2.0 as published by the Free Software Foundation + * ** and appearing in the file LICENSE.GPL included in the packaging of + * ** this file. Please review the following information to ensure GNU + * ** General Public Licensing requirements will be met: + * ** http://www.trolltech.com/products/qt/opensource.html + * ** + * ** If you are unsure which license is appropriate for your use, please + * ** review the following information: + * ** http://www.trolltech.com/products/qt/licensing.html or contact the + * ** sales department at sales@trolltech.com. + * ** + * ** This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE + * ** WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * ** + * ****************************************************************************/ + + #ifndef MAINWINDOW_H + #define MAINWINDOW_H + + #include + + class QAction; + class QListWidget; + class QMenu; + class QTextEdit; + + class MainWindow : public QMainWindow + { + Q_OBJECT + + public: + MainWindow(); + + private slots: + void newLetter(); + void save(); + void print(); + void undo(); + void about(); + void insertCustomer(const QString &customer); + void addParagraph(const QString ¶graph); + + private: + void createActions(); + void createMenus(); + void createToolBars(); + void createStatusBar(); + void createDockWindows(); + + QTextEdit *textEdit; + QListWidget *customerList; + QListWidget *paragraphsList; + + QMenu *fileMenu; + QMenu *editMenu; + QMenu *viewMenu; + QMenu *helpMenu; + QToolBar *fileToolBar; + QToolBar *editToolBar; + QAction *newLetterAct; + QAction *saveAct; + QAction *printAct; + QAction *undoAct; + QAction *aboutAct; + QAction *aboutQtAct; + QAction *quitAct; + }; + + #endif diff --git a/src/qt-console/testprogs/putz/main.cpp b/src/qt-console/testprogs/putz/main.cpp new file mode 100644 index 00000000..5e7287e8 --- /dev/null +++ b/src/qt-console/testprogs/putz/main.cpp @@ -0,0 +1,16 @@ +#include +#include "putz.h" + +Putz *putz; +QApplication *app; + +int main(int argc, char *argv[]) +{ + app = new QApplication(argc, argv); + app->setQuitOnLastWindowClosed(true); + + putz = new Putz; + putz->show(); + + return app->exec(); +} diff --git a/src/qt-console/testprogs/putz/putz.cpp b/src/qt-console/testprogs/putz/putz.cpp new file mode 100644 index 00000000..0b8fd135 --- /dev/null +++ b/src/qt-console/testprogs/putz/putz.cpp @@ -0,0 +1,11 @@ + +#include +#include + +#include "putz.h" + +Putz::Putz() +{ + printf("got to Putz Constructor\n"); + setupUi(this); +} diff --git a/src/qt-console/testprogs/putz/putz.h b/src/qt-console/testprogs/putz/putz.h new file mode 100644 index 00000000..48675bf6 --- /dev/null +++ b/src/qt-console/testprogs/putz/putz.h @@ -0,0 +1,23 @@ +#ifndef _PUTZ_H_ +#define _PUTZ_H_ + +#if QT_VERSION >= 0x050000 +#include +#else +#include +#endif +#include "ui_putz.h" + +class Putz : public QMainWindow, public Ui::MainWindow +{ + Q_OBJECT + +public: + Putz(); + +public slots: + +private: +}; + +#endif /* _PUTZ_H_ */ diff --git a/src/qt-console/testprogs/putz/putz.pro b/src/qt-console/testprogs/putz/putz.pro new file mode 100644 index 00000000..2e317919 --- /dev/null +++ b/src/qt-console/testprogs/putz/putz.pro @@ -0,0 +1,15 @@ +CONFIG += qt debug + +TEMPLATE = app +TARGET = putz +DEPENDPATH += . +INCLUDEPATH += .. 
+MOC_DIR = moc +OBJECTS_DIR = obj +UI_DIR = ui + +# Main window +FORMS += putz.ui + +HEADERS += putz.h +SOURCES += putz.cpp main.cpp diff --git a/src/qt-console/testprogs/putz/putz.ui b/src/qt-console/testprogs/putz/putz.ui new file mode 100644 index 00000000..343f0023 --- /dev/null +++ b/src/qt-console/testprogs/putz/putz.ui @@ -0,0 +1,83 @@ + + MainWindow + + + + 0 + 0 + 557 + 534 + + + + MainWindow + + + true + + + + + + 0 + 0 + 557 + 28 + + + + + + + + 7 + 7 + 12 + 7 + + + + Whoochie + + + 1 + + + + + 9 + + + 6 + + + + + + + + + + coochie + + + 2 + + + + + 9 + + + 6 + + + + + + + + + + + diff --git a/src/qt-console/tray-monitor/authenticate.cpp b/src/qt-console/tray-monitor/authenticate.cpp new file mode 100644 index 00000000..339655c3 --- /dev/null +++ b/src/qt-console/tray-monitor/authenticate.cpp @@ -0,0 +1,145 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * Bacula authentication. Provides authentication with + * File and Storage daemons. + * + * Nicolas Boichat, August MMIV + * + * This routine runs as a thread and must be thread reentrant. + * + * Basic tasks done here: + * + */ + +#include "tray-monitor.h" + +/* Commands sent to Director */ +static char DIRhello[] = "Hello %s calling\n"; + +static char SDhello[] = "Hello SD: Bacula Director %s calling\n"; + +/* Response from Director */ +static char DIROKhello[] = "1000 OK:"; + +/* Commands sent to File daemon and received + * from the User Agent */ +static char FDhello[] = "Hello Director %s calling\n"; + +/* Response from SD */ +static char SDOKhello[] = "3000 OK Hello"; +/* Response from FD */ +static char FDOKhello[] = "2000 OK Hello"; + +/* Forward referenced functions */ + +int authenticate_daemon(JCR *jcr, MONITOR *mon, RESMON *res) +{ + BSOCK *bs = res->bs; + int tls_local_need = BNET_TLS_NONE; + int tls_remote_need = BNET_TLS_NONE; + int compatible = true; + char bashed_name[MAX_NAME_LENGTH]; + char *password, *p; + int ret = 0; + + bstrncpy(bashed_name, mon->hdr.name, sizeof(bashed_name)); + bash_spaces(bashed_name); + password = res->password; + + /* TLS Requirement */ + if (res->tls_enable) { + tls_local_need = BNET_TLS_REQUIRED; + } + + /* Timeout Hello after 5 mins */ + btimer_t *tid = start_bsock_timer(bs, 60 * 5); + if (res->type == R_DIRECTOR) { + p = DIRhello; + } else if (res->type == R_STORAGE) { + p = SDhello; + } else { + p = FDhello; + } + + bs->fsend(p, bashed_name); + + if (!cram_md5_respond(bs, password, &tls_remote_need, &compatible) || + !cram_md5_challenge(bs, password, tls_local_need, compatible)) { + Jmsg(jcr, M_FATAL, 0, _("Authorization problem.\n" + "Most likely the passwords do not agree.\n" + "For help, please see " MANUAL_AUTH_URL "\n")); + goto bail_out; + } + + /* Verify that the remote host is willing to meet our TLS requirements */ + if (tls_remote_need < tls_local_need && tls_local_need != BNET_TLS_OK && tls_remote_need != BNET_TLS_OK) { + Jmsg(jcr, M_FATAL, 0, _("Authorization problem:" + " Remote 
server did not advertise required TLS support.\n")); + goto bail_out; + } + + /* Verify that we are willing to meet the remote host's requirements */ + if (tls_remote_need > tls_local_need && tls_local_need != BNET_TLS_OK && tls_remote_need != BNET_TLS_OK) { + Jmsg(jcr, M_FATAL, 0, ("Authorization problem:" + " Remote server requires TLS.\n")); + goto bail_out; + } + + /* Is TLS Enabled? */ + if (tls_local_need >= BNET_TLS_OK && tls_remote_need >= BNET_TLS_OK) { + /* Engage TLS! Full Speed Ahead! */ + if (!bnet_tls_client(res->tls_ctx, bs, NULL)) { + Jmsg(jcr, M_FATAL, 0, _("TLS negotiation failed\n")); + goto bail_out; + } + } + + Dmsg1(6, "> %s", bs->msg); + if (bs->recv() <= 0) { + Jmsg1(jcr, M_FATAL, 0, _("Bad response to Hello command: ERR=%s\n"), + bs->bstrerror()); + goto bail_out; + } + Dmsg1(10, "< %s", bs->msg); + switch(res->type) { + case R_DIRECTOR: + p = DIROKhello; + break; + case R_CLIENT: + p = FDOKhello; + break; + case R_STORAGE: + p = SDOKhello; + break; + } + if (strncmp(bs->msg, p, strlen(p)) != 0) { + Jmsg(jcr, M_FATAL, 0, _("Daemon rejected Hello command\n")); + goto bail_out; + } else { + //Jmsg0(jcr, M_INFO, 0, dir->msg); + } + ret = 1; +bail_out: + if (tid) { + stop_bsock_timer(tid); + } + return ret; +} diff --git a/src/qt-console/tray-monitor/bacula-tray-monitor.conf.in b/src/qt-console/tray-monitor/bacula-tray-monitor.conf.in new file mode 100644 index 00000000..4a9eff7b --- /dev/null +++ b/src/qt-console/tray-monitor/bacula-tray-monitor.conf.in @@ -0,0 +1,32 @@ +# +# Bacula Tray Monitor Configuration File +# +# Copyright (C) 2000-2017 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# + +Monitor { + Name = @basename@-mon + RefreshInterval = 120 seconds +} + +Client { + Name = @basename@-fd + Password = "@mon_fd_password@" # password for FileDaemon + Address = @hostname@ + Port = @fd_port@ +} + +#Storage { +# Name = @basename@-sd +# Address = @hostname@ +# Port = @sd_port@ +# Password = "@mon_sd_password@" # password for StorageDaemon +#} +# +#Director { +# Name = @basename@-dir +# Address = @hostname@ +# Port = @dir_port@ +# Password = "@mon_dir_password@" # password for the Directors +#} diff --git a/src/qt-console/tray-monitor/clientselectwizardpage.cpp b/src/qt-console/tray-monitor/clientselectwizardpage.cpp new file mode 100644 index 00000000..8bef54d1 --- /dev/null +++ b/src/qt-console/tray-monitor/clientselectwizardpage.cpp @@ -0,0 +1,57 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Restore Wizard: Client selection page + * + * Written by Norbert Bizet, May MMXVII + * + */ +#include "common.h" +#include "clientselectwizardpage.h" +#include "ui_clientselectwizardpage.h" +#include "conf.h" +ClientSelectWizardPage::ClientSelectWizardPage(QWidget *parent) : + QWizardPage(parent), + ui(new Ui::ClientSelectWizardPage) +{ + ui->setupUi(this); + /* currentClient field is mandatory */ + registerField("currentClient*", ui->backupClientComboBox); +} + +ClientSelectWizardPage::~ClientSelectWizardPage() +{ + delete ui; +} + +void ClientSelectWizardPage::initializePage() +{ + if (res && res->clients && res->clients->size() > 0) { + ui->backupClientComboBox->clear(); + QStringList list; + char *str; + foreach_alist(str, res->clients) { + list << QString(str); + } + ui->backupClientComboBox->addItems(list); + ui->backupClientComboBox->setEnabled(true); + } else { + ui->backupClientComboBox->setEnabled(false); + } +} diff --git a/src/qt-console/tray-monitor/clientselectwizardpage.h b/src/qt-console/tray-monitor/clientselectwizardpage.h new file mode 100644 index 00000000..673d8755 --- /dev/null +++ b/src/qt-console/tray-monitor/clientselectwizardpage.h @@ -0,0 +1,53 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Restore Wizard: Client selection page + * + * Written by Norbert Bizet, May MMXVII + * + */ +#ifndef CLIENTSELECTWIZARDPAGE_H +#define CLIENTSELECTWIZARDPAGE_H + +#include + +class RESMON; + +namespace Ui { +class ClientSelectWizardPage; +} + +class ClientSelectWizardPage : public QWizardPage +{ + Q_OBJECT + +public: + explicit ClientSelectWizardPage(QWidget *parent = 0); + ~ClientSelectWizardPage(); + /* QWizardPage interface */ + void initializePage(); + /* local interface */ + inline void setRes(RESMON *r) {res=r;} + +private: + Ui::ClientSelectWizardPage *ui; + RESMON *res; +}; + +#endif // CLIENTSELECTWIZARDPAGE_H diff --git a/src/qt-console/tray-monitor/clientselectwizardpage.ui b/src/qt-console/tray-monitor/clientselectwizardpage.ui new file mode 100644 index 00000000..484d0d96 --- /dev/null +++ b/src/qt-console/tray-monitor/clientselectwizardpage.ui @@ -0,0 +1,68 @@ + + + ClientSelectWizardPage + + + + 0 + 0 + 714 + 330 + + + + WizardPage + + + + + + true + + + + 0 + 0 + + + + + 0 + 0 + + + + Qt::DefaultContextMenu + + + Backup Client: + + + + + + + + 0 + 0 + + + + + 595 + 0 + + + + + + + -1 + + + + + + + + diff --git a/src/qt-console/tray-monitor/common.h b/src/qt-console/tray-monitor/common.h new file mode 100644 index 00000000..9b09abaa --- /dev/null +++ b/src/qt-console/tray-monitor/common.h @@ -0,0 +1,71 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +#ifndef TRAY_MONITOR_COMMON_H +#define TRAY_MONITOR_COMMON_H + +#if defined(HAVE_WIN32) +#if !defined(_STAT_H) +#define _STAT_H /* don't pull in MinGW stat.h */ +#endif +#ifndef _STAT_DEFINED +#define _STAT_DEFINED /* don't pull in MinGW stat.h */ +#endif +#endif + +#if defined(HAVE_WIN32) +#if defined(HAVE_MINGW) +#include "mingwconfig.h" +#else +#include "winconfig.h" +#endif +#else +#include "config.h" +#endif +#define __CONFIG_H + + +#if QT_VERSION >= 0x050000 +#include +#else +#include +#endif +#include + +#include "bacula.h" + +class DbgLine +{ +public: + const char *funct; + DbgLine(const char *fun): funct(fun) { + Dmsg2(0, "[%p] -> %s\n", bthread_get_thread_id(), funct); + } + ~DbgLine() { + Dmsg2(0, "[%p] <- %s\n", bthread_get_thread_id(), funct); + } +}; + +#ifdef Enter +#undef Enter +#endif +//#define Enter() DbgLine _enter(__PRETTY_FUNCTION__) +#define Enter() + +#endif diff --git a/src/qt-console/tray-monitor/conf.cpp b/src/qt-console/tray-monitor/conf.cpp new file mode 100644 index 00000000..729b559e --- /dev/null +++ b/src/qt-console/tray-monitor/conf.cpp @@ -0,0 +1,412 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +#include "conf.h" +#include "tray-monitor.h" +#include + +extern char *configfile; // defined in tray-monitor.cpp +extern RES_TABLE resources[]; +extern int32_t r_first; +extern int32_t r_last; +extern int32_t res_all_size; +extern URES res_all; + +bool Conf::parse_config() +{ + bool ret; + config = New(CONFIG()); + config->encode_password(false); + config->init(configfile, NULL, M_ERROR, (void *)&res_all, res_all_size, + r_first, r_last, resources, &rhead); + ret = config->parse_config(); + return ret; +} + +/* Check for \ at the end */ +static char *is_str_valid(POOLMEM **buf, const char *p) +{ + char *p1; + if (!p || !*p) { + return NULL; + } + p1 = *buf = check_pool_memory_size(*buf, (strlen(p) + 1)); + for (; *p ; p++) { + if (*p == '\\') { + *p1++ = '/'; + + } else if (*p == '"') { + return NULL; + + } else { + *p1++ = *p; + } + } + *p1 = 0; + return *buf; +} + +/* The .toUtf8().data() function can be called only one time at a time */ +void Conf::accept() +{ + POOLMEM *buf = get_pool_memory(PM_FNAME); + POOL_MEM tmp, tmp2, name; + QString str; + const char *restype=NULL, *p=NULL, *pname; + struct stat sp; + FILE *fp=NULL; + int n; + bool commit=false; + bool doclose=true; + bool ok; + + Mmsg(tmp, "%s.temp", configfile); + fp = fopen(tmp.c_str(), "w"); + if (!fp) { + berrno be; + display_error("Unable to open %s to write the new configuration file. 
ERR=%s\n", tmp.c_str(), be.bstrerror()); + goto bail_out; + } + str = UIConf.editName->text(); + p = is_str_valid(&buf, str.toUtf8().data()); + if (!p) { + display_error(_("The Name of the Monitor should be set")); + doclose = false; + goto bail_out; + } + + fprintf(fp, "Monitor {\n Name=\"%s\"\n", p); + + n = UIConf.spinRefresh->value(); + fprintf(fp, " Refresh Interval = %d\n", n); + + if (UIConf.cbDspAdvanced->isChecked()) { + fprintf(fp, " Display Advanced Options = yes\n"); + } + + str = UIConf.editCommandDir->text(); + p = is_str_valid(&buf, str.toUtf8().data()); + if (p) { // TODO: Check for \ at the end of the string + fprintf(fp, " Command Directory = \"%s\"\n", p); + } + + fprintf(fp, "}\n"); + + for (int i = 1; i < UIConf.tabWidget->count() ; i++) { + ConfTab *t = (ConfTab *) UIConf.tabWidget->widget(i); + if (t->isEnabled() == false) { + continue; // This one was deleted + } + for(int i = 0; resources[i].name ; i++) { + if (resources[i].rcode == t->res->type) { + restype = resources[i].name; + break; + } + } + if (!restype) { + goto bail_out; + } + + str = t->ui.editName->text(); + pname = is_str_valid(&buf, str.toUtf8().data()); + if (!pname) { + display_error(_("The name of the Resource should be set")); + doclose = false; + goto bail_out; + } + pm_strcpy(name, pname); + + str = t->ui.editAddress->text(); + p = is_str_valid(&buf, str.toUtf8().data()); + if (!p) { + display_error(_("The address of the Resource should be set for resource %s"), name.c_str()); + doclose = false; + goto bail_out; + } + fprintf(fp, "%s {\n Name = \"%s\"\n Address = \"%s\"\n", restype, name.c_str(), p); + + str = t->ui.editPassword->text(); + p = is_str_valid(&buf, str.toUtf8().data()); + if (!p) { + display_error(_("The Password of should be set for resource %s"), name.c_str()); + doclose = false; + goto bail_out; + } + fprintf(fp, " Password = \"%s\"\n", p); + + str = t->ui.editDescription->text(); + p = is_str_valid(&buf, str.toUtf8().data()); + if (p) { + fprintf(fp, " Description = \"%s\"\n", p); + } + n = t->ui.editPort->text().toInt(&ok, 10); + if (ok && n > 0 && n < 65636) { + fprintf(fp, " Port = %d\n", n); + } + n = t->ui.editTimeout->text().toInt(&ok, 10); + if (ok && n > 0) { + fprintf(fp, " Connect Timeout = %d\n", n); + } + + str = t->ui.editCaCertificateFile->text(); + p = is_str_valid(&buf, str.toUtf8().data()); + if (p) { + if (stat(p, &sp) != 0 || !S_ISREG(sp.st_mode)) { + display_error(_("The TLS CA Certificate File should be a PEM file for resource %s"), name.c_str()); + doclose = false; + goto bail_out; + } + fprintf(fp, " TLSCaCertificateFile = \"%s\"\n", p); + } + + str = t->ui.editCaCertificateDir->text(); + p = is_str_valid(&buf, str.toUtf8().data()); + if (p) { + if (stat(p, &sp) != 0 || !S_ISDIR(sp.st_mode)) { + display_error(_("The TLS CA Certificate Directory should be a directory for resource %s"), name.c_str()); + doclose = false; + goto bail_out; + } + fprintf(fp, " TLSCaCertificateDir = \"%s\"\n", p); + } + + str = t->ui.editCertificate->text(); + p = is_str_valid(&buf, str.toUtf8().data()); + if (p) { + if (stat(p, &sp) != 0 || !S_ISREG(sp.st_mode)) { + display_error(_("The TLS Certificate File should be a file for resource %s"), name.c_str()); + doclose = false; + goto bail_out; + } + fprintf(fp, " TLSCertificate = \"%s\"\n", p); + } + + str = t->ui.editKey->text(); + p = is_str_valid(&buf, str.toUtf8().data()); + if (p) { + if (stat(p, &sp) != 0 || !S_ISREG(sp.st_mode)) { + display_error(_("The TLS Key File should be a file for resource %s"), name.c_str()); + 
doclose = false; + goto bail_out; + } + fprintf(fp, " TLSKey = \"%s\"\n", p); + } + if (t->ui.cbTLSEnabled->isChecked()) { + fprintf(fp, " TLS Enable = yes\n"); + } + if (strcmp(restype, "client") == 0 && t->ui.cbRemote->isChecked()) { + fprintf(fp, " Remote = yes\n"); + } + if (strcmp(restype, "director") == 0 && t->ui.cbUseSetIp->isChecked()) { + fprintf(fp, " UseSetIp = yes\n"); + } + if (t->ui.cbMonitor->isChecked()) { + fprintf(fp, " Monitor = yes\n"); + } + fprintf(fp, "}\n"); + } + commit = true; + // Save the configuration file +bail_out: + if (fp) { + fclose(fp); + } + if (commit) { + // TODO: We probably need to load the configuration file to see if it works + unlink(configfile); + if (rename(tmp.c_str(), configfile) == 0) { + reload(); + + } else { + berrno be; + display_error("Unable to write to the configuration file %s ERR=%s\n", configfile, be.bstrerror()); + } + } + if (doclose) { + close(); + deleteLater(); + } +} + +Conf::~Conf() +{ + if (config) { + delete config; + } +} + +Conf::Conf(): QDialog() +{ + RESMON *res; + MONITOR *mon; + + rhead = NULL; + items = 0; + UIConf.setupUi(this); + if (parse_config()) { + for(res=NULL; (res=(RESMON *)GetNextRes(rhead, R_CLIENT, (RES*)res));) { + addResource(res, res->hdr.name); + } + for(res=NULL; (res=(RESMON *)GetNextRes(rhead, R_DIRECTOR, (RES*)res));) { + addResource(res, res->hdr.name); + } + for(res=NULL; (res=(RESMON *)GetNextRes(rhead, R_STORAGE, (RES*)res));) { + addResource(res, res->hdr.name); + } + mon = (MONITOR *)GetNextRes(rhead, R_MONITOR, NULL); + UIConf.editName->setText(QString(mon->hdr.name)); + UIConf.spinRefresh->setValue(mon->RefreshInterval); + UIConf.editCommandDir->setText(QString(NPRTB(mon->command_dir))); + + if (mon->display_advanced_options) { + UIConf.cbDspAdvanced->setChecked(true); + } + } + setAttribute(Qt::WA_DeleteOnClose, true); + show(); +} + +void Conf::addResource(RESMON *res, const char *title) +{ + char ed1[50]; + ConfTab *w = new ConfTab(res); + w->ui.editName->setText(QString(res->hdr.name)); + if (res->password) { + w->ui.editPassword->setText(QString(res->password)); + } + if (res->type != R_CLIENT) { + w->ui.cbRemote->hide(); + w->ui.labelRemote->hide(); + + } else if (res->use_remote) { + w->ui.cbRemote->setChecked(true); + } + + if (res->type != R_DIRECTOR) { + w->ui.cbUseSetIp->hide(); + w->ui.labelSetIp->hide(); + + } else if (res->use_setip) { + w->ui.cbUseSetIp->setChecked(true); + } + + if (res->use_monitor) { + w->ui.cbMonitor->setChecked(true); + } + w->ui.editAddress->setText(QString(res->address)); + w->ui.editPort->setText(QString(edit_uint64(res->port, ed1))); + w->ui.editTimeout->setText(QString(edit_uint64(res->connect_timeout, ed1))); + if (!res->tls_enable) { + if (w->ui.cbTLSEnabled->isChecked()) { + emit w->ui.cbTLSEnabled->click(); + } + } + if (res->tls_ca_certfile) { + w->ui.editCaCertificateFile->setText(QString(res->tls_ca_certfile)); + } + if (res->tls_ca_certdir) { + w->ui.editCaCertificateDir->setText(QString(res->tls_ca_certdir)); + } + if (res->tls_certfile) { + w->ui.editCertificate->setText(QString(res->tls_certfile)); + } + if (res->tls_keyfile) { + w->ui.editKey->setText(QString(res->tls_keyfile)); + } + + UIConf.tabWidget->addTab(w, QString(title)); + items++; +} + +void Conf::addRes(int type, const char *title) +{ + RESMON *res = (RESMON *) malloc(sizeof(RESMON)); + init_resource(config, type, res); + res->type = type; // Not sure it's set by init_resource + res->new_resource = true; // We want to free this resource with the ConfTab + addResource(res, 
title);
+}
+
+void Conf::addDir()
+{
+   addRes(R_DIRECTOR, "New Director");
+}
+
+void Conf::addStore()
+{
+   addRes(R_STORAGE, "New Storage");
+}
+
+void Conf::addClient()
+{
+   addRes(R_CLIENT, "New Client");
+}
+
+void Conf::togglePassword()
+{
+   if (passtype == QLineEdit::Normal) {
+      passtype = QLineEdit::PasswordEchoOnEdit;
+   } else {
+      passtype = QLineEdit::Normal;
+   }
+   for (int i = 1; i < UIConf.tabWidget->count() ; i++) {
+      ConfTab *tab = (ConfTab *) UIConf.tabWidget->widget(i);
+      tab->ui.editPassword->setEchoMode(passtype);
+   }
+}
+
+void Conf::selectCommandDir()
+{
+   QString directory = QFileDialog::getExistingDirectory(this,
+                          tr("Select Command Directory"),
+                          QDir::currentPath());
+   UIConf.editCommandDir->setText(directory);
+}
+
+void ConfTab::selectCaCertificateFile()
+{
+   QString directory = QFileDialog::getOpenFileName(this,
+                          tr("Select CA Certificate File PEM file"),
+                          QDir::currentPath());
+   ui.editCaCertificateFile->setText(directory);
+}
+
+void ConfTab::selectCaCertificateDir()
+{
+   QString directory = QFileDialog::getExistingDirectory(this,
+                          tr("Select CA Certificate Directory"),
+                          QDir::currentPath());
+   ui.editCaCertificateDir->setText(directory);
+}
+
+void ConfTab::selectCertificate()
+{
+   QString file = QFileDialog::getOpenFileName(this,
+                          tr("Select TLS Certificate File"),
+                          QDir::currentPath());
+   ui.editCertificate->setText(file);
+}
+
+void ConfTab::selectKey()
+{
+   QString file = QFileDialog::getOpenFileName(this,
+                          tr("Select TLS Key File"),
+                          QDir::currentPath());
+   ui.editKey->setText(file);
+}
diff --git a/src/qt-console/tray-monitor/conf.h b/src/qt-console/tray-monitor/conf.h
new file mode 100644
index 00000000..e2cb5280
--- /dev/null
+++ b/src/qt-console/tray-monitor/conf.h
@@ -0,0 +1,85 @@
+/*
+   Bacula(R) - The Network Backup Solution
+
+   Copyright (C) 2000-2017 Kern Sibbald
+
+   The original author of Bacula is Kern Sibbald, with contributions
+   from many others, a complete list can be found in the file AUTHORS.
+
+   You may use this file and others of this release according to the
+   license defined in the LICENSE file, which includes the Affero General
+   Public License, v3.0 ("AGPLv3") and some additional permissions and
+   terms pursuant to its AGPLv3 Section 7.
+
+   This notice must be preserved when any source code is
+   conveyed and/or propagated.
+
+   Bacula(R) is a registered trademark of Kern Sibbald.
+*/ + +#ifndef CONF_H +#define CONF_H + +#include "common.h" +#include "ui_main-conf.h" +#include "ui_res-conf.h" +#include "tray_conf.h" + +class Conf: public QDialog +{ + Q_OBJECT + +private: + CONFIG *config; + RES_HEAD **rhead; +public: + int items; + QLineEdit::EchoMode passtype; + Ui::Conf UIConf; + Conf(); + ~Conf(); + bool parse_config(); + void addResource(RESMON *res, const char *title); + void addRes(int type, const char *title); /* create the resource */ +public slots: + void accept(); + void selectCommandDir(); + void addDir(); + void addStore(); + void addClient(); + void togglePassword(); +}; + +class ConfTab: public QWidget +{ + Q_OBJECT + +public: + Ui::ResConf ui; + RESMON *res; + int type; + bool new_resource; + ConfTab(RESMON *r): QWidget() { + res = r; + type = r->type; + new_resource = r->new_resource; + ui.setupUi(this); + connect(ui.bpDelete, SIGNAL(clicked()), this, SLOT(disable())); + }; + ~ConfTab() { + if (new_resource && res) { + free_resource((RES*) res, res->type); + res = NULL; + } + }; +public slots: + void disable() { + setEnabled(false); + }; + void selectCaCertificateFile(); + void selectCaCertificateDir(); + void selectCertificate(); + void selectKey(); +}; + +#endif diff --git a/src/qt-console/tray-monitor/dir-monitor.ui b/src/qt-console/tray-monitor/dir-monitor.ui new file mode 100644 index 00000000..0c68374f --- /dev/null +++ b/src/qt-console/tray-monitor/dir-monitor.ui @@ -0,0 +1,176 @@ + + + dirStatus + + + + 0 + 0 + 518 + 642 + + + + Form + + + + + + + + Director Status + + + + + + + + + + + + + Name: + + + + + + + Started: + + + + + + + + + + + + + + + + + + + + + Version: + + + + + + + Plugins: + + + + + + + + + + + + + + Reloaded: + + + + + + + + + + + + + + + + + Running Jobs + + + + + + QAbstractItemView::SingleSelection + + + false + + + + + + + + + + Terminated Jobs + + + + + + QAbstractItemView::SingleSelection + + + + + + + + + + + + + + + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + + + + + :/images/view-refresh.png:/images/view-refresh.png + + + + + + + + + + + + diff --git a/src/qt-console/tray-monitor/dirstatus.cpp b/src/qt-console/tray-monitor/dirstatus.cpp new file mode 100644 index 00000000..0e692a31 --- /dev/null +++ b/src/qt-console/tray-monitor/dirstatus.cpp @@ -0,0 +1,127 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ + +#include "dirstatus.h" +#include "../util/fmtwidgetitem.h" +#include "jcr.h" + +void DIRStatus::doUpdate() +{ + if (count == 0) { + count++; + task *t = new task(); + status.pushButton->setEnabled(false); + connect(t, SIGNAL(done(task *)), this, SLOT(taskDone(task *)), Qt::QueuedConnection); + t->init(res, TASK_STATUS); + res->wrk->queue(t); + status.statusBar->setText(QString("Trying to connect to Director...")); + Dmsg1(50, "doUpdate(%p)\n", res); + } +} + +void DIRStatus::taskDone(task *t) +{ + count--; + if (!t->status) { + status.statusBar->setText(QString(t->errmsg)); + + } else { + status.statusBar->clear(); + if (t->type == TASK_STATUS) { + char ed1[50]; + struct s_last_job *ljob; + struct s_running_job *rjob; + res->mutex->lock(); + status.labelName->setText(QString(res->name)); + status.labelVersion->setText(QString(res->version)); + status.labelStarted->setText(QString(res->started)); + status.labelReloaded->setText(QString(res->reloaded)); + status.labelPlugins->setText(QString(res->plugins)); + /* Clear the table first */ + Freeze(*status.tableRunning); + Freeze(*status.tableTerminated); + QStringList headerlistR = (QStringList() << tr("JobId") + << tr("Job") << tr("Level") << tr("Client") + << tr("Status") << tr("Storage") + << tr("Files") << tr("Bytes") << tr("Errors")); + status.tableRunning->clear(); + status.tableRunning->setRowCount(0); + status.tableRunning->setColumnCount(headerlistR.count()); + status.tableRunning->setHorizontalHeaderLabels(headerlistR); + status.tableRunning->setEditTriggers(QAbstractItemView::NoEditTriggers); + status.tableRunning->verticalHeader()->hide(); + status.tableRunning->setSortingEnabled(true); + + if (res->running_jobs) { + status.tableRunning->setRowCount(res->running_jobs->size()); + int row=0; + foreach_alist(rjob, res->running_jobs) { + int col=0; + TableItemFormatter item(*status.tableRunning, row++); + item.setNumericFld(col++, QString(edit_uint64(rjob->JobId, ed1))); + item.setTextFld(col++, QString(rjob->Job)); + item.setJobLevelFld(col++, QString(rjob->JobLevel)); + item.setTextFld(col++, QString(rjob->Client)); + item.setJobStatusFld(col++, QString(rjob->JobStatus)); + item.setTextFld(col++, QString(rjob->Storage)); + item.setNumericFld(col++, QString(edit_uint64(rjob->JobFiles, ed1))); + item.setBytesFld(col++, QString(edit_uint64(rjob->JobBytes, ed1))); + item.setNumericFld(col++, QString(edit_uint64(rjob->Errors, ed1))); + } + } else { + Dmsg0(0, "Strange, the list is NULL\n"); + } + + QStringList headerlistT = (QStringList() << tr("JobId") + << tr("Job") << tr("Level") + << tr("Status") << tr("Files") << tr("Bytes") + << tr("Errors")); + + status.tableTerminated->clear(); + status.tableTerminated->setRowCount(0); + status.tableTerminated->setColumnCount(headerlistT.count()); + status.tableTerminated->setHorizontalHeaderLabels(headerlistT); + status.tableTerminated->setEditTriggers(QAbstractItemView::NoEditTriggers); + status.tableTerminated->verticalHeader()->hide(); + status.tableTerminated->setSortingEnabled(true); + + if (res->terminated_jobs) { + status.tableTerminated->setRowCount(res->terminated_jobs->size()); + int row=0; + foreach_dlist(ljob, res->terminated_jobs) { + int col=0; + TableItemFormatter item(*status.tableTerminated, row++); + item.setNumericFld(col++, QString(edit_uint64(ljob->JobId, ed1))); + item.setTextFld(col++, QString(ljob->Job)); + item.setJobLevelFld(col++, QString(ljob->JobLevel)); + item.setJobStatusFld(col++, QString(ljob->JobStatus)); + item.setNumericFld(col++, 
QString(edit_uint64(ljob->JobFiles, ed1))); + item.setBytesFld(col++, QString(edit_uint64(ljob->JobBytes, ed1))); + item.setNumericFld(col++, QString(edit_uint64(ljob->Errors, ed1))); + } + } else { + Dmsg0(0, "Strange, the list is NULL\n"); + } + res->mutex->unlock(); + } + Dmsg1(50, " Task %p OK\n", t); + } + t->deleteLater(); + status.pushButton->setEnabled(true); +} diff --git a/src/qt-console/tray-monitor/dirstatus.h b/src/qt-console/tray-monitor/dirstatus.h new file mode 100644 index 00000000..242bd69a --- /dev/null +++ b/src/qt-console/tray-monitor/dirstatus.h @@ -0,0 +1,42 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +#include "common.h" +#include "ui_dir-monitor.h" +#include "task.h" +#include "status.h" + +class DIRStatus: public ResStatus +{ + Q_OBJECT + +public: + Ui::dirStatus status; + + DIRStatus(RESMON *d): ResStatus(d) + { + status.setupUi(this); + QObject::connect(status.pushButton, SIGNAL(clicked()), this, SLOT(doUpdate()), Qt::QueuedConnection); + }; + ~DIRStatus() { + }; +public slots: + void doUpdate(); + void taskDone(task *); +}; diff --git a/src/qt-console/tray-monitor/fd-monitor.ui b/src/qt-console/tray-monitor/fd-monitor.ui new file mode 100644 index 00000000..fee5f730 --- /dev/null +++ b/src/qt-console/tray-monitor/fd-monitor.ui @@ -0,0 +1,176 @@ + + + fdStatus + + + + 0 + 0 + 518 + 435 + + + + Form + + + + + + + + FileDaemon Status + + + + + + + + + + + + + + + + + + + + Name: + + + + + + + Bandwidth Limit: + + + + + + + + + + + + + + Started: + + + + + + + + + + + + + + + + + + + + + Version: + + + + + + + Plugins: + + + + + + + + + + Running Jobs + + + + + + QAbstractItemView::SingleSelection + + + false + + + + + + + + + + Terminated Jobs + + + + + + QAbstractItemView::SingleSelection + + + + + + + + + + + + + + + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + + + + + :/images/view-refresh.png:/images/view-refresh.png + + + + + + + + + + + + diff --git a/src/qt-console/tray-monitor/fdstatus.cpp b/src/qt-console/tray-monitor/fdstatus.cpp new file mode 100644 index 00000000..a09a2147 --- /dev/null +++ b/src/qt-console/tray-monitor/fdstatus.cpp @@ -0,0 +1,125 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ + +#include "fdstatus.h" +#include "../util/fmtwidgetitem.h" +#include "jcr.h" + +void FDStatus::doUpdate() +{ + if (count == 0) { // We do only doUpdate() + count++; + task *t = new task(); + status.pushButton->setEnabled(false); + connect(t, SIGNAL(done(task *)), this, SLOT(taskDone(task *)), Qt::QueuedConnection); + t->init(res, TASK_STATUS); + res->wrk->queue(t); + status.statusBar->setText(QString("Trying to connect to FD...")); + Dmsg1(50, "doUpdate(%p)\n", res); + } +} + +void FDStatus::taskDone(task *t) +{ + int nbjobs=0; + count--; + if (!t->status) { + status.statusBar->setText(QString(t->errmsg)); + + } else if (isVisible()) { + status.statusBar->clear(); + if (t->type == TASK_STATUS) { + char ed1[50]; + struct s_last_job *ljob; + struct s_running_job *rjob; + res->mutex->lock(); + status.labelName->setText(QString(res->name)); + status.labelVersion->setText(QString(res->version)); + status.labelPlugins->setText(QString(res->plugins)); + status.labelStarted->setText(QString(res->started)); + status.labelBandwidth->setText(QString(edit_uint64(res->bwlimit, ed1))); + /* Clear the table first */ + Freeze(*status.tableRunning); + Freeze(*status.tableTerminated); + QStringList headerlistR = (QStringList() << tr("JobId") + << tr("Job") << tr("Level") + << tr("Files") << tr("Bytes") << tr("Errors") << tr("Current File")); + status.tableRunning->clear(); + status.tableRunning->setRowCount(0); + status.tableRunning->setColumnCount(headerlistR.count()); + status.tableRunning->setHorizontalHeaderLabels(headerlistR); + status.tableRunning->setEditTriggers(QAbstractItemView::NoEditTriggers); + status.tableRunning->verticalHeader()->hide(); + status.tableRunning->setSortingEnabled(true); + + if (res->running_jobs) { + status.tableRunning->setRowCount(res->running_jobs->size()); + int row=0; + foreach_alist(rjob, res->running_jobs) { + int col=0; + TableItemFormatter item(*status.tableRunning, row++); + item.setNumericFld(col++, QString(edit_uint64(rjob->JobId, ed1))); + item.setTextFld(col++, QString(rjob->Job)); + item.setJobLevelFld(col++, QString(rjob->JobLevel)); + item.setNumericFld(col++, QString(edit_uint64(rjob->JobFiles, ed1))); + item.setBytesFld(col++, QString(edit_uint64(rjob->JobBytes, ed1))); + item.setNumericFld(col++, QString(edit_uint64(rjob->Errors, ed1))); + item.setNumericFld(col++, QString(rjob->CurrentFile)); + nbjobs++; + } + } else { + Dmsg0(0, "Strange, the list is NULL\n"); + } + + QStringList headerlistT = (QStringList() << tr("JobId") + << tr("Job") << tr("Level") + << tr("Status") << tr("Files") << tr("Bytes") << tr("Errors")); + + status.tableTerminated->clear(); + status.tableTerminated->setRowCount(0); + status.tableTerminated->setColumnCount(headerlistT.count()); + status.tableTerminated->setHorizontalHeaderLabels(headerlistT); + status.tableTerminated->setEditTriggers(QAbstractItemView::NoEditTriggers); + status.tableTerminated->verticalHeader()->hide(); + status.tableTerminated->setSortingEnabled(true); + + if (res->terminated_jobs) { + status.tableTerminated->setRowCount(res->terminated_jobs->size()); + int row=0; + foreach_dlist(ljob, res->terminated_jobs) { + int col=0; + TableItemFormatter item(*status.tableTerminated, row++); + item.setNumericFld(col++, QString(edit_uint64(ljob->JobId, ed1))); + item.setTextFld(col++, QString(ljob->Job)); + item.setJobLevelFld(col++, QString(ljob->JobLevel)); + item.setJobStatusFld(col++, QString(ljob->JobStatus)); + item.setNumericFld(col++, QString(edit_uint64(ljob->JobFiles, ed1))); + item.setBytesFld(col++, 
QString(edit_uint64(ljob->JobBytes, ed1))); + item.setNumericFld(col++, QString(edit_uint64(ljob->Errors, ed1))); + } + } else { + Dmsg0(0, "Strange, the list is NULL\n"); + } + res->mutex->unlock(); + } + Dmsg1(50, " Task %p OK\n", t); + } + t->deleteLater(); + status.pushButton->setEnabled(true); +} diff --git a/src/qt-console/tray-monitor/fdstatus.h b/src/qt-console/tray-monitor/fdstatus.h new file mode 100644 index 00000000..a3316d13 --- /dev/null +++ b/src/qt-console/tray-monitor/fdstatus.h @@ -0,0 +1,42 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +#include "common.h" +#include "ui_fd-monitor.h" +#include "task.h" +#include "status.h" + +class FDStatus: public ResStatus +{ + Q_OBJECT + +public: + Ui::fdStatus status; + + FDStatus(RESMON *c): ResStatus(c) + { + status.setupUi(this); + QObject::connect(status.pushButton, SIGNAL(clicked()), this, SLOT(doUpdate()), Qt::QueuedConnection); + }; + ~FDStatus() { + }; +public slots: + void doUpdate(); + void taskDone(task *); +}; diff --git a/src/qt-console/tray-monitor/fileselectwizardpage.cpp b/src/qt-console/tray-monitor/fileselectwizardpage.cpp new file mode 100644 index 00000000..52579ca8 --- /dev/null +++ b/src/qt-console/tray-monitor/fileselectwizardpage.cpp @@ -0,0 +1,302 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Restore Wizard: File selection page + * + * Written by Norbert Bizet, May MMXVII + * + */ +#include "common.h" +#include "fileselectwizardpage.h" +#include "filesmodel.h" +#include "ui_fileselectwizardpage.h" +#include "restorewizard.h" +#include "task.h" +#include +#include + +FileSelectWizardPage::FileSelectWizardPage(QWidget *parent) : + QWizardPage(parent), + m_currentSourceId(0), + m_currentPathStr(""), + ui(new Ui::FileSelectWizardPage), + src_files_model(new FileSourceModel), + dest_files_model(new FileDestModel), + m_filterTimer(new QTimer), + res(NULL), + need_optimize(true) +{ + ui->setupUi(this); + + /* keep track of the current source view selection */ + registerField("currentSourceId", this, "currentSourceId", "currentSourceIdChanged"); + registerField("currentPathStr", this, "currentPathStr", "currentPathStrChanged"); + registerField("fileFilter", ui->FilterEdit); + /* usefull to the following pages */ + registerField("jobIds", this, "jobIds", "jobIdsChanged"); + registerField("fileIds", this, "fileIds", "fileIdsChanged"); + registerField("dirIds", this, "dirIds", "dirIdsChanged"); + registerField("hardlinks", this, "hardlinks", "hardlinksChanged"); + registerField("pluginIds", this, "pluginIds", "pluginIdsChanged"); + registerField("pluginNames", this, "pluginNames", "pluginNamesChanged"); + + QStringList headers; + headers << tr("Name") << tr("Size") << tr("Date"); + + ui->sourceTableView->setModel(src_files_model); + //ui->sourceTableView->setLayoutMode(QListView::Batched); + //ui->sourceTableView->setBatchSize(BATCH_SIZE); + src_files_model->setHorizontalHeaderLabels(headers); + connect(ui->sourceTableView, SIGNAL(activated(const QModelIndex &)), this, SLOT(changeCurrentFolder(const QModelIndex&))); + connect(ui->sourceTableView, SIGNAL(activated(const QModelIndex &)), this, SLOT(updateSourceModel())); + + ui->destTableView->setModel(dest_files_model); + dest_files_model->setHorizontalHeaderLabels(headers); + connect(dest_files_model, SIGNAL(rowsInserted(const QModelIndex &, int, int)), this, SIGNAL(completeChanged())); + connect(dest_files_model, SIGNAL(rowsRemoved(const QModelIndex &, int, int)), this, SIGNAL(completeChanged())); + + connect(ui->filePathComboPath, SIGNAL(activated(const QString &)), this, SLOT(changeCurrentText(const QString &))); + connect(ui->filePathComboPath, SIGNAL(activated(const QString &)), this, SLOT(updateSourceModel())); + + connect(ui->FilterEdit, SIGNAL(textChanged(const QString &)), this, SLOT(delayedFilter())); + + m_filterTimer->setSingleShot(true); + connect(m_filterTimer, SIGNAL(timeout()), this, SLOT(unFreezeSrcView())); + connect(m_filterTimer, SIGNAL(timeout()), this, SLOT(updateSourceModel())); + + QShortcut *shortcut = new QShortcut(QKeySequence(Qt::Key_Delete), this); + QObject::connect(shortcut, SIGNAL(activated()), this, SLOT(deleteDestSelection())); +} + +FileSelectWizardPage::~FileSelectWizardPage() +{ + delete ui; +} + +void FileSelectWizardPage::initializePage() +{ + /* first request synchronous */ + if (res) { + task t; + t.init(res, -1); + + const char *p = field("fileFilter").toString().toLatin1().data(); + POOL_MEM info2; + pm_strcpy(info2, p); + t.arg2 = info2.c_str(); + + p = currentPathStr().toLatin1().data(); + POOL_MEM info3; + pm_strcpy(info3, p); + t.arg3 = info3.c_str(); + + t.model = src_files_model; + + t.get_job_files(field("currentJob").toString().toLatin1().data(), currentSourceId()); + } + + need_optimize = true; +} + +bool FileSelectWizardPage::isComplete() const +{ + return 
dest_files_model->rowCount() != 0; +} + +int FileSelectWizardPage::nextId() const +{ + /* if some plugins are used in current selection, move to plugin page, otherwise options */ + if (field("pluginIds").toString().isEmpty()) { + return RestoreWizard::RW_ADVANCEDOPTIONS_PAGE; + } + return RestoreWizard::RW_PLUGIN_PAGE; +} + +bool FileSelectWizardPage::validatePage() +{ + /* compute result fields based on destination model content */ + QStringList fileids, jobids, dirids, findexes; + struct stat statp; + int32_t LinkFI; + + for (int row=0; row < dest_files_model->rowCount(); ++row) { + QModelIndex idx = dest_files_model->index(row, 0); + if (idx.data(TypeRole) == TYPEROLE_FILE) { + fileids << idx.data(FileIdRole).toString(); + jobids << idx.data(JobIdRole).toString(); + decode_stat(idx.data(LStatRole).toString().toLocal8Bit().data(), + &statp, sizeof(statp), &LinkFI); + if (LinkFI) { + findexes << idx.data(JobIdRole).toString() + "," + QString().setNum(LinkFI); + } + } else /* TYPEROLE_DIRECTORY */ + { + dirids << idx.data(PathIdRole).toString(); + jobids << idx.data(JobIdRole).toString().split(","); /* Can have multiple jobids */ + } + } + + fileids.removeDuplicates(); + jobids.removeDuplicates(); + dirids.removeDuplicates(); + findexes.removeDuplicates(); + + m_jobIds = jobids.join(","); + m_fileIds = fileids.join(","); + m_dirIds = dirids.join(","); + m_hardlinks = findexes.join(","); + + /* plugin Ids and Names are retrieved now, so NextId() can decide to schedule the PluginPage before RestoreOptionsPage */ + /* Stored as properties so thru the next Wizard Page can use them */ + task t; + t.init(res, -1); /* pass res, ID is set to -1 because task method is called synchronously */ + m_pluginIds = t.plugins_ids(m_jobIds); + m_pluginNames = t.plugins_names(m_jobIds); + + return true; +} + +void FileSelectWizardPage::updateSourceModel() +{ + /* subsequent request async */ + if (res) { + task *t = new task(); + + /* optimize size only once */ + if (need_optimize) { + connect(t, SIGNAL(done(task*)), this, SLOT(optimizeSize())); + need_optimize = false; + } + connect(t, SIGNAL(done(task*)), t, SLOT(deleteLater())); + + t->init(res, TASK_LIST_JOB_FILES); + + const char *p = field("currentJob").toString().toLatin1().data(); + POOL_MEM info; + pm_strcpy(info, p); + t->arg = info.c_str(); + + p = field("fileFilter").toString().toLatin1().data(); + POOL_MEM info2; + pm_strcpy(info2, p); + t->arg2 = info2.c_str(); + + p = currentPathStr().toLatin1().data(); + POOL_MEM info3; + pm_strcpy(info3, p); + t->arg3 = info3.c_str(); + + t->pathId = currentSourceId(); + t->model = src_files_model; + + res->wrk->queue(t); + } +} + +void FileSelectWizardPage::optimizeSize() +{ + int w = ui->destTableView->width()/4; + ui->destTableView->horizontalHeader()->resizeSection( 0, w*2); + ui->destTableView->horizontalHeader()->resizeSection( 1, w); + ui->destTableView->horizontalHeader()->resizeSection( 2, w ); + ui->destTableView->horizontalHeader()->setStretchLastSection(true); + + w = ui->sourceTableView->width()/4; + ui->sourceTableView->horizontalHeader()->resizeSection( 0, w*2); + ui->sourceTableView->horizontalHeader()->resizeSection( 1, w); + ui->sourceTableView->horizontalHeader()->resizeSection( 2, w ); + ui->sourceTableView->horizontalHeader()->setStretchLastSection(true); +} + +void FileSelectWizardPage::changeCurrentFolder(const QModelIndex& current) +{ + if (current.isValid()) { + QStandardItem *item = src_files_model->itemFromIndex(current); + if (item && item->data(TypeRole) == TYPEROLE_DIRECTORY) { + 
QString path = item->text(); + m_currentSourceId = item->data(PathIdRole).toULongLong(); + if (m_currentSourceId > 0) { + // Choose .. update current path to parent dir + if (path == "..") { + if (m_currentPathStr == "/") { + m_currentPathStr = ""; + } else { + m_currentPathStr.remove(QRegExp("[^/]+/$")); + } + + } else if (path == "/" && m_currentPathStr == "") { + m_currentPathStr += path; + + } else if (path != "/") { + m_currentPathStr += path; + } + } + + emit currentSourceIdChanged(); + emit currentPathStrChanged(); + + int idx = ui->filePathComboPath->findText(m_currentPathStr); + if (idx >= 0) { + ui->filePathComboPath->setCurrentIndex(idx); + } else { + ui->filePathComboPath->insertItem(-1, m_currentPathStr, QVariant(m_currentSourceId)); + ui->filePathComboPath->model()->sort(0); + ui->filePathComboPath->setCurrentIndex(ui->filePathComboPath->findText(m_currentPathStr)); + } + } + } +} + +void FileSelectWizardPage::changeCurrentText(const QString& current) +{ + int idx = ui->filePathComboPath->findText(current); + m_currentSourceId = ui->filePathComboPath->itemData(idx).toULongLong(); + m_currentPathStr = ui->filePathComboPath->itemText(idx); + emit currentSourceIdChanged(); +} + +void FileSelectWizardPage::deleteDestSelection() +{ + QMap rows; + foreach (QModelIndex index, ui->destTableView->selectionModel()->selectedIndexes()) + rows.insert(index.row(), 0); + QMapIterator r(rows); + r.toBack(); + while (r.hasPrevious()) { + r.previous(); + ui->destTableView->model()->removeRow(r.key()); + } +} + +void FileSelectWizardPage::delayedFilter() +{ + freezeSrcView(); + m_filterTimer->start( 100 ); +} + +void FileSelectWizardPage::freezeSrcView() +{ + ui->sourceTableView->setUpdatesEnabled(false); +} + +void FileSelectWizardPage::unFreezeSrcView() +{ + ui->sourceTableView->setUpdatesEnabled(true); + ui->sourceTableView->update(); +} diff --git a/src/qt-console/tray-monitor/fileselectwizardpage.h b/src/qt-console/tray-monitor/fileselectwizardpage.h new file mode 100644 index 00000000..bb59c4df --- /dev/null +++ b/src/qt-console/tray-monitor/fileselectwizardpage.h @@ -0,0 +1,112 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Restore Wizard: File selection page + * + * Written by Norbert Bizet, May MMXVII + * + */ +#ifndef FILESELECTWIZARDPAGE_H +#define FILESELECTWIZARDPAGE_H + +#include + +class QStandardItemModel; +class QModelIndex; +class RESMON; +class task; + +namespace Ui { +class FileSelectWizardPage; +} + +class FileSelectWizardPage : public QWizardPage +{ + Q_OBJECT + + Q_PROPERTY(qulonglong currentSourceId READ currentSourceId NOTIFY currentSourceIdChanged) + Q_PROPERTY(QString currentPathStr READ currentPathStr NOTIFY currentPathStrChanged) + Q_PROPERTY(QString jobIds READ jobIds NOTIFY jobIdsChanged) + Q_PROPERTY(QString fileIds READ fileIds NOTIFY fileIdsChanged) + Q_PROPERTY(QString dirIds READ dirIds NOTIFY dirIdsChanged) + Q_PROPERTY(QString hardlinks READ hardlinks NOTIFY hardlinksChanged) + Q_PROPERTY(QString pluginIds READ pluginIds NOTIFY pluginIdsChanged) + Q_PROPERTY(QString pluginNames READ pluginNames NOTIFY pluginNamesChanged) + +private: qulonglong m_currentSourceId; +public: qulonglong currentSourceId() const { return m_currentSourceId; } +signals: void currentSourceIdChanged(); +private: QString m_currentPathStr; +public: QString currentPathStr() const { return m_currentPathStr; } +signals: void currentPathStrChanged(); +private: QString m_jobIds; +public: QString jobIds() const { return m_jobIds; } +signals: void jobIdsChanged(); +private: QString m_fileIds; +public: QString fileIds() const { return m_fileIds; } +signals: void fileIdsChanged(); +private: QString m_dirIds; +public: QString dirIds() const { return m_dirIds; } +signals: void dirIdsChanged(); +private: QString m_hardlinks; +public: QString hardlinks() const { return m_hardlinks; } +signals: void hardlinksChanged(); +private: QString m_pluginIds; +public: QString pluginIds() const { return m_pluginIds; } +signals: void pluginIdsChanged(); +private: QString m_pluginNames; +public: QString pluginNames() const { return m_pluginNames; } +signals: void pluginNamesChanged(); + +public: + explicit FileSelectWizardPage(QWidget *parent = 0); + ~FileSelectWizardPage(); + /* QWizardPage interface */ + void initializePage(); + bool isComplete() const; + int nextId() const; + bool validatePage(); + /* local interface */ + void setRes(RESMON *r) {res=r;} + +protected slots: + void updateSourceModel(); + + void optimizeSize(); + + void changeCurrentFolder(const QModelIndex& current); + void changeCurrentText(const QString ¤t); + + void deleteDestSelection(); + + void delayedFilter(); + + void freezeSrcView(); + void unFreezeSrcView(); + +private: + Ui::FileSelectWizardPage *ui; + QStandardItemModel *src_files_model; + QStandardItemModel *dest_files_model; + QTimer *m_filterTimer; + RESMON *res; + bool need_optimize; +}; + +#endif // FILESELECTWIZARDPAGE_H diff --git a/src/qt-console/tray-monitor/fileselectwizardpage.ui b/src/qt-console/tray-monitor/fileselectwizardpage.ui new file mode 100644 index 00000000..41d7d85a --- /dev/null +++ b/src/qt-console/tray-monitor/fileselectwizardpage.ui @@ -0,0 +1,139 @@ + + + FileSelectWizardPage + + + + 0 + 0 + 698 + 484 + + + + WizardPage + + + + + + Drag from left to right + + + + + + false + + + + + + + + + + + QAbstractItemView::NoEditTriggers + + + false + + + true + + + QAbstractItemView::DragOnly + + + false + + + QAbstractItemView::ExtendedSelection + + + QAbstractItemView::SelectRows + + + false + + + false + + + false + + + false + + + + + + + + + Filter + + + + + + + + + + + + + + false + + + QAbstractItemView::NoEditTriggers + + + true + + + false + + + 
QAbstractItemView::DropOnly + + + Qt::CopyAction + + + QAbstractItemView::ExtendedSelection + + + QAbstractItemView::SelectRows + + + false + + + false + + + false + + + false + + + false + + + + + + + + + + + + + diff --git a/src/qt-console/tray-monitor/filesmodel.h b/src/qt-console/tray-monitor/filesmodel.h new file mode 100644 index 00000000..db8ebba8 --- /dev/null +++ b/src/qt-console/tray-monitor/filesmodel.h @@ -0,0 +1,220 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * definition of models related to file selection wizard page. + * + * Written by Norbert Bizet, May MMXVII + * + */ +#ifndef FILESMODEL_H +#define FILESMODEL_H +#include +#include +#include +#include "task.h" +#include +#include "../util/fmtwidgetitem.h" + +enum { + PathIdRole = Qt::UserRole+1, + FilenameIdRole = Qt::UserRole+2, + FileIdRole = Qt::UserRole+3, + JobIdRole = Qt::UserRole+4, + LStatRole = Qt::UserRole+5, + PathRole = Qt::UserRole+6, + TypeRole = Qt::UserRole+7, + FullPathRole = Qt::UserRole+8 +}; + +enum { + TYPEROLE_DIRECTORY = 0, + TYPEROLE_FILE +}; + +class DirectoryItem : public QStandardItem +{ +public: + DirectoryItem() : QStandardItem() + { + /* explicit set the data, so it can be serialized in Mime Data for D&D */ + setData(QVariant(TYPEROLE_DIRECTORY), TypeRole); + } + QVariant data(int role = Qt::UserRole + 1) const + { + if (role == Qt::DecorationRole) { + QFileIconProvider provider; + return provider.icon(QFileIconProvider::Folder); + } + + return QStandardItem::data(role); + } +}; + +class FileItem : public QStandardItem +{ +public: + FileItem() : QStandardItem() + { + /* explicit set the data, so it can be serialized in Mime Data for D&D */ + setData(QVariant(TYPEROLE_FILE), TypeRole); + } + enum { + FILE_TYPE = UserType +2 + }; + + QVariant data(int role = Qt::UserRole + 1) const + { + if (role == Qt::DecorationRole) { + QFileIconProvider provider; + return provider.icon(QFileIconProvider::File); + } + + return QStandardItem::data(role); + } +}; + +#define BATCH_SIZE 100 +class FileSourceModel : public QStandardItemModel +{ +public: + FileSourceModel() : QStandardItemModel(), + m_cursor(0), + m_batchSize(BATCH_SIZE), + m_canFetchMore(true) + {} + + bool canFetchMore(const QModelIndex &parent) const + { + Q_UNUSED(parent) + return false/*m_canFetchMore*/; + } + void fetchMore(const QModelIndex &parent) + { + Q_UNUSED(parent) + } + +public slots: + void taskComplete(task *t) + { + t->deleteLater(); + } + +private: + u_int64_t m_cursor; + u_int64_t m_batchSize; + bool m_canFetchMore; +}; + +extern int decode_stat(char *buf, struct stat *statp, int stat_size, int32_t *LinkFI); + +class FileDestModel : public QStandardItemModel +{ + bool canDropMimeData(const QMimeData *data, Qt::DropAction action, int row, int column, const QModelIndex &parent) const + { + Q_UNUSED(action) + Q_UNUSED(row) + Q_UNUSED(column) + Q_UNUSED(parent) + + if (data->hasFormat("application/x-qstandarditemmodeldatalist")) { + QByteArray 
encoded = data->data("application/x-qabstractitemmodeldatalist"); + QDataStream stream(&encoded, QIODevice::ReadOnly); + + while (!stream.atEnd()) + { + int row, col; + QMap roleDataMap; + stream >> row >> col >> roleDataMap; + + /* do something with the data */ + int type = roleDataMap[TypeRole].toInt(); + + switch(type) { + case TYPEROLE_DIRECTORY: + case TYPEROLE_FILE: + break; + default: + return false; + } + } + return true; + } + return false; + } + + bool dropMimeData(const QMimeData * data, Qt::DropAction action, int row, int column, const QModelIndex & parent) + { + Q_UNUSED(action) + Q_UNUSED(row) + Q_UNUSED(column) + Q_UNUSED(parent) + + QByteArray encoded = data->data("application/x-qabstractitemmodeldatalist"); + QDataStream stream(&encoded, QIODevice::ReadOnly); + + while (!stream.atEnd()) + { + int row, col; + QMap roleDataMap; + stream >> row >> col >> roleDataMap; + + if (col == 0) { + QStandardItem *item; + /* do something with the data */ + int type = roleDataMap[TypeRole].toInt(); + + switch(type) { + case TYPEROLE_DIRECTORY: + item = new DirectoryItem(); + break; + case TYPEROLE_FILE: + item = new FileItem(); + break; + default: + return false; + } + + item->setData(roleDataMap[PathIdRole], PathIdRole); + item->setData(roleDataMap[FilenameIdRole], FilenameIdRole); + item->setData(roleDataMap[FileIdRole], FileIdRole); + item->setData(roleDataMap[JobIdRole], JobIdRole); + item->setData(roleDataMap[LStatRole], LStatRole); + item->setData(roleDataMap[PathRole], PathRole); + item->setData(roleDataMap[Qt::DisplayRole], Qt::DisplayRole); + item->setData(roleDataMap[Qt::ToolTipRole], Qt::ToolTipRole); + if (type == TYPEROLE_FILE) { + QList colums; + struct stat statp; + int32_t LinkFI; + decode_stat(roleDataMap[LStatRole].toString().toLocal8Bit().data(), + &statp, sizeof(statp), &LinkFI); + char buf[200]; + bstrutime(buf, sizeof(buf), statp.st_mtime); + colums << item << new QStandardItem(convertBytesSI(statp.st_size)) << new QStandardItem(buf); + appendRow(colums); + } else { + appendRow(item); + } + } + } + return true; + } +}; + +#endif // FILESMODEL_H diff --git a/src/qt-console/tray-monitor/install_conf_file.in b/src/qt-console/tray-monitor/install_conf_file.in new file mode 100755 index 00000000..3698afb4 --- /dev/null +++ b/src/qt-console/tray-monitor/install_conf_file.in @@ -0,0 +1,19 @@ +#!/bin/sh +# Copyright (C) 2000-2017 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# + +sbindir=@sbindir@ +sysconfdir=@sysconfdir@ +INSTALL_CONFIG="@INSTALL@ -m 640" +DESTDIR=`echo ${DESTDIR}` + +srcconf=bacula-tray-monitor.conf +if test -f ${DESTDIR}${sysconfdir}/${srcconf}; then + destconf=${srcconf}.new + echo " ==> Found existing $srcconf, installing new conf file as ${destconf}" +else + destconf=${srcconf} +fi +echo "${INSTALL_CONFIG} ${srcconf} ${DESTDIR}${sysconfdir}/${destconf}" +${INSTALL_CONFIG} ${srcconf} ${DESTDIR}${sysconfdir}/${destconf} diff --git a/src/qt-console/tray-monitor/jobselectwizardpage.cpp b/src/qt-console/tray-monitor/jobselectwizardpage.cpp new file mode 100644 index 00000000..f54a3ea4 --- /dev/null +++ b/src/qt-console/tray-monitor/jobselectwizardpage.cpp @@ -0,0 +1,103 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Restore Wizard: Job selection page + * + * Written by Norbert Bizet, May MMXVII + * + */ +#include "common.h" +#include "jobselectwizardpage.h" +#include "conf.h" +#include "task.h" +#include "ui_jobselectwizardpage.h" +#include + +JobSelectWizardPage::JobSelectWizardPage(QWidget *parent) : + QWizardPage(parent), + ui(new Ui::JobSelectWizardPage), + res(NULL), + model(new QStandardItemModel), + m_jobId(-1) +{ + ui->setupUi(this); + /* currentJob in mandatory */ + registerField("currentJob*", this, "currentJob", SIGNAL(currentJobChanged())); + /* assign model to widget */ + ui->BackupTableView->setModel(model); + /* when selection change, change the field value and the next button state */ + connect(ui->BackupTableView->selectionModel(), SIGNAL(selectionChanged(const QItemSelection&, const QItemSelection&)), this, SIGNAL(currentJobChanged())); + connect(ui->BackupTableView->selectionModel(), SIGNAL(selectionChanged(const QItemSelection&, const QItemSelection&)), this, SIGNAL(completeChanged())); +} + +JobSelectWizardPage::~JobSelectWizardPage() +{ + delete model; + delete ui; +} + +void JobSelectWizardPage::initializePage() +{ + /* populate model */ + if (res && (!res->terminated_jobs || res->terminated_jobs->empty())) { + /* get terminated_jobs info if not present. Queue populateModel() at end of task */ + task *t = new task(); + connect(t, SIGNAL(done(task*)), this , SLOT(populateModel()), Qt::QueuedConnection); + connect(t, SIGNAL(done(task*)), t, SLOT(deleteLater())); + t->init(res, TASK_STATUS); + res->wrk->queue(t); + } else { + /* populate Model directly */ + populateModel(); + } +} + +bool JobSelectWizardPage::isComplete() const +{ + /* any selection will do since it's single selection */ + return (ui->BackupTableView->selectionModel() + && !ui->BackupTableView->selectionModel()->selectedRows().isEmpty() + ); +} + +qlonglong JobSelectWizardPage::currentJob() const +{ + /* single selection */ + QModelIndex idx = ui->BackupTableView->selectionModel()->currentIndex(); + /* return the JobId in column 0 */ + QModelIndex idIdx = idx.sibling(idx.row(), 0); + return idIdx.data().toLongLong(); +} + +void JobSelectWizardPage::populateModel() +{ + if (res) { + /* populate model with jobs listed in currentClient */ + task *t = new task(); + connect(t, SIGNAL(done(task*)), t, SLOT(deleteLater())); + t->init(res, TASK_LIST_CLIENT_JOBS); + int idx = field("currentClient").toInt(); + char *p = (char*) res->clients->get(idx); + POOL_MEM info; + pm_strcpy(info, p); + t->arg = info.c_str(); + t->model = model; + res->wrk->queue(t); + } +} diff --git a/src/qt-console/tray-monitor/jobselectwizardpage.h b/src/qt-console/tray-monitor/jobselectwizardpage.h new file mode 100644 index 00000000..034cea78 --- /dev/null +++ b/src/qt-console/tray-monitor/jobselectwizardpage.h @@ -0,0 +1,69 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Restore Wizard: Job selection page + * + * Written by Norbert Bizet, May MMXVII + * + */ +#ifndef JOBSELECTWIZARDPAGE_H +#define JOBSELECTWIZARDPAGE_H + +#include "common.h" +#include + +class QStandardItemModel; +class QItemSelection; +class RESMON; + +namespace Ui { +class JobSelectWizardPage; +} + +class JobSelectWizardPage : public QWizardPage +{ + Q_OBJECT + Q_PROPERTY(qlonglong currentJob READ currentJob NOTIFY currentJobChanged) + +public: + explicit JobSelectWizardPage(QWidget *parent = 0); + ~JobSelectWizardPage(); + /* QWizardPage interface */ + void initializePage(); + bool isComplete() const; + /* local interface */ + inline void setRes(RESMON *r) {res=r;} + /* currentJob READ */ + qlonglong currentJob() const; + +signals: + /* currentJob NOTIFY */ + void currentJobChanged(); + +protected slots: + void populateModel(); + +private: + Ui::JobSelectWizardPage *ui; + RESMON *res; + QStandardItemModel *model; + qlonglong m_jobId; +}; + +#endif // JOBSELECTWIZARDPAGE_H diff --git a/src/qt-console/tray-monitor/jobselectwizardpage.ui b/src/qt-console/tray-monitor/jobselectwizardpage.ui new file mode 100644 index 00000000..e45c3c80 --- /dev/null +++ b/src/qt-console/tray-monitor/jobselectwizardpage.ui @@ -0,0 +1,52 @@ + + + JobSelectWizardPage + + + + 0 + 0 + 706 + 360 + + + + WizardPage + + + + + + QAbstractItemView::NoEditTriggers + + + QAbstractItemView::SingleSelection + + + QAbstractItemView::SelectRows + + + true + + + false + + + false + + + false + + + true + + + false + + + + + + + + diff --git a/src/qt-console/tray-monitor/jobsmodel.cpp b/src/qt-console/tray-monitor/jobsmodel.cpp new file mode 100644 index 00000000..1b8610a1 --- /dev/null +++ b/src/qt-console/tray-monitor/jobsmodel.cpp @@ -0,0 +1,98 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +#include "jobsmodel.h" + +JobsModel::JobsModel(const QList& t, QObject *parent) + : QAbstractTableModel(parent) + , table(t) +{ +} + +QVariant JobsModel::headerData(int section, Qt::Orientation orientation, int role) const +{ + if (role != Qt::DisplayRole) + return QVariant(); + + if (orientation == Qt::Horizontal) { + switch (section) { + case ID_COLUMN: + return tr("Id"); + case TDATE_COLUMN: + return tr("Date"); + case HASCACHE_COLUMN: + return tr("Cache"); + case NAME_COLUMN: + return tr("Name"); + default: + return QVariant(); + } + } + return QVariant(); + + + return QVariant(); +} + +int JobsModel::rowCount(const QModelIndex &parent) const +{ + if (parent.isValid()) + return 0; + + return table.size(); +} + +int JobsModel::columnCount(const QModelIndex &parent) const +{ + if (parent.isValid()) + return 0; + + return NUM_COLUMN; +} + +QVariant JobsModel::data(const QModelIndex &index, int role) const +{ + if (!index.isValid()) + return QVariant(); + + if (index.row() >= table.size() || index.row() < 0) + return QVariant(); + + if (index.column() >= NUM_COLUMN || index.column() < 0) + return QVariant(); + + if (role == Qt::DisplayRole) { + row_struct row = table.at(index.row()); + switch(index.column()) + { + case 0: + return quint64(row.id); + break; + case 1: + return row.tdate; + break; + case 2: + return row.hasCache; + break; + case 3: + return row.name; + break; + } + } + return QVariant(); +} diff --git a/src/qt-console/tray-monitor/jobsmodel.h b/src/qt-console/tray-monitor/jobsmodel.h new file mode 100644 index 00000000..e47fccab --- /dev/null +++ b/src/qt-console/tray-monitor/jobsmodel.h @@ -0,0 +1,59 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +#ifndef JOBSMODEL_H +#define JOBSMODEL_H + +#include +#include "common.h" + +class JobsModel : public QAbstractTableModel +{ + Q_OBJECT + +public: + enum { + ID_COLUMN =0, + TDATE_COLUMN, + HASCACHE_COLUMN, + NAME_COLUMN, + NUM_COLUMN + }; + + struct row_struct { + uint64_t id; + QDateTime tdate; + QString hasCache; + QString name; + }; + + explicit JobsModel(const QList& t, QObject *parent = NULL); + // Header: + QVariant headerData(int section, Qt::Orientation orientation, int role = Qt::DisplayRole) const; + + // Basic functionality: + int rowCount(const QModelIndex &parent = QModelIndex()) const; + int columnCount(const QModelIndex &parent = QModelIndex()) const; + + QVariant data(const QModelIndex &index, int role = Qt::DisplayRole) const; + +private: + QList table; +}; + +#endif // JOBSMODEL_H diff --git a/src/qt-console/tray-monitor/main-conf.ui b/src/qt-console/tray-monitor/main-conf.ui new file mode 100644 index 00000000..4e023ab6 --- /dev/null +++ b/src/qt-console/tray-monitor/main-conf.ui @@ -0,0 +1,350 @@ + + + Conf + + + + 0 + 0 + 556 + 337 + + + + Configuration + + + + + + 0 + + + + Monitor Configuration + + + + + + QFormLayout::AllNonFixedFieldsGrow + + + + + The Monitor name will be used during the authentication phase. 
+ + + Name: + + + + + + + The Monitor name will be used during the authentication phase. + + + 127 + + + + + + + Refresh Interval: + + + + + + + 5 + + + 9999 + + + 120 + + + + + + + Specify the "Command Directory" where the tray-monitor program will check regularly for jobs to run + + + Command Directory: + + + + + + + + + Specify the "Command Directory" where the tray-monitor program will check regularly for jobs to run + + + + + + + ... + + + + + + + + + Display or Hide advanced options in the "Run Job" window + + + Display Advanced Options: + + + + + + + Display or Hide advanced options in the "Run Job" window + + + + + + + + + + + + + + + + + + Save and Apply the changes + + + Save + + + + :/images/label.png:/images/label.png + + + + + + + Cancel + + + + :/images/A.png:/images/A.png + + + + + + + Qt::Vertical + + + + 20 + 40 + + + + + + + + Show/Hide Passwords + + + Password + + + + :/images/zoom.png:/images/zoom.png + + + + + + + Add Client resource to monitor + + + Client + + + + :/images/mark.png:/images/mark.png + + + + + + + Add Storage resource to monitor + + + Storage + + + + :/images/mark.png:/images/mark.png + + + + + + + Add Director resource to monitor + + + Director + + + + :/images/mark.png:/images/mark.png + + + + + + + + + + + + + bpSave + clicked() + Conf + accept() + + + 511 + 30 + + + 521 + 46 + + + + + bpCancel + clicked() + Conf + close() + + + 511 + 76 + + + 521 + 159 + + + + + bpStrip + clicked() + Conf + togglePassword() + + + 511 + 178 + + + 496 + 142 + + + + + bpAddClient + clicked() + Conf + addClient() + + + 511 + 239 + + + 521 + 245 + + + + + bpAddStorage + clicked() + Conf + addStore() + + + 511 + 272 + + + 521 + 289 + + + + + bpAddDir + clicked() + Conf + addDir() + + + 511 + 313 + + + 521 + 331 + + + + + bpCommandDir + clicked() + Conf + selectCommandDir() + + + 405 + 135 + + + 466 + 112 + + + + + + togglePassword() + addClient() + addStore() + addDir() + selectCommandDir() + + diff --git a/src/qt-console/tray-monitor/pluginmodel.h b/src/qt-console/tray-monitor/pluginmodel.h new file mode 100644 index 00000000..f7bdca6d --- /dev/null +++ b/src/qt-console/tray-monitor/pluginmodel.h @@ -0,0 +1,28 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +#ifndef PLUGINMODEL_H +#define PLUGINMODEL_H +enum { + PluginNameRole = Qt::UserRole+1, + PluginTypeRole = Qt::UserRole+2, + PluginCommentRole = Qt::UserRole+3, + PluginRequiredRole = Qt::UserRole+4, + PluginDefaultRole = Qt::UserRole+5 +}; +#endif // PLUGINMODEL_H diff --git a/src/qt-console/tray-monitor/pluginwizardpage.cpp b/src/qt-console/tray-monitor/pluginwizardpage.cpp new file mode 100644 index 00000000..164a5b57 --- /dev/null +++ b/src/qt-console/tray-monitor/pluginwizardpage.cpp @@ -0,0 +1,177 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Restore Wizard: Plugin selection page + * + * Written by Norbert Bizet, May MMXVII + * + */ +#include "common.h" +#include "pluginwizardpage.h" +#include "ui_pluginwizardpage.h" +#include "pluginmodel.h" +#include "task.h" +#include +#include +#include +#include +#include +#include "lib/ini.h" + +PluginWizardPage::PluginWizardPage(QWidget *parent) : + QWizardPage(parent), + ui(new Ui::PluginWizardPage), + res(NULL) +{ + ui->setupUi(this); + ui->tabWidget->clear(); + registerField("pluginKeysStr", this, "pluginKeysStr", "pluginKeysStrChanged"); +} + +PluginWizardPage::~PluginWizardPage() +{ + delete ui; +} + +void PluginWizardPage::initializePage() +{ + /* build plugin form UI dynamically*/ + task t; + t.init(res, -1); + QStringList idsList = field("pluginIds").toString().split(","); + QStringList nameList = field("pluginNames").toString().split(","); + /* process ids and name lists with the assumption that indexes match */ + ASSERT(idsList.count() == nameList.count()); + for( int c=0; ctabWidget->count(); ++j) { + if (ui->tabWidget->tabText(j) == pluginName) { + exists=true; + break; + } + } + if (exists) continue; + /* create a tab widget */ + QWidget *pluginWidget = new QWidget(); + /* insert a tab widget with an empty form layout */ + QFormLayout *layout = new QFormLayout(); + pluginWidget->setLayout(layout); + ui->tabWidget->addTab(pluginWidget, pluginName); + + /* parse each plugin fields*/ + t.plugin(nameList[c], field("jobIds").toString(), pluginId.toInt()); + ConfigFile cf; + cf.unserialize(pluginName.toLatin1().data()); + + /* for each field */ + for (int i=0; i < MAX_INI_ITEMS && cf.items[i].name; i++) { + ini_items f = cf.items[i]; + /* create the w Widget dynamically, based on the field value type */ + QWidget *w(NULL); + if (f.handler == ini_store_str || + f.handler == ini_store_name || + f.handler == ini_store_alist_str) /*FIXME: treat alist separatly*/ + { + QLineEdit *l = new QLineEdit(); + w=l; + if (f.default_value) + l->setText(f.default_value); + } + else if (f.handler == ini_store_pint64 || + f.handler == ini_store_int64 || + f.handler == ini_store_pint32 || + f.handler == ini_store_int32) + { + QLineEdit *l = new QLineEdit(); + w=l; + l->setValidator(new QIntValidator()); + if (f.default_value) + l->setText(f.default_value); + } + else if (f.handler == ini_store_bool) + { + QCheckBox *c = new QCheckBox(); + w=c; + if (f.default_value) + c->setChecked(f.default_value); + } + else if (f.handler == ini_store_date) + { + QDateTimeEdit *d = new QDateTimeEdit(); + w=d; + if (f.default_value) + d->setDateTime(QDateTime::fromString(f.default_value, "yyyy-MM-dd hh:mm:ss")); + } + + if (w) { + w->setToolTip(f.comment); + QString field_name = QString("%1_%2").arg(nameList[c]).arg(f.name); + /* This doesn't work FIXME + * if (f.required) { + field_name.append("*"); + }*/ + + registerField(field_name, w); + /* there's no way to iterate thru page-register field */ + /* As a workaround we keep track of registered fields in a separate list */ + registeredFields.append(field_name); + + layout->addRow(f.name, w); + emit completeChanged(); + } + } + } +} + +bool 
PluginWizardPage::validatePage() +{ + QStringList pluginKeys; + QStringList idsList = field("pluginIds").toString().split(","); + QStringList nameList = field("pluginNames").toString().split(","); + for (int idx=0; idxtabWidget->count(); ++idx) { + QString name = ui->tabWidget->tabText(idx); + ASSERT(name.compare(nameList[idx]) == 0); + + QFile file(name); + if (file.open(QIODevice::WriteOnly)) { + QTextStream outputStream(&file); + foreach(QString fld, registeredFields) { + QStringList sl = fld.split("_"); + if ( (name.compare(sl[0]) == 0) && field(fld).isValid()) { + QString s = QString("%1=%2\n").arg(sl[1]).arg(field(fld).toString()); + outputStream << s; + } + } + } + + /* create the key */ + QString key = QString("j%1i%2").arg(field("jobIds").toString().remove(',').simplified()).arg(idsList[idx].simplified()); + QString restoreKey = QString("%1:%2").arg(idsList[idx].simplified()).arg(key); + pluginKeys.append(restoreKey); + } + + m_pluginKeysStr = pluginKeys.join(","); + emit pluginKeysStrChanged(); + + return true; +} diff --git a/src/qt-console/tray-monitor/pluginwizardpage.h b/src/qt-console/tray-monitor/pluginwizardpage.h new file mode 100644 index 00000000..ebb25733 --- /dev/null +++ b/src/qt-console/tray-monitor/pluginwizardpage.h @@ -0,0 +1,60 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Restore Wizard: Plugin selection page + * + * Written by Norbert Bizet, May MMXVII + * + */ +#ifndef PLUGINWIZARDPAGE_H +#define PLUGINWIZARDPAGE_H + +#include + +namespace Ui { +class PluginWizardPage; +} + +class RESMON; + +class PluginWizardPage : public QWizardPage +{ + Q_OBJECT + Q_PROPERTY(QString pluginKeysStr READ pluginKeysStr NOTIFY pluginKeysStrChanged) + +private: QString m_pluginKeysStr; +public: QString pluginKeysStr() const { return m_pluginKeysStr; } +signals: void pluginKeysStrChanged(); + +public: + explicit PluginWizardPage(QWidget *parent = 0); + ~PluginWizardPage(); + /* QWizardPage interface */ + void initializePage(); + bool validatePage(); + /* local interface */ + inline void setRes(RESMON *r) {res=r;} +private: + Ui::PluginWizardPage *ui; + RESMON *res; + QStringList registeredFields; + +}; + +#endif // PLUGINWIZARDPAGE_H diff --git a/src/qt-console/tray-monitor/pluginwizardpage.ui b/src/qt-console/tray-monitor/pluginwizardpage.ui new file mode 100644 index 00000000..e55a6d26 --- /dev/null +++ b/src/qt-console/tray-monitor/pluginwizardpage.ui @@ -0,0 +1,35 @@ + + + PluginWizardPage + + + + 0 + 0 + 637 + 503 + + + + WizardPage + + + + + + + Tab 1 + + + + + Tab 2 + + + + + + + + + diff --git a/src/qt-console/tray-monitor/res-conf.ui b/src/qt-console/tray-monitor/res-conf.ui new file mode 100644 index 00000000..7cf21a78 --- /dev/null +++ b/src/qt-console/tray-monitor/res-conf.ui @@ -0,0 +1,565 @@ + + + ResConf + + + + 0 + 0 + 417 + 541 + + + + Form + + + + + + General + + + + QFormLayout::AllNonFixedFieldsGrow + + + + + The Name will be used only in the Tray Monitor interface + + + Name: + + + + + + + The Name will be used only in the Tray Monitor interface + + + 127 + + + + + + + Description: + + + + + + + 512 + + + + + + + Password: + + + + + + + + + + 127 + + + QLineEdit::PasswordEchoOnEdit + + + + + + + Address: + + + + + + + 1024 + + + + + + + Port: + + + + + + + + 0 + 0 + + + + + 100 + 16777215 + + + + 5 + + + + + + + Timeout: + + + + + + + + 0 + 0 + + + + + 100 + 16777215 + + + + 5 + + + + + + + Use Client Initiated backup/restore feature + + + Remote + + + + + + + Use Client Initiated backup/restore feature + + + + + + + + + + Update the tray monitor icon with the status of this component + + + Monitor: + + + + + + + Update the tray monitor icon with the status of this component + + + + + + + + + + + + + + + + + Use SetIp: + + + + + + + + + + TLS + + + + + + ... + + + + + + + CA Certificate File: + + + + + + + Enabled + + + true + + + + + + + ... + + + + + + + Key File: + + + + + + + + + + ... + + + + + + + Certificate File: + + + + + + + + + + + + + + + + CA Certificate Directory: + + + + + + + ... 
+ + + + + cbTLSEnabled + editCaCertificateFile + label_5 + label_6 + editCaCertificateDir + label_7 + editCertificate + label_8 + editKey + bpCaCertificateFile + bpCaCertificateDir + bpCertificate + bpKey + + + + + + + 64 + 16777215 + + + + + 64 + 0 + + + + + + + + :/images/purge.png:/images/purge.png + + + false + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + editName + editDescription + editPassword + editAddress + editPort + editTimeout + cbRemote + cbTLSEnabled + editCaCertificateFile + bpCaCertificateFile + editCaCertificateDir + bpCaCertificateDir + editCertificate + bpCertificate + editKey + bpKey + + + + + + + bpCaCertificateFile + clicked() + ResConf + selectCaCertificateFile() + + + 461 + 294 + + + 521 + 247 + + + + + bpCaCertificateDir + clicked() + ResConf + selectCaCertificateDir() + + + 452 + 334 + + + 501 + 355 + + + + + bpCertificate + clicked() + ResConf + selectCertificate() + + + 459 + 364 + + + 495 + 384 + + + + + bpKey + clicked() + ResConf + selectKey() + + + 461 + 395 + + + 481 + 410 + + + + + cbTLSEnabled + toggled(bool) + editCaCertificateFile + setEnabled(bool) + + + 132 + 271 + + + 249 + 291 + + + + + cbTLSEnabled + toggled(bool) + editCaCertificateDir + setEnabled(bool) + + + 120 + 274 + + + 203 + 325 + + + + + cbTLSEnabled + toggled(bool) + editCertificate + setEnabled(bool) + + + 68 + 271 + + + 220 + 360 + + + + + cbTLSEnabled + toggled(bool) + editKey + setEnabled(bool) + + + 51 + 275 + + + 288 + 392 + + + + + cbTLSEnabled + toggled(bool) + bpCaCertificateFile + setEnabled(bool) + + + 161 + 267 + + + 449 + 291 + + + + + cbTLSEnabled + toggled(bool) + bpCaCertificateDir + setEnabled(bool) + + + 145 + 271 + + + 455 + 329 + + + + + cbTLSEnabled + toggled(bool) + bpCertificate + setEnabled(bool) + + + 140 + 266 + + + 459 + 358 + + + + + cbTLSEnabled + toggled(bool) + bpKey + setEnabled(bool) + + + 118 + 272 + + + 458 + 389 + + + + + + selectCaCertificateFile() + selectCaCertificateDir() + selectCertificate() + selectKey() + + diff --git a/src/qt-console/tray-monitor/restoreoptionswizardpage.cpp b/src/qt-console/tray-monitor/restoreoptionswizardpage.cpp new file mode 100644 index 00000000..9ca86a69 --- /dev/null +++ b/src/qt-console/tray-monitor/restoreoptionswizardpage.cpp @@ -0,0 +1,111 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Restore Wizard : Options page + * + * Written by Norbert Bizet, May MMXVII + * + */ +#include "common.h" +#include "restoreoptionswizardpage.h" +#include "ui_restoreoptionswizardpage.h" +#include "common.h" +#include "filesmodel.h" +#include "task.h" + +#define TABLENAME "b21234" +RestoreOptionsWizardPage::RestoreOptionsWizardPage(QWidget *parent) : + QWizardPage(parent), + ui(new Ui::RestoreOptionsWizardPage), + res(0) + +{ + ui->setupUi(this); + + registerField("restoreClient", ui->restoreClientComboxBox); + registerField("restoreWhere", ui->whereLineEdit); + registerField("restoreReplace", ui->replaceComboBox); + registerField("restoreComment", ui->commentLineEdit); +} + +RestoreOptionsWizardPage::~RestoreOptionsWizardPage() +{ + delete ui; +} + +void RestoreOptionsWizardPage::initializePage() +{ + /* first request synchronous */ + if (res) { + QStringList list; + char *str; + foreach_alist(str, res->clients) { + list << QString(str); + } + + ui->restoreClientComboxBox->clear(); + if (!list.isEmpty()) { + ui->restoreClientComboxBox->addItems(list); + ui->restoreClientComboxBox->setEnabled(true); + ui->restoreClientComboxBox->setCurrentIndex(0); + } else { + ui->restoreClientComboxBox->setEnabled(false); + } + + /* find RestoreFiles default */ + const char *p = "RestoreFiles"; + POOL_MEM info; + pm_strcpy(info, p); + res->mutex->lock(); + bfree_and_null(res->defaults.job); + res->defaults.job = bstrdup(info.c_str()); + res->mutex->unlock(); + + task t; + t.init(res, -1); + t.get_job_defaults(); + + ui->whereLineEdit->setText(res->defaults.where); + ui->replaceComboBox->setCurrentIndex(res->defaults.replace); + } +} + +bool RestoreOptionsWizardPage::validatePage() +{ + task *t = new task(); + connect(t, SIGNAL(done(task*)), wizard(), SLOT(deleteLater())); + connect(t, SIGNAL(done(task*)), t, SLOT(deleteLater())); + t->init(res, TASK_RESTORE); + + t->restore_field.tableName = QString(TABLENAME); + t->restore_field.jobIds = field("jobIds").toString(); + t->restore_field.fileIds = field("fileIds").toString(); + t->restore_field.dirIds = field("dirIds").toString(); + t->restore_field.hardlinks = field("hardlinks").toString(); + int idx = field("currentClient").toInt(); + t->restore_field.client = QString((char*)res->clients->get(idx)); + t->restore_field.where = field("restoreWhere").toString(); + t->restore_field.replace = ui->replaceComboBox->currentText(); + t->restore_field.comment = field("restoreComment").toString(); + t->restore_field.pluginnames = field("pluginNames").toString(); + t->restore_field.pluginkeys = field("pluginKeysStr").toString(); + + res->wrk->queue(t); + return true; +} diff --git a/src/qt-console/tray-monitor/restoreoptionswizardpage.h b/src/qt-console/tray-monitor/restoreoptionswizardpage.h new file mode 100644 index 00000000..8ea3fe09 --- /dev/null +++ b/src/qt-console/tray-monitor/restoreoptionswizardpage.h @@ -0,0 +1,57 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Restore Wizard: Options page + * + * Written by Norbert Bizet, May MMXVII + * + */ +#ifndef RESTOREOPTIONSWIZARDPAGE_H +#define RESTOREOPTIONSWIZARDPAGE_H + +#include + +class alist; +class QStandardItemModel; +class RESMON; +class task; + +namespace Ui { +class RestoreOptionsWizardPage; +} + +class RestoreOptionsWizardPage : public QWizardPage +{ + Q_OBJECT + +public: + explicit RestoreOptionsWizardPage(QWidget *parent = 0); + ~RestoreOptionsWizardPage(); + /* QWizardPage interface */ + void initializePage(); + bool validatePage(); + /* local interface */ + inline void setRes(RESMON *r) {res=r;} + +private: + Ui::RestoreOptionsWizardPage *ui; + RESMON *res; +}; + +#endif // RESTOREOPTIONSWIZARDPAGE_H diff --git a/src/qt-console/tray-monitor/restoreoptionswizardpage.ui b/src/qt-console/tray-monitor/restoreoptionswizardpage.ui new file mode 100644 index 00000000..cbc16ff8 --- /dev/null +++ b/src/qt-console/tray-monitor/restoreoptionswizardpage.ui @@ -0,0 +1,82 @@ + + + RestoreOptionsWizardPage + + + + 0 + 0 + 419 + 304 + + + + WizardPage + + + + + + Restore Client: + + + + + + + + + + Where : + + + + + + + + + + Replace : + + + + + + + + Never + + + + + Always + + + + + IfNewer + + + + + IfOlder + + + + + + + + Comment : + + + + + + + + + + + diff --git a/src/qt-console/tray-monitor/restorewizard.cpp b/src/qt-console/tray-monitor/restorewizard.cpp new file mode 100644 index 00000000..0f55ef20 --- /dev/null +++ b/src/qt-console/tray-monitor/restorewizard.cpp @@ -0,0 +1,56 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Restore Wizard + * + * Written by Norbert Bizet, May MMXVII + * + */ +#include "common.h" +#include "restorewizard.h" +#include "ui_restorewizard.h" +#include "filesmodel.h" +#include "task.h" +#include + +RestoreWizard::RestoreWizard(RESMON *r, QWidget *parent) : + QWizard(parent), + res(r), + ui(new Ui::RestoreWizard) +{ + ui->setupUi(this); + ui->RestWizClientPage->setRes(res); + ui->RestWizJobSelectPage->setRes(res); + ui->RestWiFileSelectionPage->setRes(res); + ui->RestWizPluginPage->setRes(res); + ui->RestWizAdvancedOptionsPage->setRes(res); + + /* avoid connect warnings */ + qRegisterMetaType("Qt::Orientation"); + qRegisterMetaType< QList < QPersistentModelIndex > >("QList"); + qRegisterMetaType< QVector < int > >("QVector"); +#if QT_VERSION >= 0x050000 + qRegisterMetaType< QAbstractItemModel::LayoutChangeHint >("QAbstractItemModel::LayoutChangeHint"); +#endif +} + +RestoreWizard::~RestoreWizard() +{ + delete ui; +} diff --git a/src/qt-console/tray-monitor/restorewizard.h b/src/qt-console/tray-monitor/restorewizard.h new file mode 100644 index 00000000..76b9a83a --- /dev/null +++ b/src/qt-console/tray-monitor/restorewizard.h @@ -0,0 +1,59 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Restore Wizard + * + * Written by Norbert Bizet, May MMXVII + * + */ +#ifndef RESTOREWIZARD_H +#define RESTOREWIZARD_H + +#include +#include +namespace Ui { +class RestoreWizard; +} + +class task; +class RESMON; +class QStandardItemModel; + +class RestoreWizard : public QWizard +{ + Q_OBJECT + +public: + enum { + RW_CLIENT_PAGE = 0, + RW_JOB_PAGE = 1, + RW_FILE_PAGE = 2, + RW_PLUGIN_PAGE = 3, + RW_ADVANCEDOPTIONS_PAGE = 4 + }; + + explicit RestoreWizard(RESMON *r, QWidget *parent = 0); + ~RestoreWizard(); + +private: + RESMON *res; + Ui::RestoreWizard *ui; +}; + +#endif // RESTOREWIZARD_H diff --git a/src/qt-console/tray-monitor/restorewizard.ui b/src/qt-console/tray-monitor/restorewizard.ui new file mode 100644 index 00000000..7671cdf8 --- /dev/null +++ b/src/qt-console/tray-monitor/restorewizard.ui @@ -0,0 +1,123 @@ + + + RestoreWizard + + + + 0 + 0 + 853 + 444 + + + + Wizard + + + false + + + false + + + + + 2 + 0 + + + + false + + + Restore + + + Select a Client. + + + + + Restore + + + Select Backup to restore. + + + + + Restore + + + Files Selection + + + + + Plugins + + + Restore pluging values + + + + + Restore Options + + + Select advanced options for restore + + + + + + ClientSelectWizardPage + QWizardPage +
+   <header>clientselectwizardpage.h</header>
+   <container>1</container>
+  </customwidget>
+  <customwidget>
+   <class>JobSelectWizardPage</class>
+   <extends>QWizardPage</extends>
+   <header>jobselectwizardpage.h</header>
+   <container>1</container>
+  </customwidget>
+  <customwidget>
+   <class>FileSelectWizardPage</class>
+   <extends>QWizardPage</extends>
+   <header>fileselectwizardpage.h</header>
+   <container>1</container>
+  </customwidget>
+  <customwidget>
+   <class>RestoreOptionsWizardPage</class>
+   <extends>QWizardPage</extends>
+   <header>restoreoptionswizardpage.h</header>
+   <container>1</container>
+  </customwidget>
+  <customwidget>
+   <class>PluginWizardPage</class>
+   <extends>QWizardPage</extends>
+   <header>pluginwizardpage.h</header>
+   <container>1</container>
+  </customwidget>
+ </customwidgets>
+ <resources/>
+ <connections>
+  <connection>
+   <sender>RestWizClientPage</sender>
+   <signal>completeChanged()</signal>
+   <receiver>RestoreWizard</receiver>
+   <slot>setFocus()</slot>
+   <hints>
+    <hint type="sourcelabel">
+     <x>424</x>
+     <y>288</y>
+    </hint>
+    <hint type="destinationlabel">
+     <x>424</x>
+     <y>274</y>
+    </hint>
+   </hints>
+  </connection>
+ </connections>
+</ui>
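Note on the wizard plumbing used by the pages above: the restore wizard pages exchange data through QWizard's field mechanism. A page calls registerField() on one of its input widgets (RestoreOptionsWizardPage does this for "restoreWhere", "restoreClient", etc.), or on itself bound to a Q_PROPERTY with a NOTIFY signal (PluginWizardPage does this for "pluginKeysStr"), and any later page reads the value back with field("name"). The following is a minimal, self-contained sketch of that pattern; it is not taken from the imported sources, and the class and field names (WherePage, SummaryPage, "demoWhere") are invented for illustration.

/*
 * Minimal illustration of the QWizard field mechanism described above.
 * Not part of the Bacula sources; all names are hypothetical.
 */
#include <QApplication>
#include <QWizard>
#include <QWizardPage>
#include <QLineEdit>
#include <QLabel>
#include <QVBoxLayout>

class WherePage : public QWizardPage
{
public:
   WherePage() {
      setTitle("Where");
      QLineEdit *where = new QLineEdit("/tmp/bacula-restores");
      QVBoxLayout *lay = new QVBoxLayout(this);
      lay->addWidget(where);
      /* Publish the line edit's text under the name "demoWhere";
       * appending '*' to the name would make the field mandatory. */
      registerField("demoWhere", where);
   }
};

class SummaryPage : public QWizardPage
{
public:
   SummaryPage() : label(new QLabel) {
      setTitle("Summary");
      QVBoxLayout *lay = new QVBoxLayout(this);
      lay->addWidget(label);
   }
   /* Called by QWizard when the page is entered; fields registered by
    * earlier pages are readable here by name. */
   void initializePage() {
      label->setText(QString("Will restore to: %1")
                        .arg(field("demoWhere").toString()));
   }
private:
   QLabel *label;
};

int main(int argc, char **argv)
{
   QApplication app(argc, argv);
   QWizard wiz;
   wiz.addPage(new WherePage);
   wiz.addPage(new SummaryPage);
   wiz.show();
   return app.exec();
}

In the wizard above, RestoreOptionsWizardPage::validatePage() reads "jobIds", "fileIds", "restoreWhere" and "pluginKeysStr" through the same field() calls before queuing the TASK_RESTORE task.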
diff --git a/src/qt-console/tray-monitor/run.ui b/src/qt-console/tray-monitor/run.ui new file mode 100644 index 00000000..4ed29af1 --- /dev/null +++ b/src/qt-console/tray-monitor/run.ui @@ -0,0 +1,379 @@ + + + runForm + + + + 0 + 0 + 568 + 407 + + + + + 0 + 0 + + + + Run job + + + + + + + 16777215 + 30 + + + + + 11 + + + + <h3>Run a Job</h3> + + + + + + + + 0 + 0 + + + + + 0 + 5 + + + + Qt::Horizontal + + + + + + + + + + + + :/images/runit.png + + + + + + + 0 + + + + Properties + + + + + + + + + + QFormLayout::AllNonFixedFieldsGrow + + + + + Job: + + + jobCombo + + + + + + + + 0 + 0 + + + + QComboBox::AdjustToContents + + + + + + + When: + + + dateTimeEdit + + + + + + + QDateTimeEdit::YearSection + + + yyyy-MM-dd hh:mm:ss + + + true + + + + + + + + + + <html><head/><body><p>Job statistics computed from the Catalog with previous jobs.</p><p>For accurate information, it is possible to use the bconsole &quot;estimate&quot; command.</p></body></html> + + + Estimate: + + + + + + Job Bytes: + + + + + + + + + + + + + + Job Files: + + + + + + + + + + + + + + Level: + + + + + + + + + + + + + + + + + + Advanced + + + + + + Level: + + + levelCombo + + + + + + + + + + Client: + + + clientCombo + + + + + + + + + + FileSet: + + + filesetCombo + + + + + + + + + + Pool: + + + poolCombo + + + + + + + + + + Storage: + + + storageCombo + + + + + + + + + + Catalog: + + + + + + + + + + Priority: + + + prioritySpin + + + + + + + + 60 + 0 + + + + + 60 + 16777215 + + + + + 60 + 0 + + + + 1 + + + 10000 + + + 10 + + + + + + + + + + + + + + 0 + 0 + + + + + 0 + 5 + + + + Qt::Horizontal + + + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + OK + + + + + + + Cancel + + + + + + + + + + + + diff --git a/src/qt-console/tray-monitor/runjob.cpp b/src/qt-console/tray-monitor/runjob.cpp new file mode 100644 index 00000000..e588ba30 --- /dev/null +++ b/src/qt-console/tray-monitor/runjob.cpp @@ -0,0 +1,509 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ + +#include "runjob.h" +#include + +static void fillcombo(QComboBox *cb, alist *lst, bool addempty=true) +{ + if (lst && lst->size() > 0) { + QStringList list; + char *str; + if (addempty) { + list << QString(""); + } + foreach_alist(str, lst) { + list << QString(str); + } + cb->addItems(list); + } else { + cb->setEnabled(false); + } +} + +RunJob::RunJob(RESMON *r): QDialog(), res(r), tabAdvanced(NULL) +{ + int nbjob; + if (res->jobs->size() == 0) { + QMessageBox msgBox; + msgBox.setText(_("This restricted console does not have access to Backup jobs")); + msgBox.setIcon(QMessageBox::Warning); + msgBox.exec(); + deleteLater(); + return; + + } + + ui.setupUi(this); + setModal(true); + connect(ui.cancelButton, SIGNAL(clicked()), this, SLOT(close_cb())); + connect(ui.okButton, SIGNAL(clicked()), this, SLOT(runjob())); + connect(ui.jobCombo, SIGNAL(currentIndexChanged(int)), this, SLOT(jobChanged(int))); + connect(ui.levelCombo, SIGNAL(currentIndexChanged(int)), this, SLOT(levelChanged(int))); + ui.dateTimeEdit->setMinimumDate(QDate::currentDate()); + ui.dateTimeEdit->setMaximumDate(QDate::currentDate().addDays(7)); + ui.dateTimeEdit->setDate(QDate::currentDate()); + ui.dateTimeEdit->setTime(QTime::currentTime()); + ui.boxEstimate->setVisible(false); + + res->mutex->lock(); + nbjob = res->jobs->size(); + fillcombo(ui.jobCombo, res->jobs, (nbjob > 1)); + fillcombo(ui.clientCombo, res->clients); + fillcombo(ui.filesetCombo,res->filesets); + fillcombo(ui.poolCombo, res->pools); + fillcombo(ui.storageCombo,res->storages); + fillcombo(ui.catalogCombo,res->catalogs); + res->mutex->unlock(); + connect(ui.tabWidget, SIGNAL(currentChanged(int)), this, SLOT(tabChange(int))); + QStringList levels; + levels << "" << "Incremental" << "Differential" << "Full"; + ui.levelCombo->addItems(levels); + + MONITOR *m = (MONITOR*) GetNextRes(R_MONITOR, NULL); + if (!m->display_advanced_options) { + tabAdvanced = ui.tabWidget->widget(1); + ui.tabWidget->removeTab(1); + } + + show(); +} + +void RunJob::tabChange(int idx) +{ + QString q = ui.tabWidget->tabText(idx); + if (q.contains("Advanced")) { + if (ui.jobCombo->currentText().compare("") == 0) { + pm_strcpy(curjob, ""); + ui.tab2->setEnabled(false); + + } else if (ui.jobCombo->currentText().compare(curjob.c_str()) != 0) { + task *t = new task(); + char *job = bstrdup(ui.jobCombo->currentText().toUtf8().data()); + pm_strcpy(curjob, job); // Keep the job name to not refresh the Advanced tab the next time + + Dmsg1(10, "get defaults for %s\n", job); + res->mutex->lock(); + bfree_and_null(res->defaults.job); + res->defaults.job = job; + res->mutex->unlock(); + + ui.tab2->setEnabled(false); + connect(t, SIGNAL(done(task *)), this, SLOT(fill_defaults(task *)), Qt::QueuedConnection); + t->init(res, TASK_DEFAULTS); + res->wrk->queue(t); + } + } +} + +void RunJob::runjob() +{ + POOL_MEM tmp; + char *p; + + p = ui.jobCombo->currentText().toUtf8().data(); + if (!p || !*p) { + QMessageBox msgBox; + msgBox.setText(_("Nothing selected")); + msgBox.setIcon(QMessageBox::Warning); + msgBox.exec(); + return; + } + + Mmsg(command, "run job=\"%s\" yes", p); + + if (strcmp(p, NPRTB(res->defaults.job)) == 0 || strcmp("", NPRTB(res->defaults.job)) == 0) { + p = ui.storageCombo->currentText().toUtf8().data(); + if (p && *p && strcmp(p, NPRTB(res->defaults.storage)) != 0) { + Mmsg(tmp, " storage=\"%s\"", p); + pm_strcat(command, tmp.c_str()); + } + + p = ui.clientCombo->currentText().toUtf8().data(); + if (p && *p && strcmp(p, NPRTB(res->defaults.client)) != 0) { + Mmsg(tmp, " 
client=\"%s\"", p); + pm_strcat(command, tmp.c_str()); + } + + p = ui.levelCombo->currentText().toUtf8().data(); + if (p && *p && strcmp(p, NPRTB(res->defaults.level)) != 0) { + Mmsg(tmp, " level=\"%s\"", p); + pm_strcat(command, tmp.c_str()); + } + + p = ui.poolCombo->currentText().toUtf8().data(); + if (p && *p && strcmp(p, NPRTB(res->defaults.pool)) != 0) { + Mmsg(tmp, " pool=\"%s\"", p); + pm_strcat(command, tmp.c_str()); + } + + p = ui.filesetCombo->currentText().toUtf8().data(); + if (p && *p && strcmp(p, NPRTB(res->defaults.fileset)) != 0) { + Mmsg(tmp, " fileset=\"%s\"", p); + pm_strcat(command, tmp.c_str()); + } + + if (res->defaults.priority && res->defaults.priority != ui.prioritySpin->value()) { + Mmsg(tmp, " priority=\"%d\"", res->defaults.priority); + pm_strcat(command, tmp.c_str()); + } + } + + QDate dnow = QDate::currentDate(); + QTime tnow = QTime::currentTime(); + QDate dval = ui.dateTimeEdit->date(); + QTime tval = ui.dateTimeEdit->time(); + + if (dval > dnow || (dval == dnow && tval > tnow)) { + Mmsg(tmp, " when=\"%s %s\"", dval.toString("yyyy-MM-dd").toUtf8().data(), tval.toString("hh:mm:00").toUtf8().data()); + pm_strcat(command, tmp.c_str()); + } + + if (res->type == R_CLIENT) { + pm_strcat(command, " fdcalled=1"); + } + + // Build the command and run it! + task *t = new task(); + t->init(res, TASK_RUN); + connect(t, SIGNAL(done(task *)), this, SLOT(jobStarted(task *)), Qt::QueuedConnection); + t->arg = command.c_str(); + res->wrk->queue(t); +} + +void RunJob::jobStarted(task *t) +{ + Dmsg1(10, "%s\n", command.c_str()); + Dmsg1(10, "-> jobid=%d\n", t->result.i); + deleteLater(); + delete t; +} + +void RunJob::close_cb(task *t) +{ + deleteLater(); + delete t; +} + +void RunJob::close_cb() +{ + task *t = new task(); + connect(t, SIGNAL(done(task *)), this, SLOT(close_cb(task *)), Qt::QueuedConnection); + t->init(res, TASK_DISCONNECT); + res->wrk->queue(t); +} + +void RunJob::jobChanged(int) +{ + char *p; + ui.levelCombo->setCurrentIndex(0); + ui.storageCombo->setCurrentIndex(0); + ui.filesetCombo->setCurrentIndex(0); + ui.clientCombo->setCurrentIndex(0); + ui.storageCombo->setCurrentIndex(0); + ui.poolCombo->setCurrentIndex(0); + ui.catalogCombo->setCurrentIndex(0); + + p = ui.jobCombo->currentText().toUtf8().data(); + if (p && *p) { + task *t = new task(); + t->init(res, TASK_INFO); + pm_strcpy(info, p); + connect(t, SIGNAL(done(task *)), this, SLOT(jobInfo(task *)), Qt::QueuedConnection); + t->arg = info.c_str(); // Jobname + t->arg2 = NULL; // Level + res->wrk->queue(t); + } +} + +void RunJob::levelChanged(int) +{ + char *p; + p = ui.jobCombo->currentText().toUtf8().data(); + if (p && *p) { + pm_strcpy(info, p); + p = ui.levelCombo->currentText().toUtf8().data(); + if (p && *p) { + task *t = new task(); + pm_strcpy(level, p); + connect(t, SIGNAL(done(task *)), this, SLOT(jobInfo(task *)), Qt::QueuedConnection); + t->init(res, TASK_INFO); + t->arg = info.c_str(); // Jobname + t->arg2 = level.c_str(); // Level + res->wrk->queue(t); + } + } +} + +void RunJob::jobInfo(task *t) +{ + char ed1[50]; + res->mutex->lock(); + if (res->infos.CorrNbJob == 0) { + ui.boxEstimate->setVisible(false); + } else { + QString t; + edit_uint64_with_suffix(res->infos.JobBytes, ed1); + strncat(ed1, "B", sizeof(ed1)); + ui.labelJobBytes->setText(QString(ed1)); + ui.labelJobFiles->setText(QString(edit_uint64_with_commas(res->infos.JobFiles, ed1))); + ui.labelJobLevel->setText(QString(job_level_to_str(res->infos.JobLevel))); + t = tr("Computed over %1 job%2, the correlation is 
%3/100.").arg(res->infos.CorrNbJob).arg(res->infos.CorrNbJob>1?"s":"").arg(res->infos.CorrJobBytes); + ui.labelJobBytes_2->setToolTip(t); + t = tr("Computed over %1 job%2, The correlation is %3/100.").arg(res->infos.CorrNbJob).arg(res->infos.CorrNbJob>1?"s":"").arg(res->infos.CorrJobFiles); + ui.labelJobFiles_2->setToolTip(t); + ui.boxEstimate->setVisible(true); + } + res->mutex->unlock(); + t->deleteLater(); +} + +static void set_combo(QComboBox *dest, char *str) +{ + if (str) { + int idx = dest->findText(QString(str), Qt::MatchExactly); + if (idx >= 0) { + dest->setCurrentIndex(idx); + } + } +} + +void RunJob::fill_defaults(task *t) +{ + if (t->status == true) { + res->mutex->lock(); + set_combo(ui.levelCombo, res->defaults.level); + set_combo(ui.filesetCombo, res->defaults.fileset); + set_combo(ui.clientCombo, res->defaults.client); + set_combo(ui.storageCombo, res->defaults.storage); + set_combo(ui.poolCombo, res->defaults.pool); + set_combo(ui.catalogCombo, res->defaults.catalog); + res->mutex->unlock(); + } + + ui.tab2->setEnabled(true); + t->deleteLater(); +} + +RunJob::~RunJob() +{ + Dmsg0(10, "~RunJob()\n"); + if (tabAdvanced) { + delete tabAdvanced; + } +} + +void TSched::init(const char *cmd_dir) +{ + bool started = (timer >= 0); + if (started) { + stop(); + } + + bfree_and_null(command_dir); + command_dir = bstrdup(cmd_dir); + + if (started) { + start(); + } +} + +TSched::TSched() { + timer = -1; + command_dir = NULL; +} + +TSched::~TSched() { + if (timer >= 0) { + stop(); + } + bfree_and_null(command_dir); +} + +#include +int breaddir(DIR *dirp, POOLMEM *&dname); + +bool TSched::read_command_file(const char *file, alist *lst, btime_t mtime) +{ + POOLMEM *line; + bool ret=false; + char *p; + TSchedJob *s; + Dmsg1(50, "open command file %s\n", file); + FILE *fp = fopen(file, "r"); + if (!fp) { + return false; + } + line = get_pool_memory(PM_FNAME); + + /* Get the first line, client/component:command */ + while (bfgets(line, fp) != NULL) { + strip_trailing_junk(line); + Dmsg1(50, "%s\n", line); + if (line[0] == '#') { + continue; + } + + if ((p = strchr(line, ':')) != NULL) { + *p=0; + s = new TSchedJob(line, p+1, mtime); + lst->append(s); + ret = true; + } + } + + free_pool_memory(line); + fclose(fp); + return ret; +} + +#include "lib/plugins.h" +#include "lib/cmd_parser.h" + +void TSched::timerEvent(QTimerEvent *event) +{ + Q_UNUSED(event) + POOL_MEM tmp, command; + TSchedJob *j; + alist lst(10, not_owned_by_alist); + arg_parser parser; + int i; + task *t; + RESMON *res; + scan_for_commands(&lst); + + foreach_alist(j, (&lst)) { + if (parser.parse_cmd(j->command) == bRC_OK) { + if ((i = parser.find_arg_with_value("job")) > 0) { + QMessageBox msgbox; + foreach_res(res, R_CLIENT) { + if (strcmp(res->hdr.name, j->component) == 0) { + break; + } + } + if (!res) { + foreach_res(res, R_DIRECTOR) { + if (strcmp(res->hdr.name, j->component) == 0) { + break; + } + } + } + if (!res) { + msgbox.setIcon(QMessageBox::Information); + msgbox.setText(QString("Unable to find the component \"%1\" to run the job \"%2\".").arg(j->component, j->command)); + msgbox.setStandardButtons(QMessageBox::Ignore); + } else { + + msgbox.setIcon(QMessageBox::Information); + msgbox.setText(QString("The job \"%1\" will start automatically in few seconds...").arg(parser.argv[i])); + msgbox.setStandardButtons(QMessageBox::Ok | QMessageBox::Ignore); + msgbox.setDefaultButton(QMessageBox::Ok); + msgbox.button(QMessageBox::Ok)->animateClick(6000); + } + switch(msgbox.exec()) { + case QMessageBox::Ok: + Mmsg(command, 
"%s yes", j->command); + + if (res->type == R_CLIENT) { + pm_strcat(command, " fdcalled=1"); + } + + // Build the command and run it! + t = new task(); + t->init(res, TASK_RUN); + connect(t, SIGNAL(done(task *)), this, SLOT(jobStarted(task *)), Qt::QueuedConnection); + t->arg = command.c_str(); + res->wrk->queue(t); + + break; + case QMessageBox::Cancel: + case QMessageBox::Ignore: + break; + } + } + } + delete j; + } +} + +void TSched::jobStarted(task *t) +{ + Dmsg1(10, "-> jobid=%d\n", t->result.i); + t->deleteLater(); +} + + +bool TSched::scan_for_commands(alist *commands) +{ + int name_max, len; + DIR* dp = NULL; + POOL_MEM fname(PM_FNAME), fname2(PM_FNAME); + POOL_MEM dir_entry; + bool ret=false, found=false; + struct stat statp; + + name_max = pathconf(".", _PC_NAME_MAX); + if (name_max < 1024) { + name_max = 1024; + } + + if (!(dp = opendir(command_dir))) { + berrno be; + Dmsg2(0, "Failed to open directory %s: ERR=%s\n", + command_dir, be.bstrerror()); + goto bail_out; + } + + for ( ;; ) { + if (breaddir(dp, dir_entry.addr()) != 0) { + if (!found) { + goto bail_out; + } + break; + } + if (strcmp(dir_entry.c_str(), ".") == 0 || + strcmp(dir_entry.c_str(), "..") == 0) { + continue; + } + len = strlen(dir_entry.c_str()); + if (len <= 5) { + continue; + } + if (strcmp(dir_entry.c_str() + len - 5, ".bcmd") != 0) { + continue; + } + + Mmsg(fname, "%s/%s", command_dir, dir_entry.c_str()); + + if (lstat(fname.c_str(), &statp) != 0 || !S_ISREG(statp.st_mode)) { + continue; /* ignore directories & special files */ + } + + if (read_command_file(fname.c_str(), commands, statp.st_mtime)) { + Mmsg(fname2, "%s.ok", fname.c_str()); + unlink(fname2.c_str()); + rename(fname.c_str(), fname2.c_str()); // TODO: We should probably unlink the file + } + } +bail_out: + if (dp) { + closedir(dp); + } + return ret; +} diff --git a/src/qt-console/tray-monitor/runjob.h b/src/qt-console/tray-monitor/runjob.h new file mode 100644 index 00000000..f1ab60fa --- /dev/null +++ b/src/qt-console/tray-monitor/runjob.h @@ -0,0 +1,119 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ + +#ifndef RUN_H +#define RUN_H + +#include "common.h" +#include "ui_run.h" +#include "tray_conf.h" +#include "task.h" +#include + +class RunJob: public QDialog +{ + Q_OBJECT + +public: + RESMON *res; + QWidget *tabAdvanced; + POOL_MEM command; + POOL_MEM info; + POOL_MEM level; + POOL_MEM curjob; + Ui::runForm ui; + RunJob(RESMON *r); + ~RunJob(); + +public slots: + void jobChanged(int); + void levelChanged(int); + void jobStarted(task *); + void jobInfo(task *); + void fill_defaults(task *); + void tabChange(int idx); + void runjob(); + /* close the window properly */ + void close_cb(task *t); + void close_cb(); +}; + +/* Object that can scan a directory to find jobs */ +class TSched: public QObject +{ + Q_OBJECT +private: + char *command_dir; + bool read_command_file(const char *file, alist *lst, btime_t mtime); + int timer; + +public: + TSched(); + ~TSched(); + void init(const char *cmd_dir); + bool scan_for_commands(alist *lst); + void start() { + timer = startTimer(60000); // 1-minute timer + }; + void stop() { + if (timer >= 0) { + killTimer(timer); + timer = -1; + } + }; +public slots: + void jobStarted(task *t); +protected: + void timerEvent(QTimerEvent *event); + +}; + + +/* Job found in the command directory */ +class TSchedJob: public QObject +{ + Q_OBJECT + +public: + char *component; // Name of the daemon + char *command; // job command + btime_t create_date; // When the command file was created + TSchedJob() : component(NULL), command(NULL) {}; + + TSchedJob(const char *comp, const char *cmd, btime_t cd) { + component = bstrdup(comp); + command = bstrdup(cmd); + create_date = cd; + }; + + ~TSchedJob() { + clear(); + }; + void clear() { + if (component) { + bfree_and_null(component); + } + if (command) { + bfree_and_null(command); + } + create_date = 0; + }; +}; + +#endif diff --git a/src/qt-console/tray-monitor/sd-monitor.ui b/src/qt-console/tray-monitor/sd-monitor.ui new file mode 100644 index 00000000..3c00e6c9 --- /dev/null +++ b/src/qt-console/tray-monitor/sd-monitor.ui @@ -0,0 +1,162 @@ + + + sdStatus + + + + 0 + 0 + 518 + 435 + + + + Form + + + + + + + + Storage Daemon Status + + + + + + + + + + + + + Name: + + + + + + + + + + + + + + Started: + + + + + + + + + + + + + + + + + + + + + Version: + + + + + + + Plugins: + + + + + + + + + + Running Jobs + + + + + + QAbstractItemView::SingleSelection + + + false + + + + + + + + + + Terminated Jobs + + + + + + QAbstractItemView::SingleSelection + + + + + + + + + + + + + + + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + + + + + :/images/view-refresh.png:/images/view-refresh.png + + + + + + + + + + + + diff --git a/src/qt-console/tray-monitor/sdstatus.cpp b/src/qt-console/tray-monitor/sdstatus.cpp new file mode 100644 index 00000000..741305af --- /dev/null +++ b/src/qt-console/tray-monitor/sdstatus.cpp @@ -0,0 +1,125 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ + +#include "sdstatus.h" +#include "../util/fmtwidgetitem.h" +#include "jcr.h" + +void SDStatus::doUpdate() +{ + if (count == 0) { + count++; + task *t = new task(); + status.pushButton->setEnabled(false); + connect(t, SIGNAL(done(task *)), this, SLOT(taskDone(task *)), Qt::QueuedConnection); + t->init(res, TASK_STATUS); + res->wrk->queue(t); + status.statusBar->setText(QString("Trying to connect to Storage...")); + Dmsg1(50, "doUpdate(%p)\n", res); + } +} + +void SDStatus::taskDone(task *t) +{ + count--; + if (!t->status) { + status.statusBar->setText(QString(t->errmsg)); + + } else { + status.statusBar->clear(); + if (t->type == TASK_STATUS) { + char ed1[50]; + struct s_last_job *ljob; + struct s_running_job *rjob; + res->mutex->lock(); + status.labelName->setText(QString(res->name)); + status.labelVersion->setText(QString(res->version)); + status.labelStarted->setText(QString(res->started)); + status.labelPlugins->setText(QString(res->plugins)); + /* Clear the table first */ + Freeze(*status.tableRunning); + Freeze(*status.tableTerminated); + QStringList headerlistR = (QStringList() << tr("JobId") + << tr("Job") << tr("Level") << tr("Client") + << tr("Storage") + << tr("Files") << tr("Bytes") << tr("Errors")); + status.tableRunning->clear(); + status.tableRunning->setRowCount(0); + status.tableRunning->setColumnCount(headerlistR.count()); + status.tableRunning->setHorizontalHeaderLabels(headerlistR); + status.tableRunning->setEditTriggers(QAbstractItemView::NoEditTriggers); + status.tableRunning->verticalHeader()->hide(); + status.tableRunning->setSortingEnabled(true); + + if (res->running_jobs) { + status.tableRunning->setRowCount(res->running_jobs->size()); + int row=0; + foreach_alist(rjob, res->running_jobs) { + int col=0; + TableItemFormatter item(*status.tableRunning, row++); + item.setNumericFld(col++, QString(edit_uint64(rjob->JobId, ed1))); + item.setTextFld(col++, QString(rjob->Job)); + item.setJobLevelFld(col++, QString(rjob->JobLevel)); + item.setTextFld(col++, QString(rjob->Client)); + item.setTextFld(col++, QString(rjob->Storage)); + item.setNumericFld(col++, QString(edit_uint64(rjob->JobFiles, ed1))); + item.setBytesFld(col++, QString(edit_uint64(rjob->JobBytes, ed1))); + item.setNumericFld(col++, QString(edit_uint64(rjob->Errors, ed1))); + } + } else { + Dmsg0(0, "Strange, the list is NULL\n"); + } + + QStringList headerlistT = (QStringList() << tr("JobId") + << tr("Job") << tr("Level") + << tr("Status") << tr("Files") << tr("Bytes") + << tr("Errors")); + + status.tableTerminated->clear(); + status.tableTerminated->setRowCount(0); + status.tableTerminated->setColumnCount(headerlistT.count()); + status.tableTerminated->setHorizontalHeaderLabels(headerlistT); + status.tableTerminated->setEditTriggers(QAbstractItemView::NoEditTriggers); + status.tableTerminated->verticalHeader()->hide(); + status.tableTerminated->setSortingEnabled(true); + + if (res->terminated_jobs) { + status.tableTerminated->setRowCount(res->terminated_jobs->size()); + int row=0; + foreach_dlist(ljob, res->terminated_jobs) { + int col=0; + TableItemFormatter item(*status.tableTerminated, row++); + item.setNumericFld(col++, QString(edit_uint64(ljob->JobId, ed1))); + item.setTextFld(col++, QString(ljob->Job)); + item.setJobLevelFld(col++, QString(ljob->JobLevel)); + item.setJobStatusFld(col++, QString(ljob->JobStatus)); + item.setNumericFld(col++, QString(edit_uint64(ljob->JobFiles, ed1))); + item.setBytesFld(col++, QString(edit_uint64(ljob->JobBytes, ed1))); + item.setNumericFld(col++, 
QString(edit_uint64(ljob->Errors, ed1))); + } + } else { + Dmsg0(0, "Strange, the list is NULL\n"); + } + res->mutex->unlock(); + } + Dmsg1(50, " Task %p OK\n", t); + } + t->deleteLater(); + status.pushButton->setEnabled(true); +} diff --git a/src/qt-console/tray-monitor/sdstatus.h b/src/qt-console/tray-monitor/sdstatus.h new file mode 100644 index 00000000..17f0a3fd --- /dev/null +++ b/src/qt-console/tray-monitor/sdstatus.h @@ -0,0 +1,42 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +#include "common.h" +#include "ui_sd-monitor.h" +#include "task.h" +#include "status.h" + +class SDStatus: public ResStatus +{ + Q_OBJECT + +public: + Ui::sdStatus status; + + SDStatus(RESMON *d): ResStatus(d) + { + status.setupUi(this); + QObject::connect(status.pushButton, SIGNAL(clicked()), this, SLOT(doUpdate()), Qt::QueuedConnection); + }; + ~SDStatus() { + }; +public slots: + void doUpdate(); + void taskDone(task *); +}; diff --git a/src/qt-console/tray-monitor/status.cpp b/src/qt-console/tray-monitor/status.cpp new file mode 100644 index 00000000..8320c5b2 --- /dev/null +++ b/src/qt-console/tray-monitor/status.cpp @@ -0,0 +1,42 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +#include "status.h" +#include "lib/worker.h" + +void ResStatus::doUpdate() +{ + if (count == 0) { + task *t = new task(); + connect(t, SIGNAL(done(task *)), this, SLOT(taskDone(task *)), Qt::QueuedConnection); + t->init(res, TASK_STATUS); + res->wrk->queue(t); + Dmsg0(0, "doUpdate()\n"); + count++; + } +} + +void ResStatus::taskDone(task *t) +{ + if (!t->status) { + Dmsg2(0, " Task %p failed => %s\n", t, t->errmsg); + } + delete t; + count--; +} diff --git a/src/qt-console/tray-monitor/status.h b/src/qt-console/tray-monitor/status.h new file mode 100644 index 00000000..0e0f4ea9 --- /dev/null +++ b/src/qt-console/tray-monitor/status.h @@ -0,0 +1,44 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. 
+ + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +#ifndef STATUS_H +#define STATUS_H + +#include "common.h" +#include +#include "tray_conf.h" +#include "task.h" + +class ResStatus: public QWidget +{ + Q_OBJECT + +public: + int count; + RESMON *res; + ResStatus(RESMON *c): count(0), res(c) { + }; + virtual ~ResStatus() { + }; +public slots: + virtual void doUpdate(); + virtual void taskDone(task *t); +}; + +#endif diff --git a/src/qt-console/tray-monitor/task.cpp b/src/qt-console/tray-monitor/task.cpp new file mode 100644 index 00000000..8e12b260 --- /dev/null +++ b/src/qt-console/tray-monitor/task.cpp @@ -0,0 +1,1619 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +#include "task.h" +#include "jcr.h" +#include "filesmodel.h" +#include "pluginmodel.h" +#include +#include "../util/fmtwidgetitem.h" + +#define dbglvl 10 +int authenticate_daemon(JCR *jcr, MONITOR *monitor, RESMON *res); + +static void *handle_task(void *data) +{ + task *t; + worker *wrk = (worker *)data; + lmgr_init_thread(); + + wrk->set_running(); + Dmsg0(dbglvl, "Worker started\n"); + + while (!wrk->is_quit_state()) { + if (wrk->is_wait_state()) { + wrk->wait(); + continue; + } + t = (task *)wrk->dequeue(); + if (!t) { + continue; + } + /* Do the work */ + switch(t->type) { + case TASK_STATUS: + t->do_status(); + break; + case TASK_RESOURCES: + t->get_resources(); + break; + case TASK_DEFAULTS: + t->get_job_defaults(); + break; + case TASK_RUN: + t->run_job(); + break; + case TASK_BWLIMIT: + t->set_bandwidth(); + break; + case TASK_INFO: + t->get_job_info(t->arg2); + break; + case TASK_DISCONNECT: + t->disconnect_bacula(); + t->mark_as_done(); + break; + case TASK_LIST_CLIENT_JOBS: + t->get_client_jobs(t->arg); + break; + case TASK_LIST_JOB_FILES: + t->get_job_files(t->arg, t->pathId); + break; + case TASK_RESTORE: + t->restore(); + break; + default: + Mmsg(t->errmsg, "Unknown task"); + t->mark_as_failed(); + break; + } + } + Dmsg0(dbglvl, "Worker stopped\n"); + lmgr_cleanup_thread(); + return NULL; +} + +bool task::set_bandwidth() +{ + bool ret = false; + btimer_t *tid = NULL; + if (res->type != R_CLIENT) { + mark_as_failed(); + Mmsg(errmsg, _("Bandwidth can set only set on Client")); + return false; + } + if (!arg || !*arg) { + mark_as_failed(); + Mmsg(errmsg, _("Bandwidth parameter is invalid")); + return false; + } + + if (res->proxy_sent) { + free_bsock(res->bs); + } + + if (!res->bs || !res->bs->is_open() || res->bs->is_error()) { + if (!connect_bacula()) { + mark_as_failed(); + return false; + } + } + + tid = start_thread_timer(NULL, pthread_self(), (uint32_t)120); + res->bs->fsend("setbandwidth limit=%s\n", NPRTB(arg)); + while (get_next_line(res)) { + Dmsg1(dbglvl, "-> %s\n", curline); + } + + if (tid) { + stop_thread_timer(tid); + } + + /* Do not reuse the same socket */ + disconnect_bacula(); + + if (ret) { + mark_as_done(); + } else { + 
mark_as_failed(); + } + return ret; +} + +RESMON *task::get_res() +{ + return res; +} + +void task::lock_res() +{ + res->mutex->lock(); +} + +void task::unlock_res() +{ + res->mutex->unlock(); +} + +bool task::disconnect_bacula() +{ + free_bsock(res->bs); + return true; +} + +bool task::connect_bacula() +{ + JCR jcr; + bool ret = false; + memset(&jcr, 0, sizeof(jcr)); + curend = curline = NULL; + + RESMON *r = get_res(); + MONITOR *monitor = (MONITOR*)GetNextRes(R_MONITOR, NULL); + + if (r->type == R_CLIENT) { + r->proxy_sent = false; + if (r->bs && (r->bs->is_error() || !r->bs->is_open())) { + free_bsock(r->bs); + } + if (!r->bs) { + r->bs = new_bsock(); + Dmsg0(dbglvl, "Trying to connect to FD\n"); + if (r->bs->connect(NULL, r->connect_timeout, 0, 0, _("Client daemon"), + r->address, NULL, r->port, 0)) + { + Dmsg0(dbglvl, "Connect done!\n"); + jcr.file_bsock = r->bs; + if (!authenticate_daemon(&jcr, monitor, r)) { + Dmsg0(dbglvl, "Unable to authenticate\n"); + Mmsg(errmsg, "Unable to authenticate with the FileDaemon"); + free_bsock(r->bs); + return false; + } + Dmsg0(dbglvl, "Authenticate OK\n"); + ret = true; + } else { + Mmsg(errmsg, "Unable to connect to the FileDaemon"); + Dmsg0(dbglvl, "Connect error!\n"); + } + } else { + ret = true; + } + } + if (r->type == R_STORAGE) { + if (r->bs && (r->bs->is_error() || !r->bs->is_open())) { + free_bsock(r->bs); + } + if (!r->bs) { + r->bs = new_bsock(); + Dmsg0(dbglvl, "Trying to connect to FD\n"); + if (r->bs->connect(NULL, r->connect_timeout, 0, 0, _("Storage daemon"), + r->address, NULL, r->port, 0)) + { + Dmsg0(dbglvl, "Connect done!\n"); + jcr.store_bsock = r->bs; + if (!authenticate_daemon(&jcr, monitor, r)) { + Dmsg0(dbglvl, "Unable to authenticate\n"); + Mmsg(errmsg, "Unable to authenticate with the Storage Daemon"); + free_bsock(r->bs); + return false; + } + Dmsg0(dbglvl, "Authenticate OK\n"); + ret = true; + } else { + Mmsg(errmsg, "Unable to connect to the Storage Daemon"); + Dmsg0(dbglvl, "Connect error!\n"); + } + } else { + ret = true; + } + } + if (r->type == R_DIRECTOR) { + if (r->bs && (r->bs->is_error() || !r->bs->is_open())) { + free_bsock(r->bs); + } + if (!r->bs) { + r->bs = new_bsock(); + Dmsg0(dbglvl, "Trying to connect to DIR\n"); + if (r->bs->connect(NULL, r->connect_timeout, 0, 0, _("Director daemon"), + r->address, NULL, r->port, 0)) + { + Dmsg0(dbglvl, "Connect done!\n"); + jcr.dir_bsock = r->bs; + if (!authenticate_daemon(&jcr, monitor, r)) { + Dmsg0(dbglvl, "Unable to authenticate\n"); + Mmsg(errmsg, "Unable to authenticate with the Director"); + free_bsock(r->bs); + return false; + } + Dmsg0(dbglvl, "Authenticate OK\n"); + ret = true; + } else { + Mmsg(errmsg, "Unable to connect to the Director"); + Dmsg0(dbglvl, "Connect error!\n"); + } + } else { + ret = true; + } + } + return ret; +} + +bool task::read_status_running(RESMON *r) +{ + bool ret = false; + char *start, *end; + struct s_running_job *item = NULL; + alist *running_jobs = New(alist(10, owned_by_alist)); + + while (r->bs->recv() >= -1) { + if (r->bs->msglen < 0 && + r->bs->msglen != BNET_CMD_BEGIN && + r->bs->msglen != BNET_CMD_OK) + { + Dmsg1(dbglvl, "Got Signal %s\n", bnet_sig_to_ascii(r->bs->msglen)); + break; + } + Dmsg2(dbglvl, "RECV -> %s:%d\n", r->bs->msg, r->bs->msglen); + start = r->bs->msg; + + while ((end = strchr(start, '\n')) != NULL) { + *end = 0; + Dmsg1(dbglvl, "line=[%s]\n", start); + if (strncasecmp(start, "jobid=", 6) == 0) { + if (item) { + Dmsg1(dbglvl, "Append item %ld\n", item->JobId); + running_jobs->append(item); + } + item = 
(struct s_running_job *)malloc(sizeof(struct s_running_job)); + memset(item, 0, sizeof(struct s_running_job)); + item->JobId = str_to_uint64(start + 6); + + } else if (!item) { + Dmsg0(dbglvl, "discard line\n"); + + } else if (strncasecmp(start, "level=", 6) == 0) { + item->JobLevel = start[6]; + + } else if (strncasecmp(start, "type=", 5) == 0) { + item->JobType = start[5]; + + } else if (strncasecmp(start, "status=", 7) == 0) { + item->JobStatus = start[7]; + + } else if (strncasecmp(start, "jobbytes=", 9) == 0) { + item->JobBytes = str_to_uint64(start + 9); + + } else if (strncasecmp(start, "jobfiles=", 9) == 0) { + item->JobFiles = str_to_uint64(start + 9); + + } else if (strncasecmp(start, "job=", 4) == 0) { + bstrncpy(item->Job, start + 4, sizeof(item->Job)); + + } else if (strncasecmp(start, "starttime_epoch=", 16) == 0) { + item->start_time = str_to_uint64(start + 16); + + } else if (strncasecmp(start, "schedtime_epoch=", 16) == 0) { + item->sched_time = str_to_uint64(start + 16); + + } else if (strncasecmp(start, "bytes/sec=", 10) == 0) { + item->bytespersec = str_to_uint64(start + 10); + + } else if (strncasecmp(start, "avebytes_sec=", 13) == 0) { + item->bytespersec = str_to_uint64(start + 13); + + } else if (strncasecmp(start, "errors=", 7) == 0) { + item->Errors = str_to_uint64(start + 7); + + } else if (strncasecmp(start, "readbytes=", 10) == 0) { + item->ReadBytes = str_to_uint64(start + 10); + + } else if (strncasecmp(start, "processing file=", 16) == 0) { + bstrncpy(item->CurrentFile, start + 16, sizeof(item->CurrentFile)); + + } else if (strncasecmp(start, "clientname=", 11) == 0) { + bstrncpy(item->Client, start + 11, sizeof(item->Client)); + + } else if (strncasecmp(start, "fileset=", 8) == 0) { + bstrncpy(item->FileSet, start + 8, sizeof(item->FileSet)); + + } else if (strncasecmp(start, "storage=", 8) == 0) { + bstrncpy(item->Storage, start + 8, sizeof(item->Storage)); + + } else if (strncasecmp(start, "rstorage=", 8) == 0) { + bstrncpy(item->RStorage, start + 8, sizeof(item->Storage)); + + } else if (strncasecmp(start, "sdtls=", 6) == 0) { + item->SDtls = str_to_uint64(start + 6); + } + start = end+1; + } + r->last_update = time(NULL); + + if (r->bs->is_error()) { + Mmsg(errmsg, "Got error on the socket communication line"); + goto bail_out; + } + } + if (item) { + Dmsg1(dbglvl, "Append item %ld\n", item->JobId); + running_jobs->append(item); + } + ret = true; + +bail_out: + r->mutex->lock(); + if (r->running_jobs) { + delete r->running_jobs; + } + r->running_jobs = running_jobs; + r->mutex->unlock(); + + return ret; +} + +bool task::read_status_terminated(RESMON *r) +{ + bool ret = false; + char *start, *end; + struct s_last_job *item = NULL; + + r->mutex->lock(); + if (r->terminated_jobs) { + delete r->terminated_jobs; + } + r->terminated_jobs = New(dlist(item, &item->link)); + r->mutex->unlock(); + + while (r->bs->recv() >= -1) { + if (r->bs->msglen < 0 && + r->bs->msglen != BNET_CMD_BEGIN && + r->bs->msglen != BNET_CMD_OK) + { + Dmsg1(dbglvl, "Got Signal %s\n", bnet_sig_to_ascii(r->bs->msglen)); + break; + } + + Dmsg2(dbglvl, "RECV -> %s:%d\n", r->bs->msg, r->bs->msglen); + r->mutex->lock(); + start = r->bs->msg; + + while ((end = strchr(start, '\n')) != NULL) { + *end = 0; + Dmsg1(dbglvl, "line=[%s]\n", start); + if (strncasecmp(start, "jobid=", 6) == 0) { + if (item) { + Dmsg1(dbglvl, "Append item %ld\n", item->JobId); + r->terminated_jobs->append(item); + } + item = (struct s_last_job *)malloc(sizeof(struct s_last_job)); + memset(item, 0, sizeof(struct 
s_last_job)); + item->JobId = str_to_uint64(start + 6); + + } else if (!item) { + Dmsg0(dbglvl, "discard line\n"); + + } else if (strncasecmp(start, "level=", 6) == 0) { + item->JobLevel = start[6]; + + } else if (strncasecmp(start, "type=", 5) == 0) { + item->JobType = start[5]; + + } else if (strncasecmp(start, "status=", 7) == 0) { + item->JobStatus = start[7]; + + } else if (strncasecmp(start, "jobbytes=", 9) == 0) { + item->JobBytes = str_to_uint64(start + 9); + + } else if (strncasecmp(start, "jobfiles=", 9) == 0) { + item->JobFiles = str_to_uint64(start + 9); + + } else if (strncasecmp(start, "job=", 4) == 0) { + bstrncpy(item->Job, start + 4, sizeof(item->Job)); + + } else if (strncasecmp(start, "starttime_epoch=", 16) == 0) { + item->start_time = str_to_uint64(start + 16); + + } else if (strncasecmp(start, "endtime_epoch=", 14) == 0) { + item->end_time = str_to_uint64(start + 14); + + } else if (strncasecmp(start, "errors=", 7) == 0) { + item->Errors = str_to_uint64(start + 7); + } + start = end+1; + } + r->last_update = time(NULL); + r->mutex->unlock(); + + if (r->bs->is_error()) { + Mmsg(errmsg, "Got error on the socket communication line"); + goto bail_out; + } + } + if (item) { + r->mutex->lock(); + Dmsg1(dbglvl, "Append item %ld\n", item->JobId); + r->terminated_jobs->append(item); + r->mutex->unlock(); + } + ret = true; + +bail_out: + return ret; +} + +bool task::read_status_header(RESMON *r) +{ + bool ret = false; + char *start, *end; + + while (r->bs->recv() >= -1) { + if (r->bs->msglen < 0 && + r->bs->msglen != BNET_CMD_BEGIN && + r->bs->msglen != BNET_CMD_OK) + { + Dmsg1(dbglvl, "Got Signal %d\n", r->bs->msglen); + break; + } + + Dmsg2(dbglvl, "RECV -> %s:%d\n", r->bs->msg, r->bs->msglen); + r->mutex->lock(); + start = r->bs->msg; + + while ((end = strchr(start, '\n')) != NULL) { + *end = 0; + Dmsg1(dbglvl, "line=[%s]\n", start); + if (strncasecmp(start, "name=", 5) == 0) { + bstrncpy(r->name, start + 5, sizeof(r->name)); + + } else if (strncasecmp(start, "version=", 8) == 0) { + bstrncpy(r->version, start + 8, sizeof(r->version)); + + } else if (strncasecmp(start, "plugins=", 8) == 0) { + bstrncpy(r->plugins, start + 8, sizeof(r->plugins)); + + } else if (strncasecmp(start, "bwlimit=", 8) == 0) { + r->bwlimit = str_to_uint64(start + 8); + + } else if (strncasecmp(start, "started=", 8) == 0) { + bstrncpy(r->started, start + 8, sizeof(r->started)); + + } else if (strncasecmp(start, "reloaded=", 9) == 0) { + bstrncpy(r->reloaded, start + 9, sizeof(r->reloaded)); + } + start = end+1; + } + + if (r->bs->is_error()) { + r->mutex->unlock(); + Mmsg(errmsg, "Got error on the socket communication line"); + goto bail_out; + + } + r->last_update = time(NULL); + r->mutex->unlock(); + } + ret = true; +bail_out: + return ret; +} + + +bool task::do_status() +{ + bool ret = false; + btimer_t *tid = NULL; + + /* We don't want to use a proxy session */ + if (res->type == R_CLIENT && res->proxy_sent) { + free_bsock(res->bs); + } + if (!res->bs || !res->bs->is_open() || res->bs->is_error()) { + if (!connect_bacula()) { + goto bail_out; + } + } + /* TODO: */ + tid = start_thread_timer(NULL, pthread_self(), (uint32_t)120); + if (res->type == R_CLIENT || res->type == R_STORAGE) { + Dmsg0(dbglvl, "Send status command header\n"); + res->bs->fsend(".status header api=2\n"); + // TODO: Update a local set of variables and commit everything when it's done + ret = read_status_header(res); + + if (ret) { + res->bs->fsend(".status terminated api=2\n"); + ret = read_status_terminated(res); + } + if 
(ret) { + res->bs->fsend(".status running api=2\n"); + ret = read_status_running(res); + } + } + if (res->type == R_DIRECTOR) { + Dmsg0(dbglvl, "-> .api 2\n"); + res->bs->fsend(".api 2\n"); + while (get_next_line(res)) { + Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); + } + Dmsg0(dbglvl, "Send status command header\n"); + res->bs->fsend(".status dir header\n"); + // TODO: Update a local set of variables and commit everything when it's done + ret = read_status_header(res); + + if (ret) { + Dmsg0(dbglvl, "Send status command terminated\n"); + res->bs->fsend(".status dir terminated\n"); + ret = read_status_terminated(res); + } + if (ret) { + Dmsg0(dbglvl, "Send status command running\n"); + res->bs->fsend(".status dir running\n"); + ret = read_status_running(res); + } + } +bail_out: + if (tid) { + stop_thread_timer(tid); + } + /* Use a new socket the next time */ + disconnect_bacula(); + if (ret) { + mark_as_done(); + } else { + mark_as_failed(); + } + return ret; +} + +bool task::get_next_line(RESMON *r) +{ + /* We are currently reading a line */ + if (curline && curend && r->bs->msglen > 0 && curend < (r->bs->msg + r->bs->msglen - 1)) { + curline = curend + 1; /* skip \0 */ + if ((curend = strchr(curline, '\n')) != NULL) { + *curend = '\0'; + } + return true; + } + curline = curend = NULL; + do { + r->bs->recv(); + + if (r->bs->msglen < 0) { + Dmsg1(dbglvl, "<- %s\n", bnet_sig_to_ascii(r->bs->msglen)); + switch(r->bs->msglen) { + case BNET_ERROR_MSG: + r->bs->recv(); + strip_trailing_junk(r->bs->msg); + Dmsg1(0, "ERROR: %s\n", r->bs->msg); + break; + case BNET_MAIN_PROMPT: // stop + return false; + case BNET_CMD_OK: + case BNET_CMD_BEGIN: + case BNET_MSGS_PENDING: + break; + case BNET_TERMINATE: + return false; + default: // error or question? 
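+             /* Any other (unhandled) signal -- most likely an error or an
+              * unexpected prompt from the daemon -- ends the line stream and
+              * lets the caller decide what to do next. */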
+ return false; + } + + } else if (r->bs->msglen == 0) { // strange + return false; + + } else { + Dmsg1(10, "<- %s\n", r->bs->msg); + curline = r->bs->msg; + curend = strchr(curline, '\n'); + if (curend) { + *curend = 0; + } + return true; // something to read + } + } while (!r->bs->is_error()); + return false; +} + +bool task::get_job_defaults() +{ + bool ret = false; + btimer_t *tid = NULL; + char *p; + + if (!res->bs || !res->bs->is_open() || res->bs->is_error()) { + if (!connect_bacula()) { + goto bail_out; + } + } + + res->mutex->lock(); + bfree_and_null(res->defaults.client); + bfree_and_null(res->defaults.pool); + bfree_and_null(res->defaults.storage); + bfree_and_null(res->defaults.level); + bfree_and_null(res->defaults.type); + bfree_and_null(res->defaults.fileset); + bfree_and_null(res->defaults.catalog); + bfree_and_null(res->defaults.where); + + tid = start_thread_timer(NULL, pthread_self(), (uint32_t)120); + if (res->type == R_CLIENT && !res->proxy_sent) { + res->proxy_sent = true; + res->bs->fsend("proxy\n"); + while (get_next_line(res)) { + if (strncmp(curline, "2000", 4) != 0) { + pm_strcpy(errmsg, curline); + goto bail_out; + } + Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); + } + } + + res->bs->fsend(".api 2\n"); + while (get_next_line(res)) { + Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); + } + res->bs->fsend(".defaults job=%s\n", res->defaults.job); + while (get_next_line(res)) { + Dmsg1(dbglvl, "line = [%s]\n", curline); + if ((p = strchr(curline, '=')) == NULL) { + continue; + } + *p++ = 0; + if (strcasecmp(curline, "client") == 0) { + res->defaults.client = bstrdup(p); + + } else if (strcasecmp(curline, "pool") == 0) { + res->defaults.pool = bstrdup(p); + + } else if (strcasecmp(curline, "storage") == 0) { + res->defaults.storage = bstrdup(p); + + } else if (strcasecmp(curline, "level") == 0) { + res->defaults.level = bstrdup(p); + + } else if (strcasecmp(curline, "type") == 0) { + res->defaults.type = bstrdup(p); + + } else if (strcasecmp(curline, "fileset") == 0) { + res->defaults.fileset = bstrdup(p); + + } else if (strcasecmp(curline, "catalog") == 0) { + res->defaults.catalog = bstrdup(p); + + } else if (strcasecmp(curline, "priority") == 0) { + res->defaults.priority = str_to_uint64(p); + + } else if (strcasecmp(curline, "where") == 0) { + res->defaults.where = bstrdup(p); + + } else if (strcasecmp(curline, "replace") == 0) { + res->defaults.replace = str_to_uint64(p); + } + + } + ret = true; +bail_out: + if (tid) { + stop_thread_timer(tid); + } + if (ret) { + mark_as_done(); + } else { + mark_as_failed(); + } + res->mutex->unlock(); + return ret; +} + +bool task::get_job_info(const char *level) +{ + bool ret = false; + btimer_t *tid = NULL; + char *p; + + if (!res->bs || !res->bs->is_open() || res->bs->is_error()) { + if (!connect_bacula()) { + goto bail_out; + } + } + res->mutex->lock(); + memset(&res->infos, 0, sizeof(res->infos)); + + tid = start_thread_timer(NULL, pthread_self(), (uint32_t)120); + if (res->type == R_CLIENT && !res->proxy_sent) { + res->proxy_sent = true; + res->bs->fsend("proxy\n"); + while (get_next_line(res)) { + if (strncmp(curline, "2000", 4) != 0) { + pm_strcpy(errmsg, curline); + goto bail_out; + } + Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); + } + } + + res->bs->fsend(".api 2\n"); + while (get_next_line(res)) { + Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); + } + if (level) { + res->bs->fsend(".estimate job=\"%s\" level=%s\n", arg, level); + } else { + res->bs->fsend(".estimate 
job=\"%s\"\n", arg); + } + while (get_next_line(res)) { + Dmsg1(dbglvl, "line = [%s]\n", curline); + if ((p = strchr(curline, '=')) == NULL) { + continue; + } + *p++ = 0; + if (strcasecmp(curline, "level") == 0) { + res->infos.JobLevel = p[0]; + + } else if (strcasecmp(curline, "jobbytes") == 0) { + res->infos.JobBytes = str_to_uint64(p); + + } else if (strcasecmp(curline, "jobfiles") == 0) { + res->infos.JobFiles = str_to_uint64(p); + + } else if (strcasecmp(curline, "corrbytes") == 0) { + res->infos.CorrJobBytes = str_to_uint64(p); + + } else if (strcasecmp(curline, "corrfiles") == 0) { + res->infos.CorrJobFiles = str_to_uint64(p); + + } else if (strcasecmp(curline, "nbjob") == 0) { + res->infos.CorrNbJob = str_to_uint64(p); + } + } + ret = true; +bail_out: + res->mutex->unlock(); + if (tid) { + stop_thread_timer(tid); + } + if (ret) { + mark_as_done(); + } else { + mark_as_failed(); + } + return ret; +} + +bool task::run_job() +{ + bool ret = false; + char *p; + btimer_t *tid = NULL; + + if (!res->bs || !res->bs->is_open() || res->bs->is_error()) { + if (!connect_bacula()) { + goto bail_out; + } + } + + tid = start_thread_timer(NULL, pthread_self(), (uint32_t)120); + if (res->type == R_CLIENT && !res->proxy_sent) { + res->proxy_sent = true; + res->bs->fsend("proxy\n"); + while (get_next_line(res)) { + if (strncmp(curline, "2000", 4) != 0) { + pm_strcpy(errmsg, curline); + goto bail_out; + } + Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); + } + } + + res->bs->fsend(".api 2\n"); + while (get_next_line(res)) { + Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); + } + if (res->type == R_DIRECTOR && res->use_setip) { + res->bs->fsend("setip\n"); + while (get_next_line(res)) { + Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); + } + } + res->bs->fsend("%s\n", arg); + while (get_next_line(res)) { + if ((p = strstr(curline, "JobId=")) != NULL && sscanf(p, "JobId=%d\n", &result.i) == 1) { + ret = true; + } + } + // Close the socket, it's over or we don't want to reuse it + disconnect_bacula(); + +bail_out: + + if (tid) { + stop_thread_timer(tid); + } + if (ret) { + mark_as_done(); + } else { + mark_as_failed(); + } + return ret; +} + +bool task::get_client_jobs(const char* client) +{ + bool ret = false; + btimer_t *tid = NULL; + int row=0; + QStringList headers; + struct s_last_job *ljob=NULL; + + if (!model) { + goto bail_out; + } + + model->clear(); + headers << tr("JobId") << tr("Job") << tr("Level") << tr("Date") << tr("Files") << tr("Bytes"); + model->setHorizontalHeaderLabels(headers); + + if (!res->bs || !res->bs->is_open() || res->bs->is_error()) { + if (!connect_bacula()) { + goto bail_out; + } + } + + tid = start_thread_timer(NULL, pthread_self(), (uint32_t)120); + + if (res->type == R_CLIENT && !res->proxy_sent) { + res->proxy_sent = true; + res->bs->fsend("proxy\n"); + while (get_next_line(res)) { + if (strncmp(curline, "2000", 4) != 0) { + pm_strcpy(errmsg, curline); + goto bail_out; + } + Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); + } + } + + res->bs->fsend(".api 2\n"); + while (get_next_line(res)) { + Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); + } + + if (res->type == R_DIRECTOR && res->use_setip) { + res->bs->fsend("setip\n"); + while (get_next_line(res)) { + Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); + } + } + + res->bs->fsend(".bvfs_get_jobs client=%s\n", client); + while (get_next_line(res)) { + QString line(curline); + QStringList line_lst = line.split(" ", QString::SkipEmptyParts); + + model->setItem(row, 0, new 
QStandardItem(line_lst[0])); + + model->setItem(row, 1, new QStandardItem(line_lst[3])); + + QDateTime date; + date.setTime_t(line_lst[1].toUInt()); + QStandardItem *dateItem = new QStandardItem(); + dateItem->setData(date, Qt::DisplayRole); + model->setItem(row, 3, dateItem); + + /* find the job in res terminated list */ + if (res->terminated_jobs) { + foreach_dlist(ljob, res->terminated_jobs) { + if (ljob->JobId == line_lst[0].toUInt()) { + model->setItem(row, 2, new QStandardItem(QString(job_level_to_str(ljob->JobLevel)))); + model->setItem(row, 4, new QStandardItem(QString::number(ljob->JobFiles))); + model->setItem(row, 5, new QStandardItem(QString::number(ljob->JobBytes))); + break; + } + } + } + + ret = true; + ++row; + } + + // Close the socket, it's over or we don't want to reuse it + disconnect_bacula(); + +bail_out: + + if (tid) { + stop_thread_timer(tid); + } + if (ret) { + mark_as_done(); + } else { + mark_as_failed(); + } + return ret; +} + +extern int decode_stat(char *buf, struct stat *statp, int stat_size, int32_t *LinkFI); + +bool task::get_job_files(const char* job, uint64_t pathid) +{ + bool ret = false; + btimer_t *tid = NULL; + QString jobs; + struct stat statp; + int32_t LinkFI; + + if (!model) { + goto bail_out; + } + + model->removeRows(0, model->rowCount()); + + if (!res->bs || !res->bs->is_open() || res->bs->is_error()) { + if (!connect_bacula()) { + goto bail_out; + } + } + + tid = start_thread_timer(NULL, pthread_self(), (uint32_t)120); + + if (res->type == R_CLIENT && !res->proxy_sent) { + res->proxy_sent = true; + res->bs->fsend("proxy\n"); + while (get_next_line(res)) { + if (strncmp(curline, "2000", 4) != 0) { + pm_strcpy(errmsg, curline); + goto bail_out; + } + Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); + } + } + + if (res->type == R_DIRECTOR && res->use_setip) { + res->bs->fsend("setip\n"); + while (get_next_line(res)) { + Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); + } + } + + res->bs->fsend(".api 2\n"); + while (get_next_line(res)) { + Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); + } + + /* retrieve all job ids*/ + res->bs->fsend(".bvfs_get_jobids jobid=%s\n", job); + while (get_next_line(res)) { + jobs = QString(curline); + Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); + } + + /* cache the file set */ + res->bs->fsend(".bvfs_update jobid=%s\n", jobs.toLatin1().data()); + while (get_next_line(res)) { + Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); + } + + if (pathid == 0) { + res->bs->fsend(".bvfs_lsdirs jobid=%s path=\"\"\n", jobs.toLatin1().data()); + } else { + res->bs->fsend(".bvfs_lsdirs jobid=%s pathid=%lld\n", jobs.toLatin1().data(), pathid); + } + + //+ " limit=" + limit + " offset=" + offset ; + while (get_next_line(res)) { + QString line(curline); + QStringList line_lst = line.split("\t", QString::KeepEmptyParts); + if ((line_lst.size() == 6) && line_lst[5] != ".") + { + DirectoryItem *d = new DirectoryItem(); + d->setData(QVariant(line_lst[0]), PathIdRole); + d->setData(QVariant(line_lst[1]), FilenameIdRole); + d->setData(QVariant(line_lst[2]), FileIdRole); + d->setData(QVariant(jobs), JobIdRole); + d->setData(QVariant(line_lst[4]), LStatRole); + d->setData(QVariant(line_lst[5]), PathRole); + QFileInfo fi(QDir(QString(arg3)), line_lst[5]); + d->setData(QVariant(fi.absoluteFilePath()), Qt::ToolTipRole); + d->setData(QVariant(line_lst[5]), Qt::DisplayRole); + model->appendRow(d); + ret = true; + } + } + + /* then, request files */ + if (strcmp(arg2,"") == 0) { + if (pathid == 0) { + 
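+          /* When no pathid is given (pathid == 0), request the listing by
+           * (empty) path instead of by pathid. */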
res->bs->fsend(".bvfs_lsfiles jobid=%s path=\"\"\n", jobs.toLatin1().data()); + } else { + res->bs->fsend(".bvfs_lsfiles jobid=%s pathid=%lld\n", jobs.toLatin1().data(), pathid); + } + } else { + if (pathid == 0) { + res->bs->fsend(".bvfs_lsfiles jobid=%s path=\"\" pattern=\"%s\"\n", jobs.toLatin1().data(), arg2); + } else { + res->bs->fsend(".bvfs_lsfiles jobid=%s pathid=%lld pattern=\"%s\"\n", jobs.toLatin1().data(), pathid, arg2); + } + } + //+ " limit=" + limit + " offset=" + offset ; + + while (get_next_line(res)) { + QString line(curline); + QStringList line_lst = line.split("\t", QString::SkipEmptyParts); + if ((line_lst.size() == 6) && line_lst[5] != ".") + { + FileItem *f = new FileItem(); + f->setData(QVariant(line_lst[0]), PathIdRole); + f->setData(QVariant(line_lst[1]), FilenameIdRole); + f->setData(QVariant(line_lst[2]), FileIdRole); + f->setData(QVariant(jobs), JobIdRole); + f->setData(QVariant(line_lst[4]), LStatRole); + f->setData(QVariant(line_lst[5]), PathRole); + QFileInfo fi(QDir(QString(arg3)), line_lst[5]); + f->setData(QVariant(fi.absoluteFilePath()), Qt::ToolTipRole); + f->setData(QVariant(line_lst[5]), Qt::DisplayRole); + QList colums; + decode_stat(line_lst[4].toLocal8Bit().data(), + &statp, sizeof(statp), &LinkFI); + char buf[200]; + bstrutime(buf, sizeof(buf), statp.st_mtime); + colums << f << new QStandardItem(convertBytesSI(statp.st_size)) << new QStandardItem(buf); + model->appendRow(colums); + ret = true; + } + } + + // Close the socket, it's over or we don't want to reuse it + disconnect_bacula(); + +bail_out: + + if (tid) { + stop_thread_timer(tid); + } + if (ret) { + mark_as_done(); + } else { + mark_as_failed(); + } + return ret; +} + +bool task::prepare_restore() +{ + bool ret = false; + btimer_t *tid = NULL; + QString q; + /* in the restore prepare phase, we apply plugins settings. 
+ * Upload and restoration must be done in one shot within the same connection since + * the director uses UA adress to create a local file unique name */ + FILE *fp; + int idx = 0; + QStringList pluginKeys; + QStringList pluginNames; + + if (!restore_field.pluginkeys.isEmpty()) { + pluginKeys = restore_field.pluginkeys.split(','); + } + if (!restore_field.pluginnames.isEmpty()) { + pluginNames = restore_field.pluginnames.split(','); + } + + ASSERT(pluginKeys.count() == pluginNames.count()); + + if (!res->bs || !res->bs->is_open() || res->bs->is_error()) { + if (!connect_bacula()) { + goto bail_out; + } + } + + tid = start_thread_timer(NULL, pthread_self(), (uint32_t)120); + + if (res->type == R_CLIENT && !res->proxy_sent) { + res->proxy_sent = true; + res->bs->fsend("proxy\n"); + while (get_next_line(res)) { + if (strncmp(curline, "2000", 4) != 0) { + pm_strcpy(errmsg, curline); + goto bail_out; + } + Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); + } + } + + if (res->type == R_DIRECTOR && res->use_setip) { + res->bs->fsend("setip\n"); + while (get_next_line(res)) { + Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); + } + } + + res->bs->fsend(".api 2\n"); + while (get_next_line(res)) { + Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); + } + + /* retrieve all job ids*/ + q = QString(".bvfs_restore path=%1 jobid=%2").arg(restore_field.tableName, restore_field.jobIds); + if (!restore_field.fileIds.isEmpty()) { + q += QString(" fileid=%1").arg(restore_field.fileIds); + } + if (!restore_field.dirIds.isEmpty()) { + q += QString(" dirid=%1").arg(restore_field.dirIds); + } + if (!restore_field.hardlinks.isEmpty()) { + q += QString(" hardlink=%1").arg(restore_field.hardlinks); + } + q += "\n"; + res->bs->fsend(q.toLatin1().data()); + while (get_next_line(res)) { + ret = true; + Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); + } + + /* parse plugins files, upload them and call restore pluginrestoreconf*/ + foreach(QString k, pluginKeys) { + QStringList keysplit=k.split(':'); + if (keysplit.count() > 1) { + QString key = keysplit[1]; + QString name = pluginNames[idx]; + + fp = fopen(name.toLatin1().data(), "r"); + if (!fp) { + berrno be; + Dmsg2(dbglvl, "Unable to open %s. 
ERR=%s\n", name.toLatin1().data(), be.bstrerror(errno)); + goto bail_out; + } + + res->bs->fsend(".putfile key=\"%s\"\n", key.toLatin1().data()); + + /* Just read the file and send it to the director */ + while (!feof(fp)) { + int i = fread(res->bs->msg, 1, sizeof_pool_memory(res->bs->msg) - 1, fp); + if (i > 0) { + res->bs->msg[i] = 0; + res->bs->msglen = i; + res->bs->send(); + } + } + + res->bs->signal(BNET_EOD); + fclose(fp); + + res->bs->fsend("restore pluginrestoreconf=\"%s\"\n", k.toLatin1().data()); + while (get_next_line(res)) { + Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); + } + } + ++idx; + } + + // Close the socket, it's over or we don't want to reuse it + disconnect_bacula(); + + bail_out: + + if (tid) { + stop_thread_timer(tid); + } + + return ret; +} + +bool task::run_restore() +{ + bool ret = false; + uint timeout = 1000; + btimer_t *tid = NULL; + QString q; + + if (!res->bs || !res->bs->is_open() || res->bs->is_error()) { + if (!connect_bacula()) { + goto bail_out; + } + } + + tid = start_thread_timer(NULL, pthread_self(), (uint32_t)120); + + if (res->type == R_CLIENT && !res->proxy_sent) { + res->proxy_sent = true; + res->bs->fsend("proxy\n"); + while (get_next_line(res)) { + if (strncmp(curline, "2000", 4) != 0) { + pm_strcpy(errmsg, curline); + goto bail_out; + } + Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); + } + } + + if (res->type == R_DIRECTOR && res->use_setip) { + res->bs->fsend("setip\n"); + while (get_next_line(res)) { + Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); + } + } + + res->bs->fsend(".api 2\n"); + while (get_next_line(res)) { + Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); + } + + q = QString("restore client=%1").arg(restore_field.client); + + if (!restore_field.where.isEmpty()) { + restore_field.where.replace("\"", ""); + q += QString(" where=\"%1\"").arg(restore_field.where); + } + + if (!restore_field.comment.isEmpty()) { + restore_field.comment.replace("\"", ""); + q += QString(" comment=\"%1\"").arg(restore_field.comment); + } + + q += QString(" file=\"?%1\"").arg(restore_field.tableName); + q += " done yes\n"; + + res->bs->fsend(q.toLatin1().data()); + + /* drain the messages */ + while (get_next_line(res)) { + ret = true; + // FIXME : report a signal to have a progress feedback + Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); + } + + // Close the socket, it's over or we don't want to reuse it + disconnect_bacula(); + + bail_out: + + if (tid) { + stop_thread_timer(tid); + } + + return ret; +} + +bool task::clean_restore() +{ + bool ret = false; + btimer_t *tid = NULL; + QString q; + + if (!res->bs || !res->bs->is_open() || res->bs->is_error()) { + if (!connect_bacula()) { + goto bail_out; + } + } + + tid = start_thread_timer(NULL, pthread_self(), (uint32_t)120); + + if (res->type == R_CLIENT && !res->proxy_sent) { + res->proxy_sent = true; + res->bs->fsend("proxy\n"); + while (get_next_line(res)) { + if (strncmp(curline, "2000", 4) != 0) { + pm_strcpy(errmsg, curline); + goto bail_out; + } + Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); + } + } + + if (res->type == R_DIRECTOR && res->use_setip) { + res->bs->fsend("setip\n"); + while (get_next_line(res)) { + Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); + } + } + + res->bs->fsend(".api 2\n"); + while (get_next_line(res)) { + Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); + } + + q = QString(".bvfs_cleanup path=%1\n").arg(restore_field.tableName); + + res->bs->fsend(q.toLatin1().data()); + while (get_next_line(res)) { + ret = 
true; + // FIXME : report a signal to have a progress feedback + Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); + } + + // Close the socket, it's over or we don't want to reuse it + disconnect_bacula(); + + bail_out: + + if (tid) { + stop_thread_timer(tid); + } + + return ret; +} + +bool task::restore() +{ + bool ret=prepare_restore(); + if (ret) { + ret = run_restore(); + if (ret) { + ret = clean_restore(); + } + } + + if (ret) { + mark_as_done(); + } else { + mark_as_failed(); + } + return ret; +} + +QString task::plugins_ids(const QString& jobIds) +{ + return parse_plugins(jobIds, "restoreobjectid"); +} + +QString task::plugins_names(const QString& jobIds) +{ + return parse_plugins(jobIds, "pluginname"); +} + +QString task::parse_plugins(const QString& jobIds, const QString& fieldName) +{ + btimer_t *tid = NULL; + QString ret; + QString q; + + if (!res->bs || !res->bs->is_open() || res->bs->is_error()) { + if (!connect_bacula()) { + return ret; + } + } + + tid = start_thread_timer(NULL, pthread_self(), (uint32_t)120); + + if (res->type == R_CLIENT && !res->proxy_sent) { + res->proxy_sent = true; + res->bs->fsend("proxy\n"); + while (get_next_line(res)) { + if (strncmp(curline, "2000", 4) != 0) { + pm_strcpy(errmsg, curline); + return ret; + } + Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); + } + } + + if (res->type == R_DIRECTOR && res->use_setip) { + res->bs->fsend("setip\n"); + while (get_next_line(res)) { + Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); + } + } + + q = QString("llist pluginrestoreconf jobid=%1\n").arg(jobIds); + res->bs->fsend(q.toLatin1().data()); + + QStringList sl; + while (get_next_line(res)) { + QString line(curline); + line = line.simplified(); + QStringList line_lst = line.split(":", QString::SkipEmptyParts); + + if (!line_lst.empty() && fieldName.compare(line_lst[0]) == 0) { + sl << line_lst[1]; + } + } + + ret = sl.join(","); + // Close the socket, it's over or we don't want to reuse it + disconnect_bacula(); + + + if (tid) { + stop_thread_timer(tid); + } + + return ret; +} + +QFile* task::plugin(const QString& name, const QString& jobIds, int id) +{ + QFile *ret(NULL); + btimer_t *tid = NULL; + QString q; + + if (id < 0) + return NULL; + + if (!res->bs || !res->bs->is_open() || res->bs->is_error()) { + if (!connect_bacula()) { + return NULL; + } + } + + tid = start_thread_timer(NULL, pthread_self(), (uint32_t)120); + + if (res->type == R_CLIENT && !res->proxy_sent) { + res->proxy_sent = true; + res->bs->fsend("proxy\n"); + while (get_next_line(res)) { + if (strncmp(curline, "2000", 4) != 0) { + pm_strcpy(errmsg, curline); + return NULL; + } + Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); + } + } + + if (res->type == R_DIRECTOR && res->use_setip) { + res->bs->fsend("setip\n"); + while (get_next_line(res)) { + Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); + } + } + + res->bs->fsend(".api 1\n"); + while (get_next_line(res)) { + Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); + } + + ret = new QFile(name); + ret->open(QIODevice::WriteOnly); + + q = QString("llist pluginrestoreconf jobid=%1 id=%2\n").arg(jobIds).arg(id); + res->bs->fsend(q.toLatin1().data()); + while (get_next_line(res)) { + if (QString(curline).contains(":")) + continue; + ret->write(curline); + ret->write("\n"); + } + ret->close(); + + // Close the socket, it's over or we don't want to reuse it + disconnect_bacula(); + + if (tid) { + stop_thread_timer(tid); + } + + return ret; +} + +/* Get resources to run a job */ +bool task::get_resources() 
+{ + bool ret = false; + btimer_t *tid = NULL; + + if (!res->bs || !res->bs->is_open() || res->bs->is_error()) { + if (!connect_bacula()) { + goto bail_out; + } + } + + res->mutex->lock(); + if (res->jobs) { + delete res->jobs; + } + res->jobs = New(alist(10, owned_by_alist)); + if (res->clients) { + delete res->clients; + } + res->clients = New(alist(10, owned_by_alist)); + if (res->filesets) { + delete res->filesets; + } + res->filesets = New(alist(10, owned_by_alist)); + if (res->pools) { + delete res->pools; + } + res->pools = New(alist(10, owned_by_alist)); + if (res->storages) { + delete res->storages; + } + res->storages = New(alist(10, owned_by_alist)); + if (res->catalogs) { + delete res->catalogs; + } + res->catalogs = New(alist(10, owned_by_alist)); + + tid = start_thread_timer(NULL, pthread_self(), (uint32_t)120); + if (res->type == R_CLIENT && !res->proxy_sent) { + res->proxy_sent = true; + res->bs->fsend("proxy\n"); + while (get_next_line(res)) { + if (strncmp(curline, "2000", 4) != 0) { + pm_strcpy(errmsg, curline); + goto bail_out; + } + Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); + } + Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); + } + + res->bs->fsend(".api 2\n"); + while (get_next_line(res)) { + Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); + } + + res->bs->fsend(".jobs type=B\n"); + while (get_next_line(res)) { + res->jobs->append(bstrdup(curline)); + } + + res->bs->fsend(".pools\n"); + while (get_next_line(res)) { + res->pools->append(bstrdup(curline)); + } + + res->bs->fsend(".clients\n"); + while (get_next_line(res)) { + res->clients->append(bstrdup(curline)); + } + + res->bs->fsend(".filesets\n"); + while (get_next_line(res)) { + res->filesets->append(bstrdup(curline)); + } + + res->bs->fsend(".storage\n"); + while (get_next_line(res)) { + res->storages->append(bstrdup(curline)); + } + + res->bs->fsend(".catalogs\n"); + while (get_next_line(res)) { + res->catalogs->append(bstrdup(curline)); + } + +bail_out: + res->mutex->unlock(); + + if (tid) { + stop_thread_timer(tid); + } + if (ret) { + mark_as_done(); + } else { + mark_as_failed(); + } + return ret; +} + +worker *worker_start() +{ + worker *w = New(worker()); + w->start(handle_task, w); + return w; +} + +void worker_stop(worker *w) +{ + if (w) { + w->stop(); + delete w; + } +} diff --git a/src/qt-console/tray-monitor/task.h b/src/qt-console/tray-monitor/task.h new file mode 100644 index 00000000..ad62194b --- /dev/null +++ b/src/qt-console/tray-monitor/task.h @@ -0,0 +1,158 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ + +#ifndef TASK_H +#define TASK_H + +#include "common.h" +#include +#include +#include "tray_conf.h" + +enum { + TASK_NONE, + TASK_STATUS, + TASK_RESOURCES, + TASK_QUERY, + TASK_RUN, + TASK_LIST_CLIENT_JOBS, + TASK_LIST_JOB_FILES, + TASK_RESTORE, + TASK_PLUGIN, + TASK_DEFAULTS, + TASK_CLOSE, + TASK_INFO, + TASK_BWLIMIT, + TASK_DISCONNECT +}; + +/* The task should emit a signal when done */ +class task: public QObject +{ + Q_OBJECT + +public: + RESMON *res; + POOLMEM *errmsg; + int type; + bool status; + char *curline; + char *curend; + const char *arg; /* Argument that can be used by some tasks */ + const char *arg2; + const char *arg3; + QStandardItemModel *model; /* model to fill, depending on context */ + uint64_t pathId; + + union { + bool b; + int i; + char c[256]; + } result; /* The task might return something */ + + struct r_field + { + QString tableName; + QString jobIds; + QString fileIds; + QString dirIds; + QString hardlinks; + QString client; + QString where; + QString replace; + QString comment; + QString pluginnames; + QString pluginkeys; + } restore_field; + + task(): QObject(), res(NULL), type(TASK_NONE), status(false), curline(NULL), + curend(NULL), arg(NULL), arg2(NULL), arg3(NULL), model(NULL), pathId(0) + { + errmsg = get_pool_memory(PM_FNAME); + *errmsg = 0; + memset(result.c, 0, sizeof(result.c)); + } + + ~task() { + Enter(); + disconnect(); /* disconnect all signals */ + free_pool_memory(errmsg); + } + + void init(int t) { + res = NULL; + type = t; + status = false; + arg = NULL; + arg2 = NULL; + arg3 = NULL; + model = NULL; + pathId = 0; + } + + void init(RESMON *s, int t) { + init(t); + res = s; + } + + RESMON *get_res(); + void lock_res(); + void unlock_res(); + bool connect_bacula(); + bool do_status(); + bool read_status_terminated(RESMON *res); + bool read_status_header(RESMON *res); + bool read_status_running(RESMON *res); + bool set_bandwidth(); + bool disconnect_bacula(); + void mark_as_done() { + status = true; + emit done(this); + } + void mark_as_failed() { + status = false; + emit done(this); + } + bool get_resources(); + bool get_next_line(RESMON *res); + bool get_job_defaults(); /* Look r->defaults.job */ + bool run_job(); + bool get_job_info(const char *level); /* look r->info */ + bool get_client_jobs(const char* client); + bool get_job_files(const char* job, uint64_t pathId); + + bool prepare_restore(); + bool run_restore(); + bool clean_restore(); + bool restore(); + + QString plugins_ids(const QString &jobIds); + QString plugins_names(const QString& jobIds); + QString parse_plugins(const QString& jobIds, const QString& fieldName); + QFile* plugin(const QString &name, const QString& jobIds, int id); + +signals: + + void done(task *t); +}; + +worker *worker_start(); +void worker_stop(worker *); + +#endif diff --git a/src/qt-console/tray-monitor/tray-monitor.conf.in b/src/qt-console/tray-monitor/tray-monitor.conf.in new file mode 100644 index 00000000..3f72921b --- /dev/null +++ b/src/qt-console/tray-monitor/tray-monitor.conf.in @@ -0,0 +1,32 @@ +# +# Bacula Tray Monitor Configuration File +# +# Copyright (C) 2000-2017 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# + +Monitor { + Name = @basename@-mon + Password = "@mon_dir_password@" # password for the Directors + RefreshInterval = 30 seconds +} + +Client { + Name = @basename@-fd + Address = @hostname@ + FDPort = @fd_port@ + Password = "@mon_fd_password@" # password for FileDaemon +} + +Storage { + Name = @basename@-sd + Address = @hostname@ + SDPort = @sd_port@ + Password = 
"@mon_sd_password@" # password for StorageDaemon +} + +Director { + Name = @basename@-dir + DIRport = @dir_port@ + address = @hostname@ +} diff --git a/src/qt-console/tray-monitor/tray-monitor.cpp b/src/qt-console/tray-monitor/tray-monitor.cpp new file mode 100644 index 00000000..5f29a9f8 --- /dev/null +++ b/src/qt-console/tray-monitor/tray-monitor.cpp @@ -0,0 +1,320 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +#include "tray-monitor.h" +#include +#include + +/* Static variables */ +char *configfile = NULL; +static MONITOR *monitor = NULL; +static CONFIG *config = NULL; +static TrayUI *mainwidget = NULL; +static TSched *scheduler = NULL; + +#define CONFIG_FILE "./bacula-tray-monitor.conf" /* default configuration file */ + +#ifdef HAVE_WIN32 +#define HOME_VAR "APPDATA" +#define CONFIG_FILE_HOME "bacula-tray-monitor.conf" /* In $HOME */ +#else +#define HOME_VAR "HOME" +#define CONFIG_FILE_HOME ".bacula-tray-monitor.conf" /* In $HOME */ +#endif + +static void usage() +{ + fprintf(stderr, _( +PROG_COPYRIGHT +"\n%sVersion: %s (%s) %s %s %s\n\n" +"Usage: tray-monitor [-c config_file] [-d debug_level]\n" +" -c set configuration file to file\n" +" -d set debug level to \n" +" -dt print timestamp in debug output\n" +" -t test - read configuration and exit\n" +" -W 0/1 force the detection of the systray\n" +" -? print this message.\n" +"\n"), 2004, BDEMO, VERSION, BDATE, HOST_OS, DISTNAME, DISTVER); +} + +void refresh_tray(TrayUI *t) +{ + RESMON *r; + MONITOR *mon; + + if (!t) { + return; + } + + t->clearTabs(); + if (!config) { + return; + } + + mon = (MONITOR *) GetNextRes(R_MONITOR, NULL); + t->spinRefresh->setValue(mon?mon->RefreshInterval:60); + + foreach_res(r, R_CLIENT) { + t->addTab(r); + } + foreach_res(r, R_DIRECTOR) { + t->addTab(r); + } + foreach_res(r, R_STORAGE) { + t->addTab(r); + } +} + +void display_error(const char *fmt, ...) +{ + va_list arg_ptr; + POOL_MEM tmp(PM_MESSAGE); + QMessageBox msgBox; + int maxlen; + + if (!fmt || !*fmt) { + return; + } + + maxlen = tmp.size() - 1; + va_start(arg_ptr, fmt); + bvsnprintf(tmp.c_str(), maxlen, fmt, arg_ptr); + va_end(arg_ptr); + + msgBox.setIcon(QMessageBox::Critical); + msgBox.setText(tmp.c_str()); + msgBox.exec(); +} + +void error_handler(const char *file, int line, LEX */* lc */, const char *msg, ...) 
+{ + POOL_MEM tmp; + va_list arg_ptr; + va_start(arg_ptr, msg); + vsnprintf(tmp.c_str(), tmp.size(), msg, arg_ptr); + va_end(arg_ptr); + display_error("Error %s:%d %s\n", file, line, tmp.c_str()); +} + +int tls_pem_callback(char *buf, int size, const void * /*userdata*/) +{ + bool ok; + QString text = QInputDialog::getText(mainwidget, _("TLS PassPhrase"), + buf, QLineEdit::Normal, + QDir::home().dirName(), &ok); + if (ok) { + bstrncpy(buf, text.toUtf8().data(), size); + return 1; + } else { + return 0; + } +} + +bool reload() +{ + bool displaycfg=false; + int nitems = 0; + struct stat sp; + + Dmsg0(50, "reload the configuration!\n"); + scheduler->stop(); + if (config) { + delete config; + } + config = NULL; + monitor = NULL; + + if (stat(configfile, &sp) != 0) { + berrno be; + Dmsg2(50, "Unable to find %s. ERR=%s\n", configfile, be.bstrerror()); + displaycfg = true; + goto bail_out; + } + + config = New(CONFIG()); + if (!parse_tmon_config(config, configfile, M_ERROR)) { + Dmsg1(50, "Error while parsing %s\n", configfile); + // TODO: Display a warning message an open the configuration + // window + displaycfg = true; + } + + LockRes(); + foreach_res(monitor, R_MONITOR) { + nitems++; + } + if (!displaycfg && nitems != 1) { + Mmsg(config->m_errmsg, + _("Error: %d Monitor resources defined in %s. " + "You must define one Monitor resource.\n"), + nitems, configfile); + displaycfg = true; + } + monitor = (MONITOR*)GetNextRes(R_MONITOR, (RES *)NULL); + UnlockRes(); + if (displaycfg) { + display_error(config->m_errmsg); + } + refresh_tray(mainwidget); + if (monitor && monitor->command_dir) { + scheduler->init(monitor->command_dir); + scheduler->start(); + } else { + Dmsg0(50, "Do not start the scheduler\n"); + } +bail_out: + return displaycfg; +} + +/********************************************************************* + * + * Main Bacula Tray Monitor -- User Interface Program + * + */ +int main(int argc, char *argv[]) +{ + QApplication app(argc, argv); + int ch; + bool test_config = false, display_cfg = false; + TrayUI tray; + TSched sched; + + setlocale(LC_ALL, ""); + bindtextdomain("bacula", LOCALEDIR); + textdomain("bacula"); + + init_stack_dump(); + my_name_is(argc, argv, "tray-monitor"); + lmgr_init_thread(); + init_msg(NULL, NULL, NULL); +#ifdef HAVE_WIN32 + working_directory = getenv("TMP"); +#endif + if (working_directory == NULL) { + working_directory = "/tmp"; + } + start_watchdog(); + +#ifndef HAVE_WIN32 + struct sigaction sigignore; + sigignore.sa_flags = 0; + sigignore.sa_handler = SIG_IGN; + sigfillset(&sigignore.sa_mask); + sigaction(SIGPIPE, &sigignore, NULL); +#endif + + while ((ch = getopt(argc, argv, "c:d:th?TW:")) != -1) { + switch (ch) { + case 'c': /* configuration file */ + if (configfile != NULL) { + free(configfile); + } + configfile = bstrdup(optarg); + break; + + case 'W': + tray.have_systray = (atoi(optarg) != 0); + break; + + case 'T': + set_trace(true); + break; + + case 'd': + if (*optarg == 't') { + dbg_timestamp = true; + } else { + debug_level = atoi(optarg); + if (debug_level <= 0) { + debug_level = 1; + } + } + break; + + case 't': + test_config = true; + break; + + case 'h': + case '?': + default: + usage(); + exit(1); + } + } + argc -= optind; + //argv += optind; + + if (argc) { + usage(); + exit(1); + } + + /* Keep generated files for ourself */ + umask(0077); + + if (configfile == NULL) { + if (getenv(HOME_VAR) != NULL) { + int len = strlen(getenv(HOME_VAR)) + strlen(CONFIG_FILE_HOME) + 5; + configfile = (char *) malloc(len); + bsnprintf(configfile, len, 
"%s/%s", getenv(HOME_VAR), CONFIG_FILE_HOME); + + } else { + configfile = bstrdup(CONFIG_FILE); + } + } + Dmsg1(50, "configfile=%s\n", configfile); + + // We need to initialize the scheduler before the reload() command + scheduler = &sched; + + OSDependentInit(); /* Initialize Windows path handling */ + (void)WSA_Init(); /* Initialize Windows sockets */ + + display_cfg = reload(); + + if (test_config) { + exit(0); + } + /* If we have a systray, we always keep the application*/ + if (tray.have_systray) { + app.setQuitOnLastWindowClosed(false); + + } else { /* Without a systray, we quit when we close */ + app.setQuitOnLastWindowClosed(true); + } + tray.setupUi(&tray, monitor); + refresh_tray(&tray); + mainwidget = &tray; + if (display_cfg) { + new Conf(); + } + app.exec(); + sched.stop(); + stop_watchdog(); + (void)WSACleanup(); /* Cleanup Windows sockets */ + + if (config) { + delete config; + } + config = NULL; + bfree_and_null(configfile); + term_msg(); + return 0; +} diff --git a/src/qt-console/tray-monitor/tray-monitor.h b/src/qt-console/tray-monitor/tray-monitor.h new file mode 100644 index 00000000..8bf22255 --- /dev/null +++ b/src/qt-console/tray-monitor/tray-monitor.h @@ -0,0 +1,31 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +#ifndef TRAY_MONITOR_H +#define TRAY_MONITOR_H + +#include "common.h" +#include "tray-ui.h" +#include "tray_conf.h" + +bool parse_tmon_config(CONFIG *config, const char *configfile, int exit_code); +bool reload(); +void display_error(const char *msg, ...); + +#endif diff --git a/src/qt-console/tray-monitor/tray-monitor.pro.in b/src/qt-console/tray-monitor/tray-monitor.pro.in new file mode 100644 index 00000000..b82ab1e0 --- /dev/null +++ b/src/qt-console/tray-monitor/tray-monitor.pro.in @@ -0,0 +1,58 @@ +# +# Copyright (C) 2000-2018 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +###################################################################### +# +# !!!!!!! IMPORTANT !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +# +# Edit only tray-monitor.pro.in -- tray-monitor.pro is built by the ./configure program +# +# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +# +# CONFIG options for Windows are pulled from win32/qmake.conf +CONFIG += qt +#CONFIG += qt debug + +greaterThan(QT_MAJOR_VERSION, 4): QT += widgets + +cross-win32 { + LIBS += -mwindows -L../../win32/release32 -lbacula + INCLUDEPATH += ../../win32/compat +} +!cross-win32 { + LIBS += -L../../lib -lbaccfg -lbac -L../../findlib -lbacfind @OPENSSL_LIBS@ +} + + +bins.path = /$(DESTDIR)@sbindir@ +bins.files = bacula-tray-monitor +confs.path = /$(DESTDIR)@sysconfdir@ +confs.commands = ./install_conf_file + +TEMPLATE = app +TARGET = bacula-tray-monitor +QMAKE_EXTRA_TARGETS += depend +DEPENDPATH += . +INCLUDEPATH += ../.. . 
+LIBTOOL_LINK = @QMAKE_LIBTOOL@ --silent --tag=CXX --mode=link +LIBTOOL_INSTALL = @QMAKE_LIBTOOL@ --silent --mode=install +QMAKE_LINK = $${LIBTOOL_LINK} $(CXX) +QMAKE_INSTALL_PROGRAM = $${LIBTOOL_INSTALL} install -m @SBINPERM@ -p +QMAKE_CLEAN += obj/* .libs/* bacula-tray-monitor release/bacula-tray-monitor +QMAKE_CXXFLAGS += -DTRAY_MONITOR +QMAKE_CFLAGS += -DTRAY_MONITOR + +INSTALLS = bins confs + +RESOURCES = ../main.qrc +MOC_DIR = moc +OBJECTS_DIR = obj +UI_DIR = ui + +# Main directory +HEADERS += tray-monitor.h tray_conf.h tray-ui.h fdstatus.h task.h ../util/fmtwidgetitem.h dirstatus.h conf.h sdstatus.h runjob.h status.h restorewizard.h filesmodel.h clientselectwizardpage.h jobselectwizardpage.h fileselectwizardpage.h restoreoptionswizardpage.h pluginwizardpage.h +SOURCES += tray-monitor.cpp tray_conf.cpp fdstatus.cpp task.cpp authenticate.cpp ../util/fmtwidgetitem.cpp dirstatus.cpp sdstatus.cpp conf.cpp runjob.cpp status.cpp restorewizard.cpp clientselectwizardpage.cpp jobselectwizardpage.cpp fileselectwizardpage.cpp restoreoptionswizardpage.cpp pluginwizardpage.cpp +FORMS += fd-monitor.ui dir-monitor.ui sd-monitor.ui main-conf.ui res-conf.ui run.ui restorewizard.ui clientselectwizardpage.ui jobselectwizardpage.ui fileselectwizardpage.ui restoreoptionswizardpage.ui pluginwizardpage.ui + +TRANSLATIONS += ts/tm_fr.ts ts/tm_de.ts ts/tm_ja.ts diff --git a/src/qt-console/tray-monitor/tray-monitor.pro.mingw32.in b/src/qt-console/tray-monitor/tray-monitor.pro.mingw32.in new file mode 100644 index 00000000..5ffc16c4 --- /dev/null +++ b/src/qt-console/tray-monitor/tray-monitor.pro.mingw32.in @@ -0,0 +1,67 @@ +###################################################################### +# +# !!!!!!! IMPORTANT !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +# +# Edit only tray-monitor.pro.mingw32.in -- tray-monitor.pro.mingw32 is built by the ./configure program +# +# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +# +# CONFIG options for Windows are pulled from win32/qmake.conf +# +# Copyright (C) 2000-2018 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# + +CONFIG += qt cross-win32 +#CONFIG += qt debug + +greaterThan(QT_MAJOR_VERSION, 4): QT += widgets + +cross-win32 { + LIBS += ../../win32/lib/obj32/ini.o -mwindows -L../../win32/release32 -lbacula -lpthread + INCLUDEPATH += ../../win32/compat +} +!cross-win32 { + LIBS += -L../../lib -lbaccfg -lbac -L../../findlib -lbacfind @OPENSSL_LIBS@ +} + + +bins.path = /$(DESTDIR)@sbindir@ +bins.files = bacula-tray-monitor +confs.path = /$(DESTDIR)@sysconfdir@ +confs.commands = ./install_conf_file + +TEMPLATE = app +TARGET = bacula-tray-monitor +QMAKE_EXTRA_TARGETS += depend +DEPENDPATH += . +INCLUDEPATH += ../.. . 
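+# The overrides below wrap linking and installation in libtool and, further
+# down, select the i686-w64-mingw32 cross toolchain for the Windows build.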
+LIBTOOL_LINK = @QMAKE_LIBTOOL@ --silent --tag=CXX --mode=link +LIBTOOL_INSTALL = @QMAKE_LIBTOOL@ --silent --mode=install +QMAKE_LINK = $${LIBTOOL_LINK} $(CXX) +QMAKE_INSTALL_PROGRAM = $${LIBTOOL_INSTALL} install -m @SBINPERM@ -p +QMAKE_CLEAN += .libs/* bacula-tray-monitor release/bacula-tray-monitor +QMAKE_CXXFLAGS += -DTRAY_MONITOR +QMAKE_CFLAGS += -DTRAY_MONITOR + + +RESOURCES = ../main.qrc +MOC_DIR = moc32 +OBJECTS_DIR = obj32 +UI_DIR = ui32 +QMAKE_CC = i686-w64-mingw32-gcc +QMAKE_CXX = i686-w64-mingw32-g++ +QMAKE_INCDIR = $(DEPKGS)/depkgs-mingw32/include/pthreads $(DEPKGS)/depkgs-mingw32/include/ ../win32/compat +QMAKE_INCDIR_QT = $(DEPKGS)/depkgs-mingw32/include/qt +QMAKE_LIBDIR_QT = $(DEPKGS)/depkgs-mingw32/lib/qt +QMAKE_LINK = i686-w64-mingw32-g++ +QMAKE_LFLAGS = -mthreads -Wl,-enable-stdcall-fixup -Wl,-enable-auto-import -m32 -fno-strict-aliasing -Wl,-enable-runtime-pseudo-reloc +QMAKE_LIB = i686-w64-mingw32-ar -ru +QMAKE_RC = i686-w64-mingw32-windres + +# Main directory +HEADERS += tray-monitor.h tray_conf.h tray-ui.h fdstatus.h task.h ../util/fmtwidgetitem.h dirstatus.h conf.h sdstatus.h runjob.h status.h restorewizard.h filesmodel.h clientselectwizardpage.h jobselectwizardpage.h fileselectwizardpage.h restoreoptionswizardpage.h pluginwizardpage.h +SOURCES += tray-monitor.cpp tray_conf.cpp fdstatus.cpp task.cpp authenticate.cpp ../util/fmtwidgetitem.cpp dirstatus.cpp sdstatus.cpp conf.cpp runjob.cpp status.cpp restorewizard.cpp clientselectwizardpage.cpp jobselectwizardpage.cpp fileselectwizardpage.cpp restoreoptionswizardpage.cpp pluginwizardpage.cpp +FORMS += fd-monitor.ui dir-monitor.ui sd-monitor.ui main-conf.ui res-conf.ui run.ui restorewizard.ui clientselectwizardpage.ui jobselectwizardpage.ui fileselectwizardpage.ui restoreoptionswizardpage.ui pluginwizardpage.ui + +TRANSLATIONS += ts/tm_fr.ts ts/tm_de.ts ts/tm_ja.ts diff --git a/src/qt-console/tray-monitor/tray-monitor.pro.mingw64.in b/src/qt-console/tray-monitor/tray-monitor.pro.mingw64.in new file mode 100644 index 00000000..096dac3a --- /dev/null +++ b/src/qt-console/tray-monitor/tray-monitor.pro.mingw64.in @@ -0,0 +1,71 @@ +# +# Copyright (C) 2000-2018 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +###################################################################### +# +# !!!!!!! IMPORTANT !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +# +# Edit only tray-monitor.pro.mingw64.in -- tray-monitor.pro.mingw64 is built by the ./configure program +# +# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +# +# CONFIG options for Windows are pulled from win32/qmake.conf +# +# Copyright (C) 2000-2017 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# + +CONFIG += qt cross-win32 +#CONFIG += qt debug + +greaterThan(QT_MAJOR_VERSION, 4): QT += widgets + +cross-win32 { + LIBS += ../../win32/lib/obj64/ini.o -mwindows -L../../win32/release64 -lbacula -lpthread + INCLUDEPATH += ../../win32/compat +} +!cross-win32 { + LIBS += -L../../lib -lbaccfg -lbac -L../../findlib -lbacfind @OPENSSL_LIBS@ +} + + +bins.path = /$(DESTDIR)@sbindir@ +bins.files = bacula-tray-monitor +confs.path = /$(DESTDIR)@sysconfdir@ +confs.commands = ./install_conf_file + +TEMPLATE = app +TARGET = bacula-tray-monitor +QMAKE_EXTRA_TARGETS += depend +DEPENDPATH += . +INCLUDEPATH += ../.. . 
+LIBTOOL_LINK = @QMAKE_LIBTOOL@ --silent --tag=CXX --mode=link +LIBTOOL_INSTALL = @QMAKE_LIBTOOL@ --silent --mode=install +QMAKE_LINK = $${LIBTOOL_LINK} $(CXX) +QMAKE_INSTALL_PROGRAM = $${LIBTOOL_INSTALL} install -m @SBINPERM@ -p +QMAKE_CLEAN += .libs/* bacula-tray-monitor release/bacula-tray-monitor +QMAKE_CXXFLAGS += -DTRAY_MONITOR +QMAKE_CFLAGS += -DTRAY_MONITOR + + +RESOURCES = ../main.qrc +MOC_DIR = moc64 +OBJECTS_DIR = obj64 +UI_DIR = ui64 +QMAKE_CC = x86_64-w64-mingw32-gcc +QMAKE_CXX = x86_64-w64-mingw32-g++ +QMAKE_INCDIR = $(DEPKGS)/depkgs-mingw-w64/include/pthreads $(DEPKGS)/depkgs-mingw-w64/include/ ../win32/compat +QMAKE_INCDIR_QT = $(DEPKGS)/depkgs-mingw-w64/include/qt +QMAKE_LIBDIR_QT = $(DEPKGS)/depkgs-mingw-w64/lib/qt +QMAKE_LINK = x86_64-w64-mingw32-g++ +QMAKE_LFLAGS = -mthreads -Wl,-enable-stdcall-fixup -Wl,-enable-auto-import -m64 -fno-strict-aliasing -Wl,-enable-runtime-pseudo-reloc +QMAKE_LIB = x86_64-w64-mingw32-ar -ru +QMAKE_RC = x86_64-w64-mingw32-windres + +# Main directory +HEADERS += tray-monitor.h tray_conf.h tray-ui.h fdstatus.h task.h ../util/fmtwidgetitem.h dirstatus.h conf.h sdstatus.h runjob.h status.h restorewizard.h filesmodel.h clientselectwizardpage.h jobselectwizardpage.h fileselectwizardpage.h restoreoptionswizardpage.h pluginwizardpage.h +SOURCES += tray-monitor.cpp tray_conf.cpp fdstatus.cpp task.cpp authenticate.cpp ../util/fmtwidgetitem.cpp dirstatus.cpp sdstatus.cpp conf.cpp runjob.cpp status.cpp restorewizard.cpp clientselectwizardpage.cpp jobselectwizardpage.cpp fileselectwizardpage.cpp restoreoptionswizardpage.cpp pluginwizardpage.cpp +FORMS += fd-monitor.ui dir-monitor.ui sd-monitor.ui main-conf.ui res-conf.ui run.ui restorewizard.ui clientselectwizardpage.ui jobselectwizardpage.ui fileselectwizardpage.ui restoreoptionswizardpage.ui pluginwizardpage.ui + +TRANSLATIONS += ts/tm_fr.ts ts/tm_de.ts ts/tm_ja.ts diff --git a/src/qt-console/tray-monitor/tray-ui.h b/src/qt-console/tray-monitor/tray-ui.h new file mode 100644 index 00000000..355c651b --- /dev/null +++ b/src/qt-console/tray-monitor/tray-ui.h @@ -0,0 +1,440 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ + +#ifndef TRAYUI_H +#define TRAYUI_H + + +#include "common.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "fdstatus.h" +#include "sdstatus.h" +#include "dirstatus.h" +#include "conf.h" +#include "runjob.h" +#include "restorewizard.h" + +void display_error(const char *fmt, ...); + +int tls_pem_callback(char *buf, int size, const void *userdata); + +class TrayUI: public QMainWindow +{ + Q_OBJECT + +public: + QWidget *centralwidget; + QTabWidget *tabWidget; + QStatusBar *statusbar; + + QSystemTrayIcon *tray; + QSpinBox *spinRefresh; + QTimer *timer; + bool have_systray; + RestoreWizard *restorewiz; + + TrayUI(): + QMainWindow(), + tabWidget(NULL), + statusbar(NULL), + tray(NULL), + spinRefresh(NULL), + timer(NULL), + have_systray(QSystemTrayIcon::isSystemTrayAvailable()), + restorewiz(NULL) + { + } + + ~TrayUI() { + } + void addTab(RESMON *r) + { + QWidget *tab; + QString t = QString(r->hdr.name); + if (r->tls_enable) { + char buf[512]; + /* Generate passphrase prompt */ + bsnprintf(buf, sizeof(buf), "Passphrase for \"%s\" TLS private key: ", r->hdr.name); + + /* Initialize TLS context: + * Args: CA certfile, CA certdir, Certfile, Keyfile, + * Keyfile PEM Callback, Keyfile CB Userdata, DHfile, Verify Peer + */ + r->tls_ctx = new_tls_context(r->tls_ca_certfile, + r->tls_ca_certdir, r->tls_certfile, + r->tls_keyfile, tls_pem_callback, &buf, NULL, true); + + if (!r->tls_ctx) { + display_error(_("Failed to initialize TLS context for \"%s\".\n"), r->hdr.name); + } + } + switch(r->type) { + case R_CLIENT: + tab = new FDStatus(r); + break; + case R_STORAGE: + tab = new SDStatus(r); + break; + case R_DIRECTOR: + tab = new DIRStatus(r); + break; + default: + return; + } + tabWidget->setUpdatesEnabled(false); + tabWidget->addTab(tab, t); + tabWidget->setUpdatesEnabled(true); + } + void clearTabs() + { + tabWidget->setUpdatesEnabled(false); + for(int i = tabWidget->count() - 1; i >= 0; i--) { + QWidget *w = tabWidget->widget(i); + tabWidget->removeTab(i); + delete w; + } + tabWidget->setUpdatesEnabled(true); + tabWidget->update(); + } + void startTimer() + { + if (!timer) { + timer = new QTimer(this); + connect(timer, SIGNAL(timeout()), this, SLOT(refresh_screen())); + } + timer->start(spinRefresh->value()*1000); + } + void setupUi(QMainWindow *TrayMonitor, MONITOR *mon) + { + QPushButton *menubp = NULL; + timer = NULL; + if (TrayMonitor->objectName().isEmpty()) + TrayMonitor->setObjectName(QString::fromUtf8("TrayMonitor")); + TrayMonitor->setWindowIcon(QIcon(":/images/cartridge1.png")); + TrayMonitor->resize(789, 595); + centralwidget = new QWidget(TrayMonitor); + centralwidget->setObjectName(QString::fromUtf8("centralwidget")); + QVBoxLayout *verticalLayout = new QVBoxLayout(centralwidget); + verticalLayout->setObjectName(QString::fromUtf8("verticalLayout")); + tabWidget = new QTabWidget(centralwidget); + tabWidget->setObjectName(QString::fromUtf8("tabWidget")); + tabWidget->setTabPosition(QTabWidget::North); + tabWidget->setTabShape(QTabWidget::Rounded); + tabWidget->setTabsClosable(false); + verticalLayout->addWidget(tabWidget); + + QDialogButtonBox *buttonBox = new QDialogButtonBox(centralwidget); + buttonBox->setObjectName(QString::fromUtf8("buttonBox")); + if (have_systray) { + buttonBox->setStandardButtons(QDialogButtonBox::Close); + connect(buttonBox, SIGNAL(rejected()), this, SLOT(cb_show())); + } else { + /* Here 
we can display something else, now it's just a simple menu */ + menubp = new QPushButton(tr("&Options")); + buttonBox->addButton(menubp, QDialogButtonBox::ActionRole); + } + TrayMonitor->setCentralWidget(centralwidget); + statusbar = new QStatusBar(TrayMonitor); + statusbar->setObjectName(QString::fromUtf8("statusbar")); + TrayMonitor->setStatusBar(statusbar); + + QHBoxLayout *hLayout = new QHBoxLayout(); + QLabel *refreshlabel = new QLabel(centralwidget); + refreshlabel->setText("Refresh:"); + hLayout->addWidget(refreshlabel); + spinRefresh = new QSpinBox(centralwidget); + QSizePolicy sizePolicy(QSizePolicy::Fixed, QSizePolicy::Fixed); + sizePolicy.setHorizontalStretch(0); + sizePolicy.setVerticalStretch(0); + sizePolicy.setHeightForWidth(spinRefresh->sizePolicy().hasHeightForWidth()); + spinRefresh->setSizePolicy(sizePolicy); + spinRefresh->setMinimum(1); + spinRefresh->setMaximum(600); + spinRefresh->setSingleStep(10); + spinRefresh->setValue(mon?mon->RefreshInterval:60); + hLayout->addWidget(spinRefresh); + hLayout->addWidget(buttonBox); + + verticalLayout->addLayout(hLayout); + //QSystemTrayIcon::isSystemTrayAvailable + + tray = new QSystemTrayIcon(TrayMonitor); + QMenu* stmenu = new QMenu(TrayMonitor); + +#if QT_VERSION >= 0x050000 + QAction *actShow = new QAction(QApplication::translate("TrayMonitor", + "Display", 0),TrayMonitor); + QAction* actQuit = new QAction(QApplication::translate("TrayMonitor", + "Quit", 0),TrayMonitor); + QAction* actAbout = new QAction(QApplication::translate("TrayMonitor", + "About", 0),TrayMonitor); + QAction* actRun = new QAction(QApplication::translate("TrayMonitor", + "Run...", 0),TrayMonitor); + QAction* actRes = new QAction(QApplication::translate("TrayMonitor", + "Restore...", 0),TrayMonitor); + + QAction* actConf = new QAction(QApplication::translate("TrayMonitor", + "Configure...", 0),TrayMonitor); +#else + QAction *actShow = new QAction(QApplication::translate("TrayMonitor", + "Display", + 0, QApplication::UnicodeUTF8),TrayMonitor); + QAction* actQuit = new QAction(QApplication::translate("TrayMonitor", + "Quit", + 0, QApplication::UnicodeUTF8),TrayMonitor); + QAction* actAbout = new QAction(QApplication::translate("TrayMonitor", + "About", + 0, QApplication::UnicodeUTF8),TrayMonitor); + QAction* actRun = new QAction(QApplication::translate("TrayMonitor", + "Run...", + 0, QApplication::UnicodeUTF8),TrayMonitor); + QAction* actRes = new QAction(QApplication::translate("TrayMonitor", + "Restore...", + 0, QApplication::UnicodeUTF8),TrayMonitor); + + QAction* actConf = new QAction(QApplication::translate("TrayMonitor", + "Configure...", + 0, QApplication::UnicodeUTF8),TrayMonitor); +#endif + stmenu->addAction(actShow); + stmenu->addAction(actRun); + stmenu->addAction(actRes); + stmenu->addSeparator(); + stmenu->addAction(actConf); + stmenu->addSeparator(); + stmenu->addAction(actAbout); + stmenu->addSeparator(); + stmenu->addAction(actQuit); + + connect(actRun, SIGNAL(triggered()), this, SLOT(cb_run())); + connect(actShow, SIGNAL(triggered()), this, SLOT(cb_show())); + connect(actConf, SIGNAL(triggered()), this, SLOT(cb_conf())); + connect(actRes, SIGNAL(triggered()), this, SLOT(cb_restore())); + connect(actQuit, SIGNAL(triggered()), this, SLOT(cb_quit())); + connect(actAbout, SIGNAL(triggered()), this, SLOT(cb_about())); + connect(spinRefresh, SIGNAL(valueChanged(int)), this, SLOT(cb_refresh(int))); + connect(tray, SIGNAL(activated(QSystemTrayIcon::ActivationReason)), + this, SLOT(cb_trayIconActivated(QSystemTrayIcon::ActivationReason))); + 
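+        /* Everything is wired up: attach the menu and icon to the system tray
+         * entry and start the periodic refresh timer. */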
tray->setContextMenu(stmenu); + + QIcon icon(":/images/cartridge1.png"); + tray->setIcon(icon); + tray->setToolTip(QString("Bacula Tray Monitor")); + tray->show(); + retranslateUi(TrayMonitor); + QMetaObject::connectSlotsByName(TrayMonitor); + startTimer(); + + /* When we don't have the systemtray, we keep the menu, but disabled */ + if (!have_systray) { + actShow->setEnabled(false); + menubp->setMenu(stmenu); + TrayMonitor->show(); + } + } // setupUi + + void retranslateUi(QMainWindow *TrayMonitor) + { +#if QT_VERSION >= 0x050000 + TrayMonitor->setWindowTitle(QApplication::translate("TrayMonitor", "Bacula Tray Monitor", 0)); +#else + TrayMonitor->setWindowTitle(QApplication::translate("TrayMonitor", "Bacula Tray Monitor", 0, QApplication::UnicodeUTF8)); +#endif + + } // retranslateUi + +private slots: + void cb_quit() { + QApplication::quit(); + } + + void cb_refresh(int val) { + if (timer) { + timer->setInterval(val*1000); + } + } + + void cb_about() { + QMessageBox::about(this, "Bacula Tray Monitor", "Bacula Tray Monitor\n" + "For more information, see: www.bacula.org\n" + "Copyright (C) 2000-2018, Kern Sibbald\n" + "License: AGPLv3"); + } + RESMON *get_director() { + QStringList dirs; + RESMON *d, *director=NULL; + bool ok; + foreach_res(d, R_DIRECTOR) { + if (!director) { + director = d; + } + dirs << QString(d->hdr.name); + } + foreach_res(d, R_CLIENT) { + if (d->use_remote) { + if (!director) { + director = d; + } + dirs << QString(d->hdr.name); + } + } + if (dirs.count() > 1) { + /* TODO: Set Modal attribute */ + QString dir = QInputDialog::getItem(this, _("Select a Director"), "Director:", dirs, 0, false, &ok, 0); + if (!ok) { + return NULL; + } + if (ok && !dir.isEmpty()) { + char *p = dir.toUtf8().data(); + foreach_res(d, R_DIRECTOR) { + if (strcmp(p, d->hdr.name) == 0) { + director = d; + break; + } + } + foreach_res(d, R_CLIENT) { + if (strcmp(p, d->hdr.name) == 0) { + director = d; + break; + } + } + } + } + if (dirs.count() == 0 || director == NULL) { + /* We need the proxy feature */ + display_error("No Director defined"); + return NULL; + } + return director; + } + void cb_run() { + RESMON *dir = get_director(); + if (!dir) { + return; + } + task *t = new task(); + connect(t, SIGNAL(done(task *)), this, SLOT(run_job(task *)), Qt::QueuedConnection); + t->init(dir, TASK_RESOURCES); + dir->wrk->queue(t); + } + void refresh_item() { + /* Probably do only the first one */ + int oldnbjobs = 0; + + for (int i=tabWidget->count() - 1; i >= 0; i--) { + ResStatus *s = (ResStatus *) tabWidget->widget(i); + if (s->res->use_monitor) { + s->res->mutex->lock(); + if (s->res->running_jobs) { + oldnbjobs += s->res->running_jobs->size(); + } + s->res->mutex->unlock(); + } + if (isVisible() || s->res->use_monitor) { + s->doUpdate(); + } + } + /* We need to find an other way to compute running jobs */ + if (oldnbjobs) { + QString q; + tray->setIcon(QIcon(":/images/R.png")); + tray->setToolTip(q.sprintf("Bacula Tray Monitor - %d job%s running", oldnbjobs, oldnbjobs>1?"s":"")); + //tray->showMessage(); Can use this function to display a popup + + } else { + tray->setIcon(QIcon(":/images/cartridge1.png")); + tray->setToolTip("Bacula Tray Monitor"); + } + } + void cb_conf() { + new Conf(); + } + void cb_restore() { + RESMON *dir = get_director(); + if (!dir) { + return; + } + task *t = new task(); + connect(t, SIGNAL(done(task *)), this, SLOT(start_restore_wizard(task *)), Qt::QueuedConnection); + connect(t, SIGNAL(done(task*)), t, SLOT(deleteLater())); + + t->init(dir, TASK_RESOURCES); + 
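+        /* The worker thread executes the task; done(task*) is delivered back
+         * to the UI thread (QueuedConnection) where start_restore_wizard()
+         * opens the wizard and deleteLater() frees the task. */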
dir->wrk->queue(t); + } + + void cb_trayIconActivated(QSystemTrayIcon::ActivationReason r) { + if (r == QSystemTrayIcon::Trigger) { + cb_show(); + } + } + + void refresh_screen() { + refresh_item(); + } + + void cb_show() { + if (isVisible()) { + hide(); + } else { + refresh_item(); + show(); + } + } +public slots: + void task_done(task *t) { + Dmsg0(0, "Task done!\n"); + t->deleteLater(); + } + void run_job(task *t) { + Dmsg0(0, "Task done!\n"); + RESMON *dir = t->res; + t->deleteLater(); + new RunJob(dir); + } + + void start_restore_wizard(task *t) { + restorewiz = new RestoreWizard(t->res); + restorewiz->show(); + } + +}; + + +#endif /* TRAYUI_H */ diff --git a/src/qt-console/tray-monitor/tray_conf.cpp b/src/qt-console/tray-monitor/tray_conf.cpp new file mode 100644 index 00000000..2f0d57e7 --- /dev/null +++ b/src/qt-console/tray-monitor/tray_conf.cpp @@ -0,0 +1,393 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* +* Main configuration file parser for Bacula Tray Monitor. +* +* Adapted from dird_conf.c +* +* Note, the configuration file parser consists of three parts +* +* 1. The generic lexical scanner in lib/lex.c and lib/lex.h +* +* 2. The generic config scanner in lib/parse_config.c and +* lib/parse_config.h. +* These files contain the parser code, some utility +* routines, and the common store routines (name, int, +* string). +* +* 3. The daemon specific file, which contains the Resource +* definitions as well as any specific store routines +* for the resource records. +* +* Nicolas Boichat, August MMIV +* +*/ + +#include "common.h" +#include "tray_conf.h" + +worker *worker_start(); +void worker_stop(worker *); + +/* Define the first and last resource ID record +* types. Note, these should be unique for each +* daemon though not a requirement. +*/ +int32_t r_first = R_FIRST; +int32_t r_last = R_LAST; +RES_HEAD **res_head; + +/* We build the current resource here as we are +* scanning the resource configuration definition, +* then move it to allocated memory when the resource +* scan is complete. +*/ +URES res_all; +int32_t res_all_size = sizeof(res_all); + + +/* Definition of records permitted within each +* resource with the routine to process the record +* information. NOTE! quoted names must be in lower case. 
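As a concrete illustration of what the item tables below accept, a tray
monitor configuration assembled from these directives could look like the
following sketch (every name, address and password here is a placeholder,
not a value shipped with this release):

   Monitor {
     Name = "workstation-mon"          # store_name, required
     RefreshInterval = 30              # store_time, default 60
     CommCompression = yes             # store_bool, default true
   }
   Director {
     Name = "main-dir"
     Address = "dir.example.com"       # required
     Port = 9101                       # default for a Director
     Password = "director-password"    # required
   }
   Client {
     Name = "laptop-fd"
     Address = "localhost"
     Port = 9102                       # default for a Client
     Password = "client-password"
     Monitor = yes                     # reflect this resource in the tray icon
   }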
+*/
+/*
+* Monitor Resource
+*
+*   name                     handler         value                                   code  flags          default_value
+*/
+static RES_ITEM mon_items[] = {
+   {"Name",                   store_name,     ITEM(res_monitor.hdr.name),                0, ITEM_REQUIRED, 0},
+   {"Description",            store_str,      ITEM(res_monitor.hdr.desc),                0, 0, 0},
+   {"requiressl",             store_bool,     ITEM(res_monitor.require_ssl),             1, ITEM_DEFAULT, 0},
+   {"RefreshInterval",        store_time,     ITEM(res_monitor.RefreshInterval),         0, ITEM_DEFAULT, 60},
+   {"CommCompression",        store_bool,     ITEM(res_monitor.comm_compression),        0, ITEM_DEFAULT, true},
+   {"CommandDirectory",       store_dir,      ITEM(res_monitor.command_dir),             0, 0, 0},
+   {"DisplayAdvancedOptions", store_bool,     ITEM(res_monitor.display_advanced_options), 0, 0, 0},
+   {NULL, NULL, {0}, 0, 0, 0}
+};
+
+/* Directors that we can contact */
+static RES_ITEM dir_items[] = {
+   {"Name",                   store_name,     ITEM(res_main.hdr.name),                   0, ITEM_REQUIRED, 0},
+   {"Description",            store_str,      ITEM(res_main.hdr.desc),                   0, 0, 0},
+   {"Port",                   store_pint32,   ITEM(res_main.port),                       0, ITEM_DEFAULT, 9101},
+   {"Address",                store_str,      ITEM(res_main.address),                    0, ITEM_REQUIRED, 0},
+   {"Password",               store_password, ITEM(res_main.password),                   0, ITEM_REQUIRED, 0},
+   {"Monitor",                store_bool,     ITEM(res_main.use_monitor),                0, ITEM_DEFAULT, 0},
+   {"ConnectTimeout",         store_time,     ITEM(res_main.connect_timeout),            0, ITEM_DEFAULT, 10},
+   {"UseSetIp",               store_bool,     ITEM(res_main.use_setip),                  0, 0, 0},
+   {"TlsEnable",              store_bool,     ITEM(res_main.tls_enable),                 0, 0, 0},
+   {"TlsCaCertificateFile",   store_dir,      ITEM(res_main.tls_ca_certfile),            0, 0, 0},
+   {"TlsCaCertificateDir",    store_dir,      ITEM(res_main.tls_ca_certdir),             0, 0, 0},
+   {"TlsCertificate",         store_dir,      ITEM(res_main.tls_certfile),               0, 0, 0},
+   {"TlsKey",                 store_dir,      ITEM(res_main.tls_keyfile),                0, 0, 0},
+
+   {NULL, NULL, {0}, 0, 0, 0}
+};
+
+/*
+* Client or File daemon resource
+*
+*   name                     handler         value                                   code  flags          default_value
+*/
+
+static RES_ITEM cli_items[] = {
+   {"Name",                   store_name,     ITEM(res_main.hdr.name),                   0, ITEM_REQUIRED, 0},
+   {"Description",            store_str,      ITEM(res_main.hdr.desc),                   0, 0, 0},
+   {"Address",                store_str,      ITEM(res_main.address),                    0, ITEM_REQUIRED, 0},
+   {"Port",                   store_pint32,   ITEM(res_main.port),                       0, ITEM_DEFAULT, 9102},
+   {"Password",               store_password, ITEM(res_main.password),                   0, ITEM_REQUIRED, 0},
+   {"ConnectTimeout",         store_time,     ITEM(res_main.connect_timeout),            0, ITEM_DEFAULT, 10},
+   {"Remote",                 store_bool,     ITEM(res_main.use_remote),                 0, ITEM_DEFAULT, 0},
+   {"Monitor",                store_bool,     ITEM(res_main.use_monitor),                0, ITEM_DEFAULT, 0},
+   {"TlsEnable",              store_bool,     ITEM(res_main.tls_enable),                 0, 0, 0},
+   {"TlsCaCertificateFile",   store_dir,      ITEM(res_main.tls_ca_certfile),            0, 0, 0},
+   {"TlsCaCertificateDir",    store_dir,      ITEM(res_main.tls_ca_certdir),             0, 0, 0},
+   {"TlsCertificate",         store_dir,      ITEM(res_main.tls_certfile),               0, 0, 0},
+   {"TlsKey",                 store_dir,      ITEM(res_main.tls_keyfile),                0, 0, 0},
+   {NULL, NULL, {0}, 0, 0, 0}
+};
+
+/* Storage daemon resource
+*
+*   name                     handler         value                                   code  flags          default_value
+*/
+static RES_ITEM store_items[] = {
+   {"Name",                   store_name,     ITEM(res_main.hdr.name),                   0, ITEM_REQUIRED, 0},
+   {"Description",            store_str,      ITEM(res_main.hdr.desc),                   0, 0, 0},
+   {"Port",                   store_pint32,   ITEM(res_main.port),                       0, ITEM_DEFAULT, 9103},
+   {"Address",                store_str,      ITEM(res_main.address),                    0, ITEM_REQUIRED, 0},
+   {"Password",               store_password, ITEM(res_main.password),                   0, ITEM_REQUIRED, 0},
+   {"ConnectTimeout",         store_time,     ITEM(res_main.connect_timeout),            0, ITEM_DEFAULT, 10},
+   {"Monitor",                store_bool,     ITEM(res_main.use_monitor),                0, ITEM_DEFAULT, 0},
+   {"TlsEnable",              store_bool,
ITEM(res_main.tls_enable), 0, 0, 0}, + {"TlsCaCertificateFile", store_dir, ITEM(res_main.tls_ca_certfile), 0, 0, 0}, + {"TlsCaCertificateDir", store_dir, ITEM(res_main.tls_ca_certdir), 0, 0, 0}, + {"TlsCertificate", store_dir, ITEM(res_main.tls_certfile), 0, 0, 0}, + {"TlsKey", store_dir, ITEM(res_main.tls_keyfile), 0, 0, 0}, + {NULL, NULL, {0}, 0, 0, 0} +}; + +/* +* This is the master resource definition. +* It must have one item for each of the resources. +* +* NOTE!!! keep it in the same order as the R_codes +* or eliminate all resources[rindex].name +* +* name items rcode res_head +*/ +RES_TABLE resources[] = { + {"monitor", mon_items, R_MONITOR}, + {"director", dir_items, R_DIRECTOR}, + {"client", cli_items, R_CLIENT}, + {"storage", store_items, R_STORAGE}, + {NULL, NULL, 0} +}; + +/* Dump contents of resource */ +void dump_resource(int type, RES *ares, void sendit(void *sock, const char *fmt, ...), void *sock) +{ + RES *next; + URES *res = (URES *)ares; + bool recurse = true; + + if (res == NULL) { + sendit(sock, _("No %s resource defined\n"), res_to_str(type)); + return; + } + if (type < 0) { /* no recursion */ + type = - type; + recurse = false; + } + switch (type) { + case R_MONITOR: + sendit(sock, _("Monitor: name=%s\n"), ares->name); + break; + case R_DIRECTOR: + sendit(sock, _("Director: name=%s address=%s port=%d\n"), + res->res_main.hdr.name, res->res_main.address, res->res_main.port); + break; + case R_CLIENT: + sendit(sock, _("Client: name=%s address=%s port=%d\n"), + res->res_main.hdr.name, res->res_main.address, res->res_main.port); + break; + case R_STORAGE: + sendit(sock, _("Storage: name=%s address=%s port=%d\n"), + res->res_main.hdr.name, res->res_main.address, res->res_main.port); + break; + default: + sendit(sock, _("Unknown resource type %d in dump_resource.\n"), type); + break; + } + if (recurse) { + next = GetNextRes(0, (RES *)res); + if (next) { + dump_resource(type, next, sendit, sock); + } + } +} + +/* +* Free memory of resource -- called when daemon terminates. +* NB, we don't need to worry about freeing any references +* to other resources as they will be freed when that +* resource chain is traversed. Mainly we worry about freeing +* allocated strings (names). 
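Regarding the "same order as the R_codes" note on resources[] above, a short
worked example with the rescode enum from tray_conf.h (where R_MONITOR is
1001 and r_first == R_FIRST == R_MONITOR) shows how the lookup lines up:

   type         == R_CLIENT           == 1003
   rindex       == R_CLIENT - r_first == 1003 - 1001 == 2
   resources[2] == { "client", cli_items, R_CLIENT }

so save_resource() below resolves a Client resource to cli_items for its
required-item checks and error messages; if the enum and the table ever get
out of step, the wrong item list is consulted.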
+*/ +void free_resource(RES *sres, int type) +{ + URES *res = (URES *)sres; + + if (res == NULL) + return; + /* common stuff -- free the resource name and description */ + if (res->res_monitor.hdr.name) { + free(res->res_monitor.hdr.name); + } + if (res->res_monitor.hdr.desc) { + free(res->res_monitor.hdr.desc); + } + + switch (type) { + case R_MONITOR: + if (res->res_monitor.password) { + free(res->res_monitor.password); + } + if (res->res_monitor.command_dir) { + free(res->res_monitor.command_dir); + } + break; + case R_DIRECTOR: + case R_CLIENT: + case R_STORAGE: + delete res->res_main.mutex; + free_bsock(res->res_main.bs); + if (res->res_main.wrk) { + worker_stop(res->res_main.wrk); + res->res_main.wrk = NULL; + } + if (res->res_main.address) { + free(res->res_main.address); + } + if (res->res_main.tls_ctx) { + free_tls_context(res->res_main.tls_ctx); + } + if (res->res_main.tls_ca_certfile) { + free(res->res_main.tls_ca_certfile); + } + if (res->res_main.tls_ca_certdir) { + free(res->res_main.tls_ca_certdir); + } + if (res->res_main.tls_certfile) { + free(res->res_main.tls_certfile); + } + if (res->res_main.tls_keyfile) { + free(res->res_main.tls_keyfile); + } + if (res->res_main.jobs) { + delete res->res_main.jobs; + } + if (res->res_main.clients) { + delete res->res_main.clients; + } + if (res->res_main.filesets) { + delete res->res_main.filesets; + } + if (res->res_main.pools) { + delete res->res_main.pools; + } + if (res->res_main.storages) { + delete res->res_main.storages; + } + if (res->res_main.running_jobs) { + delete res->res_main.terminated_jobs; + } + break; + default: + printf(_("Unknown resource type %d in free_resource.\n"), type); + } + + /* Common stuff again -- free the resource, recurse to next one */ + if (res) { + free(res); + } +} + +/* +* Save the new resource by chaining it into the head list for +* the resource. If this is pass 2, we update any resource +* pointers because they may not have been defined until +* later in pass 1. +*/ +bool save_resource(CONFIG *config, int type, RES_ITEM *items, int pass) +{ + int rindex = type - r_first; + int i, size; + int error = 0; + + /* + * Ensure that all required items are present + */ + for (i=0; items[i].name; i++) { + if (items[i].flags & ITEM_REQUIRED) { + if (!bit_is_set(i, res_all.res_monitor.hdr.item_present)) { + Mmsg(config->m_errmsg, _("\"%s\" directive is required in \"%s\" resource, but not found.\n"), + items[i].name, resources[rindex].name); + return false; + } + } + /* If this triggers, take a look at lib/parse_conf.h */ + if (i >= MAX_RES_ITEMS) { + Mmsg(config->m_errmsg, _("Too many directives in \"%s\" resource\n"), resources[rindex].name); + return false; + } + } + + /* + * During pass 2 in each "store" routine, we looked up pointers + * to all the resources referrenced in the current resource, now we + * must copy their addresses from the static record to the allocated + * record. + */ + if (pass == 2) { + switch (type) { + /* Resources not containing a resource */ + case R_STORAGE: + case R_DIRECTOR: + case R_CLIENT: + case R_MONITOR: + break; + default: + Emsg1(M_ERROR, 0, _("Unknown resource type %d in save_resource.\n"), type); + error = 1; + break; + } + /* Note, the resource name was already saved during pass 1, + * so here, we can just release it. 
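Stepping back from the pass handling for a moment: the entry point that
drives this parser is parse_tmon_config() at the end of this file. A caller
would use it roughly as in the sketch below; the configuration path is a
placeholder, and the CONFIG allocation mirrors the other Bacula daemons
rather than anything defined here.

   // Minimal sketch, assuming the lib/parse_conf CONFIG API and the
   // resource tables defined in this file.
   CONFIG *config = New(CONFIG());
   parse_tmon_config(config, "/opt/bacula/etc/bacula-tray-monitor.conf", M_ERROR_TERM);

   MONITOR *mon = (MONITOR *)GetNextRes(R_MONITOR, NULL);  // first Monitor resource
   RESMON *d;
   foreach_res(d, R_DIRECTOR) {
      // each Director defined in the file, with its mutex and worker
      // already created by save_resource() during pass 1
   }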
+ */ + if (res_all.res_monitor.hdr.name) { + free(res_all.res_monitor.hdr.name); + res_all.res_monitor.hdr.name = NULL; + } + if (res_all.res_monitor.hdr.desc) { + free(res_all.res_monitor.hdr.desc); + res_all.res_monitor.hdr.desc = NULL; + } + return true; + } + + /* + * The following code is only executed during pass 1 + */ + switch (type) { + case R_MONITOR: + size = sizeof(MONITOR); + break; + case R_CLIENT: + case R_STORAGE: + case R_DIRECTOR: + // We need to initialize the mutex + res_all.res_main.mutex = new QMutex(); + res_all.res_main.wrk = worker_start(); + size = sizeof(RESMON); + break; + default: + printf(_("Unknown resource type %d in save_resource.\n"), type); + error = 1; + size = 1; + break; + } + /* Common */ + if (!error) { + res_all.res_main.type = type; + if (!config->insert_res(rindex, size)) { + return false; + } + } + return true; +} + +bool parse_tmon_config(CONFIG *config, const char *configfile, int exit_code) +{ + config->init(configfile, error_handler, exit_code, + (void *)&res_all, res_all_size, + r_first, r_last, resources, &res_head); + return config->parse_config(); +} diff --git a/src/qt-console/tray-monitor/tray_conf.h b/src/qt-console/tray-monitor/tray_conf.h new file mode 100644 index 00000000..575d008a --- /dev/null +++ b/src/qt-console/tray-monitor/tray_conf.h @@ -0,0 +1,188 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Tray Monitor specific configuration and defines + * + * Adapted from dird_conf.c + * + * Nicolas Boichat, August MMIV + * + */ + +#ifndef TRAY_CONF +#define TRAY_CONF + +#include "common.h" + +/* + * Resource codes -- they must be sequential for indexing + */ +enum rescode { + R_MONITOR = 1001, + R_DIRECTOR, + R_CLIENT, + R_STORAGE, + R_CONSOLE_FONT, + R_FIRST = R_MONITOR, + R_LAST = R_CONSOLE_FONT /* keep this updated */ +}; + + +/* + * Some resource attributes + */ +enum { + R_NAME = 1020, + R_ADDRESS, + R_PASSWORD, + R_TYPE, + R_BACKUP +}; + +struct s_running_job +{ + int32_t Errors; /* FD/SD errors */ + int32_t JobType; + int32_t JobStatus; + int32_t JobLevel; + uint32_t JobId; + uint32_t VolSessionId; + uint32_t VolSessionTime; + uint32_t JobFiles; + uint64_t JobBytes; + uint64_t ReadBytes; + utime_t start_time; + int64_t bytespersec; + int SDtls; + char Client[MAX_NAME_LENGTH]; + char FileSet[MAX_NAME_LENGTH]; + char Storage[MAX_NAME_LENGTH]; + char RStorage[MAX_NAME_LENGTH]; + char sched_time; + char Job[MAX_NAME_LENGTH]; + char CurrentFile[4096]; +}; + +/* forward definition */ +class worker; + +/* Director/Client/Storage */ +struct RESMON { + RES hdr; /* Keep First */ + uint32_t type; /* Keep 2nd R_CLIENT, R_DIRECTOR, R_STORAGE */ + + uint32_t port; /* UA server port */ + char *address; /* UA server address */ + utime_t connect_timeout; /* timeout for connect in seconds */ + char *password; + + bool use_remote; /* Use Client Initiated backup feature */ + bool use_monitor; /* update the status icon with this resource */ + bool use_setip; /* Send setip command before a job */ + + bool tls_enable; /* Enable TLS on all connections */ + char *tls_ca_certfile; /* TLS CA Certificate File */ + char *tls_ca_certdir; /* TLS CA Certificate Directory */ + char *tls_certfile; /* TLS Client Certificate File */ + char *tls_keyfile; /* TLS Client Key File */ + + /* ------------------------------------------------------------ */ + TLS_CONTEXT *tls_ctx; /* Shared TLS Context */ + worker *wrk; /* worker that will handle async op */ + + QMutex *mutex; + BSOCK *bs; + char name[MAX_NAME_LENGTH]; + char version[MAX_NAME_LENGTH]; + char plugins[MAX_NAME_LENGTH]; + char started[32]; /* ISO date */ + char reloaded[32]; /* ISO date */ + int bwlimit; + alist *running_jobs; + dlist *terminated_jobs; + btime_t last_update; + bool new_resource; + bool proxy_sent; + + /* List of resources available */ + alist *jobs; + alist *clients; + alist *filesets; + alist *pools; + alist *storages; + alist *catalogs; + + /* Default value */ + struct { + char *job; + char *client; + char *pool; + char *storage; + char *level; + char *type; + char *fileset; + char *catalog; + int priority; + char *where; + int replace; + } defaults; + + /* Information about the job */ + struct { + uint64_t JobBytes; + uint32_t JobFiles; + int CorrJobBytes; + int CorrJobFiles; + int CorrNbJob; + char JobLevel; + } infos; +}; + +Q_DECLARE_METATYPE(RESMON*) + +/* + * Tray Monitor Resource + * + */ +struct MONITOR { + RES hdr; /* Keep first */ + int32_t type; /* Keep second */ + + bool comm_compression; /* Enable comm line compression */ + bool require_ssl; /* Require SSL for all connections */ + bool display_advanced_options; /* Display advanced options (run options for example) */ + MSGS *messages; /* Daemon message handler */ + char *password; /* UA server password */ + char *command_dir; /* Where to find Commands */ + utime_t RefreshInterval; /* Status refresh interval */ +}; + + +/* Define the Union of all the above + * resource 
structure definitions. + */ +union URES { + MONITOR res_monitor; + RESMON res_main; + RES hdr; +}; + +void error_handler(const char *file, int line, LEX *lc, const char *msg, ...); + +#endif diff --git a/src/qt-console/tray-monitor/ts/tm_de.ts b/src/qt-console/tray-monitor/ts/tm_de.ts new file mode 100644 index 00000000..3f5449c9 --- /dev/null +++ b/src/qt-console/tray-monitor/ts/tm_de.ts @@ -0,0 +1,720 @@ + + + + + Conf + + + Configuration + + + + + Monitor Configuration + + + + + + The Monitor name will be used during the authentication phase. + + + + + Name: + + + + + Refresh Interval: + + + + + + Specify the "Command Directory" where the tray-monitor program will check regularly for jobs to run + + + + + Command Directory: + + + + + ... + + + + + + Display or Hide advanced options in the "Run Job" window + + + + + Display Advanced Options: + + + + + Save and Apply the changes + + + + + Save + + + + + Cancel + + + + + Show/Hide Passwords + + + + + Password + + + + + Add Client resource to monitor + + + + + Client + + + + + Add Storage resource to monitor + + + + + Storage + + + + + Add Director resource to monitor + + + + + Director + + + + + Select Command Directory + + + + + ConfTab + + + Select CA Certificate File PEM file + + + + + Select CA Certificate Directory + + + + + + Select TLS Certificate File + + + + + DIRStatus + + + + JobId + + + + + + Job + + + + + + Level + + + + + Client + + + + + + Status + + + + + Storage + + + + + + Files + + + + + + Bytes + + + + + + Errors + + + + + FDStatus + + + + JobId + + + + + + Job + + + + + + Level + + + + + + Files + + + + + + Bytes + + + + + + Errors + + + + + Current File + + + + + Status + + + + + QObject + + + Invalid job status %1 + + + + + + Yes + + + + + + No + + + + + ResConf + + + Form + + + + + General + + + + + + The Name will be used only in the Tray Monitor interface + + + + + Name: + + + + + Description: + + + + + Password: + + + + + Address: + + + + + Port: + + + + + Timeout: + + + + + + Use Client Initiated backup/restore feature + + + + + Remote + + + + + + Update the tray monitor icon with the status of this component + + + + + Monitor: + + + + + Use SetIp: + + + + + TLS + + + + + + + + ... + + + + + CA Certificate File: + + + + + Enabled + + + + + Key File: + + + + + Certificate File: + + + + + CA Certificate Directory: + + + + + RunJob + + + Computed over %1 job%2, the correlation is %3. + + + + + Computed over %1 job%2, The correlation is %3. + + + + + SDStatus + + + + JobId + + + + + + Job + + + + + + Level + + + + + Client + + + + + Storage + + + + + + Files + + + + + + Bytes + + + + + + Errors + + + + + Status + + + + + TrayMonitor + + + Display + + + + + Quit + + + + + About + + + + + Run... + + + + + Configure... 
+ + + + + Bacula Tray Monitor + + + + + dirStatus + + + Form + + + + + Director Status + + + + + Name: + + + + + Started: + + + + + Version: + + + + + Plugins: + + + + + Reloaded: + + + + + Running Jobs + + + + + Terminated Jobs + + + + + fdStatus + + + Form + + + + + FileDaemon Status + + + + + Name: + + + + + Bandwidth Limit: + + + + + Started: + + + + + Version: + + + + + Plugins: + + + + + Running Jobs + + + + + Terminated Jobs + + + + + runForm + + + Run job + + + + + <h3>Run a Job</h3> + + + + + Properties + + + + + Job: + + + + + When: + + + + + yyyy-MM-dd hh:mm:ss + + + + + <html><head/><body><p>Job statistics computed from the Catalog with previous jobs.</p><p>For accurate information, it is possible to use the bconsole &quot;estimate&quot; command.</p></body></html> + + + + + Estimate: + + + + + Job Bytes: + + + + + Job Files: + + + + + + Level: + + + + + Advanced + + + + + Client: + + + + + FileSet: + + + + + Pool: + + + + + Storage: + + + + + Catalog: + + + + + Priority: + + + + + OK + + + + + Cancel + + + + + sdStatus + + + Form + + + + + Storage Daemon Status + + + + + Name: + + + + + Started: + + + + + Version: + + + + + Plugins: + + + + + Running Jobs + + + + + Terminated Jobs + + + + diff --git a/src/qt-console/tray-monitor/ts/tm_fr.ts b/src/qt-console/tray-monitor/ts/tm_fr.ts new file mode 100644 index 00000000..94938da4 --- /dev/null +++ b/src/qt-console/tray-monitor/ts/tm_fr.ts @@ -0,0 +1,720 @@ + + + + + Conf + + + Configuration + + + + + Monitor Configuration + + + + + + The Monitor name will be used during the authentication phase. + + + + + Name: + + + + + Refresh Interval: + + + + + + Specify the "Command Directory" where the tray-monitor program will check regularly for jobs to run + + + + + Command Directory: + + + + + ... + + + + + + Display or Hide advanced options in the "Run Job" window + + + + + Display Advanced Options: + + + + + Save and Apply the changes + + + + + Save + + + + + Cancel + + + + + Show/Hide Passwords + + + + + Password + + + + + Add Client resource to monitor + + + + + Client + + + + + Add Storage resource to monitor + + + + + Storage + + + + + Add Director resource to monitor + + + + + Director + + + + + Select Command Directory + + + + + ConfTab + + + Select CA Certificate File PEM file + + + + + Select CA Certificate Directory + + + + + + Select TLS Certificate File + + + + + DIRStatus + + + + JobId + + + + + + Job + + + + + + Level + + + + + Client + + + + + + Status + + + + + Storage + + + + + + Files + + + + + + Bytes + + + + + + Errors + + + + + FDStatus + + + + JobId + + + + + + Job + + + + + + Level + + + + + + Files + + + + + + Bytes + + + + + + Errors + + + + + Current File + + + + + Status + + + + + QObject + + + Invalid job status %1 + + + + + + Yes + + + + + + No + + + + + ResConf + + + Form + + + + + General + + + + + + The Name will be used only in the Tray Monitor interface + + + + + Name: + + + + + Description: + + + + + Password: + + + + + Address: + + + + + Port: + + + + + Timeout: + + + + + + Use Client Initiated backup/restore feature + + + + + Remote + + + + + + Update the tray monitor icon with the status of this component + + + + + Monitor: + + + + + Use SetIp: + + + + + TLS + + + + + + + + ... + + + + + CA Certificate File: + + + + + Enabled + + + + + Key File: + + + + + Certificate File: + + + + + CA Certificate Directory: + + + + + RunJob + + + Computed over %1 job%2, the correlation is %3. + + + + + Computed over %1 job%2, The correlation is %3. 
+ + + + + SDStatus + + + + JobId + + + + + + Job + + + + + + Level + + + + + Client + + + + + Storage + + + + + + Files + + + + + + Bytes + + + + + + Errors + + + + + Status + + + + + TrayMonitor + + + Display + + + + + Quit + + + + + About + + + + + Run... + + + + + Configure... + + + + + Bacula Tray Monitor + + + + + dirStatus + + + Form + + + + + Director Status + + + + + Name: + + + + + Started: + + + + + Version: + + + + + Plugins: + + + + + Reloaded: + + + + + Running Jobs + + + + + Terminated Jobs + + + + + fdStatus + + + Form + + + + + FileDaemon Status + + + + + Name: + + + + + Bandwidth Limit: + + + + + Started: + + + + + Version: + + + + + Plugins: + + + + + Running Jobs + + + + + Terminated Jobs + + + + + runForm + + + Run job + + + + + <h3>Run a Job</h3> + + + + + Properties + + + + + Job: + + + + + When: + + + + + yyyy-MM-dd hh:mm:ss + + + + + <html><head/><body><p>Job statistics computed from the Catalog with previous jobs.</p><p>For accurate information, it is possible to use the bconsole &quot;estimate&quot; command.</p></body></html> + + + + + Estimate: + + + + + Job Bytes: + + + + + Job Files: + + + + + + Level: + + + + + Advanced + + + + + Client: + + + + + FileSet: + + + + + Pool: + + + + + Storage: + + + + + Catalog: + + + + + Priority: + + + + + OK + + + + + Cancel + + + + + sdStatus + + + Form + + + + + Storage Daemon Status + + + + + Name: + + + + + Started: + + + + + Version: + + + + + Plugins: + + + + + Running Jobs + + + + + Terminated Jobs + + + + diff --git a/src/qt-console/tray-monitor/ts/tm_ja.ts b/src/qt-console/tray-monitor/ts/tm_ja.ts new file mode 100644 index 00000000..0e419d9b --- /dev/null +++ b/src/qt-console/tray-monitor/ts/tm_ja.ts @@ -0,0 +1,720 @@ + + + + + Conf + + + Configuration + + + + + Monitor Configuration + + + + + + The Monitor name will be used during the authentication phase. + + + + + Name: + + + + + Refresh Interval: + + + + + + Specify the "Command Directory" where the tray-monitor program will check regularly for jobs to run + + + + + Command Directory: + + + + + ... + + + + + + Display or Hide advanced options in the "Run Job" window + + + + + Display Advanced Options: + + + + + Save and Apply the changes + + + + + Save + + + + + Cancel + + + + + Show/Hide Passwords + + + + + Password + + + + + Add Client resource to monitor + + + + + Client + + + + + Add Storage resource to monitor + + + + + Storage + + + + + Add Director resource to monitor + + + + + Director + + + + + Select Command Directory + + + + + ConfTab + + + Select CA Certificate File PEM file + + + + + Select CA Certificate Directory + + + + + + Select TLS Certificate File + + + + + DIRStatus + + + + JobId + + + + + + Job + + + + + + Level + + + + + Client + + + + + + Status + + + + + Storage + + + + + + Files + + + + + + Bytes + + + + + + Errors + + + + + FDStatus + + + + JobId + + + + + + Job + + + + + + Level + + + + + + Files + + + + + + Bytes + + + + + + Errors + + + + + Current File + + + + + Status + + + + + QObject + + + Invalid job status %1 + + + + + + Yes + + + + + + No + + + + + ResConf + + + Form + + + + + General + + + + + + The Name will be used only in the Tray Monitor interface + + + + + Name: + + + + + Description: + + + + + Password: + + + + + Address: + + + + + Port: + + + + + Timeout: + + + + + + Use Client Initiated backup/restore feature + + + + + Remote + + + + + + Update the tray monitor icon with the status of this component + + + + + Monitor: + + + + + Use SetIp: + + + + + TLS + + + + + + + + ... 
+ + + + + CA Certificate File: + + + + + Enabled + + + + + Key File: + + + + + Certificate File: + + + + + CA Certificate Directory: + + + + + RunJob + + + Computed over %1 job%2, the correlation is %3. + + + + + Computed over %1 job%2, The correlation is %3. + + + + + SDStatus + + + + JobId + + + + + + Job + + + + + + Level + + + + + Client + + + + + Storage + + + + + + Files + + + + + + Bytes + + + + + + Errors + + + + + Status + + + + + TrayMonitor + + + Display + + + + + Quit + + + + + About + + + + + Run... + + + + + Configure... + + + + + Bacula Tray Monitor + + + + + dirStatus + + + Form + + + + + Director Status + + + + + Name: + + + + + Started: + + + + + Version: + + + + + Plugins: + + + + + Reloaded: + + + + + Running Jobs + + + + + Terminated Jobs + + + + + fdStatus + + + Form + + + + + FileDaemon Status + + + + + Name: + + + + + Bandwidth Limit: + + + + + Started: + + + + + Version: + + + + + Plugins: + + + + + Running Jobs + + + + + Terminated Jobs + + + + + runForm + + + Run job + + + + + <h3>Run a Job</h3> + + + + + Properties + + + + + Job: + + + + + When: + + + + + yyyy-MM-dd hh:mm:ss + + + + + <html><head/><body><p>Job statistics computed from the Catalog with previous jobs.</p><p>For accurate information, it is possible to use the bconsole &quot;estimate&quot; command.</p></body></html> + + + + + Estimate: + + + + + Job Bytes: + + + + + Job Files: + + + + + + Level: + + + + + Advanced + + + + + Client: + + + + + FileSet: + + + + + Pool: + + + + + Storage: + + + + + Catalog: + + + + + Priority: + + + + + OK + + + + + Cancel + + + + + sdStatus + + + Form + + + + + Storage Daemon Status + + + + + Name: + + + + + Started: + + + + + Version: + + + + + Plugins: + + + + + Running Jobs + + + + + Terminated Jobs + + + + diff --git a/src/qt-console/tray-monitor/win32/qmake.conf b/src/qt-console/tray-monitor/win32/qmake.conf new file mode 100644 index 00000000..7a4c04be --- /dev/null +++ b/src/qt-console/tray-monitor/win32/qmake.conf @@ -0,0 +1,93 @@ +# +# qmake configuration for win32-g++ +# +# Written for MinGW +# + +MAKEFILE_GENERATOR = MINGW +TEMPLATE = app +CONFIG += qt warn_on release link_prl copy_dir_files debug_and_release debug_and_release_target precompile_header cross-win32 +QT += core gui +DEFINES += UNICODE QT_LARGEFILE_SUPPORT +QMAKE_COMPILER_DEFINES += __GNUC__ WIN32 + +QMAKE_EXT_OBJ = .o +QMAKE_EXT_RES = _res.o + +QMAKE_LEX = flex +QMAKE_LEXFLAGS = +QMAKE_YACC = byacc +QMAKE_YACCFLAGS = -d +QMAKE_CFLAGS = -DHAVE_MINGW -DHAVE_WIN32 -DHAVE_MINGW_W64 +QMAKE_CFLAGS_DEPS = -M +QMAKE_CFLAGS_WARN_ON = -Wall +QMAKE_CFLAGS_WARN_OFF = -w +QMAKE_CFLAGS_RELEASE = -O2 +QMAKE_CFLAGS_DEBUG = -g -O2 +QMAKE_CFLAGS_YACC = -Wno-unused -Wno-parentheses + +QMAKE_CXXFLAGS = $$QMAKE_CFLAGS -DHAVE_MINGW -DHAVE_WIN32 -DHAVE_ZLIB_H -DHAVE_LIBZ -DHAVE_CRYPTO -DHAVE_OPENSSL -DHAVE_TLS -DHAVE_MINGW_W64 +QMAKE_CXXFLAGS_DEPS = $$QMAKE_CFLAGS_DEPS +QMAKE_CXXFLAGS_WARN_ON = $$QMAKE_CFLAGS_WARN_ON +QMAKE_CXXFLAGS_WARN_OFF = $$QMAKE_CFLAGS_WARN_OFF +QMAKE_CXXFLAGS_RELEASE = $$QMAKE_CFLAGS_RELEASE +QMAKE_CXXFLAGS_DEBUG = $$QMAKE_CFLAGS_DEBUG +QMAKE_CXXFLAGS_YACC = $$QMAKE_CFLAGS_YACC +QMAKE_CXXFLAGS_THREAD = $$QMAKE_CFLAGS_THREAD +QMAKE_CXXFLAGS_RTTI_ON = -frtti +QMAKE_CXXFLAGS_RTTI_OFF = -fno-rtti +QMAKE_CXXFLAGS_EXCEPTIONS_ON = -fexceptions -mthreads +QMAKE_CXXFLAGS_EXCEPTIONS_OFF = -fno-exceptions + +QMAKE_INCDIR = $(DEPKGS)/depkgs-mingw32/include/pthreads $(DEPKGS)/depkgs-mingw32/include/ ../win32/compat +QMAKE_INCDIR_QT = $(DEPKGS)/depkgs-mingw32/include/qt +QMAKE_LIBDIR_QT = 
$(DEPKGS)/depkgs-mingw32/lib/qt + +QMAKE_RUN_CC = $(CXX) -c $(CFLAGS) $(INCPATH) -o $obj $src +QMAKE_RUN_CC_IMP = $(CXX) -c $(CFLAGS) $(INCPATH) -o $@ $< +QMAKE_RUN_CXX = $(CXX) -c $(CXXFLAGS) $(INCPATH) -o $obj $src +QMAKE_RUN_CXX_IMP = $(CXX) -c $(CXXFLAGS) $(INCPATH) -o $@ $< + +QMAKE_LINK = i686-w64-mingw32-g++ +QMAKE_LFLAGS = -mthreads -Wl,-enable-stdcall-fixup -Wl,-enable-auto-import -m32 -fno-strict-aliasing -Wl,-enable-runtime-pseudo-reloc + +QMAKE_LFLAGS_EXCEPTIONS_ON = -mthreads -Wl +QMAKE_LFLAGS_EXCEPTIONS_OFF = +QMAKE_LFLAGS_RELEASE = -Wl,-s +QMAKE_LFLAGS_DEBUG = +QMAKE_LFLAGS_CONSOLE = -Wl,-subsystem,console +QMAKE_LFLAGS_WINDOWS = -Wl,-subsystem,windows +QMAKE_LFLAGS_DLL = -shared +QMAKE_LINK_OBJECT_MAX = 10 +QMAKE_LINK_OBJECT_SCRIPT= object_script + + +QMAKE_LIBS = -lwsock32 -lstdc++ +QMAKE_LIBS_CORE = -lkernel32 -luser32 -lshell32 -luuid -lole32 -ladvapi32 -lws2_32 +QMAKE_LIBS_GUI = -lgdi32 -lcomdlg32 -loleaut32 -limm32 -lwinmm -lwinspool -lws2_32 -lole32 -luuid -luser32 -ladvapi32 +QMAKE_LIBS_NETWORK = -lws2_32 +QMAKE_LIBS_OPENGL = -lopengl32 -lglu32 -lgdi32 -luser32 +QMAKE_LIBS_COMPAT = -ladvapi32 -lshell32 -lcomdlg32 -luser32 -lgdi32 -lws2_32 +QMAKE_LIBS_QT_ENTRY = -lmingw32 -lqtmain + +MINGW_IN_SHELL = 1 +QMAKE_DIR_SEP = / +QMAKE_COPY = cp +QMAKE_COPY_DIR = cp -r +QMAKE_MOVE = mv +QMAKE_DEL_FILE = rm -f +QMAKE_MKDIR = mkdir -p +QMAKE_DEL_DIR = rm -rf +QMAKE_RCC = rcc +QMAKE_CHK_DIR_EXISTS = test -d + +QMAKE_MOC = $$[QT_INSTALL_BINS]$${DIR_SEPARATOR}moc +QMAKE_UIC = $$[QT_INSTALL_BINS]$${DIR_SEPARATOR}uic +QMAKE_IDC = $$[QT_INSTALL_BINS]$${DIR_SEPARATOR}idc + +QMAKE_IDL = midl +QMAKE_ZIP = zip -r -9 + +QMAKE_STRIP = i686-w64-mingw32-strip +QMAKE_STRIPFLAGS_LIB += --strip-unneeded +load(qt_config) diff --git a/src/qt-console/tray-monitor/win32/qplatformdefs.h b/src/qt-console/tray-monitor/win32/qplatformdefs.h new file mode 100644 index 00000000..4802c2b6 --- /dev/null +++ b/src/qt-console/tray-monitor/win32/qplatformdefs.h @@ -0,0 +1,161 @@ +/**************************************************************************** +** +** Copyright (C) 1992-2007 Trolltech ASA. All rights reserved. +** +** This file is part of the qmake spec of the Qt Toolkit. +** +** This file may be used under the terms of the GNU General Public +** License version 2.0 as published by the Free Software Foundation +** and appearing in the file LICENSE.GPL included in the packaging of +** this file. Please review the following information to ensure GNU +** General Public Licensing requirements will be met: +** http://trolltech.com/products/qt/licenses/licensing/opensource/ +** +** If you are unsure which license is appropriate for your use, please +** review the following information: +** http://trolltech.com/products/qt/licenses/licensing/licensingoverview +** or contact the sales department at sales@trolltech.com. +** +** In addition, as a special exception, Trolltech gives you certain +** additional rights. These rights are described in the Trolltech GPL +** Exception version 1.0, which can be found at +** http://www.trolltech.com/products/qt/gplexception/ and in the file +** GPL_EXCEPTION.txt in this package. +** +** In addition, as a special exception, Trolltech, as the sole copyright +** holder for Qt Designer, grants users of the Qt/Eclipse Integration +** plug-in the right for the Qt/Eclipse Integration to link to +** functionality provided by Qt Designer and its related libraries. +** +** Trolltech reserves all rights not expressly granted herein. 
+** +** Trolltech ASA (c) 2007 +** +** This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE +** WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. +** +****************************************************************************/ + +#ifndef QPLATFORMDEFS_H +#define QPLATFORMDEFS_H + +#ifdef UNICODE +#ifndef _UNICODE +#define _UNICODE +#endif +#endif + +// Get Qt defines/settings + +#include "qglobal.h" + +#include "winhdrs.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if !defined(_WIN32_WINNT) || (_WIN32_WINNT-0 < 0x0500) +typedef enum { + NameUnknown = 0, + NameFullyQualifiedDN = 1, + NameSamCompatible = 2, + NameDisplay = 3, + NameUniqueId = 6, + NameCanonical = 7, + NameUserPrincipal = 8, + NameCanonicalEx = 9, + NameServicePrincipal = 10, + NameDnsDomain = 12 +} EXTENDED_NAME_FORMAT, *PEXTENDED_NAME_FORMAT; +#endif + +#define Q_FS_FAT +#ifdef QT_LARGEFILE_SUPPORT +#define QT_STATBUF struct _stati64 // non-ANSI defs +#define QT_STATBUF4TSTAT struct _stati64 // non-ANSI defs +#define QT_STAT ::_stati64 +#define QT_FSTAT ::_fstati64 +#else +#define QT_STATBUF struct _stat // non-ANSI defs +#define QT_STATBUF4TSTAT struct _stat // non-ANSI defs +#define QT_STAT ::_stat +#define QT_FSTAT ::_fstat +#endif +#define QT_STAT_REG _S_IFREG +#define QT_STAT_DIR _S_IFDIR +#define QT_STAT_MASK _S_IFMT +#if defined(_S_IFLNK) +# define QT_STAT_LNK _S_IFLNK +#endif +#define QT_FILENO _fileno +#define QT_OPEN ::_open +#define QT_CLOSE ::_close +#ifdef QT_LARGEFILE_SUPPORT +#define QT_LSEEK ::_lseeki64 +#ifndef UNICODE +#define QT_TSTAT ::_stati64 +#else +#define QT_TSTAT ::_wstati64 +#endif +#else +#define QT_LSEEK ::_lseek +#ifndef UNICODE +#define QT_TSTAT ::_stat +#else +#define QT_TSTAT ::_wstat +#endif +#endif +#define QT_READ ::_read +#define QT_WRITE ::_write +#define QT_ACCESS ::_access +#define QT_GETCWD ::_getcwd +#define QT_CHDIR ::_chdir +#define QT_MKDIR ::_mkdir +#define QT_RMDIR ::_rmdir +#define QT_OPEN_LARGEFILE 0 +#define QT_OPEN_RDONLY _O_RDONLY +#define QT_OPEN_WRONLY _O_WRONLY +#define QT_OPEN_RDWR _O_RDWR +#define QT_OPEN_CREAT _O_CREAT +#define QT_OPEN_TRUNC _O_TRUNC +#define QT_OPEN_APPEND _O_APPEND +#if defined(O_TEXT) +# define QT_OPEN_TEXT _O_TEXT +# define QT_OPEN_BINARY _O_BINARY +#endif + +#define QT_FOPEN ::fopen +#ifdef QT_LARGEFILE_SUPPORT +#define QT_FSEEK ::fseeko64 +#define QT_FTELL ::ftello64 +#else +#define QT_FSEEK ::fseek +#define QT_FTELL ::ftell +#endif +#define QT_FGETPOS ::fgetpos +#define QT_FSETPOS ::fsetpos +#define QT_FPOS_T fpos_t +#ifdef QT_LARGEFILE_SUPPORT +#define QT_OFF_T off64_t +#else +#define QT_OFF_T long +#endif + +#define QT_SIGNAL_ARGS int + +#define QT_VSNPRINTF ::_vsnprintf +#define QT_SNPRINTF ::_snprintf + +# define F_OK 0 +# define X_OK 1 +# define W_OK 2 +# define R_OK 4 + + +#endif // QPLATFORMDEFS_H diff --git a/src/qt-console/ts/bat_de.ts b/src/qt-console/ts/bat_de.ts new file mode 100644 index 00000000..57c607ce --- /dev/null +++ b/src/qt-console/ts/bat_de.ts @@ -0,0 +1,4870 @@ + + + + + ClientForm + + + Client Tree + + + + + Refresh Client List + + + + + Requery the director for the list of clients. + + + + + List Jobs of Client + + + + + Open a joblist page selecting this client. + + + + + Status Client + + + + + Purge Jobs + + + + + Purge jobs peformed from this client. + + + + + Prune Jobs + + + + + Open the diaolog to prune for this client. 
+ + + + + ClientStat + + + Client Status %1 + + + + + Job Id + + + + + Job Level + + + + + Job Files + + + + + Job Bytes + + + + + Job Status + + + + + Job Time + + + + + Job Name + + + + + ClientStatForm + + + Form + + + + + Running + + + + + Header + + + + + Refresh Timer + + + + + Do Refresh + + + + + <html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:13pt; font-weight:600;">Terminated Jobs</span></p></body></html> + + + + + Refresh + + + + + Cancel Running Job + + + + + Clients + + + Clients + + + + + + Client Name + + + + + File Retention + + + + + Job Retention + + + + + AutoPrune + + + + + ClientId + + + + + Uname + + + + + Are you sure you want to purge all jobs of client "%1" ? +The Purge command will delete associated Catalog database records from Jobs and Volumes without considering the retention period. Purge works only on the Catalog database and does not affect data written to Volumes. This command can be dangerous because you can delete catalog records associated with current backups of files, and we recommend that you do not use it unless you know what you are doing. + + Is there any way I can get you to click Cancel here? You really don't want to do this + +Press OK to proceed with the purge operation? + + + + + Client Status %1 + + + + + Console + + + Console + + + + + No Director found. + + + + + Director not connected. Click on connect button. + + + + + Director is currently disconnected +Please reconnect! + + + + + ConsoleForm + + + Console + + + + + StatusDir + + + + + Console Help + + + + + Request Messages + + + + + Reload bacula-dir.conf + + + + + Content + + + Storage Status %1 + + + + + ContentForm + + + Form + + + + + Actions + + + + + Update slots + + + + + Label + + + + + Move to tray + + + + + Empty tray + + + + + Mount + + + + + Unmount + + + + + Status + + + + + Release + + + + + Drives + + + + + Drive + + + + + + Volume + + + + + Import/Export + + + + + Slot + + + + + Content + + + + + Slot + + + + + Volume + + + + + Bytes + + + + + Status + + + + + Media Type + + + + + Pool + + + + + Last Written + + + + + When expire? + + + + + DirComm + + + Already connected. 
+ + + + + DirStat + + + + Job Id + + + + + + + Job Level + + + + + Job Files + + + + + Job Bytes + + + + + Job Status + + + + + + Job Time + + + + + + Job Name + + + + + Job Type + + + + + Priority + + + + + Volume + + + + + Job Data + + + + + Job Info + + + + + Director Status + + + + + DirStatForm + + + Form + + + + + Refresh Timer + + + + + Do Refresh + + + + + <html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:13pt; font-weight:600;">Scheduled Jobs</span></p></body></html> + + + + + <html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:13pt; font-weight:600;">Running Jobs</span></p></body></html> + + + + + <html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:13pt; font-weight:600;">Terminated Jobs</span></p></body></html> + + + + + Refresh + + + + + Cancel Selected Running Jobs + + + + + Disable Scheduled Job + + + + + FileSet + + + FileSets + + + + + FileSet Id + + + + + + FileSet Name + + + + + Create Time + + + + + FileSetForm + + + FileSet Tree + + + + + Refresh FileSet List + + + + + Requery the director for the list of storage objects. + + + + + Show FileSet In Console + + + + + ShowJobs + + + + + Help + + + Help: %1 + + + + + Job + + + Job + + + + + Are you sure you want to delete?? !!!. +This delete command is used to delete a Job record and all associated catalog records that were created. This command operates only on the Catalog database and has no effect on the actual data written to a Volume. This command can be dangerous and we strongly recommend that you do not use it unless you know what you are doing. The Job and all its associated records (File and JobMedia) will be deleted from the catalog.Press OK to proceed with delete operation.? + + + + + Are you sure you want to cancel this job? + + + + + Bat + + + + + There were no results! +It is possible you may need to add "catalog = all" to the Messages resource for this job. 
+ + + + + + Error: + + + + + JobForm + + + Form + + + + + Cancel + + + + + Delete + + + + + View errors for this Job + + + + + Errors + + + + + Media + + + + + History + + + + + Run again + + + + + Read doc + + + + + FileSet + + + + + Stats + + + + + Refresh + + + + + Basic Information + + + + + JobId: + + + + + 2 + + + + + Job Name: + + + + + Test + + + + + Level: + + + + + VirtualFull + + + + + Client: + + + + + client-fd + + + + + FileSet: + + + + + TheFileSet + + + + + Pool: + + + + + ThePool + + + + + Status + + + + + Status: + + + + + Errors: + + + + + 0 + + + + + Files: + + + + + 1,924 + + + + + Bytes: + + + + + 109 MB + + + + + Purged: + + + + + Times + + + + + Sched Time: + + + + + + 2009-07-31 00:10:00 + + + + + Start Time: + + + + + End Time: + + + + + 2009-07-31 00:20:00 + + + + + Duration: + + + + + 00:10:00 + + + + + Volume Used + + + + + Vol0001 + + + + + Running Information + + + + + Speed: + + + + + Files Examined: + + + + + Current File: + + + + + /var/www/bacula/spool + + + + + 100,000 + + + + + 100 MB/s + + + + + kB/s + + + + + Bandwidth Limit: + + + + + <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> +<html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal;"> +<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:'DejaVu Sans'; font-size:10pt;"></p></body></html> + + + + + JobList + + + + + + + + + + Any + + + + + + Job Id + + + + + + Job Name + + + + + + Client + + + + + + Job Starttime + + + + + + Job Type + + + + + + Job Level + + + + + + Job Files + + + + + + Job Bytes + + + + + + Job Status + + + + + + Purged + + + + + + File Set + + + + + Pool Name + + + + + First Volume + + + + + VolCount + + + + + Delete list of %1 Jobs + + + + + Purge Files from list of %1 Jobs + + + + + Delete Single Job + + + + + Purge Files from single job + + + + + Running + + + + + Created, not yet running + + + + + Backup + + + + + The Jobs query returned no results. +Press OK to continue? + + + + + Jobs Run on Volume %1 + + + + + Jobs Run from Client %1 + + + + + Jobs Run of Job %1 + + + + + Jobs Run with fileset %1 + + + + + Jobs Run + + + + + Are you sure you want to delete?? !!!. +This delete command is used to delete a Job record and all associated catalog records that were created. This command operates only on the Catalog database and has no effect on the actual data written to a Volume. This command can be dangerous and we strongly recommend that you do not use it unless you know what you are doing. The Job and all its associated records (File and JobMedia) will be deleted from the catalog.Press OK to proceed with delete operation.? + + + + + run job="%1" client="%2" level=%3 + + + + + fileset="%1" + + + + + Are you sure you want to purge ?? !!!. +The Purge command will delete associated Catalog database records from Jobs and Volumes without considering the retention period. Purge works only on the Catalog database and does not affect data written to Volumes. This command can be dangerous because you can delete catalog records associated with current backups of files, and we recommend that you do not use it unless you know what you are doing. +Press OK to proceed with the purge operation? 
+ + + + + JobListForm + + + Form + + + + + Refresh + + + + + Graph + + + + + FileSet + + + + + Status + + + + + Purged + + + + + Record Limit + + + + + Days Limit + + + + + Clients + + + + + Volume + + + + + Job + + + + + Level + + + + + Pool + + + + + Filter Copy Jobs + + + + + Filter Migration Jobs + + + + + Refresh Job List + + + + + Requery the director for the list of jobs. + + + + + List Job Volumes + + + + + List Volumes + + + + + Delete Job + + + + + Purge Files + + + + + Restart Job + + + + + Show Job Log + + + + + Show Job Info + + + + + List Job Totals on Console + + + + + List Files On Job + + + + + Restore From Job + + + + + Restore From Time + + + + + Cancel Currently Running Job + + + + + JobLog + + + JobLog + + + + + Bat + + + + + There were no results! +It is possible you may need to add "catalog = all" to the Messages resource for this job. + + + + + + Log records for job %1 + + + + + Error: + + + + + JobLogForm + + + Job Log + + + + + JobPlot + + + JobPlot + + + + + + Sticks + + + + + + Lines + + + + + + Steps + + + + + + + None + + + + + + + + + + + Any + + + + + The Jobs query returned no results. +Press OK to continue? + + + + + Files and Bytes backed up + + + + + <-- Bytes Kb + + + + + date of backup --> + + + + + Number of Files --> + + + + + Files + + + + + Bytes + + + + + Fitted + + + + + Ellipse + + + + + Rect + + + + + Diamond + + + + + Triangle + + + + + DTrianle + + + + + UTriangle + + + + + LTriangle + + + + + RTriangle + + + + + Cross + + + + + XCross + + + + + HLine + + + + + Vline + + + + + Star1 + + + + + Star2 + + + + + Hexagon + + + + + JobPlotControlsForm + + + Form + + + + + File Data + + + + + Byte Data + + + + + Refresh + + + + + Status + + + + + Level + + + + + Purged + + + + + FileSet + + + + + Volume + + + + + Client + + + + + Job + + + + + Days Limit + + + + + Record Limit + + + + + Byte Symbol Type + + + + + File Symbol Type + + + + + Graph Type + + + + + Jobs + + + Jobs + + + + + + Job Name + + + + + Pool + + + + + Messages + + + + + Client + + + + + Storage + + + + + Where + + + + + Level + + + + + + Type + + + + + FileSet + + + + + Catalog + + + + + Enabled + + + + + MainForm + + + bat - Bacula Admin Tool + + + + + Bacula Administration Tool + + + + + It's a Dock widget to allow page selection + + + + + Settings + + + + + &Help + + + + + &File + + + + + + Current Status + + + + + Tool Bar + + + + + Selects panel window + + + + + Use items in this tree to select what window is in panel + + + + + 1 + + + + + Command: + + + + + Click here to enter command + + + + + &Quit + + + + + Ctrl+Q + + + + + &About bat + + + + + &Copy + + + + + Cu&t + + + + + new + + + + + open + + + + + &Paste + + + + + &Print + + + + + Print + + + + + &Save + + + + + Save (not implemented) + + + + + Connect + + + + + Connect/disconnect + + + + + Label + + + + + + Label a Volume + + + + + Restore + + + + + Restore Files + + + + + + + Run Job + + + + + Run a Job + + + + + + + Estimate Job + + + + + Estimate a Job + + + + + Status Dir + Query status of director + + + + + Status Dir + + + + + + Query status of director in console + + + + + &Select Font ... + + + + + Undock Window + + + + + Undock Current Window + + + + + ToggleDock + + + + + + Toggle Dock Status + + + + + Close Page + + + + + Close The Current Page + + + + + Messages + + + + + Display any messages queued at the director + + + + + &Preferences ... 
+ + + + + + Set Preferences + + + + + bat &Help + + + + + Browse + + + + + + Browse Cataloged Files + + + + + JobPlot + + + + + + Plot Job Files and Bytes + + + + + Status Dir Page + + + + + Director Status Page + + + + + Repop Lists + + + + + Reload and Repop + + + + + back + + + + + + Previous Page + + + + + MainWin + + + Select Page + + + + + Console + + + + + Director Status + + + + + Director not connected. Click on connect button. + + + + + About bat + + + + + <br><h2>Bacula Bat %1 (%2)</h2><p>Copyright &copy; 2007-%3 Kern Sibbald<p>The <b>bat</b> is an administrative console interface to the Director. + + + + + Ready + + + + + MediaEdit + + + Media Edit + + + + + No Volume name + + + + + No Volume name given + + + + + MediaInfo + + + Media Info + + + + + Are you sure you want to purge ?? !!!. +The Purge command will delete associated Catalog database records from Jobs and Volumes without considering the retention period. Purge works only on the Catalog database and does not affect data written to Volumes. This command can be dangerous because you can delete catalog records associated with current backups of files, and we recommend that you do not use it unless you know what you are doing. +Press OK to proceed with the purge operation? + + + + + Are you sure you want to delete?? !!!. +This delete command is used to delete a Volume record and all associated catalog records that were created. This command operates only on the Catalog database and has no effect on the actual data written to a Volume. This command can be dangerous and we strongly recommend that you do not use it unless you know what you are doing. All Jobs and all associated records (File and JobMedia) will be deleted from the catalog.Press OK to proceed with delete operation.? + + + + + MediaList + + + Volume Name + + + + + Id + + + + + Status + + + + + Enabled + + + + + Bytes + + + + + Files + + + + + Jobs + + + + + Retention + + + + + Media Type + + + + + Slot + + + + + Use Duration + + + + + Max Jobs + + + + + Max Files + + + + + Max Bytes + + + + + Recycle + + + + + Last Written + + + + + + Pools + + + + + First Written + + + + + Read Time + + + + + Write Time + + + + + Recycle Count + + + + + Recycle Pool + + + + + Are you sure you want to delete?? !!!. +This delete command is used to delete a Volume record and all associated catalog records that were created. This command operates only on the Catalog database and has no effect on the actual data written to a Volume. This command can be dangerous and we strongly recommend that you do not use it unless you know what you are doing. All Jobs and all associated records (File and JobMedia) will be deleted from the catalog.Press OK to proceed with delete operation.? + + + + + Are you sure you want to purge ?? !!!. +The Purge command will delete associated Catalog database records from Jobs and Volumes without considering the retention period. Purge works only on the Catalog database and does not affect data written to Volumes. This command can be dangerous because you can delete catalog records associated with current backups of files, and we recommend that you do not use it unless you know what you are doing. +Press OK to proceed with the purge operation? + + + + + MediaListForm + + + Media Tree + + + + + Refresh Media List + + + + + Requery the director for the list of media. 
+ + + + + Edit Volume + + + + + List Jobs On Volume + + + + + Delete Volume + + + + + Prune Volume + + + + + Purge Volume + + + + + Relabel Volume + + + + + + + + Update all Volumes From Pool + + + + + + + + Update all Volumes from all Pools + + + + + Volume From Pool + + + + + MediaView + + + Media + + + + + Are you sure you want to purge ?? !!!. +The Purge command will delete associated Catalog database records from Jobs and Volumes without considering the retention period. Purge works only on the Catalog database and does not affect data written to Volumes. This command can be dangerous because you can delete catalog records associated with current backups of files, and we recommend that you do not use it unless you know what you are doing. +Press OK to proceed with the purge operation? + + + + + Are you sure you want to delete?? !!!. +This delete command is used to delete a Volume record and all associated catalog records that were created. This command operates only on the Catalog database and has no effect on the actual data written to a Volume. This command can be dangerous and we strongly recommend that you do not use it unless you know what you are doing. All Jobs and all associated records (File and JobMedia) will be deleted from the catalog.Press OK to proceed with delete operation.? + + + + + MediaViewForm + + + Form + + + + + Edit + + + + + Purge + + + + + Delete + + + + + Prune + + + + + Filter + + + + + Media Type: + + + + + Status: + + + + + Limit: + + + + + Name: + + + + + Pool: + + + + + Location: + + + + + Expired + + + + + Apply + + + + + Volume Name + + + + + Online + + + + + Slot + + + + + Vol Bytes + + + + + Vol Usage + + + + + Vol Status + + + + + Pool + + + + + Media Type + + + + + Last Written + + + + + When expire? + + + + + Pages + + + %1 of Director %2 + + + + + UnDock %1 Window + + + + + ReDock %1 Window + + + + + Console + + + + + PrefsForm + + + Preferences + + + + + Messages Options + + + + + Message check interval in seconds + + + + + Check Messages + + + + + Joblist + + + + + Joblist Limit Options + + + + + Days Limit + + + + + Record Limit + + + + + Misc + + + + + Convert + + + + + Convert Off + + + + + Context Sensitive List Commands + + + + + Execute Long List + + + + + Open Plot page on startup + + + + + Open Browser page on startup + + + + + Open Director Status page on startup + + + + + Debug + + + + + + Debugging Options + + + + + Debug comm + + + + + Display all messages in console + + + + + Debug Commands + + + + + Debug Miscellaneous Items + + + + + Debug Sql queries + + + + + Timers + + + + + Display Bytes using IEC units (1024B = 1 KiB) + + + + + Display Bytes using SI units (1000B = 1KB) + + + + + Open Pages + + + + + Debug multiple connection + + + + + RestoreTree + + + + + Restore Debug 2 + + + + + Directory Item Changed + + + + + Restore Debug 1 + + + + + Directory Current Item Changed Debug + + + + + Update File Table Debug + + + + + Version Table Item Changed Debug + + + + + File Table Item Changed Debug + + + + + Icon State Debug + + + + + Update Checks Debug + + + + + Restore Debug 3 + + + + + Update Version Table Debug + + + + + Populate Directory Debug + + + + + <h2>Preferences</h2> + + + + + QObject + + + Any + + + + + + + No + + + + + + + Yes + + + + + Invalid job status %1 + + + + + StorStat + + + Storage Status %1 + + + + + Job Id + + + + + Job Level + + + + + Job Files + + + + + Job Bytes + + + + + Job Status + + + + + Job Time + + + + + Job Name + + + + + StorStatForm + + + Form + + + + + Header + + + + + Waitreservation + + + 
+ + Devices + + + + + Volumes + + + + + Spooling + + + + + Running + + + + + Misc + + + + + Mount + + + + + UMount + + + + + Label + + + + + Release + + + + + Refresh Timer + + + + + Do Refresh + + + + + <html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:13pt; font-weight:600;">Terminated Jobs</span></p></body></html> + + + + + Refresh + + + + + Cancel Running Job + + + + + Disable Scheduled Job + + + + + Storage + + + + Storage + + + + + Name + + + + + Id + + + + + Changer + + + + + Slot + + + + + Status + + + + + Enabled + + + + + Pool + + + + + Media Type + + + + + Yes + + + + + Status Storage "%1" + + + + + Status Storage "%1" in Window + + + + + Label media in Storage "%1" + + + + + Mount media in Storage "%1" + + + + + "UN" Mount media in Storage "%1" + + + + + Release media in Storage "%1" + + + + + Barcode Scan media in Storage "%1" + + + + + Read scan media in Storage "%1" + + + + + Storage Status %1 + + + + + StorageForm + + + Storage Tree + + + + + 1 + + + + + Refresh Storage List + + + + + Requery the director for the list of storage objects. + + + + + + Status Storage In Console + + + + + + Label Storage + + + + + + MountStorage + + + + + + UnMount Storage + + + + + + Update Slots + + + + + + Update Slots Scan + + + + + Release + + + + + Status Storage Window + + + + + bRestore + + + bRestore + + + + + bRestoreForm + + + brestore + + + + + Merge Jobs + + + + + View all Versions + + + + + Current Directory + + + + + File list -- drag below for restore + + + + + Double Click File Name to decend + + + + + + Type + + + + + File Name + + + + + Double Click to decend + + + + + + + Size + + + + + + + Date + + + + + Selection Range + + + + + - + + + + + File Filter + + + + + File revisions -- drag below for restore + + + + + <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> +<html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal;"> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Drag and drop <span style=" font-weight:600;">File list</span> and/or <span style=" font-weight:600;">File revisions</span> items here for Restore</p></body></html> + + + + + Restore... 
+ + + + + Select from Client list drop down then select from Job list drop down + + + + + InChanger + + + + + Volume + + + + + + JobId + + + + + Chksum + + + + + Clear + + + + + Estimate + + + + + FileName + + + + + FileIndex + + + + + Nb Files + + + + + bRunRestoreForm + + + Run restore + + + + + Standard + + + + + Restore options + + + + + Client: + + + + + Where: + + + + + Replace: + + + + + Comment: + + + + + Media needed + + + + + InChanger + + + + + Volume + + + + + Compute with directories + + + + + Advanced + + + + + File Relocation + + + + + Use file +relocation: + + + + + Strip prefix: + + + + + Add prefix: + + + + + Add suffix: + + + + + Use regexp: + + + + + Where +regexp: + + + + + Other options + + + + + When: + + + + + Priority: + + + + + Storage: + + + + + Job: + + + + + estimateForm + + + Form + + + + + List Files + + + + + Level: + + + + + Client: + + + + + Job: + + + + + FileSet: + + + + + <h3>Estimate a backup Job</h3> + + + + + OK + + + + + Cancel + + + + + estimatePage + + + Estimate + + + + + helpForm + + + Form + + + + + &Home + + + + + &Back + + + + + Close + + + + + jobsForm + + + Client Tree + + + + + Refresh Jobs List + + + + + Requery the director for the list of clients. + + + + + + + + List Files Command + + + + + + + + List Volumes Command + + + + + + + + List Next Volume Command + + + + + + + + Enable Job Command + + + + + + + + Disable Job Command + + + + + + + + Open JobList on Job + + + + + + Cancel Job Command + + + + + RunJob + + + + + labelForm + + + Form + + + + + OK + + + + + Cancel + + + + + Volume Name: + + + + + Storage: + + + + + Enter Name of Volume to create + + + + + Slot: + + + + + Execute Automount + + + + + On + + + + + Off + + + + + Pool: + + + + + <html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:12pt; font-weight:600;">Label a Volume</span></p></body></html> + + + + + mediaEditForm + + + Form + + + + + Pool: + + + + + Volume Status: + + + + + Max Volume Bytes: + + + + + Slot: + + + + + Max Volume Jobs: + + + + + Use Duration: + + + + + OK + + + + + Cancel + + + + + Retention: + + + + + Recycle Pool: + + + + + Enabled + + + + + Max Volume Files: + + + + + Years + + + + + Seconds + + + + + Use Duration + + + + + Days + + + + + Hours + + + + + Months + + + + + Retention + + + + + Minutes + + + + + Volume : + + + + + Recycle + + + + + <html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:12pt; font-weight:600;">Edit a Volume</span></p></body></html> + + + + + mediaInfoForm + + + Form + + + + + Edit + + + + + Purge + + + + + Delete + + + + + Prune + + + + + Load + + + + + Unload + + + + + Information + + + + + Name: + + + + + Vol0001 + + + + + Pool: + + + + + Default + + + + + Online: + + + + + Enabled: + + + + + yes + + + + + Location: + + + + + Vault + + + + + Status: + + + + + Append + + + + + Media Type: + + + + + File + + + + + Recycle Pool: + + + + + Scratch + + + + + Statistics + + + + + 
Vol Bytes: + + + + + 19.8 MB + + + + + Vol Mounts: + + + + + 10 + + + + + Recycle count: + + + + + 5 + + + + + Read time: + + + + + 10 mins + + + + + Write time: + + + + + 20 mins + + + + + Errors: + + + + + + + + + 0 + + + + + Last Written: + + + + + 2009-07-05 12:23:00 + + + + + First Written: + + + + + 2009-06-05 10:00:00 + + + + + Limits + + + + + Use duration: + + + + + Max jobs: + + + + + Max files: + + + + + Max bytes: + + + + + Recycle: + + + + + Retention: + + + + + 365 days + + + + + Expire: + + + + + 2010-08-03 23:10:03 + + + + + Jobs + + + + + JobId + + + + + Name + + + + + Start Time + + + + + Type + + + + + Level + + + + + Files + + + + + Bytes + + + + + Status + + + + + mountDialog + + + Storage : %1 + + + + + No Storage name + + + + + No Storage name given + + + + + Context sensitive command : + + + + + + + Director Response : + + + + + + + mountForm + + + Label + + + + + TextLabel + + + + + Slot: + + + + + <html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:12pt; font-weight:600;">Mount a Slot</span></p></body></html> + + + + + prefsDialog + + + Canceled + + + + + prerestoreForm + + + Form + + + + + All Files + + + + + Select Files + + + + + Job + + + + + JobIds + + + + + yyyy-mm-dd h:mm:ss + + + + + Use Most Recent + + + + + File Set: + + + + + Client: + + + + + Storage: + + + + + Before: + + + + + Pool: + + + + + OK + + + + + Cancel + + + + + <h3>Select Jobs</h3> + + + + + prerestorePage + + + Restore + + + + + + + Any + + + + + + Comma separated list of JobIds + + + + + Canceled + + + + + There can be no spaces in the text for the joblist. +Press OK to continue? + + + + + The string is not a comma separated list of integers. +Press OK to continue? + + + + + Bat + + + + + At least one of the jobs is not a valid job of type "Backup". +Press OK to continue? + + + + + All jobs in the list must be of the same jobName and same client. +Press OK to continue? 
+ + + + + pruneForm + + + Form + + + + + Prune Files + + + + + Volume: + + + + + <h3>Prune Files/Jobs/Volumes</h3> + + + + + Prune Jobs + + + + + OK + + + + + Cancel + + + + + Client: + + + + + Prune Volumes + + + + + prunePage + + + Prune + + + + + + + + + + Any + + + + + Canceled + + + + + relabelDialog + + + From Volume : + + + + + No Volume name + + + + + No Volume name given + + + + + + New name must be different + + + + + relabelForm + + + Label + + + + + From Volume : + + + + + Pool: + + + + + Storage: + + + + + New Volume Name: + + + + + Slot: + + + + + <html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:12pt; font-weight:600;">Relabel a Volume</span></p></body></html> + + + + + restoreForm + + + Form + + + + + Select a Directory + + + + + Directories + + + + + + 1 + + + + + 2 + + + + + 3 + + + + + 4 + + + + + 5 + + + + + 6 + + + + + <h3>Restore Select</h3> + + + + + Up + + + + + + Mark + + + + + Unmark + + + + + <h2>Files</h2> + + + + + Status: + + + + + Current Dir: + + + + + OK + + + + + Cancel + + + + + UnMark + + + + + restorePage + + + Restore Select + + + + + Mark + + + + + File + + + + + Mode + + + + + User + + + + + Group + + + + + Size + + + + + Date + + + + + In addDirectory cwd "%1" newdir "%2" fullpath "%3" + + + + + + + Nothing selected, nothing done + + + + + cd command failed + + + + + .pwd command failed + + + + + In else of if parent cwd "%1" newdir "%2" + + + + + + Canceled + + + + + restoreTree + + + Version Browser + + + + + Directories + + + + + + + + + + Any + + + + + Refresh From Re-Select + + + + + Refresh From JobChecks + + + + + Querying Database + + + + + Querying Jobs + + + + + Querying for Directories + + + + + Processing Directories + + + + + File Name + + + + + Filename Id + + + + + + + Task %1 of %2 + + + + + No jobs were selected in the job query !!!. 
+Press OK to continue + + + + + Present Working Directory: %1 + + + + + + Job Id + + + + + + + Type + + + + + + End Time + + + + + Hash + + + + + FileId + + + + + Job Type + + + + + First Volume + + + + + RestoreTreePage + + + + + Level + + + + + Name + + + + + + Purged + + + + + + TU + + + + + + TD + + + + + Task 1 of 3 + + + + + Processing Checked directories + + + + + Task 2 of 3 + + + + + Processing Exceptions + + + + + Task 3 of 3 + + + + + Filling Database Table + + + + + restoreTreeForm + + + Form + + + + + Jobs + + + + + + + TextLabel + + + + + Files + + + + + Versions of File + + + + + FileName + + + + + Refresh + + + + + Restore + + + + + Job + + + + + + Job List Job Criterion Selector + + + + + Client + + + + + + Job List Client Criterion Selector + + + + + FileSet + + + + + + Job List Fileset Criterion Selector + + + + + Record Limit + + + + + Days Limit + + + + + Directory + + + + + Select Directory + + + + + UnselectDirectory + + + + + runCmdForm + + + Form + + + + + Priority: + + + + + yyyy-mm-dd hh:mm:ss + + + + + When: + + + + + Where: + + + + + Bootstrap: + + + + + Job: + + + + + Storage: + + + + + FileSet: + + + + + Replace: + + + + + To client: + + + + + Catalog: + + + + + OK + + + + + Cancel + + + + + <h3>Run Restore Job</h3> + + + + + runCmdPage + + + Restore Run + + + + + + never + + + + + always + + + + + ifnewer + + + + + ifolder + + + + + Canceled + + + + + runForm + + + Level: + + + + + Bootstrap: + + + + + yyyy-mm-dd hh:mm:ss + + + + + Job: + + + + + Pool: + + + + + Type: + + + + + Run job + + + + + Job properties + + + + + <h3>Backup<h3/> + + + + + FileSet: + + + + + Messages: + + + + + <h3>Run a Job</h3> + + + + + Priority: + + + + + Client: + + + + + OK + + + + + Cancel + + + + + Storage: + + + + + When: + + + + + runPage + + + Run + + + + + Canceled + + + + + selectDialog + + + Canceled + + + + + selectForm + + + Selection dialog + + + + + textInputDialog + + + Canceled + + + + + textInputForm + + + Text input dialog + + + + + Message + + + + + yesnoPopUp + + + Bat Question + + + + diff --git a/src/qt-console/ts/bat_fr.ts b/src/qt-console/ts/bat_fr.ts new file mode 100644 index 00000000..9ff16a64 --- /dev/null +++ b/src/qt-console/ts/bat_fr.ts @@ -0,0 +1,5015 @@ + + + + + ClientForm + + + Client Tree + Arborescence Client + + + + Refresh Client List + Rafraichir la liste des Clients + + + + Requery the director for the list of clients. + Interroger le director pour la liste des clients. + + + + List Jobs of Client + Lister les travaux du client + + + + Open a joblist page selecting this client. + Ouvrir une liste des travaux en sélectionnant ce client. + + + + Purge Jobs + Purger les travaux + + + + Purge jobs peformed from this client. + Purger les travaux de ce client. + + + + Prune Jobs + Élaguer les travaux + + + + Open the diaolog to prune for this client. + Ouvrir un dialogue pour élaguer (prune) ce client. 
+ + + + Status Client + État du client + + + + ClientStat + + + Client Status %1 + État du client %1 + + + + Job Id + Id du travail + + + + Job Level + Niveau + + + + Job Files + Fichiers + + + + Job Bytes + Octets + + + + Job Status + État + + + + Job Time + Heure + + + + Job Name + Nom + + + + ClientStatForm + + + Form + Form + + + + Running + En cours + + + + Header + Entête + + + + Refresh Timer + Sablier de MàJ + + + + Do Refresh + Activer + + + + <html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:13pt; font-weight:600;">Terminated Jobs</span></p></body></html> + <html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:13pt; font-weight:600;">Travaux terminés</span></p></body></html> + + + + Refresh + Actualiser + + + + Cancel Running Job + Annuler le travail en cours + + + + Clients + + + Clients + Clients + + + + + Client Name + Nom du client + + + + File Retention + Durée de rétention du fichier + + + + Job Retention + Rétention du travail + + + + AutoPrune + Élagage auto + + + + ClientId + Id du client + + + + Uname + Uname + + + + Are you sure you want to purge all jobs of client "%1" ? +The Purge command will delete associated Catalog database records from Jobs and Volumes without considering the retention period. Purge works only on the Catalog database and does not affect data written to Volumes. This command can be dangerous because you can delete catalog records associated with current backups of files, and we recommend that you do not use it unless you know what you are doing. + + Is there any way I can get you to click Cancel here? You really don't want to do this + +Press OK to proceed with the purge operation? + Êtes-vous sûr de vouloir purger tous les travaux du client "%1" ? +La commande Purge va détruire les enregistrements du catalogue pour les travaux et les volumes sans prendre en considération les durées de rétention. Purger n'affecte que le catalogue, pas les données écrites dans les Volumes. Cette commande peut être dangereuse car vous pouvez détruire les enregistrements associés aux sauvegardes de fichiers actuelles, et nous vous recommandons de ne pas l'utiliser sauf si vous savez vraiment ce que vous faîtes. + +Comment vous persuader de clicker Annuler ? Vous ne désirez probablement pas faire ça ! + +Cliquer OK pour réaliser l'opération de purge ? + + + + Client Status %1 + État du client %1 + + + + Console + + + Console + Console + + + + No Director found. + Aucun Director trouvé. + + + + Director not connected. Click on connect button. + Director déconnecté. Cliquer sur le bouton connexion. + + + + Director is currently disconnected +Please reconnect! + le Director est actuellement déconnecté +Veuillez vous reconnecter ! 
+ + + + ConsoleForm + + + Console + Console + + + + StatusDir + + + + + Console Help + Aide de la Console + + + + Request Messages + Récupérer les messages + + + + Reload bacula-dir.conf + Recharger bacula-dir.conf + + + + Content + + + Storage Status %1 + Etat du dépôt %1 + + + + ContentForm + + + Form + Form + + + + Actions + Actions + + + + Update slots + M.à.J. slots + + + + Label + Étiquette + + + + Move to tray + Charger + + + + Empty tray + Décharger + + + + Mount + Monter + + + + Unmount + Démonter + + + + Status + État + + + + Release + Libérer + + + + Drives + Lecteurs + + + + Drive + Lecteur + + + + + Volume + Volume + + + + Import/Export + + + + + Slot + Slot + + + + Content + Contenu + + + + Slot + Slot + + + + Volume + Volume + + + + Bytes + Octets + + + + Status + État + + + + Media Type + Type de support + + + + Pool + Groupe + + + + Last Written + Dernière écriture + + + + When expire? + Expire le ? + + + + DirComm + + Bat + Bat + + + + Already connected. + Déjà connecté. + + + + DirStat + + + + Job Id + Id du travail + + + + + + Job Level + Niveau + + + + Job Files + Fichiers + + + + Job Bytes + Octets + + + + Job Status + État + + + + + Job Time + Heure + + + + + Job Name + Nom + + + + Job Type + Type de travail + + + + Priority + Priorité + + + + Volume + Volume + + + + Job Data + Données + + + + Job Info + Information + + + + Director Status + État du director + + + + DirStatForm + + + Form + Form + + + + <html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:13pt; font-weight:600;">Scheduled Jobs</span></p></body></html> + <html><head><meta name="qrichtext" content="1" /><style type="text/css">p, li { white-space: pre-wrap; }</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"><p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:13pt; font-weight:600;">Travaux prévus</span></p></body></html> + + + + <html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:13pt; font-weight:600;">Running Jobs</span></p></body></html> + <html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:13pt; font-weight:600;">Travaux en cours</span></p></body></html> + + + + <html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; 
margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:13pt; font-weight:600;">Terminated Jobs</span></p></body></html> + <html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:13pt; font-weight:600;">Travaux Terminés</span></p></body></html> + + + + Refresh Timer + Sablier de rafraîchissement + + + + Do Refresh + Activer le sablier + + + + Refresh + Actualiser + + + + Cancel Selected Running Jobs + Annuler les travaux en cours sélectionnés + + + + Disable Scheduled Job + Désactiver le job planifié + + + + FileSet + + + FileSets + Jeux de fichiers + + + FileSet Name + FileSet Name + + + + FileSet Id + Id du jeu de fichiers + + + + Create Time + Date de création + + + FileSet + FileSet + + + + + FileSet Name + Nom + + + + FileSetForm + + + FileSet Tree + Arbre des jeux de fichiers + + + + Refresh FileSet List + Rafraichir la liste des jeux de fichier + + + + Requery the director for the list of storage objects. + Réinterroger le director pour la liste des dépôts. + + + + ShowJobs + Afficher les travaux + + + + Show FileSet In Console + Afficher le jeu de fichiers dans la console + + + + Help + + + Help: %1 + Aide : %1 + + + + Job + + + Job + Travail + + + + Are you sure you want to delete?? !!!. +This delete command is used to delete a Job record and all associated catalog records that were created. This command operates only on the Catalog database and has no effect on the actual data written to a Volume. This command can be dangerous and we strongly recommend that you do not use it unless you know what you are doing. The Job and all its associated records (File and JobMedia) will be deleted from the catalog.Press OK to proceed with delete operation.? + Êtes-vous sûr de vouloir détruire?? !!!.Cette commande est utilisée pour détruire un enregistrement "travail" et tous les enregistrements associés du catalogue. Cette commande influe uniquement sur le catalogue et n'a pas d'effet sur les données écrites sur un volume. Cette commande peut être dangereuse et nous vous recommandons fortement de ne pas l'utiliser, sauf si vous savez exactement ce que vous faites. Le travail et les autres enregistrements associés (fichiers et supports) seront détruits du catalogue. Cliquer OK pour réaliser la destruction ? + + + + Are you sure you want to cancel this job? + Êtes-vous sûr de vouloir annuler ce travail ? + + + + Bat + Bat + + + + There were no results! +It is possible you may need to add "catalog = all" to the Messages resource for this job. + + Aucun résultat ! +Il se pourrait que vous deviez ajouter "catalog = all" à la ressource "Messages" de ce travail. 
+ + + + + Error: + Erreur : + + + + JobForm + + + Form + Form + + + + Cancel + Annuler + + + + Delete + Supprimer + + + + View errors for this Job + Voir les erreurs de ce travail + + + + Errors + Erreurs + + + + Media + Support + + + + History + Historique + + + + Run again + Ré-éxécuter + + + + Read doc + + + + + FileSet + Jeu de fichiers + + + + Stats + Statistiques + + + + Refresh + Actualiser + + + + Basic Information + Informations de base + + + + JobId: + Id Travail : + + + + 2 + 2 + + + + Job Name: + Nom du Travail : + + + + Test + + + + + Level: + Niveau : + + + + VirtualFull + + + + + Client: + Client : + + + + client-fd + + + + + FileSet: + Jeu de fichier : + + + + TheFileSet + + + + + Pool: + Groupe : + + + + ThePool + + + + + Status + État + + + + Status: + État : + + + + Errors: + Erreurs : + + + + 0 + 0 + + + + Files: + Fichiers : + + + + 1,924 + 1,924 + + + + Bytes: + Octets : + + + + 109 MB + + + + + Purged: + Purgés : + + + + Times + Temps + + + + Sched Time: + Heure prévue : + + + + + 2009-07-31 00:10:00 + 2009-07-31 00:10:00 + + + + Start Time: + Heure début : + + + + End Time: + Heure de fin : + + + + 2009-07-31 00:20:00 + 2009-07-31 00:20:00 + + + + Duration: + Durée : + + + + 00:10:00 + 00:10:00 + + + + Volume Used + Volume utilisé + + + + Vol0001 + + + + + Running Information + Informations en cours + + + + Speed: + Vitesse : + + + + Files Examined: + nb Fichers examinés : + + + + Current File: + Fichier en cours : + + + + /var/www/bacula/spool + + + + + 100,000 + 100,000 + + + + 100 MB/s + + + + + kB/s + + + + + Bandwidth Limit: + + + + + <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> +<html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal;"> +<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:'DejaVu Sans'; font-size:10pt;"></p></body></html> + + + + + JobList + + + The Jobs query returned no results. +Press OK to continue? + La requête ne renvoie aucun travail. +Cliquer OK pour continuer ? + + + + Are you sure you want to delete?? !!!. +This delete command is used to delete a Job record and all associated catalog records that were created. This command operates only on the Catalog database and has no effect on the actual data written to a Volume. This command can be dangerous and we strongly recommend that you do not use it unless you know what you are doing. The Job and all its associated records (File and JobMedia) will be deleted from the catalog.Press OK to proceed with delete operation.? + Êtes-vous sûr de vouloir détruire?? !!!.Cette commande est utilisée pour détruire un enregistrement "travail" et tous les enregistrements associés du catalogue. Cette commande influe uniquement sur le catalogue et n'a pas d'effet sur les données écrites sur un volume. Cette commande peut être dangereuse et nous vous recommandons fortement de ne pas l'utiliser, sauf si vous savez exactement ce que vous faites. Le travail et les autres enregistrements associés (fichiers et supports) seront détruits du catalogue. Cliquer OK pour réaliser la destruction ? + + + + Are you sure you want to purge ?? !!!. +The Purge command will delete associated Catalog database records from Jobs and Volumes without considering the retention period. 
Purge works only on the Catalog database and does not affect data written to Volumes. This command can be dangerous because you can delete catalog records associated with current backups of files, and we recommend that you do not use it unless you know what you are doing. +Press OK to proceed with the purge operation? + Êtes-vous sûr de vouloir purger ?? !!!. +La commande de purge va détruire les enregistrements du catalogue associés (travaux et volumes) sans prendre en compte la durée de rétention. La purge ne concerne que le catalogue et n'affecte pas les données écrites sur les volumes. Cette commande peut être dangereuse car vous pouvez détruire des enregistrements associés aux sauveardes actuelles, et nous vous recommandons de ne pas l'utiliser, sauf si vous savez exactement ce que vous faites. +Cliquer OK pour réaliser la purge? + + + + + + + + + + + Any + Tous, mais peut-on le traduire ? + + + + + + Job Id + Id du travail + + + + + Job Name + Nom + + + + + Client + Client + + + + + Job Starttime + Heure de début + + + + + Job Type + Type de travail + + + + + Job Level + Niveau + + + + + Job Files + Fichiers + + + + + Job Bytes + Octets + + + + + Job Status + État + + + + + Purged + Purgé + + + + + File Set + Jeu de fichiers + + + + Backup + Sauvegarde .... mais peut-on le traduire ? + + + + Restore + Restauration + + + + Pool Name + Nom du Groupe + + + + First Volume + Premier Volume + + + + VolCount + nb de volumes + + + + Jobs Run on Volume %1 + Exécutions sur le volume %1 + + + + Jobs Run from Client %1 + Exécutions depuis le client %1 + + + + Jobs Run of Job %1 + Exécutions du travail %1 + + + + Jobs Run with fileset %1 + Exécutions avec le jeu de fichiers %1 + + + + Jobs Run + Exécutions de travaux + + + + run job="%1" client="%2" level=%3 + faut-il traduire les commandes envoyées à la console ? je ne crois pas. + + + + + fileset="%1" + faut-il traduire les commandes envoyées à la console ? je ne crois pas. + + + + + Delete list of %1 Jobs + Détruire %1 travaux + + + + Purge Files from list of %1 Jobs + Purger les fichiers de %1 travaux + + + + Delete Single Job + Détruire un travail + + + + Purge Files from single job + Purger les fichiers d'un travail + + + + Running + En cours ... mais faut-il le traduire ? + + + + + Created, not yet running + Créé, pas encore lancé ... mais faut-il le traduire ? + + + + + JobListForm + + + Form + Form + + + + Refresh + Actualiser + + + + Graph + Graphique + + + + FileSet + Jeu de fichiers + + + + Status + État + + + + Purged + Purgé + + + + Record Limit + Limitation en nombre + + + + Days Limit + Limitation en jours + + + + Clients + Clients + + + + Volume + Volume + + + + Job + Travail + + + + Level + Niveau + + + + Refresh Job List + Actualiser la liste + + + + Requery the director for the list of jobs. + Réinterroger le Director pour la liste des travaux. 
+ + + + List Files On Job + Lister les fichiers du travail + + + + Restore From Job + Restaurer depuis ce travail + + + + Restore From Time + Restaurer à cette date + + + + Cancel Currently Running Job + Annuler le travail en cours + + + + Pool + Groupe + + + + Filter Copy Jobs + Filtrer les copies + + + + Filter Migration Jobs + Filtrer les migrations + + + + List Job Volumes + Lister les volumes du travail + + + + List Volumes + Lister les volumes + + + + Delete Job + Détruire le travail + + + + Purge Files + Purger les fichiers + + + + Restart Job + Relancer le travail + + + + Show Job Log + Voir les traces du travail + + + + Show Job Info + Voir les informations du travail + + + + List Job Totals on Console + Lister les grands totaux des travaux dans la console + + + + JobLog + + + Bat + Bat + + + + JobLog + Traces du travail + + + + There were no results! +It is possible you may need to add "catalog = all" to the Messages resource for this job. + + Aucun résultat ! +Il se pourrait que vous deviez ajouter "catalog = all" à la ressource "Messages" de ce travail. + + + + + Log records for job %1 + Enregistrements de trace du travail %1 + + + + Error: + Erreur : ... mais faut-il le traduire ? + + + + + JobLogForm + + + Job Log + Traces du travail + + + + JobPlot + + + The Jobs query returned no results. +Press OK to continue? + La requête ne renvoie aucun travail. +Cliquer OK pour continuer ? + + + + JobPlot + + + + + + Sticks + + + + + + Lines + + + + + + Steps + + + + + + + None + + + + + + + + + + + Any + Tous + + + + Files and Bytes backed up + + + + + <-- Bytes Kb + + + + + date of backup --> + + + + + Number of Files --> + + + + + Files + Fichiers + + + + Bytes + Octets + + + + Fitted + + + + + Ellipse + + + + + Rect + + + + + Diamond + + + + + Triangle + + + + + DTrianle + + + + + UTriangle + + + + + LTriangle + + + + + RTriangle + + + + + Cross + + + + + XCross + + + + + HLine + + + + + Vline + + + + + Star1 + + + + + Star2 + + + + + Hexagon + + + + + JobPlotControlsForm + + + Form + Form + + + + File Data + + + + + Byte Data + + + + + Refresh + Actualiser + + + + Status + État + + + + Level + Niveau + + + + Purged + Purgé + + + + FileSet + Jeu de fichiers + + + + Volume + Volume + + + + Client + Client + + + + Job + Travail + + + + Days Limit + Limitation en jour + + + + Record Limit + Limitation en nombre + + + + Byte Symbol Type + + + + + File Symbol Type + + + + + Graph Type + Type de graphique + + + + Jobs + + + Jobs + Travaux + + + + + Job Name + Nom du travail + + + + Pool + Groupe + + + + Messages + Messages + + + + Client + Client + + + + Storage + Dépôt + + + + Where + + + + + Level + Niveau + + + + + Type + Type + + + + FileSet + Jeu de fichiers + + + + Catalog + Catalogue + + + + Enabled + Activé + + + + MainForm + + + bat - Bacula Admin Tool + + + + + Bacula Administration Tool + Outil d'administration de Bacula + + + + It's a Dock widget to allow page selection + + + + + Settings + Options + + + + &Help + &Aide + + + + &File + &Fichier + + + + + Current Status + Barre d'état + + + + Tool Bar + Barre d'outils + + + Page Selector + Choisir Page + + + + Selects panel window + Choisir l'onglet actif + + + + Use items in this tree to select what window is in panel + Cliquer sur un élément de cette liste pour choisir l'onglet actif du panneau + + + Enter a bacula command + Entrer une commande Bacula + + + + Command: + Commande : + + + + Click here to enter command + Cliquer ici pour entrer une commande + + + + &Quit + &Quitter + + + + Ctrl+Q + + + + + &About bat + &À 
propos de bat + + + + &Copy + Co&pier + + + + Cu&t + &Couper + + + + new + nouveau + + + + open + ouvrir + + + + &Paste + Co&ller + + + + &Print + &Imprimer + + + + Print + Imprimer + + + + &Save + &Enregistrer + + + + Save (not implemented) + Enregistrer (pas implémenté) + + + + Connect + Connexion + + + + Connect/disconnect + Connexion/Déconnexion + + + + Label + Étiquette (Label) + + + + + Label a Volume + Étiqueter un volume + + + + Restore + Restaurer + + + + Restore Files + Restaurer des fichiers + + + + + + Run Job + Lancer un travail + + + + Run a Job + Lancer un nouveau travail + + + + + + Estimate Job + Estimer un travail + + + + Estimate a Job + Estimer un travail + + + + Status Dir + Query status of director + État du director + + + + Status Dir + État du director + + + + + Query status of director in console + Interroger l'état du director dans la console + + + + &Select Font ... + &Choisir la police ... + + + + Undock Window + Déplacer vers une nouvelle fenêtre + + + + Undock Current Window + Déplacer l'onglet courant vers une nouvelle fenêtre + + + + ToggleDock + + + + + + Toggle Dock Status + + + + + Close Page + Fermer l'onglet + + + + Close The Current Page + Fermer l'onglet en cours + + + + Messages + Messages + + + + Display any messages queued at the director + Afficher tous les messages en attente dans le director + + + + &Preferences ... + &Préférences ... + + + + + Set Preferences + Définir les préférences + + + + bat &Help + &Aide de bat + + + + Browse + Naviguer + + + + + Browse Cataloged Files + Naviguer dans les fichiers du catalogue + + + + JobPlot + + + + + + Plot Job Files and Bytes + + + + + Status Dir Page + État du director + + + + Director Status Page + État du director + + + + 1 + 1 + + + + Repop Lists + Remplir les listes + + + + Reload and Repop + Recharger et remplir les listes + + + + back + + + + + + Previous Page + + + + + MainWin + + + Director not connected. Click on connect button. + le Director n'est pas connecté. Cliquer le bouton de connexion. + + + + About bat + À propos de bat + + + <br><h2>bat 1.0, by Dirk H Bartley and Kern Sibbald</h2><p>Copyright &copy; 2007- + <br><h2>bat 1.0, par Dirk H Bartley et Kern Sibbald</h2><p>Copyright &copy; 2007- + + + + Ready + Prêt + + + + Select Page + Sélection d'onglet + + + + Console + Console + + + + Director Status + Etat du director + + + + <br><h2>Bacula Bat %1 (%2)</h2><p>Copyright &copy; 2007-%3 Kern Sibbald<p>The <b>bat</b> is an administrative console interface to the Director. + + + + <br><h2>bat %1 (%2), by Dirk H Bartley and Kern Sibbald</h2><p>Copyright &copy; 2007-%3 Kern Sibbald<p>The <b>bat</b> is an administrative console interface to the Director. + <br><h2>bat %1 (%2), par Dirk H Bartley et Kern Sibbald</h2><p>Copyright &copy; 2007-%3 Kern Sibbald<p><b>bat</b> est une console d'administration pour le "Director". + + + + MediaEdit + + + Media Edit + Édition du support + + + + No Volume name + Pas de nom de volume + + + + No Volume name given + Pas de nom de Volume donné + + + + MediaInfo + + + Media Info + Informations du support + + + + Are you sure you want to purge ?? !!!. +The Purge command will delete associated Catalog database records from Jobs and Volumes without considering the retention period. Purge works only on the Catalog database and does not affect data written to Volumes. This command can be dangerous because you can delete catalog records associated with current backups of files, and we recommend that you do not use it unless you know what you are doing. 
+Press OK to proceed with the purge operation? + Êtes-vous sûr de vouloir purger ?? !!!. +La commande de purge va détruire les enregistrements du catalogue associés (travaux et volumes) sans prendre en compte la durée de rétention. La purge ne concerne que le catalogue et n'affecte pas les données écrites sur les volumes. Cette commande peut être dangereuse car vous pouvez détruire des enregistrements associés aux sauvegardes actuelles, et nous vous recommandons de ne pas l'utiliser, sauf si vous savez exactement ce que vous faites. +Cliquer OK pour réaliser la purge? + + + + Are you sure you want to delete?? !!!. +This delete command is used to delete a Volume record and all associated catalog records that were created. This command operates only on the Catalog database and has no effect on the actual data written to a Volume. This command can be dangerous and we strongly recommend that you do not use it unless you know what you are doing. All Jobs and all associated records (File and JobMedia) will be deleted from the catalog.Press OK to proceed with delete operation.? + Êtes-vous sûr de vouloir détruire?? !!!. +Cette commande est utilisée pour détruire un enregistrement "volume" et tous les enregistrements associés du catalogue. Cette commande influe uniquement sur le catalogue et n'a pas d'effet sur les données écritessur un volume. Cette commande peut être dangereuse et nous vous recommandons fortement de ne pas l'utiliser, sauf si vous savez exactement ce que vous faites. Tous les travaux et autres enregistrements associés (fichiers et supports) seront détruits du catalogue. Cliquer OK pour réaliser la destruction ? + + + + MediaList + + + Are you sure you want to delete?? !!!. +This delete command is used to delete a Volume record and all associated catalog records that were created. This command operates only on the Catalog database and has no effect on the actual data written to a Volume. This command can be dangerous and we strongly recommend that you do not use it unless you know what you are doing. All Jobs and all associated records (File and JobMedia) will be deleted from the catalog.Press OK to proceed with delete operation.? + Êtes-vous sûr de vouloir détruire?? !!!. +Cette commande est utilisée pour détruire un enregistrement "volume" et tous les enregistrements associés du catalogue. Cette commande influe uniquement sur le catalogue et n'a pas d'effet sur les données écritessur un volume. Cette commande peut être dangereuse et nous vous recommandons fortement de ne pas l'utiliser, sauf si vous savez exactement ce que vous faites. Tous les travaux et autres enregistrements associés (fichiers et supports) seront détruits du catalogue. Cliquer OK pour réaliser la destruction ? + + + + Are you sure you want to purge ?? !!!. +The Purge command will delete associated Catalog database records from Jobs and Volumes without considering the retention period. Purge works only on the Catalog database and does not affect data written to Volumes. This command can be dangerous because you can delete catalog records associated with current backups of files, and we recommend that you do not use it unless you know what you are doing. +Press OK to proceed with the purge operation? + Êtes-vous sûr de vouloir purger ?? !!!. +La commande de purge va détruire les enregistrements du catalogue associés (travaux et volumes) sans prendre en compte la durée de rétention. La purge ne concerne que le catalogue et n'affecte pas les données écrites sur les volumes. 
Cette commande peut être dangereuse car vous pouvez détruire des enregistrements associés aux sauveardes actuelles, et nous vous recommandons de ne pas l'utiliser, sauf si vous savez exactement ce que vous faites. +Cliquer OK pour réaliser la purge? + + + + Volume Name + Nom de volume + + + + Id + + + + + Status + État + + + + Enabled + Activé + + + + Bytes + Octets + + + + Files + Fichiers + + + + Jobs + Travaux + + + + Retention + Rétention + + + + Media Type + Type de support + + + + Slot + Slot + + + + Use Duration + Durée d'utilisation + + + + Max Jobs + nb max de travaux + + + + Max Files + nb max de fichiers + + + + Max Bytes + nb max d'octets + + + + Recycle + Recycler + + + + Last Written + Dernière écriture + + + + + Pools + Groupes + + + + First Written + Première écriture + + + + Read Time + Temps de lecture + + + + Write Time + Temps d'écriture + + + + Recycle Count + Compteur de recyclage + + + + Recycle Pool + Groupe de recyclage + + + + MediaListForm + + + Media Tree + Arbre des supports + + + + Refresh Media List + Actualiser la liste des supports + + + + Requery the director for the list of media. + Interroger le director pour la liste des supports. + + + + Edit Volume + Éditer le volume + + + + List Jobs On Volume + Lister les travaux du volume + + + + Delete Volume + Détruire le volume + + + + Prune Volume + Élaguer le volume (prune) + + + + Purge Volume + Purger le volume + + + + Relabel Volume + Ré-étiqueter le volume + + + + + + + Update all Volumes From Pool + Mettre à jour tous les volumes du groupe + + + + + + + Update all Volumes from all Pools + Mettre à jour tous les volumes de tous les groupes + + + + Volume From Pool + Volumes du groupe + + + + MediaView + + + Media + Support + + + + Are you sure you want to purge ?? !!!. +The Purge command will delete associated Catalog database records from Jobs and Volumes without considering the retention period. Purge works only on the Catalog database and does not affect data written to Volumes. This command can be dangerous because you can delete catalog records associated with current backups of files, and we recommend that you do not use it unless you know what you are doing. +Press OK to proceed with the purge operation? + Êtes-vous sûr de vouloir purger ?? !!!. +La commande de purge va détruire les enregistrements du catalogue associés (travaux et volumes) sans prendre en compte la durée de rétention. La purge ne concerne que le catalogue et n'affecte pas les données écrites sur les volumes. Cette commande peut être dangereuse car vous pouvez détruire des enregistrements associés aux sauvegardes actuelles, et nous vous recommandons de ne pas l'utiliser, sauf si vous savez exactement ce que vous faites. +Cliquer OK pour réaliser la purge ? + + + + Are you sure you want to delete?? !!!. +This delete command is used to delete a Volume record and all associated catalog records that were created. This command operates only on the Catalog database and has no effect on the actual data written to a Volume. This command can be dangerous and we strongly recommend that you do not use it unless you know what you are doing. All Jobs and all associated records (File and JobMedia) will be deleted from the catalog.Press OK to proceed with delete operation.? + Êtes-vous sûr de vouloir détruire?? !!!. +Cette commande est utilisée pour détruire un enregistrement "volume" et tous les enregistrements associés du catalogue. Cette commande influe uniquement sur le catalogue et n'a pas d'effet sur les données écritessur un volume. 
Cette commande peut être dangereuse et nous vous recommandons fortement de ne pas l'utiliser, sauf si vous savez exactement ce que vous faites. Tous les travaux et autres enregistrements associés (fichiers et supports) seront détruits du catalogue. Cliquer OK pour réaliser la destruction ? + + + + MediaViewForm + + + Form + Form + + + + Edit + Éditer + + + + Purge + Purger + + + + Delete + Détruire + + + + Prune + Élaguer (prune) + + + + Filter + Filtre + + + + Media Type: + Type de support : + + + + Status: + État : + + + + Limit: + Limite : + + + + Name: + Nom : + + + + Pool: + Groupe :Pool : + + + + Location: + Lieu : + + + + Expired + Expiré + + + + Apply + Appliquer + + + + Volume Name + Nom du volume + + + + Online + Disponible + + + + Slot + Emplacement + + + + Vol Bytes + Octets + + + + Vol Usage + Utilisation + + + + Vol Status + ÉTAT + + + + Pool + gROUPE + + + + Media Type + Type de support + + + + Last Written + Dernière écriture + + + + When expire? + Expire le ? + + + + Pages + + + %1 of Director %2 + %1 du Director %2 + + + + UnDock %1 Window + Déplacer %1 vers une nouvelle fenêtre + + + + ReDock %1 Window + Remettre %1 en onglet + + + + Console + Console + + + + PrefsForm + + + Preferences + Préférences + + + Messages + Messages + + + + Messages Options + Options des Messages + + + + Message check interval in seconds + Intervalle de vérification + + + + Check Messages + Activer la vérification auto + + + + Joblist + Travaux + + + + Joblist Limit Options + Options de limitation sur Travaux + + + + Days Limit + Limitation en jour + + + + Record Limit + Limitation sur la taille + + + + Misc + Autres + + + + Convert + Conversion + + + + Convert Off + Désactiver la conversion + + + Convert Bytes with IEC 1000B = KB + Convertir les octets avec IEC 1000B = KB + + + Convert Bytes with 1024B = KB + Convertir les octets avec 1024B = KB + + + + Context Sensitive List Commands + Liste de commandes sensible au contexte + + + + Execute Long List + + + + + Open Plot page on startup + + + + + Open Browser page on startup + Ouvrir l'onglet de navigation au démarrage + + + + Open Director Status page on startup + Ouvrir l'onglet d'état du director au démarrage + + + + Debug + Déboguer + + + + + Debugging Options + Options de débogage + + + + Debug comm + Débogage communications avec le director + + + + Display all messages in console + Afficher tous les messages dans la console + + + + Debug Commands + Débogage des commandes + + + + Debug Sql queries + Débogage des requêtes SQL + + + Debug Miscelaneous Items + Débogage divers + + + + Debug Miscellaneous Items + + + + + RestoreTree + Arbre de restauration + + + + Restore Debug 2 + + + + + Directory Item Changed + + + + + Restore Debug 1 + + + + + Directory Current Item Changed Debug + + + + + Update File Table Debug + + + + + Version Table Item Changed Debug + + + + + File Table Item Changed Debug + + + + + Icon State Debug + + + + + Update Checks Debug + + + + + Restore Debug 3 + + + + + Update Version Table Debug + + + + + Populate Directory Debug + + + + + <h2>Preferences</h2> + <h2>Préférences</h2> + + + + Timers + Sabliers + + + + Display Bytes using IEC units (1024B = 1 KiB) + Afficher les octets avec les unités IEC (1 KiO = 1000 octets) + + + + Display Bytes using SI units (1000B = 1KB) + Afficher les octets avec les unités SI (1 KO = 1000 octets) + + + + Open Pages + Ouverture d'onglets + + + + Debug multiple connection + + + + + QObject + + + Any + Tous + + + + + + No + Non + + + + + + Yes + Oui + + + + Invalid job status %1 + État 
de travail invalide : %1 + + + + StorStat + + + Storage Status %1 + Etat du dépôt %1 + + + + Job Id + Id du travail + + + + Job Level + Niveau du Travail + + + + Job Files + Fichiers + + + + Job Bytes + Octets + + + + Job Status + État + + + + Job Time + Heure + + + + Job Name + Nom + + + + StorStatForm + + + Form + Form + + + + Header + Entête + + + + Waitreservation + Attente de réservation + + + + Devices + Lecteurs + + + + Volumes + Volumes + + + + Spooling + + + + + Running + En cours + + + + Misc + Autres + + + + Mount + Monter + + + + UMount + Démonter + + + + Label + Étiquette + + + + Release + Libérer + + + + Refresh Timer + Sablier de rafraîchissement + + + + Do Refresh + Activer le sablier + + + + <html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:13pt; font-weight:600;">Terminated Jobs</span></p></body></html> + <html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:13pt; font-weight:600;">Travaux Terminés</span></p></body></html> + + + + Refresh + Actualiser + + + + Cancel Running Job + Annuler le travail en cours + + + + Disable Scheduled Job + Désactiver le job planifié + + + + Storage + + + + Storage + Dépôt + + + + Name + Nom + + + + Id + Id + + + + Changer + Changeur + + + + Slot + Emplacement + + + + Status + État + + + + Enabled + Activé + + + + Pool + Groupe + + + + Media Type + Type de support + + + + Yes + Oui + + + + Status Storage "%1" + État du dépôt "%1" + + + + Status Storage "%1" in Window + État du dépôt "%1" (dans une fenêtre) + + + + Label media in Storage "%1" + Étiqueter le support dans le dépôt "%1" + + + + Mount media in Storage "%1" + Monter le support dans le dépôt "%1" + + + + "UN" Mount media in Storage "%1" + Démonter le support dans le dépôt "%1" + + + + Release media in Storage "%1" + Libérer le support dans le dépôt "%1" + + + + Barcode Scan media in Storage "%1" + Scanner le code-barre du support dans le dépôt "%1" + + + + Read scan media in Storage "%1" + + + + + Storage Status %1 + État du dépôt %1 + + + + StorageForm + + + Storage Tree + Arbre des dépôts + + + + Refresh Storage List + Rafraichir la liste + + + + Requery the director for the list of storage objects. + Réinterroger le director pour la liste des dépôts. 
+ + + + + Status Storage In Console + État du dépôt (dans la console) + + + + + Label Storage + Étiqueter le dépôt + + + + + MountStorage + Monter + + + + + UnMount Storage + Démonter + + + + + Update Slots + Mettre à jour les slots + + + + + Update Slots Scan + Mettre à jour les slots (scan) + + + + Release + Relâcher + + + + 1 + 1 + + + + Status Storage Window + État du dépôt (dans une fenêtre) + + + + bRestore + + + bRestore + b-Restauration + + + + bRestoreForm + + + brestore + brestore + + + + + Type + Type + + + + File Name + Nom de fichier + + + + + + Size + Taille + + + + + + Date + Date + + + + InChanger + dans changeur + + + + Volume + Volume + + + + + JobId + Id du travail + + + + Chksum + Somme de contrôle + + + + Clear + Effacer + + + + Estimate + Estimer + + + Restore + Restaurer + + + + FileName + Nom de fichier + + + + FileIndex + Position du fichier + + + + Nb Files + Nb de fichiers + + + + Merge Jobs + Fusionner les travaux + + + + View all Versions + Voir toutes les versions + + + + Current Directory + Dossier courant + + + + File list -- drag below for restore + Liste des fichiers -- glisser en-dessous pour restaurer + + + + Double Click File Name to decend + Double-cliquer le dossier pour y entrer, double-cliquer le fichier pour voir ses versions + + + + Double Click to decend + Double-cliquer pour entrer + + + + Selection Range + Rang de sélection + + + + - + - + + + + File Filter + Filtre de fichiers + + + + File revisions -- drag below for restore + Versions de fichier -- glisser en-dessous pour restaurer + + + + <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> +<html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal;"> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Drag and drop <span style=" font-weight:600;">File list</span> and/or <span style=" font-weight:600;">File revisions</span> items here for Restore</p></body></html> + <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> +<html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal;"> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Glisser et déplacer ici depuis <span style=" font-weight:600;">"Liste des fichiers"</span> et/ou depuis <span style=" font-weight:600;">"Versions de fichier"</span> les éléments à restaurer.</p></body></html> + + + + Restore... 
+ + + + + Select from Client list drop down then select from Job list drop down + Choisir un élément dans la liste déroulante "Client List" puis dans la liste déroulante "Job list" + + + + bRunRestoreForm + + + Run restore + Lancer la restauration + + + + Standard + Standard + + + + Restore options + Options de restauration + + + + Client: + Client : + + + + Where: + Où : + + + + Replace: + Remplacer : + + + + Comment: + Commentaire : + + + + Media needed + Supports nécessaires + + + + InChanger + dans Changeur + + + + Volume + Volume + + + + Compute with directories + + + + + Advanced + Avancé + + + + File Relocation + Déplacement de fichiers + + + + Use file +relocation: + Utiliser + + + + Strip prefix: + préfixe à supprimer : + + + + Add prefix: + préfixe à ajouter : + + + + Add suffix: + suffixe à ajouter : + + + + Use regexp: + utiliser une "regexp" : + + + + Where +regexp: + "regexp" : + + + + Other options + Autres options + + + + When: + Quand : + + + + Priority: + Priorité : + + + + Storage: + Dépôt : + + + + Job: + Travail : + + + + estimateForm + + + Form + Form + + + + List Files + Lister les fichiers + + + + Level: + Niveau : + + + + Client: + Client : + + + + Job: + Travail : + + + + FileSet: + Jeu de fichiers : + + + + <h3>Estimate a backup Job</h3> + <h3>Estimer un travail de sauvegarde</h3> + + + + OK + OK + + + + Cancel + Annuler + + + + estimatePage + + + Estimate + Estimer + + + + helpForm + + + Form + Form + + + + &Home + &Accueil + + + + &Back + &En arrière + + + + Close + Fermer + + + + jobsForm + + + Client Tree + Arborescence Client + + + + Refresh Jobs List + Actualiser la liste des travaux + + + + Requery the director for the list of clients. + Interroger le director pour la liste des clients. + + + + + + + List Files Command + commande "list files" : liste des fichiers + + + + + + + List Volumes Command + commande "list volumes" : liste des volumes + + + + + + + List Next Volume Command + commande "next volume" : volume suivant + + + + + + + Enable Job Command + commande "enable job" : activer le travail + + + + + + + Disable Job Command + commande "disable job" : désactiver le travail + + + + + + + Open JobList on Job + Ouvrie la liste des exécutions de ce travail + + + + + Cancel Job Command + commande "cancel job" : annuler le travail + + + + RunJob + Èxécuter ce travail + + + + labelForm + + + Form + Form + + + + OK + OK + + + + Cancel + Annuler + + + + Volume Name: + Nom du volume : + + + + Storage: + Dépôt : + + + + Slot: + Emplacement : + + + + Execute Automount + Montage automatique + + + + On + Activer + + + + Off + Désactiver + + + + Pool: + Groupe : + + + + <html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:12pt; font-weight:600;">Label a Volume</span></p></body></html> + <html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:12pt; font-weight:600;">Étiqueter un volume</span></p></body></html> + + + + 
Enter Name of Volume to create + Entrer le nom du volume à créer + + + + mediaEditForm + + + Form + Form + + + + Pool: + Groupe : + + + + Volume Status: + État du volume : + + + + Max Volume Bytes: + Nb Max d'octets : + + + + Slot: + Emplacement : + + + + Max Volume Jobs: + Nb Max de Travaux : + + + + Use Duration: + Durée d'utilisation : + + + + OK + OK + + + + Cancel + Annuler + + + + Recycle Pool: + Groupe de recyclage : + + + + Enabled + Activé + + + + Max Volume Files: + nb max de fichiers : + + + + Years + Années + + + + Seconds + Secondes + + + + Use Duration + Durée d'utilisation + + + + Days + Jours + + + + Hours + Heures + + + + Months + Mois + + + + Retention + Rétention + + + + Minutes + Minutes + + + + Volume : + Volume : + + + + Recycle + Recycler + + + + <html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:12pt; font-weight:600;">Edit a Volume</span></p></body></html> + <html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:12pt; font-weight:600;">Éditer un Volume</span></p></body></html> + + + + Retention: + Rétention : + + + + mediaInfoForm + + + Form + Form + + + + Edit + Éditer + + + + Purge + Purger + + + + Delete + Supprimer + + + + Prune + Élaguer (prune) + + + + Load + Charger + + + + Unload + Décharger + + + + Information + Informations + + + + Name: + Nom : + + + + Vol0001 + + + + + Pool: + Groupe : + + + + Default + + + + + Online: + Disponible : + + + + Enabled: + Activé : + + + + yes + + + + + Location: + Lieu : + + + + Vault + + + + + Status: + État : + + + + Append + + + + + Media Type: + Type de support : + + + + File + + + + + Recycle Pool: + Groupe de recyclage : + + + + Scratch + + + + + Statistics + Statistiques + + + + Vol Bytes: + Octets : + + + + 19.8 MB + + + + + Vol Mounts: + Montages : + + + + 10 + 10 + + + + Recycle count: + Recyclages : + + + + 5 + 5 + + + + Read time: + Temps de lecture : + + + + 10 mins + + + + + Write time: + Temps d'écriture : + + + + 20 mins + + + + + Errors: + Erreurs : + + + + + + + + 0 + 0 + + + + Last Written: + Dernière écriture : + + + + 2009-07-05 12:23:00 + 2009-07-05 12:23:00 + + + + First Written: + Première écriture : + + + + 2009-06-05 10:00:00 + 2009-06-05 10:00:00 + + + + Limits + Limitations + + + + Use duration: + Durée d'utilisation : + + + + Max jobs: + Nb max de travaux : + + + + Max files: + Nb max de fichiers : + + + + Max bytes: + Nb max d'octets : + + + + Recycle: + Recyclage : + + + + Retention: + Rétention : + + + + 365 days + + + + + Expire: + Expire le : + + + + 2010-08-03 23:10:03 + 2010-08-03 23:10:03 + + + + Jobs + Travaux + + + + JobId + Id du travail + + + + Name + Nom + + + + Start Time + début le + + + + Type + Type + + + + Level + Niveau + + + + Files + Fichiers + + + + Bytes + Octets + + + + Status + État + + + + mountDialog + + + Storage : %1 + Dépôt : %1 + + + + No Storage name + Pas de nom de dépôt + + + + No Storage name given + Pas de nom de 
dépôt indiqué + + + + Context sensitive command : + + + Commande sensible au contexte : + + + + + + Director Response : + + + Réponse du Director : + + + + mountForm + + + Label + Étiquette + + + + TextLabel + TextLabel + + + + Slot: + Emplacement : + + + + <html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:12pt; font-weight:600;">Mount a Slot</span></p></body></html> + <html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:12pt; font-weight:600;">Monter un emplacement</span></p></body></html> + + + + prefsDialog + + + Canceled + Annulé + + + + prerestoreForm + + + Form + Form + + + + All Files + Tous les fichiers + + + + Select Files + Fichiers sélectionnés + + + + Job + Travail + + + + JobIds + Travaux + + + + yyyy-mm-dd h:mm:ss + aaaa-mm-jj h:mm:ss + + + + Use Most Recent + Utiliser le plus récent + + + + File Set: + File Set : + + + + Client: + Client : + + + + Storage: + Dépôt : + + + + Before: + Avant : + + + + Pool: + Pool : + + + + OK + OK + + + + Cancel + Annuler + + + + <h3>Select Jobs</h3> + <h3>Sélectionner les Travaux</h3> + + + + prerestorePage + + + Bat + Bat + + + + There can be no spaces in the text for the joblist. +Press OK to continue? + Il ne peut pas y avoir d'espace dans la liste des travaux. +Appuyer sur OK pour continuer ? + + + + + Comma separated list of JobIds + + + + + At least one of the jobs is not a valid job of type "Backup". +Press OK to continue? + Il y a au moins un travail qui n'est pas du type "Backup" (Sauvegarde). +Appuyer sur OK pour continuer ? + + + + All jobs in the list must be of the same jobName and same client. +Press OK to continue? + Tous les travaux de la liste doivent avoir le même nom et le même Client. +Appuyer sur OK pour continuer ? + + + + Restore + Restaurer + + + + + + Any + Tous + + + Comma separted list of Job Ids + Liste de "id de travail" séparés par des virgules + + + + Canceled + Annulé + + + + The string is not a comma separated list of integers. +Press OK to continue? + La chaine n'est pas une liste de nombres séparés par des virgules. +Appuyer sur OK pour continuer ? 
+ + + + pruneForm + + + Form + Form + + + + Prune Files + Élaguer (Prune) les fichiers + + + + Volume: + Volume : + + + + <h3>Prune Files/Jobs/Volumes</h3> + <h3>Élaguer (Prune) les Fichiers/Travaux/Volumes</h3> + + + + Prune Jobs + Élaguer (prune) les travaux + + + + OK + OK + + + + Cancel + Annuler + + + + Client: + Client : + + + + Prune Volumes + Élaguer (Pruner) les Volumes + + + + prunePage + + + Prune + Élaguer (prune) + + + + + + + + + Any + Tous + + + + Canceled + Annulé + + + + relabelDialog + + + From Volume : + Volume : + + + + No Volume name + Pas de nom de volume + + + + No Volume name given + Pas de nom de Volume donné + + + + + New name must be different + Le nouveau nom doit être différent + + + + relabelForm + + + Label + Label + + + + From Volume : + Ancien Volume : + + + + Pool: + Pool : + + + + Storage: + Dépôt : + + + + New Volume Name: + Nouveau nom de Volume : + + + + Slot: + Slot : + + + + <html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:12pt; font-weight:600;">Relabel a Volume</span></p></body></html> + <html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:12pt; font-weight:600;">Relabéliser un Volume</span></p></body></html> + + + + restoreForm + + + Form + Form + + + + + 1 + 1 + + + + 2 + 2 + + + + 3 + 3 + + + + 4 + 4 + + + + 5 + 5 + + + + 6 + 6 + + + <h2>Directories</h2> + <h2>Répertoires</h2> + + + + <h3>Restore Select</h3> + <h3>Restaurer la sélection</h3> + + + + Up + Dossier parent + + + + + Mark + Sélectionner + + + + Unmark + Désélectionner + + + + <h2>Files</h2> + <h2>Fichiers</h2> + + + + Status: + État : + + + + Current Dir: + Dossier courant : + + + + OK + OK + + + + Cancel + Annuler + + + + UnMark + Désélectionner + + + + Select a Directory + Choisir un dossier + + + + Directories + Dossiers + + + + restorePage + + + Restore Select + Sélection pour restauration + + + + Mark + Sélectionner + + + + File + Fichier + + + + Mode + Mode + + + + User + Propriétaire + + + + Group + Groupe + + + + Size + Taille + + + + Date + Date + + + + In else of if parent cwd "%1" newdir "%2" + + Dans le "else" du "if parent" cwd "%1" newdir "%2" + + + + Canceled + Annulé + + + + In addDirectory cwd "%1" newdir "%2" fullpath "%3" + + Dans "addDirectory" cwd "%1" newdir "%2" fullpath "%3" + + + + + Nothing selected, nothing done + Rien de sélectionné, rien de fait + + + + cd command failed + la commande cd a échoué + + + + .pwd command failed + la commande ".pwd" a échoué + + + + restoreTree + + + Version Browser + Navigateur temporel + + + + Directories + Dossiers + + + + + + + + + Any + Tous + + + + Refresh From JobChecks + Rafraichir depuis JobChecks + + + Task + Taches + + + + Querying Database + Interrogation en cours + + + + Querying Jobs + Interrogation des travaux + + + + Querying for Directories + Interrogation des dossiers + + + + Processing Directories + Analyse des dossiers + + + + File Name + Nom de fichier + + 
+ + Filename Id + Id du nom de fichier + + + + + Job Id + + + + + + + Type + Type + + + + + End Time + Heure de fin + + + + Hash + + + + + FileId + FileId + + + + RestoreTreePage + Onglet de l'arbre de restauration + + + + Level + Niveau + + + + Name + Nom + + + + + Purged + Purgé + + + + + TU + + + + + + TD + + + + + Refresh From Re-Select + + + + + + + Task %1 of %2 + Tâche %1 sur %2 + + + + No jobs were selected in the job query !!!. +Press OK to continue + La requête ne renvoie aucun travail !!! +Cliquer OK pour continuer + + + + Present Working Directory: %1 + Dossier de travail actuel : %1 + + + + Job Type + Type de travail + + + + First Volume + Premier volume + + + + Task 1 of 3 + Tâche 1 sur 3 + + + + Processing Checked directories + Traitement des dossiers + + + + Task 2 of 3 + Tâche 2 sur 3 + + + + Processing Exceptions + Traitement des exceptions + + + + Task 3 of 3 + Tâche 3 sur 3 + + + + Filling Database Table + Remplissage de la base de données + + + + restoreTreeForm + + + Form + Form + + + + Jobs + Travaux + + + + + + TextLabel + TextLabel + + + + Files + Fichiers + + + + Versions of File + Versions du fichier + + + + FileName + Nom de fichier + + + + Refresh + Actualiser + + + + Restore + Restaurer + + + + Job + Travail + + + + + Job List Job Criterion Selector + Job List Job Criterion Selector + + + + Client + Client + + + + + Job List Client Criterion Selector + Job List Client Criterion Selector + + + + FileSet + Jeu de fichiers + + + + + Job List Fileset Criterion Selector + Job List Fileset Criterion Selector + + + + Record Limit + Limitation en nombre + + + + Days Limit + Limitation en jour + + + + Directory + Dossier + + + + Select Directory + Sélectionner le dossier + + + + UnselectDirectory + Désélectionner le dossier + + + + runCmdForm + + + Form + Form + + + + Priority: + Priorité : + + + + yyyy-mm-dd hh:mm:ss + aaaa-mm-jj hh:mm:ss + + + + When: + Quand : + + + + Where: + Ou : + + + + Bootstrap: + Bootstrap : + + + + Job: + Travail : + + + + Storage: + Dépôt : + + + + FileSet: + Jeu de fichiers : + + + + Replace: + Remplacer : + + + + To client: + Vers le client : + + + + Catalog: + Catalogue : + + + + OK + OK + + + + Cancel + Annuler + + + + <h3>Run Restore Job</h3> + <h3>Lancer la restauration</h3> + + + + runCmdPage + + + Restore Run + Restauration + + + + + never + jamais ... mais il faudrait retraduire dans okButtonPushed. + never + + + + always + toujours ... mais il faudrait retraduire dans okButtonPushed. + always + + + + ifnewer + si nouveau ... mais il faudrait retraduire dans okButtonPushed. + si nouveau + + + + ifolder + si plus vieux ... mais il faudrait retraduire dans okButtonPushed. 
+ si ancien + + + + Canceled + Annulé + + + + runForm + + Form + Form + + + + Level: + Niveau : + + + + Bootstrap: + Bootstrap : + + + + yyyy-mm-dd hh:mm:ss + aaaa-mm-jj h:mm:ss + + + + Job: + Travail : + + + + Pool: + Groupe : + + + + Type: + Type : + + + + <h3>Backup<h3/> + <h3>Sauvegarde</h3> + + + + FileSet: + Jeu de fichiers : + + + + Messages: + Messages : + + + + <h3>Run a Job</h3> + <h3>Lancer un Travail</h3> + + + + Priority: + Priorité : + + + + Client: + Client : + + + + OK + OK + + + + Cancel + Annuler + + + + Storage: + Dépôt : + + + + When: + Quand : + + + + Run job + Lancer le travail + + + + Job properties + Propriétés du travail + + + + runPage + + + Run + Lancer + + + + Canceled + Annulé + + + + selectDialog + + + Canceled + Annulé + + + + selectForm + + + Selection dialog + Dialogue de sélection + + + + textInputDialog + + + Canceled + Annulé + + + + textInputForm + + Selection dialog + Dialogue de sélection + + + TextLabel + TextLabel + + + + Text input dialog + + + + + Message + + + + + yesnoPopUp + + + Bat Question + Bat Question + + + diff --git a/src/qt-console/util/comboutil.cpp b/src/qt-console/util/comboutil.cpp new file mode 100644 index 00000000..d388a283 --- /dev/null +++ b/src/qt-console/util/comboutil.cpp @@ -0,0 +1,143 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ + +/* + * ComboBox helper functions + * + * Riccardo Ghetta, May 2008 + * + */ + +#include "bat.h" +#include +#include +#include +#include "fmtwidgetitem.h" +#include "comboutil.h" + +static const QString QS_ANY(QObject::tr("Any")); + + +/* selects value val on combo, if exists */ +void comboSel(QComboBox *combo, const QString &val) +{ + int index = combo->findText(val, Qt::MatchExactly); + if (index != -1) { + combo->setCurrentIndex(index); + } +} + +/* if the combo has selected something different from "Any" uses the selection + * to build a condition on field fldname and adds it to the condition list */ +void comboCond(QStringList &cndlist, const QComboBox *combo, const char *fldname) +{ + int index = combo->currentIndex(); + if (index != -1 && combo->itemText(index) != QS_ANY) { + cndlist.append( QString("%1='%2'").arg(fldname).arg(combo->itemText(index)) ); + } +} + + +/* boolean combo (yes/no) */ +void boolComboFill(QComboBox *combo) +{ + combo->addItem(QS_ANY, -1); + combo->addItem(QObject::tr("No"), 0); + combo->addItem(QObject::tr("Yes"), 1); +} + +void boolComboCond(QStringList &cndlist, const QComboBox *combo, const char *fldname) +{ + int index = combo->currentIndex(); + if (index != -1 && combo->itemData(index).toInt() >= 0 ) { + QString cnd = combo->itemData(index).toString(); + cndlist.append( QString("%1='%2'").arg(fldname).arg(cnd) ); + } +} + +/* backup level combo */ +void levelComboFill(QComboBox *combo) +{ + combo->addItem(QS_ANY); + combo->addItem(job_level_to_str(L_FULL), L_FULL); + combo->addItem(job_level_to_str(L_INCREMENTAL), L_INCREMENTAL); + combo->addItem(job_level_to_str(L_DIFFERENTIAL), L_DIFFERENTIAL); + combo->addItem(job_level_to_str(L_SINCE), L_SINCE); + combo->addItem(job_level_to_str(L_VERIFY_CATALOG), L_VERIFY_CATALOG); + combo->addItem(job_level_to_str(L_VERIFY_INIT), L_VERIFY_INIT); + combo->addItem(job_level_to_str(L_VERIFY_VOLUME_TO_CATALOG), L_VERIFY_VOLUME_TO_CATALOG); + combo->addItem(job_level_to_str(L_VERIFY_DISK_TO_CATALOG), L_VERIFY_DISK_TO_CATALOG); + combo->addItem(job_level_to_str(L_VERIFY_DATA), L_VERIFY_DATA); + /* combo->addItem(job_level_to_str(L_BASE), L_BASE); base jobs ignored */ +} + +void levelComboCond(QStringList &cndlist, const QComboBox *combo, const char *fldname) +{ + int index = combo->currentIndex(); + if (index != -1 && combo->itemText(index) != QS_ANY ) { + QString cnd = combo->itemData(index).toChar(); + cndlist.append( QString("%1='%2'").arg(fldname).arg(cnd) ); + } +} + +/* job status combo */ +void jobStatusComboFill(QComboBox *combo) +{ + static const char js[] = { + JS_Terminated, + JS_Created, + JS_Running, + JS_Blocked, + JS_ErrorTerminated, + JS_Error, + JS_FatalError, + JS_Differences, + JS_Canceled, + JS_WaitFD, + JS_WaitSD, + JS_WaitMedia, + JS_WaitMount, + JS_WaitStoreRes, + JS_WaitJobRes, + JS_WaitClientRes, + JS_WaitMaxJobs, + JS_WaitStartTime, + JS_WaitPriority, + JS_AttrDespooling, + JS_AttrInserting, + JS_DataDespooling, + JS_DataCommitting, + '\0'}; + + int pos; + + combo->addItem(QS_ANY); + for (pos = 0 ; js[pos] != '\0' ; ++pos) { + combo->addItem(convertJobStatus( QString(js[pos]) ), js[pos]); + } +} + +void jobStatusComboCond(QStringList &cndlist, const QComboBox *combo, const char *fldname) +{ + int index = combo->currentIndex(); + if (index != -1 && combo->itemText(index) != QS_ANY ) { + QString cnd = combo->itemData(index).toChar(); + cndlist.append( QString("%1='%2'").arg(fldname).arg(cnd) ); + } +} diff --git a/src/qt-console/util/comboutil.h b/src/qt-console/util/comboutil.h new file 
mode 100644 index 00000000..e7823155 --- /dev/null +++ b/src/qt-console/util/comboutil.h @@ -0,0 +1,55 @@ +#ifndef _COMBOUTIL_H_ +#define _COMBOUTIL_H_ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Combobox helpers - Riccardo Ghetta, May 2008 + */ + +class QComboBox; +class QString; +class QStringList; + +/* selects value val on combo, if exists */ +void comboSel(QComboBox *combo, const QString &val); + +/* if the combo has selected something different from "Any" uses the selection + * to build a condition on field fldname and adds it to the condition list */ +void comboCond(QStringList &cndlist, const QComboBox *combo, const char *fldname); + +/* these helpers are used to give an uniform content to common combos. + * There are two routines per combo type: + * - XXXXComboFill fills the combo with values. + * - XXXXComboCond checks the combo and, if selected adds a condition + * on the field fldName to the list of conditions cndList + */ + +/* boolean combo (yes/no) */ +void boolComboFill(QComboBox *combo); +void boolComboCond(QStringList &cndlist, const QComboBox *combo, const char *fldname); + +/* backup level combo */ +void levelComboFill(QComboBox *combo); +void levelComboCond(QStringList &cndlist, const QComboBox *combo, const char *fldname); + +/* job status combo */ +void jobStatusComboFill(QComboBox *combo); +void jobStatusComboCond(QStringList &cndlist, const QComboBox *combo, const char *fldname); + +#endif /* _COMBOUTIL_H_ */ diff --git a/src/qt-console/util/fmtwidgetitem.cpp b/src/qt-console/util/fmtwidgetitem.cpp new file mode 100644 index 00000000..2642ee38 --- /dev/null +++ b/src/qt-console/util/fmtwidgetitem.cpp @@ -0,0 +1,551 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ + +/* + * + * Helper functions for tree widget formatting + * + * Riccardo Ghetta, May 2008 + * + */ + +#include "../bat.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include "fmtwidgetitem.h" + +/*********************************************** + * + * common helpers + * + ***********************************************/ + +QString convertJobStatus(const QString &sts) +{ + QString code( sts.trimmed() ); + if ( code.size() != 1) { + return QObject::tr("Invalid job status %1").arg(sts); + } + + char buf[256]; + jobstatus_to_ascii_gui( code[0].toLatin1(), buf, sizeof(buf)); + return QString(buf); +} + +/* + * disable widget updating + */ +Freeze::Freeze(QWidget &q): +qw(&q) +{ + qw->setUpdatesEnabled(false); +} + +Freeze::~Freeze() +{ + if (qw) { + qw->setUpdatesEnabled(true); + qw->update(); + } +} + +/*********************************************** + * + * ItemFormatterBase static members + * + ***********************************************/ + +ItemFormatterBase::BYTES_CONVERSION ItemFormatterBase::cnvFlag(BYTES_CONVERSION_IEC); + +/* String to Electronic value based on K=1024 */ +QString convertBytesIEC(qint64 qfld) +{ + static const qint64 KB = Q_INT64_C(1024); + static const qint64 MB = (KB * KB); + static const qint64 GB = (MB * KB); + static const qint64 TB = (GB * KB); + static const qint64 PB = (TB * KB); + static const qint64 EB = (PB * KB); + + /* note: division is integer, so to have some decimals we divide for a + smaller unit (e.g. GB for a TB number and so on) */ + char suffix; + if (qfld >= EB) { + qfld /= PB; + suffix = 'E'; + } + else if (qfld >= PB) { + qfld /= TB; + suffix = 'P'; + } + else if (qfld >= TB) { + qfld /= GB; + suffix = 'T'; + } + else if (qfld >= GB) { + qfld /= MB; + suffix = 'G'; + } + else if (qfld >= MB) { + qfld /= KB; + suffix = 'M'; + } + else if (qfld >= KB) { + suffix = 'K'; + } + else { + /* plain bytes, no need to reformat */ + return QString("%1 B").arg(qfld); + } + + /* After dividing into a smaller value, we can safely convert from + * to a double double and use the extra room for decimals + */ + return QString("%1 %2iB").arg(qfld / 1024.0, 0, 'f', 2).arg(suffix); +} + +/* String to human value based on k=1000 */ +QString convertBytesSI(qint64 qfld) +{ + static const qint64 KB = Q_INT64_C(1000); + static const qint64 MB = (KB * KB); + static const qint64 GB = (MB * KB); + static const qint64 TB = (GB * KB); + static const qint64 PB = (TB * KB); + static const qint64 EB = (PB * KB); + + /* Note: division is integer, so to have some decimals we divide for a + smaller unit (e.g. 
GB for a TB number and so on) */ + char suffix; + if (qfld >= EB) { + qfld /= PB; + suffix = 'E'; + } + else if (qfld >= PB) { + qfld /= TB; + suffix = 'P'; + } + else if (qfld >= TB) { + qfld /= GB; + suffix = 'T'; + } + else if (qfld >= GB) { + qfld /= MB; + suffix = 'G'; + } + else if (qfld >= MB) { + qfld /= KB; + suffix = 'M'; + } + else if (qfld >= KB) { + suffix = 'k'; /* SI uses lowercase k */ + } + else { + /* plain bytes, no need to reformat */ + return QString("%1 B").arg(qfld); + } + + /* having divided for a smaller unit, now we can safely convert to double and + use the extra room for decimals */ + return QString("%1 %2B").arg(qfld / 1000.0, 0, 'f', 2).arg(suffix); +} + +/*********************************************** + * + * base formatting routines + * + ***********************************************/ + +ItemFormatterBase::ItemFormatterBase() +{ +} + +ItemFormatterBase::~ItemFormatterBase() +{ +} + +void ItemFormatterBase::setPercent(int index, float value) +{ + char buf[100]; + bsnprintf(buf, sizeof(buf), "%.2f%%", value); + QString val = buf; + QString pix; + if (value < 8) { + pix = ":images/0p.png"; + } else if (value < 24) { + pix = ":images/16p.png"; + } else if (value < 40) { + pix = ":images/32p.png"; + } else if (value < 56) { + pix = ":images/48p.png"; + } else if (value < 72) { + pix = ":images/64p.png"; + } else if (value < 88) { + pix = ":images/80p.png"; + } else { + pix = ":images/96p.png"; + } + setPixmap(index, QPixmap(pix), val); + //setSortValue(index, (int) value); + //setBackground(index, Qt::green); +} + +/* By default, the setPixmap implementation with tooltip don't implement + * the tooltip stuff + */ +void ItemFormatterBase::setPixmap(int index, const QPixmap &pix, + const QString & /* tip */) +{ + setPixmap(index, pix); +} + +void ItemFormatterBase::setInChanger(int index, const QString &InChanger) +{ + setPixmap(index, QPixmap(":images/inflag"+InChanger+".png")); + //setSortValue(index, InChanger.toInt() ); +} + +void ItemFormatterBase::setFileType(int index, const QString &type) +{ + setPixmap(index, QPixmap(":images/"+type+".png")); + //setSortValue(index, InChanger.toInt() ); +} + +void ItemFormatterBase::setTextFld(int index, const QString &fld, bool center) +{ + setText(index, fld.trimmed()); + if (center) { + setTextAlignment(index, Qt::AlignCenter); + } +} + +void ItemFormatterBase::setDateFld(int index, utime_t fld, bool center) +{ + char buf[200]; + bstrutime(buf, sizeof(buf), fld); + setText(index, QString(buf).trimmed()); + if (center) { + setTextAlignment(index, Qt::AlignCenter); + } +} + +void ItemFormatterBase::setRightFld(int index, const QString &fld) +{ + setText(index, fld.trimmed()); + setTextAlignment(index, Qt::AlignRight | Qt::AlignVCenter); +} + +void ItemFormatterBase::setBoolFld(int index, const QString &fld, bool center) +{ + if (fld.trimmed().toInt()) + setTextFld(index, QObject::tr("Yes"), center); + else + setTextFld(index, QObject::tr("No"), center); +} + +void ItemFormatterBase::setBoolFld(int index, int fld, bool center) +{ + if (fld) + setTextFld(index, QObject::tr("Yes"), center); + else + setTextFld(index, QObject::tr("No"), center); +} + +void ItemFormatterBase::setNumericFld(int index, const QString &fld) +{ + setRightFld(index, fld.trimmed()); + setSortValue(index, fld.toDouble() ); +} + +void ItemFormatterBase::setNumericFld(int index, const QString &fld, const QVariant &sortval) +{ + setRightFld(index, fld.trimmed()); + setSortValue(index, sortval ); +} + +void ItemFormatterBase::setBytesFld(int index, 
const QString &fld) +{ + qint64 qfld = fld.trimmed().toLongLong(); + QString msg; + switch (cnvFlag) { + case BYTES_CONVERSION_NONE: + msg = QString::number(qfld); + break; + case BYTES_CONVERSION_IEC: + msg = convertBytesIEC(qfld); + break; + case BYTES_CONVERSION_SI: + msg = convertBytesSI(qfld); + break; + default: + msg = " "; + break; + } + + setNumericFld(index, msg, QVariant(qfld)); +} + +void ItemFormatterBase::setDurationFld(int index, const QString &fld) +{ + static const qint64 HOUR = Q_INT64_C(3600); + static const qint64 DAY = HOUR * 24; + static const qint64 WEEK = DAY * 7; + static const qint64 MONTH = DAY * 30; + static const qint64 YEAR = DAY * 365; + static const qint64 divs[] = { YEAR, MONTH, WEEK, DAY, HOUR }; + static const char sufs[] = { 'y', 'm', 'w', 'd', 'h', '\0' }; + + qint64 dfld = fld.trimmed().toLongLong(); + + char suffix = 's'; + if (dfld) { + for (int pos = 0 ; sufs[pos] ; ++pos) { + if (dfld % divs[pos] == 0) { + dfld /= divs[pos]; + suffix = sufs[pos]; + break; + } + } + } + QString msg; + if (dfld < 100) { + msg = QString("%1%2").arg(dfld).arg(suffix); + } else { + /* previous check returned a number too big. The original specification perhaps + was mixed, like 1d 2h, so we try to match with this routine */ + dfld = fld.trimmed().toLongLong(); + msg = ""; + for (int pos = 0 ; sufs[pos] ; ++pos) { + if (dfld / divs[pos] != 0) { + msg += QString(" %1%2").arg(dfld / divs[pos]).arg(sufs[pos]); + dfld %= divs[pos]; + } + } + if (dfld) + msg += QString(" %1s").arg(dfld); + } + + setNumericFld(index, msg, QVariant(fld.trimmed().toLongLong())); +} + +void ItemFormatterBase::setVolStatusFld(int index, const QString &fld, bool center) +{ + QString mp(fld.trimmed()); + setTextFld(index, volume_status_to_str(mp.toUtf8()), center); + + if (mp == "Append" ) { + setBackground(index, Qt::green); + } else if (mp == "Error") { + setBackground(index, Qt::red); + } else if (mp == "Used" || mp == "Full"){ + setBackground(index, Qt::yellow); + } else if (mp == "Read-only" || mp == "Disabled"){ + setBackground(index, Qt::lightGray); + } +} + +void ItemFormatterBase::setJobStatusFld(int index, const QString &status, bool center) +{ + /* C (created, not yet running) uses the default background */ + static QString greenchars("TR"); + static QString redchars("BEf"); + static QString yellowchars("eDAFSMmsjdctp"); + + setTextFld(index, convertJobStatus(status), center); + + QString st(status.trimmed()); + if (greenchars.contains(st, Qt::CaseSensitive)) { + setBackground(index, Qt::green); + } else if (redchars.contains(st, Qt::CaseSensitive)) { + setBackground(index, Qt::red); + } else if (yellowchars.contains(st, Qt::CaseSensitive)){ + setBackground(index, Qt::yellow); + } +} + +void ItemFormatterBase::setJobTypeFld(int index, const QString &fld, bool center) +{ + QByteArray jtype(fld.trimmed().toLatin1()); + if (jtype.size()) { + setTextFld(index, job_type_to_str(jtype[0]), center); + } else { + setTextFld(index, "", center); + } +} + +void ItemFormatterBase::setJobLevelFld(int index, const QString &fld, bool center) +{ + QByteArray lvl(fld.trimmed().toLatin1()); + if (lvl.size()) { + setTextFld(index, job_level_to_str(lvl[0]), center); + } else { + setTextFld(index, "", center); + } +} + + + +/*********************************************** + * + * treeitem formatting routines + * + ***********************************************/ +TreeItemFormatter::TreeItemFormatter(QTreeWidgetItem &parent, int indent_level): +ItemFormatterBase(), +wdg(new QTreeWidgetItem(&parent)), 
+level(indent_level) +{ +} + +void TreeItemFormatter::setText(int index, const QString &fld) +{ + wdg->setData(index, Qt::UserRole, level); + wdg->setText(index, fld); +} + +void TreeItemFormatter::setTextAlignment(int index, int align) +{ + wdg->setTextAlignment(index, align); +} + +void TreeItemFormatter::setBackground(int index, const QBrush &qb) +{ + wdg->setBackground(index, qb); +} + +/* at this time we don't sort trees, so this method does nothing */ +void TreeItemFormatter::setSortValue(int /* index */, const QVariant & /* value */) +{ +} + +void TreeItemFormatter::setPixmap(int index, const QPixmap &pix) +{ + wdg->setIcon(index, QIcon(pix)); +} + +/*********************************************** + * + * Specialized table widget used for sorting + * + ***********************************************/ +TableItemFormatter::BatSortingTableItem::BatSortingTableItem(): +QTableWidgetItem(1) +{ +} + +void TableItemFormatter::BatSortingTableItem::setSortData(const QVariant &d) +{ + setData(SORTDATA_ROLE, d); +} + +bool TableItemFormatter::BatSortingTableItem::operator< ( const QTableWidgetItem & o ) const +{ + QVariant my = data(SORTDATA_ROLE); + QVariant other = o.data(SORTDATA_ROLE); + if (!my.isValid() || !other.isValid() || my.type() != other.type()) + return QTableWidgetItem::operator< (o); /* invalid combination, revert to default sorting */ + + /* 64bit integers must be handled separately, others can be converted to double */ + if (QVariant::ULongLong == my.type()) { + return my.toULongLong() < other.toULongLong(); + } else if (QVariant::LongLong == my.type()) { + return my.toLongLong() < other.toLongLong(); + } else if (my.canConvert(QVariant::Double)) { + return my.toDouble() < other.toDouble(); + } else { + return QTableWidgetItem::operator< (o); /* invalid combination, revert to default sorting */ + } +} + +/*********************************************** + * + * tableitem formatting routines + * + ***********************************************/ +TableItemFormatter::TableItemFormatter(QTableWidget &tparent, int trow): +ItemFormatterBase(), +parent(&tparent), +row(trow), +last(NULL) +{ +} + +void TableItemFormatter::setPixmap(int index, const QPixmap &pix) +{ +// Centered, but not sortable ! + QLabel *lbl = new QLabel(); + lbl->setAlignment(Qt::AlignCenter); + lbl->setPixmap(pix); + parent->setCellWidget(row, index, lbl); +} + +void TableItemFormatter::setPixmap(int index, const QPixmap &pix, + const QString &tips) +{ +// Centered, but not sortable ! 
+ QLabel *lbl = new QLabel(); + lbl->setAlignment(Qt::AlignCenter); + lbl->setPixmap(pix); + if (!tips.isEmpty()) { + lbl->setToolTip(tips); + } + parent->setCellWidget(row, index, lbl); + +// last = new BatSortingTableItem; +// parent->setItem(row, index, last); +// last->setIcon(pix); +} + +void TableItemFormatter::setText(int col, const QString &fld) +{ + last = new BatSortingTableItem; + parent->setItem(row, col, last); + last->setText(fld); +} + +void TableItemFormatter::setTextAlignment(int /*index*/, int align) +{ + last->setTextAlignment(align); +} + +void TableItemFormatter::setBackground(int /*index*/, const QBrush &qb) +{ + last->setBackground(qb); +} + +void TableItemFormatter::setSortValue(int /* index */, const QVariant &value ) +{ + last->setSortData(value); +} + +QTableWidgetItem *TableItemFormatter::widget(int col) +{ + return parent->item(row, col); +} + +const QTableWidgetItem *TableItemFormatter::widget(int col) const +{ + return parent->item(row, col); +} diff --git a/src/qt-console/util/fmtwidgetitem.h b/src/qt-console/util/fmtwidgetitem.h new file mode 100644 index 00000000..859977dd --- /dev/null +++ b/src/qt-console/util/fmtwidgetitem.h @@ -0,0 +1,217 @@ +#ifndef _FMTWIDGETITEM_H_ +#define _FMTWIDGETITEM_H_ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * TreeView formatting helpers - Riccardo Ghetta, May 2008 + */ + +class QWidget; +class QTreeWidgetItem; +class QTableWidget; +class QTableWidgetItem; +class QString; +class QBrush; + + +/* + * common conversion routines + * + */ +QString convertJobStatus(const QString &sts); + +/* bytes formatted as power-of-two with IEC suffixes (KiB, MiB, and so on) */ +QString convertBytesIEC(qint64 fld); + +/* bytes formatted as power-of-ten with SI suffixes (kB, MB, and so on) */ +QString convertBytesSI(qint64 fld); + +/* + * disable widget updating + */ +class Freeze +{ +private: + QWidget *qw; + + public: + Freeze(QWidget &q); + ~Freeze(); +}; + + + +/* + * base class for formatters + * + */ +class ItemFormatterBase +{ +public: + enum BYTES_CONVERSION { + BYTES_CONVERSION_NONE, + BYTES_CONVERSION_IEC, + BYTES_CONVERSION_SI, + }; + +public: + virtual ~ItemFormatterBase(); + + /* Prints Yes if fld is != 0, No otherwise. Centers field if center true*/ + void setBoolFld(int index, const QString &fld, bool center = true); + void setBoolFld(int index, int fld, bool center = true); + + /* Print nice icon to represent percent */ + void setPercent(int index, float number); + + /* Normal text field. Centers field if center true*/ + void setTextFld(int index, const QString &fld, bool center = false); + + /* Normal date field. Centers field if center true*/ + void setDateFld(int index, utime_t fld, bool center = false); + + /* Right-aligned text field. 
*/ + void setRightFld(int index, const QString &fld); + + /* Numeric field - sorted as numeric type */ + void setNumericFld(int index, const QString &fld); + void setNumericFld(int index, const QString &fld, const QVariant &sortVal); + + /* fld value interpreted as bytes and formatted with size suffixes */ + void setBytesFld(int index, const QString &fld); + + /* fld value interpreted as seconds and formatted with y,m,w,h suffixes */ + void setDurationFld(int index, const QString &fld); + + /* fld value interpreted as volume status. Colored accordingly */ + void setVolStatusFld(int index, const QString &fld, bool center = true); + + /* fld value interpreted as job status. Colored accordingly */ + void setJobStatusFld(int index, const QString &status, bool center = true); + + /* fld value interpreted as job type. */ + void setJobTypeFld(int index, const QString &fld, bool center = false); + + /* fld value interpreted as job level. */ + void setJobLevelFld(int index, const QString &fld, bool center = false); + + /* fld value interpreted as Online/Offline */ + void setInChanger(int index, const QString &InChanger); + + /* fld value interpreted as file or folder */ + void setFileType(int index, const QString &type); + + static void setBytesConversion(BYTES_CONVERSION b) { + cnvFlag = b; + } + static BYTES_CONVERSION getBytesConversion() { + return cnvFlag; + } + +protected: + /* only derived classes can create one of these */ + ItemFormatterBase(); + + virtual void setText(int index, const QString &fld) = 0; + virtual void setTextAlignment(int index, int align) = 0; + virtual void setBackground(int index, const QBrush &) = 0; + virtual void setPixmap(int index, const QPixmap &pix) = 0; + virtual void setPixmap(int index, const QPixmap &pix, const QString &tip); + + /* sets the *optional* value used for sorting */ + virtual void setSortValue(int index, const QVariant &value) = 0; + +private: + static BYTES_CONVERSION cnvFlag; +}; + +/* + * This class can be used instead of QTreeWidgetItem (it allocates one internally, + * to format data fields. + * All setXXXFld routines receive a column index and the unformatted string value. + */ +class TreeItemFormatter : public ItemFormatterBase +{ +public: + + TreeItemFormatter(QTreeWidgetItem &parent, int indent_level); + + /* access internal widget */ + QTreeWidgetItem *widget() { return wdg; } + const QTreeWidgetItem *widget() const { return wdg; } + +protected: + virtual void setText(int index, const QString &fld); + virtual void setTextAlignment(int index, int align); + virtual void setBackground(int index, const QBrush &); + virtual void setSortValue(int index, const QVariant &value); + virtual void setPixmap(int index, const QPixmap &pix); + +private: + QTreeWidgetItem *wdg; + int level; +}; + +/* + * This class can be used instead of QTableWidgetItem (it allocates one internally, + * to format data fields. + * All setXXXFld routines receive the column and the unformatted string value. 
+ */ +class TableItemFormatter : public ItemFormatterBase +{ +private: + + /* specialized widget item - allows an optional data property for sorting */ + class BatSortingTableItem : public QTableWidgetItem + { + private: + static const int SORTDATA_ROLE = Qt::UserRole + 100; + public: + BatSortingTableItem(); + + /* uses the sort data if available, reverts to default behavior othervise */ + virtual bool operator< ( const QTableWidgetItem & o ) const; + + /* set the value used for sorting - MUST BE A NUMERIC TYPE */ + void setSortData(const QVariant &d); + }; + +public: + + TableItemFormatter(QTableWidget &parent, int row); + + /* access internal widget at column col*/ + QTableWidgetItem *widget(int col); + const QTableWidgetItem *widget(int col) const; + +protected: + virtual void setText(int index, const QString &fld); + virtual void setTextAlignment(int index, int align); + virtual void setBackground(int index, const QBrush &); + virtual void setSortValue(int index, const QVariant &value); + virtual void setPixmap(int index, const QPixmap &pix); + virtual void setPixmap(int index, const QPixmap &pix, const QString &tip); + +private: + QTableWidget *parent; + int row; + BatSortingTableItem *last; +}; + +#endif /* _FMTWIDGETITEM_H_ */ diff --git a/src/qt-console/win32/qmake.conf b/src/qt-console/win32/qmake.conf new file mode 100644 index 00000000..7a4c04be --- /dev/null +++ b/src/qt-console/win32/qmake.conf @@ -0,0 +1,93 @@ +# +# qmake configuration for win32-g++ +# +# Written for MinGW +# + +MAKEFILE_GENERATOR = MINGW +TEMPLATE = app +CONFIG += qt warn_on release link_prl copy_dir_files debug_and_release debug_and_release_target precompile_header cross-win32 +QT += core gui +DEFINES += UNICODE QT_LARGEFILE_SUPPORT +QMAKE_COMPILER_DEFINES += __GNUC__ WIN32 + +QMAKE_EXT_OBJ = .o +QMAKE_EXT_RES = _res.o + +QMAKE_LEX = flex +QMAKE_LEXFLAGS = +QMAKE_YACC = byacc +QMAKE_YACCFLAGS = -d +QMAKE_CFLAGS = -DHAVE_MINGW -DHAVE_WIN32 -DHAVE_MINGW_W64 +QMAKE_CFLAGS_DEPS = -M +QMAKE_CFLAGS_WARN_ON = -Wall +QMAKE_CFLAGS_WARN_OFF = -w +QMAKE_CFLAGS_RELEASE = -O2 +QMAKE_CFLAGS_DEBUG = -g -O2 +QMAKE_CFLAGS_YACC = -Wno-unused -Wno-parentheses + +QMAKE_CXXFLAGS = $$QMAKE_CFLAGS -DHAVE_MINGW -DHAVE_WIN32 -DHAVE_ZLIB_H -DHAVE_LIBZ -DHAVE_CRYPTO -DHAVE_OPENSSL -DHAVE_TLS -DHAVE_MINGW_W64 +QMAKE_CXXFLAGS_DEPS = $$QMAKE_CFLAGS_DEPS +QMAKE_CXXFLAGS_WARN_ON = $$QMAKE_CFLAGS_WARN_ON +QMAKE_CXXFLAGS_WARN_OFF = $$QMAKE_CFLAGS_WARN_OFF +QMAKE_CXXFLAGS_RELEASE = $$QMAKE_CFLAGS_RELEASE +QMAKE_CXXFLAGS_DEBUG = $$QMAKE_CFLAGS_DEBUG +QMAKE_CXXFLAGS_YACC = $$QMAKE_CFLAGS_YACC +QMAKE_CXXFLAGS_THREAD = $$QMAKE_CFLAGS_THREAD +QMAKE_CXXFLAGS_RTTI_ON = -frtti +QMAKE_CXXFLAGS_RTTI_OFF = -fno-rtti +QMAKE_CXXFLAGS_EXCEPTIONS_ON = -fexceptions -mthreads +QMAKE_CXXFLAGS_EXCEPTIONS_OFF = -fno-exceptions + +QMAKE_INCDIR = $(DEPKGS)/depkgs-mingw32/include/pthreads $(DEPKGS)/depkgs-mingw32/include/ ../win32/compat +QMAKE_INCDIR_QT = $(DEPKGS)/depkgs-mingw32/include/qt +QMAKE_LIBDIR_QT = $(DEPKGS)/depkgs-mingw32/lib/qt + +QMAKE_RUN_CC = $(CXX) -c $(CFLAGS) $(INCPATH) -o $obj $src +QMAKE_RUN_CC_IMP = $(CXX) -c $(CFLAGS) $(INCPATH) -o $@ $< +QMAKE_RUN_CXX = $(CXX) -c $(CXXFLAGS) $(INCPATH) -o $obj $src +QMAKE_RUN_CXX_IMP = $(CXX) -c $(CXXFLAGS) $(INCPATH) -o $@ $< + +QMAKE_LINK = i686-w64-mingw32-g++ +QMAKE_LFLAGS = -mthreads -Wl,-enable-stdcall-fixup -Wl,-enable-auto-import -m32 -fno-strict-aliasing -Wl,-enable-runtime-pseudo-reloc + +QMAKE_LFLAGS_EXCEPTIONS_ON = -mthreads -Wl +QMAKE_LFLAGS_EXCEPTIONS_OFF = +QMAKE_LFLAGS_RELEASE = -Wl,-s 
+QMAKE_LFLAGS_DEBUG = +QMAKE_LFLAGS_CONSOLE = -Wl,-subsystem,console +QMAKE_LFLAGS_WINDOWS = -Wl,-subsystem,windows +QMAKE_LFLAGS_DLL = -shared +QMAKE_LINK_OBJECT_MAX = 10 +QMAKE_LINK_OBJECT_SCRIPT= object_script + + +QMAKE_LIBS = -lwsock32 -lstdc++ +QMAKE_LIBS_CORE = -lkernel32 -luser32 -lshell32 -luuid -lole32 -ladvapi32 -lws2_32 +QMAKE_LIBS_GUI = -lgdi32 -lcomdlg32 -loleaut32 -limm32 -lwinmm -lwinspool -lws2_32 -lole32 -luuid -luser32 -ladvapi32 +QMAKE_LIBS_NETWORK = -lws2_32 +QMAKE_LIBS_OPENGL = -lopengl32 -lglu32 -lgdi32 -luser32 +QMAKE_LIBS_COMPAT = -ladvapi32 -lshell32 -lcomdlg32 -luser32 -lgdi32 -lws2_32 +QMAKE_LIBS_QT_ENTRY = -lmingw32 -lqtmain + +MINGW_IN_SHELL = 1 +QMAKE_DIR_SEP = / +QMAKE_COPY = cp +QMAKE_COPY_DIR = cp -r +QMAKE_MOVE = mv +QMAKE_DEL_FILE = rm -f +QMAKE_MKDIR = mkdir -p +QMAKE_DEL_DIR = rm -rf +QMAKE_RCC = rcc +QMAKE_CHK_DIR_EXISTS = test -d + +QMAKE_MOC = $$[QT_INSTALL_BINS]$${DIR_SEPARATOR}moc +QMAKE_UIC = $$[QT_INSTALL_BINS]$${DIR_SEPARATOR}uic +QMAKE_IDC = $$[QT_INSTALL_BINS]$${DIR_SEPARATOR}idc + +QMAKE_IDL = midl +QMAKE_ZIP = zip -r -9 + +QMAKE_STRIP = i686-w64-mingw32-strip +QMAKE_STRIPFLAGS_LIB += --strip-unneeded +load(qt_config) diff --git a/src/qt-console/win32/qplatformdefs.h b/src/qt-console/win32/qplatformdefs.h new file mode 100644 index 00000000..4802c2b6 --- /dev/null +++ b/src/qt-console/win32/qplatformdefs.h @@ -0,0 +1,161 @@ +/**************************************************************************** +** +** Copyright (C) 1992-2007 Trolltech ASA. All rights reserved. +** +** This file is part of the qmake spec of the Qt Toolkit. +** +** This file may be used under the terms of the GNU General Public +** License version 2.0 as published by the Free Software Foundation +** and appearing in the file LICENSE.GPL included in the packaging of +** this file. Please review the following information to ensure GNU +** General Public Licensing requirements will be met: +** http://trolltech.com/products/qt/licenses/licensing/opensource/ +** +** If you are unsure which license is appropriate for your use, please +** review the following information: +** http://trolltech.com/products/qt/licenses/licensing/licensingoverview +** or contact the sales department at sales@trolltech.com. +** +** In addition, as a special exception, Trolltech gives you certain +** additional rights. These rights are described in the Trolltech GPL +** Exception version 1.0, which can be found at +** http://www.trolltech.com/products/qt/gplexception/ and in the file +** GPL_EXCEPTION.txt in this package. +** +** In addition, as a special exception, Trolltech, as the sole copyright +** holder for Qt Designer, grants users of the Qt/Eclipse Integration +** plug-in the right for the Qt/Eclipse Integration to link to +** functionality provided by Qt Designer and its related libraries. +** +** Trolltech reserves all rights not expressly granted herein. +** +** Trolltech ASA (c) 2007 +** +** This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE +** WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
+** +****************************************************************************/ + +#ifndef QPLATFORMDEFS_H +#define QPLATFORMDEFS_H + +#ifdef UNICODE +#ifndef _UNICODE +#define _UNICODE +#endif +#endif + +// Get Qt defines/settings + +#include "qglobal.h" + +#include "winhdrs.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if !defined(_WIN32_WINNT) || (_WIN32_WINNT-0 < 0x0500) +typedef enum { + NameUnknown = 0, + NameFullyQualifiedDN = 1, + NameSamCompatible = 2, + NameDisplay = 3, + NameUniqueId = 6, + NameCanonical = 7, + NameUserPrincipal = 8, + NameCanonicalEx = 9, + NameServicePrincipal = 10, + NameDnsDomain = 12 +} EXTENDED_NAME_FORMAT, *PEXTENDED_NAME_FORMAT; +#endif + +#define Q_FS_FAT +#ifdef QT_LARGEFILE_SUPPORT +#define QT_STATBUF struct _stati64 // non-ANSI defs +#define QT_STATBUF4TSTAT struct _stati64 // non-ANSI defs +#define QT_STAT ::_stati64 +#define QT_FSTAT ::_fstati64 +#else +#define QT_STATBUF struct _stat // non-ANSI defs +#define QT_STATBUF4TSTAT struct _stat // non-ANSI defs +#define QT_STAT ::_stat +#define QT_FSTAT ::_fstat +#endif +#define QT_STAT_REG _S_IFREG +#define QT_STAT_DIR _S_IFDIR +#define QT_STAT_MASK _S_IFMT +#if defined(_S_IFLNK) +# define QT_STAT_LNK _S_IFLNK +#endif +#define QT_FILENO _fileno +#define QT_OPEN ::_open +#define QT_CLOSE ::_close +#ifdef QT_LARGEFILE_SUPPORT +#define QT_LSEEK ::_lseeki64 +#ifndef UNICODE +#define QT_TSTAT ::_stati64 +#else +#define QT_TSTAT ::_wstati64 +#endif +#else +#define QT_LSEEK ::_lseek +#ifndef UNICODE +#define QT_TSTAT ::_stat +#else +#define QT_TSTAT ::_wstat +#endif +#endif +#define QT_READ ::_read +#define QT_WRITE ::_write +#define QT_ACCESS ::_access +#define QT_GETCWD ::_getcwd +#define QT_CHDIR ::_chdir +#define QT_MKDIR ::_mkdir +#define QT_RMDIR ::_rmdir +#define QT_OPEN_LARGEFILE 0 +#define QT_OPEN_RDONLY _O_RDONLY +#define QT_OPEN_WRONLY _O_WRONLY +#define QT_OPEN_RDWR _O_RDWR +#define QT_OPEN_CREAT _O_CREAT +#define QT_OPEN_TRUNC _O_TRUNC +#define QT_OPEN_APPEND _O_APPEND +#if defined(O_TEXT) +# define QT_OPEN_TEXT _O_TEXT +# define QT_OPEN_BINARY _O_BINARY +#endif + +#define QT_FOPEN ::fopen +#ifdef QT_LARGEFILE_SUPPORT +#define QT_FSEEK ::fseeko64 +#define QT_FTELL ::ftello64 +#else +#define QT_FSEEK ::fseek +#define QT_FTELL ::ftell +#endif +#define QT_FGETPOS ::fgetpos +#define QT_FSETPOS ::fsetpos +#define QT_FPOS_T fpos_t +#ifdef QT_LARGEFILE_SUPPORT +#define QT_OFF_T off64_t +#else +#define QT_OFF_T long +#endif + +#define QT_SIGNAL_ARGS int + +#define QT_VSNPRINTF ::_vsnprintf +#define QT_SNPRINTF ::_snprintf + +# define F_OK 0 +# define X_OK 1 +# define W_OK 2 +# define R_OK 4 + + +#endif // QPLATFORMDEFS_H diff --git a/src/stored/Makefile.in b/src/stored/Makefile.in new file mode 100644 index 00000000..a0d3d7e2 --- /dev/null +++ b/src/stored/Makefile.in @@ -0,0 +1,325 @@ +# +# Copyright (C) 2000-2018 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +@MCOMMON@ + +srcdir = . +VPATH = . +.PATH: . + +sd_group=@sd_group@ + +# one up +basedir = .. +# top dir +topdir = ../.. 
+# this dir relative to top dir +thisdir = src/stored + +DEBUG=@DEBUG@ + +GETTEXT_LIBS = @LIBINTL@ + +S3_LIBS=@S3_LIBS@ +S3_INC=@S3_INC@ + +DB_LIBS=@DB_LIBS@ + +first_rule: all +dummy: + +# Bacula SD core objects needed by all executables +SDCORE_OBJS = \ + stored_conf.o global.o + +# bacula-sd +SDOBJS = \ + stored.o append.o authenticate.o dircmd.o fd_cmds.o job.o \ + hello.o status.o vbackup.o \ + $(SDCORE_OBJS) + +JSONOBJS = bsdjson.o stored_conf.o + +# btape +TAPEOBJS = btape.o $(SDCORE_OBJS) + +# bls +BLSOBJS = bls.o $(SDCORE_OBJS) + +# bextract +BEXTOBJS = bextract.o $(SDCORE_OBJS) + +# bscan +SCNOBJS = bscan.o $(SDCORE_OBJS) + +# bcopy +COPYOBJS = bcopy.o $(SDCORE_OBJS) + +ALIGNED_SRCS = \ + aligned_dev.c aligned_read.c aligned_write.c + +ALIGNED_OBJS = $(ALIGNED_SRCS:.c=.o) +ALIGNED_LOBJS = $(ALIGNED_SRCS:.c=.lo) + +CLOUD_SRCS = \ + cloud_dev.c cloud_parts.c cloud_transfer_mgr.c s3_driver.c file_driver.c + +CLOUD_OBJS = $(CLOUD_SRCS:.c=.o) +CLOUD_LOBJS = $(CLOUD_SRCS:.c=.lo) + +# cloud_test +CLOUDTESTOBJS = cloud_test.o $(SDCORE_OBJS) + +# libbacsd objects +LIBBACSD_SRCS = \ + acquire.c ansi_label.c askdir.c autochanger.c \ + block.c block_util.c butil.c dev.c device.c ebcdic.c \ + init_dev.c label.c lock.c match_bsr.c mount.c \ + null_dev.c os.c parse_bsr.c read.c read_records.c \ + record_read.c record_util.c record_write.c reserve.c \ + scan.c sd_plugins.c spool.c tape_alert.c tape_worm.c vol_mgr.c wait.c \ + fifo_dev.c file_dev.c tape_dev.c vtape_dev.c + +LIBBACSD_OBJS = $(LIBBACSD_SRCS:.c=.o) +LIBBACSD_LOBJS = $(LIBBACSD_SRCS:.c=.lo) +LIBBACSD_LT_RELEASE = @LIBBAC_LT_RELEASE@ + + +# these are the objects that are changed by the .configure process +EXTRAOBJS = @OBJLIST@ + +CAP_LIBS = @CAP_LIBS@ +ZLIBS=@ZLIBS@ +LZO_LIBS= @LZO_LIBS@ +LZO_INC= @LZO_INC@ + +SD_LIBS = -lbacsd -lbaccfg -lbac + + +.SUFFIXES: .c .o .lo +.PHONY: +.DONTCARE: + +# inference rules +.c.o: + @echo "Compiling $<" + $(NO_ECHO)$(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) $< + +.c.lo: + @echo "LT Compiling $<" + $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) $(TOKYOCABINET_INC) $(S3_INC) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) $< + + +#------------------------------------------------------------------------- + +all: Makefile libbacsd.la drivers bacula-sd @STATIC_SD@ \ + bls bextract bscan bcopy \ + bsdjson btape + @echo "===== Make of stored is good ====" + @echo " " + +bacula-sd: Makefile libbacsd.la $(SDOBJS) \ + ../lib/libbaccfg$(DEFAULT_ARCHIVE_TYPE) \ + ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) + @echo "Linking $@ ..." + $(LIBTOOL_LINK) $(CXX) $(WLDFLAGS) $(LDFLAGS) -L. -L../lib -o $@ $(SDOBJS) $(ZLIBS) \ + $(SD_LIBS) -lm $(DLIB) $(LIBS) $(WRAPLIBS) \ + $(GETTEXT_LIBS) $(OPENSSL_LIBS) $(CAP_LIBS) + +libbacsd.a: $(LIBBACSD_OBJS) + @echo "Making $@ ..." + $(AR) rc $@ $(LIBBACSD_OBJS) + $(RANLIB) $@ + +libbacsd.la: Makefile $(LIBBACSD_LOBJS) + @echo "Making $@ ..." 
+ $(LIBTOOL_LINK) $(CXX) $(DEFS) $(DEBUG) $(LDFLAGS) -o $@ \ + $(TOKYOCABINET_LIBS) $(LIBBACSD_LOBJS) \ + -export-dynamic -rpath $(libdir) -release $(LIBBACSD_LT_RELEASE) + +# +# Loadable driver +# +drivers: bacula-sd-cloud-driver.la bacula-sd-aligned-driver.la + +bacula-sd-cloud-driver.la: Makefile $(CLOUD_LOBJS) + $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -shared $(CLOUD_LOBJS) -o $@ $(S3_LIBS) -rpath $(libdir) -module -export-dynamic -release $(LIBBACSD_LT_RELEASE) + +bacula-sd-aligned-driver.la: Makefile $(ALIGNED_LOBJS) + $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -shared $(ALIGNED_LOBJS) -o $@ -rpath $(plugindir) \ + -module -export-dynamic -release $(LIBBACSD_LT_RELEASE) + + +bsdjson: Makefile $(JSONOBJS) ../lib/libbaccfg$(DEFAULT_ARCHIVE_TYPE) ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) + @echo "Linking $@ ..." + $(LIBTOOL_LINK) $(CXX) $(WLDFLAGS) $(LDFLAGS) -L../lib -o $@ $(JSONOBJS) $(ZLIBS) \ + -lbaccfg -lbac -lm $(DLIB) $(LIBS) $(WRAPLIBS) \ + $(GETTEXT_LIBS) $(OPENSSL_LIBS) $(CAP_LIBS) + +static-bacula-sd: Makefile $(SDOBJS) ../lib/libbaccfg$(DEFAULT_ARCHIVE_TYPE) ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) + $(LIBTOOL_LINK) $(CXX) $(WLDFLAGS) $(LDFLAGS) -static -L../lib -o $@ $(SDOBJS) $(ZLIBS) \ + $(SD_LIBS) -lm $(DLIB) $(LIBS) $(WRAPLIBS) \ + $(GETTEXT_LIBS) $(OPENSSL_LIBS) $(CAP_LIBS) + strip $@ + +btape.o: btape.c + @echo "Compiling $<" + $(NO_ECHO)$(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) \ + -I$(basedir) $(DINCLUDE) $(CFLAGS) $< + +btape: Makefile $(TAPEOBJS) libbacsd.la drivers ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) ../lib/libbaccfg$(DEFAULT_ARCHIVE_TYPE) + $(LIBTOOL_LINK) $(CXX) $(TTOOL_LDFLAGS) $(LDFLAGS) -L../lib -o $@ $(TAPEOBJS) \ + $(SD_LIBS) $(DLIB) -lm $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) + + +cloud_test.o: cloud_test.c + @echo "Compiling $<" + $(NO_ECHO)$(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) \ + -I$(basedir) $(DINCLUDE) $(CFLAGS) $< + +cloud_test: Makefile cloud_test.o ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) $(BLSOBJS) libbacsd.la drivers + $(LIBTOOL_LINK) $(CXX) $(TTOOL_LDFLAGS) $(LDFLAGS) -L../lib -L../findlib -o $@ $(CLOUDTESTOBJS) $(DLIB) \ + $(SD_LIBS) -lm $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) + +bls.o: bls.c + @echo "Compiling $<" + $(NO_ECHO)$(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) \ + -I$(basedir) $(DINCLUDE) $(CFLAGS) $< + +bls: Makefile $(BLSOBJS) libbacsd.la drivers ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE) ../lib/libbaccfg$(DEFAULT_ARCHIVE_TYPE) ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) + @echo "Compiling $<" + $(LIBTOOL_LINK) $(CXX) $(TTOOL_LDFLAGS) $(LDFLAGS) -L../lib -L../findlib -o $@ $(BLSOBJS) $(DLIB) \ + $(SD_LIBS) -lbacfind -lm $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) + +bextract.o: bextract.c + @echo "Compiling $<" + $(NO_ECHO)$(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) \ + -I$(basedir) $(DINCLUDE) $(CFLAGS) $(LZO_INC) $< + +bextract: Makefile $(BEXTOBJS) libbacsd.la drivers ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE) ../lib/libbaccfg$(DEFAULT_ARCHIVE_TYPE) ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) + @echo "Compiling $<" + $(LIBTOOL_LINK) $(CXX) $(TTOOL_LDFLAGS) $(LDFLAGS) -L../lib -L../findlib -o $@ $(BEXTOBJS) $(DLIB) $(ZLIBS) $(LZO_LIBS) \ + $(SD_LIBS) -lbacfind -lm $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) + +bscan.o: bscan.c + @echo "Compiling $<" + $(NO_ECHO)$(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) \ + -I$(basedir) $(DINCLUDE) $(CFLAGS) $< + +bscan: Makefile $(SCNOBJS) libbacsd.la drivers ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE) ../lib/libbaccfg$(DEFAULT_ARCHIVE_TYPE) \ + ../cats/libbacsql$(DEFAULT_ARCHIVE_TYPE) 
../cats/libbaccats$(DEFAULT_ARCHIVE_TYPE) + $(LIBTOOL_LINK) $(CXX) $(TTOOL_LDFLAGS) $(LDFLAGS) -L../lib -L../cats -L../findlib -o $@ $(SCNOBJS) \ + $(SD_LIBS) -lbacsql -lbaccats $(DB_LIBS) $(ZLIBS) -lbacfind -lbaccfg -lbac -lm $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) + +bcopy.o: bcopy.c + @echo "Compiling $<" + $(NO_ECHO)$(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) \ + -I$(basedir) $(DINCLUDE) $(CFLAGS) $< + +bcopy: Makefile $(COPYOBJS) libbacsd.la drivers ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE) ../lib/libbaccfg$(DEFAULT_ARCHIVE_TYPE) ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) + $(LIBTOOL_LINK) $(CXX) $(TTOOL_LDFLAGS) $(LDFLAGS) -L../lib -o $@ $(COPYOBJS) \ + $(SD_LIBS) -lm $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) + +cloud_parts_test: Makefile cloud_parts.c + $(RMF) cloud_parts.o + $(CXX) -DTEST_PROGRAM $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) cloud_parts.c + $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L../lib -o $@ cloud_parts.o $(DLIB) -lbac -lm $(LIBS) $(OPENSSL_LIBS) + rm -f cloud_parts.o + $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) cloud_parts.c + + +Makefile: $(srcdir)/Makefile.in $(topdir)/config.status + cd $(topdir) \ + && CONFIG_FILES=$(thisdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status + +libtool-install: all + $(MKDIR) $(DESTDIR)$(libdir) + $(RMF) $(DESTDIR)$(libdir)/libbacsd-*.so $(DESTDIR)$(libdir)/libbacsd.la + $(LIBTOOL_INSTALL_FINISH) $(INSTALL_LIB) libbacsd.la $(DESTDIR)$(libdir) + +libtool-uninstall: + $(LIBTOOL_UNINSTALL) $(RMF) $(DESTDIR)$(libdir)/libbacsd.la + +install: all @LIBTOOL_INSTALL_TARGET@ + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) bacula-sd $(DESTDIR)$(sbindir)/bacula-sd + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) bsdjson $(DESTDIR)$(sbindir)/bsdjson + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) bls $(DESTDIR)$(sbindir)/bls + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) bextract $(DESTDIR)$(sbindir)/bextract + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) bcopy $(DESTDIR)$(sbindir)/bcopy + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) bscan $(DESTDIR)$(sbindir)/bscan + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) btape $(DESTDIR)$(sbindir)/btape + @if test -f static-bacula-sd; then \ + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) static-bacula-sd $(DESTDIR)$(sbindir)/static-bacula-sd; \ + fi + @srcconf=bacula-sd.conf; \ + if test -f ${DESTDIR}${sysconfdir}/$$srcconf; then \ + destconf=$$srcconf.new; \ + echo " ==> Found existing $$srcconf, installing new conf file as $$destconf"; \ + else \ + destconf=$$srcconf; \ + fi; \ + echo "${INSTALL_CONFIG} $$srcconf ${DESTDIR}${sysconfdir}/$$destconf"; \ + ${INSTALL_CONFIG} $$srcconf ${DESTDIR}${sysconfdir}/$$destconf + @if test "x${sd_group}" != "x" -a "x${DESTDIR}" = "x" ; then \ + chgrp -f ${sd_group} ${DESTDIR}${sysconfdir}/$$destconf; \ + fi + +install-cloud: bacula-sd-cloud-driver.la + $(MKDIR) $(DESTDIR)$(plugindir) + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) bacula-sd-cloud-driver$(DEFAULT_SHARED_OBJECT_TYPE) $(DESTDIR)$(plugindir) + $(RMF) $(DESTDIR)$(plugindir)/bacula-sd-cloud-driver.la + +install-aligned: bacula-sd-aligned-driver.la + $(MKDIR) $(DESTDIR)$(plugindir) + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) bacula-sd-aligned-driver$(DEFAULT_SHARED_OBJECT_TYPE) $(DESTDIR)$(plugindir) + $(RMF) $(DESTDIR)$(plugindir)/bacula-sd-aligned-driver.la + +uninstall: + (cd $(DESTDIR)$(sbindir); $(RMF) bacula-sd bsdjson) + (cd $(DESTDIR)$(sbindir); $(RMF) bls) + (cd $(DESTDIR)$(sbindir); $(RMF) bextract) + (cd $(DESTDIR)$(sbindir); $(RMF) bcopy) + (cd $(DESTDIR)$(sbindir); $(RMF) bscan) + (cd 
$(DESTDIR)$(sbindir); $(RMF) btape) + (cd $(DESTDIR)$(sysconfdir); $(RMF) bacula-sd.conf bacula-sd.conf.new) + +libtool-clean: + @find . -name '*.lo' -print | xargs $(LIBTOOL_CLEAN) $(RMF) + @$(RMF) -r .libs _libs + @$(RMF) *.la + +clean: libtool-clean + @$(RMF) bacula-sd stored bls bextract bpool btape shmfree core core.* a.out *.o *.bak *~ *.intpro *.extpro 1 2 3 + @$(RMF) bscan bsdjson bcopy static-bacula-sd + +realclean: clean + @$(RMF) tags bacula-sd.conf + +distclean: realclean + if test $(srcdir) = .; then $(MAKE) realclean; fi + (cd $(srcdir); $(RMF) Makefile) + +devclean: realclean + if test $(srcdir) = .; then $(MAKE) realclean; fi + (cd $(srcdir); $(RMF) Makefile) + +# Semi-automatic generation of dependencies: +# Use cc -M because X11 `makedepend' doesn't work on all systems +# and it also includes system headers. +# `semi'-automatic since dependencies are generated at distribution time. + +depend: + @$(MV) Makefile Makefile.bak + @$(SED) "/^# DO NOT DELETE:/,$$ d" Makefile.bak > Makefile + @$(ECHO) "# DO NOT DELETE: nice dependency list follows" >> Makefile + @$(CXX) -S -M $(CPPFLAGS) $(XINC) $(S3_INC) $(TOKYOCABINET_INC) -I$(srcdir) -I$(basedir) *.c >> Makefile + @if test -f Makefile ; then \ + $(RMF) Makefile.bak; \ + else \ + $(MV) Makefile.bak Makefile; \ + echo " ======= Something went wrong with make depend. ======="; \ + fi + +# ----------------------------------------------------------------------- +# DO NOT DELETE: nice dependency list follows diff --git a/src/stored/acquire.c b/src/stored/acquire.c new file mode 100644 index 00000000..d81bf9c4 --- /dev/null +++ b/src/stored/acquire.c @@ -0,0 +1,799 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Routines to acquire and release a device for read/write + * + * Written by Kern Sibbald, August MMII + */ + +#include "bacula.h" /* pull in global headers */ +#include "stored.h" /* pull in Storage Deamon headers */ + +static int const rdbglvl = 100; + +/* Forward referenced functions */ +static void set_dcr_from_vol(DCR *dcr, VOL_LIST *vol); + +/********************************************************************* + * Acquire device for reading. + * The drive should have previously been reserved by calling + * reserve_device_for_read(). We read the Volume label from the block and + * leave the block pointers just after the label. + * + * Returns: false if failed for any reason + * true if successful + */ +bool acquire_device_for_read(DCR *dcr) +{ + DEVICE *dev; + JCR *jcr = dcr->jcr; + bool ok = false; + bool tape_previously_mounted; + VOL_LIST *vol; + bool try_autochanger = true; + int i; + int vol_label_status; + int retry = 0; + + Enter(rdbglvl); + dev = dcr->dev; + ASSERT2(!dev->adata, "Called with adata dev. 
Wrong!"); + dev->Lock_read_acquire(); + Dmsg2(rdbglvl, "dcr=%p dev=%p\n", dcr, dcr->dev); + Dmsg2(rdbglvl, "MediaType dcr=%s dev=%s\n", dcr->media_type, dev->device->media_type); + dev->dblock(BST_DOING_ACQUIRE); + + if (dev->num_writers > 0) { + Jmsg2(jcr, M_FATAL, 0, _("Acquire read: num_writers=%d not zero. Job %d canceled.\n"), + dev->num_writers, jcr->JobId); + goto get_out; + } + + /* Find next Volume, if any */ + vol = jcr->VolList; + if (!vol) { + char ed1[50]; + Jmsg(jcr, M_FATAL, 0, _("No volumes specified for reading. Job %s canceled.\n"), + edit_int64(jcr->JobId, ed1)); + goto get_out; + } + jcr->CurReadVolume++; + for (i=1; iCurReadVolume; i++) { + vol = vol->next; + } + if (!vol) { + Jmsg(jcr, M_FATAL, 0, _("Logic error: no next volume to read. Numvol=%d Curvol=%d\n"), + jcr->NumReadVolumes, jcr->CurReadVolume); + goto get_out; /* should not happen */ + } + set_dcr_from_vol(dcr, vol); + + if (generate_plugin_event(jcr, bsdEventDeviceOpen, dcr) != bRC_OK) { + Jmsg(jcr, M_FATAL, 0, _("generate_plugin_event(bsdEventDeviceOpen) Failed\n")); + goto get_out; + } + + Dmsg2(rdbglvl, "Want Vol=%s Slot=%d\n", vol->VolumeName, vol->Slot); + + /* + * If the MediaType requested for this volume is not the + * same as the current drive, we attempt to find the same + * device that was used to write the orginal volume. If + * found, we switch to using that device. + * + * N.B. A lot of routines rely on the dcr pointer not changing + * read_records.c even has multiple dcrs cached, so we take care + * here to release all important parts of the dcr and re-acquire + * them such as the block pointer (size may change), but we do + * not release the dcr. + */ + Dmsg2(rdbglvl, "MediaType dcr=%s dev=%s\n", dcr->media_type, dev->device->media_type); + if (dcr->media_type[0] && strcmp(dcr->media_type, dev->device->media_type) != 0) { + RCTX rctx; + DIRSTORE *store; + int stat; + + Jmsg4(jcr, M_INFO, 0, _("Changing read device. Want Media Type=\"%s\" have=\"%s\"\n" + " %s device=%s\n"), + dcr->media_type, dev->device->media_type, dev->print_type(), + dev->print_name()); + Dmsg4(rdbglvl, "Changing read device. Want Media Type=\"%s\" have=\"%s\"\n" + " %s device=%s\n", + dcr->media_type, dev->device->media_type, + dev->print_type(), dev->print_name()); + + generate_plugin_event(jcr, bsdEventDeviceClose, dcr); + + dev->dunblock(DEV_UNLOCKED); + + lock_reservations(); + memset(&rctx, 0, sizeof(RCTX)); + rctx.jcr = jcr; + jcr->read_dcr = dcr; + jcr->reserve_msgs = New(alist(10, not_owned_by_alist)); + rctx.any_drive = true; + rctx.device_name = vol->device; + store = new DIRSTORE; + memset(store, 0, sizeof(DIRSTORE)); + store->name[0] = 0; /* No storage name */ + bstrncpy(store->media_type, vol->MediaType, sizeof(store->media_type)); + bstrncpy(store->pool_name, dcr->pool_name, sizeof(store->pool_name)); + bstrncpy(store->pool_type, dcr->pool_type, sizeof(store->pool_type)); + store->append = false; + rctx.store = store; + clean_device(dcr); /* clean up the dcr */ + + /* + * Search for a new device + */ + stat = search_res_for_device(rctx); + release_reserve_messages(jcr); /* release queued messages */ + unlock_reservations(); + + if (stat == 1) { /* found new device to use */ + /* + * Switching devices, so acquire lock on new device, + * then release the old one. 
+ */ + dcr->dev->Lock_read_acquire(); /* lock new one */ + dev->Unlock_read_acquire(); /* release old one */ + dev = dcr->dev; /* get new device pointer */ + dev->dblock(BST_DOING_ACQUIRE); + + dcr->VolumeName[0] = 0; + Jmsg(jcr, M_INFO, 0, _("Media Type change. New read %s device %s chosen.\n"), + dev->print_type(), dev->print_name()); + Dmsg2(50, "Media Type change. New read %s device %s chosen.\n", + dev->print_type(), dev->print_name()); + if (generate_plugin_event(jcr, bsdEventDeviceOpen, dcr) != bRC_OK) { + Jmsg(jcr, M_FATAL, 0, _("generate_plugin_event(bsdEventDeviceOpen) Failed\n")); + goto get_out; + } + bstrncpy(dcr->VolumeName, vol->VolumeName, sizeof(dcr->VolumeName)); + dcr->setVolCatName(vol->VolumeName); + bstrncpy(dcr->media_type, vol->MediaType, sizeof(dcr->media_type)); + dcr->VolCatInfo.Slot = vol->Slot; + dcr->VolCatInfo.InChanger = vol->Slot > 0; + bstrncpy(dcr->pool_name, store->pool_name, sizeof(dcr->pool_name)); + bstrncpy(dcr->pool_type, store->pool_type, sizeof(dcr->pool_type)); + } else { + /* error */ + Jmsg1(jcr, M_FATAL, 0, _("No suitable device found to read Volume \"%s\"\n"), + vol->VolumeName); + Dmsg1(rdbglvl, "No suitable device found to read Volume \"%s\"\n", vol->VolumeName); + goto get_out; + } + } + Dmsg2(rdbglvl, "MediaType dcr=%s dev=%s\n", dcr->media_type, dev->device->media_type); + + dev->clear_unload(); + + if (dev->vol && dev->vol->is_swapping()) { + dev->vol->set_slot(vol->Slot); + Dmsg3(rdbglvl, "swapping: slot=%d Vol=%s dev=%s\n", dev->vol->get_slot(), + dev->vol->vol_name, dev->print_name()); + } + + init_device_wait_timers(dcr); + + tape_previously_mounted = dev->can_read() || dev->can_append() || + dev->is_labeled(); +// tape_initially_mounted = tape_previously_mounted; + + /* Volume info is always needed because of VolType */ + Dmsg1(rdbglvl, "dir_get_volume_info vol=%s\n", dcr->VolumeName); + if (!dir_get_volume_info(dcr, dcr->VolumeName, GET_VOL_INFO_FOR_READ)) { + Dmsg2(rdbglvl, "dir_get_vol_info failed for vol=%s: %s\n", + dcr->VolumeName, jcr->errmsg); + Jmsg1(jcr, M_WARNING, 0, "Read acquire: %s", jcr->errmsg); + } + dev->set_load(); /* set to load volume */ + + for ( ;; ) { + /* If not polling limit retries */ + if (!dev->poll && retry++ > 10) { + break; + } + dev->clear_labeled(); /* force reread of label */ + if (job_canceled(jcr)) { + char ed1[50]; + Mmsg1(dev->errmsg, _("Job %s canceled.\n"), edit_int64(jcr->JobId, ed1)); + Jmsg(jcr, M_INFO, 0, dev->errmsg); + goto get_out; /* error return */ + } + + dcr->do_unload(); + dcr->do_swapping(SD_READ); + dcr->do_load(SD_READ); + set_dcr_from_vol(dcr, vol); /* refresh dcr with desired volume info */ + + /* + * This code ensures that the device is ready for + * reading. If it is a file, it opens it. + * If it is a tape, it checks the volume name + */ + Dmsg1(rdbglvl, "open vol=%s\n", dcr->VolumeName); + if (!dev->open_device(dcr, OPEN_READ_ONLY)) { + if (!dev->poll) { + Jmsg4(jcr, M_WARNING, 0, _("Read open %s device %s Volume \"%s\" failed: ERR=%s\n"), + dev->print_type(), dev->print_name(), dcr->VolumeName, dev->bstrerror()); + } + goto default_path; + } + Dmsg1(rdbglvl, "opened dev %s OK\n", dev->print_name()); + + /* Read Volume Label */ + Dmsg0(rdbglvl, "calling read-vol-label\n"); + vol_label_status = dev->read_dev_volume_label(dcr); + switch (vol_label_status) { + case VOL_OK: + Dmsg1(rdbglvl, "Got correct volume. 
VOL_OK: %s\n", dcr->VolCatInfo.VolCatName); + ok = true; + dev->VolCatInfo = dcr->VolCatInfo; /* structure assignment */ + break; /* got it */ + case VOL_IO_ERROR: + Dmsg0(rdbglvl, "IO Error\n"); + /* + * Send error message generated by dev->read_dev_volume_label() + * only we really had a tape mounted. This supresses superfluous + * error messages when nothing is mounted. + */ + if (tape_previously_mounted) { + Jmsg(jcr, M_WARNING, 0, "Read acquire: %s", jcr->errmsg); + } + goto default_path; + case VOL_TYPE_ERROR: + Jmsg(jcr, M_FATAL, 0, "%s", jcr->errmsg); + goto get_out; + case VOL_NAME_ERROR: + Dmsg3(rdbglvl, "Vol name=%s want=%s drv=%s.\n", dev->VolHdr.VolumeName, + dcr->VolumeName, dev->print_name()); + if (dev->is_volume_to_unload()) { + goto default_path; + } + dev->set_unload(); /* force unload of unwanted tape */ + if (!unload_autochanger(dcr, -1)) { + /* at least free the device so we can re-open with correct volume */ + dev->close(dcr); + free_volume(dev); + } + dev->set_load(); + /* Fall through */ + default: + Jmsg1(jcr, M_WARNING, 0, "Read acquire: %s", jcr->errmsg); +default_path: + Dmsg0(rdbglvl, "default path\n"); + tape_previously_mounted = true; + + /* + * If the device requires mount, close it, so the device can be ejected. + */ + if (dev->requires_mount()) { + dev->close(dcr); + free_volume(dev); + } + + /* Call autochanger only once unless ask_sysop called */ + if (try_autochanger) { + int stat; + Dmsg2(rdbglvl, "calling autoload Vol=%s Slot=%d\n", + dcr->VolumeName, dcr->VolCatInfo.Slot); + stat = autoload_device(dcr, SD_READ, NULL); + if (stat > 0) { + try_autochanger = false; + continue; /* try reading volume mounted */ + } + } + + /* Mount a specific volume and no other */ + Dmsg0(rdbglvl, "calling dir_ask_sysop\n"); + if (!dir_ask_sysop_to_mount_volume(dcr, SD_READ)) { + goto get_out; /* error return */ + } + + /* Volume info is always needed because of VolType */ + Dmsg1(150, "dir_get_volume_info vol=%s\n", dcr->VolumeName); + if (!dir_get_volume_info(dcr, dcr->VolumeName, GET_VOL_INFO_FOR_READ)) { + Dmsg2(150, "dir_get_vol_info failed for vol=%s: %s\n", + dcr->VolumeName, jcr->errmsg); + Jmsg1(jcr, M_WARNING, 0, "Read acquire: %s", jcr->errmsg); + } + dev->set_load(); /* set to load volume */ + + try_autochanger = true; /* permit trying the autochanger again */ + + continue; /* try reading again */ + } /* end switch */ + break; + } /* end for loop */ + + if (!ok) { + Jmsg2(jcr, M_FATAL, 0, _("Too many errors trying to mount %s device %s for reading.\n"), + dev->print_type(), dev->print_name()); + goto get_out; + } + + dev->clear_append(); + dev->set_read(); + jcr->sendJobStatus(JS_Running); + Jmsg(jcr, M_INFO, 0, _("Ready to read from volume \"%s\" on %s device %s.\n"), + dcr->VolumeName, dev->print_type(), dev->print_name()); + +get_out: + dev->Lock(); + /* If failed and not writing plugin close device */ + if (!ok && dev->num_writers == 0 && dev->num_reserved() == 0) { + generate_plugin_event(jcr, bsdEventDeviceClose, dcr); + } + /* + * Normally we are blocked, but in at least one error case above + * we are not blocked because we unsuccessfully tried changing + * devices. + */ + if (dev->is_blocked()) { + dev->dunblock(DEV_LOCKED); + } else { + dev->Unlock(); /* dunblock() unlock the device too */ + } + Dmsg2(rdbglvl, "dcr=%p dev=%p\n", dcr, dcr->dev); + Dmsg2(rdbglvl, "MediaType dcr=%s dev=%s\n", dcr->media_type, dev->device->media_type); + dev->Unlock_read_acquire(); + Leave(rdbglvl); + return ok; +} + +/* + * Acquire device for writing. 
We permit multiple writers. + * If this is the first one, we read the label. + * + * Returns: NULL if failed for any reason + * dcr if successful. + * Note, normally reserve_device_for_append() is called + * before this routine. + */ +DCR *acquire_device_for_append(DCR *dcr) +{ + DEVICE *dev = dcr->dev; + JCR *jcr = dcr->jcr; + bool ok = false; + bool have_vol = false; + + Enter(200); + dcr->set_ameta(); + init_device_wait_timers(dcr); + + dev->Lock_acquire(); /* only one job at a time */ + dev->Lock(); + Dmsg1(100, "acquire_append device is %s\n", dev->print_type()); + /* + * With the reservation system, this should not happen + */ + if (dev->can_read()) { + Mmsg2(jcr->errmsg, "Want to append but %s device %s is busy reading.\n", + dev->print_type(), dev->print_name()); + Jmsg(jcr, M_FATAL, 0, jcr->errmsg); + Dmsg0(50, jcr->errmsg); + goto get_out; + } + + dev->clear_unload(); + + /* + * have_vol defines whether or not mount_next_write_volume should + * ask the Director again about what Volume to use. + */ + if (dev->can_append() && dcr->is_suitable_volume_mounted() && + strcmp(dcr->VolCatInfo.VolCatStatus, "Recycle") != 0) { + Dmsg0(190, "device already in append.\n"); + /* + * At this point, the correct tape is already mounted, so + * we do not need to do mount_next_write_volume(), unless + * we need to recycle the tape. + */ + if (dev->num_writers == 0) { + dev->VolCatInfo = dcr->VolCatInfo; /* structure assignment */ + } + have_vol = dcr->is_tape_position_ok(); + } + + if (!have_vol) { + dev->rLock(true); + block_device(dev, BST_DOING_ACQUIRE); + dev->Unlock(); + Dmsg1(190, "jid=%u Do mount_next_write_vol\n", (uint32_t)jcr->JobId); + if (!dcr->mount_next_write_volume()) { + if (!job_canceled(jcr)) { + /* Reduce "noise" -- don't print if job canceled */ + Mmsg2(jcr->errmsg, _("Could not ready %s device %s for append.\n"), + dev->print_type(), dev->print_name()); + Jmsg(jcr, M_FATAL, 0, jcr->errmsg); + Dmsg0(50, jcr->errmsg); + } + dev->Lock(); + unblock_device(dev); + goto get_out; + } + Dmsg2(190, "Output pos=%u:%u\n", dcr->dev->file, dcr->dev->block_num); + dev->Lock(); + unblock_device(dev); + } + + if (generate_plugin_event(jcr, bsdEventDeviceOpen, dcr) != bRC_OK) { + Mmsg0(jcr->errmsg, _("generate_plugin_event(bsdEventDeviceOpen) Failed\n")); + Jmsg(jcr, M_FATAL, 0, jcr->errmsg); + Dmsg0(50, jcr->errmsg); + goto get_out; + } + + dev->num_writers++; /* we are now a writer */ + if (jcr->NumWriteVolumes == 0) { + jcr->NumWriteVolumes = 1; + } + dev->VolCatInfo.VolCatJobs++; /* increment number of jobs on vol */ + + ok = dir_update_volume_info(dcr, false, false); /* send Volume info to Director */ + if (!ok) { /* We cannot use this volume/device */ + Jmsg(jcr, M_WARNING, 0, _("Warning cannot use Volume \"%s\", update_volume_info failed.\n"), + dev->VolCatInfo.VolCatName); + dev->num_writers--; /* on fail update_volume do not update num_writers */ + /* TODO: See if we revert the NumWriteVolumes as well */ + } + + Dmsg4(100, "=== nwriters=%d nres=%d vcatjob=%d dev=%s\n", + dev->num_writers, dev->num_reserved(), dev->VolCatInfo.VolCatJobs, + dev->print_name()); + +get_out: + /* Don't plugin close here, we might have multiple writers */ + dcr->clear_reserved(); + dev->Unlock(); + dev->Unlock_acquire(); + Leave(200); + return ok ? dcr : NULL; +} + +/* + * This job is done, so release the device. From a Unix standpoint, + * the device remains open. + * + * Note, if we were spooling, we may enter with the device blocked. 
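The have_vol test above can be read as a single predicate: keep the currently mounted volume only if the drive is already in append mode, the mounted volume is suitable for this job, and its catalog status is not "Recycle". A hedged condensation, with a hypothetical helper name that does not exist in the source:

#include <cstring>

static bool can_reuse_mounted_volume(bool can_append,
                                     bool suitable_volume_mounted,
                                     const char *vol_cat_status)
{
   // mirrors the condition guarding the "device already in append" branch
   return can_append && suitable_volume_mounted &&
          strcmp(vol_cat_status, "Recycle") != 0;
}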
+ * We unblock at the end, only if it was us who blocked the + * device. + * + */ +bool release_device(DCR *dcr) +{ + JCR *jcr = dcr->jcr; + DEVICE *dev = dcr->dev; + bool ok = true; + char tbuf[100]; + bsteal_lock_t holder; + + dev->Lock(); + if (!obtain_device_block(dev, + &holder, + 0, /* infinite wait */ + BST_RELEASING)) { + ASSERT2(0, "unable to obtain device block"); + } + + lock_volumes(); + Dmsg2(100, "release_device device %s is %s\n", dev->print_name(), dev->is_tape()?"tape":"disk"); + + /* if device is reserved, job never started, so release the reserve here */ + dcr->clear_reserved(); + + if (dev->can_read()) { + VOLUME_CAT_INFO *vol = &dev->VolCatInfo; + generate_plugin_event(jcr, bsdEventDeviceClose, dcr); + dev->clear_read(); /* clear read bit */ + Dmsg2(150, "dir_update_vol_info. label=%d Vol=%s\n", + dev->is_labeled(), vol->VolCatName); + if (dev->is_labeled() && vol->VolCatName[0] != 0) { + dir_update_volume_info(dcr, false, false); /* send Volume info to Director */ + remove_read_volume(jcr, dcr->VolumeName); + volume_unused(dcr); + } + } else if (dev->num_writers > 0) { + /* + * Note if WEOT is set, we are at the end of the tape + * and may not be positioned correctly, so the + * job_media_record and update_vol_info have already been + * done, which means we skip them here. + */ + dev->num_writers--; + Dmsg1(100, "There are %d writers in release_device\n", dev->num_writers); + if (dev->is_labeled()) { + if (!dev->at_weot()) { + Dmsg2(200, "dir_create_jobmedia. Release vol=%s dev=%s\n", + dev->getVolCatName(), dev->print_name()); + } + if (!dev->at_weot() && !dir_create_jobmedia_record(dcr)) { + Jmsg2(jcr, M_FATAL, 0, _("Could not create JobMedia record for Volume=\"%s\" Job=%s\n"), + dcr->getVolCatName(), jcr->Job); + } + /* If no more writers, and no errors, and wrote something, write an EOF */ + if (!dev->num_writers && dev->can_write() && dev->block_num > 0) { + dev->weof(dcr, 1); + write_ansi_ibm_labels(dcr, ANSI_EOF_LABEL, dev->VolHdr.VolumeName); + } + if (!dev->at_weot()) { + dev->VolCatInfo.VolCatFiles = dev->get_file(); /* set number of files */ + /* Note! do volume update before close, which zaps VolCatInfo */ + dir_update_volume_info(dcr, false, false); /* send Volume info to Director */ + Dmsg2(200, "dir_update_vol_info. Release vol=%s dev=%s\n", + dev->getVolCatName(), dev->print_name()); + } + if (dev->num_writers == 0) { /* if not being used */ + volume_unused(dcr); /* we obviously are not using the volume */ + generate_plugin_event(jcr, bsdEventDeviceClose, dcr); + } + } + + } else { + /* + * If we reach here, it is most likely because the job + * has failed, since the device is not in read mode and + * there are no writers. It was probably reserved. 
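Several branches above hinge on the writer count: each appending job increments num_writers when it acquires the device and decrements it on release, and only the last writer writes the EOF mark and pushes the final Volume information to the Director. A toy sketch of that bookkeeping, with invented names and all error paths ignored:

struct demo_dev {
   int  num_writers = 0;
   bool labeled     = false;
   bool wrote_data  = false;
};

static void demo_release_writer(demo_dev &dev)
{
   if (dev.num_writers > 0) {
      dev.num_writers--;
   }
   if (dev.num_writers == 0 && dev.labeled && dev.wrote_data) {
      // last writer out: write the EOF mark and send the final
      // Volume information (omitted in this sketch)
   }
}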
+ */ + volume_unused(dcr); + generate_plugin_event(jcr, bsdEventDeviceClose, dcr); + } + Dmsg3(100, "%d writers, %d reserve, dev=%s\n", dev->num_writers, dev->num_reserved(), + dev->print_name()); + + /* If no writers, close if file or !CAP_ALWAYS_OPEN */ + if (dev->num_writers == 0 && (!dev->is_tape() || !dev->has_cap(CAP_ALWAYSOPEN))) { + generate_plugin_event(jcr, bsdEventDeviceClose, dcr); + if (!dev->close(dcr) && dev->errmsg[0]) { + Jmsg(jcr, M_ERROR, 0, "%s", dev->errmsg); + } + free_volume(dev); + } + unlock_volumes(); + + /* Do new tape alert code */ + dev->get_tape_alerts(dcr); + /* alert_callback is in tape_alert.c -- show only most recent (last) alert */ + dev->show_tape_alerts(dcr, list_long, list_last, alert_callback); + + pthread_cond_broadcast(&dev->wait_next_vol); + Dmsg2(100, "JobId=%u broadcast wait_device_release at %s\n", + (uint32_t)jcr->JobId, bstrftimes(tbuf, sizeof(tbuf), (utime_t)time(NULL))); + pthread_cond_broadcast(&wait_device_release); + + give_back_device_block(dev, &holder); + /* + * If we are the thread that blocked the device, then unblock it + */ + if (pthread_equal(dev->no_wait_id, pthread_self())) { + dev->dunblock(true); + } else { + dev->Unlock(); + } + + dev->end_of_job(dcr); + + if (dcr->keep_dcr) { + dev->detach_dcr_from_dev(dcr); + } else { + free_dcr(dcr); + } + Dmsg2(100, "Device %s released by JobId=%u\n", dev->print_name(), + (uint32_t)jcr->JobId); + return ok; +} + +/* + * Clean up the device for reuse without freeing the memory + */ +bool clean_device(DCR *dcr) +{ + bool ok; + dcr->keep_dcr = true; /* do not free the dcr */ + ok = release_device(dcr); + dcr->keep_dcr = false; + return ok; +} + +/* + * Create a new Device Control Record and attach + * it to the device (if this is a real job). + * Note, this has been updated so that it can be called first + * without a DEVICE, then a second or third time with a DEVICE, + * and each time, it should cleanup and point to the new device. + * This should facilitate switching devices. + * Note, each dcr must point to the controlling job (jcr). However, + * a job can have multiple dcrs, so we must not store in the jcr's + * structure as previously. The higher level routine must store + * this dcr in the right place + * + */ +DCR *new_dcr(JCR *jcr, DCR *dcr, DEVICE *dev, bool writing) +{ + DEVICE *odev; + if (!dcr) { + dcr = (DCR *)malloc(sizeof(DCR)); + memset(dcr, 0, sizeof(DCR)); + dcr->tid = pthread_self(); + dcr->uploads = New(alist(100, false)); + dcr->downloads = New(alist(100, false)); + dcr->spool_fd = -1; + } + dcr->jcr = jcr; /* point back to jcr */ + odev = dcr->dev; + if (dcr->attached_to_dev && odev) { + Dmsg2(100, "Detach 0x%x from olddev %s\n", dcr, odev->print_name()); + odev->detach_dcr_from_dev(dcr); + } + ASSERT2(!dcr->attached_to_dev, "DCR is attached. Wrong!"); + /* Set device information, possibly change device */ + if (dev) { + ASSERT2(!dev->adata, "Called with adata dev. 
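attach_dcr_to_dev() and detach_dcr_from_dev() above maintain a per-device list of attached DCRs under the device's own DCR lock. The sketch below restates that bookkeeping with invented demo_* types; it is only meant to show the invariant that a DCR is attached to at most one device at a time and that list membership always changes under the lock.

#include <algorithm>
#include <mutex>
#include <vector>

struct demo_dcr {
   bool attached_to_dev = false;
};

struct demo_device {
   std::mutex dcr_lock;                   // stand-in for Lock_dcrs()
   std::vector<demo_dcr*> attached_dcrs;

   void attach(demo_dcr *dcr) {
      std::lock_guard<std::mutex> l(dcr_lock);
      if (!dcr->attached_to_dev) {
         attached_dcrs.push_back(dcr);
         dcr->attached_to_dev = true;
      }
   }

   void detach(demo_dcr *dcr) {
      std::lock_guard<std::mutex> l(dcr_lock);
      attached_dcrs.erase(std::remove(attached_dcrs.begin(),
                                      attached_dcrs.end(), dcr),
                          attached_dcrs.end());
      dcr->attached_to_dev = false;
   }
};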
Wrong!"); + dev->free_dcr_blocks(dcr); + dev->new_dcr_blocks(dcr); + if (dcr->rec) { + free_record(dcr->rec); + } + dcr->rec = new_record(); + /* Use job spoolsize prior to device spoolsize */ + if (jcr && jcr->spool_size) { + dcr->max_job_spool_size = jcr->spool_size; + } else { + dcr->max_job_spool_size = dev->device->max_job_spool_size; + } + dcr->device = dev->device; + dcr->set_dev(dev); + Dmsg2(100, "Attach 0x%x to dev %s\n", dcr, dev->print_name()); + dev->attach_dcr_to_dev(dcr); + } + if (writing) { + dcr->set_writing(); + } else { + dcr->clear_writing(); + } + return dcr; +} + +/* + * Search the dcrs list for the given dcr. If it is found, + * as it should be, then remove it. Also zap the jcr pointer + * to the dcr if it is the same one. + * + * Note, this code will be turned on when we can write to multiple + * dcrs at the same time. + */ +#ifdef needed +static void remove_dcr_from_dcrs(DCR *dcr) +{ + JCR *jcr = dcr->jcr; + if (jcr->dcrs) { + int i = 0; + DCR *ldcr; + int num = jcr->dcrs->size(); + for (i=0; i < num; i++) { + ldcr = (DCR *)jcr->dcrs->get(i); + if (ldcr == dcr) { + jcr->dcrs->remove(i); + if (jcr->dcr == dcr) { + jcr->dcr = NULL; + } + } + } + } +} +#endif + +void DEVICE::attach_dcr_to_dev(DCR *dcr) +{ + JCR *jcr = dcr->jcr; + + Lock_dcrs(); + jcr = dcr->jcr; + if (jcr) Dmsg1(500, "JobId=%u enter attach_dcr_to_dev\n", (uint32_t)jcr->JobId); + /* ***FIXME*** return error if dev not initiated */ + if (!dcr->attached_to_dev && initiated && jcr && jcr->getJobType() != JT_SYSTEM) { + ASSERT2(!adata, "Called on adata dev. Wrong!"); + Dmsg4(200, "Attach Jid=%d dcr=%p size=%d dev=%s\n", (uint32_t)jcr->JobId, + dcr, attached_dcrs->size(), print_name()); + attached_dcrs->append(dcr); /* attach dcr to device */ + dcr->attached_to_dev = true; + } + Unlock_dcrs(); +} + +/* + * Note!! Do not enter with dev->Lock() since unreserve_device() + * is going to lock it too. + */ +void DEVICE::detach_dcr_from_dev(DCR *dcr) +{ + Dmsg0(500, "Enter detach_dcr_from_dev\n"); /* jcr is NULL in some cases */ + + Lock(); + Lock_dcrs(); + /* Detach this dcr only if attached */ + if (dcr->attached_to_dev) { + ASSERT2(!adata, "Called with adata dev. Wrong!"); + dcr->unreserve_device(true); + Dmsg4(200, "Detach Jid=%d dcr=%p size=%d to dev=%s\n", (uint32_t)dcr->jcr->JobId, + dcr, attached_dcrs->size(), print_name()); + dcr->attached_to_dev = false; + if (attached_dcrs->size()) { + attached_dcrs->remove(dcr); /* detach dcr from device */ + } + } + /* Check if someone accidentally left a drive reserved, and clear it */ + if (attached_dcrs->size() == 0 && num_reserved() > 0) { + Pmsg3(000, "Warning!!! Detach %s DCR: dcrs=0 reserved=%d setting reserved==0. dev=%s\n", + dcr->is_writing() ? "writing" : "reading", num_reserved(), print_name()); + m_num_reserved = 0; + } + dcr->attached_to_dev = false; + Unlock_dcrs(); + Unlock(); +} + +/* + * Free up all aspects of the given dcr -- i.e. dechain it, + * release allocated memory, zap pointers, ... 
+ */ +void free_dcr(DCR *dcr) +{ + JCR *jcr; + + jcr = dcr->jcr; + + if (dcr->dev) { + dcr->dev->detach_dcr_from_dev(dcr); + } + + if (dcr->dev) { + dcr->dev->free_dcr_blocks(dcr); + } else { + dcr->ameta_block = NULL; + free_block(dcr->block); + } + if (dcr->rec) { + free_record(dcr->rec); + } + if (jcr && jcr->dcr == dcr) { + jcr->dcr = NULL; + } + if (jcr && jcr->read_dcr == dcr) { + jcr->read_dcr = NULL; + } + delete dcr->uploads; + delete dcr->downloads; + free(dcr); +} + +static void set_dcr_from_vol(DCR *dcr, VOL_LIST *vol) +{ + /* + * Note, if we want to be able to work from a .bsr file only + * for disaster recovery, we must "simulate" reading the catalog + */ + bstrncpy(dcr->VolumeName, vol->VolumeName, sizeof(dcr->VolumeName)); + dcr->setVolCatName(vol->VolumeName); + bstrncpy(dcr->media_type, vol->MediaType, sizeof(dcr->media_type)); + dcr->VolCatInfo.Slot = vol->Slot; + dcr->VolCatInfo.InChanger = vol->Slot > 0; + dcr->CurrentVol = vol; /* Keep VOL_LIST pointer (freed at the end of the job) */ +} diff --git a/src/stored/aligned_dev.c b/src/stored/aligned_dev.c new file mode 100644 index 00000000..038a4e38 --- /dev/null +++ b/src/stored/aligned_dev.c @@ -0,0 +1,21 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Written by: Kern Sibbald, March MMXIII + */ diff --git a/src/stored/aligned_dev.h b/src/stored/aligned_dev.h new file mode 100644 index 00000000..4e5a993d --- /dev/null +++ b/src/stored/aligned_dev.h @@ -0,0 +1,141 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Inspired by vtape.h + */ + +#ifndef _ALIGNED_DEV_H_ +#define _ALIGNED_DEV_H_ + +class aligned_dev : public file_dev { +public: + + aligned_dev(); + ~aligned_dev(); + + boffset_t get_adata_size(DCR *dcr); + boffset_t align_adata_addr(DCR *dcr, boffset_t addr); + boffset_t get_adata_addr(DCR *dcr); + void set_adata_addr(DCR *dcr); + void clear_adata_addr(); + + /* DEVICE virtual functions that we redefine */ + void setVolCatName(const char *name); + void setVolCatStatus(const char *status); + void free_dcr_blocks(DCR *dcr); + void new_dcr_blocks(DCR *dcr); + void updateVolCatBytes(uint64_t); + void updateVolCatBlocks(uint32_t); + void updateVolCatWrites(uint32_t); + void updateVolCatReads(uint32_t); + void updateVolCatReadBytes(uint64_t); + void updateVolCatPadding(uint64_t); + bool setVolCatAdataBytes(uint64_t bytes); + void updateVolCatHoleBytes(uint64_t bytes); + void device_specific_open(DCR *dcr); + void set_volcatinfo_from_dcr(DCR *dcr); + bool allow_maxbytes_concurrency(DCR *dcr); + bool flush_before_eos(DCR *dcr); + void set_nospace(); + void set_append(); + void set_read(); + void clear_nospace(); + void clear_append(); + void clear_read(); + void device_specific_init(JCR *jcr, DEVRES *device); + int d_close(int fd); + int d_open(const char *pathname, int flags); + int d_ioctl(int fd, ioctl_req_t request, char *mt_com); + ssize_t d_read(int fd, void *buffer, size_t count); + ssize_t d_write(int, const void *buffer, size_t count); + boffset_t lseek(DCR *dcr, off_t offset, int whence); + bool rewind(DCR *dcr); + bool reposition(DCR *dcr, uint64_t raddr); + bool open_device(DCR *dcr, int omode); + bool truncate(DCR *dcr); + bool close(DCR *dcr); + void term(DCR *dcr); + bool eod(DCR *dcr); + bool update_pos(DCR *dcr); + bool mount_file(int mount, int dotimeout); + bool is_indexed() { return !adata; }; + int read_dev_volume_label(DCR *dcr); + const char *print_type(); + DEVICE *get_dev(DCR *dcr); + uint32_t get_hi_addr(); + uint32_t get_low_addr(); + uint64_t get_full_addr(); + uint64_t get_full_addr(boffset_t addr); + bool do_size_checks(DCR *dcr, DEV_BLOCK *block); + bool write_volume_label_to_block(DCR *dcr); + bool write_volume_label_to_dev(DCR *dcr, + const char *VolName, const char *PoolName, + bool relabel, bool no_prelabel); + bool write_adata_label(DCR *dcr, DEV_RECORD *rec); + void write_adata(DCR *dcr, DEV_RECORD *rec); + void write_cont_adata(DCR *dcr, DEV_RECORD *rec); + int write_adata_rechdr(DCR *dcr, DEV_RECORD *rec); + bool read_adata_record_header(DCR *dcr, DEV_BLOCK *block, DEV_RECORD *rec); + void read_adata_block_header(DCR *dcr); + int read_adata(DCR *dcr, DEV_RECORD *rec); + bool have_adata_header(DCR *dcr, DEV_RECORD *rec, int32_t FileIndex, + int32_t Stream, uint32_t VolSessionId); + void select_data_stream(DCR *dcr, DEV_RECORD *rec); + bool flush_block(DCR *dcr); + bool do_pre_write_checks(DCR *dcr, DEV_RECORD *rec); + + + + /* + * Locking and blocking calls + */ +#ifdef DEV_DEBUG_LOCK + void dbg_Lock(const char *, int); + void dbg_Unlock(const char *, int); + void dbg_rLock(const char *, int, bool locked=false); + void dbg_rUnlock(const char *, int); +#else + void Lock(); + void Unlock(); + void rLock(bool locked=false); + void rUnlock(); +#endif + +#ifdef SD_DEBUG_LOCK + void dbg_Lock_acquire(const char *, int); + void dbg_Unlock_acquire(const char *, int); + void dbg_Lock_read_acquire(const char *, int); + void dbg_Unlock_read_acquire(const char *, int); + void dbg_Lock_VolCatInfo(const char *, int); + void dbg_Unlock_VolCatInfo(const char *, 
int); +#else + void Lock_acquire(); + void Unlock_acquire(); + void Lock_read_acquire(); + void Unlock_read_acquire(); + void Lock_VolCatInfo(); + void Unlock_VolCatInfo(); +#endif + + void dblock(int why); /* in lock.c */ + void dunblock(bool locked=false); + +}; + +#endif /* _ALIGNED_DEV_H_ */ diff --git a/src/stored/aligned_read.c b/src/stored/aligned_read.c new file mode 100644 index 00000000..6798657c --- /dev/null +++ b/src/stored/aligned_read.c @@ -0,0 +1,25 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * record_read.c -- Volume (tape/disk) record read functions + * + * Kern Sibbald, April MMI + * added BB02 format October MMII + */ diff --git a/src/stored/aligned_write.c b/src/stored/aligned_write.c new file mode 100644 index 00000000..1d73e7b4 --- /dev/null +++ b/src/stored/aligned_write.c @@ -0,0 +1,26 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * record_write.c -- Volume (tape/disk) record write functions + * + * Kern Sibbald, April MMI + * added BB02 format October MMII + * added aligned format November MMXII + */ diff --git a/src/stored/ansi_label.c b/src/stored/ansi_label.c new file mode 100644 index 00000000..2c81ceeb --- /dev/null +++ b/src/stored/ansi_label.c @@ -0,0 +1,449 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * ansi_label.c routines to handle ANSI (and perhaps one day IBM) + * tape labels. 
+ * + * Kern Sibbald, MMV + */ + +#include "bacula.h" /* pull in global headers */ +#include "stored.h" /* pull in Storage Deamon headers */ + +/* Imported functions */ +void ascii_to_ebcdic(char *dst, char *src, int count); +void ebcdic_to_ascii(char *dst, char *src, int count); + +/* Forward referenced functions */ +static char *ansi_date(time_t td, char *buf); +static bool same_label_names(char *bacula_name, char *ansi_name); + +/* + * We read an ANSI label and compare the Volume name. We require + * a VOL1 record of 80 characters followed by a HDR1 record containing + * BACULA.DATA in the filename field. We then read up to 3 more + * header records (they are not required) and an EOF, at which + * point, all is good. + * + * Returns: + * VOL_OK Volume name OK + * VOL_NO_LABEL No ANSI label on Volume + * VOL_IO_ERROR I/O error on read + * VOL_NAME_ERROR Wrong name in VOL1 record + * VOL_LABEL_ERROR Probably an ANSI label, but something wrong + * + */ +int read_ansi_ibm_label(DCR *dcr) +{ + DEVICE * volatile dev = dcr->dev; + JCR *jcr = dcr->jcr; + char label[80]; /* tape label */ + int stat, i; + char *VolName = dcr->VolumeName; + bool ok = false; + + /* + * Read VOL1, HDR1, HDR2 labels, but ignore the data + * If tape read the following EOF mark, on disk do + * not read. + */ + Dmsg0(100, "Read ansi label.\n"); + if (!dev->is_tape()) { + return VOL_OK; + } + + dev->label_type = B_BACULA_LABEL; /* assume Bacula label */ + + /* Read a maximum of 5 records VOL1, HDR1, ... HDR4 */ + for (i=0; i < 6; i++) { + do { + stat = dev->read(label, sizeof(label)); + } while (stat == -1 && errno == EINTR); + if (stat < 0) { + berrno be; + dev->clrerror(-1); + Dmsg1(100, "Read device got: ERR=%s\n", be.bstrerror()); + Mmsg2(jcr->errmsg, _("Read error on device %s in ANSI label. ERR=%s\n"), + dev->dev_name, be.bstrerror()); + Jmsg(jcr, M_ERROR, 0, "%s", dev->errmsg); + dev->VolCatInfo.VolCatErrors++; + return VOL_IO_ERROR; + } + if (stat == 0) { + if (dev->at_eof()) { + dev->set_eot(); /* second eof, set eot bit */ + Dmsg0(100, "EOM on ANSI label\n"); + Mmsg0(jcr->errmsg, _("Insane! End of tape while reading ANSI label.\n")); + return VOL_LABEL_ERROR; /* at EOM this shouldn't happen */ + } else { + dev->set_ateof(); /* set eof state */ + } + } + switch (i) { + case 0: /* Want VOL1 label */ + if (stat == 80) { + if (strncmp("VOL1", label, 4) == 0) { + ok = true; + dev->label_type = B_ANSI_LABEL; + Dmsg0(100, "Got ANSI VOL1 label\n"); + } else { + /* Try EBCDIC */ + ebcdic_to_ascii(label, label, sizeof(label)); + if (strncmp("VOL1", label, 4) == 0) { + ok = true;; + dev->label_type = B_IBM_LABEL; + Dmsg0(100, "Found IBM label.\n"); + Dmsg0(100, "Got IBM VOL1 label\n"); + } + } + } + if (!ok) { + Dmsg0(100, "No VOL1 label\n"); + Mmsg0(jcr->errmsg, _("No VOL1 label while reading ANSI/IBM label.\n")); + return VOL_NO_LABEL; /* No ANSI label */ + } + + + /* Compare Volume Names allow special wild card */ + if (VolName && *VolName && *VolName != '*') { + if (!same_label_names(VolName, &label[4])) { + char *p = &label[4]; + char *q; + + free_volume(dev); + /* Store new Volume name */ + q = dev->VolHdr.VolumeName; + for (int i=0; *p != ' ' && i < 6; i++) { + *q++ = *p++; + } + *q = 0; + Dmsg0(100, "Call reserve_volume\n"); + /* ***FIXME*** why is this reserve_volume() needed???? 
KES */ + reserve_volume(dcr, dev->VolHdr.VolumeName); + dev = dcr->dev; /* may have changed in reserve_volume */ + Dmsg2(100, "Wanted ANSI Vol %s got %6s\n", VolName, dev->VolHdr.VolumeName); + Mmsg2(jcr->errmsg, _("Wanted ANSI Volume \"%s\" got \"%s\"\n"), VolName, dev->VolHdr.VolumeName); + return VOL_NAME_ERROR; + } + } + break; + case 1: + if (dev->label_type == B_IBM_LABEL) { + ebcdic_to_ascii(label, label, sizeof(label)); + } + if (stat != 80 || strncmp("HDR1", label, 4) != 0) { + Dmsg0(100, "No HDR1 label\n"); + Mmsg0(jcr->errmsg, _("No HDR1 label while reading ANSI label.\n")); + return VOL_LABEL_ERROR; + } + if (strncmp("BACULA.DATA", &label[4], 11) != 0) { + Dmsg1(100, "HD1 not Bacula label. Wanted BACULA.DATA got %11s\n", + &label[4]); + Mmsg1(jcr->errmsg, _("ANSI/IBM Volume \"%s\" does not belong to Bacula.\n"), + dev->VolHdr.VolumeName); + return VOL_NAME_ERROR; /* Not a Bacula label */ + } + Dmsg0(100, "Got HDR1 label\n"); + break; + case 2: + if (dev->label_type == B_IBM_LABEL) { + ebcdic_to_ascii(label, label, sizeof(label)); + } + if (stat != 80 || strncmp("HDR2", label, 4) != 0) { + Dmsg0(100, "No HDR2 label\n"); + Mmsg0(jcr->errmsg, _("No HDR2 label while reading ANSI/IBM label.\n")); + return VOL_LABEL_ERROR; + } + Dmsg0(100, "Got ANSI HDR2 label\n"); + break; + default: + if (stat == 0) { + Dmsg0(100, "ANSI label OK\n"); + return VOL_OK; + } + if (dev->label_type == B_IBM_LABEL) { + ebcdic_to_ascii(label, label, sizeof(label)); + } + if (stat != 80 || strncmp("HDR", label, 3) != 0) { + Dmsg0(100, "Unknown or bad ANSI/IBM label record.\n"); + Mmsg0(jcr->errmsg, _("Unknown or bad ANSI/IBM label record.\n")); + return VOL_LABEL_ERROR; + } + Dmsg0(100, "Got HDR label\n"); + break; + } + } + Dmsg0(100, "Too many records in ANSI/IBM label.\n"); + Mmsg0(jcr->errmsg, _("Too many records in while reading ANSI/IBM label.\n")); + return VOL_LABEL_ERROR; +} + +/* + * ANSI/IBM VOL1 label + * 80 characters blank filled + * Pos count Function What Bacula puts + * 0-3 4 "VOL1" VOL1 + * 4-9 6 Volume name Volume name + * 10-10 1 Access code + * 11-36 26 Unused + * + * ANSI + * 37-50 14 Owner + * 51-78 28 reserved + * 79 1 ANSI level 3 + * + * IBM + * 37-40 4 reserved + * 41-50 10 Owner + * 51-79 29 reserved + + * + * + * ANSI/IBM HDR1 label + * 80 characters blank filled + * Pos count Function What Bacula puts + * 0-3 4 "HDR1" HDR1 + * 4-20 17 File name BACULA.DATA + * 21-26 6 Volume name Volume name + * 27-30 4 Vol seq num 0001 + * 31-34 4 file num 0001 + * 35-38 4 Generation 0001 + * 39-40 2 Gen version 00 + * 41-46 6 Create date bYYDDD yesterday + * 47-52 6 Expire date bYYDDD today + * 53-53 1 Access + * 54-59 6 Block count 000000 + * 60-72 13 Software name Bacula + * 73-79 7 Reserved + + * ANSI/IBM HDR2 label + * 80 characters blank filled + * Pos count Function What Bacula puts + * 0-3 4 "HDR2" HDR2 + * 4-4 1 Record format D (V if IBM) => variable + * 5-9 5 Block length 32000 + * 10-14 5 Rec length 32000 + * 15-15 1 Density + * 16-16 1 Continued + * 17-33 17 Job + * 34-35 2 Recording + * 36-36 1 cr/lf ctl + * 37-37 1 reserved + * 38-38 1 Blocked flag + * 39-49 11 reserved + * 50-51 2 offset + * 52-79 28 reserved + + */ + +static const char *labels[] = {"HDR", "EOF", "EOV"}; + +/* + * Write an ANSI or IBM 80 character tape label + * Type determines whether we are writing HDR, EOF, or EOV labels + * Assume we are positioned to write the labels + * Returns: true of OK + * false if error + */ +bool write_ansi_ibm_labels(DCR *dcr, int type, const char *VolName) +{ + DEVICE *dev = 
dcr->dev; + JCR *jcr = dcr->jcr; + char ansi_volname[7]; /* 6 char + \0 */ + char label[80]; /* tape label */ + char date[20]; /* ansi date buffer */ + time_t now; + int len, stat, label_type; + + /* + * If the Device requires a specific label type use it, + * otherwise, use the type requested by the Director + */ + if (dcr->device->label_type != B_BACULA_LABEL) { + label_type = dcr->device->label_type; /* force label type */ + } else { + label_type = dcr->VolCatInfo.LabelType; /* accept Dir type */ + } + + switch (label_type) { + case B_BACULA_LABEL: + return true; + case B_ANSI_LABEL: + case B_IBM_LABEL: + ser_declare; + Dmsg1(100, "Write ANSI label type=%d\n", label_type); + len = strlen(VolName); + if (len > 6) { + Jmsg1(jcr, M_FATAL, 0, _("ANSI Volume label name \"%s\" longer than 6 chars.\n"), + VolName); + return false; + } + /* ANSI labels have 6 characters, and are padded with spaces + * 'vol1\0' => 'vol1 \0' + */ + strcpy(ansi_volname, VolName); + for(int i=len; i < 6; i++) { + ansi_volname[i]=' '; + } + ansi_volname[6]='\0'; /* only for debug */ + + if (type == ANSI_VOL_LABEL) { + ser_begin(label, sizeof(label)); + ser_bytes("VOL1", 4); + ser_bytes(ansi_volname, 6); + /* Write VOL1 label */ + if (label_type == B_IBM_LABEL) { + ascii_to_ebcdic(label, label, sizeof(label)); + } else { + label[79] = '3'; /* ANSI label flag */ + } + stat = dev->write(label, sizeof(label)); + if (stat != sizeof(label)) { + berrno be; + Jmsg3(jcr, M_FATAL, 0, _("Could not write ANSI VOL1 label. Wanted size=%d got=%d ERR=%s\n"), + sizeof(label), stat, be.bstrerror()); + return false; + } + } + + /* Now construct HDR1 label */ + memset(label, ' ', sizeof(label)); + ser_begin(label, sizeof(label)); + ser_bytes(labels[type], 3); + ser_bytes("1", 1); + ser_bytes("BACULA.DATA", 11); /* Filename field */ + ser_begin(&label[21], sizeof(label)-21); /* fileset field */ + ser_bytes(ansi_volname, 6); /* write Vol Ser No. */ + ser_begin(&label[27], sizeof(label)-27); + ser_bytes("00010001000100", 14); /* File section, File seq no, Generation no */ + now = time(NULL); + ser_bytes(ansi_date(now, date), 6); /* current date */ + ser_bytes(ansi_date(now - 24 * 3600, date), 6); /* created yesterday */ + ser_bytes(" 000000Bacula ", 27); + /* Write HDR1 label */ + if (label_type == B_IBM_LABEL) { + ascii_to_ebcdic(label, label, sizeof(label)); + } + + /* + * This could come at the end of a tape, ignore + * EOT errors. + */ + stat = dev->write(label, sizeof(label)); + if (stat != sizeof(label)) { + berrno be; + if (stat == -1) { + dev->clrerror(-1); + if (dev->dev_errno == 0) { + dev->dev_errno = ENOSPC; /* out of space */ + } + if (dev->dev_errno != ENOSPC) { + Jmsg1(jcr, M_FATAL, 0, _("Could not write ANSI HDR1 label. ERR=%s\n"), + be.bstrerror()); + return false; + } + } else { + Jmsg(jcr, M_FATAL, 0, _("Could not write ANSI HDR1 label.\n")); + return false; + } + } + + /* Now construct HDR2 label */ + memset(label, ' ', sizeof(label)); + ser_begin(label, sizeof(label)); + ser_bytes(labels[type], 3); + ser_bytes("2D3200032000", 12); + /* Write HDR2 label */ + if (label_type == B_IBM_LABEL) { + label[4] = 'V'; + ascii_to_ebcdic(label, label, sizeof(label)); + } + stat = dev->write(label, sizeof(label)); + if (stat != sizeof(label)) { + berrno be; + if (stat == -1) { + dev->clrerror(-1); + if (dev->dev_errno == 0) { + dev->dev_errno = ENOSPC; /* out of space */ + } + if (dev->dev_errno != ENOSPC) { + Jmsg1(jcr, M_FATAL, 0, _("Could not write ANSI HDR1 label. 
ERR=%s\n"), + be.bstrerror()); + return false; + } + dev->weof(NULL, 1); + return true; + } else { + Jmsg(jcr, M_FATAL, 0, _("Could not write ANSI HDR1 label.\n")); + return false; + } + } + if (!dev->weof(NULL, 1)) { + Jmsg(jcr, M_FATAL, 0, _("Error writing EOF to tape. ERR=%s"), dev->errmsg); + return false; + } + return true; + default: + Jmsg0(jcr, M_ABORT, 0, _("write_ansi_ibm_label called for non-ANSI/IBM type\n")); + return false; /* should not get here */ + } +} + +/* Check a Bacula Volume name against an ANSI Volume name */ +static bool same_label_names(char *bacula_name, char *ansi_name) +{ + char *a = ansi_name; + char *b = bacula_name; + /* Six characters max */ + for (int i=0; i < 6; i++) { + if (*a == *b) { + a++; + b++; + continue; + } + /* ANSI labels are blank filled, Bacula's are zero terminated */ + if (*a == ' ' && *b == 0) { + return true; + } + return false; + } + /* Reached 6 characters */ + b++; + if (*b == 0) { + return true; + } + return false; +} + +/* + * ANSI date + * ' 'YYDDD + */ +static char *ansi_date(time_t td, char *buf) +{ + struct tm *tm; + + if (td == 0) { + td = time(NULL); + } + tm = gmtime(&td); + bsnprintf(buf, 10, " %05d ", 1000 * (tm->tm_year + 1900 - 2000) + tm->tm_yday); + return buf; +} diff --git a/src/stored/append.c b/src/stored/append.c new file mode 100644 index 00000000..144436ff --- /dev/null +++ b/src/stored/append.c @@ -0,0 +1,391 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Append code for Storage daemon + * Kern Sibbald, May MM + */ + +#include "bacula.h" +#include "stored.h" + + +/* Responses sent to the File daemon */ +static char OK_data[] = "3000 OK data\n"; +static char OK_append[] = "3000 OK append data\n"; + +/* Forward referenced functions */ + + +/* + * Check if we can mark this job incomplete + * + */ +void possible_incomplete_job(JCR *jcr, int32_t last_file_index) +{ + /* + * Note, here we decide if it is worthwhile to restart + * the Job at this point. For the moment, if at least + * 10 Files have been seen. + * We must be sure that the saved files are safe. + * Using this function when their is as comm line problem is probably safe, + * it is inappropriate to use it for a any failure that could + * involve corrupted data. 
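As a concrete check of the ansi_date() encoding in ansi_label.c above (a leading space, then YYDDD): 5 February 2019 has tm_year + 1900 = 2019 and tm_yday = 35, so the field value is 1000 * (2019 - 2000) + 35 = 19035. The standalone snippet below reproduces the formula with plain snprintf; bsnprintf is Bacula's own helper and is deliberately not used here.

#include <cstdio>
#include <ctime>

int main()
{
   char buf[10];
   struct tm tm = {};
   tm.tm_year = 2019 - 1900;              // year 2019
   tm.tm_yday = 35;                       // 5 February (zero-based day of year)
   snprintf(buf, sizeof(buf), " %05d ",
            1000 * (tm.tm_year + 1900 - 2000) + tm.tm_yday);
   printf("%s\n", buf);                   // prints " 19035 "
   return 0;
}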
+ */ + if (jcr->spool_attributes && last_file_index > 10) { + jcr->setJobStatus(JS_Incomplete); + } +} + +/* + * Append Data sent from Client (FD/SD) + * + */ +bool do_append_data(JCR *jcr) +{ + int32_t n; + int32_t file_index, stream, last_file_index; + uint64_t stream_len; + BSOCK *fd = jcr->file_bsock; + bool ok = true; + DEV_RECORD rec; + char buf1[100], buf2[100]; + DCR *dcr = jcr->dcr; + DEVICE *dev; + char ec[50]; + POOLMEM *eblock = NULL; + POOL_MEM errmsg(PM_EMSG); + + if (!dcr) { + pm_strcpy(jcr->errmsg, _("DCR is NULL!!!\n")); + Jmsg0(jcr, M_FATAL, 0, jcr->errmsg); + return false; + } + dev = dcr->dev; + if (!dev) { + pm_strcpy(jcr->errmsg, _("DEVICE is NULL!!!\n")); + Jmsg0(jcr, M_FATAL, 0, jcr->errmsg); + return false; + } + + Dmsg1(100, "Start append data. res=%d\n", dev->num_reserved()); + + memset(&rec, 0, sizeof(rec)); + + if (!fd->set_buffer_size(dcr->device->max_network_buffer_size, BNET_SETBUF_WRITE)) { + jcr->setJobStatus(JS_ErrorTerminated); + pm_strcpy(jcr->errmsg, _("Unable to set network buffer size.\n")); + Jmsg0(jcr, M_FATAL, 0, jcr->errmsg); + return false; + } + + if (!acquire_device_for_append(dcr)) { + jcr->setJobStatus(JS_ErrorTerminated); + return false; + } + + dev->start_of_job(dcr); + jcr->sendJobStatus(JS_Running); + + Dmsg1(50, "Begin append device=%s\n", dev->print_name()); + + begin_data_spool(dcr); + begin_attribute_spool(jcr); + + /* + * Write Begin Session Record + */ + if (!write_session_label(dcr, SOS_LABEL)) { + Jmsg1(jcr, M_FATAL, 0, _("Write session label failed. ERR=%s\n"), + dev->bstrerror()); + jcr->setJobStatus(JS_ErrorTerminated); + ok = false; + } + + /* Tell File daemon to send data */ + if (!fd->fsend(OK_data)) { + berrno be; + Jmsg1(jcr, M_FATAL, 0, _("Network send error to FD. ERR=%s\n"), + be.bstrerror(fd->b_errno)); + ok = false; + } + + /* + * Get Data from File daemon, write to device. To clarify what is + * going on here. We expect: + * - A stream header + * - Multiple records of data + * - EOD record + * + * The Stream header is just used to synchronize things, and + * none of the stream header is written to tape. + * The Multiple records of data, contain first the Attributes, + * then after another stream header, the file data, then + * after another stream header, the MD5 data if any. + * + * So we get the (stream header, data, EOD) three time for each + * file. 1. for the Attributes, 2. for the file data if any, + * and 3. for the MD5 if any. + */ + dcr->VolFirstIndex = dcr->VolLastIndex = 0; + jcr->run_time = time(NULL); /* start counting time for rates */ + + GetMsg *qfd; + + qfd = New(GetMsg(jcr, fd, NULL, GETMSG_MAX_MSG_SIZE)); + qfd->start_read_sock(); + + for (last_file_index = 0; ok && !jcr->is_job_canceled(); ) { + + /* Read Stream header from the File daemon. + * The stream header consists of the following: + * file_index (sequential Bacula file index, base 1) + * stream (Bacula number to distinguish parts of data) + * stream_len (Expected length of this stream. This + * will be the size backed up if the file does not + * grow during the backup. + */ + n = qfd->bget_msg(NULL); + if (n <= 0) { + if (n == BNET_SIGNAL && qfd->msglen == BNET_EOD) { + Dmsg0(200, "Got EOD on reading header.\n"); + break; /* end of data */ + } + Jmsg3(jcr, M_FATAL, 0, _("Error reading data header from FD. 
n=%d msglen=%d ERR=%s\n"), + n, qfd->msglen, fd->bstrerror()); + // ASX TODO the fd->bstrerror() can be related to the wrong error, I should Queue the error too + possible_incomplete_job(jcr, last_file_index); + ok = false; + break; + } + + if (sscanf(qfd->msg, "%ld %ld %lld", &file_index, &stream, &stream_len) != 3) { + // TODO ASX already done in bufmsg, should reuse the values + char buf[256]; + Jmsg1(jcr, M_FATAL, 0, _("Malformed data header from FD: %s\n"), asciidump(qfd->msg, qfd->msglen, buf, sizeof(buf))); + ok = false; + possible_incomplete_job(jcr, last_file_index); + break; + } + + Dmsg3(890, "rerunning && file_index > 0 && last_file_index == 0) { + goto fi_checked; + } + Dmsg2(400, "file_index=%d last_file_index=%d\n", file_index, last_file_index); + if (file_index > 0 && (file_index == last_file_index || + file_index == last_file_index + 1)) { + goto fi_checked; + } + Jmsg2(jcr, M_FATAL, 0, _("FI=%d from FD not positive or last_FI=%d\n"), + file_index, last_file_index); + possible_incomplete_job(jcr, last_file_index); + ok = false; + break; + +fi_checked: + if (file_index != last_file_index) { + jcr->JobFiles = file_index; + last_file_index = file_index; + } + + /* Read data stream from the File daemon. + * The data stream is just raw bytes + */ + while ((n=qfd->bget_msg(NULL)) > 0 && !jcr->is_job_canceled()) { + + rec.VolSessionId = jcr->VolSessionId; + rec.VolSessionTime = jcr->VolSessionTime; + rec.FileIndex = file_index; + rec.Stream = stream; + rec.StreamLen = stream_len; + rec.maskedStream = stream & STREAMMASK_TYPE; /* strip high bits */ + rec.data_len = qfd->msglen; + rec.data = qfd->msg; /* use message buffer */ + + /* Debug code: check if we must hangup or blowup */ + if (handle_hangup_blowup(jcr, jcr->JobFiles, jcr->JobBytes)) { + return false; + } + Dmsg4(850, "before writ_rec FI=%d SessId=%d Strm=%s len=%d\n", + rec.FileIndex, rec.VolSessionId, + stream_to_ascii(buf1, rec.Stream,rec.FileIndex), + rec.data_len); + ok = dcr->write_record(&rec); + if (!ok) { + Dmsg2(90, "Got write_block_to_dev error on device %s. %s\n", + dcr->dev->print_name(), dcr->dev->bstrerror()); + break; + } + jcr->JobBytes += rec.data_len; /* increment bytes this job */ + jcr->JobBytes += qfd->bmsg->jobbytes; // if the block as been downloaded, count it + Dmsg4(850, "write_record FI=%s SessId=%d Strm=%s len=%d\n", + FI_to_ascii(buf1, rec.FileIndex), rec.VolSessionId, + stream_to_ascii(buf2, rec.Stream, rec.FileIndex), rec.data_len); + + send_attrs_to_dir(jcr, &rec); + Dmsg0(650, "Enter bnet_get\n"); + } + Dmsg2(650, "End read loop with FD. JobFiles=%d Stat=%d\n", jcr->JobFiles, n); + + if (fd->is_error()) { + if (!jcr->is_job_canceled()) { + Dmsg1(350, "Network read error from FD. ERR=%s\n", fd->bstrerror()); + Jmsg1(jcr, M_FATAL, 0, _("Network error reading from FD. ERR=%s\n"), + fd->bstrerror()); + possible_incomplete_job(jcr, last_file_index); + } + ok = false; + break; + } + } + + qfd->wait_read_sock((ok == false) || jcr->is_job_canceled()); + free_GetMsg(qfd); + + if (eblock != NULL) { + free_pool_memory(eblock); + } + + /* Create Job status for end of session label */ + jcr->setJobStatus(ok?JS_Terminated:JS_ErrorTerminated); + + if (ok) { + /* Terminate connection with Client */ + fd->fsend(OK_append); + do_client_commands(jcr); /* finish dialog with Client */ + } else { + fd->fsend("3999 Failed append\n"); + } + + Dmsg1(200, "Write EOS label JobStatus=%c\n", jcr->JobStatus); + + /* + * Check if we can still write. 
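The header exchange described above is easy to miss in the flow: before each data stream the FD sends a line of three numbers, file_index, stream and stream_len, which do_append_data() parses with the "%ld %ld %lld" format. A self-contained parser sketch with a hypothetical helper name, using explicit temporaries so the format specifiers match the argument types exactly:

#include <cstdint>
#include <cstdio>

static bool parse_stream_header(const char *msg, int32_t &file_index,
                                int32_t &stream, uint64_t &stream_len)
{
   long fi, st;
   long long len;
   if (sscanf(msg, "%ld %ld %lld", &fi, &st, &len) != 3) {
      return false;                       // malformed header from the FD
   }
   file_index = (int32_t)fi;
   stream     = (int32_t)st;
   stream_len = (uint64_t)len;
   return true;
}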
This may not be the case + * if we are at the end of the tape or we got a fatal I/O error. + */ + dcr->set_ameta(); + if (ok || dev->can_write()) { + if (!dev->flush_before_eos(dcr)) { + /* Print only if ok and not cancelled to avoid spurious messages */ + if (ok && !jcr->is_job_canceled()) { + Jmsg2(jcr, M_FATAL, 0, _("Fatal append error on device %s: ERR=%s\n"), + dev->print_name(), dev->bstrerror()); + Dmsg0(100, _("Set ok=FALSE after write_block_to_device.\n")); + possible_incomplete_job(jcr, last_file_index); + } + jcr->setJobStatus(JS_ErrorTerminated); + ok = false; + } + if (!write_session_label(dcr, EOS_LABEL)) { + /* Print only if ok and not cancelled to avoid spurious messages */ + if (ok && !jcr->is_job_canceled()) { + Jmsg1(jcr, M_FATAL, 0, _("Error writing end session label. ERR=%s\n"), + dev->bstrerror()); + possible_incomplete_job(jcr, last_file_index); + } + jcr->setJobStatus(JS_ErrorTerminated); + ok = false; + } + /* Flush out final partial block of this session */ + Dmsg1(200, "=== Flush adata=%d last block.\n", dcr->block->adata); + ASSERT(!dcr->block->adata); + if (!dcr->write_final_block_to_device()) { + /* Print only if ok and not cancelled to avoid spurious messages */ + if (ok && !jcr->is_job_canceled()) { + Jmsg2(jcr, M_FATAL, 0, _("Fatal append error on device %s: ERR=%s\n"), + dev->print_name(), dev->bstrerror()); + Dmsg0(100, _("Set ok=FALSE after write_final_block_to_device.\n")); + possible_incomplete_job(jcr, last_file_index); + } + jcr->setJobStatus(JS_ErrorTerminated); + ok = false; + } + } + flush_jobmedia_queue(jcr); + if (!ok && !jcr->is_JobStatus(JS_Incomplete)) { + discard_data_spool(dcr); + } else { + /* Note: if commit is OK, the device will remain blocked */ + commit_data_spool(dcr); + } + + /* + * Don't use time_t for job_elapsed as time_t can be 32 or 64 bits, + * and the subsequent Jmsg() editing will break + */ + int32_t job_elapsed = time(NULL) - jcr->run_time; + + if (job_elapsed <= 0) { + job_elapsed = 1; + } + + Jmsg(dcr->jcr, M_INFO, 0, _("Elapsed time=%02d:%02d:%02d, Transfer rate=%s Bytes/second\n"), + job_elapsed / 3600, job_elapsed % 3600 / 60, job_elapsed % 60, + edit_uint64_with_suffix(jcr->JobBytes / job_elapsed, ec)); + + /* + * Release the device -- and send final Vol info to DIR + * and unlock it. + */ + release_device(dcr); + + if ((!ok || jcr->is_job_canceled()) && !jcr->is_JobStatus(JS_Incomplete)) { + discard_attribute_spool(jcr); + } else { + commit_attribute_spool(jcr); + } + + jcr->sendJobStatus(); /* update director */ + + Dmsg1(100, "return from do_append_data() ok=%d\n", ok); + return ok; +} + + +/* Send attributes and digest to Director for Catalog */ +bool send_attrs_to_dir(JCR *jcr, DEV_RECORD *rec) +{ + if (rec->maskedStream == STREAM_UNIX_ATTRIBUTES || + rec->maskedStream == STREAM_UNIX_ATTRIBUTES_EX || + rec->maskedStream == STREAM_RESTORE_OBJECT || + crypto_digest_stream_type(rec->maskedStream) != CRYPTO_DIGEST_NONE) { + if (!jcr->no_attributes) { + BSOCK *dir = jcr->dir_bsock; + if (are_attributes_spooled(jcr)) { + dir->set_spooling(); + } + Dmsg1(850, "Send attributes to dir. FI=%d\n", rec->FileIndex); + if (!dir_update_file_attributes(jcr->dcr, rec)) { + Jmsg(jcr, M_FATAL, 0, _("Error updating file attributes. 
ERR=%s\n"), + dir->bstrerror()); + dir->clear_spooling(); + return false; + } + dir->clear_spooling(); + } + } + return true; +} diff --git a/src/stored/askdir.c b/src/stored/askdir.c new file mode 100644 index 00000000..05d0a1f9 --- /dev/null +++ b/src/stored/askdir.c @@ -0,0 +1,985 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Subroutines to handle Catalog reqests sent to the Director + * Reqests/commands from the Director are handled in dircmd.c + * + * Kern Sibbald, December 2000 + */ + +#include "bacula.h" /* pull in global headers */ +#include "stored.h" /* pull in Storage Deamon headers */ + +static const int dbglvl = 200; + +/* Requests sent to the Director */ +static char Find_media[] = "CatReq JobId=%ld FindMedia=%d pool_name=%s media_type=%s vol_type=%d\n"; +static char Get_Vol_Info[] = "CatReq JobId=%ld GetVolInfo VolName=%s write=%d\n"; +static char Update_media[] = "CatReq JobId=%ld UpdateMedia VolName=%s" + " VolJobs=%u VolFiles=%u VolBlocks=%u VolBytes=%s VolABytes=%s" + " VolHoleBytes=%s VolHoles=%u VolMounts=%u" + " VolErrors=%u VolWrites=%u MaxVolBytes=%s EndTime=%s VolStatus=%s" + " Slot=%d relabel=%d InChanger=%d VolReadTime=%s VolWriteTime=%s" + " VolFirstWritten=%s VolType=%u VolParts=%d VolCloudParts=%d" + " LastPartBytes=%lld Enabled=%d Recycle=%d\n"; +static char Create_jobmedia[] = "CatReq JobId=%ld CreateJobMedia\n"; +static char FileAttributes[] = "UpdCat JobId=%ld FileAttributes "; + +/* Responses received from the Director */ +static char OK_media[] = "1000 OK VolName=%127s VolJobs=%u VolFiles=%lu" + " VolBlocks=%lu VolBytes=%lld VolABytes=%lld" + " VolHoleBytes=%lld VolHoles=%lu VolMounts=%lu" + " VolErrors=%lu VolWrites=%lu" + " MaxVolBytes=%lld VolCapacityBytes=%lld VolStatus=%20s" + " Slot=%ld MaxVolJobs=%lu MaxVolFiles=%lu InChanger=%ld" + " VolReadTime=%lld VolWriteTime=%lld EndFile=%lu EndBlock=%lu" + " VolType=%lu LabelType=%ld MediaId=%lld ScratchPoolId=%lld" + " VolParts=%d VolCloudParts=%d LastPartBytes=%lld Enabled=%d" + " Recycle=%d\n"; + + +static char OK_create[] = "1000 OK CreateJobMedia\n"; + +static bthread_mutex_t vol_info_mutex = BTHREAD_MUTEX_PRIORITY(PRIO_SD_VOL_INFO); + +#ifdef needed + +Note: if you turn this on, be sure to add the Recycle Flag + +static char Device_update[] = "DevUpd JobId=%ld device=%s " + "append=%d read=%d num_writers=%d " + "open=%d labeled=%d offline=%d " + "reserved=%d max_writers=%d " + "autoselect=%d autochanger=%d " + "enabled=%d " + "changer_name=%s media_type=%s volume_name=%s\n"; + + +/** Send update information about a device to Director */ +bool dir_update_device(JCR *jcr, DEVICE *dev) +{ + BSOCK *dir = jcr->dir_bsock; + POOL_MEM dev_name, VolumeName, MediaType, ChangerName; + DEVRES *device = dev->device; + bool ok; + + pm_strcpy(dev_name, device->hdr.name); + bash_spaces(dev_name); + if (dev->is_labeled()) { + pm_strcpy(VolumeName, dev->VolHdr.VolumeName); + } else { + pm_strcpy(VolumeName, "*"); + } + 
bash_spaces(VolumeName); + pm_strcpy(MediaType, device->media_type); + bash_spaces(MediaType); + if (device->changer_res) { + pm_strcpy(ChangerName, device->changer_res->hdr.name); + bash_spaces(ChangerName); + } else { + pm_strcpy(ChangerName, "*"); + } + ok = dir->fsend(Device_update, + jcr->JobId, + dev_name.c_str(), + dev->can_append()!=0, + dev->can_read()!=0, dev->num_writers, + dev->is_open()!=0, dev->is_labeled()!=0, + dev->is_offline()!=0, dev->reserved_device, + dev->is_tape()?100000:1, + dev->autoselect, 0, + dev->enabled, + ChangerName.c_str(), MediaType.c_str(), VolumeName.c_str()); + Dmsg1(dbglvl, ">dird: %s\n", dir->msg); + return ok; +} + +bool dir_update_changer(JCR *jcr, AUTOCHANGER *changer) +{ + BSOCK *dir = jcr->dir_bsock; + POOL_MEM dev_name, MediaType; + DEVRES *device; + bool ok; + + pm_strcpy(dev_name, changer->hdr.name); + bash_spaces(dev_name); + device = (DEVRES *)changer->device->first(); + pm_strcpy(MediaType, device->media_type); + bash_spaces(MediaType); + /* This is mostly to indicate that we are here */ + ok = dir->fsend(Device_update, + jcr->JobId, + dev_name.c_str(), /* Changer name */ + 0, 0, 0, /* append, read, num_writers */ + 0, 0, 0, /* is_open, is_labeled, offline */ + 0, 0, /* reserved, max_writers */ + 0, /* Autoselect */ + 0, /* Enabled */ + changer->device->size(), /* Number of devices */ + "0", /* PoolId */ + "*", /* ChangerName */ + MediaType.c_str(), /* MediaType */ + "*"); /* VolName */ + Dmsg1(dbglvl, ">dird: %s\n", dir->msg); + return ok; +} +#endif + + +static AskDirHandler *askdir_handler = NULL; /* must be true when inside a "btools" */ + +/* + * btools must call this function, to modify behavior of some functions here + */ +AskDirHandler *init_askdir_handler(AskDirHandler *new_askdir_handler) +{ + AskDirHandler *old = askdir_handler; + askdir_handler = new_askdir_handler; + return old; +} + +/* + * Alternate function used by btools + */ +bool AskDirHandler::dir_ask_sysop_to_mount_volume(DCR *dcr, bool /*writing*/) +{ + DEVICE *dev = dcr->dev; + fprintf(stderr, _("Mount Volume \"%s\" on device %s and press return when ready: "), + dcr->VolumeName, dev->print_name()); + dev->close(dcr); + getchar(); + return true; +} + +bool AskDirHandler::dir_get_volume_info(DCR *dcr, const char *VolumeName, enum get_vol_info_rw writing) +{ + Dmsg0(100, "Fake dir_get_volume_info\n"); + dcr->setVolCatName(VolumeName); + Dmsg2(500, "Vol=%s VolType=%d\n", dcr->getVolCatName(), dcr->VolCatInfo.VolCatType); + return 1; +} + +/** + * Send current JobStatus to Director + */ +bool dir_send_job_status(JCR *jcr) +{ + if (askdir_handler) { + return askdir_handler->dir_send_job_status(jcr); + } + + return jcr->sendJobStatus(); +} + +/** + * Common routine for: + * dir_get_volume_info() + * and + * dir_find_next_appendable_volume() + * + * NOTE!!! All calls to this routine must be protected by + * locking vol_info_mutex before calling it so that + * we don't have one thread modifying the parameters + * and another reading them. 
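+ *
+ * As a rough illustration only (the JobId and Volume values below are
+ * made up), the exchange handled by this routine looks like:
+ *
+ *   SD  -> DIR:  CatReq JobId=123 GetVolInfo VolName=Vol-0001 write=1
+ *   DIR -> SD:   1000 OK VolName=Vol-0001 VolJobs=3 VolFiles=7 ... Recycle=1
+ *
+ * The reply is parsed with sscanf() against OK_media above; all 31 fields
+ * must be matched or the Volume information is rejected.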
+ * + * Returns: true on success and vol info in dcr->VolCatInfo + * false on failure + */ +static bool do_get_volume_info(DCR *dcr) +{ + JCR *jcr = dcr->jcr; + BSOCK *dir = jcr->dir_bsock; + VOLUME_CAT_INFO vol; + int n; + int32_t Enabled, Recycle; + int32_t InChanger; + + dcr->setVolCatInfo(false); + if (dir->recv() <= 0) { + Dmsg0(dbglvl, "getvolname error bnet_recv\n"); + Mmsg(jcr->errmsg, _("Network error on bnet_recv in req_vol_info.\n")); + return false; + } + memset(&vol, 0, sizeof(vol)); + n = sscanf(dir->msg, OK_media, vol.VolCatName, + &vol.VolCatJobs, &vol.VolCatFiles, + &vol.VolCatBlocks, &vol.VolCatAmetaBytes, + &vol.VolCatAdataBytes, &vol.VolCatHoleBytes, + &vol.VolCatHoles, &vol.VolCatMounts, &vol.VolCatErrors, + &vol.VolCatWrites, &vol.VolCatMaxBytes, + &vol.VolCatCapacityBytes, vol.VolCatStatus, + &vol.Slot, &vol.VolCatMaxJobs, &vol.VolCatMaxFiles, + &InChanger, &vol.VolReadTime, &vol.VolWriteTime, + &vol.EndFile, &vol.EndBlock, &vol.VolCatType, + &vol.LabelType, &vol.VolMediaId, &vol.VolScratchPoolId, + &vol.VolCatParts, &vol.VolCatCloudParts, + &vol.VolLastPartBytes, &Enabled, &Recycle); + Dmsg2(dbglvl, "<dird n=%d %s", n, dir->msg); + if (n != 31) { + Dmsg1(dbglvl, "get_volume_info failed: ERR=%s", dir->msg); + /* + * Note, we can get an error here either because there is + * a comm problem, or if the volume is not a suitable + * volume to use, so do not issue a Jmsg() here, do it + * in the calling routine. + */ + Mmsg(jcr->errmsg, _("Error getting Volume info: %s"), dir->msg); + return false; + } + vol.InChanger = InChanger; /* bool in structure */ + vol.VolEnabled = Enabled; /* bool in structure */ + vol.VolRecycle = Recycle; /* bool in structure */ + vol.is_valid = true; + vol.VolCatBytes = vol.VolCatAmetaBytes + vol.VolCatAdataBytes; + unbash_spaces(vol.VolCatName); + bstrncpy(dcr->VolumeName, vol.VolCatName, sizeof(dcr->VolumeName)); + dcr->VolCatInfo = vol; /* structure assignment */ + + Dmsg3(dbglvl, "do_request_vol_info return true slot=%d Volume=%s MediaId=%lld\n", + dcr->VolCatInfo.Slot, dcr->VolCatInfo.VolCatName, dcr->VolCatInfo.VolMediaId); + Dmsg5(dbglvl, "Dir returned VolCatAmetaBytes=%lld VolCatAdataBytes=%lld Status=%s Vol=%s MediaId=%lld\n", + dcr->VolCatInfo.VolCatAmetaBytes, dcr->VolCatInfo.VolCatAdataBytes, + dcr->VolCatInfo.VolCatStatus, dcr->VolCatInfo.VolCatName, + dcr->VolCatInfo.VolMediaId); + return true; +} + + +/** + * Get Volume info for a specific volume from the Director's Database + * + * Returns: true on success (Director guarantees that Pool and MediaType + * are correct and VolStatus==Append or + * VolStatus==Recycle) + * false on failure + * + * Volume information returned in dcr->VolCatInfo + */ +bool dir_get_volume_info(DCR *dcr, + const char *VolumeName, + enum get_vol_info_rw writing) +{ + if (askdir_handler) { + return askdir_handler->dir_get_volume_info(dcr, VolumeName, writing); + } + + JCR *jcr = dcr->jcr; + BSOCK *dir = jcr->dir_bsock; + + P(vol_info_mutex); + dcr->setVolCatName(VolumeName); + bash_spaces(dcr->getVolCatName()); + dir->fsend(Get_Vol_Info, jcr->JobId, dcr->getVolCatName(), + writing==GET_VOL_INFO_FOR_WRITE?1:0); + Dmsg1(dbglvl, ">dird %s", dir->msg); + unbash_spaces(dcr->getVolCatName()); + bool ok = do_get_volume_info(dcr); + V(vol_info_mutex); + return ok; +} + + + +/** + * Get info on the next appendable volume in the Director's database + * + * Returns: true on success dcr->VolumeName is volume + * reserve_volume() called on Volume name + * false on failure dcr->VolumeName[0] == 0 + * also sets dcr->found_in_use if at least one 
+ * in use volume was found. + * + * Volume information returned in dcr + * + */ +bool dir_find_next_appendable_volume(DCR *dcr) +{ + /* SD tools setup a handler because they have no connection to Dir */ + if (askdir_handler) { + return askdir_handler->dir_find_next_appendable_volume(dcr); + } + + JCR *jcr = dcr->jcr; + BSOCK *dir = jcr->dir_bsock; + bool rtn; + char lastVolume[MAX_NAME_LENGTH]; + int nb_retry; + + /* + * Calculate the number of possible drives + 30 for the size of the + * Volume list to consider. + */ + nb_retry = ((rblist *)res_head[R_DEVICE-r_first]->res_list)->size() + 30; + Dmsg2(dbglvl, "dir_find_next_appendable_volume: reserved=%d Vol=%s\n", + dcr->is_reserved(), dcr->VolumeName); + Mmsg(jcr->errmsg, "Unknown error\n"); + + /* + * Try the thirty oldest or most available volumes. Note, + * the most available could already be mounted on another + * drive, so we continue looking for a not in use Volume. + */ + lock_volumes(); + P(vol_info_mutex); + dcr->clear_found_in_use(); + lastVolume[0] = 0; + for (int vol_index=1; vol_index < nb_retry; vol_index++) { + bash_spaces(dcr->media_type); + bash_spaces(dcr->pool_name); + dir->fsend(Find_media, jcr->JobId, vol_index, dcr->pool_name, dcr->media_type, + dcr->dev->dev_type); + unbash_spaces(dcr->media_type); + unbash_spaces(dcr->pool_name); + Dmsg1(dbglvl, ">dird %s", dir->msg); + if (do_get_volume_info(dcr)) { + /* Give up if we get the same volume name twice */ + if (lastVolume[0] && strcmp(lastVolume, dcr->VolumeName) == 0) { + Mmsg(jcr->errmsg, "Director returned same volume name=%s twice.\n", + lastVolume); + Dmsg1(dbglvl, "Got same vol = %s\n", lastVolume); + break; + } + /* If VolCatAdataBytes, we have ALIGNED_DEV */ + if (dcr->VolCatInfo.VolCatType == 0 && dcr->VolCatInfo.VolCatAdataBytes != 0) { + dcr->VolCatInfo.VolCatType = B_ALIGNED_DEV; + } + /* + * If we have VolType and we are disk or aligned, the VolType must match + */ + /* ***FIXME*** find better way to handle voltype */ + if (dcr->VolCatInfo.VolCatType != 0 && + (dcr->dev->dev_type == B_FILE_DEV || dcr->dev->dev_type == B_ALIGNED_DEV || + dcr->dev->dev_type == B_CLOUD_DEV) && + dcr->dev->dev_type != (int)dcr->VolCatInfo.VolCatType) { + Dmsg2(000, "Skip vol. Wanted VolType=%d Got=%d\n", dcr->dev->dev_type, dcr->VolCatInfo.VolCatType); + continue; + } + bstrncpy(lastVolume, dcr->VolumeName, sizeof(lastVolume)); + if (dcr->can_i_write_volume()) { + Dmsg1(dbglvl, "Call reserve_volume for write. Vol=%s\n", dcr->VolumeName); + if (reserve_volume(dcr, dcr->VolumeName) == NULL) { + Dmsg1(dbglvl, "%s", jcr->errmsg); + if (dcr->dev->must_wait()) { + rtn = false; + dcr->VolumeName[0] = 0; + goto get_out; + } + continue; + } + Dmsg1(dbglvl, "dir_find_next_appendable_volume return true. vol=%s\n", + dcr->VolumeName); + rtn = true; + goto get_out; + } else { + Mmsg(jcr->errmsg, "Volume %s is in use.\n", dcr->VolumeName); + Dmsg1(dbglvl, "Volume %s is in use.\n", dcr->VolumeName); + /* If volume is not usable, it is in use by someone else */ + dcr->set_found_in_use(); + continue; + } + } + Dmsg2(dbglvl, "No vol. index %d return false. dev=%s\n", vol_index, + dcr->dev->print_name()); + break; + } + rtn = false; + dcr->VolumeName[0] = 0; + +get_out: + V(vol_info_mutex); + unlock_volumes(); + if (!rtn && dcr->VolCatInfo.VolScratchPoolId != 0) { + Jmsg(jcr, M_WARNING, 0, "%s", jcr->errmsg); + Dmsg2(000, "!!!!!!!!! 
Volume=%s rejected ScratchPoolId=%lld\n", dcr->VolumeName, + dcr->VolCatInfo.VolScratchPoolId); + Dmsg1(000, "%s", jcr->errmsg); + //} else { + // Dmsg3(000, "Rtn=%d Volume=%s ScratchPoolId=%lld\n", rtn, dcr->VolumeName, + // dcr->VolCatInfo.VolScratchPoolId); + } + return rtn; +} + + +/* + * After writing a Volume, send the updated statistics + * back to the director. The information comes from the + * dev record. + */ +bool dir_update_volume_info(DCR *dcr, bool label, bool update_LastWritten, + bool use_dcr_only) +{ + if (askdir_handler) { + return askdir_handler->dir_update_volume_info(dcr, label, update_LastWritten, use_dcr_only); + } + + JCR *jcr = dcr->jcr; + BSOCK *dir = jcr->dir_bsock; + DEVICE *dev = dcr->ameta_dev; + VOLUME_CAT_INFO vol; + char ed1[50], ed2[50], ed3[50], ed4[50], ed5[50], ed6[50], ed7[50], ed8[50]; + int InChanger, Enabled, Recycle; + bool ok = false; + POOL_MEM VolumeName; + + /* If system job, do not update catalog, except if we explicitly force it. */ + if (jcr->getJobType() == JT_SYSTEM && + !dcr->force_update_volume_info) { + return true; + } + + /* Lock during Volume update */ + P(vol_info_mutex); + dev->Lock_VolCatInfo(); + + if (use_dcr_only) { + vol = dcr->VolCatInfo; /* structure assignment */ + } else { + /* Just labeled or relabeled the tape */ + if (label) { + dev->setVolCatStatus("Append"); + } + vol = dev->VolCatInfo; /* structure assignment */ + } + + /* This happens when nothing to update after fixup_device ... */ + if (vol.VolCatName[0] == 0) { + Dmsg0(50, "Volume Name is NULL\n"); + goto bail_out; + } + Dmsg4(100, "Update cat VolBytes=%lld VolABytes=%lld Status=%s Vol=%s\n", + vol.VolCatAmetaBytes, vol.VolCatAdataBytes, vol.VolCatStatus, vol.VolCatName); +// if (update_LastWritten) { + vol.VolLastWritten = time(NULL); +// } + /* worm cannot be recycled, ensure catalog correct */ + if (dev->is_worm() && vol.VolRecycle) { + Jmsg(jcr, M_INFO, 0, _("WORM cassette detected: setting Recycle=No on Volume=\"%s\"\n"), vol.VolCatName); + vol.VolRecycle = false; + } + pm_strcpy(VolumeName, vol.VolCatName); + bash_spaces(VolumeName); + InChanger = vol.InChanger; + Enabled = vol.VolEnabled; + Recycle = vol.VolRecycle; + /* Insanity test */ + if (vol.VolCatHoleBytes > (((uint64_t)2)<<60)) { + Pmsg1(010, "VolCatHoleBytes too big: %lld. Reset to zero.\n", + vol.VolCatHoleBytes); + vol.VolCatHoleBytes = 0; + } + /* Set device type where this Volume used */ + if (vol.VolCatType == 0) { + vol.VolCatType = dev->dev_type; + } + dir->fsend(Update_media, jcr->JobId, + VolumeName.c_str(), vol.VolCatJobs, vol.VolCatFiles, + vol.VolCatBlocks, edit_uint64(vol.VolCatAmetaBytes, ed1), + edit_uint64(vol.VolCatAdataBytes, ed2), + edit_uint64(vol.VolCatHoleBytes, ed3), + vol.VolCatHoles, vol.VolCatMounts, vol.VolCatErrors, + vol.VolCatWrites, edit_uint64(vol.VolCatMaxBytes, ed4), + edit_uint64(vol.VolLastWritten, ed5), + vol.VolCatStatus, vol.Slot, label, + InChanger, /* bool in structure */ + edit_int64(vol.VolReadTime, ed6), + edit_int64(vol.VolWriteTime, ed7), + edit_uint64(vol.VolFirstWritten, ed8), + vol.VolCatType, + vol.VolCatParts, + vol.VolCatCloudParts, + vol.VolLastPartBytes, + Enabled, + Recycle); + Dmsg1(100, ">dird %s", dir->msg); + + /* Do not lock device here because it may be locked from label */ + if (!jcr->is_canceled()) { + /* + * We sent info directly from dev to the Director. 
+ * What the Director sends back is first read into + * the dcr with do_get_volume_info() + */ + if (!do_get_volume_info(dcr)) { + Jmsg(jcr, M_FATAL, 0, "%s", jcr->errmsg); + Dmsg2(dbglvl, _("Didn't get vol info vol=%s: ERR=%s"), + vol.VolCatName, jcr->errmsg); + goto bail_out; + } + Dmsg1(100, "get_volume_info() %s", dir->msg); + + /* Update dev Volume info in case something changed (e.g. expired) */ + if (!use_dcr_only) { + dev->VolCatInfo.Slot = dcr->VolCatInfo.Slot; + bstrncpy(dev->VolCatInfo.VolCatStatus, dcr->VolCatInfo.VolCatStatus, sizeof(vol.VolCatStatus)); + dev->VolCatInfo.VolCatAdataBytes = dcr->VolCatInfo.VolCatAdataBytes; + dev->VolCatInfo.VolCatAmetaBytes = dcr->VolCatInfo.VolCatAmetaBytes; + dev->VolCatInfo.VolCatHoleBytes = dcr->VolCatInfo.VolCatHoleBytes; + dev->VolCatInfo.VolCatHoles = dcr->VolCatInfo.VolCatHoles; + dev->VolCatInfo.VolCatPadding = dcr->VolCatInfo.VolCatPadding; + dev->VolCatInfo.VolCatAmetaPadding = dcr->VolCatInfo.VolCatAmetaPadding; + dev->VolCatInfo.VolCatAdataPadding = dcr->VolCatInfo.VolCatAdataPadding; + dev->VolCatInfo.VolCatFiles = dcr->VolCatInfo.VolCatFiles; + dev->VolCatInfo.VolCatBytes = dcr->VolCatInfo.VolCatBytes; + dev->VolCatInfo.VolCatMounts = dcr->VolCatInfo.VolCatMounts; + dev->VolCatInfo.VolCatJobs = dcr->VolCatInfo.VolCatJobs; + dev->VolCatInfo.VolCatFiles = dcr->VolCatInfo.VolCatFiles; + dev->VolCatInfo.VolCatRecycles = dcr->VolCatInfo.VolCatRecycles; + dev->VolCatInfo.VolCatWrites = dcr->VolCatInfo.VolCatWrites; + dev->VolCatInfo.VolCatReads = dcr->VolCatInfo.VolCatReads; + dev->VolCatInfo.VolEnabled = dcr->VolCatInfo.VolEnabled; + dev->VolCatInfo.VolRecycle = dcr->VolCatInfo.VolRecycle; + } + ok = true; + } + +bail_out: + dev->Unlock_VolCatInfo(); + V(vol_info_mutex); + return ok; +} + +struct JOBMEDIA_ITEM { + dlink link; + int64_t VolMediaId; + uint64_t StartAddr; + uint64_t EndAddr; + uint32_t VolFirstIndex; + uint32_t VolLastIndex; + uint32_t StartFile; + uint32_t EndFile; + uint32_t StartBlock; + uint32_t EndBlock; +}; + +void create_jobmedia_queue(JCR *jcr) +{ + JOBMEDIA_ITEM *item = NULL; + jcr->jobmedia_queue = New(dlist(item, &item->link)); +} + +bool flush_jobmedia_queue(JCR *jcr) +{ + if (askdir_handler) { + return askdir_handler->flush_jobmedia_queue(jcr); + } + + JOBMEDIA_ITEM *item; + BSOCK *dir = jcr->dir_bsock; + bool ok; + + if (!jcr->jobmedia_queue || jcr->jobmedia_queue->size() == 0) { + return true; /* should never happen */ + } + Dmsg1(400, "=== Flush jobmedia queue = %d\n", jcr->jobmedia_queue->size()); + + dir->fsend(Create_jobmedia, jcr->JobId); + foreach_dlist(item, jcr->jobmedia_queue) { + ok = dir->fsend("%u %u %u %u %u %u %lld\n", + item->VolFirstIndex, item->VolLastIndex, + item->StartFile, item->EndFile, + item->StartBlock, item->EndBlock, + item->VolMediaId); + Dmsg2(400, "sd->dir: ok=%d Jobmedia=%s", ok, dir->msg); + } + dir->signal(BNET_EOD); + jcr->jobmedia_queue->destroy(); + + if (dir->recv() <= 0) { + Dmsg0(dbglvl, "create_jobmedia error bnet_recv\n"); + Jmsg(jcr, M_FATAL, 0, _("Error creating JobMedia records: ERR=%s\n"), + dir->bstrerror()); + return false; + } + Dmsg1(210, "<dird %s", dir->msg); + if (strcmp(dir->msg, OK_create) != 0) { + Dmsg1(dbglvl, "Bad response from Dir: %s\n", dir->msg); + Jmsg(jcr, M_FATAL, 0, _("Error creating JobMedia records: %s\n"), dir->msg); + return false; + } + return true; +} + + +/* + * After writing a Volume, create the JobMedia record. 
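+ * The record is not sent immediately: it is queued in jcr->jobmedia_queue
+ * and pushed to the Director in batches by flush_jobmedia_queue() above.
+ * As a rough, made-up illustration, one flush sends something like:
+ *
+ *   CatReq JobId=123 CreateJobMedia
+ *   1 250 0 0 1 3204 42      (FirstIndex LastIndex StartFile EndFile
+ *                              StartBlock EndBlock MediaId)
+ *   (followed by a BNET_EOD signal)
+ *
+ * and expects "1000 OK CreateJobMedia" back.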
+ */ +bool dir_create_jobmedia_record(DCR *dcr, bool zero) +{ + if (askdir_handler) { + return askdir_handler->dir_create_jobmedia_record(dcr, zero); + } + + JCR *jcr = dcr->jcr; + BSOCK *dir = jcr->dir_bsock; + JOBMEDIA_ITEM *item; + bool ok = true;; + + if (!zero && !dcr->WroteVol) { + return true; + } + if (!zero && dcr->VolLastIndex == 0) { + Pmsg7(0/*dbglvl*/, "Discard: JobMedia Vol=%s wrote=%d MediaId=%lld FI=%lu LI=%lu StartAddr=%lld EndAddr=%lld\n", + dcr->VolumeName, dcr->WroteVol, dcr->VolMediaId, + dcr->VolFirstIndex, dcr->VolLastIndex, dcr->StartAddr, dcr->EndAddr); + return true; /* nothing written to the Volume */ + } + /* Throw out records where the start address is bigger than the end */ + if (!zero && dcr->StartAddr > dcr->EndAddr) { + Pmsg7(0/*dbglvl*/, "Discard: JobMedia Vol=%s wrote=%d MediaId=%lld FI=%lu LI=%lu StartAddr=%lld EndAddr=%lld\n", + dcr->VolumeName, dcr->WroteVol, dcr->VolMediaId, + dcr->VolFirstIndex, dcr->VolLastIndex, dcr->StartAddr, dcr->EndAddr); + return true; + } + + /* If system job, do not update catalog */ + if (jcr->getJobType() == JT_SYSTEM) { + return true; + } + + /* Throw out records where FI is zero -- i.e. nothing done */ + if (!zero && dcr->VolFirstIndex == 0 && + (dcr->StartAddr != 0 || dcr->EndAddr != 0)) { + Pmsg7(0/*dbglvl*/, "Discard: JobMedia Vol=%s wrote=%d MediaId=%lld FI=%lu LI=%lu StartAddr=%lld EndAddr=%lld\n", + dcr->VolumeName, dcr->WroteVol, dcr->VolMediaId, + dcr->VolFirstIndex, dcr->VolLastIndex, dcr->StartAddr, dcr->EndAddr); + return true; + } + + /* + * If this Job is incomplete, we need to backup the FileIndex + * to the last correctly saved file so that the JobMedia + * LastIndex is correct. + * + * Note: ***FIXME*** though it is not required, we probably + * should also keep a last EndFile and last EndBlock and + * reset them correctly too so that the JobMedia record is + * really correct. + */ + if (jcr->is_JobStatus(JS_Incomplete)) { + dcr->VolLastIndex = dir->get_lastFileIndex(); + Dmsg1(100, "======= Set FI=%ld\n", dcr->VolLastIndex); + } + + Dmsg7(100, "Queue JobMedia Vol=%s wrote=%d MediaId=%lld FI=%lu LI=%lu StartAddr=%lld EndAddr=%lld\n", + dcr->VolumeName, dcr->WroteVol, dcr->VolMediaId, + dcr->VolFirstIndex, dcr->VolLastIndex, dcr->StartAddr, dcr->EndAddr); + item = (JOBMEDIA_ITEM *)malloc(sizeof(JOBMEDIA_ITEM)); + if (zero) { + item->VolFirstIndex = item->VolLastIndex = 0; + item->StartFile = item->EndFile = 0; + item->StartBlock = item->EndBlock = 0; + item->StartAddr = item->EndAddr = 0; + item->VolMediaId = dcr->VolMediaId; + } else { + item->VolFirstIndex = dcr->VolFirstIndex; + item->VolLastIndex = dcr->VolLastIndex; + item->StartFile = (uint32_t)(dcr->StartAddr >> 32); + item->EndFile = (uint32_t)(dcr->EndAddr >> 32); + item->StartBlock = (uint32_t)dcr->StartAddr; + item->EndBlock = (uint32_t)dcr->EndAddr; + item->StartAddr = dcr->StartAddr; + item->EndAddr = dcr->EndAddr; + item->VolMediaId = dcr->VolMediaId; + } + jcr->jobmedia_queue->append(item); + /* Flush at 100 queue size of 100 jobmedia records */ + if (zero || jcr->jobmedia_queue->size() >= 100) { + ok = flush_jobmedia_queue(jcr); + } + + dcr->VolFirstIndex = dcr->VolLastIndex = 0; + dcr->StartAddr = dcr->EndAddr = 0; + dcr->VolMediaId = 0; + dcr->WroteVol = false; + return ok; +} + +/* + * Update File Attribute data + * We do the following: + * 1. expand the bsock buffer to be large enough + * 2. 
Write a "header" into the buffer with serialized data + * VolSessionId + * VolSeesionTime + * FileIndex + * Stream + * data length that follows + * start of raw byte data from the Device record. + * Note, this is primarily for Attribute data, but can + * also handle any device record. The Director must know + * the raw byte data format that is defined for each Stream. + * Now Restore Objects pass through here STREAM_RESTORE_OBJECT + */ +bool dir_update_file_attributes(DCR *dcr, DEV_RECORD *rec) +{ + if (askdir_handler) { + return askdir_handler->dir_update_file_attributes(dcr, rec); + } + + JCR *jcr = dcr->jcr; + BSOCK *dir = jcr->dir_bsock; + ser_declare; + +#ifdef NO_ATTRIBUTES_TEST + return true; +#endif + + dir->msg = check_pool_memory_size(dir->msg, sizeof(FileAttributes) + + MAX_NAME_LENGTH + sizeof(DEV_RECORD) + rec->data_len + 1); + dir->msglen = bsnprintf(dir->msg, sizeof(FileAttributes) + + MAX_NAME_LENGTH + 1, FileAttributes, jcr->JobId); + ser_begin(dir->msg + dir->msglen, 0); + ser_uint32(rec->VolSessionId); + ser_uint32(rec->VolSessionTime); + ser_int32(rec->FileIndex); + ser_int32(rec->Stream); + ser_uint32(rec->data_len); + ser_bytes(rec->data, rec->data_len); + dir->msglen = ser_length(dir->msg); + Dmsg1(1800, ">dird %s\n", dir->msg); /* Attributes */ + if (rec->maskedStream == STREAM_UNIX_ATTRIBUTES || + rec->maskedStream == STREAM_UNIX_ATTRIBUTES_EX) { + Dmsg2(1500, "==== set_data_end FI=%ld %s\n", rec->FileIndex, rec->data); + dir->set_data_end(rec->FileIndex); /* set offset of valid data */ + } + return dir->send(); +} + + +/** + * Request the sysop to create an appendable volume + * + * Entered with device blocked. + * Leaves with device blocked. + * + * Returns: true on success (operator issues a mount command) + * false on failure + * Note, must create dev->errmsg on error return. + * + * On success, dcr->VolumeName and dcr->VolCatInfo contain + * information on suggested volume, but this may not be the + * same as what is actually mounted. + * + * When we return with success, the correct tape may or may not + * actually be mounted. The calling routine must read it and + * verify the label. + */ +bool dir_ask_sysop_to_create_appendable_volume(DCR *dcr) +{ + if (askdir_handler) { + return askdir_handler->dir_ask_sysop_to_create_appendable_volume(dcr); + } + + int stat = W_TIMEOUT; + DEVICE *dev = dcr->dev; + JCR *jcr = dcr->jcr; + bool got_vol = false; + + if (job_canceled(jcr)) { + dev->poll = false; + return false; + } + Dmsg0(400, "enter dir_ask_sysop_to_create_appendable_volume\n"); + ASSERT(dev->blocked()); + for ( ;; ) { + if (job_canceled(jcr)) { + Mmsg(dev->errmsg, + _("Job %s canceled while waiting for mount on Storage Device \"%s\".\n"), + jcr->Job, dev->print_name()); + Jmsg(jcr, M_INFO, 0, "%s", dev->errmsg); + dev->poll = false; + return false; + } + got_vol = dir_find_next_appendable_volume(dcr); /* get suggested volume */ + if (got_vol) { + goto get_out; + } else { + dev->clear_wait(); + if (stat == W_TIMEOUT || stat == W_MOUNT) { + Mmsg(dev->errmsg, _( +"Job %s is waiting. 
Cannot find any appendable volumes.\n" +"Please use the \"label\" command to create a new Volume for:\n" +" Storage: %s\n" +" Pool: %s\n" +" Media type: %s\n"), + jcr->Job, + dev->print_name(), + dcr->pool_name, + dcr->media_type); + Jmsg(jcr, M_MOUNT, 0, "%s", dev->errmsg); + Dmsg1(dbglvl, "%s", dev->errmsg); + } + } + + jcr->sendJobStatus(JS_WaitMedia); + + stat = wait_for_sysop(dcr); + Dmsg1(dbglvl, "Back from wait_for_sysop stat=%d\n", stat); + if (dev->poll) { + Dmsg1(dbglvl, "Poll timeout in create append vol on device %s\n", dev->print_name()); + continue; + } + + if (stat == W_TIMEOUT) { + if (!double_dev_wait_time(dev)) { + Mmsg(dev->errmsg, _("Max time exceeded waiting to mount Storage Device %s for Job %s\n"), + dev->print_name(), jcr->Job); + Jmsg(jcr, M_FATAL, 0, "%s", dev->errmsg); + Dmsg1(dbglvl, "Gave up waiting on device %s\n", dev->print_name()); + dev->poll = false; + return false; /* exceeded maximum waits */ + } + continue; + } + if (stat == W_ERROR) { + berrno be; + Mmsg0(dev->errmsg, _("pthread error in mount_next_volume.\n")); + Jmsg(jcr, M_FATAL, 0, "%s", dev->errmsg); + dev->poll = false; + return false; + } + Dmsg1(dbglvl, "Someone woke me for device %s\n", dev->print_name()); + } + +get_out: + jcr->sendJobStatus(JS_Running); + Dmsg0(dbglvl, "leave dir_ask_sysop_to_create_appendable_volume\n"); + return true; +} + +/* + * Request to mount specific Volume + * + * Entered with device blocked and dcr->VolumeName is desired + * volume. + * Leaves with device blocked. + * + * Returns: true on success (operator issues a mount command) + * false on failure + * Note, must create dev->errmsg on error return. + * + */ +bool dir_ask_sysop_to_mount_volume(DCR *dcr, bool write_access) +{ + if (askdir_handler) { + return askdir_handler->dir_ask_sysop_to_mount_volume(dcr, write_access); + } + + int stat = W_TIMEOUT; + DEVICE *dev = dcr->dev; + JCR *jcr = dcr->jcr; + + Dmsg0(400, "enter dir_ask_sysop_to_mount_volume\n"); + if (!dcr->VolumeName[0]) { + Mmsg0(dev->errmsg, _("Cannot request another volume: no volume name given.\n")); + dev->poll = false; + return false; + } + + if (dcr->no_mount_request) { + Mmsg(dev->errmsg, _("The current operation doesn't support mount request\n")); + dev->poll = false; + return false; + } + + for ( ;; ) { + if (job_canceled(jcr)) { + Mmsg(dev->errmsg, + _("Job %s canceled while waiting for mount on Storage Device \"%s\".\n"), + jcr->Job, dev->print_name()); + Jmsg(jcr, M_INFO, 0, "%s", dev->errmsg); + dev->poll = false; + return false; + } + /* + * If we are not polling, and the wait timeout or the + * user explicitly did a mount, send him the message. + * Otherwise skip it. + */ + if (!dev->poll && (stat == W_TIMEOUT || stat == W_MOUNT)) { + const char *msg; + if (write_access) { + msg = _("%sPlease mount append Volume \"%s\" or label a new one for:\n" + " Job: %s\n" + " Storage: %s\n" + " Pool: %s\n" + " Media type: %s\n"); + } else { + msg = _("%sPlease mount read Volume \"%s\" for:\n" + " Job: %s\n" + " Storage: %s\n" + " Pool: %s\n" + " Media type: %s\n"); + } + Jmsg(jcr, M_MOUNT, 0, msg, + dev->is_nospace()?_("\n\nWARNING: device is full! 
Please add more disk space then ...\n\n"):"", + dcr->VolumeName, + jcr->Job, + dev->print_name(), + dcr->pool_name, + dcr->media_type); + Dmsg3(400, "Mount \"%s\" on device \"%s\" for Job %s\n", + dcr->VolumeName, dev->print_name(), jcr->Job); + } + + jcr->sendJobStatus(JS_WaitMount); + + stat = wait_for_sysop(dcr); /* wait on device */ + Dmsg1(100, "Back from wait_for_sysop stat=%d\n", stat); + if (dev->poll) { + Dmsg1(100, "Poll timeout in mount vol on device %s\n", dev->print_name()); + Dmsg1(100, "Blocked=%s\n", dev->print_blocked()); + goto get_out; + } + + if (stat == W_TIMEOUT) { + if (!double_dev_wait_time(dev)) { + Mmsg(dev->errmsg, _("Max time exceeded waiting to mount Storage Device %s for Job %s\n"), + dev->print_name(), jcr->Job); + Jmsg(jcr, M_FATAL, 0, "%s", dev->errmsg); + Dmsg1(400, "Gave up waiting on device %s\n", dev->print_name()); + dev->poll = false; + return false; /* exceeded maximum waits */ + } + continue; + } + if (stat == W_ERROR) { + berrno be; + Mmsg(dev->errmsg, _("pthread error in mount_volume\n")); + Jmsg(jcr, M_FATAL, 0, "%s", dev->errmsg); + dev->poll = false; + return false; + } + Dmsg1(100, "Someone woke me for device %s\n", dev->print_name()); + break; + } + +get_out: + if (job_canceled(jcr)) { + Mmsg(dev->errmsg, _("Job %s canceled while waiting for mount on Storage Device %s.\n"), + jcr->Job, dev->print_name()); + dev->poll = false; + return false; + } + + jcr->sendJobStatus(JS_Running); + Dmsg0(100, "leave dir_ask_sysop_to_mount_volume\n"); + return true; +} diff --git a/src/stored/authenticate.c b/src/stored/authenticate.c new file mode 100644 index 00000000..83255757 --- /dev/null +++ b/src/stored/authenticate.c @@ -0,0 +1,346 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Authenticate caller + * + * Written by Kern Sibbald, October 2000 + */ + + +#include "bacula.h" +#include "stored.h" + +extern STORES *me; /* our Global resource */ + +const int dbglvl = 50; + +static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; + +/* Version at end of Hello + * prior to 06Aug13 no version + * 1 06Aug13 - added comm line compression + * 2 13Dec13 - added api version to status command + */ +#define SD_VERSION 2 + + +/* + * Authenticate the Director + */ +bool authenticate_director(JCR* jcr) +{ + DIRRES *director = jcr->director; + int tls_local_need = BNET_TLS_NONE; + int tls_remote_need = BNET_TLS_NONE; + int compatible = true; /* require md5 compatible DIR */ + bool auth_success = false; + alist *verify_list = NULL; + BSOCK *dir = jcr->dir_bsock; + + /* TLS Requirement */ + if (director->tls_enable) { + if (director->tls_require) { + tls_local_need = BNET_TLS_REQUIRED; + } else { + tls_local_need = BNET_TLS_OK; + } + } + + if (director->tls_authenticate) { + tls_local_need = BNET_TLS_REQUIRED; + } + + if (director->tls_verify_peer) { + verify_list = director->tls_allowed_cns; + } + + /* Timeout authentication after 10 mins */ + btimer_t *tid = start_bsock_timer(dir, AUTH_TIMEOUT); + auth_success = cram_md5_challenge(dir, director->password, tls_local_need, compatible); + if (auth_success) { + auth_success = cram_md5_respond(dir, director->password, &tls_remote_need, &compatible); + if (!auth_success) { + Dmsg1(dbglvl, "cram_get_auth respond failed with Director %s\n", dir->who()); + } + } else { + Dmsg1(dbglvl, "cram_auth challenge failed with Director %s\n", dir->who()); + } + + if (!auth_success) { + Jmsg0(jcr, M_FATAL, 0, _("Incorrect password given by Director.\n" + "For help, please see: " MANUAL_AUTH_URL "\n")); + auth_success = false; + goto auth_fatal; + } + + /* Verify that the remote host is willing to meet our TLS requirements */ + if (tls_remote_need < tls_local_need && tls_local_need != BNET_TLS_OK && tls_remote_need != BNET_TLS_OK) { + Jmsg0(jcr, M_FATAL, 0, _("Authorization problem: Remote server did not" + " advertize required TLS support.\n")); + Dmsg2(dbglvl, "remote_need=%d local_need=%d\n", tls_remote_need, tls_local_need); + auth_success = false; + goto auth_fatal; + } + + /* Verify that we are willing to meet the remote host's requirements */ + if (tls_remote_need > tls_local_need && tls_local_need != BNET_TLS_OK && tls_remote_need != BNET_TLS_OK) { + Jmsg0(jcr, M_FATAL, 0, _("Authorization problem: Remote server requires TLS.\n")); + Dmsg2(dbglvl, "remote_need=%d local_need=%d\n", tls_remote_need, tls_local_need); + auth_success = false; + goto auth_fatal; + } + + if (tls_local_need >= BNET_TLS_OK && tls_remote_need >= BNET_TLS_OK) { + /* Engage TLS! Full Speed Ahead! */ + if (!bnet_tls_server(director->tls_ctx, dir, verify_list)) { + Jmsg(jcr, M_FATAL, 0, _("TLS negotiation failed with DIR at \"%s:%d\"\n"), + dir->host(), dir->port()); + auth_success = false; + goto auth_fatal; + } + if (director->tls_authenticate) { /* authenticate with tls only? 
*/ + dir->free_tls(); /* yes, shut it down */ + } + } + +auth_fatal: + stop_bsock_timer(tid); + jcr->director = director; + if (auth_success) { + return send_hello_ok(dir); + } + send_sorry(dir); + Dmsg1(dbglvl, "Unable to authenticate Director at %s.\n", dir->who()); + Jmsg1(jcr, M_ERROR, 0, _("Unable to authenticate Director at %s.\n"), dir->who()); + bmicrosleep(5, 0); + return false; +} + + +int authenticate_filed(JCR *jcr, BSOCK *fd, int FDVersion) +{ + int tls_local_need = BNET_TLS_NONE; + int tls_remote_need = BNET_TLS_NONE; + int compatible = true; /* require md5 compatible FD */ + bool auth_success = false; + alist *verify_list = NULL; + + /* TLS Requirement */ + if (me->tls_enable) { + if (me->tls_require) { + tls_local_need = BNET_TLS_REQUIRED; + } else { + tls_local_need = BNET_TLS_OK; + } + } + + if (me->tls_authenticate) { + tls_local_need = BNET_TLS_REQUIRED; + } + + if (me->tls_verify_peer) { + verify_list = me->tls_allowed_cns; + } + + /* Timeout authentication after 5 mins */ + btimer_t *tid = start_bsock_timer(fd, AUTH_TIMEOUT); + /* Challenge FD */ + Dmsg0(050, "Challenge FD\n"); + auth_success = cram_md5_challenge(fd, jcr->sd_auth_key, tls_local_need, compatible); + if (auth_success) { + /* Respond to his challenge */ + Dmsg0(050, "Respond to FD challenge\n"); + auth_success = cram_md5_respond(fd, jcr->sd_auth_key, &tls_remote_need, &compatible); + if (!auth_success) { + Dmsg1(dbglvl, "Respond cram-get-auth respond failed with FD: %s\n", fd->who()); + } + } else { + Dmsg1(dbglvl, "Challenge cram-auth failed with FD: %s\n", fd->who()); + } + + if (!auth_success) { + Jmsg(jcr, M_FATAL, 0, _("Incorrect authorization key from File daemon at %s rejected.\n" + "For help, please see: " MANUAL_AUTH_URL "\n"), + fd->who()); + auth_success = false; + goto auth_fatal; + } + + /* Verify that the remote host is willing to meet our TLS requirements */ + if (tls_remote_need < tls_local_need && tls_local_need != BNET_TLS_OK && tls_remote_need != BNET_TLS_OK) { + Jmsg(jcr, M_FATAL, 0, _("Authorization problem: Remote server did not" + " advertize required TLS support.\n")); + Dmsg2(dbglvl, "remote_need=%d local_need=%d\n", tls_remote_need, tls_local_need); + auth_success = false; + goto auth_fatal; + } + + /* Verify that we are willing to meet the remote host's requirements */ + if (tls_remote_need > tls_local_need && tls_local_need != BNET_TLS_OK && tls_remote_need != BNET_TLS_OK) { + Jmsg(jcr, M_FATAL, 0, _("Authorization problem: Remote server requires TLS.\n")); + Dmsg2(dbglvl, "remote_need=%d local_need=%d\n", tls_remote_need, tls_local_need); + auth_success = false; + goto auth_fatal; + } + + if (tls_local_need >= BNET_TLS_OK && tls_remote_need >= BNET_TLS_OK) { + /* Engage TLS! Full Speed Ahead! */ + if (!bnet_tls_server(me->tls_ctx, fd, verify_list)) { + Jmsg(jcr, M_FATAL, 0, _("TLS negotiation failed with FD at \"%s:%d\"\n"), + fd->host(), fd->port()); + auth_success = false; + goto auth_fatal; + } + if (me->tls_authenticate) { /* tls authenticate only? */ + fd->free_tls(); /* yes, shut it down */ + } + } + +auth_fatal: + stop_bsock_timer(tid); + if (!auth_success) { + Jmsg(jcr, M_FATAL, 0, _("Incorrect authorization key from File daemon at %s rejected.\n" + "For help, please see: " MANUAL_AUTH_URL "\n"), + fd->who()); + } + + /* Version 5 of the protocol is a bit special, it is used by both 6.0.0 + * Enterprise version and 7.0.x Community version, but do not support the + * same level of features. 
As nobody is using the 6.0.0 release, we can + * be pretty sure that the FD in version 5 is a community FD. + */ + if (auth_success && (FDVersion >= 9 || FDVersion == 5)) { + send_hello_ok(fd); + } + return auth_success; +} + +/* + * First prove our identity to the Storage daemon, then + * make him prove his identity. + */ +bool authenticate_storagedaemon(JCR *jcr) +{ + BSOCK *sd = jcr->store_bsock; + int tls_local_need = BNET_TLS_NONE; + int tls_remote_need = BNET_TLS_NONE; + int compatible = true; + bool auth_success = false; + int sd_version = 0; + + btimer_t *tid = start_bsock_timer(sd, AUTH_TIMEOUT); + + /* TLS Requirement */ + if (have_tls && me->tls_enable) { + if (me->tls_require) { + tls_local_need = BNET_TLS_REQUIRED; + } else { + tls_local_need = BNET_TLS_OK; + } + } + + if (me->tls_authenticate) { + tls_local_need = BNET_TLS_REQUIRED; + } + + if (job_canceled(jcr)) { + auth_success = false; /* force quick exit */ + goto auth_fatal; + } + + /* Respond to SD challenge */ + Dmsg0(050, "Respond to SD challenge\n"); + auth_success = cram_md5_respond(sd, jcr->sd_auth_key, &tls_remote_need, &compatible); + if (job_canceled(jcr)) { + auth_success = false; /* force quick exit */ + goto auth_fatal; + } + if (!auth_success) { + Dmsg1(dbglvl, "cram_respond failed for SD: %s\n", sd->who()); + } else { + /* Now challenge him */ + Dmsg0(050, "Challenge SD\n"); + auth_success = cram_md5_challenge(sd, jcr->sd_auth_key, tls_local_need, compatible); + if (!auth_success) { + Dmsg1(dbglvl, "cram_challenge failed for SD: %s\n", sd->who()); + } + } + + if (!auth_success) { + Jmsg(jcr, M_FATAL, 0, _("Authorization key rejected by Storage daemon.\n" + "Please see " MANUAL_AUTH_URL " for help.\n")); + goto auth_fatal; + } else { + Dmsg0(050, "Authorization with SD is OK\n"); + } + + /* Verify that the remote host is willing to meet our TLS requirements */ + if (tls_remote_need < tls_local_need && tls_local_need != BNET_TLS_OK && tls_remote_need != BNET_TLS_OK) { + Jmsg(jcr, M_FATAL, 0, _("Authorization problem: Remote server did not" + " advertize required TLS support.\n")); + Dmsg2(dbglvl, "remote_need=%d local_need=%d\n", tls_remote_need, tls_local_need); + auth_success = false; + goto auth_fatal; + } + + /* Verify that we are willing to meet the remote host's requirements */ + if (tls_remote_need > tls_local_need && tls_local_need != BNET_TLS_OK && tls_remote_need != BNET_TLS_OK) { + Jmsg(jcr, M_FATAL, 0, _("Authorization problem: Remote server requires TLS.\n")); + Dmsg2(dbglvl, "remote_need=%d local_need=%d\n", tls_remote_need, tls_local_need); + auth_success = false; + goto auth_fatal; + } + + if (tls_local_need >= BNET_TLS_OK && tls_remote_need >= BNET_TLS_OK) { + /* Engage TLS! Full Speed Ahead! */ + if (!bnet_tls_client(me->tls_ctx, sd, NULL)) { + Jmsg(jcr, M_FATAL, 0, _("TLS negotiation failed.\n")); + auth_success = false; + goto auth_fatal; + } + if (me->tls_authenticate) { /* tls authentication only? 
*/ + sd->free_tls(); /* yes, shutdown tls */ + } + } + if (sd->recv() <= 0) { + auth_success = false; + goto auth_fatal; + } + sscanf(sd->msg, "3000 OK Hello %d", &sd_version); + if (sd_version >= 1 && me->comm_compression) { + sd->set_compress(); + } else { + sd->clear_compress(); + Dmsg0(050, "*** No FD compression with SD\n"); + } + + /* At this point, we have successfully connected */ + +auth_fatal: + /* Destroy session key */ + memset(jcr->sd_auth_key, 0, strlen(jcr->sd_auth_key)); + stop_bsock_timer(tid); + /* Single thread all failures to avoid DOS */ + if (!auth_success) { + P(mutex); + bmicrosleep(6, 0); + V(mutex); + } + return auth_success; +} diff --git a/src/stored/autochanger.c b/src/stored/autochanger.c new file mode 100644 index 00000000..24ffacab --- /dev/null +++ b/src/stored/autochanger.c @@ -0,0 +1,808 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * Routines for handling the autochanger. + * + * Written by Kern Sibbald, August MMII + */ + +#include "bacula.h" /* pull in global headers */ +#include "stored.h" /* pull in Storage Deamon headers */ + +static const int dbglvl = 60; + +/* Forward referenced functions */ +static void lock_changer(DCR *dcr); +static void unlock_changer(DCR *dcr); +static bool unload_other_drive(DCR *dcr, int slot, bool writing); + +bool DCR::is_virtual_autochanger() +{ + return device->changer_command && + (device->changer_command[0] == 0 || + strcmp(device->changer_command, "/dev/null") == 0); +} + +/* Init all the autochanger resources found */ +bool init_autochangers() +{ + bool OK = true; + AUTOCHANGER *changer; + /* Ensure that the media_type for each device is the same */ + foreach_res(changer, R_AUTOCHANGER) { + DEVRES *device; + foreach_alist(device, changer->device) { + /* + * If the device does not have a changer name or changer command + * defined, use the one from the Autochanger resource + */ + if (!device->changer_name && changer->changer_name) { + device->changer_name = bstrdup(changer->changer_name); + } + if (!device->changer_command && changer->changer_command) { + device->changer_command = bstrdup(changer->changer_command); + } + if (!device->changer_name) { + Jmsg(NULL, M_ERROR, 0, + _("No Changer Name given for device %s. Cannot continue.\n"), + device->hdr.name); + OK = false; + } + if (!device->changer_command) { + Jmsg(NULL, M_ERROR, 0, + _("No Changer Command given for device %s. Cannot continue.\n"), + device->hdr.name); + OK = false; + } + } + } + return OK; +} + + +/* + * Called here to do an autoload using the autochanger, if + * configured, and if a Slot has been defined for this Volume. + * On success this routine loads the indicated tape, but the + * label is not read, so it must be verified. + * + * Note if dir is not NULL, it is the console requesting the + * autoload for labeling, so we respond directly to the + * dir bsock. 
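+ *
+ * A minimal caller sketch (hypothetical, error handling elided):
+ *
+ *   switch (autoload_device(dcr, true, NULL)) {
+ *   case 1:   ... loaded by the changer; the label must still be read/verified
+ *   case 0:   ... no changer available/usable; ask the operator to mount
+ *   default:  ... -1, hard autochanger error
+ *   }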
+ * + * Returns: 1 on success + * 0 on failure (no changer available) + * -1 on error on autochanger + */ +int autoload_device(DCR *dcr, bool writing, BSOCK *dir) +{ + JCR *jcr = dcr->jcr; + DEVICE * volatile dev = dcr->dev; + char *new_vol_name = dcr->VolumeName; + int slot; + int drive = dev->drive_index; + int rtn_stat = -1; /* error status */ + POOLMEM *changer; + + if (!dev->is_autochanger()) { + Dmsg1(dbglvl, "Device %s is not an autochanger\n", dev->print_name()); + return 0; + } + + /* An empty ChangerCommand => virtual disk autochanger */ + if (dcr->is_virtual_autochanger()) { + Dmsg0(dbglvl, "ChangerCommand=0, virtual disk changer\n"); + return 1; /* nothing to load */ + } + + slot = dcr->VolCatInfo.InChanger ? dcr->VolCatInfo.Slot : 0; + /* + * Handle autoloaders here. If we cannot autoload it, we + * will return 0 so that the sysop will be asked to load it. + */ + if (writing && slot <= 0) { + if (dir) { + return 0; /* For user, bail out right now */ + } + /* ***FIXME*** this really should not be here */ + if (dir_find_next_appendable_volume(dcr)) { + slot = dcr->VolCatInfo.InChanger ? dcr->VolCatInfo.Slot : 0; + } else { + slot = 0; + dev->clear_wait(); + } + } + Dmsg4(dbglvl, "Want slot=%d drive=%d InChgr=%d Vol=%s\n", + dcr->VolCatInfo.Slot, drive, + dcr->VolCatInfo.InChanger, dcr->getVolCatName()); + + changer = get_pool_memory(PM_FNAME); + if (slot <= 0) { + /* Suppress info when polling */ + if (!dev->poll) { + Jmsg(jcr, M_INFO, 0, _("No slot defined in catalog (slot=%d) for Volume \"%s\" on %s.\n"), + slot, dcr->getVolCatName(), dev->print_name()); + Jmsg(jcr, M_INFO, 0, _("Cartridge change or \"update slots\" may be required.\n")); + } + rtn_stat = 0; + } else if (!dcr->device->changer_name) { + /* Suppress info when polling */ + if (!dev->poll) { + Jmsg(jcr, M_INFO, 0, _("No \"Changer Device\" for %s. Manual load of Volume may be required.\n"), + dev->print_name()); + } + rtn_stat = 0; + } else if (!dcr->device->changer_command) { + /* Suppress info when polling */ + if (!dev->poll) { + Jmsg(jcr, M_INFO, 0, _("No \"Changer Command\" for %s. 
Manual load of Volume may be requird.\n"), + dev->print_name()); + } + rtn_stat = 0; + } else { + /* Attempt to load the Volume */ + uint32_t timeout = dcr->device->max_changer_wait; + int loaded, status; + + loaded = get_autochanger_loaded_slot(dcr); + if (loaded < 0) { /* Autochanger error, try again */ + loaded = get_autochanger_loaded_slot(dcr); + } + Dmsg2(dbglvl, "Found loaded=%d drive=%d\n", loaded, drive); + + if (loaded <= 0 || loaded != slot) { + POOL_MEM results(PM_MESSAGE); + + /* Unload anything in our drive */ + if (!unload_autochanger(dcr, loaded)) { + goto bail_out; + } + + /* Make sure desired slot is unloaded */ + if (!unload_other_drive(dcr, slot, writing)) { + goto bail_out; + } + + /* + * Load the desired cassette + */ + lock_changer(dcr); + Dmsg2(dbglvl, "Doing changer load slot %d %s\n", slot, dev->print_name()); + Jmsg(jcr, M_INFO, 0, + _("3304 Issuing autochanger \"load Volume %s, Slot %d, Drive %d\" command.\n"), + new_vol_name, slot, drive); + Dmsg3(dbglvl, + "3304 Issuing autochanger \"load Volume %s, Slot %d, Drive %d\" command.\n", + new_vol_name, slot, drive); + + dcr->VolCatInfo.Slot = slot; /* slot to be loaded */ + changer = edit_device_codes(dcr, changer, dcr->device->changer_command, "load"); + dev->close(dcr); + Dmsg1(dbglvl, "Run program=%s\n", changer); + status = run_program_full_output(changer, timeout, results.addr()); + if (status == 0) { + Jmsg(jcr, M_INFO, 0, _("3305 Autochanger \"load Volume %s, Slot %d, Drive %d\", status is OK.\n"), + new_vol_name, slot, drive); + Dmsg3(dbglvl, "OK: load volume %s, slot %d, drive %d.\n", new_vol_name, slot, drive); + bstrncpy(dev->LoadedVolName, new_vol_name, sizeof(dev->LoadedVolName)); + dev->set_slot(slot); /* set currently loaded slot */ + if (dev->vol) { + /* We just swapped this Volume so it cannot be swapping any more */ + dev->vol->clear_swapping(); + } + } else { + berrno be; + be.set_errno(status); + Dmsg5(dbglvl, "Error: load Volume %s, Slot %d, Drive %d, bad stats=%s.\nResults=%s\n", + new_vol_name, slot, drive, + be.bstrerror(), results.c_str()); + Jmsg(jcr, M_FATAL, 0, _("3992 Bad autochanger \"load Volume %s Slot %d, Drive %d\": " + "ERR=%s.\nResults=%s\n"), + new_vol_name, slot, drive, be.bstrerror(), results.c_str()); + rtn_stat = -1; /* hard error */ + dev->clear_slot(); /* mark unknown */ + } + unlock_changer(dcr); + } else { + status = 0; /* we got what we want */ + dev->set_slot(slot); /* set currently loaded slot */ + bstrncpy(dev->LoadedVolName, new_vol_name, sizeof(dev->LoadedVolName)); + } + Dmsg1(dbglvl, "After changer, status=%d\n", status); + if (status == 0) { /* did we succeed? */ + rtn_stat = 1; /* tape loaded by changer */ + } + } + free_pool_memory(changer); + return rtn_stat; + +bail_out: + free_pool_memory(changer); + return -1; + +} + +/* + * Returns: -1 if error from changer command + * slot otherwise + * Note, this is safe to do without releasing the drive + * since it does not attempt load/unload a slot. 
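+ * The configured ChangerCommand is run with the "loaded" keyword (see
+ * edit_device_codes() below) and is expected to print a single integer
+ * on stdout: the slot currently in the drive, or 0 if the drive is
+ * empty. For example, a made-up script run might simply print:
+ *
+ *   3
+ *
+ * A non-zero exit status is treated as an error: -1 is returned and the
+ * cached slot is cleared.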
+ */ +int get_autochanger_loaded_slot(DCR *dcr) +{ + JCR *jcr = dcr->jcr; + DEVICE *dev = dcr->dev; + int status, loaded; + uint32_t timeout = dcr->device->max_changer_wait; + int drive = dcr->dev->drive_index; + POOL_MEM results(PM_MESSAGE); + POOLMEM *changer; + + if (!dev->is_autochanger()) { + return -1; + } + if (!dcr->device->changer_command) { + return -1; + } + + if (dev->get_slot() > 0 && dev->has_cap(CAP_ALWAYSOPEN)) { + Dmsg1(dbglvl, "Return cached slot=%d\n", dev->get_slot()); + return dev->get_slot(); + } + + /* Virtual disk autochanger */ + if (dcr->is_virtual_autochanger()) { + return 1; + } + + /* Find out what is loaded, zero means device is unloaded */ + changer = get_pool_memory(PM_FNAME); + lock_changer(dcr); + /* Suppress info when polling */ + if (!dev->poll && chk_dbglvl(1)) { + Jmsg(jcr, M_INFO, 0, _("3301 Issuing autochanger \"loaded? drive %d\" command.\n"), + drive); + } + changer = edit_device_codes(dcr, changer, dcr->device->changer_command, "loaded"); + Dmsg1(dbglvl, "Run program=%s\n", changer); + status = run_program_full_output(changer, timeout, results.addr()); + Dmsg3(dbglvl, "run_prog: %s stat=%d result=%s", changer, status, results.c_str()); + if (status == 0) { + loaded = str_to_int32(results.c_str()); + if (loaded > 0) { + /* Suppress info when polling */ + if (!dev->poll && chk_dbglvl(1)) { + Jmsg(jcr, M_INFO, 0, _("3302 Autochanger \"loaded? drive %d\", result is Slot %d.\n"), + drive, loaded); + } + dev->set_slot(loaded); + } else { + /* Suppress info when polling */ + if (!dev->poll && chk_dbglvl(1)) { + Jmsg(jcr, M_INFO, 0, _("3302 Autochanger \"loaded? drive %d\", result: nothing loaded.\n"), + drive); + } + if (loaded == 0) { /* no slot loaded */ + dev->set_slot(0); + } else { /* probably some error */ + dev->clear_slot(); /* unknown */ + } + } + } else { + berrno be; + be.set_errno(status); + Jmsg(jcr, M_INFO, 0, _("3991 Bad autochanger \"loaded? drive %d\" command: " + "ERR=%s.\nResults=%s\n"), drive, be.bstrerror(), results.c_str()); + Dmsg3(dbglvl, "Error: autochanger loaded? drive %d " + "ERR=%s.\nResults=%s\n", drive, be.bstrerror(), results.c_str()); + loaded = -1; /* force unload */ + dev->clear_slot(); /* slot unknown */ + } + unlock_changer(dcr); + free_pool_memory(changer); + return loaded; +} + +static void lock_changer(DCR *dcr) +{ + AUTOCHANGER *changer_res = dcr->device->changer_res; + if (changer_res) { + int errstat; + Dmsg1(dbglvl, "Locking changer %s\n", changer_res->hdr.name); + if ((errstat=rwl_writelock(&changer_res->changer_lock)) != 0) { + berrno be; + Jmsg(dcr->jcr, M_ERROR_TERM, 0, _("Lock failure on autochanger. ERR=%s\n"), + be.bstrerror(errstat)); + } + } +} + +static void unlock_changer(DCR *dcr) +{ + AUTOCHANGER *changer_res = dcr->device->changer_res; + if (changer_res) { + int errstat; + Dmsg1(dbglvl, "Unlocking changer %s\n", changer_res->hdr.name); + if ((errstat=rwl_writeunlock(&changer_res->changer_lock)) != 0) { + berrno be; + Jmsg(dcr->jcr, M_ERROR_TERM, 0, _("Unlock failure on autochanger. 
ERR=%s\n"), + be.bstrerror(errstat)); + } + } +} + +/* + * Unload the volume, if any, in this drive + * On entry: loaded == 0 -- nothing to do + * loaded < 0 -- check if anything to do + * loaded > 0 -- load slot == loaded + */ +bool unload_autochanger(DCR *dcr, int loaded) +{ + DEVICE *dev = dcr->dev; + JCR *jcr = dcr->jcr; + const char *old_vol_name; + int slot; + uint32_t timeout = dcr->device->max_changer_wait; + bool ok = true; + + if (loaded == 0) { + return true; + } + + if (!dev->is_autochanger() || !dcr->device->changer_name || + !dcr->device->changer_command) { + return false; + } + + /* Virtual disk autochanger */ + if (dcr->is_virtual_autochanger()) { + dev->clear_unload(); + return true; + } + + lock_changer(dcr); + if (dev->LoadedVolName[0]) { + old_vol_name = dev->LoadedVolName; + } else { + old_vol_name = "*Unknown*"; + } + if (loaded < 0) { + loaded = get_autochanger_loaded_slot(dcr); + if (loaded < 0) { /* try again, maybe autochanger error */ + loaded = get_autochanger_loaded_slot(dcr); + } + } + + if (loaded > 0) { + POOL_MEM results(PM_MESSAGE); + POOLMEM *changer = get_pool_memory(PM_FNAME); + Jmsg(jcr, M_INFO, 0, + _("3307 Issuing autochanger \"unload Volume %s, Slot %d, Drive %d\" command.\n"), + old_vol_name, loaded, dev->drive_index); + Dmsg3(dbglvl, + "3307 Issuing autochanger \"unload Volume %s, Slot %d, Drive %d\" command.\n", + old_vol_name, loaded, dev->drive_index); + slot = dcr->VolCatInfo.Slot; + dcr->VolCatInfo.Slot = loaded; + changer = edit_device_codes(dcr, changer, + dcr->device->changer_command, "unload"); + dev->close(dcr); + Dmsg1(dbglvl, "Run program=%s\n", changer); + int stat = run_program_full_output(changer, timeout, results.addr()); + dcr->VolCatInfo.Slot = slot; + if (stat != 0) { + berrno be; + be.set_errno(stat); + Jmsg(jcr, M_INFO, 0, _("3995 Bad autochanger \"unload Volume %s, Slot %d, Drive %d\": " + "ERR=%s\nResults=%s\n"), + old_vol_name, loaded, dev->drive_index, be.bstrerror(), results.c_str()); + Dmsg5(dbglvl, "Error: unload Volume %s, Slot %d, Drive %d, bad stats=%s.\nResults=%s\n", + old_vol_name, loaded, dev->drive_index, + be.bstrerror(), results.c_str()); + ok = false; + dev->clear_slot(); /* unknown */ + } else { + dev->set_slot(0); /* unload is OK, mark nothing loaded */ + dev->clear_unload(); + dev->LoadedVolName[0] = 0; /* clear loaded volume name */ + } + free_pool_memory(changer); + } + unlock_changer(dcr); + + if (ok) { + free_volume(dev); + } + return ok; +} + +/* + * Unload the slot if mounted in a different drive + */ +static bool unload_other_drive(DCR *dcr, int slot, bool writing) +{ + DEVICE *dev = NULL; + DEVICE *dev_save; + bool found = false; + AUTOCHANGER *changer = dcr->dev->device->changer_res; + DEVRES *device; + int retries = 0; /* wait for device retries */ + int loaded; + int i; + + if (!changer || !changer->device) { + return false; + } + if (changer->device->size() == 1) { + return true; + } + + /* + * We look for the slot number corresponding to the tape + * we want in other drives, and if possible, unload + * it. + */ + Dmsg1(dbglvl, "Begin wiffle through devices looking for slot=%d\n", slot); + /* + * foreach_alist(device, changer->device) { + * + * The above fails to loop through all devices. It is + * probably a compiler bug. 
+ */ + for (i=0; i < changer->device->size(); i++) { + device = (DEVRES *)changer->device->get(i); + dev = device->dev; + if (!dev) { + Dmsg0(dbglvl, "No dev attached to device\n"); + continue; + } + + dev_save = dcr->dev; + dcr->set_dev(dev); + loaded = get_autochanger_loaded_slot(dcr); + dcr->set_dev(dev_save); + + if (loaded > 0) { + Dmsg4(dbglvl, "Want slot=%d, drive=%d loaded=%d dev=%s\n", + slot, dev->drive_index, loaded, dev->print_name()); + if (loaded == slot) { + found = true; + break; + } + } else { + Dmsg4(dbglvl, "After slot=%d drive=%d loaded=%d dev=%s\n", + slot, dev->drive_index, loaded, dev->print_name()); + } + } + Dmsg1(dbglvl, "End wiffle through devices looking for slot=%d\n", slot); + if (!found) { + Dmsg1(dbglvl, "Slot=%d not found in another device\n", slot); + return true; + } else { + Dmsg3(dbglvl, "Slot=%d drive=%d found in dev=%s\n", slot, dev->drive_index, dev->print_name()); + } + + /* + * The Volume we want is on another device. + * If we want the Volume in order to read it, and the other device where + * the Volume currently resides is not open, we simply unload the Volume; + * the subsequent code will then load it in the desired drive. + * If we want to write, or the device is open, we attempt to wait for + * the Volume to become available. + */ + if (writing || dev->is_open()) { + if (dev->is_busy()) { + Dmsg4(dbglvl, "Vol %s for dev=%s in use dev=%s slot=%d\n", + dcr->VolumeName, dcr->dev->print_name(), + dev->print_name(), slot); + } + for (int i=0; i < 3; i++) { + if (dev->is_busy()) { + Dmsg0(40, "Device is busy. Calling wait_for_device()\n"); + wait_for_device(dcr, retries); + continue; + } + break; + } + if (dev->is_busy()) { + Jmsg(dcr->jcr, M_WARNING, 0, _("Volume \"%s\" wanted on %s is in use by device %s\n"), + dcr->VolumeName, dcr->dev->print_name(), dev->print_name()); + Dmsg4(dbglvl, "Vol %s for dev=%s is busy dev=%s slot=%d\n", + dcr->VolumeName, dcr->dev->print_name(), dev->print_name(), dev->get_slot()); + Dmsg2(dbglvl, "num_writ=%d reserv=%d\n", dev->num_writers, dev->num_reserved()); + volume_unused(dcr); + return false; + } + } + return unload_dev(dcr, dev); +} + +/* + * Unconditionally unload a specified drive + */ +bool unload_dev(DCR *dcr, DEVICE *dev) +{ + JCR *jcr = dcr->jcr; + bool ok = true; + uint32_t timeout = dcr->device->max_changer_wait; + AUTOCHANGER *changer = dcr->dev->device->changer_res; + const char *old_vol_name = dcr->VolumeName; + DEVICE *save_dev; + int save_slot; + + if (!changer) { + return false; + } + + save_dev = dcr->dev; /* save dcr device */ + dcr->set_dev(dev); /* temporarily point dcr at other device */ + + get_autochanger_loaded_slot(dcr); + + /* Fail if we have no slot to unload */ + if (dev->get_slot() <= 0) { + if (dev->get_slot() < 0) { + Dmsg1(dbglvl, "Cannot unload, slot not defined. 
dev=%s\n", + dev->print_name()); + } + dcr->set_dev(save_dev); + return false; + } + + save_slot = dcr->VolCatInfo.Slot; + dcr->VolCatInfo.Slot = dev->get_slot(); + + POOLMEM *changer_cmd = get_pool_memory(PM_FNAME); + POOL_MEM results(PM_MESSAGE); + if (old_vol_name[0] == 0) { + if (dev->LoadedVolName[0]) { + old_vol_name = dev->LoadedVolName; + } else { + old_vol_name = "*Unknown*"; + } + } + lock_changer(dcr); + Jmsg(jcr, M_INFO, 0, + _("3307 Issuing autochanger \"unload Volume %s, Slot %d, Drive %d\" command.\n"), + old_vol_name, dev->get_slot(), dev->drive_index); + Dmsg3(0/*dbglvl*/, "Issuing autochanger \"unload Volume %s, Slot %d, Drive %d\" command.\n", + old_vol_name, dev->get_slot(), dev->drive_index); + + changer_cmd = edit_device_codes(dcr, changer_cmd, + dcr->device->changer_command, "unload"); + dev->close(dcr); + Dmsg2(dbglvl, "close dev=%s reserve=%d\n", dev->print_name(), + dev->num_reserved()); + Dmsg1(dbglvl, "Run program=%s\n", changer_cmd); + int stat = run_program_full_output(changer_cmd, timeout, results.addr()); + dcr->VolCatInfo.Slot = save_slot; + if (stat != 0) { + berrno be; + be.set_errno(stat); + Jmsg(jcr, M_INFO, 0, _("3997 Bad autochanger \"unload Volume %s, Slot %d, Drive %d\": ERR=%s.\n"), + old_vol_name, dev->get_slot(), dev->drive_index, be.bstrerror()); + Dmsg5(dbglvl, "Error: unload Volume %s, Slot %d, Drive %d bad stats=%s.\nResults=%s\n", + old_vol_name, dev->get_slot(), dev->drive_index, + be.bstrerror(), results.c_str()); + ok = false; + dev->clear_slot(); /* unknown */ + } else { + Dmsg3(dbglvl, "Volume %s, Slot %d unloaded %s\n", + old_vol_name, dev->get_slot(), dev->print_name()); + dev->set_slot(0); /* unload OK, mark nothing loaded */ + dev->clear_unload(); + dev->LoadedVolName[0] = 0; + } + unlock_changer(dcr); + + if (ok) { + free_volume(dev); + } + dcr->set_dev(save_dev); + free_pool_memory(changer_cmd); + return ok; +} + + + +/* + * List the Volumes that are in the autoloader possibly + * with their barcodes. + * We assume that it is always the Console that is calling us. 
+ */
+bool autochanger_cmd(DCR *dcr, BSOCK *dir, const char *cmd)
+{
+   DEVICE *dev = dcr->dev;
+   uint32_t timeout = dcr->device->max_changer_wait;
+   POOLMEM *changer;
+   BPIPE *bpipe;
+   int len = sizeof_pool_memory(dir->msg) - 1;
+   int stat;
+
+   if (!dev->is_autochanger() || !dcr->device->changer_name ||
+       !dcr->device->changer_command) {
+      if (strcasecmp(cmd, "drives") == 0) {
+         dir->fsend("drives=1\n");
+      }
+      dir->fsend(_("3993 Device %s not an autochanger device.\n"),
+         dev->print_name());
+      return false;
+   }
+
+   if (strcasecmp(cmd, "drives") == 0) {
+      AUTOCHANGER *changer_res = dcr->device->changer_res;
+      int drives = 1;
+      if (changer_res && changer_res->device) {
+         drives = changer_res->device->size();
+      }
+      dir->fsend("drives=%d\n", drives);
+      Dmsg1(dbglvl, "drives=%d\n", drives);
+      return true;
+   }
+
+   /* If listing, reprobe changer */
+   if (bstrcasecmp(cmd, "list") || bstrcasecmp(cmd, "listall")) {
+      dcr->dev->set_slot(0);
+      get_autochanger_loaded_slot(dcr);
+   }
+
+   changer = get_pool_memory(PM_FNAME);
+   lock_changer(dcr);
+   /* Now issue the command */
+   changer = edit_device_codes(dcr, changer,
+                dcr->device->changer_command, cmd);
+   dir->fsend(_("3306 Issuing autochanger \"%s\" command.\n"), cmd);
+   bpipe = open_bpipe(changer, timeout, "r");
+   if (!bpipe) {
+      dir->fsend(_("3996 Open bpipe failed.\n"));
+      goto bail_out;            /* TODO: check if we need to return false */
+   }
+   if (bstrcasecmp(cmd, "list") || bstrcasecmp(cmd, "listall")) {
+      /* Get output from changer */
+      while (fgets(dir->msg, len, bpipe->rfd)) {
+         dir->msglen = strlen(dir->msg);
+         Dmsg1(dbglvl, "<stored: %s\n", dir->msg);
+         dir->send();
+      }
+   } else if (strcasecmp(cmd, "slots") == 0 ) {
+      char buf[100], *p;
+      /* For slots command, read a single line */
+      buf[0] = 0;
+      fgets(buf, sizeof(buf)-1, bpipe->rfd);
+      buf[sizeof(buf)-1] = 0;
+      /* Strip any leading space in front of # of slots */
+      for (p=buf; B_ISSPACE(*p); p++)
+        { }
+      dir->fsend("slots=%s", p);
+      Dmsg1(dbglvl, "<stored: %s\n", dir->msg);
+   }
+
+   stat = close_bpipe(bpipe);
+   if (stat != 0) {
+      berrno be;
+      be.set_errno(stat);
+      dir->fsend(_("Autochanger error: ERR=%s\n"), be.bstrerror());
+   }
+
+bail_out:
+   unlock_changer(dcr);
+   free_pool_memory(changer);
+   return true;
+}
+
+
+/*
+ * Edit codes into ChangerCommand
+ *  %% = %
+ *  %a = archive device name
+ *  %c = changer device name
+ *  %d = changer drive index
+ *  %f = Client's name
+ *  %j = Job name
+ *  %l = archive control channel name
+ *  %o = command
+ *  %s = Slot base 0
+ *  %S = Slot base 1
+ *  %v = Volume name
+ *
+ *
+ *  omsg = edited output message
+ *  imsg = input string containing edit codes (%x)
+ *  cmd = command string (load, unload, ...)
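+ *
+ *  For illustration only (hypothetical devices and slot): with
+ *     ChangerCommand = "/etc/bacula/mtx-changer %c %o %S %a %d"
+ *  a cmd of "load", VolCatInfo.Slot = 3 and drive_index = 0 would be
+ *  edited into roughly
+ *     /etc/bacula/mtx-changer /dev/sg0 load 3 /dev/nst0 0
+ *  assuming the Changer Device is /dev/sg0 and the Archive Device
+ *  is /dev/nst0.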
+ * + */ +char *edit_device_codes(DCR *dcr, char *omsg, const char *imsg, const char *cmd) +{ + const char *p; + const char *str; + char add[20]; + + *omsg = 0; + Dmsg1(1800, "edit_device_codes: %s\n", imsg); + for (p=imsg; *p; p++) { + if (*p == '%') { + switch (*++p) { + case '%': + str = "%"; + break; + case 'a': + str = dcr->dev->archive_name(); + break; + case 'c': + str = NPRT(dcr->device->changer_name); + break; + case 'l': + str = NPRT(dcr->device->control_name); + break; + case 'd': + sprintf(add, "%d", dcr->dev->drive_index); + str = add; + break; + case 'o': + str = NPRT(cmd); + break; + case 's': + sprintf(add, "%d", dcr->VolCatInfo.Slot - 1); + str = add; + break; + case 'S': + sprintf(add, "%d", dcr->VolCatInfo.Slot); + str = add; + break; + case 'j': /* Job name */ + str = dcr->jcr->Job; + break; + case 'v': + if (dcr->dev->LoadedVolName[0]) { + str = dcr->dev->LoadedVolName; + } else if (dcr->VolCatInfo.VolCatName[0]) { + str = dcr->VolCatInfo.VolCatName; + } else if (dcr->VolumeName[0]) { + str = dcr->VolumeName; + } else if (dcr->dev->vol && dcr->dev->vol->vol_name) { + str = dcr->dev->vol->vol_name; + } else { + str = dcr->dev->VolHdr.VolumeName; + } + break; + case 'f': + str = NPRT(dcr->jcr->client_name); + break; + + default: + add[0] = '%'; + add[1] = *p; + add[2] = 0; + str = add; + break; + } + } else { + add[0] = *p; + add[1] = 0; + str = add; + } + Dmsg1(1900, "add_str %s\n", str); + pm_strcat(&omsg, (char *)str); + Dmsg1(1800, "omsg=%s\n", omsg); + } + Dmsg1(800, "omsg=%s\n", omsg); + return omsg; +} diff --git a/src/stored/bacula-sd.conf.in b/src/stored/bacula-sd.conf.in new file mode 100644 index 00000000..1c1d4a40 --- /dev/null +++ b/src/stored/bacula-sd.conf.in @@ -0,0 +1,334 @@ +# +# Default Bacula Storage Daemon Configuration file +# +# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ @DISTVER@ +# +# You may need to change the name of your tape drive +# on the "Archive Device" directive in the Device +# resource. If you change the Name and/or the +# "Media Type" in the Device resource, please ensure +# that dird.conf has corresponding changes. +# +# +# Copyright (C) 2000-2017 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# + +Storage { # definition of myself + Name = @basename@-sd + SDPort = @sd_port@ # Director's port + WorkingDirectory = "@working_dir@" + Pid Directory = "@piddir@" + Plugin Directory = "@plugindir@" + Maximum Concurrent Jobs = 20 +} + +# +# List Directors who are permitted to contact Storage daemon +# +Director { + Name = @basename@-dir + Password = "@sd_password@" +} + +# +# Restricted Director, used by tray-monitor to get the +# status of the storage daemon +# +Director { + Name = @basename@-mon + Password = "@mon_sd_password@" + Monitor = yes +} + +# +# Note, for a list of additional Device templates please +# see the directory /examples/devices +# Or follow the following link: +# http://www.bacula.org/git/cgit.cgi/bacula/tree/bacula/examples/devices?h=Branch-7.4 +# + +# +# Devices supported by this Storage daemon +# To connect, the Director's bacula-dir.conf must have the +# same Name and MediaType. 
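+#
+# As a rough sketch only (hostname, password and port are placeholders),
+# the matching Storage resource in the Director's bacula-dir.conf for
+# the FileChgr1 autochanger defined below would look like:
+#
+#   Storage {
+#     Name = File1
+#     Address = backup.example.com   # SD address as seen by the clients
+#     SDPort = 9103
+#     Password = "sd-password"       # must match the Director password above
+#     Device = FileChgr1
+#     Media Type = File1
+#     Autochanger = yes
+#     Maximum Concurrent Jobs = 10
+#   }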
+# + +# +# Define a Virtual autochanger +# +Autochanger { + Name = FileChgr1 + Device = FileChgr1-Dev1, FileChgr1-Dev2 + Changer Command = "" + Changer Device = /dev/null +} + +Device { + Name = FileChgr1-Dev1 + Media Type = File1 + Archive Device = @archivedir@ + LabelMedia = yes; # lets Bacula label unlabeled media + Random Access = Yes; + AutomaticMount = yes; # when device opened, read it + RemovableMedia = no; + AlwaysOpen = no; + Maximum Concurrent Jobs = 5 +} + +Device { + Name = FileChgr1-Dev2 + Media Type = File1 + Archive Device = @archivedir@ + LabelMedia = yes; # lets Bacula label unlabeled media + Random Access = Yes; + AutomaticMount = yes; # when device opened, read it + RemovableMedia = no; + AlwaysOpen = no; + Maximum Concurrent Jobs = 5 +} + +# +# Define a second Virtual autochanger +# +Autochanger { + Name = FileChgr2 + Device = FileChgr2-Dev1, FileChgr2-Dev2 + Changer Command = "" + Changer Device = /dev/null +} + +Device { + Name = FileChgr2-Dev1 + Media Type = File2 + Archive Device = @archivedir@ + LabelMedia = yes; # lets Bacula label unlabeled media + Random Access = Yes; + AutomaticMount = yes; # when device opened, read it + RemovableMedia = no; + AlwaysOpen = no; + Maximum Concurrent Jobs = 5 +} + +Device { + Name = FileChgr2-Dev2 + Media Type = File2 + Archive Device = @archivedir@ + LabelMedia = yes; # lets Bacula label unlabeled media + Random Access = Yes; + AutomaticMount = yes; # when device opened, read it + RemovableMedia = no; + AlwaysOpen = no; + Maximum Concurrent Jobs = 5 +} + + + +# +# An autochanger device with two drives +# +#Autochanger { +# Name = Autochanger +# Device = Drive-1 +# Device = Drive-2 +# Changer Command = "@scriptdir@/mtx-changer %c %o %S %a %d" +# Changer Device = /dev/sg0 +#} + +#Device { +# Name = Drive-1 # +# Drive Index = 0 +# Media Type = DLT-8000 +# Archive Device = /dev/nst0 +# AutomaticMount = yes; # when device opened, read it +# AlwaysOpen = yes; +# RemovableMedia = yes; +# RandomAccess = no; +# AutoChanger = yes +# # +# # New alert command in Bacula 9.0.0 +# # Note: you must have the sg3_utils (rpms) or the +# # sg3-utils (deb) installed on your system. +# # and you must set the correct control device that +# # corresponds to the Archive Device +# Control Device = /dev/sg?? # must be SCSI ctl for /dev/nst0 +# Alert Command = "@scriptdir@/tapealert %l" +# +# # +# # Enable the Alert command only if you have the mtx package loaded +# # Note, apparently on some systems, tapeinfo resets the SCSI controller +# # thus if you turn this on, make sure it does not reset your SCSI +# # controller. I have never had any problems, and smartctl does +# # not seem to cause such problems. 
+# # +# Alert Command = "sh -c 'tapeinfo -f %c |grep TapeAlert|cat'" +# If you have smartctl, enable this, it has more info than tapeinfo +# Alert Command = "sh -c 'smartctl -H -l error %c'" +#} + +#Device { +# Name = Drive-2 # +# Drive Index = 1 +# Media Type = DLT-8000 +# Archive Device = /dev/nst1 +# AutomaticMount = yes; # when device opened, read it +# AlwaysOpen = yes; +# RemovableMedia = yes; +# RandomAccess = no; +# AutoChanger = yes +# # Enable the Alert command only if you have the mtx package loaded +# Alert Command = "sh -c 'tapeinfo -f %c |grep TapeAlert|cat'" +# If you have smartctl, enable this, it has more info than tapeinfo +# Alert Command = "sh -c 'smartctl -H -l error %c'" +#} + +# +# A Linux or Solaris LTO-2 tape drive +# +#Device { +# Name = LTO-2 +# Media Type = LTO-2 +# Archive Device = @TAPEDRIVE@ +# AutomaticMount = yes; # when device opened, read it +# AlwaysOpen = yes; +# RemovableMedia = yes; +# RandomAccess = no; +# Maximum File Size = 3GB +## Changer Command = "@scriptdir@/mtx-changer %c %o %S %a %d" +## Changer Device = /dev/sg0 +## AutoChanger = yes +# # Enable the Alert command only if you have the mtx package loaded +## Alert Command = "sh -c 'tapeinfo -f %c |grep TapeAlert|cat'" +## If you have smartctl, enable this, it has more info than tapeinfo +## Alert Command = "sh -c 'smartctl -H -l error %c'" +#} + +# +# A Linux or Solaris LTO-3 tape drive +# +#Device { +# Name = LTO-3 +# Media Type = LTO-3 +# Archive Device = @TAPEDRIVE@ +# AutomaticMount = yes; # when device opened, read it +# AlwaysOpen = yes; +# RemovableMedia = yes; +# RandomAccess = no; +# Maximum File Size = 4GB +# Changer Command = "@scriptdir@/mtx-changer %c %o %S %a %d" +# Changer Device = /dev/sg0 +# AutoChanger = yes +# # +# # New alert command in Bacula 9.0.0 +# # Note: you must have the sg3_utils (rpms) or the +# # sg3-utils (deb) installed on your system. +# # and you must set the correct control device that +# # corresponds to the Archive Device +# Control Device = /dev/sg?? # must be SCSI ctl for @TAPEDRIVE@ +# Alert Command = "@scriptdir@/tapealert %l" +# +# # Enable the Alert command only if you have the mtx package loaded +## Alert Command = "sh -c 'tapeinfo -f %c |grep TapeAlert|cat'" +## If you have smartctl, enable this, it has more info than tapeinfo +## Alert Command = "sh -c 'smartctl -H -l error %c'" +#} + +# +# A Linux or Solaris LTO-4 tape drive +# +#Device { +# Name = LTO-4 +# Media Type = LTO-4 +# Archive Device = @TAPEDRIVE@ +# AutomaticMount = yes; # when device opened, read it +# AlwaysOpen = yes; +# RemovableMedia = yes; +# RandomAccess = no; +# Maximum File Size = 5GB +# Changer Command = "@scriptdir@/mtx-changer %c %o %S %a %d" +# Changer Device = /dev/sg0 +# AutoChanger = yes +# # +# # New alert command in Bacula 9.0.0 +# # Note: you must have the sg3_utils (rpms) or the +# # sg3-utils (deb) installed on your system. +# # and you must set the correct control device that +# # corresponds to the Archive Device +# Control Device = /dev/sg?? 
# must be SCSI ctl for @TAPEDRIVE@ +# Alert Command = "@scriptdir@/tapealert %l" +# +# # Enable the Alert command only if you have the mtx package loaded +## Alert Command = "sh -c 'tapeinfo -f %c |grep TapeAlert|cat'" +## If you have smartctl, enable this, it has more info than tapeinfo +## Alert Command = "sh -c 'smartctl -H -l error %c'" +#} + +# +# An HP-UX tape drive +# +#Device { +# Name = Drive-1 # +# Drive Index = 0 +# Media Type = DLT-8000 +# Archive Device = /dev/rmt/1mnb +# AutomaticMount = yes; # when device opened, read it +# AlwaysOpen = yes; +# RemovableMedia = yes; +# RandomAccess = no; +# AutoChanger = no +# Two EOF = yes +# Hardware End of Medium = no +# Fast Forward Space File = no +# # +# # New alert command in Bacula 9.0.0 +# # Note: you must have the sg3_utils (rpms) or the +# # sg3-utils (deb) installed on your system. +# # and you must set the correct control device that +# # corresponds to the Archive Device +# Control Device = /dev/sg?? # must be SCSI ctl for /dev/rmt/1mnb +# Alert Command = "@scriptdir@/tapealert %l" +# +# # +# # Enable the Alert command only if you have the mtx package loaded +# Alert Command = "sh -c 'tapeinfo -f %c |grep TapeAlert|cat'" +# If you have smartctl, enable this, it has more info than tapeinfo +# Alert Command = "sh -c 'smartctl -H -l error %c'" +#} + +# +# A FreeBSD tape drive +# +#Device { +# Name = DDS-4 +# Description = "DDS-4 for FreeBSD" +# Media Type = DDS-4 +# Archive Device = /dev/nsa1 +# AutomaticMount = yes; # when device opened, read it +# AlwaysOpen = yes +# Offline On Unmount = no +# Hardware End of Medium = no +# BSF at EOM = yes +# Backward Space Record = no +# Fast Forward Space File = no +# TWO EOF = yes +# # +# # New alert command in Bacula 9.0.0 +# # Note: you must have the sg3_utils (rpms) or the +# # sg3-utils (deb) installed on your system. +# # and you must set the correct control device that +# # corresponds to the Archive Device +# Control Device = /dev/sg?? # must be SCSI ctl for /dev/nsa1 +# Alert Command = "@scriptdir@/tapealert %l" +# +# If you have smartctl, enable this, it has more info than tapeinfo +# Alert Command = "sh -c 'smartctl -H -l error %c'" +#} + +# +# Send all messages to the Director, +# mount messages also are sent to the email address +# +Messages { + Name = Standard + director = @basename@-dir = all +} diff --git a/src/stored/bcopy.c b/src/stored/bcopy.c new file mode 100644 index 00000000..a2684066 --- /dev/null +++ b/src/stored/bcopy.c @@ -0,0 +1,348 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * Program to copy a Bacula from one volume to another. + * + * Kern E. 
Sibbald, October 2002 + */ + +#include "bacula.h" +#include "stored.h" + +extern bool parse_sd_config(CONFIG *config, const char *configfile, int exit_code); + +/* Forward referenced functions */ +static void get_session_record(DEVICE *dev, DEV_RECORD *rec, SESSION_LABEL *sessrec); +static bool record_cb(DCR *dcr, DEV_RECORD *rec); + + +/* Global variables */ +static DEVICE *in_dev = NULL; +static DEVICE *out_dev = NULL; +static JCR *in_jcr; /* input jcr */ +static JCR *out_jcr; /* output jcr */ +static BSR *bsr = NULL; +static const char *wd = "/tmp"; +static bool list_records = false; +static uint32_t records = 0; +static uint32_t jobs = 0; +static DEV_BLOCK *out_block; +static SESSION_LABEL sessrec; + +static CONFIG *config; +#define CONFIG_FILE "bacula-sd.conf" + +void *start_heap; +char *configfile = NULL; + +static void usage() +{ + fprintf(stderr, _( +PROG_COPYRIGHT +"\n%sVersion: %s (%s)\n\n" +"Usage: bcopy [-d debug_level] \n" +" -b bootstrap specify a bootstrap file\n" +" -c specify a Storage configuration file\n" +" -d set debug level to \n" +" -dt print timestamp in debug output\n" +" -i specify input Volume names (separated by |)\n" +" -o specify output Volume names (separated by |)\n" +" -p proceed inspite of errors\n" +" -v verbose\n" +" -w specify working directory (default /tmp)\n" +" -? print this message\n\n"), 2002, "", VERSION, BDATE); + exit(1); +} + +int main (int argc, char *argv[]) +{ + int ch; + char *iVolumeName = NULL; + char *oVolumeName = NULL; + bool ignore_label_errors = false; + bool ok; + BtoolsAskDirHandler askdir_handler; + + init_askdir_handler(&askdir_handler); + setlocale(LC_ALL, ""); + bindtextdomain("bacula", LOCALEDIR); + textdomain("bacula"); + init_stack_dump(); + + my_name_is(argc, argv, "bcopy"); + lmgr_init_thread(); + init_msg(NULL, NULL); + + while ((ch = getopt(argc, argv, "b:c:d:i:o:pvw:?")) != -1) { + switch (ch) { + case 'b': + bsr = parse_bsr(NULL, optarg); + break; + + case 'c': /* specify config file */ + if (configfile != NULL) { + free(configfile); + } + configfile = bstrdup(optarg); + break; + + case 'd': /* debug level */ + if (*optarg == 't') { + dbg_timestamp = true; + } else { + debug_level = atoi(optarg); + if (debug_level <= 0) { + debug_level = 1; + } + } + break; + + case 'i': /* input Volume name */ + iVolumeName = optarg; + break; + + case 'o': /* output Volume name */ + oVolumeName = optarg; + break; + + case 'p': + ignore_label_errors = true; + forge_on = true; + break; + + case 'v': + verbose++; + break; + + case 'w': + wd = optarg; + break; + + case '?': + default: + usage(); + + } + } + argc -= optind; + argv += optind; + + if (argc != 2) { + Pmsg0(0, _("Wrong number of arguments: \n")); + usage(); + } + + OSDependentInit(); + + working_directory = wd; + + if (configfile == NULL) { + configfile = bstrdup(CONFIG_FILE); + } + + config = New(CONFIG()); + parse_sd_config(config, configfile, M_ERROR_TERM); + setup_me(); + load_sd_plugins(me->plugin_directory); + + /* Setup and acquire input device for reading */ + Dmsg0(100, "About to setup input jcr\n"); + in_jcr = setup_jcr("bcopy", argv[0], bsr, iVolumeName, SD_READ, true/*read dedup data*/); /* read device */ + if (!in_jcr) { + exit(1); + } + in_jcr->ignore_label_errors = ignore_label_errors; + in_dev = in_jcr->dcr->dev; + if (!in_dev) { + exit(1); + } + + /* Setup output device for writing */ + Dmsg0(100, "About to setup output jcr\n"); + out_jcr = setup_jcr("bcopy", argv[1], bsr, oVolumeName, SD_APPEND); /* no acquire */ + if (!out_jcr) { + exit(1); + } + out_dev = 
out_jcr->dcr->dev; + if (!out_dev) { + exit(1); + } + Dmsg0(100, "About to acquire device for writing\n"); + /* For we must now acquire the device for writing */ + out_dev->rLock(false); + if (!out_dev->open_device(out_jcr->dcr, OPEN_READ_WRITE)) { + Emsg1(M_FATAL, 0, _("dev open failed: %s\n"), out_dev->errmsg); + out_dev->Unlock(); + exit(1); + } + out_dev->Unlock(); + if (!acquire_device_for_append(out_jcr->dcr)) { + free_jcr(in_jcr); + exit(1); + } + out_block = out_jcr->dcr->block; + + ok = read_records(in_jcr->dcr, record_cb, mount_next_read_volume); + + if (ok || out_dev->can_write()) { + if (!out_jcr->dcr->write_final_block_to_device()) { + Pmsg0(000, _("Write of last block failed.\n")); + } + } + + Pmsg2(000, _("%u Jobs copied. %u records copied.\n"), jobs, records); + + free_jcr(in_jcr); + free_jcr(out_jcr); + + in_dev->term(NULL); + out_dev->term(NULL); + + return 0; +} + + +/* + * read_records() calls back here for each record it gets + */ +static bool record_cb(DCR *in_dcr, DEV_RECORD *rec) +{ + if (list_records) { + Pmsg5(000, _("Record: SessId=%u SessTim=%u FileIndex=%d Stream=%d len=%u\n"), + rec->VolSessionId, rec->VolSessionTime, rec->FileIndex, + rec->Stream, rec->data_len); + } + /* + * Check for Start or End of Session Record + * + */ + if (rec->FileIndex < 0) { + get_session_record(in_dcr->dev, rec, &sessrec); + + if (verbose > 1) { + dump_label_record(in_dcr->dev, rec, 1/*verbose*/, false/*check err*/); + } + switch (rec->FileIndex) { + case PRE_LABEL: + Pmsg0(000, _("Volume is prelabeled. This volume cannot be copied.\n")); + return false; + case VOL_LABEL: + Pmsg0(000, _("Volume label not copied.\n")); + return true; + case SOS_LABEL: + if (bsr && rec->match_stat < 1) { + /* Skipping record, because does not match BSR filter */ + if (verbose) { + Pmsg0(-1, _("Copy skipped. Record does not match BSR filter.\n")); + } + } else { + jobs++; + } + break; + case EOS_LABEL: + if (bsr && rec->match_stat < 1) { + /* Skipping record, because does not match BSR filter */ + return true; + } + while (!write_record_to_block(out_jcr->dcr, rec)) { + Dmsg2(150, "!write_record_to_block data_len=%d rem=%d\n", rec->data_len, + rec->remainder); + if (!out_jcr->dcr->write_block_to_device()) { + Dmsg2(90, "Got write_block_to_dev error on device %s: ERR=%s\n", + out_dev->print_name(), out_dev->bstrerror()); + Jmsg(out_jcr, M_FATAL, 0, _("Cannot fixup device error. %s\n"), + out_dev->bstrerror()); + return false; + } + } + if (!out_jcr->dcr->write_block_to_device()) { + Dmsg2(90, "Got write_block_to_dev error on device %s: ERR=%s\n", + out_dev->print_name(), out_dev->bstrerror()); + Jmsg(out_jcr, M_FATAL, 0, _("Cannot fixup device error. %s\n"), + out_dev->bstrerror()); + return false; + } + return true; + case EOM_LABEL: + Pmsg0(000, _("EOM label not copied.\n")); + return true; + case EOT_LABEL: /* end of all tapes */ + Pmsg0(000, _("EOT label not copied.\n")); + return true; + default: + return true; + } + } + + /* Write record */ + if (bsr && rec->match_stat < 1) { + /* Skipping record, because does not match BSR filter */ + return true; + } + records++; + while (!write_record_to_block(out_jcr->dcr, rec)) { + Dmsg2(150, "!write_record_to_block data_len=%d rem=%d\n", rec->data_len, + rec->remainder); + if (!out_jcr->dcr->write_block_to_device()) { + Dmsg2(90, "Got write_block_to_dev error on device %s: ERR=%s\n", + out_dev->print_name(), out_dev->bstrerror()); + Jmsg(out_jcr, M_FATAL, 0, _("Cannot fixup device error. 
%s\n"), + out_dev->bstrerror()); + return false; + } + } + return true; +} + +static void get_session_record(DEVICE *dev, DEV_RECORD *rec, SESSION_LABEL *sessrec) +{ + const char *rtype; + memset(sessrec, 0, sizeof(SESSION_LABEL)); + switch (rec->FileIndex) { + case PRE_LABEL: + rtype = _("Fresh Volume Label"); + break; + case VOL_LABEL: + rtype = _("Volume Label"); + unser_volume_label(dev, rec); + break; + case SOS_LABEL: + rtype = _("Begin Job Session"); + unser_session_label(sessrec, rec); + break; + case EOS_LABEL: + rtype = _("End Job Session"); + unser_session_label(sessrec, rec); + break; + case 0: + case EOM_LABEL: + rtype = _("End of Medium"); + break; + default: + rtype = _("Unknown"); + break; + } + Dmsg5(10, "%s Record: VolSessionId=%d VolSessionTime=%d JobId=%d DataLen=%d\n", + rtype, rec->VolSessionId, rec->VolSessionTime, rec->Stream, rec->data_len); + if (verbose) { + Pmsg5(-1, _("%s Record: VolSessionId=%d VolSessionTime=%d JobId=%d DataLen=%d\n"), + rtype, rec->VolSessionId, rec->VolSessionTime, rec->Stream, rec->data_len); + } +} diff --git a/src/stored/bextract.c b/src/stored/bextract.c new file mode 100644 index 00000000..2a3dd25a --- /dev/null +++ b/src/stored/bextract.c @@ -0,0 +1,670 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * Dumb program to extract files from a Bacula backup. + * + * Kern E. 
Sibbald, MM
+ */
+
+#include "bacula.h"
+#include "stored.h"
+#include "ch.h"
+#include "findlib/find.h"
+
+#ifdef HAVE_LZO
+#include <lzo/lzoconf.h>
+#include <lzo/lzo1x.h>
+#endif
+
+extern bool parse_sd_config(CONFIG *config, const char *configfile, int exit_code);
+
+static void do_extract(char *fname);
+static bool record_cb(DCR *dcr, DEV_RECORD *rec);
+
+static DEVICE *dev = NULL;
+static DCR *dcr;
+static BFILE bfd;
+static JCR *jcr;
+static FF_PKT *ff;
+static BSR *bsr = NULL;
+static bool extract = false;
+static int non_support_data = 0;
+static long total = 0;
+static ATTR *attr;
+static POOLMEM *curr_fname;
+static char *where;
+static uint64_t num_errors = 0;
+static uint64_t num_records = 0;
+static uint32_t num_files = 0;
+static uint32_t compress_buf_size = 70000;
+static POOLMEM *compress_buf;
+static int prog_name_msg = 0;
+static int win32_data_msg = 0;
+static char *VolumeName = NULL;
+
+static char *wbuf;                    /* write buffer address */
+static uint32_t wsize;                /* write size */
+static uint64_t fileAddr = 0;         /* file write address */
+
+static CONFIG *config;
+#define CONFIG_FILE "bacula-sd.conf"
+
+void *start_heap;
+char *configfile = NULL;
+bool skip_extract = false;
+
+static void usage()
+{
+   fprintf(stderr, _(
+PROG_COPYRIGHT
+"\n%sVersion: %s (%s)\n\n"
+"Usage: bextract <options> <bacula-archive-device-name> <directory-to-store-files>\n"
+"       -b <file>       specify a bootstrap file\n"
+"       -c <file>       specify a Storage configuration file\n"
+"       -d <nn>         set debug level to <nn>\n"
+"       -dt             print timestamp in debug output\n"
+"       -T              send debug traces to trace file (stored in /tmp)\n"
+"       -e <file>       exclude list\n"
+"       -i <file>       include list\n"
+"       -p              proceed in spite of I/O errors\n"
+"       -t              read data from volume, do not write anything\n"
+"       -v              verbose\n"
+"       -V              specify Volume names (separated by |)\n"
+"       -?              print this message\n\n"), 2000, "", VERSION, BDATE);
+   exit(1);
+}
+
+
+int main (int argc, char *argv[])
+{
+   int ch;
+   FILE *fd;
+   char line[1000];
+   bool got_inc = false;
+   BtoolsAskDirHandler askdir_handler;
+
+   init_askdir_handler(&askdir_handler);
+   setlocale(LC_ALL, "");
+   bindtextdomain("bacula", LOCALEDIR);
+   textdomain("bacula");
+   init_stack_dump();
+   lmgr_init_thread();
+
+   working_directory = "/tmp";
+   my_name_is(argc, argv, "bextract");
+   init_msg(NULL, NULL);              /* setup message handler */
+
+   OSDependentInit();
+
+   ff = init_find_files();
+   binit(&bfd);
+
+   while ((ch = getopt(argc, argv, "Ttb:c:d:e:i:pvV:?")) != -1) {
+      switch (ch) {
+      case 't':
+         skip_extract = true;
+         break;
+
+      case 'b':                    /* bootstrap file */
+         bsr = parse_bsr(NULL, optarg);
+         break;
+
+      case 'T':                    /* Send debug to trace file */
+         set_trace(1);
+         break;
+
+      case 'c':                    /* specify config file */
+         if (configfile != NULL) {
+            free(configfile);
+         }
+         configfile = bstrdup(optarg);
+         break;
+
+      case 'd':                    /* debug level */
+         if (*optarg == 't') {
+            dbg_timestamp = true;
+         } else {
+            char *p;
+            /* We probably find a tag list -d 10,sql,bvfs */
+            if ((p = strchr(optarg, ',')) != NULL) {
+               *p = 0;
+            }
+            debug_level = atoi(optarg);
+            if (debug_level <= 0) {
+               debug_level = 1;
+            }
+            if (p) {
+               debug_parse_tags(p+1, &debug_level_tags);
+            }
+         }
+         break;
+
+      case 'e':                    /* exclude list */
+         if ((fd = bfopen(optarg, "rb")) == NULL) {
+            berrno be;
+            Pmsg2(0, _("Could not open exclude file: %s, ERR=%s\n"),
+               optarg, be.bstrerror());
+            exit(1);
+         }
+         while (fgets(line, sizeof(line), fd) != NULL) {
+            strip_trailing_junk(line);
+            Dmsg1(900, "add_exclude %s\n", line);
+            add_fname_to_exclude_list(ff, line);
+         }
+         fclose(fd);
+         break;
+
+      case 'i':                    /* include list */
+         if ((fd = bfopen(optarg, "rb")) == NULL) {
+            berrno be;
+            Pmsg2(0, _("Could not open include file: 
%s, ERR=%s\n"), + optarg, be.bstrerror()); + exit(1); + } + while (fgets(line, sizeof(line), fd) != NULL) { + strip_trailing_junk(line); + Dmsg1(900, "add_include %s\n", line); + add_fname_to_include_list(ff, 0, line); + } + fclose(fd); + got_inc = true; + break; + + case 'p': + forge_on = true; + break; + + case 'v': + verbose++; + break; + + case 'V': /* Volume name */ + VolumeName = optarg; + break; + + case '?': + default: + usage(); + + } /* end switch */ + } /* end while */ + argc -= optind; + argv += optind; + + if (argc != 2) { + Pmsg0(0, _("Wrong number of arguments: \n")); + usage(); + } + + if (configfile == NULL) { + configfile = bstrdup(CONFIG_FILE); + } + + config = New(CONFIG()); + parse_sd_config(config, configfile, M_ERROR_TERM); + setup_me(); + load_sd_plugins(me->plugin_directory); + + if (!got_inc) { /* If no include file, */ + add_fname_to_include_list(ff, 0, "/"); /* include everything */ + } + + where = argv[1]; + do_extract(argv[0]); + + if (bsr) { + free_bsr(bsr); + } + if (prog_name_msg) { + Pmsg1(000, _("%d Program Name and/or Program Data Stream records ignored.\n"), + prog_name_msg); + } + if (win32_data_msg) { + Pmsg1(000, _("%d Win32 data or Win32 gzip data stream records. Ignored.\n"), + win32_data_msg); + } + term_include_exclude_files(ff); + term_find_files(ff); + return 0; +} + +static void do_extract(char *devname) +{ + char ed1[50]; + struct stat statp; + + enable_backup_privileges(NULL, 1); + + jcr = setup_jcr("bextract", devname, bsr, VolumeName, SD_READ, true/*read dedup data*/); + if (!jcr) { + exit(1); + } + dev = jcr->read_dcr->dev; + if (!dev) { + exit(1); + } + dcr = jcr->read_dcr; + + /* Make sure where directory exists and that it is a directory */ + if (stat(where, &statp) < 0) { + berrno be; + Emsg2(M_ERROR_TERM, 0, _("Cannot stat %s. It must exist. ERR=%s\n"), + where, be.bstrerror()); + } + if (!S_ISDIR(statp.st_mode)) { + Emsg1(M_ERROR_TERM, 0, _("%s must be a directory.\n"), where); + } + + free(jcr->where); + jcr->where = bstrdup(where); + attr = new_attr(jcr); + + compress_buf = get_memory(compress_buf_size); + curr_fname = get_pool_memory(PM_FNAME); + *curr_fname = 0; + + read_records(dcr, record_cb, mount_next_read_volume); + /* If output file is still open, it was the last one in the + * archive since we just hit an end of file, so close the file. + */ + if (is_bopen(&bfd)) { + set_attributes(jcr, attr, &bfd); + } + release_device(dcr); + free_attr(attr); + free_jcr(jcr); + dev->term(NULL); + free_pool_memory(curr_fname); + + printf(_("%u files restored.\n"), num_files); + if (num_errors) { + printf(_("Found %s error%s\n"), edit_uint64(num_errors, ed1), num_errors>1? 
"s":""); + } + return; +} + +static bool store_data(BFILE *bfd, char *data, const int32_t length) +{ + if (is_win32_stream(attr->data_stream) && !have_win32_api()) { + set_portable_backup(bfd); + if (!processWin32BackupAPIBlock(bfd, data, length)) { + berrno be; + Emsg2(M_ERROR_TERM, 0, _("Write error on %s: %s\n"), + attr->ofname, be.bstrerror()); + return false; + } + } else if (bwrite(bfd, data, length) != (ssize_t)length) { + berrno be; + Emsg2(M_ERROR_TERM, 0, _("Write error on %s: %s\n"), + attr->ofname, be.bstrerror()); + return false; + } + + return true; +} + +/* + * Called here for each record from read_records() + */ +static bool record_cb(DCR *dcr, DEV_RECORD *rec) +{ + int stat, ret=true; + JCR *jcr = dcr->jcr; + char ed1[50]; + + bool restoredatap = false; + POOLMEM *orgdata = NULL; + uint32_t orgdata_len = 0; + + if (rec->FileIndex < 0) { + return true; /* we don't want labels */ + } + + /* In this mode, we do not create any file on disk, just read + * everything from the volume. + */ + if (skip_extract) { + switch (rec->maskedStream) { + case STREAM_UNIX_ATTRIBUTES: + case STREAM_UNIX_ATTRIBUTES_EX: + if (!unpack_attributes_record(jcr, rec->Stream, rec->data, rec->data_len, attr)) { + Emsg0(M_ERROR_TERM, 0, _("Cannot continue.\n")); + } + if (verbose) { + attr->data_stream = decode_stat(attr->attr, &attr->statp, sizeof(attr->statp), &attr->LinkFI); + build_attr_output_fnames(jcr, attr); + print_ls_output(jcr, attr); + } + pm_strcpy(curr_fname, attr->fname); + num_files++; + break; + } + num_records++; + + /* We display some progress information if verbose not set or set to 2 */ + if (verbose != 1 && (num_records % 200000) == 0L) { + fprintf(stderr, "\rfiles=%d records=%s\n", num_files, edit_uint64(num_records, ed1)); + } + ret = true; + goto bail_out; + } + + /* File Attributes stream */ + + switch (rec->maskedStream) { + case STREAM_UNIX_ATTRIBUTES: + case STREAM_UNIX_ATTRIBUTES_EX: + + /* If extracting, it was from previous stream, so + * close the output file. 
+ */ + if (extract) { + if (!is_bopen(&bfd)) { + Emsg0(M_ERROR, 0, _("Logic error output file should be open but is not.\n")); + } + set_attributes(jcr, attr, &bfd); + extract = false; + } + + if (!unpack_attributes_record(jcr, rec->Stream, rec->data, rec->data_len, attr)) { + Emsg0(M_ERROR_TERM, 0, _("Cannot continue.\n")); + } + + /* Keep the name of the current file if we find a bad block */ + pm_strcpy(curr_fname, attr->fname); + + if (file_is_included(ff, attr->fname) && !file_is_excluded(ff, attr->fname)) { + attr->data_stream = decode_stat(attr->attr, &attr->statp, sizeof(attr->statp), &attr->LinkFI); + if (!is_restore_stream_supported(attr->data_stream)) { + if (!non_support_data++) { + Jmsg(jcr, M_ERROR, 0, _("%s stream not supported on this Client.\n"), + stream_to_ascii(attr->data_stream)); + } + extract = false; + goto bail_out; + } + + build_attr_output_fnames(jcr, attr); + + if (attr->type == FT_DELETED) { /* TODO: choose the right fname/ofname */ + Jmsg(jcr, M_INFO, 0, _("%s was deleted.\n"), attr->fname); + extract = false; + goto bail_out; + } + + extract = false; + stat = create_file(jcr, attr, &bfd, REPLACE_ALWAYS); + + switch (stat) { + case CF_ERROR: + case CF_SKIP: + break; + case CF_EXTRACT: + extract = true; + print_ls_output(jcr, attr); + num_files++; + fileAddr = 0; + break; + case CF_CREATED: + set_attributes(jcr, attr, &bfd); + print_ls_output(jcr, attr); + num_files++; + fileAddr = 0; + break; + } + } + break; + + case STREAM_RESTORE_OBJECT: + /* nothing to do */ + break; + + /* Data stream and extracting */ + case STREAM_FILE_DATA: + case STREAM_SPARSE_DATA: + case STREAM_WIN32_DATA: + if (extract) { + if (rec->maskedStream == STREAM_SPARSE_DATA) { + ser_declare; + uint64_t faddr; + wbuf = rec->data + OFFSET_FADDR_SIZE; + wsize = rec->data_len - OFFSET_FADDR_SIZE; + ser_begin(rec->data, OFFSET_FADDR_SIZE); + unser_uint64(faddr); + /* We seek only for real SPARSE data, not for OFFSET option */ + if ((rec->Stream & STREAM_BIT_OFFSETS) == 0 && fileAddr != faddr) { + fileAddr = faddr; + if (blseek(&bfd, (boffset_t)fileAddr, SEEK_SET) < 0) { + berrno be; + Emsg3(M_ERROR_TERM, 0, _("Seek error Addr=%llu on %s: %s\n"), + fileAddr, attr->ofname, be.bstrerror()); + } + } + } else { + wbuf = rec->data; + wsize = rec->data_len; + } + total += wsize; + Dmsg2(8, "Write %u bytes, total=%u\n", wsize, total); + store_data(&bfd, wbuf, wsize); + fileAddr += wsize; + } + break; + + /* GZIP data stream */ + case STREAM_GZIP_DATA: + case STREAM_SPARSE_GZIP_DATA: + case STREAM_WIN32_GZIP_DATA: +#ifdef HAVE_LIBZ + if (extract) { + uLong compress_len = compress_buf_size; + int stat = Z_BUF_ERROR; + + if (rec->maskedStream == STREAM_SPARSE_DATA) { + ser_declare; + uint64_t faddr; + char ec1[50]; + wbuf = rec->data + OFFSET_FADDR_SIZE; + wsize = rec->data_len - OFFSET_FADDR_SIZE; + ser_begin(rec->data, OFFSET_FADDR_SIZE); + unser_uint64(faddr); + if ((rec->Stream & STREAM_BIT_OFFSETS) == 0 && fileAddr != faddr) { + fileAddr = faddr; + if (blseek(&bfd, (boffset_t)fileAddr, SEEK_SET) < 0) { + berrno be; + Emsg3(M_ERROR, 0, _("Seek to %s error on %s: ERR=%s\n"), + edit_uint64(fileAddr, ec1), attr->ofname, be.bstrerror()); + extract = false; + goto bail_out; + } + } + } else { + wbuf = rec->data; + wsize = rec->data_len; + } + + while (compress_len < 10000000 && (stat=uncompress((Byte *)compress_buf, &compress_len, + (const Byte *)wbuf, (uLong)wsize)) == Z_BUF_ERROR) { + /* The buffer size is too small, try with a bigger one */ + compress_len = 2 * compress_len; + compress_buf = 
check_pool_memory_size(compress_buf, + compress_len); + } + if (stat != Z_OK) { + Emsg1(M_ERROR, 0, _("Uncompression error. ERR=%d\n"), stat); + extract = false; + goto bail_out; + } + + Dmsg2(100, "Write uncompressed %d bytes, total before write=%d\n", compress_len, total); + store_data(&bfd, compress_buf, compress_len); + total += compress_len; + fileAddr += compress_len; + Dmsg2(100, "Compress len=%d uncompressed=%d\n", rec->data_len, + compress_len); + } +#else + if (extract) { + Emsg0(M_ERROR, 0, _("GZIP data stream found, but GZIP not configured!\n")); + extract = false; + goto bail_out; + } +#endif + break; + + /* Compressed data stream */ + case STREAM_COMPRESSED_DATA: + case STREAM_SPARSE_COMPRESSED_DATA: + case STREAM_WIN32_COMPRESSED_DATA: + if (extract) { + uint32_t comp_magic, comp_len; + uint16_t comp_level, comp_version; +#ifdef HAVE_LZO + lzo_uint compress_len; + const unsigned char *cbuf; + int r, real_compress_len; +#endif + + if (rec->maskedStream == STREAM_SPARSE_DATA) { + ser_declare; + uint64_t faddr; + char ec1[50]; + wbuf = rec->data + OFFSET_FADDR_SIZE; + wsize = rec->data_len - OFFSET_FADDR_SIZE; + ser_begin(rec->data, OFFSET_FADDR_SIZE); + unser_uint64(faddr); + if ((rec->Stream & STREAM_BIT_OFFSETS) == 0 && fileAddr != faddr) { + fileAddr = faddr; + if (blseek(&bfd, (boffset_t)fileAddr, SEEK_SET) < 0) { + berrno be; + Emsg3(M_ERROR, 0, _("Seek to %s error on %s: ERR=%s\n"), + edit_uint64(fileAddr, ec1), attr->ofname, be.bstrerror()); + extract = false; + goto bail_out; + } + } + } else { + wbuf = rec->data; + wsize = rec->data_len; + } + + /* read compress header */ + unser_declare; + unser_begin(wbuf, sizeof(comp_stream_header)); + unser_uint32(comp_magic); + unser_uint32(comp_len); + unser_uint16(comp_level); + unser_uint16(comp_version); + Dmsg4(200, "Compressed data stream found: magic=0x%x, len=%d, level=%d, ver=0x%x\n", comp_magic, comp_len, + comp_level, comp_version); + + /* version check */ + if (comp_version != COMP_HEAD_VERSION) { + Emsg1(M_ERROR, 0, _("Compressed header version error. version=0x%x\n"), comp_version); + ret = false; + goto bail_out; + } + /* size check */ + if (comp_len + sizeof(comp_stream_header) != wsize) { + Emsg2(M_ERROR, 0, _("Compressed header size error. comp_len=%d, msglen=%d\n"), + comp_len, wsize); + ret = false; + goto bail_out; + } + + switch(comp_magic) { +#ifdef HAVE_LZO + case COMPRESS_LZO1X: + compress_len = compress_buf_size; + cbuf = (const unsigned char*) wbuf + sizeof(comp_stream_header); + real_compress_len = wsize - sizeof(comp_stream_header); + Dmsg2(200, "Comp_len=%d msglen=%d\n", compress_len, wsize); + while ((r=lzo1x_decompress_safe(cbuf, real_compress_len, + (unsigned char *)compress_buf, &compress_len, NULL)) == LZO_E_OUTPUT_OVERRUN) + { + + /* The buffer size is too small, try with a bigger one */ + compress_len = 2 * compress_len; + compress_buf = check_pool_memory_size(compress_buf, + compress_len); + } + if (r != LZO_E_OK) { + Emsg1(M_ERROR, 0, _("LZO uncompression error. 
ERR=%d\n"), r); + extract = false; + goto bail_out; + } + Dmsg2(100, "Write uncompressed %d bytes, total before write=%d\n", compress_len, total); + store_data(&bfd, compress_buf, compress_len); + total += compress_len; + fileAddr += compress_len; + Dmsg2(100, "Compress len=%d uncompressed=%d\n", rec->data_len, compress_len); + break; +#endif + default: + Emsg1(M_ERROR, 0, _("Compression algorithm 0x%x found, but not supported!\n"), comp_magic); + extract = false; + goto bail_out; + } + + } + break; + + case STREAM_MD5_DIGEST: + case STREAM_SHA1_DIGEST: + case STREAM_SHA256_DIGEST: + case STREAM_SHA512_DIGEST: + break; + + case STREAM_SIGNED_DIGEST: + case STREAM_ENCRYPTED_SESSION_DATA: + // TODO landonf: Investigate crypto support in the storage daemon + break; + + case STREAM_PROGRAM_NAMES: + case STREAM_PROGRAM_DATA: + if (!prog_name_msg) { + Pmsg0(000, _("Got Program Name or Data Stream. Ignored.\n")); + prog_name_msg++; + } + break; + case STREAM_PLUGIN_NAME: + /* Just ignore the plugin name */ + break; + default: + /* If extracting, weird stream (not 1 or 2), close output file anyway */ + if (extract) { + if (!is_bopen(&bfd)) { + Emsg0(M_ERROR, 0, _("Logic error output file should be open but is not.\n")); + } + set_attributes(jcr, attr, &bfd); + extract = false; + } + Jmsg(jcr, M_ERROR, 0, _("Unknown stream=%d ignored. This shouldn't happen!\n"), + rec->Stream); + break; + + } /* end switch */ +bail_out: + if (restoredatap) { + rec->data = orgdata; + rec->data_len = orgdata_len; + } + return ret; +} diff --git a/src/stored/block.c b/src/stored/block.c new file mode 100644 index 00000000..16bc04a0 --- /dev/null +++ b/src/stored/block.c @@ -0,0 +1,751 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * block.c -- tape block handling functions + * + * Kern Sibbald, March MMI + * added BB02 format October MMII + */ + + +#include "bacula.h" +#include "stored.h" + +#ifdef DEBUG_BLOCK_CHECKSUM +static const bool debug_block_checksum = true; +#else +static const bool debug_block_checksum = false; +#endif + +static int debug_io_error = 0; /* # blocks to write before creating I/O error */ + +#ifdef NO_TAPE_WRITE_TEST +static const bool no_tape_write_test = true; +#else +static const bool no_tape_write_test = false; +#endif + + +uint32_t get_len_and_clear_block(DEV_BLOCK *block, DEVICE *dev, uint32_t &pad); +uint32_t ser_block_header(DEV_BLOCK *block, bool do_checksum); +bool unser_block_header(DCR *dcr, DEVICE *dev, DEV_BLOCK *block); + +/* + * Write a block to the device, with locking and unlocking + * + * Returns: true on success + * : false on failure + * + */ +bool DCR::write_block_to_device(bool final) +{ + bool ok = true; + DCR *dcr = this; + + if (dcr->spooling) { + Dmsg0(250, "Write to spool\n"); + ok = write_block_to_spool_file(dcr); + return ok; + } + + if (!is_dev_locked()) { /* device already locked? 
*/ + /* note, do not change this to dcr->rLock */ + dev->rLock(false); /* no, lock it */ + } + + if (!check_for_newvol_or_newfile(dcr)) { + ok = false; + goto bail_out; /* fatal error */ + } + + Dmsg1(500, "Write block to dev=%p\n", dcr->dev); + if (!write_block_to_dev()) { + Dmsg2(40, "*** Failed write_block_to_dev adata=%d block=%p\n", + block->adata, block); + if (job_canceled(jcr) || jcr->getJobType() == JT_SYSTEM) { + ok = false; + Dmsg2(40, "cancel=%d or SYSTEM=%d\n", job_canceled(jcr), + jcr->getJobType() == JT_SYSTEM); + } else { + bool was_adata = false; + if (was_adata) { + dcr->set_ameta(); + was_adata = true; + } + /* Flush any existing JobMedia info */ + if (!(ok = dir_create_jobmedia_record(dcr))) { + Jmsg(jcr, M_FATAL, 0, _("[SF0201] Error writing JobMedia record to catalog.\n")); + } else { + Dmsg1(40, "Calling fixup_device was_adata=%d...\n", was_adata); + ok = fixup_device_block_write_error(dcr); + } + if (was_adata) { + dcr->set_adata(); + } + } + } + if (ok && final && !dir_create_jobmedia_record(dcr)) { + Jmsg(jcr, M_FATAL, 0, _("[SF0202] Error writing final JobMedia record to catalog.\n")); + } + +bail_out: + if (!dcr->is_dev_locked()) { /* did we lock dev above? */ + /* note, do not change this to dcr->dunlock */ + dev->Unlock(); /* unlock it now */ + } + return ok; +} + +/* + * Write a block to the device + * + * Returns: true on success or EOT + * false on hard error + */ +bool DCR::write_block_to_dev() +{ + ssize_t stat = 0; + uint32_t wlen; /* length to write */ + bool ok = true; + DCR *dcr = this; + uint32_t checksum; + uint32_t pad; /* padding or zeros written */ + boffset_t pos; + char ed1[50]; + + if (no_tape_write_test) { + empty_block(block); + return true; + } + if (job_canceled(jcr)) { + return false; + } + if (!dev->enabled) { + Jmsg1(jcr, M_FATAL, 0, _("[SF0203] Cannot write block. Device is disabled. dev=%s\n"), dev->print_name()); + return false; + } + + ASSERT2(block->adata == dev->adata, "Block and dev adata not same"); + Dmsg4(200, "fd=%d adata=%d bufp-buf=%d binbuf=%d\n", dev->fd(), block->adata, + block->bufp-block->buf, block->binbuf); + ASSERT2(block->binbuf == ((uint32_t)(block->bufp - block->buf)), "binbuf badly set"); + + if (is_block_empty(block)) { /* Does block have data in it? */ + Dmsg1(50, "return write_block_to_dev no adata=%d data to write\n", block->adata); + return true; + } + + if (dev->at_weot()) { + Dmsg1(50, "==== FATAL: At EOM with ST_WEOT. adata=%d.\n", dev->adata); + dev->dev_errno = ENOSPC; + Jmsg1(jcr, M_FATAL, 0, _("[SF0204] Cannot write block. Device at EOM. dev=%s\n"), dev->print_name()); + return false; + } + if (!dev->can_append()) { + dev->dev_errno = EIO; + Jmsg1(jcr, M_FATAL, 0, _("[SF0205] Attempt to write on read-only Volume. dev=%s\n"), dev->print_name()); + Dmsg1(50, "Attempt to write on read-only Volume. dev=%s\n", dev->print_name()); + return false; + } + + if (!dev->is_open()) { + Jmsg1(jcr, M_FATAL, 0, _("[SF0206] Attempt to write on closed device=%s\n"), dev->print_name()); + Dmsg1(50, "Attempt to write on closed device=%s\n", dev->print_name()); + return false; + } + + wlen = get_len_and_clear_block(block, dev, pad); + block->block_len = wlen; + dev->updateVolCatPadding(pad); + + checksum = ser_block_header(block, dev->do_checksum()); + + if (!dev->do_size_checks(dcr, block)) { + Dmsg0(50, "Size check triggered. 
Cannot write block.\n"); + return false; + } + + dev->updateVolCatWrites(1); + + dump_block(dev, block, "before write"); + +#ifdef DEBUG_BLOCK_ZEROING + uint32_t *bp = (uint32_t *)block->buf; + if (bp[0] == 0 && bp[1] == 0 && bp[2] == 0 && block->buf[12] == 0) { + Jmsg0(jcr, M_ABORT, 0, _("[SA0201] Write block header zeroed.\n")); + } +#endif + + /* + * If Adata block, we must seek to the correct address + */ + if (block->adata) { + ASSERT(dcr->dev->adata); + uint64_t cur = dev->lseek(dcr, 0, SEEK_CUR); + /* If we are going to create a hole, record it */ + if (block->BlockAddr != cur) { + dev->lseek(dcr, block->BlockAddr, SEEK_SET); + Dmsg4(100, "Adata seek BlockAddr from %lld to %lld = %lld bytes adata_addr=%lld\n", + cur, block->BlockAddr, block->BlockAddr - cur, dev->adata_addr); + /* Insanity check */ + if (block->BlockAddr > cur) { + dev->updateVolCatHoleBytes(block->BlockAddr - cur); + } else if (block->BlockAddr < cur) { + Pmsg5(000, "Vol=%s cur=%lld BlockAddr=%lld adata=%d block=%p\n", + dev->getVolCatName(), cur, block->BlockAddr, block->adata, block); + Jmsg3(jcr, M_FATAL, 0, "[SF0207] Bad seek on adata Vol=%s BlockAddr=%lld DiskAddr=%lld. Multiple simultaneous Jobs?\n", + dev->getVolCatName(), block->BlockAddr, cur); + //Pmsg2(000, "HoleBytes would go negative cur=%lld blkaddr=%lld\n", cur, block->BlockAddr); + } + } + } + + /* + * Do write here, make a somewhat feeble attempt to recover from + * I/O errors, or from the OS telling us it is busy. + */ + int retry = 0; + errno = 0; + stat = 0; + /* ***FIXME**** remove next line debug */ + pos = dev->lseek(dcr, 0, SEEK_CUR); + do { + if (retry > 0 && stat == -1 && errno == EBUSY) { + berrno be; + Dmsg4(100, "===== write retry=%d stat=%d errno=%d: ERR=%s\n", + retry, stat, errno, be.bstrerror()); + bmicrosleep(5, 0); /* pause a bit if busy or lots of errors */ + dev->clrerror(-1); + } + stat = dev->write(block->buf, (size_t)wlen); + Dmsg4(100, "%s write() BlockAddr=%lld wlen=%d Vol=%s wlen=%d\n", + block->adata?"Adata":"Ameta", block->BlockAddr, wlen, + dev->VolHdr.VolumeName); + } while (stat == -1 && (errno == EBUSY || errno == EIO) && retry++ < 3); + + /* ***FIXME*** remove 2 lines debug */ + Dmsg2(100, "Wrote %d bytes at %s\n", wlen, dev->print_addr(ed1, sizeof(ed1), pos)); + dump_block(dev, block, "After write"); + + if (debug_block_checksum) { + uint32_t achecksum = ser_block_header(block, dev->do_checksum()); + if (checksum != achecksum) { + Jmsg2(jcr, M_ERROR, 0, _("[SA0201] Block checksum changed during write: before=%u after=%u\n"), + checksum, achecksum); + dump_block(dev, block, "with checksum error"); + } + } + +#ifdef DEBUG_BLOCK_ZEROING + if (bp[0] == 0 && bp[1] == 0 && bp[2] == 0 && block->buf[12] == 0) { + Jmsg0(jcr, M_ABORT, 0, _("[SA0202] Write block header zeroed.\n")); + } +#endif + + if (debug_io_error) { + debug_io_error--; + if (debug_io_error == 1) { /* trigger error */ + stat = -1; + dev->dev_errno = EIO; + errno = EIO; + debug_io_error = 0; /* turn off trigger */ + } + } + + if (stat != (ssize_t)wlen) { + /* Some devices simply report EIO when the volume is full. + * With a little more thought we may be able to check + * capacity and distinguish real errors and EOT + * conditions. In any case, we probably want to + * simulate an End of Medium. 
+ */ + if (stat == -1) { + berrno be; + dev->clrerror(-1); /* saves errno in dev->dev_errno */ + if (dev->dev_errno == 0) { + dev->dev_errno = ENOSPC; /* out of space */ + } + if (dev->dev_errno != ENOSPC) { + int etype = M_ERROR; + if (block->adata) { + etype = M_FATAL; + } + dev->VolCatInfo.VolCatErrors++; + Jmsg5(jcr, etype, 0, _("%s Write error at %s on device %s Vol=%s. ERR=%s.\n"), + etype==M_FATAL?"[SF0208]":"[SE0201]", + dev->print_addr(ed1, sizeof(ed1)), dev->print_name(), + dev->getVolCatName(), be.bstrerror()); + if (dev->get_tape_alerts(this)) { + dev->show_tape_alerts(this, list_long, list_last, alert_callback); + } + } + } else { + dev->dev_errno = ENOSPC; /* out of space */ + } + if (dev->dev_errno == ENOSPC) { + dev->update_freespace(); + if (dev->is_freespace_ok() && dev->free_space < dev->min_free_space) { + int mtype = M_FATAL; + dev->set_nospace(); + if (dev->is_removable()) { + mtype = M_INFO; + } + Jmsg(jcr, mtype, 0, _("%s Out of freespace caused End of Volume \"%s\" at %s on device %s. Write of %u bytes got %d.\n"), + mtype==M_FATAL?"[SF0209]":"[SI0201]", + dev->getVolCatName(), + dev->print_addr(ed1, sizeof(ed1)), dev->print_name(), wlen, stat); + } else { + dev->clear_nospace(); + Jmsg(jcr, M_INFO, 0, _("[SI0202] End of Volume \"%s\" at %s on device %s. Write of %u bytes got %d.\n"), + dev->getVolCatName(), + dev->print_addr(ed1, sizeof(ed1)), dev->print_name(), wlen, stat); + } + } + if (chk_dbglvl(100)) { + berrno be; + Dmsg7(90, "==== Write error. fd=%d size=%u rtn=%d dev_blk=%d blk_blk=%d errno=%d: ERR=%s\n", + dev->fd(), wlen, stat, dev->block_num, block->BlockNumber, + dev->dev_errno, be.bstrerror(dev->dev_errno)); + } + + Dmsg0(40, "Calling terminate_writing_volume\n"); + ok = terminate_writing_volume(dcr); + if (ok) { + reread_last_block(dcr); + } + return false; + } + + /* We successfully wrote the block, now do housekeeping */ + Dmsg2(1300, "VolCatBytes=%lld newVolCatBytes=%lld\n", dev->VolCatInfo.VolCatBytes, + (dev->VolCatInfo.VolCatBytes+wlen)); + if (!dev->setVolCatAdataBytes(block->BlockAddr + wlen)) { + dev->updateVolCatBytes(wlen); + Dmsg3(200, "AmetaBytes=%lld AdataBytes=%lld Bytes=%lld\n", + dev->VolCatInfo.VolCatAmetaBytes, dev->VolCatInfo.VolCatAdataBytes, dev->VolCatInfo.VolCatBytes); + } + dev->updateVolCatBlocks(1); + dev->LastBlock = block->BlockNumber; + block->BlockNumber++; + + /* Update dcr values */ + if (dev->is_tape()) { + dev->EndAddr = dev->get_full_addr(); + if (dcr->EndAddr < dev->EndAddr) { + dcr->EndAddr = dev->EndAddr; + } + dev->block_num++; + } else { + /* Save address of byte just written */ + uint64_t addr = dev->file_addr + wlen - 1; + if (dev->is_indexed()) { + uint64_t full_addr = dev->get_full_addr(addr); + if (full_addr < dcr->EndAddr) { + Pmsg2(000, "Possible incorrect EndAddr oldEndAddr=%llu newEndAddr=%llu\n", + dcr->EndAddr, full_addr); + } + dcr->EndAddr = full_addr; + } + + if (dev->adata) { + /* We really should use file_addr, but I am not sure it is correctly set */ + Dmsg3(100, "Set BlockAddr from %lld to %lld adata_addr=%lld\n", + block->BlockAddr, block->BlockAddr + wlen, dev->adata_addr); + block->BlockAddr += wlen; + dev->adata_addr = block->BlockAddr; + } else { + block->BlockAddr = dev->get_full_addr() + wlen; + } + } + if (dev->is_indexed()) { + if (dcr->VolMediaId != dev->VolCatInfo.VolMediaId) { + Dmsg7(100, "JobMedia Vol=%s wrote=%d MediaId=%lld FI=%lu LI=%lu StartAddr=%lld EndAddr=%lld Wrote\n", + dcr->VolumeName, dcr->WroteVol, dcr->VolMediaId, + dcr->VolFirstIndex, dcr->VolLastIndex, 
dcr->StartAddr, dcr->EndAddr); + } + dcr->VolMediaId = dev->VolCatInfo.VolMediaId; + Dmsg3(150, "VolFirstIndex=%d blockFirstIndex=%d Vol=%s\n", + dcr->VolFirstIndex, block->FirstIndex, dcr->VolumeName); + if (dcr->VolFirstIndex == 0 && block->FirstIndex > 0) { + dcr->VolFirstIndex = block->FirstIndex; + } + if (block->LastIndex > (int32_t)dcr->VolLastIndex) { + dcr->VolLastIndex = block->LastIndex; + } + dcr->WroteVol = true; + } + + dev->file_addr += wlen; /* update file address */ + dev->file_size += wlen; + dev->usage += wlen; /* update usage counter */ + if (dev->part > 0) { + dev->part_size += wlen; + } + dev->setVolCatInfo(false); /* Needs update */ + + Dmsg2(1300, "write_block: wrote block %d bytes=%d\n", dev->block_num, wlen); + empty_block(block); + return true; +} + + +/* + * Read block with locking + * + */ +bool DCR::read_block_from_device(bool check_block_numbers) +{ + bool ok; + + Dmsg0(250, "Enter read_block_from_device\n"); + dev->rLock(false); + ok = read_block_from_dev(check_block_numbers); + dev->rUnlock(); + Dmsg1(250, "Leave read_block_from_device. ok=%d\n", ok); + return ok; +} + +static void set_block_position(DCR *dcr, DEVICE *dev, DEV_BLOCK *block) +{ + /* Used also by the Single Item Restore code to locate a particular block */ + if (dev->is_tape()) { + block->BlockAddr = dev->get_full_addr(); + + } else if (!dev->adata) { /* TODO: See if we just use !dev->adata for tapes */ + /* Note: we only update the DCR values for ameta blocks + * because all the indexing (JobMedia) is done with + * ameta blocks/records, which may point to adata. + */ + block->BlockAddr = dev->get_full_addr(); + } + block->RecNum = 0; +} + +/* + * Read the next block into the block structure and unserialize + * the block header. For a file, the block may be partially + * or completely in the current buffer. + * Note: in order for bscan to generate correct JobMedia records + * we must be careful to update the EndAddr of the last byte read. + */ +bool DCR::read_block_from_dev(bool check_block_numbers) +{ + ssize_t stat; + int looping; + int retry; + DCR *dcr = this; + boffset_t pos; + char ed1[50]; + uint32_t data_len; + + if (job_canceled(jcr)) { + Mmsg(dev->errmsg, _("Job failed or canceled.\n")); + Dmsg1(000, "%s", dev->errmsg); + block->read_len = 0; + return false; + } + if (!dev->enabled) { + Mmsg(dev->errmsg, _("[SF0210] Cannot write block. Device is disabled. 
dev=%s\n"), dev->print_name()); + Jmsg1(jcr, M_FATAL, 0, "%s", dev->errmsg); + return false; + } + + if (dev->at_eot()) { + Mmsg(dev->errmsg, _("[SX0201] At EOT: attempt to read past end of Volume.\n")); + Dmsg1(000, "%s", dev->errmsg); + block->read_len = 0; + return false; + } + looping = 0; + + if (!dev->is_open()) { + Mmsg4(dev->errmsg, _("[SF0211] Attempt to read closed device: fd=%d at file:blk %u:%u on device %s\n"), + dev->fd(), dev->file, dev->block_num, dev->print_name()); + Jmsg(dcr->jcr, M_FATAL, 0, "%s", dev->errmsg); + Pmsg4(000, "Fatal: dev=%p dcr=%p adata=%d bytes=%lld\n", dev, dcr, dev->adata, + VolCatInfo.VolCatAdataRBytes); + Pmsg1(000, "%s", dev->errmsg); + block->read_len = 0; + return false; + } + + set_block_position(dcr, dev, block); + +reread: + if (looping > 1) { + dev->dev_errno = EIO; + Mmsg1(dev->errmsg, _("[SE0202] Block buffer size looping problem on device %s\n"), + dev->print_name()); + Dmsg1(000, "%s", dev->errmsg); + Jmsg(jcr, M_ERROR, 0, "%s", dev->errmsg); + block->read_len = 0; + return false; + } + + /* See if we must open another part */ + if (dev->at_eof() && !dev->open_next_part(dcr)) { + if (dev->at_eof()) { /* EOF just seen? */ + dev->set_eot(); /* yes, error => EOT */ + } + return false; + } + + retry = 0; + errno = 0; + stat = 0; + + if (dev->adata) { + dev->lseek(dcr, dcr->block->BlockAddr, SEEK_SET); + } + { /* debug block */ + pos = dev->lseek(dcr, (boffset_t)0, SEEK_CUR); /* get curr pos */ + Dmsg2(200, "Pos for read=%s %lld\n", + dev->print_addr(ed1, sizeof(ed1), pos), pos); + } + + data_len = 0; + + do { + retry = 0; + + do { + if ((retry > 0 && stat == -1 && errno == EBUSY)) { + berrno be; + Dmsg4(100, "===== read retry=%d stat=%d errno=%d: ERR=%s\n", + retry, stat, errno, be.bstrerror()); + bmicrosleep(10, 0); /* pause a bit if busy or lots of errors */ + dev->clrerror(-1); + } + stat = dev->read(block->buf + data_len, (size_t)(block->buf_len - data_len)); + if (stat > 0) + data_len += stat; + + } while (stat == -1 && (errno == EBUSY || errno == EINTR || errno == EIO) && retry++ < 3); + + } while (data_len < block->buf_len && stat > 0 && dev->dev_type == B_FIFO_DEV); + + Dmsg4(110, "Read() adata=%d vol=%s nbytes=%d pos=%lld\n", + block->adata, dev->VolHdr.VolumeName, stat < 0 ? stat : data_len, pos); + if (stat < 0) { + berrno be; + dev->clrerror(-1); + Dmsg2(90, "Read device fd=%d got: ERR=%s\n", dev->fd(), be.bstrerror()); + block->read_len = 0; + if (reading_label) { /* Trying to read Volume label */ + Mmsg(dev->errmsg, _("[SE0203] The %sVolume=%s on device=%s appears to be unlabeled.%s\n"), + dev->adata?"adata ":"", VolumeName, dev->print_name(), + dev->is_fs_nearly_full(1048576)?" Warning: The filesystem is nearly full.":""); + } else { + Mmsg4(dev->errmsg, _("[SE0204] Read error on fd=%d at addr=%s on device %s. ERR=%s.\n"), + dev->fd(), dev->print_addr(ed1, sizeof(ed1)), dev->print_name(), be.bstrerror()); + } + Jmsg(jcr, M_ERROR, 0, "%s", dev->errmsg); + if (dev->get_tape_alerts(this)) { + dev->show_tape_alerts(this, list_long, list_last, alert_callback); + } + if (dev->at_eof()) { /* EOF just seen? */ + dev->set_eot(); /* yes, error => EOT */ + } + return false; + } + + stat = data_len; + + if (stat == 0) { /* Got EOF ! 
*/ + pos = dev->lseek(dcr, (boffset_t)0, SEEK_CUR); /* get curr pos */ + pos = dev->get_full_addr(pos); + if (reading_label) { /* Trying to read Volume label */ + Mmsg4(dev->errmsg, _("The %sVolume=%s on device=%s appears to be unlabeled.%s\n"), + dev->adata?"adata ":"", VolumeName, dev->print_name(), + dev->is_fs_nearly_full(1048576)?" Warning: The filesystem is nearly full.":""); + } else { + Mmsg4(dev->errmsg, _("Read zero %sbytes Vol=%s at %s on device %s.\n"), + dev->adata?"adata ":"", dev->VolCatInfo.VolCatName, + dev->print_addr(ed1, sizeof(ed1), pos), dev->print_name()); + } + block->read_len = 0; + Dmsg1(100, "%s", dev->errmsg); + if (dev->at_eof()) { /* EOF just seen? */ + dev->set_eot(); /* yes, error => EOT */ + } + dev->set_ateof(); + dev->file_addr = 0; + dev->EndAddr = pos; + if (dcr->EndAddr < dev->EndAddr) { + dcr->EndAddr = dev->EndAddr; + } + Dmsg3(150, "==== Read zero bytes. adata=%d vol=%s at %s\n", dev->adata, + dev->VolCatInfo.VolCatName, dev->print_addr(ed1, sizeof(ed1), pos)); + return false; /* return eof */ + } + + /* Continue here for successful read */ + + block->read_len = stat; /* save length read */ + if (block->adata) { + block->binbuf = block->read_len; + block->block_len = block->read_len; + } else { + if (block->read_len == 80 && + (dcr->VolCatInfo.LabelType != B_BACULA_LABEL || + dcr->device->label_type != B_BACULA_LABEL)) { + /* ***FIXME*** should check label */ + Dmsg2(100, "Ignore 80 byte ANSI label at %u:%u\n", dev->file, dev->block_num); + dev->clear_eof(); + goto reread; /* skip ANSI/IBM label */ + } + + if (block->read_len < BLKHDR2_LENGTH) { + dev->dev_errno = EIO; + Mmsg3(dev->errmsg, _("[SE0205] Volume data error at %s! Very short block of %d bytes on device %s discarded.\n"), + dev->print_addr(ed1, sizeof(ed1)), block->read_len, dev->print_name()); + Jmsg(jcr, M_ERROR, 0, "%s", dev->errmsg); + dev->set_short_block(); + block->read_len = block->binbuf = 0; + Dmsg2(50, "set block=%p binbuf=%d\n", block, block->binbuf); + return false; /* return error */ + } + + if (!unser_block_header(this, dev, block)) { + if (forge_on) { + dev->file_addr += block->read_len; + dev->file_size += block->read_len; + goto reread; + } + return false; + } + } + + /* + * If the block is bigger than the buffer, we reposition for + * re-reading the block, allocate a buffer of the correct size, + * and go re-read. + */ + Dmsg3(150, "adata=%d block_len=%d buf_len=%d\n", block->adata, block->block_len, block->buf_len); + if (block->block_len > block->buf_len) { + dev->dev_errno = EIO; + Mmsg2(dev->errmsg, _("[SE0206] Block length %u is greater than buffer %u. 
Attempting recovery.\n"), + block->block_len, block->buf_len); + Jmsg(jcr, M_ERROR, 0, "%s", dev->errmsg); + Pmsg1(000, "%s", dev->errmsg); + /* Attempt to reposition to re-read the block */ + if (dev->is_tape()) { + Dmsg0(250, "BSR for reread; block too big for buffer.\n"); + if (!dev->bsr(1)) { + Mmsg(dev->errmsg, "%s", dev->bstrerror()); + if (dev->errmsg[0]) { + Jmsg(jcr, M_ERROR, 0, "[SE0207] %s", dev->errmsg); + } + block->read_len = 0; + return false; + } + } else { + Dmsg0(250, "Seek to beginning of block for reread.\n"); + boffset_t pos = dev->lseek(dcr, (boffset_t)0, SEEK_CUR); /* get curr pos */ + pos -= block->read_len; + dev->lseek(dcr, pos, SEEK_SET); + dev->file_addr = pos; + } + Mmsg1(dev->errmsg, _("[SI0203] Setting block buffer size to %u bytes.\n"), block->block_len); + Jmsg(jcr, M_INFO, 0, "%s", dev->errmsg); + Pmsg1(000, "%s", dev->errmsg); + /* Set new block length */ + dev->max_block_size = block->block_len; + block->buf_len = block->block_len; + free_memory(block->buf); + block->buf = get_memory(block->buf_len); + empty_block(block); + looping++; + goto reread; /* re-read block with correct block size */ + } + + if (block->block_len > block->read_len) { + dev->dev_errno = EIO; + Mmsg4(dev->errmsg, _("[SE0208] Volume data error at %u:%u! Short block of %d bytes on device %s discarded.\n"), + dev->file, dev->block_num, block->read_len, dev->print_name()); + Jmsg(jcr, M_ERROR, 0, "%s", dev->errmsg); + dev->set_short_block(); + block->read_len = block->binbuf = 0; + return false; /* return error */ + } + + dev->clear_short_block(); + dev->clear_eof(); + dev->updateVolCatReads(1); + dev->updateVolCatReadBytes(block->read_len); + + /* Update dcr values */ + if (dev->is_tape()) { + dev->EndAddr = dev->get_full_addr(); + if (dcr->EndAddr < dev->EndAddr) { + dcr->EndAddr = dev->EndAddr; + } + dev->block_num++; + } else { + /* We need to take care about a short block in EndBlock/File + * computation + */ + uint32_t len = MIN(block->read_len, block->block_len); + uint64_t addr = dev->get_full_addr() + len - 1; + if (dev->is_indexed()) { + if (addr > dcr->EndAddr) { + dcr->EndAddr = addr; + } + } + dev->EndAddr = addr; + } + if (dev->is_indexed()) { + dcr->VolMediaId = dev->VolCatInfo.VolMediaId; + } + dev->file_addr += block->read_len; + dev->file_size += block->read_len; + dev->usage += block->read_len; /* update usage counter */ + + /* + * If we read a short block on disk, + * seek to beginning of next block. This saves us + * from shuffling blocks around in the buffer. Take a + * look at this from an efficiency stand point later, but + * it should only happen once at the end of each job. + * + * I've been lseek()ing negative relative to SEEK_CUR for 30 + * years now. However, it seems that with the new off_t definition, + * it is not possible to seek negative amounts, so we use two + * lseek(). One to get the position, then the second to do an + * absolute positioning -- so much for efficiency. KES Sep 02. 
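+    *
+    * Worked example (hypothetical numbers, not taken from a real run):
+    * suppose buf_len is 64512 and the final block of a disk Volume is
+    * only 4096 bytes, so read_len=64512 and block_len=4096.  The code
+    * below first does lseek(dcr, 0, SEEK_CUR) to learn the current
+    * absolute offset, subtracts (read_len - block_len) = 60416, and
+    * then does an absolute lseek(dcr, pos, SEEK_SET) so the next read
+    * starts exactly on the following block boundary.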
+ */ + Dmsg0(250, "At end of read block\n"); + if (block->read_len > block->block_len && !dev->is_tape()) { + char ed1[50]; + boffset_t pos = dev->lseek(dcr, (boffset_t)0, SEEK_CUR); /* get curr pos */ + Dmsg1(250, "Current lseek pos=%s\n", edit_int64(pos, ed1)); + pos -= (block->read_len - block->block_len); + dev->lseek(dcr, pos, SEEK_SET); + Dmsg3(250, "Did lseek pos=%s blk_size=%d rdlen=%d\n", + edit_int64(pos, ed1), block->block_len, + block->read_len); + dev->file_addr = pos; + dev->file_size = pos; + } + Dmsg3(150, "Exit read_block read_len=%d block_len=%d binbuf=%d\n", + block->read_len, block->block_len, block->binbuf); + block->block_read = true; + return true; +} diff --git a/src/stored/block.h b/src/stored/block.h new file mode 100644 index 00000000..2a2e2f1f --- /dev/null +++ b/src/stored/block.h @@ -0,0 +1,169 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Block definitions for Bacula media data format. + * + * Kern Sibbald, MM + */ + + +#ifndef __BLOCK_H +#define __BLOCK_H 1 + +#define MAX_BLOCK_SIZE 20000000 /* this is a sort of sanity check */ +#define DEFAULT_BLOCK_SIZE (512 * 126) /* 64,512 N.B. do not use 65,636 here */ +#define MIN_DEDUP_BLOCK_SIZE (512 * 2) /* Minimum block (bucket) size */ + +#define DEDUP_BLOCK_SIZE (512 * 128) /* For now use a fixed dedup block size */ + +/* Block Header definitions. 
*/ +#define BLKHDR1_ID "BB01" +#define BLKHDR2_ID "BB02" +#define BLKHDR_ID_LENGTH 4 +#define BLKHDR_CS_LENGTH 4 /* checksum length */ +#define BLKHDR1_LENGTH 16 /* Total length */ +#define BLKHDR2_LENGTH 24 /* Total length */ + +#define WRITE_BLKHDR_ID BLKHDR2_ID +#define WRITE_BLKHDR_LENGTH BLKHDR2_LENGTH +#define WRITE_ADATA_BLKHDR_LENGTH (6*sizeof(int32_t)+sizeof(uint64_t)) +#define BLOCK_VER 2 + +/* Record header definitions */ +#define RECHDR1_LENGTH 20 +/* + * Record header consists of: + * int32_t FileIndex + * int32_t Stream + * uint32_t data_length + * uint64_t FileOffset (if offset_stream) + */ +#define RECHDR2_LENGTH (3*sizeof(int32_t)) +#define WRITE_RECHDR_LENGTH RECHDR2_LENGTH + +/* + * An adata record header includes: + * int32_t FileIndex + * int32_t Stream STREAM_ADATA_RECORD_HEADER + * uint32_t data_length + * uint32_t block length (binbuf to that point in time) + * int32_t Stream (original stream) + */ +#define WRITE_ADATA_RECHDR_LENGTH (5*sizeof(int32_t)) + +/* Tape label and version definitions */ +#define BaculaId "Bacula 1.0 immortal\n" +#define OldBaculaId "Bacula 0.9 mortal\n" +#define BaculaTapeVersion 11 +#define OldCompatibleBaculaTapeVersion1 10 +#define OldCompatibleBaculaTapeVersion2 9 + +#define BaculaMetaDataId "Bacula 1.0 Metadata\n" +#define BaculaAlignedDataId "Bacula 1.0 Aligned Data\n" +#define BaculaMetaDataVersion 10000 +#define BaculaAlignedDataVersion 20000 + +#define BaculaDedupMetaDataId "Bacula 1.0 Dedup Metadata\n" +#define BaculaDedupMetaDataVersion 30000 + +#define BaculaS3CloudId "Bacula 1.0 S3 Cloud Data\n" +#define BaculaS3CloudVersion 40000 + +/* + * This is the Media structure for a block header + * Note, when written, it is serialized. + 16 bytes + + uint32_t CheckSum; + uint32_t block_len; + uint32_t BlockNumber; + char Id[BLKHDR_ID_LENGTH]; + + * for BB02 block, we have + 24 bytes + + uint32_t CheckSum; + uint32_t block_len; + uint32_t BlockNumber; + char Id[BLKHDR_ID_LENGTH]; + uint32_t VolSessionId; + uint32_t VolSessionTime; + + * for an adata block header (in ameta file), we have + 32 bytes + + uint32_t BlockNumber; + int32_t Stream; STREAM_ADATA_BLOCK_HEADER + uint32_t block_len; + uint32_t CheckSum; + uint32_t VolSessionId; + uint32_t VolSessionTime; + uint64_t BlockAddr; + + */ + +class DEVICE; /* for forward reference */ + +/* + * DEV_BLOCK for reading and writing blocks. + * This is the basic unit that is written to the device, and + * it contains a Block Header followd by Records. Note, + * at times (when reading a file), this block may contain + * multiple blocks. + * + * This is the memory structure for a device block. + */ +struct DEV_BLOCK { + DEV_BLOCK *next; /* pointer to next one */ + DEVICE *dev; /* pointer to device */ + /* binbuf is the number of bytes remaining in the buffer. + * For writes, it is bytes not yet written. + * For reads, it is remaining bytes not yet read. + */ + uint64_t BlockAddr; /* Block address */ + uint32_t binbuf; /* bytes in buffer */ + uint32_t block_len; /* length of current block read */ + uint32_t buf_len; /* max/default block length */ + uint32_t reclen; /* Last record length put in adata block */ + uint32_t BlockNumber; /* sequential Bacula block number */ + uint32_t read_len; /* bytes read into buffer, if zero, block empty */ + uint32_t VolSessionId; /* */ + uint32_t VolSessionTime; /* */ + uint32_t read_errors; /* block errors (checksum, header, ...) 
*/ + uint32_t CheckSum; /* Block checksum */ + uint32_t RecNum; /* Number of records read from the current block */ + int BlockVer; /* block version 1 or 2 */ + bool write_failed; /* set if write failed */ + bool block_read; /* set when block read */ + bool needs_write; /* block must be written */ + bool adata; /* adata block */ + bool no_header; /* Set if no block header */ + bool new_fi; /* New FI arrived */ + int32_t FirstIndex; /* first index this block */ + int32_t LastIndex; /* last index this block */ + int32_t rechdr_items; /* number of items in rechdr queue */ + char *bufp; /* pointer into buffer */ + char ser_buf[BLKHDR2_LENGTH]; /* Serial buffer for adata */ + POOLMEM *rechdr_queue; /* record header queue */ + POOLMEM *buf; /* actual data buffer */ +}; + +#define block_is_empty(block) ((block)->read_len == 0) + +#endif diff --git a/src/stored/block_util.c b/src/stored/block_util.c new file mode 100644 index 00000000..bf0a8c38 --- /dev/null +++ b/src/stored/block_util.c @@ -0,0 +1,833 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * block_util.c -- tape block utility functions + * + * Kern Sibbald, split from block.c March MMXII + */ + + +#include "bacula.h" +#include "stored.h" + +static const int dbglvl = 160; + +#ifdef DEBUG_BLOCK_CHECKSUM +static const bool debug_block_checksum = true; +#else +static const bool debug_block_checksum = false; +#endif + +#ifdef NO_TAPE_WRITE_TEST +static const bool no_tape_write_test = true; +#else +static const bool no_tape_write_test = false; +#endif + +/* + * Dump the block header, then walk through + * the block printing out the record headers. 
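+ *
+ * Example of the arithmetic used while walking the records below
+ * (hypothetical values): for a BB02 block the header is
+ * bhl = BLKHDR2_LENGTH = 24 bytes and each record header is
+ * rhl = RECHDR2_LENGTH = 12 bytes, so a record carrying 300 data
+ * bytes advances the walk pointer p by data_len + rhl = 312 bytes;
+ * for an old BB01 block bhl is 16 and rhl is RECHDR1_LENGTH = 20.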
+ */ +void dump_block(DEVICE *dev, DEV_BLOCK *b, const char *msg, bool force) +{ + ser_declare; + char *p; + char *bufp; + char Id[BLKHDR_ID_LENGTH+1]; + uint32_t CheckSum, BlockCheckSum; + uint32_t block_len, reclen; + uint32_t BlockNumber; + uint32_t VolSessionId, VolSessionTime, data_len; + int32_t FileIndex; + int32_t Stream; + int bhl, rhl; + char buf1[100], buf2[100]; + + if (!force && ((debug_level & ~DT_ALL) < 250)) { + return; + } + if (b->adata) { + Dmsg0(20, "Dump block: adata=1 cannot dump.\n"); + return; + } + bufp = b->bufp; + if (dev) { + if (dev->can_read()) { + bufp = b->buf + b->block_len; + } + } + unser_begin(b->buf, BLKHDR1_LENGTH); + unser_uint32(CheckSum); + unser_uint32(block_len); + unser_uint32(BlockNumber); + unser_bytes(Id, BLKHDR_ID_LENGTH); + ASSERT(unser_length(b->buf) == BLKHDR1_LENGTH); + Id[BLKHDR_ID_LENGTH] = 0; + if (Id[3] == '2') { + unser_uint32(VolSessionId); + unser_uint32(VolSessionTime); + bhl = BLKHDR2_LENGTH; + rhl = RECHDR2_LENGTH; + } else { + VolSessionId = VolSessionTime = 0; + bhl = BLKHDR1_LENGTH; + rhl = RECHDR1_LENGTH; + } + + if (block_len > 4000000 || block_len < BLKHDR_CS_LENGTH) { + Dmsg3(20, "Will not dump blocksize too %s %lu msg: %s\n", + (block_len < BLKHDR_CS_LENGTH)?"small":"big", + block_len, msg); + return; + } + + BlockCheckSum = bcrc32((uint8_t *)b->buf+BLKHDR_CS_LENGTH, + block_len-BLKHDR_CS_LENGTH); + Pmsg7(000, _("Dump block %s %p: adata=%d size=%d BlkNum=%d\n" +" Hdrcksum=%x cksum=%x\n"), + msg, b, b->adata, block_len, BlockNumber, CheckSum, BlockCheckSum); + p = b->buf + bhl; + while (p < bufp) { + unser_begin(p, WRITE_RECHDR_LENGTH); + if (rhl == RECHDR1_LENGTH) { + unser_uint32(VolSessionId); + unser_uint32(VolSessionTime); + } + unser_int32(FileIndex); + unser_int32(Stream); + unser_uint32(data_len); + if (Stream == STREAM_ADATA_BLOCK_HEADER) { + reclen = 0; + p += WRITE_ADATA_BLKHDR_LENGTH; + } else if (Stream == STREAM_ADATA_RECORD_HEADER || + Stream == -STREAM_ADATA_RECORD_HEADER) { + unser_uint32(reclen); + unser_int32(Stream); + p += WRITE_ADATA_RECHDR_LENGTH; + } else { + reclen = 0; + p += data_len + rhl; + } + Pmsg6(000, _(" Rec: VId=%u VT=%u FI=%s Strm=%s len=%d reclen=%d\n"), + VolSessionId, VolSessionTime, FI_to_ascii(buf1, FileIndex), + stream_to_ascii(buf2, Stream, FileIndex), data_len, reclen); + } +} + +/* + * Create a new block structure. + * We pass device so that the block can inherit the + * min and max block sizes. 
+ */ +void DEVICE::new_dcr_blocks(DCR *dcr) +{ + dcr->block = dcr->ameta_block = new_block(dcr); +} + +DEV_BLOCK *DEVICE::new_block(DCR *dcr, int size) +{ + DEV_BLOCK *block = (DEV_BLOCK *)get_memory(sizeof(DEV_BLOCK)); + int len; + + memset(block, 0, sizeof(DEV_BLOCK)); + + /* If the user has specified a max_block_size, use it as the default */ + if (max_block_size == 0) { + len = DEFAULT_BLOCK_SIZE; + } else { + len = max_block_size; + } + block->dev = this; + /* special size */ + if (size) { + len = size; + } + block->buf_len = len; + block->buf = get_memory(block->buf_len); + block->rechdr_queue = get_memory(block->buf_len); + block->rechdr_items = 0; + Dmsg2(510, "Rechdr len=%d max_items=%d\n", sizeof_pool_memory(block->rechdr_queue), + sizeof_pool_memory(block->rechdr_queue)/WRITE_ADATA_RECHDR_LENGTH); + empty_block(block); + block->BlockVer = BLOCK_VER; /* default write version */ + Dmsg3(150, "New block adata=%d len=%d block=%p\n", block->adata, len, block); + return block; +} + + +/* + * Duplicate an existing block (eblock) + */ +DEV_BLOCK *dup_block(DEV_BLOCK *eblock) +{ + DEV_BLOCK *block = (DEV_BLOCK *)get_memory(sizeof(DEV_BLOCK)); + int buf_len = sizeof_pool_memory(eblock->buf); + int rechdr_len = sizeof_pool_memory(eblock->rechdr_queue); + + memcpy(block, eblock, sizeof(DEV_BLOCK)); + block->buf = get_memory(buf_len); + memcpy(block->buf, eblock->buf, buf_len); + + block->rechdr_queue = get_memory(rechdr_len); + memcpy(block->rechdr_queue, eblock->rechdr_queue, rechdr_len); + + /* bufp might point inside buf */ + if (eblock->bufp && + eblock->bufp >= eblock->buf && + eblock->bufp < (eblock->buf + buf_len)) + { + block->bufp = (eblock->bufp - eblock->buf) + block->buf; + + } else { + block->bufp = NULL; + } + return block; +} + +/* + * Flush block to disk + */ +bool DEVICE::flush_block(DCR *dcr) +{ + if (!is_block_empty(dcr->block)) { + Dmsg0(dbglvl, "=== wpath 53 flush_ameta\n"); + Dmsg4(190, "Call flush_ameta_block BlockAddr=%lld nbytes=%d adata=%d block=%x\n", + dcr->block->BlockAddr, dcr->block->binbuf, dcr->adata_block->adata, dcr->adata_block); + dump_block(dcr->dev, dcr->block, "Flush_ameta_block"); + if (dcr->jcr->is_canceled() || !dcr->write_block_to_device()) { + Dmsg0(dbglvl, "=== wpath 54 flush_ameta\n"); + Dmsg0(190, "Failed to write ameta block to device, return false.\n"); + return false; + } + empty_block(dcr->block); + } + return true; +} + + +/* + * Only the first block checksum error was reported. + * If there are more, report it now. 
+ */ +void print_block_read_errors(JCR *jcr, DEV_BLOCK *block) +{ + if (block->read_errors > 1) { + Jmsg(jcr, M_ERROR, 0, _("%d block read errors not printed.\n"), + block->read_errors); + } +} + +void DEVICE::free_dcr_blocks(DCR *dcr) +{ + if (dcr->block == dcr->ameta_block) { + dcr->ameta_block = NULL; /* do not free twice */ + } + free_block(dcr->block); + dcr->block = NULL; + free_block(dcr->ameta_block); + dcr->ameta_block = NULL; +} + +/* + * Free block + */ +void free_block(DEV_BLOCK *block) +{ + if (block) { + Dmsg1(999, "free_block buffer=%p\n", block->buf); + if (block->buf) { + free_memory(block->buf); + } + if (block->rechdr_queue) { + free_memory(block->rechdr_queue); + } + Dmsg1(999, "=== free_block block %p\n", block); + free_memory((POOLMEM *)block); + } +} + +bool is_block_empty(DEV_BLOCK *block) +{ + if (block->adata) { + Dmsg1(200, "=== adata=1 binbuf=%d\n", block->binbuf); + return block->binbuf <= 0; + } else { + Dmsg1(200, "=== adata=0 binbuf=%d\n", block->binbuf-WRITE_BLKHDR_LENGTH); + return block->binbuf <= WRITE_BLKHDR_LENGTH; + } +} + +/* Empty the block -- for writing */ +void empty_block(DEV_BLOCK *block) +{ + if (block->adata) { + block->binbuf = 0; + } else { + block->binbuf = WRITE_BLKHDR_LENGTH; + } + Dmsg3(250, "empty_block: adata=%d len=%d set binbuf=%d\n", + block->adata, block->buf_len, block->binbuf); + block->bufp = block->buf + block->binbuf; + block->read_len = 0; + block->write_failed = false; + block->block_read = false; + block->needs_write = false; + block->FirstIndex = block->LastIndex = 0; + block->RecNum = 0; + block->BlockAddr = 0; +} + +/* + * Create block header just before write. The space + * in the buffer should have already been reserved by + * init_block. + */ +uint32_t ser_block_header(DEV_BLOCK *block, bool do_checksum) +{ + ser_declare; + uint32_t block_len = block->binbuf; + + block->CheckSum = 0; + if (block->adata) { + /* Checksum whole block */ + if (do_checksum) { + block->CheckSum = bcrc32((uint8_t *)block->buf, block_len); + } + } else { + Dmsg1(160, "block_header: block_len=%d\n", block_len); + ser_begin(block->buf, BLKHDR2_LENGTH); + ser_uint32(block->CheckSum); + ser_uint32(block_len); + ser_uint32(block->BlockNumber); + ser_bytes(WRITE_BLKHDR_ID, BLKHDR_ID_LENGTH); + if (BLOCK_VER >= 2) { + ser_uint32(block->VolSessionId); + ser_uint32(block->VolSessionTime); + } + + /* Checksum whole block except for the checksum */ + if (do_checksum) { + block->CheckSum = bcrc32((uint8_t *)block->buf+BLKHDR_CS_LENGTH, + block_len-BLKHDR_CS_LENGTH); + } + Dmsg2(160, "ser_block_header: adata=%d checksum=%x\n", block->adata, block->CheckSum); + ser_begin(block->buf, BLKHDR2_LENGTH); + ser_uint32(block->CheckSum); /* now add checksum to block header */ + } + return block->CheckSum; +} + +/* + * Unserialize the block header for reading block. + * This includes setting all the buffer pointers correctly. 
+ * + * Returns: false on failure (not a block) + * true on success + */ +bool unser_block_header(DCR *dcr, DEVICE *dev, DEV_BLOCK *block) +{ + ser_declare; + char Id[BLKHDR_ID_LENGTH+1]; + uint32_t BlockCheckSum; + uint32_t block_len; + uint32_t block_end; + uint32_t BlockNumber; + JCR *jcr = dcr->jcr; + int bhl; + + if (block->adata) { + /* Checksum the whole block */ + if (block->block_len <= block->read_len && dev->do_checksum()) { + BlockCheckSum = bcrc32((uint8_t *)block->buf, block->block_len); + if (BlockCheckSum != block->CheckSum) { + dev->dev_errno = EIO; + Mmsg5(dev->errmsg, _("Volume data error at %lld!\n" + "Adata block checksum mismatch in block=%u len=%d: calc=%x blk=%x\n"), + block->BlockAddr, block->BlockNumber, + block->block_len, BlockCheckSum, block->CheckSum); + if (block->read_errors == 0 || verbose >= 2) { + Jmsg(jcr, M_ERROR, 0, "%s", dev->errmsg); + dump_block(dev, block, "with checksum error"); + } + block->read_errors++; + if (!forge_on) { + return false; + } + } + } + return true; + } + + if (block->no_header) { + return true; + } + unser_begin(block->buf, BLKHDR_LENGTH); + unser_uint32(block->CheckSum); + unser_uint32(block_len); + unser_uint32(BlockNumber); + unser_bytes(Id, BLKHDR_ID_LENGTH); + ASSERT(unser_length(block->buf) == BLKHDR1_LENGTH); + Id[BLKHDR_ID_LENGTH] = 0; + + if (Id[3] == '1') { + bhl = BLKHDR1_LENGTH; + block->BlockVer = 1; + block->bufp = block->buf + bhl; + //Dmsg3(100, "Block=%p buf=%p bufp=%p\n", block, block->buf, block->bufp); + if (strncmp(Id, BLKHDR1_ID, BLKHDR_ID_LENGTH) != 0) { + dev->dev_errno = EIO; + Mmsg4(dev->errmsg, _("Volume data error at %u:%u! Wanted ID: \"%s\", got \"%s\". Buffer discarded.\n"), + dev->file, dev->block_num, BLKHDR1_ID, Id); + if (block->read_errors == 0 || verbose >= 2) { + Jmsg(jcr, M_ERROR, 0, "%s", dev->errmsg); + } + block->read_errors++; + return false; + } + } else if (Id[3] == '2') { + unser_uint32(block->VolSessionId); + unser_uint32(block->VolSessionTime); + bhl = BLKHDR2_LENGTH; + block->BlockVer = 2; + block->bufp = block->buf + bhl; + //Dmsg5(100, "Read-blkhdr Block=%p adata=%d buf=%p bufp=%p off=%d\n", block, block->adata, + // block->buf, block->bufp, block->bufp-block->buf); + if (strncmp(Id, BLKHDR2_ID, BLKHDR_ID_LENGTH) != 0) { + dev->dev_errno = EIO; + Mmsg4(dev->errmsg, _("Volume data error at %u:%u! Wanted ID: \"%s\", got \"%s\". Buffer discarded.\n"), + dev->file, dev->block_num, BLKHDR2_ID, Id); + if (block->read_errors == 0 || verbose >= 2) { + Jmsg(jcr, M_ERROR, 0, "%s", dev->errmsg); + } + block->read_errors++; + return false; + } + } else { + dev->dev_errno = EIO; + Mmsg4(dev->errmsg, _("Volume data error at %u:%u! Wanted ID: \"%s\", got \"%s\". Buffer discarded.\n"), + dev->file, dev->block_num, BLKHDR2_ID, Id); + Dmsg1(50, "%s", dev->errmsg); + if (block->read_errors == 0 || verbose >= 2) { + Jmsg(jcr, M_FATAL, 0, "%s", dev->errmsg); + } + block->read_errors++; + unser_uint32(block->VolSessionId); + unser_uint32(block->VolSessionTime); + return false; + } + + /* Sanity check */ + if (block_len > MAX_BLOCK_SIZE) { + dev->dev_errno = EIO; + Mmsg3(dev->errmsg, _("Volume data error at %u:%u! 
Block length %u is insane (too large), probably due to a bad archive.\n"), + dev->file, dev->block_num, block_len); + if (block->read_errors == 0 || verbose >= 2) { + Jmsg(jcr, M_ERROR, 0, "%s", dev->errmsg); + } + block->read_errors++; + return false; + } + + Dmsg1(390, "unser_block_header block_len=%d\n", block_len); + /* Find end of block or end of buffer whichever is smaller */ + if (block_len > block->read_len) { + block_end = block->read_len; + } else { + block_end = block_len; + } + block->binbuf = block_end - bhl; + Dmsg3(200, "set block=%p adata=%d binbuf=%d\n", block, block->adata, block->binbuf); + block->block_len = block_len; + block->BlockNumber = BlockNumber; + Dmsg3(390, "Read binbuf = %d %d block_len=%d\n", block->binbuf, + bhl, block_len); + if (block_len <= block->read_len && dev->do_checksum()) { + BlockCheckSum = bcrc32((uint8_t *)block->buf+BLKHDR_CS_LENGTH, + block_len-BLKHDR_CS_LENGTH); + + if (BlockCheckSum != block->CheckSum) { + dev->dev_errno = EIO; + Mmsg6(dev->errmsg, _("Volume data error at %u:%u!\n" + "Block checksum mismatch in block=%u len=%d: calc=%x blk=%x\n"), + dev->file, dev->block_num, (unsigned)BlockNumber, + block_len, BlockCheckSum, block->CheckSum); + if (block->read_errors == 0 || verbose >= 2) { + Jmsg(jcr, M_ERROR, 0, "%s", dev->errmsg); + dump_block(dev, block, "with checksum error"); + } + block->read_errors++; + if (!forge_on) { + return false; + } + } + } + return true; +} + +/* + * Calculate how many bytes to write and then clear to end + * of block. + */ +uint32_t get_len_and_clear_block(DEV_BLOCK *block, DEVICE *dev, uint32_t &pad) +{ + uint32_t wlen; + /* + * Clear to the end of the buffer if it is not full, + * and on tape devices, apply min and fixed blocking. + */ + wlen = block->binbuf; + if (wlen != block->buf_len) { + Dmsg2(250, "binbuf=%d buf_len=%d\n", block->binbuf, block->buf_len); + + /* Adjust write size to min/max for tapes and aligned only */ + if (dev->is_tape() || block->adata) { + /* check for fixed block size */ + if (dev->min_block_size == dev->max_block_size) { + wlen = block->buf_len; /* fixed block size already rounded */ + /* Check for min block size */ + } else if (wlen < dev->min_block_size) { + wlen = ((dev->min_block_size + TAPE_BSIZE - 1) / TAPE_BSIZE) * TAPE_BSIZE; + /* Ensure size is rounded */ + } else { + wlen = ((wlen + TAPE_BSIZE - 1) / TAPE_BSIZE) * TAPE_BSIZE; + } + } + if (block->adata && dev->padding_size > 0) { + /* Write to next aligned boundry */ + wlen = ((wlen + dev->padding_size - 1) / dev->padding_size) * dev->padding_size; + } + ASSERT(wlen <= block->buf_len); + /* Clear from end of data to end of block */ + if (wlen-block->binbuf > 0) { + memset(block->bufp, 0, wlen-block->binbuf); /* clear garbage */ + } + pad = wlen - block->binbuf; /* padding or zeros written */ + Dmsg5(150, "Zero end blk: adata=%d cleared=%d buf_len=%d wlen=%d binbuf=%d\n", + block->adata, pad, block->buf_len, wlen, block->binbuf); + } else { + pad = 0; + } + + return wlen; /* bytes to write */ +} + +/* + * Determine if user defined volume size has been + * reached, and if so, return true, otherwise + * return false. 
+ */ +bool is_user_volume_size_reached(DCR *dcr, bool quiet) +{ + bool hit_max1, hit_max2; + uint64_t size, max_size; + DEVICE *dev = dcr->ameta_dev; + char ed1[50]; + bool rtn = false; + + Enter(dbglvl); + if (dev->is_aligned()) { + /* Note, we reserve space for one ameta and one adata block */ + size = dev->VolCatInfo.VolCatBytes + dcr->ameta_block->buf_len + + dcr->adata_block->buf_len; + } else { + size = dev->VolCatInfo.VolCatBytes + dcr->ameta_block->binbuf; + } + /* Limit maximum Volume size to value specified by user */ + hit_max1 = (dev->max_volume_size > 0) && (size >= dev->max_volume_size); + hit_max2 = (dev->VolCatInfo.VolCatMaxBytes > 0) && + (size >= dev->VolCatInfo.VolCatMaxBytes); + if (hit_max1) { + max_size = dev->max_volume_size; + } else { + max_size = dev->VolCatInfo.VolCatMaxBytes; + } + if (hit_max1 || hit_max2) { + if (!quiet) { + Jmsg(dcr->jcr, M_INFO, 0, _("User defined maximum volume size %s will be exceeded on device %s.\n" + " Marking Volume \"%s\" as Full.\n"), + edit_uint64_with_commas(max_size, ed1), dev->print_name(), + dev->getVolCatName()); + } + Dmsg4(100, "Maximum volume size %s exceeded Vol=%s device=%s.\n" + "Marking Volume \"%s\" as Full.\n", + edit_uint64_with_commas(max_size, ed1), dev->getVolCatName(), + dev->print_name(), dev->getVolCatName()); + rtn = true; + } + Dmsg1(dbglvl, "Return from is_user_volume_size_reached=%d\n", rtn); + Leave(dbglvl); + return rtn; +} + + +void reread_last_block(DCR *dcr) +{ +#define CHECK_LAST_BLOCK +#ifdef CHECK_LAST_BLOCK + bool ok = true; + DEVICE *dev = dcr->dev; + JCR *jcr = dcr->jcr; + DEV_BLOCK *ameta_block = dcr->ameta_block; + DEV_BLOCK *adata_block = dcr->adata_block; + DEV_BLOCK *block = dcr->block; + /* + * If the device is a tape and it supports backspace record, + * we backspace over one or two eof marks depending on + * how many we just wrote, then over the last record, + * then re-read it and verify that the block number is + * correct. + */ + if (dev->is_tape() && dev->has_cap(CAP_BSR)) { + /* Now back up over what we wrote and read the last block */ + if (!dev->bsf(1)) { + berrno be; + ok = false; + Jmsg(jcr, M_ERROR, 0, _("Backspace file at EOT failed. ERR=%s\n"), + be.bstrerror(dev->dev_errno)); + } + if (ok && dev->has_cap(CAP_TWOEOF) && !dev->bsf(1)) { + berrno be; + ok = false; + Jmsg(jcr, M_ERROR, 0, _("Backspace file at EOT failed. ERR=%s\n"), + be.bstrerror(dev->dev_errno)); + } + /* Backspace over record */ + if (ok && !dev->bsr(1)) { + berrno be; + ok = false; + Jmsg(jcr, M_ERROR, 0, _("Backspace record at EOT failed. ERR=%s\n"), + be.bstrerror(dev->dev_errno)); + /* + * On FreeBSD systems, if the user got here, it is likely that his/her + * tape drive is "frozen". The correct thing to do is a + * rewind(), but if we do that, higher levels in cleaning up, will + * most likely write the EOS record over the beginning of the + * tape. The rewind *is* done later in mount.c when another + * tape is requested. Note, the clrerror() call in bsr() + * calls ioctl(MTCERRSTAT), which *should* fix the problem. + */ + } + if (ok) { + dev->new_dcr_blocks(dcr); + /* Note, this can destroy dev->errmsg */ + if (!dcr->read_block_from_dev(NO_BLOCK_NUMBER_CHECK)) { + Jmsg(jcr, M_ERROR, 0, _("Re-read last block at EOT failed. ERR=%s"), + dev->errmsg); + } else { + /* + * If we wrote block and the block numbers don't agree + * we have a possible problem. 
+ */ + if (dcr->block->BlockNumber != dev->LastBlock) { + if (dev->LastBlock > (dcr->block->BlockNumber + 1)) { + Jmsg(jcr, M_FATAL, 0, _( +"Re-read of last block: block numbers differ by more than one.\n" +"Probable tape misconfiguration and data loss. Read block=%u Want block=%u.\n"), + dcr->block->BlockNumber, dev->LastBlock); + } else { + Jmsg(jcr, M_ERROR, 0, _( +"Re-read of last block OK, but block numbers differ. Read block=%u Want block=%u.\n"), + dcr->block->BlockNumber, dev->LastBlock); + } + } else { + Jmsg(jcr, M_INFO, 0, _("Re-read of last block succeeded.\n")); + } + } + dev->free_dcr_blocks(dcr); + dcr->ameta_block = ameta_block; + dcr->block = block; + dcr->adata_block = adata_block; + } + } +#endif +} + +/* + * If this routine is called, we do our bookkeeping and + * then assure that the volume will not be written any + * more. + */ +bool terminate_writing_volume(DCR *dcr) +{ + DEVICE *dev = dcr->dev; + bool ok = true; + bool was_adata = false; + + Enter(dbglvl); + + if (dev->is_ateot()) { + return ok; /* already been here return now */ + } + + /* Work with ameta device */ + if (dev->adata) { + dev->set_ateot(); /* no more writing this Volume */ + dcr->adata_block->write_failed = true; + dcr->set_ameta(); + dev = dcr->ameta_dev; + was_adata = true; + } + + /* Create a JobMedia record to indicated end of medium */ + dev->VolCatInfo.VolCatFiles = dev->get_file(); + dev->VolCatInfo.VolLastPartBytes = dev->part_size; + dev->VolCatInfo.VolCatParts = dev->part; + if (!dir_create_jobmedia_record(dcr)) { + Dmsg0(50, "Error from create JobMedia\n"); + dev->dev_errno = EIO; + Mmsg2(dev->errmsg, _("Could not create JobMedia record for Volume=\"%s\" Job=%s\n"), + dev->getVolCatName(), dcr->jcr->Job); + Jmsg(dcr->jcr, M_FATAL, 0, "%s", dev->errmsg); + ok = false; + } + flush_jobmedia_queue(dcr->jcr); + bstrncpy(dev->LoadedVolName, dev->VolCatInfo.VolCatName, sizeof(dev->LoadedVolName)); + dcr->block->write_failed = true; + if (dev->can_append() && !dev->weof(dcr, 1)) { /* end the tape */ + dev->VolCatInfo.VolCatErrors++; + Jmsg(dcr->jcr, M_ERROR, 0, _("Error writing final EOF to tape. 
Volume %s may not be readable.\n" + "%s"), dev->VolCatInfo.VolCatName, dev->errmsg); + ok = false; + Dmsg0(50, "Error writing final EOF to volume.\n"); + } + if (ok) { + ok = dev->end_of_volume(dcr); + } + + Dmsg3(100, "Set VolCatStatus Full adata=%d size=%lld vol=%s\n", dev->adata, + dev->VolCatInfo.VolCatBytes, dev->VolCatInfo.VolCatName); + + /* If still in append mode mark volume Full */ + if (bstrcmp(dev->VolCatInfo.VolCatStatus, "Append")) { + dev->setVolCatStatus("Full"); + } + + if (!dir_update_volume_info(dcr, false, true)) { + Mmsg(dev->errmsg, _("Error sending Volume info to Director.\n")); + ok = false; + Dmsg0(50, "Error updating volume info.\n"); + } + Dmsg2(150, "dir_update_volume_info vol=%s to terminate writing -- %s\n", + dev->getVolCatName(), ok?"OK":"ERROR"); + + dev->notify_newvol_in_attached_dcrs(NULL); + + /* Set new file/block parameters for current dcr */ + set_new_file_parameters(dcr); + + if (ok && dev->has_cap(CAP_TWOEOF) && dev->can_append() && !dev->weof(dcr, 1)) { /* end the tape */ + dev->VolCatInfo.VolCatErrors++; + /* This may not be fatal since we already wrote an EOF */ + if (dev->errmsg[0]) { + Jmsg(dcr->jcr, M_ERROR, 0, "%s", dev->errmsg); + } + Dmsg0(50, "Writing second EOF failed.\n"); + } + + dev->set_ateot(); /* no more writing this tape */ + Dmsg2(150, "Leave terminate_writing_volume=%s -- %s\n", + dev->getVolCatName(), ok?"OK":"ERROR"); + if (was_adata) { + dcr->set_adata(); + } + Leave(dbglvl); + return ok; +} + +/* + * If a new volume has been mounted since our last write + * Create a JobMedia record for the previous volume written, + * and set new parameters to write this volume + * The same applies for if we are in a new file. + */ +bool check_for_newvol_or_newfile(DCR *dcr) +{ + JCR *jcr = dcr->jcr; + + if (dcr->NewVol || dcr->NewFile) { + if (job_canceled(jcr)) { + Dmsg0(100, "Canceled\n"); + return false; + } + /* If we wrote on Volume create a last jobmedia record for this job */ + if (!dcr->VolFirstIndex) { + Dmsg7(100, "Skip JobMedia Vol=%s wrote=%d MediaId=%lld FI=%lu LI=%lu StartAddr=%lld EndAddr=%lld\n", + dcr->VolumeName, dcr->WroteVol, dcr->VolMediaId, + dcr->VolFirstIndex, dcr->VolLastIndex, dcr->StartAddr, dcr->EndAddr); + } + if (dcr->VolFirstIndex && !dir_create_jobmedia_record(dcr)) { + dcr->dev->dev_errno = EIO; + Jmsg2(jcr, M_FATAL, 0, _("Could not create JobMedia record for Volume=\"%s\" Job=%s\n"), + dcr->getVolCatName(), jcr->Job); + set_new_volume_parameters(dcr); + Dmsg0(100, "cannot create media record\n"); + return false; + } + if (dcr->NewVol) { + Dmsg0(250, "Process NewVol\n"); + flush_jobmedia_queue(jcr); + /* Note, setting a new volume also handles any pending new file */ + set_new_volume_parameters(dcr); + } else { + set_new_file_parameters(dcr); + } + } + return true; +} + +/* + * Do bookkeeping when a new file is created on a Volume. This is + * also done for disk files to generate the jobmedia records for + * quick seeking. 
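+ *
+ * For example (an illustration; the exact trigger lives in the write
+ * path): when a disk Volume crosses its configured maximum file size,
+ * the writer calls this routine so a JobMedia row records the address
+ * range written so far; at restore time those rows let the storage
+ * daemon seek close to the wanted file instead of reading the Volume
+ * from the beginning.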
+ */ +bool do_new_file_bookkeeping(DCR *dcr) +{ + DEVICE *dev = dcr->dev; + JCR *jcr = dcr->jcr; + + /* Create a JobMedia record so restore can seek */ + if (!dir_create_jobmedia_record(dcr)) { + Dmsg0(40, "Error from create_job_media.\n"); + dev->dev_errno = EIO; + Jmsg2(jcr, M_FATAL, 0, _("Could not create JobMedia record for Volume=\"%s\" Job=%s\n"), + dcr->getVolCatName(), jcr->Job); + Dmsg0(40, "Call terminate_writing_volume\n"); + terminate_writing_volume(dcr); + dev->dev_errno = EIO; + return false; + } + dev->VolCatInfo.VolCatFiles = dev->get_file(); + dev->VolCatInfo.VolLastPartBytes = dev->part_size; + dev->VolCatInfo.VolCatParts = dev->part; + if (!dir_update_volume_info(dcr, false, false)) { + Dmsg0(50, "Error from update_vol_info.\n"); + Dmsg0(40, "Call terminate_writing_volume\n"); + terminate_writing_volume(dcr); + dev->dev_errno = EIO; + return false; + } + Dmsg0(100, "dir_update_volume_info max file size -- OK\n"); + + dev->notify_newfile_in_attached_dcrs(); + + /* Set new file/block parameters for current dcr */ + set_new_file_parameters(dcr); + return true; +} diff --git a/src/stored/bls.c b/src/stored/bls.c new file mode 100644 index 00000000..6a0c89bd --- /dev/null +++ b/src/stored/bls.c @@ -0,0 +1,484 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * Dumb program to do an "ls" of a Bacula 1.0 mortal file. + * + * Kern Sibbald, MM + */ + +#include "bacula.h" +#include "stored.h" +#include "findlib/find.h" +#include "lib/cmd_parser.h" + +extern bool parse_sd_config(CONFIG *config, const char *configfile, int exit_code); + +static void do_blocks(char *infname); +static void do_jobs(char *infname); +static void do_ls(char *fname); +static void do_close(JCR *jcr); +static void get_session_record(DEVICE *dev, DEV_RECORD *rec, SESSION_LABEL *sessrec); +static bool record_cb(DCR *dcr, DEV_RECORD *rec); + +static DEVICE *dev; +static DCR *dcr; +static bool dump_label = false; +static bool list_blocks = false; +static bool list_jobs = false; +static DEV_RECORD *rec; +static JCR *jcr; +static SESSION_LABEL sessrec; +static uint32_t num_files = 0; +static ATTR *attr; +static CONFIG *config; + +void *start_heap; +#define CONFIG_FILE "bacula-sd.conf" +char *configfile = NULL; +bool detect_errors = false; +int errors = 0; + +static FF_PKT *ff; + +static BSR *bsr = NULL; + +static void usage() +{ + fprintf(stderr, _( +PROG_COPYRIGHT +"\n%sVersion: %s (%s)\n\n" +"Usage: bls [options] \n" +" -b specify a bootstrap file\n" +" -c specify a Storage configuration file\n" +" -d set debug level to \n" +" -dt print timestamp in debug output\n" +" -e exclude list\n" +" -i include list\n" +" -j list jobs\n" +" -k list blocks\n" +" (no j or k option) list saved files\n" +" -L dump label\n" +" -p proceed inspite of errors\n" +" -V specify Volume names (separated by |)\n" +" -E Check records to detect errors\n" +" -v be verbose\n" +" -? 
print this message\n\n"), 2000, "", VERSION, BDATE); + exit(1); +} + + +int main (int argc, char *argv[]) +{ + int i, ch; + FILE *fd; + char line[1000]; + char *VolumeName= NULL; + char *bsrName = NULL; + bool ignore_label_errors = false; + BtoolsAskDirHandler askdir_handler; + + init_askdir_handler(&askdir_handler); + setlocale(LC_ALL, ""); + bindtextdomain("bacula", LOCALEDIR); + textdomain("bacula"); + init_stack_dump(); + lmgr_init_thread(); + + working_directory = "/tmp"; + my_name_is(argc, argv, "bls"); + init_msg(NULL, NULL); /* initialize message handler */ + + OSDependentInit(); + + ff = init_find_files(); + + while ((ch = getopt(argc, argv, "b:c:d:e:i:jkLpvV:?EDF:")) != -1) { + switch (ch) { + case 'b': + bsrName = optarg; + break; + + case 'E': + detect_errors = true; + break; + + case 'c': /* specify config file */ + if (configfile != NULL) { + free(configfile); + } + configfile = bstrdup(optarg); + break; + + case 'd': /* debug level */ + if (*optarg == 't') { + dbg_timestamp = true; + } else { + char *p; + /* We probably find a tag list -d 10,sql,bvfs */ + if ((p = strchr(optarg, ',')) != NULL) { + *p = 0; + } + debug_level = atoi(optarg); + if (debug_level <= 0) { + debug_level = 1; + } + if (p) { + debug_parse_tags(p+1, &debug_level_tags); + } + } + break; + + case 'e': /* exclude list */ + if ((fd = bfopen(optarg, "rb")) == NULL) { + berrno be; + Pmsg2(0, _("Could not open exclude file: %s, ERR=%s\n"), + optarg, be.bstrerror()); + exit(1); + } + while (fgets(line, sizeof(line), fd) != NULL) { + strip_trailing_junk(line); + Dmsg1(100, "add_exclude %s\n", line); + add_fname_to_exclude_list(ff, line); + } + fclose(fd); + break; + + case 'i': /* include list */ + if ((fd = bfopen(optarg, "rb")) == NULL) { + berrno be; + Pmsg2(0, _("Could not open include file: %s, ERR=%s\n"), + optarg, be.bstrerror()); + exit(1); + } + while (fgets(line, sizeof(line), fd) != NULL) { + strip_trailing_junk(line); + Dmsg1(100, "add_include %s\n", line); + add_fname_to_include_list(ff, 0, line); + } + fclose(fd); + break; + + case 'j': + list_jobs = true; + break; + + case 'k': + list_blocks = true; + break; + + case 'L': + dump_label = true; + break; + + case 'p': + ignore_label_errors = true; + forge_on = true; + break; + + case 'v': + verbose++; + break; + + case 'V': /* Volume name */ + VolumeName = optarg; + break; + + case '?': + default: + usage(); + + } /* end switch */ + } /* end while */ + argc -= optind; + argv += optind; + + if (!argc) { + Pmsg0(0, _("No archive name specified\n")); + usage(); + } + + if (configfile == NULL) { + configfile = bstrdup(CONFIG_FILE); + } + + config = New(CONFIG()); + parse_sd_config(config, configfile, M_ERROR_TERM); + setup_me(); + load_sd_plugins(me->plugin_directory); + + if (ff->included_files_list == NULL) { + add_fname_to_include_list(ff, 0, "/"); + } + + for (i=0; i < argc; i++) { + if (bsrName) { + bsr = parse_bsr(NULL, bsrName); + } + jcr = setup_jcr("bls", argv[i], bsr, VolumeName, SD_READ); + if (!jcr) { + exit(1); + } + jcr->ignore_label_errors = ignore_label_errors; + dev = jcr->dcr->dev; + if (!dev) { + exit(1); + } + dcr = jcr->dcr; + rec = new_record(); + attr = new_attr(jcr); + /* + * Assume that we have already read the volume label. 
+ * If on second or subsequent volume, adjust buffer pointer + */ + if (dev->VolHdr.PrevVolumeName[0] != 0) { /* second volume */ + Pmsg1(0, _("\n" + "Warning, this Volume is a continuation of Volume %s\n"), + dev->VolHdr.PrevVolumeName); + } + + if (list_blocks) { + do_blocks(argv[i]); + } else if (list_jobs) { + do_jobs(argv[i]); + } else { + do_ls(argv[i]); + } + do_close(jcr); + } + if (bsr) { + free_bsr(bsr); + } + term_include_exclude_files(ff); + term_find_files(ff); + + if (detect_errors) { + return (errors > 0)? 1 : 0; + } + return 0; +} + +static void do_close(JCR *jcr) +{ + release_device(jcr->dcr); + free_attr(attr); + free_record(rec); + free_jcr(jcr); + dev->term(NULL); +} + + +/* List just block information */ +static void do_blocks(char *infname) +{ + DEV_BLOCK *block = dcr->block; + char buf1[100], buf2[100]; + for ( ;; ) { + if (!dcr->read_block_from_device(NO_BLOCK_NUMBER_CHECK)) { + Dmsg1(100, "!read_block(): ERR=%s\n", dev->print_errmsg()); + if (dev->at_eot()) { + if (!mount_next_read_volume(dcr)) { + Jmsg(jcr, M_INFO, 0, _("Got EOM at file %u on device %s, Volume \"%s\"\n"), + dev->file, dev->print_name(), dcr->VolumeName); + break; + } + /* Read and discard Volume label */ + DEV_RECORD *record; + record = new_record(); + dcr->read_block_from_device(NO_BLOCK_NUMBER_CHECK); + read_record_from_block(dcr, record); + get_session_record(dev, record, &sessrec); + free_record(record); + Jmsg(jcr, M_INFO, 0, _("Mounted Volume \"%s\".\n"), dcr->VolumeName); + } else if (dev->at_eof()) { + Jmsg(jcr, M_INFO, 0, _("End of file %u on device %s, Volume \"%s\"\n"), + dev->file, dev->print_name(), dcr->VolumeName); + Dmsg0(20, "read_record got eof. try again\n"); + continue; + } else if (dev->is_short_block()) { + Jmsg(jcr, M_INFO, 0, "%s", dev->print_errmsg()); + continue; + } else { + /* I/O error */ + errors++; + display_tape_error_status(jcr, dev); + break; + } + } + if (!match_bsr_block(bsr, block)) { + Dmsg5(100, "reject Blk=%u blen=%u bVer=%d SessId=%u SessTim=%u\n", + block->BlockNumber, block->block_len, block->BlockVer, + block->VolSessionId, block->VolSessionTime); + continue; + } + Dmsg5(100, "Blk=%u blen=%u bVer=%d SessId=%u SessTim=%u\n", + block->BlockNumber, block->block_len, block->BlockVer, + block->VolSessionId, block->VolSessionTime); + if (verbose == 1) { + read_record_from_block(dcr, rec); + Pmsg8(-1, "Addr=%llu blk_num=%u blen=%u First rec FI=%s SessId=%u SessTim=%u Strm=%s rlen=%d\n", + dev->get_full_addr(), + block->BlockNumber, block->block_len, + FI_to_ascii(buf1, rec->FileIndex), rec->VolSessionId, rec->VolSessionTime, + stream_to_ascii_ex(buf2, rec->Stream, rec->FileIndex), rec->data_len); + rec->remainder = 0; + } else if (verbose > 1) { /* detailed block dump */ + Pmsg5(-1, "Blk=%u blen=%u bVer=%d SessId=%u SessTim=%u\n", + block->BlockNumber, block->block_len, block->BlockVer, + block->VolSessionId, block->VolSessionTime); + dump_block(dcr->dev, block, "", true); + } else { + printf("Block: %d size=%d\n", block->BlockNumber, block->block_len); + } + + } + return; +} + +/* + * We are only looking for labels or in particular Job Session records + */ +static bool jobs_cb(DCR *dcr, DEV_RECORD *rec) +{ + if (rec->FileIndex < 0) { + dump_label_record(dcr->dev, rec, verbose, detect_errors); + } + rec->remainder = 0; + return true; +} + +/* Do list job records */ +static void do_jobs(char *infname) +{ + if (!read_records(dcr, jobs_cb, mount_next_read_volume)) { + errors++; + } +} + +/* Do an ls type listing of an archive */ +static void do_ls(char *infname) 
+{ + if (dump_label) { + dev->dump_volume_label(); + return; + } + if (!read_records(dcr, record_cb, mount_next_read_volume)) { + errors++; + } + printf("%u files found.\n", num_files); +} + + +/* + * Called here for each record from read_records() + */ +static bool record_cb(DCR *dcr, DEV_RECORD *rec) +{ + if (verbose && rec->FileIndex < 0) { + dump_label_record(dcr->dev, rec, verbose, false); + return true; + } + + /* File Attributes stream */ + if (rec->maskedStream == STREAM_UNIX_ATTRIBUTES || + rec->maskedStream == STREAM_UNIX_ATTRIBUTES_EX) { + if (!unpack_attributes_record(jcr, rec->Stream, rec->data, rec->data_len, attr)) { + if (!forge_on) { + Emsg0(M_ERROR_TERM, 0, _("Cannot continue.\n")); + } else { + Emsg0(M_ERROR, 0, _("Attrib unpack error!\n")); + } + num_files++; + return true; + } + + attr->data_stream = decode_stat(attr->attr, &attr->statp, sizeof(attr->statp), &attr->LinkFI); + build_attr_output_fnames(jcr, attr); + + if (file_is_included(ff, attr->fname) && !file_is_excluded(ff, attr->fname)) { + if (verbose) { + Pmsg5(000, _("FileIndex=%d VolSessionId=%d VolSessionTime=%d Stream=%d DataLen=%d\n"), + rec->FileIndex, rec->VolSessionId, rec->VolSessionTime, rec->Stream, rec->data_len); + } + print_ls_output(jcr, attr); + num_files++; + } + } else if (rec->maskedStream == STREAM_PLUGIN_NAME) { + char data[100]; + int len = MIN(rec->data_len+1, sizeof(data)); + bstrncpy(data, rec->data, len); + Dmsg1(100, "Plugin data: %s\n", data); + } else if (rec->maskedStream == STREAM_RESTORE_OBJECT) { + Dmsg0(100, "Restore Object record\n"); + } else if (rec->maskedStream == STREAM_ADATA_BLOCK_HEADER) { + Dmsg0(000, "Adata block header\n"); + } else if (rec->maskedStream == STREAM_ADATA_RECORD_HEADER) { + Dmsg0(000, "Adata record header\n"); + } + + return true; +} + + +static void get_session_record(DEVICE *dev, DEV_RECORD *rec, SESSION_LABEL *sessrec) +{ + const char *rtype; + memset(sessrec, 0, sizeof(SESSION_LABEL)); + jcr->JobId = 0; + switch (rec->FileIndex) { + case PRE_LABEL: + rtype = _("Fresh Volume Label"); + break; + case VOL_LABEL: + rtype = _("Volume Label"); + unser_volume_label(dev, rec); + break; + case SOS_LABEL: + rtype = _("Begin Job Session"); + unser_session_label(sessrec, rec); + jcr->JobId = sessrec->JobId; + break; + case EOS_LABEL: + rtype = _("End Job Session"); + break; + case 0: + case EOM_LABEL: + rtype = _("End of Medium"); + break; + case EOT_LABEL: + rtype = _("End of Physical Medium"); + break; + case SOB_LABEL: + rtype = _("Start of object"); + break; + case EOB_LABEL: + rtype = _("End of object"); + break; + default: + rtype = _("Unknown"); + Dmsg1(10, "FI rtype=%d unknown\n", rec->FileIndex); + break; + } + Dmsg5(10, "%s Record: VolSessionId=%d VolSessionTime=%d JobId=%d DataLen=%d\n", + rtype, rec->VolSessionId, rec->VolSessionTime, rec->Stream, rec->data_len); + if (verbose) { + Pmsg5(-1, _("%s Record: VolSessionId=%d VolSessionTime=%d JobId=%d DataLen=%d\n"), + rtype, rec->VolSessionId, rec->VolSessionTime, rec->Stream, rec->data_len); + } +} diff --git a/src/stored/bscan.c b/src/stored/bscan.c new file mode 100644 index 00000000..9815d879 --- /dev/null +++ b/src/stored/bscan.c @@ -0,0 +1,1369 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * Program to scan a Bacula Volume and compare it with + * the catalog and optionally synchronize the catalog + * with the tape. + * + * Kern E. Sibbald, December 2001 + */ + +#include "bacula.h" +#include "stored.h" +#include "findlib/find.h" +#include "cats/cats.h" + +extern bool parse_sd_config(CONFIG *config, const char *configfile, int exit_code); + +/* Forward referenced functions */ +static void do_scan(void); +static bool record_cb(DCR *dcr, DEV_RECORD *rec); +static int create_file_attributes_record(BDB *db, JCR *mjcr, + char *fname, char *lname, int type, + char *ap, DEV_RECORD *rec); +static int create_media_record(BDB *db, MEDIA_DBR *mr, VOLUME_LABEL *vl); +static bool update_media_record(BDB *db, MEDIA_DBR *mr); +static int create_pool_record(BDB *db, POOL_DBR *pr); +static JCR *create_job_record(BDB *db, JOB_DBR *mr, SESSION_LABEL *label, DEV_RECORD *rec); +static int update_job_record(BDB *db, JOB_DBR *mr, SESSION_LABEL *elabel, + DEV_RECORD *rec); +static int create_client_record(BDB *db, CLIENT_DBR *cr); +static int create_fileset_record(BDB *db, FILESET_DBR *fsr); +static int create_jobmedia_record(BDB *db, JCR *jcr); +static JCR *create_jcr(JOB_DBR *jr, DEV_RECORD *rec, uint32_t JobId); +static int update_digest_record(BDB *db, char *digest, DEV_RECORD *rec, int type); + + +/* Local variables */ +static DEVICE *dev = NULL; +static BDB *db; +static JCR *bjcr; /* jcr for bscan */ +static BSR *bsr = NULL; +static MEDIA_DBR mr; +static POOL_DBR pr; +static JOB_DBR jr; +static CLIENT_DBR cr; +static FILESET_DBR fsr; +static ATTR_DBR ar; +static FILE_DBR fr; +static SESSION_LABEL label; +static SESSION_LABEL elabel; +static ATTR *attr; + +static time_t lasttime = 0; + +static const char *db_driver = "NULL"; +static const char *db_name = "bacula"; +static const char *db_user = "bacula"; +static const char *db_password = ""; +static const char *db_host = NULL; +static const char *db_ssl_mode = NULL; +static const char *db_ssl_key = NULL; +static const char *db_ssl_cert = NULL; +static const char *db_ssl_ca = NULL; +static const char *db_ssl_capath = NULL; +static const char *db_ssl_cipher = NULL; +static int db_port = 0; +static const char *wd = NULL; +static bool update_db = false; +static bool update_vol_info = false; +static bool list_records = false; +static int ignored_msgs = 0; + +static uint64_t currentVolumeSize; +static int last_pct = -1; +static bool showProgress = false; +static int num_jobs = 0; +static int num_pools = 0; +static int num_media = 0; +static int num_files = 0; + +static CONFIG *config; +#define CONFIG_FILE "bacula-sd.conf" + +void *start_heap; +char *configfile = NULL; + +static void usage() +{ + fprintf(stderr, _( +PROG_COPYRIGHT +"\n%sVersion: %s (%s)\n\n" +"Usage: bscan [ options ] \n" +" -b bootstrap specify a bootstrap file\n" +" -c specify configuration file\n" +" -d set debug level to \n" +" -dt print timestamp in debug output\n" +" -m update media info in database\n" +" -D specify the driver database name (default NULL)\n" +" -n specify the database name (default bacula)\n" +" -u specify database user name (default bacula)\n" +" -P specify 
database password (default none)\n" +" -h specify database host (default NULL)\n" +" -t specify database port (default 0)\n" +" -p proceed inspite of I/O errors\n" +" -r list records\n" +" -s synchronize or store in database\n" +" -S show scan progress periodically\n" +" -v verbose\n" +" -V specify Volume names (separated by |)\n" +" -w specify working directory (default from conf file)\n" +" -? print this message\n\n"), + 2001, "", VERSION, BDATE); + exit(1); +} + +int main (int argc, char *argv[]) +{ + int ch; + struct stat stat_buf; + char *VolumeName = NULL; + BtoolsAskDirHandler askdir_handler; + + init_askdir_handler(&askdir_handler); + setlocale(LC_ALL, ""); + bindtextdomain("bacula", LOCALEDIR); + textdomain("bacula"); + init_stack_dump(); + lmgr_init_thread(); + + my_name_is(argc, argv, "bscan"); + init_msg(NULL, NULL); + + OSDependentInit(); + + while ((ch = getopt(argc, argv, "b:c:d:D:h:o:k:e:a:p:mn:pP:rsSt:u:vV:w:?")) != -1) { + switch (ch) { + case 'S' : + showProgress = true; + break; + case 'b': + bsr = parse_bsr(NULL, optarg); + break; + + case 'c': /* specify config file */ + if (configfile != NULL) { + free(configfile); + } + configfile = bstrdup(optarg); + break; + + case 'D': + db_driver = optarg; + break; + + case 'd': /* debug level */ + if (*optarg == 't') { + dbg_timestamp = true; + } else { + debug_level = atoi(optarg); + if (debug_level <= 0) { + debug_level = 1; + } + } + break; + + case 'h': + db_host = optarg; + break; + + case 'o': + db_ssl_mode = optarg; + break; + + case 'k': + db_ssl_key = optarg; + break; + + case 'e': + db_ssl_cert = optarg; + break; + + case 'a': + db_ssl_ca = optarg; + break; + + case 't': + db_port = atoi(optarg); + break; + + case 'm': + update_vol_info = true; + break; + + case 'n': + db_name = optarg; + break; + + case 'u': + db_user = optarg; + break; + + case 'P': + db_password = optarg; + break; + + case 'p': + forge_on = true; + break; + + case 'r': + list_records = true; + break; + + case 's': + update_db = true; + break; + + case 'v': + verbose++; + break; + + case 'V': /* Volume name */ + VolumeName = optarg; + break; + + case 'w': + wd = optarg; + break; + + case '?': + default: + usage(); + + } + } + argc -= optind; + argv += optind; + + if (argc != 1) { + Pmsg0(0, _("Wrong number of arguments: \n")); + usage(); + } + + if (configfile == NULL) { + configfile = bstrdup(CONFIG_FILE); + } + + config = New(CONFIG()); + parse_sd_config(config, configfile, M_ERROR_TERM); + setup_me(); + load_sd_plugins(me->plugin_directory); + + /* Check if -w option given, otherwise use resource for working directory */ + if (wd) { + working_directory = wd; + } else if (!me->working_directory) { + Emsg1(M_ERROR_TERM, 0, _("No Working Directory defined in %s. Cannot continue.\n"), + configfile); + } else { + working_directory = me->working_directory; + } + + /* Check that working directory is good */ + if (stat(working_directory, &stat_buf) != 0) { + Emsg1(M_ERROR_TERM, 0, _("Working Directory: %s not found. Cannot continue.\n"), + working_directory); + } + if (!S_ISDIR(stat_buf.st_mode)) { + Emsg1(M_ERROR_TERM, 0, _("Working Directory: %s is not a directory. 
Cannot continue.\n"), + working_directory); + } + + bjcr = setup_jcr("bscan", argv[0], bsr, VolumeName, SD_READ); + if (!bjcr) { + exit(1); + } + dev = bjcr->read_dcr->dev; + if (showProgress) { + char ed1[50]; + struct stat sb; + fstat(dev->fd(), &sb); + currentVolumeSize = sb.st_size; + Pmsg1(000, _("First Volume Size = %s\n"), + edit_uint64(currentVolumeSize, ed1)); + } + + db = db_init_database(NULL, db_driver, db_name, db_user, db_password, + db_host, db_port, NULL, + db_ssl_mode, db_ssl_key, + db_ssl_cert, db_ssl_ca, + db_ssl_capath, db_ssl_cipher, + false, false); + if (!db || !db_open_database(NULL, db)) { + Pmsg2(000, _("Could not open Catalog \"%s\", database \"%s\".\n"), + db_driver, db_name); + if (db) { + Jmsg(NULL, M_FATAL, 0, _("%s"), db_strerror(db)); + Pmsg1(000, "%s", db_strerror(db)); + db_close_database(NULL, db); + } + Jmsg(NULL, M_ERROR_TERM, 0, _("Could not open Catalog \"%s\", database \"%s\".\n"), + db_driver, db_name); + } + Dmsg0(200, "Database opened\n"); + if (verbose) { + Pmsg2(000, _("Using Database: %s, User: %s\n"), db_name, db_user); + } + + do_scan(); + if (update_db) { + printf("Records added or updated in the catalog:\n%7d Media\n%7d Pool\n%7d Job\n%7d File\n", + num_media, num_pools, num_jobs, num_files); + } else { + printf("Records would have been added or updated in the catalog:\n%7d Media\n%7d Pool\n%7d Job\n%7d File\n", + num_media, num_pools, num_jobs, num_files); + } + + free_jcr(bjcr); + dev->term(NULL); + return 0; +} + +/* + * We are at the end of reading a Volume. Now, we simulate handling + * the end of writing a Volume by wiffling through the attached + * jcrs creating jobmedia records. + */ +static bool bscan_mount_next_read_volume(DCR *dcr) +{ + DEVICE *dev = dcr->dev; + DCR *mdcr; + Dmsg1(100, "Walk attached jcrs. Volume=%s\n", dev->getVolCatName()); + foreach_dlist(mdcr, dev->attached_dcrs) { + JCR *mjcr = mdcr->jcr; + Dmsg1(100, "========== JobId=%u ========\n", mjcr->JobId); + if (mjcr->JobId == 0) { + continue; + } + if (verbose) { + Pmsg1(000, _("Create JobMedia for Job %s\n"), mjcr->Job); + } + mdcr->StartAddr = dcr->StartAddr; + mdcr->EndAddr = dcr->EndAddr; + mdcr->VolMediaId = dcr->VolMediaId; + mjcr->read_dcr->VolLastIndex = dcr->VolLastIndex; + if( mjcr->bscan_insert_jobmedia_records ) { + if (!create_jobmedia_record(db, mjcr)) { + Pmsg2(000, _("Could not create JobMedia record for Volume=%s Job=%s\n"), + dev->getVolCatName(), mjcr->Job); + } + } + } + + update_media_record(db, &mr); + + /* Now let common read routine get up next tape. Note, + * we call mount_next... with bscan's jcr because that is where we + * have the Volume list, but we get attached. 
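+    * In other words (a reading of the code above, stated here for
+    * clarity): the dcr passed in belongs to bscan's own jcr, which is
+    * the one carrying the Volume list given on the command line or in
+    * the bootstrap, while the per-job DCRs walked above stay attached
+    * to the device so their JobMedia bookkeeping continues on the next
+    * Volume.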
+ */ + bool stat = mount_next_read_volume(dcr); + + if (showProgress) { + char ed1[50]; + struct stat sb; + fstat(dev->fd(), &sb); + currentVolumeSize = sb.st_size; + Pmsg1(000, _("First Volume Size = %s\n"), + edit_uint64(currentVolumeSize, ed1)); + } + return stat; +} + +static void do_scan() +{ + attr = new_attr(bjcr); + + bmemset(&ar, 0, sizeof(ar)); + bmemset(&pr, 0, sizeof(pr)); + bmemset(&jr, 0, sizeof(jr)); + bmemset(&cr, 0, sizeof(cr)); + bmemset(&fsr, 0, sizeof(fsr)); + bmemset(&fr, 0, sizeof(fr)); + + /* Detach bscan's jcr as we are not a real Job on the tape */ + + read_records(bjcr->read_dcr, record_cb, bscan_mount_next_read_volume); + + if (update_db) { + db_write_batch_file_records(bjcr); /* used by bulk batch file insert */ + } + free_attr(attr); +} + +/* + * Returns: true if OK + * false if error + */ +static bool record_cb(DCR *dcr, DEV_RECORD *rec) +{ + JCR *mjcr; + char ec1[30]; + DEVICE *dev = dcr->dev; + JCR *bjcr = dcr->jcr; + DEV_BLOCK *block = dcr->block; + POOL_MEM sql_buffer; + db_int64_ctx jmr_count; + + char digest[BASE64_SIZE(CRYPTO_DIGEST_MAX_SIZE)]; + + if (rec->data_len > 0) { + mr.VolBytes += rec->data_len + WRITE_RECHDR_LENGTH; /* Accumulate Volume bytes */ + if (showProgress && currentVolumeSize > 0) { + int pct = (mr.VolBytes * 100) / currentVolumeSize; + if (pct != last_pct) { + fprintf(stdout, _("done: %d%%\n"), pct); + fflush(stdout); + last_pct = pct; + } + } + } + + if (list_records) { + Pmsg5(000, _("Record: SessId=%u SessTim=%u FileIndex=%d Stream=%d len=%u\n"), + rec->VolSessionId, rec->VolSessionTime, rec->FileIndex, + rec->Stream, rec->data_len); + } + /* + * Check for Start or End of Session Record + * + */ + if (rec->FileIndex < 0) { + bool save_update_db = update_db; + + if (verbose > 1) { + dump_label_record(dev, rec, 1, false); + } + switch (rec->FileIndex) { + case PRE_LABEL: + Pmsg0(000, _("Volume is prelabeled. This tape cannot be scanned.\n")); + return false; + break; + + case VOL_LABEL: + unser_volume_label(dev, rec); + /* Check Pool info */ + bstrncpy(pr.Name, dev->VolHdr.PoolName, sizeof(pr.Name)); + bstrncpy(pr.PoolType, dev->VolHdr.PoolType, sizeof(pr.PoolType)); + num_pools++; + if (db_get_pool_numvols(bjcr, db, &pr)) { + if (verbose) { + Pmsg1(000, _("Pool record for %s found in DB.\n"), pr.Name); + } + } else { + if (!update_db) { + Pmsg1(000, _("VOL_LABEL: Pool record not found for Pool: %s\n"), + pr.Name); + } + create_pool_record(db, &pr); + } + if (strcmp(pr.PoolType, dev->VolHdr.PoolType) != 0) { + Pmsg2(000, _("VOL_LABEL: PoolType mismatch. DB=%s Vol=%s\n"), + pr.PoolType, dev->VolHdr.PoolType); + return true; + } else if (verbose) { + Pmsg1(000, _("Pool type \"%s\" is OK.\n"), pr.PoolType); + } + + /* Check Media Info */ + bmemset(&mr, 0, sizeof(mr)); + bstrncpy(mr.VolumeName, dev->VolHdr.VolumeName, sizeof(mr.VolumeName)); + mr.PoolId = pr.PoolId; + num_media++; + if (db_get_media_record(bjcr, db, &mr)) { + if (verbose) { + Pmsg1(000, _("Media record for %s found in DB.\n"), mr.VolumeName); + } + /* Clear out some volume statistics that will be updated */ + mr.VolJobs = mr.VolFiles = mr.VolBlocks = 0; + mr.VolBytes = rec->data_len + 20; + } else { + if (!update_db) { + Pmsg1(000, _("VOL_LABEL: Media record not found for Volume: %s\n"), + mr.VolumeName); + } + bstrncpy(mr.MediaType, dev->VolHdr.MediaType, sizeof(mr.MediaType)); + create_media_record(db, &mr, &dev->VolHdr); + } + if (strcmp(mr.MediaType, dev->VolHdr.MediaType) != 0) { + Pmsg2(000, _("VOL_LABEL: MediaType mismatch. 
DB=%s Vol=%s\n"), + mr.MediaType, dev->VolHdr.MediaType); + return true; /* ignore error */ + } else if (verbose) { + Pmsg1(000, _("Media type \"%s\" is OK.\n"), mr.MediaType); + } + /* Reset some DCR variables */ + foreach_dlist(dcr, dev->attached_dcrs) { + dcr->VolFirstIndex = dcr->FileIndex = 0; + dcr->StartAddr = dcr->EndAddr = 0; + dcr->VolMediaId = 0; + } + + Pmsg1(000, _("VOL_LABEL: OK for Volume: %s\n"), mr.VolumeName); + break; + + case SOS_LABEL: + mr.VolJobs++; + num_jobs++; + if (ignored_msgs > 0) { + Pmsg1(000, _("%d \"errors\" ignored before first Start of Session record.\n"), + ignored_msgs); + ignored_msgs = 0; + } + unser_session_label(&label, rec); + bmemset(&jr, 0, sizeof(jr)); + bstrncpy(jr.Job, label.Job, sizeof(jr.Job)); + if (db_get_job_record(bjcr, db, &jr)) { + /* Job record already exists in DB */ + update_db = false; /* don't change db in create_job_record */ + if (verbose) { + Pmsg1(000, _("SOS_LABEL: Found Job record for JobId: %d\n"), jr.JobId); + } + } else { + /* Must create a Job record in DB */ + if (!update_db) { + Pmsg1(000, _("SOS_LABEL: Job record not found for JobId: %d\n"), + jr.JobId); + } + } + + /* Create Client record if not already there */ + bstrncpy(cr.Name, label.ClientName, sizeof(cr.Name)); + create_client_record(db, &cr); + jr.ClientId = cr.ClientId; + + /* process label, if Job record exists don't update db */ + mjcr = create_job_record(db, &jr, &label, rec); + dcr = mjcr->read_dcr; + update_db = save_update_db; + + jr.PoolId = pr.PoolId; + mjcr->start_time = jr.StartTime; + mjcr->setJobLevel(jr.JobLevel); + + mjcr->client_name = get_pool_memory(PM_FNAME); + pm_strcpy(mjcr->client_name, label.ClientName); + mjcr->fileset_name = get_pool_memory(PM_FNAME); + pm_strcpy(mjcr->fileset_name, label.FileSetName); + bstrncpy(dcr->pool_type, label.PoolType, sizeof(dcr->pool_type)); + bstrncpy(dcr->pool_name, label.PoolName, sizeof(dcr->pool_name)); + + /* Look for existing Job Media records for this job. If there are + any, no new ones need be created. This may occur if File + Retention has expired before Job Retention, or if the volume + has already been bscan'd */ + Mmsg(sql_buffer, "SELECT count(*) from JobMedia where JobId=%d", jr.JobId); + db_sql_query(db, sql_buffer.c_str(), db_int64_handler, &jmr_count); + if( jmr_count.value > 0 ) { + //FIELD NAME TO BE DEFINED/CONFIRMED (maybe a struct?) + mjcr->bscan_insert_jobmedia_records = false; + } else { + mjcr->bscan_insert_jobmedia_records = true; + } + + if (rec->VolSessionId != jr.VolSessionId) { + Pmsg3(000, _("SOS_LABEL: VolSessId mismatch for JobId=%u. DB=%d Vol=%d\n"), + jr.JobId, + jr.VolSessionId, rec->VolSessionId); + return true; /* ignore error */ + } + if (rec->VolSessionTime != jr.VolSessionTime) { + Pmsg3(000, _("SOS_LABEL: VolSessTime mismatch for JobId=%u. DB=%d Vol=%d\n"), + jr.JobId, + jr.VolSessionTime, rec->VolSessionTime); + return true; /* ignore error */ + } + if (jr.PoolId != pr.PoolId) { + Pmsg3(000, _("SOS_LABEL: PoolId mismatch for JobId=%u. 
DB=%d Vol=%d\n"), + jr.JobId, + jr.PoolId, pr.PoolId); + return true; /* ignore error */ + } + break; + + case EOS_LABEL: + unser_session_label(&elabel, rec); + + /* Create FileSet record */ + bstrncpy(fsr.FileSet, label.FileSetName, sizeof(fsr.FileSet)); + bstrncpy(fsr.MD5, label.FileSetMD5, sizeof(fsr.MD5)); + create_fileset_record(db, &fsr); + jr.FileSetId = fsr.FileSetId; + + mjcr = get_jcr_by_session(rec->VolSessionId, rec->VolSessionTime); + if (!mjcr) { + Pmsg2(000, _("Could not find SessId=%d SessTime=%d for EOS record.\n"), + rec->VolSessionId, rec->VolSessionTime); + break; + } + + /* Do the final update to the Job record */ + update_job_record(db, &jr, &elabel, rec); + + mjcr->end_time = jr.EndTime; + mjcr->JobStatus = JS_Terminated; + + /* Create JobMedia record */ + mjcr->read_dcr->VolLastIndex = dcr->VolLastIndex; + if( mjcr->bscan_insert_jobmedia_records ) { + create_jobmedia_record(db, mjcr); + } + free_dcr(mjcr->read_dcr); + free_jcr(mjcr); + + break; + + case EOM_LABEL: + break; + + case EOT_LABEL: /* end of all tapes */ + /* + * Wiffle through all jobs still open and close + * them. + */ + if (update_db) { + DCR *mdcr; + foreach_dlist(mdcr, dev->attached_dcrs) { + JCR *mjcr = mdcr->jcr; + if (!mjcr || mjcr->JobId == 0) { + continue; + } + jr.JobId = mjcr->JobId; + /* Mark Job as Error Terimined */ + jr.JobStatus = JS_ErrorTerminated; + jr.JobFiles = mjcr->JobFiles; + jr.JobBytes = mjcr->JobBytes; + jr.VolSessionId = mjcr->VolSessionId; + jr.VolSessionTime = mjcr->VolSessionTime; + jr.JobTDate = (utime_t)mjcr->start_time; + jr.ClientId = mjcr->ClientId; + if (!db_update_job_end_record(bjcr, db, &jr)) { + Pmsg1(0, _("Could not update job record. ERR=%s\n"), db_strerror(db)); + } + mjcr->read_dcr = NULL; + free_jcr(mjcr); + } + } + mr.VolFiles = (uint32_t)(rec->Addr >> 32); + mr.VolBlocks = (uint32_t)rec->Addr; + mr.VolBytes += mr.VolBlocks * WRITE_BLKHDR_LENGTH; /* approx. */ + mr.VolMounts++; + update_media_record(db, &mr); + Pmsg3(0, _("End of all Volumes. VolFiles=%u VolBlocks=%u VolBytes=%s\n"), mr.VolFiles, + mr.VolBlocks, edit_uint64_with_commas(mr.VolBytes, ec1)); + break; + case STREAM_PLUGIN_NAME: + break; + + default: + break; + } /* end switch */ + return true; + } + + mjcr = get_jcr_by_session(rec->VolSessionId, rec->VolSessionTime); + if (!mjcr) { + if (mr.VolJobs > 0) { + Pmsg2(000, _("Could not find Job for SessId=%d SessTime=%d record.\n"), + rec->VolSessionId, rec->VolSessionTime); + } else { + ignored_msgs++; + } + return true; + } + dcr = mjcr->read_dcr; + if (dcr->VolFirstIndex == 0) { + dcr->VolFirstIndex = block->FirstIndex; + } + + /* File Attributes stream */ + switch (rec->maskedStream) { + case STREAM_UNIX_ATTRIBUTES: + case STREAM_UNIX_ATTRIBUTES_EX: + + if (!unpack_attributes_record(bjcr, rec->Stream, rec->data, rec->data_len, attr)) { + Emsg0(M_ERROR_TERM, 0, _("Cannot continue.\n")); + } + + if (verbose > 1) { + decode_stat(attr->attr, &attr->statp, sizeof(attr->statp), &attr->LinkFI); + build_attr_output_fnames(bjcr, attr); + print_ls_output(bjcr, attr); + } + fr.JobId = mjcr->JobId; + fr.FileId = 0; + num_files++; + if (verbose && (num_files & 0x7FFF) == 0) { + char ed1[30], ed2[30], ed3[30]; + Pmsg3(000, _("%s file records. 
At addr=%s bytes=%s\n"), + edit_uint64_with_commas(num_files, ed1), + edit_uint64_with_commas(rec->Addr, ed2), + edit_uint64_with_commas(mr.VolBytes, ed3)); + } + create_file_attributes_record(db, mjcr, attr->fname, attr->lname, + attr->type, attr->attr, rec); + free_jcr(mjcr); + break; + + case STREAM_RESTORE_OBJECT: + /* ****FIXME*****/ + /* Implement putting into catalog */ + break; + + /* Data stream */ + case STREAM_WIN32_DATA: + case STREAM_FILE_DATA: + case STREAM_SPARSE_DATA: + case STREAM_MACOS_FORK_DATA: + case STREAM_ENCRYPTED_FILE_DATA: + case STREAM_ENCRYPTED_WIN32_DATA: + case STREAM_ENCRYPTED_MACOS_FORK_DATA: + /* + * For encrypted stream, this is an approximation. + * The data must be decrypted to know the correct length. + */ + mjcr->JobBytes += rec->data_len; + if (rec->maskedStream == STREAM_SPARSE_DATA) { + mjcr->JobBytes -= sizeof(uint64_t); + } + + free_jcr(mjcr); /* done using JCR */ + break; + + case STREAM_GZIP_DATA: + case STREAM_COMPRESSED_DATA: + case STREAM_ENCRYPTED_FILE_GZIP_DATA: + case STREAM_ENCRYPTED_FILE_COMPRESSED_DATA: + case STREAM_ENCRYPTED_WIN32_GZIP_DATA: + case STREAM_ENCRYPTED_WIN32_COMPRESSED_DATA: + /* No correct, we should (decrypt and) expand it + done using JCR + */ + mjcr->JobBytes += rec->data_len; + free_jcr(mjcr); + break; + + case STREAM_SPARSE_GZIP_DATA: + case STREAM_SPARSE_COMPRESSED_DATA: + mjcr->JobBytes += rec->data_len - sizeof(uint64_t); /* No correct, we should expand it */ + free_jcr(mjcr); /* done using JCR */ + break; + + /* Win32 GZIP stream */ + case STREAM_WIN32_GZIP_DATA: + case STREAM_WIN32_COMPRESSED_DATA: + mjcr->JobBytes += rec->data_len; + free_jcr(mjcr); /* done using JCR */ + break; + + case STREAM_MD5_DIGEST: + bin_to_base64(digest, sizeof(digest), (char *)rec->data, CRYPTO_DIGEST_MD5_SIZE, true); + if (verbose > 1) { + Pmsg1(000, _("Got MD5 record: %s\n"), digest); + } + update_digest_record(db, digest, rec, CRYPTO_DIGEST_MD5); + break; + + case STREAM_SHA1_DIGEST: + bin_to_base64(digest, sizeof(digest), (char *)rec->data, CRYPTO_DIGEST_SHA1_SIZE, true); + if (verbose > 1) { + Pmsg1(000, _("Got SHA1 record: %s\n"), digest); + } + update_digest_record(db, digest, rec, CRYPTO_DIGEST_SHA1); + break; + + case STREAM_SHA256_DIGEST: + bin_to_base64(digest, sizeof(digest), (char *)rec->data, CRYPTO_DIGEST_SHA256_SIZE, true); + if (verbose > 1) { + Pmsg1(000, _("Got SHA256 record: %s\n"), digest); + } + update_digest_record(db, digest, rec, CRYPTO_DIGEST_SHA256); + break; + + case STREAM_SHA512_DIGEST: + bin_to_base64(digest, sizeof(digest), (char *)rec->data, CRYPTO_DIGEST_SHA512_SIZE, true); + if (verbose > 1) { + Pmsg1(000, _("Got SHA512 record: %s\n"), digest); + } + update_digest_record(db, digest, rec, CRYPTO_DIGEST_SHA512); + break; + + case STREAM_ENCRYPTED_SESSION_DATA: + // TODO landonf: Investigate crypto support in bscan + if (verbose > 1) { + Pmsg0(000, _("Got signed digest record\n")); + } + break; + + case STREAM_SIGNED_DIGEST: + // TODO landonf: Investigate crypto support in bscan + if (verbose > 1) { + Pmsg0(000, _("Got signed digest record\n")); + } + break; + + case STREAM_PROGRAM_NAMES: + if (verbose) { + Pmsg1(000, _("Got Prog Names Stream: %s\n"), rec->data); + } + break; + + case STREAM_PROGRAM_DATA: + if (verbose > 1) { + Pmsg0(000, _("Got Prog Data Stream record.\n")); + } + break; + + case STREAM_UNIX_ACCESS_ACL: /* Deprecated Standard ACL attributes on UNIX */ + case STREAM_UNIX_DEFAULT_ACL: /* Deprecated Default ACL attributes on UNIX */ + case STREAM_HFSPLUS_ATTRIBUTES: + case 
STREAM_XACL_AIX_TEXT: + case STREAM_XACL_DARWIN_ACCESS: + case STREAM_XACL_FREEBSD_DEFAULT: + case STREAM_XACL_FREEBSD_ACCESS: + case STREAM_XACL_HPUX_ACL_ENTRY: + case STREAM_XACL_IRIX_DEFAULT: + case STREAM_XACL_IRIX_ACCESS: + case STREAM_XACL_LINUX_DEFAULT: + case STREAM_XACL_LINUX_ACCESS: + case STREAM_XACL_TRU64_DEFAULT: + case STREAM_XACL_TRU64_DEFAULT_DIR: + case STREAM_XACL_TRU64_ACCESS: + case STREAM_XACL_SOLARIS_POSIX: + case STREAM_XACL_SOLARIS_NFS4: + case STREAM_XACL_AFS_TEXT: + case STREAM_XACL_AIX_AIXC: + case STREAM_XACL_AIX_NFS4: + case STREAM_XACL_FREEBSD_NFS4: + case STREAM_XACL_HURD_DEFAULT: + case STREAM_XACL_HURD_ACCESS: + /* Ignore Unix ACL attributes */ + break; + + case STREAM_XACL_HURD_XATTR: + case STREAM_XACL_IRIX_XATTR: + case STREAM_XACL_TRU64_XATTR: + case STREAM_XACL_AIX_XATTR: + case STREAM_XACL_OPENBSD_XATTR: + case STREAM_XACL_SOLARIS_SYS_XATTR: + case STREAM_XACL_SOLARIS_XATTR: + case STREAM_XACL_DARWIN_XATTR: + case STREAM_XACL_FREEBSD_XATTR: + case STREAM_XACL_LINUX_XATTR: + case STREAM_XACL_NETBSD_XATTR: + /* Ignore Unix Extended attributes */ + break; + + default: + Pmsg2(0, _("Unknown stream type!!! stream=%d len=%i\n"), rec->Stream, rec->data_len); + break; + } + return true; +} + +/* + * Free the Job Control Record if no one is still using it. + * Called from main free_jcr() routine in src/lib/jcr.c so + * that we can do our Director specific cleanup of the jcr. + */ +static void bscan_free_jcr(JCR *jcr) +{ + Dmsg0(200, "Start bscan free_jcr\n"); + + free_bsock(jcr->file_bsock); + free_bsock(jcr->store_bsock); + if (jcr->RestoreBootstrap) { + bfree_and_null(jcr->RestoreBootstrap); + } + if (jcr->dcr) { + free_dcr(jcr->dcr); + jcr->dcr = NULL; + } + if (jcr->read_dcr) { + free_dcr(jcr->read_dcr); + jcr->read_dcr = NULL; + } + Dmsg0(200, "End bscan free_jcr\n"); +} + +/* + * We got a File Attributes record on the tape. Now, lookup the Job + * record, and then create the attributes record. + */ +static int create_file_attributes_record(BDB *db, JCR *mjcr, + char *fname, char *lname, int type, + char *ap, DEV_RECORD *rec) +{ + DCR *dcr = mjcr->read_dcr; + ar.fname = fname; + ar.link = lname; + ar.ClientId = mjcr->ClientId; + ar.JobId = mjcr->JobId; + ar.Stream = rec->Stream; + if (type == FT_DELETED) { + ar.FileIndex = 0; + } else { + ar.FileIndex = rec->FileIndex; + } + ar.attr = ap; + if (dcr->VolFirstIndex == 0) { + dcr->VolFirstIndex = rec->FileIndex; + } + dcr->FileIndex = rec->FileIndex; + mjcr->JobFiles++; + + if (!update_db) { + return 1; + } + + if (!db_create_file_attributes_record(bjcr, db, &ar)) { + Pmsg1(0, _("Could not create File Attributes record. 
ERR=%s\n"), db_strerror(db)); + return 0; + } + mjcr->FileId = ar.FileId; + + if (verbose > 1) { + Pmsg1(000, _("Created File record: %s\n"), fname); + } + return 1; +} + +/* + * For each Volume we see, we create a Medium record + */ +static int create_media_record(BDB *db, MEDIA_DBR *mr, VOLUME_LABEL *vl) +{ + struct date_time dt; + struct tm tm; + + /* We mark Vols as Archive to keep them from being re-written */ + bstrncpy(mr->VolStatus, "Archive", sizeof(mr->VolStatus)); + mr->VolRetention = 365 * 3600 * 24; /* 1 year */ + mr->Enabled = 1; + if (vl->VerNum >= 11) { + mr->set_first_written = true; /* Save FirstWritten during update_media */ + mr->FirstWritten = btime_to_utime(vl->write_btime); + mr->LabelDate = btime_to_utime(vl->label_btime); + } else { + /* DEPRECATED DO NOT USE */ + dt.julian_day_number = vl->write_date; + dt.julian_day_fraction = vl->write_time; + tm_decode(&dt, &tm); + mr->FirstWritten = mktime(&tm); + dt.julian_day_number = vl->label_date; + dt.julian_day_fraction = vl->label_time; + tm_decode(&dt, &tm); + mr->LabelDate = mktime(&tm); + } + lasttime = mr->LabelDate; + if (mr->VolJobs == 0) { + mr->VolJobs = 1; + } + if (mr->VolMounts == 0) { + mr->VolMounts = 1; + } + + if (!update_db) { + return 1; + } + + if (!db_create_media_record(bjcr, db, mr)) { + Pmsg1(0, _("Could not create media record. ERR=%s\n"), db_strerror(db)); + return 0; + } + if (!db_update_media_record(bjcr, db, mr)) { + Pmsg1(0, _("Could not update media record. ERR=%s\n"), db_strerror(db)); + return 0; + } + if (verbose) { + Pmsg1(000, _("Created Media record for Volume: %s\n"), mr->VolumeName); + } + return 1; + +} + +/* + * Called at end of media to update it + */ +static bool update_media_record(BDB *db, MEDIA_DBR *mr) +{ + if (!update_db && !update_vol_info) { + return true; + } + + mr->LastWritten = lasttime; + if (!db_update_media_record(bjcr, db, mr)) { + Pmsg1(0, _("Could not update media record. ERR=%s\n"), db_strerror(db)); + return false;; + } + if (verbose) { + Pmsg1(000, _("Updated Media record at end of Volume: %s\n"), mr->VolumeName); + } + return true; + +} + + +static int create_pool_record(BDB *db, POOL_DBR *pr) +{ + pr->NumVols++; + pr->UseCatalog = 1; + pr->VolRetention = 355 * 3600 * 24; /* 1 year */ + + if (!update_db) { + return 1; + } + if (!db_create_pool_record(bjcr, db, pr)) { + Pmsg1(0, _("Could not create pool record. ERR=%s\n"), db_strerror(db)); + return 0; + } + if (verbose) { + Pmsg1(000, _("Created Pool record for Pool: %s\n"), pr->Name); + } + return 1; + +} + + +/* + * Called from SOS to create a client for the current Job + */ +static int create_client_record(BDB *db, CLIENT_DBR *cr) +{ + /* + * Note, update_db can temporarily be set false while + * updating the database, so we must ensure that ClientId is non-zero. + */ + if (!update_db) { + cr->ClientId = 0; + if (!db_get_client_record(bjcr, db, cr)) { + Pmsg1(0, _("Could not get Client record. ERR=%s\n"), db_strerror(db)); + return 0; + } + return 1; + } + if (!db_create_client_record(bjcr, db, cr)) { + Pmsg1(0, _("Could not create Client record. 
ERR=%s\n"), db_strerror(db)); + return 0; + } + if (verbose) { + Pmsg1(000, _("Created Client record for Client: %s\n"), cr->Name); + } + return 1; +} + +static int create_fileset_record(BDB *db, FILESET_DBR *fsr) +{ + if (!update_db) { + return 1; + } + fsr->FileSetId = 0; + if (fsr->MD5[0] == 0) { + fsr->MD5[0] = ' '; /* Equivalent to nothing */ + fsr->MD5[1] = 0; + } + if (db_get_fileset_record(bjcr, db, fsr)) { + if (verbose) { + Pmsg1(000, _("Fileset \"%s\" already exists.\n"), fsr->FileSet); + } + } else { + if (!db_create_fileset_record(bjcr, db, fsr)) { + Pmsg2(0, _("Could not create FileSet record \"%s\". ERR=%s\n"), + fsr->FileSet, db_strerror(db)); + return 0; + } + if (verbose) { + Pmsg1(000, _("Created FileSet record \"%s\"\n"), fsr->FileSet); + } + } + return 1; +} + +/* + * Simulate the two calls on the database to create + * the Job record and to update it when the Job actually + * begins running. + */ +static JCR *create_job_record(BDB *db, JOB_DBR *jr, SESSION_LABEL *label, + DEV_RECORD *rec) +{ + JCR *mjcr; + struct date_time dt; + struct tm tm; + + jr->JobId = label->JobId; + jr->JobType = label->JobType; + jr->JobLevel = label->JobLevel; + jr->JobStatus = JS_Created; + bstrncpy(jr->Name, label->JobName, sizeof(jr->Name)); + bstrncpy(jr->Job, label->Job, sizeof(jr->Job)); + if (label->VerNum >= 11) { + jr->SchedTime = btime_to_unix(label->write_btime); + } else { + dt.julian_day_number = label->write_date; + dt.julian_day_fraction = label->write_time; + tm_decode(&dt, &tm); + jr->SchedTime = mktime(&tm); + } + + jr->StartTime = jr->SchedTime; + jr->JobTDate = (utime_t)jr->SchedTime; + jr->VolSessionId = rec->VolSessionId; + jr->VolSessionTime = rec->VolSessionTime; + + /* Now create a JCR as if starting the Job */ + mjcr = create_jcr(jr, rec, label->JobId); + + if (!update_db) { + return mjcr; + } + + /* This creates the bare essentials */ + if (!db_create_job_record(bjcr, db, jr)) { + Pmsg1(0, _("Could not create JobId record. ERR=%s\n"), db_strerror(db)); + return mjcr; + } + + /* This adds the client, StartTime, JobTDate, ... */ + if (!db_update_job_start_record(bjcr, db, jr)) { + Pmsg1(0, _("Could not update job start record. ERR=%s\n"), db_strerror(db)); + return mjcr; + } + Pmsg2(000, _("Created new JobId=%u record for original JobId=%u\n"), jr->JobId, + label->JobId); + mjcr->JobId = jr->JobId; /* set new JobId */ + return mjcr; +} + +/* + * Simulate the database call that updates the Job + * at Job termination time. + */ +static int update_job_record(BDB *db, JOB_DBR *jr, SESSION_LABEL *elabel, + DEV_RECORD *rec) +{ + struct date_time dt; + struct tm tm; + JCR *mjcr; + + mjcr = get_jcr_by_session(rec->VolSessionId, rec->VolSessionTime); + if (!mjcr) { + Pmsg2(000, _("Could not find SessId=%d SessTime=%d for EOS record.\n"), + rec->VolSessionId, rec->VolSessionTime); + return 0; + } + if (elabel->VerNum >= 11) { + jr->EndTime = btime_to_unix(elabel->write_btime); + } else { + dt.julian_day_number = elabel->write_date; + dt.julian_day_fraction = elabel->write_time; + tm_decode(&dt, &tm); + jr->EndTime = mktime(&tm); + } + lasttime = jr->EndTime; + mjcr->end_time = jr->EndTime; + + jr->JobId = mjcr->JobId; + + /* The JobStatus can't be 0 */ + if (elabel->JobStatus == 0) { + Pmsg2(000, _("Could not find JobStatus for SessId=%d SessTime=%d in EOS record.\n"), + rec->VolSessionId, rec->VolSessionTime); + } + mjcr->JobStatus = jr->JobStatus = + elabel->JobStatus ? 
elabel->JobStatus : JS_ErrorTerminated; + + jr->JobFiles = elabel->JobFiles; + if (jr->JobFiles > 0) { /* If we found files, force PurgedFiles */ + jr->PurgedFiles = 0; + } + jr->JobBytes = elabel->JobBytes; + jr->VolSessionId = rec->VolSessionId; + jr->VolSessionTime = rec->VolSessionTime; + jr->JobTDate = (utime_t)mjcr->start_time; + jr->ClientId = mjcr->ClientId; + + if (!update_db) { + free_jcr(mjcr); + return 1; + } + + if (!db_update_job_end_record(bjcr, db, jr)) { + Pmsg2(0, _("Could not update JobId=%u record. ERR=%s\n"), jr->JobId, db_strerror(db)); + free_jcr(mjcr); + return 0; + } + if (verbose) { + Pmsg3(000, _("Updated Job termination record for JobId=%u Level=%s TermStat=%c\n"), + jr->JobId, job_level_to_str(mjcr->getJobLevel()), jr->JobStatus); + } + if (verbose > 1) { + const char *term_msg; + static char term_code[70]; + char sdt[50], edt[50]; + char ec1[30], ec2[30], ec3[30]; + + switch (mjcr->JobStatus) { + case JS_Terminated: + term_msg = _("Backup OK"); + break; + case JS_Warnings: + term_msg = _("Backup OK -- with warnings"); + break; + case JS_FatalError: + case JS_ErrorTerminated: + term_msg = _("*** Backup Error ***"); + break; + case JS_Canceled: + term_msg = _("Backup Canceled"); + break; + default: + term_msg = term_code; + sprintf(term_code, _("Job Termination code: %d"), mjcr->JobStatus); + break; + } + bstrftime(sdt, sizeof(sdt), mjcr->start_time); + bstrftime(edt, sizeof(edt), mjcr->end_time); + Pmsg14(000, _("%s\n" +"JobId: %d\n" +"Job: %s\n" +"FileSet: %s\n" +"Backup Level: %s\n" +"Client: %s\n" +"Start time: %s\n" +"End time: %s\n" +"Files Written: %s\n" +"Bytes Written: %s\n" +"Volume Session Id: %d\n" +"Volume Session Time: %d\n" +"Last Volume Bytes: %s\n" +"Termination: %s\n\n"), + edt, + mjcr->JobId, + mjcr->Job, + mjcr->fileset_name, + job_level_to_str(mjcr->getJobLevel()), + mjcr->client_name, + sdt, + edt, + edit_uint64_with_commas(mjcr->JobFiles, ec1), + edit_uint64_with_commas(mjcr->JobBytes, ec2), + mjcr->VolSessionId, + mjcr->VolSessionTime, + edit_uint64_with_commas(mr.VolBytes, ec3), + term_msg); + } + free_jcr(mjcr); + return 1; +} + +static int create_jobmedia_record(BDB *db, JCR *mjcr) +{ + JOBMEDIA_DBR jmr; + DCR *dcr = mjcr->read_dcr; + + dcr->EndAddr = dev->EndAddr; + dcr->VolMediaId = dev->VolCatInfo.VolMediaId; + + bmemset(&jmr, 0, sizeof(jmr)); + jmr.JobId = mjcr->JobId; + jmr.MediaId = mr.MediaId; + jmr.FirstIndex = dcr->VolFirstIndex; + jmr.LastIndex = dcr->VolLastIndex; + jmr.StartBlock = (uint32_t)dcr->StartAddr; + jmr.StartFile = (uint32_t)(dcr->StartAddr >> 32); + jmr.EndBlock = (uint32_t)dcr->EndAddr; + jmr.EndFile = (uint32_t)(dcr->EndAddr >> 32); + if (!update_db) { + return 1; + } + + if (!db_create_jobmedia_record(bjcr, db, &jmr)) { + Pmsg1(0, _("Could not create JobMedia record. 
ERR=%s\n"), db_strerror(db)); + return 0; + } + if (verbose) { + Pmsg2(000, _("Created JobMedia record JobId %d, MediaId %d\n"), + jmr.JobId, jmr.MediaId); + } + return 1; +} + +/* + * Simulate the database call that updates the MD5/SHA1 record + */ +static int update_digest_record(BDB *db, char *digest, DEV_RECORD *rec, int type) +{ + JCR *mjcr; + + mjcr = get_jcr_by_session(rec->VolSessionId, rec->VolSessionTime); + if (!mjcr) { + if (mr.VolJobs > 0) { + Pmsg2(000, _("Could not find SessId=%d SessTime=%d for MD5/SHA1 record.\n"), + rec->VolSessionId, rec->VolSessionTime); + } else { + ignored_msgs++; + } + return 0; + } + + if (!update_db || mjcr->FileId == 0) { + free_jcr(mjcr); + return 1; + } + + if (!db_add_digest_to_file_record(bjcr, db, mjcr->FileId, digest, type)) { + Pmsg1(0, _("Could not add MD5/SHA1 to File record. ERR=%s\n"), db_strerror(db)); + free_jcr(mjcr); + return 0; + } + if (verbose > 1) { + Pmsg0(000, _("Updated MD5/SHA1 record\n")); + } + free_jcr(mjcr); + return 1; +} + + +/* + * Create a JCR as if we are really starting the job + */ +static JCR *create_jcr(JOB_DBR *jr, DEV_RECORD *rec, uint32_t JobId) +{ + JCR *jobjcr; + /* + * Transfer as much as possible to the Job JCR. Most important is + * the JobId and the ClientId. + */ + jobjcr = new_jcr(sizeof(JCR), bscan_free_jcr); + jobjcr->setJobType(jr->JobType); + jobjcr->setJobLevel(jr->JobLevel); + jobjcr->JobStatus = jr->JobStatus; + bstrncpy(jobjcr->Job, jr->Job, sizeof(jobjcr->Job)); + jobjcr->JobId = JobId; /* this is JobId on tape */ + jobjcr->sched_time = jr->SchedTime; + jobjcr->start_time = jr->StartTime; + jobjcr->VolSessionId = rec->VolSessionId; + jobjcr->VolSessionTime = rec->VolSessionTime; + jobjcr->ClientId = jr->ClientId; + jobjcr->dcr = jobjcr->read_dcr = new_dcr(jobjcr, NULL, dev, SD_READ); + + return jobjcr; +} diff --git a/src/stored/bsdjson.c b/src/stored/bsdjson.c new file mode 100644 index 00000000..27e3c176 --- /dev/null +++ b/src/stored/bsdjson.c @@ -0,0 +1,684 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Bacula conf to json + * + * Kern Sibbald, MMXII + */ + +#include "bacula.h" +#include "stored.h" + +/* Imported functions */ +extern bool parse_sd_config(CONFIG *config, const char *configfile, int exit_code); + +/* Imported variables */ +#if defined(_MSC_VER) +extern "C" { // work around visual compiler mangling variables + extern URES res_all; +} +#else +extern URES res_all; +#endif +extern s_kw msg_types[]; +extern s_kw dev_types[]; +extern s_kw tapelabels[]; +extern s_kw cloud_drivers[]; +extern s_kw trunc_opts[]; +extern s_kw upload_opts[]; +extern s_kw proto_opts[]; +extern s_kw uri_opts[]; + +extern RES_TABLE resources[]; + +typedef struct +{ + bool do_list; + bool do_one; + bool do_only_data; + char *resource_type; + char *resource_name; + regex_t directive_reg; +} display_filter; + +/* Forward referenced functions */ +void terminate_stored(int sig); +static int check_resources(); +static void sendit(void *sock, const char *fmt, ...); +static void dump_json(display_filter *filter); + +#define CONFIG_FILE "bacula-sd.conf" /* Default config file */ + +/* Global variables exported */ +STORES *me = NULL; /* our Global resource */ + +char *configfile = NULL; + +/* Global static variables */ +static CONFIG *config; + + +static void usage() +{ + fprintf(stderr, _( +PROG_COPYRIGHT +"\n%sVersion: %s (%s)\n\n" +"Usage: bsdjson [options] [config_file]\n" +" -r get resource type \n" +" -n get resource \n" +" -l get only directives matching dirs (use with -r)\n" +" -D get only data\n" +" -c use as configuration file\n" +" -d set debug level to \n" +" -dt print timestamp in debug output\n" +" -t test - read config and exit\n" +" -v verbose user messages\n" +" -? print this message.\n" +"\n"), 2012, "", VERSION, BDATE); + + exit(1); +} + +/********************************************************************* + * + * Main Bacula Unix Storage Daemon + * + */ +#if defined(HAVE_WIN32) +#define main BaculaMain +#endif + +int main (int argc, char *argv[]) +{ + int ch; + bool test_config = false; + display_filter filter; + memset(&filter, 0, sizeof(filter)); + + setlocale(LC_ALL, ""); + bindtextdomain("bacula", LOCALEDIR); + textdomain("bacula"); + + my_name_is(argc, argv, "bacula-sd"); + init_msg(NULL, NULL); + + while ((ch = getopt(argc, argv, "Dc:d:tv?r:n:l:")) != -1) { + switch (ch) { + case 'D': + filter.do_only_data = true; + break; + + case 'l': + filter.do_list = true; + /* Might use something like -l '^(Name|Description)$' */ + filter.do_list = true; + if (regcomp(&filter.directive_reg, optarg, REG_EXTENDED) != 0) { + Jmsg((JCR *)NULL, M_ERROR_TERM, 0, + _("Please use valid -l argument: %s\n"), optarg); + } + break; + + case 'r': + filter.resource_type = optarg; + break; + + case 'n': + filter.resource_name = optarg; + break; + + case 'c': /* configuration file */ + if (configfile != NULL) { + free(configfile); + } + configfile = bstrdup(optarg); + break; + + case 'd': /* debug level */ + if (*optarg == 't') { + dbg_timestamp = true; + } else { + debug_level = atoi(optarg); + if (debug_level <= 0) { + debug_level = 1; + } + } + break; + + case 't': + test_config = true; + break; + + case 'v': /* verbose */ + verbose++; + break; + + case '?': + default: + usage(); + break; + } + } + argc -= optind; + argv += optind; + + if (argc) { + if (configfile != NULL) { + free(configfile); + } + configfile = bstrdup(*argv); + argc--; + argv++; + } + + if (argc) { + usage(); + } + + if (filter.do_list && !filter.resource_type) { + usage(); + } + + if (filter.resource_type && 
filter.resource_name) { + filter.do_one = true; + } + + if (configfile == NULL || configfile[0] == 0) { + configfile = bstrdup(CONFIG_FILE); + } + + if (test_config && verbose > 0) { + char buf[1024]; + find_config_file(configfile, buf, sizeof(buf)); + sendit(NULL, "config_file=%s\n", buf); + } + + config = New(CONFIG()); + config->encode_password(false); + parse_sd_config(config, configfile, M_ERROR_TERM); + + if (!check_resources()) { + Jmsg((JCR *)NULL, M_ERROR_TERM, 0, _("Please correct configuration file: %s\n"), configfile); + } + + if (test_config) { + terminate_stored(0); + } + + my_name_is(0, (char **)NULL, me->hdr.name); /* Set our real name */ + + dump_json(&filter); + + if (filter.do_list) { + regfree(&filter.directive_reg); + } + + terminate_stored(0); +} + +static void display_devtype(HPKT &hpkt) +{ + int i; + for (i=0; dev_types[i].name; i++) { + if (*(int32_t *)(hpkt.ritem->value) == dev_types[i].token) { + sendit(NULL, "\n \"%s\": \"%s\"", hpkt.ritem->name, + dev_types[i].name); + return; + } + } +} + +static void display_label(HPKT &hpkt) +{ + int i; + for (i=0; tapelabels[i].name; i++) { + if (*(int32_t *)(hpkt.ritem->value) == tapelabels[i].token) { + sendit(NULL, "\n \"%s\": \"%s\"", hpkt.ritem->name, + tapelabels[i].name); + return; + } + } +} + +static void display_cloud_driver(HPKT &hpkt) +{ + int i; + for (i=0; cloud_drivers[i].name; i++) { + if (*(int32_t *)(hpkt.ritem->value) == cloud_drivers[i].token) { + sendit(NULL, "\n \"%s\": \"%s\"", hpkt.ritem->name, + cloud_drivers[i].name); + return; + } + } +} + +static void display_protocol(HPKT &hpkt) +{ + int i; + for (i=0; proto_opts[i].name; i++) { + if (*(int32_t *)(hpkt.ritem->value) == proto_opts[i].token) { + sendit(NULL, "\n \"%s\": \"%s\"", hpkt.ritem->name, + proto_opts[i].name); + return; + } + } +} + +static void display_truncate_cache(HPKT &hpkt) +{ + int i; + for (i=0; trunc_opts[i].name; i++) { + if (*(int32_t *)(hpkt.ritem->value) == trunc_opts[i].token) { + sendit(NULL, "\n \"%s\": \"%s\"", hpkt.ritem->name, + trunc_opts[i].name); + return; + } + } +} + +static void display_uri_style(HPKT &hpkt) +{ + int i; + for (i=0; uri_opts[i].name; i++) { + if (*(int32_t *)(hpkt.ritem->value) == uri_opts[i].token) { + sendit(NULL, "\n \"%s\": \"%s\"", hpkt.ritem->name, + uri_opts[i].name); + return; + } + } +} + +static void display_upload(HPKT &hpkt) +{ + int i; + for (i=0; upload_opts[i].name; i++) { + if (*(int32_t *)(hpkt.ritem->value) == upload_opts[i].token) { + sendit(NULL, "\n \"%s\": \"%s\"", hpkt.ritem->name, + upload_opts[i].name); + return; + } + } +} + +/* + * Dump out all resources in json format. + * Note!!!! This routine must be in this file rather + * than in src/lib/parser_conf.c otherwise the pointers + * will be all messed up. + */ +static void dump_json(display_filter *filter) +{ + int resinx, item, directives, first_directive; + bool first_res; + RES_ITEM *items; + RES *res; + HPKT hpkt; + regmatch_t pmatch[32]; + STORES *me = (STORES *)GetNextRes(R_STORAGE, NULL); + + if (init_crypto() != 0) { + Emsg0(M_ERROR_TERM, 0, _("Cryptography library initialization failed.\n")); + } + + init_hpkt(hpkt); + + if (filter->do_only_data) { + sendit(NULL, "["); + + /* List resources and directives */ + /* { "aa": { "Name": "aa",.. }, "bb": { "Name": "bb", ... } + * or print a single item + */ + } else if (filter->do_one || filter->do_list) { + sendit(NULL, "{"); + + } else { + /* [ { "Device": { "Name": "aa",.. } }, { "Director": { "Name": "bb", ... 
} } ]*/ + sendit(NULL, "["); + } + + first_res = true; + /* Loop over all resource types */ + for (resinx=0; resources[resinx].name; resinx++) { + if (!resources[resinx].items) { + continue; /* skip dummy entries */ + } + + /* Skip this resource type */ + if (filter->resource_type && + strcasecmp(filter->resource_type, resources[resinx].name) != 0) { + continue; + } + + directives = 0; + /* Loop over all resources of this type */ + foreach_rblist(res, res_head[resinx]->res_list) { + hpkt.res = res; + items = resources[resinx].items; + if (!items) { + continue; + } + + /* Copy the resource into res_all */ + memcpy(&res_all, res, sizeof(res_all)); + + if (filter->resource_name) { + bool skip=true; + /* The Name should be at the first place, so this is not a real loop */ + for (item=0; items[item].name; item++) { + if (strcasecmp(items[item].name, "Name") == 0) { + if (strcasecmp(*(items[item].value), filter->resource_name) == 0) { + skip = false; + } + break; + } + } + if (skip) { /* The name doesn't match, so skip it */ + continue; + } + } + + if (first_res) { + sendit(NULL, "\n"); + } else { + sendit(NULL, ",\n"); + } + + if (filter->do_only_data) { + sendit(NULL, " {"); + + } else if (filter->do_one) { + /* Nothing to print */ + + /* When sending the list, the form is: + * { aa: { Name: aa, Description: aadesc...}, bb: { Name: bb + */ + } else if (filter->do_list) { + /* Search and display Name, should be the first item */ + for (item=0; items[item].name; item++) { + if (strcmp(items[item].name, "Name") == 0) { + sendit(NULL, "%s: {\n", quote_string(hpkt.edbuf2, *items[item].value)); + break; + } + } + } else { + /* Begin new resource */ + sendit(NULL, "{\n \"%s\": {", resources[resinx].name); + } + + first_res = false; + first_directive = 0; + directives = 0; + for (item=0; items[item].name; item++) { + /* Check user argument -l */ + if (filter->do_list && + regexec(&filter->directive_reg, + items[item].name, 32, pmatch, 0) != 0) + { + continue; + } + + hpkt.ritem = &items[item]; + if (bit_is_set(item, res_all.hdr.item_present)) { + if (first_directive++ > 0) printf(","); + if (display_global_item(hpkt)) { + /* Fall-through wanted */ + } else if (items[item].handler == store_maxblocksize) { + display_int32_pair(hpkt); + } else if (items[item].handler == store_devtype) { + display_devtype(hpkt); + } else if (items[item].handler == store_label) { + display_label(hpkt); + } else if (items[item].handler == store_cloud_driver) { + display_cloud_driver(hpkt); + } else if (items[item].handler == store_protocol) { + display_protocol(hpkt); + } else if (items[item].handler == store_uri_style) { + display_uri_style(hpkt); + } else if (items[item].handler == store_truncate) { + display_truncate_cache(hpkt); + } else if (items[item].handler == store_upload) { + display_upload(hpkt); + } else { + printf("\n \"%s\": \"null\"", items[item].name); + } + directives++; + } else { /* end if is present */ + /* For some directive, the bitmap is not set (like addresses) */ + if (me && strcmp(resources[resinx].name, "Storage") == 0) { + if (strcmp(items[item].name, "SdPort") == 0) { + if (get_first_port_host_order(me->sdaddrs) != items[item].default_value) { + if (first_directive++ > 0) sendit(NULL, ","); + sendit(NULL, "\n \"SdPort\": %d", + get_first_port_host_order(me->sdaddrs)); + } + } else if (me && strcmp(items[item].name, "SdAddress") == 0) { + char buf[500]; + get_first_address(me->sdaddrs, buf, sizeof(buf)); + if (strcmp(buf, "0.0.0.0") != 0) { + if (first_directive++ > 0) sendit(NULL, ","); + 
sendit(NULL, "\n \"SdAddress\": \"%s\"", buf); + } + } + } + } + if (items[item].flags & ITEM_LAST) { + display_last(hpkt); /* If last bit set always call to cleanup */ + } + } + + /* { "aa": { "Name": "aa",.. }, "bb": { "Name": "bb", ... } */ + if (filter->do_only_data || filter->do_list) { + sendit(NULL, "\n }"); /* Finish the Resource with a single } */ + + } else { + if (filter->do_one) { + /* don't print anything */ + + } else if (first_directive > 0) { + sendit(NULL, "\n }\n}"); /* end of resource */ + + } else { + sendit(NULL, "}\n }"); + } + } + + } /* End loop over all resources of this type */ + } /* End loop all resource types */ + + if (filter->do_only_data) { + sendit(NULL, "\n]\n"); + + /* In list context, we are dealing with a hash */ + } else if (filter->do_one || filter->do_list) { + sendit(NULL, "\n}\n"); + + } else { + sendit(NULL, "\n]\n"); + } + term_hpkt(hpkt); +} + + +/* Check Configuration file for necessary info */ +static int check_resources() +{ + bool OK = true; + bool tls_needed; + AUTOCHANGER *changer; + DEVRES *device; + + me = (STORES *)GetNextRes(R_STORAGE, NULL); + if (!me) { + Jmsg1(NULL, M_ERROR, 0, _("No Storage resource defined in %s. Cannot continue.\n"), + configfile); + OK = false; + } + + if (GetNextRes(R_STORAGE, (RES *)me) != NULL) { + Jmsg1(NULL, M_ERROR, 0, _("Only one Storage resource permitted in %s\n"), + configfile); + OK = false; + } + if (GetNextRes(R_DIRECTOR, NULL) == NULL) { + Jmsg1(NULL, M_ERROR, 0, _("No Director resource defined in %s. Cannot continue.\n"), + configfile); + OK = false; + } + if (GetNextRes(R_DEVICE, NULL) == NULL){ + Jmsg1(NULL, M_ERROR, 0, _("No Device resource defined in %s. Cannot continue.\n"), + configfile); + OK = false; + } + + if (!me->messages) { + me->messages = (MSGS *)GetNextRes(R_MSGS, NULL); + if (!me->messages) { + Jmsg1(NULL, M_ERROR, 0, _("No Messages resource defined in %s. Cannot continue.\n"), + configfile); + OK = false; + } + } + + if (!me->working_directory) { + Jmsg1(NULL, M_ERROR, 0, _("No Working Directory defined in %s. Cannot continue.\n"), + configfile); + OK = false; + } + + DIRRES *director; + STORES *store; + foreach_res(store, R_STORAGE) { + /* tls_require implies tls_enable */ + if (store->tls_require) { + if (have_tls) { + store->tls_enable = true; + } else { + Jmsg(NULL, M_FATAL, 0, _("TLS required but not configured in Bacula.\n")); + OK = false; + continue; + } + } + + tls_needed = store->tls_enable || store->tls_authenticate; + + if (!store->tls_certfile && tls_needed) { + Jmsg(NULL, M_FATAL, 0, _("\"TLS Certificate\" file not defined for Storage \"%s\" in %s.\n"), + store->hdr.name, configfile); + OK = false; + } + + if (!store->tls_keyfile && tls_needed) { + Jmsg(NULL, M_FATAL, 0, _("\"TLS Key\" file not defined for Storage \"%s\" in %s.\n"), + store->hdr.name, configfile); + OK = false; + } + + if ((!store->tls_ca_certfile && !store->tls_ca_certdir) && tls_needed && store->tls_verify_peer) { + Jmsg(NULL, M_FATAL, 0, _("Neither \"TLS CA Certificate\"" + " or \"TLS CA Certificate Dir\" are defined for Storage \"%s\" in %s." 
+ " At least one CA certificate store is required" + " when using \"TLS Verify Peer\".\n"), + store->hdr.name, configfile); + OK = false; + } + } + + foreach_res(director, R_DIRECTOR) { + /* tls_require implies tls_enable */ + if (director->tls_require) { + director->tls_enable = true; + } + + tls_needed = director->tls_enable || director->tls_authenticate; + + if (!director->tls_certfile && tls_needed) { + Jmsg(NULL, M_FATAL, 0, _("\"TLS Certificate\" file not defined for Director \"%s\" in %s.\n"), + director->hdr.name, configfile); + OK = false; + } + + if (!director->tls_keyfile && tls_needed) { + Jmsg(NULL, M_FATAL, 0, _("\"TLS Key\" file not defined for Director \"%s\" in %s.\n"), + director->hdr.name, configfile); + OK = false; + } + + if ((!director->tls_ca_certfile && !director->tls_ca_certdir) && tls_needed && director->tls_verify_peer) { + Jmsg(NULL, M_FATAL, 0, _("Neither \"TLS CA Certificate\"" + " or \"TLS CA Certificate Dir\" are defined for Director \"%s\" in %s." + " At least one CA certificate store is required" + " when using \"TLS Verify Peer\".\n"), + director->hdr.name, configfile); + OK = false; + } + } + + foreach_res(changer, R_AUTOCHANGER) { + foreach_alist(device, changer->device) { + device->cap_bits |= CAP_AUTOCHANGER; + } + } + + return OK; +} + +/* Clean up and then exit */ +void terminate_stored(int sig) +{ + static bool in_here = false; + + if (in_here) { /* prevent loops */ + bmicrosleep(2, 0); /* yield */ + exit(1); + } + in_here = true; + debug_level = 0; /* turn off any debug */ + + if (configfile) { + free(configfile); + configfile = NULL; + } + if (config) { + delete config; + config = NULL; + } + + if (debug_level > 10) { + print_memory_pool_stats(); + } + term_msg(); + free(res_head); + res_head = NULL; + close_memory_pool(); + + //sm_dump(false); /* dump orphaned buffers */ + exit(sig); +} + +static void sendit(void *sock, const char *fmt, ...) +{ + char buf[3000]; + va_list arg_ptr; + + va_start(arg_ptr, fmt); + bvsnprintf(buf, sizeof(buf), (char *)fmt, arg_ptr); + va_end(arg_ptr); + fputs(buf, stdout); + fflush(stdout); +} diff --git a/src/stored/bsr.h b/src/stored/bsr.h new file mode 100644 index 00000000..344f8e09 --- /dev/null +++ b/src/stored/bsr.h @@ -0,0 +1,173 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * BootStrap record definition -- for restoring files. + * + * Kern Sibbald, June 2002 + * + */ + + +#ifndef __BSR_H +#define __BSR_H 1 + +#ifndef HAVE_REGEX_H +#include "lib/bregex.h" +#else +#include +#endif + +/* + * List of Volume names to be read by Storage daemon. + * Formed by Storage daemon from BSR + */ +struct VOL_LIST { + VOL_LIST *next; + char VolumeName[MAX_NAME_LENGTH]; + char MediaType[MAX_NAME_LENGTH]; + char device[MAX_NAME_LENGTH]; /* ***FIXME*** use alist here */ + int Slot; + uint32_t start_file; +}; + + +/* + * !!!!!!!!!!!!!!!!!! NOTE !!!!!!!!!!!!!!!!!!!!!!!!!!!!! + * !!! !!! + * !!! All records must have a pointer to !!! + * !!! 
the next item as the first item defined. !!! + * !!! !!! + * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + */ + +struct BSR_VOLUME { + BSR_VOLUME *next; + char VolumeName[MAX_NAME_LENGTH]; + char MediaType[MAX_NAME_LENGTH]; + char device[MAX_NAME_LENGTH]; /* ***FIXME*** use alist here */ + int32_t Slot; /* Slot */ +}; + +struct BSR_CLIENT { + BSR_CLIENT *next; + char ClientName[MAX_NAME_LENGTH]; +}; + +struct BSR_SESSID { + BSR_SESSID *next; + uint32_t sessid; + uint32_t sessid2; +}; + +struct BSR_SESSTIME { + BSR_SESSTIME *next; + uint32_t sesstime; + bool done; /* local done */ +}; + +struct BSR_VOLFILE { + BSR_VOLFILE *next; + uint32_t sfile; /* start file */ + uint32_t efile; /* end file */ + bool done; /* local done */ +}; + +struct BSR_VOLBLOCK { + BSR_VOLBLOCK *next; + uint32_t sblock; /* start block */ + uint32_t eblock; /* end block */ + bool done; /* local done */ +}; + +struct BSR_VOLADDR { + BSR_VOLADDR *next; + uint64_t saddr; /* start address */ + uint64_t eaddr; /* end address */ + bool done; /* local done */ +}; + +struct BSR_FINDEX { + BSR_FINDEX *next; + int32_t findex; /* start file index */ + int32_t findex2; /* end file index */ + bool done; /* local done */ +}; + +struct BSR_JOBID { + BSR_JOBID *next; + uint32_t JobId; + uint32_t JobId2; +}; + +struct BSR_JOBTYPE { + BSR_JOBTYPE *next; + uint32_t JobType; +}; + +struct BSR_JOBLEVEL { + BSR_JOBLEVEL *next; + uint32_t JobLevel; +}; + +struct BSR_JOB { + BSR_JOB *next; + char Job[MAX_NAME_LENGTH]; + bool done; /* local done */ +}; + +struct BSR_STREAM { + BSR_STREAM *next; + int32_t stream; /* stream desired */ +}; + +struct BSR { + /* NOTE!!! next must be the first item */ + BSR *next; /* pointer to next one */ + BSR *prev; /* pointer to previous one */ + BSR *root; /* root bsr */ + BSR *cur_bsr; + bool reposition; /* set when any bsr is marked done */ + bool mount_next_volume; /* set when next volume should be mounted */ + bool done; /* set when everything found for this bsr */ + bool use_fast_rejection; /* set if fast rejection can be used */ + bool use_positioning; /* set if we can position the archive */ + bool skip_file; /* skip all records for current file */ + BSR_VOLUME *volume; + uint32_t count; /* count of files to restore this bsr */ + uint32_t found; /* count of restored files this bsr */ + int32_t LastFI; /* LastFI seen by this bsr */ + BSR_VOLFILE *volfile; + BSR_VOLBLOCK *volblock; + BSR_VOLADDR *voladdr; + BSR_SESSTIME *sesstime; + BSR_SESSID *sessid; + BSR_JOBID *JobId; + BSR_JOB *job; + BSR_CLIENT *client; + BSR_FINDEX *FileIndex; + BSR_JOBTYPE *JobType; + BSR_JOBLEVEL *JobLevel; + BSR_STREAM *stream; + char *fileregex; /* set if restore is filtered on filename */ + regex_t *fileregex_re; + ATTR *attr; /* scratch space for unpacking */ +}; + + +#endif diff --git a/src/stored/btape.c b/src/stored/btape.c new file mode 100644 index 00000000..69a41dca --- /dev/null +++ b/src/stored/btape.c @@ -0,0 +1,3132 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. 
+ + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * Bacula Tape manipulation program + * + * Has various tape manipulation commands -- mostly for + * use in determining how tapes really work. + * + * Kern Sibbald, April MM + * + * Note, this program reads stored.conf, and will only + * talk to devices that are configured. + */ + +#include "bacula.h" +#include "stored.h" + +#ifdef USE_VTAPE +#include "vtape_dev.h" +#endif + +extern bool parse_sd_config(CONFIG *config, const char *configfile, int exit_code); + +/* External subroutines */ +extern void free_config_resources(); + +/* Exported variables */ +void *start_heap; +int quit = 0; +char buf[100000]; +int bsize = TAPE_BSIZE; +char VolName[MAX_NAME_LENGTH]; + +/* + * If you change the format of the state file, + * increment this value + */ +static uint32_t btape_state_level = 2; + +DEVICE *dev = NULL; +DCR *dcr; +DEVRES *device = NULL; +int exit_code = 0; + +#define REC_SIZE 32768 + +/* Forward referenced subroutines */ +static void do_tape_cmds(); +static void helpcmd(); +static void scancmd(); +static void rewindcmd(); +static void clearcmd(); +static void wrcmd(); +static void rrcmd(); +static void rbcmd(); +static void eodcmd(); +static void fillcmd(); +static void qfillcmd(); +static void statcmd(); +static void unfillcmd(); +static int flush_block(DEV_BLOCK *block, int dump); +static bool quickie_cb(DCR *dcr, DEV_RECORD *rec); +static bool compare_blocks(DEV_BLOCK *last_block, DEV_BLOCK *block); +static bool my_mount_next_read_volume(DCR *dcr); +static void scan_blocks(); +static void set_volume_name(const char *VolName, int volnum); +static void rawfill_cmd(); +static bool open_the_device(); +static void autochangercmd(); +static bool do_unfill(); + + +/* Static variables */ +static CONFIG *config; +#define CONFIG_FILE "bacula-sd.conf" +char *configfile = NULL; + +#define MAX_CMD_ARGS 30 +static POOLMEM *cmd; +static POOLMEM *args; +static char *argk[MAX_CMD_ARGS]; +static char *argv[MAX_CMD_ARGS]; +static int argc; + +static int quickie_count = 0; +static uint64_t write_count = 0; +static BSR *bsr = NULL; +static int signals = TRUE; +static bool ok; +static int stop = 0; +static uint64_t vol_size; +static uint64_t VolBytes; +static time_t now; +static int32_t file_index; +static int end_of_tape = 0; +static uint32_t LastBlock = 0; +static uint32_t eot_block; +static uint32_t eot_block_len; +static uint32_t eot_FileIndex; +static int dumped = 0; +static DEV_BLOCK *last_block1 = NULL; +static DEV_BLOCK *last_block2 = NULL; +static DEV_BLOCK *last_block = NULL; +static DEV_BLOCK *this_block = NULL; +static DEV_BLOCK *first_block = NULL; +static uint32_t last_file1 = 0; +static uint32_t last_file2 = 0; +static uint32_t last_file = 0; +static uint32_t last_block_num1 = 0; +static uint32_t last_block_num2 = 0; +static uint32_t last_block_num = 0; +static uint32_t BlockNumber = 0; +static bool simple = true; + +static const char *VolumeName = NULL; +static int vol_num = 0; + +static JCR *jcr = NULL; + +static void usage(); +static void terminate_btape(int sig); +int get_cmd(const char *prompt); + +class BtapeAskDirHandler: public BtoolsAskDirHandler +{ +public: + BtapeAskDirHandler() {} + ~BtapeAskDirHandler() {} + bool dir_find_next_appendable_volume(DCR *dcr); + bool dir_ask_sysop_to_mount_volume(DCR *dcr, bool /* writing */); + bool dir_ask_sysop_to_create_appendable_volume(DCR *dcr); + bool dir_create_jobmedia_record(DCR *dcr, bool zero); +}; + + 
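A minimal sketch, outside the imported patch content, of the handler-substitution pattern that the BtapeAskDirHandler class above appears to rely on: the standalone tool installs its own subclass of the "ask the Director" handler, so engine code paths that would normally query a Director are answered locally. Every name in this sketch (AskDirHandler, ToolAskDirHandler, init_askdir_handler stub, askdir pointer) is illustrative only and is not taken from the Bacula source.

#include <cstdio>

/* Base handler: the role BtoolsAskDirHandler plays above (hypothetical names). */
class AskDirHandler {
public:
   virtual ~AskDirHandler() {}
   virtual bool find_next_appendable_volume() { return false; }
   virtual bool ask_sysop_to_mount_volume()   { return false; }
};

/* Tool-specific handler: the role BtapeAskDirHandler plays above. */
class ToolAskDirHandler: public AskDirHandler {
public:
   bool find_next_appendable_volume() { return true; }   /* pretend a volume is available */
   bool ask_sysop_to_mount_volume() {
      std::printf("Mount a volume and press return\n");  /* prompt the operator directly */
      (void)std::getchar();
      return true;
   }
};

/* Global hook the engine would call instead of contacting a Director. */
static AskDirHandler *askdir = NULL;
static void init_askdir_handler(AskDirHandler *h) { askdir = h; }

int main()
{
   ToolAskDirHandler handler;
   init_askdir_handler(&handler);     /* as main() does just below with askdir_handler */
   return askdir->ask_sysop_to_mount_volume() ? 0 : 1;
}

The design point, as suggested by main() below, is that btape registers the handler once at startup and the rest of the storage code stays unchanged, calling through the handler interface whether a real Director or a local stub is behind it.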
+/********************************************************************* + * + * Bacula tape testing program + * + */ +int main(int margc, char *margv[]) +{ + int ch, i; + uint32_t x32, y32; + uint64_t x64, y64; + char buf[1000]; + BtapeAskDirHandler askdir_handler; + + init_askdir_handler(&askdir_handler); + setlocale(LC_ALL, ""); + bindtextdomain("bacula", LOCALEDIR); + textdomain("bacula"); + init_stack_dump(); + lmgr_init_thread(); + + /* Sanity checks */ + if (TAPE_BSIZE % B_DEV_BSIZE != 0 || TAPE_BSIZE / B_DEV_BSIZE == 0) { + Emsg2(M_ABORT, 0, _("Tape block size (%d) not multiple of system size (%d)\n"), + TAPE_BSIZE, B_DEV_BSIZE); + } + if (TAPE_BSIZE != (1 << (ffs(TAPE_BSIZE)-1))) { + Emsg1(M_ABORT, 0, _("Tape block size (%d) is not a power of 2\n"), TAPE_BSIZE); + } + if (sizeof(boffset_t) < 8) { + Pmsg1(-1, _("\n\n!!!! Warning large disk addressing disabled. boffset_t=%d should be 8 or more !!!!!\n\n\n"), + sizeof(boffset_t)); + } + x32 = 123456789; + bsnprintf(buf, sizeof(buf), "%u", x32); + i = bsscanf(buf, "%lu", &y32); + if (i != 1 || x32 != y32) { + Pmsg3(-1, _("32 bit printf/scanf problem. i=%d x32=%u y32=%u\n"), i, x32, y32); + exit(1); + } + x64 = 123456789; + x64 = x64 << 32; + x64 += 123456789; + bsnprintf(buf, sizeof(buf), "%" llu, x64); + i = bsscanf(buf, "%llu", &y64); + if (i != 1 || x64 != y64) { + Pmsg3(-1, _("64 bit printf/scanf problem. i=%d x64=%" llu " y64=%" llu "\n"), + i, x64, y64); + exit(1); + } + + printf(_("Tape block granularity is %d bytes.\n"), TAPE_BSIZE); + + working_directory = "/tmp"; + my_name_is(margc, margv, "btape"); + init_msg(NULL, NULL); + + OSDependentInit(); + + while ((ch = getopt(margc, margv, "b:w:c:d:psv?")) != -1) { + switch (ch) { + case 'b': /* bootstrap file */ + bsr = parse_bsr(NULL, optarg); + break; + + case 'w': + working_directory = optarg; + break; + + case 'c': /* specify config file */ + if (configfile != NULL) { + free(configfile); + } + configfile = bstrdup(optarg); + break; + + case 'd': /* set debug level */ + if (*optarg == 't') { + dbg_timestamp = true; + } else { + debug_level = atoi(optarg); + if (debug_level <= 0) { + debug_level = 1; + } + } + break; + + case 'p': + forge_on = true; + break; + + case 's': + signals = false; + break; + + case 'v': + verbose++; + break; + + case '?': + default: + helpcmd(); + exit(0); + + } + } + margc -= optind; + margv += optind; + + cmd = get_pool_memory(PM_FNAME); + args = get_pool_memory(PM_FNAME); + + if (signals) { + init_signals(terminate_btape); + } + + if (configfile == NULL) { + configfile = bstrdup(CONFIG_FILE); + } + + daemon_start_time = time(NULL); + + config = New(CONFIG()); + parse_sd_config(config, configfile, M_ERROR_TERM); + setup_me(); + load_sd_plugins(me->plugin_directory); + + /* See if we can open a device */ + if (margc == 0) { + Pmsg0(000, _("No archive name specified.\n")); + usage(); + exit(1); + } else if (margc != 1) { + Pmsg0(000, _("Improper number of arguments specified.\n")); + usage(); + exit(1); + } + + jcr = setup_jcr("btape", margv[0], bsr, NULL, SD_APPEND); + if (!jcr) { + exit(1); + } + dev = jcr->dcr->dev; + if (!dev) { + exit(1); + } + if (!dev->is_tape()) { + Pmsg0(000, _("btape only works with tape storage.\n")); + usage(); + exit(1); + } + dcr = jcr->dcr; + if (!open_the_device()) { + exit(1); + } + + Dmsg0(200, "Do tape commands\n"); + do_tape_cmds(); + + terminate_btape(exit_code); +} + +static void terminate_btape(int stat) +{ + + Dsm_check(200); + + if (args) { + free_pool_memory(args); + args = NULL; + } + if (cmd) { + 
free_pool_memory(cmd); + cmd = NULL; + } + + if (bsr) { + free_bsr(bsr); + } + + free_jcr(jcr); + jcr = NULL; + + free_volume_lists(); + + if (dev) { + dev->term(dcr); + } + + if (configfile) { + free(configfile); + } + + if (config) { + delete config; + config = NULL; + } + + if (chk_dbglvl(10)) + print_memory_pool_stats(); + + Dmsg1(900, "=== free_block %p\n", this_block); + free_block(this_block); + this_block = NULL; + + stop_watchdog(); + term_msg(); + term_last_jobs_list(); + free(res_head); + res_head = NULL; + close_memory_pool(); /* free memory in pool */ + lmgr_cleanup_main(); + + sm_dump(false); + exit(stat); +} + + +btime_t total_time=0; +uint64_t total_size=0; + +static void init_total_speed() +{ + total_size = 0; + total_time = 0; +} + +static void print_total_speed() +{ + char ec1[50], ec2[50]; + uint64_t rate = total_size / total_time; + Pmsg2(000, _("Total Volume bytes=%sB. Total Write rate = %sB/s\n"), + edit_uint64_with_suffix(total_size, ec1), + edit_uint64_with_suffix(rate, ec2)); +} + +static void init_speed() +{ + time(&jcr->run_time); /* start counting time for rates */ + jcr->JobBytes=0; +} + +static void print_speed(uint64_t bytes) +{ + char ec1[50], ec2[50]; + uint64_t rate; + + now = time(NULL); + now -= jcr->run_time; + if (now <= 0) { + now = 1; /* don't divide by zero */ + } + + total_time += now; + total_size += bytes; + + rate = bytes / now; + Pmsg2(000, _("Volume bytes=%sB. Write rate = %sB/s\n"), + edit_uint64_with_suffix(bytes, ec1), + edit_uint64_with_suffix(rate, ec2)); +} + +/* + * Helper that fill a buffer with random data or not + */ +typedef enum { + FILL_RANDOM, + FILL_ZERO +} fill_mode_t; + +static void fill_buffer(fill_mode_t mode, char *buf, uint32_t len) +{ + int fd; + switch (mode) { + case FILL_RANDOM: + fd = open("/dev/urandom", O_RDONLY); + if (fd != -1) { + read(fd, buf, len); + close(fd); + } else { + uint32_t *p = (uint32_t *)buf; + srandom(time(NULL)); + for (uint32_t i=0; iblock) { + dev->new_dcr_blocks(dcr); + } + dev->rLock(false); + Dmsg1(200, "Opening device %s\n", dcr->VolumeName); + if (!dev->open_device(dcr, OPEN_READ_WRITE)) { + Emsg1(M_FATAL, 0, _("dev open failed: %s\n"), dev->print_errmsg()); + ok = false; + goto bail_out; + } + Pmsg1(000, _("open device %s: OK\n"), dev->print_name()); + dev->set_append(); /* put volume in append mode */ + +bail_out: + dev->Unlock(); + return ok; +} + + +void quitcmd() +{ + quit = 1; +} + +/* + * Write a label to the tape + */ +static void labelcmd() +{ + if (VolumeName) { + pm_strcpy(cmd, VolumeName); + } else { + if (!get_cmd(_("Enter Volume Name: "))) { + return; + } + } + + if (!dev->is_open()) { + if (!first_open_device(dcr)) { + Pmsg1(0, _("Device open failed. 
ERR=%s\n"), dev->bstrerror()); + } + } + dev->rewind(dcr); + dev->write_volume_label(dcr, cmd, "Default", false,/*no relabel*/ true/* label now */); + Pmsg1(-1, _("Wrote Volume label for volume \"%s\".\n"), cmd); +} + +/* + * Read the tape label + */ +static void readlabelcmd() +{ + int64_t save_debug_level = debug_level; + int stat; + + stat = dev->read_dev_volume_label(dcr); + switch (stat) { + case VOL_NO_LABEL: + Pmsg0(0, _("Volume has no label.\n")); + break; + case VOL_OK: + Pmsg0(0, _("Volume label read correctly.\n")); + break; + case VOL_IO_ERROR: + Pmsg1(0, _("I/O error on device: ERR=%s"), dev->bstrerror()); + break; + case VOL_TYPE_ERROR: + Pmsg1(0, _("Volume type error: ERR=%s\n"), dev->print_errmsg()); + break; + case VOL_NAME_ERROR: + Pmsg0(0, _("Volume name error\n")); + break; + case VOL_CREATE_ERROR: + Pmsg1(0, _("Error creating label. ERR=%s"), dev->bstrerror()); + break; + case VOL_VERSION_ERROR: + Pmsg0(0, _("Volume version error.\n")); + break; + case VOL_LABEL_ERROR: + Pmsg0(0, _("Bad Volume label type.\n")); + break; + default: + Pmsg0(0, _("Unknown error.\n")); + break; + } + + debug_level = 20; + dev->dump_volume_label(); + debug_level = save_debug_level; +} + + +/* + * Load the tape should have prevously been taken + * off line, otherwise this command is not necessary. + */ +static void loadcmd() +{ + + if (!load_dev(dev)) { + Pmsg1(0, _("Bad status from load. ERR=%s\n"), dev->bstrerror()); + } else + Pmsg1(0, _("Loaded %s\n"), dev->print_name()); +} + +/* + * Rewind the tape. + */ +static void rewindcmd() +{ + if (!dev->rewind(dcr)) { + Pmsg1(0, _("Bad status from rewind. ERR=%s\n"), dev->bstrerror()); + dev->clrerror(-1); + } else { + Pmsg1(0, _("Rewound %s\n"), dev->print_name()); + } +} + +/* + * Clear any tape error + */ +static void clearcmd() +{ + dev->clrerror(-1); +} + +/* + * Write and end of file on the tape + */ +static void weofcmd() +{ + int num = 1; + if (argc > 1) { + num = atoi(argk[1]); + } + if (num <= 0) { + num = 1; + } + + if (!dev->weof(NULL, num)) { + Pmsg1(0, _("Bad status from weof. ERR=%s\n"), dev->bstrerror()); + return; + } else { + if (num==1) { + Pmsg1(0, _("Wrote 1 EOF to %s\n"), dev->print_name()); + } + else { + Pmsg2(0, _("Wrote %d EOFs to %s\n"), num, dev->print_name()); + } + } +} + + +/* Go to the end of the medium -- raw command + * The idea was orginally that the end of the Bacula + * medium would be flagged differently. This is not + * currently the case. So, this is identical to the + * eodcmd(). + */ +static void eomcmd() +{ + if (!dev->eod(dcr)) { + Pmsg1(0, "%s", dev->bstrerror()); + return; + } else { + Pmsg0(0, _("Moved to end of medium.\n")); + } +} + +/* + * Go to the end of the medium (either hardware determined + * or defined by two eofs. + */ +static void eodcmd() +{ + eomcmd(); +} + +/* + * Backspace file + */ +static void bsfcmd() +{ + int num = 1; + if (argc > 1) { + num = atoi(argk[1]); + } + if (num <= 0) { + num = 1; + } + + if (!dev->bsf(num)) { + Pmsg1(0, _("Bad status from bsf. ERR=%s\n"), dev->bstrerror()); + } else { + Pmsg2(0, _("Backspaced %d file%s.\n"), num, num==1?"":"s"); + } +} + +/* + * Backspace record + */ +static void bsrcmd() +{ + int num = 1; + if (argc > 1) { + num = atoi(argk[1]); + } + if (num <= 0) { + num = 1; + } + if (!dev->bsr(num)) { + Pmsg1(0, _("Bad status from bsr. ERR=%s\n"), dev->bstrerror()); + } else { + Pmsg2(0, _("Backspaced %d record%s.\n"), num, num==1?"":"s"); + } +} + +/* + * List device capabilities as defined in the + * stored.conf file. 
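+ * Each capability and status flag below is printed bare when it is set and
+ * prefixed with "!" when it is clear, so "EOF !BSR" means CAP_EOF is set
+ * and CAP_BSR is not. A minimal sketch of that test (the CAP_* values are
+ * bit masks held in dev->capabilities):
+ *
+ *    void print_cap(uint32_t caps, uint32_t bit, const char *name)
+ *    {
+ *       printf("%s%s ", (caps & bit) ? "" : "!", name);
+ *    }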
+ */ +static void capcmd() +{ + printf(_("Configured device capabilities:\n")); + printf("%sEOF ", dev->capabilities & CAP_EOF ? "" : "!"); + printf("%sBSR ", dev->capabilities & CAP_BSR ? "" : "!"); + printf("%sBSF ", dev->capabilities & CAP_BSF ? "" : "!"); + printf("%sFSR ", dev->capabilities & CAP_FSR ? "" : "!"); + printf("%sFSF ", dev->capabilities & CAP_FSF ? "" : "!"); + printf("%sFASTFSF ", dev->capabilities & CAP_FASTFSF ? "" : "!"); + printf("%sBSFATEOM ", dev->capabilities & CAP_BSFATEOM ? "" : "!"); + printf("%sEOM ", dev->capabilities & CAP_EOM ? "" : "!"); + printf("%sREM ", dev->capabilities & CAP_REM ? "" : "!"); + printf("%sRACCESS ", dev->capabilities & CAP_RACCESS ? "" : "!"); + printf("%sAUTOMOUNT ", dev->capabilities & CAP_AUTOMOUNT ? "" : "!"); + printf("%sLABEL ", dev->capabilities & CAP_LABEL ? "" : "!"); + printf("%sANONVOLS ", dev->capabilities & CAP_ANONVOLS ? "" : "!"); + printf("%sALWAYSOPEN ", dev->capabilities & CAP_ALWAYSOPEN ? "" : "!"); + printf("%sMTIOCGET ", dev->capabilities & CAP_MTIOCGET ? "" : "!"); + printf("\n"); + + printf(_("Device status:\n")); + printf("%sOPENED ", dev->is_open() ? "" : "!"); + printf("%sTAPE ", dev->is_tape() ? "" : "!"); + printf("%sLABEL ", dev->is_labeled() ? "" : "!"); + printf("%sMALLOC ", dev->state & ST_MALLOC ? "" : "!"); + printf("%sAPPEND ", dev->can_append() ? "" : "!"); + printf("%sREAD ", dev->can_read() ? "" : "!"); + printf("%sEOT ", dev->at_eot() ? "" : "!"); + printf("%sWEOT ", dev->state & ST_WEOT ? "" : "!"); + printf("%sEOF ", dev->at_eof() ? "" : "!"); + printf("%sNEXTVOL ", dev->state & ST_NEXTVOL ? "" : "!"); + printf("%sSHORT ", dev->state & ST_SHORT ? "" : "!"); + printf("\n"); + + printf(_("Device parameters:\n")); + printf("Device name: %s\n", dev->dev_name); + printf("File=%u block=%u\n", dev->file, dev->block_num); + printf("Min block=%u Max block=%u\n", dev->min_block_size, dev->max_block_size); + + printf(_("Status:\n")); + statcmd(); + +} + +/* + * Test writing larger and larger records. + * This is a torture test for records. + */ +static void rectestcmd() +{ + DEV_BLOCK *save_block; + DEV_RECORD *rec; + int i, blkno = 0; + + Pmsg0(0, _("Test writing larger and larger records.\n" +"This is a torture test for records.\nI am going to write\n" +"larger and larger records. It will stop when the record size\n" +"plus the header exceeds the block size (by default about 64K)\n")); + + + get_cmd(_("Do you want to continue? (y/n): ")); + if (cmd[0] != 'y') { + Pmsg0(000, _("Command aborted.\n")); + return; + } + + Dsm_check(200); + save_block = dcr->block; + dcr->block = dev->new_block(dcr); + rec = new_record(); + + for (i=1; i<500000; i++) { + rec->data = check_pool_memory_size(rec->data, i); + memset(rec->data, i & 0xFF, i); + rec->data_len = i; + Dsm_check(200); + if (write_record_to_block(dcr, rec)) { + empty_block(dcr->block); + blkno++; + Pmsg2(0, _("Block %d i=%d\n"), blkno, i); + } else { + break; + } + Dsm_check(200); + } + free_record(rec); + Dmsg0(900, "=== free_blocks\n"); + free_block(dcr->block); + dcr->block = save_block; /* restore block to dcr */ + Dsm_check(200); +} + +/* + * This test attempts to re-read a block written by Bacula + * normally at the end of the tape. Bacula will then back up + * over the two eof marks, backup over the record and reread + * it to make sure it is valid. 
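+ * In outline, the sequence exercised below is (using the same driver-level
+ * calls this file already uses):
+ *
+ *    write three records, each flushed as a block   dcr->write_block_to_dev()
+ *    write an EOF mark                              weofcmd() / dev->weof()
+ *    backspace over the EOF mark(s)                 dev->bsf(1)
+ *    backspace one record                           dev->bsr(1)
+ *    re-read and verify the data                    dcr->read_block_from_dev(),
+ *                                                   read_record_from_block()
+ *
+ * A sketch of where the directive mentioned next belongs, in the Storage
+ * daemon's Device resource (other required settings omitted):
+ *
+ *    Device {
+ *       ...
+ *       Backward Space Record = no
+ *    }
+ *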
+ * Bacula can skip this validation if you set "Backward space record = no"
+ */
+static bool re_read_block_test()
+{
+   DEV_BLOCK *block = dcr->block;
+   DEV_RECORD *rec;
+   bool rc = false;
+   int len;
+
+   if (!(dev->capabilities & CAP_BSR)) {
+      Pmsg0(-1, _("Skipping read backwards test because BSR turned off.\n"));
+      return true;
+   }
+
+   Pmsg0(-1, _("\n=== Write, backup, and re-read test ===\n\n"
+      "I'm going to write three records and an EOF\n"
+      "then backup over the EOF and re-read the last record.\n"
+      "Bacula does this after writing the last block on the\n"
+      "tape to verify that the block was written correctly.\n\n"
+      "This is not an *essential* feature ...\n\n"));
+   rewindcmd();
+   empty_block(block);
+   rec = new_record();
+   rec->data = check_pool_memory_size(rec->data, block->buf_len);
+   len = rec->data_len = block->buf_len-100;
+   memset(rec->data, 1, rec->data_len);
+   if (!write_record_to_block(dcr, rec)) {
+      Pmsg0(0, _("Error writing record to block.\n"));
+      goto bail_out;
+   }
+   if (!dcr->write_block_to_dev()) {
+      Pmsg0(0, _("Error writing block to device.\n"));
+      goto bail_out;
+   } else {
+      Pmsg1(0, _("Wrote first record of %d bytes.\n"), rec->data_len);
+   }
+   memset(rec->data, 2, rec->data_len);
+   if (!write_record_to_block(dcr, rec)) {
+      Pmsg0(0, _("Error writing record to block.\n"));
+      goto bail_out;
+   }
+   if (!dcr->write_block_to_dev()) {
+      Pmsg0(0, _("Error writing block to device.\n"));
+      goto bail_out;
+   } else {
+      Pmsg1(0, _("Wrote second record of %d bytes.\n"), rec->data_len);
+   }
+   memset(rec->data, 3, rec->data_len);
+   if (!write_record_to_block(dcr, rec)) {
+      Pmsg0(0, _("Error writing record to block.\n"));
+      goto bail_out;
+   }
+   if (!dcr->write_block_to_dev()) {
+      Pmsg0(0, _("Error writing block to device.\n"));
+      goto bail_out;
+   } else {
+      Pmsg1(0, _("Wrote third record of %d bytes.\n"), rec->data_len);
+   }
+   weofcmd();
+   if (dev->has_cap(CAP_TWOEOF)) {
+      weofcmd();
+   }
+   if (!dev->bsf(1)) {
+      Pmsg1(0, _("Backspace file failed! ERR=%s\n"), dev->bstrerror());
+      goto bail_out;
+   }
+   if (dev->has_cap(CAP_TWOEOF)) {
+      if (!dev->bsf(1)) {
+         Pmsg1(0, _("Backspace file failed! ERR=%s\n"), dev->bstrerror());
+         goto bail_out;
+      }
+   }
+   Pmsg0(0, _("Backspaced over EOF OK.\n"));
+   if (!dev->bsr(1)) {
+      Pmsg1(0, _("Backspace record failed! ERR=%s\n"), dev->bstrerror());
+      goto bail_out;
+   }
+   Pmsg0(0, _("Backspace record OK.\n"));
+   if (!dcr->read_block_from_dev(NO_BLOCK_NUMBER_CHECK)) {
+      Pmsg1(0, _("Read block failed! ERR=%s\n"), dev->print_errmsg());
+      goto bail_out;
+   }
+   memset(rec->data, 0, rec->data_len);
+   if (!read_record_from_block(dcr, rec)) {
+      berrno be;
+      Pmsg1(0, _("Read block failed! ERR=%s\n"), be.bstrerror(dev->dev_errno));
+      goto bail_out;
+   }
+   /* The last record written was filled with the byte value 3 */
+   for (int i=0; i<len; i++) {
+      if (rec->data[i] != 3) {
+         Pmsg0(0, _("Bad data in record. Test failed!\n"));
+         goto bail_out;
+      }
+   }
+   Pmsg0(0, _("\nBlock re-read correct. Test succeeded!\n"));
+   Pmsg0(-1, _("=== End Write, backup, and re-read test ===\n\n"));
+
+   rc = true;
+
+bail_out:
+   free_record(rec);
+   if (!rc) {
+      Pmsg0(0, _("This is not terribly serious since Bacula only uses\n"
+         "this function to verify the last block written to the\n"
+         "tape.
Bacula will skip the last block verification\n" + "if you add:\n\n" + "Backward Space Record = No\n\n" + "to your Storage daemon's Device resource definition.\n")); + } + return rc; +} + +static bool speed_test_raw(fill_mode_t mode, uint64_t nb_gb, uint32_t nb) +{ + DEV_BLOCK *block = dcr->block; + int stat; + uint32_t block_num = 0; + int my_errno; + char ed1[200]; + nb_gb *= 1024*1024*1024; /* convert size from nb to GB */ + + init_total_speed(); + fill_buffer(mode, block->buf, block->buf_len); + + Pmsg3(0, _("Begin writing %i files of %sB with raw blocks of %u bytes.\n"), + nb, edit_uint64_with_suffix(nb_gb, ed1), block->buf_len); + + for (uint32_t j=0; jJobBytes < nb_gb; ) { + stat = dev->d_write(dev->fd(), block->buf, block->buf_len); + if (stat == (int)block->buf_len) { + if ((block_num++ % 500) == 0) { + printf("+"); + fflush(stdout); + } + + mix_buffer(mode, block->buf, block->buf_len); + + jcr->JobBytes += stat; + + } else { + my_errno = errno; + printf("\n"); + berrno be; + printf(_("Write failed at block %u. stat=%d ERR=%s\n"), block_num, + stat, be.bstrerror(my_errno)); + return false; + } + } + printf("\n"); + weofcmd(); + print_speed(jcr->JobBytes); + } + print_total_speed(); + printf("\n"); + return true; +} + + +static bool speed_test_bacula(fill_mode_t mode, uint64_t nb_gb, uint32_t nb) +{ + DEV_BLOCK *block = dcr->block; + char ed1[200]; + DEV_RECORD *rec; + uint64_t last_bytes = dev->VolCatInfo.VolCatBytes; + uint64_t written=0; + + nb_gb *= 1024*1024*1024; /* convert size from nb to GB */ + + init_total_speed(); + + empty_block(block); + rec = new_record(); + rec->data = check_pool_memory_size(rec->data, block->buf_len); + rec->data_len = block->buf_len-100; + + fill_buffer(mode, rec->data, rec->data_len); + + Pmsg3(0, _("Begin writing %i files of %sB with blocks of %u bytes.\n"), + nb, edit_uint64_with_suffix(nb_gb, ed1), block->buf_len); + + for (uint32_t j=0; jwrite_block_to_dev()) { + Pmsg0(0, _("\nError writing block to device.\n")); + goto bail_out; + } + + if ((block->BlockNumber % 500) == 0) { + printf("+"); + fflush(stdout); + } + written += dev->VolCatInfo.VolCatBytes - last_bytes; + last_bytes = dev->VolCatInfo.VolCatBytes; + mix_buffer(mode, rec->data, rec->data_len); + } + printf("\n"); + weofcmd(); + print_speed(written); + } + print_total_speed(); + printf("\n"); + free_record(rec); + return true; + +bail_out: + free_record(rec); + return false; +} + +/* TODO: use UAContext */ +static int btape_find_arg(const char *keyword) +{ + for (int i=1; i 0) { + file_size = atoi(argv[i]); + if (file_size > 100) { + Pmsg0(0, _("The file_size is too big, stop this test with Ctrl-c.\n")); + } + } + + i = btape_find_arg("nb_file"); + if (i > 0) { + nb_file = atoi(argv[i]); + } + + if (btape_find_arg("skip_zero") > 0) { + do_zero = false; + } + + if (btape_find_arg("skip_random") > 0) { + do_random = false; + } + + if (btape_find_arg("skip_raw") > 0) { + do_raw = false; + } + + if (btape_find_arg("skip_block") > 0) { + do_block = false; + } + + if (do_raw) { + dev->rewind(dcr); + if (do_zero) { + Pmsg0(0, _("Test with zero data, should give the " + "maximum throughput.\n")); + if (file_size) { + ok(speed_test_raw(FILL_ZERO, file_size, nb_file)); + } else { + ok(speed_test_raw(FILL_ZERO, 1, nb_file)); + ok(speed_test_raw(FILL_ZERO, 2, nb_file)); + ok(speed_test_raw(FILL_ZERO, 4, nb_file)); + } + } + + if (do_random) { + Pmsg0(0, _("Test with random data, should give the minimum " + "throughput.\n")); + if (file_size) { + ok(speed_test_raw(FILL_RANDOM, file_size, 
nb_file)); + } else { + ok(speed_test_raw(FILL_RANDOM, 1, nb_file)); + ok(speed_test_raw(FILL_RANDOM, 2, nb_file)); + ok(speed_test_raw(FILL_RANDOM, 4, nb_file)); + } + } + } + + if (do_block) { + dev->rewind(dcr); + if (do_zero) { + Pmsg0(0, _("Test with zero data and bacula block structure.\n")); + if (file_size) { + ok(speed_test_bacula(FILL_ZERO, file_size, nb_file)); + } else { + ok(speed_test_bacula(FILL_ZERO, 1, nb_file)); + ok(speed_test_bacula(FILL_ZERO, 2, nb_file)); + ok(speed_test_bacula(FILL_ZERO, 4, nb_file)); + } + } + + if (do_random) { + Pmsg0(0, _("Test with random data, should give the minimum " + "throughput.\n")); + if (file_size) { + ok(speed_test_bacula(FILL_RANDOM, file_size, nb_file)); + } else { + ok(speed_test_bacula(FILL_RANDOM, 1, nb_file)); + ok(speed_test_bacula(FILL_RANDOM, 2, nb_file)); + ok(speed_test_bacula(FILL_RANDOM, 4, nb_file)); + } + } + } +} + +const int num_recs = 10000; + +static bool write_two_files() +{ + DEV_BLOCK *block; + DEV_RECORD *rec; + int len, i, j; + int *p; + bool rc = false; /* bad return code */ + DEVICE *dev = dcr->dev; + + /* + * Set big max_file_size so that write_record_to_block + * doesn't insert any additional EOF marks + * Do calculation in 64 bits to avoid overflow. + */ + dev->max_file_size = (uint64_t)2 * (uint64_t)num_recs * (uint64_t)dev->max_block_size; + Pmsg2(-1, _("\n=== Write, rewind, and re-read test ===\n\n" + "I'm going to write %d records and an EOF\n" + "then write %d records and an EOF, then rewind,\n" + "and re-read the data to verify that it is correct.\n\n" + "This is an *essential* feature ...\n\n"), num_recs, num_recs); + + block = dcr->block; + empty_block(block); + rec = new_record(); + rec->data = check_pool_memory_size(rec->data, block->buf_len); + rec->data_len = block->buf_len-100; + len = rec->data_len/sizeof(i); + + if (!dev->rewind(dcr)) { + Pmsg1(0, _("Bad status from rewind. ERR=%s\n"), dev->bstrerror()); + goto bail_out; + } + + for (i=1; i<=num_recs; i++) { + p = (int *)rec->data; + for (j=0; jwrite_block_to_dev()) { + Pmsg0(0, _("Error writing block to device.\n")); + goto bail_out; + } + } + Pmsg2(0, _("Wrote %d blocks of %d bytes.\n"), num_recs, rec->data_len); + weofcmd(); + for (i=num_recs+1; i<=2*num_recs; i++) { + p = (int *)rec->data; + for (j=0; jwrite_block_to_dev()) { + Pmsg0(0, _("Error writing block to device.\n")); + goto bail_out; + } + } + Pmsg2(0, _("Wrote %d blocks of %d bytes.\n"), num_recs, rec->data_len); + weofcmd(); + if (dev->has_cap(CAP_TWOEOF)) { + weofcmd(); + } + rc = true; + +bail_out: + free_record(rec); + if (!rc) { + exit_code = 1; + } + return rc; + +} + +/* + * This test writes Bacula blocks to the tape in + * several files. It then rewinds the tape and attepts + * to read these blocks back checking the data. + */ +static bool write_read_test() +{ + DEV_BLOCK *block; + DEV_RECORD *rec; + bool rc = false; + int len, i, j; + int *p; + + rec = new_record(); + + if (!write_two_files()) { + goto bail_out; + } + + block = dcr->block; + empty_block(block); + + if (!dev->rewind(dcr)) { + Pmsg1(0, _("Bad status from rewind. 
ERR=%s\n"), dev->bstrerror()); + goto bail_out; + } else { + Pmsg0(0, _("Rewind OK.\n")); + } + + rec->data = check_pool_memory_size(rec->data, block->buf_len); + rec->data_len = block->buf_len-100; + len = rec->data_len/sizeof(i); + + /* Now read it back */ + for (i=1; i<=2*num_recs; i++) { +read_again: + if (!dcr->read_block_from_dev(NO_BLOCK_NUMBER_CHECK)) { + if (dev_state(dev, ST_EOF)) { + Pmsg0(-1, _("Got EOF on tape.\n")); + if (i == num_recs+1) { + goto read_again; + } + } + Pmsg2(0, _("Read block %d failed! ERR=%s\n"), i, dev->print_errmsg()); + goto bail_out; + } + memset(rec->data, 0, rec->data_len); + if (!read_record_from_block(dcr, rec)) { + berrno be; + Pmsg2(0, _("Read record failed. Block %d! ERR=%s\n"), i, be.bstrerror(dev->dev_errno)); + goto bail_out; + } + p = (int *)rec->data; + for (j=0; jblock; + DEV_RECORD *rec; + bool rc = false; + int len, j; + bool more = true; + int recno = 0; + int file = 0, blk = 0; + int *p; + bool got_eof = false; + + Pmsg0(0, _("Block position test\n")); + empty_block(block); + rec = new_record(); + rec->data = check_pool_memory_size(rec->data, block->buf_len); + rec->data_len = block->buf_len-100; + len = rec->data_len/sizeof(j); + + if (!dev->rewind(dcr)) { + Pmsg1(0, _("Bad status from rewind. ERR=%s\n"), dev->bstrerror()); + goto bail_out; + } else { + Pmsg0(0, _("Rewind OK.\n")); + } + + while (more) { + /* Set up next item to read based on where we are */ + /* At each step, recno is what we print for the "block number" + * and file, blk are the real positions to go to. + */ + switch (recno) { + case 0: + recno = 5; + file = 0; + blk = 4; + break; + case 5: + recno = 201; + file = 0; + blk = 200; + break; + case 201: + recno = num_recs; + file = 0; + blk = num_recs-1; + break; + case num_recs: + recno = num_recs+1; + file = 1; + blk = 0; + break; + case num_recs+1: + recno = num_recs+601; + file = 1; + blk = 600; + break; + case num_recs+601: + recno = 2*num_recs; + file = 1; + blk = num_recs-1; + break; + case 2*num_recs: + more = false; + continue; + } + Pmsg2(-1, _("Reposition to file:block %d:%d\n"), file, blk); + uint64_t addr = file; + addr = (addr<<32) + blk; + if (!dev->reposition(dcr, addr)) { + Pmsg0(0, _("Reposition error.\n")); + goto bail_out; + } +read_again: + if (!dcr->read_block_from_dev(NO_BLOCK_NUMBER_CHECK)) { + if (dev_state(dev, ST_EOF)) { + Pmsg0(-1, _("Got EOF on tape.\n")); + if (!got_eof) { + got_eof = true; + goto read_again; + } + } + Pmsg4(0, _("Read block %d failed! file=%d blk=%d. ERR=%s\n\n"), + recno, file, blk, dev->print_errmsg()); + Pmsg0(0, _("This may be because the tape drive block size is not\n" + " set to variable blocking as normally used by Bacula.\n" + " Please see the Tape Testing chapter in the manual and \n" + " look for using mt with defblksize and setoptions\n" + "If your tape drive block size is correct, then perhaps\n" + " your SCSI driver is *really* stupid and does not\n" + " correctly report the file:block after a FSF. In this\n" + " case try setting:\n" + " Fast Forward Space File = no\n" + " in your Device resource.\n")); + + goto bail_out; + } + memset(rec->data, 0, rec->data_len); + if (!read_record_from_block(dcr, rec)) { + berrno be; + Pmsg1(0, _("Read record failed! 
ERR=%s\n"), be.bstrerror(dev->dev_errno)); + goto bail_out; + } + p = (int *)rec->data; + for (j=0; jhas_cap(CAP_TWOEOF)) { + weofcmd(); + } + dev->close(dcr); /* release device */ + if (!open_the_device()) { + return -1; + } + rewindcmd(); + Pmsg0(0, _("Now moving to end of medium.\n")); + eodcmd(); + Pmsg2(-1, _("We should be in file 3. I am at file %d. %s\n"), + dev->file, dev->file == 3 ? _("This is correct!") : _("This is NOT correct!!!!")); + + if (dev->file != 3) { + return -1; + } + + Pmsg0(-1, _("\nNow the important part, I am going to attempt to append to the tape.\n\n")); + wrcmd(); + weofcmd(); + if (dev->has_cap(CAP_TWOEOF)) { + weofcmd(); + } + rewindcmd(); + Pmsg0(-1, _("Done appending, there should be no I/O errors\n\n")); + Pmsg0(-1, _("Doing Bacula scan of blocks:\n")); + scan_blocks(); + Pmsg0(-1, _("End scanning the tape.\n")); + Pmsg2(-1, _("We should be in file 4. I am at file %d. %s\n"), + dev->file, dev->file == 4 ? _("This is correct!") : _("This is NOT correct!!!!")); + + if (dev->file != 4) { + return -2; + } + return 1; +} + + +/* + * This test exercises the autochanger + */ +static int autochanger_test() +{ + POOLMEM *results, *changer; + int slot, status, loaded; + int timeout = dcr->device->max_changer_wait; + int sleep_time = 0; + + Dmsg1(100, "Max changer wait = %d sec\n", timeout); + if (!dev->has_cap(CAP_AUTOCHANGER)) { + return 1; + } + if (!(dcr->device && dcr->device->changer_name && dcr->device->changer_command)) { + Pmsg0(-1, _("\nAutochanger enabled, but no name or no command device specified.\n")); + return 1; + } + + Pmsg0(-1, _("\nAh, I see you have an autochanger configured.\n" + "To test the autochanger you must have a blank tape\n" + " that I can write on in Slot 1.\n")); + if (!get_cmd(_("\nDo you wish to continue with the Autochanger test? (y/n): "))) { + return 0; + } + if (cmd[0] != 'y' && cmd[0] != 'Y') { + return 0; + } + + Pmsg0(-1, _("\n\n=== Autochanger test ===\n\n")); + + results = get_pool_memory(PM_MESSAGE); + changer = get_pool_memory(PM_FNAME); + +try_again: + slot = 1; + dcr->VolCatInfo.Slot = slot; + /* Find out what is loaded, zero means device is unloaded */ + Pmsg0(-1, _("3301 Issuing autochanger \"loaded\" command.\n")); + changer = edit_device_codes(dcr, changer, + dcr->device->changer_command, "loaded"); + status = run_program(changer, timeout, results); + Dmsg3(100, "run_prog: %s stat=%d result=\"%s\"\n", changer, status, results); + if (status == 0) { + loaded = atoi(results); + } else { + berrno be; + Pmsg1(-1, _("3991 Bad autochanger command: %s\n"), changer); + Pmsg2(-1, _("3991 result=\"%s\": ERR=%s\n"), results, be.bstrerror(status)); + goto bail_out; + } + if (loaded) { + Pmsg1(-1, _("Slot %d loaded. I am going to unload it.\n"), loaded); + } else { + Pmsg0(-1, _("Nothing loaded in the drive. 
OK.\n")); + } + Dmsg1(100, "Results from loaded query=%s\n", results); + if (loaded) { + dcr->VolCatInfo.Slot = loaded; + /* We are going to load a new tape, so close the device */ + dev->close(dcr); + Pmsg2(-1, _("3302 Issuing autochanger \"unload %d %d\" command.\n"), + loaded, dev->drive_index); + changer = edit_device_codes(dcr, changer, + dcr->device->changer_command, "unload"); + status = run_program(changer, timeout, results); + Pmsg2(-1, _("unload status=%s %d\n"), status==0?_("OK"):_("Bad"), status); + if (status != 0) { + berrno be; + Pmsg1(-1, _("3992 Bad autochanger command: %s\n"), changer); + Pmsg2(-1, _("3992 result=\"%s\": ERR=%s\n"), results, be.bstrerror(status)); + } + } + + /* + * Load the Slot 1 + */ + + slot = 1; + dcr->VolCatInfo.Slot = slot; + Pmsg2(-1, _("3303 Issuing autochanger \"load %d %d\" command.\n"), + slot, dev->drive_index); + changer = edit_device_codes(dcr, changer, + dcr->device->changer_command, "load"); + Dmsg1(100, "Changer=%s\n", changer); + dev->close(dcr); + status = run_program(changer, timeout, results); + if (status == 0) { + Pmsg2(-1, _("3303 Autochanger \"load %d %d\" status is OK.\n"), + slot, dev->drive_index); + } else { + berrno be; + Pmsg1(-1, _("3993 Bad autochanger command: %s\n"), changer); + Pmsg2(-1, _("3993 result=\"%s\": ERR=%s\n"), results, be.bstrerror(status)); + goto bail_out; + } + + if (!open_the_device()) { + goto bail_out; + } + /* + * Start with sleep_time 0 then increment by 30 seconds if we get + * a failure. + */ + bmicrosleep(sleep_time, 0); + if (!dev->rewind(dcr) || !dev->weof(dcr, 1)) { + Pmsg1(0, _("Bad status from rewind. ERR=%s\n"), dev->bstrerror()); + dev->clrerror(-1); + Pmsg0(-1, _("\nThe test failed, probably because you need to put\n" + "a longer sleep time in the mtx-script in the load) case.\n" + "Adding a 30 second sleep and trying again ...\n")); + sleep_time += 30; + goto try_again; + } else { + Pmsg1(0, _("Rewound %s\n"), dev->print_name()); + } + + if (!dev->weof(dcr, 1)) { + Pmsg1(0, _("Bad status from weof. ERR=%s\n"), dev->bstrerror()); + goto bail_out; + } else { + Pmsg1(0, _("Wrote EOF to %s\n"), dev->print_name()); + } + + if (sleep_time) { + Pmsg1(-1, _("\nThe test worked this time. Please add:\n\n" + " sleep %d\n\n" + "to your mtx-changer script in the load) case.\n\n"), + sleep_time); + } else { + Pmsg0(-1, _("\nThe test autochanger worked!!\n\n")); + } + + free_pool_memory(changer); + free_pool_memory(results); + return 1; + + +bail_out: + free_pool_memory(changer); + free_pool_memory(results); + Pmsg0(-1, _("You must correct this error or the Autochanger will not work.\n")); + return -2; +} + +static void autochangercmd() +{ + autochanger_test(); +} + + +/* + * This test assumes that the append test has been done, + * then it tests the fsf function. + */ +static bool fsf_test() +{ + bool set_off = false; + + Pmsg0(-1, _("\n\n=== Forward space files test ===\n\n" + "This test is essential to Bacula.\n\n" + "I'm going to write five files then test forward spacing\n\n")); + argc = 1; + rewindcmd(); + wrcmd(); + weofcmd(); /* end file 0 */ + wrcmd(); + wrcmd(); + weofcmd(); /* end file 1 */ + wrcmd(); + wrcmd(); + wrcmd(); + weofcmd(); /* end file 2 */ + wrcmd(); + wrcmd(); + weofcmd(); /* end file 3 */ + wrcmd(); + weofcmd(); /* end file 4 */ + if (dev->has_cap(CAP_TWOEOF)) { + weofcmd(); + } + +test_again: + rewindcmd(); + Pmsg0(0, _("Now forward spacing 1 file.\n")); + if (!dev->fsf(1)) { + Pmsg1(0, _("Bad status from fsr. 
ERR=%s\n"), dev->bstrerror()); + goto bail_out; + } + Pmsg2(-1, _("We should be in file 1. I am at file %d. %s\n"), + dev->file, dev->file == 1 ? _("This is correct!") : _("This is NOT correct!!!!")); + + if (dev->file != 1) { + goto bail_out; + } + + Pmsg0(0, _("Now forward spacing 2 files.\n")); + if (!dev->fsf(2)) { + Pmsg1(0, _("Bad status from fsr. ERR=%s\n"), dev->bstrerror()); + goto bail_out; + } + Pmsg2(-1, _("We should be in file 3. I am at file %d. %s\n"), + dev->file, dev->file == 3 ? _("This is correct!") : _("This is NOT correct!!!!")); + + if (dev->file != 3) { + goto bail_out; + } + + rewindcmd(); + Pmsg0(0, _("Now forward spacing 4 files.\n")); + if (!dev->fsf(4)) { + Pmsg1(0, _("Bad status from fsr. ERR=%s\n"), dev->bstrerror()); + goto bail_out; + } + Pmsg2(-1, _("We should be in file 4. I am at file %d. %s\n"), + dev->file, dev->file == 4 ? _("This is correct!") : _("This is NOT correct!!!!")); + + if (dev->file != 4) { + goto bail_out; + } + if (set_off) { + Pmsg0(-1, _("The test worked this time. Please add:\n\n" + " Fast Forward Space File = no\n\n" + "to your Device resource for this drive.\n")); + } + + Pmsg0(-1, "\n"); + Pmsg0(0, _("Now forward spacing 1 more file.\n")); + if (!dev->fsf(1)) { + Pmsg1(0, _("Bad status from fsr. ERR=%s\n"), dev->bstrerror()); + } + Pmsg2(-1, _("We should be in file 5. I am at file %d. %s\n"), + dev->file, dev->file == 5 ? _("This is correct!") : _("This is NOT correct!!!!")); + if (dev->file != 5) { + goto bail_out; + } + Pmsg0(-1, _("\n=== End Forward space files test ===\n\n")); + return true; + +bail_out: + Pmsg0(-1, _("\nThe forward space file test failed.\n")); + if (dev->has_cap(CAP_FASTFSF)) { + Pmsg0(-1, _("You have Fast Forward Space File enabled.\n" + "I am turning it off then retrying the test.\n")); + dev->clear_cap(CAP_FASTFSF); + set_off = true; + goto test_again; + } + Pmsg0(-1, _("You must correct this error or Bacula will not work.\n" + "Some systems, e.g. OpenBSD, require you to set\n" + " Use MTIOCGET= no\n" + "in your device resource. Use with caution.\n")); + return false; +} + + + + + +/* + * This is a general test of Bacula's functions + * needed to read and write the tape. + */ +static void testcmd() +{ + int stat; + + if (!write_read_test()) { + exit_code = 1; + return; + } + if (!position_test()) { + exit_code = 1; + return; + } + + stat = append_test(); + if (stat == 1) { /* OK get out */ + goto all_done; + } + if (stat == -1) { /* first test failed */ + if (dev->has_cap(CAP_EOM) || dev->has_cap(CAP_FASTFSF)) { + Pmsg0(-1, _("\nAppend test failed. Attempting again.\n" + "Setting \"Hardware End of Medium = no\n" + " and \"Fast Forward Space File = no\n" + "and retrying append test.\n\n")); + dev->clear_cap(CAP_EOM); /* turn off eom */ + dev->clear_cap(CAP_FASTFSF); /* turn off fast fsf */ + stat = append_test(); + if (stat == 1) { + Pmsg0(-1, _("\n\nIt looks like the test worked this time, please add:\n\n" + " Hardware End of Medium = No\n\n" + " Fast Forward Space File = No\n" + "to your Device resource in the Storage conf file.\n")); + goto all_done; + } + if (stat == -1) { + Pmsg0(-1, _("\n\nThat appears *NOT* to have corrected the problem.\n")); + goto failed; + } + /* Wrong count after append */ + if (stat == -2) { + Pmsg0(-1, _("\n\nIt looks like the append failed. 
Attempting again.\n" + "Setting \"BSF at EOM = yes\" and retrying append test.\n")); + dev->capabilities |= CAP_BSFATEOM; /* backspace on eom */ + stat = append_test(); + if (stat == 1) { + Pmsg0(-1, _("\n\nIt looks like the test worked this time, please add:\n\n" + " Hardware End of Medium = No\n" + " Fast Forward Space File = No\n" + " BSF at EOM = yes\n\n" + "to your Device resource in the Storage conf file.\n")); + goto all_done; + } + } + + } +failed: + Pmsg0(-1, _("\nAppend test failed.\n\n" + "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" + "Unable to correct the problem. You MUST fix this\n" + "problem before Bacula can use your tape drive correctly\n" + "\nPerhaps running Bacula in fixed block mode will work.\n" + "Do so by setting:\n\n" + "Minimum Block Size = nnn\n" + "Maximum Block Size = nnn\n\n" + "in your Storage daemon's Device definition.\n" + "nnn must match your tape driver's block size, which\n" + "can be determined by reading your tape manufacturers\n" + "information, and the information on your kernel dirver.\n" + "Fixed block sizes, however, are not normally an ideal solution.\n" + "\n" + "Some systems, e.g. OpenBSD, require you to set\n" + " Use MTIOCGET= no\n" + "in your device resource. Use with caution.\n")); + exit_code = 1; + return; + } + +all_done: + Pmsg0(-1, _("\nThe above Bacula scan should have output identical to what follows.\n" + "Please double check it ...\n" + "=== Sample correct output ===\n" + "1 block of 64448 bytes in file 1\n" + "End of File mark.\n" + "2 blocks of 64448 bytes in file 2\n" + "End of File mark.\n" + "3 blocks of 64448 bytes in file 3\n" + "End of File mark.\n" + "1 block of 64448 bytes in file 4\n" + "End of File mark.\n" + "Total files=4, blocks=7, bytes = 451,136\n" + "=== End sample correct output ===\n\n" + "If the above scan output is not identical to the\n" + "sample output, you MUST correct the problem\n" + "or Bacula will not be able to write multiple Jobs to \n" + "the tape.\n\n")); + + if (stat == 1) { + if (!re_read_block_test()) { + exit_code = 1; + } + } + + if (!fsf_test()) { /* do fast forward space file test */ + exit_code = 1; + } + + autochanger_test(); /* do autochanger test */ + +} + +/* Forward space a file */ +static void fsfcmd() +{ + int num = 1; + if (argc > 1) { + num = atoi(argk[1]); + } + if (num <= 0) { + num = 1; + } + if (!dev->fsf(num)) { + Pmsg1(0, _("Bad status from fsf. ERR=%s\n"), dev->bstrerror()); + return; + } + if (num == 1) { + Pmsg0(0, _("Forward spaced 1 file.\n")); + } + else { + Pmsg1(0, _("Forward spaced %d files.\n"), num); + } +} + +/* Forward space a record */ +static void fsrcmd() +{ + int num = 1; + if (argc > 1) { + num = atoi(argk[1]); + } + if (num <= 0) { + num = 1; + } + if (!dev->fsr(num)) { + Pmsg1(0, _("Bad status from fsr. 
ERR=%s\n"), dev->bstrerror()); + return; + } + if (num == 1) { + Pmsg0(0, _("Forward spaced 1 record.\n")); + } + else { + Pmsg1(0, _("Forward spaced %d records.\n"), num); + } +} + +/* + * Read a Bacula block from the tape + */ +static void rbcmd() +{ + dev->open_device(dcr, OPEN_READ_ONLY); + dcr->read_block_from_dev(NO_BLOCK_NUMBER_CHECK); +} + +/* + * Write a Bacula block to the tape + */ +static void wrcmd() +{ + DEV_BLOCK *block = dcr->block; + DEV_RECORD *rec = dcr->rec; + int i; + + if (!dev->is_open()) { + open_the_device(); + } + Dsm_check(200); + empty_block(block); + if (verbose > 1) { + dump_block(dcr->dev, block, "test"); + } + + i = block->buf_len - 100; + ASSERT (i > 0); + rec->data = check_pool_memory_size(rec->data, i); + memset(rec->data, i & 0xFF, i); + rec->data_len = i; + Dsm_check(200); + if (!write_record_to_block(dcr, rec)) { + Pmsg0(0, _("Error writing record to block.\n")); + goto bail_out; + } + if (!dcr->write_block_to_dev()) { + Pmsg0(0, _("Error writing block to device.\n")); + goto bail_out; + } else { + Pmsg1(0, _("Wrote one record of %d bytes.\n"), i); + } + Pmsg0(0, _("Wrote block to device.\n")); + +bail_out: + Dsm_check(200); +} + +/* + * Read a record from the tape + */ +static void rrcmd() +{ + char *buf; + int stat, len; + + if (!get_cmd(_("Enter length to read: "))) { + return; + } + len = atoi(cmd); + if (len < 0 || len > 1000000) { + Pmsg0(0, _("Bad length entered, using default of 1024 bytes.\n")); + len = 1024; + } + buf = (char *)malloc(len); + stat = read(dev->fd(), buf, len); + if (stat > 0 && stat <= len) { + errno = 0; + } + berrno be; + Pmsg3(0, _("Read of %d bytes gives stat=%d. ERR=%s\n"), + len, stat, be.bstrerror()); + free(buf); +} + + +/* + * Scan tape by reading block by block. Report what is + * on the tape. Note, this command does raw reads, and as such + * will not work with fixed block size devices. + */ +static void scancmd() +{ + int stat; + int blocks, tot_blocks, tot_files; + int block_size; + uint64_t bytes; + char ec1[50]; + + + blocks = block_size = tot_blocks = 0; + bytes = 0; + if (dev->at_eot()) { + Pmsg0(0, _("End of tape\n")); + return; + } + dev->update_pos(dcr); + tot_files = dev->file; + Pmsg1(0, _("Starting scan at file %u\n"), dev->file); + for (;;) { + if ((stat = read(dev->fd(), buf, sizeof(buf))) < 0) { + berrno be; + dev->clrerror(-1); + Mmsg2(dev->errmsg, _("read error on %s. ERR=%s.\n"), + dev->dev_name, be.bstrerror()); + Pmsg2(0, _("Bad status from read %d. 
ERR=%s\n"), stat, dev->bstrerror()); + if (blocks > 0) { + if (blocks==1) { + printf(_("1 block of %d bytes in file %d\n"), block_size, dev->file); + } + else { + printf(_("%d blocks of %d bytes in file %d\n"), blocks, block_size, dev->file); + } + } + return; + } + Dmsg1(200, "read status = %d\n", stat); +/* sleep(1); */ + if (stat != block_size) { + dev->update_pos(dcr); + if (blocks > 0) { + if (blocks==1) { + printf(_("1 block of %d bytes in file %d\n"), block_size, dev->file); + } + else { + printf(_("%d blocks of %d bytes in file %d\n"), blocks, block_size, dev->file); + } + blocks = 0; + } + block_size = stat; + } + if (stat == 0) { /* EOF */ + dev->update_pos(dcr); + printf(_("End of File mark.\n")); + /* Two reads of zero means end of tape */ + if (dev->at_eof()) { + dev->set_ateot(); + } else { + dev->set_ateof(); + } + if (dev->at_eot()) { + printf(_("End of tape\n")); + break; + } + } else { /* Got data */ + dev->clear_eof(); + blocks++; + tot_blocks++; + bytes += stat; + } + } + dev->update_pos(dcr); + tot_files = dev->file - tot_files; + printf(_("Total files=%d, blocks=%d, bytes = %s\n"), tot_files, tot_blocks, + edit_uint64_with_commas(bytes, ec1)); +} + + +/* + * Scan tape by reading Bacula block by block. Report what is + * on the tape. This function reads Bacula blocks, so if your + * Device resource is correctly defined, it should work with + * either variable or fixed block sizes. + */ +static void scan_blocks() +{ + int blocks, tot_blocks, tot_files; + uint32_t block_size; + uint64_t bytes; + DEV_BLOCK *block = dcr->block; + char ec1[50]; + char buf1[100], buf2[100]; + + blocks = block_size = tot_blocks = 0; + bytes = 0; + + empty_block(block); + dev->update_pos(dcr); + tot_files = dev->file; + for (;;) { + if (!dcr->read_block_from_device(NO_BLOCK_NUMBER_CHECK)) { + Dmsg1(100, "!read_block(): ERR=%s\n", dev->bstrerror()); + if (dev->at_eot()) { + if (blocks > 0) { + if (blocks==1) { + printf(_("1 block of %d bytes in file %d\n"), block_size, dev->file); + } + else { + printf(_("%d blocks of %d bytes in file %d\n"), blocks, block_size, dev->file); + } + blocks = 0; + } + goto bail_out; + } + if (dev->state & ST_EOF) { + if (blocks > 0) { + if (blocks==1) { + printf(_("1 block of %d bytes in file %d\n"), block_size, dev->file); + } + else { + printf(_("%d blocks of %d bytes in file %d\n"), blocks, block_size, dev->file); + } + blocks = 0; + } + printf(_("End of File mark.\n")); + continue; + } + if (dev->state & ST_SHORT) { + if (blocks > 0) { + if (blocks==1) { + printf(_("1 block of %d bytes in file %d\n"), block_size, dev->file); + } + else { + printf(_("%d blocks of %d bytes in file %d\n"), blocks, block_size, dev->file); + } + blocks = 0; + } + printf(_("Short block read.\n")); + continue; + } + printf(_("Error reading block. 
ERR=%s\n"), dev->print_errmsg()); + goto bail_out; + } + if (block->block_len != block_size) { + if (blocks > 0) { + if (blocks==1) { + printf(_("1 block of %d bytes in file %d\n"), block_size, dev->file); + } + else { + printf(_("%d blocks of %d bytes in file %d\n"), blocks, block_size, dev->file); + } + blocks = 0; + } + block_size = block->block_len; + } + blocks++; + tot_blocks++; + bytes += block->block_len; + Dmsg7(100, "Blk_blk=%u file,blk=%u,%u blen=%u bVer=%d SessId=%u SessTim=%u\n", + block->BlockNumber, dev->file, dev->block_num, block->block_len, block->BlockVer, + block->VolSessionId, block->VolSessionTime); + if (verbose == 1) { + DEV_RECORD *rec = new_record(); + read_record_from_block(dcr, rec); + Pmsg9(-1, _("Block=%u file,blk=%u,%u blen=%u First rec FI=%s SessId=%u SessTim=%u Strm=%s rlen=%d\n"), + block->BlockNumber, dev->file, dev->block_num, block->block_len, + FI_to_ascii(buf1, rec->FileIndex), rec->VolSessionId, rec->VolSessionTime, + stream_to_ascii(buf2, rec->Stream, rec->FileIndex), rec->data_len); + rec->remainder = 0; + free_record(rec); + } else if (verbose > 1) { + dump_block(dcr->dev, block, ""); + } + + } +bail_out: + tot_files = dev->file - tot_files; + printf(_("Total files=%d, blocks=%d, bytes = %s\n"), tot_files, tot_blocks, + edit_uint64_with_commas(bytes, ec1)); +} + + +static void statcmd() +{ + int64_t debug = debug_level; + debug_level = 30; + Pmsg2(0, _("Device status: %u. ERR=%s\n"), status_dev(dev), dev->bstrerror()); +#ifdef xxxx + dump_volume_label(dev); +#endif + debug_level = debug; +} + + +/* + * First we label the tape, then we fill + * it with data get a new tape and write a few blocks. + */ +static void fillcmd() +{ + DEV_RECORD rec; + DEV_BLOCK *block = dcr->block; + char ec1[50], ec2[50]; + char buf1[100], buf2[100]; + uint64_t write_eof; + uint64_t rate; + uint32_t min_block_size; + int fd; + struct tm tm; + + ok = true; + stop = 0; + vol_num = 0; + last_file = 0; + last_block_num = 0; + BlockNumber = 0; + exit_code = 0; + + Pmsg1(-1, _("\n" +"This command simulates Bacula writing to a tape.\n" +"It requires either one or two blank tapes, which it\n" +"will label and write.\n\n" +"If you have an autochanger configured, it will use\n" +"the tapes that are in slots 1 and 2, otherwise, you will\n" +"be prompted to insert the tapes when necessary.\n\n" +"It will print a status approximately\n" +"every 322 MB, and write an EOF every %s. If you have\n" +"selected the simple test option, after writing the first tape\n" +"it will rewind it and re-read the last block written.\n\n" +"If you have selected the multiple tape test, when the first tape\n" +"fills, it will ask for a second, and after writing a few more \n" +"blocks, it will stop. Then it will begin re-reading the\n" +"two tapes.\n\n" +"This may take a long time -- hours! 
...\n\n"), + edit_uint64_with_suffix(dev->max_file_size, buf1)); + + get_cmd(_("Do you want to run the simplified test (s) with one tape\n" + "or the complete multiple tape (m) test: (s/m) ")); + if (cmd[0] == 's') { + Pmsg0(-1, _("Simple test (single tape) selected.\n")); + simple = true; + } else if (cmd[0] == 'm') { + Pmsg0(-1, _("Multiple tape test selected.\n")); + simple = false; + } else { + Pmsg0(000, _("Command aborted.\n")); + exit_code = 1; + return; + } + + Dmsg1(20, "Begin append device=%s\n", dev->print_name()); + Dmsg1(20, "MaxVolSize=%s\n", edit_uint64(dev->max_volume_size, ec1)); + + /* Use fixed block size to simplify read back */ + min_block_size = dev->min_block_size; + dev->min_block_size = dev->max_block_size; + write_eof = dev->max_file_size / REC_SIZE; /*compute when we add EOF*/ + set_volume_name("TestVolume1", 1); + dir_ask_sysop_to_create_appendable_volume(dcr); + dev->set_append(); /* force volume to be relabeled */ + + /* + * Acquire output device for writing. Note, after acquiring a + * device, we MUST release it, which is done at the end of this + * subroutine. + */ + Dmsg0(100, "just before acquire_device\n"); + dcr->setVolCatName(dcr->VolumeName); + if (!acquire_device_for_append(dcr)) { + Pmsg0(000, "Could not acquire_device_for_append()\n"); + jcr->setJobStatus(JS_ErrorTerminated); + exit_code = 1; + return; + } + block = jcr->dcr->block; + + Dmsg0(100, "Just after acquire_device_for_append\n"); + /* + * Write Begin Session Record + */ + if (!write_session_label(dcr, SOS_LABEL)) { + jcr->setJobStatus(JS_ErrorTerminated); + Jmsg1(jcr, M_FATAL, 0, _("Write session label failed. ERR=%s\n"), + dev->bstrerror()); + ok = false; + } + Pmsg0(-1, _("Wrote Start of Session label.\n")); + + memset(&rec, 0, sizeof(rec)); + rec.data = get_memory(100000); /* max record size */ + rec.data_len = REC_SIZE; + + /* + * Put some random data in the record + */ + fill_buffer(FILL_RANDOM, rec.data, rec.data_len); + + /* + * Generate data as if from File daemon, write to device + */ + jcr->dcr->VolFirstIndex = 0; + time(&jcr->run_time); /* start counting time for rates */ + (void)localtime_r(&jcr->run_time, &tm); + strftime(buf1, sizeof(buf1), "%H:%M:%S", &tm); + if (simple) { + Pmsg1(-1, _("%s Begin writing Bacula records to tape ...\n"), buf1); + } else { + Pmsg1(-1, _("%s Begin writing Bacula records to first tape ...\n"), buf1); + } + for (file_index = 0; ok && !job_canceled(jcr); ) { + rec.VolSessionId = jcr->VolSessionId; + rec.VolSessionTime = jcr->VolSessionTime; + rec.FileIndex = ++file_index; + rec.Stream = STREAM_FILE_DATA; + rec.maskedStream = STREAM_FILE_DATA; + + /* Mix up the data just a bit */ + mix_buffer(FILL_RANDOM, rec.data, rec.data_len); + + Dmsg4(250, "before write_rec FI=%d SessId=%d Strm=%s len=%d\n", + rec.FileIndex, rec.VolSessionId, + stream_to_ascii(buf1, rec.Stream, rec.FileIndex), + rec.data_len); + + while (!write_record_to_block(dcr, &rec)) { + /* + * When we get here we have just filled a block + */ + Dmsg2(150, "!write_record_to_block data_len=%d rem=%d\n", rec.data_len, + rec.remainder); + + /* Write block to tape */ + if (!flush_block(block, 1)) { + Pmsg0(000, _("Flush block failed.\n")); + exit_code = 1; + break; + } + + /* Every 5000 blocks (approx 322MB) report where we are. 
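+ * (5000 blocks x 64512 bytes per block = 322,560,000 bytes, hence the
+ * "approx 322MB", assuming the default 64512-byte Bacula block size used
+ * elsewhere in this file.) The rate reported is simply the volume bytes
+ * written so far divided by the elapsed wall-clock seconds, clamped to a
+ * minimum of one second, e.g.:
+ *
+ *    time_t elapsed = time(NULL) - jcr->run_time;
+ *    if (elapsed <= 0) {
+ *       elapsed = 1;                  // avoid divide by zero
+ *    }
+ *    uint64_t rate = dev->VolCatInfo.VolCatBytes / elapsed;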
+ */ + if ((block->BlockNumber % 5000) == 0) { + now = time(NULL); + now -= jcr->run_time; + if (now <= 0) { + now = 1; /* prevent divide error */ + } + rate = dev->VolCatInfo.VolCatBytes / now; + Pmsg5(-1, _("Wrote block=%u, file,blk=%u,%u VolBytes=%s rate=%sB/s\n"), + block->BlockNumber, dev->file, dev->block_num, + edit_uint64_with_commas(dev->VolCatInfo.VolCatBytes, ec1), + edit_uint64_with_suffix(rate, ec2)); + } + /* Every X blocks (dev->max_file_size) write an EOF. + */ + if ((block->BlockNumber % write_eof) == 0) { + now = time(NULL); + (void)localtime_r(&now, &tm); + strftime(buf1, sizeof(buf1), "%H:%M:%S", &tm); + Pmsg1(-1, _("%s Flush block, write EOF\n"), buf1); + flush_block(block, 0); +#ifdef needed_xxx + dev->weof(dcr, 1); +#endif + } + + /* Get out after writing 1000 blocks to the second tape */ + if (++BlockNumber > 1000 && stop != 0) { /* get out */ + Pmsg0(000, _("Wrote 1000 blocks on second tape. Done.\n")); + break; + } + } + if (!ok) { + Pmsg0(000, _("Not OK\n")); + exit_code = 1; + break; + } + jcr->JobBytes += rec.data_len; /* increment bytes this job */ + Dmsg4(190, "write_record FI=%s SessId=%d Strm=%s len=%d\n", + FI_to_ascii(buf1, rec.FileIndex), rec.VolSessionId, + stream_to_ascii(buf2, rec.Stream, rec.FileIndex), rec.data_len); + + /* Get out after writing 1000 blocks to the second tape */ + if (BlockNumber > 1000 && stop != 0) { /* get out */ + char ed1[50]; + Pmsg1(-1, "Done writing %s records ...\n", + edit_uint64_with_commas(write_count, ed1)); + break; + } + } /* end big for loop */ + + if (vol_num > 1) { + Dmsg0(100, "Write_end_session_label()\n"); + /* Create Job status for end of session label */ + if (!job_canceled(jcr) && ok) { + jcr->setJobStatus(JS_Terminated); + } else if (!ok) { + Pmsg0(000, _("Job canceled.\n")); + jcr->setJobStatus(JS_ErrorTerminated); + exit_code = 1; + } + if (!write_session_label(dcr, EOS_LABEL)) { + Pmsg1(000, _("Error writing end session label. ERR=%s\n"), dev->bstrerror()); + ok = false; + exit_code = 1; + } + /* Write out final block of this session */ + if (!dcr->write_block_to_device()) { + Pmsg0(-1, _("Set ok=false after write_block_to_device.\n")); + ok = false; + exit_code = 1; + } + Pmsg0(-1, _("Wrote End of Session label.\n")); + + /* Save last block info for second tape */ + last_block_num2 = last_block_num; + last_file2 = last_file; + Dmsg1(000, "=== free_block %p\n", last_block2); + free_block(last_block2); + last_block2 = dup_block(last_block); + } + + sprintf(buf, "%s/btape.state", working_directory); + fd = open(buf, O_CREAT|O_TRUNC|O_WRONLY, 0640); + if (fd >= 0) { + write(fd, &btape_state_level, sizeof(btape_state_level)); + write(fd, &simple, sizeof(simple)); + write(fd, &last_block_num1, sizeof(last_block_num1)); + write(fd, &last_block_num2, sizeof(last_block_num2)); + write(fd, &last_file1, sizeof(last_file1)); + write(fd, &last_file2, sizeof(last_file2)); + write(fd, last_block1->buf, last_block1->buf_len); + write(fd, last_block2->buf, last_block2->buf_len); + write(fd, first_block->buf, first_block->buf_len); + close(fd); + Pmsg2(0, _("Wrote state file last_block_num1=%d last_block_num2=%d\n"), + last_block_num1, last_block_num2); + } else { + berrno be; + Pmsg2(0, _("Could not create state file: %s ERR=%s\n"), buf, + be.bstrerror()); + exit_code = 1; + ok = false; + } + + now = time(NULL); + (void)localtime_r(&now, &tm); + strftime(buf1, sizeof(buf1), "%H:%M:%S", &tm); + if (ok) { + if (simple) { + Pmsg3(0, _("\n\n%s Done filling tape at %d:%d. 
Now beginning re-read of tape ...\n"), + buf1, jcr->dcr->dev->file, jcr->dcr->dev->block_num); + } else { + Pmsg3(0, _("\n\n%s Done filling tapes at %d:%d. Now beginning re-read of first tape ...\n"), + buf1, jcr->dcr->dev->file, jcr->dcr->dev->block_num); + } + + jcr->dcr->block = block; + if (!do_unfill()) { + Pmsg0(000, _("do_unfill failed.\n")); + exit_code = 1; + ok = false; + } + } else { + Pmsg1(000, _("%s: Error during test.\n"), buf1); + } + dev->min_block_size = min_block_size; + free_memory(rec.data); +} + +/* + * Read two tapes written by the "fill" command and ensure + * that the data is valid. If stop==1 we simulate full read back + * of two tapes. If stop==-1 we simply read the last block and + * verify that it is correct. + */ +static void unfillcmd() +{ + int fd; + + exit_code = 0; + last_block1 = dev->new_block(dcr); + last_block2 = dev->new_block(dcr); + first_block = dev->new_block(dcr); + sprintf(buf, "%s/btape.state", working_directory); + fd = open(buf, O_RDONLY); + if (fd >= 0) { + uint32_t state_level; + read(fd, &state_level, sizeof(btape_state_level)); + read(fd, &simple, sizeof(simple)); + read(fd, &last_block_num1, sizeof(last_block_num1)); + read(fd, &last_block_num2, sizeof(last_block_num2)); + read(fd, &last_file1, sizeof(last_file1)); + read(fd, &last_file2, sizeof(last_file2)); + read(fd, last_block1->buf, last_block1->buf_len); + read(fd, last_block2->buf, last_block2->buf_len); + read(fd, first_block->buf, first_block->buf_len); + close(fd); + if (state_level != btape_state_level) { + Pmsg0(-1, _("\nThe state file level has changed. You must redo\n" + "the fill command.\n")); + exit_code = 1; + return; + } + } else { + berrno be; + Pmsg2(-1, _("\nCould not find the state file: %s ERR=%s\n" + "You must redo the fill command.\n"), buf, be.bstrerror()); + exit_code = 1; + return; + } + if (!do_unfill()) { + exit_code = 1; + } + this_block = NULL; +} + +/* + * This is the second part of the fill command. After the tape or + * tapes are written, we are called here to reread parts, particularly + * the last block. + */ +static bool do_unfill() +{ + DEV_BLOCK *block = dcr->block; + int autochanger; + bool rc = false; + uint64_t addr; + + dumped = 0; + VolBytes = 0; + LastBlock = 0; + + Pmsg0(000, "Enter do_unfill\n"); + dev->set_cap(CAP_ANONVOLS); /* allow reading any volume */ + dev->clear_cap(CAP_LABEL); /* don't label anything here */ + + end_of_tape = 0; + + time(&jcr->run_time); /* start counting time for rates */ + stop = 0; + file_index = 0; + Dmsg1(900, "=== free_block %p\n", last_block); + free_block(last_block); + last_block = NULL; + last_block_num = last_block_num1; + last_file = last_file1; + last_block = last_block1; + + free_restore_volume_list(jcr); + jcr->bsr = NULL; + bstrncpy(dcr->VolumeName, "TestVolume1|TestVolume2", sizeof(dcr->VolumeName)); + create_restore_volume_list(jcr, true); + if (jcr->VolList != NULL) { + jcr->VolList->Slot = 1; + if (jcr->VolList->next != NULL) { + jcr->VolList->next->Slot = 2; + } + } + + set_volume_name("TestVolume1", 1); + + if (!simple) { + /* Multiple Volume tape */ + /* Close device so user can use autochanger if desired */ + if (dev->has_cap(CAP_OFFLINEUNMOUNT)) { + dev->offline(dcr); + } + autochanger = autoload_device(dcr, 1, NULL); + if (autochanger != 1) { + Pmsg1(100, "Autochanger returned: %d\n", autochanger); + dev->close(dcr); + get_cmd(_("Mount first tape. 
Press enter when ready: ")); + Pmsg0(000, "\n"); + } + } + + dev->close(dcr); + dev->num_writers = 0; + dcr->clear_writing(); + if (!acquire_device_for_read(dcr)) { + Pmsg1(-1, "%s", dev->print_errmsg()); + goto bail_out; + } + /* + * We now have the first tape mounted. + * Note, re-reading last block may have caused us to + * loose track of where we are (block number unknown). + */ + Pmsg0(-1, _("Rewinding.\n")); + if (!dev->rewind(dcr)) { /* get to a known place on tape */ + goto bail_out; + } + /* Read the first 10000 records */ + Pmsg2(-1, _("Reading the first 10000 records from %u:%u.\n"), + dev->file, dev->block_num); + quickie_count = 0; + read_records(dcr, quickie_cb, my_mount_next_read_volume); + Pmsg4(-1, _("Reposition from %u:%u to %u:%u\n"), dev->file, dev->block_num, + last_file, last_block_num); + addr = last_file; + addr = (addr << 32) + last_block_num; + if (!dev->reposition(dcr, addr)) { + Pmsg1(-1, _("Reposition error. ERR=%s\n"), dev->bstrerror()); + goto bail_out; + } + Pmsg1(-1, _("Reading block %u.\n"), last_block_num); + if (!dcr->read_block_from_device(NO_BLOCK_NUMBER_CHECK)) { + Pmsg1(-1, _("Error reading block: ERR=%s\n"), dev->print_errmsg()); + goto bail_out; + } + if (compare_blocks(last_block, block)) { + if (simple) { + Pmsg0(-1, _("\nThe last block on the tape matches. Test succeeded.\n\n")); + rc = true; + } else { + Pmsg0(-1, _("\nThe last block of the first tape matches.\n\n")); + } + } + if (simple) { + goto bail_out; + } + + /* restore info for last block on second Volume */ + last_block_num = last_block_num2; + last_file = last_file2; + last_block = last_block2; + + /* Multiple Volume tape */ + /* Close device so user can use autochanger if desired */ + if (dev->has_cap(CAP_OFFLINEUNMOUNT)) { + dev->offline(dcr); + } + + set_volume_name("TestVolume2", 2); + + autochanger = autoload_device(dcr, 1, NULL); + if (autochanger != 1) { + Pmsg1(100, "Autochanger returned: %d\n", autochanger); + dev->close(dcr); + get_cmd(_("Mount second tape. Press enter when ready: ")); + Pmsg0(000, "\n"); + } + + dev->clear_read(); + dcr->clear_writing(); + if (!acquire_device_for_read(dcr)) { + Pmsg1(-1, "%s", dev->print_errmsg()); + goto bail_out; + } + + /* Space to "first" block which is last block not written + * on the previous tape. + */ + Pmsg2(-1, _("Reposition from %u:%u to 0:1\n"), dev->file, dev->block_num); + addr = 1; + if (!dev->reposition(dcr, addr)) { + Pmsg1(-1, _("Reposition error. ERR=%s\n"), dev->bstrerror()); + goto bail_out; + } + Pmsg1(-1, _("Reading block %d.\n"), dev->block_num); + if (!dcr->read_block_from_device(NO_BLOCK_NUMBER_CHECK)) { + Pmsg1(-1, _("Error reading block: ERR=%s\n"), dev->print_errmsg()); + goto bail_out; + } + if (compare_blocks(first_block, block)) { + Pmsg0(-1, _("\nThe first block on the second tape matches.\n\n")); + } + + /* Now find and compare the last block */ + Pmsg4(-1, _("Reposition from %u:%u to %u:%u\n"), dev->file, dev->block_num, + last_file, last_block_num); + addr = last_file; + addr = (addr<<32) + last_block_num; + if (!dev->reposition(dcr, addr)) { + Pmsg1(-1, _("Reposition error. ERR=%s\n"), dev->bstrerror()); + goto bail_out; + } + Pmsg1(-1, _("Reading block %d.\n"), dev->block_num); + if (!dcr->read_block_from_device(NO_BLOCK_NUMBER_CHECK)) { + Pmsg1(-1, _("Error reading block: ERR=%s\n"), dev->print_errmsg()); + goto bail_out; + } + if (compare_blocks(last_block, block)) { + Pmsg0(-1, _("\nThe last block on the second tape matches. 
Test succeeded.\n\n")); + rc = true; + } + +bail_out: + free_block(last_block1); + free_block(last_block2); + free_block(first_block); + last_block = first_block = last_block1 = last_block2 = NULL; + return rc; +} + +/* Read 10000 records then stop */ +static bool quickie_cb(DCR *dcr, DEV_RECORD *rec) +{ + DEVICE *dev = dcr->dev; + quickie_count++; + if (quickie_count == 10000) { + Pmsg2(-1, _("10000 records read now at %d:%d\n"), dev->file, dev->block_num); + } + return quickie_count < 10000; +} + +static bool compare_blocks(DEV_BLOCK *last_block, DEV_BLOCK *block) +{ + char *p, *q; + union { + uint32_t CheckSum; + uint32_t block_len; + }; + ser_declare; + + p = last_block->buf; + q = block->buf; + unser_begin(q, BLKHDR2_LENGTH); + unser_uint32(CheckSum); + unser_uint32(block_len); + while (q < (block->buf+block_len)) { + if (*p == *q) { + p++; + q++; + continue; + } + Pmsg0(-1, "\n"); + dump_block(NULL, last_block, _("Last block written")); + Pmsg0(-1, "\n"); + dump_block(NULL, block, _("Block read back")); + Pmsg1(-1, _("\n\nThe blocks differ at byte %u\n"), p - last_block->buf); + Pmsg0(-1, _("\n\n!!!! The last block written and the block\n" + "that was read back differ. The test FAILED !!!!\n" + "This must be corrected before you use Bacula\n" + "to write multi-tape Volumes.!!!!\n")); + return false; + } + if (verbose) { + dump_block(NULL, last_block, _("Last block written")); + dump_block(NULL, block, _("Block read back")); + } + return true; +} + +/* + * Write current block to tape regardless of whether or + * not it is full. If the tape fills, attempt to + * acquire another tape. + */ +static int flush_block(DEV_BLOCK *block, int dump) +{ + char ec1[50], ec2[50]; + uint64_t rate; + DEV_BLOCK *tblock; + uint32_t this_file, this_block_num; + + dev->rLock(false); + if (!this_block) { + this_block = dev->new_block(dcr); + } + if (!last_block) { + last_block = dev->new_block(dcr); + } + /* Copy block */ + this_file = dev->file; + this_block_num = dev->block_num; + if (!dcr->write_block_to_dev()) { + Pmsg3(000, _("Last block at: %u:%u this_dev_block_num=%d\n"), + last_file, last_block_num, this_block_num); + if (vol_num == 1) { + /* + * This is 1st tape, so save first tape info separate + * from second tape info + */ + last_block_num1 = last_block_num; + last_file1 = last_file; + last_block1 = dup_block(last_block); + last_block2 = dup_block(last_block); + first_block = dup_block(block); /* first block second tape */ + } + if (verbose) { + Pmsg3(000, _("Block not written: FileIndex=%d blk_block=%u Size=%u\n"), + file_index, block->BlockNumber, block->block_len); + dump_block(dev, last_block, _("Last block written")); + Pmsg0(-1, "\n"); + dump_block(dev, block, _("Block not written")); + } + if (stop == 0) { + eot_block = block->BlockNumber; + eot_block_len = block->block_len; + eot_FileIndex = file_index; + stop = 1; + } + now = time(NULL); + now -= jcr->run_time; + if (now <= 0) { + now = 1; /* don't divide by zero */ + } + rate = dev->VolCatInfo.VolCatBytes / now; + vol_size = dev->VolCatInfo.VolCatBytes; + Pmsg4(000, _("End of tape %d:%d. Volume Bytes=%s. Write rate = %sB/s\n"), + dev->file, dev->block_num, + edit_uint64_with_commas(dev->VolCatInfo.VolCatBytes, ec1), + edit_uint64_with_suffix(rate, ec2)); + + if (simple) { + stop = -1; /* stop, but do simplified test */ + } else { + /* Full test in progress */ + if (!fixup_device_block_write_error(jcr->dcr)) { + Pmsg1(000, _("Cannot fixup device error. 
%s\n"), dev->bstrerror()); + ok = false; + dev->Unlock(); + return 0; + } + BlockNumber = 0; /* start counting for second tape */ + } + dev->Unlock(); + return 1; /* end of tape reached */ + } + + /* Save contents after write so that the header is serialized */ + memcpy(this_block->buf, block->buf, this_block->buf_len); + + /* + * Note, we always read/write to block, but we toggle + * copying it to one or another of two allocated blocks. + * Switch blocks so that the block just successfully written is + * always in last_block. + */ + tblock = last_block; + last_block = this_block; + this_block = tblock; + last_file = this_file; + last_block_num = this_block_num; + + dev->Unlock(); + return 1; +} + + +/* + * First we label the tape, then we fill + * it with data get a new tape and write a few blocks. + */ +static void qfillcmd() +{ + DEV_BLOCK *block = dcr->block; + DEV_RECORD *rec = dcr->rec; + int i, count; + + Pmsg0(0, _("Test writing blocks of 64512 bytes to tape.\n")); + + get_cmd(_("How many blocks do you want to write? (1000): ")); + + count = atoi(cmd); + if (count <= 0) { + count = 1000; + } + + Dsm_check(200); + + i = block->buf_len - 100; + ASSERT (i > 0); + rec->data = check_pool_memory_size(rec->data, i); + memset(rec->data, i & 0xFF, i); + rec->data_len = i; + rewindcmd(); + init_speed(); + + Pmsg1(0, _("Begin writing %d Bacula blocks to tape ...\n"), count); + for (i=0; i < count; i++) { + if (i % 100 == 0) { + printf("+"); + fflush(stdout); + } + if (!write_record_to_block(dcr, rec)) { + Pmsg0(0, _("Error writing record to block.\n")); + goto bail_out; + } + if (!dcr->write_block_to_dev()) { + Pmsg0(0, _("Error writing block to device.\n")); + goto bail_out; + } + } + printf("\n"); + print_speed(dev->VolCatInfo.VolCatBytes); + weofcmd(); + if (dev->has_cap(CAP_TWOEOF)) { + weofcmd(); + } + rewindcmd(); + scan_blocks(); + +bail_out: + Dsm_check(200); +} + +/* + * Fill a tape using raw write() command + */ +static void rawfill_cmd() +{ + DEV_BLOCK *block = dcr->block; + int stat; + uint32_t block_num = 0; + uint32_t *p; + int my_errno; + + fill_buffer(FILL_RANDOM, block->buf, block->buf_len); + init_speed(); + + p = (uint32_t *)block->buf; + Pmsg1(0, _("Begin writing raw blocks of %u bytes.\n"), block->buf_len); + for ( ;; ) { + *p = block_num; + stat = dev->d_write(dev->fd(), block->buf, block->buf_len); + if (stat == (int)block->buf_len) { + if ((block_num++ % 100) == 0) { + printf("+"); + fflush(stdout); + } + + mix_buffer(FILL_RANDOM, block->buf, block->buf_len); + + jcr->JobBytes += stat; + continue; + } + break; + } + my_errno = errno; + printf("\n"); + berrno be; + printf(_("Write failed at block %u. 
stat=%d ERR=%s\n"), block_num, stat, + be.bstrerror(my_errno)); + + print_speed(jcr->JobBytes); + weofcmd(); +} + + + +struct cmdstruct { const char *key; void (*func)(); const char *help; }; +static struct cmdstruct commands[] = { + {NT_("autochanger"),autochangercmd, _("test autochanger")}, + {NT_("bsf"), bsfcmd, _("backspace file")}, + {NT_("bsr"), bsrcmd, _("backspace record")}, + {NT_("cap"), capcmd, _("list device capabilities")}, + {NT_("clear"), clearcmd, _("clear tape errors")}, + {NT_("eod"), eodcmd, _("go to end of Bacula data for append")}, + {NT_("eom"), eomcmd, _("go to the physical end of medium")}, + {NT_("fill"), fillcmd, _("fill tape, write onto second volume")}, + {NT_("unfill"), unfillcmd, _("read filled tape")}, + {NT_("fsf"), fsfcmd, _("forward space a file")}, + {NT_("fsr"), fsrcmd, _("forward space a record")}, + {NT_("help"), helpcmd, _("print this command")}, + {NT_("label"), labelcmd, _("write a Bacula label to the tape")}, + {NT_("load"), loadcmd, _("load a tape")}, + {NT_("quit"), quitcmd, _("quit btape")}, + {NT_("rawfill"), rawfill_cmd, _("use write() to fill tape")}, + {NT_("readlabel"), readlabelcmd, _("read and print the Bacula tape label")}, + {NT_("rectest"), rectestcmd, _("test record handling functions")}, + {NT_("rewind"), rewindcmd, _("rewind the tape")}, + {NT_("scan"), scancmd, _("read() tape block by block to EOT and report")}, + {NT_("scanblocks"),scan_blocks, _("Bacula read block by block to EOT and report")}, + {NT_("speed"), speed_test, _("[file_size=n(GB)|nb_file=3|skip_zero|skip_random|skip_raw|skip_block] report drive speed")}, + {NT_("status"), statcmd, _("print tape status")}, + {NT_("test"), testcmd, _("General test Bacula tape functions")}, + {NT_("weof"), weofcmd, _("write an EOF on the tape")}, + {NT_("wr"), wrcmd, _("write a single Bacula block")}, + {NT_("rr"), rrcmd, _("read a single record")}, + {NT_("rb"), rbcmd, _("read a single Bacula block")}, + {NT_("qfill"), qfillcmd, _("quick fill command")} + }; +#define comsize (sizeof(commands)/sizeof(struct cmdstruct)) + +static void +do_tape_cmds() +{ + unsigned int i; + bool found; + + while (!quit && get_cmd("*")) { + Dsm_check(200); + found = false; + parse_args(cmd, &args, &argc, argk, argv, MAX_CMD_ARGS); + for (i=0; i 0 && fstrsch(argk[0], commands[i].key)) { + (*commands[i].func)(); /* go execute command */ + found = true; + break; + } + if (*cmd && !found) { + Pmsg1(0, _("\"%s\" is an invalid command\n"), cmd); + } + } +} + +static void helpcmd() +{ + unsigned int i; + usage(); + printf(_("Interactive commands:\n")); + printf(_(" Command Description\n ======= ===========\n")); + for (i=0; i \n" +" -b specify bootstrap file\n" +" -c set configuration file to file\n" +" -d set debug level to \n" +" -dt print timestamp in debug output\n" +" -p proceed inspite of I/O errors\n" +" -s turn off signals\n" +" -w set working directory to dir\n" +" -v be verbose\n" +" -? print this message.\n" +"\n"), 2000, "", VERSION, BDATE); + +} + +/* + * Get next input command from terminal. This + * routine is REALLY primitive, and should be enhanced + * to have correct backspacing, etc. + */ +int +get_cmd(const char *prompt) +{ + int i = 0; + int ch; + + fprintf(stdout, "%s", prompt); + + /* We really should turn off echoing and pretty this + * up a bit. 
+ */ + cmd[i] = 0; + while ((ch = fgetc(stdin)) != EOF) { + if (ch == '\n') { + strip_trailing_junk(cmd); + return 1; + } else if (ch == 4 || ch == 0xd3 || ch == 0x8) { + if (i > 0) { + cmd[--i] = 0; + } + continue; + } + + cmd[i++] = ch; + cmd[i] = 0; + } + quit = 1; + return 0; +} + +bool BtapeAskDirHandler::dir_create_jobmedia_record(DCR *dcr, bool zero) +{ + dcr->WroteVol = false; + return 1; +} + +bool BtapeAskDirHandler::dir_find_next_appendable_volume(DCR *dcr) +{ + Dmsg1(20, "Enter dir_find_next_appendable_volume. stop=%d\n", stop); + return dcr->VolumeName[0] != 0; +} + +bool BtapeAskDirHandler::dir_ask_sysop_to_mount_volume(DCR *dcr, bool /* writing */) +{ + DEVICE *dev = dcr->dev; + Dmsg0(20, "Enter dir_ask_sysop_to_mount_volume\n"); + if (dcr->VolumeName[0] == 0) { + return dir_ask_sysop_to_create_appendable_volume(dcr); + } + Pmsg1(-1, "%s", dev->print_errmsg()); /* print reason */ + if (dcr->VolumeName[0] == 0 || strcmp(dcr->VolumeName, "TestVolume2") == 0) { + fprintf(stderr, _("Mount second Volume on device %s and press return when ready: "), + dev->print_name()); + } else { + fprintf(stderr, _("Mount Volume \"%s\" on device %s and press return when ready: "), + dcr->VolumeName, dev->print_name()); + } + dev->close(dcr); + getchar(); + return true; +} + +bool BtapeAskDirHandler::dir_ask_sysop_to_create_appendable_volume(DCR *dcr) +{ + int autochanger; + DEVICE *dev = dcr->dev; + Dmsg0(20, "Enter dir_ask_sysop_to_create_appendable_volume\n"); + if (stop == 0) { + set_volume_name("TestVolume1", 1); + } else { + set_volume_name("TestVolume2", 2); + } + /* Close device so user can use autochanger if desired */ + if (dev->has_cap(CAP_OFFLINEUNMOUNT)) { + dev->offline(dcr); + } + autochanger = autoload_device(dcr, 1, NULL); + if (autochanger != 1) { + Pmsg1(100, "Autochanger returned: %d\n", autochanger); + fprintf(stderr, _("Mount blank Volume on device %s and press return when ready: "), + dev->print_name()); + dev->close(dcr); + getchar(); + Pmsg0(000, "\n"); + } + labelcmd(); + VolumeName = NULL; + BlockNumber = 0; + return true; +} + +static bool my_mount_next_read_volume(DCR *dcr) +{ + char ec1[50], ec2[50]; + uint64_t rate; + JCR *jcr = dcr->jcr; + DEV_BLOCK *block = dcr->block; + + Dmsg0(20, "Enter my_mount_next_read_volume\n"); + Pmsg2(000, _("End of Volume \"%s\" %d records.\n"), dcr->VolumeName, + quickie_count); + + volume_unused(dcr); /* release current volume */ + if (LastBlock != block->BlockNumber) { + VolBytes += block->block_len; + } + LastBlock = block->BlockNumber; + now = time(NULL); + now -= jcr->run_time; + if (now <= 0) { + now = 1; + } + rate = VolBytes / now; + Pmsg3(-1, _("Read block=%u, VolBytes=%s rate=%sB/s\n"), block->BlockNumber, + edit_uint64_with_commas(VolBytes, ec1), + edit_uint64_with_suffix(rate, ec2)); + + if (strcmp(dcr->VolumeName, "TestVolume2") == 0) { + end_of_tape = 1; + return false; + } + + set_volume_name("TestVolume2", 2); + + dev->close(dcr); + if (!acquire_device_for_read(dcr)) { + Pmsg2(0, _("Cannot open Dev=%s, Vol=%s\n"), dev->print_name(), dcr->VolumeName); + return false; + } + return true; /* next volume mounted */ +} + +static void set_volume_name(const char *VolName, int volnum) +{ + DCR *dcr = jcr->dcr; + VolumeName = VolName; + vol_num = volnum; + dev->setVolCatName(VolName); + dcr->setVolCatName(VolName); + bstrncpy(dcr->VolumeName, VolName, sizeof(dcr->VolumeName)); + dcr->VolCatInfo.Slot = volnum; + dcr->VolCatInfo.InChanger = true; +} diff --git a/src/stored/butil.c b/src/stored/butil.c new file mode 100644 index 
00000000..c59289f6 --- /dev/null +++ b/src/stored/butil.c @@ -0,0 +1,317 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * Utility routines for "tool" programs such as bscan, bls, + * bextract, ... Some routines also used by Bacula. + * + * Kern Sibbald, MM + * + * Normally nothing in this file is called by the Storage + * daemon because we interact more directly with the user + * i.e. printf, ... + */ + +#include "bacula.h" +#include "stored.h" + +/* Forward referenced functions */ +static DCR *setup_to_access_device(JCR *jcr, char *dev_name, + const char *VolumeName, bool writing, bool read_dedup_data); +static DEVRES *find_device_res(char *device_name, bool writing); +static void my_free_jcr(JCR *jcr); + +/* Imported variables -- eliminate some day */ +extern char *configfile; + +#ifdef DEBUG +char *rec_state_bits_to_str(DEV_RECORD *rec) +{ + static char buf[200]; + buf[0] = 0; + if (rec->state_bits & REC_NO_HEADER) { + strcat(buf, _("Nohdr,")); + } + if (is_partial_record(rec)) { + strcat(buf, _("partial,")); + } + if (rec->state_bits & REC_BLOCK_EMPTY) { + strcat(buf, _("empty,")); + } + if (rec->state_bits & REC_NO_MATCH) { + strcat(buf, _("Nomatch,")); + } + if (rec->state_bits & REC_CONTINUATION) { + strcat(buf, _("cont,")); + } + if (buf[0]) { + buf[strlen(buf)-1] = 0; + } + return buf; +} +#endif + +/* + * Setup a pointer to my resource me + */ +void setup_me() +{ + LockRes(); + me = (STORES *)GetNextRes(R_STORAGE, NULL); + if (!me) { + UnlockRes(); + Emsg1(M_ERROR_TERM, 0, _("No Storage resource defined in %s. Cannot continue.\n"), + configfile); + } + UnlockRes(); +} + +/* + * Setup a "daemon" JCR for the various standalone + * tools (e.g. bls, bextract, bscan, ...) 
+ */ +JCR *setup_jcr(const char *name, char *dev_name, BSR *bsr, + const char *VolumeName, bool writing, bool read_dedup_data) +{ + DCR *dcr; + JCR *jcr = new_jcr(sizeof(JCR), my_free_jcr); + jcr->bsr = bsr; + jcr->VolSessionId = 1; + jcr->VolSessionTime = (uint32_t)time(NULL); + jcr->NumReadVolumes = 0; + jcr->NumWriteVolumes = 0; + jcr->JobId = 0; + jcr->setJobType(JT_CONSOLE); + jcr->setJobLevel(L_FULL); + jcr->JobStatus = JS_Terminated; + jcr->where = bstrdup(""); + jcr->job_name = get_pool_memory(PM_FNAME); + pm_strcpy(jcr->job_name, "Dummy.Job.Name"); + jcr->client_name = get_pool_memory(PM_FNAME); + pm_strcpy(jcr->client_name, "Dummy.Client.Name"); + bstrncpy(jcr->Job, name, sizeof(jcr->Job)); + jcr->fileset_name = get_pool_memory(PM_FNAME); + pm_strcpy(jcr->fileset_name, "Dummy.fileset.name"); + jcr->fileset_md5 = get_pool_memory(PM_FNAME); + pm_strcpy(jcr->fileset_md5, "Dummy.fileset.md5"); + init_autochangers(); + create_volume_lists(); + + dcr = setup_to_access_device(jcr, dev_name, VolumeName, writing, read_dedup_data); + if (!dcr) { + return NULL; + } + if (!bsr && VolumeName) { + bstrncpy(dcr->VolumeName, VolumeName, sizeof(dcr->VolumeName)); + } + bstrncpy(dcr->pool_name, "Default", sizeof(dcr->pool_name)); + bstrncpy(dcr->pool_type, "Backup", sizeof(dcr->pool_type)); + return jcr; +} + +/* + * Setup device, jcr, and prepare to access device. + * If the caller wants read access, acquire the device, otherwise, + * the caller will do it. + */ +static DCR *setup_to_access_device(JCR *jcr, char *dev_name, + const char *VolumeName, bool writing, bool read_dedup_data) +{ + DEVICE *dev; + char *p; + DEVRES *device; + DCR *dcr; + char VolName[MAX_NAME_LENGTH]; + + init_reservations_lock(); + + /* + * If no volume name already given and no bsr, and it is a file, + * try getting name from Filename + */ + if (VolumeName) { + bstrncpy(VolName, VolumeName, sizeof(VolName)); + if (strlen(VolumeName) >= MAX_NAME_LENGTH) { + Jmsg0(jcr, M_ERROR, 0, _("Volume name or names is too long. Please use a .bsr file.\n")); + } + } else { + VolName[0] = 0; + } + if (!jcr->bsr && VolName[0] == 0) { + if (strncmp(dev_name, "/dev/", 5) != 0) { + /* Try stripping file part */ + p = dev_name + strlen(dev_name); + + while (p >= dev_name && !IsPathSeparator(*p)) + p--; + if (IsPathSeparator(*p)) { + bstrncpy(VolName, p+1, sizeof(VolName)); + *p = 0; + } + } + } + + if ((device=find_device_res(dev_name, writing)) == NULL) { + Jmsg2(jcr, M_FATAL, 0, _("Cannot find device \"%s\" in config file %s.\n"), + dev_name, configfile); + return NULL; + } + + dev = init_dev(jcr, device); + if (!dev) { + Jmsg1(jcr, M_FATAL, 0, _("Cannot init device %s\n"), dev_name); + return NULL; + } + device->dev = dev; + jcr->dcr = dcr = new_dcr(jcr, NULL, dev, writing); + if (VolName[0]) { + bstrncpy(dcr->VolumeName, VolName, sizeof(dcr->VolumeName)); + } + bstrncpy(dcr->dev_name, device->device_name, sizeof(dcr->dev_name)); + + create_restore_volume_list(jcr, true); + + if (!writing) { /* read only access? */ + Dmsg0(100, "Acquire device for read\n"); + if (!acquire_device_for_read(dcr)) { + return NULL; + } + jcr->read_dcr = dcr; + } else { + if (!first_open_device(dcr)) { + Jmsg1(jcr, M_FATAL, 0, _("Cannot open %s\n"), dev->print_name()); + return NULL; + } + jcr->dcr = dcr; /* write dcr */ + } + return dcr; +} + + +/* + * Called here when freeing JCR so that we can get rid + * of "daemon" specific memory allocated. 
+ */ +static void my_free_jcr(JCR *jcr) +{ + if (jcr->job_name) { + free_pool_memory(jcr->job_name); + jcr->job_name = NULL; + } + if (jcr->client_name) { + free_pool_memory(jcr->client_name); + jcr->client_name = NULL; + } + if (jcr->fileset_name) { + free_pool_memory(jcr->fileset_name); + jcr->fileset_name = NULL; + } + if (jcr->fileset_md5) { + free_pool_memory(jcr->fileset_md5); + jcr->fileset_md5 = NULL; + } + if (jcr->comment) { + free_pool_memory(jcr->comment); + jcr->comment = NULL; + } + if (jcr->VolList) { + free_restore_volume_list(jcr); + } + if (jcr->dcr) { + free_dcr(jcr->dcr); + jcr->dcr = NULL; + } + return; +} + + +/* + * Search for device resource that corresponds to + * device name on command line (or default). + * + * Returns: NULL on failure + * Device resource pointer on success + */ +static DEVRES *find_device_res(char *device_name, bool write_access) +{ + bool found = false; + DEVRES *device; + + Dmsg0(900, "Enter find_device_res\n"); + LockRes(); + foreach_res(device, R_DEVICE) { + Dmsg2(900, "Compare %s and %s\n", device->device_name, device_name); + if (strcmp(device->device_name, device_name) == 0) { + found = true; + break; + } + } + if (!found) { + /* Search for name of Device resource rather than archive name */ + if (device_name[0] == '"') { + int len = strlen(device_name); + bstrncpy(device_name, device_name+1, len+1); + len--; + if (len > 0) { + device_name[len-1] = 0; /* zap trailing " */ + } + } + foreach_res(device, R_DEVICE) { + Dmsg2(900, "Compare %s and %s\n", device->hdr.name, device_name); + if (strcmp(device->hdr.name, device_name) == 0) { + found = true; + break; + } + } + } + UnlockRes(); + if (!found) { + Pmsg2(0, _("Could not find device \"%s\" in config file %s.\n"), device_name, + configfile); + return NULL; + } + if (write_access) { + Pmsg1(0, _("Using device: \"%s\" for writing.\n"), device_name); + } else { + Pmsg1(0, _("Using device: \"%s\" for reading.\n"), device_name); + } + return device; +} + + +/* + * Device got an error, attempt to analyse it + */ +void display_tape_error_status(JCR *jcr, DEVICE *dev) +{ + uint32_t status; + + status = status_dev(dev); + Dmsg1(20, "Device status: %x\n", status); + if (status & BMT_EOD) + Jmsg(jcr, M_ERROR, 0, _("Unexpected End of Data\n")); + else if (status & BMT_EOT) + Jmsg(jcr, M_ERROR, 0, _("Unexpected End of Tape\n")); + else if (status & BMT_EOF) + Jmsg(jcr, M_ERROR, 0, _("Unexpected End of File\n")); + else if (status & BMT_DR_OPEN) + Jmsg(jcr, M_ERROR, 0, _("Tape Door is Open\n")); + else if (!(status & BMT_ONLINE)) + Jmsg(jcr, M_ERROR, 0, _("Unexpected Tape is Off-line\n")); +} diff --git a/src/stored/cloud_dev.c b/src/stored/cloud_dev.c new file mode 100644 index 00000000..970f6d4f --- /dev/null +++ b/src/stored/cloud_dev.c @@ -0,0 +1,2301 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +/* + * Generic routines for creating Cloud compatibile Volumes. + * NOTE!!! 
This cloud device is not compatible with + * any disk-changer script for changing Volumes. + * It does however work with Bacula Virtual autochangers. + * + * Written by Kern Sibbald, May MMXVI + * + */ + +#include "bacula.h" +#include "stored.h" +#include "cloud_dev.h" +#include "s3_driver.h" +#include "file_driver.h" +#include "cloud_parts.h" +#include "math.h" + +static const int dbglvl = 450; + +#define ASYNC_TRANSFER 1 + +/* Debug only: Enable to introduce random transfer delays*/ +/* #define RANDOM_WAIT_ENABLE*/ +#define RANDOM_WAIT_MIN 2 /* minimum delay time*/ +#define RANDOM_WAIT_MAX 12 /* maxinum delay time*/ + +#define XFER_TMP_NAME "xfer" +#include + +#if defined(HAVE_WIN32) + #define lseek _lseeki64 +#endif + +/* standard dcr cancel callback function */ +bool DCR_cancel_cb(void* arg) +{ + DCR *dcr = (DCR*)arg; + if (dcr && dcr->jcr) { + return dcr->jcr->is_canceled(); + } + return false; +} + +#ifdef __cplusplus +extern "C" { +#endif + +DEVICE *BaculaSDdriver(JCR *jcr, DEVRES *device) +{ + DEVICE *dev; + if (!device->cloud) { + Jmsg0(jcr, M_FATAL, 0, _("A Cloud resource is required for the Cloud driver, but is missing.\n")); + return NULL; + } + dev = New(cloud_dev(jcr, device)); + dev->capabilities |= CAP_LSEEK; + return dev; +} + +#ifdef __cplusplus +} +#endif + +transfer_manager cloud_dev::download_mgr(transfer_manager(0)); +transfer_manager cloud_dev::upload_mgr(transfer_manager(0)); + +/* Imported functions */ +const char *mode_to_str(int mode); +int breaddir(DIR *dirp, POOLMEM *&dname); + +/* Forward referenced functions */ +bool makedir(JCR *jcr, char *path, mode_t mode); + +/* Const and Static definitions */ + +/* Address manipulations: + * + * The current idea is internally use part and offset-in-part + * However for sending back JobMedia, we need to use + * get_full_addr() which puts the file in the top 20 bits. + */ + +static boffset_t get_offset(boffset_t ls_offset) +{ + boffset_t pos = ls_offset & off_mask; + return pos; +} + +static boffset_t make_addr(uint32_t my_part, boffset_t my_offset) +{ + return (boffset_t)(((uint64_t)my_part)<m_volume_name) && (upart == t->m_part)) { + return t; + } + } + return NULL; +} + +/* + * This upload_engine is called by workq in a worker thread. + */ +void *upload_engine(transfer *tpkt) +{ +#ifdef RANDOM_WAIT_ENABLE + srand(time(NULL)); + /* wait between 2 and 12 seconds */ + int s_time = RANDOM_WAIT_MIN + rand() % (RANDOM_WAIT_MAX-RANDOM_WAIT_MIN); + bmicrosleep(s_time, 0); +#endif + if (tpkt && tpkt->m_driver) { + /* call the driver method async */ + Dmsg4(dbglvl, "Upload start %s-%d JobId : %d driver :%p\n", + tpkt->m_volume_name, tpkt->m_part, tpkt->m_dcr->jcr->JobId, tpkt->m_driver); + if (!tpkt->m_driver->copy_cache_part_to_cloud(tpkt)) { + /* Error message already sent by Qmsg() */ + Dmsg4(dbglvl, "Upload error!! JobId=%d part=%d Vol=%s cache=%s\n", + tpkt->m_dcr->jcr->JobId, tpkt->m_part, tpkt->m_volume_name, tpkt->m_cache_fname); + POOL_MEM dmsg(PM_MESSAGE); + tpkt->append_status(dmsg); + Dmsg1(dbglvl, "%s\n",dmsg.c_str()); + return tpkt; + } + Dmsg2(dbglvl, "Upload end JobId : %d driver :%p\n", + tpkt->m_dcr->jcr->JobId, tpkt->m_driver); + + if (tpkt->m_do_cache_truncate && tpkt->m_part!=1) { + if (unlink(tpkt->m_cache_fname) != 0) { + berrno be; + Dmsg2(dbglvl, "Truncate cache option after upload. Unable to delete %s. ERR=%s\n", tpkt->m_cache_fname, be.bstrerror()); + } else { + Dmsg1(dbglvl, "Truncate cache option after upload. 
Unlink file %s\n", tpkt->m_cache_fname); + } + } + } + return NULL; +} + +/* + * This download_engine is called by workq in a worker thread. + */ +void *download_engine(transfer *tpkt) +{ +#ifdef RANDOM_WAIT_ENABLE + srand(time(NULL)); + /* wait between 2 and 12 seconds */ + int s_time = RANDOM_WAIT_MIN + rand() % (RANDOM_WAIT_MAX-RANDOM_WAIT_MIN); + bmicrosleep(s_time, 0); +#endif + if (tpkt && tpkt->m_driver) { + /* call the driver method async */ + Dmsg4(dbglvl, "Download starts %s-%d : job : %d driver :%p\n", + tpkt->m_volume_name, tpkt->m_part, tpkt->m_dcr->jcr->JobId, tpkt->m_driver); + if (!tpkt->m_driver->copy_cloud_part_to_cache(tpkt)) { + Dmsg4(dbglvl, "Download error!! JobId=%d part=%d Vol=%s cache=%s\n", + tpkt->m_dcr->jcr->JobId, tpkt->m_part, tpkt->m_volume_name, tpkt->m_cache_fname); + POOL_MEM dmsg(PM_MESSAGE); + tpkt->append_status(dmsg); + Dmsg1(dbglvl, "%s\n",dmsg.c_str()); + + /* download failed -> remove the temp xfer file */ + if (unlink(tpkt->m_cache_fname) != 0) { + berrno be; + Dmsg2(dbglvl, "Unable to delete %s. ERR=%s\n", tpkt->m_cache_fname, be.bstrerror()); + } else { + Dmsg1(dbglvl, "Unlink file %s\n", tpkt->m_cache_fname); + } + + return tpkt; + } + else { + POOLMEM *cache_fname = get_pool_memory(PM_FNAME); + pm_strcpy(cache_fname, tpkt->m_cache_fname); + char *p = strstr(cache_fname, XFER_TMP_NAME); + char partnumber[20]; + bsnprintf(partnumber, sizeof(partnumber), "part.%d", tpkt->m_part); + strcpy(p,partnumber); + if (rename(tpkt->m_cache_fname, cache_fname) != 0) { + Dmsg5(dbglvl, "Download copy error!! JobId=%d part=%d Vol=%s temp cache=%s cache=%s\n", + tpkt->m_dcr->jcr->JobId, tpkt->m_part, tpkt->m_volume_name, tpkt->m_cache_fname, cache_fname); + free_pool_memory(cache_fname); + return tpkt; + } + free_pool_memory(cache_fname); + } + Dmsg2(dbglvl, "Download end JobId : %d driver :%p\n", + tpkt->m_dcr->jcr->JobId, tpkt->m_driver); + } + return NULL; +} + +/* + * Upload the given part to the cloud + */ +bool cloud_dev::upload_part_to_cloud(DCR *dcr, const char *VolumeName, uint32_t upart) +{ + if (upload_opt == UPLOAD_NO) { + /* lets pretend everything is OK */ + return true; + } + bool ret=false; + if (upart == 0 || get_list_transfer(dcr->uploads, VolumeName, upart)) { + return ret; + } + + uint64_t file_size=0; + POOLMEM *cache_fname = get_pool_memory(PM_FNAME); + make_cache_filename(cache_fname, VolumeName, upart); + + /* part is valid and no upload for the same part is scheduled */ + if (!upload_mgr.find(VolumeName,upart)) { + Enter(dbglvl); + + /* statistics require the size to transfer */ + struct stat statbuf; + if (lstat(cache_fname, &statbuf) < 0) { + berrno be; + Mmsg2(errmsg, "Failed to find cache part file %s. ERR=%s\n", + cache_fname, be.bstrerror()); + Dmsg1(dbglvl, "%s", errmsg); + free_pool_memory(cache_fname); + return false; + } + file_size = statbuf.st_size; + + /* Nothing to do with this empty part */ + if (file_size == 0) { + free_pool_memory(cache_fname); + return true; /* consider the transfer OK */ + } + + ret=true; + } + + Dmsg1(dbglvl, "upload_part_to_cloud: %s\n", cache_fname); + /* get_xfer either returns a new transfer or a similar one if it already exists. + * in this case, the transfer is shared and ref_count is incremented. The caller should only care to release() + * the transfer eventually. 
The transfer_mgr is in charge of deleting the transfer when no one shares it anymore*/ + transfer *item = upload_mgr.get_xfer(file_size, + upload_engine, + cache_fname,/* cache_fname is duplicated in the transfer constructor*/ + VolumeName, /* VolumeName is duplicated in the transfer constructor*/ + upart, + driver, + dcr, + cloud_prox); + dcr->uploads->append(item); + /* transfer are queued manually, so the caller has control on when the transfer is scheduled + * this should come handy for upload_opt */ + item->set_do_cache_truncate(trunc_opt == TRUNC_AFTER_UPLOAD); + if (upload_opt == UPLOAD_EACHPART) { + /* in each part upload option, queue right away */ + item->queue(); + } + free_pool_memory(cache_fname); + + if (ret) { + /* Update the Media information */ + if (upart >= VolCatInfo.VolCatParts) { + VolCatInfo.VolCatParts = upart; + VolCatInfo.VolLastPartBytes = file_size; + } + /* We do not call dir_update_volume_info() because the part is not yet + * uploaded, but we may call it to update VolCatParts or VolLastPartBytes. + */ + } + + return ret; +} + +/* Small helper to get */ +static int64_t part_get_size(ilist *cachep, int index) +{ + int64_t ret=0; + if (index <= cachep->last_index()) { + cloud_part *p = (cloud_part*) cachep->get(index); + if (p) { + ret = p->size; + } + } + return ret; +} + +/* + * Download the part_idx part to the cloud. The result is store in the DCR context + * The caller should use free_transfer() + */ +transfer *cloud_dev::download_part_to_cache(DCR *dcr, const char *VolumeName, uint32_t dpart) +{ + if (dpart == 0) { + return NULL; + } + + /* if item's already in the dcr list, it's already in the download_mgr, we don't need any duplication*/ + transfer *item = get_list_transfer(dcr->downloads, VolumeName, dpart); + if (!item) { + POOLMEM *cache_fname = get_pool_memory(PM_FNAME); + pm_strcpy(cache_fname, dev_name); + /* create a uniq xfer file name with XFER_TMP_NAME and the pid */ + char xferbuf[32]; + bsnprintf(xferbuf, sizeof(xferbuf), "%s_%d", XFER_TMP_NAME, (int)getpid()); + add_vol_and_part(cache_fname, VolumeName, xferbuf, dpart); + + /* use the cloud proxy to retrieve the transfer size */ + uint64_t cloud_size = cloud_prox->get_size(VolumeName, dpart); + + /* check if the part is already in the cache and if it's bigger or equal to the cloud conterpart*/ + ilist cachep; + if (!get_cache_volume_parts_list(dcr, getVolCatName(), &cachep)) { + free_pool_memory(cache_fname); + return NULL; + } + uint64_t cache_size = part_get_size(&cachep, dpart); + + Dmsg3(dbglvl, "download_part_to_cache: %s. cache_size=%d cloud_size=%d\n", cache_fname, cache_size, cloud_size); + + if (cache_size >= cloud_size) { + /* We could/should use mtime */ + /* cache is "better" than cloud, no need to download */ + Dmsg2(dbglvl, "part %ld is up-to-date in the cache %lld\n", (int32_t)dpart, cache_size); + free_pool_memory(cache_fname); + return NULL; + } + + /* Unlikely, but still possible : the xfer cache file already exists */ + struct stat statbuf; + if (lstat(cache_fname, &statbuf) == 0) { + Dmsg1(dbglvl, "download_part_to_cache: %s already exists: remove it.", cache_fname); + if (unlink(cache_fname) < 0) { + berrno be; + Dmsg2(dbglvl, "download_part_to_cache: failed to remove file %s. ERR: %s\n",cache_fname, be.bstrerror()); + } else { + Dmsg1(dbglvl, "=== unlinked: %s\n", cache_fname); + } + } + + /* get_xfer either returns a new transfer or a similar one if it already exists. + * in this case, the transfer is shared and ref_count is incremented. 
The caller should only care to release() + * the transfer eventually. The transfer_mgr is in charge of deleting the transfer when no one shares it anymore*/ + item = download_mgr.get_xfer(cloud_size, + download_engine, + cache_fname,/* cache_fname is duplicated in the transfer constructor*/ + VolumeName, /* VolumeName is duplicated in the transfer constructor*/ + dpart, + driver, + dcr, + NULL); // no proxy on download to cache + dcr->downloads->append(item); + /* transfer are queued manually, so the caller has control on when the transfer is scheduled */ + item->queue(); + + free_pool_memory(cache_fname); + } + return item; +} + +/* + * Note, we might want to make a new download_first_part_to_read() + * where it waits for the first part, then this routine + * can simply start the other downloads that will be needed, and + * we can wait for them in each new open(). + */ +bool cloud_dev::download_parts_to_read(DCR *dcr, alist* parts) +{ + intptr_t part; + transfer *part_1=NULL, *item; + ilist cachep; + int64_t size; + + /* Find and download any missing parts for read */ + if (!driver) { + return false; + } + + if (!get_cache_volume_parts_list(dcr, getVolCatName(), &cachep)) { + return false; + } + + foreach_alist(part, parts) { + /* TODO: get_cache_sizes is called before; should be an argument */ + size = part_get_size(&cachep, part); + if (size == 0) { + item = download_part_to_cache(dcr, getVolCatName(), (int32_t)part); + if (part == 1) { + part_1 = item; /* Keep it, we continue only if the part1 is downloaded */ + } + } else { + Dmsg2(dbglvl, "part %ld is already in the cache %lld\n", (int32_t)part, size); + } + } + + /* wait for the part.1 */ + if (part_1) { + wait_end_of_transfer(dcr, part_1); + } + return true; +} + +uint32_t cloud_dev::get_part(boffset_t ls_offset) +{ + return (uint32_t)(ls_offset>>off_bits); +} + +DEVICE *cloud_dev::get_dev(DCR */*dcr*/) +{ + return this; +} + +uint32_t cloud_dev::get_hi_addr() +{ + return (uint32_t)(file_addr >> 32); +} + +uint32_t cloud_dev::get_low_addr() +{ + return (uint32_t)(file_addr); +} + +uint64_t cloud_dev::get_full_addr() +{ + uint64_t pos; + pos = make_addr(part, get_offset(file_addr)); + return pos; +} + +uint64_t cloud_dev::get_full_addr(boffset_t addr) +{ + uint64_t pos; + pos = make_addr(part, get_offset(addr)); + return pos; +} + + + +#ifdef is_loadable_driver +/* Main entry point when loaded */ +extern "C" cloud_dev *BaculaSDdriver(JCR *jcr, DEVRES *device) +{ + Enter(dbglvl); + cloud_dev *dev = New(cloud_dev); + return dev; +} +#endif + +#if 0 +static transfer* find_transfer(DCR *dcr, const char *VolumeName, uint32_t part) +{ + transfer *item; + foreach_alist(item, dcr->transfers) { + if (part == item->m_part && strcmp(item->m_volume_name, VolumeName) == 0) { + return item; + } + } + return NULL; +} +#endif + +/* + * Make a list of cache sizes and count num_cache_parts + */ +bool cloud_dev::get_cache_sizes(DCR *dcr, const char *VolumeName) +{ + DIR* dp = NULL; + struct dirent *entry = NULL; + struct stat statbuf; + int name_max; + POOLMEM *vol_dir = get_pool_memory(PM_NAME); + POOLMEM *fname = get_pool_memory(PM_NAME); + uint32_t cpart; + bool ok = false; + + POOL_MEM dname(PM_FNAME); + int status = 0; + + /* + * **FIXME**** do this only once for each Volume. Currently, + * it is done for each part that is opened. 
+ * NB : this should be substituted with get_cache_volume_parts_list + */ + Enter(dbglvl); + max_cache_size = 100; + if (cache_sizes) { + free(cache_sizes); + } + cache_sizes = (uint64_t *)malloc(max_cache_size * sizeof(uint64_t)); + memset(cache_sizes, 0, max_cache_size * sizeof(uint64_t)); + num_cache_parts = 0; + max_cache_part = 0; + + name_max = pathconf(".", _PC_NAME_MAX); + if (name_max < 1024) { + name_max = 1024; + } + + make_cache_volume_name(vol_dir, VolumeName); + if (!(dp = opendir(vol_dir))) { + berrno be; + Mmsg2(errmsg, "Cannot opendir to get cache sizes. Volume=%s does not exist. ERR=%s\n", + vol_dir, be.bstrerror()); + Dmsg1(dbglvl, "%s", errmsg); + goto get_out; + } + + entry = (struct dirent *)malloc(sizeof(struct dirent) + name_max + 1000); + + for ( ;; ) { + if (dcr->jcr->is_canceled()) { + goto get_out; + } + errno = 0; + status = breaddir(dp, dname.addr()); + if (status == -1) { + break; + } else if (status > 0) { + Mmsg1(errmsg, "breaddir failed: ERR=%s", status); + Dmsg1(dbglvl, "%s\n", errmsg); + goto get_out; + } + /* Always ignore . and .. */ + if (strcmp(".", dname.c_str()) == 0 || strcmp("..", dname.c_str()) == 0) { + continue; + } + + /* Look only for part files */ + if (strncmp("part.", dname.c_str(), 5) != 0) { + continue; + } + + /* Get size of part */ + Mmsg(fname, "%s/%s", vol_dir, dname.c_str()); + if (lstat(fname, &statbuf) == -1) { + berrno be; + Mmsg2(errmsg, "Failed to stat file %s: %s\n", fname, be.bstrerror()); + Dmsg1(dbglvl, "%s\n", errmsg); + goto get_out; + } + + cpart = (int)str_to_int64((char *)&(dname.c_str()[5])); + Dmsg2(dbglvl, "part=%d file=%s\n", cpart, dname.c_str()); + if (cpart > max_cache_part) { + max_cache_part = cpart; + } + if (cpart >= max_cache_size) { + max_cache_size = cpart + 100; + cache_sizes = (uint64_t *)realloc(cache_sizes, max_cache_size * sizeof(uint64_t)); + for (int i=cpart; i<(int)max_cache_size; i++) cache_sizes[i] = 0; + } + num_cache_parts++; + cache_sizes[cpart] = (uint64_t)statbuf.st_size; + Dmsg2(dbglvl, "found part=%d size=%llu\n", cpart, cache_sizes[cpart]); + } + + if (chk_dbglvl(dbglvl)) { + Pmsg1(0, "Cache objects Vol=%s:\n", VolumeName); + for (int i=1; i <= (int)max_cache_part; i++) { + Pmsg2(0, " part num=%d size=%llu\n", i, cache_sizes[i]); + } + Pmsg2(0, "End cache obj list: nparts=%d max_cache_part=%d\n", + num_cache_parts, max_cache_part); + } + ok = true; + +get_out: + if (dp) { + closedir(dp); + } + if (entry) { + free(entry); + } + free_pool_memory(vol_dir); + free_pool_memory(fname); + return ok; +} + + +/* Utility routines */ + +void cloud_dev::add_vol_and_part(POOLMEM *&filename, + const char *VolumeName, const char *name, uint32_t apart) +{ + Enter(dbglvl); + char partnumber[20]; + int len = strlen(filename); + + if (len > 0 && !IsPathSeparator((filename)[len-1])) { + pm_strcat(filename, "/"); + } + + pm_strcat(filename, VolumeName); + bsnprintf(partnumber, sizeof(partnumber), "/%s.%d", name, apart); + pm_strcat(filename, partnumber); +} + +void cloud_dev::make_cache_filename(POOLMEM *&filename, + const char *VolumeName, uint32_t upart) +{ + Enter(dbglvl); + + pm_strcpy(filename, dev_name); + add_vol_and_part(filename, VolumeName, "part", upart); +} + +void cloud_dev::make_cache_volume_name(POOLMEM *&volname, + const char *VolumeName) +{ + Enter(dbglvl); + POOL_MEM archive_name(PM_FNAME); + + pm_strcpy(archive_name, dev_name); + if (!IsPathSeparator(archive_name.c_str()[strlen(archive_name.c_str())-1])) { + pm_strcat(archive_name, "/"); + } + pm_strcat(archive_name, VolumeName); + 
pm_strcpy(volname, archive_name.c_str()); +} + +/* + * DEVICE virtual functions that we redefine. + */ +cloud_dev::~cloud_dev() +{ + Enter(dbglvl); + + cloud_prox->release(); + + if (cache_sizes) { + free(cache_sizes); + cache_sizes = NULL; + } + if (driver) { + driver->term(NULL); + delete driver; + driver = NULL; + } + if (m_fd != -1) { + d_close(m_fd); + m_fd = -1; + } +} + +cloud_dev::cloud_dev(JCR *jcr, DEVRES *device) +{ + Enter(dbglvl); + m_fd = -1; + capabilities |= CAP_LSEEK; + + /* Initialize Cloud driver */ + if (!driver) { + switch (device->cloud->driver_type) { +#ifdef HAVE_LIBS3 + case C_S3_DRIVER: + driver = New(s3_driver); + break; +#endif + case C_FILE_DRIVER: + driver = New(file_driver); + break; + default: + break; + } + if (!driver) { + Qmsg2(jcr, M_FATAL, 0, _("Could not open Cloud driver type=%d for Device=%s.\n"), + device->cloud->driver_type, device->hdr.name); + return; + } + /* Make local copy in device */ + if (device->cloud->upload_limit) { + driver->upload_limit.set_bwlimit(device->cloud->upload_limit); + } + + if (device->cloud->download_limit) { + driver->download_limit.set_bwlimit(device->cloud->download_limit); + } + + trunc_opt = device->cloud->trunc_opt; + upload_opt = device->cloud->upload_opt; + Dmsg2(dbglvl, "Trunc_opt=%d upload_opt=%d\n", trunc_opt, upload_opt); + if (device->cloud->max_concurrent_uploads) { + upload_mgr.m_wq.max_workers = device->cloud->max_concurrent_uploads; + } + if (device->cloud->max_concurrent_downloads) { + download_mgr.m_wq.max_workers = device->cloud->max_concurrent_downloads; + } + + /* Initialize the driver */ + driver->init(jcr, this, device); + } + + /* the cloud proxy owns its cloud_parts, so we can 'set and forget' them */ + cloud_prox = cloud_proxy::get_instance(); + +} + +/* + * DEVICE virtuals that we redefine. + */ + +static const char *seek_where(int whence) +{ + switch (whence) { + case SEEK_SET: + return "SEEK_SET"; + case SEEK_CUR: + return "SEEK_CUR"; + case SEEK_END: + return "SEEK_END"; + default: + return "UNKNOWN"; + } +} + + +/* + * Note, we can enter with a full address containing a part number + * and an offset or with an offset. If the part number is zero + * at entry, we use the current part. + * + * This routine always returns a full address (part, offset). + * + */ +boffset_t cloud_dev::lseek(DCR *dcr, boffset_t ls_offset, int whence) +{ + boffset_t pos; + uint32_t new_part; + boffset_t new_offset; + char ed1[50]; + + if (!dcr) { /* can be NULL when called from rewind(NULL) */ + return -1; + } + + /* Convert input ls_offset into part and off */ + if (ls_offset < 0) { + return -1; + } + new_part = get_part(ls_offset); + new_offset = get_offset(ls_offset); + if (new_part == 0) { + new_part = part; + if (new_part == 0) { + new_part = 1; + } + } + Dmsg6(dbglvl, "lseek(%d, %s, %s) part=%d nparts=%d off=%lld\n", + m_fd, print_addr(ed1, sizeof(ed1), ls_offset), seek_where(whence), part, num_cache_parts, new_offset); + if (whence != SEEK_CUR && new_part != part) { + Dmsg2(dbglvl, "new_part=%d part=%d call close_part()\n", new_part, part); + close_part(dcr); + part = new_part; + Dmsg0(dbglvl, "now open_device()\n"); + if (!open_device(dcr, openmode)) { + return -1; + } + ASSERT2(part==new_part, "Big problem part!=new_partn"); + } + + switch (whence) { + case SEEK_SET: + /* We are staying in the current part, just seek */ + pos = ::lseek(m_fd, new_offset, SEEK_SET); + if (pos < 0) { + berrno be; + dev_errno = errno; + Mmsg2(errmsg, _("lseek error on %s. 
ERR=%s.\n"), + print_name(), be.bstrerror()); + Dmsg1(000, "Seek error. ERR=%s\n", errmsg); + return pos; + } + Dmsg4(dbglvl, "lseek_set part=%d pos=%s fd=%d offset=%lld\n", + part, print_addr(ed1, sizeof(ed1), pos), m_fd, new_offset); + return get_full_addr(pos); + + case SEEK_CUR: + pos = ::lseek(m_fd, 0, SEEK_CUR); + if (pos < 0) { + berrno be; + dev_errno = errno; + Mmsg2(errmsg, _("lseek error on %s. ERR=%s.\n"), + print_name(), be.bstrerror()); + Dmsg1(000, "Seek error. ERR=%s\n", errmsg); + return pos; + } + Dmsg4(dbglvl, "lseek %s fd=%d offset=%lld whence=%s\n", + print_addr(ed1, sizeof(ed1)), m_fd, new_offset, seek_where(whence)); + return get_full_addr(pos); + + case SEEK_END: + /* + * Bacula does not use offsets for SEEK_END + * Also, Bacula uses seek_end only when it wants to + * append to the volume. + */ + pos = ::lseek(m_fd, new_offset, SEEK_END); + if (pos < 0) { + berrno be; + dev_errno = errno; + Mmsg2(errmsg, _("lseek error on %s. ERR=%s.\n"), + print_name(), be.bstrerror()); + Dmsg1(000, "Seek error. ERR=%s\n", errmsg); + return pos; + } + Dmsg4(dbglvl, "lseek_end part=%d pos=%lld fd=%d offset=%lld\n", + part, pos, m_fd, new_offset); + return get_full_addr(pos); + + default: + Dmsg0(dbglvl, "Seek call error.\n"); + errno = EINVAL; + return -1; + } +} + +/* use this to track file usage */ +bool cloud_dev::update_pos(DCR *dcr) +{ + Enter(dbglvl); + return file_dev::update_pos(dcr); +} + +bool cloud_dev::rewind(DCR *dcr) +{ + Enter(dbglvl); + Dmsg3(dbglvl, "rewind res=%d fd=%d %s\n", num_reserved(), m_fd, print_name()); + state &= ~(ST_EOT|ST_EOF|ST_WEOT); /* remove EOF/EOT flags */ + block_num = file = 0; + file_size = 0; + if (m_fd < 0) { + Mmsg1(errmsg, _("Rewind failed: device %s is not open.\n"), print_name()); + return false; + } + if (part != 1) { + close_part(dcr); + part = 1; + if (!open_device(dcr, openmode)) { + return false; + } + } + if (lseek(dcr, (boffset_t)0, SEEK_SET) < 0) { + berrno be; + dev_errno = errno; + Mmsg2(errmsg, _("lseek to 0 error on %s. ERR=%s.\n"), + print_name(), be.bstrerror()); + return false; + } + file_addr = 0; + return true; +} + +bool cloud_dev::reposition(DCR *dcr, uint64_t raddr) +{ + Enter(dbglvl); + char ed1[50]; + Dmsg2(dbglvl, "part=%d num_cache_parts=%d\n", part, num_cache_parts); + if (!is_open()) { + dev_errno = EBADF; + Mmsg0(errmsg, _("Bad call to reposition. Device not open\n")); + Qmsg0(dcr->jcr, M_FATAL, 0, errmsg); + return false; + } + + if (lseek(dcr, (boffset_t)raddr, SEEK_SET) == (boffset_t)-1) { + berrno be; + dev_errno = errno; + Mmsg2(errmsg, _("lseek error on %s. ERR=%s.\n"), + print_name(), be.bstrerror()); + return false; + } + file_addr = raddr; + Dmsg1(dbglvl, "=== reposition lseeked to %s\n", print_addr(ed1, sizeof(ed1))); + return true; +} + +#define INTPTR(a) (void*)(intptr_t)(a) + +/* Small cloud scanner for the BSR list, we check if all parts are in the cache area. */ +class BSRPartScanner { +private: + DCR *dcr; + cloud_dev *dev; + uint32_t lastPart; /* Last part used, mark the part for download only one time */ + alist *parts; + +public: + BSRPartScanner(DCR *adcr, cloud_dev *adev) { + dcr = adcr; + dev = adev; + lastPart = 0; + parts = New(alist(100, not_owned_by_alist)); + }; + + ~BSRPartScanner() { + delete parts; + }; + + /* accessor for parts list*/ + alist *get_parts_list() { + return parts; + }; + + /* We check if the current Parts in the voladdr list are needed + * The BSR structure should progress forward in the volume. 
+ */ + void get_parts(BSR_VOLUME *volume, BSR_VOLADDR *voladdr) + { + while (voladdr) { + for (uint32_t part = dev->get_part(voladdr->saddr); part <=dev->get_part(voladdr->eaddr); ++part) + { + if (lastPart != part) { + lastPart = part; + parts->append(INTPTR(part)); + } + } + voladdr = voladdr->next; + } + }; + + /* Get Volume/Parts that must be downloaded For each MediaType, we must find + * the right device and check if it's a Cloud device. If we have a cloud device, + * then we need to check all VolAddress to get the Part list that is associated. + * + * It's likely that we will always query the same MediaType and the same + * Volume. + */ + void get_all_parts(BSR *bsr, const char *cur_volume) + { + bool done=false; + BSR_VOLUME *volume; + parts->destroy(); + /* Always download the part.1 */ + parts->append(INTPTR(1)); + + while (bsr) { + volume = bsr->volume; + + if (strcmp(volume->VolumeName, cur_volume) == 0) { + get_parts(volume, bsr->voladdr); + done = true; + + } else if (done) { /* TODO: We can stop when it's no longer the right volume */ + break; + } + + bsr = bsr->next; + } + + intptr_t part; + if (chk_dbglvl(dbglvl)) { + Dmsg1(0, "Display list of parts to download for volume %s:\n", cur_volume); + foreach_alist(part, parts) { + Dmsg2(0, " Must download part %s/part.%lld\n", cur_volume, (int64_t)part); + } + } + }; +}; + +/* Wait for the download of a particular part */ +bool cloud_dev::wait_one_transfer(DCR *dcr, char *VolName, uint32_t part) +{ + dcr->jcr->setJobStatus(JS_CloudDownload); + + transfer *item = download_part_to_cache(dcr, VolName, part); + if (item) { + bool ok = wait_end_of_transfer(dcr, item); + ok &= (item->m_state == TRANS_STATE_DONE); + dcr->jcr->setJobStatus(JS_Running); + + if (!ok) { + Qmsg2(dcr->jcr, M_FATAL, 0, + _("Unable to download Volume=\"%s\"%s.\n"), VolName, + (part==1)?" 
label":""); + } + return ok; + } else { + /* no item to download -> up-to-date */ + return true; + } + return false; +} + +bool cloud_dev::open_device(DCR *dcr, int omode) +{ + POOL_MEM archive_name(PM_FNAME); + POOL_MEM part_name(PM_FNAME); + struct stat sp; + + Enter(dbglvl); + /* Call base class to define generic variables */ + if (DEVICE::open_device(dcr, omode)) { + Dmsg2(dbglvl, "fd=%d device %s already open\n", m_fd, print_name()); + Leave(dbglvl); + return true; + } + omode = openmode; + + /* At this point, the device is closed, so we open it */ + + /* reset the cloud parts proxy for the current volume */ + probe_cloud_proxy(dcr, getVolCatName()); + + /* Now Initialize the Cache part */ + pm_strcpy(archive_name, dev_name); + if (!IsPathSeparator(archive_name.c_str()[strlen(archive_name.c_str())-1])) { + pm_strcat(archive_name, "/"); + } + pm_strcat(archive_name, getVolCatName()); + + /* If create make directory with Volume name */ + if (part <= 0 && omode == CREATE_READ_WRITE) { + Dmsg1(dbglvl, "=== makedir=%s\n", archive_name.c_str()); + if (!makedir(dcr->jcr, archive_name.c_str(), 0740)) { + Dmsg0(dbglvl, "makedir failed.\n"); + Leave(dbglvl); + return false; + } + } + if (part <= 0) { + part = 1; /* always start from 1 */ + } + Dmsg2(dbglvl, "part=%d num_cache_parts=%d\n", part, num_cache_parts); + + /* + * If we are doing a restore, get the necessary parts + */ + if (dcr->is_reading()) { + BSRPartScanner scanner(dcr, this); + scanner.get_all_parts(dcr->jcr->bsr, getVolCatName()); + download_parts_to_read(dcr, scanner.get_parts_list()); + } + get_cache_sizes(dcr, getVolCatName()); /* refresh with what may have downloaded */ + + /* We need to make sure the current part is loaded */ + uint64_t cld_size = cloud_prox->get_size(getVolCatName(), 1); + if (cache_sizes[1] == 0 && cld_size != 0) { + if (!wait_one_transfer(dcr, getVolCatName(), 1)) { + return false; + } + } + + /* TODO: Merge this part of the code with the previous section */ + cld_size = cloud_prox->get_size(getVolCatName(), part); + if (dcr->is_reading() && part > 1 && cache_sizes[part] == 0 + && cld_size != 0) { + if (!wait_one_transfer(dcr, getVolCatName(), part)) { + return false; + } + } + + Mmsg(part_name, "/part.%d", part); + pm_strcat(archive_name, part_name.c_str()); + + set_mode(omode); + /* If creating file, give 0640 permissions */ + Dmsg3(dbglvl, "open mode=%s open(%s, 0x%x, 0640)\n", mode_to_str(omode), + archive_name.c_str(), mode); + + /* Use system open() */ + errmsg[0] = 0; + if ((m_fd = ::open(archive_name.c_str(), mode|O_CLOEXEC, 0640)) < 0) { + berrno be; + dev_errno = errno; + if (part == 1 && omode != CREATE_READ_WRITE) { + part = 0; /* Open failed, reset part number */ + Mmsg3(errmsg, _("Could not open(%s,%s,0640): ERR=%s\n"), + archive_name.c_str(), mode_to_str(omode), be.bstrerror()); + Dmsg1(dbglvl, "open failed: %s", errmsg); + } + } + if (m_fd >= 0 && !get_cache_sizes(dcr, getVolCatName())) { + return false; + } + /* TODO: Make sure max_cache_part and max_cloud_part are up to date */ + uint32_t max_cloud_part = cloud_prox->last_index(getVolCatName()); + if (can_read() && m_fd < 0 && part > MAX(max_cache_part, max_cloud_part)) { + Dmsg4(dbglvl, "set_eot: part=%d num_cache_parts=%d max_cache_part=%d max_cloud_part=%d\n", + part, num_cache_parts, max_cache_part, max_cloud_part); + set_eot(); + } + if (m_fd >= 0) { + if (omode == CREATE_READ_WRITE || omode == OPEN_READ_WRITE) { + set_append(); + } + dev_errno = 0; + file = 0; + file_addr = 0; + if (part > num_cache_parts) { + num_cache_parts = 
part; + if (part > max_cache_part) { + max_cache_part = part; + } + } + + /* Refresh the device id */ + if (fstat(m_fd, &sp) == 0) { + devno = sp.st_dev; + } + } else if (dcr->jcr) { + pm_strcpy(dcr->jcr->errmsg, errmsg); + } + state |= preserve; /* reset any important state info */ + + Dmsg3(dbglvl, "fd=%d part=%d num_cache_parts=%d\n", m_fd, part, num_cache_parts); + Leave(dbglvl); + return m_fd >= 0; +} + +bool cloud_dev::close(DCR *dcr) +{ + Enter(dbglvl); + bool ok = true; + + Dmsg6(dbglvl, "close_dev vol=%s part=%d fd=%d dev=%p adata=%d dev=%s\n", + VolHdr.VolumeName, part, m_fd, this, adata, print_name()); + + if (!is_open()) { + //Dmsg2(1000, "Device %s already closed vol=%s\n", print_name(),VolHdr.VolumeName); + Leave(dbglvl); + return true; /* already closed */ + } + + if (d_close(m_fd) != 0) { + berrno be; + dev_errno = errno; + Mmsg2(errmsg, _("Error closing device %s. ERR=%s.\n"), + print_name(), be.bstrerror()); + ok = false; + } + + unmount(1); /* do unmount if required */ + + /* Ensure the last written part is uploaded */ + if ((part > 0) && dcr->is_writing()) { + if (!upload_part_to_cloud(dcr, VolHdr.VolumeName, part)) { + if (errmsg[0]) { + Qmsg(dcr->jcr, M_ERROR, 0, "%s", errmsg); + } + } + } + + /* + * Clean up device packet so it can be re-opened + * + */ + state &= ~(ST_LABEL|ST_READ|ST_APPEND|ST_EOT|ST_WEOT|ST_EOF| + ST_NOSPACE|ST_MOUNTED|ST_MEDIA|ST_SHORT); + label_type = B_BACULA_LABEL; + clear_opened(); + file = block_num = 0; + part = 0; + EndAddr = get_full_addr(); + file_addr = 0; + EndFile = EndBlock = 0; + openmode = 0; + clear_volhdr(); + memset(&VolCatInfo, 0, sizeof(VolCatInfo)); + if (tid) { + stop_thread_timer(tid); + tid = 0; + } + Leave(dbglvl); + return ok; +} + +/* When constructed, jcr_killable_lock captures the jcr current killable state and set it to false. + * The original state is re-applied at destruction + */ +class jcr_not_killable +{ + JCR *m_jcr; + bool m_killable; +public: + jcr_not_killable(JCR* jcr) : + m_jcr(jcr), + m_killable(jcr->is_killable()) + { + if (m_killable) { + m_jcr->set_killable(false); + } + } + ~jcr_not_killable() + { + /* reset killable state */ + m_jcr->set_killable(m_killable); + } +}; + +/* update the cloud_proxy at VolName key. Only if necessary or if force-d */ +bool cloud_dev::probe_cloud_proxy(DCR *dcr,const char *VolName, bool force) +{ + /* check if the current volume is present in the proxy by probing the label (part.1)*/ + if (!cloud_prox->volume_lookup(VolName)|| force) { + /* Make sure the Job thread will not be killed in this function */ + jcr_not_killable jkl(dcr->jcr); + ilist cloud_parts(100, false); /* !! dont own the parts here */ + /* first, retrieve the volume content within cloud_parts list*/ + if (!driver->get_cloud_volume_parts_list(dcr, VolName, &cloud_parts, errmsg)) { + Dmsg2(dbglvl, "Cannot get cloud sizes for Volume=%s Err=%s\n", VolName, errmsg); + return false; + } + + /* then, add the content of cloud_parts in the proxy table */ + if (!cloud_prox->reset(VolName, &cloud_parts)) { + Dmsg1(dbglvl, "could not reset cloud proxy for Volume=%s\n", VolName); + return false; + } + } + return true; +} + +/* + * Truncate cache parts that are also in the cloud + * NOTE! We do not delete part.1 so that after this + * truncate cache command (really a sort of purge), + * the user can still do a restore. 
+ */ +int cloud_dev::truncate_cache(DCR *dcr, const char *VolName, int64_t *size) +{ + int i, nbpart=0; + Enter(dbglvl); + ilist cache_parts; + /* init the dev error message */ + errmsg[0] = 0; + POOLMEM *vol_dir = get_pool_memory(PM_NAME); + POOLMEM *fname = get_pool_memory(PM_NAME); + + if (!probe_cloud_proxy(dcr, VolName)) { + if (errmsg[0] == 0) { + Mmsg1(errmsg, "Truncate cache cannot get cache volume parts list for Volume=%s\n", VolName); + } + Dmsg1(dbglvl, "%s\n", errmsg); + nbpart = -1; + goto bail_out; + } + + if (!get_cache_volume_parts_list(dcr, VolName, &cache_parts)) { + if (errmsg[0] == 0) { + Mmsg1(errmsg, "Truncate cache cannot get cache volume parts list for Volume=%s\n", VolName); + } + Dmsg1(dbglvl, "%s\n", errmsg); + nbpart = -1; + goto bail_out; + } + + make_cache_volume_name(vol_dir, VolName); + + /* + * Remove every cache part that is also in the cloud + */ + for (i=2; i <= (int)cache_parts.last_index(); i++) { + int64_t cache_size = part_get_size(&cache_parts, i); + int64_t cloud_size = cloud_prox->get_size(VolName, i); + + /* remove cache parts that are empty or cache parts with matching cloud_part size*/ + if (cache_size != 0 && cache_size != cloud_size) { + Dmsg3(dbglvl, "Skip truncate for part=%d scloud=%lld scache=%lld\n", i, cloud_size, cache_size); + continue; + } + + /* Look in the transfer list if we have a download/upload for the current volume */ + if (download_mgr.find(VolName, i)) { + Dmsg1(dbglvl, "Skip truncate for part=%d\n", i); + continue; + } + + Mmsg(fname, "%s/part.%d", vol_dir, i); + if (unlink(fname) < 0) { + berrno be; + Mmsg2(errmsg, "Truncate cache failed to remove file %s. ERR: %s\n", fname, be.bstrerror()); + Dmsg1(dbglvl, "%s\n", errmsg); + } else { + *size = *size + cache_size; + nbpart++; + Dmsg1(dbglvl, "=== unlinked: part=%s\n", fname); + } + } +bail_out: + free_pool_memory(vol_dir); + free_pool_memory(fname); + Leave(dbglvl); + return nbpart; +} + +/* + * Truncate both cache and cloud + */ +bool cloud_dev::truncate(DCR *dcr) +{ + DIR* dp = NULL; + struct dirent *entry = NULL; + int name_max; + POOLMEM *vol_dir = get_pool_memory(PM_NAME); + POOLMEM *fname = get_pool_memory(PM_NAME); + bool ok = false; + POOL_MEM dname(PM_FNAME); + int status = 0; + ilist * iuploads = New(ilist(100,true)); /* owns the parts */ + ilist *truncate_list = NULL; + FILE *fp; + errmsg[0] = 0; + Enter(dbglvl); + + /* Make sure the Job thread will not be killed in this function */ + jcr_not_killable jkl(dcr->jcr); + + if (cache_sizes) { + free(cache_sizes); + cache_sizes = NULL; + } + num_cache_parts = 0; + max_cache_part = 0; + part = 0; + if (m_fd) { + ::close(m_fd); + m_fd = -1; + } + + name_max = pathconf(".", _PC_NAME_MAX); + if (name_max < 1024) { + name_max = 1024; + } + + make_cache_volume_name(vol_dir, getVolCatName()); + Dmsg1(dbglvl, "===== truncate: %s\n", vol_dir); + if (!(dp = opendir(vol_dir))) { + berrno be; + Mmsg2(errmsg, "Cannot opendir to get cache sizes. Volume %s does not exist. ERR=%s\n", + vol_dir, be.bstrerror()); + Dmsg1(dbglvl, "%s\n", errmsg); + goto get_out; + } + + entry = (struct dirent *)malloc(sizeof(struct dirent) + name_max + 1000); + for ( ;; ) { + errno = 0; + status = breaddir(dp, dname.addr()); + if (status == -1) { + break; + } else if (status > 0) { + Mmsg1(errmsg, "breaddir failed: status=%d", status); + Dmsg1(dbglvl, "%s\n", errmsg); + goto get_out; + } + + /* Always ignore . and .. 
*/ + if (strcmp(".", dname.c_str()) == 0 || strcmp("..", dname.c_str()) == 0) { + continue; + } + + /* Look only for part files */ + if (strncmp("part.", dname.c_str(), 5) != 0) { + continue; + } + Mmsg(fname, "%s/%s", vol_dir, dname.c_str()); + if (unlink(fname) < 0) { + berrno be; + Mmsg2(errmsg, "Failed to remove file %s ERR: %s\n", fname, be.bstrerror()); + Dmsg1(dbglvl, "%s\n", errmsg); + goto get_out; + } else { + Dmsg1(dbglvl, "=== unlinked: part=%s\n", fname); + } + } + + /* All parts have been unlinked. Recreate an empty part.1 + * FIX MT3450:Fatal error: Failed to re-open device after truncate on Cloud device */ + Dmsg1(dbglvl, "Recreate empty part.1 for volume: %s\n", vol_dir); + Mmsg(fname, "%s/part.1", vol_dir); + fp = bfopen(fname, "a"); + if (fp) { + fclose(fp); + } else { + berrno be; + Mmsg2(errmsg, "Failed to create empty file %s ERR: %s\n", fname, + be.bstrerror()); + } + + if (!dir_get_volume_info(dcr, getVolCatName(), GET_VOL_INFO_FOR_READ)) { + /* It may happen for label operation */ + Dmsg2(100, "dir_get_vol_info failed for vol=%s: %s\n", getVolCatName(), dcr->jcr->errmsg); + goto get_out; + } + + /* Update the Catalog information */ + dcr->VolCatInfo.VolCatParts = 0; + dcr->VolCatInfo.VolLastPartBytes = 0; + dcr->VolCatInfo.VolCatCloudParts = 0; + + openmode = CREATE_READ_WRITE; + if (!open_next_part(dcr)) { + goto get_out; + } + + /* check if the current volume is present in the proxy */ + if (!probe_cloud_proxy(dcr, getVolCatName())) { + goto get_out; + } + + /* wrap the uploads in a parts ilist */ + transfer *tpkt; + foreach_alist(tpkt, dcr->uploads) { + /* convert xfer into part when VolName match*/ + if (strcmp(tpkt->m_volume_name,getVolCatName())!=0) { + continue; + } + cloud_part *part = (cloud_part*) malloc(sizeof(cloud_part)); + part->index = tpkt->m_part; + part->mtime = tpkt->m_res_mtime; + part->size = tpkt->m_res_size; + iuploads->put(part->index, part); + } + /* returns the list of items to truncate : cloud parts-uploads*/ + truncate_list = cloud_prox->exclude(getVolCatName(), iuploads); + if (truncate_list && !driver->truncate_cloud_volume(dcr, getVolCatName(), truncate_list, errmsg)) { + Qmsg(dcr->jcr, M_ERROR, 0, "truncate_cloud_volume for %s: ERR=%s\n", getVolCatName(), errmsg); + goto get_out; + } + /* force proxy refresh (volume should be empty so it should be fast) */ + /* another approach would be to reuse truncate_list to remove items */ + if (!probe_cloud_proxy(dcr, getVolCatName(), true)) { + goto get_out; + } + /* check content of the list : only index should be available */ + for(uint32_t index=1; index<=cloud_prox->last_index(getVolCatName()); index++ ) { + if (cloud_prox->get(getVolCatName(), index)) { + Dmsg2(0, "truncate_cloud_volume proxy for volume %s got part.%d should be empty\n", getVolCatName(), index); + Qmsg(dcr->jcr, M_WARNING, 0, "truncate_cloud_volume: %s/part.%d is still present\n", getVolCatName(), index); + } + } + ok = true; + +get_out: + if (dp) { + closedir(dp); + } + if (entry) { + free(entry); + } + free_pool_memory(vol_dir); + free_pool_memory(fname); + + delete iuploads; + delete truncate_list; + + Leave(dbglvl); + return ok; +} + + +int cloud_dev::read_dev_volume_label(DCR *dcr) +{ + int stat; + Enter(dbglvl); + Dmsg2(dbglvl, "part=%d num_cache_parts=%d\n", part, num_cache_parts); + if (!is_open()) { + part = 0; + } + stat = file_dev::read_dev_volume_label(dcr); + Dmsg2(dbglvl, "part=%d num_cache_parts=%d\n", part, num_cache_parts); + return stat; +} + +const char *cloud_dev::print_type() +{ + return "Cloud"; +} + 
+ +/* + * makedir() is a lightly modified copy of the same function + * in findlib/mkpath.c + * + */ +bool makedir(JCR *jcr, char *path, mode_t mode) +{ + struct stat statp; + + if (mkdir(path, mode) != 0) { + berrno be; + if (lstat(path, &statp) != 0) { + Qmsg2(jcr, M_ERROR, 0, _("Cannot create directory %s: ERR=%s\n"), + path, be.bstrerror()); + return false; + } else if (!S_ISDIR(statp.st_mode)) { + Qmsg1(jcr, M_ERROR, 0, _("%s exists but is not a directory.\n"), path); + return false; + } + return true; /* directory exists */ + } + return true; +} + +/* + * This call closes the device, but it is used for part handling + * where we close one part and then open the next part. The + * difference between close_part() and close() is that close_part() + * saves the state information of the device (e.g. the Volume label, + * the Volume Catalog record, ... This permits opening and closing + * the Volume parts multiple times without losing track of what the + * main Volume parameters are. + */ +bool cloud_dev::close_part(DCR *dcr) +{ + bool ok = true; + + Enter(dbglvl); + Dmsg5(dbglvl, "close_part vol=%s fd=%d dev=%p adata=%d dev=%s\n", + VolHdr.VolumeName, m_fd, this, adata, print_name()); + + if (!is_open()) { + //Dmsg2(1000, "Device %s already closed vol=%s\n", print_name(),VolHdr.VolumeName); + Leave(dbglvl); + return true; /* already closed */ + } + + if (d_close(m_fd) != 0) { + berrno be; + dev_errno = errno; + Mmsg2(errmsg, _("Error closing device %s. ERR=%s.\n"), + print_name(), be.bstrerror()); + ok = false; + } + + m_fd = -1; + part = 0; + file_addr = 0; + Leave(dbglvl); + return ok; +} + +bool cloud_dev::open_next_part(DCR *dcr) +{ + Enter(dbglvl); + int save_part; + char ed1[50]; + + /* When appending, do not open a new part if the current is empty */ + if (can_append() && (part_size == 0)) { + Dmsg2(dbglvl, "open next: part=%d num_cache_parts=%d\n", part, num_cache_parts); + Leave(dbglvl); + return true; + } + + /* TODO: Get the the last max_part */ + uint32_t max_cloud_part = cloud_prox->last_index(getVolCatName()); + if (!can_append() && part >= MAX(max_cache_part, max_cloud_part)) { + Dmsg3(dbglvl, "EOT: part=%d num_cache_parts=%d max_cloud_part=%d\n", part, num_cache_parts, max_cloud_part); + Mmsg2(errmsg, "part=%d no more parts to read. 
addr=%s\n", part, + print_addr(ed1, sizeof(ed1), EndAddr)); + Dmsg1(dbglvl, "%s", errmsg); + part = 0; + Leave(dbglvl); + return false; + } + + save_part = part; + if (!close_part(dcr)) { /* close current part */ + Leave(dbglvl); + Mmsg2(errmsg, "close_part failed: part=%d num_cache_parts=%d\n", part, num_cache_parts); + Dmsg1(dbglvl, "%s", errmsg); + return false; + } + if (openmode == CREATE_READ_WRITE) { + VolCatInfo.VolCatParts = num_cache_parts; + if (!dir_update_volume_info(dcr, false, false, true)) { + Dmsg0(dbglvl, "Error from update_vol_info.\n"); + dev_errno = EIO; + return false; + } + part_size = 0; + } + + /* Restore part number */ + part = save_part; + + if (dcr->is_reading()) { + wait_one_transfer(dcr, getVolCatName(), part); + } + + /* Write part to cloud */ + Dmsg2(dbglvl, "=== part=%d num_cache_parts=%d\n", part, num_cache_parts); + if (dcr->is_writing()) { + if (!upload_part_to_cloud(dcr, getVolCatName(), part)) { + if (errmsg[0]) { + Qmsg(dcr->jcr, M_ERROR, 0, "%s", errmsg); + } + } + } + + /* Try to open next part */ + part++; + Dmsg2(dbglvl, "=== inc part: part=%d num_cache_parts=%d\n", part, num_cache_parts); + if (can_append()) { + Dmsg0(dbglvl, "Set openmode to CREATE_READ_WRITE\n"); + openmode = CREATE_READ_WRITE; + } + if (open_device(dcr, openmode)) { + if (openmode == CREATE_READ_WRITE) { + set_append(); + clear_eof(); + clear_eot(); + file_addr = 0; + file_addr = get_full_addr(); + if (lseek(dcr, file_addr, SEEK_SET) < 0) { + berrno be; + dev_errno = errno; + Mmsg2(errmsg, _("lseek to 0 error on %s. ERR=%s.\n"), + print_name(), be.bstrerror()); + Leave(dbglvl); + return false; + } + } + } else { /* open failed */ + /* TODO: Make sure max_cache_part and max_cloud_part are up to date */ + if (part > MAX(max_cache_part, max_cloud_part)) { + Dmsg4(dbglvl, "set_eot: part=%d num_cache_parts=%d max_cache_part=%d max_cloud_part=%d\n", + part, num_cache_parts, max_cache_part, max_cloud_part); + set_eof(); + set_eot(); + } + Leave(dbglvl); + Mmsg2(errmsg, "EOT: part=%d num_cache_parts=%d\n", part, num_cache_parts); + Dmsg1(dbglvl, "%s", errmsg); + return false; + } + + set_labeled(); /* all parts are labeled */ + + Dmsg3(dbglvl, "opened next: append=%d part=%d num_cache_parts=%d\n", can_append(), part, num_cache_parts); + Leave(dbglvl); + return true; +} + + +/* Print the object address */ +char *cloud_dev::print_addr(char *buf, int32_t buf_len) +{ + uint64_t full_addr = get_full_addr(); + buf[0] = 0; + bsnprintf(buf, buf_len, "%lu:%llu", get_part(full_addr), get_offset(full_addr)); + return buf; +} + +char *cloud_dev::print_addr(char *buf, int32_t buf_len, boffset_t addr) +{ + buf[0] = 0; + bsnprintf(buf, buf_len, "%lu:%llu", get_part(addr), get_offset(addr)); + return buf; +} + +/* + * Check if the current position on the volume corresponds to + * what is in the catalog. 
+ * + */ +bool cloud_dev::is_eod_valid(DCR *dcr) +{ + JCR *jcr = dcr->jcr; + ilist cache_parts; + bool do_update=false, ok=true; + POOL_MEM err, tmp; + + /* We need up to date information for Cloud and Cache */ + uint32_t max_cloud_part = cloud_prox->last_index(dcr->VolumeName); + uint64_t last_cloud_size = cloud_prox->get_size(dcr->VolumeName, max_cloud_part); + + get_cache_volume_parts_list(dcr, dcr->VolumeName, &cache_parts); + uint32_t max_cache_part = cache_parts.last_index(); + uint64_t last_cache_size = part_get_size(&cache_parts, max_cache_part); + + /* When we open a new part, the actual size is 0, so we are not very interested */ + if (last_cache_size == 0 && max_cache_part > 0) { + max_cache_part--; + last_cache_size = part_get_size(&cache_parts, max_cache_part); + } + + uint32_t last_p = MAX(max_cloud_part, max_cache_part); + uint64_t last_s = MAX(last_cache_size, last_cloud_size); + + Dmsg5(dbglvl, "vol=%s cache part=%ld size=%lld, cloud part=%ld size=%lld\n", + dcr->VolumeName, max_cache_part, last_cache_size, max_cloud_part, last_cloud_size); + + /* If we have the same Part number in the cloud and in the cache. We check + * the size of the two parts. The cache part may be truncated (size=0). + */ + if (max_cloud_part == max_cache_part) { + if (last_cache_size > 0 && last_cloud_size != last_cache_size) { + ok = false; /* Big consistency problem, which one do we take? Biggest one? */ + Mmsg(tmp, "The last Part %ld size do not match between the Cache and the Cloud! Cache=%lld Cloud=%lld.\n", + max_cloud_part, last_cloud_size, last_cache_size); + pm_strcat(err, tmp.c_str()); + } + } + + /* The catalog should have the right LastPart */ + if (VolCatInfo.VolCatParts != last_p) { + Mmsg(tmp, "The Parts do not match! Metadata Volume=%ld Catalog=%ld.\n", + last_p, VolCatInfo.VolCatParts); + VolCatInfo.VolCatParts = last_p; + VolCatInfo.VolLastPartBytes = last_s; + VolCatInfo.VolCatBytes = last_s; + pm_strcat(err, tmp.c_str()); + do_update = true; + + /* The catalog should have the right LastPartBytes */ + } else if (VolCatInfo.VolLastPartBytes != last_s) { + Mmsg(tmp, "The Last Part Bytes %ld do not match! Metadata Volume=%lld Catalog=%lld.\n", + last_p, VolCatInfo.VolLastPartBytes, last_s); + VolCatInfo.VolLastPartBytes = last_s; + VolCatInfo.VolCatBytes = last_s; + pm_strcat(err, tmp.c_str()); + do_update = true; + } + /* We also check that the last part uploaded in the cloud is correct */ + if (VolCatInfo.VolCatCloudParts != max_cloud_part) { + Mmsg(tmp, "The Cloud Parts do not match! 
Metadata Volume=%ld Catalog=%ld.\n", + max_cloud_part, VolCatInfo.VolCatCloudParts); + pm_strcat(err, tmp.c_str()); + do_update = true; + } + if (ok) { + if (do_update) { + Jmsg2(jcr, M_WARNING, 0, _("For Volume \"%s\":\n%s\nCorrecting Catalog\n"), dcr->VolumeName, err.c_str()); + if (!dir_update_volume_info(dcr, false, true)) { + Jmsg(jcr, M_WARNING, 0, _("Error updating Catalog\n")); + dcr->mark_volume_in_error(); + return false; + } + } + } else { + Mmsg2(jcr->errmsg, _("Bacula cannot write on disk Volume \"%s\" because: %s"), + dcr->VolumeName, err.c_str()); + Jmsg(jcr, M_ERROR, 0, jcr->errmsg); + Dmsg0(100, jcr->errmsg); + dcr->mark_volume_in_error(); + return false; + } + return true; +} + +/* + * We are called here when Bacula wants to append to a Volume + */ +bool cloud_dev::eod(DCR *dcr) +{ + bool ok; + uint32_t max_part = 1; + Enter(dbglvl); + + uint32_t max_cloud_part = cloud_prox->last_index(getVolCatName()); + Dmsg5(dbglvl, "=== eod: part=%d num_cache_parts=%d max_cache_part=%d max_cloud_part=%d vol_parts=%d\n", + part, num_cache_parts, max_cache_part, + max_cloud_part, VolCatInfo.VolCatParts); + + /* First find maximum part */ + if (max_part < max_cache_part) { + max_part = max_cache_part; + } + if (max_part < max_cloud_part) { + max_part = max_cloud_part; + } + if (max_part < VolCatInfo.VolCatParts) { + max_part = VolCatInfo.VolCatParts; + } + if (max_part < VolCatInfo.VolCatCloudParts) { + max_part = VolCatInfo.VolCatCloudParts; + } + if (part < max_part) { + if (!close_part(dcr)) { /* close current part */ + Leave(dbglvl); + Dmsg2(dbglvl, "close_part failed: part=%d num_cache_parts=%d\n", part, num_cache_parts); + return false; + } + /* Try to open next part */ + part = max_part; + /* Create new part */ + part_size = 0; + part++; + openmode = CREATE_READ_WRITE; + Dmsg2(dbglvl, "=== eod: set part=%d num_cache_parts=%d\n", part, num_cache_parts); + if (!open_device(dcr, openmode)) { + Leave(dbglvl); + Dmsg2(dbglvl, "Fail open_device: part=%d num_cache_parts=%d\n", part, num_cache_parts); + return false; + } + } + ok = file_dev::eod(dcr); + return ok; +} + +bool cloud_dev::write_volume_label(DCR *dcr, + const char *VolName, const char *PoolName, + bool relabel, bool no_prelabel) +{ + bool ok = DEVICE::write_volume_label(dcr, + VolName, PoolName, relabel, no_prelabel); + if (!ok) { + Dmsg0(dbglvl, "write_volume_label failed.\n"); + return false; + } + if (part != 1) { + Dmsg1(000, "Big problem!!! part=%d, but should be 1\n", part); + return false; + } + set_append(); + return true; +} + +bool cloud_dev::rewrite_volume_label(DCR *dcr, bool recycle) +{ + Enter(100); + bool ok = DEVICE::rewrite_volume_label(dcr, recycle); + /* + * Normally, at this point, the label has been written to disk + * but remains in the first part of the block, and will be + * "rewritten" when the full block is written. + * However, in the case of a cloud device the label has + * already been written to a part, so we must now clear + * the block of the label data. 
+ */ + empty_block(dcr->block); + if (!ok || !open_next_part(dcr)) { + ok = false; + } + Leave(100); + return ok; +} + +bool cloud_dev::do_size_checks(DCR *dcr, DEV_BLOCK *block) +{ + if (!DEVICE::do_size_checks(dcr, block)) { + return false; + } + + /* + * Do Cloud specific size checks + */ + /* Limit maximum part size to value specified by user */ + if (max_part_size > 0 && ((part_size + block->binbuf) >= max_part_size)) { + if (part < num_cache_parts) { + Qmsg3(dcr->jcr, M_FATAL, 0, _("Error while writing, current part number" + " is less than the total number of parts (%d/%d, device=%s)\n"), + part, num_cache_parts, print_name()); + dev_errno = EIO; + return false; + } + + if (!open_next_part(dcr)) { + return false; + } + } + + // static, so it's not calculated everytime + static uint64_t hard_max_part_size = ((uint64_t)1 << off_bits) -1; + static uint32_t hard_max_part_number = ((uint32_t)1 << part_bits) -1; + + if (part_size >= hard_max_part_size) { + Qmsg3(dcr->jcr, M_FATAL, 0, _("Error while writing, current part size" + " is greater than the maximum part size (%d>%d, device=%s)\n"), + part_size, hard_max_part_size, print_name()); + dev_errno = EIO; + return false; + } + + if (part >= hard_max_part_number) { + Qmsg3(dcr->jcr, M_FATAL, 0, _("Error while writing, current part number" + " is greater than the maximum part number (%d>%d, device=%s)\n"), + part, hard_max_part_number, print_name()); + dev_errno = EIO; + return false; + } + + return true; +} + +bool cloud_dev::start_of_job(DCR *dcr) +{ + if (driver) { + driver->start_of_job(dcr); + } + return true; +} + + +/* Two jobs can try to update the catalog information for a given cloud + * volume. It might be avoided by converting the vol_info_mutex to a recursive + * lock +*/ +static pthread_mutex_t update_mutex = PTHREAD_MUTEX_INITIALIZER; + +static void update_volume_record(DCR *dcr, transfer *ppkt) +{ + lock_guard lg(update_mutex); /* automatically released at exit */ + bool do_update=false; + /* + * At this point ppkt should have the last part for the + * previous volume, so update the Media record. + */ + if (!dir_get_volume_info(dcr, ppkt->m_volume_name, GET_VOL_INFO_FOR_READ)) { + /* It may happen for label operation */ + Dmsg2((ppkt->m_part == 1 ? 100 : 0) , "dir_get_vol_info failed for vol=%s: %s\n", + ppkt->m_volume_name, dcr->jcr->errmsg); + return; + } + + /* Between the GET and the UPDATE, and other job can call the same + * function and put more up to date information. 
So we are protected + * by the update_mutex + */ + /* Update the Media information */ + if ((ppkt->m_part > dcr->VolCatInfo.VolCatParts) || + (ppkt->m_part == dcr->VolCatInfo.VolCatParts && dcr->VolCatInfo.VolLastPartBytes != ppkt->m_stat_size)) + { + do_update=true; + dcr->VolCatInfo.VolCatParts = ppkt->m_part; + dcr->VolCatInfo.VolLastPartBytes = ppkt->m_stat_size; + } + /* We update the CloudParts in the catalog only if the current transfer is correct */ + if (ppkt->m_state == TRANS_STATE_DONE && ppkt->m_part > dcr->VolCatInfo.VolCatCloudParts && ppkt->m_stat_size > 0) { + do_update = true; + dcr->VolCatInfo.VolCatCloudParts = ppkt->m_part; + } + if (do_update) { + dir_update_volume_info(dcr, false, true, true/*use_dcr*/); + } +} + +bool cloud_dev::end_of_job(DCR *dcr) +{ + Enter(dbglvl); + transfer *tpkt; /* current packet */ + transfer *ppkt=NULL; /* previous packet */ + const char *prefix = ""; + + /* before waiting on transfers, we might have to lauch the uploads */ + if (upload_opt == UPLOAD_AT_ENDOFJOB) { + foreach_alist(tpkt, dcr->uploads) { + tpkt->queue(); + } + } + + /* + * We wait for each of our uploads to complete + * Note: we also want to update the cloud parts and cache parts for + * each part uploaded. The deletes list contains transfer packet for + * each part that was upload in the order of the parts as they were + * created. Also, there may be multiple Volumes that were uploaded, + * so for each volume, we search until the end of the list or a + * different Volume is found in order to find the maximum part + * number that was uploaded. Then we read the Media record for + * that Volume, update it, and write it back to the catalog. + */ + POOL_MEM msg(PM_MESSAGE); + if (!dcr->downloads->empty()) { + if (!dcr->jcr->is_internal_job()) { + Jmsg(dcr->jcr, M_INFO, 0, _("Cloud Download transfers:\n")); + } else { + prefix = "3000 Cloud Download: "; + } + Dmsg1(dbglvl, "%s", msg.c_str()); + foreach_alist(tpkt, dcr->downloads) { + /* Do we really need to wait on downloads : if we didn't + * wait for them until now, we basically didn't use them. And we + * surelly won't anymore. If the job is canceled we can cancel our + * own downloads (do not touch downloads shared with other jobs). + */ + wait_end_of_transfer(dcr, tpkt); + POOL_MEM dmsg(PM_MESSAGE); + tpkt->append_status(dmsg); + Jmsg(dcr->jcr, M_INFO, 0, "%s%s", prefix, dmsg.c_str()); + download_mgr.release(tpkt); + } + } + dcr->downloads->destroy(); + + if (!dcr->uploads->empty()) { + int oldstatus = dcr->jcr->JobStatus; + dcr->jcr->sendJobStatus(JS_CloudUpload); + if (!dcr->jcr->is_internal_job()) { + Jmsg(dcr->jcr, M_INFO, 0, _("Cloud Upload transfers:\n")); + } else { + prefix = "3000 Cloud Upload: "; + } + foreach_alist(tpkt, dcr->uploads) { + wait_end_of_transfer(dcr, tpkt); + POOL_MEM umsg(PM_MESSAGE); + tpkt->append_status(umsg); + Jmsg(dcr->jcr, (tpkt->m_state == TRANS_STATE_ERROR) ? M_ERROR : M_INFO, 0, "%s%s", prefix, umsg.c_str()); + Dmsg1(dbglvl, "%s", umsg.c_str()); + + if (tpkt->m_state == TRANS_STATE_ERROR) { + Mmsg(dcr->jcr->StatusErrMsg, _("Upload to Cloud failed")); + } else if (trunc_opt == TRUNC_AT_ENDOFJOB && tpkt->m_part!=1) { + /* else -> don't remove the cache file if the upload failed */ + if (unlink(tpkt->m_cache_fname) != 0) { + berrno be; + Dmsg2(dbglvl, "Truncate cache option at end of job. Unable to delete %s. ERR=%s\n", tpkt->m_cache_fname, be.bstrerror()); + } else { + Dmsg1(dbglvl, "Truncate cache option at end of job. 
Unlink file %s\n", tpkt->m_cache_fname); + } + } + + if (ppkt == NULL) { + ppkt = tpkt; + continue; + } + if (strcmp(ppkt->m_volume_name, tpkt->m_volume_name) == 0) { + ppkt = tpkt; + continue; + } + /* vol name changed so update media for previous transfer */ + update_volume_record(dcr, ppkt); + ppkt = tpkt; + } + dcr->jcr->sendJobStatus(oldstatus); + } + + /* Update the last (previous) one */ + if (ppkt) { + Dmsg3(dbglvl, "== Last part=%d size=%lld Volume=%s\n", ppkt->m_part, + ppkt->m_stat_size, ppkt->m_volume_name); + update_volume_record(dcr, ppkt); + Dmsg3(dbglvl, "=== Very Last part=%d size=%lld Volume=%s\n", ppkt->m_part, + ppkt->m_stat_size, ppkt->m_volume_name); + } + + /* Now, clear our list and the global one if needed */ + foreach_alist(tpkt, dcr->uploads) { + upload_mgr.release(tpkt); + } + dcr->uploads->destroy(); + + if (driver) { + driver->end_of_job(dcr); + } + + Leave(dbglvl); + return true; +} + +bool cloud_dev::wait_end_of_transfer(DCR *dcr, transfer *elem) +{ + if (!elem) { + return false; + } + + Enter(dbglvl); + struct timeval tv; + tv.tv_usec = 0; + tv.tv_sec = 30; + + int stat = ETIMEDOUT; + while (stat == ETIMEDOUT) { + + if (dcr->jcr->is_canceled()) { + elem->cancel(); + break; + } + + if (chk_dbglvl(dbglvl)) { + POOL_MEM status(PM_FNAME); + get_cloud_upload_transfer_status(status, false); + Dmsg1(0, "%s\n",status.addr()); + get_cloud_download_transfer_status(status, false); + Dmsg1(0, "%s\n",status.addr()); + } + + stat = elem->timedwait(tv); + } + + Leave(dbglvl); + return (stat == 0); +} + +/* TODO: Add .api2 mode for the status message */ +/* format a status message of the cloud transfers. Verbose gives details on each transfer */ +uint32_t cloud_dev::get_cloud_upload_transfer_status(POOL_MEM& msg, bool verbose) +{ + upload_mgr.update_statistics(); + uint32_t ret = 0; + ret = Mmsg(msg,_(" Uploads ")); + ret += upload_mgr.append_status(msg, verbose); + return ret; +} + +/* format a status message of the cloud transfers. Verbose gives details on each transfer */ +uint32_t cloud_dev::get_cloud_download_transfer_status(POOL_MEM& msg, bool verbose) +{ + download_mgr.update_statistics(); + uint32_t ret = 0; + ret = Mmsg(msg,_(" Downloads ")); + ret += download_mgr.append_status(msg, verbose); + return ret; +} + +/* for a given volume VolumeName, return parts that is a list of the + * cache parts within the volume */ +bool cloud_dev::get_cache_volume_parts_list(DCR *dcr, const char* VolumeName, ilist *parts) +{ + JCR *jcr = dcr->jcr; + Enter(dbglvl); + + if (!parts || strlen(VolumeName) == 0) { + return false; + } + + POOLMEM *vol_dir = get_pool_memory(PM_NAME); + /*NB : *** QUESTION *** : it works with examples but is archive_name() the kosher fct to call to get the cache path? */ + pm_strcpy(vol_dir, archive_name()); + if (!IsPathSeparator(vol_dir[strlen(vol_dir)-1])) { + pm_strcat(vol_dir, "/"); + } + pm_strcat(vol_dir, VolumeName); + + DIR* dp = NULL; + struct dirent *entry = NULL; + struct stat statbuf; + int name_max; + bool ok = false; + POOL_MEM dname(PM_FNAME); + int status = 0; + + Enter(dbglvl); + + Dmsg1(dbglvl, "Searching for parts in: %s\n", VolumeName); + + if (!(dp = opendir(vol_dir))) { + berrno be; + Mmsg2(errmsg, "Cannot opendir to get parts list. Volume %s does not exist. 
ERR=%s\n", + VolumeName, be.bstrerror()); + Dmsg1(dbglvl, "%s", errmsg); + goto get_out; + } + + name_max = pathconf(".", _PC_NAME_MAX); + if (name_max < 1024) { + name_max = 1024; + } + + entry = (struct dirent *)malloc(sizeof(struct dirent) + name_max + 1000); + + for ( ;; ) { + if (jcr->is_canceled()) { + goto get_out; + } + errno = 0; + status = breaddir(dp, dname.addr()); + if (status == -1) { + break; + } else if (status < 0) { + Mmsg1(errmsg, "breaddir failed: status=%d", status); + Dmsg1(dbglvl, "%s\n", errmsg); + goto get_out; + } + /* Always ignore . and .. */ + if (strcmp(".", dname.c_str()) == 0 || strcmp("..", dname.c_str()) == 0) { + continue; + } + + /* Look only for part files */ + if (strncmp("part.", dname.c_str(), 5) != 0) { + continue; + } + char *ext = strrchr (dname.c_str(), '.'); + if (!ext || strlen(ext) < 2) { + continue; + } + + cloud_part *part = (cloud_part*) malloc(sizeof(cloud_part)); + if (!part) { + berrno be; + Dmsg1(dbglvl, "Failed to create part structure: %s\n", + be.bstrerror()); + goto get_out; + } + + /* save extension (part number) to cloud_part struct index*/ + part->index = atoi(&ext[1]); + + /* Bummer : caller is responsible for freeing label */ + POOLMEM *part_path = get_pool_memory(PM_NAME); + pm_strcpy(part_path, vol_dir); + if (!IsPathSeparator(part_path[strlen(vol_dir)-1])) { + pm_strcat(part_path, "/"); + } + pm_strcat(part_path, dname.c_str()); + + /* Get size of part */ + if (lstat(part_path, &statbuf) == -1) { + berrno be; + Dmsg2(dbglvl, "Failed to stat file %s: %s\n", + part_path, be.bstrerror()); + free_pool_memory(part_path); + free(part); + goto get_out; + } + free_pool_memory(part_path); + + part->size = statbuf.st_size; + part->mtime = statbuf.st_mtime; + parts->put(part->index, part); + } + + ok = true; + +get_out: + if (dp) { + closedir(dp); + } + if (entry) { + free(entry); + } + free_pool_memory(vol_dir); + + return ok; +} + +/* + * Upload cache parts that are not in the cloud + */ +bool cloud_dev::upload_cache(DCR *dcr, const char *VolumeName, POOLMEM *&err) +{ + int i; + Enter(dbglvl); + bool ret=true; + ilist cloud_parts; + ilist cache_parts; + POOLMEM *vol_dir = get_pool_memory(PM_NAME); + POOLMEM *fname = get_pool_memory(PM_NAME); + + if (!driver->get_cloud_volume_parts_list(dcr, VolumeName, &cloud_parts, err)) { + Qmsg2(dcr->jcr, M_ERROR, 0, "Error while uploading parts for volume %s. 
%s\n", VolumeName, err); + ret = false; + goto bail_out; + } + + if (!get_cache_volume_parts_list(dcr, VolumeName, &cache_parts)) { + Qmsg1(dcr->jcr, M_ERROR, 0, "Error while listing cache parts for volume %s.\n", VolumeName); + ret = false; + goto bail_out; + } + + make_cache_volume_name(vol_dir, VolumeName); + + /* + * Upload every part where cache_size > cloud_size + */ + for (i=1; i <= (int)cache_parts.last_index(); i++) { + if (i <= (int)cloud_parts.last_index()) { /* not on the cloud, but exists in the cache */ + cloud_part *cachep = (cloud_part *)cache_parts[i]; + cloud_part *cloudp = (cloud_part *)cloud_parts[i]; + + if (!cachep || cachep->size == 0) { /* Not in the current cache */ + continue; + } + if (cloudp && cloudp->size >= cachep->size) { + continue; /* already uploaded */ + } + } + Mmsg(fname, "%s/part.%d", vol_dir, i); + Dmsg1(dbglvl, "Do upload of %s\n", fname); + if (!upload_part_to_cloud(dcr, VolumeName, i)) { + if (errmsg[0]) { + Qmsg(dcr->jcr, M_ERROR, 0, "%s", errmsg); + } + ret = false; + } else { + Qmsg(dcr->jcr, M_INFO, 0, "Uploaded cache %s\n", fname); + } + } +bail_out: + free_pool_memory(vol_dir); + free_pool_memory(fname); + Leave(dbglvl); + return ret; +} diff --git a/src/stored/cloud_dev.h b/src/stored/cloud_dev.h new file mode 100644 index 00000000..7bd9ed8a --- /dev/null +++ b/src/stored/cloud_dev.h @@ -0,0 +1,119 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Generic routines for writing Cloud Volumes + * + * Written by Kern Sibbald, May MMXVI + */ + +#ifndef _CLOUD_DEV_H_ +#define _CLOUD_DEV_H_ + +#define part_bits 20 +#define part_mask 0x7FFFFLL +#define off_bits (64-part_bits) +#define off_mask 0xFFFFFFFFFFFLL + +#include "bacula.h" +#include "stored.h" +#include "cloud_driver.h" +#include "cloud_transfer_mgr.h" +#include "cloud_parts.h" + +class cloud_dev: public file_dev { +public: + int64_t obj_len; + int status; + + uint64_t *cache_sizes; + uint32_t num_cache_parts; + uint32_t max_cache_part; + uint32_t max_cache_size; + + uint32_t trunc_opt; + uint32_t upload_opt; + + cloud_driver *driver; + + static transfer_manager download_mgr; + static transfer_manager upload_mgr; + + cloud_proxy *cloud_prox; + + void add_vol_and_part(POOLMEM *&filename, const char *VolumeName, const char *name, uint32_t part); + +private: + char *cache_directory; + bool download_parts_to_read(DCR *dcr, alist* parts); + bool upload_part_to_cloud(DCR *dcr, const char *VolumeName, uint32_t part); + transfer *download_part_to_cache(DCR *dcr, const char *VolumeName, uint32_t part); + void make_cache_filename(POOLMEM *&filename, const char *VolumeName, uint32_t part); + void make_cache_volume_name(POOLMEM *&full_volname, const char *VolumeName); + bool get_cache_sizes(DCR *dcr, const char *VolumeName); + bool wait_end_of_transfer(DCR *dcr, transfer *elem); + bool get_cache_volume_parts_list(DCR *dcr, const char* VolumeName, ilist *parts); + bool wait_one_transfer(DCR *dcr, char *VolName, uint32_t part); + bool probe_cloud_proxy(DCR *dcr, const char* VolName, bool force=false); + +public: + cloud_dev(JCR *jcr, DEVRES *device); + ~cloud_dev(); + + bool close_part(DCR *dcr); + uint32_t get_part(boffset_t ls_offset); + + /* DEVICE virtual interfaces that we redefine */ + boffset_t lseek(DCR *dcr, off_t offset, int whence); + bool rewind(DCR *dcr); + bool reposition(DCR *dcr, uint64_t raddr); + bool open_device(DCR *dcr, int omode); + bool open_next_part(DCR *dcr); + bool truncate(DCR *dcr); + int truncate_cache(DCR *dcr, const char *VolName, int64_t *size); + bool upload_cache(DCR *dcr, const char *VolName, POOLMEM *&err); + bool close(DCR *dcr); + bool update_pos(DCR *dcr); + bool is_eod_valid(DCR *dcr); + bool eod(DCR *dcr); + int read_dev_volume_label(DCR *dcr); + const char *print_type(); + DEVICE *get_dev(DCR *dcr); + uint32_t get_hi_addr(); + uint32_t get_low_addr(); + uint64_t get_full_addr(); + uint64_t get_full_addr(boffset_t addr); + char *print_addr(char *buf, int32_t buf_len); + char *print_addr(char *buf, int32_t maxlen, boffset_t addr); + bool do_size_checks(DCR *dcr, DEV_BLOCK *block); + bool write_volume_label(DCR *dcr, + const char *VolName, const char *PoolName, + bool relabel, bool no_prelabel); + bool rewrite_volume_label(DCR *dcr, bool recycle); + bool start_of_job(DCR *dcr); + bool end_of_job(DCR *dcr); + bool get_cloud_volumes_list(DCR* dcr, alist *volumes, POOLMEM *&err) { return driver->get_cloud_volumes_list(dcr, volumes, err); }; + bool get_cloud_volume_parts_list(DCR *dcr, const char *VolumeName, ilist *parts, POOLMEM *&err) { return driver->get_cloud_volume_parts_list(dcr, VolumeName, parts, err);}; + uint32_t get_cloud_upload_transfer_status(POOL_MEM &msg, bool verbose); + uint32_t get_cloud_download_transfer_status(POOL_MEM &msg, bool verbose); +}; + +/* Exported subroutines */ +bool makedir(JCR *jcr, char *path, mode_t mode); + +#endif /* _CLOUD_DEV_H_ */ diff --git a/src/stored/cloud_driver.h b/src/stored/cloud_driver.h new file 
mode 100644 index 00000000..dafd880f --- /dev/null +++ b/src/stored/cloud_driver.h @@ -0,0 +1,64 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Routines for writing Cloud drivers + * + * Written by Kern Sibbald, May MMXVI + */ + +#include "bacula.h" +#include "stored.h" +#include "cloud_parts.h" +#include "cloud_transfer_mgr.h" +#include "lib/bwlimit.h" + +#ifndef _CLOUD_DRIVER_H_ +#define _CLOUD_DRIVER_H_ + +#define NUM_UPLOAD_RETRIES 2 +class cloud_dev; + +enum { + C_S3_DRIVER = 1, + C_FILE_DRIVER = 2 +}; + +/* Abstract class cannot be instantiated */ +class cloud_driver: public SMARTALLOC { +public: + cloud_driver() : max_upload_retries(NUM_UPLOAD_RETRIES) {}; + virtual ~cloud_driver() {}; + + virtual bool copy_cache_part_to_cloud(transfer *xfer) = 0; + virtual bool copy_cloud_part_to_cache(transfer *xfer) = 0; + virtual bool truncate_cloud_volume(DCR *dcr, const char *VolumeName, ilist *trunc_parts, POOLMEM *&err) = 0; + virtual bool init(JCR *jcr, cloud_dev *dev, DEVRES *device) = 0; + virtual bool term(DCR *dcr) = 0; + virtual bool start_of_job(DCR *dcr) = 0; + virtual bool end_of_job(DCR *dcr) = 0; + + virtual bool get_cloud_volume_parts_list(DCR *dcr, const char* VolumeName, ilist *parts, POOLMEM *&err) = 0; + virtual bool get_cloud_volumes_list(DCR* dcr, alist *volumes, POOLMEM *&err) = 0; /* TODO: Adapt the prototype to have a handler instead */ + + bwlimit upload_limit; + bwlimit download_limit; + uint32_t max_upload_retries; +}; + +#endif /* _CLOUD_DRIVER_H_ */ diff --git a/src/stored/cloud_parts.c b/src/stored/cloud_parts.c new file mode 100644 index 00000000..a78032a3 --- /dev/null +++ b/src/stored/cloud_parts.c @@ -0,0 +1,607 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Routines for writing Cloud drivers + * + * Written by Kern Sibbald, May MMXVI + */ + +#include "cloud_parts.h" + +bool operator==(const cloud_part& lhs, const cloud_part& rhs) +{ + return (lhs.index == rhs.index && + lhs.mtime == rhs.mtime && + lhs.size == rhs.size); +} + +bool operator!=(const cloud_part& lhs, const cloud_part& rhs) +{ + return !operator==(lhs, rhs); +} + +bool operator==(const cloud_part& lhs, const uint32_t& rhs) +{ + return (lhs.index == rhs); +} + +bool operator!=(const cloud_part& lhs, const uint32_t& rhs) +{ + return !operator==(lhs, rhs); +} + +/* compares the all cloud_part, according to the operator==() above*/ +bool list_contains_part(ilist *parts, cloud_part *p) +{ + if (parts && p) { + cloud_part *ap = (cloud_part *)parts->get(p->index); + if (ap && *ap == *p) { + return true; + } + } + return false; +} + +/* only checks if the part_idx part exists in the parts lst*/ +bool list_contains_part(ilist *parts, uint32_t part_idx) +{ + if (parts && part_idx > 0) { + return parts->get(part_idx) != NULL; + } + return false; +} + +bool identical_lists(ilist *parts1, ilist *parts2) +{ + if (parts1 && parts2) { + /* Using indexed ilist forces us to treat it differently (foreach not working b.e.) */ + int max_size = parts1->last_index(); + if (parts2->last_index() > parts1->last_index()) { + max_size = parts2->last_index(); + } + for(int index=0; index<=max_size; index++ ) { + cloud_part *p1 = (cloud_part *)parts1->get(index); + cloud_part *p2 = (cloud_part *)parts2->get(index); + if (!p1) { + if (p2) return false; + } else if (!p2) { + if (p1) return false; + } else if (*p1 != *p2) { + return false; + } + } + return true; + } + return false; +} + +/* cloud_parts present in source but not in dest are appended to diff. + * there's no cloud_part copy made. + * Diff only holds references and shoudn't own them */ +bool diff_lists(ilist *source, ilist *dest, ilist *diff) +{ + if (source && dest && diff) { + /* Using indexed list forces us to treat it differently (foreach not working b.e.) */ + int max_size = source->last_index(); + if (dest->last_index() > source->last_index()) { + max_size = dest->last_index(); + } + for(int index=0; index<=max_size; index++ ) { + cloud_part *p1 = (cloud_part *)source->get(index); + cloud_part *p2 = (cloud_part *)dest->get(index); + if (!p1) { + if (p2) diff->put(index, p1); + } else if (!p2) { + if (p1) diff->put(index, p1); + } else if (*p1 != *p2) { + diff->put(index, p1); + } + } + return true; + } + return false; +} + +/*================================================= + * cloud_proxy definitions + ================================================= */ + +cloud_proxy * cloud_proxy::m_pinstance=NULL; +uint64_t cloud_proxy::m_count=0; + +/* hash table node structure */ +typedef struct { + hlink hlnk; + ilist *parts_lst; + char *key_name; +} VolHashItem; + +/* constructor + * size: the default hash size + * owns: determines if the ilists own the cloud_parts or not */ +cloud_proxy::cloud_proxy(uint32_t size, bool owns) +{ + pthread_mutex_init(&m_mutex, 0); + VolHashItem *hitem=NULL; + m_hash = New(htable(hitem, &hitem->hlnk, size)); + m_owns = owns; +} + +/* destructor + * we need to go thru each htable node and manually delete + * the associated alist before deleting the htable itself */ +cloud_proxy::~cloud_proxy() +{ + VolHashItem *hitem; + foreach_htable(hitem, m_hash) { + delete hitem->parts_lst; + free (hitem->key_name); + } + delete m_hash; + pthread_mutex_destroy(&m_mutex); +} + +/* insert the cloud_part into the proxy. 
+ * create the volume ilist if necessary */ +bool cloud_proxy::set(const char *volume, cloud_part *part) +{ + if (part) { + return set(volume, part->index, part->mtime, part->size); + } + return false; +} + +bool cloud_proxy::set(const char *volume, uint32_t index, utime_t mtime, uint64_t size) +{ + if (!volume || index < 1) { + return false; + } + lock_guard lg(m_mutex); + /* allocate new part */ + cloud_part *part = (cloud_part*) malloc(sizeof(cloud_part)); + /* fill it with result info from the transfer */ + part->index = index; + part->mtime = mtime; + part->size = size; + + VolHashItem *hitem = (VolHashItem*)m_hash->lookup(const_cast(volume)); + if (hitem) { /* when the node already exist, put the cloud_part into the vol list */ + /* free the existing part */ + if (hitem->parts_lst->get(index)) { + free(hitem->parts_lst->get(index)); + } + hitem->parts_lst->put(index, part); + return true; + } else { /* if the node doesnt exist for this key, create it */ + ilist *new_lst = New(ilist(100,m_owns)); + new_lst->put(part->index, part); + /* use hashtable helper malloc */ + VolHashItem *new_hitem = (VolHashItem *) m_hash->hash_malloc(sizeof(VolHashItem)); + new_hitem->parts_lst = new_lst; + new_hitem->key_name = bstrdup(volume); + return m_hash->insert(new_hitem->key_name, new_hitem); + } + return false; +} + +/* retrieve the cloud_part for the volume name at index part idx + * can return NULL */ +cloud_part *cloud_proxy::get(const char *volume, uint32_t index) +{ + lock_guard lg(m_mutex); + if (volume) { + VolHashItem *hitem = (VolHashItem *)m_hash->lookup(const_cast(volume)); + if (hitem) { + ilist * ilst = hitem->parts_lst; + if (ilst) { + return (cloud_part*)ilst->get(index); + } + } + } + return NULL; +} + +uint64_t cloud_proxy::get_size(const char *volume, uint32_t part_idx) +{ + cloud_part *cld_part = get(volume, part_idx); + return cld_part ? 
cld_part->size:0; +} + +/* Check if the volume entry exists and return true if it's the case */ +bool cloud_proxy::volume_lookup(const char *volume) +{ + lock_guard lg(m_mutex); + return ( (volume) && m_hash->lookup(const_cast(volume)) ); +} + +/* reset the volume list content with the content of part_list */ +bool cloud_proxy::reset(const char *volume, ilist *part_list) +{ + lock_guard lg(m_mutex); + if (volume && part_list) { + VolHashItem *hitem = (VolHashItem*)m_hash->lookup(const_cast(volume)); + if (hitem) { /* when the node already exist, recycle it */ + delete hitem->parts_lst; + } else { /* create the node */ + hitem = (VolHashItem *) m_hash->hash_malloc(sizeof(VolHashItem)); + hitem->key_name = bstrdup(volume); + if (!m_hash->insert(hitem->key_name, hitem)) { + return false; + } + } + /* re-create the volume list */ + hitem->parts_lst = New(ilist(100, m_owns)); + /* feed it with cloud_part elements */ + for(int index=1; index<=part_list->last_index(); index++ ) { + cloud_part *part = (cloud_part *)part_list->get(index); + if (part) { + hitem->parts_lst->put(index, part); + } + } + return true; + } + return false; +} + +uint32_t cloud_proxy::last_index(const char *volume) +{ + lock_guard lg(m_mutex); + if (volume) { + VolHashItem *hitem = (VolHashItem*)m_hash->lookup(const_cast(volume)); + if (hitem && hitem->parts_lst) { + return hitem->parts_lst->last_index(); + } + } + return 0; +} + +ilist *cloud_proxy::exclude(const char *volume, ilist *exclusion_lst) +{ + if (volume && exclusion_lst) { + VolHashItem *hitem = (VolHashItem*)m_hash->lookup(const_cast(volume)); + if (hitem) { + ilist *res_lst = New(ilist(100, false)); + if (diff_lists(hitem->parts_lst, exclusion_lst, res_lst)) { + return res_lst; + } + } + } + return NULL; +} +cloud_proxy *cloud_proxy::get_instance() +{ + if (!m_pinstance) { + m_pinstance = New(cloud_proxy()); + } + ++m_count; + return m_pinstance; +} + +void cloud_proxy::release() +{ + if (--m_count == 0) { + delete m_pinstance; + m_pinstance = NULL; + } +} + +void cloud_proxy::dump() +{ + VolHashItem *hitem; + foreach_htable(hitem, m_hash) { + Dmsg2(0, "proxy (%d) Volume:%s\n", m_hash->size(), hitem->hlnk.key.key); + for(int index=0; index<=hitem->parts_lst->last_index(); index++ ) { + cloud_part *p = (cloud_part *)hitem->parts_lst->get(index); + if (p) { + Dmsg1(0, "part.%d\n", p->index); + } + } + } +} + +//================================================= +#ifdef TEST_PROGRAM +int main (int argc, char *argv[]) +{ + pthread_attr_t attr; + + void * start_heap = sbrk(0); + (void)start_heap; + + setlocale(LC_ALL, ""); + bindtextdomain("bacula", LOCALEDIR); + textdomain("bacula"); + init_stack_dump(); + my_name_is(argc, argv, "cloud_parts_test"); + init_msg(NULL, NULL); + daemon_start_time = time(NULL); + set_thread_concurrency(150); + lmgr_init_thread(); /* initialize the lockmanager stack */ + pthread_attr_init(&attr); + berrno be; + + printf("Test0\n"); + { + cloud_part p1, p2, p3, p4; + + p1.index = 1; + p1.mtime = 1000; + p1.size = 1000; + + p2.index = 2; + p2.mtime = 2000; + p2.size = 2020; + + p3.index = 3; + p3.mtime = 3000; + p3.size = 3030; + + p4.index = 4; + p4.mtime = 4000; + p4.size = 4040; + + ilist l(10,false); + l.put(p1.index,&p1); + l.put(p2.index,&p2); + l.put(p3.index,&p3); + + ASSERT(list_contains_part(&l, &p1)); + ASSERT(list_contains_part(&l, &p2)); + ASSERT(list_contains_part(&l, &p3)); + ASSERT(!list_contains_part(&l, &p4)); + + ASSERT(list_contains_part(&l, 3)); + ASSERT(list_contains_part(&l, 1)); + ASSERT(list_contains_part(&l, 2)); 
+ ASSERT(!list_contains_part(&l, 4)); + } + + printf("Test1\n"); + { + cloud_part p1, p2, p3; + + p1.index = 1; + p1.mtime = 1000; + p1.size = 1000; + + p2.index = 2; + p2.mtime = 2000; + p2.size = 2020; + + p3.index = 3; + p3.mtime = 3000; + p3.size = 3030; + + ilist cloud(10,false); + cloud.put(p1.index, &p1); + cloud.put(p2.index, &p2); + + ilist cache(10,false); + cache.put(p3.index, &p3); + + ASSERT(!identical_lists(&cloud, &cache)); + + cache.put(p1.index, &p1); + ASSERT(!identical_lists(&cloud, &cache)); + } + + printf("Test2\n"); + { + cloud_part p1, p2, p3, p4; + + p1.index = 1; + p1.mtime = 1000; + p1.size = 1000; + + p2.index = 2; + p2.mtime = 2000; + p2.size = 2020; + + p3.index = 3; + p3.mtime = 3000; + p3.size = 3030; + + p4.index = 4; + p4.mtime = 4000; + p4.size = 4040; + + ilist cloud(10,false); + cloud.put(p1.index, &p1); + cloud.put(p2.index, &p2); + + ilist cache(10,false); + cloud.put(p3.index, &p3); + cloud.put(p4.index, &p4); + + ASSERT(!identical_lists(&cloud, &cache)); + + cache.put(p1.index, &p1); + ASSERT(!identical_lists(&cloud, &cache)); + } + + printf("Test3\n"); + { + cloud_part p1, p2, p3; + + p1.index = 1; + p1.mtime = 1000; + p1.size = 1000; + + p2.index = 2; + p2.mtime = 2000; + p2.size = 2020; + + p3.index = 3; + p3.mtime = 3000; + p3.size = 3030; + + ilist cloud(10,false); + cloud.put(p1.index, &p1); + cloud.put(p2.index, &p2); + cloud.put(p3.index, &p3); + + ilist cache(10,false); + cache.put(p3.index, &p3); + cache.put(p1.index, &p1); + cache.put(p2.index, &p2); + + ASSERT(identical_lists(&cloud, &cache)); + } + + printf("Test4\n"); + { + cloud_part p1, p2, p3; + + p1.index = 1; + p1.mtime = 1000; + p1.size = 1000; + + p2.index = 2; + p2.mtime = 2000; + p2.size = 2020; + + p3.index = 3; + p3.mtime = 3000; + p3.size = 3030; + + ilist cloud(10,false); + cloud.put(p1.index, &p1); + cloud.put(p2.index, &p2); + cloud.put(p3.index, &p3); + + ilist cache(10,false); + cache.put(p2.index, &p2); + cache.put(p1.index, &p1); + + ASSERT(!identical_lists(&cloud, &cache)); + ilist diff(10,false); + ASSERT(diff_lists(&cloud, &cache, &diff)); + ASSERT(diff.size() == 1); + cloud_part *dp = (cloud_part *)diff.get(3); + ASSERT(*dp == p3); + } + + printf("Test proxy set\\get\n"); + { + cloud_part p1, p2, p3; + + p1.index = 1; + p1.mtime = 1000; + p1.size = 1000; + + p2.index = 2; + p2.mtime = 2000; + p2.size = 2020; + + p3.index = 3; + p3.mtime = 3000; + p3.size = 3030; + + cloud_proxy *prox = cloud_proxy::get_instance(); + + /* add to the cloud proxy with no error */ + /* in volume1 */ + ASSERT(prox->set("volume1", &p1)); + ASSERT(prox->set("volume1", &p2)); + /* in volume2 */ + ASSERT(prox->set("volume2", &p3)); + + /* retrieve the correct elements */ + ASSERT(prox->get("volume1", 1) != NULL); + ASSERT(prox->get("volume1", 1)->mtime == 1000); + ASSERT(prox->get("volume1", 1)->size == 1000); + ASSERT(prox->get("volume1", 2) != NULL); + ASSERT(prox->get("volume1", 2)->mtime == 2000); + ASSERT(prox->get("volume1", 2)->size == 2020); + /* part3 is in volume2, not in volume1 */ + ASSERT(prox->get("volume1", 3) == NULL); + ASSERT(prox->get("volume2", 3) != NULL); + ASSERT(prox->get("volume2", 3)->mtime == 3000); + ASSERT(prox->get("volume2", 3)->size == 3030); + /* there's no volume3 */ + ASSERT(prox->get("volume3", 1) == NULL); + /* there's no volume3 nor part4 */ + ASSERT(prox->get("volume3", 4) == NULL); + } + printf("Test proxy reset\n"); + { + cloud_part p1, p2, p3, p4, p5; + + p1.index = 1; + p1.mtime = 1000; + p1.size = 1000; + + p2.index = 2; + p2.mtime = 2000; + 
p2.size = 2020; + + p3.index = 3; + p3.mtime = 3000; + p3.size = 3030; + + cloud_proxy *prox = cloud_proxy::get_instance(); + + /* add to the cloud proxy with no error */ + /* in volume1 */ + ASSERT(prox->set("volume1", &p1)); + ASSERT(prox->set("volume1", &p2)); + /* in volume2 */ + ASSERT(prox->set("volume2", &p3)); + + p4.index = 3; + p4.mtime = 4000; + p4.size = 4040; + + p5.index = 50; + p5.mtime = 5000; + p5.size = 5050; + + ilist part_list(10,false); + part_list.put(p4.index, &p4); + part_list.put(p5.index, &p5); + + /* reset volume 1 */ + prox->reset("volume1", &part_list); + /* old elements are gone */ + ASSERT(prox->get("volume1", 1) == NULL); + ASSERT(prox->get("volume1", 2) == NULL); + /* new elements are at the correct index */ + ASSERT(prox->get("volume1", 3) != NULL); + ASSERT(prox->get("volume1", 3)->mtime == 4000); + ASSERT(prox->get("volume1", 3)->size == 4040); + ASSERT(prox->get("volume1", 50) != NULL); + ASSERT(prox->get("volume1", 50)->mtime == 5000); + ASSERT(prox->get("volume1", 50)->size == 5050); + /* part3 is still in volume2 */ + ASSERT(prox->get("volume2", 3) != NULL); + ASSERT(prox->get("volume2", 3)->mtime == 3000); + ASSERT(prox->get("volume2", 3)->size == 3030); + /* there's no volume3 */ + ASSERT(prox->get("volume3", 1) == NULL); + /* there's no volume3 nor part.index 4 */ + ASSERT(prox->get("volume3", 4) == NULL); + prox->dump(); + } + + + return 0; + +} + +#endif /* TEST_PROGRAM */ diff --git a/src/stored/cloud_parts.h b/src/stored/cloud_parts.h new file mode 100644 index 00000000..4755b98e --- /dev/null +++ b/src/stored/cloud_parts.h @@ -0,0 +1,122 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Routines for managing Volumes contains and comparing parts + * + * Written by Norbert Bizet, May MMXVI + */ +#ifndef _CLOUD_PARTS_H_ +#define _CLOUD_PARTS_H_ + +#include "bacula.h" +#include "stored.h" + + +struct cloud_part +{ + uint32_t index; + utime_t mtime; + uint64_t size; +}; + +/* equality operators for cloud_part structure */ +bool operator==(const cloud_part& lhs, const cloud_part& rhs); +bool operator!=(const cloud_part& lhs, const cloud_part& rhs); +/* more equality operators: when compared to int, we match only index */ +bool operator==(const cloud_part& lhs, const uint32_t& rhs); +bool operator!=(const cloud_part& lhs, const uint32_t& rhs); + +/* Check if a part p is contained in a parts list */ +bool list_contains_part(ilist *parts, cloud_part *p); +/* Check if a part index is contained in a parts list */ +bool list_contains_part(ilist *parts, uint32_t part_idx); +/* if parts1 and parts2 are synced, return true. false otherwise */ +bool identical_lists(ilist *parts1, ilist *parts2); +/* cloud_parts present in source but not in dest are appended to diff. + * there's no cloud_part copy made. 
+ * Diff only holds references and shoudn't own them */ +bool diff_lists(ilist *source, ilist *dest, ilist *diff); + + +/* A proxy view of the cloud, providing existing parts + * index/size/date of modification without accessing the cloud itself. + * The basic proxy structure is a hash table of ilists: + root + | + -[volume001]-----ilist + | | + | [01]-->cloud_part + | [03]-->cloud_part + | | + | + -[volume002]-----ilist + | | + | [01]-->cloud_part + [02]-->cloud_part + | + */ +class cloud_proxy : public SMARTALLOC +{ +private: + htable *m_hash; /* the root htable */ + bool m_owns; /* determines if ilist own the cloud_parts */ + pthread_mutex_t m_mutex; /* protect access*/ + static cloud_proxy *m_pinstance; /* singleton instance */ + static uint64_t m_count; /* static refcount */ + + ~cloud_proxy(); + +public: + /* size: the default hash size + * owns: determines if the ilists own the cloud_parts or not */ + cloud_proxy(uint32_t size=100, bool owns=true); + + /* each time a part is added to the cloud, the corresponding cloud_part + * should be set here */ + /* either using a part ptr (part can be disposed afterward)... */ + bool set(const char *volume, cloud_part *part); + /* ...or by passing basic part parameters (part is constructed internally) */ + bool set(const char *volume, uint32_t index, utime_t mtime, uint64_t size); + + /* one can retrieve the proxied cloud_part using the get method */ + cloud_part *get(const char *volume, uint32_t part_idx); + /* direct access to part size */ + uint64_t get_size(const char *volume, uint32_t part_idx); + + /* Check if the volume entry exists and return true if it's the case */ + bool volume_lookup(const char *volume); + + /* reset the volume list content with the content of part_list */ + bool reset(const char *volume, ilist *part_list); + + /* get the current last (max) index for a given volume */ + uint32_t last_index(const char *volume); + + /* returns a ilist of elements present in the proxy but not in the exclusion list */ + ilist *exclude(const char* volume, ilist *exclusion_lst); + + /* refcounted singleton */ + static cloud_proxy *get_instance(); + /* instead of deleting, release the cloud_proxy*/ + void release(); + + void dump(); +}; + +#endif /* _CLOUD_PARTS_H_ */ diff --git a/src/stored/cloud_test.c b/src/stored/cloud_test.c new file mode 100644 index 00000000..acb55340 --- /dev/null +++ b/src/stored/cloud_test.c @@ -0,0 +1,256 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ + +#include "bacula.h" +#include "../stored/stored.h" + +extern bool parse_sd_config(CONFIG *config, const char *configfile, int exit_code); + +static CONFIG *config; + +void *start_heap; +#define CONFIG_FILE "bacula-sd.conf" +char *configfile = NULL; +bool detect_errors = false; +int errors = 0; + +static void usage() +{ + fprintf(stderr, _( +PROG_COPYRIGHT +"\n%sVersion: %s (%s)\n\n" +"Usage: cloud_test [options] \n" +" -b specify a bootstrap file\n" +" -c specify a Storage configuration file\n" +" -d set debug level to \n" +" -dt print timestamp in debug output\n" +" -v be verbose\n" +" -V specify Volume names (separated by |)\n" +" -? print this message\n\n"), 2000, "", VERSION, BDATE); + exit(1); +} + +static void get_session_record(JCR *jcr, DEVICE *dev, DEV_RECORD *rec, SESSION_LABEL *sessrec) +{ + const char *rtype; + memset(sessrec, 0, sizeof(SESSION_LABEL)); + jcr->JobId = 0; + switch (rec->FileIndex) { + case PRE_LABEL: + rtype = _("Fresh Volume Label"); + break; + case VOL_LABEL: + rtype = _("Volume Label"); + unser_volume_label(dev, rec); + break; + case SOS_LABEL: + rtype = _("Begin Job Session"); + unser_session_label(sessrec, rec); + jcr->JobId = sessrec->JobId; + break; + case EOS_LABEL: + rtype = _("End Job Session"); + break; + case 0: + case EOM_LABEL: + rtype = _("End of Medium"); + break; + case EOT_LABEL: + rtype = _("End of Physical Medium"); + break; + case SOB_LABEL: + rtype = _("Start of object"); + break; + case EOB_LABEL: + rtype = _("End of object"); + break; + default: + rtype = _("Unknown"); + Dmsg1(10, "FI rtype=%d unknown\n", rec->FileIndex); + break; + } + Dmsg5(10, "%s Record: VolSessionId=%d VolSessionTime=%d JobId=%d DataLen=%d\n", + rtype, rec->VolSessionId, rec->VolSessionTime, rec->Stream, rec->data_len); + if (verbose) { + Pmsg5(-1, _("%s Record: VolSessionId=%d VolSessionTime=%d JobId=%d DataLen=%d\n"), + rtype, rec->VolSessionId, rec->VolSessionTime, rec->Stream, rec->data_len); + } +} + +/* List just block information */ +static void do_blocks(JCR *jcr, DCR *dcr) +{ + DEV_BLOCK *block = dcr->block; + DEVICE *dev = dcr->dev; + char buf1[100], buf2[100]; + DEV_RECORD *rec = new_record(); + for ( ;; ) { + if (!dcr->read_block_from_device(NO_BLOCK_NUMBER_CHECK)) { + Dmsg1(100, "!read_block(): ERR=%s\n", dev->print_errmsg()); + if (dev->at_eot()) { + if (!mount_next_read_volume(dcr)) { + Jmsg(jcr, M_INFO, 0, _("Got EOM at file %u on device %s, Volume \"%s\"\n"), + dev->file, dev->print_name(), dcr->VolumeName); + break; + } + /* Read and discard Volume label */ + DEV_RECORD *record; + SESSION_LABEL sessrec; + record = new_record(); + dcr->read_block_from_device(NO_BLOCK_NUMBER_CHECK); + read_record_from_block(dcr, record); + get_session_record(jcr, dev, record, &sessrec); + free_record(record); + Jmsg(jcr, M_INFO, 0, _("Mounted Volume \"%s\".\n"), dcr->VolumeName); + } else if (dev->at_eof()) { + Jmsg(jcr, M_INFO, 0, _("End of file %u on device %s, Volume \"%s\"\n"), + dev->part, dev->print_name(), dcr->VolumeName); + Dmsg0(20, "read_record got eof. 
try again\n"); + continue; + } else if (dev->is_short_block()) { + Jmsg(jcr, M_INFO, 0, "%s", dev->print_errmsg()); + continue; + } else { + /* I/O error */ + errors++; + display_tape_error_status(jcr, dev); + break; + } + } + read_record_from_block(dcr, rec); + printf("Block: %d size=%d\n", block->BlockNumber, block->block_len); + } + free_record(rec); + return; +} + +int main (int argc, char *argv[]) +{ + int ch; + DEVICE *dev; + cloud_dev *cdev; + cloud_driver *driver; + char *VolumeName=NULL; + JCR *jcr=NULL; + BSR *bsr = NULL; + char *bsrName = NULL; + BtoolsAskDirHandler askdir_handler; + + init_askdir_handler(&askdir_handler); + setlocale(LC_ALL, ""); + bindtextdomain("bacula", LOCALEDIR); + textdomain("bacula"); + init_stack_dump(); + lmgr_init_thread(); + + working_directory = "/tmp"; + my_name_is(argc, argv, "cloud_test"); + init_msg(NULL, NULL); /* initialize message handler */ + + OSDependentInit(); + + + while ((ch = getopt(argc, argv, "b:c:d:vV:?")) != -1) { + switch (ch) { + case 'c': /* specify config file */ + if (configfile != NULL) { + free(configfile); + } + configfile = bstrdup(optarg); + break; + + case 'b': + bsrName = optarg; + break; + + case 'd': /* debug level */ + if (*optarg == 't') { + dbg_timestamp = true; + } else { + char *p; + /* We probably find a tag list -d 10,sql,bvfs */ + if ((p = strchr(optarg, ',')) != NULL) { + *p = 0; + } + debug_level = atoi(optarg); + if (debug_level <= 0) { + debug_level = 1; + } + if (p) { + debug_parse_tags(p+1, &debug_level_tags); + } + } + break; + + case 'v': + verbose++; + break; + + case 'V': /* Volume name */ + VolumeName = optarg; + break; + + case '?': + default: + usage(); + + } /* end switch */ + } /* end while */ + argc -= optind; + argv += optind; + + if (!argc) { + Pmsg0(0, _("No archive name specified\n")); + usage(); + } + + if (configfile == NULL) { + configfile = bstrdup(CONFIG_FILE); + } + + config = New(CONFIG()); + parse_sd_config(config, configfile, M_ERROR_TERM); + setup_me(); + load_sd_plugins(me->plugin_directory); + if (bsrName) { + bsr = parse_bsr(NULL, bsrName); + } + jcr = setup_jcr("cloud_test", argv[0], bsr, VolumeName, SD_READ); + dev = jcr->dcr->dev; + if (!dev || dev->dev_type != B_CLOUD_DEV) { + Pmsg0(0, "Bad device\n"); + exit(1); + } + do_blocks(jcr, jcr->dcr); + /* Start low level tests */ + cdev = (cloud_dev *)dev; + driver = cdev->driver; + + /* TODO: Put here low level tests for all drivers */ + if (!cdev->truncate_cache(jcr->dcr)) { + Pmsg1(0, "Unable to truncate the cache ERR=%s\n", cdev->errmsg); + } + + if (jcr) { + release_device(jcr->dcr); + free_jcr(jcr); + dev->term(NULL); + } + return 0; +} diff --git a/src/stored/cloud_transfer_mgr.c b/src/stored/cloud_transfer_mgr.c new file mode 100644 index 00000000..e3786005 --- /dev/null +++ b/src/stored/cloud_transfer_mgr.c @@ -0,0 +1,641 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Bacula Cloud Transfer Manager: + * transfer manager wraps around the work queue. + * Reports transfer status and error + * Reports statistics about current past and future work + * Written by Norbert Bizet, May MMXVI + * + */ +#include "cloud_transfer_mgr.h" +#include "stored.h" + +/* constructor + * size : the size in bytes of the transfer + * funct : function to process + * arg : argument passed to the function + * cache_fname : cache file name is duplicated in the transfer constructor + * volume_name : volume name is duplicated in the transfer constructor + * part : part index + * driver : pointer to the cloud_driver + * dcr : pointer to DCR +*/ +transfer::transfer(uint64_t size, + void * (*funct)(transfer*), + const char *cache_fname, + const char *volume_name, + uint32_t part, + cloud_driver *driver, + DCR *dcr, + cloud_proxy *proxy) : + m_stat_size(size), + m_stat_start(0), + m_stat_duration(0), + m_stat_eta(0), + m_message(NULL), + m_state(TRANS_STATE_CREATED), + m_mgr(NULL), + m_funct(funct), + m_cache_fname(bstrdup(cache_fname)), /* cache fname is duplicated*/ + m_volume_name(bstrdup(volume_name)), /* volume name is duplicated*/ + m_part(part), + m_driver(driver), + m_dcr(dcr), + m_proxy(proxy), + m_workq_elem(NULL), + m_use_count(0), + m_cancel(false), + m_do_cache_truncate(false) +{ + pthread_mutex_init(&m_stat_mutex, 0); + pthread_mutex_init(&m_mutex, 0); + pthread_cond_init(&m_done, NULL); + + m_message = get_pool_memory(PM_MESSAGE); + *m_message = 0; +} + +/* destructor */ +transfer::~transfer() +{ + free_pool_memory(m_message); + pthread_cond_destroy(&m_done); + pthread_mutex_destroy(&m_mutex); + pthread_mutex_destroy(&m_stat_mutex); + + free(m_volume_name); + free(m_cache_fname); + if (m_use_count > 0) { + ASSERT(FALSE); + Dmsg1(0, "!!!m_use_count = %d\n", m_use_count); + } +} + +/* queue this transfer for processing in the manager workq + * ret :true if queuing is successful */ +bool transfer::queue() +{ + if (!transition(TRANS_STATE_QUEUED)) { + return false; + } + return true; +} + + +/* opaque function that processes m_funct with m_arg as parameter + * depending on m_funct return value, changes state to TRANS_STATE_DONE + * or TRANS_STATE_ERROR + */ +void transfer::proceed() +{ + if (transition(TRANS_STATE_PROCESSED)) { + if (m_funct(this)) { + transition(TRANS_STATE_ERROR); + } else { + transition(TRANS_STATE_DONE); + } + } else { + Mmsg(m_message, _("wrong transition to TRANS_STATE_PROCESS in proceed review logic\n")); + } +} + +int transfer::wait() +{ + lock_guard lg(m_mutex); + + int stat = 0; + while (m_state != TRANS_STATE_DONE && + m_state != TRANS_STATE_ERROR) { + + if ((stat = pthread_cond_wait(&m_done, &m_mutex)) != 0) { + return stat; + } + } + return stat; +} + +int transfer::timedwait(const timeval& tv) +{ + lock_guard lg(m_mutex); + struct timespec timeout; + struct timeval ttv; + struct timezone tz; + int stat = 0; + timeout.tv_sec = tv.tv_sec; + timeout.tv_nsec = tv.tv_usec * 1000; + + while (m_state != TRANS_STATE_DONE && + m_state != TRANS_STATE_ERROR) { + + gettimeofday(&ttv, &tz); + timeout.tv_nsec += ttv.tv_usec * 1000; + timeout.tv_sec += ttv.tv_sec; + + if ((stat = pthread_cond_timedwait(&m_done, &m_mutex, &timeout)) != 0) { + return stat; + } + } + return stat; +} + +/* place the cancel flag and wait until processing is done */ +bool transfer::cancel() +{ + { + lock_guard lg(m_mutex); + m_cancel = true; + } + return wait(); +} + +/* checking the cancel status : doesnt request locking */ +bool transfer::is_cancelled() const +{ + return 
m_cancel; +} + +uint32_t transfer::append_status(POOL_MEM& msg) +{ + POOLMEM *tmp_msg = get_pool_memory(PM_MESSAGE); + char ec[30]; + uint32_t ret=0; + static const char *state[] = {"created", "queued", "process", "done", "error"}; + + if (m_state > TRANS_STATE_PROCESSED) { + ret = Mmsg(tmp_msg,_("%s/part.%-5d state=%-7s size=%sB duration=%ds%s%s\n"), + m_volume_name, m_part, + state[m_state], + edit_uint64_with_suffix(m_stat_size, ec), + m_stat_duration, + (strlen(m_message) != 0)?" msg=":"", + (strlen(m_message) != 0)?m_message:""); + pm_strcat(msg, tmp_msg); + } else { + ret = Mmsg(tmp_msg,_("%s/part.%-5d, state=%-7s size=%sB eta=%dss%s%s\n"), + m_volume_name, m_part, + state[m_state], + edit_uint64_with_suffix(m_stat_size, ec), + m_stat_eta, + (strlen(m_message) != 0)?" msg=":"", + (strlen(m_message) != 0)?m_message:""); + pm_strcat(msg, tmp_msg); + } + free_pool_memory(tmp_msg); + return ret; +} + + +/* the manager references itself through this function */ +void transfer::set_manager(transfer_manager *mgr) +{ + lock_guard lg(m_mutex); + m_mgr = mgr; +} + +/* change the state */ +bool transfer::transition(transfer_state state) +{ + /* lock state mutex*/ + lock_guard lg(m_mutex); + + /* transition from current state (m_state) to target state (state)*/ + bool ret = false; /*impossible transition*/ + switch(m_state) + { + case TRANS_STATE_CREATED: + /* CREATED -> QUEUED */ + if (state == TRANS_STATE_QUEUED) { + /* valid transition*/ + ret = true; + if (m_mgr) { + /*lock manager statistics */ + P(m_mgr->m_stat_mutex); + /*increment the number of queued transfer*/ + m_mgr->m_stat_nb_transfer_queued++; + /*add the current size into manager queued size*/ + m_mgr->m_stat_size_queued += m_stat_size; + /*unlock manager statistics */ + V(m_mgr->m_stat_mutex); + + P(m_mgr->m_mutex); + ++m_use_count; + m_mgr->add_work(this); + V(m_mgr->m_mutex); + } + } + break; + + case TRANS_STATE_QUEUED: + /* QUEUED -> CREATED : back to initial state*/ + if (state == TRANS_STATE_CREATED) { + /* valid transition*/ + ret = true; + if (m_mgr) { + /*lock manager statistics */ + P(m_mgr->m_stat_mutex); + /*decrement the number of queued transfer*/ + m_mgr->m_stat_nb_transfer_queued--; + /*remove the current size from the manager queued size*/ + m_mgr->m_stat_size_queued -= m_stat_size; + /*unlock manager statistics */ + V(m_mgr->m_stat_mutex); + + P(m_mgr->m_mutex); + m_mgr->remove_work(m_workq_elem); + --m_use_count; + V(m_mgr->m_mutex); + } + } + /* QUEUED -> PROCESSED : a worker aquired the transfer*/ + if (state == TRANS_STATE_PROCESSED) { + /*valid transition*/ + ret = true; + if (m_mgr) { + /*lock manager statistics */ + P(m_mgr->m_stat_mutex); + /*decrement the number of queued transfer*/ + m_mgr->m_stat_nb_transfer_queued--; + /*increment the number of processed transfer*/ + m_mgr->m_stat_nb_transfer_processed++; + /*remove the current size from the manager queued size*/ + m_mgr->m_stat_size_queued -= m_stat_size; + /*... and add it to the manager processed size*/ + m_mgr->m_stat_size_processed += m_stat_size; + /*unlock manager statistics */ + V(m_mgr->m_stat_mutex); + + /*transfer starts now*/ + P(m_stat_mutex); + m_stat_start = (utime_t)time(NULL); + V(m_stat_mutex); + } + } + break; + + case TRANS_STATE_PROCESSED: + /* PROCESSED -> DONE : Success! 
*/ + if (state == TRANS_STATE_DONE) { + /*valid transition*/ + ret = true; + /*transfer stops now : compute transfer duration*/ + P(m_stat_mutex); + m_stat_duration = (utime_t)time(NULL)-m_stat_start; + V(m_stat_mutex); + + if (m_mgr) { + /*lock manager statistics */ + P(m_mgr->m_stat_mutex); + /* ... from processed to done*/ + m_mgr->m_stat_nb_transfer_processed--; + m_mgr->m_stat_nb_transfer_done++; + m_mgr->m_stat_size_processed -= m_stat_size; + m_mgr->m_stat_size_done += m_stat_size; + /*add local duration to manager duration */ + m_mgr->m_stat_duration_done += m_stat_duration; + /*reprocess the manager average rate with it*/ + if (m_mgr->m_stat_duration_done != 0) { + m_mgr->m_stat_average_rate = + m_mgr->m_stat_size_done / + m_mgr->m_stat_duration_done; + } + /*unlock manager statistics */ + V(m_mgr->m_stat_mutex); + + /* process is completed, unref the workq reference */ + --m_use_count; + } + + if (m_proxy) { + m_proxy->set(m_volume_name, m_part, m_res_mtime, m_res_size); + } + + /* in both case, success or failure, life keeps going on */ + pthread_cond_broadcast(&m_done); + } + /* PROCESSED -> ERROR : Failure! */ + if (state == TRANS_STATE_ERROR) { + /*valid transition*/ + ret = true; + /*transfer stops now, even if in error*/ + P(m_stat_mutex); + m_stat_duration = (utime_t)time(NULL)-m_stat_start; + V(m_stat_mutex); + + if (m_mgr) { + /*lock manager statistics */ + P(m_mgr->m_stat_mutex); + /* ... from processed to error*/ + m_mgr->m_stat_nb_transfer_processed--; + m_mgr->m_stat_nb_transfer_error++; + m_mgr->m_stat_size_processed -= m_stat_size; + m_mgr->m_stat_size_error += m_stat_size; + /*unlock manager statistics */ + V(m_mgr->m_stat_mutex); + + /* process is completed, unref the workq reference */ + --m_use_count; + } + /* in both case, success or failure, life keeps going on */ + pthread_cond_broadcast(&m_done); + } + break; + + case TRANS_STATE_DONE: + case TRANS_STATE_ERROR: + default: + ret = false; + break; + } + + /* update state when transition is valid*/ + if (ret) { + m_state = state; + } + + return ret; +} + +void transfer::set_do_cache_truncate(bool do_cache_truncate) +{ + m_do_cache_truncate=do_cache_truncate; +} + +int transfer::inc_use_count() +{ + lock_guard lg(m_mutex); + return ++m_use_count; +} + +int transfer::dec_use_count() +{ + lock_guard lg(m_mutex); + return --m_use_count; +} + +void *transfer_launcher(void *arg) +{ + transfer *t = (transfer *)arg; + if (t) { + t->proceed(); + } + return NULL; +} + +/* ----------------------------------------------------------- + transfer manager declarations + ----------------------------------------------------------- + */ + +/* constructor */ +transfer_manager::transfer_manager(uint32_t n) +{ + transfer *item=NULL; + m_transfer_list.init(item, &item->link); + pthread_mutex_init(&m_stat_mutex, 0); + pthread_mutex_init(&m_mutex, 0); + workq_init(&m_wq, 1, transfer_launcher); +} + +/* destructor */ +transfer_manager::~transfer_manager() +{ + workq_wait_idle(&m_wq); + pthread_mutex_destroy(&m_mutex); + pthread_mutex_destroy(&m_stat_mutex); +} + +/* create a new or inc-reference a similar transfer. (factory) + * ret: transfer* is ref_counted and must be kept, used + * and eventually released by caller with release() */ +transfer *transfer_manager::get_xfer(uint64_t size, + transfer_engine *funct, + POOLMEM *cache_fname, + const char *volume_name, + uint32_t part, + cloud_driver *driver, + DCR *dcr, + cloud_proxy *proxy) +{ + lock_guard lg (m_mutex); + + /* do we have a similar transfer on tap? 
*/ + transfer *item; + foreach_dlist(item, (&m_transfer_list)) { + /* this is where "similar transfer" is defined: + * same volume_name, same part idx */ + if (strcmp(item->m_volume_name, volume_name) == 0 && item->m_part == part) { + item->inc_use_count(); + return item; + } + } + /* no existing transfer: create a new one */ + item = New(transfer(size, + funct, + cache_fname,/* cache_fname is duplicated in the transfer constructor*/ + volume_name, /* volume_name is duplicated in the transfer constructor*/ + part, + driver, + dcr, + proxy)); + + ASSERT(item->m_state == TRANS_STATE_CREATED); + item->set_manager(this); + /* inc use_count once for m_transfer_list insertion */ + item->inc_use_count(); + m_transfer_list.append(item); + /* inc use_count once for caller ref counting */ + item->inc_use_count(); + return item; +} + +/* does the xfer belong to us? */ +bool transfer_manager::owns(transfer *xfer) +{ + lock_guard lg(m_mutex); + transfer *item; + foreach_dlist(item, (&m_transfer_list)) { + /* same address */ + if (item == xfer) { + return true; + } + } + return false; +} + +/* un-ref transfer and free if ref count goes to zero + * caller must NOT use xfer anymore after this has been called */ +void transfer_manager::release(transfer *xfer) +{ + if (xfer) { + ASSERTD(owns(xfer), "Wrong Manager"); + /* wait should have been done already by caller, + * but we cannot afford deleting the transfer while it's not completed */ + wait(xfer); + /* decrement the caller reference */ + if (xfer->dec_use_count() == 1) { + /* the only ref left is the one from m_transfer_list + * time for deletion */ + lock_guard lg(m_mutex); + m_transfer_list.remove(xfer); + xfer->dec_use_count(); + delete xfer; + } + } +} + +/* accessors to xfer->queue */ +bool transfer_manager::queue(transfer *xfer) +{ + if (xfer) { + ASSERTD(owns(xfer), "Wrong Manager"); + return xfer->queue(); + } + return false; +} + +/* accessors to xfer->wait */ +int transfer_manager::wait(transfer *xfer) +{ + if (xfer) { + ASSERTD(owns(xfer), "Wrong Manager"); + return xfer->wait(); + } + return 0; +} + +/* accessors to xfer->timedwait */ +int transfer_manager::timedwait(transfer *xfer, const timeval& tv) +{ + if (xfer) { + ASSERTD(owns(xfer), "Wrong Manager"); + return xfer->timedwait(tv); + } + return 0; +} + +/* accessors to xfer->cancel */ +bool transfer_manager::cancel(transfer *xfer) +{ + if (xfer) { + ASSERTD(owns(xfer), "Wrong Manager"); + return xfer->cancel(); + } + return false; +} + +/* append a transfer object to this manager */ +int transfer_manager::add_work(transfer* t) +{ + return workq_add(&m_wq, t, t ? 
&t->m_workq_elem : NULL, 0); +} + +/* remove associated workq_ele_t from this manager workq */ +int transfer_manager::remove_work(workq_ele_t *elem) +{ + return workq_remove(&m_wq, elem); +} +/* search the transfer list for similar transfer */ +bool transfer_manager::find(const char *VolName, uint32_t index) +{ + /* Look in the transfer list if we have a download/upload for the current volume */ + lock_guard lg(m_mutex); + transfer *item; + foreach_dlist(item, (&m_transfer_list)) { + if (strcmp(item->m_volume_name, VolName) == 0 && item->m_part == index) { + return true; + } + } + return false; +} + +/* Call to this function just before displaying global statistics */ +void transfer_manager::update_statistics() +{ + /* lock the manager stats */ + P(m_stat_mutex); + + /* ETA naive calculation for each element in the queue = + * (accumulator(previous elements size) / average_rate) / num_workers; + */ + uint64_t accumulator=0; + + /* lock the queue so order and chaining cannot be modified */ + P(m_mutex); + P(m_wq.mutex); + m_stat_nb_workers = m_wq.max_workers; + + /* parse the queued and processed transfers */ + transfer *t; + foreach_dlist(t, &m_transfer_list) { + if ( (t->m_state == TRANS_STATE_QUEUED) || + (t->m_state == TRANS_STATE_PROCESSED)) { + accumulator+=t->m_stat_size; + P(t->m_stat_mutex); + if ((m_stat_average_rate != 0) && (m_stat_nb_workers != 0)) { + /*update eta for each transfer*/ + t->m_stat_eta = (accumulator / m_stat_average_rate) / m_stat_nb_workers; + } + V(t->m_stat_mutex); + } + } + + /* the manager ETA is the ETA of the last transfer in its workq */ + if (m_wq.last) { + transfer *t = (transfer *)m_wq.last->data; + if (t) { + m_stat_eta = t->m_stat_eta; + } + } + + V(m_wq.mutex); + V(m_mutex); + V(m_stat_mutex); +} + +/* short status of the transfers */ +uint32_t transfer_manager::append_status(POOL_MEM& msg, bool verbose) +{ + update_statistics(); + char ec0[30],ec1[30],ec2[30],ec3[30],ec4[30]; + POOLMEM *tmp_msg = get_pool_memory(PM_MESSAGE); + uint32_t ret = Mmsg(tmp_msg, _("(%sB/s) (ETA %d s) " + "Queued=%d %sB, Processed=%d %sB, Done=%d %sB, Failed=%d %sB\n"), + edit_uint64_with_suffix(m_stat_average_rate, ec0), m_stat_eta, + m_stat_nb_transfer_queued, edit_uint64_with_suffix(m_stat_size_queued, ec1), + m_stat_nb_transfer_processed, edit_uint64_with_suffix(m_stat_size_processed, ec2), + m_stat_nb_transfer_done, edit_uint64_with_suffix(m_stat_size_done, ec3), + m_stat_nb_transfer_error, edit_uint64_with_suffix(m_stat_size_error, ec4)); + pm_strcat(msg, tmp_msg); + + if (verbose) { + P(m_mutex); + if (!m_transfer_list.empty()) { + ret += Mmsg(tmp_msg, _("------------------------------------------------------------ details ------------------------------------------------------------\n")); + pm_strcat(msg, tmp_msg); + } + transfer *tpkt; + foreach_dlist(tpkt, &m_transfer_list) { + ret += tpkt->append_status(msg); + } + V(m_mutex); + } + free_pool_memory(tmp_msg); + return ret; +} diff --git a/src/stored/cloud_transfer_mgr.h b/src/stored/cloud_transfer_mgr.h new file mode 100644 index 00000000..e229c166 --- /dev/null +++ b/src/stored/cloud_transfer_mgr.h @@ -0,0 +1,296 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula Cloud Transfer Manager: + * transfer manager wraps around the work queue. + * Reports transfer status and error + * Reports statistics about current past and future work + * Written by Norbert Bizet, May MMXVI + * + */ + +#ifndef BCLOUD_TRANSFER_MANAGER_H +#define BCLOUD_TRANSFER_MANAGER_H + +#include "bacula.h" +#include "lib/workq.h" + +/* forward declarations */ +class transfer_manager; +class cloud_driver; +class DCR; +class transfer; +class cloud_proxy; + +typedef void *(transfer_engine)(transfer *); + + +/* possible states of a transfer object */ +typedef enum { +/* initial state */ + /* object has been created but not queued yet*/ + TRANS_STATE_CREATED = 0, +/* in the workq states */ + /* object is queued*/ + TRANS_STATE_QUEUED, + /* object is processed*/ + TRANS_STATE_PROCESSED, +/* completed states */ + /* object processing has completed ok*/ + TRANS_STATE_DONE, + /* object processing has completed but failed*/ + TRANS_STATE_ERROR, +/* number of states */ + NUM_TRANS_STATE +} transfer_state; + +/* each cloud transfer (download, upload, etc.) + is wrapped into a transfer object */ +class transfer : public SMARTALLOC +{ +public: + dlink link; /* Used in global manager dlist */ + +/* m_stat prefixed statistics variables : */ + /* protect access to statistics resources*/ + pthread_mutex_t m_stat_mutex; + /* size of the transfer: should be filled asap */ + uint64_t m_stat_size; + /* time when process started */ + utime_t m_stat_start; + /* duration of the transfer : automatically filled when transfer is completed*/ + utime_t m_stat_duration; + /* estimate time to arrival : predictive guest approximation of transfer time*/ + utime_t m_stat_eta; + +/* status variables :*/ + /* protect status changes*/ + pthread_mutex_t m_mutex; + /* cond variable to broadcast transfer completion*/ + pthread_cond_t m_done; + /* status message */ + POOLMEM *m_message; + /* current transfer state*/ + transfer_state m_state; + +/* other variables :*/ + /* the manager that holds this element */ + transfer_manager *m_mgr; + /* the function processed by this transfer: contrary to the workq, it can be different for each transfer */ + transfer_engine *m_funct; + + /* variables */ + const char *m_cache_fname; + const char *m_volume_name; + uint32_t m_part; + cloud_driver *m_driver; + DCR *m_dcr; + cloud_proxy *m_proxy; + /* size of the transfer result : filled by the processor (driver) */ + uint64_t m_res_size; + /* last modification time of the transfer result : filled by the processor (driver) */ + utime_t m_res_mtime; + + /* the associated workq element */ + workq_ele_t *m_workq_elem; + + /* reference counter */ + int m_use_count; + + /* cancel flag */ + bool m_cancel; + + /* truncate cache once transfer is completed (upload)*/ + bool m_do_cache_truncate; +/* methods :*/ + /* constructor + * size : the size in bytes of the transfer + * funct : function to process + * cache_fname : cache file name is duplicated in the transfer constructor + * volume_name : volume name is duplicated in the transfer constructor + * part : part index + * driver : pointer to the cloud_driver + * dcr : pointer to DCR + */ 
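+ /* Typical lifecycle, as a minimal illustrative sketch (not a verbatim
+  * excerpt of the daemon code): a transfer is normally obtained from the
+  * transfer_manager factory and driven through queue()/wait()/release().
+  * "upload_engine" stands for any transfer_engine callback and is only a
+  * hypothetical name used for this example; mgr is an existing
+  * transfer_manager.
+  *
+  *   transfer *t = mgr->get_xfer(size, upload_engine, cache_fname,
+  *                               volume_name, part, driver, dcr, proxy);
+  *   mgr->queue(t);    // CREATED -> QUEUED, work handed to the workq
+  *   mgr->wait(t);     // blocks until TRANS_STATE_DONE or TRANS_STATE_ERROR
+  *   mgr->release(t);  // drop the caller reference; freed when the count reaches zero
+  */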
+ transfer(uint64_t size, + transfer_engine *funct, + const char *cache_fname, + const char *volume_name, + uint32_t part, + cloud_driver *driver, + DCR *dcr, + cloud_proxy *proxy + ); + + /* destructor*/ + ~transfer(); + + /* opaque function that will process m_funct with m_arg as parameter. Called back from the workq. + * depending on m_funct return value, changes m_state to TRANS_STATE_DONE or TRANS_STATE_ERROR */ + void proceed(); + + /* waits for the asynchronous computation to finish (including cancel()ed computations). + * ret: 0:Ok, errorcode otherwise */ + int wait(); /* no timeout */ + int timedwait(const timeval& tv); /* with timeout */ + + /* queue this transfer for processing in the manager workq + * ret :true if queuing is successful */ + bool queue(); + + /* cancel processing + * ret: true cancel done, false cancel failed */ + bool cancel(); + + /* callback fct that checks if transfer has been cancelled */ + bool is_cancelled() const; + + /* append a status message into msg*/ + uint32_t append_status(POOL_MEM& msgs); + + void set_do_cache_truncate(bool do_cache_truncate); + +protected: +friend class transfer_manager; + + /* the manager references itselfs thru this function*/ + void set_manager(transfer_manager *mgr); + + /* change the state + * ret : true if transition is legal, false otherwise */ + bool transition(transfer_state state); + + /* ref counting must lock the element prior to use */ + int inc_use_count(); + /* !!dec use count can delete the transfer */ + int dec_use_count(); +}; + +/* + The transfer_manager wraps around the work queue and holds the transfer(s) +*/ +class transfer_manager : public SMARTALLOC +{ +public: + +/* m_stat prefixed statistics variables global for this manager: */ + /* protect access to statistics resources*/ + pthread_mutex_t m_stat_mutex; + /* number of workers*/ + uint32_t m_stat_nb_workers; + /* current number of transfers in TRANS_STATE_QUEUED state in this manager*/ + uint64_t m_stat_nb_transfer_queued; + /* current number of transfers in TRANS_STATE_PROCESSED state in this manager*/ + uint64_t m_stat_nb_transfer_processed; + /* current number of transfers in TRANS_STATE_DONE state in this manager*/ + uint64_t m_stat_nb_transfer_done; + /* current number of transfers in TRANS_STATE_ERROR state in this manager*/ + uint64_t m_stat_nb_transfer_error; + + /* size in bytes of transfers in TRANS_STATE_QUEUED state in this manager*/ + uint64_t m_stat_size_queued; + /* size in bytes of transfers in TRANS_STATE_PROCESSED state in this manager*/ + uint64_t m_stat_size_processed; + /* size in bytes of transfers in TRANS_STATE_DONE state in this manager*/ + uint64_t m_stat_size_done; + /* size in bytes of transfers in TRANS_STATE_ERROR state in this manager*/ + uint64_t m_stat_size_error; + /* duration of transfers in TRANS_STATE_DONE state in this manager*/ + utime_t m_stat_duration_done; + /* computed bytes/sec transfer rate */ + uint64_t m_stat_average_rate; + /* computed Estimate Time to Arrival */ + utime_t m_stat_eta; + + +/* status variables global for this manager: */ + /* protect status access*/ + pthread_mutex_t m_mutex; + /* status message for this manager TBD*/ + POOLMEM *m_message; + /* m_state for the manager TBD*/ + int32_t m_state; + +/* others: */ + /* tranfer list*/ + dlist m_transfer_list; + + /* workq used by this manager*/ + workq_t m_wq; + +/* methods */ + + /* constructor */ + transfer_manager(uint32_t n); + + /* destructor */ + ~transfer_manager(); + +/* transfer functions */ + + /* create a new or inc-reference a 
similar transfer. (factory) + * ret: transfer* is ref_counted and must be kept, used + * and eventually released by caller with release() */ + transfer *get_xfer(uint64_t size, + transfer_engine *funct, + POOLMEM *cache_fname, + const char *volume_name, + uint32_t part, + cloud_driver *driver, + DCR *dcr, + cloud_proxy *proxy); + + /* does the xfer belong to this manager? */ + bool owns(transfer *xfer); + + /* un-ref transfer and delete if ref count falls to zero + * caller must NOT use xfer anymore after calling release() */ + void release(transfer *xfer); + + /* accessors to xfer->queue */ + bool queue(transfer *xfer); + + /* accessors to xfer->wait */ + int wait(transfer *xfer); + + /* accessors to xfer->timedwait */ + int timedwait(transfer *xfer, const timeval& tv); + + /* accessors to xfer->cancel */ + bool cancel(transfer *xfer); + + /* search the transfer list for similar transfer */ + bool find(const char *VolName, uint32_t index); + + /* call to update manager statistics, before displaying it b.e.*/ + void update_statistics(); + + /* append a status message into msg*/ + uint32_t append_status(POOL_MEM& msg, bool verbose); + +protected: +friend class transfer; + + /* append a transfer object to this manager */ + int add_work(transfer* t); + /* remove associated workq_ele_t from this manager workq*/ + int remove_work(workq_ele_t *elem); +}; + +#endif /* BCLOUD_TRANSFER_MANAGER_H */ diff --git a/src/stored/dev.c b/src/stored/dev.c new file mode 100644 index 00000000..257875bc --- /dev/null +++ b/src/stored/dev.c @@ -0,0 +1,1055 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * dev.c -- low level operations on device (storage device) + * + * written by, Kern Sibbald, MM + * + * NOTE!!!! None of these routines are reentrant. You must + * use dev->rLock() and dev->Unlock() at a higher level, + * or use the xxx_device() equivalents. By moving the + * thread synchronization to a higher level, we permit + * the higher level routines to "seize" the device and + * to carry out operations without worrying about who + * set what lock (i.e. race conditions). + * + * Note, this is the device dependent code, and may have + * to be modified for each system, but is meant to + * be as "generic" as possible. + * + * The purpose of this code is to develop a SIMPLE Storage + * daemon. More complicated coding (double buffering, writer + * thread, ...) is left for a later version. + */ + +/* + * Handling I/O errors and end of tape conditions are a bit tricky. + * This is how it is currently done when writing. + * On either an I/O error or end of tape, + * we will stop writing on the physical device (no I/O recovery is + * attempted at least in this daemon). The state flag will be sent + * to include ST_EOT, which is ephemeral, and ST_WEOT, which is + * persistent. Lots of routines clear ST_EOT, but ST_WEOT is + * cleared only when the problem goes away. 
Now when ST_WEOT + * is set all calls to write_block_to_device() call the fix_up + * routine. In addition, all threads are blocked + * from writing on the tape by calling lock_dev(), and thread other + * than the first thread to hit the EOT will block on a condition + * variable. The first thread to hit the EOT will continue to + * be able to read and write the tape (he sort of tunnels through + * the locking mechanism -- see lock_dev() for details). + * + * Now presumably somewhere higher in the chain of command + * (device.c), someone will notice the EOT condition and + * get a new tape up, get the tape label read, and mark + * the label for rewriting. Then this higher level routine + * will write the unwritten buffer to the new volume. + * Finally, he will release + * any blocked threads by doing a broadcast on the condition + * variable. At that point, we should be totally back in + * business with no lost data. + */ + +#include "bacula.h" +#include "stored.h" + +#ifndef O_NONBLOCK +#define O_NONBLOCK 0 +#endif + +static const int dbglvl = 150; + +/* Imported functions */ +extern void set_os_device_parameters(DCR *dcr); +extern bool dev_get_os_pos(DEVICE *dev, struct mtget *mt_stat); +extern uint32_t status_dev(DEVICE *dev); + +/* Forward referenced functions */ +const char *mode_to_str(int mode); +DEVICE *m_init_dev(JCR *jcr, DEVRES *device, bool adata); + +/* + * Device specific initialization. + */ +void DEVICE::device_specific_init(JCR *jcr, DEVRES *device) +{ +} + +/* + * Initialize the device with the operating system and + * initialize buffer pointers. + * + * Returns: true device already open + * false device setup but not open + * + * Note, for a tape, the VolName is the name we give to the + * volume (not really used here), but for a file, the + * VolName represents the name of the file to be created/opened. + * In the case of a file, the full name is the device name + * (archive_name) with the VolName concatenated. + * + * This is generic common code. It should be called prior to any + * device specific code. Note! This does not open anything. + */ +bool DEVICE::open_device(DCR *dcr, int omode) +{ + Enter(dbglvl); + preserve = 0; + ASSERT2(!adata, "Attempt to open adata dev"); + if (is_open()) { + if (openmode == omode) { + return true; + } else { + Dmsg1(200, "Close fd=%d for mode change in open().\n", m_fd); + d_close(m_fd); + clear_opened(); + preserve = state & (ST_LABEL|ST_APPEND|ST_READ); + } + } + openmode = omode; + if (dcr) { + dcr->setVolCatName(dcr->VolumeName); + VolCatInfo = dcr->VolCatInfo; /* structure assign */ + } + + state &= ~(ST_NOSPACE|ST_LABEL|ST_APPEND|ST_READ|ST_EOT|ST_WEOT|ST_EOF); + label_type = B_BACULA_LABEL; + + if (openmode == OPEN_READ_WRITE && has_cap(CAP_STREAM)) { + openmode = OPEN_WRITE_ONLY; + } + return false; +} + +void DEVICE::set_mode(int new_mode) +{ + switch (new_mode) { + case CREATE_READ_WRITE: + mode = O_CREAT | O_RDWR | O_BINARY; + break; + case OPEN_READ_WRITE: + mode = O_RDWR | O_BINARY; + break; + case OPEN_READ_ONLY: + mode = O_RDONLY | O_BINARY; + break; + case OPEN_WRITE_ONLY: + mode = O_WRONLY | O_BINARY; + break; + default: + Jmsg0(NULL, M_ABORT, 0, _("Illegal mode given to open dev.\n")); + } +} + +/* + * Called to indicate that we have just read an + * EOF from the device. + */ +void DEVICE::set_ateof() +{ + set_eof(); + file_addr = 0; + file_size = 0; + block_num = 0; +} + +/* + * Called to indicate we are now at the end of the tape, and + * writing is not possible. 
+ */ +void DEVICE::set_ateot() +{ + /* Make tape effectively read-only */ + Dmsg0(200, "==== Set AtEof\n"); + state |= (ST_EOF|ST_EOT|ST_WEOT); + clear_append(); +} + + +/* + * Set the position of the device -- only for files + * For other devices, there is no generic way to do it. + * Returns: true on succes + * false on error + */ +bool DEVICE::update_pos(DCR *dcr) +{ + boffset_t pos; + bool ok = true; + + if (!is_open()) { + dev_errno = EBADF; + Mmsg0(errmsg, _("Bad device call. Device not open\n")); + Emsg1(M_FATAL, 0, "%s", errmsg); + return false; + } + + if (is_file()) { + file = 0; + file_addr = 0; + pos = lseek(dcr, (boffset_t)0, SEEK_CUR); + if (pos < 0) { + berrno be; + dev_errno = errno; + Pmsg1(000, _("Seek error: ERR=%s\n"), be.bstrerror()); + Mmsg2(errmsg, _("lseek error on %s. ERR=%s.\n"), + print_name(), be.bstrerror()); + ok = false; + } else { + file_addr = pos; + block_num = (uint32_t)pos; + file = (uint32_t)(pos >> 32); + } + } + return ok; +} + +void DEVICE::set_slot(int32_t slot) +{ + m_slot = slot; + if (vol) vol->clear_slot(); +} + +void DEVICE::clear_slot() +{ + m_slot = -1; + if (vol) vol->set_slot(-1); +} + +/* + * Set to unload the current volume in the drive + */ +void DEVICE::set_unload() +{ + if (!m_unload && VolHdr.VolumeName[0] != 0) { + m_unload = true; + notify_newvol_in_attached_dcrs(NULL); + } +} + +/* + * Clear volume header + */ +void DEVICE::clear_volhdr() +{ + Dmsg1(100, "Clear volhdr vol=%s\n", VolHdr.VolumeName); + memset(&VolHdr, 0, sizeof(VolHdr)); + setVolCatInfo(false); +} + +void DEVICE::set_volcatinfo_from_dcr(DCR *dcr) +{ + VolCatInfo = dcr->VolCatInfo; +} + +/* + * Close the device + * Can enter with dcr==NULL + */ +bool DEVICE::close(DCR *dcr) +{ + bool ok = true; + + Dmsg5(40, "close_dev vol=%s fd=%d dev=%p adata=%d dev=%s\n", + VolHdr.VolumeName, m_fd, this, adata, print_name()); + offline_or_rewind(dcr); + + if (!is_open()) { + Dmsg2(200, "device %s already closed vol=%s\n", print_name(), + VolHdr.VolumeName); + return true; /* already closed */ + } + + switch (dev_type) { + case B_VTL_DEV: + case B_VTAPE_DEV: + case B_TAPE_DEV: + unlock_door(); + /* Fall through wanted */ + default: + if (d_close(m_fd) != 0) { + berrno be; + dev_errno = errno; + Mmsg2(errmsg, _("Error closing device %s. ERR=%s.\n"), + print_name(), be.bstrerror()); + ok = false; + } + break; + } + + unmount(1); /* do unmount if required */ + + /* Clean up device packet so it can be reused */ + clear_opened(); + + state &= ~(ST_LABEL|ST_READ|ST_APPEND|ST_EOT|ST_WEOT|ST_EOF| + ST_NOSPACE|ST_MOUNTED|ST_MEDIA|ST_SHORT); + label_type = B_BACULA_LABEL; + file = block_num = 0; + file_size = 0; + file_addr = 0; + EndFile = EndBlock = 0; + openmode = 0; + clear_volhdr(); + memset(&VolCatInfo, 0, sizeof(VolCatInfo)); + if (tid) { + stop_thread_timer(tid); + tid = 0; + } + return ok; +} + +/* + * If timeout, wait until the mount command returns 0. + * If !timeout, try to mount the device only once. + */ +bool DEVICE::mount(int timeout) +{ + Enter(dbglvl); + if (!is_mounted() && device->mount_command) { + return mount_file(1, timeout); + } + return true; +} + +/* + * Unmount the device + * If timeout, wait until the unmount command returns 0. + * If !timeout, try to unmount the device only once. 
+ */ +bool DEVICE::unmount(int timeout) +{ + Enter(dbglvl); + if (is_mounted() && requires_mount() && device->unmount_command) { + return mount_file(0, timeout); + } + return true; +} + + +/* + * Edit codes into (Un)MountCommand, Write(First)PartCommand + * %% = % + * %a = archive device name + * %e = erase (set if cannot mount and first part) + * %n = part number + * %m = mount point + * %v = last part name + * + * omsg = edited output message + * imsg = input string containing edit codes (%x) + * + */ +void DEVICE::edit_mount_codes(POOL_MEM &omsg, const char *imsg) +{ + const char *p; + const char *str; + char add[20]; + + POOL_MEM archive_name(PM_FNAME); + + omsg.c_str()[0] = 0; + Dmsg1(800, "edit_mount_codes: %s\n", imsg); + for (p=imsg; *p; p++) { + if (*p == '%') { + switch (*++p) { + case '%': + str = "%"; + break; + case 'a': + str = dev_name; + break; + case 'e': + str = "0"; + /* ***FIXME*** this may be useful for Cloud */ +#ifdef xxx + if (num_dvd_parts == 0) { + if (truncating || blank_dvd) { + str = "2"; + } else { + str = "1"; + } + } else { + str = "0"; + } +#endif + break; + case 'n': + bsnprintf(add, sizeof(add), "%d", part); + str = add; + break; + case 'm': + str = device->mount_point; + break; + default: + add[0] = '%'; + add[1] = *p; + add[2] = 0; + str = add; + break; + } + } else { + add[0] = *p; + add[1] = 0; + str = add; + } + Dmsg1(1900, "add_str %s\n", str); + pm_strcat(omsg, (char *)str); + Dmsg1(1800, "omsg=%s\n", omsg.c_str()); + } +} + +/* return the last timer interval (ms) + * or 0 if something goes wrong + */ +btime_t DEVICE::get_timer_count() +{ + btime_t temp = last_timer; + last_timer = get_current_btime(); + temp = last_timer - temp; /* get elapsed time */ + return (temp>0)?temp:0; /* take care of skewed clock */ +} + +/* read from fd */ +ssize_t DEVICE::read(void *buf, size_t len) +{ + ssize_t read_len; + + get_timer_count(); + + read_len = d_read(m_fd, buf, len); + + last_tick = get_timer_count(); + + DevReadTime += last_tick; + VolCatInfo.VolReadTime += last_tick; + + if (read_len > 0) { /* skip error */ + DevReadBytes += read_len; + } + + return read_len; +} + +/* write to fd */ +ssize_t DEVICE::write(const void *buf, size_t len) +{ + ssize_t write_len; + + get_timer_count(); + + write_len = d_write(m_fd, buf, len); + + last_tick = get_timer_count(); + + DevWriteTime += last_tick; + VolCatInfo.VolWriteTime += last_tick; + + if (write_len > 0) { /* skip error */ + DevWriteBytes += write_len; + } + + return write_len; +} + +/* Return the resource name for the device */ +const char *DEVICE::name() const +{ + return device->hdr.name; +} + +uint32_t DEVICE::get_file() +{ + if (is_tape()) { + return file; + } else { + uint64_t bytes = VolCatInfo.VolCatAdataBytes + VolCatInfo.VolCatAmetaBytes; + return (uint32_t)(bytes >> 32); + } +} + +uint32_t DEVICE::get_block_num() +{ + if (is_tape()) { + return block_num; + } else { + return VolCatInfo.VolCatAdataBlocks + VolCatInfo.VolCatAmetaBlocks; + } +} + +/* + * Walk through all attached jcrs indicating the volume has changed + * Note: If you have the new VolumeName, it is passed here, + * otherwise pass a NULL. + */ +void +DEVICE::notify_newvol_in_attached_dcrs(const char *newVolumeName) +{ + Dmsg2(140, "Notify dcrs of vol change. 
oldVolume=%s NewVolume=%s\n", + getVolCatName(), newVolumeName?newVolumeName:"*None*"); + Lock_dcrs(); + DCR *mdcr; + foreach_dlist(mdcr, attached_dcrs) { + if (mdcr->jcr->JobId == 0) { + continue; /* ignore console */ + } + mdcr->NewVol = true; + mdcr->NewFile = true; + if (newVolumeName && mdcr->VolumeName != newVolumeName) { + bstrncpy(mdcr->VolumeName, newVolumeName, sizeof(mdcr->VolumeName)); + Dmsg2(140, "Set NewVol=%s in JobId=%d\n", mdcr->VolumeName, mdcr->jcr->JobId); + } + } + Unlock_dcrs(); +} + +/* + * Walk through all attached jcrs indicating the File has changed + */ +void +DEVICE::notify_newfile_in_attached_dcrs() +{ + Dmsg1(140, "Notify dcrs of file change. Volume=%s\n", getVolCatName()); + Lock_dcrs(); + DCR *mdcr; + foreach_dlist(mdcr, attached_dcrs) { + if (mdcr->jcr->JobId == 0) { + continue; /* ignore console */ + } + Dmsg1(140, "Notify JobI=%d\n", mdcr->jcr->JobId); + mdcr->NewFile = true; + } + Unlock_dcrs(); +} + + + +/* + * Free memory allocated for the device + * Can enter with dcr==NULL + */ +void DEVICE::term(DCR *dcr) +{ + Dmsg1(900, "term dev: %s\n", print_name()); + if (!dcr) { + d_close(m_fd); + } else { + close(dcr); + } + if (dev_name) { + free_memory(dev_name); + dev_name = NULL; + } + if (adev_name) { + free_memory(adev_name); + adev_name = NULL; + } + if (prt_name) { + free_memory(prt_name); + prt_name = NULL; + } + if (errmsg) { + free_pool_memory(errmsg); + errmsg = NULL; + } + pthread_mutex_destroy(&m_mutex); + pthread_cond_destroy(&wait); + pthread_cond_destroy(&wait_next_vol); + pthread_mutex_destroy(&spool_mutex); + pthread_mutex_destroy(&freespace_mutex); + if (attached_dcrs) { + delete attached_dcrs; + attached_dcrs = NULL; + } + /* We let the DEVRES pointer if not our device */ + if (device && device->dev == this) { + device->dev = NULL; + } + delete this; +} + +/* Get freespace values */ +void DEVICE::get_freespace(uint64_t *freeval, uint64_t *totalval) +{ + get_os_device_freespace(); + P(freespace_mutex); + if (is_freespace_ok()) { + *freeval = free_space; + *totalval = total_space; + } else { + *freeval = *totalval = 0; + } + V(freespace_mutex); +} + +/* Set freespace values */ +void DEVICE::set_freespace(uint64_t freeval, uint64_t totalval, int errnoval, bool valid) +{ + P(freespace_mutex); + free_space = freeval; + total_space = totalval; + free_space_errno = errnoval; + if (valid) { + set_freespace_ok(); + } else { + clear_freespace_ok(); + } + V(freespace_mutex); +} + +/* Convenient function that return true only if the device back-end is a + * filesystem that its nearly full. 
(the free space is below the given threshold) + */ +bool DEVICE::is_fs_nearly_full(uint64_t threshold) +{ + uint64_t freeval, totalval; + if (is_file()) { + get_freespace(&freeval, &totalval); + if (totalval > 0) { + if (freeval < threshold) { + return true; + } + } + } + return false; +} + +static const char *modes[] = { + "CREATE_READ_WRITE", + "OPEN_READ_WRITE", + "OPEN_READ_ONLY", + "OPEN_WRITE_ONLY" +}; + + +const char *mode_to_str(int mode) +{ + static char buf[100]; + if (mode < 1 || mode > 4) { + bsnprintf(buf, sizeof(buf), "BAD mode=%d", mode); + return buf; + } + return modes[mode-1]; +} + +void DEVICE::setVolCatName(const char *name) +{ + bstrncpy(VolCatInfo.VolCatName, name, sizeof(VolCatInfo.VolCatName)); + setVolCatInfo(false); +} + +void DEVICE::setVolCatStatus(const char *status) +{ + bstrncpy(VolCatInfo.VolCatStatus, status, sizeof(VolCatInfo.VolCatStatus)); + setVolCatInfo(false); +} + +void DEVICE::updateVolCatBytes(uint64_t bytes) +{ + Lock_VolCatInfo(); + VolCatInfo.VolCatAmetaBytes += bytes; + VolCatInfo.VolCatBytes += bytes; + setVolCatInfo(false); + Unlock_VolCatInfo(); +} + +void DEVICE::updateVolCatHoleBytes(uint64_t hole) +{ + return; +} + +void DEVICE::updateVolCatPadding(uint64_t padding) +{ + Lock_VolCatInfo(); + VolCatInfo.VolCatAmetaPadding += padding; + VolCatInfo.VolCatPadding += padding; + setVolCatInfo(false); + Unlock_VolCatInfo(); +} + + +void DEVICE::updateVolCatBlocks(uint32_t blocks) +{ + Lock_VolCatInfo(); + VolCatInfo.VolCatAmetaBlocks += blocks; + VolCatInfo.VolCatBlocks += blocks; + setVolCatInfo(false); + Unlock_VolCatInfo(); +} + +void DEVICE::updateVolCatWrites(uint32_t writes) +{ + Lock_VolCatInfo(); + VolCatInfo.VolCatAmetaWrites += writes; + VolCatInfo.VolCatWrites += writes; + setVolCatInfo(false); + Unlock_VolCatInfo(); +} + +void DEVICE::updateVolCatReads(uint32_t reads) +{ + Lock_VolCatInfo(); + VolCatInfo.VolCatAmetaReads += reads; + VolCatInfo.VolCatReads += reads; + setVolCatInfo(false); + Unlock_VolCatInfo(); +} + +void DEVICE::updateVolCatReadBytes(uint64_t bytes) +{ + Lock_VolCatInfo(); + VolCatInfo.VolCatAmetaRBytes += bytes; + VolCatInfo.VolCatRBytes += bytes; + setVolCatInfo(false); + Unlock_VolCatInfo(); +} + +void DEVICE::set_nospace() +{ + state |= ST_NOSPACE; +} + +void DEVICE::clear_nospace() +{ + state &= ~ST_NOSPACE; +} + +/* Put device in append mode */ +void DEVICE::set_append() +{ + state &= ~(ST_NOSPACE|ST_READ|ST_EOT|ST_EOF|ST_WEOT); /* remove EOF/EOT flags */ + state |= ST_APPEND; +} + +/* Clear append mode */ +void DEVICE::clear_append() +{ + state &= ~ST_APPEND; +} + +/* Put device in read mode */ +void DEVICE::set_read() +{ + state &= ~(ST_APPEND|ST_EOT|ST_EOF|ST_WEOT); /* remove EOF/EOT flags */ + state |= ST_READ; +} + +/* Clear read mode */ +void DEVICE::clear_read() +{ + state &= ~ST_READ; +} + +/* + * Get freespace using OS calls + * TODO: See if it's working with mount commands + */ +bool DEVICE::get_os_device_freespace() +{ + int64_t freespace, totalspace; + + if (!is_file()) { + return true; + } + if (fs_get_free_space(dev_name, &freespace, &totalspace) == 0) { + set_freespace(freespace, totalspace, 0, true); + Mmsg(errmsg, ""); + return true; + + } else { + set_freespace(0, 0, 0, false); /* No valid freespace */ + } + return false; +} + +/* Update the free space on the device */ +bool DEVICE::update_freespace() +{ + POOL_MEM ocmd(PM_FNAME); + POOLMEM* results; + char* icmd; + char* p; + uint64_t free, total; + char ed1[50]; + bool ok = false; + int status; + berrno be; + + if (!is_file()) { + 
Mmsg(errmsg, ""); + return true; + } + + /* The device must be mounted in order for freespace to work */ + if (requires_mount()) { + mount(1); + } + + if (get_os_device_freespace()) { + Dmsg4(20, "get_os_device_freespace: free_space=%s freespace_ok=%d free_space_errno=%d have_media=%d\n", + edit_uint64(free_space, ed1), !!is_freespace_ok(), free_space_errno, !!have_media()); + return true; + } + + icmd = device->free_space_command; + + if (!icmd) { + set_freespace(0, 0, 0, false); + Dmsg2(20, "ERROR: update_free_space_dev: free_space=%s, free_space_errno=%d (!icmd)\n", + edit_uint64(free_space, ed1), free_space_errno); + Mmsg(errmsg, _("No FreeSpace command defined.\n")); + return false; + } + + edit_mount_codes(ocmd, icmd); + + Dmsg1(20, "update_freespace: cmd=%s\n", ocmd.c_str()); + + results = get_pool_memory(PM_MESSAGE); + + Dmsg1(20, "Run freespace prog=%s\n", ocmd.c_str()); + status = run_program_full_output(ocmd.c_str(), max_open_wait/2, results); + Dmsg2(20, "Freespace status=%d result=%s\n", status, results); + /* Should report "1223232 12323232\n" "free total\n" */ + if (status == 0) { + free = str_to_int64(results) * 1024; + p = results; + + if (skip_nonspaces(&p)) { + total = str_to_int64(p) * 1024; + + } else { + total = 0; + } + + Dmsg1(400, "Free space program run: Freespace=%s\n", results); + if (free >= 0) { + set_freespace(free, total, 0, true); /* have valid freespace */ + Mmsg(errmsg, ""); + ok = true; + } + } else { + set_freespace(0, 0, EPIPE, false); /* no valid freespace */ + Mmsg2(errmsg, _("Cannot run free space command. Results=%s ERR=%s\n"), + results, be.bstrerror(status)); + + dev_errno = free_space_errno; + Dmsg4(20, "Cannot get free space on device %s. free_space=%s, " + "free_space_errno=%d ERR=%s\n", + print_name(), edit_uint64(free_space, ed1), + free_space_errno, errmsg); + } + free_pool_memory(results); + Dmsg4(20, "leave update_freespace: free_space=%s freespace_ok=%d free_space_errno=%d have_media=%d\n", + edit_uint64(free_space, ed1), !!is_freespace_ok(), free_space_errno, !!have_media()); + return ok; +} + +bool DEVICE::weof(DCR */*dcr*/, int num) +{ + Dmsg1(129, "=== weof_dev=%s\n", print_name()); + + if (!is_open()) { + dev_errno = EBADF; + Mmsg1(errmsg, _("Bad call to weof_dev. Device %s not open\n"), print_name()); + Emsg0(M_FATAL, 0, errmsg); + return false; + } + + if (!can_append()) { + Mmsg1(errmsg, _("Attempt to WEOF on non-appendable Volume %s\n"), VolHdr.VolumeName); + Emsg0(M_FATAL, 0, errmsg); + return false; + } + + file_size = 0; + return true; +} + +/* + * Very basic functions -- no device specific code. + * Returns: true on succes + * false on error + */ +bool DEVICE::eod(DCR *dcr) +{ + bool ok = true; + + Enter(dbglvl); + if (m_fd < 0) { + dev_errno = EBADF; + Mmsg1(errmsg, _("Bad call to eod. 
Device %s not open\n"), print_name()); + Dmsg1(100, "%s", errmsg); + return false; + } + + if (at_eot()) { + Leave(100); + return true; + } + clear_eof(); /* remove EOF flag */ + block_num = file = 0; + file_size = 0; + file_addr = 0; + Leave(100); + return ok; +} + +bool DEVICE::is_eod_valid(DCR *dcr) +{ + return true; +} + +bool DEVICE::open_next_part(DCR */*dcr*/) +{ + return true; +} + +bool DEVICE::close_part(DCR */*dcr*/) +{ + return true; +} + +DEVICE *DEVICE::get_dev(DCR */*dcr*/) +{ + return this; +} + +uint32_t DEVICE::get_hi_addr() +{ + return (uint32_t)(file_addr >> 32); +} + +uint32_t DEVICE::get_hi_addr(boffset_t addr) +{ + return (uint32_t)(addr >> 32); +} + +uint32_t DEVICE::get_low_addr() +{ + return (uint32_t)(file_addr); +} + +uint32_t DEVICE::get_low_addr(boffset_t addr) +{ + return (uint32_t)(addr); +} + + +uint64_t DEVICE::get_full_addr() +{ + return file_addr; +} + +uint64_t DEVICE::get_full_addr(boffset_t addr) +{ + return addr; +} + + +uint64_t DEVICE::get_full_addr(uint32_t hi, uint32_t low) +{ + return ((uint64_t)hi)<<32 | (uint64_t)low; +} + +/* Note: this subroutine is not in the class */ +uint64_t get_full_addr(uint32_t hi, uint32_t low) +{ + return ((uint64_t)hi)<<32 | (uint64_t)low; +} + +/* Print the file address */ +char *DEVICE::print_addr(char *buf, int32_t buf_len) +{ + buf[0] = 0; + bsnprintf(buf, buf_len, "%llu", get_full_addr()); + return buf; +} + +char *DEVICE::print_addr(char *buf, int32_t buf_len, boffset_t addr) +{ + buf[0] = 0; + bsnprintf(buf, buf_len, "%llu", addr); + return buf; +} + + +bool DEVICE::do_size_checks(DCR *dcr, DEV_BLOCK *block) +{ + JCR *jcr = dcr->jcr; + + if (is_user_volume_size_reached(dcr, true)) { + Dmsg0(40, "Calling terminate_writing_volume\n"); + terminate_writing_volume(dcr); + reread_last_block(dcr); /* Only used on tapes */ + dev_errno = ENOSPC; + return false; + } + + /* + * Limit maximum File size on volume to user specified value. + * In practical terms, this means to put an EOF mark on + * a tape after every X bytes. This effectively determines + * how many index records we have (JobMedia). If you set + * max_file_size too small, it will cause a lot of shoe-shine + * on very fast modern tape (LTO-3 and above). + */ + if ((max_file_size > 0) && + (file_size+block->binbuf) >= max_file_size) { + file_size = 0; /* reset file size */ + + if (!weof(dcr, 1)) { /* write eof */ + Dmsg0(50, "WEOF error in max file size.\n"); + Jmsg(jcr, M_FATAL, 0, _("Unable to write EOF. ERR=%s\n"), + bstrerror()); + Dmsg0(40, "Calling terminate_writing_volume\n"); + terminate_writing_volume(dcr); + dev_errno = ENOSPC; + return false; + } + + if (!do_new_file_bookkeeping(dcr)) { + /* Error message already sent */ + return false; + } + } + return true; +} + +bool DEVICE::get_tape_alerts(DCR *dcr) +{ + return true; +} + +void DEVICE::show_tape_alerts(DCR *dcr, alert_list_type type, + alert_list_which which, alert_cb alert_callback) +{ + return; +} + +int DEVICE::delete_alerts() +{ + return 0; +} + +bool DEVICE::get_tape_worm(DCR *dcr) +{ + return false; +} diff --git a/src/stored/dev.h b/src/stored/dev.h new file mode 100644 index 00000000..dc80cb79 --- /dev/null +++ b/src/stored/dev.h @@ -0,0 +1,836 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Definitions for using the Device functions in Bacula + * Tape and File storage access + * + * Kern Sibbald, MM + */ + +/* + * Some details of how device reservations work + * + * class DEVICE: + * set_load() set to load volume + * needs_load() volume must be loaded (i.e. set_load done) + * clear_load() load done. + * set_unload() set to unload volume + * needs_unload() volume must be unloaded + * clear_unload() volume unloaded + * + * reservations are temporary until the drive is acquired + * inc_reserved() increments num of reservations + * dec_reserved() decrements num of reservations + * num_reserved() number of reservations + * + * class DCR: + * set_reserved() sets local reserve flag and calls dev->inc_reserved() + * clear_reserved() clears local reserve flag and calls dev->dec_reserved() + * is_reserved() returns local reserved flag + * unreserve_device() much more complete unreservation + * + */ + +#ifndef __DEV_H +#define __DEV_H 1 + +#undef DCR /* used by Bacula */ + +/* Return values from wait_for_sysop() */ +enum { + W_ERROR = 1, + W_TIMEOUT = 2, + W_POLL = 3, + W_MOUNT = 4, + W_WAKE = 5 +}; + +/* Arguments to open_dev() */ +enum { + CREATE_READ_WRITE = 1, + OPEN_READ_WRITE = 2, + OPEN_READ_ONLY = 3, + OPEN_WRITE_ONLY = 4 +}; + +/* Argument to print tape alerts */ +enum alert_list_type { + list_codes = 1, + list_short = 2, + list_long = 3 +}; + +enum alert_list_which { + list_last = 1, + list_all = 2 +}; + +typedef void (alert_cb)(void *alert_ctx, const char *short_msg, + const char *long_msg, char *Volume, int severity, + int flags, int alert, utime_t alert_time); + +/* Aligned Data Disk Volume extension */ +#define ADATA_EXTENSION ".add" + +/* Generic status bits returned from status_dev() */ +#define BMT_TAPE (1<<0) /* is tape device */ +#define BMT_EOF (1<<1) /* just read EOF */ +#define BMT_BOT (1<<2) /* at beginning of tape */ +#define BMT_EOT (1<<3) /* end of tape reached */ +#define BMT_SM (1<<4) /* DDS setmark */ +#define BMT_EOD (1<<5) /* DDS at end of data */ +#define BMT_WR_PROT (1<<6) /* tape write protected */ +#define BMT_ONLINE (1<<7) /* tape online */ +#define BMT_DR_OPEN (1<<8) /* tape door open */ +#define BMT_IM_REP_EN (1<<9) /* immediate report enabled */ + + +/* Bits for device capabilities */ +#define CAP_EOF (1<<0) /* has MTWEOF */ +#define CAP_BSR (1<<1) /* has MTBSR */ +#define CAP_BSF (1<<2) /* has MTBSF */ +#define CAP_FSR (1<<3) /* has MTFSR */ +#define CAP_FSF (1<<4) /* has MTFSF */ +#define CAP_EOM (1<<5) /* has MTEOM */ +#define CAP_REM (1<<6) /* is removable media */ +#define CAP_RACCESS (1<<7) /* is random access device */ +#define CAP_AUTOMOUNT (1<<8) /* Read device at start to see what is there */ +#define CAP_LABEL (1<<9) /* Label blank tapes */ +#define CAP_ANONVOLS (1<<10) /* Mount without knowing volume name */ +#define CAP_ALWAYSOPEN (1<<11) /* always keep device open */ +#define CAP_AUTOCHANGER (1<<12) /* AutoChanger */ +#define CAP_OFFLINEUNMOUNT (1<<13) /* Offline before unmount */ +#define CAP_STREAM (1<<14) /* Stream device */ +#define CAP_BSFATEOM (1<<15) /* Backspace file at EOM */ +#define CAP_FASTFSF (1<<16) /* Fast forward 
space file */ +#define CAP_TWOEOF (1<<17) /* Write two eofs for EOM */ +#define CAP_CLOSEONPOLL (1<<18) /* Close device on polling */ +#define CAP_POSITIONBLOCKS (1<<19) /* Use block positioning */ +#define CAP_MTIOCGET (1<<20) /* Basic support for fileno and blkno */ +#define CAP_REQMOUNT (1<<21) /* Require mount and unmount */ +#define CAP_CHECKLABELS (1<<22) /* Check for ANSI/IBM labels */ +#define CAP_BLOCKCHECKSUM (1<<23) /* Create/test block checksum */ +#define CAP_LSEEK (1<<24) /* Has lseek function defined i.e. basically File storage */ + +/* Test state */ +#define dev_state(dev, st_state) ((dev)->state & (st_state)) + +/* Device state bits */ +#define ST_XXXXXX (1<<0) /* was ST_OPENED */ +#define ST_XXXXX (1<<1) /* was ST_TAPE */ +#define ST_XXXX (1<<2) /* was ST_FILE */ +#define ST_XXX (1<<3) /* was ST_FIFO */ +#define ST_XX (1<<4) /* was ST_DVD */ +#define ST_X (1<<5) /* was ST_PROG */ + +#define ST_LABEL (1<<6) /* label found */ +#define ST_MALLOC (1<<7) /* dev packet malloc'ed in init_dev() */ +#define ST_APPEND (1<<8) /* ready for Bacula append */ +#define ST_READ (1<<9) /* ready for Bacula read */ +#define ST_EOT (1<<10) /* at end of tape */ +#define ST_WEOT (1<<11) /* Got EOT on write */ +#define ST_EOF (1<<12) /* Read EOF i.e. zero bytes */ +#define ST_NEXTVOL (1<<13) /* Start writing on next volume */ +#define ST_SHORT (1<<14) /* Short block read */ +#define ST_MOUNTED (1<<15) /* the device is mounted to the mount point */ +#define ST_MEDIA (1<<16) /* Media found in mounted device */ +#define ST_OFFLINE (1<<17) /* set offline by operator */ +#define ST_PART_SPOOLED (1<<18) /* spooling part */ +#define ST_FREESPACE_OK (1<<19) /* Have valid freespace */ +#define ST_NOSPACE (1<<20) /* No space on device */ + + +/* Volume Catalog Information structure definition */ +struct VOLUME_CAT_INFO { + /* Media info for the current Volume */ + uint64_t VolCatBytes; /* Total bytes written */ + uint64_t VolCatAmetaBytes; /* Ameta bytes written */ + uint64_t VolCatAdataBytes; /* Adata bytes written */ + uint64_t VolCatPadding; /* Total padding bytes written */ + uint64_t VolCatAmetaPadding; /* Ameta zeros (padding) written */ + uint64_t VolCatAdataPadding; /* Adata zeros (padding) written */ + uint32_t VolCatBlocks; /* Total blocks */ + uint32_t VolCatAmetaBlocks; /* Ameta blocks */ + uint32_t VolCatAdataBlocks; /* Adata blocks */ + uint32_t VolCatWrites; /* Total writes this volume */ + uint32_t VolCatAmetaWrites; /* Ameta writes this volume */ + uint32_t VolCatAdataWrites; /* Adata writes this volume */ + uint32_t VolCatReads; /* Total reads this volume */ + uint32_t VolCatAmetaReads; /* Ameta reads this volume */ + uint32_t VolCatAdataReads; /* Adata reads this volume */ + uint64_t VolCatRBytes; /* Total bytes read */ + uint64_t VolCatAmetaRBytes; /* Ameta bytes read */ + uint64_t VolCatAdataRBytes; /* Adata bytes read */ + uint64_t VolCatHoleBytes; /* Total hole bytes */ + uint64_t VolEndAddr; /* Last Volume address */ + uint64_t VolLastPartBytes; /* Bytes in last part */ + + uint32_t VolCatHoles; /* Number of holes */ + uint32_t VolCatJobs; /* Number of jobs on this Volume */ + uint32_t VolCatFiles; /* Number of files */ + uint32_t VolCatType; /* Volume drive type */ + uint32_t VolCatParts; /* Max number of cache parts */ + uint32_t VolCatCloudParts; /* Max number of cloud parts */ + uint32_t VolCatMounts; /* Number of mounts this volume */ + uint32_t VolCatErrors; /* Number of errors this volume */ + uint32_t VolCatRecycles; /* Number of recycles this volume */ + uint32_t EndFile; 
/* Last file number */ + uint32_t EndBlock; /* Last block number */ + + int32_t LabelType; /* Bacula/ANSI/IBM */ + int32_t Slot; /* >0=Slot loaded, 0=nothing, -1=unknown */ + uint32_t VolCatMaxJobs; /* Maximum Jobs to write to volume */ + uint32_t VolCatMaxFiles; /* Maximum files to write to volume */ + uint64_t VolCatMaxBytes; /* Max bytes to write to volume */ + uint64_t VolCatCapacityBytes; /* capacity estimate */ + btime_t VolReadTime; /* time spent reading */ + btime_t VolWriteTime; /* time spent writing this Volume */ + int64_t VolMediaId; /* MediaId */ + int64_t VolScratchPoolId; /* ScratchPoolId */ + utime_t VolFirstWritten; /* Time of first write */ + utime_t VolLastWritten; /* Time of last write */ + bool InChanger; /* Set if vol in current magazine */ + bool is_valid; /* set if this data is valid */ + bool VolEnabled; /* set if volume enabled */ + bool VolRecycle; /* set if volume can be recycled */ + char VolCatStatus[20]; /* Volume status */ + char VolCatName[MAX_NAME_LENGTH]; /* Desired volume to mount */ +}; + +class DEVRES; /* Device resource defined in stored_conf.h */ +class DCR; /* forward reference */ +class VOLRES; /* forward reference */ + +/* + * Device structure definition. There is one of these for + * each physical device. Everything here is "global" to + * that device and effects all jobs using the device. + */ +class DEVICE: public SMARTALLOC { +public: + int m_blocked; /* set if we must wait (i.e. change tape) */ + int m_count; /* Mutex use count -- DEBUG only */ + int m_num_reserved; /* counter of device reservations */ + pthread_t m_pid; /* Thread that locked -- DEBUG only */ + int32_t m_slot; /* slot loaded in drive or -1 if none */ + bool m_unload; /* set when Volume must be unloaded */ + bool m_load; /* set when Volume must be loaded */ + bool m_wait; /* must wait for device to free volume */ + bool m_append_reserve; /* reserved for append or read in m_num_reserved set */ + bthread_mutex_t m_mutex; /* access control */ + bthread_mutex_t acquire_mutex; /* mutex for acquire code */ + pthread_mutex_t read_acquire_mutex; /* mutex for acquire read code */ + pthread_mutex_t adata_mutex; /* Lock for writing adata */ + pthread_mutex_t volcat_mutex; /* VolCatInfo mutex */ + pthread_mutex_t dcrs_mutex; /* Attached dcr mutex */ + pthread_mutex_t freespace_mutex; /* mutex to compute the freespace */ + +public: + DEVICE() {}; + virtual ~DEVICE() {}; + DEVICE * volatile swap_dev; /* Swap vol from this device */ + dlist *attached_dcrs; /* attached DCR list */ + bthread_mutex_t spool_mutex; /* mutex for updating spool_size */ + pthread_cond_t wait; /* thread wait variable */ + pthread_cond_t wait_next_vol; /* wait for tape to be mounted */ + pthread_t no_wait_id; /* this thread must not wait */ + int m_fd; /* file descriptor */ + int dev_prev_blocked; /* previous blocked state */ + int num_waiting; /* number of threads waiting */ + int num_writers; /* number of writing threads */ + int capabilities; /* capabilities mask */ + int state; /* state mask */ + int dev_errno; /* Our own errno */ + int mode; /* read/write modes */ + int openmode; /* parameter passed to open_dev (useful to reopen the device) */ + int preserve; /* preserve open state */ + int dev_type; /* device type */ + uint32_t blocked_by; /* JobId that blocked */ + bool enabled; /* Set when enabled */ + bool autoselect; /* Autoselect in autochanger */ + bool read_only; /* Device is read only */ + bool initiated; /* set when init_dev() called */ + bool m_is_worm; /* set for worm tape */ + bool m_shstore; /* 
Shares storage can be used */ + bool m_shstore_lock; /* set if shared lock set */ + bool m_shstore_user_lock; /* set if user set shared lock */ + bool m_shstore_register; /* set if register key set */ + bool m_shstore_blocked; /* Set if I am blocked */ + bool adata; /* set if adata device */ + int label_type; /* Bacula/ANSI/IBM label types */ + uint32_t drive_index; /* Autochanger drive index (base 0) */ + POOLMEM *dev_name; /* Physical device name */ + POOLMEM *adev_name; /* Aligned device name */ + POOLMEM *prt_name; /* Name used for display purposes */ + char *errmsg; /* nicely edited error message */ + uint32_t block_num; /* current block number base 0 */ + uint32_t LastBlock; /* last DEV_BLOCK number written to Volume */ + uint32_t file; /* current file number base 0 */ + uint64_t file_addr; /* Current file read/write address */ + uint64_t file_size; /* Current file size */ + uint32_t EndBlock; /* last block written */ + uint32_t EndFile; /* last file written */ + uint64_t EndAddr; /* Ending write addr */ + uint32_t min_block_size; /* min block size */ + uint32_t max_block_size; /* max block size */ + uint32_t max_concurrent_jobs; /* maximum simultaneous jobs this drive */ + uint64_t max_volume_size; /* max bytes to put on one volume */ + uint64_t max_file_size; /* max file size to put in one file on volume */ + uint64_t volume_capacity; /* advisory capacity */ + uint64_t max_spool_size; /* maximum spool file size */ + uint64_t spool_size; /* current spool size for this device */ + uint32_t max_rewind_wait; /* max secs to allow for rewind */ + uint32_t max_open_wait; /* max secs to allow for open */ + uint32_t padding_size; /* adata block padding -- bytes */ + uint32_t file_alignment; /* adata file alignment -- bytes */ + uint32_t adata_size; /* adata block size */ + uint64_t adata_addr; /* Next adata write address */ + + uint64_t max_part_size; /* max part size */ + uint64_t part_size; /* current part size */ + uint32_t part; /* current part number (starts at 0) */ + /* state ST_FREESPACE_OK is set if free_space is valid */ + uint64_t free_space; /* current free space on device */ + uint64_t total_space; /* current used space on device */ + uint64_t devno; /* device id */ + uint64_t min_free_space; /* Minimum free disk space */ + int free_space_errno; /* indicates errno getting freespace */ + bool truncating; /* if set, we are currently truncating */ + + utime_t vol_poll_interval; /* interval between polling Vol mount */ + DEVRES *device; /* pointer to Device Resource */ + VOLRES *vol; /* Pointer to Volume reservation item */ + btimer_t *tid; /* timer id */ + + VOLUME_CAT_INFO VolCatInfo; /* Volume Catalog Information */ + VOLUME_LABEL VolHdr; /* Actual volume label (only needs to be correct when writing a new header) */ + char pool_name[MAX_NAME_LENGTH]; /* pool name */ + char pool_type[MAX_NAME_LENGTH]; /* pool type */ + char reserved_pool_name[MAX_NAME_LENGTH]; /* pool name for reserves */ + + char LoadedVolName[MAX_NAME_LENGTH]; /* Last loaded Volume */ + char lock_holder[12]; /* holder of SCSI lock */ + bool poll; /* set to poll Volume */ + /* Device wait times ***FIXME*** look at durations */ + int min_wait; + int max_wait; + int max_num_wait; + int wait_sec; + int rem_wait_sec; + int num_wait; + + btime_t last_timer; /* used by read/write/seek to get stats (usec) */ + btime_t last_tick; /* contains last read/write time (usec) */ + + btime_t DevReadTime; + btime_t DevWriteTime; + uint64_t DevWriteBytes; + uint64_t DevReadBytes; + uint64_t usage; /* Drive usage read+write bytes 
*/ + + /* Methods */ + btime_t get_timer_count(); /* return the last timer interval (ms) */ + + void device_generic_init(JCR *jcr, DEVRES *device); + int has_cap(int cap) const { return capabilities & cap; } + void clear_cap(int cap) { capabilities &= ~cap; } + void set_cap(int cap) { capabilities |= cap; } + void set_worm(bool is_worm) { m_is_worm = is_worm; } + bool do_checksum() const { return (capabilities & CAP_BLOCKCHECKSUM) != 0; } + int is_autochanger() const { return capabilities & CAP_AUTOCHANGER; } + int requires_mount() const { return capabilities & CAP_REQMOUNT; } + int is_removable() const { return capabilities & CAP_REM; } + bool is_tape() const { return (dev_type == B_TAPE_DEV || + dev_type == B_VTAPE_DEV); } + bool is_file() const { return (dev_type == B_FILE_DEV) || is_aligned() || is_cloud(); } + bool is_cloud() const { return dev_type == B_CLOUD_DEV; } + bool is_adata() const { return dev_type == B_ADATA_DEV; } + bool is_aligned() const { return dev_type == B_ALIGNED_DEV; } + bool is_null() const { return dev_type == B_NULL_DEV; } + bool is_fifo() const { return dev_type == B_FIFO_DEV; } + bool is_vtl() const { return dev_type == B_VTL_DEV; } + bool is_vtape() const { return dev_type == B_VTAPE_DEV; } + bool is_worm() const { return m_is_worm; } + bool is_open() const { return m_fd >= 0; } + int is_offline() const { return state & ST_OFFLINE; } + int is_labeled() const { return state & ST_LABEL; } + int is_mounted() const { return state & ST_MOUNTED; } + int is_unmountable() const { return ((is_file() && is_removable())); } + int num_reserved() const { return m_num_reserved; }; + int is_part_spooled() const { return state & ST_PART_SPOOLED; } + int have_media() const { return state & ST_MEDIA; } + int is_short_block() const { return state & ST_SHORT; } + int is_busy() const { return (state & ST_READ) || num_writers || num_reserved(); } + bool is_reserved_for_read() const { return num_reserved() && !m_append_reserve; } + bool is_ateot() const { return (state & ST_EOF) && (state & ST_EOT) && (state & ST_WEOT); } + int at_eof() const { return state & ST_EOF; } + int at_eot() const { return state & ST_EOT; } + int at_weot() const { return state & ST_WEOT; } + int can_append() const { return state & ST_APPEND; } + int is_freespace_ok() const { return state & ST_FREESPACE_OK; } + int is_nospace() const { return (is_freespace_ok() && (state & ST_NOSPACE)); }; + /* + * can_write() is meant for checking at the end of a job to see + * if we still have a tape (perhaps not if at end of tape + * and the job is canceled). 
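+ *
+ * A hedged illustration of the check below (illustrative, not from the
+ * original header): the ST_* bits are independent, so a drive that hits
+ * end-of-tape while writing keeps ST_APPEND and ST_LABEL but gains
+ * ST_WEOT, and can_write() then returns false even though is_open()
+ * and is_labeled() still hold.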
+ */ + int can_write() const { return is_open() && can_append() && + is_labeled() && !at_weot(); } + bool can_read() const { return (state & ST_READ) != 0; } + bool can_obtain_block() const { return + (m_blocked == BST_NOT_BLOCKED || + m_blocked == BST_UNMOUNTED || + m_blocked == BST_WAITING_FOR_SYSOP || + m_blocked == BST_UNMOUNTED_WAITING_FOR_SYSOP); }; + bool waiting_for_mount() const { return + (m_blocked == BST_UNMOUNTED || + m_blocked == BST_WAITING_FOR_SYSOP || + m_blocked == BST_UNMOUNTED_WAITING_FOR_SYSOP); }; + bool must_unload() const { return m_unload; }; + bool must_load() const { return m_load; }; + const char *strerror() const; + const char *archive_name() const; + const char *name() const; + const char *print_name() const; /* Name for display purposes */ + void set_ateot(); /* in dev.c */ + void set_eot() { state |= ST_EOT; }; + void set_eof() { state |= ST_EOF; }; + void set_labeled() { state |= ST_LABEL; }; + void set_offline() { state |= ST_OFFLINE; }; + void set_mounted() { state |= ST_MOUNTED; }; + void set_media() { state |= ST_MEDIA; }; + void set_short_block() { state |= ST_SHORT; }; + void set_freespace_ok() { state |= ST_FREESPACE_OK; } + void set_part_spooled(int val) { if (val) state |= ST_PART_SPOOLED; \ + else state &= ~ST_PART_SPOOLED; + }; + void get_freespace(uint64_t *freeval, uint64_t *totalval); /* in dev.c */ + void set_freespace(uint64_t freeval, uint64_t totalval, int errnoval, bool valid); /* in dev.c */ + bool is_fs_nearly_full(uint64_t threshold); + bool is_volume_to_unload() const { \ + return m_unload && strcmp(VolHdr.VolumeName, LoadedVolName) == 0; }; + void set_load() { m_load = true; }; + void set_wait() { m_wait = true; }; + void clear_wait() { m_wait = false; }; + bool must_wait() const { return m_wait; }; + void inc_reserved() { m_num_reserved++; } + void set_mounted(int val) { if (val) state |= ST_MOUNTED; \ + else state &= ~ST_MOUNTED; }; + void dec_reserved() { m_num_reserved--; ASSERT(m_num_reserved>=0); }; + void set_read_reserve() { m_append_reserve = false; }; + void set_append_reserve() { m_append_reserve = true; }; + void clear_labeled() { state &= ~ST_LABEL; }; + void clear_offline() { state &= ~ST_OFFLINE; }; + void clear_eot() { state &= ~ST_EOT; }; + void clear_eof() { state &= ~ST_EOF; }; + void clear_opened() { m_fd = -1; }; + void clear_mounted() { state &= ~ST_MOUNTED; }; + void clear_media() { state &= ~ST_MEDIA; }; + void clear_short_block() { state &= ~ST_SHORT; }; + void clear_freespace_ok() { state &= ~ST_FREESPACE_OK; }; + void clear_unload() { m_unload = false; }; + void clear_load() { m_load = false; }; + char *bstrerror(void) { return errmsg; }; + char *print_errmsg() { return errmsg; }; + int32_t get_slot() const { return m_slot; }; + void setVolCatInfo(bool valid) { VolCatInfo.is_valid = valid; }; + bool haveVolCatInfo() const { return VolCatInfo.is_valid; }; + + void clearVolCatBytes() { + VolCatInfo.VolCatBytes = 0; + VolCatInfo.VolCatAmetaBytes = 0; + VolCatInfo.VolCatAdataBytes = 0; + }; + + char *getVolCatName() { return VolCatInfo.VolCatName; }; + + void set_unload(); /* in dev.c */ + void clear_volhdr(); /* in dev.c */ + ssize_t read(void *buf, size_t len); /* in dev.c */ + ssize_t write(const void *buf, size_t len); /* in dev.c */ + void edit_mount_codes(POOL_MEM &omsg, const char *imsg); /* in dev.c */ + bool offline_or_rewind(DCR *dcr); /* in dev.c */ + bool fsr(int num); /* in dev.c */ + bool bsr(int num); /* in dev.c */ + int32_t get_os_tape_file(); /* in dev.c */ + bool scan_dir_for_volume(DCR 
*dcr); /* in scan.c */ + void clrerror(int func); /* in dev.c */ + void set_slot(int32_t slot); /* in dev.c */ + void clear_slot(); /* in dev.c */ + bool update_freespace(); /* in dev.c */ + bool get_os_device_freespace(); /* in dev.c */ + void notify_newvol_in_attached_dcrs(const char *VolumeName); /* in dev.c */ + void notify_newfile_in_attached_dcrs();/* in dev.c */ + void attach_dcr_to_dev(DCR *dcr); /* in acquire.c */ + void detach_dcr_from_dev(DCR *dcr); /* in acquire.c */ + + + uint32_t get_file(); /* in dev.c */ + uint32_t get_block_num(); /* in dev.c */ + + int fd() const { return m_fd; }; + bool mount_file(int mount, int dottimout); + void dump_volume_label(); + DEV_BLOCK *new_block(DCR *dcr, int size=0); + + + /* Virtual functions that can be overridden */ + virtual void setVolCatName(const char *name); + virtual void setVolCatStatus(const char *status); + virtual void free_dcr_blocks(DCR *dcr); /* in block_util.c */ + virtual void new_dcr_blocks(DCR *dcr); /* in block_util.c */ + virtual boffset_t get_adata_size(DCR *dcr) { return (boffset_t)0; }; + virtual void updateVolCatBytes(uint64_t); /* in dev.c */ + virtual void updateVolCatBlocks(uint32_t); /* in dev.c */ + virtual void updateVolCatWrites(uint32_t); /* in dev.c */ + virtual void updateVolCatReads(uint32_t); /* in dev.c */ + virtual void updateVolCatReadBytes(uint64_t); /* in dev.c */ + virtual void updateVolCatPadding(uint64_t); /* in dev.c */ + virtual void updateVolCatHoleBytes(uint64_t); /* in dev.c */ + virtual bool setVolCatAdataBytes(uint64_t) { return false; }; + virtual void set_volcatinfo_from_dcr(DCR *dcr); + virtual bool allow_maxbytes_concurrency(DCR *dcr) { return true; }; + virtual bool flush_before_eos(DCR *dcr) { return true; }; + virtual void set_nospace(); /* in dev.c */ + virtual void set_append(); /* in dev.c */ + virtual void set_read(); /* in dev.c */ + virtual void clear_nospace(); /* in dev.c */ + virtual void clear_append(); /* in dev.c */ + virtual void clear_read(); /* in dev.c */ + virtual void device_specific_init(JCR *jcr, DEVRES *device); + virtual void device_specific_open(DCR *dcr) { return; }; + virtual int d_ioctl(int fd, ioctl_req_t request, char *mt_com=NULL); + virtual int d_open(const char *pathname, int flags); + virtual int d_close(int fd); + virtual ssize_t d_read(int fd, void *buffer, size_t count); + virtual ssize_t d_write(int fd, const void *buffer, size_t count); + virtual boffset_t lseek(DCR *dcr, boffset_t offset, int whence); + virtual bool update_pos(DCR *dcr); + virtual bool rewind(DCR *dcr); + virtual bool truncate(DCR *dcr); + virtual int truncate_cache(DCR *dcr, const char *VolName, int64_t *size) { return 0; }; + virtual bool get_cloud_volumes_list(DCR* dcr, alist *volumes, POOLMEM *&err) { pm_strcpy(err, "Not implemented"); return false;}; + virtual bool get_cloud_volume_parts_list(DCR *dcr, const char *VolumeName, ilist *parts, POOLMEM *&err) { pm_strcpy(err, "Not implemented"); return false; }; + virtual uint32_t get_cloud_upload_transfer_status(POOL_MEM &msg, bool verbose) { pm_strcpy(msg, "Not implemented"); return 0; }; + virtual uint32_t get_cloud_download_transfer_status(POOL_MEM &msg, bool verbose) { pm_strcpy(msg, "Not implemented"); return 0; }; + virtual bool upload_cache(DCR *dcr, const char *VolName, POOLMEM *&err) { return true; }; + virtual bool open_device(DCR *dcr, int omode) = 0; + virtual bool open_next_part(DCR *dcr); + virtual bool close(DCR *dcr); /* in dev.c */ + virtual void term(DCR *dcr); /* in dev.c */ + virtual bool close_part(DCR 
*dcr); + virtual bool reposition(DCR *dcr, uint64_t raddr); + virtual bool mount(int timeout); + virtual bool unmount(int timeout); + virtual int read_dev_volume_label(DCR *dcr); /* in label.c */ + virtual bool write_volume_label_to_block(DCR *dcr); /* in label.c */ + virtual bool write_volume_label(DCR *dcr, + const char *VolName, const char *PoolName, + bool relabel, bool no_prelabel); /* in label.c */ + virtual bool write_volume_label_to_dev(DCR *dcr, + const char *VolName, const char *PoolName, + bool relabel, bool no_prelabel); /* in label.c */ + virtual bool rewrite_volume_label(DCR *dcr, bool recycle); /* in label.c */ + virtual bool is_eod_valid(DCR *dcr); /* in dev.c */ + virtual bool eod(DCR *dcr); /* in dev.c */ + virtual bool weof(DCR *dcr, int num); /* in dev.c */ + virtual bool end_of_volume(DCR *dcr) { return true; }; + virtual bool start_of_job(DCR *dcr) {return true; }; + virtual bool end_of_job(DCR *dcr) {return true; }; + virtual bool is_indexed() { return true; }; + virtual void set_ateof(); /* in dev.c */ + virtual const char *print_type() = 0; /* in dev.c */ + virtual DEVICE *get_dev(DCR *dcr); /* in dev.c */ + virtual uint32_t get_hi_addr(); /* in dev.c */ + virtual uint32_t get_low_addr(); /* in dev.c */ + virtual uint32_t get_hi_addr(boffset_t addr); /* in dev.c */ + virtual uint32_t get_low_addr(boffset_t addr); /* in dev.c */ + virtual uint64_t get_full_addr(); /* in dev.c */ + virtual uint64_t get_full_addr(boffset_t addr); /* in dev.c */ + virtual uint64_t get_full_addr(uint32_t hi, uint32_t low); /* in dev.c */ + virtual char *print_addr(char *buf, int32_t maxlen); + virtual char *print_addr(char *buf, int32_t maxlen, boffset_t addr); + virtual bool do_size_checks(DCR *dcr, DEV_BLOCK *block); /* in dev.c */ + + virtual bool get_tape_worm(DCR *dcr); + virtual bool get_tape_alerts(DCR *dcr); + virtual void show_tape_alerts(DCR *dcr, alert_list_type type, + alert_list_which which, alert_cb alert_callback); + virtual int delete_alerts(); + + /* These could probably be made tape_dev only */ + virtual bool bsf(int count) { return true; } + virtual bool fsf(int num) { return true; } + virtual bool offline(DCR *dcr) { return true; } + virtual void lock_door() { return; } + virtual void unlock_door() { return; } + + virtual bool write_adata_label(DCR *dcr, DEV_RECORD *rec) { return false; }; + virtual void write_adata(DCR *dcr, DEV_RECORD *rec) { return; }; + virtual void write_cont_adata(DCR *dcr, DEV_RECORD *rec) { return; }; + virtual int write_adata_rechdr(DCR *dcr, DEV_RECORD *rec) { return -1; }; + virtual bool have_adata_header(DCR *dcr, DEV_RECORD *rec, + int32_t FileIndex, int32_t Stream, uint32_t VolSessionId) + { return false; }; + virtual bool read_adata_record_header(DCR *dcr, DEV_BLOCK *block, + DEV_RECORD *rec) { return false; }; + virtual void read_adata_block_header(DCR *dcr) { return; }; + virtual int read_adata(DCR *dcr, DEV_RECORD *rec) { return -1; }; + virtual void select_data_stream(DCR *dcr, DEV_RECORD *rec) { return; }; + virtual bool flush_block(DCR *dcr); /* in block_util.c */ + virtual bool do_pre_write_checks(DCR *dcr, DEV_RECORD *rec) { return true; }; + + + /* + * Locking and blocking calls + */ +#ifdef DEV_DEBUG_LOCK + virtual void dbg_Lock(const char *, int); /* in lock.c */ + virtual void dbg_Unlock(const char *, int); /* in lock.c */ + virtual void dbg_rLock(const char *, int, bool locked=false); /* in lock.c */ + virtual void dbg_rUnlock(const char *, int); /* in lock.c */ +#else + virtual void Lock(); /* in lock.c */ + virtual void 
Unlock(); /* in lock.c */ + virtual void rLock(bool locked=false); /* in lock.c */ + virtual void rUnlock(); /* in lock.c */ +#endif + +#ifdef SD_DEBUG_LOCK + virtual void dbg_Lock_acquire(const char *, int); /* in lock.c */ + virtual void dbg_Unlock_acquire(const char *, int); /* in lock.c */ + virtual void dbg_Lock_read_acquire(const char *, int); /* in lock.c */ + virtual void dbg_Unlock_read_acquire(const char *, int); /* in lock.c */ + virtual void dbg_Lock_VolCatInfo(const char *, int); /* in lock.c */ + virtual void dbg_Unlock_VolCatInfo(const char *, int); /* in lock.c */ +#else + virtual void Lock_acquire(); /* in lock.c */ + virtual void Unlock_acquire(); /* in lock.c */ + virtual void Lock_read_acquire(); /* in lock.c */ + virtual void Unlock_read_acquire(); /* in lock.c */ + virtual void Lock_VolCatInfo(); /* in lock.c */ + virtual void Unlock_VolCatInfo(); /* in lock.c */ +#endif + virtual void dblock(int why); /* in lock.c */ + virtual void dunblock(bool locked=false); /* in lock.c */ + + /* use obtain_device_block() macro */ + bool _obtain_device_block(const char *file, int line, + bsteal_lock_t *hold, int retry, int state); + + + int init_mutex(); /* in lock.c */ + int init_acquire_mutex(); /* in lock.c */ + int init_read_acquire_mutex(); /* in lock.c */ + int init_volcat_mutex(); /* in lock.c */ + int init_dcrs_mutex(); /* in lock.c */ + int init_freespace_mutex(); /* in lock.c */ + void set_mutex_priorities(); /* in lock.c */ + int next_vol_timedwait(const struct timespec *timeout); /* in lock.c */ + void Lock_dcrs() { P(dcrs_mutex); }; + void Unlock_dcrs() { V(dcrs_mutex); }; + bool is_device_unmounted(); /* in lock.c */ + void set_blocked(int block) { m_blocked = block; }; + int blocked() const { return m_blocked; }; + bool is_blocked() const { return m_blocked != BST_NOT_BLOCKED; }; + const char *print_blocked() const; /* in dev.c */ + void open_tape_device(DCR *dcr, int omode); /* in dev.c */ + void open_file_device(DCR *dcr, int omode); /* in dev.c */ + +private: + bool mount_tape(int mount, int dotimeout); /* in dev.c */ +protected: + void set_mode(int omode); /* in dev.c */ +}; +inline const char *DEVICE::strerror() const { return errmsg; } +inline const char *DEVICE::archive_name() const { return dev_name; } +inline const char *DEVICE::print_name() const { return NPRT(prt_name); } + + +#define CHECK_BLOCK_NUMBERS true +#define NO_BLOCK_NUMBER_CHECK false + +/* + * Device Context (or Control) Record. + * There is one of these records for each Job that is using + * the device. Items in this record are "local" to the Job and + * do not affect other Jobs. Note, a job can have multiple + * DCRs open, each pointing to a different device. + * Normally, there is only one JCR thread per DCR. However, the + * big and important exception to this is when a Job is being + * canceled. At that time, there may be two threads using the + * same DCR. Consequently, when creating/attaching/detaching + * and freeing the DCR we must lock it (m_mutex). 
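+ *
+ * A hedged illustration (not from the original comment): a job that
+ * both reads and writes typically holds
+ *
+ *    jcr->dcr       - DCR attached to the device being written
+ *    jcr->read_dcr  - DCR attached to the device being read
+ *
+ * and while the job thread drives one of them, cancel_cmd() in
+ * dircmd.c may touch the same DCR to wake a thread waiting on a
+ * mount; that is the second-thread case described above.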
+ */ +class DCR { +private: + bool m_dev_locked; /* set if dev already locked */ + int m_dev_lock; /* non-zero if rLock already called */ + bool m_reserved; /* set if reserved device */ + bool m_found_in_use; /* set if a volume found in use */ + bool m_writing; /* set when DCR used for writing */ + +public: + dlink dev_link; /* link to attach to dev */ + JCR *jcr; /* pointer to JCR */ + DEVICE * volatile dev; /* pointer to device */ + DEVICE *adata_dev; /* pointer to adata dev */ + DEVICE *ameta_dev; /* pointer to ameta_dev */ + DEVRES *device; /* pointer to device resource */ + DEV_BLOCK *block; /* pointer to block */ + DEV_BLOCK *adata_block; /* aligned data block */ + DEV_BLOCK *ameta_block; /* aligned meta data block */ + DEV_RECORD *rec; /* pointer to record */ + + VOL_LIST *CurrentVol; /* From JCR::VolList, freed at the end, passed to records */ + + alist *uploads; /* Current upload transfers to the cloud */ + alist *downloads; /* Current donwload transfers from the cloud */ + + pthread_t tid; /* Thread running this dcr */ + int spool_fd; /* fd if spooling */ + int crc32_type; /* Handle bad CRC32 on Solaris for volumes written with 8.4 */ + bool adata_label; /* writing adata label block */ + bool adata; /* writing aligned data block */ + bool spool_data; /* set to spool data */ + bool spooling; /* set when actually spooling */ + bool despooling; /* set when despooling */ + bool despool_wait; /* waiting for despooling */ + bool NewVol; /* set if new Volume mounted */ + bool WroteVol; /* set if Volume written */ + bool NewFile; /* set when EOF written */ + bool reserved_volume; /* set if we reserved a volume */ + bool any_volume; /* Any OK for dir_find_next... */ + bool attached_to_dev; /* set when attached to dev */ + bool keep_dcr; /* do not free dcr in release_dcr */ + bool no_mount_request; /* do not submit any mount request */ + bool reading_label; /* Reading volume label */ + bool discard_invalid_records; /* we should not try to assemble invalid records */ + bool force_update_volume_info; /* update the volume information, no matter the job type */ + + uint32_t VolFirstIndex; /* First file index this Volume */ + uint32_t VolLastIndex; /* Last file index this Volume */ + int32_t FileIndex; /* Current File Index */ + uint64_t StartAddr; /* Starting write addr */ + uint64_t EndAddr; /* Ending write addr */ + int64_t VolMediaId; /* MediaId */ + int64_t job_spool_size; /* Current job spool size */ + int64_t max_job_spool_size; /* Max job spool size */ + char VolumeName[MAX_NAME_LENGTH]; /* Volume name */ + char pool_name[MAX_NAME_LENGTH]; /* pool name */ + char pool_type[MAX_NAME_LENGTH]; /* pool type */ + char media_type[MAX_NAME_LENGTH]; /* media type */ + char dev_name[MAX_NAME_LENGTH]; /* dev name */ + int Copy; /* identical copy number */ + int Stripe; /* RAIT stripe */ + VOLUME_CAT_INFO VolCatInfo; /* Catalog info for desired volume */ + + /* Methods */ + void set_no_mount_request() { no_mount_request = true; }; /* Just fail in case of mount request */ + void set_dev(DEVICE *ndev) { dev = ndev; ameta_dev = ndev; }; + void set_adata() { if (adata_dev) {dev = adata_dev; block = adata_block;} }; + void set_ameta() { dev = ameta_dev; block = ameta_block; }; + void set_dev_locked() { m_dev_locked = true; }; + void set_writing() { m_writing = true; }; + void clear_writing() { m_writing = false; }; + bool is_reading() const { return !m_writing; }; + bool is_writing() const { return m_writing; }; + void clear_dev_locked() { m_dev_locked = false; }; + void inc_dev_lock() { m_dev_lock++; 
}; + void dec_dev_lock() { m_dev_lock--; }; + bool found_in_use() const { return m_found_in_use; }; + void set_found_in_use() { m_found_in_use = true; }; + void clear_found_in_use() { m_found_in_use = false; }; + bool is_reserved() const { return m_reserved; }; + bool is_dev_locked() const { return m_dev_locked; } + void setVolCatInfo(bool valid) { VolCatInfo.is_valid = valid; }; + bool haveVolCatInfo() const { return VolCatInfo.is_valid; }; + void setVolCatName(const char *name) { + bstrncpy(VolCatInfo.VolCatName, name, sizeof(VolCatInfo.VolCatName)); + setVolCatInfo(false); + }; + char *getVolCatName() { return VolCatInfo.VolCatName; }; + bool write_final_block_to_device() { return write_block_to_device(true); }; + + /* Methods in autochanger.c */ + bool is_virtual_autochanger(); + + /* Methods in lock.c */ + void dblock(int why) { dev->dblock(why); } + + /* Methods in record.c */ + bool write_record(DEV_RECORD *rec); + + /* Methods in read_record.c */ + bool read_records( + bool record_cb(DCR *dcr, DEV_RECORD *rec), + bool mount_cb(DCR *dcr)); + bool try_repositioning(DEV_RECORD *rec); + BSR *position_to_first_file(); + + /* Methods in reserve.c */ + void clear_reserved(); + void set_reserved_for_read(); + void set_reserved_for_append(); + void unreserve_device(bool locked); + + /* Methods in vol_mgr.c */ + bool can_i_use_volume(); + bool can_i_write_volume(); + + /* Methods in mount.c */ + bool mount_next_write_volume(); + bool mount_next_read_volume(); + void mark_volume_in_error(); + void mark_volume_read_only(); + void mark_volume_not_inchanger(); + int try_autolabel(bool opened); + bool find_a_volume(); + bool is_suitable_volume_mounted(); + int check_volume_label(bool &ask, bool &autochanger); + void release_volume(); + void do_swapping(bool is_writing); + bool do_unload(); + bool do_load(bool is_writing); + bool is_tape_position_ok(); + + /* Methods in block.c */ + bool write_block_to_device(bool final=false); + bool write_block_to_dev(); + bool read_block_from_device(bool check_block_numbers); + bool read_block_from_dev(bool check_block_numbers); + + +}; + +/* Get some definition of function to position + * to the end of the medium in MTEOM. System + * dependent. Arrgggg! + */ +#ifndef MTEOM +#ifdef MTSEOD +#define MTEOM MTSEOD +#endif +#ifdef MTEOD +#undef MTEOM +#define MTEOM MTEOD +#endif +#endif + +#endif /* __DEV_H */ diff --git a/src/stored/device.c b/src/stored/device.c new file mode 100644 index 00000000..a748455f --- /dev/null +++ b/src/stored/device.c @@ -0,0 +1,328 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * Higher Level Device routines. + * Knows about Bacula tape labels and such + * + * NOTE! In general, subroutines that have the word + * "device" in the name do locking. Subroutines + * that have the word "dev" in the name do not + * do locking. Thus if xxx_device() calls + * yyy_dev(), all is OK, but if xxx_device() + * calls yyy_device(), everything will hang. 
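+ *
+ * A minimal sketch of that convention (hypothetical names, not
+ * functions from this file):
+ *
+ *    static bool do_work_dev(DEVICE *dev)    - caller already holds
+ *    { ... do the work ... }                   the device lock
+ *
+ *    bool do_work_device(DEVICE *dev)        - takes the lock itself
+ *    {
+ *       dev->Lock();
+ *       bool ok = do_work_dev(dev);
+ *       dev->Unlock();
+ *       return ok;
+ *    }
+ *
+ * Calling another xxx_device() routine from inside do_work_device(),
+ * while the lock is still held, is the self-deadlock warned about
+ * above.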
+ * Obviously, no zzz_dev() is allowed to call + * a www_device() or everything falls apart. + * + * Concerning the routines dev->rLock()() and block_device() + * see the end of this module for details. In general, + * blocking a device leaves it in a state where all threads + * other than the current thread block when they attempt to + * lock the device. They remain suspended (blocked) until the device + * is unblocked. So, a device is blocked during an operation + * that takes a long time (initialization, mounting a new + * volume, ...) locking a device is done for an operation + * that takes a short time such as writing data to the + * device. + * + * + * Kern Sibbald, MM, MMI + * + */ + +#include "bacula.h" /* pull in global headers */ +#include "stored.h" /* pull in Storage Deamon headers */ + +/* Forward referenced functions */ + +/* + * This is the dreaded moment. We either have an end of + * medium condition or worse, and error condition. + * Attempt to "recover" by obtaining a new Volume. + * + * Here are a few things to know: + * dcr->VolCatInfo contains the info on the "current" tape for this job. + * dev->VolCatInfo contains the info on the tape in the drive. + * The tape in the drive could have changed several times since + * the last time the job used it (jcr->VolCatInfo). + * dcr->VolumeName is the name of the current/desired tape in the drive. + * + * We enter with device locked, and + * exit with device locked. + * + * Note, we are called only from one place in block.c for the daemons. + * The btape utility calls it from btape.c. + * + * Returns: true on success + * false on failure + */ +bool fixup_device_block_write_error(DCR *dcr, int retries) +{ + char PrevVolName[MAX_NAME_LENGTH]; + DEV_BLOCK *block = dcr->block; + DEV_BLOCK *ameta_block = dcr->ameta_block; + DEV_BLOCK *adata_block = dcr->adata_block; + char b1[30], b2[30]; + time_t wait_time; + char dt[MAX_TIME_LENGTH]; + JCR *jcr = dcr->jcr; + DEVICE *dev; + int blocked; /* save any previous blocked status */ + bool ok = false; + bool save_adata = dcr->dev->adata; + + Enter(100); + if (save_adata) { + dcr->set_ameta(); /* switch to working with ameta */ + } + dev = dcr->dev; + blocked = dev->blocked(); + + wait_time = time(NULL); + + /* + * If we are blocked at entry, unblock it, and set our own block status + */ + if (blocked != BST_NOT_BLOCKED) { + unblock_device(dev); + } + block_device(dev, BST_DOING_ACQUIRE); + + /* Continue unlocked, but leave BLOCKED */ + dev->Unlock(); + + bstrncpy(PrevVolName, dev->getVolCatName(), sizeof(PrevVolName)); + bstrncpy(dev->VolHdr.PrevVolumeName, PrevVolName, sizeof(dev->VolHdr.PrevVolumeName)); + + /* create temporary block, that will be released at the end, current blocks + * have been saved in local DEV_BLOCK above and will be restored before to + * leave the function + */ + dev->new_dcr_blocks(dcr); + + /* Inform User about end of medium */ + Jmsg(jcr, M_INFO, 0, _("End of medium on Volume \"%s\" Bytes=%s Blocks=%s at %s.\n"), + PrevVolName, edit_uint64_with_commas(dev->VolCatInfo.VolCatBytes, b1), + edit_uint64_with_commas(dev->VolCatInfo.VolCatBlocks, b2), + bstrftime(dt, sizeof(dt), time(NULL))); + + Dmsg1(150, "set_unload dev=%s\n", dev->print_name()); + dev->set_unload(); + + /* Clear DCR Start/End Block/File positions */ + dcr->VolFirstIndex = dcr->VolLastIndex = 0; + dcr->StartAddr = dcr->EndAddr = 0; + dcr->VolMediaId = 0; + dcr->WroteVol = false; + + if (!dcr->mount_next_write_volume()) { + dev->free_dcr_blocks(dcr); + dcr->block = block; + dcr->ameta_block = 
ameta_block; + dcr->adata_block = adata_block; + dev->Lock(); + goto bail_out; + } + Dmsg2(150, "must_unload=%d dev=%s\n", dev->must_unload(), dev->print_name()); + + dev->notify_newvol_in_attached_dcrs(dcr->VolumeName); + dev->Lock(); /* lock again */ + + dev->VolCatInfo.VolCatJobs++; /* increment number of jobs on vol */ + if (!dir_update_volume_info(dcr, false, false)) { /* send Volume info to Director */ + goto bail_out; + } + + Jmsg(jcr, M_INFO, 0, _("New volume \"%s\" mounted on device %s at %s.\n"), + dcr->VolumeName, dev->print_name(), bstrftime(dt, sizeof(dt), time(NULL))); + + /* + * If this is a new tape, the label_blk will contain the + * label, so write it now. If this is a previously + * used tape, mount_next_write_volume() will return an + * empty label_blk, and nothing will be written. + */ + Dmsg0(190, "write label block to dev\n"); + if (!dcr->write_block_to_dev()) { + berrno be; + Pmsg1(0, _("write_block_to_device Volume label failed. ERR=%s"), + be.bstrerror(dev->dev_errno)); + dev->free_dcr_blocks(dcr); + dcr->block = block; + dcr->ameta_block = ameta_block; + dcr->adata_block = adata_block; + goto bail_out; + } + dev->free_dcr_blocks(dcr); + dcr->block = block; + dcr->ameta_block = ameta_block; + dcr->adata_block = adata_block; + + /* Clear NewVol now because dir_get_volume_info() already done */ + jcr->dcr->NewVol = false; + set_new_volume_parameters(dcr); + + jcr->run_time += time(NULL) - wait_time; /* correct run time for mount wait */ + + /* Write overflow block to device */ + Dmsg0(190, "Write overflow block to dev\n"); + if (save_adata) { + dcr->set_adata(); /* try to write block we entered with */ + } + if (!dcr->write_block_to_dev()) { + berrno be; + Dmsg1(0, _("write_block_to_device overflow block failed. ERR=%s"), + be.bstrerror(dev->dev_errno)); + /* Note: recursive call */ + if (retries-- <= 0 || !fixup_device_block_write_error(dcr, retries)) { + Jmsg2(jcr, M_FATAL, 0, + _("Catastrophic error. Cannot write overflow block to device %s. ERR=%s"), + dev->print_name(), be.bstrerror(dev->dev_errno)); + goto bail_out; + } + } + ok = true; + +bail_out: + if (save_adata) { + dcr->set_ameta(); /* Do unblock ... on ameta */ + } + /* + * At this point, the device is locked and blocked. + * Unblock the device, restore any entry blocked condition, then + * return leaving the device locked (as it was on entry). + */ + unblock_device(dev); + if (blocked != BST_NOT_BLOCKED) { + block_device(dev, blocked); + } + if (save_adata) { + dcr->set_adata(); /* switch back to what we entered with */ + } + return ok; /* device locked */ +} + +void set_start_vol_position(DCR *dcr) +{ + DEVICE *dev = dcr->dev; + /* Set new start position */ + if (dev->is_tape()) { + dcr->StartAddr = dcr->EndAddr = dev->get_full_addr(); + } else { + if (dev->adata) { + dev = dcr->ameta_dev; + } + /* + * Note: we only update the DCR values for ameta blocks + * because all the indexing (JobMedia) is done with + * ameta blocks/records, which may point to adata. + */ + dcr->StartAddr = dcr->EndAddr = dev->get_full_addr(); + } +} + +/* + * We have a new Volume mounted, so reset the Volume parameters + * concerning this job. The global changes were made earlier + * in the dev structure. 
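+ *
+ * (Concretely, the per-job values are the DCR fields reset by
+ * set_new_file_parameters() below -- VolFirstIndex/VolLastIndex,
+ * StartAddr/EndAddr, NewFile and WroteVol -- while the "global"
+ * changes are those already recorded in the dev structure, e.g.
+ * dev->VolCatInfo.)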
+ */ +void set_new_volume_parameters(DCR *dcr) +{ + JCR *jcr = dcr->jcr; + Dmsg1(40, "set_new_volume_parameters dev=%s\n", dcr->dev->print_name()); + if (dcr->NewVol) { + while (dcr->VolumeName[0] == 0) { + int retries = 5; + wait_for_device(dcr, retries); + } + if (dir_get_volume_info(dcr, dcr->VolumeName, GET_VOL_INFO_FOR_WRITE)) { + dcr->dev->clear_wait(); + } else { + Dmsg1(40, "getvolinfo failed. No new Vol: %s", jcr->errmsg); + } + } + set_new_file_parameters(dcr); + jcr->NumWriteVolumes++; + dcr->NewVol = false; +} + +/* + * We are now in a new Volume file, so reset the Volume parameters + * concerning this job. The global changes were made earlier + * in the dev structure. + */ +void set_new_file_parameters(DCR *dcr) +{ + set_start_vol_position(dcr); + + /* Reset indicies */ + Dmsg3(1000, "Reset indices Vol=%s were: FI=%d LI=%d\n", dcr->VolumeName, + dcr->VolFirstIndex, dcr->VolLastIndex); + dcr->VolFirstIndex = 0; + dcr->VolLastIndex = 0; + dcr->NewFile = false; + dcr->WroteVol = false; +} + + + +/* + * First Open of the device. Expect dev to already be initialized. + * + * This routine is used only when the Storage daemon starts + * and always_open is set, and in the stand-alone utility + * routines such as bextract. + * + * Note, opening of a normal file is deferred to later so + * that we can get the filename; the device_name for + * a file is the directory only. + * + * Returns: false on failure + * true on success + */ +bool first_open_device(DCR *dcr) +{ + DEVICE *dev = dcr->dev; + bool ok = true; + + Dmsg0(120, "start open_output_device()\n"); + if (!dev) { + return false; + } + + dev->rLock(false); + + /* Defer opening files */ + if (!dev->is_tape()) { + Dmsg0(129, "Device is file, deferring open.\n"); + goto bail_out; + } + + Dmsg0(129, "Opening device.\n"); + if (!dev->open_device(dcr, OPEN_READ_ONLY)) { + Jmsg1(NULL, M_FATAL, 0, _("dev open failed: %s\n"), dev->errmsg); + ok = false; + goto bail_out; + } + Dmsg1(129, "open dev %s OK\n", dev->print_name()); + +bail_out: + dev->rUnlock(); + return ok; +} diff --git a/src/stored/dircmd.c b/src/stored/dircmd.c new file mode 100644 index 00000000..738fa7f5 --- /dev/null +++ b/src/stored/dircmd.c @@ -0,0 +1,1986 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * This file handles accepting Director Commands + * + * Most Director commands are handled here, with the + * exception of the Job command command and subsequent + * subcommands that are handled + * in job.c. + * + * N.B. in this file, in general we must use P(dev->mutex) rather + * than dev->rLock() so that we can examine the blocked + * state rather than blocking ourselves because a Job + * thread has the device blocked. In some "safe" cases, + * we can do things to a blocked device. CAREFUL!!!! 
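+ *
+ * A rough sketch of that pattern, simplified from the command
+ * handlers below (illustrative only):
+ *
+ *    dev->Lock();                           raw mutex; does not wait
+ *                                           for a BST_* block to clear
+ *    if (dev->is_busy() || dev->is_blocked()) {
+ *       send_dir_busy_message(dir, dev);    ask the Director to retry
+ *    } else {
+ *       ... act on the idle device ...
+ *    }
+ *    dev->Unlock();
+ *
+ * whereas dev->rLock() would sleep until the blocking Job thread
+ * unblocks the device, which is exactly what must be avoided here.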
+ * + * File daemon commands are handled in fdcmd.c + * + * Written by Kern Sibbald, May MMI + * + */ + +#include "bacula.h" +#include "stored.h" + +/* Exported variables */ + +/* Imported variables */ +extern BSOCK *filed_chan; +extern struct s_last_job last_job; +extern bool init_done; + +/* Static variables */ +static char derrmsg[] = "3900 Invalid command:"; +static char OKsetdebug[] = "3000 OK setdebug=%ld trace=%ld options=%s tags=%s\n"; +static char invalid_cmd[] = "3997 Invalid command for a Director with Monitor directive enabled.\n"; +static char OK_bootstrap[] = "3000 OK bootstrap\n"; +static char ERROR_bootstrap[] = "3904 Error bootstrap\n"; +static char OKclient[] = "3000 OK client command\n"; + +/* Imported functions */ +extern void terminate_child(); +extern bool job_cmd(JCR *jcr); +extern bool use_cmd(JCR *jcr); +extern bool run_cmd(JCR *jcr); +extern bool status_cmd(JCR *sjcr); +extern bool qstatus_cmd(JCR *jcr); +//extern bool query_cmd(JCR *jcr); + +/* Forward referenced functions */ +static bool client_cmd(JCR *jcr); +static bool storage_cmd(JCR *jcr); +static bool label_cmd(JCR *jcr); +static bool die_cmd(JCR *jcr); +static bool relabel_cmd(JCR *jcr); +static bool truncate_cache_cmd(JCR *jcr); +static bool upload_cmd(JCR *jcr); +static bool readlabel_cmd(JCR *jcr); +static bool release_cmd(JCR *jcr); +static bool setdebug_cmd(JCR *jcr); +static bool cancel_cmd(JCR *cjcr); +static bool mount_cmd(JCR *jcr); +static bool unmount_cmd(JCR *jcr); +static bool enable_cmd(JCR *jcr); +static bool disable_cmd(JCR *jcr); +//static bool action_on_purge_cmd(JCR *jcr); +static bool bootstrap_cmd(JCR *jcr); +static bool cloud_list_cmd(JCR *jcr); +static bool cloud_prunecache_cmd(JCR *jcr); +static bool changer_cmd(JCR *sjcr); +static bool do_label(JCR *jcr, int relabel); +static DCR *find_device(JCR *jcr, POOL_MEM &dev_name, + POOLMEM *media_type, int drive); +static DCR *find_any_device(JCR *jcr, POOL_MEM &dev_name, + POOLMEM *media_type, int drive); +static void read_volume_label(JCR *jcr, DCR *dcr, DEVICE *dev, int Slot); +static void label_volume_if_ok(DCR *dcr, char *oldname, + char *newname, char *poolname, + int Slot, int relabel); +static bool try_autoload_device(JCR *jcr, DCR *dcr, int slot, const char *VolName); +static void send_dir_busy_message(BSOCK *dir, DEVICE *dev); + +/* Responses send to Director for storage command */ +static char BADcmd[] = "2902 Bad %s\n"; +static char OKstore[] = "2000 OK storage\n"; + +/* Commands received from director that need scanning */ +static char storaddr[] = "storage address=%s port=%d ssl=%d Job=%127s Authentication=%127s"; + +struct s_cmds { + const char *cmd; + bool (*func)(JCR *jcr); + bool monitoraccess; /* set if monitors can access this cmd */ +}; + +/* + * The following are the recognized commands from the Director. 
+ */ +static struct s_cmds cmds[] = { + {"JobId=", job_cmd, 0}, /* start Job */ + {"autochanger", changer_cmd, 0}, + {"bootstrap", bootstrap_cmd, 0}, + {"cancel", cancel_cmd, 0}, + {"client", client_cmd, 0}, /* client address */ + {".die", die_cmd, 0}, + {"label", label_cmd, 0}, /* label a tape */ + {"mount", mount_cmd, 0}, + {"enable", enable_cmd, 0}, + {"disable", disable_cmd, 0}, + {"readlabel", readlabel_cmd, 0}, + {"release", release_cmd, 0}, + {"relabel", relabel_cmd, 0}, /* relabel a tape */ + {"setdebug=", setdebug_cmd, 0}, /* set debug level */ + {"status", status_cmd, 1}, + {".status", qstatus_cmd, 1}, + {"stop", cancel_cmd, 0}, + {"storage", storage_cmd, 0}, /* get SD addr from Dir */ + {"truncate", truncate_cache_cmd, 0}, + {"upload", upload_cmd, 0}, + {"prunecache", cloud_prunecache_cmd, 0}, + {"cloudlist", cloud_list_cmd, 0}, /* List volumes/parts in the cloud */ + {"unmount", unmount_cmd, 0}, + {"use storage=", use_cmd, 0}, + {"run", run_cmd, 0}, +// {"query", query_cmd, 0}, + {NULL, NULL} /* list terminator */ +}; + + +/* + * Connection request. We accept connections either from the + * Director or a Client (File daemon). + * + * Note, we are running as a separate thread of the Storage daemon, + * and it is because a Director has made a connection with + * us on the "Message" channel. + * + * Basic tasks done here: + * - Create a JCR record + * - If it was from the FD, call handle_filed_connection() + * - Authenticate the Director + * - We wait for a command + * - We execute the command + * - We continue or exit depending on the return status + */ +void *handle_connection_request(void *arg) +{ + BSOCK *bs = (BSOCK *)arg; + JCR *jcr; + int i; + bool found, quit; + int bnet_stat = 0; + char tbuf[100]; + + if (bs->recv() <= 0) { + Qmsg1(NULL, M_ERROR, 0, _("Connection request from %s failed.\n"), bs->who()); + bmicrosleep(5, 0); /* make user wait 5 seconds */ + bs->destroy(); + return NULL; + } + + /* Check for client connection */ + if (is_client_connection(bs)) { + handle_client_connection(bs); + return NULL; + } + + /* + * This is a connection from the Director, so setup a JCR + */ + Dmsg1(050, "Got a DIR connection at %s\n", bstrftimes(tbuf, sizeof(tbuf), + (utime_t)time(NULL))); + jcr = new_jcr(sizeof(JCR), stored_free_jcr); /* create Job Control Record */ + jcr->dir_bsock = bs; /* save Director bsock */ + jcr->dir_bsock->set_jcr(jcr); + jcr->dcrs = New(alist(10, not_owned_by_alist)); + create_jobmedia_queue(jcr); + /* Initialize FD start condition variable */ + int errstat = pthread_cond_init(&jcr->job_start_wait, NULL); + if (errstat != 0) { + berrno be; + Qmsg1(jcr, M_FATAL, 0, _("Unable to init job cond variable: ERR=%s\n"), be.bstrerror(errstat)); + goto bail_out; + } + + Dmsg0(1000, "stored in start_job\n"); + + /* + * Validate then authenticate the Director + */ + if (!validate_dir_hello(jcr)) { + goto bail_out; + } + if (!authenticate_director(jcr)) { + Qmsg(jcr, M_FATAL, 0, _("[SF0100] Unable to authenticate Director\n")); + goto bail_out; + } + Dmsg0(90, "Message channel init completed.\n"); + + dequeue_messages(jcr); /* dequeue any daemon messages */ + + jcr->set_killable(true); /* allow dir to kill/cancel job */ + + for (quit=false; !quit;) { + /* Read command */ + if ((bnet_stat = bs->recv()) <= 0) { + break; /* connection terminated */ + } + Dmsg1(199, "<dird: %s", bs->msg); + /* Ensure that device initialization is complete */ + while (!init_done) { + bmicrosleep(1, 0); + } + found = false; + for (i=0; cmds[i].cmd; i++) { + if (strncmp(cmds[i].cmd, bs->msg,
strlen(cmds[i].cmd)) == 0) { + if ((!cmds[i].monitoraccess) && (jcr->director->monitor)) { + Dmsg1(100, "Command \"%s\" is invalid.\n", cmds[i].cmd); + bs->fsend(invalid_cmd); + bs->signal(BNET_EOD); + break; + } + Dmsg1(200, "Do command: %s\n", cmds[i].cmd); + if (!cmds[i].func(jcr)) { /* do command */ + quit = true; /* error, get out */ + Dmsg1(190, "Command %s requests quit\n", cmds[i].cmd); + } + found = true; /* indicate command found */ + break; + } + } + if (!found) { /* command not found */ + POOL_MEM err_msg; + Mmsg(err_msg, "%s %s\n", derrmsg, bs->msg); + bs->fsend(err_msg.c_str()); + break; + } + } +bail_out: + generate_daemon_event(jcr, "JobEnd"); + generate_plugin_event(jcr, bsdEventJobEnd); + flush_jobmedia_queue(jcr); + dequeue_messages(jcr); /* send any queued messages */ + dequeue_daemon_messages(jcr); + bs->signal(BNET_TERMINATE); + bs->destroy(); + jcr->dir_bsock = NULL; /* just freed bsock */ + free_plugins(jcr); /* release instantiated plugins */ + free_jcr(jcr); + return NULL; +} + + +/* + * Force SD to die, and hopefully dump itself. Turned on only + * in development version. + */ +static bool die_cmd(JCR *jcr) +{ +#ifdef DEVELOPER + JCR *djcr = NULL; + int a; + BSOCK *dir = jcr->dir_bsock; + pthread_mutex_t m=PTHREAD_MUTEX_INITIALIZER; + + if (strstr(dir->msg, "deadlock")) { + Pmsg0(000, "I have been requested to deadlock ...\n"); + P(m); + P(m); + } + + Pmsg1(000, "I have been requested to die ... (%s)\n", dir->msg); + a = djcr->JobId; /* ref NULL pointer */ + djcr->JobId = a; +#endif + return 0; +} + +/* + * Get address of client from Director + * This initiates SD Calls Client. + * We attempt to connect to the client (an FD or SD) and + * authenticate it. + */ +static bool client_cmd(JCR *jcr) +{ + int client_port; /* client port */ + int enable_ssl; /* enable ssl */ + BSOCK *dir = jcr->dir_bsock; + BSOCK *cl = new_bsock(); /* client bsock */ + + Dmsg1(100, "ClientCmd: %s", dir->msg); + jcr->sd_calls_client = true; + if (sscanf(dir->msg, "client address=%s port=%d ssl=%d", &jcr->client_addr, &client_port, + &enable_ssl) != 3) { + pm_strcpy(jcr->errmsg, dir->msg); + Jmsg(jcr, M_FATAL, 0, _("[SF0101] Bad client command: %s"), jcr->errmsg); + Dmsg1(050, "Bad client command: %s", jcr->errmsg); + goto bail_out; + } + + Dmsg3(110, "Connect to client: %s:%d ssl=%d\n", jcr->client_addr, client_port, + enable_ssl); + /* Open command communications with Client */ + /* Try to connect for 1 hour at 10 second intervals */ + if (!cl->connect(jcr, 10, (int)me->ClientConnectTimeout, me->heartbeat_interval, + _("Client daemon"), jcr->client_addr, NULL, client_port, 1)) { + /* destroy() OK because cl is local */ + cl->destroy(); + Jmsg(jcr, M_FATAL, 0, _("[SF0102] Failed to connect to Client daemon: %s:%d\n"), + jcr->client_addr, client_port); + Dmsg2(100, "Failed to connect to Client daemon: %s:%d\n", + jcr->client_addr, client_port); + goto bail_out; + } + Dmsg0(110, "SD connection OK to Client.\n"); + + jcr->file_bsock = cl; + jcr->file_bsock->set_jcr(jcr); + if (!send_hello_client(jcr, jcr->Job)) { + goto bail_out; + } + + /* Send OK to Director */ + return dir->fsend(OKclient); + +bail_out: + jcr->setJobStatus(JS_ErrorTerminated); + dir->fsend("3902 Bad %s cmd\n", "client"); + return 0; +} + +/* + * Get address of storage daemon from Director + */ +static bool storage_cmd(JCR *jcr) +{ + int stored_port; /* storage daemon port */ + int enable_ssl; /* enable ssl to sd */ + char sd_auth_key[200]; + BSOCK *dir = jcr->dir_bsock; + BSOCK *sd = new_bsock(); /* storage daemon bsock 
*/ + char Job[MAX_NAME_LENGTH]; + + Dmsg1(050, "StorageCmd: %s", dir->msg); + if (sscanf(dir->msg, storaddr, &jcr->stored_addr, &stored_port, + &enable_ssl, Job, sd_auth_key) != 5) { + pm_strcpy(jcr->errmsg, dir->msg); + Jmsg(jcr, M_FATAL, 0, _("[SF0103] Bad storage command: %s"), jcr->errmsg); + Pmsg1(010, "Bad storage command: %s", jcr->errmsg); + goto bail_out; + } + + unbash_spaces(Job); + if (jcr->sd_auth_key) { + bfree_and_null(jcr->sd_auth_key); + jcr->sd_auth_key = bstrdup(sd_auth_key); + } + if (stored_port != 0) { + Dmsg2(050, "sd_calls=%d sd_client=%d\n", jcr->sd_calls_client, + jcr->sd_client); + jcr->sd_calls_client = false; /* We are doing the connecting */ + Dmsg3(050, "Connect to storage and wait: %s:%d ssl=%d\n", jcr->stored_addr, stored_port, + enable_ssl); + /* Open command communications with Storage daemon */ + /* Try to connect for 1 hour at 10 second intervals */ + if (!sd->connect(jcr, 10, (int)me->ClientConnectTimeout, me->heartbeat_interval, + _("Storage daemon"), jcr->stored_addr, NULL, stored_port, 1)) { + /* destroy() OK because sd is local */ + sd->destroy(); + Jmsg(jcr, M_FATAL, 0, _("[SF0104] Failed to connect to Storage daemon: %s:%d\n"), + jcr->stored_addr, stored_port); + Dmsg2(010, "Failed to connect to Storage daemon: %s:%d\n", + jcr->stored_addr, stored_port); + goto bail_out; + } + + Dmsg0(050, "Connection OK to SD.\n"); + + jcr->store_bsock = sd; + } else { /* The storage daemon called us */ + jcr->sd_calls_client = true; + /* We should already have a storage connection! */ + if (jcr->file_bsock && jcr->store_bsock == NULL) { + jcr->store_bsock = jcr->file_bsock; + } + if (jcr->store_bsock == NULL) { + Jmsg0(jcr, M_FATAL, 0, _("[SF0105] In storage_cmd port==0, no prior Storage connection.\n")); + Pmsg0(010, "In storage_cmd port==0, no prior Storage connection.\n"); + goto bail_out; + } + } + + if (!send_hello_sd(jcr, Job)) { + goto bail_out; + } + + if (!authenticate_storagedaemon(jcr)) { + goto bail_out; + } + /* + * We are a client so we read from the socket we just + * opened as if we were a FD, so set file_bsock and + * clear the store_bsock. 
+ */ + jcr->file_bsock = jcr->store_bsock; + jcr->store_bsock = NULL; + jcr->authenticated = true; /* Dir authentication is sufficient */ + Dmsg1(050, "=== Storage_cmd authenticated Job=%s with SD.\n", Job); + + /* Send OK to Director */ + return dir->fsend(OKstore); + +bail_out: + Dmsg0(100, "Send storage command failed.\n"); + dir->fsend(BADcmd, "storage"); + return false; +} + + +/* + * Set debug level as requested by the Director + * + */ +static bool setdebug_cmd(JCR *jcr) +{ + BSOCK *dir = jcr->dir_bsock; + int32_t trace_flag, lvl, hangup, blowup; + int64_t level, level_tags = 0; + char options[60]; + char tags[512]; + *tags = *options = 0; + + Dmsg1(10, "setdebug_cmd: %s", dir->msg); + + if (sscanf(dir->msg, "setdebug=%ld trace=%ld hangup=%ld blowup=%ld options=%55s tags=%511s", + &lvl, &trace_flag, &hangup, &blowup, options, tags) != 6) + { + if (sscanf(dir->msg, "setdebug=%ld trace=%ld", &lvl, &trace_flag) != 2 || lvl < 0) { + dir->fsend(_("3991 Bad setdebug command: %s\n"), dir->msg); + return 0; + } + } + level = lvl; + set_trace(trace_flag); + set_hangup(hangup); + set_blowup(blowup); + set_debug_flags(options); + if (!debug_parse_tags(tags, &level_tags)) { + *tags = 0; + } + if (level >= 0) { + debug_level = level; + } + debug_level_tags = level_tags; + + /* TODO: Temp code to activate the new BSR optimisation code */ + for (char *p = options; *p ; p++) { + switch(*p) { + case 'i': /* Use new match_bsr() code */ + use_new_match_all = 1; + break; + case '0': + use_new_match_all = 0; + break; + } + } + /* **** */ + + return dir->fsend(OKsetdebug, lvl, trace_flag, options, tags); +} + + +/* + * Cancel a Job + * Be careful, we switch to using the job's JCR! So, using + * BSOCKs on that jcr can have two threads in the same code. + */ +static bool cancel_cmd(JCR *cjcr) +{ + BSOCK *dir = cjcr->dir_bsock; + int oldStatus; + char Job[MAX_NAME_LENGTH]; + JCR *jcr; + int status; + const char *reason; + + if (sscanf(dir->msg, "cancel Job=%127s", Job) == 1) { + status = JS_Canceled; + reason = "canceled"; + } else if (sscanf(dir->msg, "stop Job=%127s", Job) == 1) { + status = JS_Incomplete; + reason = "stopped"; + } else { + dir->fsend(_("3903 Error scanning cancel command.\n")); + goto bail_out; + } + if (!(jcr=get_jcr_by_full_name(Job))) { + dir->fsend(_("3904 Job %s not found.\n"), Job); + } else { + oldStatus = jcr->JobStatus; + jcr->setJobStatus(status); + Dmsg2(800, "Cancel JobId=%d %p\n", jcr->JobId, jcr); + if (!jcr->authenticated && oldStatus == JS_WaitFD) { + pthread_cond_signal(&jcr->job_start_wait); /* wake waiting thread */ + } + if (jcr->file_bsock) { + jcr->file_bsock->set_terminated(); + jcr->file_bsock->set_timed_out(); + Dmsg2(800, "Term bsock jid=%d %p\n", jcr->JobId, jcr); + } else { + /* Still waiting for FD to connect, release it */ + pthread_cond_signal(&jcr->job_start_wait); /* wake waiting job */ + Dmsg2(800, "Signal FD connect jid=%d %p\n", jcr->JobId, jcr); + } + /* If thread waiting on mount, wake him */ + if (jcr->dcr && jcr->dcr->dev && jcr->dcr->dev->waiting_for_mount()) { + pthread_cond_broadcast(&jcr->dcr->dev->wait_next_vol); + Dmsg1(100, "JobId=%u broadcast wait_device_release\n", (uint32_t)jcr->JobId); + pthread_cond_broadcast(&wait_device_release); + } + if (jcr->read_dcr && jcr->read_dcr->dev && jcr->read_dcr->dev->waiting_for_mount()) { + pthread_cond_broadcast(&jcr->read_dcr->dev->wait_next_vol); + Dmsg1(100, "JobId=%u broadcast wait_device_release\n", (uint32_t)jcr->JobId); + pthread_cond_broadcast(&wait_device_release); + } + 
jcr->my_thread_send_signal(TIMEOUT_SIGNAL); + dir->fsend(_("3000 JobId=%ld Job=\"%s\" marked to be %s.\n"), + jcr->JobId, jcr->Job, reason); + free_jcr(jcr); + } + +bail_out: + dir->signal(BNET_EOD); + return 1; +} + +/* + * Label a Volume + * + */ +static bool label_cmd(JCR *jcr) +{ + return do_label(jcr, 0); +} + +static bool relabel_cmd(JCR *jcr) +{ + return do_label(jcr, 1); +} + +static bool do_label(JCR *jcr, int relabel) +{ + POOLMEM *newname, *oldname, *poolname, *mtype; + POOL_MEM dev_name; + BSOCK *dir = jcr->dir_bsock; + DCR *dcr = NULL;; + DEVICE *dev; + bool ok = false; + int32_t slot, drive; + + newname = get_memory(dir->msglen+1); + oldname = get_memory(dir->msglen+1); + poolname = get_memory(dir->msglen+1); + mtype = get_memory(dir->msglen+1); + if (relabel) { + if (sscanf(dir->msg, "relabel %127s OldName=%127s NewName=%127s PoolName=%127s " + "MediaType=%127s Slot=%d drive=%d", + dev_name.c_str(), oldname, newname, poolname, mtype, + &slot, &drive) == 7) { + ok = true; + } + } else { + *oldname = 0; + if (sscanf(dir->msg, "label %127s VolumeName=%127s PoolName=%127s " + "MediaType=%127s Slot=%d drive=%d", + dev_name.c_str(), newname, poolname, mtype, &slot, &drive) == 6) { + ok = true; + } + } + if (ok) { + unbash_spaces(newname); + unbash_spaces(oldname); + unbash_spaces(poolname); + unbash_spaces(mtype); + dcr = find_device(jcr, dev_name, mtype, drive); + if (dcr) { + uint32_t max_jobs; + dev = dcr->dev; + ok = true; + dev->Lock(); /* Use P to avoid indefinite block */ + max_jobs = dev->max_concurrent_jobs; + dev->max_concurrent_jobs = 1; + bstrncpy(dcr->VolumeName, newname, sizeof(dcr->VolumeName)); + if (dcr->can_i_write_volume()) { + if (reserve_volume(dcr, newname) == NULL) { + ok = false; + } + Dmsg1(400, "Reserved Volume=\"%s\" for relabel/truncate.\n", newname); + } else { + ok = false; + } + if (!ok) { + dir->fsend(_("3908 Error reserving Volume=\"%s\": %s"), newname, jcr->errmsg); + dev->max_concurrent_jobs = max_jobs; + dev->Unlock(); + goto bail_out; + } + + /* some command use recv and don't accept catalog update. + * it's not the case here, so we force dir_update_volume_info catalog update */ + dcr->force_update_volume_info = true; + + if (!dev->is_open() && !dev->is_busy()) { + Dmsg1(400, "Can %slabel. Device is not open\n", relabel?"re":""); + label_volume_if_ok(dcr, oldname, newname, poolname, slot, relabel); + dev->close(dcr); + /* Under certain "safe" conditions, we can steal the lock */ + } else if (dev->can_obtain_block()) { + Dmsg0(400, "Can relabel. can_obtain_block\n"); + label_volume_if_ok(dcr, oldname, newname, poolname, slot, relabel); + } else if (dev->is_busy() || dev->is_blocked()) { + send_dir_busy_message(dir, dev); + } else { /* device not being used */ + Dmsg0(400, "Can relabel. 
device not used\n"); + label_volume_if_ok(dcr, oldname, newname, poolname, slot, relabel); + } + dev->max_concurrent_jobs = max_jobs; + volume_unused(dcr); + dev->Unlock(); +#ifdef DEVELOPER + if (chk_dbglvl(DT_VOLUME)) { + Dmsg0(0, "Waiting few seconds to force a bug...\n"); + bmicrosleep(30, 0); + Dmsg0(0, "Doing free_volume()\n"); + } +#endif + } else { + dir->fsend(_("3999 Device \"%s\" not found or could not be opened.\n"), dev_name.c_str()); + } + } else { + /* NB dir->msg gets clobbered in bnet_fsend, so save command */ + pm_strcpy(jcr->errmsg, dir->msg); + dir->fsend(_("3903 Error scanning label command: %s\n"), jcr->errmsg); + } +bail_out: + if (dcr) { + free_dcr(dcr); + } + free_memory(oldname); + free_memory(newname); + free_memory(poolname); + free_memory(mtype); + dir->signal(BNET_EOD); + return true; +} + +/* + * Handles truncate cache commands + */ +static bool truncate_cache_cmd(JCR *jcr) +{ + POOLMEM *volname, *poolname, *mtype; + POOL_MEM dev_name; + BSOCK *dir = jcr->dir_bsock; + DCR *dcr = NULL;; + DEVICE *dev; + bool ok = false; + int32_t slot, drive; + int nbpart = 0; + int64_t size = 0; + char ed1[50]; + + volname = get_memory(dir->msglen+1); + poolname = get_memory(dir->msglen+1); + mtype = get_memory(dir->msglen+1); + if (sscanf(dir->msg, "truncate cache Storage=%127s Volume=%127s PoolName=%127s " + "MediaType=%127s Slot=%d drive=%d", + dev_name.c_str(), volname, poolname, mtype, + &slot, &drive) == 6) { + ok = true; + } + if (ok) { + unbash_spaces(volname); + unbash_spaces(poolname); + unbash_spaces(mtype); + dcr = find_device(jcr, dev_name, mtype, drive); + if (dcr) { + uint32_t max_jobs; + dev = dcr->dev; + ok = true; + dev->Lock(); /* Use P to avoid indefinite block */ + max_jobs = dev->max_concurrent_jobs; + dev->max_concurrent_jobs = 1; + bstrncpy(dcr->VolumeName, volname, sizeof(dcr->VolumeName)); + if (dcr->can_i_write_volume()) { + if (reserve_volume(dcr, volname) == NULL) { + ok = false; + } + Dmsg1(400, "Reserved volume \"%s\"\n", volname); + } else { + ok = false; + } + if (!ok) { + dir->fsend(_("3908 Error reserving Volume=\"%s\": %s"), volname, jcr->errmsg); + dev->max_concurrent_jobs = max_jobs; + dev->Unlock(); + goto bail_out; + } + if ((!dev->is_open() && !dev->is_busy()) || dev->can_obtain_block()) { + Dmsg0(400, "Call truncate_cache\n"); + nbpart = dev->truncate_cache(dcr, volname, &size); + if (nbpart >= 0) { + dir->fsend(_("3000 OK truncate cache for volume \"%s\" %d part(s) %sB\n"), + volname, nbpart, edit_uint64_with_suffix(size, ed1)); + } else { + dir->fsend(_("3900 Truncate cache for volume \"%s\" failed. ERR=%s\n"), volname, dev->errmsg); + } + } else if (dev->is_busy() || dev->is_blocked()) { + send_dir_busy_message(dir, dev); + } else { /* device not being used */ + Dmsg0(400, "Call truncate_cache\n"); + nbpart = dev->truncate_cache(dcr, volname, &size); + if (nbpart >= 0) { + dir->fsend(_("3000 OK truncate cache for volume \"%s\" %d part(s) %sB\n"), volname, + nbpart, edit_uint64_with_suffix(size, ed1)); + } else { + dir->fsend(_("3900 Truncate cache for volume \"%s\" failed. 
ERR=%s\n"), volname, dev->errmsg); + } + } + dev->max_concurrent_jobs = max_jobs; + volume_unused(dcr); + dev->Unlock(); +#ifdef DEVELOPER + if (chk_dbglvl(DT_VOLUME)) { + Dmsg0(0, "Waiting few seconds to force a bug...\n"); + bmicrosleep(30, 0); + Dmsg0(0, "Doing free_volume()\n"); + } +#endif + } else { + dir->fsend(_("3999 Device \"%s\" not found or could not be opened.\n"), dev_name.c_str()); + } + } else { + /* NB dir->msg gets clobbered in bnet_fsend, so save command */ + pm_strcpy(jcr->errmsg, dir->msg); + dir->fsend(_("3911 Error scanning truncate command: %s\n"), jcr->errmsg); + } +bail_out: + if (dcr) { + free_dcr(dcr); + } + free_memory(volname); + free_memory(poolname); + free_memory(mtype); + dir->signal(BNET_EOD); + return true; +} + +static bool cloud_prunecache_cmd(JCR *jcr) +{ + /* TODO: Implement a function to prune the cache of a cloud device */ + jcr->dir_bsock->fsend(_("3900 Not yet implemented\n")); + return true; +} + +/* List volumes in the cloud */ +static bool cloud_list_cmd(JCR *jcr) +{ + BSOCK *dir = jcr->dir_bsock; + POOL_MEM dev_name; + POOLMEM *errmsg = get_pool_memory(PM_FNAME); + errmsg[0] = 0; + char volname[MAX_NAME_LENGTH]; + char mtype[MAX_NAME_LENGTH]; + int slot, drive; + DCR *dcr = NULL; + + if (sscanf(dir->msg, "cloudlist Storage=%127s Volume=%127s MediaType=%127s Slot=%d drive=%d", + dev_name.c_str(), volname, mtype, &slot, &drive) != 5) { + dir->fsend(_("3912 Error scanning the command\n")); + goto bail_out; + } + + /* In fact, we do not need to find and reserve a device for this operation, + * we just need to find one, idle or not + */ + dcr = find_device(jcr, dev_name, mtype, drive); + if (!dcr) { + dir->fsend(_("3900 Error reserving device %s %s\n"), dev_name.c_str(), mtype); + goto bail_out; + } + + if (volname[0] == 0) { /* List all volumes, TODO: Switch to a callback mode */ + char *vol; + alist volumes(100, not_owned_by_alist); + if (!dcr->dev->get_cloud_volumes_list(dcr, &volumes, errmsg)) { + dir->fsend(_("3900 Error cannot get cloud Volume list. ERR=%s\n"), errmsg); + } + free_dcr(dcr); + + foreach_alist(vol, &volumes) { + bash_spaces(vol); + dir->fsend("volume=%s\n", vol); + free(vol); /* Walk through the list only one time */ + } + + } else { + ilist parts(100, not_owned_by_alist); + if (!dcr->dev->get_cloud_volume_parts_list(dcr, volname, &parts, errmsg)) { + dir->fsend(_("3900 Error cannot get cloud Volume list. 
ERR=%s\n"), errmsg); + free_dcr(dcr); + goto bail_out; + } + free_dcr(dcr); + + for (int i=1; i <= parts.last_index() ; i++) { + cloud_part *p = (cloud_part *)parts.get(i); + if (p) { + dir->fsend("part=%d size=%lld mtime=%lld\n", i, p->size, p->mtime); + free(p); + } + } + } + +bail_out: + free_pool_memory(errmsg); + dir->signal(BNET_EOD); + return true; +} + +/* + * Handles upload cache to Cloud command + */ +static bool upload_cmd(JCR *jcr) +{ + POOLMEM *volname, *poolname, *mtype, *err; + POOL_MEM dev_name; + BSOCK *dir = jcr->dir_bsock; + DCR *dcr = NULL; + DEVICE *dev; + bool ok = false; + int32_t slot, drive; + + volname = get_memory(dir->msglen+1); + poolname = get_memory(dir->msglen+1); + mtype = get_memory(dir->msglen+1); + err = get_pool_memory(PM_MESSAGE); + *volname = *poolname = *mtype = *err = 0; + + if (sscanf(dir->msg, "upload Storage=%127s Volume=%127s PoolName=%127s " + "MediaType=%127s Slot=%d drive=%d", + dev_name.c_str(), volname, poolname, mtype, + &slot, &drive) == 6) { + ok = true; + } + if (ok) { + unbash_spaces(volname); + unbash_spaces(poolname); + unbash_spaces(mtype); + dcr = find_device(jcr, dev_name, mtype, drive); + if (dcr) { + uint32_t max_jobs; + dev = dcr->dev; + ok = true; + dev->Lock(); /* Use P to avoid indefinite block */ + max_jobs = dev->max_concurrent_jobs; + dev->max_concurrent_jobs = 1; + bstrncpy(dcr->VolumeName, volname, sizeof(dcr->VolumeName)); + if (dcr->can_i_write_volume()) { + if (reserve_volume(dcr, volname) == NULL) { + ok = false; + } + Dmsg1(400, "Reserved volume \"%s\"\n", volname); + } else { + ok = false; + } + if (!ok) { + dir->fsend(_("3908 Error reserving Volume=\"%s\": %s"), volname, jcr->errmsg); + dev->max_concurrent_jobs = max_jobs; + dev->Unlock(); + goto bail_out; + } + if ((!dev->is_open() && !dev->is_busy()) || dev->can_obtain_block()) { + Dmsg0(400, "Can upload, because device is not open.\n"); + dev->setVolCatName(volname); + dev->part = 0; + if (dev->open_device(dcr, OPEN_READ_WRITE)) { + ok = dev->upload_cache(dcr, volname, err); + dev->part = 0; + dev->close(dcr); + dev->end_of_job(dcr); + } + } else if (dev->is_busy() || dev->is_blocked()) { + send_dir_busy_message(dir, dev); + } else { /* device not being used */ + Dmsg0(400, "Can upload, because device not used\n"); + dev->setVolCatName(volname); + dev->part = 0; + if (dev->open_device(dcr, OPEN_READ_WRITE)) { + ok = dev->upload_cache(dcr, volname, err); + dev->part = 0; + dev->close(dcr); + dev->end_of_job(dcr); + } + } + dev->max_concurrent_jobs = max_jobs; + volume_unused(dcr); + dev->Unlock(); +#ifdef DEVELOPER + if (chk_dbglvl(DT_VOLUME)) { + Dmsg0(0, "Waiting few seconds to force a bug...\n"); + bmicrosleep(30, 0); + Dmsg0(0, "Doing free_volume()\n"); + } +#endif + } else { + dir->fsend(_("3999 Device \"%s\" not found or could not be opened.\n"), dev_name.c_str()); + } + } else { + /* NB dir->msg gets clobbered in bnet_fsend, so save command */ + pm_strcpy(jcr->errmsg, dir->msg); + dir->fsend(_("3912 Error scanning upload command: ERR=%s\n"), jcr->errmsg); + } +bail_out: + if (ok) { + dir->fsend(_("3000 OK upload.\n")); + } else { + dir->fsend(_("3999 Error with the upload: ERR=%s\n"), err); + } + if (dcr) { + free_dcr(dcr); + } + free_pool_memory(err); + free_memory(volname); + free_memory(poolname); + free_memory(mtype); + dir->signal(BNET_EOD); + return true; +} + + +/* + * Read the tape label and determine if we can safely + * label the tape (not a Bacula volume), then label it. 
+ * + * Enter with the mutex set + */ +static void label_volume_if_ok(DCR *dcr, char *oldname, + char *newname, char *poolname, + int slot, int relabel) +{ + BSOCK *dir = dcr->jcr->dir_bsock; + bsteal_lock_t hold; + DEVICE *dev = dcr->dev; + int label_status; + int mode; + const char *volname = (relabel == 1) ? oldname : newname; + uint64_t volCatBytes; + + if (!obtain_device_block(dev, + &hold, + 1, /* one try */ + BST_WRITING_LABEL)) { + send_dir_busy_message(dir, dev); + return; + } + dev->Unlock(); + + Dmsg1(100, "Stole device %s lock, writing label.\n", dev->print_name()); + + Dmsg0(90, "try_autoload_device - looking for volume_info\n"); + if (!try_autoload_device(dcr->jcr, dcr, slot, volname)) { + goto bail_out; /* error */ + } + + + if (relabel) { + dev->truncating = true; /* let open_device() know we will truncate it */ + } + /* Set old volume name for open if relabeling */ + dcr->setVolCatName(volname); + + /* Ensure that the device is open -- autoload_device() closes it */ + if (dev->is_tape()) { + mode = OPEN_READ_WRITE; + } else { + mode = CREATE_READ_WRITE; + } + if (!dev->open_device(dcr, mode)) { + dir->fsend(_("3929 Unable to open device \"%s\": ERR=%s\n"), + dev->print_name(), dev->bstrerror()); + goto bail_out; + } + + /* See what we have for a Volume */ + label_status = dev->read_dev_volume_label(dcr); + + /* Set new volume name */ + dcr->setVolCatName(newname); + switch(label_status) { + case VOL_NAME_ERROR: + case VOL_VERSION_ERROR: + case VOL_LABEL_ERROR: + case VOL_OK: + if (!relabel) { + dir->fsend(_( + "3920 Cannot label Volume because it is already labeled: \"%s\"\n"), + dev->VolHdr.VolumeName); + break; + } + + /* Relabel request. If oldname matches, continue */ + if (strcmp(oldname, dev->VolHdr.VolumeName) != 0) { + dir->fsend(_("3921 Wrong volume mounted.\n")); + break; + } + if (dev->label_type != B_BACULA_LABEL) { + dir->fsend(_("3922 Cannot relabel an ANSI/IBM labeled Volume.\n")); + break; + } + /* Fall through wanted! */ + case VOL_IO_ERROR: + case VOL_NO_LABEL: + if (!dev->write_volume_label(dcr, newname, poolname, + relabel, true /* write label now */)) { + dir->fsend(_("3912 Failed to label Volume %s: ERR=%s\n"), + newname, dcr->jcr->errmsg); + break; + } + volCatBytes = dev->VolCatInfo.VolCatBytes; + /* + * After writing label, create a new part + */ + if (dev->is_cloud()) { + dev->set_append(); + if (!dev->open_next_part(dcr)) { + dir->fsend(_("3913 Failed to open next part: ERR=%s\n"), dcr->jcr->errmsg); + break; + } + } + bstrncpy(dcr->VolumeName, newname, sizeof(dcr->VolumeName)); + /* The following 3000 OK label. string is scanned in ua_label.c */ + int type; + if (dev->dev_type == B_FILE_DEV || dev->dev_type == B_ALIGNED_DEV || + dev->dev_type == B_CLOUD_DEV) + { + type = dev->dev_type; + } else { + type = 0; + } + dir->fsend("3000 OK label. VolBytes=%lld VolABytes=%lld VolType=%d Volume=\"%s\" Device=%s\n", + volCatBytes, dev->VolCatInfo.VolCatAdataBytes, + type, newname, dev->print_name()); + break; + case VOL_TYPE_ERROR: + dir->fsend(_("3917 Failed to label Volume: ERR=%s\n"), dcr->jcr->errmsg); + break; + case VOL_NO_MEDIA: + dir->fsend(_("3918 Failed to label Volume (no media): ERR=%s\n"), dcr->jcr->errmsg); + break; + default: + dir->fsend(_("3919 Cannot label Volume. 
" + "Unknown status %d from read_volume_label()\n"), label_status); + break; + } + +bail_out: + if (dev->is_open() && !dev->has_cap(CAP_ALWAYSOPEN)) { + dev->close(dcr); + } + + dev->end_of_job(dcr); + + if (!dev->is_open()) { + dev->clear_volhdr(); + } + volume_unused(dcr); /* no longer using volume */ + dev->Lock(); + give_back_device_block(dev, &hold); + return; +} + + +/* + * Read the tape label + * + * Enter with the mutex set + */ +static bool read_label(DCR *dcr) +{ + int ok; + JCR *jcr = dcr->jcr; + BSOCK *dir = jcr->dir_bsock; + bsteal_lock_t hold; + DEVICE *dev = dcr->dev; + + if (!obtain_device_block(dev, + &hold, + 1 /* one try */, + BST_DOING_ACQUIRE)) { + send_dir_busy_message(dir, dev); + return false; + } + dev->Unlock(); + dcr->VolumeName[0] = 0; + dev->clear_labeled(); /* force read of label */ + switch (dev->read_dev_volume_label(dcr)) { + case VOL_OK: + dir->fsend(_("3001 Mounted Volume: %s\n"), dev->VolHdr.VolumeName); + ok = true; + break; + default: + dir->fsend(_("3902 Cannot mount Volume on Storage Device \"%s\" because:\n%s"), + dev->print_name(), jcr->errmsg); + ok = false; + break; + } + volume_unused(dcr); + dev->Lock(); + give_back_device_block(dev, &hold); + return ok; +} + +/* + * Searches for device by name, and if found, creates a dcr and + * returns it. + */ +static DCR *find_device(JCR *jcr, POOL_MEM &devname, + POOLMEM *media_type, int drive) +{ + DEVRES *device; + AUTOCHANGER *changer; + bool found = false; + DCR *dcr = NULL; + + unbash_spaces(devname); + foreach_res(device, R_DEVICE) { + /* Find resource, and make sure we were able to open it */ + if (strcmp(device->hdr.name, devname.c_str()) == 0 && + (!media_type || strcmp(device->media_type, media_type) ==0)) { + if (!device->dev) { + device->dev = init_dev(jcr, device); + } + if (!device->dev) { + Jmsg(jcr, M_WARNING, 0, _("\n" + "[SW0106] Device \"%s\" requested by DIR could not be opened or does not exist.\n"), + devname.c_str()); + continue; + } + Dmsg1(20, "Found device %s\n", device->hdr.name); + found = true; + break; + } + } + if (!found) { + foreach_res(changer, R_AUTOCHANGER) { + /* Find resource, and make sure we were able to open it */ + if (strcmp(devname.c_str(), changer->hdr.name) == 0) { + /* Try each device in this AutoChanger */ + foreach_alist(device, changer->device) { + Dmsg1(100, "Try changer device %s\n", device->hdr.name); + if (!device->dev) { + device->dev = init_dev(jcr, device); + } + if (!device->dev) { + Dmsg1(100, "Device %s could not be opened. 
Skipped\n", devname.c_str()); + Jmsg(jcr, M_WARNING, 0, _("\n" + "[SW0107] Device \"%s\" in changer \"%s\" requested by DIR could not be opened or does not exist.\n"), + device->hdr.name, devname.c_str()); + continue; + } + if (!device->dev->autoselect) { + Dmsg1(100, "Device %s not autoselect skipped.\n", devname.c_str()); + continue; /* device is not available */ + } else if (!device->dev->enabled) { + Dmsg1(100, "Device %s disabled skipped.\n", devname.c_str()); + continue; /* device disabled */ + } + if ((drive < 0 || drive == (int)device->dev->drive_index) && + (!media_type || strcmp(device->media_type, media_type) ==0)) { + Dmsg1(20, "Found changer device %s\n", device->hdr.name); + found = true; + break; + } + Dmsg3(100, "Device %s drive wrong: want=%d got=%d skipping\n", + devname.c_str(), drive, (int)device->dev->drive_index); + } + break; /* we found it but could not open a device */ + } + } + } + + if (found) { + Dmsg1(100, "Found device %s\n", device->hdr.name); + dcr = new_dcr(jcr, NULL, device->dev); + dcr->device = device; + } + return dcr; +} + +/* + * Find even disabled devices so that we can enable them + * ***FIXME*** This could probably be merged with find_device with another + * argument, but this is easier for the moment. + */ +static DCR *find_any_device(JCR *jcr, POOL_MEM &devname, + POOLMEM *media_type, int drive) +{ + DEVRES *device; + AUTOCHANGER *changer; + bool found = false; + DCR *dcr = NULL; + + unbash_spaces(devname); + foreach_res(device, R_DEVICE) { + /* Find resource, and make sure we were able to open it */ + if (strcmp(device->hdr.name, devname.c_str()) == 0 && + (!media_type || strcmp(device->media_type, media_type) ==0)) { + if (!device->dev) { + device->dev = init_dev(jcr, device); + } + if (!device->dev) { + Jmsg(jcr, M_WARNING, 0, _("\n" + "[SW0108] Device \"%s\" requested by DIR could not be opened or does not exist.\n"), + devname.c_str()); + continue; + } + Dmsg1(20, "Found device %s\n", device->hdr.name); + found = true; + break; + } + } + if (!found) { + foreach_res(changer, R_AUTOCHANGER) { + /* Find resource, and make sure we were able to open it */ + if (strcmp(devname.c_str(), changer->hdr.name) == 0) { + /* Try each device in this AutoChanger */ + foreach_alist(device, changer->device) { + Dmsg1(100, "Try changer device %s\n", device->hdr.name); + if (!device->dev) { + device->dev = init_dev(jcr, device); + } + if (!device->dev) { + Dmsg1(100, "Device %s could not be opened. 
Skipped\n", devname.c_str()); + Jmsg(jcr, M_WARNING, 0, _("\n" + "[SW0109] Device \"%s\" in changer \"%s\" requested by DIR could not be opened or does not exist.\n"), + device->hdr.name, devname.c_str()); + continue; + } + if ((drive < 0 || drive == (int)device->dev->drive_index) && + (!media_type || strcmp(device->media_type, media_type) ==0)) { + Dmsg1(20, "Found changer device %s\n", device->hdr.name); + found = true; + break; + } + Dmsg3(100, "Device %s drive wrong: want=%d got=%d skipping\n", + devname.c_str(), drive, (int)device->dev->drive_index); + } + break; /* we found it but could not open a device */ + } + } + } + + if (found) { + Dmsg1(100, "Found device %s\n", device->hdr.name); + dcr = new_dcr(jcr, NULL, device->dev); + dcr->device = device; + } + return dcr; +} + + +/* + * Mount command from Director + */ +static bool mount_cmd(JCR *jcr) +{ + POOL_MEM devname; + BSOCK *dir = jcr->dir_bsock; + DEVICE *dev; + DCR *dcr; + int32_t drive; /* device index */ + int32_t slot; + bool ok; + + Dmsg1(100, "%s\n", dir->msg); + ok = sscanf(dir->msg, "mount %127s drive=%d slot=%d", devname.c_str(), + &drive, &slot) == 3; + Dmsg3(100, "ok=%d device_index=%d slot=%d\n", ok, drive, slot); + if (ok) { + dcr = find_device(jcr, devname, NULL, drive); + if (dcr) { + dev = dcr->dev; + dev->Lock(); /* Use P to avoid indefinite block */ + Dmsg2(100, "mount cmd blocked=%d must_unload=%d\n", dev->blocked(), + dev->must_unload()); + switch (dev->blocked()) { /* device blocked? */ + case BST_WAITING_FOR_SYSOP: + /* Someone is waiting, wake him */ + Dmsg0(100, "Waiting for mount. Attempting to wake thread\n"); + dev->set_blocked(BST_MOUNT); + dir->fsend("3001 OK mount requested. %sDevice=%s\n", + slot>0?_("Specified slot ignored. "):"", + dev->print_name()); + Dmsg1(100, "JobId=%u broadcast wait_next_vol\n", (uint32_t)dcr->jcr->JobId); + pthread_cond_broadcast(&dev->wait_next_vol); + Dmsg1(100, "JobId=%u broadcast wait_device_release\n", (uint32_t)dcr->jcr->JobId); + pthread_cond_broadcast(&wait_device_release); + break; + + /* In both of these two cases, we (the user) unmounted the Volume */ + case BST_UNMOUNTED_WAITING_FOR_SYSOP: + case BST_UNMOUNTED: + Dmsg2(100, "Unmounted changer=%d slot=%d\n", dev->is_autochanger(), slot); + if (dev->is_autochanger() && slot > 0) { + try_autoload_device(jcr, dcr, slot, ""); + } + /* We freed the device, so reopen it and wake any waiting threads */ + if (!dev->open_device(dcr, OPEN_READ_ONLY)) { + dir->fsend(_("3901 Unable to open device \"%s\": ERR=%s\n"), + dev->print_name(), dev->bstrerror()); + if (dev->blocked() == BST_UNMOUNTED) { + /* We blocked the device, so unblock it */ + Dmsg0(100, "Unmounted. Unblocking device\n"); + unblock_device(dev); + } + break; + } + dev->read_dev_volume_label(dcr); + if (dev->blocked() == BST_UNMOUNTED) { + /* We blocked the device, so unblock it */ + Dmsg0(100, "Unmounted. Unblocking device\n"); + read_label(dcr); /* this should not be necessary */ + unblock_device(dev); + } else { + Dmsg0(100, "Unmounted waiting for mount. 
Attempting to wake thread\n"); + dev->set_blocked(BST_MOUNT); + } + if (dev->is_labeled()) { + dir->fsend(_("3001 Device \"%s\" is mounted with Volume \"%s\"\n"), + dev->print_name(), dev->VolHdr.VolumeName); + } else { + dir->fsend(_("3905 Device \"%s\" open but no Bacula volume is mounted.\n" + "If this is not a blank tape, try unmounting and remounting the Volume.\n"), + dev->print_name()); + } + pthread_cond_broadcast(&dev->wait_next_vol); + Dmsg1(100, "JobId=%u broadcast wait_device_release\n", (uint32_t)dcr->jcr->JobId); + pthread_cond_broadcast(&wait_device_release); + break; + + case BST_DOING_ACQUIRE: + dir->fsend(_("3001 Device \"%s\" is doing acquire.\n"), + dev->print_name()); + break; + + case BST_WRITING_LABEL: + dir->fsend(_("3903 Device \"%s\" is being labeled.\n"), + dev->print_name()); + break; + + case BST_NOT_BLOCKED: + Dmsg2(100, "Not blocked changer=%d slot=%d\n", dev->is_autochanger(), slot); + if (dev->is_autochanger() && slot > 0) { + try_autoload_device(jcr, dcr, slot, ""); + } + if (dev->is_open()) { + if (dev->is_labeled()) { + dir->fsend(_("3001 Device \"%s\" is mounted with Volume \"%s\"\n"), + dev->print_name(), dev->VolHdr.VolumeName); + } else { + dir->fsend(_("3905 Device \"%s\" open but no Bacula volume is mounted.\n" + "If this is not a blank tape, try unmounting and remounting the Volume.\n"), + dev->print_name()); + } + } else if (dev->is_tape()) { + if (!dev->open_device(dcr, OPEN_READ_ONLY)) { + dir->fsend(_("3901 Unable to open device \"%s\": ERR=%s\n"), + dev->print_name(), dev->bstrerror()); + break; + } + read_label(dcr); + if (dev->is_labeled()) { + dir->fsend(_("3001 Device \"%s\" is already mounted with Volume \"%s\"\n"), + dev->print_name(), dev->VolHdr.VolumeName); + } else { + dir->fsend(_("3905 Device \"%s\" open but no Bacula volume is mounted.\n" + "If this is not a blank tape, try unmounting and remounting the Volume.\n"), + dev->print_name()); + } + if (dev->is_open() && !dev->has_cap(CAP_ALWAYSOPEN)) { + dev->close(dcr); + } + } else if (dev->is_unmountable()) { + if (dev->mount(1)) { + dir->fsend(_("3002 Device \"%s\" is mounted.\n"), dev->print_name()); + } else { + dir->fsend(_("3907 %s"), dev->bstrerror()); + } + } else { /* must be file */ + dir->fsend(_("3906 File device \"%s\" is always mounted.\n"), + dev->print_name()); + pthread_cond_broadcast(&dev->wait_next_vol); + Dmsg1(100, "JobId=%u broadcast wait_device_release\n", (uint32_t)dcr->jcr->JobId); + pthread_cond_broadcast(&wait_device_release); + } + break; + + case BST_RELEASING: + dir->fsend(_("3930 Device \"%s\" is being released.\n"), dev->print_name()); + break; + + default: + dir->fsend(_("3905 Unknown wait state %d\n"), dev->blocked()); + break; + } + dev->Unlock(); + free_dcr(dcr); + } else { + dir->fsend(_("3999 Device \"%s\" not found or could not be opened.\n"), devname.c_str()); + } + } else { + pm_strcpy(jcr->errmsg, dir->msg); + dir->fsend(_("3909 Error scanning mount command: %s\n"), jcr->errmsg); + } + dir->signal(BNET_EOD); + return true; +} + +/* enable command from Director */ +static bool enable_cmd(JCR *jcr) +{ + POOL_MEM devname; + BSOCK *dir = jcr->dir_bsock; + DEVICE *dev; + DCR *dcr; + int32_t drive; + bool ok; + int deleted; + + ok = sscanf(dir->msg, "enable %127s drive=%d", devname.c_str(), + &drive) == 2; + Dmsg3(100, "ok=%d device=%s device_index=%d\n", ok, devname.c_str(), drive); + if (ok) { + dcr = find_any_device(jcr, devname, NULL, drive); + if (dcr) { + dev = dcr->dev; + dev->Lock(); /* Use P to avoid indefinite block */ + if 
(dev->enabled) { + dir->fsend(_("3003 Device \"%s\" already enabled.\n"), dev->print_name()); + } else { + dev->enabled = true; + dir->fsend(_("3002 Device \"%s\" enabled.\n"), dev->print_name()); + } + deleted = dev->delete_alerts(); + if (deleted > 0) { + dir->fsend(_("3004 Device \"%s\" deleted %d alert%s.\n"), + dev->print_name(), deleted, deleted>1?"s":""); + } + dev->Unlock(); + free_dcr(dcr); + } + } else { + /* NB dir->msg gets clobbered in bnet_fsend, so save command */ + pm_strcpy(jcr->errmsg, dir->msg); + dir->fsend(_("3907 Error scanning \"enable\" command: %s\n"), jcr->errmsg); + } + dir->signal(BNET_EOD); + return true; +} + +/* enable command from Director */ +static bool disable_cmd(JCR *jcr) +{ + POOL_MEM devname; + BSOCK *dir = jcr->dir_bsock; + DEVICE *dev; + DCR *dcr; + int32_t drive; + bool ok; + + ok = sscanf(dir->msg, "disable %127s drive=%d", devname.c_str(), + &drive) == 2; + Dmsg3(100, "ok=%d device=%s device_index=%d\n", ok, devname.c_str(), drive); + if (ok) { + dcr = find_device(jcr, devname, NULL, drive); + if (dcr) { + dev = dcr->dev; + dev->Lock(); + dev->enabled = false; + dir->fsend(_("3002 Device \"%s\" disabled.\n"), dev->print_name()); + dev->Unlock(); + free_dcr(dcr); + } + } else { + /* NB dir->msg gets clobbered in bnet_fsend, so save command */ + pm_strcpy(jcr->errmsg, dir->msg); + dir->fsend(_("3907 Error scanning \"disable\" command: %s\n"), jcr->errmsg); + } + dir->signal(BNET_EOD); + return true; +} + + +/* + * unmount command from Director + */ +static bool unmount_cmd(JCR *jcr) +{ + POOL_MEM devname; + BSOCK *dir = jcr->dir_bsock; + DEVICE *dev; + DCR *dcr; + int32_t drive; + + if (sscanf(dir->msg, "unmount %127s drive=%d", devname.c_str(), &drive) == 2) { + dcr = find_device(jcr, devname, NULL, drive); + if (dcr) { + dev = dcr->dev; + dev->Lock(); /* Use P to avoid indefinite block */ + if (!dev->is_open()) { + if (!dev->is_busy()) { + unload_autochanger(dcr, -1); + } + if (dev->is_unmountable()) { + if (dev->unmount(0)) { + dir->fsend(_("3002 Device \"%s\" unmounted.\n"), + dev->print_name()); + } else { + dir->fsend(_("3907 %s"), dev->bstrerror()); + } + } else { + Dmsg0(90, "Device already unmounted\n"); + dir->fsend(_("3901 Device \"%s\" is already unmounted.\n"), + dev->print_name()); + } + } else if (dev->blocked() == BST_WAITING_FOR_SYSOP) { + Dmsg2(90, "%d waiter dev_block=%d. doing unmount\n", dev->num_waiting, + dev->blocked()); + if (!unload_autochanger(dcr, -1)) { + /* + * ***FIXME**** what is this ???? -- probably we had + * the wrong volume so we must free it and try again. KES + */ + dev->close(dcr); + free_volume(dev); + } + if (dev->is_unmountable() && !dev->unmount(0)) { + dir->fsend(_("3907 %s"), dev->bstrerror()); + } else { + dev->set_blocked(BST_UNMOUNTED_WAITING_FOR_SYSOP); + dir->fsend(_("3001 Device \"%s\" unmounted.\n"), + dev->print_name()); + } + + } else if (dev->blocked() == BST_DOING_ACQUIRE) { + dir->fsend(_("3902 Device \"%s\" is busy in acquire.\n"), + dev->print_name()); + + } else if (dev->blocked() == BST_WRITING_LABEL) { + dir->fsend(_("3903 Device \"%s\" is being labeled.\n"), + dev->print_name()); + + } else if (dev->is_busy()) { + send_dir_busy_message(dir, dev); + } else { /* device not being used */ + Dmsg0(90, "Device not in use, unmounting\n"); + /* On FreeBSD, I am having ASSERT() failures in block_device() + * and I can only imagine that the thread id that we are + * leaving in no_wait_id is being re-used. So here, + * we simply do it by hand. Gross, but a solution. 
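+ * (The hand-coded replacement referred to here is the two statements just
+ * below: dev->set_blocked(BST_UNMOUNTED) and clear_thread_id(dev->no_wait_id).)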
+ */ + /* block_device(dev, BST_UNMOUNTED); replace with 2 lines below */ + dev->set_blocked(BST_UNMOUNTED); + clear_thread_id(dev->no_wait_id); + if (!unload_autochanger(dcr, -1)) { + dev->close(dcr); + free_volume(dev); + } + if (dev->is_unmountable() && !dev->unmount(0)) { + dir->fsend(_("3907 %s"), dev->bstrerror()); + } else { + dir->fsend(_("3002 Device \"%s\" unmounted.\n"), + dev->print_name()); + } + } + dev->Unlock(); + free_dcr(dcr); + } else { + dir->fsend(_("3999 Device \"%s\" not found or could not be opened.\n"), devname.c_str()); + } + } else { + /* NB dir->msg gets clobbered in bnet_fsend, so save command */ + pm_strcpy(jcr->errmsg, dir->msg); + dir->fsend(_("3907 Error scanning unmount command: %s\n"), jcr->errmsg); + } + dir->signal(BNET_EOD); + return true; +} + +#if 0 +/* + * The truncate command will recycle a volume. The director can call this + * after purging a volume so that disk space will not be wasted. Only useful + * for File Storage, of course. + * + * + * It is currently disabled + */ +static bool action_on_purge_cmd(JCR *jcr) +{ + BSOCK *dir = jcr->dir_bsock; + + char devname[MAX_NAME_LENGTH]; + char volumename[MAX_NAME_LENGTH]; + int32_t action; + + /* TODO: Need to find a free device and ask for slot to the director */ + if (sscanf(dir->msg, + "action_on_purge %127s vol=%127s action=%d", + devname, volumename, &action)!= 5) + { + dir->fsend(_("3916 Error scanning action_on_purge command\n")); + goto done; + } + unbash_spaces(volumename); + unbash_spaces(devname); + + /* Check if action is correct */ + if (action & AOP_TRUNCTATE) { + + } + /* ... */ + +done: + dir->signal(BNET_EOD); + return true; +} +#endif + +/* + * Release command from Director. This rewinds the device and if + * configured does a offline and ensures that Bacula will + * re-read the label of the tape before continuing. This gives + * the operator the chance to change the tape anytime before the + * next job starts. + */ +static bool release_cmd(JCR *jcr) +{ + POOL_MEM devname; + BSOCK *dir = jcr->dir_bsock; + DEVICE *dev; + DCR *dcr; + int32_t drive; + + if (sscanf(dir->msg, "release %127s drive=%d", devname.c_str(), &drive) == 2) { + dcr = find_device(jcr, devname, NULL, drive); + if (dcr) { + dev = dcr->dev; + dev->Lock(); /* Use P to avoid indefinite block */ + if (!dev->is_open()) { + if (!dev->is_busy()) { + unload_autochanger(dcr, -1); + } + Dmsg0(90, "Device already released\n"); + dir->fsend(_("3921 Device \"%s\" already released.\n"), + dev->print_name()); + + } else if (dev->blocked() == BST_WAITING_FOR_SYSOP) { + Dmsg2(90, "%d waiter dev_block=%d.\n", dev->num_waiting, + dev->blocked()); + unload_autochanger(dcr, -1); + dir->fsend(_("3922 Device \"%s\" waiting for sysop.\n"), + dev->print_name()); + + } else if (dev->blocked() == BST_UNMOUNTED_WAITING_FOR_SYSOP) { + Dmsg2(90, "%d waiter dev_block=%d. 
doing unmount\n", dev->num_waiting, + dev->blocked()); + dir->fsend(_("3922 Device \"%s\" waiting for mount.\n"), + dev->print_name()); + + } else if (dev->blocked() == BST_DOING_ACQUIRE) { + dir->fsend(_("3923 Device \"%s\" is busy in acquire.\n"), + dev->print_name()); + + } else if (dev->blocked() == BST_WRITING_LABEL) { + dir->fsend(_("3914 Device \"%s\" is being labeled.\n"), + dev->print_name()); + + } else if (dev->is_busy()) { + send_dir_busy_message(dir, dev); + } else { /* device not being used */ + Dmsg0(90, "Device not in use, releasing\n"); + dcr->release_volume(); + dir->fsend(_("3022 Device \"%s\" released.\n"), + dev->print_name()); + } + dev->Unlock(); + free_dcr(dcr); + } else { + dir->fsend(_("3999 Device \"%s\" not found or could not be opened.\n"), devname.c_str()); + } + } else { + /* NB dir->msg gets clobbered in bnet_fsend, so save command */ + pm_strcpy(jcr->errmsg, dir->msg); + dir->fsend(_("3927 Error scanning release command: %s\n"), jcr->errmsg); + } + dir->signal(BNET_EOD); + return true; +} + +static pthread_mutex_t bsr_mutex = PTHREAD_MUTEX_INITIALIZER; +static uint32_t bsr_uniq = 0; + +static bool get_bootstrap_file(JCR *jcr, BSOCK *sock) +{ + POOLMEM *fname = get_pool_memory(PM_FNAME); + FILE *bs; + bool ok = false; + + if (jcr->RestoreBootstrap) { + unlink(jcr->RestoreBootstrap); + free_pool_memory(jcr->RestoreBootstrap); + } + P(bsr_mutex); + bsr_uniq++; + Mmsg(fname, "%s/%s.%s.%d.bootstrap", me->working_directory, me->hdr.name, + jcr->Job, bsr_uniq); + V(bsr_mutex); + Dmsg1(400, "bootstrap=%s\n", fname); + jcr->RestoreBootstrap = fname; + bs = bfopen(fname, "a+b"); /* create file */ + if (!bs) { + berrno be; + Jmsg(jcr, M_FATAL, 0, _("[SF0110] Could not create bootstrap file %s: ERR=%s\n"), + jcr->RestoreBootstrap, be.bstrerror()); + goto bail_out; + } + Dmsg0(150, "=== Bootstrap file ===\n"); + while (sock->recv() >= 0) { + Dmsg1(150, "%s", sock->msg); + fputs(sock->msg, bs); + } + fclose(bs); + Dmsg0(150, "=== end bootstrap file ===\n"); + jcr->bsr = parse_bsr(jcr, jcr->RestoreBootstrap); + if (!jcr->bsr) { + Jmsg(jcr, M_FATAL, 0, _("[SF0111] Error parsing bootstrap file.\n")); + goto bail_out; + } + if (chk_dbglvl(150)) { + dump_bsr(NULL, jcr->bsr, true); + } + + /* If we got a bootstrap, we are reading, so create read volume list */ + create_restore_volume_list(jcr, true /* store the volumes in the global vol_read list */); + ok = true; + +bail_out: + unlink(jcr->RestoreBootstrap); + free_pool_memory(jcr->RestoreBootstrap); + jcr->RestoreBootstrap = NULL; + if (!ok) { + sock->fsend(ERROR_bootstrap); + return false; + } + return sock->fsend(OK_bootstrap); +} + +static bool bootstrap_cmd(JCR *jcr) +{ + return get_bootstrap_file(jcr, jcr->dir_bsock); +} + +/* + * Autochanger command from Director + */ +static bool changer_cmd(JCR *jcr) +{ + POOL_MEM devname; + BSOCK *dir = jcr->dir_bsock; + DEVICE *dev; + DCR *dcr; + const char *cmd = NULL; + bool ok = false; + /* + * A safe_cmd may call autochanger script but does not load/unload + * slots so it can be done at the same time that the drive is open. 
+ */ + bool safe_cmd = false; + + if (sscanf(dir->msg, "autochanger listall %127s", devname.c_str()) == 1) { + cmd = "listall"; + safe_cmd = ok = true; + } else if (sscanf(dir->msg, "autochanger list %127s", devname.c_str()) == 1) { + cmd = "list"; + safe_cmd = ok = true; + } else if (sscanf(dir->msg, "autochanger slots %127s", devname.c_str()) == 1) { + cmd = "slots"; + safe_cmd = ok = true; + } else if (sscanf(dir->msg, "autochanger drives %127s", devname.c_str()) == 1) { + cmd = "drives"; + safe_cmd = ok = true; + } + if (ok) { + dcr = find_device(jcr, devname, NULL, -1); + if (dcr) { + dev = dcr->dev; + dev->Lock(); /* Use P to avoid indefinite block */ + if (!dev->device->changer_res) { + dir->fsend(_("3998 Device \"%s\" is not an autochanger.\n"), + dev->print_name()); + /* Under certain "safe" conditions, we can steal the lock */ + } else if (safe_cmd || !dev->is_open() || dev->can_obtain_block()) { + autochanger_cmd(dcr, dir, cmd); + } else if (dev->is_busy() || dev->is_blocked()) { + send_dir_busy_message(dir, dev); + } else { /* device not being used */ + autochanger_cmd(dcr, dir, cmd); + } + dev->Unlock(); + free_dcr(dcr); + } else { + dir->fsend(_("3999 Device \"%s\" not found or could not be opened.\n"), devname.c_str()); + } + } else { /* error on scanf */ + pm_strcpy(jcr->errmsg, dir->msg); + dir->fsend(_("3909 Error scanning autochanger drives/list/slots command: %s\n"), + jcr->errmsg); + } + dir->signal(BNET_EOD); + return true; +} + +/* + * Read and return the Volume label + */ +static bool readlabel_cmd(JCR *jcr) +{ + POOL_MEM devname; + BSOCK *dir = jcr->dir_bsock; + DEVICE *dev; + DCR *dcr; + int32_t Slot, drive; + + if (sscanf(dir->msg, "readlabel %127s Slot=%d drive=%d", devname.c_str(), + &Slot, &drive) == 3) { + dcr = find_device(jcr, devname, NULL, drive); + if (dcr) { + dev = dcr->dev; + dev->Lock(); /* Use P to avoid indefinite block */ + if (!dev->is_open()) { + read_volume_label(jcr, dcr, dev, Slot); + dev->close(dcr); + /* Under certain "safe" conditions, we can steal the lock */ + } else if (dev->can_obtain_block()) { + read_volume_label(jcr, dcr, dev, Slot); + } else if (dev->is_busy() || dev->is_blocked()) { + send_dir_busy_message(dir, dev); + } else { /* device not being used */ + read_volume_label(jcr, dcr, dev, Slot); + } + dev->Unlock(); + free_dcr(dcr); + } else { + dir->fsend(_("3999 Device \"%s\" not found or could not be opened.\n"), devname.c_str()); + } + } else { + pm_strcpy(jcr->errmsg, dir->msg); + dir->fsend(_("3909 Error scanning readlabel command: %s\n"), jcr->errmsg); + } + dir->signal(BNET_EOD); + return true; +} + + +/* + * Read the tape label + * + * Enter with the mutex set + */ +static void read_volume_label(JCR *jcr, DCR *dcr, DEVICE *dev, int Slot) +{ + BSOCK *dir = jcr->dir_bsock; + bsteal_lock_t hold; + + dcr->set_dev(dev); + if (!obtain_device_block(dev, + &hold, + 1 /* one try */, + BST_WRITING_LABEL)) { + send_dir_busy_message(dir, dev); + return; + } + dev->Unlock(); + + if (!try_autoload_device(jcr, dcr, Slot, "")) { + goto bail_out; /* error */ + } + + dev->clear_labeled(); /* force read of label */ + switch (dev->read_dev_volume_label(dcr)) { + case VOL_OK: + /* DO NOT add quotes around the Volume name. 
It is scanned in the DIR */ + dir->fsend(_("3001 Volume=%s Slot=%d\n"), dev->VolHdr.VolumeName, Slot); + Dmsg1(100, "Volume: %s\n", dev->VolHdr.VolumeName); + break; + default: + dir->fsend(_("3902 Cannot mount Volume on Storage Device \"%s\" because:\n%s"), + dev->print_name(), jcr->errmsg); + break; + } + +bail_out: + dev->Lock(); + give_back_device_block(dev, &hold); + return; +} + +static bool try_autoload_device(JCR *jcr, DCR *dcr, int slot, const char *VolName) +{ + BSOCK *dir = jcr->dir_bsock; + + bstrncpy(dcr->VolumeName, VolName, sizeof(dcr->VolumeName)); + dcr->VolCatInfo.Slot = slot; + dcr->VolCatInfo.InChanger = slot > 0; + if (autoload_device(dcr, 0, dir) < 0) { /* autoload if possible */ + return false; + } + return true; +} + +static void send_dir_busy_message(BSOCK *dir, DEVICE *dev) +{ + if (dev->is_blocked()) { + switch (dev->blocked()) { + case BST_UNMOUNTED: + dir->fsend(_("3931 Device \"%s\" is BLOCKED. user unmounted.\n"), + dev->print_name()); + break; + case BST_UNMOUNTED_WAITING_FOR_SYSOP: + dir->fsend(_("3932 Device \"%s\" is BLOCKED. user unmounted during wait for media/mount.\n"), + dev->print_name()); + break; + case BST_WAITING_FOR_SYSOP: + dir->fsend(_("3933 Device \"%s\" is BLOCKED waiting for media.\n"), + dev->print_name()); + break; + case BST_DOING_ACQUIRE: + dir->fsend(_("3934 Device \"%s\" is being initialized.\n"), + dev->print_name()); + break; + case BST_WRITING_LABEL: + dir->fsend(_("3935 Device \"%s\" is blocked labeling a Volume.\n"), + dev->print_name()); + break; + default: + dir->fsend(_("3935 Device \"%s\" is blocked for unknown reason.\n"), + dev->print_name()); + break; + } + } else if (dev->can_read()) { + dir->fsend(_("3936 Device \"%s\" is busy reading.\n"), + dev->print_name());; + } else { + dir->fsend(_("3937 Device \"%s\" is busy with writers=%d reserved=%d.\n"), + dev->print_name(), dev->num_writers, dev->num_reserved()); + } +} diff --git a/src/stored/ebcdic.c b/src/stored/ebcdic.c new file mode 100644 index 00000000..eddcddc9 --- /dev/null +++ b/src/stored/ebcdic.c @@ -0,0 +1,182 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Taken from the public domain ansitape program for + * integration into Bacula. KES - Mar 2005 + */ + + +/* Mapping of EBCDIC codes to ASCII equivalents. 
*/ +static char to_ascii_table[256] = { + '\000', '\001', '\002', '\003', + '\234', '\011', '\206', '\177', + '\227', '\215', '\216', '\013', + '\014', '\015', '\016', '\017', + '\020', '\021', '\022', '\023', + '\235', '\205', '\010', '\207', + '\030', '\031', '\222', '\217', + '\034', '\035', '\036', '\037', + '\200', '\201', '\202', '\203', + '\204', '\012', '\027', '\033', + '\210', '\211', '\212', '\213', + '\214', '\005', '\006', '\007', + '\220', '\221', '\026', '\223', + '\224', '\225', '\226', '\004', + '\230', '\231', '\232', '\233', + '\024', '\025', '\236', '\032', + '\040', '\240', '\241', '\242', + '\243', '\244', '\245', '\246', + '\247', '\250', '\133', '\056', + '\074', '\050', '\053', '\041', + '\046', '\251', '\252', '\253', + '\254', '\255', '\256', '\257', + '\260', '\261', '\135', '\044', + '\052', '\051', '\073', '\136', + '\055', '\057', '\262', '\263', + '\264', '\265', '\266', '\267', + '\270', '\271', '\174', '\054', + '\045', '\137', '\076', '\077', + '\272', '\273', '\274', '\275', + '\276', '\277', '\300', '\301', + '\302', '\140', '\072', '\043', + '\100', '\047', '\075', '\042', + '\303', '\141', '\142', '\143', + '\144', '\145', '\146', '\147', + '\150', '\151', '\304', '\305', + '\306', '\307', '\310', '\311', + '\312', '\152', '\153', '\154', + '\155', '\156', '\157', '\160', + '\161', '\162', '\313', '\314', + '\315', '\316', '\317', '\320', + '\321', '\176', '\163', '\164', + '\165', '\166', '\167', '\170', + '\171', '\172', '\322', '\323', + '\324', '\325', '\326', '\327', + '\330', '\331', '\332', '\333', + '\334', '\335', '\336', '\337', + '\340', '\341', '\342', '\343', + '\344', '\345', '\346', '\347', + '\173', '\101', '\102', '\103', + '\104', '\105', '\106', '\107', + '\110', '\111', '\350', '\351', + '\352', '\353', '\354', '\355', + '\175', '\112', '\113', '\114', + '\115', '\116', '\117', '\120', + '\121', '\122', '\356', '\357', + '\360', '\361', '\362', '\363', + '\134', '\237', '\123', '\124', + '\125', '\126', '\127', '\130', + '\131', '\132', '\364', '\365', + '\366', '\367', '\370', '\371', + '\060', '\061', '\062', '\063', + '\064', '\065', '\066', '\067', + '\070', '\071', '\372', '\373', + '\374', '\375', '\376', '\377' +}; + + +/* Mapping of ASCII codes to EBCDIC equivalents. 
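+ * As a minimal illustration of how the two tables are used by the conversion
+ * routines at the end of this file (buffer names are illustrative only),
+ * ASCII 'A' (0x41) maps to EBCDIC 0xC1 and back:
+ *
+ *   char in[1] = { 'A' }, ebc[1], out[1];
+ *   ascii_to_ebcdic(ebc, in, 1);   // 0x41 ('A') becomes 0xC1
+ *   ebcdic_to_ascii(out, ebc, 1);  // and 0xC1 maps back to 0x41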
*/ +static char to_ebcdic_table[256] = { + '\000', '\001', '\002', '\003', + '\067', '\055', '\056', '\057', + '\026', '\005', '\045', '\013', + '\014', '\015', '\016', '\017', + '\020', '\021', '\022', '\023', + '\074', '\075', '\062', '\046', + '\030', '\031', '\077', '\047', + '\034', '\035', '\036', '\037', + '\100', '\117', '\177', '\173', + '\133', '\154', '\120', '\175', + '\115', '\135', '\134', '\116', + '\153', '\140', '\113', '\141', + '\360', '\361', '\362', '\363', + '\364', '\365', '\366', '\367', + '\370', '\371', '\172', '\136', + '\114', '\176', '\156', '\157', + '\174', '\301', '\302', '\303', + '\304', '\305', '\306', '\307', + '\310', '\311', '\321', '\322', + '\323', '\324', '\325', '\326', + '\327', '\330', '\331', '\342', + '\343', '\344', '\345', '\346', + '\347', '\350', '\351', '\112', + '\340', '\132', '\137', '\155', + '\171', '\201', '\202', '\203', + '\204', '\205', '\206', '\207', + '\210', '\211', '\221', '\222', + '\223', '\224', '\225', '\226', + '\227', '\230', '\231', '\242', + '\243', '\244', '\245', '\246', + '\247', '\250', '\251', '\300', + '\152', '\320', '\241', '\007', + '\040', '\041', '\042', '\043', + '\044', '\025', '\006', '\027', + '\050', '\051', '\052', '\053', + '\054', '\011', '\012', '\033', + '\060', '\061', '\032', '\063', + '\064', '\065', '\066', '\010', + '\070', '\071', '\072', '\073', + '\004', '\024', '\076', '\341', + '\101', '\102', '\103', '\104', + '\105', '\106', '\107', '\110', + '\111', '\121', '\122', '\123', + '\124', '\125', '\126', '\127', + '\130', '\131', '\142', '\143', + '\144', '\145', '\146', '\147', + '\150', '\151', '\160', '\161', + '\162', '\163', '\164', '\165', + '\166', '\167', '\170', '\200', + '\212', '\213', '\214', '\215', + '\216', '\217', '\220', '\232', + '\233', '\234', '\235', '\236', + '\237', '\240', '\252', '\253', + '\254', '\255', '\256', '\257', + '\260', '\261', '\262', '\263', + '\264', '\265', '\266', '\267', + '\270', '\271', '\272', '\273', + '\274', '\275', '\276', '\277', + '\312', '\313', '\314', '\315', + '\316', '\317', '\332', '\333', + '\334', '\335', '\336', '\337', + '\352', '\353', '\354', '\355', + '\356', '\357', '\372', '\373', + '\374', '\375', '\376', '\377' +}; + + +/* + * Convert from ASCII to EBCDIC + */ +void ascii_to_ebcdic(char *dst, char *src, int count) +{ + while (count--) { + *dst++ = to_ebcdic_table[0377 & *src++]; + } +} + + +/* + * Convert from EBCDIC to ASCII + */ +void ebcdic_to_ascii(char *dst, char *src, int count) +{ + while (count--) { + *dst++ = to_ascii_table[0377 & *src++]; + } +} diff --git a/src/stored/fd_cmds.c b/src/stored/fd_cmds.c new file mode 100644 index 00000000..04e253cf --- /dev/null +++ b/src/stored/fd_cmds.c @@ -0,0 +1,501 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * This file handles commands from the File daemon. 
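+ * Each incoming message is matched by prefix against the fd_cmds[] table
+ * defined below and dispatched to its handler; condensed from
+ * do_client_commands():
+ *
+ *   for (i = 0; fd_cmds[i].cmd; i++) {
+ *      if (strncmp(fd_cmds[i].cmd, fd->msg, strlen(fd_cmds[i].cmd)) == 0) {
+ *         fd_cmds[i].func(jcr);      // run the command handler
+ *         break;
+ *      }
+ *   }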
+ * + * Written by Kern Sibbald, MM + * + * We get here because the Director has initiated a Job with + * the Storage daemon, then done the same with the File daemon, + * then when the Storage daemon receives a proper connection from + * the File daemon, control is passed here to handle the + * subsequent File daemon commands. + */ + +#include "bacula.h" +#include "stored.h" + +/* Forward referenced functions */ +static bool response(JCR *jcr, BSOCK *bs, const char *resp, const char *cmd); + +/* Imported variables */ +extern STORES *me; + +/* Static variables */ +static char ferrmsg[] = "3900 Invalid command\n"; +static char OK_data[] = "3000 OK data\n"; + +/* Imported functions */ +extern bool do_append_data(JCR *jcr); +extern bool do_read_data(JCR *jcr); +extern bool do_backup_job(JCR *jcr); + +/* Forward referenced FD commands */ +static bool append_open_session(JCR *jcr); +static bool append_close_session(JCR *jcr); +static bool append_data_cmd(JCR *jcr); +static bool append_end_session(JCR *jcr); +static bool read_open_session(JCR *jcr); +static bool read_data_cmd(JCR *jcr); +static bool read_close_session(JCR *jcr); +static bool read_control_cmd(JCR *jcr); +static bool sd_testnetwork_cmd(JCR *jcr); + +/* Exported function */ +bool get_bootstrap_file(JCR *jcr, BSOCK *bs); + +struct s_cmds { + const char *cmd; + bool (*func)(JCR *jcr); +}; + +/* + * The following are the recognized commands from the File daemon + */ +static struct s_cmds fd_cmds[] = { + {"append open", append_open_session}, + {"append data", append_data_cmd}, + {"append end", append_end_session}, + {"append close", append_close_session}, + {"read open", read_open_session}, + {"read data", read_data_cmd}, + {"read close", read_close_session}, + {"read control", read_control_cmd}, + {"testnetwork", sd_testnetwork_cmd}, + {NULL, NULL} /* list terminator */ +}; + +/* Commands from the File daemon that require additional scanning */ +static char read_open[] = "read open session = %127s %ld %ld %ld %ld %ld %ld\n"; + +/* Responses sent to the File daemon */ +static char NO_open[] = "3901 Error session already open\n"; +static char NOT_opened[] = "3902 Error session not opened\n"; +static char ERROR_open[] = "3904 Error open session, bad parameters\n"; +static char OK_end[] = "3000 OK end\n"; +static char OK_close[] = "3000 OK close Status = %d\n"; +static char OK_open[] = "3000 OK open ticket = %d\n"; +static char ERROR_append[] = "3903 Error append data: %s\n"; + +/* Information sent to the Director */ +static char Job_start[] = "3010 Job %s start\n"; +char Job_end[] = + "3099 Job %s end JobStatus=%d JobFiles=%d JobBytes=%s JobErrors=%u ErrMsg=%s\n"; + +/* + * Run a Client Job -- Client already authorized + * Note: this can be either a backup or restore or + * migrate/copy job. + * + * Basic task here is: + * - Read a command from the Client -- FD or SD + * - Execute it + * + */ +void run_job(JCR *jcr) +{ + BSOCK *dir = jcr->dir_bsock; + char ec1[30]; + + dir->set_jcr(jcr); + Dmsg1(120, "Start run Job=%s\n", jcr->Job); + dir->fsend(Job_start, jcr->Job); + jcr->start_time = time(NULL); + jcr->run_time = jcr->start_time; + jcr->sendJobStatus(JS_Running); + + /* TODO: Remove when the new match_all is well tested */ + jcr->use_new_match_all = use_new_match_all; + /* + * A migrate or copy job does both a restore (read_data) and + * a backup (append_data). + * Otherwise we do the commands that the client sends + * which are for normal backup or restore jobs. 
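+ * Condensed, the dispatch below is roughly:
+ *
+ *   if (backup job && sd_client)     append_data_cmd(); append_end_session();
+ *   else if (migrate or copy job)    send "3000 OK data"; read_data_cmd();
+ *   else                             do_client_commands();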
+ */ + Dmsg3(050, "==== JobType=%c run_job=%d sd_client=%d\n", jcr->getJobType(), jcr->JobId, jcr->sd_client); + if (jcr->is_JobType(JT_BACKUP) && jcr->sd_client) { + jcr->session_opened = true; + Dmsg0(050, "Do: receive for 3000 OK data then append\n"); + if (!response(jcr, jcr->file_bsock, "3000 OK data\n", "Append data")) { + Dmsg1(050, "Expect: 3000 OK data, got: %s", jcr->file_bsock->msg); + Jmsg0(jcr, M_FATAL, 0, "Append data not accepted\n"); + goto bail_out; + } + append_data_cmd(jcr); + append_end_session(jcr); + } else if (jcr->is_JobType(JT_MIGRATE) || jcr->is_JobType(JT_COPY)) { + jcr->session_opened = true; + /* + * Send "3000 OK data" now to avoid a dead lock, the other side is also + * waiting for one. The old code was reading the "3000 OK" reply + * at the end of the backup (not really appropriate). + * dedup needs duplex communication with the other side and needs the + * "3000 OK" to be read, which is handled here by the code below. + */ + Dmsg0(215, "send OK_data\n"); + jcr->file_bsock->fsend(OK_data); + jcr->is_ok_data_sent = true; + Dmsg1(050, "Do: read_data_cmd file_bsock=%p\n", jcr->file_bsock); + Dmsg0(050, "Do: receive for 3000 OK data then read\n"); + if (!response(jcr, jcr->file_bsock, "3000 OK data\n", "Data received")) { + Dmsg1(050, "Expect 3000 OK data, got: %s", jcr->file_bsock->msg); + Jmsg0(jcr, M_FATAL, 0, "Read data not accepted\n"); + jcr->file_bsock->signal(BNET_EOD); + goto bail_out; + } + read_data_cmd(jcr); + jcr->file_bsock->signal(BNET_EOD); + } else { + /* Either a Backup or Restore job */ + Dmsg0(050, "Do: do_client_commands\n"); + do_client_commands(jcr); + } +bail_out: + jcr->end_time = time(NULL); + flush_jobmedia_queue(jcr); + dequeue_messages(jcr); /* send any queued messages */ + jcr->setJobStatus(JS_Terminated); + generate_daemon_event(jcr, "JobEnd"); + generate_plugin_event(jcr, bsdEventJobEnd); + bash_spaces(jcr->StatusErrMsg); + dir->fsend(Job_end, jcr->Job, jcr->JobStatus, jcr->JobFiles, + edit_uint64(jcr->JobBytes, ec1), jcr->JobErrors, jcr->StatusErrMsg); + Dmsg1(100, "==== %s", dir->msg); + unbash_spaces(jcr->StatusErrMsg); + dequeue_daemon_messages(jcr); + dir->signal(BNET_EOD); /* send EOD to Director daemon */ + free_plugins(jcr); /* release instantiated plugins */ + garbage_collect_memory_pool(); + return; +} + +/* + * Now talk to the Client (FD/SD) and do what he says + */ +void do_client_commands(JCR *jcr) +{ + int i; + bool found, quit; + BSOCK *fd = jcr->file_bsock; + + if (!fd) { + return; + } + fd->set_jcr(jcr); + for (quit=false; !quit;) { + int stat; + + /* Read command coming from the File daemon */ + stat = fd->recv(); + if (fd->is_stop()) { /* hard eof or error */ + break; /* connection terminated */ + } + if (stat <= 0) { + continue; /* ignore signals and zero length msgs */ + } + Dmsg1(110, "msg); + found = false; + for (i=0; fd_cmds[i].cmd; i++) { + if (strncmp(fd_cmds[i].cmd, fd->msg, strlen(fd_cmds[i].cmd)) == 0) { + found = true; /* indicate command found */ + jcr->errmsg[0] = 0; + if (!fd_cmds[i].func(jcr)) { /* do command */ + /* Note fd->msg command may be destroyed by comm activity */ + if (!job_canceled(jcr)) { + strip_trailing_junk(fd->msg); + if (jcr->errmsg[0]) { + strip_trailing_junk(jcr->errmsg); + Jmsg2(jcr, M_FATAL, 0, _("Command error with FD msg=\"%s\", SD hanging up. 
ERR=%s\n"), + fd->msg, jcr->errmsg); + } else { + Jmsg1(jcr, M_FATAL, 0, _("Command error with FD msg=\"%s\", SD hanging up.\n"), + fd->msg); + } + jcr->setJobStatus(JS_ErrorTerminated); + } + quit = true; + } + break; + } + } + if (!found) { /* command not found */ + if (!job_canceled(jcr)) { + Jmsg1(jcr, M_FATAL, 0, _("FD command not found: %s\n"), fd->msg); + Dmsg1(110, "msg); + } + fd->fsend(ferrmsg); + break; + } + } + fd->signal(BNET_TERMINATE); /* signal to FD job is done */ +} + +/* + * Append Data command + * Open Data Channel and receive Data for archiving + * Write the Data to the archive device + */ +static bool append_data_cmd(JCR *jcr) +{ + BSOCK *fd = jcr->file_bsock; + + Dmsg1(120, "Append data: %s", fd->msg); + if (jcr->session_opened) { + Dmsg1(110, "msg); + jcr->setJobType(JT_BACKUP); + jcr->errmsg[0] = 0; + if (do_append_data(jcr)) { + return true; + } else { + fd->suppress_error_messages(true); /* ignore errors at this point */ + fd->fsend(ERROR_append, jcr->errmsg); + } + } else { + pm_strcpy(jcr->errmsg, _("Attempt to append on non-open session.\n")); + fd->fsend(NOT_opened); + } + return false; +} + +static bool append_end_session(JCR *jcr) +{ + BSOCK *fd = jcr->file_bsock; + + Dmsg1(120, "storemsg); + if (!jcr->session_opened) { + pm_strcpy(jcr->errmsg, _("Attempt to close non-open session.\n")); + fd->fsend(NOT_opened); + return false; + } + return fd->fsend(OK_end); +} + +/* + * Test the FD/SD connectivity + */ +static bool sd_testnetwork_cmd(JCR *jcr) +{ + BSOCK *fd = jcr->file_bsock; + int64_t nb=0; + bool can_compress, ok=true; + + if (sscanf(fd->msg, "testnetwork bytes=%lld", &nb) != 1) { + return false; + } + /* We disable the comline compression for this test */ + can_compress = fd->can_compress(); + fd->clear_compress(); + + /* First, get data from the FD */ + while (fd->recv() > 0) { } + + /* Then, send back data to the FD */ + memset(fd->msg, 0xBB, sizeof_pool_memory(fd->msg)); + fd->msglen = sizeof_pool_memory(fd->msg); + + while(nb > 0 && ok) { + if (nb < fd->msglen) { + fd->msglen = nb; + } + ok = fd->send(); + nb -= fd->msglen; + } + fd->signal(BNET_EOD); + + if (can_compress) { + fd->set_compress(); + } + return true; +} + +/* + * Append Open session command + * + */ +static bool append_open_session(JCR *jcr) +{ + BSOCK *fd = jcr->file_bsock; + + Dmsg1(120, "Append open session: %s", fd->msg); + if (jcr->session_opened) { + pm_strcpy(jcr->errmsg, _("Attempt to open already open session.\n")); + fd->fsend(NO_open); + return false; + } + + jcr->session_opened = true; + + /* Send "Ticket" to File Daemon */ + fd->fsend(OK_open, jcr->VolSessionId); + Dmsg1(110, ">filed: %s", fd->msg); + + return true; +} + +/* + * Append Close session command + * Close the append session and send back Statistics + * (need to fix statistics) + */ +static bool append_close_session(JCR *jcr) +{ + BSOCK *fd = jcr->file_bsock; + + Dmsg1(120, "msg); + if (!jcr->session_opened) { + pm_strcpy(jcr->errmsg, _("Attempt to close non-open session.\n")); + fd->fsend(NOT_opened); + return false; + } + /* Send final statistics to File daemon */ + fd->fsend(OK_close, jcr->JobStatus); + Dmsg1(120, ">filed: %s", fd->msg); + + fd->signal(BNET_EOD); /* send EOD to File daemon */ + + jcr->session_opened = false; + return true; +} + +/* + * Read Data command + * Open Data Channel, read the data from + * the archive device and send to File + * daemon. 
+ */ +static bool read_data_cmd(JCR *jcr) +{ + BSOCK *fd = jcr->file_bsock; + + Dmsg1(120, "Read data: %s", fd->msg); + if (jcr->session_opened) { + Dmsg1(120, "msg); + return do_read_data(jcr); + } else { + pm_strcpy(jcr->errmsg, _("Attempt to read on non-open session.\n")); + fd->fsend(NOT_opened); + return false; + } +} + +/* + * Read Open session command + * + * We need to scan for the parameters of the job + * to be restored. + */ +static bool read_open_session(JCR *jcr) +{ + BSOCK *fd = jcr->file_bsock; + + Dmsg1(120, "%s", fd->msg); + if (jcr->session_opened) { + pm_strcpy(jcr->errmsg, _("Attempt to open an already open session.\n")); + fd->fsend(NO_open); + return false; + } + + if (sscanf(fd->msg, read_open, jcr->read_dcr->VolumeName, &jcr->read_VolSessionId, + &jcr->read_VolSessionTime, &jcr->read_StartFile, &jcr->read_EndFile, + &jcr->read_StartBlock, &jcr->read_EndBlock) == 7) { + Dmsg4(100, "read_open_session got: JobId=%d Vol=%s VolSessId=%ld VolSessT=%ld\n", + jcr->JobId, jcr->read_dcr->VolumeName, jcr->read_VolSessionId, + jcr->read_VolSessionTime); + Dmsg4(100, " StartF=%ld EndF=%ld StartB=%ld EndB=%ld\n", + jcr->read_StartFile, jcr->read_EndFile, jcr->read_StartBlock, + jcr->read_EndBlock); + + } else { + pm_strcpy(jcr->errmsg, _("Cannot open session, received bad parameters.\n")); + fd->fsend(ERROR_open); + return false; + } + + jcr->session_opened = true; + jcr->setJobType(JT_RESTORE); + + /* Send "Ticket" to File Daemon */ + fd->fsend(OK_open, jcr->VolSessionId); + Dmsg1(110, ">filed: %s", fd->msg); + + return true; +} + +static bool read_control_cmd(JCR *jcr) +{ + BSOCK *fd = jcr->file_bsock; + + Dmsg1(120, "Read control: %s\n", fd->msg); + if (!jcr->session_opened) { + fd->fsend(NOT_opened); + return false; + } + jcr->interactive_session = true; + return true; +} + +/* + * Read Close session command + * Close the read session + */ +static bool read_close_session(JCR *jcr) +{ + BSOCK *fd = jcr->file_bsock; + + Dmsg1(120, "Read close session: %s\n", fd->msg); + if (!jcr->session_opened) { + fd->fsend(NOT_opened); + return false; + } + /* Send final close msg to File daemon */ + fd->fsend(OK_close, jcr->JobStatus); + Dmsg1(160, ">filed: %s\n", fd->msg); + + fd->signal(BNET_EOD); /* send EOD to File daemon */ + + jcr->session_opened = false; + return true; +} + +/* + * Get response from FD or SD + * sent. Check that the response agrees with what we expect. + * + * Returns: false on failure + * true on success + */ +static bool response(JCR *jcr, BSOCK *bs, const char *resp, const char *cmd) +{ + int n; + + if (bs->is_error()) { + return false; + } + if ((n = bs->recv()) >= 0) { + if (strcmp(bs->msg, resp) == 0) { + return true; + } + Jmsg(jcr, M_FATAL, 0, _("Bad response to %s command: wanted %s, got %s\n"), + cmd, resp, bs->msg); + return false; + } + Jmsg(jcr, M_FATAL, 0, _("Socket error on %s command: ERR=%s\n"), + cmd, bs->bstrerror()); + return false; +} diff --git a/src/stored/fifo_dev.c b/src/stored/fifo_dev.c new file mode 100644 index 00000000..6143a521 --- /dev/null +++ b/src/stored/fifo_dev.c @@ -0,0 +1,51 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * fifo_dev.c -- low level operations on fifo devices + * + * written by, Kern Sibbald, MM + * separated from file_dev.c January 2016f4 + * + */ + +#include "bacula.h" +#include "stored.h" + +bool fifo_dev::open_device(DCR *dcr, int omode) +{ + return tape_dev::open_device(dcr, omode); +} + +boffset_t fifo_dev::lseek(DCR *dcr, boffset_t offset, int whence) +{ + /* Cannot seek */ + return 0; +} + +bool fifo_dev::truncate(DCR *dcr) +{ + /* Cannot truncate */ + return true; +} + +const char *fifo_dev::print_type() +{ + return "FIFO"; +} diff --git a/src/stored/fifo_dev.h b/src/stored/fifo_dev.h new file mode 100644 index 00000000..72c6beac --- /dev/null +++ b/src/stored/fifo_dev.h @@ -0,0 +1,39 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Modified version of tape_dev.h + */ + +#ifndef __FIFO_DEV_ +#define __FIFO_DEV_ + +class fifo_dev : public tape_dev { +public: + + fifo_dev() { }; + ~fifo_dev() { }; + + /* DEVICE virtual functions that we redefine with our fifo code */ + bool open_device(DCR *dcr, int omode); + boffset_t lseek(DCR *dcr, boffset_t offset, int whence); + bool truncate(DCR *dcr); + const char *print_type(); +}; + +#endif /* __FIFO_DEV_ */ diff --git a/src/stored/file_dev.c b/src/stored/file_dev.c new file mode 100644 index 00000000..c3d536ec --- /dev/null +++ b/src/stored/file_dev.c @@ -0,0 +1,541 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * + * file_dev.c -- low level operations on file devices + * + * written by, Kern Sibbald, MM + * separated from dev.c February 2014 + * + */ + +#include "bacula.h" +#include "stored.h" + +static const int dbglvl = 100; + +/* Imported functions */ +const char *mode_to_str(int mode); + + +/* default primitives are designed for file */ +int DEVICE::d_open(const char *pathname, int flags) +{ + return ::open(pathname, flags | O_CLOEXEC); +} + +int DEVICE::d_close(int fd) +{ + return ::close(fd); +} + +int DEVICE::d_ioctl(int fd, ioctl_req_t request, char *mt_com) +{ +#ifdef HAVE_WIN32 + return -1; +#else + return ::ioctl(fd, request, mt_com); +#endif +} + +ssize_t DEVICE::d_read(int fd, void *buffer, size_t count) +{ + return ::read(fd, buffer, count); +} + +ssize_t DEVICE::d_write(int fd, const void *buffer, size_t count) +{ + return ::write(fd, buffer, count); +} + +/* Rewind file device */ +bool DEVICE::rewind(DCR *dcr) + +{ + Enter(dbglvl); + Dmsg3(400, "rewind res=%d fd=%d %s\n", num_reserved(), m_fd, print_name()); + state &= ~(ST_EOT|ST_EOF|ST_WEOT); /* remove EOF/EOT flags */ + block_num = file = 0; + file_size = 0; + file_addr = 0; + if (m_fd < 0) { + Mmsg1(errmsg, _("Rewind failed: device %s is not open.\n"), print_name()); + return false; + } + if (is_file()) { + if (lseek(dcr, (boffset_t)0, SEEK_SET) < 0) { + berrno be; + dev_errno = errno; + Mmsg2(errmsg, _("lseek error on %s. ERR=%s.\n"), + print_name(), be.bstrerror()); + return false; + } + } + return true; +} + +/* + * Reposition the device to file, block + * Returns: false on failure + * true on success + */ +bool DEVICE::reposition(DCR *dcr, uint64_t raddr) +{ + if (!is_open()) { + dev_errno = EBADF; + Mmsg0(errmsg, _("Bad call to reposition. Device not open\n")); + Emsg0(M_FATAL, 0, errmsg); + return false; + } + + Dmsg1(100, "===== lseek to %llu\n", raddr); + if (lseek(dcr, (boffset_t)raddr, SEEK_SET) == (boffset_t)-1) { + berrno be; + dev_errno = errno; + Mmsg2(errmsg, _("lseek error on %s. ERR=%s.\n"), + print_name(), be.bstrerror()); + return false; + } + file_addr = raddr; + return true; +} + + +/* Seek to specified place */ +boffset_t DEVICE::lseek(DCR *dcr, boffset_t offset, int whence) +{ +#if defined(HAVE_WIN32) + return ::_lseeki64(m_fd, (__int64)offset, whence); +#else + return ::lseek(m_fd, offset, whence); +#endif +} + +/* + * Open a file device. For Aligned type we open both Volumes + */ +bool file_dev::open_device(DCR *dcr, int omode) +{ + POOL_MEM archive_name(PM_FNAME); + struct stat sp; + + Enter(dbglvl); + if (DEVICE::open_device(dcr, omode)) { + Leave(dbglvl); + return true; + } + omode = openmode; + + get_autochanger_loaded_slot(dcr); + + /* + * Handle opening of File Autochanger + */ + + pm_strcpy(archive_name, dev_name); + /* + * If this is a virtual autochanger (i.e. changer_res != NULL) + * we simply use the device name, assuming it has been + * appropriately setup by the "autochanger". + */ + if (!device->changer_res || device->changer_command[0] == 0 || + strcmp(device->changer_command, "/dev/null") == 0) { + if (VolCatInfo.VolCatName[0] == 0) { + Mmsg(errmsg, _("Could not open file device %s. 
No Volume name given.\n"), + print_name()); + if (dcr->jcr) { + pm_strcpy(dcr->jcr->errmsg, errmsg); + } + clear_opened(); + Leave(dbglvl); + return false; + } + + /* If not /dev/null concatenate VolumeName */ + if (!is_null()) { + if (!IsPathSeparator(archive_name.c_str()[strlen(archive_name.c_str())-1])) { + pm_strcat(archive_name, "/"); + } + pm_strcat(archive_name, getVolCatName()); + } + } + + mount(1); /* do mount if required */ + + set_mode(omode); + /* If creating file, give 0640 permissions */ + Dmsg3(100, "open disk: mode=%s open(%s, 0x%x, 0640)\n", mode_to_str(omode), + archive_name.c_str(), mode); + /* Use system open() */ + if ((m_fd = ::open(archive_name.c_str(), mode|O_CLOEXEC, 0640)) < 0) { + berrno be; + dev_errno = errno; + Mmsg3(errmsg, _("Could not open(%s,%s,0640): ERR=%s\n"), + archive_name.c_str(), mode_to_str(omode), be.bstrerror()); + Dmsg1(40, "open failed: %s", errmsg); + } else { + /* Open is OK, now let device get control */ + Dmsg2(40, "Did open(%s,%s,0640)\n", archive_name.c_str(), mode_to_str(omode)); + device_specific_open(dcr); + } + if (m_fd >= 0) { + dev_errno = 0; + file = 0; + file_addr = 0; + + /* Refresh the underline device id */ + if (fstat(m_fd, &sp) == 0) { + devno = sp.st_dev; + } + } else { + if (dcr->jcr) { + pm_strcpy(dcr->jcr->errmsg, errmsg); + } + } + Dmsg1(100, "open dev: disk fd=%d opened\n", m_fd); + + state |= preserve; /* reset any important state info */ + Leave(dbglvl); + return m_fd >= 0; +} + +/* + * Truncate a volume. If this is aligned disk, we + * truncate both volumes. + */ +bool DEVICE::truncate(DCR *dcr) +{ + struct stat st; + DEVICE *dev = this; + + Dmsg1(100, "truncate %s\n", print_name()); + switch (dev_type) { + case B_VTL_DEV: + case B_VTAPE_DEV: + case B_TAPE_DEV: + /* maybe we should rewind and write and eof ???? */ + return true; /* we don't really truncate tapes */ + default: + break; + } + + Dmsg2(100, "Truncate adata=%d fd=%d\n", dev->adata, dev->m_fd); + if (ftruncate(dev->m_fd, 0) != 0) { + berrno be; + Mmsg2(errmsg, _("Unable to truncate device %s. ERR=%s\n"), + print_name(), be.bstrerror()); + return false; + } + + /* + * Check for a successful ftruncate() and issue a work-around for devices + * (mostly cheap NAS) that don't support truncation. + * Workaround supplied by Martin Schmid as a solution to bug #1011. + * 1. close file + * 2. delete file + * 3. open new file with same mode + * 4. change ownership to original + */ + + if (fstat(dev->m_fd, &st) != 0) { + berrno be; + Mmsg2(errmsg, _("Unable to stat device %s. ERR=%s\n"), + print_name(), be.bstrerror()); + return false; + } + + if (st.st_size != 0) { /* ftruncate() didn't work */ + POOL_MEM archive_name(PM_FNAME); + + pm_strcpy(archive_name, dev_name); + if (!IsPathSeparator(archive_name.c_str()[strlen(archive_name.c_str())-1])) { + pm_strcat(archive_name, "/"); + } + pm_strcat(archive_name, dcr->VolumeName); + if (dev->is_adata()) { + pm_strcat(archive_name, ADATA_EXTENSION); + } + + Mmsg2(errmsg, _("Device %s doesn't support ftruncate(). 
Recreating file %s.\n"), + print_name(), archive_name.c_str()); + + /* Close file and blow it away */ + ::close(dev->m_fd); + ::unlink(archive_name.c_str()); + + /* Recreate the file -- of course, empty */ + dev->set_mode(CREATE_READ_WRITE); + if ((dev->m_fd = ::open(archive_name.c_str(), mode|O_CLOEXEC, st.st_mode)) < 0) { + berrno be; + dev_errno = errno; + Mmsg2(errmsg, _("Could not reopen: %s, ERR=%s\n"), archive_name.c_str(), + be.bstrerror()); + Dmsg1(40, "reopen failed: %s", errmsg); + Emsg0(M_FATAL, 0, errmsg); + return false; + } + + /* Reset proper owner */ + chown(archive_name.c_str(), st.st_uid, st.st_gid); + } + return true; +} + + +/* + * (Un)mount the device + */ +bool DEVICE::mount_file(int mount, int dotimeout) +{ + POOL_MEM ocmd(PM_FNAME); + POOLMEM *results; + DIR* dp; + char *icmd; + POOL_MEM dname(PM_FNAME); + int status, tries, name_max, count; + berrno be; + + Dsm_check(200); + if (mount) { + icmd = device->mount_command; + } else { + icmd = device->unmount_command; + } + + clear_freespace_ok(); + edit_mount_codes(ocmd, icmd); + + Dmsg2(100, "mount_file: cmd=%s mounted=%d\n", ocmd.c_str(), !!is_mounted()); + + if (dotimeout) { + /* Try at most 10 times to (un)mount the device. This should perhaps be configurable. */ + tries = 10; + } else { + tries = 1; + } + results = get_memory(4000); + + /* If busy retry each second */ + Dmsg1(100, "mount_file run_prog=%s\n", ocmd.c_str()); + while ((status = run_program_full_output(ocmd.c_str(), max_open_wait/2, results)) != 0) { + /* Doesn't work with internationalization (This is not a problem) */ + if (mount && fnmatch("*is already mounted on*", results, 0) == 0) { + break; + } + if (!mount && fnmatch("* not mounted*", results, 0) == 0) { + break; + } + if (tries-- > 0) { + /* Sometimes the device cannot be mounted because it is already mounted. + * Try to unmount it, then remount it */ + if (mount) { + Dmsg1(400, "Trying to unmount the device %s...\n", print_name()); + mount_file(0, 0); + } + bmicrosleep(1, 0); + continue; + } + Dmsg5(100, "Device %s cannot be %smounted. stat=%d result=%s ERR=%s\n", print_name(), + (mount ? "" : "un"), status, results, be.bstrerror(status)); + Mmsg(errmsg, _("Device %s cannot be %smounted. ERR=%s\n"), + print_name(), (mount ? "" : "un"), be.bstrerror(status)); + + /* + * Now, just to be sure it is not mounted, try to read the filesystem. + */ + name_max = pathconf(".", _PC_NAME_MAX); + if (name_max < 1024) { + name_max = 1024; + } + + if (!(dp = opendir(device->mount_point))) { + berrno be; + dev_errno = errno; + Dmsg3(100, "mount_file: failed to open dir %s (dev=%s), ERR=%s\n", + device->mount_point, print_name(), be.bstrerror()); + goto get_out; + } + + count = 0; + while (1) { + if (breaddir(dp, dname.addr()) != 0) { + dev_errno = EIO; + Dmsg2(129, "mount_file: failed to find suitable file in dir %s (dev=%s)\n", + device->mount_point, print_name()); + break; + } + if ((strcmp(dname.c_str(), ".")) && (strcmp(dname.c_str(), "..")) && (strcmp(dname.c_str(), ".keep"))) { + count++; /* dname.c_str() != ., .. or .keep (Gentoo-specific) */ + break; + } else { + Dmsg2(129, "mount_file: ignoring %s in %s\n", dname.c_str(), device->mount_point); + } + } + closedir(dp); + + Dmsg1(100, "mount_file: got %d files in the mount point (not counting ., .. and .keep)\n", count); + + if (count > 0) { + /* If we got more than ., .. and .keep */ + /* there must be something mounted */ + if (mount) { + Dmsg1(100, "Did Mount by count=%d\n", count); + break; + } else { + /* An unmount request. 
We failed to unmount - report an error */ + set_mounted(true); + free_pool_memory(results); + Dmsg0(200, "== error mount=1 wanted unmount\n"); + return false; + } + } +get_out: + set_mounted(false); + free_pool_memory(results); + Dmsg0(200, "============ mount=0\n"); + Dsm_check(200); + return false; + } + + set_mounted(mount); /* set/clear mounted flag */ + free_pool_memory(results); + /* Do not check free space when unmounting */ + Dmsg1(200, "============ mount=%d\n", mount); + return true; +} + +/* + * Check if the current position on the volume corresponds to + * what is in the catalog. + * + */ +bool file_dev::is_eod_valid(DCR *dcr) +{ + JCR *jcr = dcr->jcr; + + if (has_cap(CAP_LSEEK)) { + char ed1[50], ed2[50]; + boffset_t ameta_size, adata_size, size; + + ameta_size = lseek(dcr, (boffset_t)0, SEEK_END); + adata_size = get_adata_size(dcr); + size = ameta_size + adata_size; + if (VolCatInfo.VolCatAmetaBytes == (uint64_t)ameta_size && + VolCatInfo.VolCatAdataBytes == (uint64_t)adata_size) { + if (is_aligned()) { + Jmsg(jcr, M_INFO, 0, _("Ready to append to end of Volumes \"%s\"" + " ameta size=%s adata size=%s\n"), dcr->VolumeName, + edit_uint64_with_commas(VolCatInfo.VolCatAmetaBytes, ed1), + edit_uint64_with_commas(VolCatInfo.VolCatAdataBytes, ed2)); + } else { + Jmsg(jcr, M_INFO, 0, _("Ready to append to end of Volume \"%s\"" + " size=%s\n"), dcr->VolumeName, + edit_uint64_with_commas(VolCatInfo.VolCatAmetaBytes, ed1)); + } + } else if ((uint64_t)ameta_size >= VolCatInfo.VolCatAmetaBytes && + (uint64_t)adata_size >= VolCatInfo.VolCatAdataBytes) { + if ((uint64_t)ameta_size != VolCatInfo.VolCatAmetaBytes) { + Jmsg(jcr, M_WARNING, 0, _("For Volume \"%s\":\n" + " The sizes do not match! Metadata Volume=%s Catalog=%s\n" + " Correcting Catalog\n"), + dcr->VolumeName, edit_uint64_with_commas(ameta_size, ed1), + edit_uint64_with_commas(VolCatInfo.VolCatAmetaBytes, ed2)); + } + if ((uint64_t)adata_size != VolCatInfo.VolCatAdataBytes) { + Jmsg(jcr, M_WARNING, 0, _("For aligned Volume \"%s\":\n" + " Aligned sizes do not match! Aligned Volume=%s Catalog=%s\n" + " Correcting Catalog\n"), + dcr->VolumeName, edit_uint64_with_commas(adata_size, ed1), + edit_uint64_with_commas(VolCatInfo.VolCatAdataBytes, ed2)); + } + VolCatInfo.VolCatAmetaBytes = ameta_size; + VolCatInfo.VolCatAdataBytes = adata_size; + VolCatInfo.VolCatBytes = size; + VolCatInfo.VolCatFiles = (uint32_t)(size >> 32); + if (!dir_update_volume_info(dcr, false, true)) { + Jmsg(jcr, M_WARNING, 0, _("Error updating Catalog\n")); + dcr->mark_volume_in_error(); + return false; + } + } else { + Mmsg(jcr->errmsg, _("Bacula cannot write on disk Volume \"%s\" because: " + "The sizes do not match! Volume=%s Catalog=%s\n"), + dcr->VolumeName, + edit_uint64_with_commas(size, ed1), + edit_uint64_with_commas(VolCatInfo.VolCatBytes, ed2)); + Jmsg(jcr, M_ERROR, 0, jcr->errmsg); + Dmsg0(100, jcr->errmsg); + dcr->mark_volume_in_error(); + return false; + } + } + return true; +} + + +/* + * Position device to end of medium (end of data) + * Returns: true on succes + * false on error + */ +bool file_dev::eod(DCR *dcr) +{ + boffset_t pos; + + Enter(100); + if (m_fd < 0) { + dev_errno = EBADF; + Mmsg1(errmsg, _("Bad call to eod. 
Device %s not open\n"), print_name()); + Dmsg1(100, "%s", errmsg); + return false; + } + + if (at_eot()) { + Leave(100); + return true; + } + clear_eof(); /* remove EOF flag */ + block_num = file = 0; + file_size = 0; + file_addr = 0; + if (is_fifo()) { + Leave(100); + return true; + } + pos = lseek(dcr, (boffset_t)0, SEEK_END); + Dmsg1(200, "====== Seek to %lld\n", pos); + if (pos >= 0) { + update_pos(dcr); + set_eot(); + Leave(100); + return true; + } + dev_errno = errno; + berrno be; + Mmsg2(errmsg, _("lseek error on %s. ERR=%s.\n"), + print_name(), be.bstrerror()); + Dmsg1(100, "%s", errmsg); + Leave(100); + return false; +} + +const char *file_dev::print_type() +{ + return "File"; +} diff --git a/src/stored/file_dev.h b/src/stored/file_dev.h new file mode 100644 index 00000000..0c841cf2 --- /dev/null +++ b/src/stored/file_dev.h @@ -0,0 +1,37 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Inspired by vtape.h + */ + +#ifndef __FILE_DEV_ +#define __FILE_DEV_ + +class file_dev : public DEVICE { +public: + + file_dev() { }; + ~file_dev() { m_fd = -1; }; + bool is_eod_valid(DCR *dcr); + bool eod(DCR *dcr); + bool open_device(DCR *dcr, int omode); + const char *print_type(); +}; + +#endif /* __FILE_DEV_ */ diff --git a/src/stored/file_driver.c b/src/stored/file_driver.c new file mode 100644 index 00000000..b209ca83 --- /dev/null +++ b/src/stored/file_driver.c @@ -0,0 +1,501 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Routines for writing to a file from the Cloud device. + * + * This is for testing purposes only. + # + * NOTE!!! This cloud driver is not compatible with + * any disk-changer script for changing Volumes. + * It does however work with Bacula Virtual autochangers. + * + * Written by Kern Sibbald, May MMXVI + * + */ + +#include "file_driver.h" + +static const int dbglvl = 100; + +#include + +/* Imported functions */ +const char *mode_to_str(int mode); +int breaddir(DIR *dirp, POOLMEM *&dname); + +/* Forward referenced functions */ + +/* Const and Static definitions */ + +/* + * Put a cache object into the cloud (i.e. local disk) + * or visa-versa. 
+ */ +bool file_driver::put_object(transfer *xfer, const char *in_fname, const char *out_fname, bwlimit *limit) +{ + struct stat statbuf; + char *p, *f; + char save_separator; + ssize_t rbytes, wbytes; + uint32_t read_len; + int64_t obj_len; + FILE *infile=NULL, *outfile=NULL; + POOLMEM *buf = get_memory(buf_len); + + Enter(dbglvl); + Dmsg2(dbglvl, "Put from: %s to %s\n", in_fname, out_fname); + + /* + * First work on output file + */ + /* Split out_fname into path + file */ + for (p=f=const_cast(out_fname); *p; p++) { + if (IsPathSeparator(*p)) { + f = p; /* set pos of last slash */ + } + } + if (!IsPathSeparator(*f)) { /* did we find a slash? */ + Mmsg1(xfer->m_message, "Could not find path name for output file: %s\n", out_fname); + goto get_out; + } + save_separator = *f; + *f = 0; /* terminate path */ + + /* const_cast should not be necessary here but is due the makedir interface */ + if (!makedir(NULL, const_cast(out_fname), 0740)) { + Mmsg1(xfer->m_message, "Could not makedir output directory: %s\n", out_fname); + *f = save_separator; + goto get_out; + } + *f = save_separator; + + if (lstat(out_fname, &statbuf) == -1) { + outfile = bfopen(out_fname, "w"); + } else { + /* append to existing file */ + outfile = bfopen(out_fname, "r+"); + } + + if (!outfile) { + berrno be; + Mmsg2(xfer->m_message, "Could not open output file %s. ERR=%s\n", + out_fname, be.bstrerror()); + goto get_out; + } + + /* + * Now work on input file + */ + if (lstat(in_fname, &statbuf) == -1) { + berrno be; + Mmsg2(xfer->m_message, "Failed to stat input file %s. ERR=%s\n", + in_fname, be.bstrerror()); + goto get_out; + } + + obj_len = statbuf.st_size; + Dmsg1(dbglvl, "Object length to copy is: %lld bytes.\n", obj_len); + if (obj_len == 0) { /* Not yet created nothing to do */ + goto get_out; + } + + infile = bfopen(in_fname, "r"); + + if (!infile) { + berrno be; + Mmsg2(xfer->m_message, "Failed to open input file %s. ERR=%s\n", + in_fname, be.bstrerror()); + goto get_out; + } + + while (obj_len > 0) { + if (xfer->is_cancelled()) { + Mmsg(xfer->m_message, "Job is canceled.\n"); + goto get_out; + } + read_len = (obj_len > buf_len) ? buf_len : obj_len; + Dmsg3(dbglvl, "obj_len=%d buf_len=%d read_len=%d\n", obj_len, buf_len, read_len); + rbytes = fread(buf, 1, read_len, infile); + Dmsg1(dbglvl, "Read %d bytes.\n", rbytes); + if (rbytes <= 0) { + berrno be; + Mmsg2(xfer->m_message, "Error reading input file %s. ERR=%s\n", + in_fname, be.bstrerror()); + goto get_out; + } + wbytes = fwrite(buf, 1, rbytes, outfile); + Dmsg2(dbglvl, "Wrote: %d bytes wanted %d bytes.\n", wbytes, rbytes); + if (wbytes < 0) { + berrno be; + Mmsg2(xfer->m_message, "Error writing output file %s. 
ERR=%s\n", + out_fname, be.bstrerror()); + } + obj_len -= rbytes; + if (limit->use_bwlimit()) { + limit->control_bwlimit(rbytes); + } + } + +get_out: + free_memory(buf); + if (infile) { + fclose(infile); + } + if (outfile) { + fclose(outfile); + /* Get stats on the result part and fill the xfer res */ + if (lstat(out_fname, &statbuf) == -1) { + berrno be; + Mmsg2(xfer->m_message, "Failed to stat file %s: %s\n", out_fname, be.bstrerror()); + } else { + xfer->m_res_size = statbuf.st_size; + xfer->m_res_mtime = statbuf.st_mtime; + } + } + Leave(dbglvl); + return (xfer->m_message[0] == 0); +} + +bool file_driver::get_cloud_object(transfer *xfer, const char *cloud_fname, const char *cache_fname) +{ + return put_object(xfer, cloud_fname, cache_fname, &download_limit); +} + +bool file_driver::truncate_cloud_volume(DCR *dcr, const char *VolumeName, ilist *trunc_parts, POOLMEM *&err) +{ + bool rtn = true; + int i; + POOLMEM *filename = get_pool_memory(PM_FNAME); + for (i=1; (i <= (int)trunc_parts->last_index()); i++) { + if (!trunc_parts->get(i)) { + continue; + } + make_cloud_filename(filename, VolumeName, i); + if (unlink(filename) != 0 && errno != ENOENT) { + berrno be; + Mmsg2(err, "Unable to delete %s. ERR=%s\n", filename, be.bstrerror()); + Dmsg1(dbglvl, "%s", err); + Qmsg(dcr->jcr, M_INFO, 0, "%s", err); + rtn = false; + } else { + Dmsg1(dbglvl, "Unlink file %s\n", filename); + } + } + + free_pool_memory(filename); + return rtn; +} + +void file_driver::make_cloud_filename(POOLMEM *&filename, + const char *VolumeName, uint32_t part) +{ + Enter(dbglvl); + + pm_strcpy(filename, hostName); + dev->add_vol_and_part(filename, VolumeName, "part", part); + Dmsg1(dbglvl, "make_cloud_filename: %s\n", filename); +} + +/* + * Copy a single cache part to the cloud (local disk) + */ +bool file_driver::copy_cache_part_to_cloud(transfer *xfer) +{ + Enter(dbglvl); + POOLMEM *cloud_fname = get_pool_memory(PM_FNAME); + make_cloud_filename(cloud_fname, xfer->m_volume_name, xfer->m_part); + Dmsg2(dbglvl, "Call put_object: %s, %s\n", xfer->m_cache_fname, cloud_fname); + bool rtn = put_object(xfer, xfer->m_cache_fname, cloud_fname, &upload_limit); + free_pool_memory(cloud_fname); + return rtn; +} + +/* + * Copy a single object (part) from the cloud to the cache + */ +bool file_driver::copy_cloud_part_to_cache(transfer *xfer) +{ + Enter(dbglvl); + POOLMEM *cloud_fname = get_pool_memory(PM_FNAME); + make_cloud_filename(cloud_fname, xfer->m_volume_name, xfer->m_part); + bool rtn = put_object(xfer, cloud_fname, xfer->m_cache_fname, &download_limit); + free_pool_memory(cloud_fname); + return rtn; +} + +/* + * NOTE: The SD Cloud resource has the following items + + RES hdr; + char *host_name; + char *bucket_name; + char *access_key; + char *secret_key; + int32_t protocol; + int32_t uri_style; + uint32_t driver_type; + uint32_t trunc_opt; + uint32_t upload_opt; +*/ + +bool file_driver::init(JCR *jcr, cloud_dev *adev, DEVRES *adevice) +{ + dev = adev; /* copy cloud device pointer */ + device = adevice; /* copy device resource pointer */ + cloud = device->cloud; /* local pointer to cloud definition */ + + /* File I/O buffer */ + buf_len = dev->max_block_size; + if (buf_len == 0) { + buf_len = DEFAULT_BLOCK_SIZE; + } + + hostName = cloud->host_name; + bucketName = cloud->bucket_name; + protocol = cloud->protocol; + uriStyle = cloud->uri_style; + accessKeyId = cloud->access_key; + secretAccessKey = cloud->secret_key; + + return true; +} + +bool file_driver::start_of_job(DCR *dcr) +{ + Jmsg(dcr->jcr, M_INFO, 0, _("Using File 
cloud driver Host=%s Bucket=%s\n"), + hostName, bucketName); + return true; +} + +bool file_driver::end_of_job(DCR *dcr) +{ + return true; +} + +/* + * Note, dcr may be NULL + */ +bool file_driver::term(DCR *dcr) +{ + return true; +} + +bool file_driver::get_cloud_volume_parts_list(DCR *dcr, const char* VolumeName, ilist *parts, POOLMEM *&err) +{ + Enter(dbglvl); + + if (parts == NULL || strlen(VolumeName) == 0) { + pm_strcpy(err, "Invalid argument"); + return false; + } + + POOLMEM *vol_dir = get_pool_memory(PM_NAME); + + pm_strcpy(vol_dir, hostName); + + if (!IsPathSeparator(vol_dir[strlen(vol_dir)-1])) { + pm_strcat(vol_dir, "/"); + } + pm_strcat(vol_dir, VolumeName); + + DIR* dp = NULL; + struct dirent *entry = NULL; + struct stat statbuf; + int name_max; + bool ok = false; + POOL_MEM dname(PM_FNAME); + int status = 0; + + Dmsg1(dbglvl, "Searching for parts in: %s\n", vol_dir); + + if (!(dp = opendir(vol_dir))) { + berrno be; + Mmsg2(err, "Cannot opendir to get parts list. Volume %s does not exist. ERR=%s", + VolumeName, be.bstrerror()); + Dmsg1(dbglvl, "%s\n", err); + if (errno == ENOENT) { + ok=true; /* No volume, so no part */ + } + goto get_out; + } + + name_max = pathconf(".", _PC_NAME_MAX); + if (name_max < 1024) { + name_max = 1024; + } + + entry = (struct dirent *)malloc(sizeof(struct dirent) + name_max + 1000); + + for ( ;; ) { + if (dcr->jcr->is_canceled()) { + pm_strcpy(err, "Job canceled"); + goto get_out; + } + errno = 0; + status = breaddir(dp, dname.addr()); + if (status != 0) { + if (status > 0) { + Mmsg1(err, "breaddir failed: status=%d", status); + Dmsg1(dbglvl, "%s\n", err); + } + break; + } + /* Always ignore . and .. */ + if (strcmp(".", dname.c_str()) == 0 || strcmp("..", dname.c_str()) == 0) { + continue; + } + + /* Look only for part files */ + if (strncmp("part.", dname.c_str(), 5) != 0) { + continue; + } + + char *ext = strrchr (dname.c_str(), '.'); + if (!ext || strlen(ext) < 2) { + continue; + } + + cloud_part *part = (cloud_part*) malloc(sizeof(cloud_part)); + + /* save extension (part number) to cloud_part struct index*/ + part->index = atoi(&ext[1]); + + POOLMEM *part_path = get_pool_memory(PM_NAME); + pm_strcpy(part_path,vol_dir); + if (!IsPathSeparator(part_path[strlen(vol_dir)-1])) { + pm_strcat(part_path, "/"); + } + pm_strcat(part_path, dname.c_str()); + + /* Get size of part */ + if (lstat(part_path, &statbuf) == -1) { + berrno be; + Mmsg(err, "Failed to stat file %s: %s", part_path, be.bstrerror()); + Dmsg1(dbglvl, "%s\n", err); + free_pool_memory(part_path); + free(part); + goto get_out; + } + free_pool_memory(part_path); + + part->size = statbuf.st_size; + part->mtime = statbuf.st_mtime; + parts->put(part->index, part); + } + ok = true; + +get_out: + if (dp) { + closedir(dp); + } + if (entry) { + free(entry); + } + + free_pool_memory(vol_dir); + + return ok; +} + +bool file_driver::get_cloud_volumes_list(DCR *dcr, alist *volumes, POOLMEM *&err) +{ + if (!volumes) { + pm_strcpy(err, "Invalid argument"); + return false; + } + + Enter(dbglvl); + + DIR* dp = NULL; + struct dirent *entry = NULL; + struct stat statbuf; + int name_max; + bool ok = false; + POOLMEM *fullpath = get_pool_memory(PM_NAME); + POOL_MEM dname(PM_FNAME); + int status = 0; + + if (!(dp = opendir(hostName))) { + berrno be; + Mmsg2(err, "Cannot opendir to get volumes list. host_name %s does not exist. 
ERR=%s", + hostName, be.bstrerror()); + Dmsg1(dbglvl, "%s\n", err); + if (errno == ENOENT) { + ok=true; /* No volume, so no part */ + } + goto get_out; + } + + name_max = pathconf(".", _PC_NAME_MAX); + if (name_max < 1024) { + name_max = 1024; + } + + entry = (struct dirent *)malloc(sizeof(struct dirent) + name_max + 1000); + + for ( ;; ) { + if (dcr->jcr->is_canceled()) { + goto get_out; + } + errno = 0; + status = breaddir(dp, dname.addr()); + if (status != 0) { + if (status > 0) { + Mmsg1(err, "breaddir failed: status=%d", status); + Dmsg1(dbglvl, "%s\n", err); + } + break; + } + /* Always ignore . and .. */ + if (strcmp(".", dname.c_str()) == 0 || strcmp("..", dname.c_str()) == 0) { + continue; + } + + + pm_strcpy(fullpath, hostName); + if (!IsPathSeparator(fullpath[strlen(fullpath)-1])) { + pm_strcat(fullpath, "/"); + } + pm_strcat(fullpath, dname.c_str()); + + if (lstat(fullpath, &statbuf) != 0) { + berrno be; + Dmsg2(dbglvl, "Failed to stat file %s: %s\n", + fullpath, be.bstrerror()); + continue; + } + + if (S_ISDIR(statbuf.st_mode)) { + volumes->append(bstrdup(dname.c_str())); + } + } + ok = true; + +get_out: + if (dp) { + closedir(dp); + } + if (entry) { + free(entry); + } + + free_pool_memory(fullpath); + + return ok; +} diff --git a/src/stored/file_driver.h b/src/stored/file_driver.h new file mode 100644 index 00000000..1d31d92a --- /dev/null +++ b/src/stored/file_driver.h @@ -0,0 +1,72 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Routines for writing to the Cloud using S3 protocol. 
+ * + * Written by Kern Sibbald, May MMXVI + */ + +#ifndef _FILE_DRV_H +#define _FILE_DRV_H + +#include "bacula.h" +#include "stored.h" +#include "cloud_driver.h" /* get base class definitions */ + +class file_driver: public cloud_driver { +public: + cloud_dev *dev; /* device that is calling us */ + DEVRES *device; + CLOUD *cloud; /* Pointer to CLOUD resource */ + alist *objects; + uint32_t buf_len; + + + /* Stuff directly from Cloud resource */ + char *hostName; + char *bucketName; + char *accessKeyId; + char *secretAccessKey; + int32_t protocol; + int32_t uriStyle; + + +private: + void make_cloud_filename(POOLMEM *&filename, const char *VolumeName, uint32_t part); + bool init(JCR *jcr, cloud_dev *dev, DEVRES *device); + bool start_of_job(DCR *dcr); + bool end_of_job(DCR *dcr); + bool term(DCR *dcr); + bool truncate_cloud_volume(DCR *dcr, const char *VolumeName, ilist *trunc_parts, POOLMEM *&err); + bool copy_cache_part_to_cloud(transfer *xfer); + bool copy_cloud_part_to_cache(transfer *xfer); + bool get_cloud_volume_parts_list(DCR *dcr, const char* VolumeName, ilist *parts, POOLMEM *&err); + bool get_cloud_volumes_list(DCR* dcr, alist *volumes, POOLMEM *&err); + + bool put_object(transfer *xfer, const char *cache_fname, const char *cloud_fname, bwlimit *limit); + bool get_cloud_object(transfer *xfer, const char *cloud_fname, const char *cache_fname); + +public: + file_driver() { + }; + ~file_driver() { + }; +}; + +#endif /* _FILE_DRV_H */ diff --git a/src/stored/global.c b/src/stored/global.c new file mode 100644 index 00000000..13868618 --- /dev/null +++ b/src/stored/global.c @@ -0,0 +1,26 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +#include "bacula.h" +#include "stored.h" + +STORES *me = NULL; /* our Global resource */ +bool forge_on = false; /* proceed inspite of I/O errors */ +pthread_mutex_t device_release_mutex = PTHREAD_MUTEX_INITIALIZER; +pthread_cond_t wait_device_release = PTHREAD_COND_INITIALIZER; diff --git a/src/stored/hello.c b/src/stored/hello.c new file mode 100644 index 00000000..e027836a --- /dev/null +++ b/src/stored/hello.c @@ -0,0 +1,351 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Hello routines for Storage daemon. + * + * This file contains all the code relating to reading and writing of + * all Hello commands between the daemons. 
+ * + * Written by Kern Sibbald, June 2014 + * + */ + + +#include "bacula.h" +#include "stored.h" + +extern STORES *me; /* our Global resource */ + +const int dbglvl = 50; + +/* + * SD_VERSION history + * Note: Enterprise versions now numbered in 30000 + * and community is at SD version 3 + * None prior to 06Aug13 + * 1 - Skipped + * 2 - Skipped + * 3 22Feb14 - Added SD->SD with SD_Calls_Client + * 4 22Jun14 - Skipped + * 305 04Jun15 - Added JobMedia queueing + * 306 20Mar15 - Added comm line compression + */ + +#define SD_VERSION 306 /* Community SD version */ +#define FD_VERSION 214 /* Community FD version */ + +static char hello_sd[] = "Hello Bacula SD: Start Job %s %d %d\n"; + +static char Sorry[] = "3999 No go\n"; +static char OK_hello[] = "3000 OK Hello %d\n"; + + +/********************************************************************* + * + * Validate hello from the Director. + * + * Returns: true if Hello is good. + * false if Hello is bad. + */ +bool validate_dir_hello(JCR* jcr) +{ + POOLMEM *dirname; + DIRRES *director = NULL; + int dir_version = 0; + BSOCK *dir = jcr->dir_bsock; + + if (dir->msglen < 25 || dir->msglen > 500) { + Dmsg2(dbglvl, "Bad Hello command from Director at %s. Len=%d.\n", + dir->who(), dir->msglen); + Qmsg2(jcr, M_SECURITY, 0, _("Bad Hello command from Director at %s. Len=%d.\n"), + dir->who(), dir->msglen); + sleep(5); + return false; + } + dirname = get_pool_memory(PM_MESSAGE); + dirname = check_pool_memory_size(dirname, dir->msglen); + + if (sscanf(dir->msg, "Hello SD: Bacula Director %127s calling %d", + dirname, &dir_version) != 2 && + sscanf(dir->msg, "Hello SD: Bacula Director %127s calling", + dirname) != 1) { + dir->msg[100] = 0; + Dmsg2(dbglvl, "Bad Hello command from Director at %s: %s\n", + dir->who(), dir->msg); + Qmsg2(jcr, M_SECURITY, 0, _("Bad Hello command from Director at %s: %s\n"), + dir->who(), dir->msg); + free_pool_memory(dirname); + sleep(5); + return false; + } + + if (dir_version >= 1 && me->comm_compression) { + dir->set_compress(); + } else { + dir->clear_compress(); + Dmsg0(050, "**** No SD compression to Dir\n"); + } + director = NULL; + unbash_spaces(dirname); + foreach_res(director, R_DIRECTOR) { + if (strcasecmp(director->hdr.name, dirname) == 0) { + break; + } + } + if (!director) { + Dmsg2(dbglvl, "Connection from unknown Director %s at %s rejected.\n", + dirname, dir->who()); + Qmsg2(jcr, M_SECURITY, 0, _("Connection from unknown Director %s at %s rejected.\n" + "Please see " MANUAL_AUTH_URL " for help.\n"), + dirname, dir->who()); + free_pool_memory(dirname); + sleep(5); + return false; + } + jcr->director = director; + free_pool_memory(dirname); + return true; +} + +/* + * After receiving a connection (in dircmd.c) if it is + * from the File daemon, this routine is called. + */ +void handle_client_connection(BSOCK *fd) +{ + JCR *jcr; + int fd_version = 0; + int sd_version = 0; + char job_name[500]; + /* + * Do a sanity check on the message received + */ + if (fd->msglen < 25 || fd->msglen > (int)sizeof(job_name)) { + Pmsg1(000, "msg); + Qmsg2(NULL, M_SECURITY, 0, _("Invalid connection from %s. Len=%d\n"), fd->who(), fd->msglen); + bmicrosleep(5, 0); /* make user wait 5 seconds */ + fd->destroy(); + return; + } + + Dmsg1(100, "Conn: %s", fd->msg); + /* + * See if this is a File daemon connection. If so + * call FD handler. 
+ */ + if (sscanf(fd->msg, "Hello Bacula SD: Start Job %127s %d %d", job_name, &fd_version, &sd_version) != 3 && + sscanf(fd->msg, "Hello FD: Bacula Storage calling Start Job %127s %d", job_name, &sd_version) != 2 && + sscanf(fd->msg, "Hello Start Job %127s", job_name) != 1) { + Qmsg2(NULL, M_SECURITY, 0, _("Invalid Hello from %s. Len=%d\n"), fd->who(), fd->msglen); + sleep(5); + fd->destroy(); + return; + } + + if (!(jcr=get_jcr_by_full_name(job_name))) { + Qmsg1(NULL, M_SECURITY, 0, _("Client connect failed: Job name not found: %s\n"), job_name); + Dmsg1(3, "**** Job \"%s\" not found.\n", job_name); + sleep(5); + fd->destroy(); + return; + } + + /* After this point, we can use bail_out */ + Dmsg1(100, "Found Client Job %s\n", job_name); + if (jcr->authenticated) { + Jmsg3(jcr, M_SECURITY, 0, _("A Client \"%s\" tried to authenticate for Job %s, " + "but the Job is already authenticated with \"%s\".\n"), + fd->who(), jcr->Job, jcr->file_bsock?jcr->file_bsock->who():"N/A"); + Dmsg2(050, "Hey!!!! JobId %u Job %s already authenticated.\n", + (uint32_t)jcr->JobId, jcr->Job); + goto bail_out; + } + + fd->set_jcr(jcr); + Dmsg2(050, "fd_version=%d sd_version=%d\n", fd_version, sd_version); + + /* Turn on compression for newer FDs */ + if (fd_version >= 214 || sd_version >= 306) { + fd->set_compress(); /* set compression allowed */ + } else { + fd->clear_compress(); + Dmsg0(050, "*** No SD compression to FD\n"); + } + + /* + * Authenticate the Client (FD or SD) + */ + jcr->lock_auth(); /* Ensure that only one thread is dealing with auth */ + if (jcr->authenticated) { + Jmsg2(jcr, M_SECURITY, 0, _("A Client \"%s\" tried to authenticate for Job %s, " + "but the job is already authenticated.\n"), + fd->who(), jcr->Job); + + } else if (!authenticate_filed(jcr, fd, fd_version)) { + Dmsg1(50, "Authentication failed Job %s\n", jcr->Job); + /* Job not yet started, we can cancel */ + Jmsg(jcr, M_SECURITY, 0, _("Unable to authenticate File daemon\n")); + + } else { + Dmsg2(050, "OK Authentication jid=%u Job %s\n", (uint32_t)jcr->JobId, jcr->Job); + jcr->file_bsock = fd; + jcr->FDVersion = fd_version; + jcr->SDVersion = sd_version; + jcr->authenticated = true; + + if (sd_version > 0) { + jcr->sd_client = true; + } + } + jcr->unlock_auth(); + + if (!jcr->authenticated) { + jcr->setJobStatus(JS_ErrorTerminated); + } + + Dmsg4(050, "=== Auth %s, unblock Job %s jid=%d sd_ver=%d\n", + jcr->authenticated?"OK":"KO", job_name, jcr->JobId, sd_version); + +bail_out: + /* file_bsock might be NULL or a previous BSOCK */ + if (jcr->file_bsock != fd) { + fd->destroy(); + } + pthread_cond_signal(&jcr->job_start_wait); /* wake waiting job */ + free_jcr(jcr); + if (!jcr->authenticated) { + sleep(5); + } + return; +} + + +bool is_client_connection(BSOCK *bs) +{ + return + sscanf(bs->msg, "Hello Bacula SD: Start Job ") == 0 || + sscanf(bs->msg, "Hello FD: Bacula Storage calling Start Job ") == 0 || + sscanf(bs->msg, "Hello Start Job ") == 0; +} + +/* + * If sd_calls_client, we must read the client's response to + * the hello we previously sent. + */ +bool read_client_hello(JCR *jcr) +{ + int i; + int stat; + int fd_version = 0; + int sd_version = 0; + BSOCK *cl = jcr->file_bsock; + char job_name[500]; + + /* We connected to Client, so finish work */ + if (!cl) { + Jmsg0(jcr, M_FATAL, 0, _("Client socket not open. Could not connect to Client.\n")); + Dmsg0(050, "Client socket not open. 
Could not connect to Client.\n"); + return false; + } + /* Get response to Hello command sent earlier */ + Dmsg0(050, "Read Hello command from Client\n"); + for (i=0; i<60; i++) { + stat = cl->recv(); + if (stat <= 0) { + bmicrosleep(1, 0); + } else { + break; + } + } + if (stat <= 0) { + berrno be; + Jmsg1(jcr, M_FATAL, 0, _("Recv request to Client failed. ERR=%s\n"), + be.bstrerror()); + Dmsg1(050, _("Recv request to Client failed. ERR=%s\n"), be.bstrerror()); + return false; + } + Dmsg1(050, ">filed: %s\n", cl->msg); + if (sscanf(cl->msg, "Hello Bacula SD: Start Job %127s %d %d", job_name, &fd_version, &sd_version) != 3) { + Jmsg1(jcr, M_FATAL, 0, _("Bad Hello from Client: %s.\n"), cl->msg); + Dmsg1(050, _("Bad Hello from Client: %s.\n"), cl->msg); + return false; + } + unbash_spaces(job_name); + jcr->FDVersion = fd_version; + jcr->SDVersion = sd_version; + Dmsg1(050, "FDVersion=%d\n", fd_version); + /* Turn on compression for newer FDs, except for Community version */ + if (jcr->FDVersion >= 214 && me->comm_compression) { + cl->set_compress(); /* set compression allowed */ + } else { + cl->clear_compress(); + Dmsg0(050, "*** No SD compression to FD\n"); + } + + return true; +} + +/* + * Send Hello OK to DIR or FD + */ +bool send_hello_ok(BSOCK *bs) +{ + return bs->fsend(OK_hello, SD_VERSION); +} + +bool send_sorry(BSOCK *bs) +{ + return bs->fsend(Sorry); +} + +/* + * We are acting as a client, so send Hello to the SD. + */ +bool send_hello_sd(JCR *jcr, char *Job) +{ + bool rtn; + BSOCK *sd = jcr->store_bsock; + + bash_spaces(Job); + rtn = sd->fsend(hello_sd, Job, FD_VERSION, SD_VERSION); + unbash_spaces(Job); + Dmsg1(100, "Send to SD: %s\n", sd->msg); + if (!rtn) { + return false; + } + return true; +} + +/* + * We are SD so send Hello to client + * Note: later the Client will send us a Hello. + */ +bool send_hello_client(JCR *jcr, char *Job) +{ + bool rtn; + BSOCK *cl = jcr->file_bsock; + + bash_spaces(Job); + rtn = cl->fsend("Hello FD: Bacula Storage calling Start Job %s %d\n", Job, SD_VERSION); + unbash_spaces(Job); + if (!rtn) { + return false; + } + return rtn; +} diff --git a/src/stored/init_dev.c b/src/stored/init_dev.c new file mode 100644 index 00000000..9f4999a8 --- /dev/null +++ b/src/stored/init_dev.c @@ -0,0 +1,474 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Initialize a single device. 
+ * + * This code was split from dev.c in January 2016 + * + * written by, Kern Sibbald, MM + */ + +#include "bacula.h" +#include "stored.h" +#include + +/* Define possible extensions */ +#if defined(HAVE_WIN32) +#define DRV_EXT ".dll" +#elif defined(HAVE_DARWIN_OS) +#define DRV_EXT ".dylib" +#else +#define DRV_EXT ".so" +#endif + +#ifndef RTLD_NOW +#define RTLD_NOW 2 +#endif + +/* Forward referenced functions */ +extern "C" { +typedef DEVICE *(*newDriver_t)(JCR *jcr, DEVRES *device); +} + +static DEVICE *load_driver(JCR *jcr, DEVRES *device); + +/* + * Driver item for driver table +*/ +struct driver_item { + const char *name; + void *handle; + newDriver_t newDriver; + bool builtin; + bool loaded; +}; + +/* + * Driver table. Must be in same order as the B_xxx_DEV type + * name handle, builtin loaded + */ +static driver_item driver_tab[] = { +/* name handle, newDriver builtin loaded */ + {"file", NULL, NULL, true, true}, + {"tape", NULL, NULL, true, true}, + {"none", NULL, NULL, true, true}, /* deprecated was DVD */ + {"fifo", NULL, NULL, true, true}, + {"vtape", NULL, NULL, true, true}, + {"ftp", NULL, NULL, true, true}, + {"vtl", NULL, NULL, true, true}, + {"none", NULL, NULL, true, true}, /* B_ADATA_DEV */ + {"aligned", NULL, NULL, false, false}, + {"none", NULL, NULL, true, true}, /* deprecated was old dedup */ + {"null", NULL, NULL, true, true}, + {"none", NULL, NULL, true, true}, /* deprecated B_VALIGNED_DEV */ + {"none", NULL, NULL, true, true}, /* deprecated B_VDEDUP_DEV */ + {"cloud", NULL, NULL, false, false}, + {"none", NULL, NULL, false, false}, + {NULL, NULL, NULL, false, false} +}; + +static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; + +/* The alist should be created with not_owned_by_alist argument */ +void sd_list_loaded_drivers(alist *list) +{ + for(int i=0 ; driver_tab[i].name != NULL; i++) { + if (driver_tab[i].loaded && !driver_tab[i].builtin) { + list->append((void*)driver_tab[i].name); + } + } +} + +/* + * Allocate and initialize the DEVICE structure + * Note, if dev is non-NULL, it is already allocated, + * thus we neither allocate it nor free it. This allows + * the caller to put the packet in shared memory. + * + * Note, for a tape, the device->device_name is the device name + * (e.g. /dev/nst0), and for a file, the device name + * is the directory in which the file will be placed. + * + */ +DEVICE *init_dev(JCR *jcr, DEVRES *device, bool adata) +{ + struct stat statp; + DEVICE *dev = NULL; + uint32_t n_drivers; + + generate_global_plugin_event(bsdGlobalEventDeviceInit, device); + Dmsg1(150, "init_dev dev_type=%d\n", device->dev_type); + /* If no device type specified, try to guess */ + if (!device->dev_type) { + /* Check that device is available */ + if (stat(device->device_name, &statp) < 0) { + berrno be; + Jmsg3(jcr, M_ERROR, 0, _("[SE0001] Unable to stat device %s at %s: ERR=%s\n"), + device->hdr.name, device->device_name, be.bstrerror()); + return NULL; + } + if (S_ISDIR(statp.st_mode)) { + device->dev_type = B_FILE_DEV; + } else if (S_ISCHR(statp.st_mode)) { + device->dev_type = B_TAPE_DEV; + } else if (S_ISFIFO(statp.st_mode)) { + device->dev_type = B_FIFO_DEV; +#ifdef USE_VTAPE + /* must set DeviceType = Vtape + * in normal mode, autodetection is disabled + */ + } else if (S_ISREG(statp.st_mode)) { + device->dev_type = B_VTAPE_DEV; +#endif + } else if (!(device->cap_bits & CAP_REQMOUNT)) { + Jmsg2(jcr, M_ERROR, 0, _("[SE0002] %s is an unknown device type. Must be tape or directory." 
+ " st_mode=%x\n"), + device->device_name, statp.st_mode); + return NULL; + } + if (strcmp(device->device_name, "/dev/null") == 0) { + device->dev_type = B_NULL_DEV; + } + } + + /* Count drivers */ + for (n_drivers=0; driver_tab[n_drivers].name; n_drivers++) { }; + Dmsg1(100, "Num drivers=%d\n", n_drivers); + + /* If invalid dev_type get out */ + if (device->dev_type < 0 || device->dev_type > n_drivers) { + Jmsg2(jcr, M_FATAL, 0, _("[SF0001] Invalid device type=%d name=\"%s\"\n"), + device->dev_type, device->hdr.name); + return NULL; + } + Dmsg5(100, "loadable=%d type=%d loaded=%d name=%s handle=%p\n", + !driver_tab[device->dev_type-1].builtin, + device->dev_type, + driver_tab[device->dev_type-1].loaded, + driver_tab[device->dev_type-1].name, + driver_tab[device->dev_type-1].handle); + if (driver_tab[device->dev_type-1].builtin) { + /* Built-in driver */ + switch (device->dev_type) { +#ifdef HAVE_WIN32 + case B_TAPE_DEV: +// dev = New(win_tape_dev); + break; + case B_ADATA_DEV: + case B_ALIGNED_DEV: + case B_FILE_DEV: + dev = New(win_file_dev); + dev->capabilities |= CAP_LSEEK; + break; + case B_NULL_DEV: + dev = New(win_file_dev); + break; +#else + case B_VTAPE_DEV: + dev = New(vtape); + break; + case B_TAPE_DEV: + dev = New(tape_dev); + break; + case B_FILE_DEV: + dev = New(file_dev); + dev->capabilities |= CAP_LSEEK; + break; + case B_NULL_DEV: + dev = New(null_dev); + break; + case B_FIFO_DEV: + dev = New(fifo_dev); + break; +#endif + default: + Jmsg2(jcr, M_FATAL, 0, _("[SF0002] Unknown device type=%d device=\"%s\"\n"), + device->dev_type, device->hdr.name); + return NULL; + } + } else { + /* Loadable driver */ + dev = load_driver(jcr, device); + } + if (!dev) { + return NULL; + } + + dev->adata = adata; + + /* Keep the device ID in the DEVICE struct to identify the hardware */ + if (dev->is_file() && stat(dev->archive_name(), &statp) == 0) { + dev->devno = statp.st_dev; + } + + dev->device_generic_init(jcr, device); + + /* Do device specific initialization */ + dev->device_specific_init(jcr, device); + + /* ***FIXME*** move to fifo driver */ + if (dev->is_fifo()) { + dev->capabilities |= CAP_STREAM; /* set stream device */ + } + + return dev; +} + +/* + * Do all the generic initialization here. Same for all devices. 
+ */ +void DEVICE::device_generic_init(JCR *jcr, DEVRES *device) +{ + struct stat statp; + DEVICE *dev = this; + DCR *dcr = NULL; + int errstat; + uint32_t max_bs; + + dev->clear_slot(); /* unknown */ + + /* Copy user supplied device parameters from Resource */ + dev->dev_name = get_memory(strlen(device->device_name)+1); + pm_strcpy(dev->dev_name, device->device_name); + dev->prt_name = get_memory(strlen(device->device_name) + strlen(device->hdr.name) + 20); + /* We edit "Resource-name" (physical-name) */ + Mmsg(dev->prt_name, "\"%s\" (%s)", device->hdr.name, device->device_name); + Dmsg1(400, "Allocate dev=%s\n", dev->print_name()); + dev->capabilities = device->cap_bits; + dev->min_free_space = device->min_free_space; + dev->min_block_size = device->min_block_size; + dev->max_block_size = device->max_block_size; + dev->max_volume_size = device->max_volume_size; + dev->max_file_size = device->max_file_size; + dev->padding_size = device->padding_size; + dev->file_alignment = device->file_alignment; + dev->max_concurrent_jobs = device->max_concurrent_jobs; + dev->volume_capacity = device->volume_capacity; + dev->max_rewind_wait = device->max_rewind_wait; + dev->max_open_wait = device->max_open_wait; + dev->vol_poll_interval = device->vol_poll_interval; + dev->max_spool_size = device->max_spool_size; + dev->drive_index = device->drive_index; + dev->enabled = device->enabled; + dev->autoselect = device->autoselect; + dev->read_only = device->read_only; + dev->dev_type = device->dev_type; + dev->device = device; + if (dev->is_tape()) { /* No parts on tapes */ + dev->max_part_size = 0; + } else { + dev->max_part_size = device->max_part_size; + } + /* Sanity check */ + if (dev->vol_poll_interval && dev->vol_poll_interval < 60) { + dev->vol_poll_interval = 60; + } + + if (!device->dev) { + device->dev = dev; + } + + /* If the device requires mount : + * - Check that the mount point is available + * - Check that (un)mount commands are defined + */ + if (dev->is_file() && dev->requires_mount()) { + if (!device->mount_point || stat(device->mount_point, &statp) < 0) { + berrno be; + dev->dev_errno = errno; + Jmsg2(jcr, M_ERROR_TERM, 0, _("[SA0003] Unable to stat mount point %s: ERR=%s\n"), + device->mount_point, be.bstrerror()); + } + + if (!device->mount_command || !device->unmount_command) { + Jmsg0(jcr, M_ERROR_TERM, 0, _("[SA0004] Mount and unmount commands must defined for a device which requires mount.\n")); + } + } + + + /* Sanity check */ + if (dev->max_block_size == 0) { + max_bs = DEFAULT_BLOCK_SIZE; + } else { + max_bs = dev->max_block_size; + } + if (dev->min_block_size > max_bs) { + Jmsg(jcr, M_ERROR_TERM, 0, _("[SA0005] Min block size > max on device %s\n"), + dev->print_name()); + } + if (dev->max_block_size > MAX_BLOCK_SIZE) { + Jmsg3(jcr, M_ERROR, 0, _("[SA0006] Block size %u on device %s is too large, using default %u\n"), + dev->max_block_size, dev->print_name(), DEFAULT_BLOCK_SIZE); + dev->max_block_size = DEFAULT_BLOCK_SIZE ; + } + if (dev->max_block_size % TAPE_BSIZE != 0) { + Jmsg3(jcr, M_WARNING, 0, _("[SW0007] Max block size %u not multiple of device %s block size=%d.\n"), + dev->max_block_size, dev->print_name(), TAPE_BSIZE); + } + if (dev->max_volume_size != 0 && dev->max_volume_size < (dev->max_block_size << 4)) { + Jmsg(jcr, M_ERROR_TERM, 0, _("[SA0008] Max Vol Size < 8 * Max Block Size for device %s\n"), + dev->print_name()); + } + + dev->errmsg = get_pool_memory(PM_EMSG); + *dev->errmsg = 0; + + if ((errstat = dev->init_mutex()) != 0) { + berrno be; + dev->dev_errno = 
errstat; + Mmsg1(dev->errmsg, _("[SA0009] Unable to init mutex: ERR=%s\n"), be.bstrerror(errstat)); + Jmsg0(jcr, M_ERROR_TERM, 0, dev->errmsg); + } + if ((errstat = pthread_cond_init(&dev->wait, NULL)) != 0) { + berrno be; + dev->dev_errno = errstat; + Mmsg1(dev->errmsg, _("[SA0010] Unable to init cond variable: ERR=%s\n"), be.bstrerror(errstat)); + Jmsg0(jcr, M_ERROR_TERM, 0, dev->errmsg); + } + if ((errstat = pthread_cond_init(&dev->wait_next_vol, NULL)) != 0) { + berrno be; + dev->dev_errno = errstat; + Mmsg1(dev->errmsg, _("[SA0011] Unable to init cond variable: ERR=%s\n"), be.bstrerror(errstat)); + Jmsg0(jcr, M_ERROR_TERM, 0, dev->errmsg); + } + if ((errstat = pthread_mutex_init(&dev->spool_mutex, NULL)) != 0) { + berrno be; + dev->dev_errno = errstat; + Mmsg1(dev->errmsg, _("[SA0012] Unable to init spool mutex: ERR=%s\n"), be.bstrerror(errstat)); + Jmsg0(jcr, M_ERROR_TERM, 0, dev->errmsg); + } + if ((errstat = dev->init_acquire_mutex()) != 0) { + berrno be; + dev->dev_errno = errstat; + Mmsg1(dev->errmsg, _("[SA0013] Unable to init acquire mutex: ERR=%s\n"), be.bstrerror(errstat)); + Jmsg0(jcr, M_ERROR_TERM, 0, dev->errmsg); + } + if ((errstat = dev->init_freespace_mutex()) != 0) { + berrno be; + dev->dev_errno = errstat; + Mmsg1(dev->errmsg, _("[SA0014] Unable to init freespace mutex: ERR=%s\n"), be.bstrerror(errstat)); + Jmsg0(jcr, M_ERROR_TERM, 0, dev->errmsg); + } + if ((errstat = dev->init_read_acquire_mutex()) != 0) { + berrno be; + dev->dev_errno = errstat; + Mmsg1(dev->errmsg, _("[SA0015] Unable to init read acquire mutex: ERR=%s\n"), be.bstrerror(errstat)); + Jmsg0(jcr, M_ERROR_TERM, 0, dev->errmsg); + } + if ((errstat = dev->init_volcat_mutex()) != 0) { + berrno be; + dev->dev_errno = errstat; + Mmsg1(dev->errmsg, _("[SA0016] Unable to init volcat mutex: ERR=%s\n"), be.bstrerror(errstat)); + Jmsg0(jcr, M_ERROR_TERM, 0, dev->errmsg); + } + if ((errstat = dev->init_dcrs_mutex()) != 0) { + berrno be; + dev->dev_errno = errstat; + Mmsg1(dev->errmsg, _("[SA0017] Unable to init dcrs mutex: ERR=%s\n"), be.bstrerror(errstat)); + Jmsg0(jcr, M_ERROR_TERM, 0, dev->errmsg); + } + + dev->set_mutex_priorities(); + + dev->clear_opened(); + dev->attached_dcrs = New(dlist(dcr, &dcr->dev_link)); + Dmsg2(100, "init_dev: tape=%d dev_name=%s\n", dev->is_tape(), dev->dev_name); + dev->initiated = true; +} + +static DEVICE *load_driver(JCR *jcr, DEVRES *device) +{ + POOL_MEM fname(PM_FNAME); + DEVICE *dev; + driver_item *drv; + const char *slash; + void *pHandle; + int len; + newDriver_t newDriver; + + P(mutex); + if (!me->plugin_directory) { + Jmsg2(jcr, M_FATAL, 0, _("[SF0018] Plugin directory not defined. Cannot load SD %s driver for device %s.\n"), + driver_tab[device->dev_type - 1], device->hdr.name); + V(mutex); + return NULL; + } + len = strlen(me->plugin_directory); + if (len == 0) { + Jmsg0(jcr, M_FATAL, 0, _("[SF0019] Plugin directory not defined. 
Cannot load drivers.\n")); + V(mutex); + return NULL; + } + + if (IsPathSeparator(me->plugin_directory[len - 1])) { + slash = ""; + } else { + slash = "/"; + } + + Dmsg5(100, "loadable=%d type=%d loaded=%d name=%s handle=%p\n", + !driver_tab[device->dev_type-1].builtin, + device->dev_type, + driver_tab[device->dev_type-1].loaded, + driver_tab[device->dev_type-1].name, + driver_tab[device->dev_type-1].handle); + drv = &driver_tab[device->dev_type - 1]; + Mmsg(fname, "%s%sbacula-sd-%s-driver%s%s", me->plugin_directory, slash, + drv->name, "-" VERSION, DRV_EXT); + if (!drv->loaded) { + Dmsg1(10, "Open SD driver at %s\n", fname.c_str()); + pHandle = dlopen(fname.c_str(), RTLD_NOW); + if (pHandle) { + Dmsg2(100, "Driver=%s handle=%p\n", drv->name, pHandle); + /* Get global entry point */ + Dmsg1(10, "Lookup \"BaculaSDdriver\" in driver=%s\n", drv->name); + newDriver = (newDriver_t)dlsym(pHandle, "BaculaSDdriver"); + Dmsg2(10, "Driver=%s entry point=%p\n", drv->name, newDriver); + if (!newDriver) { + const char *error = dlerror(); + Jmsg(NULL, M_ERROR, 0, _("[SE0003] Lookup of symbol \"BaculaSDdriver\" in driver %s for device %s failed: ERR=%s\n"), + device->hdr.name, fname.c_str(), NPRT(error)); + Dmsg2(10, "Lookup of symbol \"BaculaSDdriver\" driver=%s failed: ERR=%s\n", + fname.c_str(), NPRT(error)); + dlclose(pHandle); + V(mutex); + return NULL; + } + drv->handle = pHandle; + drv->loaded = true; + drv->newDriver = newDriver; + } else { + /* dlopen failed */ + const char *error = dlerror(); + Jmsg3(jcr, M_FATAL, 0, _("[SF0020] dlopen of SD driver=%s at %s failed: ERR=%s\n"), + drv->name, fname.c_str(), NPRT(error)); + Dmsg2(000, "dlopen plugin %s failed: ERR=%s\n", fname.c_str(), + NPRT(error)); + V(mutex); + return NULL; + } + } else { + Dmsg1(10, "SD driver=%s is already loaded.\n", drv->name); + } + + /* Call driver initialization */ + dev = drv->newDriver(jcr, device); + V(mutex); + return dev; +} diff --git a/src/stored/job.c b/src/stored/job.c new file mode 100644 index 00000000..02a659df --- /dev/null +++ b/src/stored/job.c @@ -0,0 +1,412 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Job control and execution for Storage Daemon + * + * Written by Kern Sibbald, MM + * + */ + +#include "bacula.h" +#include "stored.h" + +static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; + +/* Imported variables */ +extern STORES *me; /* our Global resource */ +extern uint32_t VolSessionTime; + +/* Imported functions */ +extern uint32_t newVolSessionId(); +extern bool do_vbackup(JCR *jcr); + +/* Requests from the Director daemon */ +static char jobcmd[] = "JobId=%d job=%127s job_name=%127s client_name=%127s " + "type=%d level=%d FileSet=%127s NoAttr=%d SpoolAttr=%d FileSetMD5=%127s " + "SpoolData=%d WritePartAfterJob=%d PreferMountedVols=%d SpoolSize=%s " + "rerunning=%d VolSessionId=%d VolSessionTime=%d sd_client=%d " + "Authorization=%s\n"; + +/* Responses sent to Director daemon */ +static char OKjob[] = "3000 OK Job SDid=%u SDtime=%u Authorization=%s\n"; +static char BAD_job[] = "3915 Bad Job command. stat=%d CMD: %s\n"; + +/* + * Director requests us to start a job + * Basic tasks done here: + * - We pickup the JobId to be run from the Director. + * - We pickup the device, media, and pool from the Director + * - Wait for a connection from the File Daemon (FD) + * - Accept commands from the FD (i.e. run the job) + * - Return when the connection is terminated or + * there is an error. + */ +bool job_cmd(JCR *jcr) +{ + int32_t JobId; + char sd_auth_key[200]; + char spool_size[30]; + char seed[100]; + BSOCK *dir = jcr->dir_bsock; + POOL_MEM job_name, client_name, job, fileset_name, fileset_md5; + int32_t JobType, level, spool_attributes, no_attributes, spool_data; + int32_t write_part_after_job, PreferMountedVols; + int32_t rerunning; + int32_t is_client; + int stat; + JCR *ojcr; + + /* + * Get JobId and permissions from Director + */ + Dmsg1(100, "msg); + bstrncpy(spool_size, "0", sizeof(spool_size)); + stat = sscanf(dir->msg, jobcmd, &JobId, job.c_str(), job_name.c_str(), + client_name.c_str(), + &JobType, &level, fileset_name.c_str(), &no_attributes, + &spool_attributes, fileset_md5.c_str(), &spool_data, + &write_part_after_job, &PreferMountedVols, spool_size, + &rerunning, &jcr->VolSessionId, &jcr->VolSessionTime, + &is_client, &sd_auth_key); + if (stat != 19) { + pm_strcpy(jcr->errmsg, dir->msg); + dir->fsend(BAD_job, stat, jcr->errmsg); + Dmsg1(100, ">dird: %s", dir->msg); + jcr->setJobStatus(JS_ErrorTerminated); + return false; + } + jcr->rerunning = rerunning; + jcr->sd_client = is_client; + if (is_client) { + jcr->sd_auth_key = bstrdup(sd_auth_key); + } + Dmsg3(100, "rerunning=%d VolSesId=%d VolSesTime=%d\n", jcr->rerunning, + jcr->VolSessionId, jcr->VolSessionTime); + /* + * Since this job could be rescheduled, we + * check to see if we have it already. If so + * free the old jcr and use the new one. + */ + ojcr = get_jcr_by_full_name(job.c_str()); + if (ojcr && !ojcr->authenticated) { + Dmsg2(100, "Found ojcr=0x%x Job %s\n", (unsigned)(intptr_t)ojcr, job.c_str()); + free_jcr(ojcr); + } + jcr->JobId = JobId; + Dmsg2(800, "Start JobId=%d %p\n", JobId, jcr); + set_jcr_in_tsd(jcr); + + /* + * If job rescheduled because previous was incomplete, + * the Resched flag is set and VolSessionId and VolSessionTime + * are given to us (same as restarted job). 
+ */ + if (!jcr->rerunning) { + jcr->VolSessionId = newVolSessionId(); + jcr->VolSessionTime = VolSessionTime; + } + bstrncpy(jcr->Job, job, sizeof(jcr->Job)); + unbash_spaces(job_name); + jcr->job_name = get_pool_memory(PM_NAME); + pm_strcpy(jcr->job_name, job_name); + unbash_spaces(client_name); + jcr->client_name = get_pool_memory(PM_NAME); + pm_strcpy(jcr->client_name, client_name); + unbash_spaces(fileset_name); + jcr->fileset_name = get_pool_memory(PM_NAME); + pm_strcpy(jcr->fileset_name, fileset_name); + jcr->setJobType(JobType); + jcr->setJobLevel(level); + jcr->no_attributes = no_attributes; + jcr->spool_attributes = spool_attributes; + jcr->spool_data = spool_data; + jcr->spool_size = str_to_int64(spool_size); + jcr->write_part_after_job = write_part_after_job; + jcr->fileset_md5 = get_pool_memory(PM_NAME); + pm_strcpy(jcr->fileset_md5, fileset_md5); + jcr->PreferMountedVols = PreferMountedVols; + + + jcr->authenticated = false; + + /* + * Pass back an authorization key for the File daemon + */ + if (jcr->sd_client) { + bstrncpy(sd_auth_key, "xxx", 3); + } else { + bsnprintf(seed, sizeof(seed), "%p%d", jcr, JobId); + make_session_key(sd_auth_key, seed, 1); + } + dir->fsend(OKjob, jcr->VolSessionId, jcr->VolSessionTime, sd_auth_key); + Dmsg2(150, ">dird jid=%u: %s", (uint32_t)jcr->JobId, dir->msg); + /* If not client, set key, otherwise it is already set */ + if (!jcr->sd_client) { + jcr->sd_auth_key = bstrdup(sd_auth_key); + memset(sd_auth_key, 0, sizeof(sd_auth_key)); + } + new_plugins(jcr); /* instantiate the plugins */ + generate_daemon_event(jcr, "JobStart"); + generate_plugin_event(jcr, bsdEventJobStart, (void *)"JobStart"); + return true; +} + +bool run_cmd(JCR *jcr) +{ + struct timeval tv; + struct timezone tz; + struct timespec timeout; + int errstat = 0; + + Dsm_check(200); + Dmsg1(200, "Run_cmd: %s\n", jcr->dir_bsock->msg); + + /* + * If we do not need the FD, + * we are doing a migration, copy, or virtual backup. + */ + if (jcr->no_client_used()) { + do_vbackup(jcr); + return false; + } + + jcr->sendJobStatus(JS_WaitFD); /* wait for FD to connect */ + + Dmsg2(050, "sd_calls_client=%d sd_client=%d\n", jcr->sd_calls_client, jcr->sd_client); + if (jcr->sd_calls_client) { + if (!read_client_hello(jcr)) { + return false; + } + /* + * Authenticate the File daemon + */ + Dmsg0(050, "=== Authenticate FD\n"); + if (jcr->authenticated || !authenticate_filed(jcr, jcr->file_bsock, jcr->FDVersion)) { + Dmsg1(050, "Authentication failed Job %s\n", jcr->Job); + Qmsg(jcr, M_FATAL, 0, _("Unable to authenticate File daemon\n")); + } else { + jcr->authenticated = true; + } + } else if (!jcr->sd_client) { + /* We wait to receive connection from Client */ + gettimeofday(&tv, &tz); + timeout.tv_nsec = tv.tv_usec * 1000; + timeout.tv_sec = tv.tv_sec + me->client_wait; + + Dmsg3(050, "%s waiting %d sec for FD to contact SD key=%s\n", + jcr->Job, (int)(timeout.tv_sec-time(NULL)), jcr->sd_auth_key); + + Dmsg3(800, "=== Block Job=%s jid=%d %p\n", jcr->Job, jcr->JobId, jcr); + + /* + * Wait for the File daemon to contact us to start the Job, + * when he does, we will be released, unless the 30 minutes + * expires. 
+ */
+      P(mutex);
+      while ( !jcr->authenticated && !job_canceled(jcr) ) {
+         errstat = pthread_cond_timedwait(&jcr->job_start_wait, &mutex, &timeout);
+         if (errstat == ETIMEDOUT || errstat == EINVAL || errstat == EPERM) {
+            break;
+         }
+         Dmsg1(800, "=== Auth cond errstat=%d\n", errstat);
+      }
+      Dmsg4(050, "=== Auth=%d jid=%d canceled=%d errstat=%d\n",
+            jcr->JobId, jcr->authenticated, job_canceled(jcr), errstat);
+      V(mutex);
+      Dmsg2(800, "Auth fail or cancel for jid=%d %p\n", jcr->JobId, jcr);
+   }
+
+   memset(jcr->sd_auth_key, 0, strlen(jcr->sd_auth_key));
+
+   if (jcr->authenticated && !job_canceled(jcr)) {
+      Dmsg2(050, "Running jid=%d %p\n", jcr->JobId, jcr);
+      run_job(jcr);                   /* Run the job */
+   }
+   Dmsg2(800, "Done jid=%d %p\n", jcr->JobId, jcr);
+   return false;
+}
+
+
+#ifdef needed
+/*
+ *   Query Device command from Director
+ *   Sends Storage Daemon's information on the device to the
+ *    caller (presumably the Director).
+ *   This command always returns "true" so that the line is
+ *    not closed on an error.
+ *
+ */
+bool query_cmd(JCR *jcr)
+{
+   POOL_MEM dev_name, VolumeName, MediaType, ChangerName;
+   BSOCK *dir = jcr->dir_bsock;
+   DEVRES *device;
+   AUTOCHANGER *changer;
+   bool ok;
+
+   Dmsg1(100, "Query_cmd: %s", dir->msg);
+   ok = sscanf(dir->msg, query_device, dev_name.c_str()) == 1;
+   Dmsg1(100, "<dird: %s", dir->msg);
+   if (ok) {
+      unbash_spaces(dev_name);
+      foreach_res(device, R_DEVICE) {
+         /* Find resource, and make sure we were able to open it */
+         if (strcmp(dev_name.c_str(), device->hdr.name) == 0) {
+            if (!device->dev) {
+               device->dev = init_dev(jcr, device);
+            }
+            if (!device->dev) {
+               break;
+            }
+            ok = dir_update_device(jcr, device->dev);
+            if (ok) {
+               ok = dir->fsend(OK_query);
+            } else {
+               dir->fsend(NO_query);
+            }
+            return ok;
+         }
+      }
+      foreach_res(changer, R_AUTOCHANGER) {
+         /* Find resource, and make sure we were able to open it */
+         if (strcmp(dev_name.c_str(), changer->hdr.name) == 0) {
+            if (!changer->device || changer->device->size() == 0) {
+               continue;              /* no devices */
+            }
+            ok = dir_update_changer(jcr, changer);
+            if (ok) {
+               ok = dir->fsend(OK_query);
+            } else {
+               dir->fsend(NO_query);
+            }
+            return ok;
+         }
+      }
+      /* If we get here, the device/autochanger was not found */
+      unbash_spaces(dir->msg);
+      pm_strcpy(jcr->errmsg, dir->msg);
+      dir->fsend(NO_device, dev_name.c_str());
+      Dmsg1(100, ">dird: %s\n", dir->msg);
+   } else {
+      unbash_spaces(dir->msg);
+      pm_strcpy(jcr->errmsg, dir->msg);
+      dir->fsend(BAD_query, jcr->errmsg);
+      Dmsg1(100, ">dird: %s\n", dir->msg);
+   }
+
+   return true;
+}
+
+#endif
+
+
+/*
+ * Destroy the Job Control Record and associated
+ *   resources (sockets).
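+ *
+ * [Editorial note, not part of the original source: this routine is the
+ *  SD-specific destructor invoked from the common free_jcr() code (see the
+ *  "In free_jcr(), but still attached to device" message below); it releases
+ *  the sockets, DCRs, pool memory and jobmedia queue owned by the Storage
+ *  Daemon, while the JCR structure itself is freed by the caller.]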
+ */ +void stored_free_jcr(JCR *jcr) +{ + Dmsg2(800, "End Job JobId=%u %p\n", jcr->JobId, jcr); + if (jcr->jobmedia_queue) { + flush_jobmedia_queue(jcr); + delete jcr->jobmedia_queue; + jcr->jobmedia_queue = NULL; + } + + if (jcr->dir_bsock) { + Dmsg2(800, "Send terminate jid=%d %p\n", jcr->JobId, jcr); + jcr->dir_bsock->signal(BNET_EOD); + jcr->dir_bsock->signal(BNET_TERMINATE); + jcr->dir_bsock->destroy(); + } + if (jcr->file_bsock) { + jcr->file_bsock->destroy(); + } + if (jcr->job_name) { + free_pool_memory(jcr->job_name); + } + if (jcr->client_name) { + free_memory(jcr->client_name); + jcr->client_name = NULL; + } + if (jcr->fileset_name) { + free_memory(jcr->fileset_name); + } + if (jcr->fileset_md5) { + free_memory(jcr->fileset_md5); + } + if (jcr->bsr) { + free_bsr(jcr->bsr); + jcr->bsr = NULL; + } + /* Free any restore volume list created */ + free_restore_volume_list(jcr); + if (jcr->RestoreBootstrap) { + unlink(jcr->RestoreBootstrap); + free_pool_memory(jcr->RestoreBootstrap); + jcr->RestoreBootstrap = NULL; + } + if (jcr->next_dev || jcr->prev_dev) { + Qmsg0(NULL, M_FATAL, 0, _("In free_jcr(), but still attached to device!!!!\n")); + } + pthread_cond_destroy(&jcr->job_start_wait); + if (jcr->dcrs) { + delete jcr->dcrs; + } + jcr->dcrs = NULL; + + /* Avoid a double free */ + if (jcr->dcr == jcr->read_dcr) { + jcr->read_dcr = NULL; + } + if (jcr->dcr) { + free_dcr(jcr->dcr); + jcr->dcr = NULL; + } + if (jcr->read_dcr) { + free_dcr(jcr->read_dcr); + jcr->read_dcr = NULL; + } + + if (jcr->read_store) { + DIRSTORE *store; + foreach_alist(store, jcr->read_store) { + delete store->device; + delete store; + } + delete jcr->read_store; + jcr->read_store = NULL; + } + if (jcr->write_store) { + DIRSTORE *store; + foreach_alist(store, jcr->write_store) { + delete store->device; + delete store; + } + delete jcr->write_store; + jcr->write_store = NULL; + } + Dsm_check(200); + + if (jcr->JobId != 0) + write_state_file(me->working_directory, "bacula-sd", get_first_port_host_order(me->sdaddrs)); + + return; +} diff --git a/src/stored/label.c b/src/stored/label.c new file mode 100644 index 00000000..c061e52a --- /dev/null +++ b/src/stored/label.c @@ -0,0 +1,1397 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * label.c Bacula routines to handle labels + * + * Kern Sibbald, MM + * + */ + +#include "bacula.h" /* pull in global headers */ +#include "stored.h" /* pull in Storage Deamon headers */ + +static const int dbglvl = 100; + +/* Forward referenced functions */ +static void create_volume_label_record(DCR *dcr, DEVICE *dev, DEV_RECORD *rec, bool adata); + +/* + * Read the volume label + * + * If dcr->VolumeName == NULL, we accept any Bacula Volume + * If dcr->VolumeName[0] == 0, we accept any Bacula Volume + * otherwise dcr->VolumeName must match the Volume. 
+ * + * If VolName given, ensure that it matches + * + * Returns VOL_ code as defined in record.h + * VOL_NOT_READ + * VOL_OK good label found + * VOL_NO_LABEL volume not labeled + * VOL_IO_ERROR I/O error reading tape + * VOL_NAME_ERROR label has wrong name + * VOL_CREATE_ERROR Error creating label + * VOL_VERSION_ERROR label has wrong version + * VOL_LABEL_ERROR bad label type + * VOL_NO_MEDIA no media in drive + * VOL_TYPE_ERROR aligned/non-aligned/dedup error + * + * The dcr block is emptied on return, and the Volume is + * rewound. + * + * Handle both the ameta and adata volumes. + */ +int DEVICE::read_dev_volume_label(DCR *dcr) +{ + JCR *jcr = dcr->jcr; + char *VolName = dcr->VolumeName; + DEV_RECORD *record; + bool ok = false; + DEV_BLOCK *block = dcr->block; + int stat; + bool want_ansi_label; + bool have_ansi_label = false; + + Enter(dbglvl); + Dmsg5(dbglvl, "Enter read_volume_label adata=%d res=%d device=%s vol=%s dev_Vol=%s\n", + block->adata, num_reserved(), print_name(), VolName, + VolHdr.VolumeName[0]?VolHdr.VolumeName:"*NULL*"); + + + if (!is_open()) { + if (!open_device(dcr, OPEN_READ_ONLY)) { + Leave(dbglvl); + return VOL_IO_ERROR; + } + } + + clear_labeled(); + clear_append(); + clear_read(); + label_type = B_BACULA_LABEL; + set_worm(get_tape_worm(dcr)); + Dmsg1(dbglvl, "==== worm=%d ====\n", is_worm()); + + if (!rewind(dcr)) { + Mmsg(jcr->errmsg, _("Couldn't rewind %s device %s: ERR=%s\n"), + print_type(), print_name(), print_errmsg()); + Dmsg1(dbglvl, "return VOL_NO_MEDIA: %s", jcr->errmsg); + Leave(dbglvl); + return VOL_NO_MEDIA; + } + bstrncpy(VolHdr.Id, "**error**", sizeof(VolHdr.Id)); + + /* Read ANSI/IBM label if so requested */ + want_ansi_label = dcr->VolCatInfo.LabelType != B_BACULA_LABEL || + dcr->device->label_type != B_BACULA_LABEL; + if (want_ansi_label || has_cap(CAP_CHECKLABELS)) { + stat = read_ansi_ibm_label(dcr); + /* If we want a label and didn't find it, return error */ + if (want_ansi_label && stat != VOL_OK) { + goto bail_out; + } + if (stat == VOL_NAME_ERROR || stat == VOL_LABEL_ERROR) { + Mmsg(jcr->errmsg, _("Wrong Volume mounted on %s device %s: Wanted %s have %s\n"), + print_type(), print_name(), VolName, VolHdr.VolumeName); + if (!poll && jcr->label_errors++ > 100) { + Jmsg(jcr, M_FATAL, 0, _("Too many tries: %s"), jcr->errmsg); + } + goto bail_out; + } + if (stat != VOL_OK) { /* Not an ANSI/IBM label, so re-read */ + rewind(dcr); + } else { + have_ansi_label = true; + } + } + + /* Read the Bacula Volume label block */ + record = new_record(); + empty_block(block); + + Dmsg0(130, "Big if statement in read_volume_label\n"); + dcr->reading_label = true; + if (!dcr->read_block_from_dev(NO_BLOCK_NUMBER_CHECK)) { + Mmsg(jcr->errmsg, _("Read label block failed: requested Volume \"%s\" on %s device %s is not a Bacula " + "labeled Volume, because: ERR=%s"), NPRT(VolName), + print_type(), print_name(), print_errmsg()); + Dmsg1(dbglvl, "%s", jcr->errmsg); + } else if (!read_record_from_block(dcr, record)) { + Mmsg(jcr->errmsg, _("Could not read Volume label from block.\n")); + Dmsg1(dbglvl, "%s", jcr->errmsg); + } else if (!unser_volume_label(this, record)) { + Mmsg(jcr->errmsg, _("Could not unserialize Volume label: ERR=%s\n"), + print_errmsg()); + Dmsg1(dbglvl, "%s", jcr->errmsg); + } else if (strcmp(VolHdr.Id, BaculaId) != 0 && + strcmp(VolHdr.Id, OldBaculaId) != 0 && + strcmp(VolHdr.Id, BaculaMetaDataId) != 0 && + strcmp(VolHdr.Id, BaculaAlignedDataId) != 0 && + strcmp(VolHdr.Id, BaculaS3CloudId) != 0) { + Mmsg(jcr->errmsg, _("Volume Header Id bad: 
%s\n"), VolHdr.Id); + Dmsg1(dbglvl, "%s", jcr->errmsg); + } else { + ok = true; + Dmsg1(dbglvl, "VolHdr.Id OK: %s\n", VolHdr.Id); + } + dcr->reading_label = false; + free_record(record); /* finished reading Volume record */ + + if (!is_volume_to_unload()) { + clear_unload(); + } + + if (!ok) { + if (jcr->ignore_label_errors) { + set_labeled(); /* set has Bacula label */ + if (jcr->errmsg[0]) { + Jmsg(jcr, M_ERROR, 0, "%s", jcr->errmsg); + } + empty_block(block); + Leave(dbglvl); + return VOL_OK; + } + Dmsg0(dbglvl, "No volume label - bailing out\n"); + stat = VOL_NO_LABEL; + goto bail_out; + } + + /* At this point, we have read the first Bacula block, and + * then read the Bacula Volume label. Now we need to + * make sure we have the right Volume. + */ + if (VolHdr.VerNum != BaculaTapeVersion && + VolHdr.VerNum != BaculaMetaDataVersion && + VolHdr.VerNum != BaculaS3CloudVersion && + VolHdr.VerNum != OldCompatibleBaculaTapeVersion1 && + VolHdr.VerNum != OldCompatibleBaculaTapeVersion2) { + Mmsg(jcr->errmsg, _("Volume on %s device %s has wrong Bacula version. Wanted %d got %d\n"), + print_type(), print_name(), BaculaTapeVersion, VolHdr.VerNum); + Dmsg1(dbglvl, "VOL_VERSION_ERROR: %s", jcr->errmsg); + stat = VOL_VERSION_ERROR; + goto bail_out; + } + Dmsg1(dbglvl, "VolHdr.VerNum=%ld OK.\n", VolHdr.VerNum); + + /* We are looking for either an unused Bacula tape (PRE_LABEL) or + * a Bacula volume label (VOL_LABEL) + */ + if (VolHdr.LabelType != PRE_LABEL && VolHdr.LabelType != VOL_LABEL) { + Mmsg(jcr->errmsg, _("Volume on %s device %s has bad Bacula label type: %ld\n"), + print_type(), print_name(), VolHdr.LabelType); + Dmsg1(dbglvl, "%s", jcr->errmsg); + if (!poll && jcr->label_errors++ > 100) { + Jmsg(jcr, M_FATAL, 0, _("Too many tries: %s"), jcr->errmsg); + } + Dmsg0(dbglvl, "return VOL_LABEL_ERROR\n"); + stat = VOL_LABEL_ERROR; + goto bail_out; + } + + set_labeled(); /* set has Bacula label */ + + /* Compare Volume Names */ + Dmsg2(130, "Compare Vol names: VolName=%s hdr=%s\n", VolName?VolName:"*", VolHdr.VolumeName); + if (VolName && *VolName && *VolName != '*' && strcmp(VolHdr.VolumeName, VolName) != 0) { + Mmsg(jcr->errmsg, _("Wrong Volume mounted on %s device %s: Wanted %s have %s\n"), + print_type(), print_name(), VolName, VolHdr.VolumeName); + Dmsg1(dbglvl, "%s", jcr->errmsg); + /* + * Cancel Job if too many label errors + * => we are in a loop + */ + if (!poll && jcr->label_errors++ > 100) { + Jmsg(jcr, M_FATAL, 0, "Too many tries: %s", jcr->errmsg); + } + Dmsg0(dbglvl, "return VOL_NAME_ERROR\n"); + stat = VOL_NAME_ERROR; + goto bail_out; + } + + /* Compare VolType to Device Type */ + switch (dev_type) { + case B_FILE_DEV: + if (strcmp(VolHdr.Id, BaculaId) != 0) { + Mmsg(jcr->errmsg, _("Wrong Volume Type. Wanted a File or Tape Volume %s on device %s, but got: %s\n"), + VolHdr.VolumeName, print_name(), VolHdr.Id); + stat = VOL_TYPE_ERROR; + goto bail_out; + } + break; + case B_ALIGNED_DEV: + case B_ADATA_DEV: + if (strcmp(VolHdr.Id, BaculaMetaDataId) != 0) { + Mmsg(jcr->errmsg, _("Wrong Volume Type. Wanted an Aligned Volume %s on device %s, but got: %s\n"), + VolHdr.VolumeName, print_name(), VolHdr.Id); + stat = VOL_TYPE_ERROR; + goto bail_out; + } + break; + case B_CLOUD_DEV: + if (strcmp(VolHdr.Id, BaculaS3CloudId) != 0) { + Mmsg(jcr->errmsg, _("Wrong Volume Type. 
Wanted a Cloud Volume %s on device %s, but got: %s\n"), + VolHdr.VolumeName, print_name(), VolHdr.Id); + stat = VOL_TYPE_ERROR; + goto bail_out; + } + default: + break; + } + + if (chk_dbglvl(100)) { + dump_volume_label(); + } + Dmsg0(dbglvl, "Leave read_volume_label() VOL_OK\n"); + /* If we are a streaming device, we only get one chance to read */ + if (!has_cap(CAP_STREAM)) { + rewind(dcr); + if (have_ansi_label) { + stat = read_ansi_ibm_label(dcr); + /* If we want a label and didn't find it, return error */ + if (stat != VOL_OK) { + goto bail_out; + } + } + } + + Dmsg1(100, "Call reserve_volume=%s\n", VolHdr.VolumeName); + if (reserve_volume(dcr, VolHdr.VolumeName) == NULL) { + if (!jcr->errmsg[0]) { + Mmsg3(jcr->errmsg, _("Could not reserve volume %s on %s device %s\n"), + VolHdr.VolumeName, print_type(), print_name()); + } + Dmsg2(dbglvl, "Could not reserve volume %s on %s\n", VolHdr.VolumeName, print_name()); + stat = VOL_NAME_ERROR; + goto bail_out; + } + + if (dcr->is_writing()) { + empty_block(block); + } + + Leave(dbglvl); + return VOL_OK; + +bail_out: + empty_block(block); + rewind(dcr); + Dmsg2(dbglvl, "return stat=%d %s", stat, jcr->errmsg); + Leave(dbglvl); + return stat; +} + + +/* + * Create and put a volume label into the block + * + * Returns: false on failure + * true on success + * + * Handle both the ameta and adata volumes. + */ +bool DEVICE::write_volume_label_to_block(DCR *dcr) +{ + DEVICE *dev; + DEV_BLOCK *block; + DEV_RECORD rec; + JCR *jcr = dcr->jcr; + bool ok = true; + + Enter(100); + dev = dcr->dev; + block = dcr->block; + memset(&rec, 0, sizeof(rec)); + rec.data = get_memory(SER_LENGTH_Volume_Label); + memset(rec.data, 0, SER_LENGTH_Volume_Label); + empty_block(block); /* Volume label always at beginning */ + + create_volume_label_record(dcr, dcr->dev, &rec, dcr->block->adata); + + block->BlockNumber = 0; + /* Note for adata this also writes to disk */ + Dmsg1(100, "write_record_to_block adata=%d\n", dcr->dev->adata); + if (!write_record_to_block(dcr, &rec)) { + free_pool_memory(rec.data); + Jmsg2(jcr, M_FATAL, 0, _("Cannot write Volume label to block for %s device %s\n"), + dev->print_type(), dev->print_name()); + ok = false; + goto get_out; + } else { + Dmsg4(100, "Wrote fd=%d adata=%d label of %d bytes to block. Vol=%s\n", + dev->fd(), block->adata, rec.data_len, dcr->VolumeName); + } + free_pool_memory(rec.data); + +get_out: + Leave(100); + return ok; +} + +/* + * Write a Volume Label + * !!! Note, this is ONLY used for writing + * a fresh volume label. Any data + * after the label will be destroyed, + * in fact, we write the label 5 times !!!! + * + * This routine should be used only when labeling a blank tape or + * when recylcing a volume. + * + * Handle both the ameta and adata volumes. 
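+ *
+ * [Editorial sketch, not part of the original source: a typical call when
+ *  labeling a blank volume might look like
+ *
+ *     dev->write_volume_label(dcr, "Vol0001", "Default",
+ *                             false, false);   // relabel=false, no_prelabel=false
+ *
+ *  where "Vol0001" and "Default" are hypothetical Volume and Pool names.]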
+ */ +bool DEVICE::write_volume_label(DCR *dcr, const char *VolName, + const char *PoolName, bool relabel, bool no_prelabel) +{ + DEVICE *dev; + + Enter(100); + Dmsg4(230, "Write: block=%p ameta=%p dev=%p ameta_dev=%p\n", + dcr->block, dcr->ameta_block, dcr->dev, dcr->ameta_dev); + dcr->set_ameta(); + dev = dcr->dev; + + Dmsg0(150, "write_volume_label()\n"); + if (*VolName == 0) { + if (dcr->jcr) { + Mmsg(dcr->jcr->errmsg, "ERROR: new_volume_label_to_dev called with NULL VolName\n"); + } + Pmsg0(0, "=== ERROR: write_volume_label called with NULL VolName\n"); + goto bail_out; + } + + if (relabel) { + volume_unused(dcr); /* mark current volume unused */ + /* Truncate device */ + if (!dev->truncate(dcr)) { + goto bail_out; + } + dev->close_part(dcr); /* make sure closed for rename */ + } + + /* Set the new filename for open, ... */ + dev->setVolCatName(VolName); + dcr->setVolCatName(VolName); + dev->clearVolCatBytes(); + + Dmsg1(100, "New VolName=%s\n", VolName); + if (!dev->open_device(dcr, OPEN_READ_WRITE)) { + /* If device is not tape, attempt to create it */ + if (dev->is_tape() || !dev->open_device(dcr, CREATE_READ_WRITE)) { + Jmsg4(dcr->jcr, M_WARNING, 0, _("Open %s device %s Volume \"%s\" failed: ERR=%s"), + dev->print_type(), dev->print_name(), dcr->VolumeName, dev->bstrerror()); + goto bail_out; + } + } + Dmsg1(150, "Label type=%d\n", dev->label_type); + + if (!write_volume_label_to_dev(dcr, VolName, PoolName, relabel, no_prelabel)) { + goto bail_out; + } + + if (!dev->is_aligned()) { + /* Not aligned data */ + if (dev->weof(dcr, 1)) { + dev->set_labeled(); + } + + if (chk_dbglvl(100)) { + dev->dump_volume_label(); + } + Dmsg0(50, "Call reserve_volume\n"); + /**** ***FIXME*** if dev changes, dcr must be updated */ + if (reserve_volume(dcr, VolName) == NULL) { + if (!dcr->jcr->errmsg[0]) { + Mmsg3(dcr->jcr->errmsg, _("Could not reserve volume %s on %s device %s\n"), + dev->VolHdr.VolumeName, dev->print_type(), dev->print_name()); + } + Dmsg1(50, "%s", dcr->jcr->errmsg); + goto bail_out; + } + dev = dcr->dev; /* may have changed in reserve_volume */ + } + dev->clear_append(); /* remove append since this is PRE_LABEL */ + Leave(100); + return true; + +bail_out: + dcr->adata_label = false; + dcr->set_ameta(); + volume_unused(dcr); + dcr->dev->clear_append(); /* remove append since this is PRE_LABEL */ + Leave(100); + return false; +} + +bool DEVICE::write_volume_label_to_dev(DCR *dcr, const char *VolName, + const char *PoolName, bool relabel, bool no_prelabel) +{ + DEVICE *dev, *ameta_dev; + DEV_BLOCK *block; + DEV_RECORD *rec = new_record(); + bool rtn = false; + + Enter(100); + dev = dcr->dev; + ameta_dev = dcr->ameta_dev; + block = dcr->block; + + empty_block(block); + if (!dev->rewind(dcr)) { + Dmsg2(130, "Bad status on %s from rewind: ERR=%s\n", dev->print_name(), dev->print_errmsg()); + goto bail_out; + } + + /* Temporarily mark in append state to enable writing */ + dev->set_append(); + + /* Create PRE_LABEL or VOL_LABEL */ + create_volume_header(dev, VolName, PoolName, no_prelabel); + + /* + * If we have already detected an ANSI label, re-read it + * to skip past it. Otherwise, we write a new one if + * so requested. 
+ */ + if (!block->adata) { + if (dev->label_type != B_BACULA_LABEL) { + if (read_ansi_ibm_label(dcr) != VOL_OK) { + dev->rewind(dcr); + goto bail_out; + } + } else if (!write_ansi_ibm_labels(dcr, ANSI_VOL_LABEL, VolName)) { + goto bail_out; + } + } + + create_volume_label_record(dcr, dev, rec, block->adata); + rec->Stream = 0; + rec->maskedStream = 0; + + Dmsg2(100, "write_record_to_block adata=%d FI=%d\n", dcr->dev->adata, + rec->FileIndex); + + /* For adata label this also writes to disk */ + if (!write_record_to_block(dcr, rec)) { + Dmsg2(40, "Bad Label write on %s: ERR=%s\n", dev->print_name(), dev->print_errmsg()); + goto bail_out; + } else { + Dmsg3(100, "Wrote label=%d bytes adata=%d block: %s\n", rec->data_len, block->adata, dev->print_name()); + } + Dmsg3(100, "New label adata=%d VolCatBytes=%lld VolCatStatus=%s\n", + dev->adata, ameta_dev->VolCatInfo.VolCatBytes, ameta_dev->VolCatInfo.VolCatStatus); + + if (block->adata) { + /* Empty block and set data start address */ + empty_block(dcr->adata_block); + } else { + Dmsg4(130, "Call write_block_to_dev() fd=%d adata=%d block=%p Addr=%lld\n", + dcr->dev->fd(), dcr->block->adata, dcr->block, block->dev->lseek(dcr, 0, SEEK_CUR)); + Dmsg1(100, "write_record_to_dev adata=%d\n", dcr->dev->adata); + /* Write ameta block to device */ + if (!dcr->write_block_to_dev()) { + Dmsg2(40, "Bad Label write on %s: ERR=%s\n", dev->print_name(), dev->print_errmsg()); + goto bail_out; + } + } + Dmsg3(100, "Wrote new Vol label adata=%d VolCatBytes=%lld VolCatStatus=%s\n", + dev->adata, ameta_dev->VolCatInfo.VolCatBytes, ameta_dev->VolCatInfo.VolCatStatus); + rtn = true; + +bail_out: + free_record(rec); + Leave(100); + return rtn; +} + +/* + * Write a volume label. This is ONLY called if we have a valid Bacula + * label of type PRE_LABEL or we are recyling an existing Volume. + * + * By calling write_volume_label_to_block, both ameta and adata + * are updated. + * + * Returns: true if OK + * false if unable to write it + */ +bool DEVICE::rewrite_volume_label(DCR *dcr, bool recycle) +{ + char ed1[50]; + JCR *jcr = dcr->jcr; + + Enter(100); + ASSERT2(dcr->VolumeName[0], "Empty Volume name"); + ASSERT(!dcr->block->adata); + if (is_worm()) { + Jmsg3(jcr, M_FATAL, 0, _("Cannot relabel worm %s device %s Volume \"%s\"\n"), + print_type(), print_name(), dcr->VolumeName); + Leave(100); + return false; + } + if (!open_device(dcr, OPEN_READ_WRITE)) { + Jmsg4(jcr, M_WARNING, 0, _("Open %s device %s Volume \"%s\" failed: ERR=%s\n"), + print_type(), print_name(), dcr->VolumeName, bstrerror()); + Leave(100); + return false; + } + Dmsg2(190, "set append found freshly labeled volume. fd=%d dev=%x\n", fd(), this); + VolHdr.LabelType = VOL_LABEL; /* set Volume label */ + set_append(); + Dmsg0(100, "Rewrite_volume_label set volcatbytes=0\n"); + clearVolCatBytes(); /* resets both ameta and adata byte counts */ + setVolCatStatus("Append"); /* set append status */ + + if (!has_cap(CAP_STREAM)) { + if (!rewind(dcr)) { + Jmsg3(jcr, M_FATAL, 0, _("Rewind error on %s device %s: ERR=%s\n"), + print_type(), print_name(), print_errmsg()); + Leave(100); + return false; + } + if (recycle) { + Dmsg1(150, "Doing recycle. 
Vol=%s\n", dcr->VolumeName); + if (!truncate(dcr)) { + Jmsg3(jcr, M_FATAL, 0, _("Truncate error on %s device %s: ERR=%s\n"), + print_type(), print_name(), print_errmsg()); + Leave(100); + return false; + } + if (!open_device(dcr, OPEN_READ_WRITE)) { + Jmsg3(jcr, M_FATAL, 0, + _("Failed to re-open device after truncate on %s device %s: ERR=%s"), + print_type(), print_name(), print_errmsg()); + Leave(100); + return false; + } + } + } + + if (!write_volume_label_to_block(dcr)) { + Dmsg0(150, "Error from write volume label.\n"); + Leave(100); + return false; + } + Dmsg2(100, "wrote vol label to block. adata=%d Vol=%s\n", dcr->block->adata, dcr->VolumeName); + + ASSERT2(dcr->VolumeName[0], "Empty Volume name"); + setVolCatInfo(false); + + /* + * If we are not dealing with a streaming device, + * write the block now to ensure we have write permission. + * It is better to find out now rather than later. + * We do not write the block now if this is an ANSI label. This + * avoids re-writing the ANSI label, which we do not want to do. + */ + if (!has_cap(CAP_STREAM)) { + /* + * If we have already detected an ANSI label, re-read it + * to skip past it. Otherwise, we write a new one if + * so requested. + */ + if (label_type != B_BACULA_LABEL) { + if (read_ansi_ibm_label(dcr) != VOL_OK) { + rewind(dcr); + Leave(100); + return false; + } + } else if (!write_ansi_ibm_labels(dcr, ANSI_VOL_LABEL, VolHdr.VolumeName)) { + Leave(100); + return false; + } + + /* Attempt write to check write permission */ + Dmsg1(200, "Attempt to write to device fd=%d.\n", fd()); + if (!dcr->write_block_to_dev()) { + Jmsg3(jcr, M_ERROR, 0, _("Unable to write %s device %s: ERR=%s\n"), + print_type(), print_name(), print_errmsg()); + Dmsg0(200, "===ERROR write block to dev\n"); + Leave(100); + return false; + } + } + ASSERT2(dcr->VolumeName[0], "Empty Volume name"); + setVolCatName(dcr->VolumeName); + if (!dir_get_volume_info(dcr, dcr->VolumeName, GET_VOL_INFO_FOR_WRITE)) { + Leave(100); + return false; + } + set_labeled(); + /* Set or reset Volume statistics */ + VolCatInfo.VolCatJobs = 0; + VolCatInfo.VolCatFiles = 0; + VolCatInfo.VolCatErrors = 0; + VolCatInfo.VolCatBlocks = 0; + VolCatInfo.VolCatRBytes = 0; + VolCatInfo.VolCatCloudParts = 0; + VolCatInfo.VolLastPartBytes = 0; + VolCatInfo.VolCatType = 0; /* Will be set by dir_update_volume_info() */ + if (recycle) { + VolCatInfo.VolCatMounts++; + VolCatInfo.VolCatRecycles++; + } else { + VolCatInfo.VolCatMounts = 1; + VolCatInfo.VolCatRecycles = 0; + VolCatInfo.VolCatWrites = 1; + VolCatInfo.VolCatReads = 1; + } + dcr->VolMediaId = dcr->VolCatInfo.VolMediaId; /* make create_jobmedia work */ + dir_create_jobmedia_record(dcr, true); + Dmsg1(100, "dir_update_vol_info. Set Append vol=%s\n", dcr->VolumeName); + VolCatInfo.VolFirstWritten = time(NULL); + setVolCatStatus("Append"); + if (!dir_update_volume_info(dcr, true, true)) { /* indicate relabel */ + Leave(100); + return false; + } + if (recycle) { + Jmsg(jcr, M_INFO, 0, _("Recycled volume \"%s\" on %s device %s, all previous data lost.\n"), + dcr->VolumeName, print_type(), print_name()); + } else { + Jmsg(jcr, M_INFO, 0, _("Wrote label to prelabeled Volume \"%s\" on %s device %s\n"), + dcr->VolumeName, print_type(), print_name()); + } + /* + * End writing real Volume label (from pre-labeled tape), or recycling + * the volume. + */ + Dmsg4(100, "OK rewrite vol label. 
Addr=%s adata=%d slot=%d Vol=%s\n", + print_addr(ed1, sizeof(ed1)), dcr->block->adata, VolCatInfo.Slot, dcr->VolumeName); + Leave(100); + return true; +} + + +/* + * create_volume_label_record + * Note: it is assumed that you have created the volume_header + * (label) prior to calling this subroutine. + * Serialize label (from dev->VolHdr structure) into device record. + * Assumes that the dev->VolHdr structure is properly + * initialized. +*/ +static void create_volume_label_record(DCR *dcr, DEVICE *dev, + DEV_RECORD *rec, bool adata) +{ + ser_declare; + struct date_time dt; + JCR *jcr = dcr->jcr; + char buf[100]; + + /* Serialize the label into the device record. */ + + Enter(100); + rec->data = check_pool_memory_size(rec->data, SER_LENGTH_Volume_Label); + memset(rec->data, 0, SER_LENGTH_Volume_Label); + ser_begin(rec->data, SER_LENGTH_Volume_Label); + ser_string(dev->VolHdr.Id); + + ser_uint32(dev->VolHdr.VerNum); + + if (dev->VolHdr.VerNum >= 11) { + ser_btime(dev->VolHdr.label_btime); + dev->VolHdr.write_btime = get_current_btime(); + ser_btime(dev->VolHdr.write_btime); + dev->VolHdr.write_date = 0; + dev->VolHdr.write_time = 0; + } else { + /* OLD WAY DEPRECATED */ + ser_float64(dev->VolHdr.label_date); + ser_float64(dev->VolHdr.label_time); + get_current_time(&dt); + dev->VolHdr.write_date = dt.julian_day_number; + dev->VolHdr.write_time = dt.julian_day_fraction; + } + ser_float64(dev->VolHdr.write_date); /* 0 if VerNum >= 11 */ + ser_float64(dev->VolHdr.write_time); /* 0 if VerNum >= 11 */ + + ser_string(dev->VolHdr.VolumeName); + ser_string(dev->VolHdr.PrevVolumeName); + ser_string(dev->VolHdr.PoolName); + ser_string(dev->VolHdr.PoolType); + ser_string(dev->VolHdr.MediaType); + + ser_string(dev->VolHdr.HostName); + ser_string(dev->VolHdr.LabelProg); + ser_string(dev->VolHdr.ProgVersion); + ser_string(dev->VolHdr.ProgDate); + /* ***FIXME*** */ + dev->VolHdr.AlignedVolumeName[0] = 0; + ser_string(dev->VolHdr.AlignedVolumeName); + + /* This is adata Volume information */ + ser_uint64(dev->VolHdr.FirstData); + ser_uint32(dev->VolHdr.FileAlignment); + ser_uint32(dev->VolHdr.PaddingSize); + /* adata and dedup volumes */ + ser_uint32(dev->VolHdr.BlockSize); + + ser_end(rec->data, SER_LENGTH_Volume_Label); + if (!adata) { + bstrncpy(dcr->VolumeName, dev->VolHdr.VolumeName, sizeof(dcr->VolumeName)); + } + ASSERT2(dcr->VolumeName[0], "Empty Volume name"); + rec->data_len = ser_length(rec->data); + rec->FileIndex = dev->VolHdr.LabelType; + Dmsg2(100, "LabelType=%d adata=%d\n", dev->VolHdr.LabelType, dev->adata); + rec->VolSessionId = jcr->VolSessionId; + rec->VolSessionTime = jcr->VolSessionTime; + rec->Stream = jcr->NumWriteVolumes; + rec->maskedStream = jcr->NumWriteVolumes; + Dmsg3(100, "Created adata=%d Vol label rec: FI=%s len=%d\n", adata, FI_to_ascii(buf, rec->FileIndex), + rec->data_len); + Dmsg2(100, "reclen=%d recdata=%s", rec->data_len, rec->data); + Leave(100); +} + + +/* + * Create a volume header (label) in memory + * The volume record is created after this header (label) + * is created. 
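+ *
+ * [Editorial note, not part of the original source: the header Id chosen
+ *  below (BaculaId, BaculaMetaDataId, BaculaAlignedDataId or
+ *  BaculaS3CloudId, depending on the device type) is drawn from the same
+ *  set of Ids that read_dev_volume_label() above accepts when it checks
+ *  VolHdr.Id.]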
+ */ +void create_volume_header(DEVICE *dev, const char *VolName, + const char *PoolName, bool no_prelabel) +{ + DEVRES *device = (DEVRES *)dev->device; + + Enter(130); + + ASSERT2(dev != NULL, "dev ptr is NULL"); + + if (dev->is_aligned()) { + bstrncpy(dev->VolHdr.Id, BaculaMetaDataId, sizeof(dev->VolHdr.Id)); + dev->VolHdr.VerNum = BaculaMetaDataVersion; + dev->VolHdr.FirstData = dev->file_alignment; + dev->VolHdr.FileAlignment = dev->file_alignment; + dev->VolHdr.PaddingSize = dev->padding_size; + dev->VolHdr.BlockSize = dev->adata_size; + } else if (dev->is_adata()) { + bstrncpy(dev->VolHdr.Id, BaculaAlignedDataId, sizeof(dev->VolHdr.Id)); + dev->VolHdr.VerNum = BaculaAlignedDataVersion; + dev->VolHdr.FirstData = dev->file_alignment; + dev->VolHdr.FileAlignment = dev->file_alignment; + dev->VolHdr.PaddingSize = dev->padding_size; + dev->VolHdr.BlockSize = dev->adata_size; + } else if (dev->is_cloud()) { + bstrncpy(dev->VolHdr.Id, BaculaS3CloudId, sizeof(dev->VolHdr.Id)); + dev->VolHdr.VerNum = BaculaS3CloudVersion; + dev->VolHdr.BlockSize = dev->max_block_size; + dev->VolHdr.MaxPartSize = dev->max_part_size; + } else { + bstrncpy(dev->VolHdr.Id, BaculaId, sizeof(dev->VolHdr.Id)); + dev->VolHdr.VerNum = BaculaTapeVersion; + dev->VolHdr.BlockSize = dev->max_block_size; + } + + if ((dev->has_cap(CAP_STREAM) && no_prelabel) || dev->is_worm()) { + /* We do not want to re-label so write VOL_LABEL now */ + dev->VolHdr.LabelType = VOL_LABEL; + } else { + dev->VolHdr.LabelType = PRE_LABEL; /* Mark Volume as unused */ + } + bstrncpy(dev->VolHdr.VolumeName, VolName, sizeof(dev->VolHdr.VolumeName)); + bstrncpy(dev->VolHdr.PoolName, PoolName, sizeof(dev->VolHdr.PoolName)); + bstrncpy(dev->VolHdr.MediaType, device->media_type, sizeof(dev->VolHdr.MediaType)); + + bstrncpy(dev->VolHdr.PoolType, "Backup", sizeof(dev->VolHdr.PoolType)); + + dev->VolHdr.label_btime = get_current_btime(); + dev->VolHdr.label_date = 0; + dev->VolHdr.label_time = 0; + + if (gethostname(dev->VolHdr.HostName, sizeof(dev->VolHdr.HostName)) != 0) { + dev->VolHdr.HostName[0] = 0; + } + bstrncpy(dev->VolHdr.LabelProg, my_name, sizeof(dev->VolHdr.LabelProg)); + sprintf(dev->VolHdr.ProgVersion, "Ver. 
%s %s ", VERSION, BDATE); + sprintf(dev->VolHdr.ProgDate, "Build %s %s ", __DATE__, __TIME__); + dev->set_labeled(); /* set has Bacula label */ + if (chk_dbglvl(100)) { + dev->dump_volume_label(); + } +} + +/* + * Create session (Job) label + * The pool memory must be released by the calling program + */ +void create_session_label(DCR *dcr, DEV_RECORD *rec, int label) +{ + JCR *jcr = dcr->jcr; + ser_declare; + + Enter(100); + rec->VolSessionId = jcr->VolSessionId; + rec->VolSessionTime = jcr->VolSessionTime; + rec->Stream = jcr->JobId; + rec->maskedStream = jcr->JobId; + + rec->data = check_pool_memory_size(rec->data, SER_LENGTH_Session_Label); + ser_begin(rec->data, SER_LENGTH_Session_Label); + ser_string(BaculaId); + ser_uint32(BaculaTapeVersion); + + ser_uint32(jcr->JobId); + + /* Changed in VerNum 11 */ + ser_btime(get_current_btime()); + ser_float64(0); + + ser_string(dcr->pool_name); + ser_string(dcr->pool_type); + ser_string(jcr->job_name); /* base Job name */ + ser_string(jcr->client_name); + + /* Added in VerNum 10 */ + ser_string(jcr->Job); /* Unique name of this Job */ + ser_string(jcr->fileset_name); + ser_uint32(jcr->getJobType()); + ser_uint32(jcr->getJobLevel()); + /* Added in VerNum 11 */ + ser_string(jcr->fileset_md5); + + if (label == EOS_LABEL) { + ser_uint32(jcr->JobFiles); + ser_uint64(jcr->JobBytes); + ser_uint32((uint32_t)dcr->StartAddr); /* Start Block */ + ser_uint32((uint32_t)dcr->EndAddr); /* End Block */ + ser_uint32((uint32_t)(dcr->StartAddr>>32)); /* Start File */ + ser_uint32((uint32_t)(dcr->EndAddr>>32)); /* End File */ + ser_uint32(jcr->JobErrors); + + /* Added in VerNum 11 */ + ser_uint32(jcr->JobStatus); + } + ser_end(rec->data, SER_LENGTH_Session_Label); + rec->data_len = ser_length(rec->data); + Leave(100); +} + +/* Write session (Job) label + * Returns: false on failure + * true on success + */ +bool write_session_label(DCR *dcr, int label) +{ + JCR *jcr = dcr->jcr; + DEVICE *dev = dcr->dev; + DEV_RECORD *rec; + DEV_BLOCK *block = dcr->block; + char buf1[100], buf2[100]; + + Enter(100); + dev->Lock(); + Dmsg2(140, "=== write_session_label label=%d Vol=%s.\n", label, dev->getVolCatName()); + if (!check_for_newvol_or_newfile(dcr)) { + Pmsg0(000, "ERR: !check_for_new_vol_or_newfile\n"); + dev->Unlock(); + return false; + } + + rec = new_record(); + Dmsg1(130, "session_label record=%x\n", rec); + switch (label) { + case SOS_LABEL: + set_start_vol_position(dcr); + break; + case EOS_LABEL: + dcr->EndAddr = dev->get_full_addr(); + break; + default: + Jmsg1(jcr, M_ABORT, 0, _("Bad Volume session label request=%d\n"), label); + break; + } + + create_session_label(dcr, rec, label); + rec->FileIndex = label; + dev->Unlock(); + + /* + * We guarantee that the session record can totally fit + * into a block. If not, write the block, and put it in + * the next block. Having the sesssion record totally in + * one block makes reading them much easier (no need to + * read the next block). + */ + if (!can_write_record_to_block(block, rec)) { + Dmsg0(150, "Cannot write session label to block.\n"); + if (!dcr->write_block_to_device()) { + Dmsg0(130, "Got session label write_block_to_dev error.\n"); + free_record(rec); + Leave(100); + return false; + } + } + /* + * We use write_record() because it handles the case that + * the maximum user size has been reached. 
+ */ + if (!dcr->write_record(rec)) { + Dmsg0(150, "Bad return from write_record\n"); + free_record(rec); + Leave(100); + return false; + } + + Dmsg6(150, "Write sesson_label record JobId=%d FI=%s SessId=%d Strm=%s len=%d " + "remainder=%d\n", jcr->JobId, + FI_to_ascii(buf1, rec->FileIndex), rec->VolSessionId, + stream_to_ascii(buf2, rec->Stream, rec->FileIndex), rec->data_len, + rec->remainder); + + free_record(rec); + Dmsg2(150, "Leave write_session_label Block=%u File=%u\n", + dev->get_block_num(), dev->get_file()); + Leave(100); + return true; +} + +/* unser_volume_label + * + * Unserialize the Bacula Volume label into the device Volume_Label + * structure. + * + * Assumes that the record is already read. + * + * Returns: false on error + * true on success +*/ + +bool unser_volume_label(DEVICE *dev, DEV_RECORD *rec) +{ + ser_declare; + char buf1[100], buf2[100]; + + Enter(100); + if (rec->FileIndex != VOL_LABEL && rec->FileIndex != PRE_LABEL) { + Mmsg3(dev->errmsg, _("Expecting Volume Label, got FI=%s Stream=%s len=%d\n"), + FI_to_ascii(buf1, rec->FileIndex), + stream_to_ascii(buf2, rec->Stream, rec->FileIndex), + rec->data_len); + if (!forge_on) { + Leave(100); + return false; + } + } + + dev->VolHdr.LabelType = rec->FileIndex; + dev->VolHdr.LabelSize = rec->data_len; + + + /* Unserialize the record into the Volume Header */ + Dmsg2(100, "reclen=%d recdata=%s", rec->data_len, rec->data); + rec->data = check_pool_memory_size(rec->data, SER_LENGTH_Volume_Label); + Dmsg2(100, "reclen=%d recdata=%s", rec->data_len, rec->data); + ser_begin(rec->data, SER_LENGTH_Volume_Label); + unser_string(dev->VolHdr.Id); + unser_uint32(dev->VolHdr.VerNum); + + if (dev->VolHdr.VerNum >= 11) { + unser_btime(dev->VolHdr.label_btime); + unser_btime(dev->VolHdr.write_btime); + } else { /* old way */ + unser_float64(dev->VolHdr.label_date); + unser_float64(dev->VolHdr.label_time); + } + unser_float64(dev->VolHdr.write_date); /* Unused with VerNum >= 11 */ + unser_float64(dev->VolHdr.write_time); /* Unused with VerNum >= 11 */ + + unser_string(dev->VolHdr.VolumeName); + unser_string(dev->VolHdr.PrevVolumeName); + unser_string(dev->VolHdr.PoolName); + unser_string(dev->VolHdr.PoolType); + unser_string(dev->VolHdr.MediaType); + + unser_string(dev->VolHdr.HostName); + unser_string(dev->VolHdr.LabelProg); + unser_string(dev->VolHdr.ProgVersion); + unser_string(dev->VolHdr.ProgDate); + +// unser_string(dev->VolHdr.AlignedVolumeName); + dev->VolHdr.AlignedVolumeName[0] = 0; + unser_uint64(dev->VolHdr.FirstData); + unser_uint32(dev->VolHdr.FileAlignment); + unser_uint32(dev->VolHdr.PaddingSize); + unser_uint32(dev->VolHdr.BlockSize); + + ser_end(rec->data, SER_LENGTH_Volume_Label); + Dmsg0(190, "unser_vol_label\n"); + if (chk_dbglvl(100)) { + dev->dump_volume_label(); + } + Leave(100); + return true; +} + + +bool unser_session_label(SESSION_LABEL *label, DEV_RECORD *rec) +{ + ser_declare; + + Enter(100); + rec->data = check_pool_memory_size(rec->data, SER_LENGTH_Session_Label); + unser_begin(rec->data, SER_LENGTH_Session_Label); + unser_string(label->Id); + unser_uint32(label->VerNum); + unser_uint32(label->JobId); + if (label->VerNum >= 11) { + unser_btime(label->write_btime); + } else { + unser_float64(label->write_date); + } + unser_float64(label->write_time); + unser_string(label->PoolName); + unser_string(label->PoolType); + unser_string(label->JobName); + unser_string(label->ClientName); + if (label->VerNum >= 10) { + unser_string(label->Job); /* Unique name of this Job */ + unser_string(label->FileSetName); + 
unser_uint32(label->JobType); + unser_uint32(label->JobLevel); + } + if (label->VerNum >= 11) { + unser_string(label->FileSetMD5); + } else { + label->FileSetMD5[0] = 0; + } + if (rec->FileIndex == EOS_LABEL) { + unser_uint32(label->JobFiles); + unser_uint64(label->JobBytes); + unser_uint32(label->StartBlock); + unser_uint32(label->EndBlock); + unser_uint32(label->StartFile); + unser_uint32(label->EndFile); + unser_uint32(label->JobErrors); + if (label->VerNum >= 11) { + unser_uint32(label->JobStatus); + } else { + label->JobStatus = JS_Terminated; /* kludge */ + } + } + Leave(100); + return true; +} + +void DEVICE::dump_volume_label() +{ + int64_t dbl = debug_level; + uint32_t File; + const char *LabelType; + char buf[30]; + struct tm tm; + struct date_time dt; + + debug_level = 1; + File = file; + switch (VolHdr.LabelType) { + case PRE_LABEL: + LabelType = "PRE_LABEL"; + break; + case VOL_LABEL: + LabelType = "VOL_LABEL"; + break; + case EOM_LABEL: + LabelType = "EOM_LABEL"; + break; + case SOS_LABEL: + LabelType = "SOS_LABEL"; + break; + case EOS_LABEL: + LabelType = "EOS_LABEL"; + break; + case EOT_LABEL: + goto bail_out; + default: + LabelType = buf; + sprintf(buf, _("Unknown %d"), VolHdr.LabelType); + break; + } + + Pmsg12(-1, _("\nVolume Label:\n" +"Adata : %d\n" +"Id : %s" +"VerNo : %d\n" +"VolName : %s\n" +"PrevVolName : %s\n" +"VolFile : %d\n" +"LabelType : %s\n" +"LabelSize : %d\n" +"PoolName : %s\n" +"MediaType : %s\n" +"PoolType : %s\n" +"HostName : %s\n" +""), + adata, + VolHdr.Id, VolHdr.VerNum, + VolHdr.VolumeName, VolHdr.PrevVolumeName, + File, LabelType, VolHdr.LabelSize, + VolHdr.PoolName, VolHdr.MediaType, + VolHdr.PoolType, VolHdr.HostName); + + if (VolHdr.VerNum >= 11) { + char dt[50]; + bstrftime(dt, sizeof(dt), btime_to_utime(VolHdr.label_btime)); + Pmsg1(-1, _("Date label written: %s\n"), dt); + } else { + dt.julian_day_number = VolHdr.label_date; + dt.julian_day_fraction = VolHdr.label_time; + tm_decode(&dt, &tm); + Pmsg5(-1, + _("Date label written: %04d-%02d-%02d at %02d:%02d\n"), + tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday, tm.tm_hour, tm.tm_min); + } + +bail_out: + debug_level = dbl; +} + + +static void dump_session_label(DEV_RECORD *rec, const char *type) +{ + int64_t dbl; + struct date_time dt; + struct tm tm; + SESSION_LABEL label; + char ec1[30], ec2[30], ec3[30], ec4[30], ec5[30], ec6[30], ec7[30]; + + unser_session_label(&label, rec); + dbl = debug_level; + debug_level = 1; + Pmsg7(-1, _("\n%s Record:\n" +"JobId : %d\n" +"VerNum : %d\n" +"PoolName : %s\n" +"PoolType : %s\n" +"JobName : %s\n" +"ClientName : %s\n" +""), type, label.JobId, label.VerNum, + label.PoolName, label.PoolType, + label.JobName, label.ClientName); + + if (label.VerNum >= 10) { + Pmsg4(-1, _( +"Job (unique name) : %s\n" +"FileSet : %s\n" +"JobType : %c\n" +"JobLevel : %c\n" +""), label.Job, label.FileSetName, label.JobType, label.JobLevel); + } + + if (rec->FileIndex == EOS_LABEL) { + Pmsg8(-1, _( +"JobFiles : %s\n" +"JobBytes : %s\n" +"StartBlock : %s\n" +"EndBlock : %s\n" +"StartFile : %s\n" +"EndFile : %s\n" +"JobErrors : %s\n" +"JobStatus : %c\n" +""), + edit_uint64_with_commas(label.JobFiles, ec1), + edit_uint64_with_commas(label.JobBytes, ec2), + edit_uint64_with_commas(label.StartBlock, ec3), + edit_uint64_with_commas(label.EndBlock, ec4), + edit_uint64_with_commas(label.StartFile, ec5), + edit_uint64_with_commas(label.EndFile, ec6), + edit_uint64_with_commas(label.JobErrors, ec7), + label.JobStatus); + } + if (label.VerNum >= 11) { + char dt[50]; + bstrftime(dt, sizeof(dt), 
btime_to_utime(label.write_btime)); + Pmsg1(-1, _("Date written : %s\n"), dt); + } else { + dt.julian_day_number = label.write_date; + dt.julian_day_fraction = label.write_time; + tm_decode(&dt, &tm); + Pmsg5(-1, _("Date written : %04d-%02d-%02d at %02d:%02d\n"), + tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday, tm.tm_hour, tm.tm_min); + } + + debug_level = dbl; +} + +static int check_label(SESSION_LABEL *label) +{ + int errors = 0; + + if (label->JobId > 10000000 || label->JobId < 0) { + Pmsg0(-1, _("***** ERROR ****** : Found error with the JobId\n")); + errors++; + } + + if (!errors) { + switch (label->JobLevel) { + case L_FULL: + case L_INCREMENTAL: + case L_DIFFERENTIAL: + case L_SINCE: + case L_VERIFY_CATALOG: + case L_VERIFY_INIT: + case L_VERIFY_VOLUME_TO_CATALOG: + case L_VERIFY_DISK_TO_CATALOG: + case L_VERIFY_DATA: + case L_BASE: + case L_NONE: + case L_VIRTUAL_FULL: + break; + default: + Pmsg0(-1, _("***** ERROR ****** : Found error with the JobLevel\n")); + errors++; + } + } + if (!errors) { + switch (label->JobType) { + case JT_BACKUP: + case JT_MIGRATED_JOB: + case JT_VERIFY: + case JT_RESTORE: + case JT_CONSOLE: + case JT_SYSTEM: + case JT_ADMIN: + case JT_ARCHIVE: + case JT_JOB_COPY: + case JT_COPY: + case JT_MIGRATE: + case JT_SCAN: + break; + default: + Pmsg0(-1, _("***** ERROR ****** : Found error with the JobType\n")); + errors++; + } + } + if (!errors) { + POOLMEM *err = get_pool_memory(PM_EMSG); + if (!is_name_valid(label->Job, &err)) { + Pmsg1(-1, _("***** ERROR ****** : Found error with the Job name %s\n"), err); + errors++; + } + free_pool_memory(err); + } + return errors; +} + +int dump_label_record(DEVICE *dev, DEV_RECORD *rec, int verbose, bool check_err) +{ + const char *type; + int64_t dbl; + int errors = 0; + + if (rec->FileIndex == 0 && rec->VolSessionId == 0 && rec->VolSessionTime == 0) { + return 0; + } + dbl = debug_level; + debug_level = 1; + switch (rec->FileIndex) { + case PRE_LABEL: + type = _("Fresh Volume"); + break; + case VOL_LABEL: + type = _("Volume"); + break; + case SOS_LABEL: + type = _("Begin Job Session"); + break; + case EOS_LABEL: + type = _("End Job Session"); + break; + case EOM_LABEL: + type = _("End of Media"); + break; + case EOT_LABEL: + type = _("End of Tape"); + break; + default: + type = _("Unknown"); + break; + } + if (verbose) { + switch (rec->FileIndex) { + case PRE_LABEL: + case VOL_LABEL: + unser_volume_label(dev, rec); + dev->dump_volume_label(); + break; + + case EOS_LABEL: + case SOS_LABEL: + dump_session_label(rec, type); + break; + case EOM_LABEL: + Pmsg7(-1, _("%s Record: File:blk=%u:%u SessId=%d SessTime=%d JobId=%d DataLen=%d\n"), + type, dev->file, dev->block_num, rec->VolSessionId, + rec->VolSessionTime, rec->Stream, rec->data_len); + break; + case EOT_LABEL: + Pmsg0(-1, _("Bacula \"End of Tape\" label found.\n")); + break; + default: + Pmsg7(-1, _("%s Record: File:blk=%u:%u SessId=%d SessTime=%d JobId=%d DataLen=%d\n"), + type, dev->file, dev->block_num, rec->VolSessionId, + rec->VolSessionTime, rec->Stream, rec->data_len); + break; + } + } else { + SESSION_LABEL label; + char dt[50]; + switch (rec->FileIndex) { + case SOS_LABEL: + unser_session_label(&label, rec); + bstrftimes(dt, sizeof(dt), btime_to_utime(label.write_btime)); + Pmsg6(-1, _("%s Record: File:blk=%u:%u SessId=%d SessTime=%d JobId=%d\n"), + type, dev->file, dev->block_num, rec->VolSessionId, rec->VolSessionTime, label.JobId); + Pmsg4(-1, _(" Job=%s Date=%s Level=%c Type=%c\n"), + label.Job, dt, label.JobLevel, label.JobType); + if (check_err) { + errors 
+= check_label(&label); + } + break; + case EOS_LABEL: + char ed1[30], ed2[30]; + unser_session_label(&label, rec); + bstrftimes(dt, sizeof(dt), btime_to_utime(label.write_btime)); + Pmsg6(-1, _("%s Record: File:blk=%u:%u SessId=%d SessTime=%d JobId=%d\n"), + type, dev->file, dev->block_num, rec->VolSessionId, rec->VolSessionTime, label.JobId); + Pmsg7(-1, _(" Date=%s Level=%c Type=%c Files=%s Bytes=%s Errors=%d Status=%c\n"), + dt, label.JobLevel, label.JobType, + edit_uint64_with_commas(label.JobFiles, ed1), + edit_uint64_with_commas(label.JobBytes, ed2), + label.JobErrors, (char)label.JobStatus); + if (check_err) { + errors += check_label(&label); + } + break; + case EOM_LABEL: + case PRE_LABEL: + case VOL_LABEL: + default: + Pmsg7(-1, _("%s Record: File:blk=%u:%u SessId=%d SessTime=%d JobId=%d DataLen=%d\n"), + type, dev->file, dev->block_num, rec->VolSessionId, rec->VolSessionTime, + rec->Stream, rec->data_len); + break; + case EOT_LABEL: + break; + } + } + debug_level = dbl; + return errors; +} diff --git a/src/stored/lock.c b/src/stored/lock.c new file mode 100644 index 00000000..ef808bba --- /dev/null +++ b/src/stored/lock.c @@ -0,0 +1,536 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Collection of Bacula Storage daemon locking software + * + * Kern Sibbald, June 2007 + * + */ + +#include "bacula.h" /* pull in global headers */ +#include "stored.h" /* pull in Storage Deamon headers */ + +#ifdef SD_DEBUG_LOCK +const int dbglvl = DT_LOCK|30; +#else +const int dbglvl = DT_LOCK|50; +#endif + + +/* + * + * The Storage daemon has three locking concepts that must be + * understood: + * + * 1. dblock blocking the device, which means that the device + * is "marked" in use. When setting and removing the + block, the device is locked, but after dblock is + called the device is unlocked. + * 2. Lock() simple mutex that locks the device structure. A Lock + * can be acquired while a device is blocked if it is not + * locked. + * 3. rLock(locked) "recursive" Lock, when means that a Lock (mutex) + * will be acquired on the device if it is not blocked + * by some other thread. If the device was blocked by + * the current thread, it will acquire the lock. + * If some other thread has set a block on the device, + * this call will wait until the device is unblocked. + * Can be called with locked true, which means the + * Lock is already set + * + * A lock is normally set when modifying the device structure. + * A rLock is normally acquired when you want to block the device + * i.e. it will wait until the device is not blocked. + * A block is normally set during long operations like writing to + * the device. + * If you are writing the device, you will normally block and + * lock it. + * A lock cannot be violated. No other thread can touch the + * device while a lock is set. + * When a block is set, every thread accept the thread that set + * the block will block if rLock is called. 
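+ *
+ *   [Editorial illustration, not part of the original comment: a thread
+ *    that needs exclusive use of the device, for example while writing a
+ *    label, typically combines the two levels roughly like this:
+ *
+ *       dev->dblock(BST_WRITING_LABEL);  // mark device in use; others wait in rLock()
+ *       dev->Lock();                     // mutex for updates to the DEVICE structure
+ *       ... do the work ...
+ *       dev->Unlock();
+ *       dev->dunblock(false);            // clear the block and wake any waiters
+ *
+ *    BST_WRITING_LABEL is one of the m_blocked states defined in lock.h.]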
+ * A device can be blocked for multiple reasons, labeling, writing, + * acquiring (opening) the device, waiting for the operator, unmounted, + * ... + * Under certain conditions the block that is set on a device can be + * stolen and the device can be used by another thread. For example, + * a device is blocked because it is waiting for the operator to + * mount a tape. The operator can then unmount the device, and label + * a tape, re-mount it, give back the block, and the job will continue. + * + * + * Functions: + * + * DEVICE::Lock() does P(m_mutex) (in dev.h) + * DEVICE::Unlock() does V(m_mutex) + * + * DEVICE::rLock(locked) allows locking the device when this thread + * already has the device blocked. + * if (!locked) + * Lock() + * if blocked and not same thread that locked + * pthread_cond_wait + * leaves device locked + * + * DEVICE::rUnlock() unlocks but does not unblock + * same as Unlock(); + * + * DEVICE::dblock(why) does + * rLock(); (recursive device lock) + * block_device(this, why) + * rUnlock() + * + * DEVICE::dunblock does + * Lock() + * unblock_device() + * Unlock() + * + * block_device() does (must be locked and not blocked at entry) + * set blocked status + * set our pid + * + * unblock_device() does (must be blocked at entry) + * (locked on entry) + * (locked on exit) + * set unblocked status + * clear pid + * if waiting threads + * pthread_cond_broadcast + * + * obtain_device_block() does (must be locked and blocked at entry) + * save status + * set new blocked status + * set new pid + * + * give_back_device_block() does (must be blocked and locked) + * reset blocked status + * save previous blocked + * reset pid + * if waiting threads + * pthread_cond_broadcast + * + */ + +void DEVICE::dblock(int why) +{ + rLock(false); /* need recursive lock to block */ + block_device(this, why); + rUnlock(); +} + +void DEVICE::dunblock(bool locked) +{ + if (!locked) { + Lock(); + } + unblock_device(this); + Unlock(); +} + + + +/* + * Debug DEVICE locks N.B. + * + */ + +#ifdef DEV_DEBUG_LOCK + +void DEVICE::dbg_Lock(const char *file, int line) +{ + Dmsg4(sd_dbglvl, "Lock %s from %s:%d precnt=%d\n", device->hdr.name, file, line, m_count); + bthread_mutex_lock_p(&m_mutex, file, line); + m_pid = pthread_self(); + m_count++; +} + +void DEVICE::dbg_Unlock(const char *file, int line) +{ + m_count--; + clear_thread_id(m_pid); + Dmsg4(sd_dbglvl, "Unlock %s from %s:%d postcnt=%d\n", device->hdr.name, file, line, m_count); + bthread_mutex_unlock_p(&m_mutex, file, line); +} + +void DEVICE::dbg_rUnlock(const char *file, int line) +{ + Dmsg2(sd_dbglvl, "rUnlock from %s:%d\n", file, line); + dbg_Unlock(file, line); +} + +#else + +/* + * DEVICE locks N.B. + * + */ + + +void DEVICE::rUnlock() +{ + Unlock(); +} + +void DEVICE::Lock() +{ + P(m_mutex); +} + +void DEVICE::Unlock() +{ + V(m_mutex); +} + +#endif /* DEV_DEBUG_LOCK */ + +/* + * This is a recursive lock that checks if the device is blocked. + * + * When blocked is set, all threads EXCEPT thread with id no_wait_id + * must wait. The no_wait_id thread is out obtaining a new volume + * and preparing the label. 
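+ *
+ * [Editorial note, not part of the original source: no_wait_id is set to the
+ *  blocking thread's id by _block_device() below, which is what allows the
+ *  thread that blocked the device to keep passing through rLock() while
+ *  every other thread waits until the device is unblocked.]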
+ */ +#ifdef DEV_DEBUG_LOCK +void DEVICE::dbg_rLock(const char *file, int line, bool locked) +{ + Dmsg3(sd_dbglvl, "Enter rLock blked=%s from %s:%d\n", print_blocked(), + file, line); + if (!locked) { + /* lockmgr version of P(m_mutex) */ + Dmsg4(sd_dbglvl, "Lock %s in rLock %s from %s:%d\n", + device->hdr.name, print_blocked(), file, line); + bthread_mutex_lock_p(&m_mutex, file, line); + m_count++; + } + + if (blocked() && !pthread_equal(no_wait_id, pthread_self())) { + num_waiting++; /* indicate that I am waiting */ + while (blocked()) { + int stat; +#ifndef HAVE_WIN32 + /* thread id on Win32 may be a struct */ + Dmsg5(sd_dbglvl, "Blocked by %d %s in rLock blked=%s no_wait=%p me=%p\n", + blocked_by, device->hdr.name, print_blocked(), no_wait_id, pthread_self()); +#endif + if ((stat = bthread_cond_wait_p(&this->wait, &m_mutex, file, line)) != 0) { + berrno be; + this->dbg_Unlock(file, line); + Emsg1(M_ABORT, 0, _("pthread_cond_wait failure. ERR=%s\n"), + be.bstrerror(stat)); + } + } + num_waiting--; /* no longer waiting */ + } +} +#else /* DEV_DEBUG_LOCK */ + +void DEVICE::rLock(bool locked) +{ + if (!locked) { + Lock(); + m_count++; + } + + if (blocked() && !pthread_equal(no_wait_id, pthread_self())) { + num_waiting++; /* indicate that I am waiting */ + while (blocked()) { + int stat; +#ifndef HAVE_WIN32 + /* thread id on Win32 may be a struct */ + Dmsg5(sd_dbglvl, "Blocked by %d rLock %s blked=%s no_wait=%p me=%p\n", + blocked_by, device->hdr.name, print_blocked(), no_wait_id, pthread_self()); +#endif + if ((stat = pthread_cond_wait(&this->wait, &m_mutex)) != 0) { + berrno be; + this->Unlock(); + Emsg1(M_ABORT, 0, _("pthread_cond_wait failure. ERR=%s\n"), + be.bstrerror(stat)); + } + } + num_waiting--; /* no longer waiting */ + } +} + +#endif /* DEV_DEBUG_LOCK */ + +#ifdef SD_DEBUG_LOCK + +void DEVICE::dbg_Lock_acquire(const char *file, int line) +{ + Dmsg2(sd_dbglvl, "Lock_acquire from %s:%d\n", file, line); + bthread_mutex_lock_p(&acquire_mutex, file, line); +} + +void DEVICE::dbg_Unlock_acquire(const char *file, int line) +{ + Dmsg2(sd_dbglvl, "Unlock_acquire from %s:%d\n", file, line); + bthread_mutex_unlock_p(&acquire_mutex, file, line); +} + +void DEVICE::dbg_Lock_read_acquire(const char *file, int line) +{ + Dmsg2(sd_dbglvl, "Lock_read_acquire from %s:%d\n", file, line); + bthread_mutex_lock_p(&read_acquire_mutex, file, line); +} + +void DEVICE::dbg_Unlock_read_acquire(const char *file, int line) +{ + Dmsg2(sd_dbglvl, "Unlock_read_acquire from %s:%d\n", file, line); + bthread_mutex_unlock_p(&read_acquire_mutex, file, line); +} + +void DEVICE::dbg_Lock_VolCatInfo(const char *file, int line) +{ + bthread_mutex_lock_p(&volcat_mutex, file, line); +} + +void DEVICE::dbg_Unlock_VolCatInfo(const char *file, int line) +{ + bthread_mutex_unlock_p(&volcat_mutex, file, line); +} + +#else + +void DEVICE::Lock_acquire() +{ + P(acquire_mutex); +} + +void DEVICE::Unlock_acquire() +{ + V(acquire_mutex); +} + +void DEVICE::Lock_read_acquire() +{ + P(read_acquire_mutex); +} + +void DEVICE::Unlock_read_acquire() +{ + V(read_acquire_mutex); +} + +void DEVICE::Lock_VolCatInfo() +{ + P(volcat_mutex); +} + +void DEVICE::Unlock_VolCatInfo() +{ + V(volcat_mutex); +} + + + +#endif + +/* Main device access control */ +int DEVICE::init_mutex() +{ + return pthread_mutex_init(&m_mutex, NULL); +} + +/* Mutex around the freespace command */ +int DEVICE::init_freespace_mutex() +{ + return pthread_mutex_init(&freespace_mutex, NULL); +} + +/* Write device acquire mutex */ +int DEVICE::init_acquire_mutex() +{ + 
return pthread_mutex_init(&acquire_mutex, NULL); +} + +/* Read device acquire mutex */ +int DEVICE::init_read_acquire_mutex() +{ + return pthread_mutex_init(&read_acquire_mutex, NULL); +} + +/* VolCatInfo mutex */ +int DEVICE::init_volcat_mutex() +{ + return pthread_mutex_init(&volcat_mutex, NULL); +} + +/* dcrs mutex */ +int DEVICE::init_dcrs_mutex() +{ + return pthread_mutex_init(&dcrs_mutex, NULL); +} + +/* Set order in which device locks must be acquired */ +void DEVICE::set_mutex_priorities() +{ + /* Ensure that we respect this order in P/V operations */ + bthread_mutex_set_priority(&m_mutex, PRIO_SD_DEV_ACCESS); + bthread_mutex_set_priority(&spool_mutex, PRIO_SD_DEV_SPOOL); + bthread_mutex_set_priority(&acquire_mutex, PRIO_SD_DEV_ACQUIRE); +} + +int DEVICE::next_vol_timedwait(const struct timespec *timeout) +{ + return pthread_cond_timedwait(&wait_next_vol, &m_mutex, timeout); +} + + +/* + * Block all other threads from using the device + * Device must already be locked. After this call, + * the device is blocked to any thread calling dev->rLock(), + * but the device is not locked (i.e. no P on device). Also, + * the current thread can do slip through the dev->rLock() + * calls without blocking. + */ +void _block_device(const char *file, int line, DEVICE *dev, int state) +{ + ASSERT2(dev->blocked() == BST_NOT_BLOCKED, "Block request of device already blocked"); + dev->set_blocked(state); /* make other threads wait */ + dev->no_wait_id = pthread_self(); /* allow us to continue */ + dev->blocked_by = get_jobid_from_tsd(); + Dmsg4(sd_dbglvl, "Blocked %s %s from %s:%d\n", + dev->device->hdr.name, dev->print_blocked(), file, line); +} + +/* + * Unblock the device, and wake up anyone who went to sleep. + * Enter: device locked + * Exit: device locked + */ +void _unblock_device(const char *file, int line, DEVICE *dev) +{ + Dmsg4(sd_dbglvl, "Unblocked %s %s from %s:%d\n", dev->device->hdr.name, + dev->print_blocked(), file, line); + ASSERT2(dev->blocked(), "Unblock request of device not blocked"); + dev->set_blocked(BST_NOT_BLOCKED); + dev->blocked_by = 0; + clear_thread_id(dev->no_wait_id); + if (dev->num_waiting > 0) { + pthread_cond_broadcast(&dev->wait); /* wake them up */ + } +} + +static pthread_mutex_t block_mutex = PTHREAD_MUTEX_INITIALIZER; +/* + * Enter and leave with device locked + * + * Note: actually this routine: + * returns true if it can either set or steal the device block + * returns false if it cannot block the device + */ +bool DEVICE::_obtain_device_block(const char *file, int line, + bsteal_lock_t *hold, int retry, int state) +{ + int ret; + int r = retry; + + if (!can_obtain_block() && !pthread_equal(no_wait_id, pthread_self())) { + num_waiting++; /* indicate that I am waiting */ + while ((retry == 0 || r-- > 0) && !can_obtain_block()) { + if ((ret = bthread_cond_wait_p(&wait, &m_mutex, file, line)) != 0) { + berrno be; + Emsg1(M_ABORT, 0, _("pthread_cond_wait failure. ERR=%s\n"), + be.bstrerror(ret)); + } + } + num_waiting--; /* no longer waiting */ + } + + P(block_mutex); + Dmsg4(sd_dbglvl, "Steal lock %s old=%s from %s:%d\n", + device->hdr.name, print_blocked(), file, line); + + if (!can_obtain_block() && !pthread_equal(no_wait_id, pthread_self())) { + V(block_mutex); + return false; + } + hold->dev_blocked = blocked(); + hold->dev_prev_blocked = dev_prev_blocked; + hold->no_wait_id = no_wait_id; + hold->blocked_by = blocked_by; + set_blocked(state); + Dmsg1(sd_dbglvl, "steal block. 
new=%s\n", print_blocked()); + no_wait_id = pthread_self(); + blocked_by = get_jobid_from_tsd(); + V(block_mutex); + return true; +} + +/* + * Enter with device blocked and locked by us + * Exit with device locked, and blocked by previous owner + */ +void _give_back_device_block(const char *file, int line, + DEVICE *dev, bsteal_lock_t *hold) +{ + Dmsg4(sd_dbglvl, "Return lock %s old=%s from %s:%d\n", + dev->device->hdr.name, dev->print_blocked(), file, line); + P(block_mutex); + dev->set_blocked(hold->dev_blocked); + dev->dev_prev_blocked = hold->dev_prev_blocked; + dev->no_wait_id = hold->no_wait_id; + dev->blocked_by = hold->blocked_by; + Dmsg1(sd_dbglvl, "return lock. new=%s\n", dev->print_blocked()); + if (dev->num_waiting > 0) { + pthread_cond_broadcast(&dev->wait); /* wake them up */ + } + V(block_mutex); +} + +const char *DEVICE::print_blocked() const +{ + switch (m_blocked) { + case BST_NOT_BLOCKED: + return "BST_NOT_BLOCKED"; + case BST_UNMOUNTED: + return "BST_UNMOUNTED"; + case BST_WAITING_FOR_SYSOP: + return "BST_WAITING_FOR_SYSOP"; + case BST_DOING_ACQUIRE: + return "BST_DOING_ACQUIRE"; + case BST_WRITING_LABEL: + return "BST_WRITING_LABEL"; + case BST_UNMOUNTED_WAITING_FOR_SYSOP: + return "BST_UNMOUNTED_WAITING_FOR_SYSOP"; + case BST_MOUNT: + return "BST_MOUNT"; + case BST_DESPOOLING: + return "BST_DESPOOLING"; + case BST_RELEASING: + return "BST_RELEASING"; + default: + return _("unknown blocked code"); + } +} + + +/* + * Check if the device is blocked or not + */ +bool DEVICE::is_device_unmounted() +{ + bool stat; + + int blk = blocked(); + stat = (blk == BST_UNMOUNTED) || + (blk == BST_UNMOUNTED_WAITING_FOR_SYSOP); + return stat; +} diff --git a/src/stored/lock.h b/src/stored/lock.h new file mode 100644 index 00000000..77140c7d --- /dev/null +++ b/src/stored/lock.h @@ -0,0 +1,123 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Definitions for locking and blocking functions in the SD + * + * Kern Sibbald, pulled out of dev.h June 2007 + * + */ + + +#ifndef __LOCK_H +#define __LOCK_H 1 + +void _lock_reservations(const char *file="**Unknown**", int line=0); +void _unlock_reservations(); +void _lock_volumes(const char *file="**Unknown**", int line=0); +void _unlock_volumes(); + +#ifdef SD_DEBUG_LOCK + +#define lock_reservations() \ + do { Dmsg3(sd_dbglvl, "lock_reservations at %s:%d precnt=%d\n", \ + __FILE__, __LINE__, \ + reservations_lock_count); \ + _lock_reservations(__FILE__, __LINE__); \ + Dmsg0(sd_dbglvl, "lock_reservations: got lock\n"); \ + } while (0) +#define unlock_reservations() \ + do { Dmsg3(sd_dbglvl, "unlock_reservations at %s:%d precnt=%d\n", \ + __FILE__, __LINE__, \ + reservations_lock_count); \ + _unlock_reservations(); } while (0) + +#define lock_volumes() \ + do { Dmsg3(sd_dbglvl, "lock_volumes at %s:%d precnt=%d\n", \ + __FILE__, __LINE__, \ + vol_list_lock_count); \ + _lock_volumes(__FILE__, __LINE__); \ + Dmsg0(sd_dbglvl, "lock_volumes: got lock\n"); \ + } while (0) + +#define unlock_volumes() \ + do { Dmsg3(sd_dbglvl, "unlock_volumes at %s:%d precnt=%d\n", \ + __FILE__, __LINE__, \ + vol_list_lock_count); \ + _unlock_volumes(); } while (0) + +#else + +#define lock_reservations() _lock_reservations(__FILE__, __LINE__) +#define unlock_reservations() _unlock_reservations() +#define lock_volumes() _lock_volumes(__FILE__, __LINE__) +#define unlock_volumes() _unlock_volumes() + +#endif + +#ifdef DEV_DEBUG_LOCK +#define Lock() dbg_Lock(__FILE__, __LINE__) +#define Unlock() dbg_Unlock(__FILE__, __LINE__) +#define rLock(locked) dbg_rLock(__FILE__, __LINE__, locked) +#define rUnlock() dbg_rUnlock(__FILE__, __LINE__) +#endif + +#ifdef SD_DEBUG_LOCK +#define Lock_acquire() dbg_Lock_acquire(__FILE__, __LINE__) +#define Unlock_acquire() dbg_Unlock_acquire(__FILE__, __LINE__) +#define Lock_read_acquire() dbg_Lock_read_acquire(__FILE__, __LINE__) +#define Unlock_read_acquire() dbg_Unlock_read_acquire(__FILE__, __LINE__) +#define Lock_VolCatInfo() dbg_Lock_VolCatInfo(__FILE__, __LINE__) +#define Unlock_VolCatInfo() dbg_Unlock_VolCatInfo(__FILE__, __LINE__) +#endif + +#define block_device(d, s) _block_device(__FILE__, __LINE__, (d), s) +#define unblock_device(d) _unblock_device(__FILE__, __LINE__, (d)) + +#define obtain_device_block(d, p, r, s) (d)->_obtain_device_block(__FILE__, __LINE__, (p), (r), (s)) +#define give_back_device_block(d, p) _give_back_device_block(__FILE__, __LINE__, (d), (p)) + +/* m_blocked states (mutually exclusive) */ +enum { + BST_NOT_BLOCKED = 0, /* not blocked */ + BST_UNMOUNTED, /* User unmounted device */ + BST_WAITING_FOR_SYSOP, /* Waiting for operator to mount tape */ + BST_DOING_ACQUIRE, /* Opening/validating/moving tape */ + BST_WRITING_LABEL, /* Labeling a tape */ + BST_UNMOUNTED_WAITING_FOR_SYSOP, /* User unmounted during wait for op */ + BST_MOUNT, /* Mount request */ + BST_DESPOOLING, /* Despooling -- i.e. 
multiple writes */ + BST_RELEASING /* Releasing the device */ +}; + +typedef struct s_steal_lock { + pthread_t no_wait_id; /* id of no wait thread */ + int dev_blocked; /* state */ + int dev_prev_blocked; /* previous blocked state */ + uint32_t blocked_by; /* previous blocker */ +} bsteal_lock_t; + +/* + * Used in unblock() call + */ +enum { + DEV_LOCKED = true, + DEV_UNLOCKED = false +}; + +#endif diff --git a/src/stored/match_bsr.c b/src/stored/match_bsr.c new file mode 100644 index 00000000..6f5548ff --- /dev/null +++ b/src/stored/match_bsr.c @@ -0,0 +1,840 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Match Bootstrap Records (used for restores) against + * Volume Records + * + * Kern Sibbald, June MMII + * + */ + +/* + * ***FIXME*** + * Also for efficiency, once a bsr is done, it really should be + * delinked from the bsr chain. This will avoid the above + * problem and make traversal of the bsr chain more efficient. + * + * To be done ... + */ + +#include "bacula.h" +#include "stored.h" +#ifdef HAVE_FNMATCH +#include +#else +#include "lib/fnmatch.h" +#endif + +/* Temp code to test the new match_all code */ +int use_new_match_all = 0; + +const int dbglevel = 200; + +/* Forward references */ +static int match_volume(BSR *bsr, BSR_VOLUME *volume, VOLUME_LABEL *volrec, bool done); +static int match_sesstime(BSR *bsr, BSR_SESSTIME *sesstime, DEV_RECORD *rec, bool done); +static int match_sessid(BSR *bsr, BSR_SESSID *sessid, DEV_RECORD *rec); +static int match_client(BSR *bsr, BSR_CLIENT *client, SESSION_LABEL *sessrec, bool done); +static int match_job(BSR *bsr, BSR_JOB *job, SESSION_LABEL *sessrec, bool done); +static int match_job_type(BSR *bsr, BSR_JOBTYPE *job_type, SESSION_LABEL *sessrec, bool done); +static int match_job_level(BSR *bsr, BSR_JOBLEVEL *job_level, SESSION_LABEL *sessrec, bool done); +static int match_jobid(BSR *bsr, BSR_JOBID *jobid, SESSION_LABEL *sessrec, bool done); +static int match_findex(BSR *bsr, DEV_RECORD *rec, bool done); +static int match_voladdr(BSR *bsr, BSR_VOLADDR *voladdr, DEV_RECORD *rec, bool done); +static int match_stream(BSR *bsr, BSR_STREAM *stream, DEV_RECORD *rec, bool done); +static int match_all(BSR *bsr, DEV_RECORD *rec, VOLUME_LABEL *volrec, SESSION_LABEL *sessrec, bool done, JCR *jcr); +static int match_block_sesstime(BSR *bsr, BSR_SESSTIME *sesstime, DEV_BLOCK *block); +static int match_block_sessid(BSR *bsr, BSR_SESSID *sessid, DEV_BLOCK *block); +static BSR *find_smallest_volfile(BSR *fbsr, BSR *bsr); + +/* Temp function to test the new code */ +static int new_match_all(BSR *bsr, DEV_RECORD *rec, VOLUME_LABEL *volrec, + SESSION_LABEL *sessrec, bool done, JCR *jcr); + +/********************************************************************* + * + * If possible, position the archive device (tape) to read the + * next block. 
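/*
 * Illustrative sketch (not from the Bacula sources) of the macro trick used
 * by the SD_DEBUG_LOCK/DEV_DEBUG_LOCK wrappers above: the macro stamps every
 * call site with __FILE__/__LINE__ so the trace shows exactly who took the
 * lock. The demo_* names are hypothetical.
 */
#include <stdio.h>

static int demo_lock_count = 0;

/* Default arguments let the function also be called without location info. */
static void demo_lock_impl(const char *file = "**Unknown**", int line = 0)
{
   demo_lock_count++;
   printf("lock taken from %s:%d (count=%d)\n", file, line, demo_lock_count);
}

static void demo_unlock_impl()
{
   demo_lock_count--;
}

#define demo_lock()   demo_lock_impl(__FILE__, __LINE__)
#define demo_unlock() demo_unlock_impl()

int main()
{
   demo_lock();      /* the trace reports this exact file and line */
   demo_unlock();
   return 0;
}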
+ */ +void position_bsr_block(BSR *bsr, DEV_BLOCK *block) +{ + /* To be implemented */ +} + +/********************************************************************* + * + * Do fast block rejection based on bootstrap records. + * use_fast_rejection will be set if we have VolSessionId and VolSessTime + * in each record. When BlockVer is >= 2, we have those in the block header + * so can do fast rejection. + * + * returns: 1 if block may contain valid records + * 0 if block may be skipped (i.e. it contains no records of + * that can match the bsr). + * + */ +int match_bsr_block(BSR *bsr, DEV_BLOCK *block) +{ + if (!bsr || !bsr->use_fast_rejection || (block->BlockVer < 2)) { + return 1; /* cannot fast reject */ + } + + for ( ; bsr; bsr=bsr->next) { + if (!match_block_sesstime(bsr, bsr->sesstime, block)) { + continue; + } + if (!match_block_sessid(bsr, bsr->sessid, block)) { + continue; + } + return 1; + } + return 0; +} + +static int match_block_sesstime(BSR *bsr, BSR_SESSTIME *sesstime, DEV_BLOCK *block) +{ + if (!sesstime) { + return 1; /* no specification matches all */ + } + if (sesstime->sesstime == block->VolSessionTime) { + return 1; + } + if (sesstime->next) { + return match_block_sesstime(bsr, sesstime->next, block); + } + return 0; +} + +static int match_block_sessid(BSR *bsr, BSR_SESSID *sessid, DEV_BLOCK *block) +{ + if (!sessid) { + return 1; /* no specification matches all */ + } + if (sessid->sessid <= block->VolSessionId && sessid->sessid2 >= block->VolSessionId) { + return 1; + } + if (sessid->next) { + return match_block_sessid(bsr, sessid->next, block); + } + return 0; +} + +static int match_fileregex(BSR *bsr, DEV_RECORD *rec, JCR *jcr) +{ + if (bsr->fileregex_re == NULL) + return 1; + + if (bsr->attr == NULL) { + bsr->attr = new_attr(jcr); + } + + /* + * The code breaks if the first record associated with a file is + * not of this type + */ + if (rec->maskedStream == STREAM_UNIX_ATTRIBUTES || + rec->maskedStream == STREAM_UNIX_ATTRIBUTES_EX) { + bsr->skip_file = false; + if (unpack_attributes_record(jcr, rec->Stream, rec->data, rec->data_len, bsr->attr)) { + if (regexec(bsr->fileregex_re, bsr->attr->fname, 0, NULL, 0) == 0) { + Dmsg2(dbglevel, "Matched pattern, fname=%s FI=%d\n", + bsr->attr->fname, rec->FileIndex); + } else { + Dmsg2(dbglevel, "Didn't match, skipping fname=%s FI=%d\n", + bsr->attr->fname, rec->FileIndex); + bsr->skip_file = true; + } + } + } + return 1; +} + +/********************************************************************* + * + * Match Bootstrap records + * returns 1 on match + * returns 0 no match and reposition is set if we should + * reposition the tape + * returns -1 no additional matches possible + */ +int match_bsr(BSR *bsr, DEV_RECORD *rec, VOLUME_LABEL *volrec, SESSION_LABEL *sessrec, JCR *jcr) +{ + int stat; + + /* + * The bsr->reposition flag is set any time a bsr is done. + * In this case, we can probably reposition the + * tape to the next available bsr position. + */ + if (jcr->use_new_match_all) { /* TODO: Remove the if when the new code is tested */ + if (bsr->cur_bsr) { + bsr = bsr->cur_bsr; + } + } + if (bsr) { + bsr->reposition = false; + /* Temp code to test the new match_all */ + if (jcr->use_new_match_all) { + stat = new_match_all(bsr, rec, volrec, sessrec, true, jcr); + } else { + stat = match_all(bsr, rec, volrec, sessrec, true, jcr); + } + /* + * Note, bsr->reposition is set by match_all when + * a bsr is done. 
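/*
 * Illustrative sketch (not from the Bacula sources) of the matching
 * convention used throughout this file: an absent criterion (NULL pointer)
 * matches everything, otherwise any node in the chained list may match.
 * SessIdRange is a hypothetical stand-in for BSR_SESSID and friends.
 */
#include <stdio.h>
#include <stdint.h>

struct SessIdRange {
   uint32_t     low, high;       /* closed range of session ids */
   SessIdRange *next;            /* further alternatives */
};

static bool match_sessid_range(const SessIdRange *spec, uint32_t sessid)
{
   if (!spec) {
      return true;               /* no specification matches all */
   }
   for (const SessIdRange *p = spec; p; p = p->next) {
      if (p->low <= sessid && sessid <= p->high) {
         return true;
      }
   }
   return false;
}

int main()
{
   SessIdRange b = { 10, 12, NULL };
   SessIdRange a = { 1, 3, &b };
   printf("%d %d %d %d\n",
          match_sessid_range(&a, 2),      /* 1: first range */
          match_sessid_range(&a, 11),     /* 1: second range */
          match_sessid_range(&a, 7),      /* 0: in neither range */
          match_sessid_range(NULL, 999)); /* 1: no spec matches all */
   return 0;
}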
We turn it off if a match was + * found or if we cannot use positioning + */ + if (stat != 0 || !bsr->use_positioning) { + bsr->reposition = false; + } + } else { + stat = 1; /* no bsr => match all */ + } + return stat; +} + +/* + * Find the next bsr that applies to the current tape. + * It is the one with the smallest VolFile position. + */ +BSR *find_next_bsr(BSR *root_bsr, DEVICE *dev) +{ + BSR *bsr; + BSR *found_bsr = NULL; + + /* Do tape/disk seeking only if CAP_POSITIONBLOCKS is on */ + if (!root_bsr) { + Dmsg0(dbglevel, "NULL root bsr pointer passed to find_next_bsr.\n"); + return NULL; + } + if (!root_bsr->use_positioning || + !root_bsr->reposition || !dev->has_cap(CAP_POSITIONBLOCKS)) { + Dmsg2(dbglevel, "No nxt_bsr use_pos=%d repos=%d\n", root_bsr->use_positioning, root_bsr->reposition); + return NULL; + } + Dmsg2(dbglevel, "use_pos=%d repos=%d\n", root_bsr->use_positioning, root_bsr->reposition); + root_bsr->mount_next_volume = false; + /* Walk through all bsrs to find the next one to use => smallest file,block */ + for (bsr=root_bsr; bsr; bsr=bsr->next) { + if (bsr->done || !match_volume(bsr, bsr->volume, &dev->VolHdr, 1)) { + continue; + } + if (found_bsr == NULL) { + found_bsr = bsr; + } else { + found_bsr = find_smallest_volfile(found_bsr, bsr); + } + } + /* + * If we get to this point and found no bsr, it means + * that any additional bsr's must apply to the next + * tape, so set a flag. + */ + if (found_bsr == NULL) { + root_bsr->mount_next_volume = true; + } + return found_bsr; +} + +/* + * Get the smallest address from this voladdr part + * Don't use "done" elements + */ +static bool get_smallest_voladdr(BSR_VOLADDR *va, uint64_t *ret) +{ + bool ok=false; + uint64_t min_val=0; + + for (; va ; va = va->next) { + if (!va->done) { + if (ok) { + min_val = MIN(min_val, va->saddr); + } else { + min_val = va->saddr; + ok=true; + } + } + } + *ret = min_val; + return ok; +} + +/* FIXME + * This routine needs to be fixed to only look at items that + * are not marked as done. Otherwise, it can find a bsr + * that has already been consumed, and this will cause the + * bsr to be used, thus we may seek back and re-read the + * same records, causing an error. This deficiency must + * be fixed. For the moment, it has been kludged in + * read_record.c to avoid seeking back if find_next_bsr + * returns a bsr pointing to a smaller address (file/block). + * + */ +static BSR *find_smallest_volfile(BSR *found_bsr, BSR *bsr) +{ + BSR *return_bsr = found_bsr; + uint64_t found_bsr_saddr, bsr_saddr; + + /* if we have VolAddr, use it, else try with File and Block */ + if (get_smallest_voladdr(found_bsr->voladdr, &found_bsr_saddr)) { + if (get_smallest_voladdr(bsr->voladdr, &bsr_saddr)) { + if (found_bsr_saddr > bsr_saddr) { + return bsr; + } else { + return found_bsr; + } + } + } + + return return_bsr; +} + +/* + * Called after the signature record so that + * we can see if the current bsr has been + * fully processed (i.e. is done). + * The bsr argument is not used, but is included + * for consistency with the other match calls. + * + * Returns: true if we should reposition + * : false otherwise. 
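/*
 * Illustrative sketch (not from the Bacula sources) of the selection done by
 * get_smallest_voladdr()/find_smallest_volfile() above: pick the smallest
 * start address among entries not yet marked done, so already consumed
 * ranges never pull the seek position backwards. AddrRange is a hypothetical
 * stand-in for BSR_VOLADDR.
 */
#include <stdio.h>
#include <stdint.h>

struct AddrRange {
   uint64_t   saddr;             /* start address of the range */
   bool       done;              /* already consumed */
   AddrRange *next;
};

/* Returns false when every entry is done, i.e. nothing is left to seek to. */
static bool smallest_start_addr(const AddrRange *list, uint64_t *ret)
{
   bool ok = false;
   uint64_t min_val = 0;
   for (const AddrRange *p = list; p; p = p->next) {
      if (p->done) {
         continue;
      }
      if (!ok || p->saddr < min_val) {
         min_val = p->saddr;
         ok = true;
      }
   }
   *ret = min_val;
   return ok;
}

int main()
{
   AddrRange c = { 500, false, NULL };
   AddrRange b = { 100, true,  &c };   /* consumed: must be skipped */
   AddrRange a = { 300, false, &b };
   uint64_t addr;
   if (smallest_start_addr(&a, &addr)) {
      printf("seek to %llu\n", (unsigned long long)addr);  /* prints 300 */
   }
   return 0;
}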
+ */ +bool is_this_bsr_done(JCR *jcr, BSR *bsr, DEV_RECORD *rec) +{ + BSR *rbsr = rec->bsr; + Dmsg1(dbglevel, "match_set %d\n", rbsr != NULL); + if (!rbsr) { + return false; + } + rec->bsr = NULL; + + /* TODO: When the new code is stable, drop the else part */ + if (jcr->use_new_match_all) { + if (!rbsr->next) { + rbsr->found++; + } + /* Normally the loop must stop only *after* the last record has been read, + * and we are about to read the next record. + */ + if (rbsr->count && rbsr->found > rbsr->count) { + rbsr->done = true; + rbsr->root->reposition = true; + Dmsg2(dbglevel, "is_end_this_bsr set reposition=1 count=%d found=%d\n", + rbsr->count, rbsr->found); + return true; + } + + } else { + /* Old code that is stable */ + rbsr->found++; + + if (rbsr->count && rbsr->found >= rbsr->count) { + rbsr->done = true; + rbsr->root->reposition = true; + Dmsg2(dbglevel, "is_end_this_bsr set reposition=1 count=%d found=%d\n", + rbsr->count, rbsr->found); + return true; + } + } + Dmsg2(dbglevel, "is_end_this_bsr not done count=%d found=%d\n", + rbsr->count, rbsr->found); + return false; +} + + +static int new_match_all(BSR *bsr, DEV_RECORD *rec, VOLUME_LABEL *volrec, + SESSION_LABEL *sessrec, bool done, JCR *jcr) +{ + Dmsg0(dbglevel, "Enter match_all\n"); + for ( ; bsr; ) { + if (bsr->done) { + goto no_match; + } + if (!match_volume(bsr, bsr->volume, volrec, 1)) { + Dmsg2(dbglevel, "bsr fail bsr_vol=%s != rec read_vol=%s\n", bsr->volume->VolumeName, + volrec->VolumeName); + goto no_match; + } + + if (!match_voladdr(bsr, bsr->voladdr, rec, 1)) { + if (bsr->voladdr) { + Dmsg3(dbglevel, "Fail on Addr=%llu. bsr=%llu,%llu\n", + get_record_address(rec), bsr->voladdr->saddr, bsr->voladdr->eaddr); + } + goto no_match; + } + + if (!match_sesstime(bsr, bsr->sesstime, rec, 1)) { + Dmsg2(dbglevel, "Fail on sesstime. bsr=%u rec=%u\n", + bsr->sesstime->sesstime, rec->VolSessionTime); + goto no_match; + } + + /* NOTE!! This test MUST come after the sesstime test */ + if (!match_sessid(bsr, bsr->sessid, rec)) { + Dmsg2(dbglevel, "Fail on sessid. bsr=%u rec=%u\n", + bsr->sessid->sessid, rec->VolSessionId); + goto no_match; + } + + /* NOTE!! This test MUST come after sesstime and sessid tests */ + if (!match_findex(bsr, rec, 1)) { + Dmsg3(dbglevel, "Fail on recFI=%d. bsrFI=%d,%d\n", + rec->FileIndex, bsr->FileIndex->findex, bsr->FileIndex->findex2); + goto no_match; + } + if (bsr->FileIndex) { + Dmsg3(dbglevel, "match on findex=%d. bsrFI=%d,%d\n", + rec->FileIndex, bsr->FileIndex->findex, bsr->FileIndex->findex2); + } + + if (!match_fileregex(bsr, rec, jcr)) { + Dmsg1(dbglevel, "Fail on fileregex='%s'\n", NPRT(bsr->fileregex)); + goto no_match; + } + + /* This flag is set by match_fileregex (and perhaps other tests) */ + if (bsr->skip_file) { + Dmsg1(dbglevel, "Skipping findex=%d\n", rec->FileIndex); + goto no_match; + } + + /* + * If a count was specified and we have a FileIndex, assume + * it is a Bacula created bsr (or the equivalent). We + * then save the bsr where the match occurred so that + * after processing the record or records, we can update + * the found count. I.e. rec->bsr points to the bsr that + * satisfied the match. 
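/*
 * Illustrative sketch (not from the Bacula sources) of the count/found
 * bookkeeping in is_this_bsr_done() above: once 'count' matching records
 * have been delivered, the selector is done and the caller may reposition.
 * Selector is a hypothetical reduction of BSR.
 */
#include <stdio.h>

struct Selector {
   int  count;     /* how many records are wanted (0 = unlimited) */
   int  found;     /* how many have been delivered so far */
   bool done;
};

/* Called after a record has been delivered; returns true when we may stop. */
static bool record_delivered(Selector *s)
{
   s->found++;
   if (s->count && s->found >= s->count) {
      s->done = true;
      return true;
   }
   return false;
}

int main()
{
   Selector s = { 3, 0, false };
   for (int rec = 1; !s.done; rec++) {
      printf("record %d delivered, may_stop=%d\n", rec, record_delivered(&s));
   }
   return 0;
}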
+ */ + if (bsr->count && bsr->FileIndex) { + rec->bsr = bsr; + if (bsr->next && rec->FileIndex != bsr->LastFI) { + bsr->LastFI = rec->FileIndex; + } + Dmsg1(dbglevel, "Leave match_all 1 found=%d\n", bsr->found); + return 1; /* this is a complete match */ + } + + /* + * The selections below are not used by Bacula's + * restore command, and don't work because of + * the rec->bsr = bsr optimization above. + */ + if (sessrec) { + if (!match_jobid(bsr, bsr->JobId, sessrec, 1)) { + Dmsg0(dbglevel, "fail on JobId\n"); + goto no_match; + } + if (!match_job(bsr, bsr->job, sessrec, 1)) { + Dmsg0(dbglevel, "fail on Job\n"); + goto no_match; + } + if (!match_client(bsr, bsr->client, sessrec, 1)) { + Dmsg0(dbglevel, "fail on Client\n"); + goto no_match; + } + if (!match_job_type(bsr, bsr->JobType, sessrec, 1)) { + Dmsg0(dbglevel, "fail on Job type\n"); + goto no_match; + } + if (!match_job_level(bsr, bsr->JobLevel, sessrec, 1)) { + Dmsg0(dbglevel, "fail on Job level\n"); + goto no_match; + } + if (!match_stream(bsr, bsr->stream, rec, 1)) { + Dmsg0(dbglevel, "fail on stream\n"); + goto no_match; + } + } + return 1; + +no_match: + if (bsr->count && bsr->found >= bsr->count) { + bsr->done = true; + if (bsr->next) bsr->root->cur_bsr = bsr->next; + Dmsg1(dbglevel, "bsr done: Volume=%s\n", bsr->volume->VolumeName); + } + if (bsr->next) { + done = bsr->done && done; + bsr = bsr->next; + continue; + } + if (bsr->done && done) { + Dmsg0(dbglevel, "Leave match all -1\n"); + return -1; + } + Dmsg1(dbglevel, "Leave match all 0, repos=%d\n", bsr->reposition); + return 0; + } + return 0; +} + +/* + * Match all the components of current record + * returns 1 on match + * returns 0 no match + * returns -1 no additional matches possible + */ +static int match_all(BSR *bsr, DEV_RECORD *rec, VOLUME_LABEL *volrec, + SESSION_LABEL *sessrec, bool done, JCR *jcr) +{ + Dmsg0(dbglevel, "Enter match_all\n"); + if (bsr->done) { + goto no_match; + } + if (!match_volume(bsr, bsr->volume, volrec, 1)) { + Dmsg2(dbglevel, "bsr fail bsr_vol=%s != rec read_vol=%s\n", bsr->volume->VolumeName, + volrec->VolumeName); + goto no_match; + } + Dmsg2(dbglevel, "OK bsr match bsr_vol=%s read_vol=%s\n", bsr->volume->VolumeName, + volrec->VolumeName); + + if (!match_voladdr(bsr, bsr->voladdr, rec, 1)) { + if (bsr->voladdr) { + Dmsg3(dbglevel, "Fail on Addr=%llu. bsr=%llu,%llu\n", + get_record_address(rec), bsr->voladdr->saddr, bsr->voladdr->eaddr); + dump_record(rec); + } + goto no_match; + } + + if (!match_sesstime(bsr, bsr->sesstime, rec, 1)) { + Dmsg2(dbglevel, "Fail on sesstime. bsr=%u rec=%u\n", + bsr->sesstime->sesstime, rec->VolSessionTime); + goto no_match; + } + + /* NOTE!! This test MUST come after the sesstime test */ + if (!match_sessid(bsr, bsr->sessid, rec)) { + Dmsg2(dbglevel, "Fail on sessid. bsr=%u rec=%u\n", + bsr->sessid->sessid, rec->VolSessionId); + goto no_match; + } + + /* NOTE!! This test MUST come after sesstime and sessid tests */ + if (!match_findex(bsr, rec, 1)) { + Dmsg3(dbglevel, "Fail on findex=%d. bsr=%d,%d\n", + rec->FileIndex, bsr->FileIndex->findex, bsr->FileIndex->findex2); + goto no_match; + } + if (bsr->FileIndex) { + Dmsg3(dbglevel, "match on findex=%d. 
bsr=%d,%d\n", + rec->FileIndex, bsr->FileIndex->findex, bsr->FileIndex->findex2); + } + + if (!match_fileregex(bsr, rec, jcr)) { + Dmsg1(dbglevel, "Fail on fileregex='%s'\n", NPRT(bsr->fileregex)); + goto no_match; + } + + /* This flag is set by match_fileregex (and perhaps other tests) */ + if (bsr->skip_file) { + Dmsg1(dbglevel, "Skipping findex=%d\n", rec->FileIndex); + goto no_match; + } + + /* + * If a count was specified and we have a FileIndex, assume + * it is a Bacula created bsr (or the equivalent). We + * then save the bsr where the match occurred so that + * after processing the record or records, we can update + * the found count. I.e. rec->bsr points to the bsr that + * satisfied the match. + */ + if (bsr->count && bsr->FileIndex) { + rec->bsr = bsr; + Dmsg0(dbglevel, "Leave match_all 1\n"); + return 1; /* this is a complete match */ + } + + /* + * The selections below are not used by Bacula's + * restore command, and don't work because of + * the rec->bsr = bsr optimization above. + */ + if (!match_jobid(bsr, bsr->JobId, sessrec, 1)) { + Dmsg0(dbglevel, "fail on JobId\n"); + goto no_match; + + } + if (!match_job(bsr, bsr->job, sessrec, 1)) { + Dmsg0(dbglevel, "fail on Job\n"); + goto no_match; + } + if (!match_client(bsr, bsr->client, sessrec, 1)) { + Dmsg0(dbglevel, "fail on Client\n"); + goto no_match; + } + if (!match_job_type(bsr, bsr->JobType, sessrec, 1)) { + Dmsg0(dbglevel, "fail on Job type\n"); + goto no_match; + } + if (!match_job_level(bsr, bsr->JobLevel, sessrec, 1)) { + Dmsg0(dbglevel, "fail on Job level\n"); + goto no_match; + } + if (!match_stream(bsr, bsr->stream, rec, 1)) { + Dmsg0(dbglevel, "fail on stream\n"); + goto no_match; + } + return 1; + +no_match: + if (bsr->next) { + return match_all(bsr->next, rec, volrec, sessrec, bsr->done && done, jcr); + } + if (bsr->done && done) { + Dmsg0(dbglevel, "Leave match all -1\n"); + return -1; + } + Dmsg0(dbglevel, "Leave match all 0\n"); + return 0; +} + +static int match_volume(BSR *bsr, BSR_VOLUME *volume, VOLUME_LABEL *volrec, bool done) +{ + if (!volume) { + return 0; /* Volume must match */ + } + if (strcmp(volume->VolumeName, volrec->VolumeName) == 0) { + Dmsg1(dbglevel, "OK match_volume=%s\n", volrec->VolumeName); + return 1; + } + if (volume->next) { + return match_volume(bsr, volume->next, volrec, 1); + } + return 0; +} + +static int match_client(BSR *bsr, BSR_CLIENT *client, SESSION_LABEL *sessrec, bool done) +{ + if (!client) { + return 1; /* no specification matches all */ + } + if (strcmp(client->ClientName, sessrec->ClientName) == 0) { + return 1; + } + if (client->next) { + return match_client(bsr, client->next, sessrec, 1); + } + return 0; +} + +static int match_job(BSR *bsr, BSR_JOB *job, SESSION_LABEL *sessrec, bool done) +{ + if (!job) { + return 1; /* no specification matches all */ + } + if (strcmp(job->Job, sessrec->Job) == 0) { + return 1; + } + if (job->next) { + return match_job(bsr, job->next, sessrec, 1); + } + return 0; +} + +static int match_job_type(BSR *bsr, BSR_JOBTYPE *job_type, SESSION_LABEL *sessrec, bool done) +{ + if (!job_type) { + return 1; /* no specification matches all */ + } + if (job_type->JobType == sessrec->JobType) { + return 1; + } + if (job_type->next) { + return match_job_type(bsr, job_type->next, sessrec, 1); + } + return 0; +} + +static int match_job_level(BSR *bsr, BSR_JOBLEVEL *job_level, SESSION_LABEL *sessrec, bool done) +{ + if (!job_level) { + return 1; /* no specification matches all */ + } + if (job_level->JobLevel == sessrec->JobLevel) { + return 1; + } 
+ if (job_level->next) { + return match_job_level(bsr, job_level->next, sessrec, 1); + } + return 0; +} + +static int match_jobid(BSR *bsr, BSR_JOBID *jobid, SESSION_LABEL *sessrec, bool done) +{ + if (!jobid) { + return 1; /* no specification matches all */ + } + if (jobid->JobId <= sessrec->JobId && jobid->JobId2 >= sessrec->JobId) { + return 1; + } + if (jobid->next) { + return match_jobid(bsr, jobid->next, sessrec, 1); + } + return 0; +} + +static int match_voladdr(BSR *bsr, BSR_VOLADDR *voladdr, DEV_RECORD *rec, bool done) +{ + if (!voladdr) { + return 1; /* no specification matches all */ + } + + uint64_t addr = get_record_address(rec); + Dmsg6(dbglevel, "match_voladdr: saddr=%llu eaddr=%llu recaddr=%llu sfile=%u efile=%u recfile=%u\n", + voladdr->saddr, voladdr->eaddr, addr, (uint32_t)(voladdr->saddr>>32), + (uint32_t)(voladdr->eaddr>>32), (uint32_t)(addr>>32)); + + if (voladdr->saddr <= addr && voladdr->eaddr >= addr) { + Dmsg1(dbglevel, "OK match voladdr=%lld\n", addr); + return 1; + } + /* Once we get past last eblock, we are done */ + if (addr > voladdr->eaddr) { + voladdr->done = true; /* set local done */ + if (!voladdr->next) { /* done with everything? */ + bsr->done = true; /* yes */ + } + } + if (voladdr->next) { + return match_voladdr(bsr, voladdr->next, rec, voladdr->done && done); + } + + /* If we are done and all prior matches are done, this bsr is finished */ + if (voladdr->done && done) { + bsr->done = true; + bsr->root->reposition = true; + Dmsg2(dbglevel, "bsr done from voladdr rec=%llu voleaddr=%llu\n", + addr, voladdr->eaddr); + } + return 0; +} + + +static int match_stream(BSR *bsr, BSR_STREAM *stream, DEV_RECORD *rec, bool done) +{ + if (!stream) { + return 1; /* no specification matches all */ + } + if (stream->stream == rec->Stream) { + return 1; + } + if (stream->next) { + return match_stream(bsr, stream->next, rec, 1); + } + return 0; +} + +static int match_sesstime(BSR *bsr, BSR_SESSTIME *sesstime, DEV_RECORD *rec, bool done) +{ + if (!sesstime) { + return 1; /* no specification matches all */ + } + if (sesstime->sesstime == rec->VolSessionTime) { + return 1; + } + if (rec->VolSessionTime > sesstime->sesstime) { + sesstime->done = true; + } + if (sesstime->next) { + return match_sesstime(bsr, sesstime->next, rec, sesstime->done && done); + } + if (sesstime->done && done) { + bsr->done = true; + bsr->root->reposition = true; + Dmsg0(dbglevel, "bsr done from sesstime\n"); + } + return 0; +} + +/* + * Note, we cannot mark bsr done based on session id because we may + * have interleaved records, and there may be more of what we want + * later. + */ +static int match_sessid(BSR *bsr, BSR_SESSID *sessid, DEV_RECORD *rec) +{ + if (!sessid) { + return 1; /* no specification matches all */ + } + if (sessid->sessid <= rec->VolSessionId && sessid->sessid2 >= rec->VolSessionId) { + return 1; + } + if (sessid->next) { + return match_sessid(bsr, sessid->next, rec); + } + return 0; +} + +/* + * When reading the Volume, the Volume Findex (rec->FileIndex) always + * are found in sequential order. Thus we can make optimizations. + * + */ +static int match_findex(BSR *bsr, DEV_RECORD *rec, bool done) +{ + BSR_FINDEX *findex = bsr->FileIndex; + BSR_FINDEX *next; + + if (!findex) { + return 1; /* no specification matches all */ + } + + for ( ;; ) { + if (findex->findex <= rec->FileIndex && findex->findex2 >= rec->FileIndex) { + Dmsg3(dbglevel, "Match on recFindex=%d. 
bsrFIs=%d,%d\n", + rec->FileIndex, findex->findex, findex->findex2); + return 1; + } + if (rec->FileIndex > findex->findex2) { + /* TODO: See if we really want to modify the findex when we will try + * to seek backward */ + if (findex->next) { + next = findex->next; + Dmsg3(dbglevel, "No match recFindex=%d. bsrFIs=%d,%d\n", + rec->FileIndex, findex->findex, findex->findex2); + free(findex); + findex = next; + bsr->FileIndex = findex; + continue; + } else { + bsr->done = true; + bsr->root->reposition = true; + } + } + return 0; + } +} + +uint64_t get_bsr_start_addr(BSR *bsr) +{ + uint64_t bsr_addr = 0; + + if (bsr) { + if (bsr->voladdr) { + bsr_addr = bsr->voladdr->saddr; + } + } + + return bsr_addr; +} diff --git a/src/stored/mount.c b/src/stored/mount.c new file mode 100644 index 00000000..c0e89e09 --- /dev/null +++ b/src/stored/mount.c @@ -0,0 +1,880 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * Routines for handling mounting tapes for reading and for + * writing. + * + * Kern Sibbald, August MMII + * + */ + +#include "bacula.h" /* pull in global headers */ +#include "stored.h" /* pull in Storage Deamon headers */ + +/* Make sure some EROFS is defined */ +#ifndef EROFS /* read-only file system */ +#define EROFS -1 /* make impossible errno */ +#endif + +static pthread_mutex_t mount_mutex = PTHREAD_MUTEX_INITIALIZER; + +enum { + try_next_vol = 1, + try_read_vol, + try_error, + try_default +}; + +enum { + check_next_vol = 1, + check_ok, + check_read_vol, + check_error +}; + +/* + * If release is set, we rewind the current volume, + * which we no longer want, and ask the user (console) + * to mount the next volume. + * + * Continue trying until we get it, and then ensure + * that we can write on it. + * + * This routine returns a 0 only if it is REALLY + * impossible to get the requested Volume. + * + * This routine is entered with the device blocked, but not + * locked. + * + */ +bool DCR::mount_next_write_volume() +{ + int retry = 0; + bool ask = false, recycle, autochanger; + DCR *dcr = this; + + Enter(200); + set_ameta(); + Dmsg2(100, "Enter mount_next_volume(release=%d) dev=%s\n", dev->must_unload(), + dev->print_name()); + + init_device_wait_timers(dcr); + + P(mount_mutex); + + /* + * Attempt to mount the next volume. If something non-fatal goes + * wrong, we come back here to re-try (new op messages, re-read + * Volume, ...) + */ +mount_next_vol: + Dmsg1(100, "mount_next_vol retry=%d\n", retry); + /* Ignore retry if this is poll request */ + if (dev->is_nospace() || retry++ > 4) { + /* Last ditch effort before giving up, force operator to respond */ + VolCatInfo.Slot = 0; + V(mount_mutex); + if (!dir_ask_sysop_to_mount_volume(dcr, SD_APPEND)) { + Jmsg(jcr, M_FATAL, 0, _("Too many errors trying to mount %s device %s.\n"), + dev->print_type(), dev->print_name()); + goto no_lock_bail_out; + } + P(mount_mutex); + Dmsg1(90, "Continue after dir_ask_sysop_to_mount. 
must_load=%d\n", dev->must_load()); + } + if (job_canceled(jcr)) { + Jmsg(jcr, M_FATAL, 0, _("Job %d canceled.\n"), jcr->JobId); + goto bail_out; + } + recycle = false; + + if (dev->must_unload()) { + ask = true; /* ask operator to mount tape */ + } + do_unload(); + do_swapping(SD_APPEND); + do_load(SD_APPEND); + + if (!find_a_volume()) { + goto bail_out; + } + + if (job_canceled(jcr)) { + goto bail_out; + } + Dmsg3(100, "After find_a_volume. Vol=%s Slot=%d VolType=%d\n", + getVolCatName(), VolCatInfo.Slot, VolCatInfo.VolCatType); + + dev->notify_newvol_in_attached_dcrs(getVolCatName()); + + /* + * Get next volume and ready it for append + * This code ensures that the device is ready for + * writing. We start from the assumption that there + * may not be a tape mounted. + * + * If the device is a file, we create the output + * file. If it is a tape, we check the volume name + * and move the tape to the end of data. + * + */ + dcr->setVolCatInfo(false); /* out of date when Vols unlocked */ + if (autoload_device(dcr, SD_APPEND, NULL) > 0) { + autochanger = true; + ask = false; + } else { + autochanger = false; + VolCatInfo.Slot = 0; + if (dev->is_autochanger() && !VolCatInfo.InChanger) { + ask = true; /* not in changer, do not retry */ + } else { + ask = retry >= 2; + } + } + Dmsg1(100, "autoload_dev returns %d\n", autochanger); + /* + * If we autochanged to correct Volume or (we have not just + * released the Volume AND we can automount) we go ahead + * and read the label. If there is no tape in the drive, + * we will fail, recurse and ask the operator the next time. + */ + if (!dev->must_unload() && dev->is_tape() && dev->has_cap(CAP_AUTOMOUNT)) { + Dmsg0(250, "(1)Ask=0\n"); + ask = false; /* don't ask SYSOP this time */ + } + /* Don't ask if not removable */ + if (!dev->is_removable()) { + Dmsg0(250, "(2)Ask=0\n"); + ask = false; + } + Dmsg2(100, "Ask=%d autochanger=%d\n", ask, autochanger); + + if (ask) { + V(mount_mutex); + dcr->setVolCatInfo(false); /* out of date when Vols unlocked */ + if (!dir_ask_sysop_to_mount_volume(dcr, SD_APPEND)) { + Dmsg0(150, "Error return ask_sysop ...\n"); + goto no_lock_bail_out; + } + P(mount_mutex); + } + if (job_canceled(jcr)) { + goto bail_out; + } + Dmsg3(100, "want vol=%s devvol=%s dev=%s\n", VolumeName, + dev->VolHdr.VolumeName, dev->print_name()); + + if (dev->poll && dev->has_cap(CAP_CLOSEONPOLL)) { + dev->close(this); + free_volume(dev); + } + + /* Try autolabel if enabled */ + Dmsg1(100, "Try open Vol=%s\n", getVolCatName()); + if (!dev->open_device(dcr, OPEN_READ_WRITE)) { + Dmsg1(100, "Try autolabel Vol=%s\n", getVolCatName()); + if (!dev->poll) { + try_autolabel(false); /* try to create a new volume label */ + } + } + while (!dev->open_device(dcr, OPEN_READ_WRITE)) { + Dmsg1(100, "open_device failed: ERR=%s", dev->bstrerror()); + if (dev->is_file() && dev->is_removable()) { + bool ok = true; + Dmsg0(150, "call scan_dir_for_vol\n"); + if (ok && dev->scan_dir_for_volume(dcr)) { + if (dev->open_device(dcr, OPEN_READ_WRITE)) { + break; /* got a valid volume */ + } + } + } + if (try_autolabel(false) == try_read_vol) { + break; /* created a new volume label */ + } + + /* ***FIXME*** if autochanger, before giving up try unload and load */ + + Jmsg4(jcr, M_WARNING, 0, _("Open of %s device %s Volume \"%s\" failed: ERR=%s\n"), + dev->print_type(), dev->print_name(), dcr->VolumeName, dev->bstrerror()); + + /* If not removable, Volume is broken. This is a serious issue here. 
*/ + if (dev->is_file() && !dev->is_removable()) { + Dmsg3(40, "Volume \"%s\" not loaded on %s device %s.\n", + dcr->VolumeName, dev->print_type(), dev->print_name()); + if (dev->dev_errno == EACCES || dev->dev_errno == EROFS) { + mark_volume_read_only(); + } else { + mark_volume_in_error(); + } + + } else { + Dmsg0(100, "set_unload\n"); + if (dev->dev_errno == EACCES || dev->dev_errno == EROFS) { + mark_volume_read_only(); + } + dev->set_unload(); /* force ask sysop */ + ask = true; + } + + Dmsg0(100, "goto mount_next_vol\n"); + goto mount_next_vol; + } + + /* + * Now check the volume label to make sure we have the right tape mounted + */ +read_volume: + switch (check_volume_label(ask, autochanger)) { + case check_next_vol: + Dmsg0(50, "set_unload\n"); + dev->set_unload(); /* want a different Volume */ + Dmsg0(100, "goto mount_next_vol\n"); + goto mount_next_vol; + case check_read_vol: + goto read_volume; + case check_error: + goto bail_out; + case check_ok: + break; + } + /* + * Check that volcatinfo is good + */ + if (!dev->haveVolCatInfo()) { + Dmsg0(100, "Do not have volcatinfo\n"); + if (!find_a_volume()) { + goto mount_next_vol; + } + dev->set_volcatinfo_from_dcr(this); + } + + /* + * See if we have a fresh tape or a tape with data. + * + * Note, if the LabelType is PRE_LABEL, it was labeled + * but never written. If so, rewrite the label but set as + * VOL_LABEL. We rewind and return the label (reconstructed) + * in the block so that in the case of a new tape, data can + * be appended just after the block label. If we are writing + * a second volume, the calling routine will write the label + * before writing the overflow block. + * + * If the tape is marked as Recycle, we rewrite the label. + */ + recycle = strcmp(dev->VolCatInfo.VolCatStatus, "Recycle") == 0; + if (dev->VolHdr.LabelType == PRE_LABEL || recycle) { + dcr->WroteVol = false; + if (!dev->rewrite_volume_label(dcr, recycle)) { + mark_volume_in_error(); + goto mount_next_vol; + } + } else { + /* + * OK, at this point, we have a valid Bacula label, but + * we need to position to the end of the volume, since we are + * just now putting it into append mode. + */ + Dmsg1(100, "Device previously written, moving to end of data. Expect %lld bytes\n", + dev->VolCatInfo.VolCatBytes); + Jmsg(jcr, M_INFO, 0, _("Volume \"%s\" previously written, moving to end of data.\n"), + VolumeName); + + if (!dev->eod(dcr)) { + Dmsg3(050, "Unable to position to end of data on %s device %s: ERR=%s\n", + dev->print_type(), dev->print_name(), dev->bstrerror()); + Jmsg(jcr, M_ERROR, 0, _("Unable to position to end of data on %s device %s: ERR=%s\n"), + dev->print_type(), dev->print_name(), dev->bstrerror()); + mark_volume_in_error(); + goto mount_next_vol; + } + + if (!dev->is_eod_valid(dcr)) { + Dmsg0(100, "goto mount_next_vol\n"); + goto mount_next_vol; + } + + dev->VolCatInfo.VolCatMounts++; /* Update mounts */ + Dmsg1(150, "update volinfo mounts=%d\n", dev->VolCatInfo.VolCatMounts); + if (!dir_update_volume_info(dcr, false, false)) { + goto bail_out; + } + + /* Return an empty block */ + empty_block(block); /* we used it for reading so set for write */ + } + dev->set_append(); + Dmsg1(150, "set APPEND, normal return from mount_next_write_volume. dev=%s\n", + dev->print_name()); + + V(mount_mutex); + return true; + +bail_out: + V(mount_mutex); + +no_lock_bail_out: + Leave(200); + return false; +} + +/* + * This routine is meant to be called once the first pass + * to ensure that we have a candidate volume to mount. 
+ * Otherwise, we ask the sysop to created one. + * Note, mount_mutex is already locked on entry and thus + * must remain locked on exit from this function. + */ +bool DCR::find_a_volume() +{ + DCR *dcr = this; + bool ok; + + if (!is_suitable_volume_mounted()) { + bool have_vol = false; + /* Do we have a candidate volume? */ + if (dev->vol) { + bstrncpy(VolumeName, dev->vol->vol_name, sizeof(VolumeName)); + have_vol = dir_get_volume_info(this, VolumeName, GET_VOL_INFO_FOR_WRITE); + } + /* + * Get Director's idea of what tape we should have mounted. + * in dcr->VolCatInfo + */ + if (!have_vol) { + Dmsg0(200, "Before dir_find_next_appendable_volume.\n"); + while (!dir_find_next_appendable_volume(dcr)) { + Dmsg0(200, "not dir_find_next\n"); + if (job_canceled(jcr)) { + return false; + } + /* + * Unlock the mount mutex while waiting or + * the Director for a new volume + */ + V(mount_mutex); + if (dev->must_wait()) { + int retries = 5; + Dmsg0(40, "No appendable volume. Calling wait_for_device\n"); + wait_for_device(dcr, retries); + ok = true; + } else { + ok = dir_ask_sysop_to_create_appendable_volume(dcr); + } + P(mount_mutex); + if (!ok || job_canceled(jcr)) { + return false; + } + Dmsg0(150, "Again dir_find_next_append...\n"); + } + dev->clear_wait(); + } + } + if (dcr->haveVolCatInfo()) { + return true; + } + return dir_get_volume_info(dcr, VolumeName, GET_VOL_INFO_FOR_WRITE); +} + +int DCR::check_volume_label(bool &ask, bool &autochanger) +{ + int vol_label_status; + + Enter(200); + + set_ameta(); + /* + * If we are writing to a stream device, ASSUME the volume label + * is correct. + */ + if (dev->has_cap(CAP_STREAM)) { + vol_label_status = VOL_OK; + create_volume_header(dev, VolumeName, "Default", false); + dev->VolHdr.LabelType = PRE_LABEL; + } else { + vol_label_status = dev->read_dev_volume_label(this); + } + if (job_canceled(jcr)) { + goto check_bail_out; + } + + Dmsg2(150, "Want dirVol=%s dirStat=%s\n", VolumeName, + VolCatInfo.VolCatStatus); + + /* + * At this point, dev->VolCatInfo has what is in the drive, if anything, + * and dcr->VolCatInfo has what the Director wants. + */ + switch (vol_label_status) { + case VOL_OK: + Dmsg1(150, "Vol OK name=%s\n", dev->VolHdr.VolumeName); + dev->VolCatInfo = VolCatInfo; /* structure assignment */ + break; /* got a Volume */ + case VOL_NAME_ERROR: + VOLUME_CAT_INFO dcrVolCatInfo, devVolCatInfo; + char saveVolumeName[MAX_NAME_LENGTH]; + + Dmsg2(40, "Vol NAME Error Have=%s, want=%s\n", dev->VolHdr.VolumeName, VolumeName); + if (dev->is_volume_to_unload()) { + ask = true; + goto check_next_volume; + } + +#ifdef xxx + /* If not removable, Volume is broken */ + if (!dev->is_removable()) { + Jmsg3(jcr, M_WARNING, 0, _("Volume \"%s\" not loaded on %s device %s.\n"), + VolumeName, dev->print_type(), dev->print_name()); + Dmsg3(40, "Volume \"%s\" not loaded on %s device %s.\n", + VolumeName, dev->print_type(), dev->print_name()); + mark_volume_in_error(); + goto check_next_volume; + } +#endif + + /* + * OK, we got a different volume mounted. First save the + * requested Volume info (dcr) structure, then query if + * this volume is really OK. If not, put back the desired + * volume name, mark it not in changer and continue. 
+ */ + dcrVolCatInfo = VolCatInfo; /* structure assignment */ + devVolCatInfo = dev->VolCatInfo; /* structure assignment */ + /* Check if this is a valid Volume in the pool */ + bstrncpy(saveVolumeName, VolumeName, sizeof(saveVolumeName)); + bstrncpy(VolumeName, dev->VolHdr.VolumeName, sizeof(VolumeName)); + if (!dir_get_volume_info(this, VolumeName, GET_VOL_INFO_FOR_WRITE)) { + POOL_MEM vol_info_msg; + pm_strcpy(vol_info_msg, jcr->dir_bsock->msg); /* save error message */ + /* Restore desired volume name, note device info out of sync */ + /* This gets the info regardless of the Pool */ + bstrncpy(VolumeName, dev->VolHdr.VolumeName, sizeof(VolumeName)); + if (autochanger && !dir_get_volume_info(this, VolumeName, GET_VOL_INFO_FOR_READ)) { + /* + * If we get here, we know we cannot write on the Volume, + * and we know that we cannot read it either, so it + * is not in the autochanger. + */ + mark_volume_not_inchanger(); + } + dev->VolCatInfo = devVolCatInfo; /* structure assignment */ + dev->set_unload(); /* unload this volume */ + Jmsg(jcr, M_WARNING, 0, _("Director wanted Volume \"%s\".\n" + " Current Volume \"%s\" not acceptable because:\n" + " %s"), + dcrVolCatInfo.VolCatName, dev->VolHdr.VolumeName, + vol_info_msg.c_str()); + ask = true; + /* Restore saved DCR before continuing */ + bstrncpy(VolumeName, saveVolumeName, sizeof(VolumeName)); + VolCatInfo = dcrVolCatInfo; /* structure assignment */ + goto check_next_volume; + } + /* + * This was not the volume we expected, but it is OK with + * the Director, so use it. + */ + Dmsg1(150, "Got new Volume name=%s\n", VolumeName); + dev->VolCatInfo = VolCatInfo; /* structure assignment */ + Dmsg1(100, "Call reserve_volume=%s\n", dev->VolHdr.VolumeName); + if (reserve_volume(this, dev->VolHdr.VolumeName) == NULL) { + if (!jcr->errmsg[0]) { + Jmsg3(jcr, M_WARNING, 0, _("Could not reserve volume %s on %s device %s\n"), + dev->VolHdr.VolumeName, dev->print_type(), dev->print_name()); + } else { + Jmsg(jcr, M_WARNING, 0, "%s", jcr->errmsg); + } + ask = true; + dev->setVolCatInfo(false); + setVolCatInfo(false); + goto check_next_volume; + } + break; /* got a Volume */ + /* + * At this point, we assume we have a blank tape mounted. + */ + case VOL_IO_ERROR: + /* Fall through wanted */ + case VOL_NO_LABEL: + switch (try_autolabel(true)) { + case try_next_vol: + goto check_next_volume; + case try_read_vol: + goto check_read_volume; + case try_error: + goto check_bail_out; + case try_default: + break; + } + /* NOTE! Fall-through wanted. */ + case VOL_NO_MEDIA: + default: + Dmsg0(200, "VOL_NO_MEDIA or default.\n"); + /* Send error message */ + if (!dev->poll) { + } else { + Dmsg1(200, "Msg suppressed by poll: %s\n", jcr->errmsg); + } + ask = true; + /* Needed, so the medium can be changed */ + if (dev->requires_mount()) { + dev->close(this); + free_volume(dev); + } + goto check_next_volume; + } + Leave(200); + return check_ok; + +check_next_volume: + dev->setVolCatInfo(false); + setVolCatInfo(false); + Leave(200); + return check_next_vol; + +check_bail_out: + Leave(200); + return check_error; + +check_read_volume: + Leave(200); + return check_read_vol; + +} + + +bool DCR::is_suitable_volume_mounted() +{ + bool ok; + + /* Volume mounted? 
*/ + if (dev->VolHdr.VolumeName[0] == 0 || dev->swap_dev || dev->must_unload()) { + return false; /* no */ + } + bstrncpy(VolumeName, dev->VolHdr.VolumeName, sizeof(VolumeName)); + ok = dir_get_volume_info(this, VolumeName, GET_VOL_INFO_FOR_WRITE); + if (!ok) { + Dmsg1(40, "dir_get_volume_info failed: %s", jcr->errmsg); + dev->set_wait(); + } + return ok; +} + +bool DCR::do_unload() +{ + if (dev->must_unload()) { + Dmsg1(100, "must_unload release %s\n", dev->print_name()); + release_volume(); + } + return false; +} + +bool DCR::do_load(bool is_writing) +{ + if (dev->must_load()) { + Dmsg1(100, "Must load dev=%s\n", dev->print_name()); + if (autoload_device(this, is_writing, NULL) > 0) { + dev->clear_load(); + return true; + } + return false; + } + return true; +} + +void DCR::do_swapping(bool is_writing) +{ + /* + * See if we are asked to swap the Volume from another device + * if so, unload the other device here, and attach the + * volume to our drive. + */ + if (dev->swap_dev) { + if (dev->swap_dev->must_unload()) { + if (dev->vol) { + dev->swap_dev->set_slot(dev->vol->get_slot()); + } + Dmsg2(100, "Swap unloading slot=%d %s\n", dev->swap_dev->get_slot(), + dev->swap_dev->print_name()); + unload_dev(this, dev->swap_dev); + } + if (dev->vol) { + dev->vol->clear_swapping(); + Dmsg1(100, "=== set in_use vol=%s\n", dev->vol->vol_name); + dev->vol->clear_in_use(); + dev->VolHdr.VolumeName[0] = 0; /* don't yet have right Volume */ + } else { + Dmsg1(100, "No vol on dev=%s\n", dev->print_name()); + } + if (dev->swap_dev->vol) { + Dmsg2(100, "Vol=%s on dev=%s\n", dev->swap_dev->vol->vol_name, + dev->swap_dev->print_name()); + } + Dmsg2(100, "Set swap_dev=NULL for dev=%s swap_dev=%s\n", + dev->print_name(), dev->swap_dev->print_name()); + dev->swap_dev = NULL; + } else { + if (dev->vol) { + Dmsg1(100, "No swap_dev set. dev->vol=%p\n", dev->vol); + } else { + Dmsg1(100, "No swap_dev set. dev->vol=%p\n", dev->vol); + } + } +} + +/* + * If permitted, we label the device, make sure we can do + * it by checking that the VolCatBytes is zero => not labeled, + * once the Volume is labeled we don't want to label another + * blank tape with the same name. For disk, we go ahead and + * label it anyway, because the OS insures that there is only + * one Volume with that name. + * As noted above, at this point dcr->VolCatInfo has what + * the Director wants and dev->VolCatInfo has info on the + * previous tape (or nothing). + * + * Return codes are: + * try_next_vol label failed, look for another volume + * try_read_vol labeled volume, now re-read the label + * try_error hard error (catalog update) + * try_default I couldn't do anything + */ +int DCR::try_autolabel(bool opened) +{ + DCR *dcr = this; + + if (dev->poll && !dev->is_tape()) { + Dmsg0(100, "No autolabel because polling.\n"); + return try_default; /* if polling, don't try to create new labels */ + } + /* For a tape require it to be opened and read before labeling */ + if (!opened && (dev->is_tape() || dev->is_null())) { + return try_default; + } + if (dev->has_cap(CAP_LABEL) && (VolCatInfo.VolCatBytes == 0 || + (!dev->is_tape() && strcmp(VolCatInfo.VolCatStatus, + "Recycle") == 0))) { + Dmsg1(40, "Create new volume label vol=%s\n", VolumeName); + /* Create a new Volume label and write it to the device */ + if (!dev->write_volume_label(dcr, VolumeName, + pool_name, false, /* no relabel */ false /* defer label */)) { + Dmsg2(100, "write_vol_label failed. 
vol=%s, pool=%s\n", + VolumeName, pool_name); + if (opened) { + mark_volume_in_error(); + } + return try_next_vol; + } + Dmsg0(150, "dir_update_vol_info. Set Append\n"); + /* Copy Director's info into the device info */ + dev->VolCatInfo = VolCatInfo; /* structure assignment */ + if (!dir_update_volume_info(dcr, true, true)) { /* indicate tape labeled */ + Dmsg3(100, "Update_vol_info failed no autolabel Volume \"%s\" on %s device %s.\n", + VolumeName, dev->print_type(), dev->print_name()); + return try_error; + } + Jmsg(dcr->jcr, M_INFO, 0, _("Labeled new Volume \"%s\" on %s device %s.\n"), + VolumeName, dev->print_type(), dev->print_name()); + Dmsg3(100, "Labeled new Volume \"%s\" on %s device %s.\n", + VolumeName, dev->print_type(), dev->print_name()); + return try_read_vol; /* read label we just wrote */ + } else { + Dmsg4(40, "=== Cannot autolabel: cap_label=%d VolCatBytes=%lld is_tape=%d VolCatStatus=%s\n", + dev->has_cap(CAP_LABEL), VolCatInfo.VolCatBytes, dev->is_tape(), + VolCatInfo.VolCatStatus); + } + if (!dev->has_cap(CAP_LABEL) && VolCatInfo.VolCatBytes == 0) { + Jmsg(jcr, M_WARNING, 0, _("%s device %s not configured to autolabel Volumes.\n"), + dev->print_type(), dev->print_name()); + } +#ifdef xxx + /* If not removable, Volume is broken */ + if (!dev->is_removable()) { + Jmsg3(jcr, M_WARNING, 0, _("Volume \"%s\" not loaded on %s device %s.\n"), + VolumeName, dev->print_type(), dev->print_name()); + Dmsg3(40, "Volume \"%s\" not loaded on %s device %s.\n", + VolumeName, dev->print_type(), dev->print_name()); + + mark_volume_in_error(); + return try_next_vol; + } +#endif + return try_default; +} + + +/* + * Mark volume in error in catalog + */ +void DCR::mark_volume_in_error() +{ + Jmsg(jcr, M_INFO, 0, _("Marking Volume \"%s\" in Error in Catalog.\n"), + VolumeName); + dev->VolCatInfo = VolCatInfo; /* structure assignment */ + dev->setVolCatStatus("Error"); + Dmsg0(150, "dir_update_vol_info. Set Error.\n"); + dir_update_volume_info(this, false, false); + volume_unused(this); + Dmsg0(50, "set_unload\n"); + dev->set_unload(); /* must get a new volume */ +} + +/* + * Mark volume read_only in catalog + */ +void DCR::mark_volume_read_only() +{ + Jmsg(jcr, M_INFO, 0, _("Marking Volume \"%s\" Read-Only in Catalog.\n"), + VolumeName); + dev->VolCatInfo = VolCatInfo; /* structure assignment */ + dev->setVolCatStatus("Read-Only"); + Dmsg0(150, "dir_update_vol_info. Set Read-Only.\n"); + dir_update_volume_info(this, false, false); + volume_unused(this); + Dmsg0(50, "set_unload\n"); + dev->set_unload(); /* must get a new volume */ +} + + +/* + * The Volume is not in the correct slot, so mark this + * Volume as not being in the Changer. + */ +void DCR::mark_volume_not_inchanger() +{ + Jmsg(jcr, M_ERROR, 0, _("Autochanger Volume \"%s\" not found in slot %d.\n" +" Setting InChanger to zero in catalog.\n"), + getVolCatName(), VolCatInfo.Slot); + dev->VolCatInfo = VolCatInfo; /* structure assignment */ + VolCatInfo.InChanger = false; + dev->VolCatInfo.InChanger = false; + Dmsg0(400, "update vol info in mount\n"); + dir_update_volume_info(this, true, false); /* set new status */ +} + +/* + * Either because we are going to hang a new volume, or because + * of explicit user request, we release the current volume. + */ +void DCR::release_volume() +{ + unload_autochanger(this, -1); + + if (WroteVol) { + Jmsg0(jcr, M_ERROR, 0, _("Hey!!!!! WroteVol non-zero !!!!!\n")); + Pmsg0(190, "Hey!!!!! 
WroteVol non-zero !!!!!\n"); + } + + if (dev->is_open() && (!dev->is_tape() || !dev->has_cap(CAP_ALWAYSOPEN))) { + generate_plugin_event(jcr, bsdEventDeviceClose, this); + dev->close(this); + } + + /* If we have not closed the device, then at least rewind the tape */ + if (dev->is_open()) { + dev->offline_or_rewind(this); + } + + /* + * Erase all memory of the current volume + */ + free_volume(dev); + dev->block_num = dev->file = 0; + dev->EndBlock = dev->EndFile = 0; + memset(&dev->VolCatInfo, 0, sizeof(dev->VolCatInfo)); + dev->clear_volhdr(); + /* Force re-read of label */ + dev->clear_labeled(); + dev->clear_read(); + dev->clear_append(); + dev->label_type = B_BACULA_LABEL; + VolumeName[0] = 0; + + Dmsg0(190, "release_volume\n"); +} + +/* + * Insanity check + * + * Check to see if the tape position as defined by the OS is + * the same as our concept. If it is not, + * it means the user has probably manually rewound the tape. + * Note, we check only if num_writers == 0, but this code will + * also work fine for any number of writers. If num_writers > 0, + * we probably should cancel all jobs using this device, or + * perhaps even abort the SD, or at a minimum, mark the tape + * in error. Another strategy with num_writers == 0, would be + * to rewind the tape and do a new eod() request. + */ +bool DCR::is_tape_position_ok() +{ + if (dev->is_tape() && dev->num_writers == 0) { + int32_t file = dev->get_os_tape_file(); + if (file >= 0 && file != (int32_t)dev->get_file()) { + Jmsg(jcr, M_ERROR, 0, _("Invalid tape position on volume \"%s\"" + " on device %s. Expected %d, got %d\n"), + dev->VolHdr.VolumeName, dev->print_name(), dev->get_file(), file); + /* + * If the current file is greater than zero, it means we probably + * have some bad count of EOF marks, so mark tape in error. Otherwise + * the operator might have moved the tape, so we just release it + * and try again. + */ + if (file > 0) { + mark_volume_in_error(); + } + release_volume(); + return false; + } + } + return true; +} + + +/* + * If we are reading, we come here at the end of the tape + * and see if there are more volumes to be mounted. + */ +bool mount_next_read_volume(DCR *dcr) +{ + DEVICE *dev = dcr->dev; + JCR *jcr = dcr->jcr; + Dmsg2(90, "NumReadVolumes=%d CurReadVolume=%d\n", jcr->NumReadVolumes, jcr->CurReadVolume); + + volume_unused(dcr); /* release current volume */ + /* + * End Of Tape -- mount next Volume (if another specified) + */ + if (jcr->NumReadVolumes > 1 && jcr->CurReadVolume < jcr->NumReadVolumes) { + dev->Lock(); + dev->close(dcr); + dev->set_read(); + dcr->set_reserved_for_read(); + dev->Unlock(); + if (!acquire_device_for_read(dcr)) { + Jmsg3(jcr, M_FATAL, 0, _("Cannot open %s Dev=%s, Vol=%s for reading.\n"), + dev->print_type(), dev->print_name(), dcr->VolumeName); + jcr->setJobStatus(JS_FatalError); /* Jmsg is not working for *SystemJob* */ + return false; + } + return true; /* next volume mounted */ + } + Dmsg0(90, "End of Device reached.\n"); + return false; +} diff --git a/src/stored/null_dev.c b/src/stored/null_dev.c new file mode 100644 index 00000000..3703b7ee --- /dev/null +++ b/src/stored/null_dev.c @@ -0,0 +1,33 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * Null driver code + * + * written by, Kern Sibbald, MMXVI + * + */ + +#include "bacula.h" +#include "stored.h" + +const char *null_dev::print_type() +{ + return "Null"; +} diff --git a/src/stored/null_dev.h b/src/stored/null_dev.h new file mode 100644 index 00000000..33b85e78 --- /dev/null +++ b/src/stored/null_dev.h @@ -0,0 +1,34 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * NULL driver -- normally only used for performance testing + */ + +#ifndef __NULL_DEV_ +#define __NULL_DEV_ + +class null_dev : public file_dev { +public: + + null_dev() { }; + ~null_dev() { }; + const char *print_type(); +}; + +#endif /* __NULL_DEV_ */ diff --git a/src/stored/os.c b/src/stored/os.c new file mode 100644 index 00000000..c8ec1cce --- /dev/null +++ b/src/stored/os.c @@ -0,0 +1,421 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * os.c -- Operating System dependent dev.c routines + * + * written by, Kern Sibbald, MM + * separated from dev.c February 2014 + * + * Note, this is the device dependent code, and may have + * to be modified for each system. 
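/*
 * Illustrative sketch (not from the Bacula sources) of the small driver
 * pattern used by null_dev above: a device subclass only has to override
 * print_type() (and whatever I/O methods matter) to plug into the rest of
 * the storage daemon. The demo_* names are hypothetical.
 */
#include <stdio.h>

class demo_dev {
public:
   virtual ~demo_dev() {}
   virtual const char *print_type() { return "Generic"; }
};

class demo_null_dev : public demo_dev {
public:
   const char *print_type() { return "Null"; }   /* overrides the base type name */
};

int main()
{
   demo_dev      base;
   demo_null_dev null;
   demo_dev     *devs[2] = { &base, &null };
   for (int i = 0; i < 2; i++) {
      printf("device %d is of type %s\n", i, devs[i]->print_type());
   }
   return 0;
}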
+ */ + + +#include "bacula.h" +#include "stored.h" + +/* Returns file position on tape or -1 */ +int32_t DEVICE::get_os_tape_file() +{ + struct mtget mt_stat; + + if (has_cap(CAP_MTIOCGET) && + d_ioctl(m_fd, MTIOCGET, (char *)&mt_stat) == 0) { + return mt_stat.mt_fileno; + } + return -1; +} + + +void set_os_device_parameters(DCR *dcr) +{ + DEVICE *dev = dcr->dev; + + if (strcmp(dev->dev_name, "/dev/null") == 0) { + return; /* no use trying to set /dev/null */ + } + +#if defined(HAVE_LINUX_OS) || defined(HAVE_WIN32) + struct mtop mt_com; + + Dmsg0(100, "In set_os_device_parameters\n"); +#if defined(MTSETBLK) + if (dev->min_block_size == dev->max_block_size && + dev->min_block_size == 0) { /* variable block mode */ + mt_com.mt_op = MTSETBLK; + mt_com.mt_count = 0; + Dmsg0(100, "Set block size to zero\n"); + if (dev->d_ioctl(dev->fd(), MTIOCTOP, (char *)&mt_com) < 0) { + dev->clrerror(MTSETBLK); + } + } +#endif +#if defined(MTSETDRVBUFFER) + if (getuid() == 0) { /* Only root can do this */ + mt_com.mt_op = MTSETDRVBUFFER; + mt_com.mt_count = MT_ST_CLEARBOOLEANS; + if (!dev->has_cap(CAP_TWOEOF)) { + mt_com.mt_count |= MT_ST_TWO_FM; + } + if (dev->has_cap(CAP_EOM)) { + mt_com.mt_count |= MT_ST_FAST_MTEOM; + } + Dmsg0(100, "MTSETDRVBUFFER\n"); + if (dev->d_ioctl(dev->fd(), MTIOCTOP, (char *)&mt_com) < 0) { + dev->clrerror(MTSETDRVBUFFER); + } + } +#endif + return; +#endif + +#ifdef HAVE_NETBSD_OS + struct mtop mt_com; + if (dev->min_block_size == dev->max_block_size && + dev->min_block_size == 0) { /* variable block mode */ + mt_com.mt_op = MTSETBSIZ; + mt_com.mt_count = 0; + if (dev->d_ioctl(dev->fd(), MTIOCTOP, (char *)&mt_com) < 0) { + dev->clrerror(MTSETBSIZ); + } + /* Get notified at logical end of tape */ + mt_com.mt_op = MTEWARN; + mt_com.mt_count = 1; + if (dev->d_ioctl(dev->fd(), MTIOCTOP, (char *)&mt_com) < 0) { + dev->clrerror(MTEWARN); + } + } + return; +#endif + +#if HAVE_FREEBSD_OS || HAVE_OPENBSD_OS + struct mtop mt_com; + if (dev->min_block_size == dev->max_block_size && + dev->min_block_size == 0) { /* variable block mode */ + mt_com.mt_op = MTSETBSIZ; + mt_com.mt_count = 0; + if (dev->d_ioctl(dev->fd(), MTIOCTOP, (char *)&mt_com) < 0) { + dev->clrerror(MTSETBSIZ); + } + } +#if defined(MTIOCSETEOTMODEL) + if (dev->is_fifo()) { + return; /* do not do tape stuff */ + } + uint32_t neof; + if (dev->has_cap(CAP_TWOEOF)) { + neof = 2; + } else { + neof = 1; + } + if (dev->d_ioctl(dev->fd(), MTIOCSETEOTMODEL, (caddr_t)&neof) < 0) { + berrno be; + dev->dev_errno = errno; /* save errno */ + Mmsg2(dev->errmsg, _("Unable to set eotmodel on device %s: ERR=%s\n"), + dev->print_name(), be.bstrerror(dev->dev_errno)); + Jmsg(dcr->jcr, M_FATAL, 0, dev->errmsg); + } +#endif + return; +#endif + +#ifdef HAVE_SUN_OS + struct mtop mt_com; + if (dev->min_block_size == dev->max_block_size && + dev->min_block_size == 0) { /* variable block mode */ + mt_com.mt_op = MTSRSZ; + mt_com.mt_count = 0; + if (dev->d_ioctl(dev->fd(), MTIOCTOP, (char *)&mt_com) < 0) { + dev->clrerror(MTSRSZ); + } + } + return; +#endif +} + +bool dev_get_os_pos(DEVICE *dev, struct mtget *mt_stat) +{ + Dmsg0(100, "dev_get_os_pos\n"); + return dev->has_cap(CAP_MTIOCGET) && + dev->d_ioctl(dev->fd(), MTIOCGET, (char *)mt_stat) == 0 && + mt_stat->mt_fileno >= 0; +} + +/* + * Return the status of the device. This was meant + * to be a generic routine. Unfortunately, it doesn't + * seem possible (at least I do not know how to do it + * currently), which means that for the moment, this + * routine has very little value. 
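+ *
+ * The result is a bitmask of BMT_* flags; on Linux and Win32 the GMT_*
+ * bits reported by MTIOCGET are translated into the corresponding BMT_*
+ * flags as well.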
+ * + * Returns: status + */ +uint32_t status_dev(DEVICE *dev) +{ + struct mtget mt_stat; + uint32_t stat = 0; + + if (dev->state & (ST_EOT | ST_WEOT)) { + stat |= BMT_EOD; + Pmsg0(-20, " EOD"); + } + if (dev->state & ST_EOF) { + stat |= BMT_EOF; + Pmsg0(-20, " EOF"); + } + if (dev->is_tape()) { + stat |= BMT_TAPE; + Pmsg0(-20,_(" Bacula status:")); + Pmsg2(-20,_(" file=%d block=%d\n"), dev->file, dev->block_num); + if (dev->d_ioctl(dev->fd(), MTIOCGET, (char *)&mt_stat) < 0) { + berrno be; + dev->dev_errno = errno; + Mmsg2(dev->errmsg, _("ioctl MTIOCGET error on %s. ERR=%s.\n"), + dev->print_name(), be.bstrerror()); + return 0; + } + Pmsg0(-20, _(" Device status:")); + +#if defined(HAVE_LINUX_OS) + if (GMT_EOF(mt_stat.mt_gstat)) { + stat |= BMT_EOF; + Pmsg0(-20, " EOF"); + } + if (GMT_BOT(mt_stat.mt_gstat)) { + stat |= BMT_BOT; + Pmsg0(-20, " BOT"); + } + if (GMT_EOT(mt_stat.mt_gstat)) { + stat |= BMT_EOT; + Pmsg0(-20, " EOT"); + } + if (GMT_SM(mt_stat.mt_gstat)) { + stat |= BMT_SM; + Pmsg0(-20, " SM"); + } + if (GMT_EOD(mt_stat.mt_gstat)) { + stat |= BMT_EOD; + Pmsg0(-20, " EOD"); + } + if (GMT_WR_PROT(mt_stat.mt_gstat)) { + stat |= BMT_WR_PROT; + Pmsg0(-20, " WR_PROT"); + } + if (GMT_ONLINE(mt_stat.mt_gstat)) { + stat |= BMT_ONLINE; + Pmsg0(-20, " ONLINE"); + } + if (GMT_DR_OPEN(mt_stat.mt_gstat)) { + stat |= BMT_DR_OPEN; + Pmsg0(-20, " DR_OPEN"); + } + if (GMT_IM_REP_EN(mt_stat.mt_gstat)) { + stat |= BMT_IM_REP_EN; + Pmsg0(-20, " IM_REP_EN"); + } +#elif defined(HAVE_WIN32) + if (GMT_EOF(mt_stat.mt_gstat)) { + stat |= BMT_EOF; + Pmsg0(-20, " EOF"); + } + if (GMT_BOT(mt_stat.mt_gstat)) { + stat |= BMT_BOT; + Pmsg0(-20, " BOT"); + } + if (GMT_EOT(mt_stat.mt_gstat)) { + stat |= BMT_EOT; + Pmsg0(-20, " EOT"); + } + if (GMT_EOD(mt_stat.mt_gstat)) { + stat |= BMT_EOD; + Pmsg0(-20, " EOD"); + } + if (GMT_WR_PROT(mt_stat.mt_gstat)) { + stat |= BMT_WR_PROT; + Pmsg0(-20, " WR_PROT"); + } + if (GMT_ONLINE(mt_stat.mt_gstat)) { + stat |= BMT_ONLINE; + Pmsg0(-20, " ONLINE"); + } + if (GMT_DR_OPEN(mt_stat.mt_gstat)) { + stat |= BMT_DR_OPEN; + Pmsg0(-20, " DR_OPEN"); + } + if (GMT_IM_REP_EN(mt_stat.mt_gstat)) { + stat |= BMT_IM_REP_EN; + Pmsg0(-20, " IM_REP_EN"); + } + +#endif /* !SunOS && !OSF */ + if (dev->has_cap(CAP_MTIOCGET)) { + Pmsg2(-20, _(" file=%d block=%d\n"), mt_stat.mt_fileno, mt_stat.mt_blkno); + } else { + Pmsg2(-20, _(" file=%d block=%d\n"), -1, -1); + } + } else { + stat |= BMT_ONLINE | BMT_BOT; + } + return stat; +} + +/* + * If implemented in system, clear the tape + * error status. 
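+ *
+ * The argument is the MT function code that just failed (or -1).  When the
+ * drive answers ENOTTY or ENOSYS, the matching capability bit (CAP_EOF,
+ * CAP_EOM, CAP_FSF, CAP_BSF, CAP_FSR, CAP_BSR) is cleared so that the
+ * unsupported operation is not attempted again.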
+ */ +void DEVICE::clrerror(int func) +{ + const char *msg = NULL; + char buf[100]; + + dev_errno = errno; /* save errno */ + if (errno == EIO) { + VolCatInfo.VolCatErrors++; + } + + if (!is_tape()) { + return; + } + + if (errno == ENOTTY || errno == ENOSYS) { /* Function not implemented */ + switch (func) { + case -1: + break; /* ignore message printed later */ + case MTWEOF: + msg = "WTWEOF"; + clear_cap(CAP_EOF); /* turn off feature */ + break; +#ifdef MTEOM + case MTEOM: + msg = "WTEOM"; + clear_cap(CAP_EOM); /* turn off feature */ + break; +#endif + case MTFSF: + msg = "MTFSF"; + clear_cap(CAP_FSF); /* turn off feature */ + break; + case MTBSF: + msg = "MTBSF"; + clear_cap(CAP_BSF); /* turn off feature */ + break; + case MTFSR: + msg = "MTFSR"; + clear_cap(CAP_FSR); /* turn off feature */ + break; + case MTBSR: + msg = "MTBSR"; + clear_cap(CAP_BSR); /* turn off feature */ + break; + case MTREW: + msg = "MTREW"; + break; +#ifdef MTSETBLK + case MTSETBLK: + msg = "MTSETBLK"; + break; +#endif +#ifdef MTSETDRVBUFFER + case MTSETDRVBUFFER: + msg = "MTSETDRVBUFFER"; + break; +#endif +#ifdef MTRESET + case MTRESET: + msg = "MTRESET"; + break; +#endif + +#ifdef MTSETBSIZ + case MTSETBSIZ: + msg = "MTSETBSIZ"; + break; +#endif +#ifdef MTSRSZ + case MTSRSZ: + msg = "MTSRSZ"; + break; +#endif +#ifdef MTLOAD + case MTLOAD: + msg = "MTLOAD"; + break; +#endif +#ifdef MTUNLOCK + case MTUNLOCK: + msg = "MTUNLOCK"; + break; +#endif + case MTOFFL: + msg = "MTOFFL"; + break; + default: + bsnprintf(buf, sizeof(buf), _("unknown func code %d"), func); + msg = buf; + break; + } + if (msg != NULL) { + dev_errno = ENOSYS; + Mmsg1(errmsg, _("I/O function \"%s\" not supported on this device.\n"), msg); + Emsg0(M_ERROR, 0, errmsg); + } + } + + /* + * Now we try different methods of clearing the error + * status on the drive so that it is not locked for + * further operations. + */ + + /* On some systems such as NetBSD, this clears all errors */ + get_os_tape_file(); + +/* Found on Solaris */ +#ifdef MTIOCLRERR +{ + d_ioctl(m_fd, MTIOCLRERR); + Dmsg0(200, "Did MTIOCLRERR\n"); +} +#endif + +/* Typically on FreeBSD */ +#ifdef MTIOCERRSTAT +{ + berrno be; + /* Read and clear SCSI error status */ + union mterrstat mt_errstat; + Dmsg2(200, "Doing MTIOCERRSTAT errno=%d ERR=%s\n", dev_errno, + be.bstrerror(dev_errno)); + d_ioctl(m_fd, MTIOCERRSTAT, (char *)&mt_errstat); +} +#endif + +/* Clear Subsystem Exception TRU64 */ +#ifdef MTCSE +{ + struct mtop mt_com; + mt_com.mt_op = MTCSE; + mt_com.mt_count = 1; + /* Clear any error condition on the tape */ + d_ioctl(m_fd, MTIOCTOP, (char *)&mt_com); + Dmsg0(200, "Did MTCSE\n"); +} +#endif +} diff --git a/src/stored/parse_bsr.c b/src/stored/parse_bsr.c new file mode 100644 index 00000000..87625e2d --- /dev/null +++ b/src/stored/parse_bsr.c @@ -0,0 +1,1071 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Parse a Bootstrap Records (used for restores) + * + * Kern Sibbald, June MMII + * + */ + + +#include "bacula.h" +#include "stored.h" + +static void s_err(const char *file, int line, LEX *lc, const char *msg, ...); +static bool add_restore_volume(JCR *jcr, VOL_LIST *vol, bool add_to_read_list); + +typedef BSR * (ITEM_HANDLER)(LEX *lc, BSR *bsr); + +static BSR *store_client(LEX *lc, BSR *bsr); +static BSR *store_count(LEX *lc, BSR *bsr); +static BSR *store_device(LEX *lc, BSR *bsr); +static BSR *store_exclude(LEX *lc, BSR *bsr); +static BSR *store_fileregex(LEX *lc, BSR *bsr); +static BSR *store_findex(LEX *lc, BSR *bsr); +static BSR *store_include(LEX *lc, BSR *bsr); +static BSR *store_jobid(LEX *lc, BSR *bsr); +static BSR *store_joblevel(LEX *lc, BSR *bsr); +static BSR *store_job(LEX *lc, BSR *bsr); +static BSR *store_jobtype(LEX *lc, BSR *bsr); +static BSR *store_mediatype(LEX *lc, BSR *bsr); +static BSR *store_nothing(LEX *lc, BSR *bsr); +static BSR *store_sessid(LEX *lc, BSR *bsr); +static BSR *store_sesstime(LEX *lc, BSR *bsr); +static BSR *store_slot(LEX *lc, BSR *bsr); +static BSR *store_stream(LEX *lc, BSR *bsr); +static BSR *store_voladdr(LEX *lc, BSR *bsr); +static BSR *store_volblock(LEX *lc, BSR *bsr); +static BSR *store_volfile(LEX *lc, BSR *bsr); +static BSR *store_vol(LEX *lc, BSR *bsr); +static bool is_fast_rejection_ok(BSR *bsr); +static bool is_positioning_ok(BSR *bsr); + +struct kw_items { + const char *name; + ITEM_HANDLER *handler; +}; + +/* + * List of all keywords permitted in bsr files and their handlers + */ +struct kw_items items[] = { + {"client", store_client}, + {"count", store_count}, + {"device", store_device}, + {"exclude", store_exclude}, + {"fileindex", store_findex}, + {"fileregex", store_fileregex}, + {"include", store_include}, + {"jobid", store_jobid}, + {"joblevel", store_joblevel}, + {"job", store_job}, + {"jobtype", store_jobtype}, + {"mediatype", store_mediatype}, + {"slot", store_slot}, + {"storage", store_nothing}, + {"stream", store_stream}, + {"voladdr", store_voladdr}, + {"volblock", store_volblock}, + {"volume", store_vol}, + {"volfile", store_volfile}, + {"volsessionid", store_sessid}, + {"volsessiontime", store_sesstime}, + {NULL, NULL} +}; + +/* + * Create a BSR record + */ +BSR *new_bsr() +{ + BSR *bsr = (BSR *)malloc(sizeof(BSR)); + memset(bsr, 0, sizeof(BSR)); + return bsr; +} + + +/********************************************************************* + * + * Parse Bootstrap file + * + */ +BSR *parse_bsr(JCR *jcr, char *fname) +{ + LEX *lc = NULL; + int token, i; + BSR *root_bsr = new_bsr(); + BSR *bsr = root_bsr; + + Dmsg1(300, "Enter parse_bsf %s\n", fname); + if ((lc = lex_open_file(lc, fname, s_err)) == NULL) { + berrno be; + Emsg2(M_ERROR_TERM, 0, _("Cannot open bootstrap file %s: %s\n"), + fname, be.bstrerror()); + } + lc->caller_ctx = (void *)jcr; + while ((token=lex_get_token(lc, T_ALL)) != T_EOF) { + Dmsg1(300, "parse got token=%s\n", lex_tok_to_str(token)); + if (token == T_EOL) { + continue; + } + for (i=0; items[i].name; i++) { + if (strcasecmp(items[i].name, lc->str) == 0) { + token = lex_get_token(lc, T_ALL); + Dmsg1 (300, "in T_IDENT got token=%s\n", lex_tok_to_str(token)); + if (token != T_EQUALS) { + scan_err1(lc, "expected an equals, got: %s", lc->str); + bsr = NULL; + break; + } + Dmsg1(300, "calling handler for %s\n", items[i].name); + /* Call item handler */ + bsr = items[i].handler(lc, bsr); + i = -1; + break; + } + } + if (i >= 0) { + Dmsg1(300, "Keyword = %s\n", lc->str); + scan_err1(lc, "Keyword %s 
not found", lc->str); + bsr = NULL; + break; + } + if (!bsr) { + break; + } + } + lc = lex_close_file(lc); + Dmsg0(300, "Leave parse_bsf()\n"); + if (!bsr) { + free_bsr(root_bsr); + root_bsr = NULL; + } + if (root_bsr) { + root_bsr->use_fast_rejection = is_fast_rejection_ok(root_bsr); + root_bsr->use_positioning = is_positioning_ok(root_bsr); + } + for (bsr=root_bsr; bsr; bsr=bsr->next) { + bsr->root = root_bsr; + } + return root_bsr; +} + +static BSR *store_client(LEX *lc, BSR *bsr) +{ + int token; + BSR_CLIENT *client; + + for (;;) { + token = lex_get_token(lc, T_NAME); + if (token == T_ERROR) { + return NULL; + } + client = (BSR_CLIENT *)malloc(sizeof(BSR_CLIENT)); + memset(client, 0, sizeof(BSR_CLIENT)); + bstrncpy(client->ClientName, lc->str, sizeof(client->ClientName)); + /* Add it to the end of the client chain */ + if (!bsr->client) { + bsr->client = client; + } else { + BSR_CLIENT *bc = bsr->client; + for ( ;bc->next; bc=bc->next) + { } + bc->next = client; + } + token = lex_get_token(lc, T_ALL); + if (token != T_COMMA) { + break; + } + } + return bsr; +} + +static BSR *store_count(LEX *lc, BSR *bsr) +{ + int token; + + token = lex_get_token(lc, T_PINT32); + if (token == T_ERROR) { + return NULL; + } + bsr->count = lc->pint32_val; + scan_to_eol(lc); + return bsr; +} + +/* Shove the Device name in each Volume in the current bsr */ +static BSR *store_device(LEX *lc, BSR *bsr) +{ + int token; + + token = lex_get_token(lc, T_STRING); + if (token == T_ERROR) { + return NULL; + } + if (!bsr->volume) { + Emsg1(M_ERROR,0, _("Device \"%s\" in bsr at inappropriate place.\n"), + lc->str); + return bsr; + } + BSR_VOLUME *bv; + for (bv=bsr->volume; bv; bv=bv->next) { + bstrncpy(bv->device, lc->str, sizeof(bv->device)); + } + return bsr; +} + +static BSR *store_findex(LEX *lc, BSR *bsr) +{ + int token; + BSR_FINDEX *findex; + + for (;;) { + token = lex_get_token(lc, T_PINT32_RANGE); + if (token == T_ERROR) { + return NULL; + } + findex = (BSR_FINDEX *)malloc(sizeof(BSR_FINDEX)); + memset(findex, 0, sizeof(BSR_FINDEX)); + findex->findex = lc->pint32_val; + findex->findex2 = lc->pint32_val2; + /* Add it to the end of the chain */ + if (!bsr->FileIndex) { + bsr->FileIndex = findex; + } else { + /* Add to end of chain */ + BSR_FINDEX *bs = bsr->FileIndex; + for ( ;bs->next; bs=bs->next) + { } + bs->next = findex; + } + token = lex_get_token(lc, T_ALL); + if (token != T_COMMA) { + break; + } + } + return bsr; +} + +static BSR *store_fileregex(LEX *lc, BSR *bsr) +{ + int token; + int rc; + + token = lex_get_token(lc, T_STRING); + if (token == T_ERROR) { + return NULL; + } + + if (bsr->fileregex) free(bsr->fileregex); + bsr->fileregex = bstrdup(lc->str); + + if (bsr->fileregex_re == NULL) + bsr->fileregex_re = (regex_t *)bmalloc(sizeof(regex_t)); + + rc = regcomp(bsr->fileregex_re, bsr->fileregex, REG_EXTENDED|REG_NOSUB); + if (rc != 0) { + char prbuf[500]; + regerror(rc, bsr->fileregex_re, prbuf, sizeof(prbuf)); + Emsg2(M_ERROR, 0, _("REGEX '%s' compile error. 
ERR=%s\n"), + bsr->fileregex, prbuf); + return NULL; + } + return bsr; +} + +static BSR *store_jobid(LEX *lc, BSR *bsr) +{ + int token; + BSR_JOBID *jobid; + + for (;;) { + token = lex_get_token(lc, T_PINT32_RANGE); + if (token == T_ERROR) { + return NULL; + } + jobid = (BSR_JOBID *)malloc(sizeof(BSR_JOBID)); + memset(jobid, 0, sizeof(BSR_JOBID)); + jobid->JobId = lc->pint32_val; + jobid->JobId2 = lc->pint32_val2; + /* Add it to the end of the chain */ + if (!bsr->JobId) { + bsr->JobId = jobid; + } else { + /* Add to end of chain */ + BSR_JOBID *bs = bsr->JobId; + for ( ;bs->next; bs=bs->next) + { } + bs->next = jobid; + } + token = lex_get_token(lc, T_ALL); + if (token != T_COMMA) { + break; + } + } + return bsr; +} + + +static BSR *store_jobtype(LEX *lc, BSR *bsr) +{ + /* *****FIXME****** */ + Pmsg0(-1, _("JobType not yet implemented\n")); + return bsr; +} + + +static BSR *store_joblevel(LEX *lc, BSR *bsr) +{ + /* *****FIXME****** */ + Pmsg0(-1, _("JobLevel not yet implemented\n")); + return bsr; +} + +static BSR *store_job(LEX *lc, BSR *bsr) +{ + int token; + BSR_JOB *job; + + for (;;) { + token = lex_get_token(lc, T_NAME); + if (token == T_ERROR) { + return NULL; + } + job = (BSR_JOB *)malloc(sizeof(BSR_JOB)); + memset(job, 0, sizeof(BSR_JOB)); + bstrncpy(job->Job, lc->str, sizeof(job->Job)); + /* Add it to the end of the client chain */ + if (!bsr->job) { + bsr->job = job; + } else { + /* Add to end of chain */ + BSR_JOB *bc = bsr->job; + for ( ;bc->next; bc=bc->next) + { } + bc->next = job; + } + token = lex_get_token(lc, T_ALL); + if (token != T_COMMA) { + break; + } + } + return bsr; +} + +/* Shove the MediaType in each Volume in the current bsr */ +static BSR *store_mediatype(LEX *lc, BSR *bsr) +{ + int token; + + token = lex_get_token(lc, T_STRING); + if (token == T_ERROR) { + return NULL; + } + if (!bsr->volume) { + Emsg1(M_ERROR,0, _("MediaType %s in bsr at inappropriate place.\n"), + lc->str); + return bsr; + } + BSR_VOLUME *bv; + for (bv=bsr->volume; bv; bv=bv->next) { + bstrncpy(bv->MediaType, lc->str, sizeof(bv->MediaType)); + } + return bsr; +} + +static BSR *store_vol(LEX *lc, BSR *bsr) +{ + int token; + BSR_VOLUME *volume; + char *p, *n; + + token = lex_get_token(lc, T_STRING); + if (token == T_ERROR) { + return NULL; + } + if (bsr->volume) { + bsr->next = new_bsr(); + bsr->next->prev = bsr; + bsr = bsr->next; + } + /* This may actually be more than one volume separated by a | + * If so, separate them. + */ + for (p=lc->str; p && *p; ) { + n = strchr(p, '|'); + if (n) { + *n++ = 0; + } + volume = (BSR_VOLUME *)malloc(sizeof(BSR_VOLUME)); + memset(volume, 0, sizeof(BSR_VOLUME)); + bstrncpy(volume->VolumeName, p, sizeof(volume->VolumeName)); + /* Add it to the end of the volume chain */ + if (!bsr->volume) { + bsr->volume = volume; + } else { + BSR_VOLUME *bc = bsr->volume; + for ( ;bc->next; bc=bc->next) + { } + bc->next = volume; + } + p = n; + } + return bsr; +} + +static bool is_positioning_ok(BSR *bsr) +{ + /* + * Every bsr should have a volfile entry and a volblock entry + * or a VolAddr + * if we are going to use positioning + */ + for ( ; bsr; bsr=bsr->next) { + if (!((bsr->volfile && bsr->volblock) || bsr->voladdr)) { + return false; + } + } + return true; +} + +static bool is_fast_rejection_ok(BSR *bsr) +{ + /* + * Although, this can be optimized, for the moment, require + * all bsrs to have both sesstime and sessid set before + * we do fast rejection. 
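+ * The result is cached by parse_bsr() in root_bsr->use_fast_rejection.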
+ */ + for ( ; bsr; bsr=bsr->next) { + if (!(bsr->sesstime && bsr->sessid)) { + return false; + } + } + return true; +} + + +static BSR *store_nothing(LEX *lc, BSR *bsr) +{ + int token; + + token = lex_get_token(lc, T_STRING); + if (token == T_ERROR) { + return NULL; + } + return bsr; +} + + + +/* + * Routine to handle Volume start/end file + */ +static BSR *store_volfile(LEX *lc, BSR *bsr) +{ + int token; + BSR_VOLFILE *volfile; + + for (;;) { + token = lex_get_token(lc, T_PINT32_RANGE); + if (token == T_ERROR) { + return NULL; + } + volfile = (BSR_VOLFILE *)malloc(sizeof(BSR_VOLFILE)); + memset(volfile, 0, sizeof(BSR_VOLFILE)); + volfile->sfile = lc->pint32_val; + volfile->efile = lc->pint32_val2; + /* Add it to the end of the chain */ + if (!bsr->volfile) { + bsr->volfile = volfile; + } else { + /* Add to end of chain */ + BSR_VOLFILE *bs = bsr->volfile; + for ( ;bs->next; bs=bs->next) + { } + bs->next = volfile; + } + token = lex_get_token(lc, T_ALL); + if (token != T_COMMA) { + break; + } + } + return bsr; +} + + +/* + * Routine to handle Volume start/end Block + */ +static BSR *store_volblock(LEX *lc, BSR *bsr) +{ + int token; + BSR_VOLBLOCK *volblock; + + for (;;) { + token = lex_get_token(lc, T_PINT32_RANGE); + if (token == T_ERROR) { + return NULL; + } + volblock = (BSR_VOLBLOCK *)malloc(sizeof(BSR_VOLBLOCK)); + memset(volblock, 0, sizeof(BSR_VOLBLOCK)); + volblock->sblock = lc->pint32_val; + volblock->eblock = lc->pint32_val2; + /* Add it to the end of the chain */ + if (!bsr->volblock) { + bsr->volblock = volblock; + } else { + /* Add to end of chain */ + BSR_VOLBLOCK *bs = bsr->volblock; + for ( ;bs->next; bs=bs->next) + { } + bs->next = volblock; + } + token = lex_get_token(lc, T_ALL); + if (token != T_COMMA) { + break; + } + } + return bsr; +} + +/* + * Routine to handle Volume start/end address + */ +static BSR *store_voladdr(LEX *lc, BSR *bsr) +{ + int token; + BSR_VOLADDR *voladdr; + + for (;;) { + token = lex_get_token(lc, T_PINT64_RANGE); + if (token == T_ERROR) { + return NULL; + } + voladdr = (BSR_VOLADDR *)malloc(sizeof(BSR_VOLADDR)); + memset(voladdr, 0, sizeof(BSR_VOLADDR)); + voladdr->saddr = lc->pint64_val; + voladdr->eaddr = lc->pint64_val2; + /* Add it to the end of the chain */ + if (!bsr->voladdr) { + bsr->voladdr = voladdr; + } else { + /* Add to end of chain */ + BSR_VOLADDR *bs = bsr->voladdr; + for ( ;bs->next; bs=bs->next) + { } + bs->next = voladdr; + } + token = lex_get_token(lc, T_ALL); + if (token != T_COMMA) { + break; + } + } + return bsr; +} + +static BSR *store_sessid(LEX *lc, BSR *bsr) +{ + int token; + BSR_SESSID *sid; + + for (;;) { + token = lex_get_token(lc, T_PINT32_RANGE); + if (token == T_ERROR) { + return NULL; + } + sid = (BSR_SESSID *)malloc(sizeof(BSR_SESSID)); + memset(sid, 0, sizeof(BSR_SESSID)); + sid->sessid = lc->pint32_val; + sid->sessid2 = lc->pint32_val2; + /* Add it to the end of the chain */ + if (!bsr->sessid) { + bsr->sessid = sid; + } else { + /* Add to end of chain */ + BSR_SESSID *bs = bsr->sessid; + for ( ;bs->next; bs=bs->next) + { } + bs->next = sid; + } + token = lex_get_token(lc, T_ALL); + if (token != T_COMMA) { + break; + } + } + return bsr; +} + +static BSR *store_sesstime(LEX *lc, BSR *bsr) +{ + int token; + BSR_SESSTIME *stime; + + for (;;) { + token = lex_get_token(lc, T_PINT32); + if (token == T_ERROR) { + return NULL; + } + stime = (BSR_SESSTIME *)malloc(sizeof(BSR_SESSTIME)); + memset(stime, 0, sizeof(BSR_SESSTIME)); + stime->sesstime = lc->pint32_val; + /* Add it to the end of the chain */ + if 
(!bsr->sesstime) { + bsr->sesstime = stime; + } else { + /* Add to end of chain */ + BSR_SESSTIME *bs = bsr->sesstime; + for ( ;bs->next; bs=bs->next) + { } + bs->next = stime; + } + token = lex_get_token(lc, T_ALL); + if (token != T_COMMA) { + break; + } + } + return bsr; +} + + +static BSR *store_stream(LEX *lc, BSR *bsr) +{ + int token; + BSR_STREAM *stream; + + for (;;) { + token = lex_get_token(lc, T_INT32); + if (token == T_ERROR) { + return NULL; + } + stream = (BSR_STREAM *)malloc(sizeof(BSR_STREAM)); + memset(stream, 0, sizeof(BSR_STREAM)); + stream->stream = lc->int32_val; + /* Add it to the end of the chain */ + if (!bsr->stream) { + bsr->stream = stream; + } else { + /* Add to end of chain */ + BSR_STREAM *bs = bsr->stream; + for ( ;bs->next; bs=bs->next) + { } + bs->next = stream; + } + token = lex_get_token(lc, T_ALL); + if (token != T_COMMA) { + break; + } + } + return bsr; +} + +static BSR *store_slot(LEX *lc, BSR *bsr) +{ + int token; + + token = lex_get_token(lc, T_PINT32); + if (token == T_ERROR) { + return NULL; + } + if (!bsr->volume) { + Emsg1(M_ERROR,0, _("Slot %d in bsr at inappropriate place.\n"), + lc->pint32_val); + return bsr; + } + bsr->volume->Slot = lc->pint32_val; + scan_to_eol(lc); + return bsr; +} + +static BSR *store_include(LEX *lc, BSR *bsr) +{ + scan_to_eol(lc); + return bsr; +} + +static BSR *store_exclude(LEX *lc, BSR *bsr) +{ + scan_to_eol(lc); + return bsr; +} + +void dump_volfile(BSR_VOLFILE *volfile) +{ + if (volfile) { + Pmsg2(-1, _("VolFile : %u-%u\n"), volfile->sfile, volfile->efile); + dump_volfile(volfile->next); + } +} + +void dump_volblock(BSR_VOLBLOCK *volblock) +{ + if (volblock) { + Pmsg2(-1, _("VolBlock : %u-%u\n"), volblock->sblock, volblock->eblock); + dump_volblock(volblock->next); + } +} + +void dump_voladdr(DEVICE *dev, BSR_VOLADDR *voladdr) +{ + if (voladdr) { + if (dev) { + char ed1[50], ed2[50]; + Pmsg2(-1, _("VolAddr : %s-%llu\n"), + dev->print_addr(ed1, sizeof(ed1), voladdr->saddr), + dev->print_addr(ed2, sizeof(ed2), voladdr->eaddr)); + } else { + Pmsg2(-1, _("VolAddr : %llu-%llu\n"), voladdr->saddr, voladdr->eaddr); + } + dump_voladdr(dev, voladdr->next); + } +} + +void dump_findex(BSR_FINDEX *FileIndex) +{ + if (FileIndex) { + if (FileIndex->findex == FileIndex->findex2) { + Pmsg1(-1, _("FileIndex : %u\n"), FileIndex->findex); + } else { + Pmsg2(-1, _("FileIndex : %u-%u\n"), FileIndex->findex, FileIndex->findex2); + } + dump_findex(FileIndex->next); + } +} + +void dump_jobid(BSR_JOBID *jobid) +{ + if (jobid) { + if (jobid->JobId == jobid->JobId2) { + Pmsg1(-1, _("JobId : %u\n"), jobid->JobId); + } else { + Pmsg2(-1, _("JobId : %u-%u\n"), jobid->JobId, jobid->JobId2); + } + dump_jobid(jobid->next); + } +} + +void dump_sessid(BSR_SESSID *sessid) +{ + if (sessid) { + if (sessid->sessid == sessid->sessid2) { + Pmsg1(-1, _("SessId : %u\n"), sessid->sessid); + } else { + Pmsg2(-1, _("SessId : %u-%u\n"), sessid->sessid, sessid->sessid2); + } + dump_sessid(sessid->next); + } +} + +void dump_volume(BSR_VOLUME *volume) +{ + if (volume) { + Pmsg1(-1, _("VolumeName : %s\n"), volume->VolumeName); + Pmsg1(-1, _(" MediaType : %s\n"), volume->MediaType); + Pmsg1(-1, _(" Device : %s\n"), volume->device); + Pmsg1(-1, _(" Slot : %d\n"), volume->Slot); + dump_volume(volume->next); + } +} + + +void dump_client(BSR_CLIENT *client) +{ + if (client) { + Pmsg1(-1, _("Client : %s\n"), client->ClientName); + dump_client(client->next); + } +} + +void dump_job(BSR_JOB *job) +{ + if (job) { + Pmsg1(-1, _("Job : %s\n"), job->Job); + dump_job(job->next); 
+ } +} + +void dump_sesstime(BSR_SESSTIME *sesstime) +{ + if (sesstime) { + Pmsg1(-1, _("SessTime : %u\n"), sesstime->sesstime); + dump_sesstime(sesstime->next); + } +} + + +void dump_bsr(DEVICE *dev, BSR *bsr, bool recurse) +{ + int64_t save_debug = debug_level; + debug_level = 1; + if (!bsr) { + Pmsg0(-1, _("BSR is NULL\n")); + debug_level = save_debug; + return; + } + Pmsg1(-1, _("Next : 0x%x\n"), bsr->next); + Pmsg1(-1, _("Root bsr : 0x%x\n"), bsr->root); + dump_volume(bsr->volume); + dump_sessid(bsr->sessid); + dump_sesstime(bsr->sesstime); + dump_volfile(bsr->volfile); + dump_volblock(bsr->volblock); + dump_voladdr(dev, bsr->voladdr); + dump_client(bsr->client); + dump_jobid(bsr->JobId); + dump_job(bsr->job); + dump_findex(bsr->FileIndex); + if (bsr->count) { + Pmsg1(-1, _("count : %u\n"), bsr->count); + Pmsg1(-1, _("found : %u\n"), bsr->found); + } + + Pmsg1(-1, _("done : %s\n"), bsr->done?_("yes"):_("no")); + Pmsg1(-1, _("positioning : %d\n"), bsr->use_positioning); + Pmsg1(-1, _("fast_reject : %d\n"), bsr->use_fast_rejection); + if (recurse && bsr->next) { + Pmsg0(-1, "\n"); + dump_bsr(dev, bsr->next, true); + } + debug_level = save_debug; +} + +/********************************************************************* + * + * Free bsr resources + */ + +static void free_bsr_item(BSR *bsr) +{ + BSR *next; + while (bsr) { + next = bsr->next; + free(bsr); + bsr = next; + } +} + +/* + * Remove a single item from the bsr tree + */ +void remove_bsr(BSR *bsr) +{ + free_bsr_item((BSR *)bsr->volume); + free_bsr_item((BSR *)bsr->client); + free_bsr_item((BSR *)bsr->sessid); + free_bsr_item((BSR *)bsr->sesstime); + free_bsr_item((BSR *)bsr->volfile); + free_bsr_item((BSR *)bsr->volblock); + free_bsr_item((BSR *)bsr->voladdr); + free_bsr_item((BSR *)bsr->JobId); + free_bsr_item((BSR *)bsr->job); + free_bsr_item((BSR *)bsr->FileIndex); + free_bsr_item((BSR *)bsr->JobType); + free_bsr_item((BSR *)bsr->JobLevel); + if (bsr->fileregex) { + bfree(bsr->fileregex); + } + if (bsr->fileregex_re) { + regfree(bsr->fileregex_re); + free(bsr->fileregex_re); + } + if (bsr->attr) { + free_attr(bsr->attr); + } + if (bsr->next) { + bsr->next->prev = bsr->prev; + } + if (bsr->prev) { + bsr->prev->next = bsr->next; + } + free(bsr); +} + +/* + * Free all bsrs in chain + */ +void free_bsr(BSR *bsr) +{ + BSR *next_bsr; + while (bsr) { + next_bsr = bsr->next; + /* Remove (free) current bsr */ + remove_bsr(bsr); + /* Now get the next one */ + bsr = next_bsr; + } +} + +/***************************************************************** + * Routines for handling volumes + */ +static VOL_LIST *new_restore_volume() +{ + VOL_LIST *vol; + vol = (VOL_LIST *)malloc(sizeof(VOL_LIST)); + memset(vol, 0, sizeof(VOL_LIST)); + return vol; +} + +/* + * Create a list of Volumes (and Slots and Start positions) to be + * used in the current restore job. 
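+ *
+ * The list is built from the job's bootstrap (bsr) records when present;
+ * otherwise (the deprecated case) it is built from the '|' separated
+ * Volume names in dcr->VolumeName.  add_restore_volume() drops duplicate
+ * Volumes, keeping the smallest start file seen for each.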
+ */ +void create_restore_volume_list(JCR *jcr, bool add_to_read_list) +{ + char *p, *n; + VOL_LIST *vol; + + /* + * Build a list of volumes to be processed + */ + jcr->NumReadVolumes = 0; + jcr->CurReadVolume = 0; + if (jcr->bsr) { + BSR *bsr = jcr->bsr; + if (!bsr->volume || !bsr->volume->VolumeName[0]) { + return; + } + for ( ; bsr; bsr=bsr->next) { + BSR_VOLUME *bsrvol; + BSR_VOLFILE *volfile; + uint32_t sfile = UINT32_MAX; + + /* Find minimum start file so that we can forward space to it */ + for (volfile = bsr->volfile; volfile; volfile=volfile->next) { + if (volfile->sfile < sfile) { + sfile = volfile->sfile; + } + } + /* Now add volumes for this bsr */ + for (bsrvol = bsr->volume; bsrvol; bsrvol=bsrvol->next) { + vol = new_restore_volume(); + bstrncpy(vol->VolumeName, bsrvol->VolumeName, sizeof(vol->VolumeName)); + bstrncpy(vol->MediaType, bsrvol->MediaType, sizeof(vol->MediaType)); + bstrncpy(vol->device, bsrvol->device, sizeof(vol->device)); + vol->Slot = bsrvol->Slot; + vol->start_file = sfile; + if (add_restore_volume(jcr, vol, add_to_read_list)) { + jcr->NumReadVolumes++; + Dmsg2(400, "Added volume=%s mediatype=%s\n", vol->VolumeName, + vol->MediaType); + } else { + Dmsg1(400, "Duplicate volume %s\n", vol->VolumeName); + free((char *)vol); + } + sfile = 0; /* start at beginning of second volume */ + } + } + } else { + /* This is the old way -- deprecated */ + for (p = jcr->dcr->VolumeName; p && *p; ) { + n = strchr(p, '|'); /* volume name separator */ + if (n) { + *n++ = 0; /* Terminate name */ + } + vol = new_restore_volume(); + bstrncpy(vol->VolumeName, p, sizeof(vol->VolumeName)); + bstrncpy(vol->MediaType, jcr->dcr->media_type, sizeof(vol->MediaType)); + if (add_restore_volume(jcr, vol, add_to_read_list)) { + jcr->NumReadVolumes++; + } else { + free((char *)vol); + } + p = n; + } + } +} + +/* + * Add current volume to end of list, only if the Volume + * is not already in the list. + * + * returns: 1 if volume added + * 0 if volume already in list + */ +static bool add_restore_volume(JCR *jcr, VOL_LIST *vol, bool add_to_read_list) +{ + VOL_LIST *next = jcr->VolList; + + if (add_to_read_list) { + /* Add volume to volume manager's read list */ + add_read_volume(jcr, vol->VolumeName); + } + + if (!next) { /* list empty ? */ + jcr->VolList = vol; /* yes, add volume */ + } else { + /* Loop through all but last */ + for ( ; next->next; next=next->next) { + if (strcmp(vol->VolumeName, next->VolumeName) == 0) { + /* Save smallest start file */ + if (vol->start_file < next->start_file) { + next->start_file = vol->start_file; + } + return false; /* already in list */ + } + } + /* Check last volume in list */ + if (strcmp(vol->VolumeName, next->VolumeName) == 0) { + if (vol->start_file < next->start_file) { + next->start_file = vol->start_file; + } + return false; /* already in list */ + } + next->next = vol; /* add volume */ + } + return true; +} + +void free_restore_volume_list(JCR *jcr) +{ + VOL_LIST *vol = jcr->VolList; + VOL_LIST *tmp; + + for ( ; vol; ) { + tmp = vol->next; + remove_read_volume(jcr, vol->VolumeName); + free(vol); + vol = tmp; + } + jcr->VolList = NULL; +} + + +/* + * Format a scanner error message + */ +static void s_err(const char *file, int line, LEX *lc, const char *msg, ...) 
+{ + JCR *jcr = (JCR *)(lc->caller_ctx); + va_list arg_ptr; + char buf[MAXSTRING]; + + va_start(arg_ptr, msg); + bvsnprintf(buf, sizeof(buf), msg, arg_ptr); + va_end(arg_ptr); + + if (jcr) { + Jmsg(jcr, M_FATAL, 0, _("Bootstrap file error: %s\n" +" : Line %d, col %d of file %s\n%s\n"), + buf, lc->line_no, lc->col_no, lc->fname, lc->line); + } else { + e_msg(file, line, M_FATAL, 0, _("Bootstrap file error: %s\n" +" : Line %d, col %d of file %s\n%s\n"), + buf, lc->line_no, lc->col_no, lc->fname, lc->line); + } +} diff --git a/src/stored/protos.h b/src/stored/protos.h new file mode 100644 index 00000000..27edb128 --- /dev/null +++ b/src/stored/protos.h @@ -0,0 +1,298 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Protypes for stored + * + * Written by Kern Sibbald, MM + */ + +/* From stored.c */ +uint32_t new_VolSessionId(); + +/* From acquire.c */ +DCR *acquire_device_for_append(DCR *dcr); +bool acquire_device_for_read(DCR *dcr); +bool release_device(DCR *dcr); +bool clean_device(DCR *dcr); +DCR *new_dcr(JCR *jcr, DCR *dcr, DEVICE *dev, bool writing=true); +void free_dcr(DCR *dcr); + +/* From append.c */ +bool send_attrs_to_dir(JCR *jcr, DEV_RECORD *rec); + +/* From askdir.c */ +enum get_vol_info_rw { + GET_VOL_INFO_FOR_WRITE, + GET_VOL_INFO_FOR_READ +}; +bool dir_get_volume_info(DCR *dcr, const char *VolumeName, enum get_vol_info_rw); +bool dir_find_next_appendable_volume(DCR *dcr); +bool dir_update_volume_info(DCR *dcr, bool label, bool update_LastWritten, + bool use_dcr=false); +bool dir_ask_sysop_to_create_appendable_volume(DCR *dcr); +bool dir_ask_sysop_to_mount_volume(DCR *dcr, bool read_access); +bool dir_update_file_attributes(DCR *dcr, DEV_RECORD *rec); +bool dir_send_job_status(JCR *jcr); +bool dir_create_jobmedia_record(DCR *dcr, bool zero=false); +void create_jobmedia_queue(JCR *jcr); +bool flush_jobmedia_queue(JCR *jcr); +bool dir_update_device(JCR *jcr, DEVICE *dev); +bool dir_update_changer(JCR *jcr, AUTOCHANGER *changer); + +/* class AskDirHandler allows btool's utilities to overwrite the functions above + * and even to overwrite the functions in their own sub-class like in btape + */ +class AskDirHandler +{ + /* this class should be an abstract class */ +public: + AskDirHandler() {} + virtual ~AskDirHandler() {} + virtual bool dir_find_next_appendable_volume(DCR *dcr) { return 1;} + virtual bool dir_update_volume_info(DCR *dcr, bool relabel, bool update_LastWritten, bool use_dcr) { return 1; } + virtual bool dir_create_jobmedia_record(DCR *dcr, bool zero) { return 1; } + virtual bool flush_jobmedia_queue(JCR *jcr) { return true; } + virtual bool dir_ask_sysop_to_create_appendable_volume(DCR *dcr) { return 1; } + virtual bool dir_update_file_attributes(DCR *dcr, DEV_RECORD *rec) { return 1;} + virtual bool dir_send_job_status(JCR *jcr) {return 1;} + virtual bool dir_ask_sysop_to_mount_volume(DCR *dcr, bool writing); + virtual bool dir_get_volume_info(DCR *dcr, const char *VolumeName, enum 
get_vol_info_rw writing); +}; + +class BtoolsAskDirHandler: public AskDirHandler +{ + /* if AskDirHandler was an abstract class, implement the method here */ +public: + BtoolsAskDirHandler() {} + virtual ~BtoolsAskDirHandler() {} +}; + +/* activate the functions provided by the btool's handler */ +AskDirHandler *init_askdir_handler(AskDirHandler *new_askdir_handler); + +/* authenticate.c */ +bool authenticate_director(JCR *jcr); +int authenticate_filed(JCR *jcr, BSOCK *fd, int FDVersion); +bool authenticate_storagedaemon(JCR *jcr); + +/* From autochanger.c */ +bool init_autochangers(); +int autoload_device(DCR *dcr, bool writing, BSOCK *dir); +bool autochanger_cmd(DCR *dcr, BSOCK *dir, const char *cmd); +bool unload_autochanger(DCR *dcr, int loaded); +bool unload_dev(DCR *dcr, DEVICE *dev); +char *edit_device_codes(DCR *dcr, char *omsg, const char *imsg, const char *cmd); +int get_autochanger_loaded_slot(DCR *dcr); + +/* From block.c */ +void dump_block(DEVICE *dev, DEV_BLOCK *b, const char *msg, bool force=false); +DEV_BLOCK *dup_block(DEV_BLOCK *eblock); +void init_block_write(DEV_BLOCK *block); +void empty_block(DEV_BLOCK *block); +void free_block(DEV_BLOCK *block); +void print_block_read_errors(JCR *jcr, DEV_BLOCK *block); +void ser_block_header(DEV_BLOCK *block); +bool is_block_empty(DEV_BLOCK *block); +bool terminate_writing_volume(DCR *dcr); + +/* From block_util.c */ +bool terminate_writing_volume(DCR *dcr); +bool is_user_volume_size_reached(DCR *dcr, bool quiet); +bool check_for_newvol_or_newfile(DCR *dcr); +bool do_new_file_bookkeeping(DCR *dcr); +void reread_last_block(DCR *dcr); + + +/* From butil.c -- utilities for SD tool programs */ +void setup_me(); +void print_ls_output(const char *fname, const char *link, int type, struct stat *statp); +JCR *setup_jcr(const char *name, char *dev_name, BSR *bsr, + const char *VolumeName, bool writing, bool read_dedup_data=true); +void display_tape_error_status(JCR *jcr, DEVICE *dev); + + +/* From dev.c */ +void sd_list_loaded_drivers(alist *list); +DEVICE *init_dev(JCR *jcr, DEVRES *device, bool adata=false); +bool can_open_mounted_dev(DEVICE *dev); +bool load_dev(DEVICE *dev); +int write_block(DEVICE *dev); +uint32_t status_dev(DEVICE *dev); +void attach_jcr_to_device(DEVICE *dev, JCR *jcr); +void detach_jcr_from_device(DEVICE *dev, JCR *jcr); +JCR *next_attached_jcr(DEVICE *dev, JCR *jcr); +void init_device_wait_timers(DCR *dcr); +void init_jcr_device_wait_timers(JCR *jcr); +bool double_dev_wait_time(DEVICE *dev); + +/* From device.c */ +bool first_open_device(DCR *dcr); +bool fixup_device_block_write_error(DCR *dcr, int retries=4); +void set_start_vol_position(DCR *dcr); +void set_new_volume_parameters(DCR *dcr); +void set_new_file_parameters(DCR *dcr); + +/* From dircmd.c */ +void *handle_connection_request(void *arg); +extern int use_new_match_all; /* Temp variable, to be removed when the new code is tested */ + +/* From fd_cmds.c */ +void run_job(JCR *jcr); +void do_fd_commands(JCR *jcr); +bool do_handle_command(JCR *jcr); +void do_client_commands(JCR *jcr); + +/* From job.c */ +void stored_free_jcr(JCR *jcr); + +/* From hello.c */ +bool validate_dir_hello(JCR* jcr); +bool send_hello_ok(BSOCK *bs); +bool send_sorry(BSOCK *bs); +bool send_hello_sd(JCR *jcr, char *Job); +bool send_hello_client(JCR *jcr, char *Job); +bool read_client_hello(JCR *jcr); +bool is_client_connection(BSOCK *bs); +void handle_client_connection(BSOCK *fd); + +/* From label.c */ +void create_session_label(DCR *dcr, DEV_RECORD *rec, int label); +void 
create_volume_header(DEVICE *dev, const char *VolName, const char *PoolName, bool no_prelabel); +#define ANSI_VOL_LABEL 0 +#define ANSI_EOF_LABEL 1 +#define ANSI_EOV_LABEL 2 +bool write_ansi_ibm_labels(DCR *dcr, int type, const char *VolName); +int read_ansi_ibm_label(DCR *dcr); +bool write_session_label(DCR *dcr, int label); +int dump_label_record(DEVICE *dev, DEV_RECORD *rec, int verbose, bool check_err); +bool unser_volume_label(DEVICE *dev, DEV_RECORD *rec); +bool unser_session_label(SESSION_LABEL *label, DEV_RECORD *rec); + +/* From locks.c */ +void _lock_device(const char *file, int line, DEVICE *dev); +void _unlock_device(const char *file, int line, DEVICE *dev); +void _block_device(const char *file, int line, DEVICE *dev, int state); +void _unblock_device(const char *file, int line, DEVICE *dev); +void _give_back_device_block(const char *file, int line, DEVICE *dev, bsteal_lock_t *hold); + + +/* From match_bsr.c */ +int match_bsr(BSR *bsr, DEV_RECORD *rec, VOLUME_LABEL *volrec, + SESSION_LABEL *sesrec, JCR *jcr); +int match_bsr_block(BSR *bsr, DEV_BLOCK *block); +void position_bsr_block(BSR *bsr, DEV_BLOCK *block); +BSR *find_next_bsr(BSR *root_bsr, DEVICE *dev); +bool is_this_bsr_done(JCR *jcr, BSR *bsr, DEV_RECORD *rec); +uint64_t get_bsr_start_addr(BSR *bsr); + +/* From mount.c */ +bool mount_next_read_volume(DCR *dcr); + +/* From parse_bsr.c */ +BSR *new_bsr(); +BSR *parse_bsr(JCR *jcr, char *lf); +void dump_bsr(DEVICE *dev, BSR *bsr, bool recurse); +void free_bsr(BSR *bsr); +void free_restore_volume_list(JCR *jcr); +void create_restore_volume_list(JCR *jcr, bool add_to_read_list); + +/* From record.c */ +const char *FI_to_ascii(char *buf, int fi); +const char *stream_to_ascii(char *buf, int stream, int fi); +const char *stream_to_ascii_ex(char *buf, int stream, int fi); +bool write_record_to_block(DCR *dcr, DEV_RECORD *rec); +bool can_write_record_to_block(DEV_BLOCK *block, DEV_RECORD *rec); +bool read_record_from_block(DCR *dcr, DEV_RECORD *rec); +DEV_RECORD *new_record(); +void free_record(DEV_RECORD *rec); +void empty_record(DEV_RECORD *rec); +uint64_t get_record_address(DEV_RECORD *rec); +uint64_t get_record_start_address(DEV_RECORD *rec); +bool flush_adata_to_device(DCR *dcr); + +/* From record_util.c */ +void dump_record(DEV_RECORD *rec); + +/* From read_record.c */ +bool read_records(DCR *dcr, + bool record_cb(DCR *dcr, DEV_RECORD *rec), + bool mount_cb(DCR *dcr)); + +/* From reserve.c */ +void init_reservations_lock(); +void term_reservations_lock(); +void send_drive_reserve_messages(JCR *jcr, void sendit(const char *msg, int len, void *sarg), void *arg); +bool find_suitable_device_for_job(JCR *jcr, RCTX &rctx); +int search_res_for_device(RCTX &rctx); +void release_reserve_messages(JCR *jcr); + +extern int reservations_lock_count; + +/* From status.c */ +void _dbg_list_one_device(DEVICE *dev, const char *f, int l); +#define dbg_list_one_device(x, dev) if (chk_dbglvl(x)) \ + _dbg_list_one_device(dev, __FILE__, __LINE__) + +/* From vol_mgr.c */ +void init_vol_list_lock(); +void term_vol_list_lock(); +VOLRES *reserve_volume(DCR *dcr, const char *VolumeName); +bool free_volume(DEVICE *dev); +bool is_vol_list_empty(); +dlist *dup_vol_list(JCR *jcr); +void free_temp_vol_list(dlist *temp_vol_list); +bool volume_unused(DCR *dcr); +void create_volume_lists(); +void free_volume_lists(); +void list_volumes(void sendit(const char *msg, int len, void *sarg), void *arg); +bool is_volume_in_use(DCR *dcr); +extern int vol_list_lock_count; +void add_read_volume(JCR *jcr, const char 
*VolumeName); +void remove_read_volume(JCR *jcr, const char *VolumeName); + + +/* From spool.c */ +bool begin_data_spool (DCR *dcr); +bool discard_data_spool (DCR *dcr); +bool commit_data_spool (DCR *dcr); +bool are_attributes_spooled (JCR *jcr); +bool begin_attribute_spool (JCR *jcr); +bool discard_attribute_spool (JCR *jcr); +bool commit_attribute_spool (JCR *jcr); +bool write_block_to_spool_file (DCR *dcr); +void list_spool_stats (void sendit(const char *msg, int len, void *sarg), void *arg); + +/* From tape_alert.c */ +extern void alert_callback(void *ctx, const char *short_msg, + const char *long_msg, char *Volume, + int severity, int flags, int alertno, utime_t alert_time); + +/* From wait.c */ +int wait_for_sysop(DCR *dcr); +bool wait_for_any_device(JCR *jcr, int &retries); +bool wait_for_device(DCR *dcr, int &retries); + +/* stored_conf.c */ +void store_protocol(LEX *lc, RES_ITEM *item, int index, int pass); +void store_uri_style(LEX *lc, RES_ITEM *item, int index, int pass); +void store_truncate(LEX *lc, RES_ITEM *item, int index, int pass); +void store_upload(LEX *lc, RES_ITEM *item, int index, int pass); +void store_devtype(LEX *lc, RES_ITEM *item, int index, int pass); +void store_cloud_driver(LEX *lc, RES_ITEM *item, int index, int pass); +void store_maxblocksize(LEX *lc, RES_ITEM *item, int index, int pass); diff --git a/src/stored/read.c b/src/stored/read.c new file mode 100644 index 00000000..df3f13d2 --- /dev/null +++ b/src/stored/read.c @@ -0,0 +1,288 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Read code for Storage daemon + * + * Kern Sibbald, November MM + * + */ + +#include "bacula.h" +#include "stored.h" + +/* Forward referenced subroutines */ +static bool read_record_cb(DCR *dcr, DEV_RECORD *rec); +static bool mac_record_cb(DCR *dcr, DEV_RECORD *rec); + +/* Responses sent to the File daemon */ +static char OK_data[] = "3000 OK data\n"; +static char FD_error[] = "3000 error\n"; +static char rec_header[] = "rechdr %ld %ld %ld %ld %ld"; + +/* + * Read Data and send to File Daemon + * Returns: false on failure + * true on success + */ +bool do_read_data(JCR *jcr) +{ + BSOCK *fd = jcr->file_bsock; + bool ok = true; + DCR *dcr = jcr->read_dcr; + char ec[50]; + + Dmsg0(100, "Start read data.\n"); + + if (!fd->set_buffer_size(dcr->device->max_network_buffer_size, BNET_SETBUF_WRITE)) { + return false; + } + + if (jcr->NumReadVolumes == 0) { + Jmsg(jcr, M_FATAL, 0, _("No Volume names found for restore.\n")); + fd->fsend(FD_error); + return false; + } + + Dmsg2(200, "Found %d volumes names to restore. 
First=%s\n", jcr->NumReadVolumes, + jcr->VolList->VolumeName); + + /* Ready device for reading */ + if (!acquire_device_for_read(dcr)) { + fd->fsend(FD_error); + return false; + } + dcr->dev->start_of_job(dcr); + + /* Tell File daemon we will send data */ + if (!jcr->is_ok_data_sent) { + fd->fsend(OK_data); + jcr->is_ok_data_sent = true; + } + + jcr->sendJobStatus(JS_Running); + jcr->run_time = time(NULL); + jcr->JobFiles = 0; + + if (jcr->is_JobType(JT_MIGRATE) || jcr->is_JobType(JT_COPY)) { + ok = read_records(dcr, mac_record_cb, mount_next_read_volume); + } else { + ok = read_records(dcr, read_record_cb, mount_next_read_volume); + } + + /* + * Don't use time_t for job_elapsed as time_t can be 32 or 64 bits, + * and the subsequent Jmsg() editing will break + */ + int32_t job_elapsed = time(NULL) - jcr->run_time; + + if (job_elapsed <= 0) { + job_elapsed = 1; + } + + Jmsg(dcr->jcr, M_INFO, 0, _("Elapsed time=%02d:%02d:%02d, Transfer rate=%s Bytes/second\n"), + job_elapsed / 3600, job_elapsed % 3600 / 60, job_elapsed % 60, + edit_uint64_with_suffix(jcr->JobBytes / job_elapsed, ec)); + + /* Send end of data to FD */ + fd->signal(BNET_EOD); + + if (!release_device(jcr->read_dcr)) { + ok = false; + } + + Dmsg0(30, "Done reading.\n"); + return ok; +} + +static bool read_record_cb(DCR *dcr, DEV_RECORD *rec) +{ + JCR *jcr = dcr->jcr; + BSOCK *fd = jcr->file_bsock; + bool ok = true; + POOLMEM *save_msg; + char ec1[50], ec2[50]; + POOLMEM *wbuf = rec->data; /* send buffer */ + uint32_t wsize = rec->data_len; /* send size */ + + if (rec->FileIndex < 0) { + return true; + } + + Dmsg5(400, "Send to FD: SessId=%u SessTim=%u FI=%s Strm=%s, len=%d\n", + rec->VolSessionId, rec->VolSessionTime, + FI_to_ascii(ec1, rec->FileIndex), + stream_to_ascii(ec2, rec->Stream, rec->FileIndex), + wsize); + + Dmsg2(640, ">filed: send header stream=0x%lx len=%ld\n", rec->Stream, wsize); + /* Send record header to File daemon */ + if (!fd->fsend(rec_header, rec->VolSessionId, rec->VolSessionTime, + rec->FileIndex, rec->Stream, wsize)) { + Pmsg1(000, _(">filed: Error Hdr=%s\n"), fd->msg); + Jmsg1(jcr, M_FATAL, 0, _("Error sending header to Client. ERR=%s\n"), + fd->bstrerror()); + return false; + } + /* + * For normal migration jobs, FileIndex values are sequential because + * we are dealing with one job. However, for Vbackup (consolidation), + * we will be getting records from multiple jobs and writing them back + * out, so we need to ensure that the output FileIndex is sequential. + * We do so by detecting a FileIndex change and incrementing the + * JobFiles, which we then use as the output FileIndex. + */ + if (rec->FileIndex >= 0) { + /* If something changed, increment FileIndex */ + if (rec->VolSessionId != rec->last_VolSessionId || + rec->VolSessionTime != rec->last_VolSessionTime || + rec->FileIndex != rec->last_FileIndex) { + jcr->JobFiles++; + rec->last_VolSessionId = rec->VolSessionId; + rec->last_VolSessionTime = rec->VolSessionTime; + rec->last_FileIndex = rec->FileIndex; + } + } + + /* Debug code: check if we must hangup or blowup */ + if (handle_hangup_blowup(jcr, jcr->JobFiles, jcr->JobBytes)) { + return false; + } + + save_msg = fd->msg; /* save fd message pointer */ + fd->msg = wbuf; + fd->msglen = wsize; + /* Send data record to File daemon */ + jcr->JobBytes += wsize; /* increment bytes this job */ + Dmsg1(640, ">filed: send %d bytes data.\n", fd->msglen); + if (!fd->send()) { + Pmsg1(000, _("Error sending to FD. ERR=%s\n"), fd->bstrerror()); + Jmsg1(jcr, M_FATAL, 0, _("Error sending data to Client. 
ERR=%s\n"), + fd->bstrerror()); + ok = false; + } + fd->msg = save_msg; + return ok; +} + +/* + * New routine after to SD->SD implementation + * Called here for each record from read_records() + * Returns: true if OK + * false if error + */ +static bool mac_record_cb(DCR *dcr, DEV_RECORD *rec) +{ + JCR *jcr = dcr->jcr; + BSOCK *fd = jcr->file_bsock; + char buf1[100], buf2[100]; + bool new_header = false; + POOLMEM *save_msg; + char ec1[50], ec2[50]; + bool ok = true; + POOLMEM *wbuf = rec->data;; /* send buffer */ + uint32_t wsize = rec->data_len; /* send size */ + +#ifdef xxx + Pmsg5(000, "on entry JobId=%d FI=%s SessId=%d Strm=%s len=%d\n", + jcr->JobId, + FI_to_ascii(buf1, rec->FileIndex), rec->VolSessionId, + stream_to_ascii(buf2, rec->Stream, rec->FileIndex), rec->data_len); +#endif + + /* If label and not for us, discard it */ + if (rec->FileIndex < 0) { + Dmsg1(100, "FileIndex=%d\n", rec->FileIndex); + return true; + } + + /* + * For normal migration jobs, FileIndex values are sequential because + * we are dealing with one job. However, for Vbackup (consolidation), + * we will be getting records from multiple jobs and writing them back + * out, so we need to ensure that the output FileIndex is sequential. + * We do so by detecting a FileIndex change and incrementing the + * JobFiles, which we then use as the output FileIndex. + */ + if (rec->FileIndex >= 0) { + /* If something changed, increment FileIndex */ + if (rec->VolSessionId != rec->last_VolSessionId || + rec->VolSessionTime != rec->last_VolSessionTime || + rec->FileIndex != rec->last_FileIndex || + rec->Stream != rec->last_Stream) { + + /* Something changed */ + if (rec->last_VolSessionId != 0) { /* Not first record */ + Dmsg1(200, "Send EOD jobfiles=%d\n", jcr->JobFiles); + if (!fd->signal(BNET_EOD)) { /* End of previous stream */ + Jmsg(jcr, M_FATAL, 0, _("Error sending to File daemon. ERR=%s\n"), + fd->bstrerror()); + return false; + } + } + new_header = true; + if (rec->FileIndex != rec->last_FileIndex) { + jcr->JobFiles++; + } + rec->last_VolSessionId = rec->VolSessionId; + rec->last_VolSessionTime = rec->VolSessionTime; + rec->last_FileIndex = rec->FileIndex; + rec->last_Stream = rec->Stream; + } + rec->FileIndex = jcr->JobFiles; /* set sequential output FileIndex */ + } + + if (new_header) { + new_header = false; + Dmsg5(400, "Send header to FD: SessId=%u SessTim=%u FI=%s Strm=%s, len=%ld\n", + rec->VolSessionId, rec->VolSessionTime, + FI_to_ascii(ec1, rec->FileIndex), + stream_to_ascii(ec2, rec->Stream, rec->FileIndex), + wsize); + + /* Send data header to File daemon */ + if (!fd->fsend("%ld %ld %ld", rec->FileIndex, rec->Stream, wsize)) { + Pmsg1(000, _(">filed: Error Hdr=%s\n"), fd->msg); + Jmsg1(jcr, M_FATAL, 0, _("Error sending to File daemon. ERR=%s\n"), + fd->bstrerror()); + return false; + } + } + + Dmsg1(400, "FI=%d\n", rec->FileIndex); + /* Send data record to File daemon */ + save_msg = fd->msg; /* save fd message pointer */ + fd->msg = wbuf; /* pass data directly to the FD */ + fd->msglen = wsize; + jcr->JobBytes += wsize; /* increment bytes this job */ + Dmsg1(400, ">filed: send %d bytes data.\n", fd->msglen); + if (!fd->send()) { + Pmsg1(000, _("Error sending to FD. ERR=%s\n"), fd->bstrerror()); + Jmsg1(jcr, M_FATAL, 0, _("Error sending to File daemon. 
ERR=%s\n"), + fd->bstrerror()); + ok = false; + } + fd->msg = save_msg; /* restore fd message pointer */ + + Dmsg5(500, "wrote_record JobId=%d FI=%s SessId=%d Strm=%s len=%d\n", + jcr->JobId, + FI_to_ascii(buf1, rec->FileIndex), rec->VolSessionId, + stream_to_ascii(buf2, rec->Stream, rec->FileIndex), rec->data_len); + + return ok; +} diff --git a/src/stored/read_records.c b/src/stored/read_records.c new file mode 100644 index 00000000..6ec305cb --- /dev/null +++ b/src/stored/read_records.c @@ -0,0 +1,518 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * This routine provides a routine that will handle all + * the gory little details of reading a record from a Bacula + * archive. It uses a callback to pass you each record in turn, + * as well as a callback for mounting the next tape. It takes + * care of reading blocks, applying the bsr, ... + * Note, this routine is really the heart of the restore routines, + * and we are *really* bit pushing here so be careful about making + * any modifications. + * + * Kern E. Sibbald, August MMII + * + */ + +#include "bacula.h" +#include "stored.h" + +/* Forward referenced functions */ +static BSR *position_to_first_file(JCR *jcr, DCR *dcr, BSR *bsr); +static void handle_session_record(DEVICE *dev, DEV_RECORD *rec, SESSION_LABEL *sessrec); +static bool try_repositioning(JCR *jcr, DEV_RECORD *rec, DCR *dcr); +#ifdef DEBUG +static char *rec_state_bits_to_str(DEV_RECORD *rec); +#endif + +static const int dbglvl = 150; +static const int no_FileIndex = -999999; +static bool mount_next_vol(JCR *jcr, DCR *dcr, BSR *bsr, + SESSION_LABEL *sessrec, bool *should_stop, + bool record_cb(DCR *dcr, DEV_RECORD *rec), + bool mount_cb(DCR *dcr)) +{ + bool ok = true; + DEVICE *dev = dcr->dev; + *should_stop = false; + + /* We need an other volume */ + volume_unused(dcr); /* mark volume unused */ + if (!mount_cb(dcr)) { + *should_stop = true; + /* + * Create EOT Label so that Media record may + * be properly updated because this is the last + * tape. + */ + DEV_RECORD *trec = new_record(); + trec->FileIndex = EOT_LABEL; + trec->Addr = dev->get_full_addr(); + ok = record_cb(dcr, trec); + free_record(trec); + if (jcr->mount_next_volume) { + jcr->mount_next_volume = false; + dev->clear_eot(); + } + return ok; + } + jcr->mount_next_volume = false; + /* + * The Device can change at the end of a tape, so refresh it + * from the dcr. + */ + dev = dcr->dev; + /* + * We just have a new tape up, now read the label (first record) + * and pass it off to the callback routine, then continue + * most likely reading the previous record. 
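+ * Once the label record has been delivered to the callback,
+ * position_to_first_file() jumps to the position selected by the bsr.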
+ */ + dcr->read_block_from_device(NO_BLOCK_NUMBER_CHECK); + + DEV_RECORD *trec = new_record(); + read_record_from_block(dcr, trec); + handle_session_record(dev, trec, sessrec); + ok = record_cb(dcr, trec); + free_record(trec); + position_to_first_file(jcr, dcr, bsr); /* We jump to the specified position */ + return ok; +} + +/* + * This subroutine reads all the records and passes them back to your + * callback routine (also mount routine at EOM). + * You must not change any values in the DEV_RECORD packet + */ +bool read_records(DCR *dcr, + bool record_cb(DCR *dcr, DEV_RECORD *rec), + bool mount_cb(DCR *dcr)) +{ + JCR *jcr = dcr->jcr; + DEVICE *dev = dcr->dev; + DEV_BLOCK *block = dcr->block; + DEV_RECORD *rec = NULL; + uint32_t record; + int32_t lastFileIndex; + bool ok = true; + bool done = false; + bool should_stop; + SESSION_LABEL sessrec; + dlist *recs; /* linked list of rec packets open */ + char ed1[50]; + bool first_block = true; + + recs = New(dlist(rec, &rec->link)); + /* We go to the first_file unless we need to reposition during an + * interactive restore session (the reposition will be done with a different + * BSR in the for loop */ + position_to_first_file(jcr, dcr, jcr->bsr); + jcr->mount_next_volume = false; + + for ( ; ok && !done; ) { + if (job_canceled(jcr)) { + ok = false; + break; + } + ASSERT2(!dcr->dev->adata, "Called with adata block. Wrong!"); + + + if (! first_block || dev->dev_type != B_FIFO_DEV ) { + if (dev->at_eot() || !dcr->read_block_from_device(CHECK_BLOCK_NUMBERS)) { + if (dev->at_eot()) { + Jmsg(jcr, M_INFO, 0, + _("End of Volume \"%s\" at addr=%s on device %s.\n"), + dcr->VolumeName, + dev->print_addr(ed1, sizeof(ed1), dev->EndAddr), + dev->print_name()); + ok = mount_next_vol(jcr, dcr, jcr->bsr, &sessrec, &should_stop, + record_cb, mount_cb); + /* Might have changed after the mount request */ + dev = dcr->dev; + block = dcr->block; + if (should_stop) { + break; + } + continue; + + } else if (dev->at_eof()) { + Dmsg3(200, "EOF at addr=%s on device %s, Volume \"%s\"\n", + dev->print_addr(ed1, sizeof(ed1), dev->EndAddr), + dev->print_name(), dcr->VolumeName); + continue; + } else if (dev->is_short_block()) { + Jmsg1(jcr, M_ERROR, 0, "%s", dev->errmsg); + continue; + } else { + /* I/O error or strange end of tape */ + display_tape_error_status(jcr, dev); + if (forge_on || jcr->ignore_label_errors) { + dev->fsr(1); /* try skipping bad record */ + Pmsg0(000, _("Did fsr in attempt to skip bad record.\n")); + continue; /* try to continue */ + } + ok = false; /* stop everything */ + break; + } + } + Dmsg1(dbglvl, "Read new block at pos=%s\n", dev->print_addr(ed1, sizeof(ed1))); + } + first_block = false; +#ifdef if_and_when_FAST_BLOCK_REJECTION_is_working + /* this does not stop when file/block are too big */ + if (!match_bsr_block(jcr->bsr, block)) { + if (try_repositioning(jcr, rec, dcr)) { + break; /* get next volume */ + } + continue; /* skip this record */ + } +#endif + /* + * Get a new record for each Job as defined by + * VolSessionId and VolSessionTime + */ + bool found = false; + foreach_dlist(rec, recs) { + if (rec->VolSessionId == block->VolSessionId && + rec->VolSessionTime == block->VolSessionTime) { + /* When the previous Block of the current record is not correctly ordered, + * if we concat the previous record to the next one, the result is probably + * incorrect. 
At least the vacuum command should not use this kind of record + */ + if (rec->remainder) { + if (rec->BlockNumber != (block->BlockNumber - 1) + && + rec->BlockNumber != block->BlockNumber) + { + Dmsg3(0, "invalid: rec=%ld block=%ld state=%s\n", + rec->BlockNumber, block->BlockNumber, rec_state_bits_to_str(rec)); + rec->invalid = true; + /* We can discard the current data if needed. The code is very + * tricky in the read_records loop, so it's better to not + * introduce new subtle errors. + */ + if (dcr->discard_invalid_records) { + empty_record(rec); + } + } + } + found = true; + break; + } + } + if (!found) { + rec = new_record(); + recs->prepend(rec); + Dmsg3(dbglvl, "New record for state=%s SI=%d ST=%d\n", + rec_state_bits_to_str(rec), + block->VolSessionId, block->VolSessionTime); + } + Dmsg4(dbglvl, "Before read rec loop. stat=%s blk=%d rem=%d invalid=%d\n", + rec_state_bits_to_str(rec), block->BlockNumber, rec->remainder, rec->invalid); + record = 0; + rec->state_bits = 0; + rec->BlockNumber = block->BlockNumber; + lastFileIndex = no_FileIndex; + Dmsg1(dbglvl, "Block %s empty\n", is_block_marked_empty(rec)?"is":"NOT"); + for (rec->state_bits=0; ok && !is_block_marked_empty(rec); ) { + if (!read_record_from_block(dcr, rec)) { + Dmsg3(200, "!read-break. state_bits=%s blk=%d rem=%d\n", rec_state_bits_to_str(rec), + block->BlockNumber, rec->remainder); + break; + } + Dmsg5(dbglvl, "read-OK. state_bits=%s blk=%d rem=%d volume:addr=%s:%llu\n", + rec_state_bits_to_str(rec), block->BlockNumber, rec->remainder, + NPRT(rec->VolumeName), rec->Addr); + /* + * At this point, we have at least a record header. + * Now decide if we want this record or not, but remember + * before accessing the record, we may need to read again to + * get all the data. + */ + record++; + Dmsg6(dbglvl, "recno=%d state_bits=%s blk=%d SI=%d ST=%d FI=%d\n", record, + rec_state_bits_to_str(rec), block->BlockNumber, + rec->VolSessionId, rec->VolSessionTime, rec->FileIndex); + + if (rec->FileIndex == EOM_LABEL) { /* end of tape? */ + Dmsg0(40, "Get EOM LABEL\n"); + break; /* yes, get out */ + } + + /* Some sort of label? */ + if (rec->FileIndex < 0) { + handle_session_record(dev, rec, &sessrec); + if (jcr->bsr) { + /* We just check block FI and FT not FileIndex */ + rec->match_stat = match_bsr_block(jcr->bsr, block); + } else { + rec->match_stat = 0; + } + if (rec->invalid) { + Dmsg5(0, "The record %d in block %ld SI=%ld ST=%ld FI=%ld was marked as invalid\n", + rec->RecNum, rec->BlockNumber, rec->VolSessionId, rec->VolSessionTime, rec->FileIndex); + } + //ASSERTD(!rec->invalid || dcr->discard_invalid_records, "Got an invalid record"); + /* + * Note, we pass *all* labels to the callback routine. If + * he wants to know if they matched the bsr, then he must + * check the match_stat in the record */ + ok = record_cb(dcr, rec); + rec->invalid = false; /* The record was maybe invalid, but the next one is probably good */ +#ifdef xxx + /* + * If this is the end of the Session (EOS) for this record + * we can remove the record. Note, there is a separate + * record to read each session. If a new session is seen + * a new record will be created at approx line 157 above. + * However, it seg faults in the for line at lineno 196. + */ + if (rec->FileIndex == EOS_LABEL) { + Dmsg2(dbglvl, "Remove EOS rec. 
SI=%d ST=%d\n", rec->VolSessionId, + rec->VolSessionTime); + recs->remove(rec); + free_record(rec); + } +#endif + continue; + } /* end if label record */ + + /* + * Apply BSR filter + */ + if (jcr->bsr) { + rec->match_stat = match_bsr(jcr->bsr, rec, &dev->VolHdr, &sessrec, jcr); + Dmsg2(dbglvl, "match_bsr=%d bsr->reposition=%d\n", rec->match_stat, + jcr->bsr->reposition); + if (rec->match_stat == -1) { /* no more possible matches */ + done = true; /* all items found, stop */ + Dmsg1(dbglvl, "All done Addr=%s\n", dev->print_addr(ed1, sizeof(ed1))); + break; + } else if (rec->match_stat == 0) { /* no match */ + Dmsg3(dbglvl, "BSR no match: clear rem=%d FI=%d before set_eof pos %s\n", + rec->remainder, rec->FileIndex, dev->print_addr(ed1, sizeof(ed1))); + rec->remainder = 0; + rec->state_bits &= ~REC_PARTIAL_RECORD; + if (try_repositioning(jcr, rec, dcr)) { + break; /* We moved on the volume, read next block */ + } + continue; /* we don't want record, read next one */ + } + } + dcr->VolLastIndex = rec->FileIndex; /* let caller know where we are */ + if (is_partial_record(rec)) { + Dmsg6(dbglvl, "Partial, break. recno=%d state_bits=%s blk=%d SI=%d ST=%d FI=%d\n", record, + rec_state_bits_to_str(rec), block->BlockNumber, + rec->VolSessionId, rec->VolSessionTime, rec->FileIndex); + break; /* read second part of record */ + } + + Dmsg6(dbglvl, "OK callback. recno=%d state_bits=%s blk=%d SI=%d ST=%d FI=%d\n", record, + rec_state_bits_to_str(rec), block->BlockNumber, + rec->VolSessionId, rec->VolSessionTime, rec->FileIndex); + if (lastFileIndex != no_FileIndex && lastFileIndex != rec->FileIndex) { + if (is_this_bsr_done(jcr, jcr->bsr, rec) && try_repositioning(jcr, rec, dcr)) { + Dmsg1(dbglvl, "This bsr done, break pos %s\n", + dev->print_addr(ed1, sizeof(ed1))); + break; + } + Dmsg2(dbglvl, "==== inside LastIndex=%d FileIndex=%d\n", lastFileIndex, rec->FileIndex); + } + Dmsg2(dbglvl, "==== LastIndex=%d FileIndex=%d\n", lastFileIndex, rec->FileIndex); + lastFileIndex = rec->FileIndex; + if (rec->invalid) { + Dmsg5(0, "The record %d in block %ld SI=%ld ST=%ld FI=%ld was marked as invalid\n", + rec->RecNum, rec->BlockNumber, rec->VolSessionId, rec->VolSessionTime, rec->FileIndex); + } + //ASSERTD(!rec->invalid || dcr->discard_invalid_records, "Got an invalid record"); + ok = record_cb(dcr, rec); + rec->invalid = false; /* The record was maybe invalid, but the next one is probably good */ +#if 0 + /* + * If we have a digest stream, we check to see if we have + * finished the current bsr, and if so, repositioning will + * be turned on. + */ + if (crypto_digest_stream_type(rec->Stream) != CRYPTO_DIGEST_NONE) { + Dmsg3(dbglvl, "=== Have digest FI=%u before bsr check pos %s\n", rec->FileIndex, + dev->print_addr(ed1, sizeof(ed1)); + if (is_this_bsr_done(jcr, jcr->bsr, rec) && try_repositioning(jcr, rec, dcr)) { + Dmsg1(dbglvl, "==== BSR done at FI=%d\n", rec->FileIndex); + Dmsg1(dbglvl, "This bsr done, break pos=%s\n", + dev->print_addr(ed1, sizeof(ed1))); + break; + } + Dmsg2(900, "After is_bsr_done pos %s\n", dev->print_addr(ed1, sizeof(ed1)); + } +#endif + } /* end for loop over records */ + Dmsg1(dbglvl, "After end recs in block. pos=%s\n", dev->print_addr(ed1, sizeof(ed1))); + } /* end for loop over blocks */ + + /* Walk down list and free all remaining allocated recs */ + while (!recs->empty()) { + rec = (DEV_RECORD *)recs->first(); + recs->remove(rec); + free_record(rec); + } + delete recs; + print_block_read_errors(jcr, block); + return ok; +} + +/* + * See if we can reposition. 
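+ * The target address comes from find_next_bsr(); we never seek backwards,
+ * so if the device is already past the bsr start address we stay put and
+ * keep reading.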
+ * Returns: true if at end of volume + * false otherwise + */ +static bool try_repositioning(JCR *jcr, DEV_RECORD *rec, DCR *dcr) +{ + BSR *bsr; + DEVICE *dev = dcr->dev; + char ed1[50]; + + bsr = find_next_bsr(jcr->bsr, dev); + Dmsg2(dbglvl, "nextbsr=%p mount_next_volume=%d\n", bsr, jcr->bsr->mount_next_volume); + if (bsr == NULL && jcr->bsr->mount_next_volume) { + Dmsg0(dbglvl, "Would mount next volume here\n"); + Dmsg1(dbglvl, "Current position Addr=%s\n", + dev->print_addr(ed1, sizeof(ed1))); + jcr->bsr->mount_next_volume = false; + if (!dev->at_eot()) { + /* Set EOT flag to force mount of next Volume */ + jcr->mount_next_volume = true; + dev->set_eot(); + } + rec->Addr = 0; + return true; + } + if (bsr) { + /* + * ***FIXME*** gross kludge to make disk seeking work. Remove + * when find_next_bsr() is fixed not to return a bsr already + * completed. + */ + uint64_t dev_addr = dev->get_full_addr(); + uint64_t bsr_addr = get_bsr_start_addr(bsr); + + /* Do not position backwards */ + if (dev_addr > bsr_addr) { + return false; + } + Dmsg2(dbglvl, "Try_Reposition from addr=%llu to %llu\n", + dev_addr, bsr_addr); + dev->reposition(dcr, bsr_addr); + rec->Addr = 0; + return true; /* We want the next block */ + } + return false; +} + +/* + * Position to the first file on this volume + */ +static BSR *position_to_first_file(JCR *jcr, DCR *dcr, BSR *bsr) +{ + DEVICE *dev = dcr->dev; + uint64_t bsr_addr; + char ed1[50], ed2[50]; + + Enter(150); + /* + * Now find and position to first file and block + * on this tape. + */ + if (bsr) { + bsr->reposition = true; /* force repositioning */ + bsr = find_next_bsr(bsr, dev); + + if ((bsr_addr=get_bsr_start_addr(bsr)) > 0) { + Jmsg(jcr, M_INFO, 0, _("Forward spacing Volume \"%s\" to addr=%s\n"), + dev->VolHdr.VolumeName, dev->print_addr(ed1, sizeof(ed1), bsr_addr)); + dev->clear_eot(); /* TODO: See where to put this clear() exactly */ + Dmsg2(dbglvl, "pos_to_first_file from addr=%s to %s\n", + dev->print_addr(ed1, sizeof(ed1)), + dev->print_addr(ed2, sizeof(ed2), bsr_addr)); + dev->reposition(dcr, bsr_addr); + } + } + Leave(150); + return bsr; +} + + +static void handle_session_record(DEVICE *dev, DEV_RECORD *rec, SESSION_LABEL *sessrec) +{ + const char *rtype; + char buf[100]; + + memset(sessrec, 0, sizeof(SESSION_LABEL)); + switch (rec->FileIndex) { + case PRE_LABEL: + rtype = _("Fresh Volume Label"); + break; + case VOL_LABEL: + rtype = _("Volume Label"); + unser_volume_label(dev, rec); + break; + case SOS_LABEL: + rtype = _("Begin Session"); + unser_session_label(sessrec, rec); + break; + case EOS_LABEL: + rtype = _("End Session"); + break; + case EOM_LABEL: + rtype = _("End of Media"); + break; + default: + bsnprintf(buf, sizeof(buf), _("Unknown code %d\n"), rec->FileIndex); + rtype = buf; + break; + } + Dmsg5(dbglvl, _("%s Record: VolSessionId=%d VolSessionTime=%d JobId=%d DataLen=%d\n"), + rtype, rec->VolSessionId, rec->VolSessionTime, rec->Stream, rec->data_len); +} + +#ifdef DEBUG +static char *rec_state_bits_to_str(DEV_RECORD *rec) +{ + static char buf[200]; + buf[0] = 0; + if (rec->state_bits & REC_NO_HEADER) { + bstrncat(buf, "Nohdr,", sizeof(buf)); + } + if (is_partial_record(rec)) { + bstrncat(buf, "partial,", sizeof(buf)); + } + if (rec->state_bits & REC_BLOCK_EMPTY) { + bstrncat(buf, "empty,", sizeof(buf)); + } + if (rec->state_bits & REC_NO_MATCH) { + bstrncat(buf, "Nomatch,", sizeof(buf)); + } + if (rec->state_bits & REC_CONTINUATION) { + bstrncat(buf, "cont,", sizeof(buf)); + } + if (buf[0]) { + buf[strlen(buf)-1] = 0; + } + return buf; 
+} +#endif diff --git a/src/stored/record.h b/src/stored/record.h new file mode 100644 index 00000000..b0aa0dc8 --- /dev/null +++ b/src/stored/record.h @@ -0,0 +1,258 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Record, and label definitions for Bacula + * media data format. + * + * Kern Sibbald, MM + * + */ + + +#ifndef __RECORD_H +#define __RECORD_H 1 + +/* Return codes from read_device_volume_label() */ +enum { + VOL_NOT_READ = 1, /* Volume label not read */ + VOL_OK = 2, /* volume name OK */ + VOL_NO_LABEL = 3, /* volume not labeled */ + VOL_IO_ERROR = 4, /* volume I/O error */ + VOL_NAME_ERROR = 5, /* Volume name mismatch */ + VOL_CREATE_ERROR = 6, /* Error creating label */ + VOL_VERSION_ERROR = 7, /* Bacula version error */ + VOL_LABEL_ERROR = 8, /* Bad label type */ + VOL_NO_MEDIA = 9, /* Hard error -- no media present */ + VOL_TYPE_ERROR = 10 /* Volume type (aligned/non-aligned) error */ +}; + +enum rec_state { + st_none, /* No state */ + st_header, /* Write header */ + st_cont_header, /* Write continuation header */ + st_data, /* Write data record */ + st_adata_blkhdr, /* Adata block header */ + st_adata_rechdr, /* Adata record header */ + st_cont_adata_rechdr, /* Adata continuation rechdr */ + st_adata, /* Write aligned data */ + st_cont_adata, /* Write more aligned data */ + st_adata_label /* Writing adata vol label */ +}; + + +/* See block.h for RECHDR_LENGTH */ + +/* + * This is the Media structure for a record header. + * NB: when it is written it is serialized. + + uint32_t VolSessionId; + uint32_t VolSessionTime; + + * The above 8 bytes are only written in a BB01 block, BB02 + * and later blocks contain these values in the block header + * rather than the record header. + + int32_t FileIndex; + int32_t Stream; + uint32_t data_len; + + */ + +/* Record state bit definitions */ +#define REC_NO_HEADER (1<<0) /* No header read */ +#define REC_PARTIAL_RECORD (1<<1) /* returning partial record */ +#define REC_BLOCK_EMPTY (1<<2) /* Not enough data in block */ +#define REC_NO_MATCH (1<<3) /* No match on continuation data */ +#define REC_CONTINUATION (1<<4) /* Continuation record found */ +#define REC_ISTAPE (1<<5) /* Set if device is tape */ +#define REC_ADATA_EMPTY (1<<6) /* Not endough adata in block */ +#define REC_NO_SPLIT (1<<7) /* Do not split this record */ + +#define is_partial_record(r) ((r)->state_bits & REC_PARTIAL_RECORD) +#define is_block_marked_empty(r) ((r)->state_bits & (REC_BLOCK_EMPTY|REC_ADATA_EMPTY)) + +/* + * DEV_RECORD for reading and writing records. + * It consists of a Record Header, and the Record Data + * + * This is the memory structure for the record header. + */ +struct BSR; /* satisfy forward reference */ +struct VOL_LIST; +struct DEV_RECORD { + dlink link; /* link for chaining in read_record.c */ + /* File and Block are always returned during reading + * and writing records. 
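+ * (They are now carried combined in the 64-bit Addr field; StartAddr
+ * additionally remembers where a split record began.)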
+ */ + uint64_t StreamLen; /* Expected data stream length */ + uint64_t FileOffset; /* Offset of this record inside the file */ + uint64_t StartAddr; /* Start address (when the record is partial) */ + uint64_t Addr; /* Record address */ + uint32_t VolSessionId; /* sequential id within this session */ + uint32_t VolSessionTime; /* session start time */ + int32_t FileIndex; /* sequential file number */ + int32_t Stream; /* Full Stream number with high bits */ + int32_t last_FI; /* previous fi for adata */ + int32_t last_Stream; /* previous stream for adata */ + int32_t maskedStream; /* Masked Stream without high bits */ + uint32_t data_len; /* current record length */ + uint32_t remainder; /* remaining bytes to read/write */ + uint32_t adata_remainder; /* remaining adata bytes to read/write */ + uint32_t remlen; /* temp remainder bytes */ + uint32_t data_bytes; /* data_bytes */ + uint32_t state_bits; /* state bits */ + uint32_t RecNum; /* Record number in the block */ + uint32_t BlockNumber; /* Block number for this record (used in read_records()) */ + bool invalid; /* The record may be invalid if it was merged with a previous record */ + rec_state wstate; /* state of write_record_to_block */ + rec_state rstate; /* state of read_record_from_block */ + BSR *bsr; /* pointer to bsr that matched */ + POOLMEM *data; /* Record data. This MUST be a memory pool item */ + const char *VolumeName; /* From JCR::VolList::VolumeName, freed at the end */ + int32_t match_stat; /* bsr match status */ + uint32_t last_VolSessionId; /* used in sequencing FI for Vbackup */ + uint32_t last_VolSessionTime; + int32_t last_FileIndex; +}; + + +/* + * Values for LabelType that are put into the FileIndex field + * Note, these values are negative to distinguish them + * from user records where the FileIndex is forced positive. + */ +#define PRE_LABEL -1 /* Vol label on unwritten tape */ +#define VOL_LABEL -2 /* Volume label first file */ +#define EOM_LABEL -3 /* Writen at end of tape */ +#define SOS_LABEL -4 /* Start of Session */ +#define EOS_LABEL -5 /* End of Session */ +#define EOT_LABEL -6 /* End of physical tape (2 eofs) */ +#define SOB_LABEL -7 /* Start of object -- file/directory */ +#define EOB_LABEL -8 /* End of object (after all streams) */ + +/* + * Volume Label Record. This is the in-memory definition. The + * tape definition is defined in the serialization code itself + * ser_volume_label() and unser_volume_label() and is slightly different. + */ + + +struct Volume_Label { + /* + * The first items in this structure are saved + * in the DEVICE buffer, but are not actually written + * to the tape. + */ + int32_t LabelType; /* This is written in header only */ + uint32_t LabelSize; /* length of serialized label */ + /* + * The items below this line are stored on + * the tape + */ + char Id[32]; /* Bacula Immortal ... 
*/ + + uint32_t VerNum; /* Label version number */ + + /* VerNum <= 10 */ + float64_t label_date; /* Date tape labeled */ + float64_t label_time; /* Time tape labeled */ + + /* VerNum >= 11 */ + btime_t label_btime; /* tdate tape labeled */ + btime_t write_btime; /* tdate tape written */ + + /* Unused with VerNum >= 11 */ + float64_t write_date; /* Date this label written */ + float64_t write_time; /* Time this label written */ + + char VolumeName[MAX_NAME_LENGTH]; /* Volume name */ + char PrevVolumeName[MAX_NAME_LENGTH]; /* Previous Volume Name */ + char PoolName[MAX_NAME_LENGTH]; /* Pool name */ + char PoolType[MAX_NAME_LENGTH]; /* Pool type */ + char MediaType[MAX_NAME_LENGTH]; /* Type of this media */ + + char HostName[MAX_NAME_LENGTH]; /* Host name of writing computer */ + char LabelProg[50]; /* Label program name */ + char ProgVersion[50]; /* Program version */ + char ProgDate[50]; /* Program build date/time */ + + /* Mostly for aligned volumes, BlockSize also used for dedup volumes */ + char AlignedVolumeName[MAX_NAME_LENGTH+4]; /* Aligned block volume name */ + uint64_t FirstData; /* Offset to first data address */ + uint32_t FileAlignment; /* File alignment factor */ + uint32_t PaddingSize; /* Block padding */ + uint32_t BlockSize; /* Basic block size */ + + /* For Cloud */ + uint64_t MaxPartSize; /* Maximum Part Size */ + +}; + +#define SER_LENGTH_Volume_Label 1024 /* max serialised length of volume label */ +#define SER_LENGTH_Session_Label 1024 /* max serialised length of session label */ + +typedef struct Volume_Label VOLUME_LABEL; + +/* + * Session Start/End Label + * This record is at the beginning and end of each session + */ +struct Session_Label { + char Id[32]; /* Bacula Immortal ... */ + + uint32_t VerNum; /* Label version number */ + + uint32_t JobId; /* Job id */ + uint32_t VolumeIndex; /* Sequence no of volume for this job */ + + /* VerNum >= 11 */ + btime_t write_btime; /* Tdate this label written */ + + /* VerNum < 11 */ + float64_t write_date; /* Date this label written */ + + /* Unused VerNum >= 11 */ + float64_t write_time; /* Time this label written */ + + char PoolName[MAX_NAME_LENGTH]; /* Pool name */ + char PoolType[MAX_NAME_LENGTH]; /* Pool type */ + char JobName[MAX_NAME_LENGTH]; /* base Job name */ + char ClientName[MAX_NAME_LENGTH]; + char Job[MAX_NAME_LENGTH]; /* Unique name of this Job */ + char FileSetName[MAX_NAME_LENGTH]; + char FileSetMD5[MAX_NAME_LENGTH]; + uint32_t JobType; + uint32_t JobLevel; + /* The remainder are part of EOS label only */ + uint32_t JobFiles; + uint64_t JobBytes; + uint32_t StartBlock; + uint32_t EndBlock; + uint32_t StartFile; + uint32_t EndFile; + uint32_t JobErrors; + uint32_t JobStatus; /* Job status */ + +}; +typedef struct Session_Label SESSION_LABEL; + +#define SERIAL_BUFSIZE 1024 /* volume serialisation buffer size */ + +#endif diff --git a/src/stored/record_read.c b/src/stored/record_read.c new file mode 100644 index 00000000..3fd1f044 --- /dev/null +++ b/src/stored/record_read.c @@ -0,0 +1,323 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. 
+ + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * record_read.c -- Volume (tape/disk) record read functions + * + * Kern Sibbald, April MMI + * added BB02 format October MMII + * + */ + + +#include "bacula.h" +#include "stored.h" + +/* Imported subroutines */ +static const int read_dbglvl = 200|DT_VOLUME; +static const int dbgep = 200|DT_VOLUME; /* debug execution path */ + +/* + * Read the header record + */ +static bool read_header(DCR *dcr, DEV_BLOCK *block, DEV_RECORD *rec) +{ + ser_declare; + uint32_t VolSessionId; + uint32_t VolSessionTime; + int32_t FileIndex; + int32_t Stream; + uint32_t rhl; + char buf1[100], buf2[100]; + + Dmsg0(dbgep, "=== rpath 1 read_header\n"); + ASSERT2(!block->adata, "Block is adata. Wrong!"); + /* Clear state flags */ + rec->state_bits = 0; + if (block->dev->is_tape()) { + rec->state_bits |= REC_ISTAPE; + } + rec->Addr = ((DEVICE *)block->dev)->EndAddr; + + /* + * Get the header. There is always a full header, + * otherwise we find it in the next block. + */ + Dmsg4(read_dbglvl, "adata=%d Block=%d Ver=%d block_len=%u\n", + block->adata, block->BlockNumber, block->BlockVer, block->block_len); + if (block->BlockVer == 1) { + rhl = RECHDR1_LENGTH; + } else { + rhl = RECHDR2_LENGTH; + } + if (rec->remlen >= rhl) { + Dmsg0(dbgep, "=== rpath 2 begin unserial header\n"); + Dmsg4(read_dbglvl, "read_header: remlen=%d data_len=%d rem=%d blkver=%d\n", + rec->remlen, rec->data_len, rec->remainder, block->BlockVer); + + unser_begin(block->bufp, WRITE_RECHDR_LENGTH); + if (block->BlockVer == 1) { + unser_uint32(VolSessionId); + unser_uint32(VolSessionTime); + } else { + VolSessionId = block->VolSessionId; + VolSessionTime = block->VolSessionTime; + } + unser_int32(FileIndex); + unser_int32(Stream); + unser_uint32(rec->data_bytes); + + if (dcr->dev->have_adata_header(dcr, rec, FileIndex, Stream, VolSessionId)) { + return true; + } + + block->bufp += rhl; + block->binbuf -= rhl; + rec->remlen -= rhl; + + /* If we are looking for more (remainder!=0), we reject anything + * where the VolSessionId and VolSessionTime don't agree + */ + if (rec->remainder && (rec->VolSessionId != VolSessionId || + rec->VolSessionTime != VolSessionTime)) { + rec->state_bits |= REC_NO_MATCH; + Dmsg0(read_dbglvl, "remainder and VolSession doesn't match\n"); + Dmsg0(dbgep, "=== rpath 4 VolSession no match\n"); + return false; /* This is from some other Session */ + } + + /* if Stream is negative, it means that this is a continuation + * of a previous partially written record. + */ + if (Stream < 0) { /* continuation record? */ + Dmsg0(dbgep, "=== rpath 5 negative stream\n"); + Dmsg1(read_dbglvl, "Got negative Stream => continuation. 
remainder=%d\n", + rec->remainder); + rec->state_bits |= REC_CONTINUATION; + if (!rec->remainder) { /* if we didn't read previously */ + Dmsg0(dbgep, "=== rpath 6 no remainder\n"); + rec->data_len = 0; /* return data as if no continuation */ + } else if (rec->Stream != -Stream) { + Dmsg0(dbgep, "=== rpath 7 wrong cont stream\n"); + rec->state_bits |= REC_NO_MATCH; + return false; /* This is from some other Session */ + } + rec->Stream = -Stream; /* set correct Stream */ + rec->maskedStream = rec->Stream & STREAMMASK_TYPE; + } else { /* Regular record */ + Dmsg0(dbgep, "=== rpath 8 normal stream\n"); + rec->Stream = Stream; + rec->maskedStream = rec->Stream & STREAMMASK_TYPE; + rec->data_len = 0; /* transfer to beginning of data */ + } + rec->VolSessionId = VolSessionId; + rec->VolSessionTime = VolSessionTime; + rec->FileIndex = FileIndex; + if (FileIndex > 0) { + Dmsg0(dbgep, "=== rpath 9 FileIndex>0\n"); + if (block->FirstIndex == 0) { + Dmsg0(dbgep, "=== rpath 10 FirstIndex\n"); + block->FirstIndex = FileIndex; + } + block->LastIndex = rec->FileIndex; + } + + Dmsg6(read_dbglvl, "read_header: FI=%s SessId=%d Strm=%s len=%u rec->remlen=%d data_len=%d\n", + FI_to_ascii(buf1, rec->FileIndex), rec->VolSessionId, + stream_to_ascii(buf2, rec->Stream, rec->FileIndex), rec->data_bytes, rec->remlen, + rec->data_len); + } else { + Dmsg0(dbgep, "=== rpath 11a block out of records\n"); + /* + * No more records in this block because the number + * of remaining bytes are less than a record header + * length, so return empty handed, but indicate that + * he must read again. By returning, we allow the + * higher level routine to fetch the next block and + * then reread. + */ + Dmsg0(read_dbglvl, "read_header: End of block\n"); + rec->state_bits |= (REC_NO_HEADER | REC_BLOCK_EMPTY); + empty_block(block); /* mark block empty */ + return false; + } + + /* Sanity check */ + if (rec->data_bytes >= MAX_BLOCK_SIZE) { + Dmsg0(dbgep, "=== rpath 11b maxlen too big\n"); + /* + * Something is wrong, force read of next block, abort + * continuing with this block. + */ + rec->state_bits |= (REC_NO_HEADER | REC_BLOCK_EMPTY); + empty_block(block); + Jmsg2(dcr->jcr, M_WARNING, 0, _("Sanity check failed. maxlen=%d datalen=%d. Block discarded.\n"), + MAX_BLOCK_SIZE, rec->data_bytes); + return false; + } + + rec->data = check_pool_memory_size(rec->data, rec->data_len+rec->data_bytes); + rec->rstate = st_data; + return true; +} + +/* + * We have just read a header, now read the data into the record. + * Note, if we do not read the full record, we will return to + * read the next header, which will then come back here later + * to finish reading the full record. + */ +static void read_data(DEV_BLOCK *block, DEV_RECORD *rec) +{ + char buf1[100], buf2[100]; + + Dmsg0(dbgep, "=== rpath 22 read_data\n"); + ASSERT2(!block->adata, "Block is adata. Wrong!"); + /* + * At this point, we have read the header, now we + * must transfer as much of the data record as + * possible taking into account: 1. A partial + * data record may have previously been transferred, + * 2. The current block may not contain the whole data + * record. 
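+ * For example, a record several times larger than the block is copied out
+ * in pieces: each partial pass copies what fits, sets remainder=1 and flags
+ * REC_PARTIAL_RECORD, and the caller comes back with the next block until
+ * the whole record has been reassembled.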
+ */ + if (rec->remlen >= rec->data_bytes) { + Dmsg0(dbgep, "=== rpath 23 full record\n"); + /* Got whole record */ + memcpy(rec->data+rec->data_len, block->bufp, rec->data_bytes); + block->bufp += rec->data_bytes; + block->binbuf -= rec->data_bytes; + rec->data_len += rec->data_bytes; + rec->remainder = 0; + Dmsg6(190, "Rdata full adata=%d FI=%s SessId=%d Strm=%s len=%d block=%p\n", + block->adata, FI_to_ascii(buf1, rec->FileIndex), rec->VolSessionId, + stream_to_ascii(buf2, rec->Stream, rec->FileIndex), rec->data_len, + block); + } else { + Dmsg0(dbgep, "=== rpath 24 partial record\n"); + /* Partial record */ + memcpy(rec->data+rec->data_len, block->bufp, rec->remlen); + block->bufp += rec->remlen; + block->binbuf -= rec->remlen; + rec->data_len += rec->remlen; + rec->remainder = 1; /* partial record transferred */ + Dmsg1(read_dbglvl, "read_data: partial xfered=%d\n", rec->data_len); + rec->state_bits |= (REC_PARTIAL_RECORD | REC_BLOCK_EMPTY); + } +} + + +/* + * Read a Record from the block + * Returns: false if nothing read or if the continuation record does not match. + * In both of these cases, a block read must be done. + * true if at least the record header was read, this + * routine may have to be called again with a new + * block if the entire record was not read. + */ +bool read_record_from_block(DCR *dcr, DEV_RECORD *rec) +{ + bool save_adata = dcr->dev->adata; + bool rtn; + + Dmsg0(dbgep, "=== rpath 1 Enter read_record_from block\n"); + + /* Update the Record number only if we have a new record */ + if (rec->remainder == 0) { + rec->RecNum = dcr->block->RecNum; + rec->VolumeName = dcr->CurrentVol->VolumeName; /* From JCR::VolList, freed at the end */ + rec->Addr = rec->StartAddr = dcr->block->BlockAddr; + } + + /* We read the next record */ + dcr->block->RecNum++; + + for ( ;; ) { + switch (rec->rstate) { + case st_none: + dump_block(dcr->dev, dcr->ameta_block, "st_none"); + case st_header: + Dmsg0(dbgep, "=== rpath 33 st_header\n"); + dcr->set_ameta(); + rec->remlen = dcr->block->binbuf; + /* Note read_header sets rec->rstate on return true */ + if (!read_header(dcr, dcr->block, rec)) { /* sets state */ + Dmsg0(dbgep, "=== rpath 34 failed read header\n"); + Dmsg0(read_dbglvl, "read_header returned EOF.\n"); + goto fail_out; + } + continue; + + case st_data: + Dmsg0(dbgep, "=== rpath 37 st_data\n"); + read_data(dcr->block, rec); + rec->rstate = st_header; /* next pass look for a header */ + goto get_out; + + case st_adata_blkhdr: + dcr->set_adata(); + dcr->dev->read_adata_block_header(dcr); + rec->rstate = st_header; + continue; + + case st_adata_rechdr: + Dmsg0(dbgep, "=== rpath 35 st_adata_rechdr\n"); + if (!dcr->dev->read_adata_record_header(dcr, dcr->block, rec)) { /* sets state */ + Dmsg0(dbgep, "=== rpath 36 failed read_adata rechdr\n"); + Dmsg0(100, "read_link returned EOF.\n"); + goto fail_out; + } + continue; + + case st_adata: + switch (dcr->dev->read_adata(dcr, rec)) { + case -1: + goto fail_out; + case 0: + continue; + case 1: + goto get_out; + } + + default: + Dmsg0(dbgep, "=== rpath 50 default\n"); + Dmsg0(0, "======= In default !!!!!\n"); + Pmsg1(190, "Read: unknown state=%d\n", rec->rstate); + goto fail_out; + } + } +get_out: + char buf1[100], buf2[100]; + Dmsg6(read_dbglvl, "read_rec return: FI=%s Strm=%s len=%d rem=%d remainder=%d Num=%d\n", + FI_to_ascii(buf1, rec->FileIndex), + stream_to_ascii(buf2, rec->Stream, rec->FileIndex), rec->data_len, + rec->remlen, rec->remainder, rec->RecNum); + rtn = true; + goto out; +fail_out: + rec->rstate = st_none; + rtn = 
false; +out: + if (save_adata) { + dcr->set_adata(); + } else { + dcr->set_ameta(); + } + return rtn; +} diff --git a/src/stored/record_util.c b/src/stored/record_util.c new file mode 100644 index 00000000..a326c14a --- /dev/null +++ b/src/stored/record_util.c @@ -0,0 +1,335 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * record-util.c -- Utilities for record handling + * + * Kern Sibbald, October MMXII + * + */ + +#include "bacula.h" +#include "stored.h" + +/* + * Convert a FileIndex into a printable + * ASCII string. Not reentrant. + * If the FileIndex is negative, it flags the + * record as a Label, otherwise it is simply + * the FileIndex of the current file. + */ +const char *FI_to_ascii(char *buf, int fi) +{ + if (fi >= 0) { + sprintf(buf, "%d", fi); + return buf; + } + switch (fi) { + case PRE_LABEL: + return "PRE_LABEL"; + case VOL_LABEL: + return "VOL_LABEL"; + case EOM_LABEL: + return "EOM_LABEL"; + case SOS_LABEL: + return "SOS_LABEL"; + case EOS_LABEL: + return "EOS_LABEL"; + case EOT_LABEL: + return "EOT_LABEL"; + break; + case SOB_LABEL: + return "SOB_LABEL"; + break; + case EOB_LABEL: + return "EOB_LABEL"; + break; + default: + sprintf(buf, _("unknown: %d"), fi); + return buf; + } +} + +/* + * Convert a Stream ID into a printable + * ASCII string. Not reentrant. + + * A negative stream number represents + * stream data that is continued from a + * record in the previous block. + * If the FileIndex is negative, we are + * dealing with a Label, hence the + * stream is the JobId. 
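+ * For a normal record a negative Stream marks a continuation piece and is
+ * printed with a "cont" prefix, e.g. contDATA for continued file data.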
+ */ +const char *stream_to_ascii(char *buf, int stream, int fi) +{ + + if (fi < 0) { + sprintf(buf, "%d", stream); + return buf; + } + if (stream < 0) { + stream = -stream; + stream &= STREAMMASK_TYPE; + /* Stream was negative => all are continuation items */ + switch (stream) { + case STREAM_UNIX_ATTRIBUTES: + return "contUATTR"; + case STREAM_FILE_DATA: + return "contDATA"; + case STREAM_WIN32_DATA: + return "contWIN32-DATA"; + case STREAM_WIN32_GZIP_DATA: + return "contWIN32-GZIP"; + case STREAM_WIN32_COMPRESSED_DATA: + return "contWIN32-COMPRESSED"; + case STREAM_MD5_DIGEST: + return "contMD5"; + case STREAM_SHA1_DIGEST: + return "contSHA1"; + case STREAM_GZIP_DATA: + return "contGZIP"; + case STREAM_COMPRESSED_DATA: + return "contCOMPRESSED"; + case STREAM_UNIX_ATTRIBUTES_EX: + return "contUNIX-ATTR-EX"; + case STREAM_RESTORE_OBJECT: + return "contRESTORE-OBJECT"; + case STREAM_SPARSE_DATA: + return "contSPARSE-DATA"; + case STREAM_SPARSE_GZIP_DATA: + return "contSPARSE-GZIP"; + case STREAM_SPARSE_COMPRESSED_DATA: + return "contSPARSE-COMPRESSED"; + case STREAM_PROGRAM_NAMES: + return "contPROG-NAMES"; + case STREAM_PROGRAM_DATA: + return "contPROG-DATA"; + case STREAM_MACOS_FORK_DATA: + return "contMACOS-RSRC"; + case STREAM_HFSPLUS_ATTRIBUTES: + return "contHFSPLUS-ATTR"; + case STREAM_SHA256_DIGEST: + return "contSHA256"; + case STREAM_SHA512_DIGEST: + return "contSHA512"; + case STREAM_SIGNED_DIGEST: + return "contSIGNED-DIGEST"; + case STREAM_ENCRYPTED_SESSION_DATA: + return "contENCRYPTED-SESSION-DATA"; + case STREAM_ENCRYPTED_FILE_DATA: + return "contENCRYPTED-FILE"; + case STREAM_ENCRYPTED_FILE_GZIP_DATA: + return "contENCRYPTED-GZIP"; + case STREAM_ENCRYPTED_FILE_COMPRESSED_DATA: + return "contENCRYPTED-COMPRESSED"; + case STREAM_ENCRYPTED_WIN32_DATA: + return "contENCRYPTED-WIN32-DATA"; + case STREAM_ENCRYPTED_WIN32_GZIP_DATA: + return "contENCRYPTED-WIN32-GZIP"; + case STREAM_ENCRYPTED_WIN32_COMPRESSED_DATA: + return "contENCRYPTED-WIN32-COMPRESSED"; + case STREAM_ENCRYPTED_MACOS_FORK_DATA: + return "contENCRYPTED-MACOS-RSRC"; + case STREAM_PLUGIN_NAME: + return "contPLUGIN-NAME"; + case STREAM_ADATA_BLOCK_HEADER: + return "contADATA-BLOCK-HEADER"; + case STREAM_ADATA_RECORD_HEADER: + return "contADATA-RECORD-HEADER"; + + default: + sprintf(buf, "%d", -stream); + return buf; + } + } + + switch (stream & STREAMMASK_TYPE) { + case STREAM_UNIX_ATTRIBUTES: + return "UATTR"; + case STREAM_FILE_DATA: + return "DATA"; + case STREAM_WIN32_DATA: + return "WIN32-DATA"; + case STREAM_WIN32_GZIP_DATA: + return "WIN32-GZIP"; + case STREAM_WIN32_COMPRESSED_DATA: + return "WIN32-COMPRESSED"; + case STREAM_MD5_DIGEST: + return "MD5"; + case STREAM_SHA1_DIGEST: + return "SHA1"; + case STREAM_GZIP_DATA: + return "GZIP"; + case STREAM_COMPRESSED_DATA: + return "COMPRESSED"; + case STREAM_UNIX_ATTRIBUTES_EX: + return "UNIX-ATTR-EX"; + case STREAM_RESTORE_OBJECT: + return "RESTORE-OBJECT"; + case STREAM_SPARSE_DATA: + return "SPARSE-DATA"; + case STREAM_SPARSE_GZIP_DATA: + return "SPARSE-GZIP"; + case STREAM_SPARSE_COMPRESSED_DATA: + return "SPARSE-COMPRESSED"; + case STREAM_PROGRAM_NAMES: + return "PROG-NAMES"; + case STREAM_PROGRAM_DATA: + return "PROG-DATA"; + case STREAM_PLUGIN_NAME: + return "PLUGIN-NAME"; + case STREAM_MACOS_FORK_DATA: + return "MACOS-RSRC"; + case STREAM_HFSPLUS_ATTRIBUTES: + return "HFSPLUS-ATTR"; + case STREAM_SHA256_DIGEST: + return "SHA256"; + case STREAM_SHA512_DIGEST: + return "SHA512"; + case STREAM_SIGNED_DIGEST: + return "SIGNED-DIGEST"; + case 
STREAM_ENCRYPTED_SESSION_DATA: + return "ENCRYPTED-SESSION-DATA"; + case STREAM_ENCRYPTED_FILE_DATA: + return "ENCRYPTED-FILE"; + case STREAM_ENCRYPTED_FILE_GZIP_DATA: + return "ENCRYPTED-GZIP"; + case STREAM_ENCRYPTED_FILE_COMPRESSED_DATA: + return "ENCRYPTED-COMPRESSED"; + case STREAM_ENCRYPTED_WIN32_DATA: + return "ENCRYPTED-WIN32-DATA"; + case STREAM_ENCRYPTED_WIN32_GZIP_DATA: + return "ENCRYPTED-WIN32-GZIP"; + case STREAM_ENCRYPTED_WIN32_COMPRESSED_DATA: + return "ENCRYPTED-WIN32-COMPRESSED"; + case STREAM_ENCRYPTED_MACOS_FORK_DATA: + return "ENCRYPTED-MACOS-RSRC"; + case STREAM_ADATA_BLOCK_HEADER: + return "ADATA-BLOCK-HEADER"; + case STREAM_ADATA_RECORD_HEADER: + return "ADATA-RECORD-HEADER"; + default: + sprintf(buf, "%d", stream); + return buf; + } +} + +const char *stream_to_ascii_ex(char *buf, int stream, int fi) +{ + if (fi < 0) { + return stream_to_ascii(buf, stream, fi); + } + const char *p = stream_to_ascii(buf, stream, fi); + return p; +} +/* + * Return a new record entity + */ +DEV_RECORD *new_record(void) +{ + DEV_RECORD *rec; + + rec = (DEV_RECORD *)get_memory(sizeof(DEV_RECORD)); + memset(rec, 0, sizeof(DEV_RECORD)); + rec->data = get_pool_memory(PM_MESSAGE); + rec->wstate = st_none; + rec->rstate = st_none; + return rec; +} + +void empty_record(DEV_RECORD *rec) +{ + rec->RecNum = 0; + rec->StartAddr = rec->Addr = 0; + rec->VolSessionId = rec->VolSessionTime = 0; + rec->FileIndex = rec->Stream = 0; + rec->data_len = rec->remainder = 0; + rec->state_bits &= ~(REC_PARTIAL_RECORD|REC_ADATA_EMPTY|REC_BLOCK_EMPTY|REC_NO_MATCH|REC_CONTINUATION); + rec->FileOffset = 0; + rec->wstate = st_none; + rec->rstate = st_none; + rec->VolumeName = NULL; +} + +/* + * Free the record entity + * + */ +void free_record(DEV_RECORD *rec) +{ + Dmsg0(950, "Enter free_record.\n"); + if (rec->data) { + free_pool_memory(rec->data); + } + Dmsg0(950, "Data buf is freed.\n"); + free_pool_memory((POOLMEM *)rec); + Dmsg0(950, "Leave free_record.\n"); +} + +void dump_record(DEV_RECORD *rec) +{ + char buf[32]; + Dmsg11(100|DT_VOLUME, "Dump record %s 0x%p:\n\tStart=%lld addr=%lld #%d\n" + "\tVolSess: %ld:%ld\n" + "\tFileIndex: %ld\n" + "\tStream: 0x%lx\n\tLen: %ld\n\tData: %s\n", + rec, NPRT(rec->VolumeName), + rec->StartAddr, rec->Addr, rec->RecNum, + rec->VolSessionId, rec->VolSessionTime, rec->FileIndex, + rec->Stream, rec->data_len, + asciidump(rec->data, rec->data_len, buf, sizeof(buf))); +} + +/* + * Test if we can write whole record to the block + * + * Returns: false on failure + * true on success (all bytes can be written) + */ +bool can_write_record_to_block(DEV_BLOCK *block, DEV_RECORD *rec) +{ + uint32_t remlen; + + remlen = block->buf_len - block->binbuf; + if (rec->remainder == 0) { + if (remlen >= WRITE_RECHDR_LENGTH) { + remlen -= WRITE_RECHDR_LENGTH; + rec->remainder = rec->data_len; + } else { + return false; + } + } else { + return false; + } + if (rec->remainder > 0 && remlen < rec->remainder) { + return false; + } + return true; +} + +uint64_t get_record_address(DEV_RECORD *rec) +{ + return rec->Addr; +} + +uint64_t get_record_start_address(DEV_RECORD *rec) +{ + return rec->StartAddr; +} diff --git a/src/stored/record_write.c b/src/stored/record_write.c new file mode 100644 index 00000000..b4c44429 --- /dev/null +++ b/src/stored/record_write.c @@ -0,0 +1,423 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * record_write.c -- Volume (tape/disk) record write functions + * + * Kern Sibbald, April MMI + * added BB02 format October MMII + * added aligned format November MMXII + * + */ + + +#include "bacula.h" +#include "stored.h" + +/* Imported functions */ + +static const int dbgep = 250|DT_RECORD; /* debug execution path */ +static const int dbgel = 250|DT_RECORD; /* debug Enter/Leave code */ + +struct rechdr { + int32_t FileIndex; + uint32_t data_len; + uint32_t reclen; + int32_t Stream; + int32_t oStream; +}; + +/* + * Write an ameta (normal) header record to the block. + */ +static bool write_header_to_block(DCR *dcr, DEV_BLOCK *block, DEV_RECORD *rec) +{ + ser_declare; + + Dmsg0(dbgep, "=== wpath 11 write_header_to_block\n"); + ASSERT2(!block->adata, "Attempt to write header to adata block!"); + rec->remlen = block->buf_len - block->binbuf; + /* Require enough room to write a full header */ + if (rec->remlen < WRITE_RECHDR_LENGTH) { + Dmsg0(dbgep, "=== wpath 12 write_header_to_block\n"); + Dmsg5(190, "remlen<WRITE_RECHDR_LENGTH adata=%d remlen=%d hdrlen=%d buf_len=%d binbuf=%d\n", block->adata, rec->remlen, WRITE_RECHDR_LENGTH, block->buf_len, block->binbuf); + rec->remainder = rec->data_len + WRITE_RECHDR_LENGTH; + return false; + } + ser_begin(block->bufp, WRITE_RECHDR_LENGTH); + if (BLOCK_VER == 1) { + Dmsg0(dbgep, "=== wpath 13 write_header_to_block\n"); + ser_uint32(rec->VolSessionId); + ser_uint32(rec->VolSessionTime); + } else { + Dmsg0(dbgep, "=== wpath 14 write_header_to_block\n"); + block->VolSessionId = rec->VolSessionId; + block->VolSessionTime = rec->VolSessionTime; + } + ser_int32(rec->FileIndex); + ser_int32(rec->Stream); + ser_uint32(rec->data_len); + + block->bufp += WRITE_RECHDR_LENGTH; + block->binbuf += WRITE_RECHDR_LENGTH; + + block->RecNum++; + rec->remlen -= WRITE_RECHDR_LENGTH; + rec->remainder = rec->data_len; + if (rec->FileIndex > 0) { + Dmsg0(dbgep, "=== wpath 15 write_header_to_block\n"); + /* If data record, update what we have in this block */ + if (block->FirstIndex == 0) { + Dmsg0(dbgep, "=== wpath 16 write_header_to_block\n"); + block->FirstIndex = rec->FileIndex; + } + block->LastIndex = rec->FileIndex; + } + + //dump_block(dcr->dev, block, "Add header"); + return true; +} + +/* + * If the prior ameta block was not big enough to hold the + * whole record, write a continuation header record. + */ +static void write_continue_header_to_block(DCR *dcr, DEV_BLOCK *block, DEV_RECORD *rec) +{ + ser_declare; + + Dmsg0(dbgep, "=== wpath 17 write_cont_hdr_to_block\n"); + ASSERT2(!block->adata, "Attempt to write adata header!"); + rec->remlen = block->buf_len - block->binbuf; + + /* No space left to write the continue header */ + if (rec->remlen == 0) { + return; + } + + /* + * We have unwritten bytes from a previous + * time. Presumably we have a new buffer (possibly + * containing a volume label), so the new header + * should be able to fit in the block -- otherwise we have + * an error. Note, we have to continue splitting the + * data record if it is longer than the block. + * + * First, write the header. 
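+ * In a BB02 block the continuation header is just the FileIndex, the
+ * Stream and the count of bytes still to be written; VolSessionId/Time
+ * live in the block header.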
+ * + * Every time we write a header and it is a continuation + * of a previous partially written record, we store the + * Stream as -Stream in the record header. + */ + ser_begin(block->bufp, WRITE_RECHDR_LENGTH); + if (BLOCK_VER == 1) { + Dmsg0(dbgep, "=== wpath 18 write_cont_hdr_to_block\n"); + ser_uint32(rec->VolSessionId); + ser_uint32(rec->VolSessionTime); + } else { + Dmsg0(dbgep, "=== wpath 19 write_cont_hdr_to_block\n"); + block->VolSessionId = rec->VolSessionId; + block->VolSessionTime = rec->VolSessionTime; + } + ser_int32(rec->FileIndex); + if (rec->remainder > rec->data_len) { + Dmsg0(dbgep, "=== wpath 20 write_cont_hdr_to_block\n"); + ser_int32(rec->Stream); /* normal full header */ + ser_uint32(rec->data_len); + rec->remainder = rec->data_len; /* must still do data record */ + } else { + Dmsg0(dbgep, "=== wpath 21 write_cont_hdr_to_block\n"); + ser_int32(-rec->Stream); /* mark this as a continuation record */ + ser_uint32(rec->remainder); /* bytes to do */ + } + + /* Require enough room to write a full header */ + ASSERT(rec->remlen >= WRITE_RECHDR_LENGTH); + + block->bufp += WRITE_RECHDR_LENGTH; + block->binbuf += WRITE_RECHDR_LENGTH; + rec->remlen -= WRITE_RECHDR_LENGTH; + if (rec->FileIndex > 0) { + Dmsg0(dbgep, "=== wpath 22 write_cont_hdr_to_block\n"); + /* If data record, update what we have in this block */ + if (block->FirstIndex == 0) { + Dmsg0(dbgep, "=== wpath 23 write_cont_hdr_to_block\n"); + block->FirstIndex = rec->FileIndex; + } + block->LastIndex = rec->FileIndex; + } + if (block->adata) { + Dmsg3(150, "=== write_cont_hdr ptr=%p begin=%p off=%d\n", block->bufp, + block->buf, block->bufp-block->buf); + } + block->RecNum++; + //dump_block(dcr->dev, block, "Add cont header"); +} + +/* + * Now write non-aligned data to an ameta block + */ +static bool write_data_to_block(DCR *dcr, DEV_BLOCK *block, DEV_RECORD *rec) +{ + Dmsg0(dbgep, "=== wpath 24 write_data_to_block\n"); + ASSERT2(!block->adata, "Attempt to write adata to metadata file!"); + rec->remlen = block->buf_len - block->binbuf; + /* Write as much of data as possible */ + if (rec->remlen >= rec->remainder) { + Dmsg0(dbgep, "=== wpath 25 write_data_to_block\n"); + memcpy(block->bufp, rec->data+rec->data_len-rec->remainder, + rec->remainder); + block->bufp += rec->remainder; + block->binbuf += rec->remainder; + rec->remainder = 0; + } else { + if (rec->state_bits & REC_NO_SPLIT) { + return false; /* do not split record */ + } + Dmsg0(dbgep, "=== wpath 26 write_data_to_block\n"); + memcpy(block->bufp, rec->data+rec->data_len-rec->remainder, + rec->remlen); + block->bufp += rec->remlen; + block->binbuf += rec->remlen; + rec->remainder -= rec->remlen; + return false; /* did partial transfer */ + } + if (block->adata) { + /* Adata label data */ + Dmsg3(190, "write_data adata=%d blkAddr=%lld off=%d\n", + block->adata, block->BlockAddr, block->bufp-block->buf); + } + //dump_block(dcr->dev, block, "Add data/adata"); + return true; +} + +/* + * Write a record to the block -- handles writing out full + * blocks by writing them to the device. + * + * Returns: false means the block could not be written to tape/disk. + * true on success (all bytes written to the block). 
+ */ +bool DCR::write_record(DEV_RECORD *rec) +{ + Enter(dbgel); + Dmsg0(dbgep, "=== wpath 33 write_record\n"); + while (!write_record_to_block(this, rec)) { + Dmsg2(850, "!write_record_to_block data_len=%d rem=%d\n", rec->data_len, + rec->remainder); + if (jcr->is_canceled()) { + Leave(dbgel); + return false; + } + if (!write_block_to_device()) { + Dmsg0(dbgep, "=== wpath 34 write_record\n"); + Pmsg2(000, "Got write_block_to_dev error on device %s. %s\n", + dev->print_name(), dev->bstrerror()); + Leave(dbgel); + return false; + } + Dmsg2(850, "!write_record_to_block data_len=%d rem=%d\n", rec->data_len, + rec->remainder); + } + Leave(dbgel); + return true; +} + +/* + * Write a record to the block + * + * Returns: false on failure (none or partially written) + * true on success (all bytes written) + * + * and remainder returned in packet. + * + * We require enough room for the header, and we deal with + * two special cases. 1. Only part of the record may have + * been transferred the last time (when remainder is + * non-zero), and 2. The remaining bytes to write may not + * all fit into the block. + * + * Ensure we leave with same ameta/adata set as enter. + */ +bool write_record_to_block(DCR *dcr, DEV_RECORD *rec) +{ + char buf1[100], buf2[100]; + bool save_adata = dcr->block->adata; + bool rtn; + + Enter(dbgel); + Dmsg0(dbgep, "=== wpath 35 enter write_record_to_block\n"); + Dmsg7(250, "write_record_to_block() state=%d FI=%s SessId=%d" + " Strm=%s len=%d rem=%d remainder=%d\n", rec->wstate, + FI_to_ascii(buf1, rec->FileIndex), rec->VolSessionId, + stream_to_ascii(buf2, rec->Stream, rec->FileIndex), rec->data_len, + rec->remlen, rec->remainder); + Dmsg4(250, "write_rec Strm=%s len=%d rem=%d remainder=%d\n", + stream_to_ascii(buf2, rec->Stream, rec->FileIndex), rec->data_len, + rec->remlen, rec->remainder); + + if (!dcr->dev->do_pre_write_checks(dcr, rec)) { + goto fail_out; + } + + for ( ;; ) { + Dmsg0(dbgep, "=== wpath 37 top of for loop\n"); + ASSERT(dcr->block->binbuf == (uint32_t) (dcr->block->bufp - dcr->block->buf)); + ASSERT(dcr->block->buf_len >= dcr->block->binbuf); + + switch (rec->wstate) { + case st_none: + Dmsg0(dbgep, "=== wpath 38 st_none\n"); + /* Figure out what to do */ + rec->wstate = st_header; + /* If labeling adata, special path */ + if (dcr->adata_label) { + Dmsg1(dbgep, "=== wpath adata_label set adata=%d\n", dcr->dev->adata); + rec->wstate = st_adata_label; + continue; + } + if (rec->FileIndex < 0) { + /* Label record -- ameta label */ + Dmsg3(dbgep, "=== wpath label adata=%d Strm=%d FI=%d\n", + dcr->dev->adata, rec->Stream, rec->FileIndex); + rec->wstate = st_header; + continue; + } + + dcr->dev->select_data_stream(dcr, rec); + continue; /* go to next state */ + + case st_header: + /* + * Write header + * + * If rec->remainder is non-zero, we have been called a + * second (or subsequent) time to finish writing a record + * that did not previously fit into the block. 
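+ * If the header or the data does not all fit, we switch to st_cont_header
+ * and finish the record in the next block.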
+ */ + Dmsg0(dbgep, "=== wpath 42 st_header\n"); + dcr->set_ameta(); + if (!write_header_to_block(dcr, dcr->ameta_block, rec)) { + Dmsg0(dbgep, "=== wpath 43 st_header\n"); + rec->wstate = st_cont_header; + goto fail_out; + } + Dmsg0(dbgep, "=== wpath 44 st_header\n"); + rec->wstate = st_data; + continue; + + case st_cont_header: + Dmsg0(dbgep, "=== wpath 45 st_cont_header\n"); + dcr->set_ameta(); + write_continue_header_to_block(dcr, dcr->ameta_block, rec); + rec->wstate = st_data; + if (rec->remlen == 0) { + Dmsg0(dbgep, "=== wpath 46 st_cont_header\n"); + goto fail_out; + } + continue; + + /* + * We come here only once for each record + */ + case st_data: + /* + * Write data + * + * Part of it may have already been transferred, and we + * may not have enough room to transfer the whole this time. + */ + Dmsg0(dbgep, "=== wpath 47 st_data\n"); + dcr->set_ameta(); + if (rec->remainder > 0) { + Dmsg0(dbgep, "=== wpath 48 st_data\n"); + if (!write_data_to_block(dcr, dcr->ameta_block, rec)) { + Dmsg0(dbgep, "=== wpath 49 st_data\n"); + if (rec->state_bits & REC_NO_SPLIT) { + rec->wstate = st_header; + } else { + rec->wstate = st_cont_header; + } + goto fail_out; + } + } + rec->state_bits &= ~REC_NO_SPLIT; /* clear possible no split bit */ + rec->remainder = 0; /* did whole transfer */ + rec->wstate = st_none; + goto get_out; + + case st_adata_label: + if (!dcr->dev->write_adata_label(dcr, rec)) { + goto fail_out; + } + goto get_out; + + /* + * We come here only once for each record + */ + case st_adata: + dcr->dev->write_adata(dcr, rec); + continue; + + case st_cont_adata: + dcr->dev->write_cont_adata(dcr, rec); + continue; + + /* + * Note, the following two cases are handled differently + * in write_adata_record_header() so take care if you want to + * eliminate one of them. + */ + case st_cont_adata_rechdr: + Dmsg2(200, "=== cont rechdr remainder=%d reclen=%d\n", rec->remainder, dcr->adata_block->reclen); + Dmsg0(200, "st_cont_adata_rechdr\n"); + /* Fall through wanted */ + case st_adata_rechdr: + switch(dcr->dev->write_adata_rechdr(dcr, rec)) { + case -1: + goto fail_out; + case 0: + continue; + case 1: + goto get_out; + } + break; + + default: + Dmsg0(dbgep, "=== wpath 67!!!! default\n"); + Dmsg0(50, "Something went wrong. Default state.\n"); + rec->wstate = st_none; + goto get_out; + } + } +get_out: + rtn = true; + goto out; +fail_out: + rtn = false; +out: + if (save_adata) { + dcr->set_adata(); + } else { + dcr->set_ameta(); + } + Leave(dbgel); + return rtn; +} diff --git a/src/stored/reserve.c b/src/stored/reserve.c new file mode 100644 index 00000000..87c6f246 --- /dev/null +++ b/src/stored/reserve.c @@ -0,0 +1,1312 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Drive reservation functions for Storage Daemon + * + * Written by Kern Sibbald, MM + * + * Split from job.c and acquire.c June 2005 + * + */ + +#include "bacula.h" +#include "stored.h" + +const int dbglvl = 150; + +static brwlock_t reservation_lock; +int reservations_lock_count = 0; + +/* Forward referenced functions */ +static int can_reserve_drive(DCR *dcr, RCTX &rctx); +static bool is_vol_in_autochanger(RCTX &rctx, VOLRES *vol); +static bool reserve_device_for_append(DCR *dcr, RCTX &rctx); +static bool reserve_device_for_read(DCR *dcr); +static bool use_device_cmd(JCR *jcr); +static int reserve_device(RCTX &rctx); +static void pop_reserve_messages(JCR *jcr); +static void queue_reserve_message(JCR *jcr); +static int is_pool_ok(DCR *dcr); +//void switch_device(DCR *dcr, DEVICE *dev); + +/* Requests from the Director daemon */ +static char use_storage[] = "use storage=%127s media_type=%127s " + "pool_name=%127s pool_type=%127s append=%d copy=%d stripe=%d\n"; +static char use_device[] = "use device=%127s\n"; + +/* Responses sent to Director daemon */ +static char OK_device[] = "3000 OK use device device=%s\n"; +static char NO_device[] = "3924 Device \"%s\" not in SD Device" + " resources or no matching Media Type or is disabled.\n"; +static char BAD_use[] = "3913 Bad use command: %s\n"; + +/* + * This allows a given thread to recursively call lock_reservations. + * It must, of course, call unlock_... the same number of times. + */ +void init_reservations_lock() +{ + int errstat; + if ((errstat=rwl_init(&reservation_lock)) != 0) { + berrno be; + Emsg1(M_ABORT, 0, _("Unable to initialize reservation lock. ERR=%s\n"), + be.bstrerror(errstat)); + } + + init_vol_list_lock(); +} + + +/* This applies to a drive and to Volumes */ +void _lock_reservations(const char *file, int line) +{ + int errstat; + reservations_lock_count++; + if ((errstat=rwl_writelock_p(&reservation_lock, file, line)) != 0) { + berrno be; + Emsg2(M_ABORT, 0, "rwl_writelock failure. stat=%d: ERR=%s\n", + errstat, be.bstrerror(errstat)); + } +} + +void _unlock_reservations() +{ + int errstat; + reservations_lock_count--; + if ((errstat=rwl_writeunlock(&reservation_lock)) != 0) { + berrno be; + Emsg2(M_ABORT, 0, "rwl_writeunlock failure. stat=%d: ERR=%s\n", + errstat, be.bstrerror(errstat)); + } +} + +void term_reservations_lock() +{ + rwl_destroy(&reservation_lock); + term_vol_list_lock(); +} + +void DCR::clear_reserved() +{ + if (m_reserved) { + m_reserved = false; + dev->dec_reserved(); + Dmsg3(dbglvl, "Dec reserve=%d writers=%d dev=%s\n", dev->num_reserved(), + dev->num_writers, dev->print_name()); + if (dev->num_reserved() == 0) { + dev->reserved_pool_name[0] = 0; + } + } +} + +void DCR::set_reserved_for_append() +{ + if (dev->num_reserved() == 0) { + bstrncpy(dev->reserved_pool_name, pool_name, sizeof(dev->reserved_pool_name)); + Dmsg1(dbglvl, "Set reserve pool: %s\n", pool_name); + } + m_reserved = true; + dev->set_append_reserve(); + dev->inc_reserved(); + Dmsg3(dbglvl, "Inc reserve=%d writers=%d dev=%s\n", dev->num_reserved(), + dev->num_writers, dev->print_name()); +} + +void DCR::set_reserved_for_read() +{ + /* Called for each volume read, no need to increment each time */ + if (!m_reserved) { + m_reserved = true; + dev->set_read_reserve(); + dev->inc_reserved(); + Dmsg2(dbglvl, "Inc reserve=%d dev=%s\n", dev->num_reserved(), dev->print_name()); + } +} + +/* + * Remove any reservation from a drive and tell the system + * that the volume is unused at least by us. 
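+ * The locked argument tells us whether the caller already holds the device
+ * lock; if not, we take and release it here.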
+ */ +void DCR::unreserve_device(bool locked) +{ + if (!locked) { + dev->Lock(); + } + if (is_reserved()) { + clear_reserved(); + reserved_volume = false; + /* If we set read mode in reserving, remove it */ + if (dev->can_read()) { + remove_read_volume(jcr, this->VolumeName); + dev->clear_read(); + } + if (dev->num_writers < 0) { + Jmsg1(jcr, M_ERROR, 0, _("Hey! num_writers=%d!!!!\n"), dev->num_writers); + dev->num_writers = 0; + } + if (dev->num_reserved() == 0 && dev->num_writers == 0) { + generate_plugin_event(jcr, bsdEventDeviceClose, this); + volume_unused(this); + } + } + if (!locked) { + dev->Unlock(); + } +} + +bool use_cmd(JCR *jcr) +{ + /* + * Get the device, media, and pool information + */ + if (!use_device_cmd(jcr)) { + jcr->setJobStatus(JS_ErrorTerminated); + memset(jcr->sd_auth_key, 0, strlen(jcr->sd_auth_key)); + return false; + } + return true; +} + +/* + * Storage search options + */ +struct store_opts_t { + bool PreferMountedVols; + bool exact_match; + bool autochanger_only; + bool try_low_use_drive; + bool any_drive; +}; + +/* *** Old pre-8.8 algorithm *** not used here + * jcr->PreferMountedVols = true + * MntVol exact chgonly lowuse any + * 1 * {true, true, false, false, false}, + * 2 * {true, false, true, false, false}, + * 3 * {true, false, true, false, true}, + + * jcr->PreferMountedVols = false + * MntVol exact chgonly lowuse any + * 4 * {false, false, true, false, false}, + * 5 * {false, false, true, true, false}, * low instantaneous use * + * 6 * {false, false, false, true, false}, + * 7 * {true, true, false, false, false}, + * 8 * {true, false, true, false, false}, + * 9 * {true, false, true, false, true} + *** End old pre-8.8 algorithm *** + */ + +store_opts_t store_opts[] = { +/* jcr->PreferMountedVols = true */ +/* MntVol exact chgonly lowuse any */ +/* 1 */ {true, true, false, true, false}, /* low global use */ +/* 2 */ {true, false, true, true, false}, +/* 3 */ {true, false, true, false, true}, +/* 4 */ {true, true, false, true, false}, +/* 5 */ {false, true, false, false, false}, +/* 6 */ {false, false, false, false, false}, +/* 7 */ {true, false, false, false, true}, + +/* jcr->PreferMountedVols = false */ +/* MntVol exact chgonly lowuse any */ +/* 8 */ {false, false, true, true, false}, +/* 9 */ {false, false, true, false, false}, +/* 10 */ {false, false, false, true, false}, +/* 11 */ {true, true, false, true, true}, +/* 12 */ {true, false, true, true, false}, +/* 13 */ {true, false, true, false, true} +}; + +static void set_options(RCTX &rctx, int num) +{ + rctx.PreferMountedVols = store_opts[num-1].PreferMountedVols; + rctx.exact_match = store_opts[num-1].exact_match; + rctx.autochanger_only = store_opts[num-1].autochanger_only; + rctx.try_low_use_drive = store_opts[num-1].try_low_use_drive; + rctx.any_drive = store_opts[num-1].any_drive; + rctx.low_use_drive = NULL; +} + +static void prt_options(RCTX &rctx, int num) +{ + Dmsg6(dbglvl, "Inx=%d mntVol=%d exact=%d chgonly=%d low_use=%d any=%d\n", + num, rctx.PreferMountedVols, rctx.exact_match, rctx.autochanger_only, + rctx.try_low_use_drive, rctx.any_drive); +} + + + +/* + * We get the following type of information: + * + * use storage=xxx media_type=yyy pool_name=xxx pool_type=yyy append=1 copy=0 strip=0 + * use device=zzz + * use device=aaa + * use device=bbb + * use storage=xxx media_type=yyy pool_name=xxx pool_type=yyy append=0 copy=0 strip=0 + * use device=bbb + * + */ +static bool use_device_cmd(JCR *jcr) +{ + POOL_MEM store_name, dev_name, media_type, pool_name, pool_type; + BSOCK *dir = 
jcr->dir_bsock;
+   int32_t append;
+   bool ok;
+   int32_t Copy, Stripe;
+   DIRSTORE *store;
+   RCTX rctx;
+   alist *dirstore;
+
+   memset(&rctx, 0, sizeof(RCTX));
+   rctx.jcr = jcr;
+   /*
+    * If there are multiple devices, the director sends us
+    * use_device for each device that it wants to use.
+    */
+   dirstore = New(alist(10, not_owned_by_alist));
+   jcr->reserve_msgs = New(alist(10, not_owned_by_alist));
+   do {
+      Dmsg1(dbglvl, "<dird: %s", dir->msg);
+      ok = sscanf(dir->msg, use_storage, store_name.c_str(),
+                  media_type.c_str(), pool_name.c_str(),
+                  pool_type.c_str(), &append, &Copy, &Stripe) == 7;
+      if (!ok) {
+         break;
+      }
+      if (append) {
+         jcr->write_store = dirstore;
+      } else {
+         jcr->read_store = dirstore;
+      }
+      rctx.append = append;
+      unbash_spaces(store_name);
+      unbash_spaces(media_type);
+      unbash_spaces(pool_name);
+      unbash_spaces(pool_type);
+      store = new DIRSTORE;
+      dirstore->append(store);
+      memset(store, 0, sizeof(DIRSTORE));
+      store->device = New(alist(10));
+      bstrncpy(store->name, store_name, sizeof(store->name));
+      bstrncpy(store->media_type, media_type, sizeof(store->media_type));
+      bstrncpy(store->pool_name, pool_name, sizeof(store->pool_name));
+      bstrncpy(store->pool_type, pool_type, sizeof(store->pool_type));
+      store->append = append;
+
+      /* Now get all devices */
+      while (dir->recv() >= 0) {
+         Dmsg1(dbglvl, "<dird: %s", dir->msg);
+         ok = sscanf(dir->msg, use_device, dev_name.c_str()) == 1;
+         if (!ok) {
+            break;
+         }
+         unbash_spaces(dev_name);
+         store->device->append(bstrdup(dev_name.c_str()));
+      }
+   } while (ok && dir->recv() >= 0);
+
+#ifdef xxxx
+   /* Developers debug code */
+   char *device_name;
+   if (debug_level >= dbglvl) {
+      foreach_alist(store, dirstore) {
+         Dmsg5(dbglvl, "Storage=%s media_type=%s pool=%s pool_type=%s append=%d\n",
+            store->name, store->media_type, store->pool_name,
+            store->pool_type, store->append);
+         foreach_alist(device_name, store->device) {
+            Dmsg1(dbglvl, " Device=%s\n", device_name);
+         }
+      }
+   }
+#endif
+
+   init_jcr_device_wait_timers(jcr);
+   jcr->dcr = new_dcr(jcr, NULL, NULL, !rctx.append); /* get a dcr */
+   if (!jcr->dcr) {
+      BSOCK *dir = jcr->dir_bsock;
+      dir->fsend(_("3939 Could not get dcr\n"));
+      Dmsg1(dbglvl, ">dird: %s", dir->msg);
+      ok = false;
+   }
+   /*
+    * At this point, we have a list of all the Director's Storage
+    * resources indicated for this Job, which include Pool, PoolType,
+    * storage name, and Media type.
+    * Then for each of the Storage resources, we have a list of
+    * device names that were given.
+    *
+    * Wiffle through Storage resources sent to us and find one that can do the backup.
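+ *
+ * A minimal illustrative sketch (not part of the Bacula sources) of how a
+ * single "use storage" line is parsed with the use_storage scanf format
+ * defined at the top of this file; the literal resource names below are
+ * hypothetical.
+ */
+#if 0   /* illustrative only, never compiled */
+   char xstore[128], xmtype[128], xpool[128], xptype[128];
+   int xappend, xcopy, xstripe;
+   const char *xmsg = "use storage=File1 media_type=File1 "
+      "pool_name=Default pool_type=Backup append=1 copy=0 stripe=0\n";
+   bool xok = sscanf(xmsg, use_storage, xstore, xmtype, xpool, xptype,
+                     &xappend, &xcopy, &xstripe) == 7;  /* all 7 fields must match */
+#endif
+/*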
+ */ + if (ok) { + int wait_for_device_retries = 0; + int repeat = 0; + bool fail = false; + rctx.notify_dir = true; + + /* Put new dcr in proper location */ + if (rctx.append) { + rctx.jcr->dcr = jcr->dcr; + } else { + rctx.jcr->read_dcr = jcr->dcr; + } + lock_reservations(); + for ( ; !fail && !job_canceled(jcr); ) { + int i; + pop_reserve_messages(jcr); + rctx.suitable_device = false; + rctx.have_volume = false; + rctx.VolumeName[0] = 0; + rctx.any_drive = false; + if (jcr->PreferMountedVols) { + for (i=1; i<=7; i++) { + set_options(rctx, i); + prt_options(rctx, i); + if ((ok = find_suitable_device_for_job(jcr, rctx))) { + break; + } + } + } else { + for (i=8; i<=13; i++) { + set_options(rctx, i); + prt_options(rctx, i); + if ((ok = find_suitable_device_for_job(jcr, rctx))) { + break; + } + } + } + if (ok) { + break; + } + /* Keep reservations locked *except* during wait_for_device() */ + unlock_reservations(); + /* + * The idea of looping on repeat a few times it to ensure + * that if there is some subtle timing problem between two + * jobs, we will simply try again, and most likely succeed. + * This can happen if one job reserves a drive or finishes using + * a drive at the same time a second job wants it. + */ + if (repeat++ < 20) { /* try sleeping 20 times */ + bmicrosleep(30, 0); /* wait 30 secs */ + Dmsg1(dbglvl, "repeat reserve algorithm JobId=%d\n", jcr->JobId); + } else if (!rctx.suitable_device || !wait_for_any_device(jcr, wait_for_device_retries)) { + Dmsg0(dbglvl, "Fail. !suitable_device || !wait_for_device\n"); + fail = true; + } + lock_reservations(); + dir->signal(BNET_HEARTBEAT); /* Inform Dir that we are alive */ + } + unlock_reservations(); + + if (!ok) { + /* + * If we get here, there are no suitable devices available, which + * means nothing configured. If a device is suitable but busy + * with another Volume, we will not come here. + */ + unbash_spaces(dir->msg); + pm_strcpy(jcr->errmsg, dir->msg); + Jmsg(jcr, M_FATAL, 0, _("Device reservation failed for JobId=%d: %s\n"), + jcr->JobId, jcr->errmsg); + dir->fsend(NO_device, dev_name.c_str()); + + Dmsg1(dbglvl, ">dird: %s", dir->msg); + } + } else { + unbash_spaces(dir->msg); + pm_strcpy(jcr->errmsg, dir->msg); + Jmsg(jcr, M_FATAL, 0, _("Failed command: %s\n"), jcr->errmsg); + dir->fsend(BAD_use, jcr->errmsg); + Dmsg1(dbglvl, ">dird: %s", dir->msg); + } + + release_reserve_messages(jcr); + return ok; +} + +/* + * Search for a device suitable for this job. + * Note, this routine sets sets rctx.suitable_device if any + * device exists within the SD. The device may not be actually + * useable. + * It also returns if it finds a useable device. + */ +bool find_suitable_device_for_job(JCR *jcr, RCTX &rctx) +{ + bool ok = false; + DIRSTORE *store; + char *device_name; + alist *dirstore; + DCR *dcr = jcr->dcr; + + if (rctx.append) { + dirstore = jcr->write_store; + } else { + dirstore = jcr->read_store; + } + Dmsg5(dbglvl, "Start find_suit_dev PrefMnt=%d exact=%d suitable=%d chgronly=%d any=%d\n", + rctx.PreferMountedVols, rctx.exact_match, rctx.suitable_device, + rctx.autochanger_only, rctx.any_drive); + + /* + * If the appropriate conditions of this if are met, namely that + * we are appending and the user wants mounted drive (or we + * force try a mounted drive because they are all busy), we + * start by looking at all the Volumes in the volume list. 
+ */ + if (!is_vol_list_empty() && rctx.append && rctx.PreferMountedVols) { + dlist *temp_vol_list; + VOLRES *vol = NULL; + temp_vol_list = dup_vol_list(jcr); + + /* Look through reserved volumes for one we can use */ + Dmsg0(dbglvl, "look for vol in vol list\n"); + foreach_dlist(vol, temp_vol_list) { + if (!vol->dev) { + Dmsg1(dbglvl, "vol=%s no dev\n", vol->vol_name); + continue; + } + bstrncpy(dcr->VolumeName, vol->vol_name, sizeof(dcr->VolumeName)); + /* Check with Director if this Volume is OK */ + if (!dir_get_volume_info(dcr, dcr->VolumeName, GET_VOL_INFO_FOR_WRITE)) { + continue; + } + Dmsg1(dbglvl, "vol=%s OK for this job\n", vol->vol_name); + foreach_alist(store, dirstore) { + int stat; + rctx.store = store; + foreach_alist(device_name, store->device) { + /* Found a device, try to use it */ + rctx.device_name = device_name; + rctx.device = vol->dev->device; + if (vol->dev->read_only) { + continue; + } + if (vol->dev->is_autochanger()) { + Dmsg1(dbglvl, "vol=%s is in changer\n", vol->vol_name); + if (!is_vol_in_autochanger(rctx, vol) || !vol->dev->autoselect || + !vol->dev->enabled) { + continue; + } + } else if (strcmp(device_name, vol->dev->device->hdr.name) != 0) { + Dmsg2(dbglvl, "device=%s not suitable want %s\n", + vol->dev->device->hdr.name, device_name); + continue; + } + bstrncpy(rctx.VolumeName, vol->vol_name, sizeof(rctx.VolumeName)); + rctx.have_volume = true; + /* Try reserving this device and volume */ + Dmsg2(dbglvl, "Try reserve vol=%s on device=%s\n", rctx.VolumeName, device_name); + stat = reserve_device(rctx); + if (stat == 1) { /* found available device */ + Dmsg1(dbglvl, "Device reserved=%s\n", device_name); + ok = true; + } else { + /* Error or no suitable device found */ + Dmsg0(dbglvl, "No suitable device found.\n"); + rctx.have_volume = false; + rctx.VolumeName[0] = 0; + } + } + if (ok) { + break; + } + } /* end of loop over storages */ + if (ok) { + break; + } + } /* end for loop over reserved volumes */ + + Dmsg0(dbglvl, "lock volumes\n"); + free_temp_vol_list(temp_vol_list); + } /* End test for vol_list, ... */ + if (ok) { + Dmsg1(dbglvl, "OK dev found. Vol=%s from in-use vols list\n", rctx.VolumeName); + return true; + } + + /* + * No reserved volume we can use, so now search for an available device. + * + * For each storage device that the user specified, we + * search and see if there is a resource for that device. + */ + foreach_alist(store, dirstore) { + rctx.store = store; + foreach_alist(device_name, store->device) { + int stat; + rctx.device_name = device_name; + stat = search_res_for_device(rctx); + if (stat == 1) { /* found available device */ + Dmsg1(dbglvl, "available device found=%s\n", device_name); + ok = true; + break; + } else if (stat == 0) { /* device busy */ + Dmsg1(dbglvl, "No usable device=%s, busy: not use\n", device_name); + } else { + /* otherwise error */ + Dmsg0(dbglvl, "No usable device found.\n"); + } + } + if (ok) { + break; + } + } + if (ok) { + Dmsg1(dbglvl, "OK dev found. Vol=%s\n", rctx.VolumeName); + } else { + Dmsg0(dbglvl, "Leave find_suit_dev: no dev found.\n"); + } + return ok; +} + +/* + * Search for a particular storage device with particular storage + * characteristics (MediaType). 
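+ *
+ * An illustrative sketch (not part of the Bacula sources) of the "low use"
+ * preference applied below when try_low_use_drive is set: keep the drive
+ * with the smaller usage counter, breaking ties in favour of fewer
+ * outstanding reservations.
+ */
+#if 0   /* illustrative only, never compiled */
+static bool is_lower_use(DEVICE *cand, DEVICE *cur)
+{
+   return cur->usage > cand->usage ||
+          (cur->usage == cand->usage &&
+           cur->num_reserved() > cand->num_reserved());
+}
+#endif
+/*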
+ */ +int search_res_for_device(RCTX &rctx) +{ + AUTOCHANGER *changer; + int stat; + + Dmsg1(dbglvl, "search res for %s\n", rctx.device_name); + /* Look through Autochangers first */ + foreach_res(changer, R_AUTOCHANGER) { + Dmsg1(dbglvl, "Try match changer res=%s\n", changer->hdr.name); + /* Find resource, and make sure we were able to open it */ + if (strcmp(rctx.device_name, changer->hdr.name) == 0) { + /* Try each device in this AutoChanger */ + foreach_alist(rctx.device, changer->device) { + Dmsg1(dbglvl, "Top try changer device %s\n", rctx.device->hdr.name); + if (rctx.store->append && rctx.device->read_only) { + continue; + } + if (!rctx.device->autoselect) { + Dmsg1(dbglvl, "Device %s not autoselect skipped.\n", + rctx.device->hdr.name); + continue; /* device is not available */ + } + if (rctx.try_low_use_drive) { + if (!rctx.low_use_drive) { + rctx.low_use_drive = rctx.device->dev; + Dmsg2(dbglvl, "Set low_use usage=%lld drv=%s\n", + rctx.low_use_drive->usage, + rctx.low_use_drive->print_name()); + } else if ((rctx.low_use_drive->usage > rctx.device->dev->usage) || + (rctx.low_use_drive->usage == rctx.device->dev->usage && + rctx.low_use_drive->num_reserved() > rctx.device->dev->num_reserved())) { + rctx.low_use_drive = rctx.device->dev; + Dmsg2(dbglvl, "Reset low_use usage=%lld drv=%s\n", + rctx.low_use_drive->usage, + rctx.low_use_drive->print_name()); + } else { + Dmsg2(dbglvl, "Skip low_use usage=%lld drv=%s\n", + rctx.low_use_drive->usage, + rctx.low_use_drive->print_name()); + } + } else { + Dmsg2(dbglvl, "try reserve vol=%s on device=%s\n", rctx.VolumeName, rctx.device->hdr.name); + stat = reserve_device(rctx); + if (stat != 1) { /* try another device */ + continue; + } + /* Debug code */ + if (rctx.store->append) { + Dmsg2(dbglvl, "Device %s reserved=%d for append.\n", + rctx.device->hdr.name, rctx.jcr->dcr->dev->num_reserved()); + } else { + Dmsg2(dbglvl, "Device %s reserved=%d for read.\n", + rctx.device->hdr.name, rctx.jcr->read_dcr->dev->num_reserved()); + } + return stat; + } + } + /* If found a drive try to reserve it */ + if (rctx.try_low_use_drive && rctx.low_use_drive) { + rctx.device = rctx.low_use_drive->device; + Dmsg2(dbglvl, "Try reserve vol=%s on device=%s\n", rctx.VolumeName, rctx.device->hdr.name); + stat = reserve_device(rctx); + if (stat == 1) { + /* Debug code */ + if (rctx.store->append) { + Dmsg3(dbglvl, "JobId=%d device %s reserved=%d for append.\n", + rctx.jcr->JobId, rctx.device->hdr.name, rctx.jcr->dcr->dev->num_reserved()); + } else { + Dmsg3(dbglvl, "JobId=%d device %s reserved=%d for read.\n", + rctx.jcr->JobId, rctx.device->hdr.name, rctx.jcr->read_dcr->dev->num_reserved()); + } + } else { + Dmsg2(dbglvl, "Reserve for %s failed for JobId=%d.\n", + rctx.store->append ? 
"append" : "read", rctx.jcr->JobId); + } + return stat; + } + } + } + + /* Now if requested look through regular devices */ + if (!rctx.autochanger_only) { + foreach_res(rctx.device, R_DEVICE) { + Dmsg1(dbglvl, "Try match res=%s\n", rctx.device->hdr.name); + /* Find resource, and make sure we were able to open it */ + if (strcmp(rctx.device_name, rctx.device->hdr.name) == 0) { + Dmsg2(dbglvl, "Try reserve vol=%s on device=%s\n", rctx.VolumeName, rctx.device->hdr.name); + stat = reserve_device(rctx); + if (stat != 1) { /* try another device */ + continue; + } + /* Debug code */ + if (rctx.store->append) { + Dmsg2(dbglvl, "Device %s reserved=%d for append.\n", + rctx.device->hdr.name, rctx.jcr->dcr->dev->num_reserved()); + } else { + Dmsg2(dbglvl, "Device %s reserved=%d for read.\n", + rctx.device->hdr.name, rctx.jcr->read_dcr->dev->num_reserved()); + } + return stat; + } + } + } + return -1; /* nothing found */ +} + +/* + * Walk through the autochanger resources and check if + * the volume is in one of them. + * + * Returns: true if volume is in device + * false otherwise + */ +static bool is_vol_in_autochanger(RCTX &rctx, VOLRES *vol) +{ + AUTOCHANGER *changer = vol->dev->device->changer_res; + + /* Find resource, and make sure we were able to open it */ + if (changer && strcmp(rctx.device_name, changer->hdr.name) == 0) { + Dmsg1(dbglvl, "Found changer device %s\n", vol->dev->device->hdr.name); + return true; + } + Dmsg1(dbglvl, "Incorrect changer device %s\n", changer->hdr.name); + return false; +} + + +/* + * Try to reserve a specific device. + * + * Returns: 1 -- OK, have DCR + * 0 -- must wait + * -1 -- fatal error + */ +static int reserve_device(RCTX &rctx) +{ + bool ok; + DCR *dcr; + const int name_len = MAX_NAME_LENGTH; + + /* Make sure MediaType is OK */ + Dmsg2(dbglvl, "chk MediaType device=%s request=%s\n", + rctx.device->media_type, rctx.store->media_type); + if (strcmp(rctx.device->media_type, rctx.store->media_type) != 0) { + return -1; + } + + /* Make sure device exists -- i.e. 
we can stat() it */ + if (!rctx.device->dev) { + rctx.device->dev = init_dev(rctx.jcr, rctx.device); + } + if (!rctx.device->dev) { + if (rctx.device->changer_res) { + Jmsg(rctx.jcr, M_WARNING, 0, _("\n" + " Device \"%s\" in changer \"%s\" requested by DIR could not be opened or does not exist.\n"), + rctx.device->hdr.name, rctx.device_name); + } else { + Jmsg(rctx.jcr, M_WARNING, 0, _("\n" + " Device \"%s\" requested by DIR could not be opened or does not exist.\n"), + rctx.device_name); + } + return -1; /* no use waiting */ + } else if (!rctx.device->dev->enabled) { + if (verbose) { + Jmsg(rctx.jcr, M_WARNING, 0, _("\n" + " Device \"%s\" requested by DIR is disabled.\n"), + rctx.device_name); + } + return -1; /* no use waiting */ + } + + rctx.suitable_device = true; + Dmsg1(dbglvl, "try reserve %s\n", rctx.device->hdr.name); + if (rctx.store->append) { + dcr = new_dcr(rctx.jcr, rctx.jcr->dcr, rctx.device->dev, SD_APPEND); + } else { + dcr = new_dcr(rctx.jcr, rctx.jcr->read_dcr, rctx.device->dev, SD_READ); + } + if (!dcr) { + BSOCK *dir = rctx.jcr->dir_bsock; + dir->fsend(_("3926 Could not get dcr for device: %s\n"), rctx.device_name); + Dmsg1(dbglvl, ">dird: %s", dir->msg); + return -1; + } + bstrncpy(dcr->pool_name, rctx.store->pool_name, name_len); + bstrncpy(dcr->pool_type, rctx.store->pool_type, name_len); + bstrncpy(dcr->media_type, rctx.store->media_type, name_len); + bstrncpy(dcr->dev_name, rctx.device_name, name_len); + if (rctx.store->append) { + Dmsg2(dbglvl, "call reserve for append: have_vol=%d vol=%s\n", rctx.have_volume, rctx.VolumeName); + ok = reserve_device_for_append(dcr, rctx); + if (!ok) { + goto bail_out; + } + rctx.jcr->dcr = dcr; + Dmsg5(dbglvl, "Reserved=%d dev_name=%s mediatype=%s pool=%s ok=%d\n", + dcr->dev->num_reserved(), + dcr->dev_name, dcr->media_type, dcr->pool_name, ok); + Dmsg4(dbglvl, "Vol=%s num_writers=%d, reserved=%d have_vol=%d\n", + rctx.VolumeName, dcr->dev->num_writers, dcr->dev->num_reserved(), + rctx.have_volume); + if (rctx.have_volume) { + Dmsg0(dbglvl, "Call reserve_volume for append.\n"); + if (reserve_volume(dcr, rctx.VolumeName)) { + Dmsg1(dbglvl, "Reserved vol=%s\n", rctx.VolumeName); + } else { + Dmsg1(dbglvl, "Could not reserve vol=%s\n", rctx.VolumeName); + goto bail_out; + } + } else { + dcr->any_volume = true; + Dmsg0(dbglvl, "no vol, call find_next_appendable_vol.\n"); + if (dir_find_next_appendable_volume(dcr)) { + bstrncpy(rctx.VolumeName, dcr->VolumeName, sizeof(rctx.VolumeName)); + rctx.have_volume = true; + Dmsg1(dbglvl, "looking for Volume=%s\n", rctx.VolumeName); + if (!dcr->can_i_use_volume() || !is_pool_ok(dcr)) { + rctx.have_volume = false; + rctx.VolumeName[0] = 0; + dcr->unreserve_device(false); + goto bail_out; + } + } else { + dcr->dev->clear_wait(); + Dmsg0(dbglvl, "No next volume found\n"); + rctx.have_volume = false; + rctx.VolumeName[0] = 0; + /* + * If there is at least one volume that is valid and in use, + * but we get here, check if we are running with prefers + * non-mounted drives. In that case, we have selected a + * non-used drive and our one and only volume is mounted + * elsewhere, so we bail out and retry using that drive. + */ + if (dcr->found_in_use() && !rctx.PreferMountedVols) { + rctx.PreferMountedVols = true; + if (dcr->VolumeName[0]) { + dcr->unreserve_device(false); + } + goto bail_out; + } + /* + * Note. 
Under some circumstances, the Director can hand us + * a Volume name that is not the same as the one on the current + * drive, and in that case, the call above to find the next + * volume will fail because in attempting to reserve the Volume + * the code will realize that we already have a tape mounted, + * and it will fail. This *should* only happen if there are + * writers, thus the following test. In that case, we simply + * bail out, and continue waiting, rather than plunging on + * and hoping that the operator can resolve the problem. + */ + if (dcr->dev->num_writers != 0) { + if (dcr->VolumeName[0]) { + dcr->unreserve_device(false); + } + goto bail_out; + } + } + } + } else { + ok = reserve_device_for_read(dcr); + if (ok) { + rctx.jcr->read_dcr = dcr; + Dmsg5(dbglvl, "Read reserved=%d dev_name=%s mediatype=%s pool=%s ok=%d\n", + dcr->dev->num_reserved(), + dcr->dev_name, dcr->media_type, dcr->pool_name, ok); + } + } + if (!ok) { + goto bail_out; + } + + if (rctx.notify_dir) { + POOL_MEM dev_name; + BSOCK *dir = rctx.jcr->dir_bsock; + pm_strcpy(dev_name, rctx.device->hdr.name); + bash_spaces(dev_name); + ok = dir->fsend(OK_device, dev_name.c_str()); /* Return real device name */ + Dmsg1(dbglvl, ">dird: %s", dir->msg); + if (!ok) { + dcr->unreserve_device(false); + } + } else { + ok = true; + } + return ok ? 1 : -1; + +bail_out: + rctx.have_volume = false; + rctx.VolumeName[0] = 0; + Dmsg0(dbglvl, "Not OK.\n"); + return 0; +} + + +/* + * We reserve the device for appending by incrementing + * num_reserved(). We do virtually all the same work that + * is done in acquire_device_for_append(), but we do + * not attempt to mount the device. This routine allows + * the DIR to reserve multiple devices before *really* + * starting the job. It also permits the SD to refuse + * certain devices (not up, ...). + * + * Note, in reserving a device, if the device is for the + * same pool and the same pool type, then it is acceptable. + * The Media Type has already been checked. If we are + * the first to reserve the device, we put the pool + * name and pool type in the device record. + */ +static bool reserve_device_for_append(DCR *dcr, RCTX &rctx) +{ + JCR *jcr = dcr->jcr; + DEVICE *dev = dcr->dev; + bool ok = false; + + ASSERT2(dcr, "No dcr in reserve_device_for_append!"); + if (job_canceled(jcr)) { + return false; + } + + dev->Lock(); + + /* If device is being read or reserved for read, we cannot write it */ + if (dev->can_read() || dev->is_reserved_for_read()) { + Mmsg(jcr->errmsg, _("3603 JobId=%u %s device %s is busy reading.\n"), + jcr->JobId, dev->print_type(), dev->print_name()); + queue_reserve_message(jcr); + Dmsg1(dbglvl, "Failed: %s", jcr->errmsg); + goto bail_out; + } + + /* If device is unmounted, we are out of luck */ + if (dev->is_device_unmounted()) { + Mmsg(jcr->errmsg, _("3604 JobId=%u %s device %s is BLOCKED due to user unmount.\n"), + jcr->JobId, dev->print_type(), dev->print_name()); + queue_reserve_message(jcr); + Dmsg1(dbglvl, "Failed: %s", jcr->errmsg); + goto bail_out; + } + + Dmsg2(dbglvl, "reserve_append %s device is %s\n", dev->print_type(), dev->print_name()); + + /* Now do detailed tests ... 
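+    * The detailed tests are made by can_reserve_drive() and is_max_jobs_ok();
+    * the fragment below is an illustrative restatement (not part of the
+    * Bacula sources) of the per-drive concurrency limit they enforce.
+    */
+#if 0   /* illustrative only, never compiled */
+   bool drive_full = dev->max_concurrent_jobs > 0 &&
+                     (int)dev->max_concurrent_jobs <=
+                        dev->num_writers + dev->num_reserved();
+#endif
+   /*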
*/ + if (can_reserve_drive(dcr, rctx) != 1) { + Dmsg0(dbglvl, "can_reserve_drive!=1\n"); + goto bail_out; + } + + /* Note: on failure this returns jcr->errmsg properly edited */ + if (generate_plugin_event(jcr, bsdEventDeviceTryOpen, dcr) != bRC_OK) { + queue_reserve_message(jcr); + goto bail_out; + } + dcr->set_reserved_for_append(); + ok = true; + +bail_out: + dev->Unlock(); + return ok; +} + +/* + * We "reserve" the drive by setting the ST_READ bit. No one else + * should touch the drive until that is cleared. + * This allows the DIR to "reserve" the device before actually + * starting the job. + */ +static bool reserve_device_for_read(DCR *dcr) +{ + DEVICE *dev = dcr->dev; + JCR *jcr = dcr->jcr; + bool ok = false; + + ASSERT2(dcr, "No dcr in reserve_device_for_read!"); + if (job_canceled(jcr)) { + return false; + } + + dev->Lock(); + + if (dev->is_device_unmounted()) { + Mmsg(jcr->errmsg, _("3601 JobId=%u %s device %s is BLOCKED due to user unmount.\n"), + jcr->JobId, dev->print_type(), dev->print_name()); + queue_reserve_message(jcr); + Dmsg1(dbglvl, "Device %s is BLOCKED due to user unmount.\n", dev->print_name()); + goto bail_out; + } + + if (dev->is_busy()) { + Mmsg(jcr->errmsg, _("3602 JobId=%u %s device %s is busy (already reading/writing)." + " read=%d, writers=%d reserved=%d\n"), + jcr->JobId, dev->print_type(), dev->print_name(), + dev->state & ST_READ?1:0, dev->num_writers, dev->num_reserved()); + queue_reserve_message(jcr); + Dmsg4(dbglvl, "Device %s is busy ST_READ=%d num_writers=%d reserved=%d.\n", + dev->print_name(), + dev->state & ST_READ?1:0, dev->num_writers, dev->num_reserved()); + goto bail_out; + } + + /* Note: on failure this returns jcr->errmsg properly edited */ + if (generate_plugin_event(jcr, bsdEventDeviceTryOpen, dcr) != bRC_OK) { + queue_reserve_message(jcr); + goto bail_out; + } + dev->clear_append(); + dcr->set_reserved_for_read(); + ok = true; + +bail_out: + dev->Unlock(); + return ok; +} + +static bool is_max_jobs_ok(DCR *dcr) +{ + DEVICE *dev = dcr->dev; + JCR *jcr = dcr->jcr; + + Dmsg6(dbglvl, "MaxJobs=%d VolCatJobs=%d writers=%d reserves=%d Status=%s Vol=%s\n", + dcr->VolCatInfo.VolCatMaxJobs, + dcr->VolCatInfo.VolCatJobs, + dev->num_writers, dev->num_reserved(), + dcr->VolCatInfo.VolCatStatus, + dcr->VolumeName); + /* Limit max concurrent jobs on this drive */ + if (dev->max_concurrent_jobs > 0 && (int)dev->max_concurrent_jobs <= + (dev->num_writers + dev->num_reserved())) { + /* Max Concurrent Jobs depassed or already reserved */ + Mmsg(jcr->errmsg, _("3609 JobId=%u Max concurrent jobs=%d exceeded on %s device %s.\n"), + (uint32_t)jcr->JobId, dev->max_concurrent_jobs, + dev->print_type(), dev->print_name()); + queue_reserve_message(jcr); + Dmsg1(dbglvl, "Failed: %s", jcr->errmsg); + return false; + } + if (strcmp(dcr->VolCatInfo.VolCatStatus, "Recycle") == 0) { + return true; + } + if (!dev->allow_maxbytes_concurrency(dcr)) { + queue_reserve_message(jcr); + Dmsg1(dbglvl, "reserve dev failed: %s", jcr->errmsg); + return false; /* wait */ + } + + if (dcr->VolCatInfo.VolCatMaxJobs > 0 && (int)dcr->VolCatInfo.VolCatMaxJobs <= + (dev->num_writers + dev->num_reserved())) { + /* Max Job Vols depassed or already reserved */ + Mmsg(jcr->errmsg, _("3611 JobId=%u Volume max jobs=%d exceeded on %s device %s.\n"), + (uint32_t)jcr->JobId, dcr->VolCatInfo.VolCatMaxJobs, + dev->print_type(), dev->print_name()); + queue_reserve_message(jcr); + Dmsg1(dbglvl, "reserve dev failed: %s", jcr->errmsg); + return false; /* wait */ + } + return true; +} + + +static int 
is_pool_ok(DCR *dcr) +{ + DEVICE *dev = dcr->dev; + JCR *jcr = dcr->jcr; + + if (dev->num_writers >= 0) { + /* Now check if we want the same Pool and pool type */ + if (strcmp(dev->pool_name, dcr->pool_name) == 0 && + strcmp(dev->pool_type, dcr->pool_type) == 0) { + /* OK, compatible device */ + Dmsg1(dbglvl, "OK dev: %s pool matches\n", dev->print_name()); + return 1; + } + } else if (dev->num_reserved() > 0) { + if (strcmp(dev->reserved_pool_name, dcr->pool_name) == 0) { + /* OK, compatible device */ + Dmsg1(dbglvl, "OK dev: %s pool matches\n", dev->print_name()); + return 1; + } + } + /* Drive Pool not suitable for us */ + Mmsg(jcr->errmsg, _( +"3608 JobId=%u wants Pool=\"%s\" but have Pool=\"%s\" nreserve=%d on %s device %s.\n"), + (uint32_t)jcr->JobId, dcr->pool_name, dev->pool_name, + dev->num_reserved(), dev->print_type(), dev->print_name()); + Dmsg1(dbglvl, "Failed: %s", jcr->errmsg); + queue_reserve_message(jcr); + return 0; +} + +/* + * Returns: 1 if drive can be reserved + * 0 if we should wait + * -1 on error or impossibility + */ +static int can_reserve_drive(DCR *dcr, RCTX &rctx) +{ + DEVICE *dev = dcr->dev; + JCR *jcr = dcr->jcr; + + Dmsg5(dbglvl, "PrefMnt=%d exact=%d suitable=%d chgronly=%d any=%d\n", + rctx.PreferMountedVols, rctx.exact_match, rctx.suitable_device, + rctx.autochanger_only, rctx.any_drive); + + /* Check for max jobs on this Volume */ + if (!is_max_jobs_ok(dcr)) { + return 0; + } + + /* setting any_drive overrides PreferMountedVols flag */ + if (!rctx.any_drive) { + /* + * When PreferMountedVols is set, we keep track of the + * drive in use that has the least number of writers, then if + * no unmounted drive is found, we try that drive. This + * helps spread the load to the least used drives. + */ + if (rctx.try_low_use_drive && dev == rctx.low_use_drive && + is_pool_ok(dcr)) { + Dmsg2(dbglvl, "OK dev=%s == low_drive=%s.\n", + dev->print_name(), rctx.low_use_drive->print_name()); + bstrncpy(dev->pool_name, dcr->pool_name, sizeof(dev->pool_name)); + bstrncpy(dev->pool_type, dcr->pool_type, sizeof(dev->pool_type)); + return 1; + } + /* If he wants a free drive, but this one is busy, no go */ + if (!rctx.PreferMountedVols && dev->is_busy()) { + Mmsg(jcr->errmsg, _("3605 JobId=%u wants free drive but %s device %s is busy.\n"), + jcr->JobId, dev->print_type(), dev->print_name()); + queue_reserve_message(jcr); + Dmsg1(dbglvl, "Failed: %s", jcr->errmsg); + return 0; + } + + /* Check for prefer mounted volumes */ + if (rctx.PreferMountedVols && !dev->vol && dev->is_tape()) { + Mmsg(jcr->errmsg, _("3606 JobId=%u prefers mounted drives, but %s device %s has no Volume.\n"), + jcr->JobId, dev->print_type(), dev->print_name()); + queue_reserve_message(jcr); + Dmsg1(dbglvl, "Failed: %s", jcr->errmsg); + return 0; /* No volume mounted */ + } + + /* Check for exact Volume name match */ + /* ***FIXME*** for Disk, we can accept any volume that goes with this + * drive. 
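+       *
+       * An illustrative restatement (not part of the Bacula sources) of the
+       * exact-match test coded just below: the wanted Volume must be either
+       * the one physically loaded in the drive or the one already attached
+       * to it.
+       */
+#if 0   /* illustrative only, never compiled */
+      bool vol_matches =
+         strcmp(dev->VolHdr.VolumeName, rctx.VolumeName) == 0 ||
+         (dev->vol && strcmp(dev->vol->vol_name, rctx.VolumeName) == 0);
+#endif
+      /*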
+ */ + if (rctx.exact_match && rctx.have_volume) { + bool ok; + Dmsg5(dbglvl, "PrefMnt=%d exact=%d suitable=%d chgronly=%d any=%d\n", + rctx.PreferMountedVols, rctx.exact_match, rctx.suitable_device, + rctx.autochanger_only, rctx.any_drive); + Dmsg4(dbglvl, "have_vol=%d have=%s resvol=%s want=%s\n", + rctx.have_volume, dev->VolHdr.VolumeName, + dev->vol?dev->vol->vol_name:"*none*", rctx.VolumeName); + ok = strcmp(dev->VolHdr.VolumeName, rctx.VolumeName) == 0 || + (dev->vol && strcmp(dev->vol->vol_name, rctx.VolumeName) == 0); + if (!ok) { + Mmsg(jcr->errmsg, _("3607 JobId=%u wants Vol=\"%s\" drive has Vol=\"%s\" on %s device %s.\n"), + jcr->JobId, rctx.VolumeName, dev->VolHdr.VolumeName, + dev->print_type(), dev->print_name()); + queue_reserve_message(jcr); + Dmsg3(dbglvl, "not OK: dev have=%s resvol=%s want=%s\n", + dev->VolHdr.VolumeName, dev->vol?dev->vol->vol_name:"*none*", rctx.VolumeName); + return 0; + } + if (!dcr->can_i_use_volume()) { + return 0; /* fail if volume on another drive */ + } + } + } + + /* Check for unused autochanger drive */ + if (rctx.autochanger_only && !dev->is_busy() && + dev->VolHdr.VolumeName[0] == 0 && is_pool_ok(dcr)) { + /* Device is available but not yet reserved, reserve it for us */ + Dmsg1(dbglvl, "OK Res Unused autochanger %s.\n", dev->print_name()); + bstrncpy(dev->pool_name, dcr->pool_name, sizeof(dev->pool_name)); + bstrncpy(dev->pool_type, dcr->pool_type, sizeof(dev->pool_type)); + return 1; /* reserve drive */ + } + + /* + * Handle the case that there are no writers + */ + if (dev->num_writers == 0) { + /* Now check if there are any reservations on the drive */ + if (dev->num_reserved()) { + return is_pool_ok(dcr); + } else if (dev->can_append()) { + if (is_pool_ok(dcr)) { + return 1; + } else { + /* Changing pool, unload old tape if any in drive */ + Dmsg0(dbglvl, "OK dev: num_writers=0, not reserved, pool change, unload changer\n"); + /* ***FIXME*** use set_unload() */ + unload_autochanger(dcr, -1); + } + } + /* Device is available but not yet reserved, reserve it for us */ + Dmsg1(dbglvl, "OK Dev avail reserved %s\n", dev->print_name()); + bstrncpy(dev->pool_name, dcr->pool_name, sizeof(dev->pool_name)); + bstrncpy(dev->pool_type, dcr->pool_type, sizeof(dev->pool_type)); + return 1; /* reserve drive */ + } + + /* + * Check if the device is in append mode with writers (i.e. + * available if pool is the same). + */ + if (dev->can_append() || dev->num_writers > 0 || dev->num_reserved() > 0) { + return is_pool_ok(dcr); + } else { + Pmsg1(000, _("Logic error!!!! JobId=%u Should not get here.\n"), (int)jcr->JobId); + Mmsg(jcr->errmsg, _("3910 JobId=%u Logic error!!!! %s device %s Should not get here.\n"), + jcr->JobId, dev->print_type(), dev->print_name()); + queue_reserve_message(jcr); + Jmsg0(jcr, M_FATAL, 0, _("Logic error!!!! Should not get here.\n")); + return -1; /* error, should not get here */ + } + Mmsg(jcr->errmsg, _("3911 JobId=%u failed reserve %s device %s.\n"), + jcr->JobId, dev->print_type(), dev->print_name()); + queue_reserve_message(jcr); + Dmsg1(dbglvl, "Failed: No reserve %s\n", dev->print_name()); + return 0; +} + + +/* + * Queue a reservation error or failure message for this jcr + */ +static void queue_reserve_message(JCR *jcr) +{ + int i; + alist *msgs; + char *msg; + + jcr->lock(); + + msgs = jcr->reserve_msgs; + if (!msgs) { + goto bail_out; + } + /* + * Look for duplicate message. 
If found, do + * not insert + */ + for (i=msgs->size()-1; i >= 0; i--) { + msg = (char *)msgs->get(i); + if (!msg) { + goto bail_out; + } + /* Comparison based on 4 digit message number */ + if (strncmp(msg, jcr->errmsg, 4) == 0) { + goto bail_out; + } + } + /* Message unique, so insert it */ + jcr->reserve_msgs->push(bstrdup(jcr->errmsg)); + +bail_out: + jcr->unlock(); +} + +/* + * Send any reservation messages queued for this jcr + */ +void send_drive_reserve_messages(JCR *jcr, void sendit(const char *msg, int len, void *sarg), void *arg) +{ + int i; + alist *msgs; + char *msg; + + jcr->lock(); + msgs = jcr->reserve_msgs; + if (!msgs || msgs->size() == 0) { + goto bail_out; + } + for (i=msgs->size()-1; i >= 0; i--) { + msg = (char *)msgs->get(i); + if (msg) { + sendit(" ", 3, arg); + sendit(msg, strlen(msg), arg); + } else { + break; + } + } + +bail_out: + jcr->unlock(); +} + +/* + * Pop and release any reservations messages + */ +static void pop_reserve_messages(JCR *jcr) +{ + alist *msgs; + char *msg; + + jcr->lock(); + msgs = jcr->reserve_msgs; + if (!msgs) { + goto bail_out; + } + while ((msg = (char *)msgs->pop())) { + free(msg); + } +bail_out: + jcr->unlock(); +} + +/* + * Also called from acquire.c + */ +void release_reserve_messages(JCR *jcr) +{ + pop_reserve_messages(jcr); + jcr->lock(); + if (!jcr->reserve_msgs) { + goto bail_out; + } + delete jcr->reserve_msgs; + jcr->reserve_msgs = NULL; + +bail_out: + jcr->unlock(); +} diff --git a/src/stored/reserve.h b/src/stored/reserve.h new file mode 100644 index 00000000..bae8d726 --- /dev/null +++ b/src/stored/reserve.h @@ -0,0 +1,63 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Definitions for reservation system. + * + * Kern Sibbald, February MMVI + * + */ + +/* + * Use Device command from Director + * The DIR tells us what Device Name to use, the Media Type, + * the Pool Name, and the Pool Type. + * + * Ensure that the device exists and is opened, then store + * the media and pool info in the JCR. This class is used + * only temporarily in this file. 
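+ *
+ * An illustrative sketch (not part of the Bacula sources) mirroring how
+ * use_device_cmd() in reserve.c fills one DIRSTORE from a "use storage"
+ * line and its following "use device" lines; the literal names below are
+ * hypothetical.
+ */
+#if 0   /* illustrative only, never compiled */
+   DIRSTORE *store = new DIRSTORE;
+   memset(store, 0, sizeof(DIRSTORE));
+   store->device = New(alist(10));
+   bstrncpy(store->name, "File1", sizeof(store->name));
+   bstrncpy(store->media_type, "File1", sizeof(store->media_type));
+   bstrncpy(store->pool_name, "Default", sizeof(store->pool_name));
+   bstrncpy(store->pool_type, "Backup", sizeof(store->pool_type));
+   store->append = true;
+   store->device->append(bstrdup("FileStorage"));  /* one "use device" line */
+#endif
+/*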
+ */ +class DIRSTORE { +public: + alist *device; + bool append; + char name[MAX_NAME_LENGTH]; + char media_type[MAX_NAME_LENGTH]; + char pool_name[MAX_NAME_LENGTH]; + char pool_type[MAX_NAME_LENGTH]; +}; + +/* Reserve context */ +class RCTX { +public: + JCR *jcr; + char *device_name; + DIRSTORE *store; + DEVRES *device; + DEVICE *low_use_drive; /* Low use drive candidate */ + bool try_low_use_drive; /* see if low use drive available */ + bool any_drive; /* Accept any drive if set */ + bool PreferMountedVols; /* Prefer volumes already mounted */ + bool exact_match; /* Want exact volume */ + bool have_volume; /* Have DIR suggested vol name */ + bool suitable_device; /* at least one device is suitable */ + bool autochanger_only; /* look at autochangers only */ + bool notify_dir; /* Notify DIR about device */ + bool append; /* set if append device */ + char VolumeName[MAX_NAME_LENGTH]; /* Vol name suggested by DIR */ +}; diff --git a/src/stored/s3_driver.c b/src/stored/s3_driver.c new file mode 100644 index 00000000..8dd9b861 --- /dev/null +++ b/src/stored/s3_driver.c @@ -0,0 +1,806 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Routines for writing to the Cloud using S3 protocol. + * NOTE!!! This cloud driver is not compatible with + * any disk-changer script for changing Volumes. + * It does however work with Bacula Virtual autochangers. 
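+ *
+ * An illustrative sketch (not part of the Bacula sources) of how this
+ * driver recovers a part number from a cloud object key, as done in
+ * partslistBucketCallback(); the key layout itself is produced by
+ * make_cloud_filename()/add_vol_and_part() and the literal key below is
+ * hypothetical.
+ */
+#if 0   /* illustrative only, never compiled */
+static int example_part_index()
+{
+   const char *key = "Vol0001/part.3";      /* hypothetical object key */
+   const char *ext = strstr(key, "part.");
+   return ext ? atoi(ext + 5) : 0;          /* 5 == strlen("part.") -> 3 */
+}
+#endif
+/*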
+ * + * Written by Kern Sibbald, May MMXVI + */ + +#include "s3_driver.h" + +#ifdef HAVE_LIBS3 + +static const int dbglvl = 100; +static const char *S3Errors[] = { + "OK", + "InternalError", + "OutOfMemory", + "Interrupted", + "InvalidBucketNameTooLong", + "InvalidBucketNameFirstCharacter", + "InvalidBucketNameCharacter", + "InvalidBucketNameCharacterSequence", + "InvalidBucketNameTooShort", + "InvalidBucketNameDotQuadNotation", + "QueryParamsTooLong", + "FailedToInitializeRequest", + "MetaDataHeadersTooLong", + "BadMetaData", + "BadContentType", + "ContentTypeTooLong", + "BadMD5", + "MD5TooLong", + "BadCacheControl", + "CacheControlTooLong", + "BadContentDispositionFilename", + "ContentDispositionFilenameTooLong", + "BadContentEncoding", + "ContentEncodingTooLong", + "BadIfMatchETag", + "IfMatchETagTooLong", + "BadIfNotMatchETag", + "IfNotMatchETagTooLong", + "HeadersTooLong", + "KeyTooLong", + "UriTooLong", + "XmlParseFailure", + "EmailAddressTooLong", + "UserIdTooLong", + "UserDisplayNameTooLong", + "GroupUriTooLong", + "PermissionTooLong", + "TargetBucketTooLong", + "TargetPrefixTooLong", + "TooManyGrants", + "BadGrantee", + "BadPermission", + "XmlDocumentTooLarge", + "NameLookupError", + "FailedToConnect", + "ServerFailedVerification", + "ConnectionFailed", + "AbortedByCallback", + "AccessDenied", + "AccountProblem", + "AmbiguousGrantByEmailAddress", + "BadDigest", + "BucketAlreadyExists", + "BucketAlreadyOwnedByYou", + "BucketNotEmpty", + "CredentialsNotSupported", + "CrossLocationLoggingProhibited", + "EntityTooSmall", + "EntityTooLarge", + "ExpiredToken", + "IllegalVersioningConfigurationException", + "IncompleteBody", + "IncorrectNumberOfFilesInPostRequest", + "InlineDataTooLarge", + "InternalError", + "InvalidAccessKeyId", + "InvalidAddressingHeader", + "InvalidArgument", + "InvalidBucketName", + "InvalidBucketState", + "InvalidDigest", + "InvalidLocationConstraint", + "InvalidObjectState", + "InvalidPart", + "InvalidPartOrder", + "InvalidPayer", + "InvalidPolicyDocument", + "InvalidRange", + "InvalidRequest", + "InvalidSecurity", + "InvalidSOAPRequest", + "InvalidStorageClass", + "InvalidTargetBucketForLogging", + "InvalidToken", + "InvalidURI", + "KeyTooLong", + "MalformedACLError", + "MalformedPOSTRequest", + "MalformedXML", + "MaxMessageLengthExceeded", + "MaxPostPreDataLengthExceededError", + "MetadataTooLarge", + "MethodNotAllowed", + "MissingAttachment", + "MissingContentLength", + "MissingRequestBodyError", + "MissingSecurityElement", + "MissingSecurityHeader", + "NoLoggingStatusForKey", + "NoSuchBucket", + "NoSuchKey", + "NoSuchLifecycleConfiguration", + "NoSuchUpload", + "NoSuchVersion", + "NotImplemented", + "NotSignedUp", + "NotSuchBucketPolicy", + "OperationAborted", + "PermanentRedirect", + "PreconditionFailed", + "Redirect", + "RestoreAlreadyInProgress", + "RequestIsNotMultiPartContent", + "RequestTimeout", + "RequestTimeTooSkewed", + "RequestTorrentOfBucketError", + "SignatureDoesNotMatch", + "ServiceUnavailable", + "SlowDown", + "TemporaryRedirect", + "TokenRefreshRequired", + "TooManyBuckets", + "UnexpectedContent", + "UnresolvableGrantByEmailAddress", + "UserKeyMustBeSpecified", + "Unknown", + "HttpErrorMovedTemporarily", + "HttpErrorBadRequest", + "HttpErrorForbidden", + "HttpErrorNotFound", + "HttpErrorConflict", + "HttpErrorUnknown", + "Undefined" +}; + +#define S3ErrorsSize (sizeof(S3Errors)/sizeof(char *)) + +#include + +/* + * Our Bacula context for s3_xxx callbacks + * NOTE: only items needed for particular callback are set + */ +class bacula_ctx { 
+public: + JCR *jcr; + transfer *xfer; + POOLMEM *&errMsg; + ilist *parts; + int isTruncated; + char* nextMarker; + int64_t obj_len; + const char *caller; + FILE *infile; + FILE *outfile; + alist *volumes; + S3Status status; + bwlimit *limit; /* Used to control the bandwidth */ + bacula_ctx(POOLMEM *&err) : jcr(NULL), xfer(NULL), errMsg(err), parts(NULL), + isTruncated(0), nextMarker(NULL), obj_len(0), caller(NULL), + infile(NULL), outfile(NULL), volumes(NULL), status(S3StatusOK), limit(NULL) + {} + bacula_ctx(transfer *t) : jcr(NULL), xfer(t), errMsg(t->m_message), parts(NULL), + isTruncated(0), nextMarker(NULL), obj_len(0), caller(NULL), + infile(NULL), outfile(NULL), volumes(NULL), status(S3StatusOK), limit(NULL) + {} +}; + + +/* Imported functions */ +const char *mode_to_str(int mode); + +/* Forward referenced functions */ + +/* Const and Static definitions */ + +static S3Status responsePropertiesCallback( + const S3ResponseProperties *properties, + void *callbackData); + +static void responseCompleteCallback( + S3Status status, + const S3ErrorDetails *oops, + void *callbackData); + + +S3ResponseHandler responseHandler = +{ + &responsePropertiesCallback, + &responseCompleteCallback +}; + + + + +static S3Status responsePropertiesCallback( + const S3ResponseProperties *properties, + void *callbackData) +{ + bacula_ctx *ctx = (bacula_ctx *)callbackData; + ASSERT(ctx); + if (ctx->xfer && properties) { + if (properties->contentLength > 0) { + ctx->xfer->m_res_size = properties->contentLength; + } + if (properties->lastModified > 0) { + ctx->xfer->m_res_mtime = properties->lastModified; + } + } + return S3StatusOK; +} + +static void responseCompleteCallback( + S3Status status, + const S3ErrorDetails *oops, + void *callbackCtx) +{ + bacula_ctx *ctx = (bacula_ctx *)callbackCtx; + const char *msg; + + Enter(dbglvl); + if (ctx) { + ctx->status = status; /* return completion status */ + } + if (status < 0 || status > S3ErrorsSize) { + status = (S3Status)S3ErrorsSize; + } + msg = oops->message; + if (!msg) { + msg = S3Errors[status]; + } + if ((status != S3StatusOK) && ctx->errMsg) { + if (oops->furtherDetails) { + Mmsg(ctx->errMsg, "%s ERR=%s\n" + "furtherDetails=%s\n", ctx->caller, msg, oops->furtherDetails); + Dmsg1(dbglvl, "%s", ctx->errMsg); + } else { + Mmsg(ctx->errMsg, "%s ERR=%s\n", ctx->caller, msg); + Dmsg1(dbglvl, "%s", ctx->errMsg); + } + } + return; +} + + + + +static int putObjectCallback(int buf_len, char *buf, void *callbackCtx) +{ + bacula_ctx *ctx = (bacula_ctx *)callbackCtx; + + ssize_t rbytes = 0; + int read_len; + + if (ctx->xfer->is_cancelled()) { + Mmsg(ctx->errMsg, _("Job cancelled.\n")); + return -1; + } + if (ctx->obj_len) { + read_len = (ctx->obj_len > buf_len) ? buf_len : ctx->obj_len; + rbytes = fread(buf, 1, read_len, ctx->infile); + Dmsg5(dbglvl, "%s thread=%lu rbytes=%d bufsize=%u remlen=%lu\n", + ctx->caller, pthread_self(), rbytes, buf_len, ctx->obj_len); + if (rbytes <= 0) { + berrno be; + Mmsg(ctx->errMsg, "%s Error reading input file: ERR=%s\n", + ctx->caller, be.bstrerror()); + goto get_out; + } + ctx->obj_len -= rbytes; + + if (ctx->limit) { + ctx->limit->control_bwlimit(rbytes); + } + } + +get_out: + return rbytes; +} + +S3PutObjectHandler putObjectHandler = +{ + responseHandler, + &putObjectCallback +}; + + +/* + * Put a cache object into the cloud + */ +S3Status s3_driver::put_object(transfer *xfer, const char *cache_fname, const char *cloud_fname) +{ + Enter(dbglvl); + bacula_ctx ctx(xfer); + ctx.limit = upload_limit.use_bwlimit() ? 
&upload_limit : NULL; + + struct stat statbuf; + if (lstat(cache_fname, &statbuf) == -1) { + berrno be; + Mmsg2(ctx.errMsg, "Failed to stat file %s. ERR=%s\n", + cache_fname, be.bstrerror()); + goto get_out; + } + + ctx.obj_len = statbuf.st_size; + + if (!(ctx.infile = bfopen(cache_fname, "r"))) { + berrno be; + Mmsg2(ctx.errMsg, "Failed to open input file %s. ERR=%s\n", + cache_fname, be.bstrerror()); + goto get_out; + } + + ctx.caller = "S3_put_object"; + S3_put_object(&s3ctx, cloud_fname, ctx.obj_len, NULL, NULL, + &putObjectHandler, &ctx); + +get_out: + if (ctx.infile) { + fclose(ctx.infile); + } + + /* no error so far -> retrieve uploaded part info */ + if (ctx.errMsg[0] == 0) { + ilist parts; + get_cloud_volume_parts_list(xfer->m_dcr, cloud_fname, &parts, ctx.errMsg); + for (int i=1; i <= parts.last_index() ; i++) { + cloud_part *p = (cloud_part *)parts.get(i); + if (p) { + xfer->m_res_size = p->size; + xfer->m_res_mtime = p->mtime; + break; /* not need to go further */ + } + } + } + + return ctx.status; +} + +static S3Status getObjectDataCallback(int buf_len, const char *buf, + void *callbackCtx) +{ + bacula_ctx *ctx = (bacula_ctx *)callbackCtx; + ssize_t wbytes; + + Enter(dbglvl); + if (ctx->xfer->is_cancelled()) { + Mmsg(ctx->errMsg, _("Job cancelled.\n")); + return S3StatusAbortedByCallback; + } + /* Write buffer to output file */ + wbytes = fwrite(buf, 1, buf_len, ctx->outfile); + if (wbytes < 0) { + berrno be; + Mmsg(ctx->errMsg, "%s Error writing output file: ERR=%s\n", + ctx->caller, be.bstrerror()); + return S3StatusAbortedByCallback; + } + + if (ctx->limit) { + ctx->limit->control_bwlimit(wbytes); + } + return ((wbytes < buf_len) ? + S3StatusAbortedByCallback : S3StatusOK); +} + + +bool s3_driver::get_cloud_object(transfer *xfer, const char *cloud_fname, const char *cache_fname) +{ + int64_t ifModifiedSince = -1; + int64_t ifNotModifiedSince = -1; + const char *ifMatch = 0; + const char *ifNotMatch = 0; + uint64_t startByte = 0; + uint64_t byteCount = 0; + bacula_ctx ctx(xfer); + ctx.limit = download_limit.use_bwlimit() ? &download_limit : NULL; + + Enter(dbglvl); + /* Initialize handlers */ + S3GetConditions getConditions = { + ifModifiedSince, + ifNotModifiedSince, + ifMatch, + ifNotMatch + }; + S3GetObjectHandler getObjectHandler = { + { &responsePropertiesCallback, &responseCompleteCallback }, + &getObjectDataCallback + }; + + + /* see if cache file already exists */ + struct stat buf; + if (lstat(cache_fname, &buf) == -1) { + ctx.outfile = bfopen(cache_fname, "w"); + } else { + /* Exists so truncate and write from beginning */ + ctx.outfile = bfopen(cache_fname, "r+"); + } + + if (!ctx.outfile) { + berrno be; + Mmsg2(ctx.errMsg, "Could not open cache file %s. 
ERR=%s\n", + cache_fname, be.bstrerror()); + goto get_out; + } + + + ctx.caller = "S3_get_object"; + S3_get_object(&s3ctx, cloud_fname, &getConditions, startByte, + byteCount, 0, &getObjectHandler, &ctx); + + if (fclose(ctx.outfile) < 0) { + berrno be; + Mmsg2(ctx.errMsg, "Error closing cache file %s: %s\n", + cache_fname, be.bstrerror()); + } + +get_out: + return (ctx.errMsg[0] == 0); +} + +/* + * Not thread safe + */ +bool s3_driver::truncate_cloud_volume(DCR *dcr, const char *VolumeName, ilist *trunc_parts, POOLMEM *&err) +{ + Enter(dbglvl); + + bacula_ctx ctx(err); + ctx.jcr = dcr->jcr; + + int last_index = (int)trunc_parts->last_index(); + POOLMEM *cloud_fname = get_pool_memory(PM_FNAME); + for (int i=1; (i<=last_index); i++) { + if (!trunc_parts->get(i)) { + continue; + } + if (ctx.jcr->is_canceled()) { + Mmsg(err, _("Job cancelled.\n")); + goto get_out; + } + /* don't forget to specify the volume name is the object path */ + make_cloud_filename(cloud_fname, VolumeName, i); + Dmsg1(dbglvl, "Object to truncate: %s\n", cloud_fname); + ctx.caller = "S3_delete_object"; + S3_delete_object(&s3ctx, cloud_fname, 0, &responseHandler, &ctx); + if (ctx.status != S3StatusOK) { + /* error message should have been filled within response cb */ + goto get_out; + } + } + +get_out: + free_pool_memory(cloud_fname); + bfree_and_null(ctx.nextMarker); + return (err[0] == 0); +} + +void s3_driver::make_cloud_filename(POOLMEM *&filename, + const char *VolumeName, uint32_t apart) +{ + Enter(dbglvl); + filename[0] = 0; + dev->add_vol_and_part(filename, VolumeName, "part", apart); + Dmsg1(dbglvl, "make_cloud_filename: %s\n", filename); +} + +bool s3_driver::retry_put_object(S3Status status) +{ + return ( + status == S3StatusFailedToConnect || + status == S3StatusConnectionFailed + ); +} + +/* + * Copy a single cache part to the cloud + */ +bool s3_driver::copy_cache_part_to_cloud(transfer *xfer) +{ + Enter(dbglvl); + POOLMEM *cloud_fname = get_pool_memory(PM_FNAME); + make_cloud_filename(cloud_fname, xfer->m_volume_name, xfer->m_part); + uint32_t retry = max_upload_retries; + S3Status status = S3StatusOK; + do { + status = put_object(xfer, xfer->m_cache_fname, cloud_fname); + --retry; + } while (retry_put_object(status) && (retry>0)); + free_pool_memory(cloud_fname); + return (status == S3StatusOK); +} + +/* + * Copy a single object (part) from the cloud to the cache + */ +bool s3_driver::copy_cloud_part_to_cache(transfer *xfer) +{ + Enter(dbglvl); + POOLMEM *cloud_fname = get_pool_memory(PM_FNAME); + make_cloud_filename(cloud_fname, xfer->m_volume_name, xfer->m_part); + bool rtn = get_cloud_object(xfer, cloud_fname, xfer->m_cache_fname); + free_pool_memory(cloud_fname); + return rtn; +} + +/* + * NOTE: See the SD Cloud resource in stored_conf.h +*/ + +bool s3_driver::init(JCR *jcr, cloud_dev *adev, DEVRES *adevice) +{ + S3Status status; + + dev = adev; /* copy cloud device pointer */ + device = adevice; /* copy device resource pointer */ + cloud = device->cloud; /* local pointer to cloud definition */ + + /* Setup bucket context for S3 lib */ + s3ctx.hostName = cloud->host_name; + s3ctx.bucketName = cloud->bucket_name; + s3ctx.protocol = (S3Protocol)cloud->protocol; + s3ctx.uriStyle = (S3UriStyle)cloud->uri_style; + s3ctx.accessKeyId = cloud->access_key; + s3ctx.secretAccessKey = cloud->secret_key; + s3ctx.authRegion = cloud->region; + + /* File I/O buffer */ + buf_len = dev->max_block_size; + if (buf_len == 0) { + buf_len = DEFAULT_BLOCK_SIZE; + } + + if ((status = S3_initialize("s3", S3_INIT_ALL, 
s3ctx.hostName)) != S3StatusOK) { + Mmsg1(dev->errmsg, "Failed to initialize S3 lib. ERR=%s\n", S3_get_status_name(status)); + Qmsg1(jcr, M_FATAL, 0, "%s", dev->errmsg); + Tmsg1(0, "%s", dev->errmsg); + return false; + } + return true; +} + +bool s3_driver::start_of_job(DCR *dcr) +{ + Jmsg(dcr->jcr, M_INFO, 0, _("Using S3 cloud driver Host=%s Bucket=%s\n"), + s3ctx.hostName, s3ctx.bucketName); + return true; +} + +bool s3_driver::end_of_job(DCR *dcr) +{ + return true; +} + +/* + * Note, dcr may be NULL + */ +bool s3_driver::term(DCR *dcr) +{ + S3_deinitialize(); + return true; +} + + + +/* + * libs3 callback for get_cloud_volume_parts_list() + */ +static S3Status partslistBucketCallback( + int isTruncated, + const char *nextMarker, + int numObj, + const S3ListBucketContent *object, + int commonPrefixesCount, + const char **commonPrefixes, + void *callbackCtx) +{ + bacula_ctx *ctx = (bacula_ctx *)callbackCtx; + + Enter(dbglvl); + for (int i = 0; ctx->parts && (i < numObj); i++) { + const S3ListBucketContent *obj = &(object[i]); + const char *ext=strstr(obj->key, "part."); + if (obj && ext!=NULL) { + cloud_part *part = (cloud_part*) malloc(sizeof(cloud_part)); + + part->index = atoi(&(ext[5])); + part->mtime = obj->lastModified; + part->size = obj->size; + ctx->parts->put(part->index, part); + } + } + + ctx->isTruncated = isTruncated; + if (ctx->nextMarker) { + bfree_and_null(ctx->nextMarker); + } + if (nextMarker) { + ctx->nextMarker = bstrdup(nextMarker); + } + + Leave(dbglvl); + if (ctx->jcr->is_canceled()) { + Mmsg(ctx->errMsg, _("Job cancelled.\n")); + return S3StatusAbortedByCallback; + } + return S3StatusOK; +} + +S3ListBucketHandler partslistBucketHandler = +{ + responseHandler, + &partslistBucketCallback +}; + +bool s3_driver::get_cloud_volume_parts_list(DCR *dcr, const char* VolumeName, ilist *parts, POOLMEM *&err) +{ + JCR *jcr = dcr->jcr; + Enter(dbglvl); + + if (!parts || strlen(VolumeName) == 0) { + pm_strcpy(err, "Invalid argument"); + return false; + } + + bacula_ctx ctx(err); + ctx.jcr = jcr; + ctx.parts = parts; + ctx.isTruncated = 1; /* pass into the while loop at least once */ + ctx.caller = "S3_list_bucket"; + while (ctx.isTruncated!=0) { + ctx.isTruncated = 0; + S3_list_bucket(&s3ctx, VolumeName, ctx.nextMarker, NULL, 0, NULL, + &partslistBucketHandler, &ctx); + if (ctx.status != S3StatusOK) { + pm_strcpy(err, S3Errors[ctx.status]); + bfree_and_null(ctx.nextMarker); + return false; + } + } + bfree_and_null(ctx.nextMarker); + return true; + +} + +/* + * libs3 callback for get_cloud_volumes_list() + */ +static S3Status volumeslistBucketCallback( + int isTruncated, + const char *nextMarker, + int numObj, + const S3ListBucketContent *object, + int commonPrefixesCount, + const char **commonPrefixes, + void *callbackCtx) +{ + bacula_ctx *ctx = (bacula_ctx *)callbackCtx; + + Enter(dbglvl); + for (int i = 0; ctx->volumes && (i < commonPrefixesCount); i++) { + char *cp = bstrdup(commonPrefixes[i]); + cp[strlen(cp)-1] = 0; + ctx->volumes->append(cp); + } + + ctx->isTruncated = isTruncated; + if (ctx->nextMarker) { + bfree_and_null(ctx->nextMarker); + } + if (nextMarker) { + ctx->nextMarker = bstrdup(nextMarker); + } + + Leave(dbglvl); + if (ctx->jcr->is_canceled()) { + Mmsg(ctx->errMsg, _("Job cancelled.\n")); + return S3StatusAbortedByCallback; + } + return S3StatusOK; +} + +S3ListBucketHandler volumeslistBucketHandler = +{ + responseHandler, + &volumeslistBucketCallback +}; + +bool s3_driver::get_cloud_volumes_list(DCR *dcr, alist *volumes, POOLMEM *&err) +{ + JCR *jcr = 
dcr->jcr; + Enter(dbglvl); + + if (!volumes) { + pm_strcpy(err, "Invalid argument"); + return false; + } + + bacula_ctx ctx(err); + ctx.volumes = volumes; + ctx.jcr = jcr; + ctx.isTruncated = 1; /* pass into the while loop at least once */ + ctx.caller = "S3_list_bucket"; + while (ctx.isTruncated!=0) { + ctx.isTruncated = 0; + S3_list_bucket(&s3ctx, NULL, ctx.nextMarker, "/", 0, NULL, + &volumeslistBucketHandler, &ctx); + if (ctx.status != S3StatusOK) { + break; + } + } + bfree_and_null(ctx.nextMarker); + return (err[0] == 0); +} + +#ifdef really_needed +static S3Status listBucketCallback( + int isTruncated, + const char *nextMarker, + int contentsCount, + const S3ListBucketContent *contents, + int commonPrefixesCount, + const char **commonPrefixes, + void *callbackData); + +S3ListBucketHandler listBucketHandler = +{ + responseHandler, + &listBucketCallback +}; + + +/* + * List content of a bucket + */ +static S3Status listBucketCallback( + int isTruncated, + const char *nextMarker, + int numObj, + const S3ListBucketContent *contents, + int commonPrefixesCount, + const char **commonPrefixes, + void *callbackData) +{ + bacula_ctx *ctx = (bacula_ctx *)callbackCtx; + if (print_hdr) { + Pmsg1(000, "\n%-22s", " Object Name"); + Pmsg2(000, " %-5s %-20s", "Size", " Last Modified"); + Pmsg0(000, "\n---------------------- ----- --------------------\n"); + print_hdr = false; /* print header once only */ + } + + for (int i = 0; i < numObj; i++) { + char timebuf[256]; + char sizebuf[16]; + const S3ListBucketContent *content = &(contents[i]); + time_t t = (time_t) content->lastModified; + strftime(timebuf, sizeof(timebuf), "%Y-%m-%dT%H:%M:%SZ", gmtime(&t)); + sprintf(sizebuf, "%5llu", (unsigned long long) content->size); + Pmsg3(000, "%-22s %s %s\n", content->key, sizebuf, timebuf); + } + Pmsg0(000, "\n"); + if (ctx->jcr->is_canceled()) { + Mmsg(ctx->errMsg, _("Job cancelled.\n")); + return S3StatusAbortedByCallback; + } + return S3StatusOK; +} +#endif + +#endif /* HAVE_LIBS3 */ diff --git a/src/stored/s3_driver.h b/src/stored/s3_driver.h new file mode 100644 index 00000000..de039d51 --- /dev/null +++ b/src/stored/s3_driver.h @@ -0,0 +1,67 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Routines for writing to the Cloud using S3 protocol. 
+ *
+ * Written by Kern Sibbald, May MMXVI
+ */
+
+#ifndef _S3_DRV_H
+#define _S3_DRV_H
+
+#include "bacula.h"
+#include "stored.h"
+
+#ifdef HAVE_LIBS3
+#include <libs3.h>
+#include "cloud_driver.h"   /* get base class definitions */
+
+class s3_driver: public cloud_driver {
+private:
+   S3BucketContext s3ctx;       /* Main S3 bucket context */
+   uint32_t buf_len;
+
+public:
+   cloud_dev *dev;              /* device that is calling us */
+   DEVRES *device;
+   DCR *dcr;                    /* dcr set during calls to S3_xxx */
+   CLOUD *cloud;                /* Pointer to CLOUD resource */
+
+   s3_driver() {
+   };
+   ~s3_driver() {
+   };
+
+   void make_cloud_filename(POOLMEM *&filename, const char *VolumeName, uint32_t part);
+   bool init(JCR *jcr, cloud_dev *dev, DEVRES *device);
+   bool start_of_job(DCR *dcr);
+   bool term(DCR *dcr);
+   bool end_of_job(DCR *dcr);
+   bool truncate_cloud_volume(DCR *dcr, const char *VolumeName, ilist *trunc_parts, POOLMEM *&err);
+   bool copy_cache_part_to_cloud(transfer *xfer);
+   bool copy_cloud_part_to_cache(transfer *xfer);
+   bool get_cloud_volume_parts_list(DCR *dcr, const char* VolumeName, ilist *parts, POOLMEM *&err);
+   bool get_cloud_volumes_list(DCR* dcr, alist *volumes, POOLMEM *&err);
+   S3Status put_object(transfer *xfer, const char *cache_fname, const char *cloud_fname);
+   bool retry_put_object(S3Status status);
+   bool get_cloud_object(transfer *xfer, const char *cloud_fname, const char *cache_fname);
+};
+
+#endif /* HAVE_LIBS3 */
+#endif /* _S3_DRV_H */
diff --git a/src/stored/scan.c b/src/stored/scan.c
new file mode 100644
index 00000000..2ca0a6ed
--- /dev/null
+++ b/src/stored/scan.c
@@ -0,0 +1,159 @@
+/*
+   Bacula(R) - The Network Backup Solution
+
+   Copyright (C) 2000-2017 Kern Sibbald
+
+   The original author of Bacula is Kern Sibbald, with contributions
+   from many others, a complete list can be found in the file AUTHORS.
+
+   You may use this file and others of this release according to the
+   license defined in the LICENSE file, which includes the Affero General
+   Public License, v3.0 ("AGPLv3") and some additional permissions and
+   terms pursuant to its AGPLv3 Section 7.
+
+   This notice must be preserved when any source code is
+   conveyed and/or propagated.
+
+   Bacula(R) is a registered trademark of Kern Sibbald.
+*/
+/*
+ *
+ * scan.c scan a directory (on a removable file) for a valid
+ * Volume name. If found, open the file for append.
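+ *
+ * An illustrative sketch (not part of the Bacula sources) of the name
+ * filter applied by is_volume_name_legal() further down: alphanumerics
+ * plus the characters ":.-_", non-empty and shorter than MAX_NAME_LENGTH.
+ */
+#if 0   /* illustrative only, never compiled */
+   bool ok1 = is_volume_name_legal((char *)"Vol-0001.bkp");  /* accepted */
+   bool ok2 = is_volume_name_legal((char *)"bad name");      /* rejected: space */
+#endif
+/*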
+ * + * Kern Sibbald, MMVI + * + */ + +#include "bacula.h" +#include "stored.h" + +int breaddir(DIR *dirp, POOLMEM *&d_name); + +/* Forward referenced functions */ +static bool is_volume_name_legal(char *name); + + +bool DEVICE::scan_dir_for_volume(DCR *dcr) +{ + DIR* dp; + int name_max; + char *mount_point; + VOLUME_CAT_INFO dcrVolCatInfo, devVolCatInfo; + char VolumeName[MAX_NAME_LENGTH]; + struct stat statp; + bool found = false; + POOL_MEM fname(PM_FNAME); + POOL_MEM dname(PM_FNAME); + bool need_slash = false; + int len; + + dcrVolCatInfo = dcr->VolCatInfo; /* structure assignment */ + devVolCatInfo = VolCatInfo; /* structure assignment */ + bstrncpy(VolumeName, dcr->VolumeName, sizeof(VolumeName)); + + name_max = pathconf(".", _PC_NAME_MAX); + if (name_max < 1024) { + name_max = 1024; + } + + if (device->mount_point) { + mount_point = device->mount_point; + } else { + mount_point = device->device_name; + } + + if (!(dp = opendir(mount_point))) { + berrno be; + dev_errno = errno; + Dmsg3(29, "scan_dir_for_vol: failed to open dir %s (dev=%s), ERR=%s\n", + mount_point, print_name(), be.bstrerror()); + goto get_out; + } + + len = strlen(mount_point); + if (len > 0) { + need_slash = !IsPathSeparator(mount_point[len - 1]); + } + for ( ;; ) { + if (breaddir(dp, dname.addr()) != 0) { + dev_errno = EIO; + Dmsg2(129, "scan_dir_for_vol: failed to find suitable file in dir %s (dev=%s)\n", + mount_point, print_name()); + break; + } + if (strcmp(dname.c_str(), ".") == 0 || + strcmp(dname.c_str(), "..") == 0) { + continue; + } + + if (!is_volume_name_legal(dname.c_str())) { + continue; + } + pm_strcpy(fname, mount_point); + if (need_slash) { + pm_strcat(fname, "/"); + } + pm_strcat(fname, dname); + if (lstat(fname.c_str(), &statp) != 0 || + !S_ISREG(statp.st_mode)) { + continue; /* ignore directories & special files */ + } + + /* + * OK, we got a different volume mounted. First save the + * requested Volume info (dcr) structure, then query if + * this volume is really OK. If not, put back the desired + * volume name, mark it not in changer and continue. + */ + bstrncpy(dcr->VolumeName, dname.c_str(), sizeof(dcr->VolumeName)); + /* Check if this is a valid Volume in the pool */ + if (!dir_get_volume_info(dcr, dcr->VolumeName, GET_VOL_INFO_FOR_WRITE)) { + continue; + } + /* This was not the volume we expected, but it is OK with + * the Director, so use it. 
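The scan loop above follows a common pattern: open the mount point, skip the "." and ".." entries, apply a name check, and accept only regular files. A compact sketch of the same pattern with plain POSIX calls (Bacula's breaddir() and POOL_MEM helpers replaced by readdir() and a fixed buffer; is_candidate stands in for is_volume_name_legal()):

#include <dirent.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>

static int scan_mount_point(const char *mount_point,
                            int (*is_candidate)(const char *name))
{
   DIR *dp = opendir(mount_point);
   struct dirent *de;
   struct stat st;
   char path[4096];
   int found = 0;

   if (!dp) {
      return 0;
   }
   while ((de = readdir(dp)) != NULL) {
      if (strcmp(de->d_name, ".") == 0 || strcmp(de->d_name, "..") == 0) {
         continue;                      /* skip self/parent entries */
      }
      if (!is_candidate(de->d_name)) {
         continue;                      /* e.g. not a legal Volume name */
      }
      snprintf(path, sizeof(path), "%s/%s", mount_point, de->d_name);
      if (lstat(path, &st) != 0 || !S_ISREG(st.st_mode)) {
         continue;                      /* ignore directories & special files */
      }
      found = 1;                        /* first acceptable regular file wins */
      break;
   }
   closedir(dp);
   return found;
}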
+ */ + VolCatInfo = dcr->VolCatInfo; /* structure assignment */ + found = true; + break; /* got a Volume */ + } + closedir(dp); + +get_out: + if (!found) { + /* Restore VolumeName we really wanted */ + bstrncpy(dcr->VolumeName, VolumeName, sizeof(dcr->VolumeName)); + dcr->VolCatInfo = dcrVolCatInfo; /* structure assignment */ + VolCatInfo = devVolCatInfo; /* structure assignment */ + } + Dsm_check(100); + return found; +} + +/* + * Check if the Volume name has legal characters + * If ua is non-NULL send the message + */ +static bool is_volume_name_legal(char *name) +{ + int len; + const char *p; + const char *accept = ":.-_"; + + /* Restrict the characters permitted in the Volume name */ + for (p=name; *p; p++) { + if (B_ISALPHA(*p) || B_ISDIGIT(*p) || strchr(accept, (int)(*p))) { + continue; + } + return false; + } + len = strlen(name); + if (len >= MAX_NAME_LENGTH) { + return false; + } + if (len == 0) { + return false; + } + return true; +} diff --git a/src/stored/sd_plugins.c b/src/stored/sd_plugins.c new file mode 100644 index 00000000..8c70440b --- /dev/null +++ b/src/stored/sd_plugins.c @@ -0,0 +1,489 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Main program to test loading and running Bacula plugins. + * Destined to become Bacula pluginloader, ... 
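The Volume-name rule enforced by is_volume_name_legal() above can be restated as a small standalone checker: ASCII letters, digits and the characters : . - _ are allowed, and the name must be non-empty and shorter than the maximum name length. SKETCH_MAX_NAME stands in for Bacula's MAX_NAME_LENGTH, whose value is not shown in this hunk.

#include <ctype.h>
#include <string.h>

#define SKETCH_MAX_NAME 128     /* stand-in for Bacula's MAX_NAME_LENGTH */

static int volume_name_is_legal(const char *name)
{
   const char *accept = ":.-_";
   size_t len = strlen(name);

   if (len == 0 || len >= SKETCH_MAX_NAME) {
      return 0;
   }
   for (const char *p = name; *p; p++) {
      if (!isalpha((unsigned char)*p) && !isdigit((unsigned char)*p) &&
          !strchr(accept, *p)) {
         return 0;                      /* reject anything outside the accept set */
      }
   }
   return 1;
}

/* e.g. volume_name_is_legal("Vol-0001") -> 1, volume_name_is_legal("bad name") -> 0 */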
+ * + * Kern Sibbald, October 2007 + */ +#include "bacula.h" +#include "stored.h" +#include "sd_plugins.h" + +const int dbglvl = 250; +const char *plugin_type = "-sd.so"; + + +/* Forward referenced functions */ +static bRC baculaGetValue(bpContext *ctx, bsdrVariable var, void *value); +static bRC baculaSetValue(bpContext *ctx, bsdwVariable var, void *value); +static bRC baculaRegisterEvents(bpContext *ctx, ...); +static bRC baculaJobMsg(bpContext *ctx, const char *file, int line, + int type, utime_t mtime, const char *fmt, ...); +static bRC baculaDebugMsg(bpContext *ctx, const char *file, int line, + int level, const char *fmt, ...); +static char *baculaEditDeviceCodes(DCR *dcr, char *omsg, + const char *imsg, const char *cmd); +static bool is_plugin_compatible(Plugin *plugin); + + +/* Bacula info */ +static bsdInfo binfo = { + sizeof(bsdFuncs), + SD_PLUGIN_INTERFACE_VERSION +}; + +/* Bacula entry points */ +static bsdFuncs bfuncs = { + sizeof(bsdFuncs), + SD_PLUGIN_INTERFACE_VERSION, + baculaRegisterEvents, + baculaGetValue, + baculaSetValue, + baculaJobMsg, + baculaDebugMsg, + baculaEditDeviceCodes, +}; + +/* + * Bacula private context + */ +struct bacula_ctx { + JCR *jcr; /* jcr for plugin */ + bRC rc; /* last return code */ + bool disabled; /* set if plugin disabled */ +}; + +static bool is_plugin_disabled(bpContext *plugin_ctx) +{ + bacula_ctx *b_ctx; + if (!plugin_ctx) { + return true; + } + b_ctx = (bacula_ctx *)plugin_ctx->bContext; + return b_ctx->disabled; +} + +#ifdef needed +static bool is_plugin_disabled(JCR *jcr) +{ + return is_plugin_disabled(jcr->plugin_ctx); +} +#endif + +/* + * Create a plugin event + */ +int generate_plugin_event(JCR *jcr, bsdEventType eventType, void *value) +{ + bpContext *plugin_ctx; + bsdEvent event; + Plugin *plugin; + int i = 0; + bRC rc = bRC_OK; + + if (!b_plugin_list) { + Dmsg0(dbglvl, "No b_plugin_list: generate_plugin_event ignored.\n"); + return bRC_OK; + } + if (!jcr) { + Dmsg0(dbglvl, "No jcr: generate_plugin_event ignored.\n"); + return bRC_OK; + } + if (!jcr->plugin_ctx_list) { + Dmsg0(dbglvl, "No plugin_ctx_list: generate_plugin_event ignored.\n"); + return bRC_OK; /* Return if no plugins loaded */ + } + + /* Always handle JobEnd and DeviceClose requests */ + switch (eventType) { + case bsdEventJobEnd: + case bsdEventDeviceClose: + break; /* pass these through even if canceled */ + default: + if (jcr->is_job_canceled()) { + Dmsg0(dbglvl, "Cancel return from generate_plugin_event\n"); + return bRC_Cancel; + } + } + + bpContext *plugin_ctx_list = (bpContext *)jcr->plugin_ctx_list; + event.eventType = eventType; + + Dmsg2(dbglvl, "sd-plugin_ctx_list=%p JobId=%d\n", jcr->plugin_ctx_list, jcr->JobId); + + foreach_alist_index(i, plugin, b_plugin_list) { + plugin_ctx = &plugin_ctx_list[i]; + if (is_plugin_disabled(plugin_ctx)) { + continue; + } + rc = sdplug_func(plugin)->handlePluginEvent(plugin_ctx, &event, value); + if (rc != bRC_OK) { + break; + } + } + return rc; +} + + +/* + * Create a global plugin event -- i.e. 
no context + */ +int generate_global_plugin_event(bsdGlobalEventType eventType, void *value) +{ + bsdEvent event; + Plugin *plugin; + int i; + bRC rc = bRC_OK; + + if (!b_plugin_list) { + Dmsg0(dbglvl, "No b_plugin_list: generate_global_plugin_event ignored.\n"); + return bRC_OK; + } + event.eventType = eventType; + + foreach_alist_index(i, plugin, b_plugin_list) { + if (sdplug_func(plugin)->handleGlobalPluginEvent != NULL) { + rc = sdplug_func(plugin)->handleGlobalPluginEvent(&event, value); + if (rc != bRC_OK) { + break; + } + } + } + return rc; +} + + +/* + * Print to file the plugin info. + */ +void dump_sd_plugin(Plugin *plugin, FILE *fp) +{ + if (!plugin) { + return ; + } + psdInfo *info = (psdInfo *) plugin->pinfo; + fprintf(fp, "\tversion=%d\n", info->version); + fprintf(fp, "\tdate=%s\n", NPRTB(info->plugin_date)); + fprintf(fp, "\tmagic=%s\n", NPRTB(info->plugin_magic)); + fprintf(fp, "\tauthor=%s\n", NPRTB(info->plugin_author)); + fprintf(fp, "\tlicence=%s\n", NPRTB(info->plugin_license)); + fprintf(fp, "\tversion=%s\n", NPRTB(info->plugin_version)); + fprintf(fp, "\tdescription=%s\n", NPRTB(info->plugin_description)); +} + +/** + * This entry point is called internally by Bacula to ensure + * that the plugin IO calls come into this code. + */ +void load_sd_plugins(const char *plugin_dir) +{ + Plugin *plugin; + int i; + + Dmsg0(dbglvl, "Load sd plugins\n"); + if (!plugin_dir) { + Dmsg0(dbglvl, "No sd plugin dir!\n"); + return; + } + b_plugin_list = New(alist(10, not_owned_by_alist)); + if (!load_plugins((void *)&binfo, (void *)&bfuncs, plugin_dir, plugin_type, + is_plugin_compatible)) { + /* Either none found, or some error */ + if (b_plugin_list->size() == 0) { + delete b_plugin_list; + b_plugin_list = NULL; + Dmsg0(dbglvl, "No plugins loaded\n"); + return; + } + } + /* + * Verify that the plugin is acceptable, and print information + * about it. + */ + foreach_alist_index(i, plugin, b_plugin_list) { + Jmsg(NULL, M_INFO, 0, _("Loaded plugin: %s\n"), plugin->file); + Dmsg1(dbglvl, "Loaded plugin: %s\n", plugin->file); + } + + Dmsg1(dbglvl, "num plugins=%d\n", b_plugin_list->size()); + dbg_plugin_add_hook(dump_sd_plugin); +} + +/** + * Check if a plugin is compatible. Called by the load_plugin function + * to allow us to verify the plugin. + */ +static bool is_plugin_compatible(Plugin *plugin) +{ + psdInfo *info = (psdInfo *)plugin->pinfo; + Dmsg0(50, "is_plugin_compatible called\n"); + if (chk_dbglvl(50)) { + dump_sd_plugin(plugin, stdin); + } + if (strcmp(info->plugin_magic, SD_PLUGIN_MAGIC) != 0) { + Jmsg(NULL, M_ERROR, 0, _("Plugin magic wrong. Plugin=%s wanted=%s got=%s\n"), + plugin->file, SD_PLUGIN_MAGIC, info->plugin_magic); + Dmsg3(000, "Plugin magic wrong. Plugin=%s wanted=%s got=%s\n", + plugin->file, SD_PLUGIN_MAGIC, info->plugin_magic); + + return false; + } + if (info->version != SD_PLUGIN_INTERFACE_VERSION) { + Jmsg(NULL, M_ERROR, 0, _("Plugin version incorrect. Plugin=%s wanted=%d got=%d\n"), + plugin->file, SD_PLUGIN_INTERFACE_VERSION, info->version); + Dmsg3(000, "Plugin version incorrect. Plugin=%s wanted=%d got=%d\n", + plugin->file, SD_PLUGIN_INTERFACE_VERSION, info->version); + return false; + } + if (strcmp(info->plugin_license, "Bacula AGPLv3") != 0 && + strcmp(info->plugin_license, "AGPLv3") != 0 && + strcmp(info->plugin_license, "Bacula") != 0) { + Jmsg(NULL, M_ERROR, 0, _("Plugin license incompatible. Plugin=%s license=%s\n"), + plugin->file, info->plugin_license); + Dmsg2(000, "Plugin license incompatible. 
Plugin=%s license=%s\n", + plugin->file, info->plugin_license); + return false; + } + if (info->size != sizeof(psdInfo)) { + Jmsg(NULL, M_ERROR, 0, + _("Plugin size incorrect. Plugin=%s wanted=%d got=%d\n"), + plugin->file, sizeof(psdInfo), info->size); + return false; + } + + return true; +} + + +/* + * Create a new instance of each plugin for this Job + */ +void new_plugins(JCR *jcr) +{ + Plugin *plugin; + int i = 0;; + + Dmsg0(dbglvl, "=== enter new_plugins ===\n"); + if (!b_plugin_list) { + Dmsg0(dbglvl, "No sd plugin list!\n"); + return; + } + if (jcr->is_job_canceled()) { + return; + } + /* + * If plugins already loaded, just return + */ + if (jcr->plugin_ctx_list) { + return; + } + + int num = b_plugin_list->size(); + + Dmsg1(dbglvl, "sd-plugin-list size=%d\n", num); + if (num == 0) { + return; + } + + jcr->plugin_ctx_list = (bpContext *)malloc(sizeof(bpContext) * num); + + bpContext *plugin_ctx_list = jcr->plugin_ctx_list; + Dmsg2(dbglvl, "Instantiate sd-plugin_ctx_list=%p JobId=%d\n", jcr->plugin_ctx_list, jcr->JobId); + foreach_alist_index(i, plugin, b_plugin_list) { + /* Start a new instance of each plugin */ + bacula_ctx *b_ctx = (bacula_ctx *)malloc(sizeof(bacula_ctx)); + memset(b_ctx, 0, sizeof(bacula_ctx)); + b_ctx->jcr = jcr; + plugin_ctx_list[i].bContext = (void *)b_ctx; + plugin_ctx_list[i].pContext = NULL; + if (sdplug_func(plugin)->newPlugin(&plugin_ctx_list[i]) != bRC_OK) { + b_ctx->disabled = true; + } + } +} + +/* + * Free the plugin instances for this Job + */ +void free_plugins(JCR *jcr) +{ + Plugin *plugin; + int i = 0; + + if (!b_plugin_list || !jcr->plugin_ctx_list) { + return; + } + + bpContext *plugin_ctx_list = (bpContext *)jcr->plugin_ctx_list; + Dmsg2(dbglvl, "Free instance sd-plugin_ctx_list=%p JobId=%d\n", jcr->plugin_ctx_list, jcr->JobId); + foreach_alist_index(i, plugin, b_plugin_list) { + /* Free the plugin instance */ + sdplug_func(plugin)->freePlugin(&plugin_ctx_list[i]); + free(plugin_ctx_list[i].bContext); /* free Bacula private context */ + } + free(plugin_ctx_list); + jcr->plugin_ctx_list = NULL; +} + + +/* ============================================================== + * + * Callbacks from the plugin + * + * ============================================================== + */ + +static bRC baculaGetValue(bpContext *ctx, bsdrVariable var, void *value) +{ + JCR *jcr; + if (!ctx) { + return bRC_Error; + } + jcr = ((bacula_ctx *)ctx->bContext)->jcr; + if (!jcr) { + return bRC_Error; + } + if (!value) { + return bRC_Error; + } + switch (var) { + case bsdVarJobId: + *((int *)value) = jcr->JobId; + Dmsg1(dbglvl, "sd-plugin: return bVarJobId=%d\n", jcr->JobId); + break; + case bsdVarJobName: + *((char **)value) = jcr->Job; + Dmsg1(dbglvl, "Bacula: return Job name=%s\n", jcr->Job); + break; +#if 0 + case bsdVarDevTypes: + *((s_kw **)value) = dev_types; +#endif + default: + break; + } + return bRC_OK; +} + +static bRC baculaSetValue(bpContext *ctx, bsdwVariable var, void *value) +{ + JCR *jcr; + if (!value || !ctx) { + return bRC_Error; + } +// Dmsg1(dbglvl, "bacula: baculaSetValue var=%d\n", var); + jcr = ((bacula_ctx *)ctx->bContext)->jcr; + if (!jcr) { + return bRC_Error; + } +// Dmsg1(dbglvl, "Bacula: jcr=%p\n", jcr); + /* Nothing implemented yet */ + Dmsg1(dbglvl, "sd-plugin: baculaSetValue var=%d\n", var); + return bRC_OK; +} + +static bRC baculaRegisterEvents(bpContext *ctx, ...) 
+{ + va_list args; + uint32_t event; + + va_start(args, ctx); + while ((event = va_arg(args, uint32_t))) { + Dmsg1(dbglvl, "sd-Plugin wants event=%u\n", event); + } + va_end(args); + return bRC_OK; +} + +static bRC baculaJobMsg(bpContext *ctx, const char *file, int line, + int type, utime_t mtime, const char *fmt, ...) +{ + va_list arg_ptr; + char buf[2000]; + JCR *jcr; + + if (ctx) { + jcr = ((bacula_ctx *)ctx->bContext)->jcr; + } else { + jcr = NULL; + } + + va_start(arg_ptr, fmt); + bvsnprintf(buf, sizeof(buf), fmt, arg_ptr); + va_end(arg_ptr); + Jmsg(jcr, type, mtime, "%s", buf); + return bRC_OK; +} + +static bRC baculaDebugMsg(bpContext *ctx, const char *file, int line, + int level, const char *fmt, ...) +{ + va_list arg_ptr; + char buf[2000]; + + va_start(arg_ptr, fmt); + bvsnprintf(buf, sizeof(buf), fmt, arg_ptr); + va_end(arg_ptr); + d_msg(file, line, level, "%s", buf); + return bRC_OK; +} + +static char *baculaEditDeviceCodes(DCR *dcr, char *omsg, + const char *imsg, const char *cmd) +{ + return edit_device_codes(dcr, omsg, imsg, cmd); +} + +#ifdef TEST_PROGRAM + +int main(int argc, char *argv[]) +{ + char plugin_dir[1000]; + JCR mjcr1, mjcr2; + JCR *jcr1 = &mjcr1; + JCR *jcr2 = &mjcr2; + + strcpy(my_name, "test-dir"); + + getcwd(plugin_dir, sizeof(plugin_dir)-1); + load_sd_plugins(plugin_dir); + + jcr1->JobId = 111; + new_plugins(jcr1); + + jcr2->JobId = 222; + new_plugins(jcr2); + + generate_plugin_event(jcr1, bsdEventJobStart, (void *)"Start Job 1"); + generate_plugin_event(jcr1, bsdEventJobEnd); + generate_plugin_event(jcr2, bsdEventJobStart, (void *)"Start Job 1"); + free_plugins(jcr1); + generate_plugin_event(jcr2, bsdEventJobEnd); + free_plugins(jcr2); + + unload_plugins(); + + Dmsg0(dbglvl, "sd-plugin: Test OK ...\n"); + close_memory_pool(); + sm_dump(false); + return 0; +} + +#endif /* TEST_PROGRAM */ diff --git a/src/stored/sd_plugins.h b/src/stored/sd_plugins.h new file mode 100644 index 00000000..1932f332 --- /dev/null +++ b/src/stored/sd_plugins.h @@ -0,0 +1,190 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
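A sketch of the plugin side of the callbacks implemented above: a loaded SD plugin is handed the bsdFuncs table (declared in sd_plugins.h below) and can register the events it wants and query job values. How the table reaches the plugin (the loadPlugin entry point) is not part of this hunk, so the bfuncs pointer and the example_new_plugin name are assumptions; the varargs event list is terminated by 0 exactly as baculaRegisterEvents() above consumes it.

#include "sd_plugins.h"

static bsdFuncs *bfuncs = NULL;          /* assumed to be filled in at plugin load time */

static bRC example_new_plugin(bpContext *ctx)
{
   int jobid = 0;

   /* register for a few of the events defined in sd_plugins.h */
   bfuncs->registerBaculaEvents(ctx, bsdEventJobStart, bsdEventJobEnd,
                                bsdEventDeviceClose, 0);

   /* baculaGetValue() above writes the JobId into an int for bsdVarJobId */
   if (bfuncs->getBaculaValue(ctx, bsdVarJobId, &jobid) == bRC_OK) {
      bfuncs->DebugMessage(ctx, __FILE__, __LINE__, 50,
                           "example plugin attached to JobId=%d\n", jobid);
   }
   return bRC_OK;
}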
+*/ +/* + * Interface definition for Bacula Plugins + * + * Kern Sibbald, October 2007 + * + */ + +#ifndef __SD_PLUGINS_H +#define __SD_PLUGINS_H + +#ifndef _BACULA_H +#ifdef __cplusplus +/* Workaround for SGI IRIX 6.5 */ +#define _LANGUAGE_C_PLUS_PLUS 1 +#endif +#define _REENTRANT 1 +#define _THREAD_SAFE 1 +#define _POSIX_PTHREAD_SEMANTICS 1 +#define _FILE_OFFSET_BITS 64 +#define _LARGEFILE_SOURCE 1 +#define _LARGE_FILES 1 +#endif + +#include +#ifndef __CONFIG_H +#define __CONFIG_H +#include "config.h" +#endif +#include "bc_types.h" +#include "lib/plugins.h" + +#ifdef __cplusplus +extern "C" { +#endif + + + + +/**************************************************************************** + * * + * Bacula definitions * + * * + ****************************************************************************/ + +/* Bacula Variable Ids */ +typedef enum { + bsdVarJob = 1, + bsdVarLevel = 2, + bsdVarType = 3, + bsdVarJobId = 4, + bsdVarClient = 5, + bsdVarNumVols = 6, + bsdVarPool = 7, + bsdVarStorage = 8, + bsdVarCatalog = 9, + bsdVarMediaType = 10, + bsdVarJobName = 11, + bsdVarJobStatus = 12, + bsdVarPriority = 13, + bsdVarVolumeName = 14, + bsdVarCatalogRes = 15, + bsdVarJobErrors = 16, + bsdVarJobFiles = 17, + bsdVarSDJobFiles = 18, + bsdVarSDErrors = 19, + bsdVarFDJobStatus = 20, + bsdVarSDJobStatus = 21 +} bsdrVariable; + +typedef enum { + bsdwVarJobReport = 1, + bsdwVarVolumeName = 2, + bsdwVarPriority = 3, + bsdwVarJobLevel = 4 +} bsdwVariable; + + +typedef enum { + bsdEventJobStart = 1, + bsdEventJobEnd = 2, + bsdEventDeviceInit = 3, + bsdEventDeviceOpen = 4, + bsdEventDeviceTryOpen = 5, + bsdEventDeviceClose = 6 +} bsdEventType; + +typedef enum { + bsdGlobalEventDeviceInit = 1 +} bsdGlobalEventType; + + +typedef struct s_bsdEvent { + uint32_t eventType; +} bsdEvent; + +typedef struct s_sdbaculaInfo { + uint32_t size; + uint32_t version; +} bsdInfo; + +/* Bacula interface version and function pointers */ +typedef struct s_sdbaculaFuncs { + uint32_t size; + uint32_t version; + bRC (*registerBaculaEvents)(bpContext *ctx, ...); + bRC (*getBaculaValue)(bpContext *ctx, bsdrVariable var, void *value); + bRC (*setBaculaValue)(bpContext *ctx, bsdwVariable var, void *value); + bRC (*JobMessage)(bpContext *ctx, const char *file, int line, + int type, utime_t mtime, const char *fmt, ...); + bRC (*DebugMessage)(bpContext *ctx, const char *file, int line, + int level, const char *fmt, ...); + char *(*EditDeviceCodes)(DCR *dcr, char *omsg, + const char *imsg, const char *cmd); +} bsdFuncs; + +/* Bacula Subroutines */ +void load_sd_plugins(const char *plugin_dir); +void new_plugins(JCR *jcr); +void free_plugins(JCR *jcr); +int generate_plugin_event(JCR *jcr, bsdEventType event, void *value=NULL); +int generate_global_plugin_event(bsdGlobalEventType event, void *value=NULL); + + +/**************************************************************************** + * * + * Plugin definitions * + * * + ****************************************************************************/ + +typedef enum { + psdVarName = 1, + psdVarDescription = 2 +} psdVariable; + + +#define SD_PLUGIN_MAGIC "*BaculaSDPluginData*" + +#define SD_PLUGIN_INTERFACE_VERSION ( 12 ) + +typedef struct s_sdpluginInfo { + uint32_t size; + uint32_t version; + const char *plugin_magic; + const char *plugin_license; + const char *plugin_author; + const char *plugin_date; + const char *plugin_version; + const char *plugin_description; +} psdInfo; + +/* + * Functions that must be defined in every plugin + */ +typedef struct s_sdpluginFuncs { + uint32_t 
size; + uint32_t version; + bRC (*newPlugin)(bpContext *ctx); + bRC (*freePlugin)(bpContext *ctx); + bRC (*getPluginValue)(bpContext *ctx, psdVariable var, void *value); + bRC (*setPluginValue)(bpContext *ctx, psdVariable var, void *value); + bRC (*handlePluginEvent)(bpContext *ctx, bsdEvent *event, void *value); + bRC (*handleGlobalPluginEvent)(bsdEvent *event, void *value); +} psdFuncs; + +#define sdplug_func(plugin) ((psdFuncs *)(plugin->pfuncs)) +#define sdplug_info(plugin) ((psdInfo *)(plugin->pinfo)) + +#ifdef __cplusplus +} +#endif + +#endif /* __SD_PLUGINS_H */ diff --git a/src/stored/spool.c b/src/stored/spool.c new file mode 100644 index 00000000..cfb16f85 --- /dev/null +++ b/src/stored/spool.c @@ -0,0 +1,782 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Spooling code + * + * Kern Sibbald, March 2004 + * + */ + +#include "bacula.h" +#include "stored.h" + +/* Forward referenced subroutines */ +static void make_unique_data_spool_filename(DCR *dcr, POOLMEM **name); +static bool open_data_spool_file(DCR *dcr); +static bool close_data_spool_file(DCR *dcr); +static bool despool_data(DCR *dcr, bool commit); +static int read_block_from_spool_file(DCR *dcr); +static bool open_attr_spool_file(JCR *jcr, BSOCK *bs); +static bool close_attr_spool_file(JCR *jcr, BSOCK *bs); +static ssize_t write_spool_header(DCR *dcr, ssize_t *expected); +static ssize_t write_spool_data(DCR *dcr, ssize_t *expected); +static bool write_spool_block(DCR *dcr); + +struct spool_stats_t { + uint32_t data_jobs; /* current jobs spooling data */ + uint32_t attr_jobs; + uint32_t total_data_jobs; /* total jobs to have spooled data */ + uint32_t total_attr_jobs; + int64_t max_data_size; /* max data size */ + int64_t max_attr_size; + int64_t data_size; /* current data size (all jobs running) */ + int64_t attr_size; +}; + +static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; +spool_stats_t spool_stats; + +/* + * Header for data spool record */ +struct spool_hdr { + int32_t FirstIndex; /* FirstIndex for buffer */ + int32_t LastIndex; /* LastIndex for buffer */ + uint32_t len; /* length of next buffer */ +}; + +enum { + RB_EOT = 1, + RB_ERROR, + RB_OK +}; + +void list_spool_stats(void sendit(const char *msg, int len, void *sarg), void *arg) +{ + char ed1[30], ed2[30]; + POOL_MEM msg(PM_MESSAGE); + int len; + + len = Mmsg(msg, _("Spooling statistics:\n")); + + if (spool_stats.data_jobs || spool_stats.max_data_size) { + len = Mmsg(msg, _("Data spooling: %u active jobs, %s bytes; %u total jobs, %s max bytes/job.\n"), + spool_stats.data_jobs, edit_uint64_with_commas(spool_stats.data_size, ed1), + spool_stats.total_data_jobs, + edit_uint64_with_commas(spool_stats.max_data_size, ed2)); + + sendit(msg.c_str(), len, arg); + } + if (spool_stats.attr_jobs || spool_stats.max_attr_size) { + len = Mmsg(msg, _("Attr spooling: %u active jobs, %s bytes; %u total jobs, %s max bytes.\n"), + spool_stats.attr_jobs, 
edit_uint64_with_commas(spool_stats.attr_size, ed1), + spool_stats.total_attr_jobs, + edit_uint64_with_commas(spool_stats.max_attr_size, ed2)); + + sendit(msg.c_str(), len, arg); + } +} + +bool begin_data_spool(DCR *dcr) +{ + bool stat = true; + if (dcr->dev->is_aligned()) { + dcr->jcr->spool_data = false; + } + if (dcr->jcr->spool_data) { + Dmsg0(100, "Turning on data spooling\n"); + dcr->spool_data = true; + stat = open_data_spool_file(dcr); + if (stat) { + dcr->spooling = true; + Jmsg(dcr->jcr, M_INFO, 0, _("Spooling data ...\n")); + P(mutex); + spool_stats.data_jobs++; + V(mutex); + } + } + return stat; +} + +bool discard_data_spool(DCR *dcr) +{ + if (dcr->spooling) { + Dmsg0(100, "Data spooling discarded\n"); + return close_data_spool_file(dcr); + } + return true; +} + +bool commit_data_spool(DCR *dcr) +{ + bool stat; + + if (dcr->spooling) { + Dmsg0(100, "Committing spooled data\n"); + stat = despool_data(dcr, true /*commit*/); + if (!stat) { + Dmsg1(100, _("Bad return from despool WroteVol=%d\n"), dcr->WroteVol); + close_data_spool_file(dcr); + return false; + } + return close_data_spool_file(dcr); + } + return true; +} + +static void make_unique_data_spool_filename(DCR *dcr, POOLMEM **name) +{ + const char *dir; + if (dcr->dev->device->spool_directory) { + dir = dcr->dev->device->spool_directory; + } else { + dir = working_directory; + } + Mmsg(name, "%s/%s.data.%u.%s.%s.spool", dir, my_name, dcr->jcr->JobId, + dcr->jcr->Job, dcr->device->hdr.name); +} + + +static bool open_data_spool_file(DCR *dcr) +{ + POOLMEM *name = get_pool_memory(PM_MESSAGE); + int spool_fd; + + make_unique_data_spool_filename(dcr, &name); + if ((spool_fd = open(name, O_CREAT|O_TRUNC|O_RDWR|O_BINARY|O_CLOEXEC, 0640)) >= 0) { + dcr->spool_fd = spool_fd; + dcr->jcr->spool_attributes = true; + } else { + berrno be; + Jmsg(dcr->jcr, M_FATAL, 0, _("Open data spool file %s failed: ERR=%s\n"), name, + be.bstrerror()); + free_pool_memory(name); + return false; + } + Dmsg1(100, "Created spool file: %s\n", name); + free_pool_memory(name); + return true; +} + +static const char *spool_name = "*spool*"; + +/* + * NB! This routine locks the device, but if committing will + * not unlock it. If not committing, it will be unlocked. + */ +static bool despool_data(DCR *dcr, bool commit) +{ + DEVICE *rdev; + DCR *rdcr; + bool ok = true; + DEV_BLOCK *block; + JCR *jcr = dcr->jcr; + int stat; + char ec1[50]; + + Dmsg0(100, "Despooling data\n"); + if (jcr->dcr->job_spool_size == 0) { + Jmsg(jcr, M_WARNING, 0, _("Despooling zero bytes. Your disk is probably FULL!\n")); + } + + /* + * Commit means that the job is done, so we commit, otherwise, we + * are despooling because of user spool size max or some error + * (e.g. filesystem full). + */ + if (commit) { + Jmsg(jcr, M_INFO, 0, _("Committing spooled data to Volume \"%s\". Despooling %s bytes ...\n"), + jcr->dcr->VolumeName, + edit_uint64_with_commas(jcr->dcr->job_spool_size, ec1)); + jcr->setJobStatus(JS_DataCommitting); + } else { + Jmsg(jcr, M_INFO, 0, _("Writing spooled data to Volume. Despooling %s bytes ...\n"), + edit_uint64_with_commas(jcr->dcr->job_spool_size, ec1)); + jcr->setJobStatus(JS_DataDespooling); + } + jcr->sendJobStatus(JS_DataDespooling); + dcr->despool_wait = true; + dcr->spooling = false; + /* + * We work with device blocked, but not locked so that + * other threads -- e.g. reservations can lock the device + * structure. 
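To make the spool-file naming concrete, the sketch below renders the same format string used by make_unique_data_spool_filename() above with plain snprintf. All values are examples only; the real ones come from the spool (or working) directory, my_name, the JCR and the device resource.

#include <stdio.h>

static void example_spool_name(char *buf, size_t len)
{
   const char *dir    = "/opt/bacula/working";                /* spool_directory or working_directory */
   const char *daemon = "bacula-sd";                          /* my_name */
   unsigned    jobid  = 1234;                                 /* jcr->JobId */
   const char *job    = "SomeJob.2019-02-05_00.10.15_03";     /* jcr->Job, example only */
   const char *device = "FileStorage";                        /* device->hdr.name */

   /* "%s/%s.data.%u.%s.%s.spool", as in the Mmsg() format above */
   snprintf(buf, len, "%s/%s.data.%u.%s.%s.spool",
            dir, daemon, jobid, job, device);
}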
+ */ + dcr->dblock(BST_DESPOOLING); + dcr->despool_wait = false; + dcr->despooling = true; + + /* + * This is really quite kludgy and should be fixed some time. + * We create a dev structure to read from the spool file + * in rdev and rdcr. + */ + rdev = New(file_dev); + rdev->dev_name = get_memory(strlen(spool_name)+1); + bstrncpy(rdev->dev_name, spool_name, strlen(spool_name)+1); + rdev->errmsg = get_pool_memory(PM_EMSG); + *rdev->errmsg = 0; + rdev->max_block_size = dcr->dev->max_block_size; + rdev->min_block_size = dcr->dev->min_block_size; + rdev->device = dcr->dev->device; + rdcr = new_dcr(jcr, NULL, rdev, SD_READ); + rdcr->spool_fd = dcr->spool_fd; + block = dcr->block; /* save block */ + dcr->block = rdcr->block; /* make read and write block the same */ + + Dmsg1(800, "read/write block size = %d\n", block->buf_len); + lseek(rdcr->spool_fd, 0, SEEK_SET); /* rewind */ + +#if defined(HAVE_POSIX_FADVISE) && defined(POSIX_FADV_WILLNEED) + posix_fadvise(rdcr->spool_fd, 0, 0, POSIX_FADV_WILLNEED); +#endif + + /* Add run time, to get current wait time */ + int32_t despool_start = time(NULL) - jcr->run_time; + + set_new_file_parameters(dcr); + + for ( ; ok; ) { + stat = read_block_from_spool_file(rdcr); + if (stat == RB_EOT) { + break; + } else if (stat == RB_ERROR) { + ok = false; + break; + } + ok = dcr->write_block_to_device(); + + if (jcr->is_canceled()) { + ok = false; + break; + } + if (!ok) { + Jmsg2(jcr, M_FATAL, 0, _("Fatal append error on device %s: ERR=%s\n"), + dcr->dev->print_name(), dcr->dev->bstrerror()); + Pmsg2(000, "Fatal append error on device %s: ERR=%s\n", + dcr->dev->print_name(), dcr->dev->bstrerror()); + /* Force in case Incomplete set */ + jcr->forceJobStatus(JS_FatalError); + } + Dmsg3(800, "Write block ok=%d FI=%d LI=%d\n", ok, block->FirstIndex, block->LastIndex); + } + + if (!dir_create_jobmedia_record(dcr)) { + Jmsg2(jcr, M_FATAL, 0, _("Could not create JobMedia record for Volume=\"%s\" Job=%s\n"), + dcr->getVolCatName(), jcr->Job); + jcr->forceJobStatus(JS_FatalError); /* override any Incomplete */ + } + flush_jobmedia_queue(jcr); + /* Set new file/block parameters for current dcr */ + set_new_file_parameters(dcr); + + /* + * Subtracting run_time give us elapsed time - wait_time since + * we started despooling. 
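The core of despool_data() above is a read-until-EOT loop: each spooled block is read back and re-written to the real device, stopping on end of data, read error, write error or job cancellation. A control-flow-only sketch, with the Bacula block and device calls replaced by hypothetical stand-ins:

enum { SK_EOT = 1, SK_ERROR, SK_OK };       /* mirrors RB_EOT / RB_ERROR / RB_OK */

extern int  read_spool_block(void *ctx);    /* stand-in for read_block_from_spool_file() */
extern bool write_block(void *ctx);         /* stand-in for dcr->write_block_to_device() */
extern bool job_is_canceled(void *ctx);

static bool despool_all(void *ctx)
{
   bool ok = true;

   for ( ; ok; ) {
      int stat = read_spool_block(ctx);
      if (stat == SK_EOT) {
         break;                             /* all spooled blocks written */
      } else if (stat == SK_ERROR) {
         ok = false;
         break;
      }
      ok = write_block(ctx);                /* a fatal append error ends the loop */
      if (job_is_canceled(ctx)) {
         ok = false;
         break;
      }
   }
   return ok;
}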
Note, don't use time_t as it is 32 or 64 + * bits depending on the OS and doesn't edit with %d + */ + int32_t despool_elapsed = time(NULL) - despool_start - jcr->run_time; + + if (despool_elapsed <= 0) { + despool_elapsed = 1; + } + + Jmsg(jcr, M_INFO, 0, _("Despooling elapsed time = %02d:%02d:%02d, Transfer rate = %s Bytes/second\n"), + despool_elapsed / 3600, despool_elapsed % 3600 / 60, despool_elapsed % 60, + edit_uint64_with_suffix(jcr->dcr->job_spool_size / despool_elapsed, ec1)); + + dcr->block = block; /* reset block */ + + lseek(rdcr->spool_fd, 0, SEEK_SET); /* rewind */ + if (ftruncate(rdcr->spool_fd, 0) != 0) { + berrno be; + Jmsg(jcr, M_ERROR, 0, _("Ftruncate spool file failed: ERR=%s\n"), + be.bstrerror()); + /* Note, try continuing despite ftruncate problem */ + } + + P(mutex); + if (spool_stats.data_size < dcr->job_spool_size) { + spool_stats.data_size = 0; + } else { + spool_stats.data_size -= dcr->job_spool_size; + } + V(mutex); + P(dcr->dev->spool_mutex); + dcr->dev->spool_size -= dcr->job_spool_size; + dcr->job_spool_size = 0; /* zap size in input dcr */ + V(dcr->dev->spool_mutex); + free_memory(rdev->dev_name); + free_pool_memory(rdev->errmsg); + /* Be careful to NULL the jcr and free rdev after free_dcr() */ + rdcr->jcr = NULL; + rdcr->set_dev(NULL); + free_dcr(rdcr); + free(rdev); + dcr->spooling = true; /* turn on spooling again */ + dcr->despooling = false; + /* + * Note, if committing we leave the device blocked. It will be removed in + * release_device(); + */ + if (!commit) { + dcr->dev->dunblock(); + } + jcr->sendJobStatus(JS_Running); + return ok; +} + +/* + * Read a block from the spool file + * + * Returns RB_OK on success + * RB_EOT when file done + * RB_ERROR on error + */ +static int read_block_from_spool_file(DCR *dcr) +{ + uint32_t rlen; + ssize_t stat; + spool_hdr hdr; + DEV_BLOCK *block = dcr->block; + JCR *jcr = dcr->jcr; + + rlen = sizeof(hdr); + stat = read(dcr->spool_fd, (char *)&hdr, (size_t)rlen); + if (stat == 0) { + Dmsg0(100, "EOT on spool read.\n"); + return RB_EOT; + } else if (stat != (ssize_t)rlen) { + if (stat == -1) { + berrno be; + Jmsg(dcr->jcr, M_FATAL, 0, _("Spool header read error. ERR=%s\n"), + be.bstrerror()); + } else { + Pmsg2(000, _("Spool read error. Wanted %u bytes, got %d\n"), rlen, stat); + Jmsg2(jcr, M_FATAL, 0, _("Spool header read error. Wanted %u bytes, got %d\n"), rlen, stat); + } + jcr->forceJobStatus(JS_FatalError); /* override any Incomplete */ + return RB_ERROR; + } + rlen = hdr.len; + if (rlen > block->buf_len) { + Pmsg2(000, _("Spool block too big. Max %u bytes, got %u\n"), block->buf_len, rlen); + Jmsg2(jcr, M_FATAL, 0, _("Spool block too big. Max %u bytes, got %u\n"), block->buf_len, rlen); + jcr->forceJobStatus(JS_FatalError); /* override any Incomplete */ + return RB_ERROR; + } + stat = read(dcr->spool_fd, (char *)block->buf, (size_t)rlen); + if (stat != (ssize_t)rlen) { + Pmsg2(000, _("Spool data read error. Wanted %u bytes, got %d\n"), rlen, stat); + Jmsg2(dcr->jcr, M_FATAL, 0, _("Spool data read error. 
Wanted %u bytes, got %d\n"), rlen, stat); + jcr->forceJobStatus(JS_FatalError); /* override any Incomplete */ + return RB_ERROR; + } + /* Setup write pointers */ + block->binbuf = rlen; + block->bufp = block->buf + block->binbuf; + block->FirstIndex = hdr.FirstIndex; + block->LastIndex = hdr.LastIndex; + block->VolSessionId = dcr->jcr->VolSessionId; + block->VolSessionTime = dcr->jcr->VolSessionTime; + + Dmsg2(800, "Read block FI=%d LI=%d\n", block->FirstIndex, block->LastIndex); + return RB_OK; +} + +/* + * Write a block to the spool file + * + * Returns: true on success or EOT + * false on hard error + */ +bool write_block_to_spool_file(DCR *dcr) +{ + uint32_t wlen, hlen; /* length to write */ + bool despool = false; + DEV_BLOCK *block = dcr->block; + + if (job_canceled(dcr->jcr)) { + return false; + } + ASSERT(block->binbuf == ((uint32_t) (block->bufp - block->buf))); + if (block->binbuf <= WRITE_BLKHDR_LENGTH) { /* Does block have data in it? */ + return true; + } + + hlen = sizeof(spool_hdr); + wlen = block->binbuf; + P(dcr->dev->spool_mutex); + dcr->job_spool_size += hlen + wlen; + dcr->dev->spool_size += hlen + wlen; + if ((dcr->max_job_spool_size > 0 && dcr->job_spool_size >= dcr->max_job_spool_size) || + (dcr->dev->max_spool_size > 0 && dcr->dev->spool_size >= dcr->dev->max_spool_size)) { + despool = true; + } + V(dcr->dev->spool_mutex); + P(mutex); + spool_stats.data_size += hlen + wlen; + if (spool_stats.data_size > spool_stats.max_data_size) { + spool_stats.max_data_size = spool_stats.data_size; + } + V(mutex); + if (despool) { + char ec1[30], ec2[30]; + if (dcr->max_job_spool_size > 0) { + Jmsg(dcr->jcr, M_INFO, 0, _("User specified Job spool size reached: " + "JobSpoolSize=%s MaxJobSpoolSize=%s\n"), + edit_uint64_with_commas(dcr->job_spool_size, ec1), + edit_uint64_with_commas(dcr->max_job_spool_size, ec2)); + } else { + Jmsg(dcr->jcr, M_INFO, 0, _("User specified Device spool size reached: " + "DevSpoolSize=%s MaxDevSpoolSize=%s\n"), + edit_uint64_with_commas(dcr->dev->spool_size, ec1), + edit_uint64_with_commas(dcr->dev->max_spool_size, ec2)); + } + + if (!despool_data(dcr, false)) { + Pmsg0(000, _("Bad return from despool in write_block.\n")); + return false; + } + /* Despooling cleared these variables so reset them */ + P(dcr->dev->spool_mutex); + dcr->job_spool_size += hlen + wlen; + dcr->dev->spool_size += hlen + wlen; + V(dcr->dev->spool_mutex); + Jmsg(dcr->jcr, M_INFO, 0, _("Spooling data again ...\n")); + } + + if (!write_spool_block(dcr)) { + return false; + } + + Dmsg2(800, "Wrote block FI=%d LI=%d\n", block->FirstIndex, block->LastIndex); + empty_block(block); + return true; +} + +static bool rewind_spoolfile(DCR *dcr, ssize_t size, ssize_t expected) +{ + JCR *jcr = dcr->jcr; + if (size == 0) { + return true; /* nothing to do */ + } + Jmsg(jcr, M_ERROR, 0, _("Error writing header to spool file." + " Disk probably full. Attempting recovery. 
Wanted to write=%d got=%d\n"), + (int)expected, (int)size); +#if defined(HAVE_WIN32) + boffset_t pos = _lseeki64(dcr->spool_fd, (__int64)0, SEEK_CUR); +#else + boffset_t pos = lseek(dcr->spool_fd, 0, SEEK_CUR); +#endif + if (ftruncate(dcr->spool_fd, pos - size) != 0) { + berrno be; + Jmsg(dcr->jcr, M_ERROR, 0, _("Ftruncate spool file failed: ERR=%s\n"), + be.bstrerror()); + /* Note, try continuing despite ftruncate problem */ + } + if (!despool_data(dcr, false)) { + Jmsg(jcr, M_FATAL, 0, _("Fatal despooling error.")); + jcr->forceJobStatus(JS_FatalError); /* override any Incomplete */ + return false; + } + return true; +} + +static bool write_spool_block(DCR *dcr) +{ + ssize_t size = 0, ret; + ssize_t expected = 0; + + for (int retry=0; retry <= 1; retry++) { + /* Rewind if needed */ + if (size > 0 && !rewind_spoolfile(dcr, size, expected)) { + return false; + } + + /* Try to write the header */ + ret = write_spool_header(dcr, &expected); + if (ret == -1) { /* I/O error, it's fatal */ + goto bail_out; + + } else { + size += ret; /* Keep the size written for a future rewind */ + } + + if (ret != expected) { /* We don't have the size expected, rewind, despool and retry */ + continue; + } + + ret = write_spool_data(dcr, &expected); + if (ret == -1) { /* I/O Error, it's fatal */ + goto bail_out; + + } else { + size += ret; /* Keep the size written for a furture rewind */ + } + + if (ret != expected) { /* We don't have the size expected, rewind, despool and retry */ + continue; + } + + return true; + } + +bail_out: + berrno be; + Jmsg(dcr->jcr, M_FATAL, 0, _("Error writing block to spool file. ERR=%s\n"), + be.bstrerror()); + dcr->jcr->forceJobStatus(JS_FatalError); /* override any Incomplete */ + return false; +} + +static ssize_t write_spool_header(DCR *dcr, ssize_t *expected) +{ + spool_hdr hdr; + DEV_BLOCK *block = dcr->block; + + hdr.FirstIndex = block->FirstIndex; + hdr.LastIndex = block->LastIndex; + hdr.len = block->binbuf; + *expected = sizeof(hdr); + + /* Write header */ + return write(dcr->spool_fd, (char*)&hdr, sizeof(hdr)); +} + + +static ssize_t write_spool_data(DCR *dcr, ssize_t *expected) +{ + DEV_BLOCK *block = dcr->block; + *expected = block->binbuf; + return write(dcr->spool_fd, block->buf, (size_t)block->binbuf); +} + +static bool close_data_spool_file(DCR *dcr) +{ + POOLMEM *name = get_pool_memory(PM_MESSAGE); + + P(mutex); + spool_stats.data_jobs--; + spool_stats.total_data_jobs++; + if (spool_stats.data_size < dcr->job_spool_size) { + spool_stats.data_size = 0; + } else { + spool_stats.data_size -= dcr->job_spool_size; + } + V(mutex); + P(dcr->dev->spool_mutex); + dcr->job_spool_size = 0; + V(dcr->dev->spool_mutex); + + make_unique_data_spool_filename(dcr, &name); + close(dcr->spool_fd); + dcr->spool_fd = -1; + dcr->spooling = false; + unlink(name); + Dmsg1(100, "Deleted spool file: %s\n", name); + free_pool_memory(name); + return true; +} + +bool are_attributes_spooled(JCR *jcr) +{ + return jcr->spool_attributes && jcr->dir_bsock->m_spool_fd; +} + +/* + * Create spool file for attributes. + * This is done by "attaching" to the bsock, and when + * it is called, the output is written to a file. + * The actual spooling is turned on and off in + * append.c only during writing of the attributes. 
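The on-disk format written by write_spool_header() and write_spool_data() above is simply a fixed-size spool_hdr followed by hdr.len bytes of block data; the read side (read_block_from_spool_file(), earlier in this file) rejects any record whose hdr.len exceeds the device block buffer. A write-side sketch with plain write(), using sketch_hdr as a stand-in for spool_hdr:

#include <stdint.h>
#include <unistd.h>

struct sketch_hdr {              /* same fields as spool_hdr above */
   int32_t  FirstIndex;
   int32_t  LastIndex;
   uint32_t len;                 /* length of the data that follows */
};

/* one spool record: header first, then the payload */
static bool write_record(int fd, const struct sketch_hdr *hdr, const char *data)
{
   if (write(fd, hdr, sizeof(*hdr)) != (ssize_t)sizeof(*hdr)) {
      return false;              /* short write: the code above rewinds, despools and retries */
   }
   return write(fd, data, hdr->len) == (ssize_t)hdr->len;
}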
+ */ +bool begin_attribute_spool(JCR *jcr) +{ + if (!jcr->no_attributes && jcr->spool_attributes) { + return open_attr_spool_file(jcr, jcr->dir_bsock); + } + return true; +} + +static void update_attr_spool_size(ssize_t size) +{ + P(mutex); + if (size > 0) { + if ((spool_stats.attr_size - size) > 0) { + spool_stats.attr_size -= size; + } else { + spool_stats.attr_size = 0; + } + } + V(mutex); +} + +static void make_unique_spool_filename(JCR *jcr, POOLMEM **name, int fd) +{ + Mmsg(name, "%s/%s.attr.%s.%d.spool", working_directory, my_name, + jcr->Job, fd); +} + +/* + * Tell Director where to find the attributes spool file + * Note, if we are not on the same machine, the Director will + * return an error, and the higher level routine will transmit + * the data record by record -- using bsock->despool(). + */ +static bool blast_attr_spool_file(JCR *jcr, boffset_t size) +{ + /* send full spool file name */ + POOLMEM *name = get_pool_memory(PM_MESSAGE); + make_unique_spool_filename(jcr, &name, jcr->dir_bsock->m_fd); + bash_spaces(name); + jcr->dir_bsock->fsend("BlastAttr JobId=%d File=%s\n", jcr->JobId, name); + free_pool_memory(name); + + if (jcr->dir_bsock->recv() <= 0) { + Jmsg(jcr, M_FATAL, 0, _("Network error on BlastAttributes.\n")); + jcr->forceJobStatus(JS_FatalError); /* override any Incomplete */ + return false; + } + + if (!bstrcmp(jcr->dir_bsock->msg, "1000 OK BlastAttr\n")) { + return false; + } + return true; +} + +bool commit_attribute_spool(JCR *jcr) +{ + boffset_t size, data_end; + char ec1[30]; + char tbuf[100]; + BSOCK *dir; + + Dmsg1(100, "Commit attributes at %s\n", bstrftimes(tbuf, sizeof(tbuf), + (utime_t)time(NULL))); + if (are_attributes_spooled(jcr)) { + dir = jcr->dir_bsock; + if (fseeko(dir->m_spool_fd, 0, SEEK_END) != 0) { + berrno be; + Jmsg(jcr, M_FATAL, 0, _("Fseek on attributes file failed: ERR=%s\n"), + be.bstrerror()); + jcr->forceJobStatus(JS_FatalError); /* override any Incomplete */ + goto bail_out; + } + size = ftello(dir->m_spool_fd); + /* For Incomplete Job truncate spool file to last valid data_end if necssary */ + if (jcr->is_JobStatus(JS_Incomplete)) { + data_end = dir->get_last_data_end(); + if (size > data_end) { + if (ftruncate(fileno(dir->m_spool_fd), data_end) != 0) { + berrno be; + Jmsg(jcr, M_FATAL, 0, _("Truncate on attributes file failed: ERR=%s\n"), + be.bstrerror()); + jcr->forceJobStatus(JS_FatalError); /* override any Incomplete */ + goto bail_out; + } + Dmsg2(100, "=== Attrib spool truncated from %lld to %lld\n", + size, data_end); + size = data_end; + } + } + if (size < 0) { + berrno be; + Jmsg(jcr, M_FATAL, 0, _("Fseek on attributes file failed: ERR=%s\n"), + be.bstrerror()); + jcr->forceJobStatus(JS_FatalError); /* override any Incomplete */ + goto bail_out; + } + P(mutex); + if (spool_stats.attr_size + size > spool_stats.max_attr_size) { + spool_stats.max_attr_size = spool_stats.attr_size + size; + } + spool_stats.attr_size += size; + V(mutex); + jcr->sendJobStatus(JS_AttrDespooling); + Jmsg(jcr, M_INFO, 0, _("Sending spooled attrs to the Director. Despooling %s bytes ...\n"), + edit_uint64_with_commas(size, ec1)); + + if (!blast_attr_spool_file(jcr, size)) { + /* Can't read spool file from director side, + * send content over network. 
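The decision logic around blast_attr_spool_file() above works in two steps: first offer the Director the spool file by name ("BlastAttr JobId=... File=..."); only if the Director cannot read it directly (no "1000 OK BlastAttr" reply, e.g. because it runs on another machine) is the file despooled record by record over the socket. A sketch of that decision, with send_to_dir, reply_from_dir and despool_over_socket as illustrative stand-ins for the BSOCK calls:

#include <string.h>

extern void send_to_dir(const char *fmt, int jobid, const char *fname);
extern const char *reply_from_dir(void);
extern void despool_over_socket(void);

static void commit_attrs(int jobid, const char *spool_fname)
{
   send_to_dir("BlastAttr JobId=%d File=%s\n", jobid, spool_fname);
   if (strcmp(reply_from_dir(), "1000 OK BlastAttr\n") != 0) {
      /* Director cannot open the file locally: fall back to sending
       * the spooled records over the network connection. */
      despool_over_socket();
   }
}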
+ */ + dir->despool(update_attr_spool_size, size); + } + return close_attr_spool_file(jcr, dir); + } + return true; + +bail_out: + close_attr_spool_file(jcr, dir); + return false; +} + +static bool open_attr_spool_file(JCR *jcr, BSOCK *bs) +{ + POOLMEM *name = get_pool_memory(PM_MESSAGE); + + make_unique_spool_filename(jcr, &name, bs->m_fd); + bs->m_spool_fd = bfopen(name, "w+b"); + if (!bs->m_spool_fd) { + berrno be; + Jmsg(jcr, M_FATAL, 0, _("fopen attr spool file %s failed: ERR=%s\n"), name, + be.bstrerror()); + jcr->forceJobStatus(JS_FatalError); /* override any Incomplete */ + free_pool_memory(name); + return false; + } + P(mutex); + spool_stats.attr_jobs++; + V(mutex); + free_pool_memory(name); + return true; +} + +static bool close_attr_spool_file(JCR *jcr, BSOCK *bs) +{ + POOLMEM *name; + + char tbuf[100]; + + Dmsg1(100, "Close attr spool file at %s\n", bstrftimes(tbuf, sizeof(tbuf), + (utime_t)time(NULL))); + if (!bs->m_spool_fd) { + return true; + } + name = get_pool_memory(PM_MESSAGE); + P(mutex); + spool_stats.attr_jobs--; + spool_stats.total_attr_jobs++; + V(mutex); + make_unique_spool_filename(jcr, &name, bs->m_fd); + fclose(bs->m_spool_fd); + unlink(name); + free_pool_memory(name); + bs->m_spool_fd = NULL; + bs->clear_spooling(); + return true; +} + +bool discard_attribute_spool(JCR *jcr) +{ + if (are_attributes_spooled(jcr)) { + return close_attr_spool_file(jcr, jcr->dir_bsock); + } + return true; +} diff --git a/src/stored/status.c b/src/stored/status.c new file mode 100644 index 00000000..f4221cbc --- /dev/null +++ b/src/stored/status.c @@ -0,0 +1,1159 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
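Taken together, the attribute-spooling entry points defined above form a simple lifecycle: begin_attribute_spool() attaches a spool file to the Director socket, and at job end either commit_attribute_spool() despools it to the Director or discard_attribute_spool() just closes and unlinks it. The real call sites (e.g. in the append code) are not part of this hunk, so the wrapper below only illustrates the intended order of calls.

#include "bacula.h"
#include "stored.h"

static bool run_attr_spool_cycle(JCR *jcr, bool job_ok)
{
   if (!begin_attribute_spool(jcr)) {      /* attach a spool file to dir_bsock */
      return false;
   }
   /* ... attributes sent by the FD are captured into the spool file ... */
   if (job_ok) {
      return commit_attribute_spool(jcr);  /* truncate if Incomplete, then despool to the Director */
   }
   return discard_attribute_spool(jcr);    /* close and unlink, nothing is sent */
}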
+*/ +/* + * This file handles the status command + * + * Kern Sibbald, May MMIII + * + * + */ + +#include "bacula.h" +#include "stored.h" +#include "lib/status.h" +#include "sd_plugins.h" + +/* Imported functions */ +extern void dbg_print_plugin(FILE *fp); + +/* Imported variables */ +extern BSOCK *filed_chan; +extern void *start_heap; + +/* Static variables */ +static char OKqstatus[] = "3000 OK .status\n"; +static char DotStatusJob[] = "JobId=%d JobStatus=%c JobErrors=%d\n"; + + +/* Forward referenced functions */ +static void sendit(POOL_MEM &msg, int len, STATUS_PKT *sp); +static void sendit(const char *msg, int len, void *arg); +static void dbg_sendit(const char *msg, int len, void *arg); +static void send_blocked_status(DEVICE *dev, STATUS_PKT *sp); +static void send_device_status(DEVICE *dev, STATUS_PKT *sp); +static void list_running_jobs(STATUS_PKT *sp); +static void list_jobs_waiting_on_reservation(STATUS_PKT *sp); +static void list_status_header(STATUS_PKT *sp); +static void list_devices(STATUS_PKT *sp, char *name=NULL); +static void list_plugins(STATUS_PKT *sp); +static void list_cloud_transfers(STATUS_PKT *sp, bool verbose); + +void status_alert_callback(void *ctx, const char *short_msg, + const char *long_msg, char *Volume, int severity, + int flags, int alertno, utime_t alert_time) +{ + STATUS_PKT *sp = (STATUS_PKT *)ctx; + const char *type = "Unknown"; + POOL_MEM send_msg(PM_MESSAGE); + char edt[50]; + int len; + + switch (severity) { + case 'C': + type = "Critical"; + break; + case 'W': + type = "Warning"; + break; + case 'I': + type = "Info"; + break; + } + bstrftimes(edt, sizeof(edt), alert_time); + if (chk_dbglvl(10)) { + len = Mmsg(send_msg, _(" %s Alert: at %s Volume=\"%s\" flags=0x%x alert=%s\n"), + type, edt, Volume, flags, long_msg); + } else { + len = Mmsg(send_msg, _(" %s Alert: at %s Volume=\"%s\" alert=%s\n"), + type, edt, Volume, short_msg); + } + sendit(send_msg, len, sp); +} + + +/* + * Status command from Director + */ +void output_status(STATUS_PKT *sp) +{ + POOL_MEM msg(PM_MESSAGE); + int len; + + list_status_header(sp); + + /* + * List running jobs + */ + list_running_jobs(sp); + + /* + * List jobs stuck in reservation system + */ + list_jobs_waiting_on_reservation(sp); + + /* + * List terminated jobs (defined in lib/status.h) + */ + list_terminated_jobs(sp); + + /* + * List devices + */ + list_devices(sp); + + /* + * List cloud transfers + */ + list_cloud_transfers(sp, false); + + + len = Mmsg(msg, _("Used Volume status:\n")); + if (!sp->api) sendit(msg, len, sp); + + list_volumes(sendit, (void *)sp); + if (!sp->api) sendit("====\n\n", 6, sp); + + list_spool_stats(sendit, (void *)sp); + if (!sp->api) sendit("====\n\n", 6, sp); + + if (chk_dbglvl(10)) { + dbg_print_plugin(stdout); + } +} + +static void list_resources(STATUS_PKT *sp) +{ +#ifdef when_working + POOL_MEM msg(PM_MESSAGE); + int len; + + len = Mmsg(msg, _("\nSD Resources:\n")); + if (!sp->api) sendit(msg, len, sp); + dump_resource(R_DEVICE, resources[R_DEVICE-r_first], sp); + if (!sp->api) sendit("====\n\n", 6, sp); +#endif +} + +#ifdef xxxx +static find_device(char *devname) +{ + foreach_res(device, R_DEVICE) { + if (strcasecmp(device->hdr.name, devname) == 0) { + found = true; + break; + } + } + if (!found) { + foreach_res(changer, R_AUTOCHANGER) { + if (strcasecmp(changer->hdr.name, devname) == 0) { + break; + } + } + } +} +#endif + +static void api_list_one_device(char *name, DEVICE *dev, STATUS_PKT *sp) +{ + OutputWriter ow(sp->api_opts); + int zero=0; + int blocked=0; + uint64_t f, t; + 
const char *p=NULL; + + if (!dev) { + return; + } + + dev->get_freespace(&f, &t); + + ow.get_output(OT_START_OBJ, + OT_STRING, "name", dev->device->hdr.name, + OT_STRING, "archive_device", dev->archive_name(), + OT_STRING, "type", dev->print_type(), + OT_STRING, "media_type", dev->device->media_type, + OT_INT, "open", (int)dev->is_open(), + OT_INT, "writers", dev->num_writers, + OT_INT32, "maximum_concurrent_jobs", dev->max_concurrent_jobs, + OT_INT64, "maximum_volume_size", dev->max_volume_size, + OT_INT, "read_only", dev->device->read_only, + OT_INT, "autoselect", dev->device->autoselect, + OT_INT, "enabled", dev->enabled, + OT_INT64, "free_space", f, + OT_INT64, "total_space", t, + OT_INT64, "devno", dev->devno, + OT_END); + + if (dev->is_open()) { + if (dev->is_labeled()) { + ow.get_output(OT_STRING, "mounted", dev->blocked()?"0":"1", + OT_STRING, "waiting", dev->blocked()?"1":"0", + OT_STRING, "volume", dev->VolHdr.VolumeName, + OT_STRING, "pool", NPRTB(dev->pool_name), + OT_END); + } else { + ow.get_output(OT_INT, "mounted", zero, + OT_INT, "waiting", zero, + OT_STRING, "volume", "", + OT_STRING, "pool", "", + OT_END); + } + + blocked = 1; + switch(dev->blocked()) { + case BST_UNMOUNTED: + p = "User unmounted"; + break; + case BST_UNMOUNTED_WAITING_FOR_SYSOP: + p = "User unmounted during wait for media/mount"; + break; + case BST_DOING_ACQUIRE: + p = "Device is being initialized"; + break; + case BST_WAITING_FOR_SYSOP: + p = "Waiting for mount or create a volume"; + break; + case BST_WRITING_LABEL: + p = "Labeling a Volume"; + break; + default: + blocked=0; + p = NULL; + } + + /* TODO: give more information about blocked status + * and the volume needed if WAITING for SYSOP + */ + ow.get_output(OT_STRING, "blocked_desc", NPRTB(p), + OT_INT, "blocked", blocked, + OT_END); + + ow.get_output(OT_INT, "append", (int)dev->can_append(), + OT_END); + + if (dev->can_append()) { + ow.get_output(OT_INT64, "bytes", dev->VolCatInfo.VolCatBytes, + OT_INT32, "blocks", dev->VolCatInfo.VolCatBlocks, + OT_END); + + } else { /* reading */ + ow.get_output(OT_INT64, "bytes", dev->VolCatInfo.VolCatRBytes, + OT_INT32, "blocks", dev->VolCatInfo.VolCatReads, /* might not be blocks */ + OT_END); + + } + ow.get_output(OT_INT, "file", dev->file, + OT_INT, "block", dev->block_num, + OT_END); + } else { + ow.get_output(OT_INT, "mounted", zero, + OT_INT, "waiting", zero, + OT_STRING, "volume", "", + OT_STRING, "pool", "", + OT_STRING, "blocked_desc", "", + OT_INT, "blocked", zero, + OT_INT, "append", zero, + OT_INT, "bytes", zero, + OT_INT, "blocks", zero, + OT_INT, "file", zero, + OT_INT, "block", zero, + OT_END); + } + + p = ow.get_output(OT_END_OBJ, OT_END); + sendit(p, strlen(p), sp); +} + + +static void list_one_device(char *name, DEVICE *dev, STATUS_PKT *sp) +{ + char b1[35], b2[35], b3[35]; + POOL_MEM msg(PM_MESSAGE); + int len; + int bpb; + + if (sp->api > 1) { + api_list_one_device(name, dev, sp); + return; + } + + if (!dev) { + len = Mmsg(msg, _("\nDevice \"%s\" is not open or does not exist.\n"), + name); + sendit(msg, len, sp); + if (!sp->api) sendit("==\n", 4, sp); + return; + } + + if (dev->is_open()) { + if (dev->is_labeled()) { + len = Mmsg(msg, _("\nDevice %s is %s %s:\n" + " Volume: %s\n" + " Pool: %s\n" + " Media type: %s\n"), + dev->print_type(), dev->print_name(), + dev->blocked()?_("waiting for"):_("mounted with"), + dev->VolHdr.VolumeName, + dev->pool_name[0]?dev->pool_name:_("*unknown*"), + dev->device->media_type); + sendit(msg, len, sp); + } else { + len = Mmsg(msg, _("\nDevice %s: %s 
open but no Bacula volume is currently mounted.\n"), + dev->print_type(), dev->print_name()); + sendit(msg, len, sp); + } + if (dev->can_append()) { + bpb = dev->VolCatInfo.VolCatBlocks; + if (bpb <= 0) { + bpb = 1; + } + bpb = dev->VolCatInfo.VolCatBytes / bpb; + len = Mmsg(msg, _(" Total Bytes=%s Blocks=%s Bytes/block=%s\n"), + edit_uint64_with_commas(dev->VolCatInfo.VolCatBytes, b1), + edit_uint64_with_commas(dev->VolCatInfo.VolCatBlocks, b2), + edit_uint64_with_commas(bpb, b3)); + sendit(msg, len, sp); + } else { /* reading */ + bpb = dev->VolCatInfo.VolCatReads; + if (bpb <= 0) { + bpb = 1; + } + if (dev->VolCatInfo.VolCatRBytes > 0) { + bpb = dev->VolCatInfo.VolCatRBytes / bpb; + } else { + bpb = 0; + } + len = Mmsg(msg, _(" Total Bytes Read=%s Blocks Read=%s Bytes/block=%s\n"), + edit_uint64_with_commas(dev->VolCatInfo.VolCatRBytes, b1), + edit_uint64_with_commas(dev->VolCatInfo.VolCatReads, b2), + edit_uint64_with_commas(bpb, b3)); + sendit(msg, len, sp); + } + len = Mmsg(msg, _(" Positioned at File=%s Block=%s\n"), + edit_uint64_with_commas(dev->file, b1), + edit_uint64_with_commas(dev->block_num, b2)); + sendit(msg, len, sp); + } else { + len = Mmsg(msg, _("\nDevice %s: %s is not open.\n"), + dev->print_type(), dev->print_name()); + sendit(msg, len, sp); + } + send_blocked_status(dev, sp); + + /* TODO: We need to check with Mount command, maybe we can + * display this number only when the device is open. + */ + if (dev->is_file()) { + char ed1[50]; + uint64_t f, t; + dev->get_freespace(&f, &t); + if (t > 0) { /* We might not have access to numbers */ + len = Mmsg(msg, _(" Available %sSpace=%sB\n"), + dev->is_cloud() ? _("Cache ") : "", + edit_uint64_with_suffix(f, ed1)); + sendit(msg, len, sp); + } + } + + dev->show_tape_alerts((DCR *)sp, list_short, list_all, status_alert_callback); + + if (!sp->api) sendit("==\n", 4, sp); +} + +void _dbg_list_one_device(char *name, DEVICE *dev, const char *file, int line) +{ + STATUS_PKT sp; + sp.bs = NULL; + sp.callback = dbg_sendit; + sp.context = NULL; + d_msg(file, line, 0, "Called dbg_list_one_device():"); + list_one_device(name, dev, &sp); + send_device_status(dev, &sp); +} + +static void list_one_autochanger(char *name, AUTOCHANGER *changer, STATUS_PKT *sp) +{ + int len; + char *p; + DEVRES *device; + POOL_MEM msg(PM_MESSAGE); + OutputWriter ow(sp->api_opts); + + if (sp->api > 1) { + ow.get_output(OT_START_OBJ, + OT_STRING, "autochanger", changer->hdr.name, + OT_END); + + ow.start_group("devices"); + + foreach_alist(device, changer->device) { + ow.get_output(OT_START_OBJ, + OT_STRING, "name", device->hdr.name, + OT_STRING, "device",device->device_name, + OT_END_OBJ, + OT_END); + } + + ow.end_group(); + + p = ow.get_output(OT_END_OBJ, OT_END); + sendit(p, strlen(p), sp); + + } else { + + len = Mmsg(msg, _("Autochanger \"%s\" with devices:\n"), + changer->hdr.name); + sendit(msg, len, sp); + + foreach_alist(device, changer->device) { + if (device->dev) { + len = Mmsg(msg, " %s\n", device->dev->print_name()); + sendit(msg, len, sp); + } else { + len = Mmsg(msg, " %s\n", device->hdr.name); + sendit(msg, len, sp); + } + } + } +} + +static void list_devices(STATUS_PKT *sp, char *name) +{ + int len; + DEVRES *device; + AUTOCHANGER *changer; + POOL_MEM msg(PM_MESSAGE); + + if (!sp->api) { + len = Mmsg(msg, _("\nDevice status:\n")); + sendit(msg, len, sp); + } + + foreach_res(changer, R_AUTOCHANGER) { + if (!name || strcmp(changer->hdr.name, name) == 0) { + list_one_autochanger(changer->hdr.name, changer, sp); + } + } + + foreach_res(device, 
R_DEVICE) { + if (!name || strcmp(device->hdr.name, name) == 0) { + list_one_device(device->hdr.name, device->dev, sp); + } + } + if (!sp->api) sendit("====\n\n", 6, sp); +} + +static void list_cloud_transfers(STATUS_PKT *sp, bool verbose) +{ + bool first=true; + int len; + DEVRES *device; + POOL_MEM msg(PM_MESSAGE); + + foreach_res(device, R_DEVICE) { + if (device->dev && device->dev->is_cloud()) { + + if (first) { + if (!sp->api) { + len = Mmsg(msg, _("Cloud transfer status:\n")); + sendit(msg, len, sp); + } + first = false; + } + + cloud_dev *cdev = (cloud_dev*)device->dev; + len = cdev->get_cloud_upload_transfer_status(msg, verbose); + sendit(msg, len, sp); + len = cdev->get_cloud_download_transfer_status(msg, verbose); + sendit(msg, len, sp); + break; /* only once, transfer mgr are shared */ + } + } + + if (!first && !sp->api) sendit("====\n\n", 6, sp); +} + +static void api_list_sd_status_header(STATUS_PKT *sp) +{ + char *p; + alist drivers(10, not_owned_by_alist); + OutputWriter wt(sp->api_opts); + + sd_list_loaded_drivers(&drivers); + wt.start_group("header"); + wt.get_output( + OT_STRING, "name", my_name, + OT_STRING, "version", VERSION " (" BDATE ")", + OT_STRING, "uname", HOST_OS " " DISTNAME " " DISTVER, + OT_UTIME, "started", daemon_start_time, + OT_INT64, "pid", (int64_t)getpid(), + OT_INT, "jobs_run", num_jobs_run, + OT_INT, "jobs_running",job_count(), + OT_INT, "ndevices", ((rblist *)res_head[R_DEVICE-r_first]->res_list)->size(), + OT_INT, "nautochgr", ((rblist *)res_head[R_AUTOCHANGER-r_first]->res_list)->size(), + OT_PLUGINS,"plugins", b_plugin_list, + OT_ALIST_STR, "drivers", &drivers, + OT_END); + p = wt.end_group(); + sendit(p, strlen(p), sp); +} + +static void list_status_header(STATUS_PKT *sp) +{ + char dt[MAX_TIME_LENGTH]; + char b1[35], b2[35], b3[35], b4[35], b5[35]; + POOL_MEM msg(PM_MESSAGE); + int len; + + if (sp->api) { + api_list_sd_status_header(sp); + return; + } + + len = Mmsg(msg, _("%s %sVersion: %s (%s) %s %s %s\n"), + my_name, "", VERSION, BDATE, HOST_OS, DISTNAME, DISTVER); + sendit(msg, len, sp); + + bstrftime_nc(dt, sizeof(dt), daemon_start_time); + + + len = Mmsg(msg, _("Daemon started %s. Jobs: run=%d, running=%d.\n"), + dt, num_jobs_run, job_count()); + sendit(msg, len, sp); + len = Mmsg(msg, _(" Heap: heap=%s smbytes=%s max_bytes=%s bufs=%s max_bufs=%s\n"), + edit_uint64_with_commas((char *)sbrk(0)-(char *)start_heap, b1), + edit_uint64_with_commas(sm_bytes, b2), + edit_uint64_with_commas(sm_max_bytes, b3), + edit_uint64_with_commas(sm_buffers, b4), + edit_uint64_with_commas(sm_max_buffers, b5)); + sendit(msg, len, sp); + len = Mmsg(msg, " Sizes: boffset_t=%d size_t=%d int32_t=%d int64_t=%d " + "mode=%d,%d newbsr=%d\n", + (int)sizeof(boffset_t), (int)sizeof(size_t), (int)sizeof(int32_t), + (int)sizeof(int64_t), (int)DEVELOPER_MODE, 0, use_new_match_all); + sendit(msg, len, sp); + len = Mmsg(msg, _(" Res: ndevices=%d nautochgr=%d\n"), + ((rblist *)res_head[R_DEVICE-r_first]->res_list)->size(), + ((rblist *)res_head[R_AUTOCHANGER-r_first]->res_list)->size()); + sendit(msg, len, sp); + list_plugins(sp); +} + +static void send_blocked_status(DEVICE *dev, STATUS_PKT *sp) +{ + POOL_MEM msg(PM_MESSAGE); + int len; + + if (!dev) { + len = Mmsg(msg, _("No DEVICE structure.\n\n")); + sendit(msg, len, sp); + return; + } + if (!dev->enabled) { + len = Mmsg(msg, _(" Device is disabled. User command.\n")); + sendit(msg, len, sp); + } + switch (dev->blocked()) { + case BST_UNMOUNTED: + len = Mmsg(msg, _(" Device is BLOCKED. 
User unmounted.\n")); + sendit(msg, len, sp); + break; + case BST_UNMOUNTED_WAITING_FOR_SYSOP: + len = Mmsg(msg, _(" Device is BLOCKED. User unmounted during wait for media/mount.\n")); + sendit(msg, len, sp); + break; + case BST_WAITING_FOR_SYSOP: + { + DCR *dcr; + bool found_jcr = false; + dev->Lock(); + dev->Lock_dcrs(); + foreach_dlist(dcr, dev->attached_dcrs) { + if (dcr->jcr->JobStatus == JS_WaitMount) { + len = Mmsg(msg, _(" Device is BLOCKED waiting for mount of volume \"%s\",\n" + " Pool: %s\n" + " Media type: %s\n"), + dcr->VolumeName, + dcr->pool_name, + dcr->media_type); + sendit(msg, len, sp); + found_jcr = true; + } else if (dcr->jcr->JobStatus == JS_WaitMedia) { + len = Mmsg(msg, _(" Device is BLOCKED waiting to create a volume for:\n" + " Pool: %s\n" + " Media type: %s\n"), + dcr->pool_name, + dcr->media_type); + sendit(msg, len, sp); + found_jcr = true; + } + } + dev->Unlock_dcrs(); + dev->Unlock(); + if (!found_jcr) { + len = Mmsg(msg, _(" Device is BLOCKED waiting for media.\n")); + sendit(msg, len, sp); + } + } + break; + case BST_DOING_ACQUIRE: + len = Mmsg(msg, _(" Device is being initialized.\n")); + sendit(msg, len, sp); + break; + case BST_WRITING_LABEL: + len = Mmsg(msg, _(" Device is blocked labeling a Volume.\n")); + sendit(msg, len, sp); + break; + default: + break; + } + /* Send autochanger slot status */ + if (dev->is_autochanger()) { + if (dev->get_slot() > 0) { + len = Mmsg(msg, _(" Slot %d %s loaded in drive %d.\n"), + dev->get_slot(), dev->is_open()?"is": "was last", dev->drive_index); + sendit(msg, len, sp); + } else if (dev->get_slot() <= 0) { + len = Mmsg(msg, _(" Drive %d is not loaded.\n"), dev->drive_index); + sendit(msg, len, sp); + } + } + if (chk_dbglvl(1)) { + send_device_status(dev, sp); + } +} + +void send_device_status(DEVICE *dev, STATUS_PKT *sp) +{ + POOL_MEM msg(PM_MESSAGE); + int len; + DCR *dcr = NULL; + bool found = false; + char b1[35]; + + + if (chk_dbglvl(5)) { + len = Mmsg(msg, _("Configured device capabilities:\n")); + sendit(msg, len, sp); + len = Mmsg(msg, " %sEOF %sBSR %sBSF %sFSR %sFSF %sEOM %sREM %sRACCESS %sAUTOMOUNT %sLABEL %sANONVOLS %sALWAYSOPEN\n", + dev->capabilities & CAP_EOF ? "" : "!", + dev->capabilities & CAP_BSR ? "" : "!", + dev->capabilities & CAP_BSF ? "" : "!", + dev->capabilities & CAP_FSR ? "" : "!", + dev->capabilities & CAP_FSF ? "" : "!", + dev->capabilities & CAP_EOM ? "" : "!", + dev->capabilities & CAP_REM ? "" : "!", + dev->capabilities & CAP_RACCESS ? "" : "!", + dev->capabilities & CAP_AUTOMOUNT ? "" : "!", + dev->capabilities & CAP_LABEL ? "" : "!", + dev->capabilities & CAP_ANONVOLS ? "" : "!", + dev->capabilities & CAP_ALWAYSOPEN ? "" : "!"); + sendit(msg, len, sp); + } + + len = Mmsg(msg, _("Device state:\n")); + sendit(msg, len, sp); + len = Mmsg(msg, " %sOPENED %sTAPE %sLABEL %sAPPEND %sREAD %sEOT %sWEOT %sEOF %sWORM %sNEXTVOL %sSHORT %sMOUNTED %sMALLOC\n", + dev->is_open() ? "" : "!", + dev->is_tape() ? "" : "!", + dev->is_labeled() ? "" : "!", + dev->can_append() ? "" : "!", + dev->can_read() ? "" : "!", + dev->at_eot() ? "" : "!", + dev->state & ST_WEOT ? "" : "!", + dev->at_eof() ? "" : "!", + dev->is_worm() ? "" : "!", + dev->state & ST_NEXTVOL ? "" : "!", + dev->state & ST_SHORT ? "" : "!", + dev->state & ST_MOUNTED ? "" : "!", + dev->state & ST_MALLOC ? 
"" : "!"); + sendit(msg, len, sp); + + len = Mmsg(msg, _(" Writers=%d reserves=%d blocked=%d enabled=%d usage=%s\n"), dev->num_writers, + dev->num_reserved(), dev->blocked(), dev->enabled, + edit_uint64_with_commas(dev->usage, b1)); + + sendit(msg, len, sp); + + len = Mmsg(msg, _("Attached JobIds: ")); + sendit(msg, len, sp); + dev->Lock(); + dev->Lock_dcrs(); + foreach_dlist(dcr, dev->attached_dcrs) { + if (dcr->jcr) { + if (found) { + sendit(",", 1, sp); + } + len = Mmsg(msg, "%d", (int)dcr->jcr->JobId); + sendit(msg, len, sp); + found = true; + } + } + dev->Unlock_dcrs(); + dev->Unlock(); + sendit("\n", 1, sp); + + len = Mmsg(msg, _("Device parameters:\n")); + sendit(msg, len, sp); + len = Mmsg(msg, _(" Archive name: %s Device name: %s\n"), dev->archive_name(), + dev->name()); + sendit(msg, len, sp); + len = Mmsg(msg, _(" File=%u block=%u\n"), dev->file, dev->block_num); + sendit(msg, len, sp); + len = Mmsg(msg, _(" Min block=%u Max block=%u\n"), dev->min_block_size, dev->max_block_size); + sendit(msg, len, sp); +} + +static void api_list_running_jobs(STATUS_PKT *sp) +{ + char *p1, *p2, *p3; + int i1, i2, i3; + OutputWriter ow(sp->api_opts); + + uint64_t inst_bps, total_bps; + int inst_sec, total_sec; + JCR *jcr; + DCR *dcr, *rdcr; + time_t now = time(NULL); + + foreach_jcr(jcr) { + if (jcr->getJobType() == JT_SYSTEM) { + continue; + } + ow.get_output(OT_CLEAR, + OT_START_OBJ, + OT_INT32, "jobid", jcr->JobId, + OT_STRING, "job", jcr->Job, + OT_JOBLEVEL,"level", jcr->getJobLevel(), + OT_JOBTYPE, "type", jcr->getJobType(), + OT_JOBSTATUS,"status", jcr->JobStatus, + OT_PINT64, "jobbytes", jcr->JobBytes, + OT_INT32, "jobfiles", jcr->JobFiles, + OT_UTIME, "starttime", jcr->start_time, + OT_INT32, "errors", jcr->JobErrors, + OT_INT32, "newbsr", (int32_t)jcr->use_new_match_all, + OT_END); + + dcr = jcr->dcr; + rdcr = jcr->read_dcr; + + p1 = p2 = p3 = NULL; + if (rdcr && rdcr->device) { + p1 = rdcr->VolumeName; + p2 = rdcr->pool_name; + p3 = rdcr->device->hdr.name; + } + ow.get_output(OT_STRING, "read_volume", NPRTB(p1), + OT_STRING, "read_pool", NPRTB(p2), + OT_STRING, "read_device", NPRTB(p3), + OT_END); + + p1 = p2 = p3 = NULL; + i1 = i2 = i3 = 0; + if (dcr && dcr->device) { + p1 = dcr->VolumeName; + p2 = dcr->pool_name; + p3 = dcr->device->hdr.name; + i1 = dcr->spooling; + i2 = dcr->despooling; + i3 = dcr->despool_wait; + } + + ow.get_output(OT_STRING, "write_volume", NPRTB(p1), + OT_STRING, "write_pool", NPRTB(p2), + OT_STRING, "write_device", NPRTB(p3), + OT_INT, "spooling", i1, + OT_INT, "despooling", i2, + OT_INT, "despool_wait", i3, + OT_END); + + if (jcr->last_time == 0) { + jcr->last_time = jcr->run_time; + } + + total_sec = now - jcr->run_time; + inst_sec = now - jcr->last_time; + + if (total_sec <= 0) { + total_sec = 1; + } + if (inst_sec <= 0) { + inst_sec = 1; + } + + /* Instanteous bps not smoothed */ + inst_bps = (jcr->JobBytes - jcr->LastJobBytes) / inst_sec; + if (jcr->LastRate == 0) { + jcr->LastRate = inst_bps; + } + + /* Smooth the instantaneous bps a bit */ + inst_bps = (2 * jcr->LastRate + inst_bps) / 3; + /* total bps (AveBytes/sec) since start of job */ + total_bps = jcr->JobBytes / total_sec; + + p1 = ow.get_output(OT_PINT64, "avebytes_sec", total_bps, + OT_PINT64, "lastbytes_sec", inst_bps, + OT_END_OBJ, + OT_END); + + sendit(p1, strlen(p1), sp); + + /* Update only every 10 seconds */ + if (now - jcr->last_time > 10) { + jcr->LastRate = inst_bps; + jcr->LastJobBytes = jcr->JobBytes; + jcr->last_time = now; + } + } + endeach_jcr(jcr); + +} + +static void 
list_running_jobs(STATUS_PKT *sp) +{ + bool found = false; + uint64_t inst_bps, total_bps; + int inst_sec, total_sec; + JCR *jcr; + DCR *dcr, *rdcr; + char JobName[MAX_NAME_LENGTH]; + char b1[50], b2[50], b3[50], b4[50]; + int len; + POOL_MEM msg(PM_MESSAGE); + time_t now = time(NULL); + + if (sp->api > 1) { + api_list_running_jobs(sp); + return; + } + + len = Mmsg(msg, _("\nRunning Jobs:\n")); + if (!sp->api) sendit(msg, len, sp); + + foreach_jcr(jcr) { + if (jcr->JobStatus == JS_WaitFD) { + len = Mmsg(msg, _("%s Job %s waiting for Client connection.\n"), + job_type_to_str(jcr->getJobType()), jcr->Job); + sendit(msg, len, sp); + } + dcr = jcr->dcr; + rdcr = jcr->read_dcr; + if ((dcr && dcr->device) || (rdcr && rdcr->device)) { + bstrncpy(JobName, jcr->Job, sizeof(JobName)); + /* There are three periods after the Job name */ + char *p; + for (int i=0; i<3; i++) { + if ((p=strrchr(JobName, '.')) != NULL) { + *p = 0; + } + } + if (rdcr && rdcr->device) { + len = Mmsg(msg, _("Reading: %s %s job %s JobId=%d Volume=\"%s\"\n" + " pool=\"%s\" device=%s newbsr=%d\n"), + job_level_to_str(jcr->getJobLevel()), + job_type_to_str(jcr->getJobType()), + JobName, + jcr->JobId, + rdcr->VolumeName, + rdcr->pool_name, + rdcr->dev?rdcr->dev->print_name(): + rdcr->device->device_name, + jcr->use_new_match_all + ); + sendit(msg, len, sp); + } else if (dcr && dcr->device) { + len = Mmsg(msg, _("Writing: %s %s job %s JobId=%d Volume=\"%s\"\n" + " pool=\"%s\" device=%s\n"), + job_level_to_str(jcr->getJobLevel()), + job_type_to_str(jcr->getJobType()), + JobName, + jcr->JobId, + dcr->VolumeName, + dcr->pool_name, + dcr->dev?dcr->dev->print_name(): + dcr->device->device_name); + sendit(msg, len, sp); + len= Mmsg(msg, _(" spooling=%d despooling=%d despool_wait=%d\n"), + dcr->spooling, dcr->despooling, dcr->despool_wait); + sendit(msg, len, sp); + } + if (jcr->last_time == 0) { + jcr->last_time = jcr->run_time; + } + total_sec = now - jcr->run_time; + inst_sec = now - jcr->last_time; + if (total_sec <= 0) { + total_sec = 1; + } + if (inst_sec <= 0) { + inst_sec = 1; + } + /* Instanteous bps not smoothed */ + inst_bps = (jcr->JobBytes - jcr->LastJobBytes) / inst_sec; + if (jcr->LastRate == 0) { + jcr->LastRate = inst_bps; + } + /* Smooth the instantaneous bps a bit */ + inst_bps = (2 * jcr->LastRate + inst_bps) / 3; + /* total bps (AveBytes/sec) since start of job */ + total_bps = jcr->JobBytes / total_sec; + len = Mmsg(msg, _(" Files=%s Bytes=%s AveBytes/sec=%s LastBytes/sec=%s\n"), + edit_uint64_with_commas(jcr->JobFiles, b1), + edit_uint64_with_commas(jcr->JobBytes, b2), + edit_uint64_with_commas(total_bps, b3), + edit_uint64_with_commas(inst_bps, b4)); + sendit(msg, len, sp); + /* Update only every 10 seconds */ + if (now - jcr->last_time > 10) { + jcr->LastRate = inst_bps; + jcr->LastJobBytes = jcr->JobBytes; + jcr->last_time = now; + } + found = true; +#ifdef DEBUG + if (jcr->file_bsock) { + len = Mmsg(msg, _(" FDReadSeqNo=%s in_msg=%u out_msg=%d fd=%d\n"), + edit_uint64_with_commas(jcr->file_bsock->read_seqno, b1), + jcr->file_bsock->in_msg_no, jcr->file_bsock->out_msg_no, + jcr->file_bsock->m_fd); + sendit(msg, len, sp); + } else { + len = Mmsg(msg, _(" FDSocket closed\n")); + sendit(msg, len, sp); + } +#endif + } + } + endeach_jcr(jcr); + + if (!found) { + len = Mmsg(msg, _("No Jobs running.\n")); + if (!sp->api) sendit(msg, len, sp); + } + if (!sp->api) sendit("====\n", 5, sp); +} + +static void list_jobs_waiting_on_reservation(STATUS_PKT *sp) +{ + JCR *jcr; + POOL_MEM msg(PM_MESSAGE); + int len; + + len = 
Mmsg(msg, _("\nJobs waiting to reserve a drive:\n")); + if (!sp->api) sendit(msg, len, sp); + + foreach_jcr(jcr) { + if (!jcr->reserve_msgs) { + continue; + } + send_drive_reserve_messages(jcr, sendit, sp); + } + endeach_jcr(jcr); + + if (!sp->api) sendit("====\n", 5, sp); +} + + +static void sendit(const char *msg, int len, void *sp) +{ + sendit(msg, len, (STATUS_PKT *)sp); +} + +static void sendit(POOL_MEM &msg, int len, STATUS_PKT *sp) +{ + BSOCK *bs = sp->bs; + if (bs) { + bs->msg = check_pool_memory_size(bs->msg, len+1); + memcpy(bs->msg, msg.c_str(), len+1); + bs->msglen = len+1; + bs->send(); + } else { + sp->callback(msg.c_str(), len, sp->context); + } +} + +static void dbg_sendit(const char *msg, int len, void *sp) +{ + if (len > 0) { + Dmsg0(-1, msg); + } +} + +/* + * Status command from Director + */ +bool status_cmd(JCR *jcr) +{ + BSOCK *dir = jcr->dir_bsock; + STATUS_PKT sp; + + dir->fsend("\n"); + sp.bs = dir; + output_status(&sp); + dir->signal(BNET_EOD); + return true; +} + +/* + * .status command from Director + */ +bool qstatus_cmd(JCR *jcr) +{ + BSOCK *dir = jcr->dir_bsock; + JCR *njcr; + s_last_job* job; + STATUS_PKT sp; + POOLMEM *args = get_pool_memory(PM_MESSAGE); + char *argk[MAX_CMD_ARGS]; /* argument keywords */ + char *argv[MAX_CMD_ARGS]; /* argument values */ + int argc; /* number of arguments */ + bool ret=true; + char *cmd; + char *device=NULL; + int api = true; + + sp.bs = dir; + + parse_args(dir->msg, &args, &argc, argk, argv, MAX_CMD_ARGS); + + /* .status xxxx at the minimum */ + if (argc < 2 || strcmp(argk[0], ".status") != 0) { + pm_strcpy(jcr->errmsg, dir->msg); + dir->fsend(_("3900 No arg in .status command: %s\n"), jcr->errmsg); + dir->signal(BNET_EOD); + return false; + } + + cmd = argk[1]; + unbash_spaces(cmd); + + /* The status command can contain some arguments + * i=0 => .status + * i=1 => [running | current | last | ... 
] + */ + for (int i=0 ; i < argc ; i++) { + if (!strcmp(argk[i], "device") && argv[i]) { + device = argv[i]; + unbash_spaces(device); + + } else if (!strcmp(argk[i], "api") && argv[i]) { + api = atoi(argv[i]); + + } else if (!strcmp(argk[i], "api_opts") && argv[i]) { + strncpy(sp.api_opts, argv[i], sizeof(sp.api_opts));; + } + } + + Dmsg1(100, "cmd=%s\n", cmd); + + if (strcasecmp(cmd, "current") == 0) { + dir->fsend(OKqstatus, cmd); + foreach_jcr(njcr) { + if (njcr->JobId != 0) { + dir->fsend(DotStatusJob, njcr->JobId, njcr->JobStatus, njcr->JobErrors); + } + } + endeach_jcr(njcr); + } else if (strcasecmp(cmd, "last") == 0) { + dir->fsend(OKqstatus, cmd); + if ((last_jobs) && (last_jobs->size() > 0)) { + job = (s_last_job*)last_jobs->last(); + dir->fsend(DotStatusJob, job->JobId, job->JobStatus, job->Errors); + } + } else if (strcasecmp(cmd, "header") == 0) { + sp.api = api; + list_status_header(&sp); + } else if (strcasecmp(cmd, "running") == 0) { + sp.api = api; + list_running_jobs(&sp); + } else if (strcasecmp(cmd, "waitreservation") == 0) { + sp.api = api; + list_jobs_waiting_on_reservation(&sp); + } else if (strcasecmp(cmd, "devices") == 0) { + sp.api = api; + list_devices(&sp, device); + } else if (strcasecmp(cmd, "volumes") == 0) { + sp.api = api; + list_volumes(sendit, &sp); + } else if (strcasecmp(cmd, "spooling") == 0) { + sp.api = api; + list_spool_stats(sendit, &sp); + } else if (strcasecmp(cmd, "terminated") == 0) { + sp.api = api; + list_terminated_jobs(&sp); /* defined in lib/status.h */ + } else if (strcasecmp(cmd, "resources") == 0) { + sp.api = api; + list_resources(&sp); + } else if (strcasecmp(cmd, "cloud") == 0) { + list_cloud_transfers(&sp, true); + } else { + pm_strcpy(jcr->errmsg, dir->msg); + dir->fsend(_("3900 Unknown arg in .status command: %s\n"), jcr->errmsg); + dir->signal(BNET_EOD); + ret = false; + } + dir->signal(BNET_EOD); + free_pool_memory(args); + return ret; +} + +/* List plugins and drivers */ +static void list_plugins(STATUS_PKT *sp) +{ + POOL_MEM msg(PM_MESSAGE); + alist drivers(10, not_owned_by_alist); + int len; + + if (b_plugin_list && b_plugin_list->size() > 0) { + Plugin *plugin; + pm_strcpy(msg, " Plugin: "); + foreach_alist(plugin, b_plugin_list) { + len = pm_strcat(msg, plugin->file); + /* Print plugin version when debug activated */ + if (debug_level > 0 && plugin->pinfo) { + pm_strcat(msg, "("); + pm_strcat(msg, NPRT(sdplug_info(plugin)->plugin_version)); + len = pm_strcat(msg, ")"); + } + if (len > 80) { + pm_strcat(msg, "\n "); + } else { + pm_strcat(msg, " "); + } + } + len = pm_strcat(msg, "\n"); + sendit(msg.c_str(), len, sp); + } + sd_list_loaded_drivers(&drivers); + if (drivers.size() > 0) { + char *drv; + pm_strcpy(msg, " Drivers: "); + foreach_alist(drv, (&drivers)) { + len = pm_strcat(msg, drv); + if (len > 80) { + pm_strcat(msg, "\n "); + } else { + pm_strcat(msg, " "); + } + } + len = pm_strcat(msg, "\n"); + sendit(msg.c_str(), len, sp); + } +} diff --git a/src/stored/stored.c b/src/stored/stored.c new file mode 100644 index 00000000..66430b0b --- /dev/null +++ b/src/stored/stored.c @@ -0,0 +1,808 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
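
A quick sketch of the rate figures produced by list_running_jobs() and api_list_running_jobs() above: the "LastBytes/sec" value is not the raw per-interval rate but a 2:1 weighted average with the previously stored rate, and the stored LastRate/LastJobBytes are refreshed at most every 10 seconds. The helper below is illustrative only (it does not exist in the source); the arithmetic is the one used in both functions.

    /* Illustration of the "LastBytes/sec" smoothing used above. */
    static uint64_t smooth_rate(uint64_t last_rate, uint64_t raw_bps)
    {
       if (last_rate == 0) {                  /* first sample: no history yet */
          last_rate = raw_bps;
       }
       return (2 * last_rate + raw_bps) / 3;  /* weight history 2:1 over the new sample */
    }

For example, with last_rate=90 and a raw sample of 120, the reported value is (2*90 + 120)/3 = 100, so short bursts are damped rather than shown directly.
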
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Third generation Storage daemon. + * + * Written by Kern Sibbald, MM + * + * It accepts a number of simple commands from the File daemon + * and acts on them. When a request to append data is made, + * it opens a data channel and accepts data from the + * File daemon. + * + */ + +#include "bacula.h" +#include "stored.h" + +/* TODO: fix problem with bls, bextract + * that use findlib and already declare + * filed plugins + */ +#include "sd_plugins.h" + +/* Imported functions and variables */ +extern bool parse_sd_config(CONFIG *config, const char *configfile, int exit_code); + +/* Forward referenced functions */ +void terminate_stored(int sig); +static int check_resources(); +static void cleanup_old_files(); + +extern "C" void *device_initialization(void *arg); + +#define CONFIG_FILE "bacula-sd.conf" /* Default config file */ + +/* Global variables exported */ +char OK_msg[] = "3000 OK\n"; +char TERM_msg[] = "3999 Terminate\n"; +void *start_heap; +static bool test_config = false; + + +static uint32_t VolSessionId = 0; +uint32_t VolSessionTime; +char *configfile = NULL; +bool init_done = false; +static pthread_t server_tid; +static bool server_tid_valid = false; + +/* Global static variables */ +static bool foreground = false; +static bool make_pid_file = true; /* create pid file */ +static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; +static workq_t dird_workq; /* queue for processing connections */ +static CONFIG *config; + + +static void usage() +{ + fprintf(stderr, _( + PROG_COPYRIGHT + "\n%sVersion: %s (%s)\n\n" + "Usage: bacula-sd [options] [-c config_file] [config_file]\n" + " -c use as configuration file\n" + " -d [,] set debug level to , debug tags to \n" + " -dt print timestamp in debug output\n" + " -T set trace on\n" + " -f run in foreground (for debugging)\n" + " -g set groupid to group\n" + " -m print kaboom output (for debugging)\n" + " -p proceed despite I/O errors\n" + " -P do not create pid file\n" + " -s no signals (for debugging)\n" + " -t test - read config and exit\n" + " -u userid to \n" + " -v verbose user messages\n" + " -? print this message.\n" + "\n"), 2000, "", VERSION, BDATE); + exit(1); +} + +/* + * !!! WARNING !!! Use this function only when bacula is stopped. 
+ * ie, after a fatal signal and before exiting the program + * Print information about a JCR + */ +static void sd_debug_print(JCR *jcr, FILE *fp) +{ + if (jcr->dcr) { + DCR *dcr = jcr->dcr; + fprintf(fp, "\tdcr=%p volumename=%s dev=%p newvol=%d reserved=%d locked=%d\n", + dcr, dcr->VolumeName, dcr->dev, dcr->NewVol, + dcr->is_reserved(), + dcr->is_dev_locked()); + } else { + fprintf(fp, "dcr=*None*\n"); + } +} + +/********************************************************************* + * + * Main Bacula Unix Storage Daemon + * + */ +#if defined(HAVE_WIN32) +#define main BaculaMain +#endif + +int main (int argc, char *argv[]) +{ + int ch; + bool no_signals = false; + pthread_t thid; + char *uid = NULL; + char *gid = NULL; + + start_heap = sbrk(0); + setlocale(LC_ALL, ""); + bindtextdomain("bacula", LOCALEDIR); + textdomain("bacula"); + + init_stack_dump(); + my_name_is(argc, argv, "bacula-sd"); + init_msg(NULL, NULL); + daemon_start_time = time(NULL); + setup_daemon_message_queue(); + + /* Sanity checks */ + if (TAPE_BSIZE % B_DEV_BSIZE != 0 || TAPE_BSIZE / B_DEV_BSIZE == 0) { + Jmsg2(NULL, M_ABORT, 0, _("Tape block size (%d) not multiple of system size (%d)\n"), + TAPE_BSIZE, B_DEV_BSIZE); + } + if (TAPE_BSIZE != (1 << (ffs(TAPE_BSIZE)-1))) { + Jmsg1(NULL, M_ABORT, 0, _("Tape block size (%d) is not a power of 2\n"), TAPE_BSIZE); + } + + while ((ch = getopt(argc, argv, "c:d:fg:mpPstu:v?Ti")) != -1) { + switch (ch) { + case 'c': /* configuration file */ + if (configfile != NULL) { + free(configfile); + } + configfile = bstrdup(optarg); + break; + + case 'd': /* debug level */ + if (*optarg == 't') { + dbg_timestamp = true; + } else { + char *p; + /* We probably find a tag list -d 10,sql,bvfs */ + if ((p = strchr(optarg, ',')) != NULL) { + *p = 0; + } + debug_level = atoi(optarg); + if (debug_level <= 0) { + debug_level = 1; + } + if (p) { + debug_parse_tags(p+1, &debug_level_tags); + } + } + break; + + case 'T': + set_trace(true); + break; + + case 'f': /* run in foreground */ + foreground = true; + break; + + case 'g': /* set group id */ + gid = optarg; + break; + + /* Temp code to enable new match_bsr() code, not documented */ + case 'i': + use_new_match_all = 1; + + break; + case 'm': /* print kaboom output */ + prt_kaboom = true; + break; + + case 'p': /* proceed in spite of I/O errors */ + forge_on = true; + break; + + case 'P': /* no pid file */ + make_pid_file = false; + break; + + case 's': /* no signals */ + no_signals = true; + break; + + case 't': + test_config = true; + break; + + case 'u': /* set uid */ + uid = optarg; + break; + + case 'v': /* verbose */ + verbose++; + break; + + case '?': + default: + usage(); + break; + } + } + argc -= optind; + argv += optind; + + if (argc) { + if (configfile != NULL) { + free(configfile); + } + configfile = bstrdup(*argv); + argc--; + argv++; + } + if (argc) { + usage(); + } + + if (!foreground && !test_config) { + daemon_start(); /* become daemon */ + init_stack_dump(); /* pick up new pid */ + } + + if (!no_signals) { + init_signals(terminate_stored); + } + + if (configfile == NULL) { + configfile = bstrdup(CONFIG_FILE); + } + + config = New(CONFIG()); + parse_sd_config(config, configfile, M_ERROR_TERM); + + if (init_crypto() != 0) { + Jmsg((JCR *)NULL, M_ERROR_TERM, 0, _("Cryptography library initialization failed.\n")); + } + + if (!check_resources()) { + Jmsg((JCR *)NULL, M_ERROR_TERM, 0, _("Please correct configuration file: %s\n"), configfile); + } + + init_reservations_lock(); + + if (test_config) { + terminate_stored(0); + } + + 
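
As a concrete illustration of the option handling above (the configuration path, user and group below are hypothetical; the tag list reuses the -d 10,sql,bvfs form from the comment in the getopt loop), a debugging run might be started as:

    bacula-sd -f -T -d 10,sql,bvfs -u bacula -g tape -c /opt/bacula/etc/bacula-sd.conf

Here -f keeps the daemon in the foreground, -T turns tracing on, and everything after the comma in the -d argument is handed to debug_parse_tags().
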
my_name_is(0, (char **)NULL, me->hdr.name); /* Set our real name */ + + if (make_pid_file) { + create_pid_file(me->pid_directory, "bacula-sd", + get_first_port_host_order(me->sdaddrs)); + } + read_state_file(me->working_directory, "bacula-sd", + get_first_port_host_order(me->sdaddrs)); + + set_jcr_in_tsd(INVALID_JCR); + /* Make sure on Solaris we can run concurrent, watch dog + servers + misc */ + set_thread_concurrency(me->max_concurrent_jobs * 2 + 4); + lmgr_init_thread(); /* initialize the lockmanager stack */ + + load_sd_plugins(me->plugin_directory); + + drop(uid, gid, false); + + cleanup_old_files(); + + /* Ensure that Volume Session Time and Id are both + * set and are both non-zero. + */ + VolSessionTime = (uint32_t)daemon_start_time; + if (VolSessionTime == 0) { /* paranoid */ + Jmsg0(NULL, M_ABORT, 0, _("Volume Session Time is ZERO!\n")); + } + + /* + * Start the device allocation thread + */ + create_volume_lists(); /* do before device_init */ + if (pthread_create(&thid, NULL, device_initialization, NULL) != 0) { + berrno be; + Jmsg1(NULL, M_ABORT, 0, _("Unable to create thread. ERR=%s\n"), be.bstrerror()); + } + + start_watchdog(); /* start watchdog thread */ + init_jcr_subsystem(); /* start JCR watchdogs etc. */ + dbg_jcr_add_hook(sd_debug_print); /* used to director variables */ + + /* Single server used for Director and File daemon */ + server_tid = pthread_self(); + server_tid_valid = true; + bnet_thread_server(me->sdaddrs, me->max_concurrent_jobs * 2 + 1, + &dird_workq, handle_connection_request); + exit(1); /* to keep compiler quiet */ +} + +/* Return a new Session Id */ +uint32_t newVolSessionId() +{ + uint32_t Id; + + P(mutex); + VolSessionId++; + Id = VolSessionId; + V(mutex); + return Id; +} + +/* Check Configuration file for necessary info */ +static int check_resources() +{ + bool OK = true; + bool tls_needed; + AUTOCHANGER *changer; + DEVRES *device; + + me = (STORES *)GetNextRes(R_STORAGE, NULL); + if (!me) { + Jmsg1(NULL, M_ERROR, 0, _("No Storage resource defined in %s. Cannot continue.\n"), + configfile); + OK = false; + } + + if (GetNextRes(R_STORAGE, (RES *)me) != NULL) { + Jmsg1(NULL, M_ERROR, 0, _("Only one Storage resource permitted in %s\n"), + configfile); + OK = false; + } + if (GetNextRes(R_DIRECTOR, NULL) == NULL) { + Jmsg1(NULL, M_ERROR, 0, _("No Director resource defined in %s. Cannot continue.\n"), + configfile); + OK = false; + } + if (GetNextRes(R_DEVICE, NULL) == NULL){ + Jmsg1(NULL, M_ERROR, 0, _("No Device resource defined in %s. Cannot continue.\n"), + configfile); + OK = false; + } + + if (!me->messages) { + me->messages = (MSGS *)GetNextRes(R_MSGS, NULL); + if (!me->messages) { + Jmsg1(NULL, M_ERROR, 0, _("No Messages resource defined in %s. Cannot continue.\n"), + configfile); + OK = false; + } + } + + if (!me->working_directory) { + Jmsg1(NULL, M_ERROR, 0, _("No Working Directory defined in %s. 
Cannot continue.\n"), + configfile); + OK = false; + } + + DIRRES *director; + STORES *store; + foreach_res(store, R_STORAGE) { + /* tls_require implies tls_enable */ + if (store->tls_require) { + if (have_tls) { + store->tls_enable = true; + } else { + Jmsg(NULL, M_FATAL, 0, _("TLS required but not configured in Bacula.\n")); + OK = false; + continue; + } + } + + tls_needed = store->tls_enable || store->tls_authenticate; + + if (!store->tls_certfile && tls_needed) { + Jmsg(NULL, M_FATAL, 0, _("\"TLS Certificate\" file not defined for Storage \"%s\" in %s.\n"), + store->hdr.name, configfile); + OK = false; + } + + if (!store->tls_keyfile && tls_needed) { + Jmsg(NULL, M_FATAL, 0, _("\"TLS Key\" file not defined for Storage \"%s\" in %s.\n"), + store->hdr.name, configfile); + OK = false; + } + + if ((!store->tls_ca_certfile && !store->tls_ca_certdir) && tls_needed && store->tls_verify_peer) { + Jmsg(NULL, M_FATAL, 0, _("Neither \"TLS CA Certificate\"" + " or \"TLS CA Certificate Dir\" are defined for Storage \"%s\" in %s." + " At least one CA certificate store is required" + " when using \"TLS Verify Peer\".\n"), + store->hdr.name, configfile); + OK = false; + } + + /* If everything is well, attempt to initialize our per-resource TLS context */ + if (OK && (tls_needed || store->tls_require)) { + /* Initialize TLS context: + * Args: CA certfile, CA certdir, Certfile, Keyfile, + * Keyfile PEM Callback, Keyfile CB Userdata, DHfile, Verify Peer */ + store->tls_ctx = new_tls_context(store->tls_ca_certfile, + store->tls_ca_certdir, store->tls_certfile, + store->tls_keyfile, NULL, NULL, store->tls_dhfile, + store->tls_verify_peer); + + if (!store->tls_ctx) { + Jmsg(NULL, M_FATAL, 0, _("Failed to initialize TLS context for Storage \"%s\" in %s.\n"), + store->hdr.name, configfile); + OK = false; + } + } + } + + foreach_res(director, R_DIRECTOR) { + /* tls_require implies tls_enable */ + if (director->tls_require) { + director->tls_enable = true; + } + + tls_needed = director->tls_enable || director->tls_authenticate; + + if (!director->tls_certfile && tls_needed) { + Jmsg(NULL, M_FATAL, 0, _("\"TLS Certificate\" file not defined for Director \"%s\" in %s.\n"), + director->hdr.name, configfile); + OK = false; + } + + if (!director->tls_keyfile && tls_needed) { + Jmsg(NULL, M_FATAL, 0, _("\"TLS Key\" file not defined for Director \"%s\" in %s.\n"), + director->hdr.name, configfile); + OK = false; + } + + if ((!director->tls_ca_certfile && !director->tls_ca_certdir) && tls_needed && director->tls_verify_peer) { + Jmsg(NULL, M_FATAL, 0, _("Neither \"TLS CA Certificate\"" + " or \"TLS CA Certificate Dir\" are defined for Director \"%s\" in %s." 
+ " At least one CA certificate store is required" + " when using \"TLS Verify Peer\".\n"), + director->hdr.name, configfile); + OK = false; + } + + /* If everything is well, attempt to initialize our per-resource TLS context */ + if (OK && (tls_needed || director->tls_require)) { + /* Initialize TLS context: + * Args: CA certfile, CA certdir, Certfile, Keyfile, + * Keyfile PEM Callback, Keyfile CB Userdata, DHfile, Verify Peer */ + director->tls_ctx = new_tls_context(director->tls_ca_certfile, + director->tls_ca_certdir, director->tls_certfile, + director->tls_keyfile, NULL, NULL, director->tls_dhfile, + director->tls_verify_peer); + + if (!director->tls_ctx) { + Jmsg(NULL, M_FATAL, 0, _("Failed to initialize TLS context for Director \"%s\" in %s.\n"), + director->hdr.name, configfile); + OK = false; + } + } + } + + foreach_res(changer, R_AUTOCHANGER) { + foreach_alist(device, changer->device) { + device->cap_bits |= CAP_AUTOCHANGER; + } + } + + if (OK) { + OK = init_autochangers(); + } + + if (OK) { + close_msg(NULL); /* close temp message handler */ + init_msg(NULL, me->messages); /* open daemon message handler */ + set_working_directory(me->working_directory); + } + + return OK; +} + +/* + * Remove old .spool files written by me from the working directory. + */ +static void cleanup_old_files() +{ + DIR* dp; + int rc, name_max; + int my_name_len = strlen(my_name); + int len = strlen(me->working_directory); + POOLMEM *cleanup = get_pool_memory(PM_MESSAGE); + POOLMEM *basename = get_pool_memory(PM_MESSAGE); + POOL_MEM dname(PM_FNAME); + regex_t preg1; + char prbuf[500]; + const int nmatch = 30; + regmatch_t pmatch[nmatch]; + berrno be; + + /* Look for .spool files but don't allow spaces */ + const char *pat1 = "^[^ ]+\\.spool$"; + + /* Setup working directory prefix */ + pm_strcpy(basename, me->working_directory); + if (len > 0 && !IsPathSeparator(me->working_directory[len-1])) { + pm_strcat(basename, "/"); + } + + /* Compile regex expressions */ + rc = regcomp(&preg1, pat1, REG_EXTENDED); + if (rc != 0) { + regerror(rc, &preg1, prbuf, sizeof(prbuf)); + Pmsg2(000, _("Could not compile regex pattern \"%s\" ERR=%s\n"), + pat1, prbuf); + goto get_out2; + } + + name_max = pathconf(".", _PC_NAME_MAX); + if (name_max < 1024) { + name_max = 1024; + } + + if (!(dp = opendir(me->working_directory))) { + berrno be; + Pmsg2(000, "Failed to open working dir %s for cleanup: ERR=%s\n", + me->working_directory, be.bstrerror()); + goto get_out1; + } + + while (1) { + if (breaddir(dp, dname.addr()) != 0) { + break; + } + /* Exclude any name with ., .., not my_name or containing a space */ + if (strcmp(dname.c_str(), ".") == 0 || strcmp(dname.c_str(), "..") == 0 || + strncmp(dname.c_str(), my_name, my_name_len) != 0) { + Dmsg1(500, "Skipped: %s\n", dname.c_str()); + continue; + } + + /* Unlink files that match regex */ + if (regexec(&preg1, dname.c_str(), nmatch, pmatch, 0) == 0) { + pm_strcpy(cleanup, basename); + pm_strcat(cleanup, dname); + Dmsg1(500, "Unlink: %s\n", cleanup); + unlink(cleanup); + } + } + closedir(dp); + +get_out1: + regfree(&preg1); +get_out2: + free_pool_memory(cleanup); + free_pool_memory(basename); +} + +/* + * Here we attempt to init and open each device. This is done + * once at startup in a separate thread. 
+ */ +extern "C" +void *device_initialization(void *arg) +{ + DEVRES *device; + DCR *dcr; + JCR *jcr; + DEVICE *dev; + struct stat statp; + + pthread_detach(pthread_self()); + jcr = new_jcr(sizeof(JCR), stored_free_jcr); + new_plugins(jcr); /* instantiate plugins */ + jcr->setJobType(JT_SYSTEM); + /* Initialize FD start condition variable */ + int errstat = pthread_cond_init(&jcr->job_start_wait, NULL); + if (errstat != 0) { + berrno be; + Jmsg1(jcr, M_ABORT, 0, _("Unable to init job cond variable: ERR=%s\n"), be.bstrerror(errstat)); + } + + LockRes(); + + foreach_res(device, R_DEVICE) { + Dmsg1(90, "calling init_dev %s\n", device->hdr.name); + dev = init_dev(NULL, device); + Dmsg1(10, "SD init done %s\n", device->hdr.name); + if (!dev) { + Jmsg1(NULL, M_ERROR, 0, _("Could not initialize SD device \"%s\"\n"), device->hdr.name); + continue; + } + + jcr->dcr = dcr = new_dcr(jcr, NULL, dev); + generate_plugin_event(jcr, bsdEventDeviceInit, dcr); + + if (device->control_name && stat(device->control_name, &statp) < 0) { + berrno be; + Jmsg2(jcr, M_ERROR_TERM, 0, _("Unable to stat ControlDevice %s: ERR=%s\n"), + device->control_name, be.bstrerror()); + } + + if ((device->lock_command && device->control_name) && + !me->plugin_directory) { + Jmsg0(jcr, M_ERROR_TERM, 0, _("No plugin directory configured for SAN shared storage\n")); + } + + if (device->min_block_size > device->max_block_size) { + Jmsg1(jcr, M_ERROR_TERM, 0, _("MaximumBlockSize must be greater or equal than MinimumBlockSize for Device \"%s\"\n"), + dev->print_name()); + } + + if (device->min_block_size > device->max_block_size) { + Jmsg1(jcr, M_ERROR_TERM, 0, _("MaximumBlockSize must be greater or equal than MinimumBlockSize for Device \"%s\"\n"), + dev->print_name()); + } + + /* + * Note: be careful setting the slot here. If the drive + * is shared storage, the contents can change before + * the drive is used. 
+ */ + if (device->cap_bits & CAP_ALWAYSOPEN) { + if (dev->is_autochanger()) { + /* If autochanger set slot in dev sturcture */ + get_autochanger_loaded_slot(dcr); + } + Dmsg1(20, "calling first_open_device %s\n", dev->print_name()); + if (generate_plugin_event(jcr, bsdEventDeviceOpen, dcr) != bRC_OK) { + Jmsg(jcr, M_FATAL, 0, _("generate_plugin_event(bsdEventDeviceOpen) Failed\n")); + continue; + } + + if (!first_open_device(dcr)) { + Jmsg1(NULL, M_ERROR, 0, _("Could not open device %s\n"), dev->print_name()); + Dmsg1(20, "Could not open device %s\n", dev->print_name()); + generate_plugin_event(jcr, bsdEventDeviceClose, dcr); + free_dcr(dcr); + jcr->dcr = NULL; + continue; + } + } else { + /* If not always open, we don't know what is in the drive */ + dev->clear_slot(); + } + if (device->cap_bits & CAP_AUTOMOUNT && dev->is_open()) { + switch (dev->read_dev_volume_label(dcr)) { + case VOL_OK: + memcpy(&dev->VolCatInfo, &dcr->VolCatInfo, sizeof(dev->VolCatInfo)); + volume_unused(dcr); /* mark volume "released" */ + break; + default: + Jmsg1(NULL, M_WARNING, 0, _("Could not mount device %s\n"), dev->print_name()); + break; + } + } + + free_dcr(dcr); + jcr->dcr = NULL; + } + + UnlockRes(); + +#ifdef xxx + if (jcr->dcr) { + Pmsg1(000, "free_dcr=%p\n", jcr->dcr); + free_dcr(jcr->dcr); + jcr->dcr = NULL; + } +#endif + free_plugins(jcr); + free_jcr(jcr); + init_done = true; + return NULL; +} + + +/* Clean up and then exit */ +void terminate_stored(int sig) +{ + static bool in_here = false; + DEVRES *device; + JCR *jcr; + + if (in_here) { /* prevent loops */ + bmicrosleep(2, 0); /* yield */ + exit(1); + } + in_here = true; + debug_level = 0; /* turn off any debug */ + stop_watchdog(); + + if (sig == SIGTERM || sig == SIGINT) { /* normal shutdown request? or ^C */ + /* + * This is a normal shutdown request. We wiffle through + * all open jobs canceling them and trying to wake + * them up so that they will report back the correct + * volume status. 
+ */ + foreach_jcr(jcr) { + BSOCK *fd; + if (jcr->JobId == 0) { + free_jcr(jcr); + continue; /* ignore console */ + } + if (jcr->dcr) { + /* Make sure no device remains locked */ + generate_plugin_event(jcr, bsdEventDeviceClose, jcr->dcr); + } + jcr->setJobStatus(JS_Canceled); + fd = jcr->file_bsock; + if (fd) { + fd->set_timed_out(); + jcr->my_thread_send_signal(TIMEOUT_SIGNAL); + Dmsg1(100, "term_stored killing JobId=%d\n", jcr->JobId); + /* ***FIXME*** wiffle through all dcrs */ + if (jcr->dcr && jcr->dcr->dev && jcr->dcr->dev->blocked()) { + pthread_cond_broadcast(&jcr->dcr->dev->wait_next_vol); + Dmsg1(100, "JobId=%u broadcast wait_device_release\n", (uint32_t)jcr->JobId); + pthread_cond_broadcast(&wait_device_release); + } + if (jcr->read_dcr && jcr->read_dcr->dev && jcr->read_dcr->dev->blocked()) { + pthread_cond_broadcast(&jcr->read_dcr->dev->wait_next_vol); + pthread_cond_broadcast(&wait_device_release); + } + bmicrosleep(0, 50000); + } + free_jcr(jcr); + } + bmicrosleep(0, 500000); /* give them 1/2 sec to clean up */ + } + + if (!test_config) { + write_state_file(me->working_directory, + "bacula-sd", get_first_port_host_order(me->sdaddrs)); + if (make_pid_file) { + delete_pid_file(me->pid_directory, + "bacula-sd", get_first_port_host_order(me->sdaddrs)); + } + } + + Dmsg1(200, "In terminate_stored() sig=%d\n", sig); + + unload_plugins(); + free_volume_lists(); + + free_daemon_message_queue(); + + foreach_res(device, R_DEVICE) { + Dmsg2(10, "Term device %s %s\n", device->hdr.name, device->device_name); + if (device->dev) { + device->dev->clear_volhdr(); + device->dev->term(NULL); + device->dev = NULL; + } else { + Dmsg2(10, "No dev structure %s %s\n", device->hdr.name, device->device_name); + } + } + if (server_tid_valid) { + server_tid_valid = false; + bnet_stop_thread_server(server_tid); + } + + if (configfile) { + free(configfile); + configfile = NULL; + } + if (config) { + delete config; + config = NULL; + } + + if (chk_dbglvl(10)) { + print_memory_pool_stats(); + } + term_msg(); + cleanup_crypto(); + term_reservations_lock(); + free(res_head); + res_head = NULL; + close_memory_pool(); + lmgr_cleanup_main(); + + sm_dump(false); /* dump orphaned buffers */ + exit(sig); +} diff --git a/src/stored/stored.conf.in b/src/stored/stored.conf.in new file mode 100644 index 00000000..062e2068 --- /dev/null +++ b/src/stored/stored.conf.in @@ -0,0 +1,59 @@ +# +# Kern's Bacula Storage Daemon Configuration file +# +# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ @DISTVER@ +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# +# "Global" Storage daemon configuration specifications +# +Storage { + Name = "Storage daemon" + Address = @hostname@ + SDPort = @sd_port@ # Directors port + WorkingDirectory = "@working_dir@" + Pid Directory = "@piddir@" + Subsys Directory = "@subsysdir@" +} + +# +# List Directors who are permitted to contact Storage daemon +# +Director { + Name = @hostname@-dir + Password = local_storage_password +} + +# +# Devices supported by this Storage daemon +# To connect, the Director must have the same Name and MediaType, +# which are sent to the File daemon +# +Device { + Name = "HP DLT 80" + Media Type = DLT8000 + Archive Device = /dev/nst0 +# LabelMedia = yes; # lets Bacula label unlabelled media + AutomaticMount = yes; # when device opened, read it + AlwaysOpen = yes; + RemovableMedia = yes; +} + +#Device { +# Name = "Exabyte 8mm" +# Media Type = "8mm" +# Archive Device = /dev/nst1 +# Hardware end of medium = No; +## 
LabelMedia = yes; # lets Bacula label unlabelled media +# AutomaticMount = yes; # when device opened, read it +# AlwaysOpen = Yes; +# RemovableMedia = yes; +#} + +Messages { + Name = Standard + director = @hostname@-dir = all + operator = @dump_email@ = mount +} diff --git a/src/stored/stored.h b/src/stored/stored.h new file mode 100644 index 00000000..e9cf8185 --- /dev/null +++ b/src/stored/stored.h @@ -0,0 +1,103 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Storage daemon specific defines and includes + * + */ + +#ifndef __STORED_H_ +#define __STORED_H_ + +#define STORAGE_DAEMON 1 + +/* Set to debug Lock() and Unlock() only */ +#define DEV_DEBUG_LOCK + +/* + * Set to define all SD locks except Lock() + * currently this does not work. More locks + * must be converted. + */ +#define SD_DEBUG_LOCK +#ifdef SD_DEBUG_LOCK +const int sd_dbglvl = 300; +#else +const int sd_dbglvl = 300; +#endif + +#undef SD_DEDUP_SUPPORT + +#ifdef HAVE_MTIO_H +#include +#else +# ifdef HAVE_SYS_MTIO_H +# include +# else +# ifdef HAVE_SYS_TAPE_H +# include +# else + /* Needed for Mac 10.6 (Snow Leopard) */ +# include "lib/bmtio.h" +# endif +# endif +#endif +#include "lock.h" +#include "block.h" +#include "record.h" +#include "dev.h" +#include "stored_conf.h" +#include "bsr.h" +#include "jcr.h" +#include "vol_mgr.h" +#include "reserve.h" +#include "protos.h" +#ifdef HAVE_LIBZ +#include /* compression headers */ +#else +#define uLongf uint32_t +#endif +#ifdef HAVE_FNMATCH +#include +#else +#include "lib/fnmatch.h" +#endif +#ifdef HAVE_DIRENT_H +#include +#endif + +#include "file_dev.h" +#include "tape_dev.h" +#include "fifo_dev.h" +#include "null_dev.h" +#include "vtape_dev.h" +#include "cloud_dev.h" +#include "aligned_dev.h" +#include "win_file_dev.h" +#include "win_tape_dev.h" +#include "sd_plugins.h" + +int breaddir(DIR *dirp, POOLMEM *&d_name); + +/* Daemon globals from stored.c */ +extern STORES *me; /* "Global" daemon resource */ +extern bool forge_on; /* proceed inspite of I/O errors */ +extern pthread_mutex_t device_release_mutex; +extern pthread_cond_t wait_device_release; /* wait for any device to be released */ + +#endif /* __STORED_H_ */ diff --git a/src/stored/stored_conf.c b/src/stored/stored_conf.c new file mode 100644 index 00000000..1752d3dd --- /dev/null +++ b/src/stored/stored_conf.c @@ -0,0 +1,955 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. 
+ + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Configuration file parser for Bacula Storage daemon + * + * Kern Sibbald, March MM + */ + +#include "bacula.h" +#include "stored.h" +#include "cloud_driver.h" + +/* First and last resource ids */ +int32_t r_first = R_FIRST; +int32_t r_last = R_LAST; +RES_HEAD **res_head; + +/* We build the current resource here statically, + * then move it to dynamic memory */ +#if defined(_MSC_VER) +extern "C" { // work around visual compiler mangling variables + URES res_all; +} +#else +URES res_all; +#endif +int32_t res_all_size = sizeof(res_all); + +/* Definition of records permitted within each + * resource with the routine to process the record + * information. + */ + +/* + * Globals for the Storage daemon. + * name handler value code flags default_value + */ +static RES_ITEM store_items[] = { + {"Name", store_name, ITEM(res_store.hdr.name), 0, ITEM_REQUIRED, 0}, + {"Description", store_str, ITEM(res_dir.hdr.desc), 0, 0, 0}, + {"SdAddress", store_addresses_address, ITEM(res_store.sdaddrs), 0, ITEM_DEFAULT, 9103}, + {"SdAddresses", store_addresses, ITEM(res_store.sdaddrs), 0, ITEM_DEFAULT, 9103}, + {"Messages", store_res, ITEM(res_store.messages), R_MSGS, 0, 0}, + {"SdPort", store_addresses_port, ITEM(res_store.sdaddrs), 0, ITEM_DEFAULT, 9103}, + {"WorkingDirectory", store_dir, ITEM(res_store.working_directory), 0, ITEM_REQUIRED, 0}, + {"PidDirectory", store_dir, ITEM(res_store.pid_directory), 0, ITEM_REQUIRED, 0}, + {"SubsysDirectory", store_dir, ITEM(res_store.subsys_directory), 0, 0, 0}, + {"PluginDirectory", store_dir, ITEM(res_store.plugin_directory), 0, 0, 0}, + {"ScriptsDirectory", store_dir, ITEM(res_store.scripts_directory), 0, 0, 0}, + {"MaximumConcurrentJobs", store_pint32, ITEM(res_store.max_concurrent_jobs), 0, ITEM_DEFAULT, 20}, + {"ClientConnectTimeout", store_time, ITEM(res_store.ClientConnectTimeout), 0, ITEM_DEFAULT, 60 * 30}, + {"HeartbeatInterval", store_time, ITEM(res_store.heartbeat_interval), 0, ITEM_DEFAULT, 5 * 60}, + {"TlsAuthenticate", store_bool, ITEM(res_store.tls_authenticate), 0, 0, 0}, + {"TlsEnable", store_bool, ITEM(res_store.tls_enable), 0, 0, 0}, + {"TlsRequire", store_bool, ITEM(res_store.tls_require), 0, 0, 0}, + {"TlsVerifyPeer", store_bool, ITEM(res_store.tls_verify_peer), 1, ITEM_DEFAULT, 1}, + {"TlsCaCertificateFile", store_dir, ITEM(res_store.tls_ca_certfile), 0, 0, 0}, + {"TlsCaCertificateDir", store_dir, ITEM(res_store.tls_ca_certdir), 0, 0, 0}, + {"TlsCertificate", store_dir, ITEM(res_store.tls_certfile), 0, 0, 0}, + {"TlsKey", store_dir, ITEM(res_store.tls_keyfile), 0, 0, 0}, + {"TlsDhFile", store_dir, ITEM(res_store.tls_dhfile), 0, 0, 0}, + {"TlsAllowedCn", store_alist_str, ITEM(res_store.tls_allowed_cns), 0, 0, 0}, + {"ClientConnectWait", store_time, ITEM(res_store.client_wait), 0, ITEM_DEFAULT, 30 * 60}, + {"VerId", store_str, ITEM(res_store.verid), 0, 0, 0}, + {"CommCompression", store_bool, ITEM(res_store.comm_compression), 0, ITEM_DEFAULT, true}, + {NULL, NULL, {0}, 0, 0, 0} +}; + + +/* Directors that can speak to the Storage daemon */ +static RES_ITEM dir_items[] = { + {"Name", store_name, ITEM(res_dir.hdr.name), 0, ITEM_REQUIRED, 0}, + {"Description", store_str, ITEM(res_dir.hdr.desc), 0, 0, 0}, + {"Password", store_password, ITEM(res_dir.password), 0, ITEM_REQUIRED, 0}, + {"Monitor", store_bool, ITEM(res_dir.monitor), 0, 0, 0}, + {"TlsAuthenticate", store_bool, ITEM(res_dir.tls_authenticate), 0, 0, 0}, + {"TlsEnable", store_bool, ITEM(res_dir.tls_enable), 0, 0, 0}, + 
{"TlsRequire", store_bool, ITEM(res_dir.tls_require), 0, 0, 0}, + {"TlsVerifyPeer", store_bool, ITEM(res_dir.tls_verify_peer), 1, ITEM_DEFAULT, 1}, + {"TlsCaCertificateFile", store_dir, ITEM(res_dir.tls_ca_certfile), 0, 0, 0}, + {"TlsCaCertificateDir", store_dir, ITEM(res_dir.tls_ca_certdir), 0, 0, 0}, + {"TlsCertificate", store_dir, ITEM(res_dir.tls_certfile), 0, 0, 0}, + {"TlsKey", store_dir, ITEM(res_dir.tls_keyfile), 0, 0, 0}, + {"TlsDhFile", store_dir, ITEM(res_dir.tls_dhfile), 0, 0, 0}, + {"TlsAllowedCn", store_alist_str, ITEM(res_dir.tls_allowed_cns), 0, 0, 0}, + {NULL, NULL, {0}, 0, 0, 0} +}; + +/* Device definition */ +static RES_ITEM dev_items[] = { + {"Name", store_name, ITEM(res_dev.hdr.name), 0, ITEM_REQUIRED, 0}, + {"Description", store_str, ITEM(res_dir.hdr.desc), 0, 0, 0}, + {"MediaType", store_strname,ITEM(res_dev.media_type), 0, ITEM_REQUIRED, 0}, + {"DeviceType", store_devtype,ITEM(res_dev.dev_type), 0, 0, 0}, + {"ArchiveDevice", store_strname,ITEM(res_dev.device_name), 0, ITEM_REQUIRED, 0}, + {"AlignedDevice", store_strname,ITEM(res_dev.adevice_name), 0, 0, 0}, + {"HardwareEndOfFile", store_bit, ITEM(res_dev.cap_bits), CAP_EOF, ITEM_DEFAULT, 1}, + {"HardwareEndOfMedium", store_bit, ITEM(res_dev.cap_bits), CAP_EOM, ITEM_DEFAULT, 1}, + {"BackwardSpaceRecord", store_bit, ITEM(res_dev.cap_bits), CAP_BSR, ITEM_DEFAULT, 1}, + {"BackwardSpaceFile", store_bit, ITEM(res_dev.cap_bits), CAP_BSF, ITEM_DEFAULT, 1}, + {"BsfAtEom", store_bit, ITEM(res_dev.cap_bits), CAP_BSFATEOM, ITEM_DEFAULT, 0}, + {"TwoEof", store_bit, ITEM(res_dev.cap_bits), CAP_TWOEOF, ITEM_DEFAULT, 0}, + {"ForwardSpaceRecord", store_bit, ITEM(res_dev.cap_bits), CAP_FSR, ITEM_DEFAULT, 1}, + {"ForwardSpaceFile", store_bit, ITEM(res_dev.cap_bits), CAP_FSF, ITEM_DEFAULT, 1}, + {"FastForwardSpaceFile", store_bit, ITEM(res_dev.cap_bits), CAP_FASTFSF, ITEM_DEFAULT, 1}, + {"RemovableMedia", store_bit, ITEM(res_dev.cap_bits), CAP_REM, ITEM_DEFAULT, 1}, + {"RandomAccess", store_bit, ITEM(res_dev.cap_bits), CAP_RACCESS, 0, 0}, + {"AutomaticMount", store_bit, ITEM(res_dev.cap_bits), CAP_AUTOMOUNT, ITEM_DEFAULT, 0}, + {"LabelMedia", store_bit, ITEM(res_dev.cap_bits), CAP_LABEL, ITEM_DEFAULT, 0}, + {"AlwaysOpen", store_bit, ITEM(res_dev.cap_bits), CAP_ALWAYSOPEN, ITEM_DEFAULT, 1}, + {"Autochanger", store_bit, ITEM(res_dev.cap_bits), CAP_AUTOCHANGER, ITEM_DEFAULT, 0}, + {"CloseOnPoll", store_bit, ITEM(res_dev.cap_bits), CAP_CLOSEONPOLL, ITEM_DEFAULT, 0}, + {"BlockPositioning", store_bit, ITEM(res_dev.cap_bits), CAP_POSITIONBLOCKS, ITEM_DEFAULT, 1}, + {"UseMtiocGet", store_bit, ITEM(res_dev.cap_bits), CAP_MTIOCGET, ITEM_DEFAULT, 1}, + {"CheckLabels", store_bit, ITEM(res_dev.cap_bits), CAP_CHECKLABELS, ITEM_DEFAULT, 0}, + {"RequiresMount", store_bit, ITEM(res_dev.cap_bits), CAP_REQMOUNT, ITEM_DEFAULT, 0}, + {"OfflineOnUnmount", store_bit, ITEM(res_dev.cap_bits), CAP_OFFLINEUNMOUNT, ITEM_DEFAULT, 0}, + {"BlockChecksum", store_bit, ITEM(res_dev.cap_bits), CAP_BLOCKCHECKSUM, ITEM_DEFAULT, 1}, + {"Enabled", store_bool, ITEM(res_dev.enabled), 0, ITEM_DEFAULT, 1}, + {"AutoSelect", store_bool, ITEM(res_dev.autoselect), 0, ITEM_DEFAULT, 1}, + {"ReadOnly", store_bool, ITEM(res_dev.read_only), 0, ITEM_DEFAULT, 0}, + {"ChangerDevice", store_strname,ITEM(res_dev.changer_name), 0, 0, 0}, + {"ControlDevice", store_strname,ITEM(res_dev.control_name), 0, 0, 0}, + {"ChangerCommand", store_strname,ITEM(res_dev.changer_command), 0, 0, 0}, + {"AlertCommand", store_strname,ITEM(res_dev.alert_command), 0, 0, 0}, + {"LockCommand", 
store_strname,ITEM(res_dev.lock_command), 0, 0, 0}, + {"WormCommand", store_strname,ITEM(res_dev.worm_command), 0, 0, 0}, + {"MaximumChangerWait", store_time, ITEM(res_dev.max_changer_wait), 0, ITEM_DEFAULT, 5 * 60}, + {"MaximumOpenWait", store_time, ITEM(res_dev.max_open_wait), 0, ITEM_DEFAULT, 5 * 60}, + {"MaximumNetworkBufferSize", store_pint32, ITEM(res_dev.max_network_buffer_size), 0, 0, 0}, + {"VolumePollInterval", store_time, ITEM(res_dev.vol_poll_interval), 0, ITEM_DEFAULT, 5 * 60}, + {"MaximumRewindWait", store_time, ITEM(res_dev.max_rewind_wait), 0, ITEM_DEFAULT, 5 * 60}, + {"MinimumBlockSize", store_size32, ITEM(res_dev.min_block_size), 0, 0, 0}, + {"MaximumBlockSize", store_maxblocksize, ITEM(res_dev.max_block_size), 0, 0, 0}, + {"PaddingSize", store_size32, ITEM(res_dev.padding_size), 0, ITEM_DEFAULT, 4096}, + {"FileAlignment", store_size32, ITEM(res_dev.file_alignment), 0, ITEM_DEFAULT, 4096}, + {"MinimumAlignedSize", store_size32, ITEM(res_dev.min_aligned_size), 0, ITEM_DEFAULT, 4096}, + {"MaximumVolumeSize", store_size64, ITEM(res_dev.max_volume_size), 0, 0, 0}, + {"MaximumFileSize", store_size64, ITEM(res_dev.max_file_size), 0, ITEM_DEFAULT, 1000000000}, + {"VolumeCapacity", store_size64, ITEM(res_dev.volume_capacity), 0, 0, 0}, + {"MinimumFeeSpace", store_size64, ITEM(res_dev.min_free_space), 0, ITEM_DEFAULT, 5000000}, + {"MaximumConcurrentJobs", store_pint32, ITEM(res_dev.max_concurrent_jobs), 0, 0, 0}, + {"SpoolDirectory", store_dir, ITEM(res_dev.spool_directory), 0, 0, 0}, + {"MaximumSpoolSize", store_size64, ITEM(res_dev.max_spool_size), 0, 0, 0}, + {"MaximumJobSpoolSize", store_size64, ITEM(res_dev.max_job_spool_size), 0, 0, 0}, + {"DriveIndex", store_pint32, ITEM(res_dev.drive_index), 0, 0, 0}, + {"MaximumPartSize", store_size64, ITEM(res_dev.max_part_size), 0, ITEM_DEFAULT, 0}, + {"MountPoint", store_strname,ITEM(res_dev.mount_point), 0, 0, 0}, + {"MountCommand", store_strname,ITEM(res_dev.mount_command), 0, 0, 0}, + {"UnmountCommand", store_strname,ITEM(res_dev.unmount_command), 0, 0, 0}, + {"WritePartCommand", store_strname,ITEM(res_dev.write_part_command), 0, 0, 0}, + {"FreeSpaceCommand", store_strname,ITEM(res_dev.free_space_command), 0, 0, 0}, + {"LabelType", store_label, ITEM(res_dev.label_type), 0, 0, 0}, + {"Cloud", store_res, ITEM(res_dev.cloud), R_CLOUD, 0, 0}, + {NULL, NULL, {0}, 0, 0, 0} +}; + +/* Autochanger definition */ +static RES_ITEM changer_items[] = { + {"Name", store_name, ITEM(res_changer.hdr.name), 0, ITEM_REQUIRED, 0}, + {"Description", store_str, ITEM(res_changer.hdr.desc), 0, 0, 0}, + {"Device", store_alist_res, ITEM(res_changer.device), R_DEVICE, ITEM_REQUIRED, 0}, + {"ChangerDevice", store_strname, ITEM(res_changer.changer_name), 0, ITEM_REQUIRED, 0}, + {"ChangerCommand", store_strname, ITEM(res_changer.changer_command), 0, ITEM_REQUIRED, 0}, + {"LockCommand", store_strname,ITEM(res_changer.lock_command), 0, 0, 0}, + {NULL, NULL, {0}, 0, 0, 0} +}; + +/* Cloud driver definition */ +static RES_ITEM cloud_items[] = { + {"Name", store_name, ITEM(res_cloud.hdr.name), 0, ITEM_REQUIRED, 0}, + {"Description", store_str, ITEM(res_cloud.hdr.desc), 0, 0, 0}, + {"Driver", store_cloud_driver, ITEM(res_cloud.driver_type), 0, ITEM_REQUIRED, 0}, + {"HostName", store_strname,ITEM(res_cloud.host_name), 0, ITEM_REQUIRED, 0}, + {"BucketName", store_strname,ITEM(res_cloud.bucket_name), 0, ITEM_REQUIRED, 0}, + {"Region", store_strname,ITEM(res_cloud.region), 0, 0, 0}, + {"AccessKey", store_strname,ITEM(res_cloud.access_key), 0, ITEM_REQUIRED, 0}, + 
{"SecretKey", store_strname,ITEM(res_cloud.secret_key), 0, ITEM_REQUIRED, 0}, + {"Protocol", store_protocol, ITEM(res_cloud.protocol), 0, ITEM_DEFAULT, 0}, /* HTTPS */ + {"UriStyle", store_uri_style, ITEM(res_cloud.uri_style), 0, ITEM_DEFAULT, 0}, /* VirtualHost */ + {"TruncateCache", store_truncate, ITEM(res_cloud.trunc_opt), 0, ITEM_DEFAULT, TRUNC_NO}, + {"Upload", store_upload, ITEM(res_cloud.upload_opt), 0, ITEM_DEFAULT, UPLOAD_NO}, + {"MaximumConcurrentUploads", store_pint32, ITEM(res_cloud.max_concurrent_uploads), 0, ITEM_DEFAULT, 0}, + {"MaximumConcurrentDownloads", store_pint32, ITEM(res_cloud.max_concurrent_downloads), 0, ITEM_DEFAULT, 0}, + {"MaximumUploadBandwidth", store_speed, ITEM(res_cloud.upload_limit), 0, 0, 0}, + {"MaximumDownloadBandwidth", store_speed, ITEM(res_cloud.download_limit), 0, 0, 0}, + {NULL, NULL, {0}, 0, 0, 0} +}; + + +/* Message resource */ +extern RES_ITEM msgs_items[]; + + +/* This is the master resource definition */ +RES_TABLE resources[] = { + {"Director", dir_items, R_DIRECTOR}, + {"Storage", store_items, R_STORAGE}, + {"Device", dev_items, R_DEVICE}, + {"Messages", msgs_items, R_MSGS}, + {"Autochanger", changer_items, R_AUTOCHANGER}, + {"Cloud", cloud_items, R_CLOUD}, + {NULL, NULL, 0} +}; + +/* + * Device types + * + * device type device code = token + */ +s_kw dev_types[] = { + {"File", B_FILE_DEV}, + {"Tape", B_TAPE_DEV}, + {"Fifo", B_FIFO_DEV}, + {"VTape", B_VTAPE_DEV}, + {"Vtl", B_VTL_DEV}, + {"Aligned", B_ALIGNED_DEV}, + {"Null", B_NULL_DEV}, + {"Cloud", B_CLOUD_DEV}, + {NULL, 0} +}; + + +/* + * Store Device Type (File, FIFO, Tape, Cloud, ...) + * + */ +void store_devtype(LEX *lc, RES_ITEM *item, int index, int pass) +{ + bool found = false; + + lex_get_token(lc, T_NAME); + /* Store the label pass 2 so that type is defined */ + for (int i=0; dev_types[i].name; i++) { + if (strcasecmp(lc->str, dev_types[i].name) == 0) { + *(uint32_t *)(item->value) = dev_types[i].token; + found = true; + break; + } + } + if (!found) { + scan_err1(lc, _("Expected a Device Type keyword, got: %s"), lc->str); + } + scan_to_eol(lc); + set_bit(index, res_all.hdr.item_present); +} + +/* + * Cloud drivers + * + * driver driver code + */ +s_kw cloud_drivers[] = { + {"S3", C_S3_DRIVER}, + {"File", C_FILE_DRIVER}, + {NULL, 0} +}; + +/* + * Store Device Type (File, FIFO, Tape, Cloud, ...) 
+ * + */ +void store_cloud_driver(LEX *lc, RES_ITEM *item, int index, int pass) +{ + bool found = false; + + lex_get_token(lc, T_NAME); + /* Store the label pass 2 so that type is defined */ + for (int i=0; cloud_drivers[i].name; i++) { + if (strcasecmp(lc->str, cloud_drivers[i].name) == 0) { + *(uint32_t *)(item->value) = cloud_drivers[i].token; + found = true; + break; + } + } + if (!found) { + scan_err1(lc, _("Expected a Cloud driver keyword, got: %s"), lc->str); + } + scan_to_eol(lc); + set_bit(index, res_all.hdr.item_present); +} + +/* + * Cloud Truncate cache options + * + * Option option code = token + */ +s_kw trunc_opts[] = { + {"No", TRUNC_NO}, + {"AfterUpload", TRUNC_AFTER_UPLOAD}, + {"AtEndOfJob", TRUNC_AT_ENDOFJOB}, + {NULL, 0} +}; + +/* + * Store Cloud Truncate cache option (AfterUpload, AtEndOfJob, No) + * + */ +void store_truncate(LEX *lc, RES_ITEM *item, int index, int pass) +{ + bool found = false; + + lex_get_token(lc, T_NAME); + /* Store the label pass 2 so that type is defined */ + for (int i=0; trunc_opts[i].name; i++) { + if (strcasecmp(lc->str, trunc_opts[i].name) == 0) { + *(uint32_t *)(item->value) = trunc_opts[i].token; + found = true; + break; + } + } + if (!found) { + scan_err1(lc, _("Expected a Truncate Cache option keyword, got: %s"), lc->str); + } + scan_to_eol(lc); + set_bit(index, res_all.hdr.item_present); +} + +/* + * Cloud Upload options + * + * Option option code = token + */ +s_kw upload_opts[] = { + {"No", UPLOAD_NO}, + {"EachPart", UPLOAD_EACHPART}, + {"AtEndOfJob", UPLOAD_AT_ENDOFJOB}, + {NULL, 0} +}; + +/* + * Store Cloud Upload option (EachPart, AtEndOfJob, No) + * + */ +void store_upload(LEX *lc, RES_ITEM *item, int index, int pass) +{ + bool found = false; + + lex_get_token(lc, T_NAME); + /* Store the label pass 2 so that type is defined */ + for (int i=0; upload_opts[i].name; i++) { + if (strcasecmp(lc->str, upload_opts[i].name) == 0) { + *(uint32_t *)(item->value) = upload_opts[i].token; + found = true; + break; + } + } + if (!found) { + scan_err1(lc, _("Expected a Cloud Upload option keyword, got: %s"), lc->str); + } + scan_to_eol(lc); + set_bit(index, res_all.hdr.item_present); +} + +/* + * Cloud connection protocol options + * + * Option option code = token + */ +s_kw proto_opts[] = { + {"HTTPS", 0}, + {"HTTP", 1}, + {NULL, 0} +}; + +/* + * Store Cloud connect protocol option (HTTPS, HTTP) + * + */ +void store_protocol(LEX *lc, RES_ITEM *item, int index, int pass) +{ + bool found = false; + + lex_get_token(lc, T_NAME); + /* Store the label pass 2 so that type is defined */ + for (int i=0; proto_opts[i].name; i++) { + if (strcasecmp(lc->str, proto_opts[i].name) == 0) { + *(uint32_t *)(item->value) = proto_opts[i].token; + found = true; + break; + } + } + if (!found) { + scan_err1(lc, _("Expected a Cloud communications protocol option keyword, got: %s"), lc->str); + } + scan_to_eol(lc); + set_bit(index, res_all.hdr.item_present); +} + +/* + * Cloud Uri Style options + * + * Option option code = token + */ +s_kw uri_opts[] = { + {"VirtualHost", 0}, + {"Path", 1}, + {NULL, 0} +}; + +/* + * Store Cloud Uri Style option + * + */ +void store_uri_style(LEX *lc, RES_ITEM *item, int index, int pass) +{ + bool found = false; + + lex_get_token(lc, T_NAME); + /* Store the label pass 2 so that type is defined */ + for (int i=0; uri_opts[i].name; i++) { + if (strcasecmp(lc->str, uri_opts[i].name) == 0) { + *(uint32_t *)(item->value) = uri_opts[i].token; + found = true; + break; + } + } + if (!found) { + scan_err1(lc, _("Expected a Cloud Uri Style option 
keyword, got: %s"), lc->str); + } + scan_to_eol(lc); + set_bit(index, res_all.hdr.item_present); +} + + +/* + * Store Maximum Block Size, and check it is not greater than MAX_BLOCK_SIZE + * + */ +void store_maxblocksize(LEX *lc, RES_ITEM *item, int index, int pass) +{ + store_size32(lc, item, index, pass); + if (*(uint32_t *)(item->value) > MAX_BLOCK_SIZE) { + scan_err2(lc, _("Maximum Block Size configured value %u is greater than allowed maximum: %u"), + *(uint32_t *)(item->value), MAX_BLOCK_SIZE ); + } +} + +/* Dump contents of resource */ +void dump_resource(int type, RES *rres, void sendit(void *sock, const char *fmt, ...), void *sock) +{ + URES *res = (URES *)rres; + char buf[1000]; + int recurse = 1; + IPADDR *p; + if (res == NULL) { + sendit(sock, _("Warning: no \"%s\" resource (%d) defined.\n"), res_to_str(type), type); + return; + } + sendit(sock, _("dump_resource type=%d\n"), type); + if (type < 0) { /* no recursion */ + type = - type; + recurse = 0; + } + switch (type) { + case R_DIRECTOR: + sendit(sock, "Director: name=%s\n", res->res_dir.hdr.name); + break; + case R_STORAGE: + sendit(sock, "Storage: name=%s SDaddr=%s SDport=%d SDDport=%d HB=%s\n", + res->res_store.hdr.name, + NPRT(get_first_address(res->res_store.sdaddrs, buf, sizeof(buf))), + get_first_port_host_order(res->res_store.sdaddrs), + get_first_port_host_order(res->res_store.sddaddrs), + edit_utime(res->res_store.heartbeat_interval, buf, sizeof(buf))); + if (res->res_store.sdaddrs) { + foreach_dlist(p, res->res_store.sdaddrs) { + sendit(sock, " SDaddr=%s SDport=%d\n", + p->get_address(buf, sizeof(buf)), p->get_port_host_order()); + } + } + if (res->res_store.sddaddrs) { + foreach_dlist(p, res->res_store.sddaddrs) { + sendit(sock, " SDDaddr=%s SDDport=%d\n", + p->get_address(buf, sizeof(buf)), p->get_port_host_order()); + } + } + break; + case R_DEVICE: + sendit(sock, "Device: name=%s MediaType=%s Device=%s LabelType=%d\n", + res->res_dev.hdr.name, + res->res_dev.media_type, res->res_dev.device_name, + res->res_dev.label_type); + sendit(sock, " rew_wait=%lld min_bs=%d max_bs=%d chgr_wait=%lld\n", + res->res_dev.max_rewind_wait, res->res_dev.min_block_size, + res->res_dev.max_block_size, res->res_dev.max_changer_wait); + sendit(sock, " max_jobs=%d max_files=%lld max_size=%lld\n", + res->res_dev.max_volume_jobs, res->res_dev.max_volume_files, + res->res_dev.max_volume_size); + sendit(sock, " min_block_size=%lld max_block_size=%lld\n", + res->res_dev.min_block_size, res->res_dev.max_block_size); + sendit(sock, " max_file_size=%lld capacity=%lld\n", + res->res_dev.max_file_size, res->res_dev.volume_capacity); + sendit(sock, " spool_directory=%s\n", NPRT(res->res_dev.spool_directory)); + sendit(sock, " max_spool_size=%lld max_job_spool_size=%lld\n", + res->res_dev.max_spool_size, res->res_dev.max_job_spool_size); + if (res->res_dev.worm_command) { + sendit(sock, " worm command=%s\n", res->res_dev.worm_command); + } + if (res->res_dev.changer_res) { + sendit(sock, " changer=%p\n", res->res_dev.changer_res); + } + bstrncpy(buf, " ", sizeof(buf)); + if (res->res_dev.cap_bits & CAP_EOF) { + bstrncat(buf, "CAP_EOF ", sizeof(buf)); + } + if (res->res_dev.cap_bits & CAP_BSR) { + bstrncat(buf, "CAP_BSR ", sizeof(buf)); + } + if (res->res_dev.cap_bits & CAP_BSF) { + bstrncat(buf, "CAP_BSF ", sizeof(buf)); + } + if (res->res_dev.cap_bits & CAP_FSR) { + bstrncat(buf, "CAP_FSR ", sizeof(buf)); + } + if (res->res_dev.cap_bits & CAP_FSF) { + bstrncat(buf, "CAP_FSF ", sizeof(buf)); + } + if (res->res_dev.cap_bits & CAP_EOM) { + 
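+         /* CAP_EOM: the drive can be sent MTEOM to space to end of media -- see eod() in tape_dev.c below, which uses MTEOM when this capability bit is set */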
bstrncat(buf, "CAP_EOM ", sizeof(buf)); + } + if (res->res_dev.cap_bits & CAP_REM) { + bstrncat(buf, "CAP_REM ", sizeof(buf)); + } + if (res->res_dev.cap_bits & CAP_RACCESS) { + bstrncat(buf, "CAP_RACCESS ", sizeof(buf)); + } + if (res->res_dev.cap_bits & CAP_AUTOMOUNT) { + bstrncat(buf, "CAP_AUTOMOUNT ", sizeof(buf)); + } + if (res->res_dev.cap_bits & CAP_LABEL) { + bstrncat(buf, "CAP_LABEL ", sizeof(buf)); + } + if (res->res_dev.cap_bits & CAP_ANONVOLS) { + bstrncat(buf, "CAP_ANONVOLS ", sizeof(buf)); + } + if (res->res_dev.cap_bits & CAP_ALWAYSOPEN) { + bstrncat(buf, "CAP_ALWAYSOPEN ", sizeof(buf)); + } + if (res->res_dev.cap_bits & CAP_CHECKLABELS) { + bstrncat(buf, "CAP_CHECKLABELS ", sizeof(buf)); + } + if (res->res_dev.cap_bits & CAP_REQMOUNT) { + bstrncat(buf, "CAP_REQMOUNT ", sizeof(buf)); + } + if (res->res_dev.cap_bits & CAP_OFFLINEUNMOUNT) { + bstrncat(buf, "CAP_OFFLINEUNMOUNT ", sizeof(buf)); + } + bstrncat(buf, "\n", sizeof(buf)); + sendit(sock, buf); /* Send caps string */ + if (res->res_dev.cloud) { + sendit(sock, " --->Cloud: name=%s\n", res->res_dev.cloud->hdr.name); + } + break; + case R_CLOUD: + sendit(sock, "Cloud: name=%s Driver=%d\n" + " HostName=%s\n" + " BucketName=%s\n" + " AccessKey=%s SecretKey=%s\n" + " AuthRegion=%s\n" + " Protocol=%d UriStyle=%d\n", + res->res_cloud.hdr.name, res->res_cloud.driver_type, + res->res_cloud.host_name, + res->res_cloud.bucket_name, + res->res_cloud.access_key, res->res_cloud.secret_key, + res->res_cloud.region, + res->res_cloud.protocol, res->res_cloud.uri_style); + break; + case R_AUTOCHANGER: + DEVRES *dev; + sendit(sock, "Changer: name=%s Changer_devname=%s\n Changer_cmd=%s\n", + res->res_changer.hdr.name, + res->res_changer.changer_name, res->res_changer.changer_command); + foreach_alist(dev, res->res_changer.device) { + sendit(sock, " --->Device: name=%s\n", dev->hdr.name); + } + break; + case R_MSGS: + sendit(sock, "Messages: name=%s\n", res->res_msgs.hdr.name); + if (res->res_msgs.mail_cmd) + sendit(sock, " mailcmd=%s\n", res->res_msgs.mail_cmd); + if (res->res_msgs.operator_cmd) + sendit(sock, " opcmd=%s\n", res->res_msgs.operator_cmd); + break; + default: + sendit(sock, _("Warning: unknown resource type %d\n"), type); + break; + } + rres = GetNextRes(type, rres); + if (recurse && rres) + dump_resource(type, rres, sendit, sock); +} + +/* + * Free memory of resource. + * NB, we don't need to worry about freeing any references + * to other resources as they will be freed when that + * resource chain is traversed. Mainly we worry about freeing + * allocated strings (names). 
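+ *  For example, a Device's cloud pointer and the DEVRES entries on a changer's device alist are owned by the Cloud and Device resource chains and are freed when those chains are traversed.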
+ */ +void free_resource(RES *sres, int type) +{ + URES *res = (URES *)sres; + + if (res == NULL) + return; + + /* common stuff -- free the resource name */ + if (res->res_dir.hdr.name) { + free(res->res_dir.hdr.name); + } + if (res->res_dir.hdr.desc) { + free(res->res_dir.hdr.desc); + } + + + switch (type) { + case R_DIRECTOR: + if (res->res_dir.password) { + free(res->res_dir.password); + } + if (res->res_dir.address) { + free(res->res_dir.address); + } + if (res->res_dir.tls_ctx) { + free_tls_context(res->res_dir.tls_ctx); + } + if (res->res_dir.tls_ca_certfile) { + free(res->res_dir.tls_ca_certfile); + } + if (res->res_dir.tls_ca_certdir) { + free(res->res_dir.tls_ca_certdir); + } + if (res->res_dir.tls_certfile) { + free(res->res_dir.tls_certfile); + } + if (res->res_dir.tls_keyfile) { + free(res->res_dir.tls_keyfile); + } + if (res->res_dir.tls_dhfile) { + free(res->res_dir.tls_dhfile); + } + if (res->res_dir.tls_allowed_cns) { + delete res->res_dir.tls_allowed_cns; + } + break; + case R_AUTOCHANGER: + if (res->res_changer.changer_name) { + free(res->res_changer.changer_name); + } + if (res->res_changer.changer_command) { + free(res->res_changer.changer_command); + } + if (res->res_changer.lock_command) { + free(res->res_changer.lock_command); + } + if (res->res_changer.device) { + delete res->res_changer.device; + } + rwl_destroy(&res->res_changer.changer_lock); + break; + case R_STORAGE: + if (res->res_store.sdaddrs) { + free_addresses(res->res_store.sdaddrs); + } + if (res->res_store.sddaddrs) { + free_addresses(res->res_store.sddaddrs); + } + if (res->res_store.working_directory) { + free(res->res_store.working_directory); + } + if (res->res_store.pid_directory) { + free(res->res_store.pid_directory); + } + if (res->res_store.subsys_directory) { + free(res->res_store.subsys_directory); + } + if (res->res_store.plugin_directory) { + free(res->res_store.plugin_directory); + } + if (res->res_store.scripts_directory) { + free(res->res_store.scripts_directory); + } + if (res->res_store.tls_ctx) { + free_tls_context(res->res_store.tls_ctx); + } + if (res->res_store.tls_ca_certfile) { + free(res->res_store.tls_ca_certfile); + } + if (res->res_store.tls_ca_certdir) { + free(res->res_store.tls_ca_certdir); + } + if (res->res_store.tls_certfile) { + free(res->res_store.tls_certfile); + } + if (res->res_store.tls_keyfile) { + free(res->res_store.tls_keyfile); + } + if (res->res_store.tls_dhfile) { + free(res->res_store.tls_dhfile); + } + if (res->res_store.tls_allowed_cns) { + delete res->res_store.tls_allowed_cns; + } + if (res->res_store.verid) { + free(res->res_store.verid); + } + break; + case R_CLOUD: + if (res->res_cloud.host_name) { + free(res->res_cloud.host_name); + } + if (res->res_cloud.bucket_name) { + free(res->res_cloud.bucket_name); + } + if (res->res_cloud.access_key) { + free(res->res_cloud.access_key); + } + if (res->res_cloud.secret_key) { + free(res->res_cloud.secret_key); + } + if (res->res_cloud.region) { + free(res->res_cloud.region); + } + break; + case R_DEVICE: + if (res->res_dev.media_type) { + free(res->res_dev.media_type); + } + if (res->res_dev.device_name) { + free(res->res_dev.device_name); + } + if (res->res_dev.adevice_name) { + free(res->res_dev.adevice_name); + } + if (res->res_dev.control_name) { + free(res->res_dev.control_name); + } + if (res->res_dev.changer_name) { + free(res->res_dev.changer_name); + } + if (res->res_dev.changer_command) { + free(res->res_dev.changer_command); + } + if (res->res_dev.alert_command) { + 
free(res->res_dev.alert_command); + } + if (res->res_dev.worm_command) { + free(res->res_dev.worm_command); + } + if (res->res_dev.lock_command) { + free(res->res_dev.lock_command); + } + if (res->res_dev.spool_directory) { + free(res->res_dev.spool_directory); + } + if (res->res_dev.mount_point) { + free(res->res_dev.mount_point); + } + if (res->res_dev.mount_command) { + free(res->res_dev.mount_command); + } + if (res->res_dev.unmount_command) { + free(res->res_dev.unmount_command); + } + if (res->res_dev.write_part_command) { + free(res->res_dev.write_part_command); + } + if (res->res_dev.free_space_command) { + free(res->res_dev.free_space_command); + } + break; + case R_MSGS: + if (res->res_msgs.mail_cmd) { + free(res->res_msgs.mail_cmd); + } + if (res->res_msgs.operator_cmd) { + free(res->res_msgs.operator_cmd); + } + free_msgs_res((MSGS *)res); /* free message resource */ + res = NULL; + break; + default: + Dmsg1(0, _("Unknown resource type %d\n"), type); + break; + } + /* Common stuff again -- free the resource, recurse to next one */ + if (res) { + free(res); + } +} + +/* Save the new resource by chaining it into the head list for + * the resource. If this is pass 2, we update any resource + * or alist pointers. + */ +bool save_resource(CONFIG *config, int type, RES_ITEM *items, int pass) +{ + URES *res; + int rindex = type - r_first; + int i, size; + int error = 0; + + /* + * Ensure that all required items are present + */ + for (i=0; items[i].name; i++) { + if (items[i].flags & ITEM_REQUIRED) { + if (!bit_is_set(i, res_all.res_dir.hdr.item_present)) { + Mmsg(config->m_errmsg, _("\"%s\" directive is required in \"%s\" resource, but not found.\n"), + items[i].name, resources[rindex].name); + return false; + } + } + /* If this triggers, take a look at lib/parse_conf.h */ + if (i >= MAX_RES_ITEMS) { + Mmsg(config->m_errmsg, _("Too many directives in \"%s\" resource\n"), resources[rindex].name); + return false; + } + } + + /* During pass 2, we looked up pointers to all the resources + * referrenced in the current resource, , now we + * must copy their address from the static record to the allocated + * record. + */ + if (pass == 2) { + DEVRES *dev; + int errstat; + switch (type) { + /* Resources not containing a resource */ + case R_MSGS: + case R_CLOUD: + break; + + /* Resources containing a resource or an alist */ + case R_DIRECTOR: + if ((res = (URES *)GetResWithName(R_DIRECTOR, res_all.res_dir.hdr.name)) == NULL) { + Mmsg(config->m_errmsg, _("Cannot find Director resource %s\n"), res_all.res_dir.hdr.name); + return false; + } + res->res_dir.tls_allowed_cns = res_all.res_dir.tls_allowed_cns; + break; + case R_STORAGE: + if ((res = (URES *)GetResWithName(R_STORAGE, res_all.res_dir.hdr.name)) == NULL) { + Mmsg(config->m_errmsg, _("Cannot find Storage resource %s\n"), res_all.res_dir.hdr.name); + return false; + } + res->res_store.messages = res_all.res_store.messages; + res->res_store.tls_allowed_cns = res_all.res_store.tls_allowed_cns; + break; + case R_AUTOCHANGER: + if ((res = (URES *)GetResWithName(type, res_all.res_changer.hdr.name)) == NULL) { + Mmsg(config->m_errmsg, _("Cannot find AutoChanger resource %s\n"), + res_all.res_changer.hdr.name); + return false; + } + /* we must explicitly copy the device alist pointer */ + res->res_changer.device = res_all.res_changer.device; + /* + * Now update each device in this resource to point back + * to the changer resource. 
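+ *  (The back pointer lets device code reach its AUTOCHANGER resource later; dump_resource() above prints it as changer=%p.)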
+ */ + foreach_alist(dev, res->res_changer.device) { + dev->changer_res = (AUTOCHANGER *)&res->res_changer; + } + if ((errstat = rwl_init(&res->res_changer.changer_lock, PRIO_SD_ACH_ACCESS)) != 0) { + berrno be; + Mmsg(config->m_errmsg, _("Unable to init lock for Autochanger=%s: ERR=%s\n"), + res_all.res_changer.hdr.name, be.bstrerror(errstat)); + return false; + } + break; + case R_DEVICE: + if ((res = (URES *)GetResWithName(R_DEVICE, res_all.res_dev.hdr.name)) == NULL) { + Mmsg(config->m_errmsg, _("Cannot find Device resource %s\n"), res_all.res_dir.hdr.name); + return false; + } + res->res_dev.cloud = res_all.res_dev.cloud; + break; + default: + printf(_("Unknown resource type %d\n"), type); + error = 1; + break; + } + + + if (res_all.res_dir.hdr.name) { + free(res_all.res_dir.hdr.name); + res_all.res_dir.hdr.name = NULL; + } + if (res_all.res_dir.hdr.desc) { + free(res_all.res_dir.hdr.desc); + res_all.res_dir.hdr.desc = NULL; + } + return true; + } + + /* The following code is only executed on pass 1 */ + switch (type) { + case R_DIRECTOR: + size = sizeof(DIRRES); + break; + case R_STORAGE: + size = sizeof(STORES); + break; + case R_DEVICE: + size = sizeof(DEVRES); + break; + case R_MSGS: + size = sizeof(MSGS); + break; + case R_AUTOCHANGER: + size = sizeof(AUTOCHANGER); + break; + case R_CLOUD: + size = sizeof(CLOUD); + break; + default: + printf(_("Unknown resource type %d\n"), type); + error = 1; + size = 1; + break; + } + /* Common */ + if (!error) { + if (!config->insert_res(rindex, size)) { + return false; + } + } + return true; +} + +bool parse_sd_config(CONFIG *config, const char *configfile, int exit_code) +{ + config->init(configfile, NULL, exit_code, (void *)&res_all, res_all_size, + r_first, r_last, resources, &res_head); + return config->parse_config(); +} diff --git a/src/stored/stored_conf.h b/src/stored/stored_conf.h new file mode 100644 index 00000000..70a28ca5 --- /dev/null +++ b/src/stored/stored_conf.h @@ -0,0 +1,228 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ + +extern s_kw dev_types[]; + +/* + * Cloud Truncate Cache options + */ +enum { + TRUNC_NO = 0, /* default value */ + TRUNC_AFTER_UPLOAD = 1, + TRUNC_AT_ENDOFJOB = 2, + TRUNC_CONF_DEFAULT = 3 /* only use as a parameter, not in the conf */ +}; + +/* + * Cloud Upload options + */ +enum { + UPLOAD_EACHPART = 0, /* default value */ + UPLOAD_NO = 1, + UPLOAD_AT_ENDOFJOB = 2 +}; + + +/* + * Resource codes -- they must be sequential for indexing + * + */ + +enum { + R_DIRECTOR = 3001, + R_STORAGE = 3002, + R_DEVICE = 3003, + R_MSGS = 3004, + R_AUTOCHANGER = 3005, + R_CLOUD = 3006, + R_FIRST = R_DIRECTOR, + R_LAST = R_CLOUD /* keep this updated */ +}; + +enum { + R_NAME = 3020, + R_ADDRESS, + R_PASSWORD, + R_TYPE, + R_BACKUP +}; + + +/* Definition of the contents of each Resource */ + +/* + * Cloud drivers + */ +class CLOUD { +public: + RES hdr; + char *host_name; + char *bucket_name; + char *access_key; + char *secret_key; + char *blob_endpoint; + char *file_endpoint; + char *queue_endpoint; + char *table_endpoint; + char *endpoint_suffix; + char *region; + int32_t protocol; + int32_t uri_style; + uint32_t driver_type; /* Cloud driver type */ + uint32_t trunc_opt; + uint32_t upload_opt; + uint32_t max_concurrent_uploads; + uint32_t max_concurrent_downloads; + uint64_t upload_limit; + uint64_t download_limit; +}; + +/* + * Director resource + */ +class DIRRES { +public: + RES hdr; + + char *password; /* Director password */ + char *address; /* Director IP address or zero */ + bool monitor; /* Have only access to status and .status functions */ + bool tls_authenticate; /* Authenticate with TLS */ + bool tls_enable; /* Enable TLS */ + bool tls_require; /* Require TLS */ + bool tls_verify_peer; /* TLS Verify Client Certificate */ + char *tls_ca_certfile; /* TLS CA Certificate File */ + char *tls_ca_certdir; /* TLS CA Certificate Directory */ + char *tls_certfile; /* TLS Server Certificate File */ + char *tls_keyfile; /* TLS Server Key File */ + char *tls_dhfile; /* TLS Diffie-Hellman Parameters */ + alist *tls_allowed_cns; /* TLS Allowed Clients */ + + TLS_CONTEXT *tls_ctx; /* Shared TLS Context */ +}; + + +/* Storage daemon "global" definitions */ +class s_res_store { +public: + RES hdr; + + dlist *sdaddrs; + dlist *sddaddrs; + char *working_directory; /* working directory for checkpoints */ + char *pid_directory; + char *subsys_directory; + char *plugin_directory; /* Plugin directory */ + char *scripts_directory; + uint32_t max_concurrent_jobs; /* maximum concurrent jobs to run */ + MSGS *messages; /* Daemon message handler */ + utime_t ClientConnectTimeout; /* Max time to wait to connect client */ + utime_t heartbeat_interval; /* Interval to send hb to FD */ + utime_t client_wait; /* Time to wait for FD to connect */ + bool comm_compression; /* Set to allow comm line compression */ + bool tls_authenticate; /* Authenticate with TLS */ + bool tls_enable; /* Enable TLS */ + bool tls_require; /* Require TLS */ + bool tls_verify_peer; /* TLS Verify Client Certificate */ + char *tls_ca_certfile; /* TLS CA Certificate File */ + char *tls_ca_certdir; /* TLS CA Certificate Directory */ + char *tls_certfile; /* TLS Server Certificate File */ + char *tls_keyfile; /* TLS Server Key File */ + char *tls_dhfile; /* TLS Diffie-Hellman Parameters */ + alist *tls_allowed_cns; /* TLS Allowed Clients */ + char *verid; /* Custom Id to print in version command */ + TLS_CONTEXT *tls_ctx; /* Shared TLS Context */ + +}; +typedef class s_res_store STORES; + +class AUTOCHANGER { +public: + RES hdr; + alist *device; /* 
List of DEVRES device pointers */ + char *changer_name; /* Changer device name */ + char *changer_command; /* Changer command -- external program */ + char *lock_command; /* Share storage lock command -- external program */ + brwlock_t changer_lock; /* One changer operation at a time */ +}; + +/* Device specific definitions */ +class DEVRES { +public: + RES hdr; + + char *media_type; /* User assigned media type */ + char *device_name; /* Archive device name */ + char *adevice_name; /* Aligned device name */ + char *changer_name; /* Changer device name */ + char *control_name; /* SCSI control device name */ + char *changer_command; /* Changer command -- external program */ + char *alert_command; /* Alert command -- external program */ + char *lock_command; /* Share storage lock command -- external program */ + char *worm_command; /* Worm detection command -- external program */ + char *spool_directory; /* Spool file directory */ + uint32_t dev_type; /* device type */ + uint32_t label_type; /* label type */ + bool enabled; /* Set when enabled (default) */ + bool autoselect; /* Automatically select from AutoChanger */ + bool read_only; /* Drive is read only */ + uint32_t drive_index; /* Autochanger drive index */ + uint32_t cap_bits; /* Capabilities of this device */ + utime_t max_changer_wait; /* Changer timeout */ + utime_t max_rewind_wait; /* maximum secs to wait for rewind */ + utime_t max_open_wait; /* maximum secs to wait for open */ + uint32_t padding_size; /* adata block padding -- bytes */ + uint32_t file_alignment; /* adata file alignment -- bytes */ + uint32_t min_aligned_size; /* minimum adata size */ + uint32_t min_block_size; /* min block size */ + uint32_t max_block_size; /* max block size */ + uint32_t max_volume_jobs; /* max jobs to put on one volume */ + uint32_t max_network_buffer_size; /* max network buf size */ + uint32_t max_concurrent_jobs; /* maximum concurrent jobs this drive */ + utime_t vol_poll_interval; /* interval between polling volume during mount */ + int64_t max_volume_files; /* max files to put on one volume */ + int64_t max_volume_size; /* max bytes to put on one volume */ + int64_t max_file_size; /* max file size in bytes */ + int64_t volume_capacity; /* advisory capacity */ + int64_t min_free_space; /* Minimum disk free space */ + int64_t max_spool_size; /* Max spool size for all jobs */ + int64_t max_job_spool_size; /* Max spool size for any single job */ + + int64_t max_part_size; /* Max part size */ + char *mount_point; /* Mount point for require mount devices */ + char *mount_command; /* Mount command */ + char *unmount_command; /* Unmount command */ + char *write_part_command; /* Write part command */ + char *free_space_command; /* Free space command */ + CLOUD *cloud; /* pointer to cloud resource */ + + /* The following are set at runtime */ + DEVICE *dev; /* Pointer to phyical dev -- set at runtime */ + AUTOCHANGER *changer_res; /* pointer to changer res if any */ +}; + +union URES { + DIRRES res_dir; + STORES res_store; + DEVRES res_dev; + MSGS res_msgs; + AUTOCHANGER res_changer; + CLOUD res_cloud; + RES hdr; +}; diff --git a/src/stored/tape_alert.c b/src/stored/tape_alert.c new file mode 100644 index 00000000..0cf12bea --- /dev/null +++ b/src/stored/tape_alert.c @@ -0,0 +1,230 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * Routines for getting and displaying tape alerts + * + * Written by Kern Sibbald, October MMXVI + * + */ + +#include "bacula.h" /* pull in global headers */ +#include "stored.h" /* pull in Storage Deamon headers */ + +#include "tape_alert_msgs.h" + +static const int dbglvl = 120; + +#define MAX_MSG 54 /* Maximum alert message number */ + +void alert_callback(void *ctx, const char *short_msg, const char *long_msg, + char *Volume, int severity, int flags, int alertno, utime_t alert_time) +{ + DCR *dcr = (DCR *)ctx; + JCR *jcr = dcr->jcr; + DEVICE *dev = dcr->dev; + int type = M_INFO; + + switch (severity) { + case 'C': + type = M_FATAL; + break; + case 'W': + type = M_WARNING; + break; + case 'I': + type = M_INFO; + break; + } + if (flags & TA_DISABLE_DRIVE) { + dev->enabled = false; + Jmsg(jcr, M_WARNING, 0, _("Disabled Device %s due to tape alert=%d.\n"), + dev->print_name(), alertno); + Tmsg2(dbglvl, _("Disabled Device %s due to tape alert=%d.\n"), + dev->print_name(), alertno); + } + if (flags & TA_DISABLE_VOLUME) { + dev->setVolCatStatus("Disabled"); + dev->VolCatInfo.VolEnabled = false; + dir_update_volume_info(dcr, false, true); + Jmsg(jcr, M_WARNING, 0, _("Disabled Volume \"%s\" due to tape alert=%d.\n"), + Volume, alertno); + Tmsg2(dbglvl, _("Disabled Volume \"%s\" due to tape alert=%d.\n"), + Volume, alertno); + } + Jmsg(jcr, type, (utime_t)alert_time, _("Alert: Volume=\"%s\" alert=%d: ERR=%s\n"), + Volume, alertno, long_msg); +} + +bool tape_dev::get_tape_alerts(DCR *dcr) +{ + JCR *jcr = dcr->jcr; + + if (!job_canceled(jcr) && dcr->device->alert_command && + dcr->device->control_name) { + POOLMEM *alertcmd; + int status = 1; + int nalerts = 0; + BPIPE *bpipe; + ALERT *alert, *rmalert; + char line[MAXSTRING]; + const char *fmt = "TapeAlert[%d]"; + + if (!alert_list) { + alert_list = New(alist(10)); + } + alertcmd = get_pool_memory(PM_FNAME); + alertcmd = edit_device_codes(dcr, alertcmd, dcr->device->alert_command, ""); + /* Wait maximum 5 minutes */ + bpipe = open_bpipe(alertcmd, 60 * 5, "r"); + if (bpipe) { + int alertno; + alert = (ALERT *)malloc(sizeof(ALERT)); + memset(alert->alerts, 0, sizeof(alert->alerts)); + alert->Volume = bstrdup(getVolCatName()); + alert->alert_time = (utime_t)time(NULL); + while (fgets(line, (int)sizeof(line), bpipe->rfd)) { + alertno = 0; + if (bsscanf(line, fmt, &alertno) == 1) { + if (alertno > 0) { + if (nalerts+1 > (int)sizeof(alert->alerts)) { + break; + } else { + alert->alerts[nalerts++] = alertno; + } + } + } + } + status = close_bpipe(bpipe); + if (nalerts > 0) { + /* Maintain First in, last out list */ + if (alert_list->size() > 8) { + rmalert = (ALERT *)alert_list->last(); + free(rmalert->Volume); + alert_list->pop(); + free(rmalert); + } + alert_list->prepend(alert); + } else { + free(alert->Volume); + free(alert); + } + free_pool_memory(alertcmd); + return true; + } else { + status = errno; + } + if (status != 0) { + berrno be; + Jmsg(jcr, M_ALERT, 0, _("3997 Bad alert command: %s: ERR=%s.\n"), + alertcmd, be.bstrerror(status)); + Tmsg2(10, _("3997 Bad alert command: %s: ERR=%s.\n"), + alertcmd, be.bstrerror(status)); + } + + 
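+      /* We only get here when open_bpipe() failed: the success path above has already freed alertcmd and returned true */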
Dmsg1(400, "alert status=%d\n", status); + free_pool_memory(alertcmd); + } else { + if (!dcr->device->alert_command) { + Dmsg1(dbglvl, "Cannot do tape alerts: no Alert Command specified for device %s\n", + print_name()); + Tmsg1(dbglvl, "Cannot do tape alerts: no Alert Command specified for device %s\n", + print_name()); + + } + if (!dcr->device->control_name) { + Dmsg1(dbglvl, "Cannot do tape alerts: no Control Device specified for device %s\n", + print_name()); + Tmsg1(dbglvl, "Cannot do tape alerts: no Control Device specified for device %s\n", + print_name()); + } + } + return false; +} + + +/* + * Print desired tape alert messages + */ +void tape_dev::show_tape_alerts(DCR *dcr, alert_list_type list_type, + alert_list_which which, alert_cb alert_callback) +{ + int i; + ALERT *alert; + int code; + + if (!alert_list) { + return; + } + Dmsg1(dbglvl, "There are %d alerts.\n", alert_list->size()); + switch (list_type) { + case list_codes: + foreach_alist(alert, alert_list) { + for (i=0; i<(int)sizeof(alert->alerts) && alert->alerts[i]; i++) { + code = alert->alerts[i]; + Dmsg4(dbglvl, "Volume=%s alert=%d severity=%c flags=0x%x\n", alert->Volume, code, + ta_errors[code].severity, (int)ta_errors[code].flags); + alert_callback(dcr, ta_errors[code].short_msg, long_msg[code], + alert->Volume, ta_errors[code].severity, + ta_errors[code].flags, code, (utime_t)alert->alert_time); + } + if (which == list_last) { + break; + } + } + break; + default: + foreach_alist(alert, alert_list) { + for (i=0; i<(int)sizeof(alert->alerts) && alert->alerts[i]; i++) { + code = alert->alerts[i]; + Dmsg4(dbglvl, "Volume=%s severity=%c flags=0x%x alert=%s\n", alert->Volume, + ta_errors[code].severity, (int)ta_errors[code].flags, + ta_errors[code].short_msg); + alert_callback(dcr, ta_errors[code].short_msg, long_msg[code], + alert->Volume, ta_errors[code].severity, + ta_errors[code].flags, code, (utime_t)alert->alert_time); + } + if (which == list_last) { + break; + } + } + break; + } + return; +} + +/* + * Delete alert list returning number deleted + */ +int tape_dev::delete_alerts() +{ + ALERT *alert; + int deleted = 0; + + if (alert_list) { + foreach_alist(alert, alert_list) { + free(alert->Volume); + deleted++; + } + alert_list->destroy(); + free(alert_list); + alert_list = NULL; + } + return deleted; +} diff --git a/src/stored/tape_alert_msgs.h b/src/stored/tape_alert_msgs.h new file mode 100644 index 00000000..090f51bd --- /dev/null +++ b/src/stored/tape_alert_msgs.h @@ -0,0 +1,166 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * + * Routines for getting and displaying tape alerts + * + * Written by Kern Sibbald, October MMXVI + * + */ + +struct ta_error_handling { + const char severity; + const char flags; + const char *short_msg; +}; + +#define TA_NONE (0) +#define TA_DISABLE_DRIVE (1<<0) +#define TA_DISABLE_VOLUME (1<<1) +#define TA_CLEAN_DRIVE (1<<2) +#define TA_PERIODIC_CLEAN (1<<3) +#define TA_RETENTION (1<<4) + +/* + * ta_error_handling determines error handling + * Severity determines if we have info, warning or fail (critical) error. + * Flags allows setting flags + * + */ +static struct ta_error_handling ta_errors[] = { + {' ', TA_NONE, ""}, +/* 1 */ {'W', TA_NONE, "Read Warning"}, + {'W', TA_NONE, "Write Warning"}, + {'C', TA_NONE, "Hard Error"}, + {'C', TA_NONE, "Media"}, + {'C', TA_NONE, "Read Failure"}, + {'C', TA_NONE, "Write Failure"}, + {'W', TA_DISABLE_VOLUME, "Media Life"}, + {'W', TA_DISABLE_VOLUME, "Not Data Grade"}, + {'C', TA_NONE, "Write Protect"}, +/* 10 */ {'I', TA_NONE, "No Removal"}, + {'I', TA_NONE, "Cleaning Media"}, + {'I', TA_NONE, "Unsupported Format"}, + {'C', TA_DISABLE_VOLUME, "Recoverable Snapped Tape"}, + {'C', TA_DISABLE_DRIVE| \ + TA_DISABLE_VOLUME, "Unrecoverable Snapped Tape"}, + {'W', TA_NONE, "Cartridge Memory Chip Failure"}, + {'C', TA_NONE, "Forced Eject"}, + {'W', TA_NONE, "Read Only Format"}, + {'W', TA_NONE, "Tape Directory Corrupted on load"}, + {'I', TA_NONE, "Nearing Media Life"}, +/* 20 */ {'C', TA_CLEAN_DRIVE| \ + TA_DISABLE_DRIVE| \ + TA_DISABLE_VOLUME, "Clean Now"}, + {'W', TA_PERIODIC_CLEAN, "Clean Periodic"}, + {'C', TA_DISABLE_VOLUME, "Expired Cleaning Media"}, + {'C', TA_RETENTION, "Invalid Cleaning Media"}, + {'W', TA_NONE, "Retention Requested"}, + {'W', TA_NONE, "Dual-Port Interface Error"}, + {'W', TA_NONE, "Cooling Fan Failure"}, + {'W', TA_NONE, "Power Supply Failure"}, + {'W', TA_NONE, "Power Consumption"}, + {'W', TA_DISABLE_DRIVE, "Drive Maintenance"}, +/* 30 */ {'C', TA_DISABLE_DRIVE, "Hardware A"}, + {'C', TA_DISABLE_DRIVE, "Hardware B"}, + {'W', TA_NONE, "Interface"}, + {'C', TA_NONE, "Eject Media"}, + {'W', TA_NONE, "Download Fail"}, + {'W', TA_NONE, "Drive Humidity"}, + {'W', TA_NONE, "Drive Temperature"}, + {'W', TA_NONE, "Drive Voltage"}, + {'C', TA_DISABLE_DRIVE, "Predictive Failure"}, + {'W', TA_DISABLE_DRIVE, "Diagnostics Required"}, +/* 40 */ {'C', TA_NONE, "Loader Hardware A"}, + {'C', TA_NONE, "Loader Stray Tape"}, + {'W', TA_NONE, "Loader Hardware B"}, + {'C', TA_NONE, "Loader Door"}, + {'C', TA_NONE, "Loader Hardware C"}, + {'C', TA_NONE, "Loader Magazine"}, +/* 46 */ {'W', TA_NONE, "Loader Predictive Failure"}, + {' ', TA_NONE, ""}, + {' ', TA_NONE, ""}, + {' ', TA_NONE, ""}, +/* 50 */ {'W', TA_NONE, "Lost Statistics"}, + {'W', TA_NONE, "Tape directory invalid at unload"}, + {'C', TA_DISABLE_VOLUME, "Tape system area write failure"}, + {'C', TA_DISABLE_VOLUME, "Tape system area read failure"}, +/* 54 */ {'C', TA_DISABLE_VOLUME, "No start of data"} +}; + +/* + * Long message, sometimes even too verbose. + */ +static const char *long_msg[] = { + "", +/* 1 */ "The tape drive is having problems reading data. No data has been lost, but there has been a reduction in the performance of the tape. The drive is having severe trouble reading", + "The tape drive is having problems writing data. No data has been lost, but there has been a reduction in the capacity of the tape. 
The drive is having severe trouble writing", + "The operation has stopped because an error has occurred while reading or writing data which the drive cannot correct. The drive had a hard read or write error", + "Your data is at risk: Media cannot be written/read, or media performance is severely degraded.\n 1. Copy any data you require from the tape.\n 2. Do not use this tape again.\n 3. Restart the operation with a different tape.", + "The tape is damaged or the drive is faulty. Call the tape drive supplier helpline. The drive can no longer read data from the tape", + "The tape is from a faulty batch or the tape drive is faulty: The drive can no longer write data to the tape.\n 1. Use a good tape to test the drive.\n 2. If the problem persists, call the tape drive supplier helpline.", + "The tape cartridge has reached the end of its calculated useful life: The media has exceeded its specified life.\n 1. Copy any data you need to another tape.\n 2. Discard the old tape.", + "The tape cartridge is not data-grade. Any data you back up to the tape is at risk. The drive has not been able to read the MRS stripes. Replace the cartridge with a data-grade tape.", + "You are trying to write to a write-protected cartridge. Write command is attempted to a write protected tape. Remove the write-protection or use another tape.", +/* 10 */ "You cannot eject the cartridge because the tape drive is in use. Manual or s/w unload attempted when prevent media removal is enabled. Wait until the operation is complete before ejecting the cartridge.", + "The tape in the drive is a cleaning cartridge. Cleaning tape loaded in drive.", + "You have tried to load a cartridge of a type which is not supported by this drive. Attempted loaded of unsupported tape format, e.g. DDS2 in DDS1 drive.", + "The operation has failed because the tape in the drive has snapped: Tape snapped/cut in the drive where media can be ejected.\n 1. Discard the old tape.\n 2. Restart the operation with a different tape.", + "The operation has failed because the tape in the drive has snapped: Tape snapped/cut in the drive where media cannot be ejected.\n 1. Do not attempt to extract the tape cartridge.\n 2. Call the tape drive supplier helpline.", + "The memory in the tape cartridge has failed, which reduces performance. Memory chip failed in cartridge. Do not use the cartridge for further backup operations.", + "The operation has failed because the tape cartridge was manually ejected while the tape drive was actively writing or reading. Manual or forced eject while drive actively writing or reading", + "You have loaded a cartridge of a type that is read-only in this drive. Media loaded that is read-only format. The cartridge will appear as write-protected.", + "The directory on the tape cartridge has been corrupted. Tape drive powered down with tape loaded, or permanent error prevented the tape directory being updated. File search performance will be degraded. The tape directory can be rebuilt by reading all the data on the cartridge.", + "The tape cartridge is nearing the end of its calculated life. Media may have exceeded its specified number of passes. It is recommended that you:\n 1. Use another tape cartridge for your next backup.\n 2. Store this tape cartridge in a safe place in case you need to restore data from it.", +/* 20 */ "The tape drive needs cleaning: The drive thinks it has a head clog, or needs cleaning.\n 1. If the operation has stopped, eject the tape and clean the drive.\n 2. 
If the operation has not stopped, wait for it to finish and then clean the drive.\n Check the tape drive users manual for device specific cleaning instructions.", + "The tape drive is due for routine cleaning: The drive is ready for a periodic clean.\n 1. Wait for the current operation to finish.\n 2. Then use a cleaning cartridge.\n Check the tape drive users manual for device specific cleaning instructions.", + "The last cleaning cartridge used in the tape drive has worn out. The cleaning tape has expired.\n 1. Discard the worn out cleaning cartridge.\n 2. Wait for the current operation to finish.\n 3. Then use a new cleaning cartridge.", + "The last cleaning cartridge used in the tape drive was an invalid type: Invalid cleaning tape type used.\n 1. Do not use this cleaning cartridge in this drive.\n 2. Wait for the current operation to finish.\n 3. Then use a valid cleaning cartridge.", + "The tape drive has requested a retention operation. The drive is having severe trouble reading or writing, which will be resolved by a retention cycle.", + "A redundant interface port on the tape drive has failed. Failure of one interface port in a dual-port configuration, e.g. Fibrechannel.", + "A tape drive cooling fan has failed. Fan failure inside tape drive mechanism or tape drive enclosure.", + "A redundant power supply has failed inside the tape drive enclosure. Check the enclosure users manual for instructions on replacing the failed power supply. Redundant PSU failure inside the tape drive enclosure or rack subsystem.", + "The tape drive power consumption is outside the specified range. Power consumption of the tape drive is outside specified range.", + "Preventive maintenance of the tape drive is required. The drive requires preventative maintenance (not cleaning). Check the tape drive users manual for device specific preventive maintenance tasks or call the tape drive supplier helpline.", +/* 30 */ "The tape drive has a hardware fault: The drive has a hardware fault that requires reset to recover.\n 1. Eject the tape or magazine.\n 2. Reset the drive.\n 3. Restart the operation.", + "The tape drive has a hardware fault: The drive has a hardware fault which is not read/write related or requires a power cycle to recover.\n 1. Turn the tape drive off and then on again.\n 2. Restart the operation.\n 3. If the problem persists, call the tape drive supplier helpline.\n Check the tape drive users manual for device specific instructions on turning the device power on and off.", + "The tape drive has a problem with the host interface: The drive has identified an interfacing fault.\n 1. Check the cables and cable connections.\n 2. Restart the operation.", + "The operation has failed. Error recovery action:\n 1. Eject the tape or magazine.\n 2. Insert the tape or magazine again.\n 3. Restart the operation.", + "The firmware download has failed because you have tried to use the incorrect firmware for this tape drive. Firmware download failed. Obtain the correct firmware and try again.", + "Environmental conditions inside the tape drive are outside the specified humidity range. Drive humidity limits exceeded.", + "Environmental conditions inside the tape drive are outside the specified temperature range. Drive temperature limits exceeded.", + "The voltage supply to the tape drive is outside the specified range. Drive voltage limits exceeded.", + "A hardware failure of the tape drive is predicted. Call the tape drive supplier helpline. 
Predictive failure of drive hardware.", + "The tape drive may have a fault. Check for availability of diagnostic information and run extended diagnostics if applicable. The drive may have had a failure which may be identified by stored diagnostic information or by running extended diagnostics (eg Send Diagnostic). Check the tape drive users manual for instructions on running extended diagnostic tests and retrieving diagnostic data.", +/* 40 */ "The changer mechanism is having difficulty communicating with the tape drive: Loader mech. is having trouble communicating with the tape drive.\n 1. Turn the autoloader off then on.\n 2. Restart the operation.\n 3. If problem persists, call the tape drive supplier helpline.", + "A tape has been left in the autoloader by a previous hardware fault: Stray tape left in loader after previous error recovery.\n 1. Insert an empty magazine to clear the fault.\n 2. If the fault does not clear, turn the autoloader off and then on again.\n 3. If the problem persists, call the tape drive supplier helpline.", + "There is a problem with the autoloader mechanism. Loader mech. has a hardware fault.", + "The operation has failed because the autoloader door is open: Tape changer door open:\n 1. Clear any obstructions from the autoloader door.\n 2. Eject the magazine and then insert it again.\n 3. If the fault does not clear, turn the autoloader off and then on again.\n 4. If the problem persists, call the tape drive supplier helpline.", + "The autoloader has a hardware fault: The loader mechanism has a hardware fault that is not mechanically related.\n 1. Turn the autoloader off and then on again.\n 2. Restart the operation.\n 3. If the problem persists, call the tape drive supplier helpline.\n Check the autoloader users manual for device specific instructions on turning the device power on and off.", + "The autoloader cannot operate without the magazine. Loader magazine not present.\n 1. Insert the magazine into the autoloader.\n 2. Restart the operation.", +/* 46 */ "A hardware failure of the changer mechanism is predicted. Predictive failure of loader mechanism hardware. Call the tape drive supplier helpline.", + "", + "", + "", +/* 50 */ "Media statistics have been lost at some time in the past, Drive or library powered down with tape loaded.", + "The tape directory on the tape cartridge just unloaded has been corrupted. Error prevented the tape directory being updated on unload. File search performance will be degraded. The tape directory can be rebuilt by reading all the data.", + "The tape just unloaded could not write its system area successfully: Write errors while writing the system log on unload.\n 1. Copy data to another tape cartridge.\n 2. Discard the old cartridge.", + "The tape system area could not be read successfully at load time: Read errors while reading the system area on load.\n 1. Copy data to another tape cartridge.\n 2. Discard the old cartridge.", +/* 54 */ "The start of data could not be found on the tape: Tape damaged, bulk erased, or incorrect format.\n 1. Check you are using the correct format tape.\n 2. Discard the tape or return the tape to your supplier." 
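+   /* The entries above parallel ta_errors[]; both tables end at alert number 54 (MAX_MSG in tape_alert.c) and show_tape_alerts() indexes both directly by the alert number */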
+}; diff --git a/src/stored/tape_dev.c b/src/stored/tape_dev.c new file mode 100644 index 00000000..cb5e9b74 --- /dev/null +++ b/src/stored/tape_dev.c @@ -0,0 +1,1121 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * tape_dev.c -- low level operations on tape devices + * + * written by, Kern Sibbald, MM + * separated from dev.c in February 2014 + * + * The separation between tape and file is not yet clean. + * + */ + +/* + * Handling I/O errors and end of tape conditions is a bit tricky. + * This is how it is currently done when writing. + * On either an I/O error or end of tape, + * we will stop writing on the physical device (no I/O recovery is + * attempted at least in this daemon). The state flags will be set + * to include ST_EOT, which is ephemeral, and ST_WEOT, which is + * persistent. Lots of routines clear ST_EOT, but ST_WEOT is + * cleared only when the problem goes away. Now when ST_WEOT + * is set all calls to write_block_to_device() call the fix_up + * routine. In addition, all threads are blocked + * from writing on the tape by calling lock_dev(), and threads other + * than the first thread to hit the EOT will block on a condition + * variable. The first thread to hit the EOT will continue to + * be able to read and write the tape (he sort of tunnels through + * the locking mechanism -- see lock_dev() for details). + * + * Now presumably somewhere higher in the chain of command + * (device.c), someone will notice the EOT condition and + * get a new tape up, get the tape label read, and mark + * the label for rewriting. Then this higher level routine + * will write the unwritten buffer to the new volume. + * Finally, he will release + * any blocked threads by doing a broadcast on the condition + * variable. At that point, we should be totally back in + * business with no lost data. 
+ */ + +#include "bacula.h" +#include "stored.h" + +#ifndef O_NONBLOCK +#define O_NONBLOCK 0 +#endif + +/* Imported functions */ +extern void set_os_device_parameters(DCR *dcr); +extern bool dev_get_os_pos(DEVICE *dev, struct mtget *mt_stat); +extern uint32_t status_dev(DEVICE *dev); +const char *mode_to_str(int mode); + +/* + */ +bool tape_dev::open_device(DCR *dcr, int omode) +{ + file_size = 0; + int timeout = max_open_wait; +#if !defined(HAVE_WIN32) + struct mtop mt_com; + utime_t start_time = time(NULL); +#endif + + if (DEVICE::open_device(dcr, omode)) { + return true; /* already open */ + } + omode = openmode; /* pickup possible new options */ + + mount(1); /* do mount if required */ + + Dmsg0(100, "Open dev: device is tape\n"); + + get_autochanger_loaded_slot(dcr); + + openmode = omode; + set_mode(omode); + + if (timeout < 1) { + timeout = 1; + } + errno = 0; + if (is_fifo() && timeout) { + /* Set open timer */ + tid = start_thread_timer(dcr->jcr, pthread_self(), timeout); + } + Dmsg2(100, "Try open %s mode=%s\n", print_name(), mode_to_str(omode)); + +#if defined(HAVE_WIN32) + + /* Windows Code */ + if ((m_fd = d_open(dev_name, mode)) < 0) { + dev_errno = errno; + } + +#else + + /* UNIX Code */ + /* If busy retry each second for max_open_wait seconds */ + for ( ;; ) { + /* Try non-blocking open */ + m_fd = d_open(dev_name, mode+O_NONBLOCK); + if (m_fd < 0) { + berrno be; + dev_errno = errno; + Dmsg5(100, "Open error on %s omode=%d mode=%x errno=%d: ERR=%s\n", + print_name(), omode, mode, errno, be.bstrerror()); + } else { + /* Tape open, now rewind it */ + Dmsg0(100, "Rewind after open\n"); + mt_com.mt_op = MTREW; + mt_com.mt_count = 1; + /* rewind only if dev is a tape */ + if (is_tape() && (d_ioctl(m_fd, MTIOCTOP, (char *)&mt_com) < 0)) { + berrno be; + dev_errno = errno; /* set error status from rewind */ + d_close(m_fd); + clear_opened(); + Dmsg2(100, "Rewind error on %s close: ERR=%s\n", print_name(), + be.bstrerror(dev_errno)); + /* If we get busy, device is probably rewinding, try again */ + if (dev_errno != EBUSY) { + break; /* error -- no medium */ + } + } else { + /* Got fd and rewind worked, so we must have medium in drive */ + d_close(m_fd); + m_fd = d_open(dev_name, mode); /* open normally */ + if (m_fd < 0) { + berrno be; + dev_errno = errno; + Dmsg5(100, "Open error on %s omode=%d mode=%x errno=%d: ERR=%s\n", + print_name(), omode, mode, errno, be.bstrerror()); + break; + } + dev_errno = 0; + lock_door(); + set_os_device_parameters(dcr); /* do system dependent stuff */ + break; /* Successfully opened and rewound */ + } + } + bmicrosleep(5, 0); + /* Exceed wait time ? */ + if (time(NULL) - start_time >= max_open_wait) { + break; /* yes, get out */ + } + } +#endif + + if (!is_open()) { + berrno be; + Mmsg2(errmsg, _("Unable to open device %s: ERR=%s\n"), + print_name(), be.bstrerror(dev_errno)); + if (dcr->jcr) { + pm_strcpy(dcr->jcr->errmsg, errmsg); + } + Dmsg1(100, "%s", errmsg); + } + + /* Stop any open() timer we started */ + if (tid) { + stop_thread_timer(tid); + tid = 0; + } + Dmsg1(100, "open dev: tape %d opened\n", m_fd); + state |= preserve; /* reset any important state info */ + return m_fd >= 0; +} + + +/* + * Rewind the device. 
+ * Returns: true on success + * false on failure + */ +bool tape_dev::rewind(DCR *dcr) +{ + struct mtop mt_com; + unsigned int i; + bool first = true; + + Dmsg3(400, "rewind res=%d fd=%d %s\n", num_reserved(), m_fd, print_name()); + state &= ~(ST_EOT|ST_EOF|ST_WEOT); /* remove EOF/EOT flags */ + block_num = file = 0; + file_size = 0; + file_addr = 0; + if (m_fd < 0) { + return false; + } + if (is_tape()) { + mt_com.mt_op = MTREW; + mt_com.mt_count = 1; + /* If we get an I/O error on rewind, it is probably because + * the drive is actually busy. We loop for (about 5 minutes) + * retrying every 5 seconds. + */ + for (i=max_rewind_wait; ; i -= 5) { + if (d_ioctl(m_fd, MTIOCTOP, (char *)&mt_com) < 0) { + berrno be; + clrerror(MTREW); + if (i == max_rewind_wait) { + Dmsg1(200, "Rewind error, %s. retrying ...\n", be.bstrerror()); + } + /* + * This is a gross hack, because if the user has the + * device mounted (i.e. open), then uses mtx to load + * a tape, the current open file descriptor is invalid. + * So, we close the drive and re-open it. + */ + if (first && dcr) { + int open_mode = openmode; + d_close(m_fd); + clear_opened(); + open_device(dcr, open_mode); + if (m_fd < 0) { + return false; + } + first = false; + continue; + } +#ifdef HAVE_SUN_OS + if (dev_errno == EIO) { + Mmsg1(errmsg, _("No tape loaded or drive offline on %s.\n"), print_name()); + return false; + } +#else + if (dev_errno == EIO && i > 0) { + Dmsg0(200, "Sleeping 5 seconds.\n"); + bmicrosleep(5, 0); + continue; + } +#endif + Mmsg2(errmsg, _("Rewind error on %s. ERR=%s.\n"), + print_name(), be.bstrerror()); + return false; + } + break; + } + } + return true; +} + +/* + * Check if the current position on the volume corresponds to + * what is in the catalog. + * + */ +bool tape_dev::is_eod_valid(DCR *dcr) +{ + JCR *jcr = dcr->jcr; + /* + * Check if we are positioned on the tape at the same place + * that the database says we should be. + */ + if (VolCatInfo.VolCatFiles == get_file()) { + Jmsg(jcr, M_INFO, 0, _("Ready to append to end of Volume \"%s\" at file=%d.\n"), + dcr->VolumeName, get_file()); + } else if (get_file() > VolCatInfo.VolCatFiles) { + Jmsg(jcr, M_WARNING, 0, _("For Volume \"%s\":\n" + "The number of files mismatch! Volume=%u Catalog=%u\n" + "Correcting Catalog\n"), + dcr->VolumeName, get_file(), VolCatInfo.VolCatFiles); + VolCatInfo.VolCatFiles = get_file(); + VolCatInfo.VolCatBlocks = get_block_num(); + if (!dir_update_volume_info(dcr, false, true)) { + Jmsg(jcr, M_WARNING, 0, _("Error updating Catalog\n")); + dcr->mark_volume_in_error(); + return false; + } + } else { + Jmsg(jcr, M_ERROR, 0, _("Bacula cannot write on tape Volume \"%s\" because:\n" + "The number of files mismatch! 
Volume=%u Catalog=%u\n"), + dcr->VolumeName, get_file(), VolCatInfo.VolCatFiles); + dcr->mark_volume_in_error(); + return false; + } + return true; +} + +/* + * Position device to end of medium (end of data) + * Returns: true on succes + * false on error + */ +bool tape_dev::eod(DCR *dcr) +{ + struct mtop mt_com; + bool ok = true; + int32_t os_file; + + Enter(100); + ok = DEVICE::eod(dcr); + if (!ok) { + return false; + } + +#if defined (__digital__) && defined (__unix__) + return fsf(VolCatInfo.VolCatFiles); +#endif + +#ifdef MTEOM + if (has_cap(CAP_FASTFSF) && !has_cap(CAP_EOM)) { + Dmsg0(100,"Using FAST FSF for EOM\n"); + /* If unknown position, rewind */ + if (get_os_tape_file() < 0) { + if (!rewind(dcr)) { + Dmsg0(100, "Rewind error\n"); + Leave(100); + return false; + } + } + mt_com.mt_op = MTFSF; + /* + * ***FIXME*** fix code to handle case that INT16_MAX is + * not large enough. + */ + mt_com.mt_count = INT16_MAX; /* use big positive number */ + if (mt_com.mt_count < 0) { + mt_com.mt_count = INT16_MAX; /* brain damaged system */ + } + } + + if (has_cap(CAP_MTIOCGET) && (has_cap(CAP_FASTFSF) || has_cap(CAP_EOM))) { + if (has_cap(CAP_EOM)) { + Dmsg0(100,"Using EOM for EOM\n"); + mt_com.mt_op = MTEOM; + mt_com.mt_count = 1; + } + + if (d_ioctl(m_fd, MTIOCTOP, (char *)&mt_com) < 0) { + berrno be; + clrerror(mt_com.mt_op); + Dmsg1(50, "ioctl error: %s\n", be.bstrerror()); + update_pos(dcr); + Mmsg2(errmsg, _("ioctl MTEOM error on %s. ERR=%s.\n"), + print_name(), be.bstrerror()); + Dmsg1(100, "%s", errmsg); + Leave(100); + return false; + } + + os_file = get_os_tape_file(); + if (os_file < 0) { + berrno be; + clrerror(-1); + Mmsg2(errmsg, _("ioctl MTIOCGET error on %s. ERR=%s.\n"), + print_name(), be.bstrerror()); + Dmsg1(100, "%s", errmsg); + Leave(100); + return false; + } + Dmsg1(100, "EOD file=%d\n", os_file); + set_ateof(); + file = os_file; + } else { +#else + { +#endif + /* + * Rewind then use FSF until EOT reached + */ + if (!rewind(dcr)) { + Dmsg0(100, "Rewind error.\n"); + Leave(100); + return false; + } + /* + * Move file by file to the end of the tape + */ + int file_num; + for (file_num=file; !at_eot(); file_num++) { + Dmsg0(200, "eod: doing fsf 1\n"); + if (!fsf(1)) { + Dmsg0(100, "fsf error.\n"); + Leave(100); + return false; + } + /* + * Avoid infinite loop by ensuring we advance. + */ + if (!at_eot() && file_num == (int)file) { + Dmsg1(100, "fsf did not advance from file %d\n", file_num); + set_ateof(); + os_file = get_os_tape_file(); + if (os_file >= 0) { + Dmsg2(100, "Adjust file from %d to %d\n", file_num, os_file); + file = os_file; + } + break; + } + } + } + /* + * Some drivers leave us after second EOF when doing + * MTEOM, so we must backup so that appending overwrites + * the second EOF. + */ + if (has_cap(CAP_BSFATEOM)) { + /* Backup over EOF */ + ok = bsf(1); + /* If BSF worked and fileno is known (not -1), set file */ + os_file = get_os_tape_file(); + if (os_file >= 0) { + Dmsg2(100, "BSFATEOF adjust file from %d to %d\n", file , os_file); + file = os_file; + } else { + file++; /* wing it -- not correct on all OSes */ + } + } else { + update_pos(dcr); /* update position */ + } + Dmsg1(200, "EOD dev->file=%d\n", file); + Leave(100); + return ok; +} + +/* + * Load medium in device + * Returns: true on success + * false on failure + */ +bool load_dev(DEVICE *dev) +{ +#ifdef MTLOAD + struct mtop mt_com; +#endif + + if (dev->fd() < 0) { + dev->dev_errno = EBADF; + Mmsg0(dev->errmsg, _("Bad call to load_dev. 
Device not open\n")); + Emsg0(M_FATAL, 0, dev->errmsg); + return false; + } + if (!(dev->is_tape())) { + return true; + } +#ifndef MTLOAD + Dmsg0(200, "stored: MTLOAD command not available\n"); + berrno be; + dev->dev_errno = ENOTTY; /* function not available */ + Mmsg2(dev->errmsg, _("ioctl MTLOAD error on %s. ERR=%s.\n"), + dev->print_name(), be.bstrerror()); + return false; +#else + + dev->block_num = dev->file = 0; + dev->file_size = 0; + dev->file_addr = 0; + mt_com.mt_op = MTLOAD; + mt_com.mt_count = 1; + if (dev->d_ioctl(dev->fd(), MTIOCTOP, (char *)&mt_com) < 0) { + berrno be; + dev->dev_errno = errno; + Mmsg2(dev->errmsg, _("ioctl MTLOAD error on %s. ERR=%s.\n"), + dev->print_name(), be.bstrerror()); + return false; + } + return true; +#endif +} + +/* + * Rewind device and put it offline + * Returns: true on success + * false on failure + */ +bool tape_dev::offline(DCR *dcr) +{ + struct mtop mt_com; + + if (!is_tape()) { + return true; /* device not open */ + } + + state &= ~(ST_APPEND|ST_READ|ST_EOT|ST_EOF|ST_WEOT); /* remove EOF/EOT flags */ + block_num = file = 0; + file_size = 0; + file_addr = 0; + unlock_door(); + mt_com.mt_op = MTOFFL; + mt_com.mt_count = 1; + if (d_ioctl(m_fd, MTIOCTOP, (char *)&mt_com) < 0) { + berrno be; + dev_errno = errno; + Mmsg2(errmsg, _("ioctl MTOFFL error on %s. ERR=%s.\n"), + print_name(), be.bstrerror()); + return false; + } + Dmsg1(100, "Offlined device %s\n", print_name()); + return true; +} + +bool DEVICE::offline_or_rewind(DCR *dcr) +{ + if (m_fd < 0) { + return false; + } + if (has_cap(CAP_OFFLINEUNMOUNT)) { + return offline(dcr); + } else { + /* + * Note, this rewind probably should not be here (it wasn't + * in prior versions of Bacula), but on FreeBSD, this is + * needed in the case the tape was "frozen" due to an error + * such as backspacing after writing and EOF. If it is not + * done, all future references to the drive get and I/O error. + */ + clrerror(MTREW); + return rewind(dcr); + } +} + +/* + * Foward space a file + * Returns: true on success + * false on failure + */ +bool tape_dev::fsf(int num) +{ + int32_t os_file = 0; + struct mtop mt_com; + int stat = 0; + + if (!is_open()) { + dev_errno = EBADF; + Mmsg0(errmsg, _("Bad call to fsf. Device not open\n")); + Emsg0(M_FATAL, 0, errmsg); + return false; + } + + if (!is_tape()) { + return true; + } + + if (at_eot()) { + dev_errno = 0; + Mmsg1(errmsg, _("Device %s at End of Tape.\n"), print_name()); + return false; + } + if (at_eof()) { + Dmsg0(200, "ST_EOF set on entry to FSF\n"); + } + + Dmsg0(100, "fsf\n"); + block_num = 0; + /* + * If Fast forward space file is set, then we + * use MTFSF to forward space and MTIOCGET + * to get the file position. We assume that + * the SCSI driver will ensure that we do not + * forward space past the end of the medium. + */ + if (has_cap(CAP_FSF) && has_cap(CAP_MTIOCGET) && has_cap(CAP_FASTFSF)) { + int my_errno = 0; + mt_com.mt_op = MTFSF; + mt_com.mt_count = num; + stat = d_ioctl(m_fd, MTIOCTOP, (char *)&mt_com); + if (stat < 0) { + my_errno = errno; /* save errno */ + } else if ((os_file=get_os_tape_file()) < 0) { + my_errno = errno; /* save errno */ + } + if (my_errno != 0) { + berrno be; + set_eot(); + Dmsg0(200, "Set ST_EOT\n"); + clrerror(MTFSF); + Mmsg2(errmsg, _("ioctl MTFSF error on %s. 
ERR=%s.\n"), + print_name(), be.bstrerror(my_errno)); + Dmsg1(200, "%s", errmsg); + return false; + } + + Dmsg1(200, "fsf file=%d\n", os_file); + set_ateof(); + file = os_file; + return true; + + /* + * Here if CAP_FSF is set, and virtually all drives + * these days support it, we read a record, then forward + * space one file. Using this procedure, which is slow, + * is the only way we can be sure that we don't read + * two consecutive EOF marks, which means End of Data. + */ + } else if (has_cap(CAP_FSF)) { + POOLMEM *rbuf; + int rbuf_len; + Dmsg0(200, "FSF has cap_fsf\n"); + if (max_block_size == 0) { + rbuf_len = DEFAULT_BLOCK_SIZE; + } else { + rbuf_len = max_block_size; + } + rbuf = get_memory(rbuf_len); + mt_com.mt_op = MTFSF; + mt_com.mt_count = 1; + while (num-- && !at_eot()) { + Dmsg0(100, "Doing read before fsf\n"); + if ((stat = this->read((char *)rbuf, rbuf_len)) < 0) { + if (errno == ENOMEM) { /* tape record exceeds buf len */ + stat = rbuf_len; /* This is OK */ + /* + * On IBM drives, they return ENOSPC at EOM + * instead of EOF status + */ + } else if (at_eof() && errno == ENOSPC) { + stat = 0; + } else { + berrno be; + set_eot(); + clrerror(-1); + Dmsg2(100, "Set ST_EOT read errno=%d. ERR=%s\n", dev_errno, + be.bstrerror()); + Mmsg2(errmsg, _("read error on %s. ERR=%s.\n"), + print_name(), be.bstrerror()); + Dmsg1(100, "%s", errmsg); + break; + } + } + if (stat == 0) { /* EOF */ + Dmsg1(100, "End of File mark from read. File=%d\n", file+1); + /* Two reads of zero means end of tape */ + if (at_eof()) { + set_eot(); + Dmsg0(100, "Set ST_EOT\n"); + break; + } else { + set_ateof(); + continue; + } + } else { /* Got data */ + clear_eot(); + clear_eof(); + } + + Dmsg0(100, "Doing MTFSF\n"); + stat = d_ioctl(m_fd, MTIOCTOP, (char *)&mt_com); + if (stat < 0) { /* error => EOT */ + berrno be; + set_eot(); + Dmsg0(100, "Set ST_EOT\n"); + clrerror(MTFSF); + Mmsg2(errmsg, _("ioctl MTFSF error on %s. ERR=%s.\n"), + print_name(), be.bstrerror()); + Dmsg0(100, "Got < 0 for MTFSF\n"); + Dmsg1(100, "%s", errmsg); + } else { + set_ateof(); + } + } + free_memory(rbuf); + + /* + * No FSF, so use FSR to simulate it + */ + } else { + Dmsg0(200, "Doing FSR for FSF\n"); + while (num-- && !at_eot()) { + fsr(INT32_MAX); /* returns -1 on EOF or EOT */ + } + if (at_eot()) { + dev_errno = 0; + Mmsg1(errmsg, _("Device %s at End of Tape.\n"), print_name()); + stat = -1; + } else { + stat = 0; + } + } + Dmsg1(200, "Return %d from FSF\n", stat); + if (at_eof()) { + Dmsg0(200, "ST_EOF set on exit FSF\n"); + } + if (at_eot()) { + Dmsg0(200, "ST_EOT set on exit FSF\n"); + } + Dmsg1(200, "Return from FSF file=%d\n", file); + return stat == 0; +} + +/* + * Backward space a file + * Returns: false on failure + * true on success + */ +bool tape_dev::bsf(int num) +{ + struct mtop mt_com; + int stat; + + if (!is_open()) { + dev_errno = EBADF; + Mmsg0(errmsg, _("Bad call to bsf. Device not open\n")); + Emsg0(M_FATAL, 0, errmsg); + return false; + } + + if (!is_tape()) { + Mmsg1(errmsg, _("Device %s cannot BSF because it is not a tape.\n"), + print_name()); + return false; + } + + Dmsg0(100, "bsf\n"); + clear_eot(); + clear_eof(); + file -= num; + file_addr = 0; + file_size = 0; + mt_com.mt_op = MTBSF; + mt_com.mt_count = num; + stat = d_ioctl(m_fd, MTIOCTOP, (char *)&mt_com); + if (stat < 0) { + berrno be; + clrerror(MTBSF); + Mmsg2(errmsg, _("ioctl MTBSF error on %s. 
ERR=%s.\n"), + print_name(), be.bstrerror()); + } + return stat == 0; +} + + +/* + * Foward space num records + * Returns: false on failure + * true on success + */ +bool DEVICE::fsr(int num) +{ + struct mtop mt_com; + int stat; + + if (!is_open()) { + dev_errno = EBADF; + Mmsg0(errmsg, _("Bad call to fsr. Device not open\n")); + Emsg0(M_FATAL, 0, errmsg); + return false; + } + + if (!is_tape()) { + return false; + } + + if (!has_cap(CAP_FSR)) { + Mmsg1(errmsg, _("ioctl MTFSR not permitted on %s.\n"), print_name()); + return false; + } + + Dmsg1(100, "fsr %d\n", num); + mt_com.mt_op = MTFSR; + mt_com.mt_count = num; + stat = d_ioctl(m_fd, MTIOCTOP, (char *)&mt_com); + if (stat == 0) { + clear_eof(); + block_num += num; + } else { + berrno be; + struct mtget mt_stat; + clrerror(MTFSR); + Dmsg1(100, "FSF fail: ERR=%s\n", be.bstrerror()); + if (dev_get_os_pos(this, &mt_stat)) { + Dmsg4(100, "Adjust from %d:%d to %d:%d\n", file, + block_num, mt_stat.mt_fileno, mt_stat.mt_blkno); + file = mt_stat.mt_fileno; + block_num = mt_stat.mt_blkno; + } else { + if (at_eof()) { + set_eot(); + } else { + set_ateof(); + } + } + Mmsg3(errmsg, _("ioctl MTFSR %d error on %s. ERR=%s.\n"), + num, print_name(), be.bstrerror()); + } + return stat == 0; +} + +/* + * Backward space a record + * Returns: false on failure + * true on success + */ +bool DEVICE::bsr(int num) +{ + struct mtop mt_com; + int stat; + + if (!is_open()) { + dev_errno = EBADF; + Mmsg0(errmsg, _("Bad call to bsr_dev. Device not open\n")); + Emsg0(M_FATAL, 0, errmsg); + return false; + } + + if (!is_tape()) { + return false; + } + + if (!has_cap(CAP_BSR)) { + Mmsg1(errmsg, _("ioctl MTBSR not permitted on %s.\n"), print_name()); + return false; + } + + Dmsg0(100, "bsr_dev\n"); + block_num -= num; + clear_eof(); + clear_eot(); + mt_com.mt_op = MTBSR; + mt_com.mt_count = num; + stat = d_ioctl(m_fd, MTIOCTOP, (char *)&mt_com); + if (stat < 0) { + berrno be; + clrerror(MTBSR); + Mmsg2(errmsg, _("ioctl MTBSR error on %s. ERR=%s.\n"), + print_name(), be.bstrerror()); + } + return stat == 0; +} + +void tape_dev::lock_door() +{ +#ifdef MTLOCK + struct mtop mt_com; + if (!is_tape()) return; + mt_com.mt_op = MTLOCK; + mt_com.mt_count = 1; + d_ioctl(m_fd, MTIOCTOP, (char *)&mt_com); +#endif +} + +void tape_dev::unlock_door() +{ +#ifdef MTUNLOCK + struct mtop mt_com; + if (!is_tape()) return; + mt_com.mt_op = MTUNLOCK; + mt_com.mt_count = 1; + d_ioctl(m_fd, MTIOCTOP, (char *)&mt_com); +#endif +} + +/* + * Reposition the device to file, block + * Returns: false on failure + * true on success + */ +bool tape_dev::reposition(DCR *dcr, uint64_t raddr) +{ + uint32_t rfile, rblock; + + rfile = (uint32_t)(raddr>>32); + rblock = (uint32_t)raddr; + if (!is_open()) { + dev_errno = EBADF; + Mmsg0(errmsg, _("Bad call to reposition. Device not open\n")); + Emsg0(M_FATAL, 0, errmsg); + return false; + } + + /* After this point, we are tape only */ + Dmsg4(100, "reposition from %u:%u to %u:%u\n", file, block_num, rfile, rblock); + if (rfile < file) { + Dmsg0(100, "Rewind\n"); + if (!rewind(dcr)) { + return false; + } + } + if (rfile > file) { + Dmsg1(100, "fsf %d\n", rfile-file); + if (!fsf(rfile-file)) { + Dmsg1(100, "fsf failed! 
ERR=%s\n", bstrerror()); + return false; + } + Dmsg2(100, "wanted_file=%d at_file=%d\n", rfile, file); + } + if (rblock < block_num) { + Dmsg2(100, "wanted_blk=%d at_blk=%d\n", rblock, block_num); + Dmsg0(100, "bsf 1\n"); + bsf(1); + Dmsg0(100, "fsf 1\n"); + fsf(1); + Dmsg2(100, "wanted_blk=%d at_blk=%d\n", rblock, block_num); + } + if (has_cap(CAP_POSITIONBLOCKS) && rblock > block_num) { + /* Ignore errors as Bacula can read to the correct block */ + Dmsg1(100, "fsr %d\n", rblock-block_num); + return fsr(rblock-block_num); + } else { + while (rblock > block_num) { + if (!dcr->read_block_from_dev(NO_BLOCK_NUMBER_CHECK)) { + berrno be; + dev_errno = errno; + Dmsg2(30, "Failed to find requested block on %s: ERR=%s", + print_name(), be.bstrerror()); + return false; + } + Dmsg2(300, "moving forward wanted_blk=%d at_blk=%d\n", rblock, block_num); + } + } + return true; +} + +/* + * Write an end of file on the device + * Returns: true on success + * false on failure + */ +bool tape_dev::weof(DCR *dcr, int num) +{ + struct mtop mt_com; + int stat; + Dmsg1(129, "=== weof_dev=%s\n", print_name()); + + if (!is_open()) { + dev_errno = EBADF; + Mmsg0(errmsg, _("Bad call to weof_dev. Device not open\n")); + Emsg0(M_FATAL, 0, errmsg); + return false; + } + file_size = 0; + + if (!is_tape()) { + return true; + } + if (!can_append()) { + Mmsg0(errmsg, _("Attempt to WEOF on non-appendable Volume\n")); + Emsg0(M_FATAL, 0, errmsg); + return false; + } + + clear_eof(); + clear_eot(); + mt_com.mt_op = MTWEOF; + mt_com.mt_count = num; + stat = d_ioctl(m_fd, MTIOCTOP, (char *)&mt_com); + if (stat == 0) { + block_num = 0; + file += num; + file_addr = 0; + } else { + berrno be; + clrerror(MTWEOF); + if (stat == -1) { + Mmsg2(errmsg, _("ioctl MTWEOF error on %s. ERR=%s.\n"), + print_name(), be.bstrerror()); + } + } + /* DCR is null if called from within write_ansi_ibm_labels() */ + if (dcr && stat == 0) { + if (!write_ansi_ibm_labels(dcr, ANSI_EOF_LABEL, VolHdr.VolumeName)) { + stat = -1; + } + } + return stat == 0; +} + +/* + * If timeout, wait until the mount command returns 0. + * If !timeout, try to mount the device only once. + */ +bool tape_dev::mount(int timeout) +{ + Dmsg0(190, "Enter tape mount\n"); + + if (!is_mounted() && device->mount_command) { + return mount_tape(1, timeout); + } + return true; +} + +/* + * Unmount the device + * If timeout, wait until the unmount command returns 0. + * If !timeout, try to unmount the device only once. + */ +bool tape_dev::unmount(int timeout) +{ + Dmsg0(100, "Enter tape unmount\n"); + + if (!is_mounted() && requires_mount() && device->unmount_command) { + return mount_tape(0, timeout); + } + return true; +} + + +/* + * (Un)mount the device (for tape devices) + */ +bool tape_dev::mount_tape(int mount, int dotimeout) +{ + POOL_MEM ocmd(PM_FNAME); + POOLMEM *results; + char *icmd; + int status, tries; + berrno be; + + Dsm_check(200); + if (mount) { + icmd = device->mount_command; + } else { + icmd = device->unmount_command; + } + + edit_mount_codes(ocmd, icmd); + + Dmsg2(100, "mount_tape: cmd=%s mounted=%d\n", ocmd.c_str(), !!is_mounted()); + + if (dotimeout) { + /* Try at most 10 times to (un)mount the device. This should perhaps be configurable. 
*/ + tries = 10; + } else { + tries = 1; + } + results = get_memory(4000); + + /* If busy retry each second */ + Dmsg1(100, "mount_tape run_prog=%s\n", ocmd.c_str()); + while ((status = run_program_full_output(ocmd.c_str(), max_open_wait/2, results)) != 0) { + if (tries-- > 0) { + continue; + } + + Dmsg5(100, "Device %s cannot be %smounted. stat=%d result=%s ERR=%s\n", print_name(), + (mount ? "" : "un"), status, results, be.bstrerror(status)); + Mmsg(errmsg, _("Device %s cannot be %smounted. ERR=%s\n"), + print_name(), (mount ? "" : "un"), be.bstrerror(status)); + + set_mounted(false); + free_pool_memory(results); + Dmsg0(200, "============ mount=0\n"); + Dsm_check(200); + return false; + } + + set_mounted(mount); /* set/clear mounted flag */ + free_pool_memory(results); + Dmsg1(200, "============ mount=%d\n", mount); + return true; +} + +void tape_dev::set_ateof() +{ + if (at_eof()) { + return; + } + DEVICE::set_ateof(); + file++; +} + +const char *tape_dev::print_type() +{ + return "Tape"; +} + +DEVICE *tape_dev::get_dev(DCR */*dcr*/) +{ + return this; +} + +uint32_t tape_dev::get_hi_addr() +{ + return file; +} + +uint32_t tape_dev::get_low_addr() +{ + return block_num; +} + +uint64_t tape_dev::get_full_addr() +{ + return (((uint64_t)file) << 32) | (uint64_t)block_num; +} + +bool tape_dev::end_of_volume(DCR *dcr) +{ + return write_ansi_ibm_labels(dcr, ANSI_EOV_LABEL, VolHdr.VolumeName); +} + +/* Print the address */ +char *tape_dev::print_addr(char *buf, int32_t buf_len) +{ + buf[0] = 0; + bsnprintf(buf, buf_len, "%lu:%lu", get_hi_addr(), get_low_addr()); + return buf; +} + +char *tape_dev::print_addr(char *buf, int32_t buf_len, boffset_t addr) +{ + buf[0] = 0; + bsnprintf(buf, buf_len, "%lu:%lu", (uint32_t)(addr>>32), (uint32_t)addr); + return buf; +} + +/* + * Clean up when terminating the device + */ +void tape_dev::term(DCR *dcr) +{ + delete_alerts(); + DEVICE::term(dcr); +} diff --git a/src/stored/tape_dev.h b/src/stored/tape_dev.h new file mode 100644 index 00000000..e729b6f4 --- /dev/null +++ b/src/stored/tape_dev.h @@ -0,0 +1,72 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Inspired by vtape.h + */ + +#ifndef __TAPE_DEV_ +#define __TAPE_DEV_ + +struct ALERT { + char *Volume; + utime_t alert_time; + char alerts[10]; +}; + +class tape_dev : public DEVICE { +public: + + tape_dev() { }; + ~tape_dev() { }; + + /* DEVICE virtual functions that we redefine with our tape code */ + bool fsf(int num); + bool offline(DCR *dcr); + bool rewind(DCR *dcr); + bool bsf(int num); + void lock_door(); + void unlock_door(); + bool reposition(DCR *dcr, uint64_t raddr); + bool mount(int timeout); + bool unmount(int timeout); + bool mount_tape(int mount, int dotimeout); + bool weof(DCR *dcr, int num); + bool eod(DCR *dcr); + bool is_eod_valid(DCR *dcr); + void set_ateof(); + bool open_device(DCR *dcr, int omode); + void term(DCR *dcr); + const char *print_type(); + DEVICE *get_dev(DCR *dcr); + uint32_t get_hi_addr(); + uint32_t get_low_addr(); + uint64_t get_full_addr(); + bool end_of_volume(DCR *dcr); + char *print_addr(char *buf, int32_t buf_len); + char *print_addr(char *buf, int32_t maxlen, boffset_t addr); + bool get_tape_worm(DCR *dcr); + bool get_tape_alerts(DCR *dcr); + void show_tape_alerts(DCR *dcr, alert_list_type type, + alert_list_which which, alert_cb alert_callback); + int delete_alerts(); + + alist *alert_list; +}; + +#endif /* __TAPE_DEV_ */ diff --git a/src/stored/tape_worm.c b/src/stored/tape_worm.c new file mode 100644 index 00000000..e7dd5838 --- /dev/null +++ b/src/stored/tape_worm.c @@ -0,0 +1,93 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * + * Routines for getting tape worm status + * + * Written by Kern Sibbald, September MMXVIII + * + */ + +#include "bacula.h" /* pull in global headers */ +#include "stored.h" /* pull in Storage Deamon headers */ + + +static const int dbglvl = 50; + + +bool tape_dev::get_tape_worm(DCR *dcr) +{ + JCR *jcr = dcr->jcr; + + if (!job_canceled(jcr) && dcr->device->worm_command && + dcr->device->control_name) { + POOLMEM *wormcmd; + int status = 1; + bool is_worm = false; + int worm_val = 0; + BPIPE *bpipe; + char line[MAXSTRING]; + const char *fmt = " %d"; + + wormcmd = get_pool_memory(PM_FNAME); + wormcmd = edit_device_codes(dcr, wormcmd, dcr->device->worm_command, ""); + /* Wait maximum 5 minutes */ + bpipe = open_bpipe(wormcmd, 60 * 5, "r"); + if (bpipe) { + while (fgets(line, (int)sizeof(line), bpipe->rfd)) { + is_worm = false; + if (bsscanf(line, fmt, &worm_val) == 1) { + if (worm_val > 0) { + is_worm = true; + } + } + } + status = close_bpipe(bpipe); + free_pool_memory(wormcmd); + return is_worm; + } else { + status = errno; + } + if (status != 0) { + berrno be; + Jmsg(jcr, M_WARNING, 0, _("3997 Bad worm command status: %s: ERR=%s.\n"), + wormcmd, be.bstrerror(status)); + Dmsg2(dbglvl, _("3997 Bad worm command status: %s: ERR=%s.\n"), + wormcmd, be.bstrerror(status)); + } + + Dmsg1(400, "worm script status=%d\n", status); + free_pool_memory(wormcmd); + } else { + if (!dcr->device->worm_command) { + Dmsg1(dbglvl, "Cannot get tape worm status: no Worm Command specified for device %s\n", + print_name()); + Dmsg1(dbglvl, "Cannot get tape worm status: no Worm Command specified for device %s\n", + print_name()); + + } + if (!dcr->device->control_name) { + Dmsg1(dbglvl, "Cannot get tape worm status: no Control Device specified for device %s\n", + print_name()); + Dmsg1(dbglvl, "Cannot get tape worm status: no Control Device specified for device %s\n", + print_name()); + } + } + return false; +} diff --git a/src/stored/vbackup.c b/src/stored/vbackup.c new file mode 100644 index 00000000..a7822780 --- /dev/null +++ b/src/stored/vbackup.c @@ -0,0 +1,290 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Storage daemon -- vbackup.c -- responsible for doing a backup that + * does not require a Client -- migration, copy, or virtual backup. 
+ * + * Kern Sibbald, January MMVI + * + */ + +#include "bacula.h" +#include "stored.h" + +/* Import functions */ +extern char Job_end[]; + +/* Forward referenced subroutines */ +static bool record_cb(DCR *dcr, DEV_RECORD *rec); + +/* + * Read Data and send to File Daemon + * Returns: false on failure + * true on success + */ +bool do_vbackup(JCR *jcr) +{ + bool ok = true; + BSOCK *dir = jcr->dir_bsock; + const char *Type; + char ec1[50]; + DEVICE *dev; + + switch(jcr->getJobType()) { + case JT_MIGRATE: + Type = "Migration"; + break; + case JT_ARCHIVE: + Type = "Archive"; + break; + case JT_COPY: + Type = "Copy"; + break; + case JT_BACKUP: + Type = "Virtual Backup"; + break; + default: + Type = "Unknown"; + break; + } + + /* TODO: Remove when the new match_all is well tested */ + jcr->use_new_match_all = use_new_match_all; + + Dmsg1(20, "Start read data. newbsr=%d\n", jcr->use_new_match_all); + + if (!jcr->read_dcr || !jcr->dcr) { + Jmsg(jcr, M_FATAL, 0, _("Read and write devices not properly initialized.\n")); + goto bail_out; + } + Dmsg2(100, "read_dcr=%p write_dcr=%p\n", jcr->read_dcr, jcr->dcr); + + if (jcr->NumReadVolumes == 0) { + Jmsg(jcr, M_FATAL, 0, _("No Volume names found for %s.\n"), Type); + goto bail_out; + } + + Dmsg3(200, "Found %d volumes names for %s. First=%s\n", jcr->NumReadVolumes, + jcr->VolList->VolumeName, Type); + + ASSERT(jcr->read_dcr != jcr->dcr); + ASSERT(jcr->read_dcr->dev != jcr->dcr->dev); + /* Ready devices for reading and writing */ + if (!acquire_device_for_read(jcr->read_dcr) || + !acquire_device_for_append(jcr->dcr)) { + jcr->setJobStatus(JS_ErrorTerminated); + goto bail_out; + } + jcr->dcr->dev->start_of_job(jcr->dcr); + + Dmsg2(200, "===== After acquire pos %u:%u\n", jcr->dcr->dev->file, jcr->dcr->dev->block_num); + jcr->sendJobStatus(JS_Running); + + begin_data_spool(jcr->dcr); + begin_attribute_spool(jcr); + + jcr->dcr->VolFirstIndex = jcr->dcr->VolLastIndex = 0; + jcr->run_time = time(NULL); + set_start_vol_position(jcr->dcr); + + jcr->JobFiles = 0; + jcr->dcr->set_ameta(); + jcr->read_dcr->set_ameta(); + ok = read_records(jcr->read_dcr, record_cb, mount_next_read_volume); + goto ok_out; + +bail_out: + ok = false; + +ok_out: + if (jcr->dcr) { + jcr->dcr->set_ameta(); + dev = jcr->dcr->dev; + Dmsg1(100, "ok=%d\n", ok); + if (ok || dev->can_write()) { + if (!dev->flush_before_eos(jcr->dcr)) { + Jmsg2(jcr, M_FATAL, 0, _("Fatal append error on device %s: ERR=%s\n"), + dev->print_name(), dev->bstrerror()); + Dmsg0(100, _("Set ok=FALSE after write_block_to_device.\n")); + //possible_incomplete_job(jcr, last_file_index); + ok = false; + } + /* Flush out final ameta partial block of this session */ + if (!jcr->dcr->write_final_block_to_device()) { + Jmsg2(jcr, M_FATAL, 0, _("Fatal append error on device %s: ERR=%s\n"), + dev->print_name(), dev->bstrerror()); + Dmsg0(100, _("Set ok=FALSE after write_final_block_to_device.\n")); + ok = false; + } + Dmsg2(200, "Flush block to device pos %u:%u\n", dev->file, dev->block_num); + } + flush_jobmedia_queue(jcr); + if (!ok) { + discard_data_spool(jcr->dcr); + } else { + /* Note: if commit is OK, the device will remain blocked */ + commit_data_spool(jcr->dcr); + } + + /* + * Don't use time_t for job_elapsed as time_t can be 32 or 64 bits, + * and the subsequent Jmsg() editing will break + */ + int32_t job_elapsed = time(NULL) - jcr->run_time; + + if (job_elapsed <= 0) { + job_elapsed = 1; + } + + Jmsg(jcr, M_INFO, 0, _("Elapsed time=%02d:%02d:%02d, Transfer rate=%s Bytes/second\n"), + job_elapsed / 3600, job_elapsed 
% 3600 / 60, job_elapsed % 60, + edit_uint64_with_suffix(jcr->JobBytes / job_elapsed, ec1)); + + /* Release the device -- and send final Vol info to DIR */ + release_device(jcr->dcr); + + if (!ok || job_canceled(jcr)) { + discard_attribute_spool(jcr); + } else { + commit_attribute_spool(jcr); + } + } + + if (jcr->read_dcr) { + if (!release_device(jcr->read_dcr)) { + ok = false; + } + } + + jcr->sendJobStatus(); /* update director */ + + Dmsg0(30, "Done reading.\n"); + jcr->end_time = time(NULL); + dequeue_messages(jcr); /* send any queued messages */ + if (ok) { + jcr->setJobStatus(JS_Terminated); + } + generate_daemon_event(jcr, "JobEnd"); + generate_plugin_event(jcr, bsdEventJobEnd); + dir->fsend(Job_end, jcr->Job, jcr->JobStatus, jcr->JobFiles, + edit_uint64(jcr->JobBytes, ec1), jcr->JobErrors, jcr->StatusErrMsg); + Dmsg6(100, Job_end, jcr->Job, jcr->JobStatus, jcr->JobFiles, ec1, jcr->JobErrors, jcr->StatusErrMsg); + dequeue_daemon_messages(jcr); + + dir->signal(BNET_EOD); /* send EOD to Director daemon */ + free_plugins(jcr); /* release instantiated plugins */ + + return ok; +} + + +/* + * Called here for each record from read_records() + * Returns: true if OK + * false if error + */ +static bool record_cb(DCR *dcr, DEV_RECORD *rec) +{ + JCR *jcr = dcr->jcr; + DEVICE *dev = jcr->dcr->dev; + char buf1[100], buf2[100]; + bool restoredatap = false; + POOLMEM *orgdata = NULL; + uint32_t orgdata_len = 0; + bool ret = false; + + /* If label and not for us, discard it */ + if (rec->FileIndex < 0 && rec->match_stat <= 0) { + ret = true; + goto bail_out; + } + /* We want to write SOS_LABEL and EOS_LABEL discard all others */ + switch (rec->FileIndex) { + case PRE_LABEL: + case VOL_LABEL: + case EOT_LABEL: + case EOM_LABEL: + ret = true; /* don't write vol labels */ + goto bail_out; + } + + /* + * For normal migration jobs, FileIndex values are sequential because + * we are dealing with one job. However, for Vbackup (consolidation), + * we will be getting records from multiple jobs and writing them back + * out, so we need to ensure that the output FileIndex is sequential. + * We do so by detecting a FileIndex change and incrementing the + * JobFiles, which we then use as the output FileIndex. + */ + if (rec->FileIndex >= 0) { + /* If something changed, increment FileIndex */ + if (rec->VolSessionId != rec->last_VolSessionId || + rec->VolSessionTime != rec->last_VolSessionTime || + rec->FileIndex != rec->last_FileIndex) { + jcr->JobFiles++; + rec->last_VolSessionId = rec->VolSessionId; + rec->last_VolSessionTime = rec->VolSessionTime; + rec->last_FileIndex = rec->FileIndex; + } + rec->FileIndex = jcr->JobFiles; /* set sequential output FileIndex */ + } + + /* TODO: If user really wants to do rehydrate the data, we should propose + * this option. + */ + + /* + * Modify record SessionId and SessionTime to correspond to + * output. 
+ */ + rec->VolSessionId = jcr->VolSessionId; + rec->VolSessionTime = jcr->VolSessionTime; + Dmsg5(200, "before write JobId=%d FI=%s SessId=%d Strm=%s len=%d\n", + jcr->JobId, + FI_to_ascii(buf1, rec->FileIndex), rec->VolSessionId, + stream_to_ascii(buf2, rec->Stream, rec->FileIndex), rec->data_len); + + if (!jcr->dcr->write_record(rec)) { + Jmsg2(jcr, M_FATAL, 0, _("Fatal append error on device %s: ERR=%s\n"), + dev->print_name(), dev->bstrerror()); + goto bail_out; + } + /* Restore packet */ + rec->VolSessionId = rec->last_VolSessionId; + rec->VolSessionTime = rec->last_VolSessionTime; + if (rec->FileIndex < 0) { + ret = true; /* don't send LABELs to Dir */ + goto bail_out; + } + jcr->JobBytes += rec->data_len; /* increment bytes this job */ + Dmsg5(500, "wrote_record JobId=%d FI=%s SessId=%d Strm=%s len=%d\n", + jcr->JobId, + FI_to_ascii(buf1, rec->FileIndex), rec->VolSessionId, + stream_to_ascii(buf2, rec->Stream, rec->FileIndex), rec->data_len); + + send_attrs_to_dir(jcr, rec); + ret = true; + +bail_out: + if (restoredatap) { + rec->data = orgdata; + rec->data_len = orgdata_len; + } + return ret; +} diff --git a/src/stored/vol_mgr.c b/src/stored/vol_mgr.c new file mode 100644 index 00000000..c8e0c6fe --- /dev/null +++ b/src/stored/vol_mgr.c @@ -0,0 +1,936 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Volume management functions for Storage Daemon + * + * Kern Sibbald, MM + * + * Split from reserve.c October 2008 + * + */ + +#include "bacula.h" +#include "stored.h" + +const int dbglvl = 150; + +static dlist *vol_list = NULL; +static brwlock_t vol_list_lock; +static dlist *read_vol_list = NULL; +static bthread_mutex_t read_vol_lock = BTHREAD_MUTEX_PRIORITY(PRIO_SD_READ_VOL_LIST); + +/* Forward referenced functions */ +static void free_vol_item(VOLRES *vol); +static VOLRES *new_vol_item(DCR *dcr, const char *VolumeName); +static void debug_list_volumes(const char *imsg); + +/* + * For append volumes the key is the VolumeName. + */ +static int name_compare(void *item1, void *item2) +{ + return strcmp(((VOLRES *)item1)->vol_name, ((VOLRES *)item2)->vol_name); +} + +/* + * For read volumes the key is JobId, VolumeName. + */ +static int read_compare(void *item1, void *item2) +{ + VOLRES *vol1 = (VOLRES *)item1; + VOLRES *vol2 = (VOLRES *)item2; + + if (vol1->get_jobid() == vol2->get_jobid()) { + return strcmp(vol1->vol_name, vol2->vol_name); + } + if (vol1->get_jobid() < vol2->get_jobid()) { + return -1; + } + return 1; +} + +bool is_vol_list_empty() +{ + return vol_list->empty(); +} + +int vol_list_lock_count = 0; + +/* + * Initialized the main volume list. Note, we are using a recursive lock. + */ +void init_vol_list_lock() +{ + int errstat; + if ((errstat=rwl_init(&vol_list_lock, PRIO_SD_VOL_LIST)) != 0) { + berrno be; + Emsg1(M_ABORT, 0, _("Unable to initialize volume list lock. 
ERR=%s\n"), + be.bstrerror(errstat)); + } +} + +void term_vol_list_lock() +{ + rwl_destroy(&vol_list_lock); +} + +/* + * This allows a given thread to recursively call to lock_volumes() + */ +void _lock_volumes(const char *file, int line) +{ + int errstat; + vol_list_lock_count++; + if ((errstat=rwl_writelock_p(&vol_list_lock, file, line)) != 0) { + berrno be; + Emsg2(M_ABORT, 0, "rwl_writelock failure. stat=%d: ERR=%s\n", + errstat, be.bstrerror(errstat)); + } +} + +void _unlock_volumes() +{ + int errstat; + vol_list_lock_count--; + if ((errstat=rwl_writeunlock(&vol_list_lock)) != 0) { + berrno be; + Emsg2(M_ABORT, 0, "rwl_writeunlock failure. stat=%d: ERR=%s\n", + errstat, be.bstrerror(errstat)); + } +} + +#define lock_read_volumes() lock_read_volumes_p(__FILE__, __LINE__) +static void lock_read_volumes_p(const char *file="**Unknown", int line=0) +{ + bthread_mutex_lock_p(&read_vol_lock, file, line); +} + +static void unlock_read_volumes() +{ + bthread_mutex_unlock(&read_vol_lock); +} + +/* + * Add a volume to the read list. + * Note, we use VOLRES because it simplifies the code + * even though, the only part of VOLRES that we need is + * the volume name. The same volume may be in the list + * multiple times, but each one is distinguished by the + * JobId. We use JobId, VolumeName as the key. + * We can get called multiple times for the same volume because + * when parsing the bsr, the volume name appears multiple times. + */ +void add_read_volume(JCR *jcr, const char *VolumeName) +{ + VOLRES *nvol, *vol; + + nvol = new_vol_item(NULL, VolumeName); + nvol->set_jobid(jcr->JobId); + nvol->set_reading(); + lock_read_volumes(); + vol = (VOLRES *)read_vol_list->binary_insert(nvol, read_compare); + if (vol != nvol) { + free_vol_item(nvol); + Dmsg2(dbglvl, "read_vol=%s JobId=%d already in list.\n", VolumeName, jcr->JobId); + } else { + Dmsg2(dbglvl, "add read_vol=%s JobId=%d\n", VolumeName, jcr->JobId); + } + unlock_read_volumes(); +} + +/* + * Check if volume name is in the read list. + */ +bool is_read_volume(JCR *jcr, const char *VolumeName) +{ + VOLRES vol, *fvol; + lock_read_volumes(); + vol.vol_name = bstrdup(VolumeName); + fvol = (VOLRES *)read_vol_list->binary_search(&vol, name_compare); + free(vol.vol_name); + unlock_read_volumes(); + return fvol != NULL; +} + +/* + * Remove a given volume name from the read list. 
+ */ +void remove_read_volume(JCR *jcr, const char *VolumeName) +{ + VOLRES vol, *fvol; + lock_read_volumes(); + vol.vol_name = bstrdup(VolumeName); + vol.set_jobid(jcr->JobId); + fvol = (VOLRES *)read_vol_list->binary_search(&vol, read_compare); + free(vol.vol_name); + if (fvol) { + Dmsg3(dbglvl, "remove_read_vol=%s JobId=%d found=%d\n", VolumeName, jcr->JobId, fvol!=NULL); + } + if (fvol) { + read_vol_list->remove(fvol); + free_vol_item(fvol); + } + unlock_read_volumes(); +// pthread_cond_broadcast(&wait_next_vol); +} + +/* + * List Volumes -- this should be moved to status.c + */ +enum { + debug_lock = true, + debug_nolock = false +}; + +static void debug_list_volumes(const char *imsg) +{ + VOLRES *vol; + POOL_MEM msg(PM_MESSAGE); + + if (debug_level < dbglvl) { + return; + } + + foreach_vol(vol) { + if (vol->dev) { + Mmsg(msg, "List %s: %s in_use=%d swap=%d slot=%d on %s device %s\n", imsg, + vol->vol_name, vol->is_in_use(), vol->is_swapping(), + vol->get_slot(), + vol->dev->print_type(), vol->dev->print_name()); + } else { + Mmsg(msg, "List %s: %s in_use=%d swap=%d slot=%d no dev\n", imsg, vol->vol_name, + vol->is_in_use(), vol->is_swapping(), vol->get_slot()); + } + Dmsg1(dbglvl, "%s", msg.c_str()); + } + endeach_vol(vol); +} + + +/* + * List Volumes -- this should be moved to status.c + */ +void list_volumes(void sendit(const char *msg, int len, void *sarg), void *arg) +{ + VOLRES *vol; + POOL_MEM msg(PM_MESSAGE); + int len; + + foreach_vol(vol) { + DEVICE *dev = vol->dev; + if (dev) { + len = Mmsg(msg, "Reserved volume: %s on %s device %s\n", vol->vol_name, + dev->print_type(), dev->print_name()); + sendit(msg.c_str(), len, arg); + len = Mmsg(msg, " Reader=%d writers=%d reserves=%d volinuse=%d worm=%d\n", + dev->can_read()?1:0, dev->num_writers, dev->num_reserved(), + vol->is_in_use(), dev->is_worm()); + sendit(msg.c_str(), len, arg); + } else { + len = Mmsg(msg, "Volume %s no device. volinuse=%d\n", vol->vol_name, + vol->is_in_use()); + sendit(msg.c_str(), len, arg); + } + } + endeach_vol(vol); + + lock_read_volumes(); + foreach_dlist(vol, read_vol_list) { + DEVICE *dev = vol->dev; + if (dev) { + len = Mmsg(msg, "Read volume: %s on %s device %s\n", vol->vol_name, + dev->print_type(), dev->print_name()); + sendit(msg.c_str(), len, arg); + len = Mmsg(msg, " Reader=%d writers=%d reserves=%d volinuse=%d JobId=%d\n", + dev->can_read()?1:0, dev->num_writers, dev->num_reserved(), + vol->is_in_use(), vol->get_jobid()); + sendit(msg.c_str(), len, arg); + } else { + len = Mmsg(msg, "Volume: %s no device. volinuse=%d\n", vol->vol_name, + vol->is_in_use()); + sendit(msg.c_str(), len, arg); + } + } + unlock_read_volumes(); +} + +/* + * Create a Volume item to put in the Volume list + * Ensure that the device points to it. 
+ */ +static VOLRES *new_vol_item(DCR *dcr, const char *VolumeName) +{ + VOLRES *vol; + vol = (VOLRES *)malloc(sizeof(VOLRES)); + memset(vol, 0, sizeof(VOLRES)); + vol->vol_name = bstrdup(VolumeName); + if (dcr) { + vol->dev = dcr->dev; + Dmsg4(dbglvl, "new Vol=%s slot=%d at %p dev=%s\n", + VolumeName, vol->get_slot(), vol->vol_name, vol->dev->print_name()); + } + vol->init_mutex(); + vol->inc_use_count(); + return vol; +} + +static void free_vol_item(VOLRES *vol) +{ + DEVICE *dev = NULL; + + vol->dec_use_count(); + vol->vLock(); + if (vol->use_count() > 0) { + vol->vUnlock(); + return; + } + vol->vUnlock(); + free(vol->vol_name); + if (vol->dev) { + dev = vol->dev; + } + vol->destroy_mutex(); + free(vol); + if (dev) { + dev->vol = NULL; + } +} + +/* + * Put a new Volume entry in the Volume list. This + * effectively reserves the volume so that it will + * not be mounted again. + * + * If the device has any current volume associated with it, + * and it is a different Volume, and the device is not busy, + * we release the old Volume item and insert the new one. + * + * It is assumed that the device is free and locked so that + * we can change the device structure. + * + * Some details of the Volume list handling: + * + * 1. The Volume list entry is attached to the drive (rather than + * attached to a job as it was previously. I.e. the drive that "owns" + * the volume (in use, mounted) + * must point to the volume (still to be maintained in a list). + * + * 2. The Volume is entered in the list when a drive is reserved. + * + * 3. When a drive is in use, the device code must appropriately update the + * volume name as it changes. + * This code keeps the same list entry as long as the drive + * has any volume associated with it but the volume name in the list + * must be updated when the drive has a different volume mounted. + * + * 4. A job that has reserved a volume, can un-reserve the volume, and if the + * volume is not mounted, and not reserved, and not in use, it will be + * removed from the list. + * + * 5. If a job wants to reserve a drive with a different Volume from the one on + * the drive, it can re-use the drive for the new Volume. + + * 6. If a job wants a Volume that is in a different drive, it can either use the + * other drive or take the volume, only if the other drive is not in use or + * not reserved. + * + * One nice aspect of this is that the reserve use count and the writer use count + * already exist and are correctly programmed and will need no changes -- use + * counts are always very tricky. + * + * The old code had a concept of "reserving" a Volume, but was changed + * to reserving and using a drive. A volume is must be attached to (owned by) a + * drive and can move from drive to drive or be unused given certain specific + * conditions of the drive. The key is that the drive must "own" the Volume. 
+ * + * Return: VOLRES entry on success + * NULL volume busy on another drive + * jcr->errmsg has details + */ +VOLRES *reserve_volume(DCR *dcr, const char *VolumeName) +{ + VOLRES *vol, *nvol; + DEVICE * volatile dev = dcr->dev; + JCR *jcr = dcr->jcr; + + jcr->errmsg[0] = 0; + if (job_canceled(dcr->jcr)) { + Mmsg1(jcr->errmsg, _("Could not reserve volume \"%s\", because job canceled.\n"), + dev->VolHdr.VolumeName); + return NULL; + } + ASSERT2(dev != NULL, "No device in reserve_volume!"); + + Dmsg2(dbglvl, "enter reserve_volume=%s drive=%s\n", VolumeName, + dcr->dev->print_name()); + + /* If acquiring to write, don't accept a Volume in read list */ + if (dcr->is_writing() && is_read_volume(dcr->jcr, VolumeName)) { + Mmsg1(jcr->errmsg, _("Could not reserve volume \"%s\" for append, because it will be read.\n"), + dev->VolHdr.VolumeName); + return NULL; + } + + /* + * We lock the reservations system here to ensure + * when adding a new volume that no newly scheduled + * job can reserve it. + */ + lock_volumes(); + debug_list_volumes("begin reserve_volume"); + /* + * First, remove any old volume attached to this device as it + * is no longer used. + */ + if (dev->vol) { + vol = dev->vol; + Dmsg4(dbglvl, "Vol attached=%s, newvol=%s volinuse=%d on %s\n", + vol->vol_name, VolumeName, vol->is_in_use(), dev->print_name()); + /* + * Make sure we don't remove the current volume we are inserting + * because it was probably inserted by another job, or it + * is not being used and is marked as not reserved. + */ + if (strcmp(vol->vol_name, VolumeName) == 0) { + Dmsg3(dbglvl, "set reserved vol=%s slot=%d dev=%s\n", VolumeName, + vol->get_slot(), vol->dev->print_name()); + goto get_out; /* Volume already on this device */ + } else { + /* Don't release a volume if it was reserved by someone other than us */ + if (vol->is_in_use() && !dcr->reserved_volume) { + Dmsg5(dbglvl, "Set wait(). Cannot free vol=%s for %s (JobId=%ld). volinuse=%d on %s\n", + vol->vol_name, VolumeName, vol->get_jobid(), vol->is_in_use(), dev->print_name()); + Mmsg3(dcr->jcr->errmsg, _("Cannot reserve Volume=%s because drive is busy with Volume=%s (JobId=%ld).\n"), + VolumeName, vol->vol_name, vol->get_jobid()); + dev->set_wait(); + vol = NULL; /* vol in use */ + goto get_out; + } + Dmsg2(dbglvl, "reserve_vol free vol=%s at %p\n", vol->vol_name, vol->vol_name); + /* If old Volume is still mounted, must unload it */ + if (strcmp(vol->vol_name, dev->VolHdr.VolumeName) == 0) { + Dmsg2(50, "set_unload vol=%s slot=%d\n", vol->vol_name, vol->get_slot()); + dev->set_unload(); /* have to unload current volume */ + } + free_volume(dev); /* Release old volume entry */ + debug_list_volumes("reserve_vol free"); + } + } + + /* Create a new Volume entry */ + nvol = new_vol_item(dcr, VolumeName); + + /* + * Handle request for read volume for file + * device, for which we assume we can open multiple + * devices to read the Volume. + * + * Note: when doing multiple simultaneous reads + * of the same volume, the volume names are not + * inserted into the write volume list. + */ + if (dcr->is_reading() && dev->is_file()) { + nvol->set_jobid(dcr->jcr->JobId); + nvol->set_reading(); + vol = nvol; + dev->vol = vol; + goto get_out; + } else { + vol = (VOLRES *)vol_list->binary_insert(nvol, name_compare); + } + + /* + * This part handles any write volumes or read volumes that + * cannot be simultaneously on multiple devices. 
+ */ + if (vol != nvol) { + /* + * At this point, a Volume with this name already is in the list, + * so we simply release our new Volume entry. Note, this should + * only happen if we are moving the volume from one drive to another. + */ + Dmsg2(dbglvl, "Found vol=%s dev-same=%d\n", vol->vol_name, dev==vol->dev); + Dmsg2(dbglvl, "reserve_vol free-tmp vol=%s at %p\n", + vol->vol_name, vol->vol_name); + /* + * Clear dev pointer so that free_vol_item() doesn't + * take away our volume. + */ + nvol->dev = NULL; /* don't zap dev entry */ + free_vol_item(nvol); + + if (vol->dev) { + Dmsg2(dbglvl, "dev=%s vol->dev=%s\n", dev->print_name(), vol->dev->print_name()); + } + + /* + * Check if we are trying to use the Volume on a different drive + * dev is our device + * vol->dev is where the Volume we want is + */ + if (dev != vol->dev) { + /* Caller wants to switch Volume to another device */ + if (!vol->dev->is_busy() && !vol->is_swapping()) { + int32_t slot; + Dmsg3(dbglvl, "==== Swap vol=%s from dev=%s to %s\n", + VolumeName, vol->dev->print_name(), dev->print_name()); + free_volume(dev); /* free any volume attached to our drive */ + Dmsg3(50, "set_unload vol=%s slot=%d dev=%s\n", vol->vol_name, + vol->get_slot(), dev->print_name()); + dev->set_unload(); /* Unload any volume that is on our drive */ + dcr->set_dev(vol->dev); /* temp point to other dev */ + slot = get_autochanger_loaded_slot(dcr); /* get slot on other drive */ + dcr->set_dev(dev); /* restore dev */ + vol->set_slot(slot); /* save slot */ + vol->dev->set_unload(); /* unload the other drive */ + vol->set_swapping(); /* swap from other drive */ + dev->swap_dev = vol->dev; /* remember to get this vol */ + dev->set_load(); /* then reload on our drive */ + vol->dev->vol = NULL; /* remove volume from other drive */ + vol->dev = dev; /* point the Volume at our drive */ + dev->vol = vol; /* point our drive at the Volume */ + } else { + if (dev) { + Jmsg8(jcr, M_WARNING, 0, "Need volume for %s from other drive, " + "but swap not possible. Status: reader=%d writers=%d " + "reserves=%d swap=%d vol=%s from dev=%s to %s\n", + dcr->is_writing()?"write":"read", + vol->dev->can_read(), vol->dev->num_writers, + vol->dev->num_reserved(), vol->is_swapping(), + VolumeName, vol->dev->print_name(), dev->print_name()); + } + if (vol->is_swapping()) { + DEVICE *swapdev = dev->swap_dev; + if (vol && dev && swapdev) { + Mmsg3(jcr->errmsg, _("Volume %s is busy swapping from %s to %s\n"), + NPRT(vol->vol_name), dev->print_name(), swapdev->print_name()); + } else { + Mmsg1(jcr->errmsg, _("Volume %s is busy swapping.\n"), + NPRT(vol->vol_name)); + } + } else if (vol->dev) { + Mmsg2(jcr->errmsg, _("%s device %s is busy.\n"), + vol->dev->print_type(), vol->dev->print_name()); + } else { + Mmsg1(jcr->errmsg, _("Volume %s is busy swapping.\n"), + NPRT(vol->vol_name)); + } + debug_list_volumes("failed swap"); + vol = NULL; /* device busy */ + goto get_out; + } + } else { + dev->vol = vol; + } + } else { + dev->vol = vol; /* point to newly inserted volume */ + } + +get_out: + if (vol) { + Dmsg2(dbglvl, "set in_use. vol=%s dev=%s\n", vol->vol_name, + vol->dev->print_name()); + vol->set_in_use(); + dcr->reserved_volume = true; + bstrncpy(dcr->VolumeName, vol->vol_name, sizeof(dcr->VolumeName)); + } + debug_list_volumes("end new volume"); + unlock_volumes(); + return vol; +} + +/* + * Start walk of vol chain + * The proper way to walk the vol chain is: + * VOLRES *vol; + * foreach_vol(vol) { + * ... 
+ * } + * endeach_vol(vol); + * + * It is possible to leave out the endeach_vol(vol), but + * in that case, the last vol referenced must be explicitly + * released with: + * + * free_vol_item(vol); + * + */ +VOLRES *vol_walk_start() +{ + VOLRES *vol; + lock_volumes(); + vol = (VOLRES *)vol_list->first(); + if (vol) { + vol->inc_use_count(); + Dmsg2(dbglvl, "Inc walk_start use_count=%d volname=%s\n", + vol->use_count(), vol->vol_name); + } + unlock_volumes(); + return vol; +} + +/* + * Get next vol from chain, and release current one + */ +VOLRES *vol_walk_next(VOLRES *prev_vol) +{ + VOLRES *vol; + + lock_volumes(); + vol = (VOLRES *)vol_list->next(prev_vol); + if (vol) { + vol->inc_use_count(); + Dmsg2(dbglvl, "Inc walk_next use_count=%d volname=%s\n", + vol->use_count(), vol->vol_name); + } + if (prev_vol) { + free_vol_item(prev_vol); + } + unlock_volumes(); + return vol; +} + +/* + * Release last vol referenced + */ +void vol_walk_end(VOLRES *vol) +{ + if (vol) { + lock_volumes(); + Dmsg2(dbglvl, "Free walk_end use_count=%d volname=%s\n", + vol->use_count(), vol->vol_name); + free_vol_item(vol); + unlock_volumes(); + } +} + +/* + * Search for a Volume name in the Volume list. + * + * Returns: VOLRES entry on success + * NULL if the Volume is not in the list + */ +static VOLRES *find_volume(const char *VolumeName) +{ + VOLRES vol, *fvol; + + if (vol_list->empty()) { + return NULL; + } + /* Do not lock reservations here */ + lock_volumes(); + vol.vol_name = bstrdup(VolumeName); + fvol = (VOLRES *)vol_list->binary_search(&vol, name_compare); + free(vol.vol_name); + Dmsg2(dbglvl, "find_vol=%s found=%d\n", VolumeName, fvol!=NULL); + debug_list_volumes("find_volume"); + unlock_volumes(); + return fvol; +} + +/* + * Search for a Volume name in the read Volume list. + * + * Returns: VOLRES entry on success + * NULL if the Volume is not in the list + */ +static VOLRES *find_read_volume(const char *VolumeName) +{ + VOLRES vol, *fvol; + + if (read_vol_list->empty()) { + Dmsg0(dbglvl, "find_read_vol: read_vol_list empty.\n"); + return NULL; + } + /* Do not lock reservations here */ + lock_read_volumes(); + vol.vol_name = bstrdup(VolumeName); + /* Note, we do want a simple name_compare on volume name only here */ + fvol = (VOLRES *)read_vol_list->binary_search(&vol, name_compare); + free(vol.vol_name); + Dmsg2(dbglvl, "find_read_vol=%s found=%d\n", VolumeName, fvol!=NULL); + unlock_read_volumes(); + return fvol; +} + + +/* + * Free a Volume from the Volume list if it is no longer used + * Note, for tape drives we want to remember where the Volume + * was when last used, so rather than free the volume entry, + * we simply mark it "not reserved" so when the drive is really + * needed for another volume, we can reuse it. 
+ * + * Returns: true if the Volume found and "removed" from the list + * false if the Volume is not in the list or is in use + */ +bool volume_unused(DCR *dcr) +{ + DEVICE *dev = dcr->dev; + + if (!dev->vol) { + Dmsg1(dbglvl, "vol_unused: no vol on %s\n", dev->print_name()); + debug_list_volumes("null vol cannot unreserve_volume"); + return false; + } + + Dmsg2(dbglvl, "Clear in_use vol=%s slot=%d\n", dev->vol->vol_name, + dev->vol->get_slot()); + dev->vol->clear_in_use(); + + if (dev->vol->is_swapping()) { + Dmsg1(dbglvl, "vol_unused: vol being swapped on %s\n", dev->print_name()); + debug_list_volumes("swapping vol cannot free_volume"); + return false; + } + + /* + * If this is a tape, we do not free the volume, rather we wait + * until the autoloader unloads it, or until another tape is + * explicitly read in this drive. This allows the SD to remember + * where the tapes are or last were. + */ + Dmsg5(dbglvl, "set not reserved vol=%s slot=%d writers=%d reserves=%d dev=%s\n", + dev->vol->vol_name, dev->vol->get_slot(), dev->num_writers, + dev->num_reserved(), dev->print_name()); + if (dev->is_tape() || dev->is_autochanger()) { + return true; + } else { + /* + * Note, this frees the volume reservation entry, but the + * file descriptor remains open with the OS. + */ + return free_volume(dev); + } +} + +/* + * Unconditionally release the volume entry + * Note: read volumes are not in the list, so + * do not attempt to remove them. + */ +bool free_volume(DEVICE *dev) +{ + VOLRES *vol; + + lock_volumes(); + vol = dev->vol; + if (vol == NULL) { + Dmsg1(dbglvl, "No vol on dev %s\n", dev->print_name()); + unlock_volumes(); + return false; + } + /* Don't free a volume while it is being swapped */ + if (!vol->is_swapping()) { + Dmsg2(dbglvl, "Clear in_use vol=%s slot=%d\n", vol->vol_name, vol->get_slot()); + dev->vol = NULL; + if (vol->is_writing()) { + vol_list->remove(vol); + } + Dmsg3(dbglvl, "Remove volume %s slot=%d dev=%s\n", vol->vol_name, + vol->get_slot(), dev->print_name()); + free_vol_item(vol); + debug_list_volumes("free_volume"); + } else { + Dmsg1(dbglvl, "=== Cannot clear. 
Swapping vol=%s\n", vol->vol_name); + } + unlock_volumes(); + return true; +} + + +/* Create the Volume list */ +void create_volume_lists() +{ + VOLRES *vol = NULL; + if (vol_list == NULL) { + vol_list = New(dlist(vol, &vol->link)); + } + if (read_vol_list == NULL) { + read_vol_list = New(dlist(vol, &vol->link)); + } +} + +/* + * Free normal append volumes list + */ +static void free_volume_list() +{ + VOLRES *vol; + if (vol_list) { + lock_volumes(); + foreach_dlist(vol, vol_list) { + if (vol->dev) { + Dmsg2(dbglvl, "free vol_list Volume=%s dev=%s\n", vol->vol_name, vol->dev->print_name()); + } else { + Dmsg1(dbglvl, "free vol_list Volume=%s No dev\n", vol->vol_name); + } + free(vol->vol_name); + vol->vol_name = NULL; + vol->destroy_mutex(); + } + delete vol_list; + vol_list = NULL; + unlock_volumes(); + } +} + +/* Release all Volumes from the list */ +void free_volume_lists() +{ + VOLRES *vol; + + free_volume_list(); /* normal append list */ + + if (read_vol_list) { + lock_read_volumes(); + foreach_dlist(vol, read_vol_list) { + if (vol->dev) { + Dmsg2(dbglvl, "free read_vol_list Volume=%s dev=%s\n", vol->vol_name, vol->dev->print_name()); + } else { + Dmsg1(dbglvl, "free read_vol_list Volume=%s No dev\n", vol->vol_name); + } + free(vol->vol_name); + vol->vol_name = NULL; + vol->destroy_mutex(); + } + delete read_vol_list; + read_vol_list = NULL; + unlock_read_volumes(); + } +} + +/* + * Determine if caller can write on volume. + * If not, return reason in jcr->errmsg + */ +bool DCR::can_i_write_volume() +{ + VOLRES *vol; + + vol = find_read_volume(VolumeName); + if (vol) { + Mmsg(jcr->errmsg, "Found in read list; cannot write vol=%s\n", VolumeName); + Dmsg1(100, "Found in read list; cannot write vol=%s\n", VolumeName); + return false; + } + return can_i_use_volume(); +} + +/* + * Determine if caller can read or write volume. + * If not, return reason in jcr->errmsg + */ +bool DCR::can_i_use_volume() +{ + bool rtn = true; + VOLRES *vol; + + if (job_canceled(jcr)) { + Mmsg(jcr->errmsg, "Job is canceled\n"); + return false; + } + lock_volumes(); + vol = find_volume(VolumeName); + if (!vol) { + Dmsg1(dbglvl, "Vol=%s not in use.\n", VolumeName); + goto get_out; /* vol not in list */ + } + ASSERT2(vol->dev != NULL, "No device in can_i_use_volume!"); + + if (dev == vol->dev) { /* same device OK */ + Dmsg1(dbglvl, "Vol=%s on same dev.\n", VolumeName); + goto get_out; + } else { + Dmsg3(dbglvl, "Vol=%s on %s we have %s\n", VolumeName, + vol->dev->print_name(), dev->print_name()); + } + /* ***FIXME*** check this ... */ + if (!vol->dev->is_busy()) { + Dmsg2(dbglvl, "Vol=%s dev=%s not busy.\n", VolumeName, vol->dev->print_name()); + goto get_out; + } else { + Dmsg2(dbglvl, "Vol=%s dev=%s busy.\n", VolumeName, vol->dev->print_name()); + } + Mmsg(jcr->errmsg, "Volume=%s in use on another device %s.\n", VolumeName, vol->dev->print_name()); + Dmsg2(dbglvl, "Volume=%s in use on another device %s.\n", VolumeName, vol->dev->print_name()); + rtn = false; + +get_out: + unlock_volumes(); + return rtn; + +} + +/* + * Create a temporary copy of the volume list. We do this, + * to avoid having the volume list locked during the + * call to reserve_device(), which would cause a deadlock. + * Note, we may want to add an update counter on the vol_list + * so that if it is modified while we are traversing the copy + * we can take note and act accordingly (probably redo the + * search at least a few times). 
+ */ +dlist *dup_vol_list(JCR *jcr) +{ + dlist *temp_vol_list; + VOLRES *vol = NULL; + + Dmsg0(dbglvl, "lock volumes\n"); + + Dmsg0(dbglvl, "duplicate vol list\n"); + temp_vol_list = New(dlist(vol, &vol->link)); + foreach_vol(vol) { + VOLRES *nvol; + VOLRES *tvol = (VOLRES *)malloc(sizeof(VOLRES)); + memset(tvol, 0, sizeof(VOLRES)); + tvol->vol_name = bstrdup(vol->vol_name); + tvol->dev = vol->dev; + tvol->init_mutex(); + tvol->inc_use_count(); + nvol = (VOLRES *)temp_vol_list->binary_insert(tvol, name_compare); + if (tvol != nvol) { + tvol->dev = NULL; /* don't zap dev entry */ + free_vol_item(tvol); + Pmsg0(000, "Logic error. Duplicating vol list hit duplicate.\n"); + Jmsg(jcr, M_WARNING, 0, "Logic error. Duplicating vol list hit duplicate.\n"); + } + } + endeach_vol(vol); + Dmsg0(dbglvl, "unlock volumes\n"); + return temp_vol_list; +} + +/* + * Free the specified temp list. + */ +void free_temp_vol_list(dlist *temp_vol_list) +{ + dlist *save_vol_list; + + lock_volumes(); + save_vol_list = vol_list; + vol_list = temp_vol_list; + free_volume_list(); /* release temp_vol_list */ + vol_list = save_vol_list; + Dmsg0(dbglvl, "deleted temp vol list\n"); + Dmsg0(dbglvl, "unlock volumes\n"); + unlock_volumes(); + debug_list_volumes("after free temp table"); +} diff --git a/src/stored/vol_mgr.h b/src/stored/vol_mgr.h new file mode 100644 index 00000000..59045c57 --- /dev/null +++ b/src/stored/vol_mgr.h @@ -0,0 +1,91 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Pulled out of dev.h + * + * Kern Sibbald, MMXIII + * + */ + +/* + * Some details of how volume reservations work + * + * class VOLRES: + * set_in_use() volume being used on current drive + * clear_in_use() no longer being used. Can be re-used or moved. 
+ * set_swapping() set volume being moved to another drive + * is_swapping() volume is being moved to another drive + * clear_swapping() volume normal + * + */ + +#ifndef __VOL_MGR_H +#define __VOL_MGR_H 1 + +class VOLRES; +VOLRES *vol_walk_start(); +VOLRES *vol_walk_next(VOLRES *prev_vol); +void vol_walk_end(VOLRES *vol); + +/* + * Volume reservation class -- see vol_mgr.c and reserve.c + */ +class VOLRES { + bool m_swapping; /* set when swapping to another drive */ + bool m_in_use; /* set when volume reserved or in use */ + bool m_reading; /* set when reading */ + int32_t m_slot; /* slot of swapping volume */ + uint32_t m_JobId; /* JobId for read volumes */ + volatile int32_t m_use_count; /* Use count */ + pthread_mutex_t m_mutex; /* Vol muntex */ +public: + dlink link; + char *vol_name; /* Volume name */ + DEVICE *dev; /* Pointer to device to which we are attached */ + + void init_mutex() { pthread_mutex_init(&m_mutex, NULL); }; + void destroy_mutex() { pthread_mutex_destroy(&m_mutex); }; + void vLock() { P(m_mutex); }; + void vUnlock() { V(m_mutex); }; + void inc_use_count(void) {P(m_mutex); m_use_count++; V(m_mutex); }; + void dec_use_count(void) {P(m_mutex); m_use_count--; V(m_mutex); }; + int32_t use_count() const { return m_use_count; }; + bool is_swapping() const { return m_swapping; }; + bool is_reading() const { return m_reading; }; + bool is_writing() const { return !m_reading; }; + void set_reading() { m_reading = true; }; + void clear_reading() { m_reading = false; }; + void set_swapping() { m_swapping = true; }; + void clear_swapping() { m_swapping = false; }; + bool is_in_use() const { return m_in_use; }; + void set_in_use() { m_in_use = true; }; + void clear_in_use() { m_in_use = false; }; + void set_slot(int32_t slot) { m_slot = slot; }; + void clear_slot() { m_slot = -1; }; + int32_t get_slot() const { return m_slot; }; + uint32_t get_jobid() const { return m_JobId; }; + void set_jobid(uint32_t JobId) { m_JobId = JobId; }; +}; + +#define foreach_vol(vol) \ + for (vol=vol_walk_start(); vol; (vol=vol_walk_next(vol)) ) + +#define endeach_vol(vol) vol_walk_end(vol) + +#endif diff --git a/src/stored/vtape_dev.c b/src/stored/vtape_dev.c new file mode 100644 index 00000000..9cf182bd --- /dev/null +++ b/src/stored/vtape_dev.c @@ -0,0 +1,1014 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +/* + * Please note!!! The VTAPE device is for testing only. + * It simulates a tape drive, which is useful for testing + * without a real drive, but is inefficient for writing + * disk volumes. In addition, we do not test for every + * possible error condition, so please do not use this + * in production. 
+ */ +/* + +Device { + Name = Drive-1 # + Maximum File Size = 800M + Maximum Volume Size = 3G + Device Type = TAPE + Archive Device = /tmp/fake + Media Type = DLT-8000 + AutomaticMount = yes; # when device opened, read it + AlwaysOpen = yes; + RemovableMedia = yes; + RandomAccess = no; +} + + Block description : + + block { + int32 size; + void *data; + } + + EOF description : + + EOF { + int32 size=0; + } + + + */ + +#include "bacula.h" /* define 64bit file usage */ +#include "stored.h" + +#ifdef USE_VTAPE +#include + +static int dbglevel = 100; +#define FILE_OFFSET 30 + +void vtape_debug(int level) +{ + dbglevel = level; +} + +/* DEVICE virtual that we redefine. */ +int vtape::d_ioctl(int fd, ioctl_req_t request, char *op) +{ + int result = 0; + + if (request == MTIOCTOP) { + result = tape_op((mtop *)op); + } else if (request == MTIOCGET) { + result = tape_get((mtget *)op); + } else if (request == MTIOCPOS) { + result = tape_pos((mtpos *)op); + } else { + errno = ENOTTY; + result = -1; + } + + return result; +} + +int vtape::tape_op(struct mtop *mt_com) +{ + int result=0; + int count = mt_com->mt_count; + + if (!online) { + errno = ENOMEDIUM; + return -1; + } + + switch (mt_com->mt_op) + { + case MTRESET: + case MTNOP: + case MTSETDRVBUFFER: + break; + + default: + case MTRAS1: + case MTRAS2: + case MTRAS3: + case MTSETDENSITY: + errno = ENOTTY; + result = -1; + break; + + case MTFSF: /* Forward space over mt_count filemarks. */ + do { + result = fsf(); + } while (--count > 0 && result == 0); + break; + + case MTBSF: /* Backward space over mt_count filemarks. */ + do { + result = bsf(); + } while (--count > 0 && result == 0); + break; + + case MTFSR: /* Forward space over mt_count records (tape blocks). */ +/* + file number = 1 + block number = 0 + + file number = 1 + block number = 1 + + mt: /dev/lto2: Erreur d'entree/sortie + + file number = 2 + block number = 0 +*/ + /* tester si on se trouve a la fin du fichier */ + result = fsr(mt_com->mt_count); + break; + + case MTBSR: /* Backward space over mt_count records (tape blocks). */ + result = bsr(mt_com->mt_count); + break; + + case MTWEOF: /* Write mt_count filemarks. */ + do { + result = weof(); + } while (result == 0 && --count > 0); + break; + + case MTREW: /* Rewind. */ + Dmsg0(dbglevel, "rewind vtape\n"); + check_eof(); + atEOF = atEOD = false; + atBOT = true; + current_file = 0; + current_block = 0; + lseek(fd, 0, SEEK_SET); + result = !read_fm(VT_READ_EOF); + break; + + case MTOFFL: /* put tape offline */ + result = offline(NULL) ? 0 : -1; + break; + + case MTRETEN: /* Re-tension tape. */ + result = 0; + break; + + case MTBSFM: /* not used by bacula */ + errno = EIO; + result = -1; + break; + + case MTFSFM: /* not used by bacula */ + errno = EIO; + result = -1; + break; + + case MTEOM:/* Go to the end of the recorded media (for appending files). 
*/ + while (next_FM) { + lseek(fd, next_FM, SEEK_SET); + if (read_fm(VT_READ_EOF)) { + current_file++; + } + } + boffset_t l; + while (::read(fd, &l, sizeof(l)) > 0) { + if (l) { + lseek(fd, l, SEEK_CUR); + } else { + ASSERT(0); + } + Dmsg0(dbglevel, "skip 1 block\n"); + } + current_block = -1; + atEOF = false; + atEOD = true; + +/* + file number = 3 + block number = -1 +*/ + /* Can be at EOM */ + break; + + case MTERASE: /* not used by bacula */ + atEOD = true; + atEOF = false; + atEOT = false; + + current_file = 0; + current_block = -1; + lseek(fd, 0, SEEK_SET); + read_fm(VT_READ_EOF); + truncate_file(); + break; + + case MTSETBLK: + break; + + case MTSEEK: + break; + + case MTTELL: + break; + + case MTFSS: + break; + + case MTBSS: + break; + + case MTWSM: + break; + + case MTLOCK: + break; + + case MTUNLOCK: + break; + + case MTLOAD: + break; + + case MTUNLOAD: + break; + + case MTCOMPRESSION: + break; + + case MTSETPART: + break; + + case MTMKPART: + break; + } + + return result == 0 ? 0 : -1; +} + +int vtape::tape_get(struct mtget *mt_get) +{ + int density = 1; + int block_size = 1024; + + mt_get->mt_type = MT_ISSCSI2; + mt_get->mt_blkno = current_block; + mt_get->mt_fileno = current_file; + + mt_get->mt_resid = -1; +// pos_info.PartitionBlockValid ? pos_info.Partition : (ULONG)-1; + + /* TODO */ + mt_get->mt_dsreg = + ((density << MT_ST_DENSITY_SHIFT) & MT_ST_DENSITY_MASK) | + ((block_size << MT_ST_BLKSIZE_SHIFT) & MT_ST_BLKSIZE_MASK); + + + mt_get->mt_gstat = 0x00010000; /* Immediate report mode.*/ + + if (atEOF) { + mt_get->mt_gstat |= 0x80000000; // GMT_EOF + } + + if (atBOT) { + mt_get->mt_gstat |= 0x40000000; // GMT_BOT + } + if (atEOT) { + mt_get->mt_gstat |= 0x20000000; // GMT_EOT + } + + if (atEOD) { + mt_get->mt_gstat |= 0x08000000; // GMT_EOD + } + + if (0) { //WriteProtected) { + mt_get->mt_gstat |= 0x04000000; // GMT_WR_PROT + } + + if (online) { + mt_get->mt_gstat |= 0x01000000; // GMT_ONLINE + } else { + mt_get->mt_gstat |= 0x00040000; // GMT_DR_OPEN + } + mt_get->mt_erreg = 0; + + return 0; +} + +int vtape::tape_pos(struct mtpos *mt_pos) +{ + if (current_block >= 0) { + mt_pos->mt_blkno = current_block; + return 0; + } + + return -1; +} + +/* + * This function try to emulate the append only behavior + * of a tape. When you wrote something, data after the + * current position are discarded. + */ +int vtape::truncate_file() +{ + Dmsg2(dbglevel, "truncate %i:%i\n", current_file, current_block); + ftruncate(fd, lseek(fd, 0, SEEK_CUR)); + last_file = current_file; + atEOD=true; + update_pos(); + return 0; +} + +vtape::~vtape() +{ +} + +vtape::vtape() +{ + lockfd = fd = -1; + + atEOF = false; + atBOT = false; + atEOT = false; + atEOD = false; + online = false; + needEOF = false; + + file_block = 0; + last_file = 0; + current_file = 0; + current_block = -1; + + lockfile = NULL; + + max_block = VTAPE_MAX_BLOCK; +} + +int vtape::get_fd() +{ + return this->fd; +} + +/* + * DEVICE virtual that we redefine. + * + * Write a variable block of count size. 
+ * block = vtape_header + data + * vtape_header = sizeof(data) + * if vtape_header == 0, this is a EOF + */ +ssize_t vtape::d_write(int, const void *buffer, size_t count) +{ + ASSERT(online); + ASSERT(current_file >= 0); + ASSERT(count > 0); + ASSERT(buffer); + + ssize_t nb; + Dmsg3(0, "write len=%i %i:%i\n", + count, current_file,current_block); + + if (atEOT) { + Dmsg0(dbglevel, "write nothing, EOT !\n"); + errno = ENOSPC; + return -1; + } + + if (m_is_worm) { + /* The start of the vtape volume has a WEOF */ + int64_t size = ::lseek(fd, 0, SEEK_END); + if (size < 100) { + size = 0; + } + int64_t pos = DEVICE::get_full_addr(current_file, current_block); + if ( pos < size ) { + Dmsg2(0, "WORM detected. Cannot write at %lld with current size at %lld\n", pos, size -20); + errno = EIO; + return -1; + } + } else { + Dmsg0(0, "Not worm!\n"); + } + + if (!atEOD) { /* if not at the end of the data */ + truncate_file(); + } + + if (current_block != -1) { + current_block++; + } + + atBOT = false; + atEOF = false; + atEOD = true; /* End of data */ + + needEOF = true; /* next operation need EOF mark */ + + uint32_t size = count; + ::write(fd, &size, sizeof(uint32_t)); + nb = ::write(fd, buffer, count); + + if (nb != (ssize_t)count) { + atEOT = true; + Dmsg2(dbglevel, + "Not enough space writing only %i of %i requested\n", + nb, count); + } + + update_pos(); + + return nb; +} + +/* + * +---+---------+---+------------------+---+-------------------+ + * |00N| DATA |0LN| DATA |0LC| DATA | + * +---+---------+---+------------------+---+-------------------+ + * + * 0 : zero + * L : Last FileMark offset + * N : Next FileMark offset + * C : Current FileMark Offset + */ +int vtape::weof() +{ + ASSERT(online); + ASSERT(current_file >= 0); + +#if 0 + if (atEOT) { + errno = ENOSPC; + current_block = -1; + return -1; + } +#endif + + if (!atEOD) { + truncate_file(); /* nothing after this point */ + } + + last_FM = cur_FM; + cur_FM = lseek(fd, 0, SEEK_CUR); // current position + + /* update previous next_FM */ + lseek(fd, last_FM + sizeof(uint32_t)+sizeof(boffset_t), SEEK_SET); + ::write(fd, &cur_FM, sizeof(boffset_t)); + lseek(fd, cur_FM, SEEK_SET); + + next_FM = 0; + + uint32_t c=0; + ::write(fd, &c, sizeof(uint32_t)); // EOF + ::write(fd, &last_FM, sizeof(last_FM)); // F-1 + ::write(fd, &next_FM, sizeof(next_FM)); // F (will be updated next time) + + current_file++; + current_block = 0; + + needEOF = false; + atEOD = false; + atBOT = false; + atEOF = true; + + last_file = MAX(current_file, last_file); + + Dmsg4(dbglevel, "Writing EOF %i:%i last=%lli cur=%lli next=0\n", + current_file, current_block, last_FM, cur_FM); + + return 0; +} + +/* + * Go to next FM + */ +int vtape::fsf() +{ + ASSERT(online); + ASSERT(current_file >= 0); + ASSERT(fd >= 0); +/* + * 1 0 -> fsf -> 2 0 -> fsf -> 2 -1 + */ + + int ret=0; + if (atEOT || atEOD) { + errno = EIO; + current_block = -1; + return -1; + } + + atBOT = false; + Dmsg2(dbglevel+1, "fsf %i <= %i\n", current_file, last_file); + + if (next_FM > cur_FM) { /* not the last file */ + lseek(fd, next_FM, SEEK_SET); + read_fm(VT_READ_EOF); + current_file++; + atEOF = true; + ret = 0; + + } else if (atEOF) { /* last file mark */ + current_block=-1; + errno = EIO; + atEOF = false; + atEOD = true; + + } else { /* last file, but no at the end */ + fsr(100000); + + Dmsg0(dbglevel, "Try to FSF after EOT\n"); + errno = EIO; + current_file = last_file ; + current_block = -1; + atEOD=true; + ret = -1; + } + return ret; +} + +/* /------------\ /---------------\ + * 
+---+------+---+---------------+-+ + * |OLN| |0LN| | | + * +---+------+---+---------------+-+ + */ + +bool vtape::read_fm(VT_READ_FM_MODE read_all) +{ + int ret; + uint32_t c = 0; + if (read_all == VT_READ_EOF) { + ::read(fd, &c, sizeof(c)); + if (c != 0) { + lseek(fd, cur_FM, SEEK_SET); + return false; + } + } + + cur_FM = lseek(fd, 0, SEEK_CUR) - sizeof(c); + + ::read(fd, &last_FM, sizeof(last_FM)); + ret = ::read(fd, &next_FM, sizeof(next_FM)); + + current_block=0; + + Dmsg3(dbglevel, "Read FM cur=%lli last=%lli next=%lli\n", + cur_FM, last_FM, next_FM); + + return (ret == sizeof(next_FM)); +} + +/* + * TODO: Check fsr with EOF + */ +int vtape::fsr(int count) +{ + ASSERT(online); + ASSERT(current_file >= 0); + ASSERT(fd >= 0); + + int i,nb, ret=0; +// boffset_t where=0; + uint32_t s; + Dmsg4(dbglevel, "fsr %i:%i EOF=%i c=%i\n", + current_file,current_block,atEOF,count); + + check_eof(); + + if (atEOT) { + errno = EIO; + current_block = -1; + return -1; + } + + if (atEOD) { + errno = EIO; + return -1; + } + + atBOT = atEOF = false; + + /* check all block record */ + for(i=0; (i < count) && !atEOF ; i++) { + nb = ::read(fd, &s, sizeof(uint32_t)); /* get size of next block */ + if (nb == sizeof(uint32_t) && s) { + current_block++; + lseek(fd, s, SEEK_CUR); /* seek after this block */ + } else { + Dmsg4(dbglevel, "read EOF %i:%i nb=%i s=%i\n", + current_file, current_block, nb,s); + errno = EIO; + ret = -1; + if (next_FM) { + current_file++; + read_fm(VT_SKIP_EOF); + } + atEOF = true; /* stop the loop */ + } + } + + return ret; +} + +/* + * BSR + EOF => begin of EOF + EIO + * BSR + BSR + EOF => last block + * current_block = -1 + */ +int vtape::bsr(int count) +{ + ASSERT(online); + ASSERT(current_file >= 0); + ASSERT(count == 1); + ASSERT(fd >= 0); + + check_eof(); + + if (!count) { + return 0; + } + + int ret=0; + int last_f=0; + int last_b=0; + + boffset_t last=-1, last2=-1; + boffset_t orig = lseek(fd, 0, SEEK_CUR); + int orig_f = current_file; + int orig_b = current_block; + + Dmsg4(dbglevel, "bsr(%i) cur_blk=%i orig=%lli cur_FM=%lli\n", + count, current_block, orig, cur_FM); + + /* begin of tape, do nothing */ + if (atBOT) { + errno = EIO; + return -1; + } + + /* at EOF 0:-1 BOT=0 EOD=0 EOF=0 ERR: Input/output error */ + if (atEOF) { + lseek(fd, cur_FM, SEEK_SET); + atEOF = false; + if (current_file > 0) { + current_file--; + } + current_block=-1; + errno = EIO; + return -1; + } + + /* + * First, go to cur/last_FM and read all blocks to find the good one + */ + if (cur_FM == orig) { /* already just before EOF */ + lseek(fd, last_FM, SEEK_SET); + + } else { + lseek(fd, cur_FM, SEEK_SET); + } + + ret = read_fm(VT_READ_EOF); + + do { + if (!atEOF) { + last2 = last; /* keep track of the 2 last blocs position */ + last = lseek(fd, 0, SEEK_CUR); + last_f = current_file; + last_b = current_block; + Dmsg6(dbglevel, "EOF=%i last2=%lli last=%lli < orig=%lli %i:%i\n", + atEOF, last2, last, orig, current_file, current_block); + } + ret = fsr(1); + } while ((lseek(fd, 0, SEEK_CUR) < orig) && (ret == 0)); + + if (last2 > 0 && atEOF) { /* we take the previous position */ + lseek(fd, last2, SEEK_SET); + current_file = last_f; + current_block = last_b - 1; + Dmsg3(dbglevel, "1 set offset2=%lli %i:%i\n", + last, current_file, current_block); + + } else if (last > 0) { + lseek(fd, last, SEEK_SET); + current_file = last_f; + current_block = last_b; + Dmsg3(dbglevel, "2 set offset=%lli %i:%i\n", + last, current_file, current_block); + } else { + lseek(fd, orig, SEEK_SET); + current_file = orig_f; + 
current_block = orig_b; + return -1; + } + + Dmsg2(dbglevel, "bsr %i:%i\n", current_file, current_block); + errno=0; + atEOT = atEOF = atEOD = false; + atBOT = (lseek(fd, 0, SEEK_CUR) - (sizeof(uint32_t)+2*sizeof(boffset_t))) == 0; + + if (orig_b == -1) { + current_block = orig_b; + } + + return 0; +} + +/* BSF => just before last EOF + * EOF + BSF => just before EOF + * file 0 + BSF => BOT + errno + */ +int vtape::bsf() +{ + ASSERT(online); + ASSERT(current_file >= 0); + Dmsg2(dbglevel, "bsf %i:%i count=%i\n", current_file, current_block); + int ret = 0; + + check_eof(); + + atBOT = atEOF = atEOT = atEOD = false; + + if (current_file == 0) {/* BOT + errno */ + lseek(fd, 0, SEEK_SET); + read_fm(VT_READ_EOF); + current_file = 0; + current_block = 0; + atBOT = true; + errno = EIO; + ret = -1; + } else { + Dmsg1(dbglevel, "bsf last=%lli\n", last_FM); + lseek(fd, cur_FM, SEEK_SET); + current_file--; + current_block=-1; + } + return ret; +} + +/* + * DEVICE virtual that we redefine. + * + * Put vtape in offline mode + */ +bool vtape::offline(DCR *dcr) +{ + close(dcr); + + atEOF = false; /* End of file */ + atEOT = false; /* End of tape */ + atEOD = false; /* End of data */ + atBOT = false; /* Begin of tape */ + online = false; + + file_block = 0; + current_file = -1; + current_block = -1; + last_file = -1; + return true; +} + +/* + * DEVICE virtual that we redefine. + * + * A filemark is automatically written to tape if the last tape operation + * before close was a write. + */ +int vtape::d_close(int) +{ + struct flock lock; + + check_eof(); + + if (lockfd >= 0) { + lock.l_type = F_UNLCK; + lock.l_start = 0; + lock.l_whence = SEEK_SET; + lock.l_len = 0; + lock.l_pid = getpid(); + + ASSERT(fcntl(fd, F_SETLK, &lock) != -1); + ::close(lockfd); + free(lockfile); + } + + ::close(fd); + lockfd = fd = -1; + return 0; +} + +/* + * DEVICE virtual that we redefine. + * + * When a filemark is encountered while reading, the following happens. If + * there are data remaining in the buffer when the filemark is found, the + * buffered data is returned. The next read returns zero bytes. The following + * read returns data from the next file. The end of recorded data is signaled + * by returning zero bytes for two consecutive read calls. The third read + * returns an error. 
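Editor's note: the read semantics described here follow directly from the on-disk layout the vtape uses (each block is a 32-bit length followed by the data; a zero length marks a filemark and is followed by the last/next filemark offsets). A standalone sketch of a reader for that layout; read_one_block() is a hypothetical helper, not part of Bacula.

   // Returns >0 (size of the data block read), 0 (filemark found) or -1
   // (end of backing file, short read, or caller's buffer too small).
   static ssize_t read_one_block(int fd, void *buf, size_t bufsize)
   {
      uint32_t len;
      if (::read(fd, &len, sizeof(len)) != (ssize_t)sizeof(len)) {
         return -1;                              // physical end of the backing file
      }
      if (len == 0) {                            // filemark: skip last_FM/next_FM bookkeeping
         ::lseek(fd, 2 * sizeof(boffset_t), SEEK_CUR);
         return 0;
      }
      if (len > bufsize) {                       // buffer too small: skip the block
         ::lseek(fd, len, SEEK_CUR);
         errno = ENOMEM;
         return -1;
      }
      return ::read(fd, buf, len);               // the data itself
   }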
+ */ +ssize_t vtape::d_read(int, void *buffer, size_t count) +{ + ASSERT(online); + ASSERT(current_file >= 0); + ssize_t nb; + uint32_t s; + + Dmsg2(dbglevel*2, "read %i:%i\n", current_file, current_block); + + if (atEOT || atEOD) { + errno = EIO; + return -1; + } + + if (atEOF) { + if (!next_FM) { + atEOD = true; + atEOF = false; + current_block=-1; + return 0; + } + atEOF=false; + } + + check_eof(); + + atEOD = atBOT = false; + + /* reading size of data */ + nb = ::read(fd, &s, sizeof(uint32_t)); + if (nb <= 0) { + atEOF = true; /* TODO: check this */ + return 0; + } + + if (s > count) { /* not enough buffer to read block */ + Dmsg2(dbglevel, "Need more buffer to read next block %i > %i\n",s,count); + lseek(fd, s, SEEK_CUR); + errno = ENOMEM; + return -1; + } + + if (!s) { /* EOF */ + atEOF = true; + if (read_fm(VT_SKIP_EOF)) { + current_file++; + } + + return 0; + } + + /* reading data itself */ + nb = ::read(fd, buffer, s); + if (nb != (ssize_t)s) { /* read error */ + errno=EIO; + atEOT=true; + current_block = -1; + Dmsg0(dbglevel, "EOT during reading\n"); + return -1; + } /* read ok */ + + if (current_block >= 0) { + current_block++; + } + + return nb; +} + +/* Redefine DEVICE virtual function */ +int vtape::d_open(const char *pathname, int uflags) +{ + Dmsg2(dbglevel, "vtape::d_open(%s, %i)\n", pathname, uflags); + + online = true; /* assume that drive contains a tape */ + struct flock lock; + struct stat statp; + + ASSERT(!m_shstore || (m_shstore_lock && m_shstore_register)); + + if (stat(pathname, &statp) != 0) { + fd = -1; + Dmsg1(dbglevel, "Can't stat on %s\n", pathname); + if (uflags & O_NONBLOCK) { + online = false; + fd = ::open("/dev/null", O_RDWR | O_LARGEFILE, 0600); + } + } else { + fd = ::open(pathname, O_RDWR | O_LARGEFILE | O_CLOEXEC, 0600); + } + + if (fd < 0) { + berrno be; + Dmsg2(0, "Unable to open vtape device %s ERR=%s\n", pathname, be.bstrerror()); + errno = ENOMEDIUM; + return -1; + } + + lockfile = (char *)malloc(strlen(pathname) + 3); + strcpy(lockfile, pathname); + strcat(lockfile, ".l"); + + lockfd = ::open(lockfile, O_CREAT | O_RDWR | O_LARGEFILE | O_CLOEXEC, 0600); + if (lockfd < 0) { + berrno be; + Dmsg2(0, "Unable to open vtape device lock %s ERR=%s\n", lockfile, be.bstrerror()); + + } else { + lock.l_type = F_WRLCK; + lock.l_start = 0; + lock.l_whence = SEEK_SET; + lock.l_len = 0; + lock.l_pid = getpid(); + + ASSERT(fcntl(lockfd, F_SETLK, &lock) != -1); + } + + file_block = 0; + current_block = 0; + current_file = 0; + cur_FM = next_FM = last_FM = 0; + needEOF = false; + atBOT = true; + atEOT = atEOD = false; + + /* If the vtape is empty, start by writing a EOF */ + if (online && !read_fm(VT_READ_EOF)) { + lseek(fd, 0, SEEK_SET); /* rewind */ + cur_FM = next_FM = last_FM = 0; /* reset */ + weof(); /* write the first EOF */ + last_file = current_file=0; + } + + return fd; +} + +/* use this to track file usage */ +void vtape::update_pos() +{ + ASSERT(online); + struct stat statp; + if (fstat(fd, &statp) == 0) { + file_block = statp.st_blocks; + } + + Dmsg1(dbglevel*2, "update_pos=%i\n", file_block); + + if (file_block > max_block) { + atEOT = true; + } else { + atEOT = false; + } +} + +void vtape::dump() +{ + Dmsg0(dbglevel+1, "===================\n"); + Dmsg2(dbglevel, "file:block = %i:%i\n", current_file, current_block); + Dmsg1(dbglevel+1, "last_file=%i\n", last_file); + Dmsg1(dbglevel+1, "file_block=%i\n", file_block); + Dmsg4(dbglevel+1, "EOF=%i EOT=%i EOD=%i BOT=%i\n", + atEOF, atEOT, atEOD, atBOT); +} + +const char *vtape::print_type() +{ + return 
"Vtape"; +} + +#else /*!USE_VTAPE */ + +/* faked constructor and destructor to avoid undefined reference to vtable */ +vtape::vtape() +{ +} + +vtape::~vtape() +{ +} + +/* dummy implementation */ +const char *vtape::print_type() +{ + return "Vtape"; +} + +#endif /* ! USE_VTAPE */ diff --git a/src/stored/vtape_dev.h b/src/stored/vtape_dev.h new file mode 100644 index 00000000..d0db3211 --- /dev/null +++ b/src/stored/vtape_dev.h @@ -0,0 +1,126 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * vtape.h - Emulate the Linux st (scsi tape) driver on file. + * for regression and bug hunting purpose + * + */ + +#ifndef VTAPE_H +#define VTAPE_H + +#include +#include +#include "bacula.h" +#include "tape_dev.h" + +void vtape_debug(int level); + +#ifdef USE_VTAPE + +#define FTAPE_MAX_DRIVE 50 + +#define VTAPE_MAX_BLOCK 20*1024*2048; /* 20GB */ + +typedef enum { + VT_READ_EOF, /* Need to read the entire EOF struct */ + VT_SKIP_EOF /* Have already read the EOF byte */ +} VT_READ_FM_MODE; + +class vtape: public tape_dev { +private: + int fd; /* Our file descriptor */ + int lockfd; /* File descriptor for the lock file */ + + boffset_t file_block; /* size */ + boffset_t max_block; + + boffset_t last_FM; /* last file mark (last file) */ + boffset_t next_FM; /* next file mark (next file) */ + boffset_t cur_FM; /* current file mark */ + + bool atEOF; /* End of file */ + bool atEOT; /* End of media */ + bool atEOD; /* End of data */ + bool atBOT; /* Begin of tape */ + bool online; /* volume online */ + bool needEOF; /* check if last operation need eof */ + + int32_t last_file; /* last file of the volume */ + int32_t current_file; /* current position */ + int32_t current_block; /* current position */ + char * lockfile; /* Name of the lock file */ + + void destroy(); + int truncate_file(); + void check_eof() { if(needEOF) weof();}; + void update_pos(); + bool read_fm(VT_READ_FM_MODE readfirst); + +public: + int fsf(); + int fsr(int count); + int weof(); + int bsf(); + int bsr(int count); + + vtape(); + ~vtape(); + int get_fd(); + void dump(); + + int tape_op(struct mtop *mt_com); + int tape_get(struct mtget *mt_com); + int tape_pos(struct mtpos *mt_com); + + /* DEVICE virtual interfaces that we redefine */ + int d_close(int); + int d_open(const char *pathname, int flags); + int d_ioctl(int fd, ioctl_req_t request, char *op=NULL); + ssize_t d_read(int, void *buffer, size_t count); + ssize_t d_write(int, const void *buffer, size_t count); + bool offline(DCR *dcr); + + boffset_t lseek(DCR *dcr, off_t offset, int whence) { return -1; } + boffset_t lseek(int fd, off_t offset, int whence) + { return ::lseek(fd, offset, whence); } + const char *print_type(); +}; + + +#else /*!USE_VTAPE */ + +class vtape: public DEVICE { +public: + + vtape(); + ~vtape(); + int d_open(const char *pathname, int flags) { return -1; } + ssize_t d_read(int fd, void *buffer, size_t count) { return -1; } + ssize_t d_write(int fd, const void *buffer, size_t count) { 
return -1; } + int d_close(int) { return -1; } + int d_ioctl(int fd, ioctl_req_t request, char *mt=NULL) { return -1; } + boffset_t lseek(DCR *dcr, off_t offset, int whence) { return -1; } + bool open_device(DCR *dcr, int omode) { return true; } + const char *print_type(); +}; + +#endif /* USE_VTAPE */ + +#endif /* !VTAPE_H */ diff --git a/src/stored/wait.c b/src/stored/wait.c new file mode 100644 index 00000000..30075954 --- /dev/null +++ b/src/stored/wait.c @@ -0,0 +1,351 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Subroutines to handle waiting for operator intervention + * or waiting for a Device to be released + * + * Code for wait_for_sysop() pulled from askdir.c + * + * Kern Sibbald, March 2005 + */ + + +#include "bacula.h" /* pull in global headers */ +#include "stored.h" /* pull in Storage Deamon headers */ + +const int dbglvl = 400; + +/* + * Wait for SysOp to mount a tape on a specific device + * + * Returns: W_ERROR, W_TIMEOUT, W_POLL, W_MOUNT, or W_WAKE + */ +int wait_for_sysop(DCR *dcr) +{ + struct timeval tv; + struct timezone tz; + struct timespec timeout; + time_t last_heartbeat = 0; + time_t first_start = time(NULL); + int stat = 0; + int add_wait; + bool unmounted; + DEVICE *dev = dcr->dev; + JCR *jcr = dcr->jcr; + + dev->Lock(); + Dmsg1(dbglvl, "Enter blocked=%s\n", dev->print_blocked()); + + /* + * Since we want to mount a tape, make sure current one is + * not marked as using this drive. + */ + volume_unused(dcr); + + unmounted = dev->is_device_unmounted(); + dev->poll = false; + /* + * Wait requested time (dev->rem_wait_sec). However, we also wake up every + * HB_TIME seconds and send a heartbeat to the FD and the Director + * to keep stateful firewalls from closing them down while waiting + * for the operator. + */ + add_wait = dev->rem_wait_sec; + if (me->heartbeat_interval && add_wait > me->heartbeat_interval) { + add_wait = me->heartbeat_interval; + } + /* If the user did not unmount the tape and we are polling, ensure + * that we poll at the correct interval. + */ + if (!unmounted && dev->vol_poll_interval && add_wait > dev->vol_poll_interval) { + add_wait = dev->vol_poll_interval; + } + + if (!unmounted) { + Dmsg1(dbglvl, "blocked=%s\n", dev->print_blocked()); + dev->dev_prev_blocked = dev->blocked(); + dev->set_blocked(BST_WAITING_FOR_SYSOP); /* indicate waiting for mount */ + } + + for ( ; !job_canceled(jcr); ) { + time_t now, start, total_waited; + + gettimeofday(&tv, &tz); + timeout.tv_nsec = tv.tv_usec * 1000; + timeout.tv_sec = tv.tv_sec + add_wait; + + Dmsg4(dbglvl, "I'm going to sleep on device %s. 
HB=%d rem_wait=%d add_wait=%d\n", + dev->print_name(), (int)me->heartbeat_interval, dev->rem_wait_sec, add_wait); + start = time(NULL); + + /* Wait required time */ + stat = dev->next_vol_timedwait(&timeout); + + Dmsg2(dbglvl, "Wokeup from sleep on device stat=%d blocked=%s\n", stat, + dev->print_blocked()); + now = time(NULL); + total_waited = now - first_start; + dev->rem_wait_sec -= (now - start); + + /* Note, this always triggers the first time. We want that. */ + if (me->heartbeat_interval) { + if (now - last_heartbeat >= me->heartbeat_interval) { + /* send heartbeats */ + if (jcr->file_bsock) { + jcr->file_bsock->signal(BNET_HEARTBEAT); + Dmsg0(dbglvl, "Send heartbeat to FD.\n"); + } + if (jcr->dir_bsock) { + jcr->dir_bsock->signal(BNET_HEARTBEAT); + } + last_heartbeat = now; + } + } + + if (stat == EINVAL) { + berrno be; + Jmsg1(jcr, M_FATAL, 0, _("pthread timedwait error. ERR=%s\n"), be.bstrerror(stat)); + stat = W_ERROR; /* error */ + break; + } + + /* + * Continue waiting if operator is labeling volumes + */ + if (dev->blocked() == BST_WRITING_LABEL) { + continue; + } + + if (dev->rem_wait_sec <= 0) { /* on exceeding wait time return */ + Dmsg0(dbglvl, "Exceed wait time.\n"); + stat = W_TIMEOUT; + break; + } + + /* + * Check if user unmounted the device while we were waiting + */ + unmounted = dev->is_device_unmounted(); + + if (!unmounted && dev->vol_poll_interval && + (total_waited >= dev->vol_poll_interval)) { + Dmsg1(dbglvl, "Set poll=true return in wait blocked=%s\n", dev->print_blocked()); + dev->poll = true; /* returning a poll event */ + stat = W_POLL; + break; + } + /* + * Check if user mounted the device while we were waiting + */ + if (dev->blocked() == BST_MOUNT) { /* mount request ? */ + Dmsg0(dbglvl, "Mounted return.\n"); + stat = W_MOUNT; + break; + } + + /* + * If we did not timeout, then some event happened, so + * return to check if state changed. + */ + if (stat != ETIMEDOUT) { + berrno be; + Dmsg2(dbglvl, "Wake return. stat=%d. ERR=%s\n", stat, be.bstrerror(stat)); + stat = W_WAKE; /* someone woke us */ + break; + } + + /* + * At this point, we know we woke up because of a timeout, + * that was due to a heartbeat, because any other reason would + * have caused us to return, so update the wait counters and continue. + */ + add_wait = dev->rem_wait_sec; + if (me->heartbeat_interval && add_wait > me->heartbeat_interval) { + add_wait = me->heartbeat_interval; + } + /* If the user did not unmount the tape and we are polling, ensure + * that we poll at the correct interval. + */ + if (!unmounted && dev->vol_poll_interval && + add_wait > dev->vol_poll_interval - total_waited) { + add_wait = dev->vol_poll_interval - total_waited; + } + if (add_wait < 0) { + add_wait = 0; + } + } + + if (!unmounted) { + dev->set_blocked(dev->dev_prev_blocked); /* restore entry state */ + Dmsg1(dbglvl, "set %s\n", dev->print_blocked()); + } + Dmsg2(dbglvl, "Exit blocked=%s poll=%d\n", dev->print_blocked(), dev->poll); + dev->Unlock(); + return stat; +} + + +/* + * Wait for any device to be released, then we return, so + * higher level code can rescan possible devices. Since there + * could be a job waiting for a drive to free up, we wait a maximum + * of 1 minute then retry just in case a broadcast was lost, and + * we return to rescan the devices. + * + * Returns: true if a device has changed state + * false if the total wait time has expired. 
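Editor's note: the three wait routines in this file build the absolute deadline for pthread_cond_timedwait() in the same way; a condensed sketch of that pattern follows (the helper name make_deadline() is ours, not Bacula's).

   // Build an absolute timespec 'wait_sec' seconds from now, as required by
   // pthread_cond_timedwait(); tv_usec is microseconds, tv_nsec nanoseconds.
   static void make_deadline(struct timespec *deadline, int wait_sec)
   {
      struct timeval tv;
      struct timezone tz;
      gettimeofday(&tv, &tz);
      deadline->tv_nsec = tv.tv_usec * 1000;
      deadline->tv_sec  = tv.tv_sec + wait_sec;
   }

   // Typical use, as in wait_for_device() below:
   //    P(device_release_mutex);
   //    make_deadline(&timeout, max_wait_time);
   //    stat = pthread_cond_timedwait(&wait_device_release, &device_release_mutex, &timeout);
   //    V(device_release_mutex);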
+ */ +bool wait_for_any_device(JCR *jcr, int &retries) +{ + struct timeval tv; + struct timezone tz; + struct timespec timeout; + int stat = 0; + bool ok = true; + const int max_wait_time = 1 * 60; /* wait 1 minute */ + char ed1[50]; + + Dmsg0(dbglvl, "Enter wait_for_any_device\n"); + P(device_release_mutex); + + if (++retries % 5 == 0) { + /* Print message every 5 minutes */ + Jmsg(jcr, M_MOUNT, 0, _("JobId=%s, Job %s waiting to reserve a device.\n"), + edit_uint64(jcr->JobId, ed1), jcr->Job); + } + + gettimeofday(&tv, &tz); + timeout.tv_nsec = tv.tv_usec * 1000; + timeout.tv_sec = tv.tv_sec + max_wait_time; + + Dmsg0(dbglvl, "Going to wait for a device.\n"); + + /* Wait required time */ + stat = pthread_cond_timedwait(&wait_device_release, &device_release_mutex, &timeout); + Dmsg1(dbglvl, "Wokeup from sleep on device stat=%d\n", stat); + + V(device_release_mutex); + Dmsg1(dbglvl, "Return from wait_device ok=%d\n", ok); + return ok; +} + +/* + * Wait for a specific device to be released + * We wait a maximum of 1 minute then + * retry just in case a broadcast was lost. + * + * Returns: true if the device has changed state + * false if the total wait time has expired. + */ +bool wait_for_device(DCR *dcr, int &retries) +{ + struct timeval tv; + struct timezone tz; + struct timespec timeout; + JCR *jcr = dcr->jcr; + DEVICE *dev = dcr->dev; + int stat = 0; + bool ok = true; + const int max_wait_time = 1 * 60; /* wait 1 minute */ + char ed1[50]; + + Dmsg3(40, "Enter wait_for_device. busy=%d dcrvol=%s devvol=%s\n", + dev->is_busy(), dcr->VolumeName, dev->getVolCatName()); + + P(device_release_mutex); + + if (++retries % 5 == 0) { + /* Print message every 5 minutes */ + Jmsg(jcr, M_MOUNT, 0, _("JobId=%s, Job %s waiting device %s.\n"), + edit_uint64(jcr->JobId, ed1), jcr->Job, dcr->dev->print_name()); + } + + gettimeofday(&tv, &tz); + timeout.tv_nsec = tv.tv_usec * 1000; + timeout.tv_sec = tv.tv_sec + max_wait_time; + + Dmsg0(dbglvl, "Going to wait for a device.\n"); + + /* Wait required time */ + stat = pthread_cond_timedwait(&wait_device_release, &device_release_mutex, &timeout); + Dmsg1(dbglvl, "Wokeup from sleep on device stat=%d\n", stat); + + V(device_release_mutex); + Dmsg1(dbglvl, "Return from wait_device ok=%d\n", ok); + return ok; +} + + +/* + * This routine initializes the device wait timers + */ +void init_device_wait_timers(DCR *dcr) +{ + DEVICE *dev = dcr->dev; + JCR *jcr = dcr->jcr; + + /* ******FIXME******* put these on config variables */ + dev->min_wait = 60 * 60; + dev->max_wait = 24 * 60 * 60; + dev->max_num_wait = 9; /* 5 waits =~ 1 day, then 1 day at a time */ + dev->wait_sec = dev->min_wait; + dev->rem_wait_sec = dev->wait_sec; + dev->num_wait = 0; + dev->poll = false; + + jcr->min_wait = 60 * 60; + jcr->max_wait = 24 * 60 * 60; + jcr->max_num_wait = 9; /* 5 waits =~ 1 day, then 1 day at a time */ + jcr->wait_sec = jcr->min_wait; + jcr->rem_wait_sec = jcr->wait_sec; + jcr->num_wait = 0; + +} + +void init_jcr_device_wait_timers(JCR *jcr) +{ + /* ******FIXME******* put these on config variables */ + jcr->min_wait = 60 * 60; + jcr->max_wait = 24 * 60 * 60; + jcr->max_num_wait = 9; /* 5 waits =~ 1 day, then 1 day at a time */ + jcr->wait_sec = jcr->min_wait; + jcr->rem_wait_sec = jcr->wait_sec; + jcr->num_wait = 0; +} + + +/* + * The dev timers are used for waiting on a particular device + * + * Returns: true if time doubled + * false if max time expired + */ +bool double_dev_wait_time(DEVICE *dev) +{ + dev->wait_sec *= 2; /* double wait time */ + if (dev->wait_sec > 
dev->max_wait) { /* but not longer than maxtime */ + dev->wait_sec = dev->max_wait; + } + dev->num_wait++; + dev->rem_wait_sec = dev->wait_sec; + if (dev->num_wait >= dev->max_num_wait) { + return false; + } + return true; +} diff --git a/src/stored/win_file_dev.h b/src/stored/win_file_dev.h new file mode 100644 index 00000000..2cb1f2df --- /dev/null +++ b/src/stored/win_file_dev.h @@ -0,0 +1,31 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Inspired by vtape.h + */ +#ifndef __WIN_FILE_DEV_H_ +#define __WIN_FILE_DEV_H_ + +class win_file_dev : public file_dev { +public: + + win_file_dev() { }; + ~win_file_dev() { m_fd = -1; }; +}; +#endif diff --git a/src/stored/win_tape_dev.h b/src/stored/win_tape_dev.h new file mode 100644 index 00000000..89b5ff16 --- /dev/null +++ b/src/stored/win_tape_dev.h @@ -0,0 +1,48 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Inspired by vtape.h + */ +#ifndef __WIN_TAPE_DEV_H_ +#define __WIN_TAPE_DEV_H_ + +class win_tape_dev : public DEVICE { +public: + + win_tape_dev() { }; + ~win_tape_dev() { }; + + /* interface from DEVICE */ + int d_close(int); + int d_open(const char *pathname, int flags); + int d_ioctl(int fd, ioctl_req_t request, char *op=NULL); + ssize_t d_read(int, void *buffer, size_t count); + ssize_t d_write(int, const void *buffer, size_t count); + + boffset_t lseek(DCR *dcr, off_t offset, int whence) { return -1; }; + boffset_t lseek(int fd, off_t offset, int whence); + bool open_device(DCR *dcr, int omode); + + int tape_op(struct mtop *mt_com); + int tape_get(struct mtget *mt_com); + int tape_pos(struct mtpos *mt_com); + +}; + +#endif /* __WIN_TAPE_DEV_H_ */ diff --git a/src/streams.h b/src/streams.h new file mode 100644 index 00000000..a19cdb73 --- /dev/null +++ b/src/streams.h @@ -0,0 +1,156 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. 
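Editor's note: a compact sketch of the exponential-backoff retry loop that the timers set up by init_device_wait_timers() and advanced by double_dev_wait_time() above are designed for. try_acquire_device() is a hypothetical stand-in for whatever reservation attempt the caller makes; only the timer fields and the two helpers are Bacula's.

   // wait_sec starts at min_wait and doubles after each failed attempt,
   // capped at max_wait; give up once max_num_wait doublings are reached.
   init_device_wait_timers(dcr);
   for ( ;; ) {
      if (try_acquire_device(dcr)) {          // hypothetical, not a Bacula call
         break;                               // got the device
      }
      if (!double_dev_wait_time(dcr->dev)) {  // false => max_num_wait exceeded
         Jmsg(dcr->jcr, M_FATAL, 0, "Max wait time exceeded waiting for device.\n");
         break;
      }
      bmicrosleep(dcr->dev->wait_sec, 0);     // sleep the (doubled) wait time
   }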
+ + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/** + * Stream definitions. Split from baconfig.h Nov 2010 + * + * Kern Sibbald, MM + */ + +#ifndef __BSTREAMS_H +#define __BSTREAMS_H 1 + +/* Stream bits -- these bits are new as of 24Nov10 */ +#define STREAM_BIT_64 (1<<30) /* 64 bit stream (not yet implemented) */ +#define STREAM_BIT_BITS (1<<29) /* Following bits may be set */ +#define STREAM_BIT_PLUGIN (1<<28) /* Item written by a plugin */ +#define STREAM_BIT_DELTA (1<<27) /* Stream contains delta data */ +#define STREAM_BIT_OFFSETS (1<<26) /* Stream has data offset */ +#define STREAM_BIT_PORTABLE_DATA (1<<25) /* Data is portable */ + +/* TYPE represents our current (old) stream types -- e.g. values 0 - 2047 */ +#define STREAMBASE_TYPE 0 /* base for types */ +#define STREAMBITS_TYPE 11 /* type bit size */ +#define STREAMMASK_TYPE (~((~0u)<< STREAMBITS_TYPE) << STREAMBASE_TYPE) +/* + * Note additional base, bits, and masks can be defined for new + * ranges or subranges of stream attributes. + */ + +/** + * Old, but currently used Stream definitions. Once defined these must NEVER + * change as they go on the storage media. + * Note, the following streams are passed from the SD to the DIR + * so that they may be put into the catalog (actually only the + * stat packet part of the attr record is put in the catalog. + * + * STREAM_UNIX_ATTRIBUTES + * STREAM_UNIX_ATTRIBUTES_EX + * STREAM_MD5_DIGEST + * STREAM_SHA1_DIGEST + * STREAM_SHA256_DIGEST + * STREAM_SHA512_DIGEST + */ +#define STREAM_NONE 0 /* Reserved Non-Stream */ +#define STREAM_UNIX_ATTRIBUTES 1 /* Generic Unix attributes */ +#define STREAM_FILE_DATA 2 /* Standard uncompressed data */ +#define STREAM_MD5_SIGNATURE 3 /* deprecated */ +#define STREAM_MD5_DIGEST 3 /* MD5 digest for the file */ +#define STREAM_GZIP_DATA 4 /* GZip compressed file data */ +#define STREAM_UNIX_ATTRIBUTES_EX 5 /* Extended Unix attr for Win32 EX - Deprecated */ +#define STREAM_SPARSE_DATA 6 /* Sparse data stream */ +#define STREAM_SPARSE_GZIP_DATA 7 /* Sparse gzipped data stream */ +#define STREAM_PROGRAM_NAMES 8 /* program names for program data */ +#define STREAM_PROGRAM_DATA 9 /* Data needing program */ +#define STREAM_SHA1_SIGNATURE 10 /* deprecated */ +#define STREAM_SHA1_DIGEST 10 /* SHA1 digest for the file */ +#define STREAM_WIN32_DATA 11 /* Win32 BackupRead data */ +#define STREAM_WIN32_GZIP_DATA 12 /* Gzipped Win32 BackupRead data */ +#define STREAM_MACOS_FORK_DATA 13 /* Mac resource fork */ +#define STREAM_HFSPLUS_ATTRIBUTES 14 /* Mac OS extra attributes */ +#define STREAM_UNIX_ACCESS_ACL 15 /* Standard ACL attributes on UNIX - Deprecated */ +#define STREAM_UNIX_DEFAULT_ACL 16 /* Default ACL attributes on UNIX - Deprecated */ +#define STREAM_SHA256_DIGEST 17 /* SHA-256 digest for the file */ +#define STREAM_SHA512_DIGEST 18 /* SHA-512 digest for the file */ +#define STREAM_SIGNED_DIGEST 19 /* Signed File Digest, ASN.1, DER Encoded */ +#define STREAM_ENCRYPTED_FILE_DATA 20 /* Encrypted, uncompressed data */ +#define STREAM_ENCRYPTED_WIN32_DATA 21 /* Encrypted, uncompressed Win32 BackupRead data */ +#define STREAM_ENCRYPTED_SESSION_DATA 22 /* Encrypted Session Data, ASN.1, DER Encoded */ +#define STREAM_ENCRYPTED_FILE_GZIP_DATA 23 /* Encrypted, compressed data */ +#define STREAM_ENCRYPTED_WIN32_GZIP_DATA 24 /* Encrypted, compressed Win32 BackupRead data */ +#define STREAM_ENCRYPTED_MACOS_FORK_DATA 25 /* Encrypted, uncompressed Mac resource fork */ +#define STREAM_PLUGIN_NAME 26 /* Plugin "file" string */ +#define STREAM_PLUGIN_DATA 27 
/* Plugin specific data */ +#define STREAM_RESTORE_OBJECT 28 /* Plugin restore object */ +/* + * Non-gzip compressed streams. Those streams can handle arbitrary + * compression algorithm data as an additional header is stored + * at the beginning of the stream. See comp_stream_header definition + * in ch.h for more details. + */ +#define STREAM_COMPRESSED_DATA 29 /* Compressed file data */ +#define STREAM_SPARSE_COMPRESSED_DATA 30 /* Sparse compressed data stream */ +#define STREAM_WIN32_COMPRESSED_DATA 31 /* Compressed Win32 BackupRead data */ +#define STREAM_ENCRYPTED_FILE_COMPRESSED_DATA 32 /* Encrypted, compressed data */ +#define STREAM_ENCRYPTED_WIN32_COMPRESSED_DATA 33 /* Encrypted, compressed Win32 BackupRead data */ + +#define STREAM_ADATA_BLOCK_HEADER 200 /* Adata block header */ +#define STREAM_ADATA_RECORD_HEADER 201 /* Adata record header */ + +/* + * Additional Stream definitions. Once defined these must NEVER + * change as they go on the storage media. + * + * The Stream numbers from 1000-1999 are reserved for ACL and extended attribute streams. + * Each different platform has its own stream id(s), if a platform supports multiple stream types + * it should supply different handlers for each type it supports and this should be called + * from the stream dispatch function. Currently in this reserved space we allocate the + * different acl streams from 1000 on and the different extended attributes streams from + * 1999 down. So the two naming spaces grows towards each other. + * + * Rationalize names a bit to correspond to the new XACL classes + * + */ +#define STREAM_XACL_AIX_TEXT 1000 /* AIX string of acl_get */ +#define STREAM_XACL_DARWIN_ACCESS 1001 /* Darwin (OSX) acl_t string of acl_to_text (POSIX acl) */ +#define STREAM_XACL_FREEBSD_DEFAULT 1002 /* FreeBSD acl_t string of acl_to_text (POSIX acl) for default acls */ +#define STREAM_XACL_FREEBSD_ACCESS 1003 /* FreeBSD acl_t string of acl_to_text (POSIX acl) for access acls */ +#define STREAM_XACL_HPUX_ACL_ENTRY 1004 /* HPUX acl_entry string of acltostr (POSIX acl) */ +#define STREAM_XACL_IRIX_DEFAULT 1005 /* IRIX acl_t string of acl_to_text (POSIX acl) for default acls */ +#define STREAM_XACL_IRIX_ACCESS 1006 /* IRIX acl_t string of acl_to_text (POSIX acl) for access acls */ +#define STREAM_XACL_LINUX_DEFAULT 1007 /* Linux acl_t string of acl_to_text (POSIX acl) for default acls */ +#define STREAM_XACL_LINUX_ACCESS 1008 /* Linux acl_t string of acl_to_text (POSIX acl) for access acls */ +#define STREAM_XACL_TRU64_DEFAULT 1009 /* Tru64 acl_t string of acl_to_text (POSIX acl) for default acls */ +#define STREAM_XACL_TRU64_DEFAULT_DIR 1010 /* Tru64 acl_t string of acl_to_text (POSIX acl) for default acls */ +#define STREAM_XACL_TRU64_ACCESS 1011 /* Tru64 acl_t string of acl_to_text (POSIX acl) for access acls */ +#define STREAM_XACL_SOLARIS_POSIX 1012 /* Solaris aclent_t string of acltotext or acl_totext (POSIX acl) */ +#define STREAM_XACL_SOLARIS_NFS4 1013 /* Solaris ace_t string of of acl_totext (NFSv4 or ZFS acl) */ +#define STREAM_XACL_AFS_TEXT 1014 /* AFS string of pioctl */ +#define STREAM_XACL_AIX_AIXC 1015 /* AIX string of aclx_printStr (POSIX acl) */ +#define STREAM_XACL_AIX_NFS4 1016 /* AIX string of aclx_printStr (NFSv4 acl) */ +#define STREAM_XACL_FREEBSD_NFS4 1017 /* FreeBSD acl_t string of acl_to_text (NFSv4 or ZFS acl) */ +#define STREAM_XACL_HURD_DEFAULT 1018 /* GNU HURD acl_t string of acl_to_text (POSIX acl) for default acls */ +#define STREAM_XACL_HURD_ACCESS 1019 /* GNU HURD acl_t string of acl_to_text 
(POSIX acl) for access acls */ +#define STREAM_XACL_PLUGIN_ACL 1020 /* Plugin ACL data for plugin specific acls */ +#define STREAM_XACL_PLUGIN_XATTR 1988 /* Plugin XATTR data for plugin specific xattrs */ +#define STREAM_XACL_HURD_XATTR 1989 /* GNU HURD extended attributes */ +#define STREAM_XACL_IRIX_XATTR 1990 /* IRIX extended attributes */ +#define STREAM_XACL_TRU64_XATTR 1991 /* TRU64 extended attributes */ +#define STREAM_XACL_AIX_XATTR 1992 /* AIX extended attributes */ +#define STREAM_XACL_OPENBSD_XATTR 1993 /* OpenBSD extended attributes */ +#define STREAM_XACL_SOLARIS_SYS_XATTR 1994 /* Solaris extensible attributes or + * otherwise named extended system attributes. */ +#define STREAM_XACL_SOLARIS_XATTR 1995 /* Solaris extented attributes */ +#define STREAM_XACL_DARWIN_XATTR 1996 /* Darwin (OSX) extended attributes */ +#define STREAM_XACL_FREEBSD_XATTR 1997 /* FreeBSD extended attributes */ +#define STREAM_XACL_LINUX_XATTR 1998 /* Linux specific attributes */ +#define STREAM_XACL_NETBSD_XATTR 1999 /* NetBSD extended attributes */ + +/* WARNING!!! do not define more than 2047 of these old types */ + +#endif /* __BSTREAMS_H */ diff --git a/src/tools/Makefile.in b/src/tools/Makefile.in new file mode 100644 index 00000000..abd66199 --- /dev/null +++ b/src/tools/Makefile.in @@ -0,0 +1,212 @@ +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# Bacula Tools Makefile +# +@MCOMMON@ + +srcdir = . +VPATH = . +.PATH: . + +# one up +basedir = .. +# top dir +topdir = ../.. +# this dir relative to top dir +thisdir = src/tools + +DEBUG=@DEBUG@ + +ZLIBS=@ZLIBS@ +DB_LIBS=@DB_LIBS@ + +first_rule: all +dummy: + +# + +GETTEXT_LIBS = @LIBINTL@ + +FINDOBJS = testfind.o ../dird/dird_conf.o ../dird/inc_conf.o ../dird/run_conf.o ../dird/ua_acl.o + +# these are the objects that are changed by the .configure process +EXTRAOBJS = @OBJLIST@ + +DIRCONFOBJS = ../dird/dird_conf.o ../dird/run_conf.o ../dird/inc_conf.o ../dird/ua_acl.o + +NODIRTOOLS = bsmtp +DIRTOOLS = bsmtp dbcheck drivetype fstype testfind testls bregex bwild bbatch bregtest bvfs_test +TOOLS = $(@DIR_TOOLS@) + +INSNODIRTOOLS = bsmtp +INSDIRTOOLS = bsmtp dbcheck bwild bregex +INSTOOLS = $(INS@DIR_TOOLS@) + +.SUFFIXES: .c .o +.PHONY: +.DONTCARE: + +# inference rules +.c.o: + @echo "Compiling $<" + $(NO_ECHO)$(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) $< +#------------------------------------------------------------------------- +all: Makefile $(TOOLS) gigaslam grow + @echo "==== Make of tools is good ====" + @echo " " + +bsmtp: Makefile bsmtp.o ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) + $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L../lib -o $@ bsmtp.o -lbac -lm $(DLIB) $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) + +bsnapshot: Makefile bsnapshot.o ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) + $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L../lib -o $@ bsnapshot.o ../lib/ini$(DEFAULT_OBJECT_TYPE) -lbac -lm $(DLIB) $(LIBS) $(GETTEXT_LIBS) + + +bregtest: Makefile bregtest.o ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) + $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L../lib -o $@ bregtest.o -lbac -lm $(DLIB) $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) + +dbcheck: Makefile dbcheck.o ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) ../lib/libbaccfg$(DEFAULT_ARCHIVE_TYPE) \ + ../cats/libbacsql$(DEFAULT_ARCHIVE_TYPE) ../cats/libbaccats$(DEFAULT_ARCHIVE_TYPE) $(DIRCONFOBJS) \ + ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE) + $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L. 
-L../lib -L../findlib -L../cats -o $@ dbcheck.o $(DIRCONFOBJS) \ + $(DLIB) -lbaccats -lbacsql -lbacfind -lbaccfg -lbac -lm $(DB_LIBS) $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) + +fstype: Makefile fstype.o ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE) + $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L../lib -L../findlib -o $@ fstype.o -lbacfind -lbac -lm \ + $(DLIB) $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) + +drivetype: Makefile drivetype.o ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE) + $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L../lib -L../findlib -o $@ drivetype.o -lbacfind -lbac -lm \ + $(DLIB) $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) + +dird_conf.o: ../dird/dird_conf.c + $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) $< + +run_conf.o: ../dird/run_conf.c + $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) $< + +inc_conf.o: ../dird/inc_conf.c + $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) $< + +timelimit: timelimit.o + ${CC} ${DEFS} ${DEBUG} -pipe -DHAVE_ERRNO_H -DHAVE_SETITIMER -DHAVE_SIGACTION -c timelimit.c + ${CC} -o timelimit timelimit.o + +testfind: Makefile ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) ../lib/libbaccfg$(DEFAULT_ARCHIVE_TYPE) \ + ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE) $(FINDOBJS) + $(LIBTOOL_LINK) $(CXX) -g $(LDFLAGS) -o $@ $(FINDOBJS) -L. -L../lib -L../findlib \ + $(DLIB) -lbacfind -lbaccfg -lbac -lm $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) + +testls: Makefile ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE) ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) testls.o + $(LIBTOOL_LINK) $(CXX) -g $(LDFLAGS) -L. -L../lib -L../findlib -o $@ testls.o \ + $(DLIB) -lbacfind -lbac -lm $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) + +bregex: Makefile ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE) ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) bregex.o + $(LIBTOOL_LINK) $(CXX) -g $(LDFLAGS) -L. -L../lib -o $@ bregex.o \ + $(DLIB) -lbac -lm $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) + +bwild: Makefile ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) bwild.o + $(LIBTOOL_LINK) $(CXX) -g $(LDFLAGS) -L. -L../lib -o $@ bwild.o \ + $(DLIB) -lbac -lm $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) + +bbatch: Makefile ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) ../cats/libbacsql$(DEFAULT_ARCHIVE_TYPE) \ + ../cats/libbaccats$(DEFAULT_ARCHIVE_TYPE) ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE) bbatch.o + $(LIBTOOL_LINK) $(CXX) -g $(LDFLAGS) -L../cats -L. -L../lib -L../findlib -o $@ bbatch.o \ + $(DLIB) -lbaccats -lbacsql -lbacfind -lbac -lm $(ZLIBS) $(DB_LIBS) $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) + +bvfs_test: Makefile ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE) ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) \ + ../cats/libbacsql$(DEFAULT_ARCHIVE_TYPE) ../cats/libbaccats$(DEFAULT_ARCHIVE_TYPE) bvfs_test.o + $(LIBTOOL_LINK) $(CXX) -g $(LDFLAGS) -L../cats -L. -L../lib -L../findlib -o $@ bvfs_test.o \ + -lbaccats -lbacsql -lbacfind -lbac -lm $(ZLIBS) $(DB_LIBS) $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) + +cats_test.o: cats_test.c + echo "Compiling $<" + $(NO_ECHO)$(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) $< + +cats_test: Makefile ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE) ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) ../cats/libbacsql$(DEFAULT_ARCHIVE_TYPE) cats_test.o + $(LIBTOOL_LINK) $(CXX) -g $(LDFLAGS) -L../cats -L. 
-L../lib -L../findlib -o $@ cats_test.o \ + -lbaccats -lbacsql -lbacfind -lbac -lm $(DB_LIBS) $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) + +gigaslam.o: gigaslam.c + $(CXX) $(CFLAGS) -c $(CPPFLAGS) $< + +gigaslam: gigaslam.o + $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -o $@ gigaslam.o + +grow: Makefile grow.o ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) + $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L../lib -o $@ grow.o -lbac -lm $(DLIB) $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) + +bpluginfo.o: bpluginfo.c + $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) -I../filed -I../dird -I../stored $(DINCLUDE) $(CFLAGS) $< + +bpluginfo: Makefile bpluginfo.o + $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L../lib -o $@ bpluginfo.o -lbac $(DLIB) $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) + +Makefile: $(srcdir)/Makefile.in $(topdir)/config.status + cd $(topdir) \ + && CONFIG_FILES=$(thisdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status + +libtool-clean: + @$(RMF) -r .libs _libs + +clean: libtool-clean + @$(RMF) core core.* a.out *.o *.bak *~ *.intpro *.extpro 1 2 3 + @$(RMF) $(DIRTOOLS) gigaslam grow + +realclean: clean + @$(RMF) tags + +distclean: realclean + if test $(srcdir) = .; then $(MAKE) realclean; fi + (cd $(srcdir); $(RMF) Makefile) + +devclean: realclean + if test $(srcdir) = .; then $(MAKE) realclean; fi + (cd $(srcdir); $(RMF) Makefile) + +install-bsnapshot: bsnapshot + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) bsnapshot $(DESTDIR)$(sbindir)/bsnapshot + +installall: $(TOOLS) timelimit + @for tool in ${TOOLS} timelimit ; do \ + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) $$tool $(DESTDIR)$(sbindir)/$$tool ; \ + done +# chattr +i $(DESTDIR)$(sbindir)/bsmtp +# chmod 755 $(DESTDIR)$(sbindir)/bsmtp + +# Allow non-root execution of bsmtp for non-root Directors +install: $(INSTOOLS) + @for tool in ${INSTOOLS} ; do \ + $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) $$tool $(DESTDIR)$(sbindir)/$$tool ; \ + done +# chattr +i $(DESTDIR)$(sbindir)/bsmtp +# chmod 755 $(DESTDIR)$(sbindir)/bsmtp + +uninstall: + @for tool in ${INSTOOLS} ; do \ + $(RMF) $(DESTDIR)$(sbindir)/$$tool ; \ + done + + + +# Semi-automatic generation of dependencies: +# Use gcc -MM because X11 `makedepend' doesn't work on all systems +# and it also includes system headers. +# `semi'-automatic since dependencies are generated at distribution time. + +depend: + @$(MV) Makefile Makefile.bak + @$(SED) "/^# DO NOT DELETE:/,$$ d" Makefile.bak > Makefile + @$(ECHO) "# DO NOT DELETE: nice dependency list follows" >> Makefile + @$(CXX) -S -M $(CPPFLAGS) -I$(srcdir) -I$(basedir) *.c >> Makefile + @if test -f Makefile ; then \ + $(RMF) Makefile.bak; \ + else \ + $(MV) Makefile.bak Makefile; \ + echo " ===== Something went wrong in make depend ====="; \ + fi + +# ----------------------------------------------------------------------- +# DO NOT DELETE: nice dependency list follows diff --git a/src/tools/bbatch.c b/src/tools/bbatch.c new file mode 100644 index 00000000..fc4e9640 --- /dev/null +++ b/src/tools/bbatch.c @@ -0,0 +1,363 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. 
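Editor's note: a short illustration of how the stream bit-fields defined in streams.h above decompose. The particular combination used for 'stream' here is assumed purely for illustration; only the bit names and masks come from the header.

   // Hypothetical full stream id: plain file data carrying data offsets,
   // with STREAM_BIT_BITS signalling that the attribute bits are meaningful.
   int32_t stream = STREAM_BIT_BITS | STREAM_BIT_OFFSETS | STREAM_FILE_DATA;

   // The low STREAMBITS_TYPE (11) bits hold the old-style stream type ...
   int32_t base_type = (stream & STREAMMASK_TYPE) >> STREAMBASE_TYPE;  // == STREAM_FILE_DATA

   // ... and the high bits qualify it.
   bool has_offsets = (stream & STREAM_BIT_OFFSETS) != 0;              // true
   bool is_delta    = (stream & STREAM_BIT_DELTA) != 0;                // false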
+ + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * Program to test batch mode + * + * Eric Bollengier, March 2007 + * + */ + +/* + to create datafile + + for i in $(seq 10000 99999) ; do + j=$((($i % 1000) + 555)) + echo "$i;/tmp/totabofds$j/fiddddle${j}$i;xxxLSTATxxxx;xxxxxxxMD5xxxxxx" + done > dat1 + + or + + j=0 + find / | while read a; do + j=$(($j+1)) + echo "$j;$a;xxxLSTATxxxx;xxxxxxxMD5xxxxxx" + done > dat1 + */ + +#include "bacula.h" +#include "stored/stored.h" +#include "findlib/find.h" +#include "cats/cats.h" + +/* Forward referenced functions */ +static void *do_batch(void *); + + +/* Local variables */ +static BDB *db; + +static const char *db_name = "bacula"; +static const char *db_user = "bacula"; +static const char *db_password = ""; +static const char *db_host = NULL; +static const char *db_ssl_mode = NULL; +static const char *db_ssl_key = NULL; +static const char *db_ssl_cert = NULL; +static const char *db_ssl_ca = NULL; +static const char *db_ssl_capath = NULL; +static const char *db_ssl_cipher = NULL; + +static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; + +static void usage() +{ + fprintf(stderr, _( +PROG_COPYRIGHT +"\n%sVersion: %s (%s)\n" +"Example : bbatch -w /path/to/workdir -h localhost -f dat1 -f dat -f datx\n" +" will start 3 thread and load dat1, dat and datx in your catalog\n" +"See bbatch.c to generate datafile\n\n" +"Usage: bbatch [ options ] -w working/dir -f datafile\n" +" -b with batch mode\n" +" -B without batch mode\n" +" -d set debug level to \n" +" -dt print timestamp in debug output\n" +" -n specify the database name (default bacula)\n" +" -u specify database user name (default bacula)\n" +" -P specify database host (default NULL)\n" +" -k path name to the key file (default NULL)\n" +" -e path name to the certificate file (default NULL)\n" +" -a path name to the CA certificate file (default NULL)\n" +" -w specify working directory\n" +" -r call restore code with given jobids\n" +" -v verbose\n" +" -f specify data file\n" +" -? 
print this message\n\n"), 2001, "", VERSION, BDATE); + exit(1); +} + +/* number of thread started */ +int nb=0; + +static int list_handler(void *ctx, int num_fields, char **row) +{ + uint64_t *a = (uint64_t*) ctx; + (*a)++; + return 0; +} + +int main (int argc, char *argv[]) +{ + int ch; + bool use_batch_insert = true; + char *restore_list=NULL; + setlocale(LC_ALL, ""); + bindtextdomain("bacula", LOCALEDIR); + textdomain("bacula"); + init_stack_dump(); + lmgr_init_thread(); + + char **files = (char **) malloc (10 * sizeof(char *)); + int i; + my_name_is(argc, argv, "bbatch"); + init_msg(NULL, NULL); + + OSDependentInit(); + + while ((ch = getopt(argc, argv, "bBh:o:k:e:a:c:d:n:P:Su:vf:w:r:?")) != -1) { + switch (ch) { + case 'r': + restore_list=bstrdup(optarg); + break; + case 'B': + use_batch_insert = true; + break; + case 'b': + use_batch_insert = false; + break; + case 'd': /* debug level */ + if (*optarg == 't') { + dbg_timestamp = true; + } else { + debug_level = atoi(optarg); + if (debug_level <= 0) { + debug_level = 1; + } + } + break; + + case 'h': + db_host = optarg; + break; + + case 'o': + db_ssl_mode = optarg; + break; + + case 'k': + db_ssl_key = optarg; + break; + + case 'e': + db_ssl_cert = optarg; + break; + + case 'a': + db_ssl_ca = optarg; + break; + + case 'n': + db_name = optarg; + break; + + case 'w': + working_directory = optarg; + break; + + case 'u': + db_user = optarg; + break; + + case 'P': + db_password = optarg; + break; + + case 'v': + verbose++; + break; + + case 'f': + if (nb < 10 ) { + files[nb++] = optarg; + } + break; + + case '?': + default: + usage(); + + } + } + argc -= optind; + argv += optind; + + if (argc != 0) { + Pmsg0(0, _("Wrong number of arguments: \n")); + usage(); + } + + if (restore_list) { + uint64_t nb_file=0; + btime_t start, end; + /* To use the -r option, the catalog should already contains records */ + + if ((db = db_init_database(NULL, NULL, db_name, db_user, db_password, + db_host, 0, NULL, + db_ssl_mode, db_ssl_key, + db_ssl_cert, db_ssl_ca, + db_ssl_capath, db_ssl_cipher, + false, !use_batch_insert)) == NULL) { + Emsg0(M_ERROR_TERM, 0, _("Could not init Bacula database\n")); + } + if (!db_open_database(NULL, db)) { + Emsg0(M_ERROR_TERM, 0, db_strerror(db)); + } + + start = get_current_btime(); + db_get_file_list(NULL, db, restore_list, DBL_NONE, list_handler, &nb_file); + end = get_current_btime(); + + Pmsg3(0, _("Computing file list for jobid=%s files=%lld secs=%d\n"), + restore_list, nb_file, (uint32_t)btime_to_unix(end-start)); + + free(restore_list); + return 0; + } + + if (use_batch_insert) { + printf("With Batch Insert mode\n"); + } else { + printf("Without Batch Insert mode\n"); + } + + i = nb; + while (--i >= 0) { + pthread_t thid; + JCR *bjcr = new_jcr(sizeof(JCR), NULL); + bjcr->bsr = NULL; + bjcr->VolSessionId = 1; + bjcr->VolSessionTime = (uint32_t)time(NULL); + bjcr->NumReadVolumes = 0; + bjcr->NumWriteVolumes = 0; + bjcr->JobId = getpid(); + bjcr->setJobType(JT_CONSOLE); + bjcr->setJobLevel(L_FULL); + bjcr->JobStatus = JS_Running; + bjcr->where = bstrdup(files[i]); + bjcr->job_name = get_pool_memory(PM_FNAME); + pm_strcpy(bjcr->job_name, "Dummy.Job.Name"); + bjcr->client_name = get_pool_memory(PM_FNAME); + pm_strcpy(bjcr->client_name, "Dummy.Client.Name"); + bstrncpy(bjcr->Job, "bbatch", sizeof(bjcr->Job)); + bjcr->fileset_name = get_pool_memory(PM_FNAME); + pm_strcpy(bjcr->fileset_name, "Dummy.fileset.name"); + bjcr->fileset_md5 = get_pool_memory(PM_FNAME); + pm_strcpy(bjcr->fileset_md5, "Dummy.fileset.md5"); + + if 
((db = db_init_database(NULL, NULL, db_name, db_user, db_password, + db_host, 0, NULL, + db_ssl_mode, db_ssl_key, + db_ssl_cert, db_ssl_ca, + db_ssl_capath, db_ssl_cipher, + false, false)) == NULL) { + Emsg0(M_ERROR_TERM, 0, _("Could not init Bacula database\n")); + } + if (!db_open_database(NULL, db)) { + Emsg0(M_ERROR_TERM, 0, db_strerror(db)); + } + Dmsg0(200, "Database opened\n"); + if (verbose) { + Pmsg2(000, _("Using Database: %s, User: %s\n"), db_name, db_user); + } + + bjcr->db = db; + + pthread_create(&thid, NULL, do_batch, bjcr); + } + + while (nb > 0) { + bmicrosleep(1,0); + } + + return 0; +} + +static void fill_attr(ATTR_DBR *ar, char *data) +{ + char *p; + char *b; + int index=0; + ar->Stream = STREAM_UNIX_ATTRIBUTES; + ar->JobId = getpid(); + + for(p = b = data; *p; p++) { + if (*p == ';') { + *p = '\0'; + switch (index) { + case 0: + ar->FileIndex = str_to_int64(b); + break; + case 1: + ar->fname = b; + break; + case 2: + ar->attr = b; + break; + case 3: + ar->Digest = b; + break; + } + index++; + b = ++p; + } + } +} + +static void *do_batch(void *jcr) +{ + JCR *bjcr = (JCR *)jcr; + char data[1024]; + int lineno = 0; + struct ATTR_DBR ar; + memset(&ar, 0, sizeof(ar)); + btime_t begin = get_current_btime(); + char *datafile = bjcr->where; + + FILE *fd = fopen(datafile, "r"); + if (!fd) { + Emsg1(M_ERROR_TERM, 0, _("Error opening datafile %s\n"), datafile); + } + while (fgets(data, sizeof(data)-1, fd)) { + strip_trailing_newline(data); + lineno++; + if (verbose && ((lineno % 5000) == 1)) { + printf("\r%i", lineno); + } + fill_attr(&ar, data); + if (!db_create_attributes_record(bjcr, bjcr->db, &ar)) { + Emsg0(M_ERROR_TERM, 0, _("Error while inserting file\n")); + } + } + fclose(fd); + db_write_batch_file_records(bjcr); + btime_t end = get_current_btime(); + + P(mutex); + char ed1[200], ed2[200]; + printf("\rbegin = %s, end = %s\n", edit_int64(begin, ed1),edit_int64(end, ed2)); + printf("Insert time = %sms\n", edit_int64((end - begin) / 10000, ed1)); + printf("Create %u files at %.2f/s\n", lineno, + (lineno / ((float)((end - begin) / 1000000)))); + nb--; + V(mutex); + pthread_exit(NULL); + return NULL; +} diff --git a/src/tools/bpluginfo.c b/src/tools/bpluginfo.c new file mode 100644 index 00000000..36fe0ffd --- /dev/null +++ b/src/tools/bpluginfo.c @@ -0,0 +1,632 @@ +/* + * Contributed in 2012 by Inteos sp. z o.o. + * + * Utility tool display various information about Bacula plugin, + * including but not limited to: + * - Name and Author of the plugin + * - Plugin License + * - Description + * - API version + * - Enabled functions, etc. + */ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2012-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +#ifdef working /* currently will not compile KES 21 Nov 2015 */ + +#include +#include +#include +#include +#include +#include +#ifndef __WIN32__ +#include +#endif +#include "bacula.h" +#include "../filed/fd_plugins.h" +#include "../dird/dir_plugins.h" +// I can't include sd_plugins.h here ... 
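/*
 * Editor's note: a worked example of the record format consumed by do_batch()
 * above; the values are dummies.  One datafile line such as
 *
 *    12345;/tmp/dir555/file12345;xxxLSTATxxxx;xxxxxxxMD5xxxxxx
 *
 * is handed to fill_attr(), which terminates each field at the ';' that
 * follows it, so the ATTR_DBR ends up roughly as:
 *
 *    ar.Stream    = STREAM_UNIX_ATTRIBUTES;
 *    ar.JobId     = getpid();
 *    ar.FileIndex = 12345;                      // str_to_int64("12345")
 *    ar.fname     = "/tmp/dir555/file12345";
 *    ar.attr      = "xxxLSTATxxxx";             // encoded lstat packet
 *
 * Note that a field is only captured when its trailing ';' is seen, so with
 * the sample generator's format the final digest column is not actually
 * stored in ar.Digest.  Each populated record is passed to
 * db_create_attributes_record(), and db_write_batch_file_records() flushes
 * the batch once the whole file has been read.
 */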
+#include "../stored/stored.h" +#include "assert_macro.h" + +extern "C" { + typedef int (*loadPlugin) (void *binfo, void *bfuncs, void **pinfo, + void **pfuncs); + typedef int (*unloadPlugin) (void); +} +#define DEFAULT_API_VERSION 1 +enum plugintype { + DIRPLUGIN, + FDPLUGIN, + SDPLUGIN, + ERRORPLUGIN, +}; + +/* + * pDirInfo + * pInfo + * psdInfo + */ +typedef union _pluginfo pluginfo; +union _pluginfo { + pDirInfo pdirinfo; + pInfo pfdinfo; + psdInfo psdinfo; +}; + +/* + * pDirFuncs + * pFuncs + * psdFuncs + */ +typedef union _plugfuncs plugfuncs; +union _plugfuncs { + pDirFuncs pdirfuncs; + pFuncs pfdfuncs; + psdFuncs psdfuncs; +}; + +/* + * bDirFuncs + * bFuncs + * bsdFuncs + */ +/* + * TODO: change to union + * +typedef union _baculafuncs baculafuncs; +union _baculafuncs { + bDirFuncs bdirfuncs; + bFuncs bfdfuncs; + bsdFuncs bsdfuncs; +}; +*/ +typedef struct _baculafuncs baculafuncs; +struct _baculafuncs { + uint32_t size; + uint32_t version; + int (*registerBaculaEvents) (void *ctx, ...); + int (*getBaculaValue) (void *ctx, int var, void *value); + int (*setBaculaValue) (void *ctx, int var, void *value); + int (*JobMessage) (void *ctx, const char *file, int line, int type, int64_t mtime, + const char *fmt, ...); + int (*DebugMessage) (void *ctx, const char *file, int line, int level, + const char *fmt, ...); + void *(*baculaMalloc) (void *ctx, const char *file, int line, size_t size); + void (*baculaFree) (void *ctx, const char *file, int line, void *mem); +}; + +/* + * bDirInfo + * bInfo + * bsdInfo + */ +typedef union _baculainfos baculainfos; +union _baculainfos { + bDirInfo bdirinfo; + bInfo bfdinfo; + bsdInfo bsdinfo; +}; + +/* +typedef struct _baculainfos baculainfos; +struct _baculainfos { + uint32_t size; + uint32_t version; +}; +*/ + +typedef struct _progdata progdata; +struct _progdata { + int verbose; + int listinfo; + int listfunc; + char *pluginfile; + void *pluginhandle; + int bapiversion; + int bplugtype; + pluginfo *pinfo; + plugfuncs *pfuncs; +}; + +/* memory allocation/deallocation */ +#define MALLOC(size) \ + (char *) bmalloc ( size ); + +#define ASSERT_MEMORY(m) \ + if ( m == NULL ){ \ + printf ( "Error: memory allocation error!\n" ); \ + exit (10); \ + } + +#define FREE(ptr) \ + if ( ptr != NULL ){ \ + bfree ( ptr ); \ + ptr = NULL; \ + } + +int registerBaculaEvents(void *ctx, ...) +{ + return 0; +}; + +int getBaculaValue(void *ctx, int var, void *value) +{ + return 0; +}; + +int setBaculaValue(void *ctx, int var, void *value) +{ + return 0; +}; + +int DebugMessage(void *ctx, const char *file, int line, int level, const char *fmt, ...) +{ +#ifdef DEBUGMSG + printf("DG: %s:%d %s\n", file, line, fmt); +#endif + return 0; +}; + +int JobMessage(void *ctx, const char *file, int line, int type, int64_t mtime, + const char *fmt, ...) 
+{ +#ifdef DEBUGMSG + printf("JM: %s:%d <%d> %s\n", file, line, type, fmt); +#endif + return 0; +}; + +void *baculaMalloc(void *ctx, const char *file, int line, size_t size) +{ + return MALLOC(size); +}; + +void baculaFree(void *ctx, const char *file, int line, void *mem) +{ + FREE(mem); +}; + +/* + * displays a short help + */ +void print_help(int argc, char *argv[]) +{ + + printf("\n" + "Usage: bpluginfo [options] \n" + " -v verbose\n" + " -i list plugin header information only (default)\n" + " -f list plugin functions information only\n" + " -a bacula api version (default %d)\n" + " -h help screen\n" "\n", DEFAULT_API_VERSION); +} + +/* allocates and resets a main program data variable */ +progdata *allocpdata(void) +{ + + progdata *pdata; + + pdata = (progdata *) bmalloc(sizeof(progdata)); + ASSERT_MEMORY(pdata); + memset(pdata, 0, sizeof(progdata)); + + return pdata; +} + +/* releases all allocated program data resources */ +void freepdata(progdata * pdata) +{ + + if (pdata->pluginfile) { + FREE(pdata->pluginfile); + } + FREE(pdata); +} + +/* + * parse execution arguments and fills required pdata structure fields + * + * input: + * pdata - pointer to program data structure + * argc, argv - execution envinroment variables + * output: + * pdata - required structure fields + * + * supported options: + * -v verbose flag + * -i list plugin header info only (default) + * -f list implemented functions only + * -a bacula api version (default 1) + * -h help screen + */ +void parse_args(progdata * pdata, int argc, char *argv[]) +{ + + int i; + char *dirtmp; + char *progdir; + int api; + int s; + + if (argc < 2) { + /* TODO - add a real help screen */ + printf("\nNot enough parameters!\n"); + print_help(argc, argv); + exit(1); + } + + if (argc > 5) { + /* TODO - add a real help screen */ + printf("\nToo many parameters!\n"); + print_help(argc, argv); + exit(1); + } + + for (i = 1; i < argc; i++) { + if (strcmp(argv[i], "-h") == 0) { + /* help screen */ + print_help(argc, argv); + exit(0); + } + if (strcmp(argv[i], "-v") == 0) { + /* verbose option */ + pdata->verbose = 1; + continue; + } + if (strcmp(argv[i], "-f") == 0) { + /* functions list */ + pdata->listfunc = 1; + continue; + } + if (strcmp(argv[i], "-i") == 0) { + /* header list */ + pdata->listinfo = 1; + continue; + } + if (strcmp(argv[i], "-a") == 0) { + /* bacula api version */ + if (i < argc - 1) { + s = sscanf(argv[i + 1], "%d", &api); + if (s == 1) { + pdata->bapiversion = api; + i++; + continue; + } + } + printf("\nAPI version number required!\n"); + print_help(argc, argv); + exit(1); + } + if (!pdata->pluginfile) { + if (argv[i][0] != '/') { + dirtmp = MALLOC(PATH_MAX); + ASSERT_MEMORY(dirtmp); + progdir = MALLOC(PATH_MAX); + ASSERT_MEMORY(progdir); + dirtmp = getcwd(dirtmp, PATH_MAX); + + strcat(dirtmp, "/"); + strcat(dirtmp, argv[i]); + + if (realpath(dirtmp, progdir) == NULL) { + /* error in resolving path */ + FREE(progdir); + progdir = bstrdup(argv[i]); + } + pdata->pluginfile = bstrdup(progdir); + FREE(dirtmp); + FREE(progdir); + } else { + pdata->pluginfile = bstrdup(argv[i]); + } + continue; + } + } +} + +/* + * checks a plugin type based on a plugin magic string + * + * input: + * pdata - program data with plugin info structure + * output: + * int - enum plugintype + */ +int getplugintype(progdata * pdata) +{ + + ASSERT_NVAL_RET_V(pdata, ERRORPLUGIN); + + pluginfo *pinfo = pdata->pinfo; + + ASSERT_NVAL_RET_V(pinfo, ERRORPLUGIN); + + if (pinfo->pdirinfo.plugin_magic && + strcmp(pinfo->pdirinfo.plugin_magic, 
DIR_PLUGIN_MAGIC) == 0) { + return DIRPLUGIN; + } else + if (pinfo->pfdinfo.plugin_magic && + strcmp(pinfo->pfdinfo.plugin_magic, FD_PLUGIN_MAGIC) == 0) { + return FDPLUGIN; + } else + if (pinfo->psdinfo.plugin_magic && + strcmp(pinfo->psdinfo.plugin_magic, SD_PLUGIN_MAGIC) == 0) { + return SDPLUGIN; + } else { + return ERRORPLUGIN; + } +} + +/* + * prints any available information about a plugin + * + * input: + * pdata - program data with plugin info structure + * output: + * printed info + */ +void dump_pluginfo(progdata * pdata) +{ + + ASSERT_NVAL_RET(pdata); + + pluginfo *pinfo = pdata->pinfo; + + ASSERT_NVAL_RET(pinfo); + + plugfuncs *pfuncs = pdata->pfuncs; + + ASSERT_NVAL_RET(pfuncs); + + switch (pdata->bplugtype) { + case DIRPLUGIN: + printf("\nPlugin type:\t\tBacula Director plugin\n"); + if (pdata->verbose) { + printf("Plugin magic:\t\t%s\n", NPRT(pinfo->pdirinfo.plugin_magic)); + } + printf("Plugin version:\t\t%s\n", pinfo->pdirinfo.plugin_version); + printf("Plugin release date:\t%s\n", NPRT(pinfo->pdirinfo.plugin_date)); + printf("Plugin author:\t\t%s\n", NPRT(pinfo->pdirinfo.plugin_author)); + printf("Plugin licence:\t\t%s\n", NPRT(pinfo->pdirinfo.plugin_license)); + printf("Plugin description:\t%s\n", NPRT(pinfo->pdirinfo.plugin_description)); + printf("Plugin API version:\t%d\n", pinfo->pdirinfo.version); + break; + case FDPLUGIN: + printf("\nPlugin type:\t\tFile Daemon plugin\n"); + if (pdata->verbose) { + printf("Plugin magic:\t\t%s\n", NPRT(pinfo->pfdinfo.plugin_magic)); + } + printf("Plugin version:\t\t%s\n", pinfo->pfdinfo.plugin_version); + printf("Plugin release date:\t%s\n", NPRT(pinfo->pfdinfo.plugin_date)); + printf("Plugin author:\t\t%s\n", NPRT(pinfo->pfdinfo.plugin_author)); + printf("Plugin licence:\t\t%s\n", NPRT(pinfo->pfdinfo.plugin_license)); + printf("Plugin description:\t%s\n", NPRT(pinfo->pfdinfo.plugin_description)); + printf("Plugin API version:\t%d\n", pinfo->pfdinfo.version); + break; + case SDPLUGIN: + printf("\nPlugin type:\t\tBacula Storage plugin\n"); + if (pdata->verbose) { + printf("Plugin magic:\t\t%s\n", NPRT(pinfo->psdinfo.plugin_magic)); + } + printf("Plugin version:\t\t%s\n", pinfo->psdinfo.plugin_version); + printf("Plugin release date:\t%s\n", NPRT(pinfo->psdinfo.plugin_date)); + printf("Plugin author:\t\t%s\n", NPRT(pinfo->psdinfo.plugin_author)); + printf("Plugin licence:\t\t%s\n", NPRT(pinfo->psdinfo.plugin_license)); + printf("Plugin description:\t%s\n", NPRT(pinfo->psdinfo.plugin_description)); + printf("Plugin API version:\t%d\n", pinfo->psdinfo.version); + break; + default: + printf("\nUnknown plugin type or other Error\n\n"); + } +} + +/* + * prints any available information about plugin' functions + * + * input: + * pdata - program data with plugin info structure + * output: + * printed info + */ +void dump_plugfuncs(progdata * pdata) +{ + + ASSERT_NVAL_RET(pdata); + + plugfuncs *pfuncs = pdata->pfuncs; + + ASSERT_NVAL_RET(pfuncs); + + printf("\nPlugin functions:\n"); + + switch (pdata->bplugtype) { + case DIRPLUGIN: + if (pdata->verbose) { + if (pfuncs->pdirfuncs.newPlugin) { + printf(" newPlugin()\n"); + } + if (pfuncs->pdirfuncs.freePlugin) { + printf(" freePlugin()\n"); + } + } + if (pfuncs->pdirfuncs.getPluginValue) { + printf(" getPluginValue()\n"); + } + if (pfuncs->pdirfuncs.setPluginValue) { + printf(" setPluginValue()\n"); + } + if (pfuncs->pdirfuncs.handlePluginEvent) { + printf(" handlePluginEvent()\n"); + } + break; + case FDPLUGIN: + if (pdata->verbose) { + if (pfuncs->pfdfuncs.newPlugin) { + printf(" 
newPlugin()\n"); + } + if (pfuncs->pfdfuncs.freePlugin) { + printf(" freePlugin()\n"); + } + } + if (pfuncs->pfdfuncs.getPluginValue) { + printf(" getPluginValue()\n"); + } + if (pfuncs->pfdfuncs.setPluginValue) { + printf(" setPluginValue()\n"); + } + if (pfuncs->pfdfuncs.handlePluginEvent) { + printf(" handlePluginEvent()\n"); + } + if (pfuncs->pfdfuncs.startBackupFile) { + printf(" startBackupFile()\n"); + } + if (pfuncs->pfdfuncs.endBackupFile) { + printf(" endBackupFile()\n"); + } + if (pfuncs->pfdfuncs.startRestoreFile) { + printf(" startRestoreFile()\n"); + } + if (pfuncs->pfdfuncs.endRestoreFile) { + printf(" endRestoreFile()\n"); + } + if (pfuncs->pfdfuncs.pluginIO) { + printf(" pluginIO()\n"); + } + if (pfuncs->pfdfuncs.createFile) { + printf(" createFile()\n"); + } + if (pfuncs->pfdfuncs.setFileAttributes) { + printf(" setFileAttributes()\n"); + } + if (pfuncs->pfdfuncs.checkFile) { + printf(" checkFile()\n"); + } + break; + case SDPLUGIN: + if (pdata->verbose) { + if (pfuncs->psdfuncs.newPlugin) { + printf(" newPlugin()\n"); + } + if (pfuncs->psdfuncs.freePlugin) { + printf(" freePlugin()\n"); + } + } + if (pfuncs->psdfuncs.getPluginValue) { + printf(" getPluginValue()\n"); + } + if (pfuncs->psdfuncs.setPluginValue) { + printf(" setPluginValue()\n"); + } + if (pfuncs->psdfuncs.handlePluginEvent) { + printf(" handlePluginEvent()\n"); + } + break; + default: + printf("\nUnknown plugin type or other Error\n\n"); + } +} + +/* + * input parameters: + * argv[0] [options] + * + * exit codes: + * 0 - success + * 1 - cannot load a plugin + * 2 - cannot find a loadPlugin function + * 3 - cannot find an unloadPlugin function + * 10 - not enough memory + */ +int main(int argc, char *argv[]) +{ + + progdata *pdata; + loadPlugin loadplugfunc; + unloadPlugin unloadplugfunc; + baculafuncs bfuncs = { + sizeof(bfuncs), + 1, + registerBaculaEvents, + getBaculaValue, + setBaculaValue, + JobMessage, + DebugMessage, + baculaMalloc, + baculaFree, + }; + baculainfos binfos; + + pdata = allocpdata(); + parse_args(pdata, argc, argv); + + binfos.bfdinfo.size = sizeof(binfos); + binfos.bfdinfo.version = DEFAULT_API_VERSION; + + pdata->pluginhandle = dlopen(pdata->pluginfile, RTLD_LAZY); + if (pdata->pluginhandle == NULL) { + printf("\nCannot load a plugin: %s\n\n", dlerror()); + freepdata(pdata); + exit(1); + } + + loadplugfunc = (loadPlugin) dlsym(pdata->pluginhandle, "loadPlugin"); + if (loadplugfunc == NULL) { + printf("\nCannot find loadPlugin function: %s\n", dlerror()); + printf("\nWhether the file is a really Bacula plugin?\n\n"); + freepdata(pdata); + exit(2); + } + + unloadplugfunc = (unloadPlugin) dlsym(pdata->pluginhandle, "unloadPlugin"); + if (unloadplugfunc == NULL) { + printf("\nCannot find unloadPlugin function: %s\n", dlerror()); + printf("\nWhether the file is a really Bacula plugin?\n\n"); + freepdata(pdata); + exit(3); + } + + if (pdata->bapiversion > 0) { + binfos.bdirinfo.version = pdata->bapiversion; + } + + loadplugfunc(&binfos, &bfuncs, (void **)&pdata->pinfo, (void **)&pdata->pfuncs); + + pdata->bplugtype = getplugintype(pdata); + + if (!pdata->listfunc) { + dump_pluginfo(pdata); + } + if ((!pdata->listinfo && pdata->listfunc) || pdata->verbose) { + dump_plugfuncs(pdata); + } + printf("\n"); + + unloadplugfunc(); + + dlclose(pdata->pluginhandle); + + freepdata(pdata); + + return 0; +} + +#endif /* working */ diff --git a/src/tools/bregex.c b/src/tools/bregex.c new file mode 100644 index 00000000..8c38c917 --- /dev/null +++ b/src/tools/bregex.c @@ -0,0 +1,166 @@ +/* + Bacula(R) - 
The Network Backup Solution + + Copyright (C) 2000-2019 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Test program for testing regular expressions. + * + * Kern Sibbald, MMVI + * + */ + +#include "bacula.h" + +/* + * If you define BACULA_REGEX, bregex will be built with the + * Bacula bregex library, which is the same code that we + * use on Win32, thus using Linux, you can test your Win32 + * expressions. Otherwise, this program will link with the + * system library routines. + */ +//#define BACULA_REGEX + +#ifdef BACULA_REGEX + +#include "lib/bregex.h" + +#else +#ifndef HAVE_REGEX_H +#include "lib/bregex.h" +#else +#include +#endif + +#endif + + +static void usage() +{ + fprintf(stderr, +"\n" +"Usage: bregex [-d debug_level] -f \n" +" -f specify file of data to be matched\n" +" -l suppress line numbers\n" +" -n print lines that do not match\n" +" -d set debug level to \n" +" -dt print timestamp in debug output\n" +" -? print this message.\n" +"\n\n"); + + exit(1); +} + + +int main(int argc, char *const *argv) +{ + regex_t preg; + char prbuf[500]; + char *fname = NULL; + int rc, ch; + char data[1000]; + char pat[500]; + FILE *fd; + bool match_only = true; + int lineno; + bool no_linenos = false; + + + setlocale(LC_ALL, ""); + bindtextdomain("bacula", LOCALEDIR); + textdomain("bacula"); + + while ((ch = getopt(argc, argv, "d:f:ln?")) != -1) { + switch (ch) { + case 'd': /* set debug level */ + if (*optarg == 't') { + dbg_timestamp = true; + } else { + debug_level = atoi(optarg); + if (debug_level <= 0) { + debug_level = 1; + } + } + break; + + case 'f': /* data */ + fname = optarg; + break; + + case 'l': + no_linenos = true; + break; + + case 'n': + match_only = false; + break; + + case '?': + default: + usage(); + + } + } + argc -= optind; + argv += optind; + + if (!fname) { + printf("A data file must be specified.\n"); + usage(); + } + + OSDependentInit(); + + for ( ;; ) { + printf("Enter regex pattern: "); + if (fgets(pat, sizeof(pat)-1, stdin) == NULL) { + break; + } + strip_trailing_newline(pat); + if (pat[0] == 0) { + exit(0); + } + rc = regcomp(&preg, pat, REG_EXTENDED); + if (rc != 0) { + regerror(rc, &preg, prbuf, sizeof(prbuf)); + printf("Regex compile error: %s\n", prbuf); + continue; + } + fd = fopen(fname, "r"); + if (!fd) { + printf(_("Could not open data file: %s\n"), fname); + exit(1); + } + lineno = 0; + while (fgets(data, sizeof(data)-1, fd)) { + const int nmatch = 30; + regmatch_t pmatch[nmatch]; + strip_trailing_newline(data); + lineno++; + rc = regexec(&preg, data, nmatch, pmatch, 0); + if ((match_only && rc == 0) || (!match_only && rc != 0)) { + if (no_linenos) { + printf("%s\n", data); + } else { + printf("%5d: %s\n", lineno, data); + } + } + } + fclose(fd); + regfree(&preg); + } + exit(0); +} diff --git a/src/tools/bregtest.c b/src/tools/bregtest.c new file mode 100644 index 00000000..b6d59453 --- /dev/null +++ b/src/tools/bregtest.c @@ -0,0 +1,155 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original 
author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Test program for testing regular expressions. + * + * Kern Sibbald, MMVI + * + */ + +/* + * If you define BACULA_REGEX, bregex will be built with the + * Bacula bregex library, which is the same code that we + * use on Win32, thus using Linux, you can test your Win32 + * expressions. Otherwise, this program will link with the + * system library routines. + */ +//#define BACULA_REGEX + +#include "bacula.h" +#include +#include "lib/breg.h" + + +static void usage() +{ + fprintf(stderr, +"\n" +"Usage: bregtest [-d debug_level] [-s] -f -e /test/test2/\n" +" -f specify file of data to be matched\n" +" -e specify expression\n" +" -s sed output\n" +" -d set debug level to \n" +" -dt print timestamp in debug output\n" +" -? print this message.\n" +"\n"); + + exit(1); +} + + +int main(int argc, char *const *argv) +{ + char *fname = NULL; + char *expr = NULL; + int ch; + bool sed=false; + char data[1000]; + FILE *fd; + + setlocale(LC_ALL, ""); + bindtextdomain("bacula", LOCALEDIR); + textdomain("bacula"); + + while ((ch = getopt(argc, argv, "sd:f:e:")) != -1) { + switch (ch) { + case 'd': /* set debug level */ + if (*optarg == 't') { + dbg_timestamp = true; + } else { + debug_level = atoi(optarg); + if (debug_level <= 0) { + debug_level = 1; + } + } + break; + + case 'f': /* data */ + fname = optarg; + break; + + case 'e': + expr = optarg; + break; + + case 's': + sed=true; + break; + + case '?': + default: + usage(); + + } + } + argc -= optind; + argv += optind; + + if (!fname) { + printf("A data file must be specified.\n"); + usage(); + } + + if (!expr) { + printf("An expression must be specified.\n"); + usage(); + } + + OSDependentInit(); + + alist *list; + char *p; + + list = get_bregexps(expr); + + if (!list) { + printf("Can't use %s as 'sed' expression\n", expr); + exit (1); + } + + fd = fopen(fname, "r"); + if (!fd) { + printf(_("Could not open data file: %s\n"), fname); + exit(1); + } + + while (fgets(data, sizeof(data)-1, fd)) { + strip_trailing_newline(data); + apply_bregexps(data, list, &p); + if (sed) { + printf("%s\n", p); + } else { + printf("%s => %s\n", data, p); + } + } + fclose(fd); + free_bregexps(list); + delete list; + exit(0); +} +/* + TODO: + - ajout /g + + - tests + * test avec /i + * test avec un sed et faire un diff + * test avec une boucle pour voir les fuites + +*/ diff --git a/src/tools/bsmtp.c b/src/tools/bsmtp.c new file mode 100644 index 00000000..997708d7 --- /dev/null +++ b/src/tools/bsmtp.c @@ -0,0 +1,679 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2015 Kern Sibbald + Copyright (C) 2001-2015 Free Software Foundation Europe e.V. + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + Derived from smtp-orig.c + + AUTHOR(S) + W.Z. Venema + Eindhoven University of Technology + Department of Mathematics and Computer Science + Den Dolech 2, P.O. Box 513, 5600 MB Eindhoven, The Netherlands + CREATION DATE + Wed Dec 1 14:51:13 MET 1993 + LAST UPDATE + Fri Aug 11 12:29:23 MET DST 1995 + COPYRIGHT + None specified. + + Kern Sibbald, July 2001 + + Note, the original W.Z. Venema smtp.c had no license and no + copyright. See: + http://archives.neohapsis.com/archives/postfix/2000-05/1520.html + + In previous versions, I believed that this code cam from + Ralf S. Engelshall's smtpclient_main.c, but in fact 99% was + Wietse Venema's code. + */ + +#include "bacula.h" +#include "jcr.h" +#define MY_NAME "bsmtp" + +#if defined(HAVE_WIN32) +#include +#endif + +#ifndef MAXSTRING +#define MAXSTRING 254 +#endif + +enum resolv_type { + RESOLV_PROTO_ANY, + RESOLV_PROTO_IPV4, + RESOLV_PROTO_IPV6 +}; + +static FILE *sfp; +static FILE *rfp; + +static char *from_addr = NULL; +static char *cc_addr = NULL; +static char *subject = NULL; +static char *err_addr = NULL; +static const char *mailhost = NULL; +static char *reply_addr = NULL; +static int mailport = 25; +static char my_hostname[MAXSTRING]; +static bool content_utf8 = false; +static resolv_type default_resolv_type = RESOLV_PROTO_IPV4; + +/* + * Take input that may have names and other stuff and strip + * it down to the mail box address ... i.e. what is enclosed + * in < >. Otherwise add < >. + */ +static char *cleanup_addr(char *addr, char *buf, int buf_len) +{ + char *p, *q; + + if ((p = strchr(addr, '<')) == NULL) { + snprintf(buf, buf_len, "<%s>", addr); + } else { + /* Copy */ + for (q=buf; *p && *p!='>'; ) { + *q++ = *p++; + } + if (*p) { + *q++ = *p; + } + *q = 0; + } + Dmsg2(100, "cleanup in=%s out=%s\n", addr, buf); + return buf; +} + +/* + * get_response - examine message from server + */ +static void get_response(void) +{ + char buf[1000]; + + Dmsg0(50, "Calling fgets on read socket rfp.\n"); + buf[3] = 0; + while (fgets(buf, sizeof(buf), rfp)) { + int len = strlen(buf); + if (len > 0) { + buf[len-1] = 0; + } + if (debug_level >= 10) { + fprintf(stderr, "%s <-- %s\n", mailhost, buf); + } + Dmsg2(10, "%s --> %s\n", mailhost, buf); + if (!isdigit((int)buf[0]) || buf[0] > '3') { + Pmsg2(0, _("Fatal malformed reply from %s: %s\n"), mailhost, buf); + exit(1); + } + if (buf[3] != '-') { + break; + } + } + if (ferror(rfp)) { + fprintf(stderr, _("Fatal fgets error: ERR=%s\n"), strerror(errno)); + } + return; +} + +/* + * chat - say something to server and check the response + */ +static void chat(const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + vfprintf(sfp, fmt, ap); + va_end(ap); + if (debug_level >= 10) { + fprintf(stdout, "%s --> ", my_hostname); + va_start(ap, fmt); + vfprintf(stdout, fmt, ap); + va_end(ap); + } + + /* Send message to server and parse its response. 
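+ * With debug_level >= 10 the dialogue is echoed, e.g. (host names are
+ * illustrative only):
+ *    client.example.com --> HELO client.example.com
+ *    mail.example.com   <-- 250 mail.example.com
+ * get_response() exits with a fatal error as soon as a reply does not
+ * start with a digit in the 0-3 range.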
*/ + fflush(sfp); + if (debug_level >= 10) { + fflush(stdout); + } + get_response(); +} + +/* + * usage - explain and bail out + */ +static void usage() +{ + fprintf(stderr, +_("\n" +"Usage: %s [-f from] [-h mailhost] [-s subject] [-c copy] [recipient ...]\n" +" -4 forces bsmtp to use IPv4 addresses only.\n" +#ifdef HAVE_IPV6 +" -6 forces bsmtp to use IPv6 addresses only.\n" +#endif +" -8 set charset to UTF-8\n" +" -a use any ip protocol for address resolution\n" +" -c set the Cc: field\n" +" -d set debug level to \n" +" -dt print a timestamp in debug output\n" +" -f set the From: field\n" +" -h use mailhost:port as the SMTP server\n" +" -s set the Subject: field\n" +" -r set the Reply-To: field\n" +" -l set the maximum number of lines to send (default: unlimited)\n" +" -? print this message.\n" +"\n"), MY_NAME); + + exit(1); +} + +/* + * Return the offset west from localtime to UTC in minutes + * Same as timezone.tz_minuteswest + * Unix tz_offset coded by: Attila Fülöp + */ +static long tz_offset(time_t lnow, struct tm &tm) +{ +#if defined(HAVE_WIN32) +#if defined(HAVE_MINGW) +__MINGW_IMPORT long _dstbias; +#endif +#if defined(MINGW64) +# define _tzset tzset +#endif + /* Win32 code */ + long offset; + _tzset(); + offset = _timezone; + if (tm.tm_isdst) { + offset += _dstbias; + } + return offset /= 60; +#else + + /* Unix/Linux code */ + struct tm tm_utc; + time_t now = lnow; + + (void)gmtime_r(&now, &tm_utc); + tm_utc.tm_isdst = tm.tm_isdst; + return (long)difftime(mktime(&tm_utc), now) / 60; +#endif +} + +static void get_date_string(char *buf, int buf_len) +{ + time_t now = time(NULL); + struct tm tm; + char tzbuf[MAXSTRING]; + long my_timezone; + + /* Add RFC822 date */ + (void)localtime_r(&now, &tm); + + my_timezone = tz_offset(now, tm); + strftime(buf, buf_len, "%a, %d %b %Y %H:%M:%S", &tm); + snprintf(tzbuf, sizeof(tzbuf), " %+2.2ld%2.2u", -my_timezone / 60, (unsigned int)abs(my_timezone) % 60); + strcat(buf, tzbuf); /* add +0100 */ + strftime(tzbuf, sizeof(tzbuf), " (%Z)", &tm); + strcat(buf, tzbuf); /* add (CEST) */ +} + +/********************************************************************* + * + * Program to send email + */ +int main (int argc, char *argv[]) +{ + char buf[1000]; + int i, ch; + unsigned long maxlines, lines; +#if defined(HAVE_WIN32) + SOCKET s; +#else + int s, r; + struct passwd *pwd; +#endif + char *cp, *p; +#ifdef HAVE_GETADDRINFO + int res; + struct addrinfo hints; + struct addrinfo *ai, *rp; + char mail_port[10]; +#else + struct hostent *hp; + struct sockaddr_in sin; +#endif +#ifdef HAVE_IPV6 + const char *options = "468ac:d:f:h:r:s:l:?"; +#else + const char *options = "48ac:d:f:h:r:s:l:?"; +#endif + + setlocale(LC_ALL, "en_US"); + bindtextdomain("bacula", LOCALEDIR); + textdomain("bacula"); + + my_name_is(argc, argv, "bsmtp"); + maxlines = 0; + + while ((ch = getopt(argc, argv, options)) != -1) { + switch (ch) { + case '4': + default_resolv_type = RESOLV_PROTO_IPV4; + break; + +#ifdef HAVE_IPV6 + case '6': + default_resolv_type = RESOLV_PROTO_IPV6; + break; +#endif + + case '8': + content_utf8 = true; + break; + + case 'a': + default_resolv_type = RESOLV_PROTO_ANY; + break; + + case 'c': + Dmsg1(20, "cc=%s\n", optarg); + cc_addr = optarg; + break; + + case 'd': /* set debug level */ + if (*optarg == 't') { + dbg_timestamp = true; + } else { + debug_level = atoi(optarg); + if (debug_level <= 0) { + debug_level = 1; + } + } + Dmsg1(20, "Debug level = %d\n", debug_level); + break; + + case 'f': /* from */ + from_addr = optarg; + break; + + case 'h': /* smtp host 
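+ (may be given as host or host:port; the port defaults to 25)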
*/ + Dmsg1(20, "host=%s\n", optarg); + p = strchr(optarg, ':'); + if (p) { + *p++ = 0; + mailport = atoi(p); + } + mailhost = optarg; + break; + + case 's': /* subject */ + Dmsg1(20, "subject=%s\n", optarg); + subject = optarg; + break; + + case 'r': /* reply address */ + reply_addr = optarg; + break; + + case 'l': + Dmsg1(20, "maxlines=%s\n", optarg); + maxlines = (unsigned long) atol(optarg); + break; + + case '?': + default: + usage(); + + } + } + argc -= optind; + argv += optind; + + if (argc < 1) { + Pmsg0(0, _("Fatal error: no recipient given.\n")); + usage(); + exit(1); + } + + /* + * Determine SMTP server + */ + if (mailhost == NULL) { + if ((cp = getenv("SMTPSERVER")) != NULL) { + mailhost = cp; + } else { + mailhost = "localhost"; + } + } + +#if defined(HAVE_WIN32) + WSADATA wsaData; + + _setmode(0, _O_BINARY); + WSAStartup(MAKEWORD(2,2), &wsaData); +#endif + + /* + * Find out my own host name for HELO; + * if possible, get the FQDN - fully qualified domain name + */ + if (gethostname(my_hostname, sizeof(my_hostname) - 1) < 0) { + Pmsg1(0, _("Fatal gethostname error: ERR=%s\n"), strerror(errno)); + exit(1); + } +#ifdef HAVE_GETADDRINFO + memset(&hints, 0, sizeof(struct addrinfo)); + hints.ai_family = AF_UNSPEC; + hints.ai_socktype = 0; + hints.ai_protocol = 0; + hints.ai_flags = AI_CANONNAME; + + if ((res = getaddrinfo(my_hostname, NULL, &hints, &ai)) != 0) { + Pmsg2(0, _("Fatal getaddrinfo for myself failed \"%s\": ERR=%s\n"), + my_hostname, gai_strerror(res)); + exit(1); + } + bstrncpy(my_hostname, ai->ai_canonname, sizeof(my_hostname)); + freeaddrinfo(ai); +#else + if ((hp = gethostbyname(my_hostname)) == NULL) { + Pmsg2(0, _("Fatal gethostbyname for myself failed \"%s\": ERR=%s\n"), + my_hostname, strerror(errno)); + exit(1); + } + bstrncpy(my_hostname, hp->h_name, sizeof(my_hostname)); +#endif + Dmsg1(20, "My hostname is: %s\n", my_hostname); + + /* + * Determine from address. + */ + if (from_addr == NULL) { +#if defined(HAVE_WIN32) + DWORD dwSize = UNLEN + 1; + LPSTR lpszBuffer = (LPSTR)alloca(dwSize); + + if (GetUserName(lpszBuffer, &dwSize)) { + snprintf(buf, sizeof(buf), "%s@%s", lpszBuffer, my_hostname); + } else { + snprintf(buf, sizeof(buf), "unknown-user@%s", my_hostname); + } +#else + if ((pwd = getpwuid(getuid())) == 0) { + snprintf(buf, sizeof(buf), "userid-%d@%s", (int)getuid(), my_hostname); + } else { + snprintf(buf, sizeof(buf), "%s@%s", pwd->pw_name, my_hostname); + } +#endif + from_addr = bstrdup(buf); + } + Dmsg1(20, "From addr=%s\n", from_addr); + + /* + * Connect to smtp daemon on mailhost. 
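+ * When built with getaddrinfo(), the code resolves the host for the
+ * protocol family selected by -4/-6/-a and tries socket()+connect() on
+ * each returned address until one succeeds.  If resolution fails and the
+ * host is not already "localhost", it retries once with "localhost" via
+ * the lookup_host label below.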
+ */ +lookup_host: +#ifdef HAVE_GETADDRINFO + memset(&hints, 0, sizeof(struct addrinfo)); + switch (default_resolv_type) { + case RESOLV_PROTO_ANY: + hints.ai_family = AF_UNSPEC; + break; + case RESOLV_PROTO_IPV4: + hints.ai_family = AF_INET; + break; +#ifdef HAVE_IPV6 + case RESOLV_PROTO_IPV6: + hints.ai_family = AF_INET6; + break; +#endif + default: + hints.ai_family = AF_UNSPEC; + break; + } + hints.ai_socktype = SOCK_STREAM; + hints.ai_protocol = 0; + hints.ai_flags = 0; + snprintf(mail_port, sizeof(mail_port), "%d", mailport); + + if ((res = getaddrinfo(mailhost, mail_port, &hints, &ai)) != 0) { + Pmsg2(0, _("Error unknown mail host \"%s\": ERR=%s\n"), + mailhost, gai_strerror(res)); + if (strcasecmp(mailhost, "localhost")) { + Pmsg0(0, _("Retrying connection using \"localhost\".\n")); + mailhost = "localhost"; + goto lookup_host; + } + exit(1); + } + + for (rp = ai; rp != NULL; rp = rp->ai_next) { +#if defined(HAVE_WIN32) + s = WSASocket(rp->ai_family, rp->ai_socktype, rp->ai_protocol, NULL, 0, 0); +#else + s = socket(rp->ai_family, rp->ai_socktype, rp->ai_protocol); +#endif + if (s < 0) { + continue; + } + + if (connect(s, rp->ai_addr, rp->ai_addrlen) != -1) { + break; + } + + close(s); + } + + if (!rp) { + Pmsg1(0, _("Failed to connect to mailhost %s\n"), mailhost); + exit(1); + } + + freeaddrinfo(ai); +#else + if ((hp = gethostbyname(mailhost)) == NULL) { + Pmsg2(0, _("Error unknown mail host \"%s\": ERR=%s\n"), mailhost, + strerror(errno)); + if (strcasecmp(mailhost, "localhost") != 0) { + Pmsg0(0, _("Retrying connection using \"localhost\".\n")); + mailhost = "localhost"; + goto lookup_host; + } + exit(1); + } + + if (hp->h_addrtype != AF_INET) { + Pmsg1(0, _("Fatal error: Unknown address family for smtp host: %d\n"), hp->h_addrtype); + exit(1); + } + memset((char *)&sin, 0, sizeof(sin)); + memcpy((char *)&sin.sin_addr, hp->h_addr, hp->h_length); + sin.sin_family = hp->h_addrtype; + sin.sin_port = htons(mailport); +#if defined(HAVE_WIN32) + if ((s = WSASocket(AF_INET, SOCK_STREAM, 0, NULL, 0, 0)) < 0) { + Pmsg1(0, _("Fatal socket error: ERR=%s\n"), strerror(errno)); + exit(1); + } +#else + if ((s = socket(AF_INET, SOCK_STREAM, 0)) < 0) { + Pmsg1(0, _("Fatal socket error: ERR=%s\n"), strerror(errno)); + exit(1); + } +#endif + if (connect(s, (struct sockaddr *)&sin, sizeof(sin)) < 0) { + Pmsg2(0, _("Fatal connect error to %s: ERR=%s\n"), mailhost, strerror(errno)); + exit(1); + } + Dmsg0(20, "Connected\n"); +#endif + +#if defined(HAVE_WIN32) + int fdSocket = _open_osfhandle(s, _O_RDWR | _O_BINARY); + if (fdSocket == -1) { + Pmsg1(0, _("Fatal _open_osfhandle error: ERR=%s\n"), strerror(errno)); + exit(1); + } + + int fdSocket2 = dup(fdSocket); + + if ((sfp = fdopen(fdSocket, "wb")) == NULL) { + Pmsg1(0, _("Fatal fdopen error: ERR=%s\n"), strerror(errno)); + exit(1); + } + if ((rfp = fdopen(fdSocket2, "rb")) == NULL) { + Pmsg1(0, _("Fatal fdopen error: ERR=%s\n"), strerror(errno)); + exit(1); + } +#else + if ((r = dup(s)) < 0) { + Pmsg1(0, _("Fatal dup error: ERR=%s\n"), strerror(errno)); + exit(1); + } + if ((sfp = fdopen(s, "w")) == 0) { + Pmsg1(0, _("Fatal fdopen error: ERR=%s\n"), strerror(errno)); + exit(1); + } + if ((rfp = fdopen(r, "r")) == 0) { + Pmsg1(0, _("Fatal fdopen error: ERR=%s\n"), strerror(errno)); + exit(1); + } +#endif + + /* + * Send SMTP headers. Note, if any of the strings have a < + * in them already, we do not enclose the string in < >, otherwise + * we do. 
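+ * For illustration (addresses are made up): cleanup_addr() turns
+ * "Full Name <user@example.com>" into "<user@example.com>" and a bare
+ * "user@example.com" into "<user@example.com>"; that is the form passed
+ * to MAIL FROM: and RCPT TO: below.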
+ */ + get_response(); /* banner */ + chat("HELO %s\r\n", my_hostname); + chat("MAIL FROM:%s\r\n", cleanup_addr(from_addr, buf, sizeof(buf))); + + for (i = 0; i < argc; i++) { + Dmsg1(20, "rcpt to: %s\n", argv[i]); + chat("RCPT TO:%s\r\n", cleanup_addr(argv[i], buf, sizeof(buf))); + } + + if (cc_addr) { + chat("RCPT TO:%s\r\n", cleanup_addr(cc_addr, buf, sizeof(buf))); + } + Dmsg0(20, "Data\n"); + chat("DATA\r\n"); + + /* + * Send header + */ + fprintf(sfp, "From: %s\r\n", from_addr); + Dmsg1(10, "From: %s\r\n", from_addr); + if (subject) { + fprintf(sfp, "Subject: %s\r\n", subject); + Dmsg1(10, "Subject: %s\r\n", subject); + } + if (reply_addr) { + fprintf(sfp, "Reply-To: %s\r\n", reply_addr); + Dmsg1(10, "Reply-To: %s\r\n", reply_addr); + } + if (err_addr) { + fprintf(sfp, "Errors-To: %s\r\n", err_addr); + Dmsg1(10, "Errors-To: %s\r\n", err_addr); + } + +#if defined(HAVE_WIN32) + DWORD dwSize = UNLEN + 1; + LPSTR lpszBuffer = (LPSTR)alloca(dwSize); + + if (GetUserName(lpszBuffer, &dwSize)) { + fprintf(sfp, "Sender: %s@%s\r\n", lpszBuffer, my_hostname); + Dmsg2(10, "Sender: %s@%s\r\n", lpszBuffer, my_hostname); + } else { + fprintf(sfp, "Sender: unknown-user@%s\r\n", my_hostname); + Dmsg1(10, "Sender: unknown-user@%s\r\n", my_hostname); + } +#else + if ((pwd = getpwuid(getuid())) == 0) { + fprintf(sfp, "Sender: userid-%d@%s\r\n", (int)getuid(), my_hostname); + Dmsg2(10, "Sender: userid-%d@%s\r\n", (int)getuid(), my_hostname); + } else { + fprintf(sfp, "Sender: %s@%s\r\n", pwd->pw_name, my_hostname); + Dmsg2(10, "Sender: %s@%s\r\n", pwd->pw_name, my_hostname); + } +#endif + + fprintf(sfp, "To: %s", argv[0]); + Dmsg1(10, "To: %s", argv[0]); + for (i = 1; i < argc; i++) { + fprintf(sfp, ",%s", argv[i]); + Dmsg1(10, ",%s", argv[i]); + } + + fprintf(sfp, "\r\n"); + Dmsg0(10, "\r\n"); + if (cc_addr) { + fprintf(sfp, "Cc: %s\r\n", cc_addr); + Dmsg1(10, "Cc: %s\r\n", cc_addr); + } + + if (content_utf8) { + fprintf(sfp, "Content-Type: text/plain; charset=UTF-8\r\n"); + Dmsg0(10, "Content-Type: text/plain; charset=UTF-8\r\n"); + } + + get_date_string(buf, sizeof(buf)); + fprintf(sfp, "Date: %s\r\n", buf); + Dmsg1(10, "Date: %s\r\n", buf); + + fprintf(sfp, "\r\n"); + + /* + * Send message body + */ + lines = 0; + while (fgets(buf, sizeof(buf), stdin)) { + if (maxlines > 0 && ++lines > maxlines) { + Dmsg1(20, "skip line because of maxlines limit: %lu\n", maxlines); + while (fgets(buf, sizeof(buf), stdin)) { + ++lines; + } + break; + } + buf[sizeof(buf)-1] = '\0'; + buf[strlen(buf)-1] = '\0'; + if (buf[0] == '.') { /* add extra . see RFC 2821 4.5.2 */ + fputs(".", sfp); + } + fputs(buf, sfp); + fputs("\r\n", sfp); + } + + if (lines > maxlines) { + Dmsg1(10, "hit maxlines limit: %lu\n", maxlines); + fprintf(sfp, "\r\n\r\n[maximum of %lu lines exceeded, skipped %lu lines of output]\r\n", maxlines, lines-maxlines); + } + + /* + * Send SMTP quit command + */ + chat(".\r\n"); + chat("QUIT\r\n"); + exit(0); +} diff --git a/src/tools/bsnapshot.c b/src/tools/bsnapshot.c new file mode 100644 index 00000000..68495db5 --- /dev/null +++ b/src/tools/bsnapshot.c @@ -0,0 +1,1976 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +#include "bacula.h" +#include "lib/ini.h" + +#ifdef HAVE_SUN_OS +#include +#include /* Define major() and minor() */ +#endif + +#define Dmsg(level, ...) do { \ + if (level <= debug_level) { \ + fprintf(debug, "%s:%d ", __FILE__ , __LINE__); \ + fprintf(debug, __VA_ARGS__ ); \ + } \ + } while (0) + +#define Pmsg(level, ...) do { \ + if (level <= debug_level) { \ + fprintf(stderr, "%s:%d ", __FILE__ , __LINE__ ); \ + fprintf(stderr, __VA_ARGS__ ); \ + } \ + } while (0) + +#define BSNAPSHOT_CONF SYSCONFDIR "/bsnapshot.conf" + +static FILE *debug = NULL; + +static void usage(const char *msg=NULL) +{ + if (msg) { + fprintf(stderr, _("ERROR %s\n\n"), msg); + } + + fprintf(stderr, _( + "Bacula %s (%s)\n\n" + "Usage: bsnapshot\n" + " -d level Set debug level\n" + " -v Verbose\n" + " -s Use sudo\n" + " -o logfile send debug to logfile\n" + " -V volume volume\n" + " -T type volume type\n" + " -t check compatibility\n" + " -c specify configuration file\n" + "\n"), VERSION, LSMDATE); + exit(2); +} + +static const char *Months[] = { + NULL, + "Jan", + "Feb", + "Mar", + "Apr", + "Mai", + "Jun", + "Jul", + "Aug", + "Sep", + "Oct", + "Nov", + "Dec" +}; + +/* Skip leading slash(es) */ +static bool makedir(char *path) +{ + char *p = path; + + while (IsPathSeparator(*p)) { + p++; + } + while ((p = first_path_separator(p))) { + char save_p; + save_p = *p; + *p = 0; + mkdir(path, 0700); + *p = save_p; + while (IsPathSeparator(*p)) { + p++; + } + } + /* If not having a ending / */ + if (!IsPathSeparator(path[strlen(path) - 1])) { + mkdir(path, 0700); + } + return true; +} + +/* Strip trailing junk and " */ +void strip_quotes(char *str) +{ + strip_trailing_junk(str); + for(char *p = str; *p ; p++) { + if (*p == '"') { + *p = ' '; + } + } +} + +static void set_trace_file(const char *path) +{ + char dt[MAX_TIME_LENGTH]; + if (debug && debug != stderr) { + fclose(debug); + } + debug = fopen(path, "a"); + if (!debug) { + debug = stderr; + } else { + Dmsg(10, "Starting bsnapshot %s\n", + bstrftime(dt, MAX_TIME_LENGTH, time(NULL))); + } +} + +/* Small function to avoid double // in path name */ +static void path_concat(POOLMEM *&dest, const char *path1, const char *path2, const char *path3) { + int last; + + last = pm_strcpy(dest, path1); + last = MAX(last - 1, 0); + + /* Check if the last char of dest is / and the first of path2 is / */ + if (dest[last] == '/') { + if (path2[0] == '/') { + dest[last] = 0; + } + } else { + if (path2[0] != '/') { + pm_strcat(dest, "/"); + } + } + + last = pm_strcat(dest, path2); + last = MAX(last - 1, 0); + + if (path3) { + if (dest[last] == '/') { + if (path3[0] == '/') { + dest[last] = 0; + } + } else { + if (path3[0] != '/') { + pm_strcat(dest, "/"); + } + } + pm_strcat(dest, path3); + } +} + +static struct ini_items bsnap_cfg[] = { + // name handler comment required default + { "trace", ini_store_str, "", 0, NULL}, + { "debug", ini_store_int32, "", 0, NULL}, + { "sudo", ini_store_bool, "", 0, NULL}, + { "disabled", ini_store_bool, "", 0, "no"}, + { "retry", ini_store_int32, "", 0, "3"}, + { "lvm_snapshot_size", ini_store_alist_str,"", 0, NULL}, + { "skip_volume", 
ini_store_alist_str,"", 0, NULL}, + { "snapshot_dir", ini_store_str, "", 0, NULL}, + { "fail_job_on_error", ini_store_bool, "", 0, "yes"}, + { NULL, NULL, NULL, 0, NULL} +}; + +class arguments { +public: + char *action; /* list, create, delete... */ + char *volume; /* snapshot device */ + char *device; /* original device name */ + char *name; /* snapshot name */ + char *mountpoint; /* device mountpoint */ + char *snapmountpoint; /* snapshot mountpoint */ + char *type; /* snapshot type */ + char *fstype; /* filesystem type */ + const char *snapdir; /* .snapshot */ + const char *sudo; /* prepend sudo to commands */ + int verbose; + int retry; /* retry some operations */ + bool disabled; /* disabled by config file */ + bool fail_job_on_error; /* Fail job on snapshot error */ + ConfigFile ini; /* Configuration file */ + POOL_MEM config_file; /* Path to a config file */ + + arguments(): + action(getenv("SNAPSHOT_ACTION")), + volume(getenv("SNAPSHOT_VOLUME")), + device(getenv("SNAPSHOT_DEVICE")), + name( getenv("SNAPSHOT_NAME")), + mountpoint(getenv("SNAPSHOT_MOUNTPOINT")), + snapmountpoint(getenv("SNAPSHOT_SNAPMOUNTPOINT")), + type( getenv("SNAPSHOT_TYPE")), + fstype(getenv("SNAPSHOT_FSTYPE")), + snapdir(".snapshots"), + sudo(""), + verbose(0), + retry(3), + disabled(false), + fail_job_on_error(true) + { + struct stat sp; + ini.register_items(bsnap_cfg, sizeof(struct ini_items)); + + if (stat(BSNAPSHOT_CONF, &sp) == 0) { + Dmsg(10, "conf=%s\n", BSNAPSHOT_CONF); + pm_strcpy(config_file, BSNAPSHOT_CONF); + } + }; + + ~arguments() { + }; + + bool validate() { + int pos; + if (!action) { + return false; + } + if (strcmp(config_file.c_str(), "") != 0) { + Dmsg(10, "Reading configuration from %s\n", config_file.c_str()); + if (!ini.parse(config_file.c_str())) { + printf("status=1 error=\"Unable to parse %s\"\n", + config_file.c_str()); + return false; + } + pos = ini.get_item("debug"); + if (ini.items[pos].found && debug_level == 0) { + debug_level = ini.items[pos].val.int32val; + } + pos = ini.get_item("trace"); + if (ini.items[pos].found) { + set_trace_file(ini.items[pos].val.strval); + } + pos = ini.get_item("sudo"); + if (ini.items[pos].found && ini.items[pos].val.boolval) { + sudo = "sudo "; + } + pos = ini.get_item("snapshot_dir"); + if (ini.items[pos].found) { + snapdir = ini.items[pos].val.strval; + } + pos = ini.get_item("retry"); + if (ini.items[pos].found) { + retry = ini.items[pos].val.int32val; + } + pos = ini.get_item("disabled"); + if (ini.items[pos].found) { + disabled = ini.items[pos].val.boolval; + } + pos = ini.get_item("fail_job_on_error"); + if (ini.items[pos].found) { + fail_job_on_error = ini.items[pos].val.boolval; + } + } + return true; + }; +}; + +class snapshot { +public: + const char *type; /* snapshot type, btrfs, zfs, etc.. 
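+ (filled in by each backend constructor: "btrfs", "zfs" or "lvm")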
*/ + POOLMEM *cmd; /* buffer to edit a command */ + POOLMEM *path; /* buffer to edit volume path */ + POOLMEM *fname; /* used for split_path_and_filename */ + POOLMEM *errmsg; /* buffer to edit error message */ + arguments *arg; /* program argument */ + int pnl; /* path length */ + int fnl; /* fname length */ + + snapshot(arguments *a, const char *t): + type(t), + cmd(get_pool_memory(PM_NAME)), + path(get_pool_memory(PM_NAME)), + fname(get_pool_memory(PM_NAME)), + errmsg(get_pool_memory(PM_NAME)), + arg(a), + pnl(0), + fnl(0) + { + }; + + virtual ~snapshot() { + free_pool_memory(cmd); + free_pool_memory(path); + free_pool_memory(fname); + free_pool_memory(errmsg); + }; + + /* Basically, we check parameters here that are + * common to all backends + */ + virtual int mount() { + Dmsg(10, "[%s] Doing mount command\n", type); + if (!arg->volume || !arg->name || !arg->device || !arg->mountpoint) { + Dmsg(10, "volume=%s name=%s device=%s mountpoint=%s\n", + NPRT(arg->volume), NPRT(arg->name), + NPRT(arg->device), NPRT(arg->mountpoint)); + return 0; + } + return 1; + }; + + virtual int unmount() { + Dmsg(10, "[%s] Doing unmount command on %s\n", type, + NPRT(arg->snapmountpoint)); + if (!arg->snapmountpoint) { + Dmsg(10, "snapmountpoint=%s\n", NPRT(arg->snapmountpoint)); + return 0; + } + return 1; + }; + + virtual int support() { + Dmsg(10, "[%s] Doing support on %s (%s)\n", type, NPRT(arg->mountpoint), + NPRT(arg->device)); + if (!arg->fstype || !arg->mountpoint || !arg->device) { + Dmsg(10, "fstype=%s mountpoint=%s device=%s\n", + NPRT(arg->fstype), NPRT(arg->mountpoint), NPRT(arg->device)); + return 0; + } + return 1; + }; + + virtual int check() { + Dmsg(10, "[%s] Doing check on %s\n", type, NPRT(arg->mountpoint)); + if (!arg->mountpoint) { + Dmsg(10, "mountpoint=%s\n", NPRT(arg->mountpoint)); + return 0; + } + return 1; + }; + + virtual int create() { + Dmsg(10, "[%s] Doing create %s\n", type, NPRT(arg->mountpoint)); + if (!arg->mountpoint || !arg->name || !arg->device) { + Dmsg(10, "mountpoint=%s name=%s device=%s\n", + NPRT(arg->mountpoint), NPRT(arg->name), NPRT(arg->device)); + return 0; + } + return 1; + }; + + virtual int del() { + Dmsg(10, "[%s] Doing del %s\n", type, NPRT(arg->volume)); + if (!arg->volume || !arg->name) { + Dmsg(10, "volume=%s name=%s\n", + NPRT(arg->volume), NPRT(arg->name)); + return 0; + } + return 1; + }; + + virtual int list() { + Dmsg(10, "[%s] Doing list on %s\n", type, NPRT(arg->device)); + if (!arg->type || !arg->device || !arg->mountpoint) { + return 0; + } + return 1; + }; + + virtual int subvolumes() { + Dmsg(10, "[%s] Doing subvolumes %s\n", type, NPRT(arg->mountpoint)); + if (!arg->fstype || !arg->device || !arg->mountpoint) { + return 0; + } + return 1; + }; + + /* Function used in create() to know if we mark the error as FATAL */ + int get_error_code() { + Dmsg1(0, "get_error_code = %d\n", (int)arg->fail_job_on_error); + /* 1 is OK */ + if (arg->fail_job_on_error) { + return 0; /* Fatal */ + } + return 2; /* Error */ + }; +}; + +/* Structure used to sort subvolumes with btrfs backend */ +struct vols { + rblink link; + int64_t id; + int count; + char uuid[MAX_NAME_LENGTH]; + char puuid[MAX_NAME_LENGTH]; + char otime[MAX_NAME_LENGTH]; + char path[1]; +}; + +int vols_compare_id(void *item1, void *item2) +{ + vols *vol1 = (vols *) item1; + vols *vol2 = (vols *) item2; + + if (vol1->id > vol2->id) { + return 1; + + } else if (vol1->id < vol2->id) { + return -1; + + } else { + return 0; + } +} + +int vols_compare_uuid(void *item1, void *item2) +{ + vols 
*vol1 = (vols *) item1; + vols *vol2 = (vols *) item2; + + return strcmp(vol1->uuid, vol2->uuid); +} + +/* btrfs backend */ +class btrfs: public snapshot { +public: + btrfs(arguments *arg): snapshot(arg, "btrfs") {}; + + /* With BTRFS, the volume is already mounted */ + int mount() { + if (!snapshot::mount()) { + return 0; + } + split_path_and_filename(arg->volume, &path, &pnl, &fname, &fnl); + fprintf(stdout, "status=1 snapmountpoint=\"%s\" snapdirectory=\"%s\"\n", + arg->volume, path); + return 1; + }; + + int unmount() { + if (!snapshot::unmount()) { + return 0; + } + printf("status=1\n"); + return 1; + }; + + int support() { + if (!snapshot::support()) { + return 0; + } + /* If the fstype is btrfs, snapshots are supported */ +/* + Mmsg(cmd, "%sbtrfs filesystem label \"%s\"", arg->sudo, arg->mountpoint); + if (run_program(cmd, 60, errmsg)) { + printf("status=0 type=btrfs\n"); + return 0; + } + Dmsg(0, "output=%s\n", errmsg); +*/ + printf("status=1 device=\"%s\" type=btrfs\n", arg->mountpoint); + return 1; + }; + + int check() { + if (!snapshot::check()) { + return 0; + } + return 1; + }; + + int create() { + utime_t createdate = 0; + char ed1[50]; + if (!snapshot::create()) { + return 0; + } + + Mmsg(path, "%s/%s", arg->mountpoint, arg->snapdir); + if (!makedir(path)) { + printf("status=%d error=\"Unable to create mountpoint directory %s errno=%d\n", + get_error_code(), + arg->mountpoint, errno); + return 0; + } + + Dmsg(10, "mountpoint=%s snapdir=%s name=%s\n", arg->mountpoint, arg->snapdir, arg->name); + path_concat(path, arg->mountpoint, arg->snapdir, arg->name); + Dmsg(10, "path=%s\n", path); + + /* Create the actual btrfs snapshot */ + Mmsg(cmd, "%sbtrfs subvolume snapshot -r \"%s\" \"%s\"", + arg->sudo, arg->mountpoint, path); + + if (run_program(cmd, 60, errmsg)) { + Dmsg(10, "Unable to create snapshot %s %s\n", arg->mountpoint, errmsg); + strip_quotes(errmsg); + printf("status=%d error=\"Unable to create snapshot %s\"\n", + get_error_code(), + errmsg); + return 0; + } + + /* On SLES12 btrfs 3.16, commands on "/" returns "doesn't belong to btrfs mount point" */ + Mmsg(cmd, "%sbtrfs subvolume show \"%s\"", arg->sudo, path); + if (run_program_full_output(cmd, 60, errmsg)) { + Dmsg(10, "Unable to display snapshot stats %s %s\n", arg->mountpoint, errmsg); + + } else { + /* TODO: Check that btrfs subvolume show is reporting "Creation time:" */ + char *p = strstr(errmsg, "Creation time:"); + if (p) { + p += strlen("Creation time:"); + skip_spaces(&p); + createdate = str_to_utime(p); + + } else { + Dmsg(10, "Unable to find Creation time on %s %s\n", arg->mountpoint, errmsg); + } + } + + if (!createdate) { + createdate = time(NULL); + } + printf("status=1 volume=\"%s\" createtdate=%s type=btrfs\n", + path, edit_uint64(createdate, ed1)); + return 1; + }; + + int del() { + if (!snapshot::del()) { + return 0; + } + + Mmsg(cmd, "%sbtrfs subvolume delete \"%s\"", arg->sudo, arg->volume); + if (run_program(cmd, 300, errmsg)) { + Dmsg(10, "Unable to delete snapshot %s\n", errmsg); + strip_quotes(errmsg); + printf("status=0 type=btrfs error=\"%s\"\n", errmsg); + return 0; + } + printf("status=1\n"); + return 1; + }; + + /* btrfs subvolume list -u -q -s /tmp/regress/btrfs + * ID 259 gen 52 top level 5 parent_uuid - uuid baf4b5d7-28d0-9b4a-856e-36e6fd4fbc96 path .snapshots/aaa + */ + int list() { + char *p, *p2, *end, *path; + char id[50], day[50], hour[50]; + struct vols *v = NULL, *v2; + rblist *lst; + + if (!snapshot::list()) { + return 0; + } + Mmsg(cmd, "%sbtrfs subvolume list -u -q -o -s 
\"%s\"", arg->sudo, arg->mountpoint); + if (run_program_full_output(cmd, 300, errmsg)) { + Dmsg(10, "Unable to list snapshot %s\n", errmsg); + strip_quotes(errmsg); + printf("status=0 type=btrfs error=\"%s\"\n", errmsg); + return 0; + } + + lst = New(rblist(v, &v->link)); + + /* ID 259 gen 52 top level 5 parent_uuid - uuid baf4b5d7-28d0-9b4a-856e-36e6fd4fbc96 path .snapshots/aaa */ + for (p = errmsg; p && *p ;) { + Dmsg(20, "getting subvolumes from %s", p); + + /* Replace final \n by \0 to have strstr() happy */ + end = strchr(p, '\n'); + + /* If end=NULL, we are at the end of the buffer (without trailing \n) */ + if (end) { + *end = 0; + } + + /* Each line is supposed to start with "ID", and end with "path" */ + bool ok = false; + if (sscanf(p, "ID %50s ", id) == 1) { /* We found ID, look for path */ + p2 = strstr(p, "path "); + if (p2) { + path = p2 + strlen("path "); + v = (struct vols*) malloc(sizeof (vols) + strlen(path) + 1); + *v->otime = *v->uuid = *v->puuid = 0; + v->id = str_to_int64(id); + v->count = 0; + strcpy(v->path, path); + + p2 = strstr(p, "otime"); + if (p2 && sscanf(p2, "otime %50s %50s", day, hour) == 2) { + bsnprintf(v->otime, sizeof(v->otime), "%s %s", day, hour); + } + + p2 = strstr(p, "parent_uuid "); + if (p2 && sscanf(p2, "parent_uuid %127s", v->puuid) == 1) { + + p2 = strstr(p, " uuid "); + if (p2 && sscanf(p2, " uuid %127s", v->uuid) == 1) { + + v2 = (struct vols *)lst->insert(v, vols_compare_uuid); + if (v2 != v) { + v2->count++; + free(v); + } + ok = true; + /* Replace final \n by \0 to have strstr() happy */ + Dmsg(10, "puuid=%s uuid=%s path=%s\n", v2->puuid, v2->uuid, v2->path); + } + } + } + } + if (!ok) { + Dmsg(10, "Unable to decode \"%s\" line\n", p); + } + if (end) { + *end = '\n'; + end++; + } + /* If end==NULL, we stop */ + p = end; + } + + foreach_rblist(v, lst) { + char *name = v->path; + int len = strlen(arg->snapdir); + if ((p = strstr(v->path, arg->snapdir))) { + name = p + len + ((arg->snapdir[len-1] == '/') ? 0 : 1); + } + printf("volume=\"%s%s%s\" name=\"%s\" device=\"%s\" createdate=\"%s\" type=\"btrfs\"\n", + arg->mountpoint, + arg->mountpoint[strlen(arg->mountpoint) - 1] == '/' ? 
"": "/", + v->path, + name, + arg->mountpoint, + v->otime + ); + } + + delete lst; + return 1; + }; + + void scan_subvolumes(char *buf, rblist *lst) { + char *p, *end; + char id[50]; + bool ok; + struct vols *elt1 = NULL, *elt2 = NULL; + + /* btrfs subvolume list /var/lib/pacman/ + * ID 349 gen 383 top level 5 path test + * ID 354 gen 391 cgen 391 top level 5 otime 2014-11-05 17:49:07 path .snapshots/aa + */ + for (p = buf; p && *p ;) { + Dmsg(20, "getting subvolumes from %s", p); + + /* Replace final \n by \0 to have strstr() happy */ + end = strchr(p, '\n'); + /* If end=NULL, we are at the end of the buffer (without trailing \n) */ + if (end) { + *end = 0; + } + + /* Each line is supposed to start with "ID", and end with "path" */ + ok = (sscanf(p, "ID %50s ", id) == 1); + if (ok) { /* We found ID, look for path */ + p = strstr(p, "path "); + if (p) { + p += strlen("path "); + + elt1 = (struct vols *) malloc(sizeof(struct vols) + strlen(p) + 1); + elt1->id = str_to_int64(id); + elt1->count = 0; + strcpy(elt1->path, p); + Dmsg(10, "Found path %s for id %s\n", elt1->path, id); + elt2 = (struct vols *)lst->insert(elt1, vols_compare_id); + if (elt2 != elt1) { + elt2->count++; + free(elt1); + } + } else { + Dmsg(10, "Unable to find the path in this line\n"); + } + + } else { + Dmsg(10, "Unable to decode %s line\n", p); + } + if (end) { + *end = '\n'; + end++; + } + /* If end==NULL, we stop */ + p = end; + } + }; + + /* List subvolumes, they may not be listed by mount */ + int subvolumes() { + rblist *lst; + struct stat sp; + struct vols *elt1 = NULL; + char ed1[50]; + + Mmsg(cmd, "%sbtrfs subvolume show \"%s\"", arg->sudo, arg->mountpoint); + if (run_program_full_output(cmd, 300, errmsg)) { + Dmsg(10, "Unable to get information %s\n", errmsg); + strip_quotes(errmsg); + printf("status=0 type=btrfs error=\"%s\"\n", errmsg); + return 0; + } + + /* TODO: Very week way to analyse FS */ + if (!strstr(errmsg, "is btrfs root")) { + printf("status=0 type=btrfs error=\"Not btrfs root fs\"\n"); + return 0; + } + + Mmsg(cmd, "%sbtrfs subvolume list -s \"%s\"", arg->sudo, arg->mountpoint); + if (run_program_full_output(cmd, 300, errmsg)) { + Dmsg(10, "Unable to list snapshot snapshot %s\n", errmsg); + strip_quotes(errmsg); + printf("status=0 type=btrfs error=\"%s\"\n", errmsg); + return 0; + } + + lst = New(rblist(elt1, &elt1->link)); + scan_subvolumes(errmsg, lst); + + Mmsg(cmd, "%sbtrfs subvolume list \"%s\"", arg->sudo, arg->mountpoint); + if (run_program_full_output(cmd, 300, errmsg)) { + Dmsg(10, "Unable to list subvolume %s\n", errmsg); + strip_quotes(errmsg); + printf("status=0 type=btrfs error=\"%s\"\n", errmsg); + delete lst; + return 0; + } + scan_subvolumes(errmsg, lst); + + foreach_rblist(elt1, lst) { + if (elt1->count > 0) { /* Looks to be a snapshot, we saw two entries */ + continue; + } + + path_concat(path, arg->mountpoint, elt1->path, NULL); + + if (stat(path, &sp) == 0) { + printf("dev=%s mountpoint=\"%s\" fstype=btrfs\n", + edit_uint64(sp.st_dev, ed1), path); + + } else { + Dmsg(10, "Unable to stat %s (%s)\n", elt1->path, path); + } + } + delete lst; + return 1; + }; +}; + +/* Create pool + * zpool create pool /dev/device + * zfs create pool/eric + * zfs set mountpoint=/mnt test/eric + * zfs mount pool/eric + */ +/* zfs backend */ +class zfs: public snapshot { +public: + zfs(arguments *arg): snapshot(arg, "zfs") { + arg->snapdir = ".zfs/snapshot"; + }; + + /* With ZFS, the volume is already mounted + * but on linux https://github.com/zfsonlinux/zfs/issues/173 + * we need to use the 
mount command. + * TODO: Adapt the code for solaris + */ + int mount() { + struct stat sp; + + if (!snapshot::mount()) { + return 0; + } + + path_concat(path, arg->mountpoint, arg->snapdir, arg->name); + + if (stat(path, &sp) != 0) { + /* See if we can change the snapdir attribute */ + Mmsg(cmd, "%szfs set snapdir=visible \"%s\"", arg->sudo, arg->device); + if (run_program(cmd, 60, errmsg)) { + Dmsg(10, "Unable to change the snapdir attribute %s %s\n", arg->device, errmsg); + strip_quotes(errmsg); + printf("status=0 error=\"Unable to mount snapshot %s\"\n", errmsg); + return 0; + } + if (stat(path, &sp) != 0) { + Dmsg(10, "Unable to get the snapdir %s %s\n", arg->snapdir, arg->device); + strip_quotes(errmsg); + printf("status=0 error=\"Unable to mount snapshot, no snapdir %s\"\n", arg->snapdir); + return 0; + } + } +#if 0 /* On linux, this function is broken for now */ + makedir(path); + Mmsg(cmd, "%smount -t %s \"%s\" \"%s\"", arg->sudo, arg->fstype, arg->volume, path); + if (run_program(cmd, 60, errmsg)) { + Dmsg(10, "Unable to create mount snapshot %s %s\n", arg->volume, errmsg); + strip_quotes(errmsg); + printf("status=0 error=\"Unable to mount snapshot %s\"\n", errmsg); + return 0; + } + +#endif + fprintf(stdout, "status=1 snapmountpoint=\"%s\" snapdirectory=\"%s/%s\"\n", + path, arg->mountpoint, arg->snapdir); + return 1; + }; + + /* No need to unmount something special */ + int unmount() { + printf("status=1\n"); + return 1; + }; + + int support() { + if (!snapshot::support()) { + return 0; + } + Mmsg(cmd, "%szfs list -H -o name \"%s\"", arg->sudo, arg->mountpoint); + if (run_program(cmd, 60, errmsg)) { + Dmsg(10, "Unable to get device %s %s\n", arg->mountpoint, errmsg); + strip_quotes(errmsg); + printf("status=0 error=\"Unable to get device %s\"\n", errmsg); + return 0; + } + strip_trailing_junk(errmsg); + /* If the fstype is zfs, snapshots are supported */ + printf("status=1 device=\"%s\" type=zfs\n", errmsg); + return 1; + }; + + int create() { + char ed1[50]; + + if (!snapshot::create()) { + return 0; + } + + Mmsg(path, "%s@%s", arg->device, arg->name); + + /* Create the actual zfs snapshot */ + Mmsg(cmd, "%szfs snapshot \"%s\"", arg->sudo, path); + + if (run_program(cmd, 60, errmsg)) { + Dmsg(10, "Unable to create snapshot %s %s\n", arg->device, errmsg); + strip_quotes(errmsg); + printf("status=%d error=\"Unable to create snapshot %s\"\n", + get_error_code(), + errmsg); + return 0; + } + + Mmsg(cmd, "%szfs get -p creation \"%s\"", arg->sudo, path); + if (run_program_full_output(cmd, 60, errmsg)) { + Dmsg(10, "Unable to display snapshot stats %s %s\n", arg->device, errmsg); + strip_quotes(errmsg); + printf("status=%d error=\"Unable to get snapshot info %s\"\n", + get_error_code(), + errmsg); + return 0; + } + + /* TODO: Check that zfs get is reporting "creation" time */ + Mmsg(cmd, "NAME PROPERTY VALUE SOURCE\n%s creation %%s", path); + if (sscanf(errmsg, cmd, ed1) == 1) { + Dmsg(10, "Found CreateTDate=%s\n", ed1); + printf("status=1 volume=\"%s\" createtdate=%s type=zfs\n", + path, ed1); + + } else { + printf("status=1 volume=\"%s\" createtdate=%s type=zfs\n", + path, edit_uint64(time(NULL), ed1)); + } + return 1; + }; + + int del() { + if (!snapshot::del()) { + return 0; + } + + Mmsg(cmd, "%szfs destroy \"%s\"", arg->sudo, arg->volume); + if (run_program(cmd, 300, errmsg)) { + Dmsg(10, "Unable to delete snapshot %s\n", errmsg); + strip_quotes(errmsg); + printf("status=0 type=zfs error=\"%s\"\n", errmsg); + return 0; + } + printf("status=1\n"); + return 1; + }; + + /* zfs list 
-t snapshot + * test/eric@snap1 17K - 21K - + * test/eric@snap2 17K - 21K - + * + * it is possible to change fields to display with -o + */ + int list() { + POOL_MEM buf2; + if (!snapshot::list()) { + return 0; + } + + Mmsg(cmd, "%szfs list -t snapshot -H -o name,used,creation", arg->sudo); + /* rpool@basezone_snap00 0 Fri Mar 6 9:55 2015 */ + if (run_program_full_output(cmd, 60, errmsg)) { + Dmsg(10, "Unable to list snapshot %s\n", errmsg); + strip_quotes(errmsg); + printf("status=0 error=\"Unable to list snapshot %s\"\n", errmsg); + return 0; + } + + int i = 1, Day, Year, Hour, Min; + char DayW[50], Month[50], CreateDate[50]; + const char *buf[4]; + + buf[0] = errmsg; + for (char *p = errmsg; p && *p ; p++) { + if (*p == '\n') { + *p = 0; + /* Flush the current one */ + if (!arg->device || strcmp(arg->device, buf[0]) == 0) { + + if (sscanf(buf[3], "%s %s %d %d:%d %d", + DayW, Month, &Day, &Hour, &Min, &Year) == 6) + { + /* Get a clean iso format */ + for (int j=1; j <= 12 ; j++) { + if (strcmp(Month, Months[j]) == 0) { + snprintf(Month, sizeof(Month), "%02d", j); + } + } + snprintf(CreateDate, sizeof(CreateDate), "%d-%s-%02d %02d:%02d:00", + Year, Month, Day, Hour, Min); + buf[3] = CreateDate; + } + printf("volume=\"%s@%s\" name=\"%s\" device=\"%s\" size=\"%s\" " + "createdate=\"%s\" status=1 error=\"\" type=\"zfs\"\n", + buf[0], buf[1], buf[1], buf[0], buf[2], buf[3]); + } else { + Dmsg(10, "Do not list %s@%s\n", buf[0], buf[1]); + } + + i = 1; + buf[0] = p+1; + buf[1] = buf[2] = buf[3] = ""; + + } else if ((*p == '\t' || *p == '@') && i < 4) { + buf[i++] = p+1; + *p = 0; + } + } + + return 1; + }; +}; + +/* Structure of the LVS output */ +typedef struct { + const char *name; + int pos; +} Header; + +/* -1 is mandatory, -2 is optionnal */ +static Header lvs_header[] = { + /* KEEP FIRST */ + {"Path", -1}, /* Volume Path: /dev/ubuntu-vg/root */ + {"DMPath",-2}, /* Device mapper Path /dev/mapper/ubuntu--vg-root */ + {"LV", -1}, /* Volume Name: root */ + {"Attr", -1}, /* Attributes: -wi-ao--- */ + {"KMaj", -1}, /* Kernel Major: 252 */ + {"KMin", -1}, /* Kernel Minor: 0 */ + {"LSize", -1}, /* Size (b) */ + {"#Seg", -1}, /* Number of segments */ + {"Origin",-1}, + {"OSize", -1}, + {"Snap%", -1}, + {"Time", -1}, /* Creation date */ + {NULL, -1} +}; + +static Header vgs_header[] = { + /* KEEP FIRST */ + {"VG", -1}, /* VG Name: vgroot */ + {"VSize", -1}, /* Size */ + {"VFree", -1}, /* Space left */ + {"#Ext", -1}, /* Nb Ext */ + {"Free", -1}, /* Nb Ext free */ + {"Ext", -1}, /* Ext size */ + {NULL, -1} +}; + +/* LVM backend, not finished */ +class lvm: public snapshot { +public: + alist *lvs, *vgs; + int lvs_nbelt, vgs_nbelt; + + lvm(arguments *arg): + snapshot(arg, "lvm"), lvs(NULL), vgs(NULL), lvs_nbelt(0), + vgs_nbelt(0) {}; + + ~lvm() { + free_header(lvs, lvs_nbelt); + free_header(vgs, vgs_nbelt); + }; + + void free_header(alist *lst, int nbelt) { + if (lst) { + char **current; + /* cleanup at the end */ + foreach_alist(current, lst) { + for (int j=0; j < nbelt ; j++) { + Dmsg(50, "current[%d] = %s\n", j, current[j]); + free(current[j]); + } + free(current); + } + delete lst; + } + }; + + char *get_vg_from_lv_path(char *path, char *vg, int max) { + char *p; + + if (!path) { + return NULL; + } + + /* Make a copy of the path */ + bstrncpy(vg, path, max); + path = vg; + + if (strncmp(path, "/dev/", 5) != 0) { + Dmsg(10, "Strange path %s\n", path); + return NULL; + } + path += 5; /* skip /dev/ */ + + /* End the string at the last / */ + p = strchr(path, '/'); + if (!p) { + Dmsg(10, "Strange end 
of path %s\n", path); + return NULL; + } + *p = 0; + + return path; + }; + + /* Report the space available on VG */ + int64_t get_space_available(char *lv) { + char buf[512]; + char *vgname = get_vg_from_lv_path(get_lv_value(lv, "Path"), + buf, sizeof(buf)); + + if (vgname) { + char *s = get_vg_value(vgname, "VFree"); + if (s) { + return str_to_int64(s); + + } else { + Dmsg(10, "Unable to get VFree\n"); + } + + } else { + Dmsg(10, "Unable to get VG from %s\n", lv); + } + return -1; + }; + + /* return vg_ssd-pacman */ + char *get_lv_from_dm(char *dm, POOLMEM **ret, uint32_t *major, uint32_t *minor) { + struct stat sp; + char *p, *start; + uint32_t maj, min; + + /* Looks to be a device mapper, need to convert the name */ + if (strncmp(dm, "/dev/dm", strlen("/dev/dm")) != 0) { + return NULL; + } + if (stat(dm, &sp) < 0) { + return NULL; + } + + Mmsg(cmd, "%sdmsetup ls", arg->sudo); + if (run_program_full_output(cmd, 60, errmsg)) { + Dmsg(10, "Unable to query dmsetup %s\n", errmsg); + return NULL; + } + /* vg_ssd-pacman-real (254:1) + * vg_ssd-pacman (254:0) + * or + * vg_ssd-pacman-real (254, 1) + * vg_ssd-pacman-real (254, 1) + */ + *ret = check_pool_memory_size(*ret, strlen(errmsg)+1); + for (start = p = errmsg; *p ; p++) { + if (*p == '\n') { + *p = 0; + if (sscanf(start, "%s (%d:%d)", *ret, &maj, &min) == 3 || + sscanf(start, "%s (%d, %d)", *ret, &maj, &min) == 3) + { + if (maj == major(sp.st_rdev) && + min == minor(sp.st_rdev)) + { + return *ret; + } + } + start = p+1; + } + } + return NULL; + }; + + /* The LV path from name or dmpath */ + char **get_lv(char *lv) { + char **elt = NULL, *dm = NULL; + int path = get_value_pos(lvs_header, "Path"); + int dmpath = get_value_pos(lvs_header, "DMPath"); + int kmaj = get_value_pos(lvs_header, "KMaj"); + int kmin = get_value_pos(lvs_header, "KMin"); + uint32_t min = 0, maj = 0; + POOLMEM *buf = get_pool_memory(PM_FNAME); + + if (!lv || (path < 0 && dmpath < 0)) { + Dmsg(10, "Unable to get LV parameters\n"); + goto bail_out; + } + + dm = get_lv_from_dm(lv, &buf, &maj, &min); + Dmsg(50, "%s = get_lv_from_dm(%s, %s, %d, %d)\n", dm, lv, buf, maj, min); + + /* HERE: Need to loop over LVs */ + foreach_alist(elt, lvs) { + if (path > 0 && strcmp(NPRT(elt[path]), lv) == 0) { + goto bail_out; + } + + if (dmpath > 0 && strcmp(NPRT(elt[dmpath]), lv) == 0) { + goto bail_out; + } + + /* Try by Minor/Major if comming from device mapper */ + if ((maj && kmaj && str_to_uint64(elt[kmaj]) == maj) && + (min && kmin && str_to_uint64(elt[kmin]) == min)) + { + goto bail_out; + } + + /* Find if /dev/mapper/vg_ssd-pacman matches vg_ssd-pacman */ + if (dm && dmpath && strlen(elt[dmpath]) > strlen("/dev/mapper/")) { + if (strcmp(elt[dmpath] + strlen("/dev/mapper/"), dm) == 0) { + goto bail_out; + } + } + + /* Special case for old LVM where mapper path doesn't exist */ + if (dmpath < 0 && strncmp("/dev/mapper/", lv, 12) == 0) { + + POOLMEM *buf2 = get_memory(strlen(elt[path])*2+10); + pm_strcpy(buf2, "/dev/mapper/"); + + char *d = buf2 + 12; /* Skip /dev/mapper/ */ + bool ret = false; + + /* Keep the same path, but escape - to -- and / to - */ + for (char *p = elt[path]+5; *p ; p++) { + if (*p == '-') { + *d++ = *p; + } + /* Escape / to - if needed */ + *d++ = (*p == '/') ? 
'-' : *p; + } + *d = 0; + ret = (strcmp(buf2, lv) == 0); + free_pool_memory(buf2); + + if (ret) { + goto bail_out; + } + } + } + Dmsg(10, "%s not found in lv list\n", lv); + return NULL; /* not found */ + + bail_out: + if (buf) { + free_pool_memory(buf); + } + return elt; + }; + + /* Report LV Size in bytes */ + int64_t get_lv_size(char *name) { + char **elt = get_lv(arg->device); + int sp; + + if (!elt) { + return -1; + } + + sp = get_value_pos(lvs_header, "LSize"); + /* Check if we have enough space on the VG */ + return str_to_int64(elt[sp]); + }; + + char *get_lv_value(char *name, const char *value) { + return get_value(lvs_header, lvs_nbelt, lvs, name, value); + }; + + int get_value_pos(Header *header, const char *value) { + for (int i = 0; header[i].name ; i++) { + if (strcmp(header[i].name, value) == 0) { + return header[i].pos; + } + } + return -1; /* not found */ + }; + + /* Return an element value */ + char *get_value(Header *header, int nbelt, alist *lst, + char *name, const char *value) { + char **elt; + int pos = get_value_pos(header, value); + int id = header[0].pos; /* position name */ + + if (pos < 0 || id == -1) { + return NULL; + } + /* Loop over elements we have, and return the value that is asked */ + foreach_alist(elt, lst) { + if (strcmp(NPRT(elt[id]), name) == 0) { + return elt[pos]; + } + } + return NULL; + }; + + /* Return a parameter for a VolumeGroup */ + char *get_vg_value(char *vg, const char *value) { + return get_value(vgs_header, vgs_nbelt, vgs, vg, value); + }; + + /* Get snapshot size, look in config file if needed */ + int get_lvm_snapshot_size(char *lv) { + char *tmp, **elt; + uint64_t s, size; + int sp; + alist *lst; + + int pos = arg->ini.get_item("lvm_snapshot_size"); + if (!arg->ini.items[pos].found) { + return -1; /* Nothing specified, stop here */ + } + + lst = arg->ini.items[pos].val.alistval; + if (lst) { + /* /dev/ubuntu-vg/root:100M + * /dev/ubuntu-vg/home:10% + * /dev/ubuntu-vg/var:200GB + */ + foreach_alist(tmp, lst) { + char *p = strchr(tmp, ':'); + + /* Check the LV name */ + if (p && strncmp(tmp, lv, p - tmp) != 0) { + continue; + } + + /* This is a percent */ + if (strchr(p+1, '%') != NULL) { + Dmsg(10, "Found a %%\n"); + s = str_to_int64(p+1); + + /* Compute the requested size */ + sp = get_value_pos(lvs_header, "LSize"); + elt = get_lv(lv); + size = str_to_int64(elt[sp]); + return size * (s / 100); + } + + /* It might be a size */ + if (size_to_uint64(p+1, strlen(p+1), &s)) { + Dmsg(10, "Found size %ld\n", s); + return s; + } + Dmsg(10, "Unable to use %s\n", tmp); + return -1; + } + } + return -1; + }; + + int create() { + char *name, *ts, buf[128], *lvname; + int64_t size, ssize, maxsize; + if (!snapshot::create()) { + return 0; + } + + if (!parse_lvs_output() || + !parse_vgs_output()) + { + printf("status=%d error=\"Unable parse lvs or vgs output\"\n", + get_error_code()); + return 0; + } + + path_concat(path, arg->mountpoint, arg->snapdir, arg->name); + + if (!makedir(path)) { + printf("status=%d error=\"Unable to create mountpoint directory %s errno=%d\n", + get_error_code(), + arg->mountpoint, errno); + return 0; + } + + name = get_lv_value(arg->device, "LV"); + size = get_lv_size(arg->device); + if (size < 0) { + printf("status=%d error=\"Unable to get lv size\"\n", + get_error_code()); + return 0; + } + + ssize = get_lvm_snapshot_size(arg->device); + if (ssize > 0) { + size = ssize; + } else { + size = size / 10; /* Ask to get 10% */ + } + + size = (size / 512L) * 512L; + + lvname = get_lv_value(arg->device, "Path"); + maxsize = 
get_space_available(lvname); + Dmsg(10, "maxsize=%ld size=%ld\n", maxsize, size); + + if (maxsize < 0) { + printf("status=%d error=\"Unable to detect maxsize\" type=lvm\n", + get_error_code()); + return 0; + } + + if (size > maxsize) { + char ed1[50], ed2[50]; + printf("status=%d error=\"Not enough space left on VG %sB, " + "%sB is required\" type=lvm\n", + get_error_code(), + edit_uint64_with_suffix(maxsize, ed1), + edit_uint64_with_suffix(size, ed2)); + return 0; + } + + /* TODO: Need to get the volume name and add the snapshot + * name at the end + */ + Mmsg(cmd, "%slvcreate -s -n \"%s_%s\" -L %lldb \"%s\"", + arg->sudo, name, arg->name, size, arg->device); + if (run_program(cmd, 60, errmsg)) { + Dmsg(10, "Unable to create snapshot %s %s\n", arg->name, errmsg); + strip_quotes(errmsg); + printf("status=0 error=\"Unable to create snapshot %s\"\n", errmsg); + return 0; + } + if (!parse_lvs_output()) { + Dmsg(10, "Unable to parse lvm output after snapshot creation\n"); + printf("status=0 error=\"Unable to parse lvs\"\n"); + return 0; + } + + Mmsg(cmd, "%s_%s", arg->device, arg->name); + ts = get_lv_value(cmd, "Time"); + if (!ts) { + Dmsg(10, "Unable to find snapshot in lvs output\n"); + bstrftimes(buf, sizeof(buf), time(NULL)); + ts = buf; + } + Dmsg(10, "status=1 volume=\"%s_%s\" createdate=\"%s\" type=lvm\n", + arg->device, arg->name, ts); + printf("status=1 volume=\"%s_%s\" createdate=\"%s\" type=lvm\n", + arg->device, arg->name, ts); + return 1; + }; + + int del() { + if (!snapshot::del()) { + return 0; + } + Mmsg(cmd, "%slvremove -f \"%s\"", + arg->sudo, arg->volume); + + if (run_program(cmd, 60, errmsg)) { + Dmsg(10, "Unable to delete snapshot %s %s\n", arg->name, errmsg); + strip_quotes(errmsg); + printf("status=0 error=\"Unable to delete snapshot %s\"\n", errmsg); + return 0; + } + + printf("status=1\n"); + return 1; + }; + + int check() { + if (!snapshot::check()) { + return 0; + } + parse_vgs_output(); + for (int i = 0; vgs_header[i].name ; i++) { + if (vgs_header[i].pos == -1) { + printf("status=0 error=\"Unable to use output of vgs command." + " %s is missing.\"\n", + vgs_header[i].name); + return 0; + } + } + + parse_lvs_output(); + for (int i = 0; lvs_header[i].name ; i++) { + if (lvs_header[i].pos == -1) { + printf("status=0 error=\"Unable to use output of lvs command." + " %s is missing.\"\n", + lvs_header[i].name); + return 0; + } + } + return 1; + }; + + void strip_double_slashes(char *fname) + { + char *p = fname; + while (p && *p) { + p = strpbrk(p, "/\\"); + if (p != NULL) { + if (IsPathSeparator(p[1])) { + strcpy(p, p+1); + } + p++; + } + } + }; + + int mount() { + if (!snapshot::mount()) { + return 0; + } + + path_concat(path, arg->mountpoint, arg->snapdir, arg->name); + + if (!makedir(path)) { + printf("status=0 error=\"Unable to create mount point %s errno=%d\"\n", + path, errno); + return 0; + } + + Mmsg(cmd, "%smount -o ro \"%s\" \"%s\"", arg->sudo, arg->volume, path); + if (run_program(cmd, 60, errmsg) != 0) { + Dmsg(10, "Unable to mount volume. 
ERR=%s\n", errmsg); + strip_quotes(errmsg); + printf("status=0 error=\"Unable to mount the device %s\"\n", errmsg); + return 0; + } + + Dmsg(10, "status=1 snapmountpoint=\"%s\" snapdirectory=\"%s/%s\"\n", + path, arg->mountpoint, arg->snapdir); + printf("status=1 snapmountpoint=\"%s\" snapdirectory=\"%s/%s\"\n", + path, arg->mountpoint, arg->snapdir); + return 1; + }; + + int unmount() { + int ret, retry = arg->retry; + + if (!snapshot::unmount()) { + return 0; + } + + Mmsg(cmd, "%sumount \"%s\"", arg->sudo, arg->snapmountpoint); + do { + ret = run_program(cmd, 60, errmsg); + if (ret != 0) { + Dmsg(10, "Unable to unmount the directory. ERR=%s\n", errmsg); + sleep(3); + } + } while (ret != 0 && retry-- > 0); + + if (ret != 0) { + Dmsg(10, "Unable to mount volume. ERR=%s\n", errmsg); + strip_quotes(errmsg); + printf("status=0 error=\"Unable to umount the device %s\"\n", errmsg); + return 0; + } + + retry = arg->retry; + do { + Dmsg(10, "Trying to delete mountpoint %s\n", arg->snapmountpoint); + if ((ret = rmdir(arg->snapmountpoint)) != 0) { + sleep(3); + } + } while (retry-- > 0 && ret != 0); + + if (ret != 0) { + berrno be; + Dmsg(10, "Unable to delete mountpoint after unmount\n"); + printf("error=\"Unable to delete mountpoint after unmount errno=%s\"", + be.bstrerror(errno)); + } + printf(" status=1\n"); + return 1; + }; + + /* TODO: Here we need to check LVM settings */ + int support() { + char **elt; + int mp; + + if (!snapshot::support()) { + return 0; + } + if (!check()) { + return 0; + } + + elt = get_lv(arg->device); + + if (!elt) { + Dmsg(10, "Not detected as LVM\n"); + printf("status=0 error=\"Not detected as LVM\"\n"); + return 0; + } + mp = get_value_pos(lvs_header ,"Path"); + printf("status=1 device=\"%s\" type=lvm\n", elt[mp]); + return 1; + }; + + /* count the number of column in the output */ + int count_col(char *l, char sep) { + int nb=0; + for (char *p = l ; *p ; p++) { + if (*p == sep) { + nb++; + } + } + return nb; + }; + + /* Decode the Attr field */ + int decode_attr(char *l) { + /* + * Volume type: (m)irrored, (M)irrored without initial sync, + * (o)rigin, (O)rigin with merging snapshot, (r)aid, (R)aid + * without initial sync, (s)napshot, merging (S)napshot, + * (p)vmove, (v)irtual, mirror or raid (i)mage, mirror or raid + * (I)mage out-of-sync, mirror (l)og device, under (c)onversion, + * thin (V)olume, (t)hin pool, (T)hin pool data, raid or thin + * pool m(e)tadata + */ + + return 0; + }; + + bool parse_vgs_output() { + Mmsg(cmd, "%svgs -o vg_all --separator=; --units b --nosuffix", arg->sudo); + if (vgs) { + free_header(vgs, vgs_nbelt); + vgs_nbelt=0; + } + vgs = New(alist(10, not_owned_by_alist)); + if (!parse_output(cmd, vgs, &vgs_nbelt, vgs_header)) { + return false; + } + return true; + }; + + bool parse_lvs_output() { + Mmsg(cmd, "%slvs -o lv_all --separator=; --units b --nosuffix", arg->sudo); + if (lvs) { + free_header(lvs, lvs_nbelt); + lvs_nbelt=0; + } + lvs = New(alist(10, not_owned_by_alist)); + if (!parse_output(cmd, lvs, &lvs_nbelt, lvs_header)) { + return false; + } + return true; + }; + + /* Function to parse LVM command output */ + bool parse_output(char *cmd, alist *ret, int *ret_nbelt, Header *hdr) { + char *p; + int i=0; + int pos=0; + int nbelt=0; + char buf[2048]; /* Size for a single line */ + bool header_done=false; + + if (run_program_full_output(cmd, 60, errmsg)) { + strip_quotes(errmsg); + Dmsg(10, "Unable to run lvs. 
ERR=%s\n", errmsg); + return false; + } + + char **current = NULL; + + for (p = errmsg; *p ; p++) { + if (*p == ';') { /* We have a separator, handle current value */ + buf[i]=0; + if (!header_done) { + nbelt++; /* Keep the number of element in the line */ + + /* Find if we need this value, and where to store it */ + for (int j=0; hdr[j].name ; j++) { + if (strcasecmp(buf, hdr[j].name) == 0) { + hdr[j].pos = pos; + break; + } + } + + } else { + if (pos == 0) { + /* First item, need to allocate new array */ + current = (char **)malloc(nbelt * sizeof(char *) + 1); + memset(current, 0, nbelt * sizeof(char *) + 1); + ret->append(current); + } + /* Keep the current value */ + current[pos] = bstrdup(buf); + } + pos++; + i = 0; + } else if (*p == '\n') { + /* We deal with a new line, so the header is done (if in) */ + header_done = true; + i = 0; + pos = 0; + + } else if (i < (int)sizeof(buf)) { + buf[i++] = *p; + + } else { + Dmsg(10, "Output too big !!! %s\n", errmsg); + break; + } + } + *ret_nbelt = nbelt; + return true; + }; + + int list() { + char **elt, **elt2 = NULL; + const char *err = NULL; + int p_attr, p_path, p_origin, p_time, p_size; + POOLMEM *p, *f, *d; + int fnl, pnl, status; + + if (!snapshot::list()) { + return false; + } + + if (!parse_lvs_output()) { + return false; + } + + p_attr = get_value_pos(lvs_header, "Attr"); + p_path = get_value_pos(lvs_header, "Path"); + p_time = get_value_pos(lvs_header, "Time"); + p_size = get_value_pos(lvs_header, "Snap%"); + p_origin = get_value_pos(lvs_header, "Origin"); + + if (p_time < 0 || p_origin < 0) { + printf("status=1 error=\"Unable to get snapshot Origin from lvs command\"\n"); + return false; + } + + p = get_pool_memory(PM_FNAME); + f = get_pool_memory(PM_FNAME); + d = get_pool_memory(PM_FNAME); + + elt2 = get_lv(arg->device); + + /* TODO: We need to get the device name from the mount point */ + foreach_alist(elt, lvs) { + char *attr = elt[p_attr]; + /* swi-a-s-- */ + if (attr[0] == 's') { + if (attr[4] == 'I') { + /* 5 State: (a)ctive, (s)uspended, (I)nvalid snapshot, invalid (S)uspended + * snapshot, snapshot (m)erge failed, suspended snapshot (M)erge + * failed, mapped (d)evice present without tables, mapped device + * present with (i)nactive table, (X) unknown + */ + status = 0; + err = "Invalid snapshot"; + } else { + status = 1; + err = ""; + } + + split_path_and_filename(elt[p_path], &p, &pnl, &f, &fnl); + Mmsg(d, "%s%s", p, elt[p_origin]); + + if ((!arg->device || strcmp(arg->device, d) == 0) || + (elt2 && strcmp(elt2[p_path], d) == 0)) + { + /* On LVM, the name is LV_SnapshotName, we can strip the LV_ if we find it */ + Mmsg(p, "%s_", d); /* /dev/mapper/vg_ssd/test_ */ + if (strncmp(p, elt[p_path], strlen(p)) == 0) { + pm_strcpy(f, elt[p_path] + strlen(p));/* test_MySnapshot_2020.. 
=> MySnapshot_2020 */ + } + + printf("volume=\"%s\" device=\"%s\" name=\"%s\" createdate=\"%s\" size=\"%s\" " + "status=%d error=\"%s\" type=lvm\n", + elt[p_path], d, f, elt[p_time], elt[p_size], status, err); + } + } + } + free_pool_memory(p); + free_pool_memory(f); + free_pool_memory(d); + return true; + }; +}; + +/* The simulator is using a simple symlink */ +class simulator: public snapshot { +public: + simulator(arguments *arg): snapshot(arg, "simulator") {}; + + int mount() { + if (!snapshot::mount()) { + return 0; + } + split_path_and_filename(arg->volume, &path, &pnl, &fname, &fnl); + printf("status=1 snapmountpoint=\"%s\" snapdirectory=\"%s\"\n", + arg->volume, path); + return 1; + }; + + int unmount() { + printf("status=1\n"); + return 1; + }; + + int support() { + if (!snapshot::support()) { + return 0; + } + if (access(arg->mountpoint, W_OK) != 0) { + printf("status=0 device=\"%s\" type=simulator " + "error=\"Unable to access mountpoint\"\n", + arg->mountpoint); + return 0; + } + printf("status=1 device=\"%s\" type=simulator\n", arg->mountpoint); + return 1; + }; + + int create() { + char ed1[50]; + utime_t now; + + if (!snapshot::create()) { + return 0; + } + Mmsg(path, "%s/%s", arg->mountpoint, arg->snapdir); + makedir(path); + now = time(NULL); + Mmsg(cmd, "ln -vsf \"%s\" \"%s\"", arg->mountpoint, path); + if (run_program(cmd, 60, errmsg)) { + Dmsg(10, "Unable to create symlink. ERR=%s\n", errmsg); + strip_quotes(errmsg); + printf("status=%d error=\"Unable to umount the device %s\"\n", + get_error_code(), + errmsg); + } + printf("status=1 volume=\"%s\" createtdate=%s type=simulator\n", + path, edit_uint64(now, ed1)); + return 1; + }; + + int del() { + int ret; + if (!snapshot::del()) { + return 0; + } + ret = unlink(arg->volume); + printf("status=%d\n", (ret == 0)? 
1 : 0); + return 1; + }; +}; + +snapshot *detect_snapshot_backend(arguments *arg) +{ + if (arg->type) { + if (strcasecmp(arg->type, "btrfs") == 0) { + return new btrfs(arg); + + } else if (strcasecmp(arg->type, "lvm") == 0) { + return new lvm(arg); + + } else if (strcasecmp(arg->type, "simulator") == 0) { + return new simulator(arg); + + } else if (strcasecmp(arg->type, "zfs") == 0) { + return new zfs(arg); + } + } + if (arg->fstype) { + if (strcasecmp(arg->fstype, "btrfs") == 0) { + return new btrfs(arg); + + } else if (strcasecmp(arg->fstype, "tmpfs") == 0) { + return new simulator(arg); + + /* TODO: Need to find something smarter here */ + } else if (strcasecmp(arg->fstype, "ext4") == 0) { + return new lvm(arg); + + } else if (strcasecmp(arg->fstype, "xfs") == 0) { + return new lvm(arg); + + } else if (strcasecmp(arg->fstype, "ext3") == 0) { + return new lvm(arg); + + } else if (strcasecmp(arg->fstype, "zfs") == 0 || + strcasecmp(arg->fstype, "fuse.zfs") == 0) + { + return new zfs(arg); + } + } + Dmsg(10, "Backend not found\n"); + return NULL; +} + +/* defined in jcr.c */ +void create_jcr_key(); + +int main(int argc, char **argv) +{ + snapshot *snap; + arguments arg; + char ch; + int ret=0; + struct stat sp; + + set_trace_file("/dev/null"); + setlocale(LC_ALL, ""); + setenv("LANG", "C", true); + bindtextdomain("bacula", LOCALEDIR); + textdomain("bacula"); + lmgr_init_thread(); + OSDependentInit(); + init_stack_dump(); + my_name_is(argc, argv, "bsnapshot"); + create_jcr_key(); + + while ((ch = getopt(argc, argv, "?d:vc:so:V:T:t")) != -1) { + switch (ch) { + case 'd': /* set debug level */ + debug_level = atoi(optarg); + if (debug_level <= 0) { + debug_level = 1; + } + break; + + case 'v': + arg.verbose++; + break; + + case 's': /* use sudo */ + arg.sudo = "sudo "; + break; + + case 'c': /* config file */ + pm_strcpy(arg.config_file, optarg); + if (stat(optarg, &sp) < 0) { + Pmsg(000, "Unable to access %s. ERR=%s\n",optarg, strerror(errno)); + usage(_("Unable to open -p argument for reading")); + } + break; + + case 'o': /* where to send the debug output */ + set_trace_file(optarg); + break; + + case 't': + arg.action = (char *)"check"; + break; + + case 'V': /* set volume name */ + arg.volume = optarg; + break; + + case 'T': /* device type */ + arg.type = optarg; + break; + default: + usage(); + } + } + + argc -= optind; + argv += optind; + + if (!arg.validate()) { + usage(); + } + + if (arg.disabled) { + Dmsg(10, "disabled from config file\n"); + exit (1); + } + + snap = detect_snapshot_backend(&arg); + + if (!snap) { + printf("status=0 error=\"Unable to detect snapshot backend\""); + exit(0); + } + + start_watchdog(); + + if (strcasecmp(arg.action, "mount") == 0) { + ret = snap->mount(); + + } else if (strcasecmp(arg.action, "support") == 0) { + ret = snap->support(); + + } else if (strcasecmp(arg.action, "create") == 0) { + ret = snap->create(); + + } else if (strcasecmp(arg.action, "delete") == 0) { + ret = snap->del(); + + } else if (strcasecmp(arg.action, "subvolumes") == 0) { + ret = snap->subvolumes(); + + } else if (strcasecmp(arg.action, "list") == 0) { + ret = snap->list(); + + } else if (strcasecmp(arg.action, "check") == 0) { + ret = snap->check(); + + } else if (strcasecmp(arg.action, "unmount") == 0) { + ret = snap->unmount(); + } + + delete snap; + stop_watchdog(); + close_memory_pool(); + lmgr_cleanup_main(); + + Dmsg(10, "exit code = %d\n", (ret == 1) ? 0 : 1); + return (ret == 1)? 
0 : 1; +} diff --git a/src/tools/bvfs_test.c b/src/tools/bvfs_test.c new file mode 100644 index 00000000..21f9da98 --- /dev/null +++ b/src/tools/bvfs_test.c @@ -0,0 +1,334 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * Program to test cache path + * + * Eric Bollengier, August 2009 + * + * + */ + +#include "bacula.h" +#include "cats/cats.h" +#include "cats/bvfs.h" +#include "findlib/find.h" + +/* Local variables */ +static BDB *db; +static const char *file = "COPYRIGHT"; +static DBId_t fnid=0; +static const char *db_name = "regress"; +static const char *db_user = "regress"; +static const char *db_password = ""; +static const char *db_host = NULL; +static const char *db_ssl_mode = NULL; +static const char *db_ssl_key = NULL; +static const char *db_ssl_cert = NULL; +static const char *db_ssl_ca = NULL; +static const char *db_ssl_capath = NULL; +static const char *db_ssl_cipher = NULL; + +static void usage() +{ + fprintf(stderr, _( +PROG_COPYRIGHT +"\n%sVersion: %s (%s)\n" +" -d set debug level to \n" +" -dt print timestamp in debug output\n" +" -n specify the database name (default bacula)\n" +" -u specify database user name (default bacula)\n" +" -P specify database host (default NULL)\n" +" -k path name to the key file (default NULL)\n" +" -e path name to the certificate file (default NULL)\n" +" -a path name to the CA certificate file (default NULL)\n" +" -w specify working directory\n" +" -j specify jobids\n" +" -p specify path\n" +" -f specify file\n" +" -l maximum tuple to fetch\n" +" -T truncate cache table before starting\n" +" -v verbose\n" +" -? 
print this message\n\n"), 2001, "", VERSION, BDATE); + exit(1); +} + +static int result_handler(void *ctx, int fields, char **row) +{ + Bvfs *vfs = (Bvfs *)ctx; + ATTR *attr = vfs->get_attr(); + char empty[] = "A A A A A A A A A A A A A A"; + + memset(&attr->statp, 0, sizeof(struct stat)); + decode_stat((row[BVFS_LStat] && row[BVFS_LStat][0])?row[BVFS_LStat]:empty, + &attr->statp, sizeof(attr->statp), &attr->LinkFI); + + if (bvfs_is_dir(row) || bvfs_is_file(row)) { + /* display clean stuffs */ + + if (bvfs_is_dir(row)) { + pm_strcpy(attr->ofname, bvfs_basename_dir(row[BVFS_Name])); + } else { + /* if we see the requested file, note his filenameid */ + if (bstrcmp(row[BVFS_Name], file)) { + fnid = str_to_int64(row[BVFS_FilenameId]); + } + pm_strcpy(attr->ofname, row[BVFS_Name]); + } + print_ls_output(vfs->get_jcr(), attr); + + } else { + Pmsg5(0, "JobId=%s FileId=%s\tMd5=%s\tVolName=%s\tVolInChanger=%s\n", + row[BVFS_JobId], row[BVFS_FileId], row[BVFS_Md5], row[BVFS_VolName], + row[BVFS_VolInchanger]); + + pm_strcpy(attr->ofname, file); + print_ls_output(vfs->get_jcr(), attr); + } + return 0; +} + + +/* number of thread started */ + +int main (int argc, char *argv[]) +{ + int ch; + char *jobids = (char *)"1"; + char *path=NULL, *client=NULL; + uint64_t limit=0; + bool clean=false; + setlocale(LC_ALL, ""); + bindtextdomain("bacula", LOCALEDIR); + textdomain("bacula"); + init_stack_dump(); + + Dmsg0(0, "Starting bvfs_test tool\n"); + + my_name_is(argc, argv, "bvfs_test"); + init_msg(NULL, NULL); + + OSDependentInit(); + + while ((ch = getopt(argc, argv, "h:o:k:e:a:c:l:d:n:P:Su:vf:w:?j:p:f:T")) != -1) { + switch (ch) { + case 'd': /* debug level */ + if (*optarg == 't') { + dbg_timestamp = true; + } else { + debug_level = atoi(optarg); + if (debug_level <= 0) { + debug_level = 1; + } + } + break; + case 'l': + limit = str_to_int64(optarg); + break; + + case 'c': + client = optarg; + break; + + case 'h': + db_host = optarg; + break; + + case 'o': + db_ssl_mode = optarg; + break; + + case 'k': + db_ssl_key= optarg; + break; + + case 'e': + db_ssl_cert= optarg; + break; + + case 'a': + db_ssl_ca= optarg; + break; + + case 'n': + db_name = optarg; + break; + + case 'w': + working_directory = optarg; + break; + + case 'u': + db_user = optarg; + break; + + case 'P': + db_password = optarg; + break; + + case 'v': + verbose++; + break; + + case 'p': + path = optarg; + break; + + case 'f': + file = optarg; + break; + + case 'j': + jobids = optarg; + break; + + case 'T': + clean = true; + break; + + case '?': + default: + usage(); + + } + } + argc -= optind; + argv += optind; + + if (argc != 0) { + Pmsg0(0, _("Wrong number of arguments: \n")); + usage(); + } + JCR *bjcr = new_jcr(sizeof(JCR), NULL); + bjcr->JobId = getpid(); + bjcr->setJobType(JT_CONSOLE); + bjcr->setJobLevel(L_FULL); + bjcr->JobStatus = JS_Running; + bjcr->client_name = get_pool_memory(PM_FNAME); + pm_strcpy(bjcr->client_name, "Dummy.Client.Name"); + bstrncpy(bjcr->Job, "bvfs_test", sizeof(bjcr->Job)); + + if ((db = db_init_database(NULL, NULL, db_name, db_user, db_password, + db_host, 0, NULL, + db_ssl_mode, db_ssl_key, + db_ssl_cert, db_ssl_ca, + db_ssl_capath, db_ssl_cipher, + false, false)) == NULL) { + Emsg0(M_ERROR_TERM, 0, _("Could not init Bacula database\n")); + } + Dmsg1(0, "db_type=%s\n", db_get_engine_name(db)); + + if (!db_open_database(NULL, db)) { + Emsg0(M_ERROR_TERM, 0, db_strerror(db)); + } + Dmsg0(200, "Database opened\n"); + if (verbose) { + Pmsg2(000, _("Using Database: %s, User: %s\n"), db_name, db_user); + } + + 
bjcr->db = db; + + if (clean) { + Pmsg0(0, "Clean old table\n"); + db_sql_query(db, "DELETE FROM PathHierarchy", NULL, NULL); + db_sql_query(db, "UPDATE Job SET HasCache=0", NULL, NULL); + db_sql_query(db, "DELETE FROM PathVisibility", NULL, NULL); + bvfs_update_cache(bjcr, db); + } + + Bvfs fs(bjcr, db); + fs.set_handler(result_handler, &fs); + + fs.set_jobids(jobids); + fs.update_cache(); + if (limit) + fs.set_limit(limit); + + if (path) { + fs.ch_dir(path); + fs.ls_special_dirs(); + fs.ls_dirs(); + while (fs.ls_files()) { + fs.next_offset(); + } + + if (fnid && client) { + alist clients(1, not_owned_by_alist); + clients.append(client); + Pmsg0(0, "---------------------------------------------\n"); + Pmsg1(0, "Getting file version for %s\n", file); + fs.get_all_file_versions(fs.get_pwd(), fnid, &clients); + } + + exit (0); + } + + + Pmsg0(0, "list /\n"); + fs.ch_dir("/"); + fs.ls_special_dirs(); + fs.ls_dirs(); + fs.ls_files(); + + Pmsg0(0, "list /tmp/\n"); + fs.ch_dir("/tmp/"); + fs.ls_special_dirs(); + fs.ls_dirs(); + fs.ls_files(); + + Pmsg0(0, "list /tmp/regress/\n"); + fs.ch_dir("/tmp/regress/"); + fs.ls_special_dirs(); + fs.ls_files(); + fs.ls_dirs(); + + Pmsg0(0, "list /tmp/regress/build/\n"); + fs.ch_dir("/tmp/regress/build/"); + fs.ls_special_dirs(); + fs.ls_dirs(); + fs.ls_files(); + + fs.get_all_file_versions(1, 347, (char*)"zog4-fd"); + + char p[200]; + strcpy(p, "/tmp/toto/rep/"); + bvfs_parent_dir(p); + if(strcmp(p, "/tmp/toto/")) { + Pmsg0(000, "Error in bvfs_parent_dir\n"); + } + bvfs_parent_dir(p); + if(strcmp(p, "/tmp/")) { + Pmsg0(000, "Error in bvfs_parent_dir\n"); + } + bvfs_parent_dir(p); + if(strcmp(p, "/")) { + Pmsg0(000, "Error in bvfs_parent_dir\n"); + } + bvfs_parent_dir(p); + if(strcmp(p, "")) { + Pmsg0(000, "Error in bvfs_parent_dir\n"); + } + bvfs_parent_dir(p); + if(strcmp(p, "")) { + Pmsg0(000, "Error in bvfs_parent_dir\n"); + } + + return 0; +} diff --git a/src/tools/bwild.c b/src/tools/bwild.c new file mode 100644 index 00000000..3ebfa917 --- /dev/null +++ b/src/tools/bwild.c @@ -0,0 +1,132 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2019 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Test program for testing wild card expressions + * + * Kern Sibbald, MMVI + * + */ + +#include "bacula.h" +#include "lib/fnmatch.h" + +static void usage() +{ + fprintf(stderr, +"\n" +"Usage: bwild [-d debug_level] -f \n" +" -f specify file of data to be matched\n" +" -i use case insenitive match\n" +" -l suppress line numbers\n" +" -n print lines that do not match\n" +" -? 
print this message.\n" +"\n\n"); + + exit(1); +} + +int main(int argc, char *const *argv) +{ + char *fname = NULL; + int rc, ch; + char data[1000]; + char pat[500]; + FILE *fd; + bool match_only = true; + int lineno; + bool no_linenos = false; + int ic = 0; + + + setlocale(LC_ALL, ""); + bindtextdomain("bacula", LOCALEDIR); + textdomain("bacula"); + + while ((ch = getopt(argc, argv, "d:f:iln?")) != -1) { + switch (ch) { + case 'd': /* set debug level */ + debug_level = atoi(optarg); + if (debug_level <= 0) { + debug_level = 1; + } + break; + + case 'f': /* data */ + fname = optarg; + break; + + case 'i': /* ignore case */ + ic = FNM_CASEFOLD; + break; + + case 'l': + no_linenos = true; + break; + + case 'n': + match_only = false; + break; + + case '?': + default: + usage(); + + } + } + argc -= optind; + argv += optind; + + if (!fname) { + printf("A data file must be specified.\n"); + usage(); + } + + OSDependentInit(); + + for ( ;; ) { + printf("Enter a wild-card: "); + if (fgets(pat, sizeof(pat)-1, stdin) == NULL) { + break; + } + strip_trailing_newline(pat); + if (pat[0] == 0) { + exit(0); + } + fd = fopen(fname, "r"); + if (!fd) { + printf(_("Could not open data file: %s\n"), fname); + exit(1); + } + lineno = 0; + while (fgets(data, sizeof(data)-1, fd)) { + strip_trailing_newline(data); + lineno++; + rc = fnmatch(pat, data, ic); + if ((match_only && rc == 0) || (!match_only && rc != 0)) { + if (no_linenos) { + printf("%s\n", data); + } else { + printf("%5d: %s\n", lineno, data); + } + } + } + fclose(fd); + } + exit(0); +} diff --git a/src/tools/cats_test.c b/src/tools/cats_test.c new file mode 100644 index 00000000..55871549 --- /dev/null +++ b/src/tools/cats_test.c @@ -0,0 +1,702 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Program to test CATS DB routines + * + * + */ +#define _BDB_PRIV_INTERFACE_ + +#include "bacula.h" +#include "cats/cats.h" +#include "cats/bvfs.h" +#include "findlib/find.h" + +/* Local variables */ +static BDB *db; +static const char *file = "COPYRIGHT"; +//static DBId_t fnid=0; +static const char *db_name = "bacula"; +static const char *db_user = "bacula"; +static const char *db_password = ""; +static const char *db_host = NULL; +static const char *db_address = NULL; +static int db_port = 0; +static int64_t pid = 0; +static JCR *jcr=NULL; + +#define PLINE "\n============================================================\n" +static void usage() +{ + fprintf(stderr, _( +PROG_COPYRIGHT +"\n%sVersion: %s (%s)\n" +" -d set debug level to \n" +" -dt print timestamp in debug output\n" +" -n specify the database name (default bacula)\n" +" -u specify database user name (default bacula)\n" +" -P specify database host (default NULL)\n" +" -w specify working directory\n" +" -p specify path\n" +" -f specify file\n" +" -l maximum tuple to fetch\n" +" -q print only errors\n" +" -v verbose\n" +" -? 
print this message\n\n"), 2011, "", VERSION, BDATE); + exit(1); +} + +bool print_ok=true; +int _err=0; +int _wrn=0; +int _nb=0; + +bool _warn(const char *file, int l, const char *op, int value, const char *label) +{ + bool ret=false; + _nb++; + if (!value) { + _wrn++; + printf("WRN %.30s %s:%i on %s\n", label, file, l, op); + } else { + ret=true; + printf("OK %.30s\n", label); + } + return ret; +} + +#define warn(x, label) _warn(__FILE__, __LINE__, #x, (x), label) + +bool _ok(const char *file, int l, const char *op, int value, const char *label) +{ + bool ret=false; + _nb++; + if (!value) { + _err++; + printf("ERR %.30s %s:%i on %s\n", label, file, l, op); + } else { + ret=true; + if (print_ok) { + printf("OK %.30s\n", label); + } + } + return ret; +} + +#define ok(x, label) _ok(__FILE__, __LINE__, #x, (x), label) + +bool _nok(const char *file, int l, const char *op, int value, const char *label) +{ + bool ret=false; + _nb++; + if (value) { + _err++; + printf("ERR %.30s %s:%i on !%s\n", label, file, l, op); + } else { + ret = true; + if (print_ok) { + printf("OK %.30s\n", label); + } + } + return ret; +} + +#define nok(x, label) _nok(__FILE__, __LINE__, #x, (x), label) + +int report() +{ + printf("Result %i/%i OK\n", _nb - _err, _nb); + return _err>0; +} + +static void cmp_pool(POOL_DBR &pr, POOL_DBR &pr2) +{ + ok(pr.MaxVols == pr2.MaxVols, " Check Pool MaxVols"); + ok(pr.UseOnce == pr2.UseOnce, " Check Pool UseOnce"); + ok(pr.UseCatalog == pr2.UseCatalog, " Check Pool UseCatalog"); + ok(pr.AcceptAnyVolume == pr2.AcceptAnyVolume," Check Pool AcceptAnyVolume"); + ok(pr.AutoPrune == pr2.AutoPrune, " Check Pool AutoPrune"); + ok(pr.Recycle == pr2.Recycle, " Check Pool Recycle"); + ok(pr.VolRetention == pr2.VolRetention , " Check Pool VolRetention"); + ok(pr.VolUseDuration == pr2.VolUseDuration, " Check Pool VolUseDuration"); + ok(pr.MaxVolJobs == pr2.MaxVolJobs, " Check Pool MaxVolJobs"); + ok(pr.MaxVolFiles == pr2.MaxVolFiles, " Check Pool MaxVolFiles"); + ok(pr.MaxVolBytes == pr2.MaxVolBytes, " Check Pool MaxVolBytes"); + ok(!strcmp(pr.PoolType, pr2.PoolType), " Check Pool PoolType"); + ok(pr.LabelType == pr2.LabelType, " Check Pool LabelType"); + ok(!strcmp(pr.LabelFormat, pr2.LabelFormat), " Check Pool LabelFormat"); + ok(pr.RecyclePoolId == pr2.RecyclePoolId, " Check Pool RecyclePoolId"); + ok(pr.ScratchPoolId == pr2.ScratchPoolId, " Check Pool ScratchPoolId"); + ok(pr.ActionOnPurge == pr2.ActionOnPurge, " Check Pool ActionOnPurge"); +} + +static void cmp_client(CLIENT_DBR &cr, CLIENT_DBR &cr2) +{ + ok(!strcmp(cr2.Name, cr.Name), " Check Client Name"); + ok(!strcmp(cr2.Uname, cr.Uname), " Check Client Uname"); + ok(cr.AutoPrune == cr2.AutoPrune, " Check Client Autoprune"); + ok(cr.JobRetention == cr2.JobRetention, " Check Client JobRetention"); + ok(cr.FileRetention == cr2.FileRetention," Check Client FileRetention"); +} + +static void cmp_job(JOB_DBR &jr, JOB_DBR &jr2) +{ + ok(jr.VolSessionId == jr2.VolSessionId, " Check VolSessionId"); + ok(jr.VolSessionTime == jr2.VolSessionTime, " Check VolSessionTime"); + ok(jr.PoolId == jr2.PoolId, " Check PoolId"); + ok(jr.StartTime == jr2.StartTime, " Check StartTime"); + ok(jr.EndTime == jr2.EndTime, " Check EndTime"); + ok(jr.JobFiles == jr2.JobFiles, " Check JobFiles"); + ok(jr.JobBytes == jr2.JobBytes, " Check JobBytes"); + ok(jr.JobTDate == jr2.JobTDate, " Check JobTDate"); + ok(!strcmp(jr.Job, jr2.Job), " Check Job"); + ok(jr.JobStatus == jr2.JobStatus, " Check JobStatus"); + ok(jr.JobType == jr2.JobType, " Check Type"); + ok(jr.JobLevel == 
jr2.JobLevel, " Check Level"); + ok(jr.ClientId == jr2.ClientId, " Check ClientId"); + ok(!strcmp(jr.Name, jr2.Name), " Check Name"); + ok(jr.PriorJobId == jr2.PriorJobId, " Check PriorJobId"); + ok(jr.RealEndTime == jr2.RealEndTime, " Check RealEndTime"); + ok(jr.JobId == jr2.JobId, " Check JobId"); + ok(jr.FileSetId == jr2.FileSetId, " Check FileSetId"); + ok(jr.SchedTime == jr2.SchedTime, " Check SchedTime"); + ok(jr.RealEndTime == jr2.RealEndTime, " Check RealEndTime"); + ok(jr.ReadBytes == jr2.ReadBytes, " Check ReadBytes"); + ok(jr.HasBase == jr2.HasBase, " Check HasBase"); + ok(jr.PurgedFiles == jr2.PurgedFiles, " Check PurgedFiles"); +} + + +#define aPATH "/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" +#define aFILE "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + +static int list_files(void *ctx, int nb_col, char **row) +{ + uint32_t *k = (uint32_t*) ctx; + (*k)++; + ok(nb_col > 4, "Check result columns"); + ok(!strcmp(row[0], aPATH aPATH aPATH aPATH "/"), "Check path"); + ok(!strcmp(row[1], aFILE aFILE ".txt"), "Check filename"); + ok(str_to_int64(row[2]) == 10, "Check FileIndex"); + ok(str_to_int64(row[3]) == jcr->JobId, "Check JobId"); + return 1; +} + +static int count_col(void *ctx, int nb_col, char **row) +{ + *((int32_t*) ctx) = nb_col; + return 1; +} + +/* number of thread started */ + +int main (int argc, char *argv[]) +{ + int ch; + char *path=NULL, *client=NULL; + uint64_t limit=0; + bool clean=false; + bool full_test=false; + int dbtype; + uint32_t j; + char temp[20]; + POOLMEM *buf = get_pool_memory(PM_FNAME); + POOLMEM *buf2 = get_pool_memory(PM_FNAME); + POOLMEM *buf3 = get_pool_memory(PM_FNAME); + + setlocale(LC_ALL, ""); + bindtextdomain("bacula", LOCALEDIR); + textdomain("bacula"); + init_stack_dump(); + pid = getpid(); + + Pmsg0(0, "Starting cats_test tool" PLINE); + + my_name_is(argc, argv, ""); + init_msg(NULL, NULL); + + OSDependentInit(); + + while ((ch = getopt(argc, argv, "qh:c:l:d:n:P:Su:vFw:?p:f:T")) != -1) { + switch (ch) { + case 'q': + print_ok = false; + break; + case 'd': /* debug level */ + if (*optarg == 't') { + dbg_timestamp = true; + } else { + debug_level = atoi(optarg); + if (debug_level <= 0) { + debug_level = 1; + } + } + break; + case 'l': + limit = str_to_int64(optarg); + break; + + case 'c': + client = optarg; + break; + + case 'h': + db_host = optarg; + break; + + case 'n': + db_name = optarg; + break; + + case 'w': + working_directory = optarg; + break; + + case 'u': + db_user = optarg; + break; + + case 'P': + db_password = optarg; + break; + + case 'v': + verbose++; + break; + + case 'p': + path = optarg; + break; + + case 'F': + full_test = true; + break; + + case 'f': + file = optarg; + break; + + case 'T': + clean = true; + break; + + case '?': + default: + usage(); + + } + } + argc -= optind; + argv += optind; + + if (argc != 0) { + Pmsg0(0, _("Wrong number of arguments: \n")); + usage(); + } + + /* TODO: + * - Open DB + * - With errors + * - With good info + * - With multiple thread in // + * - Test cats.h + * - Test all sql_cmds.c + * - Test all sql.c (db_) + * - Test all sql_create.c + * - Test db_handler + */ + + jcr = new_jcr(sizeof(JCR), NULL); + jcr->setJobType(JT_CONSOLE); + jcr->setJobLevel(L_NONE); + jcr->JobStatus = JS_Running; + bstrncpy(jcr->Job, "**dummy**", sizeof(jcr->Job)); + jcr->JobId = pid; /* this is JobId on tape */ + jcr->start_time = jcr->sched_time = time(NULL); + + /* Test DB connexion */ + Pmsg1(0, PLINE "Test DB connection \"%s\"" PLINE, db_name); + + if (full_test) { 
+ db = db_init_database(jcr /* JCR */, + NULL /* dbi driver */, + db_name, db_user, db_password, db_address, db_port + 100, + NULL /* db_socket */, + db_ssl_mode, db_ssl_key, db_ssl_cert, + db_ssl_ca, db_ssl_capath, db_ssl_cipher, + 0 /* mult_db_connections */, false); + ok(db != NULL, "Test bad connection"); + if (!db) { + report(); + exit (1); + } + nok(db_open_database(jcr, db), "Open bad Database"); + db_close_database(jcr, db); + } + + db = db_init_database(jcr /* JCR */, + NULL /* dbi driver */, + db_name, db_user, db_password, db_address, db_port, + NULL /* db_socket */, + db_ssl_mode, db_ssl_key, db_ssl_cert, + db_ssl_ca, db_ssl_capath, db_ssl_cipher, + false /* mult_db_connections */, false); + + ok(db != NULL, "Test db connection"); + if (!db) { + report(); + exit (1); + } + if (!ok(db_open_database(jcr, db), "Open Database")) { + Pmsg1(000, _("Could not open database \"%s\".\n"), db_name); + Jmsg(jcr, M_FATAL, 0, _("Could not open, database \"%s\".\n"), db_name); + Jmsg(jcr, M_FATAL, 0, _("%s"), db_strerror(db)); + Pmsg1(000, "%s", db_strerror(db)); + db_close_database(jcr, db); + report(); + exit (1); + } + dbtype = db_get_type_index(db); + + + /* Check if the SQL library is thread-safe */ + ok(check_tables_version(jcr, db), "Check table version"); + ok(db_sql_query(db, "SELECT VersionId FROM Version", + db_int_handler, &j), "SELECT VersionId"); + + ok(UPDATE_DB(jcr, db, (char*)"UPDATE Version SET VersionId = 1"), + "Update VersionId"); + nok(check_tables_version(jcr, db), "Check table version"); + Mmsg(buf, "UPDATE Version SET VersionId = %d", j); + ok(UPDATE_DB(jcr, db, buf), "Restore VersionId"); + + if (dbtype != SQL_TYPE_SQLITE3) { + ok(db_check_max_connections(jcr, db, 1), "Test min Max Connexion"); + nok(db_check_max_connections(jcr, db, 10000), "Test max Max Connexion"); + } + + ok(db_open_batch_connexion(jcr, db), "Opening batch connection"); + db_close_database(jcr, jcr->db_batch); + jcr->db_batch = NULL; + + /* ---------------------------------------------------------------- */ + + uint32_t storageid=0; + ok(db_sql_query(db, "SELECT MIN(StorageId) FROM Storage", + db_int_handler, &storageid), "Get StorageId"); + ok(storageid > 0, "Check StorageId"); + if (!storageid) { + Pmsg0(0, "Please, run REGRESS_DEBUG=1 tests/bacula-backup-test before this test"); + exit (1); + } + + /* ---------------------------------------------------------------- */ + Pmsg0(0, PLINE "Doing Basic SQL tests" PLINE); + ok(db_sql_query(db, "SELECT 1,2,3,4,5", count_col, &j), "Count 5 rows"); + ok(j == 5, "Check number of columns"); + ok(db_sql_query(db, "SELECT 1,2,3,4,5,'a','b','c','d','e'", + count_col, &j), "Count 10 rows"); + ok(j == 10, "Check number of columns"); + + bsnprintf(temp, sizeof(temp), "t%lld", pid); + ok(db_sql_query(db, "SELECT 2", db_int_handler, &j), "Good SELECT query"); + ok(db_sql_query(db, "SELECT 1 FROM Media WHERE VolumeName='missing'", + db_int_handler, &j), "Good empty SELECT query"); + + db_int64_ctx i64; + i64.value = 0; i64.count = 0; + ok(db_sql_query(db, "SELECT 1",db_int64_handler, &i64),"db_int64_handler"); + ok(i64.value == 1, "Check db_int64_handler return"); + + db_list_ctx lctx; + ok(db_sql_query(db, "SELECT FileId FROM File ORDER By FileId LIMIT 10", + db_list_handler, &lctx), "db_list_ctx"); + ok(lctx.count == 10, "Check db_list_ctx count "); + ok(!strcmp(lctx.list, "1,2,3,4,5,6,7,8,9,10"), "Check db_list_ctx list"); + + nok(db_sql_query(db, "blabla", db_int_handler, &j), "Bad query"); + + Mmsg(buf, "CREATE Table %s (a int)", temp); + ok(db_sql_query(db, 
buf, NULL, NULL), "CREATE query"); + + Mmsg(buf, "INSERT INTO %s (a) VALUES (1)", temp); + ok(INSERT_DB(jcr, db, buf), "INSERT query"); + ok(INSERT_DB(jcr, db, buf), "INSERT query"); + ok(sql_affected_rows(db) == 1, "Check sql_affected_rows"); + + Mmsg(buf, "INSERT INTO aaa%s (a) VALUES (1)", temp); + nok(INSERT_DB(jcr, db, buf), "Bad INSERT query"); + ok(sql_affected_rows(db) == 0, "Check sql_affected_rows"); + + Mmsg(buf, "UPDATE %s SET a = 2", temp); + ok(UPDATE_DB(jcr, db, buf), "UPDATE query"); + ok(sql_affected_rows(db) == 2, "Check sql_affected_rows"); + + Mmsg(buf, "UPDATE %s SET a = 2 WHERE a = 1", temp); + nok(UPDATE_DB(jcr, db, buf), "Empty UPDATE query"); + + Mmsg(buf, "UPDATE aaa%s SET a = 2", temp); + nok(UPDATE_DB(jcr, db, buf), "Bad UPDATE query"); + + Mmsg(buf, "DELETE FROM %s", temp); + ok(DELETE_DB(jcr, db, buf), "DELETE query"); + nok(DELETE_DB(jcr, db, buf), "Empty DELETE query"); /* TODO bug ? */ + + Mmsg(buf, "DELETE FROM aaa%s", temp); + ok(DELETE_DB(jcr, db, buf), "Bad DELETE query"); /* TODO bug ? */ + + Mmsg(buf, "DROP TABLE %s", temp); + ok(QUERY_DB(jcr, db, buf), "DROP query"); + nok(QUERY_DB(jcr, db, buf), "Empty DROP query"); + + /* ---------------------------------------------------------------- */ + + strcpy(buf, "This string should be 'escaped'"); + db_escape_string(jcr, db, buf2, buf, strlen(buf)); + ok((strlen(buf) + 2) == strlen(buf2),"Quoted string should be longer"); + Mmsg(buf, "INSERT INTO Path (Path) VALUES ('%lld-%s')", pid, buf2); + ok(db_sql_query(db, buf, NULL, NULL), "Inserting quoted string"); + + /* ---------------------------------------------------------------- */ + Pmsg0(0, PLINE "Doing Job tests" PLINE); + + JOB_DBR jr, jr2; + memset(&jr, 0, sizeof(jr)); + memset(&jr2, 0, sizeof(jr2)); + jr.JobId = 1; + ok(db_get_job_record(jcr, db, &jr), "Get Job record for JobId=1"); + ok(jr.JobFiles > 10, "Check number of files"); + + jr.JobId = (JobId_t)pid; + Mmsg(buf, "%s-%lld", jr.Job, pid); + strcpy(jr.Job, buf); + ok(db_create_job_record(jcr, db, &jr), "Create Job record"); + ok(db_update_job_start_record(jcr, db, &jr), "Update Start Record"); + ok(db_update_job_end_record(jcr, db, &jr), "Update End Record"); + jr2.JobId = jr.JobId; + ok(db_get_job_record(jcr, db, &jr2), "Get Job record by JobId"); + cmp_job(jr, jr2); + + memset(&jr2, 0, sizeof(jr2)); + strcpy(jr2.Job, jr.Job); + ok(db_get_job_record(jcr, db, &jr2), "Get Job record by Job name"); + cmp_job(jr, jr2); + + memset(&jr2, 0, sizeof(jr2)); + jr2.JobId = 99999; + nok(db_get_job_record(jcr, db, &jr2), "Get non existing Job record (JobId)"); + + memset(&jr2, 0, sizeof(jr2)); + strcpy(jr2.Job, "test"); + nok(db_get_job_record(jcr, db, &jr2), "Get non existing Job record (Job)"); + + /* ---------------------------------------------------------------- */ + + ATTR_DBR ar; + memset(&ar, 0, sizeof(ar)); + Mmsg(buf2, aPATH aPATH aPATH aPATH "/" aFILE aFILE ".txt"); + ar.fname = buf2; + Mmsg(buf3, "gD ImIZ IGk B Po Po A A A JY BNNvf5 BNKzS7 BNNuwC A A C"); + ar.attr = buf3; + ar.FileIndex = 10; + ar.Stream = STREAM_UNIX_ATTRIBUTES; + ar.FileType = FT_REG; + jcr->JobId = ar.JobId = jr.JobId; + jcr->JobStatus = JS_Running; + ok(db_create_attributes_record(jcr, db, &ar), "Inserting Filename"); + ok(db_write_batch_file_records(jcr), "Commit batch session"); + Mmsg(buf, "SELECT FileIndex FROM File WHERE JobId=%lld",(int64_t)jcr->JobId); + ok(db_sql_query(db, buf, db_int_handler, &j), "Get Inserted record"); + ok(j == ar.FileIndex, "Check FileIndex"); + Mmsg(buf, "SELECT COUNT(1) FROM File WHERE 
JobId=%lld",(int64_t)jcr->JobId); + ok(db_sql_query(db, buf, db_int_handler, &j), "List records"); + ok(j == 1, "Check batch session records"); + j = 0; + Mmsg(buf, "%lld", (uint64_t)jcr->JobId); + ok(db_get_file_list(jcr, jcr->db_batch, buf, false, false, list_files, &j), + "List files with db_get_file_list()"); + ok(j == 1, "Check db_get_file_list results"); + /* ---------------------------------------------------------------- */ + + Pmsg0(0, PLINE "Doing Client tests" PLINE); + CLIENT_DBR cr, cr2; + memset(&cr, 0, sizeof(cr)); + memset(&cr2, 0, sizeof(cr2)); + + cr.AutoPrune = 1; + cr.FileRetention = 10; + cr.JobRetention = 15; + bsnprintf(cr.Name, sizeof(cr.Name), "client-%lld-fd", pid); + bsnprintf(cr.Uname, sizeof(cr.Uname), "uname-%lld", pid); + + ok(db_create_client_record(jcr, db, &cr), "db_create_client_record()"); + ok(cr.ClientId > 0, "Check ClientId"); + + cr2.ClientId = cr.ClientId; /* Save it */ + cr.ClientId = 0; + + Pmsg0(0, "Search client by ClientId\n"); + ok(db_create_client_record(jcr, db, &cr),"Should get the client record"); + ok(cr.ClientId == cr2.ClientId, "Check if ClientId is the same"); + + ok(db_get_client_record(jcr, db, &cr2), "Search client by ClientId"); + cmp_client(cr, cr2); + + Pmsg0(0, "Search client by Name\n"); + memset(&cr2, 0, sizeof(cr2)); + strcpy(cr2.Name, cr.Name); + ok(db_get_client_record(jcr, db, &cr2),"Search client by Name"); + cmp_client(cr, cr2); + + Pmsg0(0, "Search non existing client by Name\n"); + memset(&cr2, 0, sizeof(cr2)); + bsnprintf(cr2.Name, sizeof(cr2.Name), "hollow-client-%lld-fd", pid); + nok(db_get_client_record(jcr, db, &cr2), "Search non existing client"); + ok(cr2.ClientId == 0, "Check ClientId after failed search"); + + cr.AutoPrune = 0; + strcpy(cr.Uname, "NewUname"); + ok(db_update_client_record(jcr, db, &cr), "Update Client record"); + memset(&cr2, 0, sizeof(cr2)); + cr2.ClientId = cr.ClientId; + ok(db_get_client_record(jcr, db, &cr2),"Search client by ClientId"); + cmp_client(cr, cr2); + + int nb, i; + uint32_t *ret_ids; + ok(db_get_client_ids(jcr, db, &nb, &ret_ids), "Get Client Ids"); + ok(nb > 0, "Should find at least 1 Id"); + for (i = 0; i < nb; i++) { + if (ret_ids[i] == cr2.ClientId) { + break; + } + } + ok(i < nb, "Check if ClientId was found"); + + /* ---------------------------------------------------------------- */ + Pmsg0(0, PLINE "Doing Pool tests" PLINE); + POOL_DBR pr, pr2; + memset(&pr, 0, sizeof(pr)); + memset(&pr2, 0, sizeof(pr2)); + + bsnprintf(pr.Name, sizeof(pr.Name), "pool-%lld", pid); + pr.MaxVols = 10; + pr.UseOnce = 0; + pr.UseCatalog = true; + pr.AcceptAnyVolume = true; + pr.AutoPrune = true; + pr.Recycle = true; + pr.VolRetention = 1000; + pr.VolUseDuration = 1000; + pr.MaxVolJobs = 100; + pr.MaxVolFiles = 1000; + pr.MaxVolBytes = 1000000; + strcpy(pr.PoolType, "Backup"); + pr.LabelType = 0; + pr.LabelFormat[0] = 0; + pr.RecyclePoolId = 0; + pr.ScratchPoolId = 0; + pr.ActionOnPurge = 1; + + ok(db_create_pool_record(jcr, db, &pr), "db_create_pool_record()"); + ok(pr.PoolId > 0, "Check PoolId"); + + pr2.PoolId = pr.PoolId; + pr.PoolId = 0; + + Pmsg0(0, "Search pool by PoolId\n"); + nok(db_create_pool_record(jcr, db, &pr),"Can't create pool twice"); + ok(db_get_pool_numvols(jcr, db, &pr2), "Search pool by PoolId"); + cmp_pool(pr, pr2); + + pr2.MaxVols++; + pr2.AutoPrune = false; + pr2.Recycle = false; + pr2.VolRetention++; + pr2.VolUseDuration++; + pr2.MaxVolJobs++; + pr2.MaxVolFiles++; + pr2.MaxVolBytes++; + strcpy(pr2.PoolType, "Restore"); + strcpy(pr2.LabelFormat, "VolFormat"); + 
pr2.RecyclePoolId = 0; + pr2.ScratchPoolId = 0; + pr2.ActionOnPurge = 2; + + ok(db_update_pool_record(jcr, db, &pr2), "Update Pool record"); + memset(&pr, 0, sizeof(pr)); + pr.PoolId = pr2.PoolId; + ok(db_get_pool_numvols(jcr, db, &pr), "Search pool by PoolId"); + cmp_pool(pr, pr2); + + ok(db_delete_pool_record(jcr, db, &pr), "Delete Pool"); + nok(db_delete_pool_record(jcr, db, &pr), "Delete non existing Pool"); + nok(db_update_pool_record(jcr, db, &pr), "Update non existing Pool"); + ok(db_create_pool_record(jcr, db, &pr), "Recreate Pool"); + + /* ---------------------------------------------------------------- */ + Pmsg0(0, PLINE "Doing Media tests" PLINE); + + MEDIA_DBR mr, mr2; + memset(&mr, 0, sizeof(mr)); + memset(&mr2, 0, sizeof(mr2)); + + bsnprintf(mr.VolumeName, sizeof(mr.VolumeName), "media-%lld", pid); + bsnprintf(mr.MediaType, sizeof(mr.MediaType), "type-%lld", pid); + + /* from set_pool_dbr_defaults_in_media_dbr(&mr, &pr); */ + mr.PoolId = pr.PoolId; + bstrncpy(mr.VolStatus, NT_("Append"), sizeof(mr.VolStatus)); + mr.Recycle = pr.Recycle; + mr.VolRetention = pr.VolRetention; + mr.VolUseDuration = pr.VolUseDuration; + mr.ActionOnPurge = pr.ActionOnPurge; + mr.RecyclePoolId = pr.RecyclePoolId; + mr.MaxVolJobs = pr.MaxVolJobs; + mr.MaxVolFiles = pr.MaxVolFiles; + mr.MaxVolBytes = pr.MaxVolBytes; + mr.LabelType = pr.LabelType; + mr.Enabled = 1; + + mr.VolCapacityBytes = 1000; + mr.Slot = 1; + mr.VolBytes = 1000; + mr.InChanger = 1; + mr.VolReadTime = 10000; + mr.VolWriteTime = 99999; + mr.StorageId = 0; + mr.DeviceId = 0; + mr.LocationId = 0; + mr.ScratchPoolId = 0; + mr.RecyclePoolId = 0; + + ok(db_create_media_record(jcr, db, &mr), "Create Media"); + nok(db_create_media_record(jcr, db, &mr), "Create Media twice"); + + /* ---------------------------------------------------------------- */ + Pmsg0(0, PLINE "Doing ... tests" PLINE); + + db_close_database(jcr, db); + report(); + free_pool_memory(buf); + free_pool_memory(buf2); + free_pool_memory(buf3); + return 0; +} diff --git a/src/tools/dbcheck.c b/src/tools/dbcheck.c new file mode 100644 index 00000000..433919b2 --- /dev/null +++ b/src/tools/dbcheck.c @@ -0,0 +1,1501 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2019 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * Program to check a Bacula database for consistency and to + * make repairs + * + * Kern E. 
Sibbald, August 2002 + * + */ + +#include "bacula.h" +#include "cats/cats.h" +#include "lib/runscript.h" +#include "dird/dird_conf.h" + +extern bool parse_dir_config(CONFIG *config, const char *configfile, int exit_code); + +typedef struct s_id_ctx { + int64_t *Id; /* ids to be modified */ + int num_ids; /* ids stored */ + int max_ids; /* size of array */ + int num_del; /* number deleted */ + int tot_ids; /* total to process */ +} ID_LIST; + +typedef struct s_name_ctx { + char **name; /* list of names */ + int num_ids; /* ids stored */ + int max_ids; /* size of array */ + int num_del; /* number deleted */ + int tot_ids; /* total to process */ +} NAME_LIST; + +/* Global variables */ + +static bool fix = false; +static bool batch = false; +static BDB *db; +static ID_LIST id_list; +static NAME_LIST name_list; +static char buf[20000]; +static bool quit = false; +static CONFIG *config; +static const char *idx_tmp_name; + +#define MAX_ID_LIST_LEN 10000000 + +/* Forward referenced functions */ +static int make_id_list(const char *query, ID_LIST *id_list); +static int delete_id_list(const char *query, ID_LIST *id_list); +static int make_name_list(const char *query, NAME_LIST *name_list); +static void print_name_list(NAME_LIST *name_list); +static void free_name_list(NAME_LIST *name_list); +static char *get_cmd(const char *prompt); +static void eliminate_duplicate_filenames(); +static void eliminate_duplicate_paths(); +static void eliminate_orphaned_jobmedia_records(); +static void eliminate_orphaned_file_records(); +static void eliminate_orphaned_path_records(); +static void eliminate_orphaned_filename_records(); +static void eliminate_orphaned_fileset_records(); +static void eliminate_orphaned_client_records(); +static void eliminate_orphaned_job_records(); +static void eliminate_admin_records(); +static void eliminate_restore_records(); +static void eliminate_verify_records(); +static void repair_bad_paths(); +static void repair_bad_filenames(); +static void do_interactive_mode(); +static bool yes_no(const char *prompt); +static bool check_idx(const char *col_name); +static bool create_tmp_idx(const char *idx_name, const char *table_name, + const char *col_name); +static bool drop_tmp_idx(const char *idx_name, const char *table_name); +static int check_idx_handler(void *ctx, int num_fields, char **row); + +static void usage() +{ + fprintf(stderr, +PROG_COPYRIGHT +"\n%sVersion: %s (%s)\n\n" +"Usage: dbcheck [-c config ] [-B] [-C catalog name] [-d debug_level] [] [] [] [] [] [] []\n" +" -b batch mode\n" +" -C catalog name in the director conf file\n" +" -c Director conf filename\n" +" -B print catalog configuration and exit\n" +" -d set debug level to \n" +" -dt print a timestamp in debug output\n" +" -f fix inconsistencies\n" +" -t test if client library is thread-safe\n" +" -v verbose\n" +" -? 
print this message\n" +"\n", 2002, "", VERSION, BDATE); + + exit(1); +} + +int main (int argc, char *argv[]) +{ + int ch; + const char *user, *password, *db_name, *dbhost; + const char *dbsslmode = NULL, *dbsslkey = NULL, *dbsslcert = NULL, *dbsslca = NULL; + const char *dbsslcapath = NULL, *dbsslcipher = NULL; + int dbport = 0; + bool print_catalog=false; + char *configfile = NULL; + char *catalogname = NULL; + char *endptr; + + setlocale(LC_ALL, ""); + bindtextdomain("bacula", LOCALEDIR); + textdomain("bacula"); + lmgr_init_thread(); + + my_name_is(argc, argv, "dbcheck"); + init_msg(NULL, NULL); /* setup message handler */ + + memset(&id_list, 0, sizeof(id_list)); + memset(&name_list, 0, sizeof(name_list)); + + while ((ch = getopt(argc, argv, "bc:C:d:fvB?")) != -1) { + switch (ch) { + case 'B': + print_catalog = true; /* get catalog information from config */ + break; + case 'b': /* batch */ + batch = true; + break; + case 'C': /* CatalogName */ + catalogname = optarg; + break; + case 'c': /* configfile */ + configfile = optarg; + break; + case 'd': /* debug level */ + if (*optarg == 't') { + dbg_timestamp = true; + } else { + debug_level = atoi(optarg); + if (debug_level <= 0) { + debug_level = 1; + } + } + break; + case 'f': /* fix inconsistencies */ + fix = true; + break; + case 'v': + verbose++; + break; + case '?': + default: + usage(); + } + } + argc -= optind; + argv += optind; + + OSDependentInit(); + + if (configfile) { + CAT *catalog = NULL; + int found = 0; + if (argc > 0) { + Pmsg0(0, _("Warning skipping the additional parameters for working directory/dbname/user/password/host.\n")); + } + config = New(CONFIG()); + parse_dir_config(config, configfile, M_ERROR_TERM); + LockRes(); + foreach_res(catalog, R_CATALOG) { + if (catalogname && !strcmp(catalog->hdr.name, catalogname)) { + ++found; + break; + } else if (!catalogname) { // stop on first if no catalogname is given + ++found; + break; + } + } + UnlockRes(); + if (!found) { + if (catalogname) { + Pmsg2(0, _("Error can not find the Catalog name[%s] in the given config file [%s]\n"), catalogname, configfile); + } else { + Pmsg1(0, _("Error there is no Catalog section in the given config file [%s]\n"), configfile); + } + exit(1); + } else { + DIRRES *director; + LockRes(); + director = (DIRRES *)GetNextRes(R_DIRECTOR, NULL); + UnlockRes(); + if (!director) { + Pmsg0(0, _("Error no Director resource defined.\n")); + exit(1); + } + set_working_directory(director->working_directory); + + /* Print catalog information and exit (-B) */ + if (print_catalog) { + + POOLMEM *catalog_details = get_pool_memory(PM_MESSAGE); + db = db_init_database(NULL, catalog->db_driver, catalog->db_name, catalog->db_user, + catalog->db_password, catalog->db_address, + catalog->db_port, catalog->db_socket, + catalog->db_ssl_mode, + catalog->db_ssl_key, catalog->db_ssl_cert, + catalog->db_ssl_ca, + catalog->db_ssl_capath, catalog->db_ssl_cipher, + catalog->mult_db_connections, + catalog->disable_batch_insert); + if (db) { + printf("%sdb_type=%s\nworking_dir=%s\n", catalog->display(catalog_details), + db_get_engine_name(db), working_directory); + db_close_database(NULL, db); + } + free_pool_memory(catalog_details); + exit(0); + } + + db_name = catalog->db_name; + user = catalog->db_user; + password = catalog->db_password; + dbhost = catalog->db_address; + if (dbhost && dbhost[0] == 0) { + dbhost = NULL; + } + dbport = catalog->db_port; + dbsslmode = catalog->db_ssl_mode; + dbsslkey = catalog->db_ssl_key; + dbsslcert = catalog->db_ssl_cert; + dbsslca = 
catalog->db_ssl_ca; + dbsslcapath = catalog->db_ssl_capath; + dbsslcipher = catalog->db_ssl_cipher; + } + } else { + if (argc > 10) { + Pmsg0(0, _("Wrong number of arguments.\n")); + usage(); + } + + if (argc < 1) { + Pmsg0(0, _("Working directory not supplied.\n")); + usage(); + } + + /* This is needed by SQLite to find the db */ + working_directory = argv[0]; + db_name = "bacula"; + user = db_name; + password = ""; + dbhost = NULL; + + if (argc >= 2) { + db_name = argv[1]; + user = db_name; + if (argc >= 3) { + user = argv[2]; + if (argc >= 4) { + password = argv[3]; + if (argc >= 5) { + dbhost = argv[4]; + if (argc >= 6) { + errno = 0; + dbport = strtol(argv[5], &endptr, 10); + if (*endptr != '\0') { + Pmsg0(0, _("Database port must be a numeric value.\n")); + exit(1); + } else if (errno == ERANGE) { + Pmsg0(0, _("Database port must be a int value.\n")); + exit(1); + } + if (argc >= 7) { + dbsslmode = argv[6]; + if (argc >= 8) { + dbsslkey = argv[7]; + dbsslcert = argv[8]; + if (argc == 10) { + dbsslca = argv[9]; + } /* if (argc == 10) */ + } /* if (argc >= 8) */ + } /* if (argc >= 7) */ + } /* if (argc >= 6) */ + } /* if (argc >= 5) */ + } /* if (argc >= 4) */ + } /* if (argc >= 3) */ + } /* if (argc >= 2) */ + } + + /* Open database */ + db = db_init_database(NULL, NULL, db_name, user, password, dbhost, + dbport, NULL, dbsslmode, dbsslkey, dbsslcert, dbsslca, + dbsslcapath, dbsslcipher, false, false); + + if (!db || !db_open_database(NULL, db)) { + Emsg1(M_FATAL, 0, "%s", db_strerror(db)); + return 1; + } + + /* Drop temporary index idx_tmp_name if it already exists */ + drop_tmp_idx("idxPIchk", "File"); + + if (batch) { + repair_bad_paths(); + repair_bad_filenames(); + eliminate_duplicate_filenames(); + eliminate_duplicate_paths(); + eliminate_orphaned_jobmedia_records(); + eliminate_orphaned_file_records(); + eliminate_orphaned_path_records(); + eliminate_orphaned_filename_records(); + eliminate_orphaned_fileset_records(); + eliminate_orphaned_client_records(); + eliminate_orphaned_job_records(); + eliminate_admin_records(); + eliminate_restore_records(); + } else { + do_interactive_mode(); + } + + /* Drop temporary index idx_tmp_name */ + drop_tmp_idx("idxPIchk", "File"); + + if (db) db_close_database(NULL, db); + close_msg(NULL); + term_msg(); + lmgr_cleanup_main(); + return 0; +} + +void print_catalog_details(CAT *catalog, const char *working_dir) +{ + POOLMEM *catalog_details = get_pool_memory(PM_MESSAGE); + + /* + * Instantiate a BDB class and see what db_type gets assigned to it. 
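+ * This mirrors the inline handling of the -B option in main() above, which
+ * prints the same catalog details and then exits.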
+ */ + db = db_init_database(NULL, catalog->db_driver, catalog->db_name, catalog->db_user, + catalog->db_password, catalog->db_address, + catalog->db_port, catalog->db_socket, + catalog->db_ssl_mode, catalog->db_ssl_key, + catalog->db_ssl_cert, catalog->db_ssl_ca, + catalog->db_ssl_capath, catalog->db_ssl_cipher, + catalog->mult_db_connections, + catalog->disable_batch_insert); + if (db) { + printf("%sdb_type=%s\nworking_dir=%s\n", catalog->display(catalog_details), + db_get_engine_name(db), working_directory); + db_close_database(NULL, db); + } + free_pool_memory(catalog_details); +} + +static void do_interactive_mode() +{ + const char *cmd; + + printf(_("Hello, this is the database check/correct program.\n")); + if (fix) + printf(_("Modify database is on.")); + else + printf(_("Modify database is off.")); + if (verbose) + printf(_(" Verbose is on.\n")); + else + printf(_(" Verbose is off.\n")); + + printf(_("Please select the function you want to perform.\n")); + + while (!quit) { + if (fix) { + printf(_("\n" +" 1) Toggle modify database flag\n" +" 2) Toggle verbose flag\n" +" 3) Repair bad Filename records\n" +" 4) Repair bad Path records\n" +" 5) Eliminate duplicate Filename records\n" +" 6) Eliminate duplicate Path records\n" +" 7) Eliminate orphaned Jobmedia records\n" +" 8) Eliminate orphaned File records\n" +" 9) Eliminate orphaned Path records\n" +" 10) Eliminate orphaned Filename records\n" +" 11) Eliminate orphaned FileSet records\n" +" 12) Eliminate orphaned Client records\n" +" 13) Eliminate orphaned Job records\n" +" 14) Eliminate all Admin records\n" +" 15) Eliminate all Restore records\n" +" 16) Eliminate all Verify records\n" +" 17) All (3-16)\n" +" 18) Quit\n")); + } else { + printf(_("\n" +" 1) Toggle modify database flag\n" +" 2) Toggle verbose flag\n" +" 3) Check for bad Filename records\n" +" 4) Check for bad Path records\n" +" 5) Check for duplicate Filename records\n" +" 6) Check for duplicate Path records\n" +" 7) Check for orphaned Jobmedia records\n" +" 8) Check for orphaned File records\n" +" 9) Check for orphaned Path records\n" +" 10) Check for orphaned Filename records\n" +" 11) Check for orphaned FileSet records\n" +" 12) Check for orphaned Client records\n" +" 13) Check for orphaned Job records\n" +" 14) Check for all Admin records\n" +" 15) Check for all Restore records\n" +" 16) Check for all Verify records\n" +" 17) All (3-16)\n" +" 18) Quit\n")); + } + + cmd = get_cmd(_("Select function number: ")); + if (cmd) { + int item = atoi(cmd); + switch (item) { + case 1: + fix = !fix; + if (fix) + printf(_("Database will be modified.\n")); + else + printf(_("Database will NOT be modified.\n")); + break; + case 2: + verbose = verbose?0:1; + if (verbose) + printf(_(" Verbose is on.\n")); + else + printf(_(" Verbose is off.\n")); + break; + case 3: + repair_bad_filenames(); + break; + case 4: + repair_bad_paths(); + break; + case 5: + eliminate_duplicate_filenames(); + break; + case 6: + eliminate_duplicate_paths(); + break; + case 7: + eliminate_orphaned_jobmedia_records(); + break; + case 8: + eliminate_orphaned_file_records(); + break; + case 9: + eliminate_orphaned_path_records(); + break; + case 10: + eliminate_orphaned_filename_records(); + break; + case 11: + eliminate_orphaned_fileset_records(); + break; + case 12: + eliminate_orphaned_client_records(); + break; + case 13: + eliminate_orphaned_job_records(); + break; + case 14: + eliminate_admin_records(); + break; + case 15: + eliminate_restore_records(); + break; + case 16: + eliminate_verify_records(); 
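+         /* Option 17 below simply chains items 3-16.  The -b batch mode in main()
+          * runs the same cleanups but does not purge Verify job records. */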
+ break; + case 17: + repair_bad_filenames(); + repair_bad_paths(); + eliminate_duplicate_filenames(); + eliminate_duplicate_paths(); + eliminate_orphaned_jobmedia_records(); + eliminate_orphaned_file_records(); + eliminate_orphaned_path_records(); + eliminate_orphaned_filename_records(); + eliminate_orphaned_fileset_records(); + eliminate_orphaned_client_records(); + eliminate_orphaned_job_records(); + eliminate_admin_records(); + eliminate_restore_records(); + eliminate_verify_records(); + break; + case 18: + quit = true; + break; + } + } + } +} + +static int print_name_handler(void *ctx, int num_fields, char **row) +{ + if (row[0]) { + printf("%s\n", row[0]); + } + return 0; +} + +static int get_name_handler(void *ctx, int num_fields, char **row) +{ + POOLMEM *buf = (POOLMEM *)ctx; + if (row[0]) { + pm_strcpy(&buf, row[0]); + } + return 0; +} + +static int print_job_handler(void *ctx, int num_fields, char **row) +{ + printf(_("JobId=%s Name=\"%s\" StartTime=%s\n"), + NPRT(row[0]), NPRT(row[1]), NPRT(row[2])); + return 0; +} + +static int print_jobmedia_handler(void *ctx, int num_fields, char **row) +{ + printf(_("Orphaned JobMediaId=%s JobId=%s Volume=\"%s\"\n"), + NPRT(row[0]), NPRT(row[1]), NPRT(row[2])); + return 0; +} + +static int print_file_handler(void *ctx, int num_fields, char **row) +{ + printf(_("Orphaned FileId=%s JobId=%s Volume=\"%s\"\n"), + NPRT(row[0]), NPRT(row[1]), NPRT(row[2])); + return 0; +} + +static int print_fileset_handler(void *ctx, int num_fields, char **row) +{ + printf(_("Orphaned FileSetId=%s FileSet=\"%s\" MD5=%s\n"), + NPRT(row[0]), NPRT(row[1]), NPRT(row[2])); + return 0; +} + +static int print_client_handler(void *ctx, int num_fields, char **row) +{ + printf(_("Orphaned ClientId=%s Name=\"%s\"\n"), + NPRT(row[0]), NPRT(row[1])); + return 0; +} + +/* + * Called here with each id to be added to the list + */ +static int id_list_handler(void *ctx, int num_fields, char **row) +{ + ID_LIST *lst = (ID_LIST *)ctx; + + if (lst->num_ids == MAX_ID_LIST_LEN) { + return 1; + } + if (lst->num_ids == lst->max_ids) { + if (lst->max_ids == 0) { + lst->max_ids = 10000; + lst->Id = (int64_t *)bmalloc(sizeof(int64_t) * lst->max_ids); + } else { + lst->max_ids = (lst->max_ids * 3) / 2; + lst->Id = (int64_t *)brealloc(lst->Id, sizeof(int64_t) * lst->max_ids); + } + } + lst->Id[lst->num_ids++] = str_to_int64(row[0]); + return 0; +} + +/* + * Construct record id list + */ +static int make_id_list(const char *query, ID_LIST *id_list) +{ + id_list->num_ids = 0; + id_list->num_del = 0; + id_list->tot_ids = 0; + + if (!db_sql_query(db, query, id_list_handler, (void *)id_list)) { + printf("%s", db_strerror(db)); + return 0; + } + return 1; +} + +/* + * Delete all entries in the list + */ +static int delete_id_list(const char *query, ID_LIST *id_list) +{ + char ed1[50]; + for (int i=0; i < id_list->num_ids; i++) { + bsnprintf(buf, sizeof(buf), query, edit_int64(id_list->Id[i], ed1)); + if (verbose) { + printf(_("Deleting: %s\n"), buf); + } + db_sql_query(db, buf, NULL, NULL); + } + return 1; +} + +/* + * Called here with each name to be added to the list + */ +static int name_list_handler(void *ctx, int num_fields, char **row) +{ + NAME_LIST *name = (NAME_LIST *)ctx; + + if (name->num_ids == MAX_ID_LIST_LEN) { + return 1; + } + if (name->num_ids == name->max_ids) { + if (name->max_ids == 0) { + name->max_ids = 10000; + name->name = (char **)bmalloc(sizeof(char *) * name->max_ids); + } else { + name->max_ids = (name->max_ids * 3) / 2; + name->name = (char **)brealloc(name->name, 
sizeof(char *) * name->max_ids); + } + } + name->name[name->num_ids++] = bstrdup(row[0]); + return 0; +} + +/* + * Construct name list + */ +static int make_name_list(const char *query, NAME_LIST *name_list) +{ + name_list->num_ids = 0; + name_list->num_del = 0; + name_list->tot_ids = 0; + + if (!db_sql_query(db, query, name_list_handler, (void *)name_list)) { + printf("%s", db_strerror(db)); + return 0; + } + return 1; +} + +/* + * Print names in the list + */ +static void print_name_list(NAME_LIST *name_list) +{ + for (int i=0; i < name_list->num_ids; i++) { + printf("%s\n", name_list->name[i]); + } +} + +/* + * Free names in the list + */ +static void free_name_list(NAME_LIST *name_list) +{ + for (int i=0; i < name_list->num_ids; i++) { + free(name_list->name[i]); + } + name_list->num_ids = 0; +} + +static void eliminate_duplicate_filenames() +{ + const char *query; + char esc_name[5000]; + + printf(_("Checking for duplicate Filename entries.\n")); + + /* Make list of duplicated names */ + query = "SELECT Name, count(Name) as Count FROM Filename GROUP BY Name " + "HAVING count(Name) > 1"; + + if (!make_name_list(query, &name_list)) { + exit(1); + } + printf(_("Found %d duplicate Filename records.\n"), name_list.num_ids); + if (name_list.num_ids && verbose && yes_no(_("Print the list? (yes/no): "))) { + print_name_list(&name_list); + } + if (quit) { + return; + } + if (fix) { + /* Loop through list of duplicate names */ + for (int i=0; i 1) { + printf("%s\n", buf); + } + if (!make_id_list(buf, &id_list)) { + exit(1); + } + if (verbose) { + printf(_("Found %d for: %s\n"), id_list.num_ids, name_list.name[i]); + } + /* Force all records to use the first id then delete the other ids */ + for (int j=1; j 1) { + printf("%s\n", buf); + } + db_sql_query(db, buf, NULL, NULL); + bsnprintf(buf, sizeof(buf), "DELETE FROM Filename WHERE FilenameId=%s", + ed2); + if (verbose > 2) { + printf("%s\n", buf); + } + db_sql_query(db, buf, NULL, NULL); + } + } + } + free_name_list(&name_list); +} + +static void eliminate_duplicate_paths() +{ + const char *query; + char esc_name[5000]; + + printf(_("Checking for duplicate Path entries.\n")); + + /* Make list of duplicated names */ + query = "SELECT Path, count(Path) as Count FROM Path " + "GROUP BY Path HAVING count(Path) > 1"; + + if (!make_name_list(query, &name_list)) { + exit(1); + } + printf(_("Found %d duplicate Path records.\n"), name_list.num_ids); + if (name_list.num_ids && verbose && yes_no(_("Print them? 
(yes/no): "))) { + print_name_list(&name_list); + } + if (quit) { + return; + } + if (fix) { + /* Loop through list of duplicate names */ + for (int i=0; i 1) { + printf("%s\n", buf); + } + if (!make_id_list(buf, &id_list)) { + exit(1); + } + if (verbose) { + printf(_("Found %d for: %s\n"), id_list.num_ids, name_list.name[i]); + } + /* Force all records to use the first id then delete the other ids */ + for (int j=1; j 1) { + printf("%s\n", buf); + } + db_sql_query(db, buf, NULL, NULL); + bsnprintf(buf, sizeof(buf), "DELETE FROM Path WHERE PathId=%s", ed2); + if (verbose > 2) { + printf("%s\n", buf); + } + db_sql_query(db, buf, NULL, NULL); + } + } + } + free_name_list(&name_list); +} + +static void eliminate_orphaned_jobmedia_records() +{ + const char *query = "SELECT JobMedia.JobMediaId,Job.JobId FROM JobMedia " + "LEFT OUTER JOIN Job ON (JobMedia.JobId=Job.JobId) " + "WHERE Job.JobId IS NULL LIMIT 300000"; + + printf(_("Checking for orphaned JobMedia entries.\n")); + if (!make_id_list(query, &id_list)) { + exit(1); + } + /* Loop doing 300000 at a time */ + while (id_list.num_ids != 0) { + printf(_("Found %d orphaned JobMedia records.\n"), id_list.num_ids); + if (id_list.num_ids && verbose && yes_no(_("Print them? (yes/no): "))) { + for (int i=0; i < id_list.num_ids; i++) { + char ed1[50]; + bsnprintf(buf, sizeof(buf), +"SELECT JobMedia.JobMediaId,JobMedia.JobId,Media.VolumeName FROM JobMedia,Media " + "WHERE JobMedia.JobMediaId=%s AND Media.MediaId=JobMedia.MediaId", + edit_int64(id_list.Id[i], ed1)); + if (!db_sql_query(db, buf, print_jobmedia_handler, NULL)) { + printf("%s\n", db_strerror(db)); + } + } + } + if (quit) { + return; + } + + if (fix && id_list.num_ids > 0) { + printf(_("Deleting %d orphaned JobMedia records.\n"), id_list.num_ids); + delete_id_list("DELETE FROM JobMedia WHERE JobMediaId=%s", &id_list); + } else { + break; /* get out if not updating db */ + } + if (!make_id_list(query, &id_list)) { + exit(1); + } + } +} + +static void eliminate_orphaned_file_records() +{ + const char *query = "SELECT File.FileId,Job.JobId FROM File " + "LEFT OUTER JOIN Job ON (File.JobId=Job.JobId) " + "WHERE Job.JobId IS NULL LIMIT 300000"; + + printf(_("Checking for orphaned File entries. This may take some time!\n")); + if (verbose > 1) { + printf("%s\n", query); + } + if (!make_id_list(query, &id_list)) { + exit(1); + } + /* Loop doing 300000 at a time */ + while (id_list.num_ids != 0) { + printf(_("Found %d orphaned File records.\n"), id_list.num_ids); + if (name_list.num_ids && verbose && yes_no(_("Print them? 
(yes/no): "))) { + for (int i=0; i < id_list.num_ids; i++) { + char ed1[50]; + bsnprintf(buf, sizeof(buf), +"SELECT File.FileId,File.JobId,Filename.Name FROM File,Filename " + "WHERE File.FileId=%s AND File.FilenameId=Filename.FilenameId", + edit_int64(id_list.Id[i], ed1)); + if (!db_sql_query(db, buf, print_file_handler, NULL)) { + printf("%s\n", db_strerror(db)); + } + } + } + if (quit) { + return; + } + if (fix && id_list.num_ids > 0) { + printf(_("Deleting %d orphaned File records.\n"), id_list.num_ids); + delete_id_list("DELETE FROM File WHERE FileId=%s", &id_list); + } else { + break; /* get out if not updating db */ + } + if (!make_id_list(query, &id_list)) { + exit(1); + } + } +} + +static void eliminate_orphaned_path_records() +{ + db_int64_ctx lctx; + lctx.count=0; + db_sql_query(db, "SELECT 1 FROM Job WHERE HasCache=1 LIMIT 1", + db_int64_handler, &lctx); + + /* The BVFS code uses Path records that are not in the File table, for + * example if a Job has /home/test/ BVFS will need to create a Path record / + * and /home/ to work correctly + */ + if (lctx.count == 1) { + printf(_("To prune orphaned Path entries, it is necessary to clear the BVFS Cache first with the bconsole \".bvfs_clear_cache yes\" command.\n")); + return; + } + + idx_tmp_name = NULL; + /* Check the existence of the required "one column" index */ + if (!check_idx("PathId")) { + if (yes_no(_("Create temporary index? (yes/no): "))) { + /* create temporary index PathId */ + create_tmp_idx("idxPIchk", "File", "PathId"); + } + } + + const char *query = "SELECT DISTINCT Path.PathId,File.PathId FROM Path " + "LEFT OUTER JOIN File ON (Path.PathId=File.PathId) " + "WHERE File.PathId IS NULL LIMIT 300000"; + + printf(_("Checking for orphaned Path entries. This may take some time!\n")); + if (verbose > 1) { + printf("%s\n", query); + } + if (!make_id_list(query, &id_list)) { + exit(1); + } + /* Loop doing 300000 at a time */ + while (id_list.num_ids != 0) { + printf(_("Found %d orphaned Path records.\n"), id_list.num_ids); + if (id_list.num_ids && verbose && yes_no(_("Print them? (yes/no): "))) { + for (int i=0; i < id_list.num_ids; i++) { + char ed1[50]; + bsnprintf(buf, sizeof(buf), "SELECT Path FROM Path WHERE PathId=%s", + edit_int64(id_list.Id[i], ed1)); + db_sql_query(db, buf, print_name_handler, NULL); + } + } + if (quit) { + return; + } + if (fix && id_list.num_ids > 0) { + printf(_("Deleting %d orphaned Path records.\n"), id_list.num_ids); + delete_id_list("DELETE FROM Path WHERE PathId=%s", &id_list); + } else { + break; /* get out if not updating db */ + } + if (!make_id_list(query, &id_list)) { + exit(1); + } + } + /* Drop temporary index idx_tmp_name */ + drop_tmp_idx("idxPIchk", "File"); +} + +static void eliminate_orphaned_filename_records() +{ + idx_tmp_name = NULL; + /* Check the existence of the required "one column" index */ + if (!check_idx("FilenameId") ) { + if (yes_no(_("Create temporary index? (yes/no): "))) { + /* Create temporary index FilenameId */ + create_tmp_idx("idxFIchk", "File", "FilenameId"); + } + } + + const char *query = "SELECT Filename.FilenameId,File.FilenameId FROM Filename " + "LEFT OUTER JOIN File ON (Filename.FilenameId=File.FilenameId) " + "WHERE File.FilenameId IS NULL LIMIT 300000"; + + printf(_("Checking for orphaned Filename entries. 
This may take some time!\n")); + if (verbose > 1) { + printf("%s\n", query); + } + if (!make_id_list(query, &id_list)) { + exit(1); + } + /* Loop doing 300000 at a time */ + while (id_list.num_ids != 0) { + printf(_("Found %d orphaned Filename records.\n"), id_list.num_ids); + if (id_list.num_ids && verbose && yes_no(_("Print them? (yes/no): "))) { + for (int i=0; i < id_list.num_ids; i++) { + char ed1[50]; + bsnprintf(buf, sizeof(buf), "SELECT Name FROM Filename WHERE FilenameId=%s", + edit_int64(id_list.Id[i], ed1)); + db_sql_query(db, buf, print_name_handler, NULL); + } + } + if (quit) { + return; + } + if (fix && id_list.num_ids > 0) { + printf(_("Deleting %d orphaned Filename records.\n"), id_list.num_ids); + delete_id_list("DELETE FROM Filename WHERE FilenameId=%s", &id_list); + } else { + break; /* get out if not updating db */ + } + if (!make_id_list(query, &id_list)) { + exit(1); + } + } + /* Drop temporary index idx_tmp_name */ + drop_tmp_idx("idxFIchk", "File"); + +} + +static void eliminate_orphaned_fileset_records() +{ + const char *query; + + printf(_("Checking for orphaned FileSet entries. This takes some time!\n")); + query = "SELECT FileSet.FileSetId,Job.FileSetId FROM FileSet " + "LEFT OUTER JOIN Job ON (FileSet.FileSetId=Job.FileSetId) " + "WHERE Job.FileSetId IS NULL"; + if (verbose > 1) { + printf("%s\n", query); + } + if (!make_id_list(query, &id_list)) { + exit(1); + } + printf(_("Found %d orphaned FileSet records.\n"), id_list.num_ids); + if (id_list.num_ids && verbose && yes_no(_("Print them? (yes/no): "))) { + for (int i=0; i < id_list.num_ids; i++) { + char ed1[50]; + bsnprintf(buf, sizeof(buf), "SELECT FileSetId,FileSet,MD5 FROM FileSet " + "WHERE FileSetId=%s", edit_int64(id_list.Id[i], ed1)); + if (!db_sql_query(db, buf, print_fileset_handler, NULL)) { + printf("%s\n", db_strerror(db)); + } + } + } + if (quit) { + return; + } + if (fix && id_list.num_ids > 0) { + printf(_("Deleting %d orphaned FileSet records.\n"), id_list.num_ids); + delete_id_list("DELETE FROM FileSet WHERE FileSetId=%s", &id_list); + } +} + +static void eliminate_orphaned_client_records() +{ + const char *query; + + printf(_("Checking for orphaned Client entries.\n")); + /* In English: + * Wiffle through Client for every Client + * joining with the Job table including every Client even if + * there is not a match in Job (left outer join), then + * filter out only those where no Job points to a Client + * i.e. Job.Client is NULL + */ + query = "SELECT Client.ClientId,Client.Name FROM Client " + "LEFT OUTER JOIN Job ON (Client.ClientId=Job.ClientId) " + "WHERE Job.ClientId IS NULL"; + if (verbose > 1) { + printf("%s\n", query); + } + if (!make_id_list(query, &id_list)) { + exit(1); + } + printf(_("Found %d orphaned Client records.\n"), id_list.num_ids); + if (id_list.num_ids && verbose && yes_no(_("Print them? 
(yes/no): "))) { + for (int i=0; i < id_list.num_ids; i++) { + char ed1[50]; + bsnprintf(buf, sizeof(buf), "SELECT ClientId,Name FROM Client " + "WHERE ClientId=%s", edit_int64(id_list.Id[i], ed1)); + if (!db_sql_query(db, buf, print_client_handler, NULL)) { + printf("%s\n", db_strerror(db)); + } + } + } + if (quit) { + return; + } + if (fix && id_list.num_ids > 0) { + printf(_("Deleting %d orphaned Client records.\n"), id_list.num_ids); + delete_id_list("DELETE FROM Client WHERE ClientId=%s", &id_list); + } +} + +static void eliminate_orphaned_job_records() +{ + const char *query; + + printf(_("Checking for orphaned Job entries.\n")); + /* In English: + * Wiffle through Job for every Job + * joining with the Client table including every Job even if + * there is not a match in Client (left outer join), then + * filter out only those where no Client exists + * i.e. Client.Name is NULL + */ + query = "SELECT Job.JobId,Job.Name FROM Job " + "LEFT OUTER JOIN Client ON (Job.ClientId=Client.ClientId) " + "WHERE Client.Name IS NULL"; + if (verbose > 1) { + printf("%s\n", query); + } + if (!make_id_list(query, &id_list)) { + exit(1); + } + printf(_("Found %d orphaned Job records.\n"), id_list.num_ids); + if (id_list.num_ids && verbose && yes_no(_("Print them? (yes/no): "))) { + for (int i=0; i < id_list.num_ids; i++) { + char ed1[50]; + bsnprintf(buf, sizeof(buf), "SELECT JobId,Name,StartTime FROM Job " + "WHERE JobId=%s", edit_int64(id_list.Id[i], ed1)); + if (!db_sql_query(db, buf, print_job_handler, NULL)) { + printf("%s\n", db_strerror(db)); + } + } + } + if (quit) { + return; + } + if (fix && id_list.num_ids > 0) { + printf(_("Deleting %d orphaned Job records.\n"), id_list.num_ids); + delete_id_list("DELETE FROM Job WHERE JobId=%s", &id_list); + printf(_("Deleting JobMedia records of orphaned Job records.\n")); + delete_id_list("DELETE FROM JobMedia WHERE JobId=%s", &id_list); + printf(_("Deleting Log records of orphaned Job records.\n")); + delete_id_list("DELETE FROM Log WHERE JobId=%s", &id_list); + } +} + +static void eliminate_admin_records() +{ + const char *query; + + printf(_("Checking for Admin Job entries.\n")); + query = "SELECT Job.JobId FROM Job " + "WHERE Job.Type='D'"; + if (verbose > 1) { + printf("%s\n", query); + } + if (!make_id_list(query, &id_list)) { + exit(1); + } + printf(_("Found %d Admin Job records.\n"), id_list.num_ids); + if (id_list.num_ids && verbose && yes_no(_("Print them? (yes/no): "))) { + for (int i=0; i < id_list.num_ids; i++) { + char ed1[50]; + bsnprintf(buf, sizeof(buf), "SELECT JobId,Name,StartTime FROM Job " + "WHERE JobId=%s", edit_int64(id_list.Id[i], ed1)); + if (!db_sql_query(db, buf, print_job_handler, NULL)) { + printf("%s\n", db_strerror(db)); + } + } + } + if (quit) { + return; + } + if (fix && id_list.num_ids > 0) { + printf(_("Deleting %d Admin Job records.\n"), id_list.num_ids); + delete_id_list("DELETE FROM Job WHERE JobId=%s", &id_list); + } +} + +static void eliminate_restore_records() +{ + const char *query; + + printf(_("Checking for Restore Job entries.\n")); + query = "SELECT Job.JobId FROM Job " + "WHERE Job.Type='R'"; + if (verbose > 1) { + printf("%s\n", query); + } + if (!make_id_list(query, &id_list)) { + exit(1); + } + printf(_("Found %d Restore Job records.\n"), id_list.num_ids); + if (id_list.num_ids && verbose && yes_no(_("Print them? 
(yes/no): "))) { + for (int i=0; i < id_list.num_ids; i++) { + char ed1[50]; + bsnprintf(buf, sizeof(buf), "SELECT JobId,Name,StartTime FROM Job " + "WHERE JobId=%s", edit_int64(id_list.Id[i], ed1)); + if (!db_sql_query(db, buf, print_job_handler, NULL)) { + printf("%s\n", db_strerror(db)); + } + } + } + if (quit) { + return; + } + if (fix && id_list.num_ids > 0) { + printf(_("Deleting %d Restore Job records.\n"), id_list.num_ids); + delete_id_list("DELETE FROM Job WHERE JobId=%s", &id_list); + } +} + +static void eliminate_verify_records() +{ + const char *query; + + printf(_("Checking for Verify Job entries.\n")); + query = "SELECT Job.JobId FROM Job " + "WHERE Job.Type='V'"; + if (verbose > 1) { + printf("%s\n", query); + } + if (!make_id_list(query, &id_list)) { + exit(1); + } + printf(_("Found %d Verify Job records.\n"), id_list.num_ids); + if (id_list.num_ids && verbose && yes_no(_("Print them? (yes/no): "))) { + for (int i=0; i < id_list.num_ids; i++) { + char ed1[50]; + bsnprintf(buf, sizeof(buf), "SELECT JobId,Name,StartTime FROM Job " + "WHERE JobId=%s", edit_int64(id_list.Id[i], ed1)); + if (!db_sql_query(db, buf, print_job_handler, NULL)) { + printf("%s\n", db_strerror(db)); + } + } + } + if (quit) { + return; + } + if (fix && id_list.num_ids > 0) { + printf(_("Deleting %d Verify Job records.\n"), id_list.num_ids); + delete_id_list("DELETE FROM Job WHERE JobId=%s", &id_list); + } +} + +static void repair_bad_filenames() +{ + const char *query; + int i; + + printf(_("Checking for Filenames with a trailing slash\n")); + query = "SELECT FilenameId,Name from Filename " + "WHERE Name LIKE '%/'"; + if (verbose > 1) { + printf("%s\n", query); + } + if (!make_id_list(query, &id_list)) { + exit(1); + } + printf(_("Found %d bad Filename records.\n"), id_list.num_ids); + if (id_list.num_ids && verbose && yes_no(_("Print them? 
(yes/no): "))) { + for (i=0; i < id_list.num_ids; i++) { + char ed1[50]; + bsnprintf(buf, sizeof(buf), + "SELECT Name FROM Filename WHERE FilenameId=%s", + edit_int64(id_list.Id[i], ed1)); + if (!db_sql_query(db, buf, print_name_handler, NULL)) { + printf("%s\n", db_strerror(db)); + } + } + } + if (quit) { + return; + } + if (fix && id_list.num_ids > 0) { + POOLMEM *name = get_pool_memory(PM_FNAME); + char esc_name[5000]; + printf(_("Reparing %d bad Filename records.\n"), id_list.num_ids); + for (i=0; i < id_list.num_ids; i++) { + int len; + char ed1[50]; + bsnprintf(buf, sizeof(buf), + "SELECT Name FROM Filename WHERE FilenameId=%s", + edit_int64(id_list.Id[i], ed1)); + if (!db_sql_query(db, buf, get_name_handler, name)) { + printf("%s\n", db_strerror(db)); + } + /* Strip trailing slash(es) */ + for (len=strlen(name); len > 0 && IsPathSeparator(name[len-1]); len--) + { } + if (len == 0) { + len = 1; + esc_name[0] = ' '; + esc_name[1] = 0; + } else { + name[len-1] = 0; + db_escape_string(NULL, db, esc_name, name, len); + } + bsnprintf(buf, sizeof(buf), + "UPDATE Filename SET Name='%s' WHERE FilenameId=%s", + esc_name, edit_int64(id_list.Id[i], ed1)); + if (verbose > 1) { + printf("%s\n", buf); + } + db_sql_query(db, buf, NULL, NULL); + } + free_pool_memory(name); + } +} + +static void repair_bad_paths() +{ + const char *query; + int i; + + printf(_("Checking for Paths without a trailing slash\n")); + query = "SELECT PathId,Path from Path " + "WHERE Path NOT LIKE '%/'"; + if (verbose > 1) { + printf("%s\n", query); + } + if (!make_id_list(query, &id_list)) { + exit(1); + } + printf(_("Found %d bad Path records.\n"), id_list.num_ids); + if (id_list.num_ids && verbose && yes_no(_("Print them? (yes/no): "))) { + for (i=0; i < id_list.num_ids; i++) { + char ed1[50]; + bsnprintf(buf, sizeof(buf), + "SELECT Path FROM Path WHERE PathId=%s", edit_int64(id_list.Id[i], ed1)); + if (!db_sql_query(db, buf, print_name_handler, NULL)) { + printf("%s\n", db_strerror(db)); + } + } + } + if (quit) { + return; + } + if (fix && id_list.num_ids > 0) { + POOLMEM *name = get_pool_memory(PM_FNAME); + char esc_name[5000]; + printf(_("Reparing %d bad Filename records.\n"), id_list.num_ids); + for (i=0; i < id_list.num_ids; i++) { + int len; + char ed1[50]; + bsnprintf(buf, sizeof(buf), + "SELECT Path FROM Path WHERE PathId=%s", edit_int64(id_list.Id[i], ed1)); + if (!db_sql_query(db, buf, get_name_handler, name)) { + printf("%s\n", db_strerror(db)); + } + /* Strip trailing blanks */ + for (len=strlen(name); len > 0 && name[len-1]==' '; len--) { + name[len-1] = 0; + } + /* Add trailing slash */ + len = pm_strcat(&name, "/"); + db_escape_string(NULL, db, esc_name, name, len); + bsnprintf(buf, sizeof(buf), "UPDATE Path SET Path='%s' WHERE PathId=%s", + esc_name, edit_int64(id_list.Id[i], ed1)); + if (verbose > 1) { + printf("%s\n", buf); + } + db_sql_query(db, buf, NULL, NULL); + } + free_pool_memory(name); + } +} + +/* + * Gen next input command from the terminal + */ +static char *get_cmd(const char *prompt) +{ + static char cmd[1000]; + + printf("%s", prompt); + if (fgets(cmd, sizeof(cmd), stdin) == NULL) { + printf("\n"); + quit = true; + return NULL; + } + strip_trailing_junk(cmd); + return cmd; +} + +static bool yes_no(const char *prompt) +{ + char *cmd; + cmd = get_cmd(prompt); + if (!cmd) { + quit = true; + return false; + } + return (strcasecmp(cmd, "yes") == 0) || (strcasecmp(cmd, _("yes")) == 0); +} + +bool python_set_prog(JCR*, char const*) { return false; } + +/* + * The code below to add indexes is 
needed only for MySQL, and + * that to improve the performance. + */ + +#define MAXIDX 100 +typedef struct s_idx_list { + char *key_name; + int count_key; /* how many times the index meets *key_name */ + int count_col; /* how many times meets the desired column name */ +} IDX_LIST; + +static IDX_LIST idx_list[MAXIDX]; + +/* + * Called here with each table index to be added to the list + */ +static int check_idx_handler(void *ctx, int num_fields, char **row) +{ + /* + * Table | Non_unique | Key_name | Seq_in_index | Column_name |... + * File | 0 | PRIMARY | 1 | FileId |... + */ + char *name, *key_name, *col_name; + int i, len; + int found = false; + + name = (char *)ctx; + key_name = row[2]; + col_name = row[4]; + for(i = 0; (idx_list[i].key_name != NULL) && (i < MAXIDX); i++) { + if (strcasecmp(idx_list[i].key_name, key_name) == 0 ) { + idx_list[i].count_key++; + found = true; + if (strcasecmp(col_name, name) == 0) { + idx_list[i].count_col++; + } + break; + } + } + /* If the new Key_name, add it to the list */ + if (!found) { + len = strlen(key_name) + 1; + idx_list[i].key_name = (char *)malloc(len); + bstrncpy(idx_list[i].key_name, key_name, len); + idx_list[i].count_key = 1; + if (strcasecmp(col_name, name) == 0) { + idx_list[i].count_col = 1; + } else { + idx_list[i].count_col = 0; + } + } + return 0; +} + +/* + * Return TRUE if "one column" index over *col_name exists + */ +static bool check_idx(const char *col_name) +{ + int i; + int found = false; + const char *query = "SHOW INDEX FROM File"; + + if (db_get_type_index(db) != SQL_TYPE_MYSQL) { + return true; + } + /* Continue for MySQL */ + memset(&idx_list, 0, sizeof(idx_list)); + if (!db_sql_query(db, query, check_idx_handler, (void *)col_name)) { + printf("%s\n", db_strerror(db)); + } + for (i = 0; (idx_list[i].key_name != NULL) && (i < MAXIDX) ; i++) { + /* + * NOTE : if (idx_list[i].count_key > 1) then index idx_list[i].key_name is "multiple-column" index + */ + if ((idx_list[i].count_key == 1) && (idx_list[i].count_col == 1)) { + /* "one column" index over *col_name found */ + found = true; + } + } + if (found) { + if (verbose) { + printf(_("Ok. Index over the %s column already exists and dbcheck will work faster.\n"), col_name); + } + } else { + printf(_("Note. Index over the %s column not found, that can greatly slow down dbcheck.\n"), col_name); + } + return found; +} + +/* + * Create temporary one-column index + */ +static bool create_tmp_idx(const char *idx_name, const char *table_name, + const char *col_name) +{ + idx_tmp_name = NULL; + printf(_("Create temporary index... 
This may take some time!\n")); + bsnprintf(buf, sizeof(buf), "CREATE INDEX %s ON %s (%s)", idx_name, table_name, col_name); + if (verbose) { + printf("%s\n", buf); + } + if (db_sql_query(db, buf, NULL, NULL)) { + idx_tmp_name = idx_name; + if (verbose) { + printf(_("Temporary index created.\n")); + } + } else { + printf("%s\n", db_strerror(db)); + return false; + } + return true; +} + +/* + * Drop temporary index + */ +static bool drop_tmp_idx(const char *idx_name, const char *table_name) +{ + if (idx_tmp_name != NULL) { + printf(_("Drop temporary index.\n")); + bsnprintf(buf, sizeof(buf), "DROP INDEX %s ON %s", idx_name, table_name); + if (verbose) { + printf("%s\n", buf); + } + if (!db_sql_query(db, buf, NULL, NULL)) { + printf("%s\n", db_strerror(db)); + return false; + } else { + if (verbose) { + printf(_("Temporary index %s deleted.\n"), idx_tmp_name); + } + } + } + idx_tmp_name = NULL; + return true; +} diff --git a/src/tools/drivetype.c b/src/tools/drivetype.c new file mode 100644 index 00000000..7d421c47 --- /dev/null +++ b/src/tools/drivetype.c @@ -0,0 +1,122 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Program for determining drive type + * + * Written by Robert Nelson, June 2006 + */ + +#include "bacula.h" +#include "findlib/find.h" + +static void usage() +{ + fprintf(stderr, _( +"\n" +"Usage: drivetype [-v] path ...\n" +"\n" +" Print the drive type a given file/directory is on.\n" +" The following options are supported:\n" +"\n" +" -l print local fixed hard drive\n" +" -a display information on all drives\n" +" -v print both path and file system type.\n" +" -? 
print this message.\n" +"\n")); + + exit(1); +} + +int display_drive(char *drive, bool display_local, int verbose) +{ + char dt[100]; + int status = 0; + + if (drivetype(drive, dt, sizeof(dt))) { + if (display_local) { /* in local mode, display only harddrive */ + if (strcmp(dt, "fixed") == 0) { + printf("%s\n", drive); + } + } else if (verbose) { + printf("%s: %s\n", drive, dt); + } else { + puts(dt); + } + } else if (!display_local) { /* local mode is used by FileSet scripts */ + fprintf(stderr, _("%s: unknown\n"), drive); + status = 1; + } + return status; +} + +int +main (int argc, char *const *argv) +{ + int verbose = 0; + int status = 0; + int ch, i; + bool display_local = false; + bool display_all = false; + char drive='A'; + char buf[16]; + + setlocale(LC_ALL, ""); + bindtextdomain("bacula", LOCALEDIR); + textdomain("bacula"); + + while ((ch = getopt(argc, argv, "alv?")) != -1) { + switch (ch) { + case 'v': + verbose = 1; + break; + case 'l': + display_local = true; + break; + case 'a': + display_all = true; + break; + case '?': + default: + usage(); + + } + } + argc -= optind; + argv += optind; + + OSDependentInit(); + + if (argc < 1 && display_all) { + /* Try all letters */ + for (drive = 'A'; drive <= 'Z'; drive++) { + bsnprintf(buf, sizeof(buf), "%c:/", drive); + display_drive(buf, display_local, verbose); + } + exit(status); + } + + if (argc < 1) { + usage(); + } + + for (i = 0; i < argc; --argc, ++argv) { + status += display_drive(*argv, display_local, verbose); + } + exit(status); +} diff --git a/src/tools/fstype.c b/src/tools/fstype.c new file mode 100644 index 00000000..f8240a7c --- /dev/null +++ b/src/tools/fstype.c @@ -0,0 +1,172 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Program for determining file system type + * + * Written by Preben 'Peppe' Guldberg, December MMIV + */ + +#include "bacula.h" +#include "findlib/find.h" + +static void usage() +{ + fprintf(stderr, _( +"\n" +"Usage: fstype [-v] path ...\n" +"\n" +" Print the file system type for each file/directory argument given.\n" +" The following options are supported:\n" +"\n" +" -l print all file system types in mtab.\n" +" -m print full entries in mtab.\n" +" -v print both path and file system type of each argument.\n" +" -? 
print this message.\n" +"\n")); + + exit(1); +} + +struct mtab_item { + rblink link; + uint64_t dev; + char fstype[1]; +}; + +/* Compare two device types */ +static int compare_mtab_items(void *item1, void *item2) +{ + mtab_item *mtab1, *mtab2; + mtab1 = (mtab_item *)item1; + mtab2 = (mtab_item *)item2; + if (mtab1->dev < mtab2->dev) return -1; + if (mtab1->dev > mtab2->dev) return 1; + return 0; +} + +void print_mtab_item(void *user_ctx, struct stat *st, const char *fstype, + const char *mountpoint, const char *mntopts, + const char *fsname) +{ + fprintf(stderr, "dev=%p fstype=%s mountpoint=%s mntopts=%s\n", + ((void *)st->st_dev), fstype, mountpoint, mntopts); +} + +static void add_mtab_item(void *user_ctx, struct stat *st, const char *fstype, + const char *mountpoint, const char *mntopts, + const char *fsname) +{ + rblist *mtab_list = (rblist *)user_ctx; + mtab_item *item, *ritem; + int len = strlen(fstype) + 1; + + item = (mtab_item *)malloc(sizeof(mtab_item) + len); + item->dev = (uint64_t)st->st_dev; + bstrncpy(item->fstype, fstype, len); + //fprintf(stderr, "Add dev=%lx fstype=%s\n", item->dev, item->fstype); + ritem = (mtab_item *)mtab_list->insert((void *)item, compare_mtab_items); + if (ritem != item) { + fprintf(stderr, "Problem!! Returned item not equal added item\n"); + } + //fprintf(stderr, "dev=%p fstype=%s mountpoint=%s mntopts=%s\n", + // ((void *)st->st_dev), fstype, mountpoint, mntopts); +} + + +int main (int argc, char *const *argv) +{ + char fs[1000]; + bool verbose = false; + bool list = false; + bool mtab = false; + int status = 0; + int ch, i; + + setlocale(LC_ALL, ""); + bindtextdomain("bacula", LOCALEDIR); + textdomain("bacula"); + + while ((ch = getopt(argc, argv, "lmv?")) != -1) { + switch (ch) { + case 'l': + list = true; + break; + case 'm': + mtab = true; /* list mtab */ + break; + case 'v': + verbose = true; + break; + case '?': + default: + usage(); + + } + } + argc -= optind; + argv += optind; + + + OSDependentInit(); + + if (mtab) { + read_mtab(print_mtab_item, NULL); + status = 1; + goto get_out; + } + if (list) { + rblist *mtab_list; + mtab_item *item; + mtab_list = New(rblist()); + read_mtab(add_mtab_item, mtab_list); + fprintf(stderr, "Size of mtab=%d\n", mtab_list->size()); + foreach_rblist(item, mtab_list) { + fprintf(stderr, "Found dev=%lx fstype=%s\n", item->dev, item->fstype); + } + delete mtab_list; + goto get_out; + } + + if (argc < 1) { + usage(); + } + for (i = 0; i < argc; --argc, ++argv) { + FF_PKT ff_pkt; + memset(&ff_pkt, 0, sizeof(ff_pkt)); + ff_pkt.fname = ff_pkt.link = *argv; + if (lstat(ff_pkt.fname, &ff_pkt.statp) != 0) { + fprintf(stderr, "lstat of %s failed.\n", ff_pkt.fname); + status = 1; + break; + } + if (fstype(&ff_pkt, fs, sizeof(fs))) { + if (verbose) { + printf("%s: %s\n", *argv, fs); + } else { + puts(fs); + } + } else { + fprintf(stderr, _("%s: unknown file system type\n"), *argv); + status = 1; + } + } + +get_out: + exit(status); +} diff --git a/src/tools/gigaslam.c b/src/tools/gigaslam.c new file mode 100644 index 00000000..0162f19a --- /dev/null +++ b/src/tools/gigaslam.c @@ -0,0 +1,50 @@ +/* + By John Walker written ages ago. + + Create a sparse file. + + Beat denial of service floggers to death by persuading + them to download a HOW_BIG pseudo GIF file which is actually + a holey file occupying trivial space on our server. + + Make: make gigaslam + Run: ./gigaslam + Output: a file named gigaslam.gif that contains something like + 16K bytes (i.e. 
2-8K blocks), but appears to be 1GB in
+   length because the second block is written at a 1GB
+   address.
+
+   Be careful what you do with this file as not all programs know
+   how to deal with sparse files.
+
+*/
+
+#define HOW_BIG 1000000000ll
+
+#ifdef __GNUC__
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#ifndef _FILE_OFFSET_BITS
+#define _FILE_OFFSET_BITS 64
+#endif
+#endif
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+
+int main(int argc, char *const *argv)
+{
+   FILE *fp = fopen("gigaslam.gif", "w");
+   char header[] = "\n\n
\n"; + char trailer[] = "\n"; + off_t howBig = HOW_BIG; + + fwrite(header, sizeof header, 1, fp); + fseeko(fp, howBig - strlen(trailer), 0); + fwrite(trailer, strlen(trailer), 1, fp); + fclose(fp); + return 0; + +} diff --git a/src/tools/grow.c b/src/tools/grow.c new file mode 100644 index 00000000..33486b8a --- /dev/null +++ b/src/tools/grow.c @@ -0,0 +1,58 @@ +/* + By John Walker written ages ago. + + Create a sparse file. + + Beat denial of service floggers to death by persuading + them to download a HOW_BIG pseudo GIF file which is actually + a holey file occupying trivial space on our server. + + Make: make gigaslam + Run: ./gigaslam + Output: a file named gigaslam.gif that contains something like + 16K bytes (i.e. 2-8K blocks), but appears to be 1GB in + length because the second block is written at a 1GB + address. + + Be careful what you do with this file as not all programs know + how to deal with sparse files. + + Tweaked by Kern Sibbald, July 2007 to grow a file to a specified + size. + +*/ + +#ifdef __GNUC__ +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif +#ifndef _FILE_OFFSET_BITS +#define _FILE_OFFSET_BITS 64 +#endif +#endif + +#include "bacula.h" + +int main(int argc, char *argv[]) +{ + off_t howBig; + FILE *fp; + + if (argc != 3) { + Pmsg0(0, "Calling sequence: grow \n"); + exit(1); + } + howBig = str_to_int64(argv[2]); + fp = fopen(argv[1], "r+"); + if (!fp) { + berrno be; + Pmsg2(0, "Could not open %s for write. ERR=%s\n", argv[1], be.bstrerror()); + exit(1); + } + char trailer[] = "xxxxxxx\n"; + + fseeko(fp, howBig - strlen(trailer), SEEK_SET); + fwrite(trailer, strlen(trailer), 1, fp); + fclose(fp); + return 0; +} diff --git a/src/tools/testfind.c b/src/tools/testfind.c new file mode 100644 index 00000000..de3f863d --- /dev/null +++ b/src/tools/testfind.c @@ -0,0 +1,644 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/
+/*
+ *  Test program for find files
+ *
+ *  Kern Sibbald, MM
+ *
+ */
+
+#include "bacula.h"
+#include "dird/dird.h"
+#include "findlib/find.h"
+#include "ch.h"
+
+#if defined(HAVE_WIN32)
+#define isatty(fd) (fd==0)
+#endif
+
+/* Dummy functions */
+int generate_job_event(JCR *jcr, const char *event) { return 1; }
+void generate_plugin_event(JCR *jcr, bEventType eventType, void *value) { }
+extern bool parse_dir_config(CONFIG *config, const char *configfile, int exit_code);
+
+/* Global variables */
+static int num_files = 0;
+static int max_file_len = 0;
+static int max_path_len = 0;
+static int trunc_fname = 0;
+static int trunc_path = 0;
+static int attrs = 0;
+static CONFIG *config;
+
+static JCR *jcr;
+
+static int print_file(JCR *jcr, FF_PKT *ff, bool);
+static void count_files(FF_PKT *ff);
+static bool copy_fileset(FF_PKT *ff, JCR *jcr);
+static void set_options(findFOPTS *fo, const char *opts);
+
+static void usage()
+{
+   fprintf(stderr, _(
+"\n"
+"Usage: testfind [-d debug_level] [-] [pattern1 ...]\n"
+"       -a          print extended attributes (Win32 debug)\n"
+"       -d <nn>     set debug level to <nn>\n"
+"       -dt         print timestamp in debug output\n"
+"       -c          specify config file containing FileSet resources\n"
+"       -f          specify which FileSet to use\n"
+"       -?          print this message.\n"
+"\n"
+"Patterns are used for file inclusion -- normally directories.\n"
+"Debug level >= 1 prints each file found.\n"
+"Debug level >= 10 prints path/file for catalog.\n"
+"Errors are always printed.\n"
+"Files/paths truncated is the number of files/paths with len > 255.\n"
+"Truncation is only in the catalog.\n"
+"\n"));
+
+   exit(1);
+}
+
+
+int
+main (int argc, char *const *argv)
+{
+   FF_PKT *ff;
+   const char *configfile = "bacula-dir.conf";
+   const char *fileset_name = "Windows-Full-Set";
+   int ch, hard_links;
+
+   OSDependentInit();
+
+   setlocale(LC_ALL, "");
+   bindtextdomain("bacula", LOCALEDIR);
+   textdomain("bacula");
+   lmgr_init_thread();
+
+   while ((ch = getopt(argc, argv, "ac:d:f:?")) != -1) {
+      switch (ch) {
+      case 'a':                    /* print extended attributes *debug* */
+         attrs = 1;
+         break;
+
+      case 'c':                    /* director config file */
+         configfile = optarg;
+         break;
+
+      case 'd':                    /* set debug level */
+         if (*optarg == 't') {
+            dbg_timestamp = true;
+         } else {
+            debug_level = atoi(optarg);
+            if (debug_level <= 0) {
+               debug_level = 1;
+            }
+         }
+         break;
+
+      case 'f':                    /* FileSet name to use */
+         fileset_name = optarg;
+         break;
+
+      case '?':
+      default:
+         usage();
+
+      }
+   }
+
+   argc -= optind;
+   argv += optind;
+
+   config = New(CONFIG());
+   parse_dir_config(config, configfile, M_ERROR_TERM);
+
+   MSGS *msg;
+
+   foreach_res(msg, R_MSGS)
+   {
+      init_msg(NULL, msg);
+   }
+
+   jcr = new_jcr(sizeof(JCR), NULL);
+   jcr->fileset = (FILESET *)GetResWithName(R_FILESET, fileset_name);
+
+   if (jcr->fileset == NULL) {
+      fprintf(stderr, "%s: Fileset not found\n", fileset_name);
+
+      FILESET *var;
+
+      fprintf(stderr, "Valid FileSets:\n");
+
+      foreach_res(var, R_FILESET) {
+         fprintf(stderr, "    %s\n", var->hdr.name);
+      }
+
+      exit(1);
+   }
+
+   ff = init_find_files();
+
+   copy_fileset(ff, jcr);
+
+   find_files(jcr, ff, print_file, NULL);
+
+   free_jcr(jcr);
+   if (config) {
+      delete config;
+      config = NULL;
+   }
+
+   term_last_jobs_list();
+
+   /* Clean up fileset */
+   findFILESET *fileset = ff->fileset;
+
+   if (fileset) {
+      int i, j, k;
+      /* Delete FileSet Include lists */
+      for (i=0; i<fileset->include_list.size(); i++) {
+         findINCEXE *incexe = (findINCEXE *)fileset->include_list.get(i);
+         for (j=0; j<incexe->opts_list.size(); j++) {
+            findFOPTS *fo = (findFOPTS *)incexe->opts_list.get(j);
+            for (k=0; 
k<fo->regex.size(); k++) {
+               regfree((regex_t *)fo->regex.get(k));
+            }
+            fo->regex.destroy();
+            fo->regexdir.destroy();
+            fo->regexfile.destroy();
+            fo->wild.destroy();
+            fo->wilddir.destroy();
+            fo->wildfile.destroy();
+            fo->wildbase.destroy();
+            fo->fstype.destroy();
+            fo->drivetype.destroy();
+         }
+         incexe->opts_list.destroy();
+         incexe->name_list.destroy();
+      }
+      fileset->include_list.destroy();
+
+      /* Delete FileSet Exclude lists */
+      for (i=0; i<fileset->exclude_list.size(); i++) {
+         findINCEXE *incexe = (findINCEXE *)fileset->exclude_list.get(i);
+         for (j=0; j<incexe->opts_list.size(); j++) {
+            findFOPTS *fo = (findFOPTS *)incexe->opts_list.get(j);
+            fo->regex.destroy();
+            fo->regexdir.destroy();
+            fo->regexfile.destroy();
+            fo->wild.destroy();
+            fo->wilddir.destroy();
+            fo->wildfile.destroy();
+            fo->wildbase.destroy();
+            fo->fstype.destroy();
+            fo->drivetype.destroy();
+         }
+         incexe->opts_list.destroy();
+         incexe->name_list.destroy();
+      }
+      fileset->exclude_list.destroy();
+      free(fileset);
+   }
+   ff->fileset = NULL;
+   hard_links = term_find_files(ff);
+
+   printf(_("\n"
+"Total files    : %d\n"
+"Max file length: %d\n"
+"Max path length: %d\n"
+"Files truncated: %d\n"
+"Paths truncated: %d\n"
+"Hard links     : %d\n"),
+     num_files, max_file_len, max_path_len,
+     trunc_fname, trunc_path, hard_links);
+
+   term_msg();
+
+   close_memory_pool();
+   lmgr_cleanup_main();
+   sm_dump(false);
+   exit(0);
+}
+
+static int print_file(JCR *jcr, FF_PKT *ff, bool top_level)
+{
+
+   switch (ff->type) {
+   case FT_LNKSAVED:
+      if (debug_level == 1) {
+         printf("%s\n", ff->fname);
+      } else if (debug_level > 1) {
+         printf("Lnka: %s -> %s\n", ff->fname, ff->link);
+      }
+      break;
+   case FT_REGE:
+      if (debug_level == 1) {
+         printf("%s\n", ff->fname);
+      } else if (debug_level > 1) {
+         printf("Empty: %s\n", ff->fname);
+      }
+      count_files(ff);
+      break;
+   case FT_REG:
+      if (debug_level == 1) {
+         printf("%s\n", ff->fname);
+      } else if (debug_level > 1) {
+         printf(_("Reg: %s\n"), ff->fname);
+      }
+      count_files(ff);
+      break;
+   case FT_LNK:
+      if (debug_level == 1) {
+         printf("%s\n", ff->fname);
+      } else if (debug_level > 1) {
+         printf("Lnk: %s -> %s\n", ff->fname, ff->link);
+      }
+      count_files(ff);
+      break;
+   case FT_DIRBEGIN:
+      return 1;
+   case FT_NORECURSE:
+   case FT_NOFSCHG:
+   case FT_INVALIDFS:
+   case FT_INVALIDDT:
+   case FT_DIREND:
+      if (debug_level) {
+         char errmsg[100] = "";
+         if (ff->type == FT_NORECURSE) {
+            bstrncpy(errmsg, _("\t[will not descend: recursion turned off]"), sizeof(errmsg));
+         } else if (ff->type == FT_NOFSCHG) {
+            bstrncpy(errmsg, _("\t[will not descend: file system change not allowed]"), sizeof(errmsg));
+         } else if (ff->type == FT_INVALIDFS) {
+            bstrncpy(errmsg, _("\t[will not descend: disallowed file system]"), sizeof(errmsg));
+         } else if (ff->type == FT_INVALIDDT) {
+            bstrncpy(errmsg, _("\t[will not descend: disallowed drive type]"), sizeof(errmsg));
+         }
+         printf("%s%s%s\n", (debug_level > 1 ? "Dir: " : ""), ff->fname, errmsg);
+      }
+      ff->type = FT_DIREND;
+      count_files(ff);
+      break;
+   case FT_SPEC:
+      if (debug_level == 1) {
+         printf("%s\n", ff->fname);
+      } else if (debug_level > 1) {
+         printf("Spec: %s\n", ff->fname);
+      }
+      count_files(ff);
+      break;
+   case FT_NOACCESS:
+      printf(_("Err: Could not access %s: %s\n"), ff->fname, strerror(errno));
+      break;
+   case FT_NOFOLLOW:
+      printf(_("Err: Could not follow ff->link %s: %s\n"), ff->fname, strerror(errno));
+      break;
+   case FT_NOSTAT:
+      printf(_("Err: Could not stat %s: %s\n"), ff->fname, strerror(errno));
+      break;
+   case FT_NOCHG:
+      printf(_("Skip: File not saved. No change. 
%s\n"), ff->fname); + break; + case FT_ISARCH: + printf(_("Err: Attempt to backup archive. Not saved. %s\n"), ff->fname); + break; + case FT_NOOPEN: + printf(_("Err: Could not open directory %s: %s\n"), ff->fname, strerror(errno)); + break; + default: + printf(_("Err: Unknown file ff->type %d: %s\n"), ff->type, ff->fname); + break; + } + if (attrs) { + char attr[200]; + encode_attribsEx(NULL, attr, ff); + if (*attr != 0) { + printf("AttrEx=%s\n", attr); + } +// set_attribsEx(NULL, ff->fname, NULL, NULL, ff->type, attr); + } + return 1; +} + +static void count_files(FF_PKT *ar) +{ + int fnl, pnl; + char *l, *p; + char file[MAXSTRING]; + char spath[MAXSTRING]; + + num_files++; + + /* Find path without the filename. + * I.e. everything after the last / is a "filename". + * OK, maybe it is a directory name, but we treat it like + * a filename. If we don't find a / then the whole name + * must be a path name (e.g. c:). + */ + for (p=l=ar->fname; *p; p++) { + if (IsPathSeparator(*p)) { + l = p; /* set pos of last slash */ + } + } + if (IsPathSeparator(*l)) { /* did we find a slash? */ + l++; /* yes, point to filename */ + } else { /* no, whole thing must be path name */ + l = p; + } + + /* If filename doesn't exist (i.e. root directory), we + * simply create a blank name consisting of a single + * space. This makes handling zero length filenames + * easier. + */ + fnl = p - l; + if (fnl > max_file_len) { + max_file_len = fnl; + } + if (fnl > 255) { + printf(_("===== Filename truncated to 255 chars: %s\n"), l); + fnl = 255; + trunc_fname++; + } + if (fnl > 0) { + strncpy(file, l, fnl); /* copy filename */ + file[fnl] = 0; + } else { + file[0] = ' '; /* blank filename */ + file[1] = 0; + } + + pnl = l - ar->fname; + if (pnl > max_path_len) { + max_path_len = pnl; + } + if (pnl > 255) { + printf(_("========== Path name truncated to 255 chars: %s\n"), ar->fname); + pnl = 255; + trunc_path++; + } + strncpy(spath, ar->fname, pnl); + spath[pnl] = 0; + if (pnl == 0) { + spath[0] = ' '; + spath[1] = 0; + printf(_("========== Path length is zero. 
File=%s\n"), ar->fname); + } + if (debug_level >= 10) { + printf(_("Path: %s\n"), spath); + printf(_("File: %s\n"), file); + } + +} + +bool python_set_prog(JCR*, char const*) { return false; } + +static bool copy_fileset(FF_PKT *ff, JCR *jcr) +{ + FILESET *jcr_fileset = jcr->fileset; + int num; + bool include = true; + + findFILESET *fileset; + findFOPTS *current_opts; + + fileset = (findFILESET *)malloc(sizeof(findFILESET)); + memset(fileset, 0, sizeof(findFILESET)); + ff->fileset = fileset; + + fileset->state = state_none; + fileset->include_list.init(1, true); + fileset->exclude_list.init(1, true); + + for ( ;; ) { + if (include) { + num = jcr_fileset->num_includes; + } else { + num = jcr_fileset->num_excludes; + } + for (int i=0; iinclude_items[i]; + + /* New include */ + fileset->incexe = (findINCEXE *)malloc(sizeof(findINCEXE)); + memset(fileset->incexe, 0, sizeof(findINCEXE)); + fileset->incexe->opts_list.init(1, true); + fileset->incexe->name_list.init(0, 0); + fileset->include_list.append(fileset->incexe); + } else { + ie = jcr_fileset->exclude_items[i]; + + /* New exclude */ + fileset->incexe = (findINCEXE *)malloc(sizeof(findINCEXE)); + memset(fileset->incexe, 0, sizeof(findINCEXE)); + fileset->incexe->opts_list.init(1, true); + fileset->incexe->name_list.init(0, 0); + fileset->exclude_list.append(fileset->incexe); + } + + for (j=0; jnum_opts; j++) { + FOPTS *fo = ie->opts_list[j]; + + current_opts = (findFOPTS *)malloc(sizeof(findFOPTS)); + memset(current_opts, 0, sizeof(findFOPTS)); + fileset->incexe->current_opts = current_opts; + fileset->incexe->opts_list.append(current_opts); + + current_opts->regex.init(1, true); + current_opts->regexdir.init(1, true); + current_opts->regexfile.init(1, true); + current_opts->wild.init(1, true); + current_opts->wilddir.init(1, true); + current_opts->wildfile.init(1, true); + current_opts->wildbase.init(1, true); + current_opts->fstype.init(1, true); + current_opts->drivetype.init(1, true); + + set_options(current_opts, fo->opts); + + for (k=0; kregex.size(); k++) { + // fd->fsend("R %s\n", fo->regex.get(k)); + current_opts->regex.append(bstrdup((const char *)fo->regex.get(k))); + } + for (k=0; kregexdir.size(); k++) { + // fd->fsend("RD %s\n", fo->regexdir.get(k)); + current_opts->regexdir.append(bstrdup((const char *)fo->regexdir.get(k))); + } + for (k=0; kregexfile.size(); k++) { + // fd->fsend("RF %s\n", fo->regexfile.get(k)); + current_opts->regexfile.append(bstrdup((const char *)fo->regexfile.get(k))); + } + for (k=0; kwild.size(); k++) { + current_opts->wild.append(bstrdup((const char *)fo->wild.get(k))); + } + for (k=0; kwilddir.size(); k++) { + current_opts->wilddir.append(bstrdup((const char *)fo->wilddir.get(k))); + } + for (k=0; kwildfile.size(); k++) { + current_opts->wildfile.append(bstrdup((const char *)fo->wildfile.get(k))); + } + for (k=0; kwildbase.size(); k++) { + current_opts->wildbase.append(bstrdup((const char *)fo->wildbase.get(k))); + } + for (k=0; kfstype.size(); k++) { + current_opts->fstype.append(bstrdup((const char *)fo->fstype.get(k))); + } + for (k=0; kdrivetype.size(); k++) { + current_opts->drivetype.append(bstrdup((const char *)fo->drivetype.get(k))); + } + } + + for (j=0; jname_list.size(); j++) { + fileset->incexe->name_list.append(bstrdup((const char *)ie->name_list.get(j))); + } + } + + if (!include) { /* If we just did excludes */ + break; /* all done */ + } + + include = false; /* Now do excludes */ + } + + return true; +} + +static void set_options(findFOPTS *fo, const char *opts) +{ + int j; + const 
char *p; + + for (p=opts; *p; p++) { + switch (*p) { + case 'a': /* alway replace */ + case '0': /* no option */ + break; + case 'e': + fo->flags |= FO_EXCLUDE; + break; + case 'f': + fo->flags |= FO_MULTIFS; + break; + case 'h': /* no recursion */ + fo->flags |= FO_NO_RECURSION; + break; + case 'H': /* no hard link handling */ + fo->flags |= FO_NO_HARDLINK; + break; + case 'i': + fo->flags |= FO_IGNORECASE; + break; + case 'M': /* MD5 */ + fo->flags |= FO_MD5; + break; + case 'n': + fo->flags |= FO_NOREPLACE; + break; + case 'p': /* use portable data format */ + fo->flags |= FO_PORTABLE; + break; + case 'R': /* Resource forks and Finder Info */ + fo->flags |= FO_HFSPLUS; + case 'r': /* read fifo */ + fo->flags |= FO_READFIFO; + break; + case 'S': + switch(*(p + 1)) { + case ' ': + /* Old director did not specify SHA variant */ + fo->flags |= FO_SHA1; + break; + case '1': + fo->flags |= FO_SHA1; + p++; + break; +#ifdef HAVE_SHA2 + case '2': + fo->flags |= FO_SHA256; + p++; + break; + case '3': + fo->flags |= FO_SHA512; + p++; + break; +#endif + default: + /* Automatically downgrade to SHA-1 if an unsupported + * SHA variant is specified */ + fo->flags |= FO_SHA1; + p++; + break; + } + break; + case 's': + fo->flags |= FO_SPARSE; + break; + case 'm': + fo->flags |= FO_MTIMEONLY; + break; + case 'k': + fo->flags |= FO_KEEPATIME; + break; + case 'A': + fo->flags |= FO_ACL; + break; + case 'V': /* verify options */ + /* Copy Verify Options */ + for (j=0; *p && *p != ':'; p++) { + fo->VerifyOpts[j] = *p; + if (j < (int)sizeof(fo->VerifyOpts) - 1) { + j++; + } + } + fo->VerifyOpts[j] = 0; + break; + case 'w': + fo->flags |= FO_IF_NEWER; + break; + case 'W': + fo->flags |= FO_ENHANCEDWILD; + break; + case 'Z': /* compression */ + p++; /* skip Z */ + if (*p >= '0' && *p <= '9') { + fo->flags |= FO_COMPRESS; + fo->Compress_algo = COMPRESS_GZIP; + fo->Compress_level = *p - '0'; + } + else if (*p == 'o') { + fo->flags |= FO_COMPRESS; + fo->Compress_algo = COMPRESS_LZO1X; + fo->Compress_level = 1; /* not used with LZO */ + } + Dmsg2(200, "Compression alg=%d level=%d\n", fo->Compress_algo, fo->Compress_level); + break; + case 'X': + fo->flags |= FO_XATTR; + break; + default: + Emsg1(M_ERROR, 0, _("Unknown include/exclude option: %c\n"), *p); + break; + } + } +} diff --git a/src/tools/testls.c b/src/tools/testls.c new file mode 100644 index 00000000..bfcab4c1 --- /dev/null +++ b/src/tools/testls.c @@ -0,0 +1,278 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2016 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Test program for listing files during regression testing + * Links have their permissions and time bashed since they cannot + * be set by Bacula. 
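testfind above and testls below drive the file scan through the same findlib calling pattern: build an FF_PKT, register include patterns, walk with a per-file callback, then tear down. The following is a minimal sketch of that pattern, using only calls that already appear in these two tools; the locale and lock-manager setup they perform is omitted, and error handling is skipped for brevity.

    #include "bacula.h"
    #include "findlib/find.h"

    /* Callback invoked once per file; returning 1, as the test tools do,
     * lets the walk continue. */
    static int list_cb(JCR *jcr, FF_PKT *ff, bool top_level)
    {
       printf("%s\n", ff->fname);
       return 1;
    }

    /* Walk one directory tree and print every name found. */
    static void walk_tree(const char *root)
    {
       JCR *jcr = new_jcr(sizeof(JCR), NULL);
       FF_PKT *ff = init_find_files();
       add_fname_to_include_list(ff, 0, root);   /* include tree to walk */
       match_files(jcr, ff, list_cb);            /* calls list_cb per entry */
       term_include_exclude_files(ff);
       term_find_files(ff);
       free_jcr(jcr);
    }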
+ * + * Kern Sibbald, MM + * + */ + +#include "bacula.h" +#include "findlib/find.h" + +/* Dummy functions */ +int generate_job_event(JCR *jcr, const char *event) { return 1; } +void generate_plugin_event(JCR *jcr, bEventType eventType, void *value) { } + + +/* Global variables */ +int attrs = 0; + +static JCR *jcr; +static int num_files = 0; + +static int print_file(JCR *jcr, FF_PKT *ff, bool); +static void print_ls_output(char *fname, char *link, int type, struct stat *statp); +static int count_files(JCR *jcr, FF_PKT *ff, bool top_level); + +static void usage() +{ + fprintf(stderr, _( +"\n" +"Usage: testls [-d debug_level] [-] [pattern1 ...]\n" +" -a print extended attributes (Win32 debug)\n" +" -d set debug level to \n" +" -dt print timestamp in debug output\n" +" -e specify file of exclude patterns\n" +" -i specify file of include patterns\n" +" -q quiet, don't print filenames (debug)\n" +" - read pattern(s) from stdin\n" +" -? print this message.\n" +"\n" +"Patterns are file inclusion -- normally directories.\n" +"Debug level >= 1 prints each file found.\n" +"Debug level >= 10 prints path/file for catalog.\n" +"Errors always printed.\n" +"Files/paths truncated is number with len > 255.\n" +"Truncation is only in catalog.\n" +"\n")); + + exit(1); +} + + +int main(int argc, char *const *argv) +{ + FF_PKT *ff; + char name[1000]; + bool quiet = false; + int i, ch; + char *inc = NULL; + char *exc = NULL; + FILE *fd; + + setlocale(LC_ALL, ""); + bindtextdomain("bacula", LOCALEDIR); + textdomain("bacula"); + lmgr_init_thread(); + + while ((ch = getopt(argc, argv, "ad:e:i:q?")) != -1) { + switch (ch) { + case 'a': /* print extended attributes *debug* */ + attrs = 1; + break; + + case 'd': /* set debug level */ + if (*optarg == 't') { + dbg_timestamp = true; + } else { + debug_level = atoi(optarg); + if (debug_level <= 0) { + debug_level = 1; + } + } + break; + + case 'e': /* exclude patterns */ + exc = optarg; + break; + + case 'i': /* include patterns */ + inc = optarg; + break; + + case 'q': + quiet = true; + break; + + case '?': + default: + usage(); + + } + } + argc -= optind; + argv += optind; + + jcr = new_jcr(sizeof(JCR), NULL); + + ff = init_find_files(); + if (argc == 0 && !inc) { + add_fname_to_include_list(ff, 0, "/"); /* default to / */ + } else { + for (i=0; i < argc; i++) { + if (strcmp(argv[i], "-") == 0) { + while (fgets(name, sizeof(name)-1, stdin)) { + strip_trailing_junk(name); + add_fname_to_include_list(ff, 0, name); + } + continue; + } + add_fname_to_include_list(ff, 0, argv[i]); + } + } + if (inc) { + fd = fopen(inc, "rb"); + if (!fd) { + printf(_("Could not open include file: %s\n"), inc); + exit(1); + } + while (fgets(name, sizeof(name)-1, fd)) { + strip_trailing_junk(name); + add_fname_to_include_list(ff, 0, name); + } + fclose(fd); + } + + if (exc) { + fd = fopen(exc, "rb"); + if (!fd) { + printf(_("Could not open exclude file: %s\n"), exc); + exit(1); + } + while (fgets(name, sizeof(name)-1, fd)) { + strip_trailing_junk(name); + add_fname_to_exclude_list(ff, name); + } + fclose(fd); + } + if (quiet) { + match_files(jcr, ff, count_files); + } else { + match_files(jcr, ff, print_file); + } + printf(_("Files seen = %d\n"), num_files); + term_include_exclude_files(ff); + term_find_files(ff); + + free_jcr(jcr); + term_last_jobs_list(); /* free jcr chain */ + close_memory_pool(); + lmgr_cleanup_main(); + sm_dump(false); + exit(0); +} + +static int count_files(JCR *jcr, FF_PKT *ff, bool top_level) +{ + num_files++; + return 1; +} + +static int print_file(JCR *jcr, FF_PKT 
*ff, bool top_level) +{ + + switch (ff->type) { + case FT_LNKSAVED: + case FT_REGE: + case FT_REG: + case FT_LNK: + case FT_DIREND: + case FT_SPEC: + print_ls_output(ff->fname, ff->link, ff->type, &ff->statp); + break; + case FT_DIRBEGIN: + break; + case FT_NOACCESS: + printf(_("Err: Could not access %s: %s\n"), ff->fname, strerror(errno)); + break; + case FT_NOFOLLOW: + printf(_("Err: Could not follow ff->link %s: %s\n"), ff->fname, strerror(errno)); + break; + case FT_NOSTAT: + printf(_("Err: Could not stat %s: %s\n"), ff->fname, strerror(errno)); + break; + case FT_NOCHG: + printf(_("Skip: File not saved. No change. %s\n"), ff->fname); + break; + case FT_ISARCH: + printf(_("Err: Attempt to backup archive. Not saved. %s\n"), ff->fname); + break; + case FT_NORECURSE: + printf(_("Recursion turned off. Directory not entered. %s\n"), ff->fname); + break; + case FT_NOFSCHG: + printf(_("Skip: File system change prohibited. Directory not entered. %s\n"), ff->fname); + break; + case FT_NOOPEN: + printf(_("Err: Could not open directory %s: %s\n"), ff->fname, strerror(errno)); + break; + default: + printf(_("Err: Unknown file ff->type %d: %s\n"), ff->type, ff->fname); + break; + } + num_files++; + return 1; +} + +static void print_ls_output(char *fname, char *link, int type, struct stat *statp) +{ + char buf[2000]; + char ec1[30]; + char *p, *f; + int n; + + if (type == FT_LNK) { + statp->st_mtime = 0; + statp->st_mode |= 0777; + } + p = encode_mode(statp->st_mode, buf); + n = sprintf(p, " %2d ", (uint32_t)statp->st_nlink); + p += n; + n = sprintf(p, "%-4d %-4d", (int)statp->st_uid, (int)statp->st_gid); + p += n; + n = sprintf(p, "%10.10s ", edit_uint64(statp->st_size, ec1)); + p += n; + if (S_ISCHR(statp->st_mode) || S_ISBLK(statp->st_mode)) { + n = sprintf(p, "%4x ", (int)statp->st_rdev); + } else { + n = sprintf(p, " "); + } + p += n; + p = encode_time(statp->st_mtime, p); + *p++ = ' '; + /* Copy file name */ + for (f=fname; *f && (p-buf) < (int)sizeof(buf); ) + *p++ = *f++; + if (type == FT_LNK) { + *p++ = '-'; + *p++ = '>'; + *p++ = ' '; + /* Copy link name */ + for (f=link; *f && (p-buf) < (int)sizeof(buf); ) + *p++ = *f++; + } + *p++ = '\n'; + *p = 0; + fputs(buf, stdout); +} + +bool python_set_prog(JCR*, char const*) { return false; } diff --git a/src/tools/timelimit.1 b/src/tools/timelimit.1 new file mode 100644 index 00000000..1d3a7c93 --- /dev/null +++ b/src/tools/timelimit.1 @@ -0,0 +1,217 @@ +.\" Copyright (c) 2001, 2007 - 2010 Peter Pentchev +.\" All rights reserved. +.\" +.\" Redistribution and use in source and binary forms, with or without +.\" modification, are permitted provided that the following conditions +.\" are met: +.\" 1. Redistributions of source code must retain the above copyright +.\" notice, this list of conditions and the following disclaimer. +.\" 2. Redistributions in binary form must reproduce the above copyright +.\" notice, this list of conditions and the following disclaimer in the +.\" documentation and/or other materials provided with the distribution. +.\" +.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND +.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +.\" ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE +.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +.\" SUCH DAMAGE. +.\" +.\" $Ringlet$ +.\" +.Dd September 29, 2010 +.Dt TIMELIMIT 1 +.Os +.Sh NAME +.Nm timelimit +.Nd effectively limit the absolute execution time of a process +.Sh SYNOPSIS +.Nm +.Op Fl pq +.Op Fl S Ar killsig +.Op Fl s Ar warnsig +.Op Fl T Ar killtime +.Op Fl t Ar warntime +.Ar command +.Op Ar arguments ... +.Sh DESCRIPTION +The +.Nm +utility executes a given +.Ar command +with the supplied +.Ar arguments +and terminates the spawned process after a given time with a given signal. +If the process exits before the time limit has elapsed, +.Nm +will silently exit, too. +.Pp +Options: +.Bl -tag -width indent +.It Fl p +If the child process is terminated by a signal, +.Nm +propagates this condition, i.e. sends the same signal to itself. +This allows the program executing +.Nm +to determine whether the child process was terminated by a signal or +actually exited with an exit code larger than 128. +.It Fl q +Quiet operation - +.Nm +does not output diagnostic messages about signals sent to the child process. +.It Fl S Ar killsig +Specify the number of the signal to be sent to the process +.Ar killtime +seconds after +.Ar warntime +has expired. +Defaults to 9 (SIGKILL). +.It Fl s Ar warnsig +Specify the number of the signal to be sent to the process +.Ar warntime +seconds after it has been started. +Defaults to 15 (SIGTERM). +.It Fl T Ar killtime +Specify the maximum execution time of the process before sending +.Ar killsig +after +.Ar warnsig +has been sent. +Defaults to 120 seconds. +.It Fl t Ar warntime +Specify the maximum execution time of the process in seconds before sending +.Ar warnsig . +Defaults to 3600 seconds. +.El +.Pp +On systems that support the +.Xr setitimer 2 +system call, the +.Ar warntime +and +.Ar killtime +values may be specified in fractional seconds with microsecond precision. +.Sh ENVIRONMENT +.Bl -tag -width indent +.It Ev KILLSIG +The +.Ar killsig +to use if the +.Fl S +option was not specified. +.It Ev KILLTIME +The +.Ar killtime +to use if the +.Fl T +option was not specified. +.It Ev WARNSIG +The +.Ar warnsig +to use if the +.Fl s +option was not specified. +.It Ev WARNTIME +The +.Ar warntime +to use if the +.Fl t +option was not specified. +.El +.Sh EXIT STATUS +If the child process exits normally, the +.Nm +utility will pass its exit code on up. +If the child process is terminated by a signal and the +.Fl p +flag was not specified, the +.Nm +utility's exit status is 128 plus the signal number, similar to +.Xr sh 1 . +If the +.Fl p +flag was specified, the +.Nm +utility will raise the signal itself so that its own parent process +may in turn reliably distinguish between a signal and a larger than 128 +exit code. +.Pp +In rare cases, the +.Nm +utility may encounter a system or user error; then, its exit status is one +of the standard +.Xr sysexits 3 +values: +.Bl -tag -width indent +.It Dv EX_USAGE +The command-line parameters and options were incorrectly specified. 
+.It Dv EX_SOFTWARE +The +.Nm +utility itself received an unexpected signal while waiting for the child +process to terminate. +.It Dv EX_OSERR +The +.Nm +utility was unable to execute the child process, wait for it to terminate, +or examine its exit status. +.El +.Sh EXAMPLES +.Pp +The following examples are shown as given to the shell: +.Pp +.Dl timelimit -p /usr/local/bin/rsync rsync://some.host/dir /opt/mirror +.Pp +Run the rsync program to mirror a WWW or FTP site and kill it if it +runs longer than 1 hour (that is 3600 seconds) with SIGTERM. +If the rsync process does not exit after receiving the SIGTERM, +.Nm +issues a SIGKILL 120 seconds after the SIGTERM. +If the rsync process is terminated by a signal, +.Nm +will itself raise this signal. +.Pp +.Dl tcpserver 0 8888 timelimit -t600 -T300 /opt/services/chat/stats +.Pp +Start a +.Xr tcpserver n +process listening on tcp port 8888; each client connection shall invoke +an instance of an IRC statistics tool under +.Pa /opt/services/chat +and kill it after 600 seconds have elapsed. +If the stats process is still running after the SIGTERM, it will be +killed by a SIGKILL sent 300 seconds later. +.Pp +.Dl env WARNTIME=4.99 WARNSIG=1 KILLTIME=1.000001 timelimit sh stats.sh +.Pp +Start a shell script and kill it with a SIGHUP in a little under 5 seconds. +If the shell gets stuck and does not respond to the SIGHUP, kill it +with the default SIGKILL just a bit over a second afterwards. +.Sh SEE ALSO +.Xr kill 1 , +.Xr rsync 1 , +.Xr signal 3 , +.Xr tcpserver n +.Sh STANDARDS +No standards documentation was harmed in the process of creating +.Nm . +.Sh BUGS +Please report any bugs in +.Nm +to the author. +.Sh AUTHOR +The +.Nm +utility was conceived and written by +.An Peter Pentchev Aq roam@ringlet.net +with contributions and suggestions by +.An Karsten W Rohrbach Aq karsten@rohrbach.de , +.An Teddy Hogeborn Aq teddy@fukt.bsnet.se , +and +.An Tomasz Nowak Aq nowak2000@poczta.onet.pl . diff --git a/src/tools/timelimit.c b/src/tools/timelimit.c new file mode 100644 index 00000000..d1f03fd5 --- /dev/null +++ b/src/tools/timelimit.c @@ -0,0 +1,543 @@ +/*- + * Copyright (c) 2001, 2007 - 2010 Peter Pentchev + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
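As a usage note for the EXIT STATUS section of the man page above: a C caller can distinguish a signal propagated via -p from the plain 128+signal convention by inspecting the wait status, mirroring the handling in timelimit's own main() further below. This is an illustrative sketch only; the command line shown is a made-up example, not taken from the source.

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    /* Run "timelimit -p -t 10 sleep 60" and report how it ended. */
    int run_limited(void)
    {
       pid_t pid = fork();
       if (pid < 0) return -1;
       if (pid == 0) {
          execlp("timelimit", "timelimit", "-p", "-t", "10",
                 "sleep", "60", (char *)NULL);
          _exit(127);                        /* exec failed */
       }
       int status;
       if (waitpid(pid, &status, 0) < 0) return -1;
       if (WIFSIGNALED(status)) {
          printf("killed by signal %d (propagated by -p)\n", WTERMSIG(status));
       } else if (WIFEXITED(status) && WEXITSTATUS(status) > 128) {
          printf("child died on signal %d (no -p: 128+signal)\n",
                 WEXITSTATUS(status) - 128);
       } else if (WIFEXITED(status)) {
          printf("exited normally with code %d\n", WEXITSTATUS(status));
       }
       return 0;
    }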
+ */ + + +#include "../config.h" + +/* we hope all OS's have those..*/ +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include + +#ifdef HAVE_ERR +#include +#endif /* HAVE_ERR */ + +#ifdef HAVE_SYSEXITS_H +#include +#else +#define EX_OK 0 /* successful termination */ +#define EX__BASE 64 /* base value for error messages */ +#define EX_USAGE 64 /* command line usage error */ +#define EX_DATAERR 65 /* data format error */ +#define EX_NOINPUT 66 /* cannot open input */ +#define EX_NOUSER 67 /* addressee unknown */ +#define EX_NOHOST 68 /* host name unknown */ +#define EX_UNAVAILABLE 69 /* service unavailable */ +#define EX_SOFTWARE 70 /* internal software error */ +#define EX_OSERR 71 /* system error (e.g., can't fork) */ +#define EX_OSFILE 72 /* critical OS file missing */ +#define EX_CANTCREAT 73 /* can't create (user) output file */ +#define EX_IOERR 74 /* input/output error */ +#define EX_TEMPFAIL 75 /* temp failure; user is invited to retry */ +#define EX_PROTOCOL 76 /* remote error in protocol */ +#define EX_NOPERM 77 /* permission denied */ +#define EX_CONFIG 78 /* configuration error */ +#define EX__MAX 78 /* maximum listed value */ +#endif /* HAVE_SYSEXITS_H */ + +#ifndef __unused +# ifdef __GNUC__ +# if GCC_VERSION >= 3004 +# define __unused __attribute__((unused)) +# else +# define __unused +# endif +# else /* __GNUC__ */ +# define __unused +# endif /* __GNUC__ */ +#endif /* __unused */ + +#ifndef __dead2 +#ifdef __GNUC__ +#define __dead2 __attribute__((noreturn)) +#else /* __GNUC__ */ +#define __dead2 +#endif /* __GNUC__ */ +#endif /* __dead2 */ + +#define PARSE_CMDLINE + +unsigned long warntime, warnmsec, killtime, killmsec; +unsigned long warnsig, killsig; +volatile int fdone, falarm, fsig, sigcaught; +int propagate, quiet; + +static struct { + const char *name, opt, issig; + unsigned long *sec, *msec; +} envopts[] = { + {"KILLSIG", 'S', 1, &killsig, NULL}, + {"KILLTIME", 'T', 0, &killtime, &killmsec}, + {"WARNSIG", 's', 1, &warnsig, NULL}, + {"WARNTIME", 't', 0, &warntime, &warnmsec}, + {NULL, 0, 0, NULL, NULL} +}; + +static struct { + const char *name; + int num; +} signals[] = { + /* We kind of assume that the POSIX-mandated signals are present */ + {"ABRT", SIGABRT}, + {"ALRM", SIGALRM}, + {"BUS", SIGBUS}, + {"CHLD", SIGCHLD}, + {"CONT", SIGCONT}, + {"FPE", SIGFPE}, + {"HUP", SIGHUP}, + {"ILL", SIGILL}, + {"INT", SIGINT}, + {"KILL", SIGKILL}, + {"PIPE", SIGPIPE}, + {"QUIT", SIGQUIT}, + {"SEGV", SIGSEGV}, + {"STOP", SIGSTOP}, + {"TERM", SIGTERM}, + {"TSTP", SIGTSTP}, + {"TTIN", SIGTTIN}, + {"TTOU", SIGTTOU}, + {"USR1", SIGUSR1}, + {"USR2", SIGUSR2}, + {"PROF", SIGPROF}, + {"SYS", SIGSYS}, + {"TRAP", SIGTRAP}, + {"URG", SIGURG}, + {"VTALRM", SIGVTALRM}, + {"XCPU", SIGXCPU}, + {"XFSZ", SIGXFSZ}, + + /* Some more signals found on a Linux 2.6 system */ +#ifdef SIGIO + {"IO", SIGIO}, +#endif +#ifdef SIGIOT + {"IOT", SIGIOT}, +#endif +#ifdef SIGLOST + {"LOST", SIGLOST}, +#endif +#ifdef SIGPOLL + {"POLL", SIGPOLL}, +#endif +#ifdef SIGPWR + {"PWR", SIGPWR}, +#endif +#ifdef SIGSTKFLT + {"STKFLT", SIGSTKFLT}, +#endif +#ifdef SIGWINCH + {"WINCH", SIGWINCH}, +#endif + + /* Some more signals found on a FreeBSD 8.x system */ +#ifdef SIGEMT + {"EMT", SIGEMT}, +#endif +#ifdef SIGINFO + {"INFO", SIGINFO}, +#endif +#ifdef SIGLWP + {"LWP", SIGLWP}, +#endif +#ifdef SIGTHR + {"THR", SIGTHR}, +#endif +}; +#define SIGNALS (sizeof(signals) / sizeof(signals[0])) + +#ifndef HAVE_ERR +static void err(int, const char *, ...); +static void 
errx(int, const char *, ...); +#endif /* !HAVE_ERR */ + +static void usage(void); + +static void init(int, char *[]); +static pid_t doit(char *[]); +static void child(char *[]); +static void raisesignal(int) __dead2; +static void setsig_fatal(int, void (*)(int)); +static void setsig_fatal_gen(int, void (*)(int), int, const char *); +static void terminated(const char *); + +#ifndef HAVE_ERR +static void +err(int code, const char *fmt, ...) { + va_list v; + + va_start(v, fmt); + vfprintf(stderr, fmt, v); + va_end(v); + + fprintf(stderr, ": %s\n", strerror(errno)); + exit(code); +} + +static void +errx(int code, const char *fmt, ...) { + va_list v; + + va_start(v, fmt); + vfprintf(stderr, fmt, v); + va_end(v); + + fprintf(stderr, "\n"); + exit(code); +} + +static void +warnx(const char *fmt, ...) { + va_list v; + + va_start(v, fmt); + vfprintf(stderr, fmt, v); + va_end(v); + + fprintf(stderr, "\n"); +} +#endif /* !HAVE_ERR */ + +static void +usage(void) { + errx(EX_USAGE, "usage: timelimit [-pq] [-S ksig] [-s wsig] " + "[-T ktime] [-t wtime] command"); +} + +static void +atou_fatal(const char *s, unsigned long *sec, unsigned long *msec, int issig) { + unsigned long v, vm, mul; + const char *p; + size_t i; + + if (s[0] < '0' || s[0] > '9') { + if (s[0] == '\0' || !issig) + usage(); + for (i = 0; i < SIGNALS; i++) + if (!strcmp(signals[i].name, s)) + break; + if (i == SIGNALS) + usage(); + *sec = (unsigned long)signals[i].num; + if (msec != NULL) + *msec = 0; + return; + } + + v = 0; + for (p = s; (*p >= '0') && (*p <= '9'); p++) + v = v * 10 + *p - '0'; + if (*p == '\0') { + *sec = v; + if (msec != NULL) + *msec = 0; + return; + } else if (*p != '.' || msec == NULL) { + usage(); + } + p++; + + vm = 0; + mul = 1000000; + for (; (*p >= '0') && (*p <= '9'); p++) { + vm = vm * 10 + *p - '0'; + mul = mul / 10; + } + if (*p != '\0') + usage(); + else if (mul < 1) + errx(EX_USAGE, "no more than microsecond precision"); +#ifndef HAVE_SETITIMER + if (msec != 0) + errx(EX_UNAVAILABLE, + "subsecond precision not supported on this platform"); +#endif + *sec = v; + *msec = vm * mul; +} + +static void +init(int argc, char *argv[]) { +#ifdef PARSE_CMDLINE + int ch, listsigs; +#endif + int optset; + unsigned i; + char *s; + + /* defaults */ + quiet = 0; + warnsig = SIGTERM; + killsig = SIGKILL; + warntime = 900; + warnmsec = 0; + killtime = 5; + killmsec = 0; + + optset = 0; + + /* process environment variables first */ + for (i = 0; envopts[i].name != NULL; i++) + if ((s = getenv(envopts[i].name)) != NULL) { + atou_fatal(s, envopts[i].sec, envopts[i].msec, + envopts[i].issig); + optset = 1; + } + +#ifdef PARSE_CMDLINE + listsigs = 0; + while ((ch = getopt(argc, argv, "+lqpS:s:T:t:")) != -1) { + switch (ch) { + case 'l': + listsigs = 1; + break; + case 'p': + propagate = 1; + break; + case 'q': + quiet = 1; + break; + default: + /* check if it's a recognized option */ + for (i = 0; envopts[i].name != NULL; i++) + if (ch == envopts[i].opt) { + atou_fatal(optarg, + envopts[i].sec, + envopts[i].msec, + envopts[i].issig); + optset = 1; + break; + } + if (envopts[i].name == NULL) + usage(); + } + } + + if (listsigs) { + for (i = 0; i < SIGNALS; i++) + printf("%s%c", signals[i].name, + i + 1 < SIGNALS? ' ': '\n'); + exit(EX_OK); + } +#else + optind = 1; +#endif + + if (!optset) /* && !quiet? 
*/ + warnx("using defaults: warntime=%lu, warnsig=%lu, " + "killtime=%lu, killsig=%lu", + warntime, warnsig, killtime, killsig); + + argc -= optind; + argv += optind; + if (argc == 0) + usage(); + + /* sanity checks */ + if ((warntime == 0 && warnmsec == 0) || (killtime == 0 && killmsec == 0)) + usage(); +} + +static void +sigchld(int sig __unused) { + + fdone = 1; +} + +static void +sigalrm(int sig __unused) { + + falarm = 1; +} + +static void +sighandler(int sig) { + + sigcaught = sig; + fsig = 1; +} + +static void +setsig_fatal(int sig, void (*handler)(int)) { + + setsig_fatal_gen(sig, handler, 1, "setting"); +} + +static void +setsig_fatal_gen(int sig, void (*handler)(int), int nocld, const char *what) { +#ifdef HAVE_SIGACTION + struct sigaction act; + + memset(&act, 0, sizeof(act)); + act.sa_handler = handler; + act.sa_flags = 0; +#ifdef SA_NOCLDSTOP + if (nocld) + act.sa_flags |= SA_NOCLDSTOP; +#endif /* SA_NOCLDSTOP */ + if (sigaction(sig, &act, NULL) < 0) + err(EX_OSERR, "%s signal handler for %d", what, sig); +#else /* HAVE_SIGACTION */ + if (signal(sig, handler) == SIG_ERR) + err(EX_OSERR, "%s signal handler for %d", what, sig); +#endif /* HAVE_SIGACTION */ +} + +static void +settimer(const char *name, unsigned long sec, unsigned long msec) +{ +#ifdef HAVE_SETITIMER + struct itimerval tval; + + tval.it_interval.tv_sec = tval.it_interval.tv_usec = 0; + tval.it_value.tv_sec = sec; + tval.it_value.tv_usec = msec; + if (setitimer(ITIMER_REAL, &tval, NULL) == -1) + err(EX_OSERR, "could not set the %s timer", name); +#else + alarm(sec); +#endif +} + +static pid_t +doit(char *argv[]) { + pid_t pid; + + /* install signal handlers */ + fdone = falarm = fsig = sigcaught = 0; + setsig_fatal(SIGALRM, sigalrm); + setsig_fatal(SIGCHLD, sigchld); + setsig_fatal(SIGTERM, sighandler); + setsig_fatal(SIGHUP, sighandler); + setsig_fatal(SIGINT, sighandler); + setsig_fatal(SIGQUIT, sighandler); + + /* fork off the child process */ + if ((pid = fork()) < 0) + err(EX_OSERR, "fork"); + if (pid == 0) + child(argv); + + /* sleep for the allowed time */ + settimer("warning", warntime, warnmsec); + while (!(fdone || falarm || fsig)) + pause(); + alarm(0); + + /* send the warning signal */ + if (fdone) + return (pid); + if (fsig) + terminated("run"); + falarm = 0; + if (!quiet) + warnx("sending warning signal %lu", warnsig); + kill(pid, (int) warnsig); + +#ifndef HAVE_SIGACTION + /* reset our signal handlers, just in case */ + setsig_fatal(SIGALRM, sigalrm); + setsig_fatal(SIGCHLD, sigchld); + setsig_fatal(SIGTERM, sighandler); + setsig_fatal(SIGHUP, sighandler); + setsig_fatal(SIGINT, sighandler); + setsig_fatal(SIGQUIT, sighandler); +#endif /* HAVE_SIGACTION */ + + /* sleep for the grace time */ + settimer("kill", killtime, killmsec); + while (!(fdone || falarm || fsig)) + pause(); + alarm(0); + + /* send the kill signal */ + if (fdone) + return (pid); + if (fsig) + terminated("grace"); + if (!quiet) + warnx("sending kill signal %lu", killsig); + kill(pid, (int) killsig); + setsig_fatal_gen(SIGCHLD, SIG_DFL, 0, "restoring"); + return (pid); +} + +static void +terminated(const char *period) { + + errx(EX_SOFTWARE, "terminated by signal %d during the %s period", + sigcaught, period); +} + +static void +child(char *argv[]) { + + execvp(argv[0], argv); + err(EX_OSERR, "executing %s", argv[0]); +} + +static __dead2 void +raisesignal (int sig) { + + setsig_fatal_gen(sig, SIG_DFL, 0, "restoring"); + raise(sig); + while (1) + pause(); + /* NOTREACHED */ +} + +int +main(int argc, char *argv[]) { + pid_t pid; + int 
status; + + init(argc, argv); + argc -= optind; + argv += optind; + pid = doit(argv); + + if (waitpid(pid, &status, 0) == -1) + err(EX_OSERR, "could not get the exit status for process %ld", + (long)pid); + if (WIFEXITED(status)) + return (WEXITSTATUS(status)); + else if (!WIFSIGNALED(status)) + return (EX_OSERR); + if (propagate) + raisesignal(WTERMSIG(status)); + else + return (WTERMSIG(status) + 128); +} diff --git a/src/version.h b/src/version.h new file mode 100644 index 00000000..1de21b87 --- /dev/null +++ b/src/version.h @@ -0,0 +1,193 @@ +#ifndef VERSION_H +#define VERSION_H +#undef VERSION + +#define COMMUNITY 1 /* Define to create a Windows community binary */ + +/* Note: there can be only *one* VERSION in this file */ +#define VERSION "9.4.2" +#define BDATE "04 February 2019" +#define LSMDATE "04Feb19" + +#define RELEASE 1 /* Use ONLY in rpms */ + +#define PROG_COPYRIGHT "Copyright (C) %d-2019 Kern Sibbald.\n" +#define BYEAR "2019" /* year for copyright messages in progs */ + +/* + * Versions of packages needed to build Bacula components + */ +#define DEPKGS_QT_VERSION "01Jan13" +#define DEPKGS_VERSION "10Oct18" + +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2019 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +/* Debug flags */ +#undef DEBUG +#define DEBUG 1 +#define TRACEBACK 1 +#define TRACE_FILE 1 +#define ENTER_LEAVE 1 + +/* If this is set stdout will not be closed on startup */ +/* #define DEVELOPER 1 */ + +/* adjust DEVELOPER_MODE for status command */ +#ifdef DEVELOPER +# define DEVELOPER_MODE 1 +#else +# define DEVELOPER_MODE 0 +#endif + +/* + * SMCHECK does orphaned buffer checking (memory leaks) + * it can always be turned on, but has some minor performance + * penalties. + */ +#ifdef DEVELOPER +# define SMCHECK +#endif + +/* + * _USE_LOCKMGR does lock/unlock mutex tracking (dead lock) + * it can always be turned on, but we advise to use it only + * for debug + */ +# ifndef _USE_LOCKMGR +# define _USE_LOCKMGR +# endif /* _USE_LOCKMGR */ +/* + * Enable priority management with the lock manager + * + * Note, turning this on will cause the Bacula SD to abort if + * mutexes are executed out of order, which could lead to a + * deadlock. However, note that this is not necessarily a + * deadlock, so turn this on only for debugging. + */ +#define USE_LOCKMGR_PRIORITY + +/* + * Enable thread verification before kill + * + * Note, this extra check have a high cost when using + * dozens of thread, so turn this only for debugging. + */ +/* #define USE_LOCKMGR_SAFEKILL */ + +#if !HAVE_LINUX_OS && !HAVE_SUN_OS && !HAVE_DARWIN_OS && !HAVE_FREEBSD_OS && !HAVE_KFREEBSD_OS +# undef _USE_LOCKMGR +#endif + +/* + * USE_VTAPE is a dummy tape driver. This is useful to + * run regress test. + */ +#ifdef HAVE_LINUX_OS +# define USE_VTAPE +#endif + +/* + * USE_FTP is a ftp driver for the FD using curl. 
+ */ +// #define USE_FTP + +/* + * for fastest speed but you must have a UPS to avoid unwanted shutdowns + */ +//#define SQLITE3_INIT_QUERY "PRAGMA synchronous = OFF" + +/* + * for more safety, but is 30 times slower than above + */ +#define SQLITE3_INIT_QUERY "PRAGMA synchronous = NORMAL" + +/* + * This should always be on. It enables data encryption code + * providing it is configured. + */ +#define DATA_ENCRYPTION 1 + +/* + * This uses a Bacula specific bsnprintf rather than the sys lib + * version because it is much more secure. It should always be + * on. + */ +#define USE_BSNPRINTF 1 + +/* Debug flags not normally turned on */ + +/* #define TRACE_JCR_CHAIN 1 */ +/* #define TRACE_RES 1 */ +/* #define DEBUG_MEMSET 1 */ +/* #define DEBUG_MUTEX 1 */ +/* #define DEBUG_BLOCK_CHECKSUM 1 */ + +#define BDEMO "" + +/* + * Set SMALLOC_SANITY_CHECK to zero to turn off, otherwise + * it is the maximum memory malloced before Bacula will + * abort. Except for debug situations, this should be zero + */ +#define SMALLOC_SANITY_CHECK 0 /* 500000000 0.5 GB max */ + + +/* Check if header of tape block is zero before writing */ +/* #define DEBUG_BLOCK_ZEROING 1 */ + +/* #define FULL_DEBUG 1 */ /* normally on for testing only */ + +/* Turn this on ONLY if you want all Dmsg() to append to the + * trace file. Implemented mainly for Win32 ... + */ +/* #define SEND_DMSG_TO_FILE 1 */ + + +/* The following are turned on for performance testing */ +/* + * If you turn on the NO_ATTRIBUTES_TEST and rebuild, the SD + * will receive the attributes from the FD, will write them + * to disk, then when the data is written to tape, it will + * read back the attributes, but they will not be sent to + * the Director. So this will eliminate: 1. the comm time + * to send the attributes to the Director. 2. the time it + * takes the Director to put them in the catalog database. + */ +/* #define NO_ATTRIBUTES_TEST 1 */ + +/* +* If you turn on NO_TAPE_WRITE_TEST and rebuild, the SD +* will do all normal actions, but will not write to the +* Volume. Note, this means a lot of functions such as +* labeling will not work, so you must use it only when +* Bacula is going to append to a Volume. This will eliminate +* the time it takes to write to the Volume (not the time +* it takes to do any positioning). +*/ +/* #define NO_TAPE_WRITE_TEST 1 */ + +/* + * If you turn on FD_NO_SEND_TEST and rebuild, the FD will + * not send any attributes or data to the SD. This will + * eliminate the comm time sending to the SD. + */ +/* #define FD_NO_SEND_TEST 1 */ + +#endif /* VERSION_H */ diff --git a/src/win32/External-mingw-w64 b/src/win32/External-mingw-w64 new file mode 100644 index 00000000..c16b98b4 --- /dev/null +++ b/src/win32/External-mingw-w64 @@ -0,0 +1,74 @@ +# +# This file provides information about the External dependencies required by +# Bacula. +# +# There are four fields delimited by |. Only the first two fields are +# required. The other two are used when the top level directory of the +# archive is not the same as the file name with any suffixes removed. +# +# Field 1 is the name of the dependency. It is used to define the +# name of the three variables which are assigned the values of fields 2 to 4. +# +# Field 2 is the URL of the archive. It is assigned to the variable +# URL_[field1]. +# +# Field 3 is the top directory of the archive or the name of a directory that +# must be created and the archive extracted into it. It is assigned to the +# variable DIR_[field1]. 
+# +# Field 4 indicates if the directory specified in field 3 must be created +# first and the archive extracted into it. It is assigned to the variable +# MKD_[field1] +# +CMD_UTILS|https://www.bacula.org/downloads/depkgs-mingw32/cmd-utils-0.1.tar.gz +MT|http://old.bacula.org/downloads/depkgs-mingw32/mt-st-0.9b.tar.gz +MTX|http://www.bacula.org/downloads/depkgs-mingw32/mtx-1.3.9.tar.gz +SCONS|http://www.bacula.org/downloads/depkgs-mingw32/scons-0.96.92.tar.gz +SED|https://www.bacula.org/downloads/depkgs-mingw32/sed-4.2k.tar.gz +LZO|https://www.bacula.org/downloads/depkgs-mingw32/lzo-2.10.tar.gz +ZLIB|https://www.bacula.org/downloads/depkgs-mingw32/zlib-1.2.8.tar.gz +RSYNC|https://www.bacula.org/downloads/depkgs-mingw32/librsync-0.9.7b.tar.gz +OPENSSL|https://www.bacula.org/downloads/depkgs-mingw32/openssl-1.0.2k.tar.gz +PCRE|https://www.bacula.org/downloads/depkgs-mingw32/pcre-6.3.tar.bz2 +PTHREADS|https://www.bacula.org/downloads/depkgs-mingw32/pthreads-w32-2-9-1-release.tar.gz +Qt4|https://www.bacula.org/downloads/depkgs-mingw32/qt-everywhere-opensource-src-4.8.4.tar.gz +MSSQL|https://www.bacula.org/downloads/depkgs-mingw32/depkgs-mssql-08Jun17.tar.gz + +#POSTGRESQL|http://www.bacula.org/depkgs-mingw32/postgresql-base-8.1.4.tar.bz2|postgresql-8.1.4 +#DVD_RW_TOOLS|http://www.bacula.org/depkgs-mingw32/dvd+rw-tools-7.0.tar.gz +#MKISOFS|http://www.bacula.org/depkgs-mingw32/mkisofs.exe +#MYSQL|http://www.bacula.org/depkgs-mingw32/mysql-noinstall-5.0.27-win32.zip|mysql-5.0.27-win32 +#NSIS_BIN|http://www.bacula.org/depkgs-mingw32/nsis-2.17.zip +#NSIS_SRC|http://www.bacula.org/depkgs-mingw32/nsis-2.17-src.tar.bz2 +#SCONS|http://www.bacula.org/depkgs-mingw32/scons-0.96.92.tar.gz +#SQLITE|http://www.bacula.org/depkgs-mingw32/sqlite-3.3.17.tar.gz +#STAB2CV|http://www.bacula.org/depkgs-mingw32/stab2cv-0.1.tar.bz2 +#WX|http://www.bacula.org/depkgs-mingw32/wxWidgets-2.8.7.tar.gz +#DB|http://www.bacula.org/depkgs-mingw32/db-4.7.25.tar.gz +# +# +# Original file locations +# +# CDRTOOLS|ftp://ftp.berlios.de/pub/cdrecord/alpha/cdrtools-2.01.01a22.tar.bz2 +# CMD_UTILS|http://superb-west.dl.sourceforge.net/sourceforge/cmd-utils/cmd-utils-0.1.tar.gz +# DVD_RW_TOOLS|http://fy.chalmers.se/~appro/linux/DVD+RW/tools/dvd+rw-tools-7.0.tar.gz +# MKISOFS|http://fy.chalmers.se/~appro/linux/DVD+RW/tools/win32/mkisofs.exe +# MT|http://www.ibiblio.org/pub/linux/system/backup/mt-st-0.9b.tar.gz +# MTX|http://superb-west.dl.sourceforge.net/sourceforge/mtx/mtx-1.3.9.tar.gz +# MYSQL|http://mirror.x10.com/mirror/mysql/Downloads/MySQL-5.0/mysql-noinstall-5.0.27-win32.zip|mysql-5.0.27-win32 +# NSIS_BIN|http://superb-west.dl.sourceforge.net/sourceforge/nsis/nsis-2.17.zip +# NSIS_SRC|http://superb-west.dl.sourceforge.net/sourceforge/nsis/nsis-2.17-src.tar.bz2 +# OPENSSL|http://www.openssl.org/source/openssl-0.9.8b.tar.gz +# PCRE|http://superb-west.dl.sourceforge.net/sourceforge/pcre/pcre-6.3.tar.bz2 +# POSTGRESQL|ftp://ftp2.us.postgresql.org/postgresql/source/v8.1.4/postgresql-base-8.1.4.tar.bz2|postgresql-8.1.4 +# PTHREADS|ftp://sources.redhat.com/pub/pthreads-win32/pthreads-snap-2004-06-22.tar.gz +# Qt4|ftp://ftp.trolltech.com/qt/source/qt-win-opensource-src-4.3.0.zip +# SCONS|http://superb-west.dl.sourceforge.net/sourceforge/scons/scons-0.96.92.tar.gz +# SED|ftp://mirrors.kernel.org/gnu/sed/sed-4.1.5.tar.gz +# SQLITE|http://www.sqlite.org/sqlite-3.3.8.tar.gz +# SQLITE|http://www.sqlite.org/sqlite-3.3.17.tar.gz +# STAB2CV|http://superb-west.dl.sourceforge.net/sourceforge/stab2cv/stab2cv-0.1.tar.bz2 +# 
WX|http://superb-west.dl.sourceforge.net/sourceforge/wxwindows/wxWidgets-2.7.0.tar.gz +# ZLIB|http://www.zlib.net/zlib-1.2.3.tar.gz +# DB|http://download.oracle.com/berkeley-db/db-4.7.25.tar.gz +# LZO|http://www.oberhumer.com/opensource/lzo/download/lzo-2.05.tar.gz diff --git a/src/win32/External-mingw32 b/src/win32/External-mingw32 new file mode 100644 index 00000000..ff6f55f5 --- /dev/null +++ b/src/win32/External-mingw32 @@ -0,0 +1,70 @@ +# +# This file provides information about the External dependencies required by +# Bacula. +# +# There are four fields delimited by |. Only the first two fields are +# required. The other two are used when the top level directory of the +# archive is not the same as the file name with any suffixes removed. +# +# Field 1 is the name of the dependency. It is used to define the +# name of the three variables which are assigned the values of fields 2 to 4. +# +# Field 2 is the URL of the archive. It is assigned to the variable +# URL_[field1]. +# +# Field 3 is the top directory of the archive or the name of a directory that +# must be created and the archive extracted into it. It is assigned to the +# variable DIR_[field1]. +# +# Field 4 indicates if the directory specified in field 3 must be created +# first and the archive extracted into it. It is assigned to the variable +# MKD_[field1] +# +CMD_UTILS|https://www.bacula.org/downloads/depkgs-mingw32/cmd-utils-0.1.tar.gz +MT|https://www.bacula.org/downloads/depkgs-mingw32/mt-st-0.9b.tar.gz +MTX|https://www.bacula.org/downloads/depkgs-mingw32/mtx-1.3.9.tar.gz +MYSQL|https://www.bacula.org/downloads/depkgs-mingw32/mysql-noinstall-5.0.27-win32.zip|mysql-5.0.27-win32 +NSIS_BIN|https://www.bacula.org/downloads/depkgs-mingw32/nsis-2.17.zip +NSIS_SRC|https://www.bacula.org/downloads/depkgs-mingw32/nsis-2.17-src.tar.bz2 +OPENSSL|https://www.bacula.org/downloads/depkgs-mingw32/openssl-1.0.2k.tar.gz +PCRE|https://www.bacula.org/downloads/depkgs-mingw32/pcre-6.3.tar.bz2 +POSTGRESQL|https://www.bacula.org/downloads/depkgs-mingw32/postgresql-base-8.1.4.tar.bz2|postgresql-8.1.4 +PTHREADS|https://www.bacula.org/downloads/depkgs-mingw32/pthreads-w32-2-9-1-release.tar.gz +Qt4|https://www.bacula.org/downloads/depkgs-mingw32/qt-everywhere-opensource-src-4.8.4.tar.gz +SCONS|https://www.bacula.org/downloads/depkgs-mingw32/scons-0.96.92.tar.gz +SED|https://www.bacula.org/downloads/depkgs-mingw32/sed-4.2k.tar.gz +STAB2CV|https://www.bacula.org/downloads/depkgs-mingw32/stab2cv-0.1.tar.bz2 +WX|https://www.bacula.org/downloads/depkgs-mingw32/wxWidgets-2.8.7.tar.gz +ZLIB|https://www.bacula.org/downloads/depkgs-mingw32/zlib-1.2.8.tar.gz +RSYNC|https://www.bacula.org/downloads/depkgs-mingw32/librsync-0.9.7b.tar.gz +DB|https://www.bacula.org/downloads/depkgs-mingw32/db-4.7.25.tar.gz +LZO|https://www.bacula.org/downloads/depkgs-mingw32/lzo-2.10.tar.gz +MSSQL|https://www.bacula.org/downloads/depkgs-mingw32/depkgs-mssql-08Jun17.tar.gz +# +#DVD_RW_TOOLS|https://www.bacula.org/downloads/depkgs-mingw32/dvd+rw-tools-7.0.tar.gz +# +# Original file locations +# +# CDRTOOLS|ftp://ftp.berlios.de/pub/cdrecord/alpha/cdrtools-2.01.01a22.tar.bz2 +# CMD_UTILS|http://superb-west.dl.sourceforge.net/sourceforge/cmd-utils/cmd-utils-0.1.tar.gz +# DVD_RW_TOOLS|http://fy.chalmers.se/~appro/linux/DVD+RW/tools/dvd+rw-tools-7.0.tar.gz +# MKISOFS|http://fy.chalmers.se/~appro/linux/DVD+RW/tools/win32/mkisofs.exe +# MT|http://www.ibiblio.org/pub/linux/system/backup/mt-st-0.9b.tar.gz +# MTX|http://superb-west.dl.sourceforge.net/sourceforge/mtx/mtx-1.3.9.tar.gz +# 
MYSQL|http://mirror.x10.com/mirror/mysql/Downloads/MySQL-5.0/mysql-noinstall-5.0.27-win32.zip|mysql-5.0.27-win32 +# NSIS_BIN|http://superb-west.dl.sourceforge.net/sourceforge/nsis/nsis-2.17.zip +# NSIS_SRC|http://superb-west.dl.sourceforge.net/sourceforge/nsis/nsis-2.17-src.tar.bz2 +# OPENSSL|http://www.openssl.org/source/openssl-0.9.8b.tar.gz +# PCRE|http://superb-west.dl.sourceforge.net/sourceforge/pcre/pcre-6.3.tar.bz2 +# POSTGRESQL|ftp://ftp2.us.postgresql.org/postgresql/source/v8.1.4/postgresql-base-8.1.4.tar.bz2|postgresql-8.1.4 +# PTHREADS|ftp://sources.redhat.com/pub/pthreads-win32/pthreads-snap-2004-06-22.tar.gz +# Qt4|ftp://ftp.trolltech.com/qt/source/qt-win-opensource-src-4.3.0.zip +# SCONS|http://superb-west.dl.sourceforge.net/sourceforge/scons/scons-0.96.92.tar.gz +# SED|ftp://mirrors.kernel.org/gnu/sed/sed-4.1.5.tar.gz +# SQLITE|http://www.sqlite.org/sqlite-3.3.8.tar.gz +# SQLITE|http://www.sqlite.org/sqlite-3.3.17.tar.gz +# STAB2CV|http://superb-west.dl.sourceforge.net/sourceforge/stab2cv/stab2cv-0.1.tar.bz2 +# WX|http://superb-west.dl.sourceforge.net/sourceforge/wxwindows/wxWidgets-2.7.0.tar.gz +# ZLIB|http://www.zlib.net/zlib-1.2.3.tar.gz +# DB|http://download.oracle.com/berkeley-db/db-4.7.25.tar.gz +# LZO|http://www.oberhumer.com/opensource/lzo/download/lzo-2.05.tar.gz diff --git a/src/win32/External-msvc b/src/win32/External-msvc new file mode 100644 index 00000000..8808128e --- /dev/null +++ b/src/win32/External-msvc @@ -0,0 +1,59 @@ +# This file provides information about the External dependencies required by +# Bacula. +# +# There are four fields delimited by |. Only the first two fields are +# required. The other two are used when the top level directory of the +# archive is not the same as the file name with any suffixes removed. +# +# Field 1 is the name of the dependency. It is used to define the +# name of the three variables which are assigned the values of fields 2 to 4. +# +# Field 2 is the URL of the archive. It is assigned to the variable +# URL_[field1]. +# +# Field 3 is the top directory of the archive or the name of a directory that +# must be created and the archive extracted into it. It is assigned to the +# variable DIR_[field1]. +# +# Field 4 indicates if the directory specified in field 3 must be created +# first and the archive extracted into it. 
It is assigned to the variable +# MKD_[field1] +# +# +CMD_UTILS|http://www.bacula.org/depkgs-mingw32/cmd-utils-0.1.tar.gz +DVD_RW_TOOLS|http://www.bacula.org/depkgs-mingw32/dvd+rw-tools-7.0.tar.gz +MKISOFS|http://www.bacula.org/depkgs-mingw32/mkisofs.exe +MT|http://www.bacula.org/depkgs-mingw32/mt-st-0.9b.tar.gz +MTX|http://www.bacula.org/depkgs-mingw32/mtx-1.3.9.tar.gz +MYSQL|http://www.bacula.org/depkgs-mingw32/mysql-noinstall-5.0.27-win32.zip|mysql-5.0.27-win32 +NSIS_BIN|http://www.bacula.org/depkgs-mingw32/nsis-2.17.zip +OPENSSL|http://www.bacula.org/depkgs-mingw32/openssl-0.9.8b.tar.gz +PCRE|http://www.bacula.org/depkgs-mingw32/pcre-6.3.tar.bz2 +POSTGRESQL|http://www.bacula.org/depkgs-mingw32/postgresql-8.1.9-1-binaries-no-installer.zip|pgsql +POSTGRESQL_SRC|http://www.bacula.org/depkgs-mingw32/postgresql-8.1.9.tar.bz2 +PTHREADS|http://www.bacula.org/depkgs-mingw32/pthreads-2004-06-22x.exe|pthreads-2004-06-22x|true +SED|http://www.bacula.org/depkgs-mingw32/sed-4.1.5.tar.gz +SQLITE|http://www.bacula.org/depkgs-mingw32/sqlite-3.3.17.tar.gz +WX|http://www.bacula.org/depkgs-mingw32/wxWidgets-2.7.0.zip +ZLIB|http://www.bacula.org/depkgs-mingw32/zlib123.zip|zlib123|true +# +# +# Original locations +# +# ZLIB|http://www.zlib.net/zlib123.zip|zlib123|true +# PCRE|http://superb-west.dl.sourceforge.net/sourceforge/pcre/pcre-6.3.tar.bz2 +# PTHREADS|ftp://sources.redhat.com/pub/pthreads-win32/pthreads-2004-06-22x.exe|pthreads-2004-06-22x|true +# OPENSSL|http://www.openssl.org/source/openssl-0.9.8b.tar.gz +# MYSQL|http://mirror.x10.com/mirror/mysql/Downloads/MySQL-5.0/mysql-noinstall-5.0.27-win32.zip|mysql-5.0.27-win32 +# POSTGRESQL|ftp://ftp2.us.postgresql.org/postgresql/binary/v8.1.6/win32/postgresql-8.1.6-1-binaries-no-installer.zip|pgsql +# POSTGRESQL_SRC|ftp://ftp4.us.postgresql.org/pub/postgresql/source/v8.1.6/postgresql-8.1.6.tar.bz2 +# SQLITE|http://www.sqlite.org/sqlite-3.3.8.tar.gz +# WX|http://superb-west.dl.sourceforge.net/sourceforge/wxwindows/wxWidgets-2.7.0.zip +# NSIS_BIN|http://superb-west.dl.sourceforge.net/sourceforge/nsis/nsis-2.17.zip +# MTX|http://superb-west.dl.sourceforge.net/sourceforge/mtx/mtx-1.3.9.tar.gz +# MT|ftp://ftp.ibiblio.org/pub/linux/system/backup/mt-st-0.9b.tar.gz +# SED|ftp://mirrors.kernel.org/gnu/sed/sed-4.1.5.tar.gz +# CMD_UTILS|http://superb-west.dl.sourceforge.net/sourceforge/cmd-utils/cmd-utils-0.1.tar.gz +# #CDRTOOLS|ftp://ftp.berlios.de/pub/cdrecord/alpha/cdrtools-2.01.01a22.tar.bz2 +# MKISOFS|http://fy.chalmers.se/~appro/linux/DVD+RW/tools/win32/mkisofs.exe +# DVD_RW_TOOLS|http://fy.chalmers.se/~appro/linux/DVD+RW/tools/dvd+rw-tools-7.0.tar.gz diff --git a/src/win32/Makefile b/src/win32/Makefile new file mode 100644 index 00000000..22626d45 --- /dev/null +++ b/src/win32/Makefile @@ -0,0 +1,168 @@ +# +# Copyright (C) 2000-2018 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# +# Makefile for Win32/Win64 Bacula File daemon +# +# If called make win64=yes the Win64 version will be built +# otherwise the Win32 version will be built. +# +# If called with bat=no, bat will not be built, otherwise it will be +# built. 
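The External-* dependency lists above all use the same record layout: NAME|URL[|DIR[|MKD]], with only the first two fields required, and the values feeding variables named URL_NAME, DIR_NAME and MKD_NAME. A minimal parsing sketch in C follows; the helper name and the default shown for a missing third field are assumptions for illustration, not code taken from the build scripts.

    #include <stdio.h>
    #include <string.h>

    /* Parse one "NAME|URL|DIR|MKD" line from an External-* file.
     * Fields 3 and 4 are optional. */
    static void parse_depline(const char *line)
    {
       char buf[1024], *save = NULL;
       strncpy(buf, line, sizeof(buf) - 1);
       buf[sizeof(buf) - 1] = '\0';

       const char *name = strtok_r(buf, "|", &save);  /* field 1: variable suffix   */
       const char *url  = strtok_r(NULL, "|", &save); /* field 2: URL_<name>        */
       const char *dir  = strtok_r(NULL, "|", &save); /* field 3: DIR_<name>, opt.  */
       const char *mkd  = strtok_r(NULL, "|", &save); /* field 4: MKD_<name>, opt.  */

       if (!name || !url) return;                     /* first two fields required  */
       printf("URL_%s=%s\n", name, url);
       printf("DIR_%s=%s\n", name,
              dir ? dir : "(archive name with suffixes removed)");
       printf("MKD_%s=%s\n", name, mkd ? mkd : "");
    }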
+# + +ECHO_CMD=@ + +WIN32_DIRS=lib filed filed/plugins console scripts stored +WIN64_DIRS=lib filed filed/plugins console scripts stored + +ifeq ($(win64),yes) + DIRS=$(WIN64_DIRS) + INSTALLER=win64_installer + SPECIAL=bat64 bsmtp +else + DIRS=$(WIN32_DIRS) + INSTALLER=win32_installer + SPECIAL=bat32 bsmtp +endif + +ifeq ($(bat),no) + SPECIAL=bsmtp +endif + +.PHONY: $(DIRS) clean all Makefile.inc win32_installer full_win32_installer \ + win64_installer bsmtp distclean + +all: Makefile.inc $(DIRS) $(SPECIAL) $(INSTALLER) + +distclean: clean + +clean: $(DIRS) win32_installer win64_installer + $(MAKE) -C tools clean + $(ECHO_CMD)-rm -rf release32 release64 + $(ECHO_CMD)-rm -rf ../qt-console/obj32 ../qt-console/obj64 + $(ECHO_CMD)-rm -f ../qt-console/release/bat.exe ../qt-console/debug/bat.exe + $(ECHO_CMD)-rm -rf ../qt-console/tray-monitor/obj32 ../qt-console/tray-monitor/obj64 + $(ECHO_CMD)-rm -f ../qt-console/tray-monitor/release/bacula-tray-monitor.exe ../qt-console/tray-monitor/debug/bacula-tray-monitor.exe + +is_depkgs_set: + @if test -z $${DEPKGS} ; then \ + echo "variable DEPKGS not set" ; \ + echo "You probably want DEPKGS=\"`(cd ../../..;pwd)`\"" ; \ + exit 1; \ + fi ; \ + +release32/bat.exe: is_depkgs_set + ( cd ../qt-console; ./make-win32 "32" ) + test -f release32/bat.exe + +bat32: release32/bat.exe + +release64/bat.exe: is_depkgs_set + ( cd ../qt-console; ./make-win32 "64" ) + test -f release64/bat.exe + +bat64: release64/bat.exe + +full_win32_installer: + @if test -f Makefile.inc; then \ + if $(MAKE) -C $@ $(MAKECMDGOALS); then \ + echo "\n===== Make of $@ succeeded =====\n\n" ; \ + else \ + echo "\n!!!!! Make of $@ failed !!!!!\n\n" ; exit 1; \ + fi ; \ + fi + +win32_installer: + echo "Making 32 bit version" + @if test -f Makefile.inc; then \ + if $(MAKE) -C $@ $(MAKECMDGOALS); then \ + echo "\n===== Make of $@ succeeded =====\n\n" ; \ + else \ + echo "\n!!!!! Make of $@ failed !!!!!\n\n" ; exit 1; \ + fi ; \ + fi + +win64_installer: + echo "Making 64 bit version" + @if test -f Makefile.inc; then \ + if $(MAKE) -C $@ $(MAKECMDGOALS); then \ + echo "\n===== Make of $@ succeeded =====\n\n" ; \ + else \ + echo "\n!!!!! Make of $@ failed !!!!!\n\n" ; exit 1; \ + fi ; \ + fi + + +$(DIRS): + @if test -f Makefile.inc; then \ + if $(MAKE) -C $@ $(MAKECMDGOALS); then \ + echo "\n===== Make of $@ succeeded =====\n\n" ; \ + else \ + echo "\n!!!!! Make of $@ failed !!!!!\n\n" ; exit 1; \ + fi ; \ + fi + +bsmtp: + @if test -f Makefile.inc; then \ + if $(MAKE) -C tools bsmtp $(MAKECMDGOALS); then \ + echo "\n===== Make of tools/bsmtp succeeded =====\n\n" ; \ + else \ + echo "\n!!!!! 
Make of tools/bsmtp failed !!!!!\n\n" ; exit 1; \ + fi ; \ + fi + + + +Makefile.inc: + @echo Creating $@ + $(ECHO_CMD)TOPDIR=`(cd ../../..;pwd)`; \ + TOPDIR=$${DEPKGS:-$${TOPDIR}}; \ + if test "$(win64)" = yes -a -e $${TOPDIR}/cross-tools/mingw-w64/bin/x86_64-pc-mingw32-gcc; then \ + BINDIR=$${TOPDIR}/cross-tools/mingw-w64/bin; \ + INCDIR=$${TOPDIR}/cross-tools/mingw-w64/x86_64-pc-mingw32/include; \ + DLLDIR=$${TOPDIR}/cross-tools/mingw-w64/x86_64-pc-mingw32/bin; \ + DEPKGSDIR=depkgs-mingw-w64; \ + DEPKGSDIR32=depkgs-mingw32; \ + MINGWBASE=x86_64-pc-mingw32; \ + elif test -e $${TOPDIR}/cross-tools/mingw32/bin/mingw32-gcc; then \ + BINDIR=$${TOPDIR}/cross-tools/mingw32/bin; \ + INCDIR=$${TOPDIR}/cross-tools/mingw32/mingw32/include; \ + DLLDIR=$${TOPDIR}/cross-tools/mingw32/mingw32/bin; \ + DEPKGSDIR=depkgs-mingw32; \ + DEPKGSDIR32=depkgs-mingw32; \ + MINGWBASE=mingw32; \ + elif test "$(win64)" = yes && which x86_64-w64-mingw32-g++ ; then \ + BINDIR=; \ + INCDIR=; \ + DLLDIR=; \ + DEPKGSDIR=depkgs-mingw-w64; \ + DEPKGSDIR32=depkgs-mingw32; \ + MINGWBASE=x86_64-w64-mingw32; \ + elif which i686-w64-mingw32-g++ ; then \ + BINDIR=; \ + INCDIR=; \ + DLLDIR=; \ + DEPKGSDIR=depkgs-mingw32; \ + DEPKGSDIR32=depkgs-mingw32; \ + MINGWBASE=i686-w64-mingw32; \ + else \ + echo "\nThe GCC cross compiler isn't installed."; \ + echo "You must run build-win32-cross-tools and build-dependencies first.\n"; \ + exit 1; \ + fi ; \ + BUILDDIR=`(pwd)`; \ + MAINDIR=`(cd ../..;pwd)`; \ + sed \ + -e "s^@MINGWBASE@^$${MINGWBASE}^" \ + -e "s^@WIN64@^$${win64}^" \ + -e "s^@BAT@^$${bat}^" \ + -e "s^@WIN32DEPKGS@^$${DEPKGSDIR}^" \ + -e "s^@WIN32DEPKGS32@^$${DEPKGSDIR32}^" \ + -e "s^@WIN32BUILDDIR@^$${BUILDDIR}^" \ + -e "s^@WIN32MAINDIR@^$${MAINDIR}^" \ + -e "s^@WIN32TOPDIR@^$${TOPDIR}^" \ + -e "s^@WIN32BINDIR@^$${BINDIR}^" \ + -e "s^@WIN32INCDIR@^$${INCDIR}^" \ + -e "s^@WIN32DLLDIR@^$${DLLDIR}^" < Makefile.inc.in > $@ diff --git a/src/win32/Makefile.full b/src/win32/Makefile.full new file mode 100644 index 00000000..b28828e7 --- /dev/null +++ b/src/win32/Makefile.full @@ -0,0 +1,113 @@ +ECHO_CMD=@ + +WIN32_DIRS=lib cats filed filed/plugins dird stored \ + console wx-console tools scripts + +WIN64_DIRS=lib filed filed/plugins console scripts + +ifeq ($(WIN64),yes) + DIRS=$(WIN64_DIRS) + INSTALLER=win64_installer + SPECIAL=bat64 bsmtp +else + DIRS=$(WIN32_DIRS) + INSTALLER=win32_installer + SPECIAL=bat32 +endif + +.PHONY: $(DIRS) clean all Makefile.inc win32_installer full_win32_installer \ + win64_installer bsmtp + +all: Makefile.inc $(DIRS) $(SPECIAL) $(INSTALLER) + +clean: $(DIRS) win32_installer win64_installer + $(ECHO_CMD)-rm -rf release32 release64 + +bat32: + ( cd ../qt-console; ./make-win32 "32" ) + +bat64: + ( cd ../qt-console; ./make-win32 "64" ) + + +full_win32_installer: + @if test -f Makefile.inc; then \ + if $(MAKE) -C $@ $(MAKECMDGOALS); then \ + echo "\n===== Make of $@ succeeded =====\n\n" ; \ + else \ + echo "\n!!!!! Make of $@ failed !!!!!\n\n" ; exit 1; \ + fi ; \ + fi + +win32_installer: + @if test -f Makefile.inc; then \ + if $(MAKE) -C $@ $(MAKECMDGOALS); then \ + echo "\n===== Make of $@ succeeded =====\n\n" ; \ + else \ + echo "\n!!!!! Make of $@ failed !!!!!\n\n" ; exit 1; \ + fi ; \ + fi + +win64_installer: + @if test -f Makefile.inc; then \ + if $(MAKE) -C $@ $(MAKECMDGOALS); then \ + echo "\n===== Make of $@ succeeded =====\n\n" ; \ + else \ + echo "\n!!!!! 
Make of $@ failed !!!!!\n\n" ; exit 1; \ + fi ; \ + fi + + +$(DIRS): + @if test -f Makefile.inc; then \ + if $(MAKE) -C $@ $(MAKECMDGOALS); then \ + echo "\n===== Make of $@ succeeded =====\n\n" ; \ + else \ + echo "\n!!!!! Make of $@ failed !!!!!\n\n" ; exit 1; \ + fi ; \ + fi + +bsmtp: + @if test -f Makefile.inc; then \ + if $(MAKE) -C tools bsmtp $(MAKECMDGOALS); then \ + echo "\n===== Make of tools/bsmtp succeeded =====\n\n" ; \ + else \ + echo "\n!!!!! Make of tools/bsmtp failed !!!!!\n\n" ; exit 1; \ + fi ; \ + fi + + + +Makefile.inc: + @echo Creating $@ + $(ECHO_CMD)TOPDIR=`(cd ../../..;pwd)`; \ + TOPDIR=$${DEPKGS:-$${TOPDIR}}; \ + if test "$(WIN64)" = yes -a -e $${TOPDIR}/cross-tools/mingw-w64/bin/x86_64-pc-mingw32-gcc; then \ + BINDIR=$${TOPDIR}/cross-tools/mingw-w64/bin; \ + INCDIR=$${TOPDIR}/cross-tools/mingw-w64/x86_64-pc-mingw32/include; \ + DLLDIR=$${TOPDIR}/cross-tools/mingw-w64/x86_64-pc-mingw32/bin; \ + DEPKGSDIR=depkgs-mingw-w64; \ + DEPKGSDIR32=depkgs-mingw32; \ + elif test -e $${TOPDIR}/cross-tools/mingw32/bin/mingw32-gcc; then \ + BINDIR=$${TOPDIR}/cross-tools/mingw32/bin; \ + INCDIR=$${TOPDIR}/cross-tools/mingw32/mingw32/include; \ + DLLDIR=$${TOPDIR}/cross-tools/mingw32/mingw32/bin; \ + DEPKGSDIR=depkgs-mingw32; \ + DEPKGSDIR32=depkgs-mingw32; \ + else \ + echo "\nThe GCC cross compiler isn't installed."; \ + echo "You must run build-win32-cross-tools and build-dependencies first.\n"; \ + exit 1; \ + fi ; \ + BUILDDIR=`(pwd)`; \ + MAINDIR=`(cd ../..;pwd)`; \ + sed \ + -e "s^@WIN64@^$${WIN64}^" \ + -e "s^@WIN32DEPKGS@^$${DEPKGSDIR}^" \ + -e "s^@WIN32DEPKGS32@^$${DEPKGSDIR32}^" \ + -e "s^@WIN32BUILDDIR@^$${BUILDDIR}^" \ + -e "s^@WIN32MAINDIR@^$${MAINDIR}^" \ + -e "s^@WIN32TOPDIR@^$${TOPDIR}^" \ + -e "s^@WIN32BINDIR@^$${BINDIR}^" \ + -e "s^@WIN32INCDIR@^$${INCDIR}^" \ + -e "s^@WIN32DLLDIR@^$${DLLDIR}^" < Makefile.inc.in > $@ diff --git a/src/win32/Makefile.inc.in b/src/win32/Makefile.inc.in new file mode 100644 index 00000000..53918abd --- /dev/null +++ b/src/win32/Makefile.inc.in @@ -0,0 +1,161 @@ +# +# Makefile for win32 bacula executables +# Using MinGW cross-compiler on GNU/Linux +# +# Author: Robert Nelson +# Copyright (C), Kern Sibbald 2006-2018 +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# Written by Robert Nelson, June 2006 +# +# Absolute paths used in place of relative paths +# Kern Sibbald, October 2008 +# Split Win32 and Win64 objects and binaries. However +# the clean is a significant kludge -- hard coded. +# The object directories are deleted during make clean, +# so don't point them to any source directory. +# + +# Global Configuration + +ECHO_CMD = @ + +# Main Bacula source dir +MAINDIR := @WIN32MAINDIR@ +# +# Build dir -- i.e. normally src/win32 +BUILDDIR := @WIN32BUILDDIR@ +# +# Dir above Bacula where we find depkgs +TOPDIR := @WIN32TOPDIR@ +# +# where we find depkgs +DEPKGS32 := $(TOPDIR)/@WIN32DEPKGS32@ + +DOCDIR := $(TOPDIR)/docs + +MINGW_BASE := @MINGWBASE@ + +MINGW_BIN := @WIN32BINDIR@ +MINGW_INCLUDE := @WIN32INCDIR@ +MINGW_DLLDIR := @WIN32DLLDIR@ + +# Point only to GCC (CC is already used for g++...) 
+# Used to compile libobk for windows (just need simple gcc) +GCC = $(MINGW_BIN)$(MINGW_BASE)-gcc + +CC = $(MINGW_BIN)$(MINGW_BASE)-g++ $(DEFINES) $(INCLUDES) +CXX = $(MINGW_BIN)$(MINGW_BASE)-g++ $(DEFINES) $(INCLUDES) +AR := $(MINGW_BIN)$(MINGW_BASE)-ar +RANLIB := $(MINGW_BIN)$(MINGW_BASE)-ranlib +WINDRES := $(MINGW_BIN)$(MINGW_BASE)-windres +DLLTOOL := $(MINGW_BIN)$(MINGW_BASE)-dlltool +OBJCPY := $(MINGW_BIN)-objcopy +STAB2CV := $(DEPKGS32)/tools/bin/stab2cv + +WIN64=@WIN64@ + +ifeq ($(WIN64),yes) + DEPKGS := $(TOPDIR)/@WIN32DEPKGS@ + CFLAGS := -g -Wall -mthreads -O3 -fno-strict-aliasing -DHAVE_VSS64 -Wno-unknown-pragmas -Wno-builtin-macro-redefined + LIBS_NETWORK := -lws2_32 + WIN_VERSION := 64 + BINDIR := $(BUILDDIR)/release64 + LIBDIR := $(BUILDDIR)/release64 + OBJDIR := obj64 +else + DEPKGS := $(TOPDIR)/@WIN32DEPKGS32@ +# CFLAGS := -g -Wall -mno-cygwin -m32 -mwin32 -mthreads -O3 -fno-strict-aliasing -Wno-unknown-pragmas + CFLAGS := -g -Wall -m32 -mwin32 -mthreads -O3 -fno-strict-aliasing -Wno-unknown-pragmas -Wno-builtin-macro-redefined + LIBS_NETWORK := -lwsock32 + WIN_VERSION := 32 + BINDIR := $(BUILDDIR)/release32 + LIBDIR := $(BUILDDIR)/release32 + OBJDIR := obj32 +endif + +LDFLAGS := -g -Wall + +ifeq ($(MINGW_BASE),i686-w64-mingw32) + CFLAGS += -DHAVE_MINGW_W64 + WINDRESFLAGS := -DHAVE_MINGW_W64 + LIBS_PTHREADS := $(DEPKGS)/lib/libpthreadGCE2.a + +else ifeq ($(MINGW_BASE),x86_64-w64-mingw32) + CFLAGS += -DHAVE_MINGW_W64 + WINDRESFLAGS := -DHAVE_MINGW_W64 + LIBS_PTHREADS := $(DEPKGS)/lib/libpthreadGCE2.a + +else + # old mingw + LIBS_PTHREADS := $(DEPKGS)/lib/libpthreadGCE.a + LDFLAGS += -mno-cygwin -Wl,--disable-auto-import +endif + +bat=@BAT@ + +ifeq ($(bat),no) + BUILD_BAT=no +else + BUILD_BAT=yes +endif + +OBJDIRS := obj32 obj64 + +INCLUDE_DDK := -I$(MINGW_INCLUDE)/ddk +INCLUDE_BACULA := -I$(MAINDIR)/src -I$(BUILDDIR)/compat +INCLUDE_PTHREADS := -I$(DEPKGS)/include/pthreads +INCLUDE_ZLIB := -I$(DEPKGS)/include +INCLUDE_VSS := -I$(DEPKGS)/vss +INCLUDE_ICONS := -I../libwin32 +INCLUDE_OPENSSL := -I$(DEPKGS)/include +INCLUDE_WX := -I$(DEPKGS)/lib/wx_dll/msw -I$(DEPKGS)/include +INCLUDE_MYSQL := -I$(DEPKGS)/include/mysql +INCLUDE_RSYNC := -I$(DEPKGS)/include + +LIBS_DB := \ + $(DEPKGS)/lib/libdb.a + +LIBS_RSYNC := $(DEPKGS)/lib/librsync.a + +LIBS_ZLIB := \ + $(DEPKGS)/lib/libz.a + +LIBS_LZO := \ + $(DEPKGS)/lib/liblzo2.a + +LIBS_MYSQL := \ + $(DEPKGS)/lib/libmysql.a + +LIBS_POSTGRESQL := \ + $(DEPKGS)/lib/libpq.a + +LIBS_SQLITE := \ + $(DEPKGS)/lib/libsqlite3.a + +LIBS_SSL := \ + $(DEPKGS)/lib/libssl.dll.a + +LIBS_CRYPTO := \ + $(DEPKGS)/lib/libcrypto.dll.a + +LIBS_WX := \ + $(DEPKGS)/lib/wx_dll/libwxmsw28_core.a \ + $(DEPKGS)/lib/wx_dll/libwxbase28.a + +LIBS_CATS := \ + $(LIBDIR)/libcats.a + +LIBS_BACULA := \ + $(LIBDIR)/libbacula.a + +HAVES := \ + -DHAVE_WIN32 \ + -DHAVE_MINGW \ + -DHAVE_ZLIB_H \ + -DHAVE_LZO \ + -DHAVE_LIBZ \ + -DHAVE_CRYPTO \ + -DHAVE_OPENSSL \ + -DHAVE_TLS \ + $(HAVES) diff --git a/src/win32/Makefile.rules b/src/win32/Makefile.rules new file mode 100644 index 00000000..b41d7b5b --- /dev/null +++ b/src/win32/Makefile.rules @@ -0,0 +1,79 @@ +# +# Copyright (C) 2000-2018 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# Makefile for win32 bacula executables +# Using MinGW cross-compiler on GNU/Linux +# +# Written by Robert Nelson, June 2006 +# + +define clean_obj + $(ECHO_CMD)rm -f $(1) $(patsubst %.o,%.d,$(filter-out %.res,$(1))) + $(ECHO_CMD)rm -f $(1) $(addsuffix .d.*,$(basename $(1))) +endef + +define clean_exe + $(ECHO_CMD)rm -f $(1) 
$(addsuffix .dbg,$(basename $(1))) +endef + +define checkdir + @if [ ! -e $(dir $(1)) ]; then mkdir -p $(dir $(1)); fi +endef + +define makedep + @echo "Generating dependencies for $<" + $(call checkdir,$@) + $(ECHO_CMD)$(CXX) -MM $(CPPFLAGS) $< > $@.$$$$; \ + sed 's,\($*\)\.o[ :]*,\1.o $@ : ,g' < $@.$$$$ > $@; \ + rm -f $@.$$$$ +endef + +define link_conapp + @echo "Linking $@" + $(call checkdir,$@) + $(ECHO_CMD)$(CXX) $(CFLAGS) $(LDFLAGS) -mconsole $^ $(1) -o $@ +endef + +define link_winapp + @echo "Linking $@" + $(call checkdir,$@) + $(ECHO_CMD)$(CXX) $(CFLAGS) $(LDFLAGS) -mwindows $^ $(1) -o $@ +endef + +define makedbg + @echo "Stripping debug info from $@" + $(call checkdir,$@) + $(ECHO_CMD)$(OBJCPY) --only-keep-debug $@ $(addsuffix .dbg,$(basename $@)) ; \ + $(OBJCPY) --strip-debug $@ ; \ + $(OBJCPY) --add-gnu-debuglink=$(addsuffix .dbg,$(basename $@)) $@ +endef + +$(OBJDIR)/%.o: %.c + @echo "Compiling $<" + $(call checkdir, $@) + $(ECHO_CMD)$(CXX) $(CFLAGS) -c $< -o $@ + +$(OBJDIR)/%.o: %.cpp + @echo "Compiling $<" + $(call checkdir, $@) + $(ECHO_CMD)$(CXX) $(CFLAGS) -c $< -o $@ + +$(OBJDIR)/%.res: %.rc + @echo "Compiling $<" + $(call checkdir, $@) + $(ECHO_CMD)$(WINDRES) $(WINDRESFLAGS) -O coff $< -o $@ + +$(OBJDIR)/%.d: %.c + $(call checkdir, $@) + $(makedep) + +$(OBJDIR)/%.d: %.cpp + $(call checkdir, $@) + $(makedep) + +ifneq ($(ALL_OBJS),) +ifneq ($(MAKECMDGOALS),clean) +include $(patsubst %.o,%.d,$(filter-out %.res,$(ALL_OBJS))) +endif +endif diff --git a/src/win32/README.mingw b/src/win32/README.mingw new file mode 100644 index 00000000..ffbbf50c --- /dev/null +++ b/src/win32/README.mingw @@ -0,0 +1,376 @@ + +Instructions for cross compiling the Win32/64 FD on Linux +======================================================= +This part of the file documents the tools (scripts) we use for building Bacula for +Microsoft Windows using the cross-compiler tools on a Linux system. We +use Ubuntu 14.04 so building on that system should definitely work. But there +shouldn't be any issues on other Linux distributions. + +We don't officially support this method, but it is what we use, and it should +build on any Linux machine if you carefully follow the instructions and have +all the prerequisite programs loaded on your machine. + +We expect that there may be problems on systems other than Linux where you +are pretty much on your own. However, we will try to provide responses to +your questions on the bacula-devel list, but we can't guarantee anything. + +Note: the environment variable DEPKGS must point to a directory that +contains: depkgs-mingw32 depkgs-mingw-w64 and cross-tools + +Directory Structure +===================== +The new directory structure is: + + xxx (any directory) + bacula Top level Bacula source directory -- any name + src + win32 Main directory where Windows version is built. + + docs Top level Bacula documentation directory + + yyy (any directory, but pointed to by the DEPKGS environment variable) + depkgs-mingw32 (MinGW32) 3rd Party Dependencies for MinGW32 build + bin -- NOTE! 
depkgs-msvc is no longer supported + include | + lib | + man | Created by script + nsis |-- .../bacula/src/win32/build-depkgs-mingw32 + scons | + share | + src | + ssl -- + vss + inc A copy of the Windows VSS/inc directory + + + depkgs-mingw-w64 (MinGW64) 3rd Party Dependencies for MinGW64 build + bin -- + include | + lib | + man | Created by script + nsis |-- .../bacula/src/win32/build-depkgs-mingw-w64 + scons | + share | + src | + ssl -- + vss + inc A copy of the Windows VSS/inc directory + +One-time Setup +============== + +If you're reading this file, you've probably already cloned the GIT source tree or +extracted the contents of the source tar. If not, you need to do that first. + +You also need to download one of the doc tar balls and extract it to your +top level Bacula directory. + +The extracted doc directory name will be bacula-docs-version where version +is the version number. The directory must be renamed to docs (i.e. remove +the leading bacula- and the -version portion of the name). + +As an alternative to setting up the old documents that are needed by the Win32 +installer, you may comment out the following lines in +src/win32/win32_installer/Makefile: + +#DOC_FILES := \ +# manual/bacula.pdf \ +# manual/bacula/*.html \ +# manual/bacula/*.png \ +# manual/bacula/*.css + +Note, a number of packages must be installed to build the depkgs files. +Most are rather standard, such as gcc, g++, +make, ... However, a few that you may not have are: + + wget + texinfo + flex + bison + patch (Debian) + m4 + postgresql (at least client) + mysql (at least client) + SQLite3 (from depkgs or as package) + readline (readlineN-dev on Debian) + ... + +NB: On Debian, I had to remove /usr/bin/lorder for +postgresql to build correctly. + +Install the cross compiler and makensis + apt-get install g++-mingw-w64-i686 g++-mingw-w64-x86-64 gcc-mingw-w64-i686 gcc-mingw-w64-x86-64 nsis + +For OpenSSL 1.0.2n, I have commented out the fstat() definition in /usr/share/mingw-w64/include/sys/stat.h +with #ifdef 0 / #endif + +Download and build the 3rd party dependencies + ./build-depkgs-mingw32 + ./build-depkgs-mingw64 + +Files are also available on bsweb:/var/www/dl/Depkgs-mingw32-6.6-paa9aiMa/ + + +Make sure that libgcc and libstdc++ mingw files are copied to depkgs-mingw32/bin and depkgs-mingw-w64/bin + /usr/lib/gcc/i686-w64-mingw32/*-posix/libgcc_s_sjlj-1.dll + /usr/lib/gcc/i686-w64-mingw32/*-posix/libstdc++-6.dll + /usr/i686-w64-mingw32/lib/libwinpthread-1.dll + + /usr/lib/gcc/x86_64-w64-mingw32/*-posix/libgcc_s_seh-1.dll + /usr/lib/gcc/x86_64-w64-mingw32/*-posix/libstdc++-6.dll + /usr/x86_64-w64-mingw32/lib/libwinpthread-1.dll + +Note that during the build process you will see a moderate amount of +output and some warnings. If something more serious happens +and the build fails, it is probably because you don't have one +of the build dependencies (hopefully all mentioned above) loaded on your +system. To find out what is going wrong, do the following: + + cd .../depkgs-mingw32/src/<package> + +where <package> is where the package is unpacked and built. Normally +it is relatively obvious when looking at the src directory. + +In that directory, you should find a make.log, which has the full details +of the compiles, links, and installs done for that package.
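+
+For example, if one of the dependencies (say zlib -- the package name and
+directory below are only illustrative) stops part way through, the following
+is usually enough to spot the failing command; only standard shell tools
+are assumed:
+
+   # go to the directory the archive was unpacked into
+   cd .../depkgs-mingw32/src/zlib-1.2.x
+   # the last lines of make.log normally show the command that failed
+   tail -n 40 make.log
+   # list any compile errors together with their line numbers
+   grep -n "error:" make.log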
+ +=== + +For the QT part, you must download the QT binaries from the +QT website: + +http://download.qt-project.org/archive/qt/4.8/4.8.4/ + +Then copy the Qt*.dll files to depkgs-mingw32/bin and the headers to +depkgs-mingw32/include + +=== + +See External-mingw-w64 for the download location + +You need the header files from the Microsoft VSS SDK. Unfortunately the SDK +can only be downloaded and installed on a Windows system. We do not have +the right to distribute it, so you must download it yourself. +You can find it on Microsoft's web-site at: + +http://www.microsoft.com/downloads/details.aspx?FamilyID=0b4f56e4-0ccc-4626-826a-ed2c4c95c871&DisplayLang=en + +If that link doesn't work, then go to http://www.microsoft.com and search for + + "download volume shadow copy service sdk" + +We are currently using version 7.2, released 8/3/2005 (a bit old, but it +works). + +Normally the files will be installed in: + + c:\Program Files\Microsoft\VSSSDK72 + +You only need to copy everything under the c:\Program Files\Microsoft\VSSSDK72\inc +directory into .../depkgs-mingw32/vss/inc and .../depkgs-mingw-w64/vss/inc. +In doing so, please ensure that +the case is maintained on the directory and filenames -- some contain uppercase +characters !!! + +Some definitions are already defined in other headers, which produces warnings; +the depkgs-mingw-w64.tar.bz2 archive contains fixes with #ifndef guards. + +The above only needs to be done once unless we change the cross-tools +or the dependencies versions. In general, you can run the script multiple +times with no problem. For it to work, you must have at a minimum the +following: + + gcc + g++ + patch + wget + texinfo + bison + flex + python + unzip + tar + +and possibly other packages. + + +Building +======== + +Finally, to build the Microsoft Windows version of Bacula, do the following: + + cd .../bacula/src/win32 + make clean + ./makeall + +or + cd .../bacula/src/win32 + make clean + make + make win64=yes + +This builds both the 32 bit version and the 64 bit version. +The binaries are in the release32 and release64 directories. + + + +Updating the 3rd party package patches +====================================== + +If one of the patches changes in .../bacula/src/win32/patches, you will +need to update the corresponding dependency. + +Adding a new global function or global data +=========================================== + +bacula.dll +---------- + +The code from the following directories is built into bacula.dll: + + .../bacula/src/lib + .../bacula/src/libfind + .../bacula/src/win32/compat + +A new function or data variable which must be accessed from outside +of bacula.dll requires special treatment. It must be specifically +exported. + +New data variables are exported by adding the macro DLL_IMP_EXP to +the variable declaration in the header file. All exported variables +must be declared in a header file and MUST NOT be declared in a +source file referencing the variable. Example, src/lib/runscript.h: + +extern DLL_IMP_EXP bool (*console_command)(JCR *jcr, const char *cmd); + +or src/jcr.h + +extern int DLL_IMP_EXP num_jobs_run; +extern DLL_IMP_EXP dlist * last_jobs; +... + + +Exporting functions is now more or less automated.
If you find that +a function name has been added, changed, or an argument modified, +simply do the following: + + cd .../bacula/src/win32/lib + make (to build the .o files, note the link will fail) + ./make_def >bacula32.def or >bacula64.def + +This should rebuild the bacula.def file, but it uses relative paths +and assumes you have the directory structure noted above. If you +are using something different, you can set the NM variable at the +top of the make_def file to use an absolute path to the correct +directory. + +===== manual changing of bacula32.def or bacula64.def no longer necessary ===== +If you want to do it manually, please see below: +Exporting a function requires a bit more work. You must determine the +C++ mangled name of the new function. + + strings .../bacula/src/win32/lib/<file>.o | grep <function> + +Note, strings often will not show the desired symbol. In that case, +use: + + nm .../bacula/src/win32/lib/<file>.o + +Replace <file> with the base part of the name of the source code file +which contains the new function. Replace <function> with the name of +the new function. Remove the leading underscore and place the result +in the file + + .../bacula/src/win32/lib/bacula64.def +=== end manual changing of bacula64.def ========== + +If you add a new file, you will need to specify its name in + + .../bacula/src/win32/lib/Makefile +and + .../bacula/src/win32/libbac/Makefile + + +Running gdb on the Win32 files +================================================== +You can use the mingw64 gdb to debug Bacula on Win64 by downloading +it from Source Forge: + +http://sourceforge.net/project/showfiles.php?group_id=202880&package_id=311650 + + +Download one of their .exe versions, which is an installer that you +can run on Win32 to install gdb. This gdb is built with mingw64 so it will +run independently of any cygwin installation. Note, not all the releases +come with an installer. I had to go back 3 or 4 versions to find it. Otherwise +you can download the source and build it. Thanks to Eric Bollengier for +this tip. + +Build Trial version +=================== + +To build the trial version, just set the HAVES=-DBEEF_DEMO_ENABLED=1 environment +variable before compiling everything. + +Structure of the MinGW64/32 build environment +========================================== + +The basic strategy is that each Makefile in the various subdirectories includes +Makefile.inc, defines variables and rules specific to what is being built, +and then includes Makefile.rules, which defines all the rules. + +Makefile.inc defines the locations of all the dependencies and the compiler +and linker flags. It is automatically created from Makefile.inc.in. Any +changes must be made to Makefile.inc.in, not Makefile.inc, or they will be +overwritten the next time Makefile.inc.in is updated. + +Makefile.rules defines a bunch of macros to simplify building. It also +includes all the basic rules for building objects, GUI and console +executables, etc. + +Makefile.template is a template for creating new Makefiles; if you are +creating a new directory, copy Makefile.template to Makefile in that +directory and edit it to suit. + +Upgrading the system mingw +================================== +Every time you upgrade the system mingw, for example when changing from +Ubuntu 12.04 to Ubuntu 14.04, you must update the system library dll +files in your build environment. + +Do so by running the following from src/win32: + +./build-depkgs-mingw32 mingw +./build-depkgs-mingw-w64 mingw + +All that does is copy the system .dll files into the appropriate +depkgs directory.
Then while building the installer, these files +are copied from the depkgs directory into the installer binary. + +Alternatively, you can look at the build-depkgs-xxx script and +manually run the code in the function process_mingw(). + + +Upgrading your depkgs mingw +==================================== + +mkdir old +mv cross-tools old +mv depkgs-mingw32 old +mv depkgs-mingw-w64 old + +# from src/win32 +./build-depkgs-mingw32 +./build-depkgs-mingw-w64 + +# from new depkgs-mingw32 dir +cp ../old/depkgs-mingw32/bin/libgcc_s_dw2-1.dll bin/ +cp ../old/depkgs-mingw32/bin/Qt* bin/ +cp -r ../old/depkgs-mingw32/lib/qt lib +cp -r ../old/depkgs-mingw32/include/qt include/ +cp -r ../old/depkgs-mingw32/include/src include/ +cp -r ../old/depkgs-mingw32/vss . + + +# from new depkgs-mingw-w64 +cp -r ../old/depkgs-mingw-w64/vss . diff --git a/src/win32/README.vc8 b/src/win32/README.vc8 new file mode 100644 index 00000000..c810f9ad --- /dev/null +++ b/src/win32/README.vc8 @@ -0,0 +1,246 @@ +Instructions to build Bacula with Microsoft Visual C++ 2005 +=========================================================== + +NOTE: These instructions are probably quite accurate as several people +have used them. However, the project no longer maintains the files necessary +to build using MSVC. As a consequence, some of the file and scripts may +be out of date, and you will almost surely need to manually update the +MSVC project files. We do not supply any support on this. + +The project uses Mingw to cross-compile. Those files are kept up to date though +during development, they may sometimes be broken for a few days ... + +Using the Express Edition (free version) +---------------------------------------- + Download instructions: + - Visual C++ 2005 Express Edition (2MB + 66MB) + http://msdn.microsoft.com/vstudio/express/visualc/download/ + NOTE: You may want to download the whole CD for offline usage + instead of the web installer, as Microsoft will + start to charge for VC++ one year after the product launch + (launch was in November 2005, see VC++ FAQ). + NOTE: last modifications in order to compile have been tested with + Visual C++ Standard Edition, not with Express Edition, but this shouldn't change anything. + + - Microsoft VSS SDK. You can find it on Microsoft's web-site at: + http://www.microsoft.com/downloads/details.aspx?FamilyID=0b4f56e4-0ccc-4626-826a-ed2c4c95c871&DisplayLang=en + If that link doesn't work then go to http://www.microsoft.com and search for + "download volume shadow copy service sdk" + + - Windows Server 2003 SP1 Platform SDK Full Download (385MB) + http://www.microsoft.com/downloads/details.aspx?FamilyId=A55B6B43-E24F-4EA3-A93E-40C0EC4F68E5&displaylang=en + NOTE: choose "Full Download" version on the bottom of that page. + + Installation instructions: + - Visual C++ Express Edition Beta 2: + + Run vcsetup.exe. + + When asked for Installation Options, only check "Graphical IDE" + (MSDN Library is NOT needed). + + Remember where you install it (e.g. E:\Microsoft Visual Studio 8\) + + - Windows Server 2003 SP1 Platform SDK Full Download + + Run psdk-full.exe, type the directory where you downloaded the cab + files. + + In a command prompt, run "PSDK-full.bat " + (e.g. "PSDK-full.bat E:\temp") + + Run \setup.exe + + When asked for the installation directory, choose + \VC\PlatformSDK (e.g. + E:\Microsoft Visual Studio 8\VC\PlatformSDK\) + + When asked for components, you can safely remove documentation, + samples, and all 64-bit tools and libs if you want to save disk + space. 
+ NOTE: Just after having installed "Windows Server 2003 SP1 Platform SDK Full Download", + my XP was no more considered as "genuine". You are warned. + That's why it is recommended to download "volume shadow copy service sdk" before installing SDK + + - Microsoft VSS SDK + + Normally the files will be installed in: C:\Program Files\Microsoft\VSSSDK72 + + You only need to copy everything under the C:\Program Files\Microsoft\VSSSDK72\inc + directory into .../depkgs-msvc/vss/inc. + + - Verify what you've got in your system variables : + + %INCLUDE% (echo %INCLUDE%) must contain + \VC\include;\VC\PlatformSDK\Include; + + %LIB% (echo %LIB%) must contain + \VC\lib;\VC\PlatformSDK\Lib + +One-time Setup +============== + +If you're reading this file you've probably already enlisted in the CVS +tree or extracted the contents of the source tar. If not you need to do +that first. + +You also need to download one of the doc tar balls and extract to your +top level Bacula directory. It is referred to as bacula-top in the +diagram located in README.win32. It will be signified in this file as ... + +The extracted doc directory name will be bacula-docs-version where +version is the version number. The directory must be renamed to docs +(ie remove the leading bacula- and the trailing -version portion of the +name). + +The script build-depkgs-msvc.cmd is used to download and build all the +third party dependencies required by Bacula. In order to build the +dependencies you need the following utilities. Only the binaries listed +in parenthesis are required. Perl must be listed in the PATH, the other +utilities can either be on the PATH or copied to ...\depkgs-msvc\tools. + + Perl (Normal Install, with binaries in the PATH) + http://downloads.activestate.com/ActivePerl/Windows/5.8/ActivePerl-5.8.8.819-MSWin32-x86-267479.msi + + 7-Zip (7z.exe) + http://prdownloads.sourceforge.net/sevenzip/7za443.zip?download + + NASM (nasmw.exe) + http://prdownloads.sourceforge.net/nasm/nasm-0.98.39-win32.zip?download + + patch (patch.exe) + http://prdownloads.sourceforge.net/gnuwin32/patch-2.5.9-6-bin.zip?download + + sed (sed.exe) Depends on libintl, libiconv + http://prdownloads.sourceforge.net/gnuwin32/sed-4.1.4-bin.zip?download + + tee (tee.exe) Depends on libintl, libiconv + http://prdownloads.sourceforge.net/gnuwin32/coreutils-5.3.0-bin.zip?download + + wget (wget.exe) Depends on libintl, libiconv, openssl + http://prdownloads.sourceforge.net/gnuwin32/wget-1.10.1-bin.zip?download + + libintl (libintl3.dll) Dependency of sed, tee, wget + http://prdownloads.sourceforge.net/gnuwin32/libintl-0.14.4-bin.zip?download + + libiconv (libiconv2.dll) Dependency of sed, tee, wget + http://prdownloads.sourceforge.net/gnuwin32/libiconv-1.9.2-1-bin.zip?download + + openssl (libeay32.dll, ssleay32.dll) Dependency of wget + http://prdownloads.sourceforge.net/gnuwin32/openssl-0.9.7c-bin.zip?download + +Once the utilities are installed continue with the next steps. + +Start VC++ 2005 command prompt (in the start menu) + + cd ...\bacula\src\win32 directory. + +Only if you are using Microsoft Visual C++ Express + + "%VCINSTALLDIR%\PlatformSDK\SetEnv" + +The following step should only need to be done the first time and +whenever the dependencies change. + + build-depkgs-msvc + +At this point all of the dependencies should have been downloaded and +built. +The above only needs to be done once unless we change the cross-tools or +the dependencies versions. In general, you can run the script multiple +times with no problem. 
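+
+As a note on the utility list above: if you would rather not modify the
+system PATH, the helper binaries (everything except Perl) can simply be
+copied into the tools directory that build-depkgs-msvc searches. A possible
+layout, assuming the downloaded archives were unpacked into C:\downloads
+(that source path is only an example; create the tools directory first if
+it does not already exist):
+
+   mkdir ...\depkgs-msvc\tools
+   copy C:\downloads\7z.exe        ...\depkgs-msvc\tools
+   copy C:\downloads\nasmw.exe     ...\depkgs-msvc\tools
+   copy C:\downloads\patch.exe     ...\depkgs-msvc\tools
+   copy C:\downloads\sed.exe       ...\depkgs-msvc\tools
+   copy C:\downloads\tee.exe       ...\depkgs-msvc\tools
+   copy C:\downloads\wget.exe      ...\depkgs-msvc\tools
+   copy C:\downloads\libintl3.dll  ...\depkgs-msvc\tools
+   copy C:\downloads\libiconv2.dll ...\depkgs-msvc\tools
+   copy C:\downloads\libeay32.dll  ...\depkgs-msvc\tools
+   copy C:\downloads\ssleay32.dll  ...\depkgs-msvc\tools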
+ + +Building +======== + +Start VC++ 2005 command prompt (in the start menu) + + cd ...\bacula\src\win32 + +If you are using Microsoft Visual C++ Express + + "%VCINSTALLDIR%\PlatformSDK\SetEnv" + +To build the debug version + + build-msvc "Debug|Win32" + +To build the release version + + build-msvc "Release|Win32" + +To cleanup all the built files + + build-msvc /c + +To cleanup only the debug built files + + build-msvc /c "Debug|Win32" + +To rebuild the debug version + + build-msvc /r "Debug|Win32" + + +Updating the 3rd party package patches +====================================== + +If one of the patches changes in ...\bacula\src\win32\patches, you will +need to update the corresponding dependency. You can install new patches +by doing the following (we assume the patch in question is for openssl). + + cd ...\bacula\src\win32 + set CLOBBER_SOURCE=true + build-depkgs-msvc openssl + set CLOBBER_SOURCE= + +NOTE: Setting CLOBBER_SOURCE=true means that any local changes to the + source code in the .../depkgs-msvc/src directory will be lost. + The source will be reextracted from the archive and the current + patches will be applied. + + +Adding a new global function or global data +=========================================== + +bacula.dll +---------- + +The code from the following directories is built into bacula.dll: + + .../bacula/src/lib + .../bacula/src/libfind + .../bacula/src/win32/compat + +A new function or data variable which must be accessed from outside +of bacula.dll requires special treatment. It must be specifically +exported. + +New data variables are exported by adding the macro DLL_IMP_EXP to +the variable declaration in the header file. All exported variables +must be declared in a header file and MUST NOT be declared in a +source file referencing the variable. + +Exporting a function requires a bit more work. You must determine the +C++ mangled name of the new function. Fortunately it is displayed in +parentheses in the linker error message. Place it in the +.../bacula/src/win32/libbac/bacula.def file. + +In .../bacula/src/win32/libbac/msvc there is a shell "make_def_msvc" that creates +a def file (not really clean way). +In cygwin, one has to type ./make_def_msvc > bacula.def + +bacula_cats.dll +--------------- + +The code from the src/cats directory is built into database provider +specific libraries of the form cats_.dll. + +The database specific portion is mysql for MySQL, pgsql for +PostgreSQL, sqlite for SQLite, and bdb for the built-in Bacula database. + +During installation of the Director, one of the database libraries is +copied to the system and renamed bacula_cats.dll. + +A new function or data variable which must be accessed from outside +of bacula_cats.dll requires special treatment. It must be specifically +exported. + +New data variables and functions are exported by placing their mangled +name in the file .../bacula/src/win32/cats/bacula_cats/bacula_cats.def. + +The mangled name is printed, surrounded by parentheses, in the Linker +error message. + +In .../bacula/src/win32/bacula_cats/ there is a shell "make_def_msvc" that creates +a def file (not really clean way). 
+In cygwin, one has to type ./make_def_msvc > bacula_cats.def diff --git a/src/win32/bacula.sln b/src/win32/bacula.sln new file mode 100644 index 00000000..a81b5edd --- /dev/null +++ b/src/win32/bacula.sln @@ -0,0 +1,403 @@ + +Microsoft Visual Studio Solution File, Format Version 9.00 +# Visual Studio 2005 +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "wx-console", "wx-console\wx-console.vcproj", "{9BA8E10D-0D82-4B25-8543-DE34641FBC10}" + ProjectSection(ProjectDependencies) = postProject + {2D729599-C008-4154-BCCB-53E6A260F220} = {2D729599-C008-4154-BCCB-53E6A260F220} + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "console", "console\console.vcproj", "{A0F65E06-9F18-40AC-81F6-A080852F1104}" + ProjectSection(ProjectDependencies) = postProject + {2D729599-C008-4154-BCCB-53E6A260F220} = {2D729599-C008-4154-BCCB-53E6A260F220} + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "fstype", "tools\fstype\fstype.vcproj", "{AAF33ADD-A4F9-4BCA-B7F9-0C35C843CC7E}" + ProjectSection(ProjectDependencies) = postProject + {2D729599-C008-4154-BCCB-53E6A260F220} = {2D729599-C008-4154-BCCB-53E6A260F220} + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libbac", "libbac\libbac.vcproj", "{374BF775-AF68-4A88-814A-48F692DFFE5A}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "drivetype", "tools\drivetype\drivetype.vcproj", "{E5BC5B2E-976D-4DED-AA07-5DD52BF2163F}" + ProjectSection(ProjectDependencies) = postProject + {2D729599-C008-4154-BCCB-53E6A260F220} = {2D729599-C008-4154-BCCB-53E6A260F220} + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "testfind", "tools\testfind\testfind.vcproj", "{558838F9-D792-4F56-AAB2-99C03687C5FF}" + ProjectSection(ProjectDependencies) = postProject + {2D729599-C008-4154-BCCB-53E6A260F220} = {2D729599-C008-4154-BCCB-53E6A260F220} + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "testls", "tools\testls\testls.vcproj", "{28FB58CE-AB8C-4C60-83DA-BC1BFCC59BFF}" + ProjectSection(ProjectDependencies) = postProject + {2D729599-C008-4154-BCCB-53E6A260F220} = {2D729599-C008-4154-BCCB-53E6A260F220} + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "baculafd", "filed\baculafd.vcproj", "{6A435DBB-4D3D-4DAE-8CB3-E0AF169A240B}" + ProjectSection(ProjectDependencies) = postProject + {2D729599-C008-4154-BCCB-53E6A260F220} = {2D729599-C008-4154-BCCB-53E6A260F220} + EndProjectSection +EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{825DFFD0-4747-43CA-8326-529655E31935}" + ProjectSection(SolutionItems) = preProject + build-depkgs-mingw32 = build-depkgs-mingw32 + build-msvc.cmd = build-msvc.cmd + build-win32-cross-tools = build-win32-cross-tools + External-mingw32 = External-mingw32 + README.mingw32 = README.mingw32 + README.vc8 = README.vc8 + README.win32 = README.win32 + ..\..\technotes-1.39 = ..\..\technotes-1.39 + EndProjectSection +EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Header Files", "Header Files", "{C8301485-CFD1-43D4-827C-8EA050C8E256}" + ProjectSection(SolutionItems) = preProject + ..\baconfig.h = ..\baconfig.h + ..\bacula.h = ..\bacula.h + ..\bc_types.h = ..\bc_types.h + ..\jcr.h = ..\jcr.h + ..\version.h = ..\version.h + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "baculadird", "dird\dird.vcproj", "{D03415F7-654E-42F4-B0E9-CB8FBE3F22FA}" + 
ProjectSection(ProjectDependencies) = postProject + {2D729599-C008-4154-BCCB-53E6A260F220} = {2D729599-C008-4154-BCCB-53E6A260F220} + {8B79A2B5-8889-43D4-9B92-9AE8A6F00413} = {8B79A2B5-8889-43D4-9B92-9AE8A6F00413} + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "baculasd", "stored\baculasd\baculasd.vcproj", "{F5F063F8-11A1-475A-82E2-19759BB40B25}" + ProjectSection(ProjectDependencies) = postProject + {CAD30B43-D93B-47D5-9161-6A3E9BADCC1D} = {CAD30B43-D93B-47D5-9161-6A3E9BADCC1D} + {2D729599-C008-4154-BCCB-53E6A260F220} = {2D729599-C008-4154-BCCB-53E6A260F220} + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "bcopy", "stored\bcopy\bcopy.vcproj", "{614CE916-0972-4126-9392-CD9FC0ADD7DE}" + ProjectSection(ProjectDependencies) = postProject + {CAD30B43-D93B-47D5-9161-6A3E9BADCC1D} = {CAD30B43-D93B-47D5-9161-6A3E9BADCC1D} + {2D729599-C008-4154-BCCB-53E6A260F220} = {2D729599-C008-4154-BCCB-53E6A260F220} + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "bextract", "stored\bextract\bextract.vcproj", "{6A7AA493-E46C-4994-B8D6-AA6C9C19C9BA}" + ProjectSection(ProjectDependencies) = postProject + {CAD30B43-D93B-47D5-9161-6A3E9BADCC1D} = {CAD30B43-D93B-47D5-9161-6A3E9BADCC1D} + {2D729599-C008-4154-BCCB-53E6A260F220} = {2D729599-C008-4154-BCCB-53E6A260F220} + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "bls", "stored\bls\bls.vcproj", "{F8AF7D74-2918-422B-A7B6-4D98566B7160}" + ProjectSection(ProjectDependencies) = postProject + {CAD30B43-D93B-47D5-9161-6A3E9BADCC1D} = {CAD30B43-D93B-47D5-9161-6A3E9BADCC1D} + {2D729599-C008-4154-BCCB-53E6A260F220} = {2D729599-C008-4154-BCCB-53E6A260F220} + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "bscan", "stored\bscan\bscan.vcproj", "{56EADEDB-FBED-4758-8B54-7B0B47ABDABF}" + ProjectSection(ProjectDependencies) = postProject + {CAD30B43-D93B-47D5-9161-6A3E9BADCC1D} = {CAD30B43-D93B-47D5-9161-6A3E9BADCC1D} + {2D729599-C008-4154-BCCB-53E6A260F220} = {2D729599-C008-4154-BCCB-53E6A260F220} + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "btape", "stored\btape\btape.vcproj", "{496415E0-AF44-4AD8-8C99-91B837DDF469}" + ProjectSection(ProjectDependencies) = postProject + {CAD30B43-D93B-47D5-9161-6A3E9BADCC1D} = {CAD30B43-D93B-47D5-9161-6A3E9BADCC1D} + {2D729599-C008-4154-BCCB-53E6A260F220} = {2D729599-C008-4154-BCCB-53E6A260F220} + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "storelib", "stored\storelib\storelib.vcproj", "{CAD30B43-D93B-47D5-9161-6A3E9BADCC1D}" + ProjectSection(ProjectDependencies) = postProject + {2D729599-C008-4154-BCCB-53E6A260F220} = {2D729599-C008-4154-BCCB-53E6A260F220} + {8B79A2B5-8889-43D4-9B92-9AE8A6F00413} = {8B79A2B5-8889-43D4-9B92-9AE8A6F00413} + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "postest", "stored\postest\postest.vcproj", "{208D3989-794B-47A2-9D04-D7AEE1524078}" + ProjectSection(ProjectDependencies) = postProject + {CAD30B43-D93B-47D5-9161-6A3E9BADCC1D} = {CAD30B43-D93B-47D5-9161-6A3E9BADCC1D} + {2D729599-C008-4154-BCCB-53E6A260F220} = {2D729599-C008-4154-BCCB-53E6A260F220} + EndProjectSection +EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Libraries", "Libraries", "{0377E151-3352-487B-A5CF-24BCDC9EC43F}" +EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Test Tools", "Test Tools", 
"{D6767108-F420-41C0-A834-2E6F487E1AB3}" +EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Daemons", "Daemons", "{B9099DDA-18C9-4DE0-AECB-5D8139EA619F}" +EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Utilities", "Utilities", "{37F903FE-3474-4C93-AD5B-987CB6A92E62}" +EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Consoles", "Consoles", "{C66C8B3B-C156-4498-91E9-CA9A24CF9051}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "bacula", "bacula\bacula.vcproj", "{2D729599-C008-4154-BCCB-53E6A260F220}" + ProjectSection(ProjectDependencies) = postProject + {374BF775-AF68-4A88-814A-48F692DFFE5A} = {374BF775-AF68-4A88-814A-48F692DFFE5A} + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "cats_mysql", "cats\cats_mysql\cats_mysql.vcproj", "{B52BD53B-0E57-4E9A-A601-8E8171BA1CFC}" + ProjectSection(ProjectDependencies) = postProject + {2D729599-C008-4154-BCCB-53E6A260F220} = {2D729599-C008-4154-BCCB-53E6A260F220} + {8B79A2B5-8889-43D4-9B92-9AE8A6F00413} = {8B79A2B5-8889-43D4-9B92-9AE8A6F00413} + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "bacula_cats", "cats\bacula_cats\bacula_cats.vcproj", "{8B79A2B5-8889-43D4-9B92-9AE8A6F00413}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "dbcheck", "tools\dbcheck\dbcheck.vcproj", "{85696E20-777A-41F6-BC00-2E7AB375B171}" + ProjectSection(ProjectDependencies) = postProject + {2D729599-C008-4154-BCCB-53E6A260F220} = {2D729599-C008-4154-BCCB-53E6A260F220} + {8B79A2B5-8889-43D4-9B92-9AE8A6F00413} = {8B79A2B5-8889-43D4-9B92-9AE8A6F00413} + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "cats_postgresql", "cats\cats_postgresql\cats_postgresql.vcproj", "{2FB961E5-213C-4475-8CB3-72F904D40752}" + ProjectSection(ProjectDependencies) = postProject + {2D729599-C008-4154-BCCB-53E6A260F220} = {2D729599-C008-4154-BCCB-53E6A260F220} + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "scsilist", "tools\scsilist\scsilist.vcproj", "{56D8C233-610E-4EE4-A73A-72CEF1C6A33A}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "installer", "installer\installer.vcproj", "{6D1B0964-FB32-4916-A61C-49D7F715EAD8}" + ProjectSection(ProjectDependencies) = postProject + {A0F65E06-9F18-40AC-81F6-A080852F1104} = {A0F65E06-9F18-40AC-81F6-A080852F1104} + {9BA8E10D-0D82-4B25-8543-DE34641FBC10} = {9BA8E10D-0D82-4B25-8543-DE34641FBC10} + {614CE916-0972-4126-9392-CD9FC0ADD7DE} = {614CE916-0972-4126-9392-CD9FC0ADD7DE} + {85696E20-777A-41F6-BC00-2E7AB375B171} = {85696E20-777A-41F6-BC00-2E7AB375B171} + {E5BC5B2E-976D-4DED-AA07-5DD52BF2163F} = {E5BC5B2E-976D-4DED-AA07-5DD52BF2163F} + {56D8C233-610E-4EE4-A73A-72CEF1C6A33A} = {56D8C233-610E-4EE4-A73A-72CEF1C6A33A} + {23BFE838-5682-4F39-969F-0B40366D4D98} = {23BFE838-5682-4F39-969F-0B40366D4D98} + {B52BD53B-0E57-4E9A-A601-8E8171BA1CFC} = {B52BD53B-0E57-4E9A-A601-8E8171BA1CFC} + {CAD30B43-D93B-47D5-9161-6A3E9BADCC1D} = {CAD30B43-D93B-47D5-9161-6A3E9BADCC1D} + {F8AF7D74-2918-422B-A7B6-4D98566B7160} = {F8AF7D74-2918-422B-A7B6-4D98566B7160} + {374BF775-AF68-4A88-814A-48F692DFFE5A} = {374BF775-AF68-4A88-814A-48F692DFFE5A} + {6A7AA493-E46C-4994-B8D6-AA6C9C19C9BA} = {6A7AA493-E46C-4994-B8D6-AA6C9C19C9BA} + {AB67F297-8491-4515-8E52-BFF5340EC242} = {AB67F297-8491-4515-8E52-BFF5340EC242} + {2D729599-C008-4154-BCCB-53E6A260F220} = {2D729599-C008-4154-BCCB-53E6A260F220} + {0F56AEB0-14DA-4A80-8962-1F85A17339D0} = 
{0F56AEB0-14DA-4A80-8962-1F85A17339D0} + {8B79A2B5-8889-43D4-9B92-9AE8A6F00413} = {8B79A2B5-8889-43D4-9B92-9AE8A6F00413} + {6A435DBB-4D3D-4DAE-8CB3-E0AF169A240B} = {6A435DBB-4D3D-4DAE-8CB3-E0AF169A240B} + {28FB58CE-AB8C-4C60-83DA-BC1BFCC59BFF} = {28FB58CE-AB8C-4C60-83DA-BC1BFCC59BFF} + {56EADEDB-FBED-4758-8B54-7B0B47ABDABF} = {56EADEDB-FBED-4758-8B54-7B0B47ABDABF} + {AAF33ADD-A4F9-4BCA-B7F9-0C35C843CC7E} = {AAF33ADD-A4F9-4BCA-B7F9-0C35C843CC7E} + {496415E0-AF44-4AD8-8C99-91B837DDF469} = {496415E0-AF44-4AD8-8C99-91B837DDF469} + {2FB961E5-213C-4475-8CB3-72F904D40752} = {2FB961E5-213C-4475-8CB3-72F904D40752} + {D03415F7-654E-42F4-B0E9-CB8FBE3F22FA} = {D03415F7-654E-42F4-B0E9-CB8FBE3F22FA} + {F5F063F8-11A1-475A-82E2-19759BB40B25} = {F5F063F8-11A1-475A-82E2-19759BB40B25} + {558838F9-D792-4F56-AAB2-99C03687C5FF} = {558838F9-D792-4F56-AAB2-99C03687C5FF} + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "bsleep", "scripts\bsleep.vcproj", "{0F56AEB0-14DA-4A80-8962-1F85A17339D0}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "bsmtp", "tools\bsmtp\bsmtp.vcproj", "{AB67F297-8491-4515-8E52-BFF5340EC242}" + ProjectSection(ProjectDependencies) = postProject + {2D729599-C008-4154-BCCB-53E6A260F220} = {2D729599-C008-4154-BCCB-53E6A260F220} + EndProjectSection +EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Scripts", "Scripts", "{40CADEE4-8D53-4157-AA36-B256F4934FC3}" + ProjectSection(SolutionItems) = preProject + build-depkgs-msvc.cmd = build-depkgs-msvc.cmd + scripts\disk-changer.cmd = scripts\disk-changer.cmd + scripts\dvd-handler.cmd = scripts\dvd-handler.cmd + scripts\mtx-changer.cmd = scripts\mtx-changer.cmd + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "cats_sqlite3", "cats\cats_sqlite3\cats_sqlite3.vcproj", "{23BFE838-5682-4F39-969F-0B40366D4D98}" + ProjectSection(ProjectDependencies) = postProject + {2D729599-C008-4154-BCCB-53E6A260F220} = {2D729599-C008-4154-BCCB-53E6A260F220} + EndProjectSection +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Debug|Win32 = Debug|Win32 + Release|Any CPU = Release|Any CPU + Release|Win32 = Release|Win32 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {9BA8E10D-0D82-4B25-8543-DE34641FBC10}.Debug|Any CPU.ActiveCfg = Debug|Win32 + {9BA8E10D-0D82-4B25-8543-DE34641FBC10}.Debug|Win32.ActiveCfg = Debug|Win32 + {9BA8E10D-0D82-4B25-8543-DE34641FBC10}.Debug|Win32.Build.0 = Debug|Win32 + {9BA8E10D-0D82-4B25-8543-DE34641FBC10}.Release|Any CPU.ActiveCfg = Release|Win32 + {9BA8E10D-0D82-4B25-8543-DE34641FBC10}.Release|Win32.ActiveCfg = Release|Win32 + {9BA8E10D-0D82-4B25-8543-DE34641FBC10}.Release|Win32.Build.0 = Release|Win32 + {A0F65E06-9F18-40AC-81F6-A080852F1104}.Debug|Any CPU.ActiveCfg = Debug|Win32 + {A0F65E06-9F18-40AC-81F6-A080852F1104}.Debug|Win32.ActiveCfg = Debug|Win32 + {A0F65E06-9F18-40AC-81F6-A080852F1104}.Debug|Win32.Build.0 = Debug|Win32 + {A0F65E06-9F18-40AC-81F6-A080852F1104}.Release|Any CPU.ActiveCfg = Release|Win32 + {A0F65E06-9F18-40AC-81F6-A080852F1104}.Release|Win32.ActiveCfg = Release|Win32 + {A0F65E06-9F18-40AC-81F6-A080852F1104}.Release|Win32.Build.0 = Release|Win32 + {AAF33ADD-A4F9-4BCA-B7F9-0C35C843CC7E}.Debug|Any CPU.ActiveCfg = Debug|Win32 + {AAF33ADD-A4F9-4BCA-B7F9-0C35C843CC7E}.Debug|Win32.ActiveCfg = Debug|Win32 + {AAF33ADD-A4F9-4BCA-B7F9-0C35C843CC7E}.Debug|Win32.Build.0 = Debug|Win32 + 
{AAF33ADD-A4F9-4BCA-B7F9-0C35C843CC7E}.Release|Any CPU.ActiveCfg = Release|Win32 + {AAF33ADD-A4F9-4BCA-B7F9-0C35C843CC7E}.Release|Win32.ActiveCfg = Release|Win32 + {AAF33ADD-A4F9-4BCA-B7F9-0C35C843CC7E}.Release|Win32.Build.0 = Release|Win32 + {374BF775-AF68-4A88-814A-48F692DFFE5A}.Debug|Any CPU.ActiveCfg = Debug|Win32 + {374BF775-AF68-4A88-814A-48F692DFFE5A}.Debug|Win32.ActiveCfg = Debug|Win32 + {374BF775-AF68-4A88-814A-48F692DFFE5A}.Debug|Win32.Build.0 = Debug|Win32 + {374BF775-AF68-4A88-814A-48F692DFFE5A}.Release|Any CPU.ActiveCfg = Release|Win32 + {374BF775-AF68-4A88-814A-48F692DFFE5A}.Release|Win32.ActiveCfg = Release|Win32 + {374BF775-AF68-4A88-814A-48F692DFFE5A}.Release|Win32.Build.0 = Release|Win32 + {E5BC5B2E-976D-4DED-AA07-5DD52BF2163F}.Debug|Any CPU.ActiveCfg = Debug|Win32 + {E5BC5B2E-976D-4DED-AA07-5DD52BF2163F}.Debug|Win32.ActiveCfg = Debug|Win32 + {E5BC5B2E-976D-4DED-AA07-5DD52BF2163F}.Debug|Win32.Build.0 = Debug|Win32 + {E5BC5B2E-976D-4DED-AA07-5DD52BF2163F}.Release|Any CPU.ActiveCfg = Release|Win32 + {E5BC5B2E-976D-4DED-AA07-5DD52BF2163F}.Release|Win32.ActiveCfg = Release|Win32 + {E5BC5B2E-976D-4DED-AA07-5DD52BF2163F}.Release|Win32.Build.0 = Release|Win32 + {558838F9-D792-4F56-AAB2-99C03687C5FF}.Debug|Any CPU.ActiveCfg = Debug|Win32 + {558838F9-D792-4F56-AAB2-99C03687C5FF}.Debug|Win32.ActiveCfg = Debug|Win32 + {558838F9-D792-4F56-AAB2-99C03687C5FF}.Debug|Win32.Build.0 = Debug|Win32 + {558838F9-D792-4F56-AAB2-99C03687C5FF}.Release|Any CPU.ActiveCfg = Release|Win32 + {558838F9-D792-4F56-AAB2-99C03687C5FF}.Release|Win32.ActiveCfg = Release|Win32 + {558838F9-D792-4F56-AAB2-99C03687C5FF}.Release|Win32.Build.0 = Release|Win32 + {28FB58CE-AB8C-4C60-83DA-BC1BFCC59BFF}.Debug|Any CPU.ActiveCfg = Debug|Win32 + {28FB58CE-AB8C-4C60-83DA-BC1BFCC59BFF}.Debug|Win32.ActiveCfg = Debug|Win32 + {28FB58CE-AB8C-4C60-83DA-BC1BFCC59BFF}.Debug|Win32.Build.0 = Debug|Win32 + {28FB58CE-AB8C-4C60-83DA-BC1BFCC59BFF}.Release|Any CPU.ActiveCfg = Release|Win32 + {28FB58CE-AB8C-4C60-83DA-BC1BFCC59BFF}.Release|Win32.ActiveCfg = Release|Win32 + {28FB58CE-AB8C-4C60-83DA-BC1BFCC59BFF}.Release|Win32.Build.0 = Release|Win32 + {6A435DBB-4D3D-4DAE-8CB3-E0AF169A240B}.Debug|Any CPU.ActiveCfg = Debug|Win32 + {6A435DBB-4D3D-4DAE-8CB3-E0AF169A240B}.Debug|Win32.ActiveCfg = Debug|Win32 + {6A435DBB-4D3D-4DAE-8CB3-E0AF169A240B}.Debug|Win32.Build.0 = Debug|Win32 + {6A435DBB-4D3D-4DAE-8CB3-E0AF169A240B}.Release|Any CPU.ActiveCfg = Release|Win32 + {6A435DBB-4D3D-4DAE-8CB3-E0AF169A240B}.Release|Win32.ActiveCfg = Release|Win32 + {6A435DBB-4D3D-4DAE-8CB3-E0AF169A240B}.Release|Win32.Build.0 = Release|Win32 + {D03415F7-654E-42F4-B0E9-CB8FBE3F22FA}.Debug|Any CPU.ActiveCfg = Debug|Win32 + {D03415F7-654E-42F4-B0E9-CB8FBE3F22FA}.Debug|Win32.ActiveCfg = Debug|Win32 + {D03415F7-654E-42F4-B0E9-CB8FBE3F22FA}.Debug|Win32.Build.0 = Debug|Win32 + {D03415F7-654E-42F4-B0E9-CB8FBE3F22FA}.Release|Any CPU.ActiveCfg = Release|Win32 + {D03415F7-654E-42F4-B0E9-CB8FBE3F22FA}.Release|Win32.ActiveCfg = Release|Win32 + {D03415F7-654E-42F4-B0E9-CB8FBE3F22FA}.Release|Win32.Build.0 = Release|Win32 + {F5F063F8-11A1-475A-82E2-19759BB40B25}.Debug|Any CPU.ActiveCfg = Debug|Win32 + {F5F063F8-11A1-475A-82E2-19759BB40B25}.Debug|Win32.ActiveCfg = Debug|Win32 + {F5F063F8-11A1-475A-82E2-19759BB40B25}.Debug|Win32.Build.0 = Debug|Win32 + {F5F063F8-11A1-475A-82E2-19759BB40B25}.Release|Any CPU.ActiveCfg = Release|Win32 + {F5F063F8-11A1-475A-82E2-19759BB40B25}.Release|Win32.ActiveCfg = Release|Win32 + {F5F063F8-11A1-475A-82E2-19759BB40B25}.Release|Win32.Build.0 = 
Release|Win32 + {614CE916-0972-4126-9392-CD9FC0ADD7DE}.Debug|Any CPU.ActiveCfg = Debug|Win32 + {614CE916-0972-4126-9392-CD9FC0ADD7DE}.Debug|Win32.ActiveCfg = Debug|Win32 + {614CE916-0972-4126-9392-CD9FC0ADD7DE}.Debug|Win32.Build.0 = Debug|Win32 + {614CE916-0972-4126-9392-CD9FC0ADD7DE}.Release|Any CPU.ActiveCfg = Release|Win32 + {614CE916-0972-4126-9392-CD9FC0ADD7DE}.Release|Win32.ActiveCfg = Release|Win32 + {614CE916-0972-4126-9392-CD9FC0ADD7DE}.Release|Win32.Build.0 = Release|Win32 + {6A7AA493-E46C-4994-B8D6-AA6C9C19C9BA}.Debug|Any CPU.ActiveCfg = Debug|Win32 + {6A7AA493-E46C-4994-B8D6-AA6C9C19C9BA}.Debug|Win32.ActiveCfg = Debug|Win32 + {6A7AA493-E46C-4994-B8D6-AA6C9C19C9BA}.Debug|Win32.Build.0 = Debug|Win32 + {6A7AA493-E46C-4994-B8D6-AA6C9C19C9BA}.Release|Any CPU.ActiveCfg = Release|Win32 + {6A7AA493-E46C-4994-B8D6-AA6C9C19C9BA}.Release|Win32.ActiveCfg = Release|Win32 + {6A7AA493-E46C-4994-B8D6-AA6C9C19C9BA}.Release|Win32.Build.0 = Release|Win32 + {F8AF7D74-2918-422B-A7B6-4D98566B7160}.Debug|Any CPU.ActiveCfg = Debug|Win32 + {F8AF7D74-2918-422B-A7B6-4D98566B7160}.Debug|Win32.ActiveCfg = Debug|Win32 + {F8AF7D74-2918-422B-A7B6-4D98566B7160}.Debug|Win32.Build.0 = Debug|Win32 + {F8AF7D74-2918-422B-A7B6-4D98566B7160}.Release|Any CPU.ActiveCfg = Release|Win32 + {F8AF7D74-2918-422B-A7B6-4D98566B7160}.Release|Win32.ActiveCfg = Release|Win32 + {F8AF7D74-2918-422B-A7B6-4D98566B7160}.Release|Win32.Build.0 = Release|Win32 + {56EADEDB-FBED-4758-8B54-7B0B47ABDABF}.Debug|Any CPU.ActiveCfg = Debug|Win32 + {56EADEDB-FBED-4758-8B54-7B0B47ABDABF}.Debug|Win32.ActiveCfg = Debug|Win32 + {56EADEDB-FBED-4758-8B54-7B0B47ABDABF}.Debug|Win32.Build.0 = Debug|Win32 + {56EADEDB-FBED-4758-8B54-7B0B47ABDABF}.Release|Any CPU.ActiveCfg = Release|Win32 + {56EADEDB-FBED-4758-8B54-7B0B47ABDABF}.Release|Win32.ActiveCfg = Release|Win32 + {56EADEDB-FBED-4758-8B54-7B0B47ABDABF}.Release|Win32.Build.0 = Release|Win32 + {496415E0-AF44-4AD8-8C99-91B837DDF469}.Debug|Any CPU.ActiveCfg = Debug|Win32 + {496415E0-AF44-4AD8-8C99-91B837DDF469}.Debug|Win32.ActiveCfg = Debug|Win32 + {496415E0-AF44-4AD8-8C99-91B837DDF469}.Debug|Win32.Build.0 = Debug|Win32 + {496415E0-AF44-4AD8-8C99-91B837DDF469}.Release|Any CPU.ActiveCfg = Release|Win32 + {496415E0-AF44-4AD8-8C99-91B837DDF469}.Release|Win32.ActiveCfg = Release|Win32 + {496415E0-AF44-4AD8-8C99-91B837DDF469}.Release|Win32.Build.0 = Release|Win32 + {CAD30B43-D93B-47D5-9161-6A3E9BADCC1D}.Debug|Any CPU.ActiveCfg = Debug|Win32 + {CAD30B43-D93B-47D5-9161-6A3E9BADCC1D}.Debug|Win32.ActiveCfg = Debug|Win32 + {CAD30B43-D93B-47D5-9161-6A3E9BADCC1D}.Debug|Win32.Build.0 = Debug|Win32 + {CAD30B43-D93B-47D5-9161-6A3E9BADCC1D}.Release|Any CPU.ActiveCfg = Release|Win32 + {CAD30B43-D93B-47D5-9161-6A3E9BADCC1D}.Release|Win32.ActiveCfg = Release|Win32 + {CAD30B43-D93B-47D5-9161-6A3E9BADCC1D}.Release|Win32.Build.0 = Release|Win32 + {208D3989-794B-47A2-9D04-D7AEE1524078}.Debug|Any CPU.ActiveCfg = Debug|Win32 + {208D3989-794B-47A2-9D04-D7AEE1524078}.Debug|Win32.ActiveCfg = Debug|Win32 + {208D3989-794B-47A2-9D04-D7AEE1524078}.Release|Any CPU.ActiveCfg = Release|Win32 + {208D3989-794B-47A2-9D04-D7AEE1524078}.Release|Win32.ActiveCfg = Release|Win32 + {2D729599-C008-4154-BCCB-53E6A260F220}.Debug|Any CPU.ActiveCfg = Debug|Win32 + {2D729599-C008-4154-BCCB-53E6A260F220}.Debug|Win32.ActiveCfg = Debug|Win32 + {2D729599-C008-4154-BCCB-53E6A260F220}.Debug|Win32.Build.0 = Debug|Win32 + {2D729599-C008-4154-BCCB-53E6A260F220}.Release|Any CPU.ActiveCfg = Release|Win32 + 
{2D729599-C008-4154-BCCB-53E6A260F220}.Release|Win32.ActiveCfg = Release|Win32 + {2D729599-C008-4154-BCCB-53E6A260F220}.Release|Win32.Build.0 = Release|Win32 + {B52BD53B-0E57-4E9A-A601-8E8171BA1CFC}.Debug|Any CPU.ActiveCfg = Debug|Win32 + {B52BD53B-0E57-4E9A-A601-8E8171BA1CFC}.Debug|Win32.ActiveCfg = Debug|Win32 + {B52BD53B-0E57-4E9A-A601-8E8171BA1CFC}.Debug|Win32.Build.0 = Debug|Win32 + {B52BD53B-0E57-4E9A-A601-8E8171BA1CFC}.Release|Any CPU.ActiveCfg = Release|Win32 + {B52BD53B-0E57-4E9A-A601-8E8171BA1CFC}.Release|Win32.ActiveCfg = Release|Win32 + {B52BD53B-0E57-4E9A-A601-8E8171BA1CFC}.Release|Win32.Build.0 = Release|Win32 + {8B79A2B5-8889-43D4-9B92-9AE8A6F00413}.Debug|Any CPU.ActiveCfg = Debug|Win32 + {8B79A2B5-8889-43D4-9B92-9AE8A6F00413}.Debug|Win32.ActiveCfg = Debug|Win32 + {8B79A2B5-8889-43D4-9B92-9AE8A6F00413}.Debug|Win32.Build.0 = Debug|Win32 + {8B79A2B5-8889-43D4-9B92-9AE8A6F00413}.Release|Any CPU.ActiveCfg = Release|Win32 + {8B79A2B5-8889-43D4-9B92-9AE8A6F00413}.Release|Win32.ActiveCfg = Release|Win32 + {8B79A2B5-8889-43D4-9B92-9AE8A6F00413}.Release|Win32.Build.0 = Release|Win32 + {85696E20-777A-41F6-BC00-2E7AB375B171}.Debug|Any CPU.ActiveCfg = Debug|Win32 + {85696E20-777A-41F6-BC00-2E7AB375B171}.Debug|Win32.ActiveCfg = Debug|Win32 + {85696E20-777A-41F6-BC00-2E7AB375B171}.Debug|Win32.Build.0 = Debug|Win32 + {85696E20-777A-41F6-BC00-2E7AB375B171}.Release|Any CPU.ActiveCfg = Release|Win32 + {85696E20-777A-41F6-BC00-2E7AB375B171}.Release|Win32.ActiveCfg = Release|Win32 + {85696E20-777A-41F6-BC00-2E7AB375B171}.Release|Win32.Build.0 = Release|Win32 + {2FB961E5-213C-4475-8CB3-72F904D40752}.Debug|Any CPU.ActiveCfg = Debug|Win32 + {2FB961E5-213C-4475-8CB3-72F904D40752}.Debug|Win32.ActiveCfg = Debug|Win32 + {2FB961E5-213C-4475-8CB3-72F904D40752}.Debug|Win32.Build.0 = Debug|Win32 + {2FB961E5-213C-4475-8CB3-72F904D40752}.Release|Any CPU.ActiveCfg = Release|Win32 + {2FB961E5-213C-4475-8CB3-72F904D40752}.Release|Win32.ActiveCfg = Release|Win32 + {2FB961E5-213C-4475-8CB3-72F904D40752}.Release|Win32.Build.0 = Release|Win32 + {56D8C233-610E-4EE4-A73A-72CEF1C6A33A}.Debug|Any CPU.ActiveCfg = Debug|Win32 + {56D8C233-610E-4EE4-A73A-72CEF1C6A33A}.Debug|Win32.ActiveCfg = Debug|Win32 + {56D8C233-610E-4EE4-A73A-72CEF1C6A33A}.Debug|Win32.Build.0 = Debug|Win32 + {56D8C233-610E-4EE4-A73A-72CEF1C6A33A}.Release|Any CPU.ActiveCfg = Release|Win32 + {56D8C233-610E-4EE4-A73A-72CEF1C6A33A}.Release|Win32.ActiveCfg = Release|Win32 + {56D8C233-610E-4EE4-A73A-72CEF1C6A33A}.Release|Win32.Build.0 = Release|Win32 + {6D1B0964-FB32-4916-A61C-49D7F715EAD8}.Debug|Any CPU.ActiveCfg = Debug|Win32 + {6D1B0964-FB32-4916-A61C-49D7F715EAD8}.Debug|Win32.ActiveCfg = Debug|Win32 + {6D1B0964-FB32-4916-A61C-49D7F715EAD8}.Debug|Win32.Build.0 = Debug|Win32 + {6D1B0964-FB32-4916-A61C-49D7F715EAD8}.Release|Any CPU.ActiveCfg = Release|Win32 + {6D1B0964-FB32-4916-A61C-49D7F715EAD8}.Release|Win32.ActiveCfg = Release|Win32 + {6D1B0964-FB32-4916-A61C-49D7F715EAD8}.Release|Win32.Build.0 = Release|Win32 + {0F56AEB0-14DA-4A80-8962-1F85A17339D0}.Debug|Any CPU.ActiveCfg = Debug|Win32 + {0F56AEB0-14DA-4A80-8962-1F85A17339D0}.Debug|Win32.ActiveCfg = Debug|Win32 + {0F56AEB0-14DA-4A80-8962-1F85A17339D0}.Debug|Win32.Build.0 = Debug|Win32 + {0F56AEB0-14DA-4A80-8962-1F85A17339D0}.Release|Any CPU.ActiveCfg = Release|Win32 + {0F56AEB0-14DA-4A80-8962-1F85A17339D0}.Release|Win32.ActiveCfg = Release|Win32 + {0F56AEB0-14DA-4A80-8962-1F85A17339D0}.Release|Win32.Build.0 = Release|Win32 + {AB67F297-8491-4515-8E52-BFF5340EC242}.Debug|Any CPU.ActiveCfg = Debug|Win32 
+ {AB67F297-8491-4515-8E52-BFF5340EC242}.Debug|Win32.ActiveCfg = Debug|Win32 + {AB67F297-8491-4515-8E52-BFF5340EC242}.Debug|Win32.Build.0 = Debug|Win32 + {AB67F297-8491-4515-8E52-BFF5340EC242}.Release|Any CPU.ActiveCfg = Release|Win32 + {AB67F297-8491-4515-8E52-BFF5340EC242}.Release|Win32.ActiveCfg = Release|Win32 + {AB67F297-8491-4515-8E52-BFF5340EC242}.Release|Win32.Build.0 = Release|Win32 + {23BFE838-5682-4F39-969F-0B40366D4D98}.Debug|Any CPU.ActiveCfg = Debug|Win32 + {23BFE838-5682-4F39-969F-0B40366D4D98}.Debug|Win32.ActiveCfg = Debug|Win32 + {23BFE838-5682-4F39-969F-0B40366D4D98}.Debug|Win32.Build.0 = Debug|Win32 + {23BFE838-5682-4F39-969F-0B40366D4D98}.Release|Any CPU.ActiveCfg = Release|Win32 + {23BFE838-5682-4F39-969F-0B40366D4D98}.Release|Win32.ActiveCfg = Release|Win32 + {23BFE838-5682-4F39-969F-0B40366D4D98}.Release|Win32.Build.0 = Release|Win32 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection + GlobalSection(NestedProjects) = preSolution + {A0F65E06-9F18-40AC-81F6-A080852F1104} = {C66C8B3B-C156-4498-91E9-CA9A24CF9051} + {9BA8E10D-0D82-4B25-8543-DE34641FBC10} = {C66C8B3B-C156-4498-91E9-CA9A24CF9051} + {E5BC5B2E-976D-4DED-AA07-5DD52BF2163F} = {D6767108-F420-41C0-A834-2E6F487E1AB3} + {558838F9-D792-4F56-AAB2-99C03687C5FF} = {D6767108-F420-41C0-A834-2E6F487E1AB3} + {28FB58CE-AB8C-4C60-83DA-BC1BFCC59BFF} = {D6767108-F420-41C0-A834-2E6F487E1AB3} + {208D3989-794B-47A2-9D04-D7AEE1524078} = {D6767108-F420-41C0-A834-2E6F487E1AB3} + {AAF33ADD-A4F9-4BCA-B7F9-0C35C843CC7E} = {D6767108-F420-41C0-A834-2E6F487E1AB3} + {CAD30B43-D93B-47D5-9161-6A3E9BADCC1D} = {0377E151-3352-487B-A5CF-24BCDC9EC43F} + {374BF775-AF68-4A88-814A-48F692DFFE5A} = {0377E151-3352-487B-A5CF-24BCDC9EC43F} + {2D729599-C008-4154-BCCB-53E6A260F220} = {0377E151-3352-487B-A5CF-24BCDC9EC43F} + {B52BD53B-0E57-4E9A-A601-8E8171BA1CFC} = {0377E151-3352-487B-A5CF-24BCDC9EC43F} + {8B79A2B5-8889-43D4-9B92-9AE8A6F00413} = {0377E151-3352-487B-A5CF-24BCDC9EC43F} + {2FB961E5-213C-4475-8CB3-72F904D40752} = {0377E151-3352-487B-A5CF-24BCDC9EC43F} + {23BFE838-5682-4F39-969F-0B40366D4D98} = {0377E151-3352-487B-A5CF-24BCDC9EC43F} + {D03415F7-654E-42F4-B0E9-CB8FBE3F22FA} = {B9099DDA-18C9-4DE0-AECB-5D8139EA619F} + {F5F063F8-11A1-475A-82E2-19759BB40B25} = {B9099DDA-18C9-4DE0-AECB-5D8139EA619F} + {6A435DBB-4D3D-4DAE-8CB3-E0AF169A240B} = {B9099DDA-18C9-4DE0-AECB-5D8139EA619F} + {C8301485-CFD1-43D4-827C-8EA050C8E256} = {825DFFD0-4747-43CA-8326-529655E31935} + {40CADEE4-8D53-4157-AA36-B256F4934FC3} = {825DFFD0-4747-43CA-8326-529655E31935} + {6A7AA493-E46C-4994-B8D6-AA6C9C19C9BA} = {37F903FE-3474-4C93-AD5B-987CB6A92E62} + {F8AF7D74-2918-422B-A7B6-4D98566B7160} = {37F903FE-3474-4C93-AD5B-987CB6A92E62} + {56EADEDB-FBED-4758-8B54-7B0B47ABDABF} = {37F903FE-3474-4C93-AD5B-987CB6A92E62} + {496415E0-AF44-4AD8-8C99-91B837DDF469} = {37F903FE-3474-4C93-AD5B-987CB6A92E62} + {614CE916-0972-4126-9392-CD9FC0ADD7DE} = {37F903FE-3474-4C93-AD5B-987CB6A92E62} + {85696E20-777A-41F6-BC00-2E7AB375B171} = {37F903FE-3474-4C93-AD5B-987CB6A92E62} + {56D8C233-610E-4EE4-A73A-72CEF1C6A33A} = {37F903FE-3474-4C93-AD5B-987CB6A92E62} + {0F56AEB0-14DA-4A80-8962-1F85A17339D0} = {37F903FE-3474-4C93-AD5B-987CB6A92E62} + {AB67F297-8491-4515-8E52-BFF5340EC242} = {37F903FE-3474-4C93-AD5B-987CB6A92E62} + EndGlobalSection +EndGlobal diff --git a/src/win32/bacula/bacula.vcproj b/src/win32/bacula/bacula.vcproj new file mode 100644 index 00000000..ae9fe374 --- /dev/null +++ b/src/win32/bacula/bacula.vcproj @@ -0,0 +1,225 @@ + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/win32/build-depkgs-mingw-w64 b/src/win32/build-depkgs-mingw-w64 new file mode 100755 index 00000000..b688e6e6 --- /dev/null +++ b/src/win32/build-depkgs-mingw-w64 @@ -0,0 +1,451 @@ +#!/bin/sh +# +# Copyright (C) 2000-2018 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# This file may help you build the dependency packages that +# are needed to cross compile the Win64 bit version of the Bacula +# File daemon. This file is provided as is, and we don't guarantee +# that it will work. We run it only on Ubuntu Hardy. Trying to use +# it on any other GNU/Linux distro will probably require changes. +# +# This file is driven by the parameters that are defined in +# the file External-mingw-w64 +# + +usage() +{ + echo "usage: $0 [-h] [-C] [] [] ..." + echo " -h Displays this usage" + echo " -C Clobbers (overwrites) the source code by " + echo " reextracting the archive and reapplying the" + echo " patches." + echo "" + echo " Optional dependency, If none are given then all" + echo " of them will be built." + echo "" + echo "Valid dependencies are:" + grep -v '^#' < External-mingw-w64 | cut -d'|' -f1 | cut -d'_' -f1 | tr A-Z a-z | sort -u | awk '{ print " " $1 }' +} + +CLOBBER_SOURCE= + +while getopts "hHC" opt; do + case ${opt} in + H|h|\?) usage;exit 1;; + C) CLOBBER_SOURCE=true;; + esac +done + +[ ${OPTIND} -gt 1 ] && shift `expr ${OPTIND} - 1` + +cwd=`pwd` +cd `dirname $0` +SCRIPT_DIR=`pwd` + +cd ../../.. +TOP_DIR=`pwd` +TOP_DIR=${DEPKGS:-${TOP_DIR}} + +if [ -e ${TOP_DIR}/cross-tools/mingw-w64/bin/x86_64-pc-mingw32-gcc ] +then + cd ${TOP_DIR}/cross-tools/mingw-w64/bin + BIN_DIR=`pwd` + +elif which x86_64-w64-mingw32-gcc > /dev/null; then + BIN_DIR= + BASE=x86_64-w64-mingw32 + +else + echo "The GCC cross compiler is not installed." + echo "You must run build-win64-cross-tools first" + exit 1 +fi + +[ ! -e ${TOP_DIR}/depkgs-mingw-w64 ] && mkdir ${TOP_DIR}/depkgs-mingw-w64 +cd ${TOP_DIR}/depkgs-mingw-w64 +DEPPKG_DIR=`pwd` + +export PATH=${BIN_DIR}:${PATH} + +[ ! -e bin ] && mkdir bin +[ ! -e src ] && mkdir src +[ ! -e include ] && mkdir include +[ ! -e lib ] && mkdir lib + +OLD_IFS=${IFS};IFS="|"; +while read package url dir mkd; do + echo "Got package ${package}" + case ${package} in + \#*) ;; + *) eval "URL_${package}=${url};DIR_${package}=${dir};MKD_${package}=${mkd}";; + esac +done < ${SCRIPT_DIR}/External-mingw-w64 +IFS=${OLD_IFS};unset OLD_IFS + +get_source() +{ + URL=$1 + SRC_DIR=$2 + MAKE_DIR=$3 + echo "Processing ${URL}" + ARCHIVE=`basename ${URL}` + + case ${ARCHIVE} in + *.tar.gz) ARCHIVER="tar xzf"; [ -z "${SRC_DIR}" ] && SRC_DIR=`expr "${ARCHIVE}" : '\(.*\)\.tar\.gz'`;; + *.tar.bz2) ARCHIVER="tar xjf"; [ -z "${SRC_DIR}" ] && SRC_DIR=`expr "${ARCHIVE}" : '\(.*\)\.tar\.bz2'`;; + *.zip) ARCHIVER="unzip -d ."; [ -z "${SRC_DIR}" ] && SRC_DIR=`expr "${ARCHIVE}" : '\(.*\)\.zip'`;; + *.exe) ARCHIVER=""; [ -z "${SRC_DIR}" ] && SRC_DIR=`expr "${ARCHIVE}" : '\(.*\)\.zip'`;; + *) echo Unsupported archive type - $ARCHIVE; exit 1;; + esac + + cd ${DEPPKG_DIR}/src + + if [ ! -e "${ARCHIVE}" ] + then + echo Downloading "${URL}" + if wget --passive-ftp "${URL}" + then + : + else + echo Unable to download ${ARCHIVE} + exit 1 + fi + fi + + [ -z "${ARCHIVER}" ] && return 0 + + if [ ! 
-e "${SRC_DIR}" -o "${CLOBBER_SOURCE}" = "true" ] + then + rm -rf ${SRC_DIR} + echo Extracting ${ARCHIVE} + if [ "${MAKE_DIR}" = "true" ] + then + mkdir ${SRC_DIR} + cd ${SRC_DIR} + ${ARCHIVER} ../${ARCHIVE} > ../${ARCHIVE}.log 2>&1 + else + ${ARCHIVER} ${ARCHIVE} > ${ARCHIVE}.log 2>&1 + cd ${SRC_DIR} + fi + return 0 + fi + + cd ${SRC_DIR} + return 1 +} + +parse_output() +{ + sed -ne '/\\$/N' -e 's/\\\n//' -e 's/\t\+/ /g' -e 's/ \+/ /g' \ + -e '/ error: /p' \ + -e "s%.*Entering directory[ ]\\+.${DEPPKG_DIR}/\\([^ ]\+\).%Entering \\1%p" \ + -e "s%.*Leaving directory[ ]\\+.${DEPPKG_DIR}/\\([^ ]\+.\).%Leaving \\1%p" \ + -e '/gcc \|g\+\+ \|ar /!d' \ + -e 's/ \(\.\.\/\)\+/ /g' \ + -e 's/.* \([^ ]\+\(\.c\|\.cpp\|\.cc\|\.cxx\)\)\( .*\|\)$/Compiling \1/p' \ + -e 's/.* \([^ ]\+\.s\)\( .*\|\)$/Assembling \1/p' \ + -e 's/.*ar [^ ]\+ \([^ ]\+\)\(\( [^ ]\+\.o\)\+\)/Updating \1 -\2/p' \ + -e 's/.* -o \([^ ]\+\)\( .*\|\)$/Linking \1/p' +} + +do_patch() +{ + PATCH_FILE=${SCRIPT_DIR}/patches/$1; shift + + if patch -f -p0 "$@" >>patch.log < ${PATCH_FILE} + then + : + else + echo "Patch failed - Check `pwd`/patch.log" > /dev/tty + exit 1 + fi +} + +do_make() +{ + if make -f "$@" 2>&1 + then + : + else + echo "Make failed - Check `pwd`/make.log" > /dev/tty + exit 1 + fi | tee -a make.log | parse_output +} + +process_rsync() +{ + get_source "${URL_RSYNC}" "${DIR_RSYNC}" "${MKD_RSYNC}" + ./configure --host=${BASE} + make -k # rdiff.exe has a problem + cp .libs/librsync.a ${DEPPKG_DIR}/lib + cp librsync*h ${DEPPKG_DIR}/include +} + +process_zlib() +{ + if get_source "${URL_ZLIB}" "${DIR_ZLIB}" "${MKD_ZLIB}" + then + true + fi + echo Building zlib + > make.log + do_make win32/Makefile.gcc PREFIX=${BASE}- DESTDIR=${DEPPKG_DIR}/ all + echo Installing zlib + do_make win32/Makefile.gcc PREFIX=${BASE}- DESTDIR=${DEPPKG_DIR}/ LIBRARY_PATH=lib BINARY_PATH=bin INCLUDE_PATH=include SHARED_MODE=1 install +} + +process_pcre() +{ + if get_source "${URL_PCRE}" "${DIR_PCRE}" "${MKD_PCRE}" + then + echo Patching PCRE + >patch.log + do_patch pcre.patch + echo Configuring PCRE + ./configure CC_FOR_BUILD=gcc \ + CXX_FOR_BUILD=g++ \ + --host=${BASE} \ + --prefix=${DEPPKG_DIR} \ + --enable-utf8 \ + --enable-unicode-properties >make.log 2>&1 + fi + echo Building PCRE + do_make Makefile PREFIX=${DEPPKG_DIR} all + echo Installing PCRE + do_make Makefile PREFIX=${DEPPKG_DIR} install +} + +process_db() +{ + if get_source "${URL_DB}" "${DIR_DB}" "${MKD_DB}" + then + echo "No patch needed for this package" + fi + cd build_unix + ../dist/configure --host=${BASE} --enable-mingw --prefix=${DEPPKG_DIR} + > make.log + echo Building DB + do_make Makefile + echo Installing DB + do_make Makefile install_setup install_include install_lib +} + +process_pthreads() +{ + if get_source "${URL_PTHREADS}" "${DIR_PTHREADS}" "${MKD_PTHREADS}" + then + echo "No patch needed for this package" + fi + echo Building pthreads + cd pthreads.2 + > make.log + do_make GNUmakefile CROSS=${BASE}- clean GCE-inlined + echo Installing pthreads + rm -rf ${DEPPKG_DIR}/include/pthreads + mkdir ${DEPPKG_DIR}/include/pthreads + cp -p *.h ${DEPPKG_DIR}/include/pthreads + cp -p *.dll ${DEPPKG_DIR}/bin + cp -p *.a ${DEPPKG_DIR}/lib +} + +process_openssl() +{ + if get_source "${URL_OPENSSL}" "${DIR_OPENSSL}" "${MKD_OPENSSL}" + then + echo Configuring openssl + CROSS_COMPILE=${BASE}- ./Configure --prefix=${DEPPKG_DIR} \ + shared zlib-dynamic \ + threads \ + --with-zlib-include=${DEPPKG_DIR}/include \ + mingw64 > make.log 2>&1 + fi + echo Building openssl + do_make Makefile all + 
echo Installing openssl + do_make Makefile -k install_sw install + cp *.dll ${DEPPKG_DIR}/bin +} + +process_lzo() +{ + if get_source "${URL_LZO}" "${DIR_LZO}" "${MKD_LZO}" + then + sed -i s/-lwinmm// configure + fi + echo Building lzo + ./configure --host=${BASE} --prefix=${DEPPKG_DIR}/ + echo Installing lzo + do_make Makefile -k PREFIX=${DEPPKG_DIR}/ all + do_make Makefile -k PREFIX=${DEPPKG_DIR}/ install +} + +process_qt4() +{ + if get_source "${URL_Qt4}" "${DIR_Qt4}" "${MKD_Qt4}" + then + echo Patching Qt4 + >patch.log + patch -p1 < ${SCRIPT_DIR}/patches/qt4-intrinsics.patch + patch -p1 < ${SCRIPT_DIR}/patches/qt4-widget-ui.patch + patch -p1 < ${SCRIPT_DIR}/patches/qt4-compilation-see.patch + patch -p1 < ${SCRIPT_DIR}/patches/qt4-compilation.patch + fi + echo "Configuring Qt4" + ./configure -opensource -confirm-license -fast -xplatform win32-g++-4.6 \ + -device-option CROSS_COMPILE=x86_64-w64-mingw32- -device-option \ + PKG_CONFIG='x86_64-w64-mingw32-pkg-config' -force-pkg-config -release \ + -exceptions -shared -prefix ${DEPPKG_DIR}/qt-out -prefix-install -no-script \ + -no-iconv -no-webkit -no-glib -no-gstreamer -no-phonon -no-phonon-backend \ + -accessibility -no-reduce-exports -no-rpath -make libs -nomake demos \ + -nomake docs -nomake examples -system-zlib -no-mitshm -no-libjpeg \ + -no-libmng -no-libtiff -no-sql-db2 -no-sql-ibase -no-sql-mysql -no-sql-oci \ + -no-sql-odbc -no-sql-psql -no-sql-sqlite -no-sql-sqlite2 \ + -no-sql-sqlite_symbian -no-sql-symsql -no-sql-tds -no-nis -no-cups -no-dbus \ + -no-openvg -no-openssl -no-nas-sound -no-audio-backend -no-sm -no-opengl \ + -no-javascript-jit -no-qt3support -nomake tools \ + -no-xmlpatterns -no-multimedia -nomake tools -silent + + local qt_source_dir=`pwd` + # Required libz.dll.a, zlib.h and zconf.h for proper Qt build purpose + process_zlib + cd "${qt_source_dir}" + local zlib_dir=`basename "${URL_ZLIB}" | sed 's/.tar.gz//'` + cp "../${zlib_dir}/libz.dll.a" ./lib/ + cp "../${zlib_dir}/zlib.h" ./include/ + cp "../${zlib_dir}/zconf.h" ./include/ + + make + + echo "Installing Qt4" + rm -rf ${DEPPKG_DIR}/include/qt ${DEPPKG_DIR}/lib/qt + mkdir -p ${DEPPKG_DIR}/include/qt ${DEPPKG_DIR}/lib/qt + cp -rf include/* ${DEPPKG_DIR}/include/qt + cp -rf lib/* ${DEPPKG_DIR}/lib/qt + cp -rf src/corelib ${DEPPKG_DIR}/src/ + cp -rf src/gui ${DEPPKG_DIR}/src/ + cp -rf lib/QtCore4.dll lib/QtGui4.dll ${DEPPKG_DIR}/bin/ + cp -rf lib/QtCore4.dll lib/QtGui4.dll ${DEPPKG_DIR}/lib/qt/ +} + + +process_mingw() +{ + if test -f /usr/lib/gcc/${BASE}/*posix/libstdc++-6.dll; then + cp -f /usr/lib/gcc/${BASE}/*posix/libstdc++-6.dll ${DEPPKG_DIR}/bin + cp -f /usr/lib/gcc/${BASE}/*posix/libgcc*dll ${DEPPKG_DIR}/bin + elif test -f /usr/lib/gcc/${BASE}/*/libstdc++-6.dll; then + cp -f /usr/${BASE}/lib/libstdc++-6.dll ${DEPPKG_DIR}/bin + cp -f /usr/${BASE}/lib/libgcc*dll ${DEPPKG_DIR}/bin + else + echo "ERROR: Unable to find ${BASE} on this system" + fi + if test -f /usr/$BASE/lib/libwinpthread-1.dll; then + cp -f /usr/$BASE/lib/libwinpthread-1.dll ${DEPPKG_DIR}/bin + fi +} + +process_mtx() +{ + if get_source "${URL_MTX}" "${DIR_MTX}" "${MKD_MTX}" + then + echo Patching mtx + # We can't run configure in a cross-compile environment so we + # patch the files to the correct values + cp -f config.h.in config.h + cp -f Makefile.in Makefile + rm -f configure + >patch.log + do_patch mtx.patch + fi + echo Building mtx + do_make Makefile prefix=${DEPPKG_DIR} all + echo Installing mtx + do_make Makefile prefix=${DEPPKG_DIR} install +} + +process_mt() +{ + if get_source 
"${URL_MT}" "${DIR_MT}" "${MKD_MT}" + then + echo "Patching mt" + >patch.log + do_patch mt.patch + fi + echo "Building mt" + do_make Makefile PREFIX=${DEPPKG_DIR} all + echo Installing mt + do_make Makefile PREFIX=${DEPPKG_DIR} install +} + +process_sed() +{ + if get_source "${URL_SED}" "${DIR_SED}" "${MKD_SED}" + then + echo Patching sed + >patch.log + # patch not needed for 4.2k which is already updated + # do_patch sed.patch + echo Configuring sed + ./configure --host=$BASE \ + --prefix=${DEPPKG_DIR} \ + --disable-nls >make.log 2>&1 + fi + echo Building sed + do_make Makefile all + echo Installing sed + do_make Makefile install +} + +process_cmd_utils() +{ + if get_source "${URL_CMD_UTILS}" "${DIR_CMD_UTILS}" "${MKD_CMD_UTILS}" + then + # echo Patching cmd-utils + # >patch.log + # do_patch cmd-utils.patch + sed -i "s:strrchr:NULL;//:" expr64/expr64.cpp + echo Configuring cmd-utils + ./configure --host=$BASE \ + --prefix=${DEPPKG_DIR} \ + >make.log 2>&1 + fi + echo Building cmd-utils + do_make Makefile + echo Installing cmd-utils + do_make Makefile install +} + +# MSSQL dlls are created from Visual Studio +process_mssql() +{ + get_source "${URL_MSSQL}" "${DIR_MSSQL}" "${MKD_MSSQL}" + echo Installing MSSQL driver + cp include/*.h ${DEPPKG_DIR}/include + cp x64/mssql-driver.dll ${DEPPKG_DIR}/bin + cp x64/mssql-regression.exe ${DEPPKG_DIR}/bin +} + +if [ "$#" -eq 0 ] +then + process_mingw + process_rsync + process_lzo + process_zlib +# process_pcre + process_pthreads + process_openssl + process_qt4 + process_sed + process_cmd_utils + process_mssql +# process_mtx +# process_mt +else + for dependency in "$@" + do + eval "process_${dependency}" + done +fi +#vss +#Need to download from Microsoft diff --git a/src/win32/build-depkgs-mingw32 b/src/win32/build-depkgs-mingw32 new file mode 100755 index 00000000..285e7cab --- /dev/null +++ b/src/win32/build-depkgs-mingw32 @@ -0,0 +1,659 @@ +#!/bin/sh +# +# Copyright (C) 2000-2018 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# +# This file is driven by the parameters that are defined in +# the file External-mingw32 +# + +usage() +{ + echo "usage: $0 [-h] [-C] [] [] ..." + echo " -h Displays this usage" + echo " -C Clobbers (overwrites) the source code by " + echo " reextracting the archive and reapplying the" + echo " patches." + echo " -A Rebuild everything (wipes out most of depkgs)" + echo "" + echo " Optional dependency, If none are given then all" + echo " of them will be built." + echo "" + echo "Valid dependencies are:" + grep -v '^#' < External-mingw32 | cut -d'|' -f1 | cut -d'_' -f1 | tr A-Z a-z | sort -u | awk '{ print " " $1 }' +} + +CLOBBER_SOURCE= +CLOBBER_ALL= + +while getopts "hHCA" opt; do + case ${opt} in + H|h|\?) usage;exit 1;; + C) CLOBBER_SOURCE=true;; + A) CLOBBER_ALL=true;; + esac +done + +[ ${OPTIND} -gt 1 ] && shift `expr ${OPTIND} - 1` + +cwd=`pwd` +cd `dirname $0` +SCRIPT_DIR=`pwd` + +cd ../../.. +TOP_DIR=`pwd` +TOP_DIR=${DEPKGS:-${TOP_DIR}} + +if [ -e ${TOP_DIR}/cross-tools/mingw32/bin/mingw32-gcc ]; then + cd ${TOP_DIR}/cross-tools/mingw32/bin + BIN_DIR=`pwd` + BASE=mingw32 +elif [ -e /mingw/bin/mingw32-gcc ]; then + BIN_DIR=/mingw/bin + BASE=mingw32 + +elif which i686-w64-mingw32-gcc > /dev/null; then + BIN_DIR= + BASE=i686-w64-mingw32 + +else + echo "The GCC cross compiler is not installed." + echo "You must run build-win32-cross-tools first" + exit 1 +fi + +[ ! 
-e ${TOP_DIR}/depkgs-mingw32 ] && mkdir ${TOP_DIR}/depkgs-mingw32 +cd ${TOP_DIR}/depkgs-mingw32 +DEPPKG_DIR=`pwd` + +if [ "${CLOBBER_ALL}" = "true" ] +then + echo "Clobbering ${DEPPKG_DIR}" + echo "rm -rf bin include info lib man qt-out/ share src tools" + rm -rf bin include info lib man qt-out/ share src tools +fi + +export PATH=${BIN_DIR}:${PATH} + +[ ! -e bin ] && mkdir bin +[ ! -e src ] && mkdir src +[ ! -e include ] && mkdir include +[ ! -e lib ] && mkdir lib + +OLD_IFS=${IFS};IFS="|"; +while read package url dir mkd; do +# echo "Got package ${package}" + case ${package} in + \#*) ;; + *) eval "URL_${package}=${url};DIR_${package}=${dir};MKD_${package}=${mkd}";; + esac +done < ${SCRIPT_DIR}/External-mingw32 +IFS=${OLD_IFS};unset OLD_IFS + + +get_source() +{ + URL=$1 + SRC_DIR=$2 + MAKE_DIR=$3 + echo "Processing ${URL}" + ARCHIVE=`basename ${URL}` + + case ${ARCHIVE} in + *.tar.gz) ARCHIVER="tar xzf"; [ -z "${SRC_DIR}" ] && SRC_DIR=`expr "${ARCHIVE}" : '\(.*\)\.tar\.gz'`;; + *.tar.bz2) ARCHIVER="tar xjf"; [ -z "${SRC_DIR}" ] && SRC_DIR=`expr "${ARCHIVE}" : '\(.*\)\.tar\.bz2'`;; + *.zip) ARCHIVER="unzip -q"; [ -z "${SRC_DIR}" ] && SRC_DIR=`expr "${ARCHIVE}" : '\(.*\)\.zip'`;; + *.exe) ARCHIVER=""; [ -z "${SRC_DIR}" ] && SRC_DIR=`expr "${ARCHIVE}" : '\(.*\)\.zip'`;; + *) echo Unsupported archive type - $ARCHIVE; exit 1;; + esac + + cd ${DEPPKG_DIR}/src + + if [ ! -e "${ARCHIVE}" ] + then + echo Downloading "${URL}" + if wget --passive-ftp "${URL}" + then + : + else + echo Unable to download ${ARCHIVE} + exit 1 + fi + fi + + [ -z "${ARCHIVER}" ] && return 0 + + if [ ! -e "${SRC_DIR}" -o "${CLOBBER_SOURCE}" = "true" ] + then + rm -rf ${SRC_DIR} + echo Extracting ${ARCHIVE} + if [ "${MAKE_DIR}" = "true" ] + then + mkdir ${SRC_DIR} + cd ${SRC_DIR} + ${ARCHIVER} ../${ARCHIVE} > ../${ARCHIVE}.log 2>&1 + else + ${ARCHIVER} ${ARCHIVE} > ${ARCHIVE}.log 2>&1 + cd ${SRC_DIR} + fi + return 0 + fi + + cd ${SRC_DIR} + return 1 +} + +parse_output() +{ + sed -ne '/\\$/N' -e 's/\\\n//' -e 's/\t\+/ /g' -e 's/ \+/ /g' \ + -e '/ error: /p' \ + -e "s%.*Entering directory[ ]\\+.${DEPPKG_DIR}/\\([^ ]\+\).%Entering \\1%p" \ + -e "s%.*Leaving directory[ ]\\+.${DEPPKG_DIR}/\\([^ ]\+.\).%Leaving \\1%p" \ + -e '/gcc \|g\+\+ \|ar /!d' \ + -e 's/ \(\.\.\/\)\+/ /g' \ + -e 's/.* \([^ ]\+\(\.c\|\.cpp\|\.cc\|\.cxx\)\)\( .*\|\)$/Compiling \1/p' \ + -e 's/.* \([^ ]\+\.s\)\( .*\|\)$/Assembling \1/p' \ + -e 's/.*ar [^ ]\+ \([^ ]\+\)\(\( [^ ]\+\.o\)\+\)/Updating \1 -\2/p' \ + -e 's/.* -o \([^ ]\+\)\( .*\|\)$/Linking \1/p' +} + +do_patch() +{ + PATCH_FILE=${SCRIPT_DIR}/patches/$1; shift + + if patch -f -p0 "$@" >>patch.log < ${PATCH_FILE} + then + : + else + echo "Patch failed - Check `pwd`/patch.log" > /dev/tty + exit 1 + fi +} + +do_make() +{ + if make -f "$@" 2>&1 + then + : + else + echo "Make failed - Check `pwd`/make.log" > /dev/tty + exit 1 + fi | tee -a make.log | parse_output +} + +process_lzo() +{ + if get_source "${URL_LZO}" "${DIR_LZO}" "${MKD_LZO}" + then + true + fi + echo Building lzo + ./configure --host=$BASE --prefix=${DEPPKG_DIR}/ + echo Installing lzo + do_make Makefile PREFIX=${DEPPKG_DIR}/ all + do_make Makefile PREFIX=${DEPPKG_DIR}/ install +} + +process_zlib() +{ + if get_source "${URL_ZLIB}" "${DIR_ZLIB}" "${MKD_ZLIB}" + then + true + fi + echo Building zlib + > make.log + do_make win32/Makefile.gcc PREFIX=${BASE}- DESTDIR=${DEPPKG_DIR}/ all + echo Installing zlib + do_make win32/Makefile.gcc PREFIX=${BASE}- DESTDIR=${DEPPKG_DIR}/ LIBRARY_PATH=lib BINARY_PATH=bin INCLUDE_PATH=include SHARED_MODE=1 
install +} + + +process_rsync() +{ + get_source "${URL_RSYNC}" "${DIR_RSYNC}" "${MKD_RSYNC}" + ./configure --host=$BASE --with-pic + echo Building librsync + do_make Makefile + echo Installing lib and headers + cp .libs/librsync.a ${DEPPKG_DIR}/lib + cp librsync*h ${DEPPKG_DIR}/include +} + +process_pcre() +{ + if get_source "${URL_PCRE}" "${DIR_PCRE}" "${MKD_PCRE}" + then + echo Patching PCRE + >patch.log + do_patch pcre.patch + echo Configuring PCRE + ./configure CC_FOR_BUILD=gcc \ + CXX_FOR_BUILD=g++ \ + --host=$BASE \ + --prefix=${DEPPKG_DIR} \ + --enable-utf8 \ + --enable-unicode-properties >make.log 2>&1 + fi + echo Building PCRE + do_make Makefile PREFIX=${DEPPKG_DIR} all + echo Installing PCRE + do_make Makefile PREFIX=${DEPPKG_DIR} install +} + +process_db() +{ + if get_source "${URL_DB}" "${DIR_DB}" "${MKD_DB}" + then + echo No Patch + fi + cd build_unix + ../dist/configure --host=$BASE --enable-mingw --prefix=${DEPPKG_DIR} + > make.log + echo Building DB + do_make Makefile + echo Installing DB + do_make Makefile install_setup install_include install_lib +} + +process_pthreads() +{ + if get_source "${URL_PTHREADS}" "${DIR_PTHREADS}" "${MKD_PTHREADS}" + then + sed -i '140 s/INLINE//' pthread_mutex_consistent.c + fi + echo Building pthreads + > make.log + do_make GNUmakefile CROSS=${BASE}- clean GCE + echo Installing pthreads + rm -rf ${DEPPKG_DIR}/include/pthreads + mkdir ${DEPPKG_DIR}/include/pthreads + cp -p *.h ${DEPPKG_DIR}/include/pthreads + cp -p *.dll ${DEPPKG_DIR}/bin + cp -p *.a ${DEPPKG_DIR}/lib +} + +process_openssl() +{ + if get_source "${URL_OPENSSL}" "${DIR_OPENSSL}" "${MKD_OPENSSL}" + then + true + fi + echo Configuring openssl + CROSS_COMPILE=${BASE}- ./Configure --prefix=${DEPPKG_DIR} \ + shared zlib-dynamic \ + threads \ + --with-zlib-include=${DEPPKG_DIR}/include \ + mingw > make.log 2>&1 + do_make Makefile all + echo Installing openssl + do_make Makefile install_sw +} + +process_mysql() +{ + get_source "${URL_MYSQL}" "${DIR_MYSQL}" "${DIR_MYSQL}" "${MKD_MYSQL}" + echo Converting mysql lib file + if [ -e ${BIN_DIR}/reimp ] + then + reimp --dlltool ${BIN_DIR}/${BASE}-dlltool --as ${BIN_DIR}/${BASE}-as lib/opt/libmysql.lib + else + ${BIN_DIR}/../mingw32/bin/reimp --dlltool ${BIN_DIR}/${BASE}-dlltool --as ${BIN_DIR}/${BASE}-as lib/opt/libmysql.lib + fi + echo Installing mysql + cp -p liblibmysql.a ../../lib/libmysql.a + rm -rf ../../include/mysql + mkdir ../../include/mysql + cp -p include/* ../../include/mysql 2>&1 | grep -v 'omitting directory' + cp -p lib/opt/libmysql.dll ../../bin +} + +process_postgresql() +{ + if get_source "${URL_POSTGRESQL}" "${DIR_POSTGRESQL}" "${MKD_POSTGRESQL}" + then + echo Patching postgreSQL + >patch.log + do_patch postgresql.patch + + # We need a native version of zic to build the timezone tables. + echo Configuring postgreSQL to build native zic + ./configure > make.log 2>&1 + + echo Building native zic + cd src/timezone + do_make Makefile >> make.log 2>&1 + cp zic ../.. + cd ../.. 
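+      # Descriptive note (not in upstream): the native configure/build above exists only to produce
+      # a zic binary that runs on the build host; it is copied to the top of the tree, distclean then
+      # clears the native configuration before the MinGW32 cross configure, and the copied zic is the
+      # one passed later via ZIC=`pwd`/zic to the cross build.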
+ do_make GNUmakefile distclean >> make.log 2>&1 + + echo Configuring postgreSQL for MinGW32 + + ./configure --host=$BASE \ + --enable-shared \ + --enable-thread-safety \ + --prefix=${DEPPKG_DIR} \ + --with-includes=${DEPPKG_DIR}/include:${DEPPKG_DIR}/include/pthreads \ + --with-libraries=${DEPPKG_DIR}/lib >> make.log 2>&1 + fi + echo Building postgreSQL + ZIC=`pwd`/zic + do_make GNUmakefile AR=$BASE-ar DLLTOOL=$BASE-dlltool DLLWRAP=$BASE-dllwrap WINDRES=$BASE-windres PTHREAD_LIBS=-lpthreadGCE ZIC=${ZIC} + echo Installing postgreSQL + do_make GNUmakefile AR=$BASE-ar DLLTOOL=$BASE-dlltool DLLWRAP=$BASE-dllwrap WINDRES=$BASE-windres PTHREAD_LIBS=-lpthreadGCE ZIC=${ZIC} install +} + +process_sqlite() +{ + if get_source "${URL_SQLITE}" "${DIR_SQLITE}" "${MKD_SQLITE}" + then + echo Patching SQLite + >patch.log + do_patch sqlite.patch + fi + echo Building SQLite + [ ! -e bld ] && mkdir bld + cd bld + > make.log + do_make ../Makefile.mingw32 CROSSTOOLS=${BIN_DIR} TLIBS="-L${DEPPKG_DIR}/lib" TCL_FLAGS="-I${DEPPKG_DIR}/include" + echo Installing SQLite + cp -p sqlite3.exe ${DEPPKG_DIR}/bin + cp -p libsqlite3.a ${DEPPKG_DIR}/lib + cp -p sqlite3.h ${DEPPKG_DIR}/include +} + +process_wx() +{ + if get_source "${URL_WX}" "${DIR_WX}" "${MKD_WX}" + then + echo Patching wxWidgets + >patch.log + cp build/msw/config.gcc build/msw/config.mingw32 + do_patch wxWidgets.patch + find . -name makefile.gcc -exec sh -c "sed -f ${SCRIPT_DIR}/patches/wx.sed {} > \`echo {} | sed -e 's/\.gcc$/\.mingw32/'\`" \; + fi + echo Building wxWidgets + cd build/msw + > make.log + do_make makefile.mingw32 SHARED=1 VENDOR=bacula DEBUG_INFO=1 + echo Installing wxWidgets + cd ../.. + rm -rf ../../include/wx + mkdir ../../include/wx + cp -p include/wx/* ../../include/wx 2>&1 | grep -v 'omitting directory' + mkdir ../../include/wx/generic + cp -p include/wx/generic/* ../../include/wx/generic 2>&1 | grep -v 'omitting directory' + mkdir ../../include/wx/msw + cp -p include/wx/msw/* ../../include/wx/msw 2>&1 | grep -v 'omitting directory' + mkdir ../../include/wx/msw/ole + cp -p include/wx/msw/ole/* ../../include/wx/msw/ole 2>&1 | grep -v 'omitting directory' + cp -p lib/gcc_dll/*.dll ../../bin + rm -rf ../../lib/wx_dll + mkdir ../../lib/wx_dll + cp -p lib/gcc_dll/*.a ../../lib/wx_dll + mkdir ../../lib/wx_dll/msw + cp -p lib/gcc_dll/msw/* ../../lib/wx_dll/msw 2>&1 | grep -v 'omitting directory' + mkdir ../../lib/wx_dll/msw/wx + cp -p lib/gcc_dll/msw/wx/* ../../lib/wx_dll/msw/wx 2>&1 | grep -v 'omitting directory' + mkdir ../../lib/wx_dll/msw/wx/msw + cp -p lib/gcc_dll/msw/wx/msw/* ../../lib/wx_dll/msw/wx/msw 2>&1 | grep -v 'omitting directory' +} + +process_scons() +{ + get_source "${URL_SCONS}" "${DIR_SCONS}" "${MKD_SCONS}" + echo "Installing scons" + if python setup.py install --prefix=${DEPPKG_DIR}/scons > make.log 2>&1 + then + : + else + echo "Make failed - Check `pwd`/make.log" + exit 1 + fi +} + +process_nsis() +{ + get_source "${URL_NSIS_BIN}" "${DIR_NSIS_BIN}" "${MKD_NSIS_BIN}" + cd .. 
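+   # Descriptive note (not in upstream): the prebuilt nsis-2.17 tree moved into ${DEPPKG_DIR}/nsis
+   # supplies the stubs, plugins and support files (the scons build below skips them via
+   # SKIPSTUBS/SKIPPLUGINS/SKIPUTILS/SKIPMISC); only the makensis compiler is rebuilt from source
+   # and then copied over that binary tree.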
+ rm -rf ../nsis + mv nsis-2.17 ../nsis + if get_source "${URL_NSIS_SRC}" "${DIR_NSIS_SRC}" "${MKD_NSIS_SRC}" + then + echo "Patching nsis" + >patch.log + do_patch nsis.patch + fi + echo "Building nsis" + if ../../scons/bin/scons SKIPSTUBS=all SKIPPLUGINS=all SKIPUTILS=all SKIPMISC=all \ + PREFIX=${DEPPKG_DIR}/nsis PREFIX_BIN=${DEPPKG_DIR}/nsis/Bin \ + PREFIX_CONF=${DEPPKG_DIR}/nsis PREFIX_DATA=${DEPPKG_DIR}/nsis \ + PREFIX_DOC=${DEPPKG_DIR}/nsis/Docs 2>&1 | tee make.log | parse_output + then + : + else + echo "Scons failed - Check `pwd`/make.log" + exit 1 + fi + echo "Installing nsis" + cp -p build/release/makensis/makensis ../../nsis +} + +process_mtx() +{ + if get_source "${URL_MTX}" "${DIR_MTX}" "${MKD_MTX}" + then + echo Patching mtx + # We can't run configure in a cross-compile environment so we + # patch the files to the correct values + cp -f config.h.in config.h + cp -f Makefile.in Makefile + rm -f configure + >patch.log + do_patch mtx.patch + fi + echo Building mtx + do_make Makefile prefix=${DEPPKG_DIR} all + echo Installing mtx + do_make Makefile prefix=${DEPPKG_DIR} install +} + +process_mt() +{ + if get_source "${URL_MT}" "${DIR_MT}" "${MKD_MT}" + then + echo "Patching mt" + >patch.log + do_patch mt.patch + fi + echo "Building mt" + do_make Makefile PREFIX=${DEPPKG_DIR} all + echo Installing mt + do_make Makefile PREFIX=${DEPPKG_DIR} install +} + +process_sed() +{ + if get_source "${URL_SED}" "${DIR_SED}" "${MKD_SED}" + then + echo Patching sed + >patch.log + # patch not needed for 4.2k which is already updated + # do_patch sed.patch + echo Configuring sed + ./configure --host=$BASE \ + --prefix=${DEPPKG_DIR} \ + --disable-nls >make.log 2>&1 + fi + echo Building sed + do_make Makefile all + echo Installing sed + do_make Makefile install +} + +process_stab2cv() +{ + if get_source "${URL_STAB2CV}" "${DIR_STAB2CV}" "${MKD_STAB2CV}" + then + echo Patching stab2cv + >patch.log + do_patch stab2cv.patch + echo Configuring stab2cv + ./configure --prefix=${DEPPKG_DIR}/tools \ + >make.log 2>&1 + fi + echo Building stab2cv + do_make Makefile + echo Installing stab2cv + do_make Makefile install +} + +process_cmd_utils() +{ + if get_source "${URL_CMD_UTILS}" "${DIR_CMD_UTILS}" "${MKD_CMD_UTILS}" + then + # echo Patching cmd-utils + # >patch.log + # do_patch cmd-utils.patch + sed -i "s:strrchr:NULL;//:" expr64/expr64.cpp + echo Configuring cmd-utils + ./configure --host=$BASE \ + --prefix=${DEPPKG_DIR} \ + >make.log 2>&1 + fi + echo Building cmd-utils + do_make Makefile + echo Installing cmd-utils + do_make Makefile install +} + +process_mkisofs() +{ + get_source "${URL_MKISOFS}" "${DIR_MKISOFS}" "${MKD_MKISOFS}" + echo Installing mkisofs + cp `basename ${URL_MKISOFS}` ${DEPPKG_DIR}/bin +} + +process_dvd_rw_tools() +{ + if get_source "${URL_DVD_RW_TOOLS}" "${DIR_DVD_RW_TOOLS}" "${MKD_DVD_RW_TOOLS}" + then + echo Patching dvd+rw-tools + >patch.log + do_patch dvd+rw-tools.patch + fi + echo "Building dvd+rw-tools" + m4 -DOS=XMINGW32 Makefile.m4 | make -f - dvd+rw-tools >make.log 2>&1 + echo "Installing dvd+rw-tools" + m4 -DOS=XMINGW32 Makefile.m4 | make -f - prefix=${DEPPKG_DIR} manprefix=${DEPPKG_DIR} install >>make.log 2>&1 +} + +process_qt4() +{ + if get_source "${URL_Qt4}" "${DIR_Qt4}" "${MKD_Qt4}" + then + echo Patching Qt4 + >patch.log + patch -p1 < ${SCRIPT_DIR}/patches/qt4-intrinsics.patch + patch -p1 < ${SCRIPT_DIR}/patches/qt4-widget-ui.patch + patch -p1 < ${SCRIPT_DIR}/patches/qt4-compilation-see.patch + patch -p1 < ${SCRIPT_DIR}/patches/qt4-compilation.patch + fi + echo 
"Configuring Qt4" + ./configure -opensource -confirm-license -fast -xplatform win32-g++-4.6 \ + -device-option CROSS_COMPILE=i686-w64-mingw32- -device-option \ + PKG_CONFIG='i686-w64-mingw32-pkg-config' -force-pkg-config -release \ + -exceptions -shared -prefix ${DEPPKG_DIR}/qt-out -prefix-install -no-script \ + -no-iconv -no-webkit -no-glib -no-gstreamer -no-phonon -no-phonon-backend \ + -accessibility -no-reduce-exports -no-rpath -make libs -nomake demos \ + -nomake docs -nomake examples -system-zlib -no-mitshm -no-libjpeg \ + -no-libmng -no-libtiff -no-sql-db2 -no-sql-ibase -no-sql-mysql -no-sql-oci \ + -no-sql-odbc -no-sql-psql -no-sql-sqlite -no-sql-sqlite2 \ + -no-sql-sqlite_symbian -no-sql-symsql -no-sql-tds -no-nis -no-cups -no-dbus \ + -no-openvg -no-openssl -no-nas-sound -no-audio-backend -no-sm -no-opengl \ + -no-javascript-jit -no-qt3support -nomake tools \ + -no-xmlpatterns -no-multimedia -nomake tools -silent + + local qt_source_dir=`pwd` + # Required libz.dll.a, zlib.h and zconf.h for proper Qt build purpose + process_zlib + cd "${qt_source_dir}" + local zlib_dir=`basename "${URL_ZLIB}" | sed 's/.tar.gz//'` + cp "../${zlib_dir}/libz.dll.a" ./lib/ + cp "../${zlib_dir}/zlib.h" ./include/ + cp "../${zlib_dir}/zconf.h" ./include/ + + make + + echo "Installing Qt4" + rm -rf ${DEPPKG_DIR}/include/qt ${DEPPKG_DIR}/lib/qt + mkdir -p ${DEPPKG_DIR}/include/qt ${DEPPKG_DIR}/lib/qt + cp -rf include/* ${DEPPKG_DIR}/include/qt + cp -rf lib/* ${DEPPKG_DIR}/lib/qt + cp -rf src/corelib ${DEPPKG_DIR}/src/ + cp -rf src/gui ${DEPPKG_DIR}/src/ + cp -rf lib/QtCore4.dll lib/QtGui4.dll ${DEPPKG_DIR}/bin/ + cp -rf lib/QtCore4.dll lib/QtGui4.dll ${DEPPKG_DIR}/lib/qt/ +} + +process_mingw() +{ + if test -f /usr/lib/gcc/$BASE/*posix/libstdc++-6.dll; then + echo "Installing MinGW libs" + cp -f /usr/lib/gcc/$BASE/*posix/libstdc++-6.dll ${DEPPKG_DIR}/bin + cp -f /usr/lib/gcc/$BASE/*posix/libgcc*dll ${DEPPKG_DIR}/bin + elif test -f /usr/lib/gcc/$BASE/*/libstdc++-6.dll; then + echo "Installing MinGW libs" + cp -f /usr/$BASE/lib/libstdc++-6.dll ${DEPPKG_DIR}/bin + cp -f /usr/$BASE/lib/libgcc*dll ${DEPPKG_DIR}/bin + else + echo "ERROR: Unable to find $BASE on this system" + fi + + if test -f /usr/$BASE/lib/libwinpthread-1.dll; then + cp -f /usr/$BASE/lib/libwinpthread-1.dll ${DEPPKG_DIR}/bin + fi +} + +# MSSQL dlls are created from Visual Studio +process_mssql() +{ + get_source "${URL_MSSQL}" "${DIR_MSSQL}" "${MKD_MSSQL}" + echo Installing MSSQL driver + cp include/*.h ${DEPPKG_DIR}/include + cp x86/mssql-driver.dll ${DEPPKG_DIR}/bin + cp x86/mssql-regression.exe ${DEPPKG_DIR}/bin +} + +if [ "$#" -eq 0 ] +then + process_mingw + process_lzo + process_zlib + process_rsync + process_pthreads + process_openssl + process_stab2cv + process_sed + process_cmd_utils + process_mssql +# process_pcre +# process_mysql +# process_sqlite +# process_postgresql +# process_wx +# process_scons +# process_nsis +# process_mtx +# process_mt +# process_mkisofs +# process_dvd_rw_tools + process_qt4 +else + for dependency in "$@" + do + eval "process_${dependency}" + done +fi +#vss +#Need to download from Microsoft diff --git a/src/win32/cats/Makefile b/src/win32/cats/Makefile new file mode 100644 index 00000000..3429d53c --- /dev/null +++ b/src/win32/cats/Makefile @@ -0,0 +1,110 @@ +# +# Makefile for win32 bacula executables +# Using MinGW cross-compiler on GNU/Linux +# +# +# Author: Robert Nelson +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# Written by Robert Nelson, June 2006 +# + +include ../Makefile.inc + +INCLUDES = \ + 
$(INCLUDE_PTHREADS) \ + $(INCLUDE_BACULA) \ + $(INCLUDE_ZLIB) \ + $(INCLUDE_OPENSSL) \ + $(INCLUDE_MYSQL) + +DEFINES = \ + $(HAVES) + +vpath %.c $(MAINDIR)/src/cats +vpath %.cpp $(MAINDIR)/src/cats + +###################################################################### + +# Files files in src/lib + +SQL_OBJS = \ + sql.o \ + sql_cmds.o \ + sql_create.o \ + sql_delete.o \ + sql_find.o \ + sql_get.o \ + sql_glue.o \ + sql_list.o \ + sql_update.o \ + cats.o \ + bvfs.o + +LIBS_DLL = \ + $(LIBS_BACULA) + +###################################################################### + +# Targets + +.PHONY: all clean + +#all: $(LIBDIR)/libcats.a $(BINDIR)/cats_mysql.dll $(BINDIR)/cats_postgresql.dll $(BINDIR)/cats_sqlite3.dll +all: $(LIBDIR)/libcats.a $(BINDIR)/cats_postgresql.dll + +clean: + @echo "Cleaning `pwd`" + $(call clean_obj,$(addprefix $(OBJDIR)/cats_mysql/,mysql.o $(SQL_OBJS))) + $(call clean_obj,$(addprefix $(OBJDIR)/cats_postgresql/,postgresql.o $(SQL_OBJS))) + $(call clean_obj,$(addprefix $(OBJDIR)/cats_sqlite3/,sqlite.o $(SQL_OBJS))) + $(call clean_exe,$(BINDIR)/cats_mysql.dll) + $(call clean_exe,$(BINDIR)/cats_postgresql.dll) + $(call clean_exe,$(BINDIR)/cats_sqlite3.dll) + $(ECHO_CMD)rm -f $(OBJDIR)/libcats.exp $(LIBDIR)/libcats.a + $(ECHO_CMD)rm -rf $(OBJDIRS) + +$(LIBDIR)/libcats.a $(OBJDIR)/libcats.exp: bacula_cats.def + $(call checkdir,$@) + @mkdir -p $(OBJDIRS) + $(DLLTOOL) --dllname bacula_cats.dll --no-export-all-symbols --input-def bacula_cats.def --output-exp $(OBJDIR)/libcats.exp --output-lib $(LIBDIR)/libcats.a $^ + +$(BINDIR)/cats_mysql.dll: $(addprefix $(OBJDIR)/cats_mysql/,mysql.o $(SQL_OBJS)) $(OBJDIR)/libcats.exp + @echo "Linking $@" + $(call checkdir,$@) + $(ECHO_CMD)$(CXX) $(LDFLAGS) -mdll -mwindows $^ $(LIBS_MYSQL) $(LIBS_DLL) -o $@ + +$(BINDIR)/cats_postgresql.dll: $(addprefix $(OBJDIR)/cats_postgresql/,postgresql.o $(SQL_OBJS)) $(OBJDIR)/libcats.exp + @echo "Linking $@" + $(call checkdir,$@) + $(ECHO_CMD)$(CXX) $(LDFLAGS) -mdll -mwindows $^ $(LIBS_POSTGRESQL) $(LIBS_DLL) -o $@ + +$(BINDIR)/cats_sqlite3.dll: $(addprefix $(OBJDIR)/cats_sqlite3/,sqlite.o $(SQL_OBJS)) $(OBJDIR)/libcats.exp + @echo "Linking $@" + $(call checkdir,$@) + $(ECHO_CMD)$(CXX) $(LDFLAGS) -mdll -mwindows $^ $(LIBS_SQLITE) $(LIBS_DLL) -o $@ + +# +# Rules for generating from ../cats +# + +include ../Makefile.rules + +define Link_Dll +$(OBJDIR)/$(1)/%.o: %.c + @echo "Compiling $$<" + $$(call checkdir,$$@) + $(ECHO_CMD)$(CXX) -D_BDB_PRIV_INTERFACE_ -DBUILDING_CATS -DUSING_DLL -DHAVE_$(2) $(CFLAGS) -c $$< -o $$@ + +$(OBJDIR)/$(1)/%.o: %.cpp + @echo "Compiling $$<" + $$(call checkdir,$$@) + $(ECHO_CMD)$(CXX) -D_BDB_PRIV_INTERFACE_ -DBUILDING_CATS -DUSING_DLL -DHAVE_$(2) $(CFLAGS) -c $$< -o $$@ + +endef + +$(eval $(call Link_Dll,cats_mysql,MYSQL)) + +$(eval $(call Link_Dll,cats_postgresql,POSTGRESQL)) + +$(eval $(call Link_Dll,cats_sqlite3,SQLITE3)) diff --git a/src/win32/cats/bacula_cats.def b/src/win32/cats/bacula_cats.def new file mode 100644 index 00000000..57e4ebcb --- /dev/null +++ b/src/win32/cats/bacula_cats.def @@ -0,0 +1,273 @@ +LIBRARY bacula_cats.dll +EXPORTS + +; sql.o +_Z11db_strerrorP4B_DB +_Z11list_dashesP4B_DBPFvPvPKcES1_ +_Z11list_resultP3JCRP4B_DBPFvPvPKcES3_11e_list_type +_Z11list_resultPviPPc +_Z12check_limitsP3JCRP4B_DB +_Z14db_debug_printP3JCRP6_iobuf +_Z14db_int_handlerPviPPc +_Z15db_list_handlerPviPPc +_Z16db_int64_handlerPviPPc +_Z18db_strtime_handlerPviPPc +_Z18get_sql_record_maxP3JCRP4B_DB +_Z19split_path_and_fileP3JCRP4B_DBPKc +_Z20check_tables_versionP3JCRP4B_DB 
+_Z22db_string_list_handlerPviPPc +_Z23db_open_batch_connexionP3JCRP4B_DB +_Z24db_check_max_connectionsP3JCRP4B_DBj +_Z7QueryDBPKciP3JCRP4B_DBPc +_Z8DeleteDBPKciP3JCRP4B_DBPc +_Z8InsertDBPKciP3JCRP4B_DBPc +_Z8UpdateDBPKciP3JCRP4B_DBPc +_ZN9dbid_listC1Ev +_ZN9dbid_listC2Ev +_ZN9dbid_listD1Ev +_ZN9dbid_listD2Ev + +; sql_create.o +_Z20db_create_job_recordP3JCRP4B_DBP7JOB_DBR +_Z21db_create_path_recordP3JCRP4B_DBP8ATTR_DBR +_Z21db_create_pool_recordP3JCRP4B_DBP8POOL_DBR +_Z22db_create_media_recordP3JCRP4B_DBP9MEDIA_DBR +_Z23db_create_client_recordP3JCRP4B_DBP10CLIENT_DBR +_Z23db_create_device_recordP3JCRP4B_DBP10DEVICE_DBR +_Z23db_disable_batch_insertb +_Z24db_create_base_file_listP3JCRP4B_DBPc +_Z24db_create_counter_recordP3JCRP4B_DBP11COUNTER_DBR +_Z24db_create_fileset_recordP3JCRP4B_DBP11FILESET_DBR +_Z24db_create_storage_recordP3JCRP4B_DBP11STORAGE_DBR +_Z25db_create_jobmedia_recordP3JCRP4B_DBP12JOBMEDIA_DBR +_Z26db_create_mediatype_recordP3JCRP4B_DBP13MEDIATYPE_DBR +_Z27db_create_attributes_recordP3JCRP4B_DBP8ATTR_DBR +_Z27db_write_batch_file_recordsP3JCR +_Z31db_create_restore_object_recordP3JCRP4B_DBP11ROBJECT_DBR +_Z32db_create_file_attributes_recordP3JCRP4B_DBP8ATTR_DBR +_Z37db_commit_base_file_attributes_recordP3JCRP4B_DB +_Z37db_create_base_file_attributes_recordP3JCRP4B_DBP8ATTR_DBR +_Z38db_create_batch_file_attributes_recordP3JCRP4B_DBP8ATTR_DBR +_ZN8POOL_MEMD1Ev + +; sql_delete.o +_Z21db_delete_pool_recordP3JCRP4B_DBP8POOL_DBR +_Z21db_purge_media_recordP3JCRP4B_DBP9MEDIA_DBR +_Z22db_delete_media_recordP3JCRP4B_DBP9MEDIA_DBR + +; sql_find.o +_Z18db_find_last_jobidP3JCRP4B_DBPKcP7JOB_DBR +_Z19db_find_next_volumeP3JCRP4B_DBibP9MEDIA_DBR +_Z22db_find_job_start_timeP3JCRP4B_DBP7JOB_DBRPPcS5_ +_Z24db_find_failed_job_sinceP3JCRP4B_DBP7JOB_DBRPcRi +_Z27db_find_last_job_start_timeP3JCRP4B_DBP7JOB_DBRPPcS5_i + +; sql_get.o +_Z15db_get_pool_idsP3JCRP4B_DBPiPPj +_Z16db_get_file_listP3JCRP4B_DBPcbbPFiPviPS3_ES4_ +_Z16db_get_media_idsP3JCRP4B_DBP9MEDIA_DBRPiPPj +_Z17db_get_base_jobidP3JCRP4B_DBP7JOB_DBRPj +_Z17db_get_client_idsP3JCRP4B_DBPiPPj +_Z17db_get_job_recordP3JCRP4B_DBP7JOB_DBR +_Z18db_get_path_recordP3JCRP4B_DB +_Z18db_get_pool_recordP3JCRP4B_DBP8POOL_DBR +_Z18db_get_query_dbidsP3JCRP4B_DBR8POOL_MEMR9dbid_list +_Z19db_get_media_recordP3JCRP4B_DBP9MEDIA_DBR +_Z19db_get_pool_numvolsP3JCRP4B_DBP8POOL_DBR +_Z20db_get_client_recordP3JCRP4B_DBP10CLIENT_DBR +_Z20db_get_volume_jobidsP3JCRP4B_DBP9MEDIA_DBRP11db_list_ctx +_Z21db_get_base_file_listP3JCRP4B_DBbPFiPviPPcES3_ +_Z21db_get_counter_recordP3JCRP4B_DBP11COUNTER_DBR +_Z21db_get_fileset_recordP3JCRP4B_DBP11FILESET_DBR +_Z22db_accurate_get_jobidsP3JCRP4B_DBP7JOB_DBRP11db_list_ctx +_Z23db_get_job_volume_namesP3JCRP4B_DBjPPc +_Z23db_get_num_pool_recordsP3JCRP4B_DB +_Z23db_get_used_base_jobidsP3JCRP4B_DBPcP11db_list_ctx +_Z24db_get_num_media_recordsP3JCRP4B_DB +_Z27db_get_restoreobject_recordP3JCRP4B_DBP11ROBJECT_DBR +_Z28db_free_restoreobject_recordP3JCRP11ROBJECT_DBR +_Z28db_get_job_volume_parametersP3JCRP4B_DBjPP10VOL_PARAMS +_Z29db_get_file_attributes_recordP3JCRP4B_DBPcP7JOB_DBRP8FILE_DBR +_ZN8POOL_MEMD1Ev + +; sql_list.o +_Z17db_list_sql_queryP3JCRP4B_DBPKcPFvPvS4_ES5_i11e_list_type +_Z18db_list_job_totalsP3JCRP4B_DBP7JOB_DBRPFvPvPKcES5_ +_Z19db_list_job_recordsP3JCRP4B_DBP7JOB_DBRPFvPvPKcES5_11e_list_type +_Z20db_list_pool_recordsP3JCRP4B_DBP8POOL_DBRPFvPvPKcES5_11e_list_type +_Z21db_list_files_for_jobP3JCRP4B_DBjPFvPvPKcES3_ +_Z21db_list_media_recordsP3JCRP4B_DBP9MEDIA_DBRPFvPvPKcES5_11e_list_type 
+_Z22db_list_client_recordsP3JCRP4B_DBPFvPvPKcES3_11e_list_type +_Z22db_list_copies_recordsP3JCRP4B_DBjPcPFvPvPKcES4_11e_list_type +_Z22db_list_joblog_recordsP3JCRP4B_DBjPFvPvPKcES3_11e_list_type +_Z23db_list_restore_objectsP3JCRP4B_DBP11ROBJECT_DBRPFvPvPKcES5_11e_list_type +_Z24db_list_jobmedia_recordsP3JCRP4B_DBjPFvPvPKcES3_11e_list_type +_Z26db_list_base_files_for_jobP3JCRP4B_DBjPFvPvPKcES3_ +_ZN8POOL_MEMD1Ev + +; sql_update.o +_Z15db_update_statsP3JCRP4B_DBx +_Z19db_mark_file_recordP3JCRP4B_DByj +_Z21db_update_pool_recordP3JCRP4B_DBP8POOL_DBR +_Z22db_update_media_recordP3JCRP4B_DBP9MEDIA_DBR +_Z23db_update_client_recordP3JCRP4B_DBP10CLIENT_DBR +_Z24db_make_inchanger_uniqueP3JCRP4B_DBP9MEDIA_DBR +_Z24db_update_counter_recordP3JCRP4B_DBP11COUNTER_DBR +_Z24db_update_job_end_recordP3JCRP4B_DBP7JOB_DBR +_Z24db_update_media_defaultsP3JCRP4B_DBP9MEDIA_DBR +_Z24db_update_storage_recordP3JCRP4B_DBP11STORAGE_DBR +_Z26db_update_job_start_recordP3JCRP4B_DBP7JOB_DBR +_Z28db_add_digest_to_file_recordP3JCRP4B_DByPci + +; bvfs.o +_Z15bvfs_parent_dirPc +_Z17bvfs_basename_dirPc +_Z17bvfs_update_cacheP3JCRP4B_DB +_Z20build_ls_files_queryP4B_DBR8POOL_MEMPKcS4_S4_xx +_Z20bvfs_update_fv_cacheP3JCRP4B_DBPc +_Z32bvfs_update_path_hierarchy_cacheP3JCRP4B_DBPc +_ZN4Bvfs10set_jobidsEPc +_ZN4Bvfs11clear_cacheEv +_ZN4Bvfs11escape_listEP5alist +_ZN4Bvfs11get_volumesEy +_ZN4Bvfs12_handle_pathEPviPPc +_ZN4Bvfs12filter_jobidEv +_ZN4Bvfs12update_cacheEv +_ZN4Bvfs15fv_update_cacheEv +_ZN4Bvfs15ls_special_dirsEv +_ZN4Bvfs16fv_get_big_filesExxi +_ZN4Bvfs17drop_restore_listEPc +_ZN4Bvfs20compute_restore_listEPcS0_S0_S0_ +_ZN4Bvfs20insert_missing_deltaEPcPx +_ZN4Bvfs21fv_get_size_and_countExPxS0_ +_ZN4Bvfs21get_all_file_versionsEjyPKc +_ZN4Bvfs24fv_update_size_and_countExxx +_ZN4Bvfs25fv_compute_size_and_countExPxS0_ +_ZN4Bvfs29fv_get_current_size_and_countExPxS0_ +_ZN4Bvfs6ch_dirEPKc +_ZN4Bvfs7ls_dirsEv +_ZN4Bvfs8get_rootEv +_ZN4Bvfs8ls_filesEv +_ZN4Bvfs9set_jobidEj +_ZN4BvfsC1EP3JCRP4B_DB +_ZN4BvfsC2EP3JCRP4B_DB +_ZN4BvfsD0Ev +_ZN4BvfsD1Ev +_ZN4BvfsD2Ev +_ZN8POOL_MEMD1Ev + +; sql_glue.o +_Z11db_get_typeP4B_DB +_Z12db_sql_queryP4B_DBPKcPFiPviPPcES3_ +_Z12db_sql_queryP4B_DBPKci +_Z12sql_num_rowsP4B_DB +_Z12sql_strerrorP4B_DB +_Z13sql_batch_endP3JCRP4B_DBPKc +_Z13sql_data_seekP4B_DBi +_Z13sql_fetch_rowP4B_DB +_Z14sql_field_seekP4B_DBi +_Z14sql_num_fieldsP4B_DB +_Z15sql_batch_startP3JCRP4B_DB +_Z15sql_fetch_fieldP4B_DB +_Z15sql_free_resultP4B_DB +_Z16db_big_sql_queryP4B_DBPKcPFiPviPPcES3_ +_Z16db_escape_objectP3JCRP4B_DBPci +_Z16db_escape_stringP3JCRP4B_DBPcS3_i +_Z16db_open_databaseP3JCRP4B_DB +_Z16sql_batch_insertP3JCRP4B_DBP8ATTR_DBR +_Z17db_close_databaseP3JCRP4B_DB +_Z17db_get_type_indexP4B_DB +_Z17db_match_databaseP4B_DBPKcS2_S2_i +_Z17db_thread_cleanupP4B_DB +_Z17sql_affected_rowsP4B_DB +_Z18db_end_transactionP3JCRP4B_DB +_Z18db_unescape_objectP3JCRP4B_DBPciPS3_Pi +_Z20db_start_transactionP3JCRP4B_DB +_Z20sql_field_is_numericP4B_DBi +_Z21sql_field_is_not_nullP4B_DBi +_Z25sql_insert_autokey_recordP4B_DBPKcS2_ +_Z28db_clone_database_connectionP4B_DBP3JCRb +_Z9sql_queryP4B_DBPKci + +; cats.o +_ZN4B_DB10_db_unlockEPKci +_ZN4B_DB11db_get_typeEv +_ZN4B_DB12db_sql_queryEPKci +_ZN4B_DB15print_lock_infoEP6_iobuf +_ZN4B_DB17db_match_databaseEPKcS1_S1_i +_ZN4B_DB28db_clone_database_connectionEP3JCRb +_ZN4B_DB8_db_lockEPKci + +; postgresql.o +_Z16db_init_databaseP3JCRPKcS2_S2_S2_S2_iS2_bb +_ZN15B_DB_POSTGRESQL12db_sql_queryEPKcPFiPviPPcES2_ +_ZN15B_DB_POSTGRESQL12sql_strerrorEv +_ZN15B_DB_POSTGRESQL13sql_batch_endEP3JCRPKc 
+_ZN15B_DB_POSTGRESQL13sql_data_seekEi +_ZN15B_DB_POSTGRESQL13sql_fetch_rowEv +_ZN15B_DB_POSTGRESQL15sql_batch_startEP3JCR +_ZN15B_DB_POSTGRESQL15sql_fetch_fieldEv +_ZN15B_DB_POSTGRESQL15sql_free_resultEv +_ZN15B_DB_POSTGRESQL16db_big_sql_queryEPKcPFiPviPPcES2_ +_ZN15B_DB_POSTGRESQL16db_escape_objectEP3JCRPci +_ZN15B_DB_POSTGRESQL16db_escape_stringEP3JCRPcS2_i +_ZN15B_DB_POSTGRESQL16db_open_databaseEP3JCR +_ZN15B_DB_POSTGRESQL16sql_batch_insertEP3JCRP8ATTR_DBR +_ZN15B_DB_POSTGRESQL17db_close_databaseEP3JCR +_ZN15B_DB_POSTGRESQL17db_thread_cleanupEv +_ZN15B_DB_POSTGRESQL17sql_affected_rowsEv +_ZN15B_DB_POSTGRESQL18db_end_transactionEP3JCR +_ZN15B_DB_POSTGRESQL18db_unescape_objectEP3JCRPciPS2_Pi +_ZN15B_DB_POSTGRESQL20db_start_transactionEP3JCR +_ZN15B_DB_POSTGRESQL20sql_field_is_numericEi +_ZN15B_DB_POSTGRESQL21sql_field_is_not_nullEi +_ZN15B_DB_POSTGRESQL25sql_insert_autokey_recordEPKcS1_ +_ZN15B_DB_POSTGRESQL9sql_queryEPKci +_ZN15B_DB_POSTGRESQLC1EP3JCRPKcS3_S3_S3_S3_iS3_bb +_ZN15B_DB_POSTGRESQLC2EP3JCRPKcS3_S3_S3_S3_iS3_bb +_ZN15B_DB_POSTGRESQLD0Ev +_ZN15B_DB_POSTGRESQLD1Ev +_ZN15B_DB_POSTGRESQLD2Ev +_ZN4B_DB16db_big_sql_queryEPKcPFiPviPPcES2_ +_ZN4B_DBD0Ev +_ZN4B_DBD1Ev +_ZN9B_DB_PRIVD0Ev +_ZN9B_DB_PRIVD1Ev + +client_backups DATA +list_pool DATA +drop_deltabs DATA +create_deltabs DATA +sel_JobMedia DATA +uar_list_jobs DATA +uar_file DATA +uar_count_files DATA +uar_sel_files DATA +uar_del_temp DATA +uar_del_temp1 DATA +uar_create_temp DATA +uar_create_temp1 DATA +uar_last_full DATA +uar_full DATA +uar_inc DATA +uar_list_temp DATA +uar_sel_all_temp1 DATA +uar_sel_fileset DATA +uar_jobid_fileindex DATA +uar_dif DATA +uar_sel_all_temp DATA +uar_count_files DATA +uar_jobids_fileindex DATA +uar_jobid_fileindex_from_dir DATA +uar_jobid_fileindex_from_table DATA +cleanup_created_job DATA +cleanup_running_job DATA +get_restore_objects DATA +get_restore_objects DATA +uar_sel_filesetid DATA +create_delindex DATA +expired_volumes DATA +uap_upgrade_copies_oldest_job DATA +uar_print_jobs DATA +get_restore_objects DATA +uar_sel_jobid_temp DATA diff --git a/src/win32/cats/bacula_cats/bacula_cats.vcproj b/src/win32/cats/bacula_cats/bacula_cats.vcproj new file mode 100644 index 00000000..294b2fc0 --- /dev/null +++ b/src/win32/cats/bacula_cats/bacula_cats.vcproj @@ -0,0 +1,94 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/win32/cats/cats_mysql/cats_mysql.vcproj b/src/win32/cats/cats_mysql/cats_mysql.vcproj new file mode 100644 index 00000000..c863fa57 --- /dev/null +++ b/src/win32/cats/cats_mysql/cats_mysql.vcproj @@ -0,0 +1,428 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/win32/cats/cats_postgresql/cats_postgresql.vcproj b/src/win32/cats/cats_postgresql/cats_postgresql.vcproj new file mode 100644 index 00000000..78b46d93 --- /dev/null +++ b/src/win32/cats/cats_postgresql/cats_postgresql.vcproj @@ -0,0 +1,432 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git 
a/src/win32/cats/cats_sqlite3/cats_sqlite3.vcproj b/src/win32/cats/cats_sqlite3/cats_sqlite3.vcproj new file mode 100644 index 00000000..7eb1bc57 --- /dev/null +++ b/src/win32/cats/cats_sqlite3/cats_sqlite3.vcproj @@ -0,0 +1,421 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/win32/cats/create_mysql_database.cmd b/src/win32/cats/create_mysql_database.cmd new file mode 100644 index 00000000..550c83bb --- /dev/null +++ b/src/win32/cats/create_mysql_database.cmd @@ -0,0 +1,14 @@ +@ECHO off +REM +REM Script to create Bacula database(s) +REM + +"@SQL_BINDIR@\mysql" %* -e "CREATE DATABASE bacula;" +SET RESULT=%ERRORLEVEL% +IF %RESULT% GTR 0 GOTO :ERROR +ECHO Creation of bacula database succeeded. +EXIT /b 0 + +:ERROR +ECHO Creation of bacula database failed. +EXIT /b %RESULT% diff --git a/src/win32/cats/create_postgresql_database.cmd b/src/win32/cats/create_postgresql_database.cmd new file mode 100644 index 00000000..90b029cb --- /dev/null +++ b/src/win32/cats/create_postgresql_database.cmd @@ -0,0 +1,23 @@ +@ECHO off +REM +REM Script to create Bacula database(s) +REM + +REM use SQL_ASCII to be able to put any filename into +REM the database even those created with unusual character sets +SET ENCODING=ENCODING 'SQL_ASCII' + +REM use UTF8 if you are using standard Unix/Linux LANG specifications +REM that use UTF8 -- this is normally the default and *should* be +REM your standard. Bacula consoles work correctly *only* with UTF8. +REM SET ENCODING=ENCODING 'UTF8' + +"@SQL_BINDIR@\psql" -f "@bin_dir_cmd@\create_postgresql_database.sql" -d template1 %* +IF ERRORLEVEL 1 GOTO :ERROR +ECHO Creation of bacula database succeeded. +EXIT /b 0 +GOTO :EOF + +:ERROR +ECHO Creation of bacula database failed. +EXIT /b 1 diff --git a/src/win32/cats/create_postgresql_database.sql b/src/win32/cats/create_postgresql_database.sql new file mode 100644 index 00000000..bebc2513 --- /dev/null +++ b/src/win32/cats/create_postgresql_database.sql @@ -0,0 +1,2 @@ +CREATE DATABASE bacula ENCODING 'SQL_ASCII'; +ALTER DATABASE bacula SET datestyle TO 'ISO, YMD'; diff --git a/src/win32/cats/create_sqlite3_database.cmd b/src/win32/cats/create_sqlite3_database.cmd new file mode 100644 index 00000000..a2f24d26 --- /dev/null +++ b/src/win32/cats/create_sqlite3_database.cmd @@ -0,0 +1,6 @@ +@ECHO off +REM +REM Script to create Bacula SQLite tables + +ECHO .databases | "@bin_dir_cmd@\sqlite3" %* "@working_dir_cmd@\bacula.db" +EXIT /b 0 diff --git a/src/win32/cats/delete_catalog_backup.cmd b/src/win32/cats/delete_catalog_backup.cmd new file mode 100644 index 00000000..30c84a21 --- /dev/null +++ b/src/win32/cats/delete_catalog_backup.cmd @@ -0,0 +1,5 @@ +REM +REM This script deletes a catalog dump +REM +DEL /f "@working_dir_cmd@\bacula.sql" +EXIT /b 0 diff --git a/src/win32/cats/drop_mysql_database.cmd b/src/win32/cats/drop_mysql_database.cmd new file mode 100644 index 00000000..a6cd0a9d --- /dev/null +++ b/src/win32/cats/drop_mysql_database.cmd @@ -0,0 +1,14 @@ +@ECHO off +REM +REM Script to drop Bacula database(s) +REM + +"@SQL_BINDIR@\mysql" %* -f -e "DROP DATABASE bacula;" +SET RESULT=%ERRORLEVEL% +IF %RESULT% GTR 0 GOTO :ERROR +ECHO Drop of bacula database succeeded. +EXIT /b 0 + +:ERROR +ECHO Drop of bacula database failed. 
+EXIT /b %RESULT% diff --git a/src/win32/cats/drop_mysql_tables.cmd b/src/win32/cats/drop_mysql_tables.cmd new file mode 100644 index 00000000..fd7cc332 --- /dev/null +++ b/src/win32/cats/drop_mysql_tables.cmd @@ -0,0 +1,14 @@ +@ECHO off +REM +REM Script to delete Bacula tables for MySQL +REM + +"@SQL_BINDIR@\mysql" %* < "@bin_dir_cmd@\drop_mysql_tables.sql" +SET RESULT=%ERRORLEVEL% +IF %RESULT% GTR 0 goto :ERROR +ECHO Deletion of Bacula MySQL tables succeeded. +EXIT /b 0 + +:ERROR +ECHO Deletion of Bacula MySQL tables failed. +EXIT /b %RESULT% diff --git a/src/win32/cats/drop_mysql_tables.sql b/src/win32/cats/drop_mysql_tables.sql new file mode 100644 index 00000000..b57d7e50 --- /dev/null +++ b/src/win32/cats/drop_mysql_tables.sql @@ -0,0 +1,26 @@ +USE bacula; +DROP TABLE IF EXISTS Filename; +DROP TABLE IF EXISTS Path; +DROP TABLE IF EXISTS LongName; +DROP TABLE IF EXISTS Device; +DROP TABLE IF EXISTS Storage; +DROP TABLE IF EXISTS MediaType; +DROP TABLE IF EXISTS File; +DROP TABLE IF EXISTS Client; +DROP TABLE IF EXISTS Job; +DROP TABLE IF EXISTS Media; +DROP TABLE IF EXISTS JobMedia; +DROP TABLE IF EXISTS Pool; +DROP TABLE IF EXISTS MultiVolume; +DROP TABLE IF EXISTS FileSave; +DROP TABLE IF EXISTS FileSet; +DROP TABLE IF EXISTS Version; +DROP TABLE IF EXISTS Counters; +DROP TABLE IF EXISTS BaseFiles; +DROP TABLE IF EXISTS UnsavedFiles; +DROP TABLE IF EXISTS CDImages; +DROP TABLE IF EXISTS Status; +DROP TABLE IF EXISTS MAC; +DROP TABLE IF EXISTS Log; +DROP TABLE IF EXISTS Location; +DROP TABLE IF EXISTS LocationLog; diff --git a/src/win32/cats/drop_postgresql_database.cmd b/src/win32/cats/drop_postgresql_database.cmd new file mode 100644 index 00000000..f5979a52 --- /dev/null +++ b/src/win32/cats/drop_postgresql_database.cmd @@ -0,0 +1,14 @@ +@ECHO off +REM +REM Script to drop Bacula database(s) +REM + +"@SQL_BINDIR@\dropdb" %* bacula +IF ERRORLEVEL 1 GOTO :ERROR +ECHO Drop of bacula database succeeded. +EXIT /b 0 +GOTO :EOF + +:ERROR +ECHO Drop of bacula database failed. +EXIT /b 1 diff --git a/src/win32/cats/drop_postgresql_tables.cmd b/src/win32/cats/drop_postgresql_tables.cmd new file mode 100644 index 00000000..ae6215ca --- /dev/null +++ b/src/win32/cats/drop_postgresql_tables.cmd @@ -0,0 +1,14 @@ +@ECHO off +REM +REM Script to delete Bacula tables for PostgreSQL +REM + +"@SQL_BINDIR@\psql" -f "@bin_dir_cmd@\drop_postgresql_tables.sql" -d bacula %* +IF ERRORLEVEL 1 GOTO :ERROR +ECHO Deletion of Bacula PostgreSQL tables succeeded. +EXIT /b 0 +GOTO :EOF + +:ERROR +ECHO Deletion of Bacula PostgreSQL tables failed. 
+EXIT /b 1 diff --git a/src/win32/cats/drop_postgresql_tables.sql b/src/win32/cats/drop_postgresql_tables.sql new file mode 100644 index 00000000..4057b920 --- /dev/null +++ b/src/win32/cats/drop_postgresql_tables.sql @@ -0,0 +1,22 @@ +drop table unsavedfiles; +drop table basefiles; +drop table jobmedia; +drop table file; +drop table job; +drop table media; +drop table client; +drop table pool; +drop table fileset; +drop table path; +drop table filename; +drop table counters; +drop table version; +drop table CDImages; +drop table Device; +drop table Storage; +drop table MediaType; +drop table Status; +drop table MAC; +drop table log; +drop table Location; +drop table locationlog; diff --git a/src/win32/cats/drop_sqlite3_database.cmd b/src/win32/cats/drop_sqlite3_database.cmd new file mode 100644 index 00000000..654ca6f0 --- /dev/null +++ b/src/win32/cats/drop_sqlite3_database.cmd @@ -0,0 +1,7 @@ +@ECHO off +REM +REM Script to drop Bacula SQLite tables + +DEL "@working_dir_cmd@\bacula.db" +ECHO SQLite database dropped. +EXIT /b 0 diff --git a/src/win32/cats/drop_sqlite3_tables.cmd b/src/win32/cats/drop_sqlite3_tables.cmd new file mode 100644 index 00000000..ac5975fc --- /dev/null +++ b/src/win32/cats/drop_sqlite3_tables.cmd @@ -0,0 +1,8 @@ +@ECHO off +REM +REM Script to delete the SQLite Bacula database (same as deleting +REM the tables) +REM + +DEL "@working_dir_cmd@\bacula.db" +EXIT /b 0 diff --git a/src/win32/cats/grant_mysql_privileges.cmd b/src/win32/cats/grant_mysql_privileges.cmd new file mode 100644 index 00000000..c37c5d8a --- /dev/null +++ b/src/win32/cats/grant_mysql_privileges.cmd @@ -0,0 +1,14 @@ +@ECHO off +REM +REM Script to grant privileges to the bacula database +REM + +"@SQL_BINDIR@\mysql" -u root -f %* < "@bin_dir_cmd@\grant_mysql_privileges.sql" +SET RESULT=%ERRORLEVEL% +IF %RESULT% GTR 0 GOTO :ERROR +ECHO Privileges for bacula granted. +EXIT /b 0 + +:ERROR +ECHO Error creating privileges. +EXIT /b %RESULT% diff --git a/src/win32/cats/grant_mysql_privileges.sql b/src/win32/cats/grant_mysql_privileges.sql new file mode 100644 index 00000000..1b9d371e --- /dev/null +++ b/src/win32/cats/grant_mysql_privileges.sql @@ -0,0 +1,5 @@ +use mysql +grant all privileges on bacula.* to bacula@localhost; +grant all privileges on bacula.* to bacula@"%"; +select * from user; +flush privileges; diff --git a/src/win32/cats/grant_postgresql_privileges.cmd b/src/win32/cats/grant_postgresql_privileges.cmd new file mode 100644 index 00000000..69e80768 --- /dev/null +++ b/src/win32/cats/grant_postgresql_privileges.cmd @@ -0,0 +1,15 @@ +@ECHO off +REM +REM Script to grant privileges to the bacula database +REM +USER=bacula + +"@SQL_BINDIR@\psql" -f "@bin_dir_cmd@\grant_postgresql_privileges.sql" -d bacula %* +IF ERRORLEVEL 1 GOTO :ERROR +ECHO Error creating privileges. +EXIT /b 0 +GOTO :EOF + +:ERROR +ECHO Drop of bacula database failed. 
+EXIT /b 1 diff --git a/src/win32/cats/grant_postgresql_privileges.sql b/src/win32/cats/grant_postgresql_privileges.sql new file mode 100644 index 00000000..3a73f6d2 --- /dev/null +++ b/src/win32/cats/grant_postgresql_privileges.sql @@ -0,0 +1,37 @@ +create user bacula; + +-- for tables +grant all on unsavedfiles to bacula; +grant all on basefiles to bacula; +grant all on jobmedia to bacula; +grant all on file to bacula; +grant all on job to bacula; +grant all on media to bacula; +grant all on client to bacula; +grant all on pool to bacula; +grant all on fileset to bacula; +grant all on path to bacula; +grant all on filename to bacula; +grant all on counters to bacula; +grant all on version to bacula; +grant all on cdimages to bacula; +grant all on mediatype to bacula; +grant all on storage to bacula; +grant all on device to bacula; +grant all on status to bacula; + +-- for sequences on those tables + +grant select, update on filename_filenameid_seq to bacula; +grant select, update on path_pathid_seq to bacula; +grant select, update on fileset_filesetid_seq to bacula; +grant select, update on pool_poolid_seq to bacula; +grant select, update on client_clientid_seq to bacula; +grant select, update on media_mediaid_seq to bacula; +grant select, update on job_jobid_seq to bacula; +grant select, update on file_fileid_seq to bacula; +grant select, update on jobmedia_jobmediaid_seq to bacula; +grant select, update on basefiles_baseid_seq to bacula; +grant select, update on storage_storageid_seq to bacula; +grant select, update on mediatype_mediatypeid_seq to bacula; +grant select, update on device_deviceid_seq to bacula; diff --git a/src/win32/cats/grant_sqlite3_privileges.cmd b/src/win32/cats/grant_sqlite3_privileges.cmd new file mode 100644 index 00000000..a824a34c --- /dev/null +++ b/src/win32/cats/grant_sqlite3_privileges.cmd @@ -0,0 +1,7 @@ +@ECHO off +REM +REM Script to grant privileges to the bacula database +REM + +REM nothing to do here +EXIT /b 0 diff --git a/src/win32/cats/make_def b/src/win32/cats/make_def new file mode 100755 index 00000000..1b539b2a --- /dev/null +++ b/src/win32/cats/make_def @@ -0,0 +1,72 @@ +#!/bin/sh +# +# Make the stupid bacula.def file so that we don't have to do it +# manually +# +# Kern Sibbald, June 2007 +# + +echo "LIBRARY bacula_cats.dll" +echo "EXPORTS" +echo " " + +TOPDIR=`(cd ../../..;pwd)` +TOPDIR=${DEPKGS:-${TOPDIR}} +NM=i686-w64-mingw32-nm + +OBJS="sql.o sql_create.o sql_delete.o sql_find.o sql_get.o \ + sql_list.o sql_update.o bvfs.o sql_glue.o cats.o postgresql.o" + +# +# The data could be automated too +# +DATA="\ + client_backups \ + list_pool \ + drop_deltabs \ + create_deltabs \ + sel_JobMedia \ + uar_list_jobs \ + uar_file \ + uar_count_files \ + uar_sel_files \ + uar_del_temp \ + uar_del_temp1 \ + uar_create_temp \ + uar_create_temp1 \ + uar_last_full \ + uar_full \ + uar_inc \ + uar_list_temp \ + uar_sel_all_temp1 \ + uar_sel_fileset \ + uar_jobid_fileindex \ + uar_dif \ + uar_sel_all_temp \ + uar_count_files \ + uar_jobids_fileindex \ + uar_jobid_fileindex_from_dir \ + uar_jobid_fileindex_from_table \ + cleanup_created_job \ + cleanup_running_job \ + get_restore_objects \ + get_restore_objects \ + uar_sel_filesetid \ + create_delindex \ + expired_volumes \ + uap_upgrade_copies_oldest_job \ + uar_print_jobs \ + get_restore_objects \ + uar_sel_jobid_temp" + + +cd obj32/cats_postgresql +for i in ${OBJS}; do \ + echo "; $i"; \ + ${NM} $i | grep "^[0-9a-f]* T _" | cut -c13-; \ + echo " "; \ +done + +for i in ${DATA}; do \ + echo "$i DATA"; \ 
+done diff --git a/src/win32/cats/make_mysql_catalog_backup.cmd b/src/win32/cats/make_mysql_catalog_backup.cmd new file mode 100644 index 00000000..abf55e18 --- /dev/null +++ b/src/win32/cats/make_mysql_catalog_backup.cmd @@ -0,0 +1,41 @@ +@ECHO off +REM +REM This script dumps your Bacula catalog in ASCII format +REM It works for MySQL, SQLite, and PostgreSQL +REM +REM %1 is the name of the database to be backed up and the name +REM of the output file (default = bacula +REM %2 is the user name with which to access the database +REM (default = bacula). +REM %3 is the password with which to access the database or "" if no password +REM (default "") +REM +REM +@ECHO on + +DEL /f "@working_dir_cmd@\%1.sql" 2>nul + +set MYSQLPASSWORD= + +IF NOT "%3"=="" SET MYSQLPASSWORD=--password=%3 +"@SQL_BINDIR@\mysqldump" -u %2 %MYSQLPASSWORD% -f --opt %1 > "@working_dir_cmd@\%1.sql" + +@ECHO off +REM +REM To read back a MySQL database use: +REM cd @working_dir_cmd@ +REM rd /s /q @SQL_BINDIR@\..\data\bacula +REM mysql < bacula.sql +REM +REM To read back a SQLite database use: +REM cd @working_dir_cmd@ +REM del /f bacula.db +REM sqlite bacula.db < bacula.sql +REM +REM To read back a PostgreSQL database use: +REM cd @working_dir_cmd@ +REM dropdb bacula +REM createdb -T template0 -E SQL_ASCII bacula +REM psql bacula < bacula.sql +REM +EXIT /b 0 diff --git a/src/win32/cats/make_mysql_tables.cmd b/src/win32/cats/make_mysql_tables.cmd new file mode 100644 index 00000000..bab38d86 --- /dev/null +++ b/src/win32/cats/make_mysql_tables.cmd @@ -0,0 +1,14 @@ +@ECHO off +REM +REM Script to create Bacula MySQL tables +REM + +"@SQL_BINDIR@\mysql" -f %* < "@bin_dir_cmd@\make_mysql_tables.sql" +SET RESULT=%ERRORLEVEL% +IF %RESULT% GTR 0 GOTO :ERROR +ECHO Creation of Bacula MySQL tables succeeded. +EXIT /b 0 + +:ERROR +ECHO Creation of Bacula MySQL tables failed. +EXIT /b %RESULT% diff --git a/src/win32/cats/make_mysql_tables.sql b/src/win32/cats/make_mysql_tables.sql new file mode 100644 index 00000000..86a3c8b4 --- /dev/null +++ b/src/win32/cats/make_mysql_tables.sql @@ -0,0 +1,357 @@ +USE ${db_name}; +-- +-- Note, we use BLOB rather than TEXT because in MySQL, +-- BLOBs are identical to TEXT except that BLOB is case +-- sensitive in sorts, which is what we want, and TEXT +-- is case insensitive. +-- +CREATE TABLE Filename ( + FilenameId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT, + Name BLOB NOT NULL, + PRIMARY KEY(FilenameId), + INDEX (Name(255)) + ); + +CREATE TABLE Path ( + PathId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT, + Path BLOB NOT NULL, + PRIMARY KEY(PathId), + INDEX (Path(255)) + ); + + +CREATE TABLE File ( + FileId BIGINT UNSIGNED NOT NULL AUTO_INCREMENT, + FileIndex INTEGER UNSIGNED DEFAULT 0, + JobId INTEGER UNSIGNED NOT NULL REFERENCES Job, + PathId INTEGER UNSIGNED NOT NULL REFERENCES Path, + FilenameId INTEGER UNSIGNED NOT NULL REFERENCES Filename, + MarkId INTEGER UNSIGNED DEFAULT 0, + LStat TINYBLOB NOT NULL, + MD5 TINYBLOB, + PRIMARY KEY(FileId), + INDEX (JobId), + INDEX (JobId, PathId, FilenameId) + ); + +-- +-- Possibly add one or more of the following indexes +-- to the above File table if your Verifies are +-- too slow. 
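-- (Editor's note, illustrative only and not part of the stock schema: the
--  suggestions listed just below are inline INDEX clauses for the File table
--  definition above; on an already-created catalog the same tuning would use
--  the ALTER TABLE form, e.g.
--      ALTER TABLE File ADD INDEX (PathId);
-- )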
+-- +-- INDEX (PathId), +-- INDEX (FilenameId), +-- INDEX (FilenameId, PathId) +-- INDEX (JobId), +-- + +CREATE TABLE MediaType ( + MediaTypeId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT, + MediaType TINYBLOB NOT NULL, + ReadOnly TINYINT DEFAULT 0, + PRIMARY KEY(MediaTypeId) + ); + +CREATE TABLE Storage ( + StorageId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT, + Name TINYBLOB NOT NULL, + AutoChanger TINYINT DEFAULT 0, + PRIMARY KEY(StorageId) + ); + +CREATE TABLE Device ( + DeviceId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT, + Name TINYBLOB NOT NULL, + MediaTypeId INTEGER UNSIGNED DEFAULT 0 REFERENCES MediaType, + StorageId INTEGER UNSIGNED DEFAULT 0 REFERENCES Storage, + DevMounts INTEGER UNSIGNED DEFAULT 0, + DevReadBytes BIGINT UNSIGNED DEFAULT 0, + DevWriteBytes BIGINT UNSIGNED DEFAULT 0, + DevReadBytesSinceCleaning BIGINT UNSIGNED DEFAULT 0, + DevWriteBytesSinceCleaning BIGINT UNSIGNED DEFAULT 0, + DevReadTime BIGINT UNSIGNED DEFAULT 0, + DevWriteTime BIGINT UNSIGNED DEFAULT 0, + DevReadTimeSinceCleaning BIGINT UNSIGNED DEFAULT 0, + DevWriteTimeSinceCleaning BIGINT UNSIGNED DEFAULT 0, + CleaningDate DATETIME DEFAULT 0, + CleaningPeriod BIGINT UNSIGNED DEFAULT 0, + PRIMARY KEY(DeviceId) + ); + + +CREATE TABLE Job ( + JobId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT, + Job TINYBLOB NOT NULL, + Name TINYBLOB NOT NULL, + Type BINARY(1) NOT NULL, + Level BINARY(1) NOT NULL, + ClientId INTEGER DEFAULT 0 REFERENCES Client, + JobStatus BINARY(1) NOT NULL, + SchedTime DATETIME DEFAULT 0, + StartTime DATETIME DEFAULT 0, + EndTime DATETIME DEFAULT 0, + RealEndTime DATETIME DEFAULT 0, + JobTDate BIGINT UNSIGNED DEFAULT 0, + VolSessionId INTEGER UNSIGNED DEFAULT 0, + VolSessionTime INTEGER UNSIGNED DEFAULT 0, + JobFiles INTEGER UNSIGNED DEFAULT 0, + JobBytes BIGINT UNSIGNED DEFAULT 0, + ReadBytes BIGINT UNSIGNED DEFAULT 0, + JobErrors INTEGER UNSIGNED DEFAULT 0, + JobMissingFiles INTEGER UNSIGNED DEFAULT 0, + PoolId INTEGER UNSIGNED DEFAULT 0 REFERENCES Pool, + FileSetId INTEGER UNSIGNED DEFAULT 0 REFERENCES FileSet, + PriorJobId INTEGER UNSIGNED DEFAULT 0 REFERENCES Job, + PurgedFiles TINYINT DEFAULT 0, + HasBase TINYINT DEFAULT 0, + PRIMARY KEY(JobId), + INDEX (Name(128)) + ); + +-- Create a table like Job for long term statistics +CREATE TABLE JobHisto ( + JobId INTEGER UNSIGNED NOT NULL, + Job TINYBLOB NOT NULL, + Name TINYBLOB NOT NULL, + Type BINARY(1) NOT NULL, + Level BINARY(1) NOT NULL, + ClientId INTEGER DEFAULT 0, + JobStatus BINARY(1) NOT NULL, + SchedTime DATETIME DEFAULT 0, + StartTime DATETIME DEFAULT 0, + EndTime DATETIME DEFAULT 0, + RealEndTime DATETIME DEFAULT 0, + JobTDate BIGINT UNSIGNED DEFAULT 0, + VolSessionId INTEGER UNSIGNED DEFAULT 0, + VolSessionTime INTEGER UNSIGNED DEFAULT 0, + JobFiles INTEGER UNSIGNED DEFAULT 0, + JobBytes BIGINT UNSIGNED DEFAULT 0, + ReadBytes BIGINT UNSIGNED DEFAULT 0, + JobErrors INTEGER UNSIGNED DEFAULT 0, + JobMissingFiles INTEGER UNSIGNED DEFAULT 0, + PoolId INTEGER UNSIGNED DEFAULT 0, + FileSetId INTEGER UNSIGNED DEFAULT 0, + PriorJobId INTEGER UNSIGNED DEFAULT 0, + PurgedFiles TINYINT DEFAULT 0, + HasBase TINYINT DEFAULT 0, + INDEX (StartTime) + ); + +CREATE TABLE Location ( + LocationId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT, + Location TINYBLOB NOT NULL, + Cost INTEGER DEFAULT 0, + Enabled TINYINT, + PRIMARY KEY(LocationId) + ); + +CREATE TABLE LocationLog ( + LocLogId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT, + Date DATETIME DEFAULT 0, + Comment BLOB NOT NULL, + MediaId INTEGER UNSIGNED DEFAULT 0 REFERENCES Media, + LocationId INTEGER UNSIGNED 
DEFAULT 0 REFERENCES Location, + NewVolStatus ENUM('Full', 'Archive', 'Append', 'Recycle', 'Purged', + 'Read-Only', 'Disabled', 'Error', 'Busy', 'Used', 'Cleaning') NOT NULL, + NewEnabled TINYINT, + PRIMARY KEY(LocLogId) +); + + +CREATE TABLE FileSet ( + FileSetId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT, + FileSet TINYBLOB NOT NULL, + MD5 TINYBLOB, + CreateTime DATETIME DEFAULT 0, + PRIMARY KEY(FileSetId) + ); + +CREATE TABLE JobMedia ( + JobMediaId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT, + JobId INTEGER UNSIGNED NOT NULL REFERENCES Job, + MediaId INTEGER UNSIGNED NOT NULL REFERENCES Media, + FirstIndex INTEGER UNSIGNED DEFAULT 0, + LastIndex INTEGER UNSIGNED DEFAULT 0, + StartFile INTEGER UNSIGNED DEFAULT 0, + EndFile INTEGER UNSIGNED DEFAULT 0, + StartBlock INTEGER UNSIGNED DEFAULT 0, + EndBlock INTEGER UNSIGNED DEFAULT 0, + VolIndex INTEGER UNSIGNED DEFAULT 0, + Copy INTEGER UNSIGNED DEFAULT 0, + Stripe INTEGER UNSIGNED DEFAULT 0, + PRIMARY KEY(JobMediaId), + INDEX (JobId, MediaId) + ); + + +CREATE TABLE Media ( + MediaId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT, + VolumeName TINYBLOB NOT NULL, + Slot INTEGER DEFAULT 0, + PoolId INTEGER UNSIGNED DEFAULT 0 REFERENCES Pool, + MediaType TINYBLOB NOT NULL, + MediaTypeId INTEGER UNSIGNED DEFAULT 0 REFERENCES MediaType, + LabelType TINYINT DEFAULT 0, + FirstWritten DATETIME DEFAULT 0, + LastWritten DATETIME DEFAULT 0, + LabelDate DATETIME DEFAULT 0, + VolJobs INTEGER UNSIGNED DEFAULT 0, + VolFiles INTEGER UNSIGNED DEFAULT 0, + VolBlocks INTEGER UNSIGNED DEFAULT 0, + VolMounts INTEGER UNSIGNED DEFAULT 0, + VolBytes BIGINT UNSIGNED DEFAULT 0, + VolParts INTEGER UNSIGNED DEFAULT 0, + VolErrors INTEGER UNSIGNED DEFAULT 0, + VolWrites INTEGER UNSIGNED DEFAULT 0, + VolCapacityBytes BIGINT UNSIGNED DEFAULT 0, + VolStatus ENUM('Full', 'Archive', 'Append', 'Recycle', 'Purged', + 'Read-Only', 'Disabled', 'Error', 'Busy', 'Used', 'Cleaning') NOT NULL, + Enabled TINYINT DEFAULT 1, + Recycle TINYINT DEFAULT 0, + ActionOnPurge TINYINT DEFAULT 0, + VolRetention BIGINT UNSIGNED DEFAULT 0, + VolUseDuration BIGINT UNSIGNED DEFAULT 0, + MaxVolJobs INTEGER UNSIGNED DEFAULT 0, + MaxVolFiles INTEGER UNSIGNED DEFAULT 0, + MaxVolBytes BIGINT UNSIGNED DEFAULT 0, + InChanger TINYINT DEFAULT 0, + StorageId INTEGER UNSIGNED DEFAULT 0 REFERENCES Storage, + DeviceId INTEGER UNSIGNED DEFAULT 0 REFERENCES Device, + MediaAddressing TINYINT DEFAULT 0, + VolReadTime BIGINT UNSIGNED DEFAULT 0, + VolWriteTime BIGINT UNSIGNED DEFAULT 0, + EndFile INTEGER UNSIGNED DEFAULT 0, + EndBlock INTEGER UNSIGNED DEFAULT 0, + LocationId INTEGER UNSIGNED DEFAULT 0 REFERENCES Location, + RecycleCount INTEGER UNSIGNED DEFAULT 0, + InitialWrite DATETIME DEFAULT 0, + ScratchPoolId INTEGER UNSIGNED DEFAULT 0 REFERENCES Pool, + RecyclePoolId INTEGER UNSIGNED DEFAULT 0 REFERENCES Pool, + Comment BLOB, + PRIMARY KEY(MediaId), + UNIQUE (VolumeName(128)), + INDEX (PoolId) + ); + +CREATE TABLE Pool ( + PoolId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT, + Name TINYBLOB NOT NULL, + NumVols INTEGER UNSIGNED DEFAULT 0, + MaxVols INTEGER UNSIGNED DEFAULT 0, + UseOnce TINYINT DEFAULT 0, + UseCatalog TINYINT DEFAULT 0, + AcceptAnyVolume TINYINT DEFAULT 0, + VolRetention BIGINT UNSIGNED DEFAULT 0, + VolUseDuration BIGINT UNSIGNED DEFAULT 0, + MaxVolJobs INTEGER UNSIGNED DEFAULT 0, + MaxVolFiles INTEGER UNSIGNED DEFAULT 0, + MaxVolBytes BIGINT UNSIGNED DEFAULT 0, + AutoPrune TINYINT DEFAULT 0, + Recycle TINYINT DEFAULT 0, + ActionOnPurge TINYINT DEFAULT 0, + PoolType ENUM('Backup', 'Copy', 'Cloned', 'Archive', 
'Migration', 'Scratch') NOT NULL, + LabelType TINYINT DEFAULT 0, + LabelFormat TINYBLOB, + Enabled TINYINT DEFAULT 1, + ScratchPoolId INTEGER UNSIGNED DEFAULT 0 REFERENCES Pool, + RecyclePoolId INTEGER UNSIGNED DEFAULT 0 REFERENCES Pool, + NextPoolId INTEGER UNSIGNED DEFAULT 0 REFERENCES Pool, + MigrationHighBytes BIGINT UNSIGNED DEFAULT 0, + MigrationLowBytes BIGINT UNSIGNED DEFAULT 0, + MigrationTime BIGINT UNSIGNED DEFAULT 0, + UNIQUE (Name(128)), + PRIMARY KEY (PoolId) + ); + + +CREATE TABLE Client ( + ClientId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT, + Name TINYBLOB NOT NULL, + Uname TINYBLOB NOT NULL, /* full uname -a of client */ + AutoPrune TINYINT DEFAULT 0, + FileRetention BIGINT UNSIGNED DEFAULT 0, + JobRetention BIGINT UNSIGNED DEFAULT 0, + UNIQUE (Name(128)), + PRIMARY KEY(ClientId) + ); + +CREATE TABLE Log ( + LogId INTEGER UNSIGNED AUTO_INCREMENT, + JobId INTEGER UNSIGNED DEFAULT 0 REFERENCES Job, + Time DATETIME DEFAULT 0, + LogText BLOB NOT NULL, + PRIMARY KEY(LogId), + INDEX (JobId) + ); + + +CREATE TABLE BaseFiles ( + BaseId INTEGER UNSIGNED AUTO_INCREMENT, + BaseJobId INTEGER UNSIGNED NOT NULL REFERENCES Job, + JobId INTEGER UNSIGNED NOT NULL REFERENCES Job, + FileId BIGINT UNSIGNED NOT NULL REFERENCES File, + FileIndex INTEGER UNSIGNED, + PRIMARY KEY(BaseId) + ); + +CREATE TABLE UnsavedFiles ( + UnsavedId INTEGER UNSIGNED AUTO_INCREMENT, + JobId INTEGER UNSIGNED NOT NULL REFERENCES Job, + PathId INTEGER UNSIGNED NOT NULL REFERENCES Path, + FilenameId INTEGER UNSIGNED NOT NULL REFERENCES Filename, + PRIMARY KEY (UnsavedId) + ); + + + +CREATE TABLE Counters ( + Counter TINYBLOB NOT NULL, + MinValue INTEGER DEFAULT 0, + MaxValue INTEGER DEFAULT 0, + CurrentValue INTEGER DEFAULT 0, + WrapCounter TINYBLOB NOT NULL, + PRIMARY KEY (Counter(128)) + ); + +CREATE TABLE CDImages ( + MediaId INTEGER UNSIGNED NOT NULL, + LastBurn DATETIME NOT NULL, + PRIMARY KEY (MediaId) + ); + +CREATE TABLE Status ( + JobStatus CHAR(1) BINARY NOT NULL, + JobStatusLong BLOB, + PRIMARY KEY (JobStatus) + ); + +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('C', 'Created, not yet running'), + ('R', 'Running'), + ('B', 'Blocked'), + ('T', 'Completed successfully'), + ('E', 'Terminated with errors'), + ('e', 'Non-fatal error'), + ('f', 'Fatal error'), + ('D', 'Verify found differences'), + ('A', 'Canceled by user'), + ('F', 'Waiting for Client'), + ('S', 'Waiting for Storage daemon'), + ('m', 'Waiting for new media'), + ('M', 'Waiting for media mount'), + ('s', 'Waiting for storage resource'), + ('j', 'Waiting for job resource'), + ('c', 'Waiting for client resource'), + ('d', 'Waiting on maximum jobs'), + ('t', 'Waiting on start time'), + ('p', 'Waiting on higher priority jobs'), + ('i', 'Doing batch insert file records'), + ('a', 'SD despooling attributes'); + +CREATE TABLE Version ( + VersionId INTEGER UNSIGNED NOT NULL + ); + +-- Initialize Version +INSERT INTO Version (VersionId) VALUES (11); + diff --git a/src/win32/cats/make_postgresql_catalog_backup.cmd b/src/win32/cats/make_postgresql_catalog_backup.cmd new file mode 100644 index 00000000..759a1011 --- /dev/null +++ b/src/win32/cats/make_postgresql_catalog_backup.cmd @@ -0,0 +1,41 @@ +@ECHO off +REM +REM This script dumps your Bacula catalog in ASCII format +REM It works for MySQL, SQLite, and PostgreSQL +REM +REM %1 is the name of the database to be backed up and the name +REM of the output file (default = bacula +REM %2 is the user name with which to access the database +REM (default = bacula). 
+REM %3 is the password with which to access the database or "" if no password +REM (default "") +REM +REM +@ECHO on + +DEL /f "@working_dir_cmd@\%1.sql" 2>nul + +SET PGPASSWORD= + +IF NOT "%3"=="" SET PGPASSWORD=--password=%3 +"@SQL_BINDIR@\pg_dump" -c -U %2 %1 >"@working_dir_cmd@\%1.sql" + +@ECHO off +REM +REM To read back a MySQL database use: +REM cd @working_dir_cmd@ +REM rd /s /q @SQL_BINDIR@\..\data\bacula +REM mysql < bacula.sql +REM +REM To read back a SQLite database use: +REM cd @working_dir_cmd@ +REM del /f bacula.db +REM sqlite bacula.db < bacula.sql +REM +REM To read back a PostgreSQL database use: +REM cd @working_dir_cmd@ +REM dropdb bacula +REM createdb -T template0 -E SQL_ASCII bacula +REM psql bacula < bacula.sql +REM +EXIT /b 0 diff --git a/src/win32/cats/make_postgresql_tables.cmd b/src/win32/cats/make_postgresql_tables.cmd new file mode 100644 index 00000000..0dae4585 --- /dev/null +++ b/src/win32/cats/make_postgresql_tables.cmd @@ -0,0 +1,14 @@ +@ECHO off +REM +REM Script to create Bacula PostgreSQL tables +REM + +"@SQL_BINDIR@\psql" -f "@bin_dir_cmd@\make_postgresql_tables.sql" -d bacula %* +IF ERRORLEVEL 1 GOTO :ERROR +ECHO Creation of Bacula PostgreSQL tables succeeded. +EXIT /b 0 +GOTO :EOF + +:ERROR +ECHO Creation of Bacula PostgreSQL tables failed. +EXIT /b 1 diff --git a/src/win32/cats/make_postgresql_tables.sql b/src/win32/cats/make_postgresql_tables.sql new file mode 100644 index 00000000..e7d0d86a --- /dev/null +++ b/src/win32/cats/make_postgresql_tables.sql @@ -0,0 +1,380 @@ + +CREATE TABLE filename +( + filenameid serial not null, + name text not null, + primary key (filenameid) +); + +ALTER TABLE filename ALTER COLUMN name SET STATISTICS 1000; +CREATE UNIQUE INDEX filename_name_idx on filename (name); + +CREATE TABLE path +( + pathid serial not null, + path text not null, + primary key (pathid) +); + +ALTER TABLE path ALTER COLUMN path SET STATISTICS 1000; +CREATE UNIQUE INDEX path_name_idx on path (path); + +CREATE TABLE file +( + fileid bigserial not null, + fileindex integer not null default 0, + jobid integer not null, + pathid integer not null, + filenameid integer not null, + markid integer not null default 0, + lstat text not null, + md5 text not null, + primary key (fileid) +); + +CREATE INDEX file_jobid_idx on file (jobid); +CREATE INDEX file_fp_idx on file (filenameid, pathid); + +-- +-- Add this if you have a good number of job +-- that run at the same time +-- ALTER SEQUENCE file_fileid_seq CACHE 1000; + +-- +-- Possibly add one or more of the following indexes +-- if your Verifies are too slow. 
+-- +-- CREATE INDEX file_pathid_idx on file(pathid); +-- CREATE INDEX file_filenameid_idx on file(filenameid); +-- CREATE INDEX file_jpfid_idx on file (jobid, pathid, filenameid); + +CREATE TABLE job +( + jobid serial not null, + job text not null, + name text not null, + type char(1) not null, + level char(1) not null, + clientid integer default 0, + jobstatus char(1) not null, + schedtime timestamp without time zone, + starttime timestamp without time zone, + endtime timestamp without time zone, + realendtime timestamp without time zone, + jobtdate bigint default 0, + volsessionid integer default 0, + volsessiontime integer default 0, + jobfiles integer default 0, + jobbytes bigint default 0, + readbytes bigint default 0, + joberrors integer default 0, + jobmissingfiles integer default 0, + poolid integer default 0, + filesetid integer default 0, + purgedfiles smallint default 0, + hasbase smallint default 0, + priorjobid integer default 0, + primary key (jobid) +); + +CREATE INDEX job_name_idx on job (name); + +-- Create a table like Job for long term statistics +CREATE TABLE JobHisto (LIKE Job); +CREATE INDEX jobhisto_idx ON jobhisto ( starttime ); + + +CREATE TABLE Location ( + LocationId serial not null, + Location text not null, + Cost integer default 0, + Enabled smallint, + primary key (LocationId) +); + + +CREATE TABLE fileset +( + filesetid serial not null, + fileset text not null, + md5 text not null, + createtime timestamp without time zone not null, + primary key (filesetid) +); + +CREATE INDEX fileset_name_idx on fileset (fileset); + +CREATE TABLE jobmedia +( + jobmediaid serial not null, + jobid integer not null, + mediaid integer not null, + firstindex integer default 0, + lastindex integer default 0, + startfile integer default 0, + endfile integer default 0, + startblock bigint default 0, + endblock bigint default 0, + volindex integer default 0, + copy integer default 0, + primary key (jobmediaid) +); + +CREATE INDEX job_media_job_id_media_id_idx on jobmedia (jobid, mediaid); + +CREATE TABLE media +( + mediaid serial not null, + volumename text not null, + slot integer default 0, + poolid integer default 0, + mediatype text not null, + mediatypeid integer default 0, + labeltype integer default 0, + firstwritten timestamp without time zone, + lastwritten timestamp without time zone, + labeldate timestamp without time zone, + voljobs integer default 0, + volfiles integer default 0, + volblocks integer default 0, + volmounts integer default 0, + volbytes bigint default 0, + volparts integer default 0, + volerrors integer default 0, + volwrites integer default 0, + volcapacitybytes bigint default 0, + volstatus text not null + check (volstatus in ('Full','Archive','Append', + 'Recycle','Purged','Read-Only','Disabled', + 'Error','Busy','Used','Cleaning','Scratch')), + enabled smallint default 1, + recycle smallint default 0, + ActionOnPurge smallint default 0, + volretention bigint default 0, + voluseduration bigint default 0, + maxvoljobs integer default 0, + maxvolfiles integer default 0, + maxvolbytes bigint default 0, + inchanger smallint default 0, + StorageId integer default 0, + DeviceId integer default 0, + mediaaddressing smallint default 0, + volreadtime bigint default 0, + volwritetime bigint default 0, + endfile integer default 0, + endblock bigint default 0, + LocationId integer default 0, + recyclecount integer default 0, + initialwrite timestamp without time zone, + scratchpoolid integer default 0, + recyclepoolid integer default 0, + comment text, + primary 
key (mediaid) +); + +create unique index media_volumename_id on media (volumename); + + +CREATE TABLE MediaType ( + MediaTypeId SERIAL, + MediaType TEXT NOT NULL, + ReadOnly INTEGER DEFAULT 0, + PRIMARY KEY(MediaTypeId) + ); + +CREATE TABLE Storage ( + StorageId SERIAL, + Name TEXT NOT NULL, + AutoChanger INTEGER DEFAULT 0, + PRIMARY KEY(StorageId) + ); + +CREATE TABLE Device ( + DeviceId SERIAL, + Name TEXT NOT NULL, + MediaTypeId INTEGER NOT NULL, + StorageId INTEGER NOT NULL, + DevMounts INTEGER NOT NULL DEFAULT 0, + DevReadBytes BIGINT NOT NULL DEFAULT 0, + DevWriteBytes BIGINT NOT NULL DEFAULT 0, + DevReadBytesSinceCleaning BIGINT NOT NULL DEFAULT 0, + DevWriteBytesSinceCleaning BIGINT NOT NULL DEFAULT 0, + DevReadTime BIGINT NOT NULL DEFAULT 0, + DevWriteTime BIGINT NOT NULL DEFAULT 0, + DevReadTimeSinceCleaning BIGINT NOT NULL DEFAULT 0, + DevWriteTimeSinceCleaning BIGINT NOT NULL DEFAULT 0, + CleaningDate timestamp without time zone, + CleaningPeriod BIGINT NOT NULL DEFAULT 0, + PRIMARY KEY(DeviceId) + ); + + +CREATE TABLE pool +( + poolid serial not null, + name text not null, + numvols integer default 0, + maxvols integer default 0, + useonce smallint default 0, + usecatalog smallint default 0, + acceptanyvolume smallint default 0, + volretention bigint default 0, + voluseduration bigint default 0, + maxvoljobs integer default 0, + maxvolfiles integer default 0, + maxvolbytes bigint default 0, + autoprune smallint default 0, + recycle smallint default 0, + ActionOnPurge smallint default 0, + pooltype text + check (pooltype in ('Backup','Copy','Cloned','Archive','Migration','Scratch')), + labeltype integer default 0, + labelformat text not null, + enabled smallint default 1, + scratchpoolid integer default 0, + recyclepoolid integer default 0, + NextPoolId integer default 0, + MigrationHighBytes BIGINT DEFAULT 0, + MigrationLowBytes BIGINT DEFAULT 0, + MigrationTime BIGINT DEFAULT 0, + primary key (poolid) +); + +CREATE INDEX pool_name_idx on pool (name); + +CREATE TABLE client +( + clientid serial not null, + name text not null, + uname text not null, + autoprune smallint default 0, + fileretention bigint default 0, + jobretention bigint default 0, + primary key (clientid) +); + +create unique index client_name_idx on client (name); + +CREATE TABLE Log +( + LogId serial not null, + JobId integer not null, + Time timestamp without time zone, + LogText text not null, + primary key (LogId) +); +create index log_name_idx on Log (JobId); + +CREATE TABLE LocationLog ( + LocLogId SERIAL NOT NULL, + Date timestamp without time zone, + Comment TEXT NOT NULL, + MediaId INTEGER DEFAULT 0, + LocationId INTEGER DEFAULT 0, + newvolstatus text not null + check (newvolstatus in ('Full','Archive','Append', + 'Recycle','Purged','Read-Only','Disabled', + 'Error','Busy','Used','Cleaning','Scratch')), + newenabled smallint, + PRIMARY KEY(LocLogId) +); + + + +CREATE TABLE counters +( + counter text not null, + minvalue integer default 0, + maxvalue integer default 0, + currentvalue integer default 0, + wrapcounter text not null, + primary key (counter) +); + + + +CREATE TABLE basefiles +( + baseid serial not null, + jobid integer not null, + fileid bigint not null, + fileindex integer , + basejobid integer , + primary key (baseid) +); + +CREATE TABLE unsavedfiles +( + UnsavedId integer not null, + jobid integer not null, + pathid integer not null, + filenameid integer not null, + primary key (UnsavedId) +); + +CREATE TABLE CDImages +( + MediaId integer not null, + LastBurn timestamp without time zone 
not null, + primary key (MediaId) +); + + +CREATE TABLE version +( + versionid integer not null +); + +CREATE TABLE Status ( + JobStatus CHAR(1) NOT NULL, + JobStatusLong TEXT, + PRIMARY KEY (JobStatus) + ); + +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('C', 'Created, not yet running'); +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('R', 'Running'); +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('B', 'Blocked'); +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('T', 'Completed successfully'); +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('E', 'Terminated with errors'); +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('e', 'Non-fatal error'); +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('f', 'Fatal error'); +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('D', 'Verify found differences'); +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('A', 'Canceled by user'); +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('F', 'Waiting for Client'); +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('S', 'Waiting for Storage daemon'); +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('m', 'Waiting for new media'); +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('M', 'Waiting for media mount'); +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('s', 'Waiting for storage resource'); +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('j', 'Waiting for job resource'); +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('c', 'Waiting for client resource'); +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('d', 'Waiting on maximum jobs'); +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('t', 'Waiting on start time'); +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('p', 'Waiting on higher priority jobs'); +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('a', 'SD despooling attributes'); +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('i', 'Doing batch insert file records'); + +INSERT INTO Version (VersionId) VALUES (11); + +-- Make sure we have appropriate permissions + diff --git a/src/win32/cats/make_sqlite3_catalog_backup.cmd b/src/win32/cats/make_sqlite3_catalog_backup.cmd new file mode 100644 index 00000000..4c968614 --- /dev/null +++ b/src/win32/cats/make_sqlite3_catalog_backup.cmd @@ -0,0 +1,38 @@ +@ECHO off +REM +REM This script dumps your Bacula catalog in ASCII format +REM It works for MySQL, SQLite, and PostgreSQL +REM +REM %1 is the name of the database to be backed up and the name +REM of the output file (default = bacula +REM %2 is the user name with which to access the database +REM (default = bacula). 
+REM %3 is the password with which to access the database or "" if no password +REM (default "") +REM +REM +@ECHO on + +DEL /f "@working_dir_cmd@\%1.sql" 2>nul + +ECHO .dump | "@bin_dir_cmd@\sqlite3" "@working_dir_cmd@\%1.db" > "@working_dir_cmd@\%1.sql" + +@ECHO off +REM +REM To read back a MySQL database use: +REM cd @working_dir_cmd@ +REM rd /s /q @SQL_BINDIR@\..\data\bacula +REM mysql < bacula.sql +REM +REM To read back a SQLite database use: +REM cd @working_dir_cmd@ +REM del /f bacula.db +REM sqlite bacula.db < bacula.sql +REM +REM To read back a PostgreSQL database use: +REM cd @working_dir_cmd@ +REM dropdb bacula +REM createdb -T template0 -E SQL_ASCII bacula +REM psql bacula < bacula.sql +REM +EXIT /b 0 diff --git a/src/win32/cats/make_sqlite3_tables.cmd b/src/win32/cats/make_sqlite3_tables.cmd new file mode 100644 index 00000000..aa739806 --- /dev/null +++ b/src/win32/cats/make_sqlite3_tables.cmd @@ -0,0 +1,7 @@ +@ECHO off +REM +REM Script to create Bacula SQLite tables + +"@bin_dir_cmd@\sqlite3" %* "@working_dir_cmd@\bacula.db" < "@bin_dir_cmd@\make_sqlite3_tables.sql" + +EXIT /b 0 diff --git a/src/win32/cats/make_sqlite3_tables.sql b/src/win32/cats/make_sqlite3_tables.sql new file mode 100644 index 00000000..83a4e4ca --- /dev/null +++ b/src/win32/cats/make_sqlite3_tables.sql @@ -0,0 +1,384 @@ + +CREATE TABLE Filename ( + FilenameId INTEGER, + Name TEXT DEFAULT '', + PRIMARY KEY(FilenameId) + ); + +CREATE INDEX inx1 ON Filename (Name); + +CREATE TABLE Path ( + PathId INTEGER, + Path TEXT DEFAULT '', + PRIMARY KEY(PathId) + ); + +CREATE INDEX inx2 ON Path (Path); + + +CREATE TABLE File ( + FileId INTEGER, + FileIndex INTEGER UNSIGNED NOT NULL, + JobId INTEGER UNSIGNED REFERENCES Job NOT NULL, + PathId INTEGER UNSIGNED REFERENCES Path NOT NULL, + FilenameId INTEGER UNSIGNED REFERENCES Filename NOT NULL, + MarkId INTEGER UNSIGNED DEFAULT 0, + LStat VARCHAR(255) NOT NULL, + MD5 VARCHAR(255) NOT NULL, + PRIMARY KEY(FileId) + ); + +CREATE INDEX inx3 ON File (JobId); +CREATE INDEX inx4 ON File (FilenameId, PathId); +-- +-- Possibly add one or more of the following indexes +-- if your Verifies are too slow. 
+-- +-- CREATE INDEX inx4 ON File (PathId); +-- CREATE INDEX inx5 ON File (FileNameId); +-- CREATE INDEX inx9 ON File (JobId, PathId, FilenameId); + +CREATE TABLE Job ( + JobId INTEGER, + Job VARCHAR(128) NOT NULL, + Name VARCHAR(128) NOT NULL, + Type CHAR(1) NOT NULL, + Level CHAR(1) NOT NULL, + ClientId INTEGER REFERENCES Client DEFAULT 0, + JobStatus CHAR(1) NOT NULL, + SchedTime DATETIME NOT NULL, + StartTime DATETIME DEFAULT 0, + EndTime DATETIME DEFAULT 0, + RealEndTime DATETIME DEFAULT 0, + JobTDate BIGINT UNSIGNED DEFAULT 0, + VolSessionId INTEGER UNSIGNED DEFAULT 0, + VolSessionTime INTEGER UNSIGNED DEFAULT 0, + JobFiles INTEGER UNSIGNED DEFAULT 0, + JobBytes BIGINT UNSIGNED DEFAULT 0, + ReadBytes BIGINT UNSIGNED DEFAULT 0, + JobErrors INTEGER UNSIGNED DEFAULT 0, + JobMissingFiles INTEGER UNSIGNED DEFAULT 0, + PoolId INTEGER UNSIGNED REFERENCES Pool DEFAULT 0, + FileSetId INTEGER UNSIGNED REFERENCES FileSet DEFAULT 0, + PriorJobId INTEGER UNSIGNED REFERENCES Job DEFAULT 0, + PurgedFiles TINYINT DEFAULT 0, + HasBase TINYINT DEFAULT 0, + PRIMARY KEY(JobId) + ); +CREATE INDEX inx6 ON Job (Name); + +-- Create a table like Job for long term statistics +CREATE TABLE JobHisto ( + JobId INTEGER, + Job VARCHAR(128) NOT NULL, + Name VARCHAR(128) NOT NULL, + Type CHAR(1) NOT NULL, + Level CHAR(1) NOT NULL, + ClientId INTEGER DEFAULT 0, + JobStatus CHAR(1) NOT NULL, + SchedTime DATETIME NOT NULL, + StartTime DATETIME DEFAULT 0, + EndTime DATETIME DEFAULT 0, + RealEndTime DATETIME DEFAULT 0, + JobTDate BIGINT UNSIGNED DEFAULT 0, + VolSessionId INTEGER UNSIGNED DEFAULT 0, + VolSessionTime INTEGER UNSIGNED DEFAULT 0, + JobFiles INTEGER UNSIGNED DEFAULT 0, + JobBytes BIGINT UNSIGNED DEFAULT 0, + ReadBytes BIGINT UNSIGNED DEFAULT 0, + JobErrors INTEGER UNSIGNED DEFAULT 0, + JobMissingFiles INTEGER UNSIGNED DEFAULT 0, + PoolId INTEGER UNSIGNED DEFAULT 0, + FileSetId INTEGER UNSIGNED DEFAULT 0, + PriorJobId INTEGER UNSIGNED DEFAULT 0, + PurgedFiles TINYINT DEFAULT 0, + HasBase TINYINT DEFAULT 0 + ); +CREATE INDEX inx61 ON JobHisto (StartTime); + +CREATE TABLE Location ( + LocationId INTEGER, + Location TEXT NOT NULL, + Cost INTEGER DEFAULT 0, + Enabled TINYINT, + PRIMARY KEY(LocationId) + ); + +CREATE TABLE LocationLog ( + LocLogId INTEGER, + Date DATETIME NOT NULL, + Comment TEXT NOT NULL, + MediaId INTEGER UNSIGNED REFERENCES Media DEFAULT 0, + LocationId INTEGER UNSIGNED REFERENCES LocationId DEFAULT 0, + NewVolStatus VARCHAR(20) NOT NULL, + NewEnabled TINYINT NOT NULL, + PRIMARY KEY(LocLogId) +); + + +CREATE TABLE Log ( + LogId INTEGER, + JobId INTEGER UNSIGNED REFERENCES Job NOT NULL, + Time DATETIME NOT NULL, + LogText TEXT NOT NULL, + PRIMARY KEY(LogId) + ); +CREATE INDEX LogInx1 ON Log (JobId); + + +CREATE TABLE FileSet ( + FileSetId INTEGER, + FileSet VARCHAR(128) NOT NULL, + MD5 VARCHAR(25) NOT NULL, + CreateTime DATETIME DEFAULT 0, + PRIMARY KEY(FileSetId) + ); + +CREATE TABLE JobMedia ( + JobMediaId INTEGER, + JobId INTEGER UNSIGNED REFERENCES Job NOT NULL, + MediaId INTEGER UNSIGNED REFERENCES Media NOT NULL, + FirstIndex INTEGER UNSIGNED NOT NULL, + LastIndex INTEGER UNSIGNED NOT NULL, + StartFile INTEGER UNSIGNED DEFAULT 0, + EndFile INTEGER UNSIGNED DEFAULT 0, + StartBlock INTEGER UNSIGNED DEFAULT 0, + EndBlock INTEGER UNSIGNED DEFAULT 0, + VolIndex INTEGER UNSIGNED DEFAULT 0, + Copy INTEGER UNSIGNED DEFAULT 0, + PRIMARY KEY(JobMediaId) + ); + +CREATE INDEX inx7 ON JobMedia (JobId, MediaId); + + +CREATE TABLE Media ( + MediaId INTEGER, + VolumeName VARCHAR(128) NOT NULL, + Slot 
INTEGER DEFAULT 0, + PoolId INTEGER UNSIGNED REFERENCES Pool DEFAULT 0, + MediaType VARCHAR(128) NOT NULL, + MediaTypeId INTEGER UNSIGNED REFERENCES MediaType DEFAULT 0, + LabelType TINYINT DEFAULT 0, + FirstWritten DATETIME DEFAULT 0, + LastWritten DATETIME DEFAULT 0, + LabelDate DATETIME DEFAULT 0, + VolJobs INTEGER UNSIGNED DEFAULT 0, + VolFiles INTEGER UNSIGNED DEFAULT 0, + VolBlocks INTEGER UNSIGNED DEFAULT 0, + VolMounts INTEGER UNSIGNED DEFAULT 0, + VolBytes BIGINT UNSIGNED DEFAULT 0, + VolParts INTEGER UNSIGNED DEFAULT 0, + VolErrors INTEGER UNSIGNED DEFAULT 0, + VolWrites INTEGER UNSIGNED DEFAULT 0, + VolCapacityBytes BIGINT UNSIGNED DEFAULT 0, + VolStatus VARCHAR(20) NOT NULL, + Enabled TINYINT DEFAULT 1, + Recycle TINYINT DEFAULT 0, + ActionOnPurge TINYINT DEFAULT 0, + VolRetention BIGINT UNSIGNED DEFAULT 0, + VolUseDuration BIGINT UNSIGNED DEFAULT 0, + MaxVolJobs INTEGER UNSIGNED DEFAULT 0, + MaxVolFiles INTEGER UNSIGNED DEFAULT 0, + MaxVolBytes BIGINT UNSIGNED DEFAULT 0, + InChanger TINYINT DEFAULT 0, + StorageId INTEGER UNSIGNED REFERENCES Storage DEFAULT 0, + DeviceId INTEGER UNSIGNED REFERENCES Device DEFAULT 0, + MediaAddressing TINYINT DEFAULT 0, + VolReadTime BIGINT UNSIGNED DEFAULT 0, + VolWriteTime BIGINT UNSIGNED DEFAULT 0, + EndFile INTEGER UNSIGNED DEFAULT 0, + EndBlock INTEGER UNSIGNED DEFAULT 0, + LocationId INTEGER UNSIGNED REFERENCES Location DEFAULT 0, + RecycleCount INTEGER UNSIGNED DEFAULT 0, + InitialWrite DATETIME DEFAULT 0, + ScratchPoolId INTEGER UNSIGNED REFERENCES Pool DEFAULT 0, + RecyclePoolId INTEGER UNSIGNED REFERENCES Pool DEFAULT 0, + Comment TEXT, + PRIMARY KEY(MediaId) + ); + +CREATE INDEX inx8 ON Media (PoolId); + +CREATE TABLE MediaType ( + MediaTypeId INTEGER, + MediaType VARCHAR(128) NOT NULL, + ReadOnly TINYINT DEFAULT 0, + PRIMARY KEY(MediaTypeId) + ); + +CREATE TABLE Storage ( + StorageId INTEGER, + Name VARCHAR(128) NOT NULL, + AutoChanger TINYINT DEFAULT 0, + PRIMARY KEY(StorageId) + ); + +CREATE TABLE Device ( + DeviceId INTEGER, + Name VARCHAR(128) NOT NULL, + MediaTypeId INTEGER UNSIGNED REFERENCES MediaType NOT NULL, + StorageId INTEGER UNSIGNED REFERENCES Storage, + DevMounts INTEGER UNSIGNED DEFAULT 0, + DevReadBytes BIGINT UNSIGNED DEFAULT 0, + DevWriteBytes BIGINT UNSIGNED DEFAULT 0, + DevReadBytesSinceCleaning BIGINT UNSIGNED DEFAULT 0, + DevWriteBytesSinceCleaning BIGINT UNSIGNED DEFAULT 0, + DevReadTime BIGINT UNSIGNED DEFAULT 0, + DevWriteTime BIGINT UNSIGNED DEFAULT 0, + DevReadTimeSinceCleaning BIGINT UNSIGNED DEFAULT 0, + DevWriteTimeSinceCleaning BIGINT UNSIGNED DEFAULT 0, + CleaningDate DATETIME DEFAULT 0, + CleaningPeriod BIGINT UNSIGNED DEFAULT 0, + PRIMARY KEY(DeviceId) + ); + + +CREATE TABLE Pool ( + PoolId INTEGER, + Name VARCHAR(128) NOT NULL, + NumVols INTEGER UNSIGNED DEFAULT 0, + MaxVols INTEGER UNSIGNED DEFAULT 0, + UseOnce TINYINT DEFAULT 0, + UseCatalog TINYINT DEFAULT 1, + AcceptAnyVolume TINYINT DEFAULT 0, + VolRetention BIGINT UNSIGNED DEFAULT 0, + VolUseDuration BIGINT UNSIGNED DEFAULT 0, + MaxVolJobs INTEGER UNSIGNED DEFAULT 0, + MaxVolFiles INTEGER UNSIGNED DEFAULT 0, + MaxVolBytes BIGINT UNSIGNED DEFAULT 0, + AutoPrune TINYINT DEFAULT 0, + Recycle TINYINT DEFAULT 0, + ActionOnPurge TINYINT DEFAULT 0, + PoolType VARCHAR(20) NOT NULL, + LabelType TINYINT DEFAULT 0, + LabelFormat VARCHAR(128) NOT NULL, + Enabled TINYINT DEFAULT 1, + ScratchPoolId INTEGER UNSIGNED REFERENCES Pool DEFAULT 0, + RecyclePoolId INTEGER UNSIGNED REFERENCES Pool DEFAULT 0, + NextPoolId INTEGER UNSIGNED REFERENCES Pool DEFAULT 0, 
+ MigrationHighBytes BIGINT UNSIGNED DEFAULT 0, + MigrationLowBytes BIGINT UNSIGNED DEFAULT 0, + MigrationTime BIGINT UNSIGNED DEFAULT 0, + UNIQUE (Name), + PRIMARY KEY (PoolId) + ); + + +CREATE TABLE Client ( + ClientId INTEGER, + Name VARCHAR(128) NOT NULL, + Uname VARCHAR(255) NOT NULL, -- uname -a field + AutoPrune TINYINT DEFAULT 0, + FileRetention BIGINT UNSIGNED DEFAULT 0, + JobRetention BIGINT UNSIGNED DEFAULT 0, + UNIQUE (Name), + PRIMARY KEY(ClientId) + ); + +CREATE TABLE BaseFiles ( + BaseId INTEGER, + BaseJobId INTEGER UNSIGNED REFERENCES Job NOT NULL, + JobId INTEGER UNSIGNED REFERENCES Job NOT NULL, + FileId INTEGER UNSIGNED REFERENCES File NOT NULL, + FileIndex INTEGER UNSIGNED, + PRIMARY KEY(BaseId) + ); + +CREATE TABLE UnsavedFiles ( + UnsavedId INTEGER, + JobId INTEGER UNSIGNED REFERENCES Job NOT NULL, + PathId INTEGER UNSIGNED REFERENCES Path NOT NULL, + FilenameId INTEGER UNSIGNED REFERENCES Filename NOT NULL, + PRIMARY KEY (UnsavedId) + ); + + +CREATE TABLE NextId ( + id INTEGER UNSIGNED DEFAULT 0, + TableName TEXT NOT NULL, + PRIMARY KEY (TableName) + ); + + + +-- Initialize JobId to start at 1 +INSERT INTO NextId (id, TableName) VALUES (1, 'Job'); + +CREATE TABLE Version ( + VersionId INTEGER UNSIGNED NOT NULL + ); + + +CREATE TABLE Counters ( + Counter TEXT NOT NULL, + MinValue INTEGER DEFAULT 0, + MaxValue INTEGER DEFAULT 0, + CurrentValue INTEGER DEFAULT 0, + WrapCounter TEXT NOT NULL, + PRIMARY KEY (Counter) + ); + +CREATE TABLE CDImages ( + MediaId INTEGER UNSIGNED NOT NULL, + LastBurn DATETIME NOT NULL, + PRIMARY KEY (MediaId) + ); + + +CREATE TABLE Status ( + JobStatus CHAR(1) NOT NULL, + JobStatusLong BLOB, + PRIMARY KEY (JobStatus) + ); + +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('C', 'Created, not yet running'); +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('R', 'Running'); +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('B', 'Blocked'); +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('T', 'Completed successfully'); +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('E', 'Terminated with errors'); +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('e', 'Non-fatal error'); +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('f', 'Fatal error'); +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('D', 'Verify found differences'); +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('A', 'Canceled by user'); +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('F', 'Waiting for Client'); +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('S', 'Waiting for Storage daemon'); +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('m', 'Waiting for new media'); +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('M', 'Waiting for media mount'); +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('s', 'Waiting for storage resource'); +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('j', 'Waiting for job resource'); +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('c', 'Waiting for client resource'); +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('d', 'Waiting on maximum jobs'); +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('t', 'Waiting on start time'); +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('p', 'Waiting on higher priority jobs'); +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('a', 'SD despooling attributes'); +INSERT INTO Status (JobStatus,JobStatusLong) VALUES + ('i', 'Doing batch insert file records'); + + +-- Initialize 
Version +INSERT INTO Version (VersionId) VALUES (11); + + +PRAGMA default_cache_size = 100000; +PRAGMA synchronous = NORMAL; diff --git a/src/win32/compat/Makefile b/src/win32/compat/Makefile new file mode 100644 index 00000000..d1fa6772 --- /dev/null +++ b/src/win32/compat/Makefile @@ -0,0 +1,66 @@ +# +# Makefile for win32 bacula executables +# Using MinGW cross-compiler on GNU/Linux +# +# +# Author: Howard Thomson +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# Written for Bacula by Howard Thomson, April 2006 +# + +include ../Makefile.inc + +INCLUDES = \ + $(INCLUDE_GCC) \ + $(INCLUDE_MINGW) \ + $(INCLUDE_PTHREADS) \ + $(INCLUDE_BACULA) \ + $(INCLUDE_ZLIB) \ + $(INCLUDE_VSS) \ + $(INCLUDE_ICONS) \ + $(INCLUDE_OPENSSL) + +DEFINES = \ + -DWIN32 \ + $(HAVES) + +###################################################################### + +# Files files in src/win32/compat + +LIB_OBJS = \ + $(OBJDIR)/compat.o \ + $(OBJDIR)/getopt.o \ + $(OBJDIR)/print.o \ + $(OBJDIR)/vss.o \ + $(OBJDIR)/vss_XP.o \ + $(OBJDIR)/vss_W2K3.o \ + $(OBJDIR)/vss_Vista.o + +###################################################################### + +# Targets + +.PHONY: all clean + +all: $(LIBDIR)/libcompat.a + +clean: + @echo "Cleaning `pwd`" + $(ECHO_CMD)rm -f $(OBJDIR)/*.[od] $(LIBDIR)/libcompat.a + +# +# Rules +# + +$(LIBDIR)/libcompat.a: $(LIB_OBJS) + @echo "Updating archive $@" + $(call checkdir,$@) + $(ECHO_CMD)$(AR) rs $@ $^ + +include ../Makefile.rules + +ifneq ($(MAKECMDGOALS),clean) +include $(patsubst %.o,%.d,$(filter-out %.res,$(LIB_OBJS))) +endif diff --git a/src/win32/compat/alloca.h b/src/win32/compat/alloca.h new file mode 100644 index 00000000..e69de29b diff --git a/src/win32/compat/arpa/inet.h b/src/win32/compat/arpa/inet.h new file mode 100644 index 00000000..e69de29b diff --git a/src/win32/compat/compat.cpp b/src/win32/compat/compat.cpp new file mode 100644 index 00000000..fa27669b --- /dev/null +++ b/src/win32/compat/compat.cpp @@ -0,0 +1,2958 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2019 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +// -*- Mode: C++ -*- +// compat.cpp -- compatibilty layer to make bacula-fd run +// natively under windows +// +// Copyright transferred from Raider Solutions, Inc to +// Kern Sibbald and John Walker by express permission. +// +// Author : Christopher S. Hull +// Created On : Sat Jan 31 15:55:00 2004 + +#include "bacula.h" +#include "compat.h" +#include "jcr.h" +#include "findlib/find.h" + +/* Note, if you want to see what Windows variables and structures + * are defined, bacula.h includes , which is found in: + * + * cross-tools/mingw32/mingw32/include + * or + * cross-tools/mingw-w64/x86_64-pc-mingw32/include + * + * depending on whether we are building the 32 bit version or + * the 64 bit version. + */ + +static const int dbglvl = 500; + +#define b_errno_win32 (1<<29) + +#define MAX_PATHLENGTH 1024 + +/** + UTF-8 to UCS2 path conversion is expensive, + so we cache the conversion. 
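  (Editor's note: in this file the cache is the single UTF-8/UCS-2 pair
  g_pWin32ConvUTF8Cache / g_pWin32ConvUCS2Cache declared below, guarded by
  Win32Convmutex and consulted from make_win32_path_UTF8_2_wchar().)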
During backup the + conversion is called 3 times (lstat, attribs, open), + by using the cache this is reduced to 1 time + */ +static POOLMEM *g_pWin32ConvUTF8Cache = NULL; +static POOLMEM *g_pWin32ConvUCS2Cache = NULL; +static DWORD g_dwWin32ConvUTF8strlen = 0; +static pthread_mutex_t Win32Convmutex = PTHREAD_MUTEX_INITIALIZER; + +/* Forward referenced functions */ +static const char *errorString(void); + +/* The following functions are available only in the FileDaemon with VSS + * These functions uses the VSSObject to resolve a Path to a Snapshot Path, + * the VSSObject is available "per job", and some jobs such as Restore or Verify + * may not have a VSSObject. + */ + +static BOOL default_VSSPathConverter() +{ + return false; +} + +static t_pVSSPathConvert g_pVSSPathConvert = NULL; +static t_pVSSPathConvertW g_pVSSPathConvertW = NULL; +static t_pVSSPathConverter g_pVSSPathConverter = default_VSSPathConverter; /* To know if we can use the VSSPath functions */ + + +void SetVSSPathConvert(t_pVSSPathConverter pPathConverter, t_pVSSPathConvert pPathConvert, t_pVSSPathConvertW pPathConvertW) +{ + g_pVSSPathConvert = pPathConvert; + g_pVSSPathConvertW = pPathConvertW; + g_pVSSPathConverter = pPathConverter; +} + +static void Win32ConvInitCache() +{ + if (g_pWin32ConvUTF8Cache) { + return; + } + g_pWin32ConvUTF8Cache = get_pool_memory(PM_FNAME); + g_pWin32ConvUCS2Cache = get_pool_memory(PM_FNAME); +} + +void Win32ConvCleanupCache() +{ + P(Win32Convmutex); + if (g_pWin32ConvUTF8Cache) { + free_pool_memory(g_pWin32ConvUTF8Cache); + g_pWin32ConvUTF8Cache = NULL; + } + + if (g_pWin32ConvUCS2Cache) { + free_pool_memory(g_pWin32ConvUCS2Cache); + g_pWin32ConvUCS2Cache = NULL; + } + + g_dwWin32ConvUTF8strlen = 0; + V(Win32Convmutex); +} + + +/* to allow the usage of the original version in this file here */ +#undef fputs + + +//#define USE_WIN32_COMPAT_IO 1 +#define USE_WIN32_32KPATHCONVERSION 1 + +extern DWORD g_platform_id; +extern DWORD g_MinorVersion; + +/* From Microsoft SDK (KES) is the diff between Jan 1 1601 and Jan 1 1970 */ +#ifdef HAVE_MINGW +#define WIN32_FILETIME_ADJUST 0x19DB1DED53E8000ULL +#else +#define WIN32_FILETIME_ADJUST 0x19DB1DED53E8000I64 +#endif + +#define WIN32_FILETIME_SCALE 10000000 // 100ns/second + +/** + * Convert from UTF-8 to VSS Windows path/file + * Used by compatibility layer for Unix system calls + */ +static void conv_unix_to_vss_win32_path(const char *name, char *win32_name, DWORD dwSize) +{ + const char *fname = name; + char *tname = win32_name; + + Dmsg0(dbglvl, "Enter convert_unix_to_win32_path\n"); + + if (IsPathSeparator(name[0]) && + IsPathSeparator(name[1]) && + name[2] == '.' && + IsPathSeparator(name[3])) { + + *win32_name++ = '\\'; + *win32_name++ = '\\'; + *win32_name++ = '.'; + *win32_name++ = '\\'; + + name += 4; + } else if (g_platform_id != VER_PLATFORM_WIN32_WINDOWS && !g_pVSSPathConverter()) { + /* allow path to be 32767 bytes */ + *win32_name++ = '\\'; + *win32_name++ = '\\'; + *win32_name++ = '?'; + *win32_name++ = '\\'; + } + + while (*name) { + /** Check for Unix separator and convert to Win32 */ + if (name[0] == '/' && name[1] == '/') { /* double slash? 
*/ + name++; /* yes, skip first one */ + } + if (*name == '/') { + *win32_name++ = '\\'; /* convert char */ + /* If Win32 separator that is "quoted", remove quote */ + } else if (*name == '\\' && name[1] == '\\') { + *win32_name++ = '\\'; + name++; /* skip first \ */ + } else { + *win32_name++ = *name; /* copy character */ + } + name++; + } + /** Strip any trailing slash, if we stored something + * but leave "c:\" with backslash (root directory case + */ + if (*fname != 0 && win32_name[-1] == '\\' && strlen (fname) != 3) { + win32_name[-1] = 0; + } else { + *win32_name = 0; + } + + /** here we convert to VSS specific file name which + can get longer because VSS will make something like + \\\\?\\GLOBALROOT\\Device\\HarddiskVolumeShadowCopy1\\bacula\\uninstall.exe + from c:\bacula\uninstall.exe + */ + Dmsg1(dbglvl, "path=%s\n", tname); + if (g_pVSSPathConverter()) { + POOLMEM *pszBuf = get_pool_memory (PM_FNAME); + pszBuf = check_pool_memory_size(pszBuf, dwSize); + bstrncpy(pszBuf, tname, strlen(tname)+1); + g_pVSSPathConvert(pszBuf, tname, dwSize); + free_pool_memory(pszBuf); + } + + Dmsg1(dbglvl, "Leave cvt_u_to_win32_path path=%s\n", tname); +} + +/** Conversion of a Unix filename to a Win32 filename */ +void unix_name_to_win32(POOLMEM **win32_name, const char *name) +{ + /* One extra byte should suffice, but we double it */ + /* add MAX_PATH bytes for VSS shadow copy name */ + DWORD dwSize = 2*strlen(name)+MAX_PATH; + *win32_name = check_pool_memory_size(*win32_name, dwSize); + conv_unix_to_vss_win32_path(name, *win32_name, dwSize); +} + + +/** + * This function expects an UCS-encoded standard wchar_t in pszUCSPath and + * will complete the input path to an absolue path of the form \\?\c:\path\file + * + * With this trick, it is possible to have 32K characters long paths. + * + * Optionally one can use pBIsRawPath to determine id pszUCSPath contains a path + * to a raw windows partition. + * + * created 02/27/2006 Thorsten Engel + */ +static POOLMEM* +make_wchar_win32_path(POOLMEM *pszUCSPath, BOOL *pBIsRawPath /*= NULL*/) +{ + + Dmsg0(dbglvl, "Enter wchar_win32_path\n"); + if (pBIsRawPath) { + *pBIsRawPath = FALSE; /* Initialize, set later */ + } + + if (!p_GetCurrentDirectoryW) { + Dmsg0(dbglvl, "Leave wchar_win32_path no change \n"); + return pszUCSPath; + } + + wchar_t *name = (wchar_t *)pszUCSPath; + + /* if it has already the desired form, exit without changes */ + if (wcslen(name) > 3 && wcsncmp(name, L"\\\\?\\", 4) == 0) { + Dmsg0(dbglvl, "Leave wchar_win32_path no change \n"); + return pszUCSPath; + } + + wchar_t *pwszBuf = (wchar_t *)get_pool_memory(PM_FNAME); + wchar_t *pwszCurDirBuf = (wchar_t *)get_pool_memory(PM_FNAME); + DWORD dwCurDirPathSize = 0; + + /* get buffer with enough size (name+max 6. wchars+1 null terminator */ + DWORD dwBufCharsNeeded = (wcslen(name)+7); + pwszBuf = (wchar_t *)check_pool_memory_size((POOLMEM *)pwszBuf, dwBufCharsNeeded*sizeof(wchar_t)); + + /* add \\?\ to support 32K long filepaths + it is important to make absolute paths, so we add drive and + current path if necessary */ + + BOOL bAddDrive = TRUE; + BOOL bAddCurrentPath = TRUE; + BOOL bAddPrefix = TRUE; + + /* does path begin with drive? if yes, it is absolute */ + if (iswalpha(name[0]) && name[1] == ':' && IsPathSeparator(name[2])) { + bAddDrive = FALSE; + bAddCurrentPath = FALSE; + } + + /* is path absolute? */ + if (IsPathSeparator(name[0])) + bAddCurrentPath = FALSE; + + /* is path relative to itself?, if yes, skip ./ */ + if (name[0] == '.' 
&& IsPathSeparator(name[1])) { + name += 2; + } + + /* is path of form '//./'? */ + if (IsPathSeparator(name[0]) && + IsPathSeparator(name[1]) && + name[2] == '.' && + IsPathSeparator(name[3])) { + bAddDrive = FALSE; + bAddCurrentPath = FALSE; + bAddPrefix = FALSE; + if (pBIsRawPath) { + *pBIsRawPath = TRUE; + } + } + + int nParseOffset = 0; + + /* add 4 bytes header */ + if (bAddPrefix) { + nParseOffset = 4; + wcscpy(pwszBuf, L"\\\\?\\"); + } + + /* get current path if needed */ + if (bAddDrive || bAddCurrentPath) { + dwCurDirPathSize = p_GetCurrentDirectoryW(0, NULL); + if (dwCurDirPathSize > 0) { + /* get directory into own buffer as it may either return c:\... or \\?\C:\.... */ + pwszCurDirBuf = (wchar_t *)check_pool_memory_size((POOLMEM *)pwszCurDirBuf, (dwCurDirPathSize+1)*sizeof(wchar_t)); + p_GetCurrentDirectoryW(dwCurDirPathSize, pwszCurDirBuf); + } else { + /* we have no info for doing so */ + bAddDrive = FALSE; + bAddCurrentPath = FALSE; + } + } + + + /* add drive if needed */ + if (bAddDrive && !bAddCurrentPath) { + wchar_t szDrive[3]; + + if (IsPathSeparator(pwszCurDirBuf[0]) && + IsPathSeparator(pwszCurDirBuf[1]) && + pwszCurDirBuf[2] == '?' && + IsPathSeparator(pwszCurDirBuf[3])) { + /* copy drive character */ + szDrive[0] = pwszCurDirBuf[4]; + } else { + /* copy drive character */ + szDrive[0] = pwszCurDirBuf[0]; + } + + szDrive[1] = ':'; + szDrive[2] = 0; + + wcscat(pwszBuf, szDrive); + nParseOffset +=2; + } + + /* add path if needed */ + if (bAddCurrentPath) { + /* the 1 add. character is for the eventually added backslash */ + dwBufCharsNeeded += dwCurDirPathSize+1; + pwszBuf = (wchar_t *)check_pool_memory_size((POOLMEM *)pwszBuf, dwBufCharsNeeded*sizeof(wchar_t)); + /* get directory into own buffer as it may either return c:\... or \\?\C:\.... */ + + if (IsPathSeparator(pwszCurDirBuf[0]) && + IsPathSeparator(pwszCurDirBuf[1]) && + pwszCurDirBuf[2] == '?' && + IsPathSeparator(pwszCurDirBuf[3])) { + /* copy complete string */ + wcscpy(pwszBuf, pwszCurDirBuf); + } else { + /* append path */ + wcscat(pwszBuf, pwszCurDirBuf); + } + + nParseOffset = wcslen((LPCWSTR) pwszBuf); + + /* check if path ends with backslash, if not, add one */ + if (!IsPathSeparator(pwszBuf[nParseOffset-1])) { + wcscat(pwszBuf, L"\\"); + nParseOffset++; + } + } + + wchar_t *win32_name = &pwszBuf[nParseOffset]; + wchar_t *name_start = name; + + while (*name) { + /* Check for Unix separator and convert to Win32, eliminating + * duplicate separators. + */ + if (IsPathSeparator(*name)) { + *win32_name++ = '\\'; /* convert char */ + + /* Eliminate consecutive slashes, but not at the start so that + * \\.\ still works. + */ + if (name_start != name && IsPathSeparator(name[1])) { + name++; + } + } else { + *win32_name++ = *name; /* copy character */ + } + name++; + } + + /* null terminate string */ + *win32_name = 0; + + /* here we convert to VSS specific file name which + * can get longer because VSS will make something like + * \\\\?\\GLOBALROOT\\Device\\HarddiskVolumeShadowCopy1\\bacula\\uninstall.exe + * from c:\bacula\uninstall.exe + */ + if (g_pVSSPathConvertW != NULL && g_pVSSPathConverter()) { + /* is output buffer large enough? */ + pwszBuf = (wchar_t *)check_pool_memory_size((POOLMEM *)pwszBuf, + (dwBufCharsNeeded+MAX_PATH)*sizeof(wchar_t)); + /* create temp. 
buffer */ + wchar_t *pszBuf = (wchar_t *)get_pool_memory(PM_FNAME); + pszBuf = (wchar_t *)check_pool_memory_size((POOLMEM *)pszBuf, + (dwBufCharsNeeded+MAX_PATH)*sizeof(wchar_t)); + if (bAddPrefix) + nParseOffset = 4; + else + nParseOffset = 0; + wcsncpy(pszBuf, &pwszBuf[nParseOffset], wcslen(pwszBuf)+1-nParseOffset); + g_pVSSPathConvertW(pszBuf, pwszBuf, dwBufCharsNeeded+MAX_PATH); + free_pool_memory((POOLMEM *)pszBuf); + } + + free_pool_memory(pszUCSPath); + free_pool_memory((POOLMEM *)pwszCurDirBuf); + + Dmsg1(dbglvl, "Leave wchar_win32_path=%s\n", pwszBuf); + return (POOLMEM *)pwszBuf; +} + +/* + * Convert from WCHAR (UCS) to UTF-8 + */ +int +wchar_2_UTF8(POOLMEM **pszUTF, const wchar_t *pszUCS) +{ + /** + * The return value is the number of bytes written to the buffer. + * The number includes the byte for the null terminator. + */ + + if (p_WideCharToMultiByte) { + int nRet = p_WideCharToMultiByte(CP_UTF8,0,pszUCS,-1,NULL,0,NULL,NULL); + *pszUTF = check_pool_memory_size(*pszUTF, nRet); + return p_WideCharToMultiByte(CP_UTF8,0,pszUCS,-1,*pszUTF,nRet,NULL,NULL); + + } + return 0; +} + +/* + * Convert from WCHAR (UCS) to UTF-8 + */ +int +wchar_2_UTF8(char *pszUTF, const wchar_t *pszUCS, int cchChar) +{ + /** + * The return value is the number of bytes written to the buffer. + * The number includes the byte for the null terminator. + */ + + if (p_WideCharToMultiByte) { + int nRet = p_WideCharToMultiByte(CP_UTF8,0,pszUCS,-1,pszUTF,cchChar,NULL,NULL); + ASSERT (nRet > 0); + return nRet; + } + return 0; +} + +int +UTF8_2_wchar(POOLMEM **ppszUCS, const char *pszUTF) +{ + /* the return value is the number of wide characters written to the buffer. */ + /* convert null terminated string from utf-8 to ucs2, enlarge buffer if necessary */ + + if (p_MultiByteToWideChar) { + /* strlen of UTF8 +1 is enough */ + DWORD cchSize = (strlen(pszUTF)+1); + *ppszUCS = check_pool_memory_size(*ppszUCS, cchSize*sizeof (wchar_t)); + + int nRet = p_MultiByteToWideChar(CP_UTF8, 0, pszUTF, -1, (LPWSTR) *ppszUCS,cchSize); + ASSERT (nRet > 0); + return nRet; + } + return 0; +} + + +void +wchar_win32_path(const char *name, wchar_t *win32_name) +{ + const char *fname = name; + while (*name) { + /* Check for Unix separator and convert to Win32 */ + if (*name == '/') { + *win32_name++ = '\\'; /* convert char */ + /* If Win32 separated that is "quoted", remove quote */ + } else if (*name == '\\' && name[1] == '\\') { + *win32_name++ = '\\'; + name++; /* skip first \ */ + } else { + *win32_name++ = *name; /* copy character */ + } + name++; + } + /* Strip any trailing slash, if we stored something */ + if (*fname != 0 && win32_name[-1] == '\\') { + win32_name[-1] = 0; + } else { + *win32_name = 0; + } +} + +int +make_win32_path_UTF8_2_wchar(POOLMEM **pszUCS, const char *pszUTF, BOOL* pBIsRawPath /*= NULL*/) +{ + P(Win32Convmutex); + /* if we find the utf8 string in cache, we use the cached ucs2 version. + we compare the stringlength first (quick check) and then compare the content. 
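   (Editor's sketch of the flow that follows, for orientation: a matching
   g_dwWin32ConvUTF8strlen is only a cheap pre-filter; bstrcmp() confirms a
   real hit and the cached UCS-2 buffer is copied out. On a miss we fall
   through to UTF8_2_wchar() plus make_wchar_win32_path() and refresh both
   cache buffers before releasing Win32Convmutex.)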
+ */ + if (!g_pWin32ConvUTF8Cache) { + Win32ConvInitCache(); + } else if (g_dwWin32ConvUTF8strlen == strlen(pszUTF)) { + if (bstrcmp(pszUTF, g_pWin32ConvUTF8Cache)) { + /* Return cached value */ + int32_t nBufSize = sizeof_pool_memory(g_pWin32ConvUCS2Cache); + *pszUCS = check_pool_memory_size(*pszUCS, nBufSize); + wcscpy((LPWSTR) *pszUCS, (LPWSTR)g_pWin32ConvUCS2Cache); + V(Win32Convmutex); + return nBufSize / sizeof (WCHAR); + } + } + + /* helper to convert from utf-8 to UCS-2 and to complete a path for 32K path syntax */ + int nRet = UTF8_2_wchar(pszUCS, pszUTF); + +#ifdef USE_WIN32_32KPATHCONVERSION + /* add \\?\ to support 32K long filepaths */ + *pszUCS = make_wchar_win32_path(*pszUCS, pBIsRawPath); +#else + if (pBIsRawPath) + *pBIsRawPath = FALSE; +#endif + + /* populate cache */ + g_pWin32ConvUCS2Cache = check_pool_memory_size(g_pWin32ConvUCS2Cache, sizeof_pool_memory(*pszUCS)); + wcscpy((LPWSTR) g_pWin32ConvUCS2Cache, (LPWSTR) *pszUCS); + + g_dwWin32ConvUTF8strlen = strlen(pszUTF); + g_pWin32ConvUTF8Cache = check_pool_memory_size(g_pWin32ConvUTF8Cache, g_dwWin32ConvUTF8strlen+2); + bstrncpy(g_pWin32ConvUTF8Cache, pszUTF, g_dwWin32ConvUTF8strlen+1); + V(Win32Convmutex); + + return nRet; +} + +#if !defined(_MSC_VER) || (_MSC_VER < 1400) // VC8+ +int umask(int) +{ + return 0; +} +#endif + +#ifndef LOAD_WITH_ALTERED_SEARCH_PATH +#define LOAD_WITH_ALTERED_SEARCH_PATH 0x00000008 +#endif + +void *dlopen(const char *file, int mode) +{ + void *handle; + + handle = LoadLibraryEx(file, NULL, LOAD_WITH_ALTERED_SEARCH_PATH); + return handle; +} + +void *dlsym(void *handle, const char *name) +{ + void *symaddr; + symaddr = (void *)GetProcAddress((HMODULE)handle, name); + return symaddr; +} + +int dlclose(void *handle) +{ + if (handle && !FreeLibrary((HMODULE)handle)) { + errno = b_errno_win32; + return 1; /* failed */ + } + return 0; /* OK */ +} + +char *dlerror(void) +{ + static char buf[200]; + const char *err = errorString(); + bstrncpy(buf, (char *)err, sizeof(buf)); + LocalFree((void *)err); + return buf; +} + +int fcntl(int fd, int cmd) +{ + return 0; +} + +int chown(const char *k, uid_t, gid_t) +{ + return 0; +} + +int lchown(const char *k, uid_t, gid_t) +{ + return 0; +} + +long int +random(void) +{ + return rand(); +} + +void +srandom(unsigned int seed) +{ + srand(seed); +} +// ///////////////////////////////////////////////////////////////// +// convert from Windows concept of time to Unix concept of time +// ///////////////////////////////////////////////////////////////// +void +cvt_utime_to_ftime(const time_t &time, FILETIME &wintime) +{ + uint64_t mstime = time; + mstime *= WIN32_FILETIME_SCALE; + mstime += WIN32_FILETIME_ADJUST; + +#if defined(_MSC_VER) + wintime.dwLowDateTime = (DWORD)(mstime & 0xffffffffI64); +#else + wintime.dwLowDateTime = (DWORD)(mstime & 0xffffffffUL); +#endif + wintime.dwHighDateTime = (DWORD) ((mstime>>32)& 0xffffffffUL); +} + +time_t +cvt_ftime_to_utime(const FILETIME &time) +{ + uint64_t mstime = time.dwHighDateTime; + mstime <<= 32; + mstime |= time.dwLowDateTime; + + mstime -= WIN32_FILETIME_ADJUST; + mstime /= WIN32_FILETIME_SCALE; // convert to seconds. 
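   // Editor's worked example (annotation, not original code): the FILETIME of
   // the Unix epoch, 1970-01-01 00:00:00 UTC, lies 11644473600 seconds after
   // 1601-01-01, i.e. 11644473600 * 10000000 = 116444736000000000
   // (0x19DB1DED53E8000) 100ns ticks, exactly WIN32_FILETIME_ADJUST, so it
   // converts to mstime == 0 here; the cast below then keeps only the low
   // 32 bits of the result.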
+ + return (time_t) (mstime & 0xffffffff); +} + +static const char *errorString(void) +{ + LPVOID lpMsgBuf; + + FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | + FORMAT_MESSAGE_FROM_SYSTEM | + FORMAT_MESSAGE_IGNORE_INSERTS, + NULL, + GetLastError(), + MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), // Default lang + (LPTSTR) &lpMsgBuf, + 0, + NULL); + + /* Strip any \r or \n */ + char *rval = (char *) lpMsgBuf; + char *cp = strchr(rval, '\r'); + if (cp != NULL) { + *cp = 0; + } else { + cp = strchr(rval, '\n'); + if (cp != NULL) + *cp = 0; + } + return rval; +} + + +/* + * This is only called for directories, and is used to get the directory + * attributes and find out if we have a junction point or a mount point + * or other kind of "funny" directory. + */ +static int +statDir(const char *file, struct stat *sb, POOLMEM **readlnk=NULL) +{ + WIN32_FIND_DATAW info_w; // window's file info + WIN32_FIND_DATAA info_a; // window's file info + + // cache some common vars to make code more transparent + DWORD *pdwFileAttributes; + DWORD *pnFileSizeHigh; + DWORD *pnFileSizeLow; + DWORD *pdwReserved0; + FILETIME *pftLastAccessTime; + FILETIME *pftLastWriteTime; + HANDLE h = INVALID_HANDLE_VALUE; + + /* + * Oh, cool, another exception: Microsoft doesn't let us do + * FindFile operations on a Drive, so simply fake root attibutes. + */ + if (file[1] == ':' && file[2] == 0) { + time_t now = time(NULL); + Dmsg1(dbglvl, "faking ROOT attrs(%s).\n", file); + sb->st_mode = S_IFDIR; + sb->st_mode |= S_IREAD|S_IEXEC|S_IWRITE; + sb->st_ctime = now; /* File change time (inode change...) */ + sb->st_mtime = now; /* File modify time */ + sb->st_atime = now; /* File access time */ + sb->st_rdev = 0; + return 0; + } + + + // use unicode + if (p_FindFirstFileW) { + POOLMEM* pwszBuf = get_pool_memory (PM_FNAME); + make_win32_path_UTF8_2_wchar(&pwszBuf, file); + + Dmsg1(dbglvl, "FindFirstFileW=%s\n", file); + h = p_FindFirstFileW((LPCWSTR)pwszBuf, &info_w); + free_pool_memory(pwszBuf); + + pdwFileAttributes = &info_w.dwFileAttributes; + pdwReserved0 = &info_w.dwReserved0; + pnFileSizeHigh = &info_w.nFileSizeHigh; + pnFileSizeLow = &info_w.nFileSizeLow; + pftLastAccessTime = &info_w.ftLastAccessTime; + pftLastWriteTime = &info_w.ftLastWriteTime; + + // use ASCII + } else if (p_FindFirstFileA) { + Dmsg1(dbglvl, "FindFirstFileA=%s\n", file); + h = p_FindFirstFileA(file, &info_a); + + pdwFileAttributes = &info_a.dwFileAttributes; + pdwReserved0 = &info_a.dwReserved0; + pnFileSizeHigh = &info_a.nFileSizeHigh; + pnFileSizeLow = &info_a.nFileSizeLow; + pftLastAccessTime = &info_a.ftLastAccessTime; + pftLastWriteTime = &info_a.ftLastWriteTime; + } else { + Dmsg0(dbglvl, "No findFirstFile A or W found\n"); + } + + if (h == INVALID_HANDLE_VALUE) { + const char *err = errorString(); + /* + * Note, in creating leading paths, it is normal that + * the file does not exist. 
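
[Sketch, not part of the patch] cvt_utime_to_ftime()/cvt_ftime_to_utime() above convert between Unix time (seconds since 1970-01-01) and Windows FILETIME (100-nanosecond intervals since 1601-01-01). Assuming the conventional constant values for WIN32_FILETIME_SCALE and WIN32_FILETIME_ADJUST, which are defined elsewhere in this compat layer, the round trip reduces to the arithmetic below:

   #include <stdint.h>
   #include <stdio.h>

   /* Conventional values; the real definitions live elsewhere in compat.h. */
   #define FILETIME_SCALE  10000000ULL            /* 100ns units per second  */
   #define FILETIME_ADJUST 116444736000000000ULL  /* 1601-01-01 to 1970-01-01 */

   int main(void)
   {
      uint64_t utime = 1000000000ULL;             /* 2001-09-09 01:46:40 UTC */
      uint64_t ft    = utime * FILETIME_SCALE + FILETIME_ADJUST;
      uint64_t back  = (ft - FILETIME_ADJUST) / FILETIME_SCALE;
      printf("filetime=%llu back=%llu\n",
             (unsigned long long)ft, (unsigned long long)back);
      return 0;                                   /* back == utime           */
   }
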
+ */ + Dmsg2(2099, "FindFirstFile(%s):%s\n", file, err); + LocalFree((void *)err); + errno = b_errno_win32; + return -1; + } + + FindClose(h); + + sb->st_mode = 0777; /* start with everything */ + if (*pdwFileAttributes & FILE_ATTRIBUTE_READONLY) + sb->st_mode &= ~(S_IRUSR|S_IRGRP|S_IROTH); + if (*pdwFileAttributes & FILE_ATTRIBUTE_SYSTEM) + sb->st_mode &= ~S_IRWXO; /* remove everything for other */ + if (*pdwFileAttributes & FILE_ATTRIBUTE_HIDDEN) + sb->st_mode |= S_ISVTX; /* use sticky bit -> hidden */ + if (*pdwFileAttributes & FILE_ATTRIBUTE_ENCRYPTED) + sb->st_mode |= S_ISGID; /* use set group ID -> encrypted */ + sb->st_mode |= S_IFDIR; + sb->st_fattrs = *pdwFileAttributes; + Dmsg1(200, "Fattrs=0x%x\n", sb->st_fattrs); + /* + * Store reparse/mount point info in st_rdev. Note a + * Win32 reparse point (junction point) is like a link + * though it can have many properties (directory link, + * soft link, hard link, HSM, ... + * A mount point is a reparse point where another volume + * is mounted, so it is like a Unix mount point (change of + * filesystem). + */ + if (*pdwFileAttributes & FILE_ATTRIBUTE_REPARSE_POINT) { + sb->st_rdev = WIN32_MOUNT_POINT; + } else { + sb->st_rdev = 0; + } + /* This is a lot of work just to know that it is deduped */ + if (*pdwFileAttributes & FILE_ATTRIBUTE_REPARSE_POINT && + (*pdwReserved0 & IO_REPARSE_TAG_DEDUP)) { + sb->st_fattrs |= FILE_ATTRIBUTE_DEDUP; /* add our own bit */ + } + if ((*pdwFileAttributes & FILE_ATTRIBUTE_REPARSE_POINT) && + (*pdwReserved0 & IO_REPARSE_TAG_MOUNT_POINT)) { + sb->st_rdev = WIN32_MOUNT_POINT; /* mount point */ + /* + * Now to find out if the directory is a mount point or + * a reparse point, we must do a song and a dance. + * Explicitly open the file to read the reparse point, then + * call DeviceIoControl to find out if it points to a Volume + * or to a directory. + */ + h = INVALID_HANDLE_VALUE; + if (p_GetFileAttributesW) { + POOLMEM* pwszBuf = get_pool_memory(PM_FNAME); + make_win32_path_UTF8_2_wchar(&pwszBuf, file); + if (p_CreateFileW) { + h = CreateFileW((LPCWSTR)pwszBuf, GENERIC_READ, + FILE_SHARE_READ, NULL, OPEN_EXISTING, + FILE_FLAG_BACKUP_SEMANTICS | FILE_FLAG_OPEN_REPARSE_POINT, + NULL); + } + free_pool_memory(pwszBuf); + } else if (p_GetFileAttributesA) { + h = CreateFileA(file, GENERIC_READ, + FILE_SHARE_READ, NULL, OPEN_EXISTING, + FILE_FLAG_BACKUP_SEMANTICS | FILE_FLAG_OPEN_REPARSE_POINT, + NULL); + } + if (h != INVALID_HANDLE_VALUE) { + char dummy[1000]; + REPARSE_DATA_BUFFER *rdb = (REPARSE_DATA_BUFFER *)dummy; + rdb->ReparseTag = IO_REPARSE_TAG_MOUNT_POINT; + DWORD bytes; + bool ok; + ok = DeviceIoControl(h, FSCTL_GET_REPARSE_POINT, + NULL, 0, /* in buffer, bytes */ + (LPVOID)rdb, (DWORD)sizeof(dummy), /* out buffer, btyes */ + (LPDWORD)&bytes, (LPOVERLAPPED)0); + if (ok) { + POOLMEM *utf8 = get_pool_memory(PM_NAME); + wchar_2_UTF8(&utf8, (wchar_t *)rdb->SymbolicLinkReparseBuffer.PathBuffer); + Dmsg2(dbglvl, "Junction %s points to: %s\n", file, utf8); + if (strncasecmp(utf8, "\\??\\volume{", 11) == 0) { + sb->st_rdev = WIN32_MOUNT_POINT; + } else { + /* It points to a directory so we ignore it. 
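
[Sketch, not part of the patch] The reparse-point handling above (continued just below) reads the reparse data with DeviceIoControl(FSCTL_GET_REPARSE_POINT) to tell a volume mount point ("\??\Volume{...}" target) from an ordinary junction. The cheaper first-level check, whether a directory carries a reparse point at all and which tag it has, is available from WIN32_FIND_DATA alone, since dwReserved0 holds the reparse tag whenever FILE_ATTRIBUTE_REPARSE_POINT is set. Standalone sketch, path is a placeholder:

   #include <windows.h>
   #include <stdio.h>

   int main(void)
   {
      WIN32_FIND_DATAW fd;
      HANDLE h = FindFirstFileW(L"C:\\MountedVolume", &fd);
      if (h == INVALID_HANDLE_VALUE) return 1;
      FindClose(h);

      if (fd.dwFileAttributes & FILE_ATTRIBUTE_REPARSE_POINT) {
         if (fd.dwReserved0 == IO_REPARSE_TAG_MOUNT_POINT)
            printf("junction or volume mount point\n");   /* FSCTL query tells them apart */
         else if (fd.dwReserved0 == IO_REPARSE_TAG_SYMLINK)
            printf("directory symlink\n");
         else
            printf("other reparse tag 0x%lx\n", fd.dwReserved0);
      } else {
         printf("plain directory\n");
      }
      return 0;
   }
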
*/ + sb->st_rdev = WIN32_JUNCTION_POINT; + } + /* If requested, store the link for future use */ + if (readlnk) { + pm_strcpy(readlnk, utf8); + } + free_pool_memory(utf8); + } + CloseHandle(h); + } else { + Dmsg1(dbglvl, "Invalid handle from CreateFile(%s)\n", file); + } + } + Dmsg2(dbglvl, "st_rdev=%d file=%s\n", sb->st_rdev, file); + sb->st_size = *pnFileSizeHigh; + sb->st_size <<= 32; + sb->st_size |= *pnFileSizeLow; + sb->st_blksize = 4096; + sb->st_blocks = (uint32_t)(sb->st_size + 4095)/4096; + + sb->st_atime = cvt_ftime_to_utime(*pftLastAccessTime); + sb->st_mtime = cvt_ftime_to_utime(*pftLastWriteTime); + sb->st_ctime = MAX(sb->st_mtime, sb->st_ctime); + /* Note ctime is last change time -- not creation time */ + Dmsg1(200, "Fattrs=0x%x\n", sb->st_fattrs); + + return 0; +} + +/* On success, readlink() returns the number of bytes placed in buf. On + * error, -1 is returned and errno is set to indicate the error. + * + * TODO: Still need to activate the readlink() call in find_one.c + * by returning a S_ISLNK(st_mode) compatible flag, probably + * in statDir(); + */ +int +readlink(const char *path, char *buf, int bufsiz) +{ + int ret=-1; + struct stat sb; + POOLMEM *lnk = get_pool_memory(PM_FNAME); + *lnk = 0; + if (statDir(path, &sb, &lnk) == 0) { + ret = bstrncpy(buf, lnk, bufsiz) - buf - 1; // Don't count the last \0 + } + free_pool_memory(lnk); + return ret; +} + +/* symlink() shall return 0; otherwise, it shall return -1 and set errno to + * indicate the error. + */ +int +symlink(const char *path1, const char *path2) +{ + int ret=0; + struct stat st; + DWORD isdir=0; + POOLMEM* pwszBuf = NULL; + POOLMEM* pwszBuf2 = NULL; + + if (stat(path1, &st) == 0) { + if (st.st_mode & S_IFDIR) { + isdir=1; + } + } else { + Dmsg1(200, "Canot find the source directory %s\n", path1); + return -1; + } + + if (p_CreateSymbolicLinkW) { + pwszBuf = get_pool_memory (PM_FNAME); + make_win32_path_UTF8_2_wchar(&pwszBuf, path1); + + pwszBuf2 = get_pool_memory (PM_FNAME); + make_win32_path_UTF8_2_wchar(&pwszBuf2, path2); + + Dmsg2(dbglvl, "Trying to symlink (%ls -> %ls)\n", pwszBuf, pwszBuf2); + + if (!p_CreateSymbolicLinkW((LPCWSTR)pwszBuf2, (LPCWSTR)pwszBuf, isdir)) { + const char *err = errorString(); + Dmsg3(200, "Cannot create symlink (%ls -> %ls):%s\n", pwszBuf, pwszBuf2, err); + LocalFree((void *)err); + errno = b_errno_win32; + ret = -1; + } + + } else if (p_CreateSymbolicLinkA) { + + if (!p_CreateSymbolicLinkA(path2, path1, isdir)) { + const char *err = errorString(); + Dmsg3(200, "Cannot create symlink (%s -> %s):%s\n", path1, path2, err); + LocalFree((void *)err); + errno = b_errno_win32; + ret = -1; + } + + } else { + Dmsg0(200, "No implementation of CreateSymbolicLink available\n"); + ret = -1; + } + + if (pwszBuf) { + free_pool_memory(pwszBuf2); + free_pool_memory(pwszBuf); + } + + return ret; +} + +/* Do a stat() on a valid HANDLE (opened with CreateFile()) */ +int hstat(HANDLE h, struct stat *sb) +{ + BY_HANDLE_FILE_INFORMATION info; + + if (!GetFileInformationByHandle(h, &info)) { + const char *err = errorString(); + Dmsg1(dbglvl, "GetfileInformationByHandle: %s\n", err); + LocalFree((void *)err); + errno = b_errno_win32; + return -1; + } + + /* We should modify only variables that are modified in stat() + * everything else should be carefully tested. + */ + + /* When turned on, we wee a lot of messages such as + * C:/PerfLogs is a different filesystem. Will not descend from C:/ into it. 
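
[Sketch, not part of the patch] The symlink() emulation above uses CreateSymbolicLinkW/A, which must be told explicitly whether the target is a directory; that is why the code stat()s path1 first and passes the isdir flag. A minimal standalone call, with placeholder paths; note that creating symlinks normally requires elevation or the SeCreateSymbolicLinkPrivilege:

   #define _WIN32_WINNT 0x0600   /* CreateSymbolicLinkW is Vista+ */
   #include <windows.h>
   #include <stdio.h>

   int main(void)
   {
      /* Last argument: SYMBOLIC_LINK_FLAG_DIRECTORY for directory targets,
       * 0 for files -- the equivalent of the isdir flag computed above. */
      if (!CreateSymbolicLinkW(L"C:\\Temp\\link", L"C:\\Temp\\target", 0)) {
         fprintf(stderr, "CreateSymbolicLinkW failed: %lu\n", GetLastError());
         return 1;
      }
      return 0;
   }
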
+ */ + //sb->st_dev = info.dwVolumeSerialNumber; + + /* The st_ino is not used in stat() */ + sb->st_ino = info.nFileIndexHigh; + sb->st_ino <<= 32; + sb->st_ino |= info.nFileIndexLow; + + sb->st_nlink = 1; +#if 0 // We don't have the link() call right now + // TODO: something with CreateHardLinkFunc() + sb->st_nlink = (short)info.nNumberOfLinks; + if (sb->st_nlink > 1) { + Dmsg1(dbglvl, "st_nlink=%d\n", sb->st_nlink); + } +#endif + sb->st_mode = 0777; /* start with everything */ + if (info.dwFileAttributes & FILE_ATTRIBUTE_READONLY) + sb->st_mode &= ~(S_IRUSR|S_IRGRP|S_IROTH); + if (info.dwFileAttributes & FILE_ATTRIBUTE_SYSTEM) + sb->st_mode &= ~S_IRWXO; /* remove everything for other */ + if (info.dwFileAttributes & FILE_ATTRIBUTE_HIDDEN) + sb->st_mode |= S_ISVTX; /* use sticky bit -> hidden */ + if (info.dwFileAttributes & FILE_ATTRIBUTE_ENCRYPTED) + sb->st_mode |= S_ISGID; /* use set group ID -> encrypted */ + if (info.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) { + sb->st_mode |= S_IFDIR; + } else { + sb->st_mode |= S_IFREG; + } + sb->st_fattrs = info.dwFileAttributes; + + /* Use st_rdev to store reparse attribute */ + if (info.dwFileAttributes & FILE_ATTRIBUTE_REPARSE_POINT) { + sb->st_rdev = WIN32_REPARSE_POINT; + } + Dmsg3(dbglvl, "st_rdev=%d sizino=%d ino=%lld\n", sb->st_rdev, sizeof(sb->st_ino), + (long long)sb->st_ino); + + sb->st_size = info.nFileSizeHigh; + sb->st_size <<= 32; + sb->st_size |= info.nFileSizeLow; + sb->st_blksize = 4096; + sb->st_blocks = (uint32_t)(sb->st_size + 4095)/4096; + sb->st_atime = cvt_ftime_to_utime(info.ftLastAccessTime); + sb->st_mtime = cvt_ftime_to_utime(info.ftLastWriteTime); + sb->st_ctime = cvt_ftime_to_utime(info.ftCreationTime); + + /* Get the ChangeTime information with an other API, when attributes are modified + * the ChangeTime is modified while CreationTime and WriteTime are not + */ + FILE_BASIC_INFO file_basic_info; + if (p_GetFileInformationByHandleEx && + p_GetFileInformationByHandleEx(h, FileBasicInfo, &file_basic_info, sizeof(file_basic_info))) { + FILETIME *pftChangeTime = (FILETIME *)&file_basic_info.ChangeTime; + sb->st_ctime = cvt_ftime_to_utime(*pftChangeTime); + } + + Dmsg1(200, "Fattrs=0x%x\n", sb->st_fattrs); + return 0; +} + + +/* Emulate unix stat() call on windows */ +static int stat2(const char *file, struct stat *sb) +{ + int rval = 0; + HANDLE h = INVALID_HANDLE_VALUE; + POOLMEM *fname; + + errno = 0; + memset(sb, 0, sizeof(*sb)); + + /* We cannot stat a drive */ + if (file[1] == ':' && (file[2] == 0 || (IsPathSeparator(file[2]) && file[3] == 0))) { + return statDir(file, sb); + } + + fname = get_pool_memory(PM_FNAME); + unix_name_to_win32(&fname, file); + + if (p_CreateFileW) { + POOLMEM* pwszBuf = get_pool_memory(PM_FNAME); + make_win32_path_UTF8_2_wchar(&pwszBuf, fname); + + h = p_CreateFileW((LPCWSTR)pwszBuf, GENERIC_READ, + FILE_SHARE_READ, NULL, OPEN_EXISTING, + FILE_FLAG_BACKUP_SEMANTICS, NULL); + free_pool_memory(pwszBuf); + + } else { + h = CreateFileA(fname, GENERIC_READ, + FILE_SHARE_READ, NULL, OPEN_EXISTING, + FILE_FLAG_BACKUP_SEMANTICS, NULL); + } + + if (h == INVALID_HANDLE_VALUE) { + const char *err = errorString(); + Dmsg2(dbglvl, "Cannot open file for stat (%s):%s\n", fname, err); + LocalFree((void *)err); + errno = b_errno_win32; + rval = -1; + goto bail_out; + } + + rval = hstat(h, sb); + CloseHandle(h); + + if (sb->st_mode & S_IFDIR && + file[1] == ':' && file[2] != 0) { + rval = statDir(file, sb); + // TODO: See if we really need statDir(), we can probably take only + // the code for the 
ReparsePoint + } +bail_out: + free_pool_memory(fname); + return rval; +} + +int +stat(const char *file, struct stat *sb) +{ + int ret; + WIN32_FILE_ATTRIBUTE_DATA data; + + errno = 0; + memset(sb, 0, sizeof(*sb)); + + /* We do the first try with a file HANDLER, because we want to use the + * ChangeTime that is only available with GetFileInformationByHandleEx + */ + ret = stat2(file, sb); + + if (!ret) { + return ret; + } + + /* We were not able to open a filehandler on the file to get attributes, so + * so we try with the name. It may happen of example with encrypted files. + */ + + if (p_GetFileAttributesExW) { + /* dynamically allocate enough space for UCS2 filename */ + POOLMEM *pwszBuf = get_pool_memory(PM_FNAME); + make_win32_path_UTF8_2_wchar(&pwszBuf, file); + + BOOL b = p_GetFileAttributesExW((LPCWSTR)pwszBuf, GetFileExInfoStandard, &data); + free_pool_memory(pwszBuf); + + if (!b) { + const char *err = errorString(); + Dmsg2(10, "GetFileAttributesExW(%s):%s\n", file, err); + LocalFree((void *)err); + return -1; + } + + } else if (p_GetFileAttributesExA) { + if (!p_GetFileAttributesExA(file, GetFileExInfoStandard, &data)) { + const char *err = errorString(); + Dmsg2(10, "GetFileAttributesExW(%s):%s\n", file, err); + LocalFree((void *)err); + return -1; + } + } else { + return -1; // Not implemented + } + + sb->st_mode = 0777; /* start with everything */ + if (data.dwFileAttributes & FILE_ATTRIBUTE_READONLY) { + sb->st_mode &= ~(S_IRUSR|S_IRGRP|S_IROTH); + } + if (data.dwFileAttributes & FILE_ATTRIBUTE_SYSTEM) { + sb->st_mode &= ~S_IRWXO; /* remove everything for other */ + } + if (data.dwFileAttributes & FILE_ATTRIBUTE_HIDDEN) { + sb->st_mode |= S_ISVTX; /* use sticky bit -> hidden */ + } + if (data.dwFileAttributes & FILE_ATTRIBUTE_ENCRYPTED) { + sb->st_mode |= S_ISGID; /* use set group ID -> encrypted */ + } + if (data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) { + sb->st_mode |= S_IFDIR; + } else { + sb->st_mode |= S_IFREG; + } + sb->st_fattrs = data.dwFileAttributes; + + /* Use st_rdev to store reparse attribute */ + sb->st_rdev = (data.dwFileAttributes & FILE_ATTRIBUTE_REPARSE_POINT) ? 1 : 0; + + sb->st_nlink = 1; + sb->st_size = data.nFileSizeHigh; + sb->st_size <<= 32; + sb->st_size |= data.nFileSizeLow; + sb->st_blksize = 4096; + sb->st_blocks = (uint32_t)(sb->st_size + 4095)/4096; + sb->st_atime = cvt_ftime_to_utime(data.ftLastAccessTime); + sb->st_mtime = cvt_ftime_to_utime(data.ftLastWriteTime); + sb->st_ctime = sb->st_mtime; + + /* + * If we are not at the root, then to distinguish a reparse + * point from a mount point, we must call FindFirstFile() to + * get the WIN32_FIND_DATA, which has the bit that indicates + * that this directory is a mount point -- aren't Win32 APIs + * wonderful? (sarcasm). The code exists in the statDir + * subroutine. + */ + if (data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY && + file[1] == ':' && file[2] != 0) { + statDir(file, sb); + } + Dmsg3(dbglvl, "sizino=%d ino=%lld file=%s\n", sizeof(sb->st_ino), + (long long)sb->st_ino, file); + Dmsg1(200, "Fattrs=0x%x\n", sb->st_fattrs); + return 0; +} + +int +fstat(intptr_t fd, struct stat *sb) +{ + return hstat((HANDLE)_get_osfhandle(fd), sb); +} + +/* + * We write our own ftruncate because the one in the + * Microsoft library mrcrt.dll does not truncate + * files greater than 2GB. 
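
[Sketch, not part of the patch] The stat() emulation above works in two stages: stat2() opens a real handle so hstat() can read the NTFS ChangeTime via GetFileInformationByHandleEx(FileBasicInfo), and only when that open fails (encrypted files, for example) does it fall back to GetFileAttributesExW(), which cannot report ChangeTime. A standalone sketch of the handle-based path, file name is a placeholder:

   #define _WIN32_WINNT 0x0600   /* GetFileInformationByHandleEx is Vista+ */
   #include <windows.h>
   #include <stdio.h>

   int main(void)
   {
      HANDLE h = CreateFileW(L"C:\\Temp\\example.txt", GENERIC_READ,
                             FILE_SHARE_READ, NULL, OPEN_EXISTING,
                             FILE_FLAG_BACKUP_SEMANTICS, NULL);
      if (h == INVALID_HANDLE_VALUE) return 1;

      FILE_BASIC_INFO bi;
      if (GetFileInformationByHandleEx(h, FileBasicInfo, &bi, sizeof(bi))) {
         /* ChangeTime is a FILETIME value (100ns units since 1601) stored in a
          * LARGE_INTEGER; it moves on attribute changes, unlike LastWriteTime. */
         printf("ChangeTime raw: %lld\n", (long long)bi.ChangeTime.QuadPart);
      }
      CloseHandle(h);
      return 0;
   }
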
+ * KES - May 2007 + */ +int win32_ftruncate(int fd, int64_t length) +{ + /* Set point we want to truncate file */ + __int64 pos = _lseeki64(fd, (__int64)length, SEEK_SET); + + if (pos != (__int64)length) { + errno = EACCES; /* truncation failed, get out */ + return -1; + } + + /* Truncate file */ + if (SetEndOfFile((HANDLE)_get_osfhandle(fd)) == 0) { + errno = b_errno_win32; + return -1; + } + errno = 0; + return 0; +} + +int fcntl(int fd, int cmd, long arg) +{ + int rval = 0; + + switch (cmd) { + case F_GETFL: + rval = O_NONBLOCK; + break; + + case F_SETFL: + rval = 0; + break; + + default: + errno = EINVAL; + rval = -1; + break; + } + + return rval; +} + +int +lstat(const char *file, struct stat *sb) +{ + return stat(file, sb); +} + +void +sleep(int sec) +{ + Sleep(sec * 1000); +} + +int +geteuid(void) +{ + return 0; +} + +int +execvp(const char *, char *[]) { + errno = ENOSYS; + return -1; +} + + +int +fork(void) +{ + errno = ENOSYS; + return -1; +} + +int +pipe(int[]) +{ + errno = ENOSYS; + return -1; +} + +int +waitpid(int, int*, int) +{ + errno = ENOSYS; + return -1; +} + +#ifndef HAVE_MINGW +int +strcasecmp(const char *s1, const char *s2) +{ + register int ch1, ch2; + + if (s1==s2) + return 0; /* strings are equal if same object. */ + else if (!s1) + return -1; + else if (!s2) + return 1; + do { + ch1 = *s1; + ch2 = *s2; + s1++; + s2++; + } while (ch1 != 0 && tolower(ch1) == tolower(ch2)); + + return(ch1 - ch2); +} +#endif //HAVE_MINGW + +int +strncasecmp(const char *s1, const char *s2, int len) +{ + register int ch1 = 0, ch2 = 0; + + if (s1==s2) + return 0; /* strings are equal if same object. */ + else if (!s1) + return -1; + else if (!s2) + return 1; + + while (len--) { + ch1 = *s1; + ch2 = *s2; + s1++; + s2++; + if (ch1 == 0 || tolower(ch1) != tolower(ch2)) break; + } + + return (ch1 - ch2); +} + +int +gettimeofday(struct timeval *tv, struct timezone *) +{ + SYSTEMTIME now; + FILETIME tmp; + + GetSystemTime(&now); + + if (tv == NULL) { + errno = EINVAL; + return -1; + } + if (!SystemTimeToFileTime(&now, &tmp)) { + errno = b_errno_win32; + return -1; + } + + int64_t _100nsec = tmp.dwHighDateTime; + _100nsec <<= 32; + _100nsec |= tmp.dwLowDateTime; + _100nsec -= WIN32_FILETIME_ADJUST; + + tv->tv_sec = (long)(_100nsec / 10000000); + tv->tv_usec = (long)((_100nsec % 10000000)/10); + return 0; + +} + +/* + * Write in Windows System log + */ +void syslog(int type, const char *fmt, ...) 
+{ + va_list arg_ptr; + int len, maxlen; + POOLMEM *msg; + + msg = get_pool_memory(PM_EMSG); + + for (;;) { + maxlen = sizeof_pool_memory(msg) - 1; + va_start(arg_ptr, fmt); + len = bvsnprintf(msg, maxlen, fmt, arg_ptr); + va_end(arg_ptr); + if (len < 0 || len >= (maxlen-5)) { + msg = realloc_pool_memory(msg, maxlen + maxlen/2); + continue; + } + break; + } + LogErrorMsg((const char *)msg); + free_memory(msg); +} + +void +closelog() +{ +} + +struct passwd * +getpwuid(uid_t) +{ + return NULL; +} + +struct group * +getgrgid(uid_t) +{ + return NULL; +} + +// implement opendir/readdir/closedir on top of window's API + +typedef struct _dir +{ + WIN32_FIND_DATAW data_w; // window's file info (wchar version) + const char *spec; // the directory we're traversing + HANDLE dirh; // the search handle + bool call_findnextfile; // use FindFirstFile data first +} _dir; + +DIR * +opendir(const char *path) +{ + /* enough space for VSS !*/ + int max_len = strlen(path) + MAX_PATH; + char *tspec = NULL; + _dir *rval = NULL; + POOLMEM* pwcBuf; + + if (path == NULL) { + errno = ENOENT; + return NULL; + } + if (!p_FindFirstFileW || !p_FindNextFileW) { + errno = ENOMEM; + return NULL; + } + + Dmsg1(dbglvl, "Opendir path=%s\n", path); + rval = (_dir *)malloc(sizeof(_dir)); + if (!rval) { + goto err; + } + memset (rval, 0, sizeof (_dir)); + + tspec = (char *)malloc(max_len); + if (!tspec) { + goto err; + } + + conv_unix_to_vss_win32_path(path, tspec, max_len); + Dmsg1(dbglvl, "win32 path=%s\n", tspec); + + // add backslash only if there is none yet (think of c:\) + if (tspec[strlen(tspec)-1] != '\\') + bstrncat(tspec, "\\*", max_len); + else + bstrncat(tspec, "*", max_len); + + rval->spec = tspec; + + // convert to wchar_t + pwcBuf = get_pool_memory(PM_FNAME);; + make_win32_path_UTF8_2_wchar(&pwcBuf, rval->spec); + rval->dirh = p_FindFirstFileW((LPCWSTR)pwcBuf, &rval->data_w); + rval->call_findnextfile = false; + free_pool_memory(pwcBuf); + + if (rval->dirh == INVALID_HANDLE_VALUE) { + if (GetLastError() == ERROR_FILE_NOT_FOUND) { + /* the directory is empty, continue with an INVALID_HANDLE_VALUE handle */ + rval->data_w.cFileName[0]='\0'; + } else { + goto err; + } + } + Dmsg4(dbglvl, "opendir(%s)\n\tspec=%s,\n\tFindFirstFile returns %d cFileName=%s\n", + path, rval->spec, rval->dirh, rval->data_w.cFileName); + + return (DIR *)rval; + +err: + if (rval) { + free(rval); + } + if (tspec) { + free(tspec); + } + errno = b_errno_win32; + return NULL; +} + +int +closedir(DIR *dirp) +{ + _dir *dp = (_dir *)dirp; + if (dp->dirh != INVALID_HANDLE_VALUE) { + FindClose(dp->dirh); + } + free((void *)dp->spec); + free((void *)dp); + return 0; +} + +/* + typedef struct _WIN32_FIND_DATA { + DWORD dwFileAttributes; + FILETIME ftCreationTime; + FILETIME ftLastAccessTime; + FILETIME ftLastWriteTime; + DWORD nFileSizeHigh; + DWORD nFileSizeLow; + DWORD dwReserved0; + DWORD dwReserved1; + TCHAR cFileName[MAX_PATH]; + TCHAR cAlternateFileName[14]; +} WIN32_FIND_DATA, *PWIN32_FIND_DATA; +*/ + +int breaddir(DIR *dirp, POOLMEM *&dname) +{ + _dir *dp = (_dir *)dirp; + + if (dirp == NULL) { + errno = EBADF; + return EBADF; + } + + if (dp->call_findnextfile) { + if (p_FindNextFileW(dp->dirh, &dp->data_w)) { + } else { + if (GetLastError() == ERROR_NO_MORE_FILES) { + Dmsg1(dbglvl, "breaddir(%p) ERROR_NO_MORE_FILES\n", dirp); + return -1; // end of directory reached + } else { + errno = b_errno_win32; + return b_errno_win32; + } + } + } else { + // use data from FindFirstFile first then next time call FindNextFileW + if (dp->dirh == 
INVALID_HANDLE_VALUE) { + return -1; // the directory is empty, no "." nor ".." (special case) + } + dp->call_findnextfile = true; + } + + wchar_2_UTF8(&dname, dp->data_w.cFileName); + + return 0; +} + +/* + * Dotted IP address to network address + * + * Returns 1 if OK + * 0 on error + */ +int +inet_aton(const char *a, struct in_addr *inp) +{ + const char *cp = a; + uint32_t acc = 0, tmp = 0; + int dotc = 0; + + if (!isdigit(*cp)) { /* first char must be digit */ + return 0; /* error */ + } + do { + if (isdigit(*cp)) { + tmp = (tmp * 10) + (*cp -'0'); + } else if (*cp == '.' || *cp == 0) { + if (tmp > 255) { + return 0; /* error */ + } + acc = (acc << 8) + tmp; + dotc++; + tmp = 0; + } else { + return 0; /* error */ + } + } while (*cp++ != 0); + if (dotc != 4) { /* want 3 .'s plus EOS */ + return 0; /* error */ + } + inp->s_addr = htonl(acc); /* store addr in network format */ + return 1; +} + + +/* + * Convert from presentation format (which usually means ASCII printable) + * to network format (which is usually some kind of binary format). + * return: + * 1 if the address was valid for the specified address family + * 0 if the address wasn't valid (`dst' is untouched in this case) + */ +int +binet_pton(int af, const char *src, void *dst) +{ + switch (af) { + case AF_INET: + case AF_INET6: + if (p_InetPton) { + return p_InetPton(af, src, dst); + } + return 0; + default: + return 0; + } +} + + +int +nanosleep(const struct timespec *req, struct timespec *rem) +{ + if (rem) + rem->tv_sec = rem->tv_nsec = 0; + Sleep((req->tv_sec * 1000) + (req->tv_nsec/1000000)); + return 0; +} + +void +init_signals(void terminate(int sig)) +{ + +} + +void +init_stack_dump(void) +{ + +} + + +long +pathconf(const char *path, int name) +{ + switch(name) { + case _PC_PATH_MAX : + if (strncmp(path, "\\\\?\\", 4) == 0) + return 32767; + case _PC_NAME_MAX : + return 255; + } + errno = ENOSYS; + return -1; +} + +int +WSA_Init(void) +{ + WORD wVersionRequested = MAKEWORD(2, 2); + WSADATA wsaData; + + int err = WSAStartup(wVersionRequested, &wsaData); + if (err != 0) { + wVersionRequested = MAKEWORD(2, 0); + err = WSAStartup(wVersionRequested, &wsaData); + if (err != 0) { + wVersionRequested = MAKEWORD(1, 1); + err = WSAStartup(wVersionRequested, &wsaData); + } + } + + if (err != 0) { + printf("Can not start Windows Sockets\n"); + errno = ENOSYS; + return -1; + } + + return 0; +} + +static DWORD fill_attribute(DWORD attr, mode_t mode) +{ + Dmsg1(dbglvl, " before attr=%lld\n", (uint64_t) attr); + /* Use Bacula mappings define in stat() above */ + if (mode & (S_IRUSR|S_IRGRP|S_IROTH)) { // If file is readable + attr &= ~FILE_ATTRIBUTE_READONLY; // then this is not READONLY + } else { + attr |= FILE_ATTRIBUTE_READONLY; + } + if (mode & S_ISVTX) { // The sticky bit <=> HIDDEN + attr |= FILE_ATTRIBUTE_HIDDEN; + } else { + attr &= ~FILE_ATTRIBUTE_HIDDEN; + } + if (mode & S_ISGID) { // The set group ID <=> ENCRYPTED + attr |= FILE_ATTRIBUTE_ENCRYPTED; + } else { + attr &= ~FILE_ATTRIBUTE_ENCRYPTED; + } + if (mode & S_IRWXO) { // Other can read/write/execute ? 
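
[Sketch, not part of the patch] The opendir()/breaddir()/closedir() emulation earlier in this file boils down to the FindFirstFileW()/FindNextFileW() loop below, plus appending "\*" to the directory, converting each cFileName to UTF-8, and treating ERROR_FILE_NOT_FOUND as an empty directory. Standalone sketch with a placeholder path and wprintf() standing in for the UTF-8 conversion:

   #include <windows.h>
   #include <stdio.h>

   int main(void)
   {
      WIN32_FIND_DATAW fd;
      /* The emulation appends "\*" to the directory before searching. */
      HANDLE h = FindFirstFileW(L"C:\\Temp\\*", &fd);
      if (h == INVALID_HANDLE_VALUE) {
         /* ERROR_FILE_NOT_FOUND means "empty directory" in the code above. */
         return GetLastError() == ERROR_FILE_NOT_FOUND ? 0 : 1;
      }
      do {
         wprintf(L"%ls\n", fd.cFileName);   /* breaddir() converts to UTF-8 */
      } while (FindNextFileW(h, &fd));
      FindClose(h);
      return 0;
   }
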
+ attr &= ~FILE_ATTRIBUTE_SYSTEM; // => Not system + } else { + attr |= FILE_ATTRIBUTE_SYSTEM; + } + Dmsg1(dbglvl, " after attr=%lld\n", (uint64_t)attr); + return attr; +} + +int win32_chmod(const char *path, mode_t mode) +{ + bool ret=false; + DWORD attr; + + Dmsg2(dbglvl, "win32_chmod(path=%s mode=%lld)\n", path, (uint64_t)mode); + if (p_GetFileAttributesW) { + POOLMEM* pwszBuf = get_pool_memory(PM_FNAME); + make_win32_path_UTF8_2_wchar(&pwszBuf, path); + + attr = p_GetFileAttributesW((LPCWSTR) pwszBuf); + if (attr != INVALID_FILE_ATTRIBUTES) { + /* Use Bacula mappings define in stat() above */ + attr = fill_attribute(attr, mode); + ret = p_SetFileAttributesW((LPCWSTR)pwszBuf, attr); + } + free_pool_memory(pwszBuf); + Dmsg0(dbglvl, "Leave win32_chmod. AttributesW\n"); + } else if (p_GetFileAttributesA) { + attr = p_GetFileAttributesA(path); + if (attr != INVALID_FILE_ATTRIBUTES) { + attr = fill_attribute(attr, mode); + ret = p_SetFileAttributesA(path, attr); + } + Dmsg0(dbglvl, "Leave win32_chmod did AttributesA\n"); + } else { + Dmsg0(dbglvl, "Leave win32_chmod did nothing\n"); + } + + if (!ret) { + const char *err = errorString(); + Dmsg2(dbglvl, "Get/SetFileAttributes(%s): %s\n", path, err); + LocalFree((void *)err); + errno = b_errno_win32; + return -1; + } + return 0; +} + + +int +win32_chdir(const char *dir) +{ + if (p_SetCurrentDirectoryW) { + POOLMEM* pwszBuf = get_pool_memory(PM_FNAME); + make_win32_path_UTF8_2_wchar(&pwszBuf, dir); + + BOOL b=p_SetCurrentDirectoryW((LPCWSTR)pwszBuf); + + free_pool_memory(pwszBuf); + + if (!b) { + errno = b_errno_win32; + return -1; + } + } else if (p_SetCurrentDirectoryA) { + if (0 == p_SetCurrentDirectoryA(dir)) { + errno = b_errno_win32; + return -1; + } + } else { + return -1; + } + + return 0; +} + +int +win32_mkdir(const char *dir) +{ + Dmsg1(dbglvl, "enter win32_mkdir. dir=%s\n", dir); + if (p_wmkdir){ + POOLMEM* pwszBuf = get_pool_memory(PM_FNAME); + make_win32_path_UTF8_2_wchar(&pwszBuf, dir); + + int n = p_wmkdir((LPCWSTR)pwszBuf); + free_pool_memory(pwszBuf); + Dmsg0(dbglvl, "Leave win32_mkdir did wmkdir\n"); + return n; + } + + Dmsg0(dbglvl, "Leave win32_mkdir did _mkdir\n"); + return _mkdir(dir); +} + + +char * +win32_getcwd(char *buf, int maxlen) +{ + int n=0; + + if (p_GetCurrentDirectoryW) { + POOLMEM* pwszBuf = get_pool_memory(PM_FNAME); + pwszBuf = check_pool_memory_size (pwszBuf, maxlen*sizeof(wchar_t)); + + n = p_GetCurrentDirectoryW(maxlen, (LPWSTR) pwszBuf); + if (n!=0) + n = wchar_2_UTF8 (buf, (wchar_t *)pwszBuf, maxlen)-1; + free_pool_memory(pwszBuf); + + } else if (p_GetCurrentDirectoryA) + n = p_GetCurrentDirectoryA(maxlen, buf); + + if (n <= 0 || n > maxlen) return NULL; + + if (n+1 > maxlen) return NULL; + if (n != 3) { + buf[n] = '\\'; + buf[n+1] = 0; + } + return buf; +} + +int +win32_fputs(const char *string, FILE *stream) +{ + /* we use WriteConsoleA / WriteConsoleA + so we can be sure that unicode support works on win32. 
+ with fallback if something fails + */ + + HANDLE hOut = GetStdHandle (STD_OUTPUT_HANDLE); + if (hOut && (hOut != INVALID_HANDLE_VALUE) && p_WideCharToMultiByte && + p_MultiByteToWideChar && (stream == stdout)) { + + POOLMEM* pwszBuf = get_pool_memory(PM_MESSAGE); + + DWORD dwCharsWritten; + DWORD dwChars; + + dwChars = UTF8_2_wchar(&pwszBuf, string); + + /* try WriteConsoleW */ + if (WriteConsoleW (hOut, pwszBuf, dwChars-1, &dwCharsWritten, NULL)) { + free_pool_memory(pwszBuf); + return dwCharsWritten; + } + + /* convert to local codepage and try WriteConsoleA */ + POOLMEM* pszBuf = get_pool_memory(PM_MESSAGE); + pszBuf = check_pool_memory_size(pszBuf, dwChars+1); + + dwChars = p_WideCharToMultiByte(GetConsoleOutputCP(),0,(LPCWSTR)pwszBuf,-1,pszBuf,dwChars,NULL,NULL); + free_pool_memory(pwszBuf); + + if (WriteConsoleA (hOut, pszBuf, dwChars-1, &dwCharsWritten, NULL)) { + free_pool_memory(pszBuf); + return dwCharsWritten; + } + free_pool_memory(pszBuf); + } + /* Fall back */ + return fputs(string, stream); +} + +char* +win32_cgets (char* buffer, int len) +{ + /* we use console ReadConsoleA / ReadConsoleW to be able to read unicode + from the win32 console and fallback if seomething fails */ + + HANDLE hIn = GetStdHandle (STD_INPUT_HANDLE); + if (hIn && (hIn != INVALID_HANDLE_VALUE) && p_WideCharToMultiByte && p_MultiByteToWideChar) { + DWORD dwRead; + wchar_t wszBuf[1024]; + char szBuf[1024]; + + /* nt and unicode conversion */ + if (ReadConsoleW (hIn, wszBuf, 1024, &dwRead, NULL)) { + + /* null terminate at end */ + if (wszBuf[dwRead-1] == L'\n') { + wszBuf[dwRead-1] = L'\0'; + dwRead --; + } + + if (wszBuf[dwRead-1] == L'\r') { + wszBuf[dwRead-1] = L'\0'; + dwRead --; + } + + wchar_2_UTF8(buffer, wszBuf, len); + return buffer; + } + + /* win 9x and unicode conversion */ + if (ReadConsoleA (hIn, szBuf, 1024, &dwRead, NULL)) { + + /* null terminate at end */ + if (szBuf[dwRead-1] == L'\n') { + szBuf[dwRead-1] = L'\0'; + dwRead --; + } + + if (szBuf[dwRead-1] == L'\r') { + szBuf[dwRead-1] = L'\0'; + dwRead --; + } + + /* convert from ansii to wchar_t */ + p_MultiByteToWideChar(GetConsoleCP(), 0, szBuf, -1, wszBuf,1024); + /* convert from wchar_t to UTF-8 */ + if (wchar_2_UTF8(buffer, wszBuf, len)) + return buffer; + } + } + + /* fallback */ + if (fgets(buffer, len, stdin)) + return buffer; + else + return NULL; +} + +int +win32_unlink(const char *filename) +{ + int nRetCode; + if (p_wunlink) { + POOLMEM* pwszBuf = get_pool_memory(PM_FNAME); + make_win32_path_UTF8_2_wchar(&pwszBuf, filename); + + nRetCode = _wunlink((LPCWSTR) pwszBuf); + + /* + * special case if file is readonly, + * we retry but unset attribute before + */ + if (nRetCode == -1 && errno == EACCES && p_SetFileAttributesW && p_GetFileAttributesW) { + DWORD dwAttr = p_GetFileAttributesW((LPCWSTR)pwszBuf); + if (dwAttr != INVALID_FILE_ATTRIBUTES) { + if (p_SetFileAttributesW((LPCWSTR)pwszBuf, dwAttr & ~FILE_ATTRIBUTE_READONLY)) { + nRetCode = _wunlink((LPCWSTR) pwszBuf); + /* reset to original if it didn't help */ + if (nRetCode == -1) + p_SetFileAttributesW((LPCWSTR)pwszBuf, dwAttr); + } + } + } + free_pool_memory(pwszBuf); + } else { + nRetCode = _unlink(filename); + + /* special case if file is readonly, + we retry but unset attribute before */ + if (nRetCode == -1 && errno == EACCES && p_SetFileAttributesA && p_GetFileAttributesA) { + DWORD dwAttr = p_GetFileAttributesA(filename); + if (dwAttr != INVALID_FILE_ATTRIBUTES) { + if (p_SetFileAttributesA(filename, dwAttr & ~FILE_ATTRIBUTE_READONLY)) { + nRetCode = 
_unlink(filename); + /* reset to original if it didn't help */ + if (nRetCode == -1) + p_SetFileAttributesA(filename, dwAttr); + } + } + } + } + return nRetCode; +} + + +#include "mswinver.h" + +char WIN_VERSION_LONG[64]; +char WIN_VERSION[32]; +char WIN_RAWVERSION[32]; + +class winver { +public: + winver(void); +}; + +static winver INIT; // cause constructor to be called before main() + + +winver::winver(void) +{ + const char *version = ""; + const char *platform = ""; + OSVERSIONINFO osvinfo; + osvinfo.dwOSVersionInfoSize = sizeof(osvinfo); + + // Get the current OS version + if (!GetVersionEx(&osvinfo)) { + version = "Unknown"; + platform = "Unknown"; + } + const int ver = _mkversion(osvinfo.dwPlatformId, + osvinfo.dwMajorVersion, + osvinfo.dwMinorVersion); + snprintf(WIN_RAWVERSION, sizeof(WIN_RAWVERSION), "Windows %#08x", ver); + switch (ver) + { + case MS_WINDOWS_95: (version = "Windows 95"); break; + case MS_WINDOWS_98: (version = "Windows 98"); break; + case MS_WINDOWS_ME: (version = "Windows ME"); break; + case MS_WINDOWS_NT4:(version = "Windows NT 4.0"); platform = "NT"; break; + case MS_WINDOWS_2K: (version = "Windows 2000");platform = "NT"; break; + case MS_WINDOWS_XP: (version = "Windows XP");platform = "NT"; break; + case MS_WINDOWS_S2003: (version = "Windows Server 2003");platform = "NT"; break; + default: version = WIN_RAWVERSION; break; + } + + bstrncpy(WIN_VERSION_LONG, version, sizeof(WIN_VERSION_LONG)); + snprintf(WIN_VERSION, sizeof(WIN_VERSION), "%s %lu.%lu.%lu", + platform, osvinfo.dwMajorVersion, osvinfo.dwMinorVersion, osvinfo.dwBuildNumber); + +#if 0 + HANDLE h = CreateFile("G:\\foobar", GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, 0, NULL); + CloseHandle(h); +#endif +#if 0 + BPIPE *b = open_bpipe("ls -l", 10, "r"); + char buf[1024]; + while (!feof(b->rfd)) { + fgets(buf, sizeof(buf), b->rfd); + } + close_bpipe(b); +#endif +} + +BOOL CreateChildProcess(VOID); +VOID WriteToPipe(VOID); +VOID ReadFromPipe(VOID); +VOID ErrorExit(LPCSTR); +VOID ErrMsg(LPTSTR, BOOL); + +/** + * Check for a quoted path, if an absolute path name is given and it contains + * spaces it will need to be quoted. i.e. "c:/Program Files/foo/bar.exe" + * CreateProcess() says the best way to ensure proper results with executables + * with spaces in path or filename is to quote the string. + */ +const char * +getArgv0(const char *cmdline) +{ + + int inquote = 0; + const char *cp; + for (cp = cmdline; *cp; cp++) + { + if (*cp == '"') { + inquote = !inquote; + } + if (!inquote && isspace(*cp)) + break; + } + + + int len = cp - cmdline; + char *rval = (char *)malloc(len+1); + + cp = cmdline; + char *rp = rval; + + while (len--) + *rp++ = *cp++; + + *rp = 0; + return rval; +} + +/* + * Extracts the executable or script name from the first string in + * cmdline. + * + * If the name contains blanks then it must be quoted with double quotes, + * otherwise quotes are optional. If the name contains blanks then it + * will be converted to a short name. + * + * The optional quotes will be removed. The result is copied to a malloc'ed + * buffer and returned through the pexe argument. The pargs parameter is set + * to the address of the character in cmdline located after the name. + * + * The malloc'ed buffer returned in *pexe must be freed by the caller. 
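
[Sketch, not part of the patch] win32_unlink() above encodes a common Windows pattern: when deletion fails because the file is read-only, clear FILE_ATTRIBUTE_READONLY, retry, and restore the original attributes if the retry still fails. The same pattern with DeleteFileW instead of _wunlink/_unlink, path is a placeholder:

   #include <windows.h>

   /* Delete a file, clearing the read-only bit if that is what blocks us. */
   static BOOL delete_even_if_readonly(LPCWSTR path)
   {
      if (DeleteFileW(path)) return TRUE;
      if (GetLastError() != ERROR_ACCESS_DENIED) return FALSE;

      DWORD attr = GetFileAttributesW(path);
      if (attr == INVALID_FILE_ATTRIBUTES || !(attr & FILE_ATTRIBUTE_READONLY))
         return FALSE;

      if (!SetFileAttributesW(path, attr & ~FILE_ATTRIBUTE_READONLY))
         return FALSE;
      if (DeleteFileW(path)) return TRUE;

      SetFileAttributesW(path, attr);   /* restore, mirroring the code above */
      return FALSE;
   }

   int main(void)
   {
      return delete_even_if_readonly(L"C:\\Temp\\stale.tmp") ? 0 : 1;
   }
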
+ */ +bool +GetApplicationName(const char *cmdline, char **pexe, const char **pargs) +{ + const char *pExeStart = NULL; /* Start of executable name in cmdline */ + const char *pExeEnd = NULL; /* Character after executable name (separator) */ + + const char *pBasename = NULL; /* Character after last path separator */ + const char *pExtension = NULL; /* Period at start of extension */ + + const char *current = cmdline; + + bool bQuoted = false; + + /* Skip initial whitespace */ + + while (*current == ' ' || *current == '\t') + { + current++; + } + + /* Calculate start of name and determine if quoted */ + + if (*current == '"') { + pExeStart = ++current; + bQuoted = true; + } else { + pExeStart = current; + bQuoted = false; + } + + *pargs = NULL; + *pexe = NULL; + + /* + * Scan command line looking for path separators (/ and \\) and the + * terminator, either a quote or a blank. The location of the + * extension is also noted. + */ + + for ( ; *current != '\0'; current++) + { + if (*current == '.') { + pExtension = current; + } else if (IsPathSeparator(*current) && current[1] != '\0') { + pBasename = ¤t[1]; + pExtension = NULL; + } + + /* Check for terminator, either quote or blank */ + if (bQuoted) { + if (*current != '"') { + continue; + } + } else { + if (*current != ' ') { + continue; + } + } + + /* + * Hit terminator, remember end of name (address of terminator) and + * start of arguments + */ + pExeEnd = current; + + if (bQuoted && *current == '"') { + *pargs = ¤t[1]; + } else { + *pargs = current; + } + + break; + } + + if (pBasename == NULL) { + pBasename = pExeStart; + } + + if (pExeEnd == NULL) { + pExeEnd = current; + } + + if (*pargs == NULL) + { + *pargs = current; + } + + bool bHasPathSeparators = pExeStart != pBasename; + + /* We have pointers to all the useful parts of the name */ + + /* Default extensions in the order cmd.exe uses to search */ + + static const char ExtensionList[][5] = { ".com", ".exe", ".bat", ".cmd" }; + DWORD dwBasePathLength = pExeEnd - pExeStart; + + DWORD dwAltNameLength = 0; + char *pPathname = (char *)alloca(MAX_PATHLENGTH + 1); + char *pAltPathname = (char *)alloca(MAX_PATHLENGTH + 1); + + pPathname[MAX_PATHLENGTH] = '\0'; + pAltPathname[MAX_PATHLENGTH] = '\0'; + + memcpy(pPathname, pExeStart, dwBasePathLength); + pPathname[dwBasePathLength] = '\0'; + + if (pExtension == NULL) { + /* Try appending extensions */ + for (int index = 0; index < (int)(sizeof(ExtensionList) / sizeof(ExtensionList[0])); index++) { + + if (!bHasPathSeparators) { + /* There are no path separators, search in the standard locations */ + dwAltNameLength = SearchPath(NULL, pPathname, ExtensionList[index], MAX_PATHLENGTH, pAltPathname, NULL); + if (dwAltNameLength > 0 && dwAltNameLength <= MAX_PATHLENGTH) { + memcpy(pPathname, pAltPathname, dwAltNameLength); + pPathname[dwAltNameLength] = '\0'; + break; + } + } else { + bstrncpy(&pPathname[dwBasePathLength], ExtensionList[index], MAX_PATHLENGTH - dwBasePathLength); + if (GetFileAttributes(pPathname) != INVALID_FILE_ATTRIBUTES) { + break; + } + pPathname[dwBasePathLength] = '\0'; + } + } + } else if (!bHasPathSeparators) { + /* There are no path separators, search in the standard locations */ + dwAltNameLength = SearchPath(NULL, pPathname, NULL, MAX_PATHLENGTH, pAltPathname, NULL); + if (dwAltNameLength > 0 && dwAltNameLength < MAX_PATHLENGTH) { + memcpy(pPathname, pAltPathname, dwAltNameLength); + pPathname[dwAltNameLength] = '\0'; + } + } + + if (strchr(pPathname, ' ') != NULL) { + dwAltNameLength = GetShortPathName(pPathname, 
pAltPathname, MAX_PATHLENGTH); + + if (dwAltNameLength > 0 && dwAltNameLength <= MAX_PATHLENGTH) { + *pexe = (char *)malloc(dwAltNameLength + 1); + if (*pexe == NULL) { + return false; + } + memcpy(*pexe, pAltPathname, dwAltNameLength + 1); + } + } + + if (*pexe == NULL) { + DWORD dwPathnameLength = strlen(pPathname); + *pexe = (char *)malloc(dwPathnameLength + 1); + if (*pexe == NULL) { + return false; + } + memcpy(*pexe, pPathname, dwPathnameLength + 1); + } + + return true; +} + +/** + * Create the process with WCHAR API + */ +static BOOL +CreateChildProcessW(const char *comspec, const char *cmdLine, + PROCESS_INFORMATION *hProcInfo, + HANDLE in, HANDLE out, HANDLE err) +{ + STARTUPINFOW siStartInfo; + BOOL bFuncRetn = FALSE; + + // Set up members of the STARTUPINFO structure. + ZeroMemory( &siStartInfo, sizeof(siStartInfo) ); + siStartInfo.cb = sizeof(siStartInfo); + // setup new process to use supplied handles for stdin,stdout,stderr + + siStartInfo.dwFlags = STARTF_USESTDHANDLES | STARTF_USESHOWWINDOW; + siStartInfo.wShowWindow = SW_SHOWMINNOACTIVE; + + siStartInfo.hStdInput = in; + siStartInfo.hStdOutput = out; + siStartInfo.hStdError = err; + + // Convert argument to WCHAR + POOLMEM *cmdLine_wchar = get_pool_memory(PM_FNAME); + POOLMEM *comspec_wchar = get_pool_memory(PM_FNAME); + + UTF8_2_wchar(&cmdLine_wchar, cmdLine); + UTF8_2_wchar(&comspec_wchar, comspec); + + // Create the child process. + Dmsg2(dbglvl, "Calling CreateProcess(%s, %s, ...)\n", comspec_wchar, cmdLine_wchar); + + // try to execute program + bFuncRetn = p_CreateProcessW((WCHAR*)comspec_wchar, + (WCHAR*)cmdLine_wchar,// command line + NULL, // process security attributes + NULL, // primary thread security attributes + TRUE, // handles are inherited + 0, // creation flags + NULL, // use parent's environment + NULL, // use parent's current directory + &siStartInfo, // STARTUPINFO pointer + hProcInfo); // receives PROCESS_INFORMATION + free_pool_memory(cmdLine_wchar); + free_pool_memory(comspec_wchar); + + return bFuncRetn; +} + + +/** + * Create the process with ANSI API + */ +static BOOL +CreateChildProcessA(const char *comspec, char *cmdLine, + PROCESS_INFORMATION *hProcInfo, + HANDLE in, HANDLE out, HANDLE err) +{ + STARTUPINFOA siStartInfo; + BOOL bFuncRetn = FALSE; + + // Set up members of the STARTUPINFO structure. + ZeroMemory( &siStartInfo, sizeof(siStartInfo) ); + siStartInfo.cb = sizeof(siStartInfo); + // setup new process to use supplied handles for stdin,stdout,stderr + siStartInfo.dwFlags = STARTF_USESTDHANDLES | STARTF_USESHOWWINDOW; + siStartInfo.wShowWindow = SW_SHOWMINNOACTIVE; + + siStartInfo.hStdInput = in; + siStartInfo.hStdOutput = out; + siStartInfo.hStdError = err; + + // Create the child process. + Dmsg2(dbglvl, "Calling CreateProcess(%s, %s, ...)\n", comspec, cmdLine); + + // try to execute program + bFuncRetn = p_CreateProcessA(comspec, + cmdLine, // command line + NULL, // process security attributes + NULL, // primary thread security attributes + TRUE, // handles are inherited + 0, // creation flags + NULL, // use parent's environment + NULL, // use parent's current directory + &siStartInfo,// STARTUPINFO pointer + hProcInfo);// receives PROCESS_INFORMATION + return bFuncRetn; +} + +/** + * OK, so it would seem CreateProcess only handles true executables: + * .com or .exe files. So grab $COMSPEC value and pass command line to it. 
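
[Sketch, not part of the patch] GetApplicationName() above handles two CreateProcess() quirks: the executable may be named without an extension (so it tries .com/.exe/.bat/.cmd via SearchPath()), and a path containing blanks is converted to its 8.3 short form with GetShortPathName() so it can be passed unquoted. The two calls in isolation; the program name is just an example:

   #include <windows.h>
   #include <stdio.h>

   int main(void)
   {
      char found[MAX_PATH], shortname[MAX_PATH];

      /* Resolve a bare program name against PATH, appending ".exe". */
      DWORD n = SearchPathA(NULL, "notepad", ".exe", MAX_PATH, found, NULL);
      if (n == 0 || n >= MAX_PATH) return 1;
      printf("resolved: %s\n", found);

      /* Collapse any blanks in the path into the 8.3 short form. */
      n = GetShortPathNameA(found, shortname, MAX_PATH);
      if (n == 0 || n >= MAX_PATH) return 1;
      printf("short:    %s\n", shortname);
      return 0;
   }
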
+ */ +HANDLE +CreateChildProcess(const char *cmdline, HANDLE in, HANDLE out, HANDLE err) +{ + static const char *comspec = NULL; + PROCESS_INFORMATION piProcInfo; + BOOL bFuncRetn = FALSE; + + if (!p_CreateProcessA || !p_CreateProcessW) + return INVALID_HANDLE_VALUE; + + if (comspec == NULL) + comspec = getenv("COMSPEC"); + if (comspec == NULL) // should never happen + return INVALID_HANDLE_VALUE; + + // Set up members of the PROCESS_INFORMATION structure. + ZeroMemory( &piProcInfo, sizeof(PROCESS_INFORMATION) ); + + // if supplied handles are not used the send a copy of our STD_HANDLE + // as appropriate + if (in == INVALID_HANDLE_VALUE) + in = GetStdHandle(STD_INPUT_HANDLE); + + if (out == INVALID_HANDLE_VALUE) + out = GetStdHandle(STD_OUTPUT_HANDLE); + + if (err == INVALID_HANDLE_VALUE) + err = GetStdHandle(STD_ERROR_HANDLE); + + char *exeFile; + const char *argStart; + + if (!GetApplicationName(cmdline, &exeFile, &argStart)) { + return INVALID_HANDLE_VALUE; + } + + POOL_MEM cmdLine(PM_FNAME); + Mmsg(cmdLine, "%s /c %s%s", comspec, exeFile, argStart); + + free(exeFile); + + // New function disabled + if (p_CreateProcessW && p_MultiByteToWideChar) { + bFuncRetn = CreateChildProcessW(comspec, cmdLine.c_str(), &piProcInfo, + in, out, err); + } else { + bFuncRetn = CreateChildProcessA(comspec, cmdLine.c_str(), &piProcInfo, + in, out, err); + } + + if (bFuncRetn == 0) { + ErrorExit("CreateProcess failed\n"); + Dmsg2(dbglvl, " CreateProcess(%s, %s) failed\n",comspec,cmdLine.c_str()); + return INVALID_HANDLE_VALUE; + } + // we don't need a handle on the process primary thread so we close + // this now. + CloseHandle(piProcInfo.hThread); + return piProcInfo.hProcess; +} + +void +ErrorExit (LPCSTR lpszMessage) +{ + const char *err = errorString(); + Dmsg2(dbglvl, "%s: %s", lpszMessage, err); + LocalFree((void *)err); + errno = b_errno_win32; +} + + +/* +typedef struct s_bpipe { + pid_t worker_pid; + time_t worker_stime; + int wait; + btimer_t *timer_id; + FILE *rfd; + FILE *wfd; +} BPIPE; +*/ + +static void +CloseHandleIfValid(HANDLE handle) +{ + if (handle != INVALID_HANDLE_VALUE) { + CloseHandle(handle); + } +} + +BPIPE * +open_bpipe(char *prog, int wait, const char *mode, char *envp[]) +{ + HANDLE hChildStdinRd, hChildStdinWr, hChildStdinWrDup, + hChildStdoutRd, hChildStdoutWr, hChildStdoutRdDup, + hInputFile; + + SECURITY_ATTRIBUTES saAttr; + + BOOL fSuccess; + + hChildStdinRd = hChildStdinWr = hChildStdinWrDup = + hChildStdoutRd = hChildStdoutWr = hChildStdoutRdDup = + hInputFile = INVALID_HANDLE_VALUE; + + BPIPE *bpipe = (BPIPE *)malloc(sizeof(BPIPE)); + memset((void *)bpipe, 0, sizeof(BPIPE)); + + int mode_read = (mode[0] == 'r'); + int mode_write = (mode[0] == 'w' || mode[1] == 'w'); + + + // Set the bInheritHandle flag so pipe handles are inherited. + + saAttr.nLength = sizeof(SECURITY_ATTRIBUTES); + saAttr.bInheritHandle = TRUE; + saAttr.lpSecurityDescriptor = NULL; + + if (mode_read) { + + // Create a pipe for the child process's STDOUT. + if (! CreatePipe(&hChildStdoutRd, &hChildStdoutWr, &saAttr, 0)) { + ErrorExit("Stdout pipe creation failed\n"); + goto cleanup; + } + // Create noninheritable read handle and close the inheritable read + // handle. 
+ + fSuccess = DuplicateHandle(GetCurrentProcess(), hChildStdoutRd, + GetCurrentProcess(), &hChildStdoutRdDup , 0, + FALSE, + DUPLICATE_SAME_ACCESS); + if ( !fSuccess ) { + ErrorExit("DuplicateHandle failed"); + goto cleanup; + } + + CloseHandle(hChildStdoutRd); + hChildStdoutRd = INVALID_HANDLE_VALUE; + } + + if (mode_write) { + + // Create a pipe for the child process's STDIN. + + if (!CreatePipe(&hChildStdinRd, &hChildStdinWr, &saAttr, 0)) { + ErrorExit("Stdin pipe creation failed\n"); + goto cleanup; + } + + // Duplicate the write handle to the pipe so it is not inherited. + fSuccess = DuplicateHandle(GetCurrentProcess(), hChildStdinWr, + GetCurrentProcess(), &hChildStdinWrDup, + 0, + FALSE, // not inherited + DUPLICATE_SAME_ACCESS); + if (!fSuccess) { + ErrorExit("DuplicateHandle failed"); + goto cleanup; + } + + CloseHandle(hChildStdinWr); + hChildStdinWr = INVALID_HANDLE_VALUE; + } + // spawn program with redirected handles as appropriate + bpipe->worker_pid = (pid_t) + CreateChildProcess(prog, // commandline + hChildStdinRd, // stdin HANDLE + hChildStdoutWr, // stdout HANDLE + hChildStdoutWr); // stderr HANDLE + + if ((HANDLE) bpipe->worker_pid == INVALID_HANDLE_VALUE) { + ErrorExit("CreateChildProcess failed"); + goto cleanup; + } + + bpipe->wait = wait; + bpipe->worker_stime = time(NULL); + + if (mode_read) { + CloseHandle(hChildStdoutWr); // close our write side so when + // process terminates we can + // detect eof. + // ugly but convert WIN32 HANDLE to FILE* + int rfd = _open_osfhandle((intptr_t)hChildStdoutRdDup, O_RDONLY | O_BINARY); + if (rfd >= 0) { + bpipe->rfd = _fdopen(rfd, "rb"); + } + } + if (mode_write) { + CloseHandle(hChildStdinRd); // close our read side so as not + // to interfre with child's copy + // ugly but convert WIN32 HANDLE to FILE* + int wfd = _open_osfhandle((intptr_t)hChildStdinWrDup, O_WRONLY | O_BINARY); + if (wfd >= 0) { + bpipe->wfd = _fdopen(wfd, "wb"); + } + } + + if (wait > 0) { + bpipe->timer_id = start_child_timer(NULL, bpipe->worker_pid, wait); + } + + return bpipe; + +cleanup: + + CloseHandleIfValid(hChildStdoutWr); + CloseHandleIfValid(hChildStdoutRd); + CloseHandleIfValid(hChildStdoutRdDup); + CloseHandleIfValid(hChildStdinWr); + CloseHandleIfValid(hChildStdinRd); + CloseHandleIfValid(hChildStdinWrDup); + + free((void *)bpipe); + errno = b_errno_win32; /* do GetLastError() for error code */ + return NULL; +} + + +int +kill(pid_t pid, int signal) +{ + int rval = 0; + if (!TerminateProcess((HANDLE)pid, (UINT)signal)) { + rval = -1; + errno = b_errno_win32; + } + CloseHandle((HANDLE)pid); + return rval; +} + + +int +close_bpipe(BPIPE *bpipe) +{ + int rval = 0; + int32_t remaining_wait = bpipe->wait; + + /* Close pipes */ + if (bpipe->rfd) { + fclose(bpipe->rfd); + bpipe->rfd = NULL; + } + if (bpipe->wfd) { + fclose(bpipe->wfd); + bpipe->wfd = NULL; + } + + if (remaining_wait == 0) { /* wait indefinitely */ + remaining_wait = INT32_MAX; + } + for ( ;; ) { + DWORD exitCode; + if (!GetExitCodeProcess((HANDLE)bpipe->worker_pid, &exitCode)) { + const char *err = errorString(); + rval = b_errno_win32; + Dmsg1(dbglvl, "GetExitCode error %s\n", err); + LocalFree((void *)err); + break; + } + if (exitCode == STILL_ACTIVE) { + if (remaining_wait <= 0) { + rval = ETIME; /* timed out */ + break; + } + bmicrosleep(1, 0); /* wait one second */ + remaining_wait--; + } else if (exitCode != 0) { + /* Truncate exit code as it doesn't seem to be correct */ + rval = (exitCode & 0xFF) | b_errno_exit; + break; + } else { + break; /* Shouldn't get here */ + } + } + 
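
[Sketch, not part of the patch] The heart of open_bpipe() above is: create an anonymous pipe with inheritable handles, duplicate the parent's end as non-inheritable (so the child does not hold it open and EOF can be detected), hand the other end to the child as a std handle, then wrap the parent's raw HANDLE in a FILE* via _open_osfhandle()/_fdopen(). The sketch below shows only that last wrapping step, on a pipe kept within a single process:

   #include <windows.h>
   #include <io.h>
   #include <fcntl.h>
   #include <stdint.h>
   #include <stdio.h>

   int main(void)
   {
      HANDLE rd, wr;
      SECURITY_ATTRIBUTES sa = { sizeof(sa), NULL, TRUE };   /* inheritable */

      if (!CreatePipe(&rd, &wr, &sa, 0)) return 1;

      /* Wrap the Win32 HANDLEs as C runtime FILE* streams, as open_bpipe() does. */
      FILE *wf = _fdopen(_open_osfhandle((intptr_t)wr, _O_WRONLY | _O_BINARY), "wb");
      FILE *rf = _fdopen(_open_osfhandle((intptr_t)rd, _O_RDONLY | _O_BINARY), "rb");
      if (!wf || !rf) return 1;

      fputs("hello through the pipe\n", wf);
      fclose(wf);                        /* closing the writer produces EOF */

      char line[64];
      if (fgets(line, sizeof(line), rf)) fputs(line, stdout);
      fclose(rf);
      return 0;
   }
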
+ if (bpipe->timer_id) { + stop_child_timer(bpipe->timer_id); + } + if (bpipe->rfd) fclose(bpipe->rfd); + if (bpipe->wfd) fclose(bpipe->wfd); + free((void *)bpipe); + return rval; +} + +int +close_wpipe(BPIPE *bpipe) +{ + int result = 1; + + if (bpipe->wfd) { + fflush(bpipe->wfd); + if (fclose(bpipe->wfd) != 0) { + result = 0; + } + bpipe->wfd = NULL; + } + return result; +} + +#ifndef MINGW64 +int +utime(const char *fname, struct utimbuf *times) +{ + FILETIME acc, mod; + char tmpbuf[5000]; + + conv_unix_to_vss_win32_path(fname, tmpbuf, 5000); + + cvt_utime_to_ftime(times->actime, acc); + cvt_utime_to_ftime(times->modtime, mod); + + HANDLE h = INVALID_HANDLE_VALUE; + + if (p_CreateFileW) { + POOLMEM* pwszBuf = get_pool_memory(PM_FNAME); + make_win32_path_UTF8_2_wchar(&pwszBuf, tmpbuf); + + h = p_CreateFileW((LPCWSTR)pwszBuf, + FILE_WRITE_ATTRIBUTES, + FILE_SHARE_WRITE|FILE_SHARE_READ|FILE_SHARE_DELETE, + NULL, + OPEN_EXISTING, + FILE_FLAG_BACKUP_SEMANTICS, // required for directories + NULL); + + free_pool_memory(pwszBuf); + } else if (p_CreateFileA) { + h = p_CreateFileA(tmpbuf, + FILE_WRITE_ATTRIBUTES, + FILE_SHARE_WRITE|FILE_SHARE_READ|FILE_SHARE_DELETE, + NULL, + OPEN_EXISTING, + FILE_FLAG_BACKUP_SEMANTICS, // required for directories + NULL); + } + + if (h == INVALID_HANDLE_VALUE) { + const char *err = errorString(); + Dmsg2(dbglvl, "Cannot open file \"%s\" for utime(): ERR=%s", tmpbuf, err); + LocalFree((void *)err); + errno = b_errno_win32; + return -1; + } + + int rval = SetFileTime(h, NULL, &acc, &mod) ? 0 : -1; + CloseHandle(h); + if (rval == -1) { + errno = b_errno_win32; + } + return rval; +} +#endif + +#if 0 +int +file_open(const char *file, int flags, int mode) +{ + DWORD access = 0; + DWORD shareMode = 0; + DWORD create = 0; + DWORD msflags = 0; + HANDLE foo = INVALID_HANDLE_VALUE; + const char *remap = file; + + if (flags & O_WRONLY) access = GENERIC_WRITE; + else if (flags & O_RDWR) access = GENERIC_READ|GENERIC_WRITE; + else access = GENERIC_READ; + + if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) + create = CREATE_NEW; + else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC)) + create = CREATE_ALWAYS; + else if (flags & O_CREAT) + create = OPEN_ALWAYS; + else if (flags & O_TRUNC) + create = TRUNCATE_EXISTING; + else + create = OPEN_EXISTING; + + shareMode = 0; + + if (flags & O_APPEND) { + printf("open...APPEND not implemented yet."); + exit(-1); + } + + if (p_CreateFileW) { + POOLMEM* pwszBuf = get_pool_memory(PM_FNAME); + make_win32_path_UTF8_2_wchar(&pwszBuf, file); + + foo = p_CreateFileW((LPCWSTR) pwszBuf, access, shareMode, NULL, create, msflags, NULL); + free_pool_memory(pwszBuf); + } else if (p_CreateFileA) + foo = CreateFile(file, access, shareMode, NULL, create, msflags, NULL); + + if (INVALID_HANDLE_VALUE == foo) { + errno = b_errno_win32; + return (int)-1; + } + return (int)foo; + +} + + +int +file_close(int fd) +{ + if (!CloseHandle((HANDLE)fd)) { + errno = b_errno_win32; + return -1; + } + + return 0; +} + +ssize_t +file_write(int fd, const void *data, ssize_t len) +{ + BOOL status; + DWORD bwrite; + status = WriteFile((HANDLE)fd, data, len, &bwrite, NULL); + if (status) return bwrite; + errno = b_errno_win32; + return -1; +} + + +ssize_t +file_read(int fd, void *data, ssize_t len) +{ + BOOL status; + DWORD bread; + + status = ReadFile((HANDLE)fd, data, len, &bread, NULL); + if (status) return bread; + errno = b_errno_win32; + return -1; +} + +boffset_t +file_seek(int fd, boffset_t offset, int whence) +{ + DWORD method = 0; + DWORD val; + 
LONG offset_low = (LONG)offset; + LONG offset_high = (LONG)(offset >> 32); + + switch (whence) { + case SEEK_SET : + method = FILE_BEGIN; + break; + case SEEK_CUR: + method = FILE_CURRENT; + break; + case SEEK_END: + method = FILE_END; + break; + default: + errno = EINVAL; + return -1; + } + + + if ((val=SetFilePointer((HANDLE)fd, offset_low, &offset_high, method)) == INVALID_SET_FILE_POINTER) { + errno = b_errno_win32; + return -1; + } + /* ***FIXME*** I doubt this works right */ + return val; +} + +int +file_dup2(int, int) +{ + errno = ENOSYS; + return -1; +} +#endif + +#ifdef xxx +/* + * Emulation of mmap and unmmap for tokyo dbm + */ +void *mmap(void *start, size_t length, int prot, int flags, + int fd, off_t offset) +{ + DWORD fm_access = 0; + DWORD mv_access = 0; + HANDLE h; + HANDLE mv; + + if (length == 0) { + return MAP_FAILED; + } + if (!fd) { + return MAP_FAILED; + } + + if (flags & PROT_WRITE) { + fm_access |= PAGE_READWRITE; + } else if (flags & PROT_READ) { + fm_access |= PAGE_READONLY; + } + + if (flags & PROT_READ) { + mv_access |= FILE_MAP_READ; + } + if (flags & PROT_WRITE) { + mv_access |= FILE_MAP_WRITE; + } + + h = CreateFileMapping((HANDLE)_get_osfhandle (fd), + NULL /* security */, + fm_access, + 0 /* MaximumSizeHigh */, + 0 /* MaximumSizeLow */, + NULL /* name of the file mapping object */); + + if (!h || h == INVALID_HANDLE_VALUE) { + return MAP_FAILED; + } + + mv = MapViewOfFile(h, mv_access, + 0 /* offset hi */, + 0 /* offset lo */, + length); + CloseHandle(h); + + if (!mv || mv == INVALID_HANDLE_VALUE) { + return MAP_FAILED; + } + + return (void *) mv; +} + +int munmap(void *start, size_t length) +{ + if (!start) { + return -1; + } + UnmapViewOfFile(start); + return 0; +} +#endif + +#ifdef HAVE_MINGW +/* syslog function, added by Nicolas Boichat */ +void openlog(const char *ident, int option, int facility) {} +#endif //HAVE_MINGW + +/* Log an error message */ +void LogErrorMsg(const char *message) +{ + HANDLE eventHandler; + const char *strings[2]; + + /* Use the OS event logging to log the error */ + eventHandler = RegisterEventSource(NULL, "Bacula"); + + strings[0] = _("\n\nBacula ERROR: "); + strings[1] = message; + + if (eventHandler) { + ReportEvent(eventHandler, EVENTLOG_ERROR_TYPE, + 0, /* category */ + 0, /* ID */ + NULL, /* SID */ + 2, /* Number of strings */ + 0, /* raw data size */ + (const char **)strings, /* error strings */ + NULL); /* raw data */ + DeregisterEventSource(eventHandler); + } +} + +/* + * Don't allow OS to suspend while backup running + * Note, the OS automatically tracks these for each thread + */ +void prevent_os_suspensions() +{ + SetThreadExecutionState(ES_CONTINUOUS | ES_SYSTEM_REQUIRED); +} + +void allow_os_suspensions() +{ + SetThreadExecutionState(ES_CONTINUOUS); +} + +int mkstemp(char *t) +{ + char *filename = mktemp(t); + if (filename == NULL) { + return -1; + } + return open(filename, O_RDWR | O_CREAT, 0600); +} + +void malloc_trim(int) +{ + if (p_EmptyWorkingSet) { + HANDLE hProcess = GetCurrentProcess(); + if (!p_EmptyWorkingSet(hProcess)) { + const char *err = errorString(); + Dmsg1(dbglvl, "EmptyWorkingSet() = %s\n", err); + LocalFree((void *)err); + } + CloseHandle( hProcess ); + } +} + +uint64_t get_memory_info(char *buf, int buflen) +{ + char ed1[50], ed2[50], ed3[50], ed4[50]; + uint64_t ret=0; + HANDLE hProcess = GetCurrentProcess(); + PROCESS_MEMORY_COUNTERS pmc; + buf[0] = '\0'; + + if (p_GetProcessMemoryInfo) { + if (p_GetProcessMemoryInfo( hProcess, &pmc, sizeof(pmc))) { + bsnprintf(buf, buflen, + 
"WorkingSetSize: %s QuotaPagedPoolUsage: %s QuotaNonPagedPoolUsage: %s PagefileUsage: %s", + edit_uint64_with_commas(pmc.WorkingSetSize, ed1), + edit_uint64_with_commas(pmc.QuotaPagedPoolUsage, ed2), + edit_uint64_with_commas(pmc.QuotaNonPagedPoolUsage, ed3), + edit_uint64_with_commas(pmc.PagefileUsage, ed4)); + ret = pmc.WorkingSetSize; + + } else { + const char *err = errorString(); + bsnprintf(buf, buflen, "%s", err); + LocalFree((void *)err); + } + } + + CloseHandle( hProcess ); + return ret; +} diff --git a/src/win32/compat/compat.h b/src/win32/compat/compat.h new file mode 100644 index 00000000..aefd30e8 --- /dev/null +++ b/src/win32/compat/compat.h @@ -0,0 +1,469 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2019 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* -*- Mode: C -*- + * compat.h -- + */ +// Copyright transferred from Raider Solutions, Inc to +// Kern Sibbald and John Walker by express permission. +// +/* + * Author : Christopher S. Hull + * Created On : Fri Jan 30 13:00:51 2004 + */ + +#include +#include + +#if !defined(__COMPAT_H_) +#define __COMPAT_H_ +#if !defined(_STAT_H) +#define _STAT_H /* don't pull in MinGW stat.h */ +#endif + +#ifndef _STAT_DEFINED +#define _STAT_DEFINED 1 /* don't pull in MinGW struct stat from wchar.h */ +#endif + +#if defined(_MSC_VER) && (_MSC_VER >= 1400) // VC8+ +#pragma warning(disable : 4996) // Either disable all deprecation warnings, +// #define _CRT_SECURE_NO_DEPRECATE // Or just turn off warnings about the newly deprecated CRT functions. +#elif !defined(HAVE_MINGW) && !defined(HAVE_WXCONSOLE) +#define __STDC__ 1 +#endif + +#include + +#ifdef HAVE_MINGW_W64 +/* Was defined in pthread.h before */ +# define localtime_r( _clock, _result ) \ + ( *(_result) = *localtime( (_clock) ), \ + (_result) ) + +# define strtok_r( _s, _sep, _lasts ) \ + ( *(_lasts) = strtok( (_s), (_sep) ) ) + +# define asctime_r( _tm, _buf ) \ + ( strcpy( (_buf), asctime( (_tm) ) ), \ + (_buf) ) + +# define ctime_r( _clock, _buf ) \ + ( strcpy( (_buf), ctime( (_clock) ) ), \ + (_buf) ) + +# define gmtime_r( _clock, _result ) \ + ( *(_result) = *gmtime( (_clock) ), \ + (_result) ) + +#define rand_r( _seed ) \ + ( _seed == _seed? 
rand() : rand() ) + +#endif +#ifdef MINGW64 +#include +#include +#define _declspec __declspec +#endif + +#include + +#ifdef _WIN64 +# define GWL_USERDATA GWLP_USERDATA +#endif + +#ifndef INT64 +#define INT64 long long int +#endif + +typedef UINT64 u_int64_t; +typedef UINT64 uint64_t; +typedef INT64 int64_t; +typedef UINT32 uint32_t; +typedef INT64 intmax_t; +typedef unsigned char uint8_t; +typedef unsigned short uint16_t; +typedef signed short int16_t; +typedef signed char int8_t; +typedef int __daddr_t; + +#if !defined(HAVE_MINGW) +typedef long int32_t; +typedef float float32_t; +typedef double float64_t; +#endif + +#if !defined(_MSC_VER) || (_MSC_VER < 1400) // VC8+ +#ifndef _TIME_T_DEFINED +#define _TIME_T_DEFINED +typedef long time_t; +#endif +#endif + +#if __STDC__ && !defined(HAVE_MINGW) +typedef _dev_t dev_t; +#if !defined(HAVE_WXCONSOLE) +typedef __int64 ino_t; +#endif +#endif + +typedef UINT32 u_int32_t; +typedef unsigned char u_int8_t; +typedef unsigned short u_int16_t; + +#if !defined(HAVE_MINGW) +#undef uint32_t +#endif + +void sleep(int); + +typedef UINT32 key_t; + +#if defined(HAVE_MINGW) +#if !defined(uid_t) +typedef UINT32 uid_t; +typedef UINT32 gid_t; +#endif +#else +typedef UINT32 uid_t; +typedef UINT32 gid_t; +typedef UINT32 mode_t; +typedef INT32 ssize_t; +typedef UINT32 size_t; +#define HAVE_SSIZE_T 1 + +#endif /* HAVE_MINGW */ + +struct dirent { + uint64_t d_ino; + uint32_t d_off; + uint16_t d_reclen; + char d_name[256]; +}; +typedef void DIR; + + +#if !defined(__cplusplus) +#if !defined(true) +#define true 1 +#endif +#if !defined(false) +#define false 0 +#endif +#endif + +#ifndef _TIMEZONE_DEFINED /* also in sys/time.h */ +#define _TIMEZONE_DEFINED +struct timezone { + int foo; +}; +#endif + +int strcasecmp(const char*, const char *); +int gettimeofday(struct timeval *, struct timezone *); + +#if !defined(EETXTBUSY) +#define EETXTBUSY 26 +#endif + +#if !defined(ETIMEDOUT) +#define ETIMEDOUT 55 +#endif + +#if !defined(ENOMEDIUM) +#define ENOMEDIUM 123 +#endif + +#if !defined(ENODATA) +#define ENODATA 61 +#endif + +/* + * Stat packet that we use for Windows + */ +struct stat +{ + _dev_t st_dev; + uint64_t st_ino; + uint16_t st_mode; + int16_t st_nlink; + uint32_t st_uid; + uint32_t st_gid; + _dev_t st_rdev; + uint64_t st_size; + time_t st_atime; + time_t st_mtime; + time_t st_ctime; + uint32_t st_blksize; + uint64_t st_blocks; + uint32_t st_fattrs; /* Windows file attributes */ +}; + +#ifndef SOCK_CLOEXEC +#define SOCK_CLOEXEC 0x00000000 +#endif + +#undef S_IFMT +#define S_IFMT 0170000 /* file type mask */ +#undef S_IFDIR +#define S_IFDIR 0040000 /* directory */ +#define S_IFCHR 0020000 /* character special */ +#define S_IFBLK 0060000 /* block special */ +#define S_IFIFO 0010000 /* pipe */ +#undef S_IFREG +#define S_IFREG 0100000 /* regular */ +#define S_IREAD 0000400 /* read permission, owner */ +#define S_IWRITE 0000200 /* write permission, owner */ +#define S_IEXEC 0000100 /* execute/search permission, owner */ + +#define S_IRUSR S_IREAD +#define S_IWUSR S_IWRITE +#define S_IXUSR S_IEXEC +#define S_ISREG(x) (((x) & S_IFMT) == S_IFREG) +#define S_ISDIR(x) (((x) & S_IFMT) == S_IFDIR) +#define S_ISCHR(x) 0 +#define S_ISBLK(x) (((x) & S_IFMT) == S_IFBLK) +#define S_ISFIFO(x) 0 + +#define S_IRGRP 000040 +#define S_IWGRP 000020 +#define S_IXGRP 000010 + +#define S_IROTH 00004 +#define S_IWOTH 00002 +#define S_IXOTH 00001 + +#define S_IRWXO 000007 +#define S_IRWXG 000070 +#define S_ISUID 004000 +#define S_ISGID 002000 +#define S_ISVTX 001000 +#define S_ISSOCK(x) 0 +#define 
S_ISLNK(x) 0 + +#if __STDC__ +#define O_RDONLY _O_RDONLY +#define O_WRONLY _O_WRONLY +#define O_RDWR _O_RDWR +#define O_CREAT _O_CREAT +#define O_TRUNC _O_TRUNC + +#define isascii __isascii +#define toascii __toascii +#define iscsymf __iscsymf +#define iscsym __iscsym +#endif + +typedef BOOL (*t_pVSSPathConvert)(const char *szFilePath, char *szShadowPath, int nBuflen); +typedef BOOL (*t_pVSSPathConvertW)(const wchar_t *szFilePath, wchar_t *szShadowPath, int nBuflen); +/* To know if we can use the VSSPathConvert function */ +typedef BOOL (*t_pVSSPathConverter)(); + +void SetVSSPathConvert(t_pVSSPathConverter pPathConverter, t_pVSSPathConvert pPathConvert, t_pVSSPathConvertW pPathConvertW); + +int lchown(const char *, uid_t uid, gid_t gid); +int chown(const char *, uid_t uid, gid_t gid); +#if !defined(HAVE_MINGW) +int chmod(const char *, mode_t mode); +#endif +#define O_NONBLOCK 04000 +#define F_GETFL 3 +#define F_SETFL 4 + +#ifndef MINGW64 +#define open _open +#endif + +int fcntl(int fd, int cmd, long arg); +int fstat(intptr_t fd, struct stat *sb); + +int inet_aton(const char *cp, struct in_addr *inp); +int binet_pton(int af, const char *src, void *dst); +int kill(pid_t pid, int signo); +int pipe(int []); +int fork(); +int waitpid(int, int *, int); + +#if !defined(HAVE_MINGW) +#define strncasecmp strnicmp +//int strncasecmp(const char*, const char *, int); +int utime(const char *filename, struct utimbuf *buf); +#define vsnprintf _vsnprintf +#define snprintf _snprintf +#endif //HAVE_MINGW + + +#define WNOHANG 0 +#define WIFEXITED(x) 0 +#define WEXITSTATUS(x) x +#define WIFSIGNALED(x) 0 +#define WTERMSIG(x) x +#define SIGKILL 9 +#define SIGUSR2 9999 + +#define HAVE_OLD_SOCKOPT + +struct timespec; +int readdir(unsigned int fd, struct dirent *dirp, unsigned int count); +int nanosleep(const struct timespec*, struct timespec *); +long int random(void); +void srandom(unsigned int seed); +int lstat(const char *, struct stat *); +int stat(const char *file, struct stat *sb); +long pathconf(const char *, int); +int readlink(const char *, char *, int); +int symlink(const char *path1, const char *path2); +#define _PC_PATH_MAX 1 +#define _PC_NAME_MAX 2 + +int geteuid(); + +DIR *opendir(const char *name); +int closedir(DIR *dir); + +struct passwd { + char *foo; +}; + +struct group { + char *foo; +}; + +struct passwd *getpwuid(uid_t); +struct group *getgrgid(uid_t); + +#ifdef xxx_needed +struct sigaction { + int sa_flags; + void (*sa_handler)(int); +}; +#define sigfillset(x) +#define sigaction(a, b, c) +#endif + +#define mkdir(p, m) win32_mkdir(p) +#define unlink win32_unlink +#define chdir win32_chdir +#define chmod win32_chmod +extern void syslog(int type, const char *fmt, ...); +#if !defined(LOG_DAEMON) +#define LOG_DAEMON 0 +#endif + +#if !defined(HAVE_MINGW) +#define R_OK 04 +#define W_OK 02 +int stat(const char *, struct stat *); +#if defined(__cplusplus) +#define access _access +extern "C" _CRTIMP int __cdecl _access(const char *, int); +int execvp(const char *, char *[]); +extern "C" void * __cdecl _alloca(size_t); +#endif +#endif //HAVE_MINGW + +#define getpid _getpid + +#define getppid() 0 +#define gethostid() 0 +#define getuid() 0 +#define getgid() 0 + +#define getcwd win32_getcwd +#define chdir win32_chdir +#define chmod win32_chmod +#define fputs win32_fputs +char *win32_getcwd(char *buf, int maxlen); +int win32_chdir(const char *buf); +int win32_mkdir(const char *buf); +int win32_fputs(const char *string, FILE *stream); +int win32_unlink(const char *filename); +int win32_chmod(const char *, 
mode_t); + + +char* win32_cgets (char* buffer, int len); + +int WSA_Init(void); +void Win32ConvCleanupCache(); + +#if defined(HAVE_MINGW) +void closelog(); +void openlog(const char *ident, int option, int facility); +#endif //HAVE_MINGW + +typedef DWORD EXECUTION_STATE; +#ifndef ES_CONTINUOUS +#define ES_CONTINUOUS 0x80000000 +#endif +#ifndef ES_SYSTEM_REQUIRED +#define ES_SYSTEM_REQUIRED 0x00000001 +#endif +#ifndef ES_DISPLAY_REQUIRED +#define ES_DISPLAY_REQUIRED 0x00000002 +#endif +#ifndef ES_USER_PRESENT +# define ES_USER_PRESENT 0x00000004 +#endif + +WINBASEAPI EXECUTION_STATE WINAPI SetThreadExecutionState(EXECUTION_STATE esFlags); + + +extern void LogErrorMsg(const char *message); + +#if !defined(INVALID_FILE_ATTRIBUTES) +#define INVALID_FILE_ATTRIBUTES ((DWORD)-1) +#endif + +/* + * Since the FileAttributes flag in Windows does not hold the + * dedup bit, we simplify the code by defining our own + * bit. + */ +#define FILE_ATTRIBUTE_DEDUP 0x80000000 +#define CREATE_FOR_EXPORT 0 /* default in OpenEncryptedFileRaw */ + +#if defined(_MSC_VER) +inline unsigned long ffs(unsigned long word) +{ + unsigned long index; + + if (_BitScanForward(&index, word) != 0) + return index + 1; + else + return 0; +} + +#else +#define ffs __builtin_ffs +#endif + + +int win32_ftruncate(int fd, int64_t length); +int mkstemp(char *t); + +void malloc_trim(int); +uint64_t get_memory_info(char *buf, int buflen); + +#undef ftruncate +#define ftruncate win32_ftruncate + +#ifndef O_DIRECTORY +#define O_DIRECTORY 0100000000 +#endif +#define O_ENCRYPTED 0200000000 + +#endif /* __COMPAT_H_ */ diff --git a/src/win32/compat/dirent.h b/src/win32/compat/dirent.h new file mode 100644 index 00000000..e69de29b diff --git a/src/win32/compat/dlfcn.h b/src/win32/compat/dlfcn.h new file mode 100644 index 00000000..8f6777ea --- /dev/null +++ b/src/win32/compat/dlfcn.h @@ -0,0 +1,33 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2019 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Written by Kern Sibbald, February 2008 + */ + +#ifndef __DLFCN_H_ +#define __DLFCN_H_ + +#define RTDL_NOW 2 + +void *dlopen(const char *file, int mode); +void *dlsym(void *handle, const char *name); +int dlclose(void *handle); +char *dlerror(void); + +#endif /* __DLFCN_H_ */ diff --git a/src/win32/compat/getopt.c b/src/win32/compat/getopt.c new file mode 100644 index 00000000..ea293b12 --- /dev/null +++ b/src/win32/compat/getopt.c @@ -0,0 +1,187 @@ +/***************************************************************************** + * + * MODULE NAME : GETOPT.C + * + * COPYRIGHTS: + * This module contains code made available by IBM + * Corporation on an AS IS basis. Any one receiving the + * module is considered to be licensed under IBM copyrights + * to use the IBM-provided source code in any way he or she + * deems fit, including copying it, compiling it, modifying + * it, and redistributing it, with or without + * modifications. 
No license under any IBM patents or + * patent applications is to be implied from this copyright + * license. + * + * A user of the module should understand that IBM cannot + * provide technical support for the module and will not be + * responsible for any consequences of use of the program. + * + * Any notices, including this one, are not to be removed + * from the module without the prior written consent of + * IBM. + * + * AUTHOR: Original author: + * G. R. Blair (BOBBLAIR at AUSVM1) + * Internet: bobblair@bobblair.austin.ibm.com + * + * Extensively revised by: + * John Q. Walker II, Ph.D. (JOHHQ at RALVM6) + * Internet: johnq@ralvm6.vnet.ibm.com + * + * Tweaked by Kern Sibbald for use in Bacula September 2007 + * + *****************************************************************************/ + +/****************************************************************************** + * getopt() + * + * The getopt() function is a command line parser. It returns the next + * option character in argv that matches an option character in opstring. + * + * The argv argument points to an array of argc+1 elements containing argc + * pointers to character strings followed by a null pointer. + * + * The opstring argument points to a string of option characters; if an + * option character is followed by a colon, the option is expected to have + * an argument that may or may not be separated from it by white space. + * The external variable optarg is set to point to the start of the option + * argument on return from getopt(). + * + * The getopt() function places in optind the argv index of the next argument + * to be processed. The system initializes the external variable optind to + * 1 before the first call to getopt(). + * + * When all options have been processed (that is, up to the first nonoption + * argument), getopt() returns EOF. The special option "--" may be used to + * delimit the end of the options; EOF will be returned, and "--" will be + * skipped. + * + * The getopt() function returns a question mark (?) when it encounters an + * option character not included in opstring. This error message can be + * disabled by setting opterr to zero. Otherwise, it returns the option + * character that was detected. + * + * If the special option "--" is detected, or all options have been + * processed, EOF is returned. + * + * Options are marked by either a minus sign (-) or a slash (/) if + * GETOPT_USE_SLASH is defined. + * + * No errors are defined. 
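+ *
+ * Illustrative usage sketch (not part of the original IBM comment; the
+ * option letters and the names configfile, verbose and usage() are
+ * hypothetical):
+ *
+ *     int ch;
+ *     while ((ch = getopt(argc, argv, "c:v")) != EOF) {
+ *        switch (ch) {
+ *        case 'c':                     /* option taking an argument */
+ *           configfile = optarg;
+ *           break;
+ *        case 'v':                     /* simple flag */
+ *           verbose = 1;
+ *           break;
+ *        case '?':                     /* unknown option letter */
+ *        default:
+ *           usage();
+ *        }
+ *     }
+ *     /* argv[optind] is now the first non-option argument */
+ *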
+ *****************************************************************************/ + +#include /* for EOF */ +#include /* for strchr() */ +#include "getopt.h" + + +/* static (global) variables that are specified as exported by getopt() */ +char *optarg = NULL; /* pointer to the start of the option argument */ +int optind = 1; /* number of the next argv[] to be evaluated */ +int opterr = 1; /* non-zero if a question mark should be returned + when a non-valid option character is detected */ +int optopt = '?'; /* Not used */ + +/* handle possible future character set concerns by putting this in a macro */ +#define _next_char(string) (char)(*(string+1)) + +int getopt(int argc, char *const argv[], const char *opstring) +{ + static char *pIndexPosition = NULL; /* place inside current argv string */ + char *pArgString = NULL; /* where to start from next */ + char *pOptString; /* the string in our program */ + + + if (pIndexPosition != NULL) { + /* we last left off inside an argv string */ + if (*(++pIndexPosition)) { + /* there is more to come in the most recent argv */ + pArgString = pIndexPosition; + } + } + + if (pArgString == NULL) { + /* we didn't leave off in the middle of an argv string */ + if (optind >= argc) { + /* more command-line arguments than the argument count */ + pIndexPosition = NULL; /* not in the middle of anything */ + return EOF; /* used up all command-line arguments */ + } + + /*--------------------------------------------------------------------- + * If the next argv[] is not an option, there can be no more options. + *-------------------------------------------------------------------*/ + pArgString = argv[optind++]; /* set this to the next argument ptr */ + +#ifdef GETOPT_USE_SLASH + if (('/' != *pArgString) && /* doesn't start with a slash or a dash? */ + ('-' != *pArgString)) { + --optind; /* point to current arg once we're done */ + optarg = NULL; /* no argument follows the option */ + pIndexPosition = NULL; /* not in the middle of anything */ + return EOF; /* used up all the command-line flags */ + } +#else + if ('-' != *pArgString) { /* doesn't start with a dash? */ + --optind; /* point to current arg once we're done */ + optarg = NULL; /* no argument follows the option */ + pIndexPosition = NULL; /* not in the middle of anything */ + return EOF; /* used up all the command-line flags */ + } +#endif + + /* check for special end-of-flags markers */ + if ((strcmp(pArgString, "-") == 0) || + (strcmp(pArgString, "--") == 0)) { + optarg = NULL; /* no argument follows the option */ + pIndexPosition = NULL; /* not in the middle of anything */ + return EOF; /* encountered the special flag */ + } + + pArgString++; /* look past the / or - */ + } + + if (':' == *pArgString) { /* is it a colon? */ + /*--------------------------------------------------------------------- + * Rare case: if opterr is non-zero, return a question mark; + * otherwise, just return the colon we're on. + *-------------------------------------------------------------------*/ + return (opterr ? (int) '?' : (int) ':'); + } else if ((pOptString = strchr(opstring, *pArgString)) == 0) { + /*--------------------------------------------------------------------- + * The letter on the command-line wasn't any good. + *-------------------------------------------------------------------*/ + optarg = NULL; /* no argument follows the option */ + pIndexPosition = NULL; /* not in the middle of anything */ + return (opterr ? (int) '?' 
: (int) *pArgString); + } else { + /*--------------------------------------------------------------------- + * The letter on the command-line matches one we expect to see + *-------------------------------------------------------------------*/ + if (':' == _next_char(pOptString)) { /* is the next letter a colon? */ + /* It is a colon. Look for an argument string. */ + if ('\0' != _next_char(pArgString)) { /* argument in this argv? */ + optarg = &pArgString[1]; /* Yes, it is */ + } else { + /*------------------------------------------------------------- + * The argument string must be in the next argv. + * But, what if there is none (bad input from the user)? + * In that case, return the letter, and optarg as NULL. + *-----------------------------------------------------------*/ + if (optind < argc) + optarg = argv[optind++]; + else { + optarg = NULL; + return (opterr ? (int) '?' : (int) *pArgString); + } + } + pIndexPosition = NULL; /* not in the middle of anything */ + } else { + /* it's not a colon, so just return the letter */ + optarg = NULL; /* no argument follows the option */ + pIndexPosition = pArgString; /* point to the letter we're on */ + } + return (int) *pArgString; /* return the letter that matched */ + } +} diff --git a/src/win32/compat/getopt.h b/src/win32/compat/getopt.h new file mode 100644 index 00000000..e29ffc89 --- /dev/null +++ b/src/win32/compat/getopt.h @@ -0,0 +1,41 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2019 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Kern Sibbald, September, MMVII + * + * Written from man page definitions + */ + +#ifndef _GETOPT_H_ +#define _GETOPT_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +int getopt(int argc, char * const argv[], const char *optstring); + +extern char *optarg; +extern int optind, opterr, optopt; + +#ifdef __cplusplus +} +#endif + +#endif /* _GETOPT_H_ */ diff --git a/src/win32/compat/grp.h b/src/win32/compat/grp.h new file mode 100644 index 00000000..e69de29b diff --git a/src/win32/compat/mingwconfig.h b/src/win32/compat/mingwconfig.h new file mode 100644 index 00000000..c14a3e2b --- /dev/null +++ b/src/win32/compat/mingwconfig.h @@ -0,0 +1,439 @@ +/* + * This file was originally generated by configure, but has been edited + * to provide the correct defines for the Native Win32 build under + * Visual Studio. + * + * Note!!! We normally build the Windows versions with this header + * file. We use the Mingw environment. 
+ */ +/* ------------------------------------------------------------------------- */ +/* -- CONFIGURE SPECIFIED FEATURES -- */ +/* ------------------------------------------------------------------------- */ + +#ifndef __MINGWCONFIG_H +#define __MINGWCONFIG_H + +#undef _WIN32_WINNT +#define _WIN32_WINNT 0x0602 +#undef _WIN32_IE +#define _WIN32_IE 0x7000 + +/* To compile with the new mingw-w64*/ +#ifdef HAVE_MINGW_W64 +# define HAVE_FSEEKO 1 +# define MINGW64 +# define HAVE_OPENSSLv1 +# define __MIDL_user_allocate_free_DEFINED__ +#endif + +#define HAVE_SHA2 + +#undef USE_LOCKMGR + +/* Define if you want SmartAlloc debug code enabled */ +#define SMARTALLOC 1 + +/* Use EmptyWorkSet as malloc_trim emulation */ +#define HAVE_MALLOC_TRIM 1 + +/* Define if you want to use Batch Mode */ +/* #define HAVE_BATCH_FILE_INSERT 1 */ + +/* Define if you need function prototypes */ +#define PROTOTYPES 1 + +/* Define if you have GCC */ +#define HAVE_GCC 1 + +/* Define to 1 if utime.h exists and declares struct utimbuf. */ +#define HAVE_UTIME_H 1 + +/* Data types */ +#define HAVE_U_INT 1 +#define HAVE_INTXX_T 1 +#define HAVE_U_INTXX_T 1 +/* #undef HAVE_UINTXX_T */ +#define HAVE_INT64_T 1 +#define HAVE_U_INT64_T 1 +#define HAVE_UINT64_T 1 +#define HAVE_INTMAX_T 1 +/* #undef HAVE_U_INTMAX_T */ +#define HAVE_UINTPTR_T 1 +#define HAVE_INTPTR_T 1 + +/* Define if you want TCP Wrappers support */ +/* #undef HAVE_LIBWRAP */ + +/* Define if you have sys/bitypes.h */ +/* #undef HAVE_SYS_BITYPES_H */ + +/* Define if you have zlib */ +#define HAVE_LIBZ 1 + +/* Define if you have lzo lib */ +#define HAVE_LZO 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_LZO_LZOCONF_H 1 + + +/* File daemon specif libraries */ +#define FDLIBS 1 + + +/* What kind of signals we have */ +/*#define HAVE_POSIX_SIGNALS 1 */ +/* #undef HAVE_BSD_SIGNALS */ +/* #undef HAVE_USG_SIGHOLD */ + + +/* Set to correct scanf value for long long int */ +#define lld "lld" +#define llu "llu" +/* #define USE_BSNPRINTF */ + +#define HAVE_STRTOLL 1 + +/* Define to 1 if you have `alloca', as a function or macro. */ +#define HAVE_ALLOCA 1 +#ifndef HAVE_MINGW +#define alloca _alloca +#endif + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +#define HAVE_DIRENT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_FCNTL_H 1 + +/* Define to 1 if you have the `getcwd' function. */ +#define HAVE_GETCWD 1 + +/* Define to 1 if you have the `gethostid' function. */ +#define HAVE_GETHOSTID 1 + +/* Define to 1 if you have the `gethostname' function. */ +#define HAVE_GETHOSTNAME 1 + +/* Define to 1 if you have the `getmntent' function. */ +/*#define HAVE_GETMNTENT 1 */ + +/* Define to 1 if you have the `getpid' function. */ +#define HAVE_GETPID 1 +#define getpid _getpid + +/* Define to 1 if you have the `gettimeofday' function. */ +#define HAVE_GETTIMEOFDAY 1 + +/* Define to 1 if you have the header file. */ +/*#define HAVE_GRP_H 1*/ + +/* Define to 1 if you have the `inet_pton' function. */ +/* #undef HAVE_INET_PTON */ + +/* Define to 1 if you have the `inet_pton' function. */ +/* #undef HAVE_INET_NTOP */ + +/* Define to 1 if you have the `getaddrinfo' function. */ +#define HAVE_GETADDRINFO 1 + +/* Define to 1 if you have the `getnameinfo' function. */ +#define HAVE_GETNAMEINFO 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_INTTYPES_H 1 + +/* Define to 1 if you have the `lchown' function. */ +#define HAVE_LCHOWN 1 + +/* Define to 1 if you have the header file. 
*/ +#define HAVE_LIMITS_H 1 + +/* Define to 1 if you have the `localtime_r' function. */ +#define HAVE_LOCALTIME_R 1 + +/* Define to 1 if you have the `lstat' function. */ +#define HAVE_LSTAT 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_MEMORY_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_MTIO_H */ + +/* Define to 1 if you have the `nanosleep' function. */ +#define HAVE_NANOSLEEP 1 + +/* Define to 1 if you have the header file. */ +/*#define HAVE_PWD_H 1*/ + +/* Define to 1 if you have the `readdir_r' function. */ +/* #undef HAVE_READDIR_R */ + +/* Define to 1 if you have the header file. */ +/*#define HAVE_RESOLV_H 1*/ + +/* Define to 1 if you have the `select' function. */ +#define HAVE_SELECT 1 + +/* Define to 1 if you have the `setenv' function. */ +/* #undef HAVE_SETENV */ + +#define HAVE_PUTENV 1 + +/* Define to 1 if you have the `setlocale' function. */ +#undef HAVE_SETLOCALE + +/* Define to 1 if translation of program messages to the user's native + language is requested. */ +#if (defined _MSC_VER) && (_MSC_VER >= 1400) // VC8+ +/* Enable NLS only if we are using the new VC++. + * NLS should also work with VC++ 7.1, but the Makefiles are + * not adapted to support it (include, lib...). */ +#define ENABLE_NLS 1 +#endif + +#undef LOCALEDIR +#define LOCALEDIR "." + +#undef HAVE_NL_LANGINFO + +/* Define to 1 if you have the `setpgid' function. */ +#define HAVE_SETPGID 1 + +/* Define to 1 if you have the `setpgrp' function. */ +#define HAVE_SETPGRP 1 + +/* Define to 1 if you have the `setsid' function. */ +#define HAVE_SETSID 1 + +/* Define to 1 if you have the `signal' function. */ +/*#define HAVE_SIGNAL 1 */ + +/* Define to 1 if you have the `snprintf' function. */ +#define HAVE_SNPRINTF 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDARG_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDINT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDLIB_H 1 + +/* Define to 1 if you have the `strcasecmp' function. */ +#define HAVE_STRCASECMP 1 + +/* Define to 1 if you have the `strerror' function. */ +#define HAVE_STRERROR 1 + +/* Define to 1 if you have the `strerror_r' function. */ +#define HAVE_STRERROR_R 1 + +/* Define to 1 if you have the `strftime' function. */ +#define HAVE_STRFTIME 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRINGS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRING_H 1 + +/* Define to 1 if you have the `strncmp' function. */ +#define HAVE_STRNCMP 1 + +/* Define to 1 if you have the `strncpy' function. */ +#define HAVE_STRNCPY 1 + +/* Define to 1 if you have the `strtoll' function. */ +#define HAVE_STRTOLL 1 + +/* Define to 1 if `st_blksize' is member of `struct stat'. */ +#define HAVE_STRUCT_STAT_ST_BLKSIZE 1 + +/* Define to 1 if `st_blocks' is member of `struct stat'. */ +#define HAVE_STRUCT_STAT_ST_BLOCKS 1 + +/* Define to 1 if `st_rdev' is member of `struct stat'. */ +#define HAVE_STRUCT_STAT_ST_RDEV 1 + +/* Define to 1 if `tm_zone' is member of `struct tm'. */ +/* #undef HAVE_STRUCT_TM_TM_ZONE */ + +/* Define to 1 if your `struct stat' has `st_blksize'. Deprecated, use + `HAVE_STRUCT_STAT_ST_BLKSIZE' instead. */ +#define HAVE_ST_BLKSIZE 1 + +/* Define to 1 if your `struct stat' has `st_blocks'. Deprecated, use + `HAVE_STRUCT_STAT_ST_BLOCKS' instead. */ +#define HAVE_ST_BLOCKS 1 + +/* Define to 1 if your `struct stat' has `st_rdev'. Deprecated, use + `HAVE_STRUCT_STAT_ST_RDEV' instead. 
*/ +#define HAVE_ST_RDEV 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_BYTEORDER_H */ + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +/* #undef HAVE_SYS_DIR_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_IOCTL_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_MTIO_H 1 + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +/* #undef HAVE_SYS_NDIR_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_SELECT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_SOCKET_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_SOCKIO_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_STAT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TIME_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TYPES_H 1 + +/* Define to 1 if you have that is POSIX.1 compatible. */ +#define HAVE_SYS_WAIT_H 1 + +/* Define to 1 if you have the `tcgetattr' function. */ +#define HAVE_TCGETATTR 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_TERMIOS_H 1 + +/* Define to 1 if your `struct tm' has `tm_zone'. Deprecated, use + `HAVE_STRUCT_TM_TM_ZONE' instead. */ +/* #undef HAVE_TM_ZONE */ + +/* Define to 1 if you don't have `tm_zone' but do have the external array + `tzname'. */ +#define HAVE_TZNAME 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_UNISTD_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_VARARGS_H */ + +/* Define to 1 if you have the `vfprintf' function. */ +#define HAVE_VFPRINTF 1 + +/* Define to 1 if you have the `vprintf' function. */ +#define HAVE_VPRINTF 1 + +/* Define to 1 if you have the `vsnprintf' function. */ +#define HAVE_VSNPRINTF 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_ZLIB_H 1 + +/* Define to 1 if `major', `minor', and `makedev' are declared in . + */ +/* #undef MAJOR_IN_MKDEV */ + +/* Define to 1 if `major', `minor', and `makedev' are declared in + . */ +/* #undef MAJOR_IN_SYSMACROS */ + +/* Define to 1 if your C compiler doesn't accept -c and -o together. */ +/* #undef NO_MINUS_C_MINUS_O */ + +/* Define to the address where bug reports for this package should be sent. */ +#define PACKAGE_BUGREPORT "" + +/* Define to the full name of this package. */ +#define PACKAGE_NAME "" + +/* Define to the full name and version of this package. */ +#define PACKAGE_STRING "" + +/* Define to the one symbol short name of this package. */ +#define PACKAGE_TARNAME "" + +/* Define to the version of this package. */ +#define PACKAGE_VERSION "" + +/* Define as the return type of signal handlers (`int' or `void'). */ +#define RETSIGTYPE void + +/* Define to 1 if the `setpgrp' function takes no argument. */ +#define SETPGRP_VOID 1 + +/* The size of a `char', as computed by sizeof. */ +#define SIZEOF_CHAR 1 + +/* The size of a `int', as computed by sizeof. */ +#define SIZEOF_INT 4 + +/* The size of a `int *', as computed by sizeof. */ +#define SIZEOF_INT_P 4 + +/* The size of a `long int', as computed by sizeof. */ +#define SIZEOF_LONG_INT 4 + +/* The size of a `long long int', as computed by sizeof. */ +#define SIZEOF_LONG_LONG_INT 8 + +/* The size of a `short int', as computed by sizeof. */ +#define SIZEOF_SHORT_INT 2 + +/* If using the C implementation of alloca, define if you know the + direction of stack growth for your system; otherwise it will be + automatically deduced at run-time. 
+ STACK_DIRECTION > 0 => grows toward higher addresses + STACK_DIRECTION < 0 => grows toward lower addresses + STACK_DIRECTION = 0 => direction of growth unknown */ +/* #undef STACK_DIRECTION */ + + +/* Define to 1 if you have the ANSI C header files. */ +#define STDC_HEADERS 1 + +/* Define to 1 if you can safely include both and . */ +#define TIME_WITH_SYS_TIME 1 + +/* Define to 1 if your declares `struct tm'. */ +/* #undef TM_IN_SYS_TIME */ + +/* Use long unsigned int for ioctl request */ +#define HAVE_IOCTL_ULINT_REQUEST + +/* For now, we only support Little endian on Win32 */ +#define HAVE_LITTLE_ENDIAN 1 + +/* Number of bits in a file offset, on hosts where this is settable. */ +#define _FILE_OFFSET_BITS 64 + +/* Define to make fseeko etc. visible, on some hosts. */ +#define _LARGEFILE_SOURCE 1 + +/* Define for large files, on AIX-style hosts. */ +#define _LARGE_FILES 1 + +/* Whether to enable IPv6 support */ +#define HAVE_IPV6 1 + +/* Set the default configuration file */ +#undef SYSCONFDIR +#define SYSCONFDIR "C:/Program Files/Bacula" + +#define O_CLOEXEC 0 + +#endif /* __MINGWNCONFIG_H */ diff --git a/src/win32/compat/ms_atl.h b/src/win32/compat/ms_atl.h new file mode 100644 index 00000000..93782115 --- /dev/null +++ b/src/win32/compat/ms_atl.h @@ -0,0 +1,53 @@ +/* + * Minimal replacement for class CComPtr and CComBSTR + * Based on common public IUnknown interface only + */ + +template class CComPtr +{ + +public: + +/* Attribute(s) ... */ + T* p; + +/* Creation ... */ + CComPtr() + { + p = NULL; + } + +/* Destructor ... */ + ~CComPtr() + { + if (p) + p->Release(); + } +}; + +class CComBSTR +{ + +public: + + BSTR p; + +/* Creation ... */ + CComBSTR() + { + p = NULL; + } + +/* Destructor ... */ + ~CComBSTR() + { + ::SysFreeString(p); + } + +/* Address-of operator */ + BSTR* operator&() + { + return &p; + } + +}; diff --git a/src/win32/compat/mswinver.h b/src/win32/compat/mswinver.h new file mode 100644 index 00000000..730ac919 --- /dev/null +++ b/src/win32/compat/mswinver.h @@ -0,0 +1,35 @@ +#ifndef __MSWINVER_H_ +#define __MSWINVER_H_ + +#define MS_MAJOR_WINDOWS_3 3 +#define MS_MAJOR_WINDOWS_95 4 +#define MS_MAJOR_WINDOWS_98 4 +#define MS_MAJOR_WINDOWS_ME 4 +#define MS_MAJOR_WINDOWS_NT4 4 +#define MS_MAJOR_WINDOWS_2K 5 +#define MS_MAJOR_WINDOWS_XP 5 +#define MS_MAJOR_WINDOWS_S2003 5 + + +#define MS_MINOR_WINDOWS_3 51 +#define MS_MINOR_WINDOWS_95 0 +#define MS_MINOR_WINDOWS_98 10 +#define MS_MINOR_WINDOWS_ME 90 +#define MS_MINOR_WINDOWS_NT4 0 +#define MS_MINOR_WINDOWS_2K 0 +#define MS_MINOR_WINDOWS_XP 1 +#define MS_MINOR_WINDOWS_S2003 2 + + +#define _mkversion(p, m, r) (((p)<<24)|((m)<<8)|(r)) + +#define MS_WINDOWS_95 _mkversion(VER_PLATFORM_WIN32_WINDOWS, MS_MAJOR_WINDOWS_95, MS_MINOR_WINDOWS_95) +#define MS_WINDOWS_98 _mkversion(VER_PLATFORM_WIN32_WINDOWS, MS_MAJOR_WINDOWS_98, MS_MINOR_WINDOWS_98) +#define MS_WINDOWS_ME _mkversion(VER_PLATFORM_WIN32_WINDOWS, MS_MAJOR_WINDOWS_ME, MS_MINOR_WINDOWS_ME) +#define MS_WINDOWS_NT4 _mkversion(VER_PLATFORM_WIN32_NT, MS_MAJOR_WINDOWS_NT4, MS_MINOR_WINDOWS_NT4) +#define MS_WINDOWS_2K _mkversion(VER_PLATFORM_WIN32_NT, MS_MAJOR_WINDOWS_2K, MS_MINOR_WINDOWS_2K) +#define MS_WINDOWS_XP _mkversion(VER_PLATFORM_WIN32_NT, MS_MAJOR_WINDOWS_XP, MS_MINOR_WINDOWS_XP) +#define MS_WINDOWS_S2003 _mkversion(VER_PLATFORM_WIN32_NT, MS_MAJOR_WINDOWS_S2003, MS_MINOR_WINDOWS_S2003) + + +#endif diff --git a/src/win32/compat/netdb.h b/src/win32/compat/netdb.h new file mode 100644 index 00000000..e69de29b diff --git a/src/win32/compat/netinet/in.h 
b/src/win32/compat/netinet/in.h new file mode 100644 index 00000000..e69de29b diff --git a/src/win32/compat/netinet/tcp.h b/src/win32/compat/netinet/tcp.h new file mode 100644 index 00000000..e69de29b diff --git a/src/win32/compat/print.cpp b/src/win32/compat/print.cpp new file mode 100644 index 00000000..695e0dcd --- /dev/null +++ b/src/win32/compat/print.cpp @@ -0,0 +1,774 @@ +/* + * + * Note: 25 May 2018. This code appears not to have a copyright, however, + * it appears to come from exactly the same code source as bsnprintf.c + * in the Bacula src/lib. The code in that file has two licenses that + * am adding here: + */ + +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2019 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Copyright Patrick Powell 1995 + * + * This code is based on code written by Patrick Powell + * (papowell@astart.com) It may be used for any purpose as long + * as this notice remains intact on all source code distributions. + * + * Adapted for Bacula -- note there were lots of bugs in + * the original code: %lld and %s were seriously broken, and + * with FP turned off %f seg faulted. + * + * Kern Sibbald, November MMV + * + */ +/************************************************************** + * Original: + * Patrick Powell Tue Apr 11 09:48:21 PDT 1995 + * A bombproof version of doprnt(dopr) included. + * Sigh. This sort of thing is always nasty do deal with. Note that + * the version here does not include floating point... + * + * snprintf() is used instead of sprintf() as it does limit checks + * for string length. This covers a nasty loophole. + * + * The other functions are there to prevent NULL pointers from + * causing nast effects. + * + * More Recently: + * Brandon Long 9/15/96 for mutt 0.43 + * This was ugly. It is still ugly. I opted out of floating point + * numbers, but the formatter understands just about everything + * from the normal C string format, at least as far as I can tell from + * the Solaris 2.5 printf(3S) man page. + * + * Brandon Long 10/22/97 for mutt 0.87.1 + * Ok, added some minimal floating point support, which means this + * probably requires libm on most operating systems. Don't yet + * support the exponent(e, E) and sigfig(g, G). Also, fmtint() + * was pretty badly broken, it just wasn't being exercised in ways + * which showed it, so that's been fixed. Also, formated the code + * to mutt conventions, and removed dead code left over from the + * original. Also, there is now a builtin-test, just compile with: + * gcc -DTEST_SNPRINTF -o snprintf snprintf.c -lm + * and run snprintf for results. + * + * Thomas Roessler 01/27/98 for mutt 0.89i + * The PGP code was using unsigned hexadecimal formats. + * Unfortunately, unsigned formats simply didn't work. + * + * Michael Elkins 03/05/98 for mutt 0.90.8 + * The original code assumed that both snprintf() and vsnprintf() were + * missing. Some systems only have snprintf() but not vsnprintf(), so + * the code is now broken down under HAVE_SNPRINTF and HAVE_VSNPRINTF. 
+ * + * Ben Lindstrom 09/27/00 for OpenSSH + * Welcome to the world of %lld and %qd support. With other + * long long support. This is needed for sftp-server to work + * right. + * + * Ben Lindstrom 02/12/01 for OpenSSH + * Removed all hint of VARARGS stuff and banished it to the void, + * and did a bit of KNF style work to make things a bit more + * acceptable. Consider stealing from mutt or enlightenment. + **************************************************************/ + +#include "bacula.h" +#include "compat.h" + +typedef void (prfun)(char *, size_t *, size_t, int); + +int +dopr(char *buffer, size_t maxlen, const char *format, va_list args, prfun); + + +static void +fmtstr(char *buffer, size_t *currlen, size_t maxlen, char *value, int flags, + int min, int max, prfun); + +static void +fmtint(char *buffer, size_t *currlen, size_t maxlen, INT64 value, int base, + int min, int max, int flags, prfun); + +static void +fmtfp(char *buffer, size_t *currlen, size_t maxlen, long double fvalue, + int min, int max, int flags, prfun); + +static void +dopr_outch(char *buffer, size_t *currlen, size_t maxlen, int c); + +/* + * dopr(): poor man's version of doprintf + */ + +#ifndef MAX +#define MAX(a,b) ((a)>(b)?(a):(b)) +#endif + +/* format read states */ +#define DP_S_DEFAULT 0 +#define DP_S_FLAGS 1 +#define DP_S_MIN 2 +#define DP_S_DOT 3 +#define DP_S_MAX 4 +#define DP_S_MOD 5 +#define DP_S_CONV 6 +#define DP_S_DONE 7 + +/* format flags - Bits */ +#define DP_F_MINUS (1 << 0) +#define DP_F_PLUS (1 << 1) +#define DP_F_SPACE (1 << 2) +#define DP_F_NUM (1 << 3) +#define DP_F_ZERO (1 << 4) +#define DP_F_UP (1 << 5) +#define DP_F_UNSIGNED (1 << 6) + +/* Conversion Flags */ +#define DP_C_SHORT 1 +#define DP_C_LONG 2 +#define DP_C_LDOUBLE 3 +#define DP_C_LONG_LONG 4 + +#define char_to_int(p) (p - '0') +#define abs_val(p) (p < 0 ? 
-p : p) + +static const char digitval[] = "0123456789abcdef0123456789ABCDEF"; + +int +dopr(char *buffer, size_t maxlen, const char *format, va_list args, prfun outch) +{ + char *strvalue; + char ch; + INT64 value; + long double fvalue; + int min = 0; + int max = -1; + int state = DP_S_DEFAULT; + int flags = 0; + int cflags = 0; + size_t currlen = 0; + + ch = *format++; + + while (state != DP_S_DONE) + { + if ((ch == '\0') || (currlen >= maxlen)) + state = DP_S_DONE; + + switch (state) + { + case DP_S_DEFAULT: + if (ch == '%') + state = DP_S_FLAGS; + else + outch(buffer, &currlen, maxlen, ch); + ch = *format++; + break; + case DP_S_FLAGS: + switch (ch) + { + case '-': + flags |= DP_F_MINUS; + ch = *format++; + break; + case '+': + flags |= DP_F_PLUS; + ch = *format++; + break; + case ' ': + flags |= DP_F_SPACE; + ch = *format++; + break; + case '#': + flags |= DP_F_NUM; + ch = *format++; + break; + case '0': + flags |= DP_F_ZERO; + ch = *format++; + break; + default: + state = DP_S_MIN; + break; + } + break; + case DP_S_MIN: + if (isdigit((unsigned char)ch)) + { + min = 10*min + char_to_int(ch); + ch = *format++; + } + else if (ch == '*') + { + min = va_arg(args, int); + ch = *format++; + state = DP_S_DOT; + } + else + state = DP_S_DOT; + break; + case DP_S_DOT: + if (ch == '.') + { + state = DP_S_MAX; + ch = *format++; + } + else + state = DP_S_MOD; + break; + case DP_S_MAX: + if (isdigit((unsigned char)ch)) + { + if (max < 0) + max = 0; + max = 10*max + char_to_int(ch); + ch = *format++; + } + else if (ch == '*') + { + max = va_arg(args, int); + ch = *format++; + state = DP_S_MOD; + } + else + state = DP_S_MOD; + break; + case DP_S_MOD: + switch (ch) + { + case 'h': + cflags = DP_C_SHORT; + ch = *format++; + break; + case 'l': + cflags = DP_C_LONG; + ch = *format++; + if (ch == 'l') + { + cflags = DP_C_LONG_LONG; + ch = *format++; + } + break; + case 'q': + cflags = DP_C_LONG_LONG; + ch = *format++; + break; + case 'L': + cflags = DP_C_LDOUBLE; + ch = *format++; + break; + default: + break; + } + state = DP_S_CONV; + break; + case DP_S_CONV: + switch (ch) + { + case 'b': + flags |= DP_F_UNSIGNED; + if (cflags == DP_C_SHORT) + value = va_arg(args, unsigned int); + else if (cflags == DP_C_LONG) + value = va_arg(args, unsigned long int); + else if (cflags == DP_C_LONG_LONG) + value = va_arg(args, UINT64); + else + value = va_arg(args, unsigned int); + fmtint(buffer, &currlen, maxlen, value, 2, min, max, flags, outch); + break; + case 'd': + case 'i': + if (cflags == DP_C_SHORT) + value = va_arg(args, int); + else if (cflags == DP_C_LONG) + value = va_arg(args, long int); + else if (cflags == DP_C_LONG_LONG) + value = va_arg(args, INT64); + else + value = va_arg(args, int); + fmtint(buffer, &currlen, maxlen, value, 10, min, max, flags, outch); + break; + case 'o': + flags |= DP_F_UNSIGNED; + if (cflags == DP_C_SHORT) + value = va_arg(args, unsigned int); + else if (cflags == DP_C_LONG) + value = va_arg(args, unsigned long int); + else if (cflags == DP_C_LONG_LONG) + value = va_arg(args, UINT64); + else + value = va_arg(args, unsigned int); + fmtint(buffer, &currlen, maxlen, value, 8, min, max, flags, outch); + break; + case 'u': + flags |= DP_F_UNSIGNED; + if (cflags == DP_C_SHORT) + value = va_arg(args, unsigned int); + else if (cflags == DP_C_LONG) + value = va_arg(args, unsigned long int); + else if (cflags == DP_C_LONG_LONG) + value = va_arg(args, UINT64); + else + value = va_arg(args, unsigned int); + fmtint(buffer, &currlen, maxlen, value, 10, min, max, flags, outch); + break; + case 
'X': + flags |= DP_F_UP; + case 'x': + flags |= DP_F_UNSIGNED; + if (cflags == DP_C_SHORT) + value = va_arg(args, unsigned int); + else if (cflags == DP_C_LONG) + value = va_arg(args, unsigned long int); + else if (cflags == DP_C_LONG_LONG) + value = va_arg(args, UINT64); + else + value = va_arg(args, unsigned int); + fmtint(buffer, &currlen, maxlen, value, 16, min, max, flags, outch); + break; + case 'f': + if (cflags == DP_C_LDOUBLE) + fvalue = va_arg(args, long double); + else + fvalue = va_arg(args, double); + /* um, floating point? */ + fmtfp(buffer, &currlen, maxlen, fvalue, min, max, flags, outch); + break; + case 'E': + flags |= DP_F_UP; + case 'e': + if (cflags == DP_C_LDOUBLE) + fvalue = va_arg(args, long double); + else + fvalue = va_arg(args, double); + break; + case 'G': + flags |= DP_F_UP; + case 'g': + if (cflags == DP_C_LDOUBLE) + fvalue = va_arg(args, long double); + else + fvalue = va_arg(args, double); + break; + case 'c': + outch(buffer, &currlen, maxlen, va_arg(args, int)); + break; + case 's': + strvalue = va_arg(args, char *); + if (max < 0) { + max = maxlen; /* ie, no max */ + } + fmtstr(buffer, &currlen, maxlen, strvalue, flags, min, max, outch); + break; + case 'p': + flags |= DP_F_UNSIGNED; + if (sizeof(char *) == 4) { + value = va_arg(args, uint32_t); + } else if (sizeof(char *) == 8) { + value = va_arg(args, uint64_t); + } else { + value = 0; /* we have a problem */ + } + fmtint(buffer, &currlen, maxlen, value, 16, min, max, + flags, outch); + break; + case 'n': + if (cflags == DP_C_SHORT) + { + short int *num; + num = va_arg(args, short int *); + *num = currlen; + } + else if (cflags == DP_C_LONG) + { + long int *num; + num = va_arg(args, long int *); + *num = currlen; + } + else if (cflags == DP_C_LONG_LONG) + { + INT64 *num; + num = va_arg(args, INT64 *); + *num = currlen; + } + else + { + int *num; + num = va_arg(args, int *); + *num = currlen; + } + break; + case '%': + outch(buffer, &currlen, maxlen, ch); + break; + case 'w': /* not supported yet, treat as next char */ + ch = *format++; + break; + default: /* Unknown, skip */ + break; + } + ch = *format++; + state = DP_S_DEFAULT; + flags = cflags = min = 0; + max = -1; + break; + case DP_S_DONE: + break; + default: /* hmm? 
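+                                         (unreachable: every DP_S_* state
+                                          is handled explicitly above)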
*/ + break; /* some picky compilers need this */ + } + } + outch(buffer, &currlen, maxlen, -1); + return currlen; +} + +static void +fmtstr(char *buffer, size_t *currlen, size_t maxlen, + char *value, int flags, int min, int max, prfun outch) +{ + int padlen, strln; /* amount to pad */ + int cnt = 0; + + if (value == NULL) + value = (char *)""; + + for (strln = 0; value[strln]; ++strln); /* strlen */ + padlen = min - strln; + if (padlen < 0) + padlen = 0; + if (flags & DP_F_MINUS) + padlen = -padlen; /* Left Justify */ + + while ((padlen > 0) && (cnt < max)) { + outch(buffer, currlen, maxlen, ' '); + --padlen; + ++cnt; + } + while (*value && (cnt < max)) { + outch(buffer, currlen, maxlen, *value++); + ++cnt; + } + while ((padlen < 0) && (cnt < max)) { + outch(buffer, currlen, maxlen, ' '); + ++padlen; + ++cnt; + } +} + +/* Have to handle DP_F_NUM(ie 0x and 0 alternates) */ + +static void +fmtint(char *buffer, size_t *currlen, size_t maxlen, + INT64 value, int base, int min, int max, int flags, prfun outch) +{ + UINT64 uvalue; + char convert[20]; + int signvalue = 0; + int place = 0; + int spadlen = 0; /* amount to space pad */ + int zpadlen = 0; /* amount to zero pad */ + int caps = 0; + + if (max < 0) + max = 0; + + uvalue = value; + + if (!(flags & DP_F_UNSIGNED)) { + if (value < 0) { + signvalue = '-'; + uvalue = -value; + } + else if (flags & DP_F_PLUS) /* Do a sign(+/i) */ + signvalue = '+'; + else if (flags & DP_F_SPACE) + signvalue = ' '; + } + + if (flags & DP_F_UP) + caps = 16; /* Should characters be upper case? */ + + do { + convert[place++] = digitval[(uvalue%base)+caps]; + uvalue = (uvalue / (unsigned)base); + } while (uvalue && (place < 20)); + + if (place == 20) + place--; + + convert[place] = 0; + + zpadlen = max - place; + spadlen = min - MAX(max, place) - (signvalue ? 
1 : 0); + + if (zpadlen < 0) + zpadlen = 0; + if (spadlen < 0) + spadlen = 0; + if (flags & DP_F_ZERO) { + zpadlen = MAX(zpadlen, spadlen); + spadlen = 0; + } + if (flags & DP_F_MINUS) + spadlen = -spadlen; /* Left Justifty */ + + + /* Spaces */ + while (spadlen > 0) { + outch(buffer, currlen, maxlen, ' '); + --spadlen; + } + + /* Sign */ + if (signvalue) + outch(buffer, currlen, maxlen, signvalue); + + /* Zeros */ + if (zpadlen > 0) { + while (zpadlen > 0) { + outch(buffer, currlen, maxlen, '0'); + --zpadlen; + } + } + + /* Digits */ + while (place > 0) + outch(buffer, currlen, maxlen, convert[--place]); + + /* Left Justified spaces */ + while (spadlen < 0) { + outch(buffer, currlen, maxlen, ' '); + ++spadlen; + } +} + +static long double +pow10(int exp) +{ + long double result = 1; + + while (exp) + { + result *= 10; + exp--; + } + + return result; +} + +static long +round(long double value) +{ + long intpart = (long)value; + + value -= intpart; + if (value >= 0.5) + intpart++; + + return intpart; +} + +static void +fmtfp(char *buffer, size_t *currlen, size_t maxlen, long double fvalue, + int min, int max, int flags, prfun outch) +{ + char iconvert[20]; + char fconvert[20]; + int signvalue = 0; + int iplace = 0; + int fplace = 0; + int padlen = 0; /* amount to pad */ + int zpadlen = 0; + long intpart; + long fracpart; + long double ufvalue; + + /* + * AIX manpage says the default is 0, but Solaris says the default + * is 6, and sprintf on AIX defaults to 6 + */ + if (max < 0) + max = 6; + + ufvalue = abs_val(fvalue); + + if (fvalue < 0) + signvalue = '-'; + else if (flags & DP_F_PLUS) /* Do a sign(+/i) */ + signvalue = '+'; + else if (flags & DP_F_SPACE) + signvalue = ' '; + + intpart = (long)ufvalue; + + /* + * Sorry, we only support 9 digits past the decimal because of our + * conversion method + */ + if (max > 9) + max = 9; + + /* We "cheat" by converting the fractional part to integer by + * multiplying by a factor of 10 + */ + fracpart = round((pow10 (max)) * (ufvalue - intpart)); + + if (fracpart >= pow10 (max)) + { + intpart++; + fracpart -= (long)pow10 (max); + } + + /* Convert integer part */ + do + { + iconvert[iplace++] = digitval[intpart % 10]; + intpart = (intpart / 10); + } while (intpart && (iplace < 20)); + + if (iplace == 20) + iplace--; + + iconvert[iplace] = 0; + + /* Convert fractional part */ + do + { + fconvert[fplace++] = digitval[fracpart % 10]; + fracpart = (fracpart / 10); + } while (fracpart && (fplace < 20)); + + if (fplace == 20) + fplace--; + + fconvert[fplace] = 0; + + /* -1 for decimal point, another -1 if we are printing a sign */ + padlen = min - iplace - max - 1 - ((signvalue) ? 1 : 0); + zpadlen = max - fplace; + if (zpadlen < 0) + zpadlen = 0; + if (padlen < 0) + padlen = 0; + if (flags & DP_F_MINUS) + padlen = -padlen; /* Left Justifty */ + + if ((flags & DP_F_ZERO) && (padlen > 0)) + { + if (signvalue) + { + outch(buffer, currlen, maxlen, signvalue); + --padlen; + signvalue = 0; + } + while (padlen > 0) + { + outch(buffer, currlen, maxlen, '0'); + --padlen; + } + } + + while (padlen > 0) + { + outch(buffer, currlen, maxlen, ' '); + --padlen; + } + + if (signvalue) + outch(buffer, currlen, maxlen, signvalue); + + while (iplace > 0) + outch(buffer, currlen, maxlen, iconvert[--iplace]); + + /* + * Decimal point. This should probably use locale to find the correct + * char to print out. 
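+    * (A locale-aware variant would be roughly
+    *     outch(buffer, currlen, maxlen, *localeconv()->decimal_point);
+    * with <locale.h> included -- a sketch only, not what the code below does.)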
+ */ + outch(buffer, currlen, maxlen, '.'); + + while (fplace > 0) + outch(buffer, currlen, maxlen, fconvert[--fplace]); + + while (zpadlen > 0) + { + outch(buffer, currlen, maxlen, '0'); + --zpadlen; + } + + while (padlen < 0) + { + outch(buffer, currlen, maxlen, ' '); + ++padlen; + } +} + +static void +dopr_outch(char *buffer, size_t *currlen, size_t maxlen, int c) +{ + if (c == -1) + { + if (*currlen < maxlen - 1) + buffer[*currlen] = '\0'; + else + buffer[maxlen - 1] = '\0'; + } + else if (*currlen < maxlen) + buffer[(*currlen)++] = c; +} + +int +__sprintf(char *str, const char *fmt, ...) +{ + int rval; + va_list ap; + va_start(ap, fmt); + rval = vsnprintf(str, 128*1024, fmt, ap); + va_end(ap); + return rval; +} + + +int +__snprintf(char *str, size_t count, const char *fmt, ...) +{ + int rval; + va_list ap; + + va_start(ap, fmt); + rval = vsnprintf(str, count, fmt, ap); + va_end(ap); + + return rval; +} + +int +__vsprintf(char *s, const char *format, va_list args) +{ + s[0] = 0; + return dopr(s, 0xfffff, format, args, dopr_outch); +} + +int +__vsnprintf(char *s, size_t count, const char *format, va_list args) +{ + s[0] = 0; + return dopr(s, count, format, args, dopr_outch); +} diff --git a/src/win32/compat/pwd.h b/src/win32/compat/pwd.h new file mode 100644 index 00000000..e69de29b diff --git a/src/win32/compat/stdint.h b/src/win32/compat/stdint.h new file mode 100644 index 00000000..048a4541 --- /dev/null +++ b/src/win32/compat/stdint.h @@ -0,0 +1,3 @@ +#if defined(__GNUC__) +#include_next +#endif diff --git a/src/win32/compat/strings.h b/src/win32/compat/strings.h new file mode 100644 index 00000000..e69de29b diff --git a/src/win32/compat/sys/file.h b/src/win32/compat/sys/file.h new file mode 100644 index 00000000..36508054 --- /dev/null +++ b/src/win32/compat/sys/file.h @@ -0,0 +1,2 @@ + +#include "compat.h" diff --git a/src/win32/compat/sys/ioctl.h b/src/win32/compat/sys/ioctl.h new file mode 100644 index 00000000..c09d6d96 --- /dev/null +++ b/src/win32/compat/sys/ioctl.h @@ -0,0 +1 @@ +extern int ioctl(int __fd, unsigned long int __request, ...); diff --git a/src/win32/compat/sys/mtio.h b/src/win32/compat/sys/mtio.h new file mode 100644 index 00000000..51fa550c --- /dev/null +++ b/src/win32/compat/sys/mtio.h @@ -0,0 +1,277 @@ +/* Structures and definitions for magnetic tape I/O control commands. + Copyright (C) 1996, 1997 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, write to the Free + Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA + 02111-1307 USA. */ + +/* Written by H. Bergman . */ + +#ifndef _SYS_MTIO_H +#define _SYS_MTIO_H 1 + +/* Get necessary definitions from system and kernel headers. */ +#include +#include + + +/* Structure for MTIOCTOP - magnetic tape operation command. */ +struct mtop + { + short int mt_op; /* Operations defined below. */ + int mt_count; /* How many of them. 
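+                                   For example, mt_op == MTFSF with
+                                   mt_count == 3 forward-spaces over
+                                   three filemarks.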
*/ + }; +#define _IOT_mtop /* Hurd ioctl type field. */ \ + _IOT (_IOTS (short), 1, _IOTS (int), 1, 0, 0) + +/* Magnetic Tape operations [Not all operations supported by all drivers]. */ +#define MTRESET 0 /* +reset drive in case of problems. */ +#define MTFSF 1 /* Forward space over FileMark, + * position at first record of next file. */ +#define MTBSF 2 /* Backward space FileMark (position before FM). */ +#define MTFSR 3 /* Forward space record. */ +#define MTBSR 4 /* Backward space record. */ +#define MTWEOF 5 /* Write an end-of-file record (mark). */ +#define MTREW 6 /* Rewind. */ +#define MTOFFL 7 /* Rewind and put the drive offline (eject?). */ +#define MTNOP 8 /* No op, set status only (read with MTIOCGET). */ +#define MTRETEN 9 /* Retension tape. */ +#define MTBSFM 10 /* +backward space FileMark, position at FM. */ +#define MTFSFM 11 /* +forward space FileMark, position at FM. */ +#define MTEOM 12 /* Goto end of recorded media (for appending files). + MTEOM positions after the last FM, ready for + appending another file. */ +#define MTERASE 13 /* Erase tape -- be careful! */ + +#define MTRAS1 14 /* Run self test 1 (nondestructive). */ +#define MTRAS2 15 /* Run self test 2 (destructive). */ +#define MTRAS3 16 /* Reserved for self test 3. */ + +#define MTSETBLK 20 /* Set block length (SCSI). */ +#define MTSETDENSITY 21 /* Set tape density (SCSI). */ +#define MTSEEK 22 /* Seek to block (Tandberg, etc.). */ +#define MTTELL 23 /* Tell block (Tandberg, etc.). */ +#define MTSETDRVBUFFER 24 /* Set the drive buffering according to SCSI-2. + Ordinary buffered operation with code 1. */ +#define MTFSS 25 /* Space forward over setmarks. */ +#define MTBSS 26 /* Space backward over setmarks. */ +#define MTWSM 27 /* Write setmarks. */ + +#define MTLOCK 28 /* Lock the drive door. */ +#define MTUNLOCK 29 /* Unlock the drive door. */ +#define MTLOAD 30 /* Execute the SCSI load command. */ +#define MTUNLOAD 31 /* Execute the SCSI unload command. */ +#define MTCOMPRESSION 32/* Control compression with SCSI mode page 15. */ +#define MTSETPART 33 /* Change the active tape partition. */ +#define MTMKPART 34 /* Format the tape with one or two partitions. */ + +/* structure for MTIOCGET - mag tape get status command */ + +struct mtget + { + long int mt_type; /* Type of magtape device. */ + long int mt_resid; /* Residual count: (not sure) + number of bytes ignored, or + number of files not skipped, or + number of records not skipped. */ + /* The following registers are device dependent. */ + long int mt_dsreg; /* Status register. */ + long int mt_gstat; /* Generic (device independent) status. */ + long int mt_erreg; /* Error register. */ + /* The next two fields are not always used. */ + __daddr_t mt_fileno; /* Number of current file on tape. */ + __daddr_t mt_blkno; /* Current block number. */ + }; +#define _IOT_mtget /* Hurd ioctl type field. */ \ + _IOT (_IOTS (long), 7, 0, 0, 0, 0) + + +/* Constants for mt_type. Not all of these are supported, and + these are not all of the ones that are supported. */ +#define MT_ISUNKNOWN 0x01 +#define MT_ISQIC02 0x02 /* Generic QIC-02 tape streamer. */ +#define MT_ISWT5150 0x03 /* Wangtek 5150EQ, QIC-150, QIC-02. */ +#define MT_ISARCHIVE_5945L2 0x04 /* Archive 5945L-2, QIC-24, QIC-02?. */ +#define MT_ISCMSJ500 0x05 /* CMS Jumbo 500 (QIC-02?). */ +#define MT_ISTDC3610 0x06 /* Tandberg 6310, QIC-24. */ +#define MT_ISARCHIVE_VP60I 0x07 /* Archive VP60i, QIC-02. */ +#define MT_ISARCHIVE_2150L 0x08 /* Archive Viper 2150L. 
*/ +#define MT_ISARCHIVE_2060L 0x09 /* Archive Viper 2060L. */ +#define MT_ISARCHIVESC499 0x0A /* Archive SC-499 QIC-36 controller. */ +#define MT_ISQIC02_ALL_FEATURES 0x0F /* Generic QIC-02 with all features. */ +#define MT_ISWT5099EEN24 0x11 /* Wangtek 5099-een24, 60MB, QIC-24. */ +#define MT_ISTEAC_MT2ST 0x12 /* Teac MT-2ST 155mb drive, + Teac DC-1 card (Wangtek type). */ +#define MT_ISEVEREX_FT40A 0x32 /* Everex FT40A (QIC-40). */ +#define MT_ISDDS1 0x51 /* DDS device without partitions. */ +#define MT_ISDDS2 0x52 /* DDS device with partitions. */ +#define MT_ISSCSI1 0x71 /* Generic ANSI SCSI-1 tape unit. */ +#define MT_ISSCSI2 0x72 /* Generic ANSI SCSI-2 tape unit. */ + +/* QIC-40/80/3010/3020 ftape supported drives. + 20bit vendor ID + 0x800000 (see vendors.h in ftape distribution). */ +#define MT_ISFTAPE_UNKNOWN 0x800000 /* obsolete */ +#define MT_ISFTAPE_FLAG 0x800000 + +struct mt_tape_info + { + long int t_type; /* Device type id (mt_type). */ + char *t_name; /* Descriptive name. */ + }; + +#define MT_TAPE_INFO \ + { \ + {MT_ISUNKNOWN, "Unknown type of tape device"}, \ + {MT_ISQIC02, "Generic QIC-02 tape streamer"}, \ + {MT_ISWT5150, "Wangtek 5150, QIC-150"}, \ + {MT_ISARCHIVE_5945L2, "Archive 5945L-2"}, \ + {MT_ISCMSJ500, "CMS Jumbo 500"}, \ + {MT_ISTDC3610, "Tandberg TDC 3610, QIC-24"}, \ + {MT_ISARCHIVE_VP60I, "Archive VP60i, QIC-02"}, \ + {MT_ISARCHIVE_2150L, "Archive Viper 2150L"}, \ + {MT_ISARCHIVE_2060L, "Archive Viper 2060L"}, \ + {MT_ISARCHIVESC499, "Archive SC-499 QIC-36 controller"}, \ + {MT_ISQIC02_ALL_FEATURES, "Generic QIC-02 tape, all features"}, \ + {MT_ISWT5099EEN24, "Wangtek 5099-een24, 60MB"}, \ + {MT_ISTEAC_MT2ST, "Teac MT-2ST 155mb data cassette drive"}, \ + {MT_ISEVEREX_FT40A, "Everex FT40A, QIC-40"}, \ + {MT_ISSCSI1, "Generic SCSI-1 tape"}, \ + {MT_ISSCSI2, "Generic SCSI-2 tape"}, \ + {0, NULL} \ + } + + +/* Structure for MTIOCPOS - mag tape get position command. */ + +struct mtpos + { + long int mt_blkno; /* Current block number. */ + }; +#define _IOT_mtpos /* Hurd ioctl type field. */ \ + _IOT_SIMPLE (long) + + +/* Structure for MTIOCGETCONFIG/MTIOCSETCONFIG primarily intended + as an interim solution for QIC-02 until DDI is fully implemented. */ +struct mtconfiginfo + { + long int mt_type; /* Drive type. */ + long int ifc_type; /* Interface card type. */ + unsigned short int irqnr; /* IRQ number to use. */ + unsigned short int dmanr; /* DMA channel to use. */ + unsigned short int port; /* IO port base address. */ + + unsigned long int debug; /* Debugging flags. */ + + unsigned have_dens:1; + unsigned have_bsf:1; + unsigned have_fsr:1; + unsigned have_bsr:1; + unsigned have_eod:1; + unsigned have_seek:1; + unsigned have_tell:1; + unsigned have_ras1:1; + unsigned have_ras2:1; + unsigned have_ras3:1; + unsigned have_qfa:1; + + unsigned pad1:5; + char reserved[10]; + }; +#define _IOT_mtconfiginfo /* Hurd ioctl type field. */ \ + _IOT (_IOTS (long), 2, _IOTS (short), 3, _IOTS (long), 1) /* XXX wrong */ + + +/* Magnetic tape I/O control commands. */ +#define MTIOCTOP _IOW('m', 1, struct mtop) /* Do a mag tape op. */ +#define MTIOCGET _IOR('m', 2, struct mtget) /* Get tape status. */ +#define MTIOCPOS _IOR('m', 3, struct mtpos) /* Get tape position.*/ + +/* The next two are used by the QIC-02 driver for runtime reconfiguration. + See tpqic02.h for struct mtconfiginfo. 
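
   A minimal usage sketch of the more common MTIOCTOP/MTIOCGET requests
   (error handling omitted; tape_fd and at_bot are illustrative names only,
   tape_fd being assumed to be an open descriptor on the tape device, and
   ioctl() being the one declared in the companion sys/ioctl.h shim).  It
   rewinds the drive and then checks the generic status word for
   beginning-of-tape:

      struct mtop  op;
      struct mtget status;

      op.mt_op    = MTREW;
      op.mt_count = 1;
      ioctl(tape_fd, MTIOCTOP, &op);

      ioctl(tape_fd, MTIOCGET, &status);
      bool at_bot = GMT_BOT(status.mt_gstat);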
*/ +#define MTIOCGETCONFIG _IOR('m', 4, struct mtconfiginfo) /* Get tape config.*/ +#define MTIOCSETCONFIG _IOW('m', 5, struct mtconfiginfo) /* Set tape config.*/ + +/* Generic Mag Tape (device independent) status macros for examining + mt_gstat -- HP-UX compatible. + There is room for more generic status bits here, but I don't + know which of them are reserved. At least three or so should + be added to make this really useful. */ +#define GMT_EOF(x) ((x) & 0x80000000) +#define GMT_BOT(x) ((x) & 0x40000000) +#define GMT_EOT(x) ((x) & 0x20000000) +#define GMT_SM(x) ((x) & 0x10000000) /* DDS setmark */ +#define GMT_EOD(x) ((x) & 0x08000000) /* DDS EOD */ +#define GMT_WR_PROT(x) ((x) & 0x04000000) +/* #define GMT_ ? ((x) & 0x02000000) */ +#define GMT_ONLINE(x) ((x) & 0x01000000) +#define GMT_D_6250(x) ((x) & 0x00800000) +#define GMT_D_1600(x) ((x) & 0x00400000) +#define GMT_D_800(x) ((x) & 0x00200000) +/* #define GMT_ ? ((x) & 0x00100000) */ +/* #define GMT_ ? ((x) & 0x00080000) */ +#define GMT_DR_OPEN(x) ((x) & 0x00040000) /* Door open (no tape). */ +/* #define GMT_ ? ((x) & 0x00020000) */ +#define GMT_IM_REP_EN(x) ((x) & 0x00010000) /* Immediate report mode.*/ +/* 16 generic status bits unused. */ + + +/* SCSI-tape specific definitions. Bitfield shifts in the status */ +#define MT_ST_BLKSIZE_SHIFT 0 +#define MT_ST_BLKSIZE_MASK 0xffffff +#define MT_ST_DENSITY_SHIFT 24 +#define MT_ST_DENSITY_MASK 0xff000000 + +#define MT_ST_SOFTERR_SHIFT 0 +#define MT_ST_SOFTERR_MASK 0xffff + +/* Bitfields for the MTSETDRVBUFFER ioctl. */ +#define MT_ST_OPTIONS 0xf0000000 +#define MT_ST_BOOLEANS 0x10000000 +#define MT_ST_SETBOOLEANS 0x30000000 +#define MT_ST_CLEARBOOLEANS 0x40000000 +#define MT_ST_WRITE_THRESHOLD 0x20000000 +#define MT_ST_DEF_BLKSIZE 0x50000000 +#define MT_ST_DEF_OPTIONS 0x60000000 + +#define MT_ST_BUFFER_WRITES 0x1 +#define MT_ST_ASYNC_WRITES 0x2 +#define MT_ST_READ_AHEAD 0x4 +#define MT_ST_DEBUGGING 0x8 +#define MT_ST_TWO_FM 0x10 +#define MT_ST_FAST_MTEOM 0x20 +#define MT_ST_AUTO_LOCK 0x40 +#define MT_ST_DEF_WRITES 0x80 +#define MT_ST_CAN_BSR 0x100 +#define MT_ST_NO_BLKLIMS 0x200 +#define MT_ST_CAN_PARTITIONS 0x400 +#define MT_ST_SCSI2LOGICAL 0x800 + +/* The mode parameters to be controlled. Parameter chosen with bits 20-28. */ +#define MT_ST_CLEAR_DEFAULT 0xfffff +#define MT_ST_DEF_DENSITY (MT_ST_DEF_OPTIONS | 0x100000) +#define MT_ST_DEF_COMPRESSION (MT_ST_DEF_OPTIONS | 0x200000) +#define MT_ST_DEF_DRVBUFFER (MT_ST_DEF_OPTIONS | 0x300000) + +/* The offset for the arguments for the special HP changer load command. */ +#define MT_ST_HPLOADER_OFFSET 10000 + + +/* Specify default tape device. 
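   The ifndef guard below lets a port override the name at compile time; on
   Windows the native device naming takes the form \\.\Tape0 rather than a
   Unix-style /dev path.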
*/ +#ifndef DEFTAPE +# define DEFTAPE "/dev/tape" +#endif + +#endif /* mtio.h */ diff --git a/src/win32/compat/sys/socket.h b/src/win32/compat/sys/socket.h new file mode 100644 index 00000000..e69de29b diff --git a/src/win32/compat/sys/stat.h b/src/win32/compat/sys/stat.h new file mode 100644 index 00000000..7273fef6 --- /dev/null +++ b/src/win32/compat/sys/stat.h @@ -0,0 +1 @@ +#include "compat.h" diff --git a/src/win32/compat/sys/time.h b/src/win32/compat/sys/time.h new file mode 100644 index 00000000..e69de29b diff --git a/src/win32/compat/sys/wait.h b/src/win32/compat/sys/wait.h new file mode 100644 index 00000000..e69de29b diff --git a/src/win32/compat/syslog.h b/src/win32/compat/syslog.h new file mode 100644 index 00000000..fbe2cf19 --- /dev/null +++ b/src/win32/compat/syslog.h @@ -0,0 +1,32 @@ +#ifndef _SYSLOG_H +#define _SYSLOG_H + +#define LOG_DAEMON 0 +#define LOG_ERR 1 +#define LOG_CRIT 2 +#define LOG_ALERT 3 +#define LOG_WARNING 4 +#define LOG_NOTICE 5 +#define LOG_INFO 6 +#define LOG_LOCAL0 10 +#define LOG_LOCAL1 11 +#define LOG_LOCAL2 12 +#define LOG_LOCAL3 13 +#define LOG_LOCAL4 14 +#define LOG_LOCAL5 15 +#define LOG_LOCAL6 16 +#define LOG_LOCAL7 17 +#define LOG_LPR 20 +#define LOG_MAIL 21 +#define LOG_NEWS 22 +#define LOG_UUCP 23 +#define LOG_USER 24 +#define LOG_CONS 0 +#define LOG_PID 0 + + +extern void syslog(int type, const char *fmt, ...); +void openlog(const char *app, int, int); +void closelog(void); + +#endif /* _SYSLOG_H */ diff --git a/src/win32/compat/unistd.h b/src/win32/compat/unistd.h new file mode 100644 index 00000000..beb96285 --- /dev/null +++ b/src/win32/compat/unistd.h @@ -0,0 +1,3 @@ +#if defined(__GNUC__) +#include_next +#endif diff --git a/src/win32/compat/winapi.c b/src/win32/compat/winapi.c new file mode 100644 index 00000000..e2598b16 --- /dev/null +++ b/src/win32/compat/winapi.c @@ -0,0 +1,349 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2019 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Windows APIs that are different for each system. + * We use pointers to the entry points so that a + * single binary will run on all Windows systems. 
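 *
 * In outline (a sketch only; the loading code below also copes with the
 * ANSI/Unicode split and with entry points that are absent on older
 * systems):
 *
 *    HMODULE h = LoadLibraryA("KERNEL32.DLL");
 *    if (h) {
 *       p_AttachConsole = (t_AttachConsole)GetProcAddress(h, "AttachConsole");
 *    }
 *
 * A pointer that GetProcAddress() cannot resolve stays NULL, so callers
 * must test it (if (p_AttachConsole) ...) before calling through it.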
+ * + * Kern Sibbald MMIII + */ + +#include "bacula.h" + + +#ifdef HAVE_VSS64 +/* 64 bit entrypoint name */ +#define VSSVBACK_ENTRY "?CreateVssBackupComponents@@YAJPEAPEAVIVssBackupComponents@@@Z" +#define VSSVMETA_ENTRY "?CreateVssExamineWriterMetadata@@YAJPEAGPEAPEAVIVssExamineWriterMetadata@@@Z" +#else +/* 32 bit entrypoint name */ +#define VSSVMETA_ENTRY "?CreateVssExamineWriterMetadata@@YGJPAGPAPAVIVssExamineWriterMetadata@@@Z" +#define VSSVBACK_ENTRY "?CreateVssBackupComponents@@YGJPAPAVIVssBackupComponents@@@Z" +#endif + + +// init with win9x, but maybe set to NT in InitWinAPI +DWORD g_platform_id = VER_PLATFORM_WIN32_WINDOWS; +DWORD g_MinorVersion = 0; +DWORD g_MajorVersion = 0; + +/* API Pointers */ + +t_OpenProcessToken p_OpenProcessToken = NULL; +t_AdjustTokenPrivileges p_AdjustTokenPrivileges = NULL; +t_LookupPrivilegeValue p_LookupPrivilegeValue = NULL; + +t_SetProcessShutdownParameters p_SetProcessShutdownParameters = NULL; + +t_CreateFileA p_CreateFileA = NULL; +t_CreateFileW p_CreateFileW = NULL; + +t_OpenEncryptedFileRawA p_OpenEncryptedFileRawA = NULL; +t_OpenEncryptedFileRawW p_OpenEncryptedFileRawW = NULL; +t_ReadEncryptedFileRaw p_ReadEncryptedFileRaw = NULL; +t_WriteEncryptedFileRaw p_WriteEncryptedFileRaw = NULL; +t_CloseEncryptedFileRaw p_CloseEncryptedFileRaw = NULL; + + +t_CreateDirectoryA p_CreateDirectoryA; +t_CreateDirectoryW p_CreateDirectoryW; + +t_GetFileInformationByHandleEx p_GetFileInformationByHandleEx = NULL; + +t_wunlink p_wunlink = NULL; +t_wmkdir p_wmkdir = NULL; + +t_GetFileAttributesA p_GetFileAttributesA = NULL; +t_GetFileAttributesW p_GetFileAttributesW = NULL; + +t_GetFileAttributesExA p_GetFileAttributesExA = NULL; +t_GetFileAttributesExW p_GetFileAttributesExW = NULL; + +t_SetFileAttributesA p_SetFileAttributesA = NULL; +t_SetFileAttributesW p_SetFileAttributesW = NULL; +t_BackupRead p_BackupRead = NULL; +t_BackupWrite p_BackupWrite = NULL; +t_WideCharToMultiByte p_WideCharToMultiByte = NULL; +t_MultiByteToWideChar p_MultiByteToWideChar = NULL; + +t_AttachConsole p_AttachConsole = NULL; + +t_FindFirstFileA p_FindFirstFileA = NULL; +t_FindFirstFileW p_FindFirstFileW = NULL; + +t_FindNextFileA p_FindNextFileA = NULL; +t_FindNextFileW p_FindNextFileW = NULL; + +t_SetCurrentDirectoryA p_SetCurrentDirectoryA = NULL; +t_SetCurrentDirectoryW p_SetCurrentDirectoryW = NULL; + +t_GetCurrentDirectoryA p_GetCurrentDirectoryA = NULL; +t_GetCurrentDirectoryW p_GetCurrentDirectoryW = NULL; + +t_GetVolumePathNameW p_GetVolumePathNameW = NULL; +t_GetVolumeNameForVolumeMountPointW p_GetVolumeNameForVolumeMountPointW = NULL; + +t_SHGetFolderPath p_SHGetFolderPath = NULL; + +t_CreateProcessA p_CreateProcessA = NULL; +t_CreateProcessW p_CreateProcessW = NULL; + +t_CreateSymbolicLinkA p_CreateSymbolicLinkA = NULL; +t_CreateSymbolicLinkW p_CreateSymbolicLinkW = NULL; +t_InetPton p_InetPton = NULL; +t_GetProcessMemoryInfo p_GetProcessMemoryInfo = NULL; +t_EmptyWorkingSet p_EmptyWorkingSet = NULL; + +HMODULE vsslib = NULL; +t_CreateVssBackupComponents p_CreateVssBackupComponents = NULL; +t_VssFreeSnapshotProperties p_VssFreeSnapshotProperties = NULL; +t_CreateVssExamineWriterMetadata p_CreateVssExamineWriterMetadata; + + +static void atexit_handler() +{ + CoUninitialize(); +} + +/* http://thrysoee.dk/InsideCOM+/ch18d.htm + * The COM+ security infrastructure is initialized on a per-process basis at + * start-up. The CoInitializeSecurity function sets the default security values + * for the process. 
If an application does not call CoInitializeSecurity, COM+ + * calls the function automatically the first time an interface pointer is + * marshaled into or out of an apartment (or context) in the + * process. Attempting to call CoInitializeSecurity after marshaling takes + * place yields the infamous RPC_E_TOO_LATE error. Thus, programs that want to + * call CoInitializeSecurity explicitly are advised to do so immediately after + * calling CoInitializeEx. Note that CoInitializeSecurity is called only once per + * process, not in each thread that calls CoInitializeEx. +*/ +static void InitComInterface() +{ + /* Setup ComSecurity */ + HRESULT hr = CoInitializeEx(NULL, COINIT_MULTITHREADED); + if (FAILED(hr)) { + Dmsg1(0, "CoInitializeEx returned 0x%08X\n", hr); + + } else { + // Initialize COM security + hr = + CoInitializeSecurity( + NULL, // Allow *all* VSS writers to communicate back! + -1, // Default COM authentication service + NULL, // Default COM authorization service + NULL, // reserved parameter + RPC_C_AUTHN_LEVEL_PKT_PRIVACY, // Strongest COM authentication level + RPC_C_IMP_LEVEL_IDENTIFY, // Minimal impersonation abilities + NULL, // Default COM authentication settings + EOAC_NONE, // No special options + NULL // Reserved parameter + ); + if (FAILED(hr) && (hr != RPC_E_TOO_LATE)) { + Dmsg1(0, "CoInitializeSecurity returned 0x%08X\n", hr); + } + atexit(atexit_handler); + } +} + +void +InitWinAPIWrapper() +{ + OSVERSIONINFO osversioninfo = { sizeof(OSVERSIONINFO) }; + + // Get the current OS version + if (!GetVersionEx(&osversioninfo)) { + g_platform_id = 0; + } else { + g_platform_id = osversioninfo.dwPlatformId; + g_MinorVersion = osversioninfo.dwMinorVersion; + g_MajorVersion = osversioninfo.dwMajorVersion; + } + + HMODULE hLib = LoadLibraryA("KERNEL32.DLL"); + if (hLib) { + /* Might be defined in Kernel32.dll or PSAPI.DLL */ + p_GetProcessMemoryInfo = (t_GetProcessMemoryInfo) + GetProcAddress(hLib, "K32GetProcessMemoryInfo"); + + /* Might be defined in Kernel32.dll or PSAPI.DLL */ + p_EmptyWorkingSet = (t_EmptyWorkingSet) + GetProcAddress(hLib, "K32EmptyWorkingSet"); + + /* Not defined before win2008 */ + p_CreateSymbolicLinkA = (t_CreateSymbolicLinkA) + GetProcAddress(hLib, "CreateSymbolicLinkA"); + p_CreateSymbolicLinkW = (t_CreateSymbolicLinkW) + GetProcAddress(hLib, "CreateSymbolicLinkW"); + + /* create process calls */ + p_CreateProcessA = (t_CreateProcessA) + GetProcAddress(hLib, "CreateProcessA"); + p_CreateProcessW = (t_CreateProcessW) + GetProcAddress(hLib, "CreateProcessW"); + + /* create file calls */ + p_CreateFileA = (t_CreateFileA)GetProcAddress(hLib, "CreateFileA"); + p_CreateDirectoryA = (t_CreateDirectoryA)GetProcAddress(hLib, "CreateDirectoryA"); + + p_GetFileInformationByHandleEx = (t_GetFileInformationByHandleEx) + GetProcAddress(hLib, "GetFileInformationByHandleEx"); + + /* attribute calls */ + p_GetFileAttributesA = (t_GetFileAttributesA)GetProcAddress(hLib, "GetFileAttributesA"); + p_GetFileAttributesExA = (t_GetFileAttributesExA)GetProcAddress(hLib, "GetFileAttributesExA"); + p_SetFileAttributesA = (t_SetFileAttributesA)GetProcAddress(hLib, "SetFileAttributesA"); + + /* process calls */ + p_SetProcessShutdownParameters = (t_SetProcessShutdownParameters) + GetProcAddress(hLib, "SetProcessShutdownParameters"); + + /* char conversion calls */ + p_WideCharToMultiByte = (t_WideCharToMultiByte) + GetProcAddress(hLib, "WideCharToMultiByte"); + p_MultiByteToWideChar = (t_MultiByteToWideChar) + GetProcAddress(hLib, "MultiByteToWideChar"); + + /* find files */ + 
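      /* Only the ANSI FindFirstFileA/FindNextFileA pair is resolved here;
       * the wide-character pair is resolved further down, and only on
       * NT-class systems (g_platform_id != VER_PLATFORM_WIN32_WINDOWS). */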
p_FindFirstFileA = (t_FindFirstFileA)GetProcAddress(hLib, "FindFirstFileA"); + p_FindNextFileA = (t_FindNextFileA)GetProcAddress(hLib, "FindNextFileA"); + + /* get and set directory */ + p_GetCurrentDirectoryA = (t_GetCurrentDirectoryA) + GetProcAddress(hLib, "GetCurrentDirectoryA"); + p_SetCurrentDirectoryA = (t_SetCurrentDirectoryA) + GetProcAddress(hLib, "SetCurrentDirectoryA"); + + if (g_platform_id != VER_PLATFORM_WIN32_WINDOWS) { + p_CreateFileW = (t_CreateFileW) + GetProcAddress(hLib, "CreateFileW"); + p_CreateDirectoryW = (t_CreateDirectoryW) + GetProcAddress(hLib, "CreateDirectoryW"); + + /* backup calls */ + p_BackupRead = (t_BackupRead)GetProcAddress(hLib, "BackupRead"); + p_BackupWrite = (t_BackupWrite)GetProcAddress(hLib, "BackupWrite"); + + p_GetFileAttributesW = (t_GetFileAttributesW) + GetProcAddress(hLib, "GetFileAttributesW"); + p_GetFileAttributesExW = (t_GetFileAttributesExW) + GetProcAddress(hLib, "GetFileAttributesExW"); + p_SetFileAttributesW = (t_SetFileAttributesW) + GetProcAddress(hLib, "SetFileAttributesW"); + p_FindFirstFileW = (t_FindFirstFileW) + GetProcAddress(hLib, "FindFirstFileW"); + p_FindNextFileW = (t_FindNextFileW) + GetProcAddress(hLib, "FindNextFileW"); + p_GetCurrentDirectoryW = (t_GetCurrentDirectoryW) + GetProcAddress(hLib, "GetCurrentDirectoryW"); + p_SetCurrentDirectoryW = (t_SetCurrentDirectoryW) + GetProcAddress(hLib, "SetCurrentDirectoryW"); + + /* some special stuff we need for VSS + but static linkage doesn't work on Win 9x */ + p_GetVolumePathNameW = (t_GetVolumePathNameW) + GetProcAddress(hLib, "GetVolumePathNameW"); + p_GetVolumeNameForVolumeMountPointW = (t_GetVolumeNameForVolumeMountPointW) + GetProcAddress(hLib, "GetVolumeNameForVolumeMountPointW"); + + p_AttachConsole = (t_AttachConsole) + GetProcAddress(hLib, "AttachConsole"); + } + } + + if (g_platform_id != VER_PLATFORM_WIN32_WINDOWS) { + hLib = LoadLibraryA("MSVCRT.DLL"); + if (hLib) { + /* unlink */ + p_wunlink = (t_wunlink) + GetProcAddress(hLib, "_wunlink"); + /* wmkdir */ + p_wmkdir = (t_wmkdir) + GetProcAddress(hLib, "_wmkdir"); + } + + hLib = LoadLibraryA("ADVAPI32.DLL"); + if (hLib) { + p_OpenProcessToken = (t_OpenProcessToken) + GetProcAddress(hLib, "OpenProcessToken"); + p_AdjustTokenPrivileges = (t_AdjustTokenPrivileges) + GetProcAddress(hLib, "AdjustTokenPrivileges"); + p_LookupPrivilegeValue = (t_LookupPrivilegeValue) + GetProcAddress(hLib, "LookupPrivilegeValueA"); + + p_OpenEncryptedFileRawA = (t_OpenEncryptedFileRawA) + GetProcAddress(hLib, "OpenEncryptedFileRawA"); + p_OpenEncryptedFileRawW = (t_OpenEncryptedFileRawW) + GetProcAddress(hLib, "OpenEncryptedFileRawW"); + p_ReadEncryptedFileRaw = (t_ReadEncryptedFileRaw) + GetProcAddress(hLib, "ReadEncryptedFileRaw"); + p_WriteEncryptedFileRaw = (t_WriteEncryptedFileRaw) + GetProcAddress(hLib, "WriteEncryptedFileRaw"); + p_CloseEncryptedFileRaw = (t_CloseEncryptedFileRaw) + GetProcAddress(hLib, "CloseEncryptedFileRaw"); + } + } + + hLib = LoadLibraryA("SHELL32.DLL"); + if (hLib) { + p_SHGetFolderPath = (t_SHGetFolderPath) + GetProcAddress(hLib, "SHGetFolderPathA"); + } else { + /* If SHELL32 isn't found try SHFOLDER for older systems */ + hLib = LoadLibraryA("SHFOLDER.DLL"); + if (hLib) { + p_SHGetFolderPath = (t_SHGetFolderPath) + GetProcAddress(hLib, "SHGetFolderPathA"); + } + } + hLib = LoadLibraryA("WS2_32.DLL"); + if (hLib) { + p_InetPton = (t_InetPton)GetProcAddress(hLib, "InetPtonA"); + } + if (!p_GetProcessMemoryInfo) { + hLib = LoadLibraryA("PSAPI.DLL"); + if (hLib) { + p_GetProcessMemoryInfo = 
(t_GetProcessMemoryInfo)GetProcAddress(hLib, "GetProcessMemoryInfo"); + p_EmptyWorkingSet = (t_EmptyWorkingSet) GetProcAddress(hLib, "EmptyWorkingSet"); + } + } + + vsslib = LoadLibraryA("VSSAPI.DLL"); + if (vsslib) { + p_CreateVssBackupComponents = (t_CreateVssBackupComponents) + GetProcAddress(vsslib, VSSVBACK_ENTRY); + p_VssFreeSnapshotProperties = (t_VssFreeSnapshotProperties) + GetProcAddress(vsslib, "VssFreeSnapshotProperties"); + p_CreateVssExamineWriterMetadata = (t_CreateVssExamineWriterMetadata) + GetProcAddress(vsslib, VSSVMETA_ENTRY); + } + + /* In recent version of windows, the function is in Kernel32 */ + if (!p_GetFileInformationByHandleEx) { + hLib = LoadLibraryA("FileExtd.lib"); + if (hLib) { + p_GetFileInformationByHandleEx = (t_GetFileInformationByHandleEx) + GetProcAddress(hLib, "GetFileInformationByHandleEx"); + } + } + + atexit(Win32ConvCleanupCache); + + /* Setup Com Object security interface (called once per process) */ + InitComInterface(); +} diff --git a/src/win32/compat/winapi.h b/src/win32/compat/winapi.h new file mode 100644 index 00000000..46a5ce98 --- /dev/null +++ b/src/win32/compat/winapi.h @@ -0,0 +1,242 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2019 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Windows APIs that are different for each system. + * We use pointers to the entry points so that a + * single binary will run on all Windows systems. + * + * Kern Sibbald MMIII + */ + +#ifndef __WINAPI_H +#define __WINAPI_H + +#ifndef POOLMEM +typedef char POOLMEM; +#endif + +// unicode enabling of win 32 needs some defines and functions + +// using an average of 3 bytes per character is probably fine in +// practice but I believe that Windows actually uses UTF-16 encoding +// as opposed to UCS2 which means characters 0x10000-0x10ffff are +// valid and result in 4 byte UTF-8 encodings. +#define MAX_PATH_UTF8 MAX_PATH*4 // strict upper bound on UTF-16 to UTF-8 conversion + +// from +// http://msdn.microsoft.com/library/default.asp?url=/library/en-us/fileio/fs/getfileattributesex.asp +// In the ANSI version of this function, the name is limited to +// MAX_PATH characters. To extend this limit to 32,767 wide +// characters, call the Unicode version of the function and prepend +// "\\?\" to the path. For more information, see Naming a File. 
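//
// A minimal usage sketch of the conversion helpers declared below ("fname"
// is assumed to hold a UTF-8 path and "wname" is just a local name for the
// sketch; POOLMEM buffers come from Bacula's memory pool):
//
//    POOLMEM *wname = get_pool_memory(PM_FNAME);
//    make_win32_path_UTF8_2_wchar(&wname, fname);
//    HANDLE h = p_CreateFileW((LPCWSTR)wname, GENERIC_READ, FILE_SHARE_READ,
//                             NULL, OPEN_EXISTING, 0, NULL);
//    free_pool_memory(wname);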
+#define MAX_PATH_W 32767 + +int wchar_2_UTF8(POOLMEM **pszUTF, const wchar_t *pszUCS); +int wchar_2_UTF8(char *pszUTF, const WCHAR *pszUCS, int cchChar = MAX_PATH_UTF8); +int UTF8_2_wchar(POOLMEM **pszUCS, const char *pszUTF); +int make_win32_path_UTF8_2_wchar(POOLMEM **pszUCS, const char *pszUTF, BOOL* pBIsRawPath = NULL); + +// init with win9x, but maybe set to NT in InitWinAPI +extern DWORD DLL_IMP_EXP g_platform_id; +extern DWORD DLL_IMP_EXP g_MinorVersion; +extern DWORD DLL_IMP_EXP g_MajorVersion; + +/* In ADVAPI32.DLL */ +typedef BOOL (WINAPI * t_OpenProcessToken)(HANDLE, DWORD, PHANDLE); +typedef BOOL (WINAPI * t_AdjustTokenPrivileges)(HANDLE, BOOL, + PTOKEN_PRIVILEGES, DWORD, PTOKEN_PRIVILEGES, PDWORD); +typedef BOOL (WINAPI * t_LookupPrivilegeValue)(LPCTSTR, LPCTSTR, PLUID); + +extern t_OpenProcessToken DLL_IMP_EXP p_OpenProcessToken; +extern t_AdjustTokenPrivileges DLL_IMP_EXP p_AdjustTokenPrivileges; +extern t_LookupPrivilegeValue DLL_IMP_EXP p_LookupPrivilegeValue; + +/* In MSVCRT.DLL */ +typedef int (__cdecl * t_wunlink) (const wchar_t *); +typedef int (__cdecl * t_wmkdir) (const wchar_t *); +typedef int (__cdecl * t_wopen) (const wchar_t *, int, ...); + +extern t_wunlink DLL_IMP_EXP p_wunlink; +extern t_wmkdir DLL_IMP_EXP p_wmkdir; + +/* In KERNEL32.DLL */ +typedef BOOL (WINAPI * t_GetFileAttributesExA)(LPCSTR, GET_FILEEX_INFO_LEVELS, + LPVOID); +typedef BOOL (WINAPI * t_GetFileAttributesExW)(LPCWSTR, GET_FILEEX_INFO_LEVELS, + LPVOID); + +typedef DWORD (WINAPI * t_GetFileAttributesA)(LPCSTR); +typedef DWORD (WINAPI * t_GetFileAttributesW)(LPCWSTR); +typedef BOOL (WINAPI * t_SetFileAttributesA)(LPCSTR, DWORD); +typedef BOOL (WINAPI * t_SetFileAttributesW)(LPCWSTR, DWORD); + +typedef HANDLE (WINAPI * t_CreateFileA) (LPCSTR, DWORD ,DWORD, LPSECURITY_ATTRIBUTES, + DWORD , DWORD, HANDLE); +typedef HANDLE (WINAPI * t_CreateFileW) (LPCWSTR, DWORD ,DWORD, LPSECURITY_ATTRIBUTES, + DWORD , DWORD, HANDLE); + +typedef DWORD (WINAPI * t_OpenEncryptedFileRawA) (LPCSTR, ULONG, PVOID); +typedef DWORD (WINAPI * t_OpenEncryptedFileRawW) (LPCWSTR, ULONG, PVOID); +typedef DWORD (WINAPI * t_ReadEncryptedFileRaw) (PFE_EXPORT_FUNC, PVOID, PVOID); +typedef DWORD (WINAPI * t_WriteEncryptedFileRaw) (PFE_IMPORT_FUNC, PVOID, PVOID); +typedef void (WINAPI * t_CloseEncryptedFileRaw) (PVOID); + +typedef BOOL (WINAPI * t_CreateDirectoryA) (LPCSTR, LPSECURITY_ATTRIBUTES); +typedef BOOL (WINAPI * t_CreateDirectoryW) (LPCWSTR, LPSECURITY_ATTRIBUTES); + +typedef DWORD (WINAPI * t_GetFileInformationByHandleEx)(HANDLE, FILE_INFO_BY_HANDLE_CLASS, LPVOID, DWORD); + +typedef BOOL (WINAPI * t_SetProcessShutdownParameters)(DWORD, DWORD); +typedef BOOL (WINAPI * t_BackupRead)(HANDLE,LPBYTE,DWORD,LPDWORD,BOOL,BOOL,LPVOID*); +typedef BOOL (WINAPI * t_BackupWrite)(HANDLE,LPBYTE,DWORD,LPDWORD,BOOL,BOOL,LPVOID*); + +typedef int (WINAPI * t_WideCharToMultiByte) (UINT CodePage, DWORD , LPCWSTR, int, + LPSTR, int, LPCSTR, LPBOOL); + +typedef int (WINAPI * t_MultiByteToWideChar) (UINT, DWORD, LPCSTR, int, LPWSTR, int); +typedef HANDLE (WINAPI * t_FindFirstFileA) (LPCSTR, LPWIN32_FIND_DATAA); +typedef HANDLE (WINAPI * t_FindFirstFileW) (LPCWSTR, LPWIN32_FIND_DATAW); + +typedef BOOL (WINAPI * t_FindNextFileA) (HANDLE, LPWIN32_FIND_DATAA); +typedef BOOL (WINAPI * t_FindNextFileW) (HANDLE, LPWIN32_FIND_DATAW); + +typedef BOOL (WINAPI * t_SetCurrentDirectoryA) (LPCSTR); +typedef BOOL (WINAPI * t_SetCurrentDirectoryW) (LPCWSTR); + +typedef DWORD (WINAPI * t_GetCurrentDirectoryA) (DWORD, LPSTR); +typedef DWORD (WINAPI * 
t_GetCurrentDirectoryW) (DWORD, LPWSTR); + +typedef BOOL (WINAPI * t_GetVolumePathNameW) (LPCWSTR, LPWSTR, DWORD); +typedef BOOL (WINAPI * t_GetVolumeNameForVolumeMountPointW) (LPCWSTR, LPWSTR, DWORD); + +typedef BOOL (WINAPI * t_CreateSymbolicLinkA) (LPCSTR, LPCSTR, DWORD); +typedef BOOL (WINAPI * t_CreateSymbolicLinkW) (LPCWSTR, LPCWSTR, DWORD); +/* See: http://msdn.microsoft.com/en-us/library/windows/desktop/cc805844%28v=vs.85%29.aspx */ +typedef int (WSAAPI * t_InetPton)(int, const char *, void *); + + +typedef BOOL (WINAPI * t_AttachConsole) (DWORD); + +typedef BOOL (WINAPI *t_CreateProcessA) ( + LPCSTR, + LPSTR, + LPSECURITY_ATTRIBUTES, + LPSECURITY_ATTRIBUTES, + BOOL, + DWORD, + PVOID, + LPCSTR, + LPSTARTUPINFOA, + LPPROCESS_INFORMATION); +typedef BOOL (WINAPI *t_CreateProcessW) ( + LPCWSTR, + LPWSTR, + LPSECURITY_ATTRIBUTES, + LPSECURITY_ATTRIBUTES, + BOOL, + DWORD, + PVOID, + LPCWSTR, + LPSTARTUPINFOW, + LPPROCESS_INFORMATION); + +typedef BOOL (WINAPI *t_GetProcessMemoryInfo) ( + HANDLE Process, + PPROCESS_MEMORY_COUNTERS ppsmemCounters, + DWORD cb); + +typedef BOOL (WINAPI *t_EmptyWorkingSet) (HANDLE hProcess); + +typedef struct _VSS_SNAPSHOT_PROP VSS_SNAPSHOT_PROP; +class IVssBackupComponents; +class IVssExamineWriterMetadata; + +typedef HRESULT (STDAPICALLTYPE* t_CreateVssBackupComponents)(IVssBackupComponents **); +typedef void (APIENTRY* t_VssFreeSnapshotProperties)(VSS_SNAPSHOT_PROP*); +typedef HRESULT (WINAPI* t_CreateVssExamineWriterMetadata)(BSTR, IVssExamineWriterMetadata **); + +extern t_CreateVssBackupComponents DLL_IMP_EXP p_CreateVssBackupComponents; +extern t_VssFreeSnapshotProperties DLL_IMP_EXP p_VssFreeSnapshotProperties; +extern t_CreateVssExamineWriterMetadata DLL_IMP_EXP p_CreateVssExamineWriterMetadata; + +extern t_EmptyWorkingSet DLL_IMP_EXP p_EmptyWorkingSet; +extern t_GetProcessMemoryInfo DLL_IMP_EXP p_GetProcessMemoryInfo; +extern t_CreateProcessA DLL_IMP_EXP p_CreateProcessA; +extern t_CreateProcessW DLL_IMP_EXP p_CreateProcessW; + +extern t_GetFileAttributesA DLL_IMP_EXP p_GetFileAttributesA; +extern t_GetFileAttributesW DLL_IMP_EXP p_GetFileAttributesW; + +extern t_GetFileAttributesExA DLL_IMP_EXP p_GetFileAttributesExA; +extern t_GetFileAttributesExW DLL_IMP_EXP p_GetFileAttributesExW; + +extern t_SetFileAttributesA DLL_IMP_EXP p_SetFileAttributesA; +extern t_SetFileAttributesW DLL_IMP_EXP p_SetFileAttributesW; + +extern t_CreateFileA DLL_IMP_EXP p_CreateFileA; +extern t_CreateFileW DLL_IMP_EXP p_CreateFileW; + +extern t_OpenEncryptedFileRawA DLL_IMP_EXP p_OpenEncryptedFileRawA; +extern t_OpenEncryptedFileRawW DLL_IMP_EXP p_OpenEncryptedFileRawW; +extern t_ReadEncryptedFileRaw DLL_IMP_EXP p_ReadEncryptedFileRaw; +extern t_WriteEncryptedFileRaw DLL_IMP_EXP p_WriteEncryptedFileRaw; +extern t_CloseEncryptedFileRaw DLL_IMP_EXP p_CloseEncryptedFileRaw; + +extern t_CreateDirectoryA DLL_IMP_EXP p_CreateDirectoryA; +extern t_CreateDirectoryW DLL_IMP_EXP p_CreateDirectoryW; + +extern t_GetFileInformationByHandleEx DLL_IMP_EXP p_GetFileInformationByHandleEx; + +extern t_SetProcessShutdownParameters DLL_IMP_EXP p_SetProcessShutdownParameters; +extern t_BackupRead DLL_IMP_EXP p_BackupRead; +extern t_BackupWrite DLL_IMP_EXP p_BackupWrite; + +extern t_WideCharToMultiByte DLL_IMP_EXP p_WideCharToMultiByte; +extern t_MultiByteToWideChar DLL_IMP_EXP p_MultiByteToWideChar; + +extern t_FindFirstFileA DLL_IMP_EXP p_FindFirstFileA; +extern t_FindFirstFileW DLL_IMP_EXP p_FindFirstFileW; + +extern t_FindNextFileA DLL_IMP_EXP p_FindNextFileA; +extern t_FindNextFileW 
DLL_IMP_EXP p_FindNextFileW; + +extern t_SetCurrentDirectoryA DLL_IMP_EXP p_SetCurrentDirectoryA; +extern t_SetCurrentDirectoryW DLL_IMP_EXP p_SetCurrentDirectoryW; + +extern t_GetCurrentDirectoryA DLL_IMP_EXP p_GetCurrentDirectoryA; +extern t_GetCurrentDirectoryW DLL_IMP_EXP p_GetCurrentDirectoryW; + +extern t_GetVolumePathNameW DLL_IMP_EXP p_GetVolumePathNameW; +extern t_GetVolumeNameForVolumeMountPointW DLL_IMP_EXP p_GetVolumeNameForVolumeMountPointW; + +extern t_AttachConsole DLL_IMP_EXP p_AttachConsole; + +extern t_CreateSymbolicLinkW DLL_IMP_EXP p_CreateSymbolicLinkW; +extern t_CreateSymbolicLinkA DLL_IMP_EXP p_CreateSymbolicLinkA; +extern t_InetPton DLL_IMP_EXP p_InetPton; + +extern HMODULE DLL_IMP_EXP vsslib; + +void InitWinAPIWrapper(); + +/* In SHFolder.dll on older systems, and now Shell32.dll */ +typedef BOOL (WINAPI * t_SHGetFolderPath)(HWND, int, HANDLE, DWORD, LPTSTR); +extern t_SHGetFolderPath DLL_IMP_EXP p_SHGetFolderPath; + +#endif /* __WINAPI_H */ diff --git a/src/win32/compat/winhdrs.h b/src/win32/compat/winhdrs.h new file mode 100644 index 00000000..46b7cbaf --- /dev/null +++ b/src/win32/compat/winhdrs.h @@ -0,0 +1,14 @@ + +#ifndef __WINHDRS_H_ +#define __WINHDRS_H_ + +#include "mingwconfig.h" +#include +#include +#include +#ifdef HAVE_IPV6 +#include +#endif +#include + +#endif diff --git a/src/win32/compat/winhost.h b/src/win32/compat/winhost.h new file mode 100644 index 00000000..5d7b56ab --- /dev/null +++ b/src/win32/compat/winhost.h @@ -0,0 +1,51 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2019 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Define Host machine + */ + + +#include "host.h" +#undef HOST_OS +#undef DISTNAME +#undef DISTVER + +#ifdef HAVE_MINGW +extern char win_os[]; +#define HOST_OS "Linux" +#define DISTNAME "Cross-compile" +#ifndef BACULA +#define BACULA "Bacula" +#endif +#ifdef _WIN64 +# define DISTVER "Win64" +#else +# define DISTVER "Win32" +#endif + +#else + +extern DLL_IMP_EXP char WIN_VERSION_LONG[]; +extern DLL_IMP_EXP char WIN_VERSION[]; + +#define HOST_OS WIN_VERSION_LONG +#define DISTNAME "MVS" +#define DISTVER WIN_VERSION + +#endif diff --git a/src/win32/compat/winsock.h b/src/win32/compat/winsock.h new file mode 100644 index 00000000..0274d585 --- /dev/null +++ b/src/win32/compat/winsock.h @@ -0,0 +1,6 @@ +/* +#include +#ifdef HAVE_IPV6 +#include +#endif +*/ diff --git a/src/win32/console/Makefile b/src/win32/console/Makefile new file mode 100644 index 00000000..c44f3431 --- /dev/null +++ b/src/win32/console/Makefile @@ -0,0 +1,59 @@ +# +# Makefile for win32 bacula executables +# Using MinGW cross-compiler on GNU/Linux +# +# Author: Robert Nelson +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# Written by Robert Nelson, June 2006 +# + +include ../Makefile.inc + +INCLUDES = \ + $(INCLUDE_BACULA) \ + $(INCLUDE_PTHREADS) \ + $(INCLUDE_OPENSSL) + +DEFINES = \ + -DUSING_DLL \ + $(HAVES) + +vpath %.c $(MAINDIR)/src/console +vpath %.cpp $(MAINDIR)/src/console + +########################################################################## + +CONSOLE_OBJS = \ + $(OBJDIR)/authenticate.o \ + $(OBJDIR)/console.o \ + $(OBJDIR)/console_conf.o + +ALL_OBJS = $(CONSOLE_OBJS) + +CONSOLE_LIBS = $(LIBS_NETWORK) + +###################################################################### + +# Targets + +.PHONY: all clean distclean + +all: $(BINDIR)/bconsole.exe + +distclean: clean + +clean: + @echo "Cleaning `pwd`" + $(call clean_obj,$(ALL_OBJS)) + $(call clean_exe,$(BINDIR)/bconsole.exe) + $(ECHO_CMD)rm -rf $(OBJDIRS) + +# +# Rules +# + +$(BINDIR)/bconsole.exe: $(CONSOLE_OBJS) $(LIBS_BACULA) + $(call link_conapp,$(CONSOLE_LIBS)) + +include ../Makefile.rules diff --git a/src/win32/console/console.vcproj b/src/win32/console/console.vcproj new file mode 100644 index 00000000..57bbea9c --- /dev/null +++ b/src/win32/console/console.vcproj @@ -0,0 +1,293 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/win32/cygwin.NET.bashrc b/src/win32/cygwin.NET.bashrc new file mode 100644 index 00000000..07f4c639 --- /dev/null +++ b/src/win32/cygwin.NET.bashrc @@ -0,0 +1,63 @@ +export ALLUSERSPROFILE='C:\Documents and Settings\All Users' +export APPDATA='C:\Documents and Settings\Rick\Application Data' +export BASEMAKE='C:\Program Files\Microsoft SDK\Include\BKOffice.Mak' +export BASH=/usr/bin/bash +export BKOFFICE='C:\Program Files\Microsoft SDK\.' 
+export CLIENTNAME=Console +export COLORFGBG='0;default;15' +export COLORTERM=rxvt-xpm +export COLUMNS=70 +export COMMONPROGRAMFILES='C:\Program Files\Common Files' +export COMPUTERNAME=RICK_LPATOP +export COMSPEC='C:\WINDOWS\system32\cmd.exe' +export CYGWIN=notty +export DIRSTACK=() +export DISPLAY=:0 +export GROUPS=() +export HISTFILE=/cygdrive/c/home/Rick/.bash_history +export HISTFILESIZE=500 +export HISTSIZE=500 +export HOME=/cygdrive/c/home/Rick +export HOMEDRIVE=C: +export HOMEPATH='\Documents and Settings\Rick' +export HOSTNAME=RICK-LPATOP +export HOSTTYPE=i686 +export IFS=$' \t\n' +export INCLUDE='C:\Program Files\Microsoft SDK\Include\.;C:\Program Files\Microsoft Visual Studio .NET 2003\VC7\atlmfc\include;C:\Program Files\Microsoft Visual Studio .NET 2003\VC7\include' +export INETSDK='C:\Program Files\Microsoft SDK\.' +export LIB='C:\Program Files\Microsoft SDK\Lib\.;C:\Program Files\Microsoft Visual Studio .NET 2003\VC7\atlmfc\lib;C:\Program Files\Microsoft Visual Studio .NET 2003\VC7\lib' +export LINES=65 +export LOGONSERVER='\\RICK-LPATOP' +export MACHTYPE=i686-pc-cygwin +export MAILCHECK=60 +export MAKE_MODE=UNIX +export MSSDK='C:\Program Files\Microsoft SDK\.' +export MSTOOLS='C:\Program Files\Microsoft SDK\.' +export NUMBER_OF_PROCESSORS=1 +export OLDPWD=/home/Rick/bacula +export OPTERR=1 +export OPTIND=1 +export OS=Windows_NT +export OSTYPE=cygwin +export PATH='/cygdrive/c/Program Files/Microsoft Visual Studio .NET 2003/VC7/Bin:/cygdrive/c/Program Files/Microsoft Visual Studio .NET 2003/VC7:/cygdrive/c/Program Files/Microsoft Visual Studio .NET 2003/Common/Tools/Bin:/cygdrive/c/Program Files/Microsoft Visual Studio .NET 2003/Common/Tools:/cygdrive/c/Program Files/Microsoft Visual Studio .NET 2003/Common7/IDE:c:/cygwin/bin:/usr/bin:/home/sbarn/bin:/sbin:/usr/sbin:' +export PATHEXT='.COM;.EXE;.BAT;.CMD;.VBS;.VBE;.JS;.JSE;.WSF;.WSH' +export PROCESSOR_ARCHITECTURE=x86 +export PROCESSOR_IDENTIFIER='x86 Family 6 Model 8 Stepping 10, GenuineIntel' +export PROGRAMFILES='C:\Program Files' +export PROMPT='$P$G' +export PWD=/home/Rick +export SESSIONNAME=Console +export SHELL=/usr/bin/bash +export SHLVL=1 +export SYSTEMDRIVE=C: +export SYSTEMROOT='C:\WINDOWS' +export TEMP=/cygdrive/c/DOCUME~1/Rick/LOCALS~1/Temp +export TERM=xterm +export TMP=/cygdrive/c/DOCUME~1/Rick/LOCALS~1/Temp +export USERDOMAIN=RICK-LPATOP +export USERNAME=rick +export USERPROFILE='C:\Documents and Settings\Rick' +export WINDIR='C:\WINDOWS' +export WINDOWID=168050736 +export WXWIN='c:\cygwin\home\Rick\bacula\depkgs-win32\wx' + diff --git a/src/win32/dird/Makefile b/src/win32/dird/Makefile new file mode 100644 index 00000000..5febebc0 --- /dev/null +++ b/src/win32/dird/Makefile @@ -0,0 +1,117 @@ +# +# Makefile for win32 bacula executables +# Using MinGW cross-compiler on GNU/Linux +# +# +# Author: Robert Nelson +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# Written by Robert Nelson, June 2006 +# + +include ../Makefile.inc + +INCLUDES = \ + -I. 
\ + $(INCLUDE_PTHREADS) \ + $(INCLUDE_BACULA) \ + $(INCLUDE_ZLIB) \ + $(INCLUDE_OPENSSL) \ + $(INCLUDE_ICONS) + +DEFINES = \ + -DUSING_DLL \ + -DUSING_CATS \ + -DWINVER=0x500 \ + $(HAVES) + +vpath %.c $(MAINDIR)/src/dird $(BUILDDIR)/libwin32 +vpath %.cpp $(MAINDIR)/src/dird $(BUILDDIR)/libwin32 + +########################################################################## + +DIRD_OBJS = \ + $(OBJDIR)/admin.o \ + $(OBJDIR)/authenticate.o \ + $(OBJDIR)/autoprune.o \ + $(OBJDIR)/backup.o \ + $(OBJDIR)/bsr.o \ + $(OBJDIR)/catreq.o \ + $(OBJDIR)/dir_plugins.o \ + $(OBJDIR)/dird.o \ + $(OBJDIR)/dird_conf.o \ + $(OBJDIR)/expand.o \ + $(OBJDIR)/fd_cmds.o \ + $(OBJDIR)/getmsg.o \ + $(OBJDIR)/inc_conf.o \ + $(OBJDIR)/job.o \ + $(OBJDIR)/jobq.o \ + $(OBJDIR)/mountreq.o \ + $(OBJDIR)/msgchan.o \ + $(OBJDIR)/newvol.o \ + $(OBJDIR)/next_vol.o \ + $(OBJDIR)/recycle.o \ + $(OBJDIR)/restore.o \ + $(OBJDIR)/run_conf.o \ + $(OBJDIR)/scheduler.o \ + $(OBJDIR)/ua_acl.o \ + $(OBJDIR)/ua_cmds.o \ + $(OBJDIR)/ua_dotcmds.o \ + $(OBJDIR)/ua_input.o \ + $(OBJDIR)/ua_label.o \ + $(OBJDIR)/ua_output.o \ + $(OBJDIR)/ua_prune.o \ + $(OBJDIR)/ua_purge.o \ + $(OBJDIR)/ua_query.o \ + $(OBJDIR)/ua_restore.o \ + $(OBJDIR)/ua_run.o \ + $(OBJDIR)/ua_select.o \ + $(OBJDIR)/ua_server.o \ + $(OBJDIR)/ua_status.o \ + $(OBJDIR)/ua_tree.o \ + $(OBJDIR)/ua_update.o \ + $(OBJDIR)/ua_dde.o \ + $(OBJDIR)/vbackup.o \ + $(OBJDIR)/verify.o \ + $(OBJDIR)/service.o \ + $(OBJDIR)/mac_sql.o \ + $(OBJDIR)/mac.o \ + $(OBJDIR)/main.o \ + $(OBJDIR)/bacula.res + +ALL_OBJS = $(DIRD_OBJS) + + +DIRD_LIBS = \ + $(LIBS_PTHREADS) \ + $(LIBS_NETWORK) \ + -lole32 \ + -loleaut32 \ + -luuid \ + -lcomctl32 + +###################################################################### + +# Targets + +.PHONY: all clean + +all: $(BINDIR)/bacula-dir.exe + +clean: + @echo "Cleaning `pwd`" + $(call clean_obj,$(ALL_OBJS)) + $(call clean_exe,$(BINDIR)/bacula-dir.exe) + $(ECHO_CMD)rm -rf $(OBJDIRS) + +# +# Rules +# + +$(BINDIR)/bacula-dir.exe: $(DIRD_OBJS) $(LIBS_CATS) $(LIBS_BACULA) + $(call link_winapp,$(DIRD_LIBS)) + +$(OBJDIR)/winres.res: winres.rc + $(WINDRES) $(INCLUDE_ICONS) -O coff $< -o $@ + +include ../Makefile.rules diff --git a/src/win32/dird/bacula.rc b/src/win32/dird/bacula.rc new file mode 100644 index 00000000..92b889cb --- /dev/null +++ b/src/win32/dird/bacula.rc @@ -0,0 +1 @@ +#include "../libwin32/bacula.rc" diff --git a/src/win32/dird/dird.vcproj b/src/win32/dird/dird.vcproj new file mode 100644 index 00000000..4a11e264 --- /dev/null +++ b/src/win32/dird/dird.vcproj @@ -0,0 +1,1118 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/win32/dird/main.cpp b/src/win32/dird/main.cpp new file mode 100644 index 00000000..a39c61bd --- /dev/null +++ 
b/src/win32/dird/main.cpp @@ -0,0 +1,36 @@ +/* + Bacula® - The Network Backup Solution + + Copyright (C) 2007-2007 Free Software Foundation Europe e.V. + + The main author of Bacula is Kern Sibbald, with contributions from + many others, a complete list can be found in the file AUTHORS. + This program is Free Software; you can redistribute it and/or + modify it under the terms of version three of the GNU Affero General Public + License as published by the Free Software Foundation, which is + listed in the file LICENSE. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + + Bacula® is a registered trademark of Kern Sibbald. + The licensor of Bacula is the Free Software Foundation Europe + (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich, + Switzerland, email:ftf@fsfeurope.org. +*/ +/* + * Kern Sibbald, August 2007 + * + * Version $Id$ + * + */ + +#include "who.h" +#include "../libwin32/main.cpp" diff --git a/src/win32/dird/service.cpp b/src/win32/dird/service.cpp new file mode 100644 index 00000000..e108c654 --- /dev/null +++ b/src/win32/dird/service.cpp @@ -0,0 +1,36 @@ +/* + Bacula® - The Network Backup Solution + + Copyright (C) 2007-2007 Free Software Foundation Europe e.V. + + The main author of Bacula is Kern Sibbald, with contributions from + many others, a complete list can be found in the file AUTHORS. + This program is Free Software; you can redistribute it and/or + modify it under the terms of version three of the GNU Affero General Public + License as published by the Free Software Foundation, which is + listed in the file LICENSE. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + + Bacula® is a registered trademark of Kern Sibbald. + The licensor of Bacula is the Free Software Foundation Europe + (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich, + Switzerland, email:ftf@fsfeurope.org. +*/ +/* + * Kern Sibbald, August 2007 + * + * Version $Id$ + * + */ + +#include "who.h" +#include "../libwin32/service.cpp" diff --git a/src/win32/dird/who.h b/src/win32/dird/who.h new file mode 100644 index 00000000..2cf806ff --- /dev/null +++ b/src/win32/dird/who.h @@ -0,0 +1,33 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. 
+ + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Kern Sibbald, August 2007 + * + * This file is pulled in by certain generic routines in libwin32 + * to define the names of the daemon that is being built. + */ + +#define APP_NAME "Bacula-dir" +#define LC_APP_NAME "bacula-dir" +#define APP_DESC "Bacula Director Service" + +#define terminate_app(x) terminate_dird(x) +extern void terminate_dird(int sig); + +#define VSSInit() diff --git a/src/win32/filed/Makefile b/src/win32/filed/Makefile new file mode 100644 index 00000000..c83357a5 --- /dev/null +++ b/src/win32/filed/Makefile @@ -0,0 +1,123 @@ +# +# Makefile for win32 bacula executables +# Using MinGW cross-compiler on GNU/Linux +# +# Author: Robert Nelson +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# Written by Robert Nelson, June 2006 +# + +# +# Change the following depending on where this directory is located +# in the Bacula tree. It should point to the src/win32 directory. +BUILDDIR = .. + + +include $(BUILDDIR)/Makefile.inc + +INCLUDES = \ + -I. \ + $(INCLUDE_PTHREADS) \ + $(INCLUDE_BACULA) \ + $(INCLUDE_ZLIB) \ + $(INCLUDE_VSS) \ + $(INCLUDE_OPENSSL) \ + $(INCLUDE_ICONS) + +DEFINES = \ + -DUSING_DLL \ + -DWIN32_VSS \ + -DWINVER=0x500 \ + $(HAVES) + +# -DHAVE_TRAY_MONITOR \ + + +vpath %.c $(MAINDIR)/src/filed $(BUILDDIR)/libwin32 +vpath %.cpp $(MAINDIR)/src/filed $(BUILDDIR)/libwin32 + +########################################################################## + +FILED_OBJS = \ + $(OBJDIR)/accurate.o \ + $(OBJDIR)/authenticate.o \ + $(OBJDIR)/backup.o \ + $(OBJDIR)/crypto.o \ + $(OBJDIR)/win_efs.o \ + $(OBJDIR)/estimate.o \ + $(OBJDIR)/fd_plugins.o \ + $(OBJDIR)/fd_snapshot.o \ + $(OBJDIR)/filed.o \ + $(OBJDIR)/filed_conf.o \ + $(OBJDIR)/heartbeat.o \ + $(OBJDIR)/hello.o \ + $(OBJDIR)/job.o \ + $(OBJDIR)/restore.o \ + $(OBJDIR)/status.o \ + $(OBJDIR)/verify.o \ + $(OBJDIR)/verify_vol.o \ + $(OBJDIR)/vss.o \ + $(OBJDIR)/vss_XP.o \ + $(OBJDIR)/vss_W2K3.o \ + $(OBJDIR)/vss_Vista.o \ + $(OBJDIR)/service.o \ + $(OBJDIR)/main.o \ + $(OBJDIR)/bacl.o \ + $(OBJDIR)/bacl_linux.o \ + $(OBJDIR)/bacl_solaris.o \ + $(OBJDIR)/bxattr_freebsd.o \ + $(OBJDIR)/bxattr_osx.o \ + $(OBJDIR)/bacl_freebsd.o \ + $(OBJDIR)/bacl_osx.o \ + $(OBJDIR)/bxattr.o \ + $(OBJDIR)/bxattr_linux.o \ + $(OBJDIR)/bxattr_solaris.o \ + $(OBJDIR)/bacula.res + +# $(OBJDIR)/trayMonitor.o \ +# $(OBJDIR)/aboutDialog.o \ +# $(OBJDIR)/statusDialog.o \ + + +ALL_OBJS = $(FILED_OBJS) + +FILED_LIBS = \ + $(LIBS_PTHREADS) \ + $(LIBS_ZLIB) \ + $(LIBS_LZO) \ + $(LIBS_NETWORK) \ + -lole32 \ + -loleaut32 \ + -luuid \ + -lcomctl32 + +###################################################################### + +# Targets + +.PHONY: all clean distclean + +all: $(BINDIR)/bacula-fd.exe + +distclean: clean + +clean: + @echo "Cleaning `pwd`" + $(call clean_obj,$(ALL_OBJS)) + $(call clean_exe,$(BINDIR)/bacula-fd.exe) + $(ECHO_CMD)rm -rf $(OBJDIRS) + +# +# Rules +# + +$(BINDIR)/bacula-fd.exe: $(FILED_OBJS) $(LIBS_BACULA) + $(call link_winapp,$(FILED_LIBS)) + +$(OBJDIR)/winres.res: $(BUILDDIR)/libwin32/winres.rc + @echo "Compiling $@" + $(call checkdir,$@) + $(ECHO_CMD)$(WINDRES) $(INCLUDE_ICONS) -DMINGW64 -O coff $< -o $@ + +include $(BUILDDIR)/Makefile.rules diff --git a/src/win32/filed/bacula-fd.manifest b/src/win32/filed/bacula-fd.manifest new file mode 100644 index 00000000..bf1c6746 --- /dev/null +++ b/src/win32/filed/bacula-fd.manifest @@ -0,0 +1,21 @@ + + + + Bacula File daemon for Win32 + + + + + + + diff --git a/src/win32/filed/bacula.rc b/src/win32/filed/bacula.rc new 
file mode 100644 index 00000000..92b889cb --- /dev/null +++ b/src/win32/filed/bacula.rc @@ -0,0 +1 @@ +#include "../libwin32/bacula.rc" diff --git a/src/win32/filed/baculafd.vcproj b/src/win32/filed/baculafd.vcproj new file mode 100644 index 00000000..3a0aecad --- /dev/null +++ b/src/win32/filed/baculafd.vcproj @@ -0,0 +1,610 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/win32/filed/main.cpp b/src/win32/filed/main.cpp new file mode 100644 index 00000000..ae1dd2cd --- /dev/null +++ b/src/win32/filed/main.cpp @@ -0,0 +1,24 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2019 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Kern Sibbald, August 2007 + * + */ + +#include "who.h" +#include "../libwin32/main.cpp" diff --git a/src/win32/filed/plugins/Makefile b/src/win32/filed/plugins/Makefile new file mode 100644 index 00000000..d871e298 --- /dev/null +++ b/src/win32/filed/plugins/Makefile @@ -0,0 +1,124 @@ +# +# Makefile for win32 bacula executables +# Using MinGW cross-compiler on GNU/Linux +# +# Written by James Harper, October 2008 +# Patterned after a Makefile by Robert Nelson, June 2006 +# + +# +# Change the following depending on where this directory is located +# in the Bacula tree. It should point to the src/win32 directory. +BUILDDIR = ../.. 
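# (For example, the Makefile one level up in src/win32/filed sets
# "BUILDDIR = ..", since that directory sits directly below src/win32.)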
+ +include $(BUILDDIR)/Makefile.inc + +INCLUDES = \ + $(INCLUDE_PTHREADS) \ + $(INCLUDE_BACULA) \ + $(INCLUDE_ZLIB) \ + $(INCLUDE_OPENSSL) \ + $(INCLUDE_VSS) \ + -I$(MAINDIR)/src \ + -I$(MAINDIR)/src/filed \ + -I$(MAINDIR)/src/win32/filed + +DEFINES = \ + $(HAVES) + +#vpath %.c $(BUILDDIR)/compat $(MAINDIR)/src/findlib $(MAINDIR)/src/lib +#vpath %.cpp $(BUILDDIR)/compat $(MAINDIR)/src/findlib $(MAINDIR)/src/lib + +# Look files in src/lib and plugins/fd mainly for SAP and Oracle +vpath %.c $(MAINDIR)/src/lib $(MAINDIR)/src/plugins/fd +vpath %.h $(MAINDIR)/src/lib $(MAINDIR)/src/plugins/fd + +###################################################################### + +# Files files in src/win32/filed/plugins + +EXCHANGE_OBJS = \ + $(OBJDIR)/exchange-fd.o \ + $(OBJDIR)/exch_api.o \ + $(OBJDIR)/exch_node.o \ + $(OBJDIR)/exch_root_node.o \ + $(OBJDIR)/exch_service_node.o \ + $(OBJDIR)/exch_storage_group_node.o \ + $(OBJDIR)/exch_store_node.o \ + $(OBJDIR)/exch_dbi_node.o \ + $(OBJDIR)/exch_file_node.o + +BPIPE_OBJS = \ + $(OBJDIR)/bpipe-fd.o + +LIBS_DLL = \ + $(LIBS_SSL) \ + $(LIBS_CRYPTO) \ + $(LIBS_PTHREADS) \ + $(LIBS_ZLIB) \ + $(LIBS_NETWORK) \ + -lole32 \ + -loleaut32 \ + -luuid + +###################################################################### + +# Targets + +.PHONY: all clean distclean + +all: $(BINDIR)/exchange-fd.dll $(BINDIR)/bpipe-fd.dll $(BINDIR)/alldrives-fd.dll + +clean: + @echo "Cleaning `pwd`" + $(call clean_obj,$(EXCHANGE_OBJS)) + $(call clean_exe,$(BINDIR)/exchange-fd.dll) + $(ECHO_CMD)rm -f $(OBJDIR)/exchange-fd.a $(LIBDIR)/libexchange-fd.a + $(ECHO_CMD)rm -rf $(OBJDIRS) + +distclean: clean + +# +# Rules for generating from ../lib +# + +#$(LIBDIR)/libexchange-fd.a: DLL_DEFINE=USING_DLL + +#$(LIBDIR)/libexchange-fd.a: $(BINDIR)/exchange-fd.dll $(STATIC_OBJS) +# @echo "Updating archive $@" +# $(call checkdir,$@) +# $(ECHO_CMD)cp $(OBJDIR)/exchange-fd.a $@ +# $(ECHO_CMD)$(AR) rsv $@ $(filter %.o,$^) + +$(BINDIR)/exchange-fd.dll: DLL_DEFINE=BUILDING_DLL + +$(BINDIR)/exchange-fd.dll: $(EXCHANGE_OBJS) exchange-fd.def + @echo "Linking $@" + $(call checkdir,$@) + $(ECHO_CMD)$(CXX) $(LDFLAGS) -mdll -mwindows -Wl,--out-implib,$(OBJDIR)/exchange-fd.a $^ $(LIBS_DLL) -o $@ + +$(BINDIR)/bpipe-fd.dll: DLL_DEFINE=BUILDING_DLL + +$(BINDIR)/bpipe-fd.dll: $(BPIPE_OBJS) bpipe-fd.def $(LIBS_BACULA) + @echo "Linking $@" + $(call checkdir,$@) + $(ECHO_CMD)$(CXX) $(CFLAGS) $(LDFLAGS) -mdll -mwindows -Wl,--out-implib,$(OBJDIR)/bpipe-fd.a $^ $(LIBS_DLL) -o $@ + +$(BINDIR)/alldrives-fd.dll: DLL_DEFINE=BUILDING_DLL + +$(BINDIR)/alldrives-fd.dll: $(OBJDIR)/alldrives-fd.o $(LIBS_BACULA) + @echo "Linking $@" + $(call checkdir,$@) + $(ECHO_CMD)$(CXX) $(CFLAGS) $(LDFLAGS) -mdll -mwindows -Wl,--out-implib,$(OBJDIR)/alldrives-fd.a $^ $(LIBS_DLL) -o $@ + +include $(BUILDDIR)/Makefile.rules + +$(OBJDIR)/%.o: %.c + @echo "Compiling $<" + $(call checkdir,$@) + $(ECHO_CMD)$(CXX) -D$(DLL_DEFINE) $(CFLAGS) -c $< -o $@ + +$(OBJDIR)/%.o: %.cpp + @echo "Compiling $<" + $(call checkdir,$@) + $(ECHO_CMD)$(CXX) -D$(DLL_DEFINE) $(CFLAGS) -c $< -o $@ diff --git a/src/win32/filed/plugins/alldrives-fd.c b/src/win32/filed/plugins/alldrives-fd.c new file mode 100644 index 00000000..f93e83b3 --- /dev/null +++ b/src/win32/filed/plugins/alldrives-fd.c @@ -0,0 +1,437 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ + +#ifndef BUILDING_DLL +# define BUILDING_DLL +#endif +#define BUILD_PLUGIN + +#include "bacula.h" +#include "fd_plugins.h" +#include "lib/mem_pool.h" + +/* from lib/scan.c */ +extern int parse_args(POOLMEM *cmd, POOLMEM **args, int *argc, + char **argk, char **argv, int max_args); +#define Dmsg(context, level, message, ...) bfuncs->DebugMessage(context, __FILE__, __LINE__, level, message, ##__VA_ARGS__) + +#ifdef __cplusplus +extern "C" { +#endif + +#define PLUGIN_LICENSE "Bacula" +#define PLUGIN_AUTHOR "Eric Bollengier" +#define PLUGIN_DATE "Oct 2013" +#define PLUGIN_VERSION "1.2" +#define PLUGIN_DESCRIPTION "Select all local drives" + +/* Forward referenced functions */ +static bRC newPlugin(bpContext *ctx); +static bRC freePlugin(bpContext *ctx); +static bRC getPluginValue(bpContext *ctx, pVariable var, void *value); +static bRC setPluginValue(bpContext *ctx, pVariable var, void *value); +static bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value); +static bRC startBackupFile(bpContext *ctx, struct save_pkt *sp); +static bRC endBackupFile(bpContext *ctx); +static bRC pluginIO(bpContext *ctx, struct io_pkt *io); +static bRC startRestoreFile(bpContext *ctx, const char *cmd); +static bRC endRestoreFile(bpContext *ctx); +static bRC createFile(bpContext *ctx, struct restore_pkt *rp); +static bRC setFileAttributes(bpContext *ctx, struct restore_pkt *rp); + + +/* Pointers to Bacula functions */ +static bFuncs *bfuncs = NULL; +static bInfo *binfo = NULL; + +static pInfo pluginInfo = { + sizeof(pluginInfo), + FD_PLUGIN_INTERFACE_VERSION, + FD_PLUGIN_MAGIC, + PLUGIN_LICENSE, + PLUGIN_AUTHOR, + PLUGIN_DATE, + PLUGIN_VERSION, + PLUGIN_DESCRIPTION +}; + +static pFuncs pluginFuncs = { + sizeof(pluginFuncs), + FD_PLUGIN_INTERFACE_VERSION, + + /* Entry points into plugin */ + newPlugin, /* new plugin instance */ + freePlugin, /* free plugin instance */ + getPluginValue, + setPluginValue, + handlePluginEvent, + startBackupFile, + endBackupFile, + startRestoreFile, + endRestoreFile, + pluginIO, + createFile, + setFileAttributes, + NULL, /* No checkFiles */ + NULL /* No ACL/XATTR */ +}; + +/* + * Plugin called here when it is first loaded + */ +bRC DLL_IMP_EXP +loadPlugin(bInfo *lbinfo, bFuncs *lbfuncs, pInfo **pinfo, pFuncs **pfuncs) +{ + bfuncs = lbfuncs; /* set Bacula funct pointers */ + binfo = lbinfo; + + *pinfo = &pluginInfo; /* return pointer to our info */ + *pfuncs = &pluginFuncs; /* return pointer to our functions */ + + return bRC_OK; +} + +/* + * Plugin called here when it is unloaded, normally when + * Bacula is going to exit. 
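 * Nothing is allocated at plugin load time apart from the bfuncs/binfo
 * pointers, so there is nothing to release here.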
+ */ +bRC DLL_IMP_EXP +unloadPlugin() +{ + return bRC_OK; +} + +#define get_self(ctx) ((barg *)ctx->pContext) + +class barg { +public: + POOLMEM *args; + POOLMEM *cmd; + char *argk[MAX_CMD_ARGS]; /* Argument keywords */ + char *argv[MAX_CMD_ARGS]; /* Argument values */ + int argc; + char *exclude; + bool snapshot_only; + + barg() { + args = cmd = NULL; + exclude = NULL; + argc = 0; + snapshot_only = false; + } + + ~barg() { + free_and_null_pool_memory(args); + free_and_null_pool_memory(cmd); + } + + /* + * Given a single keyword, find it in the argument list, but + * it must have a value + * Returns: -1 if not found or no value + * list index (base 0) on success + */ + int find_arg_with_value(const char *keyword) + { + for (int i=0; iexclude with simple string "ABCD" + */ + void parse(char *command) { + char *p; + char *q; + if ((p = strchr(command, ':')) == NULL) { + Dmsg(NULL, 10, "No options\n"); + return; + } + + args = get_pool_memory(PM_FNAME); + cmd = get_pool_memory(PM_FNAME); + + pm_strcpy(cmd, ++p); /* copy string after : */ + parse_args(cmd, &args, &argc, argk, argv, MAX_CMD_ARGS); + + for (int i=0; i < argc ; i++) { + if (strcmp(argk[i], "exclude") == 0) { + /* a,B,C d => ABCD */ + q = p = exclude = argv[i]; + for (; *p ; p++) { + if ((*p >= 'A' && *p <= 'Z') || (*p >= 'a' && *p <= 'z')) { + *q = toupper(*p); + q++; + } + } + *q = 0; + Dmsg(NULL, 50, "%s => %s\n", command, exclude); + + } else if (strcmp(argk[i], "snapshot") == 0) { + Dmsg(NULL, 50, "Doing only snapshot\n"); + snapshot_only = true; + + } else { + Dmsg(NULL, 10, "Unknown keyword %s\n", argk[i]); + } + } + } +}; + +/* + * Called here to make a new instance of the plugin -- i.e. when + * a new Job is started. There can be multiple instances of + * each plugin that are running at the same time. Your + * plugin instance must be thread safe and keep its own + * local data. + */ +static bRC newPlugin(bpContext *ctx) +{ + barg *self = new barg(); + ctx->pContext = (void *)self; /* set our context pointer */ + return bRC_OK; +} + +/* + * Release everything concerning a particular instance of a + * plugin. Normally called when the Job terminates. + */ +static bRC freePlugin(bpContext *ctx) +{ + barg *self = get_self(ctx); + if (self) { + delete self; + } + return bRC_OK; +} + +/* + * Called by core code to get a variable from the plugin. + * Not currently used. + */ +static bRC getPluginValue(bpContext *ctx, pVariable var, void *value) +{ +// printf("plugin: getPluginValue var=%d\n", var); + return bRC_OK; +} + +/* + * Called by core code to set a plugin variable. + * Not currently used. 
+ */ +static bRC setPluginValue(bpContext *ctx, pVariable var, void *value) +{ +// printf("plugin: setPluginValue var=%d\n", var); + return bRC_OK; +} + +/* TODO: use findlib/drivetype instead */ +static bool drivetype(const char *fname, char *dt, int dtlen) +{ + CHAR rootpath[4]; + UINT type; + + /* Copy Drive Letter, colon, and backslash to rootpath */ + bstrncpy(rootpath, fname, 3); + rootpath[3] = '\0'; + + type = GetDriveType(rootpath); + + switch (type) { + case DRIVE_REMOVABLE: bstrncpy(dt, "removable", dtlen); return true; + case DRIVE_FIXED: bstrncpy(dt, "fixed", dtlen); return true; + case DRIVE_REMOTE: bstrncpy(dt, "remote", dtlen); return true; + case DRIVE_CDROM: bstrncpy(dt, "cdrom", dtlen); return true; + case DRIVE_RAMDISK: bstrncpy(dt, "ramdisk", dtlen); return true; + case DRIVE_UNKNOWN: + case DRIVE_NO_ROOT_DIR: + default: + return false; + } +} + +static void add_drives(bpContext *ctx, char *cmd) +{ + char buf[32]; + char dt[100]; + char drive; + barg *arg = get_self(ctx); + arg->parse(cmd); + + if (arg->snapshot_only) { + return; + } + + for (drive = 'A'; drive <= 'Z'; drive++) { + if (arg->exclude && strchr(arg->exclude, drive)) { + Dmsg(ctx, 10, "%c is in exclude list\n", drive); + continue; + } + snprintf(buf, sizeof(buf), "%c:/", drive); + if (drivetype(buf, dt, sizeof(dt))) { + if (strcmp(dt, "fixed") == 0) { + Dmsg(ctx, 10, "Adding %c to include list\n", drive); + bfuncs->AddInclude(ctx, buf); + snprintf(buf, sizeof(buf), "%c:/pagefile.sys", drive); + bfuncs->AddExclude(ctx, buf); + snprintf(buf, sizeof(buf), "%c:/System Volume Information", drive); + bfuncs->AddExclude(ctx, buf); + } else { + Dmsg(ctx, 10, "Discarding %c from include list\n", drive); + } + } + } +} + +static void add_snapshot(bpContext *ctx, char *ret) +{ + char buf[32]; + char dt[100]; + char drive; + char *p = ret; + barg *arg = get_self(ctx); + + /* Start from blank */ + *p = 0; + + if (!arg->snapshot_only) { + return; + } + + for (drive = 'A'; drive <= 'Z'; drive++) { + if (arg->exclude && strchr(arg->exclude, drive)) { + Dmsg(ctx, 10, "%c is in exclude list\n", drive); + continue; + } + + snprintf(buf, sizeof(buf), "%c:/", drive); + + if (drivetype(buf, dt, sizeof(dt))) { + if (strcmp(dt, "fixed") == 0) { + Dmsg(ctx, 10, "Adding %c to snapshot list\n", drive); + *p++ = drive; + } else { + Dmsg(ctx, 10, "Discarding %c from snapshot list\n", drive); + } + } + } + *p = 0; + Dmsg(ctx, 10, "ret = %s\n", ret); +} + +/* + * Called by Bacula when there are certain events that the + * plugin might want to know. The value depends on the + * event. + */ +static bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value) +{ + barg arguments; + + switch (event->eventType) { + case bEventPluginCommand: + add_drives(ctx, (char *)value); /* command line */ + break; + + case bEventVssPrepareSnapshot: + add_snapshot(ctx, (char *)value); /* snapshot list */ + break; + default: + break; + } + + return bRC_OK; +} + +/* + * Called when starting to backup a file. Here the plugin must + * return the "stat" packet for the directory/file and provide + * certain information so that Bacula knows what the file is. + * The plugin can create "Virtual" files by giving them a + * name that is not normally found on the file system. + */ +static bRC startBackupFile(bpContext *ctx, struct save_pkt *sp) +{ + return bRC_Stop; +} + +/* + * Done backing up a file. + */ +static bRC endBackupFile(bpContext *ctx) +{ + return bRC_Stop; +} + +/* + * Do actual I/O. 
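The drive selection above boils down to probing every letter with GetDriveType() and keeping only fixed disks. Here is a self-contained sketch of that filter, compiled separately with MinGW or MSVC; it only prints what the plugin would include/exclude and is not part of the plugin itself.

#include <windows.h>
#include <stdio.h>

int main()
{
   for (char drive = 'A'; drive <= 'Z'; drive++) {
      char root[4];
      snprintf(root, sizeof(root), "%c:\\", drive);   /* e.g. "C:\" */
      if (GetDriveTypeA(root) == DRIVE_FIXED) {
         printf("would include %c:/ and exclude %c:/pagefile.sys\n", drive, drive);
      }
   }
   return 0;
}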
Bacula calls this after startBackupFile + * or after startRestoreFile to do the actual file + * input or output. + */ +static bRC pluginIO(bpContext *ctx, struct io_pkt *io) +{ + io->status = 0; + io->io_errno = 0; + return bRC_Error; +} + +static bRC startRestoreFile(bpContext *ctx, const char *cmd) +{ + return bRC_Error; +} + +static bRC endRestoreFile(bpContext *ctx) +{ + return bRC_Error; +} + +/* + * Called here to give the plugin the information needed to + * re-create the file on a restore. It basically gets the + * stat packet that was created during the backup phase. + * This data is what is needed to create the file, but does + * not contain actual file data. + */ +static bRC createFile(bpContext *ctx, struct restore_pkt *rp) +{ + return bRC_Error; +} + +/* + * Called after the file has been restored. This can be used to + * set directory permissions, ... + */ +static bRC setFileAttributes(bpContext *ctx, struct restore_pkt *rp) +{ + return bRC_Error; +} + + +#ifdef __cplusplus +} +#endif diff --git a/src/win32/filed/plugins/alldrives-fd.def b/src/win32/filed/plugins/alldrives-fd.def new file mode 100644 index 00000000..05cb96be --- /dev/null +++ b/src/win32/filed/plugins/alldrives-fd.def @@ -0,0 +1,13 @@ +LIBRARY bacula.dll +EXPORTS + +;drivetype.o +_Z9drivetypePKcPci + +;console_command DATA +;b_plugin_list DATA +;plugin_bopen DATA +;plugin_bclose DATA +;plugin_bwrite DATA +;plugin_bread DATA +;plugin_blseek DATA diff --git a/src/win32/filed/plugins/api.c b/src/win32/filed/plugins/api.c new file mode 100644 index 00000000..8cb8c0de --- /dev/null +++ b/src/win32/filed/plugins/api.c @@ -0,0 +1,137 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
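The alldrives-fd.def file above exports drivetype() under its C++-mangled name. As an aside, _Z9drivetypePKcPci is simply the Itanium-ABI encoding of drivetype(const char*, char*, int) as emitted by g++/MinGW-w64 (a platform may add a leading underscore on 32-bit builds); an extern "C" function keeps its plain name. A tiny illustration, with drivetype_c being a hypothetical name used only for contrast:

// mangling-demo.cpp:  g++ -c mangling-demo.cpp && nm mangling-demo.o | c++filt
bool drivetype(const char *fname, char *dt, int dtlen)              // mangled, e.g. _Z9drivetypePKcPci
{ return fname && dt && dtlen > 0; }

extern "C" bool drivetype_c(const char *fname, char *dt, int dtlen) // exported as drivetype_c
{ return fname && dt && dtlen > 0; }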
+*/ +/* + * Written by James Harper, October 2008 + */ + +#include "exchange-fd.h" + +HrESEBackupRestoreGetNodes_t HrESEBackupRestoreGetNodes; +HrESEBackupPrepare_t HrESEBackupPrepare; +HrESEBackupGetLogAndPatchFiles_t HrESEBackupGetLogAndPatchFiles; +HrESEBackupTruncateLogs_t HrESEBackupTruncateLogs; +HrESEBackupEnd_t HrESEBackupEnd; +HrESEBackupSetup_t HrESEBackupSetup; +HrESEBackupInstanceEnd_t HrESEBackupInstanceEnd; +HrESEBackupOpenFile_t HrESEBackupOpenFile; +HrESEBackupReadFile_t HrESEBackupReadFile; +HrESEBackupCloseFile_t HrESEBackupCloseFile; + +HrESERestoreOpen_t HrESERestoreOpen; +HrESERestoreReopen_t HrESERestoreReopen; +HrESERestoreComplete_t HrESERestoreComplete; +HrESERestoreClose_t HrESERestoreClose; +HrESERestoreGetEnvironment_t HrESERestoreGetEnvironment; +HrESERestoreSaveEnvironment_t HrESERestoreSaveEnvironment; +HrESERestoreAddDatabase_t HrESERestoreAddDatabase; +HrESERestoreOpenFile_t HrESERestoreOpenFile; + +bRC +loadExchangeApi() +{ + HMODULE h; + LONG status; + HKEY key_handle; + WCHAR *buf; + DWORD buf_len; + DWORD type; + + status = RegOpenKeyW(HKEY_LOCAL_MACHINE, L"SYSTEM\\CurrentControlSet\\Control\\BackupRestore\\DLLPaths", &key_handle); + if (status != ERROR_SUCCESS) + { + _JobMessageNull(M_FATAL, "Cannot get key for Exchange DLL path, result = %08x\n", status); + return bRC_Error; + } + + type = REG_EXPAND_SZ; + status = RegQueryValueExW(key_handle, L"esebcli2", NULL, &type, NULL, &buf_len); + if (status != ERROR_SUCCESS) + { + _JobMessageNull(M_FATAL, "Cannot get key for Exchange DLL path, result = %08x\n", status); + return bRC_Error; + } + buf_len += 2; + buf = new WCHAR[buf_len]; + + type = REG_EXPAND_SZ; + status = RegQueryValueExW(key_handle, L"esebcli2", NULL, &type, (LPBYTE)buf, &buf_len); + if (status != ERROR_SUCCESS) + { + _JobMessageNull(M_FATAL, "Cannot get key for Exchange DLL path, result = %08x\n", status); + delete buf; + return bRC_Error; + } + +printf("Got value %S\n", buf); + + // strictly speaking, a REG_EXPAND_SZ should be run through ExpandEnvironmentStrings + + h = LoadLibraryW(buf); + delete buf; + if (!h) { + _JobMessageNull(M_FATAL, "Cannot load Exchange DLL\n"); + return bRC_Error; + } + HrESEBackupRestoreGetNodes = (HrESEBackupRestoreGetNodes_t)GetProcAddress(h, "HrESEBackupRestoreGetNodes"); + HrESEBackupPrepare = (HrESEBackupPrepare_t)GetProcAddress(h, "HrESEBackupPrepare"); + HrESEBackupEnd = (HrESEBackupEnd_t)GetProcAddress(h, "HrESEBackupEnd"); + HrESEBackupSetup = (HrESEBackupSetup_t)GetProcAddress(h, "HrESEBackupSetup"); + HrESEBackupGetLogAndPatchFiles = (HrESEBackupGetLogAndPatchFiles_t)GetProcAddress(h, "HrESEBackupGetLogAndPatchFiles"); + HrESEBackupTruncateLogs = (HrESEBackupTruncateLogs_t)GetProcAddress(h, "HrESEBackupTruncateLogs"); + HrESEBackupInstanceEnd = (HrESEBackupInstanceEnd_t)GetProcAddress(h, "HrESEBackupInstanceEnd"); + HrESEBackupOpenFile = (HrESEBackupOpenFile_t)GetProcAddress(h, "HrESEBackupOpenFile"); + HrESEBackupReadFile = (HrESEBackupReadFile_t)GetProcAddress(h, "HrESEBackupReadFile"); + HrESEBackupCloseFile = (HrESEBackupCloseFile_t)GetProcAddress(h, "HrESEBackupCloseFile"); + HrESERestoreOpen = (HrESERestoreOpen_t)GetProcAddress(h, "HrESERestoreOpen"); + HrESERestoreReopen = (HrESERestoreReopen_t)GetProcAddress(h, "HrESERestoreReopen"); + HrESERestoreComplete = (HrESERestoreComplete_t)GetProcAddress(h, "HrESERestoreComplete"); + HrESERestoreClose = (HrESERestoreClose_t)GetProcAddress(h, "HrESERestoreClose"); + HrESERestoreSaveEnvironment = (HrESERestoreSaveEnvironment_t)GetProcAddress(h, 
"HrESERestoreSaveEnvironment"); + HrESERestoreGetEnvironment = (HrESERestoreGetEnvironment_t)GetProcAddress(h, "HrESERestoreGetEnvironment"); + HrESERestoreAddDatabase = (HrESERestoreAddDatabase_t)GetProcAddress(h, "HrESERestoreAddDatabase"); + HrESERestoreOpenFile = (HrESERestoreOpenFile_t)GetProcAddress(h, "HrESERestoreOpenFile"); + return bRC_OK; +} + +const char * +ESEErrorMessage(HRESULT result) +{ + switch (result) { + case 0: + return "No error."; + case hrLogfileHasBadSignature: + return "Log file has bad signature. Check that no stale files are left in the Exchange data/log directories."; + case hrCBDatabaseInUse: + return "Database in use. Make sure database is dismounted."; + case hrRestoreAtFileLevel: + return "File must be restored using Windows file I/O calls."; + case hrMissingFullBackup: + return "Exchange reports that no previous full backup has been done."; + case hrBackupInProgress: + return "Exchange backup already in progress."; + case hrLogfileNotContiguous: + return "Existing log file is not contiguous. Check that no stale files are left in the Exchange data/log directories."; + case hrErrorFromESECall: + return "Error returned from ESE function call. Check the Windows Event Logs for more information."; + case hrCBDatabaseNotFound: + return "Database not found. Check that the Database you are trying to restore actually exists in the Storage Group you are restoring to."; + default: + return "Unknown error."; + } +} diff --git a/src/win32/filed/plugins/api.h b/src/win32/filed/plugins/api.h new file mode 100644 index 00000000..08a01ba0 --- /dev/null +++ b/src/win32/filed/plugins/api.h @@ -0,0 +1,302 @@ +/* + Bacula® - The Network Backup Solution + + Copyright (C) 2008-2009 Free Software Foundation Europe e.V. + + The main author of Bacula is Kern Sibbald, with contributions from + many others, a complete list can be found in the file AUTHORS. + This program is Free Software; you can redistribute it and/or + modify it under the terms of version three of the GNU Affero General Public + License as published by the Free Software Foundation, which is + listed in the file LICENSE. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + + Bacula® is a registered trademark of Kern Sibbald. + The licensor of Bacula is the Free Software Foundation Europe + (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich, + Switzerland, email:ftf@fsfeurope.org. 
+*/ +/* + * Written by James Harper, October 2008 + */ + +extern "C" { + +#define BACKUP_NODE_TYPE_MACHINE 0x001 +#define BACKUP_NODE_TYPE_ANNOTATION 0x010 +#define BACKUP_NODE_TYPE_DISPLAY 0x100 + +#define ESE_BACKUP_INSTANCE_END_ERROR 0x0 +#define ESE_BACKUP_INSTANCE_END_SUCCESS 0x1 + +#define BACKUP_TYPE_FULL 0x1 +#define BACKUP_TYPE_LOGS_ONLY 0x2 +#define BACKUP_TYPE_FULL_WITH_ALL_LOGS 0x3 + +#define RESTORE_CLOSE_ABORT 0x1 +#define RESTORE_CLOSE_NORMAL 0x0 + +#define ESE_RESTORE_COMPLETE_NOWAIT 0x00010000 +#define ESE_RESTORE_COMPLETE_ATTACH_DBS 0x00000001 +#define ESE_RESTORE_KEEP_LOG_FILES 0x00020000 + +//#include + +struct ESE_ICON_DESCRIPTION { + uint32_t ulSize; + char *pvData; +}; + +struct BACKUP_NODE_TREE { + WCHAR *wszName; + uint32_t fFlags; + ESE_ICON_DESCRIPTION iconDescription; + struct BACKUP_NODE_TREE *pNextNode; + struct BACKUP_NODE_TREE *pChildNode; +}; + +struct DATABASE_BACKUP_INFO { + WCHAR *wszDatabaseDisplayName; + uint32_t cwDatabaseStreams; + WCHAR *wszDatabaseStreams; + GUID rguidDatabase; + uint32_t *rgIconIndexDatabase; + uint32_t fDatabaseFlags; +}; + +struct INSTANCE_BACKUP_INFO { + uint64_t hInstanceId; + //RPC_STRING wszInstanceName; + WCHAR *wszInstanceName; + uint32_t ulIconIndexInstance; + uint32_t cDatabase; + DATABASE_BACKUP_INFO *rgDatabase; + uint32_t cIconDescription; + ESE_ICON_DESCRIPTION *rgIconDescription; +}; + +enum RECOVER_STATUS { + recoverInvalid = 0, + recoverNotStarted = 1, + recoverStarted = 2, + recoverEnded = 3, + recoverStatusMax +}; + +struct RESTORE_ENVIRONMENT { + WCHAR * m_wszRestoreLogPath; + WCHAR * m_wszSrcInstanceName; + uint32_t m_cDatabases; + WCHAR **m_wszDatabaseDisplayName; + GUID * m_rguidDatabase; + WCHAR * m_wszRestoreInstanceSystemPath; + WCHAR * m_wszRestoreInstanceLogPath; + WCHAR * m_wszTargetInstanceName; + WCHAR ** m_wszDatabaseStreamsS; + WCHAR ** m_wszDatabaseStreamsD; + uint32_t m_ulGenLow; + uint32_t m_ulGenHigh; + WCHAR * m_wszLogBaseName; + time_t m_timeLastRestore; + RECOVER_STATUS m_statusLastRecover; + HRESULT m_hrLastRecover; + time_t m_timeLastRecover; + WCHAR * m_wszAnnotation; +}; + +typedef HANDLE HCCX; + +typedef HRESULT (WINAPI *HrESEBackupRestoreGetNodes_t) +( + WCHAR* wszComputerName, + BACKUP_NODE_TREE* pBackupNodeTree +); + +typedef HRESULT (WINAPI *HrESEBackupPrepare_t) +( + WCHAR* wszBackupServer, + WCHAR* wszBackupAnnotation, + uint32_t *pcInstanceInfo, + INSTANCE_BACKUP_INFO **paInstanceInfo, + HCCX *phccxBackupContext +); + +typedef HRESULT (WINAPI *HrESEBackupEnd_t) +( + HCCX hccsBackupContext +); + +typedef HRESULT (WINAPI *HrESEBackupSetup_t) +( + HCCX hccsBackupContext, + uint64_t hInstanceID, + uint32_t btBackupType +); + +typedef HRESULT (WINAPI *HrESEBackupGetLogAndPatchFiles_t) +( + HCCX hccsBackupContext, + WCHAR** pwszFiles +); + +typedef HRESULT (WINAPI *HrESEBackupInstanceEnd_t) +( + HCCX hccsBackupContext, + uint32_t fFlags +); + +typedef HRESULT (WINAPI *HrESEBackupOpenFile_t) +( + HCCX hccsBackupContext, + WCHAR* wszFileName, + uint32_t cbReadHintSize, + uint32_t cSections, + void** rghFile, + uint64_t* rgliSectionSize +); + +typedef HRESULT (WINAPI *HrESEBackupReadFile_t) +( + HCCX hccsBackupContext, + void* hFile, + void* pvBuffer, + uint32_t cbBuffer, + uint32_t* pcbRead +); + +typedef HRESULT (WINAPI *HrESEBackupCloseFile_t) +( + HCCX hccsBackupContext, + void* hFile +); + +typedef HRESULT (WINAPI *HrESEBackupTruncateLogs_t) +( + HCCX hccsBackupContext +); + +typedef HRESULT (WINAPI *HrESERestoreOpen_t) +( + WCHAR* wszBackupServer, + WCHAR* wszBackupAnnotation, + 
WCHAR* wszSrcInstanceName, + WCHAR* wszRestoreLogPath, + HCCX* phccxRestoreContext +); + +typedef HRESULT (WINAPI *HrESERestoreReopen_t) +( + WCHAR* wszBackupServer, + WCHAR* wszBackupAnnotation, + WCHAR* wszRestoreLogPath, + HCCX* phccxRestoreContext +); + +typedef HRESULT (WINAPI *HrESERestoreClose_t) +( + HCCX phccxRestoreContext, + uint32_t fRestoreAbort +); + +typedef HRESULT (WINAPI *HrESERestoreComplete_t) +( + HCCX phccxRestoreContext, + WCHAR* wszCheckpointFilePath, + WCHAR* wszLogFilePath, + WCHAR* wszTargetInstanceName, + uint32_t fFlags +); + +typedef HRESULT (WINAPI *HrESERestoreSaveEnvironment_t) +( + HCCX phccxRestoreContext +); + +typedef HRESULT (WINAPI *HrESERestoreGetEnvironment_t) +( + HCCX phccxRestoreContext, + RESTORE_ENVIRONMENT **ppRestoreEnvironment +); + +typedef HRESULT (WINAPI *HrESERestoreAddDatabase_t) +( + HCCX phccxRestoreContext, + WCHAR* wszDatabaseDisplayName, + GUID guidDatabase, + WCHAR* wszDatabaseStreamsS, + WCHAR** wszDatabaseStreamsD +); + +typedef HRESULT (WINAPI *HrESERestoreOpenFile_t) +( + HCCX phccxRestoreContext, + WCHAR* wszFileName, + uint32_t cSections, + void* rghFile +); + +bRC +loadExchangeApi(); + +const char * +ESEErrorMessage(HRESULT result); + +#define hrLogfileHasBadSignature (HRESULT)0xC8000262L +#define hrLogfileNotContiguous (HRESULT)0xC8000263L +#define hrCBDatabaseInUse (HRESULT)0xC7FE1F41L +#define hrRestoreAtFileLevel (HRESULT)0xC7FF0FA5L +#define hrMissingFullBackup (HRESULT)0xC8000230L +#define hrBackupInProgress (HRESULT)0xC80001F9L +#define hrCBDatabaseNotFound (HRESULT)0xC7FE1F42L +#define hrErrorFromESECall (HRESULT)0xC7FF1004L + +extern HrESEBackupRestoreGetNodes_t HrESEBackupRestoreGetNodes; +extern HrESEBackupPrepare_t HrESEBackupPrepare; +extern HrESEBackupGetLogAndPatchFiles_t HrESEBackupGetLogAndPatchFiles; +extern HrESEBackupTruncateLogs_t HrESEBackupTruncateLogs; +extern HrESEBackupEnd_t HrESEBackupEnd; +extern HrESEBackupSetup_t HrESEBackupSetup; +extern HrESEBackupInstanceEnd_t HrESEBackupInstanceEnd; +extern HrESEBackupOpenFile_t HrESEBackupOpenFile; +extern HrESEBackupReadFile_t HrESEBackupReadFile; +extern HrESEBackupCloseFile_t HrESEBackupCloseFile; + +extern HrESERestoreOpen_t HrESERestoreOpen; +extern HrESERestoreReopen_t HrESERestoreReopen; +extern HrESERestoreComplete_t HrESERestoreComplete; +extern HrESERestoreClose_t HrESERestoreClose; +extern HrESERestoreGetEnvironment_t HrESERestoreGetEnvironment; +extern HrESERestoreSaveEnvironment_t HrESERestoreSaveEnvironment; +extern HrESERestoreAddDatabase_t HrESERestoreAddDatabase; +extern HrESERestoreOpenFile_t HrESERestoreOpenFile; + +#if !defined(MINGW64) && (_WIN32_WINNT < 0x0500) +typedef enum _COMPUTER_NAME_FORMAT { + ComputerNameNetBIOS, + ComputerNameDnsHostname, + ComputerNameDnsDomain, + ComputerNameDnsFullyQualified, + ComputerNamePhysicalNetBIOS, + ComputerNamePhysicalDnsHostname, + ComputerNamePhysicalDnsDomain, + ComputerNamePhysicalDnsFullyQualified, + ComputerNameMax +} COMPUTER_NAME_FORMAT; + +BOOL WINAPI GetComputerNameExW( + COMPUTER_NAME_FORMAT NameType, + LPWSTR lpBuffer, + LPDWORD lpnSize +); +#endif + +} diff --git a/src/win32/filed/plugins/bpipe-fd.c b/src/win32/filed/plugins/bpipe-fd.c new file mode 100644 index 00000000..7b7e6602 --- /dev/null +++ b/src/win32/filed/plugins/bpipe-fd.c @@ -0,0 +1,557 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * A simple pipe plugin for the Bacula File Daemon + * + * Kern Sibbald, October 2007 + * Copied into Windows plugin environment March, 2010 (KES) + * + */ +#include "bacula.h" +#include "fd_plugins.h" + +#undef malloc +#undef free +#undef strdup + +#define fi __FILE__ +#define li __LINE__ + +#ifdef __cplusplus +extern "C" { +#endif + +static const int dbglvl = 150; + +#define PLUGIN_LICENSE "Bacula" +#define PLUGIN_AUTHOR "Kern Sibbald" +#define PLUGIN_DATE "January 2010" +#define PLUGIN_VERSION "1" +#define PLUGIN_DESCRIPTION "Bacula Pipe Windows File Daemon Plugin" + +/* Forward referenced functions */ +static bRC newPlugin(bpContext *ctx); +static bRC freePlugin(bpContext *ctx); +static bRC getPluginValue(bpContext *ctx, pVariable var, void *value); +static bRC setPluginValue(bpContext *ctx, pVariable var, void *value); +static bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value); +static bRC startBackupFile(bpContext *ctx, struct save_pkt *sp); +static bRC endBackupFile(bpContext *ctx); +static bRC pluginIO(bpContext *ctx, struct io_pkt *io); +static bRC startRestoreFile(bpContext *ctx, const char *cmd); +static bRC endRestoreFile(bpContext *ctx); +static bRC createFile(bpContext *ctx, struct restore_pkt *rp); +static bRC setFileAttributes(bpContext *ctx, struct restore_pkt *rp); + +static char *apply_rp_codes(struct plugin_ctx * p_ctx); + +/* Pointers to Bacula functions */ +static bFuncs *bfuncs = NULL; +static bInfo *binfo = NULL; + +/* Plugin Information block */ +static pInfo pluginInfo = { + sizeof(pluginInfo), + FD_PLUGIN_INTERFACE_VERSION, + FD_PLUGIN_MAGIC, + PLUGIN_LICENSE, + PLUGIN_AUTHOR, + PLUGIN_DATE, + PLUGIN_VERSION, + PLUGIN_DESCRIPTION +}; + +/* Plugin entry points for Bacula */ +static pFuncs pluginFuncs = { + sizeof(pluginFuncs), + FD_PLUGIN_INTERFACE_VERSION, + + /* Entry points into plugin */ + newPlugin, /* new plugin instance */ + freePlugin, /* free plugin instance */ + getPluginValue, + setPluginValue, + handlePluginEvent, + startBackupFile, + endBackupFile, + startRestoreFile, + endRestoreFile, + pluginIO, + createFile, + setFileAttributes, + NULL, /* No checkFile */ + NULL /* No ACL/XATTR */ +}; + +/* + * Plugin private context + */ +struct plugin_ctx { + boffset_t offset; + FILE *fd; /* pipe file descriptor */ + bool backup; /* set for backup (not needed) */ + char *cmd; /* plugin command line */ + char *fname; /* filename to "backup/restore" */ + char *reader; /* reader program for backup */ + char *writer; /* writer program for backup */ + + char where[512]; + int replace; +}; + +/* + * loadPlugin() and unloadPlugin() are entry points that are + * exported, so Bacula can directly call these two entry points + * they are common to all Bacula plugins. 
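The plugin_ctx fields cmd, fname, reader and writer are filled by splitting the Plugin command string at its first three colons, as done in handlePluginEvent() further down. A self-contained sketch of that split, using a made-up command string in the plugin:file:reader:writer form:

#include <stdio.h>
#include <string.h>

int main()
{
   char cmd[] = "bpipe:/MYSQL/dump.sql:mysqldump mydb:mysql mydb";
   char *fname, *reader, *writer, *p;

   if (!(p = strchr(cmd, ':'))) return 1;
   *p++ = 0;                      /* terminate plugin name */
   fname = p;
   if (!(p = strchr(p, ':'))) return 1;
   *p++ = 0;                      /* terminate file name */
   reader = p;
   if (!(p = strchr(p, ':'))) return 1;
   *p++ = 0;                      /* terminate reader string */
   writer = p;

   printf("plugin=%s fname=%s reader=%s writer=%s\n", cmd, fname, reader, writer);
   return 0;
}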
+ */ +/* + * External entry point called by Bacula to "load the plugin + */ +bRC loadPlugin(bInfo *lbinfo, bFuncs *lbfuncs, pInfo **pinfo, pFuncs **pfuncs) +{ + bfuncs = lbfuncs; /* set Bacula funct pointers */ + binfo = lbinfo; + *pinfo = &pluginInfo; /* return pointer to our info */ + *pfuncs = &pluginFuncs; /* return pointer to our functions */ + + return bRC_OK; +} + +/* + * External entry point to unload the plugin + */ +bRC unloadPlugin() +{ +// printf("bpipe-fd: Unloaded\n"); + return bRC_OK; +} + +/* + * The following entry points are accessed through the function + * pointers we supplied to Bacula. Each plugin type (dir, fd, sd) + * has its own set of entry points that the plugin must define. + */ +/* + * Create a new instance of the plugin i.e. allocate our private storage + */ +static bRC newPlugin(bpContext *ctx) +{ + struct plugin_ctx *p_ctx = (struct plugin_ctx *)malloc(sizeof(struct plugin_ctx)); + if (!p_ctx) { + return bRC_Error; + } + memset(p_ctx, 0, sizeof(struct plugin_ctx)); + ctx->pContext = (void *)p_ctx; /* set our context pointer */ + return bRC_OK; +} + +/* + * Free a plugin instance, i.e. release our private storage + */ +static bRC freePlugin(bpContext *ctx) +{ + struct plugin_ctx *p_ctx = (struct plugin_ctx *)ctx->pContext; + if (!p_ctx) { + return bRC_Error; + } + if (p_ctx->cmd) { + free(p_ctx->cmd); /* free any allocated command string */ + } + free(p_ctx); /* free our private context */ + p_ctx = NULL; + return bRC_OK; +} + +/* + * Return some plugin value (none defined) + */ +static bRC getPluginValue(bpContext *ctx, pVariable var, void *value) +{ + return bRC_OK; +} + +/* + * Set a plugin value (none defined) + */ +static bRC setPluginValue(bpContext *ctx, pVariable var, void *value) +{ + return bRC_OK; +} + +/* + * Handle an event that was generated in Bacula + */ +static bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value) +{ + struct plugin_ctx *p_ctx = (struct plugin_ctx *)ctx->pContext; + if (!p_ctx) { + return bRC_Error; + } + +// char *name; + + /* + * Most events don't interest us so we ignore them. + * the printfs are so that plugin writers can enable them to see + * what is really going on. + */ + switch (event->eventType) { + case bEventJobStart: + bfuncs->DebugMessage(ctx, fi, li, dbglvl, "bpipe-fd: JobStart=%s\n", (char *)value); + break; + case bEventJobEnd: +// printf("bpipe-fd: JobEnd\n"); + break; + case bEventStartBackupJob: +// printf("bpipe-fd: StartBackupJob\n"); + break; + case bEventEndBackupJob: +// printf("bpipe-fd: EndBackupJob\n"); + break; + case bEventLevel: +// printf("bpipe-fd: JobLevel=%c %d\n", (int)value, (int)value); + break; + case bEventSince: +// printf("bpipe-fd: since=%d\n", (int)value); + break; + + case bEventStartRestoreJob: +// printf("bpipe-fd: StartRestoreJob\n"); + break; + + case bEventEndRestoreJob: +// printf("bpipe-fd: EndRestoreJob\n"); + break; + + /* Plugin command e.g. 
plugin = ::read command:write command */ + case bEventRestoreCommand: +// printf("bpipe-fd: EventRestoreCommand cmd=%s\n", (char *)value); + /* Fall-through wanted */ + case bEventBackupCommand: + char *p; + bfuncs->DebugMessage(ctx, fi, li, dbglvl, "bpipe-fd: pluginEvent cmd=%s\n", (char *)value); + p_ctx->cmd = strdup((char *)value); + p = strchr(p_ctx->cmd, ':'); + if (!p) { + bfuncs->JobMessage(ctx, fi, li, M_FATAL, 0, "Plugin terminator not found: %s\n", (char *)value); + return bRC_Error; + } + *p++ = 0; /* terminate plugin */ + p_ctx->fname = p; + p = strchr(p, ':'); + if (!p) { + bfuncs->JobMessage(ctx, fi, li, M_FATAL, 0, "File terminator not found: %s\n", (char *)value); + return bRC_Error; + } + *p++ = 0; /* terminate file */ + p_ctx->reader = p; + p = strchr(p, ':'); + if (!p) { + bfuncs->JobMessage(ctx, fi, li, M_FATAL, 0, "Reader terminator not found: %s\n", (char *)value); + return bRC_Error; + } + *p++ = 0; /* terminate reader string */ + p_ctx->writer = p; +// printf("bpipe-fd: plugin=%s fname=%s reader=%s writer=%s\n", +// p_ctx->cmd, p_ctx->fname, p_ctx->reader, p_ctx->writer); + break; + + /* Ignore all unknown event types */ + default: + break; + } + return bRC_OK; +} + +/* + * Start the backup of a specific file + */ +static bRC startBackupFile(bpContext *ctx, struct save_pkt *sp) +{ + struct plugin_ctx *p_ctx = (struct plugin_ctx *)ctx->pContext; + if (!p_ctx) { + return bRC_Error; + } + time_t now = time(NULL); + sp->fname = p_ctx->fname; + sp->type = FT_REG; + sp->statp.st_mode = 0700 | S_IFREG; + sp->statp.st_ctime = now; + sp->statp.st_mtime = now; + sp->statp.st_atime = now; + sp->statp.st_size = 0; + sp->statp.st_blksize = 4096; + sp->statp.st_blocks = 1; + p_ctx->backup = true; +// printf("bpipe-fd: startBackupFile\n"); + return bRC_OK; +} + +/* + * Done with backup of this file + */ +static bRC endBackupFile(bpContext *ctx) +{ + /* + * We would return bRC_More if we wanted startBackupFile to be + * called again to backup another file + */ + return bRC_OK; +} + + +/* + * Bacula is calling us to do the actual I/O + */ +static bRC pluginIO(bpContext *ctx, struct io_pkt *io) +{ + struct plugin_ctx *p_ctx = (struct plugin_ctx *)ctx->pContext; + if (!p_ctx) { + return bRC_Error; + } + + io->status = 0; + io->io_errno = 0; + switch(io->func) { + case IO_OPEN: + bfuncs->DebugMessage(ctx, fi, li, dbglvl, "bpipe-fd: IO_OPEN\n"); + if (io->flags & (O_CREAT | O_WRONLY)) { + char *writer_codes = apply_rp_codes(p_ctx); + + p_ctx->fd = popen(writer_codes, "w"); + bfuncs->DebugMessage(ctx, fi, li, dbglvl, "bpipe-fd: IO_OPEN fd=%d writer=%s\n", + p_ctx->fd, writer_codes); + if (!p_ctx->fd) { + io->io_errno = errno; + bfuncs->JobMessage(ctx, fi, li, M_FATAL, 0, + "Open pipe writer=%s failed: ERR=%s\n", writer_codes, strerror(errno)); + if (writer_codes) { + free(writer_codes); + } + return bRC_Error; + } + if (writer_codes) { + free(writer_codes); + } + } else { + p_ctx->fd = popen(p_ctx->reader, "r"); + bfuncs->DebugMessage(ctx, fi, li, dbglvl, "bpipe-fd: IO_OPEN fd=%p reader=%s\n", + p_ctx->fd, p_ctx->reader); + if (!p_ctx->fd) { + io->io_errno = errno; + bfuncs->JobMessage(ctx, fi, li, M_FATAL, 0, + "Open pipe reader=%s failed: ERR=%s\n", p_ctx->reader, strerror(errno)); + return bRC_Error; + } + } + bmicrosleep(1,0); /* let pipe connect */ + break; + + case IO_READ: + if (!p_ctx->fd) { + bfuncs->JobMessage(ctx, fi, li, M_FATAL, 0, "Logic error: NULL read FD\n"); + return bRC_Error; + } + io->status = fread(io->buf, 1, io->count, p_ctx->fd); +// bfuncs->DebugMessage(ctx, 
fi, li, dbglvl, "bpipe-fd: IO_READ buf=%p len=%d\n", io->buf, io->status); + if (io->status == 0 && ferror(p_ctx->fd)) { + bfuncs->JobMessage(ctx, fi, li, M_FATAL, 0, + "Pipe read error: ERR=%s\n", strerror(errno)); + bfuncs->DebugMessage(ctx, fi, li, dbglvl, + "Pipe read error: ERR=%s\n", strerror(errno)); + return bRC_Error; + } + break; + + case IO_WRITE: + if (!p_ctx->fd) { + bfuncs->JobMessage(ctx, fi, li, M_FATAL, 0, "Logic error: NULL write FD\n"); + return bRC_Error; + } +// printf("bpipe-fd: IO_WRITE fd=%p buf=%p len=%d\n", p_ctx->fd, io->buf, io->count); + io->status = fwrite(io->buf, 1, io->count, p_ctx->fd); +// printf("bpipe-fd: IO_WRITE buf=%p len=%d\n", io->buf, io->status); + if (io->status == 0 && ferror(p_ctx->fd)) { + bfuncs->JobMessage(ctx, fi, li, M_FATAL, 0, + "Pipe write error\n"); + bfuncs->DebugMessage(ctx, fi, li, dbglvl, + "Pipe read error: ERR=%s\n", strerror(errno)); + return bRC_Error; + } + break; + + case IO_CLOSE: + if (!p_ctx->fd) { + bfuncs->JobMessage(ctx, fi, li, M_FATAL, 0, "Logic error: NULL FD on bpipe close\n"); + return bRC_Error; + } + io->status = pclose(p_ctx->fd); + break; + + case IO_SEEK: + io->offset = p_ctx->offset; + break; + } + return bRC_OK; +} + +/* + * Bacula is notifying us that a plugin name string was found, and + * passing us the plugin command, so we can prepare for a restore. + */ +static bRC startRestoreFile(bpContext *ctx, const char *cmd) +{ +// printf("bpipe-fd: startRestoreFile cmd=%s\n", cmd); + return bRC_OK; +} + +/* + * Bacula is notifying us that the plugin data has terminated, so + * the restore for this particular file is done. + */ +static bRC endRestoreFile(bpContext *ctx) +{ +// printf("bpipe-fd: endRestoreFile\n"); + return bRC_OK; +} + +/* + * This is called during restore to create the file (if necessary) + * We must return in rp->create_status: + * + * CF_ERROR -- error + * CF_SKIP -- skip processing this file + * CF_EXTRACT -- extract the file (i.e.call i/o routines) + * CF_CREATED -- created, but no content to extract (typically directories) + * + */ +static bRC createFile(bpContext *ctx, struct restore_pkt *rp) +{ +// printf("bpipe-fd: createFile\n"); + if (strlen(rp->where) > 512) { + printf("Restore target dir too long. Restricting to first 512 bytes.\n"); + } + strncpy(((struct plugin_ctx *)ctx->pContext)->where, rp->where, 513); + ((struct plugin_ctx *)ctx->pContext)->replace = rp->replace; + rp->create_status = CF_EXTRACT; + return bRC_OK; +} + +/* + * We will get here if the File is a directory after everything + * is written in the directory. + */ +static bRC setFileAttributes(bpContext *ctx, struct restore_pkt *rp) +{ +// printf("bpipe-fd: setFileAttributes\n"); + return bRC_OK; +} + +/************************************************************************* + * Apply codes in writer command: + * %w -> "where" + * %r -> "replace" + * + * Replace: + * 'always' => 'a', chr(97) + * 'ifnewer' => 'w', chr(119) + * 'ifolder' => 'o', chr(111) + * 'never' => 'n', chr(110) + * + * This function will allocate the required amount of memory with malloc. + * Need to be free()d manually. 
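As a worked check of the size computation described above (hypothetical values): with writer = "restore -w %w -r %r" (19 bytes), where = "/tmp/bacula-restores" (20 bytes) and replace = 'a', w_count and r_count are both 1, so the allocation is 19 + 1*(20-2) - 1 + 1 = 37 bytes, exactly enough for "restore -w /tmp/bacula-restores -r a" plus the terminating NUL.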
+ * Inspired by edit_job_codes in lib/util.c + */ + +static char *apply_rp_codes(struct plugin_ctx * p_ctx) +{ + char *p, *q; + const char *str; + char add[10]; + int w_count = 0, r_count = 0; + char *omsg; + + char *imsg = p_ctx->writer; + + if (!imsg) { + return NULL; + } + + if ((p = imsg)) { + while ((q = strstr(p, "%w"))) { + w_count++; + p=q+1; + } + + p = imsg; + while ((q = strstr(p, "%r"))) { + r_count++; + p=q+1; + } + } + + /* Required mem: + * len(imsg) + * + number of "where" codes * (len(where)-2) + * - number of "replace" codes + */ + omsg = (char*)malloc(strlen(imsg) + (w_count * (strlen(p_ctx->where)-2)) - r_count + 1); + if (!omsg) { + fprintf(stderr, "Out of memory."); + return NULL; + } + + *omsg = 0; + //printf("apply_rp_codes: %s\n", imsg); + for (p=imsg; *p; p++) { + if (*p == '%') { + switch (*++p) { + case '%': + str = "%"; + break; + case 'w': + str = p_ctx->where; + break; + case 'r': + snprintf(add, 2, "%c", p_ctx->replace); + str = add; + break; + default: + add[0] = '%'; + add[1] = *p; + add[2] = 0; + str = add; + break; + } + } else { + add[0] = *p; + add[1] = 0; + str = add; + } + //printf("add_str %s\n", str); + strcat(omsg, str); + //printf("omsg=%s\n", omsg); + } + return omsg; +} + + +#ifdef __cplusplus +} +#endif diff --git a/src/win32/filed/plugins/bpipe-fd.def b/src/win32/filed/plugins/bpipe-fd.def new file mode 100644 index 00000000..e79095cb --- /dev/null +++ b/src/win32/filed/plugins/bpipe-fd.def @@ -0,0 +1,15 @@ +LIBRARY bacula.dll +EXPORTS + +; compat.o +;_Z10open_bpipePciPKc +;_Z11close_bpipeP5BPIPE +;_Z11close_wpipeP5BPIPE + +;console_command DATA +;b_plugin_list DATA +;plugin_bopen DATA +;plugin_bclose DATA +;plugin_bwrite DATA +;plugin_bread DATA +;plugin_blseek DATA diff --git a/src/win32/filed/plugins/comadmin.h b/src/win32/filed/plugins/comadmin.h new file mode 100644 index 00000000..9773b9bf --- /dev/null +++ b/src/win32/filed/plugins/comadmin.h @@ -0,0 +1,878 @@ +/** + * This file has no copyright assigned and is placed in the Public Domain. + * This file is part of the w64 mingw-runtime package. + * No warranty is given; refer to the file DISCLAIMER within this package. 
+ */ +#ifndef __REQUIRED_RPCNDR_H_VERSION__ +#define __REQUIRED_RPCNDR_H_VERSION__ 475 +#endif + +#include "rpc.h" +#include "rpcndr.h" + +#ifndef __RPCNDR_H_VERSION__ +#error This stub requires an updated version of +#endif + +#ifndef COM_NO_WINDOWS_H +#include "windows.h" +#include "ole2.h" +#endif + +#ifndef __comadmin_h__ +#define __comadmin_h__ + +#ifndef __ICOMAdminCatalog_FWD_DEFINED__ +#define __ICOMAdminCatalog_FWD_DEFINED__ +typedef struct ICOMAdminCatalog ICOMAdminCatalog; +#endif + +#ifndef __ICOMAdminCatalog2_FWD_DEFINED__ +#define __ICOMAdminCatalog2_FWD_DEFINED__ +typedef struct ICOMAdminCatalog2 ICOMAdminCatalog2; +#endif + +#ifndef __ICatalogObject_FWD_DEFINED__ +#define __ICatalogObject_FWD_DEFINED__ +typedef struct ICatalogObject ICatalogObject; +#endif + +#ifndef __ICatalogCollection_FWD_DEFINED__ +#define __ICatalogCollection_FWD_DEFINED__ +typedef struct ICatalogCollection ICatalogCollection; +#endif + +#ifndef __COMAdminCatalog_FWD_DEFINED__ +#define __COMAdminCatalog_FWD_DEFINED__ +#ifdef __cplusplus +typedef class COMAdminCatalog COMAdminCatalog; +#else +typedef struct COMAdminCatalog COMAdminCatalog; +#endif +#endif + +#ifndef __COMAdminCatalogObject_FWD_DEFINED__ +#define __COMAdminCatalogObject_FWD_DEFINED__ +#ifdef __cplusplus +typedef class COMAdminCatalogObject COMAdminCatalogObject; +#else +typedef struct COMAdminCatalogObject COMAdminCatalogObject; +#endif +#endif + +#ifndef __COMAdminCatalogCollection_FWD_DEFINED__ +#define __COMAdminCatalogCollection_FWD_DEFINED__ +#ifdef __cplusplus +typedef class COMAdminCatalogCollection COMAdminCatalogCollection; +#else +typedef struct COMAdminCatalogCollection COMAdminCatalogCollection; +#endif +#endif + +#include "unknwn.h" +#include "oaidl.h" + +#ifdef __cplusplus +extern "C"{ +#endif + +#ifndef __MIDL_user_allocate_free_DEFINED__ +#define __MIDL_user_allocate_free_DEFINED__ + void *__RPC_API MIDL_user_allocate(size_t); + void __RPC_API MIDL_user_free(void *); +#endif + +#include + + extern RPC_IF_HANDLE __MIDL_itf_comadmin_0000_v0_0_c_ifspec; + extern RPC_IF_HANDLE __MIDL_itf_comadmin_0000_v0_0_s_ifspec; + +#ifndef __ICOMAdminCatalog_INTERFACE_DEFINED__ +#define __ICOMAdminCatalog_INTERFACE_DEFINED__ + EXTERN_C const IID IID_ICOMAdminCatalog; +#if defined(__cplusplus) && !defined(CINTERFACE) + struct ICOMAdminCatalog : public IDispatch { + public: + virtual HRESULT WINAPI GetCollection(BSTR bstrCollName,IDispatch **ppCatalogCollection) = 0; + virtual HRESULT WINAPI Connect(BSTR bstrCatalogServerName,IDispatch **ppCatalogCollection) = 0; + virtual HRESULT WINAPI get_MajorVersion(long *plMajorVersion) = 0; + virtual HRESULT WINAPI get_MinorVersion(long *plMinorVersion) = 0; + virtual HRESULT WINAPI GetCollectionByQuery(BSTR bstrCollName,SAFEARRAY **ppsaVarQuery,IDispatch **ppCatalogCollection) = 0; + virtual HRESULT WINAPI ImportComponent(BSTR bstrApplIDOrName,BSTR bstrCLSIDOrProgID) = 0; + virtual HRESULT WINAPI InstallComponent(BSTR bstrApplIDOrName,BSTR bstrDLL,BSTR bstrTLB,BSTR bstrPSDLL) = 0; + virtual HRESULT WINAPI ShutdownApplication(BSTR bstrApplIDOrName) = 0; + virtual HRESULT WINAPI ExportApplication(BSTR bstrApplIDOrName,BSTR bstrApplicationFile,long lOptions) = 0; + virtual HRESULT WINAPI InstallApplication(BSTR bstrApplicationFile,BSTR bstrDestinationDirectory,long lOptions,BSTR bstrUserId,BSTR bstrPassword,BSTR bstrRSN) = 0; + virtual HRESULT WINAPI StopRouter(void) = 0; + virtual HRESULT WINAPI RefreshRouter(void) = 0; + virtual HRESULT WINAPI StartRouter(void) = 0; + virtual HRESULT WINAPI 
Reserved1(void) = 0; + virtual HRESULT WINAPI Reserved2(void) = 0; + virtual HRESULT WINAPI InstallMultipleComponents(BSTR bstrApplIDOrName,SAFEARRAY **ppsaVarFileNames,SAFEARRAY **ppsaVarCLSIDs) = 0; + virtual HRESULT WINAPI GetMultipleComponentsInfo(BSTR bstrApplIdOrName,SAFEARRAY **ppsaVarFileNames,SAFEARRAY **ppsaVarCLSIDs,SAFEARRAY **ppsaVarClassNames,SAFEARRAY **ppsaVarFileFlags,SAFEARRAY **ppsaVarComponentFlags) = 0; + virtual HRESULT WINAPI RefreshComponents(void) = 0; + virtual HRESULT WINAPI BackupREGDB(BSTR bstrBackupFilePath) = 0; + virtual HRESULT WINAPI RestoreREGDB(BSTR bstrBackupFilePath) = 0; + virtual HRESULT WINAPI QueryApplicationFile(BSTR bstrApplicationFile,BSTR *pbstrApplicationName,BSTR *pbstrApplicationDescription,VARIANT_BOOL *pbHasUsers,VARIANT_BOOL *pbIsProxy,SAFEARRAY **ppsaVarFileNames) = 0; + virtual HRESULT WINAPI StartApplication(BSTR bstrApplIdOrName) = 0; + virtual HRESULT WINAPI ServiceCheck(long lService,long *plStatus) = 0; + virtual HRESULT WINAPI InstallMultipleEventClasses(BSTR bstrApplIdOrName,SAFEARRAY **ppsaVarFileNames,SAFEARRAY **ppsaVarCLSIDS) = 0; + virtual HRESULT WINAPI InstallEventClass(BSTR bstrApplIdOrName,BSTR bstrDLL,BSTR bstrTLB,BSTR bstrPSDLL) = 0; + virtual HRESULT WINAPI GetEventClassesForIID(BSTR bstrIID,SAFEARRAY **ppsaVarCLSIDs,SAFEARRAY **ppsaVarProgIDs,SAFEARRAY **ppsaVarDescriptions) = 0; + }; +#else + typedef struct ICOMAdminCatalogVtbl { + BEGIN_INTERFACE + HRESULT (WINAPI *QueryInterface)(ICOMAdminCatalog *This,REFIID riid,void **ppvObject); + ULONG (WINAPI *AddRef)(ICOMAdminCatalog *This); + ULONG (WINAPI *Release)(ICOMAdminCatalog *This); + HRESULT (WINAPI *GetTypeInfoCount)(ICOMAdminCatalog *This,UINT *pctinfo); + HRESULT (WINAPI *GetTypeInfo)(ICOMAdminCatalog *This,UINT iTInfo,LCID lcid,ITypeInfo **ppTInfo); + HRESULT (WINAPI *GetIDsOfNames)(ICOMAdminCatalog *This,REFIID riid,LPOLESTR *rgszNames,UINT cNames,LCID lcid,DISPID *rgDispId); + HRESULT (WINAPI *Invoke)(ICOMAdminCatalog *This,DISPID dispIdMember,REFIID riid,LCID lcid,WORD wFlags,DISPPARAMS *pDispParams,VARIANT *pVarResult,EXCEPINFO *pExcepInfo,UINT *puArgErr); + HRESULT (WINAPI *GetCollection)(ICOMAdminCatalog *This,BSTR bstrCollName,IDispatch **ppCatalogCollection); + HRESULT (WINAPI *Connect)(ICOMAdminCatalog *This,BSTR bstrCatalogServerName,IDispatch **ppCatalogCollection); + HRESULT (WINAPI *get_MajorVersion)(ICOMAdminCatalog *This,long *plMajorVersion); + HRESULT (WINAPI *get_MinorVersion)(ICOMAdminCatalog *This,long *plMinorVersion); + HRESULT (WINAPI *GetCollectionByQuery)(ICOMAdminCatalog *This,BSTR bstrCollName,SAFEARRAY **ppsaVarQuery,IDispatch **ppCatalogCollection); + HRESULT (WINAPI *ImportComponent)(ICOMAdminCatalog *This,BSTR bstrApplIDOrName,BSTR bstrCLSIDOrProgID); + HRESULT (WINAPI *InstallComponent)(ICOMAdminCatalog *This,BSTR bstrApplIDOrName,BSTR bstrDLL,BSTR bstrTLB,BSTR bstrPSDLL); + HRESULT (WINAPI *ShutdownApplication)(ICOMAdminCatalog *This,BSTR bstrApplIDOrName); + HRESULT (WINAPI *ExportApplication)(ICOMAdminCatalog *This,BSTR bstrApplIDOrName,BSTR bstrApplicationFile,long lOptions); + HRESULT (WINAPI *InstallApplication)(ICOMAdminCatalog *This,BSTR bstrApplicationFile,BSTR bstrDestinationDirectory,long lOptions,BSTR bstrUserId,BSTR bstrPassword,BSTR bstrRSN); + HRESULT (WINAPI *StopRouter)(ICOMAdminCatalog *This); + HRESULT (WINAPI *RefreshRouter)(ICOMAdminCatalog *This); + HRESULT (WINAPI *StartRouter)(ICOMAdminCatalog *This); + HRESULT (WINAPI *Reserved1)(ICOMAdminCatalog *This); + HRESULT (WINAPI *Reserved2)(ICOMAdminCatalog 
*This); + HRESULT (WINAPI *InstallMultipleComponents)(ICOMAdminCatalog *This,BSTR bstrApplIDOrName,SAFEARRAY **ppsaVarFileNames,SAFEARRAY **ppsaVarCLSIDs); + HRESULT (WINAPI *GetMultipleComponentsInfo)(ICOMAdminCatalog *This,BSTR bstrApplIdOrName,SAFEARRAY **ppsaVarFileNames,SAFEARRAY **ppsaVarCLSIDs,SAFEARRAY **ppsaVarClassNames,SAFEARRAY **ppsaVarFileFlags,SAFEARRAY **ppsaVarComponentFlags); + HRESULT (WINAPI *RefreshComponents)(ICOMAdminCatalog *This); + HRESULT (WINAPI *BackupREGDB)(ICOMAdminCatalog *This,BSTR bstrBackupFilePath); + HRESULT (WINAPI *RestoreREGDB)(ICOMAdminCatalog *This,BSTR bstrBackupFilePath); + HRESULT (WINAPI *QueryApplicationFile)(ICOMAdminCatalog *This,BSTR bstrApplicationFile,BSTR *pbstrApplicationName,BSTR *pbstrApplicationDescription,VARIANT_BOOL *pbHasUsers,VARIANT_BOOL *pbIsProxy,SAFEARRAY **ppsaVarFileNames); + HRESULT (WINAPI *StartApplication)(ICOMAdminCatalog *This,BSTR bstrApplIdOrName); + HRESULT (WINAPI *ServiceCheck)(ICOMAdminCatalog *This,long lService,long *plStatus); + HRESULT (WINAPI *InstallMultipleEventClasses)(ICOMAdminCatalog *This,BSTR bstrApplIdOrName,SAFEARRAY **ppsaVarFileNames,SAFEARRAY **ppsaVarCLSIDS); + HRESULT (WINAPI *InstallEventClass)(ICOMAdminCatalog *This,BSTR bstrApplIdOrName,BSTR bstrDLL,BSTR bstrTLB,BSTR bstrPSDLL); + HRESULT (WINAPI *GetEventClassesForIID)(ICOMAdminCatalog *This,BSTR bstrIID,SAFEARRAY **ppsaVarCLSIDs,SAFEARRAY **ppsaVarProgIDs,SAFEARRAY **ppsaVarDescriptions); + END_INTERFACE + } ICOMAdminCatalogVtbl; + struct ICOMAdminCatalog { + CONST_VTBL struct ICOMAdminCatalogVtbl *lpVtbl; + }; +#ifdef COBJMACROS +#define ICOMAdminCatalog_QueryInterface(This,riid,ppvObject) (This)->lpVtbl->QueryInterface(This,riid,ppvObject) +#define ICOMAdminCatalog_AddRef(This) (This)->lpVtbl->AddRef(This) +#define ICOMAdminCatalog_Release(This) (This)->lpVtbl->Release(This) +#define ICOMAdminCatalog_GetTypeInfoCount(This,pctinfo) (This)->lpVtbl->GetTypeInfoCount(This,pctinfo) +#define ICOMAdminCatalog_GetTypeInfo(This,iTInfo,lcid,ppTInfo) (This)->lpVtbl->GetTypeInfo(This,iTInfo,lcid,ppTInfo) +#define ICOMAdminCatalog_GetIDsOfNames(This,riid,rgszNames,cNames,lcid,rgDispId) (This)->lpVtbl->GetIDsOfNames(This,riid,rgszNames,cNames,lcid,rgDispId) +#define ICOMAdminCatalog_Invoke(This,dispIdMember,riid,lcid,wFlags,pDispParams,pVarResult,pExcepInfo,puArgErr) (This)->lpVtbl->Invoke(This,dispIdMember,riid,lcid,wFlags,pDispParams,pVarResult,pExcepInfo,puArgErr) +#define ICOMAdminCatalog_GetCollection(This,bstrCollName,ppCatalogCollection) (This)->lpVtbl->GetCollection(This,bstrCollName,ppCatalogCollection) +#define ICOMAdminCatalog_Connect(This,bstrCatalogServerName,ppCatalogCollection) (This)->lpVtbl->Connect(This,bstrCatalogServerName,ppCatalogCollection) +#define ICOMAdminCatalog_get_MajorVersion(This,plMajorVersion) (This)->lpVtbl->get_MajorVersion(This,plMajorVersion) +#define ICOMAdminCatalog_get_MinorVersion(This,plMinorVersion) (This)->lpVtbl->get_MinorVersion(This,plMinorVersion) +#define ICOMAdminCatalog_GetCollectionByQuery(This,bstrCollName,ppsaVarQuery,ppCatalogCollection) (This)->lpVtbl->GetCollectionByQuery(This,bstrCollName,ppsaVarQuery,ppCatalogCollection) +#define ICOMAdminCatalog_ImportComponent(This,bstrApplIDOrName,bstrCLSIDOrProgID) (This)->lpVtbl->ImportComponent(This,bstrApplIDOrName,bstrCLSIDOrProgID) +#define ICOMAdminCatalog_InstallComponent(This,bstrApplIDOrName,bstrDLL,bstrTLB,bstrPSDLL) (This)->lpVtbl->InstallComponent(This,bstrApplIDOrName,bstrDLL,bstrTLB,bstrPSDLL) +#define 
ICOMAdminCatalog_ShutdownApplication(This,bstrApplIDOrName) (This)->lpVtbl->ShutdownApplication(This,bstrApplIDOrName) +#define ICOMAdminCatalog_ExportApplication(This,bstrApplIDOrName,bstrApplicationFile,lOptions) (This)->lpVtbl->ExportApplication(This,bstrApplIDOrName,bstrApplicationFile,lOptions) +#define ICOMAdminCatalog_InstallApplication(This,bstrApplicationFile,bstrDestinationDirectory,lOptions,bstrUserId,bstrPassword,bstrRSN) (This)->lpVtbl->InstallApplication(This,bstrApplicationFile,bstrDestinationDirectory,lOptions,bstrUserId,bstrPassword,bstrRSN) +#define ICOMAdminCatalog_StopRouter(This) (This)->lpVtbl->StopRouter(This) +#define ICOMAdminCatalog_RefreshRouter(This) (This)->lpVtbl->RefreshRouter(This) +#define ICOMAdminCatalog_StartRouter(This) (This)->lpVtbl->StartRouter(This) +#define ICOMAdminCatalog_Reserved1(This) (This)->lpVtbl->Reserved1(This) +#define ICOMAdminCatalog_Reserved2(This) (This)->lpVtbl->Reserved2(This) +#define ICOMAdminCatalog_InstallMultipleComponents(This,bstrApplIDOrName,ppsaVarFileNames,ppsaVarCLSIDs) (This)->lpVtbl->InstallMultipleComponents(This,bstrApplIDOrName,ppsaVarFileNames,ppsaVarCLSIDs) +#define ICOMAdminCatalog_GetMultipleComponentsInfo(This,bstrApplIdOrName,ppsaVarFileNames,ppsaVarCLSIDs,ppsaVarClassNames,ppsaVarFileFlags,ppsaVarComponentFlags) (This)->lpVtbl->GetMultipleComponentsInfo(This,bstrApplIdOrName,ppsaVarFileNames,ppsaVarCLSIDs,ppsaVarClassNames,ppsaVarFileFlags,ppsaVarComponentFlags) +#define ICOMAdminCatalog_RefreshComponents(This) (This)->lpVtbl->RefreshComponents(This) +#define ICOMAdminCatalog_BackupREGDB(This,bstrBackupFilePath) (This)->lpVtbl->BackupREGDB(This,bstrBackupFilePath) +#define ICOMAdminCatalog_RestoreREGDB(This,bstrBackupFilePath) (This)->lpVtbl->RestoreREGDB(This,bstrBackupFilePath) +#define ICOMAdminCatalog_QueryApplicationFile(This,bstrApplicationFile,pbstrApplicationName,pbstrApplicationDescription,pbHasUsers,pbIsProxy,ppsaVarFileNames) (This)->lpVtbl->QueryApplicationFile(This,bstrApplicationFile,pbstrApplicationName,pbstrApplicationDescription,pbHasUsers,pbIsProxy,ppsaVarFileNames) +#define ICOMAdminCatalog_StartApplication(This,bstrApplIdOrName) (This)->lpVtbl->StartApplication(This,bstrApplIdOrName) +#define ICOMAdminCatalog_ServiceCheck(This,lService,plStatus) (This)->lpVtbl->ServiceCheck(This,lService,plStatus) +#define ICOMAdminCatalog_InstallMultipleEventClasses(This,bstrApplIdOrName,ppsaVarFileNames,ppsaVarCLSIDS) (This)->lpVtbl->InstallMultipleEventClasses(This,bstrApplIdOrName,ppsaVarFileNames,ppsaVarCLSIDS) +#define ICOMAdminCatalog_InstallEventClass(This,bstrApplIdOrName,bstrDLL,bstrTLB,bstrPSDLL) (This)->lpVtbl->InstallEventClass(This,bstrApplIdOrName,bstrDLL,bstrTLB,bstrPSDLL) +#define ICOMAdminCatalog_GetEventClassesForIID(This,bstrIID,ppsaVarCLSIDs,ppsaVarProgIDs,ppsaVarDescriptions) (This)->lpVtbl->GetEventClassesForIID(This,bstrIID,ppsaVarCLSIDs,ppsaVarProgIDs,ppsaVarDescriptions) +#endif +#endif + HRESULT WINAPI ICOMAdminCatalog_GetCollection_Proxy(ICOMAdminCatalog *This,BSTR bstrCollName,IDispatch **ppCatalogCollection); + void __RPC_STUB ICOMAdminCatalog_GetCollection_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog_Connect_Proxy(ICOMAdminCatalog *This,BSTR bstrCatalogServerName,IDispatch **ppCatalogCollection); + void __RPC_STUB ICOMAdminCatalog_Connect_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI 
ICOMAdminCatalog_get_MajorVersion_Proxy(ICOMAdminCatalog *This,long *plMajorVersion); + void __RPC_STUB ICOMAdminCatalog_get_MajorVersion_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog_get_MinorVersion_Proxy(ICOMAdminCatalog *This,long *plMinorVersion); + void __RPC_STUB ICOMAdminCatalog_get_MinorVersion_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog_GetCollectionByQuery_Proxy(ICOMAdminCatalog *This,BSTR bstrCollName,SAFEARRAY **ppsaVarQuery,IDispatch **ppCatalogCollection); + void __RPC_STUB ICOMAdminCatalog_GetCollectionByQuery_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog_ImportComponent_Proxy(ICOMAdminCatalog *This,BSTR bstrApplIDOrName,BSTR bstrCLSIDOrProgID); + void __RPC_STUB ICOMAdminCatalog_ImportComponent_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog_InstallComponent_Proxy(ICOMAdminCatalog *This,BSTR bstrApplIDOrName,BSTR bstrDLL,BSTR bstrTLB,BSTR bstrPSDLL); + void __RPC_STUB ICOMAdminCatalog_InstallComponent_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog_ShutdownApplication_Proxy(ICOMAdminCatalog *This,BSTR bstrApplIDOrName); + void __RPC_STUB ICOMAdminCatalog_ShutdownApplication_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog_ExportApplication_Proxy(ICOMAdminCatalog *This,BSTR bstrApplIDOrName,BSTR bstrApplicationFile,long lOptions); + void __RPC_STUB ICOMAdminCatalog_ExportApplication_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog_InstallApplication_Proxy(ICOMAdminCatalog *This,BSTR bstrApplicationFile,BSTR bstrDestinationDirectory,long lOptions,BSTR bstrUserId,BSTR bstrPassword,BSTR bstrRSN); + void __RPC_STUB ICOMAdminCatalog_InstallApplication_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog_StopRouter_Proxy(ICOMAdminCatalog *This); + void __RPC_STUB ICOMAdminCatalog_StopRouter_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog_RefreshRouter_Proxy(ICOMAdminCatalog *This); + void __RPC_STUB ICOMAdminCatalog_RefreshRouter_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog_StartRouter_Proxy(ICOMAdminCatalog *This); + void __RPC_STUB ICOMAdminCatalog_StartRouter_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog_Reserved1_Proxy(ICOMAdminCatalog *This); + void __RPC_STUB ICOMAdminCatalog_Reserved1_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog_Reserved2_Proxy(ICOMAdminCatalog *This); + void __RPC_STUB ICOMAdminCatalog_Reserved2_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE 
_pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog_InstallMultipleComponents_Proxy(ICOMAdminCatalog *This,BSTR bstrApplIDOrName,SAFEARRAY **ppsaVarFileNames,SAFEARRAY **ppsaVarCLSIDs); + void __RPC_STUB ICOMAdminCatalog_InstallMultipleComponents_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog_GetMultipleComponentsInfo_Proxy(ICOMAdminCatalog *This,BSTR bstrApplIdOrName,SAFEARRAY **ppsaVarFileNames,SAFEARRAY **ppsaVarCLSIDs,SAFEARRAY **ppsaVarClassNames,SAFEARRAY **ppsaVarFileFlags,SAFEARRAY **ppsaVarComponentFlags); + void __RPC_STUB ICOMAdminCatalog_GetMultipleComponentsInfo_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog_RefreshComponents_Proxy(ICOMAdminCatalog *This); + void __RPC_STUB ICOMAdminCatalog_RefreshComponents_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog_BackupREGDB_Proxy(ICOMAdminCatalog *This,BSTR bstrBackupFilePath); + void __RPC_STUB ICOMAdminCatalog_BackupREGDB_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog_RestoreREGDB_Proxy(ICOMAdminCatalog *This,BSTR bstrBackupFilePath); + void __RPC_STUB ICOMAdminCatalog_RestoreREGDB_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog_QueryApplicationFile_Proxy(ICOMAdminCatalog *This,BSTR bstrApplicationFile,BSTR *pbstrApplicationName,BSTR *pbstrApplicationDescription,VARIANT_BOOL *pbHasUsers,VARIANT_BOOL *pbIsProxy,SAFEARRAY **ppsaVarFileNames); + void __RPC_STUB ICOMAdminCatalog_QueryApplicationFile_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog_StartApplication_Proxy(ICOMAdminCatalog *This,BSTR bstrApplIdOrName); + void __RPC_STUB ICOMAdminCatalog_StartApplication_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog_ServiceCheck_Proxy(ICOMAdminCatalog *This,long lService,long *plStatus); + void __RPC_STUB ICOMAdminCatalog_ServiceCheck_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog_InstallMultipleEventClasses_Proxy(ICOMAdminCatalog *This,BSTR bstrApplIdOrName,SAFEARRAY **ppsaVarFileNames,SAFEARRAY **ppsaVarCLSIDS); + void __RPC_STUB ICOMAdminCatalog_InstallMultipleEventClasses_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog_InstallEventClass_Proxy(ICOMAdminCatalog *This,BSTR bstrApplIdOrName,BSTR bstrDLL,BSTR bstrTLB,BSTR bstrPSDLL); + void __RPC_STUB ICOMAdminCatalog_InstallEventClass_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog_GetEventClassesForIID_Proxy(ICOMAdminCatalog *This,BSTR bstrIID,SAFEARRAY **ppsaVarCLSIDs,SAFEARRAY **ppsaVarProgIDs,SAFEARRAY **ppsaVarDescriptions); + void __RPC_STUB ICOMAdminCatalog_GetEventClassesForIID_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); 
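This header follows the usual dual C/C++ COM layout: C++ sees the interface as a class with virtual methods, while C callers go through lpVtbl, optionally via the COBJMACROS wrappers defined above. A minimal sketch (not part of the header) of the C spelling, compiled as C with COBJMACROS defined; obtaining a live ICOMAdminCatalog pointer (CoInitializeEx/CoCreateInstance) is assumed to happen elsewhere.

#define COBJMACROS
#include "comadmin.h"

long catalog_major_version(ICOMAdminCatalog *cat)
{
   long ver = 0;
   /* expands to cat->lpVtbl->get_MajorVersion(cat, &ver) */
   ICOMAdminCatalog_get_MajorVersion(cat, &ver);
   /* a C++ caller would simply write cat->get_MajorVersion(&ver) */
   return ver;
}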
+#endif + + typedef enum COMAdminInUse { + COMAdminNotInUse = 0,COMAdminInUseByCatalog = 0x1,COMAdminInUseByRegistryUnknown = 0x2,COMAdminInUseByRegistryProxyStub = 0x3, + COMAdminInUseByRegistryTypeLib = 0x4,COMAdminInUseByRegistryClsid = 0x5 + } COMAdminInUse; + + extern RPC_IF_HANDLE __MIDL_itf_comadmin_0116_v0_0_c_ifspec; + extern RPC_IF_HANDLE __MIDL_itf_comadmin_0116_v0_0_s_ifspec; + +#ifndef __ICOMAdminCatalog2_INTERFACE_DEFINED__ +#define __ICOMAdminCatalog2_INTERFACE_DEFINED__ + EXTERN_C const IID IID_ICOMAdminCatalog2; +#if defined(__cplusplus) && !defined(CINTERFACE) + struct ICOMAdminCatalog2 : public ICOMAdminCatalog { + public: + virtual HRESULT WINAPI GetCollectionByQuery2(BSTR bstrCollectionName,VARIANT *pVarQueryStrings,IDispatch **ppCatalogCollection) = 0; + virtual HRESULT WINAPI GetApplicationInstanceIDFromProcessID(long lProcessID,BSTR *pbstrApplicationInstanceID) = 0; + virtual HRESULT WINAPI ShutdownApplicationInstances(VARIANT *pVarApplicationInstanceID) = 0; + virtual HRESULT WINAPI PauseApplicationInstances(VARIANT *pVarApplicationInstanceID) = 0; + virtual HRESULT WINAPI ResumeApplicationInstances(VARIANT *pVarApplicationInstanceID) = 0; + virtual HRESULT WINAPI RecycleApplicationInstances(VARIANT *pVarApplicationInstanceID,long lReasonCode) = 0; + virtual HRESULT WINAPI AreApplicationInstancesPaused(VARIANT *pVarApplicationInstanceID,VARIANT_BOOL *pVarBoolPaused) = 0; + virtual HRESULT WINAPI DumpApplicationInstance(BSTR bstrApplicationInstanceID,BSTR bstrDirectory,long lMaxImages,BSTR *pbstrDumpFile) = 0; + virtual HRESULT WINAPI get_IsApplicationInstanceDumpSupported(VARIANT_BOOL *pVarBoolDumpSupported) = 0; + virtual HRESULT WINAPI CreateServiceForApplication(BSTR bstrApplicationIDOrName,BSTR bstrServiceName,BSTR bstrStartType,BSTR bstrErrorControl,BSTR bstrDependencies,BSTR bstrRunAs,BSTR bstrPassword,VARIANT_BOOL bDesktopOk) = 0; + virtual HRESULT WINAPI DeleteServiceForApplication(BSTR bstrApplicationIDOrName) = 0; + virtual HRESULT WINAPI GetPartitionID(BSTR bstrApplicationIDOrName,BSTR *pbstrPartitionID) = 0; + virtual HRESULT WINAPI GetPartitionName(BSTR bstrApplicationIDOrName,BSTR *pbstrPartitionName) = 0; + virtual HRESULT WINAPI put_CurrentPartition(BSTR bstrPartitionIDOrName) = 0; + virtual HRESULT WINAPI get_CurrentPartitionID(BSTR *pbstrPartitionID) = 0; + virtual HRESULT WINAPI get_CurrentPartitionName(BSTR *pbstrPartitionName) = 0; + virtual HRESULT WINAPI get_GlobalPartitionID(BSTR *pbstrGlobalPartitionID) = 0; + virtual HRESULT WINAPI FlushPartitionCache(void) = 0; + virtual HRESULT WINAPI CopyApplications(BSTR bstrSourcePartitionIDOrName,VARIANT *pVarApplicationID,BSTR bstrDestinationPartitionIDOrName) = 0; + virtual HRESULT WINAPI CopyComponents(BSTR bstrSourceApplicationIDOrName,VARIANT *pVarCLSIDOrProgID,BSTR bstrDestinationApplicationIDOrName) = 0; + virtual HRESULT WINAPI MoveComponents(BSTR bstrSourceApplicationIDOrName,VARIANT *pVarCLSIDOrProgID,BSTR bstrDestinationApplicationIDOrName) = 0; + virtual HRESULT WINAPI AliasComponent(BSTR bstrSrcApplicationIDOrName,BSTR bstrCLSIDOrProgID,BSTR bstrDestApplicationIDOrName,BSTR bstrNewProgId,BSTR bstrNewClsid) = 0; + virtual HRESULT WINAPI IsSafeToDelete(BSTR bstrDllName,COMAdminInUse *pCOMAdminInUse) = 0; + virtual HRESULT WINAPI ImportUnconfiguredComponents(BSTR bstrApplicationIDOrName,VARIANT *pVarCLSIDOrProgID,VARIANT *pVarComponentType) = 0; + virtual HRESULT WINAPI PromoteUnconfiguredComponents(BSTR bstrApplicationIDOrName,VARIANT *pVarCLSIDOrProgID,VARIANT *pVarComponentType) = 0; + 
virtual HRESULT WINAPI ImportComponents(BSTR bstrApplicationIDOrName,VARIANT *pVarCLSIDOrProgID,VARIANT *pVarComponentType) = 0; + virtual HRESULT WINAPI get_Is64BitCatalogServer(VARIANT_BOOL *pbIs64Bit) = 0; + virtual HRESULT WINAPI ExportPartition(BSTR bstrPartitionIDOrName,BSTR bstrPartitionFileName,long lOptions) = 0; + virtual HRESULT WINAPI InstallPartition(BSTR bstrFileName,BSTR bstrDestDirectory,long lOptions,BSTR bstrUserID,BSTR bstrPassword,BSTR bstrRSN) = 0; + virtual HRESULT WINAPI QueryApplicationFile2(BSTR bstrApplicationFile,IDispatch **ppFilesForImport) = 0; + virtual HRESULT WINAPI GetComponentVersionCount(BSTR bstrCLSIDOrProgID,long *plVersionCount) = 0; + }; +#else + typedef struct ICOMAdminCatalog2Vtbl { + BEGIN_INTERFACE + HRESULT (WINAPI *QueryInterface)(ICOMAdminCatalog2 *This,REFIID riid,void **ppvObject); + ULONG (WINAPI *AddRef)(ICOMAdminCatalog2 *This); + ULONG (WINAPI *Release)(ICOMAdminCatalog2 *This); + HRESULT (WINAPI *GetTypeInfoCount)(ICOMAdminCatalog2 *This,UINT *pctinfo); + HRESULT (WINAPI *GetTypeInfo)(ICOMAdminCatalog2 *This,UINT iTInfo,LCID lcid,ITypeInfo **ppTInfo); + HRESULT (WINAPI *GetIDsOfNames)(ICOMAdminCatalog2 *This,REFIID riid,LPOLESTR *rgszNames,UINT cNames,LCID lcid,DISPID *rgDispId); + HRESULT (WINAPI *Invoke)(ICOMAdminCatalog2 *This,DISPID dispIdMember,REFIID riid,LCID lcid,WORD wFlags,DISPPARAMS *pDispParams,VARIANT *pVarResult,EXCEPINFO *pExcepInfo,UINT *puArgErr); + HRESULT (WINAPI *GetCollection)(ICOMAdminCatalog2 *This,BSTR bstrCollName,IDispatch **ppCatalogCollection); + HRESULT (WINAPI *Connect)(ICOMAdminCatalog2 *This,BSTR bstrCatalogServerName,IDispatch **ppCatalogCollection); + HRESULT (WINAPI *get_MajorVersion)(ICOMAdminCatalog2 *This,long *plMajorVersion); + HRESULT (WINAPI *get_MinorVersion)(ICOMAdminCatalog2 *This,long *plMinorVersion); + HRESULT (WINAPI *GetCollectionByQuery)(ICOMAdminCatalog2 *This,BSTR bstrCollName,SAFEARRAY **ppsaVarQuery,IDispatch **ppCatalogCollection); + HRESULT (WINAPI *ImportComponent)(ICOMAdminCatalog2 *This,BSTR bstrApplIDOrName,BSTR bstrCLSIDOrProgID); + HRESULT (WINAPI *InstallComponent)(ICOMAdminCatalog2 *This,BSTR bstrApplIDOrName,BSTR bstrDLL,BSTR bstrTLB,BSTR bstrPSDLL); + HRESULT (WINAPI *ShutdownApplication)(ICOMAdminCatalog2 *This,BSTR bstrApplIDOrName); + HRESULT (WINAPI *ExportApplication)(ICOMAdminCatalog2 *This,BSTR bstrApplIDOrName,BSTR bstrApplicationFile,long lOptions); + HRESULT (WINAPI *InstallApplication)(ICOMAdminCatalog2 *This,BSTR bstrApplicationFile,BSTR bstrDestinationDirectory,long lOptions,BSTR bstrUserId,BSTR bstrPassword,BSTR bstrRSN); + HRESULT (WINAPI *StopRouter)(ICOMAdminCatalog2 *This); + HRESULT (WINAPI *RefreshRouter)(ICOMAdminCatalog2 *This); + HRESULT (WINAPI *StartRouter)(ICOMAdminCatalog2 *This); + HRESULT (WINAPI *Reserved1)(ICOMAdminCatalog2 *This); + HRESULT (WINAPI *Reserved2)(ICOMAdminCatalog2 *This); + HRESULT (WINAPI *InstallMultipleComponents)(ICOMAdminCatalog2 *This,BSTR bstrApplIDOrName,SAFEARRAY **ppsaVarFileNames,SAFEARRAY **ppsaVarCLSIDs); + HRESULT (WINAPI *GetMultipleComponentsInfo)(ICOMAdminCatalog2 *This,BSTR bstrApplIdOrName,SAFEARRAY **ppsaVarFileNames,SAFEARRAY **ppsaVarCLSIDs,SAFEARRAY **ppsaVarClassNames,SAFEARRAY **ppsaVarFileFlags,SAFEARRAY **ppsaVarComponentFlags); + HRESULT (WINAPI *RefreshComponents)(ICOMAdminCatalog2 *This); + HRESULT (WINAPI *BackupREGDB)(ICOMAdminCatalog2 *This,BSTR bstrBackupFilePath); + HRESULT (WINAPI *RestoreREGDB)(ICOMAdminCatalog2 *This,BSTR bstrBackupFilePath); + HRESULT (WINAPI 
*QueryApplicationFile)(ICOMAdminCatalog2 *This,BSTR bstrApplicationFile,BSTR *pbstrApplicationName,BSTR *pbstrApplicationDescription,VARIANT_BOOL *pbHasUsers,VARIANT_BOOL *pbIsProxy,SAFEARRAY **ppsaVarFileNames); + HRESULT (WINAPI *StartApplication)(ICOMAdminCatalog2 *This,BSTR bstrApplIdOrName); + HRESULT (WINAPI *ServiceCheck)(ICOMAdminCatalog2 *This,long lService,long *plStatus); + HRESULT (WINAPI *InstallMultipleEventClasses)(ICOMAdminCatalog2 *This,BSTR bstrApplIdOrName,SAFEARRAY **ppsaVarFileNames,SAFEARRAY **ppsaVarCLSIDS); + HRESULT (WINAPI *InstallEventClass)(ICOMAdminCatalog2 *This,BSTR bstrApplIdOrName,BSTR bstrDLL,BSTR bstrTLB,BSTR bstrPSDLL); + HRESULT (WINAPI *GetEventClassesForIID)(ICOMAdminCatalog2 *This,BSTR bstrIID,SAFEARRAY **ppsaVarCLSIDs,SAFEARRAY **ppsaVarProgIDs,SAFEARRAY **ppsaVarDescriptions); + HRESULT (WINAPI *GetCollectionByQuery2)(ICOMAdminCatalog2 *This,BSTR bstrCollectionName,VARIANT *pVarQueryStrings,IDispatch **ppCatalogCollection); + HRESULT (WINAPI *GetApplicationInstanceIDFromProcessID)(ICOMAdminCatalog2 *This,long lProcessID,BSTR *pbstrApplicationInstanceID); + HRESULT (WINAPI *ShutdownApplicationInstances)(ICOMAdminCatalog2 *This,VARIANT *pVarApplicationInstanceID); + HRESULT (WINAPI *PauseApplicationInstances)(ICOMAdminCatalog2 *This,VARIANT *pVarApplicationInstanceID); + HRESULT (WINAPI *ResumeApplicationInstances)(ICOMAdminCatalog2 *This,VARIANT *pVarApplicationInstanceID); + HRESULT (WINAPI *RecycleApplicationInstances)(ICOMAdminCatalog2 *This,VARIANT *pVarApplicationInstanceID,long lReasonCode); + HRESULT (WINAPI *AreApplicationInstancesPaused)(ICOMAdminCatalog2 *This,VARIANT *pVarApplicationInstanceID,VARIANT_BOOL *pVarBoolPaused); + HRESULT (WINAPI *DumpApplicationInstance)(ICOMAdminCatalog2 *This,BSTR bstrApplicationInstanceID,BSTR bstrDirectory,long lMaxImages,BSTR *pbstrDumpFile); + HRESULT (WINAPI *get_IsApplicationInstanceDumpSupported)(ICOMAdminCatalog2 *This,VARIANT_BOOL *pVarBoolDumpSupported); + HRESULT (WINAPI *CreateServiceForApplication)(ICOMAdminCatalog2 *This,BSTR bstrApplicationIDOrName,BSTR bstrServiceName,BSTR bstrStartType,BSTR bstrErrorControl,BSTR bstrDependencies,BSTR bstrRunAs,BSTR bstrPassword,VARIANT_BOOL bDesktopOk); + HRESULT (WINAPI *DeleteServiceForApplication)(ICOMAdminCatalog2 *This,BSTR bstrApplicationIDOrName); + HRESULT (WINAPI *GetPartitionID)(ICOMAdminCatalog2 *This,BSTR bstrApplicationIDOrName,BSTR *pbstrPartitionID); + HRESULT (WINAPI *GetPartitionName)(ICOMAdminCatalog2 *This,BSTR bstrApplicationIDOrName,BSTR *pbstrPartitionName); + HRESULT (WINAPI *put_CurrentPartition)(ICOMAdminCatalog2 *This,BSTR bstrPartitionIDOrName); + HRESULT (WINAPI *get_CurrentPartitionID)(ICOMAdminCatalog2 *This,BSTR *pbstrPartitionID); + HRESULT (WINAPI *get_CurrentPartitionName)(ICOMAdminCatalog2 *This,BSTR *pbstrPartitionName); + HRESULT (WINAPI *get_GlobalPartitionID)(ICOMAdminCatalog2 *This,BSTR *pbstrGlobalPartitionID); + HRESULT (WINAPI *FlushPartitionCache)(ICOMAdminCatalog2 *This); + HRESULT (WINAPI *CopyApplications)(ICOMAdminCatalog2 *This,BSTR bstrSourcePartitionIDOrName,VARIANT *pVarApplicationID,BSTR bstrDestinationPartitionIDOrName); + HRESULT (WINAPI *CopyComponents)(ICOMAdminCatalog2 *This,BSTR bstrSourceApplicationIDOrName,VARIANT *pVarCLSIDOrProgID,BSTR bstrDestinationApplicationIDOrName); + HRESULT (WINAPI *MoveComponents)(ICOMAdminCatalog2 *This,BSTR bstrSourceApplicationIDOrName,VARIANT *pVarCLSIDOrProgID,BSTR bstrDestinationApplicationIDOrName); + HRESULT (WINAPI *AliasComponent)(ICOMAdminCatalog2 *This,BSTR 
bstrSrcApplicationIDOrName,BSTR bstrCLSIDOrProgID,BSTR bstrDestApplicationIDOrName,BSTR bstrNewProgId,BSTR bstrNewClsid); + HRESULT (WINAPI *IsSafeToDelete)(ICOMAdminCatalog2 *This,BSTR bstrDllName,COMAdminInUse *pCOMAdminInUse); + HRESULT (WINAPI *ImportUnconfiguredComponents)(ICOMAdminCatalog2 *This,BSTR bstrApplicationIDOrName,VARIANT *pVarCLSIDOrProgID,VARIANT *pVarComponentType); + HRESULT (WINAPI *PromoteUnconfiguredComponents)(ICOMAdminCatalog2 *This,BSTR bstrApplicationIDOrName,VARIANT *pVarCLSIDOrProgID,VARIANT *pVarComponentType); + HRESULT (WINAPI *ImportComponents)(ICOMAdminCatalog2 *This,BSTR bstrApplicationIDOrName,VARIANT *pVarCLSIDOrProgID,VARIANT *pVarComponentType); + HRESULT (WINAPI *get_Is64BitCatalogServer)(ICOMAdminCatalog2 *This,VARIANT_BOOL *pbIs64Bit); + HRESULT (WINAPI *ExportPartition)(ICOMAdminCatalog2 *This,BSTR bstrPartitionIDOrName,BSTR bstrPartitionFileName,long lOptions); + HRESULT (WINAPI *InstallPartition)(ICOMAdminCatalog2 *This,BSTR bstrFileName,BSTR bstrDestDirectory,long lOptions,BSTR bstrUserID,BSTR bstrPassword,BSTR bstrRSN); + HRESULT (WINAPI *QueryApplicationFile2)(ICOMAdminCatalog2 *This,BSTR bstrApplicationFile,IDispatch **ppFilesForImport); + HRESULT (WINAPI *GetComponentVersionCount)(ICOMAdminCatalog2 *This,BSTR bstrCLSIDOrProgID,long *plVersionCount); + END_INTERFACE + } ICOMAdminCatalog2Vtbl; + struct ICOMAdminCatalog2 { + CONST_VTBL struct ICOMAdminCatalog2Vtbl *lpVtbl; + }; +#ifdef COBJMACROS +#define ICOMAdminCatalog2_QueryInterface(This,riid,ppvObject) (This)->lpVtbl->QueryInterface(This,riid,ppvObject) +#define ICOMAdminCatalog2_AddRef(This) (This)->lpVtbl->AddRef(This) +#define ICOMAdminCatalog2_Release(This) (This)->lpVtbl->Release(This) +#define ICOMAdminCatalog2_GetTypeInfoCount(This,pctinfo) (This)->lpVtbl->GetTypeInfoCount(This,pctinfo) +#define ICOMAdminCatalog2_GetTypeInfo(This,iTInfo,lcid,ppTInfo) (This)->lpVtbl->GetTypeInfo(This,iTInfo,lcid,ppTInfo) +#define ICOMAdminCatalog2_GetIDsOfNames(This,riid,rgszNames,cNames,lcid,rgDispId) (This)->lpVtbl->GetIDsOfNames(This,riid,rgszNames,cNames,lcid,rgDispId) +#define ICOMAdminCatalog2_Invoke(This,dispIdMember,riid,lcid,wFlags,pDispParams,pVarResult,pExcepInfo,puArgErr) (This)->lpVtbl->Invoke(This,dispIdMember,riid,lcid,wFlags,pDispParams,pVarResult,pExcepInfo,puArgErr) +#define ICOMAdminCatalog2_GetCollection(This,bstrCollName,ppCatalogCollection) (This)->lpVtbl->GetCollection(This,bstrCollName,ppCatalogCollection) +#define ICOMAdminCatalog2_Connect(This,bstrCatalogServerName,ppCatalogCollection) (This)->lpVtbl->Connect(This,bstrCatalogServerName,ppCatalogCollection) +#define ICOMAdminCatalog2_get_MajorVersion(This,plMajorVersion) (This)->lpVtbl->get_MajorVersion(This,plMajorVersion) +#define ICOMAdminCatalog2_get_MinorVersion(This,plMinorVersion) (This)->lpVtbl->get_MinorVersion(This,plMinorVersion) +#define ICOMAdminCatalog2_GetCollectionByQuery(This,bstrCollName,ppsaVarQuery,ppCatalogCollection) (This)->lpVtbl->GetCollectionByQuery(This,bstrCollName,ppsaVarQuery,ppCatalogCollection) +#define ICOMAdminCatalog2_ImportComponent(This,bstrApplIDOrName,bstrCLSIDOrProgID) (This)->lpVtbl->ImportComponent(This,bstrApplIDOrName,bstrCLSIDOrProgID) +#define ICOMAdminCatalog2_InstallComponent(This,bstrApplIDOrName,bstrDLL,bstrTLB,bstrPSDLL) (This)->lpVtbl->InstallComponent(This,bstrApplIDOrName,bstrDLL,bstrTLB,bstrPSDLL) +#define ICOMAdminCatalog2_ShutdownApplication(This,bstrApplIDOrName) (This)->lpVtbl->ShutdownApplication(This,bstrApplIDOrName) +#define 
ICOMAdminCatalog2_ExportApplication(This,bstrApplIDOrName,bstrApplicationFile,lOptions) (This)->lpVtbl->ExportApplication(This,bstrApplIDOrName,bstrApplicationFile,lOptions) +#define ICOMAdminCatalog2_InstallApplication(This,bstrApplicationFile,bstrDestinationDirectory,lOptions,bstrUserId,bstrPassword,bstrRSN) (This)->lpVtbl->InstallApplication(This,bstrApplicationFile,bstrDestinationDirectory,lOptions,bstrUserId,bstrPassword,bstrRSN) +#define ICOMAdminCatalog2_StopRouter(This) (This)->lpVtbl->StopRouter(This) +#define ICOMAdminCatalog2_RefreshRouter(This) (This)->lpVtbl->RefreshRouter(This) +#define ICOMAdminCatalog2_StartRouter(This) (This)->lpVtbl->StartRouter(This) +#define ICOMAdminCatalog2_Reserved1(This) (This)->lpVtbl->Reserved1(This) +#define ICOMAdminCatalog2_Reserved2(This) (This)->lpVtbl->Reserved2(This) +#define ICOMAdminCatalog2_InstallMultipleComponents(This,bstrApplIDOrName,ppsaVarFileNames,ppsaVarCLSIDs) (This)->lpVtbl->InstallMultipleComponents(This,bstrApplIDOrName,ppsaVarFileNames,ppsaVarCLSIDs) +#define ICOMAdminCatalog2_GetMultipleComponentsInfo(This,bstrApplIdOrName,ppsaVarFileNames,ppsaVarCLSIDs,ppsaVarClassNames,ppsaVarFileFlags,ppsaVarComponentFlags) (This)->lpVtbl->GetMultipleComponentsInfo(This,bstrApplIdOrName,ppsaVarFileNames,ppsaVarCLSIDs,ppsaVarClassNames,ppsaVarFileFlags,ppsaVarComponentFlags) +#define ICOMAdminCatalog2_RefreshComponents(This) (This)->lpVtbl->RefreshComponents(This) +#define ICOMAdminCatalog2_BackupREGDB(This,bstrBackupFilePath) (This)->lpVtbl->BackupREGDB(This,bstrBackupFilePath) +#define ICOMAdminCatalog2_RestoreREGDB(This,bstrBackupFilePath) (This)->lpVtbl->RestoreREGDB(This,bstrBackupFilePath) +#define ICOMAdminCatalog2_QueryApplicationFile(This,bstrApplicationFile,pbstrApplicationName,pbstrApplicationDescription,pbHasUsers,pbIsProxy,ppsaVarFileNames) (This)->lpVtbl->QueryApplicationFile(This,bstrApplicationFile,pbstrApplicationName,pbstrApplicationDescription,pbHasUsers,pbIsProxy,ppsaVarFileNames) +#define ICOMAdminCatalog2_StartApplication(This,bstrApplIdOrName) (This)->lpVtbl->StartApplication(This,bstrApplIdOrName) +#define ICOMAdminCatalog2_ServiceCheck(This,lService,plStatus) (This)->lpVtbl->ServiceCheck(This,lService,plStatus) +#define ICOMAdminCatalog2_InstallMultipleEventClasses(This,bstrApplIdOrName,ppsaVarFileNames,ppsaVarCLSIDS) (This)->lpVtbl->InstallMultipleEventClasses(This,bstrApplIdOrName,ppsaVarFileNames,ppsaVarCLSIDS) +#define ICOMAdminCatalog2_InstallEventClass(This,bstrApplIdOrName,bstrDLL,bstrTLB,bstrPSDLL) (This)->lpVtbl->InstallEventClass(This,bstrApplIdOrName,bstrDLL,bstrTLB,bstrPSDLL) +#define ICOMAdminCatalog2_GetEventClassesForIID(This,bstrIID,ppsaVarCLSIDs,ppsaVarProgIDs,ppsaVarDescriptions) (This)->lpVtbl->GetEventClassesForIID(This,bstrIID,ppsaVarCLSIDs,ppsaVarProgIDs,ppsaVarDescriptions) +#define ICOMAdminCatalog2_GetCollectionByQuery2(This,bstrCollectionName,pVarQueryStrings,ppCatalogCollection) (This)->lpVtbl->GetCollectionByQuery2(This,bstrCollectionName,pVarQueryStrings,ppCatalogCollection) +#define ICOMAdminCatalog2_GetApplicationInstanceIDFromProcessID(This,lProcessID,pbstrApplicationInstanceID) (This)->lpVtbl->GetApplicationInstanceIDFromProcessID(This,lProcessID,pbstrApplicationInstanceID) +#define ICOMAdminCatalog2_ShutdownApplicationInstances(This,pVarApplicationInstanceID) (This)->lpVtbl->ShutdownApplicationInstances(This,pVarApplicationInstanceID) +#define ICOMAdminCatalog2_PauseApplicationInstances(This,pVarApplicationInstanceID) 
(This)->lpVtbl->PauseApplicationInstances(This,pVarApplicationInstanceID) +#define ICOMAdminCatalog2_ResumeApplicationInstances(This,pVarApplicationInstanceID) (This)->lpVtbl->ResumeApplicationInstances(This,pVarApplicationInstanceID) +#define ICOMAdminCatalog2_RecycleApplicationInstances(This,pVarApplicationInstanceID,lReasonCode) (This)->lpVtbl->RecycleApplicationInstances(This,pVarApplicationInstanceID,lReasonCode) +#define ICOMAdminCatalog2_AreApplicationInstancesPaused(This,pVarApplicationInstanceID,pVarBoolPaused) (This)->lpVtbl->AreApplicationInstancesPaused(This,pVarApplicationInstanceID,pVarBoolPaused) +#define ICOMAdminCatalog2_DumpApplicationInstance(This,bstrApplicationInstanceID,bstrDirectory,lMaxImages,pbstrDumpFile) (This)->lpVtbl->DumpApplicationInstance(This,bstrApplicationInstanceID,bstrDirectory,lMaxImages,pbstrDumpFile) +#define ICOMAdminCatalog2_get_IsApplicationInstanceDumpSupported(This,pVarBoolDumpSupported) (This)->lpVtbl->get_IsApplicationInstanceDumpSupported(This,pVarBoolDumpSupported) +#define ICOMAdminCatalog2_CreateServiceForApplication(This,bstrApplicationIDOrName,bstrServiceName,bstrStartType,bstrErrorControl,bstrDependencies,bstrRunAs,bstrPassword,bDesktopOk) (This)->lpVtbl->CreateServiceForApplication(This,bstrApplicationIDOrName,bstrServiceName,bstrStartType,bstrErrorControl,bstrDependencies,bstrRunAs,bstrPassword,bDesktopOk) +#define ICOMAdminCatalog2_DeleteServiceForApplication(This,bstrApplicationIDOrName) (This)->lpVtbl->DeleteServiceForApplication(This,bstrApplicationIDOrName) +#define ICOMAdminCatalog2_GetPartitionID(This,bstrApplicationIDOrName,pbstrPartitionID) (This)->lpVtbl->GetPartitionID(This,bstrApplicationIDOrName,pbstrPartitionID) +#define ICOMAdminCatalog2_GetPartitionName(This,bstrApplicationIDOrName,pbstrPartitionName) (This)->lpVtbl->GetPartitionName(This,bstrApplicationIDOrName,pbstrPartitionName) +#define ICOMAdminCatalog2_put_CurrentPartition(This,bstrPartitionIDOrName) (This)->lpVtbl->put_CurrentPartition(This,bstrPartitionIDOrName) +#define ICOMAdminCatalog2_get_CurrentPartitionID(This,pbstrPartitionID) (This)->lpVtbl->get_CurrentPartitionID(This,pbstrPartitionID) +#define ICOMAdminCatalog2_get_CurrentPartitionName(This,pbstrPartitionName) (This)->lpVtbl->get_CurrentPartitionName(This,pbstrPartitionName) +#define ICOMAdminCatalog2_get_GlobalPartitionID(This,pbstrGlobalPartitionID) (This)->lpVtbl->get_GlobalPartitionID(This,pbstrGlobalPartitionID) +#define ICOMAdminCatalog2_FlushPartitionCache(This) (This)->lpVtbl->FlushPartitionCache(This) +#define ICOMAdminCatalog2_CopyApplications(This,bstrSourcePartitionIDOrName,pVarApplicationID,bstrDestinationPartitionIDOrName) (This)->lpVtbl->CopyApplications(This,bstrSourcePartitionIDOrName,pVarApplicationID,bstrDestinationPartitionIDOrName) +#define ICOMAdminCatalog2_CopyComponents(This,bstrSourceApplicationIDOrName,pVarCLSIDOrProgID,bstrDestinationApplicationIDOrName) (This)->lpVtbl->CopyComponents(This,bstrSourceApplicationIDOrName,pVarCLSIDOrProgID,bstrDestinationApplicationIDOrName) +#define ICOMAdminCatalog2_MoveComponents(This,bstrSourceApplicationIDOrName,pVarCLSIDOrProgID,bstrDestinationApplicationIDOrName) (This)->lpVtbl->MoveComponents(This,bstrSourceApplicationIDOrName,pVarCLSIDOrProgID,bstrDestinationApplicationIDOrName) +#define ICOMAdminCatalog2_AliasComponent(This,bstrSrcApplicationIDOrName,bstrCLSIDOrProgID,bstrDestApplicationIDOrName,bstrNewProgId,bstrNewClsid) 
(This)->lpVtbl->AliasComponent(This,bstrSrcApplicationIDOrName,bstrCLSIDOrProgID,bstrDestApplicationIDOrName,bstrNewProgId,bstrNewClsid) +#define ICOMAdminCatalog2_IsSafeToDelete(This,bstrDllName,pCOMAdminInUse) (This)->lpVtbl->IsSafeToDelete(This,bstrDllName,pCOMAdminInUse) +#define ICOMAdminCatalog2_ImportUnconfiguredComponents(This,bstrApplicationIDOrName,pVarCLSIDOrProgID,pVarComponentType) (This)->lpVtbl->ImportUnconfiguredComponents(This,bstrApplicationIDOrName,pVarCLSIDOrProgID,pVarComponentType) +#define ICOMAdminCatalog2_PromoteUnconfiguredComponents(This,bstrApplicationIDOrName,pVarCLSIDOrProgID,pVarComponentType) (This)->lpVtbl->PromoteUnconfiguredComponents(This,bstrApplicationIDOrName,pVarCLSIDOrProgID,pVarComponentType) +#define ICOMAdminCatalog2_ImportComponents(This,bstrApplicationIDOrName,pVarCLSIDOrProgID,pVarComponentType) (This)->lpVtbl->ImportComponents(This,bstrApplicationIDOrName,pVarCLSIDOrProgID,pVarComponentType) +#define ICOMAdminCatalog2_get_Is64BitCatalogServer(This,pbIs64Bit) (This)->lpVtbl->get_Is64BitCatalogServer(This,pbIs64Bit) +#define ICOMAdminCatalog2_ExportPartition(This,bstrPartitionIDOrName,bstrPartitionFileName,lOptions) (This)->lpVtbl->ExportPartition(This,bstrPartitionIDOrName,bstrPartitionFileName,lOptions) +#define ICOMAdminCatalog2_InstallPartition(This,bstrFileName,bstrDestDirectory,lOptions,bstrUserID,bstrPassword,bstrRSN) (This)->lpVtbl->InstallPartition(This,bstrFileName,bstrDestDirectory,lOptions,bstrUserID,bstrPassword,bstrRSN) +#define ICOMAdminCatalog2_QueryApplicationFile2(This,bstrApplicationFile,ppFilesForImport) (This)->lpVtbl->QueryApplicationFile2(This,bstrApplicationFile,ppFilesForImport) +#define ICOMAdminCatalog2_GetComponentVersionCount(This,bstrCLSIDOrProgID,plVersionCount) (This)->lpVtbl->GetComponentVersionCount(This,bstrCLSIDOrProgID,plVersionCount) +#endif +#endif + HRESULT WINAPI ICOMAdminCatalog2_GetCollectionByQuery2_Proxy(ICOMAdminCatalog2 *This,BSTR bstrCollectionName,VARIANT *pVarQueryStrings,IDispatch **ppCatalogCollection); + void __RPC_STUB ICOMAdminCatalog2_GetCollectionByQuery2_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog2_GetApplicationInstanceIDFromProcessID_Proxy(ICOMAdminCatalog2 *This,long lProcessID,BSTR *pbstrApplicationInstanceID); + void __RPC_STUB ICOMAdminCatalog2_GetApplicationInstanceIDFromProcessID_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog2_ShutdownApplicationInstances_Proxy(ICOMAdminCatalog2 *This,VARIANT *pVarApplicationInstanceID); + void __RPC_STUB ICOMAdminCatalog2_ShutdownApplicationInstances_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog2_PauseApplicationInstances_Proxy(ICOMAdminCatalog2 *This,VARIANT *pVarApplicationInstanceID); + void __RPC_STUB ICOMAdminCatalog2_PauseApplicationInstances_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog2_ResumeApplicationInstances_Proxy(ICOMAdminCatalog2 *This,VARIANT *pVarApplicationInstanceID); + void __RPC_STUB ICOMAdminCatalog2_ResumeApplicationInstances_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog2_RecycleApplicationInstances_Proxy(ICOMAdminCatalog2 
*This,VARIANT *pVarApplicationInstanceID,long lReasonCode); + void __RPC_STUB ICOMAdminCatalog2_RecycleApplicationInstances_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog2_AreApplicationInstancesPaused_Proxy(ICOMAdminCatalog2 *This,VARIANT *pVarApplicationInstanceID,VARIANT_BOOL *pVarBoolPaused); + void __RPC_STUB ICOMAdminCatalog2_AreApplicationInstancesPaused_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog2_DumpApplicationInstance_Proxy(ICOMAdminCatalog2 *This,BSTR bstrApplicationInstanceID,BSTR bstrDirectory,long lMaxImages,BSTR *pbstrDumpFile); + void __RPC_STUB ICOMAdminCatalog2_DumpApplicationInstance_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog2_get_IsApplicationInstanceDumpSupported_Proxy(ICOMAdminCatalog2 *This,VARIANT_BOOL *pVarBoolDumpSupported); + void __RPC_STUB ICOMAdminCatalog2_get_IsApplicationInstanceDumpSupported_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog2_CreateServiceForApplication_Proxy(ICOMAdminCatalog2 *This,BSTR bstrApplicationIDOrName,BSTR bstrServiceName,BSTR bstrStartType,BSTR bstrErrorControl,BSTR bstrDependencies,BSTR bstrRunAs,BSTR bstrPassword,VARIANT_BOOL bDesktopOk); + void __RPC_STUB ICOMAdminCatalog2_CreateServiceForApplication_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog2_DeleteServiceForApplication_Proxy(ICOMAdminCatalog2 *This,BSTR bstrApplicationIDOrName); + void __RPC_STUB ICOMAdminCatalog2_DeleteServiceForApplication_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog2_GetPartitionID_Proxy(ICOMAdminCatalog2 *This,BSTR bstrApplicationIDOrName,BSTR *pbstrPartitionID); + void __RPC_STUB ICOMAdminCatalog2_GetPartitionID_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog2_GetPartitionName_Proxy(ICOMAdminCatalog2 *This,BSTR bstrApplicationIDOrName,BSTR *pbstrPartitionName); + void __RPC_STUB ICOMAdminCatalog2_GetPartitionName_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog2_put_CurrentPartition_Proxy(ICOMAdminCatalog2 *This,BSTR bstrPartitionIDOrName); + void __RPC_STUB ICOMAdminCatalog2_put_CurrentPartition_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog2_get_CurrentPartitionID_Proxy(ICOMAdminCatalog2 *This,BSTR *pbstrPartitionID); + void __RPC_STUB ICOMAdminCatalog2_get_CurrentPartitionID_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog2_get_CurrentPartitionName_Proxy(ICOMAdminCatalog2 *This,BSTR *pbstrPartitionName); + void __RPC_STUB ICOMAdminCatalog2_get_CurrentPartitionName_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog2_get_GlobalPartitionID_Proxy(ICOMAdminCatalog2 *This,BSTR 
*pbstrGlobalPartitionID); + void __RPC_STUB ICOMAdminCatalog2_get_GlobalPartitionID_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog2_FlushPartitionCache_Proxy(ICOMAdminCatalog2 *This); + void __RPC_STUB ICOMAdminCatalog2_FlushPartitionCache_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog2_CopyApplications_Proxy(ICOMAdminCatalog2 *This,BSTR bstrSourcePartitionIDOrName,VARIANT *pVarApplicationID,BSTR bstrDestinationPartitionIDOrName); + void __RPC_STUB ICOMAdminCatalog2_CopyApplications_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog2_CopyComponents_Proxy(ICOMAdminCatalog2 *This,BSTR bstrSourceApplicationIDOrName,VARIANT *pVarCLSIDOrProgID,BSTR bstrDestinationApplicationIDOrName); + void __RPC_STUB ICOMAdminCatalog2_CopyComponents_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog2_MoveComponents_Proxy(ICOMAdminCatalog2 *This,BSTR bstrSourceApplicationIDOrName,VARIANT *pVarCLSIDOrProgID,BSTR bstrDestinationApplicationIDOrName); + void __RPC_STUB ICOMAdminCatalog2_MoveComponents_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog2_AliasComponent_Proxy(ICOMAdminCatalog2 *This,BSTR bstrSrcApplicationIDOrName,BSTR bstrCLSIDOrProgID,BSTR bstrDestApplicationIDOrName,BSTR bstrNewProgId,BSTR bstrNewClsid); + void __RPC_STUB ICOMAdminCatalog2_AliasComponent_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog2_IsSafeToDelete_Proxy(ICOMAdminCatalog2 *This,BSTR bstrDllName,COMAdminInUse *pCOMAdminInUse); + void __RPC_STUB ICOMAdminCatalog2_IsSafeToDelete_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog2_ImportUnconfiguredComponents_Proxy(ICOMAdminCatalog2 *This,BSTR bstrApplicationIDOrName,VARIANT *pVarCLSIDOrProgID,VARIANT *pVarComponentType); + void __RPC_STUB ICOMAdminCatalog2_ImportUnconfiguredComponents_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog2_PromoteUnconfiguredComponents_Proxy(ICOMAdminCatalog2 *This,BSTR bstrApplicationIDOrName,VARIANT *pVarCLSIDOrProgID,VARIANT *pVarComponentType); + void __RPC_STUB ICOMAdminCatalog2_PromoteUnconfiguredComponents_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog2_ImportComponents_Proxy(ICOMAdminCatalog2 *This,BSTR bstrApplicationIDOrName,VARIANT *pVarCLSIDOrProgID,VARIANT *pVarComponentType); + void __RPC_STUB ICOMAdminCatalog2_ImportComponents_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog2_get_Is64BitCatalogServer_Proxy(ICOMAdminCatalog2 *This,VARIANT_BOOL *pbIs64Bit); + void __RPC_STUB ICOMAdminCatalog2_get_Is64BitCatalogServer_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI 
ICOMAdminCatalog2_ExportPartition_Proxy(ICOMAdminCatalog2 *This,BSTR bstrPartitionIDOrName,BSTR bstrPartitionFileName,long lOptions); + void __RPC_STUB ICOMAdminCatalog2_ExportPartition_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog2_InstallPartition_Proxy(ICOMAdminCatalog2 *This,BSTR bstrFileName,BSTR bstrDestDirectory,long lOptions,BSTR bstrUserID,BSTR bstrPassword,BSTR bstrRSN); + void __RPC_STUB ICOMAdminCatalog2_InstallPartition_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog2_QueryApplicationFile2_Proxy(ICOMAdminCatalog2 *This,BSTR bstrApplicationFile,IDispatch **ppFilesForImport); + void __RPC_STUB ICOMAdminCatalog2_QueryApplicationFile2_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICOMAdminCatalog2_GetComponentVersionCount_Proxy(ICOMAdminCatalog2 *This,BSTR bstrCLSIDOrProgID,long *plVersionCount); + void __RPC_STUB ICOMAdminCatalog2_GetComponentVersionCount_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); +#endif + +#ifndef __ICatalogObject_INTERFACE_DEFINED__ +#define __ICatalogObject_INTERFACE_DEFINED__ + EXTERN_C const IID IID_ICatalogObject; +#if defined(__cplusplus) && !defined(CINTERFACE) + struct ICatalogObject : public IDispatch { + public: + virtual HRESULT WINAPI get_Value(BSTR bstrPropName,VARIANT *pvarRetVal) = 0; + virtual HRESULT WINAPI put_Value(BSTR bstrPropName,VARIANT val) = 0; + virtual HRESULT WINAPI get_Key(VARIANT *pvarRetVal) = 0; + virtual HRESULT WINAPI get_Name(VARIANT *pvarRetVal) = 0; + virtual HRESULT WINAPI IsPropertyReadOnly(BSTR bstrPropName,VARIANT_BOOL *pbRetVal) = 0; + virtual HRESULT WINAPI get_Valid(VARIANT_BOOL *pbRetVal) = 0; + virtual HRESULT WINAPI IsPropertyWriteOnly(BSTR bstrPropName,VARIANT_BOOL *pbRetVal) = 0; + }; +#else + typedef struct ICatalogObjectVtbl { + BEGIN_INTERFACE + HRESULT (WINAPI *QueryInterface)(ICatalogObject *This,REFIID riid,void **ppvObject); + ULONG (WINAPI *AddRef)(ICatalogObject *This); + ULONG (WINAPI *Release)(ICatalogObject *This); + HRESULT (WINAPI *GetTypeInfoCount)(ICatalogObject *This,UINT *pctinfo); + HRESULT (WINAPI *GetTypeInfo)(ICatalogObject *This,UINT iTInfo,LCID lcid,ITypeInfo **ppTInfo); + HRESULT (WINAPI *GetIDsOfNames)(ICatalogObject *This,REFIID riid,LPOLESTR *rgszNames,UINT cNames,LCID lcid,DISPID *rgDispId); + HRESULT (WINAPI *Invoke)(ICatalogObject *This,DISPID dispIdMember,REFIID riid,LCID lcid,WORD wFlags,DISPPARAMS *pDispParams,VARIANT *pVarResult,EXCEPINFO *pExcepInfo,UINT *puArgErr); + HRESULT (WINAPI *get_Value)(ICatalogObject *This,BSTR bstrPropName,VARIANT *pvarRetVal); + HRESULT (WINAPI *put_Value)(ICatalogObject *This,BSTR bstrPropName,VARIANT val); + HRESULT (WINAPI *get_Key)(ICatalogObject *This,VARIANT *pvarRetVal); + HRESULT (WINAPI *get_Name)(ICatalogObject *This,VARIANT *pvarRetVal); + HRESULT (WINAPI *IsPropertyReadOnly)(ICatalogObject *This,BSTR bstrPropName,VARIANT_BOOL *pbRetVal); + HRESULT (WINAPI *get_Valid)(ICatalogObject *This,VARIANT_BOOL *pbRetVal); + HRESULT (WINAPI *IsPropertyWriteOnly)(ICatalogObject *This,BSTR bstrPropName,VARIANT_BOOL *pbRetVal); + END_INTERFACE + } ICatalogObjectVtbl; + struct ICatalogObject { + CONST_VTBL struct ICatalogObjectVtbl *lpVtbl; + }; +#ifdef COBJMACROS +#define 
ICatalogObject_QueryInterface(This,riid,ppvObject) (This)->lpVtbl->QueryInterface(This,riid,ppvObject) +#define ICatalogObject_AddRef(This) (This)->lpVtbl->AddRef(This) +#define ICatalogObject_Release(This) (This)->lpVtbl->Release(This) +#define ICatalogObject_GetTypeInfoCount(This,pctinfo) (This)->lpVtbl->GetTypeInfoCount(This,pctinfo) +#define ICatalogObject_GetTypeInfo(This,iTInfo,lcid,ppTInfo) (This)->lpVtbl->GetTypeInfo(This,iTInfo,lcid,ppTInfo) +#define ICatalogObject_GetIDsOfNames(This,riid,rgszNames,cNames,lcid,rgDispId) (This)->lpVtbl->GetIDsOfNames(This,riid,rgszNames,cNames,lcid,rgDispId) +#define ICatalogObject_Invoke(This,dispIdMember,riid,lcid,wFlags,pDispParams,pVarResult,pExcepInfo,puArgErr) (This)->lpVtbl->Invoke(This,dispIdMember,riid,lcid,wFlags,pDispParams,pVarResult,pExcepInfo,puArgErr) +#define ICatalogObject_get_Value(This,bstrPropName,pvarRetVal) (This)->lpVtbl->get_Value(This,bstrPropName,pvarRetVal) +#define ICatalogObject_put_Value(This,bstrPropName,val) (This)->lpVtbl->put_Value(This,bstrPropName,val) +#define ICatalogObject_get_Key(This,pvarRetVal) (This)->lpVtbl->get_Key(This,pvarRetVal) +#define ICatalogObject_get_Name(This,pvarRetVal) (This)->lpVtbl->get_Name(This,pvarRetVal) +#define ICatalogObject_IsPropertyReadOnly(This,bstrPropName,pbRetVal) (This)->lpVtbl->IsPropertyReadOnly(This,bstrPropName,pbRetVal) +#define ICatalogObject_get_Valid(This,pbRetVal) (This)->lpVtbl->get_Valid(This,pbRetVal) +#define ICatalogObject_IsPropertyWriteOnly(This,bstrPropName,pbRetVal) (This)->lpVtbl->IsPropertyWriteOnly(This,bstrPropName,pbRetVal) +#endif +#endif + HRESULT WINAPI ICatalogObject_get_Value_Proxy(ICatalogObject *This,BSTR bstrPropName,VARIANT *pvarRetVal); + void __RPC_STUB ICatalogObject_get_Value_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICatalogObject_put_Value_Proxy(ICatalogObject *This,BSTR bstrPropName,VARIANT val); + void __RPC_STUB ICatalogObject_put_Value_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICatalogObject_get_Key_Proxy(ICatalogObject *This,VARIANT *pvarRetVal); + void __RPC_STUB ICatalogObject_get_Key_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICatalogObject_get_Name_Proxy(ICatalogObject *This,VARIANT *pvarRetVal); + void __RPC_STUB ICatalogObject_get_Name_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICatalogObject_IsPropertyReadOnly_Proxy(ICatalogObject *This,BSTR bstrPropName,VARIANT_BOOL *pbRetVal); + void __RPC_STUB ICatalogObject_IsPropertyReadOnly_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICatalogObject_get_Valid_Proxy(ICatalogObject *This,VARIANT_BOOL *pbRetVal); + void __RPC_STUB ICatalogObject_get_Valid_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICatalogObject_IsPropertyWriteOnly_Proxy(ICatalogObject *This,BSTR bstrPropName,VARIANT_BOOL *pbRetVal); + void __RPC_STUB ICatalogObject_IsPropertyWriteOnly_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); +#endif + +#ifndef __ICatalogCollection_INTERFACE_DEFINED__ +#define __ICatalogCollection_INTERFACE_DEFINED__ + 
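+ /* Usage sketch (illustrative only): the ICatalogCollection/ICatalogObject pair declared
+  * in this section is how catalog data is actually enumerated. Assuming "cat" is an
+  * ICOMAdminCatalog obtained as in the earlier sketch, and with error checking and the
+  * final releases omitted for brevity:
+  *
+  *   IDispatch *disp = NULL;
+  *   ICatalogCollection *apps = NULL;
+  *   BSTR name = SysAllocString(L"Applications");
+  *   long i, count = 0;
+  *
+  *   ICOMAdminCatalog_GetCollection(cat, name, &disp);
+  *   IDispatch_QueryInterface(disp, &IID_ICatalogCollection, (void **)&apps);
+  *   ICatalogCollection_Populate(apps);            // fetch the rows from the catalog
+  *   ICatalogCollection_get_Count(apps, &count);
+  *   for (i = 0; i < count; i++) {
+  *      IDispatch *item = NULL;
+  *      ICatalogObject *obj = NULL;
+  *      VARIANT v;
+  *      VariantInit(&v);
+  *      ICatalogCollection_get_Item(apps, i, &item);
+  *      IDispatch_QueryInterface(item, &IID_ICatalogObject, (void **)&obj);
+  *      ICatalogObject_get_Name(obj, &v);          // v.bstrVal holds the object name
+  *      VariantClear(&v);
+  *      ICatalogObject_Release(obj);
+  *      IDispatch_Release(item);
+  *   }
+  */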
EXTERN_C const IID IID_ICatalogCollection; +#if defined(__cplusplus) && !defined(CINTERFACE) + struct ICatalogCollection : public IDispatch { + public: + virtual HRESULT WINAPI get__NewEnum(IUnknown **ppEnumVariant) = 0; + virtual HRESULT WINAPI get_Item(long lIndex,IDispatch **ppCatalogObject) = 0; + virtual HRESULT WINAPI get_Count(long *plObjectCount) = 0; + virtual HRESULT WINAPI Remove(long lIndex) = 0; + virtual HRESULT WINAPI Add(IDispatch **ppCatalogObject) = 0; + virtual HRESULT WINAPI Populate(void) = 0; + virtual HRESULT WINAPI SaveChanges(long *pcChanges) = 0; + virtual HRESULT WINAPI GetCollection(BSTR bstrCollName,VARIANT varObjectKey,IDispatch **ppCatalogCollection) = 0; + virtual HRESULT WINAPI get_Name(VARIANT *pVarNamel) = 0; + virtual HRESULT WINAPI get_AddEnabled(VARIANT_BOOL *pVarBool) = 0; + virtual HRESULT WINAPI get_RemoveEnabled(VARIANT_BOOL *pVarBool) = 0; + virtual HRESULT WINAPI GetUtilInterface(IDispatch **ppIDispatch) = 0; + virtual HRESULT WINAPI get_DataStoreMajorVersion(long *plMajorVersion) = 0; + virtual HRESULT WINAPI get_DataStoreMinorVersion(long *plMinorVersionl) = 0; + virtual HRESULT WINAPI PopulateByKey(SAFEARRAY *psaKeys) = 0; + virtual HRESULT WINAPI PopulateByQuery(BSTR bstrQueryString,long lQueryType) = 0; + }; +#else + typedef struct ICatalogCollectionVtbl { + BEGIN_INTERFACE + HRESULT (WINAPI *QueryInterface)(ICatalogCollection *This,REFIID riid,void **ppvObject); + ULONG (WINAPI *AddRef)(ICatalogCollection *This); + ULONG (WINAPI *Release)(ICatalogCollection *This); + HRESULT (WINAPI *GetTypeInfoCount)(ICatalogCollection *This,UINT *pctinfo); + HRESULT (WINAPI *GetTypeInfo)(ICatalogCollection *This,UINT iTInfo,LCID lcid,ITypeInfo **ppTInfo); + HRESULT (WINAPI *GetIDsOfNames)(ICatalogCollection *This,REFIID riid,LPOLESTR *rgszNames,UINT cNames,LCID lcid,DISPID *rgDispId); + HRESULT (WINAPI *Invoke)(ICatalogCollection *This,DISPID dispIdMember,REFIID riid,LCID lcid,WORD wFlags,DISPPARAMS *pDispParams,VARIANT *pVarResult,EXCEPINFO *pExcepInfo,UINT *puArgErr); + HRESULT (WINAPI *get__NewEnum)(ICatalogCollection *This,IUnknown **ppEnumVariant); + HRESULT (WINAPI *get_Item)(ICatalogCollection *This,long lIndex,IDispatch **ppCatalogObject); + HRESULT (WINAPI *get_Count)(ICatalogCollection *This,long *plObjectCount); + HRESULT (WINAPI *Remove)(ICatalogCollection *This,long lIndex); + HRESULT (WINAPI *Add)(ICatalogCollection *This,IDispatch **ppCatalogObject); + HRESULT (WINAPI *Populate)(ICatalogCollection *This); + HRESULT (WINAPI *SaveChanges)(ICatalogCollection *This,long *pcChanges); + HRESULT (WINAPI *GetCollection)(ICatalogCollection *This,BSTR bstrCollName,VARIANT varObjectKey,IDispatch **ppCatalogCollection); + HRESULT (WINAPI *get_Name)(ICatalogCollection *This,VARIANT *pVarNamel); + HRESULT (WINAPI *get_AddEnabled)(ICatalogCollection *This,VARIANT_BOOL *pVarBool); + HRESULT (WINAPI *get_RemoveEnabled)(ICatalogCollection *This,VARIANT_BOOL *pVarBool); + HRESULT (WINAPI *GetUtilInterface)(ICatalogCollection *This,IDispatch **ppIDispatch); + HRESULT (WINAPI *get_DataStoreMajorVersion)(ICatalogCollection *This,long *plMajorVersion); + HRESULT (WINAPI *get_DataStoreMinorVersion)(ICatalogCollection *This,long *plMinorVersionl); + HRESULT (WINAPI *PopulateByKey)(ICatalogCollection *This,SAFEARRAY *psaKeys); + HRESULT (WINAPI *PopulateByQuery)(ICatalogCollection *This,BSTR bstrQueryString,long lQueryType); + END_INTERFACE + } ICatalogCollectionVtbl; + struct ICatalogCollection { + CONST_VTBL struct ICatalogCollectionVtbl *lpVtbl; + }; +#ifdef 
COBJMACROS +#define ICatalogCollection_QueryInterface(This,riid,ppvObject) (This)->lpVtbl->QueryInterface(This,riid,ppvObject) +#define ICatalogCollection_AddRef(This) (This)->lpVtbl->AddRef(This) +#define ICatalogCollection_Release(This) (This)->lpVtbl->Release(This) +#define ICatalogCollection_GetTypeInfoCount(This,pctinfo) (This)->lpVtbl->GetTypeInfoCount(This,pctinfo) +#define ICatalogCollection_GetTypeInfo(This,iTInfo,lcid,ppTInfo) (This)->lpVtbl->GetTypeInfo(This,iTInfo,lcid,ppTInfo) +#define ICatalogCollection_GetIDsOfNames(This,riid,rgszNames,cNames,lcid,rgDispId) (This)->lpVtbl->GetIDsOfNames(This,riid,rgszNames,cNames,lcid,rgDispId) +#define ICatalogCollection_Invoke(This,dispIdMember,riid,lcid,wFlags,pDispParams,pVarResult,pExcepInfo,puArgErr) (This)->lpVtbl->Invoke(This,dispIdMember,riid,lcid,wFlags,pDispParams,pVarResult,pExcepInfo,puArgErr) +#define ICatalogCollection_get__NewEnum(This,ppEnumVariant) (This)->lpVtbl->get__NewEnum(This,ppEnumVariant) +#define ICatalogCollection_get_Item(This,lIndex,ppCatalogObject) (This)->lpVtbl->get_Item(This,lIndex,ppCatalogObject) +#define ICatalogCollection_get_Count(This,plObjectCount) (This)->lpVtbl->get_Count(This,plObjectCount) +#define ICatalogCollection_Remove(This,lIndex) (This)->lpVtbl->Remove(This,lIndex) +#define ICatalogCollection_Add(This,ppCatalogObject) (This)->lpVtbl->Add(This,ppCatalogObject) +#define ICatalogCollection_Populate(This) (This)->lpVtbl->Populate(This) +#define ICatalogCollection_SaveChanges(This,pcChanges) (This)->lpVtbl->SaveChanges(This,pcChanges) +#define ICatalogCollection_GetCollection(This,bstrCollName,varObjectKey,ppCatalogCollection) (This)->lpVtbl->GetCollection(This,bstrCollName,varObjectKey,ppCatalogCollection) +#define ICatalogCollection_get_Name(This,pVarNamel) (This)->lpVtbl->get_Name(This,pVarNamel) +#define ICatalogCollection_get_AddEnabled(This,pVarBool) (This)->lpVtbl->get_AddEnabled(This,pVarBool) +#define ICatalogCollection_get_RemoveEnabled(This,pVarBool) (This)->lpVtbl->get_RemoveEnabled(This,pVarBool) +#define ICatalogCollection_GetUtilInterface(This,ppIDispatch) (This)->lpVtbl->GetUtilInterface(This,ppIDispatch) +#define ICatalogCollection_get_DataStoreMajorVersion(This,plMajorVersion) (This)->lpVtbl->get_DataStoreMajorVersion(This,plMajorVersion) +#define ICatalogCollection_get_DataStoreMinorVersion(This,plMinorVersionl) (This)->lpVtbl->get_DataStoreMinorVersion(This,plMinorVersionl) +#define ICatalogCollection_PopulateByKey(This,psaKeys) (This)->lpVtbl->PopulateByKey(This,psaKeys) +#define ICatalogCollection_PopulateByQuery(This,bstrQueryString,lQueryType) (This)->lpVtbl->PopulateByQuery(This,bstrQueryString,lQueryType) +#endif +#endif + HRESULT WINAPI ICatalogCollection_get__NewEnum_Proxy(ICatalogCollection *This,IUnknown **ppEnumVariant); + void __RPC_STUB ICatalogCollection_get__NewEnum_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICatalogCollection_get_Item_Proxy(ICatalogCollection *This,long lIndex,IDispatch **ppCatalogObject); + void __RPC_STUB ICatalogCollection_get_Item_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICatalogCollection_get_Count_Proxy(ICatalogCollection *This,long *plObjectCount); + void __RPC_STUB ICatalogCollection_get_Count_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI 
ICatalogCollection_Remove_Proxy(ICatalogCollection *This,long lIndex); + void __RPC_STUB ICatalogCollection_Remove_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICatalogCollection_Add_Proxy(ICatalogCollection *This,IDispatch **ppCatalogObject); + void __RPC_STUB ICatalogCollection_Add_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICatalogCollection_Populate_Proxy(ICatalogCollection *This); + void __RPC_STUB ICatalogCollection_Populate_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICatalogCollection_SaveChanges_Proxy(ICatalogCollection *This,long *pcChanges); + void __RPC_STUB ICatalogCollection_SaveChanges_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICatalogCollection_GetCollection_Proxy(ICatalogCollection *This,BSTR bstrCollName,VARIANT varObjectKey,IDispatch **ppCatalogCollection); + void __RPC_STUB ICatalogCollection_GetCollection_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICatalogCollection_get_Name_Proxy(ICatalogCollection *This,VARIANT *pVarNamel); + void __RPC_STUB ICatalogCollection_get_Name_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICatalogCollection_get_AddEnabled_Proxy(ICatalogCollection *This,VARIANT_BOOL *pVarBool); + void __RPC_STUB ICatalogCollection_get_AddEnabled_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICatalogCollection_get_RemoveEnabled_Proxy(ICatalogCollection *This,VARIANT_BOOL *pVarBool); + void __RPC_STUB ICatalogCollection_get_RemoveEnabled_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICatalogCollection_GetUtilInterface_Proxy(ICatalogCollection *This,IDispatch **ppIDispatch); + void __RPC_STUB ICatalogCollection_GetUtilInterface_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICatalogCollection_get_DataStoreMajorVersion_Proxy(ICatalogCollection *This,long *plMajorVersion); + void __RPC_STUB ICatalogCollection_get_DataStoreMajorVersion_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICatalogCollection_get_DataStoreMinorVersion_Proxy(ICatalogCollection *This,long *plMinorVersionl); + void __RPC_STUB ICatalogCollection_get_DataStoreMinorVersion_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICatalogCollection_PopulateByKey_Proxy(ICatalogCollection *This,SAFEARRAY *psaKeys); + void __RPC_STUB ICatalogCollection_PopulateByKey_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); + HRESULT WINAPI ICatalogCollection_PopulateByQuery_Proxy(ICatalogCollection *This,BSTR bstrQueryString,long lQueryType); + void __RPC_STUB ICatalogCollection_PopulateByQuery_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); +#endif + +#ifndef 
__COMAdmin_LIBRARY_DEFINED__ +#define __COMAdmin_LIBRARY_DEFINED__ + typedef enum COMAdminComponentType { + COMAdmin32BitComponent = 0x1,COMAdmin64BitComponent = 0x2 + } COMAdminComponentType; + + typedef enum COMAdminApplicationInstallOptions { + COMAdminInstallNoUsers = 0,COMAdminInstallUsers = 1,COMAdminInstallForceOverwriteOfFiles = 2 + } COMAdminApplicationInstallOptions; + + typedef enum COMAdminApplicationExportOptions { + COMAdminExportNoUsers = 0,COMAdminExportUsers = 1,COMAdminExportApplicationProxy = 2,COMAdminExportForceOverwriteOfFiles = 4, + COMAdminExportIn10Format = 16 + } COMAdminApplicationExportOptions; + + typedef enum COMAdminThreadingModels { + COMAdminThreadingModelApartment = 0,COMAdminThreadingModelFree = 1,COMAdminThreadingModelMain = 2,COMAdminThreadingModelBoth = 3, + COMAdminThreadingModelNeutral = 4,COMAdminThreadingModelNotSpecified = 5 + } COMAdminThreadingModels; + + typedef enum COMAdminTransactionOptions { + COMAdminTransactionIgnored = 0,COMAdminTransactionNone = 1,COMAdminTransactionSupported = 2,COMAdminTransactionRequired = 3, + COMAdminTransactionRequiresNew = 4 + } COMAdminTransactionOptions; + + typedef enum COMAdminTxIsolationLevelOptions { + COMAdminTxIsolationLevelAny = 0,COMAdminTxIsolationLevelReadUnCommitted = COMAdminTxIsolationLevelAny + 1, + COMAdminTxIsolationLevelReadCommitted = COMAdminTxIsolationLevelReadUnCommitted + 1, + COMAdminTxIsolationLevelRepeatableRead = COMAdminTxIsolationLevelReadCommitted + 1, + COMAdminTxIsolationLevelSerializable = COMAdminTxIsolationLevelRepeatableRead + 1 + } COMAdminTxIsolationLevelOptions; + + typedef enum COMAdminSynchronizationOptions { + COMAdminSynchronizationIgnored = 0,COMAdminSynchronizationNone = 1,COMAdminSynchronizationSupported = 2,COMAdminSynchronizationRequired = 3, + COMAdminSynchronizationRequiresNew = 4 + } COMAdminSynchronizationOptions; + + typedef enum COMAdminActivationOptions { + COMAdminActivationInproc = 0,COMAdminActivationLocal = 1 + } COMAdminActivationOptions; + + typedef enum COMAdminAccessChecksLevelOptions { + COMAdminAccessChecksApplicationLevel = 0,COMAdminAccessChecksApplicationComponentLevel = 1 + } COMAdminAccessChecksLevelOptions; + + typedef enum COMAdminAuthenticationLevelOptions { + COMAdminAuthenticationDefault = 0,COMAdminAuthenticationNone = 1,COMAdminAuthenticationConnect = 2,COMAdminAuthenticationCall = 3, + COMAdminAuthenticationPacket = 4,COMAdminAuthenticationIntegrity = 5,COMAdminAuthenticationPrivacy = 6 + } COMAdminAuthenticationLevelOptions; + + typedef enum COMAdminImpersonationLevelOptions { + COMAdminImpersonationAnonymous = 1,COMAdminImpersonationIdentify = 2,COMAdminImpersonationImpersonate = 3,COMAdminImpersonationDelegate = 4 + } COMAdminImpersonationLevelOptions; + + typedef enum COMAdminAuthenticationCapabilitiesOptions { + COMAdminAuthenticationCapabilitiesNone = 0,COMAdminAuthenticationCapabilitiesSecureReference = 0x2, + COMAdminAuthenticationCapabilitiesStaticCloaking = 0x20,COMAdminAuthenticationCapabilitiesDynamicCloaking = 0x40 + } COMAdminAuthenticationCapabilitiesOptions; + + typedef enum COMAdminOS { + COMAdminOSNotInitialized = 0,COMAdminOSWindows3_1 = 1,COMAdminOSWindows9x = 2,COMAdminOSWindows2000 = 3, + COMAdminOSWindows2000AdvancedServer = 4,COMAdminOSWindows2000Unknown = 5,COMAdminOSUnknown = 6,COMAdminOSWindowsXPPersonal = 11, + COMAdminOSWindowsXPProfessional = 12,COMAdminOSWindowsNETStandardServer = 13,COMAdminOSWindowsNETEnterpriseServer = 14, + COMAdminOSWindowsNETDatacenterServer = 15,COMAdminOSWindowsNETWebServer = 
16,COMAdminOSWindowsLonghornPersonal = 17, + COMAdminOSWindowsLonghornProfessional = 18,COMAdminOSWindowsLonghornStandardServer = 19,COMAdminOSWindowsLonghornEnterpriseServer = 20, + COMAdminOSWindowsLonghornDatacenterServer = 21,COMAdminOSWindowsLonghornWebServer = 22 + } COMAdminOS; + + typedef enum COMAdminServiceOptions { + COMAdminServiceLoadBalanceRouter = 1 + } COMAdminServiceOptions; + + typedef enum COMAdminServiceStatusOptions { + COMAdminServiceStopped = 0,COMAdminServiceStartPending = COMAdminServiceStopped + 1,COMAdminServiceStopPending = COMAdminServiceStartPending + 1, + COMAdminServiceRunning = COMAdminServiceStopPending + 1,COMAdminServiceContinuePending = COMAdminServiceRunning + 1, + COMAdminServicePausePending = COMAdminServiceContinuePending + 1,COMAdminServicePaused = COMAdminServicePausePending + 1, + COMAdminServiceUnknownState = COMAdminServicePaused + 1 + } COMAdminServiceStatusOptions; + + typedef enum COMAdminQCMessageAuthenticateOptions { + COMAdminQCMessageAuthenticateSecureApps = 0,COMAdminQCMessageAuthenticateOff = 1,COMAdminQCMessageAuthenticateOn = 2 + } COMAdminQCMessageAuthenticateOptions; + + typedef enum COMAdminFileFlags { + COMAdminFileFlagLoadable = 0x1,COMAdminFileFlagCOM = 0x2,COMAdminFileFlagContainsPS = 0x4,COMAdminFileFlagContainsComp = 0x8, + COMAdminFileFlagContainsTLB = 0x10,COMAdminFileFlagSelfReg = 0x20,COMAdminFileFlagSelfUnReg = 0x40,COMAdminFileFlagUnloadableDLL = 0x80, + COMAdminFileFlagDoesNotExist = 0x100,COMAdminFileFlagAlreadyInstalled = 0x200,COMAdminFileFlagBadTLB = 0x400, + COMAdminFileFlagGetClassObjFailed = 0x800,COMAdminFileFlagClassNotAvailable = 0x1000,COMAdminFileFlagRegistrar = 0x2000, + COMAdminFileFlagNoRegistrar = 0x4000,COMAdminFileFlagDLLRegsvrFailed = 0x8000,COMAdminFileFlagRegTLBFailed = 0x10000, + COMAdminFileFlagRegistrarFailed = 0x20000,COMAdminFileFlagError = 0x40000 + } COMAdminFileFlags; + + typedef enum COMAdminComponentFlags { + COMAdminCompFlagTypeInfoFound = 0x1,COMAdminCompFlagCOMPlusPropertiesFound = 0x2,COMAdminCompFlagProxyFound = 0x4, + COMAdminCompFlagInterfacesFound = 0x8,COMAdminCompFlagAlreadyInstalled = 0x10,COMAdminCompFlagNotInApplication = 0x20 + } COMAdminComponentFlags; + +#define COMAdminCollectionRoot ("Root") +#define COMAdminCollectionApplications ("Applications") +#define COMAdminCollectionComponents ("Components") +#define COMAdminCollectionComputerList ("ComputerList") +#define COMAdminCollectionApplicationCluster ("ApplicationCluster") +#define COMAdminCollectionLocalComputer ("LocalComputer") +#define COMAdminCollectionInprocServers ("InprocServers") +#define COMAdminCollectionRelatedCollectionInfo ("RelatedCollectionInfo") +#define COMAdminCollectionPropertyInfo ("PropertyInfo") +#define COMAdminCollectionRoles ("Roles") +#define COMAdminCollectionErrorInfo ("ErrorInfo") +#define COMAdminCollectionInterfacesForComponent ("InterfacesForComponent") +#define COMAdminCollectionRolesForComponent ("RolesForComponent") +#define COMAdminCollectionMethodsForInterface ("MethodsForInterface") +#define COMAdminCollectionRolesForInterface ("RolesForInterface") +#define COMAdminCollectionRolesForMethod ("RolesForMethod") +#define COMAdminCollectionUsersInRole ("UsersInRole") +#define COMAdminCollectionDCOMProtocols ("DCOMProtocols") +#define COMAdminCollectionPartitions ("Partitions") + + enum COMAdminErrorCodes { + COMAdminErrObjectErrors = (HRESULT)0x80110401L,COMAdminErrObjectInvalid = (HRESULT)0x80110402L,COMAdminErrKeyMissing = (HRESULT)0x80110403L, + COMAdminErrAlreadyInstalled = 
(HRESULT)0x80110404L,COMAdminErrAppFileWriteFail = (HRESULT)0x80110407L, + COMAdminErrAppFileReadFail = (HRESULT)0x80110408L,COMAdminErrAppFileVersion = (HRESULT)0x80110409L,COMAdminErrBadPath = (HRESULT)0x8011040aL, + COMAdminErrApplicationExists = (HRESULT)0x8011040bL,COMAdminErrRoleExists = (HRESULT)0x8011040cL,COMAdminErrCantCopyFile = (HRESULT)0x8011040dL, + COMAdminErrNoUser = (HRESULT)0x8011040fL,COMAdminErrInvalidUserids = (HRESULT)0x80110410L,COMAdminErrNoRegistryCLSID = (HRESULT)0x80110411L, + COMAdminErrBadRegistryProgID = (HRESULT)0x80110412L,COMAdminErrAuthenticationLevel = (HRESULT)0x80110413L, + COMAdminErrUserPasswdNotValid = (HRESULT)0x80110414L,COMAdminErrCLSIDOrIIDMismatch = (HRESULT)0x80110418L, + COMAdminErrRemoteInterface = (HRESULT)0x80110419L,COMAdminErrDllRegisterServer = (HRESULT)0x8011041aL, + COMAdminErrNoServerShare = (HRESULT)0x8011041bL,COMAdminErrDllLoadFailed = (HRESULT)0x8011041dL,COMAdminErrBadRegistryLibID = (HRESULT)0x8011041eL, + COMAdminErrAppDirNotFound = (HRESULT)0x8011041fL,COMAdminErrRegistrarFailed = (HRESULT)0x80110423L, + COMAdminErrCompFileDoesNotExist = (HRESULT)0x80110424L,COMAdminErrCompFileLoadDLLFail = (HRESULT)0x80110425L, + COMAdminErrCompFileGetClassObj = (HRESULT)0x80110426L,COMAdminErrCompFileClassNotAvail = (HRESULT)0x80110427L, + COMAdminErrCompFileBadTLB = (HRESULT)0x80110428L,COMAdminErrCompFileNotInstallable = (HRESULT)0x80110429L, + COMAdminErrNotChangeable = (HRESULT)0x8011042aL,COMAdminErrNotDeletable = (HRESULT)0x8011042bL,COMAdminErrSession = (HRESULT)0x8011042cL, + COMAdminErrCompMoveLocked = (HRESULT)0x8011042dL,COMAdminErrCompMoveBadDest = (HRESULT)0x8011042eL,COMAdminErrRegisterTLB = (HRESULT)0x80110430L, + COMAdminErrSystemApp = (HRESULT)0x80110433L,COMAdminErrCompFileNoRegistrar = (HRESULT)0x80110434L, + COMAdminErrCoReqCompInstalled = (HRESULT)0x80110435L,COMAdminErrServiceNotInstalled = (HRESULT)0x80110436L, + COMAdminErrPropertySaveFailed = (HRESULT)0x80110437L,COMAdminErrObjectExists = (HRESULT)0x80110438L, + COMAdminErrComponentExists = (HRESULT)0x80110439L,COMAdminErrRegFileCorrupt = (HRESULT)0x8011043bL, + COMAdminErrPropertyOverflow = (HRESULT)0x8011043cL,COMAdminErrNotInRegistry = (HRESULT)0x8011043eL, + COMAdminErrObjectNotPoolable = (HRESULT)0x8011043fL,COMAdminErrApplidMatchesClsid = (HRESULT)0x80110446L, + COMAdminErrRoleDoesNotExist = (HRESULT)0x80110447L,COMAdminErrStartAppNeedsComponents = (HRESULT)0x80110448L, + COMAdminErrRequiresDifferentPlatform = (HRESULT)0x80110449L,COMAdminErrQueuingServiceNotAvailable = (HRESULT)0x80110602L, + COMAdminErrObjectParentMissing = (HRESULT)0x80110808L,COMAdminErrObjectDoesNotExist = (HRESULT)0x80110809L, + COMAdminErrCanNotExportAppProxy = (HRESULT)0x8011044aL,COMAdminErrCanNotStartApp = (HRESULT)0x8011044bL, + COMAdminErrCanNotExportSystemApp = (HRESULT)0x8011044cL,COMAdminErrCanNotSubscribeToComponent = (HRESULT)0x8011044dL, + COMAdminErrAppNotRunning = (HRESULT)0x8011080aL,COMAdminErrEventClassCannotBeSubscriber = (HRESULT)0x8011044eL, + COMAdminErrLibAppProxyIncompatible = (HRESULT)0x8011044fL,COMAdminErrBasePartitionOnly = (HRESULT)0x80110450L, + COMAdminErrDuplicatePartitionName = (HRESULT)0x80110457L,COMAdminErrPartitionInUse = (HRESULT)0x80110459L, + COMAdminErrImportedComponentsNotAllowed = (HRESULT)0x8011045bL,COMAdminErrRegdbNotInitialized = (HRESULT)0x80110472L, + COMAdminErrRegdbNotOpen = (HRESULT)0x80110473L,COMAdminErrRegdbSystemErr = (HRESULT)0x80110474L, + COMAdminErrRegdbAlreadyRunning = (HRESULT)0x80110475L,COMAdminErrMigVersionNotSupported = 
(HRESULT)0x80110480L, + COMAdminErrMigSchemaNotFound = (HRESULT)0x80110481L,COMAdminErrCatBitnessMismatch = (HRESULT)0x80110482L, + COMAdminErrCatUnacceptableBitness = (HRESULT)0x80110483L,COMAdminErrCatWrongAppBitnessBitness = (HRESULT)0x80110484L, + COMAdminErrCatPauseResumeNotSupported = (HRESULT)0x80110485L,COMAdminErrCatServerFault = (HRESULT)0x80110486L, + COMAdminErrCantRecycleLibraryApps = (HRESULT)0x8011080fL,COMAdminErrCantRecycleServiceApps = (HRESULT)0x80110811L, + COMAdminErrProcessAlreadyRecycled = (HRESULT)0x80110812L,COMAdminErrPausedProcessMayNotBeRecycled = (HRESULT)0x80110813L, + COMAdminErrInvalidPartition = (HRESULT)0x8011080bL,COMAdminErrPartitionMsiOnly = (HRESULT)0x80110819L, + COMAdminErrStartAppDisabled = (HRESULT)0x80110451L,COMAdminErrCompMoveSource = (HRESULT)0x8011081cL, + COMAdminErrCompMoveDest = (HRESULT)0x8011081dL,COMAdminErrCompMovePrivate = (HRESULT)0x8011081eL, + COMAdminErrCannotCopyEventClass = (HRESULT)0x80110820L + }; + + EXTERN_C const IID LIBID_COMAdmin; + EXTERN_C const CLSID CLSID_COMAdminCatalog; +#ifdef __cplusplus + class COMAdminCatalog; +#endif + EXTERN_C const CLSID CLSID_COMAdminCatalogObject; +#ifdef __cplusplus + class COMAdminCatalogObject; +#endif + EXTERN_C const CLSID CLSID_COMAdminCatalogCollection; +#ifdef __cplusplus + class COMAdminCatalogCollection; +#endif +#endif + + unsigned long __RPC_API BSTR_UserSize(unsigned long *,unsigned long,BSTR *); + unsigned char *__RPC_API BSTR_UserMarshal(unsigned long *,unsigned char *,BSTR *); + unsigned char *__RPC_API BSTR_UserUnmarshal(unsigned long *,unsigned char *,BSTR *); + void __RPC_API BSTR_UserFree(unsigned long *,BSTR *); + unsigned long __RPC_API LPSAFEARRAY_UserSize(unsigned long *,unsigned long,LPSAFEARRAY *); + unsigned char *__RPC_API LPSAFEARRAY_UserMarshal(unsigned long *,unsigned char *,LPSAFEARRAY *); + unsigned char *__RPC_API LPSAFEARRAY_UserUnmarshal(unsigned long *,unsigned char *,LPSAFEARRAY *); + void __RPC_API LPSAFEARRAY_UserFree(unsigned long *,LPSAFEARRAY *); + unsigned long __RPC_API VARIANT_UserSize(unsigned long *,unsigned long,VARIANT *); + unsigned char *__RPC_API VARIANT_UserMarshal(unsigned long *,unsigned char *,VARIANT *); + unsigned char *__RPC_API VARIANT_UserUnmarshal(unsigned long *,unsigned char *,VARIANT *); + void __RPC_API VARIANT_UserFree(unsigned long *,VARIANT *); + +#ifdef __cplusplus +} +#endif +#endif diff --git a/src/win32/filed/plugins/dbi_node.c b/src/win32/filed/plugins/dbi_node.c new file mode 100644 index 00000000..a5eb4234 --- /dev/null +++ b/src/win32/filed/plugins/dbi_node.c @@ -0,0 +1,283 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Written by James Harper, October 2008 + */ + +#include "exchange-fd.h" + +dbi_node_t::dbi_node_t(char *name, node_t *parent_node) : node_t(name, NODE_TYPE_DATABASE_INFO, parent_node) +{ + restore_display_name = NULL; + restore_input_streams = NULL; + buffer = NULL; +} + +dbi_node_t::~dbi_node_t() +{ + if (buffer != NULL) + delete buffer; + if (restore_input_streams != NULL) + delete restore_input_streams; + if (restore_display_name != NULL) + delete restore_display_name; +} + +bRC +dbi_node_t::startBackupFile(exchange_fd_context_t *context, struct save_pkt *sp) +{ + time_t now = time(NULL); + + _DebugMessage(100, "startBackupNode_DBI state = %d\n", state); + + if (context->job_level == 'F') { + sp->fname = full_path; + sp->link = full_path; + sp->statp.st_mode = 0700 | S_IFREG; + sp->statp.st_ctime = now; + sp->statp.st_mtime = now; + sp->statp.st_atime = now; + sp->statp.st_size = (uint64_t)-1; + sp->type = FT_REG; + return bRC_OK; + } + else + { + bfuncs->setBaculaValue(context->bpContext, bVarFileSeen, (void *)full_path); + return bRC_Seen; + } +} + +bRC +dbi_node_t::endBackupFile(exchange_fd_context_t *context) +{ + _DebugMessage(100, "endBackupNode_DBI state = %d\n", state); + + context->current_node = parent; + + return bRC_OK; +} + +bRC +dbi_node_t::createFile(exchange_fd_context_t *context, struct restore_pkt *rp) +{ + _DebugMessage(100, "createFile_DBI state = %d\n", state); + + rp->create_status = CF_EXTRACT; + + return bRC_OK; +} + +bRC +dbi_node_t::endRestoreFile(exchange_fd_context_t *context) +{ + _DebugMessage(100, "endRestoreFile_DBI state = %d\n", state); + + context->current_node = parent; + + return bRC_OK; +} + +bRC +dbi_node_t::pluginIoOpen(exchange_fd_context_t *context, struct io_pkt *io) +{ + uint32_t len; + WCHAR *ptr; + WCHAR *stream; + //char tmp[512]; + + buffer_pos = 0; + buffer_size = 65536; + buffer = new char[buffer_size]; + + if (context->job_type == JOB_TYPE_BACKUP) + { + ptr = (WCHAR *)buffer; + len = snwprintf(ptr, (buffer_size - buffer_pos) / 2, L"DatabaseBackupInfo\n"); + if (len < 0) + goto fail; + buffer_pos += len * 2; + ptr += len; + + len = snwprintf(ptr, (buffer_size - buffer_pos) / 2, L"%d\n", EXCHANGE_PLUGIN_VERSION); + if (len < 0) + goto fail; + buffer_pos += len * 2; + ptr += len; + + len = snwprintf(ptr, (buffer_size - buffer_pos) / 2, L"%s\n", dbi->wszDatabaseDisplayName); + if (len < 0) + goto fail; + buffer_pos += len * 2; + ptr += len; + + len = snwprintf(ptr, (buffer_size - buffer_pos) / 2, L"%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x\n", + dbi->rguidDatabase.Data1, dbi->rguidDatabase.Data2, dbi->rguidDatabase.Data3, + dbi->rguidDatabase.Data4[0], dbi->rguidDatabase.Data4[1], + dbi->rguidDatabase.Data4[2], dbi->rguidDatabase.Data4[3], + dbi->rguidDatabase.Data4[4], dbi->rguidDatabase.Data4[5], + dbi->rguidDatabase.Data4[6], dbi->rguidDatabase.Data4[7]); + if (len < 0) + goto fail; + buffer_pos += len * 2; + ptr += len; + + stream = dbi->wszDatabaseStreams; + while (*stream) + { + len = snwprintf(ptr, (buffer_size - buffer_pos) / 2, L"%s\n", stream); + if (len < 0) + goto fail; + buffer_pos += len * 2; + ptr += len; + stream += wcslen(stream) + 1; + } + + buffer_size = buffer_pos; + buffer_pos = 0; + } + + io->status = 0; + io->io_errno = 0; + return bRC_OK; + +fail: + io->status = 0; + io->io_errno = 1; + return bRC_Error; +} + +bRC +dbi_node_t::pluginIoRead(exchange_fd_context_t *context, struct io_pkt *io) +{ + io->status = 0; + io->io_errno = 0; + + io->status = MIN(io->count, (int)(buffer_size - buffer_pos)); + if 
(io->status == 0) + return bRC_OK; + memcpy(io->buf, buffer + buffer_pos, io->status); + buffer_pos += io->status; + + return bRC_OK; +} + +bRC +dbi_node_t::pluginIoWrite(exchange_fd_context_t *context, struct io_pkt *io) +{ + memcpy(&buffer[buffer_pos], io->buf, io->count); + buffer_pos += io->count; + io->status = io->count; + io->io_errno = 0; + return bRC_OK; +} + +bRC +dbi_node_t::pluginIoClose(exchange_fd_context_t *context, struct io_pkt *io) +{ + WCHAR tmp[128]; + WCHAR *ptr; + WCHAR eol; + int wchars_read; + int version; + int stream_buf_count; + WCHAR *streams_start; + + if (context->job_type == JOB_TYPE_RESTORE) + { + // need to think about making this buffer overflow proof... + _DebugMessage(100, "analyzing DatabaseBackupInfo\n"); + ptr = (WCHAR *)buffer; + + if (swscanf(ptr, L"%127[^\n]%c%n", tmp, &eol, &wchars_read) != 2) + goto restore_fail; + ptr += wchars_read; + _DebugMessage(150, "Header = %S\n", tmp); + // verify that header == "DatabaseBackupInfo" + + if (swscanf(ptr, L"%127[^\n]%c%n", tmp, &eol, &wchars_read) != 2) + goto restore_fail; + if (swscanf(tmp, L"%d%c", &version, &eol) != 1) + { + version = 0; + _DebugMessage(150, "Version = 0 (inferred)\n"); + } + else + { + ptr += wchars_read; + _DebugMessage(150, "Version = %d\n", version); + if (swscanf(ptr, L"%127[^\n]%c%n", tmp, &eol, &wchars_read) != 2) + goto restore_fail; + } + restore_display_name = new WCHAR[wchars_read]; + swscanf(ptr, L"%127[^\n]", restore_display_name); + _DebugMessage(150, "Database Display Name = %S\n", restore_display_name); + ptr += wchars_read; + + if (swscanf(ptr, L"%127[^\n]%c%n", tmp, &eol, &wchars_read) != 2) + goto restore_fail; + + if (swscanf(ptr, L"%8x-%4x-%4x-%2x%2x-%2x%2x%2x%2x%2x%2x", + &restore_guid.Data1, &restore_guid.Data2, &restore_guid.Data3, + &restore_guid.Data4[0], &restore_guid.Data4[1], + &restore_guid.Data4[2], &restore_guid.Data4[3], + &restore_guid.Data4[4], &restore_guid.Data4[5], + &restore_guid.Data4[6], &restore_guid.Data4[7]) != 11) + { + goto restore_fail; + } + _DebugMessage(150, "GUID = %08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x\n", + restore_guid.Data1, restore_guid.Data2, restore_guid.Data3, + restore_guid.Data4[0], restore_guid.Data4[1], + restore_guid.Data4[2], restore_guid.Data4[3], + restore_guid.Data4[4], restore_guid.Data4[5], + restore_guid.Data4[6], restore_guid.Data4[7]); + + ptr += wchars_read; + + stream_buf_count = 1; + streams_start = ptr; + while (ptr < (WCHAR *)(buffer + buffer_pos) && swscanf(ptr, L"%127[^\n]%c%n", tmp, &eol, &wchars_read) == 2) + { + _DebugMessage(150, "File = %S\n", tmp); + ptr += wchars_read; + stream_buf_count += wchars_read; + } + restore_input_streams = new WCHAR[stream_buf_count]; + ptr = streams_start; + stream_buf_count = 0; + while (ptr < (WCHAR *)(buffer + buffer_pos) && swscanf(ptr, L"%127[^\n]%c%n", tmp, &eol, &wchars_read) == 2) + { + snwprintf(&restore_input_streams[stream_buf_count], 65535, L"%s", tmp); + ptr += wchars_read; + stream_buf_count += wchars_read; + } + restore_input_streams[stream_buf_count] = 0; + + _DebugMessage(100, "done analyzing DatabasePluginInfo\n"); + } + delete buffer; + buffer = NULL; + return bRC_OK; +restore_fail: + _JobMessage(M_FATAL, "Format of %s is incorrect", full_path); + delete buffer; + buffer = NULL; + return bRC_Error; +} diff --git a/src/win32/filed/plugins/exch_api.c b/src/win32/filed/plugins/exch_api.c new file mode 100644 index 00000000..82c625bf --- /dev/null +++ b/src/win32/filed/plugins/exch_api.c @@ -0,0 +1,139 @@ +/* + Bacula(R) - The Network Backup 
Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Written by James Harper, July 2010 + * + * Used only in "old Exchange plugin" now deprecated. + */ + +#include "exchange-fd.h" + +HrESEBackupRestoreGetNodes_t HrESEBackupRestoreGetNodes; +HrESEBackupPrepare_t HrESEBackupPrepare; +HrESEBackupGetLogAndPatchFiles_t HrESEBackupGetLogAndPatchFiles; +HrESEBackupTruncateLogs_t HrESEBackupTruncateLogs; +HrESEBackupEnd_t HrESEBackupEnd; +HrESEBackupSetup_t HrESEBackupSetup; +HrESEBackupInstanceEnd_t HrESEBackupInstanceEnd; +HrESEBackupOpenFile_t HrESEBackupOpenFile; +HrESEBackupReadFile_t HrESEBackupReadFile; +HrESEBackupCloseFile_t HrESEBackupCloseFile; + +HrESERestoreOpen_t HrESERestoreOpen; +HrESERestoreReopen_t HrESERestoreReopen; +HrESERestoreComplete_t HrESERestoreComplete; +HrESERestoreClose_t HrESERestoreClose; +HrESERestoreGetEnvironment_t HrESERestoreGetEnvironment; +HrESERestoreSaveEnvironment_t HrESERestoreSaveEnvironment; +HrESERestoreAddDatabase_t HrESERestoreAddDatabase; +HrESERestoreOpenFile_t HrESERestoreOpenFile; + +bRC +loadExchangeApi() +{ + HMODULE h; + LONG status; + HKEY key_handle; + WCHAR *buf; + DWORD buf_len; + DWORD type; + + status = RegOpenKeyW(HKEY_LOCAL_MACHINE, L"SYSTEM\\CurrentControlSet\\Control\\BackupRestore\\DLLPaths", &key_handle); + if (status != ERROR_SUCCESS) + { + _JobMessageNull(M_FATAL, "Cannot get key for Exchange DLL path, result = %08x\n", status); + return bRC_Error; + } + + type = REG_EXPAND_SZ; + status = RegQueryValueExW(key_handle, L"esebcli2", NULL, &type, NULL, &buf_len); + if (status != ERROR_SUCCESS) + { + _JobMessageNull(M_FATAL, "Cannot get key for Exchange DLL path, result = %08x\n", status); + return bRC_Error; + } + buf_len += 2; + buf = new WCHAR[buf_len]; + + type = REG_EXPAND_SZ; + status = RegQueryValueExW(key_handle, L"esebcli2", NULL, &type, (LPBYTE)buf, &buf_len); + if (status != ERROR_SUCCESS) + { + _JobMessageNull(M_FATAL, "Cannot get key for Exchange DLL path, result = %08x\n", status); + delete buf; + return bRC_Error; + } + +printf("Got value %S\n", buf); + + // strictly speaking, a REG_EXPAND_SZ should be run through ExpandEnvironmentStrings + + h = LoadLibraryW(buf); + delete buf; + if (!h) { + _JobMessageNull(M_FATAL, "Cannot load Exchange DLL\n"); + return bRC_Error; + } + HrESEBackupRestoreGetNodes = (HrESEBackupRestoreGetNodes_t)GetProcAddress(h, "HrESEBackupRestoreGetNodes"); + HrESEBackupPrepare = (HrESEBackupPrepare_t)GetProcAddress(h, "HrESEBackupPrepare"); + HrESEBackupEnd = (HrESEBackupEnd_t)GetProcAddress(h, "HrESEBackupEnd"); + HrESEBackupSetup = (HrESEBackupSetup_t)GetProcAddress(h, "HrESEBackupSetup"); + HrESEBackupGetLogAndPatchFiles = (HrESEBackupGetLogAndPatchFiles_t)GetProcAddress(h, "HrESEBackupGetLogAndPatchFiles"); + HrESEBackupTruncateLogs = (HrESEBackupTruncateLogs_t)GetProcAddress(h, "HrESEBackupTruncateLogs"); + HrESEBackupInstanceEnd = (HrESEBackupInstanceEnd_t)GetProcAddress(h, "HrESEBackupInstanceEnd"); + HrESEBackupOpenFile = 
(HrESEBackupOpenFile_t)GetProcAddress(h, "HrESEBackupOpenFile"); + HrESEBackupReadFile = (HrESEBackupReadFile_t)GetProcAddress(h, "HrESEBackupReadFile"); + HrESEBackupCloseFile = (HrESEBackupCloseFile_t)GetProcAddress(h, "HrESEBackupCloseFile"); + HrESERestoreOpen = (HrESERestoreOpen_t)GetProcAddress(h, "HrESERestoreOpen"); + HrESERestoreReopen = (HrESERestoreReopen_t)GetProcAddress(h, "HrESERestoreReopen"); + HrESERestoreComplete = (HrESERestoreComplete_t)GetProcAddress(h, "HrESERestoreComplete"); + HrESERestoreClose = (HrESERestoreClose_t)GetProcAddress(h, "HrESERestoreClose"); + HrESERestoreSaveEnvironment = (HrESERestoreSaveEnvironment_t)GetProcAddress(h, "HrESERestoreSaveEnvironment"); + HrESERestoreGetEnvironment = (HrESERestoreGetEnvironment_t)GetProcAddress(h, "HrESERestoreGetEnvironment"); + HrESERestoreAddDatabase = (HrESERestoreAddDatabase_t)GetProcAddress(h, "HrESERestoreAddDatabase"); + HrESERestoreOpenFile = (HrESERestoreOpenFile_t)GetProcAddress(h, "HrESERestoreOpenFile"); + return bRC_OK; +} + +const char * +ESEErrorMessage(HRESULT result) +{ + switch (result) { + case 0: + return "No error."; + case hrLogfileHasBadSignature: + return "Log file has bad signature. Check that no stale files are left in the Exchange data/log directories."; + case hrCBDatabaseInUse: + return "Database in use. Make sure database is dismounted."; + case hrRestoreAtFileLevel: + return "File must be restored using Windows file I/O calls."; + case hrMissingFullBackup: + return "Exchange reports that no previous full backup has been done."; + case hrBackupInProgress: + return "Exchange backup already in progress."; + case hrLogfileNotContiguous: + return "Existing log file is not contiguous. Check that no stale files are left in the Exchange data/log directories."; + case hrErrorFromESECall: + return "Error returned from ESE function call. Check the Windows Event Logs for more information."; + case hrCBDatabaseNotFound: + return "Database not found. Check that the Database you are trying to restore actually exists in the Storage Group you are restoring to."; + default: + return "Unknown error."; + } +} diff --git a/src/win32/filed/plugins/exch_api.h b/src/win32/filed/plugins/exch_api.h new file mode 100644 index 00000000..4e6b2afd --- /dev/null +++ b/src/win32/filed/plugins/exch_api.h @@ -0,0 +1,293 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
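loadExchangeApi() above resolves the path of esebcli2.dll from the registry value HKLM\SYSTEM\CurrentControlSet\Control\BackupRestore\DLLPaths\esebcli2, loads the library with LoadLibraryW, and binds each HrESE* entry point with GetProcAddress. As the in-code comment notes, a REG_EXPAND_SZ value should strictly be run through ExpandEnvironmentStrings before use. A minimal standalone sketch of that lookup, with the helper name and buffer sizes chosen here only for illustration:

    #include <windows.h>

    /* Sketch only: look up the esebcli2 DLL path, expand any %VAR% references,
     * load the DLL and resolve one named export. Error handling is shortened. */
    static FARPROC load_ese_symbol(const char *symbol_name)
    {
        HKEY key;
        WCHAR raw[MAX_PATH] = {0}, expanded[MAX_PATH];
        DWORD size = sizeof(raw) - sizeof(WCHAR), type = 0;

        if (RegOpenKeyExW(HKEY_LOCAL_MACHINE,
                L"SYSTEM\\CurrentControlSet\\Control\\BackupRestore\\DLLPaths",
                0, KEY_READ, &key) != ERROR_SUCCESS)
            return NULL;
        if (RegQueryValueExW(key, L"esebcli2", NULL, &type,
                (LPBYTE)raw, &size) != ERROR_SUCCESS ||
                (type != REG_EXPAND_SZ && type != REG_SZ)) {
            RegCloseKey(key);
            return NULL;
        }
        RegCloseKey(key);

        /* REG_EXPAND_SZ values may contain %SystemRoot% and friends */
        if (ExpandEnvironmentStringsW(raw, expanded, MAX_PATH) == 0)
            return NULL;

        HMODULE dll = LoadLibraryW(expanded);
        if (dll == NULL)
            return NULL;
        return GetProcAddress(dll, symbol_name);  /* e.g. "HrESEBackupPrepare" */
    }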
+*/ +/* + * Written by James Harper, July 2010 + */ + +extern "C" { + +#define BACKUP_NODE_TYPE_MACHINE 0x001 +#define BACKUP_NODE_TYPE_ANNOTATION 0x010 +#define BACKUP_NODE_TYPE_DISPLAY 0x100 + +#define ESE_BACKUP_INSTANCE_END_ERROR 0x0 +#define ESE_BACKUP_INSTANCE_END_SUCCESS 0x1 + +#define BACKUP_TYPE_FULL 0x1 +#define BACKUP_TYPE_LOGS_ONLY 0x2 +#define BACKUP_TYPE_FULL_WITH_ALL_LOGS 0x3 + +#define RESTORE_CLOSE_ABORT 0x1 +#define RESTORE_CLOSE_NORMAL 0x0 + +#define ESE_RESTORE_COMPLETE_NOWAIT 0x00010000 +#define ESE_RESTORE_COMPLETE_ATTACH_DBS 0x00000001 +#define ESE_RESTORE_KEEP_LOG_FILES 0x00020000 + +//#include + +struct ESE_ICON_DESCRIPTION { + uint32_t ulSize; + char *pvData; +}; + +struct BACKUP_NODE_TREE { + WCHAR *wszName; + uint32_t fFlags; + ESE_ICON_DESCRIPTION iconDescription; + struct BACKUP_NODE_TREE *pNextNode; + struct BACKUP_NODE_TREE *pChildNode; +}; + +struct DATABASE_BACKUP_INFO { + WCHAR *wszDatabaseDisplayName; + uint32_t cwDatabaseStreams; + WCHAR *wszDatabaseStreams; + GUID rguidDatabase; + uint32_t *rgIconIndexDatabase; + uint32_t fDatabaseFlags; +}; + +struct INSTANCE_BACKUP_INFO { + uint64_t hInstanceId; + //RPC_STRING wszInstanceName; + WCHAR *wszInstanceName; + uint32_t ulIconIndexInstance; + uint32_t cDatabase; + DATABASE_BACKUP_INFO *rgDatabase; + uint32_t cIconDescription; + ESE_ICON_DESCRIPTION *rgIconDescription; +}; + +enum RECOVER_STATUS { + recoverInvalid = 0, + recoverNotStarted = 1, + recoverStarted = 2, + recoverEnded = 3, + recoverStatusMax +}; + +struct RESTORE_ENVIRONMENT { + WCHAR * m_wszRestoreLogPath; + WCHAR * m_wszSrcInstanceName; + uint32_t m_cDatabases; + WCHAR **m_wszDatabaseDisplayName; + GUID * m_rguidDatabase; + WCHAR * m_wszRestoreInstanceSystemPath; + WCHAR * m_wszRestoreInstanceLogPath; + WCHAR * m_wszTargetInstanceName; + WCHAR ** m_wszDatabaseStreamsS; + WCHAR ** m_wszDatabaseStreamsD; + uint32_t m_ulGenLow; + uint32_t m_ulGenHigh; + WCHAR * m_wszLogBaseName; + time_t m_timeLastRestore; + RECOVER_STATUS m_statusLastRecover; + HRESULT m_hrLastRecover; + time_t m_timeLastRecover; + WCHAR * m_wszAnnotation; +}; + +typedef HANDLE HCCX; + +typedef HRESULT (WINAPI *HrESEBackupRestoreGetNodes_t) +( + WCHAR* wszComputerName, + BACKUP_NODE_TREE* pBackupNodeTree +); + +typedef HRESULT (WINAPI *HrESEBackupPrepare_t) +( + WCHAR* wszBackupServer, + WCHAR* wszBackupAnnotation, + uint32_t *pcInstanceInfo, + INSTANCE_BACKUP_INFO **paInstanceInfo, + HCCX *phccxBackupContext +); + +typedef HRESULT (WINAPI *HrESEBackupEnd_t) +( + HCCX hccsBackupContext +); + +typedef HRESULT (WINAPI *HrESEBackupSetup_t) +( + HCCX hccsBackupContext, + uint64_t hInstanceID, + uint32_t btBackupType +); + +typedef HRESULT (WINAPI *HrESEBackupGetLogAndPatchFiles_t) +( + HCCX hccsBackupContext, + WCHAR** pwszFiles +); + +typedef HRESULT (WINAPI *HrESEBackupInstanceEnd_t) +( + HCCX hccsBackupContext, + uint32_t fFlags +); + +typedef HRESULT (WINAPI *HrESEBackupOpenFile_t) +( + HCCX hccsBackupContext, + WCHAR* wszFileName, + uint32_t cbReadHintSize, + uint32_t cSections, + void** rghFile, + uint64_t* rgliSectionSize +); + +typedef HRESULT (WINAPI *HrESEBackupReadFile_t) +( + HCCX hccsBackupContext, + void* hFile, + void* pvBuffer, + uint32_t cbBuffer, + uint32_t* pcbRead +); + +typedef HRESULT (WINAPI *HrESEBackupCloseFile_t) +( + HCCX hccsBackupContext, + void* hFile +); + +typedef HRESULT (WINAPI *HrESEBackupTruncateLogs_t) +( + HCCX hccsBackupContext +); + +typedef HRESULT (WINAPI *HrESERestoreOpen_t) +( + WCHAR* wszBackupServer, + WCHAR* wszBackupAnnotation, + WCHAR* 
wszSrcInstanceName, + WCHAR* wszRestoreLogPath, + HCCX* phccxRestoreContext +); + +typedef HRESULT (WINAPI *HrESERestoreReopen_t) +( + WCHAR* wszBackupServer, + WCHAR* wszBackupAnnotation, + WCHAR* wszRestoreLogPath, + HCCX* phccxRestoreContext +); + +typedef HRESULT (WINAPI *HrESERestoreClose_t) +( + HCCX phccxRestoreContext, + uint32_t fRestoreAbort +); + +typedef HRESULT (WINAPI *HrESERestoreComplete_t) +( + HCCX phccxRestoreContext, + WCHAR* wszCheckpointFilePath, + WCHAR* wszLogFilePath, + WCHAR* wszTargetInstanceName, + uint32_t fFlags +); + +typedef HRESULT (WINAPI *HrESERestoreSaveEnvironment_t) +( + HCCX phccxRestoreContext +); + +typedef HRESULT (WINAPI *HrESERestoreGetEnvironment_t) +( + HCCX phccxRestoreContext, + RESTORE_ENVIRONMENT **ppRestoreEnvironment +); + +typedef HRESULT (WINAPI *HrESERestoreAddDatabase_t) +( + HCCX phccxRestoreContext, + WCHAR* wszDatabaseDisplayName, + GUID guidDatabase, + WCHAR* wszDatabaseStreamsS, + WCHAR** wszDatabaseStreamsD +); + +typedef HRESULT (WINAPI *HrESERestoreOpenFile_t) +( + HCCX phccxRestoreContext, + WCHAR* wszFileName, + uint32_t cSections, + void* rghFile +); + +bRC +loadExchangeApi(); + +const char * +ESEErrorMessage(HRESULT result); + +#define hrLogfileHasBadSignature (HRESULT)0xC8000262L +#define hrLogfileNotContiguous (HRESULT)0xC8000263L +#define hrCBDatabaseInUse (HRESULT)0xC7FE1F41L +#define hrRestoreAtFileLevel (HRESULT)0xC7FF0FA5L +#define hrMissingFullBackup (HRESULT)0xC8000230L +#define hrBackupInProgress (HRESULT)0xC80001F9L +#define hrCBDatabaseNotFound (HRESULT)0xC7FE1F42L +#define hrErrorFromESECall (HRESULT)0xC7FF1004L + +extern HrESEBackupRestoreGetNodes_t HrESEBackupRestoreGetNodes; +extern HrESEBackupPrepare_t HrESEBackupPrepare; +extern HrESEBackupGetLogAndPatchFiles_t HrESEBackupGetLogAndPatchFiles; +extern HrESEBackupTruncateLogs_t HrESEBackupTruncateLogs; +extern HrESEBackupEnd_t HrESEBackupEnd; +extern HrESEBackupSetup_t HrESEBackupSetup; +extern HrESEBackupInstanceEnd_t HrESEBackupInstanceEnd; +extern HrESEBackupOpenFile_t HrESEBackupOpenFile; +extern HrESEBackupReadFile_t HrESEBackupReadFile; +extern HrESEBackupCloseFile_t HrESEBackupCloseFile; + +extern HrESERestoreOpen_t HrESERestoreOpen; +extern HrESERestoreReopen_t HrESERestoreReopen; +extern HrESERestoreComplete_t HrESERestoreComplete; +extern HrESERestoreClose_t HrESERestoreClose; +extern HrESERestoreGetEnvironment_t HrESERestoreGetEnvironment; +extern HrESERestoreSaveEnvironment_t HrESERestoreSaveEnvironment; +extern HrESERestoreAddDatabase_t HrESERestoreAddDatabase; +extern HrESERestoreOpenFile_t HrESERestoreOpenFile; + +#if !defined(MINGW64) && (_WIN32_WINNT < 0x0500) +typedef enum _COMPUTER_NAME_FORMAT { + ComputerNameNetBIOS, + ComputerNameDnsHostname, + ComputerNameDnsDomain, + ComputerNameDnsFullyQualified, + ComputerNamePhysicalNetBIOS, + ComputerNamePhysicalDnsHostname, + ComputerNamePhysicalDnsDomain, + ComputerNamePhysicalDnsFullyQualified, + ComputerNameMax +} COMPUTER_NAME_FORMAT; + +BOOL WINAPI GetComputerNameExW( + COMPUTER_NAME_FORMAT NameType, + LPWSTR lpBuffer, + LPDWORD lpnSize +); +#endif + +} diff --git a/src/win32/filed/plugins/exch_dbi_node.c b/src/win32/filed/plugins/exch_dbi_node.c new file mode 100644 index 00000000..8c96781c --- /dev/null +++ b/src/win32/filed/plugins/exch_dbi_node.c @@ -0,0 +1,280 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Written by James Harper, July 2010 + * + * Used only in "old Exchange plugin" now deprecated. + */ + +#include "exchange-fd.h" + +dbi_node_t::dbi_node_t(char *name, node_t *parent_node) : node_t(name, NODE_TYPE_DATABASE_INFO, parent_node) +{ + restore_display_name = NULL; + restore_input_streams = NULL; + buffer = NULL; +} + +dbi_node_t::~dbi_node_t() +{ + safe_delete(buffer); + safe_delete(restore_input_streams); + safe_delete(restore_display_name); +} + +bRC +dbi_node_t::startBackupFile(exchange_fd_context_t *context, struct save_pkt *sp) +{ + time_t now = time(NULL); + + _DebugMessage(100, "startBackupNode_DBI state = %d\n", state); + + if (context->job_level == 'F') { + sp->fname = full_path; + sp->link = full_path; + sp->statp.st_mode = 0700 | S_IFREG; + sp->statp.st_ctime = now; + sp->statp.st_mtime = now; + sp->statp.st_atime = now; + sp->statp.st_size = (uint64_t)-1; + sp->type = FT_REG; + return bRC_OK; + } + else + { + bfuncs->setBaculaValue(context->bpContext, bVarFileSeen, (void *)full_path); + return bRC_Seen; + } +} + +bRC +dbi_node_t::endBackupFile(exchange_fd_context_t *context) +{ + _DebugMessage(100, "endBackupNode_DBI state = %d\n", state); + + context->current_node = parent; + + return bRC_OK; +} + +bRC +dbi_node_t::createFile(exchange_fd_context_t *context, struct restore_pkt *rp) +{ + _DebugMessage(0, "createFile_DBI state = %d\n", state); + + rp->create_status = CF_EXTRACT; + + return bRC_OK; +} + +bRC +dbi_node_t::endRestoreFile(exchange_fd_context_t *context) +{ + _DebugMessage(0, "endRestoreFile_DBI state = %d\n", state); + + context->current_node = parent; + + return bRC_OK; +} + +bRC +dbi_node_t::pluginIoOpen(exchange_fd_context_t *context, struct io_pkt *io) +{ + uint32_t len; + WCHAR *ptr; + WCHAR *stream; + //char tmp[512]; + + buffer_pos = 0; + buffer_size = 65536; + buffer = new char[buffer_size]; + + if (context->job_type == JOB_TYPE_BACKUP) + { + ptr = (WCHAR *)buffer; + len = snwprintf(ptr, (buffer_size - buffer_pos) / 2, L"DatabaseBackupInfo\n"); + if (len < 0) + goto fail; + buffer_pos += len * 2; + ptr += len; + + len = snwprintf(ptr, (buffer_size - buffer_pos) / 2, L"%d\n", EXCHANGE_PLUGIN_VERSION); + if (len < 0) + goto fail; + buffer_pos += len * 2; + ptr += len; + + len = snwprintf(ptr, (buffer_size - buffer_pos) / 2, L"%s\n", dbi->wszDatabaseDisplayName); + if (len < 0) + goto fail; + buffer_pos += len * 2; + ptr += len; + + len = snwprintf(ptr, (buffer_size - buffer_pos) / 2, L"%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x\n", + dbi->rguidDatabase.Data1, dbi->rguidDatabase.Data2, dbi->rguidDatabase.Data3, + dbi->rguidDatabase.Data4[0], dbi->rguidDatabase.Data4[1], + dbi->rguidDatabase.Data4[2], dbi->rguidDatabase.Data4[3], + dbi->rguidDatabase.Data4[4], dbi->rguidDatabase.Data4[5], + dbi->rguidDatabase.Data4[6], dbi->rguidDatabase.Data4[7]); + if (len < 0) + goto fail; + buffer_pos += len * 2; + ptr += len; + + stream = dbi->wszDatabaseStreams; + while (*stream) + { + len = snwprintf(ptr, (buffer_size - buffer_pos) / 2, L"%s\n", stream); + if (len < 0) + goto fail; + buffer_pos += len * 2; + ptr += len; + stream += wcslen(stream) + 1; + } 
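The loop above finishes serialising the DatabaseBackupInfo fields (header line, plugin version, display name, database GUID, stream list) into one UTF-16 text buffer, one field per line; pluginIoClose later reads the same lines back with swscanf during restore. A small self-contained sketch of the GUID round trip used there, assuming only the standard Windows GUID structure; note that swscanf's %x conversions need unsigned int storage, so the narrower GUID fields are filled from temporaries:

    #include <windows.h>   /* GUID */
    #include <wchar.h>
    #include <stdio.h>

    /* Sketch only: format a GUID as one text line, then parse it back. */
    static int guid_roundtrip(const GUID *in, GUID *out)
    {
        WCHAR line[64];
        unsigned int d1, d2, d3, b[8];
        int i;

        swprintf(line, 64, L"%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x\n",
                 (unsigned int)in->Data1, in->Data2, in->Data3,
                 in->Data4[0], in->Data4[1], in->Data4[2], in->Data4[3],
                 in->Data4[4], in->Data4[5], in->Data4[6], in->Data4[7]);

        if (swscanf(line, L"%8x-%4x-%4x-%2x%2x-%2x%2x%2x%2x%2x%2x",
                    &d1, &d2, &d3, &b[0], &b[1], &b[2], &b[3],
                    &b[4], &b[5], &b[6], &b[7]) != 11)
            return -1;                       /* malformed line */

        out->Data1 = d1;
        out->Data2 = (unsigned short)d2;
        out->Data3 = (unsigned short)d3;
        for (i = 0; i < 8; i++)
            out->Data4[i] = (unsigned char)b[i];
        return 0;
    }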
+ + buffer_size = buffer_pos; + buffer_pos = 0; + } + + io->status = 0; + io->io_errno = 0; + return bRC_OK; + +fail: + io->status = 0; + io->io_errno = 1; + return bRC_Error; +} + +bRC +dbi_node_t::pluginIoRead(exchange_fd_context_t *context, struct io_pkt *io) +{ + io->status = 0; + io->io_errno = 0; + + io->status = MIN(io->count, (int)(buffer_size - buffer_pos)); + if (io->status == 0) + return bRC_OK; + memcpy(io->buf, buffer + buffer_pos, io->status); + buffer_pos += io->status; + + return bRC_OK; +} + +bRC +dbi_node_t::pluginIoWrite(exchange_fd_context_t *context, struct io_pkt *io) +{ + memcpy(&buffer[buffer_pos], io->buf, io->count); + buffer_pos += io->count; + io->status = io->count; + io->io_errno = 0; + return bRC_OK; +} + +bRC +dbi_node_t::pluginIoClose(exchange_fd_context_t *context, struct io_pkt *io) +{ + WCHAR tmp[128]; + WCHAR *ptr; + WCHAR eol; + int wchars_read; + int version; + int stream_buf_count; + WCHAR *streams_start; + + if (context->job_type == JOB_TYPE_RESTORE) + { + // need to think about making this buffer overflow proof... + _DebugMessage(100, "analyzing DatabaseBackupInfo\n"); + ptr = (WCHAR *)buffer; + + if (swscanf(ptr, L"%127[^\n]%c%n", tmp, &eol, &wchars_read) != 2) + goto restore_fail; + ptr += wchars_read; + _DebugMessage(150, "Header = %S\n", tmp); + // verify that header == "DatabaseBackupInfo" + + if (swscanf(ptr, L"%127[^\n]%c%n", tmp, &eol, &wchars_read) != 2) + goto restore_fail; + if (swscanf(tmp, L"%d%c", &version, &eol) != 1) + { + version = 0; + _DebugMessage(150, "Version = 0 (inferred)\n"); + } + else + { + ptr += wchars_read; + _DebugMessage(150, "Version = %d\n", version); + if (swscanf(ptr, L"%127[^\n]%c%n", tmp, &eol, &wchars_read) != 2) + goto restore_fail; + } + restore_display_name = new WCHAR[wchars_read]; + swscanf(ptr, L"%127[^\n]", restore_display_name); + _DebugMessage(150, "Database Display Name = %S\n", restore_display_name); + ptr += wchars_read; + + if (swscanf(ptr, L"%127[^\n]%c%n", tmp, &eol, &wchars_read) != 2) + goto restore_fail; + + if (swscanf(ptr, L"%8x-%4x-%4x-%2x%2x-%2x%2x%2x%2x%2x%2x", + &restore_guid.Data1, &restore_guid.Data2, &restore_guid.Data3, + &restore_guid.Data4[0], &restore_guid.Data4[1], + &restore_guid.Data4[2], &restore_guid.Data4[3], + &restore_guid.Data4[4], &restore_guid.Data4[5], + &restore_guid.Data4[6], &restore_guid.Data4[7]) != 11) + { + goto restore_fail; + } + _DebugMessage(150, "GUID = %08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x\n", + restore_guid.Data1, restore_guid.Data2, restore_guid.Data3, + restore_guid.Data4[0], restore_guid.Data4[1], + restore_guid.Data4[2], restore_guid.Data4[3], + restore_guid.Data4[4], restore_guid.Data4[5], + restore_guid.Data4[6], restore_guid.Data4[7]); + + ptr += wchars_read; + + stream_buf_count = 1; + streams_start = ptr; + while (ptr < (WCHAR *)(buffer + buffer_pos) && swscanf(ptr, L"%127[^\n]%c%n", tmp, &eol, &wchars_read) == 2) + { + _DebugMessage(150, "File = %S\n", tmp); + ptr += wchars_read; + stream_buf_count += wchars_read; + } + restore_input_streams = new WCHAR[stream_buf_count]; + ptr = streams_start; + stream_buf_count = 0; + while (ptr < (WCHAR *)(buffer + buffer_pos) && swscanf(ptr, L"%127[^\n]%c%n", tmp, &eol, &wchars_read) == 2) + { + snwprintf(&restore_input_streams[stream_buf_count], 65535, L"%s", tmp); + ptr += wchars_read; + stream_buf_count += wchars_read; + } + restore_input_streams[stream_buf_count] = 0; + + _DebugMessage(100, "done analyzing DatabasePluginInfo\n"); + } + safe_delete(buffer); + return bRC_OK; +restore_fail: + 
_JobMessage(M_FATAL, "Format of %s is incorrect", full_path); + safe_delete(buffer); + return bRC_Error; +} diff --git a/src/win32/filed/plugins/exch_file_node.c b/src/win32/filed/plugins/exch_file_node.c new file mode 100644 index 00000000..7c7b137d --- /dev/null +++ b/src/win32/filed/plugins/exch_file_node.c @@ -0,0 +1,229 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Written by James Harper, July 2010 + * + * Used only in "old Exchange plugin" now deprecated. + */ + +#include "exchange-fd.h" + +file_node_t::file_node_t(char *name, node_t *parent_node) : node_t(name, NODE_TYPE_FILE, parent_node) +{ + backup_file_handle = INVALID_HANDLE_VALUE; + restore_file_handle = INVALID_HANDLE_VALUE; + restore_at_file_level = FALSE; +} + +file_node_t::~file_node_t() +{ + if (backup_file_handle != INVALID_HANDLE_VALUE) + { + //_DebugMessage(100, "closing file handle in destructor\n"); + CloseHandle(backup_file_handle); + } + if (restore_file_handle != INVALID_HANDLE_VALUE) + { + //_DebugMessage(100, "closing file handle in destructor\n"); + if (restore_at_file_level) + { + CloseHandle(restore_file_handle); + } + else + { + // maybe one day + } + } +} + +bRC +file_node_t::startBackupFile(exchange_fd_context_t *context, struct save_pkt *sp) +{ + time_t now = time(NULL); + _DebugMessage(100, "startBackupNode_FILE state = %d\n", state); + + if (context->job_level == 'F' || parent->type == NODE_TYPE_STORAGE_GROUP) { + sp->fname = full_path; + sp->link = full_path; + _DebugMessage(100, "fname = %s\n", sp->fname); + sp->statp.st_mode = 0700 | S_IFREG; + sp->statp.st_ctime = now; + sp->statp.st_mtime = now; + sp->statp.st_atime = now; + sp->statp.st_size = (uint64_t)-1; + sp->type = FT_REG; + return bRC_OK; + } else { + bfuncs->setBaculaValue(context->bpContext, bVarFileSeen, (void *)full_path); + return bRC_Seen; + } +} + +bRC +file_node_t::endBackupFile(exchange_fd_context_t *context) +{ + _DebugMessage(100, "endBackupNode_FILE state = %d\n", state); + + context->current_node = parent; + + return bRC_OK; +} + +bRC +file_node_t::createFile(exchange_fd_context_t *context, struct restore_pkt *rp) +{ + //HrESERestoreOpenFile with name of log file + + _DebugMessage(0, "createFile_FILE state = %d\n", state); + rp->create_status = CF_EXTRACT; + return bRC_OK; +} + +bRC +file_node_t::endRestoreFile(exchange_fd_context_t *context) +{ + _DebugMessage(0, "endRestoreFile_FILE state = %d\n", state); + context->current_node = parent; + return bRC_OK; +} + +bRC +file_node_t::pluginIoOpen(exchange_fd_context_t *context, struct io_pkt *io) +{ + HRESULT result; + HANDLE handle; + char *tmp = new char[wcslen(filename) + 1]; + wcstombs(tmp, filename, wcslen(filename) + 1); + + _DebugMessage(0, "pluginIoOpen_FILE - filename = %s\n", tmp); + io->status = 0; + io->io_errno = 0; + if (context->job_type == JOB_TYPE_BACKUP) + { + _DebugMessage(10, "Calling HrESEBackupOpenFile\n"); + result = HrESEBackupOpenFile(hccx, filename, 65535, 1, 
&backup_file_handle, &section_size); + if (result) + { + _JobMessage(M_FATAL, "HrESEBackupOpenFile failed with error 0x%08x - %s\n", result, ESEErrorMessage(result)); + backup_file_handle = INVALID_HANDLE_VALUE; + io->io_errno = 1; + return bRC_Error; + } + } + else + { + _DebugMessage(10, "Calling HrESERestoreOpenFile for '%s'\n", tmp); + result = HrESERestoreOpenFile(hccx, filename, 1, &restore_file_handle); + if (result == hrRestoreAtFileLevel) + { + restore_at_file_level = true; + _DebugMessage(100, "Calling CreateFileW for '%s'\n", tmp); + handle = CreateFileW(filename, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL); + if (handle == INVALID_HANDLE_VALUE) + { + _JobMessage(M_FATAL, "CreateFile failed"); + return bRC_Error; + } + restore_file_handle = (void *)handle; + return bRC_OK; + } + else if (result == 0) + { + _JobMessage(M_FATAL, "Exchange File IO API not yet supported for restore\n"); + restore_at_file_level = false; + return bRC_Error; + } + else + { + _JobMessage(M_FATAL, "HrESERestoreOpenFile failed with error 0x%08x - %s\n", result, ESEErrorMessage(result)); + return bRC_Error; + } + } + return bRC_OK; +} + +bRC +file_node_t::pluginIoRead(exchange_fd_context_t *context, struct io_pkt *io) +{ + HRESULT result; + uint32_t readLength; + + io->status = 0; + io->io_errno = 0; + _DebugMessage(200, "Calling HrESEBackupReadFile\n"); + result = HrESEBackupReadFile(hccx, backup_file_handle, io->buf, io->count, &readLength); + if (result) + { + io->io_errno = 1; + return bRC_Error; + } + io->status = readLength; + size += readLength; + return bRC_OK; +} + +bRC +file_node_t::pluginIoWrite(exchange_fd_context_t *context, struct io_pkt *io) +{ + DWORD bytes_written; + + io->io_errno = 0; + if (!restore_at_file_level) + return bRC_Error; + + if (!WriteFile(restore_file_handle, io->buf, io->count, &bytes_written, NULL)) + { + _JobMessage(M_FATAL, "Write Error"); + return bRC_Error; + } + + if (bytes_written != (DWORD)io->count) + { + _JobMessage(M_FATAL, "Short write"); + return bRC_Error; + } + io->status = bytes_written; + + return bRC_OK; +} + +bRC +file_node_t::pluginIoClose(exchange_fd_context_t *context, struct io_pkt *io) +{ + if (context->job_type == JOB_TYPE_BACKUP) + { + _DebugMessage(100, "Calling HrESEBackupCloseFile\n"); + HrESEBackupCloseFile(hccx, backup_file_handle); + backup_file_handle = INVALID_HANDLE_VALUE; + return bRC_OK; + } + else + { + if (restore_at_file_level) + { + CloseHandle(restore_file_handle); + restore_file_handle = INVALID_HANDLE_VALUE; + return bRC_OK; + } + else + { + return bRC_OK; + } + } +} diff --git a/src/win32/filed/plugins/exch_node.c b/src/win32/filed/plugins/exch_node.c new file mode 100644 index 00000000..c8453392 --- /dev/null +++ b/src/win32/filed/plugins/exch_node.c @@ -0,0 +1,120 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Written by James Harper, July 2010 + * + * Used only in "old Exchange plugin" now deprecated.
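In exch_file_node.c above, a return of hrRestoreAtFileLevel from HrESERestoreOpenFile means the log or patch file must be restored with ordinary Win32 file I/O, so the plugin creates the target with CreateFileW and writes each incoming block with WriteFile, treating both an API failure and a short write as fatal. A minimal sketch of that fallback write path (helper name hypothetical):

    #include <windows.h>

    /* Sketch only: write one restored block to a handle opened with CreateFileW,
     * failing on Win32 errors and on short writes, as the plugin code does. */
    static BOOL write_restored_block(HANDLE h, const void *buf, DWORD count)
    {
        DWORD written = 0;
        if (!WriteFile(h, buf, count, &written, NULL))
            return FALSE;                    /* inspect GetLastError() */
        return written == count;             /* short write is also an error */
    }

    /* Typical open call for the target file:
     *   HANDLE h = CreateFileW(path, GENERIC_WRITE, 0, NULL,
     *                          CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL); */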
+ */ + +#include "exchange-fd.h" + +node_t::node_t(char *name, int type) +{ + this->type = type; + state = 0; + parent = NULL; + this->name = bstrdup(name); + full_path = make_full_path(); + size = 0; + level = 0; +} + +node_t::node_t(char *name, int type, node_t *parent_node) +{ + this->type = type; + state = 0; + parent = parent_node; + this->name = bstrdup(name); + full_path = make_full_path(); + size = 0; + level = parent->level + 1; +} + +node_t::~node_t() +{ + safe_delete(name); + safe_delete(full_path); +} + +char * +node_t::make_full_path() +{ + node_t *curr_node; + int len; + char *retval; + + for (len = 0, curr_node = this; curr_node != NULL; curr_node = curr_node->parent) + { + len += strlen(curr_node->name) + 1; + } + if (type == NODE_TYPE_FILE || type == NODE_TYPE_DATABASE_INFO) + { + retval = new char[len + 1]; + retval[len] = 0; + } + else + { + retval = new char[len + 2]; + retval[len] = '/'; + retval[len + 1] = 0; + } + for (curr_node = this; curr_node != NULL; curr_node = curr_node->parent) + { + len -= strlen(curr_node->name); + memcpy(retval + len, curr_node->name, strlen(curr_node->name)); + retval[--len] = '/'; + } + return retval; +} + +bRC +node_t::pluginIoOpen(exchange_fd_context_t *context, struct io_pkt *io) +{ + _DebugMessage(100, "pluginIoOpen_Node\n"); + io->status = 0; + io->io_errno = 0; + return bRC_OK; +} + +bRC +node_t::pluginIoRead(exchange_fd_context_t *context, struct io_pkt *io) +{ + _DebugMessage(100, "pluginIoRead_Node\n"); + io->status = 0; + io->io_errno = 0; + return bRC_OK; +} + +bRC +node_t::pluginIoWrite(exchange_fd_context_t *context, struct io_pkt *io) +{ + _DebugMessage(100, "pluginIoWrite_Node\n"); + io->status = 0; + io->io_errno = 1; + return bRC_Error; +} + +bRC +node_t::pluginIoClose(exchange_fd_context_t *context, struct io_pkt *io) +{ + _DebugMessage(100, "pluginIoClose_Node\n"); + io->status = 0; + io->io_errno = 0; + return bRC_OK; +} diff --git a/src/win32/filed/plugins/exch_node.h b/src/win32/filed/plugins/exch_node.h new file mode 100644 index 00000000..9943d63b --- /dev/null +++ b/src/win32/filed/plugins/exch_node.h @@ -0,0 +1,177 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
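node_t::make_full_path() above builds the node's path in two passes: the first walk up the parent chain adds up the component lengths, the second fills the buffer from the end backwards, inserting '/' before every component and appending a trailing '/' for everything except file and database-info nodes. An equivalent recursive sketch in plain C++ (hypothetical struct, illustration only), which may make the two-pass version easier to check:

    #include <string>

    struct path_node {
        const char *name;
        const path_node *parent;   /* NULL at the root */
        bool is_leaf;              /* file / database-info nodes: no trailing '/' */
    };

    /* Sketch only: "/root/child/leaf" for leaves, "/root/child/dir/" otherwise. */
    static std::string join_components(const path_node *n)
    {
        std::string p = n->parent ? join_components(n->parent) : std::string();
        p += '/';
        p += n->name;
        return p;
    }

    static std::string full_path(const path_node *n)
    {
        std::string p = join_components(n);
        if (!n->is_leaf)
            p += '/';
        return p;
    }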
+*/ +/* + * Written by James Harper, July 2010 + */ + +#define NODE_TYPE_UNKNOWN 0 +#define NODE_TYPE_ROOT 1 +#define NODE_TYPE_SERVICE 2 +#define NODE_TYPE_STORAGE_GROUP 3 +#define NODE_TYPE_STORE 4 +#define NODE_TYPE_DATABASE_INFO 5 +#define NODE_TYPE_FILE 6 + +class node_t { +public: + int type; + int state; + node_t *parent; + char *name; + char *full_path; + size_t size; + int level; + + node_t(char *name, int type); + node_t(char *name, int type, node_t *parent_node); + virtual ~node_t(); + + char *make_full_path(); + + virtual bRC startBackupFile(exchange_fd_context_t *context, struct save_pkt *sp) = 0; + virtual bRC endBackupFile(exchange_fd_context_t *context) = 0; + + virtual bRC createFile(exchange_fd_context_t *context, struct restore_pkt *rp) = 0; + virtual bRC endRestoreFile(exchange_fd_context_t *context) = 0; + + virtual bRC pluginIoOpen(exchange_fd_context_t *context, struct io_pkt *io); + virtual bRC pluginIoRead(exchange_fd_context_t *context, struct io_pkt *io); + virtual bRC pluginIoWrite(exchange_fd_context_t *context, struct io_pkt *io); + virtual bRC pluginIoClose(exchange_fd_context_t *context, struct io_pkt *io); +}; + +class file_node_t : public node_t { +public: + WCHAR *filename; + HCCX hccx; + VOID *backup_file_handle; + VOID *restore_file_handle; + uint64_t section_size; + bool restore_at_file_level; + + file_node_t(char *name, node_t *parent_node); + virtual ~file_node_t(); + bRC startBackupFile(exchange_fd_context_t *context, struct save_pkt *sp); + bRC endBackupFile(exchange_fd_context_t *context); + + bRC createFile(exchange_fd_context_t *context, struct restore_pkt *rp); + bRC endRestoreFile(exchange_fd_context_t *context); + + bRC pluginIoOpen(exchange_fd_context_t *context, struct io_pkt *io); + bRC pluginIoRead(exchange_fd_context_t *context, struct io_pkt *io); + bRC pluginIoWrite(exchange_fd_context_t *context, struct io_pkt *io); + bRC pluginIoClose(exchange_fd_context_t *context, struct io_pkt *io); +}; + +class dbi_node_t : public node_t { +public: + DATABASE_BACKUP_INFO *dbi; + char *buffer; + uint32_t buffer_size; + uint32_t buffer_pos; + WCHAR *restore_display_name; + GUID restore_guid; + WCHAR *restore_input_streams; + WCHAR *restore_output_streams; + + dbi_node_t(char *name, node_t *parent_node); + virtual ~dbi_node_t(); + bRC startBackupFile(exchange_fd_context_t *context, struct save_pkt *sp); + bRC endBackupFile(exchange_fd_context_t *context); + + bRC createFile(exchange_fd_context_t *context, struct restore_pkt *rp); + bRC endRestoreFile(exchange_fd_context_t *context); + + bRC pluginIoOpen(exchange_fd_context_t *context, struct io_pkt *io); + bRC pluginIoRead(exchange_fd_context_t *context, struct io_pkt *io); + bRC pluginIoWrite(exchange_fd_context_t *context, struct io_pkt *io); + bRC pluginIoClose(exchange_fd_context_t *context, struct io_pkt *io); +}; + +class store_node_t : public node_t { +public: + HCCX hccx; + DATABASE_BACKUP_INFO *dbi; + WCHAR *stream_ptr; + file_node_t *file_node; + dbi_node_t *dbi_node; + WCHAR *out_stream_ptr; + + store_node_t(char *name, node_t *parent_node); + virtual ~store_node_t(); + bRC startBackupFile(exchange_fd_context_t *context, struct save_pkt *sp); + bRC endBackupFile(exchange_fd_context_t *context); + + bRC createFile(exchange_fd_context_t *context, struct restore_pkt *rp); + bRC endRestoreFile(exchange_fd_context_t *context); +}; + +class storage_group_node_t : public node_t { +public: + HCCX hccx; + INSTANCE_BACKUP_INFO *ibi; + store_node_t *store_node; + file_node_t *file_node; + uint32_t 
current_dbi; + WCHAR *logfiles; + WCHAR *logfile_ptr; + RESTORE_ENVIRONMENT *restore_environment; + WCHAR *service_name; + WCHAR *storage_group_name; + WCHAR *saved_log_path; + storage_group_node_t *next; + + storage_group_node_t(char *name, node_t *parent_node); + virtual ~storage_group_node_t(); + bRC startBackupFile(exchange_fd_context_t *context, struct save_pkt *sp); + bRC endBackupFile(exchange_fd_context_t *context); + + bRC createFile(exchange_fd_context_t *context, struct restore_pkt *rp); + bRC endRestoreFile(exchange_fd_context_t *context); +}; + +class service_node_t : public node_t { +public: + uint32_t ibi_count; + INSTANCE_BACKUP_INFO *ibi; + HCCX hccx; + uint32_t current_ibi; + storage_group_node_t *first_storage_group_node; + + service_node_t(char *name, node_t *parent_node); + virtual ~service_node_t(); + bRC startBackupFile(exchange_fd_context_t *context, struct save_pkt *sp); + bRC endBackupFile(exchange_fd_context_t *context); + + bRC createFile(exchange_fd_context_t *context, struct restore_pkt *rp); + bRC endRestoreFile(exchange_fd_context_t *context); +}; + +class root_node_t : public node_t { +public: + service_node_t *service_node; + + root_node_t(char *name); + virtual ~root_node_t(); + bRC startBackupFile(exchange_fd_context_t *context, struct save_pkt *sp); + bRC endBackupFile(exchange_fd_context_t *context); + + bRC createFile(exchange_fd_context_t *context, struct restore_pkt *rp); + bRC endRestoreFile(exchange_fd_context_t *context); +}; diff --git a/src/win32/filed/plugins/exch_root_node.c b/src/win32/filed/plugins/exch_root_node.c new file mode 100644 index 00000000..875433c7 --- /dev/null +++ b/src/win32/filed/plugins/exch_root_node.c @@ -0,0 +1,155 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Written by James Harper, July 2010 + * + * Used only in "old Exchange plugin" now deprecated. 
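Each class declared in exch_node.h above is a small state machine: startBackupFile() either describes one file to back up or points context->current_node at a child node, and endBackupFile() returns bRC_More while further entries remain, which in the Bacula file-daemon plugin protocol asks the daemon to call startBackupFile() again. A much-reduced sketch of that driving loop, with a hypothetical stand-in for the node interface (the real daemon also opens, reads and closes each file between the two calls):

    /* Sketch only: fake_* names stand in for Bacula's node_t and bRC codes. */
    enum fake_bRC { fake_bRC_OK, fake_bRC_More, fake_bRC_Error };

    struct fake_node {
        virtual fake_bRC startBackupFile() = 0;
        virtual fake_bRC endBackupFile() = 0;
        virtual ~fake_node() {}
    };

    static fake_bRC backup_node(fake_node *node)
    {
        for (;;) {
            if (node->startBackupFile() == fake_bRC_Error)
                return fake_bRC_Error;
            /* ... file contents would be read and stored here ... */
            fake_bRC rc = node->endBackupFile();
            if (rc == fake_bRC_Error)
                return fake_bRC_Error;
            if (rc != fake_bRC_More)        /* bRC_More: another entry follows */
                return fake_bRC_OK;
        }
    }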
+ */ + +#include "exchange-fd.h" + +root_node_t::root_node_t(char *name) : node_t(name, NODE_TYPE_ROOT) +{ + service_node = NULL; +} + +root_node_t::~root_node_t() +{ +} + +bRC +root_node_t::startBackupFile(exchange_fd_context_t *context, struct save_pkt *sp) +{ + bRC retval = bRC_OK; + time_t now; + + _DebugMessage(100, "startBackupNode_ROOT state = %d\n", state); + switch(state) + { + case 0: + if (strcmp(PLUGIN_PATH_PREFIX_BASE, name) != 0) + { + _JobMessage(M_FATAL, "Invalid backup path specified, must start with '/" PLUGIN_PATH_PREFIX_BASE "/'\n"); + state = 999; + return bRC_Error; + } + // check that service_node == NULL + service_node = new service_node_t(bstrdup(context->path_bits[level + 1]), this); + state = 1; + // fall through + case 1: + context->current_node = service_node; + break; + case 2: + now = time(NULL); + sp->fname = full_path; + sp->link = full_path; + sp->statp.st_mode = 0700 | S_IFDIR; + sp->statp.st_ctime = now; + sp->statp.st_mtime = now; + sp->statp.st_atime = now; + sp->statp.st_size = 0; + sp->type = FT_DIREND; + break; + case 999: + return bRC_Error; + default: + _JobMessage(M_FATAL, "startBackupFile: invalid internal state %d", state); + state = 999; + } + return retval; +} + +bRC +root_node_t::endBackupFile(exchange_fd_context_t *context) +{ + bRC retval = bRC_OK; + + _DebugMessage(100, "endBackupNode_ROOT state = %d\n", state); + switch(state) + { + case 1: + state = 2; + retval = bRC_More; + // free service_node here? + break; + case 2: + retval = bRC_OK; + break; + case 999: + retval = bRC_Error; + default: + _JobMessage(M_FATAL, "endBackupFile: invalid internal state %d", state); + state = 999; + return bRC_Error; + } + return retval; +} + +bRC +root_node_t::createFile(exchange_fd_context_t *context, struct restore_pkt *rp) +{ + _DebugMessage(0, "createFile_ROOT state = %d\n", state); + switch (state) { + case 0: + if (strcmp(name, PLUGIN_PATH_PREFIX_BASE) != 0) { + _JobMessage(M_FATAL, "Invalid restore path specified, must start with '/" PLUGIN_PATH_PREFIX_BASE "/'\n"); + state = 999; + return bRC_Error; + } + service_node = new service_node_t(bstrdup(context->path_bits[level + 1]), this); + context->current_node = service_node; + return bRC_OK; + case 1: + rp->create_status = CF_CREATED; + return bRC_OK; + + /* Skip this file */ + case 900: + rp->create_status = CF_SKIP; + return bRC_OK; + /* Error */ + case 999: + return bRC_Error; + default: + _JobMessage(M_FATAL, "createFile: invalid internal state %d", state); + state = 999; + } + return bRC_Error; +} + +bRC +root_node_t::endRestoreFile(exchange_fd_context_t *context) +{ + _DebugMessage(0, "endRestoreFile_ROOT state = %d\n", state); + switch (state) { + case 0: + safe_delete(service_node); + state = 1; + return bRC_OK; + case 1: + return bRC_OK; + case 900: + return bRC_OK; + default: + _JobMessage(M_FATAL, "endRestore: invalid internal state %d", state); + state = 999; + } + return bRC_Error; +} diff --git a/src/win32/filed/plugins/exch_service_node.c b/src/win32/filed/plugins/exch_service_node.c new file mode 100644 index 00000000..16a3fed6 --- /dev/null +++ b/src/win32/filed/plugins/exch_service_node.c @@ -0,0 +1,223 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
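root_node_t above, like the service and storage-group nodes that follow, steers itself with context->path_bits[]: the requested backup or restore path arrives pre-split into one component per node level, and each node compares path_bits[level + 1] against the names of its children (NULL meaning everything at that level). A minimal sketch of such a split, assuming the components are separated by '/' and empty pieces are dropped (helper name hypothetical):

    #include <string>
    #include <vector>

    /* Sketch only: "/<prefix>/Microsoft Information Store/First Storage Group"
     * becomes {"<prefix>", "Microsoft Information Store", "First Storage Group"}. */
    static std::vector<std::string> split_path_bits(const std::string &path)
    {
        std::vector<std::string> bits;
        std::string::size_type start = 0;

        while (start < path.size()) {
            std::string::size_type slash = path.find('/', start);
            if (slash == std::string::npos)
                slash = path.size();
            if (slash > start)                 /* skip empty components */
                bits.push_back(path.substr(start, slash - start));
            start = slash + 1;
        }
        return bits;
    }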
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Written by James Harper, July 2010 + * + * Used only in "old Exchange plugin" now deprecated. + */ + +#include "exchange-fd.h" + +service_node_t::service_node_t(char *name, node_t *parent_node) : node_t(name, NODE_TYPE_SERVICE, parent_node) +{ + current_ibi = 0; + hccx = NULL; + ibi = NULL; + ibi_count = 0; + first_storage_group_node = NULL; +} + +service_node_t::~service_node_t() +{ +} + +bRC +service_node_t::startBackupFile(exchange_fd_context_t *context, struct save_pkt *sp) +{ + HRESULT result; + char aname[256]; + + _DebugMessage(100, "startBackupNode_SERVICE state = %d\n", state); + switch(state) + { + case 0: + if (strcmp(PLUGIN_PATH_PREFIX_SERVICE, name) != 0) + { + _JobMessage(M_FATAL, "Invalid restore path specified, must start with /" PLUGIN_PATH_PREFIX_BASE "/" PLUGIN_PATH_PREFIX_SERVICE "/\n"); + return bRC_Error; + } + // convert name to a wide string + + _DebugMessage(100, "Calling HrESEBackupPrepare\n"); + wcstombs(aname, context->computer_name, 256); + _JobMessage(M_INFO, "Preparing Exchange Backup for %s\n", aname); + result = HrESEBackupPrepare(context->computer_name, + (WCHAR *)PLUGIN_PATH_PREFIX_SERVICE_W, &ibi_count, &ibi, &hccx); + if (result != 0) + { + _JobMessage(M_FATAL, "HrESEBackupPrepare failed with error 0x%08x - %s\n", result, ESEErrorMessage(result)); + return bRC_Error; + } + state = 1; + // fall through + case 1: + if (context->path_bits[level + 1] == NULL) + { + _DebugMessage(100, "No specific storage group specified - backing them all up\n"); + char *tmp = new char[wcslen(ibi[current_ibi].wszInstanceName) + 1]; + wcstombs(tmp, ibi[current_ibi].wszInstanceName, wcslen(ibi[current_ibi].wszInstanceName) + 1); + first_storage_group_node = new storage_group_node_t(tmp, this); + delete tmp; + _DebugMessage(100, "storage group name = %s\n", first_storage_group_node->name); + first_storage_group_node->ibi = &ibi[current_ibi]; + first_storage_group_node->hccx = hccx; + context->current_node = first_storage_group_node; + } + else + { + char *tmp = NULL; + for (current_ibi = 0; current_ibi < ibi_count; current_ibi++) + { + tmp = new char[wcslen(ibi[current_ibi].wszInstanceName) + 1]; + wcstombs(tmp, ibi[current_ibi].wszInstanceName, wcslen(ibi[current_ibi].wszInstanceName) + 1); + if (stricmp(tmp, context->path_bits[level + 1]) == 0) + break; + } + first_storage_group_node = new storage_group_node_t(tmp, this); + delete tmp; + if (current_ibi == ibi_count) + { + _JobMessage(M_FATAL, "Invalid Storage Group '%s'\n", context->path_bits[level + 1]); + return bRC_Error; + } + _DebugMessage(100, "storage group name = %s\n", first_storage_group_node->name); + first_storage_group_node->ibi = &ibi[current_ibi]; + first_storage_group_node->hccx = hccx; + context->current_node = first_storage_group_node; + } + break; + case 2: + time_t now = time(NULL); + sp->fname = full_path; + sp->link = full_path; + sp->statp.st_mode = 0700 | S_IFDIR; + sp->statp.st_ctime = now; + sp->statp.st_mtime = now; + sp->statp.st_atime = now; + sp->statp.st_size = 0; + sp->statp.st_nlink = 1; + //sp->statp.st_blocks = 0; + sp->type = FT_DIREND; + break; + } + _DebugMessage(100, "ending 
startBackupNode_SERVICE state = %d\n", state); + return bRC_OK; +} + +bRC +service_node_t::endBackupFile(exchange_fd_context_t *context) +{ + HRESULT result; + bRC retval = bRC_OK; + + _DebugMessage(100, "endBackupNode_SERVICE state = %d\n", state); + switch(state) + { + case 0: + // should never happen + break; + case 1: + // free node->storage_group_node + if (context->path_bits[level + 1] == NULL) + { + current_ibi++; + if (current_ibi == ibi_count) + state = 2; + } + else + state = 2; + retval = bRC_More; + break; + case 2: + _DebugMessage(100, "calling HrESEBackupEnd\n"); + result = HrESEBackupEnd(hccx); + if (result != 0) + { + _JobMessage(M_FATAL, "HrESEBackupEnd failed with error 0x%08x - %s\n", result, ESEErrorMessage(result)); + return bRC_Error; + } + + context->current_node = parent; + retval = bRC_OK; + break; + } + return retval; +} + +bRC +service_node_t::createFile(exchange_fd_context_t *context, struct restore_pkt *rp) +{ + storage_group_node_t *curr_sg, *prev_sg; + + _DebugMessage(0, "createFile_SERVICE state = %d\n", state); + if (strcmp(name, "Microsoft Information Store") != 0) + { + _JobMessage(M_FATAL, "Invalid restore path specified, must start with '/" PLUGIN_PATH_PREFIX_BASE "/" PLUGIN_PATH_PREFIX_SERVICE "/'\n", state); + return bRC_Error; + } + for(;;) + { + switch (state) + { + case 0: + if (context->path_bits[level + 1] == NULL) + { + state = 1; + break; + } + for (prev_sg = NULL, curr_sg = first_storage_group_node; curr_sg != NULL; prev_sg = curr_sg, curr_sg = curr_sg->next) + { + if (strcmp(curr_sg->name, context->path_bits[level + 1]) == 0) + { + break; + } + } + if (curr_sg == NULL) + { + curr_sg = new storage_group_node_t(bstrdup(context->path_bits[level + 1]), this); + if (prev_sg == NULL) + first_storage_group_node = curr_sg; + else + prev_sg->next = curr_sg; + } + context->current_node = curr_sg; + return bRC_OK; + case 1: + rp->create_status = CF_CREATED; + return bRC_OK; + } + } + return bRC_Error; +} + +bRC +service_node_t::endRestoreFile(exchange_fd_context_t *context) +{ + _DebugMessage(0, "endRestoreFile_SERVICE state = %d\n", state); + switch(state) + { + case 0: + return bRC_Error; + case 1: + context->current_node = parent; + return bRC_OK; + } + + return bRC_Error; +} diff --git a/src/win32/filed/plugins/exch_storage_group_node.c b/src/win32/filed/plugins/exch_storage_group_node.c new file mode 100644 index 00000000..195b84ce --- /dev/null +++ b/src/win32/filed/plugins/exch_storage_group_node.c @@ -0,0 +1,471 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Written by James Harper, July 2010 + * + * Used only in "old Exchange plugin" now deprecated. 
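For non-full jobs, the storage-group code below decides whether to ship each Exchange log file by comparing its last-modified time with the job's since-time. GetFileTime yields a FILETIME counting 100-nanosecond ticks since 1601-01-01, so the code subtracts 116444736000000000 (the 1601-to-1970 offset in those ticks) and divides by 10,000,000 to get Unix seconds. A minimal sketch of just that conversion:

    #include <windows.h>
    #include <time.h>

    /* Sketch only: convert a Win32 FILETIME (100 ns ticks since 1601-01-01)
     * to a Unix time_t (seconds since 1970-01-01 UTC). */
    static time_t filetime_to_unix(const FILETIME *ft)
    {
        __int64 ticks = ((__int64)ft->dwHighDateTime << 32) | ft->dwLowDateTime;
        ticks -= 116444736000000000LL;       /* epoch shift, in 100 ns units */
        return (time_t)(ticks / 10000000);   /* 100 ns ticks -> seconds */
    }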
+ */ + +#include "exchange-fd.h" + +storage_group_node_t::storage_group_node_t(char *name, node_t *parent_node) : node_t(name, NODE_TYPE_STORAGE_GROUP, parent_node) +{ + ibi = NULL; + store_node = NULL; + current_dbi = 0; + restore_environment = NULL; + saved_log_path = NULL; + next = NULL; +} + +storage_group_node_t::~storage_group_node_t() +{ +/* + safe_delete(dbi_node); + safe_delete(file_node); +*/ +} + +bRC +storage_group_node_t::startBackupFile(exchange_fd_context_t *context, struct save_pkt *sp) +{ + HRESULT result; + int len; + WCHAR *tmp_logfiles, *tmp_logfile_ptr; + char *tmp; + + for(;;) + { + _DebugMessage(100, "startBackupNode_STORAGE_GROUP state = %d, name = %s\n", state, name); + switch(state) + { + case 0: + current_dbi = 0; + store_node = NULL; + logfile_ptr = NULL; + if (context->job_level == 'F') + { + _DebugMessage(100, "Calling HrESEBackupSetup (BACKUP_TYPE_FULL)\n"); + result = HrESEBackupSetup(hccx, ibi->hInstanceId, BACKUP_TYPE_FULL); + state = 1; + } + else + { + _DebugMessage(100, "Calling HrESEBackupSetup (BACKUP_TYPE_LOGS_ONLY)\n"); + result = HrESEBackupSetup(hccx, ibi->hInstanceId, BACKUP_TYPE_LOGS_ONLY); + if (context->accurate) + state = 1; + else + state = 2; + } + if (result != 0) + { + _JobMessage(M_FATAL, "HrESEBackupSetup failed with error 0x%08x - %s\n", result, ESEErrorMessage(result)); + state = 999; + return bRC_Error; + } + break; + case 1: + if (context->path_bits[level + 1] == NULL) + { + _DebugMessage(100, "No specific database specified - backing them all up\n"); + DATABASE_BACKUP_INFO *dbi = &ibi->rgDatabase[current_dbi]; + char *tmp = new char[wcslen(dbi->wszDatabaseDisplayName) + 1]; + wcstombs(tmp, dbi->wszDatabaseDisplayName, wcslen(dbi->wszDatabaseDisplayName) + 1); + store_node = new store_node_t(tmp, this); + store_node->dbi = dbi; + store_node->hccx = hccx; + context->current_node = store_node; + } + else + { + DATABASE_BACKUP_INFO *dbi = NULL; + char *tmp = NULL; + for (current_dbi = 0; current_dbi < ibi->cDatabase; current_dbi++) + { + dbi = &ibi->rgDatabase[current_dbi]; + char *tmp = new char[wcslen(dbi->wszDatabaseDisplayName) + 1]; + wcstombs(tmp, dbi->wszDatabaseDisplayName, wcslen(dbi->wszDatabaseDisplayName) + 1); + if (stricmp(tmp, context->path_bits[level + 1]) == 0) + break; + safe_delete(tmp); + } + if (current_dbi == ibi->cDatabase) + { + _JobMessage(M_FATAL, "Invalid Database '%s'\n", context->path_bits[level + 1]); + return bRC_Error; + } + store_node = new store_node_t(tmp, this); + _DebugMessage(100, "Database name = %s\n", store_node->name); + safe_delete(tmp); + store_node->hccx = hccx; + store_node->dbi = dbi; + context->current_node = store_node; + } + return bRC_OK; + case 2: + _DebugMessage(100, "Calling HrESEBackupGetLogAndPatchFiles\n"); + result = HrESEBackupGetLogAndPatchFiles(hccx, &tmp_logfiles); + if (result != 0) + { + _JobMessage(M_FATAL, "HrESEBackupGetLogAndPatchFiles failed with error 0x%08x - %s\n", result, ESEErrorMessage(result)); + return bRC_Error; + } + for (len = 0, tmp_logfile_ptr = tmp_logfiles; *tmp_logfile_ptr != 0; tmp_logfile_ptr += wcslen(tmp_logfile_ptr) + 1) + { + len += wcslen(tmp_logfile_ptr) + 1; + } + logfiles = new WCHAR[len + 1]; + logfile_ptr = logfiles; + for (tmp_logfile_ptr = tmp_logfiles; *tmp_logfile_ptr != 0; tmp_logfile_ptr += wcslen(tmp_logfile_ptr) + 1) + { + // check file modification date + HANDLE handle; + FILETIME modified_time; + //int64_t tmp_time; + __int64 tmp_time; + bool include_file; + include_file = false; + handle = INVALID_HANDLE_VALUE; + if 
(context->job_since == 0) + include_file = true; + if (!include_file) + { + handle = CreateFileW(tmp_logfile_ptr, GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL); + if (handle == INVALID_HANDLE_VALUE) + { + //_JobMessage(M_WARNING, "Could not open '%S' to check last modified date (0x%08x), including anyway\n", tmp_logfile_ptr, GetLastError()); + include_file = true; + } + } + if (!include_file) + { + if (GetFileTime(handle, NULL, NULL, &modified_time) == 0) + { + //_JobMessage(M_WARNING, "Could not check last modified date for '%S' (0x%08x), including anyway\n", tmp_logfile_ptr, GetLastError()); + include_file = true; + } + } + if (!include_file) + { + tmp_time = (((int64_t)modified_time.dwHighDateTime) << 32) | modified_time.dwLowDateTime; + tmp_time -= 116444736000000000LL; + tmp_time /= 10000000; + if (tmp_time > context->job_since) + { + include_file = true; + } + } + if (include_file) + { + memcpy(logfile_ptr, tmp_logfile_ptr, (wcslen(tmp_logfile_ptr) + 1) * 2); + logfile_ptr += wcslen(logfile_ptr) + 1; + //_DebugMessage(100, "Including file %S\n", logfile_ptr); + } +#if 0 +/* this is handled via checkFile now */ + else + { + if (context->accurate) { + tmp = new char[strlen(full_path) + wcslen(tmp_logfile_ptr) + 1]; + strcpy(tmp, full_path); + wcstombs(tmp + strlen(full_path), tmp_logfile_ptr, wcslen(tmp_logfile_ptr) + 1); + bfuncs->setBaculaValue(context->bpContext, bVarFileSeen, (void *)tmp); + delete tmp; + } + } +#endif + + if (handle != INVALID_HANDLE_VALUE) + CloseHandle(handle); + + } + *logfile_ptr = 0; + logfile_ptr = logfiles; + state = 3; + break; + case 3: + tmp = new char[wcslen(logfile_ptr) + 1]; + wcstombs(tmp, logfile_ptr, wcslen(logfile_ptr) + 1); + file_node = new file_node_t(tmp, this); + delete tmp; + file_node->hccx = hccx; + file_node->filename = logfile_ptr; + context->current_node = file_node; + return bRC_OK; + case 4: + time_t now = time(NULL); + sp->fname = full_path; + sp->link = full_path; + _DebugMessage(100, "fname = %s\n", sp->fname); + sp->statp.st_mode = 0700 | S_IFDIR; + sp->statp.st_ctime = now; + sp->statp.st_mtime = now; + sp->statp.st_atime = now; + sp->statp.st_size = 0; + //sp->statp.st_blocks = 0; + sp->type = FT_DIREND; + return bRC_OK; + } + } +} + +bRC +storage_group_node_t::endBackupFile(exchange_fd_context_t *context) +{ + HRESULT result; + bRC retval = bRC_Error; + + _DebugMessage(100, "endBackupNode_STORAGE_GROUP state = %d\n", state); + + switch(state) + { + case 0: + // should never happen + break; + case 1: + // free node->storage_group_node + if (context->path_bits[level + 1] == NULL) + { + current_dbi++; + if (current_dbi == ibi->cDatabase) + state = 2; + } + else + state = 2; + retval = bRC_More; + break; + case 2: + // should never happen + break; + case 3: + safe_delete(file_node); + logfile_ptr += wcslen(logfile_ptr) + 1; + if (*logfile_ptr == 0) + state = 4; + retval = bRC_More; + break; + case 4: + if (context->truncate_logs) { + _DebugMessage(100, "Calling HrESEBackupTruncateLogs\n"); + result = HrESEBackupTruncateLogs(hccx); + if (result != 0) { + _JobMessage(M_FATAL, "HrESEBackupTruncateLogs failed with error 0x%08x - %s\n", result, ESEErrorMessage(result)); + } else { + _JobMessage(M_INFO, "Truncated database logs for Storage Group %s\n", name); + } + } else { + _JobMessage(M_INFO, "Did NOT truncate database logs for Storage Group %s\n", name); + } + _DebugMessage(100, "Calling HrESEBackupInstanceEnd\n"); + result = HrESEBackupInstanceEnd(hccx, ESE_BACKUP_INSTANCE_END_SUCCESS); + if 
(result != 0) { + _JobMessage(M_FATAL, "HrESEBackupInstanceEnd failed with error 0x%08x - %s\n", result, ESEErrorMessage(result)); + return bRC_Error; + } + retval = bRC_OK; + context->current_node = parent; + break; + } + return retval; +} + +bRC +storage_group_node_t::createFile(exchange_fd_context_t *context, struct restore_pkt *rp) +{ + HRESULT result; + int len; + + _DebugMessage(0, "createFile_STORAGE_GROUP state = %d\n", state); + + if (strcmp(context->path_bits[level], name) != 0) { + _DebugMessage(0, "Different storage group - switching back to parent\n", state); + saved_log_path = new WCHAR[wcslen(restore_environment->m_wszRestoreLogPath) + 1]; + wcscpy(saved_log_path, restore_environment->m_wszRestoreLogPath); + _DebugMessage(100, "Calling HrESERestoreSaveEnvironment\n"); + result = HrESERestoreSaveEnvironment(hccx); + if (result != 0) { + _JobMessage(M_FATAL, "HrESERestoreSaveEnvironment failed with error 0x%08x - %s\n", result, ESEErrorMessage(result)); + state = 999; + rp->create_status = CF_CREATED; + return bRC_OK; + } + _DebugMessage(100, "Calling HrESERestoreClose\n"); + result = HrESERestoreClose(hccx, RESTORE_CLOSE_NORMAL); + if (result != 0) { + _JobMessage(M_FATAL, "HrESERestoreClose failed with error 0x%08x - %s\n", result, ESEErrorMessage(result)); + state = 999; + rp->create_status = CF_CREATED; + return bRC_OK; + } + context->current_node = parent; + return bRC_OK; + } + if (saved_log_path != NULL) { + _DebugMessage(0, "Calling HrESERestoreReopen\n"); + result = HrESERestoreReopen(context->computer_name, service_name, saved_log_path, &hccx); + if (result != 0) { + _JobMessage(M_FATAL, "HrESERestoreReopen failed with error 0x%08x - %s\n", result, ESEErrorMessage(result)); + state = 999; + saved_log_path = NULL; + rp->create_status = CF_CREATED; + return bRC_OK; + } + _DebugMessage(0, "Calling HrESERestoreGetEnvironment\n"); + result = HrESERestoreGetEnvironment(hccx, &restore_environment); + if (result != 0) { + _JobMessage(M_FATAL, "HrESERestoreGetEnvironment failed with error 0x%08x - %s\n", result, ESEErrorMessage(result)); + state = 999; + saved_log_path = NULL; + rp->create_status = CF_CREATED; + return bRC_OK; + } + saved_log_path = NULL; + } + + for (;;) { + switch (state) { + case 0: + if (context->path_bits[level + 2] == NULL) { + _JobMessage(M_ERROR, "Unexpected log file '%s%s' - expecting database\n", full_path, context->path_bits[level + 1]); + state = 999; + break; + } + service_name = new WCHAR[strlen(parent->name) + 1]; + storage_group_name = new WCHAR[strlen(name) + 1]; + mbstowcs(service_name, parent->name, strlen(parent->name) + 1); + mbstowcs(storage_group_name, name, strlen(name) + 1); + _DebugMessage(0, "Calling HrESERestoreOpen\n"); + result = HrESERestoreOpen(context->computer_name, service_name, storage_group_name, NULL, &hccx); + if (result != 0) { + _JobMessage(M_FATAL, "HrESERestoreOpen failed with error 0x%08x - %s\n", result, ESEErrorMessage(result)); + state = 999; + break; + } + _DebugMessage(0, "Calling HrESERestoreGetEnvironment\n"); + result = HrESERestoreGetEnvironment(hccx, &restore_environment); + if (result != 0) { + _JobMessage(M_FATAL, "HrESERestoreGetEnvironment failed with error 0x%08x - %s\n", result, ESEErrorMessage(result)); + state = 999; + break; + } + state = 1; + break; + case 1: + if (context->path_bits[level + 2] == NULL) { + state = 2; + break; + } + store_node = new store_node_t(bstrdup(context->path_bits[level + 1]), this); + store_node->hccx = hccx; + context->current_node = store_node; + return bRC_OK; + case 
2: + if (context->path_bits[level + 2] != NULL) { + _JobMessage(M_ERROR, "Unexpected file '%s'\n", full_path); + state = 999; + break; + } + if (context->path_bits[level + 1] == NULL) { + state = 3; + break; + } + state = 2; + file_node = new file_node_t(bstrdup(context->path_bits[level + 1]), this); + file_node->hccx = hccx; + int i; + for (i = strlen(file_node->name) - 1; i >= 0; i--) { + if (file_node->name[i] == '\\') { + i++; + break; + } + } + len = wcslen(restore_environment->m_wszRestoreLogPath) + strlen(file_node->name + i) + 1 + 1; + file_node->filename = new WCHAR[len]; + wcscpy(file_node->filename, restore_environment->m_wszRestoreLogPath); + wcscat(file_node->filename, L"\\"); + mbstowcs(&file_node->filename[wcslen(file_node->filename)], file_node->name + i, strlen(file_node->name + i) + 1); + context->current_node = file_node; + return bRC_OK; + case 3: + if (rp->type != FT_DIREND) { + _JobMessage(M_ERROR, "Unexpected file '%s'\n", full_path); + state = 999; + break; + } + // must be the storage group node + _DebugMessage(100, "Calling HrESERestoreSaveEnvironment\n"); + result = HrESERestoreSaveEnvironment(hccx); + if (result != 0) { + _JobMessage(M_FATAL, "HrESERestoreSaveEnvironment failed with error 0x%08x - %s\n", result, ESEErrorMessage(result)); + state = 999; + break; + } + + _DebugMessage(100, "Calling HrESERestoreComplete\n"); + result = HrESERestoreComplete(hccx, restore_environment->m_wszRestoreLogPath, + restore_environment->m_wszRestoreLogPath, storage_group_name, ESE_RESTORE_COMPLETE_ATTACH_DBS); + if (result != 0) { + _JobMessage(M_FATAL, "HrESERestoreComplete failed with error 0x%08x - %s\n", result, ESEErrorMessage(result)); + _DebugMessage(100, "Calling HrESERestoreClose\n"); + result = HrESERestoreClose(hccx, RESTORE_CLOSE_NORMAL); + state = 999; + break; + } else { + _JobMessage(M_INFO, "Storage Group '%s' restored successfully\n", name); + } + + _DebugMessage(100, "Calling HrESERestoreClose\n"); + result = HrESERestoreClose(hccx, RESTORE_CLOSE_NORMAL); + if (result != 0) { + _JobMessage(M_FATAL, "HrESERestoreClose failed with error 0x%08x - %s\n", result, ESEErrorMessage(result)); + state = 999; + break; + } + + rp->create_status = CF_CREATED; + return bRC_OK; + case 999: + rp->create_status = CF_CREATED; + return bRC_OK; + } + } +} + +bRC +storage_group_node_t::endRestoreFile(exchange_fd_context_t *context) +{ + _DebugMessage(0, "endRestoreFile_STORAGE_GROUP state = %d\n", state); + switch (state) { + case 0: + return bRC_Error; + case 1: + return bRC_OK; + case 2: + return bRC_OK; + case 3: + context->current_node = parent; + return bRC_OK; + case 999: + context->current_node = parent; + return bRC_OK; + } + + return bRC_Error; +} diff --git a/src/win32/filed/plugins/exch_store_node.c b/src/win32/filed/plugins/exch_store_node.c new file mode 100644 index 00000000..ab47993f --- /dev/null +++ b/src/win32/filed/plugins/exch_store_node.c @@ -0,0 +1,237 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. 
+ + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Written by James Harper, July 2010 + */ + +#include "exchange-fd.h" + +store_node_t::store_node_t(char *name, node_t *parent_node) : node_t(name, NODE_TYPE_STORE, parent_node) +{ + dbi = NULL; + hccx = NULL; + dbi_node = NULL; + file_node = NULL; +} + +store_node_t::~store_node_t() +{ + safe_delete(dbi_node); + safe_delete(file_node); +} + +bRC +store_node_t::startBackupFile(exchange_fd_context_t *context, struct save_pkt *sp) +{ + char *tmp; + + _DebugMessage(100, "startBackupNode_STORE state = %d\n", state); + + switch(state) + { + case 0: + stream_ptr = dbi->wszDatabaseStreams; + state = 1; + // fall through + case 1: + dbi_node = new dbi_node_t((char *)"DatabaseBackupInfo", this); + dbi_node->dbi = dbi; + context->current_node = dbi_node; + break; + case 2: + tmp = new char[wcslen(stream_ptr) + 1]; + wcstombs(tmp, stream_ptr, wcslen(stream_ptr) + 1); + file_node = new file_node_t(tmp, this); + file_node->hccx = hccx; + file_node->filename = stream_ptr; + context->current_node = file_node; + break; + case 3: + if (context->job_level == 'F') + { + time_t now = time(NULL); + sp->fname = full_path; + sp->link = full_path; + sp->statp.st_mode = 0700 | S_IFDIR; + sp->statp.st_ctime = now; + sp->statp.st_mtime = now; + sp->statp.st_atime = now; + sp->statp.st_size = 0; + sp->type = FT_DIREND; + } + else + { + bfuncs->setBaculaValue(context->bpContext, bVarFileSeen, (void *)full_path); + return bRC_Seen; + } + break; + } + + return bRC_OK; +} + +bRC +store_node_t::endBackupFile(exchange_fd_context_t *context) +{ + _DebugMessage(100, "endBackupNode_STORE state = %d\n", state); + bRC retval = bRC_OK; + + switch(state) + { + case 0: + // should never happen + break; + case 1: + state = 2; + retval = bRC_More; + break; + case 2: + safe_delete(file_node); + stream_ptr += wcslen(stream_ptr) + 1; + if (*stream_ptr == 0) + state = 3; + retval = bRC_More; + break; + case 3: + //delete dbi_node; + context->current_node = parent; + break; + } + return retval; +} + +bRC +store_node_t::createFile(exchange_fd_context_t *context, struct restore_pkt *rp) +{ + _DebugMessage(0, "createFile_STORE state = %d\n", state); + + if (strcmp(context->path_bits[level - 1], parent->name) != 0) + { + _DebugMessage(0, "Different storage group - switching back to parent\n", state); + context->current_node = parent; + return bRC_OK; + } + for (;;) + { + switch (state) + { + case 0: + if (strcmp("DatabaseBackupInfo", context->path_bits[level + 1]) != 0) + { + _JobMessage(M_FATAL, "DatabaseBackupInfo file must exist and must be first in directory\n"); + state = 999; + break; + } + dbi_node = new dbi_node_t(bstrdup(context->path_bits[level + 1]), this); + context->current_node = dbi_node; + return bRC_OK; + case 1: + if (strcmp(context->path_bits[level - 1], parent->name) != 0) + { + _JobMessage(M_ERROR, "Unexpected Storage Group Change\n"); + state = 999; + break; + } + + if (*stream_ptr != 0) + { + // verify that stream_ptr == context->path_bits[level + 1]; + _DebugMessage(150, "stream_ptr = %S\n", stream_ptr); + _DebugMessage(150, "out_stream_ptr = %S\n", out_stream_ptr); + file_node = new file_node_t(bstrdup(context->path_bits[level + 1]), this); + file_node->hccx = hccx; + file_node->filename = out_stream_ptr; + context->current_node = file_node; + return bRC_OK; + } + else + { + _JobMessage(M_ERROR, "Extra file found '%s'\n", full_path); + state = 999; + break; + } + case 2: + if (rp->type != FT_DIREND) + { + _JobMessage(M_ERROR, "Unexpected file '%s'\n", 
full_path); + state = 999; + break; + } + rp->create_status = CF_CREATED; + return bRC_OK; + case 999: + if (strcmp(context->path_bits[level], name) != 0) + { + _DebugMessage(0, "End of Store when in error state - switching back to parent\n", state); + context->current_node = parent; + return bRC_OK; + } + rp->create_status = CF_CREATED; + return bRC_OK; + } + } +} + +bRC +store_node_t::endRestoreFile(exchange_fd_context_t *context) +{ + HRESULT result; + + _DebugMessage(0, "endRestoreFile_STORE state = %d\n", state); + for (;;) + { + switch (state) + { + case 0: + state = 1; + _DebugMessage(0, "Calling HrESERestoreAddDatabase\n"); + result = HrESERestoreAddDatabase(hccx, dbi_node->restore_display_name, dbi_node->restore_guid, dbi_node->restore_input_streams, &dbi_node->restore_output_streams); + if (result != 0) + { + _JobMessage(M_FATAL, "HrESERestoreAddDatabase failed with error 0x%08x - %s\n", result, ESEErrorMessage(result)); + state = 999; + break; + } + stream_ptr = dbi_node->restore_input_streams; + out_stream_ptr = dbi_node->restore_output_streams; + return bRC_OK; + case 1: + if (*stream_ptr != 0) + { + safe_delete(file_node); + file_node = NULL; + stream_ptr += wcslen(stream_ptr) + 1; + out_stream_ptr += wcslen(out_stream_ptr) + 1; + if (*stream_ptr == 0) + state = 2; + return bRC_OK; + } + else + { + state = 999; + break; + } + case 2: + context->current_node = parent; + return bRC_OK; + case 999: + return bRC_OK; + } + } +} diff --git a/src/win32/filed/plugins/exchange-fd.c b/src/win32/filed/plugins/exchange-fd.c new file mode 100644 index 00000000..001dd43c --- /dev/null +++ b/src/win32/filed/plugins/exchange-fd.c @@ -0,0 +1,528 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Written by James Harper, July 2010 + * + * Used only in "old Exchange plugin" now deprecated. 
+ */ + +#include "exchange-fd.h" + +/* Pointers to Bacula functions */ +bFuncs *bfuncs = NULL; +bInfo *binfo = NULL; + +#define PLUGIN_LICENSE "Bacula" +#define PLUGIN_AUTHOR "James Harper" +#define PLUGIN_DATE "September 2008" +#define PLUGIN_VERSION "1" +#define PLUGIN_DESCRIPTION "Exchange Plugin" + +static pInfo pluginInfo = { + sizeof(pluginInfo), + FD_PLUGIN_INTERFACE_VERSION, + FD_PLUGIN_MAGIC, + PLUGIN_LICENSE, + PLUGIN_AUTHOR, + PLUGIN_DATE, + PLUGIN_VERSION, + PLUGIN_DESCRIPTION +}; + +/* Forward referenced functions */ +static bRC newPlugin(bpContext *ctx); +static bRC freePlugin(bpContext *ctx); +static bRC getPluginValue(bpContext *ctx, pVariable var, void *value); +static bRC setPluginValue(bpContext *ctx, pVariable var, void *value); +static bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value); +static bRC startBackupFile(bpContext *ctx, struct save_pkt *sp); +static bRC endBackupFile(bpContext *ctx); +static bRC pluginIO(bpContext *ctx, struct io_pkt *io); +static bRC startRestoreFile(bpContext *ctx, const char *cmd); +static bRC endRestoreFile(bpContext *ctx); +static bRC createFile(bpContext *ctx, struct restore_pkt *rp); +static bRC setFileAttributes(bpContext *ctx, struct restore_pkt *rp); +static bRC checkFile(bpContext *ctx, char *fname); + +static pFuncs pluginFuncs = { + sizeof(pluginFuncs), + FD_PLUGIN_INTERFACE_VERSION, + + /* Entry points into plugin */ + newPlugin, /* new plugin instance */ + freePlugin, /* free plugin instance */ + getPluginValue, + setPluginValue, + handlePluginEvent, + startBackupFile, + endBackupFile, + startRestoreFile, + endRestoreFile, + pluginIO, + createFile, + setFileAttributes, + checkFile, + NULL /* No ACL/XATTR */ +}; + +extern "C" { + +static char ** +splitString(char *string, char split, int maxParts, int *count) +{ + char **RetVal; + char *first; + char *last; + + //KdPrint((__DRIVER_NAME " a\n")); + + *count = 0; + + RetVal = (char **)malloc((maxParts + 1) * sizeof(char *)); + last = string; + do + { + if (*count == maxParts) + break; + first = last; + for (last = first; *last != '\0' && *last != split; last++); + RetVal[*count] = (char *)malloc(last - first + 1); + strncpy(RetVal[*count], first, last - first); + RetVal[*count][last - first] = 0; + (*count)++; + if (*last == split) + last++; + } while (*last != 0); + RetVal[*count] = NULL; + return RetVal; +} + +bRC DLL_IMP_EXP +loadPlugin(bInfo *lbinfo, bFuncs *lbfuncs, pInfo **pinfo, pFuncs **pfuncs) +{ + bRC retval; + bfuncs = lbfuncs; /* set Bacula funct pointers */ + binfo = lbinfo; + *pinfo = &pluginInfo; + *pfuncs = &pluginFuncs; + retval = loadExchangeApi(); + if (retval != bRC_OK) { + printf("Cannot load Exchange DLL\n"); + return retval; + } + return retval; +} + +bRC DLL_IMP_EXP +unloadPlugin() +{ + return bRC_OK; +} + +} +/* +void * +b_malloc(const char *file, int lone, size_t size) +{ + return NULL; +} + +void * +sm_malloc(const char *file, int lone, size_t size) +{ + return NULL; +} +*/ + +static bRC newPlugin(bpContext *ctx) +{ + exchange_fd_context_t *context; + bRC retval = bRC_OK; + DWORD size; + + int JobId = 0; + ctx->pContext = new exchange_fd_context_t; + context = (exchange_fd_context_t *)ctx->pContext; + context->bpContext = ctx; + context->job_since = 0; + context->notrunconfull_option = false; + context->plugin_active = false; + bfuncs->getBaculaValue(ctx, bVarJobId, (void *)&JobId); + _DebugMessage(0, "newPlugin JobId=%d\n", JobId); + bfuncs->registerBaculaEvents(ctx, 1, 2, 0); + size = MAX_COMPUTERNAME_LENGTH + 1; + context->computer_name 
= new WCHAR[size]; + /* + GetComputerNameW(context->computer_name, &size); + */ + GetComputerNameExW(ComputerNameNetBIOS, context->computer_name, &size); + context->current_node = NULL; + context->root_node = NULL; + return retval; +} + +static bRC freePlugin(bpContext *ctx) +{ + exchange_fd_context_t *context = (exchange_fd_context_t *)ctx->pContext; + int JobId = 0; + bfuncs->getBaculaValue(ctx, bVarJobId, (void *)&JobId); + _DebugMessage(100, "freePlugin JobId=%d\n", JobId); + delete context; + return bRC_OK; +} + +static bRC getPluginValue(bpContext *ctx, pVariable var, void *value) +{ + exchange_fd_context_t *context = (exchange_fd_context_t *)ctx->pContext; + _DebugMessage(100, "getPluginValue var=%d\n", var); + return bRC_OK; +} + +static bRC setPluginValue(bpContext *ctx, pVariable var, void *value) +{ + exchange_fd_context_t *context = (exchange_fd_context_t *)ctx->pContext; + _DebugMessage(100, "setPluginValue var=%d\n", var); + return bRC_OK; +} + +static bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value) +{ + exchange_fd_context_t *context = (exchange_fd_context_t *)ctx->pContext; + char *name; + int i, intval; + int accurate; + char *command; + char *plugin_name; + + switch (event->eventType) { + case bEventJobStart: + _DebugMessage(0, "JobStart=%s\n", (char *)value); + context->plugin_active = false; + break; + case bEventJobEnd: + _DebugMessage(0, "JobEnd\n"); + break; + case bEventPluginCommand: + _DebugMessage(0, "bEventPluginCommand %s\n", value); + command = bstrdup((char *)value); + /* this isn't really unused */ + plugin_name = strtok((char *)command, ":"); + if (strcmp(plugin_name, "exchange") != 0) { + context->plugin_active = false; + } else { + context->plugin_active = true; + } + free(command); + break; + case bEventStartBackupJob: + if (!context->plugin_active) { + break; + } + _DebugMessage(0, "BackupStart\n"); + bfuncs->getBaculaValue(ctx, bVarAccurate, (void *)&accurate); + context->accurate = accurate; + context->job_type = JOB_TYPE_BACKUP; + // level should have been specified by now - check it + // if level is D or I, since should have been specified too + switch (context->job_level) { + case 'F': + if (context->notrunconfull_option) { + context->truncate_logs = false; + } else { + context->truncate_logs = true; + } + break; + case 'D': + context->truncate_logs = false; + break; + case 'I': + context->truncate_logs = false; + break; + default: + _DebugMessage(0, "Invalid job level %c\n", context->job_level); + return bRC_Error; + } + break; + case bEventEndBackupJob: + _DebugMessage(0, "BackupEnd\n"); + if (!context->plugin_active) { + break; + } + break; + case bEventLevel: + /* We don't know if the plugin is active here yet */ + intval = (intptr_t)value; + _DebugMessage(0, "JobLevel=%c %d\n", intval, intval); + context->job_level = intval; + break; + case bEventSince: + /* We don't know if the plugin is active here yet */ + intval = (intptr_t)value; + _DebugMessage(0, "since=%d\n", intval); + context->job_since = (time_t)value; + break; + case bEventStartRestoreJob: + _DebugMessage(0, "StartRestoreJob\n"); + context->job_type = JOB_TYPE_RESTORE; + context->plugin_active = true; + break; + case bEventEndRestoreJob: + if (!context->plugin_active) { + break; + } + _DebugMessage(0, "EndRestoreJob\n"); + context->plugin_active = false; + break; + + /* Plugin command e.g. 
plugin = ::command */ + case bEventRestoreCommand: + _DebugMessage(0, "restore\n"); // command=%s\n", (char *)value); + if (!context->plugin_active) { + break; + } + break; + + case bEventBackupCommand: + if (!context->plugin_active) { + break; + } + { + _DebugMessage(0, "backup command=%s\n", (char *)value); + char *command = new char[strlen((char *)value) + 1]; + strcpy(command, (char *)value); + char *plugin_name = strtok((char *)command, ":"); + char *path = strtok(NULL, ":"); + char *option; + while ((option = strtok(NULL, ":")) != NULL) { + _DebugMessage(100, "option %s\n", option); + if (stricmp(option, "notrunconfull") == 0) { + context->notrunconfull_option = true; + } else { + _JobMessage(M_WARNING, "Unknown plugin option '%s'\n", option); + } + } + _DebugMessage(0, "name = %s\n", plugin_name); + _DebugMessage(0, "path = %s\n", path); + if (*path != '/') { + _JobMessage(M_FATAL, "Path does not begin with a '/'\n"); + return bRC_Error; + } + + for (i = 0; i < 6; i++) { + context->path_bits[i] = NULL; + } + + char *path_bit = strtok(path, "/"); + for (i = 0; path_bit != NULL && i < 6; i++) { + context->path_bits[i] = new char[strlen(path_bit) + 1]; + strcpy(context->path_bits[i], path_bit); + path_bit = strtok(NULL, "/"); + } + + if (i < 2 || i > 4) { + _JobMessage(M_FATAL, "Invalid plugin backup path\n"); + return bRC_Error; + } + context->root_node = new root_node_t(context->path_bits[0]); + context->current_node = context->root_node; + + } + break; + + case bEventVssBeforeCloseRestore: + break; + case bEventComponentInfo: + break; + + default: + _DebugMessage(0, "Ignored event=%d\n", event->eventType); + break; + } + bfuncs->getBaculaValue(ctx, bVarFDName, (void *)&name); + return bRC_OK; +} + +static bRC +startBackupFile(bpContext *ctx, struct save_pkt *sp) +{ + bRC retval; + exchange_fd_context_t *context = (exchange_fd_context_t *)ctx->pContext; + node_t *current_node; + + _DebugMessage(100, "startBackupFile, cmd = %s\n", sp->cmd); + if (sp->pkt_size != sizeof(struct save_pkt) || sp->pkt_end != sizeof(struct save_pkt)) { + _JobMessage(M_FATAL, "save_pkt size mismatch - sizeof(struct save_pkt) = %d, pkt_size = %d, pkt_end = %d\n", sizeof(struct save_pkt), sp->pkt_size, sp->pkt_end); + return bRC_Error; + } + + //context->root_node = new root_node_t(PLUGIN_PATH_PREFIX_BASE); + //context->current_node = context->root_node; + do { + current_node = context->current_node; + retval = current_node->startBackupFile(context, sp); + if (retval == bRC_Seen) + endBackupFile(ctx); + } while (current_node != context->current_node); + _DebugMessage(100, "startBackupFile done - type = %d, fname = %s, retval = %d\n", sp->type, sp->fname, retval); + return retval; +} + +static bRC endBackupFile(bpContext *ctx) +{ + bRC retval; + exchange_fd_context_t *context = (exchange_fd_context_t *)ctx->pContext; + node_t *current_node; + + _DebugMessage(100, "endBackupFile\n"); + + do { + current_node = context->current_node; + retval = current_node->endBackupFile(context); + } while (current_node != context->current_node); + _DebugMessage(100, "endBackupFile done - retval = %d\n", retval); + return retval; +} + +/* + * Do actual I/O + */ +static bRC pluginIO(bpContext *ctx, struct io_pkt *io) +{ + bRC retval = bRC_OK; + exchange_fd_context_t *context = (exchange_fd_context_t *)ctx->pContext; + + if (io->pkt_size != sizeof(struct io_pkt) || io->pkt_end != sizeof(struct io_pkt)) + { + _JobMessage(M_FATAL, "io_pkt size mismatch - sizeof(struct io_pkt) = %d, pkt_size = %d, pkt_end = %d\n", sizeof(struct 
io_pkt), io->pkt_size, io->pkt_end); + } + + switch(io->func) { + case IO_OPEN: + _DebugMessage(100, "IO_OPEN\n"); + retval = context->current_node->pluginIoOpen(context, io); + break; + case IO_READ: + //_DebugMessage(100, "IO_READ buf=%p len=%d\n", io->buf, io->count); + retval = context->current_node->pluginIoRead(context, io); + break; + case IO_WRITE: + //_DebugMessage(100, "IO_WRITE buf=%p len=%d\n", io->buf, io->count); + retval = context->current_node->pluginIoWrite(context, io); + break; + case IO_CLOSE: + _DebugMessage(100, "IO_CLOSE\n"); + retval = context->current_node->pluginIoClose(context, io); + break; + } + return retval; +} + +static bRC startRestoreFile(bpContext *ctx, const char *cmd) +{ + exchange_fd_context_t *context = (exchange_fd_context_t *)ctx->pContext; + _DebugMessage(100, "startRestoreFile\n"); + + return bRC_OK; +} + +static bRC endRestoreFile(bpContext *ctx) +{ + bRC retval; + exchange_fd_context_t *context = (exchange_fd_context_t *)ctx->pContext; + node_t *current_node; + + _DebugMessage(100, "endRestoreFile\n"); + + do { + current_node = context->current_node; + retval = current_node->endRestoreFile(context); + } while (current_node != context->current_node); + _DebugMessage(100, "endRestoreFile done - retval = %d\n", retval); + return retval; +} + +static bRC createFile(bpContext *ctx, struct restore_pkt *rp) +{ + bRC retval; + exchange_fd_context_t *context = (exchange_fd_context_t *)ctx->pContext; + node_t *current_node; + char **path_bits; + int count; + int i; + + + _DebugMessage(100, "createFile - type = %d, ofname = %s\n", rp->type, rp->ofname); + if (rp->pkt_size != sizeof(struct restore_pkt) || rp->pkt_end != sizeof(struct restore_pkt)) + { + _JobMessage(M_FATAL, "restore_pkt size mismatch - sizeof(struct restore_pkt) = %d, pkt_size = %d, pkt_end = %d\n", sizeof(struct restore_pkt), rp->pkt_size, rp->pkt_end); + return bRC_Error; + } + + for (i = 0; i < 6; i++) + { + context->path_bits[i] = NULL; + } + + path_bits = splitString((char *)rp->ofname, '/', 7, &count); + + _DebugMessage(100, "count = %d\n", count); + + for (i = 1; i < count; i++) + { + _DebugMessage(150, "%d = '%s'\n", i, path_bits[i]); + context->path_bits[i - 1] = path_bits[i]; + } + + if (context->current_node == NULL) + { + context->root_node = new root_node_t(context->path_bits[0]); + context->current_node = context->root_node; + } + + do { + current_node = context->current_node; + retval = current_node->createFile(context, rp); + } while (current_node != context->current_node); + _DebugMessage(100, "createFile done - retval = %d\n", retval); + return retval; +} + +static bRC setFileAttributes(bpContext *ctx, struct restore_pkt *rp) +{ + exchange_fd_context_t *context = (exchange_fd_context_t *)ctx->pContext; + _DebugMessage(100, "setFileAttributes\n"); + return bRC_OK; +} + +/* + * At the end of the job, the accurate code loops over all files + * that are in the accurate list but not marked as seen during the + * backup. Unless the checkFile routine returns bRC_Seen, + * these files look to be deleted and hence will not be + * returned during a restore. 
+ */ +static bRC checkFile(bpContext *ctx, char *fname) +{ + exchange_fd_context_t *context = (exchange_fd_context_t *)ctx->pContext; + if (context->plugin_active) { + _DebugMessage(100, "marked as seen %s\n", fname); + return bRC_Seen; + } + return bRC_OK; +} diff --git a/src/win32/filed/plugins/exchange-fd.def b/src/win32/filed/plugins/exchange-fd.def new file mode 100644 index 00000000..e79095cb --- /dev/null +++ b/src/win32/filed/plugins/exchange-fd.def @@ -0,0 +1,15 @@ +LIBRARY bacula.dll +EXPORTS + +; compat.o +;_Z10open_bpipePciPKc +;_Z11close_bpipeP5BPIPE +;_Z11close_wpipeP5BPIPE + +;console_command DATA +;b_plugin_list DATA +;plugin_bopen DATA +;plugin_bclose DATA +;plugin_bwrite DATA +;plugin_bread DATA +;plugin_blseek DATA diff --git a/src/win32/filed/plugins/exchange-fd.h b/src/win32/filed/plugins/exchange-fd.h new file mode 100644 index 00000000..793c2a49 --- /dev/null +++ b/src/win32/filed/plugins/exchange-fd.h @@ -0,0 +1,157 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Written by James Harper, July 2010 + */ + +#ifndef _EXCHANGE_FD_H +#define _EXCHANGE_FD_H + +#define BUILD_PLUGIN + +#if defined(BUILDING_DLL) +# define DLL_IMP_EXP __declspec(dllexport) +#elif defined(USING_DLL) +# define DLL_IMP_EXP __declspec(dllimport) +#else +# define DLL_IMP_EXP +#endif + +#if defined(HAVE_WIN32) +#if defined(HAVE_MINGW) +#include "winhdrs.h" +#else +#include "winconfig.h" +#endif +#else +#include "config.h" +#endif +#define __CONFIG_H + +enum { + /* Keep M_ABORT=1 for dlist.h */ + M_ABORT = 1, /* MUST abort immediately */ + M_DEBUG, /* debug message */ + M_FATAL, /* Fatal error, stopping job */ + M_ERROR, /* Error, but recoverable */ + M_WARNING, /* Warning message */ + M_INFO, /* Informational message */ + M_SAVED, /* Info on saved file */ + M_NOTSAVED, /* Info on notsaved file */ + M_SKIPPED, /* File skipped during backup by option setting */ + M_MOUNT, /* Mount requests */ + M_ERROR_TERM, /* Error termination request (no dump) */ + M_TERM, /* Terminating daemon normally */ + M_RESTORED, /* ls -l of restored files */ + M_SECURITY, /* security violation */ + M_ALERT, /* tape alert messages */ + M_VOLMGMT /* Volume management messages */ +}; + +#define FT_REG 3 +#define FT_DIREND 5 + +#define _REENTRANT 1 +#define _THREAD_SAFE 1 +#define _POSIX_PTHREAD_SEMANTICS 1 + +#include + +#include +#include +#include +#include +#ifdef HAVE_SYS_BITYPES_H +#include +#endif + +//#include "bacula.h" +#include "compat.h" +#include "bc_types.h" + +typedef int64_t boffset_t; +//#define bstrdup(str) strcpy((char *)bmalloc(strlen((str))+1),(str)) +#define bstrdup(str) strdup(str) + +#include "fd_plugins.h" +#include "exch_api.h" + +#if defined(HAVE_WIN32) +#include "winapi.h" +#include "winhost.h" +#else +#include "host.h" +#endif + +#define EXCHANGE_PLUGIN_VERSION 1 + +#define JOB_TYPE_BACKUP 1 +#define JOB_TYPE_RESTORE 2 + +#define JOB_LEVEL_FULL ((int)'F') +#define JOB_LEVEL_INCREMENTAL ((int)'I') +#define 
JOB_LEVEL_DIFFERENTIAL ((int)'D') + +struct exchange_fd_context_t; + +#include "exch_node.h" + +struct exchange_fd_context_t { + struct bpContext *bpContext; + WCHAR *computer_name; + char *path_bits[6]; + root_node_t *root_node; + node_t *current_node; + int job_type; + int job_level; + time_t job_since; + bool notrunconfull_option; + bool truncate_logs; + bool accurate; + bool plugin_active; +}; + +static inline char *tocharstring(WCHAR *src) +{ + char *tmp = new char[wcslen(src) + 1]; + wcstombs(tmp, src, wcslen(src) + 1); + return tmp; +} + +static inline WCHAR *towcharstring(char *src) +{ + WCHAR *tmp = new WCHAR[strlen(src) + 1]; + mbstowcs(tmp, src, strlen(src) + 1); + return tmp; +} + +#define safe_delete(buf) if (buf) { delete buf; buf=NULL; } + +extern bFuncs *bfuncs; +extern bInfo *binfo; + +#define _DebugMessage(level, message, ...) bfuncs->DebugMessage(context->bpContext, __FILE__, __LINE__, level, message, ##__VA_ARGS__) +#define _JobMessage(type, message, ...) bfuncs->JobMessage(context->bpContext, __FILE__, __LINE__, type, 0, message, ##__VA_ARGS__) +#define _JobMessageNull(type, message, ...) bfuncs->JobMessage(NULL, __FILE__, __LINE__, type, 0, message, ##__VA_ARGS__) + +#define PLUGIN_PATH_PREFIX_BASE "@EXCHANGE" +#define PLUGIN_PATH_PREFIX_SERVICE "Microsoft Information Store" +#define PLUGIN_PATH_PREFIX_SERVICE_W L"Microsoft Information Store" + +#endif /* _EXCHANGE_FD_H */ diff --git a/src/win32/filed/plugins/file_node.c b/src/win32/filed/plugins/file_node.c new file mode 100644 index 00000000..dfa3b0a2 --- /dev/null +++ b/src/win32/filed/plugins/file_node.c @@ -0,0 +1,227 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Written by James Harper, October 2008 + */ + +#include "exchange-fd.h" + +file_node_t::file_node_t(char *name, node_t *parent_node) : node_t(name, NODE_TYPE_FILE, parent_node) +{ + backup_file_handle = INVALID_HANDLE_VALUE; + restore_file_handle = INVALID_HANDLE_VALUE; + restore_at_file_level = FALSE; +} + +file_node_t::~file_node_t() +{ + if (backup_file_handle != INVALID_HANDLE_VALUE) + { + //_DebugMessage(100, "closing file handle in destructor\n"); + CloseHandle(backup_file_handle); + } + if (restore_file_handle != INVALID_HANDLE_VALUE) + { + //_DebugMessage(100, "closing file handle in destructor\n"); + if (restore_at_file_level) + { + CloseHandle(restore_file_handle); + } + else + { + // maybe one day + } + } +} + +bRC +file_node_t::startBackupFile(exchange_fd_context_t *context, struct save_pkt *sp) +{ + time_t now = time(NULL); + _DebugMessage(100, "startBackupNode_FILE state = %d\n", state); + + if (context->job_level == 'F' || parent->type == NODE_TYPE_STORAGE_GROUP) { + sp->fname = full_path; + sp->link = full_path; + _DebugMessage(100, "fname = %s\n", sp->fname); + sp->statp.st_mode = 0700 | S_IFREG; + sp->statp.st_ctime = now; + sp->statp.st_mtime = now; + sp->statp.st_atime = now; + sp->statp.st_size = (uint64_t)-1; + sp->type = FT_REG; + return bRC_OK; + } else { + bfuncs->setBaculaValue(context->bpContext, bVarFileSeen, (void *)full_path); + return bRC_Seen; + } +} + +bRC +file_node_t::endBackupFile(exchange_fd_context_t *context) +{ + _DebugMessage(100, "endBackupNode_FILE state = %d\n", state); + + context->current_node = parent; + + return bRC_OK; +} + +bRC +file_node_t::createFile(exchange_fd_context_t *context, struct restore_pkt *rp) +{ + //HrESERestoreOpenFile with name of log file + + _DebugMessage(100, "createFile_FILE state = %d\n", state); + rp->create_status = CF_EXTRACT; + return bRC_OK; +} + +bRC +file_node_t::endRestoreFile(exchange_fd_context_t *context) +{ + _DebugMessage(100, "endRestoreFile_FILE state = %d\n", state); + context->current_node = parent; + return bRC_OK; +} + +bRC +file_node_t::pluginIoOpen(exchange_fd_context_t *context, struct io_pkt *io) +{ + HRESULT result; + HANDLE handle; + char *tmp = new char[wcslen(filename) + 1]; + wcstombs(tmp, filename, wcslen(filename) + 1); + + _DebugMessage(100, "pluginIoOpen_FILE - filename = %s\n", tmp); + io->status = 0; + io->io_errno = 0; + if (context->job_type == JOB_TYPE_BACKUP) + { + _DebugMessage(10, "Calling HrESEBackupOpenFile\n"); + result = HrESEBackupOpenFile(hccx, filename, 65535, 1, &backup_file_handle, §ion_size); + if (result) + { + _JobMessage(M_FATAL, "HrESEBackupOpenFile failed with error 0x%08x - %s\n", result, ESEErrorMessage(result)); + backup_file_handle = INVALID_HANDLE_VALUE; + io->io_errno = 1; + return bRC_Error; + } + } + else + { + _DebugMessage(10, "Calling HrESERestoreOpenFile for '%s'\n", tmp); + result = HrESERestoreOpenFile(hccx, filename, 1, &restore_file_handle); + if (result == hrRestoreAtFileLevel) + { + restore_at_file_level = true; + _DebugMessage(100, "Calling CreateFileW for '%s'\n", tmp); + handle = CreateFileW(filename, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL); + if (handle == INVALID_HANDLE_VALUE) + { + _JobMessage(M_FATAL, "CreateFile failed"); + return bRC_Error; + } + restore_file_handle = (void *)handle; + return bRC_OK; + } + else if (result == 0) + { + _JobMessage(M_FATAL, "Exchange File IO API not yet supported for restore\n"); + restore_at_file_level = false; + return bRC_Error; + } + else + { + _JobMessage(M_FATAL, 
"HrESERestoreOpenFile failed with error 0x%08x - %s\n", result, ESEErrorMessage(result)); + return bRC_Error; + } + } + return bRC_OK; +} + +bRC +file_node_t::pluginIoRead(exchange_fd_context_t *context, struct io_pkt *io) +{ + HRESULT result; + uint32_t readLength; + + io->status = 0; + io->io_errno = 0; + _DebugMessage(200, "Calling HrESEBackupReadFile\n"); + result = HrESEBackupReadFile(hccx, backup_file_handle, io->buf, io->count, &readLength); + if (result) + { + io->io_errno = 1; + return bRC_Error; + } + io->status = readLength; + size += readLength; + return bRC_OK; +} + +bRC +file_node_t::pluginIoWrite(exchange_fd_context_t *context, struct io_pkt *io) +{ + DWORD bytes_written; + + io->io_errno = 0; + if (!restore_at_file_level) + return bRC_Error; + + if (!WriteFile(restore_file_handle, io->buf, io->count, &bytes_written, NULL)) + { + _JobMessage(M_FATAL, "Write Error"); + return bRC_Error; + } + + if (bytes_written != (DWORD)io->count) + { + _JobMessage(M_FATAL, "Short write"); + return bRC_Error; + } + io->status = bytes_written; + + return bRC_OK; +} + +bRC +file_node_t::pluginIoClose(exchange_fd_context_t *context, struct io_pkt *io) +{ + if (context->job_type == JOB_TYPE_BACKUP) + { + _DebugMessage(100, "Calling HrESEBackupCloseFile\n"); + HrESEBackupCloseFile(hccx, backup_file_handle); + backup_file_handle = INVALID_HANDLE_VALUE; + return bRC_OK; + } + else + { + if (restore_at_file_level) + { + CloseHandle(restore_file_handle); + restore_file_handle = INVALID_HANDLE_VALUE; + return bRC_OK; + } + else + { + return bRC_OK; + } + } +} diff --git a/src/win32/filed/plugins/node.c b/src/win32/filed/plugins/node.c new file mode 100644 index 00000000..64500238 --- /dev/null +++ b/src/win32/filed/plugins/node.c @@ -0,0 +1,118 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Written by James Harper, October 2008 + */ + +#include "exchange-fd.h" + +node_t::node_t(char *name, int type) +{ + this->type = type; + state = 0; + parent = NULL; + this->name = bstrdup(name); + full_path = make_full_path(); + size = 0; + level = 0; +} + +node_t::node_t(char *name, int type, node_t *parent_node) +{ + this->type = type; + state = 0; + parent = parent_node; + this->name = bstrdup(name); + full_path = make_full_path(); + size = 0; + level = parent->level + 1; +} + +node_t::~node_t() +{ + delete name; + delete full_path; +} + +char * +node_t::make_full_path() +{ + node_t *curr_node; + int len; + char *retval; + + for (len = 0, curr_node = this; curr_node != NULL; curr_node = curr_node->parent) + { + len += strlen(curr_node->name) + 1; + } + if (type == NODE_TYPE_FILE || type == NODE_TYPE_DATABASE_INFO) + { + retval = new char[len + 1]; + retval[len] = 0; + } + else + { + retval = new char[len + 2]; + retval[len] = '/'; + retval[len + 1] = 0; + } + for (curr_node = this; curr_node != NULL; curr_node = curr_node->parent) + { + len -= strlen(curr_node->name); + memcpy(retval + len, curr_node->name, strlen(curr_node->name)); + retval[--len] = '/'; + } + return retval; +} + +bRC +node_t::pluginIoOpen(exchange_fd_context_t *context, struct io_pkt *io) +{ + _DebugMessage(100, "pluginIoOpen_Node\n"); + io->status = 0; + io->io_errno = 0; + return bRC_OK; +} + +bRC +node_t::pluginIoRead(exchange_fd_context_t *context, struct io_pkt *io) +{ + _DebugMessage(100, "pluginIoRead_Node\n"); + io->status = 0; + io->io_errno = 0; + return bRC_OK; +} + +bRC +node_t::pluginIoWrite(exchange_fd_context_t *context, struct io_pkt *io) +{ + _DebugMessage(100, "pluginIoWrite_Node\n"); + io->status = 0; + io->io_errno = 1; + return bRC_Error; +} + +bRC +node_t::pluginIoClose(exchange_fd_context_t *context, struct io_pkt *io) +{ + _DebugMessage(100, "pluginIoClose_Node\n"); + io->status = 0; + io->io_errno = 0; + return bRC_OK; +} diff --git a/src/win32/filed/plugins/node.h b/src/win32/filed/plugins/node.h new file mode 100644 index 00000000..591b997a --- /dev/null +++ b/src/win32/filed/plugins/node.h @@ -0,0 +1,177 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Written by James Harper, October 2008 + */ + +#define NODE_TYPE_UNKNOWN 0 +#define NODE_TYPE_ROOT 1 +#define NODE_TYPE_SERVICE 2 +#define NODE_TYPE_STORAGE_GROUP 3 +#define NODE_TYPE_STORE 4 +#define NODE_TYPE_DATABASE_INFO 5 +#define NODE_TYPE_FILE 6 + +class node_t { +public: + int type; + int state; + node_t *parent; + char *name; + char *full_path; + size_t size; + int level; + + node_t(char *name, int type); + node_t(char *name, int type, node_t *parent_node); + virtual ~node_t(); + + char *make_full_path(); + + virtual bRC startBackupFile(exchange_fd_context_t *context, struct save_pkt *sp) = 0; + virtual bRC endBackupFile(exchange_fd_context_t *context) = 0; + + virtual bRC createFile(exchange_fd_context_t *context, struct restore_pkt *rp) = 0; + virtual bRC endRestoreFile(exchange_fd_context_t *context) = 0; + + virtual bRC pluginIoOpen(exchange_fd_context_t *context, struct io_pkt *io); + virtual bRC pluginIoRead(exchange_fd_context_t *context, struct io_pkt *io); + virtual bRC pluginIoWrite(exchange_fd_context_t *context, struct io_pkt *io); + virtual bRC pluginIoClose(exchange_fd_context_t *context, struct io_pkt *io); +}; + +class file_node_t : public node_t { +public: + WCHAR *filename; + HCCX hccx; + VOID *backup_file_handle; + VOID *restore_file_handle; + uint64_t section_size; + bool restore_at_file_level; + + file_node_t(char *name, node_t *parent_node); + virtual ~file_node_t(); + bRC startBackupFile(exchange_fd_context_t *context, struct save_pkt *sp); + bRC endBackupFile(exchange_fd_context_t *context); + + bRC createFile(exchange_fd_context_t *context, struct restore_pkt *rp); + bRC endRestoreFile(exchange_fd_context_t *context); + + bRC pluginIoOpen(exchange_fd_context_t *context, struct io_pkt *io); + bRC pluginIoRead(exchange_fd_context_t *context, struct io_pkt *io); + bRC pluginIoWrite(exchange_fd_context_t *context, struct io_pkt *io); + bRC pluginIoClose(exchange_fd_context_t *context, struct io_pkt *io); +}; + +class dbi_node_t : public node_t { +public: + DATABASE_BACKUP_INFO *dbi; + char *buffer; + uint32_t buffer_size; + uint32_t buffer_pos; + WCHAR *restore_display_name; + GUID restore_guid; + WCHAR *restore_input_streams; + WCHAR *restore_output_streams; + + dbi_node_t(char *name, node_t *parent_node); + virtual ~dbi_node_t(); + bRC startBackupFile(exchange_fd_context_t *context, struct save_pkt *sp); + bRC endBackupFile(exchange_fd_context_t *context); + + bRC createFile(exchange_fd_context_t *context, struct restore_pkt *rp); + bRC endRestoreFile(exchange_fd_context_t *context); + + bRC pluginIoOpen(exchange_fd_context_t *context, struct io_pkt *io); + bRC pluginIoRead(exchange_fd_context_t *context, struct io_pkt *io); + bRC pluginIoWrite(exchange_fd_context_t *context, struct io_pkt *io); + bRC pluginIoClose(exchange_fd_context_t *context, struct io_pkt *io); +}; + +class store_node_t : public node_t { +public: + HCCX hccx; + DATABASE_BACKUP_INFO *dbi; + WCHAR *stream_ptr; + file_node_t *file_node; + dbi_node_t *dbi_node; + WCHAR *out_stream_ptr; + + store_node_t(char *name, node_t *parent_node); + virtual ~store_node_t(); + bRC startBackupFile(exchange_fd_context_t *context, struct save_pkt *sp); + bRC endBackupFile(exchange_fd_context_t *context); + + bRC createFile(exchange_fd_context_t *context, struct restore_pkt *rp); + bRC endRestoreFile(exchange_fd_context_t *context); +}; + +class storage_group_node_t : public node_t { +public: + HCCX hccx; + INSTANCE_BACKUP_INFO *ibi; + store_node_t *store_node; + file_node_t *file_node; + 
uint32_t current_dbi; + WCHAR *logfiles; + WCHAR *logfile_ptr; + RESTORE_ENVIRONMENT *restore_environment; + WCHAR *service_name; + WCHAR *storage_group_name; + WCHAR *saved_log_path; + storage_group_node_t *next; + + storage_group_node_t(char *name, node_t *parent_node); + virtual ~storage_group_node_t(); + bRC startBackupFile(exchange_fd_context_t *context, struct save_pkt *sp); + bRC endBackupFile(exchange_fd_context_t *context); + + bRC createFile(exchange_fd_context_t *context, struct restore_pkt *rp); + bRC endRestoreFile(exchange_fd_context_t *context); +}; + +class service_node_t : public node_t { +public: + uint32_t ibi_count; + INSTANCE_BACKUP_INFO *ibi; + HCCX hccx; + uint32_t current_ibi; + storage_group_node_t *first_storage_group_node; + + service_node_t(char *name, node_t *parent_node); + virtual ~service_node_t(); + bRC startBackupFile(exchange_fd_context_t *context, struct save_pkt *sp); + bRC endBackupFile(exchange_fd_context_t *context); + + bRC createFile(exchange_fd_context_t *context, struct restore_pkt *rp); + bRC endRestoreFile(exchange_fd_context_t *context); +}; + +class root_node_t : public node_t { +public: + service_node_t *service_node; + + root_node_t(char *name); + virtual ~root_node_t(); + bRC startBackupFile(exchange_fd_context_t *context, struct save_pkt *sp); + bRC endBackupFile(exchange_fd_context_t *context); + + bRC createFile(exchange_fd_context_t *context, struct restore_pkt *rp); + bRC endRestoreFile(exchange_fd_context_t *context); +}; diff --git a/src/win32/filed/plugins/root_node.c b/src/win32/filed/plugins/root_node.c new file mode 100644 index 00000000..989bb4e2 --- /dev/null +++ b/src/win32/filed/plugins/root_node.c @@ -0,0 +1,153 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Written by James Harper, October 2008 + */ + +#include "exchange-fd.h" + +root_node_t::root_node_t(char *name) : node_t(name, NODE_TYPE_ROOT) +{ + service_node = NULL; +} + +root_node_t::~root_node_t() +{ +} + +bRC +root_node_t::startBackupFile(exchange_fd_context_t *context, struct save_pkt *sp) +{ + bRC retval = bRC_OK; + time_t now; + + _DebugMessage(100, "startBackupNode_ROOT state = %d\n", state); + switch(state) + { + case 0: + if (strcmp(PLUGIN_PATH_PREFIX_BASE, name) != 0) + { + _JobMessage(M_FATAL, "Invalid backup path specified, must start with '/" PLUGIN_PATH_PREFIX_BASE "/'\n"); + state = 999; + return bRC_Error; + } + // check that service_node == NULL + service_node = new service_node_t(bstrdup(context->path_bits[level + 1]), this); + state = 1; + // fall through + case 1: + context->current_node = service_node; + break; + case 2: + now = time(NULL); + sp->fname = full_path; + sp->link = full_path; + sp->statp.st_mode = 0700 | S_IFDIR; + sp->statp.st_ctime = now; + sp->statp.st_mtime = now; + sp->statp.st_atime = now; + sp->statp.st_size = 0; + sp->type = FT_DIREND; + break; + case 999: + return bRC_Error; + default: + _JobMessage(M_FATAL, "startBackupFile: invalid internal state %d", state); + state = 999; + } + return retval; +} + +bRC +root_node_t::endBackupFile(exchange_fd_context_t *context) +{ + bRC retval = bRC_OK; + + _DebugMessage(100, "endBackupNode_ROOT state = %d\n", state); + switch(state) + { + case 1: + state = 2; + retval = bRC_More; + // free service_node here? + break; + case 2: + retval = bRC_OK; + break; + case 999: + retval = bRC_Error; + default: + _JobMessage(M_FATAL, "endBackupFile: invalid internal state %d", state); + state = 999; + return bRC_Error; + } + return retval; +} + +bRC +root_node_t::createFile(exchange_fd_context_t *context, struct restore_pkt *rp) +{ + _DebugMessage(100, "createFile_ROOT state = %d\n", state); + switch (state) { + case 0: + if (strcmp(name, PLUGIN_PATH_PREFIX_BASE) != 0) { + _JobMessage(M_FATAL, "Invalid restore path specified, must start with '/" PLUGIN_PATH_PREFIX_BASE "/'\n"); + state = 999; + return bRC_Error; + } + service_node = new service_node_t(bstrdup(context->path_bits[level + 1]), this); + context->current_node = service_node; + return bRC_OK; + case 1: + rp->create_status = CF_CREATED; + return bRC_OK; + + /* Skip this file */ + case 900: + rp->create_status = CF_SKIP; + return bRC_OK; + /* Error */ + case 999: + return bRC_Error; + default: + _JobMessage(M_FATAL, "createFile: invalid internal state %d", state); + state = 999; + } + return bRC_Error; +} + +bRC +root_node_t::endRestoreFile(exchange_fd_context_t *context) +{ + _DebugMessage(100, "endRestoreFile_ROOT state = %d\n", state); + switch (state) { + case 0: + delete service_node; + state = 1; + return bRC_OK; + case 1: + return bRC_OK; + case 900: + return bRC_OK; + default: + _JobMessage(M_FATAL, "endRestore: invalid internal state %d", state); + state = 999; + } + return bRC_Error; +} diff --git a/src/win32/filed/plugins/service_node.c b/src/win32/filed/plugins/service_node.c new file mode 100644 index 00000000..0f95fbb0 --- /dev/null +++ b/src/win32/filed/plugins/service_node.c @@ -0,0 +1,220 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Written by James Harper, October 2008 + */ + +#include "exchange-fd.h" + +service_node_t::service_node_t(char *name, node_t *parent_node) : node_t(name, NODE_TYPE_SERVICE, parent_node) +{ + current_ibi = 0; + hccx = NULL; + ibi = NULL; + ibi_count = 0; + first_storage_group_node = NULL; +} + +service_node_t::~service_node_t() +{ +} + +bRC +service_node_t::startBackupFile(exchange_fd_context_t *context, struct save_pkt *sp) +{ + HRESULT result; + char aname[256]; + + _DebugMessage(100, "startBackupNode_SERVICE state = %d\n", state); + switch(state) + { + case 0: + if (strcmp(PLUGIN_PATH_PREFIX_SERVICE, name) != 0) + { + _JobMessage(M_FATAL, "Invalid restore path specified, must start with /" PLUGIN_PATH_PREFIX_BASE "/" PLUGIN_PATH_PREFIX_SERVICE "/\n"); + return bRC_Error; + } + // convert name to a wide string + + _DebugMessage(100, "Calling HrESEBackupPrepare\n"); + wcstombs(aname, context->computer_name, 256); + _JobMessage(M_INFO, "Preparing Exchange Backup for %s\n", aname); + result = HrESEBackupPrepare(context->computer_name, PLUGIN_PATH_PREFIX_SERVICE_W, &ibi_count, &ibi, &hccx); + if (result != 0) + { + _JobMessage(M_FATAL, "HrESEBackupPrepare failed with error 0x%08x - %s\n", result, ESEErrorMessage(result)); + return bRC_Error; + } + state = 1; + // fall through + case 1: + if (context->path_bits[level + 1] == NULL) + { + _DebugMessage(100, "No specific storage group specified - backing them all up\n"); + char *tmp = new char[wcslen(ibi[current_ibi].wszInstanceName) + 1]; + wcstombs(tmp, ibi[current_ibi].wszInstanceName, wcslen(ibi[current_ibi].wszInstanceName) + 1); + first_storage_group_node = new storage_group_node_t(tmp, this); + delete tmp; + _DebugMessage(100, "storage group name = %s\n", first_storage_group_node->name); + first_storage_group_node->ibi = &ibi[current_ibi]; + first_storage_group_node->hccx = hccx; + context->current_node = first_storage_group_node; + } + else + { + char *tmp = NULL; + for (current_ibi = 0; current_ibi < ibi_count; current_ibi++) + { + tmp = new char[wcslen(ibi[current_ibi].wszInstanceName) + 1]; + wcstombs(tmp, ibi[current_ibi].wszInstanceName, wcslen(ibi[current_ibi].wszInstanceName) + 1); + if (stricmp(tmp, context->path_bits[level + 1]) == 0) + break; + } + first_storage_group_node = new storage_group_node_t(tmp, this); + delete tmp; + if (current_ibi == ibi_count) + { + _JobMessage(M_FATAL, "Invalid Storage Group '%s'\n", context->path_bits[level + 1]); + return bRC_Error; + } + _DebugMessage(100, "storage group name = %s\n", first_storage_group_node->name); + first_storage_group_node->ibi = &ibi[current_ibi]; + first_storage_group_node->hccx = hccx; + context->current_node = first_storage_group_node; + } + break; + case 2: + time_t now = time(NULL); + sp->fname = full_path; + sp->link = full_path; + sp->statp.st_mode = 0700 | S_IFDIR; + sp->statp.st_ctime = now; + sp->statp.st_mtime = now; + sp->statp.st_atime = now; + sp->statp.st_size = 0; + sp->statp.st_nlink = 1; + //sp->statp.st_blocks = 0; + sp->type = FT_DIREND; + break; + } + _DebugMessage(100, "ending startBackupNode_SERVICE state = %d\n", state); + return bRC_OK; +} + +bRC 
+service_node_t::endBackupFile(exchange_fd_context_t *context) +{ + HRESULT result; + bRC retval = bRC_OK; + + _DebugMessage(100, "endBackupNode_SERVICE state = %d\n", state); + switch(state) + { + case 0: + // should never happen + break; + case 1: + // free node->storage_group_node + if (context->path_bits[level + 1] == NULL) + { + current_ibi++; + if (current_ibi == ibi_count) + state = 2; + } + else + state = 2; + retval = bRC_More; + break; + case 2: + _DebugMessage(100, "calling HrESEBackupEnd\n"); + result = HrESEBackupEnd(hccx); + if (result != 0) + { + _JobMessage(M_FATAL, "HrESEBackupEnd failed with error 0x%08x - %s\n", result, ESEErrorMessage(result)); + return bRC_Error; + } + + context->current_node = parent; + retval = bRC_OK; + break; + } + return retval; +} + +bRC +service_node_t::createFile(exchange_fd_context_t *context, struct restore_pkt *rp) +{ + storage_group_node_t *curr_sg, *prev_sg; + + _DebugMessage(100, "createFile_SERVICE state = %d\n", state); + if (strcmp(name, "Microsoft Information Store") != 0) + { + _JobMessage(M_FATAL, "Invalid restore path specified, must start with '/" PLUGIN_PATH_PREFIX_BASE "/" PLUGIN_PATH_PREFIX_SERVICE "/'\n", state); + return bRC_Error; + } + for(;;) + { + switch (state) + { + case 0: + if (context->path_bits[level + 1] == NULL) + { + state = 1; + break; + } + for (prev_sg = NULL, curr_sg = first_storage_group_node; curr_sg != NULL; prev_sg = curr_sg, curr_sg = curr_sg->next) + { + if (strcmp(curr_sg->name, context->path_bits[level + 1]) == 0) + { + break; + } + } + if (curr_sg == NULL) + { + curr_sg = new storage_group_node_t(bstrdup(context->path_bits[level + 1]), this); + if (prev_sg == NULL) + first_storage_group_node = curr_sg; + else + prev_sg->next = curr_sg; + } + context->current_node = curr_sg; + return bRC_OK; + case 1: + rp->create_status = CF_CREATED; + return bRC_OK; + } + } + return bRC_Error; +} + +bRC +service_node_t::endRestoreFile(exchange_fd_context_t *context) +{ + _DebugMessage(100, "endRestoreFile_SERVICE state = %d\n", state); + switch(state) + { + case 0: + return bRC_Error; + case 1: + context->current_node = parent; + return bRC_OK; + } + + return bRC_Error; +} diff --git a/src/win32/filed/plugins/storage_group_node.c b/src/win32/filed/plugins/storage_group_node.c new file mode 100644 index 00000000..1e149b0d --- /dev/null +++ b/src/win32/filed/plugins/storage_group_node.c @@ -0,0 +1,502 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Written by James Harper, October 2008 + */ + +#include "exchange-fd.h" + +storage_group_node_t::storage_group_node_t(char *name, node_t *parent_node) : node_t(name, NODE_TYPE_STORAGE_GROUP, parent_node) +{ + ibi = NULL; + store_node = NULL; + current_dbi = 0; + restore_environment = NULL; + saved_log_path = NULL; + next = NULL; +} + +storage_group_node_t::~storage_group_node_t() +{ +/* + if (dbi_node != NULL) + delete dbi_node; + + if (file_node != NULL) + delete file_node; +*/ +} + +bRC +storage_group_node_t::startBackupFile(exchange_fd_context_t *context, struct save_pkt *sp) +{ + HRESULT result; + int len; + WCHAR *tmp_logfiles, *tmp_logfile_ptr; + char *tmp; + + for(;;) + { + _DebugMessage(100, "startBackupNode_STORAGE_GROUP state = %d, name = %s\n", state, name); + switch(state) + { + case 0: + current_dbi = 0; + store_node = NULL; + logfile_ptr = NULL; + if (context->job_level == 'F') + { + _DebugMessage(100, "Calling HrESEBackupSetup (BACKUP_TYPE_FULL)\n"); + result = HrESEBackupSetup(hccx, ibi->hInstanceId, BACKUP_TYPE_FULL); + state = 1; + } + else + { + _DebugMessage(100, "Calling HrESEBackupSetup (BACKUP_TYPE_LOGS_ONLY)\n"); + result = HrESEBackupSetup(hccx, ibi->hInstanceId, BACKUP_TYPE_LOGS_ONLY); + if (context->accurate) + state = 1; + else + state = 2; + } + if (result != 0) + { + _JobMessage(M_FATAL, "HrESEBackupSetup failed with error 0x%08x - %s\n", result, ESEErrorMessage(result)); + state = 999; + return bRC_Error; + } + break; + case 1: + if (context->path_bits[level + 1] == NULL) + { + _DebugMessage(100, "No specific database specified - backing them all up\n"); + DATABASE_BACKUP_INFO *dbi = &ibi->rgDatabase[current_dbi]; + char *tmp = new char[wcslen(dbi->wszDatabaseDisplayName) + 1]; + wcstombs(tmp, dbi->wszDatabaseDisplayName, wcslen(dbi->wszDatabaseDisplayName) + 1); + store_node = new store_node_t(tmp, this); + store_node->dbi = dbi; + store_node->hccx = hccx; + context->current_node = store_node; + } + else + { + DATABASE_BACKUP_INFO *dbi = NULL; + char *tmp = NULL; + for (current_dbi = 0; current_dbi < ibi->cDatabase; current_dbi++) + { + dbi = &ibi->rgDatabase[current_dbi]; + char *tmp = new char[wcslen(dbi->wszDatabaseDisplayName) + 1]; + wcstombs(tmp, dbi->wszDatabaseDisplayName, wcslen(dbi->wszDatabaseDisplayName) + 1); + if (stricmp(tmp, context->path_bits[level + 1]) == 0) + break; + delete tmp; + } + if (current_dbi == ibi->cDatabase) + { + _JobMessage(M_FATAL, "Invalid Database '%s'\n", context->path_bits[level + 1]); + return bRC_Error; + } + store_node = new store_node_t(tmp, this); + _DebugMessage(100, "Database name = %s\n", store_node->name); + delete tmp; + store_node->hccx = hccx; + store_node->dbi = dbi; + context->current_node = store_node; + } + return bRC_OK; + case 2: + _DebugMessage(100, "Calling HrESEBackupGetLogAndPatchFiles\n"); + result = HrESEBackupGetLogAndPatchFiles(hccx, &tmp_logfiles); + if (result != 0) + { + _JobMessage(M_FATAL, "HrESEBackupGetLogAndPatchFiles failed with error 0x%08x - %s\n", result, ESEErrorMessage(result)); + return bRC_Error; + } + for (len = 0, tmp_logfile_ptr = tmp_logfiles; *tmp_logfile_ptr != 0; tmp_logfile_ptr += wcslen(tmp_logfile_ptr) + 1) + { + len += wcslen(tmp_logfile_ptr) + 1; + } + logfiles = new WCHAR[len + 1]; + logfile_ptr = logfiles; + for (tmp_logfile_ptr = tmp_logfiles; *tmp_logfile_ptr != 0; tmp_logfile_ptr += wcslen(tmp_logfile_ptr) + 1) + { + // check file modification date + HANDLE handle; + FILETIME modified_time; + //int64_t tmp_time; + __int64 tmp_time; + bool 
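+/* The checks below decide whether each log file is newer than the job's
+ * "since" time: FILETIME counts 100-nanosecond ticks since 1601-01-01, so
+ * subtracting 116444736000000000 and dividing by 10^7 yields Unix epoch
+ * seconds comparable with context->job_since.  Whenever the timestamp cannot
+ * be read, the file is included anyway. */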
include_file; + include_file = false; + handle = INVALID_HANDLE_VALUE; + if (context->job_since == 0) + include_file = true; + if (!include_file) + { + handle = CreateFileW(tmp_logfile_ptr, GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL); + if (handle == INVALID_HANDLE_VALUE) + { + //_JobMessage(M_WARNING, "Could not open '%S' to check last modified date (0x%08x), including anyway\n", tmp_logfile_ptr, GetLastError()); + include_file = true; + } + } + if (!include_file) + { + if (GetFileTime(handle, NULL, NULL, &modified_time) == 0) + { + //_JobMessage(M_WARNING, "Could not check last modified date for '%S' (0x%08x), including anyway\n", tmp_logfile_ptr, GetLastError()); + include_file = true; + } + } + if (!include_file) + { + tmp_time = (((int64_t)modified_time.dwHighDateTime) << 32) | modified_time.dwLowDateTime; + tmp_time -= 116444736000000000LL; + tmp_time /= 10000000; + if (tmp_time > context->job_since) + { + include_file = true; + } + } + if (include_file) + { + memcpy(logfile_ptr, tmp_logfile_ptr, (wcslen(tmp_logfile_ptr) + 1) * 2); + logfile_ptr += wcslen(logfile_ptr) + 1; + //_DebugMessage(100, "Including file %S\n", logfile_ptr); + } +#if 0 +/* this is handled via checkFile now */ + else + { + if (context->accurate) { + tmp = new char[strlen(full_path) + wcslen(tmp_logfile_ptr) + 1]; + strcpy(tmp, full_path); + wcstombs(tmp + strlen(full_path), tmp_logfile_ptr, wcslen(tmp_logfile_ptr) + 1); + bfuncs->setBaculaValue(context->bpContext, bVarFileSeen, (void *)tmp); + delete tmp; + } + } +#endif + + if (handle != INVALID_HANDLE_VALUE) + CloseHandle(handle); + + } + *logfile_ptr = 0; + logfile_ptr = logfiles; + state = 3; + break; + case 3: + tmp = new char[wcslen(logfile_ptr) + 1]; + wcstombs(tmp, logfile_ptr, wcslen(logfile_ptr) + 1); + file_node = new file_node_t(tmp, this); + delete tmp; + file_node->hccx = hccx; + file_node->filename = logfile_ptr; + context->current_node = file_node; + return bRC_OK; + case 4: + time_t now = time(NULL); + sp->fname = full_path; + sp->link = full_path; + _DebugMessage(100, "fname = %s\n", sp->fname); + sp->statp.st_mode = 0700 | S_IFDIR; + sp->statp.st_ctime = now; + sp->statp.st_mtime = now; + sp->statp.st_atime = now; + sp->statp.st_size = 0; + //sp->statp.st_blocks = 0; + sp->type = FT_DIREND; + return bRC_OK; + } + } +} + +bRC +storage_group_node_t::endBackupFile(exchange_fd_context_t *context) +{ + HRESULT result; + bRC retval = bRC_Error; + + _DebugMessage(100, "endBackupNode_STORAGE_GROUP state = %d\n", state); + + switch(state) + { + case 0: + // should never happen + break; + case 1: + // free node->storage_group_node + if (context->path_bits[level + 1] == NULL) + { + current_dbi++; + if (current_dbi == ibi->cDatabase) + state = 2; + } + else + state = 2; + retval = bRC_More; + break; + case 2: + // should never happen + break; + case 3: + delete file_node; + logfile_ptr += wcslen(logfile_ptr) + 1; + if (*logfile_ptr == 0) + state = 4; + retval = bRC_More; + break; + case 4: + if (context->truncate_logs) + { + _DebugMessage(100, "Calling HrESEBackupTruncateLogs\n"); + result = HrESEBackupTruncateLogs(hccx); + if (result != 0) + { + _JobMessage(M_FATAL, "HrESEBackupTruncateLogs failed with error 0x%08x - %s\n", result, ESEErrorMessage(result)); + } + else + { + _JobMessage(M_INFO, "Truncated database logs for Storage Group %s\n", name); + } + } + else + { + _JobMessage(M_INFO, "Did NOT truncate database logs for Storage Group %s\n", name); + } + _DebugMessage(100, "Calling HrESEBackupInstanceEnd\n"); + 
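+         /* Logs have (optionally) been truncated above, so the per-instance
+          * backup is closed with ESE_BACKUP_INSTANCE_END_SUCCESS; on success
+          * control is handed back to the parent node. */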
result = HrESEBackupInstanceEnd(hccx, ESE_BACKUP_INSTANCE_END_SUCCESS); + if (result != 0) + { + _JobMessage(M_FATAL, "HrESEBackupInstanceEnd failed with error 0x%08x - %s\n", result, ESEErrorMessage(result)); + return bRC_Error; + } + retval = bRC_OK; + context->current_node = parent; + break; + } + return retval; +} + +bRC +storage_group_node_t::createFile(exchange_fd_context_t *context, struct restore_pkt *rp) +{ + HRESULT result; + int len; + + _DebugMessage(100, "createFile_STORAGE_GROUP state = %d\n", state); + + if (strcmp(context->path_bits[level], name) != 0) + { + _DebugMessage(100, "Different storage group - switching back to parent\n", state); + saved_log_path = new WCHAR[wcslen(restore_environment->m_wszRestoreLogPath) + 1]; + wcscpy(saved_log_path, restore_environment->m_wszRestoreLogPath); + _DebugMessage(100, "Calling HrESERestoreSaveEnvironment\n"); + result = HrESERestoreSaveEnvironment(hccx); + if (result != 0) + { + _JobMessage(M_FATAL, "HrESERestoreSaveEnvironment failed with error 0x%08x - %s\n", result, ESEErrorMessage(result)); + state = 999; + rp->create_status = CF_CREATED; + return bRC_OK; + } + _DebugMessage(100, "Calling HrESERestoreClose\n"); + result = HrESERestoreClose(hccx, RESTORE_CLOSE_NORMAL); + if (result != 0) + { + _JobMessage(M_FATAL, "HrESERestoreClose failed with error 0x%08x - %s\n", result, ESEErrorMessage(result)); + state = 999; + rp->create_status = CF_CREATED; + return bRC_OK; + } + context->current_node = parent; + return bRC_OK; + } + if (saved_log_path != NULL) + { + _DebugMessage(100, "Calling HrESERestoreReopen\n"); + result = HrESERestoreReopen(context->computer_name, service_name, saved_log_path, &hccx); + if (result != 0) + { + _JobMessage(M_FATAL, "HrESERestoreReopen failed with error 0x%08x - %s\n", result, ESEErrorMessage(result)); + state = 999; + saved_log_path = NULL; + rp->create_status = CF_CREATED; + return bRC_OK; + } + _DebugMessage(100, "Calling HrESERestoreGetEnvironment\n"); + result = HrESERestoreGetEnvironment(hccx, &restore_environment); + if (result != 0) + { + _JobMessage(M_FATAL, "HrESERestoreGetEnvironment failed with error 0x%08x - %s\n", result, ESEErrorMessage(result)); + state = 999; + saved_log_path = NULL; + rp->create_status = CF_CREATED; + return bRC_OK; + } + saved_log_path = NULL; + } + + for (;;) + { + switch (state) + { + case 0: + if (context->path_bits[level + 2] == NULL) + { + _JobMessage(M_ERROR, "Unexpected log file '%s%s' - expecting database\n", full_path, context->path_bits[level + 1]); + state = 999; + break; + } + service_name = new WCHAR[strlen(parent->name) + 1]; + storage_group_name = new WCHAR[strlen(name) + 1]; + mbstowcs(service_name, parent->name, strlen(parent->name) + 1); + mbstowcs(storage_group_name, name, strlen(name) + 1); + _DebugMessage(100, "Calling HrESERestoreOpen\n"); + result = HrESERestoreOpen(context->computer_name, service_name, storage_group_name, NULL, &hccx); + if (result != 0) + { + _JobMessage(M_FATAL, "HrESERestoreOpen failed with error 0x%08x - %s\n", result, ESEErrorMessage(result)); + state = 999; + break; + } + _DebugMessage(100, "Calling HrESERestoreGetEnvironment\n"); + result = HrESERestoreGetEnvironment(hccx, &restore_environment); + if (result != 0) + { + _JobMessage(M_FATAL, "HrESERestoreGetEnvironment failed with error 0x%08x - %s\n", result, ESEErrorMessage(result)); + state = 999; + break; + } + state = 1; + break; + case 1: + if (context->path_bits[level + 2] == NULL) + { + state = 2; + break; + } + store_node = new 
store_node_t(bstrdup(context->path_bits[level + 1]), this); + store_node->hccx = hccx; + context->current_node = store_node; + return bRC_OK; + case 2: + if (context->path_bits[level + 2] != NULL) + { + _JobMessage(M_ERROR, "Unexpected file '%s'\n", full_path); + state = 999; + break; + } + if (context->path_bits[level + 1] == NULL) + { + state = 3; + break; + } + state = 2; + file_node = new file_node_t(bstrdup(context->path_bits[level + 1]), this); + file_node->hccx = hccx; + int i; + for (i = strlen(file_node->name) - 1; i >= 0; i--) + { + if (file_node->name[i] == '\\') + { + i++; + break; + } + } + len = wcslen(restore_environment->m_wszRestoreLogPath) + strlen(file_node->name + i) + 1 + 1; + file_node->filename = new WCHAR[len]; + wcscpy(file_node->filename, restore_environment->m_wszRestoreLogPath); + wcscat(file_node->filename, L"\\"); + mbstowcs(&file_node->filename[wcslen(file_node->filename)], file_node->name + i, strlen(file_node->name + i) + 1); + context->current_node = file_node; + return bRC_OK; + case 3: + if (rp->type != FT_DIREND) + { + _JobMessage(M_ERROR, "Unexpected file '%s'\n", full_path); + state = 999; + break; + } + // must be the storage group node + _DebugMessage(100, "Calling HrESERestoreSaveEnvironment\n"); + result = HrESERestoreSaveEnvironment(hccx); + if (result != 0) + { + _JobMessage(M_FATAL, "HrESERestoreSaveEnvironment failed with error 0x%08x - %s\n", result, ESEErrorMessage(result)); + state = 999; + break; + } + + _DebugMessage(100, "Calling HrESERestoreComplete\n"); + result = HrESERestoreComplete(hccx, restore_environment->m_wszRestoreLogPath, + restore_environment->m_wszRestoreLogPath, storage_group_name, ESE_RESTORE_COMPLETE_ATTACH_DBS); + if (result != 0) + { + _JobMessage(M_FATAL, "HrESERestoreComplete failed with error 0x%08x - %s\n", result, ESEErrorMessage(result)); + _DebugMessage(100, "Calling HrESERestoreClose\n"); + result = HrESERestoreClose(hccx, RESTORE_CLOSE_NORMAL); + state = 999; + break; + } + else + { + _JobMessage(M_INFO, "Storage Group '%s' restored successfully\n", name); + } + + _DebugMessage(100, "Calling HrESERestoreClose\n"); + result = HrESERestoreClose(hccx, RESTORE_CLOSE_NORMAL); + if (result != 0) + { + _JobMessage(M_FATAL, "HrESERestoreClose failed with error 0x%08x - %s\n", result, ESEErrorMessage(result)); + state = 999; + break; + } + + rp->create_status = CF_CREATED; + return bRC_OK; + case 999: + rp->create_status = CF_CREATED; + return bRC_OK; + } + } +} + +bRC +storage_group_node_t::endRestoreFile(exchange_fd_context_t *context) +{ + _DebugMessage(100, "endRestoreFile_STORAGE_GROUP state = %d\n", state); + switch (state) + { + case 0: + return bRC_Error; + case 1: + return bRC_OK; + case 2: + return bRC_OK; + case 3: + context->current_node = parent; + return bRC_OK; + case 999: + context->current_node = parent; + return bRC_OK; + } + + return bRC_Error; +} diff --git a/src/win32/filed/plugins/store_node.c b/src/win32/filed/plugins/store_node.c new file mode 100644 index 00000000..5d543522 --- /dev/null +++ b/src/win32/filed/plugins/store_node.c @@ -0,0 +1,240 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Written by James Harper, October 2008 + */ + +#include "exchange-fd.h" + +store_node_t::store_node_t(char *name, node_t *parent_node) : node_t(name, NODE_TYPE_STORE, parent_node) +{ + dbi = NULL; + hccx = NULL; + dbi_node = NULL; + file_node = NULL; +} + +store_node_t::~store_node_t() +{ + if (dbi_node != NULL) + delete dbi_node; + + if (file_node != NULL) + delete file_node; +} + +bRC +store_node_t::startBackupFile(exchange_fd_context_t *context, struct save_pkt *sp) +{ + char *tmp; + + _DebugMessage(100, "startBackupNode_STORE state = %d\n", state); + + switch(state) + { + case 0: + stream_ptr = dbi->wszDatabaseStreams; + state = 1; + // fall through + case 1: + dbi_node = new dbi_node_t("DatabaseBackupInfo", this); + dbi_node->dbi = dbi; + context->current_node = dbi_node; + break; + case 2: + tmp = new char[wcslen(stream_ptr) + 1]; + wcstombs(tmp, stream_ptr, wcslen(stream_ptr) + 1); + file_node = new file_node_t(tmp, this); + file_node->hccx = hccx; + file_node->filename = stream_ptr; + context->current_node = file_node; + break; + case 3: + if (context->job_level == 'F') + { + time_t now = time(NULL); + sp->fname = full_path; + sp->link = full_path; + sp->statp.st_mode = 0700 | S_IFDIR; + sp->statp.st_ctime = now; + sp->statp.st_mtime = now; + sp->statp.st_atime = now; + sp->statp.st_size = 0; + sp->type = FT_DIREND; + } + else + { + bfuncs->setBaculaValue(context->bpContext, bVarFileSeen, (void *)full_path); + return bRC_Seen; + } + break; + } + + return bRC_OK; +} + +bRC +store_node_t::endBackupFile(exchange_fd_context_t *context) +{ + _DebugMessage(100, "endBackupNode_STORE state = %d\n", state); + bRC retval = bRC_OK; + + switch(state) + { + case 0: + // should never happen + break; + case 1: + state = 2; + retval = bRC_More; + break; + case 2: + delete file_node; + stream_ptr += wcslen(stream_ptr) + 1; + if (*stream_ptr == 0) + state = 3; + retval = bRC_More; + break; + case 3: + //delete dbi_node; + context->current_node = parent; + break; + } + return retval; +} + +bRC +store_node_t::createFile(exchange_fd_context_t *context, struct restore_pkt *rp) +{ + _DebugMessage(100, "createFile_STORE state = %d\n", state); + + if (strcmp(context->path_bits[level - 1], parent->name) != 0) + { + _DebugMessage(100, "Different storage group - switching back to parent\n", state); + context->current_node = parent; + return bRC_OK; + } + for (;;) + { + switch (state) + { + case 0: + if (strcmp("DatabaseBackupInfo", context->path_bits[level + 1]) != 0) + { + _JobMessage(M_FATAL, "DatabaseBackupInfo file must exist and must be first in directory\n"); + state = 999; + break; + } + dbi_node = new dbi_node_t(bstrdup(context->path_bits[level + 1]), this); + context->current_node = dbi_node; + return bRC_OK; + case 1: + if (strcmp(context->path_bits[level - 1], parent->name) != 0) + { + _JobMessage(M_ERROR, "Unexpected Storage Group Change\n"); + state = 999; + break; + } + + if (*stream_ptr != 0) + { + // verify that stream_ptr == context->path_bits[level + 1]; + _DebugMessage(150, "stream_ptr = %S\n", stream_ptr); + _DebugMessage(150, "out_stream_ptr = %S\n", out_stream_ptr); + file_node = new 
file_node_t(bstrdup(context->path_bits[level + 1]), this); + file_node->hccx = hccx; + file_node->filename = out_stream_ptr; + context->current_node = file_node; + return bRC_OK; + } + else + { + _JobMessage(M_ERROR, "Extra file found '%s'\n", full_path); + state = 999; + break; + } + case 2: + if (rp->type != FT_DIREND) + { + _JobMessage(M_ERROR, "Unexpected file '%s'\n", full_path); + state = 999; + break; + } + rp->create_status = CF_CREATED; + return bRC_OK; + case 999: + if (strcmp(context->path_bits[level], name) != 0) + { + _DebugMessage(100, "End of Store when in error state - switching back to parent\n", state); + context->current_node = parent; + return bRC_OK; + } + rp->create_status = CF_CREATED; + return bRC_OK; + } + } +} + +bRC +store_node_t::endRestoreFile(exchange_fd_context_t *context) +{ + HRESULT result; + + _DebugMessage(100, "endRestoreFile_STORE state = %d\n", state); + for (;;) + { + switch (state) + { + case 0: + state = 1; + _DebugMessage(100, "Calling HrESERestoreAddDatabase\n"); + result = HrESERestoreAddDatabase(hccx, dbi_node->restore_display_name, dbi_node->restore_guid, dbi_node->restore_input_streams, &dbi_node->restore_output_streams); + if (result != 0) + { + _JobMessage(M_FATAL, "HrESERestoreAddDatabase failed with error 0x%08x - %s\n", result, ESEErrorMessage(result)); + state = 999; + break; + } + stream_ptr = dbi_node->restore_input_streams; + out_stream_ptr = dbi_node->restore_output_streams; + return bRC_OK; + case 1: + if (*stream_ptr != 0) + { + delete file_node; + file_node = NULL; + stream_ptr += wcslen(stream_ptr) + 1; + out_stream_ptr += wcslen(out_stream_ptr) + 1; + if (*stream_ptr == 0) + state = 2; + return bRC_OK; + } + else + { + state = 999; + break; + } + case 2: + context->current_node = parent; + return bRC_OK; + case 999: + return bRC_OK; + } + } +} diff --git a/src/win32/filed/service.cpp b/src/win32/filed/service.cpp new file mode 100644 index 00000000..f7e3a158 --- /dev/null +++ b/src/win32/filed/service.cpp @@ -0,0 +1,25 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2019 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Kern Sibbald, August 2007 + * + */ + +#include "who.h" +#include "../libwin32/service.cpp" diff --git a/src/win32/filed/trayMonitor.cpp b/src/win32/filed/trayMonitor.cpp new file mode 100644 index 00000000..8e134989 --- /dev/null +++ b/src/win32/filed/trayMonitor.cpp @@ -0,0 +1,25 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2019 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. 
+ + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Kern Sibbald, August 2007 + * + */ + +#include "who.h" +#include "../libwin32/trayMonitor.cpp" diff --git a/src/win32/filed/vss.cpp b/src/win32/filed/vss.cpp new file mode 100644 index 00000000..1cfb4096 --- /dev/null +++ b/src/win32/filed/vss.cpp @@ -0,0 +1,589 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2019 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +// vss.cpp -- Interface to Volume Shadow Copies (VSS) +// +// Copyright transferred from MATRIX-Computer GmbH to +// Kern Sibbald by express permission. +// +// Author : Thorsten Engel +// Created On : Fri May 06 21:44:00 2005 + + +#ifdef WIN32_VSS +#include "bacula.h" +#include "compat.h" +#include "ms_atl.h" +#include +#undef setlocale +#include +using namespace std; + +#include "vss.h" + +#define dbglvl_snap DT_VOLUME|50 + +wstring GetUniqueVolumeNameForPath(wstring path, wstring &rootPath); + +static int volume_search(void *i1, void *i2) +{ + wstring *volname = (wstring *) i1; + MTabEntry *vol = (MTabEntry *) i2; + + return volname->compare(vol->volumeName); +} + +static int volume_cmp(void *e1, void *e2) +{ + MTabEntry *v1 = (MTabEntry *) e1; + MTabEntry *v2 = (MTabEntry *) e2; + return wcscmp(v1->volumeName, v2->volumeName); +} + +UINT MTabEntry::getDriveType() +{ + WCHAR *root = first(); + + // Make sure to discard CD-ROM and network drives + if (!root) { + return 0; + } + + driveType = GetDriveTypeW(root); + return driveType; +} + +/* Return true if the current volume can be snapshoted (ie not CDROM or fat32) */ +bool MTabEntry::isSuitableForSnapshot() +{ + DWORD componentlength, fsflags; + WCHAR fstype[50]; + WCHAR *root = first(); + UINT oldmode; + BOOL result; + + // Make sure to discard CD-ROM and network drives + if (!root) { + Dmsg1(dbglvl_snap, "No mount point for %ls\n", volumeName); + goto bail_out; + } + + if (getDriveType() != DRIVE_FIXED) { + Dmsg2(dbglvl_snap, "Invalid disk type %d for %ls\n", driveType, root); + goto bail_out; + } + + /* From fstype.c, except that we have WCHAR instead of char */ + /* We don't want any popups if there isn't any media in the drive */ + oldmode = SetErrorMode(SEM_FAILCRITICALERRORS); + result = GetVolumeInformationW(root, NULL, 0, NULL, + &componentlength, &fsflags, fstype, ARRAYSIZE(fstype)); + SetErrorMode(oldmode); + + if (result) { + /* Windows returns NTFS, FAT, etc. 
Make it lowercase to be consistent with other OSes */ + Dmsg1(dbglvl_snap, "fstype=%ls\n", fstype); + if (!_wcsicmp(fstype, L"ntfs")) { + can_Snapshot = true; + } + if (!_wcsicmp(fstype, L"refs")) { + can_Snapshot = true; + } + } +bail_out: + Dmsg2(dbglvl_snap, "%ls is %s suitable for VSS snapshot\n", root, can_Snapshot?"":"not"); + return can_Snapshot; +} + +/* Find a volume for a specific path */ +MTabEntry *MTab::search(char *p) +{ + wstring volume; + wstring path; + wstring rootPath; + + POOLMEM* pwszBuf = get_pool_memory(PM_FNAME); + UTF8_2_wchar(&pwszBuf, p); + path.assign((wchar_t *)pwszBuf); + volume = GetUniqueVolumeNameForPath(path, rootPath); + + MTabEntry *elt = (MTabEntry *)entries->search(&volume, volume_search); + free_pool_memory(pwszBuf); + + if (!elt) { + Dmsg1(000, "Unable to find %ls in volume list\n", volume.c_str()); + } + + return elt; +} + +bool MTab::addInSnapshotSet(char *p) +{ + MTabEntry *elt = search(p); + if (elt) { + if (!elt->in_SnapshotSet && elt->isSuitableForSnapshot()) { + nb_in_SnapshotSet++; + elt->setInSnapshotSet(); + } + } + return nb_in_SnapshotSet == entries->size(); +} + +/* Initialize the "entries" list will all existing volumes */ +bool MTab::get() +{ + DWORD count = 0; + WCHAR DeviceName[MAX_PATH] = L""; + HANDLE FindHandle = INVALID_HANDLE_VALUE; + size_t Index = 0; + bool Success = FALSE; + WCHAR VolumeName[MAX_PATH] = L""; + + Dmsg0(dbglvl_snap, "Filling MTAB\n"); + + + // Enumerate all volumes in the system. + FindHandle = FindFirstVolumeW(VolumeName, ARRAYSIZE(VolumeName)); + + if (FindHandle == INVALID_HANDLE_VALUE) { + lasterror = GetLastError(); + return false; + } + + for (;;) { + // Skip the \\?\ prefix and remove the trailing backslash. + Index = wcslen(VolumeName) - 1; + + if (VolumeName[0] != L'\\' || + VolumeName[1] != L'\\' || + VolumeName[2] != L'?' || + VolumeName[3] != L'\\' || + VolumeName[Index] != L'\\') + { + lasterror = ERROR_BAD_PATHNAME; + lasterror_str = "FindFirstVolumeW/FindNextVolumeW returned a bad path"; + Dmsg1(000, "FindFirstVolumeW/FindNextVolumeW returned a bad path %ls\n", VolumeName); + break; + } + + // + // QueryDosDeviceW does not allow a trailing backslash, + // so temporarily remove it. + VolumeName[Index] = L'\0'; + + count = QueryDosDeviceW(&VolumeName[4], DeviceName, + ARRAYSIZE(DeviceName)); + + VolumeName[Index] = L'\\'; + + if (count == 0) { + lasterror = GetLastError(); + Dmsg1(000, "QueryDosDeviceW failed with error code %d\n", lasterror); + break; + } + + MTabEntry *entry = New(MTabEntry(DeviceName, VolumeName)); + entries->insert(entry, volume_cmp); + + // + // Move on to the next volume. + Success = FindNextVolumeW(FindHandle, VolumeName, ARRAYSIZE(VolumeName)); + + if (!Success) { + lasterror = GetLastError(); + if (lasterror != ERROR_NO_MORE_FILES) { + Dmsg1(000, "FindNextVolumeW failed with error code %d\n", lasterror); + break; + } + + // Finished iterating + // through all the volumes. 
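+            // Normal termination: every volume reported by FindFirstVolumeW /
+            // FindNextVolumeW is now in "entries", kept ordered by volumeName
+            // (volume_cmp) so MTab::search() and addInSnapshotSet() can find
+            // it later.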
+ lasterror = ERROR_SUCCESS; + break; + } + } + + FindVolumeClose(FindHandle); + FindHandle = INVALID_HANDLE_VALUE; + + return true; +} + +BOOL VSSPathConverter(); +BOOL VSSPathConvert(const char *szFilePath, char *szShadowPath, int nBuflen); +BOOL VSSPathConvertW(const wchar_t *szFilePath, wchar_t *szShadowPath, int nBuflen); + +// {b5946137-7b9f-4925-af80-51abd60b20d5} + +static const GUID VSS_SWPRV_ProviderID = + { 0xb5946137, 0x7b9f, 0x4925, { 0xaf, 0x80, 0x51, 0xab, 0xd6, 0x0b, 0x20, 0xd5 } }; + +static pthread_once_t key_vss_once = PTHREAD_ONCE_INIT; +static pthread_key_t vssclient_key; + +static void create_vss_key() +{ + int status = pthread_key_create(&vssclient_key, NULL); + if (status != 0) { + berrno be; + Pmsg1(000, _("pthread key create failed: ERR=%s\n"), + be.bstrerror(status)); + ASSERT2(0, "pthread_key_create failed"); + } + SetVSSPathConvert(VSSPathConverter, VSSPathConvert, VSSPathConvertW); +} + +/* TODO: Use the JCR variable to get the VSSClient pointer + * the JCR FileDaemon part is not known in the VSS library + */ +static void store_vssclient_in_tsd(VSSClient *cl) +{ + int status = pthread_once(&key_vss_once, create_vss_key); + if (status != 0) { + berrno be; + Pmsg1(000, _("pthread key create failed: ERR=%s\n"), + be.bstrerror(status)); + ASSERT2(0, "pthread_once failed"); + } + + status = pthread_setspecific(vssclient_key, (void *)cl); + if (status != 0) { + berrno be; + Jmsg1(NULL, M_ABORT, 0, _("pthread_setspecific failed: ERR=%s\n"), + be.bstrerror(status)); + } +} + +static VSSClient *get_vssclient_from_tsd() +{ + return (VSSClient *)pthread_getspecific(vssclient_key); +} + +void +VSSCleanup(VSSClient *pVSSClient) +{ + store_vssclient_in_tsd(NULL); + if (pVSSClient) { + delete (pVSSClient); + } +} + +/* + * May be called multiple times + */ +VSSClient *VSSInit() +{ + VSSClient *pVSSClient = NULL; + /* decide which vss class to initialize */ + if (g_MajorVersion == 5) { + switch (g_MinorVersion) { + case 1: + pVSSClient = new VSSClientXP(); + break; + case 2: + pVSSClient = new VSSClient2003(); + break; + } + /* Vista or Longhorn or later */ + } else if (g_MajorVersion >= 6) { + pVSSClient = new VSSClientVista(); + } + store_vssclient_in_tsd(pVSSClient); + return pVSSClient; +} + +BOOL VSSPathConverter() +{ + if (get_vssclient_from_tsd() == NULL) { + return false; + } + return true; +} + +BOOL +VSSPathConvert(const char *szFilePath, char *szShadowPath, int nBuflen) +{ + VSSClient *pVSSClient = get_vssclient_from_tsd(); + if (pVSSClient) { + return pVSSClient->GetShadowPath(szFilePath, szShadowPath, nBuflen); + } else { + return false; + } +} + +BOOL +VSSPathConvertW(const wchar_t *szFilePath, wchar_t *szShadowPath, int nBuflen) +{ + VSSClient *pVSSClient = get_vssclient_from_tsd(); + if (pVSSClient) { + return pVSSClient->GetShadowPathW(szFilePath, szShadowPath, nBuflen); + } else { + return false; + } +} + +// Constructor +VSSClient::VSSClient() +{ + memset(this, 0, sizeof(VSSClient)); + m_pAlistWriterState = New(alist(10, not_owned_by_alist)); + m_pAlistWriterInfoText = New(alist(10, owned_by_alist)); + m_uidCurrentSnapshotSet = GUID_NULL; +} + +// Destructor +VSSClient::~VSSClient() +{ + // Release the IVssBackupComponents interface + // WARNING: this must be done BEFORE calling CoUninitialize() + if (m_pVssObject) { +// m_pVssObject->Release(); + m_pVssObject = NULL; + } + + DestroyWriterInfo(); + delete m_pAlistWriterState; + delete m_pAlistWriterInfoText; + + // Call CoUninitialize if the CoInitialize was performed successfully + if 
(m_bCoInitializeCalled) { + CoUninitialize(); + } + + delete m_VolumeList; +} + +bool VSSClient::InitializeForBackup(JCR *jcr) +{ + //return Initialize (VSS_CTX_BACKUP); + m_jcr = jcr; + return Initialize(0); +} + + +bool VSSClient::InitializeForRestore(JCR *jcr) +{ + m_metadata = NULL; + m_jcr = jcr; + return Initialize(0, true/*=>Restore*/); +} + +// Append a backslash to the current string +wstring AppendBackslash(wstring str) +{ + if (str.length() == 0) { + return wstring(L"\\"); + } + if (str[str.length() - 1] == L'\\') { + return str; + } + return str.append(L"\\"); +} + +// Get the unique volume name for the given path +wstring GetUniqueVolumeNameForPath(wstring path, wstring &rootPath) +{ + if (path.length() <= 0) { + //Dmsg0(50, "Failed path.len <= 0\n"); + return L""; + } + + // Add the backslash termination, if needed + path = AppendBackslash(path); + //Dmsg1(50, "Path=%ls\n", path.c_str()); + + // Get the root path of the volume + wchar_t volumeRootPath[MAX_PATH]; + wchar_t volumeName[MAX_PATH]; + wchar_t volumeUniqueName[MAX_PATH]; + + volumeRootPath[0] = 0; + volumeName[0] = 0; + volumeUniqueName[0] = 0; + + if (!p_GetVolumePathNameW || !p_GetVolumePathNameW((LPCWSTR)path.c_str(), volumeRootPath, MAX_PATH)) { + Dmsg1(50, "Failed GetVolumePathNameW path=%ls\n", path.c_str()); + return L""; + } + rootPath.assign(volumeRootPath); + Dmsg1(dbglvl_snap, "VolumeRootPath=%ls\n", volumeRootPath); + + // Get the volume name alias (might be different from the unique volume name in rare cases) + if (!p_GetVolumeNameForVolumeMountPointW || !p_GetVolumeNameForVolumeMountPointW(volumeRootPath, volumeName, MAX_PATH)) { + Dmsg1(50, "Failed GetVolumeNameForVolumeMountPointW path=%ls\n", volumeRootPath); + return L""; + } + Dmsg1(dbglvl_snap, "VolumeName=%ls\n", volumeName); + + // Get the unique volume name + if (!p_GetVolumeNameForVolumeMountPointW(volumeName, volumeUniqueName, MAX_PATH)) { + Dmsg1(50, "Failed GetVolumeNameForVolumeMountPointW path=%ls\n", volumeName); + return L""; + } + Dmsg1(dbglvl_snap, "VolumeUniqueName=%ls\n", volumeUniqueName); + return volumeUniqueName; +} + +bool VSSClient::GetShadowPath(const char *szFilePath, char *szShadowPath, int nBuflen) +{ + Dmsg1(dbglvl_snap, "GetShadowPath(%s)\n", szFilePath); + + if (m_bDuringRestore) { + return false; + } + + if (!m_bBackupIsInitialized) { + Jmsg0(m_jcr, M_FATAL, 0, "Backup is not Initialized\n"); + return false; + } + + wstring path, rootPath, volume; + POOLMEM* pwszBuf = get_pool_memory(PM_FNAME); + + UTF8_2_wchar(&pwszBuf, szFilePath); + path.assign((wchar_t *)pwszBuf); + + /* TODO: Have some cache here? */ + volume = GetUniqueVolumeNameForPath(path, rootPath); + + MTabEntry *vol = (MTabEntry *)m_VolumeList->entries->search(&volume,volume_search); + free_pool_memory(pwszBuf); + + if (vol && vol->shadowCopyName) { + if (WideCharToMultiByte(CP_UTF8,0,vol->shadowCopyName,-1,szShadowPath,nBuflen-1,NULL,NULL)) { + nBuflen -= (int)strlen(szShadowPath); + + bstrncat(szShadowPath, "\\", nBuflen); + nBuflen -= 1; + //Dmsg4(200,"szFilePath=%s rootPath=%ls len(rootPath)=%d nBuflen=%d\n", + // szFilePath, rootPath.c_str(), rootPath.length(), nBuflen); + + /* here we skip C:, we skip volume root */ + /* TODO: I'm not 100% sure that rootPath.lenght() WCHAR means X CHAR + * The main goal here is to convert + * c:/tmp/mounted/test -> \\?\Device\HardDiskSnapshot10\test + * + * So, we skip c:/tmp/mounted/ from the base file. 
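+       * Concretely, szShadowPath is built as shadowCopyName + "\" + the part
+       * of szFilePath that follows the volume root (rootPath); when the
+       * volume has no shadow copy, the original path is copied back unchanged
+       * and the function returns false.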
+ */ + if (strlen(szFilePath) > rootPath.length()) { + bstrncat(szShadowPath, szFilePath+rootPath.length(), nBuflen); + } + Dmsg2(dbglvl_snap, "GetShadowPath(%s) -> %s\n", szFilePath, szShadowPath); + return true; + } + } + + bstrncpy(szShadowPath, szFilePath, nBuflen); + Dmsg2(dbglvl_snap, "GetShadowPath(%s) -> %s\n", szFilePath, szShadowPath); + errno = EINVAL; + return false; +} + +/* + * c:/tmp -> \\?\GLOBALROOT\Device\HarddiskVolumeShadowCopy15\tmp + */ +bool VSSClient::GetShadowPathW(const wchar_t *szFilePath, wchar_t *szShadowPath, int nBuflen) +{ + Dmsg1(dbglvl_snap, "GetShadowPathW(%ls)\n", szFilePath); + + if (m_bDuringRestore) { + return false; + } + + if (!m_bBackupIsInitialized) { + Jmsg0(m_jcr, M_FATAL, 0, "Backup is not Initialized\n"); + return false; + } + wstring path, rootPath, volume; + path.assign((wchar_t *)szFilePath); + /* TODO: Have some cache here? */ + volume = GetUniqueVolumeNameForPath(path, rootPath); + MTabEntry *vol = (MTabEntry *)m_VolumeList->entries->search(&volume,volume_search); + + if (vol && vol->shadowCopyName) { + Dmsg5(dbglvl_snap, "szFilePath=%ls rootPath=%ls len(rootPath)=%d nBuflen=%d shadowCopyName=%ls\n", + szFilePath, rootPath.c_str(), rootPath.length(), nBuflen, vol->shadowCopyName); + + wcsncpy(szShadowPath, vol->shadowCopyName, nBuflen); + nBuflen -= (int)wcslen(vol->shadowCopyName); + + wcsncat(szShadowPath, L"\\", nBuflen); + nBuflen -= 1; + + //Dmsg4(200, "szFilePath=%ls rootPath=%ls len(rootPath)=%d nBuflen=%d\n", + // szFilePath, rootPath.c_str(), rootPath.length(), nBuflen); + + if (wcslen(szFilePath) > rootPath.length()) { + /* here we skip C:, we skip volume root */ + wcsncat(szShadowPath, szFilePath+rootPath.length(), nBuflen); + } + Dmsg2(dbglvl_snap, "GetShadowPathW(%ls) -> %ls\n", szFilePath, szShadowPath); + return true; + } + + wcsncpy(szShadowPath, szFilePath, nBuflen); + Dmsg2(dbglvl_snap, "GetShadowPathW(%ls) -> %ls\n", szFilePath, szShadowPath); + errno = EINVAL; + return false; +} + +const size_t VSSClient::GetWriterCount() +{ + return m_pAlistWriterInfoText->size(); +} + +const char* VSSClient::GetWriterInfo(int nIndex) +{ + return (char*)m_pAlistWriterInfoText->get(nIndex); +} + + +const int VSSClient::GetWriterState(int nIndex) +{ + void *item = m_pAlistWriterState->get(nIndex); + +/* Eliminate compiler warnings */ +#ifdef HAVE_VSS64 + return (int64_t)(char *)item; +#else + return (int)(char *)item; +#endif +} + +void VSSClient::AppendWriterInfo(int nState, const char* pszInfo) +{ + m_pAlistWriterInfoText->push(bstrdup(pszInfo)); + m_pAlistWriterState->push((void*)(intptr_t)nState); +} + +/* + * Note, this is called at the end of every job, so release all + * the items in the alists, but do not delete the alist. + */ +void VSSClient::DestroyWriterInfo() +{ + while (!m_pAlistWriterInfoText->empty()) { + free(m_pAlistWriterInfoText->pop()); + } + + while (!m_pAlistWriterState->empty()) { + m_pAlistWriterState->pop(); + } +} + +#endif diff --git a/src/win32/filed/vss.h b/src/win32/filed/vss.h new file mode 100644 index 00000000..e453df55 --- /dev/null +++ b/src/win32/filed/vss.h @@ -0,0 +1,344 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2019 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. 
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* -*- Mode: C -*- + * vss.h -- + */ +// +// Copyright transferred from MATRIX-Computer GmbH to +// Kern Sibbald by express permission. +/* + * + * Author : Thorsten Engel + * Created On : Fri May 06 21:44:00 2006 + */ + +#ifndef __VSS_H_ +#define __VSS_H_ + +#ifndef b_errno_win32 +#define b_errno_win32 (1<<29) +#endif + +#ifdef WIN32_VSS + +#define VSS_INIT_RESTORE_AFTER_INIT 1 +#define VSS_INIT_RESTORE_AFTER_GATHER 2 + +// some forward declarations +struct IVssAsync; + +#define bwcsdup(str) wcscpy((WCHAR *)bmalloc((wcslen(str)+1)*sizeof(WCHAR)),(str)) + +/* The MTabEntry class is representing a mounted volume, + * it associates a volume name with mount paths and a device name + */ +class MTabEntry: public SMARTALLOC { +public: + WCHAR *volumeName; // Name of the current volume + WCHAR *mountPaths; // List of mount paths + WCHAR *deviceName; + WCHAR *shadowCopyName; + bool in_SnapshotSet; + bool can_Snapshot; + UINT driveType; + rblink link; + + MTabEntry() { + mountPaths = NULL; + volumeName = NULL; + deviceName = NULL; + in_SnapshotSet = false; + shadowCopyName = NULL; + can_Snapshot = false; + driveType = 0; + }; + + MTabEntry(WCHAR *DeviceName, WCHAR *VolumeName) { + int last = wcslen(VolumeName); + if (VolumeName[last - 1] == L'\\') { + volumeName = bwcsdup(VolumeName); + } else { /* \\ + \0 */ + volumeName = (WCHAR *)bmalloc(last+2*sizeof(WCHAR)); + wcscpy(volumeName, VolumeName); + volumeName[last] = L'\\'; + volumeName[last+1] = L'\0'; + } + mountPaths = NULL; + in_SnapshotSet = false; + deviceName = bwcsdup(DeviceName); + shadowCopyName = NULL; + driveType = 0; + can_Snapshot = false; + get_paths(); + }; + + ~MTabEntry() { + destroy(); + }; + + void destroy() { + if (mountPaths) { + free(mountPaths); + mountPaths = NULL; + } + if (volumeName) { + free(volumeName); + volumeName = NULL; + } + if (deviceName) { + free(deviceName); + deviceName = NULL; + } + if (shadowCopyName) { + free(shadowCopyName); + shadowCopyName = NULL; + } + }; + + /* Return the drive type (cdrom, fixed, network, ...) */ + UINT getDriveType(); + + /* Return true if the current volume can be snapshoted (ie not CDROM or fat32) */ + bool isSuitableForSnapshot(); + + void setInSnapshotSet() { + Dmsg1(050, "Marking %ls for the SnapshotSet\n", mountPaths); + in_SnapshotSet = true; + } + + void debug_paths() { + WCHAR *p; + /* Display the paths in the list. */ + if (mountPaths != NULL) { + Dmsg2(DT_VOLUME|10, "Device: [%ls], Volume: [%ls]\n", deviceName, volumeName); + for ( p = mountPaths; p[0] != L'\0'; p += wcslen(p) + 1) { + Dmsg1(DT_VOLUME|10, " %ls\n", p); + } + } + }; + + /* Compute the path list assiciated with the current volume */ + bool get_paths() { + DWORD count = MAX_PATH + 1; + bool ret = false; + + for (;;) { + // Allocate a buffer to hold the paths. + mountPaths = (WCHAR*) malloc(count * sizeof(WCHAR)); + + // Obtain all of the paths + // for this volume. + ret = GetVolumePathNamesForVolumeNameW(volumeName, mountPaths, + count, &count); + if (ret) { + break; + } + + if (GetLastError() != ERROR_MORE_DATA) { + break; + } + + // Try again with the + // new suggested size. 
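+         // (GetVolumePathNamesForVolumeNameW reports the required length in
+         // "count" when it fails with ERROR_MORE_DATA, so the next pass can
+         // allocate a buffer that is large enough.)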
+ free(mountPaths); + mountPaths = NULL; + } + debug_paths(); + return ret; + }; + + /* Return the first mount point */ + WCHAR *first() { + return mountPaths; + }; + + /* Return the next mount point */ + WCHAR *next(WCHAR *prev) { + if (prev == NULL || prev[0] == L'\0') { + return NULL; + } + + prev += wcslen(prev) + 1; + + return (prev[0] == L'\0') ? NULL : prev; + }; +}; + +/* Class to handle all volumes of the system, it contains + * a list of all current volumes (MTabEntry) + */ +class MTab: public SMARTALLOC { +public: + DWORD lasterror; + const char *lasterror_str; + rblist *entries; /* MTabEntry */ + int nb_in_SnapshotSet; + + MTab() { + MTabEntry *elt = NULL; + lasterror = ERROR_SUCCESS; + lasterror_str = ""; + nb_in_SnapshotSet = 0; + entries = New(rblist(elt, &elt->link)); + }; + + ~MTab() { + if (entries) { + MTabEntry *elt; + foreach_rblist(elt, entries) { + elt->destroy(); + } + delete entries; + } + }; + /* Get a Volume by name */ + MTabEntry *search(char *file); + + /* Try to add a volume to the current snapshotset */ + bool addInSnapshotSet(char *file); + + /* Fill the "entries" list will all detected volumes of the system*/ + bool get(); +}; + +class VSSClient +{ +public: + VSSClient(); + virtual ~VSSClient(); + + // Backup Process + bool InitializeForBackup(JCR *jcr); + bool InitializeForRestore(JCR *jcr); + virtual bool CreateSnapshots(alist *mount_points) = 0; + virtual bool CloseBackup() = 0; + virtual bool CloseRestore() = 0; + virtual WCHAR *GetMetadata() = 0; + virtual const char* GetDriverName() = 0; + bool GetShadowPath (const char* szFilePath, char* szShadowPath, int nBuflen); + bool GetShadowPathW (const wchar_t* szFilePath, wchar_t* szShadowPath, int nBuflen); /* nBuflen in characters */ + + const size_t GetWriterCount(); + const char* GetWriterInfo(int nIndex); + const int GetWriterState(int nIndex); + void DestroyWriterInfo(); + void AppendWriterInfo(int nState, const char* pszInfo); + const bool IsInitialized() { return m_bBackupIsInitialized; }; + IUnknown *GetVssObject() { return m_pVssObject; }; + +private: + virtual bool Initialize(DWORD dwContext, bool bDuringRestore = FALSE) = 0; + virtual bool WaitAndCheckForAsyncOperation(IVssAsync* pAsync) = 0; + virtual void QuerySnapshotSet(GUID snapshotSetID) = 0; + +protected: + JCR *m_jcr; + + DWORD m_dwContext; + + IUnknown* m_pVssObject; + GUID m_uidCurrentSnapshotSet; + + MTab *m_VolumeList; + + alist *m_pAlistWriterState; + alist *m_pAlistWriterInfoText; + + bool m_bCoInitializeCalled; + bool m_bCoInitializeSecurityCalled; + bool m_bDuringRestore; /* true if we are doing a restore */ + bool m_bBackupIsInitialized; + bool m_bWriterStatusCurrent; + + WCHAR *m_metadata; + + void CreateVSSVolumeList(); + void DeleteVSSVolumeList(); +}; + +class VSSClientXP:public VSSClient +{ +public: + VSSClientXP(); + virtual ~VSSClientXP(); + virtual bool CreateSnapshots(alist *mount_points); + virtual bool CloseBackup(); + virtual bool CloseRestore(); + virtual WCHAR *GetMetadata(); +#ifdef _WIN64 + virtual const char* GetDriverName() { return "Win64 VSS"; }; +#else + virtual const char* GetDriverName() { return "Win32 VSS"; }; +#endif +private: + virtual bool Initialize(DWORD dwContext, bool bDuringRestore); + virtual bool WaitAndCheckForAsyncOperation(IVssAsync* pAsync); + virtual void QuerySnapshotSet(GUID snapshotSetID); + bool CheckWriterStatus(); +}; + +class VSSClient2003:public VSSClient +{ +public: + VSSClient2003(); + virtual ~VSSClient2003(); + virtual bool CreateSnapshots(alist *mount_points); + virtual bool 
CloseBackup(); + virtual bool CloseRestore(); + virtual WCHAR *GetMetadata(); +#ifdef _WIN64 + virtual const char* GetDriverName() { return "Win64 VSS"; }; +#else + virtual const char* GetDriverName() { return "Win32 VSS"; }; +#endif +private: + virtual bool Initialize(DWORD dwContext, bool bDuringRestore); + virtual bool WaitAndCheckForAsyncOperation(IVssAsync* pAsync); + virtual void QuerySnapshotSet(GUID snapshotSetID); + bool CheckWriterStatus(); +}; + +class VSSClientVista:public VSSClient +{ +public: + VSSClientVista(); + virtual ~VSSClientVista(); + virtual bool CreateSnapshots(alist *mount_points); + virtual bool CloseBackup(); + virtual bool CloseRestore(); + virtual WCHAR *GetMetadata(); +#ifdef _WIN64 + virtual const char* GetDriverName() { return "Win64 VSS"; }; +#else + virtual const char* GetDriverName() { return "Win32 VSS"; }; +#endif +private: + virtual bool Initialize(DWORD dwContext, bool bDuringRestore); + virtual bool WaitAndCheckForAsyncOperation(IVssAsync* pAsync); + virtual void QuerySnapshotSet(GUID snapshotSetID); + bool CheckWriterStatus(); +}; + + +BOOL VSSPathConvert(const char *szFilePath, char *szShadowPath, int nBuflen); +BOOL VSSPathConvertW(const wchar_t *szFilePath, wchar_t *szShadowPath, int nBuflen); + +#endif /* WIN32_VSS */ + +#endif /* __VSS_H_ */ diff --git a/src/win32/filed/vss_Vista.cpp b/src/win32/filed/vss_Vista.cpp new file mode 100644 index 00000000..c9e81842 --- /dev/null +++ b/src/win32/filed/vss_Vista.cpp @@ -0,0 +1,4 @@ +#ifdef WIN32_VSS +#define B_VSS_VISTA +#include "vss_generic.cpp" +#endif diff --git a/src/win32/filed/vss_W2K3.cpp b/src/win32/filed/vss_W2K3.cpp new file mode 100644 index 00000000..1c397dad --- /dev/null +++ b/src/win32/filed/vss_W2K3.cpp @@ -0,0 +1,13 @@ +/* + * We need one class per OS as Microsofts API + * differs only by calling convention and some + * function we don't use. + * + * vss_generic will handle all versions and + * switch between different headers to include. +*/ + +#ifdef WIN32_VSS +#define B_VSS_W2K3 +#include "vss_generic.cpp" +#endif diff --git a/src/win32/filed/vss_XP.cpp b/src/win32/filed/vss_XP.cpp new file mode 100644 index 00000000..20871296 --- /dev/null +++ b/src/win32/filed/vss_XP.cpp @@ -0,0 +1,4 @@ +#ifdef WIN32_VSS +#define B_VSS_XP +#include "vss_generic.cpp" +#endif diff --git a/src/win32/filed/vss_generic.cpp b/src/win32/filed/vss_generic.cpp new file mode 100644 index 00000000..6e1afa59 --- /dev/null +++ b/src/win32/filed/vss_generic.cpp @@ -0,0 +1,847 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2019 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +// -*- Mode: C++ -*- +// vss.cpp -- Interface to Volume Shadow Copies (VSS) +// +// Copyright transferred from MATRIX-Computer GmbH to +// Kern Sibbald by express permission. 
+// +// Author : Thorsten Engel +// Created On : Fri May 06 21:44:00 2005 + + +#ifdef WIN32_VSS + +#include "bacula.h" +#include "filed/filed.h" + +#undef setlocale + +// STL includes +#include +#include +#include +#include +#include +using namespace std; + +#include "ms_atl.h" +#include + +/* + * Kludges to get Vista code to compile. + * by Kern Sibbald - June 2007 + */ +#define __in IN +#define __out OUT +#define __RPC_unique_pointer +#define __RPC_string +#ifndef __RPC__out_ecount_part +#define __RPC__out_ecount_part(x, y) +#endif +#define __RPC__deref_inout_opt +#define __RPC__out + +#if !defined(ENABLE_NLS) +#define setlocale(p, d) +#endif + +#ifdef HAVE_STRSAFE_H +// Used for safe string manipulation +#include +#endif + +#ifdef HAVE_MINGW +class IXMLDOMDocument; +#endif + +/* Reduce compiler warnings from Windows vss code */ +#undef uuid +#define uuid(x) + +#ifdef B_VSS_XP + #define VSSClientGeneric VSSClientXP + #include "inc/WinXP/vss.h" + #include "inc/WinXP/vswriter.h" + #include "inc/WinXP/vsbackup.h" + +#endif + +#ifdef B_VSS_W2K3 + #define VSSClientGeneric VSSClient2003 + #include "inc/Win2003/vss.h" + #include "inc/Win2003/vswriter.h" + #include "inc/Win2003/vsbackup.h" +#endif + +#ifdef B_VSS_VISTA + #define VSSClientGeneric VSSClientVista + #include "inc/Win2003/vss.h" + #include "inc/Win2003/vswriter.h" + #include "inc/Win2003/vsbackup.h" +#endif + +#include "vss.h" + +static void JmsgVssApiStatus(JCR *jcr, int msg_status, HRESULT hr, const char *apiName) +{ + const char *errmsg; + if (hr == S_OK || hr == VSS_S_ASYNC_FINISHED) { + return; + } + switch (hr) { + case E_INVALIDARG: + errmsg = "One of the parameter values is not valid."; + break; + case E_OUTOFMEMORY: + errmsg = "The caller is out of memory or other system resources."; + break; + case E_ACCESSDENIED: + errmsg = "The caller does not have sufficient backup privileges or is not an administrator."; + break; + case VSS_E_INVALID_XML_DOCUMENT: + errmsg = "The XML document is not valid."; + break; + case VSS_E_OBJECT_NOT_FOUND: + errmsg = "The specified file does not exist."; + break; + case VSS_E_BAD_STATE: + errmsg = "Object is not initialized; called during restore or not called in correct sequence."; + break; + case VSS_E_WRITER_INFRASTRUCTURE: + errmsg = "The writer infrastructure is not operating properly. Check that the Event Service and VSS have been started, and check for errors associated with those services in the error log."; + break; + case VSS_S_ASYNC_CANCELLED: + errmsg = "The asynchronous operation was canceled by a previous call to IVssAsync::Cancel."; + break; + case VSS_S_ASYNC_PENDING: + errmsg = "The asynchronous operation is still running."; + break; + case RPC_E_CHANGED_MODE: + errmsg = "Previous call to CoInitializeEx specified the multithread apartment (MTA). This call indicates single-threaded apartment has occurred."; + break; + case S_FALSE: + errmsg = "No writer found for the current component."; + break; + default: + errmsg = "Unexpected error. The error code is logged in the error log file."; + break; + } + Jmsg(jcr, msg_status, 0, "VSS API failure calling \"%s\". 
ERR=%s\n", apiName, errmsg); +} + +#ifndef VSS_WS_FAILED_AT_BACKUPSHUTDOWN +#define VSS_WS_FAILED_AT_BACKUPSHUTDOWN (VSS_WRITER_STATE)15 +#endif + + +static void JmsgVssWriterStatus(JCR *jcr, int msg_status, VSS_WRITER_STATE eWriterStatus, char *writer_name) +{ + const char *errmsg; + + /* The following are normal states */ + if (eWriterStatus == VSS_WS_STABLE || + eWriterStatus == VSS_WS_WAITING_FOR_BACKUP_COMPLETE) { + return; + } + + /* Potential errors */ + switch (eWriterStatus) { + default: + case VSS_WS_UNKNOWN: + errmsg = "The writer's state is not known. This is a writer error."; + break; + case VSS_WS_WAITING_FOR_FREEZE: + errmsg = "The writer is waiting for the freeze state."; + break; + case VSS_WS_WAITING_FOR_THAW: + errmsg = "The writer is waiting for the thaw state."; + break; + case VSS_WS_WAITING_FOR_POST_SNAPSHOT: + errmsg = "The writer is waiting for the PostSnapshot state."; + break; + case VSS_WS_WAITING_FOR_BACKUP_COMPLETE: + errmsg = "The writer is waiting for the requester to finish its backup operation."; + break; + case VSS_WS_FAILED_AT_IDENTIFY: + errmsg = "The writer vetoed the shadow copy creation process at the writer identification state."; + break; + case VSS_WS_FAILED_AT_PREPARE_BACKUP: + errmsg = "The writer vetoed the shadow copy creation process during the backup preparation state."; + break; + case VSS_WS_FAILED_AT_PREPARE_SNAPSHOT: + errmsg = "The writer vetoed the shadow copy creation process during the PrepareForSnapshot state."; + break; + case VSS_WS_FAILED_AT_FREEZE: + errmsg = "The writer vetoed the shadow copy creation process during the freeze state."; + break; + case VSS_WS_FAILED_AT_THAW: + errmsg = "The writer vetoed the shadow copy creation process during the thaw state."; + break; + case VSS_WS_FAILED_AT_POST_SNAPSHOT: + errmsg = "The writer vetoed the shadow copy creation process during the PostSnapshot state."; + break; + case VSS_WS_FAILED_AT_BACKUP_COMPLETE: + errmsg = "The shadow copy has been created and the writer failed during the BackupComplete state."; + break; + case VSS_WS_FAILED_AT_PRE_RESTORE: + errmsg = "The writer failed during the PreRestore state."; + break; + case VSS_WS_FAILED_AT_POST_RESTORE: + errmsg = "The writer failed during the PostRestore state."; + break; + case VSS_WS_FAILED_AT_BACKUPSHUTDOWN: + errmsg = "The writer failed during the shutdown of the backup application."; + + } + Jmsg(jcr, msg_status, 0, "VSS Writer \"%s\" has invalid state. 
ERR=%s\n", writer_name, errmsg); +} + +/* + * + * some helper functions + * + * + */ + + +// Defined in vss.cpp +// Append a backslash to the current string +wstring AppendBackslash(wstring str); +// Get the unique volume name for the given path +wstring GetUniqueVolumeNameForPath(wstring path, wstring &rootPath); + +// Helper macro for quick treatment of case statements for error codes +#define GEN_MERGE(A, B) A##B +#define GEN_MAKE_W(A) GEN_MERGE(L, A) + +#define CHECK_CASE_FOR_CONSTANT(value) \ + case value: return (GEN_MAKE_W(#value)); + + +// Convert a writer status into a string +inline const wchar_t* GetStringFromWriterStatus(VSS_WRITER_STATE eWriterStatus) +{ + switch (eWriterStatus) { + CHECK_CASE_FOR_CONSTANT(VSS_WS_STABLE); + CHECK_CASE_FOR_CONSTANT(VSS_WS_WAITING_FOR_FREEZE); + CHECK_CASE_FOR_CONSTANT(VSS_WS_WAITING_FOR_THAW); + CHECK_CASE_FOR_CONSTANT(VSS_WS_WAITING_FOR_POST_SNAPSHOT); + CHECK_CASE_FOR_CONSTANT(VSS_WS_WAITING_FOR_BACKUP_COMPLETE); + CHECK_CASE_FOR_CONSTANT(VSS_WS_FAILED_AT_IDENTIFY); + CHECK_CASE_FOR_CONSTANT(VSS_WS_FAILED_AT_PREPARE_BACKUP); + CHECK_CASE_FOR_CONSTANT(VSS_WS_FAILED_AT_PREPARE_SNAPSHOT); + CHECK_CASE_FOR_CONSTANT(VSS_WS_FAILED_AT_FREEZE); + CHECK_CASE_FOR_CONSTANT(VSS_WS_FAILED_AT_THAW); + CHECK_CASE_FOR_CONSTANT(VSS_WS_FAILED_AT_POST_SNAPSHOT); + CHECK_CASE_FOR_CONSTANT(VSS_WS_FAILED_AT_BACKUP_COMPLETE); + CHECK_CASE_FOR_CONSTANT(VSS_WS_FAILED_AT_PRE_RESTORE); + CHECK_CASE_FOR_CONSTANT(VSS_WS_FAILED_AT_POST_RESTORE); + default: + return L"Error or Undefined"; + } +} + +// Constructor + +VSSClientGeneric::VSSClientGeneric() +{ +} + +// Destructor +VSSClientGeneric::~VSSClientGeneric() +{ +} + +// Initialize the COM infrastructure and the internal pointers +bool VSSClientGeneric::Initialize(DWORD dwContext, bool bDuringRestore) +{ + CComPtr pAsync1; + VSS_BACKUP_TYPE backup_type; + IVssBackupComponents* pVssObj = (IVssBackupComponents*)m_pVssObject; + + if (!(p_CreateVssBackupComponents && p_VssFreeSnapshotProperties)) { + Dmsg2(0, "VSSClientGeneric::Initialize: p_CreateVssBackupComponents=0x%08X, p_VssFreeSnapshotProperties=0x%08X\n", p_CreateVssBackupComponents, p_VssFreeSnapshotProperties); + Jmsg(m_jcr, M_FATAL, 0, "Entry point CreateVssBackupComponents or VssFreeSnapshotProperties missing.\n"); + return false; + } + + if (m_VolumeList) { + delete m_VolumeList; + } + + m_VolumeList = New(MTab()); // TODO: See if we do this part only in backup + if (!m_VolumeList->get()) { + Jmsg(m_jcr, M_ERROR, 0, "Unable to list devices and volumes.\n"); + return false; + } + + HRESULT hr; + // Initialize COM + if (!m_bCoInitializeCalled) { + hr = CoInitializeEx(NULL, COINIT_MULTITHREADED); + if (FAILED(hr)) { + Dmsg1(0, "VSSClientGeneric::Initialize: CoInitializeEx returned 0x%08X\n", hr); + JmsgVssApiStatus(m_jcr, M_FATAL, hr, "CoInitializeEx"); + errno = b_errno_win32; + return false; + } + m_bCoInitializeCalled = true; + } + + // Release the any old IVssBackupComponents interface + if (pVssObj) { + pVssObj->Release(); + m_pVssObject = NULL; + } + + // Create new internal backup components object + hr = p_CreateVssBackupComponents((IVssBackupComponents**)&m_pVssObject); + if (FAILED(hr)) { + berrno be; + Dmsg2(0, "VSSClientGeneric::Initialize: CreateVssBackupComponents returned 0x%08X. 
ERR=%s\n", + hr, be.bstrerror(b_errno_win32)); + JmsgVssApiStatus(m_jcr, M_FATAL, hr, "CreateVssBackupComponents"); + errno = b_errno_win32; + return false; + } + + /* Define shorthand VssObject with time */ + pVssObj = (IVssBackupComponents*)m_pVssObject; + + + if (!bDuringRestore) { +#if defined(B_VSS_W2K3) || defined(B_VSS_VISTA) + if (dwContext != VSS_CTX_BACKUP) { + hr = pVssObj->SetContext(dwContext); + if (FAILED(hr)) { + Dmsg1(0, "VSSClientGeneric::Initialize: IVssBackupComponents->SetContext returned 0x%08X\n", hr); + JmsgVssApiStatus(m_jcr, M_FATAL, hr, "SetContext"); + errno = b_errno_win32; + return false; + } + } +#endif + + // 1. InitializeForBackup + hr = pVssObj->InitializeForBackup(); + if (FAILED(hr)) { + Dmsg1(0, "VSSClientGeneric::Initialize: IVssBackupComponents->InitializeForBackup returned 0x%08X\n", hr); + JmsgVssApiStatus(m_jcr, M_FATAL, hr, "InitializeForBackup"); + errno = b_errno_win32; + return false; + } + + // 2. SetBackupState + switch (m_jcr->getJobLevel()) { + case L_FULL: + backup_type = VSS_BT_FULL; + break; + case L_DIFFERENTIAL: + backup_type = VSS_BT_DIFFERENTIAL; + break; + case L_INCREMENTAL: + backup_type = VSS_BT_INCREMENTAL; + break; + default: + Dmsg1(0, "VSSClientGeneric::Initialize: unknown backup level %d\n", m_jcr->getJobLevel()); + backup_type = VSS_BT_FULL; + break; + } + hr = pVssObj->SetBackupState(true, true, backup_type, false); /* FIXME: need to support partial files - make last parameter true when done */ + if (FAILED(hr)) { + Dmsg1(0, "VSSClientGeneric::Initialize: IVssBackupComponents->SetBackupState returned 0x%08X\n", hr); + JmsgVssApiStatus(m_jcr, M_FATAL, hr, "SetBackupState"); + errno = b_errno_win32; + return false; + } + + // 3. GatherWriterMetaData + hr = pVssObj->GatherWriterMetadata(&pAsync1.p); + if (FAILED(hr)) { + Dmsg1(0, "VSSClientGeneric::Initialize: IVssBackupComponents->GatherWriterMetadata returned 0x%08X\n", hr); + JmsgVssApiStatus(m_jcr, M_FATAL, hr, "GatherWriterMetadata"); + errno = b_errno_win32; + return false; + } + // Waits for the async operation to finish and checks the result + if (!WaitAndCheckForAsyncOperation(pAsync1.p)) { + /* Error message already printed */ + errno = b_errno_win32; + return false; + } + } + + // We are during restore now? + m_bDuringRestore = bDuringRestore; + + // Keep the context + m_dwContext = dwContext; + + return true; +} + +bool VSSClientGeneric::WaitAndCheckForAsyncOperation(IVssAsync* pAsync) +{ + // Wait until the async operation finishes + // unfortunately we can't use a timeout here yet. + // the interface would allow it on W2k3, + // but it is not implemented yet.... + + HRESULT hr; + + // Check the result of the asynchronous operation + HRESULT hrReturned = S_OK; + + int timeout = 1800; // 30 minutes ... 
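+   // The loop below polls IVssAsync::QueryStatus() roughly once per second,
+   // for at most 'timeout' iterations (about 30 minutes), until the operation
+   // leaves the VSS_S_ASYNC_PENDING state.  Failures of QueryStatus() itself
+   // are only counted in queryErrors; any final status other than
+   // VSS_S_ASYNC_FINISHED is reported as a fatal error.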
+ + int queryErrors = 0; + do { + if (hrReturned != S_OK) { + Sleep(1000); + } + hrReturned = S_OK; + hr = pAsync->QueryStatus(&hrReturned, NULL); + if (FAILED(hr)) { + queryErrors++; + } + } while ((timeout-- > 0) && (hrReturned == VSS_S_ASYNC_PENDING)); + + if (hrReturned == VSS_S_ASYNC_FINISHED) { + return true; + } + + JmsgVssApiStatus(m_jcr, M_FATAL, hr, "Query Async Status after 30 minute wait"); + return false; +} + +static int volume_cmp(void *e1, void *e2) +{ + WCHAR *v1 = (WCHAR *) e1; + MTabEntry *v2 = (MTabEntry *) e2; + return wcscmp(v1, v2->volumeName); +} + +static pthread_mutex_t create_mutex = PTHREAD_MUTEX_INITIALIZER; + +bool VSSClientGeneric::CreateSnapshots(alist *mount_points) +{ + IVssBackupComponents *pVssObj; + bool ret = false; + HRESULT hr; + + /* AddToSnapshotSet */ + CComPtr pAsync1; + CComPtr pAsync2; + VSS_ID pid; + + /* While testing the concurrent snapshot creation, I found out that the entire snapshot + * creation process should be protected by a mutex. (InitializeForBackups and CreateSnapshots). + */ + + /* Create only one snapshot set at a time */ + P(create_mutex); + + /* szDriveLetters contains all drive letters in uppercase */ + /* if a drive can not being added, it's converted to lowercase in szDriveLetters */ + /* http://msdn.microsoft.com/library/default.asp?url=/library/en-us/vss/base/ivssbackupcomponents_startsnapshotset.asp */ + if (!m_pVssObject || m_bBackupIsInitialized) { + Jmsg(m_jcr, M_FATAL, 0, "No pointer to VssObject or Backup is not Initialized\n"); + errno = ENOSYS; + goto bail_out; + } + + m_uidCurrentSnapshotSet = GUID_NULL; + + pVssObj = (IVssBackupComponents*)m_pVssObject; + + /* startSnapshotSet */ + hr = pVssObj->StartSnapshotSet(&m_uidCurrentSnapshotSet); + if (FAILED(hr)) { + JmsgVssApiStatus(m_jcr, M_FATAL, hr, "StartSnapshotSet"); + errno = ENOSYS; + goto bail_out; + } + + /* + * Now try all paths in case they are mount points + */ + for (int i=0; i < mount_points->size(); i++) { + wchar_t *p = (wchar_t *)mount_points->get(i); + // store uniquevolumname + if (SUCCEEDED(pVssObj->AddToSnapshotSet(p, GUID_NULL, &pid))) { + MTabEntry *elt = (MTabEntry*)m_VolumeList->entries->search(p, volume_cmp); + ASSERT2(elt, "Should find the volume in the list"); + Jmsg(m_jcr, M_INFO, 0, " Snapshot mount point: %ls\n", elt->first()); + Dmsg1(50, "AddToSnapshot OK for Vol: %ls\n", p); + } else { + //Dmsg1(50, "AddToSnapshot() failed for Vol: %ls\n", (LPWSTR)volume.c_str()); + //Dmsg1(50, "AddToSnapshot() failed for path: %s\n", p); + } + } + + /* PrepareForBackup */ + hr = pVssObj->PrepareForBackup(&pAsync1.p); + if (FAILED(hr)) { + JmsgVssApiStatus(m_jcr, M_FATAL, hr, "PrepareForBackup"); + errno = b_errno_win32; + goto bail_out; + } + + // Waits for the async operation to finish and checks the result + if (!WaitAndCheckForAsyncOperation(pAsync1.p)) { + /* Error message already printed */ + errno = b_errno_win32; + goto bail_out; + } + + /* get latest info about writer status */ + if (!CheckWriterStatus()) { + /* Error message already printed */ + errno = b_errno_win32; /* Error already printed */ + goto bail_out; + } + + /* DoSnapShotSet */ + hr = pVssObj->DoSnapshotSet(&pAsync2.p); + if (FAILED(hr)) { + JmsgVssApiStatus(m_jcr, M_FATAL, hr, "DoSnapshotSet"); + errno = b_errno_win32; + goto bail_out; + } + + // Waits for the async operation to finish and checks the result + if (!WaitAndCheckForAsyncOperation(pAsync2.p)) { + /* Error message already printed */ + errno = b_errno_win32; + goto bail_out; + } + + /* get latest info about writer 
status */ + if (!CheckWriterStatus()) { + /* Error message already printed */ + errno = b_errno_win32; /* Error already printed */ + goto bail_out; + } + + /* query snapshot info */ + QuerySnapshotSet(m_uidCurrentSnapshotSet); + + m_bBackupIsInitialized = true; + + ret = true; +bail_out: + V(create_mutex); + return ret; +} + +bool VSSClientGeneric::CloseBackup() +{ + bool bRet = false; + HRESULT hr; + BSTR xml; + IVssBackupComponents* pVssObj = (IVssBackupComponents*)m_pVssObject; + + if (!m_pVssObject) { + Jmsg(m_jcr, M_FATAL, 0, "VssOject is NULL.\n"); + errno = ENOSYS; + return bRet; + } + /* Create or Delete Snapshot one at a time */ + P(create_mutex); + + CComPtr pAsync; + m_bBackupIsInitialized = false; + + hr = pVssObj->BackupComplete(&pAsync.p); + if (SUCCEEDED(hr)) { + // Waits for the async operation to finish and checks the result + if (!WaitAndCheckForAsyncOperation(pAsync.p)) { + /* Error message already printed */ + errno = b_errno_win32; + } else { + bRet = true; + } + } else { + JmsgVssApiStatus(m_jcr, M_ERROR, hr, "BackupComplete"); + errno = b_errno_win32; + pVssObj->AbortBackup(); + } + + /* get latest info about writer status */ + CheckWriterStatus(); + + hr = pVssObj->SaveAsXML(&xml); + if (SUCCEEDED(hr)) { + m_metadata = xml; + } else { + m_metadata = NULL; + } + + /* FIXME?: The docs http://msdn.microsoft.com/en-us/library/aa384582%28v=VS.85%29.aspx say this isn't required... */ + if (m_uidCurrentSnapshotSet != GUID_NULL) { + VSS_ID idNonDeletedSnapshotID = GUID_NULL; + LONG lSnapshots; + + pVssObj->DeleteSnapshots( + m_uidCurrentSnapshotSet, + VSS_OBJECT_SNAPSHOT_SET, + false, + &lSnapshots, + &idNonDeletedSnapshotID); + + m_uidCurrentSnapshotSet = GUID_NULL; + } + + if (m_bWriterStatusCurrent) { + m_bWriterStatusCurrent = false; + pVssObj->FreeWriterStatus(); + } + + pVssObj->Release(); + m_pVssObject = NULL; + + // Call CoUninitialize if the CoInitialize was performed sucesfully + if (m_bCoInitializeCalled) { + CoUninitialize(); + m_bCoInitializeCalled = false; + } + + V(create_mutex); + return bRet; +} + +WCHAR *VSSClientGeneric::GetMetadata() +{ + return m_metadata; +} + +bool VSSClientGeneric::CloseRestore() +{ + //HRESULT hr; + IVssBackupComponents* pVssObj = (IVssBackupComponents*)m_pVssObject; + CComPtr pAsync; + + if (!pVssObj) { + Jmsg(m_jcr, M_FATAL, 0, "No pointer to VssObject or Backup is not Initialized\n"); + errno = ENOSYS; + return false; + } +#if 0 +/* done by plugin now */ + if (SUCCEEDED(hr = pVssObj->PostRestore(&pAsync.p))) { + // Waits for the async operation to finish and checks the result + if (!WaitAndCheckForAsyncOperation(pAsync1.p)) { + /* Error message already printed */ + errno = b_errno_win32; + return false; + } + /* get latest info about writer status */ + if (!CheckWriterStatus()) { + /* Error message already printed */ + errno = b_errno_win32; + return false; + } + } else { + errno = b_errno_win32; + return false; + } +#endif + return true; +} + +// Query all the shadow copies in the given set +void VSSClientGeneric::QuerySnapshotSet(GUID snapshotSetID) +{ + if (!(p_CreateVssBackupComponents && p_VssFreeSnapshotProperties)) { + Jmsg(m_jcr, M_FATAL, 0, "CreateVssBackupComponents or VssFreeSnapshotProperties API is NULL.\n"); + errno = ENOSYS; + return; + } + + if (snapshotSetID == GUID_NULL || m_pVssObject == NULL) { + Jmsg(m_jcr, M_FATAL, 0, "snapshotSetID == NULL or VssObject is NULL.\n"); + errno = ENOSYS; + return; + } + + IVssBackupComponents* pVssObj = (IVssBackupComponents*) m_pVssObject; + + // Get list all shadow copies. 
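+   // The Query() call below returns an IVssEnumObject; each VSS_SNAPSHOT_PROP
+   // fetched with Next() is compared against the requested snapshot set ID, and
+   // for matching entries the snapshot device object name is stored on the
+   // corresponding mount-table entry.  Each property structure is released
+   // with p_VssFreeSnapshotProperties() after use.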
+ CComPtr pIEnumSnapshots; + HRESULT hr = pVssObj->Query( GUID_NULL, + VSS_OBJECT_NONE, + VSS_OBJECT_SNAPSHOT, + (IVssEnumObject**)(&pIEnumSnapshots) ); + + // If there are no shadow copies, just return + if (FAILED(hr)) { + Jmsg(m_jcr, M_FATAL, 0, "No Volume Shadow copies made.\n"); + errno = b_errno_win32; + return; + } + + // Enumerate all shadow copies. + VSS_OBJECT_PROP Prop; + VSS_SNAPSHOT_PROP& Snap = Prop.Obj.Snap; + + while (true) { + // Get the next element + ULONG ulFetched; + hr = (pIEnumSnapshots.p)->Next(1, &Prop, &ulFetched); + + // We reached the end of list + if (ulFetched == 0) { + break; + } + + Dmsg2(DT_VOLUME|50, "Adding %ls => %ls to m_VolumeList\n", + Snap.m_pwszOriginalVolumeName, Snap.m_pwszSnapshotDeviceObject); + + // Print the shadow copy (if not filtered out) + if (Snap.m_SnapshotSetId == snapshotSetID) { + MTabEntry *elt = (MTabEntry*)m_VolumeList->entries->search(Snap.m_pwszOriginalVolumeName, volume_cmp); + if (!elt) { + Dmsg1(DT_VOLUME|50, "Unable to find [%ls] in the device list\n", Snap.m_pwszOriginalVolumeName); + foreach_rblist(elt, m_VolumeList->entries) { + elt->debug_paths(); + } + Jmsg(m_jcr, M_WARNING, 0, _("Unable to find volume %ls in the device list\n"), Snap.m_pwszOriginalVolumeName); + } else { + elt->shadowCopyName = bwcsdup(Snap.m_pwszSnapshotDeviceObject); + elt->setInSnapshotSet(); + } + } + p_VssFreeSnapshotProperties(&Snap); + } + errno = 0; +} + +// Check the status for all selected writers +bool VSSClientGeneric::CheckWriterStatus() +{ + /* + http://msdn.microsoft.com/library/default.asp?url=/library/en-us/vss/base/ivssbackupcomponents_startsnapshotset.asp + */ + IVssBackupComponents* pVssObj = (IVssBackupComponents*)m_pVssObject; + if (!pVssObj) { + Jmsg(m_jcr, M_FATAL, 0, "Cannot get IVssBackupComponents pointer.\n"); + errno = ENOSYS; + return false; + } + DestroyWriterInfo(); + + if (m_bWriterStatusCurrent) { + m_bWriterStatusCurrent = false; + pVssObj->FreeWriterStatus(); + } + // Gather writer status to detect potential errors + CComPtr pAsync; + + HRESULT hr = pVssObj->GatherWriterStatus(&pAsync.p); + if (FAILED(hr)) { + JmsgVssApiStatus(m_jcr, M_FATAL, hr, "GatherWriterStatus"); + errno = b_errno_win32; + return false; + } + + // Waits for the async operation to finish and checks the result + if (!WaitAndCheckForAsyncOperation(pAsync.p)) { + /* Error message already printed */ + errno = b_errno_win32; + return false; + } + + m_bWriterStatusCurrent = true; + + unsigned cWriters = 0; + + hr = pVssObj->GetWriterStatusCount(&cWriters); + if (FAILED(hr)) { + JmsgVssApiStatus(m_jcr, M_FATAL, hr, "GetWriterStatusCount"); + errno = b_errno_win32; + return false; + } + + int nState; + POOLMEM *szBuf = get_pool_memory(PM_FNAME); + // Enumerate each writer + for (unsigned iWriter = 0; iWriter < cWriters; iWriter++) { + VSS_ID idInstance = GUID_NULL; + VSS_ID idWriter= GUID_NULL; + VSS_WRITER_STATE eWriterStatus = VSS_WS_UNKNOWN; + CComBSTR bstrWriterName; + HRESULT hrWriterFailure = S_OK; + + // Get writer status + hr = pVssObj->GetWriterStatus(iWriter, + &idInstance, + &idWriter, + &bstrWriterName, + &eWriterStatus, + &hrWriterFailure); + if (FAILED(hr)) { + /* Api failed */ + JmsgVssApiStatus(m_jcr, M_WARNING, hr, "GetWriterStatus"); + nState = 0; /* Unknown writer state -- API failed */ + } else { + switch(eWriterStatus) { + case VSS_WS_FAILED_AT_IDENTIFY: + case VSS_WS_FAILED_AT_PREPARE_BACKUP: + case VSS_WS_FAILED_AT_PREPARE_SNAPSHOT: + case VSS_WS_FAILED_AT_FREEZE: + case VSS_WS_FAILED_AT_THAW: + case VSS_WS_FAILED_AT_POST_SNAPSHOT: 
+ case VSS_WS_FAILED_AT_BACKUP_COMPLETE: + case VSS_WS_FAILED_AT_PRE_RESTORE: + case VSS_WS_FAILED_AT_POST_RESTORE: + #if defined(B_VSS_W2K3) || defined(B_VSS_VISTA) + case VSS_WS_FAILED_AT_BACKUPSHUTDOWN: + #endif + /* Writer status problem */ + wchar_2_UTF8(&szBuf, bstrWriterName.p); + JmsgVssWriterStatus(m_jcr, M_WARNING, eWriterStatus, szBuf); + nState = -1; /* bad writer state */ + break; + + default: + /* ok */ + nState = 1; /* Writer state OK */ + } + } + /* store text info */ + char str[1000]; + bstrncpy(str, "\"", sizeof(str)); + wchar_2_UTF8(&szBuf, bstrWriterName.p); + bstrncat(str, szBuf, sizeof(str)); + bstrncat(str, "\", State: 0x", sizeof(str)); + itoa(eWriterStatus, szBuf, sizeof_pool_memory(szBuf)); + bstrncat(str, szBuf, sizeof(str)); + bstrncat(str, " (", sizeof(str)); + wchar_2_UTF8(&szBuf, GetStringFromWriterStatus(eWriterStatus)); + bstrncat(str, szBuf, sizeof(str)); + bstrncat(str, ")", sizeof(str)); + AppendWriterInfo(nState, (const char *)str); + } + free_pool_memory(szBuf); + errno = 0; + return true; +} + +#endif /* WIN32_VSS */ diff --git a/src/win32/filed/who.h b/src/win32/filed/who.h new file mode 100644 index 00000000..0a7b512e --- /dev/null +++ b/src/win32/filed/who.h @@ -0,0 +1,33 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2019 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Kern Sibbald, August 2007 + * + * This file is pulled in by certain generic routines in libwin32 + * to define the names of the daemon that is being built. 
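+ *
+ * The macros below give this daemon its service name ("Bacula-fd"), its
+ * lower-case name and description, and map the generic terminate_app()
+ * call onto the File daemon's terminate_filed() routine, so the shared
+ * win32 service code can be built without knowing which daemon it is in.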
+ */ + +#define APP_NAME "Bacula-fd" +#define LC_APP_NAME "bacula-fd" +#define APP_DESC "Bacula File Backup Service" + +#define terminate_app(x) terminate_filed(x) +extern void terminate_filed(int sig); +class VSSClient; +extern VSSClient *VSSInit(); diff --git a/src/win32/full_win32_installer/ConfigPage1.nsh b/src/win32/full_win32_installer/ConfigPage1.nsh new file mode 100644 index 00000000..71965a2a --- /dev/null +++ b/src/win32/full_win32_installer/ConfigPage1.nsh @@ -0,0 +1,294 @@ +Function EnterConfigPage1 + ${If} $AutomaticInstall = 1 + Abort + ${EndIf} + + IntOp $R0 $NewComponents & ${ComponentsFileAndStorage} + + ${If} $R0 = 0 + Abort + ${EndIf} + + FileOpen $R5 "$PLUGINSDIR\ConfigPage1.ini" w + + StrCpy $R6 1 ; Field Number + StrCpy $R7 0 ; Top + + IntOp $R0 $NewComponents & ${ComponentFile} + ${If} $R0 <> 0 + IntOp $R8 $R7 + 52 + FileWrite $R5 '[Field $R6]$\r$\nType="GroupBox"$\r$\nText="Client"$\r$\nLeft=0$\r$\nTop=$R7$\r$\nRight=300$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 12 + + IntOp $R8 $R7 + 8 + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Name"$\r$\nLeft=6$\r$\nTop=$R7$\r$\nRight=26$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + + IntOp $R8 $R8 + 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nState=$ConfigClientName$\r$\nLeft=50$\r$\nTop=$R7$\r$\nRight=158$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 2 + + IntOp $R8 $R8 - 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Port"$\r$\nLeft=172$\r$\nTop=$R7$\r$\nRight=188$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + + IntOp $R8 $R8 + 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nFlags="ONLY_NUMBERS"$\r$\nState=$ConfigClientPort$\r$\nLeft=190$\r$\nTop=$R7$\r$\nRight=218$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 2 + + IntOp $R8 $R8 - 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Max Jobs"$\r$\nLeft=238$\r$\nTop=$R7$\r$\nRight=270$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + + IntOp $R8 $R8 + 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nFlags="ONLY_NUMBERS"$\r$\nState=$ConfigClientMaxJobs$\r$\nLeft=274$\r$\nTop=$R7$\r$\nRight=294$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 16 + + IntOp $R8 $R7 + 8 + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Password"$\r$\nLeft=6$\r$\nTop=$R7$\r$\nRight=38$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + + IntOp $R8 $R8 + 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nState=$ConfigClientPassword$\r$\nLeft=50$\r$\nTop=$R7$\r$\nRight=294$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 14 + + IntOp $R8 $R7 + 10 + FileWrite $R5 '[Field $R6]$\r$\nType="Checkbox"$\r$\nState=$ConfigClientInstallService$\r$\nText="Install as service"$\r$\nLeft=50$\r$\nTop=$R7$\r$\nRight=118$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + + FileWrite $R5 '[Field $R6]$\r$\nType="Checkbox"$\r$\nState=$ConfigClientStartService$\r$\nText="Start after install"$\r$\nLeft=190$\r$\nTop=$R7$\r$\nRight=260$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 16 + ${Endif} + + IntOp $R0 $NewComponents & ${ComponentStorage} + ${If} $R0 <> 0 + IntOp $R8 $R7 + 52 + FileWrite $R5 '[Field $R6]$\r$\nType="GroupBox"$\r$\nText="Storage"$\r$\nLeft=0$\r$\nTop=$R7$\r$\nRight=300$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 12 + + IntOp $R8 $R7 + 8 + FileWrite $R5 '[Field 
$R6]$\r$\nType="Label"$\r$\nText="Name"$\r$\nLeft=6$\r$\nTop=$R7$\r$\nRight=26$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + + IntOp $R8 $R8 + 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nState=$ConfigStorageName$\r$\nLeft=50$\r$\nTop=$R7$\r$\nRight=158$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 2 + + IntOp $R8 $R8 - 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Port"$\r$\nLeft=172$\r$\nTop=$R7$\r$\nRight=188$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + + IntOp $R8 $R8 + 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nFlags="ONLY_NUMBERS"$\r$\nState=$ConfigStoragePort$\r$\nLeft=190$\r$\nTop=$R7$\r$\nRight=218$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 2 + + IntOp $R8 $R8 - 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Max Jobs"$\r$\nLeft=238$\r$\nTop=$R7$\r$\nRight=270$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + + IntOp $R8 $R8 + 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nFlags="ONLY_NUMBERS"$\r$\nState=$ConfigStorageMaxJobs$\r$\nLeft=274$\r$\nTop=$R7$\r$\nRight=294$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 16 + + IntOp $R8 $R7 + 8 + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Password"$\r$\nLeft=6$\r$\nTop=$R7$\r$\nRight=38$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + + IntOp $R8 $R8 + 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nState=$ConfigStoragePassword$\r$\nLeft=50$\r$\nTop=$R7$\r$\nRight=294$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 14 + + IntOp $R8 $R7 + 10 + FileWrite $R5 '[Field $R6]$\r$\nType="Checkbox"$\r$\nState=$ConfigStorageInstallService$\r$\nText="Install as service"$\r$\nLeft=50$\r$\nTop=$R7$\r$\nRight=118$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + + FileWrite $R5 '[Field $R6]$\r$\nType="Checkbox"$\r$\nState=$ConfigStorageStartService$\r$\nText="Start after install"$\r$\nLeft=190$\r$\nTop=$R7$\r$\nRight=260$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 16 + ${Endif} + + IntOp $R6 $R6 - 1 + + FileWrite $R5 "[Settings]$\r$\nNumFields=$R6$\r$\n" + + FileClose $R5 + + !insertmacro MUI_HEADER_TEXT "$(TITLE_ConfigPage1)" "$(SUBTITLE_ConfigPage1)" + !insertmacro MUI_INSTALLOPTIONS_INITDIALOG "ConfigPage1.ini" + Pop $HDLG ;HWND of dialog + + ; Initialize Controls + + StrCpy $R6 1 ; Field Number + + IntOp $R0 $NewComponents & ${ComponentFile} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 2 + + ; Client Name + !insertmacro MUI_INSTALLOPTIONS_READ $HCTL "ConfigPage1.ini" "Field $R6" "HWND" + SendMessage $HCTL ${EM_LIMITTEXT} 30 0 + + IntOp $R6 $R6 + 2 + + ; Client Port Number + !insertmacro MUI_INSTALLOPTIONS_READ $HCTL "ConfigPage1.ini" "Field $R6" "HWND" + SendMessage $HCTL ${EM_LIMITTEXT} 5 0 + + IntOp $R6 $R6 + 2 + + ; Max Jobs + !insertmacro MUI_INSTALLOPTIONS_READ $HCTL "ConfigPage1.ini" "Field $R6" "HWND" + SendMessage $HCTL ${EM_LIMITTEXT} 3 0 + + IntOp $R6 $R6 + 5 + ${Endif} + + IntOp $R0 $NewComponents & ${ComponentStorage} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 2 + + ; Storage Name + !insertmacro MUI_INSTALLOPTIONS_READ $HCTL "ConfigPage1.ini" "Field $R6" "HWND" + SendMessage $HCTL ${EM_LIMITTEXT} 30 0 + + IntOp $R6 $R6 + 2 + + ; Storage Port Number + !insertmacro MUI_INSTALLOPTIONS_READ $HCTL "ConfigPage1.ini" "Field $R6" "HWND" + SendMessage $HCTL ${EM_LIMITTEXT} 5 0 + + IntOp $R6 $R6 + 2 + + ; Max Jobs + !insertmacro MUI_INSTALLOPTIONS_READ $HCTL "ConfigPage1.ini" "Field $R6" "HWND" + 
SendMessage $HCTL ${EM_LIMITTEXT} 3 0 + + IntOp $R6 $R6 + 5 + ${Endif} + + !insertmacro MUI_INSTALLOPTIONS_SHOW + + ; Process results + + StrCpy $R6 3 + + IntOp $R0 $NewComponents & ${ComponentFile} + ${If} $R0 <> 0 + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigClientName "ConfigPage1.ini" "Field $R6" "State" + + IntOp $R6 $R6 + 2 + + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigClientPort "ConfigPage1.ini" "Field $R6" "State" + + IntOp $R6 $R6 + 2 + + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigClientMaxJobs "ConfigPage1.ini" "Field $R6" "State" + + IntOp $R6 $R6 + 2 + + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigClientPassword "ConfigPage1.ini" "Field $R6" "State" + + IntOp $R6 $R6 + 1 + + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigClientInstallService "ConfigPage1.ini" "Field $R6" "State" + + IntOp $R6 $R6 + 1 + + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigClientStartService "ConfigPage1.ini" "Field $R6" "State" + + IntOp $R6 $R6 + 3 + ${Endif} + + IntOp $R0 $NewComponents & ${ComponentStorage} + ${If} $R0 <> 0 + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigStorageName "ConfigPage1.ini" "Field $R6" "State" + + IntOp $R6 $R6 + 2 + + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigStoragePort "ConfigPage1.ini" "Field $R6" "State" + + IntOp $R6 $R6 + 2 + + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigStorageMaxJobs "ConfigPage1.ini" "Field $R6" "State" + + IntOp $R6 $R6 + 2 + + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigStoragePassword "ConfigPage1.ini" "Field $R6" "State" + + IntOp $R6 $R6 + 1 + + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigStorageInstallService "ConfigPage1.ini" "Field $R6" "State" + + IntOp $R6 $R6 + 1 + + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigStorageStartService "ConfigPage1.ini" "Field $R6" "State" + + IntOp $R6 $R6 + 3 + ${Endif} +FunctionEnd + +Function LeaveConfigPage1 + StrCpy $R6 5 + + IntOp $R0 $NewComponents & ${ComponentFile} + ${If} $R0 <> 0 + !insertmacro MUI_INSTALLOPTIONS_READ $R0 "ConfigPage1.ini" "Field $R6" "State" + ${If} $R0 < 1024 + ${OrIf} $R0 > 65535 + MessageBox MB_OK "Port must be between 1024 and 65535 inclusive." + Abort + ${EndIf} + + IntOp $R6 $R6 + 2 + + !insertmacro MUI_INSTALLOPTIONS_READ $R0 "ConfigPage1.ini" "Field $R6" "State" + ${If} $R0 < 1 + ${OrIf} $R0 > 99 + MessageBox MB_OK "Max Jobs must be between 1 and 99 inclusive." + Abort + ${EndIf} + + IntOp $R6 $R6 + 9 + ${Endif} + + IntOp $R0 $NewComponents & ${ComponentStorage} + ${If} $R0 <> 0 + !insertmacro MUI_INSTALLOPTIONS_READ $R0 "ConfigPage1.ini" "Field $R6" "State" + ${If} $R0 < 1024 + ${OrIf} $R0 > 65535 + MessageBox MB_OK "Port must be between 1024 and 65535 inclusive." + Abort + ${EndIf} + + IntOp $R6 $R6 + 2 + + !insertmacro MUI_INSTALLOPTIONS_READ $R0 "ConfigPage1.ini" "Field $R6" "State" + ${If} $R0 < 1 + ${OrIf} $R0 > 99 + MessageBox MB_OK "Max Jobs must be between 1 and 99 inclusive." 
+ Abort + ${EndIf} + + IntOp $R6 $R6 + 9 + ${Endif} +FunctionEnd diff --git a/src/win32/full_win32_installer/ConfigPage2.nsh b/src/win32/full_win32_installer/ConfigPage2.nsh new file mode 100644 index 00000000..532af9b0 --- /dev/null +++ b/src/win32/full_win32_installer/ConfigPage2.nsh @@ -0,0 +1,454 @@ +Function EnterConfigPage2 + IntOp $R0 $NewComponents & ${ComponentsRequiringUserConfig} + + ${If} $R0 = 0 + Abort + ${EndIf} + + FileOpen $R5 "$PLUGINSDIR\ConfigPage2.ini" w + + StrCpy $R6 1 ; Field Number + StrCpy $R7 0 ; Top + + IntOp $R0 $NewComponents & ${ComponentDirector} + ${If} $R0 <> 0 + ${If} $AutomaticInstall = 1 + IntOp $R8 $R7 + 54 + ${Else} + IntOp $R8 $R7 + 92 + ${EndIf} + FileWrite $R5 '[Field $R6]$\r$\nType="GroupBox"$\r$\nText="Director"$\r$\nLeft=0$\r$\nTop=$R7$\r$\nRight=300$\r$\nBottom=$R8$\r$\n$\r$\n' + ${Else} + IntOp $R0 $NewComponents & ${ComponentsTextAndGuiConsoles} + ${If} $R0 <> 0 + IntOp $R8 $R7 + 54 + ${Else} + IntOp $R8 $R7 + 26 + ${EndIf} + FileWrite $R5 '[Field $R6]$\r$\nType="GroupBox"$\r$\nText="Enter data for Director allowed to access this Client"$\r$\nLeft=0$\r$\nTop=$R7$\r$\nRight=300$\r$\nBottom=$R8$\r$\n$\r$\n' + ${EndIf} + + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 12 + + IntOp $R0 $NewComponents & ${ComponentDirector} + ${If} $R0 <> 0 + ${If} "$ConfigDirectorName" == "" + StrCpy $ConfigDirectorName "$HostName-dir" + ${EndIf} + ${If} "$ConfigDirectorPassword" == "" + StrCpy $ConfigDirectorPassword "$LocalDirectorPassword" + ${EndIf} + ${Else} + ${If} "$ConfigDirectorName" == "$HostName-dir" + StrCpy $ConfigDirectorName "" + ${EndIf} + ${If} "$ConfigDirectorPassword" == "$LocalDirectorPassword" + StrCpy $ConfigDirectorPassword "" + ${EndIf} + ${EndIf} + + IntOp $R0 $NewComponents & ${ComponentDirector} + ${If} $R0 = 0 + ${OrIf} $AutomaticInstall = 0 + IntOp $R8 $R7 + 8 + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Name"$\r$\nLeft=6$\r$\nTop=$R7$\r$\nRight=26$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + + IntOp $R8 $R8 + 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nState=$ConfigDirectorName$\r$\nLeft=50$\r$\nTop=$R7$\r$\nRight=158$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + + ${If} $AutomaticInstall = 0 + IntOp $R0 $NewComponents & ${ComponentsDirectorAndTextGuiConsoles} + ${If} $R0 <> 0 + IntOp $R7 $R7 + 2 + IntOp $R8 $R8 - 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Port"$\r$\nLeft=172$\r$\nTop=$R7$\r$\nRight=188$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + + IntOp $R8 $R8 + 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nFlags="ONLY_NUMBERS"$\r$\nState=$ConfigDirectorPort$\r$\nLeft=190$\r$\nTop=$R7$\r$\nRight=218$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + ${EndIf} + + IntOp $R0 $NewComponents & ${ComponentDirector} + ${If} $R0 <> 0 + IntOp $R7 $R7 + 2 + IntOp $R8 $R8 - 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Max Jobs"$\r$\nLeft=238$\r$\nTop=$R7$\r$\nRight=270$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + + IntOp $R8 $R8 + 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nFlags="ONLY_NUMBERS"$\r$\nState=$ConfigDirectorMaxJobs$\r$\nLeft=274$\r$\nTop=$R7$\r$\nRight=294$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + ${EndIf} + ${EndIf} + + IntOp $R7 $R7 + 14 + ${EndIf} + + IntOp $R0 $NewComponents & ${ComponentsTextAndGuiConsoles} + ${If} $R0 <> 0 + ${OrIf} $AutomaticInstall = 0 + IntOp $R0 $NewComponents & ${ComponentsDirectorAndTextGuiConsoles} + ${If} $R0 <> 0 + IntOp $R7 $R7 + 2 + IntOp $R8 $R7 
+ 8 + + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Password"$\r$\nLeft=6$\r$\nTop=$R7$\r$\nRight=38$\r$\nBottom=$R8$\r$\n$\r$\n' + + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + IntOp $R8 $R8 + 2 + + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nState=$ConfigDirectorPassword$\r$\nLeft=50$\r$\nTop=$R7$\r$\nRight=294$\r$\nBottom=$R8$\r$\n$\r$\n' + + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 14 + ${EndIf} + ${EndIf} + + IntOp $R0 $NewComponents & ${ComponentDirector} + ${If} $R0 <> 0 + IntOp $R7 $R7 + 2 + IntOp $R8 $R7 + 8 + + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Mail Server"$\r$\nLeft=6$\r$\nTop=$R7$\r$\nRight=48$\r$\nBottom=$R8$\r$\n$\r$\n' + + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + IntOp $R8 $R8 + 2 + + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nState=$ConfigDirectorMailServer$\r$\nLeft=50$\r$\nTop=$R7$\r$\nRight=294$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 16 + IntOp $R8 $R7 + 8 + + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Mail Address"$\r$\nLeft=6$\r$\nTop=$R7$\r$\nRight=48$\r$\nBottom=$R8$\r$\n$\r$\n' + + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + IntOp $R8 $R8 + 2 + + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nState=$ConfigDirectorMailAddress$\r$\nLeft=50$\r$\nTop=$R7$\r$\nRight=294$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 16 + IntOp $R8 $R7 + 8 + + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Database"$\r$\nLeft=6$\r$\nTop=$R7$\r$\nRight=38$\r$\nBottom=$R8$\r$\n$\r$\n' + + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + IntOp $R8 $R8 + 2 + + ${If} $ConfigDirectorDB = 0 + ${If} $MySQLPath != "" + StrCpy $ConfigDirectorDB 1 + ${ElseIf} $PostgreSQLPath != "" + StrCpy $ConfigDirectorDB 2 + ${Else} + StrCpy $ConfigDirectorDB 3 + ${EndIf} + ${EndIf} + + ${If} $ConfigDirectorDB = 1 + StrCpy $R9 1 + ${Else} + StrCpy $R9 0 + ${EndIf} + + FileWrite $R5 '[Field $R6]$\r$\nType="RadioButton"$\r$\nState=$R9$\r$\nText="MySQL"$\r$\nFlags="GROUP"$\r$\nLeft=50$\r$\nTop=$R7$\r$\nRight=90$\r$\nBottom=$R8$\r$\n$\r$\n' + + IntOp $R6 $R6 + 1 + + ${If} $ConfigDirectorDB = 2 + StrCpy $R9 1 + ${Else} + StrCpy $R9 0 + ${EndIf} + + FileWrite $R5 '[Field $R6]$\r$\nType="RadioButton"$\r$\nState=$R9$\r$\nText="PostgreSQL"$\r$\nFlags="NOTABSTOP"$\r$\nLeft=94$\r$\nTop=$R7$\r$\nRight=146$\r$\nBottom=$R8$\r$\n$\r$\n' + + IntOp $R6 $R6 + 1 + + ${If} $ConfigDirectorDB = 3 + StrCpy $R9 1 + ${Else} + StrCpy $R9 0 + ${EndIf} + + FileWrite $R5 '[Field $R6]$\r$\nType="RadioButton"$\r$\nState=$R9$\r$\nText="Sqlite"$\r$\nFlags="NOTABSTOP"$\r$\nLeft=150$\r$\nTop=$R7$\r$\nRight=182$\r$\nBottom=$R8$\r$\n$\r$\n' + + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 12 + + ${If} $AutomaticInstall = 0 + IntOp $R8 $R7 + 10 + FileWrite $R5 '[Field $R6]$\r$\nType="Checkbox"$\r$\nState=$ConfigDirectorInstallService$\r$\nText="Install as service"$\r$\nLeft=50$\r$\nTop=$R7$\r$\nRight=118$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + + FileWrite $R5 '[Field $R6]$\r$\nType="Checkbox"$\r$\nState=$ConfigDirectorStartService$\r$\nText="Start after install"$\r$\nLeft=190$\r$\nTop=$R7$\r$\nRight=260$\r$\nBottom=$R8$\r$\n$\r$\n' + + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 12 + ${EndIf} + ${Else} + IntOp $R0 $NewComponents & ${ComponentsTextAndGuiConsoles} + ${If} $R0 <> 0 + IntOp $R7 $R7 + 2 + IntOp $R8 $R7 + 8 + + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Address"$\r$\nLeft=6$\r$\nTop=$R7$\r$\nRight=48$\r$\nBottom=$R8$\r$\n$\r$\n' + + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + IntOp $R8 $R8 + 2 + + FileWrite $R5 '[Field 
$R6]$\r$\nType="Text"$\r$\nState=$ConfigDirectorAddress$\r$\nLeft=50$\r$\nTop=$R7$\r$\nRight=294$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 14 + IntOp $R8 $R7 + 8 + ${EndIf} + ${EndIf} + + IntOp $R7 $R7 + 4 + + ${If} $AutomaticInstall = 0 + IntOp $R0 $NewComponents & ${ComponentsFileAndStorageAndDirector} + ${If} $R0 <> 0 + IntOp $R8 $R7 + 42 + + FileWrite $R5 '[Field $R6]$\r$\nType="GroupBox"$\r$\nText="Monitor"$\r$\nLeft=0$\r$\nTop=$R7$\r$\nRight=300$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 12 + + IntOp $R8 $R7 + 8 + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Name"$\r$\nLeft=6$\r$\nTop=$R7$\r$\nRight=26$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + + IntOp $R8 $R8 + 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nState=$ConfigMonitorName$\r$\nLeft=50$\r$\nTop=$R7$\r$\nRight=150$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 16 + IntOp $R8 $R7 + 8 + + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Password"$\r$\nLeft=6$\r$\nTop=$R7$\r$\nRight=38$\r$\nBottom=$R8$\r$\n$\r$\n' + + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + IntOp $R8 $R8 + 2 + + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nState=$ConfigMonitorPassword$\r$\nLeft=50$\r$\nTop=$R7$\r$\nRight=294$\r$\nBottom=$R8$\r$\n$\r$\n' + + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 20 + ${EndIf} + ${EndIf} + + IntOp $R6 $R6 - 1 + FileWrite $R5 "[Settings]$\r$\nNumFields=$R6$\r$\n" + + FileClose $R5 + + IntOp $R0 $NewComponents & ${ComponentsFileAndStorage} + ${If} $R0 = 0 + ${OrIf} $AutomaticInstall = 1 + !insertmacro MUI_HEADER_TEXT "$(TITLE_ConfigPage1)" "$(SUBTITLE_ConfigPage1)" + ${Else} + !insertmacro MUI_HEADER_TEXT "$(TITLE_ConfigPage2)" "$(SUBTITLE_ConfigPage2)" + ${EndIf} + + !insertmacro MUI_INSTALLOPTIONS_INITDIALOG "ConfigPage2.ini" + Pop $HDLG ;HWND of dialog + + ; Initialize Controls + StrCpy $R6 2 ; Field Number + + IntOp $R0 $NewComponents & ${ComponentDirector} + ${If} $R0 = 0 + ${OrIf} $AutomaticInstall = 0 + ; Name + IntOp $R6 $R6 + 1 + !insertmacro MUI_INSTALLOPTIONS_READ $HCTL "ConfigPage2.ini" "Field $R6" "HWND" + SendMessage $HCTL ${EM_LIMITTEXT} 30 0 + IntOp $R6 $R6 + 1 + + ${If} $AutomaticInstall = 0 + IntOp $R0 $NewComponents & ${ComponentsDirectorAndTextGuiConsoles} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 1 + ; Port Number + !insertmacro MUI_INSTALLOPTIONS_READ $HCTL "ConfigPage2.ini" "Field $R6" "HWND" + SendMessage $HCTL ${EM_LIMITTEXT} 5 0 + IntOp $R6 $R6 + 1 + ${EndIf} + + IntOp $R0 $NewComponents & ${ComponentDirector} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 1 + ; Max Jobs + !insertmacro MUI_INSTALLOPTIONS_READ $HCTL "ConfigPage2.ini" "Field $R6" "HWND" + SendMessage $HCTL ${EM_LIMITTEXT} 3 0 + + IntOp $R6 $R6 + 1 + ${EndIf} + ${EndIf} + ${EndIf} + + IntOp $R0 $NewComponents & ${ComponentsTextAndGuiConsoles} + ${If} $R0 <> 0 + ${OrIf} $AutomaticInstall = 0 + IntOp $R0 $NewComponents & ${ComponentsDirectorAndTextGuiConsoles} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 2 + ${EndIf} + ${EndIf} + + IntOp $R0 $NewComponents & ${ComponentDirector} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 9 + + ${If} $AutomaticInstall = 0 + IntOp $R6 $R6 + 2 + ${EndIf} + ${Else} + IntOp $R0 $NewComponents & ${ComponentsTextAndGuiConsoles} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 2 + ${EndIf} + ${EndIf} + + ${If} $AutomaticInstall = 0 + IntOp $R0 $NewComponents & ${ComponentsFileAndStorageAndDirector} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 2 + !insertmacro MUI_INSTALLOPTIONS_READ $HCTL "ConfigPage2.ini" "Field $R6" "HWND" + SendMessage $HCTL 
${EM_LIMITTEXT} 30 0 + IntOp $R6 $R6 + 2 + ${EndIf} + ${EndIf} + + !insertmacro MUI_INSTALLOPTIONS_SHOW + + ; Process results + + StrCpy $R6 2 + + IntOp $R0 $NewComponents & ${ComponentDirector} + ${If} $R0 = 0 + ${OrIf} $AutomaticInstall = 0 + IntOp $R6 $R6 + 1 + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigDirectorName "ConfigPage2.ini" "Field $R6" "State" + IntOp $R6 $R6 + 1 + + ${If} $AutomaticInstall = 0 + IntOp $R0 $NewComponents & ${ComponentsDirectorAndTextGuiConsoles} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 1 + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigDirectorPort "ConfigPage2.ini" "Field $R6" "State" + IntOp $R6 $R6 + 1 + ${EndIf} + + IntOp $R0 $NewComponents & ${ComponentDirector} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 1 + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigDirectorMaxJobs "ConfigPage2.ini" "Field $R6" "State" + IntOp $R6 $R6 + 1 + ${EndIf} + ${EndIf} + ${EndIf} + + IntOp $R0 $NewComponents & ${ComponentsTextAndGuiConsoles} + ${If} $R0 <> 0 + ${OrIf} $AutomaticInstall = 0 + IntOp $R0 $NewComponents & ${ComponentsDirectorAndTextGuiConsoles} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 1 + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigDirectorPassword "ConfigPage2.ini" "Field $R6" "State" + IntOp $R6 $R6 + 1 + ${EndIf} + ${EndIf} + + IntOp $R0 $NewComponents & ${ComponentDirector} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 1 + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigDirectorMailServer "ConfigPage2.ini" "Field $R6" "State" + IntOp $R6 $R6 + 2 + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigDirectorMailAddress "ConfigPage2.ini" "Field $R6" "State" + IntOp $R6 $R6 + 2 + !insertmacro MUI_INSTALLOPTIONS_READ $R5 "ConfigPage2.ini" "Field $R6" "State" + ${If} $R5 = 1 + StrCpy $ConfigDirectorDB 1 + ${Endif} + IntOp $R6 $R6 + 1 + !insertmacro MUI_INSTALLOPTIONS_READ $R5 "ConfigPage2.ini" "Field $R6" "State" + ${If} $R5 = 1 + StrCpy $ConfigDirectorDB 2 + ${Endif} + IntOp $R6 $R6 + 1 + !insertmacro MUI_INSTALLOPTIONS_READ $R5 "ConfigPage2.ini" "Field $R6" "State" + ${If} $R5 = 1 + StrCpy $ConfigDirectorDB 3 + ${Endif} + IntOp $R6 $R6 + 1 + + ${If} $AutomaticInstall = 0 + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigDirectorInstallService "ConfigPage2.ini" "Field $R6" "State" + IntOp $R6 $R6 + 1 + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigDirectorStartService "ConfigPage2.ini" "Field $R6" "State" + IntOp $R6 $R6 + 1 + ${EndIf} + ${Else} + IntOp $R0 $NewComponents & ${ComponentsTextAndGuiConsoles} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 1 + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigDirectorAddress "ConfigPage2.ini" "Field $R6" "State" + IntOp $R6 $R6 + 1 + ${EndIf} + ${EndIf} + + ${If} $AutomaticInstall = 0 + IntOp $R0 $NewComponents & ${ComponentsFileAndStorageAndDirector} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 2 + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigMonitorName "ConfigPage2.ini" "Field $R6" "State" + IntOp $R6 $R6 + 2 + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigMonitorPassword "ConfigPage2.ini" "Field $R6" "State" + ${EndIf} + ${EndIf} +FunctionEnd + +Function LeaveConfigPage2 + ${If} $AutomaticInstall = 0 + StrCpy $R6 4 + + IntOp $R0 $NewComponents & ${ComponentsDirectorAndTextGuiConsoles} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 1 + !insertmacro MUI_INSTALLOPTIONS_READ $R0 "ConfigPage2.ini" "Field $R6" "State" + ${If} $R0 < 1024 + ${OrIf} $R0 > 65535 + MessageBox MB_OK "Port must be between 1024 and 65535 inclusive." 
+ Abort + ${EndIf} + IntOp $R6 $R6 + 1 + ${EndIf} + + IntOp $R0 $NewComponents & ${ComponentDirector} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 1 + !insertmacro MUI_INSTALLOPTIONS_READ $R0 "ConfigPage2.ini" "Field $R6" "State" + ${If} $R0 < 1 + ${OrIf} $R0 > 99 + MessageBox MB_OK "Max Jobs must be between 1 and 99 inclusive." + Abort + ${EndIf} + IntOp $R6 $R6 + 1 + ${EndIf} + ${EndIf} +FunctionEnd diff --git a/src/win32/full_win32_installer/DumpLog.nsh b/src/win32/full_win32_installer/DumpLog.nsh new file mode 100644 index 00000000..c450044a --- /dev/null +++ b/src/win32/full_win32_installer/DumpLog.nsh @@ -0,0 +1,46 @@ +!define LVM_GETITEMCOUNT 0x1004 +!define LVM_GETITEMTEXT 0x102D + +Function DumpLog + Exch $5 + Push $0 + Push $1 + Push $2 + Push $3 + Push $4 + Push $6 + + FindWindow $0 "#32770" "" $HWNDPARENT + GetDlgItem $0 $0 1016 + StrCmp $0 0 error + FileOpen $5 $5 "w" + StrCmp $5 0 error + SendMessage $0 ${LVM_GETITEMCOUNT} 0 0 $6 + System::Alloc ${NSIS_MAX_STRLEN} + Pop $3 + StrCpy $2 0 + System::Call "*(i, i, i, i, i, i, i, i, i) i \ + (0, 0, 0, 0, 0, r3, ${NSIS_MAX_STRLEN}) .r1" + loop: StrCmp $2 $6 done + System::Call "User32::SendMessageA(i, i, i, i) i \ + ($0, ${LVM_GETITEMTEXT}, $2, r1)" + System::Call "*$3(&t${NSIS_MAX_STRLEN} .r4)" + FileWrite $5 "$4$\r$\n" + IntOp $2 $2 + 1 + Goto loop + done: + FileClose $5 + System::Free $1 + System::Free $3 + Goto exit + error: + MessageBox MB_OK error + exit: + Pop $6 + Pop $4 + Pop $3 + Pop $2 + Pop $1 + Pop $0 + Exch $5 +FunctionEnd diff --git a/src/win32/full_win32_installer/InstallType.ini b/src/win32/full_win32_installer/InstallType.ini new file mode 100644 index 00000000..73fb8d93 --- /dev/null +++ b/src/win32/full_win32_installer/InstallType.ini @@ -0,0 +1,56 @@ +; +; Note: certain text in this file is overwritten by the code in +; InstallType.nsh +; + +[Settings] +NumFields=6 + +[Field 1] +Type=Label +Text=This is a new installation. Please choose the installation type. +Left=0 +Right=300 +Top=0 +Bottom=28 + +[Field 2] +Type=GroupBox +Text=Installation Type +Left=0 +Right=300 +Top=32 +Bottom=136 + +[Field 3] +Type=RadioButton +Text=Automatic +State=1 +Left=6 +Right=52 +Top=44 +Bottom=54 + +[Field 4] +Type=RadioButton +Text=Custom (not recommended) +Left=6 +Right=252 +Top=90 +Bottom=100 + +[Field 5] +Type=Label +Text=The software will be installed in the default directory "Program Files\\Bacula". The configuration files will be generated using defaults applicable to most installations. +Left=17 +Right=295 +Top=58 +Bottom=86 + +[Field 6] +Type=Label +Text=You have more options, but you will have to manually edit your bacula-fd.conf file before Bacula will work. +Left=17 +Right=295 +Top=104 +Bottom=132 diff --git a/src/win32/full_win32_installer/InstallType.nsh b/src/win32/full_win32_installer/InstallType.nsh new file mode 100644 index 00000000..373f68cc --- /dev/null +++ b/src/win32/full_win32_installer/InstallType.nsh @@ -0,0 +1,102 @@ +Function EnterInstallType + Push $R0 + Push $R1 + Push $R2 + + ; Check if this is an upgrade by looking for an uninstaller configured + ; in the registry. 
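+  ; Three outcomes are possible here:
+  ;  - UninstallString and Software\Bacula\InstallLocation both present:
+  ;    an installation made by this installer is upgraded in place.
+  ;  - UninstallString present but no InstallLocation: an old-style
+  ;    installation is detected and its configuration is migrated.
+  ;  - Neither present: this is treated as a new installation.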
+ ReadRegStr $R0 HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Bacula" "UninstallString" + + ${If} "$R0" != "" + ; Check registry for new installer + ReadRegStr $R1 HKLM "Software\Bacula" "InstallLocation" + ${If} "$R1" != "" + ; New Installer + StrCpy $OldInstallDir $R1 + StrCpy $InstallType ${UpgradeInstall} + + SetShellVarContext all + + StrCpy $R1 "$APPDATA\Bacula" + StrCpy $R2 "$INSTDIR\Doc" + + ReadRegDWORD $PreviousComponents HKLM "Software\Bacula" "Components" + + WriteINIStr "$PLUGINSDIR\InstallType.ini" "Field 1" "Text" "A previous installation has been found in $OldInstallDir. Please choose the installation type for any additional components you select." + WriteINIStr "$PLUGINSDIR\InstallType.ini" "Field 5" "Text" "The configuration files for additional components will be generated using defaults applicable to most installations." + WriteINIStr "$PLUGINSDIR\InstallType.ini" "Field 6" "Text" "You have more options, but you will have to manually edit your bacula-fd.conf file before Bacula will work." + + ReadRegDWORD $ConfigDirectorDB HKLM Software\Bacula Database + + ${If} $ConfigDirectorDB = 0 + IntOp $R0 $PreviousComponents & ${ComponentDirector} + ${If} $R0 <> 0 + StrCpy $ConfigDirectorDB 1 + ${EndIf} + ${EndIf} + ${Else} + ; Processing Upgrade - Get Install Directory + ${StrRep} $R0 $R0 '"' '' + ${GetParent} $R0 $OldInstallDir + + ; Old Installer + StrCpy $InstallType ${MigrateInstall} + StrCpy $R1 "$OldInstallDir\bin" + StrCpy $R2 "$OldInstallDir\Doc" + + WriteINIStr "$PLUGINSDIR\InstallType.ini" "Field 1" "Text" "An old installation has been found in $OldInstallDir. The Configuration will be migrated. Please choose the installation type for any additional components you select." + WriteINIStr "$PLUGINSDIR\InstallType.ini" "Field 5" "Text" "The software will be installed in the default directory $\"$PROGRAMFILES\Bacula$\". The configuration files for additional components will be generated using defaults applicable to most installations." + WriteINIStr "$PLUGINSDIR\InstallType.ini" "Field 6" "Text" "You have more options, but you will have to manually edit your bacula-fd.conf file before Bacula will work." + ${EndIf} + ${Else} + ; New Install + StrCpy $InstallType ${NewInstall} + WriteINIStr "$PLUGINSDIR\InstallType.ini" "Field 5" "Text" "The software will be installed in the default directory $\"$PROGRAMFILES\Bacula$\". The configuration files will be generated using defaults applicable to most installations." + WriteINIStr "$PLUGINSDIR\InstallType.ini" "Field 6" "Text" "You have more options, but you will have to manually edit your bacula-fd.conf file before Bacula will work." 
+ ${EndIf} + + ${If} $InstallType <> ${NewInstall} + ${AndIf} $PreviousComponents = 0 + ${If} ${FileExists} "$R1\bacula-fd.conf" + IntOp $PreviousComponents $PreviousComponents | ${ComponentFile} + ${EndIf} + ${If} ${FileExists} "$R1\bconsole.conf" + IntOp $PreviousComponents $PreviousComponents | ${ComponentTextConsole} + ${EndIf} + ${If} ${FileExists} "$R1\bat.conf" + IntOp $PreviousComponents $PreviousComponents | ${ComponentBatConsole} + ${EndIf} + ${If} ${FileExists} "$R1\wx-console.conf" + IntOp $PreviousComponents $PreviousComponents | ${ComponentGUIConsole} + ${EndIf} + ${If} ${FileExists} "$R2\bacula.pdf" + IntOp $PreviousComponents $PreviousComponents | ${ComponentPDFDocs} + ${EndIf} + ${If} ${FileExists} "$R2\bacula\bacula.html" + IntOp $PreviousComponents $PreviousComponents | ${ComponentHTMLDocs} + ${EndIf} + ${If} ${FileExists} "$R2\bacula.html" + IntOp $PreviousComponents $PreviousComponents | ${ComponentHTMLDocs} + ${EndIf} + ${EndIf} + + !InsertMacro MUI_HEADER_TEXT "$(TITLE_InstallType)" "$(SUBTITLE_InstallType)" + !InsertMacro MUI_INSTALLOPTIONS_INITDIALOG "InstallType.ini" + Pop $HDLG ;HWND of dialog + + !insertmacro MUI_INSTALLOPTIONS_SHOW + + ; Process Results + + !insertmacro MUI_INSTALLOPTIONS_READ $R0 "InstallType.ini" "Field 3" "State" + + ${If} $R0 = 1 + StrCpy $AutomaticInstall 1 + ${Else} + StrCpy $AutomaticInstall 0 + ${EndIf} + + Pop $R2 + Pop $R1 + Pop $R0 +FunctionEnd diff --git a/src/win32/full_win32_installer/Start.bat b/src/win32/full_win32_installer/Start.bat new file mode 100644 index 00000000..0b61f7ec --- /dev/null +++ b/src/win32/full_win32_installer/Start.bat @@ -0,0 +1,5 @@ +rem +rem Bacula start file for Win95/98/Me +rem +cd c:\bacula\bin +c:\bacula\bin\bacula-fd.exe /service -c c:\bacula\bin\bacula-fd.conf diff --git a/src/win32/full_win32_installer/Stop.bat b/src/win32/full_win32_installer/Stop.bat new file mode 100644 index 00000000..3b1d0e5b --- /dev/null +++ b/src/win32/full_win32_installer/Stop.bat @@ -0,0 +1,5 @@ +rem +rem Bacula stop file for Win95/98/Me +rem +cd c:\bacula\bin +c:\bacula\bin\bacula-fd.exe /kill diff --git a/src/win32/full_win32_installer/WriteTemplates.ini b/src/win32/full_win32_installer/WriteTemplates.ini new file mode 100644 index 00000000..e4fb1de8 --- /dev/null +++ b/src/win32/full_win32_installer/WriteTemplates.ini @@ -0,0 +1,48 @@ +[Settings] +NumFields=5 +CancelEnabled=0 +BackEnabled=0 + +[Field 1] +Type="Label" +Text="Templates of the Client and Storage resources can be generated that are customized with the information about this system. These templates can then be copied to the Director computer and included in the Director's configuration file." 
+Left=7 +Right=293 +Top=6 +Bottom=32 + +[Field 2] +Type="CheckBox" +Text="Save Client template in:" +Left=6 +Right=240 +Top=38 +Bottom=48 + +[Field 3] +Type="FileRequest" +State="Client.conf" +Flags= +Filter=Configuration Files|*.conf|All Files|*.* +Left=16 +Right=288 +Top=50 +Bottom=62 + +[Field 4] +Type="CheckBox" +Text="Save Storage template in:" +Left=6 +Right=248 +Top=70 +Bottom=80 + +[Field 5] +Type="FileRequest" +State="Storage.conf" +Flags=REQ_SAVE|FILE_EXPLORER|WARN_IF_EXIST +Filter=Configuration Files|*.conf|All Files|*.* +Left=16 +Right=288 +Top=82 +Bottom=94 diff --git a/src/win32/full_win32_installer/bacula-dir.conf.in b/src/win32/full_win32_installer/bacula-dir.conf.in new file mode 100644 index 00000000..6b3fe43a --- /dev/null +++ b/src/win32/full_win32_installer/bacula-dir.conf.in @@ -0,0 +1,380 @@ +# +# Default Bacula Director Configuration file +# +# The only thing that MUST be changed is to add one or more +# file or directory names in the Include directive of the +# FileSet resource. +# +# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ @DISTVER@ +# +# You might also want to change the default email address +# from root to your address. See the "mail" and "operator" +# directives in the Messages resource. +# + +Director { # define myself + Name = @director_name@ + DIRport = @director_port@ # where we listen for UA connections + QueryFile = "@bin_dir@\\query.sql" + WorkingDirectory = "@working_dir@" + PidDirectory = "@working_dir@" + Maximum Concurrent Jobs = @director_maxjobs@ + Password = "@director_password@" # Console password + Messages = Daemon +} + +JobDefs { + Name = "DefaultJob" + Type = Backup + Level = Incremental + Client = @client_name@ + FileSet = "Test Set" + Schedule = "WeeklyCycle" + Storage = File + Messages = Standard + Pool = Default + Priority = 10 +} + + +# +# Define the main nightly save backup job +# By default, this job will back up to disk in C:/tmp +Job { + Name = "Client1" + JobDefs = "DefaultJob" + Write Bootstrap = "@working_dir@\\Client1.bsr" +} + +#Job { +# Name = "Client2" +# Client = @client_name@2 +# JobDefs = "DefaultJob" +# Write Bootstrap = "@working_dir@\\Client2.bsr" +#} + +# Backup the catalog database (after the nightly save) +Job { + Name = "BackupCatalog" + JobDefs = "DefaultJob" + Level = Full + FileSet="Catalog" + Schedule = "WeeklyCycleAfterBackup" + # This creates an ASCII copy of the catalog + RunBeforeJob = "\"@bin_dir@\\make_catalog_backup\" bacula bacula" + # This deletes the copy of the catalog + RunAfterJob = "\"@bin_dir@\\delete_catalog_backup\"" + Write Bootstrap = "@working_dir@\\BackupCatalog.bsr" + Priority = 11 # run after main backup +} + +# +# Standard Restore template, to be changed by Console program +# Only one such job is needed for all Jobs/Clients/Storage ... +# +Job { + Name = "RestoreFiles" + Type = Restore + Client=@client_name@ + FileSet="Test Set" + Storage = File + Pool = Default + Messages = Standard + Where = "C:\\tmp\\bacula-restores" +} + +# +# Note: Windows path separators do NOT work correctly in FileSets. 
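+# Use forward slashes instead, for example:
+#   File = "C:/Users"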
+# +# List of files to be backed up +FileSet { + Name = "Test Set" + Include { + Options { + signature = MD5 + ignore case = yes + } +# +# Put your list of files here, preceded by 'File =', one per line +# or include an external list with: +# +# File = + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/win32/full_win32_installer/storage.conf.in b/src/win32/full_win32_installer/storage.conf.in new file mode 100644 index 00000000..5928815c --- /dev/null +++ b/src/win32/full_win32_installer/storage.conf.in @@ -0,0 +1,10 @@ +# Definition of file storage device +Storage { + Name = File +# Do not use "localhost" here + Address = @storage_address@ # N.B. Use a fully qualified name here + SDPort = @storage_port@ + Password = "@storage_password@" + Device = FileStorage + Media Type = File +} diff --git a/src/win32/full_win32_installer/winbacula.nsi b/src/win32/full_win32_installer/winbacula.nsi new file mode 100644 index 00000000..c56e5015 --- /dev/null +++ b/src/win32/full_win32_installer/winbacula.nsi @@ -0,0 +1,1470 @@ +# +# Copyright (C) 2000-2018 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +; winbacula.nsi +; +; Began as a version written by Michel Meyers (michel@tcnnet.dyndns.org) +; +; Adapted by Kern Sibbald for native Win32 Bacula +; added a number of elements from Christopher Hull's installer +; +; D. Scott Barninger Nov 13 2004 +; added configuration editing for bconsole.conf and bwx-console.conf +; better explanation in dialog boxes for editing config files +; added Start Menu items +; fix uninstall of config files to do all not just bacula-fd.conf +; +; D. Scott Barninger Dec 05 2004 +; added specification of default permissions for bacula-fd.conf +; - thanks to Jamie Ffolliott for pointing me at cacls +; added removal of working-dir files if user selects to remove config +; uninstall is now 100% clean +; +; D. 
Scott Barninger Apr 17 2005 +; 1.36.3 release docs update +; add pdf manual and menu shortcut +; +; Robert Nelson May 15 2006 +; Added server installs and implemented Microsoft install locations +; Use LogicLib.nsh +; Added Bacula-SD and Bacula-DIR +; Replaced ParameterGiven with standard GetOptions +; +; Command line options: +; +; /service - +; /start +; +; netsh firewall add portopening protocol=tcp port=9102 name="Bacula-FD" + + +!define PRODUCT "Bacula" + +; +; Include the Modern UI +; + +!include "MUI.nsh" +!include "LogicLib.nsh" +!include "FileFunc.nsh" +!include "Sections.nsh" +!include "StrFunc.nsh" +!include "WinMessages.nsh" +; +; Basics +; +Name "Bacula" +OutFile "${OUT_DIR}\winbacula-${VERSION}.exe" +SetCompressor lzma +InstallDir "$PROGRAMFILES\Bacula" +InstallDirRegKey HKLM "Software\Bacula" "InstallLocation" + +InstType "Client" +InstType "Server" +InstType "Full" + +!insertmacro GetParent + +${StrCase} +${StrRep} +${StrTok} +${StrTrimNewLines} + +; +; Pull in pages +; + +!define MUI_COMPONENTSPAGE_SMALLDESC + +!define MUI_HEADERIMAGE +!define MUI_BGCOLOR 739AB9 +!define MUI_HEADERIMAGE_BITMAP "bacula-logo.bmp" + +!InsertMacro MUI_PAGE_WELCOME +!InsertMacro MUI_PAGE_LICENSE "..\..\..\LICENSE" +Page custom EnterInstallType +!define MUI_PAGE_CUSTOMFUNCTION_SHOW PageComponentsShow +!InsertMacro MUI_PAGE_COMPONENTS +!define MUI_PAGE_CUSTOMFUNCTION_PRE PageDirectoryPre +!InsertMacro MUI_PAGE_DIRECTORY +Page custom EnterConfigPage1 LeaveConfigPage1 +Page custom EnterConfigPage2 LeaveConfigPage2 +!Define MUI_PAGE_CUSTOMFUNCTION_LEAVE LeaveInstallPage +!InsertMacro MUI_PAGE_INSTFILES +Page custom EnterWriteTemplates +!Define MUI_FINISHPAGE_SHOWREADME $INSTDIR\Readme.txt +!InsertMacro MUI_PAGE_FINISH + +!InsertMacro MUI_UNPAGE_WELCOME +!InsertMacro MUI_UNPAGE_CONFIRM +!InsertMacro MUI_UNPAGE_INSTFILES +!InsertMacro MUI_UNPAGE_FINISH + +!define MUI_ABORTWARNING + +!InsertMacro MUI_LANGUAGE "English" + +!InsertMacro GetParameters +!InsertMacro GetOptions + +DirText "Setup will install Bacula ${VERSION} to the directory specified below. To install in a different folder, click Browse and select another folder." 
+ +!InsertMacro MUI_RESERVEFILE_INSTALLOPTIONS +; +; Global Variables +; +Var OptService +Var OptStart +Var OptSilent + +Var CommonFilesDone + +Var OsIsNT + +Var HostName + +Var ConfigClientName +Var ConfigClientPort +Var ConfigClientMaxJobs +Var ConfigClientPassword +Var ConfigClientInstallService +Var ConfigClientStartService + +Var ConfigStorageName +Var ConfigStoragePort +Var ConfigStorageMaxJobs +Var ConfigStoragePassword +Var ConfigStorageInstallService +Var ConfigStorageStartService + +Var ConfigDirectorName +Var ConfigDirectorPort +Var ConfigDirectorMaxJobs +Var ConfigDirectorPassword +Var ConfigDirectorAddress +Var ConfigDirectorMailServer +Var ConfigDirectorMailAddress +Var ConfigDirectorDB +Var ConfigDirectorInstallService +Var ConfigDirectorStartService + +Var ConfigMonitorName +Var ConfigMonitorPassword + +Var LocalDirectorPassword +Var LocalHostAddress + +Var MySQLPath +Var MySQLVersion +Var PostgreSQLPath +Var PostgreSQLVersion + +Var AutomaticInstall +Var InstallType +!define NewInstall 0 +!define UpgradeInstall 1 +!define MigrateInstall 2 + +Var OldInstallDir +Var PreviousComponents +Var NewComponents + +; Bit 0 = File Service +; 1 = Storage Service +; 2 = Director Service +; 3 = Command Console +; 4 = Bat Console +; 5 = wxWidgets Console +; 7 = Documentation (PDF) +; 7 = Documentation (HTML) + +!define ComponentFile 1 +!define ComponentStorage 2 +!define ComponentDirector 4 +!define ComponentTextConsole 8 +!define ComponentBatConsole 16 +!define ComponentGUIConsole 32 +!define ComponentPDFDocs 64 +!define ComponentHTMLDocs 128 + +!define ComponentsRequiringUserConfig 63 +!define ComponentsFileAndStorage 3 +!define ComponentsFileAndStorageAndDirector 7 +!define ComponentsDirectorAndTextGuiConsoles 60 +!define ComponentsTextAndGuiConsoles 56 + +Var HDLG +Var HCTL + +Function .onInit + Push $R0 + Push $R1 + + ; Process Command Line Options + StrCpy $OptService 1 + StrCpy $OptStart 1 + StrCpy $OptSilent 0 + StrCpy $CommonFilesDone 0 + StrCpy $OsIsNT 0 + StrCpy $AutomaticInstall 0 + StrCpy $InstallType ${NewInstall} + StrCpy $OldInstallDir "" + StrCpy $PreviousComponents 0 + StrCpy $NewComponents 0 + StrCpy $MySQLPath "" + StrCpy $MySQLVersion "" + StrCpy $PostgreSQLPath "" + StrCpy $PostgreSQLVersion "" + + ${GetParameters} $R0 + + ClearErrors + ${GetOptions} $R0 "/noservice" $R1 + IfErrors +2 + StrCpy $OptService 0 + + ClearErrors + ${GetOptions} $R0 "/nostart" $R1 + IfErrors +2 + StrCpy $OptStart 0 + + IfSilent 0 +2 + StrCpy $OptSilent 1 + + ReadRegStr $R0 HKLM "SOFTWARE\Microsoft\Windows NT\CurrentVersion" CurrentVersion + ${If} $R0 != "" + StrCpy $OsIsNT 1 + ${EndIf} + + Call GetComputerName + Pop $HostName + + Call GetHostName + Pop $LocalHostAddress + + Call GetUserName + Pop $ConfigDirectorMailAddress + + Call FindDatabaseApps + + ; Configuration Defaults + + StrCpy $ConfigClientName "$HostName-fd" + StrCpy $ConfigClientPort 9102 + StrCpy $ConfigClientMaxJobs 5 + ;StrCpy $ConfigClientPassword + StrCpy $ConfigClientInstallService "$OptService" + StrCpy $ConfigClientStartService "$OptStart" + + StrCpy $ConfigStorageName "$HostName-sd" + StrCpy $ConfigStoragePort 9103 + StrCpy $ConfigStorageMaxJobs 10 + ;StrCpy $ConfigStoragePassword + StrCpy $ConfigStorageInstallService "$OptService" + StrCpy $ConfigStorageStartService "$OptStart" + + ;StrCpy $ConfigDirectorName "$HostName-dir" + StrCpy $ConfigDirectorPort 9101 + StrCpy $ConfigDirectorMaxJobs 1 + ;StrCpy $ConfigDirectorPassword + StrCpy $ConfigDirectorDB 0 + StrCpy $ConfigDirectorInstallService "$OptService" + StrCpy 
$ConfigDirectorStartService "$OptStart" + + StrCpy $ConfigMonitorName "$HostName-mon" + ;StrCpy $ConfigMonitorPassword + + InitPluginsDir + File "/oname=$PLUGINSDIR\openssl.exe" "${SRC_DIR}\openssl.exe" + File "/oname=$PLUGINSDIR\libeay32.dll" "${SRC_DIR}\libeay32.dll" + File "/oname=$PLUGINSDIR\ssleay32.dll" "${SRC_DIR}\ssleay32.dll" + File "/oname=$PLUGINSDIR\sed.exe" "${SRC_DIR}\sed.exe" + + !InsertMacro MUI_INSTALLOPTIONS_EXTRACT "InstallType.ini" + !InsertMacro MUI_INSTALLOPTIONS_EXTRACT "WriteTemplates.ini" + + SetPluginUnload alwaysoff + + nsExec::Exec '"$PLUGINSDIR\openssl.exe" rand -base64 -out $PLUGINSDIR\pw.txt 33' + pop $R0 + ${If} $R0 = 0 + FileOpen $R1 "$PLUGINSDIR\pw.txt" r + IfErrors +4 + FileRead $R1 $R0 + ${StrTrimNewLines} $ConfigClientPassword $R0 + FileClose $R1 + ${EndIf} + + nsExec::Exec '"$PLUGINSDIR\openssl.exe" rand -base64 -out $PLUGINSDIR\pw.txt 33' + pop $R0 + ${If} $R0 = 0 + FileOpen $R1 "$PLUGINSDIR\pw.txt" r + IfErrors +4 + FileRead $R1 $R0 + ${StrTrimNewLines} $ConfigStoragePassword $R0 + FileClose $R1 + ${EndIf} + + nsExec::Exec '"$PLUGINSDIR\openssl.exe" rand -base64 -out $PLUGINSDIR\pw.txt 33' + pop $R0 + ${If} $R0 = 0 + FileOpen $R1 "$PLUGINSDIR\pw.txt" r + IfErrors +4 + FileRead $R1 $R0 + ${StrTrimNewLines} $LocalDirectorPassword $R0 + FileClose $R1 + ${EndIf} + + SetPluginUnload manual + + nsExec::Exec '"$PLUGINSDIR\openssl.exe" rand -base64 -out $PLUGINSDIR\pw.txt 33' + pop $R0 + ${If} $R0 = 0 + FileOpen $R1 "$PLUGINSDIR\pw.txt" r + IfErrors +4 + FileRead $R1 $R0 + ${StrTrimNewLines} $ConfigMonitorPassword $R0 + FileClose $R1 + ${EndIf} + + Pop $R1 + Pop $R0 +FunctionEnd + +Function .onSelChange + Call UpdateComponentUI +FunctionEnd + +Function InstallCommonFiles + ${If} $CommonFilesDone = 0 + SetOutPath "$INSTDIR" + File "Readme.txt" + + SetOutPath "$INSTDIR\bin" +!if "${BUILD_TOOLS}" == "VC8" + File "${SRC_DIR}\msvcm80.dll" + File "${SRC_DIR}\msvcp80.dll" + File "${SRC_DIR}\msvcr80.dll" + File "${SRC_DIR}\Microsoft.VC80.CRT.manifest" + File "${SRC_DIR}\pthreadVCE.dll" +!endif +!if "${BUILD_TOOLS}" == "VC8_DEBUG" + File "${SRC_DIR}\msvcm80.dll" + File "${SRC_DIR}\msvcp80.dll" + File "${SRC_DIR}\msvcr80.dll" + File "${SRC_DIR}\Microsoft.VC80.CRT.manifest" + File "${SRC_DIR}\msvcm80d.dll" + File "${SRC_DIR}\msvcp80d.dll" + File "${SRC_DIR}\msvcr80d.dll" + File "${SRC_DIR}\Microsoft.VC80.DebugCRT.manifest" + File "${SRC_DIR}\pthreadVCE.dll" +!endif +!if "${BUILD_TOOLS}" == "MinGW" + File "${SRC_DIR}\mingwm10.dll" + File "${SRC_DIR}\pthreadGCE.dll" +!endif + File "${SRC_DIR}\libeay32.dll" + File "${SRC_DIR}\ssleay32.dll" + File "${SRC_DIR}\zlib1.dll" +!if "${BUILD_TOOLS}" == "VC8" + File "${SRC_DIR}\zlib1.dll.manifest" +!endif +!If "${BUILD_TOOLS}" == "VC8_DEBUG" + File "${SRC_DIR}\zlib1.dll.manifest" +!endif + File "/oname=$INSTDIR\openssl.cnf" "${SRC_DIR}\openssl.cnf" + File "${SRC_DIR}\openssl.exe" + File "${SRC_DIR}\bsleep.exe" + File "${SRC_DIR}\bsmtp.exe" + File "${SRC_DIR}\bacula.dll" + File "${SRC_DIR}\expr64.exe" + File "${SRC_DIR}\snooze.exe" + + CreateShortCut "$SMPROGRAMS\Bacula\Documentation\View Readme.lnk" "write.exe" '"$INSTDIR\Readme.txt"' + + StrCpy $CommonFilesDone 1 + ${EndIf} +FunctionEnd + +Section "-Initialize" + ${If} $MySQLPath != "" + DetailPrint "Found MySQL (version $MySQLVersion)" + ${EndIf} + ${If} $PostgreSQLPath != "" + DetailPrint "Found PostgreSQL (version $PostgreSQLVersion)" + ${EndIf} + + WriteRegStr HKLM Software\Bacula InstallLocation "$INSTDIR" + + Call GetSelectedComponents + Pop $R2 + WriteRegDWORD HKLM 
Software\Bacula Components $R2 + + WriteRegDWORD HKLM Software\Bacula Database $ConfigDirectorDB + + ; remove start menu items + SetShellVarContext all + + Delete /REBOOTOK "$SMPROGRAMS\Bacula\Configuration\*" + Delete /REBOOTOK "$SMPROGRAMS\Bacula\Documentation\*" + Delete /REBOOTOK "$SMPROGRAMS\Bacula\*" + RMDir "$SMPROGRAMS\Bacula\Configuration" + RMDir "$SMPROGRAMS\Bacula\Documentation" + RMDir "$SMPROGRAMS\Bacula" + CreateDirectory "$SMPROGRAMS\Bacula" + CreateDirectory "$SMPROGRAMS\Bacula\Configuration" + CreateDirectory "$SMPROGRAMS\Bacula\Documentation" + + CreateDirectory "$INSTDIR" + CreateDirectory "$INSTDIR\bin" + CreateDirectory "$APPDATA\Bacula" + CreateDirectory "$APPDATA\Bacula\Work" + CreateDirectory "$APPDATA\Bacula\Spool" + + SetOutPath "$INSTDIR" + File "..\..\..\LICENSE" + Delete /REBOOTOK "$INSTDIR\bin\License.txt" + + FileOpen $R1 $PLUGINSDIR\config.sed w + FileWrite $R1 "s;@VERSION@;${VERSION};g$\r$\n" + FileWrite $R1 "s;@DATE@;${__DATE__};g$\r$\n" + FileWrite $R1 "s;@DISTNAME@;Windows;g$\r$\n" + +!If "$BUILD_TOOLS" == "MinGW" + StrCpy $R2 "MinGW32" +!Else + StrCpy $R2 "MVS" +!EndIf + + Call GetHostName + Exch $R3 + Pop $R3 + + FileWrite $R1 "s;@DISTVER@;$R2;g$\r$\n" + + ${StrRep} $R2 "$APPDATA\Bacula\Work" "\" "\\\\" + FileWrite $R1 's;@working_dir@;$R2;g$\r$\n' + ${StrRep} $R2 "$APPDATA\Bacula\Work" "\" "\\" + FileWrite $R1 's;@working_dir_cmd@;$R2;g$\r$\n' + + ${StrRep} $R2 "$INSTDIR\bin" "\" "\\\\" + FileWrite $R1 's;@bin_dir@;$R2;g$\r$\n' + ${StrRep} $R2 "$INSTDIR\bin" "\" "\\" + FileWrite $R1 's;@bin_dir_cmd@;$R2;g$\r$\n' + + ${StrRep} $R2 "$INSTDIR\bin\fdplugins" "\" "\\\\" + FileWrite $R1 's;@fdplugins_dir@;$R2;g$\r$\n' + + ${StrRep} $R2 "$INSTDIR" "\" "/" + FileWrite $R1 "s;@BUILD_DIR@;$R2;g$\r$\n" + + Call IsDirectorSelected + Pop $R2 + ${If} $R2 = 1 + FileWrite $R1 "s;@director_address@;$LocalHostAddress;g$\r$\n" + ${Else} + ${If} "$ConfigDirectorAddress" != "" + FileWrite $R1 "s;@director_address@;$ConfigDirectorAddress;g$\r$\n" + ${EndIf} + ${EndIf} + + FileWrite $R1 "s;@client_address@;$LocalHostAddress;g$\r$\n" + FileWrite $R1 "s;@storage_address@;$LocalHostAddress;g$\r$\n" + + ${If} "$ConfigClientName" != "" + FileWrite $R1 "s;@client_name@;$ConfigClientName;g$\r$\n" + ${EndIf} + ${If} "$ConfigClientPort" != "" + FileWrite $R1 "s;@client_port@;$ConfigClientPort;g$\r$\n" + ${EndIf} + ${If} "$ConfigClientMaxJobs" != "" + FileWrite $R1 "s;@client_maxjobs@;$ConfigClientMaxJobs;g$\r$\n" + ${EndIf} + ${If} "$ConfigClientPassword" != "" + FileWrite $R1 "s;@client_password@;$ConfigClientPassword;g$\r$\n" + ${EndIf} + ${If} "$ConfigStorageName" != "" + FileWrite $R1 "s;@storage_name@;$ConfigStorageName;g$\r$\n" + ${EndIf} + ${If} "$ConfigStoragePort" != "" + FileWrite $R1 "s;@storage_port@;$ConfigStoragePort;g$\r$\n" + ${EndIf} + ${If} "$ConfigStorageMaxJobs" != "" + FileWrite $R1 "s;@storage_maxjobs@;$ConfigStorageMaxJobs;g$\r$\n" + ${EndIf} + ${If} "$ConfigStoragePassword" != "" + FileWrite $R1 "s;@storage_password@;$ConfigStoragePassword;g$\r$\n" + ${EndIf} + ${If} "$ConfigDirectorName" != "" + FileWrite $R1 "s;@director_name@;$ConfigDirectorName;g$\r$\n" + ${EndIf} + ${If} "$ConfigDirectorPort" != "" + FileWrite $R1 "s;@director_port@;$ConfigDirectorPort;g$\r$\n" + ${EndIf} + ${If} "$ConfigDirectorMaxJobs" != "" + FileWrite $R1 "s;@director_maxjobs@;$ConfigDirectorMaxJobs;g$\r$\n" + ${EndIf} + ${If} "$ConfigDirectorPassword" != "" + FileWrite $R1 "s;@director_password@;$ConfigDirectorPassword;g$\r$\n" + ${EndIf} + ${If} "$ConfigDirectorMailServer" != "" + 
FileWrite $R1 "s;@smtp_host@;$ConfigDirectorMailServer;g$\r$\n" + ${EndIf} + ${If} "$ConfigDirectorMailAddress" != "" + FileWrite $R1 "s;@job_email@;$ConfigDirectorMailAddress;g$\r$\n" + ${EndIf} + ${If} "$ConfigMonitorName" != "" + FileWrite $R1 "s;@monitor_name@;$ConfigMonitorName;g$\r$\n" + ${EndIf} + ${If} "$ConfigMonitorPassword" != "" + FileWrite $R1 "s;@monitor_password@;$ConfigMonitorPassword;g$\r$\n" + ${EndIf} + + ${If} $ConfigDirectorDB = 1 + ${If} $MySQLPath != "" + ${StrRep} $R2 "$MySQLPath\bin" "\" "\\" + FileWrite $R1 "s;@SQL_BINDIR@;$R2;g$\r$\n" + ${EndIf} + ${ElseIf} $ConfigDirectorDB = 2 + ${If} $PostgreSQLPath != "" + ${StrRep} $R2 "$PostgreSQLPath\bin" "\" "\\" + FileWrite $R1 "s;@SQL_BINDIR@;$R2;g$\r$\n" + ${EndIf} + ${EndIf} + + FileClose $R1 + + ${If} $InstallType = ${MigrateInstall} + FileOpen $R1 $PLUGINSDIR\migrate.sed w + ${StrRep} $R2 "$APPDATA\Bacula\Work" "\" "\\\\" + FileWrite $R1 's;\(Working *Directory *= *\)[^ ][^ ]*.*$$;\1"$R2";$\r$\n' + FileWrite $R1 's;\(Pid *Directory *= *\)[^ ][^ ]*.*$$;\1"$R2";$\r$\n' + FileClose $R1 + ${EndIf} + + ${If} ${FileExists} "$OldInstallDir\bin\bacula-fd.exe" + nsExec::ExecToLog '"$OldInstallDir\bin\bacula-fd.exe" /kill' ; Shutdown any bacula that could be running + Sleep 3000 + nsExec::ExecToLog '"$OldInstallDir\bin\bacula-fd.exe" /remove' ; Remove existing service + ${EndIf} + + ${If} ${FileExists} "$OldInstallDir\bin\bacula-sd.exe" + nsExec::ExecToLog '"$OldInstallDir\bin\bacula-sd.exe" /kill' ; Shutdown any bacula that could be running + Sleep 3000 + nsExec::ExecToLog '"$OldInstallDir\bin\bacula-sd.exe" /remove' ; Remove existing service + ${EndIf} + + ${If} ${FileExists} "$OldInstallDir\bin\bacula-dir.exe" + nsExec::ExecToLog '"$OldInstallDir\bin\bacula-dir.exe" /kill' ; Shutdown any bacula that could be running + Sleep 3000 + nsExec::ExecToLog '"$OldInstallDir\bin\bacula-dir.exe" /remove' ; Remove existing service + ${EndIf} + +SectionEnd + +SectionGroup "Client" SecGroupClient + +Section "File Service" SecFileDaemon + SectionIn 1 2 3 + + SetOutPath "$INSTDIR\bin\fdplugins" + File "${SRC_DIR}\exchange-fd.dll" + + SetOutPath "$INSTDIR\bin" + + File "${SRC_DIR}\bacula-fd.exe" + + ${If} $InstallType = ${MigrateInstall} + ${AndIf} ${FileExists} "$OldInstallDir\bin\bacula-fd.conf" + CopyFiles "$OldInstallDir\bin\bacula-fd.conf" "$APPDATA\Bacula" + nsExec::ExecToLog '$PLUGINSDIR\sed.exe -f "$PLUGINSDIR\migrate.sed" -i.bak "$APPDATA\Bacula\bacula-fd.conf"' + ${Else} + File "/oname=$PLUGINSDIR\bacula-fd.conf" "bacula-fd.conf.in" + + StrCpy $0 "$APPDATA\Bacula" + StrCpy $1 bacula-fd.conf + Call ConfigEditAndCopy + ${EndIf} + + StrCpy $0 bacula-fd + StrCpy $1 "File Service" + StrCpy $2 $ConfigClientInstallService + StrCpy $3 $ConfigClientStartService + + Call InstallDaemon + + CreateShortCut "$SMPROGRAMS\Bacula\Configuration\Edit Client Configuration.lnk" "write.exe" '"$APPDATA\Bacula\bacula-fd.conf"' +SectionEnd + +SectionGroupEnd + +SectionGroup "Server" SecGroupServer + +Section "Storage Service" SecStorageDaemon + SectionIn 2 3 + + SetOutPath "$INSTDIR\bin" + + File "${SRC_DIR}\loaderinfo.exe" + File "${SRC_DIR}\mt.exe" + File "${SRC_DIR}\mtx.exe" + File "${SRC_DIR}\scsitape.exe" + File "${SRC_DIR}\tapeinfo.exe" + File "${SRC_DIR}\bacula-sd.exe" + File "${SRC_DIR}\bcopy.exe" + File "${SRC_DIR}\bextract.exe" + File "${SRC_DIR}\bls.exe" + File "${SRC_DIR}\bscan.exe" + File "${SRC_DIR}\btape.exe" + File "${SRC_DIR}\scsilist.exe" + File "${SRC_DIR}\mkisofs.exe" + File "${SRC_DIR}\growisofs.exe" + File 
"${SRC_DIR}\dvd-ram-control.exe" + File "${SRC_DIR}\dvd+rw-booktype.exe" + File "${SRC_DIR}\dvd+rw-format.exe" + File "${SRC_DIR}\dvd+rw-mediainfo.exe" + + File "/oname=$PLUGINSDIR\mtx-changer.cmd" "${SRC_DIR}\mtx-changer.cmd" + + StrCpy $0 "$INSTDIR\bin" + StrCpy $1 mtx-changer.cmd + Call ConfigEditAndCopy + + File "/oname=$PLUGINSDIR\disk-changer.cmd" "${SRC_DIR}\disk-changer.cmd" + + StrCpy $0 "$INSTDIR\bin" + StrCpy $1 disk-changer.cmd + Call ConfigEditAndCopy + + File "/oname=$PLUGINSDIR\dvd-handler.cmd" "${SRC_DIR}\dvd-handler.cmd" + + StrCpy $0 "$INSTDIR\bin" + StrCpy $1 dvd-handler.cmd + Call ConfigEditAndCopy + + File "/oname=$PLUGINSDIR\bacula-sd.conf" "bacula-sd.conf.in" + + StrCpy $0 "$APPDATA\Bacula" + StrCpy $1 bacula-sd.conf + Call ConfigEditAndCopy + + StrCpy $0 bacula-sd + StrCpy $1 "Storage Service" + StrCpy $2 $ConfigStorageInstallService + StrCpy $3 $ConfigStorageStartService + Call InstallDaemon + + CreateShortCut "$SMPROGRAMS\Bacula\Configuration\List Devices.lnk" "$INSTDIR\bin\scsilist.exe" "/pause" + CreateShortCut "$SMPROGRAMS\Bacula\Configuration\Edit Storage Configuration.lnk" "write.exe" '"$APPDATA\Bacula\bacula-sd.conf"' +SectionEnd + +Section "Director Service" SecDirectorDaemon + SectionIn 2 3 + + SetOutPath "$INSTDIR\bin" + + ${If} $ConfigDirectorDB = 1 + File /oname=bacula_cats.dll "${SRC_DIR}\cats_mysql.dll" + File "${SRC_DIR}\libmysql.dll" + File /oname=$PLUGINSDIR\create_database.cmd ${SRC_DIR}\create_mysql_database.cmd + File /oname=$PLUGINSDIR\drop_database.cmd ${SRC_DIR}\drop_mysql_database.cmd + File /oname=$PLUGINSDIR\make_tables.cmd ${SRC_DIR}\make_mysql_tables.cmd + File ${SRC_DIR}\make_mysql_tables.sql + File /oname=$PLUGINSDIR\drop_tables.cmd ${SRC_DIR}\drop_mysql_tables.cmd + File ${SRC_DIR}\drop_mysql_tables.sql + File /oname=$PLUGINSDIR\grant_privileges.cmd ${SRC_DIR}\grant_mysql_privileges.cmd + File ${SRC_DIR}\grant_mysql_privileges.sql + File /oname=$PLUGINSDIR\make_catalog_backup.cmd ${SRC_DIR}\make_mysql_catalog_backup.cmd + ${ElseIf} $ConfigDirectorDB = 2 + File /oname=bacula_cats.dll "${SRC_DIR}\cats_postgresql.dll" + File "${SRC_DIR}\libpq.dll" +!if "${BUILD_TOOLS}" == "VC8" + File "${SRC_DIR}\comerr32.dll" + File "${SRC_DIR}\libintl-2.dll" + File "${SRC_DIR}\libiconv-2.dll" + File "${SRC_DIR}\krb5_32.dll" +!endif +!If "${BUILD_TOOLS}" == "VC8_DEBUG" + File "${SRC_DIR}\comerr32.dll" + File "${SRC_DIR}\libintl-2.dll" + File "${SRC_DIR}\libiconv-2.dll" + File "${SRC_DIR}\krb5_32.dll" +!endif + File /oname=$PLUGINSDIR\create_database.cmd ${SRC_DIR}\create_postgresql_database.cmd + File /oname=$PLUGINSDIR\drop_database.cmd ${SRC_DIR}\drop_postgresql_database.cmd + File /oname=$PLUGINSDIR\make_tables.cmd ${SRC_DIR}\make_postgresql_tables.cmd + File ${SRC_DIR}\create_postgresql_database.sql + File ${SRC_DIR}\make_postgresql_tables.sql + File /oname=$PLUGINSDIR\drop_tables.cmd ${SRC_DIR}\drop_postgresql_tables.cmd + File ${SRC_DIR}\drop_postgresql_tables.sql + File /oname=$PLUGINSDIR\grant_privileges.cmd ${SRC_DIR}\grant_postgresql_privileges.cmd + File ${SRC_DIR}\grant_postgresql_privileges.sql + File /oname=$PLUGINSDIR\make_catalog_backup.cmd ${SRC_DIR}\make_postgresql_catalog_backup.cmd + ${ElseIf} $ConfigDirectorDB = 3 + File "${SRC_DIR}\sqlite3.exe" +!if "${BUILD_TOOLS}" == "VC8" + File "${SRC_DIR}\sqlite3.exe.manifest" +!endif +!If "${BUILD_TOOLS}" == "VC8_DEBUG" + File "${SRC_DIR}\sqlite3.exe.manifest" +!endif + File /oname=bacula_cats.dll "${SRC_DIR}\cats_sqlite3.dll" + File /oname=$PLUGINSDIR\create_database.cmd 
${SRC_DIR}\create_sqlite3_database.cmd + File /oname=$PLUGINSDIR\drop_database.cmd ${SRC_DIR}\drop_sqlite3_database.cmd + File /oname=$PLUGINSDIR\make_tables.cmd ${SRC_DIR}\make_sqlite3_tables.cmd + File ${SRC_DIR}\make_sqlite3_tables.sql + File /oname=$PLUGINSDIR\drop_tables.cmd ${SRC_DIR}\drop_sqlite3_tables.cmd + File /oname=$PLUGINSDIR\grant_privileges.cmd ${SRC_DIR}\grant_sqlite3_privileges.cmd + File /oname=$PLUGINSDIR\make_catalog_backup.cmd ${SRC_DIR}\make_sqlite3_catalog_backup.cmd + ${EndIf} + + File "${SRC_DIR}\bacula-dir.exe" + File "${SRC_DIR}\dbcheck.exe" + + File "/oname=$PLUGINSDIR\delete_catalog_backup.cmd" "${SRC_DIR}\delete_catalog_backup.cmd" + + StrCpy $0 "$INSTDIR\bin" + + StrCpy $1 create_database.cmd + Call ConfigEditAndCopy + + StrCpy $1 drop_database.cmd + Call ConfigEditAndCopy + + StrCpy $1 make_tables.cmd + Call ConfigEditAndCopy + + StrCpy $1 drop_tables.cmd + Call ConfigEditAndCopy + + StrCpy $1 grant_privileges.cmd + Call ConfigEditAndCopy + + StrCpy $1 make_catalog_backup.cmd + Call ConfigEditAndCopy + + StrCpy $1 delete_catalog_backup.cmd + Call ConfigEditAndCopy + + File "${SRC_DIR}\query.sql" + + File "/oname=$PLUGINSDIR\bacula-dir.conf" "bacula-dir.conf.in" + + StrCpy $0 "$APPDATA\Bacula" + StrCpy $1 bacula-dir.conf + Call ConfigEditAndCopy + + StrCpy $0 bacula-dir + StrCpy $1 "Director Service" + StrCpy $2 $ConfigDirectorInstallService + StrCpy $3 $ConfigDirectorStartService + Call InstallDaemon + + CreateShortCut "$SMPROGRAMS\Bacula\Configuration\Edit Director Configuration.lnk" "write.exe" '"$APPDATA\Bacula\bacula-dir.conf"' +SectionEnd + +SectionGroupEnd + +SectionGroup "Consoles" SecGroupConsoles + +Section "Command Console" SecConsole + SectionIn 1 2 3 + + SetOutPath "$INSTDIR\bin" + + File "${SRC_DIR}\bconsole.exe" + Call InstallCommonFiles + + ${If} $InstallType = ${MigrateInstall} + ${AndIf} ${FileExists} "$OldInstallDir\bin\bconsole.conf" + CopyFiles "$OldInstallDir\bin\bconsole.conf" "$APPDATA\Bacula" + ${Else} + File "/oname=$PLUGINSDIR\bconsole.conf" "bconsole.conf.in" + StrCpy $0 "$APPDATA\Bacula" + StrCpy $1 bconsole.conf + Call ConfigEditAndCopy + ${EndIf} + + CreateShortCut "$SMPROGRAMS\Bacula\bconsole.lnk" "$INSTDIR\bin\bconsole.exe" '-c "$APPDATA\Bacula\bconsole.conf"' "$INSTDIR\bin\bconsole.exe" 0 + CreateShortCut "$SMPROGRAMS\Bacula\Configuration\Edit Command Console Configuration.lnk" "write.exe" '"$APPDATA\Bacula\bconsole.conf"' + +SectionEnd + +Section "Bat Console" SecBatConsole + SectionIn 1 2 3 + + SetOutPath "$INSTDIR\bin" + + Call InstallCommonFiles + File "${SRC_DIR}\QtCore4.dll" + File "${SRC_DIR}\QtGui4.dll" + + File "${SRC_DIR}\bat.exe" + + ${If} $InstallType = ${MigrateInstall} + ${AndIf} ${FileExists} "$OldInstallDir\bin\bat.conf" + CopyFiles "$OldInstallDir\bin\bat.conf" "$APPDATA\Bacula" + ${Else} + File "/oname=$PLUGINSDIR\bat.conf" "bat.conf.in" + StrCpy $0 "$APPDATA\Bacula" + StrCpy $1 bat.conf + Call ConfigEditAndCopy + ${EndIf} + + ; Create Start Menu entry + CreateShortCut "$SMPROGRAMS\Bacula\Bat.lnk" "$INSTDIR\bin\bat.exe" '-c "$APPDATA\Bacula\bat.conf"' "$INSTDIR\bin\bat.exe" 0 + CreateShortCut "$SMPROGRAMS\Bacula\Configuration\Edit Bat Configuration.lnk" "write.exe" '"$APPDATA\Bacula\bat.conf"' +SectionEnd + + +Section "Graphical Console" SecWxConsole + SectionIn 1 2 3 + + SetOutPath "$INSTDIR\bin" + + Call InstallCommonFiles +!if "${BUILD_TOOLS}" == "VC8" + File "${SRC_DIR}\wxbase28_vc_bacula.dll" + File "${SRC_DIR}\wxmsw28_core_vc_bacula.dll" +!endif +!If "${BUILD_TOOLS}" == "VC8_DEBUG" + File 
"${SRC_DIR}\wxbase28_vc_bacula.dll" + File "${SRC_DIR}\wxmsw28_core_vc_bacula.dll" +!endif +!if "${BUILD_TOOLS}" == "MinGW" + File "${SRC_DIR}\wxbase28_gcc_bacula.dll" + File "${SRC_DIR}\wxmsw28_core_gcc_bacula.dll" +!endif + + File "${SRC_DIR}\bwx-console.exe" + + ${If} $InstallType = ${MigrateInstall} + ${AndIf} ${FileExists} "$OldInstallDir\bin\bwx-console.conf" + CopyFiles "$OldInstallDir\bin\bwx-console.conf" "$APPDATA\Bacula" + ${Else} + File "/oname=$PLUGINSDIR\bwx-console.conf" "bwx-console.conf.in" + StrCpy $0 "$APPDATA\Bacula" + StrCpy $1 bwx-console.conf + Call ConfigEditAndCopy + ${EndIf} + + ; Create Start Menu entry + CreateShortCut "$SMPROGRAMS\Bacula\bwx-console.lnk" "$INSTDIR\bin\bwx-console.exe" '-c "$APPDATA\Bacula\bwx-console.conf"' "$INSTDIR\bin\bwx-console.exe" 0 + CreateShortCut "$SMPROGRAMS\Bacula\Configuration\Edit Graphical Console Configuration.lnk" "write.exe" '"$APPDATA\Bacula\bwx-console.conf"' +SectionEnd + +SectionGroupEnd + +SectionGroup "Documentation" SecGroupDocumentation + +Section "Documentation (Acrobat Format)" SecDocPdf + SectionIn 1 2 3 + + SetOutPath "$INSTDIR\doc" + CreateDirectory "$INSTDIR\doc" + + File "${SRC_DIR}\docs\manuals\en\console\console.pdf" + File "${SRC_DIR}\docs\manuals\en\misc\misc.pdf" + File "${SRC_DIR}\docs\manuals\en\main\main.pdf" + File "${SRC_DIR}\docs\manuals\en\utility\utility.pdf" + File "${SRC_DIR}\docs\manuals\en\problems\problems.pdf" + CreateShortCut "$SMPROGRAMS\Bacula\Documentation\Console.lnk" '"$INSTDIR\doc\console.pdf"' + CreateShortCut "$SMPROGRAMS\Bacula\Documentation\Main.lnk" '"$INSTDIR\doc\main.pdf"' + CreateShortCut "$SMPROGRAMS\Bacula\Documentation\Misc.lnk" '"$INSTDIR\doc\misc.pdf"' + CreateShortCut "$SMPROGRAMS\Bacula\Documentation\Utility.lnk" '"$INSTDIR\doc\utility.pdf"' + CreateShortCut "$SMPROGRAMS\Bacula\Documentation\Problems.lnk" '"$INSTDIR\doc\problems.pdf"' +SectionEnd + +Section "Documentation (HTML Format)" SecDocHtml + SectionIn 3 + + SetOutPath "$INSTDIR\doc" + CreateDirectory "$INSTDIR\doc" + +; File "${SRC_DIR}\manual\bacula\*.html" +; File "${SRC_DIR}\manual\bacula\*.png" +; File "${SRC_DIR}\manual\bacula\*.css" +; CreateShortCut "$SMPROGRAMS\Bacula\Documentation\Manual (HTML).lnk" '"$INSTDIR\doc\index.html"' +SectionEnd + +SectionGroupEnd + +Section "-Finish" + Push $R0 + + ${If} $OsIsNT = 1 + nsExec::ExecToLog 'cmd.exe /C echo Y|cacls "$INSTDIR" /T /G SYSTEM:F Administrators:F' + nsExec::ExecToLog 'cmd.exe /C echo Y|cacls "$APPDATA\Bacula" /T /G SYSTEM:F Administrators:F' + ${EndIf} + + ; Write the uninstall keys for Windows & create Start Menu entry + WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Bacula" "DisplayName" "Bacula" + WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Bacula" "InstallLocation" "$INSTDIR" + WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Bacula" "DisplayVersion" "${VERSION}" + ${StrTok} $R0 "${VERSION}" "." 0 0 + WriteRegDWORD HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Bacula" "VersionMajor" $R0 + ${StrTok} $R0 "${VERSION}" "." 
1 0 + WriteRegDWORD HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Bacula" "VersionMinor" $R0 + WriteRegDWORD HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Bacula" "NoModify" 1 + WriteRegDWORD HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Bacula" "NoRepair" 1 + WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Bacula" "URLUpdateInfo" "http://sourceforge.net/project/showfiles.php?group_id=50727" + WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Bacula" "URLInfoAbout" "http://www.bacula.org" + WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Bacula" "HelpLink" "http://www.bacula.org/?page=support" + WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Bacula" "UninstallString" '"$INSTDIR\uninstall.exe"' + WriteUninstaller "$INSTDIR\Uninstall.exe" + CreateShortCut "$SMPROGRAMS\Bacula\Uninstall Bacula.lnk" "$INSTDIR\Uninstall.exe" "" "$INSTDIR\Uninstall.exe" 0 + Pop $R0 +SectionEnd + +; Extra Page descriptions + +LangString DESC_SecFileDaemon ${LANG_ENGLISH} "Install Bacula File Daemon on this system." +LangString DESC_SecStorageDaemon ${LANG_ENGLISH} "Install Bacula Storage Daemon on this system." +LangString DESC_SecDirectorDaemon ${LANG_ENGLISH} "Install Bacula Director Daemon on this system." +LangString DESC_SecConsole ${LANG_ENGLISH} "Install command console program on this system." +LangString DESC_SecBatConsole ${LANG_ENGLISH} "Install Bat graphical console program on this system." +LangString DESC_SecWxConsole ${LANG_ENGLISH} "Install wxWidgets graphical console program on this system." +LangString DESC_SecDocPdf ${LANG_ENGLISH} "Install documentation in Acrobat format on this system." +LangString DESC_SecDocHtml ${LANG_ENGLISH} "Install documentation in HTML format on this system." + +LangString TITLE_ConfigPage1 ${LANG_ENGLISH} "Configuration" +LangString SUBTITLE_ConfigPage1 ${LANG_ENGLISH} "Set installation configuration." + +LangString TITLE_ConfigPage2 ${LANG_ENGLISH} "Configuration (continued)" +LangString SUBTITLE_ConfigPage2 ${LANG_ENGLISH} "Set installation configuration." + +LangString TITLE_InstallType ${LANG_ENGLISH} "Installation Type" +LangString SUBTITLE_InstallType ${LANG_ENGLISH} "Choose installation type." + +LangString TITLE_WriteTemplates ${LANG_ENGLISH} "Create Templates" +LangString SUBTITLE_WriteTemplates ${LANG_ENGLISH} "Create resource templates for inclusion in the Director's configuration file." + +!InsertMacro MUI_FUNCTION_DESCRIPTION_BEGIN + !InsertMacro MUI_DESCRIPTION_TEXT ${SecFileDaemon} $(DESC_SecFileDaemon) + !InsertMacro MUI_DESCRIPTION_TEXT ${SecStorageDaemon} $(DESC_SecStorageDaemon) + !InsertMacro MUI_DESCRIPTION_TEXT ${SecDirectorDaemon} $(DESC_SecDirectorDaemon) + !InsertMacro MUI_DESCRIPTION_TEXT ${SecConsole} $(DESC_SecConsole) + !InsertMacro MUI_DESCRIPTION_TEXT ${SecBatConsole} $(DESC_SecBatConsole) + !InsertMacro MUI_DESCRIPTION_TEXT ${SecWxConsole} $(DESC_SecWxConsole) + !InsertMacro MUI_DESCRIPTION_TEXT ${SecDocPdf} $(DESC_SecDocPdf) + !InsertMacro MUI_DESCRIPTION_TEXT ${SecDocHtml} $(DESC_SecDocHtml) +!InsertMacro MUI_FUNCTION_DESCRIPTION_END + +; Uninstall section + +UninstallText "This will uninstall Bacula. Hit next to continue." 
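The "-Finish" section above derives the VersionMajor and VersionMinor uninstall-key values from ${VERSION} with two ${StrTok} calls (field 0 and field 1, split on "."). A minimal C++ sketch of the same split, assuming a version string of the form "9.4.2" purely as an example value:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Split a version string on '.' and take fields 0 and 1, mirroring the
 * two ${StrTok} calls that feed VersionMajor/VersionMinor above. */
int main()
{
   char version[] = "9.4.2";                 /* example value only */
   const char *major = strtok(version, ".");
   const char *minor = strtok(NULL, ".");
   printf("VersionMajor=%ld VersionMinor=%ld\n",
          major ? strtol(major, NULL, 10) : 0L,
          minor ? strtol(minor, NULL, 10) : 0L);
   return 0;
}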
+ +Section "Uninstall" + ; Shutdown any baculum that could be running + nsExec::ExecToLog '"$INSTDIR\bin\bacula-fd.exe" /kill' + nsExec::ExecToLog '"$INSTDIR\bin\bacula-sd.exe" /kill' + nsExec::ExecToLog '"$INSTDIR\bin\bacula-dir.exe" /kill' + Sleep 3000 + + ReadRegDWORD $R0 HKLM "Software\Bacula" "Service_Bacula-fd" + ${If} $R0 = 1 + ; Remove bacula service + nsExec::ExecToLog '"$INSTDIR\bin\bacula-fd.exe" /remove' + nsExec::ExecToLog '"$INSTDIR\bin\exchange-fd.dll" /remove' + ${EndIf} + + ReadRegDWORD $R0 HKLM "Software\Bacula" "Service_Bacula-sd" + ${If} $R0 = 1 + ; Remove bacula service + nsExec::ExecToLog '"$INSTDIR\bin\bacula-sd.exe" /remove' + ${EndIf} + + ReadRegDWORD $R0 HKLM "Software\Bacula" "Service_Bacula-dir" + ${If} $R0 = 1 + ; Remove bacula service + nsExec::ExecToLog '"$INSTDIR\bin\bacula-dir.exe" /remove' + ${EndIf} + + ; remove registry keys + DeleteRegKey HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Bacula" + DeleteRegKey HKLM "Software\Bacula" + + ; remove start menu items + SetShellVarContext all + Delete /REBOOTOK "$SMPROGRAMS\Bacula\*" + RMDir "$SMPROGRAMS\Bacula" + + ; remove files and uninstaller (preserving config for now) + Delete /REBOOTOK "$INSTDIR\bin\*" + Delete /REBOOTOK "$INSTDIR\doc\*" + Delete /REBOOTOK "$INSTDIR\*" + + ; Check for existing installation + MessageBox MB_YESNO|MB_ICONQUESTION \ + "Would you like to delete the current configuration files and the working state file?" IDNO NoDel + Delete /REBOOTOK "$APPDATA\Bacula\*" + Delete /REBOOTOK "$APPDATA\Bacula\Work\*" + Delete /REBOOTOK "$APPDATA\Bacula\Spool\*" + Delete /REBOOTOK "$PLUGINSDIR\bacula-*.conf" + Delete /REBOOTOK "$PLUGINSDIR\*console.conf" + Delete /REBOOTOK "$PLUGINSDIR\*conf.in" + Delete /REBOOTOK "$PLUGINSDIR\openssl.exe" + Delete /REBOOTOK "$PLUGINSDIR\libeay32.dll" + Delete /REBOOTOK "$PLUGINSDIR\ssleay32.dll" + Delete /REBOOTOK "$PLUGINSDIR\sed.exe" + Delete /REBOOTOK "$PLUGINSDIR\pw.txt" + Delete /REBOOTOK "$PLUGINSDIR\*.sed" + Delete /REBOOTOK "$PLUGINSDIR\*.cmd" + Delete /REBOOTOK "$PLUGINSDIR\*.sql" + RMDir "$APPDATA\Bacula\Work" + RMDir "$APPDATA\Bacula\Spool" + RMDir "$APPDATA\Bacula" +NoDel: + + ; remove directories used + RMDir "$INSTDIR\bin" + RMDir "$INSTDIR\doc" + RMDir "$INSTDIR" +SectionEnd + +; +; $0 - Service Name (ie Bacula-FD) +; $1 - Service Description (ie Bacula File Daemon) +; $2 - Install as Service +; $3 - Start Service now +; +Function InstallDaemon + Call InstallCommonFiles + + WriteRegDWORD HKLM "Software\Bacula" "Service_$0" $2 + + ${If} $2 = 1 + nsExec::ExecToLog '"$INSTDIR\bin\$0.exe" /install -c "$APPDATA\Bacula\$0.conf"' + + ${If} $OsIsNT <> 1 + File "Start.bat" + File "Stop.bat" + ${EndIf} + + ; Start the service? 
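The conditional that follows answers the comment above: on NT-class systems the freshly registered service is started with "net start", otherwise the daemon binary is launched directly. For readers unfamiliar with what "net start" does under the hood, here is a hedged C++ sketch using the Service Control Manager API; the service name "bacula-fd" matches the $0 value passed in by the File Service section, but the helper itself (start_service) is hypothetical and not part of the installer.

#include <windows.h>
#include <stdio.h>

/* Illustration only: start an already-installed service by name, roughly
 * what "net start <name>" achieves.  Link with advapi32.lib. */
static DWORD start_service(const char *name)
{
   SC_HANDLE scm = OpenSCManagerA(NULL, NULL, SC_MANAGER_CONNECT);
   if (!scm) return GetLastError();

   SC_HANDLE svc = OpenServiceA(scm, name, SERVICE_START);
   if (!svc) {
      DWORD err = GetLastError();
      CloseServiceHandle(scm);
      return err;
   }

   DWORD rc = StartServiceA(svc, 0, NULL) ? 0 : GetLastError();
   CloseServiceHandle(svc);
   CloseServiceHandle(scm);
   return rc;
}

int main()
{
   DWORD rc = start_service("bacula-fd");   /* assumed service name */
   printf("start_service returned %lu\n", (unsigned long)rc);
   return (int)rc;
}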
+ + ${If} $3 = 1 + ${If} $OsIsNT = 1 + nsExec::ExecToLog 'net start $0' + ${Else} + Exec '"$INSTDIR\bin\$0.exe" -c "$APPDATA\Bacula\$0.conf"' + ${EndIf} + ${EndIf} + ${Else} + CreateShortCut "$SMPROGRAMS\Bacula\Start $1.lnk" "$INSTDIR\bin\$0.exe" '-c "$APPDATA\Bacula\$0.conf"' "$INSTDIR\bin\$0.exe" 0 + ${EndIf} +FunctionEnd + +Function GetComputerName + Push $R0 + Push $R1 + Push $R2 + + System::Call "kernel32::GetComputerNameA(t .R0, *i ${NSIS_MAX_STRLEN} R1) i.R2" + + ${StrCase} $R0 $R0 "L" + + Pop $R2 + Pop $R1 + Exch $R0 +FunctionEnd + +!define ComputerNameDnsFullyQualified 3 + +Function GetHostName + Push $R0 + Push $R1 + Push $R2 + + ${If} $OsIsNT = 1 + System::Call "kernel32::GetComputerNameExA(i ${ComputerNameDnsFullyQualified}, t .R0, *i ${NSIS_MAX_STRLEN} R1) i.R2 ?e" + ${If} $R2 = 0 + Pop $R2 + DetailPrint "GetComputerNameExA failed - LastError = $R2" + Call GetComputerName + Pop $R0 + ${Else} + Pop $R2 + ${EndIf} + ${Else} + Call GetComputerName + Pop $R0 + ${EndIf} + + Pop $R2 + Pop $R1 + Exch $R0 +FunctionEnd + +!define NameUserPrincipal 8 + +Function GetUserName + Push $R0 + Push $R1 + Push $R2 + + ${If} $OsIsNT = 1 + System::Call "secur32::GetUserNameExA(i ${NameUserPrincipal}, t .R0, *i ${NSIS_MAX_STRLEN} R1) i.R2 ?e" + ${If} $R2 = 0 + Pop $R2 + DetailPrint "GetUserNameExA failed - LastError = $R2" + Pop $R0 + StrCpy $R0 "" + ${Else} + Pop $R2 + ${EndIf} + ${Else} + StrCpy $R0 "" + ${EndIf} + + ${If} $R0 == "" + System::Call "advapi32::GetUserNameA(t .R0, *i ${NSIS_MAX_STRLEN} R1) i.R2 ?e" + ${If} $R2 = 0 + Pop $R2 + DetailPrint "GetUserNameA failed - LastError = $R2" + StrCpy $R0 "" + ${Else} + Pop $R2 + ${EndIf} + ${EndIf} + + Pop $R2 + Pop $R1 + Exch $R0 +FunctionEnd + +Function ConfigEditAndCopy + Push $R1 + + ${If} ${FileExists} "$0\$1" + StrCpy $R1 ".new" + ${Else} + StrCpy $R1 "" + ${EndIf} + + nsExec::ExecToLog '$PLUGINSDIR\sed.exe -f "$PLUGINSDIR\config.sed" -i.bak "$PLUGINSDIR\$1"' + CopyFiles "$PLUGINSDIR\$1" "$0\$1$R1" + + Pop $R1 +FunctionEnd + +Function FindDatabaseApps + Push $R1 + + ReadRegStr $0 HKLM "Software\MySQL AB\MySQL Server 5.0" "Location" + + ${If} $0 != "" + Call RemoveTrailingSlash + StrCpy $MySQLPath $0 + ReadRegStr $0 HKLM "Software\MySQL AB\MySQL Server 5.0" "Version" + StrCpy $MySQLVersion $0 + ${EndIf} + + EnumRegKey $R1 HKLM "Software\PostgreSQL\Installations" 0 + ${If} $R1 != "" + ReadRegStr $0 HKLM "Software\PostgreSQL\Installations\$R1" "Base Directory" + Call RemoveTrailingSlash + StrCpy $PostgreSQLPath $0 + ReadRegStr $0 HKLM "Software\PostgreSQL\Installations\$R1" "Version" + StrCpy $PostgreSQLVersion $0 + ${EndIf} + + Pop $R1 +FunctionEnd + +Function RemoveTrailingSlash + Push $R1 + StrCpy $R1 $0 "" -1 + ${If} $R1 == "\" + StrCpy $0 $0 -1 + ${EndIf} + Pop $R1 +FunctionEnd + +Function IsDirectorSelected + Push $R0 + SectionGetFlags ${SecDirectorDaemon} $R0 + IntOp $R0 $R0 & ${SF_SELECTED} + Exch $R0 +FunctionEnd + +Function GetSelectedComponents + Push $R0 + StrCpy $R0 0 + ${If} ${SectionIsSelected} ${SecFileDaemon} + IntOp $R0 $R0 | ${ComponentFile} + ${EndIf} + ${If} ${SectionIsSelected} ${SecStorageDaemon} + IntOp $R0 $R0 | ${ComponentStorage} + ${EndIf} + ${If} ${SectionIsSelected} ${SecDirectorDaemon} + IntOp $R0 $R0 | ${ComponentDirector} + ${EndIf} + ${If} ${SectionIsSelected} ${SecConsole} + IntOp $R0 $R0 | ${ComponentTextConsole} + ${EndIf} + ${If} ${SectionIsSelected} ${SecBatConsole} + IntOp $R0 $R0 | ${ComponentBatConsole} + ${EndIf} + ${If} ${SectionIsSelected} ${SecWxConsole} + IntOp $R0 $R0 | ${ComponentGUIConsole} + 
${EndIf} + ${If} ${SectionIsSelected} ${SecDocPdf} + IntOp $R0 $R0 | ${ComponentPDFDocs} + ${EndIf} + ${If} ${SectionIsSelected} ${SecDocHtml} + IntOp $R0 $R0 | ${ComponentHTMLDocs} + ${EndIf} + Exch $R0 +FunctionEnd + +Function PageComponentsShow + ${If} $OsIsNT <> 1 + Call DisableServerSections + ${EndIf} + + Call SelectPreviousComponents + Call UpdateComponentUI +FunctionEnd + +Function PageDirectoryPre + ${If} $AutomaticInstall = 1 + ${OrIf} $InstallType = ${UpgradeInstall} + Abort + ${EndIf} +FunctionEnd + +Function LeaveInstallPage + Push "$INSTDIR\install.log" + Call DumpLog +FunctionEnd + +Function EnterWriteTemplates + Push $R0 + Push $R1 + + Call GetSelectedComponents + Pop $R0 + + IntOp $R0 $R0 & ${ComponentDirector} + IntOp $R1 $NewComponents & ${ComponentsFileAndStorage} + + ${If} $R0 <> 0 + ${OrIf} $R1 = 0 + Pop $R1 + Pop $R0 + Abort + ${EndIf} + + IntOp $R0 $NewComponents & ${ComponentFile} + ${If} $R0 = 0 + WriteINIStr "$PLUGINSDIR\WriteTemplates.ini" "Field 2" State 0 + WriteINIStr "$PLUGINSDIR\WriteTemplates.ini" "Field 2" Flags DISABLED + DeleteINIStr "$PLUGINSDIR\WriteTemplates.ini" "Field 3" State + WriteINIStr "$PLUGINSDIR\WriteTemplates.ini" "Field 3" Flags REQ_SAVE|FILE_EXPLORER|WARN_IF_EXIST|DISABLED + ${Else} + WriteINIStr "$PLUGINSDIR\WriteTemplates.ini" "Field 2" State 1 + DeleteINIStr "$PLUGINSDIR\WriteTemplates.ini" "Field 2" Flags + WriteINIStr "$PLUGINSDIR\WriteTemplates.ini" "Field 3" State "C:\$ConfigClientName.conf" + WriteINIStr "$PLUGINSDIR\WriteTemplates.ini" "Field 5" Flags REQ_SAVE|FILE_EXPLORER|WARN_IF_EXIST + ${EndIf} + + IntOp $R0 $NewComponents & ${ComponentStorage} + ${If} $R0 = 0 + WriteINIStr "$PLUGINSDIR\WriteTemplates.ini" "Field 4" State 0 + WriteINIStr "$PLUGINSDIR\WriteTemplates.ini" "Field 4" Flags DISABLED + DeleteINIStr "$PLUGINSDIR\WriteTemplates.ini" "Field 5" State + WriteINIStr "$PLUGINSDIR\WriteTemplates.ini" "Field 5" Flags REQ_SAVE|FILE_EXPLORER|WARN_IF_EXIST|DISABLED + ${Else} + WriteINIStr "$PLUGINSDIR\WriteTemplates.ini" "Field 4" State 1 + DeleteINIStr "$PLUGINSDIR\WriteTemplates.ini" "Field 4" Flags + WriteINIStr "$PLUGINSDIR\WriteTemplates.ini" "Field 5" State "C:\$ConfigStorageName.conf" + WriteINIStr "$PLUGINSDIR\WriteTemplates.ini" "Field 5" Flags REQ_SAVE|FILE_EXPLORER|WARN_IF_EXIST + ${EndIf} + + !InsertMacro MUI_HEADER_TEXT "$(TITLE_WriteTemplates)" "$(SUBTITLE_WriteTemplates)" + !InsertMacro MUI_INSTALLOPTIONS_DISPLAY "WriteTemplates.ini" + + !InsertMacro MUI_INSTALLOPTIONS_READ $R0 "WriteTemplates.ini" "Field 2" State + ${If} $R0 <> 0 + File "/oname=$PLUGINSDIR\client.conf.in" "client.conf.in" + + nsExec::ExecToLog '$PLUGINSDIR\sed.exe -f "$PLUGINSDIR\config.sed" -i.bak "$PLUGINSDIR\client.conf.in"' + !InsertMacro MUI_INSTALLOPTIONS_READ $R0 "WriteTemplates.ini" "Field 3" State + ${If} $R0 != "" + CopyFiles "$PLUGINSDIR\client.conf.in" "$R0" + ${EndIf} + ${EndIf} + + !InsertMacro MUI_INSTALLOPTIONS_READ $R0 "WriteTemplates.ini" "Field 4" State + ${If} $R0 <> 0 + File "/oname=$PLUGINSDIR\storage.conf.in" "storage.conf.in" + + nsExec::ExecToLog '$PLUGINSDIR\sed.exe -f "$PLUGINSDIR\config.sed" -i.bak "$PLUGINSDIR\storage.conf.in"' + !InsertMacro MUI_INSTALLOPTIONS_READ $R0 "WriteTemplates.ini" "Field 5" State + ${If} $R0 != "" + CopyFiles "$PLUGINSDIR\storage.conf.in" "$R0" + ${EndIf} + ${EndIf} + + Pop $R1 + Pop $R0 +FunctionEnd + +Function SelectPreviousComponents + ${If} $InstallType <> ${NewInstall} + IntOp $R1 $PreviousComponents & ${ComponentFile} + ${If} $R1 <> 0 + !InsertMacro SelectSection ${SecFileDaemon} + 
!InsertMacro SetSectionFlag ${SecFileDaemon} ${SF_RO} + ${Else} + !InsertMacro UnselectSection ${SecFileDaemon} + !InsertMacro ClearSectionFlag ${SecFileDaemon} ${SF_RO} + ${EndIf} + IntOp $R1 $PreviousComponents & ${ComponentStorage} + ${If} $R1 <> 0 + !InsertMacro SelectSection ${SecStorageDaemon} + !InsertMacro SetSectionFlag ${SecStorageDaemon} ${SF_RO} + ${Else} + !InsertMacro UnselectSection ${SecStorageDaemon} + !InsertMacro ClearSectionFlag ${SecStorageDaemon} ${SF_RO} + ${EndIf} + IntOp $R1 $PreviousComponents & ${ComponentDirector} + ${If} $R1 <> 0 + !InsertMacro SelectSection ${SecDirectorDaemon} + !InsertMacro SetSectionFlag ${SecDirectorDaemon} ${SF_RO} + ${Else} + !InsertMacro UnselectSection ${SecDirectorDaemon} + !InsertMacro ClearSectionFlag ${SecDirectorDaemon} ${SF_RO} + ${EndIf} + IntOp $R1 $PreviousComponents & ${ComponentTextConsole} + ${If} $R1 <> 0 + !InsertMacro SelectSection ${SecConsole} + !InsertMacro SetSectionFlag ${SecConsole} ${SF_RO} + ${Else} + !InsertMacro UnselectSection ${SecConsole} + !InsertMacro ClearSectionFlag ${SecConsole} ${SF_RO} + ${EndIf} + IntOp $R1 $PreviousComponents & ${ComponentBatConsole} + ${If} $R1 <> 0 + !InsertMacro SelectSection ${SecBatConsole} + !InsertMacro SetSectionFlag ${SecBatConsole} ${SF_RO} + ${Else} + !InsertMacro UnselectSection ${SecBatConsole} + !InsertMacro ClearSectionFlag ${SecBatConsole} ${SF_RO} + ${EndIf} + IntOp $R1 $PreviousComponents & ${ComponentGUIConsole} + ${If} $R1 <> 0 + !InsertMacro SelectSection ${SecWxConsole} + !InsertMacro SetSectionFlag ${SecWxConsole} ${SF_RO} + ${Else} + !InsertMacro UnselectSection ${SecWxConsole} + !InsertMacro ClearSectionFlag ${SecWxConsole} ${SF_RO} + ${EndIf} + IntOp $R1 $PreviousComponents & ${ComponentPDFDocs} + ${If} $R1 <> 0 + !InsertMacro SelectSection ${SecDocPdf} + !InsertMacro SetSectionFlag ${SecDocPdf} ${SF_RO} + ${Else} + !InsertMacro UnselectSection ${SecDocPdf} + !InsertMacro ClearSectionFlag ${SecDocPdf} ${SF_RO} + ${EndIf} + IntOp $R1 $PreviousComponents & ${ComponentHTMLDocs} + ${If} $R1 <> 0 + !InsertMacro SelectSection ${SecDocHtml} + !InsertMacro SetSectionFlag ${SecDocHtml} ${SF_RO} + ${Else} + !InsertMacro UnselectSection ${SecDocHtml} + !InsertMacro ClearSectionFlag ${SecDocHtml} ${SF_RO} + ${EndIf} + ${EndIf} +FunctionEnd + +Function DisableServerSections + !InsertMacro UnselectSection ${SecGroupServer} + !InsertMacro SetSectionFlag ${SecGroupServer} ${SF_RO} + !InsertMacro UnselectSection ${SecStorageDaemon} + !InsertMacro SetSectionFlag ${SecStorageDaemon} ${SF_RO} + !InsertMacro UnselectSection ${SecDirectorDaemon} + !InsertMacro SetSectionFlag ${SecDirectorDaemon} ${SF_RO} +FunctionEnd + +Function UpdateComponentUI + Push $R0 + Push $R1 + + Call GetSelectedComponents + Pop $R0 + + IntOp $R1 $R0 ^ $PreviousComponents + IntOp $NewComponents $R0 & $R1 + + ${If} $InstallType <> ${NewInstall} + IntOp $R1 $NewComponents & ${ComponentFile} + ${If} $R1 <> 0 + !InsertMacro SetSectionFlag ${SecFileDaemon} ${SF_BOLD} + ${Else} + !InsertMacro ClearSectionFlag ${SecFileDaemon} ${SF_BOLD} + ${EndIf} + IntOp $R1 $NewComponents & ${ComponentStorage} + ${If} $R1 <> 0 + !InsertMacro SetSectionFlag ${SecStorageDaemon} ${SF_BOLD} + ${Else} + !InsertMacro ClearSectionFlag ${SecStorageDaemon} ${SF_BOLD} + ${EndIf} + IntOp $R1 $NewComponents & ${ComponentDirector} + ${If} $R1 <> 0 + !InsertMacro SetSectionFlag ${SecDirectorDaemon} ${SF_BOLD} + ${Else} + !InsertMacro ClearSectionFlag ${SecDirectorDaemon} ${SF_BOLD} + ${EndIf} + IntOp $R1 $NewComponents & 
${ComponentTextConsole} + ${If} $R1 <> 0 + !InsertMacro SetSectionFlag ${SecConsole} ${SF_BOLD} + ${Else} + !InsertMacro ClearSectionFlag ${SecConsole} ${SF_BOLD} + ${EndIf} + IntOp $R1 $NewComponents & ${ComponentBatConsole} + ${If} $R1 <> 0 + !InsertMacro SetSectionFlag ${SecBatConsole} ${SF_BOLD} + ${Else} + !InsertMacro ClearSectionFlag ${SecBatConsole} ${SF_BOLD} + ${EndIf} + IntOp $R1 $NewComponents & ${ComponentGUIConsole} + ${If} $R1 <> 0 + !InsertMacro SetSectionFlag ${SecWxConsole} ${SF_BOLD} + ${Else} + !InsertMacro ClearSectionFlag ${SecWxConsole} ${SF_BOLD} + ${EndIf} + IntOp $R1 $NewComponents & ${ComponentPDFDocs} + ${If} $R1 <> 0 + !InsertMacro SetSectionFlag ${SecDocPdf} ${SF_BOLD} + ${Else} + !InsertMacro ClearSectionFlag ${SecDocPdf} ${SF_BOLD} + ${EndIf} + IntOp $R1 $NewComponents & ${ComponentHTMLDocs} + ${If} $R1 <> 0 + !InsertMacro SetSectionFlag ${SecDocHtml} ${SF_BOLD} + ${Else} + !InsertMacro ClearSectionFlag ${SecDocHtml} ${SF_BOLD} + ${EndIf} + ${EndIf} + + GetDlgItem $R0 $HWNDPARENT 1 + + IntOp $R1 $NewComponents & ${ComponentsRequiringUserConfig} + ${If} $R1 = 0 + SendMessage $R0 ${WM_SETTEXT} 0 "STR:Install" + ${Else} + SendMessage $R0 ${WM_SETTEXT} 0 "STR:&Next >" + ${EndIf} + + Pop $R1 + Pop $R0 +FunctionEnd + +!include "InstallType.nsh" +!include "ConfigPage1.nsh" +!include "ConfigPage2.nsh" +!include "DumpLog.nsh" diff --git a/src/win32/lib/Makefile b/src/win32/lib/Makefile new file mode 100644 index 00000000..87409705 --- /dev/null +++ b/src/win32/lib/Makefile @@ -0,0 +1,182 @@ +# +# Copyright (C) 2000-2018 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# +# Makefile for win32 bacula executables +# Using MinGW cross-compiler on GNU/Linux +# +# Written by Robert Nelson, June 2006 +# + +include ../Makefile.inc + +INCLUDES = \ + $(INCLUDE_PTHREADS) \ + $(INCLUDE_BACULA) \ + $(INCLUDE_ZLIB) \ + $(INCLUDE_OPENSSL) + +DEFINES = \ + $(HAVES) + +vpath %.c $(BUILDDIR)/compat $(MAINDIR)/src/findlib $(MAINDIR)/src/lib +vpath %.cpp $(BUILDDIR)/compat $(MAINDIR)/src/findlib $(MAINDIR)/src/lib + +###################################################################### + +# Files files in src/lib + +COMPAT_OBJS = \ + $(OBJDIR)/compat.o \ + $(OBJDIR)/print.o \ + $(OBJDIR)/winapi.o + +# $(OBJDIR)/getopt.o \ + +FIND_OBJS = \ + $(OBJDIR)/attribs.o \ + $(OBJDIR)/bfile.o \ + $(OBJDIR)/win32filter.o \ + $(OBJDIR)/create_file.o \ + $(OBJDIR)/drivetype.o \ + $(OBJDIR)/enable_priv.o \ + $(OBJDIR)/find.o \ + $(OBJDIR)/find_one.o \ + $(OBJDIR)/fstype.o \ + $(OBJDIR)/mkpath.o \ + $(OBJDIR)/match.o + +LIB_OBJS = \ + $(OBJDIR)/address_conf.o \ + $(OBJDIR)/alist.o \ + $(OBJDIR)/attr.o \ + $(OBJDIR)/base64.o \ + $(OBJDIR)/berrno.o \ + $(OBJDIR)/bget_msg.o \ + $(OBJDIR)/bnet.o \ + $(OBJDIR)/bnet_server.o \ + $(OBJDIR)/bpipe.o \ + $(OBJDIR)/breg.o \ + $(OBJDIR)/bregex.o \ + $(OBJDIR)/bsock.o \ + $(OBJDIR)/bsockcore.o \ + $(OBJDIR)/bsnprintf.o \ + $(OBJDIR)/bsys.o \ + $(OBJDIR)/btime.o \ + $(OBJDIR)/btimers.o \ + $(OBJDIR)/cram-md5.o \ + $(OBJDIR)/crc32.o \ + $(OBJDIR)/crypto.o \ + $(OBJDIR)/daemon.o \ + $(OBJDIR)/dlist.o \ + $(OBJDIR)/edit.o \ + $(OBJDIR)/fnmatch.o \ + $(OBJDIR)/guid_to_name.o \ + $(OBJDIR)/hmac.o \ + $(OBJDIR)/htable.o \ + $(OBJDIR)/jcr.o \ + $(OBJDIR)/lex.o \ + $(OBJDIR)/lz4.o \ + $(OBJDIR)/md5.o \ + $(OBJDIR)/mem_pool.o \ + $(OBJDIR)/message.o \ + $(OBJDIR)/openssl.o \ + $(OBJDIR)/plugins.o \ + $(OBJDIR)/priv.o \ + $(OBJDIR)/queue.o \ + $(OBJDIR)/rblist.o \ + $(OBJDIR)/runscript.o \ + $(OBJDIR)/rwlock.o \ + $(OBJDIR)/scan.o \ + $(OBJDIR)/serial.o \ + 
$(OBJDIR)/sha1.o \ + $(OBJDIR)/signal.o \ + $(OBJDIR)/smartall.o \ + $(OBJDIR)/tls.o \ + $(OBJDIR)/tree.o \ + $(OBJDIR)/util.o \ + $(OBJDIR)/var.o \ + $(OBJDIR)/watchdog.o \ + $(OBJDIR)/workq.o \ + $(OBJDIR)/binflate.o \ + $(OBJDIR)/ini.o \ + $(OBJDIR)/output.o \ + $(OBJDIR)/sellist.o \ + $(OBJDIR)/lockmgr.o \ + $(OBJDIR)/worker.o \ + $(OBJDIR)/flist.o + +DLL_OBJS = \ + $(COMPAT_OBJS) $(FIND_OBJS) $(LIB_OBJS) + +STATIC_OBJS = \ + $(OBJDIR)/parse_conf.o \ + $(OBJDIR)/res.o + +ALL_OBJS = \ + $(DLL_OBJS) $(STATIC_OBJS) + +LIBS_DLL = \ + $(LIBS_SSL) \ + $(LIBS_CRYPTO) \ + $(LIBS_PTHREADS) \ + $(LIBS_ZLIB) \ + $(LIBS_NETWORK) \ + -lole32 \ + -loleaut32 \ + -lws2_32 \ + -luuid + +###################################################################### + +# Targets + +.PHONY: all clean distclean + +all: $(BINDIR)/bacula.dll $(LIBDIR)/libbacula.a + +distclean: clean + +clean: + @echo "Cleaning `pwd`" + $(call clean_obj,$(ALL_OBJS)) + $(call clean_exe,$(BINDIR)/bacula.dll) + $(ECHO_CMD)rm -f $(OBJDIR)/bacula.a $(LIBDIR)/libbacula.a + $(ECHO_CMD)rm -rf $(OBJDIRS) + +# +# Rules for generating from ../lib +# + +$(LIBDIR)/libbacula.a: DLL_DEFINE=USING_DLL + +$(LIBDIR)/libbacula.a: $(BINDIR)/bacula.dll $(STATIC_OBJS) + @echo "Updating archive $@" + $(call checkdir,$@) + $(ECHO_CMD)cp $(OBJDIR)/bacula.a $@ + $(ECHO_CMD)$(AR) rsv $@ $(filter %.o,$^) + +$(BINDIR)/bacula.dll: DLL_DEFINE=BUILDING_DLL + +$(BINDIR)/bacula.dll: $(DLL_OBJS) bacula$(WIN_VERSION).def + @echo "Linking $@" + $(call checkdir,$@) + $(ECHO_CMD)$(CXX) $(LDFLAGS) -mdll -mwindows -Wl,--out-implib,$(OBJDIR)/bacula.a $^ $(LIBS_DLL) -o $@ + +bacula$(WIN_VERSION).def: $(DLL_OBJS) + ./make_def$(WIN_VERSION) $(DLL_OBJS) >bacula$(WIN_VERSION).def.new && \ + mv bacula$(WIN_VERSION).def.new bacula$(WIN_VERSION).def + + +include ../Makefile.rules + +$(OBJDIR)/%.o: %.c + @echo "Compiling $<" + $(call checkdir,$@) + $(ECHO_CMD)$(CXX) -D$(DLL_DEFINE) $(CFLAGS) -c $< -o $@ + +$(OBJDIR)/%.o: %.cpp + @echo "Compiling $<" + $(call checkdir,$@) + $(ECHO_CMD)$(CXX) -D$(DLL_DEFINE) $(CFLAGS) -c $< -o $@ diff --git a/src/win32/lib/bacula32.def b/src/win32/lib/bacula32.def new file mode 100644 index 00000000..af2ce8c4 --- /dev/null +++ b/src/win32/lib/bacula32.def @@ -0,0 +1,1063 @@ +LIBRARY bacula.dll +EXPORTS + +; address_conf.o +_Z14free_addressesP5dlist +_Z15store_addressesP13s_lex_contextP8RES_ITEMii +_Z17get_first_addressP5dlistPci +_Z17sockaddr_get_portPK8sockaddr +_Z17sockaddr_to_asciiPK8sockaddriPci +_Z19build_addresses_strP5dlistPci +_Z20store_addresses_portP13s_lex_contextP8RES_ITEMii +_Z22init_default_addressesPP5dlisti +_Z23store_addresses_addressP13s_lex_contextP8RES_ITEMii +_Z24get_first_port_net_orderP5dlist +_Z25get_first_port_host_orderP5dlist +_Z26remove_duplicate_addressesP5dlist +_Z27sockaddr_get_port_net_orderPK8sockaddr +_ZN6IPADDR11get_addressEPci +_ZN6IPADDR12get_sockaddrEv +_ZN6IPADDR12set_addr_anyEv +_ZN6IPADDR12set_port_netEt +_ZN6IPADDR16get_sockaddr_lenEv +_ZN6IPADDR17build_address_strEPci +_ZN6IPADDR8set_typeENS_6i_typeE +_ZN6IPADDR9copy_addrEPS_ +_ZN6IPADDR9set_addr4EP7in_addr +_ZN6IPADDR9set_addr6EP8in6_addr +_ZN6IPADDRC1Ei +_ZN6IPADDRC1ERKS_ +_ZN6IPADDRC2Ei +_ZN6IPADDRC2ERKS_ +_ZNK6IPADDR10get_familyEv +_ZNK6IPADDR18get_port_net_orderEv +_ZNK6IPADDR8get_typeEv + +; alist.o +_ZN5alist4lastEv +_ZN5alist4nextEv +_ZN5alist4prevEv +_ZN5alist5firstEv +_ZN5alist7prependEPv +_ZN5ilist3putEiPv +_ZN8baselist11remove_itemEi +_ZN8baselist3getEi +_ZN8baselist6appendEPv +_ZN8baselist7destroyEv +_ZN8baselist9grow_listEv + +; attr.o 
+_Z15print_ls_outputP3JCRP4ATTRi +_Z24build_attr_output_fnamesP3JCRP4ATTR +_Z24unpack_attributes_recordP3JCRiPciP4ATTR +_Z7sprintfPcPKcz +_Z8new_attrP3JCR +_Z9free_attrP4ATTR + +; attribs.o +_Z11decode_statPcP4statiPi +_Z11encode_statPcP4statiii +_Z11set_own_modP4ATTRPcjjt +_Z13decode_LinkFIPcP4stati +_Z14set_attributesP3JCRP4ATTRP5BFILE +_Z16encode_attribsExP3JCRPcP6FF_PKT +_Z16set_mod_own_timeP3JCRP5BFILEP4ATTR +_Z18select_data_streamP6FF_PKT +_Z9win_errorP3JCRiPKcPc +_Z9win_errorP3JCRPKcm +_Z9win_errorP3JCRPKcPc + +; base64.o +_Z11base64_initv +_Z11from_base64PxPc +_Z13base64_to_binPciS_i +_Z13bin_to_base64PciS_ii +_Z9to_base64xPc + +; berrno.o +_ZN6berrno20format_win32_messageEv +_ZN6berrno9bstrerrorEv + +; bfile.o +_Z10set_fattrsP5BFILEP4stat +_Z11bget_handleP5BFILE +_Z11int32_LE2BEPii +_Z11int64_LE2BEPxx +_Z14have_win32_apiv +_Z14is_plugin_dataP5BFILE +_Z14set_cmd_pluginP5BFILEP3JCR +_Z15is_win32_streami +_Z15stream_to_asciii +_Z16set_win32_backupP5BFILE +_Z18is_portable_backupP5BFILE +_Z19set_portable_backupP5BFILE +_Z26processWin32BackupAPIBlockP5BFILEPvi +_Z27is_restore_stream_supportedi +_Z5binitP5BFILE +_Z5bopenP5BFILEPKcyt +_Z5breadP5BFILEPvj +_Z6bcloseP5BFILE +_Z6blseekP5BFILExi +_Z6bwriteP5BFILEPvj +_Z7sprintfPcPKcz +_Z8is_bopenP5BFILE +_Z9pause_msgPKcS0_iS0_ + +; bget_msg.o +_Z8bget_msgP5BSOCK +_ZN6GetMsg14wait_read_sockEi +_ZN6GetMsg15start_read_sockEv +_ZN6GetMsg19do_read_sock_threadEv +_ZN6GetMsg7is_doneEv +_ZN6GetMsg7is_stopEv +_ZN6GetMsg8bget_msgEPP8bmessage +_ZN6GetMsg8is_errorEv +_ZN6GetMsgC1EP3JCRP5BSOCKPKci +_ZN6GetMsgC2EP3JCRP5BSOCKPKci +_ZN6GetMsgD0Ev +_ZN6GetMsgD1Ev +_ZN6GetMsgD2Ev +_ZN8bmessage4swapEP5BSOCK +_ZN8bmessageC1Ei +_ZN8bmessageC2Ei +_ZN8bmessageD0Ev +_ZN8bmessageD1Ev +_ZN8bmessageD2Ev + +; binflate.o +_Z8ZdeflatePciS_Ri +_Z8ZinflatePciS_Ri + +; bnet.o +_Z11resolv_hostiPKcP5dlist +_Z15bnet_tls_clientP11TLS_ContextP5BSOCKP5alist +_Z15bnet_tls_serverP11TLS_ContextP5BSOCKP5alist +_Z16set_socket_errnoi +_Z17bnet_host2ipaddrsPKciPS0_ +_Z17bnet_sig_to_asciii + +; bnet_server.o +_Z18bnet_thread_serverP5dlistiP9workq_tagPFPvS3_E +_Z23bnet_stop_thread_server14ptw32_handle_t + +; bpipe.o +_Z11run_programPciRS_ +_Z23run_program_full_outputPciRS_PS_ + +; breg.o +_Z11new_bregexpPKc +_Z12free_bregexpP7BREGEXP +_Z12get_bregexpsPKc +_Z13free_bregexpsP5alist +_Z14apply_bregexpsPKcP4statP5alistPPc +_Z14apply_bregexpsPKcP5alistPPc +_Z19bregexp_build_wherePciS_S_S_ +_Z21bregexp_escape_stringPcPKcc +_Z28bregexp_get_build_where_sizePcS_S_ +_Z6printfPKcz +_ZN7BREGEXP10edit_substEPKcP4statP12b_regmatch_t +_ZN7BREGEXP12return_fnameEPKci +_ZN7BREGEXP14extract_regexpEPKc +_ZN7BREGEXP16compute_dest_lenEPKcP12b_regmatch_t +_ZN7BREGEXP5debugEv +_ZN7BREGEXP7replaceEPKcP4stat + +; bregex.o +b_re_compile_fastmap +b_re_compile_initialize +b_re_compile_pattern +b_regcomp +b_regerror +b_regexec +b_regfree +b_re_match +b_re_search +b_re_set_syntax +_Z24re_registers_to_regmatchP12re_registersP12b_regmatch_tj + +; bsnprintf.o +_Z10bvsnprintfPciPKcS_ +_Z9bsnprintfPciPKcz + +; bsock.o +_Z10init_bsockP3JCRiPKcS2_iP8sockaddr +_Z9dup_bsockP5BSOCK +_Z9new_bsockv +_ZN5BSOCK12write_nbytesEPci +_ZN5BSOCK13comm_compressEv +_ZN5BSOCK21authenticate_directorEPKcS1_P11TLS_ContextPci +_ZN5BSOCK4dumpEv +_ZN5BSOCK4initEv +_ZN5BSOCK4openEP3JCRPKcPcS4_ixPi +_ZN5BSOCK4recvEv +_ZN5BSOCK4sendEi +_ZN5BSOCK4sendEv +_ZN5BSOCK5closeEv +_ZN5BSOCK6signalEi +_ZN5BSOCK7despoolEPFviEi +_ZN5BSOCK8_destroyEv +_ZN5BSOCKC1Ei +_ZN5BSOCKC1Ev +_ZN5BSOCKC2Ei +_ZN5BSOCKC2Ev +_ZN5BSOCKD0Ev +_ZN5BSOCKD1Ev +_ZN5BSOCKD2Ev + +; 
bsockcore.o +_Z14dump_bsock_msgijPKcjijPci +_ZN13BSOCKCallbackC1Ev +_ZN13BSOCKCallbackC2Ev +_ZN13BSOCKCallbackD0Ev +_ZN13BSOCKCallbackD1Ev +_ZN13BSOCKCallbackD2Ev +_ZN9BSOCKCORE11read_nbytesEPci +_ZN9BSOCKCORE11set_lockingEv +_ZN9BSOCKCORE12set_blockingEv +_ZN9BSOCKCORE12set_killableEb +_ZN9BSOCKCORE12write_nbytesEPci +_ZN9BSOCKCORE13clear_lockingEv +_ZN9BSOCKCORE14wait_data_intrEii +_ZN9BSOCKCORE15control_bwlimitEi +_ZN9BSOCKCORE15set_buffer_sizeEji +_ZN9BSOCKCORE15set_nonblockingEv +_ZN9BSOCKCORE16restore_blockingEi +_ZN9BSOCKCORE18set_source_addressEP5dlist +_ZN9BSOCKCORE4dumpEv +_ZN9BSOCKCORE4initEv +_ZN9BSOCKCORE4openEP3JCRPKcPcS4_ixPi +_ZN9BSOCKCORE4sendEv +_ZN9BSOCKCORE5closeEv +_ZN9BSOCKCORE5fsendEPKcz +_ZN9BSOCKCORE5recvnEi +_ZN9BSOCKCORE6cancelEv +_ZN9BSOCKCORE7connectEP3JCRixxPKcPcS4_ii +_ZN9BSOCKCORE7destroyEv +_ZN9BSOCKCORE8_destroyEv +_ZN9BSOCKCORE8fin_initEP3JCRiPKcS3_iP8sockaddr +_ZN9BSOCKCORE8free_tlsEv +_ZN9BSOCKCORE8get_peerEPci +_ZN9BSOCKCORE9bstrerrorEv +_ZN9BSOCKCORE9wait_dataEii +_ZN9BSOCKCOREC1Ev +_ZN9BSOCKCOREC2Ev +_ZN9BSOCKCORED0Ev +_ZN9BSOCKCORED1Ev +_ZN9BSOCKCORED2Ev + +; bsys.o +_Z10b_strerroriPcj +_Z11bmicrosleepii +_Z11bstrcasecmpPKcS0_ +_Z11quote_wherePcPKc +_Z11stack_tracev +_Z12fd_wait_datai12fd_wait_modeii +_Z12quote_stringPcPKc +_Z12safer_unlinkPKcS0_ +_Z15create_pid_filePcPKci +_Z15delete_pid_filePcPKci +_Z15escape_filenamePKc +_Z15read_state_filePcPKci +_Z16create_lock_filePcPKcS1_PS_Pi +_Z16write_state_filePcPKci +_Z17fs_get_free_spacePKcPxS1_ +_Z20make_unique_filenamePPciS_ +_Z5bfreePv +_Z6bfgetsPciP6_iobuf +_Z6bfgetsRPcP6_iobuf +_Z6bfopenPKcS0_ +_Z7bacceptiP8sockaddrPi +_Z7bcallocjj +_Z7bstrcmpPKcS0_ +_Z7cstrlenPKc +_Z7ucfirstPcPKci +_Z8b_mallocPKcij +_Z8breallocPvj +_Z8bstrncatPcPKci +_Z8bstrncatPcR8POOL_MEMi +_Z8bstrncpyPcPKci +_Z8bstrncpyPcR8POOL_MEMi +_Z8copyfilePKcS0_ +_Z9setup_envPPc + +; btime.o +_Z10bstrftimesPcix +_Z11date_decodedPjPhS0_ +_Z11date_encodejhh +_Z11time_decodedPhS_S_Pf +_Z11time_encodehhhf +_Z12bstrftime_dnPcix +_Z12bstrftime_ncPcix +_Z12bstrftime_nyPcix +_Z12str_to_utimePc +_Z13btime_to_unixx +_Z14btime_to_utimex +_Z16date_time_decodeP9date_timePjPhS2_S2_S2_S2_Pf +_Z16date_time_encodeP9date_timejhhhhhf +_Z16get_current_timeP9date_time +_Z17date_time_compareP9date_timeS0_ +_Z17get_current_btimev +_Z6tm_womii +_Z6tm_woyl +_Z7tm_ldomii +_Z9bstrftimePcix +_Z9bstrutimePcix +_Z9tm_decodeP9date_timeP2tm +_Z9tm_encodeP9date_timeP2tm + +; btimers.o +_Z16stop_bsock_timerP8btimer_t +_Z16stop_child_timerP8btimer_t +_Z17start_bsock_timerP5BSOCKj +_Z17start_bsock_timerP9BSOCKCOREj +_Z17start_child_timerP3JCRij +_Z17stop_thread_timerP8btimer_t +_Z18_start_bsock_timerP5BSOCKj +_Z18start_thread_timerP3JCR14ptw32_handle_tj + +; compat.o +mkstemp +nanosleep +umask +_Z10binet_ptoniPKcPv +_Z10open_bpipePciPKcPS_ +_Z11close_bpipeP5BPIPE +_Z11close_wpipeP5BPIPE +_Z11LogErrorMsgPKc +_Z11malloc_trimi +_Z11strncasecmpPKcS0_i +_Z11win32_cgetsPci +_Z11win32_chdirPKc +_Z11win32_chmodPKct +_Z11win32_fputsPKcP6_iobuf +_Z11win32_mkdirPKc +_Z12gettimeofdayP7timevalP8timezone +_Z12init_signalsPFviE +_Z12UTF8_2_wcharPPcPKc +_Z12wchar_2_UTF8PcPKwi +_Z12wchar_2_UTF8PPcPKw +_Z12win32_getcwdPci +_Z12win32_unlinkPKc +_Z15get_memory_infoPci +_Z15init_stack_dumpv +_Z15win32_ftruncateix +_Z16wchar_win32_pathPKcPw +_Z17SetVSSPathConvertPFivEPFiPKcPciEPFiPKwPwiE +_Z18CreateChildProcessPKcPvS1_S1_ +_Z18cvt_ftime_to_utimeRK9_FILETIME +_Z18cvt_utime_to_ftimeRKlR9_FILETIME +_Z18GetApplicationNamePKcPPcPS0_ +_Z18unix_name_to_win32PPcPKc +_Z20allow_os_suspensionsv 
+_Z21Win32ConvCleanupCachev +_Z22prevent_os_suspensionsv +_Z28make_win32_path_UTF8_2_wcharPPcPKcPi +_Z4forkv +_Z4killii +_Z4pipePi +_Z4statPKcP4stat +_Z5chownPKcjj +_Z5dlsymPvPKc +_Z5fcntlii +_Z5fcntliil +_Z5fstatiP4stat +_Z5hstatPvP4stat +_Z5lstatPKcP4stat +_Z5sleepi +_Z6dlopenPKci +_Z6execvpPKcPPc +_Z6lchownPKcjj +_Z6printfPKcz +_Z6randomv +_Z6syslogiPKcz +_Z7dlclosePv +_Z7dlerrorv +_Z7geteuidv +_Z7opendirPKc +_Z7openlogPKcii +_Z7srandomj +_Z7symlinkPKcS0_ +_Z7waitpidiPii +_Z8breaddirPvRPc +_Z8closedirPv +_Z8closelogv +_Z8getArgv0PKc +_Z8getgrgidj +_Z8getpwuidj +_Z8pathconfPKci +_Z8readlinkPKcPci +_Z8snprintfPcjPKcz +_Z8WSA_Initv +_Z9ErrorExitPKc +_Z9inet_atonPKcP7in_addr +_ZN6winverC1Ev +_ZN6winverC2Ev + +; cram-md5.o +_Z16cram_md5_respondP5BSOCKPKcPiS3_ +_Z18cram_md5_challengeP5BSOCKPKcii +_ZN5BSOCK4sendEv + +; crc32.o +_Z6bcrc32Phi + +; create_file.o +_Z11create_fileP3JCRP4ATTRP5BFILEi + +; crypto.o +_Z13CryptoData_itv +_Z13SignerInfo_itv +_Z14CryptoData_newv +_Z14d2i_CryptoDataPP10CryptoDataPPKhl +_Z14d2i_SignerInfoPP10SignerInfoPPKhl +_Z14i2d_CryptoDataP10CryptoDataPPh +_Z14i2d_SignerInfoP10SignerInfoPPh +_Z14SignerInfo_newv +_Z15CryptoData_freeP10CryptoData +_Z15crypto_sign_newP3JCR +_Z15crypto_strerror14crypto_error_t +_Z15SignerInfo_freeP10SignerInfo +_Z16crypto_sign_freeP9Signature +_Z16RecipientInfo_itv +_Z16SignatureData_itv +_Z17crypto_cipher_newP14Crypto_SessionbPj +_Z17crypto_digest_newP3JCR15crypto_digest_t +_Z17d2i_RecipientInfoPP13RecipientInfoPPKhl +_Z17d2i_SignatureDataPP13SignatureDataPPKhl +_Z17i2d_RecipientInfoP13RecipientInfoPPh +_Z17i2d_SignatureDataP13SignatureDataPPh +_Z17RecipientInfo_newv +_Z17SignatureData_newv +_Z18crypto_cipher_freeP14Cipher_Context +_Z18crypto_digest_freeP6Digest +_Z18crypto_digest_nameP6Digest +_Z18crypto_keypair_dupP12X509_Keypair +_Z18crypto_keypair_newv +_Z18crypto_session_new15crypto_cipher_tP5alist +_Z18crypto_sign_decodeP3JCRPKhj +_Z18crypto_sign_encodeP9SignaturePhPj +_Z18crypto_sign_verifyP9SignatureP12X509_KeypairP6Digest +_Z18RecipientInfo_freeP13RecipientInfo +_Z18SignatureData_freeP13SignatureData +_Z19crypto_keypair_freeP12X509_Keypair +_Z19crypto_session_freeP14Crypto_Session +_Z20crypto_cipher_updateP14Cipher_ContextPKhjS2_Pj +_Z20crypto_digest_updateP6DigestPKhj +_Z21crypto_session_decodePKhjP5alistPP14Crypto_Session +_Z21crypto_session_encodeP14Crypto_SessionPhPj +_Z22crypto_cipher_finalizeP14Cipher_ContextPhPj +_Z22crypto_digest_finalizeP6DigestPhPj +_Z22crypto_keypair_has_keyPKc +_Z22crypto_sign_add_signerP9SignatureP6DigestP12X509_Keypair +_Z22crypto_sign_get_digestP9SignatureP12X509_KeypairR15crypto_digest_tPP6Digest +_Z23crypto_keypair_load_keyP12X509_KeypairPKcPFiPciPKvES5_ +_Z24crypto_keypair_load_certP12X509_KeypairPKc +_Z25crypto_digest_stream_typei +_Z27crypto_default_pem_callbackPciPKv + +; daemon.o +_Z12daemon_startv + +; dlist.o +_Z15new_dlistStringPKc +_Z15new_dlistStringPKci +_ZN5dlist12insert_afterEPvS0_ +_ZN5dlist13binary_insertEPvPFiS0_S0_E +_ZN5dlist13binary_searchEPvPFiS0_S0_E +_ZN5dlist13insert_beforeEPvS0_ +_ZN5dlist22binary_insert_multipleEPvPFiS0_S0_E +_ZN5dlist4nextEPv +_ZN5dlist4prevEPv +_ZN5dlist6appendEPv +_ZN5dlist6removeEPv +_ZN5dlist7destroyEv +_ZN5dlist7prependEPv + +; drivetype.o +_Z9drivetypePKcPci + +; edit.o +_Z10add_commasPcS_ +_Z10edit_int64xPc +_Z10edit_utimexPci +_Z11edit_uint64yPc +_Z11is_a_numberPKc +_Z12str_to_int64Pc +_Z13is_an_integerPKc +_Z13is_name_validPKcPPc +_Z13str_to_uint64Pc +_Z14size_to_uint64PciPy +_Z15speed_to_uint64PciPy +_Z16is_a_number_listPKc 
+_Z17duration_to_utimePcPx +_Z22edit_int64_with_commasxPc +_Z23edit_uint64_with_commasyPc +_Z23edit_uint64_with_suffixyPc + +; enable_priv.o +_Z24enable_backup_privilegesP3JCRi + +; find.o +_Z10find_filesP3JCRP6FF_PKTPFiS0_S2_bES4_ +_Z11accept_fileP6FF_PKT +_Z13is_in_filesetP6FF_PKT +_Z15init_find_filesv +_Z15term_find_filesP6FF_PKT +_Z16set_find_optionsP6FF_PKTil +_Z25set_find_changed_functionP6FF_PKTPFbP3JCRS0_E +_Z26set_find_snapshot_functionP6FF_PKTPFbP3JCRS0_P5dlistP11dlistStringE + +; find_one.o +_Z13check_changesP3JCRP6FF_PKT +_Z13find_one_fileP3JCRP6FF_PKTPFiS0_S2_bEPcjb +_Z13term_find_oneP6FF_PKT +_Z16has_file_changedP3JCRP6FF_PKT +_Z22ff_pkt_set_link_digestP6FF_PKTiPKcj + +; flist.o +_ZN5flist5queueEPv +_ZN5flist7dequeueEv +_ZN5flist7destroyEv + +; fnmatch.o +fnmatch + +; fstype.o +_Z13add_mtab_itemPvP4statPKcS3_S3_S3_ +_Z6fstypeP6FF_PKTPci +_Z9read_mtabPFvPvP4statPKcS3_S3_S3_ES_ + +; guid_to_name.o +_Z13new_guid_listv +_Z14free_guid_listP9guid_list +_ZN9guid_list11gid_to_nameEjPci +_ZN9guid_list11uid_to_nameEjPci + +; hmac.o +_Z8hmac_md5PhiS_iS_ + +; htable.o +_Z6printfPKcz +_ZN6htable10grow_tableEv +_ZN6htable10hash_indexEPc +_ZN6htable10hash_indexEy +_ZN6htable11hash_mallocEi +_ZN6htable13hash_big_freeEv +_ZN6htable14malloc_big_bufEi +_ZN6htable4initEPvS0_i +_ZN6htable4nextEv +_ZN6htable4sizeEv +_ZN6htable5firstEv +_ZN6htable5statsEv +_ZN6htable6insertEPcPv +_ZN6htable6insertEyPv +_ZN6htable6lookupEPc +_ZN6htable6lookupEy +_ZN6htable7destroyEv +_ZN6htableC1EPvS0_i +_ZN6htableC2EPvS0_i + +; ini.o +_Z13ini_store_strP13s_lex_contextP10ConfigFileP9ini_items +_Z14ini_store_boolP13s_lex_contextP10ConfigFileP9ini_items +_Z14ini_store_dateP13s_lex_contextP10ConfigFileP9ini_items +_Z14ini_store_nameP13s_lex_contextP10ConfigFileP9ini_items +_Z15ini_store_int32P13s_lex_contextP10ConfigFileP9ini_items +_Z15ini_store_int64P13s_lex_contextP10ConfigFileP9ini_items +_Z16ini_store_pint32P13s_lex_contextP10ConfigFileP9ini_items +_Z16ini_store_pint64P13s_lex_contextP10ConfigFileP9ini_items +_Z18ini_get_store_codePFbP13s_lex_contextP10ConfigFileP9ini_itemsE +_Z19ini_store_alist_strP13s_lex_contextP10ConfigFileP9ini_items +_Z21ini_get_store_handlerPKc +_ZN10ConfigFile10free_itemsEv +_ZN10ConfigFile11clear_itemsEv +_ZN10ConfigFile11dump_stringEPKci +_ZN10ConfigFile11unserializeEPKc +_ZN10ConfigFile12dump_resultsEPPc +_ZN10ConfigFile5parseEPKc +_ZN10ConfigFile5parseEv +_ZN10ConfigFile8get_itemEPKc +_ZN10ConfigFile9parse_bufEPKc +_ZN10ConfigFile9serializeEPKc +_ZN10ConfigFile9serializeEPPc +_ZN10ConfigFileD0Ev +_ZN10ConfigFileD1Ev + +; jcr.o +timeout_handler +_Z10b_free_jcrPKciP3JCR +_Z11unlock_jobsv +_Z12jcr_walk_endP3JCR +_Z12job_end_pushP3JCRPFvS0_PvES1_ +_Z13dbg_print_jcrP6_iobuf +_Z13get_jcr_by_idj +_Z13jcr_walk_nextP3JCR +_Z14create_jcr_keyv +_Z14jcr_walk_startv +_Z14set_jcr_in_tsdP3JCR +_Z16dbg_jcr_add_hookPFvP3JCRP6_iobufE +_Z16get_jcr_from_tsdv +_Z18get_jcr_by_sessionjj +_Z18get_jobid_from_tid14ptw32_handle_t +_Z18get_jobid_from_tsdv +_Z18init_jcr_subsystemv +_Z19init_last_jobs_listv +_Z19lock_last_jobs_listv +_Z19read_last_jobs_listiy +_Z19remove_jcr_from_tsdP3JCR +_Z19term_last_jobs_listv +_Z20get_jcr_by_full_namePc +_Z20write_last_jobs_listiy +_Z21unlock_last_jobs_listv +_Z23get_jcr_by_partial_namePc +_Z24get_next_jobid_from_listPPcPj +_Z7fprintfP6_iobufPKcz +_Z7new_jcriPFvP3JCRE +_Z9job_countv +_Z9lock_jobsv +_ZN3JCR12setJobStatusEi +_ZN3JCR12set_killableEb +_ZN3JCR13sendJobStatusEi +_ZN3JCR13sendJobStatusEv +_ZN3JCR13setJobStartedEv +_ZN3JCR14can_be_stoppedEv +_ZN3JCR14get_ActionNameEb 
+_ZN3JCR17get_OperationNameEv +_ZN3JCR21my_thread_send_signalEi +_ZN3JCR8JobReadsEv + +; lex.o +_Z11scan_to_eolP13s_lex_context +_Z12lex_get_charP13s_lex_context +_Z12lex_open_bufP13s_lex_contextPKcPFvS2_iS0_S2_zE +_Z13lex_check_eolP13s_lex_context +_Z13lex_get_tokenP13s_lex_contexti +_Z13lex_open_fileP13s_lex_contextPKcPFvS2_iS0_S2_zE +_Z14lex_close_fileP13s_lex_context +_Z14lex_tok_to_stri +_Z14lex_unget_charP13s_lex_context +_Z20scan_to_next_not_eolP13s_lex_context +_Z25lex_store_clear_passwordsP13s_lex_context +_Z29lex_set_default_error_handlerP13s_lex_context +_Z32lex_set_error_handler_error_typeP13s_lex_contexti + +; lockmgr.o +_Z14dbg_print_lockP6_iobuf +_Z18bthread_change_uidjj +_Z21bthread_get_thread_idv +_Z6lmgr_pPP16pthread_mutex_t_ +_Z6lmgr_vPP16pthread_mutex_t_ + +; lz4.o +LZ4_compress +LZ4_compressBound +LZ4_compress_continue +LZ4_compress_default +LZ4_compress_destSize +LZ4_compress_fast +LZ4_compress_fast_continue +LZ4_compress_fast_extState +LZ4_compress_limitedOutput +LZ4_compress_limitedOutput_continue +LZ4_compress_limitedOutput_withState +LZ4_compress_withState +LZ4_create +LZ4_createStream +LZ4_createStreamDecode +LZ4_decompress_fast +LZ4_decompress_fast_continue +LZ4_decompress_fast_usingDict +LZ4_decompress_fast_withPrefix64k +LZ4_decompress_safe +LZ4_decompress_safe_continue +LZ4_decompress_safe_partial +LZ4_decompress_safe_usingDict +LZ4_decompress_safe_withPrefix64k +LZ4_freeStream +LZ4_freeStreamDecode +LZ4_loadDict +LZ4_resetStream +LZ4_resetStreamState +LZ4_saveDict +LZ4_setStreamDecode +LZ4_sizeofState +LZ4_sizeofStreamState +LZ4_slideInputBuffer +LZ4_uncompress +LZ4_uncompress_unknownOutputSize +LZ4_versionNumber +LZ4_versionString +_Z23LZ4_compress_fast_forcePKcPciii +_Z25LZ4_compress_forceExtDictP12LZ4_stream_uPKcPci +_Z32LZ4_decompress_safe_forceExtDictPKcPciiS0_i + +; match.o +_Z11match_filesP3JCRP6FF_PKTPFiS0_S2_bE +_Z16file_is_excludedP6FF_PKTPKc +_Z16file_is_includedP6FF_PKTPKc +_Z22get_next_included_fileP6FF_PKTP15s_included_file +_Z25add_fname_to_exclude_listP6FF_PKTPKc +_Z25add_fname_to_include_listP6FF_PKTiPKc +_Z26term_include_exclude_filesP6FF_PKT + +; md5.o +_Z11byteReversePhj +_Z12MD5TransformPjS_ +_Z7MD5InitP10MD5Context +_Z8MD5FinalPhP10MD5Context +_Z9MD5UpdateP10MD5ContextPhj + +; mem_pool.o +_Z13sm_get_memoryPKcii +_Z17close_memory_poolv +_Z18sm_get_pool_memoryPKcii +_Z19sm_free_pool_memoryPKciPc +_Z21sm_sizeof_pool_memoryPKciPc +_Z22garbage_collect_memoryv +_Z22sm_realloc_pool_memoryPKciPci +_Z23print_memory_pool_statsv +_Z25sm_check_pool_memory_sizePKciPci +_Z27garbage_collect_memory_poolv +_Z9pm_memcpyPPcPKci +_Z9pm_memcpyR8POOL_MEMPKci +_Z9pm_memcpyRPcPKci +_Z9pm_memcpyRPcR8POOL_MEMi +_Z9pm_strcatPPcPKc +_Z9pm_strcatR8POOL_MEMPKc +_Z9pm_strcatR8POOL_MEMS0_ +_Z9pm_strcatRPcPKc +_Z9pm_strcatRPcR8POOL_MEM +_Z9pm_strcpyPPcPKc +_Z9pm_strcpyR8POOL_MEMPKc +_Z9pm_strcpyRPcPKc +_Z9pm_strcpyRPcR8POOL_MEM +_ZN8POOL_MEM10realloc_pmEi +_ZN8POOL_MEM6strcatEPKc +_ZN8POOL_MEM6strcpyEPKc +_ZN8POOL_MEM8max_sizeEv + +; message.o +_Z10get_blowupv +_Z10get_hangupv +_Z10my_name_isiPPcPKc +_Z10set_blowupi +_Z10set_hangupi +_Z12add_msg_destP4MSGSiiPcS1_ +_Z12get_basenamePKc +_Z12rem_msg_destP4MSGSiiPc +_Z13debug_get_tagjPPKc +_Z13free_msgs_resP4MSGS +_Z14debug_find_tagPKcbPx +_Z14set_assert_msgPKciS0_ +_Z15set_debug_flagsPc +_Z16debug_parse_tagsPKcPx +_Z16dequeue_messagesP3JCR +_Z16dispatch_messageP3JCRixPc +_Z16init_console_msgPKc +_Z18set_db_engine_namePKc +_Z19is_message_type_setP3JCRi +_Z19set_trace_for_toolsP6_iobuf +_Z20handle_hangup_blowupP3JCRjy 
+_Z21generate_daemon_eventP3JCRPKc +_Z23dequeue_daemon_messagesP3JCR +_Z25free_daemon_message_queuev +_Z25register_message_callbackPFviPcE +_Z26setup_daemon_message_queuev +_Z4JmsgP3JCRixPKcz +_Z4MmsgPPcPKcz +_Z4MmsgR8POOL_MEMPKcz +_Z4MmsgRPcPKcz +_Z4QmsgP3JCRixPKcz +_Z5d_msgPKcixS0_z +_Z5e_msgPKciiiS0_z +_Z5j_msgPKciP3JCRixS0_z +_Z5m_msgPKciPPcS0_z +_Z5m_msgPKciRPcS0_z +_Z5p_msgPKciiS0_z +_Z5q_msgPKciP3JCRixS0_z +_Z5t_msgPKcixS0_z +_Z6printfPKcz +_Z6vd_msgPKcixS0_Pc +_Z7fprintfP6_iobufPKcz +_Z7sprintfPcPKcz +_Z8init_msgP3JCRP4MSGSPFPcS0_PKcS3_iE +_Z8term_msgv +_Z9close_msgP3JCR +_Z9get_tracev +_Z9set_tracei +_ZN4MSGS15wait_not_in_useEv +_ZN4MSGS4lockEv +_ZN4MSGS6unlockEv +_ZN5BSOCK4sendEv + +; mkpath.o +_Z13path_list_addP3JCRjPc +_Z14free_path_listP3JCR +_Z16path_list_lookupP3JCRPc +_Z8makepathP4ATTRPKcttjji + +; openssl.o +_Z11init_cryptov +_Z14cleanup_cryptov +_Z19openssl_post_errorsiPKc +_Z19openssl_post_errorsP3JCRiPKc + +; output.o +_Z8snprintfPcjPKcz +_ZN12OutputWriter10get_outputEiz +_ZN12OutputWriter10get_outputEPcPS0_i +_ZN12OutputWriter10get_outputEPPciz +_ZN12OutputWriter10start_listEPKcb +_ZN12OutputWriter11get_optionsEPc +_ZN12OutputWriter11start_groupEPKcb +_ZN12OutputWriter13parse_optionsEPKc +_ZN12OutputWriter7get_bufEb +_ZN12OutputWriter8end_listEb +_ZN12OutputWriter9end_groupEb +_ZN12OutputWriterD0Ev +_ZN12OutputWriterD1Ev + +; plugins.o +_Z10new_pluginv +_Z12load_pluginsPvS_PKcS1_PFbP6PluginE +_Z14unload_pluginsv +_Z16dbg_print_pluginP6_iobuf +_Z19dbg_plugin_add_hookPFvP6PluginP6_iobufE +_Z7fprintfP6_iobufPKcz + +; print.o +_Z10__snprintfPcjPKcz +_Z10__vsprintfPcPKcS_ +_Z11__vsnprintfPcjPKcS_ +_Z4doprPcjPKcS_PFvS_PjjiE +_Z9__sprintfPcPKcz + +; priv.o +_Z4dropPcS_b + +; queue.o +_Z5qnextP7b_queueS0_ +_Z7qdchainP7b_queue +_Z7qinsertP7b_queueS0_ +_Z7qremoveP7b_queue + +; rblist.o +_ZN6rblist11left_rotateEPv +_ZN6rblist12right_rotateEPv +_ZN6rblist3anyEPv +_ZN6rblist4nextEPv +_ZN6rblist5firstEv +_ZN6rblist6insertEPvPFiS0_S0_E +_ZN6rblist6removeEPv +_ZN6rblist6searchEPvPFiS0_S0_E +_ZN6rblist7destroyEv + +; runscript.o +_Z11run_scriptsP3JCRP5alistPKc +_Z13new_runscriptv +_Z14copy_runscriptP9RUNSCRIPT +_Z14free_runscriptP9RUNSCRIPT +_Z15free_runscriptsP5alist +_ZN9RUNSCRIPT10set_targetEPKc +_ZN9RUNSCRIPT11set_commandEPKci +_ZN9RUNSCRIPT13reset_defaultEb +_ZN9RUNSCRIPT21set_job_code_callbackEPFPcP3JCRPKcS0_iE +_ZN9RUNSCRIPT3runEP3JCRPKc +_ZN9RUNSCRIPT5debugEv +_ZN9RUNSCRIPT8is_localEv + +; rwlock.o +_Z11rwl_destroyP12s_rwlock_tag +_Z12is_rwl_validP12s_rwlock_tag +_Z12rwl_readlockP12s_rwlock_tag +_Z14rwl_readunlockP12s_rwlock_tag +_Z15rwl_readtrylockP12s_rwlock_tag +_Z15rwl_writelock_pP12s_rwlock_tagPKci +_Z15rwl_writeunlockP12s_rwlock_tag +_Z16rwl_writetrylockP12s_rwlock_tag +_Z8rwl_initP12s_rwlock_tagi + +; scan.o +_Z10parse_argsPcPS_PiS0_S0_i +_Z11skip_spacesPPc +_Z14skip_nonspacesPPc +_Z15parse_args_onlyPcPS_PiS0_S0_i +_Z19strip_leading_spacePc +_Z19strip_trailing_junkPc +_Z22strip_trailing_newlinePc +_Z22strip_trailing_slashesPc +_Z23split_path_and_filenamePKcPPcPiS2_S3_ +_Z7bsscanfPKcS0_z +_Z7fstrschPKcS0_ +_Z8next_argPPc +_Z9next_namePPc + +; sellist.o +_ZN7sellist10set_stringEPKcb +_ZN7sellist17get_expanded_listEv +_ZN7sellist4nextEv + +; serial.o +_Z12serial_btimePPhx +_Z12serial_int16PPhs +_Z12serial_int32PPhi +_Z12serial_int64PPhx +_Z13serial_stringPPhPKc +_Z13serial_uint16PPht +_Z13serial_uint32PPhj +_Z13serial_uint64PPhy +_Z14serial_float64PPhd +_Z14unserial_btimePPh +_Z14unserial_int16PPh +_Z14unserial_int32PPh +_Z14unserial_int64PPh +_Z15unserial_stringPPhPci 
+_Z15unserial_uint16PPh +_Z15unserial_uint32PPh +_Z15unserial_uint64PPh +_Z16unserial_float64PPh + +; sha1.o +_Z10SHA1UpdateP11SHA1ContextPKhj +_Z8SHA1InitP11SHA1Context +_Z9SHA1FinalP11SHA1ContextPh + +; signal.o + +; smartall.o +_Z10sm_reallocPKciPvj +_Z12actuallyfreePv +_Z12sm_check_rtnPKcib +_Z12sm_new_ownerPKciPc +_Z14actuallycallocjj +_Z14actuallymallocj +_Z15actuallyreallocPvj +_Z7bmemsetPvij +_Z7sm_dumpbb +_Z7sm_freePKciPv +_Z7sprintfPcPKcz +_Z8sm_checkPKcib +_Z9sm_callocPKcijj +_Z9sm_mallocPKcij +_Z9sm_staticb + +; tls.o +_Z14get_tls_enableP11TLS_Context +_Z15get_tls_requireP11TLS_Context +_Z15new_tls_contextPKcS0_S0_S0_PFiPciPKvES3_S0_b +_Z15tls_bsock_probeP9BSOCKCORE +_Z15tls_bsock_readnP5BSOCKPci +_Z16free_tls_contextP11TLS_Context +_Z16tls_bsock_acceptP5BSOCK +_Z16tls_bsock_writenP5BSOCKPci +_Z17tls_bsock_connectP5BSOCK +_Z18new_tls_connectionP11TLS_Contexti +_Z18tls_bsock_shutdownP9BSOCKCORE +_Z19free_tls_connectionP14TLS_Connection +_Z25tls_postconnect_verify_cnP3JCRP14TLS_ConnectionP5alist +_Z27tls_postconnect_verify_hostP3JCRP14TLS_ConnectionPKc + +; tree.o +_Z11tree_relcwdPcP11s_tree_rootP11s_tree_node +_Z12tree_getpathP11s_tree_nodePci +_Z14make_tree_pathPcP11s_tree_root +_Z16insert_tree_nodePcS_iP11s_tree_rootP11s_tree_node +_Z16tree_remove_nodeP11s_tree_rootP11s_tree_node +_Z19tree_add_delta_partP11s_tree_rootP11s_tree_nodeji +_Z8new_treei +_Z8tree_cwdPcP11s_tree_rootP11s_tree_node +_Z9free_treeP11s_tree_root + +; util.o +_Z11bash_spacesPc +_Z11bash_spacesR8POOL_MEM +_Z11encode_modetPc +_Z11encode_timexPc +_Z11is_buf_zeroPKci +_Z13unbash_spacesPc +_Z13unbash_spacesR8POOL_MEM +_Z14edit_job_codesP3JCRPcS1_PKcPFS1_S0_S3_S1_iE +_Z15is_power_of_twoy +_Z15job_type_to_stri +_Z16job_level_to_stri +_Z16make_session_keyPcS_i +_Z17job_status_to_strii +_Z18decode_session_keyPcS_S_i +_Z18do_shell_expansionPci +_Z18encode_session_keyPcS_S_i +_Z18jobstatus_to_asciiiPci +_Z19last_path_separatorPKc +_Z20volume_status_to_strPKc +_Z21set_working_directoryPc +_Z22jobstatus_to_ascii_guiiPci +_Z25action_on_purge_to_stringiR8POOL_MEM +_Z5lcasePc +_Z7hexdumpPKciPcib +_Z7is_nullPKv +_Z7sprintfPcPKcz +_Z8bmemzeroPvj +_Z9asciidumpPKciPci +_Z9smartdumpPKciPciPb + +; var.o +_Z10var_configP6var_stiz +_Z10var_createPP6var_st +_Z10var_expandP6var_stPKciPPcPii +_Z10var_formatP6var_stPPciPKcz +_Z11var_destroyP6var_st +_Z11var_formatvP6var_stPPciPKcS1_ +_Z12var_strerrorP6var_st8var_rc_t +_Z12var_unescapeP6var_stPKciPcii +_Z7sprintfPcPKcz + +; watchdog.o +watchdog_thread +_Z11is_watchdogv +_Z12new_watchdogv +_Z13stop_watchdogv +_Z14start_watchdogv +_Z17register_watchdogP12s_watchdog_t +_Z19unregister_watchdogP12s_watchdog_t + +; win32filter.o +_ZN11Win32Filter9have_dataEPPcPxS2_ + +; winapi.o +_Z17InitWinAPIWrapperv + +; worker.o +_ZN6worker11finish_workEv +_ZN6worker12release_lockEv +_ZN6worker13discard_queueEv +_ZN6worker13set_run_stateEv +_ZN6worker14set_quit_stateEv +_ZN6worker14set_wait_stateEv +_ZN6worker15pop_free_bufferEv +_ZN6worker16push_free_bufferEPv +_ZN6worker16wait_queue_emptyEv +_ZN6worker4initEi +_ZN6worker4stopEv +_ZN6worker4waitEv +_ZN6worker5queueEPv +_ZN6worker5startEPFPvS0_ES0_ +_ZN6worker7dequeueEv +_ZN6worker7destroyEv + +; workq.o +workq_server +_Z10workq_initP9workq_tagiPFPvS1_E +_Z12workq_removeP9workq_tagP13workq_ele_tag +_Z13workq_destroyP9workq_tag +_Z15workq_wait_idleP9workq_tag +_Z9workq_addP9workq_tagPvPP13workq_ele_tagi + +console_command DATA +b_plugin_list DATA +plugin_bopen DATA +plugin_bclose DATA +plugin_bwrite DATA +plugin_bread DATA +plugin_blseek DATA +exepath 
DATA +version DATA +dist_name DATA diff --git a/src/win32/lib/bacula64.def b/src/win32/lib/bacula64.def new file mode 100644 index 00000000..3cbc6468 --- /dev/null +++ b/src/win32/lib/bacula64.def @@ -0,0 +1,1013 @@ +LIBRARY bacula.dll +EXPORTS + +; address_conf.o +_Z14free_addressesP5dlist +_Z15store_addressesP13s_lex_contextP8RES_ITEMii +_Z17get_first_addressP5dlistPci +_Z17sockaddr_get_portPK8sockaddr +_Z17sockaddr_to_asciiPK8sockaddriPci +_Z19build_addresses_strP5dlistPci +_Z20store_addresses_portP13s_lex_contextP8RES_ITEMii +_Z22init_default_addressesPP5dlisti +_Z23store_addresses_addressP13s_lex_contextP8RES_ITEMii +_Z24get_first_port_net_orderP5dlist +_Z25get_first_port_host_orderP5dlist +_Z26remove_duplicate_addressesP5dlist +_Z27sockaddr_get_port_net_orderPK8sockaddr +_ZN6IPADDR11get_addressEPci +_ZN6IPADDR12get_sockaddrEv +_ZN6IPADDR12set_addr_anyEv +_ZN6IPADDR12set_port_netEt +_ZN6IPADDR16get_sockaddr_lenEv +_ZN6IPADDR17build_address_strEPci +_ZN6IPADDR8set_typeENS_6i_typeE +_ZN6IPADDR9copy_addrEPS_ +_ZN6IPADDR9set_addr4EP7in_addr +_ZN6IPADDR9set_addr6EP8in6_addr +_ZN6IPADDRC1Ei +_ZN6IPADDRC1ERKS_ +_ZN6IPADDRC2Ei +_ZN6IPADDRC2ERKS_ +_ZNK6IPADDR10get_familyEv +_ZNK6IPADDR18get_port_net_orderEv +_ZNK6IPADDR8get_typeEv + +; alist.o +_ZN5alist4lastEv +_ZN5alist4nextEv +_ZN5alist4prevEv +_ZN5alist5firstEv +_ZN5alist7prependEPv +_ZN5ilist3putEiPv +_ZN8baselist11remove_itemEi +_ZN8baselist3getEi +_ZN8baselist6appendEPv +_ZN8baselist7destroyEv +_ZN8baselist9grow_listEv + +; attribs.o +_Z11decode_statPcP4statiPi +_Z11encode_statPcP4statiii +_Z11set_own_modP4ATTRPcjjt +_Z13decode_LinkFIPcP4stati +_Z14set_attributesP3JCRP4ATTRP5BFILE +_Z16encode_attribsExP3JCRPcP6FF_PKT +_Z16set_mod_own_timeP3JCRP5BFILEP4ATTR +_Z18select_data_streamP6FF_PKT +_Z9win_errorP3JCRiPKcPc +_Z9win_errorP3JCRPKcm +_Z9win_errorP3JCRPKcPc + +; attr.o +_Z15print_ls_outputP3JCRP4ATTRi +_Z24build_attr_output_fnamesP3JCRP4ATTR +_Z24unpack_attributes_recordP3JCRiPciP4ATTR +_Z7sprintfPcPKcz +_Z8new_attrP3JCR +_Z9free_attrP4ATTR + +; base64.o +_Z11base64_initv +_Z11from_base64PxPc +_Z13base64_to_binPciS_i +_Z13bin_to_base64PciS_ii +_Z9to_base64xPc + +; berrno.o +_ZN6berrno20format_win32_messageEv +_ZN6berrno9bstrerrorEv + +; bfile.o +_Z10set_fattrsP5BFILEP4stat +_Z11bget_handleP5BFILE +_Z11int32_LE2BEPii +_Z11int64_LE2BEPxx +_Z14have_win32_apiv +_Z14is_plugin_dataP5BFILE +_Z14set_cmd_pluginP5BFILEP3JCR +_Z15is_win32_streami +_Z15stream_to_asciii +_Z16set_win32_backupP5BFILE +_Z18is_portable_backupP5BFILE +_Z19set_portable_backupP5BFILE +_Z26processWin32BackupAPIBlockP5BFILEPvx +_Z27is_restore_stream_supportedi +_Z5binitP5BFILE +_Z5bopenP5BFILEPKcyt +_Z5breadP5BFILEPvy +_Z6bcloseP5BFILE +_Z6blseekP5BFILExi +_Z6bwriteP5BFILEPvy +_Z7sprintfPcPKcz +_Z8is_bopenP5BFILE +_Z9pause_msgPKcS0_iS0_ + +; bget_msg.o +_Z8bget_msgP5BSOCK +_ZN6GetMsg14wait_read_sockEi +_ZN6GetMsg15start_read_sockEv +_ZN6GetMsg19do_read_sock_threadEv +_ZN6GetMsg7is_doneEv +_ZN6GetMsg7is_stopEv +_ZN6GetMsg8bget_msgEPP8bmessage +_ZN6GetMsg8is_errorEv +_ZN6GetMsgC1EP3JCRP5BSOCKPKci +_ZN6GetMsgC2EP3JCRP5BSOCKPKci +_ZN6GetMsgD0Ev +_ZN6GetMsgD1Ev +_ZN6GetMsgD2Ev +_ZN8bmessage4swapEP5BSOCK +_ZN8bmessageC1Ei +_ZN8bmessageC2Ei +_ZN8bmessageD0Ev +_ZN8bmessageD1Ev +_ZN8bmessageD2Ev + +; binflate.o +_Z8ZdeflatePciS_Ri +_Z8ZinflatePciS_Ri + +; bnet.o +_Z11resolv_hostiPKcP5dlist +_Z15bnet_tls_clientP11TLS_ContextP5BSOCKP5alist +_Z15bnet_tls_serverP11TLS_ContextP5BSOCKP5alist +_Z16set_socket_errnoi +_Z17bnet_host2ipaddrsPKciPS0_ +_Z17bnet_sig_to_asciii + +; bnet_server.o 
+_Z18bnet_thread_serverP5dlistiP9workq_tagPFPvS3_E +_Z23bnet_stop_thread_server14ptw32_handle_t + +; bpipe.o +_Z11run_programPciRS_ +_Z23run_program_full_outputPciRS_PS_ + +; bregex.o +b_regcomp +b_regerror +b_regexec +b_regfree +_Z24re_registers_to_regmatchP12re_registersP12b_regmatch_ty + +; breg.o +_Z11new_bregexpPKc +_Z12free_bregexpP7BREGEXP +_Z12get_bregexpsPKc +_Z13free_bregexpsP5alist +_Z14apply_bregexpsPKcP4statP5alistPPc +_Z14apply_bregexpsPKcP5alistPPc +_Z19bregexp_build_wherePciS_S_S_ +_Z21bregexp_escape_stringPcPKcc +_Z28bregexp_get_build_where_sizePcS_S_ +_Z6printfPKcz +_ZN7BREGEXP10edit_substEPKcP4statP12b_regmatch_t +_ZN7BREGEXP12return_fnameEPKci +_ZN7BREGEXP14extract_regexpEPKc +_ZN7BREGEXP16compute_dest_lenEPKcP12b_regmatch_t +_ZN7BREGEXP5debugEv +_ZN7BREGEXP7replaceEPKcP4stat + +; bsnprintf.o +_Z10bvsnprintfPciPKcS_ +_Z9bsnprintfPciPKcz + +; bsockcore.o +_Z14dump_bsock_msgijPKcjijPci +_ZN13BSOCKCallbackC1Ev +_ZN13BSOCKCallbackC2Ev +_ZN13BSOCKCallbackD0Ev +_ZN13BSOCKCallbackD1Ev +_ZN13BSOCKCallbackD2Ev +_ZN9BSOCKCORE11read_nbytesEPci +_ZN9BSOCKCORE11set_lockingEv +_ZN9BSOCKCORE12set_blockingEv +_ZN9BSOCKCORE12set_killableEb +_ZN9BSOCKCORE12write_nbytesEPci +_ZN9BSOCKCORE13clear_lockingEv +_ZN9BSOCKCORE14wait_data_intrEii +_ZN9BSOCKCORE15control_bwlimitEi +_ZN9BSOCKCORE15set_buffer_sizeEji +_ZN9BSOCKCORE15set_nonblockingEv +_ZN9BSOCKCORE16restore_blockingEi +_ZN9BSOCKCORE18set_source_addressEP5dlist +_ZN9BSOCKCORE4dumpEv +_ZN9BSOCKCORE4initEv +_ZN9BSOCKCORE4openEP3JCRPKcPcS4_ixPi +_ZN9BSOCKCORE4sendEv +_ZN9BSOCKCORE5closeEv +_ZN9BSOCKCORE5fsendEPKcz +_ZN9BSOCKCORE5recvnEi +_ZN9BSOCKCORE6cancelEv +_ZN9BSOCKCORE7connectEP3JCRixxPKcPcS4_ii +_ZN9BSOCKCORE7destroyEv +_ZN9BSOCKCORE8_destroyEv +_ZN9BSOCKCORE8fin_initEP3JCRiPKcS3_iP8sockaddr +_ZN9BSOCKCORE8free_tlsEv +_ZN9BSOCKCORE8get_peerEPci +_ZN9BSOCKCORE9bstrerrorEv +_ZN9BSOCKCORE9wait_dataEii +_ZN9BSOCKCOREC1Ev +_ZN9BSOCKCOREC2Ev +_ZN9BSOCKCORED0Ev +_ZN9BSOCKCORED1Ev +_ZN9BSOCKCORED2Ev + +; bsock.o +_Z10init_bsockP3JCRiPKcS2_iP8sockaddr +_Z9dup_bsockP5BSOCK +_Z9new_bsockv +_ZN5BSOCK12write_nbytesEPci +_ZN5BSOCK13comm_compressEv +_ZN5BSOCK21authenticate_directorEPKcS1_P11TLS_ContextPci +_ZN5BSOCK4dumpEv +_ZN5BSOCK4initEv +_ZN5BSOCK4openEP3JCRPKcPcS4_ixPi +_ZN5BSOCK4recvEv +_ZN5BSOCK4sendEi +_ZN5BSOCK4sendEv +_ZN5BSOCK5closeEv +_ZN5BSOCK6signalEi +_ZN5BSOCK7despoolEPFvxEx +_ZN5BSOCK8_destroyEv +_ZN5BSOCKC1Ei +_ZN5BSOCKC1Ev +_ZN5BSOCKC2Ei +_ZN5BSOCKC2Ev +_ZN5BSOCKD0Ev +_ZN5BSOCKD1Ev +_ZN5BSOCKD2Ev + +; bsys.o +_Z10b_strerroriPcy +_Z11bmicrosleepii +_Z11bstrcasecmpPKcS0_ +_Z11quote_wherePcPKc +_Z11stack_tracev +_Z12fd_wait_datai12fd_wait_modeii +_Z12quote_stringPcPKc +_Z12safer_unlinkPKcS0_ +_Z15create_pid_filePcPKci +_Z15delete_pid_filePcPKci +_Z15escape_filenamePKc +_Z15read_state_filePcPKci +_Z16create_lock_filePcPKcS1_PS_Pi +_Z16write_state_filePcPKci +_Z17fs_get_free_spacePKcPxS1_ +_Z20make_unique_filenamePPciS_ +_Z5bfreePv +_Z6bfgetsPciP6_iobuf +_Z6bfgetsRPcP6_iobuf +_Z6bfopenPKcS0_ +_Z7bacceptiP8sockaddrPi +_Z7bcallocyy +_Z7bstrcmpPKcS0_ +_Z7cstrlenPKc +_Z7ucfirstPcPKci +_Z8b_mallocPKciy +_Z8breallocPvy +_Z8bstrncatPcPKci +_Z8bstrncatPcR8POOL_MEMi +_Z8bstrncpyPcPKci +_Z8bstrncpyPcR8POOL_MEMi +_Z8copyfilePKcS0_ +_Z9setup_envPPc + +; btime.o +_Z10bstrftimesPcix +_Z11date_decodedPjPhS0_ +_Z11date_encodejhh +_Z11time_decodedPhS_S_Pf +_Z11time_encodehhhf +_Z12bstrftime_dnPcix +_Z12bstrftime_ncPcix +_Z12bstrftime_nyPcix +_Z12str_to_utimePc +_Z13btime_to_unixx +_Z14btime_to_utimex 
+_Z16date_time_decodeP9date_timePjPhS2_S2_S2_S2_Pf +_Z16date_time_encodeP9date_timejhhhhhf +_Z16get_current_timeP9date_time +_Z17date_time_compareP9date_timeS0_ +_Z17get_current_btimev +_Z6tm_womii +_Z6tm_woyx +_Z7tm_ldomii +_Z9bstrftimePcix +_Z9bstrutimePcix +_Z9tm_decodeP9date_timeP2tm +_Z9tm_encodeP9date_timeP2tm + +; btimers.o +_Z16stop_bsock_timerP8btimer_t +_Z16stop_child_timerP8btimer_t +_Z17start_bsock_timerP5BSOCKj +_Z17start_bsock_timerP9BSOCKCOREj +_Z17start_child_timerP3JCRxj +_Z17stop_thread_timerP8btimer_t +_Z18_start_bsock_timerP5BSOCKj +_Z18start_thread_timerP3JCR14ptw32_handle_tj + +; compat.o +_Z10binet_ptoniPKcPv +_Z10open_bpipePciPKcPS_ +_Z11close_bpipeP5BPIPE +_Z11close_wpipeP5BPIPE +_Z11LogErrorMsgPKc +_Z11malloc_trimi +_Z11strncasecmpPKcS0_i +_Z11win32_cgetsPci +_Z11win32_chdirPKc +_Z11win32_chmodPKct +_Z11win32_fputsPKcP6_iobuf +_Z11win32_mkdirPKc +_Z12gettimeofdayP7timevalP8timezone +_Z12init_signalsPFviE +_Z12UTF8_2_wcharPPcPKc +_Z12wchar_2_UTF8PcPKwi +_Z12wchar_2_UTF8PPcPKw +_Z12win32_getcwdPci +_Z12win32_unlinkPKc +_Z15get_memory_infoPci +_Z15init_stack_dumpv +_Z15win32_ftruncateix +_Z16wchar_win32_pathPKcPw +_Z17SetVSSPathConvertPFivEPFiPKcPciEPFiPKwPwiE +_Z18CreateChildProcessPKcPvS1_S1_ +_Z18cvt_ftime_to_utimeRK9_FILETIME +_Z18cvt_utime_to_ftimeRKxR9_FILETIME +_Z18GetApplicationNamePKcPPcPS0_ +_Z18unix_name_to_win32PPcPKc +_Z20allow_os_suspensionsv +_Z21Win32ConvCleanupCachev +_Z22prevent_os_suspensionsv +_Z28make_win32_path_UTF8_2_wcharPPcPKcPi +_Z4forkv +_Z4killxi +_Z4pipePi +_Z4statPKcP4stat +_Z5chownPKcjj +_Z5dlsymPvPKc +_Z5fcntlii +_Z5fcntliil +_Z5fstatxP4stat +_Z5hstatPvP4stat +_Z5lstatPKcP4stat +_Z5sleepi +_Z6dlopenPKci +_Z6execvpPKcPPc +_Z6lchownPKcjj +_Z6printfPKcz +_Z6randomv +_Z6syslogiPKcz +_Z7dlclosePv +_Z7dlerrorv +_Z7geteuidv +_Z7opendirPKc +_Z7openlogPKcii +_Z7srandomj +_Z7symlinkPKcS0_ +_Z7waitpidiPii +_Z8breaddirPvRPc +_Z8closedirPv +_Z8closelogv +_Z8getArgv0PKc +_Z8getgrgidj +_Z8getpwuidj +_Z8pathconfPKci +_Z8readlinkPKcPci +_Z8snprintfPcyPKcz +_Z8WSA_Initv +_Z9ErrorExitPKc +_Z9inet_atonPKcP7in_addr +_ZN6winverC1Ev +_ZN6winverC2Ev + +; cram-md5.o +_Z16cram_md5_respondP5BSOCKPKcPiS3_ +_Z18cram_md5_challengeP5BSOCKPKcii +_ZN5BSOCK4sendEv + +; crc32.o +_Z6bcrc32Phi + +; create_file.o +_Z11create_fileP3JCRP4ATTRP5BFILEi + +; crypto.o +_Z13CryptoData_itv +_Z13SignerInfo_itv +_Z14CryptoData_newv +_Z14d2i_CryptoDataPP10CryptoDataPPKhl +_Z14d2i_SignerInfoPP10SignerInfoPPKhl +_Z14i2d_CryptoDataP10CryptoDataPPh +_Z14i2d_SignerInfoP10SignerInfoPPh +_Z14SignerInfo_newv +_Z15CryptoData_freeP10CryptoData +_Z15crypto_sign_newP3JCR +_Z15crypto_strerror14crypto_error_t +_Z15SignerInfo_freeP10SignerInfo +_Z16crypto_sign_freeP9Signature +_Z16RecipientInfo_itv +_Z16SignatureData_itv +_Z17crypto_cipher_newP14Crypto_SessionbPj +_Z17crypto_digest_newP3JCR15crypto_digest_t +_Z17d2i_RecipientInfoPP13RecipientInfoPPKhl +_Z17d2i_SignatureDataPP13SignatureDataPPKhl +_Z17i2d_RecipientInfoP13RecipientInfoPPh +_Z17i2d_SignatureDataP13SignatureDataPPh +_Z17RecipientInfo_newv +_Z17SignatureData_newv +_Z18crypto_cipher_freeP14Cipher_Context +_Z18crypto_digest_freeP6Digest +_Z18crypto_digest_nameP6Digest +_Z18crypto_keypair_dupP12X509_Keypair +_Z18crypto_keypair_newv +_Z18crypto_session_new15crypto_cipher_tP5alist +_Z18crypto_sign_decodeP3JCRPKhj +_Z18crypto_sign_encodeP9SignaturePhPj +_Z18crypto_sign_verifyP9SignatureP12X509_KeypairP6Digest +_Z18RecipientInfo_freeP13RecipientInfo +_Z18SignatureData_freeP13SignatureData +_Z19crypto_keypair_freeP12X509_Keypair 
+_Z19crypto_session_freeP14Crypto_Session +_Z20crypto_cipher_updateP14Cipher_ContextPKhjS2_Pj +_Z20crypto_digest_updateP6DigestPKhj +_Z21crypto_session_decodePKhjP5alistPP14Crypto_Session +_Z21crypto_session_encodeP14Crypto_SessionPhPj +_Z22crypto_cipher_finalizeP14Cipher_ContextPhPj +_Z22crypto_digest_finalizeP6DigestPhPj +_Z22crypto_keypair_has_keyPKc +_Z22crypto_sign_add_signerP9SignatureP6DigestP12X509_Keypair +_Z22crypto_sign_get_digestP9SignatureP12X509_KeypairR15crypto_digest_tPP6Digest +_Z23crypto_keypair_load_keyP12X509_KeypairPKcPFiPciPKvES5_ +_Z24crypto_keypair_load_certP12X509_KeypairPKc +_Z25crypto_digest_stream_typei +_Z27crypto_default_pem_callbackPciPKv + +; daemon.o +_Z12daemon_startv + +; dlist.o +_Z15new_dlistStringPKc +_Z15new_dlistStringPKci +_ZN5dlist12insert_afterEPvS0_ +_ZN5dlist13binary_insertEPvPFiS0_S0_E +_ZN5dlist13binary_searchEPvPFiS0_S0_E +_ZN5dlist13insert_beforeEPvS0_ +_ZN5dlist22binary_insert_multipleEPvPFiS0_S0_E +_ZN5dlist4nextEPv +_ZN5dlist4prevEPv +_ZN5dlist6appendEPv +_ZN5dlist6removeEPv +_ZN5dlist7destroyEv +_ZN5dlist7prependEPv + +; drivetype.o +_Z9drivetypePKcPci + +; edit.o +_Z10add_commasPcS_ +_Z10edit_int64xPc +_Z10edit_utimexPci +_Z11edit_uint64yPc +_Z11is_a_numberPKc +_Z12str_to_int64Pc +_Z13is_an_integerPKc +_Z13is_name_validPKcPPc +_Z13str_to_uint64Pc +_Z14size_to_uint64PciPy +_Z15speed_to_uint64PciPy +_Z16is_a_number_listPKc +_Z17duration_to_utimePcPx +_Z22edit_int64_with_commasxPc +_Z23edit_uint64_with_commasyPc +_Z23edit_uint64_with_suffixyPc + +; enable_priv.o +_Z24enable_backup_privilegesP3JCRi + +; find.o +_Z10find_filesP3JCRP6FF_PKTPFiS0_S2_bES4_ +_Z11accept_fileP6FF_PKT +_Z13is_in_filesetP6FF_PKT +_Z15init_find_filesv +_Z15term_find_filesP6FF_PKT +_Z16set_find_optionsP6FF_PKTix +_Z25set_find_changed_functionP6FF_PKTPFbP3JCRS0_E +_Z26set_find_snapshot_functionP6FF_PKTPFbP3JCRS0_P5dlistP11dlistStringE + +; find_one.o +_Z13check_changesP3JCRP6FF_PKT +_Z13find_one_fileP3JCRP6FF_PKTPFiS0_S2_bEPcjb +_Z13term_find_oneP6FF_PKT +_Z16has_file_changedP3JCRP6FF_PKT +_Z22ff_pkt_set_link_digestP6FF_PKTiPKcj + +; flist.o +_ZN5flist5queueEPv +_ZN5flist7dequeueEv +_ZN5flist7destroyEv + +; fnmatch.o +fnmatch + +; fstype.o +_Z13add_mtab_itemPvP4statPKcS3_S3_S3_ +_Z6fstypeP6FF_PKTPci +_Z9read_mtabPFvPvP4statPKcS3_S3_S3_ES_ + +; guid_to_name.o +_Z13new_guid_listv +_Z14free_guid_listP9guid_list +_ZN9guid_list11gid_to_nameEjPci +_ZN9guid_list11uid_to_nameEjPci + +; hmac.o +_Z8hmac_md5PhiS_iS_ + +; htable.o +_Z6printfPKcz +_ZN6htable10grow_tableEv +_ZN6htable10hash_indexEPc +_ZN6htable10hash_indexEy +_ZN6htable11hash_mallocEi +_ZN6htable13hash_big_freeEv +_ZN6htable14malloc_big_bufEi +_ZN6htable4initEPvS0_i +_ZN6htable4nextEv +_ZN6htable4sizeEv +_ZN6htable5firstEv +_ZN6htable5statsEv +_ZN6htable6insertEPcPv +_ZN6htable6insertEyPv +_ZN6htable6lookupEPc +_ZN6htable6lookupEy +_ZN6htable7destroyEv +_ZN6htableC1EPvS0_i +_ZN6htableC2EPvS0_i + +; ini.o +_Z13ini_store_strP13s_lex_contextP10ConfigFileP9ini_items +_Z14ini_store_boolP13s_lex_contextP10ConfigFileP9ini_items +_Z14ini_store_dateP13s_lex_contextP10ConfigFileP9ini_items +_Z14ini_store_nameP13s_lex_contextP10ConfigFileP9ini_items +_Z15ini_store_int32P13s_lex_contextP10ConfigFileP9ini_items +_Z15ini_store_int64P13s_lex_contextP10ConfigFileP9ini_items +_Z16ini_store_pint32P13s_lex_contextP10ConfigFileP9ini_items +_Z16ini_store_pint64P13s_lex_contextP10ConfigFileP9ini_items +_Z18ini_get_store_codePFbP13s_lex_contextP10ConfigFileP9ini_itemsE +_Z19ini_store_alist_strP13s_lex_contextP10ConfigFileP9ini_items 
+_Z21ini_get_store_handlerPKc +_ZN10ConfigFile10free_itemsEv +_ZN10ConfigFile11clear_itemsEv +_ZN10ConfigFile11dump_stringEPKci +_ZN10ConfigFile11unserializeEPKc +_ZN10ConfigFile12dump_resultsEPPc +_ZN10ConfigFile5parseEPKc +_ZN10ConfigFile5parseEv +_ZN10ConfigFile8get_itemEPKc +_ZN10ConfigFile9parse_bufEPKc +_ZN10ConfigFile9serializeEPKc +_ZN10ConfigFile9serializeEPPc +_ZN10ConfigFileD0Ev +_ZN10ConfigFileD1Ev + +; jcr.o +_Z10b_free_jcrPKciP3JCR +_Z11unlock_jobsv +_Z12jcr_walk_endP3JCR +_Z12job_end_pushP3JCRPFvS0_PvES1_ +_Z13dbg_print_jcrP6_iobuf +_Z13get_jcr_by_idj +_Z13jcr_walk_nextP3JCR +_Z14create_jcr_keyv +_Z14jcr_walk_startv +_Z14set_jcr_in_tsdP3JCR +_Z16dbg_jcr_add_hookPFvP3JCRP6_iobufE +_Z16get_jcr_from_tsdv +_Z18get_jcr_by_sessionjj +_Z18get_jobid_from_tid14ptw32_handle_t +_Z18get_jobid_from_tsdv +_Z18init_jcr_subsystemv +_Z19init_last_jobs_listv +_Z19lock_last_jobs_listv +_Z19read_last_jobs_listiy +_Z19remove_jcr_from_tsdP3JCR +_Z19term_last_jobs_listv +_Z20get_jcr_by_full_namePc +_Z20write_last_jobs_listiy +_Z21unlock_last_jobs_listv +_Z23get_jcr_by_partial_namePc +_Z24get_next_jobid_from_listPPcPj +_Z7fprintfP6_iobufPKcz +_Z7new_jcriPFvP3JCRE +_Z9job_countv +_Z9lock_jobsv +_ZN3JCR12setJobStatusEi +_ZN3JCR12set_killableEb +_ZN3JCR13sendJobStatusEi +_ZN3JCR13sendJobStatusEv +_ZN3JCR13setJobStartedEv +_ZN3JCR14can_be_stoppedEv +_ZN3JCR14get_ActionNameEb +_ZN3JCR17get_OperationNameEv +_ZN3JCR21my_thread_send_signalEi +_ZN3JCR8JobReadsEv + +; lex.o +_Z11scan_to_eolP13s_lex_context +_Z12lex_get_charP13s_lex_context +_Z12lex_open_bufP13s_lex_contextPKcPFvS2_iS0_S2_zE +_Z13lex_check_eolP13s_lex_context +_Z13lex_get_tokenP13s_lex_contexti +_Z13lex_open_fileP13s_lex_contextPKcPFvS2_iS0_S2_zE +_Z14lex_close_fileP13s_lex_context +_Z14lex_tok_to_stri +_Z14lex_unget_charP13s_lex_context +_Z20scan_to_next_not_eolP13s_lex_context +_Z25lex_store_clear_passwordsP13s_lex_context +_Z29lex_set_default_error_handlerP13s_lex_context +_Z32lex_set_error_handler_error_typeP13s_lex_contexti + +; lockmgr.o +_Z14dbg_print_lockP6_iobuf +_Z18bthread_change_uidjj +_Z21bthread_get_thread_idv +_Z6lmgr_pPP16pthread_mutex_t_ +_Z6lmgr_vPP16pthread_mutex_t_ + +; lz4.o +_Z23LZ4_compress_fast_forcePKcPciii +_Z25LZ4_compress_forceExtDictP12LZ4_stream_uPKcPci +_Z32LZ4_decompress_safe_forceExtDictPKcPciiS0_i + +; match.o +_Z11match_filesP3JCRP6FF_PKTPFiS0_S2_bE +_Z16file_is_excludedP6FF_PKTPKc +_Z16file_is_includedP6FF_PKTPKc +_Z22get_next_included_fileP6FF_PKTP15s_included_file +_Z25add_fname_to_exclude_listP6FF_PKTPKc +_Z25add_fname_to_include_listP6FF_PKTiPKc +_Z26term_include_exclude_filesP6FF_PKT + +; md5.o +_Z11byteReversePhj +_Z12MD5TransformPjS_ +_Z7MD5InitP10MD5Context +_Z8MD5FinalPhP10MD5Context +_Z9MD5UpdateP10MD5ContextPhj + +; mem_pool.o +_Z13sm_get_memoryPKcii +_Z17close_memory_poolv +_Z18sm_get_pool_memoryPKcii +_Z19sm_free_pool_memoryPKciPc +_Z21sm_sizeof_pool_memoryPKciPc +_Z22garbage_collect_memoryv +_Z22sm_realloc_pool_memoryPKciPci +_Z23print_memory_pool_statsv +_Z25sm_check_pool_memory_sizePKciPci +_Z27garbage_collect_memory_poolv +_Z9pm_memcpyPPcPKci +_Z9pm_memcpyR8POOL_MEMPKci +_Z9pm_memcpyRPcPKci +_Z9pm_memcpyRPcR8POOL_MEMi +_Z9pm_strcatPPcPKc +_Z9pm_strcatR8POOL_MEMPKc +_Z9pm_strcatR8POOL_MEMS0_ +_Z9pm_strcatRPcPKc +_Z9pm_strcatRPcR8POOL_MEM +_Z9pm_strcpyPPcPKc +_Z9pm_strcpyR8POOL_MEMPKc +_Z9pm_strcpyRPcPKc +_Z9pm_strcpyRPcR8POOL_MEM +_ZN8POOL_MEM10realloc_pmEi +_ZN8POOL_MEM6strcatEPKc +_ZN8POOL_MEM6strcpyEPKc +_ZN8POOL_MEM8max_sizeEv + +; message.o +_Z10get_blowupv +_Z10get_hangupv 
+_Z10my_name_isiPPcPKc +_Z10set_blowupi +_Z10set_hangupi +_Z12add_msg_destP4MSGSiiPcS1_ +_Z12get_basenamePKc +_Z12rem_msg_destP4MSGSiiPc +_Z13debug_get_tagjPPKc +_Z13free_msgs_resP4MSGS +_Z14debug_find_tagPKcbPx +_Z14set_assert_msgPKciS0_ +_Z15set_debug_flagsPc +_Z16debug_parse_tagsPKcPx +_Z16dequeue_messagesP3JCR +_Z16dispatch_messageP3JCRixPc +_Z16init_console_msgPKc +_Z18set_db_engine_namePKc +_Z19is_message_type_setP3JCRi +_Z19set_trace_for_toolsP6_iobuf +_Z20handle_hangup_blowupP3JCRjy +_Z21generate_daemon_eventP3JCRPKc +_Z23dequeue_daemon_messagesP3JCR +_Z25free_daemon_message_queuev +_Z25register_message_callbackPFviPcE +_Z26setup_daemon_message_queuev +_Z4JmsgP3JCRixPKcz +_Z4MmsgPPcPKcz +_Z4MmsgR8POOL_MEMPKcz +_Z4MmsgRPcPKcz +_Z4QmsgP3JCRixPKcz +_Z5d_msgPKcixS0_z +_Z5e_msgPKciiiS0_z +_Z5j_msgPKciP3JCRixS0_z +_Z5m_msgPKciPPcS0_z +_Z5m_msgPKciRPcS0_z +_Z5p_msgPKciiS0_z +_Z5q_msgPKciP3JCRixS0_z +_Z5t_msgPKcixS0_z +_Z6printfPKcz +_Z6vd_msgPKcixS0_Pc +_Z7fprintfP6_iobufPKcz +_Z7sprintfPcPKcz +_Z8init_msgP3JCRP4MSGSPFPcS0_PKcS3_iE +_Z8term_msgv +_Z9close_msgP3JCR +_Z9get_tracev +_Z9set_tracei +_ZN4MSGS15wait_not_in_useEv +_ZN4MSGS4lockEv +_ZN4MSGS6unlockEv +_ZN5BSOCK4sendEv + +; mkpath.o +_Z13path_list_addP3JCRjPc +_Z14free_path_listP3JCR +_Z16path_list_lookupP3JCRPc +_Z8makepathP4ATTRPKcttjji + +; openssl.o +_Z11init_cryptov +_Z14cleanup_cryptov +_Z19openssl_post_errorsiPKc +_Z19openssl_post_errorsP3JCRiPKc + +; output.o +_Z8snprintfPcyPKcz +_ZN12OutputWriter10get_outputEiz +_ZN12OutputWriter10get_outputEPcPS0_i +_ZN12OutputWriter10get_outputEPPciz +_ZN12OutputWriter10start_listEPKcb +_ZN12OutputWriter11get_optionsEPc +_ZN12OutputWriter11start_groupEPKcb +_ZN12OutputWriter13parse_optionsEPKc +_ZN12OutputWriter7get_bufEb +_ZN12OutputWriter8end_listEb +_ZN12OutputWriter9end_groupEb +_ZN12OutputWriterD0Ev +_ZN12OutputWriterD1Ev + +; plugins.o +_Z10new_pluginv +_Z12load_pluginsPvS_PKcS1_PFbP6PluginE +_Z14unload_pluginsv +_Z16dbg_print_pluginP6_iobuf +_Z19dbg_plugin_add_hookPFvP6PluginP6_iobufE +_Z7fprintfP6_iobufPKcz + +; print.o +_Z10__snprintfPcyPKcz +_Z10__vsprintfPcPKcS_ +_Z11__vsnprintfPcyPKcS_ +_Z4doprPcyPKcS_PFvS_PyyiE +_Z9__sprintfPcPKcz + +; priv.o +_Z4dropPcS_b + +; queue.o +_Z5qnextP7b_queueS0_ +_Z7qdchainP7b_queue +_Z7qinsertP7b_queueS0_ +_Z7qremoveP7b_queue + +; rblist.o +_ZN6rblist11left_rotateEPv +_ZN6rblist12right_rotateEPv +_ZN6rblist3anyEPv +_ZN6rblist4nextEPv +_ZN6rblist5firstEv +_ZN6rblist6insertEPvPFiS0_S0_E +_ZN6rblist6removeEPv +_ZN6rblist6searchEPvPFiS0_S0_E +_ZN6rblist7destroyEv + +; runscript.o +_Z11run_scriptsP3JCRP5alistPKc +_Z13new_runscriptv +_Z14copy_runscriptP9RUNSCRIPT +_Z14free_runscriptP9RUNSCRIPT +_Z15free_runscriptsP5alist +_ZN9RUNSCRIPT10set_targetEPKc +_ZN9RUNSCRIPT11set_commandEPKci +_ZN9RUNSCRIPT13reset_defaultEb +_ZN9RUNSCRIPT21set_job_code_callbackEPFPcP3JCRPKcS0_iE +_ZN9RUNSCRIPT3runEP3JCRPKc +_ZN9RUNSCRIPT5debugEv +_ZN9RUNSCRIPT8is_localEv + +; rwlock.o +_Z11rwl_destroyP12s_rwlock_tag +_Z12is_rwl_validP12s_rwlock_tag +_Z12rwl_readlockP12s_rwlock_tag +_Z14rwl_readunlockP12s_rwlock_tag +_Z15rwl_readtrylockP12s_rwlock_tag +_Z15rwl_writelock_pP12s_rwlock_tagPKci +_Z15rwl_writeunlockP12s_rwlock_tag +_Z16rwl_writetrylockP12s_rwlock_tag +_Z8rwl_initP12s_rwlock_tagi + +; scan.o +_Z10parse_argsPcPS_PiS0_S0_i +_Z11skip_spacesPPc +_Z14skip_nonspacesPPc +_Z15parse_args_onlyPcPS_PiS0_S0_i +_Z19strip_leading_spacePc +_Z19strip_trailing_junkPc +_Z22strip_trailing_newlinePc +_Z22strip_trailing_slashesPc +_Z23split_path_and_filenamePKcPPcPiS2_S3_ 
+_Z7bsscanfPKcS0_z +_Z7fstrschPKcS0_ +_Z8next_argPPc +_Z9next_namePPc + +; sellist.o +_ZN7sellist10set_stringEPKcb +_ZN7sellist17get_expanded_listEv +_ZN7sellist4nextEv + +; serial.o +_Z12serial_btimePPhx +_Z12serial_int16PPhs +_Z12serial_int32PPhi +_Z12serial_int64PPhx +_Z13serial_stringPPhPKc +_Z13serial_uint16PPht +_Z13serial_uint32PPhj +_Z13serial_uint64PPhy +_Z14serial_float64PPhd +_Z14unserial_btimePPh +_Z14unserial_int16PPh +_Z14unserial_int32PPh +_Z14unserial_int64PPh +_Z15unserial_stringPPhPci +_Z15unserial_uint16PPh +_Z15unserial_uint32PPh +_Z15unserial_uint64PPh +_Z16unserial_float64PPh + +; sha1.o +_Z10SHA1UpdateP11SHA1ContextPKhj +_Z8SHA1InitP11SHA1Context +_Z9SHA1FinalP11SHA1ContextPh + +; signal.o + +; smartall.o +_Z10sm_reallocPKciPvj +_Z12actuallyfreePv +_Z12sm_check_rtnPKcib +_Z12sm_new_ownerPKciPc +_Z14actuallycallocjj +_Z14actuallymallocj +_Z15actuallyreallocPvj +_Z7bmemsetPviy +_Z7sm_dumpbb +_Z7sm_freePKciPv +_Z7sprintfPcPKcz +_Z8sm_checkPKcib +_Z9sm_callocPKcijj +_Z9sm_mallocPKcij +_Z9sm_staticb + +; tls.o +_Z14get_tls_enableP11TLS_Context +_Z15get_tls_requireP11TLS_Context +_Z15new_tls_contextPKcS0_S0_S0_PFiPciPKvES3_S0_b +_Z15tls_bsock_probeP9BSOCKCORE +_Z15tls_bsock_readnP5BSOCKPci +_Z16free_tls_contextP11TLS_Context +_Z16tls_bsock_acceptP5BSOCK +_Z16tls_bsock_writenP5BSOCKPci +_Z17tls_bsock_connectP5BSOCK +_Z18new_tls_connectionP11TLS_Contexti +_Z18tls_bsock_shutdownP9BSOCKCORE +_Z19free_tls_connectionP14TLS_Connection +_Z25tls_postconnect_verify_cnP3JCRP14TLS_ConnectionP5alist +_Z27tls_postconnect_verify_hostP3JCRP14TLS_ConnectionPKc + +; tree.o +_Z11tree_relcwdPcP11s_tree_rootP11s_tree_node +_Z12tree_getpathP11s_tree_nodePci +_Z14make_tree_pathPcP11s_tree_root +_Z16insert_tree_nodePcS_iP11s_tree_rootP11s_tree_node +_Z16tree_remove_nodeP11s_tree_rootP11s_tree_node +_Z19tree_add_delta_partP11s_tree_rootP11s_tree_nodeji +_Z8new_treei +_Z8tree_cwdPcP11s_tree_rootP11s_tree_node +_Z9free_treeP11s_tree_root + +; util.o +_Z11bash_spacesPc +_Z11bash_spacesR8POOL_MEM +_Z11encode_modetPc +_Z11encode_timexPc +_Z11is_buf_zeroPKci +_Z13unbash_spacesPc +_Z13unbash_spacesR8POOL_MEM +_Z14edit_job_codesP3JCRPcS1_PKcPFS1_S0_S3_S1_iE +_Z15is_power_of_twoy +_Z15job_type_to_stri +_Z16job_level_to_stri +_Z16make_session_keyPcS_i +_Z17job_status_to_strii +_Z18decode_session_keyPcS_S_i +_Z18do_shell_expansionPci +_Z18encode_session_keyPcS_S_i +_Z18jobstatus_to_asciiiPci +_Z19last_path_separatorPKc +_Z20volume_status_to_strPKc +_Z21set_working_directoryPc +_Z22jobstatus_to_ascii_guiiPci +_Z25action_on_purge_to_stringiR8POOL_MEM +_Z5lcasePc +_Z7hexdumpPKciPcib +_Z7is_nullPKv +_Z7sprintfPcPKcz +_Z8bmemzeroPvy +_Z9asciidumpPKciPci +_Z9smartdumpPKciPciPb + +; var.o +_Z10var_configP6var_stiz +_Z10var_createPP6var_st +_Z10var_expandP6var_stPKciPPcPii +_Z10var_formatP6var_stPPciPKcz +_Z11var_destroyP6var_st +_Z11var_formatvP6var_stPPciPKcS1_ +_Z12var_strerrorP6var_st8var_rc_t +_Z12var_unescapeP6var_stPKciPcii +_Z7sprintfPcPKcz + +; watchdog.o +_Z11is_watchdogv +_Z12new_watchdogv +_Z13stop_watchdogv +_Z14start_watchdogv +_Z17register_watchdogP12s_watchdog_t +_Z19unregister_watchdogP12s_watchdog_t + +; win32filter.o +_ZN11Win32Filter9have_dataEPPcPxS2_ + +; winapi.o +_Z17InitWinAPIWrapperv + +; worker.o +_ZN6worker11finish_workEv +_ZN6worker12release_lockEv +_ZN6worker13discard_queueEv +_ZN6worker13set_run_stateEv +_ZN6worker14set_quit_stateEv +_ZN6worker14set_wait_stateEv +_ZN6worker15pop_free_bufferEv +_ZN6worker16push_free_bufferEPv +_ZN6worker16wait_queue_emptyEv +_ZN6worker4initEi 
+_ZN6worker4stopEv +_ZN6worker4waitEv +_ZN6worker5queueEPv +_ZN6worker5startEPFPvS0_ES0_ +_ZN6worker7dequeueEv +_ZN6worker7destroyEv + +; workq.o +_Z10workq_initP9workq_tagiPFPvS1_E +_Z12workq_removeP9workq_tagP13workq_ele_tag +_Z13workq_destroyP9workq_tag +_Z15workq_wait_idleP9workq_tag +_Z9workq_addP9workq_tagPvPP13workq_ele_tagi + +console_command DATA +b_plugin_list DATA +plugin_bopen DATA +plugin_bclose DATA +plugin_bwrite DATA +plugin_bread DATA +plugin_blseek DATA +exepath DATA +version DATA +dist_name DATA diff --git a/src/win32/lib/make_def32 b/src/win32/lib/make_def32 new file mode 100755 index 00000000..04ec04ab --- /dev/null +++ b/src/win32/lib/make_def32 @@ -0,0 +1,34 @@ +#!/bin/sh +# +# Copyright (C) 2000-2018 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# Make the stupid bacula.def file so that we don't have to do it +# manually +# +# Kern Sibbald, June 2007 +# + +TOPDIR=`(cd ../../..;pwd)` +TOPDIR=${DEPKGS:-${TOPDIR}} +NM=i686-w64-mingw32-nm + +echo "LIBRARY bacula.dll" +echo "EXPORTS" +echo " " + +cd obj32 +# remove "static" objects before running +rm -f parse_conf.* res.* + +for i in *.o ; do \ + echo "; $i"; \ + ${NM} $i | grep "^[0-9a-f]* T _" | cut -c13- ; \ + echo " "; \ +done + +DATA="console_command b_plugin_list plugin_bopen plugin_bclose plugin_bwrite plugin_bread plugin_blseek exepath version dist_name" + +for i in ${DATA}; do \ + echo "$i DATA"; \ +done diff --git a/src/win32/lib/make_def64 b/src/win32/lib/make_def64 new file mode 100755 index 00000000..9a8682cd --- /dev/null +++ b/src/win32/lib/make_def64 @@ -0,0 +1,34 @@ +#!/bin/sh +# +# Copyright (C) 2000-2018 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# Make the stupid bacula.def file so that we don't have to do it +# manually +# +# Kern Sibbald, June 2007 +# + +TOPDIR=`(cd ../../..;pwd)` +TOPDIR=${DEPKGS:-${TOPDIR}} +NM=x86_64-w64-mingw32-nm + +echo "LIBRARY bacula.dll" +echo "EXPORTS" +echo " " + +cd obj64 +# remove "static" objects before running +rm -f parse_conf.* res.* + +for i in `ls *.o | grep -v -e parse_conf.o -e res.o`; do \ + echo "; $i"; \ + ${NM} $i | grep -e "^[0-9a-f]* T _" -e "^[0-9a-f]* T b_reg" -e "^[0-9a-f]* T fnmatch" | cut -c20- ; \ + echo " "; \ +done + +DATA="console_command b_plugin_list plugin_bopen plugin_bclose plugin_bwrite plugin_bread plugin_blseek exepath version dist_name" + +for i in ${DATA}; do \ + echo "$i DATA"; \ +done diff --git a/src/win32/libbac/Makefile b/src/win32/libbac/Makefile new file mode 100644 index 00000000..49f2dd99 --- /dev/null +++ b/src/win32/libbac/Makefile @@ -0,0 +1,112 @@ +# +# Makefile for win32 bacula executables +# Using MinGW cross-compiler on GNU/Linux +# +# Written for Bacula by Howard Thomson, April 2006 +# for building using cross-complilation. 
+# + +include ../Makefile.inc + +INCLUDES = \ + $(INCLUDE_GCC) \ + $(INCLUDE_MINGW) \ + $(INCLUDE_PTHREADS) \ + $(INCLUDE_BACULA) \ + $(INCLUDE_ZLIB) \ + $(INCLUDE_VSS) \ + $(INCLUDE_ICONS) \ + $(INCLUDE_OPENSSL) + +DEFINES = \ + -DWIN32 \ + $(HAVES) + +###################################################################### + +# Files files in src/lib + +LIB_OBJS = \ + $(OBJDIR)/address_conf.o \ + $(OBJDIR)/alist.o \ + $(OBJDIR)/attr.o \ + $(OBJDIR)/base64.o \ + $(OBJDIR)/berrno.o \ + $(OBJDIR)/bget_msg.o \ + $(OBJDIR)/bnet.o \ + $(OBJDIR)/bnet_server.o \ + $(OBJDIR)/bpipe.o \ + $(OBJDIR)/bregex.o \ + $(OBJDIR)/bshm.o \ + $(OBJDIR)/bsnprintf.o \ + $(OBJDIR)/bsock.o \ + $(OBJDIR)/bsockcore.o \ + $(OBJDIR)/bsys.o \ + $(OBJDIR)/btime.o \ + $(OBJDIR)/btimers.o \ + $(OBJDIR)/cram-md5.o \ + $(OBJDIR)/crc32.o \ + $(OBJDIR)/crypto.o \ + $(OBJDIR)/daemon.o \ + $(OBJDIR)/dlist.o \ + $(OBJDIR)/edit.o \ + $(OBJDIR)/enh_fnmatch.o \ + $(OBJDIR)/fnmatch.o \ + $(OBJDIR)/hmac.o \ + $(OBJDIR)/htable.o \ + $(OBJDIR)/idcache.o \ + $(OBJDIR)/jcr.o \ + $(OBJDIR)/lex.o \ + $(OBJDIR)/md5.o \ + $(OBJDIR)/mem_pool.o \ + $(OBJDIR)/message.o \ + $(OBJDIR)/openssl.o \ + $(OBJDIR)/parse_conf.o \ + $(OBJDIR)/pythonlib.o \ + $(OBJDIR)/queue.o \ + $(OBJDIR)/rblist.o \ + $(OBJDIR)/res.o \ + $(OBJDIR)/runscript.o \ + $(OBJDIR)/rwlock.o \ + $(OBJDIR)/scan.o \ + $(OBJDIR)/serial.o \ + $(OBJDIR)/sha1.o \ + $(OBJDIR)/signal.o \ + $(OBJDIR)/smartall.o \ + $(OBJDIR)/tls.o \ + $(OBJDIR)/tree.o \ + $(OBJDIR)/util.o \ + $(OBJDIR)/var.o \ + $(OBJDIR)/watchdog.o \ + $(OBJDIR)/winapi.o \ + $(OBJDIR)/workq.o \ + $(OBJDIR)/lockmgr.o + +# $(LIBDIR)/events.o + +###################################################################### + +# Targets + +.PHONY: all clean + +all: $(LIBDIR)/libbac.a + +clean: + @echo "Cleaning `pwd`" + $(ECHO_CMD)rm -f $(OBJDIR)/*.[od] $(LIBDIR)/libbac.a + +# +# Rules +# + +$(LIBDIR)/libbac.a: $(LIB_OBJS) + @echo "Updating archive $@" + $(call checkdir,$@) + $(ECHO_CMD)$(AR) rs $@ $^ + +include ../Makefile.rules + +ifneq ($(MAKECMDGOALS),clean) +include $(patsubst %.o,%.d,$(filter-out %.res,$(LIB_OBJS))) +endif diff --git a/src/win32/libbac/libbac.vcproj b/src/win32/libbac/libbac.vcproj new file mode 100644 index 00000000..d8e1c6c8 --- /dev/null +++ b/src/win32/libbac/libbac.vcproj @@ -0,0 +1,1733 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/win32/libbac/msvc/bacula.def b/src/win32/libbac/msvc/bacula.def new file mode 100644 index 00000000..86830d1d --- /dev/null +++ b/src/win32/libbac/msvc/bacula.def @@ -0,0 +1,510 @@ +LIBRARY bacula.dll +EXPORTS + +; address_conf.obj +?get_port_net_order@IPADDR@@QBEGXZ +?get_address@IPADDR@@QAEPBDPADH@Z +?get_first_address@@YAPBDPAVdlist@@PADH@Z +?get_first_port_host_order@@YAHPAVdlist@@@Z +?init_default_addresses@@YAXPAPAVdlist@@H@Z +?store_addresses@@YAXPAUs_lex_context@@PAURES_ITEM@@HH@Z +?store_addresses_address@@YAXPAUs_lex_context@@PAURES_ITEM@@HH@Z +?store_addresses_port@@YAXPAUs_lex_context@@PAURES_ITEM@@HH@Z +?free_addresses@@YAXPAVdlist@@@Z +?sockaddr_get_port_net_order@@YAHPBUsockaddr@@@Z +?sockaddr_to_ascii@@YAPADPBUsockaddr@@PADH@Z + +; alist.obj +?first@alist@@QAEPAXXZ +?next@alist@@QAEPAXXZ +?prepend@alist@@QAEXPAX@Z +?append@alist@@QAEXPAX@Z +?remove@alist@@QAEPAXH@Z +?get@alist@@QAEPAXH@Z +?destroy@alist@@QAEXXZ + +; attr.obj +?new_attr@@YAPAUATTR@@XZ +?free_attr@@YAXPAUATTR@@@Z +?unpack_attributes_record@@YAHPAVJCR@@JPADPAUATTR@@@Z +?build_attr_output_fnames@@YAXPAVJCR@@PAUATTR@@@Z +?print_ls_output@@YAXPAVJCR@@PAUATTR@@@Z + +; attribs.obj +?select_data_stream@@YAHPAUFF_PKT@@@Z +?encode_stat@@YAXPADPAUFF_PKT@@H@Z +?decode_stat@@YAHPADPAUstat@@PAJ@Z +?decode_LinkFI@@YAJPADPAUstat@@@Z +?set_attributes@@YA_NPAVJCR@@PAUATTR@@PAUBFILE@@@Z +?encode_attribsEx@@YAHPAVJCR@@PADPAUFF_PKT@@@Z + +; base64.obj +?bin_to_base64@@YAHPADH0HH@Z + +; berrno.obj +?strerror@berrno@@QAEPBDXZ +?format_win32_message@berrno@@AAEXXZ + +; bfile.obj +?is_win32_stream@@YA_NH@Z +?stream_to_ascii@@YAPBDH@Z +?processWin32BackupAPIBlock@@YA_NPAUBFILE@@PAXH@Z +?binit@@YAXPAUBFILE@@@Z +?set_portable_backup@@YA_NPAUBFILE@@@Z +?set_prog@@YA_NPAUBFILE@@PADPAVJCR@@@Z +?is_portable_backup@@YA_NPAUBFILE@@@Z +?have_win32_api@@YA_NXZ +?is_restore_stream_supported@@YA_NH@Z +?bopen@@YAHPAUBFILE@@PBDHI@Z +?bclose@@YAHPAUBFILE@@@Z +?bread@@YAHPAUBFILE@@PAXI@Z +?bwrite@@YAHPAUBFILE@@PAXI@Z +?is_bopen@@YA_NPAUBFILE@@@Z +?blseek@@YA_JPAUBFILE@@_JH@Z + +; bget_msg.obj +?bget_msg@@YAHPAVBSOCK@@@Z + +; bnet.obj +?bnet_recv@@YAJPAVBSOCK@@@Z +?is_bnet_stop@@YA_NPAVBSOCK@@@Z +?is_bnet_error@@YAHPAVBSOCK@@@Z +?bnet_suppress_error_messages@@YAXPAVBSOCK@@_N@Z +?bnet_despool_to_bsock@@YAHPAVBSOCK@@P6AXH@ZH@Z +?bnet_send@@YA_NPAVBSOCK@@@Z +?bnet_tls_server@@YA_NPAUTLS_Context@@PAVBSOCK@@PAValist@@@Z +?bnet_tls_client@@YA_NPAUTLS_Context@@PAVBSOCK@@@Z +?bnet_wait_data@@YAHPAVBSOCK@@H@Z +?bnet_wait_data_intr@@YAHPAVBSOCK@@H@Z +?bnet_connect@@YAPAVBSOCK@@PAVJCR@@HHPBDPAD2HH@Z +?bnet_strerror@@YAPBDPAVBSOCK@@@Z +?bnet_fsend@@YA_NPAVBSOCK@@PBDZZ +?bnet_get_peer@@YAHPAVBSOCK@@PADH@Z +?bnet_set_buffer_size@@YA_NPAVBSOCK@@IH@Z +?bnet_sig@@YA_NPAVBSOCK@@H@Z +?bnet_sig_to_ascii@@YAPBDPAVBSOCK@@@Z +?bnet_close@@YAXPAVBSOCK@@@Z +?term_bsock@@YAXPAVBSOCK@@@Z + +; bnet_server.obj +?bnet_stop_thread_server@@YAXPAUpthread_t_@@@Z +?bnet_thread_server@@YAXPAVdlist@@HPAUworkq_tag@@P6APAXPAX@Z@Z + +; bpipe.obj +?run_program@@YAHPADH0@Z +?run_program_full_output@@YAHPADH0@Z + +; bregex.obj +b_regcomp +b_regexec +b_regerror +b_regfree + +; bsnprintf.obj +?bsnprintf@@YAHPADJPBDZZ +?bvsnprintf@@YAHPADJPBD0@Z + +; bsock.obj +?close@BSOCK@@QAEXXZ +?dup_bsock@@YAPAVBSOCK@@PAV1@@Z +?fsend@BSOCK@@QAA_NPBDZZ +?init_bsock@@YAPAVBSOCK@@PAVJCR@@HPBD1HPAUsockaddr@@@Z +?recv@BSOCK@@QAEJXZ 
+?send@BSOCK@@QAE_NXZ +?signal@BSOCK@@QAE_NH@Z + +; bsys.obj +?bmicrosleep@@YAHJJ@Z +?bstrncpy@@YAPADPADPBDH@Z +?bstrncpy@@YAPADPADAAVPOOL_MEM@@H@Z +?bstrncat@@YAPADPADPBDH@Z +?bstrcmp@@YA_NPBD0@Z +?cstrlen@@YAHPBD@Z +?bfree@@YAXPAX@Z +?b_malloc@@YAPAXPBDHI@Z +?brealloc@@YAPAXPAXI@Z +?_p@@YAXPAPAUpthread_mutex_t_@@@Z +?_v@@YAXPAPAUpthread_mutex_t_@@@Z +?create_pid_file@@YAXPADPBDH@Z +?delete_pid_file@@YAHPADPBDH@Z +?read_state_file@@YAXPADPBDH@Z +?write_state_file@@YAXPADPBDH@Z +?drop@@YAXPAD0@Z +?bfgets@@YAPADPADHPAU_iobuf@@@Z +?escape_filename@@YAPADPBD@Z + +; btime.obj +?bstrftime@@YAPADPADH_J@Z +?bstrftimes@@YAPADPADH_J@Z +?bstrftime_nc@@YAPADPADH_J@Z +?bstrutime@@YAPADPADH_J@Z +?str_to_utime@@YA_JPAD@Z +?get_current_btime@@YA_JXZ +?btime_to_unix@@YAJ_J@Z +?btime_to_utime@@YA_J_J@Z +?tm_woy@@YAHJ@Z +?get_current_time@@YAXPAUdate_time@@@Z +?tm_decode@@YAXPAUdate_time@@PAUtm@@@Z + +; btimers.obj +?start_thread_timer@@YAPAUs_btimer_t@@PAUpthread_t_@@I@Z +?start_bsock_timer@@YAPAUs_btimer_t@@PAVBSOCK@@I@Z +?stop_bsock_timer@@YAXPAUs_btimer_t@@@Z +?stop_thread_timer@@YAXPAUs_btimer_t@@@Z + +; compat.obj +?SetVSSPathConvert@@YAXP6AHPBDPADH@ZP6AHPB_WPA_WH@Z@Z +?wchar_2_UTF8@@YAHPADPB_WH@Z +?random@@YAJXZ +?srandom@@YAXI@Z +?fstat@@YAHHPAUstat@@@Z +?stat@@YAHPBDPAU0@@Z +?fcntl@@YAHHHJ@Z +?lstat@@YAHPBDPAUstat@@@Z +?sleep@@YAXH@Z +?strcasecmp@@YAHPBD0@Z +?gettimeofday@@YAHPAUtimeval@@PAUtimezone@@@Z +?opendir@@YAPAXPBD@Z +?closedir@@YAHPAX@Z +?readdir_r@@YAHPAXPAUdirent@@PAPAU1@@Z +?init_signals@@YAXP6AXH@Z@Z +?init_stack_dump@@YAXXZ +?pathconf@@YAJPBDH@Z +?WSA_Init@@YAHXZ +?win32_fputs@@YAHPBDPAU_iobuf@@@Z +?win32_cgets@@YAPADPADH@Z +?win32_unlink@@YAHPBD@Z +?open_bpipe@@YAPAVBPIPE@@PADHPBD@Z +?kill@@YAHHH@Z +?close_bpipe@@YAHPAVBPIPE@@@Z +?utime@@YAHPBDPAUutimbuf@@@Z + +; cram-md5.obj +?cram_md5_challenge@@YA_NPAVBSOCK@@PADHH@Z +?cram_md5_respond@@YA_NPAVBSOCK@@PADPAH2@Z + +; crc32.obj +?bcrc32@@YAIPAEH@Z + +;create_file.obj +?create_file@@YAHPAVJCR@@PAUATTR@@PAUBFILE@@H@Z + +; crypto.obj +?crypto_digest_new@@YAPAUDigest@@W4crypto_digest_t@@@Z +?crypto_digest_update@@YA_NPAUDigest@@PBEI@Z +?crypto_digest_finalize@@YA_NPAUDigest@@PAEPAI@Z +?crypto_digest_free@@YAXPAUDigest@@@Z +?init_crypto@@YAHXZ +?cleanup_crypto@@YAHXZ +?crypto_sign_new@@YAPAUSignature@@XZ +?crypto_sign_get_digest@@YA?AW4crypto_error_t@@PAUSignature@@PAUX509_Keypair@@PAPAUDigest@@@Z +?crypto_sign_verify@@YA?AW4crypto_error_t@@PAUSignature@@PAUX509_Keypair@@PAUDigest@@@Z +?crypto_sign_add_signer@@YAHPAUSignature@@PAUDigest@@PAUX509_Keypair@@@Z +?crypto_sign_encode@@YAHPAUSignature@@PAEPAI@Z +?crypto_sign_decode@@YAPAUSignature@@PBEI@Z +?crypto_sign_free@@YAXPAUSignature@@@Z +?crypto_keypair_new@@YAPAUX509_Keypair@@XZ +?crypto_keypair_dup@@YAPAUX509_Keypair@@PAU1@@Z +?crypto_keypair_load_cert@@YAHPAUX509_Keypair@@PBD@Z +?crypto_keypair_has_key@@YA_NPBD@Z +?crypto_keypair_load_key@@YAHPAUX509_Keypair@@PBDP6AHPADHPBX@Z3@Z +?crypto_keypair_free@@YAXPAUX509_Keypair@@@Z +?crypto_session_new@@YAPAUCrypto_Session@@W4crypto_cipher_t@@PAValist@@@Z +?crypto_session_free@@YAXPAUCrypto_Session@@@Z +?crypto_session_encode@@YA_NPAUCrypto_Session@@PAEPAI@Z +?crypto_session_decode@@YA?AW4crypto_error_t@@PBEIPAValist@@PAPAUCrypto_Session@@@Z +?crypto_cipher_new@@YAPAUCipher_Context@@PAUCrypto_Session@@_NPAI@Z +?crypto_cipher_update@@YA_NPAUCipher_Context@@PBEI1PAI@Z +?crypto_cipher_finalize@@YA_NPAUCipher_Context@@PAEPAI@Z +?crypto_cipher_free@@YAXPAUCipher_Context@@@Z +?crypto_default_pem_callback@@YAHPADHPBX@Z 
+?crypto_digest_name@@YAPBDPAUDigest@@@Z +?crypto_digest_stream_type@@YA?AW4crypto_digest_t@@H@Z +?crypto_strerror@@YAPBDW4crypto_error_t@@@Z + +; daemon.obj +?daemon_start@@YAXXZ + +; dlist.obj +?append@dlist@@QAEXPAX@Z +?prepend@dlist@@QAEXPAX@Z +?insert_before@dlist@@QAEXPAX0@Z +?binary_insert@dlist@@QAEPAXPAXP6AH00@Z@Z +?first@dlist@@QBEPAXXZ +?last@dlist@@QBEPAXXZ +?binary_insert_multiple@dlist@@QAEXPAXP6AH00@Z@Z +?binary_search@dlist@@QAEPAXPAXP6AH00@Z@Z +?remove@dlist@@QAEXPAX@Z +?next@dlist@@QAEPAXPAX@Z +?destroy@dlist@@QAEXXZ +?new_dlistString@@YAPAVdlistString@@PBD@Z + +; drivetype.obj +?drivetype@@YA_NPBDPADH@Z + +; edit.obj +?str_to_uint64@@YA_KPAD@Z +?str_to_int64@@YA_JPAD@Z +?edit_uint64_with_commas@@YAPAD_KPAD@Z +?edit_uint64_with_suffix@@YAPAD_KPAD@Z +?edit_uint64@@YAPAD_KPAD@Z +?edit_int64@@YAPAD_JPAD@Z +?duration_to_utime@@YA_NPADPA_J@Z +?edit_utime@@YAPAD_JPADH@Z +?size_to_uint64@@YA_NPADHPA_K@Z +?is_a_number@@YA_NPBD@Z +?is_an_integer@@YA_NPBD@Z +?is_name_valid@@YA_NPADPAPAD@Z +?add_commas@@YAPADPAD0@Z + +; enable_priv.obj +?enable_backup_privileges@@YAHPAVJCR@@H@Z + +; enh_fnmatch.obj + +; find.obj +?init_find_files@@YAPAUFF_PKT@@XZ +?set_find_options@@YAXPAUFF_PKT@@HJ@Z +?get_win32_driveletters@@YAHPAUFF_PKT@@PAD@Z +?find_files@@YAHPAVJCR@@PAUFF_PKT@@P6AH1PAX_N@Z2@Z +?term_find_files@@YAHPAUFF_PKT@@@Z + +; find_one.obj +?find_one_file@@YAHPAVJCR@@PAUFF_PKT@@P6AH1PAX_N@Z2PADI3@Z + +; fnmatch.obj +fnmatch + +; fstype.obj +?fstype@@YA_NPBDPADH@Z + +; hmac.obj +?hmac_md5@@YAXPAEH0H0@Z + +; htable.obj + +; idcache.obj +?getuser@@YAPADIPADH@Z +?getgroup@@YAPADIPADH@Z + +; jcr.obj +?lock_jobs@@YAXXZ +?unlock_jobs@@YAXXZ +?term_last_jobs_list@@YAXXZ +?lock_last_jobs_list@@YAXXZ +?unlock_last_jobs_list@@YAXXZ +?job_end_push@@YAXPAVJCR@@P6AX0PAX@Z1@Z +?new_jcr@@YAPAVJCR@@HP6AXPAV1@@Z@Z +?inc_use_count@JCR@@QAEXXZ +?lock@JCR@@QAEXXZ +?unlock@JCR@@QAEXXZ +?init_mutex@JCR@@QAEXXZ +?b_free_jcr@@YAXPBDHPAVJCR@@@Z +?dec_use_count@JCR@@QAEXXZ +?use_count@JCR@@QAEHXZ +?destroy_mutex@JCR@@QAEXXZ +?get_jcr_by_id@@YAPAVJCR@@I@Z +?get_jcr_by_session@@YAPAVJCR@@II@Z +?get_jcr_by_partial_name@@YAPAVJCR@@PAD@Z +?get_jcr_by_full_name@@YAPAVJCR@@PAD@Z +?set_jcr_job_status@@YAXPAVJCR@@H@Z +?jcr_walk_start@@YAPAVJCR@@XZ +?jcr_walk_next@@YAPAVJCR@@PAV1@@Z +?jcr_walk_end@@YAXPAVJCR@@@Z +?init_jcr_subsystem@@YA_NXZ + +; lex.obj +?scan_to_eol@@YAXPAUs_lex_context@@@Z +?lex_set_default_error_handler@@YAXPAUs_lex_context@@@Z +?lex_set_error_handler_error_type@@YAHPAUs_lex_context@@H@Z +?lex_close_file@@YAPAUs_lex_context@@PAU1@@Z +?lex_open_file@@YAPAUs_lex_context@@PAU1@PBDP6AX1H01ZZ@Z +?lex_tok_to_str@@YAPBDH@Z +?lex_get_token@@YAHPAUs_lex_context@@H@Z + +; makepath.obj + +; match.obj +?match_files@@YAHPAVJCR@@PAUFF_PKT@@P6AH1PAX_N@Z2@Z +?term_include_exclude_files@@YAXPAUFF_PKT@@@Z +?add_fname_to_include_list@@YAXPAUFF_PKT@@HPBD@Z +?add_fname_to_exclude_list@@YAXPAUFF_PKT@@PBD@Z +?file_is_included@@YAHPAUFF_PKT@@PBD@Z +?file_is_excluded@@YAHPAUFF_PKT@@PBD@Z + +; md5.obj +?MD5Init@@YAXPAUMD5Context@@@Z +?MD5Update@@YAXPAUMD5Context@@PAEI@Z +?MD5Final@@YAXQAEPAUMD5Context@@@Z + +; mem_pool.obj +?sm_get_pool_memory@@YAPADPBDHH@Z +?sm_get_memory@@YAPADPBDHJ@Z +?sm_sizeof_pool_memory@@YAJPBDHPAD@Z +?sm_realloc_pool_memory@@YAPADPBDHPADJ@Z +?sm_check_pool_memory_size@@YAPADPBDHPADJ@Z +?sm_free_pool_memory@@YAXPBDHPAD@Z +?close_memory_pool@@YAXXZ +?print_memory_pool_stats@@YAXXZ +?pm_strcat@@YAHPAPADPBD@Z +?pm_strcat@@YAHAAPADPBD@Z +?c_str@POOL_MEM@@QBEPADXZ +?pm_strcat@@YAHAAVPOOL_MEM@@PBD@Z 
+?check_size@POOL_MEM@@QAEPADJ@Z +?pm_strcpy@@YAHPAPADPBD@Z +?pm_strcpy@@YAHAAPADPBD@Z +?pm_strcpy@@YAHAAPADAAVPOOL_MEM@@@Z +?pm_strcpy@@YAHAAVPOOL_MEM@@PBD@Z +?max_size@POOL_MEM@@QAEJXZ + +; message.obj +?my_name_is@@YAXHQAPADPBD@Z +?init_msg@@YAXPAVJCR@@PAVMSGS@@@Z +?init_console_msg@@YAXPBD@Z +?add_msg_dest@@YAXPAVMSGS@@HHPAD1@Z +?rem_msg_dest@@YAXPAVMSGS@@HHPAD@Z +?close_msg@@YAXPAVJCR@@@Z +?set_errno@berrno@@QAEXH@Z +?free_msgs_res@@YAXPAVMSGS@@@Z +?term_msg@@YAXXZ +?dispatch_message@@YAXPAVJCR@@HJPAD@Z +?c_str@POOL_MEM@@QBEPADXZ +?d_msg@@YAXPBDHH0ZZ +?get_basename@@YAPBDPBD@Z +?set_trace@@YAXH@Z +?get_trace@@YA_NXZ +?p_msg@@YAXPBDHH0ZZ +?e_msg@@YAXPBDHHH0ZZ +?Jmsg@@YAXPAVJCR@@HJPBDZZ +?j_msg@@YAXPBDHPAVJCR@@HJ0ZZ +?m_msg@@YAHPBDHPAPAD0ZZ +?m_msg@@YAHPBDHAAPAD0ZZ +?Mmsg@@YAHPAPADPBDZZ +?Mmsg@@YAHAAPADPBDZZ +?Mmsg@@YAHAAVPOOL_MEM@@PBDZZ +?Qmsg@@YAXPAVJCR@@HJPBDZZ +?dequeue_messages@@YAXPAVJCR@@@Z +?q_msg@@YAXPBDHPAVJCR@@HJ0ZZ + +; print.obj + +; pythonlib.obj +?generate_daemon_event@@YAHPAVJCR@@PBD@Z +?init_python_interpreter@@YAXPBD00@Z +?term_python_interpreter@@YAXXZ + +; queue.obj +?qinsert@@YAXPAUb_queue@@0@Z +?qnext@@YAPAUb_queue@@PAU1@0@Z +?qdchain@@YAPAUb_queue@@PAU1@@Z + +; runscript.obj +?new_runscript@@YAPAVRUNSCRIPT@@XZ +?reset_default@RUNSCRIPT@@QAEX_N@Z +?copy_runscript@@YAPAVRUNSCRIPT@@PAV1@@Z +?free_runscript@@YAXPAVRUNSCRIPT@@@Z +?run_scripts@@YAHPAVJCR@@PAValist@@PBD@Z +?set_command@RUNSCRIPT@@QAEXPBD@Z +?set_target@RUNSCRIPT@@QAEXPBD@Z +?run@RUNSCRIPT@@QAEHPAVJCR@@PBD@Z +?free_runscripts@@YAXPAValist@@@Z +?debug@RUNSCRIPT@@QAEXXZ + +; rwlock.obj +?rwl_init@@YAHPAUs_rwlock_tag@@@Z +?rwl_destroy@@YAHPAUs_rwlock_tag@@@Z +?rwl_writelock@@YAHPAUs_rwlock_tag@@@Z +?rwl_writeunlock@@YAHPAUs_rwlock_tag@@@Z + +; save-cwd.obj + +; scan.obj +?strip_leading_space@@YAXPAD@Z +?strip_trailing_junk@@YAXPAD@Z +?strip_trailing_newline@@YAXPAD@Z +?skip_spaces@@YA_NPAPAD@Z +?skip_nonspaces@@YA_NPAPAD@Z +?fstrsch@@YAHPBD0@Z +?next_arg@@YAPADPAPAD@Z +?parse_args@@YAHPADPAPADPAH11H@Z +?parse_args_only@@YAHPADPAPADPAH11H@Z +?bsscanf@@YAHPBD0ZZ + +; serial.obj +?serial_int32@@YAXQAPAEJ@Z +?serial_uint32@@YAXQAPAEI@Z +?serial_uint64@@YAXQAPAE_K@Z +?serial_btime@@YAXQAPAE_J@Z +?serial_float64@@YAXQAPAEN@Z +?serial_string@@YAXQAPAEQBD@Z +?unserial_int32@@YAJQAPAE@Z +?unserial_uint32@@YAIQAPAE@Z +?unserial_uint64@@YA_KQAPAE@Z +?unserial_btime@@YA_JQAPAE@Z +?unserial_float64@@YANQAPAE@Z +?unserial_string@@YAXQAPAEQAD@Z + +; sha1.obj + +; signal.obj + +; smartall.obj +?sm_free@@YAXPBDHPAX@Z +?sm_malloc@@YAPAXPBDHI@Z +?sm_calloc@@YAPAXPBDHII@Z +?sm_realloc@@YAPAXPBDHPAXI@Z +?sm_dump@@YAX_N@Z +?sm_check@@YAXPBDH_N@Z +?sm_static@@YAXH@Z + +; tls.obj +?new_tls_context@@YAPAUTLS_Context@@PBD000P6AHPADHPBX@Z20_N@Z +?free_tls_context@@YAXPAUTLS_Context@@@Z + +; tree.obj +?new_tree@@YAPAUs_tree_root@@H@Z +?free_tree@@YAXPAUs_tree_root@@@Z +?insert_tree_node@@YAPAUs_tree_node@@PAD0HPAUs_tree_root@@PAU1@@Z +?strrchr@@YAPADPADH@Z +?tree_getpath@@YAHPAUs_tree_node@@PADH@Z +?tree_cwd@@YAPAUs_tree_node@@PADPAUs_tree_root@@PAU1@@Z +; rblist +?insert@rblist@@QAEPAXPAXP6AH00@Z@Z +?next@rblist@@QAEPAXPAX@Z + +; util.obj +?is_buf_zero@@YA_NPADH@Z +?lcase@@YAXPAD@Z +?bash_spaces@@YAXPAD@Z +?bash_spaces@@YAXAAVPOOL_MEM@@@Z +?unbash_spaces@@YAXPAD@Z +?unbash_spaces@@YAXAAVPOOL_MEM@@@Z +?encode_time@@YAPADJPAD@Z +?jobstatus_to_ascii@@YAXHPADH@Z +?job_status_to_str@@YAPBDH@Z +?job_type_to_str@@YAPBDH@Z +?job_level_to_str@@YAPBDH@Z +?encode_mode@@YAPADIPAD@Z +?do_shell_expansion@@YAHPADH@Z +?make_session_key@@YAXPAD0H@Z 
+?edit_job_codes@@YAPADPAVJCR@@PAD1PBD@Z +?set_working_directory@@YAXPAD@Z + +; var.obj +?var_create@@YA?AW4var_rc_t@@PAPAUvar_st@@@Z +?var_destroy@@YA?AW4var_rc_t@@PAUvar_st@@@Z +?var_config@@YA?AW4var_rc_t@@PAUvar_st@@W4var_config_t@@ZZ +?var_unescape@@YA?AW4var_rc_t@@PAUvar_st@@PBDHPADHH@Z +?var_expand@@YA?AW4var_rc_t@@PAUvar_st@@PBDHPAPADPAHH@Z +?var_strerror@@YAPBDPAUvar_st@@W4var_rc_t@@@Z + +; watchdog.obj +?start_watchdog@@YAHXZ +?stop_watchdog@@YAHXZ +?new_watchdog@@YAPAUs_watchdog_t@@XZ +?register_watchdog@@YA_NPAUs_watchdog_t@@@Z + +; winapi.obj +?InitWinAPIWrapper@@YAXXZ + +; workq.obj diff --git a/src/win32/libwin32/aboutDialog.cpp b/src/win32/libwin32/aboutDialog.cpp new file mode 100644 index 00000000..aa5fcb3b --- /dev/null +++ b/src/win32/libwin32/aboutDialog.cpp @@ -0,0 +1,71 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2019 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * Kern Sibbald, August 2007 + * + * +*/ + +#include "bacula.h" +#include "win32.h" + +static BOOL CALLBACK DialogProc(HWND hwnd, UINT uMsg, WPARAM wParam, LPARAM lParam) +{ + /* Get the dialog class pointer from USERDATA */ + aboutDialog *about; + + switch (uMsg) { + case WM_INITDIALOG: + /* save the dialog class pointer */ + SetWindowLong(hwnd, GWL_USERDATA, lParam); + about = (aboutDialog *)lParam; + + /* Show the dialog */ + SetForegroundWindow(hwnd); + about->m_visible = true; + return true; + + case WM_COMMAND: + switch (LOWORD(wParam)) { + case IDCANCEL: + case IDOK: + EndDialog(hwnd, true); + about = (aboutDialog *)GetWindowLong(hwnd, GWL_USERDATA); + about->m_visible = false; + return true; + } + break; + + case WM_DESTROY: + EndDialog(hwnd, false); + about = (aboutDialog *)GetWindowLong(hwnd, GWL_USERDATA); + about->m_visible = false; + return true; + } + return false; +} + +void aboutDialog::show(bool show) +{ + if (show && !m_visible) { + DialogBoxParam(appInstance, MAKEINTRESOURCE(IDD_ABOUT), NULL, + (DLGPROC)DialogProc, (LPARAM)this); + } +} diff --git a/src/win32/libwin32/aboutDialog.h b/src/win32/libwin32/aboutDialog.h new file mode 100644 index 00000000..63bf0795 --- /dev/null +++ b/src/win32/libwin32/aboutDialog.h @@ -0,0 +1,39 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2019 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Kern Sibbald, August 2007 + * + */ + +#ifndef __ABOUT_DIALOG_H_ +#define __ABOUT_DIALOG_H_ 1 + +/* Define the aboutDialog class */ +class aboutDialog +{ +public: + aboutDialog() { m_visible =false; }; + ~aboutDialog() { }; + + void show(bool show); + + bool m_visible; +}; + +#endif /* __ABOUT_H_ */ diff --git a/src/win32/libwin32/bacula.bmp b/src/win32/libwin32/bacula.bmp new file mode 100644 index 00000000..a31eb163 Binary files /dev/null and b/src/win32/libwin32/bacula.bmp differ diff --git a/src/win32/libwin32/bacula.ico b/src/win32/libwin32/bacula.ico new file mode 100644 index 00000000..f6d50bf5 Binary files /dev/null and b/src/win32/libwin32/bacula.ico differ diff --git a/src/win32/libwin32/bacula.rc b/src/win32/libwin32/bacula.rc new file mode 100644 index 00000000..7bbbb8a2 --- /dev/null +++ b/src/win32/libwin32/bacula.rc @@ -0,0 +1,171 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2019 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Generic version of Bacula resource + * + */ + +#include +#include +#include "res.h" +#include "../../version.h" + +/******************************************************************** + * + * Icons + * + * Icon with lowest ID value placed first to ensure application icon + * remains consistent on all systems. 
+ */ +IDI_BACULA ICON "../libwin32/bacula.ico" +IDI_IDLE ICON "../libwin32/idle.ico" +IDI_RUNNING ICON "../libwin32/running.ico" +IDI_JOB_ERROR ICON "../libwin32/error.ico" +IDI_JOB_WARNING ICON "../libwin32/warn.ico" + +/******************************************************************** + * + * Menu + * + */ +IDR_TRAYMENU MENU +BEGIN + POPUP "tray" + BEGIN + MENUITEM "&Status", ID_STATUS + MENUITEM SEPARATOR + MENUITEM "&About Bacula", ID_ABOUT +// MENUITEM SEPARATOR +// MENUITEM "&Close Bacula", ID_CLOSE + END +END + + +/******************************************************************** + * + * Version + * + +VS_VERSION_INFO VERSIONINFO + FILEVERSION 1,1,0,0 + PRODUCTVERSION 1,1,0,0 + FILEFLAGSMASK VS_FFI_FILEFLAGSMASK +#ifdef _DEBUG + FILEFLAGS 0x1L +#else + FILEFLAGS 0 +#endif + FILEOS VOS_NT_WINDOWS32 + FILETYPE VFT_APP + FILESUBTYPE 0 +BEGIN + BLOCK "StringFileInfo" + BEGIN + BLOCK "040904E0" // Lang=US English, CharSet=Windows Multiligual + BEGIN + VALUE "Comments", "by Kern Sibbald\0" + VALUE "CompanyName", "Bacula\0" +#ifdef _WIN64 + VALUE "FileDescription", "Bacula File daemon for Win64\0" + VALUE "ProductName", "Bacula(R) - Win64 Version\0" +#else + VALUE "FileDescription", "Bacula File daemon for Win32\0" + VALUE "ProductName", "Bacula(R) - Win32 Version\0" +#endif + VALUE "FileVersion", VERSION "\0" + VALUE "InternalName", "Bacula\0" + VALUE "LegalCopyright", "Copyright Kern Sibbald, 2000-2019\0" + VALUE "LegalTrademarks", "Bacula(R)\0" + VALUE "OriginalFilename", "bacula-fd.exe\0" + VALUE "PrivateBuild", "\0" + VALUE "ProductVersion", VERSION + VALUE "SpecialBuild", "\0" + END + END + BLOCK "VarFileInfo" + BEGIN + VALUE "Translation", 0x409, 1252 // US English, Multilingual + END +END + + +/*********************************************************************** + * + * Bitmap + * + */ +IDB_BACULABMP BITMAP DISCARDABLE "../libwin32/bacula.bmp" + +/*********************************************************************** + * + * String Table + */ +STRINGTABLE +BEGIN + IDI_BACULA "Bacula" +END + +/*********************************************************************** + * + * About Dialog + */ +IDD_ABOUT DIALOG 0, 0, 250, 145 +#if !defined(MINGW64) && !defined(HAVE_MINGW_W64) +STYLE DS_SETFONT | DS_MODALFRAME | DS_CENTER | WS_POPUP | WS_CAPTION | WS_SYSMENU +#endif +CAPTION "About Bacula" +FONT 8, "MS Sans Serif" +BEGIN +#if !defined(MINGW64) && !defined(HAVE_MINGW_W64) + DEFPUSHBUTTON "OK",IDOK,190,120,50,15 +#endif +// CONTROL "\3", IDC_BACULABMP,"Static",SS_ICON | SS_CENTERIMAGE | +// SS_SUNKEN,7,5,73,65 +#if !defined(HAVE_MINGW) && !defined(MINGW64) && !defined(HAVE_MINGW_W64) + CONTROL IDB_BACULABMP,IDB_BACULABMP,"Static",SS_BITMAP|SS_SUNKEN,7,5,32,32 +#endif + + LTEXT " by Kern Sibbald",-1,134,38,78,10 + LTEXT "For more information, see:",-1,115,60,100,10 + LTEXT " www.bacula.org",-1,115,70,100,10 + LTEXT "Copyright (C) 2000-2019, Kern Sibbald",-1,7,120,175,10 + LTEXT "Licensed by Kern Sibbald",-1,7,130,175,10 + RTEXT "Build Date:",-1,108,24,42,8 + RTEXT "Bacula Version:",-1,100,9,50,8 + LTEXT VERSION,-1,159,10,65,8 + LTEXT BDATE,-1,159,24,65,10 + +END + +/************************************************************************ + * + * Status Dialog + */ +IDD_STATUS DIALOGEX 0, 0, 411, 244 +#if !defined(MINGW64) && !defined(HAVE_MINGW_W64) +STYLE DS_SETFONT | DS_3DLOOK | DS_CENTER | WS_MINIMIZEBOX | WS_POPUP | WS_VISIBLE | WS_CAPTION | WS_SYSMENU | WS_THICKFRAME +#endif +CAPTION "Bacula Status" +FONT 8, "Courier New" +BEGIN +#if !defined(MINGW64) && 
!defined(HAVE_MINGW_W64) + DEFPUSHBUTTON "&OK",IDOK,355,5,51,15 + EDITTEXT IDC_TEXTDISPLAY, 2, 2, 350, 240, WS_VSCROLL | WS_HSCROLL | WS_BORDER | ES_READONLY | ES_MULTILINE +#endif +END diff --git a/src/win32/libwin32/error.ico b/src/win32/libwin32/error.ico new file mode 100644 index 00000000..a8f85f12 Binary files /dev/null and b/src/win32/libwin32/error.ico differ diff --git a/src/win32/libwin32/idle.ico b/src/win32/libwin32/idle.ico new file mode 100644 index 00000000..ce5315f2 Binary files /dev/null and b/src/win32/libwin32/idle.ico differ diff --git a/src/win32/libwin32/main.cpp b/src/win32/libwin32/main.cpp new file mode 100644 index 00000000..05423913 --- /dev/null +++ b/src/win32/libwin32/main.cpp @@ -0,0 +1,666 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2019 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * Kern Sibbald, August 2007 + * + * Note, some of the original Bacula Windows startup and service handling code + * was derived from VNC code that was used in apcupsd then ported to + * Bacula. However, since then the code has been significantly enhanced + * and largely rewritten. + * + * Evidently due to the nature of Windows startup code and service + * handling code, certain similarities remain. Thanks to the original + * VNC authors. + * + * This is a generic main routine, which is used by all three + * of the daemons. Each one compiles it with slightly different + * #defines. + * + */ + +#define LOCKMGR_COMPLIANT +#include "bacula.h" +#include "win32.h" +#include +#include + +#include + + +/* Globals */ +HINSTANCE appInstance; +DWORD mainthreadId; +bool opt_debug = false; +bool have_service_api; +DWORD service_thread_id = 0; +char win_os[300]; + +bool GetWindowsVersionString(LPTSTR osbuf, int maxsiz); + + +#define MAX_COMMAND_ARGS 100 +static char *command_args[MAX_COMMAND_ARGS] = {(char *)LC_APP_NAME, NULL}; +static int num_command_args = 1; +static pid_t main_pid; +static pthread_t main_tid; + +const char usage[] = APP_NAME "[/debug] [/service] [/run] [/kill] [/install] [/remove] [/help]\n"; + +/* + * + * Main Windows entry point. + * + * We parse the command line and either calls the main App + * or starts up the service. 
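The options listed in the usage string map onto the entry points declared in protos.h further below. A condensed sketch of that dispatch (illustrative only, not the original WinMain code; dispatch_option and rest_of_cmdline are made-up names) looks roughly like this:

#include <string.h>

extern bool opt_debug;
extern int BaculaAppMain();
extern int installService(const char *svc);
extern int removeService();
extern int stopRunningBacula();
extern int baculaServiceMain();

static int dispatch_option(const char *opt, const char *rest_of_cmdline)
{
   if (strcmp(opt, "/debug") == 0) {
      opt_debug = true;                       /* only sets a flag, keep scanning */
      return -1;
   }
   if (strcmp(opt, "/service") == 0) {
      return baculaServiceMain();             /* run under the service control manager */
   }
   if (strcmp(opt, "/run") == 0) {
      return BaculaAppMain();                 /* run as an ordinary application */
   }
   if (strcmp(opt, "/kill") == 0) {
      return stopRunningBacula();             /* ask a running instance to exit */
   }
   if (strcmp(opt, "/install") == 0) {
      return installService(rest_of_cmdline); /* register with the SCM */
   }
   if (strcmp(opt, "/remove") == 0) {
      return removeService();                 /* unregister from the SCM */
   }
   return 1;                                  /* unknown option: caller prints usage */
}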
+ */ +int WINAPI WinMain(HINSTANCE Instance, HINSTANCE /*PrevInstance*/, PSTR CmdLine, + int /*show*/) +{ + char *cmdLine = CmdLine; + char *wordPtr, *tempPtr; + int i, quote; + OSVERSIONINFO osversioninfo; + osversioninfo.dwOSVersionInfoSize = sizeof(osversioninfo); + + /* Save the application instance and main thread id */ + appInstance = Instance; + mainthreadId = GetCurrentThreadId(); + + if (GetVersionEx(&osversioninfo) && + osversioninfo.dwPlatformId == VER_PLATFORM_WIN32_NT) { + have_service_api = true; + } + + GetWindowsVersionString(win_os, sizeof(win_os)); + + main_pid = getpid(); + main_tid = pthread_self(); + + INITCOMMONCONTROLSEX initCC = { + sizeof(INITCOMMONCONTROLSEX), + ICC_STANDARD_CLASSES + }; + + InitCommonControlsEx(&initCC); + + /* + * Funny things happen with the command line if the + * execution comes from c:/Program Files/bacula/bacula.exe + * We get a command line like: Files/bacula/bacula.exe" options + * I.e. someone stops scanning command line on a space, not + * realizing that the filename is quoted!!!!!!!!!! + * So if first character is not a double quote and + * the last character before first space is a double + * quote, we throw away the junk. + */ + + wordPtr = cmdLine; + while (*wordPtr && *wordPtr != ' ') + wordPtr++; + if (wordPtr > cmdLine) /* backup to char before space */ + wordPtr--; + /* if first character is not a quote and last is, junk it */ + if (*cmdLine != '"' && *wordPtr == '"') { + cmdLine = wordPtr + 1; + } + + /* + * Build Unix style argc *argv[] for the main "Unix" code + * stripping out any Windows options + */ + + /* Don't NULL command_args[0] !!! */ + for (i=1;i +#include + +#ifndef PRODUCT_UNLICENSED +#define PRODUCT_UNLICENSED 0xABCDABCD +#define PRODUCT_BUSINESS 0x00000006 +#define PRODUCT_BUSINESS_N 0x00000010 +#define PRODUCT_CLUSTER_SERVER 0x00000012 +#define PRODUCT_DATACENTER_SERVER 0x00000008 +#define PRODUCT_DATACENTER_SERVER_CORE 0x0000000C +#define PRODUCT_DATACENTER_SERVER_CORE_V 0x00000027 +#define PRODUCT_DATACENTER_SERVER_V 0x00000025 +#define PRODUCT_ENTERPRISE 0x00000004 +#define PRODUCT_ENTERPRISE_E 0x00000046 +#define PRODUCT_ENTERPRISE_N 0x0000001B +#define PRODUCT_ENTERPRISE_SERVER 0x0000000A +#define PRODUCT_ENTERPRISE_SERVER_CORE 0x0000000E +#define PRODUCT_ENTERPRISE_SERVER_CORE_V 0x00000029 +#define PRODUCT_ENTERPRISE_SERVER_IA64 0x0000000F +#define PRODUCT_ENTERPRISE_SERVER_V 0x00000026 +#define PRODUCT_HOME_BASIC 0x00000002 +#define PRODUCT_HOME_BASIC_E 0x00000043 +#define PRODUCT_HOME_BASIC_N 0x00000005 +#define PRODUCT_HOME_PREMIUM 0x00000003 +#define PRODUCT_HOME_PREMIUM_E 0x00000044 +#define PRODUCT_HOME_PREMIUM_N 0x0000001A +#define PRODUCT_HYPERV 0x0000002A +#define PRODUCT_MEDIUMBUSINESS_SERVER_MANAGEMENT 0x0000001E +#define PRODUCT_MEDIUMBUSINESS_SERVER_MESSAGING 0x00000020 +#define PRODUCT_MEDIUMBUSINESS_SERVER_SECURITY 0x0000001F +#define PRODUCT_PROFESSIONAL 0x00000030 +#define PRODUCT_PROFESSIONAL_E 0x00000045 +#define PRODUCT_PROFESSIONAL_N 0x00000031 +#define PRODUCT_SERVER_FOR_SMALLBUSINESS 0x00000018 +#define PRODUCT_SERVER_FOR_SMALLBUSINESS_V 0x00000023 +#define PRODUCT_SERVER_FOUNDATION 0x00000021 +#define PRODUCT_SMALLBUSINESS_SERVER 0x00000009 +#define PRODUCT_SOLUTION_EMBEDDEDSERVER 0x00000038 +#define PRODUCT_STANDARD_SERVER 0x00000007 +#define PRODUCT_STANDARD_SERVER_CORE 0x0000000D +#define PRODUCT_STANDARD_SERVER_CORE_V 0x00000028 +#define PRODUCT_STANDARD_SERVER_V 0x00000024 +#define PRODUCT_STARTER 0x0000000B +#define PRODUCT_STARTER_E 0x00000042 +#define PRODUCT_STARTER_N 
0x0000002F +#define PRODUCT_STORAGE_ENTERPRISE_SERVER 0x00000017 +#define PRODUCT_STORAGE_EXPRESS_SERVER 0x00000014 +#define PRODUCT_STORAGE_STANDARD_SERVER 0x00000015 +#define PRODUCT_STORAGE_WORKGROUP_SERVER 0x00000016 +#define PRODUCT_UNDEFINED 0x00000000 +#define PRODUCT_ULTIMATE 0x00000001 +#define PRODUCT_ULTIMATE_E 0x00000047 +#define PRODUCT_ULTIMATE_N 0x0000001C +#define PRODUCT_WEB_SERVER 0x00000011 +#define PRODUCT_WEB_SERVER_CORE 0x0000001D + +#define PRODUCT_SMALLBUSINESS_SERVER_PREMIUM 0x19 +#define SM_SERVERR2 89 +#define VER_SERVER_NT 0x80000000 + +#endif + +#ifndef PRODUCT_PROFESSIONAL +#define PRODUCT_PROFESSIONAL 0x00000030 +#endif +#ifndef VER_SUITE_STORAGE_SERVER +#define VER_SUITE_STORAGE_SERVER 0x00002000 +#endif +#ifndef VER_SUITE_COMPUTE_SERVER +#define VER_SUITE_COMPUTE_SERVER 0x00004000 +#endif + +/* Unknown value */ +#ifndef VER_SUITE_WH_SERVER +#define VER_SUITE_WH_SERVER -1 +#endif + +typedef void (WINAPI *PGNSI)(LPSYSTEM_INFO); +typedef BOOL (WINAPI *PGPI)(DWORD, DWORD, DWORD, DWORD, PDWORD); + +/* + * Get Windows version display string + */ +bool GetWindowsVersionString(LPTSTR osbuf, int maxsiz) +{ + OSVERSIONINFOEX osvi; + SYSTEM_INFO si; + PGNSI pGNSI; + PGPI pGPI; + BOOL bOsVersionInfoEx; + DWORD dwType; + + memset(&si, 0, sizeof(SYSTEM_INFO)); + memset(&osvi, 0, sizeof(OSVERSIONINFOEX)); + + osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX); + + if( !(bOsVersionInfoEx = GetVersionEx ((OSVERSIONINFO *) &osvi)) ) + return 1; + + // Call GetNativeSystemInfo if supported or GetSystemInfo otherwise. + + pGNSI = (PGNSI)GetProcAddress(GetModuleHandle(TEXT("kernel32.dll")), + "GetNativeSystemInfo"); + if (pGNSI) { + pGNSI(&si); + } else { + GetSystemInfo(&si); + } + + if (osvi.dwPlatformId == VER_PLATFORM_WIN32_NT && osvi.dwMajorVersion > 4) { + bstrncpy(osbuf, TEXT("Microsoft "), maxsiz); + + // Test for the specific product. 
+ + if (osvi.dwMajorVersion == 6) { + if (osvi.dwMinorVersion == 0) { + if (osvi.wProductType == VER_NT_WORKSTATION) + bstrncat(osbuf, TEXT("Windows Vista "), maxsiz); + else + bstrncat(osbuf, TEXT("Windows Server 2008 " ), maxsiz); + } + + if (osvi.dwMinorVersion == 1) { + if (osvi.wProductType == VER_NT_WORKSTATION ) + bstrncat(osbuf, TEXT("Windows 7 "), maxsiz); + else + bstrncat(osbuf, TEXT("Windows Server 2008 R2 " ), maxsiz); + } + + pGPI = (PGPI)GetProcAddress(GetModuleHandle(TEXT("kernel32.dll")), + "GetProductInfo"); + + if (pGPI) { + pGPI(osvi.dwMajorVersion, osvi.dwMinorVersion, 0, 0, &dwType); + } else { + dwType = PRODUCT_HOME_BASIC; + } + + switch (dwType) { + case PRODUCT_ULTIMATE: + bstrncat(osbuf, TEXT("Ultimate Edition" ), maxsiz); + break; + case PRODUCT_PROFESSIONAL: + bstrncat(osbuf, TEXT("Professional" ), maxsiz); + break; + case PRODUCT_HOME_PREMIUM: + bstrncat(osbuf, TEXT("Home Premium Edition" ), maxsiz); + break; + case PRODUCT_HOME_BASIC: + bstrncat(osbuf, TEXT("Home Basic Edition" ), maxsiz); + break; + case PRODUCT_ENTERPRISE: + bstrncat(osbuf, TEXT("Enterprise Edition" ), maxsiz); + break; + case PRODUCT_BUSINESS: + bstrncat(osbuf, TEXT("Business Edition" ), maxsiz); + break; + case PRODUCT_STARTER: + bstrncat(osbuf, TEXT("Starter Edition" ), maxsiz); + break; + case PRODUCT_CLUSTER_SERVER: + bstrncat(osbuf, TEXT("Cluster Server Edition" ), maxsiz); + break; + case PRODUCT_DATACENTER_SERVER: + bstrncat(osbuf, TEXT("Datacenter Edition" ), maxsiz); + break; + case PRODUCT_DATACENTER_SERVER_CORE: + bstrncat(osbuf, TEXT("Datacenter Edition (core installation)" ), maxsiz); + break; + case PRODUCT_ENTERPRISE_SERVER: + bstrncat(osbuf, TEXT("Enterprise Edition" ), maxsiz); + break; + case PRODUCT_ENTERPRISE_SERVER_CORE: + bstrncat(osbuf, TEXT("Enterprise Edition (core installation)" ), maxsiz); + break; + case PRODUCT_ENTERPRISE_SERVER_IA64: + bstrncat(osbuf, TEXT("Enterprise Edition for Itanium-based Systems" ), maxsiz); + break; + case PRODUCT_SMALLBUSINESS_SERVER: + bstrncat(osbuf, TEXT("Small Business Server" ), maxsiz); + break; + case PRODUCT_SMALLBUSINESS_SERVER_PREMIUM: + bstrncat(osbuf, TEXT("Small Business Server Premium Edition" ), maxsiz); + break; + case PRODUCT_STANDARD_SERVER: + bstrncat(osbuf, TEXT("Standard Edition" ), maxsiz); + break; + case PRODUCT_STANDARD_SERVER_CORE: + bstrncat(osbuf, TEXT("Standard Edition (core installation)" ), maxsiz); + break; + case PRODUCT_WEB_SERVER: + bstrncat(osbuf, TEXT("Web Server Edition" ), maxsiz); + break; + } + } + + if (osvi.dwMajorVersion == 5 && osvi.dwMinorVersion == 2) { + if( GetSystemMetrics(SM_SERVERR2) ) + bstrncat(osbuf, TEXT( "Windows Server 2003 R2 "), maxsiz); + else if (osvi.wSuiteMask & VER_SUITE_STORAGE_SERVER) + bstrncat(osbuf, TEXT( "Windows Storage Server 2003"), maxsiz); + else if (osvi.wSuiteMask & VER_SUITE_WH_SERVER ) + bstrncat(osbuf, TEXT( "Windows Home Server"), maxsiz); + else if (osvi.wProductType == VER_NT_WORKSTATION && + si.wProcessorArchitecture==PROCESSOR_ARCHITECTURE_AMD64) + bstrncat(osbuf, TEXT( "Windows XP Professional x64 Edition"), maxsiz); + else + bstrncat(osbuf, TEXT("Windows Server 2003 "), maxsiz); + + // Test for the server type. 
+ if (osvi.wProductType != VER_NT_WORKSTATION) { + if (si.wProcessorArchitecture==PROCESSOR_ARCHITECTURE_IA64) { + if( osvi.wSuiteMask & VER_SUITE_DATACENTER ) + bstrncat(osbuf, TEXT( "Datacenter Edition for Itanium-based Systems" ), maxsiz); + else if( osvi.wSuiteMask & VER_SUITE_ENTERPRISE ) + bstrncat(osbuf, TEXT( "Enterprise Edition for Itanium-based Systems" ), maxsiz); + } + + else if (si.wProcessorArchitecture==PROCESSOR_ARCHITECTURE_AMD64) { + if( osvi.wSuiteMask & VER_SUITE_DATACENTER ) + bstrncat(osbuf, TEXT( "Datacenter x64 Edition" ), maxsiz); + else if( osvi.wSuiteMask & VER_SUITE_ENTERPRISE ) + bstrncat(osbuf, TEXT( "Enterprise x64 Edition" ), maxsiz); + else bstrncat(osbuf, TEXT( "Standard x64 Edition" ), maxsiz); + } else { + if ( osvi.wSuiteMask & VER_SUITE_COMPUTE_SERVER ) + bstrncat(osbuf, TEXT( "Compute Cluster Edition" ), maxsiz); + else if( osvi.wSuiteMask & VER_SUITE_DATACENTER ) + bstrncat(osbuf, TEXT( "Datacenter Edition" ), maxsiz); + else if( osvi.wSuiteMask & VER_SUITE_ENTERPRISE ) + bstrncat(osbuf, TEXT( "Enterprise Edition" ), maxsiz); + else if ( osvi.wSuiteMask & VER_SUITE_BLADE ) + bstrncat(osbuf, TEXT( "Web Edition" ), maxsiz); + else bstrncat(osbuf, TEXT( "Standard Edition" ), maxsiz); + } + } + } + + if (osvi.dwMajorVersion == 5 && osvi.dwMinorVersion == 1) { + bstrncat(osbuf, TEXT("Windows XP "), maxsiz); + if( osvi.wSuiteMask & VER_SUITE_PERSONAL ) + bstrncat(osbuf, TEXT( "Home Edition" ), maxsiz); + else + bstrncat(osbuf, TEXT( "Professional" ), maxsiz); + } + + if (osvi.dwMajorVersion == 5 && osvi.dwMinorVersion == 0) { + bstrncat(osbuf, TEXT("Windows 2000 "), maxsiz); + if ( osvi.wProductType == VER_NT_WORKSTATION ) { + bstrncat(osbuf, TEXT( "Professional" ), maxsiz); + } else { + if( osvi.wSuiteMask & VER_SUITE_DATACENTER ) + bstrncat(osbuf, TEXT( "Datacenter Server" ), maxsiz); + else if( osvi.wSuiteMask & VER_SUITE_ENTERPRISE ) + bstrncat(osbuf, TEXT( "Advanced Server" ), maxsiz); + else bstrncat(osbuf, TEXT( "Server" ), maxsiz); + } + } + + // Include service pack (if any) and build number. + + if (_tcslen(osvi.szCSDVersion) > 0) { + bstrncat(osbuf, TEXT(" ") , maxsiz); + bstrncat(osbuf, osvi.szCSDVersion, maxsiz); + } + + char buf[80]; + + snprintf(buf, 80, " (build %d)", (int)osvi.dwBuildNumber); + bstrncat(osbuf, buf, maxsiz); + + if (osvi.dwMajorVersion >= 6) { + if ( si.wProcessorArchitecture==PROCESSOR_ARCHITECTURE_AMD64 ) + bstrncat(osbuf, TEXT( ", 64-bit" ), maxsiz); + else if (si.wProcessorArchitecture==PROCESSOR_ARCHITECTURE_INTEL ) + bstrncat(osbuf, TEXT(", 32-bit"), maxsiz); + } + + return true; + } else { + bstrncpy(osbuf, "Unknown Windows version.", maxsiz); + return true; + } +} diff --git a/src/win32/libwin32/protos.h b/src/win32/libwin32/protos.h new file mode 100644 index 00000000..64d02160 --- /dev/null +++ b/src/win32/libwin32/protos.h @@ -0,0 +1,50 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2019 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
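A caveat on the version probing above, which is built entirely on GetVersionEx(): from Windows 8.1 onward that call is deprecated and returns a compatibility-shimmed version that depends on the application manifest. A small sketch (not part of the Bacula sources, and assuming an SDK that declares RTL_OSVERSIONINFOW) of querying the unshimmed version through ntdll's RtlGetVersion, using the same GetProcAddress technique the code above already uses for GetNativeSystemInfo:

#include <windows.h>
#include <string.h>

typedef LONG (WINAPI *PRTLGETVERSION)(PRTL_OSVERSIONINFOW);

static bool get_real_os_version(DWORD *major, DWORD *minor, DWORD *build)
{
   PRTLGETVERSION pRtlGetVersion = (PRTLGETVERSION)
      GetProcAddress(GetModuleHandle(TEXT("ntdll.dll")), "RtlGetVersion");
   if (!pRtlGetVersion) {
      return false;                  /* not available: fall back to GetVersionEx() */
   }

   RTL_OSVERSIONINFOW ovi;
   memset(&ovi, 0, sizeof(ovi));
   ovi.dwOSVersionInfoSize = sizeof(ovi);
   if (pRtlGetVersion(&ovi) != 0) {  /* 0 == STATUS_SUCCESS */
      return false;
   }

   *major = ovi.dwMajorVersion;
   *minor = ovi.dwMinorVersion;
   *build = ovi.dwBuildNumber;
   return true;
}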
+*/ +/* + * Kern Sibbald, August 2007 + * + */ + +#define log_error_message(msg) LogLastErrorMsg((msg), __FILE__, __LINE__) + +extern int BaculaAppMain(); +extern void LogLastErrorMsg(const char *msg, const char *fname, int lineno); + +extern int BaculaMain(int argc, char *argv[]); +extern BOOL ReportStatus(DWORD state, DWORD exitcode, DWORD waithint); +extern void d_msg(const char *, int, int, const char *, ...); +extern char *bac_status(char *buf, int buf_len); + + +/* service.cpp */ +bool postToBacula(UINT message, WPARAM wParam, LPARAM lParam); +bool isAService(); +int installService(const char *svc); +int removeService(); +int stopRunningBacula(); +int baculaServiceMain(); + + +/* Globals */ +extern DWORD service_thread_id; +extern DWORD service_error; +extern bool opt_debug; +extern bool have_service_api; +extern HINSTANCE appInstance; +extern int bacstat; diff --git a/src/win32/libwin32/res.h b/src/win32/libwin32/res.h new file mode 100644 index 00000000..058fa989 --- /dev/null +++ b/src/win32/libwin32/res.h @@ -0,0 +1,41 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2019 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Kern Sibbald, August 2007 + * + */ + +/* Icon definitions */ +#define IDI_BACULA 100 +#define IDI_IDLE 101 +#define IDI_RUNNING 102 +#define IDI_JOB_ERROR 103 +#define IDI_JOB_WARNING 104 +#define IDR_TRAYMENU 105 +#define IDB_BACULABMP 106 + +#define IDC_TEXTDISPLAY 1000 + +#define ID_ABOUT 40000 +#define ID_STATUS 40001 +#define ID_CLOSE 40002 + +/* Dialog definitions */ +#define IDD_ABOUT 201 +#define IDD_STATUS 202 diff --git a/src/win32/libwin32/running.ico b/src/win32/libwin32/running.ico new file mode 100644 index 00000000..32dc42cd Binary files /dev/null and b/src/win32/libwin32/running.ico differ diff --git a/src/win32/libwin32/saving.ico b/src/win32/libwin32/saving.ico new file mode 100644 index 00000000..f6d50bf5 Binary files /dev/null and b/src/win32/libwin32/saving.ico differ diff --git a/src/win32/libwin32/service.cpp b/src/win32/libwin32/service.cpp new file mode 100644 index 00000000..0d9b9091 --- /dev/null +++ b/src/win32/libwin32/service.cpp @@ -0,0 +1,586 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2019 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * Kern Sibbald, August 2007 + * + * This is a generic service routine, which is used by all three + * of the daemons. Each one compiles it with slightly different + * #defines. 
+ * + */ + +#include "bacula.h" +#include "win32.h" + +/* Forward reference */ +static void set_service_description(SC_HANDLE hSCManager, + SC_HANDLE hService, LPSTR lpDesc); + +/* Other Window component dependencies */ +#define BAC_DEPENDENCIES __TEXT("tcpip\0afd\0") + +/* Service globals */ +SERVICE_STATUS_HANDLE service_handle; +SERVICE_STATUS service_status; +DWORD service_error = 0; +static bool is_service = false; + +/* Forward references */ +void WINAPI serviceControlCallback(DWORD ctrlcode); +BOOL ReportStatus(DWORD state, DWORD exitcode, DWORD waithint); +DWORD WINAPI baculaWorkerThread(LPVOID lpwThreadParam); + + +/* + * Post a message to a running instance of the app + */ +bool postToBacula(UINT message, WPARAM wParam, LPARAM lParam) +{ + /* Locate the Bacula menu window */ + HWND hservwnd = FindWindow(APP_NAME, NULL); + if (hservwnd == NULL) { + return false; + } + + /* Post the message to Bacula */ + PostMessage(hservwnd, message, wParam, lParam); + return true; +} + + +/* + * Running as a service? + */ +bool isAService() +{ + return is_service; +} + +/* + * terminate any running Bacula + */ +int stopRunningBacula() +{ + postToBacula(WM_CLOSE, 0, 0); + sleep(5); + return 0; +} + +/* + * New style service start callback handler for the OS. + * the OS returns control here immediately after starting + * the service. + */ +void WINAPI serviceStartCallback(DWORD argc, char **argv) +{ + DWORD dwThreadID; + + /* Register our service */ + service_handle = RegisterServiceCtrlHandler(APP_NAME, serviceControlCallback); + if (!service_handle) { + log_error_message(_("RegisterServiceCtlHandler failed")); + MessageBox(NULL, _("Failure contacting the Service Handler"), + APP_DESC, MB_OK); + return; + } + + service_status.dwServiceType = SERVICE_WIN32; + service_status.dwServiceSpecificExitCode = 0; + + /* Report status */ + if (!ReportStatus(SERVICE_START_PENDING, NO_ERROR, 45000)) { + ReportStatus(SERVICE_STOPPED, service_error, 0); + log_error_message(_("Service start report failed")); + return; + } + + /* Now create the Bacula worker thread */ + (void)CreateThread(NULL, 0, baculaWorkerThread, NULL, 0, &dwThreadID); + return; +} + +/* + * Stop our service + */ +static void serviceStop() +{ + /* Post a quit message our service thread */ + if (service_thread_id != 0) { + PostThreadMessage(service_thread_id, WM_QUIT, 0, 0); + } +} + +/* + * Service Control callback handler. The OS can call us here + * at any time, most often to stop the service. 
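The service plumbing in this file is spread over several callbacks, so the overall protocol is easy to lose track of. Reduced to its minimum it looks like the sketch below; this is illustrative only ("MySvc" is a placeholder name), while the real code in this file runs the daemon on a worker thread, reports wait hints during startup, and logs failures.

#include <windows.h>

static SERVICE_STATUS_HANDLE g_handle;
static SERVICE_STATUS        g_status;
static HANDLE                g_stop_event;

static void report(DWORD state)
{
   g_status.dwCurrentState = state;
   g_status.dwControlsAccepted =
      (state == SERVICE_START_PENDING) ? 0 : SERVICE_ACCEPT_STOP;
   SetServiceStatus(g_handle, &g_status);
}

static void WINAPI handler(DWORD ctrl)
{
   if (ctrl == SERVICE_CONTROL_STOP) {
      report(SERVICE_STOP_PENDING);
      SetEvent(g_stop_event);                    /* tell the worker to shut down */
   }
}

static void WINAPI service_main(DWORD argc, LPTSTR *argv)
{
   (void)argc; (void)argv;
   g_handle = RegisterServiceCtrlHandler(TEXT("MySvc"), handler);
   g_status.dwServiceType = SERVICE_WIN32_OWN_PROCESS;
   report(SERVICE_START_PENDING);

   g_stop_event = CreateEvent(NULL, TRUE, FALSE, NULL);
   report(SERVICE_RUNNING);

   WaitForSingleObject(g_stop_event, INFINITE);  /* the daemon's real work goes here */
   report(SERVICE_STOPPED);
}

int main()
{
   SERVICE_TABLE_ENTRY table[] = {
      { (LPTSTR)TEXT("MySvc"), service_main },
      { NULL, NULL }
   };
   /* Blocks until the service stops; fails if the process was not started by the SCM */
   return StartServiceCtrlDispatcher(table) ? 0 : 1;
}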
+ */ +void WINAPI serviceControlCallback(DWORD ctrlcode) +{ + switch(ctrlcode) { + case SERVICE_CONTROL_STOP: + service_status.dwCurrentState = SERVICE_STOP_PENDING; + serviceStop(); /* our stop service routine */ + break; + } + + /* Report our status */ + ReportStatus(service_status.dwCurrentState, NO_ERROR, 0); +} + + +/* + * Run Bacula as a service + */ +int baculaServiceMain() +{ + is_service = true; /* indicate we are running as a service */ + + if (have_service_api) { /* New style service API */ + /* Tell OS where to dispatch service calls to us */ + SERVICE_TABLE_ENTRY dispatchTable[] = { + {(char *)APP_NAME, (LPSERVICE_MAIN_FUNCTION)serviceStartCallback}, + {NULL, NULL}}; + + /* Start the service control dispatcher */ + if (!StartServiceCtrlDispatcher(dispatchTable)) { + log_error_message(_("StartServiceCtrlDispatcher failed.")); + } + /* Note, this thread continues in the ServiceCallback routine */ + + } else { /* old style Win95/98/Me */ + HINSTANCE kerneldll = LoadLibrary("KERNEL32.DLL"); + if (kerneldll == NULL) { + MessageBox(NULL, _("KERNEL32.DLL not found: Bacula service not started"), + APP_DESC, MB_OK); + return 1; + } + + /* Get entry point for RegisterServiceProcess function */ + DWORD (WINAPI *RegisterService)(DWORD, DWORD); + RegisterService = (DWORD (WINAPI *)(DWORD, DWORD)) + GetProcAddress(kerneldll, "RegisterServiceProcess"); + if (RegisterService == NULL) { + MessageBox(NULL, _("Registry service not found: Bacula service not started"), + APP_DESC, MB_OK); + log_error_message(_("Registry service entry point not found")); + FreeLibrary(kerneldll); /* free up kernel dll */ + return 1; + } + + RegisterService(0, 1); /* register us as a service */ + BaculaAppMain(); /* call the main Bacula code */ + RegisterService(0, 0); /* terminate the service */ + FreeLibrary(kerneldll); /* free up kernel dll */ + } + return 0; +} + + +/* + * New style service bacula worker thread + */ +DWORD WINAPI baculaWorkerThread(LPVOID lpwThreadParam) +{ + service_thread_id = GetCurrentThreadId(); + + if (!ReportStatus(SERVICE_RUNNING, NO_ERROR, 0)) { + MessageBox(NULL, _("Report Service failure"), APP_DESC, MB_OK); + log_error_message("ReportStatus RUNNING failed"); + return 0; + } + + /* Call Bacula main code */ + BaculaAppMain(); + + /* Mark that we're no longer running */ + service_thread_id = 0; + + /* Tell the service manager that we've stopped */ + ReportStatus(SERVICE_STOPPED, service_error, 0); + return 0; +} + + + +/* + * Install the Bacula service on the OS -- very complicated + */ +int installService(const char *cmdOpts) +{ + const int maxlen = 2048; + char path[maxlen]; + char svcmd[maxlen]; + + bsnprintf(svcmd, sizeof(svcmd), "service: install: %s", cmdOpts, APP_DESC, MB_OK); + + /* Get our filename */ + if (GetModuleFileName(NULL, path, maxlen-11) == 0) { + MessageBox(NULL, _("Unable to install the service"), APP_DESC, MB_ICONEXCLAMATION | MB_OK); + return 0; + } + + /* Create a valid command for starting the service */ + if ((int)strlen(path) + (int)strlen(cmdOpts) + 30 < maxlen) { + bsnprintf(svcmd, sizeof(svcmd), "\"%s\" /service %s", path, cmdOpts); + } else { + log_error_message(_("Service command length too long")); + MessageBox(NULL, _("Service command length too long. 
Service not registered."), + APP_DESC, MB_ICONEXCLAMATION | MB_OK); + return 0; + } + + if (have_service_api) { + SC_HANDLE baculaService, serviceManager; + + /* Open the service control manager */ + serviceManager = OpenSCManager(NULL, NULL, SC_MANAGER_ALL_ACCESS); + if (!serviceManager) { + log_error_message("Open Service Manager failed"); + MessageBox(NULL, + _("The Service Control Manager could not be contacted - the service was not installed"), + APP_DESC, MB_ICONEXCLAMATION | MB_OK); + return 0; + } + + /* Now actually create the Bacula service entry */ + baculaService = CreateService( + serviceManager, + APP_NAME, /* Our service name */ + APP_DESC, /* Display name */ + SERVICE_ALL_ACCESS, + SERVICE_WIN32_OWN_PROCESS, /* | SERVICE_INTERACTIVE_PROCESS, */ + SERVICE_AUTO_START, + SERVICE_ERROR_NORMAL, + svcmd, /* Command string to start the service */ + NULL, + NULL, + BAC_DEPENDENCIES, /* Services to start before us */ + NULL, /* Use default SYSTEM account */ + NULL); + if (!baculaService) { + CloseServiceHandle(serviceManager); + log_error_message("CreateService failed for " APP_DESC); + MessageBox(NULL, _("The Bacula service: " APP_NAME " could not be installed"), + APP_DESC, MB_ICONEXCLAMATION | MB_OK); + return 0; + } + + /* Set a text description in the service manager's control panel */ + set_service_description(serviceManager, baculaService, +(char *)_("Provides file backup and restore services. Bacula -- the network backup solution.")); + + CloseServiceHandle(serviceManager); + CloseServiceHandle(baculaService); + + } else { + /* Old style service -- create appropriate registry key path */ + HKEY runservices; + if (RegCreateKey(HKEY_LOCAL_MACHINE, + "Software\\Microsoft\\Windows\\CurrentVersion\\RunServices", + &runservices) != ERROR_SUCCESS) { + log_error_message(_("Cannot write System Registry for " APP_DESC)); + MessageBox(NULL, _("The System Registry could not be updated - the Bacula service was not installed"), + APP_DESC, MB_ICONEXCLAMATION | MB_OK); + return 0; + } + + /* Add the Bacula values */ + if (RegSetValueEx(runservices, APP_NAME, 0, REG_SZ, + (unsigned char *)svcmd, strlen(svcmd)+1) != ERROR_SUCCESS) { + RegCloseKey(runservices); + log_error_message(_("Cannot add Bacula key to System Registry")); + MessageBox(NULL, _("The Bacula service: " APP_NAME " could not be installed"), + APP_DESC, MB_ICONEXCLAMATION | MB_OK); + return 0; + } + RegCloseKey(runservices); + } + + /* At this point the service is installed */ + if (opt_debug) { + MessageBox(NULL, + _("The " APP_DESC "was successfully installed.\n" + "The service may be started by double clicking on the\n" + "Bacula \"Start\" icon and will be automatically\n" + "be run the next time this machine is rebooted. "), + APP_DESC, MB_ICONINFORMATION | MB_OK); + } + return 0; +} + + +/* + * Remove a service from the OS (normally done when we are installing + * a new version). + */ +int removeService() +{ + SC_HANDLE serviceManager, baculaService; + int stat = 0; + + if (have_service_api) { /* Newer Windows platforms (NT, Win2K, ...) 
*/ + serviceManager = OpenSCManager(NULL, NULL, SC_MANAGER_ALL_ACCESS); + if (serviceManager) { + /* Now get the Bacula service entry */ + baculaService = OpenService(serviceManager, APP_NAME, SERVICE_ALL_ACCESS); + if (baculaService) { + SERVICE_STATUS status; + /* If the service is running, stop it */ + if (ControlService(baculaService, SERVICE_CONTROL_STOP, &status)) { + while(QueryServiceStatus(baculaService, &status)) { + if (status.dwCurrentState != SERVICE_STOP_PENDING) { + break; + } + sleep(1); + } + if (status.dwCurrentState != SERVICE_STOPPED) { + if (opt_debug) { + MessageBox(NULL, _("The Bacula service: " APP_NAME " could not be stopped"), + APP_DESC, MB_ICONEXCLAMATION | MB_OK); + } + } + } + if (DeleteService(baculaService)) { + if (opt_debug) { + MessageBox(NULL, _("The Bacula service: " APP_NAME " has been removed"), + APP_DESC, MB_ICONINFORMATION | MB_OK); + } + } else { + MessageBox(NULL, _("The Bacula service: " APP_NAME " could not be removed"), + APP_DESC, MB_ICONEXCLAMATION | MB_OK); + stat = 1; /* error */ + } + CloseServiceHandle(baculaService); + } else { + if (opt_debug) { + MessageBox(NULL, _("An existing Bacula service: " APP_NAME " could not be found for " + "removal. This is not normally an error."), + APP_DESC, MB_ICONEXCLAMATION | MB_OK); + } + } + CloseServiceHandle(serviceManager); + return stat; + } else { + MessageBox(NULL, _("The service Manager could not be contacted - the Bacula service was not removed"), + APP_DESC, MB_ICONEXCLAMATION | MB_OK); + return 1; /* error */ + } + + } else { /* Old Win95/98/Me OS */ + /* Open the registry path key */ + HKEY runservices; + if (RegOpenKey(HKEY_LOCAL_MACHINE, + "Software\\Microsoft\\Windows\\CurrentVersion\\RunServices", + &runservices) != ERROR_SUCCESS) { + if (opt_debug) { + MessageBox(NULL, + _("Could not find registry entry.\nService probably not registerd - the Bacula service was not removed"), + APP_DESC, MB_ICONEXCLAMATION | MB_OK); + } + } else { + /* Now delete the Bacula entry */ + if (RegDeleteValue(runservices, APP_NAME) != ERROR_SUCCESS) { + RegCloseKey(runservices); + MessageBox(NULL, _("Could not delete Registry key for " APP_NAME ".\n" + "The Bacula service could not be removed"), APP_DESC, MB_ICONEXCLAMATION | MB_OK); + } + RegCloseKey(runservices); + return 1; + } + /* Stop any running Bacula */ + if (!stopRunningBacula()) { + if (opt_debug) { + MessageBox(NULL, + _("Bacula could not be contacted, probably not running"), + APP_DESC, MB_ICONEXCLAMATION | MB_OK); + } + return 0; /* not really an error */ + } + /* At this point, the service has been removed */ + if (opt_debug) { + MessageBox(NULL, _("The Bacula service has been removed"), APP_DESC, MB_ICONINFORMATION | MB_OK); + } + } + return 0; +} + + +/* + * This subroutine is called to report our current status to the + * new style service manager + */ +BOOL ReportStatus(DWORD state, DWORD exitcode, DWORD waithint) +{ + static DWORD checkpoint = 1; + BOOL result = TRUE; + + /* No callbacks until we are started */ + if (state == SERVICE_START_PENDING) { + service_status.dwControlsAccepted = 0; + } else { + service_status.dwControlsAccepted = SERVICE_ACCEPT_STOP; + } + + /* Save global service_status state */ + service_status.dwCurrentState = state; + service_status.dwWin32ExitCode = exitcode; + service_status.dwWaitHint = waithint; + + /* + * Update the checkpoint variable so the service manager knows + * we are alive. 
+ */ + if (state == SERVICE_RUNNING || state == SERVICE_STOPPED) { + service_status.dwCheckPoint = 0; + } else { + service_status.dwCheckPoint = checkpoint++; + } + + /* Send our new status */ + result = SetServiceStatus(service_handle, &service_status); + if (!result) { + log_error_message(_("SetServiceStatus failed")); + } + return result; +} + +/* Log an error message for the last Windows error */ +void LogLastErrorMsg(const char *message, const char *fname, int lineno) +{ + char msgbuf[500]; + HANDLE eventHandler; + const char *strings[3]; + LPTSTR msg; + + service_error = GetLastError(); + FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER| + FORMAT_MESSAGE_FROM_SYSTEM, + NULL, + service_error, + 0, + (LPTSTR)&msg, + 0, + NULL); + + /* Use the OS event logging to log the error */ + eventHandler = RegisterEventSource(NULL, APP_NAME); + + bsnprintf(msgbuf, sizeof(msgbuf), _("\n\n%s error: %ld at %s:%d"), + APP_NAME, service_error, fname, lineno); + + strings[0] = msgbuf; + strings[1] = message; + strings[2] = msg; + + if (eventHandler) { + ReportEvent(eventHandler, EVENTLOG_ERROR_TYPE, + 0, /* category */ + 0, /* ID */ + NULL, /* SID */ + 3, /* Number of strings */ + 0, /* raw data size */ + (const char **)strings, /* error strings */ + NULL); /* raw data */ + DeregisterEventSource(eventHandler); + } + LocalFree(msg); +} + +typedef BOOL (WINAPI * WinAPI)(SC_HANDLE, DWORD, LPVOID); + +/* + * This is amazingly complicated just to get a bit of English explanation + * in the service manager's dialog box. + */ +static void set_service_description(SC_HANDLE hSCManager, SC_HANDLE hService, + LPSTR lpDesc) +{ + SC_LOCK sclLock; + LPQUERY_SERVICE_LOCK_STATUS lpqslsBuf; + SERVICE_DESCRIPTION sdBuf; + DWORD dwBytesNeeded; + WinAPI ChangeServiceDescription; + + HINSTANCE hLib = LoadLibrary("ADVAPI32.DLL"); + if (!hLib) { + return; + } + ChangeServiceDescription = (WinAPI)GetProcAddress(hLib, + "ChangeServiceConfig2A"); + FreeLibrary(hLib); + if (!ChangeServiceDescription) { + return; + } + + // Need to acquire database lock before reconfiguring. + sclLock = LockServiceDatabase(hSCManager); + + // If the database cannot be locked, report the details. + if (sclLock == NULL) { + // Exit if the database is not locked by another process. + if (GetLastError() != ERROR_SERVICE_DATABASE_LOCKED) { + log_error_message("LockServiceDatabase"); + return; + } + + // Allocate a buffer to get details about the lock. + lpqslsBuf = (LPQUERY_SERVICE_LOCK_STATUS)LocalAlloc( + LPTR, sizeof(QUERY_SERVICE_LOCK_STATUS)+256); + if (lpqslsBuf == NULL) { + log_error_message("LocalAlloc"); + return; + } + + // Get and print the lock status information. + if (!QueryServiceLockStatus( + hSCManager, + lpqslsBuf, + sizeof(QUERY_SERVICE_LOCK_STATUS)+256, + &dwBytesNeeded)) { + log_error_message("QueryServiceLockStatus"); + } + + if (lpqslsBuf->fIsLocked) { + printf(_("Locked by: %s, duration: %ld seconds\n"), + lpqslsBuf->lpLockOwner, + lpqslsBuf->dwLockDuration); + } else { + printf(_("No longer locked\n")); + } + + LocalFree(lpqslsBuf); + log_error_message(_("Could not lock database")); + return; + } + + // The database is locked, so it is safe to make changes. + + sdBuf.lpDescription = lpDesc; + + if (!ChangeServiceDescription( + hService, // handle to service + SERVICE_CONFIG_DESCRIPTION, // change: description + &sdBuf) ) { // value: new description + log_error_message("ChangeServiceConfig2"); + } + + // Release the database lock. 
+ UnlockServiceDatabase(sclLock); +} diff --git a/src/win32/libwin32/statusDialog.cpp b/src/win32/libwin32/statusDialog.cpp new file mode 100644 index 00000000..4989cf2d --- /dev/null +++ b/src/win32/libwin32/statusDialog.cpp @@ -0,0 +1,165 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2019 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula File daemon Status Dialog box + * + * Kern Sibbald, August 2007 + * + */ + +#include "bacula.h" +#include "win32.h" +#include "statusDialog.h" +#include "lib/status.h" + +static BOOL CALLBACK dialogProc(HWND hDlg, UINT uMsg, WPARAM wParam, LPARAM lParam) +{ + /* Get class pointer from user data */ + statusDialog *statDlg = (statusDialog *)GetWindowLong(hDlg, GWL_USERDATA); + + switch (uMsg) { + case WM_INITDIALOG: + /* Set class pointer in user data */ + SetWindowLong(hDlg, GWL_USERDATA, lParam); + statDlg = (statusDialog *)lParam; + statDlg->m_textWin = GetDlgItem(hDlg, IDC_TEXTDISPLAY); + + /* show the dialog */ + SetForegroundWindow(hDlg); + + /* Update every 5 seconds */ + SetTimer(hDlg, 1, 5000, NULL); + statDlg->m_visible = true; + statDlg->display(); + return true; + + case WM_TIMER: + statDlg->display(); + return true; + + case WM_SIZE: + statDlg->resize(hDlg, LOWORD(lParam), HIWORD(lParam)); + return true; + + case WM_COMMAND: + switch (LOWORD(wParam)) { + case IDCANCEL: + case IDOK: + statDlg->m_visible = false; + KillTimer(hDlg, 1); + EndDialog(hDlg, true); + return true; + } + break; + + case WM_DESTROY: + statDlg->m_textWin = NULL; + statDlg->m_visible = false; + KillTimer(hDlg, 1); + EndDialog(hDlg, false); + return true; + } + return false; +} + + +static void displayString(const char *msg, int len, void *context) +{ + /* Get class pointer from user data */ + statusDialog *statDlg = (statusDialog *)context; + const char *start = msg; + const char *p; + char *str; + + for (p=start; *p; p++) { + if (*p == '\n') { + int len = p - start; + if (len > 0) { + str = (char *)alloca(len + 1); + bstrncpy(str, start, len + 1); + + SendMessage(statDlg->m_textWin, EM_SETSEL, (WPARAM)-1, (LPARAM)-1); + SendMessage(statDlg->m_textWin, EM_REPLACESEL, 0, (LPARAM)str); + } + + if (*p == '\n') { + SendMessage(statDlg->m_textWin, EM_SETSEL, (WPARAM)-1, (LPARAM)-1); + SendMessage(statDlg->m_textWin, EM_REPLACESEL, 0, (LPARAM)"\r\n"); + } + + if (*p == '\0'){ + break; + } + start = p + 1; + } + } +} + +void statusDialog::display() +{ + if (m_textWin != NULL) { + STATUS_PKT sp; + long hPos = GetScrollPos(m_textWin, SB_HORZ); + long vPos = GetScrollPos(m_textWin, SB_VERT); + long selStart; + long selEnd; + + SendMessage(m_textWin, EM_GETSEL, (WPARAM)&selStart, (LPARAM)&selEnd); + + SetWindowText(m_textWin, ""); + sp.bs = NULL; + sp.context = this; + sp.callback = displayString; + output_status(&sp); + + SendMessage(m_textWin, EM_SETSEL, selStart, selEnd); + SendMessage(m_textWin, WM_HSCROLL, MAKEWPARAM(SB_THUMBPOSITION, hPos), 0); + SendMessage(m_textWin, WM_VSCROLL, MAKEWPARAM(SB_THUMBPOSITION, vPos), 0); + } +} + +/* Dialog 
box handling functions */ +void statusDialog::show(bool show) +{ + if (show && !m_visible) { + DialogBoxParam(appInstance, MAKEINTRESOURCE(IDD_STATUS), NULL, + (DLGPROC)dialogProc, (LPARAM)this); + } +} + +/* + * Make sure OK button is positioned in the right place + */ +void statusDialog::resize(HWND dWin, int dWidth, int dHeight) +{ + int bWidth, bHeight; + RECT bRect; + HWND bWin; + + if (m_textWin != NULL) { + bWin = GetDlgItem(dWin, IDOK); /* get size of OK button */ + + GetWindowRect(bWin, &bRect); + bWidth = bRect.right - bRect.left; + bHeight = bRect.bottom - bRect.top; + + MoveWindow(m_textWin, 8, 8, dWidth-bWidth-24, dHeight-16, true); + MoveWindow(bWin, dWidth - bWidth-8, 8, bWidth, bHeight, true); + } +} diff --git a/src/win32/libwin32/statusDialog.h b/src/win32/libwin32/statusDialog.h new file mode 100644 index 00000000..4ea91f8f --- /dev/null +++ b/src/win32/libwin32/statusDialog.h @@ -0,0 +1,45 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2019 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Bacula Status Dialog header file + * + * Kern Sibbald, August 2007 + * + */ + +#ifndef __STATUS_DIALOG_H_ +#define __STATUS_DIALOG_H_ + +class statusDialog +{ +public: + statusDialog() { m_visible = false; m_textWin = NULL; }; + ~statusDialog() { }; + + void display(); + + void show(bool show); + + void resize(HWND win, int width, int height); + + bool m_visible; + HWND m_textWin; +}; + +#endif diff --git a/src/win32/libwin32/trayMonitor.cpp b/src/win32/libwin32/trayMonitor.cpp new file mode 100644 index 00000000..229984ce --- /dev/null +++ b/src/win32/libwin32/trayMonitor.cpp @@ -0,0 +1,263 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2019 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * + * Kern Sibbald, August 2007 + * + * This is a generic tray monitor routine, which is used by all three + * of the daemons. Each one compiles it with slightly different + * #defines. 
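One detail of statusDialog::display() and displayString() above worth noting: text is appended to the read-only EDIT control by moving the selection to the end and replacing it, and multi-line edit controls expect CR/LF line endings, which is why each "\n" becomes "\r\n". The append idiom in isolation (a sketch with a hypothetical hEdit handle, not Bacula code) can also be written with an explicit caret position:

#include <windows.h>

static void append_text(HWND hEdit, const char *text)
{
   int len = GetWindowTextLength(hEdit);
   SendMessage(hEdit, EM_SETSEL, (WPARAM)len, (LPARAM)len);  /* put the caret at the end */
   SendMessage(hEdit, EM_REPLACESEL, FALSE, (LPARAM)text);   /* insert at the caret */
}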
+ * + */ + +#include "bacula.h" +#include "jcr.h" +#include "win32.h" + +trayMonitor::trayMonitor() +{ + +// m_tbcreated_msg = RegisterWindowMessage("TaskbarCreated"); + + /* Create a window to handle tray icon messages */ + WNDCLASSEX trayclass; + + trayclass.cbSize = sizeof(trayclass); + trayclass.style = 0; + trayclass.lpfnWndProc = trayMonitor::trayWinProc; + trayclass.cbClsExtra = 0; + trayclass.cbWndExtra = 0; + trayclass.hInstance = appInstance; + trayclass.hIcon = LoadIcon(NULL, IDI_APPLICATION); + trayclass.hCursor = LoadCursor(NULL, IDC_ARROW); + trayclass.hbrBackground = (HBRUSH)GetStockObject(WHITE_BRUSH); + trayclass.lpszMenuName = NULL; + trayclass.lpszClassName = APP_NAME; + trayclass.hIconSm = LoadIcon(NULL, IDI_APPLICATION); + + RegisterClassEx(&trayclass); + + m_hwnd = CreateWindow(APP_NAME, APP_NAME, WS_OVERLAPPEDWINDOW, + CW_USEDEFAULT, CW_USEDEFAULT, 200, 200, + NULL, NULL, appInstance, NULL); + if (!m_hwnd) { + PostQuitMessage(0); + return; + } + + /* Save our class pointer */ + SetWindowLong(m_hwnd, GWL_USERDATA, (LPARAM)this); + + + // Load the icons for the tray + m_idle_icon = LoadIcon(appInstance, MAKEINTRESOURCE(IDI_IDLE)); + m_running_icon = LoadIcon(appInstance, MAKEINTRESOURCE(IDI_RUNNING)); + m_error_icon = LoadIcon(appInstance, MAKEINTRESOURCE(IDI_JOB_ERROR)); + m_warn_icon = LoadIcon(appInstance, MAKEINTRESOURCE(IDI_JOB_WARNING)); + + /* Load the menu */ + m_hmenu = LoadMenu(appInstance, MAKEINTRESOURCE(IDR_TRAYMENU)); + m_visible = false; + m_installed = false; + + /* Install the icon in the tray */ + install(); + + /* Timer to trigger icon updating */ + SetTimer(m_hwnd, 1, 5000, NULL); +} + +trayMonitor::~trayMonitor() +{ + /* Remove the icon from the tray */ + sendMessage(NIM_DELETE, 0); + + if (m_hmenu) { + DestroyMenu(m_hmenu); + m_hmenu = NULL; + } +} + +void trayMonitor::install() +{ + m_installed = true; + sendMessage(NIM_ADD, bacstat); +} + +void trayMonitor::update(int bacstat) +{ + if (!m_installed) { + install(); + } + (void)bac_status(NULL, 0); + sendMessage(NIM_MODIFY, bacstat); +} + +void trayMonitor::sendMessage(DWORD msg, int bacstat) +{ + struct s_last_job *job; + + // Create the tray icon message + m_nid.hWnd = m_hwnd; + m_nid.cbSize = sizeof(m_nid); + m_nid.uID = IDI_BACULA; // never changes after construction + switch (bacstat) { + case 0: + m_nid.hIcon = m_idle_icon; + break; + case JS_Running: + m_nid.hIcon = m_running_icon; + break; + case JS_ErrorTerminated: + m_nid.hIcon = m_error_icon; + break; + default: + if (last_jobs->size() > 0) { + job = (struct s_last_job *)last_jobs->last(); + if (job->Errors) { + m_nid.hIcon = m_warn_icon; + } else { + m_nid.hIcon = m_idle_icon; + } + } else { + m_nid.hIcon = m_idle_icon; + } + break; + } + + m_nid.uFlags = NIF_ICON | NIF_MESSAGE; + m_nid.uCallbackMessage = WM_TRAYNOTIFY; + + + /* Use the resource string as tip */ + if (LoadString(appInstance, IDI_BACULA, m_nid.szTip, sizeof(m_nid.szTip))) { + m_nid.uFlags |= NIF_TIP; + } + + /* Add the Bacula status to the tip string */ + if (m_nid.uFlags & NIF_TIP) { + bac_status(m_nid.szTip, sizeof(m_nid.szTip)); + } + + if (Shell_NotifyIcon(msg, &m_nid)) { + EnableMenuItem(m_hmenu, ID_CLOSE, MF_ENABLED); + } +} + +/* + * This is the windows call back for our tray window + */ +LRESULT CALLBACK trayMonitor::trayWinProc(HWND hwnd, UINT iMsg, WPARAM wParam, LPARAM lParam) +{ + HMENU menu; + trayMonitor *mon = (trayMonitor *)GetWindowLong(hwnd, GWL_USERDATA); + + switch (iMsg) { + + /* Every five seconds, a timer message causes the icon to update */ + 
case WM_TIMER: + if (isAService()) { + mon->install(); + } + mon->update(bacstat); + break; + + case WM_CREATE: + return 0; + + case WM_COMMAND: + /* User has clicked an item on the tray monitor menu */ + switch (LOWORD(wParam)) { + case ID_STATUS: + /* show the dialog box */ + mon->m_status.show(true); + mon->update(bacstat); + break; + + case ID_ABOUT: + /* Show the About box */ + mon->m_about.show(true); + break; + + /* This is turned off now */ +#ifdef xxx + case ID_CLOSE: + /* User selected Close from the tray menu */ + PostMessage(hwnd, WM_CLOSE, 0, 0); + break; +#endif + + } + return 0; + + /* Our special command to check for mouse events */ + case WM_TRAYNOTIFY: + /* Right button click pops up the menu */ + if (lParam == WM_RBUTTONUP) { + POINT mouse; + /* Get the menu and pop it up */ + menu = GetSubMenu(mon->m_hmenu, 0); + if (!menu) { + return 0; + } + + /* The first menu item (Status) is the default */ + SetMenuDefaultItem(menu, 0, TRUE); + GetCursorPos(&mouse); + SetForegroundWindow(mon->m_nid.hWnd); /* display the menu */ + + /* Open the menu at the mouse position */ + TrackPopupMenu(menu, 0, mouse.x, mouse.y, 0, mon->m_nid.hWnd, NULL); + + /* Left double click brings up status dialog directly */ + } else if (lParam == WM_LBUTTONDBLCLK) { + mon->m_status.show(true); + mon->update(bacstat); + } + return 0; + + case WM_CLOSE: + if (isAService()) { + mon->sendMessage(NIM_DELETE, 0); + } + terminate_app(0); + break; + + case WM_DESTROY: + /* zap everything */ + PostQuitMessage(0); + return 0; + + case WM_QUERYENDSESSION: + if (!isAService() || lParam == 0) { + PostQuitMessage(0); + return TRUE; + } + return TRUE; + + default: + /* Need to redraw tray icon */ +// if (iMsg == mon->m_tbcreated_msg) { +// mon->install(); +// } + break; + } + + return DefWindowProc(hwnd, iMsg, wParam, lParam); +} diff --git a/src/win32/libwin32/trayMonitor.h b/src/win32/libwin32/trayMonitor.h new file mode 100644 index 00000000..979237c5 --- /dev/null +++ b/src/win32/libwin32/trayMonitor.h @@ -0,0 +1,59 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2019 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
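The commented-out m_tbcreated_msg lines in the constructor and in trayWinProc() above are the hook for a real-world event: when Explorer crashes or restarts, every notification-area icon disappears, and the new taskbar broadcasts a registered "TaskbarCreated" window message so applications can re-add theirs. A sketch of how that hook is normally wired up (not part of the Bacula sources; it reuses the existing install() method, which re-issues Shell_NotifyIcon(NIM_ADD, ...)):

#include <windows.h>

static UINT tbcreated_msg;

/* Register the broadcast message once, e.g. in the trayMonitor constructor */
static void init_taskbar_created_message()
{
   tbcreated_msg = RegisterWindowMessage(TEXT("TaskbarCreated"));
}

/* Then, in the default branch of the window procedure, assuming the
 * trayMonitor pointer was recovered from the window user data as above:
 *
 *    if (iMsg == tbcreated_msg) {
 *       mon->install();        // puts the icon back after an Explorer restart
 *       return 0;
 *    }
 */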
+*/ +/* + * Kern Sibbald, August 2007 + * + */ + +#ifndef __TRAY_MONITOR_H_ +#define __TRAY_MONITOR_H_ 1 + +#define WM_TRAYNOTIFY WM_USER+1 + +/* Define the trayMonitor class */ +class trayMonitor +{ +public: + trayMonitor(); + ~trayMonitor(); + + void show(bool show); + void install(); + void update(int bacstat); + void sendMessage(DWORD msg, int bacstat); + + static LRESULT CALLBACK trayWinProc(HWND hwnd, UINT iMsg, WPARAM wParam, LPARAM lParam); + + bool m_visible; + bool m_installed; + UINT m_tbcreated_msg; + + aboutDialog m_about; + statusDialog m_status; + + HWND m_hwnd; + HMENU m_hmenu; + NOTIFYICONDATA m_nid; + HICON m_idle_icon; + HICON m_running_icon; + HICON m_error_icon; + HICON m_warn_icon; +}; + +#endif /* __TRAY_MONITOR_H_ */ diff --git a/src/win32/libwin32/warn.ico b/src/win32/libwin32/warn.ico new file mode 100644 index 00000000..26e1d398 Binary files /dev/null and b/src/win32/libwin32/warn.ico differ diff --git a/src/win32/libwin32/win32.h b/src/win32/libwin32/win32.h new file mode 100644 index 00000000..b9456c6e --- /dev/null +++ b/src/win32/libwin32/win32.h @@ -0,0 +1,31 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2019 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Kern Sibbald, August 2007 + * + */ + +#include "protos.h" +#include "res.h" + +#ifdef HAVE_TRAY_MONITOR +#include "aboutDialog.h" +#include "statusDialog.h" +#include "trayMonitor.h" +#endif diff --git a/src/win32/makeall b/src/win32/makeall new file mode 100755 index 00000000..0a31ac5d --- /dev/null +++ b/src/win32/makeall @@ -0,0 +1,7 @@ +#!/bin/sh +make clean +# Build win32 Bacula +make bat=no +# +# Build win64 Bacula +make win64=yes diff --git a/src/win32/patches/binutils_texinfo_version.patch b/src/win32/patches/binutils_texinfo_version.patch new file mode 100644 index 00000000..01100e9a --- /dev/null +++ b/src/win32/patches/binutils_texinfo_version.patch @@ -0,0 +1,31 @@ +diff -Nur binutils-2.16.91-20060119-1.orig/configure binutils-2.16.91-20060119-1.patched/configure +--- binutils-2.16.91-20060119-1.orig/configure 2006-01-19 04:57:59.000000000 +0000 ++++ binutils-2.16.91-20060119-1.patched/configure 2008-10-04 02:43:58.768687939 +0000 +@@ -3549,7 +3549,7 @@ + # For an installed makeinfo, we require it to be from texinfo 4.2 or + # higher, else we use the "missing" dummy. + if ${MAKEINFO} --version \ +- | egrep 'texinfo[^0-9]*([1-3][0-9]|4\.[2-9]|[5-9])' >/dev/null 2>&1; then ++ | egrep 'texinfo[^0-9]*([1-3][0-9]|4\.[2-9]|4\.1[0-9]|[5-9])' >/dev/null 2>&1; then + : + else + MAKEINFO="$MISSING makeinfo" +diff -Nur binutils-2.16.91-20060119-1.orig/gas/Makefile.in binutils-2.16.91-20060119-1.patched/gas/Makefile.in +--- binutils-2.16.91-20060119-1.orig/gas/Makefile.in 2006-01-19 04:59:26.000000000 +0000 ++++ binutils-2.16.91-20060119-1.patched/gas/Makefile.in 2008-10-04 03:13:51.380719189 +0000 +@@ -3266,6 +3266,7 @@ + .PHONY: dep dep-in dep-am + #MKDEP DO NOT PUT ANYTHING BETWEEN THIS LINE AND THE MATCHING WARNING BELOW. 
+ app.o: app.c $(INCDIR)/symcat.h ++ $(COMPILE) -c $< $(NO_WERROR) + as.o: as.c $(INCDIR)/symcat.h subsegs.h $(INCDIR)/obstack.h \ + output-file.h sb.h macro.h dwarf2dbg.h dw2gencfi.h \ + $(INCDIR)/elf/dwarf2.h $(BFDVER_H) +@@ -3302,6 +3303,7 @@ + read.o: read.c $(INCDIR)/symcat.h $(INCDIR)/safe-ctype.h \ + subsegs.h $(INCDIR)/obstack.h sb.h macro.h ecoff.h \ + dw2gencfi.h $(INCDIR)/elf/dwarf2.h ++ $(COMPILE) -c $< $(NO_WERROR) + sb.o: sb.c sb.h $(INCDIR)/symcat.h + stabs.o: stabs.c $(INCDIR)/symcat.h $(INCDIR)/obstack.h \ + subsegs.h ecoff.h $(INCDIR)/aout/stab_gnu.h $(INCDIR)/aout/stab.def diff --git a/src/win32/patches/dvd+rw-tools.patch b/src/win32/patches/dvd+rw-tools.patch new file mode 100644 index 00000000..3f73d350 --- /dev/null +++ b/src/win32/patches/dvd+rw-tools.patch @@ -0,0 +1,264 @@ +--- ../orig/dvd+rw-tools-7.0/growisofs.c 2006-09-24 10:28:53.000000000 -0700 ++++ ./growisofs.c 2006-12-07 01:56:17.000000000 -0800 +@@ -370,12 +370,17 @@ + * upon Layer Break command, therefore longer timeout is required; + * - Linux: deploy BLKFLSBUF to avoid media reloads when possible; + * - add unit buffer utilization indicator [by request from K3b]; ++ * 7.0.1: (by Nicolas Boichat, Bacula project) ++ * - Allow session to cross 4GB boundary regardless of medium type ++ * (don't need to have a DL media) ++ * - Add a -F option (used instead of -M or -Z), which displays next_session ++ * offset and capacity (free space = next_session - capacity). + */ + #define PRINT_VERSION(cmd) do { \ + char *s=strrchr((cmd),'/'); \ + s ? s++ : (s=(cmd)); \ + printf ("* %.*sgrowisofs by ,"\ +- " version 7.0,\n",(int)(s-(cmd)),(cmd)); \ ++ " version 7.0.1,\n",(int)(s-(cmd)),(cmd)); \ + } while (0) + + #define _LARGEFILE_SOURCE +@@ -2576,7 +2581,8 @@ + else arg = mkisofs_argv[0]; + + cmdsz--; +- assert ((len0=strlen (arg)) < cmdsz); ++ len0 = strlen (arg); ++ assert (len0 < cmdsz); + + strcpy (cmd,arg), cmd[len0++] = ' ', cmdsz -= len0; + +@@ -2773,6 +2779,18 @@ + else in_device = argv[++i]; + dev_found = 'Z'; + } ++ else if (argv[i][1] == 'F') ++ { if (len > 2) in_device = argv[i]+2; ++ else in_device = argv[++i]; ++ dev_found = 'F'; ++ dry_run = 1; /* NEVER write anything with -F */ ++ } ++ else if (!strncmp(opt,"-free-space",11)) ++ { if (len > 11) in_device = opt+11; ++ else in_device = argv[++i]; ++ dev_found = 'F'; ++ dry_run = 1; /* NEVER write anything with -F */ ++ } + else if (!strcmp(opt,"-poor-man")) + { if (poor_man<0) poor_man = 1; + continue; +@@ -2898,7 +2916,7 @@ + else if (argv[i][1] == '?' || !strcmp(opt,"-help")) + { PRINT_VERSION (argv[0]); + printf ("- usage: %s [-dvd-compat] [-overburn] [-speed=1] \\\n" +- " -[ZM] /dev/dvd \n",argv[0]); ++ " -[ZMF] /dev/dvd \n",argv[0]); + printf (" for see 'mkisofs %s'\n",opt); + exit (FATAL_START(EINVAL)); + } +@@ -2993,7 +3011,13 @@ + fprintf (stderr," you most likely want to use -Z option.\n"), + exit (FATAL_START(errno)); + +- if (dev_found == 'M') ++ if ((dev_found == 'M') || ++ ((dev_found == 'F') && !(mmc_profile&0x10000)) && ++ (saved_descriptors[0].type[0] || ++ saved_descriptors[0].type[1] || ++ saved_descriptors[0].type[2])) ++ /* -F : The medium is not blank, there is a fs on it (the_buffer[0,1 or 2] != 0), ++ so compute next_session. 
*/ + { if (memcmp (saved_descriptors[0].type,"\1CD001",6)) + fprintf (stderr,":-( %s doesn't look like isofs...\n", + in_device), exit(FATAL_START(EMEDIUMTYPE)); +@@ -3016,8 +3040,7 @@ + exit(FATAL_START(EINVAL)); + } + else if (next_session > (0x200000-0x5000)) /* 4GB/2K-40MB/2K */ +- if ((mmc_profile&0xFFFF)<0x20 || +- ((mmc_profile&0xFFFF)<0x40 && !no_4gb_check)) ++ if ((mmc_profile&0xFFFF)<0x40 && !no_4gb_check) + fprintf (stderr,":-( next session would cross 4GB " + "boundary, aborting...\n"), + exit (FATAL_START(ENOSPC)); +@@ -3060,7 +3083,7 @@ + exit (FATAL_START(EINVAL)); + + if (imgfd<0) +- { if (mkisofs_argc==1) ++ { if ((mkisofs_argc==1) && (dev_found != 'F')) + fprintf (stderr,"%s: no mkisofs options specified, " + "aborting...\n",argv[0]), + exit (FATAL_START(EINVAL)); +@@ -3244,6 +3267,15 @@ + } + } + ++ if (dev_found == 'F') { ++ off64_t capacity = 0; ++ printf("next_session=%d\n", next_session * CD_BLOCK); ++ if (ioctl_handle!=INVALID_HANDLE) ++ capacity = get_capacity (ioctl_handle); ++ printf("capacity=%lld\n", capacity); ++ exit(0); ++ } ++ + if (imgfd>=0) + { quiet--; + if (builtin_dd (imgfd,out_fd,next_session*CD_BLOCK) < 0) +--- ../orig/dvd+rw-tools-7.0/Makefile 2006-08-27 13:07:37.000000000 -0700 ++++ ./Makefile 2006-12-07 01:56:17.000000000 -0800 +@@ -9,7 +9,7 @@ + + CHAIN=growisofs dvd+rw-format dvd+rw-booktype dvd+rw-mediainfo dvd-ram-control + clean: +- -(rm *.o $(CHAIN) rpl8 btcflash; exit 0) < /dev/null > /dev/null 2>&1 ++ -(rm *.o $(CHAIN) $(CHAIN:=.exe) rpl8 btcflash; exit 0) < /dev/null > /dev/null 2>&1 + + VER=7.0 + DIST=dvd+rw-tools-$(VER) +--- ../orig/dvd+rw-tools-7.0/Makefile.m4 2006-09-24 10:55:19.000000000 -0700 ++++ ./Makefile.m4 2006-12-07 01:59:30.000000000 -0800 +@@ -1,12 +1,5 @@ + # OBS! M4 processed! + changequote([, ]) +-[ +-CHAIN=growisofs dvd+rw-format dvd+rw-booktype dvd+rw-mediainfo dvd-ram-control +- +-dvd+rw-tools: $(CHAIN) +- +-WARN=#-Wall # developers are welcomed to build with `make WARN=-Wall' +-] + + # fix-up OS macro + ifelse(substr(OS,0,7),[CYGWIN_],[define([OS],[MINGW32])]) +@@ -38,11 +31,36 @@ + # + # MINGW section + # ++SUFFIXES+=.exe + CC =gcc + CFLAGS +=$(WARN) -mno-cygwin -O2 + CXX =g++ + CXXFLAGS+=$(WARN) -mno-cygwin -O2 -fno-exceptions + LINK.o =$(LINK.cc) ++EXE =.exe ++]) ++ ++ifelse(OS,XMINGW32,[ ++# ++# MINGW cross-compile section ++# ++SUFFIXES+=.exe ++CC =mingw32-gcc ++CFLAGS +=$(WARN) -mno-cygwin -O2 ++CXX =mingw32-g++ ++CXXFLAGS+=$(WARN) -mno-cygwin -O2 -fno-exceptions ++LINK.o =$(LINK.cc) ++EXE =.exe ++%.exe:%.o ++ $(CXX) $(CXXFLAGS) -o $@ $> $^ $(LDFLAGS) $(LDLIBS) ++ ++install: dvd+rw-tools ++ [[ -d $(prefix)/bin ]] || mkdir -p $(prefix)/bin ++ install -m 0755 $(CHAIN) $(prefix)/bin ++ [[ -d $(manprefix)/man1 ]] || mkdir -p $(manprefix)/man1 ++ install -m 0644 growisofs.1 $(manprefix)/man1 ++ -[[ -f rpl8 ]] && install -m 0755 rpl8 $(prefix)/bin; : ++ -[[ -f btcflash ]] && install -m 0755 btcflash $(prefix)/bin; : + ]) + + ifelse(OS,BSD,[ +@@ -207,27 +225,33 @@ + + # common section + [ +-growisofs: growisofs_mmc.o growisofs.o ++CHAIN=growisofs$(EXE) dvd+rw-format$(EXE) dvd+rw-booktype$(EXE) dvd+rw-mediainfo$(EXE) dvd-ram-control$(EXE) ++ ++dvd+rw-tools: $(CHAIN) ++ ++WARN=#-Wall # developers are welcomed to build with `make WARN=-Wall' ++ ++growisofs$(EXE): growisofs_mmc.o growisofs.o + growisofs.o: growisofs.c mp.h + growisofs_mmc.o: growisofs_mmc.cpp transport.hxx + +-dvd+rw-format: dvd+rw-format.o ++dvd+rw-format$(EXE): dvd+rw-format.o + dvd+rw-format.o: dvd+rw-format.cpp transport.hxx + +-dvd+rw-mediainfo: 
dvd+rw-mediainfo.o ++dvd+rw-mediainfo$(EXE): dvd+rw-mediainfo.o + dvd+rw-mediainfo.o: dvd+rw-mediainfo.cpp transport.hxx + +-dvd+rw-booktype: dvd+rw-booktype.o ++dvd+rw-booktype$(EXE): dvd+rw-booktype.o + dvd+rw-booktype.o: dvd+rw-booktype.cpp transport.hxx + +-dvd-ram-control: dvd-ram-control.o ++dvd-ram-control$(EXE): dvd-ram-control.o + dvd-ram-control.o: dvd-ram-control.cpp transport.hxx + +-rpl8: rpl8.o ++rpl8$(EXE): rpl8.o + rpl8.o: rpl8.cpp transport.hxx + +rpl8: rpl8 + #so that I can invoke `make +rpl8' to build rpl8... +-btcflash: btcflash.o ++btcflash$(EXE): btcflash.o + btcflash.o: btcflash.cpp transport.hxx + +btcflash: btcflash + #so that I can invoke `make +btcflash' to build btcflash... +--- ../orig/dvd+rw-tools-7.0/Makefile.msc 1969-12-31 16:00:00.000000000 -0800 ++++ ./Makefile.msc 2006-12-07 01:56:17.000000000 -0800 +@@ -0,0 +1,48 @@ ++CHAIN=growisofs.exe dvd+rw-format.exe dvd+rw-booktype.exe dvd+rw-mediainfo.exe dvd-ram-control.exe ++ ++DEFINES=/D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_CRT_SECURE_NO_DEPRECATE" /D "_CRT_NONSTDC_NO_DEPRECATE" /D "_MBCS" ++CFLAGS=$(DEFINES) /O2 /FD /EHsc /MD /W3 /nologo /c /Wp64 /Zi ++CPPFLAGS=$(DEFINES) /O2 /FD /EHsc /MD /W3 /nologo /c /Wp64 /Zi ++LDFLAGS=/NOLOGO /SUBSYSTEM:CONSOLE /MANIFEST ++ ++all: ++ ++clean: ++ -del *.obj *.manifest *.pdb *.idb $(CHAIN) > nul 2>&1 ++ ++install: $(CHAIN) ++ !xcopy $** $(prefix)\bin\ /y ++ ++dvd+rw-tools: $(CHAIN) ++ ++growisofs.exe: growisofs_mmc.obj growisofs.obj ++ link $(LDFLAGS) /MANIFESTFILE:$@.manifest /out:$@ $** ++ mt /nologo /outputresource:"$@;#1" -manifest $@.manifest ++ ++growisofs.obj: growisofs.c mp.h ++ ++growisofs_mmc.obj: growisofs_mmc.cpp transport.hxx ++ ++dvd+rw-format.exe: dvd+rw-format.obj ++ link $(LDFLAGS) /MANIFESTFILE:$@.manifest /out:$@ $** ++ mt /nologo /outputresource:"$@;#1" -manifest $@.manifest ++ ++dvd+rw-format.obj: dvd+rw-format.cpp transport.hxx ++ ++dvd+rw-mediainfo.exe: dvd+rw-mediainfo.obj ++ link $(LDFLAGS) /MANIFESTFILE:$@.manifest /out:$@ $** ++ mt /nologo /outputresource:"$@;#1" -manifest $@.manifest ++ ++dvd+rw-mediainfo.obj: dvd+rw-mediainfo.cpp transport.hxx ++ ++dvd+rw-booktype.exe: dvd+rw-booktype.obj ++ link $(LDFLAGS) /MANIFESTFILE:$@.manifest /out:$@ $** ++ mt /nologo /outputresource:"$@;#1" -manifest $@.manifest ++ ++dvd+rw-booktype.obj: dvd+rw-booktype.cpp transport.hxx ++ ++dvd-ram-control.exe: dvd-ram-control.obj ++ link $(LDFLAGS) /MANIFESTFILE:$@.manifest /out:$@ $** ++ mt /nologo /outputresource:"$@;#1" -manifest $@.manifest ++ ++dvd-ram-control.obj: dvd-ram-control.cpp transport.hxx diff --git a/src/win32/patches/mingw-utils.patch b/src/win32/patches/mingw-utils.patch new file mode 100644 index 00000000..e2d225f5 --- /dev/null +++ b/src/win32/patches/mingw-utils.patch @@ -0,0 +1,6670 @@ +diff -ru ../release/mingw-utils-0.3/configure ./configure +--- ../release/mingw-utils-0.3/configure 2003-11-25 08:31:12.000000000 -0800 ++++ ./configure 2006-08-11 05:25:25.000000000 -0700 +@@ -1,6 +1,6 @@ + #! /bin/sh + # Guess values for system-dependent variables and create Makefiles. +-# Generated by GNU Autoconf 2.58 for mingw-utils 0.3. ++# Generated by GNU Autoconf 2.59 for mingw-utils 0.3. + # + # Copyright (C) 2003 Free Software Foundation, Inc. 
+ # This configure script is free software; the Free Software Foundation +@@ -309,7 +309,7 @@ + # include + #endif" + +-ac_subst_vars='SHELL PATH_SEPARATOR PACKAGE_NAME PACKAGE_TARNAME PACKAGE_VERSION PACKAGE_STRING PACKAGE_BUGREPORT exec_prefix prefix program_transform_name bindir sbindir libexecdir datadir sysconfdir sharedstatedir localstatedir libdir includedir oldincludedir infodir mandir build_alias host_alias target_alias DEFS ECHO_C ECHO_N ECHO_T LIBS INSTALL_PROGRAM INSTALL_SCRIPT INSTALL_DATA CYGPATH_W PACKAGE VERSION ACLOCAL AUTOCONF AUTOMAKE AUTOHEADER MAKEINFO AMTAR install_sh STRIP ac_ct_STRIP INSTALL_STRIP_PROGRAM AWK SET_MAKE am__leading_dot MAINTAINER_MODE_TRUE MAINTAINER_MODE_FALSE MAINT CC CFLAGS LDFLAGS CPPFLAGS ac_ct_CC EXEEXT OBJEXT DEPDIR am__include am__quote AMDEP_TRUE AMDEP_FALSE AMDEPBACKSLASH CCDEPMODE am__fastdepCC_TRUE am__fastdepCC_FALSE CXX CXXFLAGS ac_ct_CXX CXXDEPMODE am__fastdepCXX_TRUE am__fastdepCXX_FALSE RANLIB ac_ct_RANLIB LEX LEXLIB LEX_OUTPUT_ROOT YACC WINDRES ac_pt_WINDRES CPP EGREP LIBOBJS LTLIBOBJS' ++ac_subst_vars='SHELL PATH_SEPARATOR PACKAGE_NAME PACKAGE_TARNAME PACKAGE_VERSION PACKAGE_STRING PACKAGE_BUGREPORT exec_prefix prefix program_transform_name bindir sbindir libexecdir datadir sysconfdir sharedstatedir localstatedir libdir includedir oldincludedir infodir mandir build_alias host_alias target_alias DEFS ECHO_C ECHO_N ECHO_T LIBS INSTALL_PROGRAM INSTALL_SCRIPT INSTALL_DATA CYGPATH_W PACKAGE VERSION ACLOCAL AUTOCONF AUTOMAKE AUTOHEADER MAKEINFO install_sh STRIP ac_ct_STRIP INSTALL_STRIP_PROGRAM mkdir_p AWK SET_MAKE am__leading_dot AMTAR am__tar am__untar MAINTAINER_MODE_TRUE MAINTAINER_MODE_FALSE MAINT BUILD_DRMINGW_TRUE BUILD_DRMINGW_FALSE BUILD_REDIR_TRUE BUILD_REDIR_FALSE BUILD_RES2COFF_TRUE BUILD_RES2COFF_FALSE CC CFLAGS LDFLAGS CPPFLAGS ac_ct_CC EXEEXT OBJEXT DEPDIR am__include am__quote AMDEP_TRUE AMDEP_FALSE AMDEPBACKSLASH CCDEPMODE am__fastdepCC_TRUE am__fastdepCC_FALSE CXX CXXFLAGS ac_ct_CXX CXXDEPMODE am__fastdepCXX_TRUE am__fastdepCXX_FALSE RANLIB ac_ct_RANLIB LEX LEXLIB LEX_OUTPUT_ROOT YACC WINDRES ac_pt_WINDRES CPP EGREP LIBOBJS LTLIBOBJS' + ac_subst_files='' + + # Initialize some variables set by options. 
+@@ -859,10 +859,15 @@ + Optional Features: + --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no) + --enable-FEATURE[=ARG] include FEATURE [ARG=yes] +- --enable-maintainer-mode enable make rules and dependencies not useful +- (and sometimes confusing) to the casual installer +- --disable-dependency-tracking Speeds up one-time builds +- --enable-dependency-tracking Do not reject slow dependency extractors ++ --enable-maintainer-mode enable make rules and dependencies not useful ++ (and sometimes confusing) to the casual installer ++ --disable-drmingw Don't build drmingw (default is build) ++ --disable-redir Don't build redir (default is build) ++ --disable-res2coff Don't build res2coff (default is build) ++ --disable-nonportable shortcut for --disable-drmingw, --disable-redir and ++ --disable-res2coff ++ --disable-dependency-tracking speeds up one-time build ++ --enable-dependency-tracking do not reject slow dependency extractors + + Some influential environment variables: + CC C compiler command +@@ -967,7 +972,7 @@ + else + echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 + fi +- cd "$ac_popdir" ++ cd $ac_popdir + done + fi + +@@ -975,7 +980,7 @@ + if $ac_init_version; then + cat <<\_ACEOF + mingw-utils configure 0.3 +-generated by GNU Autoconf 2.58 ++generated by GNU Autoconf 2.59 + + Copyright (C) 2003 Free Software Foundation, Inc. + This configure script is free software; the Free Software Foundation +@@ -989,7 +994,7 @@ + running configure, to aid debugging if configure makes a mistake. + + It was created by mingw-utils $as_me 0.3, which was +-generated by GNU Autoconf 2.58. Invocation command line was ++generated by GNU Autoconf 2.59. Invocation command line was + + $ $0 $@ + +@@ -1325,7 +1330,7 @@ + + + +-am__api_version="1.7" ++am__api_version="1.9" + ac_aux_dir= + for ac_dir in $srcdir $srcdir/.. $srcdir/../..; do + if test -f $ac_dir/install-sh; then +@@ -1488,7 +1493,6 @@ + program_transform_name=`echo $program_transform_name | sed -f conftest.sed` + rm conftest.sed + +- + # expand $ac_aux_dir to an absolute path + am_aux_dir=`cd $ac_aux_dir && pwd` + +@@ -1502,6 +1506,39 @@ + echo "$as_me: WARNING: \`missing' script is too old or missing" >&2;} + fi + ++if mkdir -p --version . >/dev/null 2>&1 && test ! -d ./--version; then ++ # We used to keeping the `.' as first argument, in order to ++ # allow $(mkdir_p) to be used without argument. As in ++ # $(mkdir_p) $(somedir) ++ # where $(somedir) is conditionally defined. However this is wrong ++ # for two reasons: ++ # 1. if the package is installed by a user who cannot write `.' ++ # make install will fail, ++ # 2. the above comment should most certainly read ++ # $(mkdir_p) $(DESTDIR)$(somedir) ++ # so it does not work when $(somedir) is undefined and ++ # $(DESTDIR) is not. ++ # To support the latter case, we have to write ++ # test -z "$(somedir)" || $(mkdir_p) $(DESTDIR)$(somedir), ++ # so the `.' trick is pointless. ++ mkdir_p='mkdir -p --' ++else ++ # On NextStep and OpenStep, the `mkdir' command does not ++ # recognize any option. It will interpret all options as ++ # directories to create, and then abort because `.' already ++ # exists. ++ for d in ./-p ./--version; ++ do ++ test -d $d && rmdir $d ++ done ++ # $(mkinstalldirs) is defined by Automake if mkinstalldirs exists. 
++ if test -f "$ac_aux_dir/mkinstalldirs"; then ++ mkdir_p='$(mkinstalldirs)' ++ else ++ mkdir_p='$(install_sh) -d' ++ fi ++fi ++ + for ac_prog in gawk mawk nawk awk + do + # Extract the first word of "$ac_prog", so it can be a program name with args. +@@ -1580,7 +1617,7 @@ + fi + rmdir .tst 2>/dev/null + +- # test to see if srcdir already configured ++# test to see if srcdir already configured + if test "`cd $srcdir && pwd`" != "`pwd`" && + test -f $srcdir/config.status; then + { { echo "$as_me:$LINENO: error: source directory already configured; run \"make distclean\" there first" >&5 +@@ -1619,9 +1656,6 @@ + + MAKEINFO=${MAKEINFO-"${am_missing_run}makeinfo"} + +- +-AMTAR=${AMTAR-"${am_missing_run}tar"} +- + install_sh=${install_sh-"$am_aux_dir/install-sh"} + + # Installed binaries are usually stripped using `strip' when the user +@@ -1714,6 +1748,13 @@ + + # We need awk for the "check" target. The system "awk" is bad on + # some platforms. ++# Always define AMTAR for backward compatibility. ++ ++AMTAR=${AMTAR-"${am_missing_run}tar"} ++ ++am__tar='${AMTAR} chof - "$$tardir"'; am__untar='${AMTAR} xf -' ++ ++ + + + +@@ -1745,6 +1786,99 @@ + + + ++# Check whether --enable-drmingw or --disable-drmingw was given. ++if test "${enable_drmingw+set}" = set; then ++ enableval="$enable_drmingw" ++ case "${enableval}" in ++ yes) build_drmingw=true ;; ++ no) build_drmingw=false ;; ++ *) { { echo "$as_me:$LINENO: error: --enable-drmingw argument must be yes or no, not \"${enableval}\"" >&5 ++echo "$as_me: error: --enable-drmingw argument must be yes or no, not \"${enableval}\"" >&2;} ++ { (exit 1); exit 1; }; } ;; ++ esac ++else ++ build_drmingw=true ++fi; ++ ++# Check whether --enable-redir or --disable-redir was given. ++if test "${enable_redir+set}" = set; then ++ enableval="$enable_redir" ++ case "${enableval}" in ++ yes) build_redir=true ;; ++ no) build_redir=false ;; ++ *) { { echo "$as_me:$LINENO: error: --enable-redir argument must be yes or no, not \"${enableval}\"" >&5 ++echo "$as_me: error: --enable-redir argument must be yes or no, not \"${enableval}\"" >&2;} ++ { (exit 1); exit 1; }; } ;; ++ esac ++else ++ build_redir=true ++fi; ++ ++# Check whether --enable-res2coff or --disable-res2coff was given. ++if test "${enable_res2coff+set}" = set; then ++ enableval="$enable_res2coff" ++ case "${enableval}" in ++ yes) build_res2coff=true ;; ++ no) build_res2coff=false ;; ++ *) { { echo "$as_me:$LINENO: error: --enable-res2coff argument must be yes or no, not \"${enableval}\"" >&5 ++echo "$as_me: error: --enable-res2coff argument must be yes or no, not \"${enableval}\"" >&2;} ++ { (exit 1); exit 1; }; } ;; ++ esac ++else ++ build_res2coff=true ++fi; ++ ++# Check whether --enable-nonportable or --disable-nonportable was given. 
++if test "${enable_nonportable+set}" = set; then ++ enableval="$enable_nonportable" ++ case "${enableval}" in ++ yes) build_drmingw=true ++ build_redir=true ++ build_res2coff=true ++ ;; ++ no) build_res2coff=false ++ build_redir=false ++ build_res2coff=false ++ ;; ++ *) { { echo "$as_me:$LINENO: error: --enable-nonportable argument must be yes or no, not \"${enableval}\"" >&5 ++echo "$as_me: error: --enable-nonportable argument must be yes or no, not \"${enableval}\"" >&2;} ++ { (exit 1); exit 1; }; } ;; ++ esac ++else ++ build_res2coff=true ++fi; ++ ++ ++ ++if test "${build_drmingw}" = "true"; then ++ BUILD_DRMINGW_TRUE= ++ BUILD_DRMINGW_FALSE='#' ++else ++ BUILD_DRMINGW_TRUE='#' ++ BUILD_DRMINGW_FALSE= ++fi ++ ++ ++ ++if test "${build_redir}" = "true"; then ++ BUILD_REDIR_TRUE= ++ BUILD_REDIR_FALSE='#' ++else ++ BUILD_REDIR_TRUE='#' ++ BUILD_REDIR_FALSE= ++fi ++ ++ ++ ++if test "${build_res2coff}" = "true"; then ++ BUILD_RES2COFF_TRUE= ++ BUILD_RES2COFF_FALSE='#' ++else ++ BUILD_RES2COFF_TRUE='#' ++ BUILD_RES2COFF_FALSE= ++fi ++ ++ + # Checks for programs. + ac_ext=c + ac_cpp='$CPP $CPPFLAGS' +@@ -2314,7 +2448,8 @@ + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && +- { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err' ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? +@@ -2372,7 +2507,8 @@ + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && +- { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err' ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? +@@ -2488,7 +2624,8 @@ + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && +- { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err' ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? +@@ -2542,7 +2679,8 @@ + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && +- { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err' ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? +@@ -2587,7 +2725,8 @@ + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && +- { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err' ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? +@@ -2631,7 +2770,8 @@ + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && +- { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err' ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? +@@ -2776,7 +2916,9 @@ + : > sub/conftest.c + for i in 1 2 3 4 5 6; do + echo '#include "conftst'$i'.h"' >> sub/conftest.c +- : > sub/conftst$i.h ++ # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with ++ # Solaris 8's {/usr,}/bin/sh. 
++ touch sub/conftst$i.h + done + echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf + +@@ -2804,9 +2946,14 @@ + grep sub/conftest.${OBJEXT-o} sub/conftest.Po > /dev/null 2>&1 && + ${MAKE-make} -s -f confmf > /dev/null 2>&1; then + # icc doesn't choke on unknown options, it will just issue warnings +- # (even with -Werror). So we grep stderr for any message +- # that says an option was ignored. +- if grep 'ignoring option' conftest.err >/dev/null 2>&1; then :; else ++ # or remarks (even with -Werror). So we grep stderr for any message ++ # that says an option was ignored or not supported. ++ # When given -MP, icc 7.0 and 7.1 complain thusly: ++ # icc: Command line warning: ignoring option '-M'; no argument required ++ # The diagnosis changed in icc 8.0: ++ # icc: Command line remark: option '-MP' not supported ++ if (grep 'ignoring option' conftest.err || ++ grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else + am_cv_CC_dependencies_compiler_type=$depmode + break + fi +@@ -2982,7 +3129,8 @@ + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && +- { ac_try='test -z "$ac_cxx_werror_flag" || test ! -s conftest.err' ++ { ac_try='test -z "$ac_cxx_werror_flag" ++ || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? +@@ -3040,7 +3188,8 @@ + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && +- { ac_try='test -z "$ac_cxx_werror_flag" || test ! -s conftest.err' ++ { ac_try='test -z "$ac_cxx_werror_flag" ++ || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? +@@ -3111,7 +3260,8 @@ + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && +- { ac_try='test -z "$ac_cxx_werror_flag" || test ! -s conftest.err' ++ { ac_try='test -z "$ac_cxx_werror_flag" ++ || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? +@@ -3155,7 +3305,8 @@ + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && +- { ac_try='test -z "$ac_cxx_werror_flag" || test ! -s conftest.err' ++ { ac_try='test -z "$ac_cxx_werror_flag" ++ || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? +@@ -3229,7 +3380,9 @@ + : > sub/conftest.c + for i in 1 2 3 4 5 6; do + echo '#include "conftst'$i'.h"' >> sub/conftest.c +- : > sub/conftst$i.h ++ # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with ++ # Solaris 8's {/usr,}/bin/sh. ++ touch sub/conftst$i.h + done + echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf + +@@ -3257,9 +3410,14 @@ + grep sub/conftest.${OBJEXT-o} sub/conftest.Po > /dev/null 2>&1 && + ${MAKE-make} -s -f confmf > /dev/null 2>&1; then + # icc doesn't choke on unknown options, it will just issue warnings +- # (even with -Werror). So we grep stderr for any message +- # that says an option was ignored. +- if grep 'ignoring option' conftest.err >/dev/null 2>&1; then :; else ++ # or remarks (even with -Werror). So we grep stderr for any message ++ # that says an option was ignored or not supported. 
++ # When given -MP, icc 7.0 and 7.1 complain thusly: ++ # icc: Command line warning: ignoring option '-M'; no argument required ++ # The diagnosis changed in icc 8.0: ++ # icc: Command line remark: option '-MP' not supported ++ if (grep 'ignoring option' conftest.err || ++ grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else + am_cv_CXX_dependencies_compiler_type=$depmode + break + fi +@@ -3601,7 +3759,8 @@ + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && +- { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err' ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? +@@ -3667,7 +3826,8 @@ + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && +- { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err' ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? +@@ -3756,7 +3916,8 @@ + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && +- { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err' ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? +@@ -3903,9 +4064,6 @@ + done + done + +- test -z "$ac_cv_path_ac_pt_WINDRES" && ac_cv_path_ac_pt_WINDRES="{ echo "$as_me:$LINENO: WARNING: Could not find a windres tool in your PATH. Will not be able to compile drmingw." >&5 +-echo "$as_me: WARNING: Could not find a windres tool in your PATH. Will not be able to compile drmingw." >&2;} +-" + ;; + esac + fi +@@ -3925,6 +4083,22 @@ + fi + + ++if test -z "${WINDRES}"; then ++ { echo "$as_me:$LINENO: WARNING: windres tool isn't in your PATH, drmingw can't be built!!" >&5 ++echo "$as_me: WARNING: windres tool isn't in your PATH, drmingw can't be built!!" >&2;} ++ ++ ++if false; then ++ BUILD_DRMINGW_TRUE= ++ BUILD_DRMINGW_FALSE='#' ++else ++ BUILD_DRMINGW_TRUE='#' ++ BUILD_DRMINGW_FALSE= ++fi ++ ++fi ++ ++ + + # Checks for header files. + ac_ext=c +@@ -4210,7 +4384,8 @@ + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && +- { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err' ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? +@@ -4342,9 +4517,7 @@ + + fi + +-where_toGet_utime_h='not found!' +-# this part is scrambled by autoconf. phooey. so we just cannot tell the user what we are doing. +-# AC_MSG_CHECKING([for whether utime.h is found as "utime.h" or "sys/utime.h"]) ++ + # On IRIX 5.3, sys/types and inttypes.h are conflicting. + + +@@ -4383,7 +4556,8 @@ + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && +- { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err' ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? +@@ -4416,6 +4590,160 @@ + done + + ++ ++for ac_header in windows.h ++do ++as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh` ++if eval "test \"\${$as_ac_Header+set}\" = set"; then ++ echo "$as_me:$LINENO: checking for $ac_header" >&5 ++echo $ECHO_N "checking for $ac_header... 
$ECHO_C" >&6 ++if eval "test \"\${$as_ac_Header+set}\" = set"; then ++ echo $ECHO_N "(cached) $ECHO_C" >&6 ++fi ++echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5 ++echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6 ++else ++ # Is the header compilable? ++echo "$as_me:$LINENO: checking $ac_header usability" >&5 ++echo $ECHO_N "checking $ac_header usability... $ECHO_C" >&6 ++cat >conftest.$ac_ext <<_ACEOF ++/* confdefs.h. */ ++_ACEOF ++cat confdefs.h >>conftest.$ac_ext ++cat >>conftest.$ac_ext <<_ACEOF ++/* end confdefs.h. */ ++$ac_includes_default ++#include <$ac_header> ++_ACEOF ++rm -f conftest.$ac_objext ++if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ++ (eval $ac_compile) 2>conftest.er1 ++ ac_status=$? ++ grep -v '^ *+' conftest.er1 >conftest.err ++ rm -f conftest.er1 ++ cat conftest.err >&5 ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); } && ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; } && ++ { ac_try='test -s conftest.$ac_objext' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; }; then ++ ac_header_compiler=yes ++else ++ echo "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++ac_header_compiler=no ++fi ++rm -f conftest.err conftest.$ac_objext conftest.$ac_ext ++echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 ++echo "${ECHO_T}$ac_header_compiler" >&6 ++ ++# Is the header present? ++echo "$as_me:$LINENO: checking $ac_header presence" >&5 ++echo $ECHO_N "checking $ac_header presence... $ECHO_C" >&6 ++cat >conftest.$ac_ext <<_ACEOF ++/* confdefs.h. */ ++_ACEOF ++cat confdefs.h >>conftest.$ac_ext ++cat >>conftest.$ac_ext <<_ACEOF ++/* end confdefs.h. */ ++#include <$ac_header> ++_ACEOF ++if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 ++ (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 ++ ac_status=$? ++ grep -v '^ *+' conftest.er1 >conftest.err ++ rm -f conftest.er1 ++ cat conftest.err >&5 ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); } >/dev/null; then ++ if test -s conftest.err; then ++ ac_cpp_err=$ac_c_preproc_warn_flag ++ ac_cpp_err=$ac_cpp_err$ac_c_werror_flag ++ else ++ ac_cpp_err= ++ fi ++else ++ ac_cpp_err=yes ++fi ++if test -z "$ac_cpp_err"; then ++ ac_header_preproc=yes ++else ++ echo "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++ ac_header_preproc=no ++fi ++rm -f conftest.err conftest.$ac_ext ++echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 ++echo "${ECHO_T}$ac_header_preproc" >&6 ++ ++# So? What about this header? ++case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in ++ yes:no: ) ++ { echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5 ++echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" 
>&2;} ++ { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5 ++echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;} ++ ac_header_preproc=yes ++ ;; ++ no:yes:* ) ++ { echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5 ++echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;} ++ { echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 ++echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;} ++ { echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5 ++echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;} ++ { echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5 ++echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;} ++ { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 ++echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} ++ { echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5 ++echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;} ++ ( ++ cat <<\_ASBOX ++## -------------------------------------- ## ++## Report this to the mingw-utils lists. ## ++## -------------------------------------- ## ++_ASBOX ++ ) | ++ sed "s/^/$as_me: WARNING: /" >&2 ++ ;; ++esac ++echo "$as_me:$LINENO: checking for $ac_header" >&5 ++echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6 ++if eval "test \"\${$as_ac_Header+set}\" = set"; then ++ echo $ECHO_N "(cached) $ECHO_C" >&6 ++else ++ eval "$as_ac_Header=\$ac_header_preproc" ++fi ++echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5 ++echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6 ++ ++fi ++if test `eval echo '${'$as_ac_Header'}'` = yes; then ++ cat >>confdefs.h <<_ACEOF ++#define `echo "HAVE_$ac_header" | $as_tr_cpp` 1 ++_ACEOF ++ ++fi ++ ++done ++ ++ ++where_toGet_utime_h='not found!' ++# this part is scrambled by autoconf. phooey. so we just cannot tell the user what we are doing. ++# AC_MSG_CHECKING([for whether utime.h is found as "utime.h" or "sys/utime.h"]) + if test "${ac_cv_header_utime_h+set}" = set; then + echo "$as_me:$LINENO: checking for utime.h" >&5 + echo $ECHO_N "checking for utime.h... $ECHO_C" >&6 +@@ -4446,7 +4774,8 @@ + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && +- { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err' ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? +@@ -4590,7 +4919,8 @@ + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && +- { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err' ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? +@@ -4787,7 +5117,8 @@ + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && +- { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err' ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? +@@ -4888,7 +5219,8 @@ + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); } && +- { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err' ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? +@@ -4921,24 +5253,1681 @@ + done + + +- ac_config_files="$ac_config_files Makefile dos2unix/Makefile drmingw/Makefile pexports/Makefile redir/Makefile reimp/Makefile res2coff/Makefile scripts/Makefile scripts/a2dll scripts/dsw2mak unix2dos/Makefile" ++echo "$as_me:$LINENO: checking for _int64" >&5 ++echo $ECHO_N "checking for _int64... $ECHO_C" >&6 ++if test "${ac_cv_type__int64+set}" = set; then ++ echo $ECHO_N "(cached) $ECHO_C" >&6 ++else ++ cat >conftest.$ac_ext <<_ACEOF ++/* confdefs.h. */ ++_ACEOF ++cat confdefs.h >>conftest.$ac_ext ++cat >>conftest.$ac_ext <<_ACEOF ++/* end confdefs.h. */ ++$ac_includes_default ++int ++main () ++{ ++if ((_int64 *) 0) ++ return 0; ++if (sizeof (_int64)) ++ return 0; ++ ; ++ return 0; ++} ++_ACEOF ++rm -f conftest.$ac_objext ++if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ++ (eval $ac_compile) 2>conftest.er1 ++ ac_status=$? ++ grep -v '^ *+' conftest.er1 >conftest.err ++ rm -f conftest.er1 ++ cat conftest.err >&5 ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); } && ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; } && ++ { ac_try='test -s conftest.$ac_objext' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; }; then ++ ac_cv_type__int64=yes ++else ++ echo "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 + +-cat >confcache <<\_ACEOF +-# This file is a shell script that caches the results of configure +-# tests run on this system so they can be shared between configure +-# scripts and configure runs, see configure's option --config-cache. +-# It is not useful on other systems. If it contains results you don't +-# want to keep, you may remove or edit it. +-# +-# config.status only pays attention to the cache file if you give it +-# the --recheck option to rerun configure. +-# +-# `ac_cv_env_foo' variables (set or unset) will be overridden when +-# loading this file, other *unset* `ac_cv_foo' will be assigned the +-# following values. ++ac_cv_type__int64=no ++fi ++rm -f conftest.err conftest.$ac_objext conftest.$ac_ext ++fi ++echo "$as_me:$LINENO: result: $ac_cv_type__int64" >&5 ++echo "${ECHO_T}$ac_cv_type__int64" >&6 + ++echo "$as_me:$LINENO: checking size of _int64" >&5 ++echo $ECHO_N "checking size of _int64... $ECHO_C" >&6 ++if test "${ac_cv_sizeof__int64+set}" = set; then ++ echo $ECHO_N "(cached) $ECHO_C" >&6 ++else ++ if test "$ac_cv_type__int64" = yes; then ++ # The cast to unsigned long works around a bug in the HP C Compiler ++ # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects ++ # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. ++ # This bug is HP SR number 8606223364. ++ if test "$cross_compiling" = yes; then ++ # Depending upon the size, compute the lo and hi bounds. ++cat >conftest.$ac_ext <<_ACEOF ++/* confdefs.h. */ + _ACEOF +- ++cat confdefs.h >>conftest.$ac_ext ++cat >>conftest.$ac_ext <<_ACEOF ++/* end confdefs.h. 
*/ ++$ac_includes_default ++int ++main () ++{ ++static int test_array [1 - 2 * !(((long) (sizeof (_int64))) >= 0)]; ++test_array [0] = 0 ++ ++ ; ++ return 0; ++} ++_ACEOF ++rm -f conftest.$ac_objext ++if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ++ (eval $ac_compile) 2>conftest.er1 ++ ac_status=$? ++ grep -v '^ *+' conftest.er1 >conftest.err ++ rm -f conftest.er1 ++ cat conftest.err >&5 ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); } && ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; } && ++ { ac_try='test -s conftest.$ac_objext' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; }; then ++ ac_lo=0 ac_mid=0 ++ while :; do ++ cat >conftest.$ac_ext <<_ACEOF ++/* confdefs.h. */ ++_ACEOF ++cat confdefs.h >>conftest.$ac_ext ++cat >>conftest.$ac_ext <<_ACEOF ++/* end confdefs.h. */ ++$ac_includes_default ++int ++main () ++{ ++static int test_array [1 - 2 * !(((long) (sizeof (_int64))) <= $ac_mid)]; ++test_array [0] = 0 ++ ++ ; ++ return 0; ++} ++_ACEOF ++rm -f conftest.$ac_objext ++if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ++ (eval $ac_compile) 2>conftest.er1 ++ ac_status=$? ++ grep -v '^ *+' conftest.er1 >conftest.err ++ rm -f conftest.er1 ++ cat conftest.err >&5 ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); } && ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; } && ++ { ac_try='test -s conftest.$ac_objext' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; }; then ++ ac_hi=$ac_mid; break ++else ++ echo "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++ac_lo=`expr $ac_mid + 1` ++ if test $ac_lo -le $ac_mid; then ++ ac_lo= ac_hi= ++ break ++ fi ++ ac_mid=`expr 2 '*' $ac_mid + 1` ++fi ++rm -f conftest.err conftest.$ac_objext conftest.$ac_ext ++ done ++else ++ echo "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++cat >conftest.$ac_ext <<_ACEOF ++/* confdefs.h. */ ++_ACEOF ++cat confdefs.h >>conftest.$ac_ext ++cat >>conftest.$ac_ext <<_ACEOF ++/* end confdefs.h. */ ++$ac_includes_default ++int ++main () ++{ ++static int test_array [1 - 2 * !(((long) (sizeof (_int64))) < 0)]; ++test_array [0] = 0 ++ ++ ; ++ return 0; ++} ++_ACEOF ++rm -f conftest.$ac_objext ++if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ++ (eval $ac_compile) 2>conftest.er1 ++ ac_status=$? ++ grep -v '^ *+' conftest.er1 >conftest.err ++ rm -f conftest.er1 ++ cat conftest.err >&5 ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); } && ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; } && ++ { ac_try='test -s conftest.$ac_objext' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 ++ (exit $ac_status); }; }; then ++ ac_hi=-1 ac_mid=-1 ++ while :; do ++ cat >conftest.$ac_ext <<_ACEOF ++/* confdefs.h. */ ++_ACEOF ++cat confdefs.h >>conftest.$ac_ext ++cat >>conftest.$ac_ext <<_ACEOF ++/* end confdefs.h. */ ++$ac_includes_default ++int ++main () ++{ ++static int test_array [1 - 2 * !(((long) (sizeof (_int64))) >= $ac_mid)]; ++test_array [0] = 0 ++ ++ ; ++ return 0; ++} ++_ACEOF ++rm -f conftest.$ac_objext ++if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ++ (eval $ac_compile) 2>conftest.er1 ++ ac_status=$? ++ grep -v '^ *+' conftest.er1 >conftest.err ++ rm -f conftest.er1 ++ cat conftest.err >&5 ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); } && ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; } && ++ { ac_try='test -s conftest.$ac_objext' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; }; then ++ ac_lo=$ac_mid; break ++else ++ echo "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++ac_hi=`expr '(' $ac_mid ')' - 1` ++ if test $ac_mid -le $ac_hi; then ++ ac_lo= ac_hi= ++ break ++ fi ++ ac_mid=`expr 2 '*' $ac_mid` ++fi ++rm -f conftest.err conftest.$ac_objext conftest.$ac_ext ++ done ++else ++ echo "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++ac_lo= ac_hi= ++fi ++rm -f conftest.err conftest.$ac_objext conftest.$ac_ext ++fi ++rm -f conftest.err conftest.$ac_objext conftest.$ac_ext ++# Binary search between lo and hi bounds. ++while test "x$ac_lo" != "x$ac_hi"; do ++ ac_mid=`expr '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo` ++ cat >conftest.$ac_ext <<_ACEOF ++/* confdefs.h. */ ++_ACEOF ++cat confdefs.h >>conftest.$ac_ext ++cat >>conftest.$ac_ext <<_ACEOF ++/* end confdefs.h. */ ++$ac_includes_default ++int ++main () ++{ ++static int test_array [1 - 2 * !(((long) (sizeof (_int64))) <= $ac_mid)]; ++test_array [0] = 0 ++ ++ ; ++ return 0; ++} ++_ACEOF ++rm -f conftest.$ac_objext ++if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ++ (eval $ac_compile) 2>conftest.er1 ++ ac_status=$? ++ grep -v '^ *+' conftest.er1 >conftest.err ++ rm -f conftest.er1 ++ cat conftest.err >&5 ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); } && ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; } && ++ { ac_try='test -s conftest.$ac_objext' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; }; then ++ ac_hi=$ac_mid ++else ++ echo "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++ac_lo=`expr '(' $ac_mid ')' + 1` ++fi ++rm -f conftest.err conftest.$ac_objext conftest.$ac_ext ++done ++case $ac_lo in ++?*) ac_cv_sizeof__int64=$ac_lo;; ++'') { { echo "$as_me:$LINENO: error: cannot compute sizeof (_int64), 77 ++See \`config.log' for more details." >&5 ++echo "$as_me: error: cannot compute sizeof (_int64), 77 ++See \`config.log' for more details." 
>&2;} ++ { (exit 1); exit 1; }; } ;; ++esac ++else ++ if test "$cross_compiling" = yes; then ++ { { echo "$as_me:$LINENO: error: cannot run test program while cross compiling ++See \`config.log' for more details." >&5 ++echo "$as_me: error: cannot run test program while cross compiling ++See \`config.log' for more details." >&2;} ++ { (exit 1); exit 1; }; } ++else ++ cat >conftest.$ac_ext <<_ACEOF ++/* confdefs.h. */ ++_ACEOF ++cat confdefs.h >>conftest.$ac_ext ++cat >>conftest.$ac_ext <<_ACEOF ++/* end confdefs.h. */ ++$ac_includes_default ++long longval () { return (long) (sizeof (_int64)); } ++unsigned long ulongval () { return (long) (sizeof (_int64)); } ++#include ++#include ++int ++main () ++{ ++ ++ FILE *f = fopen ("conftest.val", "w"); ++ if (! f) ++ exit (1); ++ if (((long) (sizeof (_int64))) < 0) ++ { ++ long i = longval (); ++ if (i != ((long) (sizeof (_int64)))) ++ exit (1); ++ fprintf (f, "%ld\n", i); ++ } ++ else ++ { ++ unsigned long i = ulongval (); ++ if (i != ((long) (sizeof (_int64)))) ++ exit (1); ++ fprintf (f, "%lu\n", i); ++ } ++ exit (ferror (f) || fclose (f) != 0); ++ ++ ; ++ return 0; ++} ++_ACEOF ++rm -f conftest$ac_exeext ++if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 ++ (eval $ac_link) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); } && { ac_try='./conftest$ac_exeext' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; }; then ++ ac_cv_sizeof__int64=`cat conftest.val` ++else ++ echo "$as_me: program exited with status $ac_status" >&5 ++echo "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++( exit $ac_status ) ++{ { echo "$as_me:$LINENO: error: cannot compute sizeof (_int64), 77 ++See \`config.log' for more details." >&5 ++echo "$as_me: error: cannot compute sizeof (_int64), 77 ++See \`config.log' for more details." >&2;} ++ { (exit 1); exit 1; }; } ++fi ++rm -f core *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext ++fi ++fi ++rm -f conftest.val ++else ++ ac_cv_sizeof__int64=0 ++fi ++fi ++echo "$as_me:$LINENO: result: $ac_cv_sizeof__int64" >&5 ++echo "${ECHO_T}$ac_cv_sizeof__int64" >&6 ++cat >>confdefs.h <<_ACEOF ++#define SIZEOF__INT64 $ac_cv_sizeof__int64 ++_ACEOF ++ ++ ++echo "$as_me:$LINENO: checking for long long" >&5 ++echo $ECHO_N "checking for long long... $ECHO_C" >&6 ++if test "${ac_cv_type_long_long+set}" = set; then ++ echo $ECHO_N "(cached) $ECHO_C" >&6 ++else ++ cat >conftest.$ac_ext <<_ACEOF ++/* confdefs.h. */ ++_ACEOF ++cat confdefs.h >>conftest.$ac_ext ++cat >>conftest.$ac_ext <<_ACEOF ++/* end confdefs.h. */ ++$ac_includes_default ++int ++main () ++{ ++if ((long long *) 0) ++ return 0; ++if (sizeof (long long)) ++ return 0; ++ ; ++ return 0; ++} ++_ACEOF ++rm -f conftest.$ac_objext ++if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ++ (eval $ac_compile) 2>conftest.er1 ++ ac_status=$? ++ grep -v '^ *+' conftest.er1 >conftest.err ++ rm -f conftest.er1 ++ cat conftest.err >&5 ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); } && ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; } && ++ { ac_try='test -s conftest.$ac_objext' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? 
++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; }; then ++ ac_cv_type_long_long=yes ++else ++ echo "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++ac_cv_type_long_long=no ++fi ++rm -f conftest.err conftest.$ac_objext conftest.$ac_ext ++fi ++echo "$as_me:$LINENO: result: $ac_cv_type_long_long" >&5 ++echo "${ECHO_T}$ac_cv_type_long_long" >&6 ++ ++echo "$as_me:$LINENO: checking size of long long" >&5 ++echo $ECHO_N "checking size of long long... $ECHO_C" >&6 ++if test "${ac_cv_sizeof_long_long+set}" = set; then ++ echo $ECHO_N "(cached) $ECHO_C" >&6 ++else ++ if test "$ac_cv_type_long_long" = yes; then ++ # The cast to unsigned long works around a bug in the HP C Compiler ++ # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects ++ # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. ++ # This bug is HP SR number 8606223364. ++ if test "$cross_compiling" = yes; then ++ # Depending upon the size, compute the lo and hi bounds. ++cat >conftest.$ac_ext <<_ACEOF ++/* confdefs.h. */ ++_ACEOF ++cat confdefs.h >>conftest.$ac_ext ++cat >>conftest.$ac_ext <<_ACEOF ++/* end confdefs.h. */ ++$ac_includes_default ++int ++main () ++{ ++static int test_array [1 - 2 * !(((long) (sizeof (long long))) >= 0)]; ++test_array [0] = 0 ++ ++ ; ++ return 0; ++} ++_ACEOF ++rm -f conftest.$ac_objext ++if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ++ (eval $ac_compile) 2>conftest.er1 ++ ac_status=$? ++ grep -v '^ *+' conftest.er1 >conftest.err ++ rm -f conftest.er1 ++ cat conftest.err >&5 ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); } && ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; } && ++ { ac_try='test -s conftest.$ac_objext' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; }; then ++ ac_lo=0 ac_mid=0 ++ while :; do ++ cat >conftest.$ac_ext <<_ACEOF ++/* confdefs.h. */ ++_ACEOF ++cat confdefs.h >>conftest.$ac_ext ++cat >>conftest.$ac_ext <<_ACEOF ++/* end confdefs.h. */ ++$ac_includes_default ++int ++main () ++{ ++static int test_array [1 - 2 * !(((long) (sizeof (long long))) <= $ac_mid)]; ++test_array [0] = 0 ++ ++ ; ++ return 0; ++} ++_ACEOF ++rm -f conftest.$ac_objext ++if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ++ (eval $ac_compile) 2>conftest.er1 ++ ac_status=$? ++ grep -v '^ *+' conftest.er1 >conftest.err ++ rm -f conftest.er1 ++ cat conftest.err >&5 ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); } && ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; } && ++ { ac_try='test -s conftest.$ac_objext' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 ++ (exit $ac_status); }; }; then ++ ac_hi=$ac_mid; break ++else ++ echo "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++ac_lo=`expr $ac_mid + 1` ++ if test $ac_lo -le $ac_mid; then ++ ac_lo= ac_hi= ++ break ++ fi ++ ac_mid=`expr 2 '*' $ac_mid + 1` ++fi ++rm -f conftest.err conftest.$ac_objext conftest.$ac_ext ++ done ++else ++ echo "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++cat >conftest.$ac_ext <<_ACEOF ++/* confdefs.h. */ ++_ACEOF ++cat confdefs.h >>conftest.$ac_ext ++cat >>conftest.$ac_ext <<_ACEOF ++/* end confdefs.h. */ ++$ac_includes_default ++int ++main () ++{ ++static int test_array [1 - 2 * !(((long) (sizeof (long long))) < 0)]; ++test_array [0] = 0 ++ ++ ; ++ return 0; ++} ++_ACEOF ++rm -f conftest.$ac_objext ++if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ++ (eval $ac_compile) 2>conftest.er1 ++ ac_status=$? ++ grep -v '^ *+' conftest.er1 >conftest.err ++ rm -f conftest.er1 ++ cat conftest.err >&5 ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); } && ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; } && ++ { ac_try='test -s conftest.$ac_objext' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; }; then ++ ac_hi=-1 ac_mid=-1 ++ while :; do ++ cat >conftest.$ac_ext <<_ACEOF ++/* confdefs.h. */ ++_ACEOF ++cat confdefs.h >>conftest.$ac_ext ++cat >>conftest.$ac_ext <<_ACEOF ++/* end confdefs.h. */ ++$ac_includes_default ++int ++main () ++{ ++static int test_array [1 - 2 * !(((long) (sizeof (long long))) >= $ac_mid)]; ++test_array [0] = 0 ++ ++ ; ++ return 0; ++} ++_ACEOF ++rm -f conftest.$ac_objext ++if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ++ (eval $ac_compile) 2>conftest.er1 ++ ac_status=$? ++ grep -v '^ *+' conftest.er1 >conftest.err ++ rm -f conftest.er1 ++ cat conftest.err >&5 ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); } && ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; } && ++ { ac_try='test -s conftest.$ac_objext' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; }; then ++ ac_lo=$ac_mid; break ++else ++ echo "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++ac_hi=`expr '(' $ac_mid ')' - 1` ++ if test $ac_mid -le $ac_hi; then ++ ac_lo= ac_hi= ++ break ++ fi ++ ac_mid=`expr 2 '*' $ac_mid` ++fi ++rm -f conftest.err conftest.$ac_objext conftest.$ac_ext ++ done ++else ++ echo "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++ac_lo= ac_hi= ++fi ++rm -f conftest.err conftest.$ac_objext conftest.$ac_ext ++fi ++rm -f conftest.err conftest.$ac_objext conftest.$ac_ext ++# Binary search between lo and hi bounds. ++while test "x$ac_lo" != "x$ac_hi"; do ++ ac_mid=`expr '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo` ++ cat >conftest.$ac_ext <<_ACEOF ++/* confdefs.h. */ ++_ACEOF ++cat confdefs.h >>conftest.$ac_ext ++cat >>conftest.$ac_ext <<_ACEOF ++/* end confdefs.h. 
*/ ++$ac_includes_default ++int ++main () ++{ ++static int test_array [1 - 2 * !(((long) (sizeof (long long))) <= $ac_mid)]; ++test_array [0] = 0 ++ ++ ; ++ return 0; ++} ++_ACEOF ++rm -f conftest.$ac_objext ++if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ++ (eval $ac_compile) 2>conftest.er1 ++ ac_status=$? ++ grep -v '^ *+' conftest.er1 >conftest.err ++ rm -f conftest.er1 ++ cat conftest.err >&5 ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); } && ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; } && ++ { ac_try='test -s conftest.$ac_objext' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; }; then ++ ac_hi=$ac_mid ++else ++ echo "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++ac_lo=`expr '(' $ac_mid ')' + 1` ++fi ++rm -f conftest.err conftest.$ac_objext conftest.$ac_ext ++done ++case $ac_lo in ++?*) ac_cv_sizeof_long_long=$ac_lo;; ++'') { { echo "$as_me:$LINENO: error: cannot compute sizeof (long long), 77 ++See \`config.log' for more details." >&5 ++echo "$as_me: error: cannot compute sizeof (long long), 77 ++See \`config.log' for more details." >&2;} ++ { (exit 1); exit 1; }; } ;; ++esac ++else ++ if test "$cross_compiling" = yes; then ++ { { echo "$as_me:$LINENO: error: cannot run test program while cross compiling ++See \`config.log' for more details." >&5 ++echo "$as_me: error: cannot run test program while cross compiling ++See \`config.log' for more details." >&2;} ++ { (exit 1); exit 1; }; } ++else ++ cat >conftest.$ac_ext <<_ACEOF ++/* confdefs.h. */ ++_ACEOF ++cat confdefs.h >>conftest.$ac_ext ++cat >>conftest.$ac_ext <<_ACEOF ++/* end confdefs.h. */ ++$ac_includes_default ++long longval () { return (long) (sizeof (long long)); } ++unsigned long ulongval () { return (long) (sizeof (long long)); } ++#include ++#include ++int ++main () ++{ ++ ++ FILE *f = fopen ("conftest.val", "w"); ++ if (! f) ++ exit (1); ++ if (((long) (sizeof (long long))) < 0) ++ { ++ long i = longval (); ++ if (i != ((long) (sizeof (long long)))) ++ exit (1); ++ fprintf (f, "%ld\n", i); ++ } ++ else ++ { ++ unsigned long i = ulongval (); ++ if (i != ((long) (sizeof (long long)))) ++ exit (1); ++ fprintf (f, "%lu\n", i); ++ } ++ exit (ferror (f) || fclose (f) != 0); ++ ++ ; ++ return 0; ++} ++_ACEOF ++rm -f conftest$ac_exeext ++if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 ++ (eval $ac_link) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); } && { ac_try='./conftest$ac_exeext' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; }; then ++ ac_cv_sizeof_long_long=`cat conftest.val` ++else ++ echo "$as_me: program exited with status $ac_status" >&5 ++echo "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++( exit $ac_status ) ++{ { echo "$as_me:$LINENO: error: cannot compute sizeof (long long), 77 ++See \`config.log' for more details." >&5 ++echo "$as_me: error: cannot compute sizeof (long long), 77 ++See \`config.log' for more details." 
>&2;} ++ { (exit 1); exit 1; }; } ++fi ++rm -f core *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext ++fi ++fi ++rm -f conftest.val ++else ++ ac_cv_sizeof_long_long=0 ++fi ++fi ++echo "$as_me:$LINENO: result: $ac_cv_sizeof_long_long" >&5 ++echo "${ECHO_T}$ac_cv_sizeof_long_long" >&6 ++cat >>confdefs.h <<_ACEOF ++#define SIZEOF_LONG_LONG $ac_cv_sizeof_long_long ++_ACEOF ++ ++ ++echo "$as_me:$LINENO: checking for long" >&5 ++echo $ECHO_N "checking for long... $ECHO_C" >&6 ++if test "${ac_cv_type_long+set}" = set; then ++ echo $ECHO_N "(cached) $ECHO_C" >&6 ++else ++ cat >conftest.$ac_ext <<_ACEOF ++/* confdefs.h. */ ++_ACEOF ++cat confdefs.h >>conftest.$ac_ext ++cat >>conftest.$ac_ext <<_ACEOF ++/* end confdefs.h. */ ++$ac_includes_default ++int ++main () ++{ ++if ((long *) 0) ++ return 0; ++if (sizeof (long)) ++ return 0; ++ ; ++ return 0; ++} ++_ACEOF ++rm -f conftest.$ac_objext ++if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ++ (eval $ac_compile) 2>conftest.er1 ++ ac_status=$? ++ grep -v '^ *+' conftest.er1 >conftest.err ++ rm -f conftest.er1 ++ cat conftest.err >&5 ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); } && ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; } && ++ { ac_try='test -s conftest.$ac_objext' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; }; then ++ ac_cv_type_long=yes ++else ++ echo "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++ac_cv_type_long=no ++fi ++rm -f conftest.err conftest.$ac_objext conftest.$ac_ext ++fi ++echo "$as_me:$LINENO: result: $ac_cv_type_long" >&5 ++echo "${ECHO_T}$ac_cv_type_long" >&6 ++ ++echo "$as_me:$LINENO: checking size of long" >&5 ++echo $ECHO_N "checking size of long... $ECHO_C" >&6 ++if test "${ac_cv_sizeof_long+set}" = set; then ++ echo $ECHO_N "(cached) $ECHO_C" >&6 ++else ++ if test "$ac_cv_type_long" = yes; then ++ # The cast to unsigned long works around a bug in the HP C Compiler ++ # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects ++ # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. ++ # This bug is HP SR number 8606223364. ++ if test "$cross_compiling" = yes; then ++ # Depending upon the size, compute the lo and hi bounds. ++cat >conftest.$ac_ext <<_ACEOF ++/* confdefs.h. */ ++_ACEOF ++cat confdefs.h >>conftest.$ac_ext ++cat >>conftest.$ac_ext <<_ACEOF ++/* end confdefs.h. */ ++$ac_includes_default ++int ++main () ++{ ++static int test_array [1 - 2 * !(((long) (sizeof (long))) >= 0)]; ++test_array [0] = 0 ++ ++ ; ++ return 0; ++} ++_ACEOF ++rm -f conftest.$ac_objext ++if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ++ (eval $ac_compile) 2>conftest.er1 ++ ac_status=$? ++ grep -v '^ *+' conftest.er1 >conftest.err ++ rm -f conftest.er1 ++ cat conftest.err >&5 ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); } && ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; } && ++ { ac_try='test -s conftest.$ac_objext' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? 
++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; }; then ++ ac_lo=0 ac_mid=0 ++ while :; do ++ cat >conftest.$ac_ext <<_ACEOF ++/* confdefs.h. */ ++_ACEOF ++cat confdefs.h >>conftest.$ac_ext ++cat >>conftest.$ac_ext <<_ACEOF ++/* end confdefs.h. */ ++$ac_includes_default ++int ++main () ++{ ++static int test_array [1 - 2 * !(((long) (sizeof (long))) <= $ac_mid)]; ++test_array [0] = 0 ++ ++ ; ++ return 0; ++} ++_ACEOF ++rm -f conftest.$ac_objext ++if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ++ (eval $ac_compile) 2>conftest.er1 ++ ac_status=$? ++ grep -v '^ *+' conftest.er1 >conftest.err ++ rm -f conftest.er1 ++ cat conftest.err >&5 ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); } && ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; } && ++ { ac_try='test -s conftest.$ac_objext' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; }; then ++ ac_hi=$ac_mid; break ++else ++ echo "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++ac_lo=`expr $ac_mid + 1` ++ if test $ac_lo -le $ac_mid; then ++ ac_lo= ac_hi= ++ break ++ fi ++ ac_mid=`expr 2 '*' $ac_mid + 1` ++fi ++rm -f conftest.err conftest.$ac_objext conftest.$ac_ext ++ done ++else ++ echo "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++cat >conftest.$ac_ext <<_ACEOF ++/* confdefs.h. */ ++_ACEOF ++cat confdefs.h >>conftest.$ac_ext ++cat >>conftest.$ac_ext <<_ACEOF ++/* end confdefs.h. */ ++$ac_includes_default ++int ++main () ++{ ++static int test_array [1 - 2 * !(((long) (sizeof (long))) < 0)]; ++test_array [0] = 0 ++ ++ ; ++ return 0; ++} ++_ACEOF ++rm -f conftest.$ac_objext ++if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ++ (eval $ac_compile) 2>conftest.er1 ++ ac_status=$? ++ grep -v '^ *+' conftest.er1 >conftest.err ++ rm -f conftest.er1 ++ cat conftest.err >&5 ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); } && ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; } && ++ { ac_try='test -s conftest.$ac_objext' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; }; then ++ ac_hi=-1 ac_mid=-1 ++ while :; do ++ cat >conftest.$ac_ext <<_ACEOF ++/* confdefs.h. */ ++_ACEOF ++cat confdefs.h >>conftest.$ac_ext ++cat >>conftest.$ac_ext <<_ACEOF ++/* end confdefs.h. */ ++$ac_includes_default ++int ++main () ++{ ++static int test_array [1 - 2 * !(((long) (sizeof (long))) >= $ac_mid)]; ++test_array [0] = 0 ++ ++ ; ++ return 0; ++} ++_ACEOF ++rm -f conftest.$ac_objext ++if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ++ (eval $ac_compile) 2>conftest.er1 ++ ac_status=$? ++ grep -v '^ *+' conftest.er1 >conftest.err ++ rm -f conftest.er1 ++ cat conftest.err >&5 ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); } && ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 ++ (exit $ac_status); }; } && ++ { ac_try='test -s conftest.$ac_objext' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; }; then ++ ac_lo=$ac_mid; break ++else ++ echo "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++ac_hi=`expr '(' $ac_mid ')' - 1` ++ if test $ac_mid -le $ac_hi; then ++ ac_lo= ac_hi= ++ break ++ fi ++ ac_mid=`expr 2 '*' $ac_mid` ++fi ++rm -f conftest.err conftest.$ac_objext conftest.$ac_ext ++ done ++else ++ echo "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++ac_lo= ac_hi= ++fi ++rm -f conftest.err conftest.$ac_objext conftest.$ac_ext ++fi ++rm -f conftest.err conftest.$ac_objext conftest.$ac_ext ++# Binary search between lo and hi bounds. ++while test "x$ac_lo" != "x$ac_hi"; do ++ ac_mid=`expr '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo` ++ cat >conftest.$ac_ext <<_ACEOF ++/* confdefs.h. */ ++_ACEOF ++cat confdefs.h >>conftest.$ac_ext ++cat >>conftest.$ac_ext <<_ACEOF ++/* end confdefs.h. */ ++$ac_includes_default ++int ++main () ++{ ++static int test_array [1 - 2 * !(((long) (sizeof (long))) <= $ac_mid)]; ++test_array [0] = 0 ++ ++ ; ++ return 0; ++} ++_ACEOF ++rm -f conftest.$ac_objext ++if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ++ (eval $ac_compile) 2>conftest.er1 ++ ac_status=$? ++ grep -v '^ *+' conftest.er1 >conftest.err ++ rm -f conftest.er1 ++ cat conftest.err >&5 ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); } && ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; } && ++ { ac_try='test -s conftest.$ac_objext' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; }; then ++ ac_hi=$ac_mid ++else ++ echo "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++ac_lo=`expr '(' $ac_mid ')' + 1` ++fi ++rm -f conftest.err conftest.$ac_objext conftest.$ac_ext ++done ++case $ac_lo in ++?*) ac_cv_sizeof_long=$ac_lo;; ++'') { { echo "$as_me:$LINENO: error: cannot compute sizeof (long), 77 ++See \`config.log' for more details." >&5 ++echo "$as_me: error: cannot compute sizeof (long), 77 ++See \`config.log' for more details." >&2;} ++ { (exit 1); exit 1; }; } ;; ++esac ++else ++ if test "$cross_compiling" = yes; then ++ { { echo "$as_me:$LINENO: error: cannot run test program while cross compiling ++See \`config.log' for more details." >&5 ++echo "$as_me: error: cannot run test program while cross compiling ++See \`config.log' for more details." >&2;} ++ { (exit 1); exit 1; }; } ++else ++ cat >conftest.$ac_ext <<_ACEOF ++/* confdefs.h. */ ++_ACEOF ++cat confdefs.h >>conftest.$ac_ext ++cat >>conftest.$ac_ext <<_ACEOF ++/* end confdefs.h. */ ++$ac_includes_default ++long longval () { return (long) (sizeof (long)); } ++unsigned long ulongval () { return (long) (sizeof (long)); } ++#include ++#include ++int ++main () ++{ ++ ++ FILE *f = fopen ("conftest.val", "w"); ++ if (! 
f) ++ exit (1); ++ if (((long) (sizeof (long))) < 0) ++ { ++ long i = longval (); ++ if (i != ((long) (sizeof (long)))) ++ exit (1); ++ fprintf (f, "%ld\n", i); ++ } ++ else ++ { ++ unsigned long i = ulongval (); ++ if (i != ((long) (sizeof (long)))) ++ exit (1); ++ fprintf (f, "%lu\n", i); ++ } ++ exit (ferror (f) || fclose (f) != 0); ++ ++ ; ++ return 0; ++} ++_ACEOF ++rm -f conftest$ac_exeext ++if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 ++ (eval $ac_link) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); } && { ac_try='./conftest$ac_exeext' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; }; then ++ ac_cv_sizeof_long=`cat conftest.val` ++else ++ echo "$as_me: program exited with status $ac_status" >&5 ++echo "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++( exit $ac_status ) ++{ { echo "$as_me:$LINENO: error: cannot compute sizeof (long), 77 ++See \`config.log' for more details." >&5 ++echo "$as_me: error: cannot compute sizeof (long), 77 ++See \`config.log' for more details." >&2;} ++ { (exit 1); exit 1; }; } ++fi ++rm -f core *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext ++fi ++fi ++rm -f conftest.val ++else ++ ac_cv_sizeof_long=0 ++fi ++fi ++echo "$as_me:$LINENO: result: $ac_cv_sizeof_long" >&5 ++echo "${ECHO_T}$ac_cv_sizeof_long" >&6 ++cat >>confdefs.h <<_ACEOF ++#define SIZEOF_LONG $ac_cv_sizeof_long ++_ACEOF ++ ++ ++echo "$as_me:$LINENO: checking for void *" >&5 ++echo $ECHO_N "checking for void *... $ECHO_C" >&6 ++if test "${ac_cv_type_void_p+set}" = set; then ++ echo $ECHO_N "(cached) $ECHO_C" >&6 ++else ++ cat >conftest.$ac_ext <<_ACEOF ++/* confdefs.h. */ ++_ACEOF ++cat confdefs.h >>conftest.$ac_ext ++cat >>conftest.$ac_ext <<_ACEOF ++/* end confdefs.h. */ ++$ac_includes_default ++int ++main () ++{ ++if ((void * *) 0) ++ return 0; ++if (sizeof (void *)) ++ return 0; ++ ; ++ return 0; ++} ++_ACEOF ++rm -f conftest.$ac_objext ++if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ++ (eval $ac_compile) 2>conftest.er1 ++ ac_status=$? ++ grep -v '^ *+' conftest.er1 >conftest.err ++ rm -f conftest.er1 ++ cat conftest.err >&5 ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); } && ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; } && ++ { ac_try='test -s conftest.$ac_objext' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; }; then ++ ac_cv_type_void_p=yes ++else ++ echo "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++ac_cv_type_void_p=no ++fi ++rm -f conftest.err conftest.$ac_objext conftest.$ac_ext ++fi ++echo "$as_me:$LINENO: result: $ac_cv_type_void_p" >&5 ++echo "${ECHO_T}$ac_cv_type_void_p" >&6 ++ ++echo "$as_me:$LINENO: checking size of void *" >&5 ++echo $ECHO_N "checking size of void *... 
$ECHO_C" >&6 ++if test "${ac_cv_sizeof_void_p+set}" = set; then ++ echo $ECHO_N "(cached) $ECHO_C" >&6 ++else ++ if test "$ac_cv_type_void_p" = yes; then ++ # The cast to unsigned long works around a bug in the HP C Compiler ++ # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects ++ # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. ++ # This bug is HP SR number 8606223364. ++ if test "$cross_compiling" = yes; then ++ # Depending upon the size, compute the lo and hi bounds. ++cat >conftest.$ac_ext <<_ACEOF ++/* confdefs.h. */ ++_ACEOF ++cat confdefs.h >>conftest.$ac_ext ++cat >>conftest.$ac_ext <<_ACEOF ++/* end confdefs.h. */ ++$ac_includes_default ++int ++main () ++{ ++static int test_array [1 - 2 * !(((long) (sizeof (void *))) >= 0)]; ++test_array [0] = 0 ++ ++ ; ++ return 0; ++} ++_ACEOF ++rm -f conftest.$ac_objext ++if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ++ (eval $ac_compile) 2>conftest.er1 ++ ac_status=$? ++ grep -v '^ *+' conftest.er1 >conftest.err ++ rm -f conftest.er1 ++ cat conftest.err >&5 ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); } && ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; } && ++ { ac_try='test -s conftest.$ac_objext' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; }; then ++ ac_lo=0 ac_mid=0 ++ while :; do ++ cat >conftest.$ac_ext <<_ACEOF ++/* confdefs.h. */ ++_ACEOF ++cat confdefs.h >>conftest.$ac_ext ++cat >>conftest.$ac_ext <<_ACEOF ++/* end confdefs.h. */ ++$ac_includes_default ++int ++main () ++{ ++static int test_array [1 - 2 * !(((long) (sizeof (void *))) <= $ac_mid)]; ++test_array [0] = 0 ++ ++ ; ++ return 0; ++} ++_ACEOF ++rm -f conftest.$ac_objext ++if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ++ (eval $ac_compile) 2>conftest.er1 ++ ac_status=$? ++ grep -v '^ *+' conftest.er1 >conftest.err ++ rm -f conftest.er1 ++ cat conftest.err >&5 ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); } && ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; } && ++ { ac_try='test -s conftest.$ac_objext' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; }; then ++ ac_hi=$ac_mid; break ++else ++ echo "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++ac_lo=`expr $ac_mid + 1` ++ if test $ac_lo -le $ac_mid; then ++ ac_lo= ac_hi= ++ break ++ fi ++ ac_mid=`expr 2 '*' $ac_mid + 1` ++fi ++rm -f conftest.err conftest.$ac_objext conftest.$ac_ext ++ done ++else ++ echo "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++cat >conftest.$ac_ext <<_ACEOF ++/* confdefs.h. */ ++_ACEOF ++cat confdefs.h >>conftest.$ac_ext ++cat >>conftest.$ac_ext <<_ACEOF ++/* end confdefs.h. 
*/ ++$ac_includes_default ++int ++main () ++{ ++static int test_array [1 - 2 * !(((long) (sizeof (void *))) < 0)]; ++test_array [0] = 0 ++ ++ ; ++ return 0; ++} ++_ACEOF ++rm -f conftest.$ac_objext ++if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ++ (eval $ac_compile) 2>conftest.er1 ++ ac_status=$? ++ grep -v '^ *+' conftest.er1 >conftest.err ++ rm -f conftest.er1 ++ cat conftest.err >&5 ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); } && ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; } && ++ { ac_try='test -s conftest.$ac_objext' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; }; then ++ ac_hi=-1 ac_mid=-1 ++ while :; do ++ cat >conftest.$ac_ext <<_ACEOF ++/* confdefs.h. */ ++_ACEOF ++cat confdefs.h >>conftest.$ac_ext ++cat >>conftest.$ac_ext <<_ACEOF ++/* end confdefs.h. */ ++$ac_includes_default ++int ++main () ++{ ++static int test_array [1 - 2 * !(((long) (sizeof (void *))) >= $ac_mid)]; ++test_array [0] = 0 ++ ++ ; ++ return 0; ++} ++_ACEOF ++rm -f conftest.$ac_objext ++if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ++ (eval $ac_compile) 2>conftest.er1 ++ ac_status=$? ++ grep -v '^ *+' conftest.er1 >conftest.err ++ rm -f conftest.er1 ++ cat conftest.err >&5 ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); } && ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; } && ++ { ac_try='test -s conftest.$ac_objext' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; }; then ++ ac_lo=$ac_mid; break ++else ++ echo "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++ac_hi=`expr '(' $ac_mid ')' - 1` ++ if test $ac_mid -le $ac_hi; then ++ ac_lo= ac_hi= ++ break ++ fi ++ ac_mid=`expr 2 '*' $ac_mid` ++fi ++rm -f conftest.err conftest.$ac_objext conftest.$ac_ext ++ done ++else ++ echo "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++ac_lo= ac_hi= ++fi ++rm -f conftest.err conftest.$ac_objext conftest.$ac_ext ++fi ++rm -f conftest.err conftest.$ac_objext conftest.$ac_ext ++# Binary search between lo and hi bounds. ++while test "x$ac_lo" != "x$ac_hi"; do ++ ac_mid=`expr '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo` ++ cat >conftest.$ac_ext <<_ACEOF ++/* confdefs.h. */ ++_ACEOF ++cat confdefs.h >>conftest.$ac_ext ++cat >>conftest.$ac_ext <<_ACEOF ++/* end confdefs.h. */ ++$ac_includes_default ++int ++main () ++{ ++static int test_array [1 - 2 * !(((long) (sizeof (void *))) <= $ac_mid)]; ++test_array [0] = 0 ++ ++ ; ++ return 0; ++} ++_ACEOF ++rm -f conftest.$ac_objext ++if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ++ (eval $ac_compile) 2>conftest.er1 ++ ac_status=$? ++ grep -v '^ *+' conftest.er1 >conftest.err ++ rm -f conftest.er1 ++ cat conftest.err >&5 ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); } && ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 ++ (exit $ac_status); }; } && ++ { ac_try='test -s conftest.$ac_objext' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; }; then ++ ac_hi=$ac_mid ++else ++ echo "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++ac_lo=`expr '(' $ac_mid ')' + 1` ++fi ++rm -f conftest.err conftest.$ac_objext conftest.$ac_ext ++done ++case $ac_lo in ++?*) ac_cv_sizeof_void_p=$ac_lo;; ++'') { { echo "$as_me:$LINENO: error: cannot compute sizeof (void *), 77 ++See \`config.log' for more details." >&5 ++echo "$as_me: error: cannot compute sizeof (void *), 77 ++See \`config.log' for more details." >&2;} ++ { (exit 1); exit 1; }; } ;; ++esac ++else ++ if test "$cross_compiling" = yes; then ++ { { echo "$as_me:$LINENO: error: cannot run test program while cross compiling ++See \`config.log' for more details." >&5 ++echo "$as_me: error: cannot run test program while cross compiling ++See \`config.log' for more details." >&2;} ++ { (exit 1); exit 1; }; } ++else ++ cat >conftest.$ac_ext <<_ACEOF ++/* confdefs.h. */ ++_ACEOF ++cat confdefs.h >>conftest.$ac_ext ++cat >>conftest.$ac_ext <<_ACEOF ++/* end confdefs.h. */ ++$ac_includes_default ++long longval () { return (long) (sizeof (void *)); } ++unsigned long ulongval () { return (long) (sizeof (void *)); } ++#include ++#include ++int ++main () ++{ ++ ++ FILE *f = fopen ("conftest.val", "w"); ++ if (! f) ++ exit (1); ++ if (((long) (sizeof (void *))) < 0) ++ { ++ long i = longval (); ++ if (i != ((long) (sizeof (void *)))) ++ exit (1); ++ fprintf (f, "%ld\n", i); ++ } ++ else ++ { ++ unsigned long i = ulongval (); ++ if (i != ((long) (sizeof (void *)))) ++ exit (1); ++ fprintf (f, "%lu\n", i); ++ } ++ exit (ferror (f) || fclose (f) != 0); ++ ++ ; ++ return 0; ++} ++_ACEOF ++rm -f conftest$ac_exeext ++if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 ++ (eval $ac_link) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); } && { ac_try='./conftest$ac_exeext' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; }; then ++ ac_cv_sizeof_void_p=`cat conftest.val` ++else ++ echo "$as_me: program exited with status $ac_status" >&5 ++echo "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++( exit $ac_status ) ++{ { echo "$as_me:$LINENO: error: cannot compute sizeof (void *), 77 ++See \`config.log' for more details." >&5 ++echo "$as_me: error: cannot compute sizeof (void *), 77 ++See \`config.log' for more details." >&2;} ++ { (exit 1); exit 1; }; } ++fi ++rm -f core *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext ++fi ++fi ++rm -f conftest.val ++else ++ ac_cv_sizeof_void_p=0 ++fi ++fi ++echo "$as_me:$LINENO: result: $ac_cv_sizeof_void_p" >&5 ++echo "${ECHO_T}$ac_cv_sizeof_void_p" >&6 ++cat >>confdefs.h <<_ACEOF ++#define SIZEOF_VOID_P $ac_cv_sizeof_void_p ++_ACEOF ++ ++ ++ ++ ac_config_files="$ac_config_files Makefile dos2unix/Makefile drmingw/Makefile pexports/Makefile redir/Makefile reimp/Makefile res2coff/Makefile scripts/Makefile scripts/a2dll scripts/dsw2mak unix2dos/Makefile" ++ ++cat >confcache <<\_ACEOF ++# This file is a shell script that caches the results of configure ++# tests run on this system so they can be shared between configure ++# scripts and configure runs, see configure's option --config-cache. 
++# It is not useful on other systems. If it contains results you don't ++# want to keep, you may remove or edit it. ++# ++# config.status only pays attention to the cache file if you give it ++# the --recheck option to rerun configure. ++# ++# `ac_cv_env_foo' variables (set or unset) will be overridden when ++# loading this file, other *unset* `ac_cv_foo' will be assigned the ++# following values. ++ ++_ACEOF ++ + # The following way of writing the cache mishandles newlines in values, + # but we know of no workaround that is simple, portable, and efficient. + # So, don't put newlines in cache variables' values. +@@ -5021,6 +7010,27 @@ + Usually this means the macro was only invoked conditionally." >&2;} + { (exit 1); exit 1; }; } + fi ++if test -z "${BUILD_DRMINGW_TRUE}" && test -z "${BUILD_DRMINGW_FALSE}"; then ++ { { echo "$as_me:$LINENO: error: conditional \"BUILD_DRMINGW\" was never defined. ++Usually this means the macro was only invoked conditionally." >&5 ++echo "$as_me: error: conditional \"BUILD_DRMINGW\" was never defined. ++Usually this means the macro was only invoked conditionally." >&2;} ++ { (exit 1); exit 1; }; } ++fi ++if test -z "${BUILD_REDIR_TRUE}" && test -z "${BUILD_REDIR_FALSE}"; then ++ { { echo "$as_me:$LINENO: error: conditional \"BUILD_REDIR\" was never defined. ++Usually this means the macro was only invoked conditionally." >&5 ++echo "$as_me: error: conditional \"BUILD_REDIR\" was never defined. ++Usually this means the macro was only invoked conditionally." >&2;} ++ { (exit 1); exit 1; }; } ++fi ++if test -z "${BUILD_RES2COFF_TRUE}" && test -z "${BUILD_RES2COFF_FALSE}"; then ++ { { echo "$as_me:$LINENO: error: conditional \"BUILD_RES2COFF\" was never defined. ++Usually this means the macro was only invoked conditionally." >&5 ++echo "$as_me: error: conditional \"BUILD_RES2COFF\" was never defined. ++Usually this means the macro was only invoked conditionally." >&2;} ++ { (exit 1); exit 1; }; } ++fi + if test -z "${AMDEP_TRUE}" && test -z "${AMDEP_FALSE}"; then + { { echo "$as_me:$LINENO: error: conditional \"AMDEP\" was never defined. + Usually this means the macro was only invoked conditionally." >&5 +@@ -5042,6 +7052,13 @@ + Usually this means the macro was only invoked conditionally." >&2;} + { (exit 1); exit 1; }; } + fi ++if test -z "${BUILD_DRMINGW_TRUE}" && test -z "${BUILD_DRMINGW_FALSE}"; then ++ { { echo "$as_me:$LINENO: error: conditional \"BUILD_DRMINGW\" was never defined. ++Usually this means the macro was only invoked conditionally." >&5 ++echo "$as_me: error: conditional \"BUILD_DRMINGW\" was never defined. ++Usually this means the macro was only invoked conditionally." >&2;} ++ { (exit 1); exit 1; }; } ++fi + + : ${CONFIG_STATUS=./config.status} + ac_clean_files_save=$ac_clean_files +@@ -5314,7 +7331,7 @@ + cat >&5 <<_CSEOF + + This file was extended by mingw-utils $as_me 0.3, which was +-generated by GNU Autoconf 2.58. Invocation command line was ++generated by GNU Autoconf 2.59. Invocation command line was + + CONFIG_FILES = $CONFIG_FILES + CONFIG_HEADERS = $CONFIG_HEADERS +@@ -5377,7 +7394,7 @@ + cat >>$CONFIG_STATUS <<_ACEOF + ac_cs_version="\\ + mingw-utils config.status 0.3 +-configured by $0, generated by GNU Autoconf 2.58, ++configured by $0, generated by GNU Autoconf 2.59, + with options \\"`echo "$ac_configure_args" | sed 's/[\\""\`\$]/\\\\&/g'`\\" + + Copyright (C) 2003 Free Software Foundation, Inc. 
+@@ -5595,17 +7612,26 @@ + s,@AUTOMAKE@,$AUTOMAKE,;t t + s,@AUTOHEADER@,$AUTOHEADER,;t t + s,@MAKEINFO@,$MAKEINFO,;t t +-s,@AMTAR@,$AMTAR,;t t + s,@install_sh@,$install_sh,;t t + s,@STRIP@,$STRIP,;t t + s,@ac_ct_STRIP@,$ac_ct_STRIP,;t t + s,@INSTALL_STRIP_PROGRAM@,$INSTALL_STRIP_PROGRAM,;t t ++s,@mkdir_p@,$mkdir_p,;t t + s,@AWK@,$AWK,;t t + s,@SET_MAKE@,$SET_MAKE,;t t + s,@am__leading_dot@,$am__leading_dot,;t t ++s,@AMTAR@,$AMTAR,;t t ++s,@am__tar@,$am__tar,;t t ++s,@am__untar@,$am__untar,;t t + s,@MAINTAINER_MODE_TRUE@,$MAINTAINER_MODE_TRUE,;t t + s,@MAINTAINER_MODE_FALSE@,$MAINTAINER_MODE_FALSE,;t t + s,@MAINT@,$MAINT,;t t ++s,@BUILD_DRMINGW_TRUE@,$BUILD_DRMINGW_TRUE,;t t ++s,@BUILD_DRMINGW_FALSE@,$BUILD_DRMINGW_FALSE,;t t ++s,@BUILD_REDIR_TRUE@,$BUILD_REDIR_TRUE,;t t ++s,@BUILD_REDIR_FALSE@,$BUILD_REDIR_FALSE,;t t ++s,@BUILD_RES2COFF_TRUE@,$BUILD_RES2COFF_TRUE,;t t ++s,@BUILD_RES2COFF_FALSE@,$BUILD_RES2COFF_FALSE,;t t + s,@CC@,$CC,;t t + s,@CFLAGS@,$CFLAGS,;t t + s,@LDFLAGS@,$LDFLAGS,;t t +@@ -6262,27 +8288,21 @@ + else + continue + fi +- grep '^DEP_FILES *= *[^ #]' < "$mf" > /dev/null || continue +- # Extract the definition of DEP_FILES from the Makefile without +- # running `make'. +- DEPDIR=`sed -n -e '/^DEPDIR = / s///p' < "$mf"` ++ # Extract the definition of DEPDIR, am__include, and am__quote ++ # from the Makefile without running `make'. ++ DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"` + test -z "$DEPDIR" && continue ++ am__include=`sed -n 's/^am__include = //p' < "$mf"` ++ test -z "am__include" && continue ++ am__quote=`sed -n 's/^am__quote = //p' < "$mf"` + # When using ansi2knr, U may be empty or an underscore; expand it +- U=`sed -n -e '/^U = / s///p' < "$mf"` +- test -d "$dirpart/$DEPDIR" || mkdir "$dirpart/$DEPDIR" +- # We invoke sed twice because it is the simplest approach to +- # changing $(DEPDIR) to its actual value in the expansion. +- for file in `sed -n -e ' +- /^DEP_FILES = .*\\\\$/ { +- s/^DEP_FILES = // +- :loop +- s/\\\\$// +- p +- n +- /\\\\$/ b loop +- p +- } +- /^DEP_FILES = / s/^DEP_FILES = //p' < "$mf" | \ ++ U=`sed -n 's/^U = //p' < "$mf"` ++ # Find all dependency output files, they are included files with ++ # $(DEPDIR) in their names. We invoke sed twice because it is the ++ # simplest approach to changing $(DEPDIR) to its actual value in the ++ # expansion. ++ for file in `sed -n " ++ s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \ + sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g' -e 's/\$U/'"$U"'/g'`; do + # Make sure the directory exists. 
+ test -f "$dirpart/$file" && continue +diff -ru ../release/mingw-utils-0.3/configure.ac ./configure.ac +--- ../release/mingw-utils-0.3/configure.ac 2003-11-25 08:24:13.000000000 -0800 ++++ ./configure.ac 2006-08-11 05:25:25.000000000 -0700 +@@ -7,6 +7,52 @@ + AM_CONFIG_HEADER(config.h) + AM_MAINTAINER_MODE + ++AC_ARG_ENABLE(drmingw, ++ AS_HELP_STRING([--disable-drmingw],[Don't build drmingw (default is build)]), ++ [case "${enableval}" in ++ yes) build_drmingw=true ;; ++ no) build_drmingw=false ;; ++ *) AC_MSG_ERROR([--enable-drmingw argument must be yes or no, not "${enableval}"]) ;; ++ esac], ++ [build_drmingw=true]) ++ ++AC_ARG_ENABLE(redir, ++ AS_HELP_STRING([--disable-redir],[Don't build redir (default is build)]), ++ [case "${enableval}" in ++ yes) build_redir=true ;; ++ no) build_redir=false ;; ++ *) AC_MSG_ERROR([--enable-redir argument must be yes or no, not "${enableval}"]) ;; ++ esac], ++ [build_redir=true]) ++ ++AC_ARG_ENABLE(res2coff, ++ AS_HELP_STRING([--disable-res2coff],[Don't build res2coff (default is build)]), ++ [case "${enableval}" in ++ yes) build_res2coff=true ;; ++ no) build_res2coff=false ;; ++ *) AC_MSG_ERROR([--enable-res2coff argument must be yes or no, not "${enableval}"]) ;; ++ esac], ++ [build_res2coff=true]) ++ ++AC_ARG_ENABLE(nonportable, ++ AS_HELP_STRING([--disable-nonportable],[shortcut for --disable-drmingw, --disable-redir and --disable-res2coff]), ++ [case "${enableval}" in ++ yes) build_drmingw=true ++ build_redir=true ++ build_res2coff=true ++ ;; ++ no) build_res2coff=false ++ build_redir=false ++ build_res2coff=false ++ ;; ++ *) AC_MSG_ERROR([--enable-nonportable argument must be yes or no, not "${enableval}"]) ;; ++ esac], ++ [build_res2coff=true]) ++ ++AM_CONDITIONAL(BUILD_DRMINGW, [test "${build_drmingw}" = "true"]) ++AM_CONDITIONAL(BUILD_REDIR, [test "${build_redir}" = "true"]) ++AM_CONDITIONAL(BUILD_RES2COFF, [test "${build_res2coff}" = "true"]) ++ + # Checks for programs. + AC_PROG_CC + AC_PROG_CXX +@@ -19,13 +65,20 @@ + AC_PROG_YACC + + AC_ARG_VAR(WINDRES, [Windows Resource compiler tool path]) +-AC_PATH_TOOL(WINDRES,windres, +- [AC_MSG_WARN(Could not find a windres tool in your PATH. Will not be able to compile drmingw.)] +-) ++AC_PATH_TOOL(WINDRES,windres) ++ ++if test -z "${WINDRES}"; then ++ AC_MSG_WARN([windres tool isn't in your PATH, drmingw can't be built!!]) ++ AM_CONDITIONAL(BUILD_DRMINGW, false) ++fi ++ + AC_SUBST(WINDRES) + + # Checks for header files. + AC_HEADER_STDC ++ ++AC_CHECK_HEADERS([windows.h]) ++ + where_toGet_utime_h='not found!' + # this part is scrambled by autoconf. phooey. so we just cannot tell the user what we are doing. 
+ # AC_MSG_CHECKING([for whether utime.h is found as "utime.h" or "sys/utime.h"]) +@@ -44,6 +97,11 @@ + AC_CHECK_FUNCS(mkstemp mktemp, break) + AC_CHECK_FUNCS(utime) + ++AC_CHECK_SIZEOF(_int64) ++AC_CHECK_SIZEOF(long long) ++AC_CHECK_SIZEOF(long) ++AC_CHECK_SIZEOF(void *) ++ + AC_CONFIG_FILES([ + Makefile + dos2unix/Makefile +diff -ru ../release/mingw-utils-0.3/dos2unix/Makefile.am ./dos2unix/Makefile.am +--- ../release/mingw-utils-0.3/dos2unix/Makefile.am 2002-12-04 04:07:55.000000000 -0800 ++++ ./dos2unix/Makefile.am 2006-09-25 20:06:24.000000000 -0700 +@@ -6,7 +6,8 @@ + + docdir = $(prefix)/doc/dos2unix + +-doc_DATA = COPYING dos2unix.html ++#doc_DATA = COPYING dos2unix.html ++doc_DATA = COPYING + + dos2unix.html: dos2unix.1 + man2html $< > $@ +diff -ru ../release/mingw-utils-0.3/dos2unix/Makefile.in ./dos2unix/Makefile.in +--- ../release/mingw-utils-0.3/dos2unix/Makefile.in 2003-11-25 08:31:11.000000000 -0800 ++++ ./dos2unix/Makefile.in 2006-09-25 20:08:13.000000000 -0700 +@@ -1,8 +1,8 @@ +-# Makefile.in generated by automake 1.7.9 from Makefile.am. ++# Makefile.in generated by automake 1.9.6 from Makefile.am. + # @configure_input@ + +-# Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003 +-# Free Software Foundation, Inc. ++# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, ++# 2003, 2004, 2005 Free Software Foundation, Inc. + # This Makefile.in is free software; the Free Software Foundation + # gives unlimited permission to copy and/or distribute it, + # with or without modifications, as long as this notice is preserved. +@@ -14,6 +14,7 @@ + + @SET_MAKE@ + ++ + srcdir = @srcdir@ + top_srcdir = @top_srcdir@ + VPATH = @srcdir@ +@@ -21,7 +22,6 @@ + pkglibdir = $(libdir)/@PACKAGE@ + pkgincludedir = $(includedir)/@PACKAGE@ + top_builddir = .. +- + am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd + INSTALL = @INSTALL@ + install_sh_DATA = $(install_sh) -c -m 644 +@@ -35,6 +35,42 @@ + NORMAL_UNINSTALL = : + PRE_UNINSTALL = : + POST_UNINSTALL = : ++bin_PROGRAMS = dos2unix$(EXEEXT) ++subdir = dos2unix ++DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in COPYING ++ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 ++am__aclocal_m4_deps = $(top_srcdir)/configure.ac ++am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ ++ $(ACLOCAL_M4) ++mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs ++CONFIG_HEADER = $(top_builddir)/config.h ++CONFIG_CLEAN_FILES = ++am__installdirs = "$(DESTDIR)$(bindir)" "$(DESTDIR)$(docdir)" ++binPROGRAMS_INSTALL = $(INSTALL_PROGRAM) ++PROGRAMS = $(bin_PROGRAMS) ++am_dos2unix_OBJECTS = dos2unix.$(OBJEXT) ++dos2unix_OBJECTS = $(am_dos2unix_OBJECTS) ++dos2unix_LDADD = $(LDADD) ++DEFAULT_INCLUDES = -I. 
-I$(srcdir) -I$(top_builddir) ++depcomp = $(SHELL) $(top_srcdir)/depcomp ++am__depfiles_maybe = depfiles ++COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ ++ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) ++CCLD = $(CC) ++LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ ++SOURCES = $(dos2unix_SOURCES) ++DIST_SOURCES = $(dos2unix_SOURCES) ++am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; ++am__vpath_adj = case $$p in \ ++ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ ++ *) f=$$p;; \ ++ esac; ++am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; ++docDATA_INSTALL = $(INSTALL_DATA) ++DATA = $(doc_DATA) ++ETAGS = etags ++CTAGS = ctags ++DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) + ACLOCAL = @ACLOCAL@ + AMDEP_FALSE = @AMDEP_FALSE@ + AMDEP_TRUE = @AMDEP_TRUE@ +@@ -43,6 +79,12 @@ + AUTOHEADER = @AUTOHEADER@ + AUTOMAKE = @AUTOMAKE@ + AWK = @AWK@ ++BUILD_DRMINGW_FALSE = @BUILD_DRMINGW_FALSE@ ++BUILD_DRMINGW_TRUE = @BUILD_DRMINGW_TRUE@ ++BUILD_REDIR_FALSE = @BUILD_REDIR_FALSE@ ++BUILD_REDIR_TRUE = @BUILD_REDIR_TRUE@ ++BUILD_RES2COFF_FALSE = @BUILD_RES2COFF_FALSE@ ++BUILD_RES2COFF_TRUE = @BUILD_RES2COFF_TRUE@ + CC = @CC@ + CCDEPMODE = @CCDEPMODE@ + CFLAGS = @CFLAGS@ +@@ -101,6 +143,8 @@ + am__include = @am__include@ + am__leading_dot = @am__leading_dot@ + am__quote = @am__quote@ ++am__tar = @am__tar@ ++am__untar = @am__untar@ + bindir = @bindir@ + build_alias = @build_alias@ + datadir = @datadir@ +@@ -113,6 +157,7 @@ + libexecdir = @libexecdir@ + localstatedir = @localstatedir@ + mandir = @mandir@ ++mkdir_p = @mkdir_p@ + oldincludedir = @oldincludedir@ + prefix = @prefix@ + program_transform_name = @program_transform_name@ +@@ -120,63 +165,55 @@ + sharedstatedir = @sharedstatedir@ + sysconfdir = @sysconfdir@ + target_alias = @target_alias@ +-bin_PROGRAMS = dos2unix +- + dos2unix_SOURCES = dos2unix.c dos2unix.h +- + noinst_man_MANS = dos2unix.1 +- + docdir = $(prefix)/doc/dos2unix + +-doc_DATA = COPYING dos2unix.html +-subdir = dos2unix +-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +-mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs +-CONFIG_HEADER = $(top_builddir)/config.h +-CONFIG_CLEAN_FILES = +-bin_PROGRAMS = dos2unix$(EXEEXT) +-PROGRAMS = $(bin_PROGRAMS) +- +-am_dos2unix_OBJECTS = dos2unix.$(OBJEXT) +-dos2unix_OBJECTS = $(am_dos2unix_OBJECTS) +-dos2unix_LDADD = $(LDADD) +-dos2unix_DEPENDENCIES = +-dos2unix_LDFLAGS = +- +-DEFAULT_INCLUDES = -I. 
-I$(srcdir) -I$(top_builddir) +-depcomp = $(SHELL) $(top_srcdir)/depcomp +-am__depfiles_maybe = depfiles +-@AMDEP_TRUE@DEP_FILES = ./$(DEPDIR)/dos2unix.Po +-COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ +- $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +-CCLD = $(CC) +-LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ +-DIST_SOURCES = $(dos2unix_SOURCES) +-DATA = $(doc_DATA) +- +-DIST_COMMON = $(srcdir)/Makefile.in COPYING Makefile.am +-SOURCES = $(dos2unix_SOURCES) +- ++#doc_DATA = COPYING dos2unix.html ++doc_DATA = COPYING + all: all-am + + .SUFFIXES: + .SUFFIXES: .c .o .obj +-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ Makefile.am $(top_srcdir)/configure.ac $(ACLOCAL_M4) ++$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) ++ @for dep in $?; do \ ++ case '$(am__configure_deps)' in \ ++ *$$dep*) \ ++ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ ++ && exit 0; \ ++ exit 1;; \ ++ esac; \ ++ done; \ ++ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu dos2unix/Makefile'; \ + cd $(top_srcdir) && \ + $(AUTOMAKE) --gnu dos2unix/Makefile +-Makefile: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.in $(top_builddir)/config.status +- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe) +-binPROGRAMS_INSTALL = $(INSTALL_PROGRAM) ++.PRECIOUS: Makefile ++Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status ++ @case '$?' in \ ++ *config.status*) \ ++ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ ++ *) \ ++ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ ++ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ ++ esac; ++ ++$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) ++ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ++ ++$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) ++ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ++$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) ++ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + install-binPROGRAMS: $(bin_PROGRAMS) + @$(NORMAL_INSTALL) +- $(mkinstalldirs) $(DESTDIR)$(bindir) ++ test -z "$(bindir)" || $(mkdir_p) "$(DESTDIR)$(bindir)" + @list='$(bin_PROGRAMS)'; for p in $$list; do \ + p1=`echo $$p|sed 's/$(EXEEXT)$$//'`; \ + if test -f $$p \ + ; then \ + f=`echo "$$p1" | sed 's,^.*/,,;$(transform);s/$$/$(EXEEXT)/'`; \ +- echo " $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) $$p $(DESTDIR)$(bindir)/$$f"; \ +- $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) $$p $(DESTDIR)$(bindir)/$$f || exit 1; \ ++ echo " $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ ++ $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ + else :; fi; \ + done + +@@ -184,8 +221,8 @@ + @$(NORMAL_UNINSTALL) + @list='$(bin_PROGRAMS)'; for p in $$list; do \ + f=`echo "$$p" | sed 's,^.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/'`; \ +- echo " rm -f $(DESTDIR)$(bindir)/$$f"; \ +- rm -f $(DESTDIR)$(bindir)/$$f; \ ++ echo " rm -f '$(DESTDIR)$(bindir)/$$f'"; \ ++ rm -f "$(DESTDIR)$(bindir)/$$f"; \ + done + + clean-binPROGRAMS: +@@ -195,7 +232,7 @@ + $(LINK) $(dos2unix_LDFLAGS) $(dos2unix_OBJECTS) $(dos2unix_LDADD) $(LIBS) + + mostlyclean-compile: +- -rm -f *.$(OBJEXT) core *.core ++ -rm -f *.$(OBJEXT) + + distclean-compile: + -rm -f *.tab.c +@@ -203,54 +240,37 @@ + @AMDEP_TRUE@@am__include@ 
@am__quote@./$(DEPDIR)/dos2unix.Po@am__quote@ + + .c.o: +-@am__fastdepCC_TRUE@ if $(COMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" \ +-@am__fastdepCC_TRUE@ -c -o $@ `test -f '$<' || echo '$(srcdir)/'`$<; \ +-@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; \ +-@am__fastdepCC_TRUE@ else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; \ +-@am__fastdepCC_TRUE@ fi ++@am__fastdepCC_TRUE@ if $(COMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" -c -o $@ $<; \ ++@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; fi + @AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ depfile='$(DEPDIR)/$*.Po' tmpdepfile='$(DEPDIR)/$*.TPo' @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +-@am__fastdepCC_FALSE@ $(COMPILE) -c `test -f '$<' || echo '$(srcdir)/'`$< ++@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ ++@am__fastdepCC_FALSE@ $(COMPILE) -c $< + + .c.obj: +-@am__fastdepCC_TRUE@ if $(COMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" \ +-@am__fastdepCC_TRUE@ -c -o $@ `if test -f '$<'; then $(CYGPATH_W) '$<'; else $(CYGPATH_W) '$(srcdir)/$<'; fi`; \ +-@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; \ +-@am__fastdepCC_TRUE@ else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; \ +-@am__fastdepCC_TRUE@ fi ++@am__fastdepCC_TRUE@ if $(COMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" -c -o $@ `$(CYGPATH_W) '$<'`; \ ++@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; fi + @AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ depfile='$(DEPDIR)/$*.Po' tmpdepfile='$(DEPDIR)/$*.TPo' @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +-@am__fastdepCC_FALSE@ $(COMPILE) -c `if test -f '$<'; then $(CYGPATH_W) '$<'; else $(CYGPATH_W) '$(srcdir)/$<'; fi` ++@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ ++@am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'` + uninstall-info-am: +-docDATA_INSTALL = $(INSTALL_DATA) + install-docDATA: $(doc_DATA) + @$(NORMAL_INSTALL) +- $(mkinstalldirs) $(DESTDIR)$(docdir) ++ test -z "$(docdir)" || $(mkdir_p) "$(DESTDIR)$(docdir)" + @list='$(doc_DATA)'; for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ +- f="`echo $$p | sed -e 's|^.*/||'`"; \ +- echo " $(docDATA_INSTALL) $$d$$p $(DESTDIR)$(docdir)/$$f"; \ +- $(docDATA_INSTALL) $$d$$p $(DESTDIR)$(docdir)/$$f; \ ++ f=$(am__strip_dir) \ ++ echo " $(docDATA_INSTALL) '$$d$$p' '$(DESTDIR)$(docdir)/$$f'"; \ ++ $(docDATA_INSTALL) "$$d$$p" "$(DESTDIR)$(docdir)/$$f"; \ + done + + uninstall-docDATA: + @$(NORMAL_UNINSTALL) + @list='$(doc_DATA)'; for p in $$list; do \ +- f="`echo $$p | sed -e 's|^.*/||'`"; \ +- echo " rm -f $(DESTDIR)$(docdir)/$$f"; \ +- rm -f $(DESTDIR)$(docdir)/$$f; \ ++ f=$(am__strip_dir) \ ++ echo " rm -f '$(DESTDIR)$(docdir)/$$f'"; \ ++ rm -f "$(DESTDIR)$(docdir)/$$f"; \ + done + +-ETAGS = etags +-ETAGSFLAGS = +- +-CTAGS = ctags +-CTAGSFLAGS = +- +-tags: TAGS +- + ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ +@@ -259,6 +279,7 @@ + $(AWK) ' { files[$$0] = 1; } \ + END { for (i in files) print i; }'`; \ + mkid -fID $$unique ++tags: TAGS + + TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) 
\ + $(TAGS_FILES) $(LISP) +@@ -270,10 +291,11 @@ + done | \ + $(AWK) ' { files[$$0] = 1; } \ + END { for (i in files) print i; }'`; \ +- test -z "$(ETAGS_ARGS)$$tags$$unique" \ +- || $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ +- $$tags $$unique +- ++ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ ++ test -n "$$unique" || unique=$$empty_fix; \ ++ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ ++ $$tags $$unique; \ ++ fi + ctags: CTAGS + CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) +@@ -296,10 +318,6 @@ + + distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags +-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +- +-top_distdir = .. +-distdir = $(top_distdir)/$(PACKAGE)-$(VERSION) + + distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \ +@@ -313,7 +331,7 @@ + dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test "$$dir" != "$$file" && test "$$dir" != "."; then \ + dir="/$$dir"; \ +- $(mkinstalldirs) "$(distdir)$$dir"; \ ++ $(mkdir_p) "$(distdir)$$dir"; \ + else \ + dir=''; \ + fi; \ +@@ -331,9 +349,10 @@ + check-am: all-am + check: check-am + all-am: Makefile $(PROGRAMS) $(DATA) +- + installdirs: +- $(mkinstalldirs) $(DESTDIR)$(bindir) $(DESTDIR)$(docdir) ++ for dir in "$(DESTDIR)$(bindir)" "$(DESTDIR)$(docdir)"; do \ ++ test -z "$$dir" || $(mkdir_p) "$$dir"; \ ++ done + install: install-am + install-exec: install-exec-am + install-data: install-data-am +@@ -353,7 +372,7 @@ + clean-generic: + + distclean-generic: +- -rm -f $(CONFIG_CLEAN_FILES) ++ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + + maintainer-clean-generic: + @echo "This command is intended for maintainers to use" +@@ -372,6 +391,8 @@ + + dvi-am: + ++html: html-am ++ + info: info-am + + info-am: +@@ -403,19 +424,20 @@ + + ps-am: + +-uninstall-am: uninstall-binPROGRAMS uninstall-docDATA uninstall-info-am ++uninstall-am: uninstall-binPROGRAMS uninstall-docDATA \ ++ uninstall-info-am + + .PHONY: CTAGS GTAGS all all-am check check-am clean clean-binPROGRAMS \ + clean-generic ctags distclean distclean-compile \ +- distclean-generic distclean-tags distdir dvi dvi-am info \ +- info-am install install-am install-binPROGRAMS install-data \ +- install-data-am install-docDATA install-exec install-exec-am \ +- install-info install-info-am install-man install-strip \ +- installcheck installcheck-am installdirs maintainer-clean \ +- maintainer-clean-generic mostlyclean mostlyclean-compile \ +- mostlyclean-generic pdf pdf-am ps ps-am tags uninstall \ +- uninstall-am uninstall-binPROGRAMS uninstall-docDATA \ +- uninstall-info-am ++ distclean-generic distclean-tags distdir dvi dvi-am html \ ++ html-am info info-am install install-am install-binPROGRAMS \ ++ install-data install-data-am install-docDATA install-exec \ ++ install-exec-am install-info install-info-am install-man \ ++ install-strip installcheck installcheck-am installdirs \ ++ maintainer-clean maintainer-clean-generic mostlyclean \ ++ mostlyclean-compile mostlyclean-generic pdf pdf-am ps ps-am \ ++ tags uninstall uninstall-am uninstall-binPROGRAMS \ ++ uninstall-docDATA uninstall-info-am + + + dos2unix.html: dos2unix.1 +diff -ru ../release/mingw-utils-0.3/drmingw/Makefile.in ./drmingw/Makefile.in +--- ../release/mingw-utils-0.3/drmingw/Makefile.in 2003-11-25 09:56:14.000000000 -0800 ++++ ./drmingw/Makefile.in 2006-09-25 20:08:13.000000000 -0700 +@@ -1,8 +1,8 @@ +-# Makefile.in generated by automake 1.7.9 from Makefile.am. 
++# Makefile.in generated by automake 1.9.6 from Makefile.am. + # @configure_input@ + +-# Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003 +-# Free Software Foundation, Inc. ++# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, ++# 2003, 2004, 2005 Free Software Foundation, Inc. + # This Makefile.in is free software; the Free Software Foundation + # gives unlimited permission to copy and/or distribute it, + # with or without modifications, as long as this notice is preserved. +@@ -14,6 +14,8 @@ + + @SET_MAKE@ + ++ ++ + srcdir = @srcdir@ + top_srcdir = @top_srcdir@ + VPATH = @srcdir@ +@@ -21,7 +23,6 @@ + pkglibdir = $(libdir)/@PACKAGE@ + pkgincludedir = $(includedir)/@PACKAGE@ + top_builddir = .. +- + am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd + INSTALL = @INSTALL@ + install_sh_DATA = $(install_sh) -c -m 644 +@@ -35,6 +36,73 @@ + NORMAL_UNINSTALL = : + PRE_UNINSTALL = : + POST_UNINSTALL = : ++bin_PROGRAMS = drmingw$(EXEEXT) ++samples_PROGRAMS = test$(EXEEXT) testcpp$(EXEEXT) ++subdir = drmingw ++DIST_COMMON = $(dist_doc_DATA) $(srcdir)/Makefile.am \ ++ $(srcdir)/Makefile.in COPYING COPYING.LIB ++ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 ++am__aclocal_m4_deps = $(top_srcdir)/configure.ac ++am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ ++ $(ACLOCAL_M4) ++mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs ++CONFIG_HEADER = $(top_builddir)/config.h ++CONFIG_CLEAN_FILES = ++LIBRARIES = $(noinst_LIBRARIES) ++AR = ar ++ARFLAGS = cru ++libexchndl2_a_AR = $(AR) $(ARFLAGS) ++libexchndl2_a_LIBADD = ++am_libexchndl2_a_OBJECTS = exchndl2.$(OBJEXT) ++libexchndl2_a_OBJECTS = $(am_libexchndl2_a_OBJECTS) ++am__installdirs = "$(DESTDIR)$(bindir)" "$(DESTDIR)$(samplesdir)" \ ++ "$(DESTDIR)$(docdir)" "$(DESTDIR)$(samplesdir)" ++binPROGRAMS_INSTALL = $(INSTALL_PROGRAM) ++samplesPROGRAMS_INSTALL = $(INSTALL_PROGRAM) ++PROGRAMS = $(bin_PROGRAMS) $(samples_PROGRAMS) ++am_drmingw_OBJECTS = debugger.$(OBJEXT) debugx.$(OBJEXT) \ ++ dialog.$(OBJEXT) log.$(OBJEXT) main.$(OBJEXT) misc.$(OBJEXT) \ ++ module.$(OBJEXT) prdbg.$(OBJEXT) symbols.$(OBJEXT) \ ++ ieee.$(OBJEXT) rdcoff.$(OBJEXT) rddbg.$(OBJEXT) \ ++ stabs.$(OBJEXT) debug.$(OBJEXT) ++drmingw_OBJECTS = $(am_drmingw_OBJECTS) ++drmingw_DEPENDENCIES = resource.o ++am_test_OBJECTS = test-test.$(OBJEXT) ++test_OBJECTS = $(am_test_OBJECTS) ++test_DEPENDENCIES = exchndl2.o ++am_testcpp_OBJECTS = testcpp-testcpp.$(OBJEXT) ++testcpp_OBJECTS = $(am_testcpp_OBJECTS) ++testcpp_DEPENDENCIES = exchndl2.o ++DEFAULT_INCLUDES = -I. 
-I$(srcdir) -I$(top_builddir) ++depcomp = $(SHELL) $(top_srcdir)/depcomp ++am__depfiles_maybe = depfiles ++COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ ++ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) ++CCLD = $(CC) ++LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ ++CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ ++ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) ++CXXLD = $(CXX) ++CXXLINK = $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) \ ++ -o $@ ++SOURCES = $(libexchndl2_a_SOURCES) $(drmingw_SOURCES) \ ++ $(dist_EXTRA_drmingw_SOURCES) $(test_SOURCES) \ ++ $(testcpp_SOURCES) ++DIST_SOURCES = $(libexchndl2_a_SOURCES) $(drmingw_SOURCES) \ ++ $(dist_EXTRA_drmingw_SOURCES) $(test_SOURCES) \ ++ $(testcpp_SOURCES) ++am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; ++am__vpath_adj = case $$p in \ ++ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ ++ *) f=$$p;; \ ++ esac; ++am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; ++dist_docDATA_INSTALL = $(INSTALL_DATA) ++samplesDATA_INSTALL = $(INSTALL_DATA) ++DATA = $(dist_doc_DATA) $(samples_DATA) ++ETAGS = etags ++CTAGS = ctags ++DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) + ACLOCAL = @ACLOCAL@ + AMDEP_FALSE = @AMDEP_FALSE@ + AMDEP_TRUE = @AMDEP_TRUE@ +@@ -43,6 +111,12 @@ + AUTOHEADER = @AUTOHEADER@ + AUTOMAKE = @AUTOMAKE@ + AWK = @AWK@ ++BUILD_DRMINGW_FALSE = @BUILD_DRMINGW_FALSE@ ++BUILD_DRMINGW_TRUE = @BUILD_DRMINGW_TRUE@ ++BUILD_REDIR_FALSE = @BUILD_REDIR_FALSE@ ++BUILD_REDIR_TRUE = @BUILD_REDIR_TRUE@ ++BUILD_RES2COFF_FALSE = @BUILD_RES2COFF_FALSE@ ++BUILD_RES2COFF_TRUE = @BUILD_RES2COFF_TRUE@ + CC = @CC@ + CCDEPMODE = @CCDEPMODE@ + CFLAGS = @CFLAGS@ +@@ -101,6 +175,8 @@ + am__include = @am__include@ + am__leading_dot = @am__leading_dot@ + am__quote = @am__quote@ ++am__tar = @am__tar@ ++am__untar = @am__untar@ + bindir = @bindir@ + build_alias = @build_alias@ + datadir = @datadir@ +@@ -113,6 +189,7 @@ + libexecdir = @libexecdir@ + localstatedir = @localstatedir@ + mandir = @mandir@ ++mkdir_p = @mkdir_p@ + oldincludedir = @oldincludedir@ + prefix = @prefix@ + program_transform_name = @program_transform_name@ +@@ -120,9 +197,6 @@ + sharedstatedir = @sharedstatedir@ + sysconfdir = @sysconfdir@ + target_alias = @target_alias@ +- +-bin_PROGRAMS = drmingw +- + drmingw_SOURCES = \ + debugger.c \ + debugger.h \ +@@ -158,116 +232,62 @@ + include/libcoff.h \ + include/libiberty.h + +- + dist_EXTRA_drmingw_SOURCES = \ + resource.rc \ + icon.ico + +- + INCLUDES = -I$(srcdir)/include +- + drmingw_LDFLAGS = -mwindows + drmingw_LDADD = resource.o -lbfd -liberty +- + RC = @WINDRES@ + RCFLAGS = --use-temp-file --verbose -O COFF +- + EXTRA_DIST = exchndl.c +- + CLEANFILES = exchndl.dll +- + docdir = $(prefix)/doc/drmingw +- + dist_doc_DATA = COPYING COPYING.LIB doc/drmingw.html doc/drmingw.reg doc/exception-nt.gif doc/install.gif doc/sample.gif +- + noinst_LIBRARIES = libexchndl2.a +- + libexchndl2_a_SOURCES = samples/exchndl2.cxx +- + samplesdir = $(docdir)/samples +- +-samples_PROGRAMS = test testcpp + samples_DATA = samples/exchndl2.cxx samples/test.c samples/testcpp.cxx +- + test_SOURCES = samples/test.c + test_LDADD = exchndl2.o -lstdc++ + test_CFLAGS = -ggdb $(AM_CFLAGS) +- + testcpp_SOURCES = samples/testcpp.cxx + testcpp_LDADD = exchndl2.o -lstdc++ + testcpp_CXXFLAGS = -ggdb $(AM_CXXFLAGS) +-subdir = drmingw +-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +-mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs +-CONFIG_HEADER = 
$(top_builddir)/config.h +-CONFIG_CLEAN_FILES = +-LIBRARIES = $(noinst_LIBRARIES) +- +-libexchndl2_a_AR = $(AR) cru +-libexchndl2_a_LIBADD = +-am_libexchndl2_a_OBJECTS = exchndl2.$(OBJEXT) +-libexchndl2_a_OBJECTS = $(am_libexchndl2_a_OBJECTS) +-bin_PROGRAMS = drmingw$(EXEEXT) +-samples_PROGRAMS = test$(EXEEXT) testcpp$(EXEEXT) +-PROGRAMS = $(bin_PROGRAMS) $(samples_PROGRAMS) +- +-am_drmingw_OBJECTS = debugger.$(OBJEXT) debugx.$(OBJEXT) \ +- dialog.$(OBJEXT) log.$(OBJEXT) main.$(OBJEXT) misc.$(OBJEXT) \ +- module.$(OBJEXT) prdbg.$(OBJEXT) symbols.$(OBJEXT) \ +- ieee.$(OBJEXT) rdcoff.$(OBJEXT) rddbg.$(OBJEXT) stabs.$(OBJEXT) \ +- debug.$(OBJEXT) +-drmingw_OBJECTS = $(am_drmingw_OBJECTS) +-drmingw_DEPENDENCIES = resource.o +-am_test_OBJECTS = test-test.$(OBJEXT) +-test_OBJECTS = $(am_test_OBJECTS) +-test_DEPENDENCIES = exchndl2.o +-test_LDFLAGS = +-am_testcpp_OBJECTS = testcpp-testcpp.$(OBJEXT) +-testcpp_OBJECTS = $(am_testcpp_OBJECTS) +-testcpp_DEPENDENCIES = exchndl2.o +-testcpp_LDFLAGS = +- +-DEFAULT_INCLUDES = -I. -I$(srcdir) -I$(top_builddir) +-depcomp = $(SHELL) $(top_srcdir)/depcomp +-am__depfiles_maybe = depfiles +-@AMDEP_TRUE@DEP_FILES = ./$(DEPDIR)/debug.Po ./$(DEPDIR)/debugger.Po \ +-@AMDEP_TRUE@ ./$(DEPDIR)/debugx.Po ./$(DEPDIR)/dialog.Po \ +-@AMDEP_TRUE@ ./$(DEPDIR)/exchndl2.Po ./$(DEPDIR)/ieee.Po \ +-@AMDEP_TRUE@ ./$(DEPDIR)/log.Po ./$(DEPDIR)/main.Po \ +-@AMDEP_TRUE@ ./$(DEPDIR)/misc.Po ./$(DEPDIR)/module.Po \ +-@AMDEP_TRUE@ ./$(DEPDIR)/prdbg.Po ./$(DEPDIR)/rdcoff.Po \ +-@AMDEP_TRUE@ ./$(DEPDIR)/rddbg.Po ./$(DEPDIR)/stabs.Po \ +-@AMDEP_TRUE@ ./$(DEPDIR)/symbols.Po ./$(DEPDIR)/test-test.Po \ +-@AMDEP_TRUE@ ./$(DEPDIR)/testcpp-testcpp.Po +-COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ +- $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +-CCLD = $(CC) +-LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ +-CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ +- $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) +-CXXLD = $(CXX) +-CXXLINK = $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) \ +- -o $@ +-DIST_SOURCES = $(libexchndl2_a_SOURCES) $(drmingw_SOURCES) \ +- $(dist_EXTRA_drmingw_SOURCES) $(test_SOURCES) \ +- $(testcpp_SOURCES) +-DATA = $(dist_doc_DATA) $(samples_DATA) +- +-DIST_COMMON = $(dist_doc_DATA) $(srcdir)/Makefile.in COPYING \ +- COPYING.LIB Makefile.am +-SOURCES = $(libexchndl2_a_SOURCES) $(drmingw_SOURCES) $(dist_EXTRA_drmingw_SOURCES) $(test_SOURCES) $(testcpp_SOURCES) +- + all: all-am + + .SUFFIXES: + .SUFFIXES: .c .cxx .o .obj +-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ Makefile.am $(top_srcdir)/configure.ac $(ACLOCAL_M4) ++$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) ++ @for dep in $?; do \ ++ case '$(am__configure_deps)' in \ ++ *$$dep*) \ ++ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ ++ && exit 0; \ ++ exit 1;; \ ++ esac; \ ++ done; \ ++ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu drmingw/Makefile'; \ + cd $(top_srcdir) && \ + $(AUTOMAKE) --gnu drmingw/Makefile +-Makefile: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.in $(top_builddir)/config.status +- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe) +- +-AR = ar ++.PRECIOUS: Makefile ++Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status ++ @case '$?' 
in \ ++ *config.status*) \ ++ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ ++ *) \ ++ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ ++ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ ++ esac; ++ ++$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) ++ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ++ ++$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) ++ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ++$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) ++ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + + clean-noinstLIBRARIES: + -test -z "$(noinst_LIBRARIES)" || rm -f $(noinst_LIBRARIES) +@@ -275,17 +295,16 @@ + -rm -f libexchndl2.a + $(libexchndl2_a_AR) libexchndl2.a $(libexchndl2_a_OBJECTS) $(libexchndl2_a_LIBADD) + $(RANLIB) libexchndl2.a +-binPROGRAMS_INSTALL = $(INSTALL_PROGRAM) + install-binPROGRAMS: $(bin_PROGRAMS) + @$(NORMAL_INSTALL) +- $(mkinstalldirs) $(DESTDIR)$(bindir) ++ test -z "$(bindir)" || $(mkdir_p) "$(DESTDIR)$(bindir)" + @list='$(bin_PROGRAMS)'; for p in $$list; do \ + p1=`echo $$p|sed 's/$(EXEEXT)$$//'`; \ + if test -f $$p \ + ; then \ + f=`echo "$$p1" | sed 's,^.*/,,;$(transform);s/$$/$(EXEEXT)/'`; \ +- echo " $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) $$p $(DESTDIR)$(bindir)/$$f"; \ +- $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) $$p $(DESTDIR)$(bindir)/$$f || exit 1; \ ++ echo " $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ ++ $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ + else :; fi; \ + done + +@@ -293,23 +312,22 @@ + @$(NORMAL_UNINSTALL) + @list='$(bin_PROGRAMS)'; for p in $$list; do \ + f=`echo "$$p" | sed 's,^.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/'`; \ +- echo " rm -f $(DESTDIR)$(bindir)/$$f"; \ +- rm -f $(DESTDIR)$(bindir)/$$f; \ ++ echo " rm -f '$(DESTDIR)$(bindir)/$$f'"; \ ++ rm -f "$(DESTDIR)$(bindir)/$$f"; \ + done + + clean-binPROGRAMS: + -test -z "$(bin_PROGRAMS)" || rm -f $(bin_PROGRAMS) +-samplesPROGRAMS_INSTALL = $(INSTALL_PROGRAM) + install-samplesPROGRAMS: $(samples_PROGRAMS) + @$(NORMAL_INSTALL) +- $(mkinstalldirs) $(DESTDIR)$(samplesdir) ++ test -z "$(samplesdir)" || $(mkdir_p) "$(DESTDIR)$(samplesdir)" + @list='$(samples_PROGRAMS)'; for p in $$list; do \ + p1=`echo $$p|sed 's/$(EXEEXT)$$//'`; \ + if test -f $$p \ + ; then \ + f=`echo "$$p1" | sed 's,^.*/,,;$(transform);s/$$/$(EXEEXT)/'`; \ +- echo " $(INSTALL_PROGRAM_ENV) $(samplesPROGRAMS_INSTALL) $$p $(DESTDIR)$(samplesdir)/$$f"; \ +- $(INSTALL_PROGRAM_ENV) $(samplesPROGRAMS_INSTALL) $$p $(DESTDIR)$(samplesdir)/$$f || exit 1; \ ++ echo " $(INSTALL_PROGRAM_ENV) $(samplesPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(samplesdir)/$$f'"; \ ++ $(INSTALL_PROGRAM_ENV) $(samplesPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(samplesdir)/$$f" || exit 1; \ + else :; fi; \ + done + +@@ -317,8 +335,8 @@ + @$(NORMAL_UNINSTALL) + @list='$(samples_PROGRAMS)'; for p in $$list; do \ + f=`echo "$$p" | sed 's,^.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/'`; \ +- echo " rm -f $(DESTDIR)$(samplesdir)/$$f"; \ +- rm -f $(DESTDIR)$(samplesdir)/$$f; \ ++ echo " rm -f '$(DESTDIR)$(samplesdir)/$$f'"; \ ++ rm -f "$(DESTDIR)$(samplesdir)/$$f"; \ + done + + clean-samplesPROGRAMS: +@@ -334,7 +352,7 @@ + $(CXXLINK) $(testcpp_LDFLAGS) $(testcpp_OBJECTS) $(testcpp_LDADD) $(LIBS) + + mostlyclean-compile: +- -rm -f *.$(OBJEXT) core *.core ++ -rm -f 
*.$(OBJEXT) + + distclean-compile: + -rm -f *.tab.c +@@ -358,160 +376,110 @@ + @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/testcpp-testcpp.Po@am__quote@ + + .c.o: +-@am__fastdepCC_TRUE@ if $(COMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" \ +-@am__fastdepCC_TRUE@ -c -o $@ `test -f '$<' || echo '$(srcdir)/'`$<; \ +-@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; \ +-@am__fastdepCC_TRUE@ else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; \ +-@am__fastdepCC_TRUE@ fi ++@am__fastdepCC_TRUE@ if $(COMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" -c -o $@ $<; \ ++@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; fi + @AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ depfile='$(DEPDIR)/$*.Po' tmpdepfile='$(DEPDIR)/$*.TPo' @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +-@am__fastdepCC_FALSE@ $(COMPILE) -c `test -f '$<' || echo '$(srcdir)/'`$< ++@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ ++@am__fastdepCC_FALSE@ $(COMPILE) -c $< + + .c.obj: +-@am__fastdepCC_TRUE@ if $(COMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" \ +-@am__fastdepCC_TRUE@ -c -o $@ `if test -f '$<'; then $(CYGPATH_W) '$<'; else $(CYGPATH_W) '$(srcdir)/$<'; fi`; \ +-@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; \ +-@am__fastdepCC_TRUE@ else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; \ +-@am__fastdepCC_TRUE@ fi ++@am__fastdepCC_TRUE@ if $(COMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" -c -o $@ `$(CYGPATH_W) '$<'`; \ ++@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; fi + @AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ depfile='$(DEPDIR)/$*.Po' tmpdepfile='$(DEPDIR)/$*.TPo' @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +-@am__fastdepCC_FALSE@ $(COMPILE) -c `if test -f '$<'; then $(CYGPATH_W) '$<'; else $(CYGPATH_W) '$(srcdir)/$<'; fi` ++@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ ++@am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'` + + test-test.o: samples/test.c +-@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_CFLAGS) $(CFLAGS) -MT test-test.o -MD -MP -MF "$(DEPDIR)/test-test.Tpo" \ +-@am__fastdepCC_TRUE@ -c -o test-test.o `test -f 'samples/test.c' || echo '$(srcdir)/'`samples/test.c; \ +-@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/test-test.Tpo" "$(DEPDIR)/test-test.Po"; \ +-@am__fastdepCC_TRUE@ else rm -f "$(DEPDIR)/test-test.Tpo"; exit 1; \ +-@am__fastdepCC_TRUE@ fi ++@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_CFLAGS) $(CFLAGS) -MT test-test.o -MD -MP -MF "$(DEPDIR)/test-test.Tpo" -c -o test-test.o `test -f 'samples/test.c' || echo '$(srcdir)/'`samples/test.c; \ ++@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/test-test.Tpo" "$(DEPDIR)/test-test.Po"; else rm -f "$(DEPDIR)/test-test.Tpo"; exit 1; fi + @AMDEP_TRUE@@am__fastdepCC_FALSE@ source='samples/test.c' object='test-test.o' libtool=no @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ depfile='$(DEPDIR)/test-test.Po' tmpdepfile='$(DEPDIR)/test-test.TPo' @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ ++@AMDEP_TRUE@@am__fastdepCC_FALSE@ 
DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ + @am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_CFLAGS) $(CFLAGS) -c -o test-test.o `test -f 'samples/test.c' || echo '$(srcdir)/'`samples/test.c + + test-test.obj: samples/test.c +-@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_CFLAGS) $(CFLAGS) -MT test-test.obj -MD -MP -MF "$(DEPDIR)/test-test.Tpo" \ +-@am__fastdepCC_TRUE@ -c -o test-test.obj `if test -f 'samples/test.c'; then $(CYGPATH_W) 'samples/test.c'; else $(CYGPATH_W) '$(srcdir)/samples/test.c'; fi`; \ +-@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/test-test.Tpo" "$(DEPDIR)/test-test.Po"; \ +-@am__fastdepCC_TRUE@ else rm -f "$(DEPDIR)/test-test.Tpo"; exit 1; \ +-@am__fastdepCC_TRUE@ fi ++@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_CFLAGS) $(CFLAGS) -MT test-test.obj -MD -MP -MF "$(DEPDIR)/test-test.Tpo" -c -o test-test.obj `if test -f 'samples/test.c'; then $(CYGPATH_W) 'samples/test.c'; else $(CYGPATH_W) '$(srcdir)/samples/test.c'; fi`; \ ++@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/test-test.Tpo" "$(DEPDIR)/test-test.Po"; else rm -f "$(DEPDIR)/test-test.Tpo"; exit 1; fi + @AMDEP_TRUE@@am__fastdepCC_FALSE@ source='samples/test.c' object='test-test.obj' libtool=no @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ depfile='$(DEPDIR)/test-test.Po' tmpdepfile='$(DEPDIR)/test-test.TPo' @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ ++@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ + @am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_CFLAGS) $(CFLAGS) -c -o test-test.obj `if test -f 'samples/test.c'; then $(CYGPATH_W) 'samples/test.c'; else $(CYGPATH_W) '$(srcdir)/samples/test.c'; fi` + + .cxx.o: +-@am__fastdepCXX_TRUE@ if $(CXXCOMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" \ +-@am__fastdepCXX_TRUE@ -c -o $@ `test -f '$<' || echo '$(srcdir)/'`$<; \ +-@am__fastdepCXX_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; \ +-@am__fastdepCXX_TRUE@ else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; \ +-@am__fastdepCXX_TRUE@ fi ++@am__fastdepCXX_TRUE@ if $(CXXCOMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" -c -o $@ $<; \ ++@am__fastdepCXX_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; fi + @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ depfile='$(DEPDIR)/$*.Po' tmpdepfile='$(DEPDIR)/$*.TPo' @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +-@am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `test -f '$<' || echo '$(srcdir)/'`$< ++@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ ++@am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< + + .cxx.obj: +-@am__fastdepCXX_TRUE@ if $(CXXCOMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" \ +-@am__fastdepCXX_TRUE@ -c -o $@ `if test -f '$<'; then $(CYGPATH_W) '$<'; else $(CYGPATH_W) '$(srcdir)/$<'; fi`; \ +-@am__fastdepCXX_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; \ +-@am__fastdepCXX_TRUE@ else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; \ +-@am__fastdepCXX_TRUE@ fi ++@am__fastdepCXX_TRUE@ if $(CXXCOMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" -c -o $@ `$(CYGPATH_W) '$<'`; \ ++@am__fastdepCXX_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" 
"$(DEPDIR)/$*.Po"; else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; fi + @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ depfile='$(DEPDIR)/$*.Po' tmpdepfile='$(DEPDIR)/$*.TPo' @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +-@am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `if test -f '$<'; then $(CYGPATH_W) '$<'; else $(CYGPATH_W) '$(srcdir)/$<'; fi` ++@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ ++@am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` + + exchndl2.o: samples/exchndl2.cxx +-@am__fastdepCXX_TRUE@ if $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT exchndl2.o -MD -MP -MF "$(DEPDIR)/exchndl2.Tpo" \ +-@am__fastdepCXX_TRUE@ -c -o exchndl2.o `test -f 'samples/exchndl2.cxx' || echo '$(srcdir)/'`samples/exchndl2.cxx; \ +-@am__fastdepCXX_TRUE@ then mv -f "$(DEPDIR)/exchndl2.Tpo" "$(DEPDIR)/exchndl2.Po"; \ +-@am__fastdepCXX_TRUE@ else rm -f "$(DEPDIR)/exchndl2.Tpo"; exit 1; \ +-@am__fastdepCXX_TRUE@ fi ++@am__fastdepCXX_TRUE@ if $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT exchndl2.o -MD -MP -MF "$(DEPDIR)/exchndl2.Tpo" -c -o exchndl2.o `test -f 'samples/exchndl2.cxx' || echo '$(srcdir)/'`samples/exchndl2.cxx; \ ++@am__fastdepCXX_TRUE@ then mv -f "$(DEPDIR)/exchndl2.Tpo" "$(DEPDIR)/exchndl2.Po"; else rm -f "$(DEPDIR)/exchndl2.Tpo"; exit 1; fi + @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='samples/exchndl2.cxx' object='exchndl2.o' libtool=no @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ depfile='$(DEPDIR)/exchndl2.Po' tmpdepfile='$(DEPDIR)/exchndl2.TPo' @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ ++@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ + @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o exchndl2.o `test -f 'samples/exchndl2.cxx' || echo '$(srcdir)/'`samples/exchndl2.cxx + + exchndl2.obj: samples/exchndl2.cxx +-@am__fastdepCXX_TRUE@ if $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT exchndl2.obj -MD -MP -MF "$(DEPDIR)/exchndl2.Tpo" \ +-@am__fastdepCXX_TRUE@ -c -o exchndl2.obj `if test -f 'samples/exchndl2.cxx'; then $(CYGPATH_W) 'samples/exchndl2.cxx'; else $(CYGPATH_W) '$(srcdir)/samples/exchndl2.cxx'; fi`; \ +-@am__fastdepCXX_TRUE@ then mv -f "$(DEPDIR)/exchndl2.Tpo" "$(DEPDIR)/exchndl2.Po"; \ +-@am__fastdepCXX_TRUE@ else rm -f "$(DEPDIR)/exchndl2.Tpo"; exit 1; \ +-@am__fastdepCXX_TRUE@ fi ++@am__fastdepCXX_TRUE@ if $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT exchndl2.obj -MD -MP -MF "$(DEPDIR)/exchndl2.Tpo" -c -o exchndl2.obj `if test -f 'samples/exchndl2.cxx'; then $(CYGPATH_W) 'samples/exchndl2.cxx'; else $(CYGPATH_W) '$(srcdir)/samples/exchndl2.cxx'; fi`; \ ++@am__fastdepCXX_TRUE@ then mv -f "$(DEPDIR)/exchndl2.Tpo" "$(DEPDIR)/exchndl2.Po"; else rm -f "$(DEPDIR)/exchndl2.Tpo"; exit 1; fi + @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='samples/exchndl2.cxx' object='exchndl2.obj' libtool=no @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ depfile='$(DEPDIR)/exchndl2.Po' tmpdepfile='$(DEPDIR)/exchndl2.TPo' @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(CXXDEPMODE) $(depcomp) 
@AMDEPBACKSLASH@ ++@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ + @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o exchndl2.obj `if test -f 'samples/exchndl2.cxx'; then $(CYGPATH_W) 'samples/exchndl2.cxx'; else $(CYGPATH_W) '$(srcdir)/samples/exchndl2.cxx'; fi` + + testcpp-testcpp.o: samples/testcpp.cxx +-@am__fastdepCXX_TRUE@ if $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(testcpp_CXXFLAGS) $(CXXFLAGS) -MT testcpp-testcpp.o -MD -MP -MF "$(DEPDIR)/testcpp-testcpp.Tpo" \ +-@am__fastdepCXX_TRUE@ -c -o testcpp-testcpp.o `test -f 'samples/testcpp.cxx' || echo '$(srcdir)/'`samples/testcpp.cxx; \ +-@am__fastdepCXX_TRUE@ then mv -f "$(DEPDIR)/testcpp-testcpp.Tpo" "$(DEPDIR)/testcpp-testcpp.Po"; \ +-@am__fastdepCXX_TRUE@ else rm -f "$(DEPDIR)/testcpp-testcpp.Tpo"; exit 1; \ +-@am__fastdepCXX_TRUE@ fi ++@am__fastdepCXX_TRUE@ if $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(testcpp_CXXFLAGS) $(CXXFLAGS) -MT testcpp-testcpp.o -MD -MP -MF "$(DEPDIR)/testcpp-testcpp.Tpo" -c -o testcpp-testcpp.o `test -f 'samples/testcpp.cxx' || echo '$(srcdir)/'`samples/testcpp.cxx; \ ++@am__fastdepCXX_TRUE@ then mv -f "$(DEPDIR)/testcpp-testcpp.Tpo" "$(DEPDIR)/testcpp-testcpp.Po"; else rm -f "$(DEPDIR)/testcpp-testcpp.Tpo"; exit 1; fi + @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='samples/testcpp.cxx' object='testcpp-testcpp.o' libtool=no @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ depfile='$(DEPDIR)/testcpp-testcpp.Po' tmpdepfile='$(DEPDIR)/testcpp-testcpp.TPo' @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ ++@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ + @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(testcpp_CXXFLAGS) $(CXXFLAGS) -c -o testcpp-testcpp.o `test -f 'samples/testcpp.cxx' || echo '$(srcdir)/'`samples/testcpp.cxx + + testcpp-testcpp.obj: samples/testcpp.cxx +-@am__fastdepCXX_TRUE@ if $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(testcpp_CXXFLAGS) $(CXXFLAGS) -MT testcpp-testcpp.obj -MD -MP -MF "$(DEPDIR)/testcpp-testcpp.Tpo" \ +-@am__fastdepCXX_TRUE@ -c -o testcpp-testcpp.obj `if test -f 'samples/testcpp.cxx'; then $(CYGPATH_W) 'samples/testcpp.cxx'; else $(CYGPATH_W) '$(srcdir)/samples/testcpp.cxx'; fi`; \ +-@am__fastdepCXX_TRUE@ then mv -f "$(DEPDIR)/testcpp-testcpp.Tpo" "$(DEPDIR)/testcpp-testcpp.Po"; \ +-@am__fastdepCXX_TRUE@ else rm -f "$(DEPDIR)/testcpp-testcpp.Tpo"; exit 1; \ +-@am__fastdepCXX_TRUE@ fi ++@am__fastdepCXX_TRUE@ if $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(testcpp_CXXFLAGS) $(CXXFLAGS) -MT testcpp-testcpp.obj -MD -MP -MF "$(DEPDIR)/testcpp-testcpp.Tpo" -c -o testcpp-testcpp.obj `if test -f 'samples/testcpp.cxx'; then $(CYGPATH_W) 'samples/testcpp.cxx'; else $(CYGPATH_W) '$(srcdir)/samples/testcpp.cxx'; fi`; \ ++@am__fastdepCXX_TRUE@ then mv -f "$(DEPDIR)/testcpp-testcpp.Tpo" "$(DEPDIR)/testcpp-testcpp.Po"; else rm -f "$(DEPDIR)/testcpp-testcpp.Tpo"; exit 1; fi + @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='samples/testcpp.cxx' object='testcpp-testcpp.obj' libtool=no @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ depfile='$(DEPDIR)/testcpp-testcpp.Po' tmpdepfile='$(DEPDIR)/testcpp-testcpp.TPo' @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(CXXDEPMODE) $(depcomp) 
@AMDEPBACKSLASH@ ++@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ + @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(testcpp_CXXFLAGS) $(CXXFLAGS) -c -o testcpp-testcpp.obj `if test -f 'samples/testcpp.cxx'; then $(CYGPATH_W) 'samples/testcpp.cxx'; else $(CYGPATH_W) '$(srcdir)/samples/testcpp.cxx'; fi` + uninstall-info-am: +-dist_docDATA_INSTALL = $(INSTALL_DATA) + install-dist_docDATA: $(dist_doc_DATA) + @$(NORMAL_INSTALL) +- $(mkinstalldirs) $(DESTDIR)$(docdir) ++ test -z "$(docdir)" || $(mkdir_p) "$(DESTDIR)$(docdir)" + @list='$(dist_doc_DATA)'; for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ +- f="`echo $$p | sed -e 's|^.*/||'`"; \ +- echo " $(dist_docDATA_INSTALL) $$d$$p $(DESTDIR)$(docdir)/$$f"; \ +- $(dist_docDATA_INSTALL) $$d$$p $(DESTDIR)$(docdir)/$$f; \ ++ f=$(am__strip_dir) \ ++ echo " $(dist_docDATA_INSTALL) '$$d$$p' '$(DESTDIR)$(docdir)/$$f'"; \ ++ $(dist_docDATA_INSTALL) "$$d$$p" "$(DESTDIR)$(docdir)/$$f"; \ + done + + uninstall-dist_docDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_doc_DATA)'; for p in $$list; do \ +- f="`echo $$p | sed -e 's|^.*/||'`"; \ +- echo " rm -f $(DESTDIR)$(docdir)/$$f"; \ +- rm -f $(DESTDIR)$(docdir)/$$f; \ ++ f=$(am__strip_dir) \ ++ echo " rm -f '$(DESTDIR)$(docdir)/$$f'"; \ ++ rm -f "$(DESTDIR)$(docdir)/$$f"; \ + done +-samplesDATA_INSTALL = $(INSTALL_DATA) + install-samplesDATA: $(samples_DATA) + @$(NORMAL_INSTALL) +- $(mkinstalldirs) $(DESTDIR)$(samplesdir) ++ test -z "$(samplesdir)" || $(mkdir_p) "$(DESTDIR)$(samplesdir)" + @list='$(samples_DATA)'; for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ +- f="`echo $$p | sed -e 's|^.*/||'`"; \ +- echo " $(samplesDATA_INSTALL) $$d$$p $(DESTDIR)$(samplesdir)/$$f"; \ +- $(samplesDATA_INSTALL) $$d$$p $(DESTDIR)$(samplesdir)/$$f; \ ++ f=$(am__strip_dir) \ ++ echo " $(samplesDATA_INSTALL) '$$d$$p' '$(DESTDIR)$(samplesdir)/$$f'"; \ ++ $(samplesDATA_INSTALL) "$$d$$p" "$(DESTDIR)$(samplesdir)/$$f"; \ + done + + uninstall-samplesDATA: + @$(NORMAL_UNINSTALL) + @list='$(samples_DATA)'; for p in $$list; do \ +- f="`echo $$p | sed -e 's|^.*/||'`"; \ +- echo " rm -f $(DESTDIR)$(samplesdir)/$$f"; \ +- rm -f $(DESTDIR)$(samplesdir)/$$f; \ ++ f=$(am__strip_dir) \ ++ echo " rm -f '$(DESTDIR)$(samplesdir)/$$f'"; \ ++ rm -f "$(DESTDIR)$(samplesdir)/$$f"; \ + done + +-ETAGS = etags +-ETAGSFLAGS = +- +-CTAGS = ctags +-CTAGSFLAGS = +- +-tags: TAGS +- + ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ +@@ -520,6 +488,7 @@ + $(AWK) ' { files[$$0] = 1; } \ + END { for (i in files) print i; }'`; \ + mkid -fID $$unique ++tags: TAGS + + TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) +@@ -531,10 +500,11 @@ + done | \ + $(AWK) ' { files[$$0] = 1; } \ + END { for (i in files) print i; }'`; \ +- test -z "$(ETAGS_ARGS)$$tags$$unique" \ +- || $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ +- $$tags $$unique +- ++ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ ++ test -n "$$unique" || unique=$$empty_fix; \ ++ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ ++ $$tags $$unique; \ ++ fi + ctags: CTAGS + CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) +@@ -557,13 +527,9 @@ + + distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags +-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +- 
+-top_distdir = .. +-distdir = $(top_distdir)/$(PACKAGE)-$(VERSION) + + distdir: $(DISTFILES) +- $(mkinstalldirs) $(distdir)/doc ++ $(mkdir_p) $(distdir)/doc + @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \ + list='$(DISTFILES)'; for file in $$list; do \ +@@ -575,7 +541,7 @@ + dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test "$$dir" != "$$file" && test "$$dir" != "."; then \ + dir="/$$dir"; \ +- $(mkinstalldirs) "$(distdir)$$dir"; \ ++ $(mkdir_p) "$(distdir)$$dir"; \ + else \ + dir=''; \ + fi; \ +@@ -593,9 +559,10 @@ + check-am: all-am + check: check-am + all-am: Makefile $(LIBRARIES) $(PROGRAMS) $(DATA) +- + installdirs: +- $(mkinstalldirs) $(DESTDIR)$(bindir) $(DESTDIR)$(samplesdir) $(DESTDIR)$(docdir) $(DESTDIR)$(samplesdir) ++ for dir in "$(DESTDIR)$(bindir)" "$(DESTDIR)$(samplesdir)" "$(DESTDIR)$(docdir)" "$(DESTDIR)$(samplesdir)"; do \ ++ test -z "$$dir" || $(mkdir_p) "$$dir"; \ ++ done + install: install-am + install-exec: install-exec-am + install-data: install-data-am +@@ -616,7 +583,7 @@ + -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) + + distclean-generic: +- -rm -f $(CONFIG_CLEAN_FILES) ++ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + + maintainer-clean-generic: + @echo "This command is intended for maintainers to use" +@@ -636,6 +603,8 @@ + + dvi-am: + ++html: html-am ++ + info: info-am + + info-am: +@@ -673,14 +642,15 @@ + uninstall-samplesPROGRAMS + + .PHONY: CTAGS GTAGS all all-am check check-am clean clean-binPROGRAMS \ +- clean-generic clean-noinstLIBRARIES clean-samplesPROGRAMS ctags \ +- distclean distclean-compile distclean-generic distclean-tags \ +- distdir dvi dvi-am info info-am install install-am \ +- install-binPROGRAMS install-data install-data-am \ +- install-dist_docDATA install-exec install-exec-am \ +- install-exec-local install-info install-info-am install-man \ +- install-samplesDATA install-samplesPROGRAMS install-strip \ +- installcheck installcheck-am installdirs maintainer-clean \ ++ clean-generic clean-noinstLIBRARIES clean-samplesPROGRAMS \ ++ ctags distclean distclean-compile distclean-generic \ ++ distclean-tags distdir dvi dvi-am html html-am info info-am \ ++ install install-am install-binPROGRAMS install-data \ ++ install-data-am install-dist_docDATA install-exec \ ++ install-exec-am install-exec-local install-info \ ++ install-info-am install-man install-samplesDATA \ ++ install-samplesPROGRAMS install-strip installcheck \ ++ installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-compile \ + mostlyclean-generic pdf pdf-am ps ps-am tags uninstall \ + uninstall-am uninstall-binPROGRAMS uninstall-dist_docDATA \ +diff -ru ../release/mingw-utils-0.3/Makefile.am ./Makefile.am +--- ../release/mingw-utils-0.3/Makefile.am 2002-12-04 04:06:21.000000000 -0800 ++++ ./Makefile.am 2006-08-11 05:25:25.000000000 -0700 +@@ -1,4 +1,18 @@ +-SUBDIRS = dos2unix drmingw pexports redir reimp res2coff scripts unix2dos ++if BUILD_DRMINGW ++ DRMINGW = drmingw ++endif ++ ++if BUILD_REDIR ++ REDIR = redir ++endif ++ ++if BUILD_RES2COFF ++ RES2COFF = res2coff ++endif ++ ++SUBDIRS = dos2unix $(DRMINGW) pexports ${REDIR} reimp ${RES2COFF} scripts unix2dos ++ ++EXTRA_DIST = dos2unix/dos2unix.1 unix2dos/unix2dos.1 + + instdir = /tmp/$(PACKAGE)-$(VERSION) + +diff -ru ../release/mingw-utils-0.3/Makefile.in ./Makefile.in +--- ../release/mingw-utils-0.3/Makefile.in 2003-11-25 08:31:11.000000000 -0800 ++++ ./Makefile.in 2006-09-25 
20:08:14.000000000 -0700 +@@ -1,8 +1,8 @@ +-# Makefile.in generated by automake 1.7.9 from Makefile.am. ++# Makefile.in generated by automake 1.9.6 from Makefile.am. + # @configure_input@ + +-# Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003 +-# Free Software Foundation, Inc. ++# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, ++# 2003, 2004, 2005 Free Software Foundation, Inc. + # This Makefile.in is free software; the Free Software Foundation + # gives unlimited permission to copy and/or distribute it, + # with or without modifications, as long as this notice is preserved. +@@ -13,7 +13,6 @@ + # PARTICULAR PURPOSE. + + @SET_MAKE@ +- + srcdir = @srcdir@ + top_srcdir = @top_srcdir@ + VPATH = @srcdir@ +@@ -21,7 +20,6 @@ + pkglibdir = $(libdir)/@PACKAGE@ + pkgincludedir = $(includedir)/@PACKAGE@ + top_builddir = . +- + am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd + INSTALL = @INSTALL@ + install_sh_DATA = $(install_sh) -c -m 644 +@@ -35,6 +33,43 @@ + NORMAL_UNINSTALL = : + PRE_UNINSTALL = : + POST_UNINSTALL = : ++subdir = . ++DIST_COMMON = README $(am__configure_deps) $(srcdir)/Makefile.am \ ++ $(srcdir)/Makefile.in $(srcdir)/config.h.in \ ++ $(top_srcdir)/configure AUTHORS COPYING ChangeLog INSTALL NEWS \ ++ compile depcomp install-sh missing mkinstalldirs ++ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 ++am__aclocal_m4_deps = $(top_srcdir)/configure.ac ++am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ ++ $(ACLOCAL_M4) ++am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \ ++ configure.lineno configure.status.lineno ++mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs ++CONFIG_HEADER = config.h ++CONFIG_CLEAN_FILES = ++SOURCES = ++DIST_SOURCES = ++RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ ++ html-recursive info-recursive install-data-recursive \ ++ install-exec-recursive install-info-recursive \ ++ install-recursive installcheck-recursive installdirs-recursive \ ++ pdf-recursive ps-recursive uninstall-info-recursive \ ++ uninstall-recursive ++ETAGS = etags ++CTAGS = ctags ++DIST_SUBDIRS = dos2unix drmingw pexports redir reimp res2coff scripts \ ++ unix2dos ++DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) ++distdir = $(PACKAGE)-$(VERSION) ++top_distdir = $(distdir) ++am__remove_distdir = \ ++ { test ! -d $(distdir) \ ++ || { find $(distdir) -type d ! -perm -200 -exec chmod u+w {} ';' \ ++ && rm -fr $(distdir); }; } ++DIST_ARCHIVES = $(distdir).tar.gz ++GZIP_ENV = --best ++distuninstallcheck_listfiles = find . -type f -print ++distcleancheck_listfiles = find . 
-type f -print + ACLOCAL = @ACLOCAL@ + AMDEP_FALSE = @AMDEP_FALSE@ + AMDEP_TRUE = @AMDEP_TRUE@ +@@ -43,6 +78,12 @@ + AUTOHEADER = @AUTOHEADER@ + AUTOMAKE = @AUTOMAKE@ + AWK = @AWK@ ++BUILD_DRMINGW_FALSE = @BUILD_DRMINGW_FALSE@ ++BUILD_DRMINGW_TRUE = @BUILD_DRMINGW_TRUE@ ++BUILD_REDIR_FALSE = @BUILD_REDIR_FALSE@ ++BUILD_REDIR_TRUE = @BUILD_REDIR_TRUE@ ++BUILD_RES2COFF_FALSE = @BUILD_RES2COFF_FALSE@ ++BUILD_RES2COFF_TRUE = @BUILD_RES2COFF_TRUE@ + CC = @CC@ + CCDEPMODE = @CCDEPMODE@ + CFLAGS = @CFLAGS@ +@@ -101,6 +142,8 @@ + am__include = @am__include@ + am__leading_dot = @am__leading_dot@ + am__quote = @am__quote@ ++am__tar = @am__tar@ ++am__untar = @am__untar@ + bindir = @bindir@ + build_alias = @build_alias@ + datadir = @datadir@ +@@ -113,6 +156,7 @@ + libexecdir = @libexecdir@ + localstatedir = @localstatedir@ + mandir = @mandir@ ++mkdir_p = @mkdir_p@ + oldincludedir = @oldincludedir@ + prefix = @prefix@ + program_transform_name = @program_transform_name@ +@@ -120,45 +164,48 @@ + sharedstatedir = @sharedstatedir@ + sysconfdir = @sysconfdir@ + target_alias = @target_alias@ +-SUBDIRS = dos2unix drmingw pexports redir reimp res2coff scripts unix2dos +- ++@BUILD_DRMINGW_TRUE@DRMINGW = drmingw ++@BUILD_REDIR_TRUE@REDIR = redir ++@BUILD_RES2COFF_TRUE@RES2COFF = res2coff ++SUBDIRS = dos2unix $(DRMINGW) pexports ${REDIR} reimp ${RES2COFF} scripts unix2dos ++EXTRA_DIST = dos2unix/dos2unix.1 unix2dos/unix2dos.1 + instdir = /tmp/$(PACKAGE)-$(VERSION) +-subdir = . +-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +-mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs +-CONFIG_HEADER = config.h +-CONFIG_CLEAN_FILES = +-DIST_SOURCES = +- +-RECURSIVE_TARGETS = info-recursive dvi-recursive pdf-recursive \ +- ps-recursive install-info-recursive uninstall-info-recursive \ +- all-recursive install-data-recursive install-exec-recursive \ +- installdirs-recursive install-recursive uninstall-recursive \ +- check-recursive installcheck-recursive +-DIST_COMMON = README $(srcdir)/Makefile.in $(srcdir)/configure AUTHORS \ +- COPYING ChangeLog INSTALL Makefile.am NEWS aclocal.m4 compile \ +- config.h.in configure configure.ac depcomp install-sh missing \ +- mkinstalldirs +-DIST_SUBDIRS = $(SUBDIRS) + all: config.h + $(MAKE) $(AM_MAKEFLAGS) all-recursive + + .SUFFIXES: +- +-am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \ +- configure.lineno +-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ Makefile.am $(top_srcdir)/configure.ac $(ACLOCAL_M4) ++am--refresh: ++ @: ++$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) ++ @for dep in $?; do \ ++ case '$(am__configure_deps)' in \ ++ *$$dep*) \ ++ echo ' cd $(srcdir) && $(AUTOMAKE) --gnu '; \ ++ cd $(srcdir) && $(AUTOMAKE) --gnu \ ++ && exit 0; \ ++ exit 1;; \ ++ esac; \ ++ done; \ ++ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu Makefile'; \ + cd $(top_srcdir) && \ + $(AUTOMAKE) --gnu Makefile +-Makefile: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.in $(top_builddir)/config.status +- cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe) ++.PRECIOUS: Makefile ++Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status ++ @case '$?' 
in \ ++ *config.status*) \ ++ echo ' $(SHELL) ./config.status'; \ ++ $(SHELL) ./config.status;; \ ++ *) \ ++ echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe)'; \ ++ cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe);; \ ++ esac; + +-$(top_builddir)/config.status: $(srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) ++$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + $(SHELL) ./config.status --recheck +-$(srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(srcdir)/configure.ac $(ACLOCAL_M4) $(CONFIGURE_DEPENDENCIES) +- cd $(srcdir) && $(AUTOCONF) + +-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ configure.ac ++$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) ++ cd $(srcdir) && $(AUTOCONF) ++$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS) + + config.h: stamp-h1 +@@ -170,10 +217,10 @@ + stamp-h1: $(srcdir)/config.h.in $(top_builddir)/config.status + @rm -f stamp-h1 + cd $(top_builddir) && $(SHELL) ./config.status config.h +- +-$(srcdir)/config.h.in: @MAINTAINER_MODE_TRUE@ $(top_srcdir)/configure.ac $(ACLOCAL_M4) ++$(srcdir)/config.h.in: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_srcdir) && $(AUTOHEADER) +- touch $(srcdir)/config.h.in ++ rm -f stamp-h1 ++ touch $@ + + distclean-hdr: + -rm -f config.h stamp-h1 +@@ -186,7 +233,13 @@ + # (which will cause the Makefiles to be regenerated when you run `make'); + # (2) otherwise, pass the desired values on the `make' command line. + $(RECURSIVE_TARGETS): +- @set fnord $$MAKEFLAGS; amf=$$2; \ ++ @failcom='exit 1'; \ ++ for f in x $$MAKEFLAGS; do \ ++ case $$f in \ ++ *=* | --[!k]*);; \ ++ *k*) failcom='fail=yes';; \ ++ esac; \ ++ done; \ + dot_seen=no; \ + target=`echo $@ | sed s/-recursive//`; \ + list='$(SUBDIRS)'; for subdir in $$list; do \ +@@ -198,7 +251,7 @@ + local_target="$$target"; \ + fi; \ + (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ +- || case "$$amf" in *=*) exit 1;; *k*) fail=yes;; *) exit 1;; esac; \ ++ || eval $$failcom; \ + done; \ + if test "$$dot_seen" = "no"; then \ + $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ +@@ -206,7 +259,13 @@ + + mostlyclean-recursive clean-recursive distclean-recursive \ + maintainer-clean-recursive: +- @set fnord $$MAKEFLAGS; amf=$$2; \ ++ @failcom='exit 1'; \ ++ for f in x $$MAKEFLAGS; do \ ++ case $$f in \ ++ *=* | --[!k]*);; \ ++ *k*) failcom='fail=yes';; \ ++ esac; \ ++ done; \ + dot_seen=no; \ + case "$@" in \ + distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ +@@ -227,7 +286,7 @@ + local_target="$$target"; \ + fi; \ + (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ +- || case "$$amf" in *=*) exit 1;; *k*) fail=yes;; *) exit 1;; esac; \ ++ || eval $$failcom; \ + done && test -z "$$fail" + tags-recursive: + list='$(SUBDIRS)'; for subdir in $$list; do \ +@@ -238,14 +297,6 @@ + test "$$subdir" = . 
|| (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ + done + +-ETAGS = etags +-ETAGSFLAGS = +- +-CTAGS = ctags +-CTAGSFLAGS = +- +-tags: TAGS +- + ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ +@@ -254,19 +305,22 @@ + $(AWK) ' { files[$$0] = 1; } \ + END { for (i in files) print i; }'`; \ + mkid -fID $$unique ++tags: TAGS + + TAGS: tags-recursive $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + here=`pwd`; \ +- if (etags --etags-include --version) >/dev/null 2>&1; then \ ++ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ + include_option=--etags-include; \ ++ empty_fix=.; \ + else \ + include_option=--include; \ ++ empty_fix=; \ + fi; \ + list='$(SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ +- test -f $$subdir/TAGS && \ ++ test ! -f $$subdir/TAGS || \ + tags="$$tags $$include_option=$$here/$$subdir/TAGS"; \ + fi; \ + done; \ +@@ -276,10 +330,11 @@ + done | \ + $(AWK) ' { files[$$0] = 1; } \ + END { for (i in files) print i; }'`; \ +- test -z "$(ETAGS_ARGS)$$tags$$unique" \ +- || $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ +- $$tags $$unique +- ++ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ ++ test -n "$$unique" || unique=$$empty_fix; \ ++ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ ++ $$tags $$unique; \ ++ fi + ctags: CTAGS + CTAGS: ctags-recursive $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) +@@ -302,24 +357,11 @@ + + distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags +-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +- +-top_distdir = . +-distdir = $(PACKAGE)-$(VERSION) +- +-am__remove_distdir = \ +- { test ! -d $(distdir) \ +- || { find $(distdir) -type d ! -perm -200 -exec chmod u+w {} ';' \ +- && rm -fr $(distdir); }; } +- +-GZIP_ENV = --best +-distuninstallcheck_listfiles = find . -type f -print +-distcleancheck_listfiles = find . -type f -print + + distdir: $(DISTFILES) + $(am__remove_distdir) + mkdir $(distdir) +- $(mkinstalldirs) $(distdir)/scripts ++ $(mkdir_p) $(distdir)/dos2unix $(distdir)/scripts $(distdir)/unix2dos + @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \ + list='$(DISTFILES)'; for file in $$list; do \ +@@ -331,7 +373,7 @@ + dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test "$$dir" != "$$file" && test "$$dir" != "."; then \ + dir="/$$dir"; \ +- $(mkinstalldirs) "$(distdir)$$dir"; \ ++ $(mkdir_p) "$(distdir)$$dir"; \ + else \ + dir=''; \ + fi; \ +@@ -346,15 +388,17 @@ + || exit 1; \ + fi; \ + done +- list='$(SUBDIRS)'; for subdir in $$list; do \ ++ list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ +- test -d $(distdir)/$$subdir \ +- || mkdir $(distdir)/$$subdir \ ++ test -d "$(distdir)/$$subdir" \ ++ || $(mkdir_p) "$(distdir)/$$subdir" \ + || exit 1; \ ++ distdir=`$(am__cd) $(distdir) && pwd`; \ ++ top_distdir=`$(am__cd) $(top_distdir) && pwd`; \ + (cd $$subdir && \ + $(MAKE) $(AM_MAKEFLAGS) \ +- top_distdir="$(top_distdir)" \ +- distdir=../$(distdir)/$$subdir \ ++ top_distdir="$$top_distdir" \ ++ distdir="$$distdir/$$subdir" \ + distdir) \ + || exit 1; \ + fi; \ +@@ -365,19 +409,46 @@ + ! -type d ! 
-perm -444 -exec $(SHELL) $(install_sh) -c -m a+r {} {} \; \ + || chmod -R a+r $(distdir) + dist-gzip: distdir +- $(AMTAR) chof - $(distdir) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz ++ tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz ++ $(am__remove_distdir) ++ ++dist-bzip2: distdir ++ tardir=$(distdir) && $(am__tar) | bzip2 -9 -c >$(distdir).tar.bz2 ++ $(am__remove_distdir) ++ ++dist-tarZ: distdir ++ tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z ++ $(am__remove_distdir) ++ ++dist-shar: distdir ++ shar $(distdir) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).shar.gz ++ $(am__remove_distdir) ++ ++dist-zip: distdir ++ -rm -f $(distdir).zip ++ zip -rq $(distdir).zip $(distdir) + $(am__remove_distdir) + + dist dist-all: distdir +- $(AMTAR) chof - $(distdir) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz ++ tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz + $(am__remove_distdir) + + # This target untars the dist file and tries a VPATH configuration. Then + # it guarantees that the distribution is self-contained by making another + # tarfile. + distcheck: dist +- $(am__remove_distdir) +- GZIP=$(GZIP_ENV) gunzip -c $(distdir).tar.gz | $(AMTAR) xf - ++ case '$(DIST_ARCHIVES)' in \ ++ *.tar.gz*) \ ++ GZIP=$(GZIP_ENV) gunzip -c $(distdir).tar.gz | $(am__untar) ;;\ ++ *.tar.bz2*) \ ++ bunzip2 -c $(distdir).tar.bz2 | $(am__untar) ;;\ ++ *.tar.Z*) \ ++ uncompress -c $(distdir).tar.Z | $(am__untar) ;;\ ++ *.shar.gz*) \ ++ GZIP=$(GZIP_ENV) gunzip -c $(distdir).shar.gz | unshar ;;\ ++ *.zip*) \ ++ unzip $(distdir).zip ;;\ ++ esac + chmod -R a-w $(distdir); chmod a+w $(distdir) + mkdir $(distdir)/_build + mkdir $(distdir)/_inst +@@ -397,19 +468,20 @@ + distuninstallcheck \ + && chmod -R a-w "$$dc_install_base" \ + && ({ \ +- (cd ../.. && $(mkinstalldirs) "$$dc_destdir") \ ++ (cd ../.. 
&& umask 077 && mkdir "$$dc_destdir") \ + && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" install \ + && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" uninstall \ + && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" \ + distuninstallcheck_dir="$$dc_destdir" distuninstallcheck; \ + } || { rm -rf "$$dc_destdir"; exit 1; }) \ + && rm -rf "$$dc_destdir" \ +- && $(MAKE) $(AM_MAKEFLAGS) dist-gzip \ +- && rm -f $(distdir).tar.gz \ ++ && $(MAKE) $(AM_MAKEFLAGS) dist \ ++ && rm -rf $(DIST_ARCHIVES) \ + && $(MAKE) $(AM_MAKEFLAGS) distcleancheck + $(am__remove_distdir) +- @echo "$(distdir).tar.gz is ready for distribution" | \ +- sed 'h;s/./=/g;p;x;p;x' ++ @(echo "$(distdir) archives ready for distribution: "; \ ++ list='$(DIST_ARCHIVES)'; for i in $$list; do echo $$i; done) | \ ++ sed -e '1{h;s/./=/g;p;x;}' -e '$${p;x;}' + distuninstallcheck: + @cd $(distuninstallcheck_dir) \ + && test `$(distuninstallcheck_listfiles) | wc -l` -le 1 \ +@@ -433,7 +505,6 @@ + all-am: Makefile config.h + installdirs: installdirs-recursive + installdirs-am: +- + install: install-recursive + install-exec: install-exec-recursive + install-data: install-data-recursive +@@ -453,7 +524,7 @@ + clean-generic: + + distclean-generic: +- -rm -f $(CONFIG_CLEAN_FILES) ++ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + + maintainer-clean-generic: + @echo "This command is intended for maintainers to use" +@@ -471,6 +542,8 @@ + + dvi-am: + ++html: html-recursive ++ + info: info-recursive + + info-am: +@@ -507,22 +580,20 @@ + + uninstall-info: uninstall-info-recursive + +-.PHONY: $(RECURSIVE_TARGETS) CTAGS GTAGS all all-am check check-am clean \ +- clean-generic clean-recursive ctags ctags-recursive dist \ +- dist-all dist-gzip distcheck distclean distclean-generic \ +- distclean-hdr distclean-recursive distclean-tags distcleancheck \ +- distdir distuninstallcheck dvi dvi-am dvi-recursive info \ +- info-am info-recursive install install-am install-data \ +- install-data-am install-data-recursive install-exec \ +- install-exec-am install-exec-recursive install-info \ +- install-info-am install-info-recursive install-man \ +- install-recursive install-strip installcheck installcheck-am \ +- installdirs installdirs-am installdirs-recursive \ +- maintainer-clean maintainer-clean-generic \ +- maintainer-clean-recursive mostlyclean mostlyclean-generic \ +- mostlyclean-recursive pdf pdf-am pdf-recursive ps ps-am \ +- ps-recursive tags tags-recursive uninstall uninstall-am \ +- uninstall-info-am uninstall-info-recursive uninstall-recursive ++.PHONY: $(RECURSIVE_TARGETS) CTAGS GTAGS all all-am am--refresh check \ ++ check-am clean clean-generic clean-recursive ctags \ ++ ctags-recursive dist dist-all dist-bzip2 dist-gzip dist-shar \ ++ dist-tarZ dist-zip distcheck distclean distclean-generic \ ++ distclean-hdr distclean-recursive distclean-tags \ ++ distcleancheck distdir distuninstallcheck dvi dvi-am html \ ++ html-am info info-am install install-am install-data \ ++ install-data-am install-exec install-exec-am install-info \ ++ install-info-am install-man install-strip installcheck \ ++ installcheck-am installdirs installdirs-am maintainer-clean \ ++ maintainer-clean-generic maintainer-clean-recursive \ ++ mostlyclean mostlyclean-generic mostlyclean-recursive pdf \ ++ pdf-am ps ps-am tags tags-recursive uninstall uninstall-am \ ++ uninstall-info-am + + + sdist: dist +diff -ru ../release/mingw-utils-0.3/pexports/Makefile.in ./pexports/Makefile.in +--- ../release/mingw-utils-0.3/pexports/Makefile.in 2003-11-25 
08:31:11.000000000 -0800 ++++ ./pexports/Makefile.in 2006-09-25 20:08:14.000000000 -0700 +@@ -1,8 +1,8 @@ +-# Makefile.in generated by automake 1.7.9 from Makefile.am. ++# Makefile.in generated by automake 1.9.6 from Makefile.am. + # @configure_input@ + +-# Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003 +-# Free Software Foundation, Inc. ++# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, ++# 2003, 2004, 2005 Free Software Foundation, Inc. + # This Makefile.in is free software; the Free Software Foundation + # gives unlimited permission to copy and/or distribute it, + # with or without modifications, as long as this notice is preserved. +@@ -14,6 +14,7 @@ + + @SET_MAKE@ + ++ + srcdir = @srcdir@ + top_srcdir = @top_srcdir@ + VPATH = @srcdir@ +@@ -21,7 +22,6 @@ + pkglibdir = $(libdir)/@PACKAGE@ + pkgincludedir = $(includedir)/@PACKAGE@ + top_builddir = .. +- + am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd + INSTALL = @INSTALL@ + install_sh_DATA = $(install_sh) -c -m 644 +@@ -35,6 +35,46 @@ + NORMAL_UNINSTALL = : + PRE_UNINSTALL = : + POST_UNINSTALL = : ++bin_PROGRAMS = pexports$(EXEEXT) ++subdir = pexports ++DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ ++ AUTHORS COPYING ChangeLog hlex.c hparse.c ++ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 ++am__aclocal_m4_deps = $(top_srcdir)/configure.ac ++am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ ++ $(ACLOCAL_M4) ++mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs ++CONFIG_HEADER = $(top_builddir)/config.h ++CONFIG_CLEAN_FILES = ++am__installdirs = "$(DESTDIR)$(bindir)" "$(DESTDIR)$(docdir)" ++binPROGRAMS_INSTALL = $(INSTALL_PROGRAM) ++PROGRAMS = $(bin_PROGRAMS) ++am_pexports_OBJECTS = hlex.$(OBJEXT) hparse.$(OBJEXT) \ ++ pexports.$(OBJEXT) str_tree.$(OBJEXT) ++pexports_OBJECTS = $(am_pexports_OBJECTS) ++pexports_LDADD = $(LDADD) ++DEFAULT_INCLUDES = -I. 
-I$(srcdir) -I$(top_builddir) ++depcomp = $(SHELL) $(top_srcdir)/depcomp ++am__depfiles_maybe = depfiles ++COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ ++ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) ++CCLD = $(CC) ++LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ ++LEXCOMPILE = $(LEX) $(LFLAGS) $(AM_LFLAGS) ++YACCCOMPILE = $(YACC) $(YFLAGS) $(AM_YFLAGS) ++SOURCES = $(pexports_SOURCES) ++DIST_SOURCES = $(pexports_SOURCES) ++am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; ++am__vpath_adj = case $$p in \ ++ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ ++ *) f=$$p;; \ ++ esac; ++am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; ++docDATA_INSTALL = $(INSTALL_DATA) ++DATA = $(doc_DATA) ++ETAGS = etags ++CTAGS = ctags ++DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) + ACLOCAL = @ACLOCAL@ + AMDEP_FALSE = @AMDEP_FALSE@ + AMDEP_TRUE = @AMDEP_TRUE@ +@@ -43,6 +83,12 @@ + AUTOHEADER = @AUTOHEADER@ + AUTOMAKE = @AUTOMAKE@ + AWK = @AWK@ ++BUILD_DRMINGW_FALSE = @BUILD_DRMINGW_FALSE@ ++BUILD_DRMINGW_TRUE = @BUILD_DRMINGW_TRUE@ ++BUILD_REDIR_FALSE = @BUILD_REDIR_FALSE@ ++BUILD_REDIR_TRUE = @BUILD_REDIR_TRUE@ ++BUILD_RES2COFF_FALSE = @BUILD_RES2COFF_FALSE@ ++BUILD_RES2COFF_TRUE = @BUILD_RES2COFF_TRUE@ + CC = @CC@ + CCDEPMODE = @CCDEPMODE@ + CFLAGS = @CFLAGS@ +@@ -101,6 +147,8 @@ + am__include = @am__include@ + am__leading_dot = @am__leading_dot@ + am__quote = @am__quote@ ++am__tar = @am__tar@ ++am__untar = @am__untar@ + bindir = @bindir@ + build_alias = @build_alias@ + datadir = @datadir@ +@@ -113,6 +161,7 @@ + libexecdir = @libexecdir@ + localstatedir = @localstatedir@ + mandir = @mandir@ ++mkdir_p = @mkdir_p@ + oldincludedir = @oldincludedir@ + prefix = @prefix@ + program_transform_name = @program_transform_name@ +@@ -120,66 +169,52 @@ + sharedstatedir = @sharedstatedir@ + sysconfdir = @sysconfdir@ + target_alias = @target_alias@ +-bin_PROGRAMS = pexports +- + pexports_SOURCES = hlex.l hparse.h hparse.y pe.h pexports.c pexports.h str_tree.c str_tree.h +- + docdir = $(prefix)/doc/pexports +- + doc_DATA = AUTHORS COPYING README +-subdir = pexports +-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +-mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs +-CONFIG_HEADER = $(top_builddir)/config.h +-CONFIG_CLEAN_FILES = +-bin_PROGRAMS = pexports$(EXEEXT) +-PROGRAMS = $(bin_PROGRAMS) +- +-am_pexports_OBJECTS = hlex.$(OBJEXT) hparse.$(OBJEXT) pexports.$(OBJEXT) \ +- str_tree.$(OBJEXT) +-pexports_OBJECTS = $(am_pexports_OBJECTS) +-pexports_LDADD = $(LDADD) +-pexports_DEPENDENCIES = +-pexports_LDFLAGS = +- +-DEFAULT_INCLUDES = -I. 
-I$(srcdir) -I$(top_builddir) +-depcomp = $(SHELL) $(top_srcdir)/depcomp +-am__depfiles_maybe = depfiles +-@AMDEP_TRUE@DEP_FILES = ./$(DEPDIR)/hlex.Po ./$(DEPDIR)/hparse.Po \ +-@AMDEP_TRUE@ ./$(DEPDIR)/pexports.Po ./$(DEPDIR)/str_tree.Po +-COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ +- $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +-CCLD = $(CC) +-LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ +-LEXCOMPILE = $(LEX) $(LFLAGS) $(AM_LFLAGS) +-YACCCOMPILE = $(YACC) $(YFLAGS) $(AM_YFLAGS) +-DIST_SOURCES = $(pexports_SOURCES) +-DATA = $(doc_DATA) +- +-DIST_COMMON = README $(srcdir)/Makefile.in AUTHORS COPYING ChangeLog \ +- Makefile.am hlex.c hparse.c +-SOURCES = $(pexports_SOURCES) +- + all: all-am + + .SUFFIXES: + .SUFFIXES: .c .l .o .obj .y +-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ Makefile.am $(top_srcdir)/configure.ac $(ACLOCAL_M4) ++$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) ++ @for dep in $?; do \ ++ case '$(am__configure_deps)' in \ ++ *$$dep*) \ ++ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ ++ && exit 0; \ ++ exit 1;; \ ++ esac; \ ++ done; \ ++ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu pexports/Makefile'; \ + cd $(top_srcdir) && \ + $(AUTOMAKE) --gnu pexports/Makefile +-Makefile: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.in $(top_builddir)/config.status +- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe) +-binPROGRAMS_INSTALL = $(INSTALL_PROGRAM) ++.PRECIOUS: Makefile ++Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status ++ @case '$?' in \ ++ *config.status*) \ ++ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ ++ *) \ ++ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ ++ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ ++ esac; ++ ++$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) ++ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ++ ++$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) ++ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ++$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) ++ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + install-binPROGRAMS: $(bin_PROGRAMS) + @$(NORMAL_INSTALL) +- $(mkinstalldirs) $(DESTDIR)$(bindir) ++ test -z "$(bindir)" || $(mkdir_p) "$(DESTDIR)$(bindir)" + @list='$(bin_PROGRAMS)'; for p in $$list; do \ + p1=`echo $$p|sed 's/$(EXEEXT)$$//'`; \ + if test -f $$p \ + ; then \ + f=`echo "$$p1" | sed 's,^.*/,,;$(transform);s/$$/$(EXEEXT)/'`; \ +- echo " $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) $$p $(DESTDIR)$(bindir)/$$f"; \ +- $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) $$p $(DESTDIR)$(bindir)/$$f || exit 1; \ ++ echo " $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ ++ $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ + else :; fi; \ + done + +@@ -187,8 +222,8 @@ + @$(NORMAL_UNINSTALL) + @list='$(bin_PROGRAMS)'; for p in $$list; do \ + f=`echo "$$p" | sed 's,^.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/'`; \ +- echo " rm -f $(DESTDIR)$(bindir)/$$f"; \ +- rm -f $(DESTDIR)$(bindir)/$$f; \ ++ echo " rm -f '$(DESTDIR)$(bindir)/$$f'"; \ ++ rm -f "$(DESTDIR)$(bindir)/$$f"; \ + done + + clean-binPROGRAMS: +@@ -198,7 +233,7 @@ + $(LINK) $(pexports_LDFLAGS) $(pexports_OBJECTS) $(pexports_LDADD) $(LIBS) + + mostlyclean-compile: +- 
-rm -f *.$(OBJEXT) core *.core ++ -rm -f *.$(OBJEXT) + + distclean-compile: + -rm -f *.tab.c +@@ -209,39 +244,32 @@ + @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/str_tree.Po@am__quote@ + + .c.o: +-@am__fastdepCC_TRUE@ if $(COMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" \ +-@am__fastdepCC_TRUE@ -c -o $@ `test -f '$<' || echo '$(srcdir)/'`$<; \ +-@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; \ +-@am__fastdepCC_TRUE@ else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; \ +-@am__fastdepCC_TRUE@ fi ++@am__fastdepCC_TRUE@ if $(COMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" -c -o $@ $<; \ ++@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; fi + @AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ depfile='$(DEPDIR)/$*.Po' tmpdepfile='$(DEPDIR)/$*.TPo' @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +-@am__fastdepCC_FALSE@ $(COMPILE) -c `test -f '$<' || echo '$(srcdir)/'`$< ++@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ ++@am__fastdepCC_FALSE@ $(COMPILE) -c $< + + .c.obj: +-@am__fastdepCC_TRUE@ if $(COMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" \ +-@am__fastdepCC_TRUE@ -c -o $@ `if test -f '$<'; then $(CYGPATH_W) '$<'; else $(CYGPATH_W) '$(srcdir)/$<'; fi`; \ +-@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; \ +-@am__fastdepCC_TRUE@ else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; \ +-@am__fastdepCC_TRUE@ fi ++@am__fastdepCC_TRUE@ if $(COMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" -c -o $@ `$(CYGPATH_W) '$<'`; \ ++@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; fi + @AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ depfile='$(DEPDIR)/$*.Po' tmpdepfile='$(DEPDIR)/$*.TPo' @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +-@am__fastdepCC_FALSE@ $(COMPILE) -c `if test -f '$<'; then $(CYGPATH_W) '$<'; else $(CYGPATH_W) '$(srcdir)/$<'; fi` ++@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ ++@am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'` + + .l.c: +- $(LEXCOMPILE) `test -f $< || echo '$(srcdir)/'`$< ++ $(LEXCOMPILE) $< + sed '/^#/ s|$(LEX_OUTPUT_ROOT)\.c|$@|' $(LEX_OUTPUT_ROOT).c >$@ + rm -f $(LEX_OUTPUT_ROOT).c + + .y.c: +- $(YACCCOMPILE) `test -f '$<' || echo '$(srcdir)/'`$< ++ $(YACCCOMPILE) $< + if test -f y.tab.h; then \ + to=`echo "$*_H" | sed \ + -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \ + -e 's/[^ABCDEFGHIJKLMNOPQRSTUVWXYZ]/_/g'`; \ +- sed "/^#/ s/Y_TAB_H/$$to/g" y.tab.h >$*.ht; \ ++ sed -e "/^#/!b" -e "s/Y_TAB_H/$$to/g" -e "s|y\.tab\.h|$*.h|" \ ++ y.tab.h >$*.ht; \ + rm -f y.tab.h; \ + if cmp -s $*.ht $*.h; then \ + rm -f $*.ht ;\ +@@ -255,33 +283,24 @@ + sed '/^#/ s|y\.tab\.c|$@|' y.tab.c >$@t && mv $@t $@ + rm -f y.tab.c + uninstall-info-am: +-docDATA_INSTALL = $(INSTALL_DATA) + install-docDATA: $(doc_DATA) + @$(NORMAL_INSTALL) +- $(mkinstalldirs) $(DESTDIR)$(docdir) ++ test -z "$(docdir)" || $(mkdir_p) "$(DESTDIR)$(docdir)" + @list='$(doc_DATA)'; for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ +- f="`echo $$p | sed -e 's|^.*/||'`"; \ +- echo " $(docDATA_INSTALL) $$d$$p $(DESTDIR)$(docdir)/$$f"; \ +- $(docDATA_INSTALL) $$d$$p $(DESTDIR)$(docdir)/$$f; \ ++ 
f=$(am__strip_dir) \ ++ echo " $(docDATA_INSTALL) '$$d$$p' '$(DESTDIR)$(docdir)/$$f'"; \ ++ $(docDATA_INSTALL) "$$d$$p" "$(DESTDIR)$(docdir)/$$f"; \ + done + + uninstall-docDATA: + @$(NORMAL_UNINSTALL) + @list='$(doc_DATA)'; for p in $$list; do \ +- f="`echo $$p | sed -e 's|^.*/||'`"; \ +- echo " rm -f $(DESTDIR)$(docdir)/$$f"; \ +- rm -f $(DESTDIR)$(docdir)/$$f; \ ++ f=$(am__strip_dir) \ ++ echo " rm -f '$(DESTDIR)$(docdir)/$$f'"; \ ++ rm -f "$(DESTDIR)$(docdir)/$$f"; \ + done + +-ETAGS = etags +-ETAGSFLAGS = +- +-CTAGS = ctags +-CTAGSFLAGS = +- +-tags: TAGS +- + ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ +@@ -290,6 +309,7 @@ + $(AWK) ' { files[$$0] = 1; } \ + END { for (i in files) print i; }'`; \ + mkid -fID $$unique ++tags: TAGS + + TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) +@@ -301,10 +321,11 @@ + done | \ + $(AWK) ' { files[$$0] = 1; } \ + END { for (i in files) print i; }'`; \ +- test -z "$(ETAGS_ARGS)$$tags$$unique" \ +- || $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ +- $$tags $$unique +- ++ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ ++ test -n "$$unique" || unique=$$empty_fix; \ ++ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ ++ $$tags $$unique; \ ++ fi + ctags: CTAGS + CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) +@@ -327,10 +348,6 @@ + + distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags +-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +- +-top_distdir = .. +-distdir = $(top_distdir)/$(PACKAGE)-$(VERSION) + + distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \ +@@ -344,7 +361,7 @@ + dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test "$$dir" != "$$file" && test "$$dir" != "."; then \ + dir="/$$dir"; \ +- $(mkinstalldirs) "$(distdir)$$dir"; \ ++ $(mkdir_p) "$(distdir)$$dir"; \ + else \ + dir=''; \ + fi; \ +@@ -362,9 +379,10 @@ + check-am: all-am + check: check-am + all-am: Makefile $(PROGRAMS) $(DATA) +- + installdirs: +- $(mkinstalldirs) $(DESTDIR)$(bindir) $(DESTDIR)$(docdir) ++ for dir in "$(DESTDIR)$(bindir)" "$(DESTDIR)$(docdir)"; do \ ++ test -z "$$dir" || $(mkdir_p) "$$dir"; \ ++ done + install: install-am + install-exec: install-exec-am + install-data: install-data-am +@@ -384,7 +402,7 @@ + clean-generic: + + distclean-generic: +- -rm -f $(CONFIG_CLEAN_FILES) ++ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + + maintainer-clean-generic: + @echo "This command is intended for maintainers to use" +@@ -405,6 +423,8 @@ + + dvi-am: + ++html: html-am ++ + info: info-am + + info-am: +@@ -436,19 +456,20 @@ + + ps-am: + +-uninstall-am: uninstall-binPROGRAMS uninstall-docDATA uninstall-info-am ++uninstall-am: uninstall-binPROGRAMS uninstall-docDATA \ ++ uninstall-info-am + + .PHONY: CTAGS GTAGS all all-am check check-am clean clean-binPROGRAMS \ + clean-generic ctags distclean distclean-compile \ +- distclean-generic distclean-tags distdir dvi dvi-am info \ +- info-am install install-am install-binPROGRAMS install-data \ +- install-data-am install-docDATA install-exec install-exec-am \ +- install-info install-info-am install-man install-strip \ +- installcheck installcheck-am installdirs maintainer-clean \ +- maintainer-clean-generic mostlyclean mostlyclean-compile \ +- mostlyclean-generic pdf pdf-am ps ps-am tags uninstall \ +- uninstall-am uninstall-binPROGRAMS uninstall-docDATA \ +- uninstall-info-am ++ 
distclean-generic distclean-tags distdir dvi dvi-am html \ ++ html-am info info-am install install-am install-binPROGRAMS \ ++ install-data install-data-am install-docDATA install-exec \ ++ install-exec-am install-info install-info-am install-man \ ++ install-strip installcheck installcheck-am installdirs \ ++ maintainer-clean maintainer-clean-generic mostlyclean \ ++ mostlyclean-compile mostlyclean-generic pdf pdf-am ps ps-am \ ++ tags uninstall uninstall-am uninstall-binPROGRAMS \ ++ uninstall-docDATA uninstall-info-am + + # Tell versions [3.59,3.63) of GNU make to not export all variables. + # Otherwise a system limit (for SysV at least) may be exceeded. +diff -ru ../release/mingw-utils-0.3/pexports/pexports.c ./pexports/pexports.c +--- ../release/mingw-utils-0.3/pexports/pexports.c 2003-11-25 07:23:09.000000000 -0800 ++++ ./pexports/pexports.c 2006-08-11 05:25:25.000000000 -0700 +@@ -19,7 +19,7 @@ + #endif + + /* get pointer to section header n */ +-#define IMAGE_SECTION_HDR(n) ((PIMAGE_SECTION_HEADER) ((DWORD) nt_hdr + \ ++#define IMAGE_SECTION_HDR(n) ((PIMAGE_SECTION_HEADER) ((ULONG_PTR) nt_hdr + \ + 4 + sizeof(IMAGE_FILE_HEADER) + \ + nt_hdr->FileHeader.SizeOfOptionalHeader + \ + n * sizeof(IMAGE_SECTION_HEADER))) +@@ -146,7 +146,7 @@ + return 1; + } + +- nt_hdr = (PIMAGE_NT_HEADERS) ((DWORD) dos_hdr + dos_hdr->e_lfanew); ++ nt_hdr = (PIMAGE_NT_HEADERS) ((ULONG_PTR) dos_hdr + dos_hdr->e_lfanew); + + exp_rva = nt_hdr->OptionalHeader.DataDirectory[0].VirtualAddress; + +@@ -181,7 +181,7 @@ + PIMAGE_EXPORT_DIRECTORY exports; + char *export_name; + PWORD ordinal_table; +- char **name_table; ++ DWORD *name_table; + DWORD *function_table; + int i; + static int first = 1; +@@ -203,7 +203,7 @@ + /* set up various pointers */ + export_name = RVA_TO_PTR(exports->Name,char*); + ordinal_table = RVA_TO_PTR(exports->AddressOfNameOrdinals,PWORD); +- name_table = RVA_TO_PTR(exports->AddressOfNames,char**); ++ name_table = RVA_TO_PTR(exports->AddressOfNames,DWORD*); + function_table = RVA_TO_PTR(exports->AddressOfFunctions,void*); + + if (verbose) +@@ -297,14 +297,14 @@ + } + + /* convert rva to pointer into loaded file */ +-DWORD ++ULONG_PTR + rva_to_ptr(DWORD rva) + { + PIMAGE_SECTION_HEADER section = find_section(rva); + if (section->PointerToRawData == 0) + return 0; + else +- return ((DWORD) dos_hdr + (DWORD) rva - (section->VirtualAddress - section->PointerToRawData)); ++ return ((ULONG_PTR) dos_hdr + (DWORD) rva - (section->VirtualAddress - section->PointerToRawData)); + } + + /* Load a portable executable into memory */ +diff -ru ../release/mingw-utils-0.3/pexports/pexports.h ./pexports/pexports.h +--- ../release/mingw-utils-0.3/pexports/pexports.h 2002-05-26 03:13:58.000000000 -0700 ++++ ./pexports/pexports.h 2006-08-11 05:25:25.000000000 -0700 +@@ -21,12 +21,26 @@ + #define VER_MINOR 43 + + /* These are needed */ +-typedef unsigned short WORD; +-typedef unsigned int DWORD; +-typedef unsigned char BYTE; +-typedef long LONG; +-typedef WORD *PWORD; +-typedef DWORD *PDWORD; ++typedef unsigned short WORD, *PWORD; ++typedef unsigned char BYTE, *PBYTE; ++ ++#if SIZEOF_LONG == 4 ++typedef unsigned long DWORD, *PDWORD; ++typedef long LONG, *PLONG; ++#else ++typedef unsigned int DWORD, *PDWORD; ++typedef int LONG, *PLONG; ++#endif ++ ++#if SIZEOF_LONG == SIZEOF_VOID_P ++typedef unsigned long ULONG_PTR; ++#elif SIZEOF_LONG_LONG == SIZEOF_VOID_P ++typedef unsigned long long ULONG_PTR; ++#elif SIZEOF__INT64 == SIZEOF_VOID_P ++typedef unsigned _int64 ULONG_PTR; ++#else ++typedef unsigned int ULONG_PTR; 
++#endif + + /* PE structures */ + typedef struct _IMAGE_DATA_DIRECTORY { +@@ -111,9 +125,9 @@ + DWORD Base; + DWORD NumberOfFunctions; + DWORD NumberOfNames; +- PDWORD *AddressOfFunctions; +- PDWORD *AddressOfNames; +- PWORD *AddressOfNameOrdinals; ++ DWORD AddressOfFunctions; ++ DWORD AddressOfNames; ++ DWORD AddressOfNameOrdinals; + } IMAGE_EXPORT_DIRECTORY, *PIMAGE_EXPORT_DIRECTORY; + + typedef struct _IMAGE_DOS_HEADER { +@@ -144,7 +158,7 @@ + PIMAGE_DOS_HEADER + load_pe_image(const char *filename); + +-DWORD ++ULONG_PTR + rva_to_ptr(DWORD rva); + + void +diff -ru ../release/mingw-utils-0.3/redir/Makefile.in ./redir/Makefile.in +--- ../release/mingw-utils-0.3/redir/Makefile.in 2003-11-25 08:31:12.000000000 -0800 ++++ ./redir/Makefile.in 2006-09-25 20:08:14.000000000 -0700 +@@ -1,8 +1,8 @@ +-# Makefile.in generated by automake 1.7.9 from Makefile.am. ++# Makefile.in generated by automake 1.9.6 from Makefile.am. + # @configure_input@ + +-# Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003 +-# Free Software Foundation, Inc. ++# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, ++# 2003, 2004, 2005 Free Software Foundation, Inc. + # This Makefile.in is free software; the Free Software Foundation + # gives unlimited permission to copy and/or distribute it, + # with or without modifications, as long as this notice is preserved. +@@ -14,6 +14,7 @@ + + @SET_MAKE@ + ++ + srcdir = @srcdir@ + top_srcdir = @top_srcdir@ + VPATH = @srcdir@ +@@ -21,7 +22,6 @@ + pkglibdir = $(libdir)/@PACKAGE@ + pkgincludedir = $(includedir)/@PACKAGE@ + top_builddir = .. +- + am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd + INSTALL = @INSTALL@ + install_sh_DATA = $(install_sh) -c -m 644 +@@ -35,6 +35,42 @@ + NORMAL_UNINSTALL = : + PRE_UNINSTALL = : + POST_UNINSTALL = : ++bin_PROGRAMS = redir$(EXEEXT) ++subdir = redir ++DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ++ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 ++am__aclocal_m4_deps = $(top_srcdir)/configure.ac ++am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ ++ $(ACLOCAL_M4) ++mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs ++CONFIG_HEADER = $(top_builddir)/config.h ++CONFIG_CLEAN_FILES = ++am__installdirs = "$(DESTDIR)$(bindir)" "$(DESTDIR)$(docdir)" ++binPROGRAMS_INSTALL = $(INSTALL_PROGRAM) ++PROGRAMS = $(bin_PROGRAMS) ++am_redir_OBJECTS = redir.$(OBJEXT) ++redir_OBJECTS = $(am_redir_OBJECTS) ++redir_LDADD = $(LDADD) ++DEFAULT_INCLUDES = -I. 
-I$(srcdir) -I$(top_builddir) ++depcomp = $(SHELL) $(top_srcdir)/depcomp ++am__depfiles_maybe = depfiles ++COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ ++ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) ++CCLD = $(CC) ++LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ ++SOURCES = $(redir_SOURCES) ++DIST_SOURCES = $(redir_SOURCES) ++am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; ++am__vpath_adj = case $$p in \ ++ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ ++ *) f=$$p;; \ ++ esac; ++am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; ++docDATA_INSTALL = $(INSTALL_DATA) ++DATA = $(doc_DATA) ++ETAGS = etags ++CTAGS = ctags ++DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) + ACLOCAL = @ACLOCAL@ + AMDEP_FALSE = @AMDEP_FALSE@ + AMDEP_TRUE = @AMDEP_TRUE@ +@@ -43,6 +79,12 @@ + AUTOHEADER = @AUTOHEADER@ + AUTOMAKE = @AUTOMAKE@ + AWK = @AWK@ ++BUILD_DRMINGW_FALSE = @BUILD_DRMINGW_FALSE@ ++BUILD_DRMINGW_TRUE = @BUILD_DRMINGW_TRUE@ ++BUILD_REDIR_FALSE = @BUILD_REDIR_FALSE@ ++BUILD_REDIR_TRUE = @BUILD_REDIR_TRUE@ ++BUILD_RES2COFF_FALSE = @BUILD_RES2COFF_FALSE@ ++BUILD_RES2COFF_TRUE = @BUILD_RES2COFF_TRUE@ + CC = @CC@ + CCDEPMODE = @CCDEPMODE@ + CFLAGS = @CFLAGS@ +@@ -101,6 +143,8 @@ + am__include = @am__include@ + am__leading_dot = @am__leading_dot@ + am__quote = @am__quote@ ++am__tar = @am__tar@ ++am__untar = @am__untar@ + bindir = @bindir@ + build_alias = @build_alias@ + datadir = @datadir@ +@@ -113,6 +157,7 @@ + libexecdir = @libexecdir@ + localstatedir = @localstatedir@ + mandir = @mandir@ ++mkdir_p = @mkdir_p@ + oldincludedir = @oldincludedir@ + prefix = @prefix@ + program_transform_name = @program_transform_name@ +@@ -120,61 +165,52 @@ + sharedstatedir = @sharedstatedir@ + sysconfdir = @sysconfdir@ + target_alias = @target_alias@ +-bin_PROGRAMS = redir +- + redir_SOURCES = redir.c +- + docdir = $(prefix)/doc/redir +- + doc_DATA = README +-subdir = redir +-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +-mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs +-CONFIG_HEADER = $(top_builddir)/config.h +-CONFIG_CLEAN_FILES = +-bin_PROGRAMS = redir$(EXEEXT) +-PROGRAMS = $(bin_PROGRAMS) +- +-am_redir_OBJECTS = redir.$(OBJEXT) +-redir_OBJECTS = $(am_redir_OBJECTS) +-redir_LDADD = $(LDADD) +-redir_DEPENDENCIES = +-redir_LDFLAGS = +- +-DEFAULT_INCLUDES = -I. 
-I$(srcdir) -I$(top_builddir) +-depcomp = $(SHELL) $(top_srcdir)/depcomp +-am__depfiles_maybe = depfiles +-@AMDEP_TRUE@DEP_FILES = ./$(DEPDIR)/redir.Po +-COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ +- $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +-CCLD = $(CC) +-LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ +-DIST_SOURCES = $(redir_SOURCES) +-DATA = $(doc_DATA) +- +-DIST_COMMON = README $(srcdir)/Makefile.in Makefile.am +-SOURCES = $(redir_SOURCES) +- + all: all-am + + .SUFFIXES: + .SUFFIXES: .c .o .obj +-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ Makefile.am $(top_srcdir)/configure.ac $(ACLOCAL_M4) ++$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) ++ @for dep in $?; do \ ++ case '$(am__configure_deps)' in \ ++ *$$dep*) \ ++ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ ++ && exit 0; \ ++ exit 1;; \ ++ esac; \ ++ done; \ ++ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu redir/Makefile'; \ + cd $(top_srcdir) && \ + $(AUTOMAKE) --gnu redir/Makefile +-Makefile: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.in $(top_builddir)/config.status +- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe) +-binPROGRAMS_INSTALL = $(INSTALL_PROGRAM) ++.PRECIOUS: Makefile ++Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status ++ @case '$?' in \ ++ *config.status*) \ ++ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ ++ *) \ ++ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ ++ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ ++ esac; ++ ++$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) ++ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ++ ++$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) ++ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ++$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) ++ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + install-binPROGRAMS: $(bin_PROGRAMS) + @$(NORMAL_INSTALL) +- $(mkinstalldirs) $(DESTDIR)$(bindir) ++ test -z "$(bindir)" || $(mkdir_p) "$(DESTDIR)$(bindir)" + @list='$(bin_PROGRAMS)'; for p in $$list; do \ + p1=`echo $$p|sed 's/$(EXEEXT)$$//'`; \ + if test -f $$p \ + ; then \ + f=`echo "$$p1" | sed 's,^.*/,,;$(transform);s/$$/$(EXEEXT)/'`; \ +- echo " $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) $$p $(DESTDIR)$(bindir)/$$f"; \ +- $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) $$p $(DESTDIR)$(bindir)/$$f || exit 1; \ ++ echo " $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ ++ $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ + else :; fi; \ + done + +@@ -182,8 +218,8 @@ + @$(NORMAL_UNINSTALL) + @list='$(bin_PROGRAMS)'; for p in $$list; do \ + f=`echo "$$p" | sed 's,^.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/'`; \ +- echo " rm -f $(DESTDIR)$(bindir)/$$f"; \ +- rm -f $(DESTDIR)$(bindir)/$$f; \ ++ echo " rm -f '$(DESTDIR)$(bindir)/$$f'"; \ ++ rm -f "$(DESTDIR)$(bindir)/$$f"; \ + done + + clean-binPROGRAMS: +@@ -193,7 +229,7 @@ + $(LINK) $(redir_LDFLAGS) $(redir_OBJECTS) $(redir_LDADD) $(LIBS) + + mostlyclean-compile: +- -rm -f *.$(OBJEXT) core *.core ++ -rm -f *.$(OBJEXT) + + distclean-compile: + -rm -f *.tab.c +@@ -201,54 +237,37 @@ + @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/redir.Po@am__quote@ + + .c.o: +-@am__fastdepCC_TRUE@ if $(COMPILE) -MT $@ -MD -MP -MF 
"$(DEPDIR)/$*.Tpo" \ +-@am__fastdepCC_TRUE@ -c -o $@ `test -f '$<' || echo '$(srcdir)/'`$<; \ +-@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; \ +-@am__fastdepCC_TRUE@ else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; \ +-@am__fastdepCC_TRUE@ fi ++@am__fastdepCC_TRUE@ if $(COMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" -c -o $@ $<; \ ++@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; fi + @AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ depfile='$(DEPDIR)/$*.Po' tmpdepfile='$(DEPDIR)/$*.TPo' @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +-@am__fastdepCC_FALSE@ $(COMPILE) -c `test -f '$<' || echo '$(srcdir)/'`$< ++@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ ++@am__fastdepCC_FALSE@ $(COMPILE) -c $< + + .c.obj: +-@am__fastdepCC_TRUE@ if $(COMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" \ +-@am__fastdepCC_TRUE@ -c -o $@ `if test -f '$<'; then $(CYGPATH_W) '$<'; else $(CYGPATH_W) '$(srcdir)/$<'; fi`; \ +-@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; \ +-@am__fastdepCC_TRUE@ else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; \ +-@am__fastdepCC_TRUE@ fi ++@am__fastdepCC_TRUE@ if $(COMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" -c -o $@ `$(CYGPATH_W) '$<'`; \ ++@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; fi + @AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ depfile='$(DEPDIR)/$*.Po' tmpdepfile='$(DEPDIR)/$*.TPo' @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +-@am__fastdepCC_FALSE@ $(COMPILE) -c `if test -f '$<'; then $(CYGPATH_W) '$<'; else $(CYGPATH_W) '$(srcdir)/$<'; fi` ++@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ ++@am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'` + uninstall-info-am: +-docDATA_INSTALL = $(INSTALL_DATA) + install-docDATA: $(doc_DATA) + @$(NORMAL_INSTALL) +- $(mkinstalldirs) $(DESTDIR)$(docdir) ++ test -z "$(docdir)" || $(mkdir_p) "$(DESTDIR)$(docdir)" + @list='$(doc_DATA)'; for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ +- f="`echo $$p | sed -e 's|^.*/||'`"; \ +- echo " $(docDATA_INSTALL) $$d$$p $(DESTDIR)$(docdir)/$$f"; \ +- $(docDATA_INSTALL) $$d$$p $(DESTDIR)$(docdir)/$$f; \ ++ f=$(am__strip_dir) \ ++ echo " $(docDATA_INSTALL) '$$d$$p' '$(DESTDIR)$(docdir)/$$f'"; \ ++ $(docDATA_INSTALL) "$$d$$p" "$(DESTDIR)$(docdir)/$$f"; \ + done + + uninstall-docDATA: + @$(NORMAL_UNINSTALL) + @list='$(doc_DATA)'; for p in $$list; do \ +- f="`echo $$p | sed -e 's|^.*/||'`"; \ +- echo " rm -f $(DESTDIR)$(docdir)/$$f"; \ +- rm -f $(DESTDIR)$(docdir)/$$f; \ ++ f=$(am__strip_dir) \ ++ echo " rm -f '$(DESTDIR)$(docdir)/$$f'"; \ ++ rm -f "$(DESTDIR)$(docdir)/$$f"; \ + done + +-ETAGS = etags +-ETAGSFLAGS = +- +-CTAGS = ctags +-CTAGSFLAGS = +- +-tags: TAGS +- + ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ +@@ -257,6 +276,7 @@ + $(AWK) ' { files[$$0] = 1; } \ + END { for (i in files) print i; }'`; \ + mkid -fID $$unique ++tags: TAGS + + TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) +@@ -268,10 +288,11 @@ + done | \ + $(AWK) ' { files[$$0] = 1; } \ + END { for (i in 
files) print i; }'`; \ +- test -z "$(ETAGS_ARGS)$$tags$$unique" \ +- || $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ +- $$tags $$unique +- ++ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ ++ test -n "$$unique" || unique=$$empty_fix; \ ++ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ ++ $$tags $$unique; \ ++ fi + ctags: CTAGS + CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) +@@ -294,10 +315,6 @@ + + distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags +-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +- +-top_distdir = .. +-distdir = $(top_distdir)/$(PACKAGE)-$(VERSION) + + distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \ +@@ -311,7 +328,7 @@ + dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test "$$dir" != "$$file" && test "$$dir" != "."; then \ + dir="/$$dir"; \ +- $(mkinstalldirs) "$(distdir)$$dir"; \ ++ $(mkdir_p) "$(distdir)$$dir"; \ + else \ + dir=''; \ + fi; \ +@@ -329,9 +346,10 @@ + check-am: all-am + check: check-am + all-am: Makefile $(PROGRAMS) $(DATA) +- + installdirs: +- $(mkinstalldirs) $(DESTDIR)$(bindir) $(DESTDIR)$(docdir) ++ for dir in "$(DESTDIR)$(bindir)" "$(DESTDIR)$(docdir)"; do \ ++ test -z "$$dir" || $(mkdir_p) "$$dir"; \ ++ done + install: install-am + install-exec: install-exec-am + install-data: install-data-am +@@ -351,7 +369,7 @@ + clean-generic: + + distclean-generic: +- -rm -f $(CONFIG_CLEAN_FILES) ++ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + + maintainer-clean-generic: + @echo "This command is intended for maintainers to use" +@@ -370,6 +388,8 @@ + + dvi-am: + ++html: html-am ++ + info: info-am + + info-am: +@@ -401,19 +421,20 @@ + + ps-am: + +-uninstall-am: uninstall-binPROGRAMS uninstall-docDATA uninstall-info-am ++uninstall-am: uninstall-binPROGRAMS uninstall-docDATA \ ++ uninstall-info-am + + .PHONY: CTAGS GTAGS all all-am check check-am clean clean-binPROGRAMS \ + clean-generic ctags distclean distclean-compile \ +- distclean-generic distclean-tags distdir dvi dvi-am info \ +- info-am install install-am install-binPROGRAMS install-data \ +- install-data-am install-docDATA install-exec install-exec-am \ +- install-info install-info-am install-man install-strip \ +- installcheck installcheck-am installdirs maintainer-clean \ +- maintainer-clean-generic mostlyclean mostlyclean-compile \ +- mostlyclean-generic pdf pdf-am ps ps-am tags uninstall \ +- uninstall-am uninstall-binPROGRAMS uninstall-docDATA \ +- uninstall-info-am ++ distclean-generic distclean-tags distdir dvi dvi-am html \ ++ html-am info info-am install install-am install-binPROGRAMS \ ++ install-data install-data-am install-docDATA install-exec \ ++ install-exec-am install-info install-info-am install-man \ ++ install-strip installcheck installcheck-am installdirs \ ++ maintainer-clean maintainer-clean-generic mostlyclean \ ++ mostlyclean-compile mostlyclean-generic pdf pdf-am ps ps-am \ ++ tags uninstall uninstall-am uninstall-binPROGRAMS \ ++ uninstall-docDATA uninstall-info-am + + # Tell versions [3.59,3.63) of GNU make to not export all variables. + # Otherwise a system limit (for SysV at least) may be exceeded. 
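
The pexports.c and pexports.h hunks earlier in this patch replace (DWORD) casts with a pointer-sized ULONG_PTR before adding byte offsets to a mapped base address, so the arithmetic no longer truncates on 64-bit hosts. The following is a minimal standalone sketch of that pattern, not code from pexports: it uses the standard uintptr_t in place of the patch's ULONG_PTR typedef, and add_offset / rva_like_offset are illustrative names.

  /* Sketch: pointer + byte-offset arithmetic that stays correct on LP64/LLP64.
   * uintptr_t plays the role of the patch's ULONG_PTR; truncating the base
   * through a 32-bit DWORD would only work while the mapping happens to sit
   * below 4 GiB. */
  #include <stdint.h>
  #include <stdio.h>

  static void *add_offset(void *base, uint32_t rva_like_offset)
  {
      /* widen the base to a pointer-sized integer before adding the offset */
      return (void *)((uintptr_t)base + rva_like_offset);
  }

  int main(void)
  {
      unsigned char buf[64];
      void *p = add_offset(buf, 16);
      printf("base=%p  base+16=%p\n", (void *)buf, p);
      return 0;
  }
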
+diff -ru ../release/mingw-utils-0.3/reimp/ar.c ./reimp/ar.c +--- ../release/mingw-utils-0.3/reimp/ar.c 2002-04-10 06:39:07.000000000 -0700 ++++ ./reimp/ar.c 2006-08-11 05:25:25.000000000 -0700 +@@ -21,6 +21,7 @@ + if (fread (long_names, size, 1, f) != 1) + error (0, "unexpected end-of-file\n"); + } ++ return 1; + } + else + return 0; +diff -ru ../release/mingw-utils-0.3/reimp/Makefile.in ./reimp/Makefile.in +--- ../release/mingw-utils-0.3/reimp/Makefile.in 2003-11-25 08:31:12.000000000 -0800 ++++ ./reimp/Makefile.in 2006-09-25 20:08:14.000000000 -0700 +@@ -1,8 +1,8 @@ +-# Makefile.in generated by automake 1.7.9 from Makefile.am. ++# Makefile.in generated by automake 1.9.6 from Makefile.am. + # @configure_input@ + +-# Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003 +-# Free Software Foundation, Inc. ++# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, ++# 2003, 2004, 2005 Free Software Foundation, Inc. + # This Makefile.in is free software; the Free Software Foundation + # gives unlimited permission to copy and/or distribute it, + # with or without modifications, as long as this notice is preserved. +@@ -14,6 +14,7 @@ + + @SET_MAKE@ + ++ + srcdir = @srcdir@ + top_srcdir = @top_srcdir@ + VPATH = @srcdir@ +@@ -21,7 +22,6 @@ + pkglibdir = $(libdir)/@PACKAGE@ + pkgincludedir = $(includedir)/@PACKAGE@ + top_builddir = .. +- + am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd + INSTALL = @INSTALL@ + install_sh_DATA = $(install_sh) -c -m 644 +@@ -35,6 +35,42 @@ + NORMAL_UNINSTALL = : + PRE_UNINSTALL = : + POST_UNINSTALL = : ++bin_PROGRAMS = reimp$(EXEEXT) ++subdir = reimp ++DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ++ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 ++am__aclocal_m4_deps = $(top_srcdir)/configure.ac ++am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ ++ $(ACLOCAL_M4) ++mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs ++CONFIG_HEADER = $(top_builddir)/config.h ++CONFIG_CLEAN_FILES = ++am__installdirs = "$(DESTDIR)$(bindir)" "$(DESTDIR)$(docdir)" ++binPROGRAMS_INSTALL = $(INSTALL_PROGRAM) ++PROGRAMS = $(bin_PROGRAMS) ++am_reimp_OBJECTS = ar.$(OBJEXT) reimp.$(OBJEXT) util.$(OBJEXT) ++reimp_OBJECTS = $(am_reimp_OBJECTS) ++reimp_LDADD = $(LDADD) ++DEFAULT_INCLUDES = -I. 
-I$(srcdir) -I$(top_builddir) ++depcomp = $(SHELL) $(top_srcdir)/depcomp ++am__depfiles_maybe = depfiles ++COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ ++ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) ++CCLD = $(CC) ++LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ ++SOURCES = $(reimp_SOURCES) ++DIST_SOURCES = $(reimp_SOURCES) ++am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; ++am__vpath_adj = case $$p in \ ++ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ ++ *) f=$$p;; \ ++ esac; ++am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; ++docDATA_INSTALL = $(INSTALL_DATA) ++DATA = $(doc_DATA) ++ETAGS = etags ++CTAGS = ctags ++DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) + ACLOCAL = @ACLOCAL@ + AMDEP_FALSE = @AMDEP_FALSE@ + AMDEP_TRUE = @AMDEP_TRUE@ +@@ -43,6 +79,12 @@ + AUTOHEADER = @AUTOHEADER@ + AUTOMAKE = @AUTOMAKE@ + AWK = @AWK@ ++BUILD_DRMINGW_FALSE = @BUILD_DRMINGW_FALSE@ ++BUILD_DRMINGW_TRUE = @BUILD_DRMINGW_TRUE@ ++BUILD_REDIR_FALSE = @BUILD_REDIR_FALSE@ ++BUILD_REDIR_TRUE = @BUILD_REDIR_TRUE@ ++BUILD_RES2COFF_FALSE = @BUILD_RES2COFF_FALSE@ ++BUILD_RES2COFF_TRUE = @BUILD_RES2COFF_TRUE@ + CC = @CC@ + CCDEPMODE = @CCDEPMODE@ + CFLAGS = @CFLAGS@ +@@ -101,6 +143,8 @@ + am__include = @am__include@ + am__leading_dot = @am__leading_dot@ + am__quote = @am__quote@ ++am__tar = @am__tar@ ++am__untar = @am__untar@ + bindir = @bindir@ + build_alias = @build_alias@ + datadir = @datadir@ +@@ -113,6 +157,7 @@ + libexecdir = @libexecdir@ + localstatedir = @localstatedir@ + mandir = @mandir@ ++mkdir_p = @mkdir_p@ + oldincludedir = @oldincludedir@ + prefix = @prefix@ + program_transform_name = @program_transform_name@ +@@ -120,62 +165,52 @@ + sharedstatedir = @sharedstatedir@ + sysconfdir = @sysconfdir@ + target_alias = @target_alias@ +-bin_PROGRAMS = reimp +- + reimp_SOURCES = ar.c reimp.c reimp.h util.c +- + docdir = $(prefix)/doc/reimp +- + doc_DATA = README +-subdir = reimp +-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +-mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs +-CONFIG_HEADER = $(top_builddir)/config.h +-CONFIG_CLEAN_FILES = +-bin_PROGRAMS = reimp$(EXEEXT) +-PROGRAMS = $(bin_PROGRAMS) +- +-am_reimp_OBJECTS = ar.$(OBJEXT) reimp.$(OBJEXT) util.$(OBJEXT) +-reimp_OBJECTS = $(am_reimp_OBJECTS) +-reimp_LDADD = $(LDADD) +-reimp_DEPENDENCIES = +-reimp_LDFLAGS = +- +-DEFAULT_INCLUDES = -I. 
-I$(srcdir) -I$(top_builddir) +-depcomp = $(SHELL) $(top_srcdir)/depcomp +-am__depfiles_maybe = depfiles +-@AMDEP_TRUE@DEP_FILES = ./$(DEPDIR)/ar.Po ./$(DEPDIR)/reimp.Po \ +-@AMDEP_TRUE@ ./$(DEPDIR)/util.Po +-COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ +- $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +-CCLD = $(CC) +-LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ +-DIST_SOURCES = $(reimp_SOURCES) +-DATA = $(doc_DATA) +- +-DIST_COMMON = README $(srcdir)/Makefile.in Makefile.am +-SOURCES = $(reimp_SOURCES) +- + all: all-am + + .SUFFIXES: + .SUFFIXES: .c .o .obj +-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ Makefile.am $(top_srcdir)/configure.ac $(ACLOCAL_M4) ++$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) ++ @for dep in $?; do \ ++ case '$(am__configure_deps)' in \ ++ *$$dep*) \ ++ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ ++ && exit 0; \ ++ exit 1;; \ ++ esac; \ ++ done; \ ++ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu reimp/Makefile'; \ + cd $(top_srcdir) && \ + $(AUTOMAKE) --gnu reimp/Makefile +-Makefile: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.in $(top_builddir)/config.status +- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe) +-binPROGRAMS_INSTALL = $(INSTALL_PROGRAM) ++.PRECIOUS: Makefile ++Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status ++ @case '$?' in \ ++ *config.status*) \ ++ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ ++ *) \ ++ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ ++ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ ++ esac; ++ ++$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) ++ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ++ ++$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) ++ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ++$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) ++ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + install-binPROGRAMS: $(bin_PROGRAMS) + @$(NORMAL_INSTALL) +- $(mkinstalldirs) $(DESTDIR)$(bindir) ++ test -z "$(bindir)" || $(mkdir_p) "$(DESTDIR)$(bindir)" + @list='$(bin_PROGRAMS)'; for p in $$list; do \ + p1=`echo $$p|sed 's/$(EXEEXT)$$//'`; \ + if test -f $$p \ + ; then \ + f=`echo "$$p1" | sed 's,^.*/,,;$(transform);s/$$/$(EXEEXT)/'`; \ +- echo " $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) $$p $(DESTDIR)$(bindir)/$$f"; \ +- $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) $$p $(DESTDIR)$(bindir)/$$f || exit 1; \ ++ echo " $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ ++ $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ + else :; fi; \ + done + +@@ -183,8 +218,8 @@ + @$(NORMAL_UNINSTALL) + @list='$(bin_PROGRAMS)'; for p in $$list; do \ + f=`echo "$$p" | sed 's,^.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/'`; \ +- echo " rm -f $(DESTDIR)$(bindir)/$$f"; \ +- rm -f $(DESTDIR)$(bindir)/$$f; \ ++ echo " rm -f '$(DESTDIR)$(bindir)/$$f'"; \ ++ rm -f "$(DESTDIR)$(bindir)/$$f"; \ + done + + clean-binPROGRAMS: +@@ -194,7 +229,7 @@ + $(LINK) $(reimp_LDFLAGS) $(reimp_OBJECTS) $(reimp_LDADD) $(LIBS) + + mostlyclean-compile: +- -rm -f *.$(OBJEXT) core *.core ++ -rm -f *.$(OBJEXT) + + distclean-compile: + -rm -f *.tab.c +@@ -204,54 +239,37 @@ + @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/util.Po@am__quote@ + + .c.o: 
+-@am__fastdepCC_TRUE@ if $(COMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" \ +-@am__fastdepCC_TRUE@ -c -o $@ `test -f '$<' || echo '$(srcdir)/'`$<; \ +-@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; \ +-@am__fastdepCC_TRUE@ else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; \ +-@am__fastdepCC_TRUE@ fi ++@am__fastdepCC_TRUE@ if $(COMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" -c -o $@ $<; \ ++@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; fi + @AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ depfile='$(DEPDIR)/$*.Po' tmpdepfile='$(DEPDIR)/$*.TPo' @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +-@am__fastdepCC_FALSE@ $(COMPILE) -c `test -f '$<' || echo '$(srcdir)/'`$< ++@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ ++@am__fastdepCC_FALSE@ $(COMPILE) -c $< + + .c.obj: +-@am__fastdepCC_TRUE@ if $(COMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" \ +-@am__fastdepCC_TRUE@ -c -o $@ `if test -f '$<'; then $(CYGPATH_W) '$<'; else $(CYGPATH_W) '$(srcdir)/$<'; fi`; \ +-@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; \ +-@am__fastdepCC_TRUE@ else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; \ +-@am__fastdepCC_TRUE@ fi ++@am__fastdepCC_TRUE@ if $(COMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" -c -o $@ `$(CYGPATH_W) '$<'`; \ ++@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; fi + @AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ depfile='$(DEPDIR)/$*.Po' tmpdepfile='$(DEPDIR)/$*.TPo' @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +-@am__fastdepCC_FALSE@ $(COMPILE) -c `if test -f '$<'; then $(CYGPATH_W) '$<'; else $(CYGPATH_W) '$(srcdir)/$<'; fi` ++@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ ++@am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'` + uninstall-info-am: +-docDATA_INSTALL = $(INSTALL_DATA) + install-docDATA: $(doc_DATA) + @$(NORMAL_INSTALL) +- $(mkinstalldirs) $(DESTDIR)$(docdir) ++ test -z "$(docdir)" || $(mkdir_p) "$(DESTDIR)$(docdir)" + @list='$(doc_DATA)'; for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ +- f="`echo $$p | sed -e 's|^.*/||'`"; \ +- echo " $(docDATA_INSTALL) $$d$$p $(DESTDIR)$(docdir)/$$f"; \ +- $(docDATA_INSTALL) $$d$$p $(DESTDIR)$(docdir)/$$f; \ ++ f=$(am__strip_dir) \ ++ echo " $(docDATA_INSTALL) '$$d$$p' '$(DESTDIR)$(docdir)/$$f'"; \ ++ $(docDATA_INSTALL) "$$d$$p" "$(DESTDIR)$(docdir)/$$f"; \ + done + + uninstall-docDATA: + @$(NORMAL_UNINSTALL) + @list='$(doc_DATA)'; for p in $$list; do \ +- f="`echo $$p | sed -e 's|^.*/||'`"; \ +- echo " rm -f $(DESTDIR)$(docdir)/$$f"; \ +- rm -f $(DESTDIR)$(docdir)/$$f; \ ++ f=$(am__strip_dir) \ ++ echo " rm -f '$(DESTDIR)$(docdir)/$$f'"; \ ++ rm -f "$(DESTDIR)$(docdir)/$$f"; \ + done + +-ETAGS = etags +-ETAGSFLAGS = +- +-CTAGS = ctags +-CTAGSFLAGS = +- +-tags: TAGS +- + ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ +@@ -260,6 +278,7 @@ + $(AWK) ' { files[$$0] = 1; } \ + END { for (i in files) print i; }'`; \ + mkid -fID $$unique ++tags: TAGS + + TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) +@@ -271,10 +290,11 @@ + done 
| \ + $(AWK) ' { files[$$0] = 1; } \ + END { for (i in files) print i; }'`; \ +- test -z "$(ETAGS_ARGS)$$tags$$unique" \ +- || $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ +- $$tags $$unique +- ++ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ ++ test -n "$$unique" || unique=$$empty_fix; \ ++ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ ++ $$tags $$unique; \ ++ fi + ctags: CTAGS + CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) +@@ -297,10 +317,6 @@ + + distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags +-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +- +-top_distdir = .. +-distdir = $(top_distdir)/$(PACKAGE)-$(VERSION) + + distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \ +@@ -314,7 +330,7 @@ + dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test "$$dir" != "$$file" && test "$$dir" != "."; then \ + dir="/$$dir"; \ +- $(mkinstalldirs) "$(distdir)$$dir"; \ ++ $(mkdir_p) "$(distdir)$$dir"; \ + else \ + dir=''; \ + fi; \ +@@ -332,9 +348,10 @@ + check-am: all-am + check: check-am + all-am: Makefile $(PROGRAMS) $(DATA) +- + installdirs: +- $(mkinstalldirs) $(DESTDIR)$(bindir) $(DESTDIR)$(docdir) ++ for dir in "$(DESTDIR)$(bindir)" "$(DESTDIR)$(docdir)"; do \ ++ test -z "$$dir" || $(mkdir_p) "$$dir"; \ ++ done + install: install-am + install-exec: install-exec-am + install-data: install-data-am +@@ -354,7 +371,7 @@ + clean-generic: + + distclean-generic: +- -rm -f $(CONFIG_CLEAN_FILES) ++ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + + maintainer-clean-generic: + @echo "This command is intended for maintainers to use" +@@ -373,6 +390,8 @@ + + dvi-am: + ++html: html-am ++ + info: info-am + + info-am: +@@ -404,19 +423,20 @@ + + ps-am: + +-uninstall-am: uninstall-binPROGRAMS uninstall-docDATA uninstall-info-am ++uninstall-am: uninstall-binPROGRAMS uninstall-docDATA \ ++ uninstall-info-am + + .PHONY: CTAGS GTAGS all all-am check check-am clean clean-binPROGRAMS \ + clean-generic ctags distclean distclean-compile \ +- distclean-generic distclean-tags distdir dvi dvi-am info \ +- info-am install install-am install-binPROGRAMS install-data \ +- install-data-am install-docDATA install-exec install-exec-am \ +- install-info install-info-am install-man install-strip \ +- installcheck installcheck-am installdirs maintainer-clean \ +- maintainer-clean-generic mostlyclean mostlyclean-compile \ +- mostlyclean-generic pdf pdf-am ps ps-am tags uninstall \ +- uninstall-am uninstall-binPROGRAMS uninstall-docDATA \ +- uninstall-info-am ++ distclean-generic distclean-tags distdir dvi dvi-am html \ ++ html-am info info-am install install-am install-binPROGRAMS \ ++ install-data install-data-am install-docDATA install-exec \ ++ install-exec-am install-info install-info-am install-man \ ++ install-strip installcheck installcheck-am installdirs \ ++ maintainer-clean maintainer-clean-generic mostlyclean \ ++ mostlyclean-compile mostlyclean-generic pdf pdf-am ps ps-am \ ++ tags uninstall uninstall-am uninstall-binPROGRAMS \ ++ uninstall-docDATA uninstall-info-am + + # Tell versions [3.59,3.63) of GNU make to not export all variables. + # Otherwise a system limit (for SysV at least) may be exceeded. 
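
The reimp/ar.c hunk above adds a missing return 1; to a branch that previously fell off the end of a non-void function, leaving callers with an indeterminate value. The sketch below shows the same class of bug and fix on a hypothetical helper (starts_with_slash is not from reimp); gcc and clang flag the broken form with -Wreturn-type.

  /* Sketch: every path of a non-void function must return a value. */
  #include <stdio.h>

  static int starts_with_slash(const char *name)
  {
      if (name[0] == '/') {
          return 1;   /* the patch's added "return 1;" plays this role */
      }
      return 0;       /* without an explicit return here, the result is undefined */
  }

  int main(void)
  {
      printf("%d %d\n", starts_with_slash("/usr"), starts_with_slash("usr"));
      return 0;
  }
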
+diff -ru ../release/mingw-utils-0.3/reimp/reimp.h ./reimp/reimp.h +--- ../release/mingw-utils-0.3/reimp/reimp.h 2002-04-10 06:39:07.000000000 -0700 ++++ ./reimp/reimp.h 2006-08-11 05:25:25.000000000 -0700 +@@ -3,7 +3,7 @@ + + /* we need integers of specific sizes */ + #ifndef uint32 +-#define uint32 unsigned long ++#define uint32 unsigned int + #endif + + #ifndef uint16 +diff -ru ../release/mingw-utils-0.3/res2coff/Makefile.in ./res2coff/Makefile.in +--- ../release/mingw-utils-0.3/res2coff/Makefile.in 2003-11-25 08:31:12.000000000 -0800 ++++ ./res2coff/Makefile.in 2006-09-25 20:08:14.000000000 -0700 +@@ -1,8 +1,8 @@ +-# Makefile.in generated by automake 1.7.9 from Makefile.am. ++# Makefile.in generated by automake 1.9.6 from Makefile.am. + # @configure_input@ + +-# Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003 +-# Free Software Foundation, Inc. ++# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, ++# 2003, 2004, 2005 Free Software Foundation, Inc. + # This Makefile.in is free software; the Free Software Foundation + # gives unlimited permission to copy and/or distribute it, + # with or without modifications, as long as this notice is preserved. +@@ -14,6 +14,7 @@ + + @SET_MAKE@ + ++ + srcdir = @srcdir@ + top_srcdir = @top_srcdir@ + VPATH = @srcdir@ +@@ -21,7 +22,6 @@ + pkglibdir = $(libdir)/@PACKAGE@ + pkgincludedir = $(includedir)/@PACKAGE@ + top_builddir = .. +- + am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd + INSTALL = @INSTALL@ + install_sh_DATA = $(install_sh) -c -m 644 +@@ -35,6 +35,43 @@ + NORMAL_UNINSTALL = : + PRE_UNINSTALL = : + POST_UNINSTALL = : ++bin_PROGRAMS = res2coff$(EXEEXT) ++subdir = res2coff ++DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ++ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 ++am__aclocal_m4_deps = $(top_srcdir)/configure.ac ++am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ ++ $(ACLOCAL_M4) ++mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs ++CONFIG_HEADER = $(top_builddir)/config.h ++CONFIG_CLEAN_FILES = ++am__installdirs = "$(DESTDIR)$(bindir)" "$(DESTDIR)$(docdir)" ++binPROGRAMS_INSTALL = $(INSTALL_PROGRAM) ++PROGRAMS = $(bin_PROGRAMS) ++am_res2coff_OBJECTS = objimage.$(OBJEXT) res2coff.$(OBJEXT) \ ++ resimage.$(OBJEXT) ++res2coff_OBJECTS = $(am_res2coff_OBJECTS) ++res2coff_LDADD = $(LDADD) ++DEFAULT_INCLUDES = -I. 
-I$(srcdir) -I$(top_builddir) ++depcomp = $(SHELL) $(top_srcdir)/depcomp ++am__depfiles_maybe = depfiles ++COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ ++ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) ++CCLD = $(CC) ++LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ ++SOURCES = $(res2coff_SOURCES) ++DIST_SOURCES = $(res2coff_SOURCES) ++am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; ++am__vpath_adj = case $$p in \ ++ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ ++ *) f=$$p;; \ ++ esac; ++am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; ++docDATA_INSTALL = $(INSTALL_DATA) ++DATA = $(doc_DATA) ++ETAGS = etags ++CTAGS = ctags ++DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) + ACLOCAL = @ACLOCAL@ + AMDEP_FALSE = @AMDEP_FALSE@ + AMDEP_TRUE = @AMDEP_TRUE@ +@@ -43,6 +80,12 @@ + AUTOHEADER = @AUTOHEADER@ + AUTOMAKE = @AUTOMAKE@ + AWK = @AWK@ ++BUILD_DRMINGW_FALSE = @BUILD_DRMINGW_FALSE@ ++BUILD_DRMINGW_TRUE = @BUILD_DRMINGW_TRUE@ ++BUILD_REDIR_FALSE = @BUILD_REDIR_FALSE@ ++BUILD_REDIR_TRUE = @BUILD_REDIR_TRUE@ ++BUILD_RES2COFF_FALSE = @BUILD_RES2COFF_FALSE@ ++BUILD_RES2COFF_TRUE = @BUILD_RES2COFF_TRUE@ + CC = @CC@ + CCDEPMODE = @CCDEPMODE@ + CFLAGS = @CFLAGS@ +@@ -101,6 +144,8 @@ + am__include = @am__include@ + am__leading_dot = @am__leading_dot@ + am__quote = @am__quote@ ++am__tar = @am__tar@ ++am__untar = @am__untar@ + bindir = @bindir@ + build_alias = @build_alias@ + datadir = @datadir@ +@@ -113,6 +158,7 @@ + libexecdir = @libexecdir@ + localstatedir = @localstatedir@ + mandir = @mandir@ ++mkdir_p = @mkdir_p@ + oldincludedir = @oldincludedir@ + prefix = @prefix@ + program_transform_name = @program_transform_name@ +@@ -120,63 +166,52 @@ + sharedstatedir = @sharedstatedir@ + sysconfdir = @sysconfdir@ + target_alias = @target_alias@ +-bin_PROGRAMS = res2coff +- + res2coff_SOURCES = objimage.c protos.h res2coff.c res2coff.h resimage.c +- + docdir = $(prefix)/doc/res2coff +- + doc_DATA = README +-subdir = res2coff +-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +-mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs +-CONFIG_HEADER = $(top_builddir)/config.h +-CONFIG_CLEAN_FILES = +-bin_PROGRAMS = res2coff$(EXEEXT) +-PROGRAMS = $(bin_PROGRAMS) +- +-am_res2coff_OBJECTS = objimage.$(OBJEXT) res2coff.$(OBJEXT) \ +- resimage.$(OBJEXT) +-res2coff_OBJECTS = $(am_res2coff_OBJECTS) +-res2coff_LDADD = $(LDADD) +-res2coff_DEPENDENCIES = +-res2coff_LDFLAGS = +- +-DEFAULT_INCLUDES = -I. 
-I$(srcdir) -I$(top_builddir) +-depcomp = $(SHELL) $(top_srcdir)/depcomp +-am__depfiles_maybe = depfiles +-@AMDEP_TRUE@DEP_FILES = ./$(DEPDIR)/objimage.Po ./$(DEPDIR)/res2coff.Po \ +-@AMDEP_TRUE@ ./$(DEPDIR)/resimage.Po +-COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ +- $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +-CCLD = $(CC) +-LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ +-DIST_SOURCES = $(res2coff_SOURCES) +-DATA = $(doc_DATA) +- +-DIST_COMMON = README $(srcdir)/Makefile.in Makefile.am +-SOURCES = $(res2coff_SOURCES) +- + all: all-am + + .SUFFIXES: + .SUFFIXES: .c .o .obj +-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ Makefile.am $(top_srcdir)/configure.ac $(ACLOCAL_M4) ++$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) ++ @for dep in $?; do \ ++ case '$(am__configure_deps)' in \ ++ *$$dep*) \ ++ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ ++ && exit 0; \ ++ exit 1;; \ ++ esac; \ ++ done; \ ++ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu res2coff/Makefile'; \ + cd $(top_srcdir) && \ + $(AUTOMAKE) --gnu res2coff/Makefile +-Makefile: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.in $(top_builddir)/config.status +- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe) +-binPROGRAMS_INSTALL = $(INSTALL_PROGRAM) ++.PRECIOUS: Makefile ++Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status ++ @case '$?' in \ ++ *config.status*) \ ++ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ ++ *) \ ++ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ ++ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ ++ esac; ++ ++$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) ++ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ++ ++$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) ++ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ++$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) ++ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + install-binPROGRAMS: $(bin_PROGRAMS) + @$(NORMAL_INSTALL) +- $(mkinstalldirs) $(DESTDIR)$(bindir) ++ test -z "$(bindir)" || $(mkdir_p) "$(DESTDIR)$(bindir)" + @list='$(bin_PROGRAMS)'; for p in $$list; do \ + p1=`echo $$p|sed 's/$(EXEEXT)$$//'`; \ + if test -f $$p \ + ; then \ + f=`echo "$$p1" | sed 's,^.*/,,;$(transform);s/$$/$(EXEEXT)/'`; \ +- echo " $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) $$p $(DESTDIR)$(bindir)/$$f"; \ +- $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) $$p $(DESTDIR)$(bindir)/$$f || exit 1; \ ++ echo " $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ ++ $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ + else :; fi; \ + done + +@@ -184,8 +219,8 @@ + @$(NORMAL_UNINSTALL) + @list='$(bin_PROGRAMS)'; for p in $$list; do \ + f=`echo "$$p" | sed 's,^.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/'`; \ +- echo " rm -f $(DESTDIR)$(bindir)/$$f"; \ +- rm -f $(DESTDIR)$(bindir)/$$f; \ ++ echo " rm -f '$(DESTDIR)$(bindir)/$$f'"; \ ++ rm -f "$(DESTDIR)$(bindir)/$$f"; \ + done + + clean-binPROGRAMS: +@@ -195,7 +230,7 @@ + $(LINK) $(res2coff_LDFLAGS) $(res2coff_OBJECTS) $(res2coff_LDADD) $(LIBS) + + mostlyclean-compile: +- -rm -f *.$(OBJEXT) core *.core ++ -rm -f *.$(OBJEXT) + + distclean-compile: + -rm -f *.tab.c +@@ -205,54 +240,37 @@ + @AMDEP_TRUE@@am__include@ 
@am__quote@./$(DEPDIR)/resimage.Po@am__quote@ + + .c.o: +-@am__fastdepCC_TRUE@ if $(COMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" \ +-@am__fastdepCC_TRUE@ -c -o $@ `test -f '$<' || echo '$(srcdir)/'`$<; \ +-@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; \ +-@am__fastdepCC_TRUE@ else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; \ +-@am__fastdepCC_TRUE@ fi ++@am__fastdepCC_TRUE@ if $(COMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" -c -o $@ $<; \ ++@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; fi + @AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ depfile='$(DEPDIR)/$*.Po' tmpdepfile='$(DEPDIR)/$*.TPo' @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +-@am__fastdepCC_FALSE@ $(COMPILE) -c `test -f '$<' || echo '$(srcdir)/'`$< ++@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ ++@am__fastdepCC_FALSE@ $(COMPILE) -c $< + + .c.obj: +-@am__fastdepCC_TRUE@ if $(COMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" \ +-@am__fastdepCC_TRUE@ -c -o $@ `if test -f '$<'; then $(CYGPATH_W) '$<'; else $(CYGPATH_W) '$(srcdir)/$<'; fi`; \ +-@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; \ +-@am__fastdepCC_TRUE@ else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; \ +-@am__fastdepCC_TRUE@ fi ++@am__fastdepCC_TRUE@ if $(COMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" -c -o $@ `$(CYGPATH_W) '$<'`; \ ++@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; fi + @AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ depfile='$(DEPDIR)/$*.Po' tmpdepfile='$(DEPDIR)/$*.TPo' @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +-@am__fastdepCC_FALSE@ $(COMPILE) -c `if test -f '$<'; then $(CYGPATH_W) '$<'; else $(CYGPATH_W) '$(srcdir)/$<'; fi` ++@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ ++@am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'` + uninstall-info-am: +-docDATA_INSTALL = $(INSTALL_DATA) + install-docDATA: $(doc_DATA) + @$(NORMAL_INSTALL) +- $(mkinstalldirs) $(DESTDIR)$(docdir) ++ test -z "$(docdir)" || $(mkdir_p) "$(DESTDIR)$(docdir)" + @list='$(doc_DATA)'; for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ +- f="`echo $$p | sed -e 's|^.*/||'`"; \ +- echo " $(docDATA_INSTALL) $$d$$p $(DESTDIR)$(docdir)/$$f"; \ +- $(docDATA_INSTALL) $$d$$p $(DESTDIR)$(docdir)/$$f; \ ++ f=$(am__strip_dir) \ ++ echo " $(docDATA_INSTALL) '$$d$$p' '$(DESTDIR)$(docdir)/$$f'"; \ ++ $(docDATA_INSTALL) "$$d$$p" "$(DESTDIR)$(docdir)/$$f"; \ + done + + uninstall-docDATA: + @$(NORMAL_UNINSTALL) + @list='$(doc_DATA)'; for p in $$list; do \ +- f="`echo $$p | sed -e 's|^.*/||'`"; \ +- echo " rm -f $(DESTDIR)$(docdir)/$$f"; \ +- rm -f $(DESTDIR)$(docdir)/$$f; \ ++ f=$(am__strip_dir) \ ++ echo " rm -f '$(DESTDIR)$(docdir)/$$f'"; \ ++ rm -f "$(DESTDIR)$(docdir)/$$f"; \ + done + +-ETAGS = etags +-ETAGSFLAGS = +- +-CTAGS = ctags +-CTAGSFLAGS = +- +-tags: TAGS +- + ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ +@@ -261,6 +279,7 @@ + $(AWK) ' { files[$$0] = 1; } \ + END { for (i in files) print i; }'`; \ + mkid -fID $$unique ++tags: TAGS + + TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) 
\ + $(TAGS_FILES) $(LISP) +@@ -272,10 +291,11 @@ + done | \ + $(AWK) ' { files[$$0] = 1; } \ + END { for (i in files) print i; }'`; \ +- test -z "$(ETAGS_ARGS)$$tags$$unique" \ +- || $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ +- $$tags $$unique +- ++ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ ++ test -n "$$unique" || unique=$$empty_fix; \ ++ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ ++ $$tags $$unique; \ ++ fi + ctags: CTAGS + CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) +@@ -298,10 +318,6 @@ + + distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags +-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +- +-top_distdir = .. +-distdir = $(top_distdir)/$(PACKAGE)-$(VERSION) + + distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \ +@@ -315,7 +331,7 @@ + dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test "$$dir" != "$$file" && test "$$dir" != "."; then \ + dir="/$$dir"; \ +- $(mkinstalldirs) "$(distdir)$$dir"; \ ++ $(mkdir_p) "$(distdir)$$dir"; \ + else \ + dir=''; \ + fi; \ +@@ -333,9 +349,10 @@ + check-am: all-am + check: check-am + all-am: Makefile $(PROGRAMS) $(DATA) +- + installdirs: +- $(mkinstalldirs) $(DESTDIR)$(bindir) $(DESTDIR)$(docdir) ++ for dir in "$(DESTDIR)$(bindir)" "$(DESTDIR)$(docdir)"; do \ ++ test -z "$$dir" || $(mkdir_p) "$$dir"; \ ++ done + install: install-am + install-exec: install-exec-am + install-data: install-data-am +@@ -355,7 +372,7 @@ + clean-generic: + + distclean-generic: +- -rm -f $(CONFIG_CLEAN_FILES) ++ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + + maintainer-clean-generic: + @echo "This command is intended for maintainers to use" +@@ -374,6 +391,8 @@ + + dvi-am: + ++html: html-am ++ + info: info-am + + info-am: +@@ -405,19 +424,20 @@ + + ps-am: + +-uninstall-am: uninstall-binPROGRAMS uninstall-docDATA uninstall-info-am ++uninstall-am: uninstall-binPROGRAMS uninstall-docDATA \ ++ uninstall-info-am + + .PHONY: CTAGS GTAGS all all-am check check-am clean clean-binPROGRAMS \ + clean-generic ctags distclean distclean-compile \ +- distclean-generic distclean-tags distdir dvi dvi-am info \ +- info-am install install-am install-binPROGRAMS install-data \ +- install-data-am install-docDATA install-exec install-exec-am \ +- install-info install-info-am install-man install-strip \ +- installcheck installcheck-am installdirs maintainer-clean \ +- maintainer-clean-generic mostlyclean mostlyclean-compile \ +- mostlyclean-generic pdf pdf-am ps ps-am tags uninstall \ +- uninstall-am uninstall-binPROGRAMS uninstall-docDATA \ +- uninstall-info-am ++ distclean-generic distclean-tags distdir dvi dvi-am html \ ++ html-am info info-am install install-am install-binPROGRAMS \ ++ install-data install-data-am install-docDATA install-exec \ ++ install-exec-am install-info install-info-am install-man \ ++ install-strip installcheck installcheck-am installdirs \ ++ maintainer-clean maintainer-clean-generic mostlyclean \ ++ mostlyclean-compile mostlyclean-generic pdf pdf-am ps ps-am \ ++ tags uninstall uninstall-am uninstall-binPROGRAMS \ ++ uninstall-docDATA uninstall-info-am + + # Tell versions [3.59,3.63) of GNU make to not export all variables. + # Otherwise a system limit (for SysV at least) may be exceeded. 
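
The reimp/reimp.h hunk above narrows uint32 from unsigned long to unsigned int, since unsigned long is 8 bytes on LP64 hosts, and the earlier pexports.h hunk selects DWORD and ULONG_PTR from the SIZEOF_LONG / SIZEOF_VOID_P configure results for the same reason. Below is a small sketch of how such width assumptions can be checked at compile time under C11; my_uint32 and my_ulong_ptr are illustrative stand-ins, not the patch's typedefs.

  /* Sketch: verify fixed-width assumptions instead of assuming "long is
   * 32 bits" (false on LP64).  The patch's configure-time SIZEOF_* checks
   * serve the same purpose at build time. */
  #include <stdint.h>

  typedef uint32_t  my_uint32;     /* role of reimp.h's uint32 */
  typedef uintptr_t my_ulong_ptr;  /* role of pexports.h's ULONG_PTR */

  _Static_assert(sizeof(my_uint32) == 4, "uint32 must be exactly 4 bytes");
  _Static_assert(sizeof(my_ulong_ptr) == sizeof(void *),
                 "ULONG_PTR must be able to hold a pointer");

  int main(void)
  {
      return 0;
  }
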
+diff -ru ../release/mingw-utils-0.3/scripts/a2dll.in ./scripts/a2dll.in +--- ../release/mingw-utils-0.3/scripts/a2dll.in 2002-05-26 03:13:58.000000000 -0700 ++++ ./scripts/a2dll.in 2006-08-11 05:25:25.000000000 -0700 +@@ -12,6 +12,13 @@ + exit 0 + } + ++# Figure out where the script is located and then use that path as the location ++# for the tools ++ ++cwd=`pwd` ++cd `dirname $0` ++SCRIPTDIR=`pwd` ++cd $cwd + + cmdline=$@ + +@@ -43,14 +50,14 @@ + rm -f .dll/* + /usr/bin/mkdir -p .dll + cd .dll +- ar x ../$in ++ ${SCRIPTDIR}/ar x ../$in + else + cd .dll + fi + + echo Creating shared library \'$out\' + +-dllwrap --export-all -o ../$out `ls` $libs >../ld.err 2>&1 ++${SCRIPTDIR}/dllwrap --export-all -o ../$out `ls` $libs >../ld.err 2>&1 + + cd .. + if [ `wc ld.err|awk ' {print $1}' ` -gt 2 ] +@@ -72,17 +79,17 @@ + # 2. I just saw that dlltool lies about assembly-sourced files, it + # lists their symbols as data + +- pexports $out >$base.def ++ ${SCRIPTDIR}/pexports $out >$base.def + + # create import library + + mv $in $in.static +- dlltool --dllname $out --def $base.def --output-lib $in ++ ${SCRIPTDIR}/dlltool --dllname $out --def $base.def --output-lib $in + + # finally, we check whether dll exports data symbols + # if yes, we suggest user on steps to perform + +- pexports $out | awk '/DATA/ { print $1}' >$out.data ++ ${SCRIPTDIR}/pexports $out | awk '/DATA/ { print $1}' >$out.data + if test -s $out.data + then + echo +diff -ru ../release/mingw-utils-0.3/scripts/Makefile.in ./scripts/Makefile.in +--- ../release/mingw-utils-0.3/scripts/Makefile.in 2003-11-25 08:31:12.000000000 -0800 ++++ ./scripts/Makefile.in 2006-09-25 20:08:14.000000000 -0700 +@@ -1,8 +1,8 @@ +-# Makefile.in generated by automake 1.7.9 from Makefile.am. ++# Makefile.in generated by automake 1.9.6 from Makefile.am. + # @configure_input@ + +-# Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003 +-# Free Software Foundation, Inc. ++# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, ++# 2003, 2004, 2005 Free Software Foundation, Inc. + # This Makefile.in is free software; the Free Software Foundation + # gives unlimited permission to copy and/or distribute it, + # with or without modifications, as long as this notice is preserved. +@@ -21,7 +21,6 @@ + pkglibdir = $(libdir)/@PACKAGE@ + pkgincludedir = $(includedir)/@PACKAGE@ + top_builddir = .. 
+- + am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd + INSTALL = @INSTALL@ + install_sh_DATA = $(install_sh) -c -m 644 +@@ -35,6 +34,22 @@ + NORMAL_UNINSTALL = : + PRE_UNINSTALL = : + POST_UNINSTALL = : ++subdir = scripts ++DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ ++ $(srcdir)/a2dll.in $(srcdir)/dsw2mak.in ChangeLog ++ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 ++am__aclocal_m4_deps = $(top_srcdir)/configure.ac ++am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ ++ $(ACLOCAL_M4) ++mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs ++CONFIG_HEADER = $(top_builddir)/config.h ++CONFIG_CLEAN_FILES = a2dll dsw2mak ++am__installdirs = "$(DESTDIR)$(bindir)" ++binSCRIPT_INSTALL = $(INSTALL_SCRIPT) ++SCRIPTS = $(bin_SCRIPTS) ++SOURCES = ++DIST_SOURCES = ++DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) + ACLOCAL = @ACLOCAL@ + AMDEP_FALSE = @AMDEP_FALSE@ + AMDEP_TRUE = @AMDEP_TRUE@ +@@ -43,6 +58,12 @@ + AUTOHEADER = @AUTOHEADER@ + AUTOMAKE = @AUTOMAKE@ + AWK = @AWK@ ++BUILD_DRMINGW_FALSE = @BUILD_DRMINGW_FALSE@ ++BUILD_DRMINGW_TRUE = @BUILD_DRMINGW_TRUE@ ++BUILD_REDIR_FALSE = @BUILD_REDIR_FALSE@ ++BUILD_REDIR_TRUE = @BUILD_REDIR_TRUE@ ++BUILD_RES2COFF_FALSE = @BUILD_RES2COFF_FALSE@ ++BUILD_RES2COFF_TRUE = @BUILD_RES2COFF_TRUE@ + CC = @CC@ + CCDEPMODE = @CCDEPMODE@ + CFLAGS = @CFLAGS@ +@@ -101,6 +122,8 @@ + am__include = @am__include@ + am__leading_dot = @am__leading_dot@ + am__quote = @am__quote@ ++am__tar = @am__tar@ ++am__untar = @am__untar@ + bindir = @bindir@ + build_alias = @build_alias@ + datadir = @datadir@ +@@ -113,6 +136,7 @@ + libexecdir = @libexecdir@ + localstatedir = @localstatedir@ + mandir = @mandir@ ++mkdir_p = @mkdir_p@ + oldincludedir = @oldincludedir@ + prefix = @prefix@ + program_transform_name = @program_transform_name@ +@@ -121,40 +145,52 @@ + sysconfdir = @sysconfdir@ + target_alias = @target_alias@ + bin_SCRIPTS = a2dll dsw2mak +- + EXTRA_DIST = a2dll.html static2dll_howto.txt +-subdir = scripts +-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +-mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs +-CONFIG_HEADER = $(top_builddir)/config.h +-CONFIG_CLEAN_FILES = a2dll dsw2mak +-SCRIPTS = $(bin_SCRIPTS) +- +-DIST_SOURCES = +-DIST_COMMON = $(srcdir)/Makefile.in ChangeLog Makefile.am a2dll.in \ +- dsw2mak.in + all: all-am + + .SUFFIXES: +-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ Makefile.am $(top_srcdir)/configure.ac $(ACLOCAL_M4) ++$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) ++ @for dep in $?; do \ ++ case '$(am__configure_deps)' in \ ++ *$$dep*) \ ++ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ ++ && exit 0; \ ++ exit 1;; \ ++ esac; \ ++ done; \ ++ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu scripts/Makefile'; \ + cd $(top_srcdir) && \ + $(AUTOMAKE) --gnu scripts/Makefile +-Makefile: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.in $(top_builddir)/config.status +- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe) +-a2dll: $(top_builddir)/config.status a2dll.in ++.PRECIOUS: Makefile ++Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status ++ @case '$?' 
in \ ++ *config.status*) \ ++ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ ++ *) \ ++ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ ++ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ ++ esac; ++ ++$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) ++ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ++ ++$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) ++ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ++$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) ++ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ++a2dll: $(top_builddir)/config.status $(srcdir)/a2dll.in + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ +-dsw2mak: $(top_builddir)/config.status dsw2mak.in ++dsw2mak: $(top_builddir)/config.status $(srcdir)/dsw2mak.in + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ +-binSCRIPT_INSTALL = $(INSTALL_SCRIPT) + install-binSCRIPTS: $(bin_SCRIPTS) + @$(NORMAL_INSTALL) +- $(mkinstalldirs) $(DESTDIR)$(bindir) ++ test -z "$(bindir)" || $(mkdir_p) "$(DESTDIR)$(bindir)" + @list='$(bin_SCRIPTS)'; for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + if test -f $$d$$p; then \ + f=`echo "$$p" | sed 's|^.*/||;$(transform)'`; \ +- echo " $(binSCRIPT_INSTALL) $$d$$p $(DESTDIR)$(bindir)/$$f"; \ +- $(binSCRIPT_INSTALL) $$d$$p $(DESTDIR)$(bindir)/$$f; \ ++ echo " $(binSCRIPT_INSTALL) '$$d$$p' '$(DESTDIR)$(bindir)/$$f'"; \ ++ $(binSCRIPT_INSTALL) "$$d$$p" "$(DESTDIR)$(bindir)/$$f"; \ + else :; fi; \ + done + +@@ -162,8 +198,8 @@ + @$(NORMAL_UNINSTALL) + @list='$(bin_SCRIPTS)'; for p in $$list; do \ + f=`echo "$$p" | sed 's|^.*/||;$(transform)'`; \ +- echo " rm -f $(DESTDIR)$(bindir)/$$f"; \ +- rm -f $(DESTDIR)$(bindir)/$$f; \ ++ echo " rm -f '$(DESTDIR)$(bindir)/$$f'"; \ ++ rm -f "$(DESTDIR)$(bindir)/$$f"; \ + done + uninstall-info-am: + tags: TAGS +@@ -172,10 +208,6 @@ + ctags: CTAGS + CTAGS: + +-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +- +-top_distdir = .. 
+-distdir = $(top_distdir)/$(PACKAGE)-$(VERSION) + + distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \ +@@ -189,7 +221,7 @@ + dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test "$$dir" != "$$file" && test "$$dir" != "."; then \ + dir="/$$dir"; \ +- $(mkinstalldirs) "$(distdir)$$dir"; \ ++ $(mkdir_p) "$(distdir)$$dir"; \ + else \ + dir=''; \ + fi; \ +@@ -207,9 +239,10 @@ + check-am: all-am + check: check-am + all-am: Makefile $(SCRIPTS) +- + installdirs: +- $(mkinstalldirs) $(DESTDIR)$(bindir) ++ for dir in "$(DESTDIR)$(bindir)"; do \ ++ test -z "$$dir" || $(mkdir_p) "$$dir"; \ ++ done + install: install-am + install-exec: install-exec-am + install-data: install-data-am +@@ -229,7 +262,7 @@ + clean-generic: + + distclean-generic: +- -rm -f $(CONFIG_CLEAN_FILES) ++ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + + maintainer-clean-generic: + @echo "This command is intended for maintainers to use" +@@ -246,6 +279,8 @@ + + dvi-am: + ++html: html-am ++ + info: info-am + + info-am: +@@ -279,14 +314,14 @@ + uninstall-am: uninstall-binSCRIPTS uninstall-info-am + + .PHONY: all all-am check check-am clean clean-generic distclean \ +- distclean-generic distdir dvi dvi-am info info-am install \ +- install-am install-binSCRIPTS install-data install-data-am \ +- install-exec install-exec-am install-exec-local install-info \ +- install-info-am install-man install-strip installcheck \ +- installcheck-am installdirs maintainer-clean \ +- maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ +- pdf-am ps ps-am uninstall uninstall-am uninstall-binSCRIPTS \ +- uninstall-info-am ++ distclean-generic distdir dvi dvi-am html html-am info info-am \ ++ install install-am install-binSCRIPTS install-data \ ++ install-data-am install-exec install-exec-am \ ++ install-exec-local install-info install-info-am install-man \ ++ install-strip installcheck installcheck-am installdirs \ ++ maintainer-clean maintainer-clean-generic mostlyclean \ ++ mostlyclean-generic pdf pdf-am ps ps-am uninstall uninstall-am \ ++ uninstall-binSCRIPTS uninstall-info-am + + + install-exec-local: +diff -ru ../release/mingw-utils-0.3/unix2dos/Makefile.am ./unix2dos/Makefile.am +--- ../release/mingw-utils-0.3/unix2dos/Makefile.am 2002-12-04 04:08:07.000000000 -0800 ++++ ./unix2dos/Makefile.am 2006-09-25 20:07:20.000000000 -0700 +@@ -6,7 +6,8 @@ + + docdir = $(prefix)/doc/unix2dos + +-doc_DATA = COPYING unix2dos.html ++#doc_DATA = COPYING unix2dos.html ++doc_DATA = COPYING + + unix2dos.html: unix2dos.1 + man2html $< > $@ +diff -ru ../release/mingw-utils-0.3/unix2dos/Makefile.in ./unix2dos/Makefile.in +--- ../release/mingw-utils-0.3/unix2dos/Makefile.in 2003-11-25 08:31:12.000000000 -0800 ++++ ./unix2dos/Makefile.in 2006-09-25 20:08:14.000000000 -0700 +@@ -1,8 +1,8 @@ +-# Makefile.in generated by automake 1.7.9 from Makefile.am. ++# Makefile.in generated by automake 1.9.6 from Makefile.am. + # @configure_input@ + +-# Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003 +-# Free Software Foundation, Inc. ++# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, ++# 2003, 2004, 2005 Free Software Foundation, Inc. + # This Makefile.in is free software; the Free Software Foundation + # gives unlimited permission to copy and/or distribute it, + # with or without modifications, as long as this notice is preserved. 
+@@ -14,6 +14,7 @@ + + @SET_MAKE@ + ++ + srcdir = @srcdir@ + top_srcdir = @top_srcdir@ + VPATH = @srcdir@ +@@ -21,7 +22,6 @@ + pkglibdir = $(libdir)/@PACKAGE@ + pkgincludedir = $(includedir)/@PACKAGE@ + top_builddir = .. +- + am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd + INSTALL = @INSTALL@ + install_sh_DATA = $(install_sh) -c -m 644 +@@ -35,6 +35,42 @@ + NORMAL_UNINSTALL = : + PRE_UNINSTALL = : + POST_UNINSTALL = : ++bin_PROGRAMS = unix2dos$(EXEEXT) ++subdir = unix2dos ++DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in COPYING ++ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 ++am__aclocal_m4_deps = $(top_srcdir)/configure.ac ++am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ ++ $(ACLOCAL_M4) ++mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs ++CONFIG_HEADER = $(top_builddir)/config.h ++CONFIG_CLEAN_FILES = ++am__installdirs = "$(DESTDIR)$(bindir)" "$(DESTDIR)$(docdir)" ++binPROGRAMS_INSTALL = $(INSTALL_PROGRAM) ++PROGRAMS = $(bin_PROGRAMS) ++am_unix2dos_OBJECTS = unix2dos.$(OBJEXT) ++unix2dos_OBJECTS = $(am_unix2dos_OBJECTS) ++unix2dos_LDADD = $(LDADD) ++DEFAULT_INCLUDES = -I. -I$(srcdir) -I$(top_builddir) ++depcomp = $(SHELL) $(top_srcdir)/depcomp ++am__depfiles_maybe = depfiles ++COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ ++ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) ++CCLD = $(CC) ++LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ ++SOURCES = $(unix2dos_SOURCES) ++DIST_SOURCES = $(unix2dos_SOURCES) ++am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; ++am__vpath_adj = case $$p in \ ++ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ ++ *) f=$$p;; \ ++ esac; ++am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; ++docDATA_INSTALL = $(INSTALL_DATA) ++DATA = $(doc_DATA) ++ETAGS = etags ++CTAGS = ctags ++DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) + ACLOCAL = @ACLOCAL@ + AMDEP_FALSE = @AMDEP_FALSE@ + AMDEP_TRUE = @AMDEP_TRUE@ +@@ -43,6 +79,12 @@ + AUTOHEADER = @AUTOHEADER@ + AUTOMAKE = @AUTOMAKE@ + AWK = @AWK@ ++BUILD_DRMINGW_FALSE = @BUILD_DRMINGW_FALSE@ ++BUILD_DRMINGW_TRUE = @BUILD_DRMINGW_TRUE@ ++BUILD_REDIR_FALSE = @BUILD_REDIR_FALSE@ ++BUILD_REDIR_TRUE = @BUILD_REDIR_TRUE@ ++BUILD_RES2COFF_FALSE = @BUILD_RES2COFF_FALSE@ ++BUILD_RES2COFF_TRUE = @BUILD_RES2COFF_TRUE@ + CC = @CC@ + CCDEPMODE = @CCDEPMODE@ + CFLAGS = @CFLAGS@ +@@ -101,6 +143,8 @@ + am__include = @am__include@ + am__leading_dot = @am__leading_dot@ + am__quote = @am__quote@ ++am__tar = @am__tar@ ++am__untar = @am__untar@ + bindir = @bindir@ + build_alias = @build_alias@ + datadir = @datadir@ +@@ -113,6 +157,7 @@ + libexecdir = @libexecdir@ + localstatedir = @localstatedir@ + mandir = @mandir@ ++mkdir_p = @mkdir_p@ + oldincludedir = @oldincludedir@ + prefix = @prefix@ + program_transform_name = @program_transform_name@ +@@ -120,63 +165,55 @@ + sharedstatedir = @sharedstatedir@ + sysconfdir = @sysconfdir@ + target_alias = @target_alias@ +-bin_PROGRAMS = unix2dos +- + unix2dos_SOURCES = unix2dos.c unix2dos.h +- + noinst_man_MANS = unix2dos.1 +- + docdir = $(prefix)/doc/unix2dos + +-doc_DATA = COPYING unix2dos.html +-subdir = unix2dos +-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +-mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs +-CONFIG_HEADER = $(top_builddir)/config.h +-CONFIG_CLEAN_FILES = +-bin_PROGRAMS = unix2dos$(EXEEXT) +-PROGRAMS = $(bin_PROGRAMS) +- +-am_unix2dos_OBJECTS = unix2dos.$(OBJEXT) +-unix2dos_OBJECTS = $(am_unix2dos_OBJECTS) +-unix2dos_LDADD = $(LDADD) +-unix2dos_DEPENDENCIES 
= +-unix2dos_LDFLAGS = +- +-DEFAULT_INCLUDES = -I. -I$(srcdir) -I$(top_builddir) +-depcomp = $(SHELL) $(top_srcdir)/depcomp +-am__depfiles_maybe = depfiles +-@AMDEP_TRUE@DEP_FILES = ./$(DEPDIR)/unix2dos.Po +-COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ +- $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +-CCLD = $(CC) +-LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ +-DIST_SOURCES = $(unix2dos_SOURCES) +-DATA = $(doc_DATA) +- +-DIST_COMMON = $(srcdir)/Makefile.in COPYING Makefile.am +-SOURCES = $(unix2dos_SOURCES) +- ++#doc_DATA = COPYING unix2dos.html ++doc_DATA = COPYING + all: all-am + + .SUFFIXES: + .SUFFIXES: .c .o .obj +-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ Makefile.am $(top_srcdir)/configure.ac $(ACLOCAL_M4) ++$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) ++ @for dep in $?; do \ ++ case '$(am__configure_deps)' in \ ++ *$$dep*) \ ++ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ ++ && exit 0; \ ++ exit 1;; \ ++ esac; \ ++ done; \ ++ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu unix2dos/Makefile'; \ + cd $(top_srcdir) && \ + $(AUTOMAKE) --gnu unix2dos/Makefile +-Makefile: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.in $(top_builddir)/config.status +- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe) +-binPROGRAMS_INSTALL = $(INSTALL_PROGRAM) ++.PRECIOUS: Makefile ++Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status ++ @case '$?' in \ ++ *config.status*) \ ++ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ ++ *) \ ++ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ ++ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ ++ esac; ++ ++$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) ++ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ++ ++$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) ++ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ++$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) ++ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + install-binPROGRAMS: $(bin_PROGRAMS) + @$(NORMAL_INSTALL) +- $(mkinstalldirs) $(DESTDIR)$(bindir) ++ test -z "$(bindir)" || $(mkdir_p) "$(DESTDIR)$(bindir)" + @list='$(bin_PROGRAMS)'; for p in $$list; do \ + p1=`echo $$p|sed 's/$(EXEEXT)$$//'`; \ + if test -f $$p \ + ; then \ + f=`echo "$$p1" | sed 's,^.*/,,;$(transform);s/$$/$(EXEEXT)/'`; \ +- echo " $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) $$p $(DESTDIR)$(bindir)/$$f"; \ +- $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) $$p $(DESTDIR)$(bindir)/$$f || exit 1; \ ++ echo " $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ ++ $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ + else :; fi; \ + done + +@@ -184,8 +221,8 @@ + @$(NORMAL_UNINSTALL) + @list='$(bin_PROGRAMS)'; for p in $$list; do \ + f=`echo "$$p" | sed 's,^.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/'`; \ +- echo " rm -f $(DESTDIR)$(bindir)/$$f"; \ +- rm -f $(DESTDIR)$(bindir)/$$f; \ ++ echo " rm -f '$(DESTDIR)$(bindir)/$$f'"; \ ++ rm -f "$(DESTDIR)$(bindir)/$$f"; \ + done + + clean-binPROGRAMS: +@@ -195,7 +232,7 @@ + $(LINK) $(unix2dos_LDFLAGS) $(unix2dos_OBJECTS) $(unix2dos_LDADD) $(LIBS) + + mostlyclean-compile: +- -rm -f *.$(OBJEXT) core *.core ++ -rm -f *.$(OBJEXT) + + distclean-compile: + -rm -f *.tab.c +@@ -203,54 +240,37 @@ + 
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/unix2dos.Po@am__quote@ + + .c.o: +-@am__fastdepCC_TRUE@ if $(COMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" \ +-@am__fastdepCC_TRUE@ -c -o $@ `test -f '$<' || echo '$(srcdir)/'`$<; \ +-@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; \ +-@am__fastdepCC_TRUE@ else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; \ +-@am__fastdepCC_TRUE@ fi ++@am__fastdepCC_TRUE@ if $(COMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" -c -o $@ $<; \ ++@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; fi + @AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ depfile='$(DEPDIR)/$*.Po' tmpdepfile='$(DEPDIR)/$*.TPo' @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +-@am__fastdepCC_FALSE@ $(COMPILE) -c `test -f '$<' || echo '$(srcdir)/'`$< ++@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ ++@am__fastdepCC_FALSE@ $(COMPILE) -c $< + + .c.obj: +-@am__fastdepCC_TRUE@ if $(COMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" \ +-@am__fastdepCC_TRUE@ -c -o $@ `if test -f '$<'; then $(CYGPATH_W) '$<'; else $(CYGPATH_W) '$(srcdir)/$<'; fi`; \ +-@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; \ +-@am__fastdepCC_TRUE@ else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; \ +-@am__fastdepCC_TRUE@ fi ++@am__fastdepCC_TRUE@ if $(COMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" -c -o $@ `$(CYGPATH_W) '$<'`; \ ++@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; fi + @AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ depfile='$(DEPDIR)/$*.Po' tmpdepfile='$(DEPDIR)/$*.TPo' @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +-@am__fastdepCC_FALSE@ $(COMPILE) -c `if test -f '$<'; then $(CYGPATH_W) '$<'; else $(CYGPATH_W) '$(srcdir)/$<'; fi` ++@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ ++@am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'` + uninstall-info-am: +-docDATA_INSTALL = $(INSTALL_DATA) + install-docDATA: $(doc_DATA) + @$(NORMAL_INSTALL) +- $(mkinstalldirs) $(DESTDIR)$(docdir) ++ test -z "$(docdir)" || $(mkdir_p) "$(DESTDIR)$(docdir)" + @list='$(doc_DATA)'; for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ +- f="`echo $$p | sed -e 's|^.*/||'`"; \ +- echo " $(docDATA_INSTALL) $$d$$p $(DESTDIR)$(docdir)/$$f"; \ +- $(docDATA_INSTALL) $$d$$p $(DESTDIR)$(docdir)/$$f; \ ++ f=$(am__strip_dir) \ ++ echo " $(docDATA_INSTALL) '$$d$$p' '$(DESTDIR)$(docdir)/$$f'"; \ ++ $(docDATA_INSTALL) "$$d$$p" "$(DESTDIR)$(docdir)/$$f"; \ + done + + uninstall-docDATA: + @$(NORMAL_UNINSTALL) + @list='$(doc_DATA)'; for p in $$list; do \ +- f="`echo $$p | sed -e 's|^.*/||'`"; \ +- echo " rm -f $(DESTDIR)$(docdir)/$$f"; \ +- rm -f $(DESTDIR)$(docdir)/$$f; \ ++ f=$(am__strip_dir) \ ++ echo " rm -f '$(DESTDIR)$(docdir)/$$f'"; \ ++ rm -f "$(DESTDIR)$(docdir)/$$f"; \ + done + +-ETAGS = etags +-ETAGSFLAGS = +- +-CTAGS = ctags +-CTAGSFLAGS = +- +-tags: TAGS +- + ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ +@@ -259,6 +279,7 @@ + $(AWK) ' { files[$$0] = 1; } \ + END { for (i in files) print i; }'`; \ + mkid -fID $$unique ++tags: TAGS + + TAGS: $(HEADERS) 
$(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) +@@ -270,10 +291,11 @@ + done | \ + $(AWK) ' { files[$$0] = 1; } \ + END { for (i in files) print i; }'`; \ +- test -z "$(ETAGS_ARGS)$$tags$$unique" \ +- || $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ +- $$tags $$unique +- ++ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ ++ test -n "$$unique" || unique=$$empty_fix; \ ++ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ ++ $$tags $$unique; \ ++ fi + ctags: CTAGS + CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) +@@ -296,10 +318,6 @@ + + distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags +-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +- +-top_distdir = .. +-distdir = $(top_distdir)/$(PACKAGE)-$(VERSION) + + distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \ +@@ -313,7 +331,7 @@ + dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test "$$dir" != "$$file" && test "$$dir" != "."; then \ + dir="/$$dir"; \ +- $(mkinstalldirs) "$(distdir)$$dir"; \ ++ $(mkdir_p) "$(distdir)$$dir"; \ + else \ + dir=''; \ + fi; \ +@@ -331,9 +349,10 @@ + check-am: all-am + check: check-am + all-am: Makefile $(PROGRAMS) $(DATA) +- + installdirs: +- $(mkinstalldirs) $(DESTDIR)$(bindir) $(DESTDIR)$(docdir) ++ for dir in "$(DESTDIR)$(bindir)" "$(DESTDIR)$(docdir)"; do \ ++ test -z "$$dir" || $(mkdir_p) "$$dir"; \ ++ done + install: install-am + install-exec: install-exec-am + install-data: install-data-am +@@ -353,7 +372,7 @@ + clean-generic: + + distclean-generic: +- -rm -f $(CONFIG_CLEAN_FILES) ++ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + + maintainer-clean-generic: + @echo "This command is intended for maintainers to use" +@@ -372,6 +391,8 @@ + + dvi-am: + ++html: html-am ++ + info: info-am + + info-am: +@@ -403,19 +424,20 @@ + + ps-am: + +-uninstall-am: uninstall-binPROGRAMS uninstall-docDATA uninstall-info-am ++uninstall-am: uninstall-binPROGRAMS uninstall-docDATA \ ++ uninstall-info-am + + .PHONY: CTAGS GTAGS all all-am check check-am clean clean-binPROGRAMS \ + clean-generic ctags distclean distclean-compile \ +- distclean-generic distclean-tags distdir dvi dvi-am info \ +- info-am install install-am install-binPROGRAMS install-data \ +- install-data-am install-docDATA install-exec install-exec-am \ +- install-info install-info-am install-man install-strip \ +- installcheck installcheck-am installdirs maintainer-clean \ +- maintainer-clean-generic mostlyclean mostlyclean-compile \ +- mostlyclean-generic pdf pdf-am ps ps-am tags uninstall \ +- uninstall-am uninstall-binPROGRAMS uninstall-docDATA \ +- uninstall-info-am ++ distclean-generic distclean-tags distdir dvi dvi-am html \ ++ html-am info info-am install install-am install-binPROGRAMS \ ++ install-data install-data-am install-docDATA install-exec \ ++ install-exec-am install-info install-info-am install-man \ ++ install-strip installcheck installcheck-am installdirs \ ++ maintainer-clean maintainer-clean-generic mostlyclean \ ++ mostlyclean-compile mostlyclean-generic pdf pdf-am ps ps-am \ ++ tags uninstall uninstall-am uninstall-binPROGRAMS \ ++ uninstall-docDATA uninstall-info-am + + + unix2dos.html: unix2dos.1 diff --git a/src/win32/patches/mt.patch b/src/win32/patches/mt.patch new file mode 100644 index 00000000..7b0914b4 --- /dev/null +++ b/src/win32/patches/mt.patch @@ -0,0 +1,1689 @@ +diff -ru ..\release\mt-st-0.9b/Makefile ./Makefile +--- ..\release\mt-st-0.9b/Makefile 2005-08-16 12:16:28.000000000 -0700 
++++ ./Makefile 2006-08-09 03:26:58.292856500 -0700 +@@ -1,29 +1,27 @@ ++CC= mingw32-gcc + CFLAGS= -Wall -O2 +-SBINDIR= /sbin +-BINDIR= /bin +-MANDIR= /usr/share/man ++PREFIX= ++SBINDIR= $(PREFIX)/sbin ++BINDIR= $(PREFIX)/bin ++MANDIR= $(PREFIX)/man + +-all: mt stinit ++all: mt.exe + +-mt: mt.c +- $(CC) $(CFLAGS) -o mt mt.c ++mt.exe: mt.c ++ $(CC) $(CFLAGS) -o mt.exe mt.c mtops.c + +-stinit: stinit.c ++stinit.exe: stinit.c + $(CC) $(CFLAGS) -o stinit stinit.c + +-install: mt stinit +- install -s mt $(BINDIR) ++install: mt.exe ++ install mt.exe $(BINDIR) + install -c -m 444 mt.1 $(MANDIR)/man1 + (if [ -f $(MANDIR)/man1/mt.1.gz ] ; then \ + rm -f $(MANDIR)/man1/mt.1.gz; gzip $(MANDIR)/man1/mt.1; fi) +- install -s stinit $(SBINDIR) +- install -c -m 444 stinit.8 $(MANDIR)/man8 +- (if [ -f $(MANDIR)/man8/stinit.8.gz ] ; then \ +- rm -f $(MANDIR)/man8/stinit.8.gz; gzip $(MANDIR)/man8/stinit.8; fi) + + dist: clean + (mydir=`basename \`pwd\``;\ + cd .. && tar cvvf - $$mydir | gzip -9 > $${mydir}.tar.gz) + + clean: +- rm -f *~ \#*\# *.o mt stinit ++ rm -f *~ \#*\# *.o mt.exe stinit.exe +diff -ru ..\release\mt-st-0.9b/mt.1 ./mt.1 +--- ..\release\mt-st-0.9b/mt.1 2005-08-21 11:53:50.000000000 -0700 ++++ ./mt.1 2006-08-09 03:26:58.302871100 -0700 +@@ -48,20 +48,22 @@ + files. + The tape is positioned on the first block of the next file. + .IP fsfm +-Forward space ++Forward space past + .I count +-files. +-The tape is positioned on the last block of the previous file. ++file marks, then backward space one file record. ++This leaves the tape positioned on the last block of the file that is count-1 ++files past the current file. + .IP bsf + Backward space + .I count + files. + The tape is positioned on the last block of the previous file. + .IP bsfm +-Backward space ++Backward space past + .I count +-files. +-The tape is positioned on the first block of the next file. ++file marks, then forward space one file record. ++This leaves the tape positioned on the first block of the file that is count-1 ++files before the current file. 
+ .IP asf + The tape is positioned at the beginning of the + .I count +diff -ru ..\release\mt-st-0.9b/mt.c ./mt.c +--- ..\release\mt-st-0.9b/mt.c 2005-08-21 11:48:06.000000000 -0700 ++++ ./mt.c 2006-08-09 04:00:01.093525100 -0700 +@@ -11,25 +11,35 @@ + Last Modified: Sun Aug 21 21:48:06 2005 by kai.makisara + */ + ++#include ++#include ++#include ++ ++#define O_NONBLOCK 0 ++ + #include ++#if !defined(_MSC_VER) + #include ++#endif + #include + #include + #include + #include + #include + #include +-#include + ++#include "mtops.h" + #include "mtio.h" + ++#define ioctl tape_ioctl ++ + #ifndef DEFTAPE +-#define DEFTAPE "/dev/tape" /* default tape device */ ++#define DEFTAPE "Tape0" /* default tape device */ + #endif /* DEFTAPE */ + +-#define VERSION "0.9b" ++#define VERSION "0.9b-bacula" + +-typedef int (* cmdfunc)(/* int, struct cmdef_tr *, int, char ** */); ++typedef int (* cmdfunc)(int, struct cmdef_tr *, int, char **); + + typedef struct cmdef_tr { + char *cmd_name; +@@ -143,12 +153,14 @@ + FD_RDONLY, ONE_ARG, 0}, + { "defcompression", MTSETDRVBUFFER, do_drvbuffer, MT_ST_DEF_COMPRESSION, + FD_RDONLY, ONE_ARG, 0}, ++#if 0 + { "stsetcln", MTSETDRVBUFFER, do_drvbuffer, MT_ST_SET_CLN, + FD_RDONLY, ONE_ARG, 0}, + { "sttimeout", MTSETDRVBUFFER, do_drvbuffer, MT_ST_SET_TIMEOUT, + FD_RDONLY, ONE_ARG, 0}, + { "stlongtimeout", MTSETDRVBUFFER, do_drvbuffer, MT_ST_SET_LONG_TIMEOUT, + FD_RDONLY, ONE_ARG, 0}, ++#endif + { "densities", 0, print_densities, 0, NO_FD, NO_ARGS, + 0 }, + { "setpartition", MTSETPART, do_standard, 0, FD_RDONLY, ONE_ARG, +@@ -211,13 +223,19 @@ + {0x30, "AIT-1 or MLR3"}, + {0x31, "AIT-2"}, + {0x32, "AIT-3"}, +- {0x33, "SLR6"}, ++ {0x33, "AIT-4 or SLR6"}, + {0x34, "SLR100"}, ++ {0x38, "AIT-E Turbo"}, ++ {0x39, "AIT-1 Turbo"}, ++ {0x3A, "AIT-2 Turbo"}, ++ {0x3B, "AIT-3Ex"}, + {0x40, "DLT1 40 GB, or Ultrium"}, + {0x41, "DLT 40GB, or Ultrium2"}, + {0x42, "LTO-2"}, + {0x45, "QIC-3095-MC (TR-4)"}, + {0x47, "TR-5"}, ++ {0x48, "Quantum SDLT220"}, ++ {0x49, "Quantum SDLT320"}, + {0x80, "DLT 15GB uncomp. or Ecrix"}, + {0x81, "DLT 15GB compressed"}, + {0x82, "DLT 20GB uncompressed"}, +@@ -254,20 +272,25 @@ + {"no-blklimits", MT_ST_NO_BLKLIMS, "drive doesn't support read block limits"}, + {"can-partitions",MT_ST_CAN_PARTITIONS,"drive can handle partitioned tapes"}, + {"scsi2logical", MT_ST_SCSI2LOGICAL, "logical block addresses used with SCSI-2"}, ++#if 0 + {"no-wait", MT_ST_NOWAIT, "immediate mode for rewind, etc."}, ++#endif + #ifdef MT_ST_SYSV + {"sysv", MT_ST_SYSV, "enable the SystemV semantics"}, + #endif ++#if 0 + {"cleaning", MT_ST_SET_CLN, "set the cleaning bit location and mask"}, ++#endif + {NULL, 0}}; + + static char *tape_name; /* The tape name for messages */ + + +- int ++int + main(int argc, char **argv) + { +- int mtfd, cmd_code, i, argn, len, oflags; ++ int mtfd, cmd_code, i, argn, oflags; ++ unsigned int len; + char *cmdstr; + cmdef_tr *comp, *comp2; + +@@ -344,7 +367,7 @@ + oflags = comp->cmd_fdtype == FD_RDONLY ? 
O_RDONLY : O_RDWR; + if ((comp->error_tests & ET_ONLINE) == 0) + oflags |= O_NONBLOCK; +- if ((mtfd = open(tape_name, oflags)) < 0) { ++ if ((mtfd = tape_open(tape_name, oflags, 0)) < 0) { + perror(tape_name); + exit(1); + } +@@ -368,7 +391,7 @@ + } + + if (mtfd >= 0) +- close(mtfd); ++ tape_close(mtfd); + return i; + } + +@@ -409,9 +432,9 @@ + do_standard(int mtfd, cmdef_tr *cmd, int argc, char **argv) + { + struct mtop mt_com; +- char *endp; ++ char *endp = NULL; + +- mt_com.mt_op = cmd->cmd_code; ++ mt_com.mt_op = (short)cmd->cmd_code; + mt_com.mt_count = (argc > 0 ? strtol(*argv, &endp, 0) : 1); + if (argc > 0 && endp != *argv) { + if (*endp == 'k') +@@ -464,7 +487,8 @@ + static int + do_options(int mtfd, cmdef_tr *cmd, int argc, char **argv) + { +- int i, an, len; ++ int i, an; ++ unsigned int len; + struct mtop mt_com; + + mt_com.mt_op = MTSETDRVBUFFER; +@@ -596,8 +620,10 @@ + type = "SCSI 1"; + else if (status.mt_type == MT_ISSCSI2) + type = "SCSI 2"; ++#if 0 + else if (status.mt_type == MT_ISONSTREAM_SC) + type = "OnStream SC-, DI-, DP-, or USB"; ++#endif + else + type = NULL; + if (type == NULL) { +@@ -607,7 +633,7 @@ + printf("IDE-Tape (type code 0) ?\n"); + else + printf("Unknown tape drive type (type code %ld)\n", status.mt_type); +- printf("File number=%d, block number=%d.\n", ++ printf("File number=%ld, block number=%ld.\n", + status.mt_fileno, status.mt_blkno); + printf("mt_resid: %ld, mt_erreg: 0x%lx\n", + status.mt_resid, status.mt_erreg); +@@ -617,14 +643,17 @@ + else { + printf("%s tape drive:\n", type); + if (status.mt_type == MT_ISSCSI2) +- printf("File number=%d, block number=%d, partition=%ld.\n", ++ printf("File number=%ld, block number=%ld, partition=%ld.\n", + status.mt_fileno, status.mt_blkno, (status.mt_resid & 0xff)); + else +- printf("File number=%d, block number=%d.\n", ++ printf("File number=%ld, block number=%ld.\n", + status.mt_fileno, status.mt_blkno); +- if (status.mt_type == MT_ISSCSI1 || +- status.mt_type == MT_ISSCSI2 || +- status.mt_type == MT_ISONSTREAM_SC) { ++ if (status.mt_type == MT_ISSCSI1 ++ || status.mt_type == MT_ISSCSI2 ++#if 0 ++ || status.mt_type == MT_ISONSTREAM_SC ++#endif ++ ) { + dens = (status.mt_dsreg & MT_ST_DENSITY_MASK) >> MT_ST_DENSITY_SHIFT; + density = "no translation"; + for (i=0; i < NBR_DENSITIES; i++) +@@ -666,8 +695,10 @@ + printf(" DR_OPEN"); + if (GMT_IM_REP_EN(status.mt_gstat)) + printf(" IM_REP_EN"); ++#if 0 + if (GMT_CLN(status.mt_gstat)) + printf(" CLN"); ++#endif + printf("\n"); + return 0; + } +diff -ru ..\release\mt-st-0.9b/mtio.h ./mtio.h +--- ..\release\mt-st-0.9b/mtio.h 2005-08-16 12:16:28.000000000 -0700 ++++ ./mtio.h 2006-08-09 03:26:58.352944100 -0700 +@@ -8,9 +8,7 @@ + #ifndef _LINUX_MTIO_H + #define _LINUX_MTIO_H + +-#include +-#include +-#include ++#include + + /* + * Structures and definitions for mag tape io control commands +@@ -150,6 +148,7 @@ + }; + + ++#ifdef USE_QIC02 + /* structure for MTIOCGETCONFIG/MTIOCSETCONFIG primarily intended + * as an interim solution for QIC-02 until DDI is fully implemented. 
+ */ +@@ -281,6 +280,7 @@ + * command + */ + }; ++#endif + + /* mag tape io control commands */ + #define MTIOCTOP _IOW('m', 1, struct mtop) /* do a mag tape op */ +diff -ru ..\release\mt-st-0.9b/stinit.def.examples ./stinit.def.examples +--- ..\release\mt-st-0.9b/stinit.def.examples 2005-08-16 12:16:28.000000000 -0700 ++++ ./stinit.def.examples 2006-08-09 03:26:58.362958700 -0700 +@@ -56,3 +56,169 @@ + mode3 blocksize=0 density=1 # 800 bpi + } + ++# DLT2000 / 2000XT ++manufacturer="QUANTUM" model = "DLT2000" { ++scsi2logical=1 ++can-bsr ++auto-lock=0 ++two-fms=0 ++drive-buffering=1 ++buffer-writes ++read-ahead=1 ++async-writes=1 ++can-partitions=0 ++fast-mteom=1 ++# ++# If your stinit supports the timeouts: ++timeout=3600 # 1 hour ++long-timeout=14400 # 4 hours ++# ++mode1 blocksize=0 density=0x81 # 10GB + compression on DLTtape III, 15+ with DLTtape IIIXT in 2000XT ++mode2 blocksize=0 density=0x80 # 10GB, no compression on DLTtape III, 15 with DLTtape IIIXT in 2000XT ++mode3 blocksize=0 density=0x18 # 6GB, compression not available, on DLTtape III ++mode4 blocksize=0 density=0x17 #2.6GB, compression not available, on DLTtape III ++} ++ ++# DLT4000 ++manufacturer="QUANTUM" model = "DLT4000" { ++scsi2logical=1 ++can-bsr ++auto-lock=0 ++two-fms=0 ++drive-buffering=1 ++buffer-writes ++read-ahead=1 ++async-writes=1 ++can-partitions=0 ++fast-mteom=1 ++# ++# If your stinit supports the timeouts: ++timeout=3600 # 1 hour ++long-timeout=14400 # 4 hours ++# ++# Drive is backwards compatible, use older modes (e.g. from above) as required ++mode1 blocksize=0 density=0x83 # 20GB + compression ++mode2 blocksize=0 density=0x82 # 20GB, no compression ++mode3 blocksize=0 density=0x81 # 10GB + compression (DLT2000 mode) with DLTtape III, 15+ with DLTtape IIIXT in 2000XT ++mode4 blocksize=0 density=0x80 # 10GB, no compression (DLT2000 mode) with DLTtape III, 15 with DLTtape IIIXT in 2000XT ++} ++ ++# DLT7000 ++manufacturer="QUANTUM" model = "DLT7000" { ++scsi2logical=1 ++can-bsr ++auto-lock=0 ++two-fms=0 ++drive-buffering=1 ++buffer-writes ++read-ahead=1 ++async-writes=1 ++can-partitions=0 ++fast-mteom=1 ++# ++# If your stinit supports the timeouts: ++timeout=3600 # 1 hour ++long-timeout=14400 # 4 hours ++# ++# Drive is backwards compatible, use older modes (e.g. from above) as required. ++mode1 blocksize=0 density=0x85 # 35GB + compression ++mode2 blocksize=0 density=0x84 # 35GB, no compression ++mode3 blocksize=0 density=0x83 # 20GB + compression (DLT4000 mode) ++mode4 blocksize=0 density=0x82 # 20GB, no compression (DLT4000 mode) ++} ++ ++# DLT8000 ++manufacturer="QUANTUM" model = "DLT8000" { ++scsi2logical=1 ++can-bsr=1 ++auto-lock=0 ++two-fms=0 ++drive-buffering=1 ++buffer-writes ++read-ahead=1 ++async-writes=1 ++can-partitions=0 ++fast-mteom=1 ++# ++# If your stinit supports the timeouts: ++timeout=3600 # 1 hour ++long-timeout=14400 # 4 hours ++# ++# Drive is backwards compatible to DLT7000, use older modes (e.g. from above) as required. Modes <10GB (<0x19) not supported! 
++mode1 blocksize=0 density=0x89 # 40GB + compression ++mode2 blocksize=0 density=0x88 # 40GB, no compression ++mode3 blocksize=0 density=0x85 # 35GB + compression (DLT7000 mode) ++mode4 blocksize=0 density=0x84 # 35GB, no compression (DLT7000 mode) ++} ++ ++ ++# SDLT220 ++manufacturer="QUANTUM" model = "SuperDLT1" { ++scsi2logical=1 ++can-bsr=1 ++auto-lock=0 ++two-fms=0 ++drive-buffering=1 ++buffer-writes ++read-ahead=1 ++async-writes=1 ++can-partitions=0 ++fast-mteom=1 ++# ++# If your stinit supports the timeouts: ++timeout=3600 # 1 hour ++long-timeout=14400 # 4 hours ++# ++# Drive is backwards read compatible to DLT4000/7000/8000. Mode settings are only required for writing, so no need to define any other modes here. ++mode1 blocksize=0 density=0x48 compression=1 # 110 GB + compression ++mode2 blocksize=0 density=0x48 compression=0 # 110 GB, no ompression ++} ++ ++# SDLT320 ++manufacturer="QUANTUM" model = "SDLT320" { ++scsi2logical=1 ++can-bsr=1 ++auto-lock=0 ++two-fms=0 ++drive-buffering=1 ++buffer-writes ++read-ahead=1 ++async-writes=1 ++can-partitions=0 ++fast-mteom=1 ++# ++# If your stinit supports the timeouts: ++timeout=3600 # 1 hour ++long-timeout=14400 # 4 hours ++# ++# Drive is backwards write compatible to SDLT220 and read compatible to DLT4000/7000/8000. Mode settings are only required for writing, so we need only the SDL220/320 modes here ++mode1 blocksize=0 density=0x49 compression=1 # 160 GB + compression ++mode2 blocksize=0 density=0x49 compression=0 # 160 GB, no ompression ++mode3 blocksize=0 density=0x48 compression=1 # 110 GB + compression ++mode4 blocksize=0 density=0x48 compression=0 # 110 GB, no ompression ++} ++ ++# SDLT600 ++manufacturer="QUANTUM" model = "SDLT600" { ++scsi2logical=1 ++can-bsr=1 ++auto-lock=0 ++two-fms=0 ++drive-buffering=1 ++buffer-writes ++read-ahead=1 ++async-writes=1 ++can-partitions=0 ++fast-mteom=1 ++# ++# If your stinit supports the timeouts: ++timeout=3600 # 1 hour ++long-timeout=14400 # 4 hours ++# ++# Drive is backwards read compatible to SDLT220/320 and VS160. Mode settings are only required for writing, so we need only the native SDLT600 mode here. ++mode1 blocksize=0 density=0x4a compression=1 # 300 GB + compression ++mode2 blocksize=0 density=0x4a compression=0 # 300 GB, no ompression ++mode3 blocksize=0 density=0x4a compression=1 # 300 GB + compression ++mode4 blocksize=0 density=0x4a compression=0 # 300 GB, no ompression ++} ++ + +--- /dev/null 1969-12-31 16:00:00.000000000 -0800 ++++ mtops.c 2006-08-09 04:03:09.307917500 -0700 +@@ -0,0 +1,1163 @@ ++/* ++ * mtops.cpp - Emulate the Linux st (scsi tape) driver on Microsoft Windows. ++ * ++ * Author: Robert Nelson, May, 2006 ++ * ++ * Version $Id: mt.patch 3802 2006-12-14 11:41:02Z kerns $ ++ * ++ * Copyright (C) 2006 Free Software Foundation Europe e.V. ++ * ++ * This file was contributed to the Bacula project by Robert Nelson. ++ * ++ * Robert Nelson has been granted a perpetual, worldwide, ++ * non-exclusive, no-charge, royalty-free, irrevocable copyright ++ * license to reproduce, prepare derivative works of, publicly ++ * display, publicly perform, sublicense, and distribute the original ++ * work contributed by Robert Nelson to the Bacula project in source ++ * or object form. ++ * ++ * If you wish to license contributions from Robert Nelson ++ * under an alternate open source license please contact ++ * Robert Nelson . ++ */ ++/* ++ Copyright (C) 2006 Free Software Foundation Europe e.V. 
++ ++ This program is free software; you can redistribute it and/or ++ modify it under the terms of the GNU General Public License ++ version 2 as amended with additional clauses defined in the ++ file LICENSE in the main source directory. ++ ++ This program is distributed in the hope that it will be useful, ++ but WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ the file LICENSE for additional details. ++ ++ */ ++ ++#include ++#include ++ ++#include ++#include ++ ++#include "mtops.h" ++#include "mtio.h" ++#if defined(_MSC_VER) ++#include ++#include ++#else ++#include ++#include ++#endif ++ ++#ifndef __cplusplus ++typedef char bool; ++#define true 1 ++#define false 0 ++#endif ++ ++// ++// SCSI bus status codes. ++// ++ ++#define SCSISTAT_GOOD 0x00 ++#define SCSISTAT_CHECK_CONDITION 0x02 ++#define SCSISTAT_CONDITION_MET 0x04 ++#define SCSISTAT_BUSY 0x08 ++#define SCSISTAT_INTERMEDIATE 0x10 ++#define SCSISTAT_INTERMEDIATE_COND_MET 0x14 ++#define SCSISTAT_RESERVATION_CONFLICT 0x18 ++#define SCSISTAT_COMMAND_TERMINATED 0x22 ++#define SCSISTAT_QUEUE_FULL 0x28 ++ ++/* Forward referenced functions */ ++ ++extern char my_name[]; ++extern int debug_level; ++ ++inline SHORT Read16BitSigned(const unsigned char *pValue) ++{ ++ return (SHORT)(((USHORT)pValue[0] << 8) | (USHORT)pValue[1]); ++} ++ ++inline USHORT Read16BitUnsigned(const unsigned char *pValue) ++{ ++ return (((USHORT)pValue[0] << 8) | (USHORT)pValue[1]); ++} ++ ++inline LONG Read24BitSigned(const unsigned char *pValue) ++{ ++ return ((LONG)(((ULONG)pValue[0] << 16) | ((ULONG)pValue[1] << 8) | ++ (ULONG)pValue[2])) << 8 >> 8; ++} ++ ++inline ULONG Read24BitUnsigned(const unsigned char *pValue) ++{ ++ return ((ULONG)pValue[0] << 16) | ((ULONG)pValue[1] << 8) | (ULONG)pValue[2]; ++} ++ ++inline LONG Read32BitSigned(const unsigned char *pValue) ++{ ++ return (LONG)(((ULONG)pValue[0] << 24) | ((ULONG)pValue[1] << 16) | ++ ((ULONG)pValue[2] << 8) | (ULONG)pValue[3]); ++} ++ ++inline ULONG Read32BitUnsigned(const unsigned char *pValue) ++{ ++ return (((ULONG)pValue[0] << 24) | ((ULONG)pValue[1] << 16) | ++ ((ULONG)pValue[2] << 8) | (ULONG)pValue[3]); ++} ++ ++inline LONGLONG Read64BitSigned(const unsigned char *pValue) ++{ ++ return (LONGLONG)(((ULONGLONG)pValue[0] << 56) | ((ULONGLONG)pValue[1] << 48) | ++ ((ULONGLONG)pValue[2] << 40) | ((ULONGLONG)pValue[3] << 32) | ++ ((ULONGLONG)pValue[4] << 24) | ((ULONGLONG)pValue[5] << 16) | ++ ((ULONGLONG)pValue[6] << 8) | (ULONGLONG)pValue[7]); ++} ++ ++inline ULONGLONG Read64BitUnsigned(const unsigned char *pValue) ++{ ++ return (LONGLONG)(((ULONGLONG)pValue[0] << 56) | ((ULONGLONG)pValue[1] << 48) | ++ ((ULONGLONG)pValue[2] << 40) | ((ULONGLONG)pValue[3] << 32) | ++ ((ULONGLONG)pValue[4] << 24) | ((ULONGLONG)pValue[5] << 16) | ++ ((ULONGLONG)pValue[6] << 8) | (ULONGLONG)pValue[7]); ++} ++ ++typedef struct _TAPE_POSITION_INFO ++{ ++ UCHAR AtPartitionStart:1; ++ UCHAR AtPartitionEnd:1; ++ UCHAR PartitionBlockValid:1; ++ UCHAR FileSetValid:1; ++ UCHAR :4; ++ UCHAR Reserved1[3]; ++ ULONG Partition; ++ ULONGLONG BlockNumber; ++ ULONGLONG FileNumber; ++ ULONGLONG SetNumber; ++} TAPE_POSITION_INFO, *PTAPE_POSITION_INFO; ++ ++typedef struct _TAPE_HANDLE_INFO ++{ ++ HANDLE OSHandle; ++ bool bEOD; ++ bool bEOF; ++ bool bEOT; ++ bool bBlockValid; ++ ULONG FeaturesLow; ++ ULONG FeaturesHigh; ++ ULONG ulFile; ++ ULONGLONG ullFileStart; ++ ++} TAPE_HANDLE_INFO, *PTAPE_HANDLE_INFO; ++ ++TAPE_HANDLE_INFO TapeHandleTable[] = ++{ ++ { 
INVALID_HANDLE_VALUE }, ++ { INVALID_HANDLE_VALUE }, ++ { INVALID_HANDLE_VALUE }, ++ { INVALID_HANDLE_VALUE }, ++ { INVALID_HANDLE_VALUE }, ++ { INVALID_HANDLE_VALUE }, ++ { INVALID_HANDLE_VALUE }, ++ { INVALID_HANDLE_VALUE }, ++ { INVALID_HANDLE_VALUE }, ++ { INVALID_HANDLE_VALUE }, ++ { INVALID_HANDLE_VALUE }, ++ { INVALID_HANDLE_VALUE }, ++ { INVALID_HANDLE_VALUE }, ++ { INVALID_HANDLE_VALUE }, ++ { INVALID_HANDLE_VALUE }, ++ { INVALID_HANDLE_VALUE } ++}; ++ ++#define NUMBER_HANDLE_ENTRIES (sizeof(TapeHandleTable) / sizeof(TapeHandleTable[0])) ++ ++DWORD GetTapePositionInfo(HANDLE hDevice, PTAPE_POSITION_INFO TapePositionInfo); ++DWORD GetDensityBlockSize(HANDLE hDevice, DWORD *pdwDensity, DWORD *pdwBlockSize); ++ ++int tape_get(int fd, struct mtget *mt_get); ++int tape_op(int fd, struct mtop *mt_com); ++int tape_pos(int fd, struct mtpos *mt_pos); ++ ++int ++tape_open(const char *file, int flags, int mode) ++{ ++ HANDLE hDevice = INVALID_HANDLE_VALUE; ++ char szDeviceName[256] = "\\\\.\\"; ++ int idxFile; ++ DWORD dwResult; ++ ++ for (idxFile = 0; idxFile < (int)NUMBER_HANDLE_ENTRIES; idxFile++) { ++ if (TapeHandleTable[idxFile].OSHandle == INVALID_HANDLE_VALUE) { ++ break; ++ } ++ } ++ ++ if (idxFile >= (int)NUMBER_HANDLE_ENTRIES) { ++ return EMFILE; ++ } ++ ++ memset(&TapeHandleTable[idxFile], 0, sizeof(TapeHandleTable[idxFile])); ++ ++ if (file[0] != '\\' && file[0] != '/') { ++ strncpy(&szDeviceName[4], file, sizeof(szDeviceName) - 4); ++ } else { ++ strncpy(&szDeviceName[0], file, sizeof(szDeviceName)); ++ } ++ ++ szDeviceName[sizeof(szDeviceName) - 1] = '\0'; ++ ++ hDevice = CreateFile(szDeviceName, FILE_ALL_ACCESS, 0, NULL, OPEN_EXISTING, 0, NULL); ++ ++ if (hDevice != INVALID_HANDLE_VALUE) { ++ PTAPE_HANDLE_INFO pHandleInfo = &TapeHandleTable[idxFile]; ++ ++ memset(pHandleInfo, 0, sizeof(*pHandleInfo)); ++ ++ pHandleInfo->OSHandle = hDevice; ++ ++ TAPE_GET_DRIVE_PARAMETERS TapeDriveParameters; ++ DWORD dwSize = sizeof(TapeDriveParameters); ++ ++ dwResult = GetTapeParameters(pHandleInfo->OSHandle, GET_TAPE_DRIVE_INFORMATION, &dwSize, &TapeDriveParameters); ++ if (dwResult == NO_ERROR) { ++ pHandleInfo->FeaturesLow = TapeDriveParameters.FeaturesLow; ++ pHandleInfo->FeaturesHigh = TapeDriveParameters.FeaturesHigh; ++ } ++ ++ TAPE_POSITION_INFO TapePositionInfo; ++ ++ dwResult = GetTapePositionInfo(pHandleInfo->OSHandle, &TapePositionInfo); ++ ++ if (dwResult == NO_ERROR) { ++ if (TapePositionInfo.AtPartitionStart || TapePositionInfo.AtPartitionEnd || ++ (TapePositionInfo.PartitionBlockValid && TapePositionInfo.BlockNumber == 0)) { ++ pHandleInfo->ulFile = 0; ++ pHandleInfo->bBlockValid = true; ++ pHandleInfo->ullFileStart = 0; ++ } else if (TapePositionInfo.FileSetValid) { ++ pHandleInfo->ulFile = (ULONG)TapePositionInfo.FileNumber; ++ } ++ } ++ } else { ++ DWORD dwError = GetLastError(); ++ ++ switch (dwError) { ++ case ERROR_FILE_NOT_FOUND: ++ case ERROR_PATH_NOT_FOUND: ++ errno = ENOENT; ++ break; ++ ++ case ERROR_TOO_MANY_OPEN_FILES: ++ errno = EMFILE; ++ break; ++ ++ default: ++ case ERROR_ACCESS_DENIED: ++ case ERROR_SHARING_VIOLATION: ++ case ERROR_LOCK_VIOLATION: ++ case ERROR_INVALID_NAME: ++ errno = EACCES; ++ break; ++ ++ case ERROR_FILE_EXISTS: ++ errno = EEXIST; ++ break; ++ ++ case ERROR_INVALID_PARAMETER: ++ errno = EINVAL; ++ break; ++ } ++ ++ return(int) -1; ++ } ++ ++ return (int)idxFile + 3; ++} ++ ++int ++tape_read(int fd, void *buffer, unsigned int count) ++{ ++ if (buffer == NULL) { ++ errno = EINVAL; ++ return -1; ++ } ++ ++ if (fd < 3 || fd >= 
(int)(NUMBER_HANDLE_ENTRIES + 3) || ++ TapeHandleTable[fd - 3].OSHandle == INVALID_HANDLE_VALUE) ++ { ++ errno = EBADF; ++ return -1; ++ } ++ ++ PTAPE_HANDLE_INFO pHandleInfo = &TapeHandleTable[fd - 3]; ++ ++ DWORD bytes_read; ++ BOOL bResult; ++ ++ bResult = ReadFile(pHandleInfo->OSHandle, buffer, count, &bytes_read, NULL); ++ ++ if (bResult) { ++ pHandleInfo->bEOF = false; ++ pHandleInfo->bEOT = false; ++ pHandleInfo->bEOD = false; ++ return bytes_read; ++ } else { ++ int iReturnValue = 0; ++ DWORD last_error = GetLastError(); ++ ++ switch (last_error) { ++ ++ case ERROR_FILEMARK_DETECTED: ++ pHandleInfo->bEOF = true; ++ break; ++ ++ case ERROR_END_OF_MEDIA: ++ pHandleInfo->bEOT = true; ++ break; ++ ++ case ERROR_NO_MEDIA_IN_DRIVE: ++ pHandleInfo->bEOF = false; ++ pHandleInfo->bEOT = false; ++ pHandleInfo->bEOD = false; ++ errno = ENOMEDIUM; ++ iReturnValue = -1; ++ break; ++ ++ case ERROR_NO_DATA_DETECTED: ++ pHandleInfo->bEOD = true; ++ break; ++ ++ case ERROR_INVALID_HANDLE: ++ case ERROR_ACCESS_DENIED: ++ case ERROR_LOCK_VIOLATION: ++ errno = EBADF; ++ iReturnValue = -1; ++ break; ++ ++ default: ++ pHandleInfo->bEOF = false; ++ pHandleInfo->bEOT = false; ++ pHandleInfo->bEOD = false; ++ errno = EIO; ++ iReturnValue = -1; ++ } ++ ++ return iReturnValue; ++ } ++} ++ ++int ++tape_write(int fd, const void *buffer, unsigned int count) ++{ ++ if (buffer == NULL) { ++ errno = EINVAL; ++ return -1; ++ } ++ ++ if (fd < 3 || fd >= (int)(NUMBER_HANDLE_ENTRIES + 3) || TapeHandleTable[fd - 3].OSHandle == INVALID_HANDLE_VALUE) ++ { ++ errno = EBADF; ++ return -1; ++ } ++ ++ PTAPE_HANDLE_INFO pHandleInfo = &TapeHandleTable[fd - 3]; ++ ++ DWORD bytes_written; ++ BOOL bResult; ++ ++ bResult = WriteFile(pHandleInfo->OSHandle, buffer, count, &bytes_written, NULL); ++ ++ if (bResult) { ++ pHandleInfo->bEOF = false; ++ pHandleInfo->bEOT = false; ++ return bytes_written; ++ } else { ++ DWORD last_error = GetLastError(); ++ ++ switch (last_error) { ++ case ERROR_END_OF_MEDIA: ++ case ERROR_DISK_FULL: ++ pHandleInfo->bEOT = true; ++ errno = ENOSPC; ++ break; ++ ++ case ERROR_NO_MEDIA_IN_DRIVE: ++ pHandleInfo->bEOF = false; ++ pHandleInfo->bEOT = false; ++ pHandleInfo->bEOD = false; ++ errno = ENOMEDIUM; ++ break; ++ ++ case ERROR_INVALID_HANDLE: ++ case ERROR_ACCESS_DENIED: ++ errno = EBADF; ++ break; ++ ++ default: ++ pHandleInfo->bEOF = false; ++ pHandleInfo->bEOT = false; ++ pHandleInfo->bEOD = false; ++ errno = EIO; ++ break; ++ } ++ return -1; ++ } ++} ++ ++int ++tape_close(int fd) ++{ ++ if (fd < 3 || fd >= (int)(NUMBER_HANDLE_ENTRIES + 3) || ++ TapeHandleTable[fd - 3].OSHandle == INVALID_HANDLE_VALUE) { ++ errno = EBADF; ++ return -1; ++ } ++ ++ PTAPE_HANDLE_INFO pHandleInfo = &TapeHandleTable[fd - 3]; ++ ++ if (!CloseHandle(pHandleInfo->OSHandle)) { ++ pHandleInfo->OSHandle = INVALID_HANDLE_VALUE; ++ errno = EBADF; ++ return -1; ++ } ++ ++ pHandleInfo->OSHandle = INVALID_HANDLE_VALUE; ++ ++ return 0; ++} ++ ++int ++tape_ioctl(int fd, unsigned long int request, ...) 
++{ ++ va_list argp; ++ int result; ++ ++ va_start(argp, request); ++ ++ switch (request) { ++ case MTIOCTOP: ++ result = tape_op(fd, va_arg(argp, struct mtop *)); ++ break; ++ ++ case MTIOCGET: ++ result = tape_get(fd, va_arg(argp, struct mtget *)); ++ break; ++ ++ case MTIOCPOS: ++ result = tape_pos(fd, va_arg(argp, struct mtpos *)); ++ break; ++ ++ default: ++ errno = ENOTTY; ++ result = -1; ++ break; ++ } ++ ++ va_end(argp); ++ ++ return result; ++} ++ ++int tape_op(int fd, struct mtop *mt_com) ++{ ++ DWORD result = NO_ERROR; ++ int index; ++ ++ if (fd < 3 || fd >= (int)(NUMBER_HANDLE_ENTRIES + 3) || ++ TapeHandleTable[fd - 3].OSHandle == INVALID_HANDLE_VALUE) ++ { ++ errno = EBADF; ++ return -1; ++ } ++ ++ PTAPE_HANDLE_INFO pHandleInfo = &TapeHandleTable[fd - 3]; ++ ++ switch (mt_com->mt_op) ++ { ++ case MTRESET: ++ case MTNOP: ++ case MTSETDRVBUFFER: ++ break; ++ ++ default: ++ case MTRAS1: ++ case MTRAS2: ++ case MTRAS3: ++ case MTSETDENSITY: ++ errno = ENOTTY; ++ result = (DWORD)-1; ++ break; ++ ++ case MTFSF: ++ for (index = 0; index < mt_com->mt_count; index++) { ++ result = SetTapePosition(pHandleInfo->OSHandle, TAPE_SPACE_FILEMARKS, 0, 1, 0, FALSE); ++ if (result == NO_ERROR) { ++ pHandleInfo->ulFile++; ++ pHandleInfo->bEOF = true; ++ pHandleInfo->bEOT = false; ++ } ++ } ++ break; ++ ++ case MTBSF: ++ for (index = 0; index < mt_com->mt_count; index++) { ++ result = SetTapePosition(pHandleInfo->OSHandle, TAPE_SPACE_FILEMARKS, 0, (DWORD)-1, ~0, FALSE); ++ if (result == NO_ERROR) { ++ pHandleInfo->ulFile--; ++ pHandleInfo->bBlockValid = false; ++ pHandleInfo->bEOD = false; ++ pHandleInfo->bEOF = false; ++ pHandleInfo->bEOT = false; ++ } ++ } ++ break; ++ ++ case MTFSR: ++ result = SetTapePosition(pHandleInfo->OSHandle, TAPE_SPACE_RELATIVE_BLOCKS, 0, mt_com->mt_count, 0, FALSE); ++ if (result == NO_ERROR) { ++ pHandleInfo->bEOD = false; ++ pHandleInfo->bEOF = false; ++ pHandleInfo->bEOT = false; ++ } else if (result == ERROR_FILEMARK_DETECTED) { ++ pHandleInfo->bEOF = true; ++ } ++ break; ++ ++ case MTBSR: ++ result = SetTapePosition(pHandleInfo->OSHandle, TAPE_SPACE_RELATIVE_BLOCKS, 0, -mt_com->mt_count, ~0, FALSE); ++ if (result == NO_ERROR) { ++ pHandleInfo->bEOD = false; ++ pHandleInfo->bEOF = false; ++ pHandleInfo->bEOT = false; ++ } else if (result == ERROR_FILEMARK_DETECTED) { ++ pHandleInfo->ulFile--; ++ pHandleInfo->bBlockValid = false; ++ pHandleInfo->bEOD = false; ++ pHandleInfo->bEOF = false; ++ pHandleInfo->bEOT = false; ++ } ++ break; ++ ++ case MTWEOF: ++ result = WriteTapemark(pHandleInfo->OSHandle, TAPE_FILEMARKS, mt_com->mt_count, FALSE); ++ if (result == NO_ERROR) { ++ pHandleInfo->bEOF = true; ++ pHandleInfo->bEOT = false; ++ pHandleInfo->ulFile += mt_com->mt_count; ++ pHandleInfo->bBlockValid = true; ++ pHandleInfo->ullFileStart = 0; ++ } ++ break; ++ ++ case MTREW: ++ result = SetTapePosition(pHandleInfo->OSHandle, TAPE_REWIND, 0, 0, 0, FALSE); ++ if (result == NO_ERROR) { ++ pHandleInfo->bEOD = false; ++ pHandleInfo->bEOF = false; ++ pHandleInfo->bEOT = false; ++ pHandleInfo->ulFile = 0; ++ pHandleInfo->bBlockValid = true; ++ pHandleInfo->ullFileStart = 0; ++ } ++ break; ++ ++ case MTOFFL: ++ result = PrepareTape(pHandleInfo->OSHandle, TAPE_UNLOAD, FALSE); ++ if (result == NO_ERROR) { ++ pHandleInfo->bEOD = false; ++ pHandleInfo->bEOF = false; ++ pHandleInfo->bEOT = false; ++ pHandleInfo->ulFile = 0; ++ pHandleInfo->ullFileStart = 0; ++ } ++ break; ++ ++ case MTRETEN: ++ result = PrepareTape(pHandleInfo->OSHandle, TAPE_TENSION, FALSE); ++ if (result == 
NO_ERROR) { ++ pHandleInfo->bEOD = false; ++ pHandleInfo->bEOF = false; ++ pHandleInfo->bEOT = false; ++ pHandleInfo->ulFile = 0; ++ pHandleInfo->bBlockValid = true; ++ pHandleInfo->ullFileStart = 0; ++ } ++ break; ++ ++ case MTBSFM: ++ for (index = 0; index < mt_com->mt_count; index++) { ++ result = SetTapePosition(pHandleInfo->OSHandle, TAPE_SPACE_FILEMARKS, 0, (DWORD)-1, ~0, FALSE); ++ if (result == NO_ERROR) { ++ result = SetTapePosition(pHandleInfo->OSHandle, TAPE_SPACE_FILEMARKS, 0, 1, 0, FALSE); ++ pHandleInfo->bEOD = false; ++ pHandleInfo->bEOF = false; ++ pHandleInfo->bEOT = false; ++ } ++ } ++ break; ++ ++ case MTFSFM: ++ for (index = 0; index < mt_com->mt_count; index++) { ++ result = SetTapePosition(pHandleInfo->OSHandle, TAPE_SPACE_FILEMARKS, 0, mt_com->mt_count, 0, FALSE); ++ if (result == NO_ERROR) { ++ result = SetTapePosition(pHandleInfo->OSHandle, TAPE_SPACE_FILEMARKS, 0, (DWORD)-1, ~0, FALSE); ++ pHandleInfo->bEOD = false; ++ pHandleInfo->bEOF = false; ++ pHandleInfo->bEOT = false; ++ } ++ } ++ break; ++ ++ case MTEOM: ++ for ( ; ; ) { ++ result = SetTapePosition(pHandleInfo->OSHandle, TAPE_SPACE_FILEMARKS, 0, 1, 0, FALSE); ++ if (result != NO_ERROR) { ++ pHandleInfo->bEOF = false; ++ ++ if (result == ERROR_END_OF_MEDIA) { ++ pHandleInfo->bEOD = true; ++ pHandleInfo->bEOT = true; ++ return 0; ++ } ++ if (result == ERROR_NO_DATA_DETECTED) { ++ pHandleInfo->bEOD = true; ++ pHandleInfo->bEOT = false; ++ return 0; ++ } ++ break; ++ } else { ++ pHandleInfo->bEOF = true; ++ pHandleInfo->ulFile++; ++ } ++ } ++ break; ++ ++ case MTERASE: ++ result = EraseTape(pHandleInfo->OSHandle, TAPE_ERASE_LONG, FALSE); ++ if (result == NO_ERROR) { ++ pHandleInfo->bEOD = true; ++ pHandleInfo->bEOF = false; ++ pHandleInfo->bEOT = false; ++ pHandleInfo->ulFile = 0; ++ pHandleInfo->bBlockValid = true; ++ pHandleInfo->ullFileStart = 0; ++ } ++ break; ++ ++ case MTSETBLK: ++ { ++ TAPE_SET_MEDIA_PARAMETERS SetMediaParameters; ++ ++ SetMediaParameters.BlockSize = mt_com->mt_count; ++ result = SetTapeParameters(pHandleInfo->OSHandle, SET_TAPE_MEDIA_INFORMATION, &SetMediaParameters); ++ } ++ break; ++ ++ case MTSEEK: ++ { ++ TAPE_POSITION_INFO TapePositionInfo; ++ ++ result = SetTapePosition(pHandleInfo->OSHandle, TAPE_ABSOLUTE_BLOCK, 0, mt_com->mt_count, 0, FALSE); ++ ++ memset(&TapePositionInfo, 0, sizeof(TapePositionInfo)); ++ DWORD dwPosResult = GetTapePositionInfo(pHandleInfo->OSHandle, &TapePositionInfo); ++ if (dwPosResult == NO_ERROR && TapePositionInfo.FileSetValid) { ++ pHandleInfo->ulFile = (ULONG)TapePositionInfo.FileNumber; ++ } else { ++ pHandleInfo->ulFile = ~0U; ++ } ++ } ++ break; ++ ++ case MTTELL: ++ { ++ DWORD partition; ++ DWORD offset; ++ DWORD offsetHi; ++ ++ result = GetTapePosition(pHandleInfo->OSHandle, TAPE_ABSOLUTE_BLOCK, &partition, &offset, &offsetHi); ++ if (result == NO_ERROR) { ++ return offset; ++ } ++ } ++ break; ++ ++ case MTFSS: ++ result = SetTapePosition(pHandleInfo->OSHandle, TAPE_SPACE_SETMARKS, 0, mt_com->mt_count, 0, FALSE); ++ break; ++ ++ case MTBSS: ++ result = SetTapePosition(pHandleInfo->OSHandle, TAPE_SPACE_SETMARKS, 0, -mt_com->mt_count, ~0, FALSE); ++ break; ++ ++ case MTWSM: ++ result = WriteTapemark(pHandleInfo->OSHandle, TAPE_SETMARKS, mt_com->mt_count, FALSE); ++ break; ++ ++ case MTLOCK: ++ result = PrepareTape(pHandleInfo->OSHandle, TAPE_LOCK, FALSE); ++ break; ++ ++ case MTUNLOCK: ++ result = PrepareTape(pHandleInfo->OSHandle, TAPE_UNLOCK, FALSE); ++ break; ++ ++ case MTLOAD: ++ result = PrepareTape(pHandleInfo->OSHandle, TAPE_LOAD, FALSE); ++ 
break; ++ ++ case MTUNLOAD: ++ result = PrepareTape(pHandleInfo->OSHandle, TAPE_UNLOAD, FALSE); ++ break; ++ ++ case MTCOMPRESSION: ++ { ++ TAPE_GET_DRIVE_PARAMETERS GetDriveParameters; ++ TAPE_SET_DRIVE_PARAMETERS SetDriveParameters; ++ DWORD size; ++ ++ size = sizeof(GetDriveParameters); ++ ++ result = GetTapeParameters(pHandleInfo->OSHandle, GET_TAPE_DRIVE_INFORMATION, &size, &GetDriveParameters); ++ ++ if (result == NO_ERROR) ++ { ++ SetDriveParameters.ECC = GetDriveParameters.ECC; ++ SetDriveParameters.Compression = (BOOLEAN)mt_com->mt_count; ++ SetDriveParameters.DataPadding = GetDriveParameters.DataPadding; ++ SetDriveParameters.ReportSetmarks = GetDriveParameters.ReportSetmarks; ++ SetDriveParameters.EOTWarningZoneSize = GetDriveParameters.EOTWarningZoneSize; ++ ++ result = SetTapeParameters(pHandleInfo->OSHandle, SET_TAPE_DRIVE_INFORMATION, &SetDriveParameters); ++ } ++ } ++ break; ++ ++ case MTSETPART: ++ result = SetTapePosition(pHandleInfo->OSHandle, TAPE_LOGICAL_BLOCK, mt_com->mt_count, 0, 0, FALSE); ++ break; ++ ++ case MTMKPART: ++ if (mt_com->mt_count == 0) ++ { ++ result = CreateTapePartition(pHandleInfo->OSHandle, TAPE_INITIATOR_PARTITIONS, 1, 0); ++ } ++ else ++ { ++ result = CreateTapePartition(pHandleInfo->OSHandle, TAPE_INITIATOR_PARTITIONS, 2, mt_com->mt_count); ++ } ++ break; ++ } ++ ++ if ((result == NO_ERROR && pHandleInfo->bEOF) || ++ (result == ERROR_FILEMARK_DETECTED && mt_com->mt_op == MTFSR)) { ++ ++ TAPE_POSITION_INFO TapePositionInfo; ++ ++ if (GetTapePositionInfo(pHandleInfo->OSHandle, &TapePositionInfo) == NO_ERROR) { ++ pHandleInfo->bBlockValid = true; ++ pHandleInfo->ullFileStart = TapePositionInfo.BlockNumber; ++ } ++ } ++ ++ switch (result) { ++ case NO_ERROR: ++ case (DWORD)-1: /* Error has already been translated into errno */ ++ break; ++ ++ default: ++ case ERROR_FILEMARK_DETECTED: ++ errno = EIO; ++ break; ++ ++ case ERROR_END_OF_MEDIA: ++ pHandleInfo->bEOT = true; ++ errno = EIO; ++ break; ++ ++ case ERROR_NO_DATA_DETECTED: ++ pHandleInfo->bEOD = true; ++ errno = EIO; ++ break; ++ ++ case ERROR_NO_MEDIA_IN_DRIVE: ++ pHandleInfo->bEOF = false; ++ pHandleInfo->bEOT = false; ++ pHandleInfo->bEOD = false; ++ errno = ENOMEDIUM; ++ break; ++ ++ case ERROR_INVALID_HANDLE: ++ case ERROR_ACCESS_DENIED: ++ case ERROR_LOCK_VIOLATION: ++ errno = EBADF; ++ break; ++ } ++ ++ return result == NO_ERROR ? 0 : -1; ++} ++ ++int tape_get(int fd, struct mtget *mt_get) ++{ ++ TAPE_POSITION_INFO pos_info; ++ BOOL result; ++ ++ if (fd < 3 || fd >= (int)(NUMBER_HANDLE_ENTRIES + 3) || ++ TapeHandleTable[fd - 3].OSHandle == INVALID_HANDLE_VALUE) { ++ errno = EBADF; ++ return -1; ++ } ++ ++ PTAPE_HANDLE_INFO pHandleInfo = &TapeHandleTable[fd - 3]; ++ ++ if (GetTapePositionInfo(pHandleInfo->OSHandle, &pos_info) != NO_ERROR) { ++ return -1; ++ } ++ ++ DWORD density = 0; ++ DWORD blocksize = 0; ++ ++ result = GetDensityBlockSize(pHandleInfo->OSHandle, &density, &blocksize); ++ ++ if (result != NO_ERROR) { ++ TAPE_GET_DRIVE_PARAMETERS drive_params; ++ DWORD size; ++ ++ size = sizeof(drive_params); ++ ++ result = GetTapeParameters(pHandleInfo->OSHandle, GET_TAPE_DRIVE_INFORMATION, &size, &drive_params); ++ ++ if (result == NO_ERROR) { ++ blocksize = drive_params.DefaultBlockSize; ++ } ++ } ++ ++ mt_get->mt_type = MT_ISSCSI2; ++ ++ // Partition # ++ mt_get->mt_resid = pos_info.PartitionBlockValid ? 
pos_info.Partition : (ULONG)-1; ++ ++ // Density / Block Size ++ mt_get->mt_dsreg = ((density << MT_ST_DENSITY_SHIFT) & MT_ST_DENSITY_MASK) | ++ ((blocksize << MT_ST_BLKSIZE_SHIFT) & MT_ST_BLKSIZE_MASK); ++ ++ mt_get->mt_gstat = 0x00010000; /* Immediate report mode.*/ ++ ++ if (pHandleInfo->bEOF) { ++ mt_get->mt_gstat |= 0x80000000; // GMT_EOF ++ } ++ ++ if (pos_info.PartitionBlockValid && pos_info.BlockNumber == 0) { ++ mt_get->mt_gstat |= 0x40000000; // GMT_BOT ++ } ++ ++ if (pHandleInfo->bEOT) { ++ mt_get->mt_gstat |= 0x20000000; // GMT_EOT ++ } ++ ++ if (pHandleInfo->bEOD) { ++ mt_get->mt_gstat |= 0x08000000; // GMT_EOD ++ } ++ ++ TAPE_GET_MEDIA_PARAMETERS media_params; ++ DWORD size = sizeof(media_params); ++ ++ result = GetTapeParameters(pHandleInfo->OSHandle, GET_TAPE_MEDIA_INFORMATION, &size, &media_params); ++ ++ if (result == NO_ERROR && media_params.WriteProtected) { ++ mt_get->mt_gstat |= 0x04000000; // GMT_WR_PROT ++ } ++ ++ result = GetTapeStatus(pHandleInfo->OSHandle); ++ ++ if (result != NO_ERROR) { ++ if (result == ERROR_NO_MEDIA_IN_DRIVE) { ++ mt_get->mt_gstat |= 0x00040000; // GMT_DR_OPEN ++ } ++ } else { ++ mt_get->mt_gstat |= 0x01000000; // GMT_ONLINE ++ } ++ ++ // Recovered Error Count ++ mt_get->mt_erreg = 0; ++ ++ // File Number ++ mt_get->mt_fileno = (__kernel_daddr_t)pHandleInfo->ulFile; ++ ++ // Block Number ++ mt_get->mt_blkno = (__kernel_daddr_t)(pHandleInfo->bBlockValid ? pos_info.BlockNumber - pHandleInfo->ullFileStart : (ULONGLONG)-1); ++ ++ return 0; ++} ++ ++#define SERVICEACTION_SHORT_FORM_BLOCKID 0 ++#define SERVICEACTION_SHORT_FORM_VENDOR_SPECIFIC 1 ++#define SERVICEACTION_LONG_FORM 6 ++#define SERVICEACTION_EXTENDED_FORM 8 ++ ++ ++typedef struct _SCSI_READ_POSITION_SHORT_BUFFER ++{ ++ UCHAR :1; ++ UCHAR PERR:1; ++ UCHAR BPU:1; ++ UCHAR :1; ++ UCHAR BYCU:1; ++ UCHAR BCU:1; ++ UCHAR EOP:1; ++ UCHAR BOP:1; ++ UCHAR Partition; ++ UCHAR Reserved1[2]; ++ UCHAR FirstBlock[4]; ++ UCHAR LastBlock[4]; ++ UCHAR Reserved2; ++ UCHAR NumberBufferBlocks[3]; ++ UCHAR NumberBufferBytes[4]; ++} SCSI_READ_POSITION_SHORT_BUFFER, *PSCSI_READ_POSITION_SHORT_BUFFER; ++ ++typedef struct _SCSI_READ_POSITION_LONG_BUFFER ++{ ++ UCHAR :2; ++ UCHAR BPU:1; ++ UCHAR MPU:1; ++ UCHAR :2; ++ UCHAR EOP:1; ++ UCHAR BOP:1; ++ UCHAR Reserved3[3]; ++ UCHAR Partition[4]; ++ UCHAR BlockNumber[8]; ++ UCHAR FileNumber[8]; ++ UCHAR SetNumber[8]; ++} SCSI_READ_POSITION_LONG_BUFFER, *PSCSI_READ_POSITION_LONG_BUFFER; ++ ++typedef struct _SCSI_READ_POSITION_EXTENDED_BUFFER ++{ ++ UCHAR :1; ++ UCHAR PERR:1; ++ UCHAR LOPU:1; ++ UCHAR :1; ++ UCHAR BYCU:1; ++ UCHAR LOCU:1; ++ UCHAR EOP:1; ++ UCHAR BOP:1; ++ UCHAR Partition; ++ UCHAR AdditionalLength[2]; ++ UCHAR Reserved1; ++ UCHAR NumberBufferObjects[3]; ++ UCHAR FirstLogicalObject[8]; ++ UCHAR LastLogicalObject[8]; ++ UCHAR NumberBufferObjectBytes[8]; ++} SCSI_READ_POSITION_EXTENDED_BUFFER, *PSCSI_READ_POSITION_EXTENDED_BUFFER; ++ ++typedef union _READ_POSITION_RESULT { ++ SCSI_READ_POSITION_SHORT_BUFFER ShortBuffer; ++ SCSI_READ_POSITION_LONG_BUFFER LongBuffer; ++ SCSI_READ_POSITION_EXTENDED_BUFFER ExtendedBuffer; ++} READ_POSITION_RESULT, *PREAD_POSITION_RESULT; ++ ++DWORD GetTapePositionInfo(HANDLE hDevice, PTAPE_POSITION_INFO TapePositionInfo) ++{ ++ PSCSI_PASS_THROUGH ScsiPassThrough; ++ BOOL bResult; ++ DWORD dwBytesReturned; ++ int pass; ++ ++ const DWORD dwBufferSize = sizeof(SCSI_PASS_THROUGH) + sizeof(READ_POSITION_RESULT) + 28; ++ ++ memset(TapePositionInfo, 0, sizeof(*TapePositionInfo)); ++ ++ ScsiPassThrough = 
(PSCSI_PASS_THROUGH)malloc(dwBufferSize); ++ ++ for (pass = 0; pass < 2; pass++) ++ { ++ memset(ScsiPassThrough, 0, dwBufferSize); ++ ++ ScsiPassThrough->Length = sizeof(SCSI_PASS_THROUGH); ++ ++ ScsiPassThrough->CdbLength = 10; ++ ScsiPassThrough->SenseInfoLength = 28; ++ ScsiPassThrough->DataIn = 1; ++ ScsiPassThrough->DataTransferLength = sizeof(SCSI_READ_POSITION_LONG_BUFFER); ++ ScsiPassThrough->TimeOutValue = 1000; ++ ScsiPassThrough->DataBufferOffset = sizeof(SCSI_PASS_THROUGH) + 28; ++ ScsiPassThrough->SenseInfoOffset = sizeof(SCSI_PASS_THROUGH); ++ ++ ScsiPassThrough->Cdb[0] = 0x34; // READ POSITION ++ ++ switch (pass) ++ { ++ case 0: ++ ScsiPassThrough->Cdb[1] = SERVICEACTION_LONG_FORM; ++ break; ++ ++ case 1: ++ ScsiPassThrough->Cdb[1] = SERVICEACTION_SHORT_FORM_BLOCKID; ++ break; ++ } ++ ++ bResult = DeviceIoControl( hDevice, ++ IOCTL_SCSI_PASS_THROUGH, ++ ScsiPassThrough, sizeof(SCSI_PASS_THROUGH), ++ ScsiPassThrough, dwBufferSize, ++ &dwBytesReturned, ++ NULL); ++ ++ if (bResult && dwBytesReturned >= (offsetof(SCSI_PASS_THROUGH, ScsiStatus) + sizeof(ScsiPassThrough->ScsiStatus))) { ++ if (ScsiPassThrough->ScsiStatus == SCSISTAT_GOOD) { ++ PREAD_POSITION_RESULT pPosResult = (PREAD_POSITION_RESULT)((PUCHAR)ScsiPassThrough + ScsiPassThrough->DataBufferOffset); ++ ++ switch (pass) ++ { ++ case 0: // SERVICEACTION_LONG_FORM ++ { ++ TapePositionInfo->AtPartitionStart = pPosResult->LongBuffer.BOP; ++ TapePositionInfo->AtPartitionEnd = pPosResult->LongBuffer.EOP; ++ ++ if (!TapePositionInfo->PartitionBlockValid) { ++ TapePositionInfo->PartitionBlockValid = !pPosResult->LongBuffer.BPU; ++ ++ if (TapePositionInfo->PartitionBlockValid) { ++ TapePositionInfo->Partition = Read32BitUnsigned(pPosResult->LongBuffer.Partition); ++ TapePositionInfo->BlockNumber = Read64BitUnsigned(pPosResult->LongBuffer.BlockNumber); ++ } ++ } ++ ++ TapePositionInfo->FileSetValid = !pPosResult->LongBuffer.MPU; ++ if (TapePositionInfo->FileSetValid) { ++ TapePositionInfo->FileNumber = Read64BitUnsigned(pPosResult->LongBuffer.FileNumber); ++ TapePositionInfo->SetNumber = Read64BitUnsigned(pPosResult->LongBuffer.SetNumber); ++ } ++ } ++ break; ++ ++ case 1: // SERVICEACTION_SHORT_FORM_BLOCKID ++ { ++ // pPosResult->ShortBuffer.PERR; ++ // pPosResult->ShortBuffer.BYCU; ++ // pPosResult->ShortBuffer.BCU; ++ TapePositionInfo->AtPartitionStart = pPosResult->ShortBuffer.BOP; ++ TapePositionInfo->AtPartitionEnd = pPosResult->ShortBuffer.EOP; ++ ++ if (!TapePositionInfo->PartitionBlockValid) { ++ TapePositionInfo->PartitionBlockValid = !pPosResult->ShortBuffer.BPU; ++ ++ if (TapePositionInfo->PartitionBlockValid) { ++ TapePositionInfo->Partition = pPosResult->ShortBuffer.Partition; ++ TapePositionInfo->BlockNumber = Read32BitUnsigned(pPosResult->ShortBuffer.FirstBlock); ++ } ++ } ++ // Read32BitsUnsigned(pPosResult->ShortBuffer.LastBlock); ++ // Read24BitsUnsigned(pPosResult->ShortBuffer.NumberBufferBlocks); ++ // Read32BitsUnsigned(pPosResult->ShortBuffer.NumberBufferBytes); ++ } ++ break; ++ } ++ } ++ } ++ } ++ free(ScsiPassThrough); ++ ++ return NO_ERROR; ++} ++ ++DWORD GetDensityBlockSize(HANDLE hDevice, DWORD *pdwDensity, DWORD *pdwBlockSize) ++{ ++ DWORD dwBufferSize = sizeof(GET_MEDIA_TYPES) + 5 * sizeof(DEVICE_MEDIA_INFO); ++ GET_MEDIA_TYPES * pGetMediaTypes = (GET_MEDIA_TYPES *)malloc(dwBufferSize); ++ BOOL bResult; ++ DWORD dwResult; ++ DWORD idxMedia; ++ ++ if (pGetMediaTypes == NULL) { ++ return ERROR_OUTOFMEMORY; ++ } ++ ++ do { ++ DWORD dwBytesReturned; ++ ++ bResult = DeviceIoControl( hDevice, ++ 
IOCTL_STORAGE_GET_MEDIA_TYPES_EX, ++ NULL, 0, ++ (LPVOID)pGetMediaTypes, dwBufferSize, ++ &dwBytesReturned, ++ NULL); ++ ++ if (!bResult) { ++ dwResult = GetLastError(); ++ ++ if (dwResult != ERROR_INSUFFICIENT_BUFFER) { ++ free(pGetMediaTypes); ++ return dwResult; ++ } ++ ++ dwBufferSize += 6 * sizeof(DEVICE_MEDIA_INFO); ++ ++ GET_MEDIA_TYPES * pNewBuffer = (GET_MEDIA_TYPES *)realloc(pGetMediaTypes, dwBufferSize); ++ ++ if (pNewBuffer != pGetMediaTypes) { ++ free(pGetMediaTypes); ++ ++ if (pNewBuffer == NULL) { ++ return ERROR_OUTOFMEMORY; ++ } ++ ++ pGetMediaTypes = pNewBuffer; ++ } ++ } ++ } while (!bResult); ++ ++ if (pGetMediaTypes->DeviceType != FILE_DEVICE_TAPE) { ++ free(pGetMediaTypes); ++ return ERROR_BAD_DEVICE; ++ } ++ ++ for (idxMedia = 0; idxMedia < pGetMediaTypes->MediaInfoCount; idxMedia++) { ++ ++ if (pGetMediaTypes->MediaInfo[idxMedia].DeviceSpecific.TapeInfo.MediaCharacteristics & MEDIA_CURRENTLY_MOUNTED) { ++ ++ if (pGetMediaTypes->MediaInfo[idxMedia].DeviceSpecific.TapeInfo.BusType == BusTypeScsi) { ++ *pdwDensity = pGetMediaTypes->MediaInfo[idxMedia].DeviceSpecific.TapeInfo.BusSpecificData.ScsiInformation.DensityCode; ++ } else { ++ *pdwDensity = 0; ++ } ++ ++ *pdwBlockSize = pGetMediaTypes->MediaInfo[idxMedia].DeviceSpecific.TapeInfo.CurrentBlockSize; ++ ++ free(pGetMediaTypes); ++ ++ return NO_ERROR; ++ } ++ } ++ ++ free(pGetMediaTypes); ++ ++ return ERROR_NO_MEDIA_IN_DRIVE; ++} ++ ++int tape_pos(int fd, struct mtpos *mt_pos) ++{ ++ DWORD partition; ++ DWORD offset; ++ DWORD offsetHi; ++ BOOL result; ++ ++ if (fd < 3 || fd >= (int)(NUMBER_HANDLE_ENTRIES + 3) || ++ TapeHandleTable[fd - 3].OSHandle == INVALID_HANDLE_VALUE) { ++ errno = EBADF; ++ return -1; ++ } ++ ++ PTAPE_HANDLE_INFO pHandleInfo = &TapeHandleTable[fd - 3]; ++ ++ result = GetTapePosition(pHandleInfo->OSHandle, TAPE_ABSOLUTE_BLOCK, &partition, &offset, &offsetHi); ++ if (result == NO_ERROR) { ++ mt_pos->mt_blkno = offset; ++ return 0; ++ } ++ ++ return -1; ++} +--- /dev/null 1969-12-31 16:00:00.000000000 -0800 ++++ mtops.h 2006-08-09 03:26:58.372973300 -0700 +@@ -0,0 +1,15 @@ ++int tape_open(const char *file, int flags, int mode); ++int tape_read(int fd, void *buffer, unsigned int count); ++int tape_write(int fd, const void *buffer, unsigned int count); ++int tape_ioctl(int fd, unsigned long int request, ...); ++int tape_close(int fd); ++ ++typedef unsigned long __kernel_daddr_t; ++ ++#ifndef ENOMEDIUM ++#define ENOMEDIUM 123 ++#endif ++ ++#ifndef PATH_MAX ++#define PATH_MAX 1024 ++#endif +--- /dev/null 1969-12-31 16:00:00.000000000 -0800 ++++ Makefile.msc 2006-08-09 04:00:53.970613100 -0700 +@@ -0,0 +1,20 @@ ++CC= cl ++CFLAGS= /nologo /Ox /Gy /Zi /W3 /TP \ ++ /D_CRT_SECURE_NO_DEPRECATE ++LDFLAGS= /link /DEBUG /INCREMENTAL:NO /OPT:NOREF /PDB:$*.pdb /OUT:$@ ++PREFIX= C:\ ++ ++all: mt.exe ++ ++mt.exe: mt.c ++ $(CC) $(CFLAGS) mt.c mtops.c $(LDFLAGS) ++ ++stinit.exe: stinit.c ++ $(CC) $(CFLAGS) stinit.c $(LDFLAGS) ++ ++install: mt.exe ++ if not exist $(PREFIX)\bin\nul mkdir $(PREFIX)\bin ++ !copy /y $** $(PREFIX)\bin ++ ++clean: ++ del /f *~ *.obj mt.exe stinit.exe diff --git a/src/win32/patches/mtx-msvc1.patch b/src/win32/patches/mtx-msvc1.patch new file mode 100644 index 00000000..c4712c9d --- /dev/null +++ b/src/win32/patches/mtx-msvc1.patch @@ -0,0 +1,75 @@ +--- /dev/null 1969-12-31 16:00:00.000000000 -0800 ++++ Makefile.msc 2006-08-09 02:52:26.191999700 -0700 +@@ -0,0 +1,72 @@ ++# WARNING -- THIS HAS BEEN RE-WRITTEN TO USE MICROSOFT NMAKE. 
++# ++# Valid targets: ++# Microsoft Visual Studio ++# ++# Makefile changes by Lars Kellogg-Stedman for better integration with ++# GNU Autoconf. ++ ++# Version # for 'make dist'... ++VERSION=1.3.9 ++PREFIX=C:\ ++ ++BINS = mtx.exe tapeinfo.exe loaderinfo.exe scsitape.exe nsmhack.exe ++ ++CC = cl ++ ++CFLAGS = /nologo /Ox /Gy /Zi /W3 \ ++ -D_CRT_SECURE_NO_DEPRECATE \ ++ -DVERSION="\"$(VERSION)\"" \ ++ -DLONG_PRINT_REQUEST_SENSE=1 ++ ++LINK = link ++LDFLAGS = /nologo /DEBUG /INCREMENTAL:NO /OPT:NOREF /PDB:$*.pdb /OUT:$@ ++LIBS = ++ ++all: $(BINS) ++ ++install: $(BINS) ++ -mkdir $(PREFIX)\bin ++ !copy /y $** $(PREFIX)\bin ++ ++clean: ++ -del /f *.obj *~ ++ -del /f $(BINS) ++ -del /f mam2debug.exe mam2debug2.exe ++ ++loaderinfo.exe: loaderinfo.obj mtxl.obj mtxl.h mtx.h ++ $(LINK) $(LDFLAGS) loaderinfo.obj mtxl.obj $(LIBS) ++ ++nsmhack.exe: nsmhack.obj mtxl.obj ++ $(LINK) $(LDFLAGS) nsmhack.obj mtxl.obj $(LIBS) ++ ++mtx.exe: mtx.obj mtxl.obj mtxl.h mtx.h ++ $(LINK) $(LDFLAGS) mtx.obj mtxl.obj $(LIBS) ++ ++mam2debug.exe: mtxl.obj mam2debug.obj mtx.h ++ $(LINK) $(LDFLAGS) mtxl.obj mam2debug.obj $(LIBS) ++ ++tapeinfo.exe: tapeinfo.obj mtxl.obj mtx.h mtxl.h ++ $(LINK) $(LDFLAGS) tapeinfo.obj mtxl.obj $(LIBS) ++ ++mam2debug2.exe: mtxl.obj mam2debug2.obj mtx.h ++ $(LINK) $(LDFLAGS) mtxl.obj mam2debug2.obj $(LIBS) ++ ++scsitape.exe: scsitape.obj mtxl.obj mtxl.h mtx.h ++ $(LINK) $(LDFLAGS) scsitape.obj mtxl.obj $(LIBS) ++ ++scsitape.obj: scsitape.c mtx.h mtxl.h ++ ++loaderinfo.obj: loaderinfo.c mtx.h mtxl.h ++ ++tapeinfo.obj: tapeinfo.c mtx.h mtxl.h ++ ++mam2debug.obj: mam2debug.c mtx.h mtxl.h ++ ++mam2debug2.obj: mam2debug2.c mtx.h mtxl.h ++ ++mtx.obj: mtx.c mtx.h mtxl.h ++ ++mtxl.obj: mtxl.c mtx.h mtxl.h scsi_linux.c scsi_win32.c ++ ++nsmhack.obj: nsmhack.c mtxl.h mtx.h diff --git a/src/win32/patches/mtx-msvc2.patch b/src/win32/patches/mtx-msvc2.patch new file mode 100644 index 00000000..e8261e2b --- /dev/null +++ b/src/win32/patches/mtx-msvc2.patch @@ -0,0 +1,40 @@ +--- /dev/null 1969-12-31 16:00:00.000000000 -0800 ++++ msvc/config.h 2006-05-26 15:10:34.558337600 -0700 +@@ -0,0 +1,37 @@ ++/* config.h. Generated by configure. */ ++/* Copyright 2001 Enhanced Software Technologies Inc. ++ * Released under GNU General Public License V2 or Above ++ * See http://www.gnu.org for more information about the terms of ++ * the GNU General Public License. ++ * $Date: 2006-08-12 16:18:24 -0700 (Sat, 12 Aug 2006) $ ++ * $Revision: 3282 $ ++ */ ++ ++#ifndef CONFIG_H ++#define CONFIG_H 1 ++ ++/* autoconf changes these. 
*/ ++#define HAVE_STRING_H 1 ++#define HAVE_UNISTD_H 0 ++#define HAVE_STDLIB_H 1 ++#define HAVE_STDARG_H 1 ++#define HAVE_SCSI_SCSI_H 0 ++#define HAVE_SCSI_SCSI_IOCTL_H 0 ++#define HAVE_SCSI_SG_H 0 ++#define HAVE_SYS_GSCDDS_H 0 ++#define HAVE_CAMLIB_H 0 ++#define HAVE_SYS_SCSI_IMPL_USCSI_H 0 ++#define HAVE_SYS_SCSI_CTL_H 0 ++#define HAVE_DSLIB_H 0 ++#define HAVE_DU_DEFS_H 0 ++#define HAVE_SYS_STAT_H 1 ++#define HAVE_SYS_TYPES_H 1 ++#define HAVE_FCNTL_H 1 ++#define HAVE_SYS_IOCTL_H 0 ++#define HAVE_SYS_MTIO_H 0 ++#define HAVE_DDK_NTDDSCSI_H 0 ++ ++#define WORDS_BIGENDIAN 0 ++ ++#endif ++ diff --git a/src/win32/patches/mtx.patch b/src/win32/patches/mtx.patch new file mode 100644 index 00000000..d22e2611 --- /dev/null +++ b/src/win32/patches/mtx.patch @@ -0,0 +1,1576 @@ +Index: README.win32 +=================================================================== +--- /dev/null Sat Jul 29 14:54:52 2006 ++++ README.win32 Sat Jul 29 14:46:45 2006 +@@ -0,0 +1,22 @@ ++CHANGES FROM UNIX ++================= ++ ++The only difference is in the naming of devices. On Linux the changer is ++accessed using /dev/sg, on Windows you use Changer. ++ ++On Linux the tape drive is referenced using /dev/nst, on Windows you use ++Tape. ++ ++There is one exception in the case where there isn't a driver loaded for the ++device. This is usually only the case on Windows 2000 or if the Windows XP or ++Windows Server 2003 system supplied driver has been disabled. ++ ++In the case where there is no driver loaded you can access the device directly ++through the SCSI driver using the following notation: ++ ++ ::: ++ ++ Port is the adapter number ++ Bus is the SCSI bus number relative to the adapter ++ Target is the SCSI device's target ID ++ LUN is the SCSI device's logical unit number +Index: scsi_win32.c +=================================================================== +--- /dev/null Sat Jul 29 14:55:00 2006 ++++ scsi_win32.c Sat Jul 29 14:54:08 2006 +@@ -0,0 +1,353 @@ ++/* Copyright 2006 Robert Nelson ++ ++$Date: 2006-07-30 06:32:36 -0700 (Sun, 30 Jul 2006) $ ++$Revision: 3200 $ ++ ++ This program is free software; you may redistribute and/or modify it under ++ the terms of the GNU General Public License Version 2 as published by the ++ Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY, without even the implied warranty of MERCHANTABILITY ++ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ for complete details. ++ ++*/ ++ ++/* These are the SCSI commands for Microsoft Windows. This is derived from ++ * the file scsi_linux.c substituting Windows specific emulation of the Linux ++ * behaviour. ++ */ ++ ++#include ++#include ++ ++#ifdef _MSC_VER ++#include ++#else ++#include ++#endif ++ ++#ifndef HZ ++#define HZ 1000 ++#endif ++ ++/* These are copied out of BRU 16.1, with all the boolean masks changed ++ * to our bitmasks. 
++*/ ++#define S_NO_SENSE(s) ((s)->SenseKey == 0x0) ++#define S_RECOVERED_ERROR(s) ((s)->SenseKey == 0x1) ++ ++#define S_NOT_READY(s) ((s)->SenseKey == 0x2) ++#define S_MEDIUM_ERROR(s) ((s)->SenseKey == 0x3) ++#define S_HARDWARE_ERROR(s) ((s)->SenseKey == 0x4) ++#define S_UNIT_ATTENTION(s) ((s)->SenseKey == 0x6) ++#define S_BLANK_CHECK(s) ((s)->SenseKey == 0x8) ++#define S_VOLUME_OVERFLOW(s) ((s)->SenseKey == 0xd) ++ ++#define DEFAULT_TIMEOUT 3 * 60 /* 3 minutes here */ ++ ++/* Sigh, the T-10 SSC spec says all of the following is needed to ++ * detect a short read while in variable block mode, and that even ++ * though we got a BLANK_CHECK or MEDIUM_ERROR, it's still a valid read. ++ */ ++ ++#define HIT_FILEMARK(s) (S_NO_SENSE((s)) && (s)->Filemark && (s)->Valid) ++#define SHORT_READ(s) (S_NO_SENSE((s)) && (s)->ILI && (s)->Valid && (s)->AdditionalSenseCode==0 && (s)->AdditionalSenseCodeQualifier==0) ++#define HIT_EOD(s) (S_BLANK_CHECK((s)) && (s)->Valid) ++#define HIT_EOP(s) (S_MEDIUM_ERROR((s)) && (s)->EOM && (s)->Valid) ++#define HIT_EOM(s) ((s)->EOM && (s)->Valid) ++ ++#define STILL_A_VALID_READ(s) (HIT_FILEMARK(s) || SHORT_READ(s) || HIT_EOD(s) || HIT_EOP(s) || HIT_EOM(s)) ++ ++#define SCSI_DEFAULT_TIMEOUT 60 /* 1 minute */ ++#define SCSI_MAX_TIMEOUT 108 /* 1 minute 48 seconds */ ++ ++typedef struct _HANDLE_ENTRY { ++ HANDLE hDevice; ++ UCHAR PortId; ++ UCHAR PathId; ++ UCHAR TargetId; ++ UCHAR Lun; ++} HANDLE_ENTRY, *PHANDLE_ENTRY; ++ ++PHANDLE_ENTRY HandleTable = NULL; ++int nEntries = 0; ++ ++DEVICE_TYPE SCSI_OpenDevice(char *DeviceName) ++{ ++ int DeviceIndex; ++ TCHAR szDevicePath[256]; ++ ++ int nColons = 0; ++ int index; ++ ++ int port, path, target, lun; ++ ++ for (DeviceIndex = 0; DeviceIndex < nEntries; DeviceIndex++) ++ { ++ if (HandleTable[DeviceIndex].hDevice == INVALID_HANDLE_VALUE) ++ break; ++ } ++ ++ if (DeviceIndex >= nEntries) ++ { ++ PHANDLE_ENTRY pNewTable; ++ ++ nEntries += 4; ++ ++ if (HandleTable == NULL) ++ { ++ pNewTable = (PHANDLE_ENTRY)malloc(nEntries * sizeof(HANDLE_ENTRY)); ++ } ++ else ++ { ++ pNewTable = (PHANDLE_ENTRY)realloc(HandleTable, nEntries * sizeof(HANDLE_ENTRY)); ++ } ++ ++ if (pNewTable == NULL) ++ { ++ FatalError("cannot open SCSI device '%s' - %m\n", DeviceName); ++ } ++ ++ HandleTable = pNewTable; ++ } ++ ++ for (index = 0; DeviceName[index] != '\0'; index++) ++ { ++ if (DeviceName[index] == ':') ++ nColons++; ++ else if (DeviceName[index] < '0' || DeviceName[index] > '9') ++ break; ++ } ++ ++ if (DeviceName[index] == '\0' && nColons == 3 && ++ sscanf(DeviceName, "%d:%d:%d:%d", &port, &path, &target, &lun) == 4) { ++ ++ HandleTable[DeviceIndex].PortId = (UCHAR)port; ++ HandleTable[DeviceIndex].PathId = (UCHAR)path; ++ HandleTable[DeviceIndex].TargetId = (UCHAR)target; ++ HandleTable[DeviceIndex].Lun = (UCHAR)lun; ++ ++ sprintf(szDevicePath, "\\\\.\\scsi%d:", port); ++ } ++ else ++ { ++ int nPrefixLength = 0; ++ ++ if (DeviceName[0] != '\\') { ++ memcpy(szDevicePath, "\\\\.\\", 4 * sizeof(TCHAR)); ++ nPrefixLength = 4; ++ } ++ ++ HandleTable[DeviceIndex].PortId = 0; ++ HandleTable[DeviceIndex].PathId = 0; ++ HandleTable[DeviceIndex].TargetId = 0; ++ HandleTable[DeviceIndex].Lun = 0; ++ ++ strncpy( &szDevicePath[nPrefixLength], ++ DeviceName, ++ sizeof(szDevicePath) / sizeof(TCHAR) - nPrefixLength - 1); ++ ++ szDevicePath[sizeof(szDevicePath) / sizeof(TCHAR) - 1] = '\0'; ++ } ++ ++ HandleTable[DeviceIndex].hDevice = CreateFile(szDevicePath, GENERIC_READ | GENERIC_WRITE, 0, NULL, OPEN_EXISTING, 0, NULL); ++ ++ if 
(HandleTable[DeviceIndex].hDevice == INVALID_HANDLE_VALUE) ++ { ++ DWORD dwError = GetLastError(); ++ ++#if DEBUG ++ LPSTR lpszMessage; ++ ++ FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM, NULL, dwError, 0, (LPSTR)&lpszMessage, 0, NULL); ++ fputs(lpszMessage, stderr); ++#endif ++ ++ switch (dwError) { ++ case ERROR_FILE_NOT_FOUND: ++ case ERROR_PATH_NOT_FOUND: ++ errno = ENOENT; ++ break; ++ ++ case ERROR_TOO_MANY_OPEN_FILES: ++ errno = EMFILE; ++ break; ++ ++ default: ++ case ERROR_ACCESS_DENIED: ++ case ERROR_SHARING_VIOLATION: ++ case ERROR_LOCK_VIOLATION: ++ case ERROR_INVALID_NAME: ++ errno = EACCES; ++ break; ++ ++ case ERROR_FILE_EXISTS: ++ errno = EEXIST; ++ break; ++ ++ case ERROR_INVALID_PARAMETER: ++ errno = EINVAL; ++ break; ++ } ++ ++ FatalError("cannot open SCSI device '%s' - %m\n", DeviceName); ++ } ++ ++ return DeviceIndex; ++} ++ ++static int scsi_timeout = SCSI_DEFAULT_TIMEOUT; ++ ++void SCSI_Set_Timeout(int secs) ++{ ++ if (secs > SCSI_MAX_TIMEOUT) { ++ secs = SCSI_MAX_TIMEOUT; ++ } ++ scsi_timeout = secs * HZ; ++} ++ ++void SCSI_Default_Timeout(void) ++{ ++ scsi_timeout = SCSI_DEFAULT_TIMEOUT * HZ; ++} ++ ++void SCSI_CloseDevice(char *DeviceName, DEVICE_TYPE DeviceFD) ++{ ++ if (DeviceFD < nEntries) ++ { ++ CloseHandle(HandleTable[DeviceFD].hDevice); ++ HandleTable[DeviceFD].hDevice = INVALID_HANDLE_VALUE; ++ } ++ else ++ { ++ errno = EBADF; ++ FatalError("cannot close SCSI device '%s' - %m\n", DeviceName); ++ } ++} ++ ++ ++/* Added by Eric Green to deal with burping ++ * Seagate autoloader (hopefully!). ++ */ ++/* Get the SCSI ID and LUN... */ ++scsi_id_t *SCSI_GetIDLun(DEVICE_TYPE fd) { ++ scsi_id_t * retval; ++ ++ SCSI_ADDRESS ScsiAddress; ++ BOOL bResult; ++ DWORD dwBytesReturned; ++ ++ if (fd < nEntries) { ++ retval = (scsi_id_t *)xmalloc(sizeof(scsi_id_t)); ++ retval->id = HandleTable[fd].TargetId; ++ retval->lun = HandleTable[fd].Lun; ++ ++#ifdef DEBUG ++ fprintf(stderr,"SCSI:ID=%d LUN=%d\n", retval->id, retval->lun); ++#endif ++ return retval; ++ } else { ++ errno = EBADF; ++ FatalError("cannot close SCSI device - %m\n"); ++ } ++ ++ memset(&ScsiAddress, 0, sizeof(ScsiAddress)); ++ ++ ScsiAddress.Length = sizeof(ScsiAddress); ++ ++ bResult = DeviceIoControl(HandleTable[fd].hDevice, ++ IOCTL_SCSI_GET_ADDRESS, ++ &ScsiAddress, sizeof(ScsiAddress), ++ &ScsiAddress, sizeof(ScsiAddress), ++ &dwBytesReturned, ++ NULL); ++ ++ if (!bResult) { ++ return NULL; ++ } ++ ++ retval = (scsi_id_t *)xmalloc(sizeof(scsi_id_t)); ++ retval->id = ScsiAddress.TargetId; ++ retval->lun = ScsiAddress.Lun; ++ ++#ifdef DEBUG ++ fprintf(stderr,"SCSI:ID=%d LUN=%d\n",retval->id,retval->lun); ++#endif ++ return retval; ++} ++ ++int SCSI_ExecuteCommand(DEVICE_TYPE DeviceFD, ++ Direction_T Direction, ++ CDB_T *CDB, ++ int CDB_Length, ++ void *DataBuffer, ++ int DataBufferLength, ++ RequestSense_T *RequestSense) ++{ ++ PSCSI_PASS_THROUGH_DIRECT ScsiPassThrough; ++ ++ const DWORD dwBufferSize = sizeof(SCSI_PASS_THROUGH_DIRECT) + sizeof(RequestSense_T); ++ BOOL bResult; ++ DWORD dwBytesReturned; ++ ++ if (DeviceFD >= nEntries || HandleTable[DeviceFD].hDevice == INVALID_HANDLE_VALUE) ++ { ++ errno = EBADF; ++ return -1; ++ } ++ ++ ScsiPassThrough = (PSCSI_PASS_THROUGH_DIRECT)malloc(dwBufferSize); ++ ++ memset(ScsiPassThrough, 0, dwBufferSize); ++ ++ ScsiPassThrough->Length = sizeof(SCSI_PASS_THROUGH_DIRECT); ++ ++ ScsiPassThrough->PathId = HandleTable[DeviceFD].PathId; ++ ScsiPassThrough->TargetId = HandleTable[DeviceFD].TargetId; ++ ScsiPassThrough->Lun = 
HandleTable[DeviceFD].Lun; ++ ScsiPassThrough->CdbLength = (UCHAR)CDB_Length; ++ ScsiPassThrough->SenseInfoLength = sizeof(RequestSense_T); ++ ScsiPassThrough->DataIn = Direction == Input; ++ ScsiPassThrough->DataTransferLength = DataBufferLength; ++ ScsiPassThrough->TimeOutValue = scsi_timeout; ++ ScsiPassThrough->DataBuffer = DataBuffer; ++ ScsiPassThrough->SenseInfoOffset = sizeof(SCSI_PASS_THROUGH_DIRECT); ++ ++ memcpy(ScsiPassThrough->Cdb, CDB, CDB_Length); ++ dwBytesReturned = 0; ++ ++ bResult = DeviceIoControl(HandleTable[DeviceFD].hDevice, ++ IOCTL_SCSI_PASS_THROUGH_DIRECT, ++ ScsiPassThrough, sizeof(SCSI_PASS_THROUGH_DIRECT), ++ ScsiPassThrough, dwBufferSize, ++ &dwBytesReturned, ++ NULL); ++ if (bResult) { ++ if (ScsiPassThrough->ScsiStatus != 0) { ++ memcpy(RequestSense, &ScsiPassThrough[1], sizeof(RequestSense_T)); ++#if DEBUG ++ fprintf(stderr, "Command failed - ScsiStatus = %d\n", ScsiPassThrough->ScsiStatus); ++ PrintRequestSense(RequestSense); ++#endif ++ bResult = false; ++ } ++ } ++ else ++ { ++#if DEBUG ++ DWORD dwError = GetLastError(); ++ LPSTR lpszMessage; ++ ++ FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM, NULL, dwError, 0, (LPSTR)&lpszMessage, 0, NULL); ++ fputs(lpszMessage, stderr); ++#endif ++ ++ memset(RequestSense, 0, sizeof(RequestSense_T)); ++ } ++ ++ free(ScsiPassThrough); ++ ++ return bResult ? 0 : -1; ++} +Index: tapeinfo.c +=================================================================== +--- tapeinfo.c (revision 139) ++++ tapeinfo.c (revision 147) +@@ -211,25 +211,18 @@ + unsigned int partition1_size; + } TapeCapacity; + ++#if defined(DEBUG) + /* DEBUG */ + static void dump_data(unsigned char *data, int len) { +- int i; + if (!len) { + fprintf(stderr,"**NO DATA**\n"); + return; + } + +- for (i=0;i argument '%d' to 'unload' command\n", + arg2); + } +- if (ElementStatus->DataTransferElementFull[arg2] < 0 ) { ++ if (!ElementStatus->DataTransferElementFull[arg2]) { + FatalError("Data Transfer Element %d is Empty\n", arg2); + } + /* Now see if something already lives where we wanna go... */ +@@ -715,7 +715,7 @@ + } + ElementStatus = ReadElementStatus(MediumChangerFD,&RequestSense,inquiry_info,&SCSI_Flags); + if (!ElementStatus) { +- PrintRequestSense(&RequestSense); ++ PrintRequestSense(&RequestSense); + FatalError("READ ELEMENT STATUS Command Failed\n"); + } + } +@@ -813,9 +813,6 @@ + + argv0=argv[0]; + +- +- +- + parse_args(); /* also executes them as it sees them, sigh. */ + + #ifndef VMS +Index: scsitape.c +=================================================================== +--- scsitape.c (revision 139) ++++ scsitape.c (revision 147) +@@ -41,11 +41,26 @@ + #include "mtx.h" + #include "mtxl.h" + ++#if HAVE_UNISTD_H + #include ++#endif ++ ++#if HAVE_SYS_TYPES_H + #include ++#endif ++ ++#if HAVE_SYS_IOCTL_H + #include ++#endif ++ ++#if HAVE_SYS_MTIO_H + #include /* will try issuing some ioctls for Solaris, sigh. */ ++#endif + ++#ifdef _MSC_VER ++#include ++#endif ++ + void Usage(void) { + FatalError("Usage: scsitape -f where is:\n setblk | fsf | bsf | eod | rewind | eject | mark |\n seek | read [ [] \n"); + } +@@ -54,7 +69,7 @@ + static int arg[4]; /* the argument for the command, sigh. */ + + /* the device handle we're operating upon, sigh. */ +-static unsigned char *device; /* the text of the device thingy. */ ++static char *device; /* the text of the device thingy. 
*/ + static DEVICE_TYPE MediumChangerFD = (DEVICE_TYPE) 0; + + +@@ -96,43 +111,7 @@ + + char *argv0; + +-/* A table for printing out the peripheral device type as ASCII. */ +-static char *PeripheralDeviceType[32] = { +- "Disk Drive", +- "Tape Drive", +- "Printer", +- "Processor", +- "Write-once", +- "CD-ROM", +- "Scanner", +- "Optical", +- "Medium Changer", +- "Communications", +- "ASC IT8", +- "ASC IT8", +- "RAID Array", +- "Enclosure Services", +- "OCR/W", +- "Bridging Expander", /* 0x10 */ +- "Reserved", /* 0x11 */ +- "Reserved", /* 0x12 */ +- "Reserved", /* 0x13 */ +- "Reserved", /* 0x14 */ +- "Reserved", /* 0x15 */ +- "Reserved", /* 0x16 */ +- "Reserved", /* 0x17 */ +- "Reserved", /* 0x18 */ +- "Reserved", /* 0x19 */ +- "Reserved", /* 0x1a */ +- "Reserved", /* 0x1b */ +- "Reserved", /* 0x1c */ +- "Reserved", /* 0x1d */ +- "Reserved", /* 0x1e */ +- "Unknown" /* 0x1f */ +-}; + +- +- + /* open_device() -- set the 'fh' variable.... */ + void open_device(void) { + +@@ -301,7 +280,7 @@ + CDB[5]=0; + + /* we really don't care if this command works or not, sigh. */ +- slow_bzero((unsigned char *)&RequestSense,sizeof(RequestSense_T)); ++ slow_bzero((char *)&RequestSense,sizeof(RequestSense_T)); + if (SCSI_ExecuteCommand(MediumChangerFD,Input,&CDB,6,buffer,0,&RequestSense)!=0){ + PrintRequestSense(&RequestSense); + return 1; +@@ -324,7 +303,7 @@ + CDB[5]=0; + + /* we really don't care if this command works or not, sigh. */ +- slow_bzero((unsigned char *)&sense,sizeof(RequestSense_T)); ++ slow_bzero((char *)&sense,sizeof(RequestSense_T)); + if (SCSI_ExecuteCommand(MediumChangerFD,Input,&CDB,6,buffer,0,&sense)!=0){ + PrintRequestSense(&sense); + return 1; +@@ -349,7 +328,7 @@ + CDB[5]=0; + + /* we really don't care if this command works or not, sigh. */ +- slow_bzero((unsigned char *)&sense,sizeof(RequestSense_T)); ++ slow_bzero((char *)&sense,sizeof(RequestSense_T)); + if (SCSI_ExecuteCommand(MediumChangerFD,Input,&CDB,6,buffer,0,&sense)!=0){ + PrintRequestSense(&sense); + return 1; +@@ -392,7 +371,7 @@ + CDB[5]=0; + + /* we really don't care if this command works or not, sigh. */ +- slow_bzero((unsigned char *)&sense,sizeof(RequestSense_T)); ++ slow_bzero((char *)&sense,sizeof(RequestSense_T)); + if (SCSI_ExecuteCommand(MediumChangerFD,Input,&CDB,6,buffer,0,&sense)!=0){ + PrintRequestSense(&sense); + return 1; +@@ -422,7 +401,7 @@ + CDB[9]=0; + + /* we really don't care if this command works or not, sigh. */ +- slow_bzero((unsigned char *)&sense,sizeof(RequestSense_T)); ++ slow_bzero((char *)&sense,sizeof(RequestSense_T)); + if (SCSI_ExecuteCommand(MediumChangerFD,Input,&CDB,10,buffer,0,&sense)!=0){ + PrintRequestSense(&sense); + return 1; +@@ -462,7 +441,7 @@ + static int S_setblk(void) { + RequestSense_T sense; + CDB_T CDB; +- unsigned char buffer[12]; ++ char buffer[12]; + unsigned int count = (unsigned int) arg1; + + +@@ -473,7 +452,7 @@ + CDB[4]=12; /* length of data */ + CDB[5]=0; + +- slow_bzero((unsigned char *)&sense,sizeof(RequestSense_T)); ++ slow_bzero((char *)&sense,sizeof(RequestSense_T)); + slow_bzero(buffer,12); + + /* Now to set the mode page header: */ +@@ -679,9 +658,9 @@ + + /* S_write is not implemented yet! */ + static int S_write(void) { +- unsigned char *buffer; /* the buffer we're gonna read/write out of. */ ++ char *buffer; /* the buffer we're gonna read/write out of. 
*/ + int buffersize; +- int len; /* the length of the data in the buffer */ ++ unsigned int len; /* the length of the data in the buffer */ + int blocksize=arg[0]; + int numblocks=arg[1]; + int varsize=0; /* variable size block flag */ +@@ -755,9 +734,9 @@ + + + static int S_read(void) { +- unsigned char *buffer; /* the buffer we're going to be reading out of */ ++ char *buffer; /* the buffer we're going to be reading out of */ + int buffersize; +- int len; /* the length of the data in the buffer */ ++ unsigned int len; /* the length of the data in the buffer */ + int blocksize=arg[0]; + int numblocks=arg[1]; + int varsize=0; /* variable size block flag. */ +Index: mtx.h +=================================================================== +--- mtx.h (revision 139) ++++ mtx.h (working copy) +@@ -18,7 +18,11 @@ + #include "[.vms]defs.h" + #else /* all the Unix stuff: */ + ++#ifdef _MSC_VER ++#include "msvc/config.h" /* all the autoconf stuff. */ ++#else + #include "config.h" /* all the autoconf stuff. */ ++#endif + + /* all the general Unix includes: */ + +@@ -59,7 +63,7 @@ + # include + #endif + +-/* Now greately modified to use GNU Autoconf stuff: */ ++/* Now greatly modified to use GNU Autoconf stuff: */ + /* If we use the 'sg' interface, like Linux, do this: */ + #if HAVE_SCSI_SG_H + # include +@@ -69,6 +73,27 @@ + # define HAVE_GET_ID_LUN 1 /* signal that we have it... */ + #endif + ++/* Windows Native programs built using MinGW */ ++#if HAVE_DDK_NTDDSCSI_H ++# include ++# include ++# undef DEVICE_TYPE ++ ++typedef int DEVICE_TYPE; ++# define HAVE_GET_ID_LUN 1 /* signal that we have it... */ ++#endif ++ ++/* Windows Native programs built using Microsoft Visual C */ ++#ifdef _MSC_VER ++# include ++# include ++# include ++# undef DEVICE_TYPE ++ ++typedef int DEVICE_TYPE; ++# define HAVE_GET_ID_LUN 1 /* signal that we have it... */ ++#endif ++ + /* The 'cam' interface, like FreeBSD: */ + #if HAVE_CAMLIB_H + # include /* easy (?) access to the CAM user library. */ +@@ -176,10 +201,23 @@ + unsigned char invert2; /* used for EXCHANGE command, sigh. */ + } SCSI_Flags_T; + ++#ifdef _MSC_VER ++typedef unsigned char boolean; ++ ++#define false 0 ++#define true 1 ++ ++ ++typedef unsigned char Direction_T; ++ ++#define Input 0 ++#define Output 1 ++#else + typedef enum { false, true } boolean; + + + typedef enum { Input, Output } Direction_T; ++#endif + + + typedef unsigned char CDB_T[12]; +@@ -354,6 +392,15 @@ + } ElementModeSense_T; + + ++#ifdef _MSC_VER ++typedef char ElementTypeCode_T; ++ ++#define AllElementTypes 0 ++#define MediumTransportElement 1 ++#define StorageElement 2 ++#define ImportExportElement 3 ++#define DataTransferElement 4 ++#else + typedef enum ElementTypeCode + { + AllElementTypes = 0, +@@ -363,6 +410,7 @@ + DataTransferElement = 4 + } + ElementTypeCode_T; ++#endif + + + typedef struct ElementStatusDataHeader +Index: nsmhack.c +=================================================================== +--- nsmhack.c (revision 139) ++++ nsmhack.c (revision 147) +@@ -33,13 +33,13 @@ + + #include "mtxl.h" /* get the SCSI routines out of the main file */ + +-/***************************************************************** ++/****************************************************************/ + /* Variables: */ + /****************************************************************/ + + /* the device handle we're operating upon, sigh. */ +-static unsigned char *device; /* the text of the device thingy. 
*/ +-static DEVICE_TYPE MediumChangerFD = (DEVICE_TYPE) 0; ++static char *device; /* the text of the device thingy. */ ++static DEVICE_TYPE MediumChangerFD = (DEVICE_TYPE) -1; + char *argv0; + int arg[4]; /* arguments for the command. */ + #define arg1 (arg[0]) /* for backward compatibility, sigh */ +@@ -74,7 +74,7 @@ + /* open_device() -- set the 'fh' variable.... */ + void open_device(void) { + +- if (MediumChangerFD) { ++ if (MediumChangerFD != -1) { + SCSI_CloseDevice("Unknown",MediumChangerFD); /* close it, sigh... new device now! */ + } + +@@ -101,7 +101,7 @@ + /* if the device is not already open, then open it from the + * environment. + */ +- if (!MediumChangerFD) { ++ if (MediumChangerFD == -1) { + /* try to get it from STAPE or TAPE environment variable... */ + device=getenv("STAPE"); + if (device==NULL) { +@@ -302,7 +302,7 @@ + } + + static int S_tongue_in(void) { +- ++ return 0; + } + + /* okay, stick our tongue out. We need a slot ID to grab a caddy from. */ +@@ -326,6 +326,7 @@ + } + + /* Okay, we have element status, so now let's assume that */ ++ return 0; + } + + /* See parse_args for the scoop. parse_args does all. */ +Index: mtxl.h +=================================================================== +--- mtxl.h (revision 139) ++++ mtxl.h (revision 147) +@@ -27,6 +27,9 @@ + + #include "mtx.h" + ++#undef min ++#undef max ++ + void FatalError(char *ErrorMessage, ...); + void *xmalloc(size_t Size); + void *xzmalloc(size_t Size); +Index: config.h +=================================================================== +--- ../release/mtx-1.3.9/config.h.in 2003-09-29 19:43:20.000000000 -0700 ++++ config.h 2006-07-30 00:42:37.000000000 -0700 +@@ -1,3 +1,4 @@ ++/* config.h. Generated by configure. */ + /* Copyright 2001 Enhanced Software Technologies Inc. + * Released under GNU General Public License V2 or Above + * See http://www.gnu.org for more information about the terms of +@@ -10,10 +11,10 @@ + #define CONFIG_H 1 + + /* autoconf changes these. */ +-#define HAVE_STRING_H 0 +-#define HAVE_UNISTD_H 0 +-#define HAVE_STDLIB_H 0 +-#define HAVE_STDARG_H 0 ++#define HAVE_STRING_H 1 ++#define HAVE_UNISTD_H 1 ++#define HAVE_STDLIB_H 1 ++#define HAVE_STDARG_H 1 + #define HAVE_SCSI_SCSI_H 0 + #define HAVE_SCSI_SCSI_IOCTL_H 0 + #define HAVE_SCSI_SG_H 0 +@@ -23,10 +24,12 @@ + #define HAVE_SYS_SCSI_CTL_H 0 + #define HAVE_DSLIB_H 0 + #define HAVE_DU_DEFS_H 0 +-#define HAVE_SYS_STAT_H 0 +-#define HAVE_SYS_TYPES_H 0 +-#define HAVE_FCNTL_H 0 ++#define HAVE_SYS_STAT_H 1 ++#define HAVE_SYS_TYPES_H 1 ++#define HAVE_FCNTL_H 1 + #define HAVE_SYS_IOCTL_H 0 ++#define HAVE_SYS_MTIO_H 0 ++#define HAVE_DDK_NTDDSCSI_H 1 + + #define WORDS_BIGENDIAN 0 + +Index: mtxl.c +=================================================================== +--- ../release/mtx-1.3.9/mtxl.c 2003-10-02 23:03:20.000000000 -0700 ++++ mtxl.c 2006-07-30 00:49:31.000000000 -0700 +@@ -53,6 +53,11 @@ + # include "scsi_linux.c" + #endif + ++/* the IOCTL_SCSI_PASSTHROUGH interface is used on Windows. */ ++#if HAVE_DDK_NTDDSCSI_H || defined(_MSC_VER) ++# include "scsi_win32.c" ++#endif ++ + /* The 'uscsi' interface is used on Solaris. */ + #if HAVE_SYS_SCSI_IMPL_USCSI_H + # include "scsi_sun.c" +@@ -78,6 +83,7 @@ + #include "[.vms]scsi.c" + #endif + ++void PrintHex(int Indent, unsigned char *Buffer, int Length); + extern char *argv0; /* something to let us do good error messages. */ + + /* create a global RequestSenseT value. 
*/ +@@ -104,6 +110,9 @@ + if (SCSI_ExecuteCommand(fd, Input, &CDB, 6, + Inquiry, sizeof(Inquiry_T), RequestSense) != 0) + { ++#ifdef DEBUG ++ fprintf(stderr, "SCSI Inquiry Command failed\n"); ++#endif + free(Inquiry); + return NULL; /* sorry! */ + } +@@ -111,36 +120,27 @@ + } + + ++#if defined(DEBUG_NSM) || defined(DEBUG_EXCHANGE) + /* DEBUG */ + static void dump_cdb(unsigned char *CDB, int len) { +- int i; + fprintf(stderr,"CDB:"); +- for (i=0;iNumStorageHi,sense_page->NumStorageLo, +- sense_page->NumImportExportHi, sense_page->NumImportExportLo); ++ sense_page->NumStorageHi,sense_page->NumStorageLo, ++ sense_page->NumImportExportHi, sense_page->NumImportExportLo); + fprintf(stderr,"rawNumTransport=%d %d rawNumDataTransfer=%d %d\n", +- sense_page->NumMediumTransportHi,sense_page->NumMediumTransportLo, +- sense_page->NumDataTransferHi,sense_page->NumDataTransferLo); ++ sense_page->NumMediumTransportHi,sense_page->NumMediumTransportLo, ++ sense_page->NumDataTransferHi,sense_page->NumDataTransferLo); + fflush(stderr); + #endif + +@@ -620,19 +618,16 @@ + CDB[11] = 0; /* Control */ + + #ifdef DEBUG_BARCODE +- { +- int i; +- fprintf(stderr,"CDB= "); +- for (i=0;i<12;i++) { +- fprintf(stderr,"%x ",CDB[i]); +- } +- fprintf(stderr,"\n"); +- fflush(stderr); +- } ++ fprintf(stderr,"CDB:\n"); ++ PrintHex(2, CDB, 12); + #endif + + if (SCSI_ExecuteCommand(MediumChangerFD, Input, &CDB, 12, + DataBuffer,NumBytes, RequestSense) != 0){ ++ ++#ifdef DEBUG ++ fprintf(stderr, "Read Element Status (0x%02X) failed\n", CDB[0]); ++#endif + /* okay, first see if we have sense key of 'illegal request', + additional sense code of '24', additional sense qualfier of + '0', and field in error of '4'. This means that we issued a request +@@ -654,15 +649,8 @@ + CDB[1] &= ~0x10; /* clear bar code flag! */ + + #ifdef DEBUG_BARCODE +- { +- int i; +- fprintf(stderr,"CDB= "); +- for (i=0;i<12;i++) { +- fprintf(stderr,"%x ",CDB[i]); +- } +- fprintf(stderr,"\n"); +- fflush(stderr); +- } ++ fprintf(stderr,"CDB:\n"); ++ PrintHex(2, CDB, 12); + #endif + + if (SCSI_ExecuteCommand(MediumChangerFD, Input, &CDB, 12, +@@ -679,14 +667,8 @@ + #ifdef DEBUG_BARCODE + /* print a bunch of extra debug data :-(. */ + PrintRequestSense(RequestSense); /* see what it sez :-(. */ +- { +- int i; +- fprintf(stderr,"Data:"); +- for (i=0;i<40;i++) { +- fprintf(stderr,"%02x ",DataBuffer[i]); +- } +- fprintf(stderr,"\n"); +- } ++ fprintf(stderr,"Data:\n"); ++ PrintHex(2, DataBuffer, 40); + #endif + return DataBuffer; /* we succeeded! */ + } +@@ -703,7 +685,7 @@ + ) { + + unsigned char *DataBuffer; /* size of data... 
*/ +- unsigned int real_numbytes; ++ int real_numbytes; + + + DataBuffer=SendElementStatusRequestActual(MediumChangerFD, +@@ -950,34 +932,42 @@ + BigEndian16(TransportElementDescriptor + ->SourceStorageElementAddress); + +- if (ElementStatus->DataTransferElementCount >= mode_sense->NumDataTransfer) { +- FatalError("Too many Data Transfer Elements Reported\n"); +- } +- if (ElementStatusPage->VolBits & E2_PVOLTAG) { +- copy_barcode(TransportElementDescriptor->PrimaryVolumeTag, +- ElementStatus->DataTransferPrimaryVolumeTag[ElementStatus->DataTransferElementCount]); +- } else { +- ElementStatus->DataTransferPrimaryVolumeTag[ElementStatus->DataTransferElementCount][0]=0; /* null string */ +- } +- if (ElementStatusPage->VolBits & E2_AVOLTAG) { +- copy_barcode(TransportElementDescriptor->AlternateVolumeTag, +- ElementStatus->DataTransferAlternateVolumeTag[ElementStatus->DataTransferElementCount]); +- } else { +- ElementStatus->DataTransferAlternateVolumeTag[ElementStatus->DataTransferElementCount][0]=0; /* null string */ +- } +- ElementStatus->DataTransferElementCount++; +- /* 0 actually is a usable element address */ +- /* if (DataTransferElementAddress == 0) */ +- /* FatalError( */ +- /* "illegal Data Transfer Element Address %d reported\n", */ +- /* DataTransferElementAddress); */ +- break; +- default: +- FatalError("illegal Element Type Code %d reported\n", +- ElementStatusPage->ElementTypeCode); +- } +- } ++#if DEBUG ++ fprintf(stderr, "%d: ElementAddress = %d, Full = %d, SourceElement = %d\n", ++ ElementStatus->DataTransferElementCount, ++ ElementStatus->DataTransferElementAddress[ElementStatus->DataTransferElementCount], ++ ElementStatus->DataTransferElementFull[ElementStatus->DataTransferElementCount], ++ ElementStatus->DataTransferElementSourceStorageElementNumber[ElementStatus->DataTransferElementCount]); ++#endif ++ if (ElementStatus->DataTransferElementCount >= mode_sense->NumDataTransfer) { ++ FatalError("Too many Data Transfer Elements Reported\n"); ++ } ++ if (ElementStatusPage->VolBits & E2_PVOLTAG) { ++ copy_barcode(TransportElementDescriptor->PrimaryVolumeTag, ++ ElementStatus->DataTransferPrimaryVolumeTag[ElementStatus->DataTransferElementCount]); ++ } else { ++ ElementStatus->DataTransferPrimaryVolumeTag[ElementStatus->DataTransferElementCount][0]=0; /* null string */ ++ } ++ if (ElementStatusPage->VolBits & E2_AVOLTAG) { ++ copy_barcode(TransportElementDescriptor->AlternateVolumeTag, ++ ElementStatus->DataTransferAlternateVolumeTag[ElementStatus->DataTransferElementCount]); ++ } else { ++ ElementStatus->DataTransferAlternateVolumeTag[ElementStatus->DataTransferElementCount][0]=0; /* null string */ ++ } ++ ElementStatus->DataTransferElementCount++; ++ ++ /* 0 actually is a usable element address */ ++ /* if (DataTransferElementAddress == 0) */ ++ /* FatalError( */ ++ /* "illegal Data Transfer Element Address %d reported\n", */ ++ /* DataTransferElementAddress); */ ++ break; ++ default: ++ FatalError("illegal Element Type Code %d reported\n", ++ ElementStatusPage->ElementTypeCode); ++ } + } ++ } + } + + /********************* Real ReadElementStatus ********************* */ +@@ -1008,7 +998,6 @@ + int *EmptyStorageElementAddress; /* [MAX_STORAGE_ELEMENTS]; */ + + int empty_idx=0; +- int invalid_sources=0; + boolean is_attached = false; + int i,j; + +@@ -1049,7 +1038,7 @@ + + EmptyStorageElementAddress=(int *)xzmalloc((mode_sense->NumStorage+1)*sizeof(int)); + for (i=0;iNumStorage;i++) { +- EmptyStorageElementAddress[i]=-1; ++ EmptyStorageElementAddress[i] = -1; + } + + /* Okay, 
now to send some requests for the various types of stuff: */ +@@ -1076,6 +1065,9 @@ + #endif + /* darn. Free up stuff and return. */ + /****FIXME**** do a free on element data! */ ++#ifdef DEBUG_MODE_SENSE ++ PrintRequestSense(RequestSense); ++#endif + FreeElementData(ElementStatus); + return NULL; + } +@@ -1107,6 +1099,9 @@ + #endif + /* darn. Free up stuff and return. */ + /****FIXME**** do a free on element data! */ ++#ifdef DEBUG_MODE_SENSE ++ PrintRequestSense(RequestSense); ++#endif + FreeElementData(ElementStatus); + return NULL; + } +@@ -1138,6 +1133,9 @@ + #endif + /* darn. Free up stuff and return. */ + /****FIXME**** do a free on element data! */ ++#ifdef DEBUG_MODE_SENSE ++ PrintRequestSense(RequestSense); ++#endif + FreeElementData(ElementStatus); + return NULL; + } +@@ -1172,6 +1170,9 @@ + #endif + /* darn. Free up stuff and return. */ + /****FIXME**** do a free on element data! */ ++#ifdef DEBUG_MODE_SENSE ++ PrintRequestSense(RequestSense); ++#endif + FreeElementData(ElementStatus); + return NULL; + } +@@ -1223,34 +1224,24 @@ + * is obviously defective: + */ + /* pass one: */ +- invalid_sources=0; /* no invalid sources yet! */ + for (i=0;iDataTransferElementCount;i++) { + int elnum; +- int translated_elnum = -1; + /* if we have an element, then ... */ + if (ElementStatus->DataTransferElementFull[i]) { + elnum=ElementStatus->DataTransferElementSourceStorageElementNumber[i]; + /* if we have an element number, then ... */ + if (elnum >= 0) { +- /* Now to translate the elnum: */ +- for (j=0; jStorageElementCount; j++) { +- if (elnum == ElementStatus->StorageElementAddress[j]) { +- translated_elnum=j; +- } +- } +- /* now see if the element # is already occupied: */ +- if (ElementStatus->StorageElementFull[translated_elnum]) { +- invalid_sources=1; +- break; /* break out of the loop! */ +- } else { +- /* properly set the source... */ +- ElementStatus->DataTransferElementSourceStorageElementNumber[i]= +- translated_elnum; +- } +- +- } else { +- /* if element # was not >=0, then we had an invalid source anyhow! */ +- invalid_sources=1; ++ /* Now to translate the elnum: */ ++ ElementStatus->DataTransferElementSourceStorageElementNumber[i] = -1; ++ for (j=0; jStorageElementCount; j++) { ++ if (elnum == ElementStatus->StorageElementAddress[j]) { ++ /* now see if the element # is already occupied:*/ ++ if (!ElementStatus->StorageElementFull[j]) { ++ /* properly set the source... */ ++ ElementStatus->DataTransferElementSourceStorageElementNumber[i]= j; ++ } ++ } ++ } + } + } + } +@@ -1267,21 +1258,19 @@ + * by the user interface. This is an invalid value, but more useful for us + * to have than just crapping out here :-(. 
+ */ +- if (invalid_sources) { +- empty_idx=0; +- for (i=0;iDataTransferElementCount;i++) { +- if (ElementStatus->DataTransferElementFull[i]) { ++ empty_idx=0; ++ for (i = 0; i < ElementStatus->DataTransferElementCount; i++) { ++ if (ElementStatus->DataTransferElementFull[i] && ++ ElementStatus->DataTransferElementSourceStorageElementNumber[i] < 0) { + #ifdef DEBUG_TAPELIST +- fprintf(stderr,"for drive %d, changing source %d to %d (empty slot #%d)\n", +- i, +- ElementStatus->DataTransferElementSourceStorageElementNumber[i], +- EmptyStorageElementAddress[empty_idx], +- empty_idx); ++ fprintf(stderr,"for drive %d, changing to %d (empty slot #%d)\n", ++ i, ++ EmptyStorageElementAddress[empty_idx], ++ empty_idx); + #endif + + ElementStatus->DataTransferElementSourceStorageElementNumber[i]= + EmptyStorageElementAddress[empty_idx++]; +- } + } + } + +@@ -1337,9 +1326,9 @@ + CDB[2] = (ElementStatus->TransportElementAddress >> 8) & 0xFF; /* Transport Element Address MSB */ + CDB[3] = (ElementStatus->TransportElementAddress) & 0xff; /* Transport Element Address LSB */ + CDB[4] = (SourceAddress >> 8) & 0xFF; /* Source Address MSB */ +- CDB[5] = SourceAddress & 0xFF; /* Source Address MSB */ ++ CDB[5] = SourceAddress & 0xFF; /* Source Address LSB */ + CDB[6] = (DestinationAddress >> 8) & 0xFF; /* Destination Address MSB */ +- CDB[7] = DestinationAddress & 0xFF; /* Destination Address MSB */ ++ CDB[7] = DestinationAddress & 0xFF; /* Destination Address LSB */ + CDB[8] = 0; /* Reserved */ + CDB[9] = 0; /* Reserved */ + if (flags->invert) { +@@ -1351,7 +1340,11 @@ + CDB[11] = 0 | (flags->eepos <<6); /* Control */ + + if (SCSI_ExecuteCommand(MediumChangerFD, Output, &CDB, 12, +- NULL, 0, RequestSense) != 0) { ++ NULL, 0, RequestSense) != 0) { ++ ++#ifdef DEBUG ++ fprintf(stderr, "Move Medium (0x%02X) failed\n", CDB[0]); ++#endif + return RequestSense; + } + free(RequestSense); +@@ -1372,9 +1365,9 @@ + CDB[2] = (ElementStatus->TransportElementAddress >> 8) & 0xFF; /* Transport Element Address MSB */ + CDB[3] = (ElementStatus->TransportElementAddress) & 0xff; /* Transport Element Address LSB */ + CDB[4] = (SourceAddress >> 8) & 0xFF; /* Source Address MSB */ +- CDB[5] = SourceAddress & 0xFF; /* Source Address MSB */ ++ CDB[5] = SourceAddress & 0xFF; /* Source Address LSB */ + CDB[6] = (DestinationAddress >> 8) & 0xFF; /* Destination Address MSB */ +- CDB[7] = DestinationAddress & 0xFF; /* Destination Address MSB */ ++ CDB[7] = DestinationAddress & 0xFF; /* Destination Address LSB */ + CDB[8] = (Dest2Address>>8) & 0xFF; /* move destination back to source? */ + CDB[9] = Dest2Address & 0xFF; /* move destination back to source? */ + CDB[10]=0; +@@ -1418,12 +1411,53 @@ + + if (SCSI_ExecuteCommand(MediumChangerFD, Output, &CDB, 6, + NULL, 0, RequestSense) != 0) { ++#ifdef DEBUG ++ fprintf(stderr, "Erase (0x19) failed\n"); ++#endif + return RequestSense; + } + free(RequestSense); + return NULL; /* Success! */ + } + ++static char Spaces[] = " "; ++ ++void PrintHex(int Indent, unsigned char *Buffer, int Length) ++{ ++ int idxBuffer; ++ int idxAscii; ++ int PadLength; ++ char cAscii; ++ ++ for (idxBuffer = 0; idxBuffer < Length; idxBuffer++) { ++ if ((idxBuffer % 16) == 0) { ++ if (idxBuffer > 0) { ++ fputc('\'', stderr); ++ ++ for (idxAscii = idxBuffer - 16; idxAscii < idxBuffer; idxAscii++) { ++ cAscii = Buffer[idxAscii] >= 0x20 && Buffer[idxAscii] < 0x7F ? 
Buffer[idxAscii] : '.'; ++ fputc(cAscii, stderr); ++ } ++ fputs("'\n", stderr); ++ } ++ fprintf(stderr, "%.*s%04X: ", Indent, Spaces, idxBuffer); ++ } ++ fprintf(stderr, "%02X ", (unsigned char)Buffer[idxBuffer]); ++ } ++ ++ PadLength = 16 - (Length % 16); ++ ++ if (PadLength > 0) { ++ fprintf(stderr, "%.*s'", 3 * PadLength, Spaces); ++ ++ for (idxAscii = idxBuffer - (16 - PadLength); idxAscii < idxBuffer; idxAscii++) { ++ cAscii = Buffer[idxAscii] >= 0x20 && Buffer[idxAscii] < 0x80 ? Buffer[idxAscii] : '.'; ++ fputc(cAscii, stderr); ++ } ++ fputs("'\n", stderr); ++ } ++ fflush(stderr); ++} + + #ifdef LONG_PRINT_REQUEST_SENSE + +@@ -1488,12 +1522,9 @@ + #else + void PrintRequestSense(RequestSense_T *RequestSense) + { +- int i; +- fprintf(stderr, "mtx: Request Sense: %02X", +- ((unsigned char *) RequestSense)[0]); +- for (i = 1; i < sizeof(RequestSense_T); i++) +- fprintf(stderr, " %02X", ((unsigned char *) RequestSense)[i]); +- fprintf(stderr, "\n"); ++ fprintf(stderr, "mtx: Request Sense: %02X\n", ++ ((unsigned char *) RequestSense)[0]); ++ PrintHex(2, (char *)RequestSense, sizeof(RequestSense_T)); + } + + #endif +Index: Makefile +=================================================================== +--- ../release/mtx-1.3.9/Makefile.in 2006-02-20 13:42:10.000000000 -0800 ++++ Makefile 2006-07-30 01:22:00.000000000 -0700 +@@ -11,26 +11,28 @@ + # Version # for 'make dist'... + VERSION=1.3.9 + +-BINS = mtx tapeinfo loaderinfo scsitape nsmhack ++BINS = mtx.exe tapeinfo.exe loaderinfo.exe scsitape.exe nsmhack.exe ++DBGS := $(BINS:%.exe=%.dbg) + +-TARGET = @TARGET@ +-CPU = @CPU@ +-CC = @CC@ +-INSTALL = @INSTALL@ +- +-CFLAGS = @CFLAGS@ +-CPPFLAGS = @CPPFLAGS@ -DVERSION="\"$(VERSION)\"" +-LDFLAGS = @LDFLAGS@ +-LIBS = @LIBS@ ++TARGET = mingw ++CPU = 386 ++CC = mingw32-gcc ++INSTALL = install -c ++ ++CFLAGS = -g -O2 ++CPPFLAGS = -DVERSION="\"$(VERSION)\"" ++LDFLAGS = ++LIBS = ++USE_OBJCOPY = yes + + INSTALL_DOC = $(INSTALL) -m 644 + INSTALL_BIN = $(INSTALL) -m 755 + INSTALL_DIR = $(INSTALL) -m 755 -d + +-prefix = @prefix@ +-exec_prefix = @exec_prefix@ +-sbindir = @sbindir@ +-mandir = @mandir@ ++prefix = dummy ++exec_prefix = ${prefix} ++sbindir = ${exec_prefix}/bin ++mandir = ${prefix}/man + + # + # Linux on x86... +@@ -40,6 +42,11 @@ + CPPFLAGS += -I/usr/src/linux/include -DLONG_PRINT_REQUEST_SENSE=1 + endif + ++ifeq ($(TARGET),mingw) ++CFLAGS += -Wall ++CPPFLAGS += -DLONG_PRINT_REQUEST_SENSE=1 ++endif ++ + # + # FreeBSD on x86... + # +@@ -82,12 +89,22 @@ + See vms/000readme for information. 
+ endif + ++%.dbg : %.exe ++ifeq ($(USE_OBJCOPY),yes) ++ mingw32-objcopy --only-keep-debug $< $@ ++ mingw32-objcopy --strip-debug $< ++ mingw32-objcopy --add-gnu-debuglink=$@ $< ++else ++ strip $< -o $@ ++endif ++ + all: $(BINS) + +-install: $(BINS) ++dbgs: $(DBGS) ++ ++install: $(BINS) $(DBGS) + $(INSTALL_DIR) $(sbindir) + for file in $(BINS); do \ +- strip "$$file" ; \ + $(INSTALL_BIN) "$$file" $(sbindir) ; \ + done + $(INSTALL_DIR) $(mandir) $(mandir)/man1 +@@ -98,7 +115,9 @@ + clean: + rm -f *.o *~ + rm -f $(BINS) +- rm -f mam2debug mam2debug2 ++ rm -f $(DBGS) ++ rm -f mam2debug.exe mam2debug2.exe ++ rm -rf autom4te.cache + + distclean: clean + rm -f Makefile config.log config.cache config.status +@@ -106,27 +125,26 @@ + dist: distclean + ./makedist $(VERSION) + +-loaderinfo: loaderinfo.o mtxl.o mtxl.h mtx.h $(EXTRA) +- $(CC) $(LDFLAGS) -o loaderinfo loaderinfo.o mtxl.o $(EXTRA) $(LIBS) +- ++loaderinfo.exe: loaderinfo.o mtxl.o mtxl.h mtx.h $(EXTRA) ++ $(CC) $(LDFLAGS) -o $@ loaderinfo.o mtxl.o $(EXTRA) $(LIBS) + +-nsmhack: nsmhack.o mtxl.o $(EXTRA) +- $(CC) $(LDFLAGS) -o nsmhack nsmhack.o mtxl.o $(EXTRA) $(LIBS) ++nsmhack.exe: nsmhack.o mtxl.o $(EXTRA) ++ $(CC) $(LDFLAGS) -o $@ nsmhack.o mtxl.o $(EXTRA) $(LIBS) + +-mtx: mtx.o mtxl.o mtxl.h mtx.h $(EXTRA) +- $(CC) $(LDFLAGS) -o mtx mtx.o mtxl.o $(EXTRA) $(LIBS) ++mtx.exe: mtx.o mtxl.o mtxl.h mtx.h $(EXTRA) ++ $(CC) $(LDFLAGS) -o $@ mtx.o mtxl.o $(EXTRA) $(LIBS) + +-mam2debug: mtxl.o mam2debug.o mtx.h $(EXTRA) +- $(CC) $(LDFLAGS) -o mam2debug mtxl.o mam2debug.o $(EXTRA) $(LIBS) ++mam2debug.exe: mtxl.o mam2debug.o mtx.h $(EXTRA) ++ $(CC) $(LDFLAGS) -o $@ mtxl.o mam2debug.o $(EXTRA) $(LIBS) + +-tapeinfo: tapeinfo.o mtxl.o mtx.h mtxl.h $(EXTRA) +- $(CC) $(LDFLAGS) -o tapeinfo tapeinfo.o mtxl.o $(EXTRA) $(LIBS) ++tapeinfo.exe: tapeinfo.o mtxl.o mtx.h mtxl.h $(EXTRA) ++ $(CC) $(LDFLAGS) -o $@ tapeinfo.o mtxl.o $(EXTRA) $(LIBS) + +-mam2debug2: mtxl.o mam2debug2.o mtx.h $(EXTRA) +- $(CC) $(LDFLAGS) -o mam2debug2 mtxl.o mam2debug2.o $(EXTRA) $(LIBS) ++mam2debug2.exe: mtxl.o mam2debug2.o mtx.h $(EXTRA) ++ $(CC) $(LDFLAGS) -o $@ mtxl.o mam2debug2.o $(EXTRA) $(LIBS) + +-scsitape: scsitape.o mtxl.o mtxl.h mtx.h $(EXTRA) +- $(CC) $(LDFLAGS) -o scsitape scsitape.o mtxl.o $(EXTRA) $(LIBS) ++scsitape.exe: scsitape.o mtxl.o mtxl.h mtx.h $(EXTRA) ++ $(CC) $(LDFLAGS) -o $@ scsitape.o mtxl.o $(EXTRA) $(LIBS) + + scsitape.o: scsitape.c mtx.h mtxl.h + +@@ -140,6 +158,6 @@ + + mtx.o: mtx.c mtx.h mtxl.h + +-mtxl.o: mtxl.c mtx.h mtxl.h scsi_linux.c ++mtxl.o: mtxl.c mtx.h mtxl.h scsi_linux.c scsi_win32.c + + nsmhack.o: nsmhack.c mtxl.h mtx.h diff --git a/src/win32/patches/nsis.patch b/src/win32/patches/nsis.patch new file mode 100644 index 00000000..0a37cd7c --- /dev/null +++ b/src/win32/patches/nsis.patch @@ -0,0 +1,428 @@ +Index: SCons/Config/gnu +--- ../release/nsis-2.17-src/SCons/Config/gnu 2006-04-28 08:54:41.000000000 -0700 ++++ SCons/Config/gnu 2006-08-07 18:49:47.000000000 -0700 +@@ -65,7 +65,7 @@ + cross_env(stub_env) + + if not defenv['DEBUG']: +- stub_env.Append(CCFLAGS = '-Os') # optimize for size ++ stub_env.Append(CCFLAGS = '-Os -fno-strict-aliasing') # optimize for size + stub_env.Append(CCFLAGS = '-Wall') # all warnings + stub_env.Append(CCFLAGS = '-x c') # force compile as c + +@@ -82,8 +82,8 @@ + makensis_env = defenv.Copy() + + if not defenv['DEBUG']: +- makensis_env.Append(CCFLAGS = '-O2') # optimize ++ makensis_env.Append(CCFLAGS = '-O2 -fno-strict-aliasing') # optimize + makensis_env.Append(CCFLAGS = '-Wall') # all warnings + + conf = 
FlagsConfigure(makensis_env) + conf.CheckLinkFlag('$MAP_FLAG') # generate map file +@@ -97,7 +97,7 @@ + cross_env(plugin_env) + + if not defenv['DEBUG']: +- plugin_env.Append(CCFLAGS = '-Os') # optimize for size ++ plugin_env.Append(CCFLAGS = '-Os -fno-strict-aliasing') # optimize for size + plugin_env.Append(CCFLAGS = '-Wall') # level 3 warnings + + if not defenv['DEBUG']: +@@ -111,7 +111,7 @@ + cp_util_env = defenv.Copy() + + if not defenv['DEBUG']: +- cp_util_env.Append(CCFLAGS = '-O2') # optimize ++ cp_util_env.Append(CCFLAGS = '-O2 -fno-strict-aliasing') # optimize + cp_util_env.Append(CCFLAGS = '-Wall') # all warnings + + conf = FlagsConfigure(cp_util_env) +Index: Source/build.cpp +--- ../release/nsis-2.17-src/Source/build.cpp 2006-04-14 03:05:01.000000000 -0700 ++++ ./Source/build.cpp 2006-07-31 13:26:38.000000000 -0700 +@@ -2384,7 +2384,7 @@ + return PS_ERROR; + } + #ifdef NSIS_CONFIG_CRC_SUPPORT +- crc_writer_sink crc_sink((unsigned long *) &crc); ++ crc_writer_sink crc_sink((unsigned int *) &crc); + firstheader_writer w(&crc_sink); + w.write(&fh); + +Index: Source/build.h +--- ../release/nsis-2.17-src/Source/build.h 2005-04-02 04:04:06.000000000 -0800 ++++ ./Source/build.h 2006-07-31 13:28:44.000000000 -0700 +@@ -38,7 +38,7 @@ + #ifdef NSIS_CONFIG_CRC_SUPPORT + extern "C" + { +- unsigned long NSISCALL CRC32(unsigned long crc, const unsigned char *buf, unsigned int len); ++ unsigned int NSISCALL CRC32(unsigned int crc, const unsigned char *buf, unsigned int len); + }; + #endif + +Index: Source/crc32.c +--- ../release/nsis-2.17-src/Source/crc32.c 2004-03-12 12:43:54.000000000 -0800 ++++ ./Source/crc32.c 2006-07-31 13:27:12.000000000 -0700 +@@ -3,18 +3,18 @@ + #ifdef NSIS_CONFIG_CRC_SUPPORT + + // this is based on the (slow,small) CRC32 implementation from zlib. +-unsigned long NSISCALL CRC32(unsigned long crc, const unsigned char *buf, unsigned int len) ++unsigned int NSISCALL CRC32(unsigned int crc, const unsigned char *buf, unsigned int len) + { +- static unsigned long crc_table[256]; ++ static unsigned int crc_table[256]; + + if (!crc_table[1]) + { +- unsigned long c; ++ unsigned int c; + int n, k; + + for (n = 0; n < 256; n++) + { +- c = (unsigned long)n; ++ c = (unsigned int)n; + for (k = 0; k < 8; k++) c = (c >> 1) ^ (c & 1 ? 0xedb88320L : 0); + crc_table[n] = c; + } +Index: Source/DialogTemplate.cpp +--- ../release/nsis-2.17-src/Source/DialogTemplate.cpp 2006-03-24 10:36:24.000000000 -0800 ++++ ./Source/DialogTemplate.cpp 2006-07-31 05:48:44.000000000 -0700 +@@ -93,7 +93,7 @@ + if (IS_INTRESOURCE(x)) { \ + *(WORD*)seeker = 0xFFFF; \ + seeker += sizeof(WORD); \ +- *(WORD*)seeker = ConvertEndianness(WORD(DWORD(x))); \ ++ *(WORD*)seeker = ConvertEndianness(WORD(ULONG_PTR(x))); \ + seeker += sizeof(WORD); \ + } \ + else { \ +@@ -629,7 +629,7 @@ + } + } + +- assert((DWORD) seeker - (DWORD) pbDlg == dwSize); ++ assert((ULONG_PTR) seeker - (ULONG_PTR) pbDlg == dwSize); + + // DONE! 
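One pattern runs through this nsis.patch, both in the build.cpp/crc32.c and DialogTemplate.cpp hunks above and in the Platform.h and ResourceEditor.cpp hunks that follow: pointer values that used to be forced through DWORD (32 bits) now go through ULONG_PTR, and the CRC32 interface moves from unsigned long to unsigned int, so makensis still behaves when built by a compiler whose longs and pointers are 64 bits wide (for example a 64-bit Linux cross build). The standalone sketch below illustrates the underlying issue using the portable <stdint.h> counterparts (uint32_t for DWORD, uintptr_t for ULONG_PTR); it is an editorial illustration, not code from the patch.

    /* Sketch: why pointer arithmetic must go through a pointer-sized integer. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
       char buf[32];
       char *p = buf + 20;

       uintptr_t full = (uintptr_t)p;            /* pointer-sized: round-trips  */
       uint32_t  low  = (uint32_t)(uintptr_t)p;  /* may drop the high 32 bits   */

       /* Offsets computed through the pointer-sized type are always correct: */
       printf("offset = %lu\n", (unsigned long)(full - (uintptr_t)buf));

       /* On an LP64 host sizeof(long) is 8, so a CRC kept in 'unsigned long'
        * is no longer a 32-bit quantity; hence the switch to 'unsigned int'. */
       printf("sizeof(long)=%zu sizeof(void *)=%zu\n",
              sizeof(long), sizeof(void *));

       (void)low;
       return 0;
    }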
+ return pbDlg; +Index: Source/exehead/fileform.c +--- ../release/nsis-2.17-src/Source/exehead/fileform.c 2005-09-09 09:08:44.000000000 -0700 ++++ ./Source/exehead/fileform.c 2006-07-31 13:26:08.000000000 -0700 +@@ -95,7 +95,7 @@ + static z_stream g_inflate_stream; + #endif + +-extern unsigned long NSISCALL CRC32(unsigned long crc, const unsigned char *buf, unsigned int len); ++extern unsigned int NSISCALL CRC32(unsigned int crc, const unsigned char *buf, unsigned int len); + + const char * NSISCALL loadHeaders(int cl_flags) + { +Index: Source/Platform.h +--- ../release/nsis-2.17-src/Source/Platform.h 2006-05-03 08:43:54.000000000 -0700 ++++ ./Source/Platform.h 2006-07-31 05:48:44.000000000 -0700 +@@ -16,15 +16,15 @@ + // basic types + typedef unsigned char BYTE, *PBYTE, *LPBYTE; + typedef unsigned short WORD, *LPWORD; +-typedef unsigned long DWORD, *LPDWORD; ++typedef unsigned int DWORD, *LPDWORD; + typedef short SHORT; + typedef unsigned short USHORT; + typedef unsigned int UINT; + typedef unsigned int UINT32; + typedef int INT; + typedef int INT32; +-typedef long LONG; +-typedef unsigned long ULONG; ++typedef int LONG; ++typedef unsigned int ULONG; + typedef long long INT64, LARGE_INTEGER; + typedef unsigned long long UINT64, ULARGE_INTEGER; + typedef int BOOL, *LPBOOL; +@@ -35,13 +35,14 @@ + typedef const char *LPCCH, *PCSTR, *LPCSTR; + typedef unsigned short WCHAR, *PWCHAR, *LPWCH, *PWCH, *NWPSTR, *LPWSTR, *PWSTR; + typedef const unsigned short *LPCWCH, *PCWCH, *LPCWSTR, *PCWSTR; +-typedef unsigned int UINT_PTR; ++typedef unsigned long UINT_PTR; ++typedef unsigned long ULONG_PTR; + // basic stuff + typedef void * HANDLE; +-typedef unsigned long HKEY; ++typedef unsigned int HKEY; + // some gdi +-typedef unsigned long COLORREF; +-typedef unsigned long HBRUSH; ++typedef unsigned int COLORREF; ++typedef unsigned int HBRUSH; + // bool + # define FALSE 0 + # define TRUE 1 +@@ -129,13 +130,13 @@ + + #ifndef _WIN32 + # ifndef FIELD_OFFSET +-# define FIELD_OFFSET(t,f) ((LONG)&(((t*)0)->f)) ++# define FIELD_OFFSET(t,f) ((ULONG_PTR)&(((t*)0)->f)) + # endif + # ifndef MAKEINTRESOURCE + # define MAKEINTRESOURCE(i) (LPSTR)((DWORD)((WORD)(i))) + # endif + # ifndef IMAGE_FIRST_SECTION +-# define IMAGE_FIRST_SECTION(h) ( PIMAGE_SECTION_HEADER( (DWORD) h + \ ++# define IMAGE_FIRST_SECTION(h) ( PIMAGE_SECTION_HEADER( (ULONG_PTR) h + \ + FIELD_OFFSET(IMAGE_NT_HEADERS, OptionalHeader) + \ + FIX_ENDIAN_INT16(PIMAGE_NT_HEADERS(h)->FileHeader.SizeOfOptionalHeader) ) ) + # endif +@@ -166,9 +167,9 @@ + # define FOF_NOERRORUI 0x0400 + #endif + +-#ifndef ULONG_PTR +-# define ULONG_PTR DWORD +-#endif ++//#ifndef ULONG_PTR ++//# define ULONG_PTR ULONG ++//#endif + + #ifndef IDC_HAND + # define IDC_HAND MAKEINTRESOURCE(32649) +Index: Source/Plugins.cpp +--- ../release/nsis-2.17-src/Source/Plugins.cpp 2006-04-05 11:42:12.000000000 -0700 ++++ ./Source/Plugins.cpp 2006-07-31 06:50:08.000000000 -0700 +@@ -120,8 +120,8 @@ + DWORD prd = FIX_ENDIAN_INT32(sections[i].PointerToRawData); + PIMAGE_EXPORT_DIRECTORY exports = PIMAGE_EXPORT_DIRECTORY(&dlldata[0] + prd + ExportDirVA - va); + DWORD na = FIX_ENDIAN_INT32(exports->AddressOfNames); +- unsigned long *names = (unsigned long*)((unsigned long) exports + (char *) na - ExportDirVA); +- for (unsigned long j = 0; j < FIX_ENDIAN_INT32(exports->NumberOfNames); j++) ++ unsigned int *names = (unsigned int*)((unsigned long) exports + (char *) na - ExportDirVA); ++ for (unsigned int j = 0; j < FIX_ENDIAN_INT32(exports->NumberOfNames); j++) + { + const string name = 
string((char*)exports + FIX_ENDIAN_INT32(names[j]) - ExportDirVA); + const string signature = dllName + "::" + name; +Index: Source/ResourceEditor.cpp +--- ../release/nsis-2.17-src/Source/ResourceEditor.cpp 2006-04-05 11:40:09.000000000 -0700 ++++ ./Source/ResourceEditor.cpp 2006-07-31 05:48:44.000000000 -0700 +@@ -545,7 +545,7 @@ + rdDir.NumberOfIdEntries = ConvertEndianness(rdDir.NumberOfIdEntries); + + CopyMemory(seeker, &rdDir, sizeof(IMAGE_RESOURCE_DIRECTORY)); +- crd->m_dwWrittenAt = DWORD(seeker); ++ crd->m_dwWrittenAt = ULONG_PTR(seeker); + seeker += sizeof(IMAGE_RESOURCE_DIRECTORY); + + for (int i = 0; i < crd->CountEntries(); i++) { +@@ -566,7 +566,7 @@ + rDirE.NameString.NameIsString = (crd->GetEntry(i)->HasName()) ? 1 : 0; + + CopyMemory(seeker, &rDirE, sizeof(MY_IMAGE_RESOURCE_DIRECTORY_ENTRY)); +- crd->GetEntry(i)->m_dwWrittenAt = DWORD(seeker); ++ crd->GetEntry(i)->m_dwWrittenAt = ULONG_PTR(seeker); + seeker += sizeof(MY_IMAGE_RESOURCE_DIRECTORY_ENTRY); + } + qDirs.pop(); +@@ -582,7 +582,7 @@ + rDataE.Size = ConvertEndianness(cRDataE->GetSize()); + + CopyMemory(seeker, &rDataE, sizeof(IMAGE_RESOURCE_DATA_ENTRY)); +- cRDataE->m_dwWrittenAt = DWORD(seeker); ++ cRDataE->m_dwWrittenAt = ULONG_PTR(seeker); + seeker += sizeof(IMAGE_RESOURCE_DATA_ENTRY); + + qDataEntries.pop(); +@@ -594,7 +594,7 @@ + while (!qStrings.empty()) { + CResourceDirectoryEntry* cRDirE = qStrings.front(); + +- PMY_IMAGE_RESOURCE_DIRECTORY_ENTRY(cRDirE->m_dwWrittenAt)->NameString.NameOffset = ConvertEndianness(DWORD(seeker) - DWORD(pbRsrcSec)); ++ PMY_IMAGE_RESOURCE_DIRECTORY_ENTRY(cRDirE->m_dwWrittenAt)->NameString.NameOffset = ConvertEndianness(DWORD(ULONG_PTR(seeker) - ULONG_PTR(pbRsrcSec))); + + char* szName = cRDirE->GetName(); + WORD iLen = strlen(szName) + 1; +@@ -626,7 +626,7 @@ + while (!qDataEntries2.empty()) { + CResourceDataEntry* cRDataE = qDataEntries2.front(); + CopyMemory(seeker, cRDataE->GetData(), cRDataE->GetSize()); +- PIMAGE_RESOURCE_DATA_ENTRY(cRDataE->m_dwWrittenAt)->OffsetToData = ConvertEndianness(seeker - pbRsrcSec + m_dwResourceSectionVA); ++ PIMAGE_RESOURCE_DATA_ENTRY(cRDataE->m_dwWrittenAt)->OffsetToData = ConvertEndianness(DWORD(seeker - pbRsrcSec + m_dwResourceSectionVA)); + + seeker += RALIGN(cRDataE->GetSize(), 8); + +@@ -636,7 +636,7 @@ + /* + * Set all of the directory entries offsets. 
+ */ +- SetOffsets(m_cResDir, DWORD(pbRsrcSec)); ++ SetOffsets(m_cResDir, ULONG_PTR(pbRsrcSec)); + } + + // Sets the offsets in directory entries +@@ -650,7 +650,7 @@ + SetOffsets(resDir->GetEntry(i)->GetSubDirectory(), newResDirAt); + } + else { +- rde->OffsetToData = ConvertEndianness(resDir->GetEntry(i)->GetDataEntry()->m_dwWrittenAt - newResDirAt); ++ rde->OffsetToData = ConvertEndianness(DWORD(resDir->GetEntry(i)->GetDataEntry()->m_dwWrittenAt - newResDirAt)); + } + } + } +@@ -758,7 +758,7 @@ + // Returns -1 if can not be found + int CResourceDirectory::Find(char* szName) { + if (IS_INTRESOURCE(szName)) +- return Find((WORD) (DWORD) szName); ++ return Find((WORD) (ULONG_PTR) szName); + else + if (szName[0] == '#') + return Find(WORD(atoi(szName + 1))); +@@ -836,7 +836,7 @@ + if (IS_INTRESOURCE(szName)) { + m_bHasName = false; + m_szName = 0; +- m_wId = (WORD) (DWORD) szName; ++ m_wId = (WORD) (ULONG_PTR) szName; + } + else { + m_bHasName = true; +@@ -851,7 +851,7 @@ + if (IS_INTRESOURCE(szName)) { + m_bHasName = false; + m_szName = 0; +- m_wId = (WORD) (DWORD) szName; ++ m_wId = (WORD) (ULONG_PTR) szName; + } + else { + m_bHasName = true; +Index: Source/ResourceEditor.h +--- ../release/nsis-2.17-src/Source/ResourceEditor.h 2006-04-28 08:54:42.000000000 -0700 ++++ ./Source/ResourceEditor.h 2006-07-31 05:48:44.000000000 -0700 +@@ -173,7 +173,7 @@ + + void Destroy(); + +- DWORD m_dwWrittenAt; ++ ULONG_PTR m_dwWrittenAt; + + private: + IMAGE_RESOURCE_DIRECTORY m_rdDir; +@@ -197,7 +197,7 @@ + + CResourceDataEntry* GetDataEntry(); + +- DWORD m_dwWrittenAt; ++ ULONG_PTR m_dwWrittenAt; + + private: + bool m_bHasName; +@@ -226,7 +226,7 @@ + DWORD GetSize(); + DWORD GetCodePage(); + +- DWORD m_dwWrittenAt; ++ ULONG_PTR m_dwWrittenAt; + + private: + BYTE* m_pbData; +Index: Source/script.cpp +--- ../release/nsis-2.17-src/Source/script.cpp 2006-03-28 10:22:34.000000000 -0800 ++++ ./Source/script.cpp 2006-07-31 20:56:03.000000000 -0700 +@@ -4748,8 +4748,8 @@ + { + struct + { +- long l; +- long h; ++ int l; ++ int h; + } words; + long long ll; + }; +@@ -6075,8 +6075,8 @@ + { + struct + { +- long l; +- long h; ++ int l; ++ int h; + } words; + long long ll; + }; +Index: Source/util.cpp +--- ../release/nsis-2.17-src/Source/util.cpp 2006-04-28 08:54:42.000000000 -0700 ++++ ./Source/util.cpp 2006-07-31 05:48:44.000000000 -0700 +@@ -312,7 +312,7 @@ + FIX_ENDIAN_INT32_INPLACE(rdEntry.OffsetToData); + MY_ASSERT(!rdEntry.DirectoryOffset.DataIsDirectory, "bad resource directory"); + +- PRESOURCE_DIRECTORY rdIcons = PRESOURCE_DIRECTORY(rdEntry.DirectoryOffset.OffsetToDirectory + DWORD(rdRoot)); ++ PRESOURCE_DIRECTORY rdIcons = PRESOURCE_DIRECTORY(rdEntry.DirectoryOffset.OffsetToDirectory + ULONG_PTR(rdRoot)); + + MY_ASSERT((size_t)rdIcons - (size_t)exeHeader > exeHeaderSize, "corrupted EXE - invalid pointer"); + +@@ -325,7 +325,7 @@ + FIX_ENDIAN_INT32_INPLACE(icoEntry.OffsetToData); + + MY_ASSERT(!icoEntry.DirectoryOffset.DataIsDirectory, "bad resource directory"); +- PRESOURCE_DIRECTORY rd = PRESOURCE_DIRECTORY(icoEntry.DirectoryOffset.OffsetToDirectory + DWORD(rdRoot)); ++ PRESOURCE_DIRECTORY rd = PRESOURCE_DIRECTORY(icoEntry.DirectoryOffset.OffsetToDirectory + ULONG_PTR(rdRoot)); + + MY_ASSERT((size_t)rd - (size_t)exeHeader > exeHeaderSize, "corrupted EXE - invalid pointer"); + +@@ -334,7 +334,7 @@ + + MY_ASSERT(datEntry.DirectoryOffset.DataIsDirectory, "bad resource directory"); + +- PIMAGE_RESOURCE_DATA_ENTRY rde = PIMAGE_RESOURCE_DATA_ENTRY(datEntry.OffsetToData + DWORD(rdRoot)); ++ 
PIMAGE_RESOURCE_DATA_ENTRY rde = PIMAGE_RESOURCE_DATA_ENTRY(datEntry.OffsetToData + ULONG_PTR(rdRoot)); + + MY_ASSERT((size_t)rde - (size_t)exeHeader > exeHeaderSize, "corrupted EXE - invalid pointer"); + +@@ -355,10 +355,10 @@ + } + + // Set offset +- DWORD dwOffset = FIX_ENDIAN_INT32(rde->OffsetToData) + DWORD(rdRoot) - dwResourceSectionVA - DWORD(exeHeader); ++ DWORD dwOffset = FIX_ENDIAN_INT32(rde->OffsetToData) + ULONG_PTR(rdRoot) - dwResourceSectionVA - ULONG_PTR(exeHeader); + *(LPDWORD) seeker = FIX_ENDIAN_INT32(dwOffset); + +- MY_ASSERT(dwOffset > exeHeaderSize || dwOffset < (DWORD)rdRoot - (DWORD)exeHeader, "invalid data offset - icon resource probably compressed"); ++ MY_ASSERT(dwOffset > exeHeaderSize || dwOffset < (ULONG_PTR)rdRoot - (ULONG_PTR)exeHeader, "invalid data offset - icon resource probably compressed"); + } + + LPBYTE seeker = uninstIconData; +Index: Source/writer.cpp +--- ../release/nsis-2.17-src/Source/writer.cpp 2006-03-11 03:13:07.000000000 -0800 ++++ ./Source/writer.cpp 2006-07-31 13:27:37.000000000 -0700 +@@ -64,7 +64,7 @@ + } + + #ifdef NSIS_CONFIG_CRC_SUPPORT +-extern "C" unsigned long NSISCALL CRC32(unsigned long crc, const unsigned char *buf, unsigned int len); ++extern "C" unsigned int NSISCALL CRC32(unsigned int crc, const unsigned char *buf, unsigned int len); + + void crc_writer_sink::write_data(const void *data, const size_t size) + { +Index: Source/writer.h +--- ../release/nsis-2.17-src/Source/writer.h 2006-03-11 03:13:07.000000000 -0800 ++++ ./Source/writer.h 2006-07-31 13:27:58.000000000 -0700 +@@ -57,12 +57,12 @@ + #ifdef NSIS_CONFIG_CRC_SUPPORT + class crc_writer_sink : public writer_sink { + public: +- crc_writer_sink(unsigned long *crc) : m_crc(crc) {} ++ crc_writer_sink(unsigned int *crc) : m_crc(crc) {} + + virtual void write_data(const void *data, const size_t size); + + private: +- unsigned long *m_crc; ++ unsigned int *m_crc; + + }; + #endif diff --git a/src/win32/patches/openssl-w64.patch b/src/win32/patches/openssl-w64.patch new file mode 100644 index 00000000..2c754285 --- /dev/null +++ b/src/win32/patches/openssl-w64.patch @@ -0,0 +1,2663 @@ +diff -Naur ../openssl-0.9.8j/Configure ./Configure +--- ../openssl-0.9.8j/Configure 2008-12-29 01:18:23.000000000 +0100 ++++ ./Configure 2009-01-08 10:24:35.000000000 +0100 +@@ -477,6 +477,8 @@ + # MinGW + "mingw", "gcc:-mno-cygwin -DL_ENDIAN -fomit-frame-pointer -O3 -march=i486 -Wall -D_WIN32_WINNT=0x333:::MINGW32:-lwsock32 -lgdi32:BN_LLONG ${x86_gcc_des} ${x86_gcc_opts} EXPORT_VAR_AS_FN:${x86_coff_asm}:win32:cygwin-shared:-D_WINDLL -DOPENSSL_USE_APPLINK:-mno-cygwin -shared:.dll.a", + ++"mingw64", "mingw32-gcc:-mno-cygwin -DL_ENDIAN -DWIN32_LEAN_AND_MEAN -fomit-frame-pointer -O3 -Wall -D_WIN32_WINNT=0x333:::MINGW64:-lws2_32 -lgdi32:SIXTY_FOUR_BIT RC4_CHUNK_LL DES_INT EXPORT_VAR_AS_FN:${no_asm}:win32:cygwin-shared:-D_WINDLL:-mno-cygwin:.dll.a", ++ + # UWIN + "UWIN", "cc:-DTERMIOS -DL_ENDIAN -O -Wall:::UWIN::BN_LLONG ${x86_gcc_des} ${x86_gcc_opts}:${no_asm}:win32", + +@@ -1055,7 +1057,7 @@ + + $no_shared = 0 if ($fipsdso && !$IsMK1MF); + +-$exe_ext=".exe" if ($target eq "Cygwin" || $target eq "DJGPP" || $target eq "mingw"); ++$exe_ext=".exe" if ($target eq "Cygwin" || $target eq "DJGPP" || $target =~ /^mingw/); + $exe_ext=".nlm" if ($target =~ /netware/); + $exe_ext=".pm" if ($target =~ /vos/); + if ($openssldir eq "" and $prefix eq "") +@@ -1501,6 +1503,7 @@ + s/^SHLIB_TARGET=.*/SHLIB_TARGET=$shared_target/; + s/^SHLIB_MARK=.*/SHLIB_MARK=$shared_mark/; + 
s/^SHARED_LIBS=.*/SHARED_LIBS=\$(SHARED_FIPS) \$(SHARED_CRYPTO) \$(SHARED_SSL)/ if (!$no_shared); ++ s/^DIRS= crypto ssl engines apps test tools/DIRS= crypto ssl apps test tools/ if ($disabled{'hw'}); + if ($shared_extension ne "" && $shared_extension =~ /^\.s([ol])\.[^\.]*$/) + { + my $sotmp = $1; +diff -Naur ../openssl-0.9.8j/Configure.orig ./Configure.orig +--- ../openssl-0.9.8j/Configure.orig 1970-01-01 01:00:00.000000000 +0100 ++++ ./Configure.orig 2008-12-29 01:18:23.000000000 +0100 +@@ -0,0 +1,2030 @@ ++: ++eval 'exec perl -S $0 ${1+"$@"}' ++ if $running_under_some_shell; ++## ++## Configure -- OpenSSL source tree configuration script ++## ++ ++require 5.000; ++eval 'use strict;'; ++ ++print STDERR "Warning: perl module strict not found.\n" if ($@); ++ ++# see INSTALL for instructions. ++ ++my $usage="Usage: Configure [no- ...] [enable- ...] [experimental- ...] [-Dxxx] [-lxxx] [-Lxxx] [-fxxx] [-Kxxx] [no-hw-xxx|no-hw] [[no-]threads] [[no-]shared] [[no-]zlib|zlib-dynamic] [enable-montasm] [no-asm] [no-dso] [no-krb5] [386] [--prefix=DIR] [--openssldir=OPENSSLDIR] [--with-xxx[=vvv]] [--test-sanity] os/compiler[:flags]\n"; ++ ++# Options: ++# ++# --openssldir install OpenSSL in OPENSSLDIR (Default: DIR/ssl if the ++# --prefix option is given; /usr/local/ssl otherwise) ++# --prefix prefix for the OpenSSL include, lib and bin directories ++# (Default: the OPENSSLDIR directory) ++# ++# --install_prefix Additional prefix for package builders (empty by ++# default). This needn't be set in advance, you can ++# just as well use "make INSTALL_PREFIX=/whatever install". ++# ++# --with-krb5-dir Declare where Kerberos 5 lives. The libraries are expected ++# to live in the subdirectory lib/ and the header files in ++# include/. A value is required. ++# --with-krb5-lib Declare where the Kerberos 5 libraries live. A value is ++# required. ++# (Default: KRB5_DIR/lib) ++# --with-krb5-include Declare where the Kerberos 5 header files live. A ++# value is required. ++# (Default: KRB5_DIR/include) ++# --with-krb5-flavor Declare what flavor of Kerberos 5 is used. Currently ++# supported values are "MIT" and "Heimdal". A value is required. ++# ++# --test-sanity Make a number of sanity checks on the data in this file. ++# This is a debugging tool for OpenSSL developers. ++# ++# no-hw-xxx do not compile support for specific crypto hardware. ++# Generic OpenSSL-style methods relating to this support ++# are always compiled but return NULL if the hardware ++# support isn't compiled. ++# no-hw do not compile support for any crypto hardware. ++# [no-]threads [don't] try to create a library that is suitable for ++# multithreaded applications (default is "threads" if we ++# know how to do it) ++# [no-]shared [don't] try to create shared libraries when supported. ++# no-asm do not use assembler ++# no-dso do not compile in any native shared-library methods. This ++# will ensure that all methods just return NULL. ++# no-krb5 do not compile in any KRB5 library or code. ++# [no-]zlib [don't] compile support for zlib compression. ++# zlib-dynamic Like "zlib", but the zlib library is expected to be a shared ++# library and will be loaded in run-time by the OpenSSL library. ++# enable-montasm 0.9.8 branch only: enable Montgomery x86 assembler backport ++# from 0.9.9 ++# 386 generate 80386 code ++# no-sse2 disables IA-32 SSE2 code, above option implies no-sse2 ++# no- build without specified algorithm (rsa, idea, rc5, ...) 
++# - + compiler options are passed through ++# ++# DEBUG_SAFESTACK use type-safe stacks to enforce type-safety on stack items ++# provided to stack calls. Generates unique stack functions for ++# each possible stack type. ++# DES_PTR use pointer lookup vs arrays in the DES in crypto/des/des_locl.h ++# DES_RISC1 use different DES_ENCRYPT macro that helps reduce register ++# dependancies but needs to more registers, good for RISC CPU's ++# DES_RISC2 A different RISC variant. ++# DES_UNROLL unroll the inner DES loop, sometimes helps, somtimes hinders. ++# DES_INT use 'int' instead of 'long' for DES_LONG in crypto/des/des.h ++# This is used on the DEC Alpha where long is 8 bytes ++# and int is 4 ++# BN_LLONG use the type 'long long' in crypto/bn/bn.h ++# MD2_CHAR use 'char' instead of 'int' for MD2_INT in crypto/md2/md2.h ++# MD2_LONG use 'long' instead of 'int' for MD2_INT in crypto/md2/md2.h ++# IDEA_SHORT use 'short' instead of 'int' for IDEA_INT in crypto/idea/idea.h ++# IDEA_LONG use 'long' instead of 'int' for IDEA_INT in crypto/idea/idea.h ++# RC2_SHORT use 'short' instead of 'int' for RC2_INT in crypto/rc2/rc2.h ++# RC2_LONG use 'long' instead of 'int' for RC2_INT in crypto/rc2/rc2.h ++# RC4_CHAR use 'char' instead of 'int' for RC4_INT in crypto/rc4/rc4.h ++# RC4_LONG use 'long' instead of 'int' for RC4_INT in crypto/rc4/rc4.h ++# RC4_INDEX define RC4_INDEX in crypto/rc4/rc4_locl.h. This turns on ++# array lookups instead of pointer use. ++# RC4_CHUNK enables code that handles data aligned at long (natural CPU ++# word) boundary. ++# RC4_CHUNK_LL enables code that handles data aligned at long long boundary ++# (intended for 64-bit CPUs running 32-bit OS). ++# BF_PTR use 'pointer arithmatic' for Blowfish (unsafe on Alpha). ++# BF_PTR2 intel specific version (generic version is more efficient). 
++# ++# Following are set automatically by this script ++# ++# MD5_ASM use some extra md5 assember, ++# SHA1_ASM use some extra sha1 assember, must define L_ENDIAN for x86 ++# RMD160_ASM use some extra ripemd160 assember, ++# SHA256_ASM sha256_block is implemented in assembler ++# SHA512_ASM sha512_block is implemented in assembler ++# AES_ASM ASE_[en|de]crypt is implemented in assembler ++ ++my $x86_gcc_des="DES_PTR DES_RISC1 DES_UNROLL"; ++ ++# MD2_CHAR slags pentium pros ++my $x86_gcc_opts="RC4_INDEX MD2_INT"; ++ ++# MODIFY THESE PARAMETERS IF YOU ARE GOING TO USE THE 'util/speed.sh SCRIPT ++# Don't worry about these normally ++ ++my $tcc="cc"; ++my $tflags="-fast -Xa"; ++my $tbn_mul=""; ++my $tlib="-lnsl -lsocket"; ++#$bits1="SIXTEEN_BIT "; ++#$bits2="THIRTY_TWO_BIT "; ++my $bits1="THIRTY_TWO_BIT "; ++my $bits2="SIXTY_FOUR_BIT "; ++ ++my $x86_elf_asm="x86cpuid-elf.o:bn86-elf.o co86-elf.o MAYBE-MO86-elf.o:dx86-elf.o yx86-elf.o:ax86-elf.o:bx86-elf.o:mx86-elf.o:sx86-elf.o s512sse2-elf.o:cx86-elf.o:rx86-elf.o rc4_skey.o:rm86-elf.o:r586-elf.o"; ++my $x86_coff_asm="x86cpuid-cof.o:bn86-cof.o co86-cof.o MAYBE-MO86-cof.o:dx86-cof.o yx86-cof.o:ax86-cof.o:bx86-cof.o:mx86-cof.o:sx86-cof.o s512sse2-cof.o:cx86-cof.o:rx86-cof.o rc4_skey.o:rm86-cof.o:r586-cof.o"; ++my $x86_out_asm="x86cpuid-out.o:bn86-out.o co86-out.o MAYBE-MO86-out.o:dx86-out.o yx86-out.o:ax86-out.o:bx86-out.o:mx86-out.o:sx86-out.o s512sse2-out.o:cx86-out.o:rx86-out.o rc4_skey.o:rm86-out.o:r586-out.o"; ++ ++my $x86_64_asm="x86_64cpuid.o:x86_64-gcc.o x86_64-mont.o::aes-x86_64.o::md5-x86_64.o:sha1-x86_64.o sha256-x86_64.o sha512-x86_64.o::rc4-x86_64.o::"; ++my $ia64_asm=":bn-ia64.o::aes_core.o aes_cbc.o aes-ia64.o:::sha1-ia64.o sha256-ia64.o sha512-ia64.o::rc4-ia64.o rc4_skey.o::"; ++ ++my $no_asm="::::::::::"; ++ ++# As for $BSDthreads. Idea is to maintain "collective" set of flags, ++# which would cover all BSD flavors. -pthread applies to them all, ++# but is treated differently. OpenBSD expands is as -D_POSIX_THREAD ++# -lc_r, which is sufficient. FreeBSD 4.x expands it as -lc_r, ++# which has to be accompanied by explicit -D_THREAD_SAFE and ++# sometimes -D_REENTRANT. FreeBSD 5.x expands it as -lc_r, which ++# seems to be sufficient? ++my $BSDthreads="-pthread -D_THREAD_SAFE -D_REENTRANT"; ++ ++#config-string $cc : $cflags : $unistd : $thread_cflag : $sys_id : $lflags : $bn_ops : $cpuid_obj : $bn_obj : $des_obj : $aes_obj : $bf_obj : $md5_obj : $sha1_obj : $cast_obj : $rc4_obj : $rmd160_obj : $rc5_obj : $dso_scheme : $shared_target : $shared_cflag : $shared_ldflag : $shared_extension : $ranlib : $arflags ++ ++my %table=( ++# File 'TABLE' (created by 'make TABLE') contains the data from this list, ++# formatted for better readability. 
++ ++ ++#"b", "${tcc}:${tflags}::${tlib}:${bits1}:${tbn_mul}::", ++#"bl-4c-2c", "${tcc}:${tflags}::${tlib}:${bits1}BN_LLONG RC4_CHAR MD2_CHAR:${tbn_mul}::", ++#"bl-4c-ri", "${tcc}:${tflags}::${tlib}:${bits1}BN_LLONG RC4_CHAR RC4_INDEX:${tbn_mul}::", ++#"b2-is-ri-dp", "${tcc}:${tflags}::${tlib}:${bits2}IDEA_SHORT RC4_INDEX DES_PTR:${tbn_mul}::", ++ ++# Our development configs ++"purify", "purify gcc:-g -DPURIFY -Wall::(unknown)::-lsocket -lnsl::::", ++"debug", "gcc:-DBN_DEBUG -DREF_CHECK -DCONF_DEBUG -DBN_CTX_DEBUG -DCRYPTO_MDEBUG -DOPENSSL_NO_ASM -ggdb -g2 -Wformat -Wshadow -Wmissing-prototypes -Wmissing-declarations -Werror::(unknown)::-lefence::::", ++"debug-ben", "gcc:-DBN_DEBUG -DREF_CHECK -DCONF_DEBUG -DBN_CTX_DEBUG -DCRYPTO_MDEBUG -DPEDANTIC -DDEBUG_SAFESTACK -O2 -pedantic -Wall -Wshadow -Werror -pipe::(unknown):::::bn86-elf.o co86-elf.o", ++"debug-ben-openbsd","gcc:-DBN_DEBUG -DREF_CHECK -DCONF_DEBUG -DBN_CTX_DEBUG -DCRYPTO_MDEBUG -DPEDANTIC -DDEBUG_SAFESTACK -DOPENSSL_OPENBSD_DEV_CRYPTO -DOPENSSL_NO_ASM -O2 -pedantic -Wall -Wshadow -Werror -pipe::(unknown)::::", ++"debug-ben-openbsd-debug","gcc:-DBN_DEBUG -DREF_CHECK -DCONF_DEBUG -DBN_CTX_DEBUG -DCRYPTO_MDEBUG -DPEDANTIC -DDEBUG_SAFESTACK -DOPENSSL_OPENBSD_DEV_CRYPTO -DOPENSSL_NO_ASM -g3 -O2 -pedantic -Wall -Wshadow -Werror -pipe::(unknown)::::", ++"debug-ben-debug", "gcc:-DBN_DEBUG -DREF_CHECK -DCONF_DEBUG -DBN_CTX_DEBUG -DCRYPTO_MDEBUG -DPEDANTIC -DDEBUG_SAFESTACK -g3 -O2 -pedantic -Wall -Wshadow -Werror -pipe::(unknown)::::::", ++"debug-ben-strict", "gcc:-DBN_DEBUG -DREF_CHECK -DCONF_DEBUG -DBN_CTX_DEBUG -DCRYPTO_MDEBUG -DCONST_STRICT -O2 -Wall -Wshadow -Werror -Wpointer-arith -Wcast-qual -Wwrite-strings -pipe::(unknown)::::::", ++"debug-rse","cc:-DTERMIOS -DL_ENDIAN -pipe -O -g -ggdb3 -Wall::(unknown):::BN_LLONG ${x86_gcc_des} ${x86_gcc_opts}:${x86_elf_asm}", ++"debug-bodo", "gcc:-DL_ENDIAN -DBN_DEBUG -DREF_CHECK -DCONF_DEBUG -DBIO_PAIR_DEBUG -DPEDANTIC -g -march=i486 -pedantic -Wshadow -Wall -Wcast-align -Wstrict-prototypes -Wmissing-prototypes -Wno-long-long -Wundef -Wconversion -pipe::-D_REENTRANT:::BN_LLONG ${x86_gcc_des} ${x86_gcc_opts}:${x86_elf_asm}", ++"debug-ulf", "gcc:-DTERMIOS -DL_ENDIAN -march=i486 -Wall -DBN_DEBUG -DBN_DEBUG_RAND -DREF_CHECK -DCONF_DEBUG -DBN_CTX_DEBUG -DCRYPTO_MDEBUG -DOPENSSL_NO_ASM -g -Wformat -Wshadow -Wmissing-prototypes -Wmissing-declarations:::CYGWIN32:::${no_asm}:win32:cygwin-shared:::.dll", ++"debug-steve64", "gcc:-m64 -DL_ENDIAN -DTERMIO -DREF_CHECK -DCONF_DEBUG -DDEBUG_SAFESTACK -DCRYPTO_MDEBUG_ALL -DPEDANTIC -DOPENSSL_NO_DEPRECATED -g -pedantic -Wall -Werror -Wno-long-long -Wsign-compare -DMD32_REG_T=int::-D_REENTRANT::-ldl:SIXTY_FOUR_BIT_LONG RC4_CHUNK BF_PTR2 DES_INT DES_UNROLL:${x86_64_asm}:dlfcn:linux-shared:-fPIC:-m64:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"debug-steve32", "gcc:-m32 -DL_ENDIAN -DREF_CHECK -DCONF_DEBUG -DDEBUG_SAFESTACK -DCRYPTO_MDEBUG_ALL -DPEDANTIC -DOPENSSL_NO_DEPRECATED -g -pedantic -Wno-long-long -Wall -Werror -Wshadow -pipe::-D_REENTRANT::-rdynamic -ldl:BN_LLONG ${x86_gcc_des} ${x86_gcc_opts}:${x86_elf_asm}:dlfcn:linux-shared:-fPIC:-m32:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"debug-steve", "gcc:-DL_ENDIAN -DREF_CHECK -DCONF_DEBUG -DDEBUG_SAFESTACK -DCRYPTO_MDEBUG_ALL -DPEDANTIC -m32 -g -pedantic -Wno-long-long -Wall -Werror -Wshadow -pipe::-D_REENTRANT::-rdynamic -ldl:BN_LLONG ${x86_gcc_des} ${x86_gcc_opts}:${x86_elf_asm}:dlfcn:linux-shared", ++"debug-steve-opt", "gcc:-DL_ENDIAN -DREF_CHECK -DCONF_DEBUG -DDEBUG_SAFESTACK -DCRYPTO_MDEBUG_ALL -DPEDANTIC -m32 
-O3 -g -pedantic -Wno-long-long -Wall -Werror -Wshadow -pipe::-D_REENTRANT::-rdynamic -ldl:BN_LLONG ${x86_gcc_des} ${x86_gcc_opts}:${x86_elf_asm}:dlfcn:linux-shared", ++"debug-steve-linux-pseudo64", "gcc:-DL_ENDIAN -DREF_CHECK -DCONF_DEBUG -DBN_CTX_DEBUG -DDEBUG_SAFESTACK -DCRYPTO_MDEBUG_ALL -DOPENSSL_NO_ASM -g -mcpu=i486 -Wall -Werror -Wshadow -pipe::-D_REENTRANT::-rdynamic -ldl:SIXTY_FOUR_BIT:${no_asm}:dlfcn:linux-shared", ++"debug-levitte-linux-elf","gcc:-DLEVITTE_DEBUG -DREF_CHECK -DCONF_DEBUG -DBN_DEBUG -DBN_DEBUG_RAND -DCRYPTO_MDEBUG -DENGINE_CONF_DEBUG -DL_ENDIAN -DTERMIO -D_POSIX_SOURCE -DPEDANTIC -ggdb -g3 -mcpu=i486 -pedantic -ansi -Wall -Wshadow -Wcast-align -Wstrict-prototypes -Wmissing-prototypes -Wno-long-long -Wundef -Wconversion -pipe::-D_REENTRANT::-ldl:BN_LLONG ${x86_gcc_des} ${x86_gcc_opts}:${x86_elf_asm}:dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"debug-levitte-linux-noasm","gcc:-DLEVITTE_DEBUG -DREF_CHECK -DCONF_DEBUG -DBN_DEBUG -DBN_DEBUG_RAND -DCRYPTO_MDEBUG -DENGINE_CONF_DEBUG -DOPENSSL_NO_ASM -DL_ENDIAN -DTERMIO -D_POSIX_SOURCE -DPEDANTIC -ggdb -g3 -mcpu=i486 -pedantic -ansi -Wall -Wshadow -Wcast-align -Wstrict-prototypes -Wmissing-prototypes -Wno-long-long -Wundef -Wconversion -pipe::-D_REENTRANT::-ldl:BN_LLONG ${x86_gcc_des} ${x86_gcc_opts}:${no_asm}:dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"debug-levitte-linux-elf-extreme","gcc:-DLEVITTE_DEBUG -DREF_CHECK -DCONF_DEBUG -DBN_DEBUG -DBN_DEBUG_RAND -DCRYPTO_MDEBUG -DENGINE_CONF_DEBUG -DL_ENDIAN -DTERMIO -D_POSIX_SOURCE -DPEDANTIC -ggdb -g3 -mcpu=i486 -pedantic -ansi -Wall -W -Wundef -Wshadow -Wcast-align -Wstrict-prototypes -Wmissing-prototypes -Wno-long-long -Wundef -Wconversion -pipe::-D_REENTRANT::-ldl:BN_LLONG ${x86_gcc_des} ${x86_gcc_opts}:${x86_elf_asm}:dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"debug-levitte-linux-noasm-extreme","gcc:-DLEVITTE_DEBUG -DREF_CHECK -DCONF_DEBUG -DBN_DEBUG -DBN_DEBUG_RAND -DCRYPTO_MDEBUG -DENGINE_CONF_DEBUG -DOPENSSL_NO_ASM -DL_ENDIAN -DTERMIO -D_POSIX_SOURCE -DPEDANTIC -ggdb -g3 -mcpu=i486 -pedantic -ansi -Wall -W -Wundef -Wshadow -Wcast-align -Wstrict-prototypes -Wmissing-prototypes -Wno-long-long -Wundef -Wconversion -pipe::-D_REENTRANT::-ldl:BN_LLONG ${x86_gcc_des} ${x86_gcc_opts}:${no_asm}:dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"debug-geoff","gcc:-DBN_DEBUG -DBN_DEBUG_RAND -DBN_STRICT -DPURIFY -DOPENSSL_NO_DEPRECATED -DOPENSSL_NO_ASM -DOPENSSL_NO_INLINE_ASM -DL_ENDIAN -DTERMIO -DPEDANTIC -O1 -ggdb2 -Wall -Werror -Wundef -pedantic -Wshadow -Wpointer-arith -Wbad-function-cast -Wcast-align -Wsign-compare -Wmissing-prototypes -Wmissing-declarations -Wno-long-long::-D_REENTRANT::-ldl:BN_LLONG ${x86_gcc_des} ${x86_gcc_opts}:${no_asm}:dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"debug-linux-pentium","gcc:-DBN_DEBUG -DREF_CHECK -DCONF_DEBUG -DCRYPTO_MDEBUG -DL_ENDIAN -DTERMIO -g -mcpu=pentium -Wall::-D_REENTRANT::-ldl:BN_LLONG ${x86_gcc_des} ${x86_gcc_opts}:${x86_elf_asm}:dlfcn", ++"debug-linux-ppro","gcc:-DBN_DEBUG -DREF_CHECK -DCONF_DEBUG -DCRYPTO_MDEBUG -DL_ENDIAN -DTERMIO -g -mcpu=pentiumpro -Wall::-D_REENTRANT::-ldl:BN_LLONG ${x86_gcc_des} ${x86_gcc_opts}:${x86_elf_asm}:dlfcn", ++"debug-linux-elf","gcc:-DBN_DEBUG -DREF_CHECK -DCONF_DEBUG -DCRYPTO_MDEBUG -DL_ENDIAN -DTERMIO -g -march=i486 -Wall::-D_REENTRANT::-lefence -ldl:BN_LLONG ${x86_gcc_des} ${x86_gcc_opts}:${x86_elf_asm}:dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", 
++"debug-linux-elf-noefence","gcc:-DBN_DEBUG -DREF_CHECK -DCONF_DEBUG -DCRYPTO_MDEBUG -DL_ENDIAN -DTERMIO -g -march=i486 -Wall::-D_REENTRANT::-ldl:BN_LLONG ${x86_gcc_des} ${x86_gcc_opts}:${x86_elf_asm}:dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"dist", "cc:-O::(unknown)::::::", ++ ++# Basic configs that should work on any (32 and less bit) box ++"gcc", "gcc:-O3::(unknown):::BN_LLONG:::", ++"cc", "cc:-O::(unknown)::::::", ++ ++####VOS Configurations ++"vos-gcc","gcc:-O3 -Wall -D_POSIX_C_SOURCE=200112L -D_BSD -DB_ENDIAN::(unknown):VOS:-Wl,-map:BN_LLONG:${no_asm}:::::.so:", ++"debug-vos-gcc","gcc:-O0 -g -Wall -D_POSIX_C_SOURCE=200112L -D_BSD -DB_ENDIAN -DBN_DEBUG -DREF_CHECK -DCONF_DEBUG -DCRYPTO_MDEBUG::(unknown):VOS:-Wl,-map:BN_LLONG:${no_asm}:::::.so:", ++ ++#### Solaris x86 with GNU C setups ++# -DOPENSSL_NO_INLINE_ASM switches off inline assembler. We have to do it ++# here because whenever GNU C instantiates an assembler template it ++# surrounds it with #APP #NO_APP comment pair which (at least Solaris ++# 7_x86) /usr/ccs/bin/as fails to assemble with "Illegal mnemonic" ++# error message. ++"solaris-x86-gcc","gcc:-O3 -fomit-frame-pointer -march=pentium -Wall -DL_ENDIAN -DOPENSSL_NO_INLINE_ASM::-D_REENTRANT::-lsocket -lnsl -ldl:BN_LLONG ${x86_gcc_des} ${x86_gcc_opts}:${x86_elf_asm}:dlfcn:solaris-shared:-fPIC:-shared:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++# -shared -static-libgcc might appear controversial, but modules taken ++# from static libgcc do not have relocations and linking them into our ++# shared objects doesn't have any negative side-effects. On the contrary, ++# doing so makes it possible to use gcc shared build with Sun C. Given ++# that gcc generates faster code [thanks to inline assembler], I would ++# actually recommend to consider using gcc shared build even with vendor ++# compiler:-) ++# ++"solaris64-x86_64-gcc","gcc:-m64 -O3 -Wall -DL_ENDIAN -DMD32_REG_T=int::-D_REENTRANT::-lsocket -lnsl -ldl:SIXTY_FOUR_BIT_LONG RC4_CHUNK BF_PTR2 DES_INT DES_UNROLL:${x86_64_asm}:dlfcn:solaris-shared:-fPIC:-m64 -shared -static-libgcc:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++ ++#### Solaris x86 with Sun C setups ++"solaris-x86-cc","cc:-fast -O -Xa::-D_REENTRANT::-lsocket -lnsl -ldl:BN_LLONG RC4_CHAR RC4_CHUNK DES_PTR DES_UNROLL BF_PTR:${no_asm}:dlfcn:solaris-shared:-KPIC:-G -dy -z text:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"solaris64-x86_64-cc","cc:-fast -xarch=amd64 -xstrconst -Xa -DL_ENDIAN::-D_REENTRANT::-lsocket -lnsl -ldl:SIXTY_FOUR_BIT_LONG RC4_CHUNK BF_PTR2 DES_INT DES_UNROLL:${x86_64_asm}:dlfcn:solaris-shared:-KPIC:-xarch=amd64 -G -dy -z text:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++ ++#### SPARC Solaris with GNU C setups ++"solaris-sparcv7-gcc","gcc:-O3 -fomit-frame-pointer -Wall -DB_ENDIAN -DBN_DIV2W::-D_REENTRANT::-lsocket -lnsl -ldl:BN_LLONG RC4_CHAR RC4_CHUNK DES_UNROLL BF_PTR:${no_asm}:dlfcn:solaris-shared:-fPIC:-shared:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"solaris-sparcv8-gcc","gcc:-mv8 -O3 -fomit-frame-pointer -Wall -DB_ENDIAN -DBN_DIV2W::-D_REENTRANT::-lsocket -lnsl -ldl:BN_LLONG RC4_CHAR RC4_CHUNK DES_UNROLL BF_PTR::sparcv8.o:des_enc-sparc.o fcrypt_b.o:::::::::dlfcn:solaris-shared:-fPIC:-shared:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++# -m32 should be safe to add as long as driver recognizes -mcpu=ultrasparc ++"solaris-sparcv9-gcc","gcc:-m32 -mcpu=ultrasparc -O3 -fomit-frame-pointer -Wall -DB_ENDIAN -DBN_DIV2W::-D_REENTRANT:ULTRASPARC:-lsocket -lnsl -ldl:BN_LLONG RC4_CHAR RC4_CHUNK DES_UNROLL BF_PTR::sparcv8plus.o:des_enc-sparc.o 
fcrypt_b.o:::::::::dlfcn:solaris-shared:-fPIC:-shared:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"solaris64-sparcv9-gcc","gcc:-m64 -mcpu=ultrasparc -O3 -Wall -DB_ENDIAN::-D_REENTRANT:ULTRASPARC:-lsocket -lnsl -ldl:SIXTY_FOUR_BIT_LONG RC4_CHAR RC4_CHUNK DES_INT DES_PTR DES_RISC1 DES_UNROLL BF_PTR:::des_enc-sparc.o fcrypt_b.o:::::::::dlfcn:solaris-shared:-fPIC:-m64 -shared:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++#### ++"debug-solaris-sparcv8-gcc","gcc:-DBN_DEBUG -DREF_CHECK -DCONF_DEBUG -DCRYPTO_MDEBUG_ALL -O -g -mv8 -Wall -DB_ENDIAN::-D_REENTRANT::-lsocket -lnsl -ldl:BN_LLONG RC4_CHAR RC4_CHUNK DES_UNROLL BF_PTR::sparcv8.o::::::::::dlfcn:solaris-shared:-fPIC:-shared:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"debug-solaris-sparcv9-gcc","gcc:-DBN_DEBUG -DREF_CHECK -DCONF_DEBUG -DCRYPTO_MDEBUG_ALL -DPEDANTIC -O -g -mcpu=ultrasparc -pedantic -ansi -Wall -Wshadow -Wno-long-long -D__EXTENSIONS__ -DB_ENDIAN -DBN_DIV2W::-D_REENTRANT:ULTRASPARC:-lsocket -lnsl -ldl:BN_LLONG RC4_CHAR RC4_CHUNK DES_UNROLL BF_PTR::sparcv8plus.o:des_enc-sparc.o fcrypt_b.o:::::::::dlfcn:solaris-shared:-fPIC:-shared:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++ ++#### SPARC Solaris with Sun C setups ++# SC4.0 doesn't pass 'make test', upgrade to SC5.0 or SC4.2. ++# SC4.2 is ok, better than gcc even on bn as long as you tell it -xarch=v8 ++# SC5.0 note: Compiler common patch 107357-01 or later is required! ++"solaris-sparcv7-cc","cc:-xO5 -xstrconst -xdepend -Xa -DB_ENDIAN -DBN_DIV2W::-D_REENTRANT::-lsocket -lnsl -ldl:BN_LLONG RC4_CHAR RC4_CHUNK DES_PTR DES_RISC1 DES_UNROLL BF_PTR:${no_asm}:dlfcn:solaris-shared:-KPIC:-G -dy -z text:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"solaris-sparcv8-cc","cc:-xarch=v8 -xO5 -xstrconst -xdepend -Xa -DB_ENDIAN -DBN_DIV2W::-D_REENTRANT::-lsocket -lnsl -ldl:BN_LLONG RC4_CHAR RC4_CHUNK DES_PTR DES_RISC1 DES_UNROLL BF_PTR::sparcv8.o:des_enc-sparc.o fcrypt_b.o:::::::::dlfcn:solaris-shared:-KPIC:-G -dy -z text:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"solaris-sparcv9-cc","cc:-xtarget=ultra -xarch=v8plus -xO5 -xstrconst -xdepend -Xa -DB_ENDIAN -DBN_DIV2W::-D_REENTRANT:ULTRASPARC:-lsocket -lnsl -ldl:BN_LLONG RC4_CHAR RC4_CHUNK_LL DES_PTR DES_RISC1 DES_UNROLL BF_PTR::sparcv8plus.o:des_enc-sparc.o fcrypt_b.o:::::::::dlfcn:solaris-shared:-KPIC:-G -dy -z text:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"solaris64-sparcv9-cc","cc:-xtarget=ultra -xarch=v9 -xO5 -xstrconst -xdepend -Xa -DB_ENDIAN::-D_REENTRANT:ULTRASPARC:-lsocket -lnsl -ldl:SIXTY_FOUR_BIT_LONG RC4_CHAR RC4_CHUNK DES_INT DES_PTR DES_RISC1 DES_UNROLL BF_PTR:::des_enc-sparc.o fcrypt_b.o:::::::::dlfcn:solaris-shared:-KPIC:-xarch=v9 -G -dy -z text:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR):/usr/ccs/bin/ar rs", ++#### ++"debug-solaris-sparcv8-cc","cc:-DBN_DEBUG -DREF_CHECK -DCONF_DEBUG -DCRYPTO_MDEBUG_ALL -xarch=v8 -g -O -xstrconst -Xa -DB_ENDIAN -DBN_DIV2W::-D_REENTRANT::-lsocket -lnsl -ldl:BN_LLONG RC4_CHAR RC4_CHUNK DES_PTR DES_RISC1 DES_UNROLL BF_PTR::sparcv8.o::::::::::dlfcn:solaris-shared:-KPIC:-G -dy -z text:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"debug-solaris-sparcv9-cc","cc:-DBN_DEBUG -DREF_CHECK -DCONF_DEBUG -DCRYPTO_MDEBUG_ALL -xtarget=ultra -xarch=v8plus -g -O -xstrconst -Xa -DB_ENDIAN -DBN_DIV2W::-D_REENTRANT:ULTRASPARC:-lsocket -lnsl -ldl:BN_LLONG RC4_CHAR RC4_CHUNK_LL DES_PTR DES_RISC1 DES_UNROLL BF_PTR::sparcv8plus.o::::::::::dlfcn:solaris-shared:-KPIC:-G -dy -z text:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++ ++#### SunOS configs, assuming sparc for the gcc one. 
++#"sunos-cc", "cc:-O4 -DNOPROTO -DNOCONST::(unknown):SUNOS::DES_UNROLL:${no_asm}::", ++"sunos-gcc","gcc:-O3 -mv8 -Dssize_t=int::(unknown):SUNOS::BN_LLONG RC4_CHAR RC4_CHUNK DES_UNROLL DES_PTR DES_RISC1:${no_asm}::", ++ ++#### IRIX 5.x configs ++# -mips2 flag is added by ./config when appropriate. ++"irix-gcc","gcc:-O3 -DTERMIOS -DB_ENDIAN::(unknown):::BN_LLONG MD2_CHAR RC4_INDEX RC4_CHAR RC4_CHUNK DES_UNROLL DES_RISC2 DES_PTR BF_PTR:${no_asm}:dlfcn:irix-shared:::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"irix-cc", "cc:-O2 -use_readonly_const -DTERMIOS -DB_ENDIAN::(unknown):::BN_LLONG RC4_CHAR RC4_CHUNK DES_PTR DES_RISC2 DES_UNROLL BF_PTR:${no_asm}:dlfcn:irix-shared:::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++#### IRIX 6.x configs ++# Only N32 and N64 ABIs are supported. If you need O32 ABI build, invoke ++# './Configure irix-cc -o32' manually. ++"irix-mips3-gcc","gcc:-mabi=n32 -O3 -DTERMIOS -DB_ENDIAN -DBN_DIV3W::-D_SGI_MP_SOURCE:::MD2_CHAR RC4_INDEX RC4_CHAR RC4_CHUNK_LL DES_UNROLL DES_RISC2 DES_PTR BF_PTR SIXTY_FOUR_BIT::bn-mips3.o::::::::::dlfcn:irix-shared::-mabi=n32:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"irix-mips3-cc", "cc:-n32 -mips3 -O2 -use_readonly_const -G0 -rdata_shared -DTERMIOS -DB_ENDIAN -DBN_DIV3W::-D_SGI_MP_SOURCE:::DES_PTR RC4_CHAR RC4_CHUNK_LL DES_RISC2 DES_UNROLL BF_PTR SIXTY_FOUR_BIT::bn-mips3.o::::::::::dlfcn:irix-shared::-n32:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++# N64 ABI builds. ++"irix64-mips4-gcc","gcc:-mabi=64 -mips4 -O3 -DTERMIOS -DB_ENDIAN -DBN_DIV3W::-D_SGI_MP_SOURCE:::RC4_CHAR RC4_CHUNK DES_RISC2 DES_UNROLL SIXTY_FOUR_BIT_LONG::bn-mips3.o::::::::::dlfcn:irix-shared::-mabi=64:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"irix64-mips4-cc", "cc:-64 -mips4 -O2 -use_readonly_const -G0 -rdata_shared -DTERMIOS -DB_ENDIAN -DBN_DIV3W::-D_SGI_MP_SOURCE:::RC4_CHAR RC4_CHUNK DES_RISC2 DES_UNROLL SIXTY_FOUR_BIT_LONG::bn-mips3.o::::::::::dlfcn:irix-shared::-64:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++ ++#### Unified HP-UX ANSI C configs. ++# Special notes: ++# - Originally we were optimizing at +O4 level. It should be noted ++# that the only difference between +O3 and +O4 is global inter- ++# procedural analysis. As it has to be performed during the link ++# stage the compiler leaves behind certain pseudo-code in lib*.a ++# which might be release or even patch level specific. Generating ++# the machine code for and analyzing the *whole* program appears ++# to be *extremely* memory demanding while the performance gain is ++# actually questionable. The situation is intensified by the default ++# HP-UX data set size limit (infamous 'maxdsiz' tunable) of 64MB ++# which is way too low for +O4. In other words, doesn't +O3 make ++# more sense? ++# - Keep in mind that the HP compiler by default generates code ++# suitable for execution on the host you're currently compiling at. ++# If the toolkit is ment to be used on various PA-RISC processors ++# consider './config +DAportable'. ++# - +DD64 is chosen in favour of +DA2.0W because it's meant to be ++# compatible with *future* releases. ++# - If you run ./Configure hpux-parisc-[g]cc manually don't forget to ++# pass -D_REENTRANT on HP-UX 10 and later. ++# - -DMD32_XARRAY triggers workaround for compiler bug we ran into in ++# 32-bit message digests. (For the moment of this writing) HP C ++# doesn't seem to "digest" too many local variables (they make "him" ++# chew forever:-). For more details look-up MD32_XARRAY comment in ++# crypto/sha/sha_lcl.h. 
++# ++# ++# Since there is mention of this in shlib/hpux10-cc.sh ++"hpux-parisc-cc-o4","cc:-Ae +O4 +ESlit -z -DB_ENDIAN -DBN_DIV2W -DMD32_XARRAY::-D_REENTRANT::-ldld:BN_LLONG DES_PTR DES_UNROLL DES_RISC1:${no_asm}:dl:hpux-shared:+Z:-b:.sl.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"hpux-parisc-gcc","gcc:-O3 -DB_ENDIAN -DBN_DIV2W::-D_REENTRANT::-Wl,+s -ldld:BN_LLONG DES_PTR DES_UNROLL DES_RISC1:${no_asm}:dl:hpux-shared:-fPIC:-shared:.sl.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"hpux-parisc2-gcc","gcc:-march=2.0 -O3 -DB_ENDIAN -D_REENTRANT::::-Wl,+s -ldld:SIXTY_FOUR_BIT RC4_CHAR RC4_CHUNK DES_PTR DES_UNROLL DES_RISC1::pa-risc2.o::::::::::dl:hpux-shared:-fPIC:-shared:.sl.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"hpux64-parisc2-gcc","gcc:-O3 -DB_ENDIAN -D_REENTRANT::::-ldl:SIXTY_FOUR_BIT_LONG MD2_CHAR RC4_INDEX RC4_CHAR DES_UNROLL DES_RISC1 DES_INT::pa-risc2W.o::::::::::dlfcn:hpux-shared:-fpic:-shared:.sl.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++ ++# More attempts at unified 10.X and 11.X targets for HP C compiler. ++# ++# Chris Ruemmler ++# Kevin Steves ++"hpux-parisc-cc","cc:+O3 +Optrs_strongly_typed -Ae +ESlit -DB_ENDIAN -DBN_DIV2W -DMD32_XARRAY::-D_REENTRANT::-Wl,+s -ldld:MD2_CHAR RC4_INDEX RC4_CHAR DES_UNROLL DES_RISC1 DES_INT:${no_asm}:dl:hpux-shared:+Z:-b:.sl.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"hpux-parisc1_0-cc","cc:+DAportable +O3 +Optrs_strongly_typed -Ae +ESlit -DB_ENDIAN -DMD32_XARRAY::-D_REENTRANT::-Wl,+s -ldld:MD2_CHAR RC4_INDEX RC4_CHAR DES_UNROLL DES_RISC1 DES_INT:${no_asm}:dl:hpux-shared:+Z:-b:.sl.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"hpux-parisc2-cc","cc:+DA2.0 +DS2.0 +O3 +Optrs_strongly_typed -Ae +ESlit -DB_ENDIAN -DMD32_XARRAY -D_REENTRANT::::-Wl,+s -ldld:SIXTY_FOUR_BIT MD2_CHAR RC4_INDEX RC4_CHAR DES_UNROLL DES_RISC1 DES_INT::pa-risc2.o::::::::::dl:hpux-shared:+Z:-b:.sl.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"hpux64-parisc2-cc","cc:+DD64 +O3 +Optrs_strongly_typed -Ae +ESlit -DB_ENDIAN -DMD32_XARRAY -D_REENTRANT::::-ldl:SIXTY_FOUR_BIT_LONG MD2_CHAR RC4_INDEX RC4_CHAR DES_UNROLL DES_RISC1 DES_INT::pa-risc2W.o::::::::::dlfcn:hpux-shared:+Z:+DD64 -b:.sl.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++ ++# HP/UX IA-64 targets ++"hpux-ia64-cc","cc:-Ae +DD32 +O2 +Olit=all -z -DB_ENDIAN -D_REENTRANT::::-ldl:SIXTY_FOUR_BIT MD2_CHAR RC4_INDEX DES_UNROLL DES_RISC1 DES_INT:${ia64_asm}:dlfcn:hpux-shared:+Z:+DD32 -b:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++# Frank Geurts has patiently assisted with ++# with debugging of the following config. ++"hpux64-ia64-cc","cc:-Ae +DD64 +O3 +Olit=all -z -DB_ENDIAN -D_REENTRANT::::-ldl:SIXTY_FOUR_BIT_LONG MD2_CHAR RC4_INDEX DES_UNROLL DES_RISC1 DES_INT:${ia64_asm}:dlfcn:hpux-shared:+Z:+DD64 -b:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++# GCC builds... ++"hpux-ia64-gcc","gcc:-O3 -DB_ENDIAN -D_REENTRANT::::-ldl:SIXTY_FOUR_BIT MD2_CHAR RC4_INDEX DES_UNROLL DES_RISC1 DES_INT:${ia64_asm}:dlfcn:hpux-shared:-fpic:-shared:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"hpux64-ia64-gcc","gcc:-mlp64 -O3 -DB_ENDIAN -D_REENTRANT::::-ldl:SIXTY_FOUR_BIT_LONG MD2_CHAR RC4_INDEX DES_UNROLL DES_RISC1 DES_INT:${ia64_asm}:dlfcn:hpux-shared:-fpic:-mlp64 -shared:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++ ++# Legacy HPUX 9.X configs... 
++"hpux-cc", "cc:-DB_ENDIAN -DBN_DIV2W -DMD32_XARRAY -Ae +ESlit +O2 -z::(unknown)::-Wl,+s -ldld:DES_PTR DES_UNROLL DES_RISC1:${no_asm}:dl:hpux-shared:+Z:-b:.sl.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"hpux-gcc", "gcc:-DB_ENDIAN -DBN_DIV2W -O3::(unknown)::-Wl,+s -ldld:DES_PTR DES_UNROLL DES_RISC1:${no_asm}:dl:hpux-shared:-fPIC:-shared:.sl.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++ ++#### HP MPE/iX http://jazz.external.hp.com/src/openssl/ ++"MPE/iX-gcc", "gcc:-D_ENDIAN -DBN_DIV2W -O3 -D_POSIX_SOURCE -D_SOCKET_SOURCE -I/SYSLOG/PUB::(unknown):MPE:-L/SYSLOG/PUB -lsyslog -lsocket -lcurses:BN_LLONG DES_PTR DES_UNROLL DES_RISC1:::", ++ ++# DEC Alpha OSF/1/Tru64 targets. ++# ++# "What's in a name? That which we call a rose ++# By any other word would smell as sweet." ++# ++# - William Shakespeare, "Romeo & Juliet", Act II, scene II. ++# ++# For gcc, the following gave a %50 speedup on a 164 over the 'DES_INT' version ++# ++"osf1-alpha-gcc", "gcc:-O3::(unknown):::SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_UNROLL DES_RISC1:${no_asm}:dlfcn:alpha-osf1-shared:::.so", ++"osf1-alpha-cc", "cc:-std1 -tune host -O4 -readonly_strings::(unknown):::SIXTY_FOUR_BIT_LONG RC4_CHUNK:${no_asm}:dlfcn:alpha-osf1-shared:::.so", ++"tru64-alpha-cc", "cc:-std1 -tune host -fast -readonly_strings::-pthread:::SIXTY_FOUR_BIT_LONG RC4_CHUNK:${no_asm}:dlfcn:alpha-osf1-shared::-msym:.so", ++ ++#### ++#### Variety of LINUX:-) ++#### ++# *-generic* is endian-neutral target, but ./config is free to ++# throw in -D[BL]_ENDIAN, whichever appropriate... ++"linux-generic32","gcc:-DTERMIO -O3 -fomit-frame-pointer -Wall::-D_REENTRANT::-ldl:BN_LLONG RC4_CHAR RC4_CHUNK DES_INT DES_UNROLL BF_PTR:${no_asm}:dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"linux-ppc", "gcc:-DB_ENDIAN -DTERMIO -O3 -Wall::-D_REENTRANT::-ldl:BN_LLONG RC4_CHAR RC4_CHUNK DES_RISC1 DES_UNROLL::linux_ppc32.o::::::::::dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++#### IA-32 targets... 
++"linux-ia32-icc", "icc:-DL_ENDIAN -DTERMIO -O2 -no_cpprt::-D_REENTRANT::-ldl:BN_LLONG ${x86_gcc_des} ${x86_gcc_opts}:${x86_elf_asm}:dlfcn:linux-shared:-KPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"linux-elf", "gcc:-DL_ENDIAN -DTERMIO -O3 -fomit-frame-pointer -Wall::-D_REENTRANT::-ldl:BN_LLONG ${x86_gcc_des} ${x86_gcc_opts}:${x86_elf_asm}:dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"linux-aout", "gcc:-DL_ENDIAN -DTERMIO -O3 -fomit-frame-pointer -march=i486 -Wall::(unknown):::BN_LLONG ${x86_gcc_des} ${x86_gcc_opts}:${x86_out_asm}", ++#### ++"linux-generic64","gcc:-DTERMIO -O3 -Wall::-D_REENTRANT::-ldl:SIXTY_FOUR_BIT_LONG RC4_CHAR RC4_CHUNK DES_INT DES_UNROLL BF_PTR:${no_asm}:dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"linux-ppc64", "gcc:-m64 -DB_ENDIAN -DTERMIO -O3 -Wall::-D_REENTRANT::-ldl:SIXTY_FOUR_BIT_LONG RC4_CHAR RC4_CHUNK DES_RISC1 DES_UNROLL::linux_ppc64.o::::::::::dlfcn:linux-shared:-fPIC:-m64:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"linux-ia64", "gcc:-DL_ENDIAN -DTERMIO -O3 -Wall::-D_REENTRANT::-ldl:SIXTY_FOUR_BIT_LONG RC4_CHUNK:${ia64_asm}:dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"linux-ia64-ecc","ecc:-DL_ENDIAN -DTERMIO -O2 -Wall -no_cpprt::-D_REENTRANT::-ldl:SIXTY_FOUR_BIT_LONG RC4_CHUNK:${ia64_asm}:dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"linux-ia64-icc","icc:-DL_ENDIAN -DTERMIO -O2 -Wall -no_cpprt::-D_REENTRANT::-ldl:SIXTY_FOUR_BIT_LONG RC4_CHUNK:${ia64_asm}:dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"linux-x86_64", "gcc:-m64 -DL_ENDIAN -DTERMIO -O3 -Wall -DMD32_REG_T=int::-D_REENTRANT::-ldl:SIXTY_FOUR_BIT_LONG RC4_CHUNK BF_PTR2 DES_INT DES_UNROLL:${x86_64_asm}:dlfcn:linux-shared:-fPIC:-m64:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++#### SPARC Linux setups ++# Ray Miller has patiently ++# assisted with debugging of following two configs. ++"linux-sparcv8","gcc:-mv8 -DB_ENDIAN -DTERMIO -O3 -fomit-frame-pointer -Wall -DBN_DIV2W::-D_REENTRANT::-ldl:BN_LLONG RC4_CHAR RC4_CHUNK DES_UNROLL BF_PTR::sparcv8.o:des_enc-sparc.o fcrypt_b.o:::::::::dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++# it's a real mess with -mcpu=ultrasparc option under Linux, but ++# -Wa,-Av8plus should do the trick no matter what. ++"linux-sparcv9","gcc:-m32 -mcpu=ultrasparc -DB_ENDIAN -DTERMIO -O3 -fomit-frame-pointer -Wall -Wa,-Av8plus -DBN_DIV2W::-D_REENTRANT:ULTRASPARC:-ldl:BN_LLONG RC4_CHAR RC4_CHUNK DES_UNROLL BF_PTR::sparcv8plus.o:des_enc-sparc.o fcrypt_b.o:::::::::dlfcn:linux-shared:-fPIC:-m32:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++# GCC 3.1 is a requirement ++"linux64-sparcv9","gcc:-m64 -mcpu=ultrasparc -DB_ENDIAN -DTERMIO -O3 -fomit-frame-pointer -Wall::-D_REENTRANT:ULTRASPARC:-ldl:SIXTY_FOUR_BIT_LONG RC4_CHAR RC4_CHUNK DES_UNROLL BF_PTR::::::::::::dlfcn:linux-shared:-fPIC:-m64:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++#### Alpha Linux with GNU C and Compaq C setups ++# Special notes: ++# - linux-alpha+bwx-gcc is ment to be used from ./config only. If you ++# ought to run './Configure linux-alpha+bwx-gcc' manually, do ++# complement the command line with -mcpu=ev56, -mcpu=ev6 or whatever ++# which is appropriate. ++# - If you use ccc keep in mind that -fast implies -arch host and the ++# compiler is free to issue instructions which gonna make elder CPU ++# choke. If you wish to build "blended" toolkit, add -arch generic ++# *after* -fast and invoke './Configure linux-alpha-ccc' manually. 
++# ++# ++# ++"linux-alpha-gcc","gcc:-O3 -DL_ENDIAN -DTERMIO::-D_REENTRANT::-ldl:SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_RISC1 DES_UNROLL:${no_asm}:dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"linux-alpha+bwx-gcc","gcc:-O3 -DL_ENDIAN -DTERMIO::-D_REENTRANT::-ldl:SIXTY_FOUR_BIT_LONG RC4_CHAR RC4_CHUNK DES_RISC1 DES_UNROLL:${no_asm}:dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"linux-alpha-ccc","ccc:-fast -readonly_strings -DL_ENDIAN -DTERMIO::-D_REENTRANT:::SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_INT DES_PTR DES_RISC1 DES_UNROLL:${no_asm}", ++"linux-alpha+bwx-ccc","ccc:-fast -readonly_strings -DL_ENDIAN -DTERMIO::-D_REENTRANT:::SIXTY_FOUR_BIT_LONG RC4_CHAR RC4_CHUNK DES_INT DES_PTR DES_RISC1 DES_UNROLL:${no_asm}", ++ ++#### *BSD [do see comment about ${BSDthreads} above!] ++"BSD-generic32","gcc:-DTERMIOS -O3 -fomit-frame-pointer -Wall::${BSDthreads}:::BN_LLONG RC2_CHAR RC4_INDEX DES_INT DES_UNROLL:${no_asm}:dlfcn:bsd-gcc-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"BSD-x86", "gcc:-DL_ENDIAN -DTERMIOS -O3 -fomit-frame-pointer -Wall::${BSDthreads}:::BN_LLONG ${x86_gcc_des} ${x86_gcc_opts}:${x86_out_asm}:dlfcn:bsd-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"BSD-x86-elf", "gcc:-DL_ENDIAN -DTERMIOS -O3 -fomit-frame-pointer -Wall::${BSDthreads}:::BN_LLONG ${x86_gcc_des} ${x86_gcc_opts}:${x86_elf_asm}:dlfcn:bsd-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"debug-BSD-x86-elf", "gcc:-DL_ENDIAN -DTERMIOS -O3 -Wall -g::${BSDthreads}:::BN_LLONG ${x86_gcc_des} ${x86_gcc_opts}:${x86_elf_asm}:dlfcn:bsd-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"BSD-sparcv8", "gcc:-DB_ENDIAN -DTERMIOS -O3 -mv8 -Wall::${BSDthreads}:::BN_LLONG RC2_CHAR RC4_INDEX DES_INT DES_UNROLL::sparcv8.o:des_enc-sparc.o fcrypt_b.o:::::::::dlfcn:bsd-gcc-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++ ++"BSD-generic64","gcc:-DTERMIOS -O3 -Wall::${BSDthreads}:::SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_INT DES_UNROLL:${no_asm}:dlfcn:bsd-gcc-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++# -DMD32_REG_T=int doesn't actually belong in sparc64 target, it ++# simply *happens* to work around a compiler bug in gcc 3.3.3, ++# triggered by RIPEMD160 code. 
++"BSD-sparc64", "gcc:-DB_ENDIAN -DTERMIOS -O3 -DMD32_REG_T=int -Wall::${BSDthreads}:::SIXTY_FOUR_BIT_LONG RC2_CHAR RC4_CHUNK DES_INT DES_PTR DES_RISC2 BF_PTR:::des_enc-sparc.o fcrypt_b.o:::::::::dlfcn:bsd-gcc-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"BSD-ia64", "gcc:-DL_ENDIAN -DTERMIOS -O3 -Wall::${BSDthreads}:::SIXTY_FOUR_BIT_LONG RC4_CHUNK:${ia64_asm}:dlfcn:bsd-gcc-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"BSD-x86_64", "gcc:-DL_ENDIAN -DTERMIOS -O3 -DMD32_REG_T=int -Wall::${BSDthreads}:::SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_INT DES_UNROLL:${x86_64_asm}:dlfcn:bsd-gcc-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++ ++"bsdi-elf-gcc", "gcc:-DPERL5 -DL_ENDIAN -fomit-frame-pointer -O3 -march=i486 -Wall::(unknown)::-ldl:BN_LLONG ${x86_gcc_des} ${x86_gcc_opts}:${x86_elf_asm}:dlfcn:bsd-gcc-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++ ++"nextstep", "cc:-O -Wall::(unknown):::BN_LLONG ${x86_gcc_des} ${x86_gcc_opts}:::", ++"nextstep3.3", "cc:-O3 -Wall::(unknown):::BN_LLONG ${x86_gcc_des} ${x86_gcc_opts}:::", ++ ++# NCR MP-RAS UNIX ver 02.03.01 ++"ncr-scde","cc:-O6 -Xa -Hoff=BEHAVED -686 -Hwide -Hiw::(unknown)::-lsocket -lnsl -lc89:${x86_gcc_des} ${x86_gcc_opts}:::", ++ ++# QNX ++"qnx4", "cc:-DL_ENDIAN -DTERMIO::(unknown):::${x86_gcc_des} ${x86_gcc_opts}:", ++"qnx6", "cc:-DL_ENDIAN -DTERMIOS::(unknown)::-lsocket:${x86_gcc_des} ${x86_gcc_opts}:", ++ ++#### SCO/Caldera targets. ++# ++# Originally we had like unixware-*, unixware-*-pentium, unixware-*-p6, etc. ++# Now we only have blended unixware-* as it's the only one used by ./config. ++# If you want to optimize for particular microarchitecture, bypass ./config ++# and './Configure unixware-7 -Kpentium_pro' or whatever appropriate. ++# Note that not all targets include assembler support. Mostly because of ++# lack of motivation to support out-of-date platforms with out-of-date ++# compiler drivers and assemblers. Tim Rice has ++# patiently assisted to debug most of it. ++# ++# UnixWare 2.0x fails destest with -O. ++"unixware-2.0","cc:-DFILIO_H -DNO_STRINGS_H::-Kthread::-lsocket -lnsl -lresolv -lx:${x86_gcc_des} ${x86_gcc_opts}:::", ++"unixware-2.1","cc:-O -DFILIO_H::-Kthread::-lsocket -lnsl -lresolv -lx:${x86_gcc_des} ${x86_gcc_opts}:::", ++"unixware-7","cc:-O -DFILIO_H -Kalloca::-Kthread::-lsocket -lnsl:BN_LLONG MD2_CHAR RC4_INDEX ${x86_gcc_des}:${x86_elf_asm}:dlfcn:svr5-shared:-Kpic::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"unixware-7-gcc","gcc:-DL_ENDIAN -DFILIO_H -O3 -fomit-frame-pointer -march=pentium -Wall::-D_REENTRANT::-lsocket -lnsl:BN_LLONG ${x86_gcc_des} ${x86_gcc_opts}:${x86_elf_asm}:dlfcn:gnu-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++# SCO 5 - Ben Laurie says the -O breaks the SCO cc. ++"sco5-cc", "cc:-belf::(unknown)::-lsocket -lnsl:${x86_gcc_des} ${x86_gcc_opts}:${x86_elf_asm}:dlfcn:svr3-shared:-Kpic::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"sco5-gcc", "gcc:-O3 -fomit-frame-pointer::(unknown)::-lsocket -lnsl:BN_LLONG ${x86_gcc_des} ${x86_gcc_opts}:${x86_elf_asm}:dlfcn:svr3-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++ ++#### IBM's AIX. 
++"aix3-cc", "cc:-O -DB_ENDIAN -qmaxmem=16384::(unknown):AIX::BN_LLONG RC4_CHAR:::", ++"aix-gcc", "gcc:-O -DB_ENDIAN::-pthread:AIX::BN_LLONG RC4_CHAR::aix_ppc32.o::::::::::dlfcn:aix-shared::-shared -Wl,-G:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)::-X 32", ++"aix64-gcc","gcc:-maix64 -O -DB_ENDIAN::-pthread:AIX::SIXTY_FOUR_BIT_LONG RC4_CHAR::aix_ppc64.o::::::::::dlfcn:aix-shared::-maix64 -shared -Wl,-G:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)::-X64", ++# Below targets assume AIX 5. Idea is to effectively disregard $OBJECT_MODE ++# at build time. $OBJECT_MODE is respected at ./config stage! ++"aix-cc", "cc:-q32 -O -DB_ENDIAN -qmaxmem=16384 -qro -qroconst::-qthreaded:AIX::BN_LLONG RC4_CHAR::aix_ppc32.o::::::::::dlfcn:aix-shared::-q32 -G:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)::-X 32", ++"aix64-cc", "cc:-q64 -O -DB_ENDIAN -qmaxmem=16384 -qro -qroconst::-qthreaded:AIX::SIXTY_FOUR_BIT_LONG RC4_CHAR::aix_ppc64.o::::::::::dlfcn:aix-shared::-q64 -G:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)::-X 64", ++ ++# ++# Cray T90 and similar (SDSC) ++# It's Big-endian, but the algorithms work properly when B_ENDIAN is NOT ++# defined. The T90 ints and longs are 8 bytes long, and apparently the ++# B_ENDIAN code assumes 4 byte ints. Fortunately, the non-B_ENDIAN and ++# non L_ENDIAN code aligns the bytes in each word correctly. ++# ++# The BIT_FIELD_LIMITS define is to avoid two fatal compiler errors: ++#'Taking the address of a bit field is not allowed. ' ++#'An expression with bit field exists as the operand of "sizeof" ' ++# (written by Wayne Schroeder ) ++# ++# j90 is considered the base machine type for unicos machines, ++# so this configuration is now called "cray-j90" ... ++"cray-j90", "cc: -DBIT_FIELD_LIMITS -DTERMIOS::(unknown):CRAY::SIXTY_FOUR_BIT_LONG DES_INT:::", ++ ++# ++# Cray T3E (Research Center Juelich, beckman@acl.lanl.gov) ++# ++# The BIT_FIELD_LIMITS define was written for the C90 (it seems). I added ++# another use. Basically, the problem is that the T3E uses some bit fields ++# for some st_addr stuff, and then sizeof and address-of fails ++# I could not use the ams/alpha.o option because the Cray assembler, 'cam' ++# did not like it. ++"cray-t3e", "cc: -DBIT_FIELD_LIMITS -DTERMIOS::(unknown):CRAY::SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_INT:::", ++ ++# DGUX, 88100. ++"dgux-R3-gcc", "gcc:-O3 -fomit-frame-pointer::(unknown):::RC4_INDEX DES_UNROLL:::", ++"dgux-R4-gcc", "gcc:-O3 -fomit-frame-pointer::(unknown)::-lnsl -lsocket:RC4_INDEX DES_UNROLL:::", ++"dgux-R4-x86-gcc", "gcc:-O3 -fomit-frame-pointer -DL_ENDIAN::(unknown)::-lnsl -lsocket:BN_LLONG ${x86_gcc_des} ${x86_gcc_opts}:${x86_elf_asm}", ++ ++# Sinix/ReliantUNIX RM400 ++# NOTE: The CDS++ Compiler up to V2.0Bsomething has the IRIX_CC_BUG optimizer problem. 
Better use -g */ ++"ReliantUNIX","cc:-KPIC -g -DTERMIOS -DB_ENDIAN::-Kthread:SNI:-lsocket -lnsl -lc -L/usr/ucblib -lucb:BN_LLONG DES_PTR DES_RISC2 DES_UNROLL BF_PTR:${no_asm}:dlfcn:reliantunix-shared:::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", ++"SINIX","cc:-O::(unknown):SNI:-lsocket -lnsl -lc -L/usr/ucblib -lucb:RC4_INDEX RC4_CHAR:::", ++"SINIX-N","/usr/ucb/cc:-O2 -misaligned::(unknown)::-lucb:RC4_INDEX RC4_CHAR:::", ++ ++# SIEMENS BS2000/OSD: an EBCDIC-based mainframe ++"BS2000-OSD","c89:-O -XLLML -XLLMK -XL -DB_ENDIAN -DTERMIOS -DCHARSET_EBCDIC::(unknown)::-lsocket -lnsl:THIRTY_TWO_BIT DES_PTR DES_UNROLL MD2_CHAR RC4_INDEX RC4_CHAR BF_PTR:::", ++ ++# OS/390 Unix an EBCDIC-based Unix system on IBM mainframe ++# You need to compile using the c89.sh wrapper in the tools directory, because the ++# IBM compiler does not like the -L switch after any object modules. ++# ++"OS390-Unix","c89.sh:-O -DB_ENDIAN -DCHARSET_EBCDIC -DNO_SYS_PARAM_H -D_ALL_SOURCE::(unknown):::THIRTY_TWO_BIT DES_PTR DES_UNROLL MD2_CHAR RC4_INDEX RC4_CHAR BF_PTR:::", ++ ++# Win64 targets, WIN64I denotes IA-64 and WIN64A - AMD64 ++"VC-WIN64I","cl::::WIN64I::SIXTY_FOUR_BIT RC4_CHUNK_LL DES_INT EXPORT_VAR_AS_FN:${no_asm}:win32", ++"VC-WIN64A","cl::::WIN64A::SIXTY_FOUR_BIT RC4_CHUNK_LL DES_INT EXPORT_VAR_AS_FN:${no_asm}:win32", ++ ++# Visual C targets ++"VC-NT","cl::::WINNT::BN_LLONG RC4_INDEX EXPORT_VAR_AS_FN ${x86_gcc_opts}:${no_asm}:win32", ++"VC-CE","cl::::WINCE::BN_LLONG RC4_INDEX EXPORT_VAR_AS_FN ${x86_gcc_opts}:${no_asm}:win32", ++"VC-WIN32","cl::::WIN32::BN_LLONG RC4_INDEX EXPORT_VAR_AS_FN ${x86_gcc_opts}:${no_asm}:win32", ++ ++# Borland C++ 4.5 ++"BC-32","bcc32::::WIN32::BN_LLONG DES_PTR RC4_INDEX EXPORT_VAR_AS_FN:${no_asm}:win32", ++ ++# MinGW ++"mingw", "gcc:-mno-cygwin -DL_ENDIAN -fomit-frame-pointer -O3 -march=i486 -Wall -D_WIN32_WINNT=0x333:::MINGW32:-lwsock32 -lgdi32:BN_LLONG ${x86_gcc_des} ${x86_gcc_opts} EXPORT_VAR_AS_FN:${x86_coff_asm}:win32:cygwin-shared:-D_WINDLL -DOPENSSL_USE_APPLINK:-mno-cygwin -shared:.dll.a", ++ ++# UWIN ++"UWIN", "cc:-DTERMIOS -DL_ENDIAN -O -Wall:::UWIN::BN_LLONG ${x86_gcc_des} ${x86_gcc_opts}:${no_asm}:win32", ++ ++# Cygwin ++"Cygwin-pre1.3", "gcc:-DTERMIOS -DL_ENDIAN -fomit-frame-pointer -O3 -m486 -Wall::(unknown):CYGWIN32::BN_LLONG ${x86_gcc_des} ${x86_gcc_opts}:${no_asm}:win32", ++"Cygwin", "gcc:-DTERMIOS -DL_ENDIAN -fomit-frame-pointer -O3 -march=i486 -Wall:::CYGWIN32::BN_LLONG ${x86_gcc_des} ${x86_gcc_opts}:${x86_coff_asm}:dlfcn:cygwin-shared:-D_WINDLL:-shared:.dll.a", ++"debug-Cygwin", "gcc:-DTERMIOS -DL_ENDIAN -march=i486 -Wall -DBN_DEBUG -DREF_CHECK -DCONF_DEBUG -DCRYPTO_MDEBUG -DOPENSSL_NO_ASM -g -Wformat -Wshadow -Wmissing-prototypes -Wmissing-declarations -Werror:::CYGWIN32:::${no_asm}:dlfcn:cygwin-shared:-D_WINDLL:-shared:.dll.a", ++ ++# NetWare from David Ward (dsward@novell.com) ++# requires either MetroWerks NLM development tools, or gcc / nlmconv ++# NetWare defaults socket bio to WinSock sockets. However, ++# the builds can be configured to use BSD sockets instead. 
++# netware-clib => legacy CLib c-runtime support ++"netware-clib", "mwccnlm::::::${x86_gcc_opts}::", ++"netware-clib-bsdsock", "mwccnlm::::::${x86_gcc_opts}::", ++"netware-clib-gcc", "i586-netware-gcc:-nostdinc -I/ndk/nwsdk/include/nlm -I/ndk/ws295sdk/include -DL_ENDIAN -DNETWARE_CLIB -DOPENSSL_SYSNAME_NETWARE -O2 -Wall:::::${x86_gcc_opts}::", ++"netware-clib-bsdsock-gcc", "i586-netware-gcc:-nostdinc -I/ndk/nwsdk/include/nlm -DNETWARE_BSDSOCK -DNETDB_USE_INTERNET -DL_ENDIAN -DNETWARE_CLIB -DOPENSSL_SYSNAME_NETWARE -O2 -Wall:::::${x86_gcc_opts}::", ++# netware-libc => LibC/NKS support ++"netware-libc", "mwccnlm::::::BN_LLONG ${x86_gcc_opts}::", ++"netware-libc-bsdsock", "mwccnlm::::::BN_LLONG ${x86_gcc_opts}::", ++"netware-libc-gcc", "i586-netware-gcc:-nostdinc -I/ndk/libc/include -I/ndk/libc/include/winsock -DL_ENDIAN -DNETWARE_LIBC -DOPENSSL_SYSNAME_NETWARE -DTERMIO -O2 -Wall:::::BN_LLONG ${x86_gcc_opts}::", ++"netware-libc-bsdsock-gcc", "i586-netware-gcc:-nostdinc -I/ndk/libc/include -DNETWARE_BSDSOCK -DL_ENDIAN -DNETWARE_LIBC -DOPENSSL_SYSNAME_NETWARE -DTERMIO -O2 -Wall:::::BN_LLONG ${x86_gcc_opts}::", ++ ++# DJGPP ++"DJGPP", "gcc:-I/dev/env/WATT_ROOT/inc -DTERMIOS -DL_ENDIAN -fomit-frame-pointer -O2 -Wall:::MSDOS:-L/dev/env/WATT_ROOT/lib -lwatt:BN_LLONG ${x86_gcc_des} ${x86_gcc_opts}:${x86_out_asm}:", ++ ++# Ultrix from Bernhard Simon ++"ultrix-cc","cc:-std1 -O -Olimit 2500 -DL_ENDIAN::(unknown):::::::", ++"ultrix-gcc","gcc:-O3 -DL_ENDIAN::(unknown):::BN_LLONG::::", ++# K&R C is no longer supported; you need gcc on old Ultrix installations ++##"ultrix","cc:-O2 -DNOPROTO -DNOCONST -DL_ENDIAN::(unknown):::::::", ++ ++##### MacOS X (a.k.a. Rhapsody or Darwin) setup ++"rhapsody-ppc-cc","cc:-O3 -DB_ENDIAN::(unknown):MACOSX_RHAPSODY::BN_LLONG RC4_CHAR RC4_CHUNK DES_UNROLL BF_PTR:${no_asm}::", ++"darwin-ppc-cc","cc:-arch ppc -O3 -DB_ENDIAN::-D_REENTRANT:MACOSX:-Wl,-search_paths_first%:BN_LLONG RC4_CHAR RC4_CHUNK DES_UNROLL BF_PTR::osx_ppc32.o::::::::::dlfcn:darwin-shared:-fPIC -fno-common:-arch ppc -dynamiclib:.\$(SHLIB_MAJOR).\$(SHLIB_MINOR).dylib", ++"darwin64-ppc-cc","cc:-arch ppc64 -O3 -DB_ENDIAN::-D_REENTRANT:MACOSX:-Wl,-search_paths_first%:SIXTY_FOUR_BIT_LONG RC4_CHAR RC4_CHUNK DES_UNROLL BF_PTR::osx_ppc64.o::::::::::dlfcn:darwin-shared:-fPIC -fno-common:-arch ppc64 -dynamiclib:.\$(SHLIB_MAJOR).\$(SHLIB_MINOR).dylib", ++"darwin-i386-cc","cc:-arch i386 -O3 -fomit-frame-pointer -DL_ENDIAN::-D_REENTRANT:MACOSX:-Wl,-search_paths_first%:BN_LLONG RC4_CHAR RC4_CHUNK DES_UNROLL BF_PTR:${no_asm}:dlfcn:darwin-shared:-fPIC -fno-common:-arch i386 -dynamiclib:.\$(SHLIB_MAJOR).\$(SHLIB_MINOR).dylib", ++"debug-darwin-i386-cc","cc:-arch i386 -g3 -DL_ENDIAN::-D_REENTRANT:MACOSX:-Wl,-search_paths_first%:BN_LLONG RC4_CHAR RC4_CHUNK DES_UNROLL BF_PTR:${no_asm}:dlfcn:darwin-shared:-fPIC -fno-common:-arch i386 -dynamiclib:.\$(SHLIB_MAJOR).\$(SHLIB_MINOR).dylib", ++"darwin64-x86_64-cc","cc:-arch x86_64 -O3 -fomit-frame-pointer -DL_ENDIAN -DMD32_REG_T=int -Wall::-D_REENTRANT:MACOSX:-Wl,-search_paths_first%:SIXTY_FOUR_BIT_LONG RC4_CHAR RC4_CHUNK BF_PTR2 DES_INT DES_UNROLL:${no_asm}:dlfcn:darwin-shared:-fPIC -fno-common:-arch x86_64 -dynamiclib:.\$(SHLIB_MAJOR).\$(SHLIB_MINOR).dylib", ++"debug-darwin-ppc-cc","cc:-DBN_DEBUG -DREF_CHECK -DCONF_DEBUG -DCRYPTO_MDEBUG -DB_ENDIAN -g -Wall -O::-D_REENTRANT:MACOSX::BN_LLONG RC4_CHAR RC4_CHUNK DES_UNROLL BF_PTR::osx_ppc32.o::::::::::dlfcn:darwin-shared:-fPIC -fno-common:-dynamiclib:.\$(SHLIB_MAJOR).\$(SHLIB_MINOR).dylib", ++ ++##### A/UX ++"aux3-gcc","gcc:-O2 
-DTERMIO::(unknown):AUX:-lbsd:RC4_CHAR RC4_CHUNK DES_UNROLL BF_PTR:::", ++ ++##### Sony NEWS-OS 4.x ++"newsos4-gcc","gcc:-O -DB_ENDIAN::(unknown):NEWS4:-lmld -liberty:BN_LLONG RC4_CHAR RC4_CHUNK DES_PTR DES_RISC1 DES_UNROLL BF_PTR::::", ++ ++##### GNU Hurd ++"hurd-x86", "gcc:-DL_ENDIAN -DTERMIOS -O3 -fomit-frame-pointer -march=i486 -Wall::-D_REENTRANT::-ldl:BN_LLONG ${x86_gcc_des} ${x86_gcc_opts}:${x86_elf_asm}:dlfcn:linux-shared:-fPIC", ++ ++##### OS/2 EMX ++"OS2-EMX", "gcc::::::::", ++ ++##### VxWorks for various targets ++"vxworks-ppc405","ccppc:-g -msoft-float -mlongcall -DCPU=PPC405 -I\$(WIND_BASE)/target/h:::VXWORKS:-r:::::", ++"vxworks-ppc750","ccppc:-ansi -nostdinc -DPPC750 -D_REENTRANT -fvolatile -fno-builtin -fno-for-scope -fsigned-char -Wall -msoft-float -mlongcall -DCPU=PPC604 -I\$(WIND_BASE)/target/h \$(DEBUG_FLAG):::VXWORKS:-r:::::", ++"vxworks-ppc750-debug","ccppc:-ansi -nostdinc -DPPC750 -D_REENTRANT -fvolatile -fno-builtin -fno-for-scope -fsigned-char -Wall -msoft-float -mlongcall -DCPU=PPC604 -I\$(WIND_BASE)/target/h -DBN_DEBUG -DREF_CHECK -DCONF_DEBUG -DCRYPTO_MDEBUG -DPEDANTIC -DDEBUG_SAFESTACK -DDEBUG -g:::VXWORKS:-r:::::", ++"vxworks-ppc860","ccppc:-nostdinc -msoft-float -DCPU=PPC860 -DNO_STRINGS_H -I\$(WIND_BASE)/target/h:::VXWORKS:-r:::::", ++"vxworks-mipsle","ccmips:-B\$(WIND_BASE)/host/\$(WIND_HOST_TYPE)/lib/gcc-lib/ -DL_ENDIAN -EL -Wl,-EL -mips2 -mno-branch-likely -G 0 -fno-builtin -msoft-float -DCPU=MIPS32 -DMIPSEL -DNO_STRINGS_H -I\$(WIND_BASE)/target/h:::VXWORKS:-r::${no_asm}::::::ranlibmips:", ++ ++##### Compaq Non-Stop Kernel (Tandem) ++"tandem-c89","c89:-Ww -D__TANDEM -D_XOPEN_SOURCE -D_XOPEN_SOURCE_EXTENDED=1 -D_TANDEM_SOURCE -DB_ENDIAN::(unknown):::THIRTY_TWO_BIT:::", ++ ++); ++ ++my @MK1MF_Builds=qw(VC-WIN64I VC-WIN64A ++ VC-NT VC-CE VC-WIN32 ++ BC-32 OS2-EMX ++ netware-clib netware-clib-bsdsock ++ netware-libc netware-libc-bsdsock); ++ ++my $idx = 0; ++my $idx_cc = $idx++; ++my $idx_cflags = $idx++; ++my $idx_unistd = $idx++; ++my $idx_thread_cflag = $idx++; ++my $idx_sys_id = $idx++; ++my $idx_lflags = $idx++; ++my $idx_bn_ops = $idx++; ++my $idx_cpuid_obj = $idx++; ++my $idx_bn_obj = $idx++; ++my $idx_des_obj = $idx++; ++my $idx_aes_obj = $idx++; ++my $idx_bf_obj = $idx++; ++my $idx_md5_obj = $idx++; ++my $idx_sha1_obj = $idx++; ++my $idx_cast_obj = $idx++; ++my $idx_rc4_obj = $idx++; ++my $idx_rmd160_obj = $idx++; ++my $idx_rc5_obj = $idx++; ++my $idx_dso_scheme = $idx++; ++my $idx_shared_target = $idx++; ++my $idx_shared_cflag = $idx++; ++my $idx_shared_ldflag = $idx++; ++my $idx_shared_extension = $idx++; ++my $idx_ranlib = $idx++; ++my $idx_arflags = $idx++; ++ ++my $prefix=""; ++my $openssldir=""; ++my $exe_ext=""; ++my $install_prefix=""; ++my $fipslibdir="/usr/local/ssl/fips-1.0/lib/"; ++my $nofipscanistercheck=0; ++my $fipsdso=0; ++my $fipscanisterinternal="n"; ++my $baseaddr="0xFB00000"; ++my $no_threads=0; ++my $threads=0; ++my $no_shared=0; # but "no-shared" is default ++my $zlib=1; # but "no-zlib" is default ++my $no_krb5=0; # but "no-krb5" is implied unless "--with-krb5-..." 
is used ++my $no_rfc3779=1; # but "no-rfc3779" is default ++my $montasm=1; # but "no-montasm" is default ++my $no_asm=0; ++my $no_dso=0; ++my $no_gmp=0; ++my @skip=(); ++my $Makefile="Makefile"; ++my $des_locl="crypto/des/des_locl.h"; ++my $des ="crypto/des/des.h"; ++my $bn ="crypto/bn/bn.h"; ++my $md2 ="crypto/md2/md2.h"; ++my $rc4 ="crypto/rc4/rc4.h"; ++my $rc4_locl="crypto/rc4/rc4_locl.h"; ++my $idea ="crypto/idea/idea.h"; ++my $rc2 ="crypto/rc2/rc2.h"; ++my $bf ="crypto/bf/bf_locl.h"; ++my $bn_asm ="bn_asm.o"; ++my $des_enc="des_enc.o fcrypt_b.o"; ++my $fips_des_enc="fips_des_enc.o"; ++my $aes_enc="aes_core.o aes_cbc.o"; ++my $bf_enc ="bf_enc.o"; ++my $cast_enc="c_enc.o"; ++my $rc4_enc="rc4_enc.o rc4_skey.o"; ++my $rc5_enc="rc5_enc.o"; ++my $md5_obj=""; ++my $sha1_obj=""; ++my $rmd160_obj=""; ++my $processor=""; ++my $default_ranlib; ++my $perl; ++my $fips=0; ++ ++ ++# All of the following is disabled by default (RC5 was enabled before 0.9.8): ++ ++my %disabled = ( # "what" => "comment" [or special keyword "experimental"] ++ "camellia" => "default", ++ "capieng" => "default", ++ "cms" => "default", ++ "gmp" => "default", ++ "jpake" => "experimental", ++ "mdc2" => "default", ++ "montasm" => "default", # explicit option in 0.9.8 only (implicitly enabled in 0.9.9) ++ "rc5" => "default", ++ "rfc3779" => "default", ++ "seed" => "default", ++ "shared" => "default", ++ "zlib" => "default", ++ "zlib-dynamic" => "default" ++ ); ++my @experimental = (); ++ ++# This is what $depflags will look like with the above defaults ++# (we need this to see if we should advise the user to run "make depend"): ++my $default_depflags = " -DOPENSSL_NO_CAMELLIA -DOPENSSL_NO_CAPIENG -DOPENSSL_NO_CMS -DOPENSSL_NO_GMP -DOPENSSL_NO_JPAKE -DOPENSSL_NO_MDC2 -DOPENSSL_NO_RC5 -DOPENSSL_NO_RFC3779 -DOPENSSL_NO_SEED"; ++ ++ ++# Explicit "no-..." options will be collected in %disabled along with the defaults. ++# To remove something from %disabled, use "enable-foo" (unless it's experimental). ++# For symmetry, "disable-foo" is a synonym for "no-foo". ++ ++# For features called "experimental" here, a more explicit "experimental-foo" is needed to enable. ++# We will collect such requests in @experimental. ++# To avoid accidental use of experimental features, applications will have to use -DOPENSSL_EXPERIMENTAL_FOO. ++ ++ ++my $no_sse2=0; ++ ++&usage if ($#ARGV < 0); ++ ++my $flags; ++my $depflags; ++my $openssl_experimental_defines; ++my $openssl_algorithm_defines; ++my $openssl_thread_defines; ++my $openssl_sys_defines=""; ++my $openssl_other_defines; ++my $libs; ++my $libkrb5=""; ++my $target; ++my $options; ++my $symlink; ++my $make_depend=0; ++my %withargs=(); ++ ++my @argvcopy=@ARGV; ++my $argvstring=""; ++my $argv_unprocessed=1; ++ ++while($argv_unprocessed) ++ { ++ $flags=""; ++ $depflags=""; ++ $openssl_experimental_defines=""; ++ $openssl_algorithm_defines=""; ++ $openssl_thread_defines=""; ++ $openssl_sys_defines=""; ++ $openssl_other_defines=""; ++ $libs=""; ++ $target=""; ++ $options=""; ++ $symlink=1; ++ ++ $argv_unprocessed=0; ++ $argvstring=join(' ',@argvcopy); ++ ++PROCESS_ARGS: ++ foreach (@argvcopy) ++ { ++ s /^-no-/no-/; # some people just can't read the instructions ++ ++ # rewrite some options in "enable-..." 
form ++ s /^-?-?shared$/enable-shared/; ++ s /^threads$/enable-threads/; ++ s /^zlib$/enable-zlib/; ++ s /^zlib-dynamic$/enable-zlib-dynamic/; ++ ++ if (/^no-(.+)$/ || /^disable-(.+)$/) ++ { ++ if (!($disabled{$1} eq "experimental")) ++ { ++ if ($1 eq "ssl") ++ { ++ $disabled{"ssl2"} = "option(ssl)"; ++ $disabled{"ssl3"} = "option(ssl)"; ++ } ++ elsif ($1 eq "tls") ++ { ++ $disabled{"tls1"} = "option(tls)" ++ } ++ else ++ { ++ $disabled{$1} = "option"; ++ } ++ } ++ } ++ elsif (/^enable-(.+)$/ || /^experimental-(.+)$/) ++ { ++ my $algo = $1; ++ if ($disabled{$algo} eq "experimental") ++ { ++ die "You are requesting an experimental feature; please say 'experimental-$algo' if you are sure\n" ++ unless (/^experimental-/); ++ push @experimental, $algo; ++ } ++ delete $disabled{$algo}; ++ ++ $threads = 1 if ($algo eq "threads"); ++ } ++ elsif (/^--test-sanity$/) ++ { ++ exit(&test_sanity()); ++ } ++ elsif (/^reconfigure/ || /^reconf/) ++ { ++ if (open(IN,"<$Makefile")) ++ { ++ while () ++ { ++ chomp; ++ if (/^CONFIGURE_ARGS=(.*)/) ++ { ++ $argvstring=$1; ++ @argvcopy=split(' ',$argvstring); ++ die "Incorrect data to reconfigure, please do a normal configuration\n" ++ if (grep(/^reconf/,@argvcopy)); ++ print "Reconfiguring with: $argvstring\n"; ++ $argv_unprocessed=1; ++ close(IN); ++ last PROCESS_ARGS; ++ } ++ } ++ close(IN); ++ } ++ die "Insufficient data to reconfigure, please do a normal configuration\n"; ++ } ++ elsif (/^386$/) ++ { $processor=386; } ++ elsif (/^fips$/) ++ { ++ $fips=1; ++ } ++ elsif (/^rsaref$/) ++ { ++ # No RSAref support any more since it's not needed. ++ # The check for the option is there so scripts aren't ++ # broken ++ } ++ elsif (/^nofipscanistercheck$/) ++ { ++ $fips = 1; ++ $nofipscanistercheck = 1; ++ } ++ elsif (/^fipscanisterbuild$/) ++ { ++ $fips = 1; ++ $nofipscanistercheck = 1; ++ $fipslibdir=""; ++ $fipscanisterinternal="y"; ++ } ++ elsif (/^fipsdso$/) ++ { ++ $fips = 1; ++ $nofipscanistercheck = 1; ++ $fipslibdir=""; ++ $fipscanisterinternal="y"; ++ $fipsdso = 1; ++ } ++ elsif (/^[-+]/) ++ { ++ if (/^-[lL](.*)$/) ++ { ++ $libs.=$_." "; ++ } ++ elsif (/^-[^-]/ or /^\+/) ++ { ++ $flags.=$_." "; ++ } ++ elsif (/^--prefix=(.*)$/) ++ { ++ $prefix=$1; ++ } ++ elsif (/^--openssldir=(.*)$/) ++ { ++ $openssldir=$1; ++ } ++ elsif (/^--install.prefix=(.*)$/) ++ { ++ $install_prefix=$1; ++ } ++ elsif (/^--with-krb5-(dir|lib|include|flavor)=(.*)$/) ++ { ++ $withargs{"krb5-".$1}=$2; ++ } ++ elsif (/^--with-zlib-lib=(.*)$/) ++ { ++ $withargs{"zlib-lib"}=$1; ++ } ++ elsif (/^--with-zlib-include=(.*)$/) ++ { ++ $withargs{"zlib-include"}="-I$1"; ++ } ++ elsif (/^--with-fipslibdir=(.*)$/) ++ { ++ $fipslibdir="$1/"; ++ } ++ elsif (/^--with-baseaddr=(.*)$/) ++ { ++ $baseaddr="$1"; ++ } ++ else ++ { ++ print STDERR $usage; ++ exit(1); ++ } ++ } ++ elsif ($_ =~ /^([^:]+):(.+)$/) ++ { ++ eval "\$table{\$1} = \"$2\""; # allow $xxx constructs in the string ++ $target=$1; ++ } ++ else ++ { ++ die "target already defined - $target (offending arg: $_)\n" if ($target ne ""); ++ $target=$_; ++ } ++ ++ unless ($_ eq $target || /^no-/ || /^disable-/) ++ { ++ # "no-..." follows later after implied disactivations ++ # have been derived. (Don't take this too seroiusly, ++ # we really only write OPTIONS to the Makefile out of ++ # nostalgia.) 
++ ++ if ($options eq "") ++ { $options = $_; } ++ else ++ { $options .= " ".$_; } ++ } ++ } ++ } ++ ++ ++ ++if ($processor eq "386") ++ { ++ $disabled{"sse2"} = "forced"; ++ } ++ ++if (!defined($withargs{"krb5-flavor"}) || $withargs{"krb5-flavor"} eq "") ++ { ++ $disabled{"krb5"} = "krb5-flavor not specified"; ++ } ++ ++if (!defined($disabled{"zlib-dynamic"})) ++ { ++ # "zlib-dynamic" was specifically enabled, so enable "zlib" ++ delete $disabled{"zlib"}; ++ } ++ ++if (defined($disabled{"rijndael"})) ++ { ++ $disabled{"aes"} = "forced"; ++ } ++if (defined($disabled{"des"})) ++ { ++ $disabled{"mdc2"} = "forced"; ++ } ++if (defined($disabled{"ec"})) ++ { ++ $disabled{"ecdsa"} = "forced"; ++ $disabled{"ecdh"} = "forced"; ++ } ++ ++# SSL 2.0 requires MD5 and RSA ++if (defined($disabled{"md5"}) || defined($disabled{"rsa"})) ++ { ++ $disabled{"ssl2"} = "forced"; ++ } ++ ++# SSL 3.0 and TLS requires MD5 and SHA and either RSA or DSA+DH ++if (defined($disabled{"md5"}) || defined($disabled{"sha"}) ++ || (defined($disabled{"rsa"}) ++ && (defined($disabled{"dsa"}) || defined($disabled{"dh"})))) ++ { ++ $disabled{"ssl3"} = "forced"; ++ $disabled{"tls1"} = "forced"; ++ } ++ ++if (defined($disabled{"tls1"})) ++ { ++ $disabled{"tlsext"} = "forced"; ++ } ++ ++if ($target eq "TABLE") { ++ foreach $target (sort keys %table) { ++ print_table_entry($target); ++ } ++ exit 0; ++} ++ ++if ($target eq "LIST") { ++ foreach (sort keys %table) { ++ print; ++ print "\n"; ++ } ++ exit 0; ++} ++ ++if ($target =~ m/^CygWin32(-.*)$/) { ++ $target = "Cygwin".$1; ++} ++ ++print "Configuring for $target\n"; ++ ++&usage if (!defined($table{$target})); ++ ++my @fields = split(/\s*:\s*/,$table{$target} . ":" x 30 , -1); ++my $cc = $fields[$idx_cc]; ++my $cflags = $fields[$idx_cflags]; ++my $unistd = $fields[$idx_unistd]; ++my $thread_cflag = $fields[$idx_thread_cflag]; ++my $sys_id = $fields[$idx_sys_id]; ++my $lflags = $fields[$idx_lflags]; ++my $bn_ops = $fields[$idx_bn_ops]; ++my $cpuid_obj = $fields[$idx_cpuid_obj]; ++my $bn_obj = $fields[$idx_bn_obj]; ++my $des_obj = $fields[$idx_des_obj]; ++my $aes_obj = $fields[$idx_aes_obj]; ++my $bf_obj = $fields[$idx_bf_obj]; ++my $md5_obj = $fields[$idx_md5_obj]; ++my $sha1_obj = $fields[$idx_sha1_obj]; ++my $cast_obj = $fields[$idx_cast_obj]; ++my $rc4_obj = $fields[$idx_rc4_obj]; ++my $rmd160_obj = $fields[$idx_rmd160_obj]; ++my $rc5_obj = $fields[$idx_rc5_obj]; ++my $dso_scheme = $fields[$idx_dso_scheme]; ++my $shared_target = $fields[$idx_shared_target]; ++my $shared_cflag = $fields[$idx_shared_cflag]; ++my $shared_ldflag = $fields[$idx_shared_ldflag]; ++my $shared_extension = $fields[$idx_shared_extension]; ++my $ranlib = $fields[$idx_ranlib]; ++my $arflags = $fields[$idx_arflags]; ++ ++if ($fips) ++ { ++ delete $disabled{"shared"} if ($disabled{"shared"} eq "default"); ++ $disabled{"asm"}="forced" ++ if ($target !~ "VC\-.*" && ++ "$cpuid_obj:$bn_obj:$aes_obj:$des_obj:$sha1_obj" eq "::::"); ++ } ++ ++foreach (sort @experimental) ++ { ++ my $ALGO; ++ ($ALGO = $_) =~ tr/[a-z]/[A-Z]/; ++ ++ # opensslconf.h will set OPENSSL_NO_... unless OPENSSL_EXPERIMENTAL_... 
is defined ++ $openssl_experimental_defines .= "#define OPENSSL_NO_$ALGO\n"; ++ $cflags .= " -DOPENSSL_EXPERIMENTAL_$ALGO"; ++ } ++ ++foreach (sort (keys %disabled)) ++ { ++ $options .= " no-$_"; ++ ++ printf " no-%-12s %-10s", $_, "[$disabled{$_}]"; ++ ++ if (/^dso$/) ++ { $no_dso = 1; } ++ elsif (/^threads$/) ++ { $no_threads = 1; } ++ elsif (/^shared$/) ++ { $no_shared = 1; } ++ elsif (/^zlib$/) ++ { $zlib = 0; } ++ elsif (/^montasm$/) ++ { $montasm = 0; } ++ elsif (/^static-engine$/) ++ { } ++ elsif (/^zlib-dynamic$/) ++ { } ++ elsif (/^symlinks$/) ++ { $symlink = 0; } ++ elsif (/^sse2$/) ++ { $no_sse2 = 1; } ++ else ++ { ++ my ($ALGO, $algo); ++ ($ALGO = $algo = $_) =~ tr/[a-z]/[A-Z]/; ++ ++ if (/^asm$/ || /^err$/ || /^hw$/ || /^hw-/) ++ { ++ $openssl_other_defines .= "#define OPENSSL_NO_$ALGO\n"; ++ print " OPENSSL_NO_$ALGO"; ++ ++ if (/^err$/) { $flags .= "-DOPENSSL_NO_ERR "; } ++ elsif (/^asm$/) { $no_asm = 1; } ++ } ++ else ++ { ++ $openssl_algorithm_defines .= "#define OPENSSL_NO_$ALGO\n"; ++ print " OPENSSL_NO_$ALGO"; ++ ++ if (/^krb5$/) ++ { $no_krb5 = 1; } ++ else ++ { ++ push @skip, $algo; ++ print " (skip dir)"; ++ ++ $depflags .= " -DOPENSSL_NO_$ALGO"; ++ } ++ } ++ } ++ ++ print "\n"; ++ } ++ ++ ++my $IsMK1MF=scalar grep /^$target$/,@MK1MF_Builds; ++ ++$IsMK1MF=1 if ($target eq "mingw" && $^O ne "cygwin" && !is_msys()); ++ ++$no_shared = 0 if ($fipsdso && !$IsMK1MF); ++ ++$exe_ext=".exe" if ($target eq "Cygwin" || $target eq "DJGPP" || $target eq "mingw"); ++$exe_ext=".nlm" if ($target =~ /netware/); ++$exe_ext=".pm" if ($target =~ /vos/); ++if ($openssldir eq "" and $prefix eq "") ++ { ++ if ($fips) ++ { ++ $openssldir="/usr/local/ssl/fips"; ++ } ++ else ++ { ++ $openssldir="/usr/local/ssl"; ++ } ++ } ++$prefix=$openssldir if $prefix eq ""; ++ ++$default_ranlib= &which("ranlib") or $default_ranlib="true"; ++$perl=$ENV{'PERL'} or $perl=&which("perl5") or $perl=&which("perl") ++ or $perl="perl"; ++ ++chop $openssldir if $openssldir =~ /\/$/; ++chop $prefix if $prefix =~ /.\/$/; ++ ++$openssldir=$prefix . "/ssl" if $openssldir eq ""; ++$openssldir=$prefix . "/" . $openssldir if $openssldir !~ /(^\/|^[a-zA-Z]:[\\\/])/; ++ ++ ++print "IsMK1MF=$IsMK1MF\n"; ++ ++# '%' in $lflags is used to split flags to "pre-" and post-flags ++my ($prelflags,$postlflags)=split('%',$lflags); ++if (defined($postlflags)) { $lflags=$postlflags; } ++else { $lflags=$prelflags; undef $prelflags; } ++ ++my $no_shared_warn=0; ++my $no_user_cflags=0; ++ ++if ($flags ne "") { $cflags="$flags$cflags"; } ++else { $no_user_cflags=1; } ++ ++# Kerberos settings. The flavor must be provided from outside, either through ++# the script "config" or manually. ++if (!$no_krb5) ++ { ++ my ($lresolv, $lpath, $lext); ++ if ($withargs{"krb5-flavor"} =~ /^[Hh]eimdal$/) ++ { ++ die "Sorry, Heimdal is currently not supported\n"; ++ } ++ ##### HACK to force use of Heimdal. ++ ##### WARNING: Since we don't really have adequate support for Heimdal, ++ ##### using this will break the build. You'll have to make ++ ##### changes to the source, and if you do, please send ++ ##### patches to openssl-dev@openssl.org ++ if ($withargs{"krb5-flavor"} =~ /^force-[Hh]eimdal$/) ++ { ++ warn "Heimdal isn't really supported. Your build WILL break\n"; ++ warn "If you fix the problems, please send a patch to openssl-dev\@openssl.org\n"; ++ $withargs{"krb5-dir"} = "/usr/heimdal" ++ if $withargs{"krb5-dir"} eq ""; ++ $withargs{"krb5-lib"} = "-L".$withargs{"krb5-dir"}. 
++ "/lib -lgssapi -lkrb5 -lcom_err" ++ if $withargs{"krb5-lib"} eq "" && !$IsMK1MF; ++ $cflags="-DKRB5_HEIMDAL $cflags"; ++ } ++ if ($withargs{"krb5-flavor"} =~ /^[Mm][Ii][Tt]/) ++ { ++ $withargs{"krb5-dir"} = "/usr/kerberos" ++ if $withargs{"krb5-dir"} eq ""; ++ $withargs{"krb5-lib"} = "-L".$withargs{"krb5-dir"}. ++ "/lib -lgssapi_krb5 -lkrb5 -lcom_err -lk5crypto" ++ if $withargs{"krb5-lib"} eq "" && !$IsMK1MF; ++ $cflags="-DKRB5_MIT $cflags"; ++ $withargs{"krb5-flavor"} =~ s/^[Mm][Ii][Tt][._-]*//; ++ if ($withargs{"krb5-flavor"} =~ /^1[._-]*[01]/) ++ { ++ $cflags="-DKRB5_MIT_OLD11 $cflags"; ++ } ++ } ++ LRESOLV: ++ foreach $lpath ("/lib", "/usr/lib") ++ { ++ foreach $lext ("a", "so") ++ { ++ $lresolv = "$lpath/libresolv.$lext"; ++ last LRESOLV if (-r "$lresolv"); ++ $lresolv = ""; ++ } ++ } ++ $withargs{"krb5-lib"} .= " -lresolv" ++ if ("$lresolv" ne ""); ++ $withargs{"krb5-include"} = "-I".$withargs{"krb5-dir"}."/include" ++ if $withargs{"krb5-include"} eq "" && ++ $withargs{"krb5-dir"} ne ""; ++ } ++ ++# The DSO code currently always implements all functions so that no ++# applications will have to worry about that from a compilation point ++# of view. However, the "method"s may return zero unless that platform ++# has support compiled in for them. Currently each method is enabled ++# by a define "DSO_" ... we translate the "dso_scheme" config ++# string entry into using the following logic; ++my $dso_cflags; ++if (!$no_dso && $dso_scheme ne "") ++ { ++ $dso_scheme =~ tr/[a-z]/[A-Z]/; ++ if ($dso_scheme eq "DLFCN") ++ { ++ $dso_cflags = "-DDSO_DLFCN -DHAVE_DLFCN_H"; ++ } ++ elsif ($dso_scheme eq "DLFCN_NO_H") ++ { ++ $dso_cflags = "-DDSO_DLFCN"; ++ } ++ else ++ { ++ $dso_cflags = "-DDSO_$dso_scheme"; ++ } ++ $cflags = "$dso_cflags $cflags"; ++ } ++ ++my $thread_cflags; ++my $thread_defines; ++if ($thread_cflag ne "(unknown)" && !$no_threads) ++ { ++ # If we know how to do it, support threads by default. ++ $threads = 1; ++ } ++if ($thread_cflag eq "(unknown)" && $threads) ++ { ++ # If the user asked for "threads", [s]he is also expected to ++ # provide any system-dependent compiler options that are ++ # necessary. 
++ if ($no_user_cflags) ++ { ++ print "You asked for multi-threading support, but didn't\n"; ++ print "provide any system-specific compiler options\n"; ++ exit(1); ++ } ++ $thread_cflags="-DOPENSSL_THREADS $cflags" ; ++ $thread_defines .= "#define OPENSSL_THREADS\n"; ++ } ++else ++ { ++ $thread_cflags="-DOPENSSL_THREADS $thread_cflag $cflags"; ++ $thread_defines .= "#define OPENSSL_THREADS\n"; ++# my $def; ++# foreach $def (split ' ',$thread_cflag) ++# { ++# if ($def =~ s/^-D// && $def !~ /^_/) ++# { ++# $thread_defines .= "#define $def\n"; ++# } ++# } ++ } ++ ++$lflags="$libs$lflags" if ($libs ne ""); ++ ++if ($no_asm) ++ { ++ $cpuid_obj=$bn_obj=$des_obj=$aes_obj=$bf_obj=$cast_obj=$rc4_obj=$rc5_obj=""; ++ $sha1_obj=$md5_obj=$rmd160_obj=""; ++ $cflags=~s/\-D[BL]_ENDIAN// if ($fips); ++ $thread_cflags=~s/\-D[BL]_ENDIAN// if ($fips); ++ } ++if ($montasm) ++ { ++ $bn_obj =~ s/MAYBE-MO86-/mo86-/; ++ } ++else ++ { ++ $bn_obj =~ s/MAYBE-MO86-[a-z.]*//; ++ } ++ ++if (!$no_shared) ++ { ++ $cast_obj=""; # CAST assembler is not PIC ++ } ++ ++if ($threads) ++ { ++ $cflags=$thread_cflags; ++ $openssl_thread_defines .= $thread_defines; ++ } ++ ++if ($zlib) ++ { ++ $cflags = "-DZLIB $cflags"; ++ if (defined($disabled{"zlib-dynamic"})) ++ { ++ $lflags = "$lflags -lz"; ++ } ++ else ++ { ++ $cflags = "-DZLIB_SHARED $cflags"; ++ } ++ } ++ ++# You will find shlib_mark1 and shlib_mark2 explained in Makefile.org ++my $shared_mark = ""; ++if ($shared_target eq "") ++ { ++ $no_shared_warn = 1 if !$no_shared && !$fips; ++ $no_shared = 1; ++ } ++if (!$no_shared) ++ { ++ if ($shared_cflag ne "") ++ { ++ $cflags = "$shared_cflag -DOPENSSL_PIC $cflags"; ++ } ++ } ++ ++if (!$IsMK1MF) ++ { ++ if ($no_shared) ++ { ++ $openssl_other_defines.="#define OPENSSL_NO_DYNAMIC_ENGINE\n"; ++ } ++ else ++ { ++ $openssl_other_defines.="#define OPENSSL_NO_STATIC_ENGINE\n"; ++ } ++ } ++ ++$cpuid_obj.=" uplink.o uplink-cof.o" if ($cflags =~ /\-DOPENSSL_USE_APPLINK/); ++ ++# ++# Platform fix-ups ++# ++if ($target =~ /\-icc$/) # Intel C compiler ++ { ++ my $iccver=0; ++ if (open(FD,"$cc -V 2>&1 |")) ++ { ++ while() { $iccver=$1 if (/Version ([0-9]+)\./); } ++ close(FD); ++ } ++ if ($iccver>=8) ++ { ++ # Eliminate unnecessary dependency from libirc.a. This is ++ # essential for shared library support, as otherwise ++ # apps/openssl can end up in endless loop upon startup... ++ $cflags.=" -Dmemcpy=__builtin_memcpy -Dmemset=__builtin_memset"; ++ } ++ if ($iccver>=9) ++ { ++ $cflags.=" -i-static"; ++ $cflags=~s/\-no_cpprt/-no-cpprt/; ++ } ++ if ($iccver>=10) ++ { ++ $cflags=~s/\-i\-static/-static-intel/; ++ } ++ } ++ ++# Unlike other OSes (like Solaris, Linux, Tru64, IRIX) BSD run-time ++# linkers (tested OpenBSD, NetBSD and FreeBSD) "demand" RPATH set on ++# .so objects. Apparently application RPATH is not global and does ++# not apply to .so linked with other .so. Problem manifests itself ++# when libssl.so fails to load libcrypto.so. One can argue that we ++# should engrave this into Makefile.shared rules or into BSD-* config ++# lines above. Meanwhile let's try to be cautious and pass -rpath to ++# linker only when --prefix is not /usr. 
++if ($target =~ /^BSD\-/) ++ { ++ $shared_ldflag.=" -Wl,-rpath,\$(LIBRPATH)" if ($prefix !~ m|^/usr[/]*$|); ++ } ++ ++if ($sys_id ne "") ++ { ++ #$cflags="-DOPENSSL_SYSNAME_$sys_id $cflags"; ++ $openssl_sys_defines="#define OPENSSL_SYSNAME_$sys_id\n"; ++ } ++ ++if ($ranlib eq "") ++ { ++ $ranlib = $default_ranlib; ++ } ++ ++#my ($bn1)=split(/\s+/,$bn_obj); ++#$bn1 = "" unless defined $bn1; ++#$bn1=$bn_asm unless ($bn1 =~ /\.o$/); ++#$bn_obj="$bn1"; ++ ++$cpuid_obj="" if ($processor eq "386"); ++ ++$bn_obj = $bn_asm unless $bn_obj ne ""; ++# bn86* is the only one implementing bn_*_part_words ++$cflags.=" -DOPENSSL_BN_ASM_PART_WORDS" if ($bn_obj =~ /bn86/); ++$cflags.=" -DOPENSSL_IA32_SSE2" if (!$no_sse2 && $bn_obj =~ /bn86/); ++ ++$cflags.=" -DOPENSSL_BN_ASM_MONT" if ($bn_obj =~ /\-mont|mo86\-/); ++ ++if ($fips) ++ { ++ $openssl_other_defines.="#define OPENSSL_FIPS\n"; ++ } ++ ++$des_obj=$des_enc unless ($des_obj =~ /\.o$/); ++$bf_obj=$bf_enc unless ($bf_obj =~ /\.o$/); ++$cast_obj=$cast_enc unless ($cast_obj =~ /\.o$/); ++$rc4_obj=$rc4_enc unless ($rc4_obj =~ /\.o$/); ++$rc5_obj=$rc5_enc unless ($rc5_obj =~ /\.o$/); ++if ($sha1_obj =~ /\.o$/) ++ { ++# $sha1_obj=$sha1_enc; ++ $cflags.=" -DSHA1_ASM" if ($sha1_obj =~ /sx86/ || $sha1_obj =~ /sha1/); ++ $cflags.=" -DSHA256_ASM" if ($sha1_obj =~ /sha256/); ++ $cflags.=" -DSHA512_ASM" if ($sha1_obj =~ /sha512/); ++ if ($sha1_obj =~ /sse2/) ++ { if ($no_sse2) ++ { $sha1_obj =~ s/\S*sse2\S+//; } ++ elsif ($cflags !~ /OPENSSL_IA32_SSE2/) ++ { $cflags.=" -DOPENSSL_IA32_SSE2"; } ++ } ++ } ++if ($md5_obj =~ /\.o$/) ++ { ++# $md5_obj=$md5_enc; ++ $cflags.=" -DMD5_ASM"; ++ } ++if ($rmd160_obj =~ /\.o$/) ++ { ++# $rmd160_obj=$rmd160_enc; ++ $cflags.=" -DRMD160_ASM"; ++ } ++if ($aes_obj =~ /\.o$/) ++ { ++ $cflags.=" -DAES_ASM"; ++ } ++else { ++ $aes_obj=$aes_enc; ++ } ++ ++# "Stringify" the C flags string. This permits it to be made part of a string ++# and works as well on command lines. 
++$cflags =~ s/([\\\"])/\\\1/g; ++ ++my $version = "unknown"; ++my $version_num = "unknown"; ++my $major = "unknown"; ++my $minor = "unknown"; ++my $shlib_version_number = "unknown"; ++my $shlib_version_history = "unknown"; ++my $shlib_major = "unknown"; ++my $shlib_minor = "unknown"; ++ ++open(IN,') ++ { ++ $version=$1 if /OPENSSL.VERSION.TEXT.*OpenSSL (\S+) /; ++ $version_num=$1 if /OPENSSL.VERSION.NUMBER.*0x(\S+)/; ++ $shlib_version_number=$1 if /SHLIB_VERSION_NUMBER *"([^"]+)"/; ++ $shlib_version_history=$1 if /SHLIB_VERSION_HISTORY *"([^"]*)"/; ++ } ++close(IN); ++if ($shlib_version_history ne "") { $shlib_version_history .= ":"; } ++ ++if ($version =~ /(^[0-9]*)\.([0-9\.]*)/) ++ { ++ $major=$1; ++ $minor=$2; ++ } ++ ++if ($shlib_version_number =~ /(^[0-9]*)\.([0-9\.]*)/) ++ { ++ $shlib_major=$1; ++ $shlib_minor=$2; ++ } ++ ++open(IN,'$Makefile.new") || die "unable to create $Makefile.new:$!\n"; ++print OUT "### Generated automatically from Makefile.org by Configure.\n\n"; ++my $sdirs=0; ++while () ++ { ++ chomp; ++ $sdirs = 1 if /^SDIRS=/; ++ if ($sdirs) { ++ my $dir; ++ foreach $dir (@skip) { ++ s/(\s)$dir\s/$1/; ++ s/\s$dir$//; ++ } ++ } ++ $sdirs = 0 unless /\\$/; ++ s/^VERSION=.*/VERSION=$version/; ++ s/^MAJOR=.*/MAJOR=$major/; ++ s/^MINOR=.*/MINOR=$minor/; ++ s/^SHLIB_VERSION_NUMBER=.*/SHLIB_VERSION_NUMBER=$shlib_version_number/; ++ s/^SHLIB_VERSION_HISTORY=.*/SHLIB_VERSION_HISTORY=$shlib_version_history/; ++ s/^SHLIB_MAJOR=.*/SHLIB_MAJOR=$shlib_major/; ++ s/^SHLIB_MINOR=.*/SHLIB_MINOR=$shlib_minor/; ++ s/^SHLIB_EXT=.*/SHLIB_EXT=$shared_extension/; ++ s/^INSTALLTOP=.*$/INSTALLTOP=$prefix/; ++ s/^OPENSSLDIR=.*$/OPENSSLDIR=$openssldir/; ++ s/^INSTALL_PREFIX=.*$/INSTALL_PREFIX=$install_prefix/; ++ s/^PLATFORM=.*$/PLATFORM=$target/; ++ s/^OPTIONS=.*$/OPTIONS=$options/; ++ s/^CONFIGURE_ARGS=.*$/CONFIGURE_ARGS=$argvstring/; ++ s/^CC=.*$/CC= $cc/; ++ s/^MAKEDEPPROG=.*$/MAKEDEPPROG= $cc/ if $cc eq "gcc"; ++ s/^CFLAG=.*$/CFLAG= $cflags/; ++ s/^DEPFLAG=.*$/DEPFLAG=$depflags/; ++ s/^PEX_LIBS=.*$/PEX_LIBS= $prelflags/; ++ s/^EX_LIBS=.*$/EX_LIBS= $lflags/; ++ s/^EXE_EXT=.*$/EXE_EXT= $exe_ext/; ++ s/^CPUID_OBJ=.*$/CPUID_OBJ= $cpuid_obj/; ++ s/^BN_ASM=.*$/BN_ASM= $bn_obj/; ++ s/^DES_ENC=.*$/DES_ENC= $des_obj/; ++ s/^AES_ASM_OBJ=.*$/AES_ASM_OBJ= $aes_obj/; ++ s/^BF_ENC=.*$/BF_ENC= $bf_obj/; ++ s/^CAST_ENC=.*$/CAST_ENC= $cast_obj/; ++ s/^RC4_ENC=.*$/RC4_ENC= $rc4_obj/; ++ s/^RC5_ENC=.*$/RC5_ENC= $rc5_obj/; ++ s/^MD5_ASM_OBJ=.*$/MD5_ASM_OBJ= $md5_obj/; ++ s/^SHA1_ASM_OBJ=.*$/SHA1_ASM_OBJ= $sha1_obj/; ++ s/^RMD160_ASM_OBJ=.*$/RMD160_ASM_OBJ= $rmd160_obj/; ++ s/^PROCESSOR=.*/PROCESSOR= $processor/; ++ s/^RANLIB=.*/RANLIB= $ranlib/; ++ s/^ARFLAGS=.*/ARFLAGS= $arflags/; ++ s/^PERL=.*/PERL= $perl/; ++ s/^KRB5_INCLUDES=.*/KRB5_INCLUDES=$withargs{"krb5-include"}/; ++ s/^LIBKRB5=.*/LIBKRB5=$withargs{"krb5-lib"}/; ++ s/^LIBZLIB=.*/LIBZLIB=$withargs{"zlib-lib"}/; ++ s/^ZLIB_INCLUDE=.*/ZLIB_INCLUDE=$withargs{"zlib-include"}/; ++ s/^FIPSLIBDIR=.*/FIPSLIBDIR=$fipslibdir/; ++ if ($fipsdso) ++ { ++ s/^FIPSCANLIB=.*/FIPSCANLIB=libfips/; ++ s/^SHARED_FIPS=.*/SHARED_FIPS=libfips\$(SHLIB_EXT)/; ++ s/^SHLIBDIRS=.*/SHLIBDIRS= crypto ssl fips/; ++ } ++ else ++ { ++ s/^FIPSCANLIB=.*/FIPSCANLIB=libcrypto/ if $fips; ++ s/^SHARED_FIPS=.*/SHARED_FIPS=/; ++ s/^SHLIBDIRS=.*/SHLIBDIRS= crypto ssl/; ++ } ++ s/^FIPSCANISTERINTERNAL=.*/FIPSCANISTERINTERNAL=$fipscanisterinternal/; ++ s/^BASEADDR=.*/BASEADDR=$baseaddr/; ++ s/^SHLIB_TARGET=.*/SHLIB_TARGET=$shared_target/; ++ s/^SHLIB_MARK=.*/SHLIB_MARK=$shared_mark/; ++ 
s/^SHARED_LIBS=.*/SHARED_LIBS=\$(SHARED_FIPS) \$(SHARED_CRYPTO) \$(SHARED_SSL)/ if (!$no_shared); ++ if ($shared_extension ne "" && $shared_extension =~ /^\.s([ol])\.[^\.]*$/) ++ { ++ my $sotmp = $1; ++ s/^SHARED_LIBS_LINK_EXTS=.*/SHARED_LIBS_LINK_EXTS=.s$sotmp/; ++ } ++ elsif ($shared_extension ne "" && $shared_extension =~ /^\.[^\.]*\.dylib$/) ++ { ++ s/^SHARED_LIBS_LINK_EXTS=.*/SHARED_LIBS_LINK_EXTS=.dylib/; ++ } ++ elsif ($shared_extension ne "" && $shared_extension =~ /^\.s([ol])\.[^\.]*\.[^\.]*$/) ++ { ++ my $sotmp = $1; ++ s/^SHARED_LIBS_LINK_EXTS=.*/SHARED_LIBS_LINK_EXTS=.s$sotmp.\$(SHLIB_MAJOR) .s$sotmp/; ++ } ++ elsif ($shared_extension ne "" && $shared_extension =~ /^\.[^\.]*\.[^\.]*\.dylib$/) ++ { ++ s/^SHARED_LIBS_LINK_EXTS=.*/SHARED_LIBS_LINK_EXTS=.\$(SHLIB_MAJOR).dylib .dylib/; ++ } ++ s/^SHARED_LDFLAGS=.*/SHARED_LDFLAGS=$shared_ldflag/; ++ print OUT $_."\n"; ++ } ++close(IN); ++close(OUT); ++rename($Makefile,"$Makefile.bak") || die "unable to rename $Makefile\n" if -e $Makefile; ++rename("$Makefile.new",$Makefile) || die "unable to rename $Makefile.new\n"; ++ ++print "CC =$cc\n"; ++print "CFLAG =$cflags\n"; ++print "EX_LIBS =$lflags\n"; ++print "CPUID_OBJ =$cpuid_obj\n"; ++print "BN_ASM =$bn_obj\n"; ++print "DES_ENC =$des_obj\n"; ++print "AES_ASM_OBJ =$aes_obj\n"; ++print "BF_ENC =$bf_obj\n"; ++print "CAST_ENC =$cast_obj\n"; ++print "RC4_ENC =$rc4_obj\n"; ++print "RC5_ENC =$rc5_obj\n"; ++print "MD5_OBJ_ASM =$md5_obj\n"; ++print "SHA1_OBJ_ASM =$sha1_obj\n"; ++print "RMD160_OBJ_ASM=$rmd160_obj\n"; ++print "PROCESSOR =$processor\n"; ++print "RANLIB =$ranlib\n"; ++print "ARFLAGS =$arflags\n"; ++print "PERL =$perl\n"; ++print "KRB5_INCLUDES =",$withargs{"krb5-include"},"\n" ++ if $withargs{"krb5-include"} ne ""; ++ ++my $des_ptr=0; ++my $des_risc1=0; ++my $des_risc2=0; ++my $des_unroll=0; ++my $bn_ll=0; ++my $def_int=2; ++my $rc4_int=$def_int; ++my $md2_int=$def_int; ++my $idea_int=$def_int; ++my $rc2_int=$def_int; ++my $rc4_idx=0; ++my $rc4_chunk=0; ++my $bf_ptr=0; ++my @type=("char","short","int","long"); ++my ($b64l,$b64,$b32,$b16,$b8)=(0,0,1,0,0); ++my $export_var_as_fn=0; ++ ++my $des_int; ++ ++foreach (sort split(/\s+/,$bn_ops)) ++ { ++ $des_ptr=1 if /DES_PTR/; ++ $des_risc1=1 if /DES_RISC1/; ++ $des_risc2=1 if /DES_RISC2/; ++ $des_unroll=1 if /DES_UNROLL/; ++ $des_int=1 if /DES_INT/; ++ $bn_ll=1 if /BN_LLONG/; ++ $rc4_int=0 if /RC4_CHAR/; ++ $rc4_int=3 if /RC4_LONG/; ++ $rc4_idx=1 if /RC4_INDEX/; ++ $rc4_chunk=1 if /RC4_CHUNK/; ++ $rc4_chunk=2 if /RC4_CHUNK_LL/; ++ $md2_int=0 if /MD2_CHAR/; ++ $md2_int=3 if /MD2_LONG/; ++ $idea_int=1 if /IDEA_SHORT/; ++ $idea_int=3 if /IDEA_LONG/; ++ $rc2_int=1 if /RC2_SHORT/; ++ $rc2_int=3 if /RC2_LONG/; ++ $bf_ptr=1 if $_ eq "BF_PTR"; ++ $bf_ptr=2 if $_ eq "BF_PTR2"; ++ ($b64l,$b64,$b32,$b16,$b8)=(0,1,0,0,0) if /SIXTY_FOUR_BIT/; ++ ($b64l,$b64,$b32,$b16,$b8)=(1,0,0,0,0) if /SIXTY_FOUR_BIT_LONG/; ++ ($b64l,$b64,$b32,$b16,$b8)=(0,0,1,0,0) if /THIRTY_TWO_BIT/; ++ ($b64l,$b64,$b32,$b16,$b8)=(0,0,0,1,0) if /SIXTEEN_BIT/; ++ ($b64l,$b64,$b32,$b16,$b8)=(0,0,0,0,1) if /EIGHT_BIT/; ++ $export_var_as_fn=1 if /EXPORT_VAR_AS_FN/; ++ } ++ ++open(IN,'crypto/opensslconf.h.new') || die "unable to create crypto/opensslconf.h.new:$!\n"; ++print OUT "/* opensslconf.h */\n"; ++print OUT "/* WARNING: Generated automatically from opensslconf.h.in by Configure. 
*/\n\n"; ++ ++print OUT "/* OpenSSL was configured with the following options: */\n"; ++my $openssl_algorithm_defines_trans = $openssl_algorithm_defines; ++$openssl_experimental_defines =~ s/^\s*#\s*define\s+OPENSSL_NO_(.*)/#ifndef OPENSSL_EXPERIMENTAL_$1\n# ifndef OPENSSL_NO_$1\n# define OPENSSL_NO_$1\n# endif\n#endif/mg; ++$openssl_algorithm_defines_trans =~ s/^\s*#\s*define\s+OPENSSL_(.*)/# if defined(OPENSSL_$1) \&\& !defined($1)\n# define $1\n# endif/mg; ++$openssl_algorithm_defines =~ s/^\s*#\s*define\s+(.*)/#ifndef $1\n# define $1\n#endif/mg; ++$openssl_algorithm_defines = " /* no ciphers excluded */\n" if $openssl_algorithm_defines eq ""; ++$openssl_thread_defines =~ s/^\s*#\s*define\s+(.*)/#ifndef $1\n# define $1\n#endif/mg; ++$openssl_sys_defines =~ s/^\s*#\s*define\s+(.*)/#ifndef $1\n# define $1\n#endif/mg; ++$openssl_other_defines =~ s/^\s*#\s*define\s+(.*)/#ifndef $1\n# define $1\n#endif/mg; ++print OUT $openssl_sys_defines; ++print OUT "#ifndef OPENSSL_DOING_MAKEDEPEND\n\n"; ++print OUT $openssl_experimental_defines; ++print OUT "\n"; ++print OUT $openssl_algorithm_defines; ++print OUT "\n#endif /* OPENSSL_DOING_MAKEDEPEND */\n\n"; ++print OUT $openssl_thread_defines; ++print OUT $openssl_other_defines,"\n"; ++ ++print OUT "/* The OPENSSL_NO_* macros are also defined as NO_* if the application\n"; ++print OUT " asks for it. This is a transient feature that is provided for those\n"; ++print OUT " who haven't had the time to do the appropriate changes in their\n"; ++print OUT " applications. */\n"; ++print OUT "#ifdef OPENSSL_ALGORITHM_DEFINES\n"; ++print OUT $openssl_algorithm_defines_trans; ++print OUT "#endif\n\n"; ++ ++print OUT "#define OPENSSL_CPUID_OBJ\n\n" if ($cpuid_obj); ++ ++while () ++ { ++ if (/^#define\s+OPENSSLDIR/) ++ { print OUT "#define OPENSSLDIR \"$openssldir\"\n"; } ++ elsif (/^#define\s+ENGINESDIR/) ++ { print OUT "#define ENGINESDIR \"$prefix/lib/engines\"\n"; } ++ elsif (/^#((define)|(undef))\s+OPENSSL_EXPORT_VAR_AS_FUNCTION/) ++ { printf OUT "#undef OPENSSL_EXPORT_VAR_AS_FUNCTION\n" ++ if $export_var_as_fn; ++ printf OUT "#%s OPENSSL_EXPORT_VAR_AS_FUNCTION\n", ++ ($export_var_as_fn)?"define":"undef"; } ++ elsif (/^#define\s+OPENSSL_UNISTD/) ++ { ++ $unistd = "" if $unistd eq ""; ++ print OUT "#define OPENSSL_UNISTD $unistd\n"; ++ } ++ elsif (/^#((define)|(undef))\s+SIXTY_FOUR_BIT_LONG/) ++ { printf OUT "#%s SIXTY_FOUR_BIT_LONG\n",($b64l)?"define":"undef"; } ++ elsif (/^#((define)|(undef))\s+SIXTY_FOUR_BIT/) ++ { printf OUT "#%s SIXTY_FOUR_BIT\n",($b64)?"define":"undef"; } ++ elsif (/^#((define)|(undef))\s+THIRTY_TWO_BIT/) ++ { printf OUT "#%s THIRTY_TWO_BIT\n",($b32)?"define":"undef"; } ++ elsif (/^#((define)|(undef))\s+SIXTEEN_BIT/) ++ { printf OUT "#%s SIXTEEN_BIT\n",($b16)?"define":"undef"; } ++ elsif (/^#((define)|(undef))\s+EIGHT_BIT/) ++ { printf OUT "#%s EIGHT_BIT\n",($b8)?"define":"undef"; } ++ elsif (/^#((define)|(undef))\s+BN_LLONG\s*$/) ++ { printf OUT "#%s BN_LLONG\n",($bn_ll)?"define":"undef"; } ++ elsif (/^\#define\s+DES_LONG\s+.*/) ++ { printf OUT "#define DES_LONG unsigned %s\n", ++ ($des_int)?'int':'long'; } ++ elsif (/^\#(define|undef)\s+DES_PTR/) ++ { printf OUT "#%s DES_PTR\n",($des_ptr)?'define':'undef'; } ++ elsif (/^\#(define|undef)\s+DES_RISC1/) ++ { printf OUT "#%s DES_RISC1\n",($des_risc1)?'define':'undef'; } ++ elsif (/^\#(define|undef)\s+DES_RISC2/) ++ { printf OUT "#%s DES_RISC2\n",($des_risc2)?'define':'undef'; } ++ elsif (/^\#(define|undef)\s+DES_UNROLL/) ++ { printf OUT "#%s DES_UNROLL\n",($des_unroll)?'define':'undef'; } 
++ elsif (/^#define\s+RC4_INT\s/) ++ { printf OUT "#define RC4_INT unsigned %s\n",$type[$rc4_int]; } ++ elsif (/^#undef\s+RC4_CHUNK/) ++ { ++ printf OUT "#undef RC4_CHUNK\n" if $rc4_chunk==0; ++ printf OUT "#define RC4_CHUNK unsigned long\n" if $rc4_chunk==1; ++ printf OUT "#define RC4_CHUNK unsigned long long\n" if $rc4_chunk==2; ++ } ++ elsif (/^#((define)|(undef))\s+RC4_INDEX/) ++ { printf OUT "#%s RC4_INDEX\n",($rc4_idx)?"define":"undef"; } ++ elsif (/^#(define|undef)\s+I386_ONLY/) ++ { printf OUT "#%s I386_ONLY\n", ($processor eq "386")? ++ "define":"undef"; } ++ elsif (/^#define\s+MD2_INT\s/) ++ { printf OUT "#define MD2_INT unsigned %s\n",$type[$md2_int]; } ++ elsif (/^#define\s+IDEA_INT\s/) ++ {printf OUT "#define IDEA_INT unsigned %s\n",$type[$idea_int];} ++ elsif (/^#define\s+RC2_INT\s/) ++ {printf OUT "#define RC2_INT unsigned %s\n",$type[$rc2_int];} ++ elsif (/^#(define|undef)\s+BF_PTR/) ++ { ++ printf OUT "#undef BF_PTR\n" if $bf_ptr == 0; ++ printf OUT "#define BF_PTR\n" if $bf_ptr == 1; ++ printf OUT "#define BF_PTR2\n" if $bf_ptr == 2; ++ } ++ else ++ { print OUT $_; } ++ } ++close(IN); ++close(OUT); ++rename("crypto/opensslconf.h","crypto/opensslconf.h.bak") || die "unable to rename crypto/opensslconf.h\n" if -e "crypto/opensslconf.h"; ++rename("crypto/opensslconf.h.new","crypto/opensslconf.h") || die "unable to rename crypto/opensslconf.h.new\n"; ++ ++ ++# Fix the date ++ ++print "SIXTY_FOUR_BIT_LONG mode\n" if $b64l; ++print "SIXTY_FOUR_BIT mode\n" if $b64; ++print "THIRTY_TWO_BIT mode\n" if $b32; ++print "SIXTEEN_BIT mode\n" if $b16; ++print "EIGHT_BIT mode\n" if $b8; ++print "DES_PTR used\n" if $des_ptr; ++print "DES_RISC1 used\n" if $des_risc1; ++print "DES_RISC2 used\n" if $des_risc2; ++print "DES_UNROLL used\n" if $des_unroll; ++print "DES_INT used\n" if $des_int; ++print "BN_LLONG mode\n" if $bn_ll; ++print "RC4 uses u$type[$rc4_int]\n" if $rc4_int != $def_int; ++print "RC4_INDEX mode\n" if $rc4_idx; ++print "RC4_CHUNK is undefined\n" if $rc4_chunk==0; ++print "RC4_CHUNK is unsigned long\n" if $rc4_chunk==1; ++print "RC4_CHUNK is unsigned long long\n" if $rc4_chunk==2; ++print "MD2 uses u$type[$md2_int]\n" if $md2_int != $def_int; ++print "IDEA uses u$type[$idea_int]\n" if $idea_int != $def_int; ++print "RC2 uses u$type[$rc2_int]\n" if $rc2_int != $def_int; ++print "BF_PTR used\n" if $bf_ptr == 1; ++print "BF_PTR2 used\n" if $bf_ptr == 2; ++ ++if($IsMK1MF) { ++ open (OUT,">crypto/buildinf.h") || die "Can't open buildinf.h"; ++ printf OUT <ms/version32.rc") || die "Can't open ms/version32.rc"; ++ print OUT < ++ ++LANGUAGE 0x09,0x01 ++ ++1 VERSIONINFO ++ FILEVERSION $v1,$v2,$v3,$v4 ++ PRODUCTVERSION $v1,$v2,$v3,$v4 ++ FILEFLAGSMASK 0x3fL ++#ifdef _DEBUG ++ FILEFLAGS 0x01L ++#else ++ FILEFLAGS 0x00L ++#endif ++ FILEOS VOS__WINDOWS32 ++ FILETYPE VFT_DLL ++ FILESUBTYPE 0x0L ++BEGIN ++ BLOCK "StringFileInfo" ++ BEGIN ++ BLOCK "040904b0" ++ BEGIN ++#if defined(FIPS) ++ VALUE "Comments", "WARNING: TEST VERSION ONLY ***NOT*** FIPS 140-2 VALIDATED.\\0" ++#endif ++ // Required: ++ VALUE "CompanyName", "The OpenSSL Project, http://www.openssl.org/\\0" ++#if defined(FIPS) ++ VALUE "FileDescription", "TEST UNVALIDATED FIPS140-2 DLL\\0" ++#else ++ VALUE "FileDescription", "OpenSSL Shared Library\\0" ++#endif ++ VALUE "FileVersion", "$version\\0" ++#if defined(CRYPTO) ++ VALUE "InternalName", "libeay32\\0" ++ VALUE "OriginalFilename", "libeay32.dll\\0" ++#elif defined(SSL) ++ VALUE "InternalName", "ssleay32\\0" ++ VALUE "OriginalFilename", "ssleay32.dll\\0" ++#elif defined(FIPS) 
++ VALUE "InternalName", "libosslfips\\0" ++ VALUE "OriginalFilename", "libosslfips.dll\\0" ++#endif ++ VALUE "ProductName", "The OpenSSL Toolkit\\0" ++ VALUE "ProductVersion", "$version\\0" ++ // Optional: ++ //VALUE "Comments", "\\0" ++ VALUE "LegalCopyright", "Copyright 1998-2007 The OpenSSL Project. Copyright 1995-1998 Eric A. Young, Tim J. Hudson. All rights reserved.\\0" ++ //VALUE "LegalTrademarks", "\\0" ++ //VALUE "PrivateBuild", "\\0" ++ //VALUE "SpecialBuild", "\\0" ++ END ++ END ++ BLOCK "VarFileInfo" ++ BEGIN ++ VALUE "Translation", 0x409, 0x4b0 ++ END ++END ++EOF ++ close(OUT); ++ } ++ ++print < 78) ++ { ++ print STDERR "\n"; ++ $k=length($i); ++ } ++ print STDERR $i . " "; ++ } ++ foreach $i (sort keys %table) ++ { ++ next if $i !~ /^debug/; ++ $k += length($i) + 1; ++ if ($k > 78) ++ { ++ print STDERR "\n"; ++ $k=length($i); ++ } ++ print STDERR $i . " "; ++ } ++ print STDERR "\n\nNOTE: If in doubt, on Unix-ish systems use './config'.\n"; ++ exit(1); ++ } ++ ++sub which ++ { ++ my($name)=@_; ++ my $path; ++ foreach $path (split /:/, $ENV{PATH}) ++ { ++ if (-f "$path/$name$exe_ext" and -x _) ++ { ++ return "$path/$name$exe_ext" unless ($name eq "perl" and ++ system("$path/$name$exe_ext -e " . '\'exit($]<5.0);\'')); ++ } ++ } ++ } ++ ++sub dofile ++ { ++ my $f; my $p; my %m; my @a; my $k; my $ff; ++ ($f,$p,%m)=@_; ++ ++ open(IN,"<$f.in") || open(IN,"<$f") || die "unable to open $f:$!\n"; ++ @a=; ++ close(IN); ++ foreach $k (keys %m) ++ { ++ grep(/$k/ && ($_=sprintf($m{$k}."\n",$p)),@a); ++ } ++ open(OUT,">$f.new") || die "unable to open $f.new:$!\n"; ++ print OUT @a; ++ close(OUT); ++ rename($f,"$f.bak") || die "unable to rename $f\n" if -e $f; ++ rename("$f.new",$f) || die "unable to rename $f.new\n"; ++ } ++ ++sub print_table_entry ++ { ++ my $target = shift; ++ ++ (my $cc,my $cflags,my $unistd,my $thread_cflag,my $sys_id,my $lflags, ++ my $bn_ops,my $cpuid_obj,my $bn_obj,my $des_obj,my $aes_obj, my $bf_obj, ++ my $md5_obj,my $sha1_obj,my $cast_obj,my $rc4_obj,my $rmd160_obj, ++ my $rc5_obj,my $dso_scheme,my $shared_target,my $shared_cflag, ++ my $shared_ldflag,my $shared_extension,my $ranlib,my $arflags)= ++ split(/\s*:\s*/,$table{$target} . ":" x 30 , -1); ++ ++ print < + #include + #include ++#include + + /* + * In the definition, (xa, xb, xc, xd) are Alice's (x1, x2, x3, x4) or +diff -Naur ../openssl-0.9.8j/crypto/sha/sha512.c ./crypto/sha/sha512.c +--- ../openssl-0.9.8j/crypto/sha/sha512.c 2008-09-16 12:47:28.000000000 +0200 ++++ ./crypto/sha/sha512.c 2009-01-08 10:24:35.000000000 +0100 +@@ -314,7 +314,7 @@ + #ifndef PEDANTIC + # if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM) + # if defined(__x86_64) || defined(__x86_64__) +-# define ROTR(a,n) ({ unsigned long ret; \ ++# define ROTR(a,n) ({ SHA_LONG64 ret; \ + asm ("rorq %1,%0" \ + : "=r"(ret) \ + : "J"(n),"0"(a) \ +diff -Naur ../openssl-0.9.8j/crypto/sha/sha512.c.orig ./crypto/sha/sha512.c.orig +--- ../openssl-0.9.8j/crypto/sha/sha512.c.orig 1970-01-01 01:00:00.000000000 +0100 ++++ ./crypto/sha/sha512.c.orig 2009-01-08 10:24:35.000000000 +0100 +@@ -0,0 +1,547 @@ ++/* crypto/sha/sha512.c */ ++/* ==================================================================== ++ * Copyright (c) 2004 The OpenSSL Project. All rights reserved ++ * according to the OpenSSL license [found in ../../LICENSE]. 
++ * ==================================================================== ++ */ ++#include ++#ifdef OPENSSL_FIPS ++#include ++#endif ++ ++#if !defined(OPENSSL_NO_SHA) && !defined(OPENSSL_NO_SHA512) ++/* ++ * IMPLEMENTATION NOTES. ++ * ++ * As you might have noticed 32-bit hash algorithms: ++ * ++ * - permit SHA_LONG to be wider than 32-bit (case on CRAY); ++ * - optimized versions implement two transform functions: one operating ++ * on [aligned] data in host byte order and one - on data in input ++ * stream byte order; ++ * - share common byte-order neutral collector and padding function ++ * implementations, ../md32_common.h; ++ * ++ * Neither of the above applies to this SHA-512 implementations. Reasons ++ * [in reverse order] are: ++ * ++ * - it's the only 64-bit hash algorithm for the moment of this writing, ++ * there is no need for common collector/padding implementation [yet]; ++ * - by supporting only one transform function [which operates on ++ * *aligned* data in input stream byte order, big-endian in this case] ++ * we minimize burden of maintenance in two ways: a) collector/padding ++ * function is simpler; b) only one transform function to stare at; ++ * - SHA_LONG64 is required to be exactly 64-bit in order to be able to ++ * apply a number of optimizations to mitigate potential performance ++ * penalties caused by previous design decision; ++ * ++ * Caveat lector. ++ * ++ * Implementation relies on the fact that "long long" is 64-bit on ++ * both 32- and 64-bit platforms. If some compiler vendor comes up ++ * with 128-bit long long, adjustment to sha.h would be required. ++ * As this implementation relies on 64-bit integer type, it's totally ++ * inappropriate for platforms which don't support it, most notably ++ * 16-bit platforms. 
++ * ++ */ ++#include ++#include ++ ++#include ++#include ++#include ++ ++#include "cryptlib.h" ++ ++const char SHA512_version[]="SHA-512" OPENSSL_VERSION_PTEXT; ++ ++#if defined(__i386) || defined(__i386__) || defined(_M_IX86) || \ ++ defined(__x86_64) || defined(_M_AMD64) || defined(_M_X64) || \ ++ defined(__s390__) || defined(__s390x__) || \ ++ defined(SHA512_ASM) ++#define SHA512_BLOCK_CAN_MANAGE_UNALIGNED_DATA ++#endif ++ ++int SHA384_Init (SHA512_CTX *c) ++ { ++#ifdef OPENSSL_FIPS ++ FIPS_selftest_check(); ++#endif ++ c->h[0]=U64(0xcbbb9d5dc1059ed8); ++ c->h[1]=U64(0x629a292a367cd507); ++ c->h[2]=U64(0x9159015a3070dd17); ++ c->h[3]=U64(0x152fecd8f70e5939); ++ c->h[4]=U64(0x67332667ffc00b31); ++ c->h[5]=U64(0x8eb44a8768581511); ++ c->h[6]=U64(0xdb0c2e0d64f98fa7); ++ c->h[7]=U64(0x47b5481dbefa4fa4); ++ c->Nl=0; c->Nh=0; ++ c->num=0; c->md_len=SHA384_DIGEST_LENGTH; ++ return 1; ++ } ++ ++int SHA512_Init (SHA512_CTX *c) ++ { ++#ifdef OPENSSL_FIPS ++ FIPS_selftest_check(); ++#endif ++ c->h[0]=U64(0x6a09e667f3bcc908); ++ c->h[1]=U64(0xbb67ae8584caa73b); ++ c->h[2]=U64(0x3c6ef372fe94f82b); ++ c->h[3]=U64(0xa54ff53a5f1d36f1); ++ c->h[4]=U64(0x510e527fade682d1); ++ c->h[5]=U64(0x9b05688c2b3e6c1f); ++ c->h[6]=U64(0x1f83d9abfb41bd6b); ++ c->h[7]=U64(0x5be0cd19137e2179); ++ c->Nl=0; c->Nh=0; ++ c->num=0; c->md_len=SHA512_DIGEST_LENGTH; ++ return 1; ++ } ++ ++#ifndef SHA512_ASM ++static ++#endif ++void sha512_block_data_order (SHA512_CTX *ctx, const void *in, size_t num); ++ ++int SHA512_Final (unsigned char *md, SHA512_CTX *c) ++ { ++ unsigned char *p=(unsigned char *)c->u.p; ++ size_t n=c->num; ++ ++ p[n]=0x80; /* There always is a room for one */ ++ n++; ++ if (n > (sizeof(c->u)-16)) ++ memset (p+n,0,sizeof(c->u)-n), n=0, ++ sha512_block_data_order (c,p,1); ++ ++ memset (p+n,0,sizeof(c->u)-16-n); ++#ifdef B_ENDIAN ++ c->u.d[SHA_LBLOCK-2] = c->Nh; ++ c->u.d[SHA_LBLOCK-1] = c->Nl; ++#else ++ p[sizeof(c->u)-1] = (unsigned char)(c->Nl); ++ p[sizeof(c->u)-2] = (unsigned char)(c->Nl>>8); ++ p[sizeof(c->u)-3] = (unsigned char)(c->Nl>>16); ++ p[sizeof(c->u)-4] = (unsigned char)(c->Nl>>24); ++ p[sizeof(c->u)-5] = (unsigned char)(c->Nl>>32); ++ p[sizeof(c->u)-6] = (unsigned char)(c->Nl>>40); ++ p[sizeof(c->u)-7] = (unsigned char)(c->Nl>>48); ++ p[sizeof(c->u)-8] = (unsigned char)(c->Nl>>56); ++ p[sizeof(c->u)-9] = (unsigned char)(c->Nh); ++ p[sizeof(c->u)-10] = (unsigned char)(c->Nh>>8); ++ p[sizeof(c->u)-11] = (unsigned char)(c->Nh>>16); ++ p[sizeof(c->u)-12] = (unsigned char)(c->Nh>>24); ++ p[sizeof(c->u)-13] = (unsigned char)(c->Nh>>32); ++ p[sizeof(c->u)-14] = (unsigned char)(c->Nh>>40); ++ p[sizeof(c->u)-15] = (unsigned char)(c->Nh>>48); ++ p[sizeof(c->u)-16] = (unsigned char)(c->Nh>>56); ++#endif ++ ++ sha512_block_data_order (c,p,1); ++ ++ if (md==0) return 0; ++ ++ switch (c->md_len) ++ { ++ /* Let compiler decide if it's appropriate to unroll... 
*/ ++ case SHA384_DIGEST_LENGTH: ++ for (n=0;nh[n]; ++ ++ *(md++) = (unsigned char)(t>>56); ++ *(md++) = (unsigned char)(t>>48); ++ *(md++) = (unsigned char)(t>>40); ++ *(md++) = (unsigned char)(t>>32); ++ *(md++) = (unsigned char)(t>>24); ++ *(md++) = (unsigned char)(t>>16); ++ *(md++) = (unsigned char)(t>>8); ++ *(md++) = (unsigned char)(t); ++ } ++ break; ++ case SHA512_DIGEST_LENGTH: ++ for (n=0;nh[n]; ++ ++ *(md++) = (unsigned char)(t>>56); ++ *(md++) = (unsigned char)(t>>48); ++ *(md++) = (unsigned char)(t>>40); ++ *(md++) = (unsigned char)(t>>32); ++ *(md++) = (unsigned char)(t>>24); ++ *(md++) = (unsigned char)(t>>16); ++ *(md++) = (unsigned char)(t>>8); ++ *(md++) = (unsigned char)(t); ++ } ++ break; ++ /* ... as well as make sure md_len is not abused. */ ++ default: return 0; ++ } ++ ++ return 1; ++ } ++ ++int SHA384_Final (unsigned char *md,SHA512_CTX *c) ++{ return SHA512_Final (md,c); } ++ ++int SHA512_Update (SHA512_CTX *c, const void *_data, size_t len) ++ { ++ SHA_LONG64 l; ++ unsigned char *p=c->u.p; ++ const unsigned char *data=(const unsigned char *)_data; ++ ++ if (len==0) return 1; ++ ++ l = (c->Nl+(((SHA_LONG64)len)<<3))&U64(0xffffffffffffffff); ++ if (l < c->Nl) c->Nh++; ++ if (sizeof(len)>=8) c->Nh+=(((SHA_LONG64)len)>>61); ++ c->Nl=l; ++ ++ if (c->num != 0) ++ { ++ size_t n = sizeof(c->u) - c->num; ++ ++ if (len < n) ++ { ++ memcpy (p+c->num,data,len), c->num += len; ++ return 1; ++ } ++ else { ++ memcpy (p+c->num,data,n), c->num = 0; ++ len-=n, data+=n; ++ sha512_block_data_order (c,p,1); ++ } ++ } ++ ++ if (len >= sizeof(c->u)) ++ { ++#ifndef SHA512_BLOCK_CAN_MANAGE_UNALIGNED_DATA ++ if ((size_t)data%sizeof(c->u.d[0]) != 0) ++ while (len >= sizeof(c->u)) ++ memcpy (p,data,sizeof(c->u)), ++ sha512_block_data_order (c,p,1), ++ len -= sizeof(c->u), ++ data += sizeof(c->u); ++ else ++#endif ++ sha512_block_data_order (c,data,len/sizeof(c->u)), ++ data += len, ++ len %= sizeof(c->u), ++ data -= len; ++ } ++ ++ if (len != 0) memcpy (p,data,len), c->num = (int)len; ++ ++ return 1; ++ } ++ ++int SHA384_Update (SHA512_CTX *c, const void *data, size_t len) ++{ return SHA512_Update (c,data,len); } ++ ++void SHA512_Transform (SHA512_CTX *c, const unsigned char *data) ++{ sha512_block_data_order (c,data,1); } ++ ++unsigned char *SHA384(const unsigned char *d, size_t n, unsigned char *md) ++ { ++ SHA512_CTX c; ++ static unsigned char m[SHA384_DIGEST_LENGTH]; ++ ++ if (md == NULL) md=m; ++ SHA384_Init(&c); ++ SHA512_Update(&c,d,n); ++ SHA512_Final(md,&c); ++ OPENSSL_cleanse(&c,sizeof(c)); ++ return(md); ++ } ++ ++unsigned char *SHA512(const unsigned char *d, size_t n, unsigned char *md) ++ { ++ SHA512_CTX c; ++ static unsigned char m[SHA512_DIGEST_LENGTH]; ++ ++ if (md == NULL) md=m; ++ SHA512_Init(&c); ++ SHA512_Update(&c,d,n); ++ SHA512_Final(md,&c); ++ OPENSSL_cleanse(&c,sizeof(c)); ++ return(md); ++ } ++ ++#ifndef SHA512_ASM ++static const SHA_LONG64 K512[80] = { ++ U64(0x428a2f98d728ae22),U64(0x7137449123ef65cd), ++ U64(0xb5c0fbcfec4d3b2f),U64(0xe9b5dba58189dbbc), ++ U64(0x3956c25bf348b538),U64(0x59f111f1b605d019), ++ U64(0x923f82a4af194f9b),U64(0xab1c5ed5da6d8118), ++ U64(0xd807aa98a3030242),U64(0x12835b0145706fbe), ++ U64(0x243185be4ee4b28c),U64(0x550c7dc3d5ffb4e2), ++ U64(0x72be5d74f27b896f),U64(0x80deb1fe3b1696b1), ++ U64(0x9bdc06a725c71235),U64(0xc19bf174cf692694), ++ U64(0xe49b69c19ef14ad2),U64(0xefbe4786384f25e3), ++ U64(0x0fc19dc68b8cd5b5),U64(0x240ca1cc77ac9c65), ++ U64(0x2de92c6f592b0275),U64(0x4a7484aa6ea6e483), ++ 
U64(0x5cb0a9dcbd41fbd4),U64(0x76f988da831153b5), ++ U64(0x983e5152ee66dfab),U64(0xa831c66d2db43210), ++ U64(0xb00327c898fb213f),U64(0xbf597fc7beef0ee4), ++ U64(0xc6e00bf33da88fc2),U64(0xd5a79147930aa725), ++ U64(0x06ca6351e003826f),U64(0x142929670a0e6e70), ++ U64(0x27b70a8546d22ffc),U64(0x2e1b21385c26c926), ++ U64(0x4d2c6dfc5ac42aed),U64(0x53380d139d95b3df), ++ U64(0x650a73548baf63de),U64(0x766a0abb3c77b2a8), ++ U64(0x81c2c92e47edaee6),U64(0x92722c851482353b), ++ U64(0xa2bfe8a14cf10364),U64(0xa81a664bbc423001), ++ U64(0xc24b8b70d0f89791),U64(0xc76c51a30654be30), ++ U64(0xd192e819d6ef5218),U64(0xd69906245565a910), ++ U64(0xf40e35855771202a),U64(0x106aa07032bbd1b8), ++ U64(0x19a4c116b8d2d0c8),U64(0x1e376c085141ab53), ++ U64(0x2748774cdf8eeb99),U64(0x34b0bcb5e19b48a8), ++ U64(0x391c0cb3c5c95a63),U64(0x4ed8aa4ae3418acb), ++ U64(0x5b9cca4f7763e373),U64(0x682e6ff3d6b2b8a3), ++ U64(0x748f82ee5defb2fc),U64(0x78a5636f43172f60), ++ U64(0x84c87814a1f0ab72),U64(0x8cc702081a6439ec), ++ U64(0x90befffa23631e28),U64(0xa4506cebde82bde9), ++ U64(0xbef9a3f7b2c67915),U64(0xc67178f2e372532b), ++ U64(0xca273eceea26619c),U64(0xd186b8c721c0c207), ++ U64(0xeada7dd6cde0eb1e),U64(0xf57d4f7fee6ed178), ++ U64(0x06f067aa72176fba),U64(0x0a637dc5a2c898a6), ++ U64(0x113f9804bef90dae),U64(0x1b710b35131c471b), ++ U64(0x28db77f523047d84),U64(0x32caab7b40c72493), ++ U64(0x3c9ebe0a15c9bebc),U64(0x431d67c49c100d4c), ++ U64(0x4cc5d4becb3e42b6),U64(0x597f299cfc657e2a), ++ U64(0x5fcb6fab3ad6faec),U64(0x6c44198c4a475817) }; ++ ++#ifndef PEDANTIC ++# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM) ++# if defined(__x86_64) || defined(__x86_64__) ++# define ROTR(a,n) ({ unsigned long ret; \ ++ asm ("rorq %1,%0" \ ++ : "=r"(ret) \ ++ : "J"(n),"0"(a) \ ++ : "cc"); ret; }) ++# if !defined(B_ENDIAN) ++# define PULL64(x) ({ SHA_LONG64 ret=*((const SHA_LONG64 *)(&(x))); \ ++ asm ("bswapq %0" \ ++ : "=r"(ret) \ ++ : "0"(ret)); ret; }) ++# endif ++# elif (defined(__i386) || defined(__i386__)) && !defined(B_ENDIAN) ++# if defined(I386_ONLY) ++# define PULL64(x) ({ const unsigned int *p=(const unsigned int *)(&(x));\ ++ unsigned int hi=p[0],lo=p[1]; \ ++ asm("xchgb %%ah,%%al;xchgb %%dh,%%dl;"\ ++ "roll $16,%%eax; roll $16,%%edx; "\ ++ "xchgb %%ah,%%al;xchgb %%dh,%%dl;" \ ++ : "=a"(lo),"=d"(hi) \ ++ : "0"(lo),"1"(hi) : "cc"); \ ++ ((SHA_LONG64)hi)<<32|lo; }) ++# else ++# define PULL64(x) ({ const unsigned int *p=(const unsigned int *)(&(x));\ ++ unsigned int hi=p[0],lo=p[1]; \ ++ asm ("bswapl %0; bswapl %1;" \ ++ : "=r"(lo),"=r"(hi) \ ++ : "0"(lo),"1"(hi)); \ ++ ((SHA_LONG64)hi)<<32|lo; }) ++# endif ++# elif (defined(_ARCH_PPC) && defined(__64BIT__)) || defined(_ARCH_PPC64) ++# define ROTR(a,n) ({ unsigned long ret; \ ++ asm ("rotrdi %0,%1,%2" \ ++ : "=r"(ret) \ ++ : "r"(a),"K"(n)); ret; }) ++# endif ++# elif defined(_MSC_VER) ++# if defined(_WIN64) /* applies to both IA-64 and AMD64 */ ++# define ROTR(a,n) _rotr64((a),n) ++# endif ++# if defined(_M_IX86) && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM) ++# if defined(I386_ONLY) ++ static SHA_LONG64 __fastcall __pull64be(const void *x) ++ { _asm mov edx, [ecx + 0] ++ _asm mov eax, [ecx + 4] ++ _asm xchg dh,dl ++ _asm xchg ah,al ++ _asm rol edx,16 ++ _asm rol eax,16 ++ _asm xchg dh,dl ++ _asm xchg ah,al ++ } ++# else ++ static SHA_LONG64 __fastcall __pull64be(const void *x) ++ { _asm mov edx, [ecx + 0] ++ _asm mov eax, [ecx + 4] ++ _asm bswap edx ++ _asm bswap eax ++ } ++# endif ++# define PULL64(x) __pull64be(&(x)) ++# if 
_MSC_VER<=1200 ++# pragma inline_depth(0) ++# endif ++# endif ++# endif ++#endif ++ ++#ifndef PULL64 ++#define B(x,j) (((SHA_LONG64)(*(((const unsigned char *)(&x))+j)))<<((7-j)*8)) ++#define PULL64(x) (B(x,0)|B(x,1)|B(x,2)|B(x,3)|B(x,4)|B(x,5)|B(x,6)|B(x,7)) ++#endif ++ ++#ifndef ROTR ++#define ROTR(x,s) (((x)>>s) | (x)<<(64-s)) ++#endif ++ ++#define Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39)) ++#define Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41)) ++#define sigma0(x) (ROTR((x),1) ^ ROTR((x),8) ^ ((x)>>7)) ++#define sigma1(x) (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6)) ++ ++#define Ch(x,y,z) (((x) & (y)) ^ ((~(x)) & (z))) ++#define Maj(x,y,z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z))) ++ ++#if defined(OPENSSL_IA32_SSE2) && !defined(OPENSSL_NO_ASM) && !defined(I386_ONLY) ++#define GO_FOR_SSE2(ctx,in,num) do { \ ++ void sha512_block_sse2(void *,const void *,size_t); \ ++ if (!(OPENSSL_ia32cap_P & (1<<26))) break; \ ++ sha512_block_sse2(ctx->h,in,num); return; \ ++ } while (0) ++#endif ++ ++#ifdef OPENSSL_SMALL_FOOTPRINT ++ ++static void sha512_block_data_order (SHA512_CTX *ctx, const void *in, size_t num) ++ { ++ const SHA_LONG64 *W=in; ++ SHA_LONG64 a,b,c,d,e,f,g,h,s0,s1,T1,T2; ++ SHA_LONG64 X[16]; ++ int i; ++ ++#ifdef GO_FOR_SSE2 ++ GO_FOR_SSE2(ctx,in,num); ++#endif ++ ++ while (num--) { ++ ++ a = ctx->h[0]; b = ctx->h[1]; c = ctx->h[2]; d = ctx->h[3]; ++ e = ctx->h[4]; f = ctx->h[5]; g = ctx->h[6]; h = ctx->h[7]; ++ ++ for (i=0;i<16;i++) ++ { ++#ifdef B_ENDIAN ++ T1 = X[i] = W[i]; ++#else ++ T1 = X[i] = PULL64(W[i]); ++#endif ++ T1 += h + Sigma1(e) + Ch(e,f,g) + K512[i]; ++ T2 = Sigma0(a) + Maj(a,b,c); ++ h = g; g = f; f = e; e = d + T1; ++ d = c; c = b; b = a; a = T1 + T2; ++ } ++ ++ for (;i<80;i++) ++ { ++ s0 = X[(i+1)&0x0f]; s0 = sigma0(s0); ++ s1 = X[(i+14)&0x0f]; s1 = sigma1(s1); ++ ++ T1 = X[i&0xf] += s0 + s1 + X[(i+9)&0xf]; ++ T1 += h + Sigma1(e) + Ch(e,f,g) + K512[i]; ++ T2 = Sigma0(a) + Maj(a,b,c); ++ h = g; g = f; f = e; e = d + T1; ++ d = c; c = b; b = a; a = T1 + T2; ++ } ++ ++ ctx->h[0] += a; ctx->h[1] += b; ctx->h[2] += c; ctx->h[3] += d; ++ ctx->h[4] += e; ctx->h[5] += f; ctx->h[6] += g; ctx->h[7] += h; ++ ++ W+=SHA_LBLOCK; ++ } ++ } ++ ++#else ++ ++#define ROUND_00_15(i,a,b,c,d,e,f,g,h) do { \ ++ T1 += h + Sigma1(e) + Ch(e,f,g) + K512[i]; \ ++ h = Sigma0(a) + Maj(a,b,c); \ ++ d += T1; h += T1; } while (0) ++ ++#define ROUND_16_80(i,a,b,c,d,e,f,g,h,X) do { \ ++ s0 = X[(i+1)&0x0f]; s0 = sigma0(s0); \ ++ s1 = X[(i+14)&0x0f]; s1 = sigma1(s1); \ ++ T1 = X[(i)&0x0f] += s0 + s1 + X[(i+9)&0x0f]; \ ++ ROUND_00_15(i,a,b,c,d,e,f,g,h); } while (0) ++ ++static void sha512_block_data_order (SHA512_CTX *ctx, const void *in, size_t num) ++ { ++ const SHA_LONG64 *W=in; ++ SHA_LONG64 a,b,c,d,e,f,g,h,s0,s1,T1; ++ SHA_LONG64 X[16]; ++ int i; ++ ++#ifdef GO_FOR_SSE2 ++ GO_FOR_SSE2(ctx,in,num); ++#endif ++ ++ while (num--) { ++ ++ a = ctx->h[0]; b = ctx->h[1]; c = ctx->h[2]; d = ctx->h[3]; ++ e = ctx->h[4]; f = ctx->h[5]; g = ctx->h[6]; h = ctx->h[7]; ++ ++#ifdef B_ENDIAN ++ T1 = X[0] = W[0]; ROUND_00_15(0,a,b,c,d,e,f,g,h); ++ T1 = X[1] = W[1]; ROUND_00_15(1,h,a,b,c,d,e,f,g); ++ T1 = X[2] = W[2]; ROUND_00_15(2,g,h,a,b,c,d,e,f); ++ T1 = X[3] = W[3]; ROUND_00_15(3,f,g,h,a,b,c,d,e); ++ T1 = X[4] = W[4]; ROUND_00_15(4,e,f,g,h,a,b,c,d); ++ T1 = X[5] = W[5]; ROUND_00_15(5,d,e,f,g,h,a,b,c); ++ T1 = X[6] = W[6]; ROUND_00_15(6,c,d,e,f,g,h,a,b); ++ T1 = X[7] = W[7]; ROUND_00_15(7,b,c,d,e,f,g,h,a); ++ T1 = X[8] = W[8]; ROUND_00_15(8,a,b,c,d,e,f,g,h); ++ T1 = X[9] = W[9]; 
ROUND_00_15(9,h,a,b,c,d,e,f,g); ++ T1 = X[10] = W[10]; ROUND_00_15(10,g,h,a,b,c,d,e,f); ++ T1 = X[11] = W[11]; ROUND_00_15(11,f,g,h,a,b,c,d,e); ++ T1 = X[12] = W[12]; ROUND_00_15(12,e,f,g,h,a,b,c,d); ++ T1 = X[13] = W[13]; ROUND_00_15(13,d,e,f,g,h,a,b,c); ++ T1 = X[14] = W[14]; ROUND_00_15(14,c,d,e,f,g,h,a,b); ++ T1 = X[15] = W[15]; ROUND_00_15(15,b,c,d,e,f,g,h,a); ++#else ++ T1 = X[0] = PULL64(W[0]); ROUND_00_15(0,a,b,c,d,e,f,g,h); ++ T1 = X[1] = PULL64(W[1]); ROUND_00_15(1,h,a,b,c,d,e,f,g); ++ T1 = X[2] = PULL64(W[2]); ROUND_00_15(2,g,h,a,b,c,d,e,f); ++ T1 = X[3] = PULL64(W[3]); ROUND_00_15(3,f,g,h,a,b,c,d,e); ++ T1 = X[4] = PULL64(W[4]); ROUND_00_15(4,e,f,g,h,a,b,c,d); ++ T1 = X[5] = PULL64(W[5]); ROUND_00_15(5,d,e,f,g,h,a,b,c); ++ T1 = X[6] = PULL64(W[6]); ROUND_00_15(6,c,d,e,f,g,h,a,b); ++ T1 = X[7] = PULL64(W[7]); ROUND_00_15(7,b,c,d,e,f,g,h,a); ++ T1 = X[8] = PULL64(W[8]); ROUND_00_15(8,a,b,c,d,e,f,g,h); ++ T1 = X[9] = PULL64(W[9]); ROUND_00_15(9,h,a,b,c,d,e,f,g); ++ T1 = X[10] = PULL64(W[10]); ROUND_00_15(10,g,h,a,b,c,d,e,f); ++ T1 = X[11] = PULL64(W[11]); ROUND_00_15(11,f,g,h,a,b,c,d,e); ++ T1 = X[12] = PULL64(W[12]); ROUND_00_15(12,e,f,g,h,a,b,c,d); ++ T1 = X[13] = PULL64(W[13]); ROUND_00_15(13,d,e,f,g,h,a,b,c); ++ T1 = X[14] = PULL64(W[14]); ROUND_00_15(14,c,d,e,f,g,h,a,b); ++ T1 = X[15] = PULL64(W[15]); ROUND_00_15(15,b,c,d,e,f,g,h,a); ++#endif ++ ++ for (i=16;i<80;i+=8) ++ { ++ ROUND_16_80(i+0,a,b,c,d,e,f,g,h,X); ++ ROUND_16_80(i+1,h,a,b,c,d,e,f,g,X); ++ ROUND_16_80(i+2,g,h,a,b,c,d,e,f,X); ++ ROUND_16_80(i+3,f,g,h,a,b,c,d,e,X); ++ ROUND_16_80(i+4,e,f,g,h,a,b,c,d,X); ++ ROUND_16_80(i+5,d,e,f,g,h,a,b,c,X); ++ ROUND_16_80(i+6,c,d,e,f,g,h,a,b,X); ++ ROUND_16_80(i+7,b,c,d,e,f,g,h,a,X); ++ } ++ ++ ctx->h[0] += a; ctx->h[1] += b; ctx->h[2] += c; ctx->h[3] += d; ++ ctx->h[4] += e; ctx->h[5] += f; ctx->h[6] += g; ctx->h[7] += h; ++ ++ W+=SHA_LBLOCK; ++ } ++ } ++ ++#endif ++ ++#endif /* SHA512_ASM */ ++ ++#endif /* OPENSSL_NO_SHA512 */ +diff -Naur ../openssl-0.9.8j/engines/e_aep.c ./engines/e_aep.c +--- ../openssl-0.9.8j/engines/e_aep.c 2008-12-30 14:30:57.000000000 +0100 ++++ ./engines/e_aep.c 2009-01-08 10:24:35.000000000 +0100 +@@ -62,8 +62,10 @@ + #include + #else + #include ++#ifndef _PID_T_ + typedef int pid_t; + #endif ++#endif + + #if defined(OPENSSL_SYS_NETWARE) && defined(NETWARE_CLIB) + #define getpid GetThreadID +diff -Naur ../openssl-0.9.8j/e_os2.h ./e_os2.h +--- ../openssl-0.9.8j/e_os2.h 2005-12-18 19:57:07.000000000 +0100 ++++ ./e_os2.h 2009-01-08 10:24:35.000000000 +0100 +@@ -264,7 +264,7 @@ + # define OPENSSL_IMPLEMENT_GLOBAL(type,name) \ + extern type _hide_##name; \ + type *_shadow_##name(void) { return &_hide_##name; } \ +- static type _hide_##name ++ type _hide_##name + # define OPENSSL_DECLARE_GLOBAL(type,name) type *_shadow_##name(void) + # define OPENSSL_GLOBAL_REF(name) (*(_shadow_##name())) + #else diff --git a/src/win32/patches/openssl.patch b/src/win32/patches/openssl.patch new file mode 100644 index 00000000..626f3792 --- /dev/null +++ b/src/win32/patches/openssl.patch @@ -0,0 +1,309 @@ +Index: Configure +--- ../tmp/openssl-0.9.8b/Configure 2006-04-03 02:15:40.000000000 -0700 ++++ ./Configure 2006-06-27 02:39:02.000000000 -0700 +@@ -132,7 +132,7 @@ + # seems to be sufficient? 
+ my $BSDthreads="-pthread -D_THREAD_SAFE -D_REENTRANT"; + +-#config-string $cc : $cflags : $unistd : $thread_cflag : $sys_id : $lflags : $bn_ops : $cpuid_obj : $bn_obj : $des_obj : $aes_obj : $bf_obj : $md5_obj : $sha1_obj : $cast_obj : $rc4_obj : $rmd160_obj : $rc5_obj : $dso_scheme : $shared_target : $shared_cflag : $shared_ldflag : $shared_extension : $ranlib : $arflags ++#config-string $cc : $cflags : $unistd : $thread_cflag : $sys_id : $lflags : $bn_ops : $cpuid_obj : $bn_obj : $des_obj : $aes_obj : $bf_obj : $md5_obj : $sha1_obj : $cast_obj : $rc4_obj : $rmd160_obj : $rc5_obj : $dso_scheme : $shared_target : $shared_cflag : $shared_ldflag : $shared_extension : $ranlib : $arflags : $ar : $nm + + my %table=( + # File 'TABLE' (created by 'make TABLE') contains the data from this list, +@@ -468,6 +468,9 @@ + # Borland C++ 4.5 + "BC-32","bcc32::::WIN32::BN_LLONG DES_PTR RC4_INDEX EXPORT_VAR_AS_FN:${no_asm}:win32", + ++# MinGW32 ++"mingw32", "mingw32-gcc:-mno-cygwin -DL_ENDIAN -fomit-frame-pointer -O3 -march=i486 -Wall -D_WIN32_WINNT=0x333:::MINGW32:-lwsock32 -lgdi32:BN_LLONG ${x86_gcc_des} ${x86_gcc_opts} EXPORT_VAR_AS_FN:${x86_coff_asm}:win32:cygwin-shared:-D_WINDLL -DOPENSSL_USE_APPLINK:-mno-cygwin -shared:.dll.a:mingw32-ranlib::mingw32-ar:mingw32-nm", ++ + # MinGW + "mingw", "gcc:-mno-cygwin -DL_ENDIAN -fomit-frame-pointer -O3 -march=i486 -Wall -D_WIN32_WINNT=0x333:::MINGW32:-lwsock32 -lgdi32:BN_LLONG ${x86_gcc_des} ${x86_gcc_opts} EXPORT_VAR_AS_FN:${x86_coff_asm}:win32:cygwin-shared:-D_WINDLL -DOPENSSL_USE_APPLINK:-mno-cygwin -shared:.dll.a", + +@@ -558,6 +561,8 @@ + my $idx_shared_extension = $idx++; + my $idx_ranlib = $idx++; + my $idx_arflags = $idx++; ++my $idx_ar= $idx++; ++my $idx_nm= $idx++; + + my $prefix=""; + my $openssldir=""; +@@ -920,7 +925,7 @@ + + $IsMK1MF=1 if ($target eq "mingw" && $^O ne "cygwin"); + +-$exe_ext=".exe" if ($target eq "Cygwin" || $target eq "DJGPP" || $target eq "mingw"); ++$exe_ext=".exe" if ($target eq "Cygwin" || $target eq "DJGPP" || $target eq "mingw" || $target eq "mingw32"); + $exe_ext=".pm" if ($target =~ /vos/); + $openssldir="/usr/local/ssl" if ($openssldir eq "" and $prefix eq ""); + $prefix=$openssldir if $prefix eq ""; +@@ -964,6 +969,8 @@ + my $shared_extension = $fields[$idx_shared_extension]; + my $ranlib = $fields[$idx_ranlib]; + my $arflags = $fields[$idx_arflags]; ++my $ar = $fields[$idx_ar]; ++my $nm = $fields[$idx_nm]; + + my $no_shared_warn=0; + my $no_user_cflags=0; +@@ -1172,6 +1179,18 @@ + { + $ranlib = $default_ranlib; + } ++if ($arflags eq "") ++ { ++ $arflags = "r"; ++ } ++if ($ar eq "") ++ { ++ $ar = "ar"; ++ } ++if ($nm eq "") ++ { ++ $nm = "nm"; ++ } + + #my ($bn1)=split(/\s+/,$bn_obj); + #$bn1 = "" unless defined $bn1; +@@ -1307,6 +1326,8 @@ + s/^PROCESSOR=.*/PROCESSOR= $processor/; + s/^RANLIB=.*/RANLIB= $ranlib/; + s/^ARFLAGS=.*/ARFLAGS= $arflags/; ++ s/^AR=.*/AR= $ar/; ++ s/^NM=.*/NM= $nm/; + s/^PERL=.*/PERL= $perl/; + s/^KRB5_INCLUDES=.*/KRB5_INCLUDES=$withargs{"krb5-include"}/; + s/^LIBKRB5=.*/LIBKRB5=$withargs{"krb5-lib"}/; +@@ -1358,6 +1379,8 @@ + print "PROCESSOR =$processor\n"; + print "RANLIB =$ranlib\n"; + print "ARFLAGS =$arflags\n"; ++print "AR =$ar\n"; ++print "NM =$nm\n"; + print "PERL =$perl\n"; + print "KRB5_INCLUDES =",$withargs{"krb5-include"},"\n" + if $withargs{"krb5-include"} ne ""; +@@ -1737,7 +1760,7 @@ + my $bn_ops,my $cpuid_obj,my $bn_obj,my $des_obj,my $aes_obj, my $bf_obj, + my $md5_obj,my $sha1_obj,my $cast_obj,my $rc4_obj,my $rmd160_obj, + my $rc5_obj,my $dso_scheme,my $shared_target,my 
$shared_cflag, +- my $shared_ldflag,my $shared_extension,my $ranlib,my $arflags)= ++ my $shared_ldflag,my $shared_extension,my $ranlib,my $arflags, my $ar, my $nm)= + split(/\s*:\s*/,$table{$target} . ":" x 30 , -1); + + print < lib$(LIBNAME).exp; \ ++ ${NM} -Pg $$SHOBJECTS | grep ' [BDT] ' | cut -f1 -d' ' > lib$(LIBNAME).exp; \ + LIBPATH=`for x in $$LIBDEPS; do if echo $$x | grep '^ *-L' > /dev/null 2>&1; then echo $$x | sed -e 's/^ *-L//'; fi; done | uniq`; \ + LIBPATH=`echo $$LIBPATH | sed -e 's/ /:/g'`; \ + LD_LIBRARY_PATH=$$LIBPATH:$$LD_LIBRARY_PATH \ +@@ -112,7 +113,20 @@ + ( $(SET_X); rm -f lib$(LIBNAME).exp ) + + SYMLINK_SO= \ +- if [ -n "$$INHIBIT_SYMLINKS" ]; then :; else \ ++ if [ -n "$$INHIBIT_SYMLINKS" ]; then \ ++ prev=$$SHLIB$$SHLIB_SOVER$$SHLIB_SUFFIX; \ ++ if [ -n "$$SHLIB_COMPAT" ]; then \ ++ for x in $$SHLIB_COMPAT; do \ ++ ( $(SET_X); rm -f $$SHLIB$$x$$SHLIB_SUFFIX; \ ++ ln -s $$prev $$SHLIB$$x$$SHLIB_SUFFIX ); \ ++ prev=$$SHLIB$$x$$SHLIB_SUFFIX; \ ++ done; \ ++ fi; \ ++ if [ -n "$$SHLIB_SOVER" ]; then \ ++ ( $(SET_X); rm -f $$SHLIB$$SHLIB_SUFFIX; \ ++ cp -p $$prev $$SHLIB$$SHLIB_SUFFIX ); \ ++ fi; \ ++ else \ + prev=$$SHLIB$$SHLIB_SOVER$$SHLIB_SUFFIX; \ + if [ -n "$$SHLIB_COMPAT" ]; then \ + for x in $$SHLIB_COMPAT; do \ +@@ -249,6 +263,9 @@ + INHIBIT_SYMLINKS=yes; \ + SHLIB=cyg$(LIBNAME); \ + expr $(PLATFORM) : 'mingw' > /dev/null && SHLIB=$(LIBNAME)eay32; \ ++ if [ "$(PLATFORM)" = "mingw32" -a "$(LIBNAME)" = "crypto" ]; then \ ++ SHLIB=libeay32; \ ++ fi; \ + SHLIB_SUFFIX=.dll; \ + SHLIB_SOVER=-$(LIBVERSION); \ + ALLSYMSFLAGS='-Wl,--whole-archive'; \ +@@ -258,8 +275,8 @@ + [ -f apps/$$SHLIB$$SHLIB_SUFFIX ] && rm apps/$$SHLIB$$SHLIB_SUFFIX; \ + [ -f test/$$SHLIB$$SHLIB_SUFFIX ] && rm test/$$SHLIB$$SHLIB_SUFFIX; \ + $(LINK_SO_A) || exit 1; \ +- cp -p $$SHLIB$$SHLIB_SOVER$$SHLIB_SUFFIX apps/; \ +- cp -p $$SHLIB$$SHLIB_SOVER$$SHLIB_SUFFIX test/ ++ cp -p $$SHLIB$$SHLIB_SUFFIX apps/; \ ++ cp -p $$SHLIB$$SHLIB_SUFFIX test/ + link_app.cygwin: + $(LINK_APP) + +Index: util/mkdef.pl +--- ../tmp/openssl-0.9.8b/util/mkdef.pl 2006-01-02 06:08:22.000000000 -0800 ++++ ./util/mkdef.pl 2006-07-26 23:21:46.000000000 -0700 +@@ -1204,7 +1204,7 @@ + ; Definition file for the DLL version of the $name library from OpenSSL + ; + +-LIBRARY $libname $liboptions ++LIBRARY $libname.DLL $liboptions + + DESCRIPTION '$description' + diff --git a/src/win32/patches/pcre.patch b/src/win32/patches/pcre.patch new file mode 100644 index 00000000..189ac455 --- /dev/null +++ b/src/win32/patches/pcre.patch @@ -0,0 +1,130 @@ +Index: /Makefile.in +--- ../orig/pcre-6.3/Makefile.in 2005-08-18 06:08:28.000000000 -0700 ++++ ./Makefile.in 2006-06-30 09:11:02.000000000 -0700 +@@ -103,12 +103,10 @@ + LIBTOOL = @LIBTOOL@ + LTCOMPILE = $(LIBTOOL) --mode=compile $(CC) -c $(CFLAGS) -I. -I$(top_srcdir) $(NEWLINE) $(LINK_SIZE) $(MATCH_LIMIT) $(NO_RECURSE) $(EBCDIC) + LTCXXCOMPILE = $(LIBTOOL) --mode=compile $(CXX) -c $(CXXFLAGS) -I. -I$(top_srcdir) $(NEWLINE) $(LINK_SIZE) $(MATCH_LIMIT) $(NO_RECURSE) $(EBCDIC) +-@ON_WINDOWS@LINK = $(CC) $(LDFLAGS) -I. -I$(top_srcdir) -L.libs +-@NOT_ON_WINDOWS@LINK = $(LIBTOOL) --mode=link $(CC) $(CFLAGS) $(LDFLAGS) -I. -I$(top_srcdir) ++LINK = $(LIBTOOL) --mode=link $(CC) $(CFLAGS) $(LDFLAGS) -I. -I$(top_srcdir) + LINKLIB = $(LIBTOOL) --mode=link $(CC) -export-symbols-regex '^[^_]|__?pcre_.*utf8|__?pcre_printint' $(LDFLAGS) -I. -I$(top_srcdir) + LINK_FOR_BUILD = $(LIBTOOL) --mode=link $(CC_FOR_BUILD) $(CFLAGS_FOR_BUILD) $(LDFLAGS_FOR_BUILD) -I. 
-I$(top_srcdir) +-@ON_WINDOWS@CXXLINK = $(CXX) $(LDFLAGS) -I. -I$(top_srcdir) -L.libs +-@NOT_ON_WINDOWS@CXXLINK = $(LIBTOOL) --mode=link $(CXX) $(LDFLAGS) -I. -I$(top_srcdir) ++CXXLINK = $(LIBTOOL) --mode=link $(CXX) $(LDFLAGS) -I. -I$(top_srcdir) + CXXLINKLIB = $(LIBTOOL) --mode=link $(CXX) $(LDFLAGS) -I. -I$(top_srcdir) + + # These are the version numbers for the shared libraries +@@ -355,54 +353,54 @@ + + # Some Windows-specific targets for MinGW. Do not use for Cygwin. + +-winshared : .libs/@WIN_PREFIX@pcre.dll .libs/@WIN_PREFIX@pcreposix.dll \ +- .libs/@WIN_PREFIX@pcrecpp.dll ++winshared : .libs/pcre.dll .libs/pcreposix.dll \ ++ .libs/pcrecpp.dll + +-.libs/@WIN_PREFIX@pcre.dll : libpcre.la ++.libs/pcre.dll : libpcre.la + $(CC) $(CFLAGS) -shared -o $@ \ + -Wl,--whole-archive .libs/libpcre.a \ + -Wl,--out-implib,.libs/libpcre.dll.a \ +- -Wl,--output-def,.libs/@WIN_PREFIX@pcre.dll-def \ ++ -Wl,--output-def,.libs/pcre.dll-def \ + -Wl,--export-all-symbols \ + -Wl,--no-whole-archive +- sed -e "s#dlname=''#dlname='../bin/@WIN_PREFIX@pcre.dll'#" \ ++ sed -e "s#dlname=''#dlname='../bin/pcre.dll'#" \ + -e "s#library_names=''#library_names='libpcre.dll.a'#" \ + < .libs/libpcre.lai > .libs/libpcre.lai.tmp && \ + mv -f .libs/libpcre.lai.tmp .libs/libpcre.lai +- sed -e "s#dlname=''#dlname='../bin/@WIN_PREFIX@pcre.dll'#" \ ++ sed -e "s#dlname=''#dlname='../bin/pcre.dll'#" \ + -e "s#library_names=''#library_names='libpcre.dll.a'#" \ + < libpcre.la > libpcre.la.tmp && \ + mv -f libpcre.la.tmp libpcre.la + + +-.libs/@WIN_PREFIX@pcreposix.dll: libpcreposix.la libpcre.la ++.libs/pcreposix.dll: libpcreposix.la libpcre.la + $(CC) $(CFLAGS) -shared -o $@ \ + -Wl,--whole-archive .libs/libpcreposix.a \ +- -Wl,--out-implib,.libs/@WIN_PREFIX@pcreposix.dll.a \ +- -Wl,--output-def,.libs/@WIN_PREFIX@libpcreposix.dll-def \ ++ -Wl,--out-implib,.libs/libpcreposix.dll.a \ ++ -Wl,--output-def,.libs/libpcreposix.dll-def \ + -Wl,--export-all-symbols \ + -Wl,--no-whole-archive .libs/libpcre.a +- sed -e "s#dlname=''#dlname='../bin/@WIN_PREFIX@pcreposix.dll'#" \ ++ sed -e "s#dlname=''#dlname='../bin/pcreposix.dll'#" \ + -e "s#library_names=''#library_names='libpcreposix.dll.a'#"\ + < .libs/libpcreposix.lai > .libs/libpcreposix.lai.tmp && \ + mv -f .libs/libpcreposix.lai.tmp .libs/libpcreposix.lai +- sed -e "s#dlname=''#dlname='../bin/@WIN_PREFIX@pcreposix.dll'#" \ ++ sed -e "s#dlname=''#dlname='../bin/pcreposix.dll'#" \ + -e "s#library_names=''#library_names='libpcreposix.dll.a'#"\ + < libpcreposix.la > libpcreposix.la.tmp && \ + mv -f libpcreposix.la.tmp libpcreposix.la + +-.libs/@WIN_PREFIX@pcrecpp.dll: libpcrecpp.la libpcre.la ++.libs/pcrecpp.dll: libpcrecpp.la libpcre.la + $(CXX) $(CXXFLAGS) -shared -o $@ \ + -Wl,--whole-archive .libs/libpcrecpp.a \ +- -Wl,--out-implib,.libs/@WIN_PREFIX@pcrecpp.dll.a \ +- -Wl,--output-def,.libs/@WIN_PREFIX@libpcrecpp.dll-def \ ++ -Wl,--out-implib,.libs/libpcrecpp.dll.a \ ++ -Wl,--output-def,.libs/libpcrecpp.dll-def \ + -Wl,--export-all-symbols \ + -Wl,--no-whole-archive .libs/libpcre.a +- sed -e "s#dlname=''#dlname='../bin/@WIN_PREFIX@pcrecpp.dll'#" \ ++ sed -e "s#dlname=''#dlname='../bin/pcrecpp.dll'#" \ + -e "s#library_names=''#library_names='libpcrecpp.dll.a'#"\ + < .libs/libpcrecpp.lai > .libs/libpcrecpp.lai.tmp && \ + mv -f .libs/libpcrecpp.lai.tmp .libs/libpcrecpp.lai +- sed -e "s#dlname=''#dlname='../bin/@WIN_PREFIX@pcrecpp.dll'#" \ ++ sed -e "s#dlname=''#dlname='../bin/pcrecpp.dll'#" \ + -e "s#library_names=''#library_names='libpcrecpp.dll.a'#"\ + < libpcrecpp.la > 
libpcrecpp.la.tmp && \ + mv -f libpcrecpp.la.tmp libpcrecpp.la +@@ -411,15 +409,15 @@ + wininstall : winshared + $(mkinstalldirs) $(DESTDIR)$(LIBDIR) + $(mkinstalldirs) $(DESTDIR)$(BINDIR) +- $(INSTALL) .libs/@WIN_PREFIX@pcre.dll $(DESTDIR)$(BINDIR)/@WIN_PREFIX@pcre.dll +- $(INSTALL) .libs/@WIN_PREFIX@pcreposix.dll $(DESTDIR)$(BINDIR)/@WIN_PREFIX@pcreposix.dll +- $(INSTALL) .libs/@WIN_PREFIX@libpcreposix.dll.a $(DESTDIR)$(LIBDIR)/@WIN_PREFIX@libpcreposix.dll.a +- $(INSTALL) .libs/@WIN_PREFIX@libpcre.dll.a $(DESTDIR)$(LIBDIR)/@WIN_PREFIX@libpcre.dll.a +-@HAVE_CPP@ $(INSTALL) .libs/@WIN_PREFIX@pcrecpp.dll $(DESTDIR)$(BINDIR)/@WIN_PREFIX@pcrecpp.dll +-@HAVE_CPP@ $(INSTALL) .libs/@WIN_PREFIX@libpcrecpp.dll.a $(DESTDIR)$(LIBDIR)/@WIN_PREFIX@libpcrecpp.dll.a +- -strip -g $(DESTDIR)$(BINDIR)/@WIN_PREFIX@pcre.dll +- -strip -g $(DESTDIR)$(BINDIR)/@WIN_PREFIX@pcreposix.dll +-@HAVE_CPP@ -strip -g $(DESTDIR)$(BINDIR)/@WIN_PREFIX@pcrecpp.dll ++ $(INSTALL) .libs/pcre.dll $(DESTDIR)$(BINDIR)/pcre.dll ++ $(INSTALL) .libs/pcreposix.dll $(DESTDIR)$(BINDIR)/pcreposix.dll ++ $(INSTALL) .libs/libpcreposix.dll.a $(DESTDIR)$(LIBDIR)/libpcreposix.dll.a ++ $(INSTALL) .libs/libpcre.dll.a $(DESTDIR)$(LIBDIR)/libpcre.dll.a ++@HAVE_CPP@ $(INSTALL) .libs/pcrecpp.dll $(DESTDIR)$(BINDIR)/pcrecpp.dll ++@HAVE_CPP@ $(INSTALL) .libs/libpcrecpp.dll.a $(DESTDIR)$(LIBDIR)/libpcrecpp.dll.a ++ -strip -g $(DESTDIR)$(BINDIR)/pcre.dll ++ -strip -g $(DESTDIR)$(BINDIR)/pcreposix.dll ++@HAVE_CPP@ -strip -g $(DESTDIR)$(BINDIR)/pcrecpp.dll + -strip $(DESTDIR)$(BINDIR)/pcregrep@EXEEXT@ + -strip $(DESTDIR)$(BINDIR)/pcretest@EXEEXT@ + +@@ -555,12 +553,12 @@ + + check: runtest + +-@WIN_PREFIX@pcre.dll : winshared +- cp .libs/@WIN_PREFIX@pcre.dll . ++pcre.dll : winshared ++ cp .libs/pcre.dll . + + test: runtest + +-runtest: all @ON_WINDOWS@ @WIN_PREFIX@pcre.dll ++runtest: all @ON_WINDOWS@ pcre.dll + @./RunTest + @./RunGrepTest + @HAVE_CPP@ @echo "" diff --git a/src/win32/patches/postgresql.patch b/src/win32/patches/postgresql.patch new file mode 100644 index 00000000..d270d2c1 --- /dev/null +++ b/src/win32/patches/postgresql.patch @@ -0,0 +1,123 @@ +Index: doc/Makefile +--- ../original/postgresql-8.1.4/doc/Makefile 2003-11-29 11:51:36.000000000 -0800 ++++ ./doc/Makefile 2006-06-29 03:44:10.000000000 -0700 +@@ -101,4 +101,4 @@ + + clean distclean maintainer-clean: + rm -rf man1/ man$(sqlmansectnum)/ man$(sqlmansect_dummy)/ +- $(MAKE) -C src $@ ++ -$(MAKE) -C src $@ +Index: src/bin/pgevent/Makefile +--- ../original/postgresql-8.1.4/src/bin/pgevent/Makefile 2004-12-31 14:03:14.000000000 -0800 ++++ ./src/bin/pgevent/Makefile 2006-06-29 01:19:18.000000000 -0700 +@@ -14,16 +14,18 @@ + + OBJS=pgevent.o pgmsgevent.o + NAME=pgevent.dll ++DLLWRAP=dllwrap ++WINDRES=windres + + all: $(NAME) + + install: all install-lib + + pgevent.dll: $(OBJS) pgevent.def +- dllwrap --def pgevent.def -o $(NAME) $(OBJS) ++ $(DLLWRAP) --def pgevent.def -o $(NAME) $(OBJS) + + pgmsgevent.o: pgmsgevent.rc win32ver.rc +- windres pgmsgevent.rc -o pgmsgevent.o --include-dir=$(top_builddir)/src/include ++ $(WINDRES) pgmsgevent.rc -o pgmsgevent.o --include-dir=$(top_builddir)/src/include + + all-lib: $(NAME) + +Index: src/interfaces/libpq/Makefile +--- ../original/postgresql-8.1.4/src/interfaces/libpq/Makefile 2005-08-28 17:47:35.000000000 -0700 ++++ ./src/interfaces/libpq/Makefile 2006-06-29 01:15:35.000000000 -0700 +@@ -13,6 +13,7 @@ + top_builddir = ../../.. 
+ include $(top_builddir)/src/Makefile.global + ++WINDRES=windres + + # shared library parameters + NAME= pq +@@ -42,7 +43,7 @@ + DLL_DEFFILE=libpqdll.def + + libpqrc.o: libpq.rc +- windres -i libpq.rc -o libpqrc.o ++ $(WINDRES) -i libpq.rc -o libpqrc.o + + ifeq ($(enable_thread_safety), yes) + OBJS += pthread-win32.o +Index: src/Makefile +--- ../original/postgresql-8.1.4/src/Makefile 2005-01-13 10:23:21.000000000 -0800 ++++ ./src/Makefile 2006-06-29 04:07:54.000000000 -0700 +@@ -52,10 +52,10 @@ + $(MAKE) -C bin $@ + $(MAKE) -C pl $@ + $(MAKE) -C makefiles $@ +- $(MAKE) -C test $@ +- $(MAKE) -C tutorial NO_PGXS=1 $@ ++ -$(MAKE) -C test $@ ++ -$(MAKE) -C tutorial NO_PGXS=1 $@ + $(MAKE) -C utils $@ +- $(MAKE) -C tools/thread $@ ++ -$(MAKE) -C tools/thread $@ + + distclean maintainer-clean: + -$(MAKE) -C port $@ +Index: src/Makefile.global.in +--- ../original/postgresql-8.1.4/src/Makefile.global.in 2005-09-27 10:39:32.000000000 -0700 ++++ ./src/Makefile.global.in 2006-06-29 01:11:44.000000000 -0700 +@@ -31,6 +31,7 @@ + # PostgreSQL version number + VERSION = @PACKAGE_VERSION@ + ++WINDRES=windres + # Support for VPATH builds + vpath_build = @vpath_build@ + abs_top_srcdir = @abs_top_srcdir@ +@@ -456,7 +457,7 @@ + sed -e 's;FILEDESC;$(PGFILEDESC);' -e 's;VFT_APP;$(PGFTYPE);' -e 's;_ICO_;$(PGICOSTR);' -e 's;\(VERSION.*\),0 *$$;\1,'`date '+%y%j' | sed 's/^0*//'`';' $(top_builddir)/src/port/win32ver.rc > win32ver.rc + win32ver.o: $(top_builddir)/src/port/win32ver.rc + sed -e 's;FILEDESC;$(PGFILEDESC);' -e 's;VFT_APP;$(PGFTYPE);' -e 's;_ICO_;$(PGICOSTR);' -e 's;\(VERSION.*\),0 *$$;\1,'`date '+%y%j' | sed 's/^0*//'`';' $(top_builddir)/src/port/win32ver.rc > win32ver.rc +- windres -i win32ver.rc -o win32ver.o --include-dir=$(top_builddir)/src/include ++ $(WINDRES) -i win32ver.rc -o win32ver.o --include-dir=$(top_builddir)/src/include + rm -f win32ver.rc + endif + +Index: src/timezone/Makefile +--- ../original/postgresql-8.1.4/src/timezone/Makefile 2005-07-06 14:40:09.000000000 -0700 ++++ ./src/timezone/Makefile 2006-06-29 03:22:26.000000000 -0700 +@@ -12,6 +12,8 @@ + top_builddir = ../.. 
+ include $(top_builddir)/src/Makefile.global + ++ZIC=./zic ++ + override CPPFLAGS := $(CPPFLAGS) + + # files to build into backend +@@ -25,16 +27,16 @@ + pacificnew etcetera factory backward systemv solar87 solar88 solar89 + TZDATAFILES := $(TZDATA:%=$(srcdir)/data/%) + +-all: SUBSYS.o submake-libpgport zic ++all: SUBSYS.o submake-libpgport zic$(X) + + SUBSYS.o: $(OBJS) + $(LD) $(LDREL) $(LDOUT) SUBSYS.o $(OBJS) + +-zic: $(ZICOBJS) +- $(CC) $(CFLAGS) $(ZICOBJS) $(LDFLAGS) $(LIBS) -o $@$(X) ++zic$(X): $(ZICOBJS) ++ $(CC) $(CFLAGS) $(ZICOBJS) $(LDFLAGS) $(LIBS) -o $@ + + install: all installdirs +- ./zic -d $(DESTDIR)$(datadir)/timezone $(TZDATAFILES) ++ $(ZIC) -d $(DESTDIR)$(datadir)/timezone $(TZDATAFILES) + + installdirs: + $(mkinstalldirs) $(DESTDIR)$(datadir) diff --git a/src/win32/patches/pthreads-w64.patch b/src/win32/patches/pthreads-w64.patch new file mode 100644 index 00000000..209ff6dc --- /dev/null +++ b/src/win32/patches/pthreads-w64.patch @@ -0,0 +1,970 @@ +diff -Naur ../pthreads-snap-2004-06-22/GNUmakefile ./GNUmakefile +--- ../pthreads-snap-2004-06-22/GNUmakefile 2004-05-20 02:56:52.000000000 +0200 ++++ ./GNUmakefile 2009-01-07 15:57:36.000000000 +0100 +@@ -48,9 +48,9 @@ + + #OPT = -g + #OPT = -O3 -DTEST_ICE +-OPT = -O3 -finline-functions ++OPT = $(CLEANUP) -O3 -DHAVE_STRUCT_TIMESPEC -D__MINGW32__ -finline-functions + +-LFLAGS = -lwsock32 ++LFLAGS = -lws2_32 + + GC_CFLAGS = -D__CLEANUP_C + GCE_CFLAGS = -D__CLEANUP_CXX -mthreads +@@ -408,16 +408,16 @@ + @ $(MAKE) clean GC + + GC: +- $(MAKE) CC=gcc CLEANUP_FLAGS="$(GC_CFLAGS)" OBJ="$(DLL_OBJS)" $(GC_DLL) ++ $(MAKE) CC=mingw32-gcc CLEANUP_FLAGS="$(GC_CFLAGS)" OBJ="$(DLL_OBJS)" $(GC_DLL) + + GCE: +- $(MAKE) CC=g++ CLEANUP_FLAGS="$(GCE_CFLAGS)" OBJ="$(DLL_OBJS)" $(GCE_DLL) ++ $(MAKE) CC=mingw32-g++ CLEANUP_FLAGS="$(GCE_CFLAGS)" OBJ="$(DLL_OBJS)" $(GCE_DLL) + + GC-inlined: +- $(MAKE) CC=gcc CLEANUP_FLAGS="$(GC_CFLAGS)" OBJ="$(DLL_INLINED_OBJS)" $(GC_INLINED_STAMP) ++ $(MAKE) CC=mingw32-gcc CLEANUP_FLAGS="$(GC_CFLAGS)" OBJ="$(DLL_INLINED_OBJS)" $(GC_INLINED_STAMP) + + GCE-inlined: +- $(MAKE) CC=g++ CLEANUP_FLAGS="$(GCE_CFLAGS)" OBJ="$(DLL_INLINED_OBJS)" $(GCE_INLINED_STAMP) ++ $(MAKE) CC=mingw32-g++ CLEANUP_FLAGS="$(GCE_CFLAGS)" OBJ="$(DLL_INLINED_OBJS)" $(GCE_INLINED_STAMP) + + tests: + @ cd tests +@@ -436,24 +436,24 @@ + + $(GC_DLL): $(DLL_OBJS) + $(CC) $(OPT) -shared -o $(GC_DLL) $(DLL_OBJS) $(LFLAGS) +- dlltool -z pthread.def $(DLL_OBJS) +- dlltool -k --dllname $@ --output-lib $(GC_LIB) --def $(PTHREAD_DEF) ++ mingw32-dlltool -z pthread.def $(DLL_OBJS) ++ mingw32-dlltool -k --dllname $@ --output-lib $(GC_LIB) --def $(PTHREAD_DEF) + + $(GCE_DLL): $(DLL_OBJS) + $(CC) $(OPT) -mthreads -shared -o $(GCE_DLL) $(DLL_OBJS) $(LFLAGS) +- dlltool -z pthread.def $(DLL_OBJS) +- dlltool -k --dllname $@ --output-lib $(GCE_LIB) --def $(PTHREAD_DEF) ++ mingw32-dlltool -z pthread.def $(DLL_OBJS) ++ mingw32-dlltool -k --dllname $@ --output-lib $(GCE_LIB) --def $(PTHREAD_DEF) + + $(GC_INLINED_STAMP): $(DLL_INLINED_OBJS) + $(CC) $(OPT) -shared -o $(GC_DLL) $(DLL_INLINED_OBJS) $(LFLAGS) +- dlltool -z pthread.def $(DLL_INLINED_OBJS) +- dlltool -k --dllname $(GC_DLL) --output-lib $(GC_LIB) --def $(PTHREAD_DEF) ++ mingw32-dlltool -z pthread.def $(DLL_INLINED_OBJS) ++ mingw32-dlltool -k --dllname $(GC_DLL) --output-lib $(GC_LIB) --def $(PTHREAD_DEF) + echo touched > $(GC_INLINED_STAMP) + + $(GCE_INLINED_STAMP): $(DLL_INLINED_OBJS) + $(CC) $(OPT) -mthreads -shared -o $(GCE_DLL) $(DLL_INLINED_OBJS) $(LFLAGS) +- dlltool -z pthread.def $(DLL_INLINED_OBJS) 
+- dlltool -k --dllname $(GCE_DLL) --output-lib $(GCE_LIB) --def $(PTHREAD_DEF) ++ mingw32-dlltool -z pthread.def $(DLL_INLINED_OBJS) ++ mingw32-dlltool -k --dllname $(GCE_DLL) --output-lib $(GCE_LIB) --def $(PTHREAD_DEF) + echo touched > $(GCE_INLINED_STAMP) + + clean: +diff -Naur ../pthreads-snap-2004-06-22/implement.h ./implement.h +--- ../pthreads-snap-2004-06-22/implement.h 2004-06-22 08:12:54.000000000 +0200 ++++ ./implement.h 2009-01-07 15:57:36.000000000 +0100 +@@ -38,7 +38,9 @@ + #ifndef _IMPLEMENT_H + #define _IMPLEMENT_H + ++#ifndef _WIN32_WINNT + #define _WIN32_WINNT 0x400 ++#endif + + #include + +diff -Naur ../pthreads-snap-2004-06-22/pthread_cancel.c ./pthread_cancel.c +--- ../pthreads-snap-2004-06-22/pthread_cancel.c 2004-05-17 03:38:02.000000000 +0200 ++++ ./pthread_cancel.c 2009-01-07 15:57:36.000000000 +0100 +@@ -70,7 +70,7 @@ + } + + static void CALLBACK +-ptw32_cancel_callback (DWORD unused) ++ptw32_cancel_callback (ULONG_PTR unused) + { + ptw32_throw (PTW32_EPS_CANCEL); + +diff -Naur ../pthreads-snap-2004-06-22/pthread_exit.c ./pthread_exit.c +--- ../pthreads-snap-2004-06-22/pthread_exit.c 2004-05-17 03:38:03.000000000 +0200 ++++ ./pthread_exit.c 2009-01-07 15:57:36.000000000 +0100 +@@ -89,7 +89,7 @@ + */ + + #if ! defined (__MINGW32__) || defined (__MSVCRT__) +- _endthreadex ((unsigned) value_ptr); ++ _endthreadex ((unsigned) ((ULONG_PTR)value_ptr)); + #else + _endthread (); + #endif +diff -Naur ../pthreads-snap-2004-06-22/pthread_getspecific.c ./pthread_getspecific.c +--- ../pthreads-snap-2004-06-22/pthread_getspecific.c 2004-05-17 03:38:03.000000000 +0200 ++++ ./pthread_getspecific.c 2009-01-07 15:57:36.000000000 +0100 +@@ -63,13 +63,18 @@ + * ------------------------------------------------------ + */ + { +- int lasterror = GetLastError (); +- int lastWSAerror = WSAGetLastError (); ++ void *ptr; + +- void *ptr = TlsGetValue (key->key); ++ if (key == NULL) { ++ ptr = NULL; ++ } else { ++ int lasterror = GetLastError (); ++ int lastWSAerror = WSAGetLastError (); + +- SetLastError (lasterror); +- WSASetLastError (lastWSAerror); ++ ptr = TlsGetValue(key->key); + ++ SetLastError(lasterror); ++ WSASetLastError(lastWSAerror); ++ } + return ptr; + } +diff -Naur ../pthreads-snap-2004-06-22/pthread.h ./pthread.h +--- ../pthreads-snap-2004-06-22/pthread.h 2004-06-22 08:12:54.000000000 +0200 ++++ ./pthread.h 2009-01-07 15:57:36.000000000 +0100 +@@ -274,10 +274,6 @@ + #endif /* PTW32_LEVEL >= PTW32_LEVEL_MAX */ + + #ifndef HAVE_STRUCT_TIMESPEC +-struct timespec { +- long tv_sec; +- long tv_nsec; +-}; + #endif /* HAVE_STRUCT_TIMESPEC */ + + #ifndef SIG_BLOCK +diff -Naur ../pthreads-snap-2004-06-22/ptw32_InterlockedCompareExchange.c ./ptw32_InterlockedCompareExchange.c +--- ../pthreads-snap-2004-06-22/ptw32_InterlockedCompareExchange.c 2004-05-17 03:38:03.000000000 +0200 ++++ ./ptw32_InterlockedCompareExchange.c 2009-01-07 15:57:36.000000000 +0100 +@@ -101,6 +101,19 @@ + #endif + + #else ++#if defined(_WIN64) ++ /* ++ * Microsoft Visual C++ 7.1 and newer have _Interlocked intrinsics ++ */ ++ { ++ result = InterlockedCompareExchange(LONG volatile *Destination,LONG ExChange,LONG Comperand); ++ } ++ else ++ { ++ result = InterlockedCompareExchange(LONG volatile *Destination,LONG ExChange,LONG Comperand); ++ } ++ #else ++ + + /* + * If execution gets to here then we should be running on a Win95 system +@@ -110,7 +123,8 @@ + */ + + result = 0; +- ++#error Unsupported platform or compiler! 
++#endif + #endif + + /* *INDENT-ON* */ +diff -Naur ../pthreads-snap-2004-06-22/ptw32_semwait.c ./ptw32_semwait.c +--- ../pthreads-snap-2004-06-22/ptw32_semwait.c 2004-05-17 09:59:47.000000000 +0200 ++++ ./ptw32_semwait.c 2009-01-07 15:57:36.000000000 +0100 +@@ -41,7 +41,7 @@ + #include "implement.h" + + +-INLINE int ++int + ptw32_semwait (sem_t * sem) + /* + * ------------------------------------------------------ +diff -Naur ../pthreads-snap-2004-06-22/ptw32_threadStart.c ./ptw32_threadStart.c +--- ../pthreads-snap-2004-06-22/ptw32_threadStart.c 2004-05-17 03:38:03.000000000 +0200 ++++ ./ptw32_threadStart.c 2009-01-07 15:57:36.000000000 +0100 +@@ -356,7 +356,7 @@ + } + + #if ! defined (__MINGW32__) || defined (__MSVCRT__) +- _endthreadex ((unsigned) status); ++ _endthreadex ((unsigned) ((ULONG_PTR)status)); + #else + _endthread (); + #endif +@@ -366,7 +366,7 @@ + */ + + #if ! defined (__MINGW32__) || defined (__MSVCRT__) +- return (unsigned) status; ++ return (unsigned) ((ULONG_PTR)status); + #endif + + } /* ptw32_threadStart */ +diff -Naur ../pthreads-snap-2004-06-22/ptw32_throw.c ./ptw32_throw.c +--- ../pthreads-snap-2004-06-22/ptw32_throw.c 2004-05-17 03:38:03.000000000 +0200 ++++ ./ptw32_throw.c 2009-01-07 15:57:36.000000000 +0100 +@@ -78,10 +78,10 @@ + switch (exception) + { + case PTW32_EPS_CANCEL: +- exitCode = (unsigned) PTHREAD_CANCELED; ++ exitCode = (unsigned) ((ULONG_PTR)PTHREAD_CANCELED); + break; + case PTW32_EPS_EXIT: +- exitCode = (unsigned) self->exitStatus;; ++ exitCode = (unsigned) ((ULONG_PTR)self->exitStatus);; + break; + } + +diff -Naur ../pthreads-snap-2004-06-22/tests/barrier3.c ./tests/barrier3.c +--- ../pthreads-snap-2004-06-22/tests/barrier3.c 2003-08-19 05:31:51.000000000 +0200 ++++ ./tests/barrier3.c 2009-01-07 15:57:36.000000000 +0100 +@@ -41,7 +41,7 @@ + #include "test.h" + + pthread_barrier_t barrier = NULL; +-static int result = 1; ++static DWORD_PTR result = 1; + + void * func(void * arg) + { +diff -Naur ../pthreads-snap-2004-06-22/tests/barrier5.c ./tests/barrier5.c +--- ../pthreads-snap-2004-06-22/tests/barrier5.c 2003-08-19 05:31:51.000000000 +0200 ++++ ./tests/barrier5.c 2009-01-07 15:57:36.000000000 +0100 +@@ -90,7 +90,7 @@ + main() + { + int i, j; +- int result; ++ DWORD_PTR result; + int serialThreadsTotal; + pthread_t t[NUMTHREADS + 1]; + +@@ -112,7 +112,7 @@ + for (i = 1; i <= j; i++) + { + assert(pthread_join(t[i], (void **) &result) == 0); +- serialThreadsTotal += result; ++ serialThreadsTotal += (int)result; + } + + assert(serialThreadsTotal == BARRIERS - 1); +diff -Naur ../pthreads-snap-2004-06-22/tests/cancel2.c ./tests/cancel2.c +--- ../pthreads-snap-2004-06-22/tests/cancel2.c 2003-08-19 05:31:51.000000000 +0200 ++++ ./tests/cancel2.c 2009-01-07 15:57:36.000000000 +0100 +@@ -217,7 +217,7 @@ + for (i = 1; i <= NUMTHREADS; i++) + { + int fail = 0; +- int result = 0; ++ DWORD_PTR result = 0; + + assert(pthread_join(t[i], (void **) &result) == 0); + fail = (result != (int) PTHREAD_CANCELED); +diff -Naur ../pthreads-snap-2004-06-22/tests/cancel3.c ./tests/cancel3.c +--- ../pthreads-snap-2004-06-22/tests/cancel3.c 2004-05-17 03:38:03.000000000 +0200 ++++ ./tests/cancel3.c 2009-01-07 15:57:36.000000000 +0100 +@@ -173,7 +173,7 @@ + for (i = 1; i <= NUMTHREADS; i++) + { + int fail = 0; +- int result = 0; ++ DWORD_PTR result = 0; + + /* + * The thread does not contain any cancelation points, so +diff -Naur ../pthreads-snap-2004-06-22/tests/cancel4.c ./tests/cancel4.c +--- ../pthreads-snap-2004-06-22/tests/cancel4.c 2004-05-17 03:38:03.000000000 
+0200 ++++ ./tests/cancel4.c 2009-01-07 15:57:36.000000000 +0100 +@@ -173,7 +173,7 @@ + for (i = 1; i <= NUMTHREADS; i++) + { + int fail = 0; +- int result = 0; ++ DWORD_PTR result = 0; + + /* + * The thread does not contain any cancelation points, so +diff -Naur ../pthreads-snap-2004-06-22/tests/cancel5.c ./tests/cancel5.c +--- ../pthreads-snap-2004-06-22/tests/cancel5.c 2004-05-17 03:38:03.000000000 +0200 ++++ ./tests/cancel5.c 2009-01-07 15:57:36.000000000 +0100 +@@ -171,7 +171,7 @@ + for (i = 1; i <= NUMTHREADS; i++) + { + int fail = 0; +- int result = 0; ++ DWORD_PTR result = 0; + + /* + * The thread does not contain any cancelation points, so +diff -Naur ../pthreads-snap-2004-06-22/tests/cancel6a.c ./tests/cancel6a.c +--- ../pthreads-snap-2004-06-22/tests/cancel6a.c 2004-05-17 03:38:03.000000000 +0200 ++++ ./tests/cancel6a.c 2009-01-07 15:57:36.000000000 +0100 +@@ -161,7 +161,7 @@ + for (i = 1; i <= NUMTHREADS; i++) + { + int fail = 0; +- int result = 0; ++ DWORD_PTR result = 0; + + /* + * The thread does not contain any cancelation points, so +diff -Naur ../pthreads-snap-2004-06-22/tests/cancel6d.c ./tests/cancel6d.c +--- ../pthreads-snap-2004-06-22/tests/cancel6d.c 2004-05-17 03:38:03.000000000 +0200 ++++ ./tests/cancel6d.c 2009-01-07 15:57:36.000000000 +0100 +@@ -165,7 +165,7 @@ + for (i = 1; i <= NUMTHREADS; i++) + { + int fail = 0; +- int result = 0; ++ DWORD_PTR result = 0; + + /* + * The thread does not contain any cancelation points, so +diff -Naur ../pthreads-snap-2004-06-22/tests/cleanup0.c ./tests/cleanup0.c +--- ../pthreads-snap-2004-06-22/tests/cleanup0.c 2003-08-19 05:31:51.000000000 +0200 ++++ ./tests/cleanup0.c 2009-01-07 15:57:36.000000000 +0100 +@@ -180,7 +180,7 @@ + for (i = 1; i <= NUMTHREADS; i++) + { + int fail = 0; +- int result = 0; ++ DWORD_PTR result = 0; + + assert(pthread_join(t[i], (void **) &result) == 0); + +diff -Naur ../pthreads-snap-2004-06-22/tests/cleanup1.c ./tests/cleanup1.c +--- ../pthreads-snap-2004-06-22/tests/cleanup1.c 2003-08-19 05:31:51.000000000 +0200 ++++ ./tests/cleanup1.c 2009-01-07 15:57:36.000000000 +0100 +@@ -195,7 +195,7 @@ + for (i = 1; i <= NUMTHREADS; i++) + { + int fail = 0; +- int result = 0; ++ DWORD_PTR result = 0; + + assert(pthread_join(t[i], (void **) &result) == 0); + +diff -Naur ../pthreads-snap-2004-06-22/tests/cleanup2.c ./tests/cleanup2.c +--- ../pthreads-snap-2004-06-22/tests/cleanup2.c 2003-08-19 05:31:51.000000000 +0200 ++++ ./tests/cleanup2.c 2009-01-07 15:57:36.000000000 +0100 +@@ -169,7 +169,7 @@ + for (i = 1; i <= NUMTHREADS; i++) + { + int fail = 0; +- int result = 0; ++ DWORD_PTR result = 0; + + assert(pthread_join(t[i], (void **) &result) == 0); + +diff -Naur ../pthreads-snap-2004-06-22/tests/cleanup3.c ./tests/cleanup3.c +--- ../pthreads-snap-2004-06-22/tests/cleanup3.c 2003-08-19 05:31:51.000000000 +0200 ++++ ./tests/cleanup3.c 2009-01-07 15:57:36.000000000 +0100 +@@ -172,7 +172,7 @@ + for (i = 1; i <= NUMTHREADS; i++) + { + int fail = 0; +- int result = 0; ++ DWORD_PTR result = 0; + + assert(pthread_join(t[i], (void **) &result) == 0); + +diff -Naur ../pthreads-snap-2004-06-22/tests/condvar1_2.c ./tests/condvar1_2.c +--- ../pthreads-snap-2004-06-22/tests/condvar1_2.c 2003-08-19 05:31:51.000000000 +0200 ++++ ./tests/condvar1_2.c 2009-01-07 15:57:36.000000000 +0100 +@@ -89,7 +89,7 @@ + main() + { + int i, j, k; +- int result = -1; ++ DWORD_PTR result = -1; + pthread_t t; + + for (k = 0; k < NUM_LOOPS; k++) +diff -Naur ../pthreads-snap-2004-06-22/tests/condvar2_1.c ./tests/condvar2_1.c +--- 
../pthreads-snap-2004-06-22/tests/condvar2_1.c 2004-05-20 02:56:52.000000000 +0200 ++++ ./tests/condvar2_1.c 2009-01-07 15:57:36.000000000 +0100 +@@ -105,7 +105,7 @@ + { + int i; + pthread_t t[NUMTHREADS + 1]; +- int result = 0; ++ DWORD_PTR result = 0; + struct _timeb currSysTime; + const DWORD NANOSEC_PER_MILLISEC = 1000000; + +@@ -116,7 +116,7 @@ + /* get current system time */ + _ftime(&currSysTime); + +- abstime.tv_sec = currSysTime.time; ++ abstime.tv_sec = (long)currSysTime.time; + abstime.tv_nsec = NANOSEC_PER_MILLISEC * currSysTime.millitm; + + abstime.tv_sec += 5; +diff -Naur ../pthreads-snap-2004-06-22/tests/condvar2.c ./tests/condvar2.c +--- ../pthreads-snap-2004-06-22/tests/condvar2.c 2004-05-20 02:56:52.000000000 +0200 ++++ ./tests/condvar2.c 2009-01-07 15:57:36.000000000 +0100 +@@ -99,7 +99,7 @@ + /* get current system time */ + _ftime(&currSysTime); + +- abstime.tv_sec = currSysTime.time; ++ abstime.tv_sec = (long)currSysTime.time; + abstime.tv_nsec = NANOSEC_PER_MILLISEC * currSysTime.millitm; + + abstime.tv_sec += 5; +@@ -109,7 +109,7 @@ + assert(pthread_mutex_unlock(&mutex) == 0); + + { +- int result = pthread_cond_destroy(&cv); ++ DWORD_PTR result = pthread_cond_destroy(&cv); + if (result != 0) + { + fprintf(stderr, "Result = %s\n", error_string[result]); +diff -Naur ../pthreads-snap-2004-06-22/tests/condvar3_1.c ./tests/condvar3_1.c +--- ../pthreads-snap-2004-06-22/tests/condvar3_1.c 2004-05-20 02:56:52.000000000 +0200 ++++ ./tests/condvar3_1.c 2009-01-07 15:57:36.000000000 +0100 +@@ -125,7 +125,7 @@ + { + int i; + pthread_t t[NUMTHREADS + 1]; +- int result = 0; ++ DWORD_PTR result = 0; + struct _timeb currSysTime; + const DWORD NANOSEC_PER_MILLISEC = 1000000; + +@@ -137,7 +137,7 @@ + /* get current system time */ + _ftime(&currSysTime); + +- abstime.tv_sec = currSysTime.time; ++ abstime.tv_sec = (long)currSysTime.time; + abstime.tv_nsec = NANOSEC_PER_MILLISEC * currSysTime.millitm; + + abstime.tv_sec += 5; +diff -Naur ../pthreads-snap-2004-06-22/tests/condvar3_2.c ./tests/condvar3_2.c +--- ../pthreads-snap-2004-06-22/tests/condvar3_2.c 2004-05-20 02:56:52.000000000 +0200 ++++ ./tests/condvar3_2.c 2009-01-07 15:57:36.000000000 +0100 +@@ -127,7 +127,7 @@ + { + int i; + pthread_t t[NUMTHREADS + 1]; +- int result = 0; ++ DWORD_PTR result = 0; + struct _timeb currSysTime; + const DWORD NANOSEC_PER_MILLISEC = 1000000; + +@@ -138,7 +138,7 @@ + /* get current system time */ + _ftime(&currSysTime); + +- abstime.tv_sec = abstime.tv_sec = currSysTime.time + 5; ++ abstime.tv_sec = abstime.tv_sec = (long)currSysTime.time + 5; + abstime.tv_nsec = abstime2.tv_nsec = NANOSEC_PER_MILLISEC * currSysTime.millitm; + + assert(pthread_mutex_lock(&mutex) == 0); +diff -Naur ../pthreads-snap-2004-06-22/tests/condvar3_3.c ./tests/condvar3_3.c +--- ../pthreads-snap-2004-06-22/tests/condvar3_3.c 2003-08-19 05:31:51.000000000 +0200 ++++ ./tests/condvar3_3.c 2009-01-07 15:57:36.000000000 +0100 +@@ -96,7 +96,7 @@ + /* get current system time */ + _ftime(&currSysTime); + +- abstime.tv_sec = currSysTime.time; ++ abstime.tv_sec = (long)currSysTime.time; + abstime.tv_nsec = NANOSEC_PER_MILLISEC * currSysTime.millitm; + abstime.tv_sec += 1; + +@@ -120,7 +120,7 @@ + + assert(pthread_mutex_lock(&mtx) == 0); + +- abstime.tv_sec = currSysTime.time; ++ abstime.tv_sec = (long)currSysTime.time; + abstime.tv_nsec = NANOSEC_PER_MILLISEC * currSysTime.millitm; + abstime.tv_sec += 1; + +diff -Naur ../pthreads-snap-2004-06-22/tests/condvar3.c ./tests/condvar3.c +--- ../pthreads-snap-2004-06-22/tests/condvar3.c 
2003-08-19 05:31:51.000000000 +0200 ++++ ./tests/condvar3.c 2009-01-07 15:57:36.000000000 +0100 +@@ -127,7 +127,7 @@ + /* get current system time */ + _ftime(&currSysTime); + +- abstime.tv_sec = currSysTime.time; ++ abstime.tv_sec = (long)currSysTime.time; + abstime.tv_nsec = NANOSEC_PER_MILLISEC * currSysTime.millitm; + + assert(pthread_create(&t[1], NULL, mythread, (void *) 1) == 0); +diff -Naur ../pthreads-snap-2004-06-22/tests/condvar4.c ./tests/condvar4.c +--- ../pthreads-snap-2004-06-22/tests/condvar4.c 2003-08-19 05:31:51.000000000 +0200 ++++ ./tests/condvar4.c 2009-01-07 15:57:36.000000000 +0100 +@@ -132,7 +132,7 @@ + /* get current system time */ + _ftime(&currSysTime); + +- abstime.tv_sec = currSysTime.time; ++ abstime.tv_sec = (long)currSysTime.time; + abstime.tv_nsec = NANOSEC_PER_MILLISEC * currSysTime.millitm; + + abstime.tv_sec += 5; +@@ -145,7 +145,7 @@ + + _ftime(&currSysTime); + +- abstime.tv_sec = currSysTime.time; ++ abstime.tv_sec = (long)currSysTime.time; + abstime.tv_nsec = NANOSEC_PER_MILLISEC * currSysTime.millitm; + + abstime.tv_sec += 5; +diff -Naur ../pthreads-snap-2004-06-22/tests/condvar5.c ./tests/condvar5.c +--- ../pthreads-snap-2004-06-22/tests/condvar5.c 2003-08-19 05:31:51.000000000 +0200 ++++ ./tests/condvar5.c 2009-01-07 15:57:36.000000000 +0100 +@@ -131,7 +131,7 @@ + /* get current system time */ + _ftime(&currSysTime); + +- abstime.tv_sec = currSysTime.time; ++ abstime.tv_sec = (long)currSysTime.time; + abstime.tv_nsec = NANOSEC_PER_MILLISEC * currSysTime.millitm; + + abstime.tv_sec += 5; +@@ -144,7 +144,7 @@ + + _ftime(&currSysTime); + +- abstime.tv_sec = currSysTime.time; ++ abstime.tv_sec = (long)currSysTime.time; + abstime.tv_nsec = NANOSEC_PER_MILLISEC * currSysTime.millitm; + + abstime.tv_sec += 5; +diff -Naur ../pthreads-snap-2004-06-22/tests/condvar6.c ./tests/condvar6.c +--- ../pthreads-snap-2004-06-22/tests/condvar6.c 2003-08-19 05:31:51.000000000 +0200 ++++ ./tests/condvar6.c 2009-01-07 15:57:36.000000000 +0100 +@@ -159,7 +159,7 @@ + + _ftime(&currSysTime); + +- abstime.tv_sec = currSysTime.time; ++ abstime.tv_sec = (long)currSysTime.time; + abstime.tv_nsec = NANOSEC_PER_MILLISEC * currSysTime.millitm; + + abstime.tv_sec += 5; +diff -Naur ../pthreads-snap-2004-06-22/tests/condvar7.c ./tests/condvar7.c +--- ../pthreads-snap-2004-06-22/tests/condvar7.c 2003-08-19 05:31:51.000000000 +0200 ++++ ./tests/condvar7.c 2009-01-07 15:57:36.000000000 +0100 +@@ -169,7 +169,7 @@ + + _ftime(&currSysTime); + +- abstime.tv_sec = currSysTime.time; ++ abstime.tv_sec = (long)currSysTime.time; + abstime.tv_nsec = NANOSEC_PER_MILLISEC * currSysTime.millitm; + + abstime.tv_sec += 10; +diff -Naur ../pthreads-snap-2004-06-22/tests/condvar8.c ./tests/condvar8.c +--- ../pthreads-snap-2004-06-22/tests/condvar8.c 2003-08-19 05:31:51.000000000 +0200 ++++ ./tests/condvar8.c 2009-01-07 15:57:36.000000000 +0100 +@@ -166,7 +166,7 @@ + + _ftime(&currSysTime); + +- abstime.tv_sec = currSysTime.time; ++ abstime.tv_sec = (long)currSysTime.time; + abstime.tv_nsec = NANOSEC_PER_MILLISEC * currSysTime.millitm; + + abstime.tv_sec += 10; +diff -Naur ../pthreads-snap-2004-06-22/tests/condvar9.c ./tests/condvar9.c +--- ../pthreads-snap-2004-06-22/tests/condvar9.c 2003-08-19 05:31:51.000000000 +0200 ++++ ./tests/condvar9.c 2009-01-07 15:57:36.000000000 +0100 +@@ -172,7 +172,7 @@ + + _ftime(&currSysTime); + +- abstime.tv_sec = currSysTime.time; ++ abstime.tv_sec = (long)currSysTime.time; + abstime.tv_nsec = NANOSEC_PER_MILLISEC * currSysTime.millitm; + + abstime.tv_sec += 5; +diff -Naur 
../pthreads-snap-2004-06-22/tests/context1.c ./tests/context1.c +--- ../pthreads-snap-2004-06-22/tests/context1.c 2004-05-20 02:56:52.000000000 +0200 ++++ ./tests/context1.c 2009-01-07 15:57:36.000000000 +0100 +@@ -125,7 +125,11 @@ + /* + *_x86 only!!! + */ ++#if defined(_M_IX86) + context.Eip = (DWORD) anotherEnding; ++#else ++ context.Rip = (DWORD64) anotherEnding; ++#endif + SetThreadContext(hThread, &context); + ResumeThread(hThread); + } +diff -Naur ../pthreads-snap-2004-06-22/tests/delay2.c ./tests/delay2.c +--- ../pthreads-snap-2004-06-22/tests/delay2.c 2003-08-19 05:31:51.000000000 +0200 ++++ ./tests/delay2.c 2009-01-07 15:57:36.000000000 +0100 +@@ -65,7 +65,7 @@ + main(int argc, char * argv[]) + { + pthread_t t; +- int result = 0; ++ DWORD_PTR result = 0; + + assert(pthread_mutex_lock(&mx) == 0); + +diff -Naur ../pthreads-snap-2004-06-22/tests/exception1.c ./tests/exception1.c +--- ../pthreads-snap-2004-06-22/tests/exception1.c 2003-08-19 05:31:51.000000000 +0200 ++++ ./tests/exception1.c 2009-01-07 15:57:36.000000000 +0100 +@@ -226,7 +226,7 @@ + for (i = 0; i < NUMTHREADS; i++) + { + int fail = 0; +- int result = 0; ++ DWORD_PTR result = 0; + + /* Canceled thread */ + assert(pthread_join(ct[i], (void **) &result) == 0); +diff -Naur ../pthreads-snap-2004-06-22/tests/GNUmakefile ./tests/GNUmakefile +--- ../pthreads-snap-2004-06-22/tests/GNUmakefile 2004-06-22 08:12:55.000000000 +0200 ++++ ./tests/GNUmakefile 2009-01-07 15:57:36.000000000 +0100 +@@ -64,7 +64,7 @@ + DLL = pthread$(GCX).dll + QAPC = ../QueueUserAPCEx/User/quserex.dll + +-COPYFILES = $(HDR) $(LIB) $(DLL) $(QAPC) ++COPYFILES = $(HDR) $(LIB) $(DLL) + + # If a test case returns a non-zero exit code to the shell, make will + # stop. +diff -Naur ../pthreads-snap-2004-06-22/tests/join0.c ./tests/join0.c +--- ../pthreads-snap-2004-06-22/tests/join0.c 2003-08-19 05:31:51.000000000 +0200 ++++ ./tests/join0.c 2009-01-07 15:57:36.000000000 +0100 +@@ -53,7 +53,7 @@ + main(int argc, char * argv[]) + { + pthread_t id; +- int result; ++ DWORD_PTR result; + + /* Create a single thread and wait for it to exit. */ + assert(pthread_create(&id, NULL, func, (void *) 123) == 0); +diff -Naur ../pthreads-snap-2004-06-22/tests/join1.c ./tests/join1.c +--- ../pthreads-snap-2004-06-22/tests/join1.c 2003-08-19 05:31:51.000000000 +0200 ++++ ./tests/join1.c 2009-01-07 15:57:36.000000000 +0100 +@@ -56,7 +56,7 @@ + { + pthread_t id[4]; + int i; +- int result; ++ DWORD_PTR result; + + /* Create a few threads and then exit. */ + for (i = 0; i < 4; i++) +diff -Naur ../pthreads-snap-2004-06-22/tests/join2.c ./tests/join2.c +--- ../pthreads-snap-2004-06-22/tests/join2.c 2003-08-19 05:31:51.000000000 +0200 ++++ ./tests/join2.c 2009-01-07 15:57:36.000000000 +0100 +@@ -50,7 +50,7 @@ + { + pthread_t id[4]; + int i; +- int result; ++ DWORD_PTR result; + + /* Create a few threads and then exit. */ + for (i = 0; i < 4; i++) +diff -Naur ../pthreads-snap-2004-06-22/tests/join3.c ./tests/join3.c +--- ../pthreads-snap-2004-06-22/tests/join3.c 2004-05-22 03:17:58.000000000 +0200 ++++ ./tests/join3.c 2009-01-07 15:57:36.000000000 +0100 +@@ -50,7 +50,7 @@ + { + pthread_t id[4]; + int i; +- int result; ++ DWORD_PTR result; + + /* Create a few threads and then exit. 
*/ + for (i = 0; i < 4; i++) +diff -Naur ../pthreads-snap-2004-06-22/tests/mutex6e.c ./tests/mutex6e.c +--- ../pthreads-snap-2004-06-22/tests/mutex6e.c 2003-08-19 05:31:51.000000000 +0200 ++++ ./tests/mutex6e.c 2009-01-07 15:57:36.000000000 +0100 +@@ -74,7 +74,7 @@ + main() + { + pthread_t t; +- int result = 0; ++ DWORD_PTR result = 0; + int mxType = -1; + + assert(pthread_mutexattr_init(&mxAttr) == 0); +diff -Naur ../pthreads-snap-2004-06-22/tests/mutex6es.c ./tests/mutex6es.c +--- ../pthreads-snap-2004-06-22/tests/mutex6es.c 2004-06-22 08:12:55.000000000 +0200 ++++ ./tests/mutex6es.c 2009-01-07 15:57:36.000000000 +0100 +@@ -73,7 +73,7 @@ + main() + { + pthread_t t; +- int result = 0; ++ DWORD_PTR result = 0; + + assert(mutex == PTHREAD_ERRORCHECK_MUTEX_INITIALIZER); + +diff -Naur ../pthreads-snap-2004-06-22/tests/mutex6r.c ./tests/mutex6r.c +--- ../pthreads-snap-2004-06-22/tests/mutex6r.c 2003-08-19 05:31:51.000000000 +0200 ++++ ./tests/mutex6r.c 2009-01-07 15:57:36.000000000 +0100 +@@ -73,7 +73,7 @@ + main() + { + pthread_t t; +- int result = 0; ++ DWORD_PTR result = 0; + int mxType = -1; + + assert(pthread_mutexattr_init(&mxAttr) == 0); +diff -Naur ../pthreads-snap-2004-06-22/tests/mutex6rs.c ./tests/mutex6rs.c +--- ../pthreads-snap-2004-06-22/tests/mutex6rs.c 2004-06-22 08:12:55.000000000 +0200 ++++ ./tests/mutex6rs.c 2009-01-07 15:57:36.000000000 +0100 +@@ -72,7 +72,7 @@ + main() + { + pthread_t t; +- int result = 0; ++ DWORD_PTR result = 0; + + assert(mutex == PTHREAD_RECURSIVE_MUTEX_INITIALIZER); + +diff -Naur ../pthreads-snap-2004-06-22/tests/mutex7e.c ./tests/mutex7e.c +--- ../pthreads-snap-2004-06-22/tests/mutex7e.c 2003-08-19 05:31:51.000000000 +0200 ++++ ./tests/mutex7e.c 2009-01-07 15:57:36.000000000 +0100 +@@ -74,7 +74,7 @@ + main() + { + pthread_t t; +- int result = 0; ++ DWORD_PTR result = 0; + int mxType = -1; + + assert(pthread_mutexattr_init(&mxAttr) == 0); +diff -Naur ../pthreads-snap-2004-06-22/tests/mutex7r.c ./tests/mutex7r.c +--- ../pthreads-snap-2004-06-22/tests/mutex7r.c 2003-08-19 05:31:51.000000000 +0200 ++++ ./tests/mutex7r.c 2009-01-07 15:57:36.000000000 +0100 +@@ -73,7 +73,7 @@ + main() + { + pthread_t t; +- int result = 0; ++ DWORD_PTR result = 0; + int mxType = -1; + + assert(pthread_mutexattr_init(&mxAttr) == 0); +diff -Naur ../pthreads-snap-2004-06-22/tests/mutex8.c ./tests/mutex8.c +--- ../pthreads-snap-2004-06-22/tests/mutex8.c 2002-02-20 05:39:56.000000000 +0100 ++++ ./tests/mutex8.c 2009-01-07 15:57:36.000000000 +0100 +@@ -49,7 +49,7 @@ + + _ftime(&currSysTime); + +- abstime.tv_sec = currSysTime.time; ++ abstime.tv_sec = (long)currSysTime.time; + abstime.tv_nsec = NANOSEC_PER_MILLISEC * currSysTime.millitm; + + abstime.tv_sec += 1; +diff -Naur ../pthreads-snap-2004-06-22/tests/mutex8e.c ./tests/mutex8e.c +--- ../pthreads-snap-2004-06-22/tests/mutex8e.c 2002-02-20 05:39:56.000000000 +0100 ++++ ./tests/mutex8e.c 2009-01-07 15:57:36.000000000 +0100 +@@ -57,7 +57,7 @@ + + _ftime(&currSysTime); + +- abstime.tv_sec = currSysTime.time; ++ abstime.tv_sec = (long)currSysTime.time; + abstime.tv_nsec = NANOSEC_PER_MILLISEC * currSysTime.millitm; + + abstime.tv_sec += 1; +diff -Naur ../pthreads-snap-2004-06-22/tests/mutex8n.c ./tests/mutex8n.c +--- ../pthreads-snap-2004-06-22/tests/mutex8n.c 2002-02-20 05:39:56.000000000 +0100 ++++ ./tests/mutex8n.c 2009-01-07 15:57:36.000000000 +0100 +@@ -57,7 +57,7 @@ + + _ftime(&currSysTime); + +- abstime.tv_sec = currSysTime.time; ++ abstime.tv_sec = (long)currSysTime.time; + abstime.tv_nsec = NANOSEC_PER_MILLISEC * 
currSysTime.millitm; + + abstime.tv_sec += 1; +diff -Naur ../pthreads-snap-2004-06-22/tests/mutex8r.c ./tests/mutex8r.c +--- ../pthreads-snap-2004-06-22/tests/mutex8r.c 2002-02-20 05:39:56.000000000 +0100 ++++ ./tests/mutex8r.c 2009-01-07 15:57:36.000000000 +0100 +@@ -57,7 +57,7 @@ + + _ftime(&currSysTime); + +- abstime.tv_sec = currSysTime.time; ++ abstime.tv_sec = (long)currSysTime.time; + abstime.tv_nsec = NANOSEC_PER_MILLISEC * currSysTime.millitm; + + abstime.tv_sec += 1; +diff -Naur ../pthreads-snap-2004-06-22/tests/rwlock2_t.c ./tests/rwlock2_t.c +--- ../pthreads-snap-2004-06-22/tests/rwlock2_t.c 2003-08-19 05:31:51.000000000 +0200 ++++ ./tests/rwlock2_t.c 2009-01-07 15:57:36.000000000 +0100 +@@ -55,7 +55,7 @@ + + _ftime(&currSysTime); + +- abstime.tv_sec = currSysTime.time; ++ abstime.tv_sec = (long)currSysTime.time; + abstime.tv_nsec = NANOSEC_PER_MILLISEC * currSysTime.millitm; + + abstime.tv_sec += 1; +diff -Naur ../pthreads-snap-2004-06-22/tests/rwlock3_t.c ./tests/rwlock3_t.c +--- ../pthreads-snap-2004-06-22/tests/rwlock3_t.c 2003-08-19 05:31:51.000000000 +0200 ++++ ./tests/rwlock3_t.c 2009-01-07 15:57:36.000000000 +0100 +@@ -68,7 +68,7 @@ + + _ftime(&currSysTime); + +- abstime.tv_sec = currSysTime.time; ++ abstime.tv_sec = (long)currSysTime.time; + abstime.tv_nsec = NANOSEC_PER_MILLISEC * currSysTime.millitm; + + abstime.tv_sec += 1; +diff -Naur ../pthreads-snap-2004-06-22/tests/rwlock4_t.c ./tests/rwlock4_t.c +--- ../pthreads-snap-2004-06-22/tests/rwlock4_t.c 2003-08-19 05:31:51.000000000 +0200 ++++ ./tests/rwlock4_t.c 2009-01-07 15:57:36.000000000 +0100 +@@ -68,7 +68,7 @@ + + _ftime(&currSysTime); + +- abstime.tv_sec = currSysTime.time; ++ abstime.tv_sec = (long)currSysTime.time; + abstime.tv_nsec = NANOSEC_PER_MILLISEC * currSysTime.millitm; + + abstime.tv_sec += 1; +diff -Naur ../pthreads-snap-2004-06-22/tests/rwlock5_t.c ./tests/rwlock5_t.c +--- ../pthreads-snap-2004-06-22/tests/rwlock5_t.c 2003-08-19 05:31:51.000000000 +0200 ++++ ./tests/rwlock5_t.c 2009-01-07 15:57:36.000000000 +0100 +@@ -70,7 +70,7 @@ + + _ftime(&currSysTime); + +- abstime.tv_sec = currSysTime.time; ++ abstime.tv_sec = (long)currSysTime.time; + abstime.tv_nsec = NANOSEC_PER_MILLISEC * currSysTime.millitm; + + abstime.tv_sec += 1; +diff -Naur ../pthreads-snap-2004-06-22/tests/rwlock6.c ./tests/rwlock6.c +--- ../pthreads-snap-2004-06-22/tests/rwlock6.c 2003-08-19 05:31:51.000000000 +0200 ++++ ./tests/rwlock6.c 2009-01-07 15:57:36.000000000 +0100 +@@ -77,9 +77,9 @@ + pthread_t wrt1; + pthread_t wrt2; + pthread_t rdt; +- int wr1Result = 0; +- int wr2Result = 0; +- int rdResult = 0; ++ DWORD_PTR wr1Result = 0; ++ DWORD_PTR wr2Result = 0; ++ DWORD_PTR rdResult = 0; + + bankAccount = 0; + +diff -Naur ../pthreads-snap-2004-06-22/tests/rwlock6_t2.c ./tests/rwlock6_t2.c +--- ../pthreads-snap-2004-06-22/tests/rwlock6_t2.c 2003-08-19 05:31:51.000000000 +0200 ++++ ./tests/rwlock6_t2.c 2009-01-07 15:57:36.000000000 +0100 +@@ -86,15 +86,15 @@ + pthread_t wrt1; + pthread_t wrt2; + pthread_t rdt; +- int wr1Result = 0; +- int wr2Result = 0; +- int rdResult = 0; ++ DWORD_PTR wr1Result = 0; ++ DWORD_PTR wr2Result = 0; ++ DWORD_PTR rdResult = 0; + struct _timeb currSysTime; + const DWORD NANOSEC_PER_MILLISEC = 1000000; + + _ftime(&currSysTime); + +- abstime.tv_sec = currSysTime.time; ++ abstime.tv_sec = (long)currSysTime.time; + abstime.tv_nsec = NANOSEC_PER_MILLISEC * currSysTime.millitm; + + abstime.tv_sec += 1; +diff -Naur ../pthreads-snap-2004-06-22/tests/rwlock6_t.c ./tests/rwlock6_t.c +--- 
../pthreads-snap-2004-06-22/tests/rwlock6_t.c 2003-08-19 05:31:51.000000000 +0200 ++++ ./tests/rwlock6_t.c 2009-01-07 15:57:36.000000000 +0100 +@@ -67,7 +67,7 @@ + + _ftime(&currSysTime); + +- abstime.tv_sec = currSysTime.time; ++ abstime.tv_sec = (long)currSysTime.time; + abstime.tv_nsec = NANOSEC_PER_MILLISEC * currSysTime.millitm; + + +@@ -95,10 +95,10 @@ + pthread_t wrt2; + pthread_t rdt1; + pthread_t rdt2; +- int wr1Result = 0; +- int wr2Result = 0; +- int rd1Result = 0; +- int rd2Result = 0; ++ DWORD_PTR wr1Result = 0; ++ DWORD_PTR wr2Result = 0; ++ DWORD_PTR rd1Result = 0; ++ DWORD_PTR rd2Result = 0; + + bankAccount = 0; + +diff -Naur ../pthreads-snap-2004-06-22/tests/semaphore1.c ./tests/semaphore1.c +--- ../pthreads-snap-2004-06-22/tests/semaphore1.c 2003-08-19 05:31:51.000000000 +0200 ++++ ./tests/semaphore1.c 2009-01-07 15:57:36.000000000 +0100 +@@ -117,7 +117,7 @@ + { + pthread_t t; + sem_t s; +- int result; ++ DWORD_PTR result; + + assert(pthread_create(&t, NULL, thr, NULL) == 0); + assert(pthread_join(t, (void **)&result) == 0); +diff -Naur ../pthreads-snap-2004-06-22/tests/spin4.c ./tests/spin4.c +--- ../pthreads-snap-2004-06-22/tests/spin4.c 2003-08-19 05:31:51.000000000 +0200 ++++ ./tests/spin4.c 2009-01-07 15:57:36.000000000 +0100 +@@ -63,7 +63,7 @@ + int + main() + { +- long result = 0; ++ DWORD_PTR result = 0; + pthread_t t; + int CPUs; + struct _timeb sysTime; +diff -Naur ../pthreads-snap-2004-06-22/tests/tsd1.c ./tests/tsd1.c +--- ../pthreads-snap-2004-06-22/tests/tsd1.c 2003-08-19 05:31:51.000000000 +0200 ++++ ./tests/tsd1.c 2009-01-07 15:57:36.000000000 +0100 +@@ -171,7 +171,7 @@ + */ + for (i = 1; i < 10; i++) + { +- int result = 0; ++ DWORD_PTR result = 0; + + assert(pthread_join(thread[i], (void **) &result) == 0); + } diff --git a/src/win32/patches/pthreads.patch b/src/win32/patches/pthreads.patch new file mode 100644 index 00000000..3f611373 --- /dev/null +++ b/src/win32/patches/pthreads.patch @@ -0,0 +1,95 @@ +Index: GNUmakefile +--- ../tmp/pthreads-snap-2004-06-22/GNUmakefile 2004-05-19 17:56:52.000000000 -0700 ++++ ./GNUmakefile 2006-06-27 05:48:10.000000000 -0700 +@@ -408,16 +408,16 @@ + @ $(MAKE) clean GC + + GC: +- $(MAKE) CC=gcc CLEANUP_FLAGS="$(GC_CFLAGS)" OBJ="$(DLL_OBJS)" $(GC_DLL) ++ $(MAKE) CC=mingw32-gcc CLEANUP_FLAGS="$(GC_CFLAGS)" OBJ="$(DLL_OBJS)" $(GC_DLL) + + GCE: +- $(MAKE) CC=g++ CLEANUP_FLAGS="$(GCE_CFLAGS)" OBJ="$(DLL_OBJS)" $(GCE_DLL) ++ $(MAKE) CC=mingw32-g++ CLEANUP_FLAGS="$(GCE_CFLAGS)" OBJ="$(DLL_OBJS)" $(GCE_DLL) + + GC-inlined: +- $(MAKE) CC=gcc CLEANUP_FLAGS="$(GC_CFLAGS)" OBJ="$(DLL_INLINED_OBJS)" $(GC_INLINED_STAMP) ++ $(MAKE) CC=mingw32-gcc CLEANUP_FLAGS="$(GC_CFLAGS)" OBJ="$(DLL_INLINED_OBJS)" $(GC_INLINED_STAMP) + + GCE-inlined: +- $(MAKE) CC=g++ CLEANUP_FLAGS="$(GCE_CFLAGS)" OBJ="$(DLL_INLINED_OBJS)" $(GCE_INLINED_STAMP) ++ $(MAKE) CC=mingw32-g++ CLEANUP_FLAGS="$(GCE_CFLAGS)" OBJ="$(DLL_INLINED_OBJS)" $(GCE_INLINED_STAMP) + + tests: + @ cd tests +@@ -436,24 +436,24 @@ + + $(GC_DLL): $(DLL_OBJS) + $(CC) $(OPT) -shared -o $(GC_DLL) $(DLL_OBJS) $(LFLAGS) +- dlltool -z pthread.def $(DLL_OBJS) +- dlltool -k --dllname $@ --output-lib $(GC_LIB) --def $(PTHREAD_DEF) ++ mingw32-dlltool -z pthread.def $(DLL_OBJS) ++ mingw32-dlltool -k --dllname $@ --output-lib $(GC_LIB) --def $(PTHREAD_DEF) + + $(GCE_DLL): $(DLL_OBJS) + $(CC) $(OPT) -mthreads -shared -o $(GCE_DLL) $(DLL_OBJS) $(LFLAGS) +- dlltool -z pthread.def $(DLL_OBJS) +- dlltool -k --dllname $@ --output-lib $(GCE_LIB) --def $(PTHREAD_DEF) ++ mingw32-dlltool -z pthread.def 
$(DLL_OBJS) ++ mingw32-dlltool -k --dllname $@ --output-lib $(GCE_LIB) --def $(PTHREAD_DEF) + + $(GC_INLINED_STAMP): $(DLL_INLINED_OBJS) + $(CC) $(OPT) -shared -o $(GC_DLL) $(DLL_INLINED_OBJS) $(LFLAGS) +- dlltool -z pthread.def $(DLL_INLINED_OBJS) +- dlltool -k --dllname $(GC_DLL) --output-lib $(GC_LIB) --def $(PTHREAD_DEF) ++ mingw32-dlltool -z pthread.def $(DLL_INLINED_OBJS) ++ mingw32-dlltool -k --dllname $(GC_DLL) --output-lib $(GC_LIB) --def $(PTHREAD_DEF) + echo touched > $(GC_INLINED_STAMP) + + $(GCE_INLINED_STAMP): $(DLL_INLINED_OBJS) + $(CC) $(OPT) -mthreads -shared -o $(GCE_DLL) $(DLL_INLINED_OBJS) $(LFLAGS) +- dlltool -z pthread.def $(DLL_INLINED_OBJS) +- dlltool -k --dllname $(GCE_DLL) --output-lib $(GCE_LIB) --def $(PTHREAD_DEF) ++ mingw32-dlltool -z pthread.def $(DLL_INLINED_OBJS) ++ mingw32-dlltool -k --dllname $(GCE_DLL) --output-lib $(GCE_LIB) --def $(PTHREAD_DEF) + echo touched > $(GCE_INLINED_STAMP) + + clean: +Index: ptw32_semwait.c +--- ../tmp/pthreads-snap-2004-06-22/ptw32_semwait.c 2004-05-17 00:59:47.000000000 -0700 ++++ ./ptw32_semwait.c 2006-06-27 05:52:59.000000000 -0700 +@@ -41,7 +41,7 @@ + #include "implement.h" + + +-INLINE int ++int + ptw32_semwait (sem_t * sem) + /* + * ------------------------------------------------------ +--- /tmp/pthread_getspecific.c.bak 2008-07-12 17:26:03.000000000 +0200 ++++ pthread_getspecific.c 2008-07-12 17:26:03.000000000 +0200 +@@ -63,13 +63,18 @@ + * ------------------------------------------------------ + */ + { +- int lasterror = GetLastError (); +- int lastWSAerror = WSAGetLastError (); ++ void *ptr; + +- void *ptr = TlsGetValue (key->key); ++ if (key == NULL) { ++ ptr = NULL; ++ } else { ++ int lasterror = GetLastError (); ++ int lastWSAerror = WSAGetLastError (); + +- SetLastError (lasterror); +- WSASetLastError (lastWSAerror); ++ ptr = TlsGetValue(key->key); + ++ SetLastError(lasterror); ++ WSASetLastError(lastWSAerror); ++ } + return ptr; + } diff --git a/src/win32/patches/qt4-compilation-see.patch b/src/win32/patches/qt4-compilation-see.patch new file mode 100644 index 00000000..55582caa --- /dev/null +++ b/src/win32/patches/qt4-compilation-see.patch @@ -0,0 +1,21 @@ +diff -rup a/src/corelib/tools/qsimd_p.h b/src/corelib/tools/qsimd_p.h +--- a/src/corelib/tools/qsimd_p.h 2012-11-23 11:09:55.000000000 +0100 ++++ b/src/corelib/tools/qsimd_p.h 2014-07-22 17:11:48.552071394 +0200 +@@ -48,7 +48,7 @@ + QT_BEGIN_HEADER + + +-#if defined(QT_NO_MAC_XARCH) || (defined(Q_OS_DARWIN) && (defined(__ppc__) || defined(__ppc64__))) ++//#if defined(QT_NO_MAC_XARCH) || (defined(Q_OS_DARWIN) && (defined(__ppc__) || defined(__ppc64__))) + // Disable MMX and SSE on Mac/PPC builds, or if the compiler + // does not support -Xarch argument passing + #undef QT_HAVE_SSE +@@ -60,7 +60,7 @@ QT_BEGIN_HEADER + #undef QT_HAVE_AVX + #undef QT_HAVE_3DNOW + #undef QT_HAVE_MMX +-#endif ++//#endif + + // SSE intrinsics + #if defined(QT_HAVE_SSE2) && (defined(__SSE2__) || defined(Q_CC_MSVC)) diff --git a/src/win32/patches/qt4-compilation.patch b/src/win32/patches/qt4-compilation.patch new file mode 100644 index 00000000..03488b96 --- /dev/null +++ b/src/win32/patches/qt4-compilation.patch @@ -0,0 +1,45 @@ +diff --git a/src/gui/widgets/qdialogbuttonbox.cpp b/src/gui/widgets/qdialogbuttonbox.cpp +index b0f14ca..8e69e5f 100644 (file) +--- a/src/gui/widgets/qdialogbuttonbox.cpp ++++ b/src/gui/widgets/qdialogbuttonbox.cpp +@@ -212,7 +212,7 @@ static QDialogButtonBox::ButtonRole roleFor(QDialogButtonBox::StandardButton but + return QDialogButtonBox::InvalidRole; 
+ } + +-static const int layouts[2][5][14] = ++static const uint layouts[2][5][14] = + { + // Qt::Horizontal + { +@@ -407,7 +407,7 @@ void QDialogButtonBoxPrivate::layoutButtons() + tmpPolicy = 4; // Mac modeless + } + +- const int *currentLayout = layouts[orientation == Qt::Vertical][tmpPolicy]; ++ const uint *currentLayout = layouts[orientation == Qt::Vertical][tmpPolicy]; + + if (center) + buttonLayout->addStretch(); +diff --git a/src/opengl/gl2paintengineex/qtriangulator.cpp b/src/opengl/gl2paintengineex/qtriangulator.cpp +index 3e9dbb8..e8d7db5 100644 (file) +--- a/src/opengl/gl2paintengineex/qtriangulator.cpp ++++ b/src/opengl/gl2paintengineex/qtriangulator.cpp +@@ -1710,7 +1710,7 @@ void QTriangulator::ComplexToSimple::initEdges() + } else { + Q_ASSERT(i + 1 < m_parent->m_indices.size()); + // {node, from, to, next, previous, winding, mayIntersect, pointingUp, originallyPointingUp} +- Edge edge = {0, m_parent->m_indices.at(i), m_parent->m_indices.at(i + 1), -1, -1, 0, true, false, false}; ++ Edge edge = {0, int(m_parent->m_indices.at(i)), int(m_parent->m_indices.at(i + 1)), -1, -1, 0, true, false, false}; + m_edges.add(edge); + } + } +--- ./src/plugins/accessible/widgets/itemviews.cpp 2018-04-11 19:01:29.073721147 +0000 ++++ /home/bsbuild/dev/git/depkgs-mingw32/src/qt-everywhere-opensource-src-4.8.4/src/plugins/accessible/widgets/itemviews.cpp 2012-11-23 10:09:55.000000000 +0000 +@@ -393,7 +393,7 @@ + QModelIndex index = view()->model()->index(0, column, view()->rootIndex()); + if (!index.isValid() || view()->selectionMode() & QAbstractItemView::NoSelection) + return false; +- view()->selectionModel()->select(index, QItemSelectionModel::Columns & QItemSelectionModel::Deselect); ++ view()->selectionModel()->select(index, (QItemSelectionModel::SelectionFlag)(QItemSelectionModel::Columns & QItemSelectionModel::Deselect)); + return true; + } diff --git a/src/win32/patches/qt4-intrinsics.patch b/src/win32/patches/qt4-intrinsics.patch new file mode 100644 index 00000000..884c421b --- /dev/null +++ b/src/win32/patches/qt4-intrinsics.patch @@ -0,0 +1,13 @@ +diff --git a/src/corelib/tools/qsimd.cpp b/src/corelib/tools/qsimd.cpp +index 540b615..1ce1e00 100644 +--- a/src/corelib/tools/qsimd.cpp ++++ b/src/corelib/tools/qsimd.cpp +@@ -47,7 +47,7 @@ + #include + #endif + +-#if defined(Q_OS_WIN64) && !defined(Q_CC_GNU) ++#if defined(Q_OS_WIN64) || defined(Q_OS_WIN32) + #include + #endif + diff --git a/src/win32/patches/qt4-widget-ui.patch b/src/win32/patches/qt4-widget-ui.patch new file mode 100644 index 00000000..ef8589c3 --- /dev/null +++ b/src/win32/patches/qt4-widget-ui.patch @@ -0,0 +1,12 @@ +diff -rup a/src/3rdparty/webkit/Source/WebKit/qt/tests/hybridPixmap/widget.ui b/src/3rdparty/webkit/Source/WebKit/qt/tests/hybridPixmap/widget.ui +--- a/src/3rdparty/webkit/Source/WebKit/qt/tests/hybridPixmap/widget.ui 2014-07-22 17:04:10.504085732 +0200 ++++ b/src/3rdparty/webkit/Source/WebKit/qt/tests/hybridPixmap/widget.ui 2014-07-22 17:01:08.000000000 +0200 +@@ -87,7 +87,7 @@ + + WebView + QWidget +-
<header>widget.h</header>
++   <header>../../widget.h</header>
+ + diff --git a/src/win32/patches/sed.patch b/src/win32/patches/sed.patch new file mode 100644 index 00000000..a599cb3b --- /dev/null +++ b/src/win32/patches/sed.patch @@ -0,0 +1,15 @@ +Index: regex_internal.h +--- ../released/sed-4.1.5/lib/regex_internal.h 2005-12-06 00:50:56.000000000 -0800 ++++ ./lib/regex_internal.h 2006-08-31 02:24:05.000000000 -0700 +@@ -410,7 +410,11 @@ + #define re_string_skip_bytes(pstr,idx) ((pstr)->cur_idx += (idx)) + #define re_string_set_index(pstr,idx) ((pstr)->cur_idx = (idx)) + ++#if defined(WIN32) ++#include ++#else + #include ++#endif + + #ifndef _LIBC + # if HAVE_ALLOCA diff --git a/src/win32/patches/sed_msc.patch b/src/win32/patches/sed_msc.patch new file mode 100644 index 00000000..b347e250 --- /dev/null +++ b/src/win32/patches/sed_msc.patch @@ -0,0 +1,927 @@ +diff -Nru ../release/sed-4.1.5/config_h.msc ./config_h.msc +--- ../release/sed-4.1.5/config_h.msc 1969-12-31 16:00:00.000000000 -0800 ++++ ./config_h.msc 2006-09-28 19:34:18.620414300 -0700 +@@ -0,0 +1,387 @@ ++/* config_h.in. Generated from configure.ac by autoheader. */ ++/* config_h.msc. Modified for Microsoft Visual Studio. */ ++ ++#define O_CREAT _O_CREAT ++#define O_EXCL _O_EXCL ++#define O_RDWR _O_RDWR ++ ++#define popen _popen ++#define pclose _pclose ++#define alloca _alloca ++#define strcasecmp stricmp ++ ++#define S_ISREG(x) (((x) & _S_IFMT) == _S_IFREG) ++ ++#define HAVE_FCNTL_H 1 ++ ++/* Define to one of `_getb67', `GETB67', `getb67' for Cray-2 and Cray-YMP ++ systems. This function is required for `alloca.c' support on those systems. ++ */ ++#undef CRAY_STACKSEG_END ++ ++/* Define to 1 if using `alloca.c'. */ ++#undef C_ALLOCA ++ ++/* Define to 1 if translation of program messages to the user's native ++ language is requested. */ ++#undef ENABLE_NLS ++ ++/* Define to 1 if you have `alloca', as a function or macro. */ ++#define HAVE_ALLOCA 1 ++ ++/* Define to 1 if you have and it should be used (not on Ultrix). ++ */ ++#undef HAVE_ALLOCA_H ++ ++/* Define to 1 if you have the header file. */ ++#undef HAVE_ARGZ_H ++ ++/* Define to 1 if you have the `bcopy' function. */ ++#undef HAVE_BCOPY ++ ++/* Define to 1 if you have the `btowc' function. */ ++#undef HAVE_BTOWC ++ ++/* Define to 1 if you have the `bzero' function. */ ++#undef HAVE_BZERO ++ ++/* Define if the GNU dcgettext() function is already present or preinstalled. ++ */ ++#undef HAVE_DCGETTEXT ++ ++/* Define to 1 if you have the header file, and it defines `DIR'. ++ */ ++#define HAVE_DIRENT_H 1 ++ ++/* Define to 1 if you don't have `vprintf' but do have `_doprnt.' */ ++#undef HAVE_DOPRNT ++ ++/* Define to 1 if you have the header file. */ ++#define HAVE_ERRNO_H 1 ++ ++/* Define to 1 if you have the `fchmod' function. */ ++#undef HAVE_FCHMOD ++ ++/* Define to 1 if you have the `fchown' function. */ ++#undef HAVE_FCHOWN ++ ++/* Define to 1 if you have the `feof_unlocked' function. */ ++#undef HAVE_FEOF_UNLOCKED ++ ++/* Define to 1 if you have the `fgets_unlocked' function. */ ++#undef HAVE_FGETS_UNLOCKED ++ ++/* Define to 1 if you have the `getcwd' function. */ ++#define HAVE_GETCWD 1 ++ ++/* Define to 1 if you have the `getc_unlocked' function. */ ++#undef HAVE_GETC_UNLOCKED ++ ++/* Define to 1 if you have the `getegid' function. */ ++#undef HAVE_GETEGID ++ ++/* Define to 1 if you have the `geteuid' function. */ ++#undef HAVE_GETEUID ++ ++/* Define to 1 if you have the `getgid' function. */ ++#undef HAVE_GETGID ++ ++/* Define to 1 if you have the `getpagesize' function. 
*/ ++#undef HAVE_GETPAGESIZE ++ ++/* Define if the GNU gettext() function is already present or preinstalled. */ ++#undef HAVE_GETTEXT ++ ++/* Define to 1 if you have the `getuid' function. */ ++#undef HAVE_GETUID ++ ++/* Define if you have the iconv() function. */ ++#undef HAVE_ICONV ++ ++/* Define to 1 if you have the header file. */ ++#undef HAVE_INTTYPES_H ++ ++/* Define to 1 if you have the header file. */ ++#define HAVE_IO_H 1 ++ ++/* Define to 1 if you have the `isascii' function. */ ++#define HAVE_ISASCII 1 ++ ++/* Define to 1 if you have the `isatty' function. */ ++#undef HAVE_ISATTY ++ ++/* Define to 1 if you have the `isblank' function. */ ++#undef HAVE_ISBLANK ++ ++/* Define if you have and nl_langinfo(CODESET). */ ++#undef HAVE_LANGINFO_CODESET ++ ++/* Define if your file defines LC_MESSAGES. */ ++#define HAVE_LC_MESSAGES 1 ++ ++/* Define to 1 if you have the `regex' library (-lregex). */ ++#undef HAVE_LIBREGEX ++ ++/* Define to 1 if you have the header file. */ ++#define HAVE_LIMITS_H 1 ++ ++/* Define to 1 if you have the header file. */ ++#undef HAVE_LOCALE_H ++ ++/* Define to 1 if you support file names longer than 14 characters. */ ++#define HAVE_LONG_FILE_NAMES 1 ++ ++/* Define to 1 if you have the header file. */ ++#define HAVE_MALLOC_H 1 ++ ++/* Define to 1 if mbrtowc and mbstate_t are properly declared. */ ++#undef HAVE_MBRTOWC ++ ++/* Define to 1 if declares mbstate_t. */ ++#undef HAVE_MBSTATE_T ++ ++/* Define to 1 if you have the header file. */ ++#undef HAVE_MCHECK_H ++ ++/* Define to 1 if you have the `memchr' function. */ ++#define HAVE_MEMCHR 1 ++ ++/* Define to 1 if you have the `memcmp' function. */ ++#define HAVE_MEMCMP 1 ++ ++/* Define to 1 if you have the `memcpy' function. */ ++#define HAVE_MEMCPY 1 ++ ++/* Define to 1 if you have the `memmove' function. */ ++#define HAVE_MEMMOVE 1 ++ ++/* Define to 1 if you have the header file. */ ++#define HAVE_MEMORY_H 1 ++ ++/* Define to 1 if you have the `mempcpy' function. */ ++#undef HAVE_MEMPCPY ++ ++/* Define to 1 if you have the `memset' function. */ ++#define HAVE_MEMSET 1 ++ ++/* Define to 1 if you have the `mkstemp' function. */ ++#undef HAVE_MKSTEMP ++ ++/* Define to 1 if you have a working `mmap' system call. */ ++#undef HAVE_MMAP ++ ++/* Define to 1 if you have the `munmap' function. */ ++#undef HAVE_MUNMAP ++ ++/* Define to 1 if you have the header file, and it defines `DIR'. */ ++#undef HAVE_NDIR_H ++ ++/* Define to 1 if you have the header file. */ ++#undef HAVE_NL_TYPES_H ++ ++/* Define to 1 if libc includes obstacks. */ ++#undef HAVE_OBSTACK ++ ++/* Define to 1 if you have the `pathconf' function. */ ++#undef HAVE_PATHCONF ++ ++/* Define to 1 if you have the `popen' function. */ ++#define HAVE_POPEN 1 ++ ++/* Define to 1 if you have the `putenv' function. */ ++#define HAVE_PUTENV 1 ++ ++/* Define to 1 if you have the header file. */ ++#undef HAVE_REGEX_H ++ ++/* Define to 1 if you have the `setenv' function. */ ++#define HAVE_SETENV 1 ++ ++/* Define to 1 if you have the `setlocale' function. */ ++#undef HAVE_SETLOCALE ++ ++/* Define to 1 if you have the header file. */ ++#define HAVE_STDARG_H 1 ++ ++/* Define to 1 if stdbool.h conforms to C99. */ ++#undef HAVE_STDBOOL_H ++ ++/* Define to 1 if you have the header file. */ ++#define HAVE_STDDEF_H 1 ++ ++/* Define to 1 if you have the header file. */ ++#undef HAVE_STDINT_H ++ ++/* Define to 1 if you have the header file. */ ++#define HAVE_STDLIB_H 1 ++ ++/* Define to 1 if you have the `stpcpy' function. 
*/ ++#undef HAVE_STPCPY ++ ++/* Define to 1 if you have the `strcasecmp' function. */ ++#undef HAVE_STRCASECMP ++ ++/* Define to 1 if you have the `strchr' function. */ ++#define HAVE_STRCHR 1 ++ ++/* Define to 1 if you have the `strdup' function. */ ++#define HAVE_STRDUP 1 ++ ++/* Define to 1 if you have the `strerror' function. */ ++#define HAVE_STRERROR 1 ++ ++/* Define to 1 if you have the header file. */ ++#undef HAVE_STRINGS_H ++ ++/* Define to 1 if you have the header file. */ ++#define HAVE_STRING_H 1 ++ ++/* Define to 1 if you have the `strtoul' function. */ ++#define HAVE_STRTOUL 1 ++ ++/* Define to 1 if you have the `strverscmp' function. */ ++#undef HAVE_STRVERSCMP ++ ++/* Define to 1 if you have the header file, and it defines `DIR'. ++ */ ++#define HAVE_SYS_DIR_H 1 ++ ++/* Define to 1 if you have the header file. */ ++#undef HAVE_SYS_FILE_H ++ ++/* Define to 1 if you have the header file, and it defines `DIR'. ++ */ ++#define HAVE_SYS_NDIR_H 1 ++ ++/* Define to 1 if you have the header file. */ ++#define HAVE_SYS_PARAM_H 1 ++ ++/* Define to 1 if you have the header file. */ ++#define HAVE_SYS_STAT_H 1 ++ ++/* Define to 1 if you have the header file. */ ++#define HAVE_SYS_TYPES_H 1 ++ ++/* Define to 1 if you have the `tsearch' function. */ ++#define HAVE_TSEARCH 1 ++ ++/* Define to 1 if you have the header file. */ ++#undef HAVE_UNISTD_H ++ ++/* Define to 1 if you have the `vprintf' function. */ ++#define HAVE_VPRINTF 1 ++ ++/* Define to 1 if you have the header file. */ ++#undef HAVE_WCHAR_H ++ ++/* Define to 1 if you have the `wcrtomb' function. */ ++#undef HAVE_WCRTOMB ++ ++/* Define to 1 if you have the `wcscoll' function. */ ++#undef HAVE_WCSCOLL ++ ++/* Define to 1 if you have the header file. */ ++#undef HAVE_WCTYPE_H ++ ++/* Define to 1 if the system has the type `_Bool'. */ ++#undef HAVE__BOOL ++ ++/* Define to 1 if you have the `__argz_count' function. */ ++#undef HAVE___ARGZ_COUNT ++ ++/* Define to 1 if you have the `__argz_next' function. */ ++#undef HAVE___ARGZ_NEXT ++ ++/* Define to 1 if you have the `__argz_stringify' function. */ ++#undef HAVE___ARGZ_STRINGIFY ++ ++/* Define as const if the declaration of iconv() needs const. */ ++#undef ICONV_CONST ++ ++/* Name of package */ ++#define PACKAGE "sed" ++ ++/* Define to the address where bug reports for this package should be sent. */ ++#undef PACKAGE_BUGREPORT ++ ++/* Define to the full name of this package. */ ++#undef PACKAGE_NAME ++ ++/* Define to the full name and version of this package. */ ++#undef PACKAGE_STRING ++ ++/* Define to the one symbol short name of this package. */ ++#undef PACKAGE_TARNAME ++ ++/* Define to the version of this package. */ ++#undef PACKAGE_VERSION ++ ++/* Define to the version of GNU sed whose features are supported by this sed. ++ */ ++#define SED_FEATURE_VERSION "5.1" ++ ++/* If using the C implementation of alloca, define if you know the ++ direction of stack growth for your system; otherwise it will be ++ automatically deduced at run-time. ++ STACK_DIRECTION > 0 => grows toward higher addresses ++ STACK_DIRECTION < 0 => grows toward lower addresses ++ STACK_DIRECTION = 0 => direction of growth unknown */ ++#undef STACK_DIRECTION ++ ++/* Define to 1 if you have the ANSI C header files. */ ++#define STDC_HEADERS 1 ++ ++/* Version number of package */ ++#define VERSION "4.1.5" ++ ++/* Define to 1 if on AIX 3. ++ System headers sometimes define this. ++ We just want to avoid a redefinition error message. 
*/ ++#ifndef _ALL_SOURCE ++# undef _ALL_SOURCE ++#endif ++ ++/* Number of bits in a file offset, on hosts where this is settable. */ ++#undef _FILE_OFFSET_BITS ++ ++/* Enable GNU extensions on systems that have them. */ ++#ifndef _GNU_SOURCE ++# undef _GNU_SOURCE ++#endif ++ ++/* Define for large files, on AIX-style hosts. */ ++#undef _LARGE_FILES ++ ++/* Define to 1 if on MINIX. */ ++#undef _MINIX ++ ++/* Define to 2 if the system does not provide POSIX.1 features except with ++ this defined. */ ++#undef _POSIX_1_SOURCE ++ ++/* Define to 1 if you need to in order for `stat' and other things to work. */ ++#undef _POSIX_SOURCE ++ ++/* Include BSD functions in regex, used by the testsuite */ ++#undef _REGEX_RE_COMP ++ ++/* Define to empty if `const' does not conform to ANSI C. */ ++#undef const ++ ++/* Define to `__inline__' or `__inline' if that's what the C compiler ++ calls it, or to nothing if 'inline' is not supported under any name. */ ++#ifndef __cplusplus ++#define inline __inline ++#endif ++ ++/* Define to a type if does not define. */ ++#define mbstate_t int ++ ++/* Define to `long' if does not define. */ ++#undef off_t ++ ++/* Define to `unsigned' if does not define. */ ++#undef size_t ++ ++/* Define to `int' if does not define. */ ++#undef ssize_t +diff -Nru ../release/sed-4.1.5/config_h.msc~ ./config_h.msc~ +--- ../release/sed-4.1.5/config_h.msc~ 1969-12-31 16:00:00.000000000 -0800 ++++ ./config_h.msc~ 2006-09-28 13:56:59.922178500 -0700 +@@ -0,0 +1,386 @@ ++/* config_h.in. Generated from configure.ac by autoheader. */ ++ ++#define O_CREAT _O_CREAT ++#define O_EXCL _O_EXCL ++#define O_RDWR _O_RDWR ++ ++#define popen _popen ++#define pclose _pclose ++#define alloca _alloca ++#define strcasecmp stricmp ++ ++#define S_ISREG(x) (((x) & _S_IFMT) == _S_IFREG) ++ ++#define HAVE_FCNTL_H 1 ++ ++/* Define to one of `_getb67', `GETB67', `getb67' for Cray-2 and Cray-YMP ++ systems. This function is required for `alloca.c' support on those systems. ++ */ ++#undef CRAY_STACKSEG_END ++ ++/* Define to 1 if using `alloca.c'. */ ++#undef C_ALLOCA ++ ++/* Define to 1 if translation of program messages to the user's native ++ language is requested. */ ++#undef ENABLE_NLS ++ ++/* Define to 1 if you have `alloca', as a function or macro. */ ++#define HAVE_ALLOCA 1 ++ ++/* Define to 1 if you have and it should be used (not on Ultrix). ++ */ ++#undef HAVE_ALLOCA_H ++ ++/* Define to 1 if you have the header file. */ ++#undef HAVE_ARGZ_H ++ ++/* Define to 1 if you have the `bcopy' function. */ ++#undef HAVE_BCOPY ++ ++/* Define to 1 if you have the `btowc' function. */ ++#undef HAVE_BTOWC ++ ++/* Define to 1 if you have the `bzero' function. */ ++#undef HAVE_BZERO ++ ++/* Define if the GNU dcgettext() function is already present or preinstalled. ++ */ ++#undef HAVE_DCGETTEXT ++ ++/* Define to 1 if you have the header file, and it defines `DIR'. ++ */ ++#define HAVE_DIRENT_H 1 ++ ++/* Define to 1 if you don't have `vprintf' but do have `_doprnt.' */ ++#undef HAVE_DOPRNT ++ ++/* Define to 1 if you have the header file. */ ++#define HAVE_ERRNO_H 1 ++ ++/* Define to 1 if you have the `fchmod' function. */ ++#undef HAVE_FCHMOD ++ ++/* Define to 1 if you have the `fchown' function. */ ++#undef HAVE_FCHOWN ++ ++/* Define to 1 if you have the `feof_unlocked' function. */ ++#undef HAVE_FEOF_UNLOCKED ++ ++/* Define to 1 if you have the `fgets_unlocked' function. */ ++#undef HAVE_FGETS_UNLOCKED ++ ++/* Define to 1 if you have the `getcwd' function. 
*/ ++#define HAVE_GETCWD 1 ++ ++/* Define to 1 if you have the `getc_unlocked' function. */ ++#undef HAVE_GETC_UNLOCKED ++ ++/* Define to 1 if you have the `getegid' function. */ ++#undef HAVE_GETEGID ++ ++/* Define to 1 if you have the `geteuid' function. */ ++#undef HAVE_GETEUID ++ ++/* Define to 1 if you have the `getgid' function. */ ++#undef HAVE_GETGID ++ ++/* Define to 1 if you have the `getpagesize' function. */ ++#undef HAVE_GETPAGESIZE ++ ++/* Define if the GNU gettext() function is already present or preinstalled. */ ++#undef HAVE_GETTEXT ++ ++/* Define to 1 if you have the `getuid' function. */ ++#undef HAVE_GETUID ++ ++/* Define if you have the iconv() function. */ ++#undef HAVE_ICONV ++ ++/* Define to 1 if you have the header file. */ ++#undef HAVE_INTTYPES_H ++ ++/* Define to 1 if you have the header file. */ ++#define HAVE_IO_H 1 ++ ++/* Define to 1 if you have the `isascii' function. */ ++#define HAVE_ISASCII 1 ++ ++/* Define to 1 if you have the `isatty' function. */ ++#undef HAVE_ISATTY ++ ++/* Define to 1 if you have the `isblank' function. */ ++#undef HAVE_ISBLANK ++ ++/* Define if you have and nl_langinfo(CODESET). */ ++#undef HAVE_LANGINFO_CODESET ++ ++/* Define if your file defines LC_MESSAGES. */ ++#define HAVE_LC_MESSAGES 1 ++ ++/* Define to 1 if you have the `regex' library (-lregex). */ ++#undef HAVE_LIBREGEX ++ ++/* Define to 1 if you have the header file. */ ++#define HAVE_LIMITS_H 1 ++ ++/* Define to 1 if you have the header file. */ ++#undef HAVE_LOCALE_H ++ ++/* Define to 1 if you support file names longer than 14 characters. */ ++#define HAVE_LONG_FILE_NAMES 1 ++ ++/* Define to 1 if you have the header file. */ ++#define HAVE_MALLOC_H 1 ++ ++/* Define to 1 if mbrtowc and mbstate_t are properly declared. */ ++#undef HAVE_MBRTOWC ++ ++/* Define to 1 if declares mbstate_t. */ ++#undef HAVE_MBSTATE_T ++ ++/* Define to 1 if you have the header file. */ ++#undef HAVE_MCHECK_H ++ ++/* Define to 1 if you have the `memchr' function. */ ++#define HAVE_MEMCHR 1 ++ ++/* Define to 1 if you have the `memcmp' function. */ ++#define HAVE_MEMCMP 1 ++ ++/* Define to 1 if you have the `memcpy' function. */ ++#define HAVE_MEMCPY 1 ++ ++/* Define to 1 if you have the `memmove' function. */ ++#define HAVE_MEMMOVE 1 ++ ++/* Define to 1 if you have the header file. */ ++#define HAVE_MEMORY_H 1 ++ ++/* Define to 1 if you have the `mempcpy' function. */ ++#undef HAVE_MEMPCPY ++ ++/* Define to 1 if you have the `memset' function. */ ++#define HAVE_MEMSET 1 ++ ++/* Define to 1 if you have the `mkstemp' function. */ ++#undef HAVE_MKSTEMP ++ ++/* Define to 1 if you have a working `mmap' system call. */ ++#undef HAVE_MMAP ++ ++/* Define to 1 if you have the `munmap' function. */ ++#undef HAVE_MUNMAP ++ ++/* Define to 1 if you have the header file, and it defines `DIR'. */ ++#undef HAVE_NDIR_H ++ ++/* Define to 1 if you have the header file. */ ++#undef HAVE_NL_TYPES_H ++ ++/* Define to 1 if libc includes obstacks. */ ++#undef HAVE_OBSTACK ++ ++/* Define to 1 if you have the `pathconf' function. */ ++#undef HAVE_PATHCONF ++ ++/* Define to 1 if you have the `popen' function. */ ++#define HAVE_POPEN 1 ++ ++/* Define to 1 if you have the `putenv' function. */ ++#define HAVE_PUTENV 1 ++ ++/* Define to 1 if you have the header file. */ ++#undef HAVE_REGEX_H ++ ++/* Define to 1 if you have the `setenv' function. */ ++#define HAVE_SETENV 1 ++ ++/* Define to 1 if you have the `setlocale' function. */ ++#undef HAVE_SETLOCALE ++ ++/* Define to 1 if you have the header file. 
*/ ++#define HAVE_STDARG_H 1 ++ ++/* Define to 1 if stdbool.h conforms to C99. */ ++#undef HAVE_STDBOOL_H ++ ++/* Define to 1 if you have the header file. */ ++#define HAVE_STDDEF_H 1 ++ ++/* Define to 1 if you have the header file. */ ++#undef HAVE_STDINT_H ++ ++/* Define to 1 if you have the header file. */ ++#define HAVE_STDLIB_H 1 ++ ++/* Define to 1 if you have the `stpcpy' function. */ ++#undef HAVE_STPCPY ++ ++/* Define to 1 if you have the `strcasecmp' function. */ ++#undef HAVE_STRCASECMP ++ ++/* Define to 1 if you have the `strchr' function. */ ++#define HAVE_STRCHR 1 ++ ++/* Define to 1 if you have the `strdup' function. */ ++#define HAVE_STRDUP 1 ++ ++/* Define to 1 if you have the `strerror' function. */ ++#define HAVE_STRERROR 1 ++ ++/* Define to 1 if you have the header file. */ ++#undef HAVE_STRINGS_H ++ ++/* Define to 1 if you have the header file. */ ++#define HAVE_STRING_H 1 ++ ++/* Define to 1 if you have the `strtoul' function. */ ++#define HAVE_STRTOUL 1 ++ ++/* Define to 1 if you have the `strverscmp' function. */ ++#undef HAVE_STRVERSCMP ++ ++/* Define to 1 if you have the header file, and it defines `DIR'. ++ */ ++#define HAVE_SYS_DIR_H 1 ++ ++/* Define to 1 if you have the header file. */ ++#undef HAVE_SYS_FILE_H ++ ++/* Define to 1 if you have the header file, and it defines `DIR'. ++ */ ++#define HAVE_SYS_NDIR_H 1 ++ ++/* Define to 1 if you have the header file. */ ++#define HAVE_SYS_PARAM_H 1 ++ ++/* Define to 1 if you have the header file. */ ++#define HAVE_SYS_STAT_H 1 ++ ++/* Define to 1 if you have the header file. */ ++#define HAVE_SYS_TYPES_H 1 ++ ++/* Define to 1 if you have the `tsearch' function. */ ++#define HAVE_TSEARCH 1 ++ ++/* Define to 1 if you have the header file. */ ++#undef HAVE_UNISTD_H ++ ++/* Define to 1 if you have the `vprintf' function. */ ++#define HAVE_VPRINTF 1 ++ ++/* Define to 1 if you have the header file. */ ++#undef HAVE_WCHAR_H ++ ++/* Define to 1 if you have the `wcrtomb' function. */ ++#undef HAVE_WCRTOMB ++ ++/* Define to 1 if you have the `wcscoll' function. */ ++#undef HAVE_WCSCOLL ++ ++/* Define to 1 if you have the header file. */ ++#undef HAVE_WCTYPE_H ++ ++/* Define to 1 if the system has the type `_Bool'. */ ++#undef HAVE__BOOL ++ ++/* Define to 1 if you have the `__argz_count' function. */ ++#undef HAVE___ARGZ_COUNT ++ ++/* Define to 1 if you have the `__argz_next' function. */ ++#undef HAVE___ARGZ_NEXT ++ ++/* Define to 1 if you have the `__argz_stringify' function. */ ++#undef HAVE___ARGZ_STRINGIFY ++ ++/* Define as const if the declaration of iconv() needs const. */ ++#undef ICONV_CONST ++ ++/* Name of package */ ++#define PACKAGE "sed" ++ ++/* Define to the address where bug reports for this package should be sent. */ ++#undef PACKAGE_BUGREPORT ++ ++/* Define to the full name of this package. */ ++#undef PACKAGE_NAME ++ ++/* Define to the full name and version of this package. */ ++#undef PACKAGE_STRING ++ ++/* Define to the one symbol short name of this package. */ ++#undef PACKAGE_TARNAME ++ ++/* Define to the version of this package. */ ++#undef PACKAGE_VERSION ++ ++/* Define to the version of GNU sed whose features are supported by this sed. ++ */ ++#define SED_FEATURE_VERSION "5.1" ++ ++/* If using the C implementation of alloca, define if you know the ++ direction of stack growth for your system; otherwise it will be ++ automatically deduced at run-time. 
++ STACK_DIRECTION > 0 => grows toward higher addresses ++ STACK_DIRECTION < 0 => grows toward lower addresses ++ STACK_DIRECTION = 0 => direction of growth unknown */ ++#undef STACK_DIRECTION ++ ++/* Define to 1 if you have the ANSI C header files. */ ++#define STDC_HEADERS 1 ++ ++/* Version number of package */ ++#define VERSION "4.1.5" ++ ++/* Define to 1 if on AIX 3. ++ System headers sometimes define this. ++ We just want to avoid a redefinition error message. */ ++#ifndef _ALL_SOURCE ++# undef _ALL_SOURCE ++#endif ++ ++/* Number of bits in a file offset, on hosts where this is settable. */ ++#undef _FILE_OFFSET_BITS ++ ++/* Enable GNU extensions on systems that have them. */ ++#ifndef _GNU_SOURCE ++# undef _GNU_SOURCE ++#endif ++ ++/* Define for large files, on AIX-style hosts. */ ++#undef _LARGE_FILES ++ ++/* Define to 1 if on MINIX. */ ++#undef _MINIX ++ ++/* Define to 2 if the system does not provide POSIX.1 features except with ++ this defined. */ ++#undef _POSIX_1_SOURCE ++ ++/* Define to 1 if you need to in order for `stat' and other things to work. */ ++#undef _POSIX_SOURCE ++ ++/* Include BSD functions in regex, used by the testsuite */ ++#undef _REGEX_RE_COMP ++ ++/* Define to empty if `const' does not conform to ANSI C. */ ++#undef const ++ ++/* Define to `__inline__' or `__inline' if that's what the C compiler ++ calls it, or to nothing if 'inline' is not supported under any name. */ ++#ifndef __cplusplus ++#define inline __inline ++#endif ++ ++/* Define to a type if does not define. */ ++#define mbstate_t int ++ ++/* Define to `long' if does not define. */ ++#undef off_t ++ ++/* Define to `unsigned' if does not define. */ ++#undef size_t ++ ++/* Define to `int' if does not define. */ ++#undef ssize_t +diff -Nru ../release/sed-4.1.5/lib/Makefile.msc ./lib/Makefile.msc +--- ../release/sed-4.1.5/lib/Makefile.msc 1969-12-31 16:00:00.000000000 -0800 ++++ ./lib/Makefile.msc 2006-09-28 19:10:18.880835600 -0700 +@@ -0,0 +1,20 @@ ++SOURCES=getline.c getopt.c getopt1.c mkstemp.c obstack.c regex.c strverscmp.c utils.c ++ ++OBJECTS=$(SOURCES:.c=.obj) ++ ++CFLAGS=-I.. -I. -DBOOTSTRAP -D_CRT_SECURE_NO_DEPRECATE -D_CRT_NONSTDC_NO_DEPRECATE -DHAVE_CONFIG_H ++ ++all : libsed.lib ++ ++install : ++ ++clean : ++ del $(OBJECTS) regex.h libsed.lib *~ 2>nul ++ ++regex.h : regex_.h ++ copy /b $**+nul $@ ++ ++$(OBJECTS) : ..\config.h ++ ++libsed.lib : regex.h $(OBJECTS) ++ lib /out:$@ $(OBJECTS) +diff -Nru ../release/sed-4.1.5/lib/regcomp.c ./lib/regcomp.c +--- ../release/sed-4.1.5/lib/regcomp.c 2005-12-06 00:46:51.000000000 -0800 ++++ ./lib/regcomp.c 2006-09-28 04:36:14.084476200 -0700 +@@ -506,11 +506,7 @@ + from either regcomp or regexec. We don't use PREG here. 
*/ + + size_t +-regerror (errcode, preg, errbuf, errbuf_size) +- int errcode; +- const regex_t *__restrict preg; +- char *__restrict errbuf; +- size_t errbuf_size; ++regerror (int errcode, const regex_t *__restrict preg, char *__restrict errbuf, size_t errbuf_size) + { + const char *msg; + size_t msg_size; +diff -Nru ../release/sed-4.1.5/lib/regex_internal.h ./lib/regex_internal.h +--- ../release/sed-4.1.5/lib/regex_internal.h 2005-12-06 00:50:56.000000000 -0800 ++++ ./lib/regex_internal.h 2006-09-28 04:33:10.987210800 -0700 +@@ -410,7 +410,9 @@ + #define re_string_skip_bytes(pstr,idx) ((pstr)->cur_idx += (idx)) + #define re_string_set_index(pstr,idx) ((pstr)->cur_idx = (idx)) + ++#ifdef HAVE_ALLOCA_H + #include ++#endif + + #ifndef _LIBC + # if HAVE_ALLOCA +diff -Nru ../release/sed-4.1.5/lib/regexec.c ./lib/regexec.c +--- ../release/sed-4.1.5/lib/regexec.c 2005-12-06 00:46:56.000000000 -0800 ++++ ./lib/regexec.c 2006-09-28 04:50:51.101314600 -0700 +@@ -18,6 +18,12 @@ + Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA + 02111-1307 USA. */ + ++#ifdef BOOTSTRAP ++#define bool int ++#define true 1 ++#define false 0 ++#endif ++ + static reg_errcode_t match_ctx_init (re_match_context_t *cache, int eflags, + int n) internal_function; + static void match_ctx_clean (re_match_context_t *mctx) internal_function; +@@ -2894,7 +2900,7 @@ + sizeof (re_dfastate_t *) * (path->alloc - old_alloc)); + } + +- str_idx = path->next_idx ?: top_str; ++ str_idx = path->next_idx ? path->next_idx : top_str; + + /* Temporary modify MCTX. */ + backup_state_log = mctx->state_log; +diff -Nru ../release/sed-4.1.5/Makefile.msc ./Makefile.msc +--- ../release/sed-4.1.5/Makefile.msc 1969-12-31 16:00:00.000000000 -0800 ++++ ./Makefile.msc 2006-09-28 19:10:18.552737900 -0700 +@@ -0,0 +1,17 @@ ++DIRS=lib sed ++ ++all : config.h ++ ++dummy_target : ++ ++install : dummy_target ++ ++all install : ++ @for %%i in ( $(DIRS) ) do @( cd %%i & nmake /nologo -f Makefile.msc $@ & cd .. ) ++ ++clean : ++ del config.h *~ 2>nul ++ @for %%i in ( $(DIRS) ) do @( cd %%i & nmake /nologo -f Makefile.msc $@ & cd .. ) ++ ++config.h : config_h.msc ++ copy /b $**+nul $@ +diff -Nru ../release/sed-4.1.5/sed/compile.c ./sed/compile.c +--- ../release/sed-4.1.5/sed/compile.c 2006-02-03 01:06:26.000000000 -0800 ++++ ./sed/compile.c 2006-09-28 05:48:12.636140800 -0700 +@@ -451,9 +451,7 @@ + + static int snarf_char_class P_((struct buffer *b, mbstate_t *cur_stat)); + static int +-snarf_char_class(b, cur_stat) +- struct buffer *b; +- mbstate_t *cur_stat; ++snarf_char_class(struct buffer *b, mbstate_t *cur_stat) + { + int ch; + int state = 0; +@@ -531,9 +529,7 @@ + + static struct buffer *match_slash P_((int slash, bool regex)); + static struct buffer * +-match_slash(slash, regex) +- int slash; +- bool regex; ++match_slash(int slash, bool regex) + { + struct buffer *b; + int ch; +diff -Nru ../release/sed-4.1.5/sed/Makefile.msc ./sed/Makefile.msc +--- ../release/sed-4.1.5/sed/Makefile.msc 1969-12-31 16:00:00.000000000 -0800 ++++ ./sed/Makefile.msc 2006-09-28 19:10:19.193309600 -0700 +@@ -0,0 +1,18 @@ ++SOURCES=sed.c compile.c execute.c regexp.c fmt.c mbcs.c ++ ++OBJECTS=$(SOURCES:.c=.obj) ++ ++CFLAGS=-I.. -I. 
-I../lib -DBOOTSTRAP -D_CRT_SECURE_NO_DEPRECATE -D_CRT_NONSTDC_NO_DEPRECATE -DHAVE_CONFIG_H ++ ++all : sed.exe ++ ++install : ++ copy sed.exe ..\..\..\bin ++ ++clean : ++ del $(OBJECTS) sed.exe *~ 2>nul ++ ++$(OBJECTS) : ..\config.h ++ ++sed.exe : $(OBJECTS) ++ link /out:sed.exe /subsystem:console /opt:ref $(OBJECTS) ..\lib\libsed.lib diff --git a/src/win32/patches/sqlite.patch b/src/win32/patches/sqlite.patch new file mode 100644 index 00000000..4898bbb2 --- /dev/null +++ b/src/win32/patches/sqlite.patch @@ -0,0 +1,162 @@ +Index: main.mk +--- ../orig/sqlite-3.3.6/main.mk 2006-06-06 03:52:26.000000000 -0700 ++++ ./main.mk 2006-06-30 15:16:28.000000000 -0700 +@@ -60,7 +60,7 @@ + expr.o func.o hash.o insert.o \ + main.o opcodes.o os.o os_os2.o os_unix.o os_win.o \ + pager.o parse.o pragma.o prepare.o printf.o random.o \ +- select.o table.o tclsqlite.o tokenize.o trigger.o \ ++ select.o table.o tokenize.o trigger.o \ + update.o util.o vacuum.o \ + vdbe.o vdbeapi.o vdbeaux.o vdbefifo.o vdbemem.o \ + where.o utf.o legacy.o +@@ -394,8 +394,8 @@ + + # Rules for building test programs and for running tests + # +-tclsqlite3: $(TOP)/src/tclsqlite.c libsqlite3.a +- $(TCCX) $(TCL_FLAGS) -DTCLSH=1 -o tclsqlite3 \ ++tclsqlite3$(EXE): $(TOP)/src/tclsqlite.c libsqlite3.a ++ $(TCCX) $(TCL_FLAGS) -DTCLSH=1 -o tclsqlite3$(EXE) \ + $(TOP)/src/tclsqlite.c libsqlite3.a $(LIBTCL) $(THREADLIB) + + testfixture$(EXE): $(TOP)/src/tclsqlite.c libsqlite3.a $(TESTSRC) +@@ -604,8 +604,8 @@ + + # Standard install and cleanup targets + # +-install: sqlite3 libsqlite3.a sqlite3.h +- mv sqlite3 /usr/bin ++install: sqlite3$(EXE) libsqlite3.a sqlite3.h ++ mv sqlite3$(EXE) /usr/bin + mv libsqlite3.a /usr/lib + mv sqlite3.h /usr/include + +Index: Makefile.mingw32 +--- ../orig/sqlite-3.3.6/Makefile.mingw32 1969-12-31 16:00:00.000000000 -0800 ++++ ./Makefile.mingw32 2006-06-30 15:14:29.000000000 -0700 +@@ -0,0 +1,124 @@ ++#!/usr/make ++# ++# Makefile for SQLITE ++# ++# This is a template makefile for SQLite. Most people prefer to ++# use the autoconf generated "configure" script to generate the ++# makefile automatically. But that does not work for everybody ++# and in every situation. If you are having problems with the ++# "configure" script, you might want to try this makefile as an ++# alternative. Create a copy of this file, edit the parameters ++# below and type "make". ++# ++ ++#### The toplevel directory of the source tree. This is the directory ++# that contains this "Makefile.in" and the "configure.in" script. ++# ++TOP = .. ++ ++#### C Compiler and options for use in building executables that ++# will run on the platform that is doing the build. ++# ++BCC = gcc -g -O2 ++#BCC = /opt/ancic/bin/c89 -0 ++ ++#### If the target operating system supports the "usleep()" system ++# call, then define the HAVE_USLEEP macro for all C modules. ++# ++#USLEEP = ++USLEEP = -DHAVE_USLEEP=1 ++ ++#### If you want the SQLite library to be safe for use within a ++# multi-threaded program, then define the following macro ++# appropriately: ++# ++THREADSAFE = -DTHREADSAFE=1 ++#THREADSAFE = -DTHREADSAFE=0 ++ ++#### Specify any extra linker options needed to make the library ++# thread safe ++# ++THREADLIB = -lpthreadGCE ++#THREADLIB = ++ ++#### Specify any extra libraries needed to access required functions. ++# ++#TLIBS = -lrt # fdatasync on Solaris 8 ++TLIBS = ++ ++#### Leave SQLITE_DEBUG undefined for maximum speed. Use SQLITE_DEBUG=1 ++# to check for memory leaks. 
Use SQLITE_DEBUG=2 to print a log of all ++# malloc()s and free()s in order to track down memory leaks. ++# ++# SQLite uses some expensive assert() statements in the inner loop. ++# You can make the library go almost twice as fast if you compile ++# with -DNDEBUG=1 ++# ++#OPTS = -DSQLITE_DEBUG=2 ++#OPTS = -DSQLITE_DEBUG=1 ++#OPTS = ++OPTS = -DNDEBUG=1 ++OPTS += -DHAVE_FDATASYNC=1 ++ ++#### The suffix to add to executable files. ".exe" for windows. ++# Nothing for unix. ++# ++EXE = .exe ++#EXE = ++ ++#### C Compile and options for use in building executables that ++# will run on the target platform. This is usually the same ++# as BCC, unless you are cross-compiling. ++# ++#TCC = gcc -O6 ++#TCC = gcc -g -O0 -Wall ++#TCC = gcc -g -O0 -Wall -fprofile-arcs -ftest-coverage ++TCC = $(CROSSTOOLS)/mingw32-gcc -O6 ++#TCC = /opt/ansic/bin/c89 -O +z -Wl,-a,archive ++ ++#### Tools used to build a static library. ++# ++#AR = ar cr ++AR = $(CROSSTOOLS)/mingw32-ar cr ++#RANLIB = ranlib ++RANLIB = $(CROSSTOOLS)/mingw32-ranlib ++ ++#### Extra compiler options needed for programs that use the TCL library. ++# ++#TCL_FLAGS = ++#TCL_FLAGS = -DSTATIC_BUILD=1 ++#TCL_FLAGS = -I/home/drh/tcltk/8.4linux ++#TCL_FLAGS = -I/home/drh/tcltk/8.4win -DSTATIC_BUILD=1 ++#TCL_FLAGS = -I/home/drh/tcltk/8.3hpux ++ ++#### Linker options needed to link against the TCL library. ++# ++#LIBTCL = -ltcl84 ++#LIBTCL = /home/drh/tcltk/8.4linux/libtcl8.4g.a -lm -ldl ++#LIBTCL = /home/drh/tcltk/8.4win/libtcl84s.a -lmsvcrt ++#LIBTCL = /home/drh/tcltk/8.3hpux/libtcl8.3.a -ldld -lm -lc ++ ++#### Compiler options needed for programs that use the readline() library. ++# ++READLINE_FLAGS = ++#READLINE_FLAGS = -DHAVE_READLINE=1 -I/usr/include/readline ++ ++#### Linker options needed by programs using readline() must link against. ++# ++LIBREADLINE = ++#LIBREADLINE = -static -lreadline -ltermcap ++ ++#### Should the database engine assume text is coded as UTF-8 or iso8859? ++# ++# ENCODING = UTF8 ++ENCODING = ISO8859 ++ ++ ++#### Which "awk" program provides nawk compatibilty ++# ++# NAWK = nawk ++NAWK = awk ++ ++# You should not have to change anything below this line ++############################################################################### ++include $(TOP)/main.mk diff --git a/src/win32/patches/sqlite_msc.patch b/src/win32/patches/sqlite_msc.patch new file mode 100644 index 00000000..5f8161d8 --- /dev/null +++ b/src/win32/patches/sqlite_msc.patch @@ -0,0 +1,768 @@ +--- /dev/null 1969-12-31 16:00:00.000000000 -0800 ++++ Makefile.msvc 2006-11-23 12:38:22.724805900 -0800 +@@ -0,0 +1,136 @@ ++#!/usr/make ++# ++# Makefile for SQLITE ++# ++# This is a template makefile for SQLite. Most people prefer to ++# use the autoconf generated "configure" script to generate the ++# makefile automatically. But that does not work for everybody ++# and in every situation. If you are having problems with the ++# "configure" script, you might want to try this makefile as an ++# alternative. Create a copy of this file, edit the parameters ++# below and type "make". ++# ++ ++#### The toplevel directory of the source tree. This is the directory ++# that contains this "Makefile.in" and the "configure.in" script. ++# ++TOP = .. ++ ++#### C Compiler and options for use in building executables that ++# will run on the platform that is doing the build. ++# ++BCC = cl /Zi /Ox /Gy /MD ++#BCC = /opt/ancic/bin/c89 -0 ++ ++#### If the target operating system supports the "usleep()" system ++# call, then define the HAVE_USLEEP macro for all C modules. 
++# ++USLEEP = ++#USLEEP = -DHAVE_USLEEP=1 ++ ++#### If you want the SQLite library to be safe for use within a ++# multi-threaded program, then define the following macro ++# appropriately: ++# ++THREADSAFE = -DTHREADSAFE=1 ++#THREADSAFE = -DTHREADSAFE=0 ++ ++#### Specify any extra linker options needed to make the library ++# thread safe ++# ++#THREADLIB = -lpthread ++THREADLIB = ++ ++#### Specify any extra libraries needed to access required functions. ++# ++#TLIBS = -lrt # fdatasync on Solaris 8 ++TLIBS = ++ ++#### Leave SQLITE_DEBUG undefined for maximum speed. Use SQLITE_DEBUG=1 ++# to check for memory leaks. Use SQLITE_DEBUG=2 to print a log of all ++# malloc()s and free()s in order to track down memory leaks. ++# ++# SQLite uses some expensive assert() statements in the inner loop. ++# You can make the library go almost twice as fast if you compile ++# with -DNDEBUG=1 ++# ++#OPTS = -DSQLITE_DEBUG=2 ++#OPTS = -DSQLITE_DEBUG=1 ++#OPTS = ++OPTS = -DNDEBUG=1 -D_CRT_SECURE_NO_DEPRECATE ++#OPTS += -DHAVE_FDATASYNC=1 ++ ++#### The suffix to add to executable files. ".exe" for windows. ++# Nothing for unix. ++# ++EXE = .exe ++#EXE = ++ ++#### C Compile and options for use in building executables that ++# will run on the target platform. This is usually the same ++# as BCC, unless you are cross-compiling. ++# ++#TCC = gcc -O6 ++#TCC = gcc -g -O0 -Wall ++#TCC = gcc -g -O0 -Wall -fprofile-arcs -ftest-coverage ++#TCC = /opt/mingw/bin/i386-mingw32-gcc -O6 ++#TCC = /opt/ansic/bin/c89 -O +z -Wl,-a,archive ++TCC = cl /Zi /Ox /Gy /MD ++ ++#### Tools used to build a static library. ++# ++AR = lib ++#AR = /opt/mingw/bin/i386-mingw32-ar cr ++#RANLIB = ranlib ++#RANLIB = /opt/mingw/bin/i386-mingw32-ranlib ++ ++#MKSHLIB = gcc -shared ++#SO = so ++#SHPREFIX = lib ++SO = dll ++SHPREFIX = ++ ++#### Extra compiler options needed for programs that use the TCL library. ++# ++#TCL_FLAGS = ++#TCL_FLAGS = -DSTATIC_BUILD=1 ++#TCL_FLAGS = -I/home/drh/tcltk/8.4linux ++#TCL_FLAGS = -I/home/drh/tcltk/8.4win -DSTATIC_BUILD=1 ++#TCL_FLAGS = -I/home/drh/tcltk/8.3hpux ++ ++#### Linker options needed to link against the TCL library. ++# ++#LIBTCL = -ltcl -lm -ldl ++#LIBTCL = /home/drh/tcltk/8.4linux/libtcl8.4g.a -lm -ldl ++#LIBTCL = /home/drh/tcltk/8.4win/libtcl84s.a -lmsvcrt ++#LIBTCL = /home/drh/tcltk/8.3hpux/libtcl8.3.a -ldld -lm -lc ++ ++#### Compiler options needed for programs that use the readline() library. ++# ++READLINE_FLAGS = ++#READLINE_FLAGS = -DHAVE_READLINE=1 -I/usr/include/readline ++ ++#### Linker options needed by programs using readline() must link against. ++# ++LIBREADLINE = ++#LIBREADLINE = -static -lreadline -ltermcap ++ ++#### Should the database engine assume text is coded as UTF-8 or iso8859? ++# ++ENCODING = UTF8 ++#ENCODING = ISO8859 ++ ++ ++#### Which "awk" program provides nawk compatibilty ++# ++NAWK = nawk ++# NAWK = awk ++ ++#### Where to install ++BINDIR = /usr/bin ++LIBDIR = /usr/lib ++INCDIR = /usr/include ++ ++# You should not have to change anything below this line ++############################################################################### ++include ../main.mk.msvc +--- /dev/null 1969-12-31 16:00:00.000000000 -0800 ++++ main.mk.msvc 2006-11-23 12:38:22.709180800 -0800 +@@ -0,0 +1,619 @@ ++############################################################################### ++# The following macros should be defined before this script is ++# invoked: ++# ++# TOP The toplevel directory of the source tree. 
This is the ++# directory that contains this "Makefile.in" and the ++# "configure.in" script. ++# ++# BCC C Compiler and options for use in building executables that ++# will run on the platform that is doing the build. ++# ++# USLEEP If the target operating system supports the "usleep()" system ++# call, then define the HAVE_USLEEP macro for all C modules. ++# ++# THREADSAFE If you want the SQLite library to be safe for use within a ++# multi-threaded program, then define the following macro ++# appropriately: ++# ++# THREADLIB Specify any extra linker options needed to make the library ++# thread safe ++# ++# OPTS Extra compiler command-line options. ++# ++# EXE The suffix to add to executable files. ".exe" for windows ++# and "" for Unix. ++# ++# TCC C Compiler and options for use in building executables that ++# will run on the target platform. This is usually the same ++# as BCC, unless you are cross-compiling. ++# ++# AR Tools used to build a static library. ++# RANLIB ++# ++# TCL_FLAGS Extra compiler options needed for programs that use the ++# TCL library. ++# ++# LIBTCL Linker options needed to link against the TCL library. ++# ++# READLINE_FLAGS Compiler options needed for programs that use the ++# readline() library. ++# ++# LIBREADLINE Linker options needed by programs using readline() must ++# link against. ++# ++# NAWK Nawk compatible awk program. Older (obsolete?) solaris ++# systems need this to avoid using the original AT&T AWK. ++# ++# Once the macros above are defined, the rest of this make script will ++# build the SQLite library and testing tools. ++################################################################################ ++ ++# This is how we compile ++# ++TCCX = $(TCC) $(OPTS) $(THREADSAFE) $(USLEEP) -I. -I$(TOP)/src ++ ++# Object files for the SQLite library. ++# ++LIBOBJ = alter.obj analyze.obj attach.obj auth.obj btree.obj build.obj \ ++ callback.obj complete.obj date.obj delete.obj \ ++ expr.obj func.obj hash.obj insert.obj loadext.obj \ ++ main.obj opcodes.obj os.obj os_os2.obj os_unix.obj os_win.obj \ ++ pager.obj parse.obj pragma.obj prepare.obj printf.obj random.obj \ ++ select.obj table.obj tokenize.obj trigger.obj \ ++ update.obj util.obj vacuum.obj \ ++ vdbe.obj vdbeapi.obj vdbeaux.obj vdbefifo.obj vdbemem.obj \ ++ where.obj utf.obj legacy.obj vtab.obj ++# tclsqlite.obj ++ ++# ++# All of the source code files. 
++# ++SRC = \ ++ $(TOP)/src/alter.c \ ++ $(TOP)/src/analyze.c \ ++ $(TOP)/src/attach.c \ ++ $(TOP)/src/auth.c \ ++ $(TOP)/src/btree.c \ ++ $(TOP)/src/btree.h \ ++ $(TOP)/src/build.c \ ++ $(TOP)/src/callback.c \ ++ $(TOP)/src/complete.c \ ++ $(TOP)/src/date.c \ ++ $(TOP)/src/delete.c \ ++ $(TOP)/src/expr.c \ ++ $(TOP)/src/func.c \ ++ $(TOP)/src/hash.c \ ++ $(TOP)/src/hash.h \ ++ $(TOP)/src/insert.c \ ++ $(TOP)/src/legacy.c \ ++ $(TOP)/src/loadext.c \ ++ $(TOP)/src/main.c \ ++ $(TOP)/src/os.c \ ++ $(TOP)/src/os_os2.c \ ++ $(TOP)/src/os_unix.c \ ++ $(TOP)/src/os_win.c \ ++ $(TOP)/src/pager.c \ ++ $(TOP)/src/pager.h \ ++ $(TOP)/src/parse.y \ ++ $(TOP)/src/pragma.c \ ++ $(TOP)/src/prepare.c \ ++ $(TOP)/src/printf.c \ ++ $(TOP)/src/random.c \ ++ $(TOP)/src/select.c \ ++ $(TOP)/src/shell.c \ ++ $(TOP)/src/sqlite.h.in \ ++ $(TOP)/src/sqliteInt.h \ ++ $(TOP)/src/table.c \ ++ $(TOP)/src/tclsqlite.c \ ++ $(TOP)/src/tokenize.c \ ++ $(TOP)/src/trigger.c \ ++ $(TOP)/src/utf.c \ ++ $(TOP)/src/update.c \ ++ $(TOP)/src/util.c \ ++ $(TOP)/src/vacuum.c \ ++ $(TOP)/src/vdbe.c \ ++ $(TOP)/src/vdbe.h \ ++ $(TOP)/src/vdbeapi.c \ ++ $(TOP)/src/vdbeaux.c \ ++ $(TOP)/src/vdbefifo.c \ ++ $(TOP)/src/vdbemem.c \ ++ $(TOP)/src/vdbeInt.h \ ++ $(TOP)/src/vtab.c \ ++ $(TOP)/src/where.c ++ ++# Source code for extensions ++# ++SRC = \ ++ $(TOP)/ext/fts1/fts1.c \ ++ $(TOP)/ext/fts1/fts1.h \ ++ $(TOP)/ext/fts1/fts1_hash.c \ ++ $(TOP)/ext/fts1/fts1_hash.h \ ++ $(TOP)/ext/fts1/fts1_porter.c \ ++ $(TOP)/ext/fts1/fts1_tokenizer.h \ ++ $(TOP)/ext/fts1/fts1_tokenizer1.c ++ ++ ++# Source code to the test files. ++# ++TESTSRC = \ ++ $(TOP)/src/btree.c \ ++ $(TOP)/src/date.c \ ++ $(TOP)/src/func.c \ ++ $(TOP)/src/main.c \ ++ $(TOP)/src/os.c \ ++ $(TOP)/src/os_os2.c \ ++ $(TOP)/src/os_unix.c \ ++ $(TOP)/src/os_win.c \ ++ $(TOP)/src/pager.c \ ++ $(TOP)/src/pragma.c \ ++ $(TOP)/src/printf.c \ ++ $(TOP)/src/test1.c \ ++ $(TOP)/src/test2.c \ ++ $(TOP)/src/test3.c \ ++ $(TOP)/src/test4.c \ ++ $(TOP)/src/test5.c \ ++ $(TOP)/src/test6.c \ ++ $(TOP)/src/test7.c \ ++ $(TOP)/src/test8.c \ ++ $(TOP)/src/test_autoext.c \ ++ $(TOP)/src/test_async.c \ ++ $(TOP)/src/test_md5.c \ ++ $(TOP)/src/test_schema.c \ ++ $(TOP)/src/test_server.c \ ++ $(TOP)/src/test_tclvar.c \ ++ $(TOP)/src/utf.c \ ++ $(TOP)/src/util.c \ ++ $(TOP)/src/vdbe.c \ ++ $(TOP)/src/vdbeaux.c \ ++ $(TOP)/src/where.c ++ ++# Header files used by all library source files. ++# ++HDR = \ ++ sqlite3.h \ ++ $(TOP)/src/btree.h \ ++ $(TOP)/src/hash.h \ ++ opcodes.h \ ++ $(TOP)/src/os.h \ ++ $(TOP)/src/os_common.h \ ++ $(TOP)/src/sqlite3ext.h \ ++ $(TOP)/src/sqliteInt.h \ ++ $(TOP)/src/vdbe.h \ ++ parse.h ++ ++# Header files used by extensions ++# ++HDR = \ ++ $(TOP)/ext/fts1/fts1.h \ ++ $(TOP)/ext/fts1/fts1_hash.h \ ++ $(TOP)/ext/fts1/fts1_tokenizer.h ++ ++ ++# Header files used by the VDBE submodule ++# ++VDBEHDR = \ ++ $(HDR) \ ++ $(TOP)/src/vdbeInt.h ++ ++# This is the default Makefile target. The objects listed here ++# are what get build when you type just "make" with no arguments. 
++# ++all: sqlite3.h parse.h opcodes.h sqlite3.lib sqlite3$(EXE) ++ ++# Generate the file "last_change" which contains the date of change ++# of the most recently modified source code file ++# ++last_change: $(SRC) ++ cat $(SRC) | grep '$Id: ' | sort -k 5 | tail -1 \ ++ | $(NAWK) '{print $5,$6}' >last_change ++ ++sqlite3.lib: $(LIBOBJ) ++ $(AR) /out:sqlite3.lib $(LIBOBJ) ++ ++sqlite3$(EXE): $(TOP)/src/shell.c sqlite3.lib sqlite3.h ++ $(TCCX) $(READLINE_FLAGS) /Fesqlite3$(EXE) $(TOP)/src/shell.c \ ++ sqlite3.lib $(LIBREADLINE) $(TLIBS) $(THREADLIB) ++ ++objects: $(LIBOBJ_ORIG) ++ ++# This target creates a directory named "tsrc" and fills it with ++# copies of all of the C source code and header files needed to ++# build on the target system. Some of the C source code and header ++# files are automatically generated. This target takes care of ++# all that automatic generation. ++# ++target_source: $(SRC) $(VDBEHDR) opcodes.c keywordhash.h ++ rd /s /q tsrc ++ mkdir tsrc ++ copy $(SRC) $(VDBEHDR) tsrc ++ del tsrc\sqlite.h.in tsrc\parse.y ++ copy parse.c opcodes.c keywordhash.h tsrc ++ ++# Rules to build the LEMON compiler generator ++# ++lemon: $(TOP)/tool/lemon.c $(TOP)/tool/lempar.c ++ $(BCC) /Felemon $(OPTS) $(TOP)/tool/lemon.c ++ copy $(TOP)\tool\lempar.c . ++ ++# Rules to build individual files ++# ++alter.obj: $(TOP)/src/alter.c $(HDR) ++ $(TCCX) -c $(TOP)/src/alter.c ++ ++analyze.obj: $(TOP)/src/analyze.c $(HDR) ++ $(TCCX) -c $(TOP)/src/analyze.c ++ ++attach.obj: $(TOP)/src/attach.c $(HDR) ++ $(TCCX) -c $(TOP)/src/attach.c ++ ++auth.obj: $(TOP)/src/auth.c $(HDR) ++ $(TCCX) -c $(TOP)/src/auth.c ++ ++btree.obj: $(TOP)/src/btree.c $(HDR) $(TOP)/src/pager.h ++ $(TCCX) -c $(TOP)/src/btree.c ++ ++build.obj: $(TOP)/src/build.c $(HDR) ++ $(TCCX) -c $(TOP)/src/build.c ++ ++callback.obj: $(TOP)/src/callback.c $(HDR) ++ $(TCCX) -c $(TOP)/src/callback.c ++ ++complete.obj: $(TOP)/src/complete.c $(HDR) ++ $(TCCX) -c $(TOP)/src/complete.c ++ ++date.obj: $(TOP)/src/date.c $(HDR) ++ $(TCCX) -c $(TOP)/src/date.c ++ ++delete.obj: $(TOP)/src/delete.c $(HDR) ++ $(TCCX) -c $(TOP)/src/delete.c ++ ++expr.obj: $(TOP)/src/expr.c $(HDR) ++ $(TCCX) -c $(TOP)/src/expr.c ++ ++func.obj: $(TOP)/src/func.c $(HDR) ++ $(TCCX) -c $(TOP)/src/func.c ++ ++hash.obj: $(TOP)/src/hash.c $(HDR) ++ $(TCCX) -c $(TOP)/src/hash.c ++ ++insert.obj: $(TOP)/src/insert.c $(HDR) ++ $(TCCX) -c $(TOP)/src/insert.c ++ ++legacy.obj: $(TOP)/src/legacy.c $(HDR) ++ $(TCCX) -c $(TOP)/src/legacy.c ++ ++loadext.obj: $(TOP)/src/loadext.c $(HDR) ++ $(TCCX) -c $(TOP)/src/loadext.c ++ ++main.obj: $(TOP)/src/main.c $(HDR) ++ $(TCCX) -c $(TOP)/src/main.c ++ ++pager.obj: $(TOP)/src/pager.c $(HDR) $(TOP)/src/pager.h ++ $(TCCX) -c $(TOP)/src/pager.c ++ ++opcodes.obj: opcodes.c ++ $(TCCX) -c opcodes.c ++ ++opcodes.c: opcodes.h $(TOP)/mkopcodec.awk ++ sort -n -b -k 3 opcodes.h | $(NAWK) -f $(TOP)/mkopcodec.awk >opcodes.c ++ ++opcodes.h: parse.h $(TOP)/src/vdbe.c $(TOP)/mkopcodeh.awk ++ copy parse.h+$(TOP)\src\vdbe.c input.tmp ++ $(NAWK) -f $(TOP)/mkopcodeh.awk opcodes.h ++ del input.tmp ++ ++os.obj: $(TOP)/src/os.c $(HDR) ++ $(TCCX) -c $(TOP)/src/os.c ++ ++os_os2.obj: $(TOP)/src/os_os2.c $(HDR) ++ $(TCCX) -c $(TOP)/src/os_os2.c ++ ++os_unix.obj: $(TOP)/src/os_unix.c $(HDR) ++ $(TCCX) -c $(TOP)/src/os_unix.c ++ ++os_win.obj: $(TOP)/src/os_win.c $(HDR) ++ $(TCCX) -c $(TOP)/src/os_win.c ++ ++parse.obj: parse.c $(HDR) ++ $(TCCX) -c parse.c ++ ++#parse.h: parse.c ++ ++parse.c parse.h: $(TOP)/src/parse.y lemon $(TOP)/addopcodes.awk ++ copy $(TOP)\src\parse.y . 
++ lemon $(OPTS) parse.y ++ ren parse.h parse.h.temp ++ nawk -f $(TOP)/addopcodes.awk parse.h.temp >parse.h ++ del parse.h.temp ++ ++pragma.obj: $(TOP)/src/pragma.c $(HDR) ++ $(TCCX) $(TCL_FLAGS) -c $(TOP)/src/pragma.c ++ ++prepare.obj: $(TOP)/src/prepare.c $(HDR) ++ $(TCCX) $(TCL_FLAGS) -c $(TOP)/src/prepare.c ++ ++printf.obj: $(TOP)/src/printf.c $(HDR) ++ $(TCCX) $(TCL_FLAGS) -c $(TOP)/src/printf.c ++ ++random.obj: $(TOP)/src/random.c $(HDR) ++ $(TCCX) -c $(TOP)/src/random.c ++ ++select.obj: $(TOP)/src/select.c $(HDR) ++ $(TCCX) -c $(TOP)/src/select.c ++ ++sqlite3.h: $(TOP)/src/sqlite.h.in ++ ..\update_ver $(TOP)\src\sqlite.h.in >sqlite3.h ++ ++table.obj: $(TOP)/src/table.c $(HDR) ++ $(TCCX) -c $(TOP)/src/table.c ++ ++tclsqlite.obj: $(TOP)/src/tclsqlite.c $(HDR) ++ $(TCCX) $(TCL_FLAGS) -c $(TOP)/src/tclsqlite.c ++ ++tokenize.obj: $(TOP)/src/tokenize.c keywordhash.h $(HDR) ++ $(TCCX) -c $(TOP)/src/tokenize.c ++ ++keywordhash.h: $(TOP)/tool/mkkeywordhash.c ++ $(BCC) /Femkkeywordhash $(OPTS) $(TOP)/tool/mkkeywordhash.c ++ mkkeywordhash >keywordhash.h ++ ++trigger.obj: $(TOP)/src/trigger.c $(HDR) ++ $(TCCX) -c $(TOP)/src/trigger.c ++ ++update.obj: $(TOP)/src/update.c $(HDR) ++ $(TCCX) -c $(TOP)/src/update.c ++ ++utf.obj: $(TOP)/src/utf.c $(HDR) ++ $(TCCX) -c $(TOP)/src/utf.c ++ ++util.obj: $(TOP)/src/util.c $(HDR) ++ $(TCCX) -c $(TOP)/src/util.c ++ ++vacuum.obj: $(TOP)/src/vacuum.c $(HDR) ++ $(TCCX) -c $(TOP)/src/vacuum.c ++ ++vdbe.obj: $(TOP)/src/vdbe.c $(VDBEHDR) ++ $(TCCX) -c $(TOP)/src/vdbe.c ++ ++vdbeapi.obj: $(TOP)/src/vdbeapi.c $(VDBEHDR) ++ $(TCCX) -c $(TOP)/src/vdbeapi.c ++ ++vdbeaux.obj: $(TOP)/src/vdbeaux.c $(VDBEHDR) ++ $(TCCX) -c $(TOP)/src/vdbeaux.c ++ ++vdbefifo.obj: $(TOP)/src/vdbefifo.c $(VDBEHDR) ++ $(TCCX) -c $(TOP)/src/vdbefifo.c ++ ++vdbemem.obj: $(TOP)/src/vdbemem.c $(VDBEHDR) ++ $(TCCX) -c $(TOP)/src/vdbemem.c ++ ++vtab.obj: $(TOP)/src/vtab.c $(VDBEHDR) ++ $(TCCX) -c $(TOP)/src/vtab.c ++ ++where.obj: $(TOP)/src/where.c $(HDR) ++ $(TCCX) -c $(TOP)/src/where.c ++ ++# Rules for building test programs and for running tests ++# ++tclsqlite3: $(TOP)/src/tclsqlite.c sqlite3.lib ++ $(TCCX) $(TCL_FLAGS) -DTCLSH=1 /Fetclsqlite3 \ ++ $(TOP)/src/tclsqlite.c sqlite3.lib $(LIBTCL) $(THREADLIB) ++ ++testfixture$(EXE): $(TOP)/src/tclsqlite.c sqlite3.lib $(TESTSRC) ++ $(TCCX) $(TCL_FLAGS) -DTCLSH=1 -DSQLITE_TEST=1 -DSQLITE_CRASH_TEST=1 \ ++ -DSQLITE_SERVER=1 /Fetestfixture$(EXE) \ ++ $(TESTSRC) $(TOP)/src/tclsqlite.c \ ++ sqlite3.lib $(LIBTCL) $(THREADLIB) ++ ++fulltest: testfixture$(EXE) sqlite3$(EXE) ++ testfixture$(EXE) $(TOP)/test/all.test ++ ++test: testfixture$(EXE) sqlite3$(EXE) ++ testfixture$(EXE) $(TOP)/test/quick.test ++ ++sqlite3_analyzer$(EXE): $(TOP)/src/tclsqlite.c sqlite3.lib $(TESTSRC) \ ++ $(TOP)/tool/spaceanal.tcl ++ sed \ ++ -e '/^#/d' \ ++ -e 's,\\,\\\\,g' \ ++ -e 's,",\\",g' \ ++ -e 's,^,",' \ ++ -e 's,$$,\\n",' \ ++ $(TOP)/tool/spaceanal.tcl >spaceanal_tcl.h ++ $(TCCX) $(TCL_FLAGS) -DTCLSH=2 -DSQLITE_TEST=1 -DSQLITE_DEBUG=1 \ ++ /Fesqlite3_analyzer$(EXE) $(TESTSRC) $(TOP)/src/tclsqlite.c \ ++ sqlite3.lib $(LIBTCL) $(THREADLIB) ++ ++TEST_EXTENSION = $(SHPREFIX)testloadext.$(SO) ++$(TEST_EXTENSION): $(TOP)/src/test_loadext.c ++ $(MKSHLIB) $(TOP)/src/test_loadext.c /Fe$(TEST_EXTENSION) ++ ++extensiontest: testfixture$(EXE) $(TEST_EXTENSION) ++ testfixture$(EXE) $(TOP)/test/loadext.test ++ ++# Rules used to build documentation ++# ++arch.html: $(TOP)/www/arch.tcl ++ tclsh $(TOP)/www/arch.tcl >arch.html ++ ++autoinc.html: $(TOP)/www/autoinc.tcl ++ tclsh 
$(TOP)/www/autoinc.tcl >autoinc.html ++ ++c_interface.html: $(TOP)/www/c_interface.tcl ++ tclsh $(TOP)/www/c_interface.tcl >c_interface.html ++ ++capi3.html: $(TOP)/www/capi3.tcl ++ tclsh $(TOP)/www/capi3.tcl >capi3.html ++ ++capi3ref.html: $(TOP)/www/capi3ref.tcl ++ tclsh $(TOP)/www/capi3ref.tcl >capi3ref.html ++ ++changes.html: $(TOP)/www/changes.tcl ++ tclsh $(TOP)/www/changes.tcl >changes.html ++ ++compile.html: $(TOP)/www/compile.tcl ++ tclsh $(TOP)/www/compile.tcl >compile.html ++ ++copyright.html: $(TOP)/www/copyright.tcl ++ tclsh $(TOP)/www/copyright.tcl >copyright.html ++ ++copyright-release.html: $(TOP)/www/copyright-release.html ++ copy $(TOP)/www/copyright-release.html . ++ ++copyright-release.pdf: $(TOP)/www/copyright-release.pdf ++ copy $(TOP)/www/copyright-release.pdf . ++ ++common.tcl: $(TOP)/www/common.tcl ++ copy $(TOP)/www/common.tcl . ++ ++conflict.html: $(TOP)/www/conflict.tcl ++ tclsh $(TOP)/www/conflict.tcl >conflict.html ++ ++datatypes.html: $(TOP)/www/datatypes.tcl ++ tclsh $(TOP)/www/datatypes.tcl >datatypes.html ++ ++datatype3.html: $(TOP)/www/datatype3.tcl ++ tclsh $(TOP)/www/datatype3.tcl >datatype3.html ++ ++different.html: $(TOP)/www/different.tcl ++ tclsh $(TOP)/www/different.tcl >different.html ++ ++docs.html: $(TOP)/www/docs.tcl ++ tclsh $(TOP)/www/docs.tcl >docs.html ++ ++download.html: $(TOP)/www/download.tcl ++ mkdir -p doc ++ tclsh $(TOP)/www/download.tcl >download.html ++ ++faq.html: $(TOP)/www/faq.tcl ++ tclsh $(TOP)/www/faq.tcl >faq.html ++ ++fileformat.html: $(TOP)/www/fileformat.tcl ++ tclsh $(TOP)/www/fileformat.tcl >fileformat.html ++ ++formatchng.html: $(TOP)/www/formatchng.tcl ++ tclsh $(TOP)/www/formatchng.tcl >formatchng.html ++ ++index.html: $(TOP)/www/index.tcl last_change ++ tclsh $(TOP)/www/index.tcl >index.html ++ ++lang.html: $(TOP)/www/lang.tcl ++ tclsh $(TOP)/www/lang.tcl doc >lang.html ++ ++pragma.html: $(TOP)/www/pragma.tcl ++ tclsh $(TOP)/www/pragma.tcl >pragma.html ++ ++lockingv3.html: $(TOP)/www/lockingv3.tcl ++ tclsh $(TOP)/www/lockingv3.tcl >lockingv3.html ++ ++sharedcache.html: $(TOP)/www/sharedcache.tcl ++ tclsh $(TOP)/www/sharedcache.tcl >sharedcache.html ++ ++mingw.html: $(TOP)/www/mingw.tcl ++ tclsh $(TOP)/www/mingw.tcl >mingw.html ++ ++nulls.html: $(TOP)/www/nulls.tcl ++ tclsh $(TOP)/www/nulls.tcl >nulls.html ++ ++oldnews.html: $(TOP)/www/oldnews.tcl ++ tclsh $(TOP)/www/oldnews.tcl >oldnews.html ++ ++omitted.html: $(TOP)/www/omitted.tcl ++ tclsh $(TOP)/www/omitted.tcl >omitted.html ++ ++opcode.html: $(TOP)/www/opcode.tcl $(TOP)/src/vdbe.c ++ tclsh $(TOP)/www/opcode.tcl $(TOP)/src/vdbe.c >opcode.html ++ ++optimizer.html: $(TOP)/www/optimizer.tcl ++ tclsh $(TOP)/www/optimizer.tcl >optimizer.html ++ ++optoverview.html: $(TOP)/www/optoverview.tcl ++ tclsh $(TOP)/www/optoverview.tcl >optoverview.html ++ ++quickstart.html: $(TOP)/www/quickstart.tcl ++ tclsh $(TOP)/www/quickstart.tcl >quickstart.html ++ ++speed.html: $(TOP)/www/speed.tcl ++ tclsh $(TOP)/www/speed.tcl >speed.html ++ ++sqlite.html: $(TOP)/www/sqlite.tcl ++ tclsh $(TOP)/www/sqlite.tcl >sqlite.html ++ ++support.html: $(TOP)/www/support.tcl ++ tclsh $(TOP)/www/support.tcl >support.html ++ ++tclsqlite.html: $(TOP)/www/tclsqlite.tcl ++ tclsh $(TOP)/www/tclsqlite.tcl >tclsqlite.html ++ ++vdbe.html: $(TOP)/www/vdbe.tcl ++ tclsh $(TOP)/www/vdbe.tcl >vdbe.html ++ ++version3.html: $(TOP)/www/version3.tcl ++ tclsh $(TOP)/www/version3.tcl >version3.html ++ ++whentouse.html: $(TOP)/www/whentouse.tcl ++ tclsh $(TOP)/www/whentouse.tcl >whentouse.html ++ ++ ++# Files to be 
published on the website. ++# ++DOC = \ ++ arch.html \ ++ autoinc.html \ ++ c_interface.html \ ++ capi3.html \ ++ capi3ref.html \ ++ changes.html \ ++ compile.html \ ++ copyright.html \ ++ copyright-release.html \ ++ copyright-release.pdf \ ++ conflict.html \ ++ datatypes.html \ ++ datatype3.html \ ++ different.html \ ++ docs.html \ ++ download.html \ ++ faq.html \ ++ fileformat.html \ ++ formatchng.html \ ++ index.html \ ++ lang.html \ ++ lockingv3.html \ ++ mingw.html \ ++ nulls.html \ ++ oldnews.html \ ++ omitted.html \ ++ opcode.html \ ++ optimizer.html \ ++ optoverview.html \ ++ pragma.html \ ++ quickstart.html \ ++ sharedcache.html \ ++ speed.html \ ++ sqlite.html \ ++ support.html \ ++ tclsqlite.html \ ++ vdbe.html \ ++ version3.html \ ++ whentouse.html ++ ++doc: common.tcl $(DOC) ++ mkdir -p doc ++ ren $(DOC) doc ++ copy $(TOP)/www/*.gif $(TOP)/art/*.gif doc ++ ++# Standard install and cleanup targets ++# ++install: sqlite3 sqlite3.lib sqlite3.h ++ copy sqlite3 $(BINDIR) ++ copy sqlite3.lib $(LIBDIR) ++ copy sqlite3.h $(INCDIR) ++ ++clean: ++ -del *~ *.obj *.ilk *.pdb sqlite3.exe sqlite3.lib sqlite3.h opcodes.* 2>nul ++ -del lemon.exe lempar.c parse.* sqlite*.tar.gz mkkeywordhash.exe keywordhash.h 2>nul ++ -del *.da *.bb *.bbg gmon.out 2>nul ++ -rd /s /q tsrc 2>nul ++ -del testloadext.dll 2>nul +--- /dev/null 1969-12-31 16:00:00.000000000 -0800 ++++ update_ver.cmd 2006-11-23 07:09:03.334906700 -0800 +@@ -0,0 +1,4 @@ ++@echo off ++for /f %%i in ( ..\VERSION ) do set VERSION=%%i ++for /f "usebackq" %%i in ( `cmd /c sed "s/[^0-9]/ /g" ^< ..\VERSION ^| nawk "{printf \"%%d%%03d%%03d\",$1,$2,$3}"` ) do set VERS_NUM=%%i ++sed -e s/--VERS--/%VERSION%/ -e s/--VERSION-NUMBER--/%VERS_NUM%/ %1 diff --git a/src/win32/patches/stab2cv.patch b/src/win32/patches/stab2cv.patch new file mode 100644 index 00000000..696eea1a --- /dev/null +++ b/src/win32/patches/stab2cv.patch @@ -0,0 +1,11 @@ +--- src/PEExecutable.cpp.orig 2014-07-09 15:34:55.630375959 +0200 ++++ src/PEExecutable.cpp 2014-07-09 15:35:35.966377397 +0200 +@@ -16,6 +16,8 @@ + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + ++#include ++#include + #include "stdafx.h" + #include "defs.h" + #include "ntexe.h" diff --git a/src/win32/patches/wx.sed b/src/win32/patches/wx.sed new file mode 100644 index 00000000..3d33c132 --- /dev/null +++ b/src/win32/patches/wx.sed @@ -0,0 +1,9 @@ +s%config.gcc%config.mingw32% +s%\\\(.\)%/\1%g +s%ranlib%mingw32-ranlib% +s%windres%mingw32-windres% +s%ar rc%mingw32-ar rc% +s%makefile\.gcc%makefile\.mingw32% +s%if exist \([^ ][^ ]*\) del \1%if [ -e \1 ]; then rm \1; fi% +s%if not exist \([^ ][^ ]*\) mkdir \1%if [ ! -e \1 ]; then mkdir \1; fi% +s%if not exist \([^ ][^ ]*\) copy \([^ ][^ ]*\) \1%if [ ! 
-e \1 ]; then cp \2 \1; fi% diff --git a/src/win32/patches/wx1.patch b/src/win32/patches/wx1.patch new file mode 100644 index 00000000..268497ff --- /dev/null +++ b/src/win32/patches/wx1.patch @@ -0,0 +1,37 @@ +--- build/msw/config.gcc Sun Feb 5 13:37:26 2006 ++++ build/msw/config.mingw32 Mon Jun 26 16:54:42 2006 +@@ -11,10 +11,10 @@ + # ------------------------------------------------------------------------- + + # C compiler +-CC = gcc ++CC = mingw32-gcc + + # C++ compiler +-CXX = g++ ++CXX = mingw32-g++ + + # Standard flags for CC + CFLAGS = +@@ -23,10 +23,10 @@ + CXXFLAGS = + + # Standard preprocessor flags (common for CC and CXX) +-CPPFLAGS = ++CPPFLAGS = -I../../../zlib + + # Standard linker flags +-LDFLAGS = ++LDFLAGS = -L../../../zlib + + # The C preprocessor + CPP = $(CC) -E +@@ -44,7 +44,7 @@ + MSLU = 0 + + # Type of compiled binaries [debug,release] +-BUILD = debug ++BUILD = release + + # Should debugging info be included in the executables? The default value + # "default" means that debug info will be included if BUILD=debug diff --git a/src/win32/patches/wx2.patch b/src/win32/patches/wx2.patch new file mode 100644 index 00000000..f074d3d5 --- /dev/null +++ b/src/win32/patches/wx2.patch @@ -0,0 +1,22 @@ +--- ../release/wxWidgets-2.6.3/include/wx/string.h 2005-12-15 11:26:27.000000000 -0800 ++++ ./include/wx/string.h 2006-07-31 20:50:32.000000000 -0700 +@@ -229,7 +229,7 @@ + #endif + // VC++ free must take place in same DLL as allocation when using non dll + // run-time library (e.g. Multithreaded instead of Multithreaded DLL) +-#if defined(__VISUALC__) && defined(_MT) && !defined(_DLL) ++#if 1 || defined(__VISUALC__) && defined(_MT) && !defined(_DLL) + void Unlock() { if ( !IsEmpty() && --nRefs == 0) Free(); } + // we must not inline deallocation since allocation is not inlined + void Free(); +--- ../release/wxWidgets-2.6.3/src/common/string.cpp 2005-11-30 05:30:08.000000000 -0800 ++++ ./src/common/string.cpp 2006-07-31 20:48:53.000000000 -0700 +@@ -172,7 +172,7 @@ + // wxStringData class deallocation + // =========================================================================== + +-#if defined(__VISUALC__) && defined(_MT) && !defined(_DLL) ++#if 1 || defined(__VISUALC__) && defined(_MT) && !defined(_DLL) + # pragma message (__FILE__ ": building with Multithreaded non DLL runtime has a performance impact on wxString!") + void wxStringData::Free() + { diff --git a/src/win32/patches/wxWidgets.patch b/src/win32/patches/wxWidgets.patch new file mode 100644 index 00000000..64afc4dc --- /dev/null +++ b/src/win32/patches/wxWidgets.patch @@ -0,0 +1,67 @@ +diff -ur wxWidgets-2.8.7-rel/build/msw/config.gcc wxWidgets-2.8.7/build/msw/config.gcc +--- build/msw/config.mingw32 2007-11-21 13:42:45.000000000 +0100 ++++ build/msw/config.mingw32 2008-03-11 15:45:30.000000000 +0100 +@@ -11,10 +11,10 @@ + # ------------------------------------------------------------------------- + + # C compiler +-CC := gcc ++CC := mingw32-gcc + + # C++ compiler +-CXX := g++ ++CXX := mingw32-g++ + + # Standard flags for CC + CFLAGS := +@@ -23,10 +23,10 @@ + CXXFLAGS := + + # Standard preprocessor flags (common for CC and CXX) +-CPPFLAGS := ++CPPFLAGS := -I../../../zlib + + # Standard linker flags +-LDFLAGS := ++LDFLAGS := -L../../../zlib + + # The C preprocessor + CPP := $(CC) -E +@@ -44,7 +44,7 @@ + MSLU := 0 + + # Type of compiled binaries [debug,release] +-BUILD := debug ++BUILD := release + + # Should debugging info be included in the executables? 
The default value + # "default" means that debug info will be included if BUILD=debug +@@ -131,4 +131,3 @@ + # "3" ...... this is for Mingw 2.0 or newer (comes with gcc3) + # "2.95" ... for Mingw 1.1 or any of the older versions [3,2.95] + GCC_VERSION := 3 +- +diff -ur wxWidgets-2.8.7-rel/include/wx/string.h wxWidgets-2.8.7/include/wx/string.h +--- include/wx/string.h 2007-11-21 13:41:54.000000000 +0100 ++++ include/wx/string.h 2008-03-11 15:50:58.000000000 +0100 +@@ -235,7 +235,7 @@ + #endif + // VC++ free must take place in same DLL as allocation when using non dll + // run-time library (e.g. Multithreaded instead of Multithreaded DLL) +-#if defined(__VISUALC__) && defined(_MT) && !defined(_DLL) ++#if 1 || defined(__VISUALC__) && defined(_MT) && !defined(_DLL) + void Unlock() { if ( !IsEmpty() && --nRefs == 0) Free(); } + // we must not inline deallocation since allocation is not inlined + void Free(); +diff -ur wxWidgets-2.8.7-rel/src/common/string.cpp wxWidgets-2.8.7/src/common/string.cpp +--- src/common/string.cpp 2007-11-21 13:41:57.000000000 +0100 ++++ src/common/string.cpp 2008-03-11 15:53:17.000000000 +0100 +@@ -140,7 +140,7 @@ + // wxStringData class deallocation + // =========================================================================== + +-#if defined(__VISUALC__) && defined(_MT) && !defined(_DLL) ++#if 1 || defined(__VISUALC__) && defined(_MT) && !defined(_DLL) + # pragma message (__FILE__ ": building with Multithreaded non DLL runtime has a performance impact on wxString!") + void wxStringData::Free() + { diff --git a/src/win32/patches/zlib.patch b/src/win32/patches/zlib.patch new file mode 100644 index 00000000..6b51f492 --- /dev/null +++ b/src/win32/patches/zlib.patch @@ -0,0 +1,150 @@ +--- /dev/null Sun Jun 25 06:11:31 2006 ++++ win32/Makefile.mingw32 Sun Jun 25 06:11:06 2006 +@@ -0,0 +1,147 @@ ++# Makefile for zlib, derived from Makefile.dj2. ++# Modified for mingw32 by C. Spieler, 6/16/98. ++# Updated for zlib 1.2.x by Christian Spieler and Cosmin Truta, Mar-2003. ++# Last updated: 1-Aug-2003. ++# Tested under Cygwin and MinGW. ++ ++# Copyright (C) 1995-2003 Jean-loup Gailly. ++# For conditions of distribution and use, see copyright notice in zlib.h ++ ++# To compile, or to compile and test, type: ++# ++# make -fmakefile.gcc; make test testdll -fmakefile.gcc ++# ++# To use the asm code, type: ++# cp contrib/asm?86/match.S ./match.S ++# make LOC=-DASMV OBJA=match.o -fmakefile.gcc ++# ++# To install libz.a, zconf.h and zlib.h in the system directories, type: ++# ++# make install -fmakefile.gcc ++ ++# Note: ++# If the platform is *not* MinGW (e.g. it is Cygwin or UWIN), ++# the DLL name should be changed from "zlib1.dll". ++ ++STATICLIB = libz.a ++SHAREDLIB = zlib1.dll ++IMPLIB = libzdll.a ++ ++PREFIX = /usr/local ++EXEC_PREFIX = $(prefix) ++ ++INCLUDE_PATH = $(PREFIX)/include ++LIBRARY_PATH = $(PREFIX)/lib ++BIN_PATH = $(PREFIX)/bin ++ ++#LOC = -DASMV ++#LOC = -DDEBUG -g ++ ++CC = mingw32-gcc ++CFLAGS = $(LOC) -O3 -Wall ++ ++AS = $(CC) ++ASFLAGS = $(LOC) -Wall ++ ++LD = $(CC) ++LDFLAGS = $(LOC) -s ++ ++AR = mingw32-ar ++ARFLAGS = rcs ++ ++RC = mingw32-windres ++RCFLAGS = --define GCC_WINDRES ++ ++CP = cp -fp ++# If GNU install is available, replace $(CP) with install. 
++INSTALL = $(CP) ++RM = rm -f ++ ++OBJS = adler32.o compress.o crc32.o deflate.o gzio.o infback.o \ ++ inffast.o inflate.o inftrees.o trees.o uncompr.o zutil.o ++OBJA = ++ ++all: $(STATICLIB) $(SHAREDLIB) $(IMPLIB) example.exe minigzip.exe example_d.exe minigzip_d.exe ++ ++test: example.exe minigzip.exe ++ ./example.exe ++ echo hello world | ./minigzip.exe | ./minigzip.exe -d ++ ++testdll: example_d.exe minigzip_d.exe ++ ./example_d.exe ++ echo hello world | ./minigzip_d.exe | ./minigzip_d.exe -d ++ ++.c.o: ++ $(CC) $(CFLAGS) -c -o $@ $< ++ ++.S.o: ++ $(AS) $(ASFLAGS) -c -o $@ $< ++ ++$(STATICLIB): $(OBJS) $(OBJA) ++ $(AR) $(ARFLAGS) $@ $(OBJS) $(OBJA) ++ ++$(IMPLIB): $(SHAREDLIB) ++ ++$(SHAREDLIB): win32/zlib.def $(OBJS) $(OBJA) zlibrc.o ++ mingw32-dllwrap --dlltool-name mingw32-dlltool --driver-name $(CC) --def win32/zlib.def \ ++ --implib $(IMPLIB) -o $@ $(OBJS) $(OBJA) zlibrc.o ++ mingw32-strip $@ ++ ++example.exe: example.o $(STATICLIB) ++ $(LD) $(LDFLAGS) -o $@ example.o $(STATICLIB) ++ ++minigzip.exe: minigzip.o $(STATICLIB) ++ $(LD) $(LDFLAGS) -o $@ minigzip.o $(STATICLIB) ++ ++example_d.exe: example.o $(IMPLIB) ++ $(LD) $(LDFLAGS) -o $@ example.o $(IMPLIB) ++ ++minigzip_d.exe: minigzip.o $(IMPLIB) ++ $(LD) $(LDFLAGS) -o $@ minigzip.o $(IMPLIB) ++ ++zlibrc.o: win32/zlib1.rc ++ $(RC) $(RCFLAGS) -o $@ win32/zlib1.rc ++ ++ ++# INCLUDE_PATH and LIBRARY_PATH must be set. ++ ++.PHONY: install uninstall clean ++ ++install: zlib.h zconf.h $(STATICLIB) $(SHAREDLIB) $(IMPLIB) ++ -@if test ! -e $(INCLUDE_PATH); then mkdir $(INCLUDE_PATH); fi ++ -@if test ! -e $(LIBRARY_PATH); then mkdir $(LIBRARY_PATH); fi ++ -@if test ! -e $(BIN_PATH); then mkdir $(BIN_PATH); fi ++ -$(INSTALL) zlib.h $(INCLUDE_PATH) ++ -$(INSTALL) zconf.h $(INCLUDE_PATH) ++ -$(INSTALL) $(STATICLIB) $(LIBRARY_PATH) ++ -$(INSTALL) $(IMPLIB) $(LIBRARY_PATH) ++ -$(INSTALL) $(SHAREDLIB) $(BIN_PATH) ++ ++uninstall: ++ -$(RM) $(INCLUDE_PATH)/zlib.h ++ -$(RM) $(INCLUDE_PATH)/zconf.h ++ -$(RM) $(LIBRARY_PATH)/$(STATICLIB) ++ -$(RM) $(LIBRARY_PATH)/$(IMPLIB) ++ ++clean: ++ -$(RM) $(STATICLIB) ++ -$(RM) $(SHAREDLIB) ++ -$(RM) $(IMPLIB) ++ -$(RM) *.o ++ -$(RM) *.exe ++ -$(RM) foo.gz ++ ++adler32.o: zlib.h zconf.h ++compress.o: zlib.h zconf.h ++crc32.o: crc32.h zlib.h zconf.h ++deflate.o: deflate.h zutil.h zlib.h zconf.h ++example.o: zlib.h zconf.h ++gzio.o: zutil.h zlib.h zconf.h ++inffast.o: zutil.h zlib.h zconf.h inftrees.h inflate.h inffast.h ++inflate.o: zutil.h zlib.h zconf.h inftrees.h inflate.h inffast.h ++infback.o: zutil.h zlib.h zconf.h inftrees.h inflate.h inffast.h ++inftrees.o: zutil.h zlib.h zconf.h inftrees.h ++minigzip.o: zlib.h zconf.h ++trees.o: deflate.h zutil.h zlib.h zconf.h trees.h ++uncompr.o: zlib.h zconf.h ++zutil.o: zutil.h zlib.h zconf.h diff --git a/src/win32/pebuilder/Makefile.in b/src/win32/pebuilder/Makefile.in new file mode 100644 index 00000000..075b4215 --- /dev/null +++ b/src/win32/pebuilder/Makefile.in @@ -0,0 +1,55 @@ +# +# Makefile to build the BartPE plugin +# +# +# D. Scott Barninger, Nov 2004 +# + +srcdir = . +VPATH = . +.PATH: . + +# one up +basedir = .. +# top dir +topdir = ../../.. 
+# this dir relative to top dir +thisdir = src/win32/pebuilder + +first_rule: all + +dummy: + +all: pebuilder zip + +pebuilder: + mkdir -p bacula/files + cp -f ../baculafd/bacula-fd.conf bacula/files/ + cp -f ../console/bconsole.conf bacula/files/ + cp -f ../wx-console/wx-console.conf bacula/files/ + cp -f ../baculafd/Release/bacula-fd.exe bacula/files/ + cp -f ../console/Release/bconsole.exe bacula/files/ + cp -f ../wx-console/Release/wx-console.exe bacula/files/ + cp -f ../../../../depkgs-win32/pthreads/pthreadVCE.dll bacula/files/ + cp -f c:/windows/system32/msvcr71.dll bacula/files + cp -f ../../../LICENSE bacula/files/ + +zip: pebuilder + zip -r ../winbacula-bartpe-@VERSION@.zip bacula README + +depend: + + +#------------------------------------------------------------------------- + + +install: + +uninstall: + + +clean: + rm -rf bacula/files + +# clean for distribution +distclean: clean diff --git a/src/win32/pebuilder/README b/src/win32/pebuilder/README new file mode 100644 index 00000000..228e96e4 --- /dev/null +++ b/src/win32/pebuilder/README @@ -0,0 +1,13 @@ +Bacula plugin for BartPE +http://www.nu2.nu/pebuilder/ +Wed Nov 10 2004 +Copyright 2004 D. Scott Barninger +Licensed under the GNU GPL v2 + +Copy the bacula plugin folder to the plugins directory of your +BartPE build directory. Edit the config files in the files directory +to suit your bacula installation. + +Build your BartPE disk according to the instructions at +http://www.nu2.nu/pebuilder/#build along with whatever other plugins +you want. Be sure that 'Bacula Client' is enabled on the plugin selection screen. diff --git a/src/win32/pebuilder/bacula/bacula.inf b/src/win32/pebuilder/bacula/bacula.inf new file mode 100644 index 00000000..c57fb447 --- /dev/null +++ b/src/win32/pebuilder/bacula/bacula.inf @@ -0,0 +1,37 @@ +; PE Builder v3 plug-in INF file for bacula +; Copyright (C) 2004-2006 Free Software Foundation Europe e.V. + +; Thu Dec 09 2004 D. Scott Barninger +; ASSIGNMENT OF COPYRIGHT +; FOR VALUE RECEIVED, D. Scott Barninger hereby sells, transfers and +; assigns unto Kern Sibbald, his successors, assigns and personal representatives, +; all right, title and interest in and to the copyright in this software. +; D. Scott Barninger warrants good title to said copyright, that it is +; free of all liens, encumbrances or any known claims against said copyright. 
+ +; licensed under the GNU GPL v2 + +[Version] +Signature= "$Windows NT$" + +[PEBuilder] +Name="Bacula Client" +Enable=1 + +[WinntDirectories] +a="Programs\bacula",3 + +[SourceDisksFiles] +files\bacula-fd.exe=a,,1 +files\bacula-fd.conf=a,,1 +files\bconsole.exe=a,,1 +files\bconsole.conf=a,,1 +files\License.txt=a,,1 +files\pthreadVCE.dll=a,,1 +files\msvcr71.dll=a,,1 +files\wx-console.exe=a,,1 +files\wx-console.conf=a,,1 +files\msvcr71.dll=a,,1 + +[Append] +nu2menu.xml, bacula_nu2menu.xml diff --git a/src/win32/pebuilder/bacula/bacula_nu2menu.xml b/src/win32/pebuilder/bacula/bacula_nu2menu.xml new file mode 100644 index 00000000..7b41bda2 --- /dev/null +++ b/src/win32/pebuilder/bacula/bacula_nu2menu.xml @@ -0,0 +1,19 @@ + + + + + + + + + + + + + Install Bacula Client Service + Start Bacula Client Service + Bacula WX-Console + + diff --git a/src/win32/scripts/Makefile b/src/win32/scripts/Makefile new file mode 100644 index 00000000..a0f844e9 --- /dev/null +++ b/src/win32/scripts/Makefile @@ -0,0 +1,39 @@ +# +# Makefile for win32 bacula executables +# Using MinGW cross-compiler on GNU/Linux +# +# Written by Robert Nelson, June 2006 +# + +include ../Makefile.inc + +########################################################################## + +BSLEEP_OBJS = \ + $(OBJDIR)/bsleep.o + +ALL_OBJS = \ + $(BSLEEP_OBJS) + +###################################################################### + +# Targets + +.PHONY: all clean + +all: $(BINDIR)/bsleep.exe + +clean: + @echo "Cleaning `pwd`" + $(call clean_obj,$(ALL_OBJS)) + $(call clean_exe,$(BINDIR)/bsleep.exe) + $(ECHO_CMD)rm -rf $(OBJDIRS) + +# +# Rules +# + +$(BINDIR)/bsleep.exe: $(BSLEEP_OBJS) + $(call link_conapp) + +include ../Makefile.rules diff --git a/src/win32/scripts/bsleep.c b/src/win32/scripts/bsleep.c new file mode 100644 index 00000000..4fd6cd66 --- /dev/null +++ b/src/win32/scripts/bsleep.c @@ -0,0 +1,23 @@ +#include +#include + +int +main(int argc, const char ** argv) +{ + int nsecs; + + if (argc != 2) + { + fputs("usage: bsleep \n n = number of seconds\n", stderr); + exit(1); + } + + if (sscanf(argv[1], "%d", &nsecs) != 1) + { + fputs("sleep: incorrect argument, must be number of seconds to sleep\n", stderr); + exit(1); + } + + Sleep(nsecs * 1000); + exit(0); +} diff --git a/src/win32/scripts/bsleep.vcproj b/src/win32/scripts/bsleep.vcproj new file mode 100644 index 00000000..9d9ad1ec --- /dev/null +++ b/src/win32/scripts/bsleep.vcproj @@ -0,0 +1,199 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/win32/scripts/disk-changer.cmd b/src/win32/scripts/disk-changer.cmd new file mode 100644 index 00000000..4deb4e9c --- /dev/null +++ b/src/win32/scripts/disk-changer.cmd @@ -0,0 +1,201 @@ +@ECHO off +setlocal ENABLEDELAYEDEXPANSION +REM +REM +REM Bacula interface to virtual autoloader using disk storage +REM +REM $Id: disk-changer.in,v 1.3 2006/07/30 16:00:32 kerns Exp $ +REM +REM If you set in your Device resource +REM +REM Changer Command = "disk-changer %c %o %S %a %d" +REM you will have the following input to this script: +REM +REM So Bacula will always call with all the following arguments, even though +REM in some cases, not all are used. +REM +REM disk-changer "changer-device" "command" "slot" "archive-device" "drive-index" +REM %1 %2 %3 %4 %5 +REM +REM By default the autochanger has 10 Volumes and 1 Drive. 
+REM +REM Note: For this script to work, you *must" specify +REM Device Type = File +REM in each of the Devices associated with your AutoChanger resource. +REM +REM changer-device is the name of a file that overrides the default +REM volumes and drives. It may have: +REM maxslot=n where n is one based (default 10) +REM maxdrive=m where m is zero based (default 1 -- i.e. 2 drives) +REM +REM This code can also simulate barcodes. You simply put +REM a list of the slots and barcodes in the "base" directory/barcodes. +REM See below for the base directory definition. Example of a +REM barcodes file: +REM C:\TEMP\bacula\barcodes +REM 1:Vol001 +REM 2:Vol002 +REM ... +REM +REM archive-device is the name of the base directory where you want the +REM Volumes stored appended with \drive0 for the first drive; \drive1 +REM for the second drive, ... For example, you might use +REM C:\Temp\bacula\drive0 Note: you must not have a trailing slash, and +REM the string (e.g. \drive0) must be unique, and it must not match +REM any other part of the directory name. These restrictions could be +REM easily removed by any clever script jockey. +REM +REM Full example: disk-changer C:\Temp\bacula\conf load 1 C:\Temp\bacula\drive0 0 +REM +REM The Volumes will be created with names slot1, slot2, slot3, ... maxslot in the +REM base directory. In the above example the base directory is C:\Temp\bacula. +REM However, as with tapes, their Bacula Volume names will be stored inside the +REM Volume label. In addition to the Volumes (e.g. C:\Temp\bacula\slot1, +REM C:\Temp\bacula\slot3, ...) this script will create a C:\Temp\bacula\loadedn +REM file to keep track of what Slot is loaded. You should not change this file. +REM + +SET dbgfile=%CD%\disk-changer.log + +REM to turn on logging, uncomment the following line +IF NOT EXIST %dbgfile% COPY nul %dbgfile% >nul + +REM +REM check parameter count on commandline +REM +REM Check for special cases where only 2 arguments are needed, +REM all others are a minimum of 5 +REM +IF "%1" EQU "" goto :param_count_invalid +IF "%2" EQU "" goto :param_count_invalid +IF "%2" EQU "list" goto :param_count_valid +IF "%2" EQU "slots" goto :param_count_valid +IF "%3" EQU "" goto :param_count_invalid +IF "%4" EQU "" goto :param_count_invalid +IF "%5" EQU "" goto :param_count_invalid +GOTO :param_count_valid + +:param_count_invalid + echo Insufficient number of arguments given. + IF "%2" EQU "" ( + echo At least two arguments must be specified. + ) else echo Command expected 5 arguments. +:usage + ECHO. + ECHO usage: disk-changer ctl-device command [slot archive-device drive-index] + ECHO Valid commands are: unload, load, list, loaded, and slots. + EXIT /B 1 + +:param_count_valid + +REM Setup arguments +SET ctl=%1 +SET cmd=%2 +SET slot=%3 +SET device=%4 +SET drive=%5 + +REM set defaults +SET maxdrive=1 +SET maxslot=10 + +SET ctl=%ctl:/=\% +SET ctl=%ctl:\\=\% + +SET device=%device:/=\% +SET device=%device:\\=\% + +REM Pull in conf file +IF EXIST %ctl% CALL %ctl% + +FOR %%i IN ( %ctl% ) DO SET dirname=%%~dpi +IF NOT EXIST %dirname%nul ( + ECHO ERROR: Autochanger directory "%dirname%" does not exist. + ECHO You must create it. 
+ EXIT /b 1 +) + +CALL :debug "Parms: %ctl% %cmd% %slot% %device% %drive%" +IF "%cmd%" EQU "unload" GOTO :cmdUnload +IF "%cmd%" EQU "load" GOTO :cmdLoad +IF "%cmd%" EQU "list" GOTO :cmdList +IF "%cmd%" EQU "loaded" GOTO :cmdLoaded +IF "%cmd%" EQU "slots" GOTO :cmdSlots +GOTO :cmdUnknown + +:cmdUnload + CALL :debug "Doing disk -f %ctl% unload %slot% %device% %drive%" + IF NOT EXIST %dirname%loaded%drive% ECHO 0 >%dirname%loaded%drive% + FOR /f %%i IN ( %dirname%loaded%drive% ) DO SET ld=%%i + + IF "%slot%" EQU "%ld%" ( + CALL :debug "Unloaded slot %ld% from drive %drive%" + ECHO 0 >%dirname%loaded%drive% + DEL %dirname%%device% >nul 2>nul + SET rtn=0 + ) ELSE ( + ECHO Storage Element %slot% is Already Full + CALL :debug "Storage Element %slot% is Already Full" + SET rtn=1 + ) + GOTO :cmdExit + +:cmdLoad + CALL :debug "Doing disk -f %ctl% load %slot% %device% %drive%" + SET ld=0 + IF NOT EXIST %dirname%loaded%drive% ECHO 0 >%dirname%loaded%drive% + FOR /f %%i IN ( %dirname%loaded%drive% ) DO SET ld=%%i + IF %ld% EQU 0 ( + IF NOT EXIST %dirname%slot%slot% COPY nul %dirname%slot%slot% >nul + DEL %device% + fsutil hardlink create %device% %dirname%slot%slot% >nul 2>&1 + SET rtn=%ERRORLEVEL% + IF !rtn! EQU 0 ( + ECHO %slot% >%dirname%loaded%drive% + CALL :debug "Loaded slot %slot% into drive %drive%" + ) ELSE ( + CALL :debug "Create hardlink failed, return = !rtn!" + ) + ) ELSE ( + ECHO Drive %drive% Full - Storage element %ld% loaded + CALL :debug "Drive %drive% Full - Storage element %ld% loaded" + ) + GOTO :cmdExit + +:cmdList + CALL :debug "Doing disk -f %ctl% -- to list volumes" + IF EXIST %dirname%barcodes ( + TYPE %dirname%barcodes + ) ELSE ( + FOR /l %%i IN ( 1, 1, %maxslot% ) DO ECHO %%i: + ) + SET rtn=0 + GOTO :cmdExit + +:cmdLoaded + CALL :debug "Doing disk -f %ctl% %drive% -- to find what is loaded" + IF EXIST %dirname%loaded%drive% ( TYPE %dirname%loaded%drive% ) ELSE ECHO 0 + SET rtn=0 + GOTO :cmdExit + +:cmdSlots + CALL :debug "Doing disk -f %ctl% -- to get count of slots" + ECHO %maxslot% + SET rtn=0 + GOTO :cmdExit + +:cmdExit + EXIT /b %rtn% + +:cmdUnknown + ECHO '%cmd%' is an invalid command. + GOTO :usage + +REM +REM log whats done +REM +:debug + IF NOT EXIST %dbgfile% GOTO :EOF + FOR /f "usebackq tokens=2-4,5-7 delims=/:. " %%i IN ( '%DATE% %TIME%' ) do SET TIMESTAMP=%%k%%i%%j-%%l:%%m:%%n + ECHO %TIMESTAMP% %* >> %dbgfile% + GOTO :EOF diff --git a/src/win32/scripts/dvd-handler.cmd b/src/win32/scripts/dvd-handler.cmd new file mode 100644 index 00000000..2f21e7e0 --- /dev/null +++ b/src/win32/scripts/dvd-handler.cmd @@ -0,0 +1,387 @@ +@ECHO off +REM +REM Check the free space available on a writable DVD +REM Should always exit with 0 status, otherwise it indicates a serious error. +REM (wrong number of arguments, Python exception...) +REM +REM called: dvd-handler operation args +REM +REM operations used by Bacula: +REM +REM free (no arguments) +REM Scan the device and report the available space. It returns: +REM Prints on the first output line the free space available in +REM bytes. +REM If an error occurs, prints a negative number (-errno), followed, +REM on the second line, by an error message. +REM +REM write op filename +REM Write a part file to disk. +REM This operation needs two additional arguments. 
+REM The first (op) indicates to +REM 0 -- append +REM 1 -- first write to a blank disk +REM 2 -- blank or truncate a disk +REM +REM The second is the filename to write +REM +REM operations available but not used by Bacula: +REM +REM test Scan the device and report the information found. +REM This operation needs no further arguments. +REM prepare Prepare a DVD+/-RW for being used by Bacula. +REM Note: This is only useful if you already have some +REM non-Bacula data on a medium, and you want to use +REM it with Bacula. Don't run this on blank media, it +REM is useless. +REM +REM +REM $Id: dvd-handler.in,v 1.11 2006/08/30 16:19:30 kerns Exp $ +REM + +setlocal ENABLEDELAYEDEXPANSION +SET PATH=%PATH%;@bin_dir_cmd@ +REM Configurable values: + +SET dvdrwmediainfo=dvd+rw-mediainfo.exe +SET growcmd=growisofs.exe +SET dvdrwformat=dvd+rw-format.exe +SET dd=dd.exe +SET margin=10485760 + +REM Comment the following line if you want the tray to be reloaded +REM when writing ends. +SET growcmd=%growcmd% -use-the-force-luke^^^^^=notray + +REM end of configurable values + +IF "%1" == "" GOTO :usage +IF "%2" == "" GOTO :usage + +CALL :init %1 + +IF "%2" == "free" ( + CALL :free + ECHO !ERRORLEVEL! + ECHO No Error reported. +) ELSE IF "%2" == "prepare" ( + CALL :prepare + ECHO Medium prepared successfully. +) ELSE IF "%2" == "test" ( + IF %freespace_collected% EQU 0 CALL :collect_freespace + IF %mediumtype_collected% EQU 0 CALL :collect_mediumtype + ECHO Class disk, initialized with device %device% + ECHO type = '!disktype!' mode='!diskmode!' status = '!diskstatus!' + ECHO next_session = !next_session! capacity = !capacity! + ECHO Hardware device is '!hardwaredevice!' + ECHO growcmd = '!growcmd!' + ECHO growparams = '!growparams!' + ECHO. + SET empty_disk=false + CALL :is_blank + IF !ERRORLEVEL! EQU 1 SET empty_disk=true + SET rewritable=false + CALL :is_RW + IF !ERRORLEVEL! EQU 1 SET rewritable=true + SET plus_RW_disk=false + CALL :is_plus_RW + IF !ERRORLEVEL! EQU 1 SET plus_RW_disk=true + SET minus_RW_disk=false + CALL :is_minus_RW + IF !ERRORLEVEL! EQU 1 SET minus_RW_disk=true + SET restricted_overwrite_disk=false + CALL :is_restricted_overwrite + IF !ERRORLEVEL! EQU 1 SET restricted_overwrite_disk=true + SET blank_disk=false + CALL :is_blank + IF !ERRORLEVEL! EQU 1 SET blank_disk=true + ECHO Empty disk: !empty_disk! Blank Disk: !blank_disk! ReWritable disk: !rewritable! + ECHO Plus RW: !plus_RW_disk! Minus RW: !minus_RW_disk! Restricted Overwrite: !restricted_overwrite_disk! + CALL :free + ECHO Free space: !ERRORLEVEL! +) ELSE IF "%2" == "write" ( + IF "%3" == "" GOTO :usage + IF "%4" == "" GOTO :usage + CALL :write %3 %4 + ECHO Part file %4 successfully written to disk. +) ELSE ( + ECHO No operation - use test, free, prepare or write. + ECHO THIS MIGHT BE A CASE OF DEBUGGING BACULA OR AN ERROR! +) +EXIT /b 0 + +REM ########################################################################## +REM +REM The rest of this file is a set of subroutines that return DVD disk +REM information. +REM +REM Status information about the device and the disk loaded is collected +REM only when asked for (for example dvd-freespace doesn't need to know the +REM media type, and dvd-writepart doesn't not always need to know the free +REM space). +REM +REM The following subroutines are implemented: +REM init we need that... +REM is_empty Set ERRORLEVEL to TRUE if the disk is empty, blank... +REM this needs more work, especially concerning non-RW media +REM and blank vs. no filesystem considerations. 
Here, we +REM should also look for other filesystems - probably we don't +REM want to silently overwrite UDF or ext2 or anything not +REM mentioned in fstab... +REM (NB: I don't think it is a problem) +REM is_RW Set ERRORLEVEL to TRUE if the disk is RW (DVD-RW or DVD+RW) +REM is_plus_RW Set ERRORLEVEL to TRUE if the disk is DVD+RW +REM is_minus_RW Set ERRORLEVEL to TRUE if the disk is DVD-RW +REM is_blank Set ERRORLEVEL to TRUE if the disk is blank +REM free Returns the available free space. +REM write Writes one part file to disk, either starting a new file +REM system on disk, or appending to it. +REM This method should also prepare a blank disk so that a +REM certain part of the disk is used to allow detection of a +REM used disk by all / more disk drives. +REM blank Blank the device +REM +REM ########################################################################## + +:init +SET device=%1 +SET disktype=none +SET diskmode=none +SET diskstatus=none +SET hardwaredevice=none +SET pid=0 +SET next_session=-1 +SET capacity=-1 + +SET freespace_collected=0 +SET mediumtype_collected=0 + +SET growcmd=%growcmd% -quiet -use-the-force-luke^^^=4gms + +SET growparams=-A "Bacula Data" -input-charset=default -iso-level 3 -pad +SET growparams=%growparams% -p "dvd-handler / growisofs" -sysid "BACULADATA" -R +GOTO :EOF + +:collect_freespace +SET next_session=0 +SET capacity=0 +FOR /f "delims== tokens=1*" %%i in ( '%growcmd% -F %device%' ) DO ( + IF "%%i" == "next_session" ( + SET next_session=%%j + ) ELSE IF "%%i" == "capacity" ( + SET capacity=%%j + ) ELSE IF "%%j" == "" ( + SET result=!result! %%i + ) ELSE ( + SET RESULT=!result! %%i=%%j + ) +) +SET status=%ERRORLEVEL% +IF %STATUS% NEQ 0 ( + SET /a STATUS=STATUS ^& 0x7F + IF !STATUS! EQU 112 ( + REM Kludge to force dvd-handler to return a free space of 0 + next_session = 1 + capacity = 1 + freespace_collected = 1 + GOTO :EOF + ) ELSE ( + ECHO growisofs returned with an error !STATUS!. Please check you are using a patched version of dvd+rw-tools. + EXIT !STATUS! + ) +) + +IF %next_session% EQU 0 IF %capacity% EQU 0 ( + ECHO Cannot get next_session and capacity from growisofs. 
+ ECHO Returned: %result:|=^|% + EXIT 1 +) +SET freespace_collected=1 +GOTO :EOF + +:collect_mediumtype +SET hardwaredevice= +SET disktype= +SET diskmode= +SET diskstatus= +SET lasterror= +FOR /f "delims=: tokens=1,2 usebackq" %%i in ( `"%dvdrwmediainfo%" %device%` ) DO ( + IF "%%i" == "INQUIRY" FOR /f "tokens=*" %%k in ( "%%j" ) DO SET hardwaredevice=%%k + IF "%%i" == " Mounted Media" FOR /f "tokens=1,2* delims=, " %%k in ( "%%j" ) DO ( + SET disktype=%%l + SET diskmode=%%m + ) + IF "%%i" == " Disc status" FOR /f "tokens=*" %%k in ( "%%j" ) DO SET diskstatus=%%k +) + +IF NOT DEFINED disktype ( + ECHO Media type not found in %dvdrwmediainfo% output + EXIT 1 +) + +IF "%disktype%" == "DVD-RW" IF NOT DEFINED diskmode ( + ECHO Media mode not found for DVD-RW in %dvdrwmediainfo% output + EXIT 1 +) + +IF NOT DEFINED diskstatus ( + ECHO Disc status not found in %dvdrwmediainfo% output + EXIT 1 +) + +SET mediumtype_collected=1 +GOTO :EOF + +:is_empty +IF %freespace_collected% EQU 0 CALL :collect_freespace +IF %next_session% EQU 0 ( EXIT /b 1 ) ELSE EXIT /b 0 + +:is_RW +IF %mediumtype_collected% EQU 0 CALL :collect_mediumtype +IF %disktype% == "DVD-RW" EXIT /b 1 +IF %disktype% == "DVD+RW" EXIT /b 1 +IF %disktype% == "DVD-RAM" EXIT /b 1 +EXIT /b 0 + +:is_plus_RW +IF %mediumtype_collected% EQU 0 CALL :collect_mediumtype +IF "%disktype%" == "DVD+RW" EXIT /b 1 +EXIT /b 0 + +:is_minus_RW +IF %mediumtype_collected% EQU 0 CALL :collect_mediumtype +IF "%disktype%" == "DVD-RW" EXIT /b 1 +EXIT /b 0 + +:is_restricted_overwrite +IF %mediumtype_collected% EQU 0 CALL :collect_mediumtype +IF "%diskmode%" == "Restricted Overwrite" EXIT /b 1 +EXIT /b 0 + +:is_blank +IF %mediumtype_collected% EQU 0 CALL :collect_mediumtype +IF "%diskstatus%" == "blank" EXIT /b 1 +EXIT /b 0 + +:free +IF %freespace_collected% EQU 0 CALL :collect_freespace +FOR /f %%i in ( 'expr64 "capacity - next_session - margin"' ) DO SET fr=%%i +expr64 /q "fr < 0" && ( EXIT /b 0 ) || EXIT /b %fr% + +REM %1 - newvol, %2 - partfile +:write +REM Blank DVD+RW when there is no data on it +CALL :is_plus_RW +SET tmpvar=%ERRORLEVEL% +CALL :is_blank +SET /a tmpvar=tmpvar + ERRORLEVEL +IF %1 EQU 1 IF %tmpvar% EQU 2 ( + ECHO DVD+RW looks brand-new, blank it to fix some DVD-writers bugs. + CALL :blank + ECHO Done, now writing the part file. +) +CALL :is_minus_RW +IF %ERRORLEVEL% NEQ 0 IF %1 NEQ 0 ( + CALL :is_restricted_overwrite + IF !ERRORLEVEL! EQU 0 ( + ECHO DVD-RW is in %diskmode% mode, reformating it to Restricted Overwrite + CALL :reformat_minus_RW + ECHO Done, now writing the part file. + ) +) +if %1 NEQ 0 ( + REM Ignore any existing iso9660 filesystem - used for truncate + if %1 EQU 2 ( + SET cmd_opts= -use-the-force-luke^^^=tty -Z + ) ELSE ( + SET cmd_opts= -Z + ) +) ELSE ( + SET cmd_opts= -M +) +ECHO Running %growcmd% %growparams% %cmd_opts% %device% %2 +%growcmd% %growparams% %cmd_opts% %device% %2 +IF %ERRORLEVEL% NEQ 0 ( + ECHO Exited with status !ERRORLEVEL! + EXIT !ERRORLEVEL! +) +GOTO :EOF + +:prepare +CALL :is_RW +IF %ERRORLEVEL% EQU 0 ( + ECHO I won't prepare a non-rewritable medium + EXIT /b 1 +) + +REM Blank DVD+RW when there is no data on it +CALL :is_plus_RW +SET result=%ERRORLEVEL% +CALL :is_blank +SET /a result=result + ERRORLEVEL +IF %result% EQU 2 ( + ECHO DVD+RW looks brand-new, blank it to fix some DVD-writers bugs. + CALL :blank + GOTO :EOF +) + +CALL :is_minus_RW +IF %ERRORLEVEL% EQU 1 ( + CALL :is_restricted_overwrite + IF !ERRORLEVEL! 
EQU 0 ( + ECHO DVD-RW is in %diskmode% mode, reformating it to Restricted Overwrite + CALL :reformat_minus_RW + GOTO :EOF + ) +) +CALL :blank +GOTO :EOF + +:blank +ECHO Running %growcmd% -Z %device% =/dev/zero +%growcmd% -Z %device% =/dev/zero +IF %ERRORLEVEL% NEQ 0 ( + ECHO Exited with status !ERRORLEVEL! + EXIT !ERRORLEVEL! +) +GOTO :EOF + +:reformat_minus_RW +ECHO Running %dvdrwformat% -force %device% +%dvdrwformat% -force %device% +IF %ERRORLEVEL% NEQ 0 ( + ECHO Exited with status !ERRORLEVEL! + EXIT !ERRORLEVEL! +) +GOTO :EOF + +REM class disk ends here. + +:usage +ECHO Wrong number of arguments. +ECHO. +ECHO Usage: +ECHO. +ECHO dvd-handler DVD-DRIVE test +ECHO dvd-handler DVD-DRIVE free +ECHO dvd-handler DVD-DRIVE write APPEND FILE +ECHO dvd-handler DVD-DRIVE blank +ECHO. +ECHO where DVD-DRIVE is the drive letter of the DVD burner like D: +ECHO. +ECHO Operations: +ECHO test Scan the device and report the information found. +ECHO This operation needs no further arguments. +ECHO free Scan the device and report the available space. +ECHO write Write a part file to disk. +ECHO This operation needs two additional arguments. +ECHO The first indicates to append (0), restart the +ECHO disk (1) or restart existing disk (2). The second +ECHO is the file to write. +ECHO prepare Prepare a DVD+/-RW for being used by Bacula. +ECHO Note: This is only useful if you already have some +ECHO non-Bacula data on a medium, and you want to use +ECHO it with Bacula. Don't run this on blank media, it +ECHO is useless. + +EXIT /b 1 diff --git a/src/win32/scripts/mtx-changer.cmd b/src/win32/scripts/mtx-changer.cmd new file mode 100644 index 00000000..8cc2c81d --- /dev/null +++ b/src/win32/scripts/mtx-changer.cmd @@ -0,0 +1,189 @@ +@echo off +REM +REM +REM Bacula interface to mtx autoloader +REM +REM $Id: mtx-changer.cmd 3718 2006-12-01 08:45:40Z robertnelson $ +REM +REM If you set in your Device resource +REM +REM Changer Command = "mtx-changer %c %o %S %a %d" +REM you will have the following input to this script: +REM +REM Bacula will always call with all the following arguments, even though +REM in some cases, not all are used. +REM +REM mtx-changer "changer-device" "command" "slot" "archive-device" "drive-index" +REM %1 %2 %3 %4 %5 +REM +REM for example: +REM +REM mtx-changer Changer0 load 1 Tape0 0 +REM +REM will request to load the first cartidge into drive 0, where +REM the changer device is Changer0, and the read/write device +REM is Tape0. +REM +REM If you need to an offline, refer to the drive as %4 +REM e.g. mt -f %4 offline +REM +REM Many changers need an offline after the unload. Also many +REM changers need a sleep 60 after the mtx load. +REM +REM N.B. If you change the script, take care to return either +REM the mtx exit code or a 0. If the script exits with a non-zero +REM exit code, Bacula will assume the request failed. 
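
The header above spells out the contract these changer scripts are held to: the script prints its answer (a slot number, a slot list, a count) on stdout and must exit with 0 on success, since any non-zero exit status is treated as a failed request. As a rough illustration of a caller honouring that contract (this is not Bacula's own Storage daemon code; popen/pclose are the POSIX spellings, _popen/_pclose on Windows):

/* illustrative caller for "mtx-changer <ctl> loaded <slot> <device> <drive>" */
#include <stdio.h>
#include <stdlib.h>

/* returns the slot currently loaded in the drive, 0 if empty, -1 on error */
static int changer_loaded(const char *ctl, const char *dev, int drive)
{
   char cmd[512], line[64];
   FILE *fp;
   int status;

   /* all five arguments are always passed, even if unused by the command */
   snprintf(cmd, sizeof(cmd), "mtx-changer %s loaded 0 %s %d", ctl, dev, drive);
   fp = popen(cmd, "r");
   if (fp == NULL)
      return -1;
   if (fgets(line, sizeof(line), fp) == NULL)
      line[0] = '\0';
   status = pclose(fp);
   if (status != 0)          /* non-zero exit status means the request failed */
      return -1;
   return atoi(line);        /* the script printed the loaded slot (or 0) */
}
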
+REM + +SET MTX="@bin_dir_cmd@\mtx.exe" +SET MT="@bin_dir_cmd@\mt.exe" +SET working_dir=@working_dir_cmd@ + +SET dbgfile="%working_dir%\mtx.log" + +REM to turn on logging, uncomment the following line +REM copy nul "%working_dir%\mtx.log" + +REM +REM check parameter count on commandline +REM +REM Check for special cases where only 2 arguments are needed, +REM all others are a minimum of 5 +REM +IF "%1" EQU "" goto :param_count_invalid +IF "%2" EQU "" goto :param_count_invalid +IF "%2" EQU "list" goto :param_count_valid +IF "%2" EQU "slots" goto :param_count_valid +IF "%3" EQU "" goto :param_count_invalid +IF "%4" EQU "" goto :param_count_invalid +IF "%5" EQU "" goto :param_count_invalid +GOTO :param_count_valid + +:param_count_invalid + echo Insufficient number of arguments given. + IF "%2" EQU "" ( + echo At least two arguments must be specified. + ) else echo Command expected 5 arguments. +:usage + ECHO. + ECHO usage: mtx-changer ctl-device command [slot archive-device drive-index] + ECHO Valid commands are: unload, load, list, loaded, and slots. + EXIT /B 1 + +:param_count_valid + +REM Setup arguments +SET ctl=%1 +SET cmd=%2 +SET slot=%3 +SET device=%4 +SET drive=%5 + +CALL :debug "Parms: %ctl% %cmd% %slot% %device% %drive%" +IF "%cmd%" EQU "unload" GOTO :cmdUnload +IF "%cmd%" EQU "load" GOTO :cmdLoad +IF "%cmd%" EQU "list" GOTO :cmdList +IF "%cmd%" EQU "loaded" GOTO :cmdLoaded +IF "%cmd%" EQU "slots" GOTO :cmdSlots +GOTO :cmdUnknown + +:cmdUnload + CALL :debug "Doing mtx -f %ctl% unload %slot% %drive%" + %MT% -f %device% eject + %MTX% -f %ctl% unload %slot% %drive% + SET rtn=%ERRORLEVEL% + GOTO :cmdExit + +:cmdLoad + CALL :debug "Doing mtx -f %ctl% load %slot% %drive%" + %MTX% -f %ctl% load %slot% %drive% + SET rtn=%ERRORLEVEL% + IF ERRORLEVEL 1 GOTO :cmdExit +REM %MT% -f %device% load +REM bsleep 5 + CALL :wait_for_drive %device% + REM Force block size to 0 for variable + %MT% -f %device% setblk 0 + GOTO :cmdExit + +:cmdList + CALL :debug "Doing mtx -f %ctl% -- to list volumes" + CALL :make_temp_file +REM Enable the following if you are using barcodes and need an inventory +REM %MTX% -f %ctl% inventory + %MTX% -f %ctl% status >%TMPFILE% + SET rtn=%ERRORLEVEL% + IF ERRORLEVEL 1 GOTO :cmdExit + FOR /F "usebackq tokens=3,6 delims==: " %%i in ( `findstr /R /C:" *Storage Element [0-9]*:.*Full" %TMPFILE%` ) do echo %%i:%%j + FOR /F "usebackq tokens=7,10" %%i in ( `findstr /R /C:"^Data Transfer Element [0-9]*:Full (Storage Element [0-9]" %TMPFILE%` ) do echo %%i:%%j + DEL /F %TMPFILE% >nul 2>&1 +REM +REM If you have a VXA PacketLoader and the above does not work, try +REM turning it off and enabling the following line. 
+REM %MTX% -f %ctl% status | grep " *Storage Element [0-9]*:.*Full" | sed "s/*Storage Element //" | sed "s/Full :VolumeTag=//" + + GOTO :cmdExit + +:cmdLoaded + CALL :debug "Doing mtx -f %ctl% %drive% -- to find what is loaded" + CALL :make_temp_file + %MTX% -f %ctl% status >%TMPFILE% + SET rtn=%ERRORLEVEL% + IF ERRORLEVEL 1 GOTO :cmdExit + FOR /F "usebackq tokens=7" %%i in ( `findstr /R /C:"^Data Transfer Element %drive%:Full" %TMPFILE%` ) do echo %%i + findstr /R /C:"^Data Transfer Element %drive%:Empty" %TMPFILE% >nul && echo 0 + DEL /F %TMPFILE% >nul 2>&1 + GOTO :cmdExit + +:cmdSlots + CALL :debug "Doing mtx -f %ctl% -- to get count of slots" + CALL :make_temp_file + %MTX% -f %ctl% status >%TMPFILE% + SET rtn=%ERRORLEVEL% + IF ERRORLEVEL 1 GOTO :cmdExit + FOR /F "usebackq tokens=5" %%i in ( `findstr /R /C:" *Storage Changer" %TMPFILE%` ) do echo %%i + DEL /F %TMPFILE% >nul 2>&1 + GOTO :cmdExit + +:cmdExit + EXIT /B %rtn% + +:cmdUnknown + ECHO '%cmd%' is an invalid command. + GOTO :usage + +REM +REM log whats done +REM +:debug + IF NOT EXIST %dbgfile% GOTO :EOF + FOR /F "usebackq tokens=2-4,5-7 delims=/:. " %%i in ( '%DATE% %TIME%' ) do SET TIMESTAMP=%%k%%i%%j-%%l:%%m:%%n + ECHO %TIMESTAMP% %*>> %dbgfile% + GOTO :EOF + +REM +REM Create a temporary file +REM +:make_temp_file + SET TMPFILE="%working_dir%\mtx.tmp" + IF EXIST %TMPFILE% ( + ECHO Temp file security problem on: %TMPFILE% + EXIT /B 1 + ) + GOTO :EOF + +REM +REM The purpose of this function to wait a maximum +REM time for the drive. It will return as soon as +REM the drive is ready, or after waiting a maximum +REM of 300 seconds. +REM +:wait_for_drive + FOR /L %%i IN ( 1, 1, 300 ) DO ( + %MT% -f %1 status | findstr ONLINE >NUL 2>&1 + IF %ERRORLEVEL%==0 GOTO :EOF + CALL :debug "Device %1 - not ready, retrying..." + bsleep 1 + ) + CALL :debug "Device %1 - not ready, timed out..." + GOTO :EOF diff --git a/src/win32/stored/Makefile b/src/win32/stored/Makefile new file mode 100644 index 00000000..0cb06af9 --- /dev/null +++ b/src/win32/stored/Makefile @@ -0,0 +1,182 @@ +# +# Makefile for win32 bacula executables +# Using MinGW cross-compiler on GNU/Linux +# +# +# Author: Robert Nelson +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# Written by Robert Nelson, June 2006 +# + +# Configuration + +include ../Makefile.inc + +INCLUDES = \ + -I. 
\ + -I$(MAINDIR)/src/stored \ + $(INCLUDE_DDK) \ + $(INCLUDE_PTHREADS) \ + $(INCLUDE_BACULA) \ + $(INCLUDE_ZLIB) \ + $(INCLUDE_OPENSSL) \ + $(INCLUDE_ICONS) + +DEFINES = \ + -DUSING_DLL \ + -DWINVER=0x500 \ + $(HAVES) + +# -DHAVE_TRAY_MONITOR \ + + +vpath %.c $(MAINDIR)/src/stored $(BUILDDIR)/libwin32 +vpath %.cpp $(MAINDIR)/src/stored $(BUILDDIR)/libwin32 + +########################################################################## + +COMMON_OBJS = \ + $(OBJDIR)/askdir.o \ + $(OBJDIR)/tape_alert.o \ + $(OBJDIR)/tape_worm.o \ + $(OBJDIR)/vtape_dev.o \ + $(OBJDIR)/tape_dev.o \ + $(OBJDIR)/init_dev.o \ + $(OBJDIR)/global.o \ + $(OBJDIR)/acquire.o \ + $(OBJDIR)/aligned_dev.o \ + $(OBJDIR)/aligned_read.o \ + $(OBJDIR)/aligned_write.o \ + $(OBJDIR)/ansi_label.o \ + $(OBJDIR)/authenticate.o \ + $(OBJDIR)/autochanger.o \ + $(OBJDIR)/block.o \ + $(OBJDIR)/block_util.o \ + $(OBJDIR)/butil.o \ + $(OBJDIR)/dev.o \ + $(OBJDIR)/device.o \ + $(OBJDIR)/ebcdic.o \ + $(OBJDIR)/hello.o \ + $(OBJDIR)/label.o \ + $(OBJDIR)/lock.o \ + $(OBJDIR)/match_bsr.o \ + $(OBJDIR)/mount.o \ + $(OBJDIR)/parse_bsr.o \ + $(OBJDIR)/read_records.o \ + $(OBJDIR)/record_read.o \ + $(OBJDIR)/record_util.o \ + $(OBJDIR)/record_write.o \ + $(OBJDIR)/reserve.o \ + $(OBJDIR)/scan.o \ + $(OBJDIR)/spool.o \ + $(OBJDIR)/stored_conf.o \ + $(OBJDIR)/vol_mgr.o \ + $(OBJDIR)/wait.o \ + $(OBJDIR)/file_dev.o \ + $(OBJDIR)/sd_plugins.o \ + $(OBJDIR)/os.o + + + +# $(OBJDIR)/mtops.o + +# bacula-sd +SD_OBJS = \ + $(OBJDIR)/stored.o \ + $(OBJDIR)/append.o \ + $(OBJDIR)/dircmd.o \ + $(OBJDIR)/fd_cmds.o \ + $(OBJDIR)/job.o \ + $(OBJDIR)/read.o \ + $(OBJDIR)/vbackup.o \ + $(OBJDIR)/status.o + +# bextract +BEXTRACT_OBJS = \ + $(OBJDIR)/bextract.o + +OBJS_WIN = \ + $(OBJDIR)/service.o \ + $(OBJDIR)/main.o \ + $(OBJDIR)/bacula.res + +# $(OBJDIR)/trayMonitor.o \ +# $(OBJDIR)/aboutDialog.o \ +# $(OBJDIR)/statusDialog.o \ + + +ALL_OBJS = \ + $(COMMON_OBJS) \ + $(SD_OBJS) \ + $(BEXTRACT_OBJS) \ + $(OBJS_WIN) \ + $(OBJDIR)/bcopy.o \ + $(OBJDIR)/bls.o \ + $(OBJDIR)/btape.o \ + $(OBJDIR)/bscan.o + +LIBS_STORED = \ + $(LIBS_PTHREADS) \ + $(LIBS_NETWORK) \ + -lole32 \ + -loleaut32 \ + -luuid \ + -lcomctl32 + +###################################################################### + +# Targets + +.PHONY: all clean + +all: $(BINDIR)/bacula-sd.exe $(BINDIR)/bcopy.exe $(BINDIR)/bextract.exe $(BINDIR)/bls.exe +# $(BINDIR)/bscan.exe $(BINDIR)/btape.exe + +clean: + @echo "Cleaning `pwd`" + $(call clean_obj,$(ALL_OBJS)) + $(call clean_exe,$(BINDIR)/bacula-sd.exe) + $(call clean_exe,$(BINDIR)/bcopy.exe) + $(call clean_exe,$(BINDIR)/bextract.exe) + $(call clean_exe,$(BINDIR)/bls.exe) + $(call clean_exe,$(BINDIR)/bscan.exe) + $(call clean_exe,$(BINDIR)/btape.exe) + $(ECHO_CMD)rm -rf $(OBJDIRS) + +realclean: clean + rm -f tags bacula-sd.conf + +# +# Rules +# + +$(BINDIR)/bacula-sd.exe: $(SD_OBJS) $(COMMON_OBJS) $(OBJS_WIN) $(LIBS_BACULA) + $(call link_winapp,$(LIBS_STORED)) + +$(BINDIR)/btape.exe: $(OBJDIR)/btape.o $(COMMON_OBJS) $(LIBS_BACULA) + $(call link_conapp,$(LIBS_STORED)) + +$(BINDIR)/bls.exe: $(OBJDIR)/bls.o $(COMMON_OBJS) $(LIBS_BACULA) + $(call link_conapp,$(LIBS_STORED)) + +$(BINDIR)/bextract.exe: $(BEXTRACT_OBJS) $(COMMON_OBJS) $(LIBS_BACULA) $(LIBS_LZO) + $(call link_conapp,$(LIBS_STORED) $(LIBS_ZLIB)) + +$(BINDIR)/bscan.exe: $(OBJDIR)/bscan.o $(COMMON_OBJS) $(LIBS_CATS) $(LIBS_BACULA) + $(call link_conapp,$(LIBS_STORED)) + +$(BINDIR)/bcopy.exe: $(OBJDIR)/bcopy.o $(COMMON_OBJS) $(LIBS_BACULA) + $(call link_conapp,$(LIBS_STORED)) + +$(OBJDIR)/mtops.o: 
mtops.cpp + @echo "Compiling $@" + $(call checkdir,$@) + $(ECHO_CMD)$(CXX) $(CFLAGS) $(INCLUDE_DDK) -I../../stored -c $< -o $@ + +$(OBJDIR)/winres.res: baculasd/winres.rc + @echo "Compiling $@" + $(call checkdir,$@) + $(ECHO_CMD)$(WINDRES) $(INCLUDE_ICONS) -I baculasd -O coff $< -o $@ + +include ../Makefile.rules diff --git a/src/win32/stored/bacula.rc b/src/win32/stored/bacula.rc new file mode 100644 index 00000000..92b889cb --- /dev/null +++ b/src/win32/stored/bacula.rc @@ -0,0 +1 @@ +#include "../libwin32/bacula.rc" diff --git a/src/win32/stored/baculasd.vcproj b/src/win32/stored/baculasd.vcproj new file mode 100644 index 00000000..a9de8f2e --- /dev/null +++ b/src/win32/stored/baculasd.vcproj @@ -0,0 +1,396 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/win32/stored/bcopy/bcopy.vcproj b/src/win32/stored/bcopy/bcopy.vcproj new file mode 100644 index 00000000..e721e2f4 --- /dev/null +++ b/src/win32/stored/bcopy/bcopy.vcproj @@ -0,0 +1,230 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/win32/stored/bextract/bextract.vcproj b/src/win32/stored/bextract/bextract.vcproj new file mode 100644 index 00000000..8a173434 --- /dev/null +++ b/src/win32/stored/bextract/bextract.vcproj @@ -0,0 +1,229 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/win32/stored/bls/bls.vcproj b/src/win32/stored/bls/bls.vcproj new file mode 100644 index 00000000..1a07d418 --- /dev/null +++ b/src/win32/stored/bls/bls.vcproj @@ -0,0 +1,229 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/win32/stored/bscan/bscan.vcproj b/src/win32/stored/bscan/bscan.vcproj new file mode 100644 index 00000000..15caf4bf --- /dev/null +++ b/src/win32/stored/bscan/bscan.vcproj @@ -0,0 +1,229 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/win32/stored/btape/btape.vcproj b/src/win32/stored/btape/btape.vcproj new file mode 100644 index 00000000..14ed030c --- /dev/null +++ b/src/win32/stored/btape/btape.vcproj @@ -0,0 +1,228 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/win32/stored/main.cpp b/src/win32/stored/main.cpp new file mode 100644 index 00000000..ae1dd2cd --- /dev/null +++ b/src/win32/stored/main.cpp @@ -0,0 +1,24 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2019 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Kern Sibbald, August 2007 + * + */ + +#include "who.h" +#include "../libwin32/main.cpp" diff --git a/src/win32/stored/mtops.cpp b/src/win32/stored/mtops.cpp new file mode 100644 index 00000000..2eeeafa6 --- /dev/null +++ b/src/win32/stored/mtops.cpp @@ -0,0 +1,1175 @@ +/* + Bacula® - The Network Backup Solution + + Copyright (C) 2006-2008 Free Software Foundation Europe e.V. + + The main author of Bacula is Kern Sibbald, with contributions from + many others, a complete list can be found in the file AUTHORS. + This program is Free Software; you can redistribute it and/or + modify it under the terms of version three of the GNU Affero General Public + License as published by the Free Software Foundation and included + in the file LICENSE. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + + Bacula® is a registered trademark of Kern Sibbald. + The licensor of Bacula is the Free Software Foundation Europe + (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich, + Switzerland, email:ftf@fsfeurope.org. +*/ +/* + * mtops.cpp - Emulate the Linux st (scsi tape) driver on Microsoft Windows. + * + * Author: Robert Nelson, May, 2006 + * + * Version $Id$ + * + * This file was contributed to the Bacula project by Robert Nelson. + * + * Robert Nelson has been granted a perpetual, worldwide, + * non-exclusive, no-charge, royalty-free, irrevocable copyright + * license to reproduce, prepare derivative works of, publicly + * display, publicly perform, sublicense, and distribute the original + * work contributed by Robert Nelson to the Bacula project in source + * or object form. + * + * If you wish to license contributions from Robert Nelson + * under an alternate open source license please contact + * Robert Nelson . + */ + +#include +#include + +#include "bacula.h" /* pull in global headers */ +#include "stored.h" /* pull in Storage Deamon headers */ + +#include "sys/mtio.h" +#if defined(_MSC_VER) +#include +#else +#include +#endif +#include + +// +// SCSI bus status codes. 
+// + +#define SCSISTAT_GOOD 0x00 +#define SCSISTAT_CHECK_CONDITION 0x02 +#define SCSISTAT_CONDITION_MET 0x04 +#define SCSISTAT_BUSY 0x08 +#define SCSISTAT_INTERMEDIATE 0x10 +#define SCSISTAT_INTERMEDIATE_COND_MET 0x14 +#define SCSISTAT_RESERVATION_CONFLICT 0x18 +#define SCSISTAT_COMMAND_TERMINATED 0x22 +#define SCSISTAT_QUEUE_FULL 0x28 + +inline SHORT Read16BitSigned(const unsigned char *pValue) +{ + return (SHORT)(((USHORT)pValue[0] << 8) | (USHORT)pValue[1]); +} + +inline USHORT Read16BitUnsigned(const unsigned char *pValue) +{ + return (((USHORT)pValue[0] << 8) | (USHORT)pValue[1]); +} + +inline LONG Read24BitSigned(const unsigned char *pValue) +{ + return ((LONG)(((ULONG)pValue[0] << 16) | ((ULONG)pValue[1] << 8) | + (ULONG)pValue[2])) << 8 >> 8; +} + +inline ULONG Read24BitUnsigned(const unsigned char *pValue) +{ + return ((ULONG)pValue[0] << 16) | ((ULONG)pValue[1] << 8) | (ULONG)pValue[2]; +} + +inline LONG Read32BitSigned(const unsigned char *pValue) +{ + return (LONG)(((ULONG)pValue[0] << 24) | ((ULONG)pValue[1] << 16) | + ((ULONG)pValue[2] << 8) | (ULONG)pValue[3]); +} + +inline ULONG Read32BitUnsigned(const unsigned char *pValue) +{ + return (((ULONG)pValue[0] << 24) | ((ULONG)pValue[1] << 16) | + ((ULONG)pValue[2] << 8) | (ULONG)pValue[3]); +} + +inline LONGLONG Read64BitSigned(const unsigned char *pValue) +{ + return (LONGLONG)(((ULONGLONG)pValue[0] << 56) | ((ULONGLONG)pValue[1] << 48) | + ((ULONGLONG)pValue[2] << 40) | ((ULONGLONG)pValue[3] << 32) | + ((ULONGLONG)pValue[4] << 24) | ((ULONGLONG)pValue[5] << 16) | + ((ULONGLONG)pValue[6] << 8) | (ULONGLONG)pValue[7]); +} + +inline ULONGLONG Read64BitUnsigned(const unsigned char *pValue) +{ + return (LONGLONG)(((ULONGLONG)pValue[0] << 56) | ((ULONGLONG)pValue[1] << 48) | + ((ULONGLONG)pValue[2] << 40) | ((ULONGLONG)pValue[3] << 32) | + ((ULONGLONG)pValue[4] << 24) | ((ULONGLONG)pValue[5] << 16) | + ((ULONGLONG)pValue[6] << 8) | (ULONGLONG)pValue[7]); +} + +typedef struct _TAPE_POSITION_INFO +{ + UCHAR AtPartitionStart:1; + UCHAR AtPartitionEnd:1; + UCHAR PartitionBlockValid:1; + UCHAR FileSetValid:1; + UCHAR :4; + UCHAR Reserved1[3]; + ULONG Partition; + ULONGLONG BlockNumber; + ULONGLONG FileNumber; + ULONGLONG SetNumber; +} TAPE_POSITION_INFO, *PTAPE_POSITION_INFO; + +typedef struct _TAPE_HANDLE_INFO +{ + HANDLE OSHandle; + bool bEOD; + bool bEOF; + bool bEOT; + bool bBlockValid; + ULONG FeaturesLow; + ULONG FeaturesHigh; + ULONG ulFile; + ULONGLONG ullFileStart; + +} TAPE_HANDLE_INFO, *PTAPE_HANDLE_INFO; + +TAPE_HANDLE_INFO TapeHandleTable[] = +{ + { INVALID_HANDLE_VALUE }, + { INVALID_HANDLE_VALUE }, + { INVALID_HANDLE_VALUE }, + { INVALID_HANDLE_VALUE }, + { INVALID_HANDLE_VALUE }, + { INVALID_HANDLE_VALUE }, + { INVALID_HANDLE_VALUE }, + { INVALID_HANDLE_VALUE }, + { INVALID_HANDLE_VALUE }, + { INVALID_HANDLE_VALUE }, + { INVALID_HANDLE_VALUE }, + { INVALID_HANDLE_VALUE }, + { INVALID_HANDLE_VALUE }, + { INVALID_HANDLE_VALUE }, + { INVALID_HANDLE_VALUE }, + { INVALID_HANDLE_VALUE } +}; + +#define NUMBER_HANDLE_ENTRIES (sizeof(TapeHandleTable) / sizeof(TapeHandleTable[0])) + +static DWORD GetTapePositionInfo(HANDLE hDevice, PTAPE_POSITION_INFO TapePositionInfo); +static DWORD GetDensityBlockSize(HANDLE hDevice, DWORD *pdwDensity, DWORD *pdwBlockSize); + +static int tape_get(int fd, struct mtget *mt_get); +static int tape_op(int fd, struct mtop *mt_com); +static int tape_pos(int fd, struct mtpos *mt_pos); + +int +win32_tape_open(const char *file, int flags, ...) 
+{ + HANDLE hDevice = INVALID_HANDLE_VALUE; + char szDeviceName[256] = "\\\\.\\"; + int idxFile; + DWORD dwResult; + + for (idxFile = 0; idxFile < (int)NUMBER_HANDLE_ENTRIES; idxFile++) { + if (TapeHandleTable[idxFile].OSHandle == INVALID_HANDLE_VALUE) { + break; + } + } + + if (idxFile >= (int)NUMBER_HANDLE_ENTRIES) { + return EMFILE; + } + + memset(&TapeHandleTable[idxFile], 0, sizeof(TapeHandleTable[idxFile])); + + if (!IsPathSeparator(file[0])) { + bstrncpy(&szDeviceName[4], file, sizeof(szDeviceName) - 4); + } else { + bstrncpy(&szDeviceName[0], file, sizeof(szDeviceName)); + } + + hDevice = CreateFile(szDeviceName, FILE_ALL_ACCESS, 0, NULL, OPEN_EXISTING, 0, NULL); + + if (hDevice != INVALID_HANDLE_VALUE) { + PTAPE_HANDLE_INFO pHandleInfo = &TapeHandleTable[idxFile]; + + memset(pHandleInfo, 0, sizeof(*pHandleInfo)); + + pHandleInfo->OSHandle = hDevice; + + TAPE_GET_DRIVE_PARAMETERS TapeDriveParameters; + DWORD dwSize = sizeof(TapeDriveParameters); + + dwResult = GetTapeParameters(pHandleInfo->OSHandle, GET_TAPE_DRIVE_INFORMATION, &dwSize, &TapeDriveParameters); + if (dwResult == NO_ERROR) { + pHandleInfo->FeaturesLow = TapeDriveParameters.FeaturesLow; + pHandleInfo->FeaturesHigh = TapeDriveParameters.FeaturesHigh; + } + + TAPE_POSITION_INFO TapePositionInfo; + + dwResult = GetTapePositionInfo(pHandleInfo->OSHandle, &TapePositionInfo); + + if (dwResult == NO_ERROR) { + if (TapePositionInfo.AtPartitionStart || TapePositionInfo.AtPartitionEnd || + (TapePositionInfo.PartitionBlockValid && TapePositionInfo.BlockNumber == 0)) { + pHandleInfo->ulFile = 0; + pHandleInfo->bBlockValid = true; + pHandleInfo->ullFileStart = 0; + } else if (TapePositionInfo.FileSetValid) { + pHandleInfo->ulFile = (ULONG)TapePositionInfo.FileNumber; + } + } + } else { + DWORD dwError = GetLastError(); + + switch (dwError) { + case ERROR_FILE_NOT_FOUND: + case ERROR_PATH_NOT_FOUND: + errno = ENOENT; + break; + + case ERROR_TOO_MANY_OPEN_FILES: + errno = EMFILE; + break; + + default: + case ERROR_ACCESS_DENIED: + case ERROR_SHARING_VIOLATION: + case ERROR_LOCK_VIOLATION: + case ERROR_INVALID_NAME: + errno = EACCES; + break; + + case ERROR_FILE_EXISTS: + errno = EEXIST; + break; + + case ERROR_INVALID_PARAMETER: + errno = EINVAL; + break; + } + + return(int) -1; + } + + return (int)idxFile + 3; +} + +ssize_t +win32_read(int fd, void *buffer, size_t count) +{ + return read(fd, buffer, count); +} + +ssize_t +win32_write(int fd, const void *buffer, size_t count) +{ + return write(fd, buffer, count); +} + +int +win32_ioctl(int d, unsigned long int req, ...) 
+{ + return -1; +} + +ssize_t +win32_tape_read(int fd, void *buffer, size_t count) +{ + if (buffer == NULL) { + errno = EINVAL; + return -1; + } + + if (fd < 3 || fd >= (int)(NUMBER_HANDLE_ENTRIES + 3) || + TapeHandleTable[fd - 3].OSHandle == INVALID_HANDLE_VALUE) + { + errno = EBADF; + return -1; + } + + PTAPE_HANDLE_INFO pHandleInfo = &TapeHandleTable[fd - 3]; + + DWORD bytes_read; + BOOL bResult; + + bResult = ReadFile(pHandleInfo->OSHandle, buffer, count, &bytes_read, NULL); + + if (bResult) { + pHandleInfo->bEOF = false; + pHandleInfo->bEOT = false; + pHandleInfo->bEOD = false; + return bytes_read; + } else { + int iReturnValue = 0; + DWORD last_error = GetLastError(); + + switch (last_error) { + + case ERROR_FILEMARK_DETECTED: + pHandleInfo->bEOF = true; + break; + + case ERROR_END_OF_MEDIA: + pHandleInfo->bEOT = true; + break; + + case ERROR_NO_MEDIA_IN_DRIVE: + pHandleInfo->bEOF = false; + pHandleInfo->bEOT = false; + pHandleInfo->bEOD = false; + errno = ENOMEDIUM; + iReturnValue = -1; + break; + + case ERROR_NO_DATA_DETECTED: + pHandleInfo->bEOD = true; + break; + + case ERROR_INVALID_HANDLE: + case ERROR_ACCESS_DENIED: + case ERROR_LOCK_VIOLATION: + errno = EBADF; + iReturnValue = -1; + break; + + default: + pHandleInfo->bEOF = false; + pHandleInfo->bEOT = false; + pHandleInfo->bEOD = false; + errno = EIO; + iReturnValue = -1; + } + + return iReturnValue; + } +} + +ssize_t +win32_tape_write(int fd, const void *buffer, size_t count) +{ + if (buffer == NULL) { + errno = EINVAL; + return -1; + } + + if (fd < 3 || fd >= (int)(NUMBER_HANDLE_ENTRIES + 3) || TapeHandleTable[fd - 3].OSHandle == INVALID_HANDLE_VALUE) + { + errno = EBADF; + return -1; + } + + PTAPE_HANDLE_INFO pHandleInfo = &TapeHandleTable[fd - 3]; + + DWORD bytes_written; + BOOL bResult; + + bResult = WriteFile(pHandleInfo->OSHandle, buffer, count, &bytes_written, NULL); + + if (bResult) { + pHandleInfo->bEOF = false; + pHandleInfo->bEOT = false; + return bytes_written; + } else { + DWORD last_error = GetLastError(); + + switch (last_error) { + case ERROR_END_OF_MEDIA: + case ERROR_DISK_FULL: + pHandleInfo->bEOT = true; + errno = ENOSPC; + break; + + case ERROR_NO_MEDIA_IN_DRIVE: + pHandleInfo->bEOF = false; + pHandleInfo->bEOT = false; + pHandleInfo->bEOD = false; + errno = ENOMEDIUM; + break; + + case ERROR_INVALID_HANDLE: + case ERROR_ACCESS_DENIED: + errno = EBADF; + break; + + default: + pHandleInfo->bEOF = false; + pHandleInfo->bEOT = false; + pHandleInfo->bEOD = false; + errno = EIO; + break; + } + return -1; + } +} + +int +win32_tape_close(int fd) +{ + if (fd < 3 || fd >= (int)(NUMBER_HANDLE_ENTRIES + 3) || + TapeHandleTable[fd - 3].OSHandle == INVALID_HANDLE_VALUE) { + errno = EBADF; + return -1; + } + + PTAPE_HANDLE_INFO pHandleInfo = &TapeHandleTable[fd - 3]; + + if (!CloseHandle(pHandleInfo->OSHandle)) { + pHandleInfo->OSHandle = INVALID_HANDLE_VALUE; + errno = EBADF; + return -1; + } + + pHandleInfo->OSHandle = INVALID_HANDLE_VALUE; + + return 0; +} + +int +win32_tape_ioctl(int fd, unsigned long int request, ...) 
+{ + va_list argp; + int result; + + va_start(argp, request); + + switch (request) { + case MTIOCTOP: + result = tape_op(fd, va_arg(argp, mtop *)); + break; + + case MTIOCGET: + result = tape_get(fd, va_arg(argp, mtget *)); + break; + + case MTIOCPOS: + result = tape_pos(fd, va_arg(argp, mtpos *)); + break; + + default: + errno = ENOTTY; + result = -1; + break; + } + + va_end(argp); + + return result; +} + +static int tape_op(int fd, struct mtop *mt_com) +{ + DWORD result = NO_ERROR; + int index; + + if (fd < 3 || fd >= (int)(NUMBER_HANDLE_ENTRIES + 3) || + TapeHandleTable[fd - 3].OSHandle == INVALID_HANDLE_VALUE) + { + errno = EBADF; + return -1; + } + + PTAPE_HANDLE_INFO pHandleInfo = &TapeHandleTable[fd - 3]; + + switch (mt_com->mt_op) + { + case MTRESET: + case MTNOP: + case MTSETDRVBUFFER: + break; + + default: + case MTRAS1: + case MTRAS2: + case MTRAS3: + case MTSETDENSITY: + errno = ENOTTY; + result = (DWORD)-1; + break; + + case MTFSF: + for (index = 0; index < mt_com->mt_count; index++) { + result = SetTapePosition(pHandleInfo->OSHandle, TAPE_SPACE_FILEMARKS, 0, 1, 0, FALSE); + if (result == NO_ERROR) { + pHandleInfo->ulFile++; + pHandleInfo->bEOF = true; + pHandleInfo->bEOT = false; + } + } + break; + + case MTBSF: + for (index = 0; index < mt_com->mt_count; index++) { + result = SetTapePosition(pHandleInfo->OSHandle, TAPE_SPACE_FILEMARKS, 0, (DWORD)-1, ~0, FALSE); + if (result == NO_ERROR) { + pHandleInfo->ulFile--; + pHandleInfo->bBlockValid = false; + pHandleInfo->bEOD = false; + pHandleInfo->bEOF = false; + pHandleInfo->bEOT = false; + } + } + break; + + case MTFSR: + result = SetTapePosition(pHandleInfo->OSHandle, TAPE_SPACE_RELATIVE_BLOCKS, 0, mt_com->mt_count, 0, FALSE); + if (result == NO_ERROR) { + pHandleInfo->bEOD = false; + pHandleInfo->bEOF = false; + pHandleInfo->bEOT = false; + } else if (result == ERROR_FILEMARK_DETECTED) { + pHandleInfo->bEOF = true; + } + break; + + case MTBSR: + result = SetTapePosition(pHandleInfo->OSHandle, TAPE_SPACE_RELATIVE_BLOCKS, 0, -mt_com->mt_count, ~0, FALSE); + if (result == NO_ERROR) { + pHandleInfo->bEOD = false; + pHandleInfo->bEOF = false; + pHandleInfo->bEOT = false; + } else if (result == ERROR_FILEMARK_DETECTED) { + pHandleInfo->ulFile--; + pHandleInfo->bBlockValid = false; + pHandleInfo->bEOD = false; + pHandleInfo->bEOF = false; + pHandleInfo->bEOT = false; + } + break; + + case MTWEOF: + result = WriteTapemark(pHandleInfo->OSHandle, TAPE_FILEMARKS, mt_com->mt_count, FALSE); + if (result == NO_ERROR) { + pHandleInfo->bEOF = true; + pHandleInfo->bEOT = false; + pHandleInfo->ulFile += mt_com->mt_count; + pHandleInfo->bBlockValid = true; + pHandleInfo->ullFileStart = 0; + } + break; + + case MTREW: + result = SetTapePosition(pHandleInfo->OSHandle, TAPE_REWIND, 0, 0, 0, FALSE); + if (result == NO_ERROR) { + pHandleInfo->bEOD = false; + pHandleInfo->bEOF = false; + pHandleInfo->bEOT = false; + pHandleInfo->ulFile = 0; + pHandleInfo->bBlockValid = true; + pHandleInfo->ullFileStart = 0; + } + break; + + case MTOFFL: + result = PrepareTape(pHandleInfo->OSHandle, TAPE_UNLOAD, FALSE); + if (result == NO_ERROR) { + pHandleInfo->bEOD = false; + pHandleInfo->bEOF = false; + pHandleInfo->bEOT = false; + pHandleInfo->ulFile = 0; + pHandleInfo->ullFileStart = 0; + } + break; + + case MTRETEN: + result = PrepareTape(pHandleInfo->OSHandle, TAPE_TENSION, FALSE); + if (result == NO_ERROR) { + pHandleInfo->bEOD = false; + pHandleInfo->bEOF = false; + pHandleInfo->bEOT = false; + pHandleInfo->ulFile = 0; + pHandleInfo->bBlockValid = true; + 
pHandleInfo->ullFileStart = 0; + } + break; + + case MTBSFM: + for (index = 0; index < mt_com->mt_count; index++) { + result = SetTapePosition(pHandleInfo->OSHandle, TAPE_SPACE_FILEMARKS, 0, (DWORD)-1, ~0, FALSE); + if (result == NO_ERROR) { + result = SetTapePosition(pHandleInfo->OSHandle, TAPE_SPACE_FILEMARKS, 0, 1, 0, FALSE); + pHandleInfo->bEOD = false; + pHandleInfo->bEOF = false; + pHandleInfo->bEOT = false; + } + } + break; + + case MTFSFM: + for (index = 0; index < mt_com->mt_count; index++) { + result = SetTapePosition(pHandleInfo->OSHandle, TAPE_SPACE_FILEMARKS, 0, mt_com->mt_count, 0, FALSE); + if (result == NO_ERROR) { + result = SetTapePosition(pHandleInfo->OSHandle, TAPE_SPACE_FILEMARKS, 0, (DWORD)-1, ~0, FALSE); + pHandleInfo->bEOD = false; + pHandleInfo->bEOF = false; + pHandleInfo->bEOT = false; + } + } + break; + + case MTEOM: + for ( ; ; ) { + result = SetTapePosition(pHandleInfo->OSHandle, TAPE_SPACE_FILEMARKS, 0, 1, 0, FALSE); + if (result != NO_ERROR) { + pHandleInfo->bEOF = false; + + if (result == ERROR_END_OF_MEDIA) { + pHandleInfo->bEOD = true; + pHandleInfo->bEOT = true; + return 0; + } + if (result == ERROR_NO_DATA_DETECTED) { + pHandleInfo->bEOD = true; + pHandleInfo->bEOT = false; + return 0; + } + break; + } else { + pHandleInfo->bEOF = true; + pHandleInfo->ulFile++; + } + } + break; + + case MTERASE: + result = EraseTape(pHandleInfo->OSHandle, TAPE_ERASE_LONG, FALSE); + if (result == NO_ERROR) { + pHandleInfo->bEOD = true; + pHandleInfo->bEOF = false; + pHandleInfo->bEOT = false; + pHandleInfo->ulFile = 0; + pHandleInfo->bBlockValid = true; + pHandleInfo->ullFileStart = 0; + } + break; + + case MTSETBLK: + { + TAPE_SET_MEDIA_PARAMETERS SetMediaParameters; + + SetMediaParameters.BlockSize = mt_com->mt_count; + result = SetTapeParameters(pHandleInfo->OSHandle, SET_TAPE_MEDIA_INFORMATION, &SetMediaParameters); + } + break; + + case MTSEEK: + { + TAPE_POSITION_INFO TapePositionInfo; + + result = SetTapePosition(pHandleInfo->OSHandle, TAPE_ABSOLUTE_BLOCK, 0, mt_com->mt_count, 0, FALSE); + + memset(&TapePositionInfo, 0, sizeof(TapePositionInfo)); + DWORD dwPosResult = GetTapePositionInfo(pHandleInfo->OSHandle, &TapePositionInfo); + if (dwPosResult == NO_ERROR && TapePositionInfo.FileSetValid) { + pHandleInfo->ulFile = (ULONG)TapePositionInfo.FileNumber; + } else { + pHandleInfo->ulFile = ~0U; + } + } + break; + + case MTTELL: + { + DWORD partition; + DWORD offset; + DWORD offsetHi; + + result = GetTapePosition(pHandleInfo->OSHandle, TAPE_ABSOLUTE_BLOCK, &partition, &offset, &offsetHi); + if (result == NO_ERROR) { + return offset; + } + } + break; + + case MTFSS: + result = SetTapePosition(pHandleInfo->OSHandle, TAPE_SPACE_SETMARKS, 0, mt_com->mt_count, 0, FALSE); + break; + + case MTBSS: + result = SetTapePosition(pHandleInfo->OSHandle, TAPE_SPACE_SETMARKS, 0, -mt_com->mt_count, ~0, FALSE); + break; + + case MTWSM: + result = WriteTapemark(pHandleInfo->OSHandle, TAPE_SETMARKS, mt_com->mt_count, FALSE); + break; + + case MTLOCK: + result = PrepareTape(pHandleInfo->OSHandle, TAPE_LOCK, FALSE); + break; + + case MTUNLOCK: + result = PrepareTape(pHandleInfo->OSHandle, TAPE_UNLOCK, FALSE); + break; + + case MTLOAD: + result = PrepareTape(pHandleInfo->OSHandle, TAPE_LOAD, FALSE); + break; + + case MTUNLOAD: + result = PrepareTape(pHandleInfo->OSHandle, TAPE_UNLOAD, FALSE); + break; + + case MTCOMPRESSION: + { + TAPE_GET_DRIVE_PARAMETERS GetDriveParameters; + TAPE_SET_DRIVE_PARAMETERS SetDriveParameters; + DWORD size; + + size = sizeof(GetDriveParameters); + + result = 
GetTapeParameters(pHandleInfo->OSHandle, GET_TAPE_DRIVE_INFORMATION, &size, &GetDriveParameters); + + if (result == NO_ERROR) + { + SetDriveParameters.ECC = GetDriveParameters.ECC; + SetDriveParameters.Compression = (BOOLEAN)mt_com->mt_count; + SetDriveParameters.DataPadding = GetDriveParameters.DataPadding; + SetDriveParameters.ReportSetmarks = GetDriveParameters.ReportSetmarks; + SetDriveParameters.EOTWarningZoneSize = GetDriveParameters.EOTWarningZoneSize; + + result = SetTapeParameters(pHandleInfo->OSHandle, SET_TAPE_DRIVE_INFORMATION, &SetDriveParameters); + } + } + break; + + case MTSETPART: + result = SetTapePosition(pHandleInfo->OSHandle, TAPE_LOGICAL_BLOCK, mt_com->mt_count, 0, 0, FALSE); + break; + + case MTMKPART: + if (mt_com->mt_count == 0) + { + result = CreateTapePartition(pHandleInfo->OSHandle, TAPE_INITIATOR_PARTITIONS, 1, 0); + } + else + { + result = CreateTapePartition(pHandleInfo->OSHandle, TAPE_INITIATOR_PARTITIONS, 2, mt_com->mt_count); + } + break; + } + + if ((result == NO_ERROR && pHandleInfo->bEOF) || + (result == ERROR_FILEMARK_DETECTED && mt_com->mt_op == MTFSR)) { + + TAPE_POSITION_INFO TapePositionInfo; + + if (GetTapePositionInfo(pHandleInfo->OSHandle, &TapePositionInfo) == NO_ERROR) { + pHandleInfo->bBlockValid = true; + pHandleInfo->ullFileStart = TapePositionInfo.BlockNumber; + } + } + + switch (result) { + case NO_ERROR: + case (DWORD)-1: /* Error has already been translated into errno */ + break; + + default: + case ERROR_FILEMARK_DETECTED: + errno = EIO; + break; + + case ERROR_END_OF_MEDIA: + pHandleInfo->bEOT = true; + errno = EIO; + break; + + case ERROR_NO_DATA_DETECTED: + pHandleInfo->bEOD = true; + errno = EIO; + break; + + case ERROR_NO_MEDIA_IN_DRIVE: + pHandleInfo->bEOF = false; + pHandleInfo->bEOT = false; + pHandleInfo->bEOD = false; + errno = ENOMEDIUM; + break; + + case ERROR_INVALID_HANDLE: + case ERROR_ACCESS_DENIED: + case ERROR_LOCK_VIOLATION: + errno = EBADF; + break; + } + + return result == NO_ERROR ? 0 : -1; +} + +static int tape_get(int fd, struct mtget *mt_get) +{ + TAPE_POSITION_INFO pos_info; + BOOL result; + + if (fd < 3 || fd >= (int)(NUMBER_HANDLE_ENTRIES + 3) || + TapeHandleTable[fd - 3].OSHandle == INVALID_HANDLE_VALUE) { + errno = EBADF; + return -1; + } + + PTAPE_HANDLE_INFO pHandleInfo = &TapeHandleTable[fd - 3]; + + if (GetTapePositionInfo(pHandleInfo->OSHandle, &pos_info) != NO_ERROR) { + return -1; + } + + DWORD density = 0; + DWORD blocksize = 0; + + result = GetDensityBlockSize(pHandleInfo->OSHandle, &density, &blocksize); + + if (result != NO_ERROR) { + TAPE_GET_DRIVE_PARAMETERS drive_params; + DWORD size; + + size = sizeof(drive_params); + + result = GetTapeParameters(pHandleInfo->OSHandle, GET_TAPE_DRIVE_INFORMATION, &size, &drive_params); + + if (result == NO_ERROR) { + blocksize = drive_params.DefaultBlockSize; + } + } + + mt_get->mt_type = MT_ISSCSI2; + + // Partition # + mt_get->mt_resid = pos_info.PartitionBlockValid ? 
pos_info.Partition : (ULONG)-1; + + // Density / Block Size + mt_get->mt_dsreg = ((density << MT_ST_DENSITY_SHIFT) & MT_ST_DENSITY_MASK) | + ((blocksize << MT_ST_BLKSIZE_SHIFT) & MT_ST_BLKSIZE_MASK); + + mt_get->mt_gstat = 0x00010000; /* Immediate report mode.*/ + + if (pHandleInfo->bEOF) { + mt_get->mt_gstat |= 0x80000000; // GMT_EOF + } + + if (pos_info.PartitionBlockValid && pos_info.BlockNumber == 0) { + mt_get->mt_gstat |= 0x40000000; // GMT_BOT + } + + if (pHandleInfo->bEOT) { + mt_get->mt_gstat |= 0x20000000; // GMT_EOT + } + + if (pHandleInfo->bEOD) { + mt_get->mt_gstat |= 0x08000000; // GMT_EOD + } + + TAPE_GET_MEDIA_PARAMETERS media_params; + DWORD size = sizeof(media_params); + + result = GetTapeParameters(pHandleInfo->OSHandle, GET_TAPE_MEDIA_INFORMATION, &size, &media_params); + + if (result == NO_ERROR && media_params.WriteProtected) { + mt_get->mt_gstat |= 0x04000000; // GMT_WR_PROT + } + + result = GetTapeStatus(pHandleInfo->OSHandle); + + if (result != NO_ERROR) { + if (result == ERROR_NO_MEDIA_IN_DRIVE) { + mt_get->mt_gstat |= 0x00040000; // GMT_DR_OPEN + } + } else { + mt_get->mt_gstat |= 0x01000000; // GMT_ONLINE + } + + // Recovered Error Count + mt_get->mt_erreg = 0; + + // File Number + mt_get->mt_fileno = (__daddr_t)pHandleInfo->ulFile; + + // Block Number + mt_get->mt_blkno = (__daddr_t)(pHandleInfo->bBlockValid ? pos_info.BlockNumber - pHandleInfo->ullFileStart : (ULONGLONG)-1); + + return 0; +} + +#define SERVICEACTION_SHORT_FORM_BLOCKID 0 +#define SERVICEACTION_SHORT_FORM_VENDOR_SPECIFIC 1 +#define SERVICEACTION_LONG_FORM 6 +#define SERVICEACTION_EXTENDED_FORM 8 + + +typedef struct _SCSI_READ_POSITION_SHORT_BUFFER +{ + UCHAR :1; + UCHAR PERR:1; + UCHAR BPU:1; + UCHAR :1; + UCHAR BYCU:1; + UCHAR BCU:1; + UCHAR EOP:1; + UCHAR BOP:1; + UCHAR Partition; + UCHAR Reserved1[2]; + UCHAR FirstBlock[4]; + UCHAR LastBlock[4]; + UCHAR Reserved2; + UCHAR NumberBufferBlocks[3]; + UCHAR NumberBufferBytes[4]; +} SCSI_READ_POSITION_SHORT_BUFFER, *PSCSI_READ_POSITION_SHORT_BUFFER; + +typedef struct _SCSI_READ_POSITION_LONG_BUFFER +{ + UCHAR :2; + UCHAR BPU:1; + UCHAR MPU:1; + UCHAR :2; + UCHAR EOP:1; + UCHAR BOP:1; + UCHAR Reserved3[3]; + UCHAR Partition[4]; + UCHAR BlockNumber[8]; + UCHAR FileNumber[8]; + UCHAR SetNumber[8]; +} SCSI_READ_POSITION_LONG_BUFFER, *PSCSI_READ_POSITION_LONG_BUFFER; + +typedef struct _SCSI_READ_POSITION_EXTENDED_BUFFER +{ + UCHAR :1; + UCHAR PERR:1; + UCHAR LOPU:1; + UCHAR :1; + UCHAR BYCU:1; + UCHAR LOCU:1; + UCHAR EOP:1; + UCHAR BOP:1; + UCHAR Partition; + UCHAR AdditionalLength[2]; + UCHAR Reserved1; + UCHAR NumberBufferObjects[3]; + UCHAR FirstLogicalObject[8]; + UCHAR LastLogicalObject[8]; + UCHAR NumberBufferObjectBytes[8]; +} SCSI_READ_POSITION_EXTENDED_BUFFER, *PSCSI_READ_POSITION_EXTENDED_BUFFER; + +typedef union _READ_POSITION_RESULT { + SCSI_READ_POSITION_SHORT_BUFFER ShortBuffer; + SCSI_READ_POSITION_LONG_BUFFER LongBuffer; + SCSI_READ_POSITION_EXTENDED_BUFFER ExtendedBuffer; +} READ_POSITION_RESULT, *PREAD_POSITION_RESULT; + +static DWORD GetTapePositionInfo(HANDLE hDevice, PTAPE_POSITION_INFO TapePositionInfo) +{ + PSCSI_PASS_THROUGH ScsiPassThrough; + BOOL bResult; + DWORD dwBytesReturned; + + const DWORD dwBufferSize = sizeof(SCSI_PASS_THROUGH) + sizeof(READ_POSITION_RESULT) + 28; + + memset(TapePositionInfo, 0, sizeof(*TapePositionInfo)); + + ScsiPassThrough = (PSCSI_PASS_THROUGH)malloc(dwBufferSize); + + for (int pass = 0; pass < 2; pass++) + { + memset(ScsiPassThrough, 0, dwBufferSize); + + ScsiPassThrough->Length = 
sizeof(SCSI_PASS_THROUGH); + + ScsiPassThrough->CdbLength = 10; + ScsiPassThrough->SenseInfoLength = 28; + ScsiPassThrough->DataIn = 1; + ScsiPassThrough->DataTransferLength = sizeof(SCSI_READ_POSITION_LONG_BUFFER); + ScsiPassThrough->TimeOutValue = 1000; + ScsiPassThrough->DataBufferOffset = sizeof(SCSI_PASS_THROUGH) + 28; + ScsiPassThrough->SenseInfoOffset = sizeof(SCSI_PASS_THROUGH); + + ScsiPassThrough->Cdb[0] = 0x34; // READ POSITION + + switch (pass) + { + case 0: + ScsiPassThrough->Cdb[1] = SERVICEACTION_LONG_FORM; + break; + + case 1: + ScsiPassThrough->Cdb[1] = SERVICEACTION_SHORT_FORM_BLOCKID; + break; + } + + bResult = DeviceIoControl( hDevice, + IOCTL_SCSI_PASS_THROUGH, + ScsiPassThrough, sizeof(SCSI_PASS_THROUGH), + ScsiPassThrough, dwBufferSize, + &dwBytesReturned, + NULL); + + if (bResult && dwBytesReturned >= (offsetof(SCSI_PASS_THROUGH, ScsiStatus) + sizeof(ScsiPassThrough->ScsiStatus))) { + if (ScsiPassThrough->ScsiStatus == SCSISTAT_GOOD) { + PREAD_POSITION_RESULT pPosResult = (PREAD_POSITION_RESULT)((PUCHAR)ScsiPassThrough + ScsiPassThrough->DataBufferOffset); + + switch (pass) + { + case 0: // SERVICEACTION_LONG_FORM + { + TapePositionInfo->AtPartitionStart = pPosResult->LongBuffer.BOP; + TapePositionInfo->AtPartitionEnd = pPosResult->LongBuffer.EOP; + + if (!TapePositionInfo->PartitionBlockValid) { + TapePositionInfo->PartitionBlockValid = !pPosResult->LongBuffer.BPU; + + if (TapePositionInfo->PartitionBlockValid) { + TapePositionInfo->Partition = Read32BitUnsigned(pPosResult->LongBuffer.Partition); + TapePositionInfo->BlockNumber = Read64BitUnsigned(pPosResult->LongBuffer.BlockNumber); + } + } + + TapePositionInfo->FileSetValid = !pPosResult->LongBuffer.MPU; + if (TapePositionInfo->FileSetValid) { + TapePositionInfo->FileNumber = Read64BitUnsigned(pPosResult->LongBuffer.FileNumber); + TapePositionInfo->SetNumber = Read64BitUnsigned(pPosResult->LongBuffer.SetNumber); + } + } + break; + + case 1: // SERVICEACTION_SHORT_FORM_BLOCKID + { + // pPosResult->ShortBuffer.PERR; + // pPosResult->ShortBuffer.BYCU; + // pPosResult->ShortBuffer.BCU; + TapePositionInfo->AtPartitionStart = pPosResult->ShortBuffer.BOP; + TapePositionInfo->AtPartitionEnd = pPosResult->ShortBuffer.EOP; + + if (!TapePositionInfo->PartitionBlockValid) { + TapePositionInfo->PartitionBlockValid = !pPosResult->ShortBuffer.BPU; + + if (TapePositionInfo->PartitionBlockValid) { + TapePositionInfo->Partition = pPosResult->ShortBuffer.Partition; + TapePositionInfo->BlockNumber = Read32BitUnsigned(pPosResult->ShortBuffer.FirstBlock); + } + } + // Read32BitsUnsigned(pPosResult->ShortBuffer.LastBlock); + // Read24BitsUnsigned(pPosResult->ShortBuffer.NumberBufferBlocks); + // Read32BitsUnsigned(pPosResult->ShortBuffer.NumberBufferBytes); + } + break; + } + } + } + } + free(ScsiPassThrough); + + return NO_ERROR; +} + +static DWORD GetDensityBlockSize(HANDLE hDevice, DWORD *pdwDensity, DWORD *pdwBlockSize) +{ + DWORD dwBufferSize = sizeof(GET_MEDIA_TYPES) + 5 * sizeof(DEVICE_MEDIA_INFO); + GET_MEDIA_TYPES * pGetMediaTypes = (GET_MEDIA_TYPES *)malloc(dwBufferSize); + BOOL bResult; + DWORD dwResult; + + if (pGetMediaTypes == NULL) { + return ERROR_OUTOFMEMORY; + } + + do { + DWORD dwBytesReturned; + + bResult = DeviceIoControl( hDevice, + IOCTL_STORAGE_GET_MEDIA_TYPES_EX, + NULL, 0, + (LPVOID)pGetMediaTypes, dwBufferSize, + &dwBytesReturned, + NULL); + + if (!bResult) { + dwResult = GetLastError(); + + if (dwResult != ERROR_INSUFFICIENT_BUFFER) { + free(pGetMediaTypes); + return dwResult; + } + + dwBufferSize += 6 * 
sizeof(DEVICE_MEDIA_INFO); + + GET_MEDIA_TYPES * pNewBuffer = (GET_MEDIA_TYPES *)realloc(pGetMediaTypes, dwBufferSize); + + if (pNewBuffer != pGetMediaTypes) { + free(pGetMediaTypes); + + if (pNewBuffer == NULL) { + return ERROR_OUTOFMEMORY; + } + + pGetMediaTypes = pNewBuffer; + } + } + } while (!bResult); + + if (pGetMediaTypes->DeviceType != FILE_DEVICE_TAPE) { + free(pGetMediaTypes); + return ERROR_BAD_DEVICE; + } + + for (DWORD idxMedia = 0; idxMedia < pGetMediaTypes->MediaInfoCount; idxMedia++) { + + if (pGetMediaTypes->MediaInfo[idxMedia].DeviceSpecific.TapeInfo.MediaCharacteristics & MEDIA_CURRENTLY_MOUNTED) { + + if (pGetMediaTypes->MediaInfo[idxMedia].DeviceSpecific.TapeInfo.BusType == BusTypeScsi) { + *pdwDensity = pGetMediaTypes->MediaInfo[idxMedia].DeviceSpecific.TapeInfo.BusSpecificData.ScsiInformation.DensityCode; + } else { + *pdwDensity = 0; + } + + *pdwBlockSize = pGetMediaTypes->MediaInfo[idxMedia].DeviceSpecific.TapeInfo.CurrentBlockSize; + + free(pGetMediaTypes); + + return NO_ERROR; + } + } + + free(pGetMediaTypes); + + return ERROR_NO_MEDIA_IN_DRIVE; +} + +static int tape_pos(int fd, struct mtpos *mt_pos) +{ + DWORD partition; + DWORD offset; + DWORD offsetHi; + BOOL result; + + if (fd < 3 || fd >= (int)(NUMBER_HANDLE_ENTRIES + 3) || + TapeHandleTable[fd - 3].OSHandle == INVALID_HANDLE_VALUE) { + errno = EBADF; + return -1; + } + + PTAPE_HANDLE_INFO pHandleInfo = &TapeHandleTable[fd - 3]; + + result = GetTapePosition(pHandleInfo->OSHandle, TAPE_ABSOLUTE_BLOCK, &partition, &offset, &offsetHi); + if (result == NO_ERROR) { + mt_pos->mt_blkno = offset; + return 0; + } + + return -1; +} diff --git a/src/win32/stored/postest/postest.cpp b/src/win32/stored/postest/postest.cpp new file mode 100644 index 00000000..597691a6 --- /dev/null +++ b/src/win32/stored/postest/postest.cpp @@ -0,0 +1,299 @@ +#ifdef HAVE_WIN32 +#include +typedef int __daddr_t; +#else +#define tape_open open +#define tape_write write +#define tape_ioctl ioctl +#define tape_close close + +typedef unsigned char UCHAR, *PUCHAR; +typedef unsigned int UINT, *PUINT; +typedef unsigned long ULONG, *PULONG; +typedef unsigned long long ULONGLONG, *PULONGLONG; +#endif + +#include +#include +#include +#include +#include +#include +#include +#include + +char *szCommands[] = +{ + "MTRESET", + "MTFSF", + "MTBSF", + "MTFSR", + "MTBSR", + "MTWEOF", + "MTREW", + "MTOFFL", + "MTNOP", + "MTRETEN", + "MTBSFM", + "MTFSFM ", + "MTEOM", + "MTERASE", + "MTRAS1", + "MTRAS2", + "MTRAS3", + "UNKNOWN_17", + "UNKNOWN_18", + "UNKNOWN_19", + "MTSETBLK", + "MTSETDENSITY", + "MTSEEK", + "MTTELL", + "MTSETDRVBUFFER", + "MTFSS", + "MTBSS", + "MTWSM", + "MTLOCK", + "MTUNLOCK", + "MTLOAD", + "MTUNLOAD", + "MTCOMPRESSION", + "MTSETPART", + "MTMKPART", +}; + +#define NUMBER_COMMANDS (sizeof(szCommands) / sizeof(szCommands[0])) + +typedef struct _SCRIPT_ENTRY { + short Command; + int Count; + PUCHAR pszDescription; + ULONG ExpectedFile; + ULONGLONG ExpectedBlock; +} SCRIPT_ENTRY, *PSCRIPT_ENTRY; + +SCRIPT_ENTRY TestScript[] = +{ + { MTREW, 1, 0, 0 }, + { MTFSF, 2, 0, 0 }, + { MTBSR, 1, 0, 0 }, + { MTBSR, 3, 0, 0 }, + { MTFSR, 6, 0, 0 }, + { MTREW, 1, 0, 0 }, + { MTFSF, 3, 0, 0 }, + { MTFSR, 8, 0, 0 }, + { MTFSF, 1, 0, 0 }, + { MTBSF, 1, 0, 0 } +}; + +#define SCRIPT_LENGTH (sizeof(TestScript) / sizeof(TestScript[0])) + +void printpos(int fd, ULONG ulFile, ULONG ulBlock); + +void +run_script(int fd, PSCRIPT_ENTRY entries, size_t count) +{ + mtop op; + + for (size_t idxScript = 0; idxScript < count; idxScript++) + { + PSCRIPT_ENTRY pEntry = 
&entries[idxScript]; + + fprintf(stderr, "%s %d: ", szCommands[pEntry->Command], pEntry->Count); + + op.mt_op = pEntry->Command; + op.mt_count = pEntry->Count; + + int iResult = tape_ioctl(fd, MTIOCTOP, &op); + + if (iResult >= 0) + { + printpos(fd, pEntry->ExpectedFile, (ULONG)pEntry->ExpectedBlock); + } + else + { + fprintf(stderr, "tape_ioctl returned %d, error = %s\n", errno, strerror(errno)); + } + } +} + +void +weof(int fd) +{ + mtop op; + + op.mt_op = MTWEOF; + op.mt_count = 1; + + if (tape_ioctl(fd, MTIOCTOP, &op) != 0) + { + fprintf(stderr, "tape_ioctl return error %d - %s", errno, strerror(errno)); + } +} + +void +wdata(int fd, ULONG ulBufferNumber, void *pBuffer, size_t size) +{ + ((PUCHAR)pBuffer)[0] = (UCHAR)ulBufferNumber; + ((PUCHAR)pBuffer)[1] = (UCHAR)(ulBufferNumber >> 8); + ((PUCHAR)pBuffer)[2] = (UCHAR)(ulBufferNumber >> 16); + ((PUCHAR)pBuffer)[3] = (UCHAR)(ulBufferNumber >> 24); + + UCHAR ucChar = (UCHAR)ulBufferNumber; + UCHAR ucIncrement = (UCHAR)(ulBufferNumber >> 8); + + if (ucIncrement == 0) + { + ucIncrement++; + } + + for (size_t index = 4; index < size; index++) + { + ((PUCHAR)pBuffer)[index] = ucChar; + ucChar += ucIncrement; + } + + + if (tape_write(fd, pBuffer, (UINT)size) < 0) + { + fprintf(stderr, "tape_write returned error %d - %s", errno, strerror(errno)); + } +} + +void +printpos(int fd, ULONG ulExpectedFile, ULONG ulExpectedBlock) +{ + mtget st; + + tape_ioctl(fd, MTIOCGET, &st); + if (tape_ioctl(fd, MTIOCGET, &st) != 0) + { + fprintf(stderr, "tape_ioctl(MTIOCGET) returned error %d - %s\n", errno, strerror(errno)); + } + + mtpos pos; + + if (tape_ioctl(fd, MTIOCPOS, &pos) != 0) + { + fprintf(stderr, "tape_ioctl(MTIOCPOS) returned error %d - %s\n", errno, strerror(errno)); + } + + fprintf( stderr, "File = %d s/b %d, Block = %d, s/b %d, Absolute = %d, Flags =%s%s%s%s%s%s%s%s\n", + st.mt_fileno, ulExpectedFile, st.mt_blkno, ulExpectedBlock, pos.mt_blkno, + GMT_EOF(st.mt_gstat) ? " EOF" : "", + GMT_BOT(st.mt_gstat) ? " BOT" : "", + GMT_EOT(st.mt_gstat) ? " EOT" : "", + GMT_EOD(st.mt_gstat) ? " EOD" : "", + GMT_WR_PROT(st.mt_gstat) ? " WR_PROT" : "", + GMT_ONLINE(st.mt_gstat) ? " ONLINE" : "", + GMT_DR_OPEN(st.mt_gstat) ? " DR_OPEN" : "", + GMT_IM_REP_EN(st.mt_gstat) ? 
" IM_REP_EN" : ""); +} + +void +rewind(int fd) +{ + mtop op; + + op.mt_op = MTREW; + op.mt_count = 1; + + if (tape_ioctl(fd, MTIOCTOP, &op) != 0) + { + fprintf(stderr, "tape_ioctl return error %d - %s", errno, strerror(errno)); + } +} + +#define BLOCK_SIZE 32768 + +int +main(int argc, char **argv) +{ + PUCHAR pBuffer; + ULONG ulBlockNumber = 0; + ULONG filenumber = 0; + int index; + + OSDependentInit(); + + int fd = tape_open(argv[1], O_RDWR, 0); + + if (fd == -1) + { + fprintf(stderr, "tape_open return error %d - %s", errno, strerror(errno)); + exit(1); + } + pBuffer = (PUCHAR)malloc(BLOCK_SIZE); + + rewind(fd); + + printpos(fd, 0, 0); + + fprintf(stderr, "file = %d, first block = %d\n", filenumber, ulBlockNumber); + + for (index = 0; index < 10; index++) + { + wdata(fd, ulBlockNumber++, pBuffer, BLOCK_SIZE); + } + + weof(fd); + filenumber++; + ulBlockNumber++; + + fprintf(stderr, "file = %d, first block = %d\n", filenumber, ulBlockNumber); + + for (index = 0; index < 5; index++) + { + wdata(fd, ulBlockNumber++, pBuffer, BLOCK_SIZE); + } + + weof(fd); + filenumber++; + ulBlockNumber++; + + fprintf(stderr, "file = %d, first block = %d\n", filenumber, ulBlockNumber); + + for (index = 0; index < 11; index++) + { + wdata(fd, ulBlockNumber++, pBuffer, BLOCK_SIZE); + } + + weof(fd); + filenumber++; + ulBlockNumber++; + + fprintf(stderr, "file = %d, first block = %d\n", filenumber, ulBlockNumber); + + for (index = 0; index < 8; index++) + { + wdata(fd, ulBlockNumber++, pBuffer, BLOCK_SIZE); + } + + weof(fd); + filenumber++; + ulBlockNumber++; + + fprintf(stderr, "file = %d, first block = %d\n", filenumber, ulBlockNumber); + for (index = 0; index < 12; index++) + { + wdata(fd, ulBlockNumber++, pBuffer, BLOCK_SIZE); + } + + weof(fd); + filenumber++; + ulBlockNumber++; + + fprintf(stderr, "file = %d, first block = %d\n", filenumber, ulBlockNumber); + for (index = 0; index < 7; index++) + { + wdata(fd, ulBlockNumber++, pBuffer, BLOCK_SIZE); + } + + weof(fd); + filenumber++; + ulBlockNumber++; + + run_script(fd, TestScript, SCRIPT_LENGTH); + tape_close(fd); + free(pBuffer); + return 0; +} diff --git a/src/win32/stored/postest/postest.vcproj b/src/win32/stored/postest/postest.vcproj new file mode 100644 index 00000000..345f1956 --- /dev/null +++ b/src/win32/stored/postest/postest.vcproj @@ -0,0 +1,213 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/win32/stored/service.cpp b/src/win32/stored/service.cpp new file mode 100644 index 00000000..c8f4a6f6 --- /dev/null +++ b/src/win32/stored/service.cpp @@ -0,0 +1,25 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. 
+*/ +/* + * Kern Sibbald, August 2007 + * + */ + +#include "who.h" +#include "../libwin32/service.cpp" diff --git a/src/win32/stored/storelib/storelib.vcproj b/src/win32/stored/storelib/storelib.vcproj new file mode 100644 index 00000000..d33e4ea1 --- /dev/null +++ b/src/win32/stored/storelib/storelib.vcproj @@ -0,0 +1,827 @@ diff --git a/src/win32/stored/trayMonitor.cpp b/src/win32/stored/trayMonitor.cpp new file mode 100644 index 00000000..25494340 --- /dev/null +++ b/src/win32/stored/trayMonitor.cpp @@ -0,0 +1,25 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Kern Sibbald, August 2007 + * + */ + +#include "who.h" +#include "../libwin32/trayMonitor.cpp" diff --git a/src/win32/stored/who.h b/src/win32/stored/who.h new file mode 100644 index 00000000..7558421b --- /dev/null +++ b/src/win32/stored/who.h @@ -0,0 +1,31 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2019 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Kern Sibbald, August 2007 + * + * This file is pulled in by certain generic routines in libwin32 + * to define the names of the daemon that is being built. + */ + +#define APP_NAME "Bacula-sd" +#define LC_APP_NAME "bacula-sd" +#define APP_DESC "Bacula Storage Service" + +#define terminate_app(x) terminate_stored(x) +extern void terminate_stored(int sig); diff --git a/src/win32/stored/win_tape_device.cpp b/src/win32/stored/win_tape_device.cpp new file mode 100644 index 00000000..dcb903e7 --- /dev/null +++ b/src/win32/stored/win_tape_device.cpp @@ -0,0 +1,1142 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2018 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS.
+ + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * mtops.cpp - Emulate the Linux st (scsi tape) driver on Microsoft Windows. + * + * Author: Robert Nelson, May, 2006 + * + * + * This file was contributed to the Bacula project by Robert Nelson. + * + * Robert Nelson has been granted a perpetual, worldwide, + * non-exclusive, no-charge, royalty-free, irrevocable copyright + * license to reproduce, prepare derivative works of, publicly + * display, publicly perform, sublicense, and distribute the original + * work contributed by Robert Nelson to the Bacula project in source + * or object form. + * + * If you wish to license contributions from Robert Nelson + * under an alternate open source license please contact + * Robert Nelson . + */ + +#include +#include + +#include "bacula.h" /* pull in global headers */ +#include "stored.h" /* pull in Storage Deamon headers */ +#include "win_tape_device.h" + +#include "sys/mtio.h" +#if defined(_MSC_VER) +#include +#else +#include +#endif +#include + +// +// SCSI bus status codes. +// + +#define SCSISTAT_GOOD 0x00 +#define SCSISTAT_CHECK_CONDITION 0x02 +#define SCSISTAT_CONDITION_MET 0x04 +#define SCSISTAT_BUSY 0x08 +#define SCSISTAT_INTERMEDIATE 0x10 +#define SCSISTAT_INTERMEDIATE_COND_MET 0x14 +#define SCSISTAT_RESERVATION_CONFLICT 0x18 +#define SCSISTAT_COMMAND_TERMINATED 0x22 +#define SCSISTAT_QUEUE_FULL 0x28 + +static inline SHORT Read16BitSigned(const unsigned char *pValue) +{ + return (SHORT)(((USHORT)pValue[0] << 8) | (USHORT)pValue[1]); +} + +static inline USHORT Read16BitUnsigned(const unsigned char *pValue) +{ + return (((USHORT)pValue[0] << 8) | (USHORT)pValue[1]); +} + +static inline LONG Read24BitSigned(const unsigned char *pValue) +{ + return ((LONG)(((ULONG)pValue[0] << 16) | ((ULONG)pValue[1] << 8) | + (ULONG)pValue[2])) << 8 >> 8; +} + +static inline ULONG Read24BitUnsigned(const unsigned char *pValue) +{ + return ((ULONG)pValue[0] << 16) | ((ULONG)pValue[1] << 8) | (ULONG)pValue[2]; +} + +static inline LONG Read32BitSigned(const unsigned char *pValue) +{ + return (LONG)(((ULONG)pValue[0] << 24) | ((ULONG)pValue[1] << 16) | + ((ULONG)pValue[2] << 8) | (ULONG)pValue[3]); +} + +static inline ULONG Read32BitUnsigned(const unsigned char *pValue) +{ + return (((ULONG)pValue[0] << 24) | ((ULONG)pValue[1] << 16) | + ((ULONG)pValue[2] << 8) | (ULONG)pValue[3]); +} + +static inline LONGLONG Read64BitSigned(const unsigned char *pValue) +{ + return (LONGLONG)(((ULONGLONG)pValue[0] << 56) | ((ULONGLONG)pValue[1] << 48) | + ((ULONGLONG)pValue[2] << 40) | ((ULONGLONG)pValue[3] << 32) | + ((ULONGLONG)pValue[4] << 24) | ((ULONGLONG)pValue[5] << 16) | + ((ULONGLONG)pValue[6] << 8) | (ULONGLONG)pValue[7]); +} + +static inline ULONGLONG Read64BitUnsigned(const unsigned char *pValue) +{ + return (LONGLONG)(((ULONGLONG)pValue[0] << 56) | ((ULONGLONG)pValue[1] << 48) | + ((ULONGLONG)pValue[2] << 40) | ((ULONGLONG)pValue[3] << 32) | + ((ULONGLONG)pValue[4] << 24) | ((ULONGLONG)pValue[5] << 16) | + ((ULONGLONG)pValue[6] << 8) | (ULONGLONG)pValue[7]); +} + +typedef struct _TAPE_POSITION_INFO +{ + UCHAR AtPartitionStart:1; + UCHAR AtPartitionEnd:1; + UCHAR PartitionBlockValid:1; + UCHAR FileSetValid:1; + 
UCHAR :4; + UCHAR Reserved1[3]; + ULONG Partition; + ULONGLONG BlockNumber; + ULONGLONG FileNumber; + ULONGLONG SetNumber; +} TAPE_POSITION_INFO, *PTAPE_POSITION_INFO; + +typedef struct _TAPE_HANDLE_INFO +{ + HANDLE OSHandle; + bool bEOD; + bool bEOF; + bool bEOT; + bool bBlockValid; + ULONG FeaturesLow; + ULONG FeaturesHigh; + ULONG ulFile; + ULONGLONG ullFileStart; + +} TAPE_HANDLE_INFO, *PTAPE_HANDLE_INFO; + +TAPE_HANDLE_INFO TapeHandleTable[] = +{ + { INVALID_HANDLE_VALUE }, + { INVALID_HANDLE_VALUE }, + { INVALID_HANDLE_VALUE }, + { INVALID_HANDLE_VALUE }, + { INVALID_HANDLE_VALUE }, + { INVALID_HANDLE_VALUE }, + { INVALID_HANDLE_VALUE }, + { INVALID_HANDLE_VALUE }, + { INVALID_HANDLE_VALUE }, + { INVALID_HANDLE_VALUE }, + { INVALID_HANDLE_VALUE }, + { INVALID_HANDLE_VALUE }, + { INVALID_HANDLE_VALUE }, + { INVALID_HANDLE_VALUE }, + { INVALID_HANDLE_VALUE }, + { INVALID_HANDLE_VALUE } +}; + +#define NUMBER_HANDLE_ENTRIES (sizeof(TapeHandleTable) / sizeof(TapeHandleTable[0])) + +static DWORD GetTapePositionInfo(HANDLE hDevice, PTAPE_POSITION_INFO TapePositionInfo); +static DWORD GetDensityBlockSize(HANDLE hDevice, DWORD *pdwDensity, DWORD *pdwBlockSize); + +win_tape_device::win_tape_device() +{ + m_fd = -1; +} + +win_tape_device::~win_tape_device() +{ } + +int +win_tape_device::d_open(const char *file, int flags, ...) +{ + HANDLE hDevice = INVALID_HANDLE_VALUE; + char szDeviceName[256] = "\\\\.\\"; + int idxFile; + DWORD dwResult; + + for (idxFile = 0; idxFile < (int)NUMBER_HANDLE_ENTRIES; idxFile++) { + if (TapeHandleTable[idxFile].OSHandle == INVALID_HANDLE_VALUE) { + break; + } + } + + if (idxFile >= (int)NUMBER_HANDLE_ENTRIES) { + return EMFILE; + } + + memset(&TapeHandleTable[idxFile], 0, sizeof(TapeHandleTable[idxFile])); + + if (!IsPathSeparator(file[0])) { + bstrncpy(&szDeviceName[4], file, sizeof(szDeviceName) - 4); + } else { + bstrncpy(&szDeviceName[0], file, sizeof(szDeviceName)); + } + + hDevice = CreateFile(szDeviceName, FILE_ALL_ACCESS, 0, NULL, OPEN_EXISTING, 0, NULL); + + if (hDevice != INVALID_HANDLE_VALUE) { + PTAPE_HANDLE_INFO pHandleInfo = &TapeHandleTable[idxFile]; + + memset(pHandleInfo, 0, sizeof(*pHandleInfo)); + + pHandleInfo->OSHandle = hDevice; + + TAPE_GET_DRIVE_PARAMETERS TapeDriveParameters; + DWORD dwSize = sizeof(TapeDriveParameters); + + dwResult = GetTapeParameters(pHandleInfo->OSHandle, GET_TAPE_DRIVE_INFORMATION, &dwSize, &TapeDriveParameters); + if (dwResult == NO_ERROR) { + pHandleInfo->FeaturesLow = TapeDriveParameters.FeaturesLow; + pHandleInfo->FeaturesHigh = TapeDriveParameters.FeaturesHigh; + } + + TAPE_POSITION_INFO TapePositionInfo; + + dwResult = GetTapePositionInfo(pHandleInfo->OSHandle, &TapePositionInfo); + + if (dwResult == NO_ERROR) { + if (TapePositionInfo.AtPartitionStart || TapePositionInfo.AtPartitionEnd || + (TapePositionInfo.PartitionBlockValid && TapePositionInfo.BlockNumber == 0)) { + pHandleInfo->ulFile = 0; + pHandleInfo->bBlockValid = true; + pHandleInfo->ullFileStart = 0; + } else if (TapePositionInfo.FileSetValid) { + pHandleInfo->ulFile = (ULONG)TapePositionInfo.FileNumber; + } + } + } else { + DWORD dwError = GetLastError(); + + switch (dwError) { + case ERROR_FILE_NOT_FOUND: + case ERROR_PATH_NOT_FOUND: + errno = ENOENT; + break; + + case ERROR_TOO_MANY_OPEN_FILES: + errno = EMFILE; + break; + + default: + case ERROR_ACCESS_DENIED: + case ERROR_SHARING_VIOLATION: + case ERROR_LOCK_VIOLATION: + case ERROR_INVALID_NAME: + errno = EACCES; + break; + + case ERROR_FILE_EXISTS: + errno = EEXIST; + break; + + case 
ERROR_INVALID_PARAMETER: + errno = EINVAL; + break; + } + + return(int) -1; + } + + return (int)idxFile + 3; +} + +ssize_t win_tape_device::d_read(int fd, void *buffer, size_t count) +{ + if (buffer == NULL) { + errno = EINVAL; + return -1; + } + + if (fd < 3 || fd >= (int)(NUMBER_HANDLE_ENTRIES + 3) || + TapeHandleTable[fd - 3].OSHandle == INVALID_HANDLE_VALUE) { + errno = EBADF; + return -1; + } + + PTAPE_HANDLE_INFO pHandleInfo = &TapeHandleTable[fd - 3]; + + DWORD bytes_read; + BOOL bResult; + + bResult = ReadFile(pHandleInfo->OSHandle, buffer, count, &bytes_read, NULL); + + if (bResult) { + pHandleInfo->bEOF = false; + pHandleInfo->bEOT = false; + pHandleInfo->bEOD = false; + return bytes_read; + } else { + int iReturnValue = 0; + DWORD last_error = GetLastError(); + + switch (last_error) { + + case ERROR_FILEMARK_DETECTED: + pHandleInfo->bEOF = true; + break; + + case ERROR_END_OF_MEDIA: + pHandleInfo->bEOT = true; + break; + + case ERROR_NO_MEDIA_IN_DRIVE: + pHandleInfo->bEOF = false; + pHandleInfo->bEOT = false; + pHandleInfo->bEOD = false; + errno = ENOMEDIUM; + iReturnValue = -1; + break; + + case ERROR_NO_DATA_DETECTED: + pHandleInfo->bEOD = true; + break; + + case ERROR_INVALID_HANDLE: + case ERROR_ACCESS_DENIED: + case ERROR_LOCK_VIOLATION: + errno = EBADF; + iReturnValue = -1; + break; + + default: + pHandleInfo->bEOF = false; + pHandleInfo->bEOT = false; + pHandleInfo->bEOD = false; + errno = EIO; + iReturnValue = -1; + } + + return iReturnValue; + } +} + +ssize_t win_tape_device::d_write(int fd, const void *buffer, size_t count) +{ + if (buffer == NULL) { + errno = EINVAL; + return -1; + } + + if (fd < 3 || fd >= (int)(NUMBER_HANDLE_ENTRIES + 3) || + TapeHandleTable[fd - 3].OSHandle == INVALID_HANDLE_VALUE) { + errno = EBADF; + return -1; + } + + PTAPE_HANDLE_INFO pHandleInfo = &TapeHandleTable[fd - 3]; + + DWORD bytes_written; + BOOL bResult; + + bResult = WriteFile(pHandleInfo->OSHandle, buffer, count, &bytes_written, NULL); + + if (bResult) { + pHandleInfo->bEOF = false; + pHandleInfo->bEOT = false; + return bytes_written; + } else { + DWORD last_error = GetLastError(); + + switch (last_error) { + case ERROR_END_OF_MEDIA: + case ERROR_DISK_FULL: + pHandleInfo->bEOT = true; + errno = ENOSPC; + break; + + case ERROR_NO_MEDIA_IN_DRIVE: + pHandleInfo->bEOF = false; + pHandleInfo->bEOT = false; + pHandleInfo->bEOD = false; + errno = ENOMEDIUM; + break; + + case ERROR_INVALID_HANDLE: + case ERROR_ACCESS_DENIED: + errno = EBADF; + break; + + default: + pHandleInfo->bEOF = false; + pHandleInfo->bEOT = false; + pHandleInfo->bEOD = false; + errno = EIO; + break; + } + return -1; + } +} + +int win_tape_device::d_close(int fd) +{ + if (fd < 3 || fd >= (int)(NUMBER_HANDLE_ENTRIES + 3) || + TapeHandleTable[fd - 3].OSHandle == INVALID_HANDLE_VALUE) { + errno = EBADF; + return -1; + } + + PTAPE_HANDLE_INFO pHandleInfo = &TapeHandleTable[fd - 3]; + + if (!CloseHandle(pHandleInfo->OSHandle)) { + pHandleInfo->OSHandle = INVALID_HANDLE_VALUE; + errno = EBADF; + return -1; + } + + pHandleInfo->OSHandle = INVALID_HANDLE_VALUE; + + return 0; +} + +int win_tape_device::d_ioctl(int fd, ioctl_req_t request, char *op) +{ + va_list argp; + int result; + + va_start(argp, request); + + switch (request) { + case MTIOCTOP: + result = tape_op(fd, va_arg(argp, mtop *)); + break; + + case MTIOCGET: + result = tape_get(fd, va_arg(argp, mtget *)); + break; + + case MTIOCPOS: + result = tape_pos(fd, va_arg(argp, mtpos *)); + break; + + default: + errno = ENOTTY; + result = -1; + break; + } + + va_end(argp); 
+ + return result; +} + +int win_tape_device::tape_op(int fd, struct mtop *mt_com) +{ + DWORD result = NO_ERROR; + int index; + + if (fd < 3 || fd >= (int)(NUMBER_HANDLE_ENTRIES + 3) || + TapeHandleTable[fd - 3].OSHandle == INVALID_HANDLE_VALUE) + { + errno = EBADF; + return -1; + } + + PTAPE_HANDLE_INFO pHandleInfo = &TapeHandleTable[fd - 3]; + + switch (mt_com->mt_op) { + case MTRESET: + case MTNOP: + case MTSETDRVBUFFER: + break; + + default: + case MTRAS1: + case MTRAS2: + case MTRAS3: + case MTSETDENSITY: + errno = ENOTTY; + result = (DWORD)-1; + break; + + case MTFSF: + for (index = 0; index < mt_com->mt_count; index++) { + result = SetTapePosition(pHandleInfo->OSHandle, TAPE_SPACE_FILEMARKS, 0, 1, 0, FALSE); + if (result == NO_ERROR) { + pHandleInfo->ulFile++; + pHandleInfo->bEOF = true; + pHandleInfo->bEOT = false; + } + } + break; + + case MTBSF: + for (index = 0; index < mt_com->mt_count; index++) { + result = SetTapePosition(pHandleInfo->OSHandle, TAPE_SPACE_FILEMARKS, 0, (DWORD)-1, ~0, FALSE); + if (result == NO_ERROR) { + pHandleInfo->ulFile--; + pHandleInfo->bBlockValid = false; + pHandleInfo->bEOD = false; + pHandleInfo->bEOF = false; + pHandleInfo->bEOT = false; + } + } + break; + + case MTFSR: + result = SetTapePosition(pHandleInfo->OSHandle, TAPE_SPACE_RELATIVE_BLOCKS, 0, mt_com->mt_count, 0, FALSE); + if (result == NO_ERROR) { + pHandleInfo->bEOD = false; + pHandleInfo->bEOF = false; + pHandleInfo->bEOT = false; + } else if (result == ERROR_FILEMARK_DETECTED) { + pHandleInfo->bEOF = true; + } + break; + + case MTBSR: + result = SetTapePosition(pHandleInfo->OSHandle, TAPE_SPACE_RELATIVE_BLOCKS, 0, -mt_com->mt_count, ~0, FALSE); + if (result == NO_ERROR) { + pHandleInfo->bEOD = false; + pHandleInfo->bEOF = false; + pHandleInfo->bEOT = false; + } else if (result == ERROR_FILEMARK_DETECTED) { + pHandleInfo->ulFile--; + pHandleInfo->bBlockValid = false; + pHandleInfo->bEOD = false; + pHandleInfo->bEOF = false; + pHandleInfo->bEOT = false; + } + break; + + case MTWEOF: + result = WriteTapemark(pHandleInfo->OSHandle, TAPE_FILEMARKS, mt_com->mt_count, FALSE); + if (result == NO_ERROR) { + pHandleInfo->bEOF = true; + pHandleInfo->bEOT = false; + pHandleInfo->ulFile += mt_com->mt_count; + pHandleInfo->bBlockValid = true; + pHandleInfo->ullFileStart = 0; + } + break; + + case MTREW: + result = SetTapePosition(pHandleInfo->OSHandle, TAPE_REWIND, 0, 0, 0, FALSE); + if (result == NO_ERROR) { + pHandleInfo->bEOD = false; + pHandleInfo->bEOF = false; + pHandleInfo->bEOT = false; + pHandleInfo->ulFile = 0; + pHandleInfo->bBlockValid = true; + pHandleInfo->ullFileStart = 0; + } + break; + + case MTOFFL: + result = PrepareTape(pHandleInfo->OSHandle, TAPE_UNLOAD, FALSE); + if (result == NO_ERROR) { + pHandleInfo->bEOD = false; + pHandleInfo->bEOF = false; + pHandleInfo->bEOT = false; + pHandleInfo->ulFile = 0; + pHandleInfo->ullFileStart = 0; + } + break; + + case MTRETEN: + result = PrepareTape(pHandleInfo->OSHandle, TAPE_TENSION, FALSE); + if (result == NO_ERROR) { + pHandleInfo->bEOD = false; + pHandleInfo->bEOF = false; + pHandleInfo->bEOT = false; + pHandleInfo->ulFile = 0; + pHandleInfo->bBlockValid = true; + pHandleInfo->ullFileStart = 0; + } + break; + + case MTBSFM: + for (index = 0; index < mt_com->mt_count; index++) { + result = SetTapePosition(pHandleInfo->OSHandle, TAPE_SPACE_FILEMARKS, 0, (DWORD)-1, ~0, FALSE); + if (result == NO_ERROR) { + result = SetTapePosition(pHandleInfo->OSHandle, TAPE_SPACE_FILEMARKS, 0, 1, 0, FALSE); + pHandleInfo->bEOD = false; + pHandleInfo->bEOF = 
false; + pHandleInfo->bEOT = false; + } + } + break; + + case MTFSFM: + for (index = 0; index < mt_com->mt_count; index++) { + result = SetTapePosition(pHandleInfo->OSHandle, TAPE_SPACE_FILEMARKS, 0, mt_com->mt_count, 0, FALSE); + if (result == NO_ERROR) { + result = SetTapePosition(pHandleInfo->OSHandle, TAPE_SPACE_FILEMARKS, 0, (DWORD)-1, ~0, FALSE); + pHandleInfo->bEOD = false; + pHandleInfo->bEOF = false; + pHandleInfo->bEOT = false; + } + } + break; + + case MTEOM: + for ( ; ; ) { + result = SetTapePosition(pHandleInfo->OSHandle, TAPE_SPACE_FILEMARKS, 0, 1, 0, FALSE); + if (result != NO_ERROR) { + pHandleInfo->bEOF = false; + + if (result == ERROR_END_OF_MEDIA) { + pHandleInfo->bEOD = true; + pHandleInfo->bEOT = true; + return 0; + } + if (result == ERROR_NO_DATA_DETECTED) { + pHandleInfo->bEOD = true; + pHandleInfo->bEOT = false; + return 0; + } + break; + } else { + pHandleInfo->bEOF = true; + pHandleInfo->ulFile++; + } + } + break; + + case MTERASE: + result = EraseTape(pHandleInfo->OSHandle, TAPE_ERASE_LONG, FALSE); + if (result == NO_ERROR) { + pHandleInfo->bEOD = true; + pHandleInfo->bEOF = false; + pHandleInfo->bEOT = false; + pHandleInfo->ulFile = 0; + pHandleInfo->bBlockValid = true; + pHandleInfo->ullFileStart = 0; + } + break; + + case MTSETBLK: + { + TAPE_SET_MEDIA_PARAMETERS SetMediaParameters; + + SetMediaParameters.BlockSize = mt_com->mt_count; + result = SetTapeParameters(pHandleInfo->OSHandle, SET_TAPE_MEDIA_INFORMATION, &SetMediaParameters); + } + break; + + case MTSEEK: + { + TAPE_POSITION_INFO TapePositionInfo; + + result = SetTapePosition(pHandleInfo->OSHandle, TAPE_ABSOLUTE_BLOCK, 0, mt_com->mt_count, 0, FALSE); + + memset(&TapePositionInfo, 0, sizeof(TapePositionInfo)); + DWORD dwPosResult = GetTapePositionInfo(pHandleInfo->OSHandle, &TapePositionInfo); + if (dwPosResult == NO_ERROR && TapePositionInfo.FileSetValid) { + pHandleInfo->ulFile = (ULONG)TapePositionInfo.FileNumber; + } else { + pHandleInfo->ulFile = ~0U; + } + } + break; + + case MTTELL: + { + DWORD partition; + DWORD offset; + DWORD offsetHi; + + result = GetTapePosition(pHandleInfo->OSHandle, TAPE_ABSOLUTE_BLOCK, &partition, &offset, &offsetHi); + if (result == NO_ERROR) { + return offset; + } + } + break; + + case MTFSS: + result = SetTapePosition(pHandleInfo->OSHandle, TAPE_SPACE_SETMARKS, 0, mt_com->mt_count, 0, FALSE); + break; + + case MTBSS: + result = SetTapePosition(pHandleInfo->OSHandle, TAPE_SPACE_SETMARKS, 0, -mt_com->mt_count, ~0, FALSE); + break; + + case MTWSM: + result = WriteTapemark(pHandleInfo->OSHandle, TAPE_SETMARKS, mt_com->mt_count, FALSE); + break; + + case MTLOCK: + result = PrepareTape(pHandleInfo->OSHandle, TAPE_LOCK, FALSE); + break; + + case MTUNLOCK: + result = PrepareTape(pHandleInfo->OSHandle, TAPE_UNLOCK, FALSE); + break; + + case MTLOAD: + result = PrepareTape(pHandleInfo->OSHandle, TAPE_LOAD, FALSE); + break; + + case MTUNLOAD: + result = PrepareTape(pHandleInfo->OSHandle, TAPE_UNLOAD, FALSE); + break; + + case MTCOMPRESSION: + { + TAPE_GET_DRIVE_PARAMETERS GetDriveParameters; + TAPE_SET_DRIVE_PARAMETERS SetDriveParameters; + DWORD size; + + size = sizeof(GetDriveParameters); + + result = GetTapeParameters(pHandleInfo->OSHandle, GET_TAPE_DRIVE_INFORMATION, &size, &GetDriveParameters); + + if (result == NO_ERROR) { + SetDriveParameters.ECC = GetDriveParameters.ECC; + SetDriveParameters.Compression = (BOOLEAN)mt_com->mt_count; + SetDriveParameters.DataPadding = GetDriveParameters.DataPadding; + SetDriveParameters.ReportSetmarks = GetDriveParameters.ReportSetmarks; + 
SetDriveParameters.EOTWarningZoneSize = GetDriveParameters.EOTWarningZoneSize; + + result = SetTapeParameters(pHandleInfo->OSHandle, SET_TAPE_DRIVE_INFORMATION, &SetDriveParameters); + } + } + break; + + case MTSETPART: + result = SetTapePosition(pHandleInfo->OSHandle, TAPE_LOGICAL_BLOCK, mt_com->mt_count, 0, 0, FALSE); + break; + + case MTMKPART: + if (mt_com->mt_count == 0) { + result = CreateTapePartition(pHandleInfo->OSHandle, TAPE_INITIATOR_PARTITIONS, 1, 0); + } else { + result = CreateTapePartition(pHandleInfo->OSHandle, TAPE_INITIATOR_PARTITIONS, 2, mt_com->mt_count); + } + break; + } + + if ((result == NO_ERROR && pHandleInfo->bEOF) || + (result == ERROR_FILEMARK_DETECTED && mt_com->mt_op == MTFSR)) { + + TAPE_POSITION_INFO TapePositionInfo; + + if (GetTapePositionInfo(pHandleInfo->OSHandle, &TapePositionInfo) == NO_ERROR) { + pHandleInfo->bBlockValid = true; + pHandleInfo->ullFileStart = TapePositionInfo.BlockNumber; + } + } + + switch (result) { + case NO_ERROR: + case (DWORD)-1: /* Error has already been translated into errno */ + break; + + default: + case ERROR_FILEMARK_DETECTED: + errno = EIO; + break; + + case ERROR_END_OF_MEDIA: + pHandleInfo->bEOT = true; + errno = EIO; + break; + + case ERROR_NO_DATA_DETECTED: + pHandleInfo->bEOD = true; + errno = EIO; + break; + + case ERROR_NO_MEDIA_IN_DRIVE: + pHandleInfo->bEOF = false; + pHandleInfo->bEOT = false; + pHandleInfo->bEOD = false; + errno = ENOMEDIUM; + break; + + case ERROR_INVALID_HANDLE: + case ERROR_ACCESS_DENIED: + case ERROR_LOCK_VIOLATION: + errno = EBADF; + break; + } + + return result == NO_ERROR ? 0 : -1; +} + +int win_tape_device::tape_get(int fd, struct mtget *mt_get) +{ + TAPE_POSITION_INFO pos_info; + BOOL result; + + if (fd < 3 || fd >= (int)(NUMBER_HANDLE_ENTRIES + 3) || + TapeHandleTable[fd - 3].OSHandle == INVALID_HANDLE_VALUE) { + errno = EBADF; + return -1; + } + + PTAPE_HANDLE_INFO pHandleInfo = &TapeHandleTable[fd - 3]; + + if (GetTapePositionInfo(pHandleInfo->OSHandle, &pos_info) != NO_ERROR) { + return -1; + } + + DWORD density = 0; + DWORD blocksize = 0; + + result = GetDensityBlockSize(pHandleInfo->OSHandle, &density, &blocksize); + + if (result != NO_ERROR) { + TAPE_GET_DRIVE_PARAMETERS drive_params; + DWORD size; + + size = sizeof(drive_params); + + result = GetTapeParameters(pHandleInfo->OSHandle, GET_TAPE_DRIVE_INFORMATION, &size, &drive_params); + + if (result == NO_ERROR) { + blocksize = drive_params.DefaultBlockSize; + } + } + + mt_get->mt_type = MT_ISSCSI2; + + // Partition # + mt_get->mt_resid = pos_info.PartitionBlockValid ? 
pos_info.Partition : (ULONG)-1; + + // Density / Block Size + mt_get->mt_dsreg = ((density << MT_ST_DENSITY_SHIFT) & MT_ST_DENSITY_MASK) | + ((blocksize << MT_ST_BLKSIZE_SHIFT) & MT_ST_BLKSIZE_MASK); + + mt_get->mt_gstat = 0x00010000; /* Immediate report mode.*/ + + if (pHandleInfo->bEOF) { + mt_get->mt_gstat |= 0x80000000; // GMT_EOF + } + + if (pos_info.PartitionBlockValid && pos_info.BlockNumber == 0) { + mt_get->mt_gstat |= 0x40000000; // GMT_BOT + } + + if (pHandleInfo->bEOT) { + mt_get->mt_gstat |= 0x20000000; // GMT_EOT + } + + if (pHandleInfo->bEOD) { + mt_get->mt_gstat |= 0x08000000; // GMT_EOD + } + + TAPE_GET_MEDIA_PARAMETERS media_params; + DWORD size = sizeof(media_params); + + result = GetTapeParameters(pHandleInfo->OSHandle, GET_TAPE_MEDIA_INFORMATION, &size, &media_params); + + if (result == NO_ERROR && media_params.WriteProtected) { + mt_get->mt_gstat |= 0x04000000; // GMT_WR_PROT + } + + result = GetTapeStatus(pHandleInfo->OSHandle); + + if (result != NO_ERROR) { + if (result == ERROR_NO_MEDIA_IN_DRIVE) { + mt_get->mt_gstat |= 0x00040000; // GMT_DR_OPEN + } + } else { + mt_get->mt_gstat |= 0x01000000; // GMT_ONLINE + } + + // Recovered Error Count + mt_get->mt_erreg = 0; + + // File Number + mt_get->mt_fileno = (__daddr_t)pHandleInfo->ulFile; + + // Block Number + mt_get->mt_blkno = (__daddr_t)(pHandleInfo->bBlockValid ? pos_info.BlockNumber - pHandleInfo->ullFileStart : (ULONGLONG)-1); + + return 0; +} + +#define SERVICEACTION_SHORT_FORM_BLOCKID 0 +#define SERVICEACTION_SHORT_FORM_VENDOR_SPECIFIC 1 +#define SERVICEACTION_LONG_FORM 6 +#define SERVICEACTION_EXTENDED_FORM 8 + + +typedef struct _SCSI_READ_POSITION_SHORT_BUFFER +{ + UCHAR :1; + UCHAR PERR:1; + UCHAR BPU:1; + UCHAR :1; + UCHAR BYCU:1; + UCHAR BCU:1; + UCHAR EOP:1; + UCHAR BOP:1; + UCHAR Partition; + UCHAR Reserved1[2]; + UCHAR FirstBlock[4]; + UCHAR LastBlock[4]; + UCHAR Reserved2; + UCHAR NumberBufferBlocks[3]; + UCHAR NumberBufferBytes[4]; +} SCSI_READ_POSITION_SHORT_BUFFER, *PSCSI_READ_POSITION_SHORT_BUFFER; + +typedef struct _SCSI_READ_POSITION_LONG_BUFFER +{ + UCHAR :2; + UCHAR BPU:1; + UCHAR MPU:1; + UCHAR :2; + UCHAR EOP:1; + UCHAR BOP:1; + UCHAR Reserved3[3]; + UCHAR Partition[4]; + UCHAR BlockNumber[8]; + UCHAR FileNumber[8]; + UCHAR SetNumber[8]; +} SCSI_READ_POSITION_LONG_BUFFER, *PSCSI_READ_POSITION_LONG_BUFFER; + +typedef struct _SCSI_READ_POSITION_EXTENDED_BUFFER +{ + UCHAR :1; + UCHAR PERR:1; + UCHAR LOPU:1; + UCHAR :1; + UCHAR BYCU:1; + UCHAR LOCU:1; + UCHAR EOP:1; + UCHAR BOP:1; + UCHAR Partition; + UCHAR AdditionalLength[2]; + UCHAR Reserved1; + UCHAR NumberBufferObjects[3]; + UCHAR FirstLogicalObject[8]; + UCHAR LastLogicalObject[8]; + UCHAR NumberBufferObjectBytes[8]; +} SCSI_READ_POSITION_EXTENDED_BUFFER, *PSCSI_READ_POSITION_EXTENDED_BUFFER; + +typedef union _READ_POSITION_RESULT { + SCSI_READ_POSITION_SHORT_BUFFER ShortBuffer; + SCSI_READ_POSITION_LONG_BUFFER LongBuffer; + SCSI_READ_POSITION_EXTENDED_BUFFER ExtendedBuffer; +} READ_POSITION_RESULT, *PREAD_POSITION_RESULT; + +static DWORD GetTapePositionInfo(HANDLE hDevice, PTAPE_POSITION_INFO TapePositionInfo) +{ + PSCSI_PASS_THROUGH ScsiPassThrough; + BOOL bResult; + DWORD dwBytesReturned; + + const DWORD dwBufferSize = sizeof(SCSI_PASS_THROUGH) + sizeof(READ_POSITION_RESULT) + 28; + + memset(TapePositionInfo, 0, sizeof(*TapePositionInfo)); + + ScsiPassThrough = (PSCSI_PASS_THROUGH)malloc(dwBufferSize); + + for (int pass = 0; pass < 2; pass++) + { + memset(ScsiPassThrough, 0, dwBufferSize); + + ScsiPassThrough->Length = 
sizeof(SCSI_PASS_THROUGH); + + ScsiPassThrough->CdbLength = 10; + ScsiPassThrough->SenseInfoLength = 28; + ScsiPassThrough->DataIn = 1; + ScsiPassThrough->DataTransferLength = sizeof(SCSI_READ_POSITION_LONG_BUFFER); + ScsiPassThrough->TimeOutValue = 1000; + ScsiPassThrough->DataBufferOffset = sizeof(SCSI_PASS_THROUGH) + 28; + ScsiPassThrough->SenseInfoOffset = sizeof(SCSI_PASS_THROUGH); + + ScsiPassThrough->Cdb[0] = 0x34; // READ POSITION + + switch (pass) + { + case 0: + ScsiPassThrough->Cdb[1] = SERVICEACTION_LONG_FORM; + break; + + case 1: + ScsiPassThrough->Cdb[1] = SERVICEACTION_SHORT_FORM_BLOCKID; + break; + } + + bResult = DeviceIoControl( hDevice, + IOCTL_SCSI_PASS_THROUGH, + ScsiPassThrough, sizeof(SCSI_PASS_THROUGH), + ScsiPassThrough, dwBufferSize, + &dwBytesReturned, + NULL); + + if (bResult && dwBytesReturned >= (offsetof(SCSI_PASS_THROUGH, ScsiStatus) + sizeof(ScsiPassThrough->ScsiStatus))) { + if (ScsiPassThrough->ScsiStatus == SCSISTAT_GOOD) { + PREAD_POSITION_RESULT pPosResult = (PREAD_POSITION_RESULT)((PUCHAR)ScsiPassThrough + ScsiPassThrough->DataBufferOffset); + + switch (pass) + { + case 0: // SERVICEACTION_LONG_FORM + { + TapePositionInfo->AtPartitionStart = pPosResult->LongBuffer.BOP; + TapePositionInfo->AtPartitionEnd = pPosResult->LongBuffer.EOP; + + if (!TapePositionInfo->PartitionBlockValid) { + TapePositionInfo->PartitionBlockValid = !pPosResult->LongBuffer.BPU; + + if (TapePositionInfo->PartitionBlockValid) { + TapePositionInfo->Partition = Read32BitUnsigned(pPosResult->LongBuffer.Partition); + TapePositionInfo->BlockNumber = Read64BitUnsigned(pPosResult->LongBuffer.BlockNumber); + } + } + + TapePositionInfo->FileSetValid = !pPosResult->LongBuffer.MPU; + if (TapePositionInfo->FileSetValid) { + TapePositionInfo->FileNumber = Read64BitUnsigned(pPosResult->LongBuffer.FileNumber); + TapePositionInfo->SetNumber = Read64BitUnsigned(pPosResult->LongBuffer.SetNumber); + } + } + break; + + case 1: // SERVICEACTION_SHORT_FORM_BLOCKID + { + // pPosResult->ShortBuffer.PERR; + // pPosResult->ShortBuffer.BYCU; + // pPosResult->ShortBuffer.BCU; + TapePositionInfo->AtPartitionStart = pPosResult->ShortBuffer.BOP; + TapePositionInfo->AtPartitionEnd = pPosResult->ShortBuffer.EOP; + + if (!TapePositionInfo->PartitionBlockValid) { + TapePositionInfo->PartitionBlockValid = !pPosResult->ShortBuffer.BPU; + + if (TapePositionInfo->PartitionBlockValid) { + TapePositionInfo->Partition = pPosResult->ShortBuffer.Partition; + TapePositionInfo->BlockNumber = Read32BitUnsigned(pPosResult->ShortBuffer.FirstBlock); + } + } + // Read32BitsUnsigned(pPosResult->ShortBuffer.LastBlock); + // Read24BitsUnsigned(pPosResult->ShortBuffer.NumberBufferBlocks); + // Read32BitsUnsigned(pPosResult->ShortBuffer.NumberBufferBytes); + } + break; + } + } + } + } + free(ScsiPassThrough); + + return NO_ERROR; +} + +static DWORD GetDensityBlockSize(HANDLE hDevice, DWORD *pdwDensity, DWORD *pdwBlockSize) +{ + DWORD dwBufferSize = sizeof(GET_MEDIA_TYPES) + 5 * sizeof(DEVICE_MEDIA_INFO); + GET_MEDIA_TYPES * pGetMediaTypes = (GET_MEDIA_TYPES *)malloc(dwBufferSize); + BOOL bResult; + DWORD dwResult; + + if (pGetMediaTypes == NULL) { + return ERROR_OUTOFMEMORY; + } + + do { + DWORD dwBytesReturned; + + bResult = DeviceIoControl( hDevice, + IOCTL_STORAGE_GET_MEDIA_TYPES_EX, + NULL, 0, + (LPVOID)pGetMediaTypes, dwBufferSize, + &dwBytesReturned, + NULL); + + if (!bResult) { + dwResult = GetLastError(); + + if (dwResult != ERROR_INSUFFICIENT_BUFFER) { + free(pGetMediaTypes); + return dwResult; + } + + dwBufferSize += 6 * 
sizeof(DEVICE_MEDIA_INFO); + + GET_MEDIA_TYPES * pNewBuffer = (GET_MEDIA_TYPES *)realloc(pGetMediaTypes, dwBufferSize); + + if (pNewBuffer != pGetMediaTypes) { + free(pGetMediaTypes); + + if (pNewBuffer == NULL) { + return ERROR_OUTOFMEMORY; + } + + pGetMediaTypes = pNewBuffer; + } + } + } while (!bResult); + + if (pGetMediaTypes->DeviceType != FILE_DEVICE_TAPE) { + free(pGetMediaTypes); + return ERROR_BAD_DEVICE; + } + + for (DWORD idxMedia = 0; idxMedia < pGetMediaTypes->MediaInfoCount; idxMedia++) { + + if (pGetMediaTypes->MediaInfo[idxMedia].DeviceSpecific.TapeInfo.MediaCharacteristics & MEDIA_CURRENTLY_MOUNTED) { + + if (pGetMediaTypes->MediaInfo[idxMedia].DeviceSpecific.TapeInfo.BusType == BusTypeScsi) { + *pdwDensity = pGetMediaTypes->MediaInfo[idxMedia].DeviceSpecific.TapeInfo.BusSpecificData.ScsiInformation.DensityCode; + } else { + *pdwDensity = 0; + } + + *pdwBlockSize = pGetMediaTypes->MediaInfo[idxMedia].DeviceSpecific.TapeInfo.CurrentBlockSize; + + free(pGetMediaTypes); + + return NO_ERROR; + } + } + + free(pGetMediaTypes); + + return ERROR_NO_MEDIA_IN_DRIVE; +} + +int win_tape_device::tape_pos(int fd, struct mtpos *mt_pos) +{ + DWORD partition; + DWORD offset; + DWORD offsetHi; + BOOL result; + + if (fd < 3 || fd >= (int)(NUMBER_HANDLE_ENTRIES + 3) || + TapeHandleTable[fd - 3].OSHandle == INVALID_HANDLE_VALUE) { + errno = EBADF; + return -1; + } + + PTAPE_HANDLE_INFO pHandleInfo = &TapeHandleTable[fd - 3]; + + result = GetTapePosition(pHandleInfo->OSHandle, TAPE_ABSOLUTE_BLOCK, &partition, &offset, &offsetHi); + if (result == NO_ERROR) { + mt_pos->mt_blkno = offset; + return 0; + } + + return -1; +} diff --git a/src/win32/tools/Makefile b/src/win32/tools/Makefile new file mode 100644 index 00000000..a5757123 --- /dev/null +++ b/src/win32/tools/Makefile @@ -0,0 +1,107 @@ +# +# Makefile for win32 bacula executables +# Using MinGW cross-compiler on GNU/Linux +# +# Written by Robert Nelson, June 2006 +# + +include ../Makefile.inc + +INCLUDES = \ + $(INCLUDE_PTHREADS) \ + $(INCLUDE_BACULA) \ + $(INCLUDE_ZLIB) \ + $(INCLUDE_OPENSSL) \ + $(INCLUDE_ICONS) + +DEFINES = \ + -DUSING_DLL \ + -DUSING_CATS \ + $(HAVES) + +LDFLAGS:=$(LDFLAGS) -lstdc++ + +vpath %.c $(MAINDIR)/src/tools $(MAINDIR)/src/dird +vpath %.cpp $(MAINDIR)/src/tools $(MAINDIR)/src/dird + +########################################################################## + +DIRCONF_OBJS = \ + $(OBJDIR)/dird_conf.o \ + $(OBJDIR)/run_conf.o \ + $(OBJDIR)/inc_conf.o + +ALL_OBJS = \ + $(DIRCONF_OBJS) \ + $(OBJDIR)/bsmtp.o \ + $(OBJDIR)/dbcheck.o \ + $(OBJDIR)/scsilist.o \ + $(OBJDIR)/ScsiDeviceList.o \ + $(OBJDIR)/fstype.o \ + $(OBJDIR)/drivetype.o \ + $(OBJDIR)/testfind.o \ + $(OBJDIR)/testls.o \ + $(OBJDIR)/bregex.o \ + $(OBJDIR)/bwild.o + +########################################################################## + +# Targets + +.PHONY: all clean bsmtp + +all: \ + $(BINDIR)/bsmtp.exe $(BINDIR)/dbcheck.exe $(BINDIR)/scsilist.exe \ + $(BINDIR)/drivetype.exe $(BINDIR)/fstype.exe \ + $(BINDIR)/testfind.exe $(BINDIR)/testls.exe \ + $(BINDIR)/bregex.exe $(BINDIR)/bwild.exe + +bsmtp: \ + $(BINDIR)/bsmtp.exe + +clean: + @echo "Cleaning `pwd`" + $(call clean_obj,$(ALL_OBJS)) + $(call clean_exe,$(BINDIR)/bsmtp.exe) + $(call clean_exe,$(BINDIR)/dbcheck.exe) + $(call clean_exe,$(BINDIR)/scsilist.exe) + $(call clean_exe,$(BINDIR)/drivetype.exe) + $(call clean_exe,$(BINDIR)/fstype.exe) + $(call clean_exe,$(BINDIR)/testfind.exe) + $(call clean_exe,$(BINDIR)/testls.exe) + $(call clean_exe,$(BINDIR)/bregex.exe) + $(call 
clean_exe,$(BINDIR)/bwild.exe) + $(ECHO_CMD)rm -rf $(OBJDIRS) + +# +# Rules +# + +$(BINDIR)/bsmtp.exe: $(OBJDIR)/bsmtp.o $(LIBS_BACULA) + $(call link_conapp,-lws2_32) + +$(BINDIR)/dbcheck.exe: $(OBJDIR)/dbcheck.o $(DIRCONF_OBJS) $(LIBS_BACULA) $(LIBS_CATS) + $(call link_conapp,) + +$(BINDIR)/scsilist.exe: $(OBJDIR)/scsilist.o $(OBJDIR)/ScsiDeviceList.o $(LIBS_BACULA) + $(call link_conapp,) + +$(BINDIR)/drivetype.exe: $(OBJDIR)/drivetype.o $(LIBS_BACULA) + $(call link_conapp,) + +$(BINDIR)/fstype.exe: $(OBJDIR)/fstype.o $(LIBS_BACULA) + $(call link_conapp,) + +$(BINDIR)/testfind.exe: $(OBJDIR)/testfind.o $(DIRCONF_OBJS) $(LIBS_BACULA) + $(call link_conapp,) + +$(BINDIR)/testls.exe: $(OBJDIR)/testls.o $(LIBS_BACULA) + $(call link_conapp,) + +$(BINDIR)/bregex.exe: $(OBJDIR)/bregex.o $(LIBS_BACULA) + $(call link_conapp,) + +$(BINDIR)/bwild.exe: $(OBJDIR)/bwild.o $(LIBS_BACULA) + $(call link_conapp,) + +include ../Makefile.rules diff --git a/src/win32/tools/ScsiDeviceList.cpp b/src/win32/tools/ScsiDeviceList.cpp new file mode 100644 index 00000000..fcaecba2 --- /dev/null +++ b/src/win32/tools/ScsiDeviceList.cpp @@ -0,0 +1,363 @@ +/* + * ScsiDeviceList.cpp - Class which provides information on installed devices. + * + * Author: Robert Nelson, August, 2006 + * + * Version $Id$ + * + * This file was contributed to the Bacula project by Robert Nelson. + * + * Robert Nelson has been granted a perpetual, worldwide, + * non-exclusive, no-charge, royalty-free, irrevocable copyright + * license to reproduce, prepare derivative works of, publicly + * display, publicly perform, sublicense, and distribute the original + * work contributed by Robert Nelson to the Bacula project in source + * or object form. + * + * If you wish to license contributions from Robert Nelson + * under an alternate open source license please contact + * Robert Nelson . + */ +/* + Bacula® - The Network Backup Solution + + Copyright (C) 2006-2006 Free Software Foundation Europe e.V. + + The main author of Bacula is Kern Sibbald, with contributions from + many others, a complete list can be found in the file AUTHORS. + This program is Free Software; you can redistribute it and/or + modify it under the terms of version three of the GNU Affero General Public + License as published by the Free Software Foundation and included + in the file LICENSE. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + + Bacula® is a registered trademark of Kern Sibbald. + The licensor of Bacula is the Free Software Foundation Europe + (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich, + Switzerland, email:ftf@fsfeurope.org. 
+*/ + +#if defined(_MSC_VER) && defined(_DEBUG) +#include +#else +#include +#endif + +#include +#include + +#include "ScsiDeviceList.h" + +#if defined(_MSC_VER) && defined(_DEBUG) +#define new DEBUG_NEW +#endif + +TCHAR CScsiDeviceList::c_ScsiPath[] = _T("HARDWARE\\DEVICEMAP\\Scsi"); + +LPCTSTR CScsiDeviceList::c_lpszFormatList[] = +{ + _T("Logical Unit Id %d"), + _T("Target Id %d"), + _T("Scsi Bus %d"), + _T("Scsi Port %d") +}; + +LPCTSTR CScsiDeviceListEntry::c_DeviceTypes[] = +{ + _T("Unknown"), + _T("CDRom"), + _T("Changer"), + _T("Disk"), + _T("Tape") +}; + +CScsiDeviceListEntry::CScsiDeviceListEntry(const CScsiDeviceListEntry &other) +{ + m_eDeviceType = other.m_eDeviceType; + + m_lpszIdentifier = other.m_lpszIdentifier != NULL ? _tcsdup(other.m_lpszIdentifier) : NULL; + + m_lpszDeviceName = other.m_lpszDeviceName != NULL ? _tcsdup(other.m_lpszDeviceName) : NULL; + + m_dwDeviceId = other.m_dwDeviceId; + _tcscpy(m_szDevicePath, other.m_szDevicePath); +} + +CScsiDeviceListEntry::CScsiDeviceListEntry(void) +{ + m_eDeviceType = Unknown; + m_lpszIdentifier = NULL; + m_lpszDeviceName = NULL; + m_dwDeviceId = 0; + m_szDevicePath[0] = _T('\0'); +} + +CScsiDeviceListEntry::~CScsiDeviceListEntry(void) +{ + if (m_lpszIdentifier != NULL) + { + free(m_lpszIdentifier); + } + + if (m_lpszDeviceName != NULL) + { + free(m_lpszDeviceName); + } +} + +bool +CScsiDeviceList::Populate() +{ + this->clear(); + + HKEY hScsiKey; + + _tcscpy(m_szLastKey, _T("\\Scsi")); + m_dwLastKeyLength = 5; + + m_lLastError = RegOpenKeyEx( HKEY_LOCAL_MACHINE, + c_ScsiPath, + 0, + KEY_READ, + &hScsiKey); + + if (m_lLastError != ERROR_SUCCESS) { + _tcscpy(m_szLastOperation, _T("Opening key ")); + _tcscpy(m_szLastKey, c_ScsiPath); + return false; + } + + if (!ProcessKey(hScsiKey, c_MaxKeyDepth - 1, 0)) { + return false; + } + +#if defined(_DEBUG) + _fputtc(_T('\n'), stderr); +#endif + + return true; +} + +bool +CScsiDeviceList::ProcessKey(HKEY hKey, int iLevel, DWORD dwDeviceId) +{ +#if defined(_DEBUG) + switch (iLevel) + { + case 3: + _ftprintf( stderr, + _T("%-64s\n"), + &m_szLastKey[1]); + break; + + case 2: + _ftprintf( stderr, + _T("%-64s%d\n"), + &m_szLastKey[1], + dwDeviceId & 0xFF); + break; + + case 1: + _ftprintf( stderr, + _T("%-64s%d:%d\n"), + &m_szLastKey[1], + (dwDeviceId >> 8) & 0xFF, + dwDeviceId & 0xFF); + break; + + case 0: + _ftprintf( stderr, + _T("%-64s%d:%d:%d\n"), + &m_szLastKey[1], + (dwDeviceId >> 16) & 0xFF, + (dwDeviceId >> 8) & 0xFF, + dwDeviceId & 0xFF); + break; + } +#endif + + for (int idxSubkey = 0; ; idxSubkey++) { + + TCHAR szSubkeyName[c_MaxSubkeyLength + 1]; + DWORD dwLength; + + dwLength = sizeof(szSubkeyName); + + m_lLastError = RegEnumKeyEx( hKey, + idxSubkey, + szSubkeyName, + &dwLength, + NULL, + NULL, + NULL, + NULL); + + if (m_lLastError == ERROR_NO_MORE_ITEMS) { + break; + } else if (m_lLastError == ERROR_MORE_DATA) { +#if defined(_DEBUG) + _tcscpy(m_szLastOperation, _T("Enumerating subkeys of ")); + PrintLastError(); +#endif + // Subkey name is too long + continue; + } else if (m_lLastError != ERROR_SUCCESS) { + // Unexpected Error + _tcscpy(m_szLastOperation, _T("Enumerating subkeys of ")); + return false; + } + + int iValue; + + if (_stscanf(szSubkeyName, c_lpszFormatList[iLevel], &iValue) != 1) { + // Ignore this subkey, it is probably Initiator Id n + continue; + } + + m_szLastKey[m_dwLastKeyLength++] = _T('\\'); + + DWORD dwSubkeyLength = (DWORD)_tcslen(szSubkeyName); + memcpy(&m_szLastKey[m_dwLastKeyLength], szSubkeyName, (dwSubkeyLength + 1) * sizeof(TCHAR)); + 
m_dwLastKeyLength += dwSubkeyLength; + + HKEY hSubkey; + + m_lLastError = RegOpenKeyEx(hKey, szSubkeyName, 0, KEY_READ, &hSubkey); + + if (m_lLastError != ERROR_SUCCESS) { + _tcscpy(m_szLastOperation, _T("Opening key ")); + return false; + } + + if (iLevel == 0) { +#if defined(_DEBUG) + _ftprintf( stderr, + _T("%-64s%d:%d:%d:%d\n"), + &m_szLastKey[1], + (dwDeviceId >> 16) & 0xFF, + (dwDeviceId >> 8) & 0xFF, + dwDeviceId & 0xFF, + iValue); +#endif + + ProcessValues(hSubkey, (dwDeviceId << 8) | iValue); + } else { + if (!ProcessKey(hSubkey, iLevel - 1, (dwDeviceId << 8) | iValue)) { + return false; + } + } + + m_dwLastKeyLength -= dwSubkeyLength; + m_dwLastKeyLength--; + m_szLastKey[m_dwLastKeyLength] = _T('\0'); + } + + return true; +} + +bool +CScsiDeviceList::ProcessValues(HKEY hKey, DWORD dwDeviceId) +{ + CScsiDeviceListEntry EntryTemplate; + DWORD dwType; + DWORD dwSize; + TCHAR szValue[c_MaxValueLength + 1]; + + this->push_back(EntryTemplate); + CScsiDeviceListEntry & entry = this->back(); + + dwSize = sizeof(szValue); + + m_lLastError = RegQueryValueEx( hKey, + _T("Identifier"), + NULL, + &dwType, + (LPBYTE)&szValue[0], + &dwSize); + + if (m_lLastError == ERROR_SUCCESS) { + entry.m_lpszIdentifier = _tcsdup(szValue); + } else { +#if defined(_DEBUG) + _tcscpy(m_szLastOperation, _T("Reading value ")); + PrintLastError(_T("Identifier")); +#endif + } + + dwSize = sizeof(szValue); + + m_lLastError = RegQueryValueEx( hKey, + _T("DeviceName"), + NULL, + &dwType, + (LPBYTE)&szValue[0], + &dwSize); + + if (m_lLastError == ERROR_SUCCESS) { + entry.m_lpszDeviceName = _tcsdup(szValue); + } else { +#if defined(_DEBUG) + _tcscpy(m_szLastOperation, _T("Reading value ")); + PrintLastError(_T("DeviceName")); +#endif + } + + dwSize = sizeof(szValue); + + m_lLastError = RegQueryValueEx( hKey, + _T("Type"), + NULL, + &dwType, + (LPBYTE)&szValue[0], + &dwSize); + + if (m_lLastError == ERROR_SUCCESS) { + if (_tcscmp(_T("CdRomPeripheral"), szValue) == 0) { + entry.m_eDeviceType = CScsiDeviceListEntry::CDRom; + } else if (_tcscmp(_T("DiskPeripheral"), szValue) == 0) { + entry.m_eDeviceType = CScsiDeviceListEntry::Disk; + } else if (_tcscmp(_T("MediumChangerPeripheral"), szValue) == 0) { + entry.m_eDeviceType = CScsiDeviceListEntry::Changer; + } else if (_tcscmp(_T("TapePeripheral"), szValue) == 0) { + entry.m_eDeviceType = CScsiDeviceListEntry::Tape; + } + } else { +#if defined(_DEBUG) + _tcscpy(m_szLastOperation, _T("Reading value ")); + PrintLastError(_T("Type")); +#endif + } + + entry.m_dwDeviceId = dwDeviceId; + + return true; +} + +void +CScsiDeviceList::PrintLastError(LPTSTR lpszName) +{ + LPTSTR lpszMessage = NULL; + + _fputts(_T("Error: "), stderr); + _fputts(m_szLastOperation, stderr); + _fputtc(_T('"'), stderr); + _fputts(lpszName != NULL ? lpszName : m_szLastKey, stderr); + _fputts(_T("\" - "), stderr); + + FormatMessage( FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER, + NULL, m_lLastError, 0, (LPTSTR)&lpszMessage, 0, NULL); + + if (lpszMessage != NULL) { + _fputts(lpszMessage, stderr); + LocalFree(lpszMessage); + } +} diff --git a/src/win32/tools/ScsiDeviceList.h b/src/win32/tools/ScsiDeviceList.h new file mode 100644 index 00000000..ce88981a --- /dev/null +++ b/src/win32/tools/ScsiDeviceList.h @@ -0,0 +1,169 @@ +/* + * ScsiDeviceList.cpp - Class which provides information on installed devices. + * + * Author: Robert Nelson, August, 2006 + * + * Version $Id$ + * + * This file was contributed to the Bacula project by Robert Nelson. 
+ * + * Robert Nelson has been granted a perpetual, worldwide, + * non-exclusive, no-charge, royalty-free, irrevocable copyright + * license to reproduce, prepare derivative works of, publicly + * display, publicly perform, sublicense, and distribute the original + * work contributed by Robert Nelson to the Bacula project in source + * or object form. + * + * If you wish to license contributions from Robert Nelson + * under an alternate open source license please contact + * Robert Nelson . + */ +/* + Bacula® - The Network Backup Solution + + Copyright (C) 2006-2006 Free Software Foundation Europe e.V. + + The main author of Bacula is Kern Sibbald, with contributions from + many others, a complete list can be found in the file AUTHORS. + This program is Free Software; you can redistribute it and/or + modify it under the terms of version three of the GNU Affero General Public + License as published by the Free Software Foundation and included + in the file LICENSE. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + + Bacula® is a registered trademark of Kern Sibbald. + The licensor of Bacula is the Free Software Foundation Europe + (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich, + Switzerland, email:ftf@fsfeurope.org. +*/ + +#pragma once +#include "vector" + +class CScsiDeviceListEntry +{ + friend class CScsiDeviceList; + + static LPCTSTR c_DeviceTypes[]; + static const int c_MaxDevicePathLength = 16; // :255:255:255:255 + +public: + enum DeviceType { Unknown, CDRom, Changer, Disk, Tape }; + + CScsiDeviceListEntry(void); + CScsiDeviceListEntry(const CScsiDeviceListEntry &other); + ~CScsiDeviceListEntry(void); + + inline CScsiDeviceListEntry &operator =(const CScsiDeviceListEntry &other); + + inline DeviceType GetType() { return m_eDeviceType; } + inline LPCTSTR GetTypeName() { return c_DeviceTypes[m_eDeviceType]; } + inline LPCTSTR GetIdentifier() { return m_lpszIdentifier != NULL ? m_lpszIdentifier : _T(""); } + inline LPCTSTR GetDeviceName() { return m_lpszDeviceName != NULL ? m_lpszDeviceName : _T(""); } + inline LPCTSTR GetDevicePath(); + +private: + DeviceType m_eDeviceType; + LPTSTR m_lpszIdentifier; + LPTSTR m_lpszDeviceName; + DWORD m_dwDeviceId; + TCHAR m_szDevicePath[c_MaxDevicePathLength + 1]; +}; + +CScsiDeviceListEntry & +CScsiDeviceListEntry::operator =(const CScsiDeviceListEntry &other) +{ + m_eDeviceType = other.m_eDeviceType; + + if (m_lpszIdentifier != NULL) + { + free(m_lpszIdentifier); + } + m_lpszIdentifier = other.m_lpszIdentifier != NULL ? _tcsdup(other.m_lpszIdentifier) : NULL; + + if (m_lpszDeviceName != NULL) + { + free(m_lpszDeviceName); + } + m_lpszDeviceName = other.m_lpszDeviceName != NULL ? 
_tcsdup(other.m_lpszDeviceName) : NULL; + + m_dwDeviceId = other.m_dwDeviceId; + _tcscpy(m_szDevicePath, other.m_szDevicePath); + + return *this; +} + +LPCTSTR +CScsiDeviceListEntry::GetDevicePath() +{ + if (m_szDevicePath[0] == _T('\0')) + { + _sntprintf( m_szDevicePath, c_MaxDevicePathLength, + _T("%d:%d:%d:%d"), + (m_dwDeviceId >> 24) & 0xFF, + (m_dwDeviceId >> 16) & 0xFF, + (m_dwDeviceId >> 8) & 0xFF, + m_dwDeviceId & 0xFF); + m_szDevicePath[c_MaxDevicePathLength] = _T('\0'); + } + + return m_szDevicePath; +} + +class CScsiDeviceList : + public std::vector +{ + static TCHAR c_ScsiPath[]; + static LPCTSTR c_lpszFormatList[]; + +// \\Scsi\\Scsi Port 255\\Scsi Bus 255\\Target Id 255\\Logical Unit Id 255 +// 1 4 1 13 1 12 1 13 1 19 = 66 + static const int c_MaxKeyPathLength = 66; + +// Logical Unit Id 255 + static const int c_MaxSubkeyLength = 19; + +// Identifier = 28, DeviceName = 10+, Type = 23 + static const int c_MaxValueLength = 30; + +// Adapter \\ Bus \\ Target \\ LUN + static const int c_MaxKeyDepth = 4; + +public: + inline CScsiDeviceList(void); + inline ~CScsiDeviceList(void); + + bool Populate(); + void PrintLastError(LPTSTR lpszName = NULL); + +protected: + bool ProcessKey(HKEY hKey, int iLevel, DWORD dwDeviceId); + bool ProcessValues(HKEY hKey, DWORD dwDeviceId); + +private: + TCHAR m_szLastOperation[80 + 1]; // Max length "Enumerating subkeys of " + TCHAR m_szLastKey[c_MaxKeyPathLength + 1]; + DWORD m_dwLastKeyLength; + LONG m_lLastError; +}; + +CScsiDeviceList::CScsiDeviceList(void) +{ + m_szLastOperation[0] = _T('\0'); + m_szLastKey[0] = _T('\0'); + m_dwLastKeyLength = 0; + m_lLastError = 0; +} + +CScsiDeviceList::~CScsiDeviceList(void) +{ +} diff --git a/src/win32/tools/bsmtp/bsmtp.vcproj b/src/win32/tools/bsmtp/bsmtp.vcproj new file mode 100644 index 00000000..97fb96b1 --- /dev/null +++ b/src/win32/tools/bsmtp/bsmtp.vcproj @@ -0,0 +1,229 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/win32/tools/dbcheck/dbcheck.vcproj b/src/win32/tools/dbcheck/dbcheck.vcproj new file mode 100644 index 00000000..d847d39e --- /dev/null +++ b/src/win32/tools/dbcheck/dbcheck.vcproj @@ -0,0 +1,293 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/win32/tools/drivetype/drivetype.vcproj b/src/win32/tools/drivetype/drivetype.vcproj new file mode 100644 index 00000000..cbd8568d --- /dev/null +++ b/src/win32/tools/drivetype/drivetype.vcproj @@ -0,0 +1,228 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/win32/tools/fstype/fstype.vcproj b/src/win32/tools/fstype/fstype.vcproj new file mode 100644 index 00000000..6a52b0f0 --- /dev/null +++ b/src/win32/tools/fstype/fstype.vcproj @@ -0,0 +1,230 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/win32/tools/scsilist.cpp b/src/win32/tools/scsilist.cpp new file mode 100644 index 00000000..7b9f3ecf --- /dev/null +++ b/src/win32/tools/scsilist.cpp @@ -0,0 +1,119 @@ +/* + * scsilist.cpp - Outputs the contents of a ScsiDeviceList. + * + * Author: Robert Nelson, August, 2006 + * + * Version $Id$ + * + * This file was contributed to the Bacula project by Robert Nelson. 
+ * + * Robert Nelson has been granted a perpetual, worldwide, + * non-exclusive, no-charge, royalty-free, irrevocable copyright + * license to reproduce, prepare derivative works of, publicly + * display, publicly perform, sublicense, and distribute the original + * work contributed by Robert Nelson to the Bacula project in source + * or object form. + * + * If you wish to license contributions from Robert Nelson + * under an alternate open source license please contact + * Robert Nelson . + */ +/* + Bacula® - The Network Backup Solution + + Copyright (C) 2006-2006 Free Software Foundation Europe e.V. + + The main author of Bacula is Kern Sibbald, with contributions from + many others, a complete list can be found in the file AUTHORS. + This program is Free Software; you can redistribute it and/or + modify it under the terms of version three of the GNU Affero General Public + License as published by the Free Software Foundation and included + in the file LICENSE. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + + Bacula® is a registered trademark of Kern Sibbald. + The licensor of Bacula is the Free Software Foundation Europe + (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich, + Switzerland, email:ftf@fsfeurope.org. +*/ + +#if defined(_MSC_VER) && defined(_DEBUG) +#include +#else +#include +#endif + +#include +#include +#include + +#include "ScsiDeviceList.h" + +#if defined(_MSC_VER) && defined(_DEBUG) +#define new DEBUG_NEW +#endif + +int _tmain(int argc, _TCHAR* argv[]) +{ +#if defined(_MSC_VER) && defined(_DEBUG) + CMemoryState InitialMemState, FinalMemState, DiffMemState; + + InitialMemState.Checkpoint(); + + { +#endif + + CScsiDeviceList DeviceList; + + if (!DeviceList.Populate()) + { + DeviceList.PrintLastError(); + return 1; + } + +#define HEADING \ + _T("Device Type Physical Name\n") \ + _T("====== ==== ======== ====\n") + + _fputts(HEADING, stdout); + + for (DWORD index = 0; index < DeviceList.size(); index++) { + + CScsiDeviceListEntry &entry = DeviceList[index]; + + if (entry.GetType() != CScsiDeviceListEntry::Disk) { + + _tprintf(_T("%-28s %-7s %-11s %-27s\n"), + entry.GetIdentifier(), + entry.GetTypeName(), + entry.GetDevicePath(), + entry.GetDeviceName()); + } + } + +#if defined(_MSC_VER) && defined(_DEBUG) + } + + InitialMemState.DumpAllObjectsSince(); + + FinalMemState.Checkpoint(); + DiffMemState.Difference(InitialMemState, FinalMemState); + DiffMemState.DumpStatistics(); +#endif + + if (argc > 1 && _tcsnicmp(argv[1], _T("/pause"), sizeof(_T("/pause")) - sizeof(TCHAR)) == 0) { + _fputts(_T("\nPress any key to continue\n"), stderr); + _getch(); + } + + return 0; +} diff --git a/src/win32/tools/scsilist/scsilist.vcproj b/src/win32/tools/scsilist/scsilist.vcproj new file mode 100644 index 00000000..3afa5ca3 --- /dev/null +++ b/src/win32/tools/scsilist/scsilist.vcproj @@ -0,0 +1,216 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/win32/tools/testfind/testfind.vcproj b/src/win32/tools/testfind/testfind.vcproj new file mode 100644 index 
00000000..e94d4587 --- /dev/null +++ b/src/win32/tools/testfind/testfind.vcproj @@ -0,0 +1,289 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/win32/tools/testls/testls.vcproj b/src/win32/tools/testls/testls.vcproj new file mode 100644 index 00000000..a56303cb --- /dev/null +++ b/src/win32/tools/testls/testls.vcproj @@ -0,0 +1,229 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/win32/win32_installer/ConfigPage1.nsh b/src/win32/win32_installer/ConfigPage1.nsh new file mode 100644 index 00000000..2a16fb55 --- /dev/null +++ b/src/win32/win32_installer/ConfigPage1.nsh @@ -0,0 +1,294 @@ +Function EnterConfigPage1 + ${If} $AutomaticInstall = 1 + Abort + ${EndIf} + + IntOp $R0 $NewComponents & ${ComponentsFileAndStorage} + + ${If} $R0 = 0 + Abort + ${EndIf} + + FileOpen $R5 "$PLUGINSDIR\ConfigPage1.ini" w + + StrCpy $R6 1 ; Field Number + StrCpy $R7 0 ; Top + + IntOp $R0 $NewComponents & ${ComponentFile} + ${If} $R0 <> 0 + IntOp $R8 $R7 + 52 + FileWrite $R5 '[Field $R6]$\r$\nType="GroupBox"$\r$\nText="Client"$\r$\nLeft=0$\r$\nTop=$R7$\r$\nRight=300$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 12 + + IntOp $R8 $R7 + 8 + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Name"$\r$\nLeft=6$\r$\nTop=$R7$\r$\nRight=26$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + + IntOp $R8 $R8 + 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nState=$ConfigClientName$\r$\nLeft=50$\r$\nTop=$R7$\r$\nRight=158$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 2 + + IntOp $R8 $R8 - 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Port"$\r$\nLeft=172$\r$\nTop=$R7$\r$\nRight=188$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + + IntOp $R8 $R8 + 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nFlags="ONLY_NUMBERS"$\r$\nState=$ConfigClientPort$\r$\nLeft=190$\r$\nTop=$R7$\r$\nRight=218$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 2 + + IntOp $R8 $R8 - 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Max Jobs"$\r$\nLeft=238$\r$\nTop=$R7$\r$\nRight=270$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + + IntOp $R8 $R8 + 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nFlags="ONLY_NUMBERS"$\r$\nState=$ConfigClientMaxJobs$\r$\nLeft=274$\r$\nTop=$R7$\r$\nRight=294$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 16 + + IntOp $R8 $R7 + 8 + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Password"$\r$\nLeft=6$\r$\nTop=$R7$\r$\nRight=38$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + + IntOp $R8 $R8 + 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nState=$ConfigClientPassword$\r$\nLeft=50$\r$\nTop=$R7$\r$\nRight=294$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 14 + + IntOp $R8 $R7 + 10 + FileWrite $R5 '[Field $R6]$\r$\nType="Checkbox"$\r$\nState=$ConfigClientInstallService$\r$\nText="Install as service"$\r$\nLeft=50$\r$\nTop=$R7$\r$\nRight=118$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + + FileWrite $R5 '[Field $R6]$\r$\nType="Checkbox"$\r$\nState=$ConfigClientStartService$\r$\nText="Start after install"$\r$\nLeft=190$\r$\nTop=$R7$\r$\nRight=260$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 16 + 
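    ; (Throughout this page $R6 is the running InstallOptions field number and
+    ; $R7/$R8 the running Top/Bottom layout coordinates being written to
+    ; ConfigPage1.ini; the Storage group below repeats the same pattern.) +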
${Endif} + + IntOp $R0 $NewComponents & ${ComponentStorage} + ${If} $R0 <> 0 + IntOp $R8 $R7 + 52 + FileWrite $R5 '[Field $R6]$\r$\nType="GroupBox"$\r$\nText="Storage"$\r$\nLeft=0$\r$\nTop=$R7$\r$\nRight=300$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 12 + + IntOp $R8 $R7 + 8 + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Name"$\r$\nLeft=6$\r$\nTop=$R7$\r$\nRight=26$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + + IntOp $R8 $R8 + 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nState=$ConfigStorageName$\r$\nLeft=50$\r$\nTop=$R7$\r$\nRight=158$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 2 + + IntOp $R8 $R8 - 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Port"$\r$\nLeft=172$\r$\nTop=$R7$\r$\nRight=188$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + + IntOp $R8 $R8 + 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nFlags="ONLY_NUMBERS"$\r$\nState=$ConfigStoragePort$\r$\nLeft=190$\r$\nTop=$R7$\r$\nRight=218$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 2 + + IntOp $R8 $R8 - 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Max Jobs"$\r$\nLeft=238$\r$\nTop=$R7$\r$\nRight=270$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + + IntOp $R8 $R8 + 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nFlags="ONLY_NUMBERS"$\r$\nState=$ConfigStorageMaxJobs$\r$\nLeft=274$\r$\nTop=$R7$\r$\nRight=294$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 16 + + IntOp $R8 $R7 + 8 + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Password"$\r$\nLeft=6$\r$\nTop=$R7$\r$\nRight=38$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + + IntOp $R8 $R8 + 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nState=$ConfigStoragePassword$\r$\nLeft=50$\r$\nTop=$R7$\r$\nRight=294$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 14 + + IntOp $R8 $R7 + 10 + FileWrite $R5 '[Field $R6]$\r$\nType="Checkbox"$\r$\nState=$ConfigStorageInstallService$\r$\nText="Install as service"$\r$\nLeft=50$\r$\nTop=$R7$\r$\nRight=118$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + + FileWrite $R5 '[Field $R6]$\r$\nType="Checkbox"$\r$\nState=$ConfigStorageStartService$\r$\nText="Start after install"$\r$\nLeft=190$\r$\nTop=$R7$\r$\nRight=260$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 16 + ${Endif} + + IntOp $R6 $R6 - 1 + + FileWrite $R5 "[Settings]$\r$\nNumFields=$R6$\r$\n" + + FileClose $R5 + + !insertmacro MUI_HEADER_TEXT "$(TITLE_ConfigPage1)" "$(SUBTITLE_ConfigPage1)" + !insertmacro MUI_INSTALLOPTIONS_INITDIALOG "ConfigPage1.ini" + Pop $HDLG ;HWND of dialog + + ; Initialize Controls + + StrCpy $R6 1 ; Field Number + + IntOp $R0 $NewComponents & ${ComponentFile} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 2 + + ; Client Name + !insertmacro MUI_INSTALLOPTIONS_READ $HCTL "ConfigPage1.ini" "Field $R6" "HWND" + SendMessage $HCTL ${EM_LIMITTEXT} 30 0 + + IntOp $R6 $R6 + 2 + + ; Client Port Number + !insertmacro MUI_INSTALLOPTIONS_READ $HCTL "ConfigPage1.ini" "Field $R6" "HWND" + SendMessage $HCTL ${EM_LIMITTEXT} 5 0 + + IntOp $R6 $R6 + 2 + + ; Max Jobs + !insertmacro MUI_INSTALLOPTIONS_READ $HCTL "ConfigPage1.ini" "Field $R6" "HWND" + SendMessage $HCTL ${EM_LIMITTEXT} 3 0 + + IntOp $R6 $R6 + 5 + ${Endif} + + IntOp $R0 $NewComponents & ${ComponentStorage} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 2 + + ; Storage Name + !insertmacro MUI_INSTALLOPTIONS_READ $HCTL "ConfigPage1.ini" "Field $R6" "HWND" + 
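    ; EM_LIMITTEXT caps what can be typed into the edit control just looked up
+    ; (30 characters for names, 5 for ports, 3 for Max Jobs on this page). +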
SendMessage $HCTL ${EM_LIMITTEXT} 30 0 + + IntOp $R6 $R6 + 2 + + ; Storage Port Number + !insertmacro MUI_INSTALLOPTIONS_READ $HCTL "ConfigPage1.ini" "Field $R6" "HWND" + SendMessage $HCTL ${EM_LIMITTEXT} 5 0 + + IntOp $R6 $R6 + 2 + + ; Max Jobs + !insertmacro MUI_INSTALLOPTIONS_READ $HCTL "ConfigPage1.ini" "Field $R6" "HWND" + SendMessage $HCTL ${EM_LIMITTEXT} 3 0 + + IntOp $R6 $R6 + 5 + ${Endif} + + !insertmacro MUI_INSTALLOPTIONS_SHOW + + ; Process results + + StrCpy $R6 3 + + IntOp $R0 $NewComponents & ${ComponentFile} + ${If} $R0 <> 0 + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigClientName "ConfigPage1.ini" "Field $R6" "State" + + IntOp $R6 $R6 + 2 + + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigClientPort "ConfigPage1.ini" "Field $R6" "State" + + IntOp $R6 $R6 + 2 + + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigClientMaxJobs "ConfigPage1.ini" "Field $R6" "State" + + IntOp $R6 $R6 + 2 + + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigClientPassword "ConfigPage1.ini" "Field $R6" "State" + + IntOp $R6 $R6 + 1 + + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigClientInstallService "ConfigPage1.ini" "Field $R6" "State" + + IntOp $R6 $R6 + 1 + + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigClientStartService "ConfigPage1.ini" "Field $R6" "State" + + IntOp $R6 $R6 + 3 + ${Endif} + + IntOp $R0 $NewComponents & ${ComponentStorage} + ${If} $R0 <> 0 + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigStorageName "ConfigPage1.ini" "Field $R6" "State" + + IntOp $R6 $R6 + 2 + + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigStoragePort "ConfigPage1.ini" "Field $R6" "State" + + IntOp $R6 $R6 + 2 + + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigStorageMaxJobs "ConfigPage1.ini" "Field $R6" "State" + + IntOp $R6 $R6 + 2 + + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigStoragePassword "ConfigPage1.ini" "Field $R6" "State" + + IntOp $R6 $R6 + 1 + + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigStorageInstallService "ConfigPage1.ini" "Field $R6" "State" + + IntOp $R6 $R6 + 1 + + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigStorageStartService "ConfigPage1.ini" "Field $R6" "State" + + IntOp $R6 $R6 + 3 + ${Endif} +FunctionEnd + +Function LeaveConfigPage1 + StrCpy $R6 5 + + IntOp $R0 $NewComponents & ${ComponentFile} + ${If} $R0 <> 0 + !insertmacro MUI_INSTALLOPTIONS_READ $R0 "ConfigPage1.ini" "Field $R6" "State" + ${If} $R0 < 1024 + ${OrIf} $R0 > 65535 + MessageBox MB_OK "Port must be between 1024 and 65535 inclusive." + Abort + ${EndIf} + + IntOp $R6 $R6 + 2 + + !insertmacro MUI_INSTALLOPTIONS_READ $R0 "ConfigPage1.ini" "Field $R6" "State" + ${If} $R0 < 1 + ${OrIf} $R0 > 99 + MessageBox MB_OK "Max Jobs must be between 1 and 99 inclusive." + Abort + ${EndIf} + + IntOp $R6 $R6 + 9 + ${Endif} + + IntOp $R0 $NewComponents & ${ComponentStorage} + ${If} $R0 <> 0 + !insertmacro MUI_INSTALLOPTIONS_READ $R0 "ConfigPage1.ini" "Field $R6" "State" + ${If} $R0 < 1024 + ${OrIf} $R0 > 65535 + MessageBox MB_OK "Port must be between 1024 and 65535 inclusive." + Abort + ${EndIf} + + IntOp $R6 $R6 + 2 + + !insertmacro MUI_INSTALLOPTIONS_READ $R0 "ConfigPage1.ini" "Field $R6" "State" + ${If} $R0 < 1 + ${OrIf} $R0 > 99 + MessageBox MB_OK "Max Jobs must be between 1 and 99 inclusive." 
+ Abort + ${EndIf} + + IntOp $R6 $R6 + 9 + ${Endif} +FunctionEnd diff --git a/src/win32/win32_installer/ConfigPage2.nsh b/src/win32/win32_installer/ConfigPage2.nsh new file mode 100644 index 00000000..d1cafb1f --- /dev/null +++ b/src/win32/win32_installer/ConfigPage2.nsh @@ -0,0 +1,454 @@ +Function EnterConfigPage2 + IntOp $R0 $NewComponents & ${ComponentsRequiringUserConfig} + + ${If} $R0 = 0 + Abort + ${EndIf} + + FileOpen $R5 "$PLUGINSDIR\ConfigPage2.ini" w + + StrCpy $R6 1 ; Field Number + StrCpy $R7 0 ; Top + + IntOp $R0 $NewComponents & ${ComponentDirector} + ${If} $R0 <> 0 + ${If} $AutomaticInstall = 1 + IntOp $R8 $R7 + 54 + ${Else} + IntOp $R8 $R7 + 92 + ${EndIf} + FileWrite $R5 '[Field $R6]$\r$\nType="GroupBox"$\r$\nText="Director"$\r$\nLeft=0$\r$\nTop=$R7$\r$\nRight=300$\r$\nBottom=$R8$\r$\n$\r$\n' + ${Else} + IntOp $R0 $NewComponents & ${ComponentsTextAndGuiConsoles} + ${If} $R0 <> 0 + IntOp $R8 $R7 + 54 + ${Else} + IntOp $R8 $R7 + 26 + ${EndIf} + FileWrite $R5 '[Field $R6]$\r$\nType="GroupBox"$\r$\nText="Enter Director Information"$\r$\nLeft=0$\r$\nTop=$R7$\r$\nRight=300$\r$\nBottom=$R8$\r$\n$\r$\n' + ${EndIf} + + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 12 + + IntOp $R0 $NewComponents & ${ComponentDirector} + ${If} $R0 <> 0 + ${If} "$ConfigDirectorName" == "" + StrCpy $ConfigDirectorName "$HostName-dir" + ${EndIf} + ${If} "$ConfigDirectorPassword" == "" + StrCpy $ConfigDirectorPassword "$LocalDirectorPassword" + ${EndIf} + ${Else} + ${If} "$ConfigDirectorName" == "$HostName-dir" + StrCpy $ConfigDirectorName "" + ${EndIf} + ${If} "$ConfigDirectorPassword" == "$LocalDirectorPassword" + StrCpy $ConfigDirectorPassword "" + ${EndIf} + ${EndIf} + + IntOp $R0 $NewComponents & ${ComponentDirector} + ${If} $R0 = 0 + ${OrIf} $AutomaticInstall = 0 + IntOp $R8 $R7 + 8 + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="DIR Name"$\r$\nLeft=6$\r$\nTop=$R7$\r$\nRight=60$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + + IntOp $R8 $R8 + 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nState=$ConfigDirectorName$\r$\nLeft=60$\r$\nTop=$R7$\r$\nRight=158$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + + ${If} $AutomaticInstall = 0 + IntOp $R0 $NewComponents & ${ComponentsDirectorAndTextGuiConsoles} + ${If} $R0 <> 0 + IntOp $R7 $R7 + 2 + IntOp $R8 $R8 - 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="DIR Port"$\r$\nLeft=172$\r$\nTop=$R7$\r$\nRight=188$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + + IntOp $R8 $R8 + 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nFlags="ONLY_NUMBERS"$\r$\nState=$ConfigDirectorPort$\r$\nLeft=190$\r$\nTop=$R7$\r$\nRight=218$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + ${EndIf} + + IntOp $R0 $NewComponents & ${ComponentDirector} + ${If} $R0 <> 0 + IntOp $R7 $R7 + 2 + IntOp $R8 $R8 - 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Max Jobs"$\r$\nLeft=238$\r$\nTop=$R7$\r$\nRight=270$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + + IntOp $R8 $R8 + 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nFlags="ONLY_NUMBERS"$\r$\nState=$ConfigDirectorMaxJobs$\r$\nLeft=274$\r$\nTop=$R7$\r$\nRight=294$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + ${EndIf} + ${EndIf} + + IntOp $R7 $R7 + 14 + ${EndIf} + + IntOp $R0 $NewComponents & ${ComponentsTextAndGuiConsoles} + ${If} $R0 <> 0 + ${OrIf} $AutomaticInstall = 0 + IntOp $R0 $NewComponents & ${ComponentsDirectorAndTextGuiConsoles} + ${If} $R0 <> 0 + IntOp $R7 $R7 + 2 + IntOp $R8 $R7 + 8 + + FileWrite $R5 '[Field 
$R6]$\r$\nType="Label"$\r$\nText="DIR Password"$\r$\nLeft=6$\r$\nTop=$R7$\r$\nRight=60$\r$\nBottom=$R8$\r$\n$\r$\n' + + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + IntOp $R8 $R8 + 2 + + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nState=$ConfigDirectorPassword$\r$\nLeft=60$\r$\nTop=$R7$\r$\nRight=294$\r$\nBottom=$R8$\r$\n$\r$\n' + + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 14 + ${EndIf} + ${EndIf} + + IntOp $R0 $NewComponents & ${ComponentDirector} + ${If} $R0 <> 0 + IntOp $R7 $R7 + 2 + IntOp $R8 $R7 + 8 + + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Mail Server"$\r$\nLeft=6$\r$\nTop=$R7$\r$\nRight=48$\r$\nBottom=$R8$\r$\n$\r$\n' + + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + IntOp $R8 $R8 + 2 + + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nState=$ConfigDirectorMailServer$\r$\nLeft=50$\r$\nTop=$R7$\r$\nRight=294$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 16 + IntOp $R8 $R7 + 8 + + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Mail Address"$\r$\nLeft=6$\r$\nTop=$R7$\r$\nRight=48$\r$\nBottom=$R8$\r$\n$\r$\n' + + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + IntOp $R8 $R8 + 2 + + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nState=$ConfigDirectorMailAddress$\r$\nLeft=50$\r$\nTop=$R7$\r$\nRight=294$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 16 + IntOp $R8 $R7 + 8 + + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Database"$\r$\nLeft=6$\r$\nTop=$R7$\r$\nRight=38$\r$\nBottom=$R8$\r$\n$\r$\n' + + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + IntOp $R8 $R8 + 2 + + ${If} $ConfigDirectorDB = 0 + ${If} $MySQLPath != "" + StrCpy $ConfigDirectorDB 1 + ${ElseIf} $PostgreSQLPath != "" + StrCpy $ConfigDirectorDB 2 + ${Else} + StrCpy $ConfigDirectorDB 3 + ${EndIf} + ${EndIf} + + ${If} $ConfigDirectorDB = 1 + StrCpy $R9 1 + ${Else} + StrCpy $R9 0 + ${EndIf} + + FileWrite $R5 '[Field $R6]$\r$\nType="RadioButton"$\r$\nState=$R9$\r$\nText="MySQL"$\r$\nFlags="GROUP"$\r$\nLeft=50$\r$\nTop=$R7$\r$\nRight=90$\r$\nBottom=$R8$\r$\n$\r$\n' + + IntOp $R6 $R6 + 1 + + ${If} $ConfigDirectorDB = 2 + StrCpy $R9 1 + ${Else} + StrCpy $R9 0 + ${EndIf} + + FileWrite $R5 '[Field $R6]$\r$\nType="RadioButton"$\r$\nState=$R9$\r$\nText="PostgreSQL"$\r$\nFlags="NOTABSTOP"$\r$\nLeft=94$\r$\nTop=$R7$\r$\nRight=146$\r$\nBottom=$R8$\r$\n$\r$\n' + + IntOp $R6 $R6 + 1 + + ${If} $ConfigDirectorDB = 3 + StrCpy $R9 1 + ${Else} + StrCpy $R9 0 + ${EndIf} + + FileWrite $R5 '[Field $R6]$\r$\nType="RadioButton"$\r$\nState=$R9$\r$\nText="Sqlite"$\r$\nFlags="NOTABSTOP"$\r$\nLeft=150$\r$\nTop=$R7$\r$\nRight=182$\r$\nBottom=$R8$\r$\n$\r$\n' + + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 12 + + ${If} $AutomaticInstall = 0 + IntOp $R8 $R7 + 10 + FileWrite $R5 '[Field $R6]$\r$\nType="Checkbox"$\r$\nState=$ConfigDirectorInstallService$\r$\nText="Install as service"$\r$\nLeft=50$\r$\nTop=$R7$\r$\nRight=118$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + + FileWrite $R5 '[Field $R6]$\r$\nType="Checkbox"$\r$\nState=$ConfigDirectorStartService$\r$\nText="Start after install"$\r$\nLeft=190$\r$\nTop=$R7$\r$\nRight=260$\r$\nBottom=$R8$\r$\n$\r$\n' + + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 12 + ${EndIf} + ${Else} + IntOp $R0 $NewComponents & ${ComponentsTextAndGuiConsoles} + ${If} $R0 <> 0 + IntOp $R7 $R7 + 2 + IntOp $R8 $R7 + 8 + + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="DIR Address"$\r$\nLeft=6$\r$\nTop=$R7$\r$\nRight=60$\r$\nBottom=$R8$\r$\n$\r$\n' + + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + IntOp $R8 $R8 + 2 + + FileWrite $R5 '[Field 
$R6]$\r$\nType="Text"$\r$\nState=$ConfigDirectorAddress$\r$\nLeft=60$\r$\nTop=$R7$\r$\nRight=294$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 14 + IntOp $R8 $R7 + 8 + ${EndIf} + ${EndIf} + + IntOp $R7 $R7 + 4 + + ${If} $AutomaticInstall = 0 + IntOp $R0 $NewComponents & ${ComponentsFileAndStorageAndDirector} + ${If} $R0 <> 0 + IntOp $R8 $R7 + 42 + + FileWrite $R5 '[Field $R6]$\r$\nType="GroupBox"$\r$\nText="Monitor"$\r$\nLeft=0$\r$\nTop=$R7$\r$\nRight=300$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 12 + + IntOp $R8 $R7 + 8 + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Name"$\r$\nLeft=6$\r$\nTop=$R7$\r$\nRight=26$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + + IntOp $R8 $R8 + 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nState=$ConfigMonitorName$\r$\nLeft=50$\r$\nTop=$R7$\r$\nRight=150$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 16 + IntOp $R8 $R7 + 8 + + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Password"$\r$\nLeft=6$\r$\nTop=$R7$\r$\nRight=38$\r$\nBottom=$R8$\r$\n$\r$\n' + + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + IntOp $R8 $R8 + 2 + + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nState=$ConfigMonitorPassword$\r$\nLeft=50$\r$\nTop=$R7$\r$\nRight=294$\r$\nBottom=$R8$\r$\n$\r$\n' + + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 20 + ${EndIf} + ${EndIf} + + IntOp $R6 $R6 - 1 + FileWrite $R5 "[Settings]$\r$\nNumFields=$R6$\r$\n" + + FileClose $R5 + + IntOp $R0 $NewComponents & ${ComponentsFileAndStorage} + ${If} $R0 = 0 + ${OrIf} $AutomaticInstall = 1 + !insertmacro MUI_HEADER_TEXT "$(TITLE_ConfigPage1)" "$(SUBTITLE_ConfigPage1)" + ${Else} + !insertmacro MUI_HEADER_TEXT "$(TITLE_ConfigPage2)" "$(SUBTITLE_ConfigPage2)" + ${EndIf} + + !insertmacro MUI_INSTALLOPTIONS_INITDIALOG "ConfigPage2.ini" + Pop $HDLG ;HWND of dialog + + ; Initialize Controls + StrCpy $R6 2 ; Field Number + + IntOp $R0 $NewComponents & ${ComponentDirector} + ${If} $R0 = 0 + ${OrIf} $AutomaticInstall = 0 + ; Name + IntOp $R6 $R6 + 1 + !insertmacro MUI_INSTALLOPTIONS_READ $HCTL "ConfigPage2.ini" "Field $R6" "HWND" + SendMessage $HCTL ${EM_LIMITTEXT} 30 0 + IntOp $R6 $R6 + 1 + + ${If} $AutomaticInstall = 0 + IntOp $R0 $NewComponents & ${ComponentsDirectorAndTextGuiConsoles} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 1 + ; Port Number + !insertmacro MUI_INSTALLOPTIONS_READ $HCTL "ConfigPage2.ini" "Field $R6" "HWND" + SendMessage $HCTL ${EM_LIMITTEXT} 5 0 + IntOp $R6 $R6 + 1 + ${EndIf} + + IntOp $R0 $NewComponents & ${ComponentDirector} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 1 + ; Max Jobs + !insertmacro MUI_INSTALLOPTIONS_READ $HCTL "ConfigPage2.ini" "Field $R6" "HWND" + SendMessage $HCTL ${EM_LIMITTEXT} 3 0 + + IntOp $R6 $R6 + 1 + ${EndIf} + ${EndIf} + ${EndIf} + + IntOp $R0 $NewComponents & ${ComponentsTextAndGuiConsoles} + ${If} $R0 <> 0 + ${OrIf} $AutomaticInstall = 0 + IntOp $R0 $NewComponents & ${ComponentsDirectorAndTextGuiConsoles} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 2 + ${EndIf} + ${EndIf} + + IntOp $R0 $NewComponents & ${ComponentDirector} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 9 + + ${If} $AutomaticInstall = 0 + IntOp $R6 $R6 + 2 + ${EndIf} + ${Else} + IntOp $R0 $NewComponents & ${ComponentsTextAndGuiConsoles} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 2 + ${EndIf} + ${EndIf} + + ${If} $AutomaticInstall = 0 + IntOp $R0 $NewComponents & ${ComponentsFileAndStorageAndDirector} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 2 + !insertmacro MUI_INSTALLOPTIONS_READ $HCTL "ConfigPage2.ini" "Field $R6" "HWND" + SendMessage $HCTL 
${EM_LIMITTEXT} 30 0 + IntOp $R6 $R6 + 2 + ${EndIf} + ${EndIf} + + !insertmacro MUI_INSTALLOPTIONS_SHOW + + ; Process results + + StrCpy $R6 2 + + IntOp $R0 $NewComponents & ${ComponentDirector} + ${If} $R0 = 0 + ${OrIf} $AutomaticInstall = 0 + IntOp $R6 $R6 + 1 + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigDirectorName "ConfigPage2.ini" "Field $R6" "State" + IntOp $R6 $R6 + 1 + + ${If} $AutomaticInstall = 0 + IntOp $R0 $NewComponents & ${ComponentsDirectorAndTextGuiConsoles} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 1 + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigDirectorPort "ConfigPage2.ini" "Field $R6" "State" + IntOp $R6 $R6 + 1 + ${EndIf} + + IntOp $R0 $NewComponents & ${ComponentDirector} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 1 + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigDirectorMaxJobs "ConfigPage2.ini" "Field $R6" "State" + IntOp $R6 $R6 + 1 + ${EndIf} + ${EndIf} + ${EndIf} + + IntOp $R0 $NewComponents & ${ComponentsTextAndGuiConsoles} + ${If} $R0 <> 0 + ${OrIf} $AutomaticInstall = 0 + IntOp $R0 $NewComponents & ${ComponentsDirectorAndTextGuiConsoles} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 1 + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigDirectorPassword "ConfigPage2.ini" "Field $R6" "State" + IntOp $R6 $R6 + 1 + ${EndIf} + ${EndIf} + + IntOp $R0 $NewComponents & ${ComponentDirector} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 1 + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigDirectorMailServer "ConfigPage2.ini" "Field $R6" "State" + IntOp $R6 $R6 + 2 + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigDirectorMailAddress "ConfigPage2.ini" "Field $R6" "State" + IntOp $R6 $R6 + 2 + !insertmacro MUI_INSTALLOPTIONS_READ $R5 "ConfigPage2.ini" "Field $R6" "State" + ${If} $R5 = 1 + StrCpy $ConfigDirectorDB 1 + ${Endif} + IntOp $R6 $R6 + 1 + !insertmacro MUI_INSTALLOPTIONS_READ $R5 "ConfigPage2.ini" "Field $R6" "State" + ${If} $R5 = 1 + StrCpy $ConfigDirectorDB 2 + ${Endif} + IntOp $R6 $R6 + 1 + !insertmacro MUI_INSTALLOPTIONS_READ $R5 "ConfigPage2.ini" "Field $R6" "State" + ${If} $R5 = 1 + StrCpy $ConfigDirectorDB 3 + ${Endif} + IntOp $R6 $R6 + 1 + + ${If} $AutomaticInstall = 0 + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigDirectorInstallService "ConfigPage2.ini" "Field $R6" "State" + IntOp $R6 $R6 + 1 + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigDirectorStartService "ConfigPage2.ini" "Field $R6" "State" + IntOp $R6 $R6 + 1 + ${EndIf} + ${Else} + IntOp $R0 $NewComponents & ${ComponentsTextAndGuiConsoles} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 1 + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigDirectorAddress "ConfigPage2.ini" "Field $R6" "State" + IntOp $R6 $R6 + 1 + ${EndIf} + ${EndIf} + + ${If} $AutomaticInstall = 0 + IntOp $R0 $NewComponents & ${ComponentsFileAndStorageAndDirector} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 2 + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigMonitorName "ConfigPage2.ini" "Field $R6" "State" + IntOp $R6 $R6 + 2 + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigMonitorPassword "ConfigPage2.ini" "Field $R6" "State" + ${EndIf} + ${EndIf} +FunctionEnd + +Function LeaveConfigPage2 + ${If} $AutomaticInstall = 0 + StrCpy $R6 4 + + IntOp $R0 $NewComponents & ${ComponentsDirectorAndTextGuiConsoles} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 1 + !insertmacro MUI_INSTALLOPTIONS_READ $R0 "ConfigPage2.ini" "Field $R6" "State" + ${If} $R0 < 1024 + ${OrIf} $R0 > 65535 + MessageBox MB_OK "Port must be between 1024 and 65535 inclusive." 
+ Abort + ${EndIf} + IntOp $R6 $R6 + 1 + ${EndIf} + + IntOp $R0 $NewComponents & ${ComponentDirector} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 1 + !insertmacro MUI_INSTALLOPTIONS_READ $R0 "ConfigPage2.ini" "Field $R6" "State" + ${If} $R0 < 1 + ${OrIf} $R0 > 99 + MessageBox MB_OK "Max Jobs must be between 1 and 99 inclusive." + Abort + ${EndIf} + IntOp $R6 $R6 + 1 + ${EndIf} + ${EndIf} +FunctionEnd diff --git a/src/win32/win32_installer/DumpLog.nsh b/src/win32/win32_installer/DumpLog.nsh new file mode 100644 index 00000000..064a3700 --- /dev/null +++ b/src/win32/win32_installer/DumpLog.nsh @@ -0,0 +1,48 @@ +!ifndef LVM_GETITEMCOUNT + !define LVM_GETITEMCOUNT 0x1004 +!endif +!define LVM_GETITEMTEXT 0x102D + +Function DumpLog + Exch $5 + Push $0 + Push $1 + Push $2 + Push $3 + Push $4 + Push $6 + + FindWindow $0 "#32770" "" $HWNDPARENT + GetDlgItem $0 $0 1016 + StrCmp $0 0 error + FileOpen $5 $5 "w" + StrCmp $5 0 error + SendMessage $0 ${LVM_GETITEMCOUNT} 0 0 $6 + System::Alloc ${NSIS_MAX_STRLEN} + Pop $3 + StrCpy $2 0 + System::Call "*(i, i, i, i, i, i, i, i, i) i \ + (0, 0, 0, 0, 0, r3, ${NSIS_MAX_STRLEN}) .r1" + loop: StrCmp $2 $6 done + System::Call "User32::SendMessageA(i, i, i, i) i \ + ($0, ${LVM_GETITEMTEXT}, $2, r1)" + System::Call "*$3(&t${NSIS_MAX_STRLEN} .r4)" + FileWrite $5 "$4$\r$\n" + IntOp $2 $2 + 1 + Goto loop + done: + FileClose $5 + System::Free $1 + System::Free $3 + Goto exit + error: + MessageBox MB_OK error + exit: + Pop $6 + Pop $4 + Pop $3 + Pop $2 + Pop $1 + Pop $0 + Exch $5 +FunctionEnd diff --git a/src/win32/win32_installer/InstallType.ini b/src/win32/win32_installer/InstallType.ini new file mode 100644 index 00000000..73fb8d93 --- /dev/null +++ b/src/win32/win32_installer/InstallType.ini @@ -0,0 +1,56 @@ +; +; Note: certain text in this file is overwritten by the code in +; InstallType.nsh +; + +[Settings] +NumFields=6 + +[Field 1] +Type=Label +Text=This is a new installation. Please choose the installation type. +Left=0 +Right=300 +Top=0 +Bottom=28 + +[Field 2] +Type=GroupBox +Text=Installation Type +Left=0 +Right=300 +Top=32 +Bottom=136 + +[Field 3] +Type=RadioButton +Text=Automatic +State=1 +Left=6 +Right=52 +Top=44 +Bottom=54 + +[Field 4] +Type=RadioButton +Text=Custom (not recommended) +Left=6 +Right=252 +Top=90 +Bottom=100 + +[Field 5] +Type=Label +Text=The software will be installed in the default directory "Program Files\\Bacula". The configuration files will be generated using defaults applicable to most installations. +Left=17 +Right=295 +Top=58 +Bottom=86 + +[Field 6] +Type=Label +Text=You have more options, but you will have to manually edit your bacula-fd.conf file before Bacula will work. +Left=17 +Right=295 +Top=104 +Bottom=132 diff --git a/src/win32/win32_installer/InstallType.nsh b/src/win32/win32_installer/InstallType.nsh new file mode 100644 index 00000000..e4a9711d --- /dev/null +++ b/src/win32/win32_installer/InstallType.nsh @@ -0,0 +1,93 @@ +Function EnterInstallType + Push $R0 + Push $R1 + Push $R2 + + ; Check if this is an upgrade by looking for an uninstaller configured + ; in the registry. 
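+  ; The result decides $InstallType: an InstallLocation under Software\Bacula
+  ; means an upgrade of a new-style install, an uninstall string alone means a
+  ; migration from an old-style install, and neither means a fresh install.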
+ ReadRegStr $R0 HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Bacula" "UninstallString" + + ${If} "$R0" != "" + ; Check registry for new installer + ReadRegStr $R1 HKLM "Software\Bacula" "InstallLocation" + ${If} "$R1" != "" + ; New Installer + StrCpy $OldInstallDir $R1 + StrCpy $InstallType ${UpgradeInstall} + + SetShellVarContext all + + StrCpy $R1 "$APPDATA\Bacula" + StrCpy $R2 "$INSTDIR\Doc" + + ReadRegDWORD $PreviousComponents HKLM "Software\Bacula" "Components" + + WriteINIStr "$PLUGINSDIR\InstallType.ini" "Field 1" "Text" "A previous installation has been found in $OldInstallDir. Please choose the installation type for any additional components you select." + WriteINIStr "$PLUGINSDIR\InstallType.ini" "Field 5" "Text" "The configuration files for additional components will be generated using defaults applicable to most installations." + WriteINIStr "$PLUGINSDIR\InstallType.ini" "Field 6" "Text" "You have more options, but you will have to manually edit your bacula-fd.conf file before Bacula will work." + + ReadRegDWORD $ConfigDirectorDB HKLM Software\Bacula Database + + ${If} $ConfigDirectorDB = 0 + IntOp $R0 $PreviousComponents & ${ComponentDirector} + ${If} $R0 <> 0 + StrCpy $ConfigDirectorDB 1 + ${EndIf} + ${EndIf} + ${Else} + ; Processing Upgrade - Get Install Directory + ${StrRep} $R0 $R0 '"' '' + ${GetParent} $R0 $OldInstallDir + + ; Old Installer + StrCpy $InstallType ${MigrateInstall} + StrCpy $R1 "$OldInstallDir\bin" + StrCpy $R2 "$OldInstallDir\Doc" + + WriteINIStr "$PLUGINSDIR\InstallType.ini" "Field 1" "Text" "An old installation has been found in $OldInstallDir. The Configuration will be migrated. Please choose the installation type for any additional components you select." + WriteINIStr "$PLUGINSDIR\InstallType.ini" "Field 5" "Text" "The software will be installed in the default directory $\"C:\Program Files\Bacula$\". The configuration files for additional components will be generated using defaults applicable to most installations." + WriteINIStr "$PLUGINSDIR\InstallType.ini" "Field 6" "Text" "You have more options, but you will have to manually edit your bacula-fd.conf file before Bacula will work." + ${EndIf} + ${Else} + ; New Install + StrCpy $InstallType ${NewInstall} + WriteINIStr "$PLUGINSDIR\InstallType.ini" "Field 5" "Text" "The software will be installed in the default directory $\"C:\Program Files\Bacula$\". The configuration files will be generated using defaults applicable to most installations." + WriteINIStr "$PLUGINSDIR\InstallType.ini" "Field 6" "Text" "You have more options, but you will have to manually edit your bacula-fd.conf file before Bacula will work." 
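+    ; (The static text of InstallType.ini is overwritten above so the page
+    ; describes the detected scenario; see the note at the top of that file.)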
+ ${EndIf} + + ${If} $InstallType <> ${NewInstall} + ${AndIf} $PreviousComponents = 0 + ${If} ${FileExists} "$R1\bacula-fd.conf" + IntOp $PreviousComponents $PreviousComponents | ${ComponentFile} + ${EndIf} + ${If} ${FileExists} "$R1\bconsole.conf" + IntOp $PreviousComponents $PreviousComponents | ${ComponentTextConsole} + ${EndIf} + ${If} ${FileExists} "$R1\bat.conf" + IntOp $PreviousComponents $PreviousComponents | ${ComponentBatConsole} + ${EndIf} + ${If} ${FileExists} "$R2\main.pdf" + IntOp $PreviousComponents $PreviousComponents | ${ComponentPDFDocs} + ${EndIf} + ${EndIf} + + !InsertMacro MUI_HEADER_TEXT "$(TITLE_InstallType)" "$(SUBTITLE_InstallType)" + !InsertMacro MUI_INSTALLOPTIONS_INITDIALOG "InstallType.ini" + Pop $HDLG ;HWND of dialog + + !insertmacro MUI_INSTALLOPTIONS_SHOW + + ; Process Results + + !insertmacro MUI_INSTALLOPTIONS_READ $R0 "InstallType.ini" "Field 3" "State" + + ${If} $R0 = 1 + StrCpy $AutomaticInstall 1 + ${Else} + StrCpy $AutomaticInstall 0 + ${EndIf} + + Pop $R2 + Pop $R1 + Pop $R0 +FunctionEnd diff --git a/src/win32/win32_installer/Makefile b/src/win32/win32_installer/Makefile new file mode 100644 index 00000000..5b8d0970 --- /dev/null +++ b/src/win32/win32_installer/Makefile @@ -0,0 +1,178 @@ +# +# Copyright (C) 2000-2018 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# +# Makefile for win32 bacula executables +# Using MinGW cross-compiler on GNU/Linux +# +# Written by Robert Nelson, June 2006 +# + +include ../Makefile.inc + +VERSION := $(shell sed -ne 's/^.define[ \t]VERSION[ \t][ \t]*"\(.*\)"/\1/p' < ../../version.h) +RELEASE ?= $(shell awk '/define RELEASE [0-9]+/ { print $$3 }' ../../version.h) + +DEFINES := \ + -DVERSION=$(VERSION) \ + -DOUT_DIR=$(BUILDDIR)/release32 \ + -DSRC_DIR=release32 \ + -DBUILD_TOOLS=MinGW32 \ + -DWINVER=32 \ + -DBUILD_BAT=$(BUILD_BAT) + +INSTALL_EXE := $(BUILDDIR)/release32/winbacula-$(VERSION).exe + +BACULABINARIES := \ + bacula.dll \ + alldrives-fd.dll \ + exchange-fd.dll \ + bacula-fd.exe \ + bconsole.exe \ + bsleep.exe \ + bsmtp.exe \ + bacula-sd.exe \ + bextract.exe \ + bls.exe \ + bcopy.exe + +ifeq ($(bat),no) + BACULA_BINARIES=$(BACULABINARIES) + HELP= +else + BACULA_BINARIES=$(BACULABINARIES) bat.exe bacula-tray-monitor.exe + HELP=help +endif + +EXTRA= + +# bacula-sd.exe \ + + +DEPKGS_BINARIES := \ + libeay32.dll \ + pthreadGCE2.dll \ + zlib1.dll \ + ssleay32.dll \ + openssl.exe \ + sed.exe \ + expr64.exe \ + snooze.exe \ + QtCore4.dll \ + QtGui4.dll \ + libwinpthread-1.dll \ + libgcc_s_sjlj-1.dll \ + libstdc++-6.dll +# libgcc_s_dw2-1.dll + + +NONGCC_BINARIES := + +NONGCC_LIBRARIES := + +MINGW_BINARIES := + +SCRIPT_FILES := + +CAT_FILES := + +DIRD_FILES := + +SSL_FILES := \ + openssl.cnf + +LICENSE_FILES := \ + LICENSE + +########################################################################## + +# Targets + +.PHONY: all clean installer distclean + +all: $(HELP) docs $(INSTALL_EXE) $(EXTRA) + +installer: $(HELP) docs $(INSTALL_EXE) + +distclean: clean + +clean: + @echo "Cleaning `pwd`" + $(CMD_ECHO)-rm -f $(INSTALL_EXE) + $(CMD_ECHO)-rm -rf release32 + +help: + rm -rf release32/help + mkdir -p release32/help + cp -f $(BINDIR)/help/* release32/help/ + +docs: + rm -rf release32/docs + mkdir -p release32/docs/manuals/en/console + mkdir -p release32/docs/manuals/en/main + mkdir -p release32/docs/manuals/en/misc + mkdir -p release32/docs/manuals/en/problems + mkdir -p release32/docs/manuals/en/utility + cp -f $(DOCDIR)/docs/manuals/en/pdf-and-html/console/console.pdf release32/docs/manuals/en/console/ 
+ cp -f $(DOCDIR)/docs/manuals/en/pdf-and-html/main/main.pdf release32/docs/manuals/en/main/ + cp -f $(DOCDIR)/docs/manuals/en/pdf-and-html/misc/misc.pdf release32/docs/manuals/en/misc/ + cp -f $(DOCDIR)/docs/manuals/en/pdf-and-html/problems/problems.pdf release32/docs/manuals/en/problems/ + cp -f $(DOCDIR)/docs/manuals/en/pdf-and-html/utility/utility.pdf release32/docs/manuals/en/utility/ + +# +# Rules +# + +define Convert_Binary +release32/$$(notdir $(1)): $(1) + $$(call checkdir,$$@) + $(ECHO_CMD)cp -f $$^ $$@ ; \ + $(STAB2CV) $$@ +endef + +define Copy_Binary +release32/$$(notdir $(1)): $(1) + $$(call checkdir,$$@) + $(ECHO_CMD)cp -f $$^ $$@ +endef + +define Copy_Docs +release32/$(1): $(DOCDIR)/$(1) + $$(call checkdir,$$@) + cp -f $$^ $$(dir $$@) + $(ECHO_CMD)cp -f $$^ $$(dir $$@) +endef + +define Copy_Licenses +release32/$$(notdir $(1)): $(1) + $$(call checkdir,$$@) + $(ECHO_CMD)cp -f $$^ $$(dir $$@) +endef + +$(foreach file,$(addprefix $(DEPKGS)/bin/, $(DEPKGS_BINARIES)),$(eval $(call Convert_Binary,$(file)))) + +$(foreach file,$(addprefix $(DEPKGS)/bin/, $(NONGCC_BINARIES)),$(eval $(call Copy_Binary,$(file)))) + +$(foreach file,$(addprefix $(DEPKGS)/lib/, $(NONGCC_LIBRARIES)),$(eval $(call Copy_Binary,$(file)))) + +$(foreach file,$(addprefix $(BINDIR)/, $(BACULA_BINARIES)),$(eval $(call Convert_Binary,$(file)))) + +$(foreach file,$(addprefix $(DEPKGS)/ssl/, $(SSL_FILES)),$(eval $(call Copy_Binary,$(file)))) + +$(foreach file,$(addprefix ../scripts/, $(SCRIPT_FILES)),$(eval $(call Copy_Binary,$(file)))) + +$(foreach file,$(addprefix ../cats/, $(CAT_FILES)),$(eval $(call Copy_Binary,$(file)))) + +$(foreach file,$(addprefix ../../dird/, $(DIRD_FILES)),$(eval $(call Copy_Binary,$(file)))) + +$(foreach file,$(addprefix $(MAINDIR)/, $(LICENSE_FILES)),$(eval $(call Copy_Licenses,$(file)))) + +#$(foreach file,$(DOC_FILES),$(eval $(call Copy_Docs,$(file)))) + + +$(INSTALL_EXE): winbacula.nsi $(addprefix release32/,$(BACULA_BINARIES) $(SCRIPT_FILES) $(CAT_FILES) $(DEPKGS_BINARIES) $(NONGCC_BINARIES) $(NONGCC_LIBRARIES) $(MINGW_BINARIES) $(SSL_FILES) $(DIRD_FILES) $(LICENSE_FILES) ) + echo "makensis -V3 $(DEFINES) winbacula.nsi" + makensis -V3 $(DEFINES) winbacula.nsi + +include $(BUILDDIR)/Makefile.rules diff --git a/src/win32/win32_installer/Readme.txt b/src/win32/win32_installer/Readme.txt new file mode 100755 index 00000000..5068751a --- /dev/null +++ b/src/win32/win32_installer/Readme.txt @@ -0,0 +1,47 @@ +Bacula - Windows Version Disclaimer +=================================== + +Please note, only the Win32 Client (File daemon) is supported, and the other +components (Director, Storage daemon, their utilities) are not provided +because they have not been ported. + + +Bacula - Windows Version Notes +============================== + +These notes highlight how the Windows version of Bacula differs from the +other versions. It also provides any notes additional to the documentation. + +For detailed documentation on using, configuring and troubleshooting Bacula, +please consult the installed documentation or the online documentation at +http://www.bacula.org/?page=documentation. + + +Start Menu Items +---------------- +A number of menu items have been created in the Start menu under All Programs +in the Bacula submenu. They may be selected to edit the configuration files, +view the documentation or run one of the console or utility programs. The +choices available will vary depending on the options you chose to install. 
+ + +File Locations +-------------- +Everything is installed in the directory +"C:\Program Files\Bacula" unless a different directory was selected during +installation. + + +Code Page Problems +------------------- +Please note that Bacula expects the contents of the configuration files to be +written in UTF-8 format. Some translations of "Application Data" have accented +characters, and apparently the installer writes this translated data in the +standard Windows code page coding. This occurs for the Working Directory, and +when it happens the daemon will not start since Bacula cannot find the directory. +The workaround is to manually edit the appropriate conf file and ensure that it +is written out in UTF-8 format. + +The conf files can be edited with any UTF-8 compatible editor, or on most +modern Windows machines, you can edit them with notepad, then choose UTF-8 +output encoding before saving them. diff --git a/src/win32/win32_installer/Start.bat b/src/win32/win32_installer/Start.bat new file mode 100644 index 00000000..9d297aad --- /dev/null +++ b/src/win32/win32_installer/Start.bat @@ -0,0 +1,5 @@ +rem +rem Bacula start file for Win95/98/Me +rem +cd c:\Program Files\Bacula +c:\Program Files\Bacula\bacula-fd /service -c c:\Program Files\Bacula\bacula-fd.conf diff --git a/src/win32/win32_installer/Stop.bat b/src/win32/win32_installer/Stop.bat new file mode 100644 index 00000000..6330d958 --- /dev/null +++ b/src/win32/win32_installer/Stop.bat @@ -0,0 +1,5 @@ +rem +rem Bacula stop file for Win95/98/Me +rem +cd c:\Program Files\Bacula +c:\Program Files\Bacula\bacula-fd /kill diff --git a/src/win32/win32_installer/WriteTemplates.ini b/src/win32/win32_installer/WriteTemplates.ini new file mode 100644 index 00000000..3b3631c3 --- /dev/null +++ b/src/win32/win32_installer/WriteTemplates.ini @@ -0,0 +1,30 @@ +[Settings] +NumFields=3 +CancelEnabled=0 +BackEnabled=0 + +[Field 1] +Type="Label" +Text="A Template of the Client resource can be generated that contains the information about this Client. This template can then be copied to the Director computer and included in the Director's configuration file." +Left=7 +Right=293 +Top=6 +Bottom=32 + +[Field 2] +Type="CheckBox" +Text="Save Client template in:" +Left=6 +Right=240 +Top=38 +Bottom=48 + +[Field 3] +Type="FileRequest" +State="Client.conf" +Flags= +Filter=Configuration Files|*.conf|All Files|*.* +Left=16 +Right=288 +Top=50 +Bottom=62 diff --git a/src/win32/win32_installer/bacula-dir.conf.in b/src/win32/win32_installer/bacula-dir.conf.in new file mode 100644 index 00000000..d13bc77a --- /dev/null +++ b/src/win32/win32_installer/bacula-dir.conf.in @@ -0,0 +1,383 @@ +# +# Default Bacula Director Configuration file +# +# Copyright (C) 2000-2018 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# The only thing that MUST be changed is to add one or more +# file or directory names in the Include directive of the +# FileSet resource. +# +# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ @DISTVER@ +# +# You might also want to change the default email address +# from root to your address. See the "mail" and "operator" +# directives in the Messages resource. 
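+#
+# For illustration only (these paths are placeholders, not shipped defaults),
+# entries added to the Include section of the FileSet resource further down
+# look like the following; note the forward slashes, since Windows path
+# separators do not work correctly in FileSets:
+#
+#   Include {
+#     Options {
+#       signature = MD5
+#       ignore case = yes
+#     }
+#     File = "C:/Users"
+#     File = "C:/Data"
+#   }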
+# + +Director { # define myself + Name = @director_name@ + DIRport = @director_port@ # where we listen for UA connections + QueryFile = "@bin_dir@\\query.sql" + WorkingDirectory = "@working_dir@" + PidDirectory = "@working_dir@" + Maximum Concurrent Jobs = @director_maxjobs@ + Password = "@director_password@" # Console password + Messages = Daemon +} + +JobDefs { + Name = "DefaultJob" + Type = Backup + Level = Incremental + Client = @client_name@ + FileSet = "Test Set" + Schedule = "WeeklyCycle" + Storage = File + Messages = Standard + Pool = Default + Priority = 10 +} + + +# +# Define the main nightly save backup job +# By default, this job will back up to disk in C:/tmp +Job { + Name = "Client1" + JobDefs = "DefaultJob" + Write Bootstrap = "@working_dir@\\Client1.bsr" +} + +#Job { +# Name = "Client2" +# Client = @client_name@2 +# JobDefs = "DefaultJob" +# Write Bootstrap = "@working_dir@\\Client2.bsr" +#} + +# Backup the catalog database (after the nightly save) +Job { + Name = "BackupCatalog" + JobDefs = "DefaultJob" + Level = Full + FileSet="Catalog" + Schedule = "WeeklyCycleAfterBackup" + # This creates an ASCII copy of the catalog + RunBeforeJob = "\"@bin_dir@\\make_catalog_backup\" bacula bacula" + # This deletes the copy of the catalog + RunAfterJob = "\"@bin_dir@\\delete_catalog_backup\"" + Write Bootstrap = "@working_dir@\\BackupCatalog.bsr" + Priority = 11 # run after main backup +} + +# +# Standard Restore template, to be changed by Console program +# Only one such job is needed for all Jobs/Clients/Storage ... +# +Job { + Name = "RestoreFiles" + Type = Restore + Client=@client_name@ + FileSet="Test Set" + Storage = File + Pool = Default + Messages = Standard + Where = "C:\\tmp\\bacula-restores" +} + +# +# Note: Windows path separators do NOT work correctly in FileSets. +# +# List of files to be backed up +FileSet { + Name = "Test Set" + Include { + Options { + signature = MD5 + ignore case = yes + } +# +# Put your list of files here, preceded by 'File =', one per line +# or include an external list with: +# +# File = + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/win32/win32_installer/storage.conf.in b/src/win32/win32_installer/storage.conf.in new file mode 100644 index 00000000..ed4933d7 --- /dev/null +++ b/src/win32/win32_installer/storage.conf.in @@ -0,0 +1,14 @@ +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# Definition of file storage device +Storage { + Name = File +# Do not use "localhost" here + Address = @storage_address@ # N.B. 
Use a fully qualified name here + SDPort = @storage_port@ + Password = "@storage_password@" + Device = FileStorage + Media Type = File +} diff --git a/src/win32/win32_installer/tray-monitor.conf.in b/src/win32/win32_installer/tray-monitor.conf.in new file mode 100644 index 00000000..a9812256 --- /dev/null +++ b/src/win32/win32_installer/tray-monitor.conf.in @@ -0,0 +1,30 @@ +# +# Bacula Tray Monitor Configuration File +# + +Monitor { + Name = @monitor_name@ + Password = "@mon_password@" # password for the Directors + RefreshInterval = 30 seconds +} + +Client { + Name = @client_name@ + Address = localhost + FDPort = @client_port@ + Password = "@monitor_password@" +} + +#Storage { +# Name = @basename@-sd +# Address = @hostname@ +# SDPort = @sd_port@ +# Password = "@mon_sd_password@" # password for StorageDaemon +#} +# +#Director { +# Name = @basename@-dir +# DIRport = @dir_port@ +# address = @hostname@ +#} +# diff --git a/src/win32/win32_installer/winbacula.nsi b/src/win32/win32_installer/winbacula.nsi new file mode 100644 index 00000000..82d33a5e --- /dev/null +++ b/src/win32/win32_installer/winbacula.nsi @@ -0,0 +1,1241 @@ +# +# Copyright (C) 2000-2018 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +##{{NSIS_PLUS_BEGIN_PROJECT_SETTINGS}}## +#NAME "Release" +# CMD -DSRC_DIR=release32 -DSRC32_DIR=..\win32_installer\release32 -DSRC64_DIR=..\win64_installer\release64 -DOUT_DIR=release32 -DWINVER=32 -DVERSION=6.7.9 -DBUILD_TOOLS=NSIS-3.0b0 +# EXE C:\Program Files (x86)\NSIS\makensis.exe +# FLAGS 2 +##{{NSIS_PLUS_END_PROJECT_SETTINGS}}## + +; winbacula.nsi +; +; Began as a version written by Michel Meyers (michel@tcnnet.dyndns.org) +; +; Adapted by Kern Sibbald for native Win32 Bacula +; added a number of elements from Christopher Hull's installer +; +; D. Scott Barninger Nov 13 2004 +; D. Scott Barninger Dec 05 2004 +; D. 
Scott Barninger Apr 17 2005 +; Robert Nelson May 15 2006 +; +; Kern Sibbald October 2010 +; Remove server installs and +; install into a single bacula directory +; +; +; Command line options: +; +; /service - +; /start +; +; netsh firewall add portopening protocol=tcp port=9102 name="Bacula-FD" + + +!define PRODUCT "Bacula" + +; +; Include the Modern UI +; + +!include "MUI.nsh" +!include "LogicLib.nsh" +!include "FileFunc.nsh" +!include "Sections.nsh" +!include "StrFunc.nsh" +!include "WinMessages.nsh" +!include "x64.nsh" + +; +; Basics +; +; Name in Welcome screen +Name "Bacula 32 bit" +OutFile "${OUT_DIR}\bacula-win${WINVER}-${VERSION}.exe" +SetCompressor lzma +Caption "Bacula 32 bit Edition ${VERSION}" +VIProductVersion ${VERSION}.1 +VIAddVersionKey CompanyName "Bacula Project" +VIAddVersionKey LegalCopyright "Kern Sibbald" +VIAddVersionKey FileDescription "Bacula network backup and restore" +VIAddVersionKey FileVersion win${WINVER}-${VERSION} +VIAddVersionKey ProductVersion win${WINVER}-${VERSION} +VIAddVersionKey ProductName "Bacula" +VIAddVersionKey InternalName "Bacula" +VIAddVersionKey LegalTrademarks "Bacula is a registered trademark of Kern Sibbald" +VIAddVersionKey OriginalFilename "bacula.exe" + +InstallDir "C:\Program Files\Bacula" +InstallDirRegKey HKLM "Software\Bacula" "InstallLocation" + +InstType "Client" +InstType "Server" +;InstType "Full" + +!insertmacro GetParent + +${StrCase} +${StrRep} +${StrTok} +${StrTrimNewLines} + +; +; Pull in pages +; + +!define MUI_COMPONENTSPAGE_SMALLDESC + +!define MUI_HEADERIMAGE +!define MUI_BGCOLOR 739AB9 +!define MUI_HEADERIMAGE_BITMAP "bacula-logo.bmp" +!define MUI_HEADERIMAGE_LEFT +!define MUI_HEADERIMAGE_BITMAP_NOSTRETCH + +!InsertMacro MUI_PAGE_WELCOME +!InsertMacro MUI_PAGE_LICENSE "${SRC_DIR}\LICENSE" +Page custom EnterInstallType +!define MUI_PAGE_CUSTOMFUNCTION_SHOW PageComponentsShow +!InsertMacro MUI_PAGE_COMPONENTS +!define MUI_PAGE_CUSTOMFUNCTION_PRE PageDirectoryPre +!InsertMacro MUI_PAGE_DIRECTORY +Page custom EnterConfigPage1 LeaveConfigPage1 +Page custom EnterConfigPage2 LeaveConfigPage2 +!Define MUI_PAGE_CUSTOMFUNCTION_LEAVE LeaveInstallPage +!InsertMacro MUI_PAGE_INSTFILES +Page custom EnterWriteTemplates +!Define MUI_FINISHPAGE_SHOWREADME $INSTDIR\Readme.txt +!InsertMacro MUI_PAGE_FINISH + +!InsertMacro MUI_UNPAGE_WELCOME +!InsertMacro MUI_UNPAGE_CONFIRM +!InsertMacro MUI_UNPAGE_INSTFILES +!InsertMacro MUI_UNPAGE_FINISH + +!define MUI_ABORTWARNING + +!InsertMacro MUI_LANGUAGE "English" + +!InsertMacro GetParameters +!InsertMacro GetOptions + +DirText "Setup will install Bacula 32 bit ${VERSION} to the directory specified below. To install in a different folder, click Browse and select another folder." 
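+; Example unattended invocation (illustrative sketch; /noservice and /nostart
+; are parsed in .onInit below, /S is the standard NSIS silent switch):
+;   bacula-win${WINVER}-${VERSION}.exe /S /noservice /nostart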
+ +!InsertMacro MUI_RESERVEFILE_INSTALLOPTIONS +; +; Global Variables +; +Var OptService +Var OptStart +Var OptSilent + +Var CommonFilesDone + +Var OsIsNT + +Var HostName + +Var ConfigClientName +Var ConfigClientPort +Var ConfigClientMaxJobs +Var ConfigClientPassword +Var ConfigClientInstallService +Var ConfigClientStartService + +Var ConfigStorageName +Var ConfigStoragePort +Var ConfigStorageMaxJobs +Var ConfigStoragePassword +Var ConfigStorageInstallService +Var ConfigStorageStartService + +Var ConfigDirectorName +Var ConfigDirectorPort +Var ConfigDirectorMaxJobs +Var ConfigDirectorPassword +Var ConfigDirectorAddress +Var ConfigDirectorMailServer +Var ConfigDirectorMailAddress +Var ConfigDirectorDB +Var ConfigDirectorInstallService +Var ConfigDirectorStartService + +Var ConfigMonitorName +Var ConfigMonitorPassword + +Var LocalDirectorPassword +Var LocalHostAddress + +Var MySQLPath +Var MySQLVersion +Var PostgreSQLPath +Var PostgreSQLVersion + +Var AutomaticInstall +Var InstallType + +!define NewInstall 0 +!define UpgradeInstall 1 +!define MigrateInstall 2 + +Var OldInstallDir +Var PreviousComponents +Var NewComponents + +; Bit 0 = File Service +; 1 = Storage Service +; 2 = Director Service +; 3 = Command Console +; 4 = Bat Console +; 5 = wxWidgits Console +; 6 = Documentation (PDF) +; 7 = Documentation (HTML) +; 8 = alldrives Plugin +; 9 = Old Exchange Plugin +; 10 = Tray Monitor +; 11 = winbmr Plugin + +!define ComponentFile 1 +!define ComponentStorage 2 +!define ComponentDirector 4 +!define ComponentTextConsole 8 +!define ComponentBatConsole 16 +!define ComponentGUIConsole 32 +!define ComponentPDFDocs 64 +!define ComponentHTMLDocs 128 +!define MUI_PAGE_LICENSE "${SRC_DIR}\INSTALL" +!define ComponentAllDrivesPlugin 256 +!define ComponentOldExchangePlugin 512 +!define ComponentTrayMonitor 1024 +; !define ComponentWinBMRPlugin 2048 + +!define ComponentsRequiringUserConfig 63 +!define ComponentsFileAndStorage 3 +!define ComponentsFileAndStorageAndDirector 7 +!define ComponentsDirectorAndTextGuiConsoles 60 +!define ComponentsTextAndGuiConsoles 56 + +Var HDLG +Var HCTL + +Function .onInit + Push $R0 + Push $R1 + + ; Process Command Line Options + StrCpy $OptService 1 + StrCpy $OptStart 1 + StrCpy $OptSilent 0 + StrCpy $CommonFilesDone 0 + StrCpy $OsIsNT 0 + StrCpy $AutomaticInstall 0 + StrCpy $InstallType ${NewInstall} + StrCpy $OldInstallDir "" + StrCpy $PreviousComponents 0 + StrCpy $NewComponents 0 + StrCpy $MySQLPath "" + StrCpy $MySQLVersion "" + StrCpy $PostgreSQLPath "" + StrCpy $PostgreSQLVersion "" + StrCpy $LocalDirectorPassword "" + + ${GetParameters} $R0 + + ClearErrors + ${If} ${RunningX64} + MessageBox MB_OK "This is a 32 bit program, but the OS is an x64. Aborting ..." 
/SD IDOK + Abort + ${EndIf} + + + ${GetOptions} $R0 "/noservice" $R1 + IfErrors +2 + StrCpy $OptService 0 + + ClearErrors + ${GetOptions} $R0 "/nostart" $R1 + IfErrors +2 + StrCpy $OptStart 0 + + IfSilent 0 +2 + StrCpy $OptSilent 1 + + ReadRegStr $R0 HKLM "SOFTWARE\Microsoft\Windows NT\CurrentVersion" CurrentVersion + ${If} $R0 != "" + StrCpy $OsIsNT 1 + ${EndIf} + + Call GetComputerName + Pop $HostName + + Call GetHostName + Pop $LocalHostAddress + + Call GetUserName + + ; Configuration Defaults + + StrCpy $ConfigClientName "$HostName-fd" + StrCpy $ConfigClientPort 9102 + StrCpy $ConfigClientMaxJobs 10 + ;StrCpy $ConfigClientPassword + StrCpy $ConfigClientInstallService "$OptService" + StrCpy $ConfigClientStartService "$OptStart" + + StrCpy $ConfigStorageName "$HostName-sd" + StrCpy $ConfigStoragePort 9103 + StrCpy $ConfigStorageMaxJobs 10 + ;StrCpy $ConfigStoragePassword + StrCpy $ConfigStorageInstallService "$OptService" + StrCpy $ConfigStorageStartService "$OptStart" + + StrCpy $ConfigDirectorPort 9101 + + StrCpy $ConfigMonitorName "$HostName-mon" + ;StrCpy $ConfigMonitorPassword + +; PLUGINSDIR refers to temporary helper programs and not Bacula plugins! + InitPluginsDir + File "/oname=$PLUGINSDIR\openssl.exe" "${SRC_DIR}\openssl.exe" + File "/oname=$PLUGINSDIR\libeay32.dll" "${SRC_DIR}\libeay32.dll" + File "/oname=$PLUGINSDIR\ssleay32.dll" "${SRC_DIR}\ssleay32.dll" + File "/oname=$PLUGINSDIR\sed.exe" "${SRC_DIR}\sed.exe" + + !InsertMacro MUI_INSTALLOPTIONS_EXTRACT "InstallType.ini" + !InsertMacro MUI_INSTALLOPTIONS_EXTRACT "WriteTemplates.ini" + + SetPluginUnload alwaysoff + +; Generate random client password + nsExec::Exec '"$PLUGINSDIR\openssl.exe" rand -base64 -out $PLUGINSDIR\pw.txt 33' + pop $R0 + ${If} $R0 = 0 + FileOpen $R1 "$PLUGINSDIR\pw.txt" r + IfErrors +4 + FileRead $R1 $R0 + ${StrTrimNewLines} $ConfigClientPassword $R0 + FileClose $R1 + ${EndIf} + + SetPluginUnload manual + +; Generate random Storage daemon password + nsExec::Exec '"$PLUGINSDIR\openssl.exe" rand -base64 -out $PLUGINSDIR\pw.txt 33' + pop $R0 + ${If} $R0 = 0 + FileOpen $R1 "$PLUGINSDIR\pw.txt" r + IfErrors +4 + FileRead $R1 $R0 + ${StrTrimNewLines} $ConfigStoragePassword $R0 + FileClose $R1 + ${EndIf} + +; Generate random monitor password + nsExec::Exec '"$PLUGINSDIR\openssl.exe" rand -base64 -out $PLUGINSDIR\pw.txt 33' + pop $R0 + ${If} $R0 = 0 + FileOpen $R1 "$PLUGINSDIR\pw.txt" r + IfErrors +4 + FileRead $R1 $R0 + ${StrTrimNewLines} $ConfigMonitorPassword $R0 + FileClose $R1 + ${EndIf} + + Pop $R1 + Pop $R0 +FunctionEnd + +Function .onSelChange + Call UpdateComponentUI +FunctionEnd + +Function InstallCommonFiles + ${If} $CommonFilesDone = 0 + SetOutPath "$INSTDIR" + File "Readme.txt" + + SetOutPath "$INSTDIR" +!if "${BUILD_TOOLS}" == "MinGW32" + File "${SRC_DIR}\pthreadGCE2.dll" + File "${SRC_DIR}\ssleay32.dll" + File "${SRC_DIR}\libeay32.dll" + File "${SRC_DIR}\libwinpthread-1.dll" + File "${SRC_DIR}\libgcc_s_sjlj-1.dll" + File "${SRC_DIR}\libstdc++-6.dll" +!endif +!if "${BUILD_TOOLS}" == "MinGW64" + File "${SRC_DIR}\pthreadGCE.dll" + File "${SRC_DIR}\cryptoeay32-0.9.8.dll" + File "${SRC_DIR}\ssleay32-0.9.8.dll" +!endif + File "${SRC_DIR}\zlib1.dll" + File "${SRC_DIR}\bacula.dll" + + File "/oname=$INSTDIR\openssl.cnf" "${SRC_DIR}\openssl.cnf" + File "${SRC_DIR}\openssl.exe" + File "${SRC_DIR}\bsleep.exe" + File "${SRC_DIR}\bsmtp.exe" + File "${SRC_DIR}\expr64.exe" + File "${SRC_DIR}\snooze.exe" + + CreateShortCut "$SMPROGRAMS\Bacula\Documentation\View Readme.lnk" "write.exe" '"$INSTDIR\Readme.txt"' + + 
StrCpy $CommonFilesDone 1 + ${EndIf} +FunctionEnd + +Section "-Initialize" + + WriteRegStr HKLM Software\Bacula InstallLocation "$INSTDIR" + + Call GetSelectedComponents + Pop $R2 + WriteRegDWORD HKLM Software\Bacula Components $R2 + + ; remove start menu items + SetShellVarContext all + + Delete /REBOOTOK "$SMPROGRAMS\Bacula\Configuration\*" + Delete /REBOOTOK "$SMPROGRAMS\Bacula\Documentation\*" + Delete /REBOOTOK "$SMPROGRAMS\Bacula\*" + RMDir "$SMPROGRAMS\Bacula\Configuration" + RMDir "$SMPROGRAMS\Bacula\Documentation" + RMDir "$SMPROGRAMS\Bacula" + CreateDirectory "$SMPROGRAMS\Bacula" + CreateDirectory "$SMPROGRAMS\Bacula\Configuration" + CreateDirectory "$SMPROGRAMS\Bacula\Documentation" + + CreateDirectory "$INSTDIR" + CreateDirectory "$INSTDIR\working" + CreateDirectory "$INSTDIR\plugins" + + SetOutPath "$INSTDIR" + File "${SRC_DIR}\LICENSE" + Delete /REBOOTOK "$INSTDIR\License.txt" + +; Output a series of SED commands to configure the .conf file(s) + FileOpen $R1 $PLUGINSDIR\config.sed w + FileWrite $R1 "s;@VERSION@;${VERSION};g$\r$\n" + FileWrite $R1 "s;@DATE@;${__DATE__};g$\r$\n" + FileWrite $R1 "s;@DISTNAME@;Windows;g$\r$\n" + + StrCpy $R2 ${BUILD_TOOLS} + + Call GetHostName + Exch $R3 + Pop $R3 + + FileWrite $R1 "s;@DISTVER@;$R2;g$\r$\n" + + ${StrRep} $R2 "$INSTDIR\working" "\" "\\\\" + FileWrite $R1 's;@working_dir@;$R2;g$\r$\n' +; ${StrRep} $R2 "$INSTDIR\working" "\" "\\" +; FileWrite $R1 's;@working_dir_cmd@;$R2;g$\r$\n' + + ${StrRep} $R2 "$INSTDIR\plugins" "\" "\\\\" + FileWrite $R1 's;@fdplugins_dir@;$R2;g$\r$\n' + + ${StrRep} $R2 "$INSTDIR" "\" "/" + FileWrite $R1 "s;@BUILD_DIR@;$R2;g$\r$\n" + + FileWrite $R1 "s;@client_address@;$LocalHostAddress;g$\r$\n" + FileWrite $R1 "s;@client_name@;$ConfigClientName;g$\r$\n" + FileWrite $R1 "s;@client_port@;$ConfigClientPort;g$\r$\n" + FileWrite $R1 "s;@client_maxjobs@;$ConfigClientMaxJobs;g$\r$\n" + FileWrite $R1 "s;@client_password@;$ConfigClientPassword;g$\r$\n" + FileWrite $R1 "s;@storage_address@;$LocalHostAddress;g$\r$\n" + FileWrite $R1 "s;@storage_name@;$ConfigStorageName;g$\r$\n" + FileWrite $R1 "s;@storage_port@;$ConfigStoragePort;g$\r$\n" + FileWrite $R1 "s;@storage_maxjobs@;$ConfigStorageMaxJobs;g$\r$\n" + FileWrite $R1 "s;@storage_password@;$ConfigStoragePassword;g$\r$\n" + FileWrite $R1 "s;@director_name@;$ConfigDirectorName;g$\r$\n" + FileWrite $R1 "s;@director_port@;$ConfigDirectorPort;g$\r$\n" + FileWrite $R1 "s;@director_password@;$ConfigDirectorPassword;g$\r$\n" + FileWrite $R1 "s;@director_address@;$ConfigDirectorAddress;g$\r$\n" + FileWrite $R1 "s;@monitor_name@;$ConfigMonitorName;g$\r$\n" + FileWrite $R1 "s;@monitor_password@;$ConfigMonitorPassword;g$\r$\n" + + FileClose $R1 + + ${If} ${FileExists} "$OldInstallDir\bacula-fd.exe" + nsExec::ExecToLog '"$OldInstallDir\bacula-fd.exe" /kill' ; Shutdown any bacula that could be running + nsExec::Exec /TIMEOUT=200 'net stop bacula-fd' + Sleep 1000 + nsExec::ExecToLog '"$OldInstallDir\bacula-fd.exe" /remove' ; Remove existing service + ${EndIf} + + ${If} ${FileExists} "$INSTDIR\bacula-fd.exe" + nsExec::ExecToLog '"$INSTDIR\bacula-fd.exe" /kill' ; Shutdown any bacula that could be running + nsExec::Exec /TIMEOUT=200 'net stop bacula-fd' + ${EndIf} + + ${If} ${FileExists} "$OldInstallDir\bin\bacula-sd.exe" + nsExec::ExecToLog '"$OldInstallDir\bin\bacula-sd.exe" /kill' ; Shutdown any bacula that could be running + nsExec::Exec /TIMEOUT=200 'net stop bacula-sd' + Sleep 1000 + nsExec::ExecToLog '"$OldInstallDir\bin\bacula-sd.exe" /remove' ; Remove existing service + 
${EndIf} + + ${If} ${FileExists} "$INSTDIR\bacula-sd.exe" + nsExec::ExecToLog '"$INSTDIR\bacula-sd.exe" /kill' ; Shutdown any bacula that could be running + nsExec::Exec /TIMEOUT=200 'net stop bacula-sd' + ${EndIf} + Sleep 1000 + + +SectionEnd + +SectionGroup "Client" SecGroupClient + +Section "File Service" SecFileDaemon + SectionIn 1 2 3 + + SetOutPath "$INSTDIR" + + File "${SRC_DIR}\bacula-fd.exe" + File "/oname=$PLUGINSDIR\bacula-fd.conf" "bacula-fd.conf.in" + + StrCpy $0 "$INSTDIR" + StrCpy $1 bacula-fd.conf + Call ConfigEditAndCopy + + StrCpy $0 bacula-fd + StrCpy $1 "File Service" + StrCpy $2 $ConfigClientInstallService + StrCpy $3 $ConfigClientStartService + + Call InstallDaemon + + CreateShortCut "$SMPROGRAMS\Bacula\Configuration\Edit Client Configuration.lnk" "write.exe" '"$INSTDIR\bacula-fd.conf"' +SectionEnd + +SectionGroupEnd + +SectionGroup "Server" SecGroupServer + +Section "Storage Service" SecStorageDaemon + SectionIn 2 3 + + SetOutPath "$INSTDIR" + + File "${SRC_DIR}\bacula-sd.exe" + File "${SRC_DIR}\bcopy.exe" + File "${SRC_DIR}\bextract.exe" + File "${SRC_DIR}\bls.exe" + + File "/oname=$PLUGINSDIR\bacula-sd.conf" "bacula-sd.conf.in" + + StrCpy $0 "$INSTDIR" + StrCpy $1 bacula-sd.conf + Call ConfigEditAndCopy + + StrCpy $0 bacula-sd + StrCpy $1 "Storage Service" + StrCpy $2 $ConfigStorageInstallService + StrCpy $3 $ConfigStorageStartService + Call InstallDaemon + + CreateShortCut "$SMPROGRAMS\Bacula\Configuration\Edit Storage Configuration.lnk" "write.exe" '"$INSTDIR\bacula-sd.conf"' +SectionEnd + +SectionGroupEnd + + +SectionGroup "Consoles" SecGroupConsoles + +Section "Command Console" SecConsole + SectionIn 1 2 3 + + SetOutPath "$INSTDIR" + + File "${SRC_DIR}\bconsole.exe" + Call InstallCommonFiles + + File "/oname=$PLUGINSDIR\bconsole.conf" "bconsole.conf.in" + StrCpy $0 "$INSTDIR" + StrCpy $1 bconsole.conf + Call ConfigEditAndCopy + + CreateShortCut "$SMPROGRAMS\Bacula\bconsole.lnk" "$INSTDIR\bconsole.exe" '-c "$INSTDIR\bconsole.conf"' "$INSTDIR\bconsole.exe" 0 + CreateShortCut "$SMPROGRAMS\Bacula\Configuration\Edit Command Console Configuration.lnk" "write.exe" '"$INSTDIR\bconsole.conf"' + +SectionEnd + +Section "Bat Console" SecBatConsole + SectionIn 1 2 3 + + SetOutPath "$INSTDIR" + +!if "${BUILD_BAT}" == "yes" + Call InstallCommonFiles + File "${SRC_DIR}\QtCore4.dll" + File "${SRC_DIR}\QtGui4.dll" + File "${SRC_DIR}\libgcc_s_sjlj-1.dll" + + File "${SRC_DIR}\bat.exe" + + File "/oname=$PLUGINSDIR\bat.conf" "bat.conf.in" + StrCpy $0 "$INSTDIR" + StrCpy $1 bat.conf + Call ConfigEditAndCopy + + SetOutPath "$INSTDIR\help" + File "${SRC_DIR}\help\*" + SetOutPath "$INSTDIR" + + ; Create Start Menu entry + CreateShortCut "$SMPROGRAMS\Bacula\Bat.lnk" "$INSTDIR\bat.exe" '-c "$INSTDIR\bat.conf"' "$INSTDIR\bat.exe" 0 + CreateShortCut "$SMPROGRAMS\Bacula\Configuration\Edit Bat Configuration.lnk" "write.exe" '"$INSTDIR\bat.conf"' +!endif + +SectionEnd + +Section "Bacula Tray Monitor" SecTrayMonitor + SectionIn 1 2 3 + + SetOutPath "$INSTDIR" + +!if "${BUILD_BAT}" == "yes" + Call InstallCommonFiles + File "${SRC_DIR}\QtCore4.dll" + File "${SRC_DIR}\QtGui4.dll" + File "${SRC_DIR}\libgcc_s_sjlj-1.dll" + File "${SRC_DIR}\bacula-tray-monitor.exe" + + ;File "/oname=$PLUGINSDIR\bacula-tray-monitor.conf" "bacula-tray-monitor.conf.in" + StrCpy $0 "$INSTDIR" + StrCpy $1 bacula-tray-monitor.conf + ;Call ConfigEditAndCopy + + ; Create Start Menu entry + CreateShortCut "$SMPROGRAMS\Bacula\TrayMonitor.lnk" "$INSTDIR\bacula-tray-monitor.exe" "" "$INSTDIR\bacula-tray-monitor.exe" 0 +!endif + 
+SectionEnd + + +; Deleted because wxconsole is deprecated +;Section "Graphical Console" SecWxConsole +; SectionIn 1 2 3 + +; SetOutPath "$INSTDIR" +; +;SectionEnd + +SectionGroupEnd + + +SectionGroup "Plugins" SecGroupPlugins + +Section "alldrives Plugin" SecAllDrivesPlugin + SectionIn 1 2 3 + + SetOutPath "$INSTDIR\plugins" + File "${SRC_DIR}\alldrives-fd.dll" + SetOutPath "$INSTDIR" + +SectionEnd + +;Section "winbmr Plugin" SecWinBMRPlugin +; SectionIn 1 2 3 + +; SetOutPath "$INSTDIR\plugins" +; File "${SRC_DIR}\winbmr-fd.dll" +; SetOutPath "$INSTDIR" + +;SectionEnd + +Section "Old (deprecated) Exchange Plugin" SecOldExchangePlugin + SectionIn 1 2 3 + + SetOutPath "$INSTDIR\plugins" + File "${SRC_DIR}\exchange-fd.dll" + SetOutPath "$INSTDIR" + +SectionEnd + +SectionGroupEnd + + + +SectionGroup "Documentation" SecGroupDocumentation + +Section "Documentation (Acrobat Format)" SecDocPdf + SectionIn 1 2 3 + + SetOutPath "$INSTDIR\doc" + CreateDirectory "$INSTDIR\doc" + + File "${SRC_DIR}\docs\manuals\en\console\console.pdf" + File "${SRC_DIR}\docs\manuals\en\misc\misc.pdf" + File "${SRC_DIR}\docs\manuals\en\main\main.pdf" + File "${SRC_DIR}\docs\manuals\en\utility\utility.pdf" + File "${SRC_DIR}\docs\manuals\en\problems\problems.pdf" + CreateShortCut "$SMPROGRAMS\Bacula\Documentation\Console.lnk" '"$INSTDIR\doc\console.pdf"' + CreateShortCut "$SMPROGRAMS\Bacula\Documentation\Main.lnk" '"$INSTDIR\doc\main.pdf"' + CreateShortCut "$SMPROGRAMS\Bacula\Documentation\Misc.lnk" '"$INSTDIR\doc\misc.pdf"' + CreateShortCut "$SMPROGRAMS\Bacula\Documentation\Utility.lnk" '"$INSTDIR\doc\utility.pdf"' + CreateShortCut "$SMPROGRAMS\Bacula\Documentation\Problems.lnk" '"$INSTDIR\doc\problems.pdf"' + SetOutPath "$INSTDIR" +SectionEnd + +;Section "Documentation (HTML Format)" SecDocHtml +; SectionIn 3 + +; SetOutPath "$INSTDIR\doc" +; CreateDirectory "$INSTDIR\doc" + +; File "${SRC_DIR}\manual\bacula\*.html" +; File "${SRC_DIR}\manual\bacula\*.png" +; File "${SRC_DIR}\manual\bacula\*.css" +; CreateShortCut "$SMPROGRAMS\Bacula\Documentation\Manual (HTML).lnk" '"$INSTDIR\doc\index.html"' +;SectionEnd + +SectionGroupEnd + +Section "-Finish" + Push $R0 + + ${If} $OsIsNT = 1 + nsExec::ExecToLog 'cmd.exe /C echo Y|cacls "$INSTDIR\bacula-fd.conf" /G SYSTEM:F Administrators:F' + nsExec::ExecToLog 'cmd.exe /C echo Y|cacls "$INSTDIR\bacula-sd.conf" /G SYSTEM:F Administrators:F' + nsExec::ExecToLog 'cmd.exe /C echo Y|cacls "$INSTDIR\bat.conf" /G SYSTEM:F Administrators:F' + ${EndIf} + + ; Write the uninstall keys for Windows & create Start Menu entry + WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Bacula" "DisplayName" "Bacula" + WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Bacula" "InstallLocation" "$INSTDIR" + WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Bacula" "DisplayVersion" "${VERSION}" + ${StrTok} $R0 "${VERSION}" "." 0 0 + WriteRegDWORD HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Bacula" "VersionMajor" $R0 + ${StrTok} $R0 "${VERSION}" "." 
1 0 + WriteRegDWORD HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Bacula" "VersionMinor" $R0 + WriteRegDWORD HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Bacula" "NoModify" 1 + WriteRegDWORD HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Bacula" "NoRepair" 1 + WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Bacula" "URLUpdateInfo" "http://www.bacula.org" + WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Bacula" "URLInfoAbout" "http://www.bacula.org" + WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Bacula" "HelpLink" "http://www.bacula.org" + WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Bacula" "UninstallString" '"$INSTDIR\uninstall.exe"' + WriteUninstaller "$INSTDIR\Uninstall.exe" + CreateShortCut "$SMPROGRAMS\Bacula\Uninstall Bacula.lnk" "$INSTDIR\Uninstall.exe" "" "$INSTDIR\Uninstall.exe" 0 + + ${If} $OsIsNT = 1 + nsExec::ExecToLog 'net start bacula-fd' + nsExec::ExecToLog 'net start bacula-sd' + ${Else} + Exec '"$INSTDIR\bacula-fd.exe" /service -c "$INSTDIR\bacula-fd.conf"' + Exec '"$INSTDIR\bacula-sd.exe" /service -c "$INSTDIR\bacula-sd.conf"' + ${EndIf} + + Pop $R0 +SectionEnd + +; Extra Page descriptions + +LangString DESC_SecFileDaemon ${LANG_ENGLISH} "Install Bacula 32 bit File Daemon on this system." +LangString DESC_SecStorageDaemon ${LANG_ENGLISH} "Install Bacula 32 bit Storage Daemon on this system." +LangString DESC_SecConsole ${LANG_ENGLISH} "Install bconsole program on this system." +LangString DESC_SecBatConsole ${LANG_ENGLISH} "Install Bat graphical console program on this system." +LangString DESC_SecTrayMonitor ${LANG_ENGLISH} "Install Tray Monitor graphical program on this system." +LangString DESC_SecAllDrivesPlugin ${LANG_ENGLISH} "Install alldrives Plugin on this system." +; LangString DESC_SecWinBMRPlugin ${LANG_ENGLISH} "Install winbmr Plugin on this system." +LangString DESC_SecOldExchangePlugin ${LANG_ENGLISH} "Install old (deprecated) Exchange Plugin on this system." + + +LangString TITLE_ConfigPage1 ${LANG_ENGLISH} "Configuration" +LangString SUBTITLE_ConfigPage1 ${LANG_ENGLISH} "Set installation configuration." + +LangString TITLE_ConfigPage2 ${LANG_ENGLISH} "Configuration (continued)" +LangString SUBTITLE_ConfigPage2 ${LANG_ENGLISH} "Set installation configuration." + +LangString TITLE_InstallType ${LANG_ENGLISH} "Installation Type" +LangString SUBTITLE_InstallType ${LANG_ENGLISH} "Choose installation type." + +LangString TITLE_WriteTemplates ${LANG_ENGLISH} "Create Templates" +LangString SUBTITLE_WriteTemplates ${LANG_ENGLISH} "Create a resource template for inclusion in the Director's configuration file." + +!InsertMacro MUI_FUNCTION_DESCRIPTION_BEGIN + !InsertMacro MUI_DESCRIPTION_TEXT ${SecFileDaemon} $(DESC_SecFileDaemon) + !InsertMacro MUI_DESCRIPTION_TEXT ${SecStorageDaemon} $(DESC_SecStorageDaemon) + !InsertMacro MUI_DESCRIPTION_TEXT ${SecConsole} $(DESC_SecConsole) + !InsertMacro MUI_DESCRIPTION_TEXT ${SecBatConsole} $(DESC_SecBatConsole) + !InsertMacro MUI_DESCRIPTION_TEXT ${SecTrayMonitor} $(DESC_SecTrayMonitor) + !InsertMacro MUI_DESCRIPTION_TEXT ${SecAllDrivesPlugin} $(DESC_SecAllDrivesPlugin) +; !InsertMacro MUI_DESCRIPTION_TEXT ${SecWinBMRPlugin} $(DESC_SecWinBMRPlugin) + !InsertMacro MUI_DESCRIPTION_TEXT ${SecOldExchangePlugin} $(DESC_SecOldExchangePlugin) +!InsertMacro MUI_FUNCTION_DESCRIPTION_END + +; Uninstall section + +UninstallText "This will uninstall Bacula. Click Uninstall to continue."
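; A short usage note for the uninstall entries written in the "-Finish"
; section above: the registry values can be checked with the stock reg.exe
; tool, and the generated Uninstall.exe accepts the standard NSIS silent
; switch. The install path shown is only the default $INSTDIR, given here
; as an illustrative example:
;
;   reg query "HKLM\Software\Microsoft\Windows\CurrentVersion\Uninstall\Bacula" /v DisplayVersion
;   "C:\Program Files\Bacula\Uninstall.exe" /S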
+ +Section "Uninstall" + ; Shutdown any bacula that could be running + nsExec::ExecToLog '"$INSTDIR\bacula-fd.exe" /kill' + nsExec::Exec /TIMEOUT=200 'net stop bacula-fd' + Sleep 3000 + +; ReadRegDWORD $R0 HKLM "Software\Bacula" "Service_Bacula-fd" + ; Remove Bacula File Daemon service + nsExec::ExecToLog '"$INSTDIR\bacula-fd.exe" /remove' + + ; Remove Bacula Storage Daemon service + nsExec::ExecToLog '"$INSTDIR\bacula-sd.exe" /remove' + + ; remove registry keys + DeleteRegKey HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Bacula" + DeleteRegKey HKLM "Software\Bacula" + + ; remove start menu items + SetShellVarContext all + Delete /REBOOTOK "$SMPROGRAMS\Bacula\*" + RMDir "$SMPROGRAMS\Bacula" + + ; remove files and uninstaller (preserving config for now) + Delete /REBOOTOK "$INSTDIR\doc\*" + Delete /REBOOTOK "$INSTDIR\openssl.exe" + Delete /REBOOTOK "$INSTDIR\bacula-fd.exe" + Delete /REBOOTOK "$INSTDIR\bsleep.exe" + Delete /REBOOTOK "$INSTDIR\bsmtp.exe" + Delete /REBOOTOK "$INSTDIR\bconsole.exe" + Delete /REBOOTOK "$INSTDIR\expr64.exe" + Delete /REBOOTOK "$INSTDIR\snooze.exe" + Delete /REBOOTOK "$INSTDIR\LICENSE" + Delete /REBOOTOK "$INSTDIR\Readme.txt" + Delete /REBOOTOK "$INSTDIR\*.dll" + Delete /REBOOTOK "$INSTDIR\*.cnf" + Delete /REBOOTOK "$INSTDIR\*.sed" + Delete /REBOOTOK "$INSTDIR\*.cmd" + Delete /REBOOTOK "$INSTDIR\*.sql" + Delete /REBOOTOK "$INSTDIR\help\*" + Delete /REBOOTOK "$INSTDIR\plugins\alldrives-fd.dll" + Delete /REBOOTOK "$INSTDIR\plugins\exchange-fd.dll" +; Delete /REBOOTOK "$INSTDIR\plugins\winbmr-fd.dll" + + ; Check for existing installation + IfSilent +2 + MessageBox MB_YESNO|MB_ICONQUESTION \ + "Would you like to delete the current configuration files and the working state file?" /SD IDNO IDNO NoDel + + + Delete /REBOOTOK "$INSTDIR\*" + Delete /REBOOTOK "$INSTDIR\working\*" + Delete /REBOOTOK "$PLUGINSDIR\bacula-*.conf" + Delete /REBOOTOK "$PLUGINSDIR\*console.conf" + Delete /REBOOTOK "$PLUGINSDIR\*conf.in" + RMDir /REBOOTOK "$INSTDIR\plugins" + RMDir /REBOOTOK "$INSTDIR\working" + RMDir /REBOOTOK "$INSTDIR" +NoDel: + ; remove directories used + + RMDir "$INSTDIR\plugins" + RMDir "$INSTDIR\working" + RMDir "$INSTDIR\doc" + RMDir "$INSTDIR\help" + RMDir "$INSTDIR" +SectionEnd + +; +; $0 - Service Name (ie Bacula-FD) +; $1 - Service Description (ie Bacula File Daemon) +; $2 - Install as Service +; $3 - Start Service now +; +Function InstallDaemon + Call InstallCommonFiles + + WriteRegDWORD HKLM "Software\Bacula" "Service_$0" $2 + + ${If} $2 = 1 + nsExec::ExecToLog '"$INSTDIR\bacula-fd.exe" /kill' + nsExec::Exec /TIMEOUT=200 'net stop bacula-fd' + nsExec::ExecToLog '"$INSTDIR\bacula-sd.exe" /kill' + nsExec::Exec /TIMEOUT=200 'net stop bacula-sd' + nsExec::ExecToLog '"$INSTDIR\$0.exe" /remove' + nsExec::ExecToLog '"$INSTDIR\$0.exe" /install -c "$INSTDIR\$0.conf"' + + ${If} $OsIsNT <> 1 + File "Start.bat" + File "Stop.bat" + ${EndIf} + + ${EndIf} + +FunctionEnd + +Function GetComputerName + Push $R0 + Push $R1 + Push $R2 + + System::Call "kernel32::GetComputerNameA(t .R0, *i ${NSIS_MAX_STRLEN} R1) i.R2" + + ${StrCase} $R0 $R0 "L" + + Pop $R2 + Pop $R1 + Exch $R0 +FunctionEnd + +!define ComputerNameDnsFullyQualified 3 + +Function GetHostName + Push $R0 + Push $R1 + Push $R2 + + ${If} $OsIsNT = 1 + System::Call "kernel32::GetComputerNameExA(i ${ComputerNameDnsFullyQualified}, t .R0, *i ${NSIS_MAX_STRLEN} R1) i.R2 ?e" + ${If} $R2 = 0 + Pop $R2 + DetailPrint "GetComputerNameExA failed - LastError = $R2" + Call GetComputerName + Pop $R0 + ${Else} + Pop $R2 +
${EndIf} + ${Else} + Call GetComputerName + Pop $R0 + ${EndIf} + + Pop $R2 + Pop $R1 + Exch $R0 +FunctionEnd + +!define NameUserPrincipal 8 + +Function GetUserName + Push $R0 + Push $R1 + Push $R2 + + ${If} $OsIsNT = 1 + System::Call "secur32::GetUserNameExA(i ${NameUserPrincipal}, t .R0, *i ${NSIS_MAX_STRLEN} R1) i.R2 ?e" + ${If} $R2 = 0 + Pop $R2 + DetailPrint "GetUserNameExA failed - LastError = $R2" + Pop $R0 + StrCpy $R0 "" + ${Else} + Pop $R2 + ${EndIf} + ${Else} + StrCpy $R0 "" + ${EndIf} + + ${If} $R0 == "" + System::Call "advapi32::GetUserNameA(t .R0, *i ${NSIS_MAX_STRLEN} R1) i.R2 ?e" + ${If} $R2 = 0 + Pop $R2 + DetailPrint "GetUserNameA failed - LastError = $R2" + StrCpy $R0 "" + ${Else} + Pop $R2 + ${EndIf} + ${EndIf} + + Pop $R2 + Pop $R1 + Exch $R0 +FunctionEnd + +Function ConfigEditAndCopy + Push $R1 + + ${If} ${FileExists} "$0\$1" + StrCpy $R1 ".new" + ${Else} + StrCpy $R1 "" + ${EndIf} + + nsExec::ExecToLog '$PLUGINSDIR\sed.exe -f "$PLUGINSDIR\config.sed" -i.bak "$PLUGINSDIR\$1"' + CopyFiles "$PLUGINSDIR\$1" "$0\$1$R1" + + Pop $R1 +FunctionEnd + +Function GetSelectedComponents + Push $R0 + StrCpy $R0 0 + ${If} ${SectionIsSelected} ${SecFileDaemon} + IntOp $R0 $R0 | ${ComponentFile} + ${EndIf} + ${If} ${SectionIsSelected} ${SecStorageDaemon} + IntOp $R0 $R0 | ${ComponentStorage} + ${EndIf} + ${If} ${SectionIsSelected} ${SecConsole} + IntOp $R0 $R0 | ${ComponentTextConsole} + ${EndIf} + ${If} ${SectionIsSelected} ${SecBatConsole} + IntOp $R0 $R0 | ${ComponentBatConsole} + ${EndIf} + ${If} ${SectionIsSelected} ${SecTrayMonitor} + IntOp $R0 $R0 | ${ComponentTrayMonitor} + ${EndIf} + ${If} ${SectionIsSelected} ${SecAllDrivesPlugin} + IntOp $R0 $R0 | ${ComponentAllDrivesPlugin} + ${EndIf} +; ${If} ${SectionIsSelected} ${SecWinBMRPlugin} +; IntOp $R0 $R0 | ${ComponentWinBMRPlugin} +; ${EndIf} + ${If} ${SectionIsSelected} ${SecOldExchangePlugin} + IntOp $R0 $R0 | ${ComponentOldExchangePlugin} + ${EndIf} + ${If} ${SectionIsSelected} ${SecDocPdf} + IntOp $R0 $R0 | ${ComponentPDFDocs} + ${EndIf} + Exch $R0 +FunctionEnd + +Function PageComponentsShow + Call SelectPreviousComponents + Call UpdateComponentUI +FunctionEnd + +Function PageDirectoryPre + ${If} $AutomaticInstall = 1 + ${OrIf} $InstallType = ${UpgradeInstall} + Abort + ${EndIf} +FunctionEnd + +Function LeaveInstallPage + Push "$INSTDIR\install.log" + Call DumpLog +FunctionEnd + +Function EnterWriteTemplates + Push $R0 + Push $R1 + + Call GetSelectedComponents + Pop $R0 + + IntOp $R0 $R0 & ${ComponentDirector} + IntOp $R1 $NewComponents & ${ComponentsFileAndStorage} + + ${If} $R0 <> 0 + ${OrIf} $R1 = 0 + Pop $R1 + Pop $R0 + Abort + ${EndIf} + + IntOp $R0 $NewComponents & ${ComponentFile} + ${If} $R0 = 0 + WriteINIStr "$PLUGINSDIR\WriteTemplates.ini" "Field 2" State 0 + WriteINIStr "$PLUGINSDIR\WriteTemplates.ini" "Field 2" Flags DISABLED + DeleteINIStr "$PLUGINSDIR\WriteTemplates.ini" "Field 3" State + WriteINIStr "$PLUGINSDIR\WriteTemplates.ini" "Field 3" Flags REQ_SAVE|FILE_EXPLORER|WARN_IF_EXIST|DISABLED + ${Else} + ;; TODO: See why this procedure causes a problem on Windows 2012 + WriteINIStr "$PLUGINSDIR\WriteTemplates.ini" "Field 2" State 0 + DeleteINIStr "$PLUGINSDIR\WriteTemplates.ini" "Field 2" Flags + WriteINIStr "$PLUGINSDIR\WriteTemplates.ini" "Field 3" State "$INSTDIR\$ConfigClientName.conf" + WriteINIStr "$PLUGINSDIR\WriteTemplates.ini" "Field 5" Flags REQ_SAVE|FILE_EXPLORER|WARN_IF_EXIST + + ${EndIf} + + IntOp $R0 $NewComponents & ${ComponentStorage} + ${If} $R0 = 0 + WriteINIStr 
"$PLUGINSDIR\WriteTemplates.ini" "Field 4" State 0 + WriteINIStr "$PLUGINSDIR\WriteTemplates.ini" "Field 4" Flags DISABLED + DeleteINIStr "$PLUGINSDIR\WriteTemplates.ini" "Field 5" State + WriteINIStr "$PLUGINSDIR\WriteTemplates.ini" "Field 5" Flags REQ_SAVE|FILE_EXPLORER|WARN_IF_EXIST|DISABLED + ${Else} + ;; TODO: See why this procedure causes a problem on Windows 2012 + WriteINIStr "$PLUGINSDIR\WriteTemplates.ini" "Field 4" State 0 + DeleteINIStr "$PLUGINSDIR\WriteTemplates.ini" "Field 4" Flags + WriteINIStr "$PLUGINSDIR\WriteTemplates.ini" "Field 5" State "$INSTDIR\$ConfigStorageName.conf" + WriteINIStr "$PLUGINSDIR\WriteTemplates.ini" "Field 5" Flags REQ_SAVE|FILE_EXPLORER|WARN_IF_EXIST + ${EndIf} + + !InsertMacro MUI_HEADER_TEXT "$(TITLE_WriteTemplates)" "$(SUBTITLE_WriteTemplates)" + !InsertMacro MUI_INSTALLOPTIONS_DISPLAY "WriteTemplates.ini" + + !InsertMacro MUI_INSTALLOPTIONS_READ $R0 "WriteTemplates.ini" "Field 2" State + ${If} $R0 <> 0 + File "/oname=$PLUGINSDIR\client.conf.in" "client.conf.in" + + nsExec::ExecToLog '$PLUGINSDIR\sed.exe -f "$PLUGINSDIR\config.sed" -i.bak "$PLUGINSDIR\client.conf.in"' + !InsertMacro MUI_INSTALLOPTIONS_READ $R0 "WriteTemplates.ini" "Field 3" State + ${If} $R0 != "" + CopyFiles "$PLUGINSDIR\client.conf.in" "$R0" + ${EndIf} + ${EndIf} + + + !InsertMacro MUI_INSTALLOPTIONS_READ $R0 "WriteTemplates.ini" "Field 4" State + ${If} $R0 <> 0 + File "/oname=$PLUGINSDIR\storage.conf.in" "storage.conf.in" + + nsExec::ExecToLog '$PLUGINSDIR\sed.exe -f "$PLUGINSDIR\config.sed" -i.bak "$PLUGINSDIR\storage.conf.in"' + !InsertMacro MUI_INSTALLOPTIONS_READ $R0 "WriteTemplates.ini" "Field 5" State + ${If} $R0 != "" + CopyFiles "$PLUGINSDIR\storage.conf.in" "$R0" + ${EndIf} + ${EndIf} + + Pop $R1 + Pop $R0 +FunctionEnd + +Function SelectPreviousComponents + ${If} $InstallType <> ${NewInstall} + IntOp $R1 $PreviousComponents & ${ComponentFile} + ${If} $R1 <> 0 + !InsertMacro SelectSection ${SecFileDaemon} + !InsertMacro SetSectionFlag ${SecFileDaemon} ${SF_RO} + ${Else} + !InsertMacro UnselectSection ${SecFileDaemon} + !InsertMacro ClearSectionFlag ${SecFileDaemon} ${SF_RO} + ${EndIf} + IntOp $R1 $PreviousComponents & ${ComponentStorage} + ${If} $R1 <> 0 + !InsertMacro SelectSection ${SecStorageDaemon} + !InsertMacro SetSectionFlag ${SecStorageDaemon} ${SF_RO} + ${Else} + !InsertMacro UnselectSection ${SecStorageDaemon} + !InsertMacro ClearSectionFlag ${SecStorageDaemon} ${SF_RO} + ${EndIf} + IntOp $R1 $PreviousComponents & ${ComponentTextConsole} + ${If} $R1 <> 0 + !InsertMacro SelectSection ${SecConsole} + !InsertMacro SetSectionFlag ${SecConsole} ${SF_RO} + ${Else} + !InsertMacro UnselectSection ${SecConsole} + !InsertMacro ClearSectionFlag ${SecConsole} ${SF_RO} + ${EndIf} + IntOp $R1 $PreviousComponents & ${ComponentBatConsole} + ${If} $R1 <> 0 + !InsertMacro SelectSection ${SecBatConsole} + !InsertMacro SetSectionFlag ${SecBatConsole} ${SF_RO} + ${Else} + !InsertMacro UnselectSection ${SecBatConsole} + !InsertMacro ClearSectionFlag ${SecBatConsole} ${SF_RO} + ${EndIf} + IntOp $R1 $PreviousComponents & ${ComponentTrayMonitor} + ${If} $R1 <> 0 + !InsertMacro SelectSection ${SecTrayMonitor} + !InsertMacro SetSectionFlag ${SecTrayMonitor} ${SF_RO} + ${Else} + !InsertMacro UnselectSection ${SecTrayMonitor} + !InsertMacro ClearSectionFlag ${SecTrayMonitor} ${SF_RO} + ${EndIf} + IntOp $R1 $PreviousComponents & ${ComponentAllDrivesPlugin} + ${If} $R1 <> 0 + !InsertMacro SelectSection ${SecAllDrivesPlugin} + !InsertMacro SetSectionFlag ${SecAllDrivesPlugin} ${SF_RO} + 
${Else} + !InsertMacro UnselectSection ${SecAllDrivesPlugin} + !InsertMacro ClearSectionFlag ${SecAllDrivesPlugin} ${SF_RO} + ${EndIf} +; IntOp $R1 $PreviousComponents & ${ComponentWinBMRPlugin} +; ${If} $R1 <> 0 +; !InsertMacro SelectSection ${SecWinBMRPlugin} +; !InsertMacro SetSectionFlag ${SecWinBMRPlugin} ${SF_RO} +; ${Else} +; !InsertMacro UnselectSection ${SecWinBMRPlugin} +; !InsertMacro ClearSectionFlag ${SecWinBMRPlugin} ${SF_RO} +; ${EndIf} + IntOp $R1 $PreviousComponents & ${ComponentOldExchangePlugin} + ${If} $R1 <> 0 + !InsertMacro SelectSection ${SecOldExchangePlugin} + !InsertMacro SetSectionFlag ${SecOldExchangePlugin} ${SF_RO} + ${Else} + !InsertMacro UnselectSection ${SecOldExchangePlugin} + !InsertMacro ClearSectionFlag ${SecOldExchangePlugin} ${SF_RO} + ${EndIf} + IntOp $R1 $PreviousComponents & ${ComponentPDFDocs} + ${If} $R1 <> 0 + !InsertMacro SelectSection ${SecDocPdf} + !InsertMacro SetSectionFlag ${SecDocPdf} ${SF_RO} + ${Else} + !InsertMacro UnselectSection ${SecDocPdf} + !InsertMacro ClearSectionFlag ${SecDocPdf} ${SF_RO} + ${EndIf} + ${EndIf} +FunctionEnd + +Function UpdateComponentUI + Push $R0 + Push $R1 + + Call GetSelectedComponents + Pop $R0 + + IntOp $R1 $R0 ^ $PreviousComponents + IntOp $NewComponents $R0 & $R1 + + ${If} $InstallType <> ${NewInstall} + IntOp $R1 $NewComponents & ${ComponentFile} + ${If} $R1 <> 0 + !InsertMacro SetSectionFlag ${SecFileDaemon} ${SF_BOLD} + ${Else} + !InsertMacro ClearSectionFlag ${SecFileDaemon} ${SF_BOLD} + ${EndIf} + IntOp $R1 $NewComponents & ${ComponentStorage} + ${If} $R1 <> 0 + !InsertMacro SetSectionFlag ${SecStorageDaemon} ${SF_BOLD} + ${Else} + !InsertMacro ClearSectionFlag ${SecStorageDaemon} ${SF_BOLD} + ${EndIf} + IntOp $R1 $NewComponents & ${ComponentTextConsole} + ${If} $R1 <> 0 + !InsertMacro SetSectionFlag ${SecConsole} ${SF_BOLD} + ${Else} + !InsertMacro ClearSectionFlag ${SecConsole} ${SF_BOLD} + ${EndIf} + IntOp $R1 $NewComponents & ${ComponentBatConsole} + ${If} $R1 <> 0 + !InsertMacro SetSectionFlag ${SecBatConsole} ${SF_BOLD} + ${Else} + !InsertMacro ClearSectionFlag ${SecBatConsole} ${SF_BOLD} + ${EndIf} + IntOp $R1 $NewComponents & ${ComponentTrayMonitor} + ${If} $R1 <> 0 + !InsertMacro SetSectionFlag ${SecTrayMonitor} ${SF_BOLD} + ${Else} + !InsertMacro ClearSectionFlag ${SecTrayMonitor} ${SF_BOLD} + ${EndIf} + IntOp $R1 $NewComponents & ${ComponentAllDrivesPlugin} + ${If} $R1 <> 0 + !InsertMacro SetSectionFlag ${SecAllDrivesPlugin} ${SF_BOLD} + ${Else} + !InsertMacro ClearSectionFlag ${SecAllDrivesPlugin} ${SF_BOLD} + ${EndIf} + IntOp $R1 $NewComponents & ${ComponentOldExchangePlugin} + ${If} $R1 <> 0 + !InsertMacro SetSectionFlag ${SecOldExchangePlugin} ${SF_BOLD} + ${Else} + !InsertMacro ClearSectionFlag ${SecOldExchangePlugin} ${SF_BOLD} + ${EndIf} + IntOp $R1 $NewComponents & ${ComponentPDFDocs} + ${If} $R1 <> 0 + !InsertMacro SetSectionFlag ${SecDocPdf} ${SF_BOLD} + ${Else} + !InsertMacro ClearSectionFlag ${SecDocPdf} ${SF_BOLD} + ${EndIf} + ${EndIf} + + GetDlgItem $R0 $HWNDPARENT 1 + + IntOp $R1 $NewComponents & ${ComponentsRequiringUserConfig} + ${If} $R1 = 0 + SendMessage $R0 ${WM_SETTEXT} 0 "STR:Install" + ${Else} + SendMessage $R0 ${WM_SETTEXT} 0 "STR:&Next >" + ${EndIf} + + Pop $R1 + Pop $R0 +FunctionEnd + +!include "InstallType.nsh" +!include "ConfigPage1.nsh" +!include "ConfigPage2.nsh" +!include "DumpLog.nsh" diff --git a/src/win32/win32_installer/x64.nsh b/src/win32/win32_installer/x64.nsh new file mode 100644 index 00000000..e694c1e6 --- /dev/null +++ 
b/src/win32/win32_installer/x64.nsh @@ -0,0 +1,54 @@ +; --------------------- +; x64.nsh +; --------------------- +; +; A few simple macros to handle installations on x64 machines. +; +; RunningX64 checks if the installer is running on x64. +; +; ${If} ${RunningX64} +; MessageBox MB_OK "running on x64" +; ${EndIf} +; +; DisableX64FSRedirection disables file system redirection. +; EnableX64FSRedirection enables file system redirection. +; +; SetOutPath $SYSDIR +; ${DisableX64FSRedirection} +; File some.dll # extracts to C:\Windows\System32 +; ${EnableX64FSRedirection} +; File some.dll # extracts to C:\Windows\SysWOW64 +; + +!ifndef ___X64__NSH___ +!define ___X64__NSH___ + +!include LogicLib.nsh + +!macro _RunningX64 _a _b _t _f + !insertmacro _LOGICLIB_TEMP + System::Call kernel32::GetCurrentProcess()i.s + System::Call kernel32::IsWow64Process(is,*i.s) + Pop $_LOGICLIB_TEMP + !insertmacro _!= $_LOGICLIB_TEMP 0 `${_t}` `${_f}` +!macroend + +!define RunningX64 `"" RunningX64 ""` + +!macro DisableX64FSRedirection + + System::Call kernel32::Wow64EnableWow64FsRedirection(i0) + +!macroend + +!define DisableX64FSRedirection "!insertmacro DisableX64FSRedirection" + +!macro EnableX64FSRedirection + + System::Call kernel32::Wow64EnableWow64FsRedirection(i1) + +!macroend + +!define EnableX64FSRedirection "!insertmacro EnableX64FSRedirection" + +!endif # !___X64__NSH___ diff --git a/src/win32/win64_installer/ConfigPage1.nsh b/src/win32/win64_installer/ConfigPage1.nsh new file mode 100644 index 00000000..2a16fb55 --- /dev/null +++ b/src/win32/win64_installer/ConfigPage1.nsh @@ -0,0 +1,294 @@ +Function EnterConfigPage1 + ${If} $AutomaticInstall = 1 + Abort + ${EndIf} + + IntOp $R0 $NewComponents & ${ComponentsFileAndStorage} + + ${If} $R0 = 0 + Abort + ${EndIf} + + FileOpen $R5 "$PLUGINSDIR\ConfigPage1.ini" w + + StrCpy $R6 1 ; Field Number + StrCpy $R7 0 ; Top + + IntOp $R0 $NewComponents & ${ComponentFile} + ${If} $R0 <> 0 + IntOp $R8 $R7 + 52 + FileWrite $R5 '[Field $R6]$\r$\nType="GroupBox"$\r$\nText="Client"$\r$\nLeft=0$\r$\nTop=$R7$\r$\nRight=300$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 12 + + IntOp $R8 $R7 + 8 + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Name"$\r$\nLeft=6$\r$\nTop=$R7$\r$\nRight=26$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + + IntOp $R8 $R8 + 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nState=$ConfigClientName$\r$\nLeft=50$\r$\nTop=$R7$\r$\nRight=158$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 2 + + IntOp $R8 $R8 - 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Port"$\r$\nLeft=172$\r$\nTop=$R7$\r$\nRight=188$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + + IntOp $R8 $R8 + 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nFlags="ONLY_NUMBERS"$\r$\nState=$ConfigClientPort$\r$\nLeft=190$\r$\nTop=$R7$\r$\nRight=218$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 2 + + IntOp $R8 $R8 - 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Max Jobs"$\r$\nLeft=238$\r$\nTop=$R7$\r$\nRight=270$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + + IntOp $R8 $R8 + 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nFlags="ONLY_NUMBERS"$\r$\nState=$ConfigClientMaxJobs$\r$\nLeft=274$\r$\nTop=$R7$\r$\nRight=294$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 16 + + IntOp $R8 $R7 + 8 + FileWrite $R5 '[Field 
$R6]$\r$\nType="Label"$\r$\nText="Password"$\r$\nLeft=6$\r$\nTop=$R7$\r$\nRight=38$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + + IntOp $R8 $R8 + 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nState=$ConfigClientPassword$\r$\nLeft=50$\r$\nTop=$R7$\r$\nRight=294$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 14 + + IntOp $R8 $R7 + 10 + FileWrite $R5 '[Field $R6]$\r$\nType="Checkbox"$\r$\nState=$ConfigClientInstallService$\r$\nText="Install as service"$\r$\nLeft=50$\r$\nTop=$R7$\r$\nRight=118$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + + FileWrite $R5 '[Field $R6]$\r$\nType="Checkbox"$\r$\nState=$ConfigClientStartService$\r$\nText="Start after install"$\r$\nLeft=190$\r$\nTop=$R7$\r$\nRight=260$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 16 + ${Endif} + + IntOp $R0 $NewComponents & ${ComponentStorage} + ${If} $R0 <> 0 + IntOp $R8 $R7 + 52 + FileWrite $R5 '[Field $R6]$\r$\nType="GroupBox"$\r$\nText="Storage"$\r$\nLeft=0$\r$\nTop=$R7$\r$\nRight=300$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 12 + + IntOp $R8 $R7 + 8 + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Name"$\r$\nLeft=6$\r$\nTop=$R7$\r$\nRight=26$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + + IntOp $R8 $R8 + 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nState=$ConfigStorageName$\r$\nLeft=50$\r$\nTop=$R7$\r$\nRight=158$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 2 + + IntOp $R8 $R8 - 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Port"$\r$\nLeft=172$\r$\nTop=$R7$\r$\nRight=188$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + + IntOp $R8 $R8 + 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nFlags="ONLY_NUMBERS"$\r$\nState=$ConfigStoragePort$\r$\nLeft=190$\r$\nTop=$R7$\r$\nRight=218$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 2 + + IntOp $R8 $R8 - 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Max Jobs"$\r$\nLeft=238$\r$\nTop=$R7$\r$\nRight=270$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + + IntOp $R8 $R8 + 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nFlags="ONLY_NUMBERS"$\r$\nState=$ConfigStorageMaxJobs$\r$\nLeft=274$\r$\nTop=$R7$\r$\nRight=294$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 16 + + IntOp $R8 $R7 + 8 + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Password"$\r$\nLeft=6$\r$\nTop=$R7$\r$\nRight=38$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + + IntOp $R8 $R8 + 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nState=$ConfigStoragePassword$\r$\nLeft=50$\r$\nTop=$R7$\r$\nRight=294$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 14 + + IntOp $R8 $R7 + 10 + FileWrite $R5 '[Field $R6]$\r$\nType="Checkbox"$\r$\nState=$ConfigStorageInstallService$\r$\nText="Install as service"$\r$\nLeft=50$\r$\nTop=$R7$\r$\nRight=118$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + + FileWrite $R5 '[Field $R6]$\r$\nType="Checkbox"$\r$\nState=$ConfigStorageStartService$\r$\nText="Start after install"$\r$\nLeft=190$\r$\nTop=$R7$\r$\nRight=260$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 16 + ${Endif} + + IntOp $R6 $R6 - 1 + + FileWrite $R5 "[Settings]$\r$\nNumFields=$R6$\r$\n" + + FileClose $R5 + + !insertmacro MUI_HEADER_TEXT "$(TITLE_ConfigPage1)" "$(SUBTITLE_ConfigPage1)" + !insertmacro MUI_INSTALLOPTIONS_INITDIALOG "ConfigPage1.ini" + Pop $HDLG ;HWND of dialog + + ; 
Initialize Controls + + StrCpy $R6 1 ; Field Number + + IntOp $R0 $NewComponents & ${ComponentFile} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 2 + + ; Client Name + !insertmacro MUI_INSTALLOPTIONS_READ $HCTL "ConfigPage1.ini" "Field $R6" "HWND" + SendMessage $HCTL ${EM_LIMITTEXT} 30 0 + + IntOp $R6 $R6 + 2 + + ; Client Port Number + !insertmacro MUI_INSTALLOPTIONS_READ $HCTL "ConfigPage1.ini" "Field $R6" "HWND" + SendMessage $HCTL ${EM_LIMITTEXT} 5 0 + + IntOp $R6 $R6 + 2 + + ; Max Jobs + !insertmacro MUI_INSTALLOPTIONS_READ $HCTL "ConfigPage1.ini" "Field $R6" "HWND" + SendMessage $HCTL ${EM_LIMITTEXT} 3 0 + + IntOp $R6 $R6 + 5 + ${Endif} + + IntOp $R0 $NewComponents & ${ComponentStorage} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 2 + + ; Storage Name + !insertmacro MUI_INSTALLOPTIONS_READ $HCTL "ConfigPage1.ini" "Field $R6" "HWND" + SendMessage $HCTL ${EM_LIMITTEXT} 30 0 + + IntOp $R6 $R6 + 2 + + ; Storage Port Number + !insertmacro MUI_INSTALLOPTIONS_READ $HCTL "ConfigPage1.ini" "Field $R6" "HWND" + SendMessage $HCTL ${EM_LIMITTEXT} 5 0 + + IntOp $R6 $R6 + 2 + + ; Max Jobs + !insertmacro MUI_INSTALLOPTIONS_READ $HCTL "ConfigPage1.ini" "Field $R6" "HWND" + SendMessage $HCTL ${EM_LIMITTEXT} 3 0 + + IntOp $R6 $R6 + 5 + ${Endif} + + !insertmacro MUI_INSTALLOPTIONS_SHOW + + ; Process results + + StrCpy $R6 3 + + IntOp $R0 $NewComponents & ${ComponentFile} + ${If} $R0 <> 0 + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigClientName "ConfigPage1.ini" "Field $R6" "State" + + IntOp $R6 $R6 + 2 + + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigClientPort "ConfigPage1.ini" "Field $R6" "State" + + IntOp $R6 $R6 + 2 + + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigClientMaxJobs "ConfigPage1.ini" "Field $R6" "State" + + IntOp $R6 $R6 + 2 + + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigClientPassword "ConfigPage1.ini" "Field $R6" "State" + + IntOp $R6 $R6 + 1 + + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigClientInstallService "ConfigPage1.ini" "Field $R6" "State" + + IntOp $R6 $R6 + 1 + + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigClientStartService "ConfigPage1.ini" "Field $R6" "State" + + IntOp $R6 $R6 + 3 + ${Endif} + + IntOp $R0 $NewComponents & ${ComponentStorage} + ${If} $R0 <> 0 + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigStorageName "ConfigPage1.ini" "Field $R6" "State" + + IntOp $R6 $R6 + 2 + + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigStoragePort "ConfigPage1.ini" "Field $R6" "State" + + IntOp $R6 $R6 + 2 + + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigStorageMaxJobs "ConfigPage1.ini" "Field $R6" "State" + + IntOp $R6 $R6 + 2 + + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigStoragePassword "ConfigPage1.ini" "Field $R6" "State" + + IntOp $R6 $R6 + 1 + + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigStorageInstallService "ConfigPage1.ini" "Field $R6" "State" + + IntOp $R6 $R6 + 1 + + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigStorageStartService "ConfigPage1.ini" "Field $R6" "State" + + IntOp $R6 $R6 + 3 + ${Endif} +FunctionEnd + +Function LeaveConfigPage1 + StrCpy $R6 5 + + IntOp $R0 $NewComponents & ${ComponentFile} + ${If} $R0 <> 0 + !insertmacro MUI_INSTALLOPTIONS_READ $R0 "ConfigPage1.ini" "Field $R6" "State" + ${If} $R0 < 1024 + ${OrIf} $R0 > 65535 + MessageBox MB_OK "Port must be between 1024 and 65535 inclusive." + Abort + ${EndIf} + + IntOp $R6 $R6 + 2 + + !insertmacro MUI_INSTALLOPTIONS_READ $R0 "ConfigPage1.ini" "Field $R6" "State" + ${If} $R0 < 1 + ${OrIf} $R0 > 99 + MessageBox MB_OK "Max Jobs must be between 1 and 99 inclusive." 
+ Abort + ${EndIf} + + IntOp $R6 $R6 + 9 + ${Endif} + + IntOp $R0 $NewComponents & ${ComponentStorage} + ${If} $R0 <> 0 + !insertmacro MUI_INSTALLOPTIONS_READ $R0 "ConfigPage1.ini" "Field $R6" "State" + ${If} $R0 < 1024 + ${OrIf} $R0 > 65535 + MessageBox MB_OK "Port must be between 1024 and 65535 inclusive." + Abort + ${EndIf} + + IntOp $R6 $R6 + 2 + + !insertmacro MUI_INSTALLOPTIONS_READ $R0 "ConfigPage1.ini" "Field $R6" "State" + ${If} $R0 < 1 + ${OrIf} $R0 > 99 + MessageBox MB_OK "Max Jobs must be between 1 and 99 inclusive." + Abort + ${EndIf} + + IntOp $R6 $R6 + 9 + ${Endif} +FunctionEnd diff --git a/src/win32/win64_installer/ConfigPage2.nsh b/src/win32/win64_installer/ConfigPage2.nsh new file mode 100644 index 00000000..ed7c5f51 --- /dev/null +++ b/src/win32/win64_installer/ConfigPage2.nsh @@ -0,0 +1,455 @@ +Function EnterConfigPage2 + IntOp $R0 $NewComponents & ${ComponentsRequiringUserConfig} + + ${If} $R0 = 0 + Abort + ${EndIf} + + FileOpen $R5 "$PLUGINSDIR\ConfigPage2.ini" w + + StrCpy $R6 1 ; Field Number + StrCpy $R7 0 ; Top + + IntOp $R0 $NewComponents & ${ComponentDirector} + ${If} $R0 <> 0 + ${If} $AutomaticInstall = 1 + IntOp $R8 $R7 + 54 + ${Else} + IntOp $R8 $R7 + 92 + ${EndIf} + FileWrite $R5 '[Field $R6]$\r$\nType="GroupBox"$\r$\nText="Director"$\r$\nLeft=0$\r$\nTop=$R7$\r$\nRight=300$\r$\nBottom=$R8$\r$\n$\r$\n' + ${Else} + IntOp $R0 $NewComponents & ${ComponentsTextAndGuiConsoles} + ${If} $R0 <> 0 + IntOp $R8 $R7 + 54 + ${Else} + IntOp $R8 $R7 + 26 + ${EndIf} + FileWrite $R5 '[Field $R6]$\r$\nType="GroupBox"$\r$\nText="Enter Director Information"$\r$\nLeft=0$\r$\nTop=$R7$\r$\nRight=300$\r$\nBottom=$R8$\r$\n$\r$\n' + ${EndIf} + + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 12 + + IntOp $R0 $NewComponents & ${ComponentDirector} + ${If} $R0 <> 0 + ${If} "$ConfigDirectorName" == "" + StrCpy $ConfigDirectorName "$HostName-dir" + ${EndIf} + ${If} "$ConfigDirectorPassword" == "" + StrCpy $ConfigDirectorPassword "$LocalDirectorPassword" + ${EndIf} + ${Else} + ${If} "$ConfigDirectorName" == "$HostName-dir" + StrCpy $ConfigDirectorName "" + ${EndIf} + ${If} "$ConfigDirectorPassword" == "$LocalDirectorPassword" + StrCpy $ConfigDirectorPassword "" + ${EndIf} + ${EndIf} + + IntOp $R0 $NewComponents & ${ComponentDirector} + ${If} $R0 = 0 + ${OrIf} $AutomaticInstall = 0 + IntOp $R8 $R7 + 8 + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="DIR Name"$\r$\nLeft=6$\r$\nTop=$R7$\r$\nRight=60$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + + IntOp $R8 $R8 + 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nState=$ConfigDirectorName$\r$\nLeft=60$\r$\nTop=$R7$\r$\nRight=158$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + + ${If} $AutomaticInstall = 0 + IntOp $R0 $NewComponents & ${ComponentsDirectorAndTextGuiConsoles} + ${If} $R0 <> 0 + IntOp $R7 $R7 + 2 + IntOp $R8 $R8 - 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="DIR Port"$\r$\nLeft=172$\r$\nTop=$R7$\r$\nRight=188$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + + IntOp $R8 $R8 + 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nFlags="ONLY_NUMBERS"$\r$\nState=$ConfigDirectorPort$\r$\nLeft=190$\r$\nTop=$R7$\r$\nRight=218$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + ${EndIf} + + IntOp $R0 $NewComponents & ${ComponentDirector} + ${If} $R0 <> 0 + IntOp $R7 $R7 + 2 + IntOp $R8 $R8 - 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Max Jobs"$\r$\nLeft=238$\r$\nTop=$R7$\r$\nRight=270$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + + 
IntOp $R8 $R8 + 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nFlags="ONLY_NUMBERS"$\r$\nState=$ConfigDirectorMaxJobs$\r$\nLeft=274$\r$\nTop=$R7$\r$\nRight=294$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + ${EndIf} + ${EndIf} + + IntOp $R7 $R7 + 14 + ${EndIf} + + IntOp $R0 $NewComponents & ${ComponentsTextAndGuiConsoles} + ${If} $R0 <> 0 + ${OrIf} $AutomaticInstall = 0 + IntOp $R0 $NewComponents & ${ComponentsDirectorAndTextGuiConsoles} + ${If} $R0 <> 0 + IntOp $R7 $R7 + 2 + IntOp $R8 $R7 + 8 + + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="DIR Password"$\r$\nLeft=6$\r$\nTop=$R7$\r$\nRight=60$\r$\nBottom=$R8$\r$\n$\r$\n' + + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + IntOp $R8 $R8 + 2 + + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nState=$ConfigDirectorPassword$\r$\nLeft=60$\r$\nTop=$R7$\r$\nRight=294$\r$\nBottom=$R8$\r$\n$\r$\n' + + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 14 + ${EndIf} + ${EndIf} + + IntOp $R0 $NewComponents & ${ComponentDirector} + ${If} $R0 <> 0 + IntOp $R7 $R7 + 2 + IntOp $R8 $R7 + 8 + + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Mail Server"$\r$\nLeft=6$\r$\nTop=$R7$\r$\nRight=48$\r$\nBottom=$R8$\r$\n$\r$\n' + + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + IntOp $R8 $R8 + 2 + + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nState=$ConfigDirectorMailServer$\r$\nLeft=50$\r$\nTop=$R7$\r$\nRight=294$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 16 + IntOp $R8 $R7 + 8 + + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Mail Address"$\r$\nLeft=6$\r$\nTop=$R7$\r$\nRight=48$\r$\nBottom=$R8$\r$\n$\r$\n' + + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + IntOp $R8 $R8 + 2 + + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nState=$ConfigDirectorMailAddress$\r$\nLeft=50$\r$\nTop=$R7$\r$\nRight=294$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 16 + IntOp $R8 $R7 + 8 + + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Database"$\r$\nLeft=6$\r$\nTop=$R7$\r$\nRight=38$\r$\nBottom=$R8$\r$\n$\r$\n' + + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + IntOp $R8 $R8 + 2 + + ${If} $ConfigDirectorDB = 0 + ${If} $MySQLPath != "" + StrCpy $ConfigDirectorDB 1 + ${ElseIf} $PostgreSQLPath != "" + StrCpy $ConfigDirectorDB 2 + ${Else} + StrCpy $ConfigDirectorDB 3 + ${EndIf} + ${EndIf} + + ${If} $ConfigDirectorDB = 1 + StrCpy $R9 1 + ${Else} + StrCpy $R9 0 + ${EndIf} + + FileWrite $R5 '[Field $R6]$\r$\nType="RadioButton"$\r$\nState=$R9$\r$\nText="MySQL"$\r$\nFlags="GROUP"$\r$\nLeft=50$\r$\nTop=$R7$\r$\nRight=90$\r$\nBottom=$R8$\r$\n$\r$\n' + + IntOp $R6 $R6 + 1 + + ${If} $ConfigDirectorDB = 2 + StrCpy $R9 1 + ${Else} + StrCpy $R9 0 + ${EndIf} + + FileWrite $R5 '[Field $R6]$\r$\nType="RadioButton"$\r$\nState=$R9$\r$\nText="PostgreSQL"$\r$\nFlags="NOTABSTOP"$\r$\nLeft=94$\r$\nTop=$R7$\r$\nRight=146$\r$\nBottom=$R8$\r$\n$\r$\n' + + IntOp $R6 $R6 + 1 + + ${If} $ConfigDirectorDB = 3 + StrCpy $R9 1 + ${Else} + StrCpy $R9 0 + ${EndIf} + + FileWrite $R5 '[Field $R6]$\r$\nType="RadioButton"$\r$\nState=$R9$\r$\nText="Sqlite"$\r$\nFlags="NOTABSTOP"$\r$\nLeft=150$\r$\nTop=$R7$\r$\nRight=182$\r$\nBottom=$R8$\r$\n$\r$\n' + + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 12 + + ${If} $AutomaticInstall = 0 + IntOp $R8 $R7 + 10 + FileWrite $R5 '[Field $R6]$\r$\nType="Checkbox"$\r$\nState=$ConfigDirectorInstallService$\r$\nText="Install as service"$\r$\nLeft=50$\r$\nTop=$R7$\r$\nRight=118$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + + FileWrite $R5 '[Field $R6]$\r$\nType="Checkbox"$\r$\nState=$ConfigDirectorStartService$\r$\nText="Start after 
install"$\r$\nLeft=190$\r$\nTop=$R7$\r$\nRight=260$\r$\nBottom=$R8$\r$\n$\r$\n' + + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 12 + ${EndIf} + ${Else} + IntOp $R0 $NewComponents & ${ComponentsTextAndGuiConsoles} + ${If} $R0 <> 0 + IntOp $R7 $R7 + 2 + IntOp $R8 $R7 + 8 + + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="DIR Address"$\r$\nLeft=6$\r$\nTop=$R7$\r$\nRight=60$\r$\nBottom=$R8$\r$\n$\r$\n' + + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + IntOp $R8 $R8 + 2 + + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nState=$ConfigDirectorAddress$\r$\nLeft=60$\r$\nTop=$R7$\r$\nRight=294$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 14 + IntOp $R8 $R7 + 8 + ${EndIf} + ${EndIf} + + IntOp $R7 $R7 + 4 + + ${If} $AutomaticInstall = 0 + IntOp $R0 $NewComponents & ${ComponentsFileAndStorageAndDirector} + IntOp $R0 0 & 0 + ${If} $R0 <> 0 + IntOp $R8 $R7 + 42 + + FileWrite $R5 '[Field $R6]$\r$\nType="GroupBox"$\r$\nText="Monitor"$\r$\nLeft=0$\r$\nTop=$R7$\r$\nRight=300$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 12 + + IntOp $R8 $R7 + 8 + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Name"$\r$\nLeft=6$\r$\nTop=$R7$\r$\nRight=26$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + + IntOp $R8 $R8 + 2 + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nState=$ConfigMonitorName$\r$\nLeft=50$\r$\nTop=$R7$\r$\nRight=150$\r$\nBottom=$R8$\r$\n$\r$\n' + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 16 + IntOp $R8 $R7 + 8 + + FileWrite $R5 '[Field $R6]$\r$\nType="Label"$\r$\nText="Password"$\r$\nLeft=6$\r$\nTop=$R7$\r$\nRight=38$\r$\nBottom=$R8$\r$\n$\r$\n' + + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 - 2 + IntOp $R8 $R8 + 2 + + FileWrite $R5 '[Field $R6]$\r$\nType="Text"$\r$\nState=$ConfigMonitorPassword$\r$\nLeft=50$\r$\nTop=$R7$\r$\nRight=294$\r$\nBottom=$R8$\r$\n$\r$\n' + + IntOp $R6 $R6 + 1 + IntOp $R7 $R7 + 20 + ${EndIf} + ${EndIf} + + IntOp $R6 $R6 - 1 + FileWrite $R5 "[Settings]$\r$\nNumFields=$R6$\r$\n" + + FileClose $R5 + + IntOp $R0 $NewComponents & ${ComponentsFileAndStorage} + ${If} $R0 = 0 + ${OrIf} $AutomaticInstall = 1 + !insertmacro MUI_HEADER_TEXT "$(TITLE_ConfigPage1)" "$(SUBTITLE_ConfigPage1)" + ${Else} + !insertmacro MUI_HEADER_TEXT "$(TITLE_ConfigPage2)" "$(SUBTITLE_ConfigPage2)" + ${EndIf} + + !insertmacro MUI_INSTALLOPTIONS_INITDIALOG "ConfigPage2.ini" + Pop $HDLG ;HWND of dialog + + ; Initialize Controls + StrCpy $R6 2 ; Field Number + + IntOp $R0 $NewComponents & ${ComponentDirector} + ${If} $R0 = 0 + ${OrIf} $AutomaticInstall = 0 + ; Name + IntOp $R6 $R6 + 1 + !insertmacro MUI_INSTALLOPTIONS_READ $HCTL "ConfigPage2.ini" "Field $R6" "HWND" + SendMessage $HCTL ${EM_LIMITTEXT} 30 0 + IntOp $R6 $R6 + 1 + + ${If} $AutomaticInstall = 0 + IntOp $R0 $NewComponents & ${ComponentsDirectorAndTextGuiConsoles} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 1 + ; Port Number + !insertmacro MUI_INSTALLOPTIONS_READ $HCTL "ConfigPage2.ini" "Field $R6" "HWND" + SendMessage $HCTL ${EM_LIMITTEXT} 5 0 + IntOp $R6 $R6 + 1 + ${EndIf} + + IntOp $R0 $NewComponents & ${ComponentDirector} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 1 + ; Max Jobs + !insertmacro MUI_INSTALLOPTIONS_READ $HCTL "ConfigPage2.ini" "Field $R6" "HWND" + SendMessage $HCTL ${EM_LIMITTEXT} 3 0 + + IntOp $R6 $R6 + 1 + ${EndIf} + ${EndIf} + ${EndIf} + + IntOp $R0 $NewComponents & ${ComponentsTextAndGuiConsoles} + ${If} $R0 <> 0 + ${OrIf} $AutomaticInstall = 0 + IntOp $R0 $NewComponents & ${ComponentsDirectorAndTextGuiConsoles} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 2 + ${EndIf} + ${EndIf} + + IntOp $R0 
$NewComponents & ${ComponentDirector} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 9 + + ${If} $AutomaticInstall = 0 + IntOp $R6 $R6 + 2 + ${EndIf} + ${Else} + IntOp $R0 $NewComponents & ${ComponentsTextAndGuiConsoles} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 2 + ${EndIf} + ${EndIf} + + ${If} $AutomaticInstall = 0 + IntOp $R0 $NewComponents & ${ComponentsFileAndStorageAndDirector} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 2 + !insertmacro MUI_INSTALLOPTIONS_READ $HCTL "ConfigPage2.ini" "Field $R6" "HWND" + SendMessage $HCTL ${EM_LIMITTEXT} 30 0 + IntOp $R6 $R6 + 2 + ${EndIf} + ${EndIf} + + !insertmacro MUI_INSTALLOPTIONS_SHOW + + ; Process results + + StrCpy $R6 2 + + IntOp $R0 $NewComponents & ${ComponentDirector} + ${If} $R0 = 0 + ${OrIf} $AutomaticInstall = 0 + IntOp $R6 $R6 + 1 + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigDirectorName "ConfigPage2.ini" "Field $R6" "State" + IntOp $R6 $R6 + 1 + + ${If} $AutomaticInstall = 0 + IntOp $R0 $NewComponents & ${ComponentsDirectorAndTextGuiConsoles} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 1 + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigDirectorPort "ConfigPage2.ini" "Field $R6" "State" + IntOp $R6 $R6 + 1 + ${EndIf} + + IntOp $R0 $NewComponents & ${ComponentDirector} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 1 + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigDirectorMaxJobs "ConfigPage2.ini" "Field $R6" "State" + IntOp $R6 $R6 + 1 + ${EndIf} + ${EndIf} + ${EndIf} + + IntOp $R0 $NewComponents & ${ComponentsTextAndGuiConsoles} + ${If} $R0 <> 0 + ${OrIf} $AutomaticInstall = 0 + IntOp $R0 $NewComponents & ${ComponentsDirectorAndTextGuiConsoles} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 1 + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigDirectorPassword "ConfigPage2.ini" "Field $R6" "State" + IntOp $R6 $R6 + 1 + ${EndIf} + ${EndIf} + + IntOp $R0 $NewComponents & ${ComponentDirector} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 1 + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigDirectorMailServer "ConfigPage2.ini" "Field $R6" "State" + IntOp $R6 $R6 + 2 + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigDirectorMailAddress "ConfigPage2.ini" "Field $R6" "State" + IntOp $R6 $R6 + 2 + !insertmacro MUI_INSTALLOPTIONS_READ $R5 "ConfigPage2.ini" "Field $R6" "State" + ${If} $R5 = 1 + StrCpy $ConfigDirectorDB 1 + ${Endif} + IntOp $R6 $R6 + 1 + !insertmacro MUI_INSTALLOPTIONS_READ $R5 "ConfigPage2.ini" "Field $R6" "State" + ${If} $R5 = 1 + StrCpy $ConfigDirectorDB 2 + ${Endif} + IntOp $R6 $R6 + 1 + !insertmacro MUI_INSTALLOPTIONS_READ $R5 "ConfigPage2.ini" "Field $R6" "State" + ${If} $R5 = 1 + StrCpy $ConfigDirectorDB 3 + ${Endif} + IntOp $R6 $R6 + 1 + + ${If} $AutomaticInstall = 0 + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigDirectorInstallService "ConfigPage2.ini" "Field $R6" "State" + IntOp $R6 $R6 + 1 + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigDirectorStartService "ConfigPage2.ini" "Field $R6" "State" + IntOp $R6 $R6 + 1 + ${EndIf} + ${Else} + IntOp $R0 $NewComponents & ${ComponentsTextAndGuiConsoles} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 1 + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigDirectorAddress "ConfigPage2.ini" "Field $R6" "State" + IntOp $R6 $R6 + 1 + ${EndIf} + ${EndIf} + + ${If} $AutomaticInstall = 0 + IntOp $R0 $NewComponents & ${ComponentsFileAndStorageAndDirector} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 2 + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigMonitorName "ConfigPage2.ini" "Field $R6" "State" + IntOp $R6 $R6 + 2 + !insertmacro MUI_INSTALLOPTIONS_READ $ConfigMonitorPassword "ConfigPage2.ini" "Field $R6" "State" + ${EndIf} + ${EndIf} +FunctionEnd + +Function LeaveConfigPage2 + ${If} $AutomaticInstall = 0 
+ StrCpy $R6 4 + + IntOp $R0 $NewComponents & ${ComponentsDirectorAndTextGuiConsoles} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 1 + !insertmacro MUI_INSTALLOPTIONS_READ $R0 "ConfigPage2.ini" "Field $R6" "State" + ${If} $R0 < 1024 + ${OrIf} $R0 > 65535 + MessageBox MB_OK "Port must be between 1024 and 65535 inclusive." + Abort + ${EndIf} + IntOp $R6 $R6 + 1 + ${EndIf} + + IntOp $R0 $NewComponents & ${ComponentDirector} + ${If} $R0 <> 0 + IntOp $R6 $R6 + 1 + !insertmacro MUI_INSTALLOPTIONS_READ $R0 "ConfigPage2.ini" "Field $R6" "State" + ${If} $R0 < 1 + ${OrIf} $R0 > 99 + MessageBox MB_OK "Max Jobs must be between 1 and 99 inclusive." + Abort + ${EndIf} + IntOp $R6 $R6 + 1 + ${EndIf} + ${EndIf} +FunctionEnd diff --git a/src/win32/win64_installer/DumpLog.nsh b/src/win32/win64_installer/DumpLog.nsh new file mode 100644 index 00000000..71fb2580 --- /dev/null +++ b/src/win32/win64_installer/DumpLog.nsh @@ -0,0 +1,48 @@ +!ifndef LVM_GETITEMCOUNT +!define LVM_GETITEMCOUNT 0x1004 +!endif +!define LVM_GETITEMTEXT 0x102D + +Function DumpLog + Exch $5 + Push $0 + Push $1 + Push $2 + Push $3 + Push $4 + Push $6 + + FindWindow $0 "#32770" "" $HWNDPARENT + GetDlgItem $0 $0 1016 + StrCmp $0 0 error + FileOpen $5 $5 "w" + StrCmp $5 0 error + SendMessage $0 ${LVM_GETITEMCOUNT} 0 0 $6 + System::Alloc ${NSIS_MAX_STRLEN} + Pop $3 + StrCpy $2 0 + System::Call "*(i, i, i, i, i, i, i, i, i) i \ + (0, 0, 0, 0, 0, r3, ${NSIS_MAX_STRLEN}) .r1" + loop: StrCmp $2 $6 done + System::Call "User32::SendMessageA(i, i, i, i) i \ + ($0, ${LVM_GETITEMTEXT}, $2, r1)" + System::Call "*$3(&t${NSIS_MAX_STRLEN} .r4)" + FileWrite $5 "$4$\r$\n" + IntOp $2 $2 + 1 + Goto loop + done: + FileClose $5 + System::Free $1 + System::Free $3 + Goto exit + error: + MessageBox MB_OK error + exit: + Pop $6 + Pop $4 + Pop $3 + Pop $2 + Pop $1 + Pop $0 + Exch $5 +FunctionEnd diff --git a/src/win32/win64_installer/InstallType.ini b/src/win32/win64_installer/InstallType.ini new file mode 100644 index 00000000..73fb8d93 --- /dev/null +++ b/src/win32/win64_installer/InstallType.ini @@ -0,0 +1,56 @@ +; +; Note: certain text in this file is overwritten by the code in +; InstallType.nsh +; + +[Settings] +NumFields=6 + +[Field 1] +Type=Label +Text=This is a new installation. Please choose the installation type. +Left=0 +Right=300 +Top=0 +Bottom=28 + +[Field 2] +Type=GroupBox +Text=Installation Type +Left=0 +Right=300 +Top=32 +Bottom=136 + +[Field 3] +Type=RadioButton +Text=Automatic +State=1 +Left=6 +Right=52 +Top=44 +Bottom=54 + +[Field 4] +Type=RadioButton +Text=Custom (not recommended) +Left=6 +Right=252 +Top=90 +Bottom=100 + +[Field 5] +Type=Label +Text=The software will be installed in the default directory "Program Files\\Bacula". The configuration files will be generated using defaults applicable to most installations. +Left=17 +Right=295 +Top=58 +Bottom=86 + +[Field 6] +Type=Label +Text=You have more options, but you will have to manually edit your bacula-fd.conf file before Bacula will work. +Left=17 +Right=295 +Top=104 +Bottom=132 diff --git a/src/win32/win64_installer/InstallType.nsh b/src/win32/win64_installer/InstallType.nsh new file mode 100644 index 00000000..cde1791b --- /dev/null +++ b/src/win32/win64_installer/InstallType.nsh @@ -0,0 +1,99 @@ +Function EnterInstallType + Push $R0 + Push $R1 + Push $R2 + + ; Check if this is an upgrade by looking for an uninstaller configured + ; in the registry. 
+ ReadRegStr $R0 HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Bacula" "UninstallString" + + ${If} "$R0" != "" + ; Check registry for new installer + ReadRegStr $R1 HKLM "Software\Bacula" "InstallLocation" + ${If} "$R1" != "" + ; New Installer + StrCpy $OldInstallDir $R1 + StrCpy $InstallType ${UpgradeInstall} + + SetShellVarContext all + + StrCpy $R1 "$APPDATA\Bacula" + StrCpy $R2 "$INSTDIR\Doc" + + ReadRegDWORD $PreviousComponents HKLM "Software\Bacula" "Components" + + WriteINIStr "$PLUGINSDIR\InstallType.ini" "Field 1" "Text" "A previous installation has been found in $OldInstallDir. Please choose the installation type for any additional components you select." + WriteINIStr "$PLUGINSDIR\InstallType.ini" "Field 5" "Text" "The configuration files for additional components will be generated using defaults applicable to most installations." + WriteINIStr "$PLUGINSDIR\InstallType.ini" "Field 6" "Text" "You have more options, but you will have to manually edit your bacula-fd.conf file before Bacula will work." + + ReadRegDWORD $ConfigDirectorDB HKLM Software\Bacula Database + + ${If} $ConfigDirectorDB = 0 + IntOp $R0 $PreviousComponents & ${ComponentDirector} + ${If} $R0 <> 0 + StrCpy $ConfigDirectorDB 1 + ${EndIf} + ${EndIf} + ${Else} + ; Processing Upgrade - Get Install Directory + ${StrRep} $R0 $R0 '"' '' + ${GetParent} $R0 $OldInstallDir + + ; Old Installer + StrCpy $InstallType ${MigrateInstall} + StrCpy $R1 "$OldInstallDir\bin" + StrCpy $R2 "$OldInstallDir\Doc" + + WriteINIStr "$PLUGINSDIR\InstallType.ini" "Field 1" "Text" "An old installation has been found in $OldInstallDir. The Configuration will be migrated. Please choose the installation type for any additional components you select." + WriteINIStr "$PLUGINSDIR\InstallType.ini" "Field 5" "Text" "The software will be installed in the default directory $\"C:\Program Files\Bacula$\". The configuration files for additional components will be generated using defaults applicable to most installations." + WriteINIStr "$PLUGINSDIR\InstallType.ini" "Field 6" "Text" "You have more options, but you will have to manually edit your bacula-fd.conf file before Bacula will work." + ${EndIf} + ${Else} + ; New Install + StrCpy $InstallType ${NewInstall} + WriteINIStr "$PLUGINSDIR\InstallType.ini" "Field 5" "Text" "The software will be installed in the default directory $\"C:\Program Files\Bacula$\". The configuration files will be generated using defaults applicable to most installations." + WriteINIStr "$PLUGINSDIR\InstallType.ini" "Field 6" "Text" "You have more options, but you will have to manually edit your bacula-fd.conf file before Bacula will work." 
+ ${EndIf} + + ${If} $InstallType <> ${NewInstall} + ${AndIf} $PreviousComponents = 0 + ${If} ${FileExists} "$R1\bacula-fd.conf" + IntOp $PreviousComponents $PreviousComponents | ${ComponentFile} + ${EndIf} + ${If} ${FileExists} "$R1\bconsole.conf" + IntOp $PreviousComponents $PreviousComponents | ${ComponentTextConsole} + ${EndIf} + ${If} ${FileExists} "$R1\bat.conf" + IntOp $PreviousComponents $PreviousComponents | ${ComponentBatConsole} + ${EndIf} + ${If} ${FileExists} "$R1\plugins\alldrives-fd.dll" + IntOp $PreviousComponents $PreviousComponents | ${ComponentAllDrivesPlugin} + ${EndIf} + ${If} ${FileExists} "$R1\plugins\exchange-fd.dll" + IntOp $PreviousComponents $PreviousComponents | ${ComponentOldExchangePlugin} + ${EndIf} + ${If} ${FileExists} "$R2\main.pdf" + IntOp $PreviousComponents $PreviousComponents | ${ComponentPDFDocs} + ${EndIf} + ${EndIf} + + !InsertMacro MUI_HEADER_TEXT "$(TITLE_InstallType)" "$(SUBTITLE_InstallType)" + !InsertMacro MUI_INSTALLOPTIONS_INITDIALOG "InstallType.ini" + Pop $HDLG ;HWND of dialog + + !insertmacro MUI_INSTALLOPTIONS_SHOW + + ; Process Results + + !insertmacro MUI_INSTALLOPTIONS_READ $R0 "InstallType.ini" "Field 3" "State" + + ${If} $R0 = 1 + StrCpy $AutomaticInstall 1 + ${Else} + StrCpy $AutomaticInstall 0 + ${EndIf} + + Pop $R2 + Pop $R1 + Pop $R0 +FunctionEnd diff --git a/src/win32/win64_installer/Makefile b/src/win32/win64_installer/Makefile new file mode 100644 index 00000000..98008b90 --- /dev/null +++ b/src/win32/win64_installer/Makefile @@ -0,0 +1,207 @@ +# +# Makefile for win32 bacula executables +# Using MinGW cross-compiler on GNU/Linux +# +# Written by Eric Bollengier, March 2009 +# based on work by Robert Nelson +# + +include ../Makefile.inc + +VERSION := $(shell sed -ne 's/^.define[ \t]VERSION[ \t][ \t]*"\(.*\)"/\1/p' < ../../version.h) +RELEASE ?= $(shell awk '/define RELEASE [0-9]+/ { print $$3 }' ../../version.h) + +DEFINES := \ + -DVERSION=$(VERSION) \ + -DOUT_DIR=$(BUILDDIR)/release64 \ + -DSRC_DIR=release64 \ + -DSRC64_DIR=$(BUILDDIR)/win64_installer/release64 \ + -DSRC32_DIR=$(BUILDDIR)/win32_installer/release32 \ + -DWINVER=64 \ + -DBUILD_TOOLS=MinGW64 \ + -DBUILD_BAT=$(BUILD_BAT) + +INSTALL_EXE := $(BUILDDIR)/release64/winbacula-$(VERSION).exe + +BACULABINARIES := \ + bacula.dll \ + alldrives-fd.dll \ + exchange-fd.dll \ + bsleep.exe \ + bsmtp.exe \ + bconsole.exe \ + bacula-fd.exe \ + bacula-sd.exe \ + bextract.exe \ + bls.exe \ + bcopy.exe + +DEPKGS_BINARIES := \ + openssl.exe \ + sed.exe \ + expr64.exe \ + snooze.exe \ + QtCore4.dll \ + QtGui4.dll \ + pthreadGCE2.dll \ + libwinpthread-1.dll \ + libgcc_s_seh-1.dll \ + libstdc++-6.dll \ + libeay32.dll \ + ssleay32.dll \ + zlib1.dll + +# libgcc_s_dw2-1.dll + +NONGCC_BINARIES := \ + libmysql.dll + +NONGCC_LIBRARIES := \ + libpq.dll + +MINGW_BINARIES := + +SCRIPT_FILES := \ + mtx-changer.cmd \ + disk-changer.cmd \ + dvd-handler.cmd + +CAT_FILES := \ + create_mysql_database.cmd \ + drop_mysql_database.cmd \ + make_mysql_tables.cmd \ + make_mysql_tables.sql \ + drop_mysql_tables.cmd \ + drop_mysql_tables.sql \ + grant_mysql_privileges.cmd \ + grant_mysql_privileges.sql \ + make_mysql_catalog_backup.cmd \ + create_postgresql_database.cmd \ + create_postgresql_database.sql \ + drop_postgresql_database.cmd \ + make_postgresql_tables.cmd \ + make_postgresql_tables.sql \ + drop_postgresql_tables.cmd \ + drop_postgresql_tables.sql \ + grant_postgresql_privileges.cmd \ + grant_postgresql_privileges.sql \ + make_postgresql_catalog_backup.cmd \ + create_sqlite3_database.cmd \ + 
drop_sqlite3_database.cmd \ + make_sqlite3_tables.cmd \ + make_sqlite3_tables.sql \ + drop_sqlite3_tables.cmd \ + grant_sqlite3_privileges.cmd \ + make_sqlite3_catalog_backup.cmd \ + delete_catalog_backup.cmd + +DIRD_FILES := \ + query.sql + +SSL_FILES := \ + openssl.cnf + +LICENSE_FILES := \ + LICENSE + +ifeq ($(bat),no) + BACULA_BINARIES=$(BACULABINARIES) + HELP= +else + BACULA_BINARIES=$(BACULABINARIES) bat.exe bacula-tray-monitor.exe + HELP=help +endif + + +EXTRA= + +########################################################################## + +# Targets + +.PHONY: all clean installer distclean + +all: $(HELP) docs $(INSTALL_EXE) $(EXTRA) + +installer: $(HELP) docs $(INSTALL_EXE) + +distclean: clean + +clean: + @echo "Cleaning `pwd`" + $(CMD_ECHO)-rm -f $(INSTALL_EXE) + $(CMD_ECHO)-rm -rf release64 + +help: + rm -rf release64/help + mkdir -p release64/help + cp -f $(BINDIR)/help/* release64/help/ + +docs: + rm -rf release64/docs + mkdir -p release64/docs/manuals/en/console + mkdir -p release64/docs/manuals/en/main + mkdir -p release64/docs/manuals/en/misc + mkdir -p release64/docs/manuals/en/problems + mkdir -p release64/docs/manuals/en/utility + cp -f $(DOCDIR)/docs/manuals/en/pdf-and-html/console/console.pdf release64/docs/manuals/en/console/ + cp -f $(DOCDIR)/docs/manuals/en/pdf-and-html/main/main.pdf release64/docs/manuals/en/main/ + cp -f $(DOCDIR)/docs/manuals/en/pdf-and-html/misc/misc.pdf release64/docs/manuals/en/misc/ + cp -f $(DOCDIR)/docs/manuals/en/pdf-and-html/problems/problems.pdf release64/docs/manuals/en/problems/ + cp -f $(DOCDIR)/docs/manuals/en/pdf-and-html/utility/utility.pdf release64/docs/manuals/en/utility/ + +# +# Rules +# + +define Convert_Binary +release64/$$(notdir $(1)): $(1) + $$(call checkdir,$$@) + $(ECHO_CMD)cp -f $$^ $$@ +endef + +define Copy_Binary +release64/$$(notdir $(1)): $(1) + $$(call checkdir,$$@) + $(ECHO_CMD)cp -f $$^ $$@ +endef + +define Copy_Docs +release64/$(1): $(DOCDIR)/$(1) + $$(call checkdir,$$@) + $(ECHO_CMD)cp -f $$^ $$(dir $$@) +endef + +define Copy_Licenses +release64/$$(notdir $(1)): $(1) + $$(call checkdir,$$@) + $(ECHO_CMD)cp -f $$^ $$(dir $$@) +endef + +$(foreach file,$(addprefix $(DEPKGS)/bin/, $(DEPKGS_BINARIES)),$(eval $(call Convert_Binary,$(file)))) + +$(foreach file,$(addprefix $(DEPKGS)/bin/, $(NONGCC_BINARIES)),$(eval $(call Copy_Binary,$(file)))) + +$(foreach file,$(addprefix $(DEPKGS)/lib/, $(NONGCC_LIBRARIES)),$(eval $(call Copy_Binary,$(file)))) + +$(foreach file,$(addprefix $(BINDIR)/, $(BACULA_BINARIES)),$(eval $(call Convert_Binary,$(file)))) + +$(foreach file,$(addprefix $(MINGW_DLLDIR)/, $(MINGW_BINARIES)),$(eval $(call Copy_Binary,$(file)))) + +$(foreach file,$(addprefix $(DEPKGS)/ssl/, $(SSL_FILES)),$(eval $(call Copy_Binary,$(file)))) + +$(foreach file,$(addprefix ../scripts/, $(SCRIPT_FILES)),$(eval $(call Copy_Binary,$(file)))) + +$(foreach file,$(addprefix ../cats/, $(CAT_FILES)),$(eval $(call Copy_Binary,$(file)))) + +$(foreach file,$(addprefix ../../dird/, $(DIRD_FILES)),$(eval $(call Copy_Binary,$(file)))) + +$(foreach file,$(addprefix $(MAINDIR)/, $(LICENSE_FILES)),$(eval $(call Copy_Licenses,$(file)))) + +#$(foreach file,$(DOC_FILES),$(eval $(call Copy_Docs,$(file)))) + +$(INSTALL_EXE): winbacula.nsi $(addprefix release64/,$(BACULA_BINARIES) $(DEPKGS_BINARIES) $(SSL_FILES) $(LICENSE_FILES)) + makensis -V3 $(DEFINES) winbacula.nsi + echo " " + +include $(BUILDDIR)/Makefile.rules diff --git a/src/win32/win64_installer/Readme.txt b/src/win32/win64_installer/Readme.txt new file mode 100755 index 
00000000..2e5a3d1d --- /dev/null +++ b/src/win32/win64_installer/Readme.txt @@ -0,0 +1,50 @@ +Bacula - Windows Version Disclaimer +=================================== + +Please note that only the Win64 Client (File daemon) is supported. +The other components (Director, Storage daemon, +their utilities) are not provided because they have not been ported. + +Note: the Win64 Client can only be installed on 64 bit Windows operating +systems. + + +Bacula - Windows Version Notes +============================== + +These notes highlight how the Windows version of Bacula differs from the +other versions. It also provides additional notes that supplement the documentation. + +For detailed documentation on using, configuring and troubleshooting Bacula, +please consult the installed documentation or the online documentation at +http://www.bacula.org/?page=documentation. + + +Start Menu Items +---------------- +A number of menu items have been created in the Start menu under All Programs +in the Bacula submenu. They may be selected to edit the configuration files, +view the documentation or run one of the console or utility programs. The +choices available will vary depending on the options you chose to install. + + +File Locations +-------------- +Everything is installed in the directory +"C:\Program Files\Bacula" unless a different directory was selected during +installation. Note: due to a bug in the NSIS installer we are using, on +64 bit machines it appears to install Bacula in "C:\Program Files (x86)\Bacula". + +Code Page Problems +------------------ +Please note that Bacula expects the contents of the configuration files to be +written in UTF-8 format. Some translations of "Application Data" have accented +characters, and apparently the installer writes this translated data in the +standard Windows code page coding. This occurs for the Working Directory, and +when it happens the daemon will not start since Bacula cannot find the directory. +The workaround is to manually edit the appropriate conf file and ensure that it +is written out in UTF-8 format. + +The conf files can be edited with any UTF-8 compatible editor, or on most +modern Windows machines, you can edit them with Notepad, then choose UTF-8 +output encoding before saving them. diff --git a/src/win32/win64_installer/Start.bat b/src/win32/win64_installer/Start.bat new file mode 100644 index 00000000..0b61f7ec --- /dev/null +++ b/src/win32/win64_installer/Start.bat @@ -0,0 +1,5 @@ +rem +rem Bacula start file for Win95/98/Me +rem +cd c:\bacula\bin +c:\bacula\bin\bacula-fd.exe /service -c c:\bacula\bin\bacula-fd.conf diff --git a/src/win32/win64_installer/Stop.bat b/src/win32/win64_installer/Stop.bat new file mode 100644 index 00000000..3b1d0e5b --- /dev/null +++ b/src/win32/win64_installer/Stop.bat @@ -0,0 +1,5 @@ +rem +rem Bacula stop file for Win95/98/Me +rem +cd c:\bacula\bin +c:\bacula\bin\bacula-fd.exe /kill diff --git a/src/win32/win64_installer/WriteTemplates.ini b/src/win32/win64_installer/WriteTemplates.ini new file mode 100644 index 00000000..3b3631c3 --- /dev/null +++ b/src/win32/win64_installer/WriteTemplates.ini @@ -0,0 +1,30 @@ +[Settings] +NumFields=3 +CancelEnabled=0 +BackEnabled=0 + +[Field 1] +Type="Label" +Text="A Template of the Client resource can be generated that contains the information about this Client. This template can then be copied to the Director computer and included in the Director's configuration file." 
+Left=7 +Right=293 +Top=6 +Bottom=32 + +[Field 2] +Type="CheckBox" +Text="Save Client template in:" +Left=6 +Right=240 +Top=38 +Bottom=48 + +[Field 3] +Type="FileRequest" +State="Client.conf" +Flags= +Filter=Configuration Files|*.conf|All Files|*.* +Left=16 +Right=288 +Top=50 +Bottom=62 diff --git a/src/win32/win64_installer/bacula-dir.conf.in b/src/win32/win64_installer/bacula-dir.conf.in new file mode 100644 index 00000000..35c4dddf --- /dev/null +++ b/src/win32/win64_installer/bacula-dir.conf.in @@ -0,0 +1,383 @@ +# +# Default Bacula Director Configuration file +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# The only thing that MUST be changed is to add one or more +# file or directory names in the Include directive of the +# FileSet resource. +# +# For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ @DISTVER@ +# +# You might also want to change the default email address +# from root to your address. See the "mail" and "operator" +# directives in the Messages resource. +# + +Director { # define myself + Name = @director_name@ + DIRport = @director_port@ # where we listen for UA connections + QueryFile = "@bin_dir@\\query.sql" + WorkingDirectory = "@working_dir@" + PidDirectory = "@working_dir@" + Maximum Concurrent Jobs = @director_maxjobs@ + Password = "@director_password@" # Console password + Messages = Daemon +} + +JobDefs { + Name = "DefaultJob" + Type = Backup + Level = Incremental + Client = @client_name@ + FileSet = "Test Set" + Schedule = "WeeklyCycle" + Storage = File + Messages = Standard + Pool = Default + Priority = 10 +} + + +# +# Define the main nightly save backup job +# By default, this job will back up to disk in C:/tmp +Job { + Name = "Client1" + JobDefs = "DefaultJob" + Write Bootstrap = "@working_dir@\\Client1.bsr" +} + +#Job { +# Name = "Client2" +# Client = @client_name@2 +# JobDefs = "DefaultJob" +# Write Bootstrap = "@working_dir@\\Client2.bsr" +#} + +# Backup the catalog database (after the nightly save) +Job { + Name = "BackupCatalog" + JobDefs = "DefaultJob" + Level = Full + FileSet="Catalog" + Schedule = "WeeklyCycleAfterBackup" + # This creates an ASCII copy of the catalog + RunBeforeJob = "\"@bin_dir@\\make_catalog_backup\" bacula bacula" + # This deletes the copy of the catalog + RunAfterJob = "\"@bin_dir@\\delete_catalog_backup\"" + Write Bootstrap = "@working_dir@\\BackupCatalog.bsr" + Priority = 11 # run after main backup +} + +# +# Standard Restore template, to be changed by Console program +# Only one such job is needed for all Jobs/Clients/Storage ... +# +Job { + Name = "RestoreFiles" + Type = Restore + Client=@client_name@ + FileSet="Test Set" + Storage = File + Pool = Default + Messages = Standard + Where = "C:\\tmp\\bacula-restores" +} + +# +# Note: Windows path separators do NOT work correctly in FileSets. 
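+# Use forward slashes in the paths instead, for example:
+#   File = "C:/Users"
+#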
+# +# List of files to be backed up +FileSet { + Name = "Test Set" + Include { + Options { + signature = MD5 + ignore case = yes + } +# +# Put your list of files here, preceded by 'File =', one per line +# or include an external list with: +# +# File = + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/win32/win64_installer/storage.conf.in b/src/win32/win64_installer/storage.conf.in new file mode 100644 index 00000000..e4fe6665 --- /dev/null +++ b/src/win32/win64_installer/storage.conf.in @@ -0,0 +1,14 @@ +# +# Copyright (C) 2000-2018 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# Definition of file storage device +Storage { + Name = File +# Do not use "localhost" here + Address = @storage_address@ # N.B. Use a fully qualified name here + SDPort = @storage_port@ + Password = "@storage_password@" + Device = FileStorage + Media Type = File +} diff --git a/src/win32/win64_installer/tray-monitor.conf.in b/src/win32/win64_installer/tray-monitor.conf.in new file mode 100644 index 00000000..55ff2d42 --- /dev/null +++ b/src/win32/win64_installer/tray-monitor.conf.in @@ -0,0 +1,34 @@ +# +# Copyright (C) 2000-2018 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# +# Bacula Tray Monitor Configuration File +# + +Monitor { + Name = @monitor_name@ + Password = "@mon_password@" # password for the Directors + RefreshInterval = 30 seconds +} + +Client { + Name = @client_name@ + Address = localhost + FDPort = @client_port@ + Password = "@monitor_password@" +} + +#Storage { +# Name = @basename@-sd +# Address = @hostname@ +# SDPort = @sd_port@ +# Password = "@mon_sd_password@" # password for StorageDaemon +#} +# +#Director { +# Name = @basename@-dir +# DIRport = @dir_port@ +# address = @hostname@ +#} +# diff --git a/src/win32/win64_installer/winbacula.nsi b/src/win32/win64_installer/winbacula.nsi new file mode 100644 index 00000000..fd82c831 --- /dev/null +++ b/src/win32/win64_installer/winbacula.nsi @@ -0,0 +1,1223 @@ +# +# Copyright (C) 2000-2018 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +##{{NSIS_PLUS_BEGIN_PROJECT_SETTINGS}}## +#NAME "Release" +# CMD -DSRC_DIR=release64 -DSRC32_DIR=..\win32_installer\release32 -DSRC64_DIR=..\win64_installer\release64 -DOUT_DIR=release64 -DWINVER=64 -DVERSION=9.0.7 -DBUILD_TOOLS=NSIS-3.0b0 +# EXE C:\Program Files (x86)\NSIS\makensis.exe +# FLAGS 2 +##{{NSIS_PLUS_END_PROJECT_SETTINGS}}## + +; winbacula.nsi +; +; Began as a version written by Michel Meyers (michel@tcnnet.dyndns.org) +; +; Adapted by Kern Sibbald for native Win32 Bacula +; added a number of elements from Christopher Hull's installer +; +; D. Scott Barninger Nov 13 2004 +; added configuration editing for bconsole.conf and bwx-console.conf +; better explanation in dialog boxes for editing config files +; added Start Menu items +; fix uninstall of config files to do all not just bacula-fd.conf +; +; D. Scott Barninger Dec 05 2004 +; added specification of default permissions for bacula-fd.conf +; - thanks to Jamie Ffolliott for pointing me at cacls +; added removal of working-dir files if user selects to remove config +; uninstall is now 100% clean +; +; D. 
Scott Barninger Apr 17 2005 +; 1.36.3 release docs update +; add pdf manual and menu shortcut +; +; Robert Nelson May 15 2006 +; Added server installs and implemented Microsoft install locations +; Use LogicLib.nsh +; Added Bacula-SD and Bacula-DIR +; Replaced ParameterGiven with standard GetOptions +; +; Kern Sibbald October 2008 +; Remove server installs +; Install into single bacula directory +; (i.e. undo a large part of what Robert Nelson did) +; +; Eric Bollengier March 2009 +; Updated to handle Win64 installation +; +; Kern Sibbald April 2009 +; Correct some Win64 install problems +; It is mind boggling how many lines of this insane scripting language +; have been written with absolutely no comments +; +; Command line options: +; +; /service - +; /start +; +; netsh firewall add portopening protocol=tcp port=9102 name="Bacula-FD" + + +!define PRODUCT "Bacula" + +; +; Include the Modern UI +; + +!include "MUI.nsh" +!include "LogicLib.nsh" +!include "FileFunc.nsh" +!include "Sections.nsh" +!include "StrFunc.nsh" +!include "WinMessages.nsh" +!include "x64.nsh" + +; +; Basics +; +Name "Bacula" +OutFile "${OUT_DIR}\bacula-win${WINVER}-${VERSION}.exe" +SetCompressor lzma +Caption "Bacula 64 bit Edition ${VERSION}" +VIProductVersion ${VERSION}.1 +VIAddVersionKey CompanyName "Bacula Project" +VIAddVersionKey LegalCopyright "Kern Sibbald" +VIAddVersionKey FileDescription "Bacula network backup and restore" +VIAddVersionKey FileVersion win${WINVER}-${VERSION} +VIAddVersionKey ProductVersion win${WINVER}-${VERSION} +VIAddVersionKey ProductName "Bacula" +VIAddVersionKey InternalName "Bacula" +VIAddVersionKey LegalTrademarks "Bacula is a registered trademark of Kern Sibbald" +VIAddVersionKey OriginalFilename "bacula.exe" + +InstallDir "C:\Program Files\Bacula" +InstallDirRegKey HKLM "Software\Bacula" "InstallLocation" + +InstType "Client" +InstType "Server" +;InstType "Full" + +!insertmacro GetParent + +${StrCase} +${StrRep} +${StrTok} +${StrTrimNewLines} + +; +; Pull in pages +; + +!define MUI_COMPONENTSPAGE_SMALLDESC + +!define MUI_HEADERIMAGE +!define MUI_BGCOLOR 739AB9 +!define MUI_HEADERIMAGE_BITMAP "bacula-logo.bmp" + +!InsertMacro MUI_PAGE_WELCOME +!InsertMacro MUI_PAGE_LICENSE "${SRC_DIR}\LICENSE" +Page custom EnterInstallType +!define MUI_PAGE_CUSTOMFUNCTION_SHOW PageComponentsShow +!InsertMacro MUI_PAGE_COMPONENTS +!define MUI_PAGE_CUSTOMFUNCTION_PRE PageDirectoryPre +!InsertMacro MUI_PAGE_DIRECTORY +Page custom EnterConfigPage1 LeaveConfigPage1 +Page custom EnterConfigPage2 LeaveConfigPage2 +!Define MUI_PAGE_CUSTOMFUNCTION_LEAVE LeaveInstallPage +!InsertMacro MUI_PAGE_INSTFILES +Page custom EnterWriteTemplates +!Define MUI_FINISHPAGE_SHOWREADME $INSTDIR\Readme.txt +!InsertMacro MUI_PAGE_FINISH + +!InsertMacro MUI_UNPAGE_WELCOME +!InsertMacro MUI_UNPAGE_CONFIRM +!InsertMacro MUI_UNPAGE_INSTFILES +!InsertMacro MUI_UNPAGE_FINISH + +!define MUI_ABORTWARNING + +!InsertMacro MUI_LANGUAGE "English" + +!InsertMacro GetParameters +!InsertMacro GetOptions + +DirText "Setup will install Bacula 64 bit ${VERSION} to the directory specified below. To install in a different folder, click Browse and select another folder." 
+ +!InsertMacro MUI_RESERVEFILE_INSTALLOPTIONS +; +; Global Variables +; +Var OptService +Var OptStart +Var OptSilent + +Var CommonFilesDone + +Var OsIsNT + +Var HostName + +Var ConfigClientName +Var ConfigClientPort +Var ConfigClientMaxJobs +Var ConfigClientPassword +Var ConfigClientInstallService +Var ConfigClientStartService + +Var ConfigStorageName +Var ConfigStoragePort +Var ConfigStorageMaxJobs +Var ConfigStoragePassword +Var ConfigStorageInstallService +Var ConfigStorageStartService + +Var ConfigDirectorName +Var ConfigDirectorPort +Var ConfigDirectorMaxJobs +Var ConfigDirectorPassword +Var ConfigDirectorAddress +Var ConfigDirectorMailServer +Var ConfigDirectorMailAddress +Var ConfigDirectorDB +Var ConfigDirectorInstallService +Var ConfigDirectorStartService + +Var ConfigMonitorName +Var ConfigMonitorPassword + +Var LocalDirectorPassword +Var LocalHostAddress + +Var MySQLPath +Var MySQLVersion +Var PostgreSQLPath +Var PostgreSQLVersion + +Var AutomaticInstall +Var InstallType + +!define NewInstall 0 +!define UpgradeInstall 1 +!define MigrateInstall 2 + +Var OldInstallDir +Var PreviousComponents +Var NewComponents + +; Bit 0 = File Service +; 1 = Storage Service +; 2 = Director Service +; 3 = Command Console +; 4 = Bat Console +; 5 = wxWidgits Console +; 6 = Documentation (PDF) +; 7 = Documentation (HTML) +; 8 = alldrives Plugin +; 9 = Old Exchange Plugin +; 10 = Tray Monitor +; 11 = winbmr Plugin (not implemented in community version) + +!define ComponentFile 1 +!define ComponentStorage 2 +!define ComponentDirector 4 +!define ComponentTextConsole 8 +!define ComponentBatConsole 16 +!define ComponentGUIConsole 32 +!define ComponentPDFDocs 64 +!define ComponentHTMLDocs 128 +!define ComponentAllDrivesPlugin 256 +!define ComponentOldExchangePlugin 512 +!define ComponentTrayMonitor 1024 + +!define ComponentsRequiringUserConfig 63 +!define ComponentsFileAndStorage 3 +!define ComponentsFileAndStorageAndDirector 7 +!define ComponentsDirectorAndTextGuiConsoles 60 +!define ComponentsTextAndGuiConsoles 56 + +Var HDLG +Var HCTL + +Function .onInit + Push $R0 + Push $R1 + + ;LogSet on + + ; Process Command Line Options + StrCpy $OptService 1 + StrCpy $OptStart 1 + StrCpy $OptSilent 0 + StrCpy $CommonFilesDone 0 + StrCpy $OsIsNT 0 + StrCpy $AutomaticInstall 0 + StrCpy $InstallType ${NewInstall} + StrCpy $OldInstallDir "" + StrCpy $PreviousComponents 0 + StrCpy $NewComponents 0 + StrCpy $MySQLPath "" + StrCpy $MySQLVersion "" + StrCpy $PostgreSQLPath "" + StrCpy $PostgreSQLVersion "" + StrCpy $LocalDirectorPassword "" + + ${GetParameters} $R0 + + ClearErrors + + ${If} ${RunningX64} + ${Else} + MessageBox MB_OK "This is a 64 bit installer, but the OS is not an x64 -- Aborting ..." 
/SD IDOK + Abort + ${EndIf} + + ${GetOptions} $R0 "/noservice" $R1 + IfErrors +2 + StrCpy $OptService 0 + + ClearErrors + ${GetOptions} $R0 "/nostart" $R1 + IfErrors +2 + StrCpy $OptStart 0 + + IfSilent 0 +2 + StrCpy $OptSilent 1 + + ReadRegStr $R0 HKLM "SOFTWARE\Microsoft\Windows NT\CurrentVersion" CurrentVersion + ${If} $R0 != "" + StrCpy $OsIsNT 1 + ${EndIf} + + Call GetComputerName + Pop $HostName + + Call GetHostName + Pop $LocalHostAddress + + Call GetUserName + + ; Configuration Defaults + + StrCpy $ConfigClientName "$HostName-fd" + StrCpy $ConfigClientPort 9102 + StrCpy $ConfigClientMaxJobs 10 + ;StrCpy $ConfigClientPassword + StrCpy $ConfigClientInstallService "$OptService" + StrCpy $ConfigClientStartService "$OptStart" + + StrCpy $ConfigStorageName "$HostName-sd" + StrCpy $ConfigStoragePort 9103 + StrCpy $ConfigStorageMaxJobs 10 + ;StrCpy $ConfigStoragePassword + StrCpy $ConfigStorageInstallService "$OptService" + StrCpy $ConfigStorageStartService "$OptStart" + StrCpy $ConfigDirectorPort 9101 + + StrCpy $ConfigMonitorName "$HostName-mon" + ;StrCpy $ConfigMonitorPassword + +; TEMP refers to temporary helper programs and not Bacula plugins! +; InitTEMP + CreateDirectory "$INSTDIR" + CreateDirectory "$INSTDIR\working" + File "/oname=$INSTDIR\working\openssl.exe" "${SRC_DIR}\openssl.exe" + File "/oname=$INSTDIR\working\libeay32.dll" "${SRC_DIR}\libeay32.dll" + File "/oname=$INSTDIR\working\ssleay32.dll" "${SRC_DIR}\ssleay32.dll" + File "/oname=$INSTDIR\working\sed.exe" "${SRC_DIR}\sed.exe" + + !InsertMacro MUI_INSTALLOPTIONS_EXTRACT "InstallType.ini" + !InsertMacro MUI_INSTALLOPTIONS_EXTRACT "WriteTemplates.ini" + + SetPluginUnload alwaysoff + +; Generate random File daemon password + nsExec::Exec '"$INSTDIR\working\openssl.exe" rand -base64 -out $INSTDIR\working\pw.txt 33' + pop $R0 + ${If} $R0 = 0 + FileOpen $R1 "$INSTDIR\working\pw.txt" r + IfErrors +4 + FileRead $R1 $R0 + ${StrTrimNewLines} $ConfigClientPassword $R0 + FileClose $R1 + ${EndIf} + +; Generate random Storage daemon password + nsExec::Exec '"$INSTDIR\working\openssl.exe" rand -base64 -out $INSTDIR\working\pw.txt 33' + pop $R0 + ${If} $R0 = 0 + FileOpen $R1 "$INSTDIR\working\pw.txt" r + IfErrors +4 + FileRead $R1 $R0 + ${StrTrimNewLines} $ConfigStoragePassword $R0 + FileClose $R1 + ${EndIf} + + SetPluginUnload manual + +; Generate random monitor password + nsExec::Exec '"$INSTDIR\working\openssl.exe" rand -base64 -out $INSTDIR\working\pw.txt 33' + pop $R0 + ${If} $R0 = 0 + FileOpen $R1 "$INSTDIR\working\pw.txt" r + IfErrors +4 + FileRead $R1 $R0 + ${StrTrimNewLines} $ConfigMonitorPassword $R0 + FileClose $R1 + ${EndIf} + + Pop $R1 + Pop $R0 +FunctionEnd + +Function .onSelChange + Call UpdateComponentUI +FunctionEnd + +Function InstallCommonFiles + ${If} $CommonFilesDone = 0 + SetOutPath "$INSTDIR" + File "Readme.txt" + + SetOutPath "$INSTDIR" +!if "${BUILD_TOOLS}" == "MinGW32" + File "${SRC_DIR}\mingwm10.dll" +!endif + File "${SRC_DIR}\libwinpthread-1.dll" + File "${SRC_DIR}\pthreadGCE2.dll" + File "${SRC_DIR}\libgcc_s_seh-1.dll" + File "${SRC_DIR}\libstdc++-6.dll" + File "${SRC_DIR}\ssleay32.dll" + File "${SRC_DIR}\libeay32.dll" + File "${SRC_DIR}\zlib1.dll" + File "${SRC_DIR}\bacula.dll" + + File "/oname=$INSTDIR\openssl.cnf" "${SRC_DIR}\openssl.cnf" + File "${SRC_DIR}\openssl.exe" + File "${SRC_DIR}\bsleep.exe" + File "${SRC_DIR}\bsmtp.exe" + File "${SRC_DIR}\expr64.exe" + File "${SRC_DIR}\snooze.exe" + + CreateShortCut "$SMPROGRAMS\Bacula\Documentation\View Readme.lnk" "write.exe" '"$INSTDIR\Readme.txt"' + + StrCpy 
$CommonFilesDone 1 + ${EndIf} +FunctionEnd + +Section "-Initialize" + + WriteRegStr HKLM Software\Bacula InstallLocation "$INSTDIR" + + Call GetSelectedComponents + Pop $R2 + WriteRegDWORD HKLM Software\Bacula Components $R2 + + ; remove start menu items + SetShellVarContext all + + Delete /REBOOTOK "$SMPROGRAMS\Bacula\Configuration\*" + Delete /REBOOTOK "$SMPROGRAMS\Bacula\Documentation\*" + Delete /REBOOTOK "$SMPROGRAMS\Bacula\*" + RMDir "$SMPROGRAMS\Bacula\Configuration" + RMDir "$SMPROGRAMS\Bacula\Documentation" + RMDir "$SMPROGRAMS\Bacula" + CreateDirectory "$SMPROGRAMS\Bacula" + CreateDirectory "$SMPROGRAMS\Bacula\Configuration" + CreateDirectory "$SMPROGRAMS\Bacula\Documentation" + + CreateDirectory "$INSTDIR" + CreateDirectory "$INSTDIR\working" + CreateDirectory "$INSTDIR\plugins" + + SetOutPath "$INSTDIR" + File "${SRC_DIR}\LICENSE" + Delete /REBOOTOK "$INSTDIR\License.txt" + +; Output a series of SED commands to configure the .conf file(s) + FileOpen $R1 $INSTDIR\working\config.sed w + FileWrite $R1 "s;@VERSION@;${VERSION};g$\r$\n" + FileWrite $R1 "s;@DATE@;${__DATE__};g$\r$\n" + FileWrite $R1 "s;@DISTNAME@;Windows;g$\r$\n" + + StrCpy $R2 ${BUILD_TOOLS} + + Call GetHostName + Exch $R3 + Pop $R3 + + FileWrite $R1 "s;@DISTVER@;$R2;g$\r$\n" + + ${StrRep} $R2 "$INSTDIR\working" "\" "\\\\" + FileWrite $R1 's;@working_dir@;$R2;g$\r$\n' + + ${StrRep} $R2 "$INSTDIR" "\" "\\\\" + FileWrite $R1 's;@bin_dir@;$R2;g$\r$\n' + ${StrRep} $R2 "$INSTDIR" "\" "\\" + FileWrite $R1 's;@bin_dir_cmd@;$R2;g$\r$\n' + + ${StrRep} $R2 "$INSTDIR\plugins" "\" "\\\\" + FileWrite $R1 's;@fdplugins_dir@;$R2;g$\r$\n' + + ${StrRep} $R2 "$INSTDIR" "\" "/" + FileWrite $R1 "s;@BUILD_DIR@;$R2;g$\r$\n" + + FileWrite $R1 "s;@client_address@;$LocalHostAddress;g$\r$\n" + FileWrite $R1 "s;@client_name@;$ConfigClientName;g$\r$\n" + FileWrite $R1 "s;@client_port@;$ConfigClientPort;g$\r$\n" + FileWrite $R1 "s;@client_maxjobs@;$ConfigClientMaxJobs;g$\r$\n" + FileWrite $R1 "s;@client_password@;$ConfigClientPassword;g$\r$\n" + FileWrite $R1 "s;@storage_address@;$LocalHostAddress;g$\r$\n" + FileWrite $R1 "s;@storage_name@;$ConfigStorageName;g$\r$\n" + FileWrite $R1 "s;@storage_port@;$ConfigStoragePort;g$\r$\n" + FileWrite $R1 "s;@storage_maxjobs@;$ConfigStorageMaxJobs;g$\r$\n" + FileWrite $R1 "s;@storage_password@;$ConfigStoragePassword;g$\r$\n" + FileWrite $R1 "s;@director_name@;$ConfigDirectorName;g$\r$\n" + FileWrite $R1 "s;@director_port@;$ConfigDirectorPort;g$\r$\n" + FileWrite $R1 "s;@director_password@;$ConfigDirectorPassword;g$\r$\n" + FileWrite $R1 "s;@director_address@;$ConfigDirectorAddress;g$\r$\n" + FileWrite $R1 "s;@monitor_name@;$ConfigMonitorName;g$\r$\n" + FileWrite $R1 "s;@monitor_password@;$ConfigMonitorPassword;g$\r$\n" + + FileClose $R1 + + ${If} ${FileExists} "$OldInstallDir\bacula-fd.exe" + nsExec::ExecToLog '"$OldInstallDir\bacula-fd.exe" /kill' ; Shutdown any bacula that could be running + nsExec::ExecToLog '"$OldInstallDir\bacula-fd.exe" /kill' ; Shutdown any bacula that could be running + Sleep 1000 + nsExec::ExecToLog '"$OldInstallDir\bacula-fd.exe" /remove' ; Remove existing service + ${EndIf} + + ${If} ${FileExists} "$INSTDIR\bacula-fd.exe" + nsExec::ExecToLog '"$INSTDIR\bacula-fd.exe" /kill' ; Shutdown any bacula that could be running + nsExec::Exec /TIMEOUT=200 'net stop bacula-fd' + ${EndIf} + + ${If} ${FileExists} "$OldInstallDir\bin\bacula-sd.exe" + nsExec::ExecToLog '"$OldInstallDir\bin\bacula-sd.exe" /kill' ; Shutdown any bacula that could be running + nsExec::ExecToLog 
'"$OldInstallDir\bin\bacula-sd.exe" /kill' ; Shutdown any bacula that could be running + nsExec::Exec /TIMEOUT=200 'net stop bacula-sd' + Sleep 1000 + nsExec::ExecToLog '"$OldInstallDir\bin\bacula-sd.exe" /remove' ; Remove existing service + ${EndIf} + + ${If} ${FileExists} "$INSTDIR\bacula-sd.exe" + nsExec::ExecToLog '"$INSTDIR\bacula-sd.exe" /kill' ; Shutdown any bacula that could be running + nsExec::Exec /TIMEOUT=200 'net stop bacula-sd' + ${EndIf} + Sleep 1000 + + +SectionEnd + +SectionGroup "Client" SecGroupClient + +Section "File Service" SecFileDaemon + SectionIn 1 2 3 + + SetOutPath "$INSTDIR" + + File "${SRC_DIR}\bacula-fd.exe" + Delete "$INSTDIR\working\bacula-fd.conf.in" + File "/oname=$INSTDIR\working\bacula-fd.conf.in" "bacula-fd.conf.in" + + StrCpy $0 "$INSTDIR" + StrCpy $1 bacula-fd.conf + Call ConfigEditAndCopy + + StrCpy $0 bacula-fd + StrCpy $1 "File Service" + StrCpy $2 $ConfigClientInstallService + StrCpy $3 $ConfigClientStartService + + Call InstallDaemon + + CreateShortCut "$SMPROGRAMS\Bacula\Configuration\Edit Client Configuration.lnk" "write.exe" '"$INSTDIR\bacula-fd.conf"' +SectionEnd + +SectionGroupEnd + +SectionGroup "Server" SecGroupServer + +Section "Storage Service" SecStorageDaemon + SectionIn 2 3 + + SetOutPath "$INSTDIR" + + File "${SRC_DIR}\bacula-sd.exe" + File "${SRC_DIR}\bcopy.exe" + File "${SRC_DIR}\bextract.exe" + File "${SRC_DIR}\bls.exe" + + File "/oname=$INSTDIR\working\bacula-sd.conf.in" "bacula-sd.conf.in" + + StrCpy $0 "$INSTDIR" + StrCpy $1 bacula-sd.conf + Call ConfigEditAndCopy + +# File "${SRC_DIR}\loaderinfo.exe" +# File "${SRC_DIR}\mt.exe" +# File "${SRC_DIR}\mtx.exe" +# File "${SRC_DIR}\scsitape.exe" +# File "${SRC_DIR}\tapeinfo.exe" +# File "${SRC_DIR}\bscan.exe" +# File "${SRC_DIR}\btape.exe" +# File "${SRC_DIR}\scsilist.exe" +# File "${SRC_DIR}\mkisofs.exe" +# File "${SRC_DIR}\growisofs.exe" + +# File "/oname=$INSTDIR\working\mtx-changer.cmd" "scripts\mtx-changer.cmd" + +# StrCpy $0 "$INSTDIR\bin" +# StrCpy $1 mtx-changer.cmd +# Call ConfigEditAndCopy + +# File "/oname=$INSTDIR\working\disk-changer.cmd" "scripts\disk-changer.cmd" + +# StrCpy $0 "$INSTDIR" +# StrCpy $1 disk-changer.cmd +# Call ConfigEditAndCopy + + StrCpy $0 bacula-sd + StrCpy $1 "Storage Service" + StrCpy $2 $ConfigStorageInstallService + StrCpy $3 $ConfigStorageStartService + Call InstallDaemon + +# CreateShortCut "$SMPROGRAMS\Bacula\Configuration\List Devices.lnk" "$INSTDIR\bin\scsilist.exe" "/pause" + CreateShortCut "$SMPROGRAMS\Bacula\Configuration\Edit Storage Configuration.lnk" "write.exe" '"$INSTDIR\bacula-sd.conf"' +SectionEnd + +SectionGroupEnd + +SectionGroup "Consoles" SecGroupConsoles + +Section "Command Console" SecConsole + SectionIn 1 2 3 + + SetOutPath "$INSTDIR" + + File "${SRC_DIR}\bconsole.exe" + Call InstallCommonFiles + + File "/oname=$INSTDIR\working\bconsole.conf.in" "bconsole.conf.in" + StrCpy $0 "$INSTDIR" + StrCpy $1 bconsole.conf + Call ConfigEditAndCopy + + CreateShortCut "$SMPROGRAMS\Bacula\bconsole.lnk" "$INSTDIR\bconsole.exe" '-c "$INSTDIR\bconsole.conf"' "$INSTDIR\bconsole.exe" 0 + CreateShortCut "$SMPROGRAMS\Bacula\Configuration\Edit Command Console Configuration.lnk" "write.exe" '"$INSTDIR\bconsole.conf"' + +SectionEnd + +Section "Bat Console" SecBatConsole + SectionIn 1 2 3 + + SetOutPath "$INSTDIR" + +!if "${BUILD_BAT}" == "yes" + Call InstallCommonFiles + File "${SRC64_DIR}\QtCore4.dll" + File "${SRC64_DIR}\QtGui4.dll" + File "${SRC64_DIR}\bat.exe" + + File "/oname=$INSTDIR\working\bat.conf.in" "bat.conf.in" + StrCpy $0 "$INSTDIR" 
+ StrCpy $1 bat.conf + Call ConfigEditAndCopy + + SetOutPath "$INSTDIR\help" + File "${SRC64_DIR}\help\*" + SetOutPath "$INSTDIR" + + ; Create Start Menu entry + CreateShortCut "$SMPROGRAMS\Bacula\Bat.lnk" "$INSTDIR\bat.exe" '-c "$INSTDIR\bat.conf"' "$INSTDIR\bat.exe" 0 + CreateShortCut "$SMPROGRAMS\Bacula\Configuration\Edit Bat Configuration.lnk" "write.exe" '"$INSTDIR\bat.conf"' + SetOutPath "$INSTDIR" +!endif + +SectionEnd + +Section "Tray Monitor" SecTrayMonitor + SectionIn 1 2 3 + + SetOutPath "$INSTDIR" + +!if "${BUILD_BAT}" == "yes" + Call InstallCommonFiles + File "${SRC64_DIR}\QtCore4.dll" + File "${SRC64_DIR}\QtGui4.dll" + File "${SRC64_DIR}\bacula-tray-monitor.exe" + + File "/oname=$INSTDIR\working\bacula-tray-monitor.conf.in" "bacula-tray-monitor.conf.in" + StrCpy $0 "$INSTDIR" + StrCpy $1 bacula-tray-monitor.conf + Call ConfigEditAndCopy + + ; Create Start Menu entry + CreateShortCut "$SMPROGRAMS\Bacula\TrayMonitor.lnk" "$INSTDIR\bacula-tray-monitor.exe" "" "$INSTDIR\bacula-tray-monitor.exe" 0 + SetOutPath "$INSTDIR" +!endif + +SectionEnd + +SectionGroupEnd + +SectionGroup "Plugins" SecGroupPlugins + +Section "alldrives Plugin" SecAllDrivesPlugin + SectionIn 1 2 3 + + SetOutPath "$INSTDIR\plugins" + File "${SRC_DIR}\alldrives-fd.dll" + SetOutPath "$INSTDIR" + +SectionEnd + +Section "Old (deprecated) Exchange Plugin" SecOldExchangePlugin + SectionIn 1 2 3 + + SetOutPath "$INSTDIR\plugins" + File "${SRC_DIR}\exchange-fd.dll" + SetOutPath "$INSTDIR" + +SectionEnd + +SectionGroupEnd + +Section "-Finish" + Push $R0 + + ${If} $OsIsNT = 1 + nsExec::ExecToLog 'cmd.exe /C echo Y|cacls "$INSTDIR\bacula-fd.conf" /G SYSTEM:F Administrators:F' + nsExec::ExecToLog 'cmd.exe /C echo Y|cacls "$INSTDIR\bacula-sd.conf" /G SYSTEM:F Administrators:F' + nsExec::ExecToLog 'cmd.exe /C echo Y|cacls "$INSTDIR\bat.conf" /G SYSTEM:F Administrators:F' + ${EndIf} + + ; Write the uninstall keys for Windows & create Start Menu entry + WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Bacula" "DisplayName" "Bacula" + WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Bacula" "InstallLocation" "$INSTDIR" + WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Bacula" "DisplayVersion" "${VERSION}" + ${StrTok} $R0 "${VERSION}" "." 0 0 + WriteRegDWORD HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Bacula" "VersionMajor" $R0 + ${StrTok} $R0 "${VERSION}" "." 
1 0 + WriteRegDWORD HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Bacula" "VersionMinor" $R0 + WriteRegDWORD HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Bacula" "NoModify" 1 + WriteRegDWORD HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Bacula" "NoRepair" 1 + WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Bacula" "URLUpdateInfo" "http://www.bacula.org" + WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Bacula" "URLInfoAbout" "http://www.bacula.org" + WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Bacula" "HelpLink" "http://www.bacula.org?page=support" + WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Bacula" "UninstallString" '"$INSTDIR\uninstall.exe"' + WriteUninstaller "$INSTDIR\Uninstall.exe" + CreateShortCut "$SMPROGRAMS\Bacula\Uninstall Bacula.lnk" "$INSTDIR\Uninstall.exe" "" "$INSTDIR\Uninstall.exe" 0 + + ${If} $OsIsNT = 1 + nsExec::ExecToLog 'net start bacula-fd' + nsExec::ExecToLog 'net start bacula-sd' + ${Else} + Exec '"$INSTDIR\bacula-fd.exe" /service -c "$INSTDIR\bacula-fd.conf"' + Exec '"$INSTDIR\bacula-sd.exe" /service -c "$INSTDIR\bacula-sd.conf"' + ${EndIf} + + Pop $R0 +SectionEnd + +; Extra Page descriptions + +LangString DESC_SecFileDaemon ${LANG_ENGLISH} "Install Bacula 64 bit File Daemon on this system." +LangString DESC_SecStorageDaemon ${LANG_ENGLISH} "Install Bacula 64 bit Storage Daemon on this system." +LangString DESC_SecConsole ${LANG_ENGLISH} "Install command console program on this system." +LangString DESC_SecBatConsole ${LANG_ENGLISH} "Install Bat graphical console program on this system." +LangString DESC_SecTrayMonitor ${LANG_ENGLISH} "Install Tray Monitor graphical program on this system." +LangString DESC_SecAllDrivesPlugin ${LANG_ENGLISH} "Install alldrives Plugin on this system." +LangString DESC_SecOldExchangePlugin ${LANG_ENGLISH} "Install old (deprecated) Exchange Plugin on this system." + +LangString TITLE_ConfigPage1 ${LANG_ENGLISH} "Configuration" +LangString SUBTITLE_ConfigPage1 ${LANG_ENGLISH} "Set installation configuration." + +LangString TITLE_ConfigPage2 ${LANG_ENGLISH} "Configuration (continued)" +LangString SUBTITLE_ConfigPage2 ${LANG_ENGLISH} "Set installation configuration." + +LangString TITLE_InstallType ${LANG_ENGLISH} "Installation Type" +LangString SUBTITLE_InstallType ${LANG_ENGLISH} "Choose installation type." + +LangString TITLE_WriteTemplates ${LANG_ENGLISH} "Create Templates" +LangString SUBTITLE_WriteTemplates ${LANG_ENGLISH} "Create a resource template for inclusion in the Director's configuration file." + +!InsertMacro MUI_FUNCTION_DESCRIPTION_BEGIN + !InsertMacro MUI_DESCRIPTION_TEXT ${SecFileDaemon} $(DESC_SecFileDaemon) + !InsertMacro MUI_DESCRIPTION_TEXT ${SecStorageDaemon} $(DESC_SecStorageDaemon) + !InsertMacro MUI_DESCRIPTION_TEXT ${SecConsole} $(DESC_SecConsole) + !InsertMacro MUI_DESCRIPTION_TEXT ${SecBatConsole} $(DESC_SecBatConsole) + !InsertMacro MUI_DESCRIPTION_TEXT ${SecTrayMonitor} $(DESC_SecTrayMonitor) + !InsertMacro MUI_DESCRIPTION_TEXT ${SecAllDrivesPlugin} $(DESC_SecAllDrivesPlugin) + !InsertMacro MUI_DESCRIPTION_TEXT ${SecOldExchangePlugin} $(DESC_SecOldExchangePlugin) +!InsertMacro MUI_FUNCTION_DESCRIPTION_END + +; Uninstall section + +UninstallText "This will uninstall Bacula. Click Uninstall to continue." 
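+; The Uninstall section below stops and removes the File and Storage daemon
+; services, deletes the Bacula registry keys and Start Menu entries, removes
+; the installed files, and optionally (after prompting the user) also deletes
+; the configuration files and the working state files.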
+ +Section "Uninstall" + ; Shutdown any baculum that could be running + nsExec::ExecToLog '"$INSTDIR\bacula-fd.exe" /kill' + nsExec::Exec /TIMEOUT=200 'net stop bacula-fd' + nsExec::ExecToLog '"$INSTDIR\bacula-sd.exe" /kill' + nsExec::Exec /TIMEOUT=200 'net stop bacula-sd' + Sleep 3000 + +; ReadRegDWORD $R0 HKLM "Software\Bacula" "Service_Bacula-fd" + ; Remove Bacula File Daemon service + nsExec::ExecToLog '"$INSTDIR\bacula-fd.exe" /remove' + +; Remove Bacula Storage Daemon service + nsExec::ExecToLog '"$INSTDIR\bacula-sd.exe" /remove' + + nsExec::ExecToLog '"$INSTDIR\plugins\exchange-fd.dll" /remove' + + ; remove registry keys + DeleteRegKey HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Bacula" + DeleteRegKey HKLM "Software\Bacula" + + ; remove start menu items + SetShellVarContext all + Delete /REBOOTOK "$SMPROGRAMS\Bacula\*" + RMDir /REBOOTOK "$SMPROGRAMS\Bacula" + + ; remove files and uninstaller (preserving config for now) + Delete /REBOOTOK "$INSTDIR\doc\*" + Delete /REBOOTOK "$INSTDIR\help\*" + Delete /REBOOTOK "$INSTDIR\plugins\*" + Delete /REBOOTOK "$INSTDIR\openssl.exe" + Delete /REBOOTOK "$INSTDIR\bacula-fd.exe" + Delete /REBOOTOK "$INSTDIR\bat.exe" + Delete /REBOOTOK "$INSTDIR\bacula-tray-monitor.exe" + Delete /REBOOTOK "$INSTDIR\bsleep.exe" + Delete /REBOOTOK "$INSTDIR\bsmtp.exe" + Delete /REBOOTOK "$INSTDIR\bconsole.exe" + Delete /REBOOTOK "$INSTDIR\expr64.exe" + Delete /REBOOTOK "$INSTDIR\snooze.exe" + Delete /REBOOTOK "$INSTDIR\Uninstall.exe" + Delete /REBOOTOK "$INSTDIR\LICENSE" + Delete /REBOOTOK "$INSTDIR\Readme.txt" + Delete /REBOOTOK "$INSTDIR\*.dll" + Delete /REBOOTOK "$INSTDIR\plugins\alldrives-fd.dll" + Delete /REBOOTOK "$INSTDIR\plugins\exchange-fd.dll" + + ; Check for existing installation + MessageBox MB_YESNO|MB_ICONQUESTION \ + "Would you like to delete the current configuration files and the working state file?" IDNO NoDel + Delete /REBOOTOK "$INSTDIR\*" + Delete /REBOOTOK "$INSTDIR\bin32\*" + Delete /REBOOTOK "$INSTDIR\working\*" + Delete /REBOOTOK "$INSTDIR\plugins\*" + Delete /REBOOTOK "$INSTDIR\working\*" + Delete /REBOOTOK "$INSTDIR\*" + RMDir "$INSTDIR\plugins" + RMDir "$INSTDIR\working" + RMDir "$INSTDIR\bin32" + RMDir /REBOOTOK "$INSTDIR\plugins" + RMDir /REBOOTOK "$INSTDIR\working" + RMDir /REBOOTOK "$INSTDIR" +NoDel: + + ; remove directories used + RMDir "$INSTDIR\plugins" + RMDir "$INSTDIR\working" + RMDir "$INSTDIR\doc" + RMDir "$INSTDIR\help" + RMDir "$INSTDIR" +SectionEnd + +; +; $0 - Service Name (ie Bacula-FD) +; $1 - Service Description (ie Bacula File Daemon) +; $2 - Install as Service +; $3 - Start Service now +; +Function InstallDaemon + Call InstallCommonFiles + + WriteRegDWORD HKLM "Software\Bacula" "Service_$0" $2 + + ${If} $2 = 1 + nsExec::ExecToLog '"$INSTDIR\bacula-fd.exe" /kill' + nsExec::Exec /TIMEOUT=200 'net stop bacula-fd' + nsExec::ExecToLog '"$INSTDIR\bacula-sd.exe" /kill' + nsExec::Exec /TIMEOUT=200 'net stop bacula-sd' + sleep 3000 + nsExec::ExecToLog '"$INSTDIR\$0.exe" /remove' + nsExec::ExecToLog '"$INSTDIR\$0.exe" /install -c "$INSTDIR\$0.conf"' + + ${If} $OsIsNT <> 1 + File "Start.bat" + File "Stop.bat" + ${EndIf} + + ; Start the service? 
+ + ${If} $3 = 1 + ${If} $OsIsNT = 1 + nsExec::ExecToLog 'net start $0' + ${Else} + Exec '"$INSTDIR\$0.exe" /service -c "$INSTDIR\$0.conf"' + ${EndIf} + ${EndIf} + ${Else} + CreateShortCut "$SMPROGRAMS\Bacula\Start $1.lnk" "$INSTDIR\$0.exe" '-c "$INSTDIR\$0.conf"' "$INSTDIR\$0.exe" 0 + ${EndIf} +FunctionEnd + +Function GetComputerName + Push $R0 + Push $R1 + Push $R2 + + System::Call "kernel32::GetComputerNameA(t .R0, *i ${NSIS_MAX_STRLEN} R1) i.R2" + + ${StrCase} $R0 $R0 "L" + + Pop $R2 + Pop $R1 + Exch $R0 +FunctionEnd + +!define ComputerNameDnsFullyQualified 3 + +Function GetHostName + Push $R0 + Push $R1 + Push $R2 + + ${If} $OsIsNT = 1 + System::Call "kernel32::GetComputerNameExA(i ${ComputerNameDnsFullyQualified}, t .R0, *i ${NSIS_MAX_STRLEN} R1) i.R2 ?e" + ${If} $R2 = 0 + Pop $R2 + DetailPrint "GetComputerNameExA failed - LastError = $R2" + Call GetComputerName + Pop $R0 + ${Else} + Pop $R2 + ${EndIf} + ${Else} + Call GetComputerName + Pop $R0 + ${EndIf} + + Pop $R2 + Pop $R1 + Exch $R0 +FunctionEnd + +!define NameUserPrincipal 8 + +Function GetUserName + Push $R0 + Push $R1 + Push $R2 + + ${If} $OsIsNT = 1 + System::Call "secur32::GetUserNameExA(i ${NameUserPrincipal}, t .R0, *i ${NSIS_MAX_STRLEN} R1) i.R2 ?e" + ${If} $R2 = 0 + Pop $R2 + DetailPrint "GetUserNameExA failed - LastError = $R2" + Pop $R0 + StrCpy $R0 "" + ${Else} + Pop $R2 + ${EndIf} + ${Else} + StrCpy $R0 "" + ${EndIf} + + ${If} $R0 == "" + System::Call "advapi32::GetUserNameA(t .R0, *i ${NSIS_MAX_STRLEN} R1) i.R2 ?e" + ${If} $R2 = 0 + Pop $R2 + DetailPrint "GetUserNameA failed - LastError = $R2" + StrCpy $R0 "" + ${Else} + Pop $R2 + ${EndIf} + ${EndIf} + + Pop $R2 + Pop $R1 + Exch $R0 +FunctionEnd + +Function ConfigEditAndCopy + Push $R1 + + ${If} ${FileExists} "$0\$1" + StrCpy $R1 ".new" + ${Else} + StrCpy $R1 "" + ${EndIf} + + nsExec::ExecToLog '$INSTDIR\working\sed.exe -i.bak -f "$INSTDIR\working\config.sed" "$INSTDIR\working\$1.in"' + CopyFiles "$INSTDIR\working\$1.in" "$0\$1$R1" + + Pop $R1 +FunctionEnd + +Function GetSelectedComponents + Push $R0 + StrCpy $R0 0 + ${If} ${SectionIsSelected} ${SecFileDaemon} + IntOp $R0 $R0 | ${ComponentFile} + ${EndIf} + ${If} ${SectionIsSelected} ${SecStorageDaemon} + IntOp $R0 $R0 | ${ComponentStorage} + ${EndIf} + ${If} ${SectionIsSelected} ${SecConsole} + IntOp $R0 $R0 | ${ComponentTextConsole} + ${EndIf} + ${If} ${SectionIsSelected} ${SecBatConsole} + IntOp $R0 $R0 | ${ComponentBatConsole} + ${EndIf} + ${If} ${SectionIsSelected} ${SecTrayMonitor} + ;IntOp $R0 $R0 | ${ComponentTrayMonitor} + ${EndIf} + ${If} ${SectionIsSelected} ${SecAllDrivesPlugin} + IntOp $R0 $R0 | ${ComponentAllDrivesPlugin} + ${EndIf} + ${If} ${SectionIsSelected} ${SecOldExchangePlugin} + IntOp $R0 $R0 | ${ComponentOldExchangePlugin} + ${EndIf} + Exch $R0 +FunctionEnd + +Function PageComponentsShow + + Call SelectPreviousComponents + Call UpdateComponentUI +FunctionEnd + +Function PageDirectoryPre + ${If} $AutomaticInstall = 1 + ${OrIf} $InstallType = ${UpgradeInstall} + Abort + ${EndIf} +FunctionEnd + +Function LeaveInstallPage + Push "$INSTDIR\install.log" + Call DumpLog +FunctionEnd + +Function EnterWriteTemplates + Push $R0 + Push $R1 + + Call GetSelectedComponents + Pop $R0 + + IntOp $R0 $R0 & ${ComponentDirector} + IntOp $R1 $NewComponents & ${ComponentsFileAndStorage} + + ${If} $R0 <> 0 + ${OrIf} $R1 = 0 + Pop $R1 + Pop $R0 + Abort + ${EndIf} + + IntOp $R0 $NewComponents & ${ComponentFile} + ${If} $R0 = 0 + WriteINIStr "$INSTDIR\working\WriteTemplates.ini" "Field 2" State 0 + WriteINIStr 
"$INSTDIR\working\WriteTemplates.ini" "Field 2" Flags DISABLED + DeleteINIStr "$INSTDIR\working\WriteTemplates.ini" "Field 3" State + WriteINIStr "$INSTDIR\working\WriteTemplates.ini" "Field 3" Flags REQ_SAVE|FILE_EXPLORER|WARN_IF_EXIST|DISABLED + ${Else} + WriteINIStr "$INSTDIR\working\WriteTemplates.ini" "Field 2" State 1 + DeleteINIStr "$INSTDIR\working\WriteTemplates.ini" "Field 2" Flags + WriteINIStr "$INSTDIR\working\WriteTemplates.ini" "Field 3" State "C:\$ConfigClientName.conf" + ${EndIf} + + IntOp $R0 $NewComponents & ${ComponentStorage} + ${If} $R0 = 0 + WriteINIStr "$INSTDIR\working\WriteTemplates.ini" "Field 4" State 0 + WriteINIStr "$INSTDIR\working\WriteTemplates.ini" "Field 4" Flags DISABLED + DeleteINIStr "$INSTDIR\working\WriteTemplates.ini" "Field 5" State + WriteINIStr "$INSTDIR\working\WriteTemplates.ini" "Field 5" Flags REQ_SAVE|FILE_EXPLORER|WARN_IF_EXIST|DISABLED + ${Else} + ;; TODO: See why this procedure causes a problem on Windows 2012 + WriteINIStr "$INSTDIR\working\WriteTemplates.ini" "Field 4" State 0 + DeleteINIStr "$INSTDIR\working\WriteTemplates.ini" "Field 4" Flags + WriteINIStr "$INSTDIR\working\WriteTemplates.ini" "Field 5" State "$INSTDIR\$ConfigStorageName.conf" + WriteINIStr "$INSTDIR\working\WriteTemplates.ini" "Field 5" Flags REQ_SAVE|FILE_EXPLORER|WARN_IF_EXIST + ${EndIf} + + + !InsertMacro MUI_HEADER_TEXT "$(TITLE_WriteTemplates)" "$(SUBTITLE_WriteTemplates)" + !InsertMacro MUI_INSTALLOPTIONS_DISPLAY "WriteTemplates.ini" + + !InsertMacro MUI_INSTALLOPTIONS_READ $R0 "WriteTemplates.ini" "Field 2" State + ${If} $R0 <> 0 + File "/oname=$INSTDIR\working\client.conf.in" "client.conf.in" + + nsExec::ExecToLog '$INSTDIR\working\sed.exe -f "$INSTDIR\working\config.sed" "$INSTDIR\working\client.conf.in" "$INSTDIR\working\client.conf"' + !InsertMacro MUI_INSTALLOPTIONS_READ $R0 "WriteTemplates.ini" "Field 3" State + ${If} $R0 != "" + CopyFiles "$INSTDIR\working\client.conf" "$R0" + ${EndIf} + ${EndIf} + + !InsertMacro MUI_INSTALLOPTIONS_READ $R0 "WriteTemplates.ini" "Field 4" State + ${If} $R0 <> 0 + File "/oname=$INSTDIR\working\storage.conf.in" "storage.conf.in" + + nsExec::ExecToLog '$INSTDIR\working\sed.exe -f "$PLUGINSDIR\config.sed" -i.bak "$PLUGINSDIR\storage.conf.in"' + !InsertMacro MUI_INSTALLOPTIONS_READ $R0 "WriteTemplates.ini" "Field 5" State + ${If} $R0 != "" + CopyFiles "$INSTDIR\working\storage.conf.in" "$R0" + ${EndIf} + ${EndIf} + + + Pop $R1 + Pop $R0 +FunctionEnd + +Function SelectPreviousComponents + ${If} $InstallType <> ${NewInstall} + IntOp $R1 $PreviousComponents & ${ComponentFile} + ${If} $R1 <> 0 + !InsertMacro SelectSection ${SecFileDaemon} + !InsertMacro SetSectionFlag ${SecFileDaemon} ${SF_RO} + ${Else} + !InsertMacro UnselectSection ${SecFileDaemon} + !InsertMacro ClearSectionFlag ${SecFileDaemon} ${SF_RO} + ${EndIf} + IntOp $R1 $PreviousComponents & ${ComponentStorage} + ${If} $R1 <> 0 + !InsertMacro SelectSection ${SecStorageDaemon} + !InsertMacro SetSectionFlag ${SecStorageDaemon} ${SF_RO} + ${Else} + !InsertMacro UnselectSection ${SecStorageDaemon} + !InsertMacro ClearSectionFlag ${SecStorageDaemon} ${SF_RO} + ${EndIf} + IntOp $R1 $PreviousComponents & ${ComponentTextConsole} + ${If} $R1 <> 0 + !InsertMacro SelectSection ${SecConsole} + !InsertMacro SetSectionFlag ${SecConsole} ${SF_RO} + ${Else} + !InsertMacro UnselectSection ${SecConsole} + !InsertMacro ClearSectionFlag ${SecConsole} ${SF_RO} + ${EndIf} + IntOp $R1 $PreviousComponents & ${ComponentBatConsole} + ${If} $R1 <> 0 + !InsertMacro SelectSection ${SecBatConsole} + 
!InsertMacro SetSectionFlag ${SecBatConsole} ${SF_RO} + ${Else} + !InsertMacro UnselectSection ${SecBatConsole} + !InsertMacro ClearSectionFlag ${SecBatConsole} ${SF_RO} + ${EndIf} + IntOp $R1 $PreviousComponents & ${ComponentTrayMonitor} + ${If} $R1 <> 0 + !InsertMacro SelectSection ${SecTrayMonitor} + ;!InsertMacro SetSectionFlag ${SecTrayMonitor} ${SF_RO} + ${Else} + !InsertMacro UnselectSection ${SecTrayMonitor} + !InsertMacro ClearSectionFlag ${SecTrayMonitor} ${SF_RO} + ${EndIf} + IntOp $R1 $PreviousComponents & ${ComponentAllDrivesPlugin} + ${If} $R1 <> 0 + !InsertMacro SelectSection ${SecAllDrivesPlugin} + !InsertMacro SetSectionFlag ${SecAllDrivesPlugin} ${SF_RO} + ${Else} + !InsertMacro UnselectSection ${SecAllDrivesPlugin} + !InsertMacro ClearSectionFlag ${SecAllDrivesPlugin} ${SF_RO} + ${EndIf} +; IntOp $R1 $PreviousComponents & ${ComponentWinBMRPlugin} +; ${If} $R1 <> 0 +; !InsertMacro SelectSection ${SecWinBMRPlugin} +; !InsertMacro SetSectionFlag ${SecWinBMRPlugin} ${SF_RO} +; ${Else} +; !InsertMacro UnselectSection ${SecWinBMRPlugin} +; !InsertMacro ClearSectionFlag ${SecWinBMRPlugin} ${SF_RO} +; ${EndIf} +; IntOp $R1 $PreviousComponents & ${ComponentOldExchangePlugin} +; ${If} $R1 <> 0 +; !InsertMacro SelectSection ${SecOldExchangePlugin} +; !InsertMacro SetSectionFlag ${SecOldExchangePlugin} ${SF_RO} +; ${Else} +; !InsertMacro UnselectSection ${SecOldExchangePlugin} +; !InsertMacro ClearSectionFlag ${SecOldExchangePlugin} ${SF_RO} +; ${EndIf} + ${EndIf} +FunctionEnd + +Function UpdateComponentUI + Push $R0 + Push $R1 + + Call GetSelectedComponents + Pop $R0 + + IntOp $R1 $R0 ^ $PreviousComponents + IntOp $NewComponents $R0 & $R1 + + ${If} $InstallType <> ${NewInstall} + IntOp $R1 $NewComponents & ${ComponentFile} + ${If} $R1 <> 0 + !InsertMacro SetSectionFlag ${SecFileDaemon} ${SF_BOLD} + ${Else} + !InsertMacro ClearSectionFlag ${SecFileDaemon} ${SF_BOLD} + ${EndIf} + IntOp $R1 $NewComponents & ${ComponentStorage} + ${If} $R1 <> 0 + !InsertMacro SetSectionFlag ${SecStorageDaemon} ${SF_BOLD} + ${Else} + !InsertMacro ClearSectionFlag ${SecStorageDaemon} ${SF_BOLD} + ${EndIf} + IntOp $R1 $NewComponents & ${ComponentTextConsole} + ${If} $R1 <> 0 + !InsertMacro SetSectionFlag ${SecConsole} ${SF_BOLD} + ${Else} + !InsertMacro ClearSectionFlag ${SecConsole} ${SF_BOLD} + ${EndIf} + IntOp $R1 $NewComponents & ${ComponentBatConsole} + ${If} $R1 <> 0 + !InsertMacro SetSectionFlag ${SecBatConsole} ${SF_BOLD} + ${Else} + !InsertMacro ClearSectionFlag ${SecBatConsole} ${SF_BOLD} + ${EndIf} + IntOp $R1 $NewComponents & ${ComponentTrayMonitor} + ${If} $R1 <> 0 + ;!InsertMacro SetSectionFlag ${SecTrayMonitor} ${SF_BOLD} + ${Else} + !InsertMacro ClearSectionFlag ${SecTrayMonitor} ${SF_BOLD} + ${EndIf} + IntOp $R1 $NewComponents & ${ComponentAllDrivesPlugin} + ${If} $R1 <> 0 + !InsertMacro SetSectionFlag ${SecAllDrivesPlugin} ${SF_BOLD} + ${Else} + !InsertMacro ClearSectionFlag ${SecAllDrivesPlugin} ${SF_BOLD} + ${EndIf} + IntOp $R1 $NewComponents & ${ComponentOldExchangePlugin} + ${If} $R1 <> 0 + !InsertMacro SetSectionFlag ${SecOldExchangePlugin} ${SF_BOLD} + ${Else} + !InsertMacro ClearSectionFlag ${SecOldExchangePlugin} ${SF_BOLD} + ${EndIf} + ${EndIf} + + GetDlgItem $R0 $HWNDPARENT 1 + + IntOp $R1 $NewComponents & ${ComponentsRequiringUserConfig} + ${If} $R1 = 0 + SendMessage $R0 ${WM_SETTEXT} 0 "STR:Install" + ${Else} + SendMessage $R0 ${WM_SETTEXT} 0 "STR:&Next >" + ${EndIf} + + Pop $R1 + Pop $R0 +FunctionEnd + +!include "InstallType.nsh" +!include "ConfigPage1.nsh" +!include 
"ConfigPage2.nsh" +!include "DumpLog.nsh" diff --git a/src/win32/winapi.h b/src/win32/winapi.h new file mode 100644 index 00000000..945313ec --- /dev/null +++ b/src/win32/winapi.h @@ -0,0 +1,206 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2019 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ +/* + * Windows APIs that are different for each system. + * We use pointers to the entry points so that a + * single binary will run on all Windows systems. + * + * Kern Sibbald MMIII + */ + +#ifndef __WINAPI_H +#define __WINAPI_H + +#if defined(HAVE_WIN32) +/* + * Commented out native.h include statement, which is not distributed with the + * free version of VC++, and which is not used in bacula. + * + * #if !defined(HAVE_MINGW) // native.h not present on mingw + * #include + * #endif + */ +#include + +#ifndef POOLMEM +typedef char POOLMEM; +#endif + +// unicode enabling of win 32 needs some defines and functions + +// using an average of 3 bytes per character is probably fine in +// practice but I believe that Windows actually uses UTF-16 encoding +// as opposed to UCS2 which means characters 0x10000-0x10ffff are +// valid and result in 4 byte UTF-8 encodings. +#define MAX_PATH_UTF8 MAX_PATH*4 // strict upper bound on UTF-16 to UTF-8 conversion + +// from +// http://msdn.microsoft.com/library/default.asp?url=/library/en-us/fileio/fs/getfileattributesex.asp +// In the ANSI version of this function, the name is limited to +// MAX_PATH characters. To extend this limit to 32,767 wide +// characters, call the Unicode version of the function and prepend +// "\\?\" to the path. For more information, see Naming a File. 
+#define MAX_PATH_W 32767 + +int wchar_2_UTF8(POOLMEM **pszUTF, const wchar_t *pszUCS); +int wchar_2_UTF8(char *pszUTF, const WCHAR *pszUCS, int cchChar = MAX_PATH_UTF8); +int UTF8_2_wchar(POOLMEM **pszUCS, const char *pszUTF); +int make_win32_path_UTF8_2_wchar(POOLMEM **pszUCS, const char *pszUTF, BOOL* pBIsRawPath = NULL); + +// init with win9x, but maybe set to NT in InitWinAPI +extern DWORD DLL_IMP_EXP g_platform_id; +extern DWORD DLL_IMP_EXP g_MinorVersion; +extern DWORD DLL_IMP_EXP g_MajorVersion; + +/* In ADVAPI32.DLL */ +typedef BOOL (WINAPI * t_OpenProcessToken)(HANDLE, DWORD, PHANDLE); +typedef BOOL (WINAPI * t_AdjustTokenPrivileges)(HANDLE, BOOL, + PTOKEN_PRIVILEGES, DWORD, PTOKEN_PRIVILEGES, PDWORD); +typedef BOOL (WINAPI * t_LookupPrivilegeValue)(LPCTSTR, LPCTSTR, PLUID); + +extern t_OpenProcessToken DLL_IMP_EXP p_OpenProcessToken; +extern t_AdjustTokenPrivileges DLL_IMP_EXP p_AdjustTokenPrivileges; +extern t_LookupPrivilegeValue DLL_IMP_EXP p_LookupPrivilegeValue; + +/* In MSVCRT.DLL */ +typedef int (__cdecl * t_wunlink) (const wchar_t *); +typedef int (__cdecl * t_wmkdir) (const wchar_t *); +typedef int (__cdecl * t_wopen) (const wchar_t *, int, ...); + +extern t_wunlink DLL_IMP_EXP p_wunlink; +extern t_wmkdir DLL_IMP_EXP p_wmkdir; + +/* In KERNEL32.DLL */ +typedef BOOL (WINAPI * t_GetFileAttributesExA)(LPCSTR, GET_FILEEX_INFO_LEVELS, + LPVOID); +typedef BOOL (WINAPI * t_GetFileAttributesExW)(LPCWSTR, GET_FILEEX_INFO_LEVELS, + LPVOID); + +typedef DWORD (WINAPI * t_GetFileAttributesA)(LPCSTR); +typedef DWORD (WINAPI * t_GetFileAttributesW)(LPCWSTR); +typedef BOOL (WINAPI * t_SetFileAttributesA)(LPCSTR, DWORD); +typedef BOOL (WINAPI * t_SetFileAttributesW)(LPCWSTR, DWORD); + +typedef HANDLE (WINAPI * t_CreateFileA) (LPCSTR, DWORD ,DWORD, LPSECURITY_ATTRIBUTES, + DWORD , DWORD, HANDLE); +typedef HANDLE (WINAPI * t_CreateFileW) (LPCWSTR, DWORD ,DWORD, LPSECURITY_ATTRIBUTES, + DWORD , DWORD, HANDLE); + +typedef BOOL (WINAPI * t_CreateDirectoryA) (LPCSTR, LPSECURITY_ATTRIBUTES); +typedef BOOL (WINAPI * t_CreateDirectoryW) (LPCWSTR, LPSECURITY_ATTRIBUTES); + +typedef BOOL (WINAPI * t_SetProcessShutdownParameters)(DWORD, DWORD); +typedef BOOL (WINAPI * t_BackupRead)(HANDLE,LPBYTE,DWORD,LPDWORD,BOOL,BOOL,LPVOID*); +typedef BOOL (WINAPI * t_BackupWrite)(HANDLE,LPBYTE,DWORD,LPDWORD,BOOL,BOOL,LPVOID*); + +typedef int (WINAPI * t_WideCharToMultiByte) (UINT CodePage, DWORD , LPCWSTR, int, + LPSTR, int, LPCSTR, LPBOOL); + +typedef int (WINAPI * t_MultiByteToWideChar) (UINT, DWORD, LPCSTR, int, LPWSTR, int); +typedef HANDLE (WINAPI * t_FindFirstFileA) (LPCSTR, LPWIN32_FIND_DATAA); +typedef HANDLE (WINAPI * t_FindFirstFileW) (LPCWSTR, LPWIN32_FIND_DATAW); + +typedef BOOL (WINAPI * t_FindNextFileA) (HANDLE, LPWIN32_FIND_DATAA); +typedef BOOL (WINAPI * t_FindNextFileW) (HANDLE, LPWIN32_FIND_DATAW); + +typedef BOOL (WINAPI * t_SetCurrentDirectoryA) (LPCSTR); +typedef BOOL (WINAPI * t_SetCurrentDirectoryW) (LPCWSTR); + +typedef DWORD (WINAPI * t_GetCurrentDirectoryA) (DWORD, LPSTR); +typedef DWORD (WINAPI * t_GetCurrentDirectoryW) (DWORD, LPWSTR); + +typedef BOOL (WINAPI * t_GetVolumePathNameW) (LPCWSTR, LPWSTR, DWORD); +typedef BOOL (WINAPI * t_GetVolumeNameForVolumeMountPointW) (LPCWSTR, LPWSTR, DWORD); + +typedef BOOL (WINAPI * t_AttachConsole) (DWORD); + +typedef BOOL (WINAPI *t_CreateProcessA) ( + LPCSTR, + LPSTR, + LPSECURITY_ATTRIBUTES, + LPSECURITY_ATTRIBUTES, + BOOL, + DWORD, + PVOID, + LPCSTR, + LPSTARTUPINFOA, + LPPROCESS_INFORMATION); +typedef BOOL (WINAPI *t_CreateProcessW) ( + 
LPCWSTR, + LPWSTR, + LPSECURITY_ATTRIBUTES, + LPSECURITY_ATTRIBUTES, + BOOL, + DWORD, + PVOID, + LPCWSTR, + LPSTARTUPINFOW, + LPPROCESS_INFORMATION); + +extern t_CreateProcessA DLL_IMP_EXP p_CreateProcessA; +extern t_CreateProcessW DLL_IMP_EXP p_CreateProcessW; + +extern t_GetFileAttributesA DLL_IMP_EXP p_GetFileAttributesA; +extern t_GetFileAttributesW DLL_IMP_EXP p_GetFileAttributesW; + +extern t_GetFileAttributesExA DLL_IMP_EXP p_GetFileAttributesExA; +extern t_GetFileAttributesExW DLL_IMP_EXP p_GetFileAttributesExW; + +extern t_SetFileAttributesA DLL_IMP_EXP p_SetFileAttributesA; +extern t_SetFileAttributesW DLL_IMP_EXP p_SetFileAttributesW; + +extern t_CreateFileA DLL_IMP_EXP p_CreateFileA; +extern t_CreateFileW DLL_IMP_EXP p_CreateFileW; + +extern t_CreateDirectoryA DLL_IMP_EXP p_CreateDirectoryA; +extern t_CreateDirectoryW DLL_IMP_EXP p_CreateDirectoryW; + +extern t_SetProcessShutdownParameters DLL_IMP_EXP p_SetProcessShutdownParameters; +extern t_BackupRead DLL_IMP_EXP p_BackupRead; +extern t_BackupWrite DLL_IMP_EXP p_BackupWrite; + +extern t_WideCharToMultiByte DLL_IMP_EXP p_WideCharToMultiByte; +extern t_MultiByteToWideChar DLL_IMP_EXP p_MultiByteToWideChar; + +extern t_FindFirstFileA DLL_IMP_EXP p_FindFirstFileA; +extern t_FindFirstFileW DLL_IMP_EXP p_FindFirstFileW; + +extern t_FindNextFileA DLL_IMP_EXP p_FindNextFileA; +extern t_FindNextFileW DLL_IMP_EXP p_FindNextFileW; + +extern t_SetCurrentDirectoryA DLL_IMP_EXP p_SetCurrentDirectoryA; +extern t_SetCurrentDirectoryW DLL_IMP_EXP p_SetCurrentDirectoryW; + +extern t_GetCurrentDirectoryA DLL_IMP_EXP p_GetCurrentDirectoryA; +extern t_GetCurrentDirectoryW DLL_IMP_EXP p_GetCurrentDirectoryW; + +extern t_GetVolumePathNameW DLL_IMP_EXP p_GetVolumePathNameW; +extern t_GetVolumeNameForVolumeMountPointW DLL_IMP_EXP p_GetVolumeNameForVolumeMountPointW; + +extern t_AttachConsole DLL_IMP_EXP p_AttachConsole; + +void InitWinAPIWrapper(); + +/* In SHFolder.dll on older systems, and now Shell32.dll */ +typedef BOOL (WINAPI * t_SHGetFolderPath)(HWND, int, HANDLE, DWORD, LPTSTR); +extern t_SHGetFolderPath DLL_IMP_EXP p_SHGetFolderPath; + +#endif + +#endif /* __WINAPI_H */ diff --git a/src/win32/wx-console/Makefile b/src/win32/wx-console/Makefile new file mode 100755 index 00000000..561d019d --- /dev/null +++ b/src/win32/wx-console/Makefile @@ -0,0 +1,87 @@ +# +# Makefile for win32 bacula executables +# Using MinGW cross-compiler on GNU/Linux +# +# Written by Robert Nelson, June 2006 +# + +include ../Makefile.inc + +INCLUDES = \ + -I../../wx-console \ + $(INCLUDE_BACULA) \ + $(INCLUDE_WX) \ + $(INCLUDE_PTHREADS) \ + $(INCLUDE_OPENSSL) \ + $(INCLUDE_ICONS) + +DEFINES = \ + -DUSING_DLL \ + -DHAVE_WXCONSOLE \ + -D__WXMSW__ \ + -DWXUSINGDLL \ + -D_STAT_H \ + -D_STAT_DEFINED \ + $(HAVES) + +vpath %.c $(MAINDIR)/src/wx-console +vpath %.cpp $(MAINDIR)/src/wx-console +vpath %.rc $(MAINDIR)/src/wx-console + +########################################################################## + +# Files in $(MAINDIR)/src/wx-console + +CONSOLE_OBJS = \ + $(OBJDIR)/authenticate.o \ + $(OBJDIR)/console_conf.o \ + $(OBJDIR)/console_thread.o \ + $(OBJDIR)/main.o \ + $(OBJDIR)/wxbconfigfileeditor.o \ + $(OBJDIR)/wxbconfigpanel.o \ + $(OBJDIR)/wxbhistorytextctrl.o \ + $(OBJDIR)/wxblistctrl.o \ + $(OBJDIR)/wxbmainframe.o \ + $(OBJDIR)/wxbrestorepanel.o \ + $(OBJDIR)/wxbtableparser.o \ + $(OBJDIR)/wxbtreectrl.o \ + $(OBJDIR)/wxbutils.o \ + $(OBJDIR)/wx-console_private.res + +ALL_OBJS = $(CONSOLE_OBJS) + +CONSOLE_LIBS = \ + $(LIBS_WX) \ + $(LIBS_NETWORK) \ + -lcomctl32 \ + 
-lole32 \ + -loleaut32 \ + -luuid + +###################################################################### + +# Targets + +.PHONY: all clean + +all: $(BINDIR)/bwx-console.exe + +clean: + $(call clean_obj,$(ALL_OBJS)) + $(call clean_exe,$(BINDIR)/bwx-console.exe) + $(call clean_exe,$(BINDIR)/wx-console.exe) + $(ECHO_CMD)rm -rf $(OBJDIRS) + +# +# Rules +# + +$(BINDIR)/bwx-console.exe: $(CONSOLE_OBJS) $(LIBS_BACULA) + $(call link_winapp,$(CONSOLE_LIBS)) + +$(OBJDIR)/wx-console_private.res: wx-console_private.rc + @echo "Compiling $@" + $(call checkdir,$@) + $(ECHO_CMD)$(WINDRES) $(INCLUDE_ICONS) -I$(MAINDIR)/src/wx-console -O coff $< -o $@ + +include ../Makefile.rules diff --git a/src/win32/wx-console/bwx-console.manifest b/src/win32/wx-console/bwx-console.manifest new file mode 100644 index 00000000..b8e5f55f --- /dev/null +++ b/src/win32/wx-console/bwx-console.manifest @@ -0,0 +1,21 @@ + + + + Bacula bwx-console for Win32 + + + + + + + diff --git a/src/win32/wx-console/w32api.h b/src/win32/wx-console/w32api.h new file mode 100644 index 00000000..d509c189 --- /dev/null +++ b/src/win32/wx-console/w32api.h @@ -0,0 +1,11 @@ +#ifndef _W32API_H_ +#define _W32API_H_ +#if __GNUC__ >=3 +#pragma GCC system_header +#endif + +#define __W32API_VERSION 2.4 +#define __W32API_MAJOR_VERSION 2 +#define __W32API_MINOR_VERSION 4 + +#endif /* ndef _W32API_H_ */ diff --git a/src/win32/wx-console/wx-console.vcproj b/src/win32/wx-console/wx-console.vcproj new file mode 100644 index 00000000..bab6aca1 --- /dev/null +++ b/src/win32/wx-console/wx-console.vcproj @@ -0,0 +1,352 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/updatedb/README b/updatedb/README new file mode 100644 index 00000000..d84fe790 --- /dev/null +++ b/updatedb/README @@ -0,0 +1,22 @@ +updatedb scripts +13 Feb 2004 + +These scripts may be used to update older versions of bacula +databases. The scripts are designed using the standard install +locations for mysql, sqlite and the bacula working directory +as configured in the rpm builds. If you have different locations +you will need to edit the $bindir variable and the working +directory location before running them. + +01 Nov 2005 +Please note normally the scripts in this directory are needed only +if you want to update multiple versions of the database. If if +you are upgrading from an older database one version before the +current database version, (e.g. version 8 to 9) the normal +procedure is to use the: + + ./update_bacula_tables + +script which will be installed in your scripts directory (default +/etc/bacula). It can also be found in the +/src/cats directory. diff --git a/updatedb/update_bacula_tables.in b/updatedb/update_bacula_tables.in new file mode 100755 index 00000000..cafeaa17 --- /dev/null +++ b/updatedb/update_bacula_tables.in @@ -0,0 +1,50 @@ +#!/bin/sh +# +# Copyright (C) 2000-2017 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# This routine alters the appropriately configured +# Bacula tables for PostgreSQL, Ingres, MySQL, or SQLite. +# + +# can be used to change the current user with su +pre_command="sh -c" + +default_db_type=@DEFAULT_DB_TYPE@ + +# +# See if the first argument is a valid backend name. +# If so the user overrides the default database backend. 
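
# Illustrative usage (editorial addition, not in the upstream script):
#
#   ./update_bacula_tables postgresql
#
# selects the postgresql backend explicitly; without a backend argument
# the build-time default @DEFAULT_DB_TYPE@ is used, and any remaining
# arguments are passed on to the backend-specific update_<dbtype>_tables
# script invoked at the end of this file.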
+# +if [ $# -gt 0 ]; then + case $1 in + sqlite3) + db_type=$1 + shift + ;; + mysql) + db_type=$1 + shift + ;; + postgresql) + db_type=$1 + shift + ;; + *) + ;; + esac +fi + +# +# If no new db_type is gives use the default db_type. +# +if [ -z "${db_type}" ]; then + db_type="${default_db_type}" +fi + +if [ $db_type = postgresql -a "$UID" = 0 ]; then + pre_command="su - postgres -c" +fi + +echo "Altering ${db_type} tables" +$pre_command "@scriptdir@/update_${db_type}_tables $*" diff --git a/updatedb/update_bacula_tables_8_to_9 b/updatedb/update_bacula_tables_8_to_9 new file mode 100755 index 00000000..b7f3ca0f --- /dev/null +++ b/updatedb/update_bacula_tables_8_to_9 @@ -0,0 +1,17 @@ +#!/bin/sh +# +# This routine alters the appropriately configured +# Bacula tables for PostgreSQL, MySQL, or SQLite. +# +if test xsqlite = xpostgresql -o xsqlite3 = xpostgresql ; then + echo "Altering SQLite tables" + /etc/bacula/update_postgresql_tables $* +fi +if test xmysql = xpostgresql ; then + echo "Altering MySQL tables" + /etc/bacula/update_mysql_tables $* +fi +if test xpostgresql = xpostgresql ; then + echo "Altering PostgreSQL tables" + /etc/bacula/update_postgresql_tables $* +fi diff --git a/updatedb/update_mysql_tables.in b/updatedb/update_mysql_tables.in new file mode 100644 index 00000000..0c732928 --- /dev/null +++ b/updatedb/update_mysql_tables.in @@ -0,0 +1,190 @@ +#!/bin/sh +# +# Copyright (C) 2000-2017 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# Shell script to update MySQL tables from Bacula Community version +# 5.0.x, 5.2.x, 7.0.x, 7.2.x, 7.4.x +# +echo " " +echo "This script will update a Bacula MySQL database from version 12-15 to 16" +echo " " +echo "Depending on the current version of your catalog," +echo "you may have to run this script multiple times." +echo " " + +bindir=@MYSQL_BINDIR@ +PATH="$bindir:$PATH" +db_name=@db_name@ + +ARGS=$* + +getVersion() +{ + mysql $ARGS -D ${db_name} -e "select VersionId from Version LIMIT 1\G" >/tmp/$$ + DBVERSION=`sed -n -e 's/^VersionId: \(.*\)$/\1/p' /tmp/$$` +} + +getVersion + +if [ "x$DBVERSION" = x ]; then + echo + echo "Unable to detect database version, you can specify connection information" + echo "on the command line." + echo "Error. Cannot upgrade this database." + exit 1 +fi + +if [ "$DBVERSION" -lt 12 -o "$DBVERSION" -gt 15 ] ; then + echo " " + echo "The existing database is version $DBVERSION !!" + echo "This script can only update an existing version 12-15 to version 16." + echo "Error. Cannot upgrade this database." + echo " " + exit 1 +fi + +# For all versions, we need to create the Index on Media(StorageId) +# It may fail, but it's not a big problem +# mysql $* -f </dev/null 2> /dev/null +# CREATE INDEX media_storageid_idx ON Media (StorageId); +# END-OF-DATA + +if [ "$DBVERSION" -eq 12 ] ; then + if mysql $* -f </tmp/$$ +DBVERSION=`sed -n -e 's/^VersionId: \(.*\)$/\1/p' /tmp/$$` +if [ $DBVERSION != 11 ] ; then + echo " " + echo "The existing database is version $DBVERSION !!" + echo "This script can only update an existing version 11 database to version 12." + echo "Error. Cannot upgrade this database." + echo " " + exit 1 +fi + +if mysql $* -f </dev/null` +if [ -z "$DB_VER" ]; then + echo "Sorry, I can't seem to locate a bacula database." + exit 1 +fi + +if [ -n "$DB_VER" ] && [ "$DB_VER" -ne "4" ]; then + echo "Sorry, this script is designed to update a version 4 database" + echo "and you have a version $DB_VER database." 
+ exit 1 +fi + +if $bindir/mysql -f </dev/null` +if [ -z "$DB_VER" ]; then + echo "Sorry, I can't seem to locate a bacula database." + exit 1 +fi + +if [ -n "$DB_VER" ] && [ "$DB_VER" -ne "5" ]; then + echo "Sorry, this script is designed to update a version 5 database" + echo "and you have a version $DB_VER database." + exit 1 +fi + +if $bindir/mysql $* -f </dev/null` +if [ -z "$DB_VER" ]; then + echo "Sorry, I can't seem to locate a bacula database." + exit 1 +fi + +if [ -n "$DB_VER" ] && [ "$DB_VER" -ne "6" ]; then + echo "Sorry, this script is designed to update a version 6 database" + echo "and you have a version $DB_VER database." + exit 1 +fi + +if $bindir/mysql $* -f </dev/null` +if [ -z "$DB_VER" ]; then + echo "Sorry, I can't seem to locate a bacula database." + exit 1 +fi + +if [ -n "$DB_VER" ]; then + + if [ "$DB_VER" = "8" ]; then + echo "The Catalog is already at version 8. Nothing to do!" + exit 0 + elif [ "$DB_VER" -ne "7" ]; then + echo "Sorry, this script is designed to update a version 7 database" + echo "and you have a version $DB_VER database." + exit 1 + fi +fi + +if $bindir/mysql $* -f << END_OF_DATA +USE bacula; + +ALTER TABLE Media ADD COLUMN EndFile INTEGER UNSIGNED NOT NULL DEFAULT 0; +ALTER TABLE Media ADD COLUMN EndBlock INTEGER UNSIGNED NOT NULL DEFAULT 0; + +ALTER TABLE File ADD INDEX (JobId, PathId, FilenameId); + +UPDATE Filename SET Name='' WHERE Name=' '; + +DELETE FROM Version; +INSERT INTO Version (VersionId) VALUES (8); + +END_OF_DATA +then + echo "Update of Bacula MySQL tables succeeded." +else + echo "Update of Bacula MySQL tables failed." +fi + +exit 0 diff --git a/updatedb/update_mysql_tables_8_to_9 b/updatedb/update_mysql_tables_8_to_9 new file mode 100755 index 00000000..f43f2598 --- /dev/null +++ b/updatedb/update_mysql_tables_8_to_9 @@ -0,0 +1,99 @@ +#!/bin/sh +# +# Shell script to update MySQL tables from version 1.36 to 1.38 +# +echo " " +echo "This script will update a Bacula MySQL database from version 8 to 9" +echo "Depending on the size of your database," +echo "this script may take several minutes to run." +echo " " +bindir=/usr/bin + +if $bindir/mysql $* -f </dev/null` + if [ -n "$DB_VER" ] && [ "$DB_VER" -ne "4" ]; then + echo "Sorry, this script is designed to update a version 4 database" + echo "and you have a version $DB_VER database." + exit 1 + fi +else + echo "Sorry, I can't seem to locate a bacula database." + exit 1 +fi + +$bindir/sqlite bacula.db </dev/null` + if [ -n "$DB_VER" ] && [ "$DB_VER" -ne "5" ]; then + echo "Sorry, this script is designed to update a version 5 database" + echo "and you have a version $DB_VER database." + exit 1 + fi +else + echo "Sorry, I can't seem to locate a bacula database." + exit 1 +fi + +$bindir/sqlite $* bacula.db </dev/null` + if [ -n "$DB_VER" ] && [ "$DB_VER" -ne "6" ]; then + echo "Sorry, this script is designed to update a version 6 database" + echo "and you have a version $DB_VER database." + exit 1 + fi +else + echo "Sorry, I can't seem to locate a bacula database." + exit 1 +fi + +$bindir/sqlite $* bacula.db </dev/null`" +if [ -n "$DB_VER" ]; then + + if [ "$DB_VER" = "8" ]; then + echo "The Catalog is already at version 8. Nothing to do!" + exit 0 + elif [ "$DB_VER" -ne "7" ]; then + echo "Sorry, this script is designed to update a version 7 database" + echo "and you have a version $DB_VER database." + exit 1 + fi + +else + echo "Sorry, I can't seem to locate a bacula database." 
+ exit 1 +fi + + +$bindir/sqlite $* bacula.db < Date: Thu, 28 Feb 2019 12:05:39 +0100 Subject: Import bacula_9.4.2-2.debian.tar.xz [dgit import tarball bacula 9.4.2-2 bacula_9.4.2-2.debian.tar.xz] --- .gitlab-ci.yml | 175 ++ NEWS | 93 + README.Debian | 195 ++ README.Debian-kFreeBSD | 32 + README.source | 37 + TODO | 85 + additions/bacula-tray-monitor.png | Bin 0 -> 2093 bytes additions/bacula-tray-monitor.svg | 24 + additions/common-functions.dpkg | 182 ++ additions/common-functions.init | 21 + additions/make_catalog_backup.md5sum | 7 + additions/mysql/database/9.2.1-2 | 44 + additions/mysql/make_catalog_backup_awk | 53 + additions/pgsql/database/7.4.1 | 37 + additions/pgsql/database/pgsql | 1 + additions/pgsql/make_catalog_backup_awk | 51 + additions/sqlite3/database/9.2.1-2 | 66 + additions/sqlite3/make_catalog_backup_awk | 3 + autogen.sh | 21 + bacula-bscan.install | 1 + bacula-bscan.lintian-overrides | 2 + bacula-bscan.manpages | 1 + bacula-common-db.install.in | 1 + bacula-common-db.links.in | 2 + bacula-common-db.lintian-overrides.in | 4 + bacula-common.dirs | 3 + bacula-common.docs | 6 + bacula-common.examples | 1 + bacula-common.install | 21 + bacula-common.links | 3 + bacula-common.lintian-overrides | 14 + bacula-common.logrotate | 9 + bacula-common.maintscript | 1 + bacula-common.manpages | 3 + bacula-common.postinst | 36 + bacula-common.postrm | 21 + bacula-common.preinst | 30 + bacula-console-qt.autopostrm | 2 + bacula-console-qt.dirs | 1 + bacula-console-qt.doc-base | 11 + bacula-console-qt.install | 5 + bacula-console-qt.manpages | 1 + bacula-console-qt.postinst | 35 + bacula-console.autopostrm | 2 + bacula-console.install | 1 + bacula-console.links | 2 + bacula-console.lintian-overrides | 2 + bacula-console.manpages | 1 + bacula-console.menu | 2 + bacula-console.postinst | 35 + bacula-director-common.maintscript | 1 + bacula-director-db.config.in | 34 + bacula-director-db.lintian-overrides.in | 2 + bacula-director-db.maintscript.in | 1 + bacula-director-db.postinst.in | 47 + bacula-director-db.postrm.in | 22 + bacula-director-db.prerm.in | 21 + bacula-director-mysql.install | 3 + bacula-director-pgsql.install | 4 + bacula-director-sqlite3.install | 3 + bacula-director.autopostrm | 2 + bacula-director.dirs | 2 + bacula-director.init | 134 + bacula-director.install | 17 + bacula-director.lintian-overrides | 5 + bacula-director.manpages | 4 + bacula-director.postinst | 139 ++ bacula-fd.autopostrm | 2 + bacula-fd.dirs | 3 + bacula-fd.init | 119 + bacula-fd.install | 2 + bacula-fd.lintian-overrides | 3 + bacula-fd.manpages | 1 + bacula-fd.postinst | 36 + bacula-sd.autopostrm | 2 + bacula-sd.dirs | 1 + bacula-sd.init | 111 + bacula-sd.install | 12 + bacula-sd.lintian-overrides | 6 + bacula-sd.maintscript | 2 + bacula-sd.manpages | 5 + bacula-sd.postinst | 42 + bacula-tray-monitor.autopostrm | 2 + bacula-tray-monitor.dirs | 1 + bacula-tray-monitor.install | 5 + bacula-tray-monitor.manpages | 1 + bacula-tray-monitor.postinst | 37 + changelog | 2615 ++++++++++++++++++++ control | 367 +++ copyright | 1413 +++++++++++ gbp.conf | 16 + patches/debian/customize-systemd-240-units | 36 + patches/debian/customize-systemd-units | 118 + patches/debian/enable-hardening-for-qmake | 28 + patches/debian/fix-baculabackupreport-defaults | 11 + patches/debian/fix-default-config | 263 ++ patches/debian/fix-default-dbtype | 17 + patches/debian/fix-desktop-in-files | 26 + patches/debian/fix-libbaccats-rpath | 60 + patches/debian/fix-paths-examples-devices | 63 + 
patches/debian/fix-qmake-binary-source-path | 25 + patches/debian/fix-sql-bindir | 26 + patches/debian/make-buildoutput-verbose | 14 + patches/debian/non-silent-build | 52 + patches/debian/remove-upstream-autoconf-log | 10 + patches/debian/unescape-mysql-syntax | 20 + patches/debian/use-debian-path-for-interpreter | 20 + patches/series | 22 + patches/upstream/add-libs3-checks | 35 + patches/upstream/fix-baculabackupreport-defaults | 60 + patches/upstream/fix-pkg-config-autoconf-macro | 35 + patches/upstream/fix-update_sqlite3_tables | 111 + .../upstream/relative-dirs-for-reproduceability | 33 + patches/upstream/tray-monitor-installation | 47 + rules | 209 ++ scripts/autopostrm | 59 + scripts/extract-dbupgrade.awk | 89 + scripts/generate-copyright | 15 + scripts/generate-doc-dir-to-symlink-migration | 28 + scripts/install-dbconfig | 126 + scripts/template.copyright | 13 + scripts/template.postrm | 68 + source/format | 1 + source/include-binaries | 1 + source/lintian-overrides | 3 + tests/backup-test | 71 + tests/control | 13 + tests/control-mysql | 4 + tests/control-pgsql | 4 + tests/control-sqlite3 | 3 + tests/program-test | 60 + tests/scripts/diff.pl | 253 ++ upstream/metadata | 7 + upstream/signing-key.asc | 52 + watch | 3 + 135 files changed, 8845 insertions(+) create mode 100644 .gitlab-ci.yml create mode 100644 NEWS create mode 100644 README.Debian create mode 100644 README.Debian-kFreeBSD create mode 100644 README.source create mode 100644 TODO create mode 100644 additions/bacula-tray-monitor.png create mode 100644 additions/bacula-tray-monitor.svg create mode 100644 additions/common-functions.dpkg create mode 100644 additions/common-functions.init create mode 100644 additions/make_catalog_backup.md5sum create mode 100644 additions/mysql/database/9.2.1-2 create mode 100755 additions/mysql/make_catalog_backup_awk create mode 100755 additions/pgsql/database/7.4.1 create mode 100644 additions/pgsql/database/pgsql create mode 100755 additions/pgsql/make_catalog_backup_awk create mode 100644 additions/sqlite3/database/9.2.1-2 create mode 100755 additions/sqlite3/make_catalog_backup_awk create mode 100755 autogen.sh create mode 100644 bacula-bscan.install create mode 100644 bacula-bscan.lintian-overrides create mode 100644 bacula-bscan.manpages create mode 100644 bacula-common-db.install.in create mode 100644 bacula-common-db.links.in create mode 100644 bacula-common-db.lintian-overrides.in create mode 100644 bacula-common.dirs create mode 100644 bacula-common.docs create mode 100644 bacula-common.examples create mode 100644 bacula-common.install create mode 100644 bacula-common.links create mode 100644 bacula-common.lintian-overrides create mode 100644 bacula-common.logrotate create mode 100644 bacula-common.maintscript create mode 100644 bacula-common.manpages create mode 100644 bacula-common.postinst create mode 100644 bacula-common.postrm create mode 100644 bacula-common.preinst create mode 100644 bacula-console-qt.autopostrm create mode 100644 bacula-console-qt.dirs create mode 100644 bacula-console-qt.doc-base create mode 100644 bacula-console-qt.install create mode 100644 bacula-console-qt.manpages create mode 100644 bacula-console-qt.postinst create mode 100644 bacula-console.autopostrm create mode 100644 bacula-console.install create mode 100644 bacula-console.links create mode 100644 bacula-console.lintian-overrides create mode 100644 bacula-console.manpages create mode 100644 bacula-console.menu create mode 100644 bacula-console.postinst create mode 100644 
bacula-director-common.maintscript create mode 100644 bacula-director-db.config.in create mode 100644 bacula-director-db.lintian-overrides.in create mode 100644 bacula-director-db.maintscript.in create mode 100644 bacula-director-db.postinst.in create mode 100644 bacula-director-db.postrm.in create mode 100644 bacula-director-db.prerm.in create mode 100644 bacula-director-mysql.install create mode 100644 bacula-director-pgsql.install create mode 100644 bacula-director-sqlite3.install create mode 100644 bacula-director.autopostrm create mode 100644 bacula-director.dirs create mode 100644 bacula-director.init create mode 100755 bacula-director.install create mode 100644 bacula-director.lintian-overrides create mode 100644 bacula-director.manpages create mode 100644 bacula-director.postinst create mode 100644 bacula-fd.autopostrm create mode 100644 bacula-fd.dirs create mode 100644 bacula-fd.init create mode 100644 bacula-fd.install create mode 100644 bacula-fd.lintian-overrides create mode 100644 bacula-fd.manpages create mode 100644 bacula-fd.postinst create mode 100644 bacula-sd.autopostrm create mode 100644 bacula-sd.dirs create mode 100644 bacula-sd.init create mode 100644 bacula-sd.install create mode 100644 bacula-sd.lintian-overrides create mode 100644 bacula-sd.maintscript create mode 100644 bacula-sd.manpages create mode 100644 bacula-sd.postinst create mode 100644 bacula-tray-monitor.autopostrm create mode 100644 bacula-tray-monitor.dirs create mode 100644 bacula-tray-monitor.install create mode 100644 bacula-tray-monitor.manpages create mode 100644 bacula-tray-monitor.postinst create mode 100644 changelog create mode 100644 control create mode 100644 copyright create mode 100644 gbp.conf create mode 100644 patches/debian/customize-systemd-240-units create mode 100644 patches/debian/customize-systemd-units create mode 100644 patches/debian/enable-hardening-for-qmake create mode 100644 patches/debian/fix-baculabackupreport-defaults create mode 100644 patches/debian/fix-default-config create mode 100644 patches/debian/fix-default-dbtype create mode 100644 patches/debian/fix-desktop-in-files create mode 100644 patches/debian/fix-libbaccats-rpath create mode 100644 patches/debian/fix-paths-examples-devices create mode 100644 patches/debian/fix-qmake-binary-source-path create mode 100644 patches/debian/fix-sql-bindir create mode 100644 patches/debian/make-buildoutput-verbose create mode 100644 patches/debian/non-silent-build create mode 100644 patches/debian/remove-upstream-autoconf-log create mode 100644 patches/debian/unescape-mysql-syntax create mode 100644 patches/debian/use-debian-path-for-interpreter create mode 100644 patches/series create mode 100644 patches/upstream/add-libs3-checks create mode 100644 patches/upstream/fix-baculabackupreport-defaults create mode 100644 patches/upstream/fix-pkg-config-autoconf-macro create mode 100644 patches/upstream/fix-update_sqlite3_tables create mode 100644 patches/upstream/relative-dirs-for-reproduceability create mode 100644 patches/upstream/tray-monitor-installation create mode 100755 rules create mode 100755 scripts/autopostrm create mode 100755 scripts/extract-dbupgrade.awk create mode 100755 scripts/generate-copyright create mode 100755 scripts/generate-doc-dir-to-symlink-migration create mode 100755 scripts/install-dbconfig create mode 100644 scripts/template.copyright create mode 100644 scripts/template.postrm create mode 100644 source/format create mode 100644 source/include-binaries create mode 100644 source/lintian-overrides 
create mode 100755 tests/backup-test create mode 100644 tests/control create mode 100644 tests/control-mysql create mode 100644 tests/control-pgsql create mode 100644 tests/control-sqlite3 create mode 100755 tests/program-test create mode 100755 tests/scripts/diff.pl create mode 100644 upstream/metadata create mode 100644 upstream/signing-key.asc create mode 100644 watch diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml new file mode 100644 index 00000000..027db83d --- /dev/null +++ b/.gitlab-ci.yml @@ -0,0 +1,175 @@ +variables: + DEBFULLNAME: "Salsa Pipeline" + DEBEMAIL: "" + DEBIAN_FRONTEND: noninteractive + WORKING_DIR: ${CI_PROJECT_DIR}/debian/output + REGISTRY_BASE_URL: "registry.salsa.debian.org/bacula-team/images" + RELEASE: 'unstable' + +stages: + - build + - test + +build full: + stage: build + image: ${REGISTRY_BASE_URL}/gbp + services: + - docker:dind + artifacts: + name: "$CI_JOB_NAME:$CI_COMMIT_REF_NAME" + paths: + - ${WORKING_DIR}/ + cache: + paths: + - ${CCACHE_WORK_DIR} + variables: + CCACHE_TMP_DIR: ${CI_PROJECT_DIR}/../.ccache + CCACHE_WORK_DIR: ${WORKING_DIR}/.ccache + script: + - | + curl https://prittiau.debian.net/post -X POST --data "{\"project_id\":$CI_PROJECT_ID, \"pipeline_id\":$CI_PIPELINE_ID}" --header "Content-Type: application/json" + - mkdir -p ${CCACHE_WORK_DIR} + - mv ${CCACHE_WORK_DIR} ${CCACHE_TMP_DIR} + - gbp pull --ignore-branch --pristine-tar --track-missing + - gbp buildpackage --git-ignore-branch --git-export-dir=${WORKING_DIR} --git-builder="docker-build.sh ${REGISTRY_BASE_URL}/dockerbuilder:${RELEASE} --build=full" + - mv ${CCACHE_TMP_DIR} ${CCACHE_WORK_DIR} + +build arch: + stage: build + image: ${REGISTRY_BASE_URL}/gbp + services: + - docker:dind + cache: + paths: + - ${CCACHE_WORK_DIR} + variables: + CCACHE_TMP_DIR: ${CI_PROJECT_DIR}/../.ccache + CCACHE_WORK_DIR: ${WORKING_DIR}/.ccache + script: + - | + curl https://prittiau.debian.net/post -X POST --data "{\"project_id\":$CI_PROJECT_ID, \"pipeline_id\":$CI_PIPELINE_ID}" --header "Content-Type: application/json" + - mkdir -p ${CCACHE_WORK_DIR} + - mv ${CCACHE_WORK_DIR} ${CCACHE_TMP_DIR} + - gbp pull --ignore-branch --pristine-tar --track-missing + - gbp buildpackage --git-ignore-branch --git-export-dir=${WORKING_DIR} --git-builder="docker-build.sh ${REGISTRY_BASE_URL}/dockerbuilder:${RELEASE} --build=any" + - mv ${CCACHE_TMP_DIR} ${CCACHE_WORK_DIR} + +build indep: + stage: build + image: ${REGISTRY_BASE_URL}/gbp + services: + - docker:dind + cache: + paths: + - ${CCACHE_WORK_DIR} + variables: + CCACHE_TMP_DIR: ${CI_PROJECT_DIR}/../.ccache + CCACHE_WORK_DIR: ${WORKING_DIR}/.ccache + script: + - | + curl https://prittiau.debian.net/post -X POST --data "{\"project_id\":$CI_PROJECT_ID, \"pipeline_id\":$CI_PIPELINE_ID}" --header "Content-Type: application/json" + - mkdir -p ${CCACHE_WORK_DIR} + - mv ${CCACHE_WORK_DIR} ${CCACHE_TMP_DIR} + - gbp pull --ignore-branch --pristine-tar --track-missing + - gbp buildpackage --git-ignore-branch --git-export-dir=${WORKING_DIR} --git-builder="docker-build.sh ${REGISTRY_BASE_URL}/dockerbuilder:${RELEASE} --build=all" + - mv ${CCACHE_TMP_DIR} ${CCACHE_WORK_DIR} + +lintian: + stage: test + image: ${REGISTRY_BASE_URL}/lintian:${RELEASE} + variables: + GIT_STRATEGY: none + script: + - lintian -EIL +pedantic --no-tag-display-limit --profile debian/main ${WORKING_DIR}/*.changes + +build log scanner: + stage: test + image: ${REGISTRY_BASE_URL}/blhc + variables: + GIT_STRATEGY: none + script: + - blhc --debian --line-numbers --color ${WORKING_DIR}/*.build + +autopkgtest 
sqlite3: + stage: test + image: ${REGISTRY_BASE_URL}/autopkgtest:${RELEASE} + script: + - eatmydata autopkgtest --override-control=debian/tests/control-sqlite3 -U ${WORKING_DIR}/*.changes -- null || [ $? -eq 8 ] + +autopkgtest mysql: + stage: test + image: ${REGISTRY_BASE_URL}/autopkgtest:${RELEASE} + script: + - eatmydata autopkgtest --override-control=debian/tests/control-mysql -U ${WORKING_DIR}/*.changes -- null || [ $? -eq 8 ] + +autopkgtest pgsql: + stage: test + image: ${REGISTRY_BASE_URL}/autopkgtest:${RELEASE} + script: + - eatmydata autopkgtest --override-control=debian/tests/control-pgsql -U ${WORKING_DIR}/*.changes -- null || [ $? -eq 8 ] + +reprotest: + stage: test + image: ${REGISTRY_BASE_URL}/reprotest + artifacts: + name: "$CI_JOB_NAME:$CI_COMMIT_REF_NAME" + paths: + - ./reprotest.log + when: always + script: + - apt-get update + - eatmydata apt-get build-dep -y . + - export DEB_BUILD_OPTIONS=nocheck + - eatmydata reprotest --min-cpus $(nproc --all) . -- null |& tee "${CI_PROJECT_DIR}/reprotest.log" + +piuparts sqlite3: + stage: test + image: ${REGISTRY_BASE_URL}/piuparts + variables: + GIT_STRATEGY: none + services: + - docker:dind + script: + - CHROOT_PATH=/tmp/debian-chroot + - CONTAINER_ID=$(docker run --rm -d debian:${RELEASE} sleep infinity) + - docker exec ${CONTAINER_ID} bash -c "apt-get update && apt-get install eatmydata -y" + - mkdir -p ${CHROOT_PATH} + - docker export ${CONTAINER_ID} | tar -C ${CHROOT_PATH} -xf - + - mknod -m 666 ${CHROOT_PATH}/dev/urandom c 1 9 + - TESTDEBS=$(ls ${WORKING_DIR}/*.deb | egrep -v "pgsql|mysql") + - piuparts --keep-sources-list --scriptsdir /etc/piuparts/scripts --allow-database --warn-on-leftovers-after-purge -d ${RELEASE} --distupgrade-to-testdebs --hard-link -e ${CHROOT_PATH} ${TESTDEBS} + +piuparts mysql: + stage: test + image: ${REGISTRY_BASE_URL}/piuparts + variables: + GIT_STRATEGY: none + services: + - docker:dind + script: + - CHROOT_PATH=/tmp/debian-chroot + - CONTAINER_ID=$(docker run --rm -d debian:${RELEASE} sleep infinity) + - docker exec ${CONTAINER_ID} bash -c "apt-get update && apt-get install eatmydata -y" + - mkdir -p ${CHROOT_PATH} + - docker export ${CONTAINER_ID} | tar -C ${CHROOT_PATH} -xf - + - mknod -m 666 ${CHROOT_PATH}/dev/urandom c 1 9 + - TESTDEBS=$(ls ${WORKING_DIR}/*.deb | egrep -v "pgsql|sqlite3") + - piuparts --keep-sources-list --scriptsdir /etc/piuparts/scripts --allow-database --warn-on-leftovers-after-purge -d ${RELEASE} --distupgrade-to-testdebs --hard-link -e ${CHROOT_PATH} ${TESTDEBS} + +piuparts pgsql: + stage: test + image: ${REGISTRY_BASE_URL}/piuparts + variables: + GIT_STRATEGY: none + services: + - docker:dind + script: + - CHROOT_PATH=/tmp/debian-chroot + - CONTAINER_ID=$(docker run --rm -d debian:${RELEASE} sleep infinity) + - docker exec ${CONTAINER_ID} bash -c "apt-get update && apt-get install eatmydata -y" + - mkdir -p ${CHROOT_PATH} + - docker export ${CONTAINER_ID} | tar -C ${CHROOT_PATH} -xf - + - mknod -m 666 ${CHROOT_PATH}/dev/urandom c 1 9 + - TESTDEBS=$(ls ${WORKING_DIR}/*.deb | egrep -v "mysql|sqlite3") + - piuparts --keep-sources-list --scriptsdir /etc/piuparts/scripts --allow-database --warn-on-leftovers-after-purge -d ${RELEASE} --distupgrade-to-testdebs --hard-link -e ${CHROOT_PATH} ${TESTDEBS} + diff --git a/NEWS b/NEWS new file mode 100644 index 00000000..e4b2b2af --- /dev/null +++ b/NEWS @@ -0,0 +1,93 @@ +bacula (9.0.3+dfsg-1) experimental; urgency=low + + Please be aware that using SQLite3 databases with Bacula is no longer + officially supported by the upstream 
author and might be removed at + short notice. We strongly recommend to switch existing installations + to use PostgreSQL or MariaDB/MySQL. We also very strongly discourage + new installations using SQlite3. + + The script /etc/bacula/script/make_catalog_backup_awk is now + deprecated and will be removed after the release of Debian 10 (buster) + + -- Carsten Leonhardt Tue, 15 Aug 2017 16:25:54 +0200 + +bacula (7.4.3+dfsg-3) experimental; urgency=medium + + This version introduces a major overhaul of the packaging: + - bacula-director is now the main director package + - the contents of bacula-sd-*sql* were moved to bacula-sd (bcopy, + btape) and bacula-bscan (bscan) + + Native systemd support, thanks to Sven Hartge + + -- Carsten Leonhardt Fri, 29 Jul 2016 11:03:56 +0200 + +bacula (7.0.5+dfsg-1) unstable; urgency=medium + + Significant changes since version 5.2.6: + + - bacula-traymonitor has been removed + - wx-console has been removed + - python support has been removed + + -- Carsten Leonhardt Tue, 10 Nov 2015 14:06:42 +0100 + +bacula (5.2.6+dfsg-1) unstable; urgency=high + + Significant changes since versions before 5.2.6+dfsg-1: + + [ Jan Hauke Rahm ] + bacula-fd, -sd, and -director now ship /etc/default/bacula-{fd,sd,dir} + accordingly. The new files contain possibilities to disable the service, + provide additional arguments passed to the daemon, and specify a + configuration file other than the default. Do not delete these files. + + The binary /usr/bin/bconsole (formerly a wrapper around + /usr/sbin/bacula-console) has been dropped as it made assumptions + about where your configuration file is (/etc/bacula/bconsole.conf). + It is now a symlink to /usr/sbin/bconsole: if no configuration file is + specified, /etc/bacula/bconsole.conf is used by default. + + [ Luca Capello ] + Upstream installs console and tray-monitor binaries (bat, bconsole and + bacula-tray-monitor) into /usr/sbin/, while Debian were installing + them into /usr/bin/. However, in such case there is no real advantage + in deviating from upstream, also because the default configuration + files have anyway tighter ownership (root:bacula) and permissions + (640). Thus, these binaries have been migrated to /usr/sbin/, with + symlinks in place to allow a smooth migration from squeeze and they + will be removed once wheezy is released. + + -- Luca Capello Wed, 13 Jun 2012 17:38:50 +0200 + +bacula (3.0.0-1) unstable; urgency=low + + SSL/TLS is now available in Bacula in Debian. + + SQLite v2 support has been deprecated upstream. Debian + packages will convert your catalog to SQLite v3. + + The WX console has been deprecated upstream. Please instead + use bat, available in package bacula-console-qt. + + -- John Goerzen Thu, 30 Apr 2009 16:25:36 -0500 + +bacula (2.2.0-1) unstable; urgency=low + + SSL/TLS has been disabled in this version of Bacula due to licensing + concerns. See README.Debian and the thread at + http://lists.debian.org/debian-legal/2007/07/msg00144.html for more + details. + + -- John Goerzen Wed, 19 Sep 2007 06:46:55 -0500 + +bacula (1.38.9-3) unstable; urgency=low + + Welcome to the 1.38 series of Bacula. + + This version of Bacula in Debian introduces some significant changes in the + Debian packages. PostgreSQL director packages and MySQL packages + have already switched to using dbconfig-common. And new Sqlite3 + packages are available. 
+ + -- John Goerzen Thu, 11 May 2006 06:46:55 -0500 diff --git a/README.Debian b/README.Debian new file mode 100644 index 00000000..9477942a --- /dev/null +++ b/README.Debian @@ -0,0 +1,195 @@ +Bacula, network-based backup, recovery & verification system +http://www.bacula.org +=============================================================================== + +The 'bacula-common' package provides some of the common infrastructure +for Bacula. You can find Bacula READMEs and other documentation files +there. + +BASIC INFORMATION +----------------- + +To use Bacula you must install one of the flavors of director, +depending on your preferred database, onto a server. + +You must also install bacula-sd on the machine where you will be +storing your backed-up data. And you'll need bacula-fd on any machine +that you want to back up. + +The installation will run the necessary daemons automatically, but to +get any backups working you must customize the relevant configuration +files in /etc/bacula as described in the Bacula manual (available in +the package bacula-doc). Once you have edited the files you should +issue an appropriate "service bacula-xx force-reload". + +NETWORK ENABLING +---------------- + +By default, Debian's Bacula packages install config files in which the +servers listen only on localhost by default. If you want to be able +to do backups over the network, you'll need to find the lines in +bacula-dir.conf, bacula-sd.conf and bacula-fd.conf that reference +127.0.0.1 and remove them. + +Note that all hostnames used in "Address"-lines need to be resolvable +on all hosts involved, so fully qualified domain names are +recommended. + +Using a hostname in the listen addresses (DirAddress, SDAddress, +FDAddress) will only work if they are resolvable on startup, this is +not the case if using NetworkManager. + +PACKAGES +======== + +The package 'bacula-director' provides the bacula-dir binary. + +To select a database backend, one of the following packages +has to be installed: + + - bacula-director-sqlite3 + - bacula-director-mysql + - bacula-director-pgsql + +These packages provide the needed scripts and configurations for +the database indicated in the package's name. + +CHANGING DATABASE BACKEND +========================= + +This is NOT RECOMMENDED and NOT SUPPORTED FOR LIVE DATA. + +Your current database IS NOT transferred to the new database +backend. The new database backend will have an EMPTY database. Your +/etc/bacula/bacula-dir.conf file WILL NOT be automatically adapted to +function with the new database backend, YOU need to do the necessary +changes to the Catalog{} section YOURSELF. + +If you want to go ahead anyway, you change the database backend by +installing a different bacula-director-DBTYPE package. This will pull +in the needed dependencies for the new database backend and remove the +ones from the old one. + +CONSOLE +======= + +The package bacula-console provides the administration console for +Bacula, so that users can contact the director. + +The BAT graphical admin console is contained in the +bacula-console-qt package. + +USERS & SECURITY +================ + +The passwords in the configuration files in /etc/bacula have been +auto-generated individually during installation. They are saved in +/etc/bacula/common_default_passwords. + +The installation will create a "bacula" user on the system, and the +daemons and RunBefore/RunAfter jobs will run as that user. However, +the file daemon runs as root by default, as will your +ClientRunBefore/After jobs. 
The installation of the director will ask
+you for a database user to use.
+
+You can run Bacula consoles as any user, but the user you use will
+need to be able to read the console config files. These files are, by
+default, under /etc/bacula. You'll need to specify the config file
+with -c for any of the consoles (see the example near the end of this
+file).
+
+RUNNING BACULA-FD AS UNPRIVILEGED USER ON LINUX
+===============================================
+
+When using systemd:
+-------------------
+
+bacula-fd can be run as an unprivileged user under systemd using the
+following steps:
+
+- Use "systemctl edit bacula-fd.service" to create an override
+  configuration file in /etc/systemd/system/bacula-fd.service.d/.
+
+- Put the following content into the override configuration:
+
+# Run bacula-fd as unprivileged user
+[Service]
+User=bacula
+Group=bacula
+AmbientCapabilities=CAP_DAC_READ_SEARCH
+
+- Restart bacula-fd via "systemctl restart bacula-fd.service"
+
+Now the FD runs as unprivileged user "bacula" but the capabilities
+provided allow it read-only access to any file and directory on
+the system, just as if it were run as user "root".
+
+When using sysvinit:
+--------------------
+
+- Edit the file /etc/default/bacula-fd and add the following lines:
+
+# Run bacula-fd as unprivileged user
+BUSER="bacula"
+BGROUP="bacula"
+
+- Restart bacula-fd via "service bacula-fd restart"
+
+Now the FD runs as unprivileged user "bacula" but it retains the
+read-only capability to any file and directory on the system,
+just as if it were run as user "root".
+
+Notes:
+------
+
+This setup has some caveats though:
+
+- ClientScripts no longer run as "root". If you need those scripts
+  to run as root you need to adapt them to use a mechanism to run
+  them as a different user, for example via sudo.
+
+- Restored files no longer have their original permissions and ACLs.
+  This can be changed when using systemd by also specifying the
+  capabilities "CAP_CHOWN CAP_DAC_OVERRIDE CAP_FOWNER" but this puts
+  the running process at nearly the same level as running as root
+  would do to begin with.
+  When using SysVinit, there is no way to allow a non-root FD to
+  restore files with the original permissions and ACLs.
+
+UPGRADES
+========
+
+Debian packages will, unless you have asked them not to, attempt to
+automatically upgrade your catalog during a major Bacula upgrade.
+
+However, please be sure to examine the Release Notes in
+/usr/share/doc/bacula-common/NEWS.gz for potential changes in the
+configuration files.
+
+CATALOG BACKUPS
+===============
+
+Please see the comments in the bacula-dir.conf file regarding the
+make_catalog_backup script for information on tailoring it for your own
+particular environment.
+
+MONITORING ("NAGIOS") PLUGIN
+============================
+
+The monitoring plugin "check_bacula" from the examples is included in
+bacula-common. Please note that it does not support TLS and that it is
+not an official part of Bacula and is thus unsupported.
+
+USING ON KFREEBSD
+=================
+
+For some notes specific to kFreeBSD see
+/usr/share/doc/bacula-common/README.Debian-kFreeBSD.gz
+
+FURTHER READING
+===============
+
+See the Bacula documentation provided in the package bacula-doc and on
+the upstream author's website, http://www.bacula.org.
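
As an example of the console invocation mentioned above, using the Debian
default configuration path (adjust the path if your bconsole.conf lives
elsewhere):

    bconsole -c /etc/bacula/bconsole.conf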
+ +-- Jose Luis Tallon, updated by John Goerzen, Carsten Leonahrdt and +Sven Hartge diff --git a/README.Debian-kFreeBSD b/README.Debian-kFreeBSD new file mode 100644 index 00000000..51a8796f --- /dev/null +++ b/README.Debian-kFreeBSD @@ -0,0 +1,32 @@ +Notes on using Bacula in kFreeBSD +================================= + +Using tapes +----------- + +Using tapes is entirely untested. If you are willing to test please +note the following advice taken from FreeBSD(1) and report to +bacula@packages.debian.org: + +"Due to lack of some features in the FreeBSD tape driver +implementation you MUST add some OS dependent options to the +bacula-sd.conf file: + + Hardware End of Medium = no; + Backward Space Record = no; + Backward Space File = no; + +With 2 filemarks at EOT (see man mt): + Fast Forward Space File = no; + BSF at EOM = yes; + TWO EOF = yes; + +With 1 filemarks at EOT (see man mt): + Fast Forward Space File = yes; + BSF at EOM = no; + TWO EOF = no; + +NOTE: YOU CAN SWITCH EOT model ONLY when starting from scratch with + EMPTY tapes." + +(1) https://github.com/freebsd/freebsd-ports/blob/master/sysutils/bacula-server/files/pkg-message.server.in diff --git a/README.source b/README.source new file mode 100644 index 00000000..65ae1e49 --- /dev/null +++ b/README.source @@ -0,0 +1,37 @@ +Steps to update to a new upstream release +----------------------------------------- + +1. Review the upstream changes (also wrt licencing) + +2. If the upstream tarball can be used as-is, "gbp import-orig --uscan --pristine-tar" can be used. + +Otherwise: + + a) Use uscan to download the upstream tarball and check the signature + b) unpack, remove problematic files, document in list below and create cleaned up tarball with version +dfsg + c) gbp import-orig --pristine-tar + +3. Update debian/changelog to reflect new version + +4. If needed, refresh quilt patches + +[to be continued if needed ...] + + + +Problematic files for version 9.0.7 +----------------------------------- + +Proprietary licence: + +bacula/src/filed/win_efs.c + +These (longer) files don't have a copyright/licence notice: + +src/win32/compat/print.cpp +src/win32/stored/postest/postest.cpp +src/win32/full_win32_installer/winbacula.nsi +src/win32/filed/plugins/node.h +src/win32/full_win32_installer/winbacula.nsi + +(and possibly others, I removed all of src/win32 as we don't need it anyway) diff --git a/TODO b/TODO new file mode 100644 index 00000000..82d3ed84 --- /dev/null +++ b/TODO @@ -0,0 +1,85 @@ +v9.4.0 + + * src/lib/Makefile *_test installs unit tests -> install somewhere (new package?) and use for autopkgtest + - bsock test -> depends on /bin/netcat + +Normal: + + * Show a warning when upgrading bacula-director-sqlite3 about lacking + upstream support + - or - + submit a bug report on release-notes to inform users that sqlite3 + will be gone in buster / after buster (???) 
+ - or - + rely on the warnings already in place + + * user creation/locking is suboptimal (bacula-common.preinst) + check dh-sysuser + https://lists.debian.org/debian-devel/2016/10/msg00665.html + https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=621833 + +Minor: + + * Remove the make_bacula_catalog_awk scripts using rm_config in + bacula-director-.maintscript after the release of + Debian 10/buster + * Remove the transitional bacula-director-common package after the + release of Debian 10/buster + +Wishlist: + + * autopkgtests + + binaries not yet tested: + + bacula-bscan: /usr/sbin/bscan + bacula-common: /usr/lib/bacula/btraceback + bacula-common: /usr/lib/nagios/plugins/check_bacula + bacula-common: /usr/sbin/bsmtp + bacula-console-qt: /usr/sbin/bat + bacula-director: /usr/sbin/dbcheck + bacula-sd: /usr/sbin/bcopy + bacula-sd: /usr/sbin/bextract + bacula-sd: /usr/sbin/bls + bacula-sd: /usr/sbin/btape + bacula-tray-monitor: /usr/sbin/bacula-tray-monitor + + * convert debian/copyright into machine readable format + see: https://wiki.debian.org/UscanEnhancements + + * autopostrm would ideally become a debhelper script ... + + * Consider tending to users not using dbconfig-common + see 2nd half of http://lists.alioth.debian.org/pipermail/pkg-bacula-devel/2016-July/001520.html + + * Reactivate patches for baculabackupreport.in and continue work on them + finish work on baculabackupreport script: + Still missing: + - Automatic handling of the database password + - modifying the debian placeholders in postinst + - handling mysql vs. mariadb + - mysql needs "-p" before the password, mariadb not (-> upstream) + +Problems: + + * dpkg --status bacula-director-(DBTYPE) lists conffiles as obsolete + that moved to bacula-director. We probably have to live with that + as moving conffiles between packages isn't well supported. 
+ See https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=595112 - but + this doesn't even take a move from dpkg- to ucf-handled conffiles + into account + +Review unmerged old changes: + + * Ask for passwords via debconf (git tag development/2011-04-29) + d4676fc19978a0588bb09a58234cf451d15859d6 Don't set passwords at build time but ask for them + aea2145a4f135fa6d18d33384a1af2c2bffc8617 Get rid of useless common-functions + +Hints + + Bacula packaging in other distributions: + * Fedora/Red Hat: https://src.fedoraproject.org/rpms/bacula/tree/master + * Gentoo: https://gitweb.gentoo.org/repo/gentoo.git/tree/app-backup/bacula/ + * Arch Linux: https://aur.archlinux.org/pkgbase/bacula + * OpenBSD: https://cvsweb.openbsd.org/cgi-bin/cvsweb/ports/sysutils/bacula/ + * FreeBSD: https://svnweb.freebsd.org/ports/head/sysutils/bacula9-server/ diff --git a/additions/bacula-tray-monitor.png b/additions/bacula-tray-monitor.png new file mode 100644 index 00000000..8fd1972a Binary files /dev/null and b/additions/bacula-tray-monitor.png differ diff --git a/additions/bacula-tray-monitor.svg b/additions/bacula-tray-monitor.svg new file mode 100644 index 00000000..35fdddb1 --- /dev/null +++ b/additions/bacula-tray-monitor.svg @@ -0,0 +1,24 @@ + + + + + +image/svg+xml + + + + + + + + + + + + + + + + + + diff --git a/additions/common-functions.dpkg b/additions/common-functions.dpkg new file mode 100644 index 00000000..e180d083 --- /dev/null +++ b/additions/common-functions.dpkg @@ -0,0 +1,182 @@ +# -*-shell-script-*- +# +# Common functions, used from dpkg scripts +# + +genRandomPassword() +{ + cat /dev/urandom | tr -dc _A-Z-a-z-0-9 | head -c33 +} + +readOrCreatePasswords() +{ + BACULAPWFILE=/etc/bacula/common_default_passwords + if [ -f $BACULAPWFILE ]; then + . $BACULAPWFILE + fi + + if [ -z "$DIRPASSWD" -o -z "$DIRMPASSWD" -o -z "$SDPASSWD" -o \ + -z "$SDMPASSWD" -o -z "$FDPASSWD" -o -z "$FDMPASSWD" ] ; then + # Create the passwords. + DIRPASSWD=`genRandomPassword` + DIRMPASSWD=`genRandomPassword` + SDPASSWD=`genRandomPassword` + SDMPASSWD=`genRandomPassword` + FDPASSWD=`genRandomPassword` + FDMPASSWD=`genRandomPassword` + + # Save the passwords. + echo "# " > $BACULAPWFILE + chown root:root $BACULAPWFILE + chmod 0600 $BACULAPWFILE + cat >>$BACULAPWFILE < +# +# will replace all occurance of XXX_var_XXX to value of environment variable `var' +# + +substitute() { + local TEMPLATE DEST HOSTNAME SUBST_VARIABLES SEDCMD + TEMPLATE="$1" + DEST="$2" + + HOSTNAME="${HOSTNAME:-`hostname --fqdn 2>/dev/null || hostname`}" + + SUBST_VARIABLES="${SUBST_VARIABLES:-HOSTNAME DBNAME DBUSER DBPASS \ + DIRPASSWD DIRMPASSWD SDPASSWD SDMPASSWD FDPASSWD FDMPASSWD}" + + SEDCMD="sed" + for var in $SUBST_VARIABLES; do + value=$(eval echo "\$$var" |sed -e 's/[\/&]/\\&/g') + SEDCMD="$SEDCMD -e s/XXX_${var}_XXX/$value/g" + done + + $SEDCMD "$TEMPLATE" >"$DEST" +} + +# +# Function to install config file from template +# Usage: install_config [